From 8c43f95249ecd46191ab8676dc929fb92632e916 Mon Sep 17 00:00:00 2001
From: Ryan Rose
Date: Mon, 27 Jul 2020 08:15:48 -0400
Subject: [PATCH 1/6] temp removal of k8s

---
 go.mod       |  1 -
 go.sum       |  2 --
 redispool.go | 30 +++++++++++++++---------------
 3 files changed, 15 insertions(+), 18 deletions(-)

diff --git a/go.mod b/go.mod
index b2da034..aaf5de5 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,6 @@ require (
 	github.com/FZambia/sentinel v1.1.0
 	github.com/Jim-Lambert-Bose/cache v1.0.3
 	github.com/alicebob/miniredis/v2 v2.11.0
-	github.com/ericchiang/k8s v1.2.0
 	github.com/gin-gonic/gin v1.4.0
 	github.com/gomodule/redigo v2.0.0+incompatible
 	github.com/google/uuid v1.1.1
diff --git a/go.sum b/go.sum
index 20c41e1..8764fa1 100644
--- a/go.sum
+++ b/go.sum
@@ -23,8 +23,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/ericchiang/k8s v1.2.0 h1:vxrMwEzY43oxu8aZyD/7b1s8tsBM+xoUoxjWECWFbPI=
-github.com/ericchiang/k8s v1.2.0/go.mod h1:/OmBgSq2cd9IANnsGHGlEz27nwMZV2YxlpXuQtU3Bz4=
 github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g=
 github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s=
 github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ=
diff --git a/redispool.go b/redispool.go
index c1960e1..b07c6bc 100644
--- a/redispool.go
+++ b/redispool.go
@@ -9,7 +9,6 @@ import (
 
 	sentinel "github.com/FZambia/sentinel"
 	"github.com/Jim-Lambert-Bose/cache/persistence"
-	"github.com/ericchiang/k8s"
 	"github.com/gomodule/redigo/redis"
 	"github.com/sirupsen/logrus"
 )
@@ -112,20 +111,21 @@ func NewSentinelPool(
 
 // inCluster - are we executing in the K8s cluster
 func inCluster(logger *logrus.Entry) bool {
-	// globals.InCluster = true
-	// return
-	namespace := os.Getenv("MY_POD_NAMESPACE")
-	logger.Infof("inCluster: env == %s", namespace)
-	// defer recoverInCluster() // maybe it won't panic, so we don't need this
-	_, err := k8s.NewInClusterClient()
-	// standard service account has no privs to list pods!!!
-	// so we're done
-	if err != nil {
-		logger.Infof("inCluster: false")
-		return false
-	}
-	logger.Infof("inCluster: true")
-	return true
+	// // globals.InCluster = true
+	// // return
+	// namespace := os.Getenv("MY_POD_NAMESPACE")
+	// logger.Infof("inCluster: env == %s", namespace)
+	// // defer recoverInCluster() // maybe it won't panic, so we don't need this
+	// _, err := k8s.NewInClusterClient()
+	// // standard service account has no privs to list pods!!!
+ // // so we're done + // if err != nil { + // logger.Infof("inCluster: false") + // return false + // } + // logger.Infof("inCluster: true") + // return true + return false } // RedisConnectionInfo - define all the things needed to manage a connection to redis (optionally using sentinel) From 355e5dca3b572529287bbf123eeace2d6f92a441 Mon Sep 17 00:00:00 2001 From: Ryan Rose Date: Mon, 27 Jul 2020 08:35:03 -0400 Subject: [PATCH 2/6] use k8s client to check in cluster --- go.mod | 9 +- go.sum | 125 +- redispool.go | 15 +- vendor/github.com/davecgh/go-spew/LICENSE | 15 + .../github.com/davecgh/go-spew/spew/bypass.go | 145 + .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 + .../github.com/davecgh/go-spew/spew/config.go | 306 + vendor/github.com/davecgh/go-spew/spew/doc.go | 211 + .../github.com/davecgh/go-spew/spew/dump.go | 509 + .../github.com/davecgh/go-spew/spew/format.go | 419 + .../github.com/davecgh/go-spew/spew/spew.go | 148 + vendor/github.com/ericchiang/k8s/.gitignore | 2 - vendor/github.com/ericchiang/k8s/.travis.yml | 36 - vendor/github.com/ericchiang/k8s/Makefile | 52 - vendor/github.com/ericchiang/k8s/README.md | 291 - .../k8s/apis/meta/v1/generated.pb.go | 9991 --- .../ericchiang/k8s/apis/meta/v1/json.go | 48 - vendor/github.com/ericchiang/k8s/client.go | 489 - vendor/github.com/ericchiang/k8s/codec.go | 98 - vendor/github.com/ericchiang/k8s/config.go | 170 - vendor/github.com/ericchiang/k8s/discovery.go | 66 - vendor/github.com/ericchiang/k8s/labels.go | 87 - vendor/github.com/ericchiang/k8s/resource.go | 274 - .../ericchiang/k8s/runtime/generated.pb.go | 887 - .../k8s/runtime/schema/generated.pb.go | 45 - .../k8s/util/intstr/generated.pb.go | 399 - vendor/github.com/ericchiang/k8s/watch.go | 199 - .../k8s/watch/versioned/generated.pb.go | 405 - vendor/github.com/gogo/protobuf/AUTHORS | 15 + vendor/github.com/gogo/protobuf/CONTRIBUTORS | 23 + vendor/github.com/gogo/protobuf/LICENSE | 35 + .../github.com/gogo/protobuf/proto/Makefile | 43 + .../{golang => gogo}/protobuf/proto/clone.go | 7 +- .../gogo/protobuf/proto/custom_gogo.go | 39 + .../{golang => gogo}/protobuf/proto/decode.go | 0 .../gogo/protobuf/proto/deprecated.go | 63 + .../github.com/gogo/protobuf/proto/discard.go | 350 + .../gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 49 + .../{golang => gogo}/protobuf/proto/encode.go | 2 + .../gogo/protobuf/proto/encode_gogo.go | 33 + .../{golang => gogo}/protobuf/proto/equal.go | 3 +- .../gogo/protobuf/proto/extensions.go | 605 + .../gogo/protobuf/proto/extensions_gogo.go | 389 + .../{golang => gogo}/protobuf/proto/lib.go | 32 +- .../gogo/protobuf/proto/lib_gogo.go | 50 + .../protobuf/proto/message_set.go | 0 .../protobuf/proto/pointer_reflect.go | 5 +- .../protobuf/proto/pointer_reflect_gogo.go | 59 + .../protobuf/proto/pointer_unsafe.go | 15 +- .../protobuf/proto/pointer_unsafe_gogo.go | 56 + .../gogo/protobuf/proto/properties.go | 610 + .../gogo/protobuf/proto/properties_gogo.go | 36 + .../gogo/protobuf/proto/skip_gogo.go | 119 + .../protobuf/proto/table_marshal.go | 323 +- .../gogo/protobuf/proto/table_marshal_gogo.go | 388 + .../protobuf/proto/table_merge.go | 32 +- .../protobuf/proto/table_unmarshal.go | 280 +- .../protobuf/proto/table_unmarshal_gogo.go | 385 + .../{golang => gogo}/protobuf/proto/text.go | 131 +- .../gogo/protobuf/proto/text_gogo.go | 57 + .../protobuf/proto/text_parser.go | 150 +- .../gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 49 
+ .../gogo/protobuf/proto/wrappers.go | 1888 + .../gogo/protobuf/proto/wrappers_gogo.go | 113 + .../gogo/protobuf/sortkeys/sortkeys.go | 101 + .../golang/protobuf/proto/buffer.go | 324 + .../golang/protobuf/proto/defaults.go | 63 + .../golang/protobuf/proto/deprecated.go | 126 +- .../golang/protobuf/proto/discard.go | 356 +- .../golang/protobuf/proto/extensions.go | 771 +- .../golang/protobuf/proto/properties.go | 648 +- .../github.com/golang/protobuf/proto/proto.go | 167 + .../golang/protobuf/proto/registry.go | 323 + .../golang/protobuf/proto/text_decode.go | 801 + .../golang/protobuf/proto/text_encode.go | 560 + .../github.com/golang/protobuf/proto/wire.go | 78 + .../golang/protobuf/proto/wrappers.go | 34 + .../github.com/golang/protobuf/ptypes/any.go | 165 + .../golang/protobuf/ptypes/any/any.pb.go | 62 + .../github.com/golang/protobuf/ptypes/doc.go | 6 + .../golang/protobuf/ptypes/duration.go | 72 + .../protobuf/ptypes/duration/duration.pb.go | 63 + .../golang/protobuf/ptypes/timestamp.go | 103 + .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 + vendor/github.com/google/gofuzz/.travis.yml | 13 + .../github.com/google/gofuzz/CONTRIBUTING.md | 67 + .../{ericchiang/k8s => google/gofuzz}/LICENSE | 0 vendor/github.com/google/gofuzz/README.md | 71 + vendor/github.com/google/gofuzz/doc.go | 18 + vendor/github.com/google/gofuzz/fuzz.go | 506 + vendor/github.com/google/gofuzz/go.mod | 3 + vendor/github.com/googleapis/gnostic/LICENSE | 203 + .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 8789 ++ .../gnostic/OpenAPIv2/OpenAPIv2.pb.go | 7347 ++ .../gnostic/OpenAPIv2/OpenAPIv2.proto | 666 + .../googleapis/gnostic/OpenAPIv2/README.md | 14 + .../googleapis/gnostic/OpenAPIv2/document.go | 26 + .../gnostic/OpenAPIv2/openapi-2.0.json | 1610 + .../googleapis/gnostic/compiler/README.md | 4 + .../googleapis/gnostic/compiler/context.go | 43 + .../googleapis/gnostic/compiler/error.go | 61 + .../googleapis/gnostic/compiler/extensions.go | 85 + .../googleapis/gnostic/compiler/helpers.go | 385 + .../googleapis/gnostic/compiler/main.go | 16 + .../googleapis/gnostic/compiler/reader.go | 307 + .../googleapis/gnostic/extensions/README.md | 13 + .../gnostic/extensions/extension.pb.go | 465 + .../gnostic/extensions/extension.proto | 96 + .../gnostic/extensions/extensions.go | 64 + .../googleapis/gnostic/jsonschema/README.md | 4 + .../googleapis/gnostic/jsonschema/base.go | 84 + .../googleapis/gnostic/jsonschema/display.go | 229 + .../googleapis/gnostic/jsonschema/models.go | 228 + .../gnostic/jsonschema/operations.go | 394 + .../googleapis/gnostic/jsonschema/reader.go | 442 + .../googleapis/gnostic/jsonschema/schema.json | 150 + .../googleapis/gnostic/jsonschema/writer.go | 369 + vendor/github.com/json-iterator/go/iter.go | 27 + .../github.com/json-iterator/go/iter_array.go | 10 +- .../json-iterator/go/iter_object.go | 24 +- .../json-iterator/go/iter_skip_sloppy.go | 19 + vendor/github.com/json-iterator/go/reflect.go | 5 + .../json-iterator/go/reflect_extension.go | 2 +- .../json-iterator/go/reflect_map.go | 4 + .../json-iterator/go/reflect_marshaler.go | 12 +- .../go/reflect_struct_decoder.go | 44 + vendor/golang.org/x/crypto/AUTHORS | 3 + vendor/golang.org/x/crypto/CONTRIBUTORS | 3 + vendor/golang.org/x/crypto/LICENSE | 27 + vendor/golang.org/x/crypto/PATENTS | 22 + .../x/crypto/ssh/terminal/terminal.go | 955 + .../golang.org/x/crypto/ssh/terminal/util.go | 114 + .../ssh/terminal/util_aix.go} | 11 +- .../x/crypto/ssh/terminal/util_bsd.go | 12 + .../x/crypto/ssh/terminal/util_linux.go | 10 + 
.../x/crypto/ssh/terminal/util_plan9.go | 58 + .../x/crypto/ssh/terminal/util_solaris.go | 124 + .../x/crypto/ssh/terminal/util_windows.go | 105 + vendor/golang.org/x/net/context/context.go | 56 + .../x/net/context/ctxhttp/ctxhttp.go | 71 + vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 300 + vendor/golang.org/x/net/context/pre_go19.go | 109 + vendor/golang.org/x/net/http2/server.go | 2 +- .../x/net/http2/writesched_random.go | 9 +- vendor/golang.org/x/oauth2/.travis.yml | 13 + vendor/golang.org/x/oauth2/AUTHORS | 3 + vendor/golang.org/x/oauth2/CONTRIBUTING.md | 26 + vendor/golang.org/x/oauth2/CONTRIBUTORS | 3 + vendor/golang.org/x/oauth2/LICENSE | 27 + vendor/golang.org/x/oauth2/README.md | 36 + vendor/golang.org/x/oauth2/go.mod | 10 + vendor/golang.org/x/oauth2/go.sum | 12 + .../x/oauth2/internal/client_appengine.go | 13 + vendor/golang.org/x/oauth2/internal/doc.go | 6 + vendor/golang.org/x/oauth2/internal/oauth2.go | 37 + vendor/golang.org/x/oauth2/internal/token.go | 294 + .../golang.org/x/oauth2/internal/transport.go | 33 + vendor/golang.org/x/oauth2/oauth2.go | 381 + vendor/golang.org/x/oauth2/token.go | 178 + vendor/golang.org/x/oauth2/transport.go | 89 + .../golang.org/x/sys/unix/affinity_linux.go | 46 +- vendor/golang.org/x/sys/unix/ioctl.go | 41 +- vendor/golang.org/x/sys/unix/mkall.sh | 4 +- vendor/golang.org/x/sys/unix/mkerrors.sh | 50 +- .../x/sys/unix/sockcmsg_dragonfly.go | 16 + .../golang.org/x/sys/unix/sockcmsg_linux.go | 2 +- vendor/golang.org/x/sys/unix/sockcmsg_unix.go | 36 +- .../x/sys/unix/sockcmsg_unix_other.go | 38 + vendor/golang.org/x/sys/unix/syscall_aix.go | 39 +- .../golang.org/x/sys/unix/syscall_aix_ppc.go | 4 + .../x/sys/unix/syscall_aix_ppc64.go | 4 + vendor/golang.org/x/sys/unix/syscall_bsd.go | 4 +- .../x/sys/unix/syscall_darwin.1_12.go | 29 + .../x/sys/unix/syscall_darwin.1_13.go | 103 + .../golang.org/x/sys/unix/syscall_darwin.go | 40 +- .../x/sys/unix/syscall_darwin_386.1_11.go | 9 + .../x/sys/unix/syscall_darwin_386.go | 8 +- .../x/sys/unix/syscall_darwin_amd64.1_11.go | 9 + .../x/sys/unix/syscall_darwin_amd64.go | 8 +- .../x/sys/unix/syscall_darwin_arm.1_11.go | 11 + .../x/sys/unix/syscall_darwin_arm.go | 16 +- .../x/sys/unix/syscall_darwin_arm64.1_11.go | 11 + .../x/sys/unix/syscall_darwin_arm64.go | 16 +- .../x/sys/unix/syscall_darwin_libSystem.go | 2 + .../x/sys/unix/syscall_dragonfly.go | 59 +- .../x/sys/unix/syscall_dragonfly_amd64.go | 4 + .../golang.org/x/sys/unix/syscall_freebsd.go | 41 +- .../x/sys/unix/syscall_freebsd_386.go | 4 + .../x/sys/unix/syscall_freebsd_amd64.go | 4 + .../x/sys/unix/syscall_freebsd_arm.go | 4 + .../x/sys/unix/syscall_freebsd_arm64.go | 4 + vendor/golang.org/x/sys/unix/syscall_linux.go | 175 +- .../x/sys/unix/syscall_linux_386.go | 4 + .../x/sys/unix/syscall_linux_amd64.go | 4 + .../x/sys/unix/syscall_linux_arm.go | 4 + .../x/sys/unix/syscall_linux_arm64.go | 4 + .../x/sys/unix/syscall_linux_mips64x.go | 4 + .../x/sys/unix/syscall_linux_mipsx.go | 4 + .../x/sys/unix/syscall_linux_ppc64x.go | 4 + .../x/sys/unix/syscall_linux_riscv64.go | 4 + .../x/sys/unix/syscall_linux_s390x.go | 4 + .../x/sys/unix/syscall_linux_sparc64.go | 4 + .../golang.org/x/sys/unix/syscall_netbsd.go | 41 +- .../x/sys/unix/syscall_netbsd_386.go | 4 + .../x/sys/unix/syscall_netbsd_amd64.go | 4 + .../x/sys/unix/syscall_netbsd_arm.go | 4 + .../x/sys/unix/syscall_netbsd_arm64.go | 4 + .../golang.org/x/sys/unix/syscall_openbsd.go | 41 +- 
.../x/sys/unix/syscall_openbsd_386.go | 4 + .../x/sys/unix/syscall_openbsd_amd64.go | 4 + .../x/sys/unix/syscall_openbsd_arm.go | 4 + .../x/sys/unix/syscall_openbsd_arm64.go | 4 + .../golang.org/x/sys/unix/syscall_solaris.go | 34 +- .../x/sys/unix/syscall_solaris_amd64.go | 4 + .../x/sys/unix/zerrors_darwin_386.go | 3 +- .../x/sys/unix/zerrors_darwin_amd64.go | 3 +- .../x/sys/unix/zerrors_darwin_arm.go | 3 +- .../x/sys/unix/zerrors_darwin_arm64.go | 3 +- .../x/sys/unix/zerrors_dragonfly_amd64.go | 1 + .../x/sys/unix/zerrors_freebsd_386.go | 3 +- .../x/sys/unix/zerrors_freebsd_amd64.go | 3 +- .../x/sys/unix/zerrors_freebsd_arm.go | 3 +- .../x/sys/unix/zerrors_freebsd_arm64.go | 3 +- .../x/sys/unix/zerrors_linux_386.go | 117 +- .../x/sys/unix/zerrors_linux_amd64.go | 117 +- .../x/sys/unix/zerrors_linux_arm.go | 117 +- .../x/sys/unix/zerrors_linux_arm64.go | 119 +- .../x/sys/unix/zerrors_linux_mips.go | 117 +- .../x/sys/unix/zerrors_linux_mips64.go | 117 +- .../x/sys/unix/zerrors_linux_mips64le.go | 117 +- .../x/sys/unix/zerrors_linux_mipsle.go | 117 +- .../x/sys/unix/zerrors_linux_ppc64.go | 117 +- .../x/sys/unix/zerrors_linux_ppc64le.go | 117 +- .../x/sys/unix/zerrors_linux_riscv64.go | 117 +- .../x/sys/unix/zerrors_linux_s390x.go | 117 +- .../x/sys/unix/zerrors_linux_sparc64.go | 117 +- .../x/sys/unix/zerrors_netbsd_386.go | 3 +- .../x/sys/unix/zerrors_netbsd_amd64.go | 3 +- .../x/sys/unix/zerrors_netbsd_arm.go | 3 +- .../x/sys/unix/zerrors_netbsd_arm64.go | 3 +- .../x/sys/unix/zerrors_openbsd_386.go | 17 +- .../x/sys/unix/zerrors_openbsd_amd64.go | 6 +- .../x/sys/unix/zerrors_openbsd_arm.go | 11 +- .../x/sys/unix/zerrors_openbsd_arm64.go | 1 + .../x/sys/unix/zerrors_solaris_amd64.go | 3 +- .../x/sys/unix/zsyscall_darwin_386.1_11.go | 93 +- .../x/sys/unix/zsyscall_darwin_386.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_386.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_386.go | 114 +- .../x/sys/unix/zsyscall_darwin_386.s | 12 +- .../x/sys/unix/zsyscall_darwin_amd64.1_11.go | 93 +- .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_amd64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_amd64.go | 99 +- .../x/sys/unix/zsyscall_darwin_amd64.s | 10 +- .../x/sys/unix/zsyscall_darwin_arm.1_11.go | 33 +- .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_arm.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm.go | 56 +- .../x/sys/unix/zsyscall_darwin_arm.s | 6 +- .../x/sys/unix/zsyscall_darwin_arm64.1_11.go | 33 +- .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 41 + .../x/sys/unix/zsyscall_darwin_arm64.1_13.s | 12 + .../x/sys/unix/zsyscall_darwin_arm64.go | 56 +- .../x/sys/unix/zsyscall_darwin_arm64.s | 6 +- .../x/sys/unix/zsyscall_dragonfly_amd64.go | 5 +- .../x/sys/unix/zsyscall_freebsd_386.go | 5 +- .../x/sys/unix/zsyscall_freebsd_amd64.go | 45 +- .../x/sys/unix/zsyscall_freebsd_arm.go | 45 +- .../x/sys/unix/zsyscall_freebsd_arm64.go | 45 +- .../x/sys/unix/zsyscall_linux_386.go | 30 + .../x/sys/unix/zsyscall_linux_amd64.go | 30 + .../x/sys/unix/zsyscall_linux_arm.go | 30 + .../x/sys/unix/zsyscall_linux_arm64.go | 30 + .../x/sys/unix/zsyscall_linux_mips.go | 30 + .../x/sys/unix/zsyscall_linux_mips64.go | 30 + .../x/sys/unix/zsyscall_linux_mips64le.go | 30 + .../x/sys/unix/zsyscall_linux_mipsle.go | 30 + .../x/sys/unix/zsyscall_linux_ppc64.go | 30 + .../x/sys/unix/zsyscall_linux_ppc64le.go | 30 + .../x/sys/unix/zsyscall_linux_riscv64.go | 30 + .../x/sys/unix/zsyscall_linux_s390x.go | 30 + .../x/sys/unix/zsyscall_linux_sparc64.go | 30 + 
.../x/sys/unix/zsyscall_netbsd_386.go | 37 +- .../x/sys/unix/zsyscall_netbsd_amd64.go | 37 +- .../x/sys/unix/zsyscall_netbsd_arm.go | 37 +- .../x/sys/unix/zsyscall_netbsd_arm64.go | 37 +- .../x/sys/unix/zsyscall_openbsd_386.go | 37 +- .../x/sys/unix/zsyscall_openbsd_amd64.go | 37 +- .../x/sys/unix/zsyscall_openbsd_arm.go | 37 +- .../x/sys/unix/zsyscall_openbsd_arm64.go | 37 +- .../x/sys/unix/zsyscall_solaris_amd64.go | 5 +- .../x/sys/unix/zsysnum_linux_386.go | 2 + .../x/sys/unix/zsysnum_linux_amd64.go | 2 + .../x/sys/unix/zsysnum_linux_arm.go | 2 + .../x/sys/unix/zsysnum_linux_arm64.go | 1 + .../x/sys/unix/zsysnum_linux_mips.go | 1 + .../x/sys/unix/zsysnum_linux_mips64.go | 1 + .../x/sys/unix/zsysnum_linux_mips64le.go | 1 + .../x/sys/unix/zsysnum_linux_mipsle.go | 1 + .../x/sys/unix/zsysnum_linux_ppc64.go | 2 + .../x/sys/unix/zsysnum_linux_ppc64le.go | 2 + .../x/sys/unix/zsysnum_linux_riscv64.go | 2 + .../x/sys/unix/zsysnum_linux_s390x.go | 2 + .../x/sys/unix/zsysnum_linux_sparc64.go | 1 + .../golang.org/x/sys/unix/ztypes_linux_386.go | 137 +- .../x/sys/unix/ztypes_linux_amd64.go | 137 +- .../golang.org/x/sys/unix/ztypes_linux_arm.go | 137 +- .../x/sys/unix/ztypes_linux_arm64.go | 137 +- .../x/sys/unix/ztypes_linux_mips.go | 137 +- .../x/sys/unix/ztypes_linux_mips64.go | 137 +- .../x/sys/unix/ztypes_linux_mips64le.go | 137 +- .../x/sys/unix/ztypes_linux_mipsle.go | 137 +- .../x/sys/unix/ztypes_linux_ppc64.go | 137 +- .../x/sys/unix/ztypes_linux_ppc64le.go | 137 +- .../x/sys/unix/ztypes_linux_riscv64.go | 138 +- .../x/sys/unix/ztypes_linux_s390x.go | 137 +- .../x/sys/unix/ztypes_linux_sparc64.go | 137 +- .../x/sys/windows/asm_windows_386.s | 13 - .../x/sys/windows/asm_windows_amd64.s | 13 - .../golang.org/x/sys/windows/dll_windows.go | 22 +- vendor/golang.org/x/sys/windows/mksyscall.go | 2 +- .../x/sys/windows/security_windows.go | 602 +- vendor/golang.org/x/sys/windows/service.go | 4 + .../x/sys/windows/syscall_windows.go | 71 +- .../golang.org/x/sys/windows/types_windows.go | 112 +- .../x/sys/windows/zsyscall_windows.go | 1175 +- .../golang.org/x/text/transform/transform.go | 6 +- vendor/golang.org/x/text/unicode/bidi/bidi.go | 2 +- .../golang.org/x/text/unicode/bidi/bracket.go | 4 +- vendor/golang.org/x/text/unicode/bidi/core.go | 2 +- .../x/text/unicode/bidi/tables10.0.0.go | 2 +- .../x/text/unicode/bidi/tables11.0.0.go | 1887 + .../x/text/unicode/norm/composition.go | 8 +- .../x/text/unicode/norm/forminfo.go | 19 + vendor/golang.org/x/text/unicode/norm/iter.go | 3 +- .../x/text/unicode/norm/normalize.go | 4 +- .../x/text/unicode/norm/readwriter.go | 4 +- .../x/text/unicode/norm/tables10.0.0.go | 1892 +- .../x/text/unicode/norm/tables11.0.0.go | 7693 ++ .../x/text/unicode/norm/tables9.0.0.go | 1890 +- .../x/text/unicode/norm/transform.go | 10 +- vendor/golang.org/x/time/AUTHORS | 3 + vendor/golang.org/x/time/CONTRIBUTORS | 3 + vendor/golang.org/x/time/LICENSE | 27 + vendor/golang.org/x/time/PATENTS | 22 + vendor/golang.org/x/time/rate/rate.go | 402 + vendor/google.golang.org/appengine/LICENSE | 202 + .../appengine/internal/api.go | 671 + .../appengine/internal/api_classic.go | 169 + .../appengine/internal/api_common.go | 123 + .../appengine/internal/app_id.go | 28 + .../appengine/internal/base/api_base.pb.go | 308 + .../appengine/internal/base/api_base.proto | 33 + .../internal/datastore/datastore_v3.pb.go | 4367 + .../internal/datastore/datastore_v3.proto | 551 + .../appengine/internal/identity.go | 55 + .../appengine/internal/identity_classic.go | 61 + 
.../appengine/internal/identity_flex.go | 11 + .../appengine/internal/identity_vm.go | 134 + .../appengine/internal/internal.go | 110 + .../appengine/internal/log/log_service.pb.go | 1313 + .../appengine/internal/log/log_service.proto | 150 + .../appengine/internal/main.go | 16 + .../appengine/internal/main_common.go | 7 + .../appengine/internal/main_vm.go | 69 + .../appengine/internal/metadata.go | 60 + .../appengine/internal/net.go | 56 + .../appengine/internal/regen.sh | 40 + .../internal/remote_api/remote_api.pb.go | 361 + .../internal/remote_api/remote_api.proto | 44 + .../appengine/internal/transaction.go | 115 + .../internal/urlfetch/urlfetch_service.pb.go | 527 + .../internal/urlfetch/urlfetch_service.proto | 64 + .../appengine/urlfetch/urlfetch.go | 210 + vendor/google.golang.org/protobuf/AUTHORS | 3 + .../google.golang.org/protobuf/CONTRIBUTORS | 3 + vendor/google.golang.org/protobuf/LICENSE | 27 + vendor/google.golang.org/protobuf/PATENTS | 22 + .../protobuf/encoding/prototext/decode.go | 789 + .../protobuf/encoding/prototext/doc.go | 7 + .../protobuf/encoding/prototext/encode.go | 426 + .../protobuf/encoding/protowire/wire.go | 538 + .../protobuf/internal/descfmt/stringer.go | 316 + .../protobuf/internal/descopts/options.go | 29 + .../protobuf/internal/detrand/rand.go | 61 + .../internal/encoding/defval/default.go | 213 + .../encoding/messageset/messageset.go | 258 + .../protobuf/internal/encoding/tag/tag.go | 207 + .../protobuf/internal/encoding/text/decode.go | 665 + .../internal/encoding/text/decode_number.go | 190 + .../internal/encoding/text/decode_string.go | 161 + .../internal/encoding/text/decode_token.go | 373 + .../protobuf/internal/encoding/text/doc.go | 29 + .../protobuf/internal/encoding/text/encode.go | 267 + .../protobuf/internal/errors/errors.go | 89 + .../protobuf/internal/errors/is_go112.go | 39 + .../protobuf/internal/errors/is_go113.go | 12 + .../protobuf/internal/fieldnum/any_gen.go | 13 + .../protobuf/internal/fieldnum/api_gen.go | 35 + .../internal/fieldnum/descriptor_gen.go | 240 + .../protobuf/internal/fieldnum/doc.go | 7 + .../internal/fieldnum/duration_gen.go | 13 + .../protobuf/internal/fieldnum/empty_gen.go | 10 + .../internal/fieldnum/field_mask_gen.go | 12 + .../internal/fieldnum/source_context_gen.go | 12 + .../protobuf/internal/fieldnum/struct_gen.go | 33 + .../internal/fieldnum/timestamp_gen.go | 13 + .../protobuf/internal/fieldnum/type_gen.go | 53 + .../internal/fieldnum/wrappers_gen.go | 52 + .../protobuf/internal/fieldsort/fieldsort.go | 40 + .../protobuf/internal/filedesc/build.go | 155 + .../protobuf/internal/filedesc/desc.go | 613 + .../protobuf/internal/filedesc/desc_init.go | 471 + .../protobuf/internal/filedesc/desc_lazy.go | 704 + .../protobuf/internal/filedesc/desc_list.go | 286 + .../internal/filedesc/desc_list_gen.go | 345 + .../protobuf/internal/filedesc/placeholder.go | 107 + .../protobuf/internal/filetype/build.go | 297 + .../protobuf/internal/flags/flags.go | 24 + .../internal/flags/proto_legacy_disable.go | 9 + .../internal/flags/proto_legacy_enable.go | 9 + .../protobuf/internal/genname/name.go | 25 + .../protobuf/internal/impl/api_export.go | 170 + .../protobuf/internal/impl/checkinit.go | 141 + .../protobuf/internal/impl/codec_extension.go | 223 + .../protobuf/internal/impl/codec_field.go | 828 + .../protobuf/internal/impl/codec_gen.go | 5637 ++ .../protobuf/internal/impl/codec_map.go | 388 + .../protobuf/internal/impl/codec_map_go111.go | 37 + .../protobuf/internal/impl/codec_map_go112.go | 11 + 
.../protobuf/internal/impl/codec_message.go | 159 + .../internal/impl/codec_messageset.go | 120 + .../protobuf/internal/impl/codec_reflect.go | 209 + .../protobuf/internal/impl/codec_tables.go | 557 + .../protobuf/internal/impl/codec_unsafe.go | 17 + .../protobuf/internal/impl/convert.go | 467 + .../protobuf/internal/impl/convert_list.go | 141 + .../protobuf/internal/impl/convert_map.go | 121 + .../protobuf/internal/impl/decode.go | 274 + .../protobuf/internal/impl/encode.go | 199 + .../protobuf/internal/impl/enum.go | 21 + .../protobuf/internal/impl/extension.go | 156 + .../protobuf/internal/impl/legacy_enum.go | 219 + .../protobuf/internal/impl/legacy_export.go | 92 + .../internal/impl/legacy_extension.go | 175 + .../protobuf/internal/impl/legacy_file.go | 81 + .../protobuf/internal/impl/legacy_message.go | 502 + .../protobuf/internal/impl/merge.go | 176 + .../protobuf/internal/impl/merge_gen.go | 209 + .../protobuf/internal/impl/message.go | 215 + .../protobuf/internal/impl/message_reflect.go | 364 + .../internal/impl/message_reflect_field.go | 466 + .../internal/impl/message_reflect_gen.go | 249 + .../protobuf/internal/impl/pointer_reflect.go | 177 + .../protobuf/internal/impl/pointer_unsafe.go | 173 + .../protobuf/internal/impl/validate.go | 575 + .../protobuf/internal/impl/weak.go | 74 + .../protobuf/internal/mapsort/mapsort.go | 43 + .../protobuf/internal/pragma/pragma.go | 29 + .../protobuf/internal/set/ints.go | 58 + .../protobuf/internal/strs/strings.go | 196 + .../protobuf/internal/strs/strings_pure.go | 27 + .../protobuf/internal/strs/strings_unsafe.go | 94 + .../protobuf/internal/version/version.go | 79 + .../protobuf/proto/checkinit.go | 71 + .../protobuf/proto/decode.go | 270 + .../protobuf/proto/decode_gen.go | 603 + .../google.golang.org/protobuf/proto/doc.go | 94 + .../protobuf/proto/encode.go | 343 + .../protobuf/proto/encode_gen.go | 97 + .../google.golang.org/protobuf/proto/equal.go | 154 + .../protobuf/proto/extension.go | 92 + .../google.golang.org/protobuf/proto/merge.go | 139 + .../protobuf/proto/messageset.go | 88 + .../google.golang.org/protobuf/proto/proto.go | 34 + .../protobuf/proto/proto_methods.go | 19 + .../protobuf/proto/proto_reflect.go | 19 + .../google.golang.org/protobuf/proto/reset.go | 43 + .../google.golang.org/protobuf/proto/size.go | 94 + .../protobuf/proto/size_gen.go | 55 + .../protobuf/proto/wrappers.go | 29 + .../protobuf/reflect/protoreflect/methods.go | 77 + .../protobuf/reflect/protoreflect/proto.go | 478 + .../protobuf/reflect/protoreflect/source.go | 52 + .../protobuf/reflect/protoreflect/type.go | 631 + .../protobuf/reflect/protoreflect/value.go | 285 + .../reflect/protoreflect/value_pure.go | 59 + .../reflect/protoreflect/value_union.go | 409 + .../reflect/protoreflect/value_unsafe.go | 98 + .../reflect/protoregistry/registry.go | 768 + .../protobuf/runtime/protoiface/legacy.go | 15 + .../protobuf/runtime/protoiface/methods.go | 167 + .../protobuf/runtime/protoimpl/impl.go | 44 + .../protobuf/runtime/protoimpl/version.go | 56 + .../protobuf/types/known/anypb/any.pb.go | 287 + .../types/known/durationpb/duration.pb.go | 249 + .../types/known/timestamppb/timestamp.pb.go | 271 + vendor/gopkg.in/inf.v0/LICENSE | 28 + vendor/gopkg.in/inf.v0/dec.go | 615 + vendor/gopkg.in/inf.v0/rounder.go | 145 + vendor/gopkg.in/yaml.v2/.travis.yml | 18 +- vendor/gopkg.in/yaml.v2/decode.go | 48 +- vendor/gopkg.in/yaml.v2/resolve.go | 2 +- vendor/gopkg.in/yaml.v2/scannerc.go | 123 +- vendor/gopkg.in/yaml.v2/yaml.go | 2 +- vendor/gopkg.in/yaml.v2/yamlh.go | 1 
+ vendor/gopkg.in/yaml.v3/.travis.yml | 17 + vendor/gopkg.in/yaml.v3/LICENSE | 50 + vendor/gopkg.in/yaml.v3/NOTICE | 13 + vendor/gopkg.in/yaml.v3/README.md | 150 + vendor/gopkg.in/yaml.v3/apic.go | 747 + vendor/gopkg.in/yaml.v3/decode.go | 948 + vendor/gopkg.in/yaml.v3/emitterc.go | 2022 + vendor/gopkg.in/yaml.v3/encode.go | 572 + vendor/gopkg.in/yaml.v3/go.mod | 5 + vendor/gopkg.in/yaml.v3/parserc.go | 1249 + vendor/gopkg.in/yaml.v3/readerc.go | 434 + vendor/gopkg.in/yaml.v3/resolve.go | 326 + vendor/gopkg.in/yaml.v3/scannerc.go | 3028 + vendor/gopkg.in/yaml.v3/sorter.go | 134 + vendor/gopkg.in/yaml.v3/writerc.go | 48 + vendor/gopkg.in/yaml.v3/yaml.go | 693 + vendor/gopkg.in/yaml.v3/yamlh.go | 807 + vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 + vendor/k8s.io/api/LICENSE | 202 + .../api/admissionregistration/v1beta1/doc.go | 26 + .../v1beta1/generated.pb.go | 3444 + .../v1beta1/generated.proto | 487 + .../admissionregistration/v1beta1/register.go | 53 + .../admissionregistration/v1beta1/types.go | 559 + .../v1beta1/types_swagger_doc_generated.go | 151 + .../v1beta1/zz_generated.deepcopy.go | 396 + vendor/k8s.io/api/apps/v1/doc.go | 21 + vendor/k8s.io/api/apps/v1/generated.pb.go | 8238 ++ vendor/k8s.io/api/apps/v1/generated.proto | 701 + vendor/k8s.io/api/apps/v1/register.go | 60 + vendor/k8s.io/api/apps/v1/types.go | 826 + .../apps/v1/types_swagger_doc_generated.go | 365 + .../api/apps/v1/zz_generated.deepcopy.go | 772 + vendor/k8s.io/api/apps/v1beta1/doc.go | 21 + .../k8s.io/api/apps/v1beta1/generated.pb.go | 6246 ++ .../k8s.io/api/apps/v1beta1/generated.proto | 484 + vendor/k8s.io/api/apps/v1beta1/register.go | 58 + vendor/k8s.io/api/apps/v1beta1/types.go | 567 + .../v1beta1/types_swagger_doc_generated.go | 273 + .../api/apps/v1beta1/zz_generated.deepcopy.go | 594 + vendor/k8s.io/api/apps/v1beta2/doc.go | 21 + .../k8s.io/api/apps/v1beta2/generated.pb.go | 9014 +++ .../k8s.io/api/apps/v1beta2/generated.proto | 752 + vendor/k8s.io/api/apps/v1beta2/register.go | 61 + vendor/k8s.io/api/apps/v1beta2/types.go | 876 + .../v1beta2/types_swagger_doc_generated.go | 396 + .../api/apps/v1beta2/zz_generated.deepcopy.go | 839 + .../api/auditregistration/v1alpha1/doc.go | 23 + .../v1alpha1/generated.pb.go | 2030 + .../v1alpha1/generated.proto | 162 + .../auditregistration/v1alpha1/register.go | 56 + .../api/auditregistration/v1alpha1/types.go | 198 + .../v1alpha1/types_swagger_doc_generated.go | 111 + .../v1alpha1/zz_generated.deepcopy.go | 229 + vendor/k8s.io/api/authentication/v1/doc.go | 22 + .../api/authentication/v1/generated.pb.go | 2581 + .../api/authentication/v1/generated.proto | 182 + .../k8s.io/api/authentication/v1/register.go | 52 + vendor/k8s.io/api/authentication/v1/types.go | 189 + .../v1/types_swagger_doc_generated.go | 115 + .../v1/zz_generated.deepcopy.go | 244 + .../k8s.io/api/authentication/v1beta1/doc.go | 22 + .../authentication/v1beta1/generated.pb.go | 1558 + .../authentication/v1beta1/generated.proto | 118 + .../api/authentication/v1beta1/register.go | 51 + .../api/authentication/v1beta1/types.go | 110 + .../v1beta1/types_swagger_doc_generated.go | 74 + .../v1beta1/zz_generated.deepcopy.go | 152 + vendor/k8s.io/api/authorization/v1/doc.go | 23 + .../api/authorization/v1/generated.pb.go | 4087 + .../api/authorization/v1/generated.proto | 272 + .../k8s.io/api/authorization/v1/register.go | 55 + vendor/k8s.io/api/authorization/v1/types.go | 268 + .../v1/types_swagger_doc_generated.go | 173 + .../authorization/v1/zz_generated.deepcopy.go | 385 + 
.../k8s.io/api/authorization/v1beta1/doc.go | 23 + .../api/authorization/v1beta1/generated.pb.go | 4087 + .../api/authorization/v1beta1/generated.proto | 272 + .../api/authorization/v1beta1/register.go | 55 + .../k8s.io/api/authorization/v1beta1/types.go | 268 + .../v1beta1/types_swagger_doc_generated.go | 173 + .../v1beta1/zz_generated.deepcopy.go | 385 + vendor/k8s.io/api/autoscaling/v1/doc.go | 21 + .../k8s.io/api/autoscaling/v1/generated.pb.go | 5570 ++ .../k8s.io/api/autoscaling/v1/generated.proto | 419 + vendor/k8s.io/api/autoscaling/v1/register.go | 53 + vendor/k8s.io/api/autoscaling/v1/types.go | 432 + .../v1/types_swagger_doc_generated.go | 250 + .../autoscaling/v1/zz_generated.deepcopy.go | 515 + vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 21 + .../api/autoscaling/v2beta1/generated.pb.go | 5095 ++ .../api/autoscaling/v2beta1/generated.proto | 400 + .../api/autoscaling/v2beta1/register.go | 52 + .../k8s.io/api/autoscaling/v2beta1/types.go | 408 + .../v2beta1/types_swagger_doc_generated.go | 221 + .../v2beta1/zz_generated.deepcopy.go | 466 + vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 21 + .../api/autoscaling/v2beta2/generated.pb.go | 6062 ++ .../api/autoscaling/v2beta2/generated.proto | 438 + .../api/autoscaling/v2beta2/register.go | 50 + .../k8s.io/api/autoscaling/v2beta2/types.go | 480 + .../v2beta2/types_swagger_doc_generated.go | 273 + .../v2beta2/zz_generated.deepcopy.go | 565 + vendor/k8s.io/api/batch/v1/doc.go | 21 + vendor/k8s.io/api/batch/v1/generated.pb.go | 1854 + vendor/k8s.io/api/batch/v1/generated.proto | 184 + vendor/k8s.io/api/batch/v1/register.go | 52 + vendor/k8s.io/api/batch/v1/types.go | 193 + .../batch/v1/types_swagger_doc_generated.go | 95 + .../api/batch/v1/zz_generated.deepcopy.go | 188 + vendor/k8s.io/api/batch/v1beta1/doc.go | 21 + .../k8s.io/api/batch/v1beta1/generated.pb.go | 1743 + .../k8s.io/api/batch/v1beta1/generated.proto | 137 + vendor/k8s.io/api/batch/v1beta1/register.go | 53 + vendor/k8s.io/api/batch/v1beta1/types.go | 158 + .../v1beta1/types_swagger_doc_generated.go | 96 + .../batch/v1beta1/zz_generated.deepcopy.go | 194 + vendor/k8s.io/api/batch/v2alpha1/doc.go | 21 + .../k8s.io/api/batch/v2alpha1/generated.pb.go | 1743 + .../k8s.io/api/batch/v2alpha1/generated.proto | 135 + vendor/k8s.io/api/batch/v2alpha1/register.go | 53 + vendor/k8s.io/api/batch/v2alpha1/types.go | 156 + .../v2alpha1/types_swagger_doc_generated.go | 96 + .../batch/v2alpha1/zz_generated.deepcopy.go | 194 + vendor/k8s.io/api/certificates/v1beta1/doc.go | 23 + .../api/certificates/v1beta1/generated.pb.go | 1956 + .../api/certificates/v1beta1/generated.proto | 134 + .../api/certificates/v1beta1/register.go | 59 + .../k8s.io/api/certificates/v1beta1/types.go | 190 + .../v1beta1/types_swagger_doc_generated.go | 75 + .../v1beta1/zz_generated.deepcopy.go | 202 + vendor/k8s.io/api/coordination/v1/doc.go | 23 + .../api/coordination/v1/generated.pb.go | 976 + .../api/coordination/v1/generated.proto | 80 + vendor/k8s.io/api/coordination/v1/register.go | 53 + vendor/k8s.io/api/coordination/v1/types.go | 74 + .../v1/types_swagger_doc_generated.go | 63 + .../coordination/v1/zz_generated.deepcopy.go | 124 + vendor/k8s.io/api/coordination/v1beta1/doc.go | 23 + .../api/coordination/v1beta1/generated.pb.go | 976 + .../api/coordination/v1beta1/generated.proto | 80 + .../api/coordination/v1beta1/register.go | 53 + .../k8s.io/api/coordination/v1beta1/types.go | 74 + .../v1beta1/types_swagger_doc_generated.go | 63 + .../v1beta1/zz_generated.deepcopy.go | 124 + 
.../api/core/v1/annotation_key_constants.go | 106 + vendor/k8s.io/api/core/v1/doc.go | 22 + vendor/k8s.io/api/core/v1/generated.pb.go | 66605 ++++++++++++++++ vendor/k8s.io/api/core/v1/generated.proto | 5332 ++ vendor/k8s.io/api/core/v1/objectreference.go | 33 + vendor/k8s.io/api/core/v1/register.go | 100 + vendor/k8s.io/api/core/v1/resource.go | 64 + vendor/k8s.io/api/core/v1/taint.go | 39 + vendor/k8s.io/api/core/v1/toleration.go | 56 + vendor/k8s.io/api/core/v1/types.go | 5961 ++ .../core/v1/types_swagger_doc_generated.go | 2473 + .../k8s.io/api/core/v1/well_known_labels.go | 50 + .../k8s.io/api/core/v1/well_known_taints.go | 48 + .../api/core/v1/zz_generated.deepcopy.go | 5805 ++ vendor/k8s.io/api/events/v1beta1/doc.go | 23 + .../k8s.io/api/events/v1beta1/generated.pb.go | 1448 + .../k8s.io/api/events/v1beta1/generated.proto | 122 + vendor/k8s.io/api/events/v1beta1/register.go | 53 + vendor/k8s.io/api/events/v1beta1/types.go | 123 + .../v1beta1/types_swagger_doc_generated.go | 73 + .../events/v1beta1/zz_generated.deepcopy.go | 117 + vendor/k8s.io/api/extensions/v1beta1/doc.go | 21 + .../api/extensions/v1beta1/generated.pb.go | 15315 ++++ .../api/extensions/v1beta1/generated.proto | 1234 + .../k8s.io/api/extensions/v1beta1/register.go | 65 + vendor/k8s.io/api/extensions/v1beta1/types.go | 1465 + .../v1beta1/types_swagger_doc_generated.go | 656 + .../v1beta1/zz_generated.deepcopy.go | 1489 + vendor/k8s.io/api/networking/v1/doc.go | 22 + .../k8s.io/api/networking/v1/generated.pb.go | 2202 + .../k8s.io/api/networking/v1/generated.proto | 195 + vendor/k8s.io/api/networking/v1/register.go | 53 + vendor/k8s.io/api/networking/v1/types.go | 203 + .../v1/types_swagger_doc_generated.go | 113 + .../networking/v1/zz_generated.deepcopy.go | 262 + vendor/k8s.io/api/networking/v1beta1/doc.go | 22 + .../api/networking/v1beta1/generated.pb.go | 3183 + .../api/networking/v1beta1/generated.proto | 276 + .../k8s.io/api/networking/v1beta1/register.go | 58 + vendor/k8s.io/api/networking/v1beta1/types.go | 320 + .../v1beta1/types_swagger_doc_generated.go | 160 + .../v1beta1/well_known_annotations.go | 32 + .../v1beta1/zz_generated.deepcopy.go | 351 + vendor/k8s.io/api/node/v1alpha1/doc.go | 23 + .../k8s.io/api/node/v1alpha1/generated.pb.go | 1583 + .../k8s.io/api/node/v1alpha1/generated.proto | 118 + vendor/k8s.io/api/node/v1alpha1/register.go | 52 + vendor/k8s.io/api/node/v1alpha1/types.go | 116 + .../v1alpha1/types_swagger_doc_generated.go | 80 + .../node/v1alpha1/zz_generated.deepcopy.go | 165 + vendor/k8s.io/api/node/v1beta1/doc.go | 23 + .../k8s.io/api/node/v1beta1/generated.pb.go | 1412 + .../k8s.io/api/node/v1beta1/generated.proto | 108 + vendor/k8s.io/api/node/v1beta1/register.go | 52 + vendor/k8s.io/api/node/v1beta1/types.go | 106 + .../v1beta1/types_swagger_doc_generated.go | 71 + .../api/node/v1beta1/zz_generated.deepcopy.go | 148 + vendor/k8s.io/api/policy/v1beta1/doc.go | 24 + .../k8s.io/api/policy/v1beta1/generated.pb.go | 5561 ++ .../k8s.io/api/policy/v1beta1/generated.proto | 400 + vendor/k8s.io/api/policy/v1beta1/register.go | 56 + vendor/k8s.io/api/policy/v1beta1/types.go | 489 + .../v1beta1/types_swagger_doc_generated.go | 243 + .../policy/v1beta1/zz_generated.deepcopy.go | 540 + vendor/k8s.io/api/rbac/v1/doc.go | 23 + vendor/k8s.io/api/rbac/v1/generated.pb.go | 3266 + vendor/k8s.io/api/rbac/v1/generated.proto | 199 + vendor/k8s.io/api/rbac/v1/register.go | 58 + vendor/k8s.io/api/rbac/v1/types.go | 237 + .../rbac/v1/types_swagger_doc_generated.go | 158 + 
.../api/rbac/v1/zz_generated.deepcopy.go | 389 + vendor/k8s.io/api/rbac/v1alpha1/doc.go | 23 + .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 3267 + .../k8s.io/api/rbac/v1alpha1/generated.proto | 208 + vendor/k8s.io/api/rbac/v1alpha1/register.go | 58 + vendor/k8s.io/api/rbac/v1alpha1/types.go | 246 + .../v1alpha1/types_swagger_doc_generated.go | 158 + .../rbac/v1alpha1/zz_generated.deepcopy.go | 389 + vendor/k8s.io/api/rbac/v1beta1/doc.go | 23 + .../k8s.io/api/rbac/v1beta1/generated.pb.go | 3266 + .../k8s.io/api/rbac/v1beta1/generated.proto | 208 + vendor/k8s.io/api/rbac/v1beta1/register.go | 58 + vendor/k8s.io/api/rbac/v1beta1/types.go | 245 + .../v1beta1/types_swagger_doc_generated.go | 158 + .../api/rbac/v1beta1/zz_generated.deepcopy.go | 389 + vendor/k8s.io/api/scheduling/v1/doc.go | 23 + .../k8s.io/api/scheduling/v1/generated.pb.go | 735 + .../k8s.io/api/scheduling/v1/generated.proto | 75 + vendor/k8s.io/api/scheduling/v1/register.go | 55 + vendor/k8s.io/api/scheduling/v1/types.go | 74 + .../v1/types_swagger_doc_generated.go | 53 + .../scheduling/v1/zz_generated.deepcopy.go | 90 + vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 23 + .../api/scheduling/v1alpha1/generated.pb.go | 735 + .../api/scheduling/v1alpha1/generated.proto | 76 + .../api/scheduling/v1alpha1/register.go | 52 + .../k8s.io/api/scheduling/v1alpha1/types.go | 75 + .../v1alpha1/types_swagger_doc_generated.go | 53 + .../v1alpha1/zz_generated.deepcopy.go | 90 + vendor/k8s.io/api/scheduling/v1beta1/doc.go | 23 + .../api/scheduling/v1beta1/generated.pb.go | 735 + .../api/scheduling/v1beta1/generated.proto | 76 + .../k8s.io/api/scheduling/v1beta1/register.go | 52 + vendor/k8s.io/api/scheduling/v1beta1/types.go | 75 + .../v1beta1/types_swagger_doc_generated.go | 53 + .../v1beta1/zz_generated.deepcopy.go | 90 + vendor/k8s.io/api/settings/v1alpha1/doc.go | 23 + .../api/settings/v1alpha1/generated.pb.go | 1053 + .../api/settings/v1alpha1/generated.proto | 75 + .../k8s.io/api/settings/v1alpha1/register.go | 52 + vendor/k8s.io/api/settings/v1alpha1/types.go | 70 + .../v1alpha1/types_swagger_doc_generated.go | 61 + .../v1alpha1/zz_generated.deepcopy.go | 131 + vendor/k8s.io/api/storage/v1/doc.go | 22 + vendor/k8s.io/api/storage/v1/generated.pb.go | 4461 ++ vendor/k8s.io/api/storage/v1/generated.proto | 370 + vendor/k8s.io/api/storage/v1/register.go | 62 + vendor/k8s.io/api/storage/v1/types.go | 438 + .../storage/v1/types_swagger_doc_generated.go | 200 + .../api/storage/v1/zz_generated.deepcopy.go | 494 + vendor/k8s.io/api/storage/v1alpha1/doc.go | 22 + .../api/storage/v1alpha1/generated.pb.go | 1813 + .../api/storage/v1alpha1/generated.proto | 136 + .../k8s.io/api/storage/v1alpha1/register.go | 50 + vendor/k8s.io/api/storage/v1alpha1/types.go | 136 + .../v1alpha1/types_swagger_doc_generated.go | 93 + .../storage/v1alpha1/zz_generated.deepcopy.go | 180 + vendor/k8s.io/api/storage/v1beta1/doc.go | 22 + .../api/storage/v1beta1/generated.pb.go | 4461 ++ .../api/storage/v1beta1/generated.proto | 372 + vendor/k8s.io/api/storage/v1beta1/register.go | 62 + vendor/k8s.io/api/storage/v1beta1/types.go | 440 + .../v1beta1/types_swagger_doc_generated.go | 200 + .../storage/v1beta1/zz_generated.deepcopy.go | 494 + vendor/k8s.io/apimachinery/LICENSE | 202 + .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 25 + .../k8s.io/apimachinery/pkg/api/errors/doc.go | 18 + .../apimachinery/pkg/api/errors/errors.go | 691 + .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 23 + .../k8s.io/apimachinery/pkg/api/meta/doc.go | 19 + 
.../apimachinery/pkg/api/meta/errors.go | 121 + .../pkg/api/meta/firsthit_restmapper.go | 97 + .../k8s.io/apimachinery/pkg/api/meta/help.go | 264 + .../apimachinery/pkg/api/meta/interfaces.go | 134 + .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 104 + .../k8s.io/apimachinery/pkg/api/meta/meta.go | 648 + .../pkg/api/meta/multirestmapper.go | 210 + .../apimachinery/pkg/api/meta/priority.go | 222 + .../apimachinery/pkg/api/meta/restmapper.go | 518 + .../apimachinery/pkg/api/resource/OWNERS | 16 + .../apimachinery/pkg/api/resource/amount.go | 299 + .../pkg/api/resource/generated.pb.go | 89 + .../pkg/api/resource/generated.proto | 88 + .../apimachinery/pkg/api/resource/math.go | 310 + .../apimachinery/pkg/api/resource/quantity.go | 733 + .../pkg/api/resource/quantity_proto.go | 288 + .../pkg/api/resource/scale_int.go | 95 + .../apimachinery/pkg/api/resource/suffix.go | 198 + .../pkg/api/resource/zz_generated.deepcopy.go | 27 + .../apimachinery/pkg/apis/meta/v1/OWNERS | 32 + .../pkg/apis/meta/v1/controller_ref.go | 65 + .../pkg/apis/meta/v1/conversion.go | 347 + .../apimachinery/pkg/apis/meta/v1/deepcopy.go | 46 + .../apimachinery/pkg/apis/meta/v1/doc.go | 24 + .../apimachinery/pkg/apis/meta/v1/duration.go | 65 + .../pkg/apis/meta/v1/generated.pb.go | 11087 +++ .../pkg/apis/meta/v1/generated.proto | 1024 + .../pkg/apis/meta/v1/group_version.go | 148 + .../apimachinery/pkg/apis/meta/v1/helpers.go | 282 + .../apimachinery/pkg/apis/meta/v1/labels.go | 55 + .../apimachinery/pkg/apis/meta/v1/meta.go | 178 + .../pkg/apis/meta/v1/micro_time.go | 196 + .../pkg/apis/meta/v1/micro_time_proto.go | 80 + .../apimachinery/pkg/apis/meta/v1/register.go | 108 + .../apimachinery/pkg/apis/meta/v1/time.go | 197 + .../pkg/apis/meta/v1/time_proto.go | 100 + .../apimachinery/pkg/apis/meta/v1/types.go | 1316 + .../meta/v1/types_swagger_doc_generated.go | 442 + .../pkg/apis/meta/v1/unstructured/helpers.go | 508 + .../apis/meta/v1/unstructured/unstructured.go | 496 + .../meta/v1/unstructured/unstructured_list.go | 210 + .../v1/unstructured/zz_generated.deepcopy.go | 55 + .../apimachinery/pkg/apis/meta/v1/watch.go | 89 + .../apis/meta/v1/zz_generated.conversion.go | 523 + .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 1173 + .../pkg/apis/meta/v1/zz_generated.defaults.go | 32 + .../apimachinery/pkg/conversion/converter.go | 817 + .../apimachinery/pkg/conversion/deep_equal.go | 36 + .../k8s.io/apimachinery/pkg/conversion/doc.go | 24 + .../apimachinery/pkg/conversion/helper.go | 39 + .../pkg/conversion/queryparams/convert.go | 194 + .../pkg/conversion/queryparams/doc.go | 19 + vendor/k8s.io/apimachinery/pkg/fields/doc.go | 19 + .../k8s.io/apimachinery/pkg/fields/fields.go | 62 + .../apimachinery/pkg/fields/requirements.go | 30 + .../apimachinery/pkg/fields/selector.go | 476 + vendor/k8s.io/apimachinery/pkg/labels/doc.go | 19 + .../k8s.io/apimachinery/pkg/labels/labels.go | 181 + .../apimachinery/pkg/labels/selector.go | 914 + .../pkg/labels/zz_generated.deepcopy.go | 42 + .../k8s.io/apimachinery/pkg/runtime/codec.go | 396 + .../apimachinery/pkg/runtime/codec_check.go | 48 + .../apimachinery/pkg/runtime/conversion.go | 196 + .../apimachinery/pkg/runtime/converter.go | 707 + vendor/k8s.io/apimachinery/pkg/runtime/doc.go | 51 + .../apimachinery/pkg/runtime/embedded.go | 149 + .../k8s.io/apimachinery/pkg/runtime/error.go | 151 + .../apimachinery/pkg/runtime/extension.go | 51 + .../apimachinery/pkg/runtime/generated.pb.go | 855 + .../apimachinery/pkg/runtime/generated.proto | 127 + .../k8s.io/apimachinery/pkg/runtime/helper.go 
| 259 + .../apimachinery/pkg/runtime/interfaces.go | 344 + .../k8s.io/apimachinery/pkg/runtime/mapper.go | 98 + .../apimachinery/pkg/runtime/negotiate.go | 146 + .../apimachinery/pkg/runtime/register.go | 31 + .../pkg/runtime/schema/generated.pb.go | 59 + .../pkg/runtime/schema/generated.proto | 26 + .../pkg/runtime/schema/group_version.go | 314 + .../pkg/runtime/schema/interfaces.go | 40 + .../k8s.io/apimachinery/pkg/runtime/scheme.go | 715 + .../pkg/runtime/scheme_builder.go | 48 + .../pkg/runtime/serializer/codec_factory.go | 324 + .../pkg/runtime/serializer/json/json.go | 388 + .../pkg/runtime/serializer/json/meta.go | 63 + .../runtime/serializer/negotiated_codec.go | 43 + .../pkg/runtime/serializer/protobuf/doc.go | 18 + .../runtime/serializer/protobuf/protobuf.go | 472 + .../serializer/recognizer/recognizer.go | 127 + .../runtime/serializer/streaming/streaming.go | 137 + .../serializer/versioning/versioning.go | 250 + .../pkg/runtime/swagger_doc_generator.go | 262 + .../k8s.io/apimachinery/pkg/runtime/types.go | 126 + .../apimachinery/pkg/runtime/types_proto.go | 89 + .../pkg/runtime/zz_generated.deepcopy.go | 75 + .../apimachinery/pkg/selection/operator.go | 33 + vendor/k8s.io/apimachinery/pkg/types/doc.go | 18 + .../apimachinery/pkg/types/namespacedname.go | 43 + .../k8s.io/apimachinery/pkg/types/nodename.go | 43 + vendor/k8s.io/apimachinery/pkg/types/patch.go | 29 + vendor/k8s.io/apimachinery/pkg/types/uid.go | 22 + .../apimachinery/pkg/util/clock/clock.go | 393 + .../apimachinery/pkg/util/errors/doc.go | 18 + .../apimachinery/pkg/util/errors/errors.go | 249 + .../apimachinery/pkg/util/framer/framer.go | 167 + .../pkg/util/intstr/generated.pb.go | 372 + .../pkg/util/intstr/generated.proto | 43 + .../apimachinery/pkg/util/intstr/intstr.go | 185 + .../k8s.io/apimachinery/pkg/util/json/json.go | 156 + .../pkg/util/naming/from_stack.go | 93 + .../k8s.io/apimachinery/pkg/util/net/http.go | 484 + .../apimachinery/pkg/util/net/interface.go | 457 + .../apimachinery/pkg/util/net/port_range.go | 149 + .../apimachinery/pkg/util/net/port_split.go | 77 + .../k8s.io/apimachinery/pkg/util/net/util.go | 73 + .../apimachinery/pkg/util/runtime/runtime.go | 173 + .../k8s.io/apimachinery/pkg/util/sets/byte.go | 205 + .../k8s.io/apimachinery/pkg/util/sets/doc.go | 20 + .../apimachinery/pkg/util/sets/empty.go | 23 + .../k8s.io/apimachinery/pkg/util/sets/int.go | 205 + .../apimachinery/pkg/util/sets/int32.go | 205 + .../apimachinery/pkg/util/sets/int64.go | 205 + .../apimachinery/pkg/util/sets/string.go | 205 + .../pkg/util/validation/field/errors.go | 272 + .../pkg/util/validation/field/path.go | 91 + .../pkg/util/validation/validation.go | 498 + .../apimachinery/pkg/util/yaml/decoder.go | 344 + vendor/k8s.io/apimachinery/pkg/version/doc.go | 20 + .../apimachinery/pkg/version/helpers.go | 88 + .../k8s.io/apimachinery/pkg/version/types.go | 37 + vendor/k8s.io/apimachinery/pkg/watch/doc.go | 19 + .../k8s.io/apimachinery/pkg/watch/filter.go | 105 + vendor/k8s.io/apimachinery/pkg/watch/mux.go | 260 + .../apimachinery/pkg/watch/streamwatcher.go | 132 + vendor/k8s.io/apimachinery/pkg/watch/watch.go | 322 + .../pkg/watch/zz_generated.deepcopy.go | 40 + .../forked/golang/reflect/deep_equal.go | 388 + vendor/k8s.io/client-go/LICENSE | 202 + .../client-go/discovery/discovery_client.go | 508 + vendor/k8s.io/client-go/discovery/doc.go | 19 + vendor/k8s.io/client-go/discovery/helper.go | 125 + .../k8s.io/client-go/kubernetes/clientset.go | 580 + vendor/k8s.io/client-go/kubernetes/doc.go | 20 + 
vendor/k8s.io/client-go/kubernetes/import.go | 19 + .../k8s.io/client-go/kubernetes/scheme/doc.go | 20 + .../client-go/kubernetes/scheme/register.go | 126 + .../v1beta1/admissionregistration_client.go | 95 + .../admissionregistration/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 23 + .../v1beta1/mutatingwebhookconfiguration.go | 164 + .../v1beta1/validatingwebhookconfiguration.go | 164 + .../kubernetes/typed/apps/v1/apps_client.go | 110 + .../typed/apps/v1/controllerrevision.go | 174 + .../kubernetes/typed/apps/v1/daemonset.go | 191 + .../kubernetes/typed/apps/v1/deployment.go | 223 + .../client-go/kubernetes/typed/apps/v1/doc.go | 20 + .../typed/apps/v1/generated_expansion.go | 29 + .../kubernetes/typed/apps/v1/replicaset.go | 223 + .../kubernetes/typed/apps/v1/statefulset.go | 223 + .../typed/apps/v1beta1/apps_client.go | 100 + .../typed/apps/v1beta1/controllerrevision.go | 174 + .../typed/apps/v1beta1/deployment.go | 191 + .../kubernetes/typed/apps/v1beta1/doc.go | 20 + .../typed/apps/v1beta1/generated_expansion.go | 25 + .../typed/apps/v1beta1/statefulset.go | 191 + .../typed/apps/v1beta2/apps_client.go | 110 + .../typed/apps/v1beta2/controllerrevision.go | 174 + .../typed/apps/v1beta2/daemonset.go | 191 + .../typed/apps/v1beta2/deployment.go | 191 + .../kubernetes/typed/apps/v1beta2/doc.go | 20 + .../typed/apps/v1beta2/generated_expansion.go | 29 + .../typed/apps/v1beta2/replicaset.go | 191 + .../typed/apps/v1beta2/statefulset.go | 222 + .../v1alpha1/auditregistration_client.go | 90 + .../auditregistration/v1alpha1/auditsink.go | 164 + .../typed/auditregistration/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go | 21 + .../v1/authentication_client.go | 90 + .../kubernetes/typed/authentication/v1/doc.go | 20 + .../authentication/v1/generated_expansion.go | 19 + .../typed/authentication/v1/tokenreview.go | 46 + .../v1/tokenreview_expansion.go | 35 + .../v1beta1/authentication_client.go | 90 + .../typed/authentication/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 19 + .../authentication/v1beta1/tokenreview.go | 46 + .../v1beta1/tokenreview_expansion.go | 35 + .../authorization/v1/authorization_client.go | 105 + .../kubernetes/typed/authorization/v1/doc.go | 20 + .../authorization/v1/generated_expansion.go | 19 + .../v1/localsubjectaccessreview.go | 48 + .../v1/localsubjectaccessreview_expansion.go | 36 + .../v1/selfsubjectaccessreview.go | 46 + .../v1/selfsubjectaccessreview_expansion.go | 35 + .../v1/selfsubjectrulesreview.go | 46 + .../v1/selfsubjectrulesreview_expansion.go | 35 + .../authorization/v1/subjectaccessreview.go | 46 + .../v1/subjectaccessreview_expansion.go | 36 + .../v1beta1/authorization_client.go | 105 + .../typed/authorization/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 19 + .../v1beta1/localsubjectaccessreview.go | 48 + .../localsubjectaccessreview_expansion.go | 36 + .../v1beta1/selfsubjectaccessreview.go | 46 + .../selfsubjectaccessreview_expansion.go | 35 + .../v1beta1/selfsubjectrulesreview.go | 46 + .../selfsubjectrulesreview_expansion.go | 35 + .../v1beta1/subjectaccessreview.go | 46 + .../v1beta1/subjectaccessreview_expansion.go | 36 + .../autoscaling/v1/autoscaling_client.go | 90 + .../kubernetes/typed/autoscaling/v1/doc.go | 20 + .../autoscaling/v1/generated_expansion.go | 21 + .../autoscaling/v1/horizontalpodautoscaler.go | 191 + .../autoscaling/v2beta1/autoscaling_client.go | 90 + .../typed/autoscaling/v2beta1/doc.go | 20 + .../v2beta1/generated_expansion.go | 21 + .../v2beta1/horizontalpodautoscaler.go | 
191 + .../autoscaling/v2beta2/autoscaling_client.go | 90 + .../typed/autoscaling/v2beta2/doc.go | 20 + .../v2beta2/generated_expansion.go | 21 + .../v2beta2/horizontalpodautoscaler.go | 191 + .../kubernetes/typed/batch/v1/batch_client.go | 90 + .../kubernetes/typed/batch/v1/doc.go | 20 + .../typed/batch/v1/generated_expansion.go | 21 + .../kubernetes/typed/batch/v1/job.go | 191 + .../typed/batch/v1beta1/batch_client.go | 90 + .../kubernetes/typed/batch/v1beta1/cronjob.go | 191 + .../kubernetes/typed/batch/v1beta1/doc.go | 20 + .../batch/v1beta1/generated_expansion.go | 21 + .../typed/batch/v2alpha1/batch_client.go | 90 + .../typed/batch/v2alpha1/cronjob.go | 191 + .../kubernetes/typed/batch/v2alpha1/doc.go | 20 + .../batch/v2alpha1/generated_expansion.go | 21 + .../v1beta1/certificates_client.go | 90 + .../v1beta1/certificatesigningrequest.go | 180 + .../certificatesigningrequest_expansion.go | 37 + .../typed/certificates/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 19 + .../coordination/v1/coordination_client.go | 90 + .../kubernetes/typed/coordination/v1/doc.go | 20 + .../coordination/v1/generated_expansion.go | 21 + .../kubernetes/typed/coordination/v1/lease.go | 174 + .../v1beta1/coordination_client.go | 90 + .../typed/coordination/v1beta1/doc.go | 20 + .../v1beta1/generated_expansion.go | 21 + .../typed/coordination/v1beta1/lease.go | 174 + .../typed/core/v1/componentstatus.go | 164 + .../kubernetes/typed/core/v1/configmap.go | 174 + .../kubernetes/typed/core/v1/core_client.go | 165 + .../client-go/kubernetes/typed/core/v1/doc.go | 20 + .../kubernetes/typed/core/v1/endpoints.go | 174 + .../kubernetes/typed/core/v1/event.go | 174 + .../typed/core/v1/event_expansion.go | 164 + .../typed/core/v1/generated_expansion.go | 39 + .../kubernetes/typed/core/v1/limitrange.go | 174 + .../kubernetes/typed/core/v1/namespace.go | 164 + .../typed/core/v1/namespace_expansion.go | 31 + .../kubernetes/typed/core/v1/node.go | 180 + .../typed/core/v1/node_expansion.go | 43 + .../typed/core/v1/persistentvolume.go | 180 + .../typed/core/v1/persistentvolumeclaim.go | 191 + .../client-go/kubernetes/typed/core/v1/pod.go | 191 + .../kubernetes/typed/core/v1/pod_expansion.go | 45 + .../kubernetes/typed/core/v1/podtemplate.go | 174 + .../typed/core/v1/replicationcontroller.go | 223 + .../kubernetes/typed/core/v1/resourcequota.go | 191 + .../kubernetes/typed/core/v1/secret.go | 174 + .../kubernetes/typed/core/v1/service.go | 174 + .../typed/core/v1/service_expansion.go | 41 + .../typed/core/v1/serviceaccount.go | 174 + .../typed/core/v1/serviceaccount_expansion.go | 41 + .../kubernetes/typed/events/v1beta1/doc.go | 20 + .../kubernetes/typed/events/v1beta1/event.go | 174 + .../typed/events/v1beta1/events_client.go | 90 + .../events/v1beta1/generated_expansion.go | 21 + .../typed/extensions/v1beta1/daemonset.go | 191 + .../typed/extensions/v1beta1/deployment.go | 222 + .../v1beta1/deployment_expansion.go | 29 + .../typed/extensions/v1beta1/doc.go | 20 + .../extensions/v1beta1/extensions_client.go | 110 + .../extensions/v1beta1/generated_expansion.go | 27 + .../typed/extensions/v1beta1/ingress.go | 191 + .../extensions/v1beta1/podsecuritypolicy.go | 164 + .../typed/extensions/v1beta1/replicaset.go | 222 + .../kubernetes/typed/networking/v1/doc.go | 20 + .../networking/v1/generated_expansion.go | 21 + .../typed/networking/v1/networking_client.go | 90 + .../typed/networking/v1/networkpolicy.go | 174 + .../typed/networking/v1beta1/doc.go | 20 + .../networking/v1beta1/generated_expansion.go | 21 + 
.../typed/networking/v1beta1/ingress.go | 191 + .../networking/v1beta1/networking_client.go | 90 + .../kubernetes/typed/node/v1alpha1/doc.go | 20 + .../node/v1alpha1/generated_expansion.go | 21 + .../typed/node/v1alpha1/node_client.go | 90 + .../typed/node/v1alpha1/runtimeclass.go | 164 + .../kubernetes/typed/node/v1beta1/doc.go | 20 + .../typed/node/v1beta1/generated_expansion.go | 21 + .../typed/node/v1beta1/node_client.go | 90 + .../typed/node/v1beta1/runtimeclass.go | 164 + .../kubernetes/typed/policy/v1beta1/doc.go | 20 + .../typed/policy/v1beta1/eviction.go | 48 + .../policy/v1beta1/eviction_expansion.go | 38 + .../policy/v1beta1/generated_expansion.go | 23 + .../policy/v1beta1/poddisruptionbudget.go | 191 + .../typed/policy/v1beta1/podsecuritypolicy.go | 164 + .../typed/policy/v1beta1/policy_client.go | 100 + .../kubernetes/typed/rbac/v1/clusterrole.go | 164 + .../typed/rbac/v1/clusterrolebinding.go | 164 + .../client-go/kubernetes/typed/rbac/v1/doc.go | 20 + .../typed/rbac/v1/generated_expansion.go | 27 + .../kubernetes/typed/rbac/v1/rbac_client.go | 105 + .../kubernetes/typed/rbac/v1/role.go | 174 + .../kubernetes/typed/rbac/v1/rolebinding.go | 174 + .../typed/rbac/v1alpha1/clusterrole.go | 164 + .../typed/rbac/v1alpha1/clusterrolebinding.go | 164 + .../kubernetes/typed/rbac/v1alpha1/doc.go | 20 + .../rbac/v1alpha1/generated_expansion.go | 27 + .../typed/rbac/v1alpha1/rbac_client.go | 105 + .../kubernetes/typed/rbac/v1alpha1/role.go | 174 + .../typed/rbac/v1alpha1/rolebinding.go | 174 + .../typed/rbac/v1beta1/clusterrole.go | 164 + .../typed/rbac/v1beta1/clusterrolebinding.go | 164 + .../kubernetes/typed/rbac/v1beta1/doc.go | 20 + .../typed/rbac/v1beta1/generated_expansion.go | 27 + .../typed/rbac/v1beta1/rbac_client.go | 105 + .../kubernetes/typed/rbac/v1beta1/role.go | 174 + .../typed/rbac/v1beta1/rolebinding.go | 174 + .../kubernetes/typed/scheduling/v1/doc.go | 20 + .../scheduling/v1/generated_expansion.go | 21 + .../typed/scheduling/v1/priorityclass.go | 164 + .../typed/scheduling/v1/scheduling_client.go | 90 + .../typed/scheduling/v1alpha1/doc.go | 20 + .../v1alpha1/generated_expansion.go | 21 + .../scheduling/v1alpha1/priorityclass.go | 164 + .../scheduling/v1alpha1/scheduling_client.go | 90 + .../typed/scheduling/v1beta1/doc.go | 20 + .../scheduling/v1beta1/generated_expansion.go | 21 + .../typed/scheduling/v1beta1/priorityclass.go | 164 + .../scheduling/v1beta1/scheduling_client.go | 90 + .../kubernetes/typed/settings/v1alpha1/doc.go | 20 + .../settings/v1alpha1/generated_expansion.go | 21 + .../typed/settings/v1alpha1/podpreset.go | 174 + .../settings/v1alpha1/settings_client.go | 90 + .../kubernetes/typed/storage/v1/doc.go | 20 + .../typed/storage/v1/generated_expansion.go | 23 + .../typed/storage/v1/storage_client.go | 95 + .../typed/storage/v1/storageclass.go | 164 + .../typed/storage/v1/volumeattachment.go | 180 + .../kubernetes/typed/storage/v1alpha1/doc.go | 20 + .../storage/v1alpha1/generated_expansion.go | 21 + .../typed/storage/v1alpha1/storage_client.go | 90 + .../storage/v1alpha1/volumeattachment.go | 180 + .../typed/storage/v1beta1/csidriver.go | 164 + .../typed/storage/v1beta1/csinode.go | 164 + .../kubernetes/typed/storage/v1beta1/doc.go | 20 + .../storage/v1beta1/generated_expansion.go | 27 + .../typed/storage/v1beta1/storage_client.go | 105 + .../typed/storage/v1beta1/storageclass.go | 164 + .../typed/storage/v1beta1/volumeattachment.go | 180 + .../pkg/apis/clientauthentication/OWNERS | 9 + .../pkg/apis/clientauthentication/doc.go | 20 + 
.../pkg/apis/clientauthentication/register.go | 50 + .../pkg/apis/clientauthentication/types.go | 77 + .../apis/clientauthentication/v1alpha1/doc.go | 24 + .../clientauthentication/v1alpha1/register.go | 55 + .../clientauthentication/v1alpha1/types.go | 78 + .../v1alpha1/zz_generated.conversion.go | 176 + .../v1alpha1/zz_generated.deepcopy.go | 128 + .../v1alpha1/zz_generated.defaults.go | 32 + .../v1beta1/conversion.go | 26 + .../apis/clientauthentication/v1beta1/doc.go | 24 + .../clientauthentication/v1beta1/register.go | 55 + .../clientauthentication/v1beta1/types.go | 59 + .../v1beta1/zz_generated.conversion.go | 142 + .../v1beta1/zz_generated.deepcopy.go | 92 + .../v1beta1/zz_generated.defaults.go | 32 + .../zz_generated.deepcopy.go | 128 + .../client-go/pkg/version/.gitattributes | 1 + vendor/k8s.io/client-go/pkg/version/base.go | 63 + vendor/k8s.io/client-go/pkg/version/def.bzl | 38 + vendor/k8s.io/client-go/pkg/version/doc.go | 21 + .../k8s.io/client-go/pkg/version/version.go | 42 + .../plugin/pkg/client/auth/exec/exec.go | 360 + vendor/k8s.io/client-go/rest/OWNERS | 26 + vendor/k8s.io/client-go/rest/client.go | 258 + vendor/k8s.io/client-go/rest/config.go | 551 + vendor/k8s.io/client-go/rest/plugin.go | 73 + vendor/k8s.io/client-go/rest/request.go | 1201 + vendor/k8s.io/client-go/rest/transport.go | 117 + vendor/k8s.io/client-go/rest/url_utils.go | 97 + vendor/k8s.io/client-go/rest/urlbackoff.go | 107 + vendor/k8s.io/client-go/rest/watch/decoder.go | 72 + vendor/k8s.io/client-go/rest/watch/encoder.go | 56 + .../client-go/rest/zz_generated.deepcopy.go | 52 + .../client-go/tools/clientcmd/api/doc.go | 19 + .../client-go/tools/clientcmd/api/helpers.go | 188 + .../client-go/tools/clientcmd/api/register.go | 46 + .../client-go/tools/clientcmd/api/types.go | 262 + .../clientcmd/api/zz_generated.deepcopy.go | 324 + vendor/k8s.io/client-go/tools/metrics/OWNERS | 9 + .../k8s.io/client-go/tools/metrics/metrics.go | 61 + .../k8s.io/client-go/tools/reference/ref.go | 126 + vendor/k8s.io/client-go/transport/OWNERS | 9 + vendor/k8s.io/client-go/transport/cache.go | 117 + vendor/k8s.io/client-go/transport/config.go | 126 + .../client-go/transport/round_trippers.go | 564 + .../client-go/transport/token_source.go | 140 + .../k8s.io/client-go/transport/transport.go | 227 + vendor/k8s.io/client-go/util/cert/OWNERS | 9 + vendor/k8s.io/client-go/util/cert/cert.go | 206 + vendor/k8s.io/client-go/util/cert/csr.go | 75 + vendor/k8s.io/client-go/util/cert/io.go | 98 + vendor/k8s.io/client-go/util/cert/pem.go | 61 + .../util/connrotation/connrotation.go | 105 + .../client-go/util/flowcontrol/backoff.go | 149 + .../client-go/util/flowcontrol/throttle.go | 143 + vendor/k8s.io/client-go/util/keyutil/OWNERS | 7 + vendor/k8s.io/client-go/util/keyutil/key.go | 323 + vendor/k8s.io/klog/.travis.yml | 16 + vendor/k8s.io/klog/CONTRIBUTING.md | 22 + vendor/k8s.io/klog/LICENSE | 191 + vendor/k8s.io/klog/OWNERS | 19 + vendor/k8s.io/klog/README.md | 97 + vendor/k8s.io/klog/RELEASE.md | 9 + vendor/k8s.io/klog/SECURITY_CONTACTS | 20 + vendor/k8s.io/klog/code-of-conduct.md | 3 + vendor/k8s.io/klog/go.mod | 5 + vendor/k8s.io/klog/go.sum | 2 + vendor/k8s.io/klog/klog.go | 1308 + vendor/k8s.io/klog/klog_file.go | 139 + vendor/k8s.io/utils/LICENSE | 202 + vendor/k8s.io/utils/integer/integer.go | 73 + vendor/modules.txt | 222 +- .../structured-merge-diff/v3/LICENSE | 201 + .../v3/value/allocator.go | 203 + .../structured-merge-diff/v3/value/doc.go | 21 + .../structured-merge-diff/v3/value/fields.go | 97 + 
.../v3/value/jsontagutil.go | 91 + .../structured-merge-diff/v3/value/list.go | 139 + .../v3/value/listreflect.go | 98 + .../v3/value/listunstructured.go | 74 + .../structured-merge-diff/v3/value/map.go | 270 + .../v3/value/mapreflect.go | 209 + .../v3/value/mapunstructured.go | 190 + .../v3/value/reflectcache.go | 463 + .../structured-merge-diff/v3/value/scalar.go | 50 + .../v3/value/structreflect.go | 208 + .../structured-merge-diff/v3/value/value.go | 347 + .../v3/value/valuereflect.go | 294 + .../v3/value/valueunstructured.go | 178 + vendor/sigs.k8s.io/yaml/.gitignore | 20 + vendor/sigs.k8s.io/yaml/.travis.yml | 13 + vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 + vendor/sigs.k8s.io/yaml/LICENSE | 50 + vendor/sigs.k8s.io/yaml/OWNERS | 27 + vendor/sigs.k8s.io/yaml/README.md | 123 + vendor/sigs.k8s.io/yaml/RELEASE.md | 9 + vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 + vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 + vendor/sigs.k8s.io/yaml/fields.go | 502 + vendor/sigs.k8s.io/yaml/go.mod | 8 + vendor/sigs.k8s.io/yaml/go.sum | 9 + vendor/sigs.k8s.io/yaml/yaml.go | 380 + vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 + 1261 files changed, 435153 insertions(+), 18686 deletions(-) create mode 100644 vendor/github.com/davecgh/go-spew/LICENSE create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go delete mode 100644 vendor/github.com/ericchiang/k8s/.gitignore delete mode 100644 vendor/github.com/ericchiang/k8s/.travis.yml delete mode 100644 vendor/github.com/ericchiang/k8s/Makefile delete mode 100644 vendor/github.com/ericchiang/k8s/README.md delete mode 100644 vendor/github.com/ericchiang/k8s/apis/meta/v1/generated.pb.go delete mode 100644 vendor/github.com/ericchiang/k8s/apis/meta/v1/json.go delete mode 100644 vendor/github.com/ericchiang/k8s/client.go delete mode 100644 vendor/github.com/ericchiang/k8s/codec.go delete mode 100644 vendor/github.com/ericchiang/k8s/config.go delete mode 100644 vendor/github.com/ericchiang/k8s/discovery.go delete mode 100644 vendor/github.com/ericchiang/k8s/labels.go delete mode 100644 vendor/github.com/ericchiang/k8s/resource.go delete mode 100644 vendor/github.com/ericchiang/k8s/runtime/generated.pb.go delete mode 100644 vendor/github.com/ericchiang/k8s/runtime/schema/generated.pb.go delete mode 100644 vendor/github.com/ericchiang/k8s/util/intstr/generated.pb.go delete mode 100644 vendor/github.com/ericchiang/k8s/watch.go delete mode 100644 vendor/github.com/ericchiang/k8s/watch/versioned/generated.pb.go create mode 100644 vendor/github.com/gogo/protobuf/AUTHORS create mode 100644 vendor/github.com/gogo/protobuf/CONTRIBUTORS create mode 100644 vendor/github.com/gogo/protobuf/LICENSE create mode 100644 vendor/github.com/gogo/protobuf/proto/Makefile rename vendor/github.com/{golang => gogo}/protobuf/proto/clone.go (96%) create mode 100644 vendor/github.com/gogo/protobuf/proto/custom_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/decode.go (100%) create mode 100644 vendor/github.com/gogo/protobuf/proto/deprecated.go create mode 100644 
vendor/github.com/gogo/protobuf/proto/discard.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/encode.go (99%) create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/equal.go (99%) create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/lib.go (98%) create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/message_set.go (100%) rename vendor/github.com/{golang => gogo}/protobuf/proto/pointer_reflect.go (99%) create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/pointer_unsafe.go (96%) create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/table_marshal.go (90%) create mode 100644 vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/table_merge.go (94%) rename vendor/github.com/{golang => gogo}/protobuf/proto/table_unmarshal.go (86%) create mode 100644 vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/text.go (87%) create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go rename vendor/github.com/{golang => gogo}/protobuf/proto/text_parser.go (85%) create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go create mode 100644 vendor/github.com/golang/protobuf/proto/buffer.go create mode 100644 vendor/github.com/golang/protobuf/proto/defaults.go create mode 100644 vendor/github.com/golang/protobuf/proto/proto.go create mode 100644 vendor/github.com/golang/protobuf/proto/registry.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/wire.go create mode 100644 vendor/github.com/golang/protobuf/proto/wrappers.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go create mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go create mode 100644 vendor/github.com/google/gofuzz/.travis.yml create mode 100644 vendor/github.com/google/gofuzz/CONTRIBUTING.md rename vendor/github.com/{ericchiang/k8s 
=> google/gofuzz}/LICENSE (100%) create mode 100644 vendor/github.com/google/gofuzz/README.md create mode 100644 vendor/github.com/google/gofuzz/doc.go create mode 100644 vendor/github.com/google/gofuzz/fuzz.go create mode 100644 vendor/github.com/google/gofuzz/go.mod create mode 100644 vendor/github.com/googleapis/gnostic/LICENSE create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go create mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json create mode 100644 vendor/github.com/googleapis/gnostic/compiler/README.md create mode 100644 vendor/github.com/googleapis/gnostic/compiler/context.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/error.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/extensions.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/helpers.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/main.go create mode 100644 vendor/github.com/googleapis/gnostic/compiler/reader.go create mode 100644 vendor/github.com/googleapis/gnostic/extensions/README.md create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.proto create mode 100644 vendor/github.com/googleapis/gnostic/extensions/extensions.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/README.md create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/base.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/display.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/models.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/operations.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/reader.go create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/schema.json create mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/writer.go create mode 100644 vendor/golang.org/x/crypto/AUTHORS create mode 100644 vendor/golang.org/x/crypto/CONTRIBUTORS create mode 100644 vendor/golang.org/x/crypto/LICENSE create mode 100644 vendor/golang.org/x/crypto/PATENTS create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/terminal.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util.go rename vendor/golang.org/x/{sys/windows/asm_windows_arm.s => crypto/ssh/terminal/util_aix.go} (51%) create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_linux.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go create mode 100644 vendor/golang.org/x/crypto/ssh/terminal/util_windows.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/oauth2/.travis.yml create 
mode 100644 vendor/golang.org/x/oauth2/AUTHORS create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTING.md create mode 100644 vendor/golang.org/x/oauth2/CONTRIBUTORS create mode 100644 vendor/golang.org/x/oauth2/LICENSE create mode 100644 vendor/golang.org/x/oauth2/README.md create mode 100644 vendor/golang.org/x/oauth2/go.mod create mode 100644 vendor/golang.org/x/oauth2/go.sum create mode 100644 vendor/golang.org/x/oauth2/internal/client_appengine.go create mode 100644 vendor/golang.org/x/oauth2/internal/doc.go create mode 100644 vendor/golang.org/x/oauth2/internal/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/internal/token.go create mode 100644 vendor/golang.org/x/oauth2/internal/transport.go create mode 100644 vendor/golang.org/x/oauth2/oauth2.go create mode 100644 vendor/golang.org/x/oauth2/token.go create mode 100644 vendor/golang.org/x/oauth2/transport.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go create mode 100644 vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go create mode 100644 vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s delete mode 100644 vendor/golang.org/x/sys/windows/asm_windows_386.s delete mode 100644 vendor/golang.org/x/sys/windows/asm_windows_amd64.s create mode 100644 vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go create mode 100644 vendor/golang.org/x/text/unicode/norm/tables11.0.0.go create mode 100644 vendor/golang.org/x/time/AUTHORS create mode 100644 vendor/golang.org/x/time/CONTRIBUTORS create mode 100644 vendor/golang.org/x/time/LICENSE create mode 100644 vendor/golang.org/x/time/PATENTS create mode 100644 vendor/golang.org/x/time/rate/rate.go create mode 100644 vendor/google.golang.org/appengine/LICENSE create mode 100644 vendor/google.golang.org/appengine/internal/api.go create mode 100644 vendor/google.golang.org/appengine/internal/api_classic.go create mode 100644 vendor/google.golang.org/appengine/internal/api_common.go create mode 100644 vendor/google.golang.org/appengine/internal/app_id.go create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/base/api_base.proto create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto create mode 100644 vendor/google.golang.org/appengine/internal/identity.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_classic.go create mode 100644 
vendor/google.golang.org/appengine/internal/identity_flex.go create mode 100644 vendor/google.golang.org/appengine/internal/identity_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/internal.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/log/log_service.proto create mode 100644 vendor/google.golang.org/appengine/internal/main.go create mode 100644 vendor/google.golang.org/appengine/internal/main_common.go create mode 100644 vendor/google.golang.org/appengine/internal/main_vm.go create mode 100644 vendor/google.golang.org/appengine/internal/metadata.go create mode 100644 vendor/google.golang.org/appengine/internal/net.go create mode 100644 vendor/google.golang.org/appengine/internal/regen.sh create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto create mode 100644 vendor/google.golang.org/appengine/internal/transaction.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go create mode 100644 vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto create mode 100644 vendor/google.golang.org/appengine/urlfetch/urlfetch.go create mode 100644 vendor/google.golang.org/protobuf/AUTHORS create mode 100644 vendor/google.golang.org/protobuf/CONTRIBUTORS create mode 100644 vendor/google.golang.org/protobuf/LICENSE create mode 100644 vendor/google.golang.org/protobuf/PATENTS create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/decode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/doc.go create mode 100644 vendor/google.golang.org/protobuf/encoding/prototext/encode.go create mode 100644 vendor/google.golang.org/protobuf/encoding/protowire/wire.go create mode 100644 vendor/google.golang.org/protobuf/internal/descfmt/stringer.go create mode 100644 vendor/google.golang.org/protobuf/internal/descopts/options.go create mode 100644 vendor/google.golang.org/protobuf/internal/detrand/rand.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/defval/default.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/doc.go create mode 100644 vendor/google.golang.org/protobuf/internal/encoding/text/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/errors.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/errors/is_go113.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/doc.go create mode 100644 
vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go create mode 100644 vendor/google.golang.org/protobuf/internal/filetype/build.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/flags.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go create mode 100644 vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go create mode 100644 vendor/google.golang.org/protobuf/internal/genname/name.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/api_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_tables.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_list.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/convert_map.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/decode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/encode.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/enum.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go create mode 100644 
vendor/google.golang.org/protobuf/internal/impl/legacy_export.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_file.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/legacy_message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/merge_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/validate.go create mode 100644 vendor/google.golang.org/protobuf/internal/impl/weak.go create mode 100644 vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go create mode 100644 vendor/google.golang.org/protobuf/internal/pragma/pragma.go create mode 100644 vendor/google.golang.org/protobuf/internal/set/ints.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_pure.go create mode 100644 vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/internal/version/version.go create mode 100644 vendor/google.golang.org/protobuf/proto/checkinit.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode.go create mode 100644 vendor/google.golang.org/protobuf/proto/decode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/doc.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode.go create mode 100644 vendor/google.golang.org/protobuf/proto/encode_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/equal.go create mode 100644 vendor/google.golang.org/protobuf/proto/extension.go create mode 100644 vendor/google.golang.org/protobuf/proto/merge.go create mode 100644 vendor/google.golang.org/protobuf/proto/messageset.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_methods.go create mode 100644 vendor/google.golang.org/protobuf/proto/proto_reflect.go create mode 100644 vendor/google.golang.org/protobuf/proto/reset.go create mode 100644 vendor/google.golang.org/protobuf/proto/size.go create mode 100644 vendor/google.golang.org/protobuf/proto/size_gen.go create mode 100644 vendor/google.golang.org/protobuf/proto/wrappers.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/source.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/type.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go create mode 100644 
vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go create mode 100644 vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoiface/methods.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go create mode 100644 vendor/google.golang.org/protobuf/runtime/protoimpl/version.go create mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go create mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go create mode 100644 vendor/gopkg.in/inf.v0/LICENSE create mode 100644 vendor/gopkg.in/inf.v0/dec.go create mode 100644 vendor/gopkg.in/inf.v0/rounder.go create mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml create mode 100644 vendor/gopkg.in/yaml.v3/LICENSE create mode 100644 vendor/gopkg.in/yaml.v3/NOTICE create mode 100644 vendor/gopkg.in/yaml.v3/README.md create mode 100644 vendor/gopkg.in/yaml.v3/apic.go create mode 100644 vendor/gopkg.in/yaml.v3/decode.go create mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go create mode 100644 vendor/gopkg.in/yaml.v3/encode.go create mode 100644 vendor/gopkg.in/yaml.v3/go.mod create mode 100644 vendor/gopkg.in/yaml.v3/parserc.go create mode 100644 vendor/gopkg.in/yaml.v3/readerc.go create mode 100644 vendor/gopkg.in/yaml.v3/resolve.go create mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go create mode 100644 vendor/gopkg.in/yaml.v3/sorter.go create mode 100644 vendor/gopkg.in/yaml.v3/writerc.go create mode 100644 vendor/gopkg.in/yaml.v3/yaml.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go create mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go create mode 100644 vendor/k8s.io/api/LICENSE create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apps/v1/doc.go create mode 100644 vendor/k8s.io/api/apps/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/apps/v1/generated.proto create mode 100644 vendor/k8s.io/api/apps/v1/register.go create mode 100644 vendor/k8s.io/api/apps/v1/types.go create mode 100644 vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/apps/v1beta1/register.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/types.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/doc.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.pb.go create mode 100644 
vendor/k8s.io/api/apps/v1beta2/generated.proto create mode 100644 vendor/k8s.io/api/apps/v1beta2/register.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/types.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authentication/v1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/authentication/v1beta1/register.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/types.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authorization/v1/doc.go create mode 100644 vendor/k8s.io/api/authorization/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/authorization/v1/generated.proto create mode 100644 vendor/k8s.io/api/authorization/v1/register.go create mode 100644 vendor/k8s.io/api/authorization/v1/types.go create mode 100644 vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/authorization/v1beta1/register.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/types.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/doc.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.proto create mode 100644 vendor/k8s.io/api/autoscaling/v1/register.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/types.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/doc.go create mode 100644 
vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.proto create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/register.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/doc.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.proto create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/register.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/batch/v1/doc.go create mode 100644 vendor/k8s.io/api/batch/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/batch/v1/generated.proto create mode 100644 vendor/k8s.io/api/batch/v1/register.go create mode 100644 vendor/k8s.io/api/batch/v1/types.go create mode 100644 vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/batch/v1beta1/register.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/types.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/doc.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.proto create mode 100644 vendor/k8s.io/api/batch/v2alpha1/register.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/types.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/certificates/v1beta1/register.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/types.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/coordination/v1/doc.go create mode 100644 vendor/k8s.io/api/coordination/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/coordination/v1/generated.proto create mode 100644 vendor/k8s.io/api/coordination/v1/register.go create mode 100644 vendor/k8s.io/api/coordination/v1/types.go create mode 100644 vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/generated.pb.go create mode 100644 
vendor/k8s.io/api/coordination/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/coordination/v1beta1/register.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/types.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/core/v1/annotation_key_constants.go create mode 100644 vendor/k8s.io/api/core/v1/doc.go create mode 100644 vendor/k8s.io/api/core/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/core/v1/generated.proto create mode 100644 vendor/k8s.io/api/core/v1/objectreference.go create mode 100644 vendor/k8s.io/api/core/v1/register.go create mode 100644 vendor/k8s.io/api/core/v1/resource.go create mode 100644 vendor/k8s.io/api/core/v1/taint.go create mode 100644 vendor/k8s.io/api/core/v1/toleration.go create mode 100644 vendor/k8s.io/api/core/v1/types.go create mode 100644 vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/core/v1/well_known_labels.go create mode 100644 vendor/k8s.io/api/core/v1/well_known_taints.go create mode 100644 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/events/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/events/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/events/v1beta1/register.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types.go create mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/extensions/v1beta1/register.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/types.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1/doc.go create mode 100644 vendor/k8s.io/api/networking/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/networking/v1/generated.proto create mode 100644 vendor/k8s.io/api/networking/v1/register.go create mode 100644 vendor/k8s.io/api/networking/v1/types.go create mode 100644 vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/networking/v1beta1/register.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/types.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go create mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1alpha1/register.go create mode 
100644 vendor/k8s.io/api/node/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/node/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/node/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/node/v1beta1/register.go create mode 100644 vendor/k8s.io/api/node/v1beta1/types.go create mode 100644 vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/policy/v1beta1/register.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/types.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/rbac/v1/doc.go create mode 100644 vendor/k8s.io/api/rbac/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/rbac/v1/generated.proto create mode 100644 vendor/k8s.io/api/rbac/v1/register.go create mode 100644 vendor/k8s.io/api/rbac/v1/types.go create mode 100644 vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/rbac/v1beta1/register.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/types.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/scheduling/v1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go create mode 100644 
vendor/k8s.io/api/scheduling/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/register.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/settings/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1/generated.proto create mode 100644 vendor/k8s.io/api/storage/v1/register.go create mode 100644 vendor/k8s.io/api/storage/v1/types.go create mode 100644 vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.proto create mode 100644 vendor/k8s.io/api/storage/v1alpha1/register.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/doc.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.proto create mode 100644 vendor/k8s.io/api/storage/v1beta1/register.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/types.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/LICENSE create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/errors/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/help.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/priority.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS create mode 100644 
vendor/k8s.io/apimachinery/pkg/api/resource/amount.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/math.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go create mode 100644 vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/converter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/helper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go create mode 100644 vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/fields.go create mode 100644 vendor/k8s.io/apimachinery/pkg/fields/requirements.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/fields/selector.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/labels.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/selector.go create mode 100644 vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/converter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/embedded.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/error.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/extension.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/helper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/selection/operator.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/namespacedname.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/nodename.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/patch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/types/uid.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/clock/clock.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/errors/doc.go create mode 100644 
vendor/k8s.io/apimachinery/pkg/util/errors/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/framer/framer.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/json/json.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/http.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/interface.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_range.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/port_split.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/net/util.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/byte.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/empty.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int64.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/string.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/validation/validation.go create mode 100644 vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go create mode 100644 vendor/k8s.io/apimachinery/pkg/version/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/version/helpers.go create mode 100644 vendor/k8s.io/apimachinery/pkg/version/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/doc.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/filter.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/mux.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/watch.go create mode 100644 vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go create mode 100644 vendor/k8s.io/client-go/LICENSE create mode 100644 vendor/k8s.io/client-go/discovery/discovery_client.go create mode 100644 vendor/k8s.io/client-go/discovery/doc.go create mode 100644 vendor/k8s.io/client-go/discovery/helper.go create mode 100644 vendor/k8s.io/client-go/kubernetes/clientset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/import.go create mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/register.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go create 
mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go create mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go create mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go create mode 100644 
vendor/k8s.io/client-go/pkg/version/.gitattributes create mode 100644 vendor/k8s.io/client-go/pkg/version/base.go create mode 100644 vendor/k8s.io/client-go/pkg/version/def.bzl create mode 100644 vendor/k8s.io/client-go/pkg/version/doc.go create mode 100644 vendor/k8s.io/client-go/pkg/version/version.go create mode 100644 vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go create mode 100644 vendor/k8s.io/client-go/rest/OWNERS create mode 100644 vendor/k8s.io/client-go/rest/client.go create mode 100644 vendor/k8s.io/client-go/rest/config.go create mode 100644 vendor/k8s.io/client-go/rest/plugin.go create mode 100644 vendor/k8s.io/client-go/rest/request.go create mode 100644 vendor/k8s.io/client-go/rest/transport.go create mode 100644 vendor/k8s.io/client-go/rest/url_utils.go create mode 100644 vendor/k8s.io/client-go/rest/urlbackoff.go create mode 100644 vendor/k8s.io/client-go/rest/watch/decoder.go create mode 100644 vendor/k8s.io/client-go/rest/watch/encoder.go create mode 100644 vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/doc.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/register.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/types.go create mode 100644 vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/tools/metrics/OWNERS create mode 100644 vendor/k8s.io/client-go/tools/metrics/metrics.go create mode 100644 vendor/k8s.io/client-go/tools/reference/ref.go create mode 100644 vendor/k8s.io/client-go/transport/OWNERS create mode 100644 vendor/k8s.io/client-go/transport/cache.go create mode 100644 vendor/k8s.io/client-go/transport/config.go create mode 100644 vendor/k8s.io/client-go/transport/round_trippers.go create mode 100644 vendor/k8s.io/client-go/transport/token_source.go create mode 100644 vendor/k8s.io/client-go/transport/transport.go create mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS create mode 100644 vendor/k8s.io/client-go/util/cert/cert.go create mode 100644 vendor/k8s.io/client-go/util/cert/csr.go create mode 100644 vendor/k8s.io/client-go/util/cert/io.go create mode 100644 vendor/k8s.io/client-go/util/cert/pem.go create mode 100644 vendor/k8s.io/client-go/util/connrotation/connrotation.go create mode 100644 vendor/k8s.io/client-go/util/flowcontrol/backoff.go create mode 100644 vendor/k8s.io/client-go/util/flowcontrol/throttle.go create mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS create mode 100644 vendor/k8s.io/client-go/util/keyutil/key.go create mode 100644 vendor/k8s.io/klog/.travis.yml create mode 100644 vendor/k8s.io/klog/CONTRIBUTING.md create mode 100644 vendor/k8s.io/klog/LICENSE create mode 100644 vendor/k8s.io/klog/OWNERS create mode 100644 vendor/k8s.io/klog/README.md create mode 100644 vendor/k8s.io/klog/RELEASE.md create mode 100644 vendor/k8s.io/klog/SECURITY_CONTACTS create mode 100644 vendor/k8s.io/klog/code-of-conduct.md create mode 100644 vendor/k8s.io/klog/go.mod create mode 100644 vendor/k8s.io/klog/go.sum create mode 100644 vendor/k8s.io/klog/klog.go create mode 100644 vendor/k8s.io/klog/klog_file.go create mode 100644 vendor/k8s.io/utils/LICENSE create mode 100644 vendor/k8s.io/utils/integer/integer.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go create mode 100644 
vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go create mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go create mode 100644 vendor/sigs.k8s.io/yaml/.gitignore create mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml create mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md create mode 100644 vendor/sigs.k8s.io/yaml/LICENSE create mode 100644 vendor/sigs.k8s.io/yaml/OWNERS create mode 100644 vendor/sigs.k8s.io/yaml/README.md create mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md create mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS create mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md create mode 100644 vendor/sigs.k8s.io/yaml/fields.go create mode 100644 vendor/sigs.k8s.io/yaml/go.mod create mode 100644 vendor/sigs.k8s.io/yaml/go.sum create mode 100644 vendor/sigs.k8s.io/yaml/yaml.go create mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go diff --git a/go.mod b/go.mod index aaf5de5..16f55d4 100644 --- a/go.mod +++ b/go.mod @@ -10,14 +10,17 @@ require ( github.com/gin-gonic/gin v1.4.0 github.com/gomodule/redigo v2.0.0+incompatible github.com/google/uuid v1.1.1 + github.com/googleapis/gnostic v0.5.1 // indirect github.com/hashicorp/golang-lru v0.5.3 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/kr/pretty v0.1.0 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/prometheus/client_golang v1.1.0 github.com/sirupsen/logrus v1.4.2 github.com/ugorji/go v1.1.7 // indirect github.com/zsais/go-gin-prometheus v0.1.0 - golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 // indirect - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect + golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect + k8s.io/api v0.18.6 // indirect + k8s.io/client-go v11.0.0+incompatible + k8s.io/utils v0.0.0-20200724153422-f32512634ab7 // indirect ) diff --git a/go.sum b/go.sum index 8764fa1..f791c1d 100644 --- a/go.sum +++ b/go.sum @@ -1,3 +1,4 @@ +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71 h1:J52um+Sp3v8TpSY0wOgpjr84np+xvrY3503DRirJ6wI= github.com/Bose/minisentinel v0.0.0-20191213132324-b7726ed8ed71/go.mod h1:E4OavwrrOME3uj3Zm9Rla8ZDqlAR5GqKA+mMIPoilYk= github.com/FZambia/sentinel v1.0.0/go.mod h1:ytL1Am/RLlAoAXG6Kj5LNuw/TRRQrv2rt2FT26vP5gI= @@ -5,6 +6,9 @@ 
github.com/FZambia/sentinel v1.1.0 h1:qrCBfxc8SvJihYNjBWgwUI93ZCvFe/PJIPTHKmlp8a github.com/FZambia/sentinel v1.1.0/go.mod h1:ytL1Am/RLlAoAXG6Kj5LNuw/TRRQrv2rt2FT26vP5gI= github.com/Jim-Lambert-Bose/cache v1.0.3 h1:kgkWQcKRtswM08CStWuucbLbjeK8TmaWBTr/S2Ei/x4= github.com/Jim-Lambert-Bose/cache v1.0.3/go.mod h1:rZHUsMi0xjPwopKMTEGC3HfmnrMvqPsIPvaQ70bDzps= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= +github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= @@ -23,6 +27,13 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= +github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= github.com/gin-gonic/gin v1.4.0 h1:3tMoCCfM7ppqsR0ptz/wi1impNpT7/9wQtMZ8lr1mCQ= @@ -30,34 +41,68 @@ github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= 
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= +github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= +github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= +github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= 
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok= +github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= +github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc= @@ -72,7 +117,14 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -97,11 +149,19 @@ github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62/go.mod h1:65XQgovT github.com/sirupsen/logrus 
v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= @@ -113,15 +173,25 @@ github.com/yuin/gopher-lua v0.0.0-20191213034115-f46add6fdb5c/go.mod h1:gqRgreBU github.com/zsais/go-gin-prometheus v0.1.0 h1:bkLv1XCdzqVgQ36ScgRi09MA2UC1t3tAB6nsfErsGO4= github.com/zsais/go-gin-prometheus v0.1.0/go.mod h1:Slirjzuz8uM8Cw0jmPNqbneoqcUtY2GGjn2bEd4NRLY= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 h1:k7pJ2yAPLPgbskkFdhRCsA77k2fySZ1zf2zCjvQCiIM= -golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ymfAeJIyd0upUIElB+lI= +golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 
v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -129,16 +199,67 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdOCQUEXhbk/P4A9WmJq0= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= +golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= +golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod 
h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= +gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE= +k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= +k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= +k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= +k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= +k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= +k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= +k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= +k8s.io/utils v0.0.0-20200724153422-f32512634ab7 h1:bYyloM4UeWug24euLZfEH7muFQoqvFj9h/pxAnOZLt4= +k8s.io/utils v0.0.0-20200724153422-f32512634ab7/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0 
h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= +sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/redispool.go b/redispool.go index b07c6bc..6af2e43 100644 --- a/redispool.go +++ b/redispool.go @@ -11,6 +11,8 @@ import ( "github.com/Jim-Lambert-Bose/cache/persistence" "github.com/gomodule/redigo/redis" "github.com/sirupsen/logrus" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" ) // Type - define the type of cache: read or write @@ -125,7 +127,18 @@ func inCluster(logger *logrus.Entry) bool { // } // logger.Infof("inCluster: true") // return true - return false + + config, err := rest.InClusterConfig() + if err != nil { + logger.Infof("unable to create k8s config: %s", err) + return false + } + _, err = kubernetes.NewForConfig(config) + if err != nil { + logger.Infof("unable to create k8s clientset: %s", err) + return false + } + return true } // RedisConnectionInfo - define all the things needed to manage a connection to redis (optionally using sentinel) diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE new file mode 100644 index 0000000..bc52e96 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/LICENSE @@ -0,0 +1,15 @@ +ISC License + +Copyright (c) 2012-2016 Dave Collins <dave@davec.name> + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go new file mode 100644 index 0000000..7929947 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go @@ -0,0 +1,145 @@ +// Copyright (c) 2015-2016 Dave Collins <dave@davec.name> +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is not running on Google App Engine, compiled by GopherJS, and +// "-tags safe" is not added to the go build command line.
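For reference, the redispool.go hunk above is the functional heart of this patch: inCluster now simply checks whether a client-go in-cluster configuration and clientset can be built. The following standalone sketch (illustrative only, not part of the patch; the logrus plumbing from the patched function is dropped and a main wrapper is added just to make it runnable) shows the same detection pattern in isolation:

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// inCluster reports whether the process appears to be running inside a
// Kubernetes pod. rest.InClusterConfig only succeeds when the service
// account token and the KUBERNETES_SERVICE_HOST/PORT environment
// variables injected by the kubelet are present.
func inCluster() bool {
	config, err := rest.InClusterConfig()
	if err != nil {
		return false
	}
	// Building a clientset validates the config; the client itself is
	// discarded because only a yes/no answer is needed here.
	if _, err = kubernetes.NewForConfig(config); err != nil {
		return false
	}
	return true
}

func main() {
	fmt.Println("in cluster:", inCluster())
}

Neither call talks to the API server, so the check stays cheap and should need no RBAC permissions; it only inspects the injected environment and mounted credentials.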
The "disableunsafe" +// tag is deprecated and thus should not be used. +// Go versions prior to 1.4 are disabled because they use a different layout +// for interfaces which make the implementation of unsafeReflectValue more complex. +// +build !js,!appengine,!safe,!disableunsafe,go1.4 + +package spew + +import ( + "reflect" + "unsafe" +) + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = false + + // ptrSize is the size of a pointer on the current arch. + ptrSize = unsafe.Sizeof((*byte)(nil)) +) + +type flag uintptr + +var ( + // flagRO indicates whether the value field of a reflect.Value + // is read-only. + flagRO flag + + // flagAddr indicates whether the address of the reflect.Value's + // value may be taken. + flagAddr flag +) + +// flagKindMask holds the bits that make up the kind +// part of the flags field. In all the supported versions, +// it is in the lower 5 bits. +const flagKindMask = flag(0x1f) + +// Different versions of Go have used different +// bit layouts for the flags type. This table +// records the known combinations. +var okFlags = []struct { + ro, addr flag +}{{ + // From Go 1.4 to 1.5 + ro: 1 << 5, + addr: 1 << 7, +}, { + // Up to Go tip. + ro: 1<<5 | 1<<6, + addr: 1 << 8, +}} + +var flagValOffset = func() uintptr { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + return field.Offset +}() + +// flagField returns a pointer to the flag field of a reflect.Value. +func flagField(v *reflect.Value) *flag { + return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) +} + +// unsafeReflectValue converts the passed reflect.Value into a one that bypasses +// the typical safety restrictions preventing access to unaddressable and +// unexported data. It works by digging the raw pointer to the underlying +// value out of the protected value and generating a new unprotected (unsafe) +// reflect.Value to it. +// +// This allows us to check for implementations of the Stringer and error +// interfaces to be used for pretty printing ordinarily unaddressable and +// inaccessible values such as unexported struct fields. +func unsafeReflectValue(v reflect.Value) reflect.Value { + if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { + return v + } + flagFieldPtr := flagField(&v) + *flagFieldPtr &^= flagRO + *flagFieldPtr |= flagAddr + return v +} + +// Sanity checks against future reflect package changes +// to the type or semantics of the Value.flag field. +func init() { + field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") + if !ok { + panic("reflect.Value has no flag field") + } + if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { + panic("reflect.Value flag field has changed kind") + } + type t0 int + var t struct { + A t0 + // t0 will have flagEmbedRO set. + t0 + // a will have flagStickyRO set + a t0 + } + vA := reflect.ValueOf(t).FieldByName("A") + va := reflect.ValueOf(t).FieldByName("a") + vt0 := reflect.ValueOf(t).FieldByName("t0") + + // Infer flagRO from the difference between the flags + // for the (otherwise identical) fields in t. + flagPublic := *flagField(&vA) + flagWithRO := *flagField(&va) | *flagField(&vt0) + flagRO = flagPublic ^ flagWithRO + + // Infer flagAddr from the difference between a value + // taken from a pointer and not. 
+ vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") + flagNoPtr := *flagField(&vA) + flagPtr := *flagField(&vPtrA) + flagAddr = flagNoPtr ^ flagPtr + + // Check that the inferred flags tally with one of the known versions. + for _, f := range okFlags { + if flagRO == f.ro && flagAddr == f.addr { + return + } + } + panic("reflect.Value read-only flag has changed semantics") +} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go new file mode 100644 index 0000000..205c28d --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go @@ -0,0 +1,38 @@ +// Copyright (c) 2015-2016 Dave Collins <dave@davec.name> +// +// Permission to use, copy, modify, and distribute this software for any +// purpose with or without fee is hereby granted, provided that the above +// copyright notice and this permission notice appear in all copies. +// +// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +// NOTE: Due to the following build constraints, this file will only be compiled +// when the code is running on Google App Engine, compiled by GopherJS, or +// "-tags safe" is added to the go build command line. The "disableunsafe" +// tag is deprecated and thus should not be used. +// +build js appengine safe disableunsafe !go1.4 + +package spew + +import "reflect" + +const ( + // UnsafeDisabled is a build-time constant which specifies whether or + // not access to the unsafe package is available. + UnsafeDisabled = true +) + +// unsafeReflectValue typically converts the passed reflect.Value into a one +// that bypasses the typical safety restrictions preventing access to +// unaddressable and unexported data. However, doing this relies on access to +// the unsafe package. This is a stub version which simply returns the passed +// reflect.Value when the unsafe package is not available. +func unsafeReflectValue(v reflect.Value) reflect.Value { + return v +} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go new file mode 100644 index 0000000..1be8ce9 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/common.go @@ -0,0 +1,341 @@ +/* + * Copyright (c) 2013-2016 Dave Collins <dave@davec.name> + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
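The lookup that follows is easiest to see from the caller's side. A rough sketch, with the temperature type invented for the example, of how the DisableMethods and ContinueOnMethod options change what gets printed:

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	// temperature implements fmt.Stringer.
	type temperature float64

	func (t temperature) String() string { return fmt.Sprintf("%.1f degrees", float64(t)) }

	func main() {
		t := temperature(21.5)

		// By default the Stringer result is printed in place of the raw value.
		spew.Dump(t)

		// DisableMethods skips the error/Stringer lookup entirely.
		noMethods := spew.ConfigState{Indent: " ", DisableMethods: true}
		noMethods.Dump(t)

		// ContinueOnMethod prints the Stringer result and then keeps dumping
		// the underlying data as well.
		verbose := spew.ConfigState{Indent: " ", ContinueOnMethod: true}
		verbose.Dump(t)
	}
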
+ switch iface := v.Interface().(type) { + case error: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.Error())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + + w.Write([]byte(iface.Error())) + return true + + case fmt.Stringer: + defer catchPanic(w, v) + if cs.ContinueOnMethod { + w.Write(openParenBytes) + w.Write([]byte(iface.String())) + w.Write(closeParenBytes) + w.Write(spaceBytes) + return false + } + w.Write([]byte(iface.String())) + return true + } + return false +} + +// printBool outputs a boolean value as true or false to Writer w. +func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. 
+func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. + switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. 
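This sorting machinery is what backs the SortKeys and SpewKeys options. A short sketch of the effect on map output (the map contents are arbitrary):

	package main

	import "github.com/davecgh/go-spew/spew"

	func main() {
		m := map[string]int{"beta": 2, "alpha": 1, "gamma": 3}

		// Default config: map keys come out in Go's randomized map order.
		spew.Dump(m)

		// SortKeys sorts native key types (and Stringer/error keys when method
		// invocation is enabled); SpewKeys is the last-resort fallback that
		// formats keys with Sprintf("%#v", ...) and sorts those strings.
		cfg := spew.ConfigState{Indent: " ", SortKeys: true, SpewKeys: true}
		cfg.Dump(m)
	}
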
+func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. 
+ DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. 
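Because these options are plain struct fields, a caller can hold a private ConfigState instead of touching the global Config. A small usage sketch (field values chosen only for illustration):

	package main

	import (
		"os"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		// A local configuration: tab indentation, limited recursion depth, and
		// no slice/map capacities in the output.
		cfg := spew.ConfigState{
			Indent:            "\t",
			MaxDepth:          2,
			DisableCapacities: true,
		}

		data := map[string][]int{"retries": {1, 2, 3}}

		// Every ConfigState method mirrors a top-level function, so settings
		// stay scoped to this caller instead of mutating spew.Config.
		cfg.Dump(data)
		cfg.Fprintf(os.Stderr, "data: %+v\n", data)
	}
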
+// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, c.convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Println(a ...interface{}) (n int, err error) { + return fmt.Println(c.convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprint(a ...interface{}) string { + return fmt.Sprint(c.convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, c.convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a Formatter interface returned by c.NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Sprintln(a ...interface{}) string { + return fmt.Sprintln(c.convertArgs(a)...) +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. 
+ +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +c.Printf, c.Println, or c.Printf. +*/ +func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(c, v) +} + +// Fdump formats and displays the passed arguments to io.Writer w. It formats +// exactly the same as Dump. +func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { + fdump(c, w, a...) +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. + +A quick overview of the additional features spew provides over the built-in +printing facilities for Go data types are as follows: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output (only when using + Dump style) + +There are two different approaches spew allows for dumping Go data structures: + + * Dump style which prints with newlines, customizable indentation, + and additional debug information such as types and all pointer addresses + used to indirect to the final value + * A custom Formatter interface that integrates cleanly with the standard fmt + package and replaces %v, %+v, %#v, and %#+v to provide inline printing + similar to the default %v while providing the additional functionality + outlined above and passing unsupported format verbs such as %x and %q + along to fmt + +Quick Start + +This section demonstrates how to quickly get started with spew. See the +sections below for further details on formatting and configuration options. + +To dump a variable with full newlines, indentation, type, and pointer +information use Dump, Fdump, or Sdump: + spew.Dump(myVar1, myVar2, ...) + spew.Fdump(someWriter, myVar1, myVar2, ...) + str := spew.Sdump(myVar1, myVar2, ...) + +Alternatively, if you would prefer to use format strings with a compacted inline +printing style, use the convenience wrappers Printf, Fprintf, etc with +%v (most compact), %+v (adds pointer addresses), %#v (adds types), or +%#+v (adds types and pointer addresses): + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +Configuration Options + +Configuration of spew is handled by fields in the ConfigState type. For +convenience, all of the top-level functions use a global state available +via the spew.Config global. + +It is also possible to create a ConfigState instance that provides methods +equivalent to the top-level functions. This allows concurrent configuration +options. See the ConfigState documentation for more details. + +The following configuration options are available: + * Indent + String to use for each indentation level for Dump functions. + It is a single space by default. A popular alternative is "\t". + + * MaxDepth + Maximum number of levels to descend into nested data structures. + There is no limit by default. 
+ + * DisableMethods + Disables invocation of error and Stringer interface methods. + Method invocation is enabled by default. + + * DisablePointerMethods + Disables invocation of error and Stringer interface methods on types + which only accept pointer receivers from non-pointer variables. + Pointer method invocation is enabled by default. + + * DisablePointerAddresses + DisablePointerAddresses specifies whether to disable the printing of + pointer addresses. This is useful when diffing data structures in tests. + + * DisableCapacities + DisableCapacities specifies whether to disable the printing of + capacities for arrays, slices, maps and channels. This is useful when + diffing data structures in tests. + + * ContinueOnMethod + Enables recursion into types after invoking error and Stringer interface + methods. Recursion after method invocation is disabled by default. + + * SortKeys + Specifies map keys should be sorted before being printed. Use + this to have a more deterministic, diffable output. Note that + only native types (bool, int, uint, floats, uintptr and string) + and types which implement error or Stringer interfaces are + supported with other types sorted according to the + reflect.Value.String() output which guarantees display + stability. Natural map order is used by default. + + * SpewKeys + Specifies that, as a last resort attempt, map keys should be + spewed to strings and sorted by those strings. This is only + considered if SortKeys is true. + +Dump Usage + +Simply call spew.Dump with a list of variables you want to dump: + + spew.Dump(myVar1, myVar2, ...) + +You may also call spew.Fdump if you would prefer to output to an arbitrary +io.Writer. For example, to dump to standard error: + + spew.Fdump(os.Stderr, myVar1, myVar2, ...) + +A third option is to call spew.Sdump to get the formatted output as a string: + + str := spew.Sdump(myVar1, myVar2, ...) + +Sample Dump Output + +See the Dump example for details on the setup of the types and variables being +shown here. + + (main.Foo) { + unexportedField: (*main.Bar)(0xf84002e210)({ + flag: (main.Flag) flagTwo, + data: (uintptr) + }), + ExportedField: (map[interface {}]interface {}) (len=1) { + (string) (len=3) "one": (bool) true + } + } + +Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C +command as shown. + ([]uint8) (len=32 cap=32) { + 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | + 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| + 00000020 31 32 |12| + } + +Custom Formatter + +Spew provides a custom formatter that implements the fmt.Formatter interface +so that it integrates cleanly with standard fmt package printing functions. The +formatter is useful for inline printing of smaller data types similar to the +standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Custom Formatter Usage + +The simplest way to make use of the spew custom formatter is to call one of the +convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
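The DisablePointerAddresses and DisableCapacities options listed above are easiest to appreciate in a test, where addresses and capacities would otherwise make expected output flaky between runs. A sketch under the assumption of an ordinary go test file (the package name and node type are invented):

	package dumpstable

	import (
		"strings"
		"testing"

		"github.com/davecgh/go-spew/spew"
	)

	type node struct {
		Name     string
		Children []*node
	}

	func TestStableDump(t *testing.T) {
		cfg := spew.ConfigState{
			Indent:                  " ",
			DisablePointerAddresses: true, // no 0x... addresses that change per run
			DisableCapacities:       true, // no cap=N noise when slices are regrown
			SortKeys:                true,
		}

		got := cfg.Sdump(&node{Name: "root", Children: []*node{{Name: "leaf"}}})
		if !strings.Contains(got, `"leaf"`) {
			t.Fatalf("unexpected dump:\n%s", got)
		}
	}
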
The +functions have syntax you are most likely already familiar with: + + spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + spew.Println(myVar, myVar2) + spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) + spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) + +See the Index for the full list convenience functions. + +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..f78d89f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. 
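The byte-detection machinery above (uint8Type and the cgo character regexps) is what routes byte-like data into the hexdump path instead of printing it element by element; for ordinary Go code the common case is a plain []byte. A minimal sketch (the payload is arbitrary):

	package main

	import "github.com/davecgh/go-spew/spew"

	func main() {
		payload := []byte("PING\r\n+PONG\r\n")

		// Dump prints the slice header (length and capacity) followed by an
		// offset/hex/ASCII table in the hexdump -C layout described in doc.go.
		spew.Dump(payload)
	}
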
+func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. 
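The pointer bookkeeping in dumpPtr is also what keeps self-referential data from recursing forever: a pointer already recorded at a shallower depth is reported as a circular marker rather than followed again. A tiny sketch (the ring type is invented for the example):

	package main

	import "github.com/davecgh/go-spew/spew"

	type ring struct {
		ID   int
		Next *ring
	}

	func main() {
		r := &ring{ID: 1}
		r.Next = r // deliberate cycle

		// The second encounter with the same pointer is detected through the
		// pointers map and printed as a circular marker instead of recursing.
		spew.Dump(r)
	}
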
We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. + iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats +// exactly the same as Dump. +func Fdump(w io.Writer, a ...interface{}) { + fdump(&Config, w, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(&Config, &buf, a...) + return buf.String() +} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by an exported package global, +spew.Config. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func Dump(a ...interface{}) { + fdump(&Config, os.Stdout, a...) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go new file mode 100644 index 0000000..b04edb7 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/format.go @@ -0,0 +1,419 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "strings" +) + +// supportedFlags is a list of all the character flags supported by fmt package. +const supportedFlags = "0-+# " + +// formatState implements the fmt.Formatter interface and contains information +// about the state of a formatting operation. The NewFormatter function can +// be used to get a new Formatter which can be used directly as arguments +// in standard fmt package printing calls. +type formatState struct { + value interface{} + fs fmt.State + depth int + pointers map[uintptr]int + ignoreNextType bool + cs *ConfigState +} + +// buildDefaultFormat recreates the original format string without precision +// and width information to pass in to fmt.Sprintf in the case of an +// unrecognized type. Unless new types are added to the language, this +// function won't ever be called. 
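The Fdump and Sdump functions above are the usual hooks for sending a dump somewhere other than standard output. A brief sketch (the state value and log destination are illustrative):

	package main

	import (
		"log"
		"os"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		state := struct {
			Ready   bool
			Retries int
		}{Ready: true, Retries: 2}

		// Fdump writes the same output as Dump, but to any io.Writer.
		spew.Fdump(os.Stderr, state)

		// Sdump returns the dump as a string, convenient for log messages or
		// test failure output.
		log.Printf("current state:\n%s", spew.Sdump(state))
	}
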
+func (f *formatState) buildDefaultFormat() (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + buf.WriteRune('v') + + format = buf.String() + return format +} + +// constructOrigFormat recreates the original format string including precision +// and width information to pass along to the standard fmt package. This allows +// automatic deferral of all format strings this package doesn't support. +func (f *formatState) constructOrigFormat(verb rune) (format string) { + buf := bytes.NewBuffer(percentBytes) + + for _, flag := range supportedFlags { + if f.fs.Flag(int(flag)) { + buf.WriteRune(flag) + } + } + + if width, ok := f.fs.Width(); ok { + buf.WriteString(strconv.Itoa(width)) + } + + if precision, ok := f.fs.Precision(); ok { + buf.Write(precisionBytes) + buf.WriteString(strconv.Itoa(precision)) + } + + buf.WriteRune(verb) + + format = buf.String() + return format +} + +// unpackValue returns values inside of non-nil interfaces when possible and +// ensures that types for values which have been unpacked from an interface +// are displayed when the show types flag is also set. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. +func (f *formatState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface { + f.ignoreNextType = false + if !v.IsNil() { + v = v.Elem() + } + } + return v +} + +// formatPtr handles formatting of pointers by indirecting them as necessary. +func (f *formatState) formatPtr(v reflect.Value) { + // Display nil if top level pointer is nil. + showTypes := f.fs.Flag('#') + if v.IsNil() && (!showTypes || f.ignoreNextType) { + f.fs.Write(nilAngleBytes) + return + } + + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range f.pointers { + if depth >= f.depth { + delete(f.pointers, k) + } + } + + // Keep list of all dereferenced pointers to possibly show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by derferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. 
+ if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. + + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. 
+ + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. + if verb != 'v' { + format := f.constructOrigFormat(verb) + fmt.Fprintf(fs, format, f.value) + return + } + + if f.value == nil { + if fs.Flag('#') { + fs.Write(interfaceBytes) + } + fs.Write(nilAngleBytes) + return + } + + f.format(reflect.ValueOf(f.value)) +} + +// newFormatter is a helper function to consolidate the logic from the various +// public methods which take varying config states. +func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { + fs := &formatState{value: v, cs: cs} + fs.pointers = make(map[uintptr]int) + return fs +} + +/* +NewFormatter returns a custom formatter that satisfies the fmt.Formatter +interface. As a result, it integrates cleanly with standard fmt package +printing functions. The formatter is useful for inline printing of smaller data +types similar to the standard %v format specifier. + +The custom formatter only responds to the %v (most compact), %+v (adds pointer +addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb +combinations. Any other verbs such as %x and %q will be sent to the the +standard fmt package for formatting. In addition, the custom formatter ignores +the width and precision arguments (however they will still work on the format +specifiers not handled by the custom formatter). + +Typically this function shouldn't be called directly. It is much easier to make +use of the custom formatter by calling one of the convenience functions such as +Printf, Println, or Fprintf. 
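When only one argument of a larger fmt call needs spew's treatment, the Formatter returned here can be mixed into an otherwise ordinary call. A short sketch (the connection string and options map are made up):

	package main

	import (
		"fmt"

		"github.com/davecgh/go-spew/spew"
	)

	func main() {
		opts := map[string]interface{}{"timeout": 5, "tls": false}

		// Only the second argument is routed through spew; the first is
		// handled by the standard fmt package as usual.
		fmt.Printf("connecting to %s with options %+v\n", "redis:6379", spew.NewFormatter(opts))
	}
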
+*/ +func NewFormatter(v interface{}) fmt.Formatter { + return newFormatter(&Config, v) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go new file mode 100644 index 0000000..32c0e33 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/spew.go @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "fmt" + "io" +) + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the formatted string as a value that satisfies error. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a default Formatter interface returned by NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) +func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+// +// This function is shorthand for the following syntax: +// +// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) +func Print(a ...interface{}) (n int, err error) { + return fmt.Print(convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Printf(format string, a ...interface{}) (n int, err error) { + return fmt.Printf(format, convertArgs(a)...) +} + +// Println is a wrapper for fmt.Println that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) +func Println(a ...interface{}) (n int, err error) { + return fmt.Println(convertArgs(a)...) +} + +// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprint(a ...interface{}) string { + return fmt.Sprint(convertArgs(a)...) +} + +// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were +// passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. 
+func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/ericchiang/k8s/.gitignore b/vendor/github.com/ericchiang/k8s/.gitignore deleted file mode 100644 index 0a0b4f2..0000000 --- a/vendor/github.com/ericchiang/k8s/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -assets -_output/ diff --git a/vendor/github.com/ericchiang/k8s/.travis.yml b/vendor/github.com/ericchiang/k8s/.travis.yml deleted file mode 100644 index 2dcb76c..0000000 --- a/vendor/github.com/ericchiang/k8s/.travis.yml +++ /dev/null @@ -1,36 +0,0 @@ -language: go - -sudo: required - -go: - - 1.8 - - 1.9 - - '1.10' - -services: - - docker - -env: - - K8S_CLIENT_TEST=1 KUBECONFIG=scripts/kubeconfig - -install: - - go get -v ./... - - go get -v github.com/ghodss/yaml # Required for examples. - - ./scripts/run-kube.sh - - curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.9.1/bin/linux/amd64/kubectl - - chmod +x kubectl - - mv kubectl $GOPATH/bin - -script: - - make - - make test - - make test-examples - - make verify-generate - - -notifications: - email: false - -branches: - only: - - master diff --git a/vendor/github.com/ericchiang/k8s/Makefile b/vendor/github.com/ericchiang/k8s/Makefile deleted file mode 100644 index e1c6c1c..0000000 --- a/vendor/github.com/ericchiang/k8s/Makefile +++ /dev/null @@ -1,52 +0,0 @@ -KUBE_VERSION=1.10.1 - -build: - go build -v ./... - -test: - go test -v ./... - -test-examples: - @for example in $(shell find examples/ -name '*.go'); do \ - go build -v $$example || exit 1; \ - done - -.PHONY: generate -generate: _output/kubernetes _output/bin/protoc _output/bin/gomvpkg _output/bin/protoc-gen-gofast _output/src/github.com/golang/protobuf - ./scripts/generate.sh - go run scripts/register.go - cp scripts/json.go.partial apis/meta/v1/json.go - -.PHONY: verify-generate -verify-generate: generate - ./scripts/git-diff.sh - -_output/bin/protoc-gen-gofast: - ./scripts/go-install.sh \ - https://github.com/gogo/protobuf \ - github.com/gogo/protobuf \ - github.com/gogo/protobuf/protoc-gen-gofast \ - tags/v0.5 - -_output/bin/gomvpkg: - ./scripts/go-install.sh \ - https://github.com/golang/tools \ - golang.org/x/tools \ - golang.org/x/tools/cmd/gomvpkg \ - fbec762f837dc349b73d1eaa820552e2ad177942 - -_output/src/github.com/golang/protobuf: - git clone https://github.com/golang/protobuf _output/src/github.com/golang/protobuf - -_output/bin/protoc: - ./scripts/get-protoc.sh - -_output/kubernetes: - mkdir -p _output - curl -o _output/kubernetes.zip -L https://github.com/kubernetes/kubernetes/archive/v$(KUBE_VERSION).zip - unzip _output/kubernetes.zip -d _output > /dev/null - mv _output/kubernetes-$(KUBE_VERSION) _output/kubernetes - -.PHONY: clean -clean: - rm -rf _output diff --git a/vendor/github.com/ericchiang/k8s/README.md b/vendor/github.com/ericchiang/k8s/README.md deleted file mode 100644 index 897bd66..0000000 --- a/vendor/github.com/ericchiang/k8s/README.md +++ /dev/null @@ -1,291 +0,0 @@ -# A simple Go client for Kubernetes - -[![GoDoc](https://godoc.org/github.com/ericchiang/k8s?status.svg)](https://godoc.org/github.com/ericchiang/k8s) -[![Build Status](https://travis-ci.org/ericchiang/k8s.svg?branch=master)](https://travis-ci.org/ericchiang/k8s) - -A slimmed down Go client generated using Kubernetes' [protocol buffer][protobuf] support. 
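For context on the go-spew package vendored above, here is a minimal sketch of how the wrapper functions in spew.go are typically driven. It is illustrative only and not part of this patch; the `endpoint` type, its field values, and the format strings are assumptions made up for the example.

```go
package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// endpoint is a throwaway type used only to exercise the formatter.
type endpoint struct {
	Host string
	Port int
}

func main() {
	ep := &endpoint{Host: "redis-sentinel", Port: 26379}

	// Each spew wrapper accepts the same arguments as its fmt counterpart,
	// but runs them through spew.NewFormatter first (see convertArgs above).
	spew.Printf("ep = %+v\n", ep)             // %+v adds pointer addresses
	fmt.Println(spew.Sprintf("ep = %#v", ep)) // %#v adds type information

	// NewFormatter can also be handed directly to the standard fmt package.
	fmt.Printf("ep = %v\n", spew.NewFormatter(ep))
}
```

Either form is equivalent: per the doc comments above, the wrappers are shorthand for wrapping every argument in spew.NewFormatter before calling the corresponding fmt function.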
This package behaves similarly to [official Kubernetes' Go client][client-go], but only imports two external dependencies. - -```go -package main - -import ( - "context" - "fmt" - "log" - - "github.com/ericchiang/k8s" - corev1 "github.com/ericchiang/k8s/apis/core/v1" -) - -func main() { - client, err := k8s.NewInClusterClient() - if err != nil { - log.Fatal(err) - } - - var nodes corev1.NodeList - if err := client.List(context.Background(), "", &nodes); err != nil { - log.Fatal(err) - } - for _, node := range nodes.Items { - fmt.Printf("name=%q schedulable=%t\n", *node.Metadata.Name, !*node.Spec.Unschedulable) - } -} -``` - -## Requirements - -* Go 1.7+ (this package uses "context" features added in 1.7) -* Kubernetes 1.3+ (protobuf support was added in 1.3) -* [github.com/golang/protobuf/proto][go-proto] (protobuf serialization) -* [golang.org/x/net/http2][go-http2] (HTTP/2 support) - -## Usage - -### Create, update, delete - -The type of the object passed to `Create`, `Update`, and `Delete` determine the resource being acted on. - -```go -configMap := &corev1.ConfigMap{ - Metadata: &metav1.ObjectMeta{ - Name: k8s.String("my-configmap"), - Namespace: k8s.String("my-namespace"), - }, - Data: map[string]string{"hello": "world"}, -} - -if err := client.Create(ctx, configMap); err != nil { - // handle error -} - -configMap.Data["hello"] = "kubernetes" - -if err := client.Update(ctx, configMap); err != nil { - // handle error -} - -if err := client.Delete(ctx, configMap); err != nil { - // handle error -} -``` - -### Get, list, watch - -Getting a resource requires providing a namespace (for namespaced objects) and a name. - -```go -// Get the "cluster-info" configmap from the "kube-public" namespace -var configMap corev1.ConfigMap -err := client.Get(ctx, "kube-public", "cluster-info", &configMap) -``` - -When performing a list operation, the namespace to list or watch is also required. - -```go -// Pods from the "custom-namespace" -var pods corev1.PodList -err := client.List(ctx, "custom-namespace", &pods) -``` - -A special value `AllNamespaces` indicates that the list or watch should be performed on all cluster resources. - -```go -// Pods in all namespaces -var pods corev1.PodList -err := client.List(ctx, k8s.AllNamespaces, &pods) -``` - -Watches require a example type to determine what resource they're watching. `Watch` returns an type which can be used to receive a stream of events. These events include resources of the same kind and the kind of the event (added, modified, deleted). - -```go -// Watch configmaps in the "kube-system" namespace -var configMap corev1.ConfigMap -watcher, err := client.Watch(ctx, "kube-system", &configMap) -if err != nil { - // handle error -} -defer watcher.Close() - -for { - cm := new(corev1.ConfigMap) - eventType, err := watcher.Next(cm) - if err != nil { - // watcher encountered and error, exit or create a new watcher - } - fmt.Println(eventType, *cm.Metadata.Name) -} -``` - -Both in-cluster and out-of-cluster clients are initialized with a primary namespace. This is the recommended value to use when listing or watching. - -```go -client, err := k8s.NewInClusterClient() -if err != nil { - // handle error -} - -// List pods in the namespace the client is running in. -var pods corev1.PodList -err := client.List(ctx, client.Namespace, &pods) -``` - -### Custom resources - -Client operations support user defined resources, such as resources provided by [CustomResourceDefinitions][crds] and [aggregated API servers][custom-api-servers]. 
To use a custom resource, define an equivalent Go struct then register it with the `k8s` package. By default the client will use JSON serialization when encoding and decoding custom resources. - -```go -import ( - "github.com/ericchiang/k8s" - metav1 "github.com/ericchiang/k8s/apis/meta/v1" -) - -type MyResource struct { - Metadata *metav1.ObjectMeta `json:"metadata"` - Foo string `json:"foo"` - Bar int `json:"bar"` -} - -// Required for MyResource to implement k8s.Resource -func (m *MyResource) GetMetadata() *metav1.ObjectMeta { - return m.Metadata -} - -type MyResourceList struct { - Metadata *metav1.ListMeta `json:"metadata"` - Items []MyResource `json:"items"` -} - -// Require for MyResourceList to implement k8s.ResourceList -func (m *MyResourceList) GetMetadata() *metav1.ListMeta { - return m.Metadata -} - -func init() { - // Register resources with the k8s package. - k8s.Register("resource.example.com", "v1", "myresources", true, &MyResource{}) - k8s.RegisterList("resource.example.com", "v1", "myresources", true, &MyResourceList{}) -} -``` - -Once registered, the library can use the custom resources like any other. - -```go -func do(ctx context.Context, client *k8s.Client, namespace string) error { - r := &MyResource{ - Metadata: &metav1.ObjectMeta{ - Name: k8s.String("my-custom-resource"), - Namespace: &namespace, - }, - Foo: "hello, world!", - Bar: 42, - } - if err := client.Create(ctx, r); err != nil { - return fmt.Errorf("create: %v", err) - } - r.Bar = -8 - if err := client.Update(ctx, r); err != nil { - return fmt.Errorf("update: %v", err) - } - if err := client.Delete(ctx, r); err != nil { - return fmt.Errorf("delete: %v", err) - } - return nil -} -``` - -If the custom type implements [`proto.Message`][proto-msg], the client will prefer protobuf when encoding and decoding the type. - -### Label selectors - -Label selectors can be provided to any list operation. - -```go -l := new(k8s.LabelSelector) -l.Eq("tier", "production") -l.In("app", "database", "frontend") - -var pods corev1.PodList -err := client.List(ctx, "custom-namespace", &pods, l.Selector()) -``` - -### Subresources - -Access subresources using the `Subresource` option. - -```go -err := client.Update(ctx, &pod, k8s.Subresource("status")) -``` - -### Creating out-of-cluster clients - -Out-of-cluster clients can be constructed by either creating an `http.Client` manually or parsing a [`Config`][config] object. The following is an example of creating a client from a kubeconfig: - -```go -import ( - "io/ioutil" - - "github.com/ericchiang/k8s" - - "github.com/ghodss/yaml" -) - -// loadClient parses a kubeconfig from a file and returns a Kubernetes -// client. It does not support extensions or client auth providers. -func loadClient(kubeconfigPath string) (*k8s.Client, error) { - data, err := ioutil.ReadFile(kubeconfigPath) - if err != nil { - return nil, fmt.Errorf("read kubeconfig: %v", err) - } - - // Unmarshal YAML into a Kubernetes config object. - var config k8s.Config - if err := yaml.Unmarshal(data, &config); err != nil { - return nil, fmt.Errorf("unmarshal kubeconfig: %v", err) - } - return k8s.NewClient(&config) -} -``` - -### Errors - -Errors returned by the Kubernetes API are formatted as [`unversioned.Status`][unversioned-status] objects and surfaced by clients as [`*k8s.APIError`][k8s-error]s. Programs that need to inspect error codes or failure details can use a type cast to access this information. 
- -```go -// createConfigMap creates a configmap in the client's default namespace -// but does not return an error if a configmap of the same name already -// exists. -func createConfigMap(client *k8s.Client, name string, values map[string]string) error { - cm := &v1.ConfigMap{ - Metadata: &metav1.ObjectMeta{ - Name: &name, - Namespace: &client.Namespace, - }, - Data: values, - } - - err := client.Create(context.TODO(), cm) - - // If an HTTP error was returned by the API server, it will be of type - // *k8s.APIError. This can be used to inspect the status code. - if apiErr, ok := err.(*k8s.APIError); ok { - // Resource already exists. Carry on. - if apiErr.Code == http.StatusConflict { - return nil - } - } - return fmt.Errorf("create configmap: %v", err) -} -``` - -[client-go]: https://github.com/kubernetes/client-go -[go-proto]: https://godoc.org/github.com/golang/protobuf/proto -[go-http2]: https://godoc.org/golang.org/x/net/http2 -[protobuf]: https://developers.google.com/protocol-buffers/ -[unversioned-status]: https://godoc.org/github.com/ericchiang/k8s/api/unversioned#Status -[k8s-error]: https://godoc.org/github.com/ericchiang/k8s#APIError -[config]: https://godoc.org/github.com/ericchiang/k8s#Config -[string]: https://godoc.org/github.com/ericchiang/k8s#String -[crds]: https://kubernetes.io/docs/tasks/access-kubernetes-api/extend-api-custom-resource-definitions/ -[custom-api-servers]: https://kubernetes.io/docs/concepts/api-extension/apiserver-aggregation/ -[proto-msg]: https://godoc.org/github.com/golang/protobuf/proto#Message diff --git a/vendor/github.com/ericchiang/k8s/apis/meta/v1/generated.pb.go b/vendor/github.com/ericchiang/k8s/apis/meta/v1/generated.pb.go deleted file mode 100644 index 896d515..0000000 --- a/vendor/github.com/ericchiang/k8s/apis/meta/v1/generated.pb.go +++ /dev/null @@ -1,9991 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto - - It has these top-level messages: - APIGroup - APIGroupList - APIResource - APIResourceList - APIVersions - DeleteOptions - Duration - ExportOptions - GetOptions - GroupKind - GroupResource - GroupVersion - GroupVersionForDiscovery - GroupVersionKind - GroupVersionResource - Initializer - Initializers - LabelSelector - LabelSelectorRequirement - List - ListMeta - ListOptions - MicroTime - ObjectMeta - OwnerReference - Patch - Preconditions - RootPaths - ServerAddressByClientCIDR - Status - StatusCause - StatusDetails - Time - Timestamp - TypeMeta - Verbs - WatchEvent -*/ -package v1 - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import k8s_io_apimachinery_pkg_runtime "github.com/ericchiang/k8s/runtime" -import _ "github.com/ericchiang/k8s/runtime/schema" -import _ "github.com/ericchiang/k8s/util/intstr" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
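As a hedged variant of the error-handling pattern shown in the removed README, the same type assertion can distinguish a missing resource on a Get call. The helper name, the namespace/name parameters, and the choice to swallow http.StatusNotFound are assumptions for illustration, not code from this repository.

```go
import (
	"context"
	"fmt"
	"net/http"

	"github.com/ericchiang/k8s"
	corev1 "github.com/ericchiang/k8s/apis/core/v1"
)

// getConfigMap returns (nil, nil) when the configmap does not exist, and
// surfaces every other API error to the caller.
func getConfigMap(ctx context.Context, client *k8s.Client, namespace, name string) (*corev1.ConfigMap, error) {
	var cm corev1.ConfigMap
	err := client.Get(ctx, namespace, name, &cm)
	if apiErr, ok := err.(*k8s.APIError); ok && apiErr.Code == http.StatusNotFound {
		return nil, nil // resource is simply absent
	}
	if err != nil {
		return nil, fmt.Errorf("get configmap: %v", err)
	}
	return &cm, nil
}
```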
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// APIGroup contains the name, the supported versions, and the preferred version -// of a group. -type APIGroup struct { - // name is the name of the group. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // versions are the versions supported in this group. - Versions []*GroupVersionForDiscovery `protobuf:"bytes,2,rep,name=versions" json:"versions,omitempty"` - // preferredVersion is the version preferred by the API server, which - // probably is the storage version. - // +optional - PreferredVersion *GroupVersionForDiscovery `protobuf:"bytes,3,opt,name=preferredVersion" json:"preferredVersion,omitempty"` - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - ServerAddressByClientCIDRs []*ServerAddressByClientCIDR `protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs" json:"serverAddressByClientCIDRs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (m *APIGroup) String() string { return proto.CompactTextString(m) } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *APIGroup) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *APIGroup) GetVersions() []*GroupVersionForDiscovery { - if m != nil { - return m.Versions - } - return nil -} - -func (m *APIGroup) GetPreferredVersion() *GroupVersionForDiscovery { - if m != nil { - return m.PreferredVersion - } - return nil -} - -func (m *APIGroup) GetServerAddressByClientCIDRs() []*ServerAddressByClientCIDR { - if m != nil { - return m.ServerAddressByClientCIDRs - } - return nil -} - -// APIGroupList is a list of APIGroup, to allow clients to discover the API at -// /apis. -type APIGroupList struct { - // groups is a list of APIGroup. - Groups []*APIGroup `protobuf:"bytes,1,rep,name=groups" json:"groups,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (m *APIGroupList) String() string { return proto.CompactTextString(m) } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *APIGroupList) GetGroups() []*APIGroup { - if m != nil { - return m.Groups - } - return nil -} - -// APIResource specifies the name of a resource and whether it is namespaced. -type APIResource struct { - // name is the plural name of the resource. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. - // The singularName is more correct for reporting status on a single item and both singular and plural are allowed - // from the kubectl CLI interface. 
- SingularName *string `protobuf:"bytes,6,opt,name=singularName" json:"singularName,omitempty"` - // namespaced indicates if a resource is namespaced or not. - Namespaced *bool `protobuf:"varint,2,opt,name=namespaced" json:"namespaced,omitempty"` - // group is the preferred group of the resource. Empty implies the group of the containing resource list. - // For subresources, this may have a different value, for example: Scale". - Group *string `protobuf:"bytes,8,opt,name=group" json:"group,omitempty"` - // version is the preferred version of the resource. Empty implies the version of the containing resource list - // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". - Version *string `protobuf:"bytes,9,opt,name=version" json:"version,omitempty"` - // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - // verbs is a list of supported kube verbs (this includes get, list, watch, create, - // update, patch, delete, deletecollection, and proxy) - Verbs *Verbs `protobuf:"bytes,4,opt,name=verbs" json:"verbs,omitempty"` - // shortNames is a list of suggested short names of the resource. - ShortNames []string `protobuf:"bytes,5,rep,name=shortNames" json:"shortNames,omitempty"` - // categories is a list of the grouped resources this resource belongs to (e.g. 'all') - Categories []string `protobuf:"bytes,7,rep,name=categories" json:"categories,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIResource) Reset() { *m = APIResource{} } -func (m *APIResource) String() string { return proto.CompactTextString(m) } -func (*APIResource) ProtoMessage() {} -func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *APIResource) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *APIResource) GetSingularName() string { - if m != nil && m.SingularName != nil { - return *m.SingularName - } - return "" -} - -func (m *APIResource) GetNamespaced() bool { - if m != nil && m.Namespaced != nil { - return *m.Namespaced - } - return false -} - -func (m *APIResource) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *APIResource) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *APIResource) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *APIResource) GetVerbs() *Verbs { - if m != nil { - return m.Verbs - } - return nil -} - -func (m *APIResource) GetShortNames() []string { - if m != nil { - return m.ShortNames - } - return nil -} - -func (m *APIResource) GetCategories() []string { - if m != nil { - return m.Categories - } - return nil -} - -// APIResourceList is a list of APIResource, it is used to expose the name of the -// resources supported in a specific group and version, and if the resource -// is namespaced. -type APIResourceList struct { - // groupVersion is the group and version this APIResourceList is for. - GroupVersion *string `protobuf:"bytes,1,opt,name=groupVersion" json:"groupVersion,omitempty"` - // resources contains the name of the resources and if they are namespaced. 
- Resources []*APIResource `protobuf:"bytes,2,rep,name=resources" json:"resources,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (m *APIResourceList) String() string { return proto.CompactTextString(m) } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *APIResourceList) GetGroupVersion() string { - if m != nil && m.GroupVersion != nil { - return *m.GroupVersion - } - return "" -} - -func (m *APIResourceList) GetResources() []*APIResource { - if m != nil { - return m.Resources - } - return nil -} - -// APIVersions lists the versions that are available, to allow clients to -// discover the API at /api, which is the root path of the legacy v1 API. -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type APIVersions struct { - // versions are the api versions that are available. - Versions []string `protobuf:"bytes,1,rep,name=versions" json:"versions,omitempty"` - // a map of client CIDR to server address that is serving this group. - // This is to help clients reach servers in the most network-efficient way possible. - // Clients can use the appropriate server address as per the CIDR that they match. - // In case of multiple matches, clients should use the longest matching CIDR. - // The server returns only those CIDRs that it thinks that the client can match. - // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. - // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. - ServerAddressByClientCIDRs []*ServerAddressByClientCIDR `protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs" json:"serverAddressByClientCIDRs,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (m *APIVersions) String() string { return proto.CompactTextString(m) } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *APIVersions) GetVersions() []string { - if m != nil { - return m.Versions - } - return nil -} - -func (m *APIVersions) GetServerAddressByClientCIDRs() []*ServerAddressByClientCIDR { - if m != nil { - return m.ServerAddressByClientCIDRs - } - return nil -} - -// DeleteOptions may be provided when deleting an API object. -type DeleteOptions struct { - // The duration in seconds before the object should be deleted. Value must be non-negative integer. - // The value zero indicates delete immediately. If this value is nil, the default grace period for the - // specified type will be used. - // Defaults to a per object value if not specified. zero means delete immediately. - // +optional - GracePeriodSeconds *int64 `protobuf:"varint,1,opt,name=gracePeriodSeconds" json:"gracePeriodSeconds,omitempty"` - // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be - // returned. - // +optional - Preconditions *Preconditions `protobuf:"bytes,2,opt,name=preconditions" json:"preconditions,omitempty"` - // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. - // Should the dependent objects be orphaned. If true/false, the "orphan" - // finalizer will be added to/removed from the object's finalizers list. 
- // Either this field or PropagationPolicy may be set, but not both. - // +optional - OrphanDependents *bool `protobuf:"varint,3,opt,name=orphanDependents" json:"orphanDependents,omitempty"` - // Whether and how garbage collection will be performed. - // Either this field or OrphanDependents may be set, but not both. - // The default policy is decided by the existing finalizer set in the - // metadata.finalizers and the resource-specific default policy. - // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - - // allow the garbage collector to delete the dependents in the background; - // 'Foreground' - a cascading policy that deletes all dependents in the - // foreground. - // +optional - PropagationPolicy *string `protobuf:"bytes,4,opt,name=propagationPolicy" json:"propagationPolicy,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (m *DeleteOptions) String() string { return proto.CompactTextString(m) } -func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *DeleteOptions) GetGracePeriodSeconds() int64 { - if m != nil && m.GracePeriodSeconds != nil { - return *m.GracePeriodSeconds - } - return 0 -} - -func (m *DeleteOptions) GetPreconditions() *Preconditions { - if m != nil { - return m.Preconditions - } - return nil -} - -func (m *DeleteOptions) GetOrphanDependents() bool { - if m != nil && m.OrphanDependents != nil { - return *m.OrphanDependents - } - return false -} - -func (m *DeleteOptions) GetPropagationPolicy() string { - if m != nil && m.PropagationPolicy != nil { - return *m.PropagationPolicy - } - return "" -} - -// Duration is a wrapper around time.Duration which supports correct -// marshaling to YAML and JSON. In particular, it marshals into strings, which -// can be used as map keys in json. -type Duration struct { - Duration *int64 `protobuf:"varint,1,opt,name=duration" json:"duration,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Duration) Reset() { *m = Duration{} } -func (m *Duration) String() string { return proto.CompactTextString(m) } -func (*Duration) ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *Duration) GetDuration() int64 { - if m != nil && m.Duration != nil { - return *m.Duration - } - return 0 -} - -// ExportOptions is the query options to the standard REST get call. -type ExportOptions struct { - // Should this value be exported. Export strips fields that a user can not specify. - Export *bool `protobuf:"varint,1,opt,name=export" json:"export,omitempty"` - // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. - Exact *bool `protobuf:"varint,2,opt,name=exact" json:"exact,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (m *ExportOptions) String() string { return proto.CompactTextString(m) } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *ExportOptions) GetExport() bool { - if m != nil && m.Export != nil { - return *m.Export - } - return false -} - -func (m *ExportOptions) GetExact() bool { - if m != nil && m.Exact != nil { - return *m.Exact - } - return false -} - -// GetOptions is the standard query options to the standard REST get call. 
-type GetOptions struct { - // When specified: - // - if unset, then the result is returned from remote storage based on quorum-read flag; - // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is at least as fresh as given rv. - ResourceVersion *string `protobuf:"bytes,1,opt,name=resourceVersion" json:"resourceVersion,omitempty"` - // If true, partially initialized resources are included in the response. - // +optional - IncludeUninitialized *bool `protobuf:"varint,2,opt,name=includeUninitialized" json:"includeUninitialized,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GetOptions) Reset() { *m = GetOptions{} } -func (m *GetOptions) String() string { return proto.CompactTextString(m) } -func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *GetOptions) GetResourceVersion() string { - if m != nil && m.ResourceVersion != nil { - return *m.ResourceVersion - } - return "" -} - -func (m *GetOptions) GetIncludeUninitialized() bool { - if m != nil && m.IncludeUninitialized != nil { - return *m.IncludeUninitialized - } - return false -} - -// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupKind struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Kind *string `protobuf:"bytes,2,opt,name=kind" json:"kind,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (m *GroupKind) String() string { return proto.CompactTextString(m) } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *GroupKind) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupKind) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying -// concepts during lookup stages without having partially valid types -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupResource struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Resource *string `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (m *GroupResource) String() string { return proto.CompactTextString(m) } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *GroupResource) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupResource) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -// GroupVersion contains the "group" and the "version", which uniquely identifies the API. 
-// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersion struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (m *GroupVersion) String() string { return proto.CompactTextString(m) } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *GroupVersion) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupVersion) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -// GroupVersion contains the "group/version" and "version" string of a version. -// It is made a struct to keep extensibility. -type GroupVersionForDiscovery struct { - // groupVersion specifies the API group and version in the form "group/version" - GroupVersion *string `protobuf:"bytes,1,opt,name=groupVersion" json:"groupVersion,omitempty"` - // version specifies the version in the form of "version". This is to save - // the clients the trouble of splitting the GroupVersion. - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } -func (m *GroupVersionForDiscovery) String() string { return proto.CompactTextString(m) } -func (*GroupVersionForDiscovery) ProtoMessage() {} -func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} -} - -func (m *GroupVersionForDiscovery) GetGroupVersion() string { - if m != nil && m.GroupVersion != nil { - return *m.GroupVersion - } - return "" -} - -func (m *GroupVersionForDiscovery) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersionKind struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (m *GroupVersionKind) String() string { return proto.CompactTextString(m) } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *GroupVersionKind) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupVersionKind) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GroupVersionKind) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion -// to avoid automatic coersion. 
It doesn't use a GroupVersion to avoid custom marshalling -// -// +protobuf.options.(gogoproto.goproto_stringer)=false -type GroupVersionResource struct { - Group *string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` - Resource *string `protobuf:"bytes,3,opt,name=resource" json:"resource,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (m *GroupVersionResource) String() string { return proto.CompactTextString(m) } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *GroupVersionResource) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *GroupVersionResource) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *GroupVersionResource) GetResource() string { - if m != nil && m.Resource != nil { - return *m.Resource - } - return "" -} - -// Initializer is information about an initializer that has not yet completed. -type Initializer struct { - // name of the process that is responsible for initializing this object. - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Initializer) Reset() { *m = Initializer{} } -func (m *Initializer) String() string { return proto.CompactTextString(m) } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *Initializer) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -// Initializers tracks the progress of initialization. -type Initializers struct { - // Pending is a list of initializers that must execute in order before this object is visible. - // When the last pending initializer is removed, and no failing result is set, the initializers - // struct will be set to nil and the object is considered as initialized and visible to all - // clients. - // +patchMergeKey=name - // +patchStrategy=merge - Pending []*Initializer `protobuf:"bytes,1,rep,name=pending" json:"pending,omitempty"` - // If result is set with the Failure field, the object will be persisted to storage and then deleted, - // ensuring that other clients can observe the deletion. - Result *Status `protobuf:"bytes,2,opt,name=result" json:"result,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Initializers) Reset() { *m = Initializers{} } -func (m *Initializers) String() string { return proto.CompactTextString(m) } -func (*Initializers) ProtoMessage() {} -func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *Initializers) GetPending() []*Initializer { - if m != nil { - return m.Pending - } - return nil -} - -func (m *Initializers) GetResult() *Status { - if m != nil { - return m.Result - } - return nil -} - -// A label selector is a label query over a set of resources. The result of matchLabels and -// matchExpressions are ANDed. An empty label selector matches all objects. A null -// label selector matches no objects. -type LabelSelector struct { - // matchLabels is a map of {key,value} pairs. 
A single {key,value} in the matchLabels - // map is equivalent to an element of matchExpressions, whose key field is "key", the - // operator is "In", and the values array contains only "value". The requirements are ANDed. - // +optional - MatchLabels map[string]string `protobuf:"bytes,1,rep,name=matchLabels" json:"matchLabels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // matchExpressions is a list of label selector requirements. The requirements are ANDed. - // +optional - MatchExpressions []*LabelSelectorRequirement `protobuf:"bytes,2,rep,name=matchExpressions" json:"matchExpressions,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelSelector) Reset() { *m = LabelSelector{} } -func (m *LabelSelector) String() string { return proto.CompactTextString(m) } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *LabelSelector) GetMatchLabels() map[string]string { - if m != nil { - return m.MatchLabels - } - return nil -} - -func (m *LabelSelector) GetMatchExpressions() []*LabelSelectorRequirement { - if m != nil { - return m.MatchExpressions - } - return nil -} - -// A label selector requirement is a selector that contains values, a key, and an operator that -// relates the key and values. -type LabelSelectorRequirement struct { - // key is the label key that the selector applies to. - // +patchMergeKey=key - // +patchStrategy=merge - Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` - // operator represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists and DoesNotExist. - Operator *string `protobuf:"bytes,2,opt,name=operator" json:"operator,omitempty"` - // values is an array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. This array is replaced during a strategic - // merge patch. - // +optional - Values []string `protobuf:"bytes,3,rep,name=values" json:"values,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } -func (m *LabelSelectorRequirement) String() string { return proto.CompactTextString(m) } -func (*LabelSelectorRequirement) ProtoMessage() {} -func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{18} -} - -func (m *LabelSelectorRequirement) GetKey() string { - if m != nil && m.Key != nil { - return *m.Key - } - return "" -} - -func (m *LabelSelectorRequirement) GetOperator() string { - if m != nil && m.Operator != nil { - return *m.Operator - } - return "" -} - -func (m *LabelSelectorRequirement) GetValues() []string { - if m != nil { - return m.Values - } - return nil -} - -// List holds a list of objects, which may not be known by the server. -type List struct { - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - Metadata *ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // List of objects - Items []*k8s_io_apimachinery_pkg_runtime.RawExtension `protobuf:"bytes,2,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *List) Reset() { *m = List{} } -func (m *List) String() string { return proto.CompactTextString(m) } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *List) GetMetadata() *ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *List) GetItems() []*k8s_io_apimachinery_pkg_runtime.RawExtension { - if m != nil { - return m.Items - } - return nil -} - -// ListMeta describes metadata that synthetic resources must have, including lists and -// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. -type ListMeta struct { - // selfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // +optional - SelfLink *string `protobuf:"bytes,1,opt,name=selfLink" json:"selfLink,omitempty"` - // String that identifies the server's internal version of this object that - // can be used by clients to determine when objects have changed. - // Value must be treated as opaque by clients and passed unmodified back to the server. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - ResourceVersion *string `protobuf:"bytes,2,opt,name=resourceVersion" json:"resourceVersion,omitempty"` - // continue may be set if the user set a limit on the number of items returned, and indicates that - // the server has more data available. The value is opaque and may be used to issue another request - // to the endpoint that served this list to retrieve the next set of available objects. Continuing a - // list may not be possible if the server configuration has changed or more than a few minutes have - // passed. The resourceVersion field returned when using this continue value will be identical to - // the value in the first response. - Continue *string `protobuf:"bytes,3,opt,name=continue" json:"continue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (m *ListMeta) String() string { return proto.CompactTextString(m) } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } - -func (m *ListMeta) GetSelfLink() string { - if m != nil && m.SelfLink != nil { - return *m.SelfLink - } - return "" -} - -func (m *ListMeta) GetResourceVersion() string { - if m != nil && m.ResourceVersion != nil { - return *m.ResourceVersion - } - return "" -} - -func (m *ListMeta) GetContinue() string { - if m != nil && m.Continue != nil { - return *m.Continue - } - return "" -} - -// ListOptions is the query options to a standard REST list call. -type ListOptions struct { - // A selector to restrict the list of returned objects by their labels. - // Defaults to everything. - // +optional - LabelSelector *string `protobuf:"bytes,1,opt,name=labelSelector" json:"labelSelector,omitempty"` - // A selector to restrict the list of returned objects by their fields. - // Defaults to everything. 
- // +optional - FieldSelector *string `protobuf:"bytes,2,opt,name=fieldSelector" json:"fieldSelector,omitempty"` - // If true, partially initialized resources are included in the response. - // +optional - IncludeUninitialized *bool `protobuf:"varint,6,opt,name=includeUninitialized" json:"includeUninitialized,omitempty"` - // Watch for changes to the described resources and return them as a stream of - // add, update, and remove notifications. Specify resourceVersion. - // +optional - Watch *bool `protobuf:"varint,3,opt,name=watch" json:"watch,omitempty"` - // When specified with a watch call, shows changes that occur after that particular version of a resource. - // Defaults to changes from the beginning of history. - // When specified for list: - // - if unset, then the result is returned from remote storage based on quorum-read flag; - // - if it's 0, then we simply return what we currently have in cache, no guarantee; - // - if set to non zero, then the result is at least as fresh as given rv. - // +optional - ResourceVersion *string `protobuf:"bytes,4,opt,name=resourceVersion" json:"resourceVersion,omitempty"` - // Timeout for the list/watch call. - // This limits the duration of the call, regardless of any activity or inactivity. - // +optional - TimeoutSeconds *int64 `protobuf:"varint,5,opt,name=timeoutSeconds" json:"timeoutSeconds,omitempty"` - // limit is a maximum number of responses to return for a list call. If more items exist, the - // server will set the `continue` field on the list metadata to a value that can be used with the - // same initial query to retrieve the next set of results. Setting a limit may return fewer than - // the requested amount of items (up to zero items) in the event all requested objects are - // filtered out and clients should only use the presence of the continue field to determine whether - // more results are available. Servers may choose not to support the limit argument and will return - // all of the available results. If limit is specified and the continue field is empty, clients may - // assume that no more results are available. This field is not supported if watch is true. - // - // The server guarantees that the objects returned when using continue will be identical to issuing - // a single list call without a limit - that is, no objects created, modified, or deleted after the - // first request is issued will be included in any subsequent continued requests. This is sometimes - // referred to as a consistent snapshot, and ensures that a client that is using limit to receive - // smaller chunks of a very large result can ensure they see all possible objects. If objects are - // updated during a chunked list the version of the object that was present at the time the first list - // result was calculated is returned. - Limit *int64 `protobuf:"varint,7,opt,name=limit" json:"limit,omitempty"` - // The continue option should be set when retrieving more results from the server. Since this value - // is server defined, clients may only use the continue value from a previous query result with - // identical query parameters (except for the value of continue) and the server may reject a continue - // value it does not recognize. If the specified continue value is no longer valid whether due to - // expiration (generally five to fifteen minutes) or a configuration change on the server the server - // will respond with a 410 ResourceExpired error indicating the client must restart their list without - // the continue field. 
This field is not supported when watch is true. Clients may start a watch from - // the last resourceVersion value returned by the server and not miss any modifications. - Continue *string `protobuf:"bytes,8,opt,name=continue" json:"continue,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (m *ListOptions) String() string { return proto.CompactTextString(m) } -func (*ListOptions) ProtoMessage() {} -func (*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } - -func (m *ListOptions) GetLabelSelector() string { - if m != nil && m.LabelSelector != nil { - return *m.LabelSelector - } - return "" -} - -func (m *ListOptions) GetFieldSelector() string { - if m != nil && m.FieldSelector != nil { - return *m.FieldSelector - } - return "" -} - -func (m *ListOptions) GetIncludeUninitialized() bool { - if m != nil && m.IncludeUninitialized != nil { - return *m.IncludeUninitialized - } - return false -} - -func (m *ListOptions) GetWatch() bool { - if m != nil && m.Watch != nil { - return *m.Watch - } - return false -} - -func (m *ListOptions) GetResourceVersion() string { - if m != nil && m.ResourceVersion != nil { - return *m.ResourceVersion - } - return "" -} - -func (m *ListOptions) GetTimeoutSeconds() int64 { - if m != nil && m.TimeoutSeconds != nil { - return *m.TimeoutSeconds - } - return 0 -} - -func (m *ListOptions) GetLimit() int64 { - if m != nil && m.Limit != nil { - return *m.Limit - } - return 0 -} - -func (m *ListOptions) GetContinue() string { - if m != nil && m.Continue != nil { - return *m.Continue - } - return "" -} - -// MicroTime is version of Time with microsecond level precision. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -// +protobuf.options.(gogoproto.goproto_stringer)=false -type MicroTime struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds *int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - Nanos *int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *MicroTime) Reset() { *m = MicroTime{} } -func (m *MicroTime) String() string { return proto.CompactTextString(m) } -func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } - -func (m *MicroTime) GetSeconds() int64 { - if m != nil && m.Seconds != nil { - return *m.Seconds - } - return 0 -} - -func (m *MicroTime) GetNanos() int32 { - if m != nil && m.Nanos != nil { - return *m.Nanos - } - return 0 -} - -// ObjectMeta is metadata that all persisted resources must have, which includes all objects -// users must create. -type ObjectMeta struct { - // Name must be unique within a namespace. Is required when creating resources, although - // some resources may allow a client to request the generation of an appropriate name - // automatically. Name is primarily intended for creation idempotence and configuration - // definition. - // Cannot be updated. 
- // More info: http://kubernetes.io/docs/user-guide/identifiers#names - // +optional - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // GenerateName is an optional prefix, used by the server, to generate a unique - // name ONLY IF the Name field has not been provided. - // If this field is used, the name returned to the client will be different - // than the name passed. This value will also be combined with a unique suffix. - // The provided value has the same validation rules as the Name field, - // and may be truncated by the length of the suffix required to make the value - // unique on the server. - // - // If this field is specified and the generated name exists, the server will - // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason - // ServerTimeout indicating a unique name could not be found in the time allotted, and the client - // should retry (optionally after the time indicated in the Retry-After header). - // - // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency - // +optional - GenerateName *string `protobuf:"bytes,2,opt,name=generateName" json:"generateName,omitempty"` - // Namespace defines the space within each name must be unique. An empty namespace is - // equivalent to the "default" namespace, but "default" is the canonical representation. - // Not all objects are required to be scoped to a namespace - the value of this field for - // those objects will be empty. - // - // Must be a DNS_LABEL. - // Cannot be updated. - // More info: http://kubernetes.io/docs/user-guide/namespaces - // +optional - Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` - // SelfLink is a URL representing this object. - // Populated by the system. - // Read-only. - // +optional - SelfLink *string `protobuf:"bytes,4,opt,name=selfLink" json:"selfLink,omitempty"` - // UID is the unique in time and space value for this object. It is typically generated by - // the server on successful creation of a resource and is not allowed to change on PUT - // operations. - // - // Populated by the system. - // Read-only. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids - // +optional - Uid *string `protobuf:"bytes,5,opt,name=uid" json:"uid,omitempty"` - // An opaque value that represents the internal version of this object that can - // be used by clients to determine when objects have changed. May be used for optimistic - // concurrency, change detection, and the watch operation on a resource or set of resources. - // Clients must treat these values as opaque and passed unmodified back to the server. - // They may only be valid for a particular resource or set of resources. - // - // Populated by the system. - // Read-only. - // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - ResourceVersion *string `protobuf:"bytes,6,opt,name=resourceVersion" json:"resourceVersion,omitempty"` - // A sequence number representing a specific generation of the desired state. - // Populated by the system. Read-only. - // +optional - Generation *int64 `protobuf:"varint,7,opt,name=generation" json:"generation,omitempty"` - // CreationTimestamp is a timestamp representing the server time when this object was - // created. 
It is not guaranteed to be set in happens-before order across separate operations. - // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // - // Populated by the system. - // Read-only. - // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - CreationTimestamp *Time `protobuf:"bytes,8,opt,name=creationTimestamp" json:"creationTimestamp,omitempty"` - // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This - // field is set by the server when a graceful deletion is requested by the user, and is not - // directly settable by a client. The resource is expected to be deleted (no longer visible - // from resource lists, and not reachable by name) after the time in this field, once the - // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. - // Once the deletionTimestamp is set, this value may not be unset or be set further into the - // future, although it may be shortened or the resource may be deleted prior to this time. - // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react - // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, - // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, - // remove the pod from the API. In the presence of network partitions, this object may still - // exist after this timestamp, until an administrator or automated process can determine the - // resource is fully terminated. - // If not set, graceful deletion of the object has not been requested. - // - // Populated by the system when a graceful deletion is requested. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - DeletionTimestamp *Time `protobuf:"bytes,9,opt,name=deletionTimestamp" json:"deletionTimestamp,omitempty"` - // Number of seconds allowed for this object to gracefully terminate before - // it will be removed from the system. Only set when deletionTimestamp is also set. - // May only be shortened. - // Read-only. - // +optional - DeletionGracePeriodSeconds *int64 `protobuf:"varint,10,opt,name=deletionGracePeriodSeconds" json:"deletionGracePeriodSeconds,omitempty"` - // Map of string keys and values that can be used to organize and categorize - // (scope and select) objects. May match selectors of replication controllers - // and services. - // More info: http://kubernetes.io/docs/user-guide/labels - // +optional - Labels map[string]string `protobuf:"bytes,11,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // Annotations is an unstructured key value map stored with a resource that may be - // set by external tools to store and retrieve arbitrary metadata. They are not - // queryable and should be preserved when modifying objects. - // More info: http://kubernetes.io/docs/user-guide/annotations - // +optional - Annotations map[string]string `protobuf:"bytes,12,rep,name=annotations" json:"annotations,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // List of objects depended by this object. If ALL objects in the list have - // been deleted, this object will be garbage collected. 
If this object is managed by a controller, - // then an entry in this list will point to this controller, with the controller field set to true. - // There cannot be more than one managing controller. - // +optional - // +patchMergeKey=uid - // +patchStrategy=merge - OwnerReferences []*OwnerReference `protobuf:"bytes,13,rep,name=ownerReferences" json:"ownerReferences,omitempty"` - // An initializer is a controller which enforces some system invariant at object creation time. - // This field is a list of initializers that have not yet acted on this object. If nil or empty, - // this object has been completely initialized. Otherwise, the object is considered uninitialized - // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to - // observe uninitialized objects. - // - // When an object is created, the system will populate this list with the current set of initializers. - // Only privileged users may set or modify this list. Once it is empty, it may not be modified further - // by any user. - Initializers *Initializers `protobuf:"bytes,16,opt,name=initializers" json:"initializers,omitempty"` - // Must be empty before the object is deleted from the registry. Each entry - // is an identifier for the responsible component that will remove the entry - // from the list. If the deletionTimestamp of the object is non-nil, entries - // in this list can only be removed. - // +optional - // +patchStrategy=merge - Finalizers []string `protobuf:"bytes,14,rep,name=finalizers" json:"finalizers,omitempty"` - // The name of the cluster which the object belongs to. - // This is used to distinguish resources with same name and namespace in different clusters. - // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. 
- // +optional - ClusterName *string `protobuf:"bytes,15,opt,name=clusterName" json:"clusterName,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (m *ObjectMeta) String() string { return proto.CompactTextString(m) } -func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *ObjectMeta) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *ObjectMeta) GetGenerateName() string { - if m != nil && m.GenerateName != nil { - return *m.GenerateName - } - return "" -} - -func (m *ObjectMeta) GetNamespace() string { - if m != nil && m.Namespace != nil { - return *m.Namespace - } - return "" -} - -func (m *ObjectMeta) GetSelfLink() string { - if m != nil && m.SelfLink != nil { - return *m.SelfLink - } - return "" -} - -func (m *ObjectMeta) GetUid() string { - if m != nil && m.Uid != nil { - return *m.Uid - } - return "" -} - -func (m *ObjectMeta) GetResourceVersion() string { - if m != nil && m.ResourceVersion != nil { - return *m.ResourceVersion - } - return "" -} - -func (m *ObjectMeta) GetGeneration() int64 { - if m != nil && m.Generation != nil { - return *m.Generation - } - return 0 -} - -func (m *ObjectMeta) GetCreationTimestamp() *Time { - if m != nil { - return m.CreationTimestamp - } - return nil -} - -func (m *ObjectMeta) GetDeletionTimestamp() *Time { - if m != nil { - return m.DeletionTimestamp - } - return nil -} - -func (m *ObjectMeta) GetDeletionGracePeriodSeconds() int64 { - if m != nil && m.DeletionGracePeriodSeconds != nil { - return *m.DeletionGracePeriodSeconds - } - return 0 -} - -func (m *ObjectMeta) GetLabels() map[string]string { - if m != nil { - return m.Labels - } - return nil -} - -func (m *ObjectMeta) GetAnnotations() map[string]string { - if m != nil { - return m.Annotations - } - return nil -} - -func (m *ObjectMeta) GetOwnerReferences() []*OwnerReference { - if m != nil { - return m.OwnerReferences - } - return nil -} - -func (m *ObjectMeta) GetInitializers() *Initializers { - if m != nil { - return m.Initializers - } - return nil -} - -func (m *ObjectMeta) GetFinalizers() []string { - if m != nil { - return m.Finalizers - } - return nil -} - -func (m *ObjectMeta) GetClusterName() string { - if m != nil && m.ClusterName != nil { - return *m.ClusterName - } - return "" -} - -// OwnerReference contains enough information to let you identify an owning -// object. Currently, an owning object must be in the same namespace, so there -// is no namespace field. -type OwnerReference struct { - // API version of the referent. - ApiVersion *string `protobuf:"bytes,5,opt,name=apiVersion" json:"apiVersion,omitempty"` - // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - Kind *string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` - // Name of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"` - // UID of the referent. - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids - Uid *string `protobuf:"bytes,4,opt,name=uid" json:"uid,omitempty"` - // If true, this reference points to the managing controller. 
- // +optional - Controller *bool `protobuf:"varint,6,opt,name=controller" json:"controller,omitempty"` - // If true, AND if the owner has the "foregroundDeletion" finalizer, then - // the owner cannot be deleted from the key-value store until this - // reference is removed. - // Defaults to false. - // To set this field, a user needs "delete" permission of the owner, - // otherwise 422 (Unprocessable Entity) will be returned. - // +optional - BlockOwnerDeletion *bool `protobuf:"varint,7,opt,name=blockOwnerDeletion" json:"blockOwnerDeletion,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (m *OwnerReference) String() string { return proto.CompactTextString(m) } -func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } - -func (m *OwnerReference) GetApiVersion() string { - if m != nil && m.ApiVersion != nil { - return *m.ApiVersion - } - return "" -} - -func (m *OwnerReference) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *OwnerReference) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *OwnerReference) GetUid() string { - if m != nil && m.Uid != nil { - return *m.Uid - } - return "" -} - -func (m *OwnerReference) GetController() bool { - if m != nil && m.Controller != nil { - return *m.Controller - } - return false -} - -func (m *OwnerReference) GetBlockOwnerDeletion() bool { - if m != nil && m.BlockOwnerDeletion != nil { - return *m.BlockOwnerDeletion - } - return false -} - -// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. -type Patch struct { - XXX_unrecognized []byte `json:"-"` -} - -func (m *Patch) Reset() { *m = Patch{} } -func (m *Patch) String() string { return proto.CompactTextString(m) } -func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -type Preconditions struct { - // Specifies the target UID. - // +optional - Uid *string `protobuf:"bytes,1,opt,name=uid" json:"uid,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (m *Preconditions) String() string { return proto.CompactTextString(m) } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } - -func (m *Preconditions) GetUid() string { - if m != nil && m.Uid != nil { - return *m.Uid - } - return "" -} - -// RootPaths lists the paths available at root. -// For example: "/healthz", "/apis". -type RootPaths struct { - // paths are the paths available at root. - Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (m *RootPaths) String() string { return proto.CompactTextString(m) } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } - -func (m *RootPaths) GetPaths() []string { - if m != nil { - return m.Paths - } - return nil -} - -// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. 
-type ServerAddressByClientCIDR struct { - // The CIDR with which clients can match their IP to figure out the server address that they should use. - ClientCIDR *string `protobuf:"bytes,1,opt,name=clientCIDR" json:"clientCIDR,omitempty"` - // Address of this server, suitable for a client that matches the above CIDR. - // This can be a hostname, hostname:port, IP or IP:port. - ServerAddress *string `protobuf:"bytes,2,opt,name=serverAddress" json:"serverAddress,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } -func (m *ServerAddressByClientCIDR) String() string { return proto.CompactTextString(m) } -func (*ServerAddressByClientCIDR) ProtoMessage() {} -func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{28} -} - -func (m *ServerAddressByClientCIDR) GetClientCIDR() string { - if m != nil && m.ClientCIDR != nil { - return *m.ClientCIDR - } - return "" -} - -func (m *ServerAddressByClientCIDR) GetServerAddress() string { - if m != nil && m.ServerAddress != nil { - return *m.ServerAddress - } - return "" -} - -// Status is a return value for calls that don't return other objects. -type Status struct { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - Metadata *ListMeta `protobuf:"bytes,1,opt,name=metadata" json:"metadata,omitempty"` - // Status of the operation. - // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status *string `protobuf:"bytes,2,opt,name=status" json:"status,omitempty"` - // A human-readable description of the status of this operation. - // +optional - Message *string `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"` - // A machine-readable description of why this operation is in the - // "Failure" status. If this value is empty there - // is no information available. A Reason clarifies an HTTP status - // code but does not override it. - // +optional - Reason *string `protobuf:"bytes,4,opt,name=reason" json:"reason,omitempty"` - // Extended data associated with the reason. Each reason may define its - // own extended details. This field is optional and the data returned - // is not guaranteed to conform to any schema except that defined by - // the reason type. - // +optional - Details *StatusDetails `protobuf:"bytes,5,opt,name=details" json:"details,omitempty"` - // Suggested HTTP return code for this status, 0 if not set. 
- // +optional - Code *int32 `protobuf:"varint,6,opt,name=code" json:"code,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Status) Reset() { *m = Status{} } -func (m *Status) String() string { return proto.CompactTextString(m) } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } - -func (m *Status) GetMetadata() *ListMeta { - if m != nil { - return m.Metadata - } - return nil -} - -func (m *Status) GetStatus() string { - if m != nil && m.Status != nil { - return *m.Status - } - return "" -} - -func (m *Status) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *Status) GetReason() string { - if m != nil && m.Reason != nil { - return *m.Reason - } - return "" -} - -func (m *Status) GetDetails() *StatusDetails { - if m != nil { - return m.Details - } - return nil -} - -func (m *Status) GetCode() int32 { - if m != nil && m.Code != nil { - return *m.Code - } - return 0 -} - -// StatusCause provides more information about an api.Status failure, including -// cases when multiple errors are encountered. -type StatusCause struct { - // A machine-readable description of the cause of the error. If this value is - // empty there is no information available. - // +optional - Reason *string `protobuf:"bytes,1,opt,name=reason" json:"reason,omitempty"` - // A human-readable description of the cause of the error. This field may be - // presented as-is to a reader. - // +optional - Message *string `protobuf:"bytes,2,opt,name=message" json:"message,omitempty"` - // The field of the resource that has caused this error, as named by its JSON - // serialization. May include dot and postfix notation for nested attributes. - // Arrays are zero-indexed. Fields may appear more than once in an array of - // causes due to fields having multiple errors. - // Optional. - // - // Examples: - // "name" - the field "name" on the current resource - // "items[0].name" - the field "name" on the first array entry in "items" - // +optional - Field *string `protobuf:"bytes,3,opt,name=field" json:"field,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (m *StatusCause) String() string { return proto.CompactTextString(m) } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } - -func (m *StatusCause) GetReason() string { - if m != nil && m.Reason != nil { - return *m.Reason - } - return "" -} - -func (m *StatusCause) GetMessage() string { - if m != nil && m.Message != nil { - return *m.Message - } - return "" -} - -func (m *StatusCause) GetField() string { - if m != nil && m.Field != nil { - return *m.Field - } - return "" -} - -// StatusDetails is a set of additional properties that MAY be set by the -// server to provide additional information about a response. The Reason -// field of a Status object defines what attributes will be set. Clients -// must ignore fields that do not match the defined type of each attribute, -// and should assume that any attribute may be empty, invalid, or under -// defined. -type StatusDetails struct { - // The name attribute of the resource associated with the status StatusReason - // (when there is a single name which can be described). - // +optional - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - // The group attribute of the resource associated with the status StatusReason. 
- // +optional - Group *string `protobuf:"bytes,2,opt,name=group" json:"group,omitempty"` - // The kind attribute of the resource associated with the status StatusReason. - // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` - // UID of the resource. - // (when there is a single resource which can be described). - // More info: http://kubernetes.io/docs/user-guide/identifiers#uids - // +optional - Uid *string `protobuf:"bytes,6,opt,name=uid" json:"uid,omitempty"` - // The Causes array includes more details associated with the StatusReason - // failure. Not all StatusReasons may provide detailed causes. - // +optional - Causes []*StatusCause `protobuf:"bytes,4,rep,name=causes" json:"causes,omitempty"` - // If specified, the time in seconds before the operation should be retried. Some errors may indicate - // the client must take an alternate action - for those errors this field may indicate how long to wait - // before taking the alternate action. - // +optional - RetryAfterSeconds *int32 `protobuf:"varint,5,opt,name=retryAfterSeconds" json:"retryAfterSeconds,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *StatusDetails) Reset() { *m = StatusDetails{} } -func (m *StatusDetails) String() string { return proto.CompactTextString(m) } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } - -func (m *StatusDetails) GetName() string { - if m != nil && m.Name != nil { - return *m.Name - } - return "" -} - -func (m *StatusDetails) GetGroup() string { - if m != nil && m.Group != nil { - return *m.Group - } - return "" -} - -func (m *StatusDetails) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *StatusDetails) GetUid() string { - if m != nil && m.Uid != nil { - return *m.Uid - } - return "" -} - -func (m *StatusDetails) GetCauses() []*StatusCause { - if m != nil { - return m.Causes - } - return nil -} - -func (m *StatusDetails) GetRetryAfterSeconds() int32 { - if m != nil && m.RetryAfterSeconds != nil { - return *m.RetryAfterSeconds - } - return 0 -} - -// Time is a wrapper around time.Time which supports correct -// marshaling to YAML and JSON. Wrappers are provided for many -// of the factory methods that the time package offers. -// -// +protobuf.options.marshal=false -// +protobuf.as=Timestamp -// +protobuf.options.(gogoproto.goproto_stringer)=false -type Time struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds *int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. 
- Nanos *int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Time) Reset() { *m = Time{} } -func (m *Time) String() string { return proto.CompactTextString(m) } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } - -func (m *Time) GetSeconds() int64 { - if m != nil && m.Seconds != nil { - return *m.Seconds - } - return 0 -} - -func (m *Time) GetNanos() int32 { - if m != nil && m.Nanos != nil { - return *m.Nanos - } - return 0 -} - -// Timestamp is a struct that is equivalent to Time, but intended for -// protobuf marshalling/unmarshalling. It is generated into a serialization -// that matches Time. Do not use in Go structs. -type Timestamp struct { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds *int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. This field may be limited in precision depending on context. - Nanos *int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (m *Timestamp) String() string { return proto.CompactTextString(m) } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } - -func (m *Timestamp) GetSeconds() int64 { - if m != nil && m.Seconds != nil { - return *m.Seconds - } - return 0 -} - -func (m *Timestamp) GetNanos() int32 { - if m != nil && m.Nanos != nil { - return *m.Nanos - } - return 0 -} - -// TypeMeta describes an individual object in an API response or request -// with strings representing the type of the object and its API schema version. -// Structures that are versioned or persisted should inline TypeMeta. -// -// +k8s:deepcopy-gen=false -type TypeMeta struct { - // Kind is a string value representing the REST resource this object represents. - // Servers may infer this from the endpoint the client submits requests to. - // Cannot be updated. - // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - Kind *string `protobuf:"bytes,1,opt,name=kind" json:"kind,omitempty"` - // APIVersion defines the versioned schema of this representation of an object. - // Servers should convert recognized schemas to the latest internal value, and - // may reject unrecognized values. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources - // +optional - ApiVersion *string `protobuf:"bytes,2,opt,name=apiVersion" json:"apiVersion,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (m *TypeMeta) String() string { return proto.CompactTextString(m) } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } - -func (m *TypeMeta) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -func (m *TypeMeta) GetApiVersion() string { - if m != nil && m.ApiVersion != nil { - return *m.ApiVersion - } - return "" -} - -// Verbs masks the value so protobuf can generate -// -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -type Verbs struct { - Items []string `protobuf:"bytes,1,rep,name=items" json:"items,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Verbs) Reset() { *m = Verbs{} } -func (m *Verbs) String() string { return proto.CompactTextString(m) } -func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } - -func (m *Verbs) GetItems() []string { - if m != nil { - return m.Items - } - return nil -} - -// Event represents a single event to a watched resource. -// -// +protobuf=true -// +k8s:deepcopy-gen=true -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type WatchEvent struct { - Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *Status is recommended; other types may make sense - // depending on context. 
- Object *k8s_io_apimachinery_pkg_runtime.RawExtension `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *WatchEvent) Reset() { *m = WatchEvent{} } -func (m *WatchEvent) String() string { return proto.CompactTextString(m) } -func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } - -func (m *WatchEvent) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *WatchEvent) GetObject() *k8s_io_apimachinery_pkg_runtime.RawExtension { - if m != nil { - return m.Object - } - return nil -} - -func init() { - proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") - proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList") - proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource") - proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList") - proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions") - proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") - proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") - proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") - proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") - proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") - proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") - proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion") - proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") - proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") - proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") - proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") - proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") - proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") - proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") - proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") - proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") - proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") - proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") - proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") - proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") - proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") - proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") - proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") - proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") - proto.RegisterType((*Status)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.Status") - proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") - proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") - proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") - proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") - proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") - proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") - proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") -} -func (m *APIGroup) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Name != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if len(m.Versions) > 0 { - for _, msg := range m.Versions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.PreferredVersion != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) - n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *APIGroupList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Groups) > 0 { - for _, msg := range m.Groups { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *APIResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Name != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if m.Namespaced != nil { - dAtA[i] = 0x10 - i++ - if *m.Namespaced { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Kind != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.Verbs != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) - n2, err := m.Verbs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if len(m.ShortNames) > 0 { - for _, s := range m.ShortNames { - 
dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.SingularName != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SingularName))) - i += copy(dAtA[i:], *m.SingularName) - } - if len(m.Categories) > 0 { - for _, s := range m.Categories { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.Group != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Version != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *APIResourceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GroupVersion != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupVersion))) - i += copy(dAtA[i:], *m.GroupVersion) - } - if len(m.Resources) > 0 { - for _, msg := range m.Resources { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *APIVersions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Versions) > 0 { - for _, s := range m.Versions { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ServerAddressByClientCIDRs) > 0 { - for _, msg := range m.ServerAddressByClientCIDRs { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GracePeriodSeconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) - } - if m.Preconditions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) - n3, err := m.Preconditions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.OrphanDependents != nil { - dAtA[i] = 0x18 - i++ - if *m.OrphanDependents { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.PropagationPolicy != nil { - dAtA[i] = 0x22 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) - i += copy(dAtA[i:], *m.PropagationPolicy) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Duration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Duration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Duration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Duration)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ExportOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Export != nil { - dAtA[i] = 0x8 - i++ - if *m.Export { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Exact != nil { - dAtA[i] = 0x10 - i++ - if *m.Exact { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GetOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ResourceVersion != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) - } - if m.IncludeUninitialized != nil { - dAtA[i] = 0x10 - i++ - if *m.IncludeUninitialized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupKind) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Kind != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Resource != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Resource))) - i += copy(dAtA[i:], *m.Resource) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupVersion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Version != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.GroupVersion != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GroupVersion))) - i += copy(dAtA[i:], *m.GroupVersion) - } - if m.Version != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Version != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.Kind != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Group != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Group))) - i += copy(dAtA[i:], *m.Group) - } - if m.Version != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if m.Resource != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Resource))) - i += copy(dAtA[i:], *m.Resource) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Initializer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Name != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Name))) - i += copy(dAtA[i:], *m.Name) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], 
m.XXX_unrecognized) - } - return i, nil -} - -func (m *Initializers) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Pending) > 0 { - for _, msg := range m.Pending { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Result != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) - n4, err := m.Result.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *LabelSelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchLabels) > 0 { - for k, _ := range m.MatchLabels { - dAtA[i] = 0xa - i++ - v := m.MatchLabels[k] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Key != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Key))) - i += copy(dAtA[i:], *m.Key) - } - if m.Operator != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Operator))) - i += copy(dAtA[i:], *m.Operator) - } - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *List) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Metadata != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metadata.Size())) - n5, err := m.Metadata.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ListMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.SelfLink != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelfLink))) - i += copy(dAtA[i:], *m.SelfLink) - } - if m.ResourceVersion != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) - } - if m.Continue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Continue))) - i += copy(dAtA[i:], *m.Continue) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ListOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.LabelSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.LabelSelector))) - i += copy(dAtA[i:], *m.LabelSelector) - } - if m.FieldSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FieldSelector))) - i += copy(dAtA[i:], *m.FieldSelector) - } - if m.Watch != nil { - dAtA[i] = 0x18 - i++ - if *m.Watch { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.ResourceVersion != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i += copy(dAtA[i:], *m.ResourceVersion) - } - if m.TimeoutSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - } - if m.IncludeUninitialized != nil { - dAtA[i] = 0x30 - i++ - if *m.IncludeUninitialized { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.Limit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Limit)) - } - if m.Continue != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Continue))) - i += copy(dAtA[i:], *m.Continue) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *MicroTime) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MicroTime) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Seconds != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Seconds)) - } - if m.Nanos != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Nanos)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var 
[diff body condensed: removal of the machine-generated protobuf marshalling code (the Marshal, MarshalTo, Size, and Unmarshal methods, plus the varint helpers encodeVarintGenerated, sovGenerated, and sozGenerated) for the meta/v1 API types: ObjectMeta, OwnerReference, Patch, Preconditions, RootPaths, ServerAddressByClientCIDR, Status, StatusCause, StatusDetails, Time, Timestamp, TypeMeta, Verbs, WatchEvent, APIGroup, APIGroupList, APIResource, APIResourceList, APIVersions, DeleteOptions, Duration, ExportOptions, GetOptions, GroupKind, GroupResource, GroupVersion, and GroupVersionForDiscovery; all of these are deleted along with the rest of the vendored generated file.]
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Version = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Version = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Resource = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Initializer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Initializers) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializers: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Pending = append(m.Pending, &Initializer{}) - if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - 
} - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Result == nil { - m.Result = &Status{} - } - if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MatchLabels == nil { - m.MatchLabels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF 
- } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.MatchLabels[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchExpressions = append(m.MatchExpressions, &LabelSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Key = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Operator = &s - 
iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *List) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: List: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &k8s_io_apimachinery_pkg_runtime.RawExtension{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.SelfLink = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceVersion = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Continue = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ListOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.LabelSelector = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FieldSelector = &s - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Watch = &b - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceVersion = &s - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - 
case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.IncludeUninitialized = &b - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Limit = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Continue = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MicroTime) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MicroTime: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MicroTime: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Seconds = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nanos = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.GenerateName = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Namespace = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.SelfLink = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - var stringLen uint64 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Uid = &s - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceVersion = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Generation = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CreationTimestamp == nil { - m.CreationTimestamp = &Time{} - } - if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeletionTimestamp == nil { - m.DeletionTimestamp = &Time{} - } - if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DeletionGracePeriodSeconds = &v - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OwnerReferences = append(m.OwnerReferences, &OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ClusterName = &s - iNdEx = postIndex - case 16: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Initializers == nil { - m.Initializers = &Initializers{} - } - if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OwnerReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Uid = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ApiVersion = &s - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Controller = &b - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.BlockOwnerDeletion = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Patch) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Patch: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Preconditions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Uid = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RootPaths) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RootPaths: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ClientCIDR = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ServerAddress = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
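// A minimal, standalone sketch of the wire-type-2 (length-delimited) decoding
// pattern the generated Unmarshal methods above repeat for every string field
// (ClientCIDR, ServerAddress, and so on): read a varint length, bounds-check
// it, then slice the payload. Stdlib only; the helper name decodeString is
// illustrative, not part of the vendored code.
package main

import (
	"errors"
	"fmt"
)

// decodeString reads one length-delimited payload starting at dAtA[iNdEx],
// mirroring the stringLen/postIndex handling in generated.pb.go.
func decodeString(dAtA []byte, iNdEx int) (string, int, error) {
	var stringLen uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return "", 0, errors.New("integer overflow")
		}
		if iNdEx >= len(dAtA) {
			return "", 0, errors.New("unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		stringLen |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	postIndex := iNdEx + int(stringLen)
	if postIndex > len(dAtA) {
		return "", 0, errors.New("unexpected EOF")
	}
	return string(dAtA[iNdEx:postIndex]), postIndex, nil
}

func main() {
	// 0x0a is tag 1 with wire type 2; 0x03 is the length; "k8s" is the payload.
	buf := []byte{0x0a, 0x03, 'k', '8', 's'}
	s, next, err := decodeString(buf, 1) // index 1: skip the tag byte
	fmt.Println(s, next, err)            // "k8s" 5 <nil>
}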
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Status) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Status: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Metadata == nil { - m.Metadata = &ListMeta{} - } - if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Status = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Message = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Reason = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Details == nil { - m.Details = &StatusDetails{} - } - if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Code = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusCause) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusCause: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Reason = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Message = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Field = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatusDetails) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Name = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Group = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Causes = append(m.Causes, &StatusCause{}) - if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RetryAfterSeconds = &v - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Uid = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Time) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Time: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Time: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Seconds = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nanos = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Timestamp) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Seconds = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nanos = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
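// A standalone sketch of the tag parsing every generated Unmarshal above
// starts with: read one varint "tag", then split it into a field number
// (tag >> 3) and a wire type (tag & 0x7). Wire type 0 is a bare varint, which
// is how Time.Seconds and Timestamp.Nanos are decoded. The value 1595851200
// is an arbitrary example Unix time; stdlib only.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Field 1 (e.g. Seconds), wire type 0: tag byte (1<<3)|0 = 0x08,
	// followed by the value encoded as a varint.
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 1595851200)
	payload := append([]byte{0x08}, buf[:n]...)

	tag, tagLen := binary.Uvarint(payload)
	fieldNum := int32(tag >> 3) // 1
	wireType := int(tag & 0x7)  // 0 == varint

	value, _ := binary.Uvarint(payload[tagLen:])
	fmt.Println(fieldNum, wireType, value) // 1 0 1595851200
}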
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TypeMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ApiVersion = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Verbs) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Verbs: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Verbs: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WatchEvent) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WatchEvent: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Type = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &k8s_io_apimachinery_pkg_runtime.RawExtension{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1775 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0x4f, 0x6f, 0x23, 0x49, - 0x15, 0xa7, 0x3b, 0xb1, 0x63, 0x3f, 0xc7, 0x33, 0xd9, 0x52, 0xb4, 0x6a, 0x2c, 0x88, 0x32, 0xad, - 0xd5, 0x2a, 0x5a, 0x16, 0x47, 0x99, 0x5d, 0x56, 0x0b, 0x0b, 0x41, 0xd9, 0x71, 0x76, 0x14, 0x76, - 0x86, 0x89, 0x6a, 0x66, 0xc3, 0xb2, 0x07, 0x44, 0xa5, 0xbb, 0xe2, 0xd4, 0xa4, 0xdd, 0xdd, 0x54, - 0x95, 0x3d, 0x63, 0x2e, 0x88, 0x0b, 0x5f, 0x01, 0xae, 0x88, 0x03, 0x12, 0xe2, 0x13, 0xf0, 0x09, - 0x38, 0x72, 0xe4, 0x88, 0x86, 0x03, 0x5f, 0x80, 0x33, 0x42, 0x55, 0x5d, 0xd5, 0xae, 0xb6, 0xdb, - 0x59, 0x7b, 0x35, 0x7b, 0x72, 0xbd, 0xd7, 0xf5, 0x7e, 0xef, 0xd5, 0xab, 0xf7, 0xaf, 0x0c, 0xef, - 0xdf, 0x7c, 0x28, 
0xfa, 0x2c, 0x3b, 0x24, 0x39, 0x1b, 0x91, 0xe8, 0x9a, 0xa5, 0x94, 0x4f, 0x0f, - 0xf3, 0x9b, 0xa1, 0x62, 0x88, 0xc3, 0x11, 0x95, 0xe4, 0x70, 0x72, 0x74, 0x38, 0xa4, 0x29, 0xe5, - 0x44, 0xd2, 0xb8, 0x9f, 0xf3, 0x4c, 0x66, 0xe8, 0xad, 0x42, 0xaa, 0xef, 0x4a, 0xf5, 0xf3, 0x9b, - 0xa1, 0x62, 0x88, 0xbe, 0x92, 0xea, 0x4f, 0x8e, 0x7a, 0x87, 0xcb, 0xb0, 0xf9, 0x38, 0x95, 0x6c, - 0x44, 0xe7, 0x61, 0x7b, 0x1f, 0x7c, 0x99, 0x80, 0x88, 0xae, 0xe9, 0x88, 0x2c, 0xc8, 0xbd, 0xb7, - 0x4c, 0x6e, 0x2c, 0x59, 0x72, 0xc8, 0x52, 0x29, 0x24, 0x9f, 0x17, 0x0a, 0xff, 0xe3, 0x43, 0xeb, - 0xe4, 0xfc, 0xec, 0x21, 0xcf, 0xc6, 0x39, 0x42, 0xb0, 0x99, 0x92, 0x11, 0x0d, 0xbc, 0x7d, 0xef, - 0xa0, 0x8d, 0xf5, 0x1a, 0x7d, 0x01, 0xad, 0x09, 0xe5, 0x82, 0x65, 0xa9, 0x08, 0xfc, 0xfd, 0x8d, - 0x83, 0xce, 0xfd, 0xe3, 0xfe, 0x2a, 0xe7, 0xee, 0x6b, 0xc8, 0x8b, 0x42, 0xf4, 0x93, 0x8c, 0x0f, - 0x98, 0x88, 0xb2, 0x09, 0xe5, 0x53, 0x5c, 0xe2, 0xa1, 0xe7, 0xb0, 0x93, 0x73, 0x7a, 0x45, 0x39, - 0xa7, 0xb1, 0xd9, 0x19, 0x6c, 0xec, 0x7b, 0xaf, 0x41, 0xc7, 0x02, 0x2e, 0xfa, 0x0d, 0xf4, 0x04, - 0xe5, 0x13, 0xca, 0x4f, 0xe2, 0x98, 0x53, 0x21, 0x3e, 0x9e, 0x3e, 0x48, 0x18, 0x4d, 0xe5, 0x83, - 0xb3, 0x01, 0x16, 0xc1, 0xa6, 0x3e, 0xd9, 0x8f, 0x57, 0xd3, 0xfa, 0x74, 0x19, 0x0e, 0xbe, 0x45, - 0x45, 0x78, 0x01, 0xdb, 0xd6, 0xd1, 0x8f, 0x98, 0x90, 0xe8, 0x13, 0x68, 0x0e, 0x15, 0x21, 0x02, - 0x4f, 0x2b, 0xef, 0xaf, 0xa6, 0xdc, 0x62, 0x60, 0x23, 0x1d, 0xfe, 0xd5, 0x87, 0xce, 0xc9, 0xf9, - 0x19, 0xa6, 0x22, 0x1b, 0xf3, 0x88, 0xd6, 0x5e, 0xe2, 0x1e, 0x80, 0xfa, 0x15, 0x39, 0x89, 0x68, - 0x1c, 0xf8, 0xfb, 0xde, 0x41, 0x0b, 0x3b, 0x1c, 0x25, 0x73, 0xc3, 0xd2, 0x58, 0x3b, 0xbf, 0x8d, - 0xf5, 0x1a, 0x9d, 0x40, 0x63, 0x42, 0xf9, 0xa5, 0xf2, 0x8d, 0xba, 0x91, 0xef, 0xac, 0x66, 0xde, - 0x85, 0x12, 0xc1, 0x85, 0xa4, 0x52, 0x2b, 0xae, 0x33, 0x2e, 0x7f, 0xaa, 0x34, 0x05, 0x8d, 0xfd, - 0x8d, 0x83, 0x36, 0x76, 0x38, 0x28, 0x84, 0x6d, 0xc1, 0xd2, 0xe1, 0x38, 0x21, 0x5c, 0x31, 0x82, - 0xa6, 0x56, 0x5f, 0xe1, 0x29, 0x8c, 0x88, 0x48, 0x3a, 0xcc, 0x38, 0xa3, 0x22, 0xd8, 0x2a, 0x30, - 0x66, 0x1c, 0xb4, 0x0b, 0x0d, 0xed, 0x88, 0xa0, 0xa5, 0x85, 0x0b, 0x02, 0x05, 0xb0, 0x65, 0xa2, - 0x2c, 0x68, 0x6b, 0xbe, 0x25, 0xc3, 0xdf, 0x79, 0x70, 0xd7, 0x71, 0x97, 0xbe, 0x8a, 0x10, 0xb6, - 0x87, 0x4e, 0x24, 0x19, 0xd7, 0x55, 0x78, 0xe8, 0x09, 0xb4, 0xb9, 0x91, 0xb1, 0x89, 0x70, 0xb4, - 0xf2, 0x8d, 0x59, 0x6d, 0x78, 0x86, 0x11, 0xfe, 0xc5, 0xd3, 0xf7, 0x76, 0x61, 0x93, 0xa1, 0xe7, - 0x24, 0x9a, 0xa7, 0x8f, 0x39, 0x4b, 0x94, 0xdb, 0x83, 0xd7, 0xff, 0xfa, 0x83, 0xf7, 0xbf, 0x1e, - 0x74, 0x07, 0x34, 0xa1, 0x92, 0x3e, 0xc9, 0xa5, 0x36, 0xa9, 0x0f, 0x68, 0xc8, 0x49, 0x44, 0xcf, - 0x29, 0x67, 0x59, 0xfc, 0x94, 0x46, 0x59, 0x1a, 0x0b, 0xed, 0xb9, 0x0d, 0x5c, 0xf3, 0x05, 0xfd, - 0x1c, 0xba, 0x39, 0xd7, 0x6b, 0x26, 0x4d, 0x31, 0x51, 0x61, 0xf5, 0xde, 0x6a, 0x56, 0x9f, 0xbb, - 0xa2, 0xb8, 0x8a, 0x84, 0xde, 0x81, 0x9d, 0x8c, 0xe7, 0xd7, 0x24, 0x1d, 0xd0, 0x9c, 0xa6, 0x31, - 0x4d, 0xa5, 0xd0, 0x91, 0xdc, 0xc2, 0x0b, 0x7c, 0xf4, 0x2e, 0xbc, 0x91, 0xf3, 0x2c, 0x27, 0x43, - 0xa2, 0x64, 0xcf, 0xb3, 0x84, 0x45, 0x53, 0x1d, 0xe1, 0x6d, 0xbc, 0xf8, 0x21, 0x7c, 0x1b, 0x5a, - 0x83, 0x31, 0xd7, 0x1c, 0x75, 0x3f, 0xb1, 0x59, 0x9b, 0x63, 0x96, 0x74, 0xf8, 0x23, 0xe8, 0x9e, - 0xbe, 0xcc, 0x33, 0x2e, 0xad, 0x77, 0xde, 0x84, 0x26, 0xd5, 0x0c, 0xbd, 0xb5, 0x85, 0x0d, 0xa5, - 0xa2, 0x95, 0xbe, 0x24, 0x91, 0x34, 0x39, 0x58, 0x10, 0xe1, 0x73, 0x80, 0x87, 0xb4, 0x94, 0x3d, - 0x80, 0xbb, 0x36, 0x4a, 0xaa, 0x01, 0x39, 0xcf, 0x46, 0xf7, 0x61, 0x97, 0xa5, 0x51, 0x32, 0x8e, - 0xe9, 0x67, 0x29, 0x4b, 0x99, 0x64, 0x24, 
0x61, 0xbf, 0x2e, 0x13, 0xbc, 0xf6, 0x5b, 0xf8, 0x3d, - 0x68, 0xeb, 0xfa, 0xf1, 0xa9, 0xca, 0xf1, 0x32, 0x79, 0x3c, 0x37, 0x79, 0x6c, 0x35, 0xf0, 0x67, - 0xd5, 0x20, 0x3c, 0x81, 0x6e, 0x51, 0x76, 0x6c, 0x99, 0xa9, 0x17, 0xed, 0x41, 0xcb, 0x1a, 0x69, - 0xc4, 0x4b, 0x3a, 0x3c, 0x86, 0x6d, 0xb7, 0x5e, 0x2f, 0x41, 0x70, 0x32, 0xd7, 0xaf, 0x66, 0xee, - 0xe7, 0x10, 0x2c, 0xab, 0xf7, 0x2b, 0x65, 0xf0, 0x72, 0xe4, 0x0b, 0xd8, 0x71, 0x91, 0x6f, 0x71, - 0xcd, 0x52, 0x8c, 0xba, 0x12, 0x1a, 0x5e, 0xc2, 0xae, 0x8b, 0xfb, 0x25, 0xbe, 0x5b, 0x8e, 0xed, - 0x7a, 0x75, 0x63, 0xce, 0xab, 0xf7, 0xa0, 0x73, 0x56, 0x5e, 0x2f, 0xaf, 0xab, 0xfe, 0xe1, 0x1f, - 0x3d, 0xd8, 0x76, 0xf6, 0x08, 0xf4, 0x29, 0x6c, 0xa9, 0x84, 0x60, 0xe9, 0xd0, 0xf4, 0x9e, 0x15, - 0x2b, 0x99, 0x03, 0x82, 0x2d, 0x02, 0x1a, 0x40, 0x93, 0x53, 0x31, 0x4e, 0xa4, 0xc9, 0xe8, 0x77, - 0x57, 0xac, 0x43, 0x92, 0xc8, 0xb1, 0xc0, 0x46, 0x36, 0xfc, 0x93, 0x0f, 0xdd, 0x47, 0xe4, 0x92, - 0x26, 0x4f, 0x69, 0x42, 0x23, 0x99, 0x71, 0x74, 0x05, 0x9d, 0x11, 0x91, 0xd1, 0xb5, 0xe6, 0xda, - 0x26, 0x39, 0x58, 0x0d, 0xbc, 0x82, 0xd4, 0x7f, 0x3c, 0x83, 0x39, 0x4d, 0x25, 0x9f, 0x62, 0x17, - 0x58, 0x0d, 0x21, 0x9a, 0x3c, 0x7d, 0x99, 0xab, 0xba, 0xb7, 0xfe, 0xa0, 0x53, 0x51, 0x86, 0xe9, - 0xaf, 0xc6, 0x8c, 0xd3, 0x11, 0x4d, 0x25, 0x5e, 0xc0, 0xed, 0x1d, 0xc3, 0xce, 0xbc, 0x31, 0x68, - 0x07, 0x36, 0x6e, 0xe8, 0xd4, 0x5c, 0x98, 0x5a, 0xaa, 0xf0, 0x98, 0x90, 0x64, 0x6c, 0x33, 0xa8, - 0x20, 0x7e, 0xe0, 0x7f, 0xe8, 0x85, 0xbf, 0x84, 0x60, 0x99, 0xb6, 0x1a, 0x9c, 0x1e, 0xb4, 0xb2, - 0x5c, 0x4d, 0x7b, 0x19, 0xb7, 0xc9, 0x68, 0x69, 0x55, 0xa0, 0x34, 0xac, 0xaa, 0x94, 0xaa, 0xd7, - 0x18, 0x2a, 0xfc, 0xbd, 0x07, 0x9b, 0xba, 0x27, 0xfe, 0x04, 0x5a, 0xea, 0x80, 0x31, 0x91, 0x44, - 0x63, 0xae, 0x3c, 0xa0, 0x28, 0xe9, 0xc7, 0x54, 0x12, 0x5c, 0xca, 0xa3, 0x07, 0xd0, 0x60, 0x92, - 0x8e, 0xac, 0x5f, 0xbf, 0xbb, 0x14, 0xc8, 0x4c, 0xb8, 0x7d, 0x4c, 0x5e, 0x9c, 0xbe, 0x94, 0x34, - 0xd5, 0x59, 0x53, 0xc8, 0x86, 0x09, 0xb4, 0x2c, 0xb4, 0x3a, 0x99, 0xa0, 0xc9, 0xd5, 0x23, 0x96, - 0xde, 0x98, 0x03, 0x97, 0x74, 0x5d, 0xf9, 0xf4, 0xeb, 0xcb, 0x67, 0x0f, 0x5a, 0x51, 0x96, 0x4a, - 0x96, 0x8e, 0xcb, 0xb4, 0xb2, 0x74, 0xf8, 0x67, 0x1f, 0x3a, 0x4a, 0x9d, 0x2d, 0xca, 0x6f, 0x41, - 0x37, 0x71, 0x3d, 0x6f, 0xd4, 0x56, 0x99, 0x6a, 0xd7, 0x15, 0xa3, 0x49, 0x5c, 0xee, 0x2a, 0x34, - 0x57, 0x99, 0xea, 0x7e, 0x5f, 0xa8, 0x28, 0x30, 0x4d, 0xaa, 0x20, 0xea, 0xec, 0xde, 0xac, 0xb7, - 0xfb, 0x6d, 0xb8, 0xa3, 0xbc, 0x94, 0x8d, 0xa5, 0x6d, 0xbb, 0x0d, 0xdd, 0x8f, 0xe6, 0xb8, 0x4b, - 0xdb, 0x43, 0x73, 0x79, 0x7b, 0x50, 0xb6, 0x25, 0x6c, 0xc4, 0x64, 0xb0, 0xa5, 0x21, 0x0b, 0xa2, - 0xe2, 0xa9, 0xd6, 0x9c, 0xa7, 0x3e, 0x82, 0xf6, 0x63, 0x16, 0xf1, 0xec, 0x19, 0x1b, 0x51, 0x55, - 0xc3, 0x44, 0x65, 0x14, 0xb0, 0xa4, 0x02, 0x4e, 0x49, 0x9a, 0x15, 0x7d, 0xbf, 0x81, 0x0b, 0x22, - 0xfc, 0xdf, 0x16, 0xc0, 0x93, 0xcb, 0xe7, 0x34, 0x2a, 0xee, 0xb5, 0x6e, 0x76, 0x55, 0xa5, 0xdd, - 0x3c, 0x5a, 0xf4, 0x90, 0xe8, 0x9b, 0xd2, 0xee, 0xf0, 0xd0, 0xb7, 0xa0, 0x5d, 0x4e, 0xb3, 0xe6, - 0x2a, 0x67, 0x8c, 0x4a, 0xb4, 0x6c, 0xce, 0x45, 0xcb, 0x0e, 0x6c, 0x8c, 0x59, 0xac, 0x1d, 0xd8, - 0xc6, 0x6a, 0x59, 0x77, 0x0f, 0xcd, 0xfa, 0x7b, 0xd8, 0x03, 0x30, 0x56, 0xa8, 0x4d, 0x85, 0xc3, - 0x1c, 0x0e, 0xfa, 0x1c, 0xde, 0x88, 0x38, 0xd5, 0x6b, 0xe5, 0x1c, 0x21, 0xc9, 0xa8, 0x18, 0x53, - 0x3b, 0xf7, 0xdf, 0x59, 0x2d, 0x97, 0x94, 0x18, 0x5e, 0x04, 0x51, 0xc8, 0xb1, 0x9a, 0xc6, 0x2a, - 0xc8, 0xed, 0xf5, 0x91, 0x17, 0x40, 0xd0, 0x31, 0xf4, 0x2c, 0xf3, 0xe1, 0xe2, 0x78, 0x07, 0xfa, - 0x8c, 0xb7, 0xec, 0x40, 0xcf, 0xa0, 0x99, 0x14, 0x05, 0xbb, 0xa3, 
0x73, 0xfd, 0x87, 0xab, 0x99, - 0x33, 0x8b, 0x81, 0xbe, 0x5b, 0xa8, 0x0d, 0x16, 0x8a, 0xa0, 0x43, 0xd2, 0x34, 0x93, 0xa4, 0x18, - 0x1d, 0xb7, 0x35, 0xf4, 0xc9, 0xda, 0xd0, 0x27, 0x33, 0x0c, 0xd3, 0x08, 0x1c, 0x54, 0xf4, 0x0b, - 0xb8, 0x9b, 0xbd, 0x48, 0x29, 0xc7, 0xea, 0xe5, 0x48, 0x53, 0x35, 0xe7, 0x77, 0xb5, 0xa2, 0xf7, - 0x57, 0x54, 0x54, 0x11, 0xc6, 0xf3, 0x60, 0x2a, 0x5c, 0xae, 0x58, 0x6a, 0x7a, 0x70, 0x70, 0xa7, - 0x78, 0xc9, 0xcc, 0x38, 0x68, 0x1f, 0x3a, 0x51, 0x32, 0x16, 0x92, 0x16, 0x8f, 0xa1, 0xbb, 0x3a, - 0xe8, 0x5c, 0x16, 0xba, 0x80, 0x6d, 0xe6, 0xf4, 0xf1, 0x60, 0x47, 0xdf, 0xf8, 0xfd, 0xb5, 0x9b, - 0xb7, 0xc0, 0x15, 0x9c, 0xde, 0xf7, 0xa1, 0xf3, 0x15, 0x3b, 0x92, 0xea, 0x68, 0xf3, 0x5e, 0x5d, - 0xab, 0xa3, 0xfd, 0xcd, 0x83, 0x3b, 0x55, 0xc7, 0x95, 0x93, 0x94, 0xe7, 0x3c, 0x46, 0x6d, 0x61, - 0xd8, 0x70, 0x0a, 0x83, 0x49, 0xdd, 0xcd, 0x59, 0xea, 0xee, 0x01, 0x90, 0x9c, 0xd9, 0xac, 0x2d, - 0x72, 0xda, 0xe1, 0xe8, 0xb7, 0x64, 0x96, 0x4a, 0x9e, 0x25, 0x09, 0xe5, 0xa6, 0x0c, 0x3a, 0x1c, - 0xf5, 0xa6, 0xb9, 0x4c, 0xb2, 0xe8, 0x46, 0x1b, 0x34, 0x30, 0x41, 0xae, 0x13, 0xbb, 0x85, 0x6b, - 0xbe, 0x84, 0x5b, 0xd0, 0x38, 0x57, 0xb5, 0x3b, 0xbc, 0x07, 0xdd, 0xca, 0x0b, 0xc5, 0xda, 0xe6, - 0x95, 0xb6, 0x85, 0xf7, 0xa0, 0x8d, 0xb3, 0x4c, 0x9e, 0x13, 0x79, 0xad, 0x8b, 0x61, 0xae, 0x16, - 0xe6, 0xa1, 0x57, 0x10, 0x21, 0x81, 0x6f, 0x2e, 0x7d, 0x9d, 0x69, 0xdb, 0x4b, 0xca, 0x00, 0x3b, - 0x1c, 0xd5, 0x7a, 0x2a, 0xef, 0x37, 0xdb, 0x7a, 0x2a, 0xcc, 0xf0, 0xb7, 0x3e, 0x34, 0x8b, 0xc9, - 0xeb, 0xb5, 0x36, 0xf8, 0x37, 0xa1, 0x29, 0x34, 0xaa, 0xd1, 0x6a, 0x28, 0xd5, 0x0e, 0x46, 0x54, - 0x08, 0x32, 0xb4, 0x37, 0x67, 0x49, 0x25, 0xc1, 0x29, 0x11, 0x65, 0x93, 0x33, 0x14, 0x7a, 0x0c, - 0x5b, 0x31, 0x95, 0x84, 0x25, 0x45, 0x53, 0x5b, 0xf9, 0x81, 0x58, 0x1c, 0x6a, 0x50, 0x88, 0x62, - 0x8b, 0xa1, 0xe2, 0x26, 0xca, 0xe2, 0xe2, 0x9f, 0x85, 0x06, 0xd6, 0xeb, 0xf0, 0x33, 0xe8, 0x14, - 0xbb, 0x1f, 0x90, 0xb1, 0x70, 0x2d, 0xf1, 0x2a, 0x96, 0x38, 0xb6, 0xfb, 0x55, 0xdb, 0x77, 0xa1, - 0xa1, 0x1b, 0xba, 0x39, 0x53, 0x41, 0x84, 0xff, 0xf4, 0xa0, 0x5b, 0xb1, 0xa2, 0xb6, 0x9b, 0x95, - 0xa3, 0xbf, 0x5f, 0xf7, 0xe2, 0x72, 0xff, 0x7f, 0x39, 0x83, 0x66, 0xa4, 0x0c, 0xb4, 0x7f, 0x4e, - 0x1d, 0xad, 0xe3, 0x08, 0x7d, 0x34, 0x6c, 0x00, 0xd4, 0xa3, 0x97, 0x53, 0xc9, 0xa7, 0x27, 0x57, - 0x92, 0x72, 0x77, 0x66, 0x68, 0xe0, 0xc5, 0x0f, 0x36, 0x76, 0x9b, 0xb3, 0xd8, 0xfd, 0x00, 0x36, - 0xbf, 0x52, 0x77, 0xff, 0x08, 0xda, 0xb3, 0xce, 0xb2, 0xae, 0xf0, 0x31, 0xb4, 0x9e, 0x4d, 0x73, - 0x6a, 0xe7, 0x82, 0x85, 0x92, 0x50, 0x4d, 0x76, 0x7f, 0x3e, 0xd9, 0xc3, 0x6f, 0x43, 0x43, 0xff, - 0x19, 0xa5, 0xe0, 0x8b, 0xe9, 0xd3, 0x24, 0x5b, 0x31, 0x4e, 0x0e, 0x01, 0x7e, 0xa6, 0xc7, 0xf3, - 0x89, 0x1a, 0x9e, 0x11, 0x6c, 0xca, 0x69, 0x5e, 0x5e, 0x95, 0x5a, 0xa3, 0x53, 0x68, 0x66, 0xba, - 0x77, 0x98, 0x87, 0xcd, 0x9a, 0x63, 0xab, 0x11, 0xfe, 0x78, 0xf7, 0xef, 0xaf, 0xf6, 0xbc, 0x7f, - 0xbc, 0xda, 0xf3, 0xfe, 0xf5, 0x6a, 0xcf, 0xfb, 0xc3, 0xbf, 0xf7, 0xbe, 0xf1, 0x85, 0x3f, 0x39, - 0xfa, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa2, 0xaa, 0xeb, 0xab, 0x72, 0x16, 0x00, 0x00, -} diff --git a/vendor/github.com/ericchiang/k8s/apis/meta/v1/json.go b/vendor/github.com/ericchiang/k8s/apis/meta/v1/json.go deleted file mode 100644 index 9a7369d..0000000 --- a/vendor/github.com/ericchiang/k8s/apis/meta/v1/json.go +++ /dev/null @@ -1,48 +0,0 @@ -package v1 - -import ( - "encoding/json" - "time" -) - -// JSON marshaling logic for the Time type so it can be used for custom -// resources, which serialize to JSON. 
- -func (t Time) MarshalJSON() ([]byte, error) { - var seconds, nanos int64 - if t.Seconds != nil { - seconds = *t.Seconds - } - if t.Nanos != nil { - nanos = int64(*t.Nanos) - } - return json.Marshal(time.Unix(seconds, nanos)) -} - -func (t *Time) UnmarshalJSON(p []byte) error { - var t1 time.Time - if err := json.Unmarshal(p, &t1); err != nil { - return err - } - seconds := t1.Unix() - nanos := int32(t1.UnixNano()) - t.Seconds = &seconds - t.Nanos = &nanos - return nil -} - -// Status must implement json.Unmarshaler for the codec to deserialize a JSON -// payload into it. -// -// See https://github.com/ericchiang/k8s/issues/82 - -type jsonStatus Status - -func (s *Status) UnmarshalJSON(data []byte) error { - var j jsonStatus - if err := json.Unmarshal(data, &j); err != nil { - return err - } - *s = Status(j) - return nil -} diff --git a/vendor/github.com/ericchiang/k8s/client.go b/vendor/github.com/ericchiang/k8s/client.go deleted file mode 100644 index 7df6e32..0000000 --- a/vendor/github.com/ericchiang/k8s/client.go +++ /dev/null @@ -1,489 +0,0 @@ -/* -Package k8s implements a Kubernetes client. - - import ( - "context" - - "github.com/ericchiang/k8s" - appsv1 "github.com/ericchiang/k8s/apis/apps/v1" - ) - - func listDeployments(ctx context.Context) (*appsv1.DeploymentList, error) { - c, err := k8s.NewInClusterClient() - if err != nil { - return nil, err - } - - var deployments appsv1.DeploymentList - if err := c.List(ctx, "my-namespace", &deployments); err != nil { - return nil, err - } - return deployments, nil - } - -*/ -package k8s - -import ( - "bytes" - "context" - "crypto/tls" - "crypto/x509" - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/http" - "os" - "time" - - "golang.org/x/net/http2" - - metav1 "github.com/ericchiang/k8s/apis/meta/v1" -) - -const ( - // AllNamespaces is given to list and watch operations to signify that the code should - // list or watch resources in all namespaces. - AllNamespaces = allNamespaces - // Actual definition is private in case we want to change it later. - allNamespaces = "" - - namespaceDefault = "default" -) - -// String returns a pointer to a string. Useful for creating API objects -// that take pointers instead of literals. -func String(s string) *string { return &s } - -// Int is a convenience for converting an int literal to a pointer to an int. -func Int(i int) *int { return &i } - -// Int32 is a convenience for converting an int32 literal to a pointer to an int32. -func Int32(i int32) *int32 { return &i } - -// Bool is a convenience for converting a bool literal to a pointer to a bool. -func Bool(b bool) *bool { return &b } - -const ( - // Types for watch events. - EventAdded = "ADDED" - EventDeleted = "DELETED" - EventModified = "MODIFIED" - EventError = "ERROR" -) - -// Client is a Kuberntes client. -type Client struct { - // The URL of the API server. - Endpoint string - - // Namespace is the name fo the default reconciled from the client's config. - // It is set when constructing a client using NewClient(), and defaults to - // the value "default". - // - // This value should be used to access the client's default namespace. 
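// A standalone sketch of the json.go logic above, which makes the Time type
// serialize as an ordinary RFC 3339 timestamp by round-tripping through
// time.Time. The local unixTime type is illustrative and mirrors the vendored
// code so the sketch has no dependency on the deleted package; one deliberate
// difference is noted inline.
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

type unixTime struct {
	Seconds *int64
	Nanos   *int32
}

func (t unixTime) MarshalJSON() ([]byte, error) {
	var seconds, nanos int64
	if t.Seconds != nil {
		seconds = *t.Seconds
	}
	if t.Nanos != nil {
		nanos = int64(*t.Nanos)
	}
	return json.Marshal(time.Unix(seconds, nanos))
}

func (t *unixTime) UnmarshalJSON(p []byte) error {
	var t1 time.Time
	if err := json.Unmarshal(p, &t1); err != nil {
		return err
	}
	// The vendored code casts t1.UnixNano() to int32; Nanosecond() is used
	// here so only the sub-second part is kept.
	seconds, nanos := t1.Unix(), int32(t1.Nanosecond())
	t.Seconds, t.Nanos = &seconds, &nanos
	return nil
}

func main() {
	secs := int64(1595851200)
	out, _ := json.Marshal(unixTime{Seconds: &secs}) // RFC 3339 form of the timestamp
	fmt.Println(string(out))

	var back unixTime
	_ = json.Unmarshal(out, &back)
	fmt.Println(*back.Seconds) // 1595851200
}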
For - // example, to create a configmap in the default namespace, use client.Namespace - // when to fill the ObjectMeta: - // - // client, err := k8s.NewClient(config) - // if err != nil { - // // handle error - // } - // cm := v1.ConfigMap{ - // Metadata: &metav1.ObjectMeta{ - // Name: &k8s.String("my-configmap"), - // Namespace: &client.Namespace, - // }, - // Data: map[string]string{"foo": "bar", "spam": "eggs"}, - // } - // err := client.Create(ctx, cm) - // - Namespace string - - // SetHeaders provides a hook for modifying the HTTP headers of all requests. - // - // client, err := k8s.NewClient(config) - // if err != nil { - // // handle error - // } - // client.SetHeaders = func(h http.Header) error { - // h.Set("Authorization", "Bearer "+mytoken) - // return nil - // } - // - SetHeaders func(h http.Header) error - - Client *http.Client -} - -func (c *Client) newRequest(ctx context.Context, verb, url string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(verb, url, body) - if err != nil { - return nil, err - } - if c.SetHeaders != nil { - if err := c.SetHeaders(req.Header); err != nil { - return nil, err - } - } - return req.WithContext(ctx), nil -} - -// NewClient initializes a client from a client config. -func NewClient(config *Config) (*Client, error) { - if len(config.Contexts) == 0 { - if config.CurrentContext != "" { - return nil, fmt.Errorf("no contexts with name %q", config.CurrentContext) - } - - if n := len(config.Clusters); n == 0 { - return nil, errors.New("no clusters provided") - } else if n > 1 { - return nil, errors.New("multiple clusters but no current context") - } - if n := len(config.AuthInfos); n == 0 { - return nil, errors.New("no users provided") - } else if n > 1 { - return nil, errors.New("multiple users but no current context") - } - - return newClient(config.Clusters[0].Cluster, config.AuthInfos[0].AuthInfo, namespaceDefault) - } - - var ctx Context - if config.CurrentContext == "" { - if n := len(config.Contexts); n == 0 { - return nil, errors.New("no contexts provided") - } else if n > 1 { - return nil, errors.New("multiple contexts but no current context") - } - ctx = config.Contexts[0].Context - } else { - for _, c := range config.Contexts { - if c.Name == config.CurrentContext { - ctx = c.Context - goto configFound - } - } - return nil, fmt.Errorf("no config named %q", config.CurrentContext) - configFound: - } - - if ctx.Cluster == "" { - return nil, fmt.Errorf("context doesn't have a cluster") - } - if ctx.AuthInfo == "" { - return nil, fmt.Errorf("context doesn't have a user") - } - var ( - user AuthInfo - cluster Cluster - ) - - for _, u := range config.AuthInfos { - if u.Name == ctx.AuthInfo { - user = u.AuthInfo - goto userFound - } - } - return nil, fmt.Errorf("no user named %q", ctx.AuthInfo) -userFound: - - for _, c := range config.Clusters { - if c.Name == ctx.Cluster { - cluster = c.Cluster - goto clusterFound - } - } - return nil, fmt.Errorf("no cluster named %q", ctx.Cluster) -clusterFound: - - namespace := ctx.Namespace - if namespace == "" { - namespace = namespaceDefault - } - - return newClient(cluster, user, namespace) -} - -// NewInClusterClient returns a client that uses the service account bearer token mounted -// into Kubernetes pods. 
-func NewInClusterClient() (*Client, error) { - host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") - if len(host) == 0 || len(port) == 0 { - return nil, errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") - } - namespace, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err != nil { - return nil, err - } - - cluster := Cluster{ - Server: "https://" + host + ":" + port, - CertificateAuthority: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt", - } - user := AuthInfo{TokenFile: "/var/run/secrets/kubernetes.io/serviceaccount/token"} - return newClient(cluster, user, string(namespace)) -} - -func load(filepath string, data []byte) (out []byte, err error) { - if filepath != "" { - data, err = ioutil.ReadFile(filepath) - } - return data, err -} - -func newClient(cluster Cluster, user AuthInfo, namespace string) (*Client, error) { - if cluster.Server == "" { - // NOTE: kubectl defaults to localhost:8080, but it's probably better to just - // be strict. - return nil, fmt.Errorf("no cluster endpoint provided") - } - - ca, err := load(cluster.CertificateAuthority, cluster.CertificateAuthorityData) - if err != nil { - return nil, fmt.Errorf("loading certificate authority: %v", err) - } - - clientCert, err := load(user.ClientCertificate, user.ClientCertificateData) - if err != nil { - return nil, fmt.Errorf("load client cert: %v", err) - } - clientKey, err := load(user.ClientKey, user.ClientKeyData) - if err != nil { - return nil, fmt.Errorf("load client cert: %v", err) - } - - // See https://github.com/gtank/cryptopasta - tlsConfig := &tls.Config{ - MinVersion: tls.VersionTLS12, - InsecureSkipVerify: cluster.InsecureSkipTLSVerify, - } - - if len(ca) != 0 { - tlsConfig.RootCAs = x509.NewCertPool() - if !tlsConfig.RootCAs.AppendCertsFromPEM(ca) { - return nil, errors.New("certificate authority doesn't contain any certificates") - } - } - if len(clientCert) != 0 { - cert, err := tls.X509KeyPair(clientCert, clientKey) - if err != nil { - return nil, fmt.Errorf("invalid client cert and key pair: %v", err) - } - tlsConfig.Certificates = []tls.Certificate{cert} - } - - token := user.Token - if user.TokenFile != "" { - data, err := ioutil.ReadFile(user.TokenFile) - if err != nil { - return nil, fmt.Errorf("load token file: %v", err) - } - token = string(data) - } - - transport := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - DialContext: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).DialContext, - TLSClientConfig: tlsConfig, - MaxIdleConns: 100, - IdleConnTimeout: 90 * time.Second, - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, - } - if err := http2.ConfigureTransport(transport); err != nil { - return nil, err - } - - client := &Client{ - Endpoint: cluster.Server, - Namespace: namespace, - Client: &http.Client{ - Transport: transport, - }, - } - - if token != "" { - client.SetHeaders = func(h http.Header) error { - h.Set("Authorization", "Bearer "+token) - return nil - } - } - if user.Username != "" && user.Password != "" { - auth := user.Username + ":" + user.Password - auth = "Basic " + base64.StdEncoding.EncodeToString([]byte(auth)) - client.SetHeaders = func(h http.Header) error { - h.Set("Authorization", auth) - return nil - } - } - return client, nil -} - -// APIError is an error from a unexpected status code. 
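// NewInClusterClient above fails unless the pod environment provides the
// service host/port variables and the mounted service-account files. A cheap
// pre-check of that environment, using only the variables and paths the
// function itself reads; likelyInCluster is an illustrative helper name.
package main

import (
	"fmt"
	"os"
)

func likelyInCluster() bool {
	if os.Getenv("KUBERNETES_SERVICE_HOST") == "" || os.Getenv("KUBERNETES_SERVICE_PORT") == "" {
		return false
	}
	_, err := os.Stat("/var/run/secrets/kubernetes.io/serviceaccount/token")
	return err == nil
}

func main() {
	fmt.Println("in cluster:", likelyInCluster())
}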
-type APIError struct { - // The status object returned by the Kubernetes API, - Status *metav1.Status - - // Status code returned by the HTTP request. - // - // NOTE: For some reason the value set in Status.Code - // doesn't correspond to the HTTP status code. Possibly - // a bug? - Code int -} - -func (e *APIError) Error() string { - if e.Status != nil && e.Status.Message != nil && e.Status.Status != nil { - return fmt.Sprintf("kubernetes api: %s %d %s", *e.Status.Status, e.Code, *e.Status.Message) - } - return fmt.Sprintf("%#v", e) -} - -func checkStatusCode(contentType string, statusCode int, body []byte) error { - if statusCode/100 == 2 { - return nil - } - - return newAPIError(contentType, statusCode, body) -} - -func newAPIError(contentType string, statusCode int, body []byte) error { - status := new(metav1.Status) - if err := unmarshal(body, contentType, status); err != nil { - return fmt.Errorf("decode error status %d: %v", statusCode, err) - } - return &APIError{status, statusCode} -} - -func (c *Client) client() *http.Client { - if c.Client == nil { - return http.DefaultClient - } - return c.Client -} - -// Create creates a resource of a registered type. The API version and resource -// type is determined by the type of the req argument. The result is unmarshaled -// into req. -// -// configMap := corev1.ConfigMap{ -// Metadata: &metav1.ObjectMeta{ -// Name: k8s.String("my-configmap"), -// Namespace: k8s.String("my-namespace"), -// }, -// Data: map[string]string{ -// "my-key": "my-val", -// }, -// } -// if err := client.Create(ctx, &configMap); err != nil { -// // handle error -// } -// // resource is updated with response of create request -// fmt.Println(conifgMap.Metaata.GetCreationTimestamp()) -// -func (c *Client) Create(ctx context.Context, req Resource, options ...Option) error { - url, err := resourceURL(c.Endpoint, req, false, options...) - if err != nil { - return err - } - return c.do(ctx, "POST", url, req, req) -} - -func (c *Client) Delete(ctx context.Context, req Resource, options ...Option) error { - url, err := resourceURL(c.Endpoint, req, true, options...) - if err != nil { - return err - } - o := &deleteOptions{ - Kind: "DeleteOptions", - APIVersion: "v1", - PropagationPolicy: "Background", - } - for _, option := range options { - option.updateDelete(req, o) - } - - return c.do(ctx, "DELETE", url, o, nil) -} - -func (c *Client) Update(ctx context.Context, req Resource, options ...Option) error { - url, err := resourceURL(c.Endpoint, req, true, options...) - if err != nil { - return err - } - return c.do(ctx, "PUT", url, req, req) -} - -func (c *Client) Get(ctx context.Context, namespace, name string, resp Resource, options ...Option) error { - url, err := resourceGetURL(c.Endpoint, namespace, name, resp, options...) - if err != nil { - return err - } - return c.do(ctx, "GET", url, nil, resp) -} - -func (c *Client) List(ctx context.Context, namespace string, resp ResourceList, options ...Option) error { - url, err := resourceListURL(c.Endpoint, namespace, resp, options...) 
- if err != nil { - return err - } - return c.do(ctx, "GET", url, nil, resp) -} - -func (c *Client) do(ctx context.Context, verb, url string, req, resp interface{}) error { - var ( - contentType string - body io.Reader - ) - if req != nil { - ct, data, err := marshal(req) - if err != nil { - return fmt.Errorf("encoding object: %v", err) - } - contentType = ct - body = bytes.NewReader(data) - } - r, err := http.NewRequest(verb, url, body) - if err != nil { - return fmt.Errorf("new request: %v", err) - } - if contentType != "" { - r.Header.Set("Content-Type", contentType) - r.Header.Set("Accept", contentType) - } else if resp != nil { - r.Header.Set("Accept", contentTypeFor(resp)) - } - if c.SetHeaders != nil { - c.SetHeaders(r.Header) - } - - re, err := c.client().Do(r) - if err != nil { - return fmt.Errorf("performing request: %v", err) - } - defer re.Body.Close() - - respBody, err := ioutil.ReadAll(re.Body) - if err != nil { - return fmt.Errorf("read body: %v", err) - } - - respCT := re.Header.Get("Content-Type") - if err := checkStatusCode(respCT, re.StatusCode, respBody); err != nil { - return err - } - if resp != nil { - if err := unmarshal(respBody, respCT, resp); err != nil { - return fmt.Errorf("decode response: %v", err) - } - } - return nil -} diff --git a/vendor/github.com/ericchiang/k8s/codec.go b/vendor/github.com/ericchiang/k8s/codec.go deleted file mode 100644 index cf86416..0000000 --- a/vendor/github.com/ericchiang/k8s/codec.go +++ /dev/null @@ -1,98 +0,0 @@ -package k8s - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - - "github.com/ericchiang/k8s/runtime" - "github.com/golang/protobuf/proto" -) - -const ( - contentTypePB = "application/vnd.kubernetes.protobuf" - contentTypeJSON = "application/json" -) - -func contentTypeFor(i interface{}) string { - if _, ok := i.(proto.Message); ok { - return contentTypePB - } - return contentTypeJSON -} - -// marshal encodes an object and returns the content type of that resource -// and the marshaled representation. -// -// marshal prefers protobuf encoding, but falls back to JSON. -func marshal(i interface{}) (string, []byte, error) { - if _, ok := i.(proto.Message); ok { - data, err := marshalPB(i) - return contentTypePB, data, err - } - data, err := json.Marshal(i) - return contentTypeJSON, data, err -} - -// unmarshal decoded an object given the content type of the encoded form. 
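// The do() method above treats any non-2xx response as an error and hands the
// body to the status decoder. The same shape with only net/http, using a
// simplified local apiStatus stand-in for metav1.Status (its JSON tags here
// are illustrative, not taken from the vendored package):
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"net/http"
)

type apiStatus struct {
	Status  string `json:"status"`
	Message string `json:"message"`
	Code    int32  `json:"code"`
}

func checkResponse(resp *http.Response) error {
	if resp.StatusCode/100 == 2 {
		return nil
	}
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return fmt.Errorf("read error body: %v", err)
	}
	var st apiStatus
	if err := json.Unmarshal(body, &st); err != nil {
		return fmt.Errorf("decode error status %d: %v", resp.StatusCode, err)
	}
	return fmt.Errorf("kubernetes api: %s %d %s", st.Status, resp.StatusCode, st.Message)
}

func main() {
	resp, err := http.Get("https://example.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(checkResponse(resp)) // <nil> for a 2xx response
}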
-func unmarshal(data []byte, contentType string, i interface{}) error { - msg, isPBMsg := i.(proto.Message) - if contentType == contentTypePB && isPBMsg { - if err := unmarshalPB(data, msg); err != nil { - return fmt.Errorf("decode protobuf: %v", err) - } - return nil - } - if isPBMsg { - // only decode into JSON of a protobuf message if the type - // explicitly implements json.Unmarshaler - if _, ok := i.(json.Unmarshaler); !ok { - return fmt.Errorf("cannot decode json payload into protobuf object %T", i) - } - } - if err := json.Unmarshal(data, i); err != nil { - return fmt.Errorf("decode json: %v", err) - } - return nil -} - -var magicBytes = []byte{0x6b, 0x38, 0x73, 0x00} - -func unmarshalPB(b []byte, msg proto.Message) error { - if len(b) < len(magicBytes) { - return errors.New("payload is not a kubernetes protobuf object") - } - if !bytes.Equal(b[:len(magicBytes)], magicBytes) { - return errors.New("payload is not a kubernetes protobuf object") - } - - u := new(runtime.Unknown) - if err := u.Unmarshal(b[len(magicBytes):]); err != nil { - return fmt.Errorf("unmarshal unknown: %v", err) - } - return proto.Unmarshal(u.Raw, msg) -} - -func marshalPB(obj interface{}) ([]byte, error) { - message, ok := obj.(proto.Message) - if !ok { - return nil, fmt.Errorf("expected obj of type proto.Message, got %T", obj) - } - payload, err := proto.Marshal(message) - if err != nil { - return nil, err - } - - // The URL path informs the API server what the API group, version, and resource - // of the object. We don't need to specify it here to talk to the API server. - body, err := (&runtime.Unknown{Raw: payload}).Marshal() - if err != nil { - return nil, err - } - - d := make([]byte, len(magicBytes)+len(body)) - copy(d[:len(magicBytes)], magicBytes) - copy(d[len(magicBytes):], body) - return d, nil -} diff --git a/vendor/github.com/ericchiang/k8s/config.go b/vendor/github.com/ericchiang/k8s/config.go deleted file mode 100644 index 3b9972e..0000000 --- a/vendor/github.com/ericchiang/k8s/config.go +++ /dev/null @@ -1,170 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package k8s - -import ( - "github.com/ericchiang/k8s/runtime" -) - -// Where possible, json tags match the cli argument names. -// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. - -// Config holds the information needed to build connect to remote kubernetes clusters as a given user -type Config struct { - // Legacy field from pkg/api/types.go TypeMeta. - // TODO(jlowdermilk): remove this after eliminating downstream dependencies. - // +optional - Kind string `json:"kind,omitempty" yaml:"kind,omitempty"` - // DEPRECATED: APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // Because a cluster can run multiple API groups and potentially multiple versions of each, it no longer makes sense to specify - // a single value for the cluster version. 
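// marshalPB/unmarshalPB above wrap every protobuf payload in the Kubernetes
// envelope: the four magic bytes "k8s\x00" followed by a serialized
// runtime.Unknown message. Recognizing and stripping that prefix needs nothing
// beyond the bytes package; stripEnvelope is an illustrative helper name.
package main

import (
	"bytes"
	"errors"
	"fmt"
)

var magic = []byte{0x6b, 0x38, 0x73, 0x00} // "k8s\x00"

// stripEnvelope returns the bytes after the magic prefix (the serialized
// runtime.Unknown), or an error if the payload is not a Kubernetes protobuf.
func stripEnvelope(b []byte) ([]byte, error) {
	if len(b) < len(magic) || !bytes.Equal(b[:len(magic)], magic) {
		return nil, errors.New("payload is not a kubernetes protobuf object")
	}
	return b[len(magic):], nil
}

func main() {
	payload := append(append([]byte{}, magic...), 0x0a, 0x00) // envelope + tiny body
	rest, err := stripEnvelope(payload)
	fmt.Println(len(rest), err) // 2 <nil>
}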
- // This field isn't really needed anyway, so we are deprecating it without replacement. - // It will be ignored if it is present. - // +optional - APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty"` - // Preferences holds general information to be use for cli interactions - Preferences Preferences `json:"preferences" yaml:"preferences"` - // Clusters is a map of referencable names to cluster configs - Clusters []NamedCluster `json:"clusters" yaml:"clusters"` - // AuthInfos is a map of referencable names to user configs - AuthInfos []NamedAuthInfo `json:"users" yaml:"users"` - // Contexts is a map of referencable names to context configs - Contexts []NamedContext `json:"contexts" yaml:"contexts"` - // CurrentContext is the name of the context that you would like to use by default - CurrentContext string `json:"current-context" yaml:"current-context"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -type Preferences struct { - // +optional - Colors bool `json:"colors,omitempty" yaml:"colors,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// Cluster contains information about how to communicate with a kubernetes cluster -type Cluster struct { - // Server is the address of the kubernetes cluster (https://hostname:port). - Server string `json:"server" yaml:"server"` - // APIVersion is the preferred api version for communicating with the kubernetes cluster (v1, v2, etc). - // +optional - APIVersion string `json:"api-version,omitempty" yaml:"api-version,omitempty"` - // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. - // +optional - InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty" yaml:"insecure-skip-tls-verify,omitempty"` - // CertificateAuthority is the path to a cert file for the certificate authority. - // +optional - CertificateAuthority string `json:"certificate-authority,omitempty" yaml:"certificate-authority,omitempty"` - // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority - // +optional - CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty" yaml:"certificate-authority-data,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. -type AuthInfo struct { - // ClientCertificate is the path to a client cert file for TLS. - // +optional - ClientCertificate string `json:"client-certificate,omitempty" yaml:"client-certificate,omitempty"` - // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate - // +optional - ClientCertificateData []byte `json:"client-certificate-data,omitempty" yaml:"client-certificate-data,omitempty"` - // ClientKey is the path to a client key file for TLS. 
- // +optional - ClientKey string `json:"client-key,omitempty" yaml:"client-key,omitempty"` - // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey - // +optional - ClientKeyData []byte `json:"client-key-data,omitempty" yaml:"client-key-data,omitempty"` - // Token is the bearer token for authentication to the kubernetes cluster. - // +optional - Token string `json:"token,omitempty" yaml:"token,omitempty"` - // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. - // +optional - TokenFile string `json:"tokenFile,omitempty" yaml:"tokenFile,omitempty"` - // Impersonate is the username to imperonate. The name matches the flag. - // +optional - Impersonate string `json:"as,omitempty" yaml:"as,omitempty"` - // Username is the username for basic authentication to the kubernetes cluster. - // +optional - Username string `json:"username,omitempty" yaml:"username,omitempty"` - // Password is the password for basic authentication to the kubernetes cluster. - // +optional - Password string `json:"password,omitempty" yaml:"password,omitempty"` - // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. - // +optional - AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty" yaml:"auth-provider,omitempty"` - // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) -type Context struct { - // Cluster is the name of the cluster for this context - Cluster string `json:"cluster" yaml:"cluster"` - // AuthInfo is the name of the authInfo for this context - AuthInfo string `json:"user" yaml:"user"` - // Namespace is the default namespace to use on unspecified requests - // +optional - Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"` - // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields - // +optional - Extensions []NamedExtension `json:"extensions,omitempty" yaml:"extensions,omitempty"` -} - -// NamedCluster relates nicknames to cluster information -type NamedCluster struct { - // Name is the nickname for this Cluster - Name string `json:"name" yaml:"name"` - // Cluster holds the cluster information - Cluster Cluster `json:"cluster" yaml:"cluster"` -} - -// NamedContext relates nicknames to context information -type NamedContext struct { - // Name is the nickname for this Context - Name string `json:"name" yaml:"name"` - // Context holds the context information - Context Context `json:"context" yaml:"context"` -} - -// NamedAuthInfo relates nicknames to auth information -type NamedAuthInfo struct { - // Name is the nickname for this AuthInfo - Name string `json:"name" yaml:"name"` - // AuthInfo holds the auth information - AuthInfo AuthInfo `json:"user" yaml:"user"` -} - -// NamedExtension relates nicknames to extension information -type NamedExtension struct { - // Name is the nickname for this Extension - Name string `json:"name" yaml:"name"` - // Extension holds the extension information - Extension runtime.RawExtension `json:"extension" yaml:"extension"` -} - -// AuthProviderConfig holds the configuration for a specified auth provider. -type AuthProviderConfig struct { - Name string `json:"name" yaml:"name"` - Config map[string]string `json:"config" yaml:"config"` -} diff --git a/vendor/github.com/ericchiang/k8s/discovery.go b/vendor/github.com/ericchiang/k8s/discovery.go deleted file mode 100644 index a219dae..0000000 --- a/vendor/github.com/ericchiang/k8s/discovery.go +++ /dev/null @@ -1,66 +0,0 @@ -package k8s - -import ( - "context" - "path" - - metav1 "github.com/ericchiang/k8s/apis/meta/v1" -) - -type Version struct { - Major string `json:"major"` - Minor string `json:"minor"` - GitVersion string `json:"gitVersion"` - GitCommit string `json:"gitCommit"` - GitTreeState string `json:"gitTreeState"` - BuildDate string `json:"buildDate"` - GoVersion string `json:"goVersion"` - Compiler string `json:"compiler"` - Platform string `json:"platform"` -} - -// Discovery is a client used to determine the API version and supported -// resources of the server. 
-type Discovery struct { - client *Client -} - -func NewDiscoveryClient(c *Client) *Discovery { - return &Discovery{c} -} - -func (d *Discovery) get(ctx context.Context, path string, resp interface{}) error { - return d.client.do(ctx, "GET", urlForPath(d.client.Endpoint, path), nil, resp) -} - -func (d *Discovery) Version(ctx context.Context) (*Version, error) { - var v Version - if err := d.get(ctx, "version", &v); err != nil { - return nil, err - } - return &v, nil -} - -func (d *Discovery) APIGroups(ctx context.Context) (*metav1.APIGroupList, error) { - var groups metav1.APIGroupList - if err := d.get(ctx, "apis", &groups); err != nil { - return nil, err - } - return &groups, nil -} - -func (d *Discovery) APIGroup(ctx context.Context, name string) (*metav1.APIGroup, error) { - var group metav1.APIGroup - if err := d.get(ctx, path.Join("apis", name), &group); err != nil { - return nil, err - } - return &group, nil -} - -func (d *Discovery) APIResources(ctx context.Context, groupName, groupVersion string) (*metav1.APIResourceList, error) { - var list metav1.APIResourceList - if err := d.get(ctx, path.Join("apis", groupName, groupVersion), &list); err != nil { - return nil, err - } - return &list, nil -} diff --git a/vendor/github.com/ericchiang/k8s/labels.go b/vendor/github.com/ericchiang/k8s/labels.go deleted file mode 100644 index 7f05cca..0000000 --- a/vendor/github.com/ericchiang/k8s/labels.go +++ /dev/null @@ -1,87 +0,0 @@ -package k8s - -import ( - "regexp" - "strings" -) - -const ( - qnameCharFmt string = "[A-Za-z0-9]" - qnameExtCharFmt string = "[-A-Za-z0-9_./]" - qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt - qualifiedNameMaxLength int = 63 - labelValueFmt string = "(" + qualifiedNameFmt + ")?" -) - -var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") - -// LabelSelector represents a Kubernetes label selector. -// -// Any values that don't conform to Kubernetes label value restrictions -// will be silently dropped. -// -// l := new(k8s.LabelSelector) -// l.Eq("component", "frontend") -// l.In("type", "prod", "staging") -// -type LabelSelector struct { - stmts []string -} - -func (l *LabelSelector) Selector() Option { - return QueryParam("labelSelector", l.String()) -} - -func (l *LabelSelector) String() string { - return strings.Join(l.stmts, ",") -} - -func validLabelValue(s string) bool { - if len(s) > 63 || len(s) == 0 { - return false - } - return labelValueRegexp.MatchString(s) -} - -// Eq selects labels which have the key and the key has the provide value. -func (l *LabelSelector) Eq(key, val string) { - if !validLabelValue(key) || !validLabelValue(val) { - return - } - l.stmts = append(l.stmts, key+"="+val) -} - -// NotEq selects labels where the key is present and has a different value -// than the value provided. -func (l *LabelSelector) NotEq(key, val string) { - if !validLabelValue(key) || !validLabelValue(val) { - return - } - l.stmts = append(l.stmts, key+"!="+val) -} - -// In selects labels which have the key and the key has one of the provided values. -func (l *LabelSelector) In(key string, vals ...string) { - if !validLabelValue(key) || len(vals) == 0 { - return - } - for _, val := range vals { - if !validLabelValue(val) { - return - } - } - l.stmts = append(l.stmts, key+" in ("+strings.Join(vals, ", ")+")") -} - -// NotIn selects labels which have the key and the key is not one of the provided values. 
-func (l *LabelSelector) NotIn(key string, vals ...string) { - if !validLabelValue(key) || len(vals) == 0 { - return - } - for _, val := range vals { - if !validLabelValue(val) { - return - } - } - l.stmts = append(l.stmts, key+" notin ("+strings.Join(vals, ", ")+")") -} diff --git a/vendor/github.com/ericchiang/k8s/resource.go b/vendor/github.com/ericchiang/k8s/resource.go deleted file mode 100644 index 3cf32f5..0000000 --- a/vendor/github.com/ericchiang/k8s/resource.go +++ /dev/null @@ -1,274 +0,0 @@ -package k8s - -import ( - "errors" - "fmt" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - metav1 "github.com/ericchiang/k8s/apis/meta/v1" -) - -// Option represents optional call parameters, such as label selectors. -type Option interface { - updateURL(base string, v url.Values) string - updateDelete(r Resource, d *deleteOptions) -} - -type optionFunc func(base string, v url.Values) string - -func (f optionFunc) updateDelete(r Resource, d *deleteOptions) {} -func (f optionFunc) updateURL(base string, v url.Values) string { return f(base, v) } - -type deleteOptions struct { - Kind string `json:"kind"` - APIVersion string `json:"apiVersion"` - - GracePeriod *int64 `json:"gracePeriodSeconds,omitempty"` - Preconditions struct { - UID string `json:"uid,omitempty"` - } `json:"preconditions"` - PropagationPolicy string `json:"propagationPolicy"` -} - -// QueryParam can be used to manually set a URL query parameter by name. -func QueryParam(name, value string) Option { - return optionFunc(func(base string, v url.Values) string { - v.Set(name, value) - return base - }) -} - -type deleteOptionFunc func(r Resource, d *deleteOptions) - -func (f deleteOptionFunc) updateDelete(r Resource, d *deleteOptions) { f(r, d) } -func (f deleteOptionFunc) updateURL(base string, v url.Values) string { return base } - -func DeleteAtomic() Option { - return deleteOptionFunc(func(r Resource, d *deleteOptions) { - d.Preconditions.UID = *r.GetMetadata().Uid - }) -} - -// DeletePropagationOrphan orphans the dependent resources during a delete. -func DeletePropagationOrphan() Option { - return deleteOptionFunc(func(r Resource, d *deleteOptions) { - d.PropagationPolicy = "Orphan" - }) -} - -// DeletePropagationBackground deletes the resources and causes the garbage -// collector to delete dependent resources in the background. -func DeletePropagationBackground() Option { - return deleteOptionFunc(func(r Resource, d *deleteOptions) { - d.PropagationPolicy = "Background" - }) -} - -// DeletePropagationForeground deletes the resources and causes the garbage -// collector to delete dependent resources and wait for all dependents whose -// ownerReference.blockOwnerDeletion=true. API sever will put the "foregroundDeletion" -// finalizer on the object, and sets its deletionTimestamp. This policy is -// cascading, i.e., the dependents will be deleted with Foreground. -func DeletePropagationForeground() Option { - return deleteOptionFunc(func(r Resource, d *deleteOptions) { - d.PropagationPolicy = "Foreground" - }) -} - -func DeleteGracePeriod(d time.Duration) Option { - seconds := int64(d / time.Second) - return deleteOptionFunc(func(r Resource, d *deleteOptions) { - d.GracePeriod = &seconds - }) -} - -// ResourceVersion causes watch operations to only show changes since -// a particular version of a resource. -func ResourceVersion(resourceVersion string) Option { - return QueryParam("resourceVersion", resourceVersion) -} - -// Timeout declares the timeout for list and watch operations. 
Timeout -// is only accurate to the second. -func Timeout(d time.Duration) Option { - return QueryParam( - "timeoutSeconds", - strconv.FormatInt(int64(d/time.Second), 10), - ) -} - -// Subresource is a way to interact with a part of an API object without needing -// permissions on the entire resource. For example, a node isn't able to modify -// a pod object, but can update the "pods/status" subresource. -// -// Common subresources are "status" and "scale". -// -// See https://kubernetes.io/docs/reference/api-concepts/ -func Subresource(name string) Option { - return optionFunc(func(base string, v url.Values) string { - return base + "/" + name - }) -} - -type resourceType struct { - apiGroup string - apiVersion string - name string - namespaced bool -} - -var ( - resources = map[reflect.Type]resourceType{} - resourceLists = map[reflect.Type]resourceType{} -) - -// Resource is a Kubernetes resource, such as a Node or Pod. -type Resource interface { - GetMetadata() *metav1.ObjectMeta -} - -// Resource is list of common Kubernetes resources, such as a NodeList or -// PodList. -type ResourceList interface { - GetMetadata() *metav1.ListMeta -} - -func Register(apiGroup, apiVersion, name string, namespaced bool, r Resource) { - rt := reflect.TypeOf(r) - if _, ok := resources[rt]; ok { - panic(fmt.Sprintf("resource registered twice %T", r)) - } - resources[rt] = resourceType{apiGroup, apiVersion, name, namespaced} -} - -func RegisterList(apiGroup, apiVersion, name string, namespaced bool, l ResourceList) { - rt := reflect.TypeOf(l) - if _, ok := resources[rt]; ok { - panic(fmt.Sprintf("resource registered twice %T", l)) - } - resourceLists[rt] = resourceType{apiGroup, apiVersion, name, namespaced} -} - -func urlFor(endpoint, apiGroup, apiVersion, namespace, resource, name string, options ...Option) string { - basePath := "apis/" - if apiGroup == "" { - basePath = "api/" - } - - var p string - if namespace != "" { - p = path.Join(basePath, apiGroup, apiVersion, "namespaces", namespace, resource, name) - } else { - p = path.Join(basePath, apiGroup, apiVersion, resource, name) - } - e := "" - if strings.HasSuffix(endpoint, "/") { - e = endpoint + p - } else { - e = endpoint + "/" + p - } - if len(options) == 0 { - return e - } - - v := url.Values{} - for _, option := range options { - e = option.updateURL(e, v) - } - if len(v) == 0 { - return e - } - return e + "?" 
+ v.Encode() -} - -func urlForPath(endpoint, path string) string { - if strings.HasPrefix(path, "/") { - path = path[1:] - } - if strings.HasSuffix(endpoint, "/") { - return endpoint + path - } - return endpoint + "/" + path -} - -func resourceURL(endpoint string, r Resource, withName bool, options ...Option) (string, error) { - t, ok := resources[reflect.TypeOf(r)] - if !ok { - return "", fmt.Errorf("unregistered type %T", r) - } - meta := r.GetMetadata() - if meta == nil { - return "", errors.New("resource has no object meta") - } - switch { - case t.namespaced && (meta.Namespace == nil || *meta.Namespace == ""): - return "", errors.New("no resource namespace provided") - case !t.namespaced && (meta.Namespace != nil && *meta.Namespace != ""): - return "", errors.New("resource not namespaced") - case withName && (meta.Name == nil || *meta.Name == ""): - return "", errors.New("no resource name provided") - } - name := "" - if withName { - name = *meta.Name - } - namespace := "" - if t.namespaced { - namespace = *meta.Namespace - } - - return urlFor(endpoint, t.apiGroup, t.apiVersion, namespace, t.name, name, options...), nil -} - -func resourceGetURL(endpoint, namespace, name string, r Resource, options ...Option) (string, error) { - t, ok := resources[reflect.TypeOf(r)] - if !ok { - return "", fmt.Errorf("unregistered type %T", r) - } - - if !t.namespaced && namespace != "" { - return "", fmt.Errorf("type not namespaced") - } - if t.namespaced && namespace == "" { - return "", fmt.Errorf("no namespace provided") - } - - return urlFor(endpoint, t.apiGroup, t.apiVersion, namespace, t.name, name, options...), nil -} - -func resourceListURL(endpoint, namespace string, r ResourceList, options ...Option) (string, error) { - t, ok := resourceLists[reflect.TypeOf(r)] - if !ok { - return "", fmt.Errorf("unregistered type %T", r) - } - - if !t.namespaced && namespace != "" { - return "", fmt.Errorf("type not namespaced") - } - - return urlFor(endpoint, t.apiGroup, t.apiVersion, namespace, t.name, "", options...), nil -} - -func resourceWatchURL(endpoint, namespace string, r Resource, options ...Option) (string, error) { - t, ok := resources[reflect.TypeOf(r)] - if !ok { - return "", fmt.Errorf("unregistered type %T", r) - } - - if !t.namespaced && namespace != "" { - return "", fmt.Errorf("type not namespaced") - } - - url := urlFor(endpoint, t.apiGroup, t.apiVersion, namespace, t.name, "", options...) - if strings.Contains(url, "?") { - url = url + "&watch=true" - } else { - url = url + "?watch=true" - } - return url, nil -} diff --git a/vendor/github.com/ericchiang/k8s/runtime/generated.pb.go b/vendor/github.com/ericchiang/k8s/runtime/generated.pb.go deleted file mode 100644 index de8ca3c..0000000 --- a/vendor/github.com/ericchiang/k8s/runtime/generated.pb.go +++ /dev/null @@ -1,887 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/apimachinery/pkg/runtime/generated.proto - -/* - Package runtime is a generated protocol buffer package. - - It is generated from these files: - k8s.io/apimachinery/pkg/runtime/generated.proto - - It has these top-level messages: - RawExtension - TypeMeta - Unknown -*/ -package runtime - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/ericchiang/k8s/util/intstr" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// RawExtension is used to hold extensions in external versions. -// -// To use this, make a field which has RawExtension as its type in your external, versioned -// struct, and Object in your internal struct. You also need to register your -// various plugin types. -// -// // Internal package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.Object `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // External package: -// type MyAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// MyPlugin runtime.RawExtension `json:"myPlugin"` -// } -// type PluginA struct { -// AOption string `json:"aOption"` -// } -// -// // On the wire, the JSON will look something like this: -// { -// "kind":"MyAPIObject", -// "apiVersion":"v1", -// "myPlugin": { -// "kind":"PluginA", -// "aOption":"foo", -// }, -// } -// -// So what happens? Decode first uses json or yaml to unmarshal the serialized data into -// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. -// The next step is to copy (using pkg/conversion) into the internal struct. The runtime -// package's DefaultScheme has conversion functions installed which will unpack the -// JSON stored in RawExtension, turning it into the correct object type, and storing it -// in the Object. (TODO: In the case where the object is of an unknown type, a -// runtime.Unknown object will be created and stored.) -// -// +k8s:deepcopy-gen=true -// +protobuf=true -// +k8s:openapi-gen=true -type RawExtension struct { - // Raw is the underlying serialization of this object. - // - // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - Raw []byte `protobuf:"bytes,1,opt,name=raw" json:"raw,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (m *RawExtension) String() string { return proto.CompactTextString(m) } -func (*RawExtension) ProtoMessage() {} -func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *RawExtension) GetRaw() []byte { - if m != nil { - return m.Raw - } - return nil -} - -// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, -// like this: -// type MyAwesomeAPIObject struct { -// runtime.TypeMeta `json:",inline"` -// ... // other fields -// } -// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// -// TypeMeta is provided here for convenience. You may use it directly from this package or define -// your own with the same fields. 
-// -// +k8s:deepcopy-gen=false -// +protobuf=true -// +k8s:openapi-gen=true -type TypeMeta struct { - // +optional - ApiVersion *string `protobuf:"bytes,1,opt,name=apiVersion" json:"apiVersion,omitempty"` - // +optional - Kind *string `protobuf:"bytes,2,opt,name=kind" json:"kind,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (m *TypeMeta) String() string { return proto.CompactTextString(m) } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *TypeMeta) GetApiVersion() string { - if m != nil && m.ApiVersion != nil { - return *m.ApiVersion - } - return "" -} - -func (m *TypeMeta) GetKind() string { - if m != nil && m.Kind != nil { - return *m.Kind - } - return "" -} - -// Unknown allows api objects with unknown types to be passed-through. This can be used -// to deal with the API objects from a plug-in. Unknown objects still have functioning -// TypeMeta features-- kind, version, etc. -// TODO: Make this object have easy access to field based accessors and settors for -// metadata and field mutatation. -// -// +k8s:deepcopy-gen=true -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +protobuf=true -// +k8s:openapi-gen=true -type Unknown struct { - TypeMeta *TypeMeta `protobuf:"bytes,1,opt,name=typeMeta" json:"typeMeta,omitempty"` - // Raw will hold the complete serialized object which couldn't be matched - // with a registered type. Most likely, nothing should be done with this - // except for passing it through the system. - Raw []byte `protobuf:"bytes,2,opt,name=raw" json:"raw,omitempty"` - // ContentEncoding is encoding used to encode 'Raw' data. - // Unspecified means no encoding. - ContentEncoding *string `protobuf:"bytes,3,opt,name=contentEncoding" json:"contentEncoding,omitempty"` - // ContentType is serialization method used to serialize 'Raw'. - // Unspecified means ContentTypeJSON. 
- ContentType *string `protobuf:"bytes,4,opt,name=contentType" json:"contentType,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Unknown) Reset() { *m = Unknown{} } -func (m *Unknown) String() string { return proto.CompactTextString(m) } -func (*Unknown) ProtoMessage() {} -func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *Unknown) GetTypeMeta() *TypeMeta { - if m != nil { - return m.TypeMeta - } - return nil -} - -func (m *Unknown) GetRaw() []byte { - if m != nil { - return m.Raw - } - return nil -} - -func (m *Unknown) GetContentEncoding() string { - if m != nil && m.ContentEncoding != nil { - return *m.ContentEncoding - } - return "" -} - -func (m *Unknown) GetContentType() string { - if m != nil && m.ContentType != nil { - return *m.ContentType - } - return "" -} - -func init() { - proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") - proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") - proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") -} -func (m *RawExtension) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Raw != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *TypeMeta) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ApiVersion != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ApiVersion))) - i += copy(dAtA[i:], *m.ApiVersion) - } - if m.Kind != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i += copy(dAtA[i:], *m.Kind) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Unknown) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.TypeMeta != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) - n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.Raw != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i += copy(dAtA[i:], m.Raw) - } - if m.ContentEncoding != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ContentEncoding))) - i += copy(dAtA[i:], *m.ContentEncoding) - } - if m.ContentType != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ContentType))) - i += copy(dAtA[i:], *m.ContentType) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 
7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *RawExtension) Size() (n int) { - var l int - _ = l - if m.Raw != nil { - l = len(m.Raw) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *TypeMeta) Size() (n int) { - var l int - _ = l - if m.ApiVersion != nil { - l = len(*m.ApiVersion) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Unknown) Size() (n int) { - var l int - _ = l - if m.TypeMeta != nil { - l = m.TypeMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Raw != nil { - l = len(m.Raw) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ContentEncoding != nil { - l = len(*m.ContentEncoding) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ContentType != nil { - l = len(*m.ContentType) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *RawExtension) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) - if m.Raw == nil { - m.Raw = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TypeMeta) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApiVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ApiVersion = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Unknown) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Unknown: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TypeMeta == nil { - m.TypeMeta = &TypeMeta{} - } - if err := m.TypeMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
- if m.Raw == nil { - m.Raw = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ContentEncoding = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ContentType = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - 
panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 278 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0xb1, 0x4e, 0xf3, 0x30, - 0x14, 0x85, 0x7f, 0xb7, 0x95, 0xda, 0xff, 0xb6, 0x12, 0xc8, 0x53, 0x60, 0x08, 0x55, 0xa6, 0xb2, - 0xd8, 0x12, 0x2c, 0x4c, 0x0c, 0x48, 0x19, 0x59, 0x22, 0x60, 0x60, 0xb3, 0x12, 0x2b, 0x58, 0xa1, - 0xd7, 0x96, 0x73, 0xab, 0xd0, 0x37, 0xe1, 0x29, 0x78, 0x0e, 0x46, 0x1e, 0x01, 0x85, 0x17, 0x41, - 0x31, 0x69, 0x15, 0x55, 0x42, 0x6c, 0x47, 0xc7, 0xe7, 0x3b, 0x3e, 0x17, 0x64, 0x75, 0x55, 0x0b, - 0x63, 0xa5, 0x72, 0x66, 0xad, 0xf2, 0x27, 0x83, 0xda, 0x6f, 0xa5, 0xab, 0x4a, 0xe9, 0x37, 0x48, - 0x66, 0xad, 0x65, 0xa9, 0x51, 0x7b, 0x45, 0xba, 0x10, 0xce, 0x5b, 0xb2, 0xfc, 0xec, 0x07, 0x10, - 0x43, 0x40, 0xb8, 0xaa, 0x14, 0x3d, 0x70, 0x7a, 0xf9, 0x5b, 0xe3, 0x86, 0xcc, 0xb3, 0x34, 0x48, - 0x35, 0xf9, 0xc3, 0xd6, 0x64, 0x09, 0x8b, 0x4c, 0x35, 0xe9, 0x0b, 0x69, 0xac, 0x8d, 0x45, 0x7e, - 0x0c, 0x63, 0xaf, 0x9a, 0x88, 0x2d, 0xd9, 0x6a, 0x91, 0x75, 0x32, 0xb9, 0x86, 0xd9, 0xdd, 0xd6, - 0xe9, 0x5b, 0x4d, 0x8a, 0xc7, 0x00, 0xca, 0x99, 0x07, 0xed, 0xbb, 0x6c, 0x08, 0xfd, 0xcf, 0x06, - 0x0e, 0xe7, 0x30, 0xa9, 0x0c, 0x16, 0xd1, 0x28, 0xbc, 0x04, 0x9d, 0xbc, 0x31, 0x98, 0xde, 0x63, - 0x85, 0xb6, 0x41, 0x9e, 0xc2, 0x8c, 0xfa, 0xae, 0x40, 0xcf, 0x2f, 0xce, 0xc5, 0x1f, 0x67, 0x89, - 0xdd, 0xe7, 0xd9, 0x1e, 0xdd, 0x8d, 0x1c, 0xed, 0x47, 0xf2, 0x15, 0x1c, 0xe5, 0x16, 0x49, 0x23, - 0xa5, 0x98, 0xdb, 0xc2, 0x60, 0x19, 0x8d, 0xc3, 0x86, 0x43, 0x9b, 0x2f, 0x61, 0xde, 0x5b, 0x5d, - 0x71, 0x34, 0x09, 0xa9, 0xa1, 0x75, 0x73, 0xf2, 0xde, 0xc6, 0xec, 0xa3, 0x8d, 0xd9, 0x67, 0x1b, - 0xb3, 0xd7, 0xaf, 0xf8, 0xdf, 0xe3, 0xb4, 0xdf, 0xf2, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x60, 0x1f, - 0x94, 0x77, 0xb5, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/ericchiang/k8s/runtime/schema/generated.pb.go b/vendor/github.com/ericchiang/k8s/runtime/schema/generated.pb.go deleted file mode 100644 index 83bda63..0000000 --- a/vendor/github.com/ericchiang/k8s/runtime/schema/generated.pb.go +++ /dev/null @@ -1,45 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/apimachinery/pkg/runtime/schema/generated.proto - -/* - Package schema is a generated protocol buffer package. - - It is generated from these files: - k8s.io/apimachinery/pkg/runtime/schema/generated.proto - - It has these top-level messages: -*/ -package schema - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import _ "github.com/ericchiang/k8s/util/intstr" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -func init() { - proto.RegisterFile("k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 138 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xcb, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, - 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2a, 0xcd, 0x2b, 0xc9, 0xcc, 0x4d, 0xd5, 0x2f, 0x4e, 0xce, 0x48, - 0xcd, 0x4d, 0xd4, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, - 0x2f, 0xc9, 0x17, 0x52, 0x83, 0xe8, 0xd3, 0x43, 0xd6, 0xa7, 0x57, 0x90, 0x9d, 0xae, 0x07, 0xd5, - 0xa7, 0x07, 0xd1, 0x27, 0x65, 0x8c, 0xcb, 0xfc, 0xd2, 0x92, 0xcc, 0x1c, 0xfd, 0xcc, 0xbc, 0x92, - 0xe2, 0x92, 0x22, 0x74, 0xc3, 0x9d, 0x24, 0x4e, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, - 0xc1, 0x23, 0x39, 0xc6, 0x19, 0x8f, 0xe5, 0x18, 0xa2, 0xd8, 0x20, 0xc6, 0x01, 0x02, 0x00, 0x00, - 0xff, 0xff, 0xf9, 0x8e, 0xdb, 0x42, 0xaf, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/ericchiang/k8s/util/intstr/generated.pb.go b/vendor/github.com/ericchiang/k8s/util/intstr/generated.pb.go deleted file mode 100644 index 36a796a..0000000 --- a/vendor/github.com/ericchiang/k8s/util/intstr/generated.pb.go +++ /dev/null @@ -1,399 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/apimachinery/pkg/util/intstr/generated.proto - -/* - Package intstr is a generated protocol buffer package. - - It is generated from these files: - k8s.io/apimachinery/pkg/util/intstr/generated.proto - - It has these top-level messages: - IntOrString -*/ -package intstr - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// IntOrString is a type that can hold an int32 or a string. When used in -// JSON or YAML marshalling and unmarshalling, it produces or consumes the -// inner type. This allows you to have, for example, a JSON field that can -// accept a name or number. 
-// TODO: Rename to Int32OrString -// -// +protobuf=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -// +k8s:openapi-gen=true -type IntOrString struct { - Type *int64 `protobuf:"varint,1,opt,name=type" json:"type,omitempty"` - IntVal *int32 `protobuf:"varint,2,opt,name=intVal" json:"intVal,omitempty"` - StrVal *string `protobuf:"bytes,3,opt,name=strVal" json:"strVal,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (m *IntOrString) String() string { return proto.CompactTextString(m) } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *IntOrString) GetType() int64 { - if m != nil && m.Type != nil { - return *m.Type - } - return 0 -} - -func (m *IntOrString) GetIntVal() int32 { - if m != nil && m.IntVal != nil { - return *m.IntVal - } - return 0 -} - -func (m *IntOrString) GetStrVal() string { - if m != nil && m.StrVal != nil { - return *m.StrVal - } - return "" -} - -func init() { - proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") -} -func (m *IntOrString) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Type)) - } - if m.IntVal != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.IntVal)) - } - if m.StrVal != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StrVal))) - i += copy(dAtA[i:], *m.StrVal) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *IntOrString) Size() (n int) { - var l int - _ = l - if m.Type != nil { - n += 1 + sovGenerated(uint64(*m.Type)) - } - if m.IntVal != nil { - n += 1 + sovGenerated(uint64(*m.IntVal)) - } - if m.StrVal != nil { - l = len(*m.StrVal) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *IntOrString) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Type = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.IntVal = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.StrVal = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 182 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x32, 0xce, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0x4f, 0x2c, 0xc8, 0xcc, 0x4d, 0x4c, 0xce, 0xc8, 0xcc, 0x4b, 0x2d, 0xaa, 0xd4, - 0x2f, 0xc8, 0x4e, 0xd7, 0x2f, 0x2d, 0xc9, 0xcc, 0xd1, 0xcf, 0xcc, 0x2b, 0x29, 0x2e, 0x29, 0xd2, - 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, - 0x52, 0x86, 0x68, 0xd2, 0x43, 0xd6, 0xa4, 0x57, 0x90, 0x9d, 0xae, 0x07, 0xd2, 0xa4, 0x07, 0xd1, - 0xa4, 0x14, 0xc8, 0xc5, 0xed, 0x99, 0x57, 0xe2, 0x5f, 0x14, 0x5c, 0x52, 0x94, 0x99, 0x97, 0x2e, - 0x24, 0xc4, 0xc5, 0x52, 0x52, 0x59, 0x90, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x1c, 0x04, 0x66, - 0x0b, 0x89, 0x71, 0xb1, 0x65, 0xe6, 0x95, 0x84, 0x25, 0xe6, 0x48, 0x30, 0x29, 0x30, 0x6a, 0xb0, - 0x06, 0x41, 0x79, 0x20, 0xf1, 0xe2, 0x92, 0x22, 0x90, 0x38, 0xb3, 0x02, 0xa3, 0x06, 0x67, 0x10, - 0x94, 0xe7, 0x24, 0x71, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, - 0xce, 0x78, 0x2c, 0xc7, 0x10, 0xc5, 0x06, 0xb1, 0x0c, 0x10, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xa6, - 0x56, 0x6e, 0xc7, 0x00, 0x00, 0x00, -} diff --git a/vendor/github.com/ericchiang/k8s/watch.go b/vendor/github.com/ericchiang/k8s/watch.go deleted file mode 100644 index 91c82dc..0000000 --- a/vendor/github.com/ericchiang/k8s/watch.go +++ /dev/null @@ -1,199 +0,0 @@ -package k8s - -import ( - "bytes" - "context" - "encoding/binary" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - - "github.com/ericchiang/k8s/runtime" - "github.com/ericchiang/k8s/watch/versioned" - "github.com/golang/protobuf/proto" -) - -// Decode events from a watch stream. -// -// See: https://github.com/kubernetes/community/blob/master/contributors/design-proposals/protobuf.md#streaming-wire-format - -// Watcher receives a stream of events tracking a particular resource within -// a namespace or across all namespaces. -// -// Watcher does not automatically reconnect. If a watch fails, a new watch must -// be initialized. -type Watcher struct { - watcher interface { - Next(Resource) (string, error) - Close() error - } -} - -// Next decodes the next event from the watch stream. Errors are fatal, and -// indicate that the watcher should no longer be used, and must be recreated. -func (w *Watcher) Next(r Resource) (string, error) { - return w.watcher.Next(r) -} - -// Close closes the active connection with the API server being used for -// the watch. 
-func (w *Watcher) Close() error { - return w.watcher.Close() -} - -type watcherJSON struct { - d *json.Decoder - c io.Closer -} - -func (w *watcherJSON) Close() error { - return w.c.Close() -} - -func (w *watcherJSON) Next(r Resource) (string, error) { - var event struct { - Type string `json:"type"` - Object json.RawMessage `json:"object"` - } - if err := w.d.Decode(&event); err != nil { - return "", fmt.Errorf("decode event: %v", err) - } - if event.Type == "" { - return "", errors.New("wwatch event had no type field") - } - if err := json.Unmarshal([]byte(event.Object), r); err != nil { - return "", fmt.Errorf("decode resource: %v", err) - } - return event.Type, nil -} - -type watcherPB struct { - r io.ReadCloser -} - -func (w *watcherPB) Next(r Resource) (string, error) { - msg, ok := r.(proto.Message) - if !ok { - return "", errors.New("object was not a protobuf message") - } - event, unknown, err := w.next() - if err != nil { - return "", err - } - if event.Type == nil || *event.Type == "" { - return "", errors.New("watch event had no type field") - } - if err := proto.Unmarshal(unknown.Raw, msg); err != nil { - return "", err - } - return *event.Type, nil -} - -func (w *watcherPB) Close() error { - return w.r.Close() -} - -func (w *watcherPB) next() (*versioned.Event, *runtime.Unknown, error) { - length := make([]byte, 4) - if _, err := io.ReadFull(w.r, length); err != nil { - return nil, nil, err - } - - body := make([]byte, int(binary.BigEndian.Uint32(length))) - if _, err := io.ReadFull(w.r, body); err != nil { - return nil, nil, fmt.Errorf("read frame body: %v", err) - } - - var event versioned.Event - if err := proto.Unmarshal(body, &event); err != nil { - return nil, nil, err - } - - if event.Object == nil { - return nil, nil, fmt.Errorf("event had no underlying object") - } - - unknown, err := parseUnknown(event.Object.Raw) - if err != nil { - return nil, nil, err - } - - return &event, unknown, nil -} - -var unknownPrefix = []byte{0x6b, 0x38, 0x73, 0x00} - -func parseUnknown(b []byte) (*runtime.Unknown, error) { - if !bytes.HasPrefix(b, unknownPrefix) { - return nil, errors.New("bytes did not start with expected prefix") - } - - var u runtime.Unknown - if err := proto.Unmarshal(b[len(unknownPrefix):], &u); err != nil { - return nil, err - } - return &u, nil -} - -// Watch creates a watch on a resource. It takes an example Resource to -// determine what endpoint to watch. -// -// Watch does not automatically reconnect. If a watch fails, a new watch must -// be initialized. -// -// // Watch configmaps in the "kube-system" namespace -// var configMap corev1.ConfigMap -// watcher, err := client.Watch(ctx, "kube-system", &configMap) -// if err != nil { -// // handle error -// } -// defer watcher.Close() // Always close the returned watcher. -// -// for { -// cm := new(corev1.ConfigMap) -// eventType, err := watcher.Next(cm) -// if err != nil { -// // watcher encountered and error, exit or create a new watcher -// } -// fmt.Println(eventType, *cm.Metadata.Name) -// } -// -func (c *Client) Watch(ctx context.Context, namespace string, r Resource, options ...Option) (*Watcher, error) { - url, err := resourceWatchURL(c.Endpoint, namespace, r, options...) 
- if err != nil { - return nil, err - } - - ct := contentTypeFor(r) - - req, err := c.newRequest(ctx, "GET", url, nil) - if err != nil { - return nil, err - } - req.Header.Set("Accept", ct) - - resp, err := c.client().Do(req) - if err != nil { - return nil, err - } - - if resp.StatusCode/100 != 2 { - body, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, err - } - return nil, newAPIError(resp.Header.Get("Content-Type"), resp.StatusCode, body) - } - - if ct == contentTypePB { - return &Watcher{&watcherPB{r: resp.Body}}, nil - } - - return &Watcher{&watcherJSON{ - d: json.NewDecoder(resp.Body), - c: resp.Body, - }}, nil -} diff --git a/vendor/github.com/ericchiang/k8s/watch/versioned/generated.pb.go b/vendor/github.com/ericchiang/k8s/watch/versioned/generated.pb.go deleted file mode 100644 index 30d3d4a..0000000 --- a/vendor/github.com/ericchiang/k8s/watch/versioned/generated.pb.go +++ /dev/null @@ -1,405 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/pkg/watch/versioned/generated.proto -// DO NOT EDIT! - -/* - Package versioned is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/pkg/watch/versioned/generated.proto - - It has these top-level messages: - Event -*/ -package versioned - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import k8s_io_kubernetes_pkg_runtime "github.com/ericchiang/k8s/runtime" -import _ "github.com/ericchiang/k8s/util/intstr" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -// Event represents a single event to a watched resource. -// -// +protobuf=true -// +k8s:openapi-gen=true -type Event struct { - Type *string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - // Object is: - // * If Type is Added or Modified: the new state of the object. - // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Error: *api.Status is recommended; other types may make sense - // depending on context. 
- Object *k8s_io_kubernetes_pkg_runtime.RawExtension `protobuf:"bytes,2,opt,name=object" json:"object,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *Event) GetType() string { - if m != nil && m.Type != nil { - return *m.Type - } - return "" -} - -func (m *Event) GetObject() *k8s_io_kubernetes_pkg_runtime.RawExtension { - if m != nil { - return m.Object - } - return nil -} - -func init() { - proto.RegisterType((*Event)(nil), "github.com/ericchiang.k8s.watch.versioned.Event") -} -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Type != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n1, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Event) Size() (n int) { - var l int - _ = l - if m.Type != nil { - l = len(*m.Type) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Type = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &k8s_io_kubernetes_pkg_runtime.RawExtension{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("github.com/ericchiang/k8s/watch/versioned/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 214 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0xcd, 0xb6, 0x28, 0xd6, - 0xcb, 0xcc, 0xd7, 0xcf, 0x2e, 0x4d, 0x4a, 0x2d, 0xca, 0x4b, 0x2d, 0x49, 0x2d, 0xd6, 0x2f, 0xc8, - 0x4e, 0xd7, 0x2f, 0x4f, 0x2c, 0x49, 0xce, 0xd0, 0x2f, 0x4b, 0x2d, 0x2a, 0xce, 0xcc, 0xcf, 0x4b, - 0x4d, 0xd1, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0x4a, 0x2c, 0x49, 0x4d, 0xd1, 0x2b, 0x28, 0xca, 0x2f, - 0xc9, 0x17, 0x52, 0x85, 0x68, 0xd3, 0x43, 0x68, 0xd3, 0x2b, 0xc8, 0x4e, 0xd7, 0x03, 0x6b, 0xd3, - 0x83, 0x6b, 0x93, 0xd2, 0xc5, 0x6e, 0x7a, 0x51, 0x69, 0x5e, 0x49, 0x66, 0x6e, 0x2a, 0xba, 0xa9, - 0x52, 0x86, 0xd8, 0x95, 0x97, 0x96, 0x64, 0xe6, 0xe8, 0x67, 0xe6, 0x95, 0x14, 0x97, 0x14, 0xa1, - 0x6b, 0x51, 0x4a, 0xe0, 0x62, 0x75, 0x2d, 0x4b, 0xcd, 0x2b, 0x11, 0x12, 0xe2, 0x62, 0x29, 0xa9, - 0x2c, 0x48, 0x95, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0c, 0x02, 0xb3, 0x85, 0x9c, 0xb9, 0xd8, 0xf2, - 0x93, 0xb2, 0x52, 0x93, 0x4b, 0x24, 0x98, 0x14, 0x18, 0x35, 0xb8, 0x8d, 0xb4, 0xf5, 0xb0, 0x3b, - 0x1b, 0xea, 0x1e, 0xbd, 0xa0, 0xc4, 0x72, 0xd7, 0x8a, 0x92, 0xd4, 0x3c, 0x90, 0xeb, 0x83, 0xa0, - 0x5a, 0x9d, 0xa4, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, - 0x19, 0x8f, 0xe5, 0x18, 0xa2, 0x38, 0xe1, 0x1e, 0x04, 0x04, 0x00, 0x00, 0xff, 0xff, 0xb4, 0x05, - 0x8c, 0x51, 0x3f, 0x01, 0x00, 0x00, -} diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 0000000..3d97fc7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS new file mode 100644 index 0000000..1b4f6c2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -0,0 +1,23 @@ +Anton Povarov +Brian Goff +Clayton Coleman +Denis Smirnov +DongYun Kang +Dwayne Schultz +Georg Apitz +Gustav Paul +Johan Brandhorst +John Shahid +John Tuley +Laurent +Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo +Stephen J Day +Tamir Duberstein +Todd Eisenberger +Tormod Erevik Lea +Vyacheslav Kim +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 0000000..f57de90 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. 
+https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 0000000..00d65f3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go similarity index 96% rename from vendor/github.com/golang/protobuf/proto/clone.go rename to vendor/github.com/gogo/protobuf/proto/clone.go index 3cd3249..a26b046 100644 --- a/vendor/github.com/golang/protobuf/proto/clone.go +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -108,7 +108,12 @@ func mergeStruct(out, in reflect.Value) { mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) } - if emIn, err := extendable(in.Addr().Interface()); err == nil { + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { emOut, _ := extendable(out.Addr().Interface()) mIn, muIn := emIn.extensionsRead() if mIn != nil { diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 0000000..2455248 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go similarity index 100% rename from vendor/github.com/golang/protobuf/proto/decode.go rename to vendor/github.com/gogo/protobuf/proto/decode.go diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go new file mode 100644 index 0000000..35b882c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. 
+func RegisterMessageSetType(Message, int32, string) {} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 0000000..fe1bd7d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. 
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 0000000..93464c9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. 
+func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 0000000..e748e17 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go similarity index 99% rename from vendor/github.com/golang/protobuf/proto/encode.go rename to vendor/github.com/gogo/protobuf/proto/encode.go index 3abfed2..9581ccd 100644 --- a/vendor/github.com/golang/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -189,6 +189,8 @@ type Marshaler interface { // prefixed by a varint-encoded length. 
func (p *Buffer) EncodeMessage(pb Message) error { siz := Size(pb) + sizVar := SizeVarint(uint64(siz)) + p.grow(siz + sizVar) p.EncodeVarint(uint64(siz)) return p.Marshal(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 0000000..0f5fb17 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go similarity index 99% rename from vendor/github.com/golang/protobuf/proto/equal.go rename to vendor/github.com/gogo/protobuf/proto/equal.go index f9b6e41..d4db5a1 100644 --- a/vendor/github.com/golang/protobuf/proto/equal.go +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -246,8 +246,7 @@ func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { return false } - m1 := extensionAsLegacyType(e1.value) - m2 := extensionAsLegacyType(e2.value) + m1, m2 := e1.value, e2.value if m1 == nil && m2 == nil { // Both have only encoded form. diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 0000000..341c6f5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,605 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. 
+func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. 
+ desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. 
+// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. 
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + ClearExtension(pb, extension) + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. 
+func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 0000000..6f1ae12 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,389 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMapBackwards(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func EncodeExtensionMapBackwards(m 
map[int32]Extension, data []byte) (n int, err error) { + o := 0 + end := len(data) + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[end-len(e.enc):], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + end -= n + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) 
+ m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) 
+ return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go similarity index 98% rename from vendor/github.com/golang/protobuf/proto/lib.go rename to vendor/github.com/gogo/protobuf/proto/lib.go index fdd328b..80db1c1 100644 --- a/vendor/github.com/golang/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -99,7 +99,7 @@ The resulting file, test.pb.go, is: package example - import proto "github.com/golang/protobuf/proto" + import proto "github.com/gogo/protobuf/proto" import math "math" type FOO int32 @@ -226,7 +226,7 @@ To create and play with a Test object: import ( "log" - "github.com/golang/protobuf/proto" + "github.com/gogo/protobuf/proto" pb "./example.pb" ) @@ -503,7 +503,7 @@ func (p *Buffer) DebugPrint(s string, b []byte) { var u uint64 obuf := p.buf - index := p.index + sindex := p.index p.buf = b p.index = 0 depth := 0 @@ -598,7 +598,7 @@ out: fmt.Printf("\n") p.buf = obuf - p.index = index + p.index = sindex } // SetDefaults sets unset protocol buffer fields to their default values. @@ -608,9 +608,11 @@ func SetDefaults(pb Message) { setDefaults(reflect.ValueOf(pb), true, false) } -// v is a pointer to a struct. +// v is a struct. func setDefaults(v reflect.Value, recur, zeros bool) { - v = v.Elem() + if v.Kind() == reflect.Ptr { + v = v.Elem() + } defaultMu.RLock() dm, ok := defaults[v.Type()] @@ -712,8 +714,11 @@ func setDefaults(v reflect.Value, recur, zeros bool) { for _, ni := range dm.nested { f := v.Field(ni) - // f is *T or []*T or map[T]*T + // f is *T or T or []*T or []T switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + case reflect.Ptr: if f.IsNil() { continue @@ -723,7 +728,7 @@ func setDefaults(v reflect.Value, recur, zeros bool) { case reflect.Slice: for i := 0; i < f.Len(); i++ { e := f.Index(i) - if e.IsNil() { + if e.Kind() == reflect.Ptr && e.IsNil() { continue } setDefaults(e, recur, zeros) @@ -795,6 +800,9 @@ func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { var canHaveDefault bool switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + case reflect.Ptr: if ft.Elem().Kind() == reflect.Struct { nestedMessage = true @@ -804,7 +812,7 @@ func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMes case reflect.Slice: switch ft.Elem().Kind() { - case reflect.Ptr: + case reflect.Ptr, reflect.Struct: nestedMessage = true // repeated message case reflect.Uint8: canHaveDefault = true // bytes field @@ -943,15 +951,15 @@ func isProto3Zero(v reflect.Value) bool { const ( // ProtoPackageIsVersion3 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion3 = true + GoGoProtoPackageIsVersion3 = true // ProtoPackageIsVersion2 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion2 = true + GoGoProtoPackageIsVersion2 = true // ProtoPackageIsVersion1 is referenced from generated protocol buffer files // to assert that that code is compatible with this version of the proto package. - ProtoPackageIsVersion1 = true + GoGoProtoPackageIsVersion1 = true ) // InternalMessageInfo is a type used internally by generated .pb.go files. 
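For context, a minimal sketch of how the extension helpers vendored in extensions.go above are typically consumed. pb.MyMessage and pb.E_MyExtension are hypothetical generated identifiers used only for illustration (an extension whose ExtensionType is *string); proto.SetExtension, proto.GetExtension and the proto.String pointer helper are the functions defined in this vendored package.

    package main

    import (
        "fmt"
        "log"

        proto "github.com/gogo/protobuf/proto"
        pb "example.com/hypothetical/pb" // hypothetical generated protobuf package
    )

    func main() {
        msg := &pb.MyMessage{}

        // SetExtension type-checks the value against the descriptor's
        // ExtensionType and stores it in the message's extension map.
        if err := proto.SetExtension(msg, pb.E_MyExtension, proto.String("hello")); err != nil {
            log.Fatal(err)
        }

        // GetExtension decodes the field (caching the decoded value) and
        // returns an interface{} holding the descriptor's ExtensionType,
        // here *string.
        v, err := proto.GetExtension(msg, pb.E_MyExtension)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(*v.(*string))
    }
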
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 0000000..b3aa391 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go similarity index 100% rename from vendor/github.com/golang/protobuf/proto/message_set.go rename to vendor/github.com/gogo/protobuf/proto/message_set.go diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go similarity index 99% rename from vendor/github.com/golang/protobuf/proto/pointer_reflect.go rename to vendor/github.com/gogo/protobuf/proto/pointer_reflect.go index 94fa919..b6cad90 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -79,13 +79,10 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) pointer { +func toAddrPointer(i *interface{}, isptr bool) pointer { v := reflect.ValueOf(*i) u := reflect.New(v.Type()) u.Elem().Set(v) - if deref { - u = u.Elem() - } return pointer{v: u} } diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 0000000..7ffd3c2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. + +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go similarity index 96% rename from vendor/github.com/golang/protobuf/proto/pointer_unsafe.go rename to vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go index dbfffe0..d55a335 100644 --- a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -85,21 +85,16 @@ func toPointer(i *Message) pointer { // toAddrPointer converts an interface to a pointer that points to // the interface data. -func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { +func toAddrPointer(i *interface{}, isptr bool) pointer { // Super-tricky - read or get the address of data word of interface value. if isptr { // The interface is of pointer type, thus it is a direct interface. // The data word is the pointer data itself. We take its address. - p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} - } else { - // The interface is not of pointer type. The data word is the pointer - // to the data. - p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} } - if deref { - p.p = *(*unsafe.Pointer)(p.p) - } - return p + // The interface is not of pointer type. The data word is the pointer + // to the data. 
+ return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} } // valToPointer converts v to a pointer. v must be of pointer type. diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 0000000..aca8eed --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 0000000..28da147 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,610 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. 
+// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + WktPointer bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. 
+ p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + case f == "wktptr": + p.WktPointer = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + if p.WktPointer && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.MapValProp.CustomType = p.CustomType + p.MapValProp.StdDuration = p.StdDuration + p.MapValProp.StdTime = p.StdTime + p.MapValProp.WktPointer = p.WktPointer + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. 
+ propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + if isOneofMessage { + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. 
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
+func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 0000000..40ea3dd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 0000000..5a5fd93 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/golang/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go similarity index 90% rename from vendor/github.com/golang/protobuf/proto/table_marshal.go rename to vendor/github.com/gogo/protobuf/proto/table_marshal.go index 5cb11fa..f8babde 100644 --- a/vendor/github.com/golang/protobuf/proto/table_marshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -65,6 +65,11 @@ type marshalInfo struct { hasmarshaler bool // has custom marshaler sync.RWMutex // protect extElems map, also for initialization extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte } // marshalFieldInfo is the information used for marshaling a field of a message. @@ -87,12 +92,13 @@ type marshalElemInfo struct { sizer sizer marshaler marshaler isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) - deref bool // dereference the pointer before operating on it; implies isptr } var ( marshalInfoMap = map[reflect.Type]*marshalInfo{} marshalInfoLock sync.Mutex + + uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind() ) // getMarshalInfo returns the information to marshal a given type of message. 
@@ -169,6 +175,17 @@ func (u *marshalInfo) size(ptr pointer) int { // If the message can marshal itself, let it do it, for compatibility. // NOTE: This is not efficient. if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) b, _ := m.Marshal() return len(b) @@ -194,10 +211,15 @@ func (u *marshalInfo) size(ptr pointer) int { m := *ptr.offset(u.v1extensions).toOldExtensions() n += u.sizeV1Extensions(m) } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } if u.unrecognized.IsValid() { s := *ptr.offset(u.unrecognized).toBytes() n += len(s) } + // cache the result for use in marshal if u.sizecache.IsValid() { atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) @@ -252,9 +274,13 @@ func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte return b, err } } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } for _, f := range u.fields { if f.required { - if ptr.offset(f.field).getPointer().isNil() { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { // Required field is not set. // We record the error but keep going, to give a complete marshaling. if errLater == nil { @@ -309,8 +335,16 @@ func (u *marshalInfo) computeMarshalInfo() { u.unrecognized = invalidField u.extensions = invalidField u.v1extensions = invalidField + u.bytesExtensions = invalidField u.sizecache = invalidField + isOneofMessage := false + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } // If the message can marshal itself, let it do it, for compatibility. // NOTE: This is not efficient. 
if reflect.PtrTo(t).Implements(marshalerType) { @@ -319,20 +353,14 @@ func (u *marshalInfo) computeMarshalInfo() { return } - // get oneof implementers - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - n := t.NumField() // deal with XXX fields first for i := 0; i < t.NumField(); i++ { f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } if !strings.HasPrefix(f.Name, "XXX_") { continue } @@ -345,7 +373,11 @@ func (u *marshalInfo) computeMarshalInfo() { u.extensions = toField(&f) u.messageset = f.Tag.Get("protobuf_messageset") == "1" case "XXX_extensions": - u.v1extensions = toField(&f) + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } case "XXX_NoUnkeyedLiteral": // nothing to do default: @@ -354,6 +386,18 @@ func (u *marshalInfo) computeMarshalInfo() { n-- } + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if isOneofMessage { + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + } + // normal fields fields := make([]marshalFieldInfo, n) // batch allocation u.fields = make([]*marshalFieldInfo, 0, n) @@ -411,22 +455,13 @@ func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { panic("tag is not an integer") } wt := wiretype(tags[0]) - if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { - t = t.Elem() - } - sizer, marshaler := typeMarshaler(t, tags, false, false) - var deref bool - if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { - t = reflect.PtrTo(t) - deref = true - } + sizr, marshalr := typeMarshaler(t, tags, false, false) e = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, + sizer: sizr, + marshaler: marshalr, isptr: t.Kind() == reflect.Ptr, - deref: deref, } // update cache @@ -479,12 +514,12 @@ func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofI panic("tag is not an integer") } wt := wiretype(tags[0]) - sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value fi.oneofElems[t.Elem()] = &marshalElemInfo{ wiretag: uint64(tag)<<3 | wt, tagsize: SizeVarint(uint64(tag) << 3), - sizer: sizer, - marshaler: marshaler, + sizer: sizr, + marshaler: marshalr, } } } @@ -548,6 +583,10 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma packed := false proto3 := false + ctype := false + isTime := false + isDuration := false + isWktPointer := false validateUTF8 := true for i := 2; i < len(tags); i++ { if tags[i] == "packed" { @@ -556,8 +595,169 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma if tags[i] == "proto3" { proto3 = true } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + if tags[i] == "wktptr" { + isWktPointer = true + } } 
validateUTF8 = validateUTF8 && proto3 + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdDoubleValueMarshaler(getMarshalInfo(t)) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdFloatValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdFloatValueMarshaler(getMarshalInfo(t)) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt64ValueMarshaler(getMarshalInfo(t)) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdUInt32ValueMarshaler(getMarshalInfo(t)) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBoolValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBoolValueMarshaler(getMarshalInfo(t)) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdStringValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdStringValueSliceMarshaler(getMarshalInfo(t)) + } + return 
makeStdStringValueMarshaler(getMarshalInfo(t)) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValuePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeStdBytesValueSliceMarshaler(getMarshalInfo(t)) + } + return makeStdBytesValueMarshaler(getMarshalInfo(t)) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } switch t.Kind() { case reflect.Bool: @@ -799,10 +999,17 @@ func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, ma } return makeGroupMarshaler(getMarshalInfo(t)) case "bytes": - if slice { - return makeMessageSliceMarshaler(getMarshalInfo(t)) + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) } - return makeMessageMarshaler(getMarshalInfo(t)) } } panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) @@ -2281,8 +2488,26 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { t := f.Type keyType := t.Key() valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + stdOptions := false + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "stdduration" { + valTags = append(valTags, t) + stdOptions = true + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map keyWireTag := 1<<3 | wiretype(keyTags[0]) @@ -2301,7 +2526,7 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { // If value is not message type, we don't have size cache, // but it cannot be nested either. Just use valSizer. valCachedSizer := valSizer - if valIsPtr && valType.Elem().Kind() == reflect.Struct { + if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct { u := getMarshalInfo(valType.Elem()) valCachedSizer = func(ptr pointer, tagsize int) int { // Same as message sizer, but use cache. 
@@ -2319,8 +2544,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range m.MapKeys() { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) n += siz + SizeVarint(uint64(siz)) + tagsize } @@ -2338,8 +2563,8 @@ func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { for _, k := range keys { ki := k.Interface() vi := m.MapIndex(k).Interface() - kaddr := toAddrPointer(&ki, false, false) // pointer to key - vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value b = appendVarint(b, tag) siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) b = appendVarint(b, uint64(siz)) @@ -2408,7 +2633,7 @@ func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { // the last time this function was called. ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, ei.tagsize) } mu.Unlock() @@ -2443,7 +2668,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2474,7 +2699,7 @@ func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, deterministic) if !nerr.Merge(err) { return b, err @@ -2519,7 +2744,7 @@ func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, 1) // message, tag = 3 (size=1) } mu.Unlock() @@ -2562,7 +2787,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) if !nerr.Merge(err) { return b, err @@ -2600,7 +2825,7 @@ func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, de ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) b = append(b, 1<<3|WireEndGroup) if !nerr.Merge(err) { @@ -2630,7 +2855,7 @@ func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) n += ei.sizer(p, ei.tagsize) } return n @@ -2665,7 +2890,7 @@ func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, determ ei := u.getExtElemInfo(e.desc) v := e.value - p := toAddrPointer(&v, ei.isptr, ei.deref) + p := toAddrPointer(&v, ei.isptr) b, err = ei.marshaler(b, p, ei.wiretag, 
deterministic) if !nerr.Merge(err) { return b, err @@ -2736,16 +2961,24 @@ func Marshal(pb Message) ([]byte, error) { // a Buffer for most applications. func (p *Buffer) Marshal(pb Message) error { var err error + if p.deterministic { + if _, ok := pb.(Marshaler); ok { + return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb) + } + } if m, ok := pb.(newMarshaler); ok { siz := m.XXX_Size() p.grow(siz) // make sure buf has enough capacity - p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz] + pp, err = m.XXX_Marshal(pp, p.deterministic) + p.buf = append(p.buf, pp...) return err } if m, ok := pb.(Marshaler); ok { // If the message can marshal itself, let it do it, for compatibility. // NOTE: This is not efficient. - b, err := m.Marshal() + var b []byte + b, err = m.Marshal() p.buf = append(p.buf, b...) return err } diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 0000000..997f57c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} diff --git a/vendor/github.com/golang/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go similarity index 94% rename from vendor/github.com/golang/protobuf/proto/table_merge.go rename to vendor/github.com/gogo/protobuf/proto/table_merge.go index 5525def..60dcf70 100644 --- a/vendor/github.com/golang/protobuf/proto/table_merge.go +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -530,10 +530,32 @@ func (mi *mergeInfo) computeMergeInfo() { } case reflect.Struct: switch { + case isSlice && !isPointer: // E.g. []pb.T + mergeInfo := getMergeInfo(tf) + zero := reflect.Zero(tf) + mfi.merge = func(dst, src pointer) { + // TODO: Make this faster? + dstsp := dst.asPointerTo(f.Type) + dsts := dstsp.Elem() + srcs := src.asPointerTo(f.Type).Elem() + for i := 0; i < srcs.Len(); i++ { + dsts = reflect.Append(dsts, zero) + srcElement := srcs.Index(i).Addr() + dstElement := dsts.Index(dsts.Len() - 1).Addr() + mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement)) + } + if dsts.IsNil() { + dsts = reflect.MakeSlice(f.Type, 0, 0) + } + dstsp.Elem().Set(dsts) + } case !isPointer: - panic(fmt.Sprintf("message field %s without pointer", tf)) + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } case isSlice: // E.g., []*pb.T - mi := getMergeInfo(tf) + mergeInfo := getMergeInfo(tf) mfi.merge = func(dst, src pointer) { sps := src.getPointerSlice() if sps != nil { @@ -542,7 +564,7 @@ func (mi *mergeInfo) computeMergeInfo() { var dp pointer if !sp.isNil() { dp = valToPointer(reflect.New(tf)) - mi.merge(dp, sp) + mergeInfo.merge(dp, sp) } dps = append(dps, dp) } @@ -553,7 +575,7 @@ func (mi *mergeInfo) computeMergeInfo() { } } default: // E.g., *pb.T - mi := getMergeInfo(tf) + mergeInfo := getMergeInfo(tf) mfi.merge = func(dst, src pointer) { sp := src.getPointer() if !sp.isNil() { @@ -562,7 +584,7 @@ func (mi *mergeInfo) computeMergeInfo() { dp = valToPointer(reflect.New(tf)) dst.setPointer(dp) } - mi.merge(dp, sp) + mergeInfo.merge(dp, sp) } } } diff --git a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go similarity index 86% rename from vendor/github.com/golang/protobuf/proto/table_unmarshal.go rename to vendor/github.com/gogo/protobuf/proto/table_unmarshal.go index acee2fc..9372293 100644 --- a/vendor/github.com/golang/protobuf/proto/table_unmarshal.go +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -80,6 +80,8 @@ type unmarshalInfo struct { oldExtensions field // offset of old-form extensions field (of type map[int]Extension) extensionRanges []ExtensionRange // if non-nil, implies extensions field is valid isMessageSet bool // if true, implies extensions field is valid + + bytesExtensions field // offset of XXX_extensions with type []byte } // An unmarshaler takes a stream of bytes and a pointer to a field of a message. @@ -232,10 +234,13 @@ func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error { z = &e.enc break } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } panic("no extensions field available") } } - // Use wire type to skip data. var err error b0 := b @@ -278,6 +283,7 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { u.unrecognized = invalidField u.extensions = invalidField u.oldExtensions = invalidField + u.bytesExtensions = invalidField // List of the generated type and offset for each oneof field. 
type oneofField struct { @@ -309,11 +315,14 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } if f.Name == "XXX_extensions" { // An older form of the extensions field. - if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { - panic("bad type for XXX_extensions field: " + f.Type.Name()) + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue } - u.oldExtensions = toField(&f) - continue + panic("bad type for XXX_extensions field: " + f.Type.Name()) } if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { continue @@ -362,50 +371,53 @@ func (u *unmarshalInfo) computeUnmarshalInfo() { } // Find any types associated with oneof fields. - var oneofImplementers []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oneofImplementers = m.XXX_OneofFuncs() - case oneofWrappersIface: - oneofImplementers = m.XXX_OneofWrappers() - } - for _, v := range oneofImplementers { - tptr := reflect.TypeOf(v) // *Msg_X - typ := tptr.Elem() // Msg_X - - f := typ.Field(0) // oneof implementers have one field - baseUnmarshal := fieldUnmarshaler(&f) - tags := strings.Split(f.Tag.Get("protobuf"), ",") - fieldNum, err := strconv.Atoi(tags[1]) - if err != nil { - panic("protobuf tag field not an integer: " + tags[1]) - } - var name string - for _, tag := range tags { - if strings.HasPrefix(tag, "name=") { - name = strings.TrimPrefix(tag, "name=") - break + // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if len(oneofFields) > 0 { + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } } - } - // Find the oneof field that this struct implements. - // Might take O(n^2) to process all of the oneofs, but who cares. - for _, of := range oneofFields { - if tptr.Implements(of.ityp) { - // We have found the corresponding interface for this struct. - // That lets us know where this struct should be stored - // when we encounter it during unmarshaling. - unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) - u.setTag(fieldNum, of.field, unmarshal, 0, name) + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } } - } + } } // Get extension ranges, if any. 
fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") if fn.IsValid() { - if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { panic("a message with extensions, but no extensions field in " + t.Name()) } u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) @@ -459,6 +471,10 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { tagArray := strings.Split(tags, ",") encoding := tagArray[0] name := "unknown" + ctype := false + isTime := false + isDuration := false + isWktPointer := false proto3 := false validateUTF8 := true for _, tag := range tagArray[3:] { @@ -468,6 +484,18 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { if tag == "proto3" { proto3 = true } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + if tag == "wktptr" { + isWktPointer = true + } } validateUTF8 = validateUTF8 && proto3 @@ -483,6 +511,152 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { t = t.Elem() } + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + if isWktPointer { + switch t.Kind() { + case reflect.Float64: + if pointer { + if slice { + return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Float32: + if pointer { + if slice { + return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int64: + if pointer { + if slice { + return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint64: + if pointer { + if slice { + return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return 
makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Int32: + if pointer { + if slice { + return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Uint32: + if pointer { + if slice { + return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.Bool: + if pointer { + if slice { + return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name) + case reflect.String: + if pointer { + if slice { + return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name) + case uint8SliceType: + if pointer { + if slice { + return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name) + } + if slice { + return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name) + } + return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name) + default: + panic(fmt.Sprintf("unknown wktpointer type %#v", t)) + } + } + // We'll never have both pointer and slice for basic types. if pointer && slice && t.Kind() != reflect.Struct { panic("both pointer and slice for basic type in " + t.Name()) @@ -636,7 +810,13 @@ func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { case reflect.Struct: // message or group field if !pointer { - panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } } switch encoding { case "bytes": @@ -1735,8 +1915,24 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler { t := f.Type kt := t.Key() vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + if t == "wktptr" { + valTags = append(valTags, t) + } + } unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) - unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) return func(b []byte, f pointer, w int) ([]byte, error) { // The map entry is a submessage. Figure out how big it is. 
if w != WireBytes { diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 0000000..00d6c7a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.New(sub.typ)) + m := s.Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := reflect.New(sub.typ) + c := m.Interface().(custom) + if err := c.Unmarshal(b[:x]); err != nil { + return nil, err + } + v := valToPointer(m) + f.appendRef(v, sub.typ) + return b[x:], nil + } +} + +func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + + m := f.asPointerTo(sub.typ).Interface().(custom) + if err := m.Unmarshal(b[:x]); err != nil { + return nil, err + } + return b[x:], nil + } +} + +func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&t)) + return b[x:], nil + } +} + +func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := 
reflect.Append(slice, reflect.ValueOf(&t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &timestamp{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + t, err := timestampFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(t)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&d)) + return b[x:], nil + } +} + +func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(d)) + return b[x:], nil + } +} + +func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&d)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &duration{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + d, err := durationFromProto(m) + if err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(d)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go similarity index 87% rename from vendor/github.com/golang/protobuf/proto/text.go rename to vendor/github.com/gogo/protobuf/proto/text.go index 
1aaee72..87416af 100644 --- a/vendor/github.com/golang/protobuf/proto/text.go +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -1,3 +1,8 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. @@ -45,6 +50,8 @@ import ( "reflect" "sort" "strings" + "sync" + "time" ) var ( @@ -311,7 +318,11 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { } continue } - if err := tm.writeAny(w, v, props); err != nil { + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -392,7 +403,7 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { // empty bytes field continue } - if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { // proto3 non-repeated scalar field; skip if zero value if isProto3Zero(fv) { continue @@ -435,8 +446,11 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { } } - // Enums have a String method, so writeAny will work fine. - if err := tm.writeAny(w, fv, props); err != nil { + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { return err } @@ -446,7 +460,13 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { } // Extensions (the XXX_extensions field). - pv := sv.Addr() + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } if _, err := extendable(pv.Interface()); err == nil { if err := tm.writeExtensions(w, pv); err != nil { return err @@ -456,10 +476,62 @@ func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { return nil } +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + // writeAny writes an arbitrary field. 
func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + // Floats have special cases. if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { x := v.Float() @@ -519,8 +591,8 @@ func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Propert // mutating this value. v = v.Addr() } - if etm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := etm.MarshalText() + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() if err != nil { return err } @@ -602,27 +674,27 @@ func writeUnknownStruct(w *textWriter, data []byte) (err error) { for b.index < len(b.buf) { x, err := b.DecodeVarint() if err != nil { - _, err := fmt.Fprintf(w, "/* %v */\n", err) - return err + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr } wire, tag := x&7, x>>3 if wire == WireEndGroup { w.unindent() - if _, err := w.Write(endBraceNewline); err != nil { - return err + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr } continue } - if _, err := fmt.Fprint(w, tag); err != nil { - return err + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr } if wire != WireStartGroup { - if err := w.WriteByte(':'); err != nil { + if err = w.WriteByte(':'); err != nil { return err } } if !w.compact || wire == WireStartGroup { - if err := w.WriteByte(' '); err != nil { + if err = w.WriteByte(' '); err != nil { return err } } @@ -652,7 +724,7 @@ func writeUnknownStruct(w *textWriter, data []byte) (err error) { if err != nil { return err } - if err = w.WriteByte('\n'); err != nil { + if err := w.WriteByte('\n'); err != nil { return err } } @@ -678,15 +750,30 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // pv is assumed to be a pointer to a protocol message struct that is extendable. 
func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] - ep, _ := extendable(pv.Interface()) + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } // Order the extensions by ID. // This isn't strictly necessary, but it will give us // canonical output, which will also make testing easier. - m, mu := ep.extensionsRead() - if m == nil { - return nil - } + mu.Lock() ids := make([]int32, 0, len(m)) for id := range m { @@ -709,7 +796,7 @@ func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error continue } - pb, err := GetExtension(ep, desc) + pb, err := GetExtension(e, desc) if err != nil { return fmt.Errorf("failed getting extension: %v", err) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 0000000..1d6c6aa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go similarity index 85% rename from vendor/github.com/golang/protobuf/proto/text_parser.go rename to vendor/github.com/gogo/protobuf/proto/text_parser.go index bb55a3a..1ce0be2 100644 --- a/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -1,3 +1,8 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// // Go support for Protocol Buffers - Google's data interchange format // // Copyright 2010 The Go Authors. All rights reserved. @@ -41,6 +46,7 @@ import ( "reflect" "strconv" "strings" + "time" "unicode/utf8" ) @@ -742,7 +748,119 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { if tok.value == "" { return p.errorf("unexpected EOF") } + if len(props.CustomType) > 0 { + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + tc := reflect.TypeOf(new(Marshaler)) + ok := t.Elem().Implements(tc.Elem()) + if ok { + fv := v + flen := fv.Len() + if flen == fv.Cap() { + nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1) + reflect.Copy(nav, fv) + fv.Set(nav) + } + fv.SetLen(flen + 1) + // Read one. 
+ p.back() + return p.readAny(fv.Index(flen), props) + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.ValueOf(custom)) + } else { + custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler) + err := custom.Unmarshal([]byte(tok.unquoted)) + if err != nil { + return p.errorf("%v %v: %v", err, v.Type(), tok.value) + } + v.Set(reflect.Indirect(reflect.ValueOf(custom))) + } + return nil + } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := &timestamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } switch fv := v; fv.Kind() { case reflect.Slice: at := v.Type() @@ -767,15 +885,15 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { if err != nil { return err } - tok := p.next() - if tok.err != nil { - return tok.err + ntok := p.next() + if ntok.err != nil { + return ntok.err } - if tok.value == "]" { + if ntok.value == "]" { break } - if tok.value != "," { - return p.errorf("Expected ']' or ',' found %q", tok.value) + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) } } return nil @@ -805,6 +923,16 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.SetFloat(f) return nil } + case reflect.Int8: + if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil { + fv.SetInt(x) + return nil + } + case reflect.Int16: + if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil { + fv.SetInt(x) + return nil + } case reflect.Int32: if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { fv.SetInt(x) @@ -852,6 +980,16 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { } // TODO: Handle nested messages which implement encoding.TextUnmarshaler. 
return p.readStruct(fv, terminator) + case reflect.Uint8: + if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint16: + if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil { + fv.SetUint(x) + return nil + } case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { fv.SetUint(uint64(x)) diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 0000000..9324f65 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
+func validateTimestamp(ts *timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+ seconds := t.Unix()
+ nanos := int32(t.Sub(time.Unix(seconds, 0)))
+ ts := &timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 0000000..38439fa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go new file mode 100644 index 0000000..b175d1b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go @@ -0,0 +1,1888 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64) + v := &float64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float64) + v := &float64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float64) + v := &float64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32) + v := &float32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(float32) + v := &float32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*float32) + v := &float32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &float32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64) + v := &int64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int64) + v := &int64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int64) + v := &int64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64) + v := &uint64Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint64) + v := &uint64Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint64) + v := &uint64Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint64Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32) + v := &int32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(int32) + v := &int32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*int32) + v := &int32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &int32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32) + v := &uint32Value{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(uint32) + v := &uint32Value{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*uint32) + v := &uint32Value{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &uint32Value{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool) + v := &boolValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(bool) + v := &boolValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*bool) + v := &boolValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &boolValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string) + v := &stringValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(string) + v := &stringValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*string) + v := &stringValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &stringValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte) + v := &bytesValue{*t} + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().([]byte) + v := &bytesValue{t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*[]byte) + v := &bytesValue{*t} + siz := Size(v) + buf, err := Marshal(v) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(sub.typ).Elem() + s.Set(reflect.ValueOf(m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem() + s.Set(reflect.ValueOf(&m.Value)) + return b[x:], nil + } +} + +func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(reflect.PtrTo(sub.typ)) + newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} + +func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + m := &bytesValue{} + if err := Unmarshal(b[:x], m); err != nil { + return nil, err + } + slice := f.getSlice(sub.typ) + newSlice := reflect.Append(slice, reflect.ValueOf(m.Value)) + slice.Set(newSlice) + return b[x:], nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go new file mode 100644 index 0000000..c1cf7bf --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go @@ -0,0 +1,113 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +type float64Value struct { + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float64Value) Reset() { *m = float64Value{} } +func (*float64Value) ProtoMessage() {} +func (*float64Value) String() string { return "float64" } + +type float32Value struct { + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *float32Value) Reset() { *m = float32Value{} } +func (*float32Value) ProtoMessage() {} +func (*float32Value) String() string { return "float32" } + +type int64Value struct { + Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int64Value) Reset() { *m = int64Value{} } +func (*int64Value) ProtoMessage() {} +func (*int64Value) String() string { return "int64" } + +type uint64Value struct { + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint64Value) Reset() { *m = uint64Value{} } +func (*uint64Value) ProtoMessage() {} +func (*uint64Value) String() string { return "uint64" } + +type int32Value struct { + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *int32Value) Reset() { *m = int32Value{} } +func (*int32Value) ProtoMessage() {} +func (*int32Value) String() string { return "int32" } + +type uint32Value struct { + Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *uint32Value) Reset() { *m = uint32Value{} } +func (*uint32Value) ProtoMessage() {} +func (*uint32Value) String() string { return "uint32" } + +type boolValue struct { + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *boolValue) Reset() { *m = boolValue{} } +func (*boolValue) ProtoMessage() {} +func (*boolValue) String() string { return "bool" } + +type stringValue struct { + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *stringValue) Reset() { *m = stringValue{} } +func (*stringValue) ProtoMessage() {} +func (*stringValue) String() string { return "string" } + +type bytesValue struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *bytesValue) Reset() { *m = bytesValue{} } +func (*bytesValue) ProtoMessage() {} +func (*bytesValue) String() string { return "[]byte" } + +func init() { + RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue") + RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue") + RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value") + RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value") + RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value") + RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value") + RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue") + RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue") + RegisterType((*bytesValue)(nil), 
"gogo.protobuf.proto.BytesValue") +} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 0000000..ceadde6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,101 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package sortkeys + +import ( + "sort" +) + +func Strings(l []string) { + sort.Strings(l) +} + +func Float64s(l []float64) { + sort.Float64s(l) +} + +func Float32s(l []float32) { + sort.Sort(Float32Slice(l)) +} + +func Int64s(l []int64) { + sort.Sort(Int64Slice(l)) +} + +func Int32s(l []int32) { + sort.Sort(Int32Slice(l)) +} + +func Uint64s(l []uint64) { + sort.Sort(Uint64Slice(l)) +} + +func Uint32s(l []uint32) { + sort.Sort(Uint32Slice(l)) +} + +func Bools(l []bool) { + sort.Sort(BoolSlice(l)) +} + +type BoolSlice []bool + +func (p BoolSlice) Len() int { return len(p) } +func (p BoolSlice) Less(i, j int) bool { return p[j] } +func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int32Slice []int32 + +func (p Int32Slice) Len() int { return len(p) } +func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint32Slice []uint32 + +func (p Uint32Slice) Len() int { return len(p) } +func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Float32Slice []float32 + +func (p Float32Slice) Len() int { return len(p) } +func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git 
a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go new file mode 100644 index 0000000..e810e6f --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/buffer.go @@ -0,0 +1,324 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + WireVarint = 0 + WireFixed32 = 5 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 +) + +// EncodeVarint returns the varint encoded bytes of v. +func EncodeVarint(v uint64) []byte { + return protowire.AppendVarint(nil, v) +} + +// SizeVarint returns the length of the varint encoded bytes of v. +// This is equal to len(EncodeVarint(v)). +func SizeVarint(v uint64) int { + return protowire.SizeVarint(v) +} + +// DecodeVarint parses a varint encoded integer from b, +// returning the integer value and the length of the varint. +// It returns (0, 0) if there is a parse error. +func DecodeVarint(b []byte) (uint64, int) { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, 0 + } + return v, n +} + +// Buffer is a buffer for encoding and decoding the protobuf wire format. +// It may be reused between invocations to reduce memory usage. +type Buffer struct { + buf []byte + idx int + deterministic bool +} + +// NewBuffer allocates a new Buffer initialized with buf, +// where the contents of buf are considered the unread portion of the buffer. +func NewBuffer(buf []byte) *Buffer { + return &Buffer{buf: buf} +} + +// SetDeterministic specifies whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (b *Buffer) SetDeterministic(deterministic bool) { + b.deterministic = deterministic +} + +// SetBuf sets buf as the internal buffer, +// where the contents of buf are considered the unread portion of the buffer. +func (b *Buffer) SetBuf(buf []byte) { + b.buf = buf + b.idx = 0 +} + +// Reset clears the internal buffer of all written and unread data. +func (b *Buffer) Reset() { + b.buf = b.buf[:0] + b.idx = 0 +} + +// Bytes returns the internal buffer. +func (b *Buffer) Bytes() []byte { + return b.buf +} + +// Unread returns the unread portion of the buffer. +func (b *Buffer) Unread() []byte { + return b.buf[b.idx:] +} + +// Marshal appends the wire-format encoding of m to the buffer. 
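// Hedged sketch (illustrative only, not part of the vendored diff): a round trip through the
// package-level varint helpers introduced in buffer.go above, assuming the vendored proto
// package is importable as "github.com/golang/protobuf/proto".
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	// 300 does not fit in 7 bits, so its varint takes two bytes on the wire (0xAC 0x02).
	raw := proto.EncodeVarint(300)
	fmt.Printf("% x, size=%d\n", raw, proto.SizeVarint(300)) // ac 02, size=2

	// DecodeVarint reports the value and how many bytes it consumed; (0, 0) on parse error.
	v, n := proto.DecodeVarint(raw)
	fmt.Println(v, n) // 300 2
}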
+func (b *Buffer) Marshal(m Message) error { + var err error + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// Unmarshal parses the wire-format message in the buffer and +// places the decoded results in m. +// It does not reset m before unmarshaling. +func (b *Buffer) Unmarshal(m Message) error { + err := UnmarshalMerge(b.Unread(), m) + b.idx = len(b.buf) + return err +} + +type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields } + +func (m *unknownFields) String() string { panic("not implemented") } +func (m *unknownFields) Reset() { panic("not implemented") } +func (m *unknownFields) ProtoMessage() { panic("not implemented") } + +// DebugPrint dumps the encoded bytes of b with a header and footer including s +// to stdout. This is only intended for debugging. +func (*Buffer) DebugPrint(s string, b []byte) { + m := MessageReflect(new(unknownFields)) + m.SetUnknown(b) + b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface()) + fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s) +} + +// EncodeVarint appends an unsigned varint encoding to the buffer. +func (b *Buffer) EncodeVarint(v uint64) error { + b.buf = protowire.AppendVarint(b.buf, v) + return nil +} + +// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag32(v uint64) error { + return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) +} + +// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer. +func (b *Buffer) EncodeZigzag64(v uint64) error { + return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63)))) +} + +// EncodeFixed32 appends a 32-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed32(v uint64) error { + b.buf = protowire.AppendFixed32(b.buf, uint32(v)) + return nil +} + +// EncodeFixed64 appends a 64-bit little-endian integer to the buffer. +func (b *Buffer) EncodeFixed64(v uint64) error { + b.buf = protowire.AppendFixed64(b.buf, uint64(v)) + return nil +} + +// EncodeRawBytes appends a length-prefixed raw bytes to the buffer. +func (b *Buffer) EncodeRawBytes(v []byte) error { + b.buf = protowire.AppendBytes(b.buf, v) + return nil +} + +// EncodeStringBytes appends a length-prefixed raw bytes to the buffer. +// It does not validate whether v contains valid UTF-8. +func (b *Buffer) EncodeStringBytes(v string) error { + b.buf = protowire.AppendString(b.buf, v) + return nil +} + +// EncodeMessage appends a length-prefixed encoded message to the buffer. +func (b *Buffer) EncodeMessage(m Message) error { + var err error + b.buf = protowire.AppendVarint(b.buf, uint64(Size(m))) + b.buf, err = marshalAppend(b.buf, m, b.deterministic) + return err +} + +// DecodeVarint consumes an encoded unsigned varint from the buffer. +func (b *Buffer) DecodeVarint() (uint64, error) { + v, n := protowire.ConsumeVarint(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer. +func (b *Buffer) DecodeZigzag32() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil +} + +// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer. 
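// Hedged sketch (illustrative only, not part of the vendored diff): zig-zag encoding, as used
// by the Buffer.EncodeZigzag32/DecodeZigzag32 methods above, maps signed values of small
// magnitude to small unsigned varints (-1 -> 1, 1 -> 2, -2 -> 3), so a negative number does
// not expand to a maximum-length varint the way a plain varint of int64(-1) would.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)

	// The method takes the value pre-converted to uint64, matching the signature above.
	v := int32(-2)
	_ = b.EncodeZigzag32(uint64(uint32(v)))
	fmt.Printf("wire bytes: % x\n", b.Bytes()) // a single byte: 03

	// Round-trip back to the signed value.
	u, _ := b.DecodeZigzag32()
	fmt.Println(int32(u)) // -2
}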
+func (b *Buffer) DecodeZigzag64() (uint64, error) { + v, err := b.DecodeVarint() + if err != nil { + return 0, err + } + return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil +} + +// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed32() (uint64, error) { + v, n := protowire.ConsumeFixed32(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer. +func (b *Buffer) DecodeFixed64() (uint64, error) { + v, n := protowire.ConsumeFixed64(b.buf[b.idx:]) + if n < 0 { + return 0, protowire.ParseError(n) + } + b.idx += n + return uint64(v), nil +} + +// DecodeRawBytes consumes a length-prefixed raw bytes from the buffer. +// If alloc is specified, it returns a copy the raw bytes +// rather than a sub-slice of the buffer. +func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) { + v, n := protowire.ConsumeBytes(b.buf[b.idx:]) + if n < 0 { + return nil, protowire.ParseError(n) + } + b.idx += n + if alloc { + v = append([]byte(nil), v...) + } + return v, nil +} + +// DecodeStringBytes consumes a length-prefixed raw bytes from the buffer. +// It does not validate whether the raw bytes contain valid UTF-8. +func (b *Buffer) DecodeStringBytes() (string, error) { + v, n := protowire.ConsumeString(b.buf[b.idx:]) + if n < 0 { + return "", protowire.ParseError(n) + } + b.idx += n + return v, nil +} + +// DecodeMessage consumes a length-prefixed message from the buffer. +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeMessage(m Message) error { + v, err := b.DecodeRawBytes(false) + if err != nil { + return err + } + return UnmarshalMerge(v, m) +} + +// DecodeGroup consumes a message group from the buffer. +// It assumes that the start group marker has already been consumed and +// consumes all bytes until (and including the end group marker). +// It does not reset m before unmarshaling. +func (b *Buffer) DecodeGroup(m Message) error { + v, n, err := consumeGroup(b.buf[b.idx:]) + if err != nil { + return err + } + b.idx += n + return UnmarshalMerge(v, m) +} + +// consumeGroup parses b until it finds an end group marker, returning +// the raw bytes of the message (excluding the end group marker) and the +// the total length of the message (including the end group marker). 
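// Hedged sketch (illustrative only, not part of the vendored diff): a length-prefixed field on
// the wire is a varint length followed by that many payload bytes, which is exactly what the
// EncodeRawBytes/DecodeRawBytes methods above append and consume.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

func main() {
	b := proto.NewBuffer(nil)
	_ = b.EncodeRawBytes([]byte("abc"))
	fmt.Printf("% x\n", b.Bytes()) // 03 61 62 63 — varint length 3, then the payload

	// alloc=true copies the payload out instead of aliasing the internal buffer,
	// which matters if the Buffer is reset or reused afterwards.
	payload, err := b.DecodeRawBytes(true)
	fmt.Println(string(payload), err) // abc <nil>
}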
+func consumeGroup(b []byte) ([]byte, int, error) { + b0 := b + depth := 1 // assume this follows a start group marker + for { + _, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return nil, 0, protowire.ParseError(tagLen) + } + b = b[tagLen:] + + var valLen int + switch wtyp { + case protowire.VarintType: + _, valLen = protowire.ConsumeVarint(b) + case protowire.Fixed32Type: + _, valLen = protowire.ConsumeFixed32(b) + case protowire.Fixed64Type: + _, valLen = protowire.ConsumeFixed64(b) + case protowire.BytesType: + _, valLen = protowire.ConsumeBytes(b) + case protowire.StartGroupType: + depth++ + case protowire.EndGroupType: + depth-- + default: + return nil, 0, errors.New("proto: cannot parse reserved wire type") + } + if valLen < 0 { + return nil, 0, protowire.ParseError(valLen) + } + b = b[valLen:] + + if depth == 0 { + return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil + } + } +} diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go new file mode 100644 index 0000000..d399bf0 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/defaults.go @@ -0,0 +1,63 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// SetDefaults sets unpopulated scalar fields to their default values. +// Fields within a oneof are not set even if they have a default value. +// SetDefaults is recursively called upon any populated message fields. +func SetDefaults(m Message) { + if m != nil { + setDefaults(MessageReflect(m)) + } +} + +func setDefaults(m protoreflect.Message) { + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + if !m.Has(fd) { + if fd.HasDefault() && fd.ContainingOneof() == nil { + v := fd.Default() + if fd.Kind() == protoreflect.BytesKind { + v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes + } + m.Set(fd, v) + } + continue + } + } + + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + setDefaults(m.Get(fd).Message()) + } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + setDefaults(ls.Get(i).Message()) + } + } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + setDefaults(v.Message()) + return true + }) + } + } + return true + }) +} diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go index 35b882c..e8db57e 100644 --- a/vendor/github.com/golang/protobuf/proto/deprecated.go +++ b/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -1,63 +1,113 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2018 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -import "errors" +import ( + "encoding/json" + "errors" + "fmt" + "strconv" -// Deprecated: do not use. + protoV2 "google.golang.org/protobuf/proto" +) + +var ( + // Deprecated: No longer returned. + ErrNil = errors.New("proto: Marshal called with nil") + + // Deprecated: No longer returned. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") + + // Deprecated: No longer returned. + ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") +) + +// Deprecated: Do not use. type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } -// Deprecated: do not use. +// Deprecated: Do not use. func GetStats() Stats { return Stats{} } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSet(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSet([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func MarshalMessageSetJSON(interface{}) ([]byte, error) { return nil, errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func UnmarshalMessageSetJSON([]byte, interface{}) error { return errors.New("proto: not implemented") } -// Deprecated: do not use. +// Deprecated: Do not use. func RegisterMessageSetType(Message, int32, string) {} + +// Deprecated: Do not use. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// Deprecated: Do not use. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. 
+ var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// Deprecated: Do not use; this type existed for intenal-use only. +type InternalMessageInfo struct{} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) DiscardUnknown(m Message) { + DiscardUnknown(m) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) { + return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Size(m Message) int { + return protoV2.Size(MessageV2(m)) +} + +// Deprecated: Do not use; this method existed for intenal-use only. +func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error { + return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m)) +} diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go index dea2617..2187e87 100644 --- a/vendor/github.com/golang/protobuf/proto/discard.go +++ b/vendor/github.com/golang/protobuf/proto/discard.go @@ -1,48 +1,13 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2017 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
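// Hedged sketch (illustrative only, not part of the vendored diff): the deprecated EnumName
// and UnmarshalJSONEnum helpers above translate between enum numbers and their JSON forms.
// The colorName/colorValue maps below are hypothetical stand-ins for the maps that generated
// code normally provides.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
)

var colorName = map[int32]string{0: "RED", 1: "GREEN"}
var colorValue = map[string]int32{"RED": 0, "GREEN": 1}

func main() {
	fmt.Println(proto.EnumName(colorName, 1))  // GREEN
	fmt.Println(proto.EnumName(colorName, 42)) // 42 — unknown values fall back to the number

	// UnmarshalJSONEnum accepts both the new string form and the old numeric form.
	v1, _ := proto.UnmarshalJSONEnum(colorValue, []byte(`"GREEN"`), "Color")
	v2, _ := proto.UnmarshalJSONEnum(colorValue, []byte(`0`), "Color")
	fmt.Println(v1, v2) // 1 0
}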
package proto import ( - "fmt" - "reflect" - "strings" - "sync" - "sync/atomic" + "google.golang.org/protobuf/reflect/protoreflect" ) -type generatedDiscarder interface { - XXX_DiscardUnknown() -} - // DiscardUnknown recursively discards all unknown fields from this message // and all embedded messages. // @@ -51,300 +16,43 @@ type generatedDiscarder interface { // marshal to be able to produce a message that continues to have those // unrecognized fields. To avoid this, DiscardUnknown is used to // explicitly clear the unknown fields after unmarshaling. -// -// For proto2 messages, the unknown fields of message extensions are only -// discarded from messages that have been accessed via GetExtension. func DiscardUnknown(m Message) { - if m, ok := m.(generatedDiscarder); ok { - m.XXX_DiscardUnknown() - return - } - // TODO: Dynamically populate a InternalMessageInfo for legacy messages, - // but the master branch has no implementation for InternalMessageInfo, - // so it would be more work to replicate that approach. - discardLegacy(m) -} - -// DiscardUnknown recursively discards all unknown fields. -func (a *InternalMessageInfo) DiscardUnknown(m Message) { - di := atomicLoadDiscardInfo(&a.discard) - if di == nil { - di = getDiscardInfo(reflect.TypeOf(m).Elem()) - atomicStoreDiscardInfo(&a.discard, di) - } - di.discard(toPointer(&m)) -} - -type discardInfo struct { - typ reflect.Type - - initialized int32 // 0: only typ is valid, 1: everything is valid - lock sync.Mutex - - fields []discardFieldInfo - unrecognized field -} - -type discardFieldInfo struct { - field field // Offset of field, guaranteed to be valid - discard func(src pointer) -} - -var ( - discardInfoMap = map[reflect.Type]*discardInfo{} - discardInfoLock sync.Mutex -) - -func getDiscardInfo(t reflect.Type) *discardInfo { - discardInfoLock.Lock() - defer discardInfoLock.Unlock() - di := discardInfoMap[t] - if di == nil { - di = &discardInfo{typ: t} - discardInfoMap[t] = di + if m != nil { + discardUnknown(MessageReflect(m)) } - return di } -func (di *discardInfo) discard(src pointer) { - if src.isNil() { - return // Nothing to do. - } - - if atomic.LoadInt32(&di.initialized) == 0 { - di.computeDiscardInfo() - } - - for _, fi := range di.fields { - sfp := src.offset(fi.field) - fi.discard(sfp) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { - // Ignore lock since DiscardUnknown is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - DiscardUnknown(m) +func discardUnknown(m protoreflect.Message) { + m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool { + switch { + // Handle singular message. + case fd.Cardinality() != protoreflect.Repeated: + if fd.Message() != nil { + discardUnknown(m.Get(fd).Message()) } - } - } - - if di.unrecognized.IsValid() { - *src.offset(di.unrecognized).toBytes() = nil - } -} - -func (di *discardInfo) computeDiscardInfo() { - di.lock.Lock() - defer di.lock.Unlock() - if di.initialized != 0 { - return - } - t := di.typ - n := t.NumField() - - for i := 0; i < n; i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue - } - - dfi := discardFieldInfo{field: toField(&f)} - tf := f.Type - - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) - case isSlice: // E.g., []*pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sps := src.getPointerSlice() - for _, sp := range sps { - if !sp.isNil() { - di.discard(sp) - } - } - } - default: // E.g., *pb.T - di := getDiscardInfo(tf) - dfi.discard = func(src pointer) { - sp := src.getPointer() - if !sp.isNil() { - di.discard(sp) - } + // Handle list of messages. + case fd.IsList(): + if fd.Message() != nil { + ls := m.Get(fd).List() + for i := 0; i < ls.Len(); i++ { + discardUnknown(ls.Get(i).Message()) } } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) - default: // E.g., map[K]V - if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) - dfi.discard = func(src pointer) { - sm := src.asPointerTo(tf).Elem() - if sm.Len() == 0 { - return - } - for _, key := range sm.MapKeys() { - val := sm.MapIndex(key) - DiscardUnknown(val.Interface().(Message)) - } - } - } else { - dfi.discard = func(pointer) {} // Noop - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) - default: // E.g., interface{} - // TODO: Make this faster? - dfi.discard = func(src pointer) { - su := src.asPointerTo(tf).Elem() - if !su.IsNil() { - sv := su.Elem().Elem().Field(0) - if sv.Kind() == reflect.Ptr && sv.IsNil() { - return - } - switch sv.Type().Kind() { - case reflect.Ptr: // Proto struct (e.g., *T) - DiscardUnknown(sv.Interface().(Message)) - } - } - } + // Handle map of messages. + case fd.IsMap(): + if fd.MapValue().Message() != nil { + ms := m.Get(fd).Map() + ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool { + discardUnknown(v.Message()) + return true + }) } - default: - continue - } - di.fields = append(di.fields, dfi) - } - - di.unrecognized = invalidField - if f, ok := t.FieldByName("XXX_unrecognized"); ok { - if f.Type != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - di.unrecognized = toField(&f) - } - - atomic.StoreInt32(&di.initialized, 1) -} - -func discardLegacy(m Message) { - v := reflect.ValueOf(m) - if v.Kind() != reflect.Ptr || v.IsNil() { - return - } - v = v.Elem() - if v.Kind() != reflect.Struct { - return - } - t := v.Type() - - for i := 0; i < v.NumField(); i++ { - f := t.Field(i) - if strings.HasPrefix(f.Name, "XXX_") { - continue } - vf := v.Field(i) - tf := f.Type + return true + }) - // Unwrap tf to get its most basic type. 
- var isPointer, isSlice bool - if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { - isSlice = true - tf = tf.Elem() - } - if tf.Kind() == reflect.Ptr { - isPointer = true - tf = tf.Elem() - } - if isPointer && isSlice && tf.Kind() != reflect.Struct { - panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) - } - - switch tf.Kind() { - case reflect.Struct: - switch { - case !isPointer: - panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) - case isSlice: // E.g., []*pb.T - for j := 0; j < vf.Len(); j++ { - discardLegacy(vf.Index(j).Interface().(Message)) - } - default: // E.g., *pb.T - discardLegacy(vf.Interface().(Message)) - } - case reflect.Map: - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) - default: // E.g., map[K]V - tv := vf.Type().Elem() - if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) - for _, key := range vf.MapKeys() { - val := vf.MapIndex(key) - discardLegacy(val.Interface().(Message)) - } - } - } - case reflect.Interface: - // Must be oneof field. - switch { - case isPointer || isSlice: - panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) - default: // E.g., test_proto.isCommunique_Union interface - if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { - vf = vf.Elem() // E.g., *test_proto.Communique_Msg - if !vf.IsNil() { - vf = vf.Elem() // E.g., test_proto.Communique_Msg - vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value - if vf.Kind() == reflect.Ptr { - discardLegacy(vf.Interface().(Message)) - } - } - } - } - } - } - - if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { - if vf.Type() != reflect.TypeOf([]byte{}) { - panic("expected XXX_unrecognized to be of type []byte") - } - vf.Set(reflect.ValueOf([]byte(nil))) - } - - // For proto2 messages, only discard unknown fields in message extensions - // that have been accessed via GetExtension. - if em, err := extendable(m); err == nil { - // Ignore lock since discardLegacy is not concurrency safe. - emm, _ := em.extensionsRead() - for _, mx := range emm { - if m, ok := mx.value.(Message); ok { - discardLegacy(m) - } - } + // Discard unknown fields. + if len(m.GetUnknown()) > 0 { + m.SetUnknown(nil) } } diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go index fa88add..42fc120 100644 --- a/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -1,607 +1,356 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Types and routines for supporting protocol buffer extensions. - */ - import ( "errors" "fmt" - "io" "reflect" - "strconv" - "sync" -) - -// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. -var ErrMissingExtension = errors.New("proto: missing extension") - -// ExtensionRange represents a range of message extensions for a protocol buffer. -// Used in code generated by the protocol compiler. -type ExtensionRange struct { - Start, End int32 // both inclusive -} - -// extendableProto is an interface implemented by any protocol buffer generated by the current -// proto compiler that may be extended. -type extendableProto interface { - Message - ExtensionRangeArray() []ExtensionRange - extensionsWrite() map[int32]Extension - extensionsRead() (map[int32]Extension, sync.Locker) -} - -// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous -// version of the proto compiler that may be extended. -type extendableProtoV1 interface { - Message - ExtensionRangeArray() []ExtensionRange - ExtensionMap() map[int32]Extension -} -// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. -type extensionAdapter struct { - extendableProtoV1 -} + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) -func (e extensionAdapter) extensionsWrite() map[int32]Extension { - return e.ExtensionMap() -} +type ( + // ExtensionDesc represents an extension descriptor and + // is used to interact with an extension field in a message. + // + // Variables of this type are generated in code by protoc-gen-go. + ExtensionDesc = protoimpl.ExtensionInfo -func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { - return e.ExtensionMap(), notLocker{} -} + // ExtensionRange represents a range of message extensions. + // Used in code generated by protoc-gen-go. + ExtensionRange = protoiface.ExtensionRangeV1 -// notLocker is a sync.Locker whose Lock and Unlock methods are nops. -type notLocker struct{} + // Deprecated: Do not use; this is an internal type. 
+ Extension = protoimpl.ExtensionFieldV1 -func (n notLocker) Lock() {} -func (n notLocker) Unlock() {} + // Deprecated: Do not use; this is an internal type. + XXX_InternalExtensions = protoimpl.ExtensionFields +) -// extendable returns the extendableProto interface for the given generated proto message. -// If the proto message has the old extension format, it returns a wrapper that implements -// the extendableProto interface. -func extendable(p interface{}) (extendableProto, error) { - switch p := p.(type) { - case extendableProto: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return p, nil - case extendableProtoV1: - if isNilPtr(p) { - return nil, fmt.Errorf("proto: nil %T is not extendable", p) - } - return extensionAdapter{p}, nil - } - // Don't allocate a specific error containing %T: - // this is the hot path for Clone and MarshalText. - return nil, errNotExtendable -} +// ErrMissingExtension reports whether the extension was not present. +var ErrMissingExtension = errors.New("proto: missing extension") var errNotExtendable = errors.New("proto: not an extendable proto.Message") -func isNilPtr(x interface{}) bool { - v := reflect.ValueOf(x) - return v.Kind() == reflect.Ptr && v.IsNil() -} - -// XXX_InternalExtensions is an internal representation of proto extensions. -// -// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, -// thus gaining the unexported 'extensions' method, which can be called only from the proto package. -// -// The methods of XXX_InternalExtensions are not concurrency safe in general, -// but calls to logically read-only methods such as has and get may be executed concurrently. -type XXX_InternalExtensions struct { - // The struct must be indirect so that if a user inadvertently copies a - // generated message and its embedded XXX_InternalExtensions, they - // avoid the mayhem of a copied mutex. - // - // The mutex serializes all logically read-only operations to p.extensionMap. - // It is up to the client to ensure that write operations to p.extensionMap are - // mutually exclusive with other accesses. - p *struct { - mu sync.Mutex - extensionMap map[int32]Extension +// HasExtension reports whether the extension field is present in m +// either as an explicitly populated field or as an unknown field. +func HasExtension(m Message, xt *ExtensionDesc) (has bool) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return false } -} -// extensionsWrite returns the extension map, creating it on first use. -func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { - if e.p == nil { - e.p = new(struct { - mu sync.Mutex - extensionMap map[int32]Extension + // Check whether any populated known field matches the field number. + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + has = mr.Has(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + has = int32(fd.Number()) == xt.Field + return !has }) - e.p.extensionMap = make(map[int32]Extension) } - return e.p.extensionMap -} -// extensionsRead returns the extensions map for read-only use. It may be nil. -// The caller must hold the returned mutex's lock when accessing Elements within the map. -func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { - if e.p == nil { - return nil, nil + // Check whether any unknown field matches the field number. 
+ for b := mr.GetUnknown(); !has && len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + has = int32(num) == xt.Field + b = b[n:] } - return e.p.extensionMap, &e.p.mu -} - -// ExtensionDesc represents an extension specification. -// Used in generated code from the protocol compiler. -type ExtensionDesc struct { - ExtendedType Message // nil pointer to the type that is being extended - ExtensionType interface{} // nil pointer to the extension type - Field int32 // field number - Name string // fully-qualified name of extension, for text formatting - Tag string // protobuf tag style - Filename string // name of the file in which the extension is defined -} - -func (ed *ExtensionDesc) repeated() bool { - t := reflect.TypeOf(ed.ExtensionType) - return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 -} - -// Extension represents an extension in a message. -type Extension struct { - // When an extension is stored in a message using SetExtension - // only desc and value are set. When the message is marshaled - // enc will be set to the encoded form of the message. - // - // When a message is unmarshaled and contains extensions, each - // extension will have only enc set. When such an extension is - // accessed using GetExtension (or GetExtensions) desc and value - // will be set. - desc *ExtensionDesc - - // value is a concrete value for the extension field. Let the type of - // desc.ExtensionType be the "API type" and the type of Extension.value - // be the "storage type". The API type and storage type are the same except: - // * For scalars (except []byte), the API type uses *T, - // while the storage type uses T. - // * For repeated fields, the API type uses []T, while the storage type - // uses *[]T. - // - // The reason for the divergence is so that the storage type more naturally - // matches what is expected of when retrieving the values through the - // protobuf reflection APIs. - // - // The value may only be populated if desc is also populated. - value interface{} - - // enc is the raw bytes for the extension field. - enc []byte + return has } -// SetRawExtension is for testing only. -func SetRawExtension(base Message, id int32, b []byte) { - epb, err := extendable(base) - if err != nil { +// ClearExtension removes the extension field from m +// either as an explicitly populated field or as an unknown field. +func ClearExtension(m Message, xt *ExtensionDesc) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - extmap := epb.extensionsWrite() - extmap[id] = Extension{enc: b} -} -// isExtensionField returns true iff the given field number is in an extension range. -func isExtensionField(pb extendableProto, field int32) bool { - for _, er := range pb.ExtensionRangeArray() { - if er.Start <= field && field <= er.End { + xtd := xt.TypeDescriptor() + if isValidExtension(mr.Descriptor(), xtd) { + mr.Clear(xtd) + } else { + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if int32(fd.Number()) == xt.Field { + mr.Clear(fd) + return false + } return true - } - } - return false -} - -// checkExtensionTypes checks that the given extension is valid for pb. -func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { - var pbi interface{} = pb - // Check the extended type. 
- if ea, ok := pbi.(extensionAdapter); ok { - pbi = ea.extendableProtoV1 - } - if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { - return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) - } - // Check the range. - if !isExtensionField(pb, extension.Field) { - return errors.New("proto: bad extension number; not in declared ranges") - } - return nil -} - -// extPropKey is sufficient to uniquely identify an extension. -type extPropKey struct { - base reflect.Type - field int32 -} - -var extProp = struct { - sync.RWMutex - m map[extPropKey]*Properties -}{ - m: make(map[extPropKey]*Properties), -} - -func extensionProperties(ed *ExtensionDesc) *Properties { - key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} - - extProp.RLock() - if prop, ok := extProp.m[key]; ok { - extProp.RUnlock() - return prop - } - extProp.RUnlock() - - extProp.Lock() - defer extProp.Unlock() - // Check again. - if prop, ok := extProp.m[key]; ok { - return prop - } - - prop := new(Properties) - prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) - extProp.m[key] = prop - return prop -} - -// HasExtension returns whether the given extension is present in pb. -func HasExtension(pb Message, extension *ExtensionDesc) bool { - // TODO: Check types, field numbers, etc.? - epb, err := extendable(pb) - if err != nil { - return false - } - extmap, mu := epb.extensionsRead() - if extmap == nil { - return false + }) } - mu.Lock() - _, ok := extmap[extension.Field] - mu.Unlock() - return ok + clearUnknown(mr, fieldNum(xt.Field)) } -// ClearExtension removes the given extension from pb. -func ClearExtension(pb Message, extension *ExtensionDesc) { - epb, err := extendable(pb) - if err != nil { +// ClearAllExtensions clears all extensions from m. +// This includes populated fields and unknown fields in the extension range. +func ClearAllExtensions(m Message) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - // TODO: Check types, field numbers, etc.? - extmap := epb.extensionsWrite() - delete(extmap, extension.Field) + + mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + if fd.IsExtension() { + mr.Clear(fd) + } + return true + }) + clearUnknown(mr, mr.Descriptor().ExtensionRanges()) } -// GetExtension retrieves a proto2 extended field from pb. +// GetExtension retrieves a proto2 extended field from m. // // If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), // then GetExtension parses the encoded field and returns a Go value of the specified type. // If the field is not present, then the default value is returned (if one is specified), // otherwise ErrMissingExtension is reported. // -// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), -// then GetExtension returns the raw encoded bytes of the field extension. -func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err - } - - if extension.ExtendedType != nil { - // can only check type if this is a complete descriptor - if err := checkExtensionTypes(epb, extension); err != nil { - return nil, err +// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes for the extension field. 
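// Hedged sketch (illustrative only, not part of the vendored diff): typical use of the
// extension accessors defined in this file. The extended message type pb.MyMessage and the
// descriptor pb.E_MyExt are hypothetical stand-ins for what protoc-gen-go would generate;
// only the proto.* calls themselves come from the API in extensions.go.
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"

	pb "example.com/hypothetical/pb" // hypothetical generated package
)

func main() {
	m := &pb.MyMessage{}

	// Extensions are always optional, so check before reading.
	if !proto.HasExtension(m, pb.E_MyExt) {
		// Assumes pb.E_MyExt extends MyMessage with a *string field.
		if err := proto.SetExtension(m, pb.E_MyExt, proto.String("hello")); err != nil {
			panic(err)
		}
	}

	v, err := proto.GetExtension(m, pb.E_MyExt) // returns interface{}; here a *string
	fmt.Println(v, err)

	proto.ClearExtension(m, pb.E_MyExt) // removes it again, including any unknown-field bytes
}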
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } + + // Retrieve the unknown fields for this extension field. + var bo protoreflect.RawFields + for bi := mr.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if int32(num) == xt.Field { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - emap, mu := epb.extensionsRead() - if emap == nil { - return defaultExtensionValue(extension) - } - mu.Lock() - defer mu.Unlock() - e, ok := emap[extension.Field] - if !ok { - // defaultExtensionValue returns the default value or - // ErrMissingExtension if there is no default. - return defaultExtensionValue(extension) - } - - if e.value != nil { - // Already decoded. Check the descriptor, though. - if e.desc != extension { - // This shouldn't happen. If it does, it means that - // GetExtension was called twice with two different - // descriptors with the same field number. - return nil, errors.New("proto: descriptor conflict") - } - return extensionAsLegacyType(e.value), nil + // For type incomplete descriptors, only retrieve the unknown fields. + if xt.ExtensionType == nil { + return []byte(bo), nil } - if extension.ExtensionType == nil { - // incomplete descriptor - return e.enc, nil + // If the extension field only exists as unknown fields, unmarshal it. + // This is rarely done since proto.Unmarshal eagerly unmarshals extensions. + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - - v, err := decodeExtension(e.enc, extension) - if err != nil { - return nil, err + if !mr.Has(xtd) && len(bo) > 0 { + m2 := mr.New() + if err := (proto.UnmarshalOptions{ + Resolver: extensionResolver{xt}, + }.Unmarshal(bo, m2.Interface())); err != nil { + return nil, err + } + if m2.Has(xtd) { + mr.Set(xtd, m2.Get(xtd)) + clearUnknown(mr, fieldNum(xt.Field)) + } } - // Remember the decoded version and drop the encoded version. - // That way it is safe to mutate what we return. - e.value = extensionAsStorageType(v) - e.desc = extension - e.enc = nil - emap[extension.Field] = e - return extensionAsLegacyType(e.value), nil -} - -// defaultExtensionValue returns the default value for extension. -// If no default for an extension is defined ErrMissingExtension is returned. -func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { - if extension.ExtensionType == nil { - // incomplete descriptor, so no default + // Check whether the message has the extension field set or a default. + var pv protoreflect.Value + switch { + case mr.Has(xtd): + pv = mr.Get(xtd) + case xtd.HasDefault(): + pv = xtd.Default() + default: return nil, ErrMissingExtension } - t := reflect.TypeOf(extension.ExtensionType) - props := extensionProperties(extension) - - sf, _, err := fieldDefault(t, props) - if err != nil { - return nil, err - } - - if sf == nil || sf.value == nil { - // There is no default value. - return nil, ErrMissingExtension + v := xt.InterfaceOf(pv) + rv := reflect.ValueOf(v) + if isScalarKind(rv.Kind()) { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() } + return v, nil +} - if t.Kind() != reflect.Ptr { - // We do not need to return a Ptr, we can directly return sf.value. 
- return sf.value, nil - } +// extensionResolver is a custom extension resolver that stores a single +// extension type that takes precedence over the global registry. +type extensionResolver struct{ xt protoreflect.ExtensionType } - // We need to return an interface{} that is a pointer to sf.value. - value := reflect.New(t).Elem() - value.Set(reflect.New(value.Type().Elem())) - if sf.kind == reflect.Int32 { - // We may have an int32 or an enum, but the underlying data is int32. - // Since we can't set an int32 into a non int32 reflect.value directly - // set it as a int32. - value.Elem().SetInt(int64(sf.value.(int32))) - } else { - value.Elem().Set(reflect.ValueOf(sf.value)) +func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByName(field) } -// decodeExtension decodes an extension encoded in b. -func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { - t := reflect.TypeOf(extension.ExtensionType) - unmarshal := typeUnmarshaler(t, extension.Tag) - - // t is a pointer to a struct, pointer to basic type or a slice. - // Allocate space to store the pointer/slice. - value := reflect.New(t).Elem() - - var err error - for { - x, n := decodeVarint(b) - if n == 0 { - return nil, io.ErrUnexpectedEOF - } - b = b[n:] - wire := int(x) & 7 - - b, err = unmarshal(b, valToPointer(value.Addr()), wire) - if err != nil { - return nil, err - } - - if len(b) == 0 { - break - } +func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field { + return r.xt, nil } - return value.Interface(), nil + return protoregistry.GlobalTypes.FindExtensionByNumber(message, field) } -// GetExtensions returns a slice of the extensions present in pb that are also listed in es. -// The returned slice has the same length as es; missing extensions will appear as nil elements. -func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// GetExtensions returns a list of the extensions values present in m, +// corresponding with the provided list of extension descriptors, xts. +// If an extension is missing in m, the corresponding value is nil. +func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { + return nil, errNotExtendable } - extensions = make([]interface{}, len(es)) - for i, e := range es { - extensions[i], err = GetExtension(epb, e) - if err == ErrMissingExtension { - err = nil - } + + vs := make([]interface{}, len(xts)) + for i, xt := range xts { + v, err := GetExtension(m, xt) if err != nil { - return + if err == ErrMissingExtension { + continue + } + return vs, err } + vs[i] = v } - return + return vs, nil } -// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. -// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing -// just the Field field, which defines the extension's field number. 
-func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { - epb, err := extendable(pb) - if err != nil { - return nil, err +// SetExtension sets an extension field in m to the provided value. +func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return errNotExtendable } - registeredExtensions := RegisteredExtensions(pb) - emap, mu := epb.extensionsRead() - if emap == nil { - return nil, nil + rv := reflect.ValueOf(v) + if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType) } - mu.Lock() - defer mu.Unlock() - extensions := make([]*ExtensionDesc, 0, len(emap)) - for extid, e := range emap { - desc := e.desc - if desc == nil { - desc = registeredExtensions[extid] - if desc == nil { - desc = &ExtensionDesc{Field: extid} - } + if rv.Kind() == reflect.Ptr { + if rv.IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", v) + } + if isScalarKind(rv.Elem().Kind()) { + v = rv.Elem().Interface() } - - extensions = append(extensions, desc) } - return extensions, nil -} -// SetExtension sets the specified extension of pb to the specified value. -func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { - epb, err := extendable(pb) - if err != nil { - return err - } - if err := checkExtensionTypes(epb, extension); err != nil { - return err - } - typ := reflect.TypeOf(extension.ExtensionType) - if typ != reflect.TypeOf(value) { - return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + xtd := xt.TypeDescriptor() + if !isValidExtension(mr.Descriptor(), xtd) { + return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m) } - // nil extension values need to be caught early, because the - // encoder can't distinguish an ErrNil due to a nil extension - // from an ErrNil due to a missing field. Extensions are - // always optional, so the encoder would just swallow the error - // and drop all the extensions from the encoded message. - if reflect.ValueOf(value).IsNil() { - return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) - } - - extmap := epb.extensionsWrite() - extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + mr.Set(xtd, xt.ValueOf(v)) + clearUnknown(mr, fieldNum(xt.Field)) return nil } -// ClearAllExtensions clears all extensions from pb. -func ClearAllExtensions(pb Message) { - epb, err := extendable(pb) - if err != nil { +// SetRawExtension inserts b into the unknown fields of m. +// +// Deprecated: Use Message.ProtoReflect.SetUnknown instead. +func SetRawExtension(m Message, fnum int32, b []byte) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() { return } - m := epb.extensionsWrite() - for k := range m { - delete(m, k) + + // Verify that the raw field is valid. + for b0 := b; len(b0) > 0; { + num, _, n := protowire.ConsumeField(b0) + if int32(num) != fnum { + panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum)) + } + b0 = b0[n:] } -} -// A global registry of extensions. -// The generated code will register the generated descriptors by calling RegisterExtension. 
+ ClearExtension(m, &ExtensionDesc{Field: fnum}) + mr.SetUnknown(append(mr.GetUnknown(), b...)) +} -var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) +// ExtensionDescs returns a list of extension descriptors found in m, +// containing descriptors for both populated extension fields in m and +// also unknown fields of m that are in the extension range. +// For the later case, an type incomplete descriptor is provided where only +// the ExtensionDesc.Field field is populated. +// The order of the extension descriptors is undefined. +func ExtensionDescs(m Message) ([]*ExtensionDesc, error) { + mr := MessageReflect(m) + if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 { + return nil, errNotExtendable + } -// RegisterExtension is called from the generated code. -func RegisterExtension(desc *ExtensionDesc) { - st := reflect.TypeOf(desc.ExtendedType).Elem() - m := extensionMaps[st] - if m == nil { - m = make(map[int32]*ExtensionDesc) - extensionMaps[st] = m + // Collect a set of known extension descriptors. + extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc) + mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor) + if xd, ok := xt.Type().(*ExtensionDesc); ok { + extDescs[fd.Number()] = xd + } + } + return true + }) + + // Collect a set of unknown extension descriptors. + extRanges := mr.Descriptor().ExtensionRanges() + for b := mr.GetUnknown(); len(b) > 0; { + num, _, n := protowire.ConsumeField(b) + if extRanges.Has(num) && extDescs[num] == nil { + extDescs[num] = nil + } + b = b[n:] } - if _, ok := m[desc.Field]; ok { - panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + + // Transpose the set of descriptors into a list. + var xts []*ExtensionDesc + for num, xt := range extDescs { + if xt == nil { + xt = &ExtensionDesc{Field: int32(num)} + } + xts = append(xts, xt) } - m[desc.Field] = desc + return xts, nil } -// RegisteredExtensions returns a map of the registered extensions of a -// protocol buffer struct, indexed by the extension number. -// The argument pb should be a nil pointer to the struct type. -func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { - return extensionMaps[reflect.TypeOf(pb).Elem()] +// isValidExtension reports whether xtd is a valid extension descriptor for md. +func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool { + return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number()) } -// extensionAsLegacyType converts an value in the storage type as the API type. -// See Extension.value. -func extensionAsLegacyType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { +// isScalarKind reports whether k is a protobuf scalar kind (except bytes). +// This function exists for historical reasons since the representation of +// scalars differs between v1 and v2, where v1 uses *T and v2 uses T. +func isScalarKind(k reflect.Kind) bool { + switch k { case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - // Represent primitive types as a pointer to the value. - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() - case reflect.Ptr: - // Represent slice types as the value itself. 
- switch rv.Type().Elem().Kind() { - case reflect.Slice: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } + return true + default: + return false } - return v } -// extensionAsStorageType converts an value in the API type as the storage type. -// See Extension.value. -func extensionAsStorageType(v interface{}) interface{} { - switch rv := reflect.ValueOf(v); rv.Kind() { - case reflect.Ptr: - // Represent slice types as the value itself. - switch rv.Type().Elem().Kind() { - case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: - if rv.IsNil() { - v = reflect.Zero(rv.Type().Elem()).Interface() - } else { - v = rv.Elem().Interface() - } - } - case reflect.Slice: - // Represent slice types as a pointer to the value. - if rv.Type().Elem().Kind() != reflect.Uint8 { - rv2 := reflect.New(rv.Type()) - rv2.Elem().Set(rv) - v = rv2.Interface() +// clearUnknown removes unknown fields from m where remover.Has reports true. +func clearUnknown(m protoreflect.Message, remover interface { + Has(protoreflect.FieldNumber) bool +}) { + var bo protoreflect.RawFields + for bi := m.GetUnknown(); len(bi) > 0; { + num, _, n := protowire.ConsumeField(bi) + if !remover.Has(num) { + bo = append(bo, bi[:n]...) } + bi = bi[n:] } - return v + if bi := m.GetUnknown(); len(bi) != len(bo) { + m.SetUnknown(bo) + } +} + +type fieldNum protoreflect.FieldNumber + +func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool { + return protoreflect.FieldNumber(n1) == n2 } diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go index a4b8c0c..dcdc220 100644 --- a/vendor/github.com/golang/protobuf/proto/properties.go +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -1,162 +1,104 @@ -// Go support for Protocol Buffers - Google's data interchange format -// -// Copyright 2010 The Go Authors. All rights reserved. -// https://github.com/golang/protobuf -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. package proto -/* - * Routines for encoding data into the wire format for protocol buffers. - */ - import ( "fmt" - "log" "reflect" - "sort" "strconv" "strings" "sync" -) - -const debug bool = false -// Constants that identify the encoding of a value on the wire. -const ( - WireVarint = 0 - WireFixed64 = 1 - WireBytes = 2 - WireStartGroup = 3 - WireEndGroup = 4 - WireFixed32 = 5 + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" ) -// tagMap is an optimization over map[int]int for typical protocol buffer -// use-cases. Encoded protocol buffers are often in tag order with small tag -// numbers. -type tagMap struct { - fastTags []int - slowTags map[int]int -} - -// tagMapFastLimit is the upper bound on the tag number that will be stored in -// the tagMap slice rather than its map. -const tagMapFastLimit = 1024 - -func (p *tagMap) get(t int) (int, bool) { - if t > 0 && t < tagMapFastLimit { - if t >= len(p.fastTags) { - return 0, false - } - fi := p.fastTags[t] - return fi, fi >= 0 - } - fi, ok := p.slowTags[t] - return fi, ok -} - -func (p *tagMap) put(t int, fi int) { - if t > 0 && t < tagMapFastLimit { - for len(p.fastTags) < t+1 { - p.fastTags = append(p.fastTags, -1) - } - p.fastTags[t] = fi - return - } - if p.slowTags == nil { - p.slowTags = make(map[int]int) - } - p.slowTags[t] = fi -} - -// StructProperties represents properties for all the fields of a struct. -// decoderTags and decoderOrigNames should only be used by the decoder. +// StructProperties represents protocol buffer type information for a +// generated protobuf message in the open-struct API. +// +// Deprecated: Do not use. type StructProperties struct { - Prop []*Properties // properties for each field - reqCount int // required count - decoderTags tagMap // map from proto tag to struct field number - decoderOrigNames map[string]int // map from original name to struct field number - order []int // list of struct field numbers in tag order + // Prop are the properties for each field. + // + // Fields belonging to a oneof are stored in OneofTypes instead, with a + // single Properties representing the parent oneof held here. + // + // The order of Prop matches the order of fields in the Go struct. + // Struct fields that are not related to protobufs have a "XXX_" prefix + // in the Properties.Name and must be ignored by the user. + Prop []*Properties // OneofTypes contains information about the oneof fields in this message. - // It is keyed by the original name of a field. + // It is keyed by the protobuf field name. OneofTypes map[string]*OneofProperties } -// OneofProperties represents information about a specific field in a oneof. 
-type OneofProperties struct { - Type reflect.Type // pointer to generated struct type for this oneof field - Field int // struct field number of the containing oneof in the message - Prop *Properties -} - -// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. -// See encode.go, (*Buffer).enc_struct. - -func (sp *StructProperties) Len() int { return len(sp.order) } -func (sp *StructProperties) Less(i, j int) bool { - return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag -} -func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } - -// Properties represents the protocol-specific behavior of a single struct field. +// Properties represents the type information for a protobuf message field. +// +// Deprecated: Do not use. type Properties struct { - Name string // name of the field, for error messages - OrigName string // original name before protocol compiler (always set) - JSONName string // name to use for JSON; determined by protoc - Wire string + // Name is a placeholder name with little meaningful semantic value. + // If the name has an "XXX_" prefix, the entire Properties must be ignored. + Name string + // OrigName is the protobuf field name or oneof name. + OrigName string + // JSONName is the JSON name for the protobuf field. + JSONName string + // Enum is a placeholder name for enums. + // For historical reasons, this is neither the Go name for the enum, + // nor the protobuf name for the enum. + Enum string // Deprecated: Do not use. + // Weak contains the full name of the weakly referenced message. + Weak string + // Wire is a string representation of the wire type. + Wire string + // WireType is the protobuf wire type for the field. WireType int - Tag int + // Tag is the protobuf field number. + Tag int + // Required reports whether this is a required field. Required bool + // Optional reports whether this is a optional field. Optional bool + // Repeated reports whether this is a repeated field. Repeated bool - Packed bool // relevant for repeated primitives only - Enum string // set for enum types only - proto3 bool // whether this is known to be a proto3 field - oneof bool // whether this is a oneof field - - Default string // default value - HasDefault bool // whether an explicit default was provided - - stype reflect.Type // set for struct types only - sprop *StructProperties // set for struct types only + // Packed reports whether this is a packed repeated field of scalars. + Packed bool + // Proto3 reports whether this field operates under the proto3 syntax. + Proto3 bool + // Oneof reports whether this field belongs within a oneof. + Oneof bool + + // Default is the default value in string form. + Default string + // HasDefault reports whether the field has a default value. + HasDefault bool + + // MapKeyProp is the properties for the key field for a map field. + MapKeyProp *Properties + // MapValProp is the properties for the value field for a map field. + MapValProp *Properties +} - mtype reflect.Type // set for map types only - MapKeyProp *Properties // set for map types only - MapValProp *Properties // set for map types only +// OneofProperties represents the type information for a protobuf oneof. +// +// Deprecated: Do not use. +type OneofProperties struct { + // Type is a pointer to the generated wrapper type for the field value. + // This is nil for messages that are not in the open-struct API. 
+ Type reflect.Type + // Field is the index into StructProperties.Prop for the containing oneof. + Field int + // Prop is the properties for the field. + Prop *Properties } // String formats the properties in the protobuf struct field tag style. func (p *Properties) String() string { s := p.Wire - s += "," - s += strconv.Itoa(p.Tag) + s += "," + strconv.Itoa(p.Tag) if p.Required { s += ",req" } @@ -170,18 +112,21 @@ func (p *Properties) String() string { s += ",packed" } s += ",name=" + p.OrigName - if p.JSONName != p.OrigName { + if p.JSONName != "" { s += ",json=" + p.JSONName } - if p.proto3 { + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if len(p.Weak) > 0 { + s += ",weak=" + p.Weak + } + if p.Proto3 { s += ",proto3" } - if p.oneof { + if p.Oneof { s += ",oneof" } - if len(p.Enum) > 0 { - s += ",enum=" + p.Enum - } if p.HasDefault { s += ",def=" + p.Default } @@ -189,356 +134,173 @@ func (p *Properties) String() string { } // Parse populates p by parsing a string in the protobuf struct field tag style. -func (p *Properties) Parse(s string) { - // "bytes,49,opt,name=foo,def=hello!" - fields := strings.Split(s, ",") // breaks def=, but handled below. - if len(fields) < 2 { - log.Printf("proto: tag has too few fields: %q", s) - return - } - - p.Wire = fields[0] - switch p.Wire { - case "varint": - p.WireType = WireVarint - case "fixed32": - p.WireType = WireFixed32 - case "fixed64": - p.WireType = WireFixed64 - case "zigzag32": - p.WireType = WireVarint - case "zigzag64": - p.WireType = WireVarint - case "bytes", "group": - p.WireType = WireBytes - // no numeric converter for non-numeric types - default: - log.Printf("proto: tag has unknown wire type: %q", s) - return - } - - var err error - p.Tag, err = strconv.Atoi(fields[1]) - if err != nil { - return - } - -outer: - for i := 2; i < len(fields); i++ { - f := fields[i] - switch { - case f == "req": - p.Required = true - case f == "opt": +func (p *Properties) Parse(tag string) { + // For example: "bytes,49,opt,name=foo,def=hello!" 
+ for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + p.OrigName = s[len("name="):] + case strings.HasPrefix(s, "json="): + p.JSONName = s[len("json="):] + case strings.HasPrefix(s, "enum="): + p.Enum = s[len("enum="):] + case strings.HasPrefix(s, "weak="): + p.Weak = s[len("weak="):] + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + p.Tag = int(n) + case s == "opt": p.Optional = true - case f == "rep": + case s == "req": + p.Required = true + case s == "rep": p.Repeated = true - case f == "packed": + case s == "varint" || s == "zigzag32" || s == "zigzag64": + p.Wire = s + p.WireType = WireVarint + case s == "fixed32": + p.Wire = s + p.WireType = WireFixed32 + case s == "fixed64": + p.Wire = s + p.WireType = WireFixed64 + case s == "bytes": + p.Wire = s + p.WireType = WireBytes + case s == "group": + p.Wire = s + p.WireType = WireStartGroup + case s == "packed": p.Packed = true - case strings.HasPrefix(f, "name="): - p.OrigName = f[5:] - case strings.HasPrefix(f, "json="): - p.JSONName = f[5:] - case strings.HasPrefix(f, "enum="): - p.Enum = f[5:] - case f == "proto3": - p.proto3 = true - case f == "oneof": - p.oneof = true - case strings.HasPrefix(f, "def="): + case s == "proto3": + p.Proto3 = true + case s == "oneof": + p.Oneof = true + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. p.HasDefault = true - p.Default = f[4:] // rest of string - if i+1 < len(fields) { - // Commas aren't escaped, and def is always last. - p.Default += "," + strings.Join(fields[i+1:], ",") - break outer - } - } - } -} - -var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() - -// setFieldProps initializes the field properties for submessages and maps. -func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { - switch t1 := typ; t1.Kind() { - case reflect.Ptr: - if t1.Elem().Kind() == reflect.Struct { - p.stype = t1.Elem() - } - - case reflect.Slice: - if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { - p.stype = t2.Elem() - } - - case reflect.Map: - p.mtype = t1 - p.MapKeyProp = &Properties{} - p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) - p.MapValProp = &Properties{} - vtype := p.mtype.Elem() - if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { - // The value type is not a message (*T) or bytes ([]byte), - // so we need encoders for the pointer to this type. - vtype = reflect.PtrTo(vtype) - } - p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) - } - - if p.stype != nil { - if lockGetProp { - p.sprop = GetProperties(p.stype) - } else { - p.sprop = getPropertiesLocked(p.stype) + p.Default, i = tag[len("def="):], len(tag) } + tag = strings.TrimPrefix(tag[i:], ",") } } -var ( - marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() -) - // Init populates the properties from a protocol buffer struct tag. +// +// Deprecated: Do not use. func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { - p.init(typ, name, tag, f, true) -} - -func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { - // "bytes,49,opt,def=hello!" 
p.Name = name p.OrigName = name if tag == "" { return } p.Parse(tag) - p.setFieldProps(typ, f, lockGetProp) + + if typ != nil && typ.Kind() == reflect.Map { + p.MapKeyProp = new(Properties) + p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil) + p.MapValProp = new(Properties) + p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil) + } } -var ( - propertiesMu sync.RWMutex - propertiesMap = make(map[reflect.Type]*StructProperties) -) +var propertiesCache sync.Map // map[reflect.Type]*StructProperties -// GetProperties returns the list of properties for the type represented by t. -// t must represent a generated struct type of a protocol message. +// GetProperties returns the list of properties for the type represented by t, +// which must be a generated protocol buffer message in the open-struct API, +// where protobuf message fields are represented by exported Go struct fields. +// +// Deprecated: Use protobuf reflection instead. func GetProperties(t reflect.Type) *StructProperties { - if t.Kind() != reflect.Struct { - panic("proto: type must have kind struct") - } - - // Most calls to GetProperties in a long-running program will be - // retrieving details for types we have seen before. - propertiesMu.RLock() - sprop, ok := propertiesMap[t] - propertiesMu.RUnlock() - if ok { - return sprop + if p, ok := propertiesCache.Load(t); ok { + return p.(*StructProperties) } - - propertiesMu.Lock() - sprop = getPropertiesLocked(t) - propertiesMu.Unlock() - return sprop + p, _ := propertiesCache.LoadOrStore(t, newProperties(t)) + return p.(*StructProperties) } -type ( - oneofFuncsIface interface { - XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) - } - oneofWrappersIface interface { - XXX_OneofWrappers() []interface{} - } -) - -// getPropertiesLocked requires that propertiesMu is held. -func getPropertiesLocked(t reflect.Type) *StructProperties { - if prop, ok := propertiesMap[t]; ok { - return prop +func newProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) } + var hasOneof bool prop := new(StructProperties) - // in case of recursive protos, fill this in now. - propertiesMap[t] = prop - - // build properties - prop.Prop = make([]*Properties, t.NumField()) - prop.order = make([]int, t.NumField()) + // Construct a list of properties for each field in the struct. for i := 0; i < t.NumField(); i++ { - f := t.Field(i) p := new(Properties) - name := f.Name - p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + f := t.Field(i) + tagField := f.Tag.Get("protobuf") + p.Init(f.Type, f.Name, tagField, &f) - oneof := f.Tag.Get("protobuf_oneof") // special case - if oneof != "" { - // Oneof fields don't use the traditional protobuf tag. - p.OrigName = oneof + tagOneof := f.Tag.Get("protobuf_oneof") + if tagOneof != "" { + hasOneof = true + p.OrigName = tagOneof } - prop.Prop[i] = p - prop.order[i] = i - if debug { - print(i, " ", f.Name, " ", t.String(), " ") - if p.Tag > 0 { - print(p.String()) - } - print("\n") + + // Rename unrelated struct fields with the "XXX_" prefix since so much + // user code simply checks for this to exclude special fields. 
+ if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") { + p.Name = "XXX_" + p.Name + p.OrigName = "XXX_" + p.OrigName + } else if p.Weak != "" { + p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field } + + prop.Prop = append(prop.Prop, p) } - // Re-order prop.order. - sort.Sort(prop) + // Construct a mapping of oneof field names to properties. + if hasOneof { + var oneofWrappers []interface{} + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{}) + } + if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok { + oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{}) + } + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok { + if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok { + oneofWrappers = m.ProtoMessageInfo().OneofWrappers + } + } - var oots []interface{} - switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { - case oneofFuncsIface: - _, _, _, oots = m.XXX_OneofFuncs() - case oneofWrappersIface: - oots = m.XXX_OneofWrappers() - } - if len(oots) > 0 { - // Interpret oneof metadata. prop.OneofTypes = make(map[string]*OneofProperties) - for _, oot := range oots { - oop := &OneofProperties{ - Type: reflect.ValueOf(oot).Type(), // *T + for _, wrapper := range oneofWrappers { + p := &OneofProperties{ + Type: reflect.ValueOf(wrapper).Type(), // *T Prop: new(Properties), } - sft := oop.Type.Elem().Field(0) - oop.Prop.Name = sft.Name - oop.Prop.Parse(sft.Tag.Get("protobuf")) - // There will be exactly one interface field that - // this new value is assignable to. - for i := 0; i < t.NumField(); i++ { - f := t.Field(i) - if f.Type.Kind() != reflect.Interface { - continue + f := p.Type.Elem().Field(0) + p.Prop.Name = f.Name + p.Prop.Parse(f.Tag.Get("protobuf")) + + // Determine the struct field that contains this oneof. + // Each wrapper is assignable to exactly one parent field. + var foundOneof bool + for i := 0; i < t.NumField() && !foundOneof; i++ { + if p.Type.AssignableTo(t.Field(i).Type) { + p.Field = i + foundOneof = true } - if !oop.Type.AssignableTo(f.Type) { - continue - } - oop.Field = i - break } - prop.OneofTypes[oop.Prop.OrigName] = oop - } - } - - // build required counts - // build tags - reqCount := 0 - prop.decoderOrigNames = make(map[string]int) - for i, p := range prop.Prop { - if strings.HasPrefix(p.Name, "XXX_") { - // Internal fields should not appear in tags/origNames maps. - // They are handled specially when encoding and decoding. - continue - } - if p.Required { - reqCount++ + if !foundOneof { + panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t)) + } + prop.OneofTypes[p.Prop.OrigName] = p } - prop.decoderTags.put(p.Tag, i) - prop.decoderOrigNames[p.OrigName] = i } - prop.reqCount = reqCount return prop } -// A global registry of enum types. -// The generated code will register the generated maps by calling RegisterEnum. - -var enumValueMaps = make(map[string]map[string]int32) - -// RegisterEnum is called from the generated code to install the enum descriptor -// maps into the global table to aid parsing text format protocol buffers. 
-func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { - if _, ok := enumValueMaps[typeName]; ok { - panic("proto: duplicate enum registered: " + typeName) - } - enumValueMaps[typeName] = valueMap -} - -// EnumValueMap returns the mapping from names to integers of the -// enum type enumType, or a nil if not found. -func EnumValueMap(enumType string) map[string]int32 { - return enumValueMaps[enumType] -} - -// A registry of all linked message types. -// The string is a fully-qualified proto name ("pkg.Message"). -var ( - protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers - protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types - revProtoTypes = make(map[reflect.Type]string) -) - -// RegisterType is called from generated code and maps from the fully qualified -// proto name to the type (pointer to struct) of the protocol buffer. -func RegisterType(x Message, name string) { - if _, ok := protoTypedNils[name]; ok { - // TODO: Some day, make this a panic. - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { - // Generated code always calls RegisterType with nil x. - // This check is just for extra safety. - protoTypedNils[name] = x - } else { - protoTypedNils[name] = reflect.Zero(t).Interface().(Message) - } - revProtoTypes[t] = name -} - -// RegisterMapType is called from generated code and maps from the fully qualified -// proto name to the native map type of the proto map definition. -func RegisterMapType(x interface{}, name string) { - if reflect.TypeOf(x).Kind() != reflect.Map { - panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) - } - if _, ok := protoMapTypes[name]; ok { - log.Printf("proto: duplicate proto type registered: %s", name) - return - } - t := reflect.TypeOf(x) - protoMapTypes[name] = t - revProtoTypes[t] = name -} - -// MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { - type xname interface { - XXX_MessageName() string - } - if m, ok := x.(xname); ok { - return m.XXX_MessageName() - } - return revProtoTypes[reflect.TypeOf(x)] -} - -// MessageType returns the message type (pointer to struct) for a named message. -// The type is not guaranteed to implement proto.Message if the name refers to a -// map entry. -func MessageType(name string) reflect.Type { - if t, ok := protoTypedNils[name]; ok { - return reflect.TypeOf(t) - } - return protoMapTypes[name] -} - -// A registry of all linked proto files. -var ( - protoFiles = make(map[string][]byte) // file name => fileDescriptor -) - -// RegisterFile is called from generated code and maps from the -// full file name of a .proto file to its compressed FileDescriptorProto. -func RegisterFile(filename string, fileDescriptor []byte) { - protoFiles[filename] = fileDescriptor -} - -// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
-func FileDescriptor(filename string) []byte { return protoFiles[filename] } +func (sp *StructProperties) Len() int { return len(sp.Prop) } +func (sp *StructProperties) Less(i, j int) bool { return false } +func (sp *StructProperties) Swap(i, j int) { return } diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go new file mode 100644 index 0000000..5aee89c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/proto.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functionality for handling protocol buffer messages. +// In particular, it provides marshaling and unmarshaling between a protobuf +// message and the binary wire format. +// +// See https://developers.google.com/protocol-buffers/docs/gotutorial for +// more information. +// +// Deprecated: Use the "google.golang.org/protobuf/proto" package instead. +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + "google.golang.org/protobuf/runtime/protoimpl" +) + +const ( + ProtoPackageIsVersion1 = true + ProtoPackageIsVersion2 = true + ProtoPackageIsVersion3 = true + ProtoPackageIsVersion4 = true +) + +// GeneratedEnum is any enum type generated by protoc-gen-go +// which is a named int32 kind. +// This type exists for documentation purposes. +type GeneratedEnum interface{} + +// GeneratedMessage is any message type generated by protoc-gen-go +// which is a pointer to a named struct kind. +// This type exists for documentation purposes. +type GeneratedMessage interface{} + +// Message is a protocol buffer message. +// +// This is the v1 version of the message interface and is marginally better +// than an empty interface as it lacks any method to programatically interact +// with the contents of the message. +// +// A v2 message is declared in "google.golang.org/protobuf/proto".Message and +// exposes protobuf reflection as a first-class feature of the interface. +// +// To convert a v1 message to a v2 message, use the MessageV2 function. +// To convert a v2 message to a v1 message, use the MessageV1 function. +type Message = protoiface.MessageV1 + +// MessageV1 converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func MessageV1(m GeneratedMessage) protoiface.MessageV1 { + return protoimpl.X.ProtoMessageV1Of(m) +} + +// MessageV2 converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func MessageV2(m GeneratedMessage) protoV2.Message { + return protoimpl.X.ProtoMessageV2Of(m) +} + +// MessageReflect returns a reflective view for a message. +// It returns nil if m is nil. +func MessageReflect(m Message) protoreflect.Message { + return protoimpl.X.MessageOf(m) +} + +// Marshaler is implemented by messages that can marshal themselves. +// This interface is used by the following functions: Size, Marshal, +// Buffer.Marshal, and Buffer.EncodeMessage. +// +// Deprecated: Do not implement. +type Marshaler interface { + // Marshal formats the encoded bytes of the message. + // It should be deterministic and emit valid protobuf wire data. + // The caller takes ownership of the returned buffer. + Marshal() ([]byte, error) +} + +// Unmarshaler is implemented by messages that can unmarshal themselves. 
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge, +// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup. +// +// Deprecated: Do not implement. +type Unmarshaler interface { + // Unmarshal parses the encoded bytes of the protobuf wire input. + // The provided buffer is only valid for during method call. + // It should not reset the receiver message. + Unmarshal([]byte) error +} + +// Merger is implemented by messages that can merge themselves. +// This interface is used by the following functions: Clone and Merge. +// +// Deprecated: Do not implement. +type Merger interface { + // Merge merges the contents of src into the receiver message. + // It clones all data structures in src such that it aliases no mutable + // memory referenced by src. + Merge(src Message) +} + +// RequiredNotSetError is an error type returned when +// marshaling or unmarshaling a message with missing required fields. +type RequiredNotSetError struct { + err error +} + +func (e *RequiredNotSetError) Error() string { + if e.err != nil { + return e.err.Error() + } + return "proto: required field not set" +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +func checkRequiredNotSet(m protoV2.Message) error { + if err := protoV2.CheckInitialized(m); err != nil { + return &RequiredNotSetError{err: err} + } + return nil +} + +// Clone returns a deep copy of src. +func Clone(src Message) Message { + return MessageV1(protoV2.Clone(MessageV2(src))) +} + +// Merge merges src into dst, which must be messages of the same type. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +func Merge(dst, src Message) { + protoV2.Merge(MessageV2(dst), MessageV2(src)) +} + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they are the same protobuf message type, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + return protoV2.Equal(MessageV2(x), MessageV2(y)) +} + +func isMessageSet(md protoreflect.MessageDescriptor) bool { + ms, ok := md.(interface{ IsMessageSet() bool }) + return ok && ms.IsMessageSet() +} diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go new file mode 100644 index 0000000..1e7ff64 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/registry.go @@ -0,0 +1,323 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoimpl" +) + +// filePath is the path to the proto source file. +type filePath = string // e.g., "google/protobuf/descriptor.proto" + +// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto. +type fileDescGZIP = []byte + +var fileCache sync.Map // map[filePath]fileDescGZIP + +// RegisterFile is called from generated code to register the compressed +// FileDescriptorProto with the file path for a proto source file. +// +// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead. +func RegisterFile(s filePath, d fileDescGZIP) { + // Decompress the descriptor. + zr, err := gzip.NewReader(bytes.NewReader(d)) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err)) + } + + // Construct a protoreflect.FileDescriptor from the raw descriptor. + // Note that DescBuilder.Build automatically registers the constructed + // file descriptor with the v2 registry. + protoimpl.DescBuilder{RawDescriptor: b}.Build() + + // Locally cache the raw descriptor form for the file. + fileCache.Store(s, d) +} + +// FileDescriptor returns the compressed FileDescriptorProto given the file path +// for a proto source file. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead. +func FileDescriptor(s filePath) fileDescGZIP { + if v, ok := fileCache.Load(s); ok { + return v.(fileDescGZIP) + } + + // Find the descriptor in the v2 registry. + var b []byte + if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil { + if fd, ok := fd.(interface{ ProtoLegacyRawDesc() []byte }); ok { + b = fd.ProtoLegacyRawDesc() + } else { + // TODO: Use protodesc.ToFileDescriptorProto to construct + // a descriptorpb.FileDescriptorProto and marshal it. + // However, doing so causes the proto package to have a dependency + // on descriptorpb, leading to cyclic dependency issues. + } + } + + // Locally cache the raw descriptor form for the file. + if len(b) > 0 { + v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b)) + return v.(fileDescGZIP) + } + return nil +} + +// enumName is the name of an enum. For historical reasons, the enum name is +// neither the full Go name nor the full protobuf name of the enum. +// The name is the dot-separated combination of just the proto package that the +// enum is declared within followed by the Go type name of the generated enum. +type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum" + +// enumsByName maps enum values by name to their numeric counterpart. +type enumsByName = map[string]int32 + +// enumsByNumber maps enum values by number to their name counterpart. +type enumsByNumber = map[int32]string + +var enumCache sync.Map // map[enumName]enumsByName +var numFilesCache sync.Map // map[protoreflect.FullName]int + +// RegisterEnum is called from the generated code to register the mapping of +// enum value names to enum numbers for the enum identified by s. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead. 
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) { + if _, ok := enumCache.Load(s); ok { + panic("proto: duplicate enum registered: " + s) + } + enumCache.Store(s, m) + + // This does not forward registration to the v2 registry since this API + // lacks sufficient information to construct a complete v2 enum descriptor. +} + +// EnumValueMap returns the mapping from enum value names to enum numbers for +// the enum of the given name. It returns nil if not found. +// +// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead. +func EnumValueMap(s enumName) enumsByName { + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + + // Check whether the cache is stale. If the number of files in the current + // package differs, then it means that some enums may have been recently + // registered upstream that we do not know about. + var protoPkg protoreflect.FullName + if i := strings.LastIndexByte(s, '.'); i >= 0 { + protoPkg = protoreflect.FullName(s[:i]) + } + v, _ := numFilesCache.Load(protoPkg) + numFiles, _ := v.(int) + if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles { + return nil // cache is up-to-date; was not found earlier + } + + // Update the enum cache for all enums declared in the given proto package. + numFiles = 0 + protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool { + walkEnums(fd, func(ed protoreflect.EnumDescriptor) { + name := protoimpl.X.LegacyEnumName(ed) + if _, ok := enumCache.Load(name); !ok { + m := make(enumsByName) + evs := ed.Values() + for i := evs.Len() - 1; i >= 0; i-- { + ev := evs.Get(i) + m[string(ev.Name())] = int32(ev.Number()) + } + enumCache.LoadOrStore(name, m) + } + }) + numFiles++ + return true + }) + numFilesCache.Store(protoPkg, numFiles) + + // Check cache again for enum map. + if v, ok := enumCache.Load(s); ok { + return v.(enumsByName) + } + return nil +} + +// walkEnums recursively walks all enums declared in d. +func walkEnums(d interface { + Enums() protoreflect.EnumDescriptors + Messages() protoreflect.MessageDescriptors +}, f func(protoreflect.EnumDescriptor)) { + eds := d.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + } + mds := d.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + walkEnums(mds.Get(i), f) + } +} + +// messageName is the full name of protobuf message. +type messageName = string + +var messageTypeCache sync.Map // map[messageName]reflect.Type + +// RegisterType is called from generated code to register the message Go type +// for a message of the given name. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead. +func RegisterType(m Message, s messageName) { + mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s)) + if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil { + panic(err) + } + messageTypeCache.Store(s, reflect.TypeOf(m)) +} + +// RegisterMapType is called from generated code to register the Go map type +// for a protobuf message representing a map entry. +// +// Deprecated: Do not use. +func RegisterMapType(m interface{}, s messageName) { + t := reflect.TypeOf(m) + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid map kind: %v", t)) + } + if _, ok := messageTypeCache.Load(s); ok { + panic(fmt.Errorf("proto: duplicate proto message registered: %s", s)) + } + messageTypeCache.Store(s, t) +} + +// MessageType returns the message type for a named message. +// It returns nil if not found. 
+// +// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead. +func MessageType(s messageName) reflect.Type { + if v, ok := messageTypeCache.Load(s); ok { + return v.(reflect.Type) + } + + // Derive the message type from the v2 registry. + var t reflect.Type + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil { + t = messageGoType(mt) + } + + // If we could not get a concrete type, it is possible that it is a + // pseudo-message for a map entry. + if t == nil { + d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s)) + if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() { + kt := goTypeForField(md.Fields().ByNumber(1)) + vt := goTypeForField(md.Fields().ByNumber(2)) + t = reflect.MapOf(kt, vt) + } + } + + // Locally cache the message type for the given name. + if t != nil { + v, _ := messageTypeCache.LoadOrStore(s, t) + return v.(reflect.Type) + } + return nil +} + +func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type { + switch k := fd.Kind(); k { + case protoreflect.EnumKind: + if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil { + return enumGoType(et) + } + return reflect.TypeOf(protoreflect.EnumNumber(0)) + case protoreflect.MessageKind, protoreflect.GroupKind: + if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil { + return messageGoType(mt) + } + return reflect.TypeOf((*protoreflect.Message)(nil)).Elem() + default: + return reflect.TypeOf(fd.Default().Interface()) + } +} + +func enumGoType(et protoreflect.EnumType) reflect.Type { + return reflect.TypeOf(et.New(0)) +} + +func messageGoType(mt protoreflect.MessageType) reflect.Type { + return reflect.TypeOf(MessageV1(mt.Zero().Interface())) +} + +// MessageName returns the full protobuf name for the given message type. +// +// Deprecated: Use protoreflect.MessageDescriptor.FullName instead. +func MessageName(m Message) messageName { + if m == nil { + return "" + } + if m, ok := m.(interface{ XXX_MessageName() messageName }); ok { + return m.XXX_MessageName() + } + return messageName(protoimpl.X.MessageDescriptorOf(m).FullName()) +} + +// RegisterExtension is called from the generated code to register +// the extension descriptor. +// +// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead. +func RegisterExtension(d *ExtensionDesc) { + if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil { + panic(err) + } +} + +type extensionsByNumber = map[int32]*ExtensionDesc + +var extensionCache sync.Map // map[messageName]extensionsByNumber + +// RegisteredExtensions returns a map of the registered extensions for the +// provided protobuf message, indexed by the extension field number. +// +// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead. +func RegisteredExtensions(m Message) extensionsByNumber { + // Check whether the cache is stale. If the number of extensions for + // the given message differs, then it means that some extensions were + // recently registered upstream that we do not know about. + s := MessageName(m) + v, _ := extensionCache.Load(s) + xs, _ := v.(extensionsByNumber) + if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) { + return xs // cache is up-to-date + } + + // Cache is stale, re-compute the extensions map. 
+ xs = make(extensionsByNumber) + protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool { + if xd, ok := xt.(*ExtensionDesc); ok { + xs[int32(xt.TypeDescriptor().Number())] = xd + } else { + // TODO: This implies that the protoreflect.ExtensionType is a + // custom type not generated by protoc-gen-go. We could try and + // convert the type to an ExtensionDesc. + } + return true + }) + extensionCache.Store(s, xs) + return xs +} diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go new file mode 100644 index 0000000..4a59310 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_decode.go @@ -0,0 +1,801 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/prototext" + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const wrapTextUnmarshalV2 = false + +// ParseError is returned by UnmarshalText. +type ParseError struct { + Message string + + // Deprecated: Do not use. + Line, Offset int +} + +func (e *ParseError) Error() string { + if wrapTextUnmarshalV2 { + return e.Message + } + if e.Line == 1 { + return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message) + } + return fmt.Sprintf("line %d: %v", e.Line, e.Message) +} + +// UnmarshalText parses a proto text formatted string into m. +func UnmarshalText(s string, m Message) error { + if u, ok := m.(encoding.TextUnmarshaler); ok { + return u.UnmarshalText([]byte(s)) + } + + m.Reset() + mi := MessageV2(m) + + if wrapTextUnmarshalV2 { + err := prototext.UnmarshalOptions{ + AllowPartial: true, + }.Unmarshal([]byte(s), mi) + if err != nil { + return &ParseError{Message: err.Error()} + } + return checkRequiredNotSet(mi) + } else { + if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil { + return err + } + return checkRequiredNotSet(mi) + } +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) { + md := m.Descriptor() + fds := md.Fields() + + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + seen := make(map[protoreflect.FieldNumber]bool) + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + if err := p.unmarshalExtensionOrAny(m, seen); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. 
+ name := protoreflect.Name(tok.value) + fd := fds.ByName(name) + switch { + case fd == nil: + gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name { + fd = gd + } + case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name: + fd = nil + case fd.IsWeak() && fd.Message().IsPlaceholder(): + fd = nil + } + if fd == nil { + typeName := string(md.FullName()) + if m, ok := m.Interface().(Message); ok { + t := reflect.TypeOf(m) + if t.Kind() == reflect.Ptr { + typeName = t.Elem().String() + } + } + return p.errorf("unknown field name %q in %v", name, typeName) + } + if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name()) + } + if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] { + return p.errorf("non-repeated field %q was repeated", fd.Name()) + } + seen[fd.Number()] = true + + // Consume any colon. + if err := p.checkForColon(fd); err != nil { + return err + } + + // Parse into the field. + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + if v, err = p.unmarshalValue(v, fd); err != nil { + return err + } + m.Set(fd, v) + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + } + return nil +} + +func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error { + name, err := p.consumeExtensionOrAnyName() + if err != nil { + return err + } + + // If it contains a slash, it's an Any type URL. + if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 { + tok := p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + + mt, err := protoregistry.GlobalTypes.FindMessageByURL(name) + if err != nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):]) + } + m2 := mt.New() + if err := p.unmarshalMessage(m2, terminator); err != nil { + return err + } + b, err := protoV2.Marshal(m2.Interface()) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err) + } + + urlFD := m.Descriptor().Fields().ByName("type_url") + valFD := m.Descriptor().Fields().ByName("value") + if seen[urlFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name()) + } + if seen[valFD.Number()] { + return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name()) + } + m.Set(urlFD, protoreflect.ValueOfString(name)) + m.Set(valFD, protoreflect.ValueOfBytes(b)) + seen[urlFD.Number()] = true + seen[valFD.Number()] = true + return nil + } + + xname := protoreflect.FullName(name) + xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname) + if xt == nil && isMessageSet(m.Descriptor()) { + xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension")) + } + if xt == nil { + return p.errorf("unrecognized extension %q", name) + } + fd := xt.TypeDescriptor() + if fd.ContainingMessage().FullName() != m.Descriptor().FullName() { + return p.errorf("extension field %q does not extend message %q", 
name, m.Descriptor().FullName()) + } + + if err := p.checkForColon(fd); err != nil { + return err + } + + v := m.Get(fd) + if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) { + v = m.Mutable(fd) + } + v, err = p.unmarshalValue(v, fd) + if err != nil { + return err + } + m.Set(fd, v) + return p.consumeOptionalSeparator() +} + +func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch { + case fd.IsList(): + lv := v.List() + var err error + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return v, p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return v, nil + } + + // One value of the repeated field. + p.back() + vv := lv.NewElement() + vv, err = p.unmarshalSingularValue(vv, fd) + if err != nil { + return v, err + } + lv.Append(vv) + return v, nil + case fd.IsMap(): + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + + keyFD := fd.MapKey() + valFD := fd.MapValue() + + mv := v.Map() + kv := keyFD.Default() + vv := mv.NewValue() + for { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == terminator { + break + } + var err error + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return v, err + } + if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + case "value": + if err := p.checkForColon(valFD); err != nil { + return v, err + } + if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil { + return v, err + } + if err := p.consumeOptionalSeparator(); err != nil { + return v, err + } + default: + p.back() + return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + mv.Set(kv.MapKey(), vv) + return v, nil + default: + p.back() + return p.unmarshalSingularValue(v, fd) + } +} + +func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) { + tok := p.next() + if tok.err != nil { + return v, tok.err + } + if tok.value == "" { + return v, p.errorf("unexpected EOF") + } + + switch fd.Kind() { + case protoreflect.BoolKind: + switch tok.value { + case "true", "1", "t", "True": + return protoreflect.ValueOfBool(true), nil + case "false", "0", "f", "False": + return protoreflect.ValueOfBool(false), nil + } + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. 
+ // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil + } + } + case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(x)), nil + } + + // The C++ parser accepts large positive hex numbers that uses + // two's complement arithmetic to represent negative numbers. + // This feature is here for backwards compatibility with C++. + if strings.HasPrefix(tok.value, "0x") { + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil + } + } + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfUint32(uint32(x)), nil + } + case protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + return protoreflect.ValueOfUint64(uint64(x)), nil + } + case protoreflect.FloatKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 32); err == nil { + return protoreflect.ValueOfFloat32(float32(x)), nil + } + case protoreflect.DoubleKind: + // Ignore 'f' for compatibility with output generated by C++, + // but don't remove 'f' when the value is "-inf" or "inf". + v := tok.value + if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" { + v = v[:len(v)-len("f")] + } + if x, err := strconv.ParseFloat(v, 64); err == nil { + return protoreflect.ValueOfFloat64(float64(x)), nil + } + case protoreflect.StringKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfString(tok.unquoted), nil + } + case protoreflect.BytesKind: + if isQuote(tok.value[0]) { + return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil + } + case protoreflect.EnumKind: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil + } + vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value)) + if vd != nil { + return protoreflect.ValueOfEnum(vd.Number()), nil + } + case protoreflect.MessageKind, protoreflect.GroupKind: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return v, p.errorf("expected '{' or '<', found %q", tok.value) + } + err := p.unmarshalMessage(v.Message(), terminator) + return v, err + default: + panic(fmt.Sprintf("invalid kind %v", fd.Kind())) + } + return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value) +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + if fd.Message() == nil { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +// consumeExtensionOrAnyName consumes an extension name or an Any type URL and +// the following ']'. It returns the name or URL consumed. 
+func (p *textParser) consumeExtensionOrAnyName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in unmarshalMessage to provide backward compatibility. +func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +var errBadUTF8 = errors.New("proto: bad UTF-8") + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go new 
file mode 100644
index 0000000..a31134e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "math"
+ "sort"
+ "strings"
+
+ "google.golang.org/protobuf/encoding/prototext"
+ "google.golang.org/protobuf/encoding/protowire"
+ "google.golang.org/protobuf/proto"
+ "google.golang.org/protobuf/reflect/protoreflect"
+ "google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+ Compact bool // use compact text format (one line)
+ ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+ b, err := tm.marshal(m)
+ if len(b) > 0 {
+ if _, err := w.Write(b); err != nil {
+ return err
+ }
+ }
+ return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+ b, _ := tm.marshal(m)
+ return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+ mr := MessageReflect(m)
+ if mr == nil || !mr.IsValid() {
+ return []byte("<nil>"), nil
+ }
+
+ if wrapTextMarshalV2 {
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ return m.MarshalText()
+ }
+
+ opts := prototext.MarshalOptions{
+ AllowPartial: true,
+ EmitUnknown: true,
+ }
+ if !tm.Compact {
+ opts.Indent = " "
+ }
+ if !tm.ExpandAny {
+ opts.Resolver = (*protoregistry.Types)(nil)
+ }
+ return opts.Marshal(mr.Interface())
+ } else {
+ w := &textWriter{
+ compact: tm.Compact,
+ expandAny: tm.ExpandAny,
+ complete: true,
+ }
+
+ if m, ok := m.(encoding.TextMarshaler); ok {
+ b, err := m.MarshalText()
+ if err != nil {
+ return nil, err
+ }
+ w.Write(b)
+ return w.buf, nil
+ }
+
+ err := w.writeMessage(mr)
+ return w.buf, err
+ }
+}
+
+var (
+ defaultTextMarshaler = TextMarshaler{}
+ compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
+
+var (
+ newline = []byte("\n")
+ endBraceNewline = []byte("}\n")
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ compact bool // same as TextMarshaler.Compact
+ expandAny bool // same as TextMarshaler.ExpandAny
+ complete bool // whether the current position is a complete line
+ indent int // indentation level; never negative
+ buf []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.buf = append(w.buf, p...)
+ w.complete = false + return len(p), nil + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + w.buf = append(w.buf, ' ') + n++ + } + w.buf = append(w.buf, frag...) + n += len(frag) + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + w.buf = append(w.buf, frag...) + n += len(frag) + if i+1 < len(frags) { + w.buf = append(w.buf, '\n') + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + w.buf = append(w.buf, c) + w.complete = c == '\n' + return nil +} + +func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + + if fd.Kind() != protoreflect.GroupKind { + w.buf = append(w.buf, fd.Name()...) + w.WriteByte(':') + } else { + // Use message type name for group field name. + w.buf = append(w.buf, fd.Message().Name()...) + } + + if !w.compact { + w.WriteByte(' ') + } +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) { + md := m.Descriptor() + fdURL := md.Fields().ByName("type_url") + fdVal := md.Fields().ByName("value") + + url := m.Get(fdURL).String() + mt, err := protoregistry.GlobalTypes.FindMessageByURL(url) + if err != nil { + return false, nil + } + + b := m.Get(fdVal).Bytes() + m2 := mt.New() + if err := proto.Unmarshal(b, m2.Interface()); err != nil { + return false, nil + } + w.Write([]byte("[")) + if requiresQuotes(url) { + w.writeQuotedString(url) + } else { + w.Write([]byte(url)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.indent++ + } + if err := w.writeMessage(m2); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.indent-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (w *textWriter) writeMessage(m protoreflect.Message) error { + md := m.Descriptor() + if w.expandAny && md.FullName() == "google.protobuf.Any" { + if canExpand, err := w.writeProto3Any(m); canExpand { + return err + } + } + + fds := md.Fields() + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + if fd == nil || !m.Has(fd) { + continue + } + + switch { + case fd.IsList(): + lv := m.Get(fd).List() + for j := 0; j < lv.Len(); j++ { + w.writeName(fd) + v := lv.Get(j) + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + } + case fd.IsMap(): + kfd := fd.MapKey() + vfd := fd.MapValue() + mv := m.Get(fd).Map() + + type entry struct{ key, val protoreflect.Value } + var entries []entry + mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + entries = append(entries, entry{k.Value(), v}) + return true + }) + sort.Slice(entries, func(i, j int) bool { + switch kfd.Kind() { + case protoreflect.BoolKind: + return !entries[i].key.Bool() && entries[j].key.Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return entries[i].key.Int() < entries[j].key.Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return entries[i].key.Uint() < entries[j].key.Uint() + case protoreflect.StringKind: + return entries[i].key.String() < entries[j].key.String() + default: + panic("invalid kind") + } + }) + for _, entry := range entries { + w.writeName(fd) + w.WriteByte('<') + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + w.writeName(kfd) + if err := w.writeSingularValue(entry.key, kfd); err != nil { + return err + } + w.WriteByte('\n') + w.writeName(vfd) + if err := w.writeSingularValue(entry.val, vfd); err != nil { + return err + } + w.WriteByte('\n') + w.indent-- + w.WriteByte('>') + w.WriteByte('\n') + } + default: + w.writeName(fd) + if err := w.writeSingularValue(m.Get(fd), fd); err != nil { + return err + } + w.WriteByte('\n') + } + } + + if b := m.GetUnknown(); len(b) > 0 { + w.writeUnknownFields(b) + } + return w.writeExtensions(m) +} + +func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + switch fd.Kind() { + case protoreflect.FloatKind, protoreflect.DoubleKind: + switch vf := v.Float(); { + case math.IsInf(vf, +1): + w.Write(posInf) + case math.IsInf(vf, -1): + w.Write(negInf) + case math.IsNaN(vf): + w.Write(nan) + default: + fmt.Fprint(w, v.Interface()) + } + case 
protoreflect.StringKind: + // NOTE: This does not validate UTF-8 for historical reasons. + w.writeQuotedString(string(v.String())) + case protoreflect.BytesKind: + w.writeQuotedString(string(v.Bytes())) + case protoreflect.MessageKind, protoreflect.GroupKind: + var bra, ket byte = '<', '>' + if fd.Kind() == protoreflect.GroupKind { + bra, ket = '{', '}' + } + w.WriteByte(bra) + if !w.compact { + w.WriteByte('\n') + } + w.indent++ + m := v.Message() + if m2, ok := m.Interface().(encoding.TextMarshaler); ok { + b, err := m2.MarshalText() + if err != nil { + return err + } + w.Write(b) + } else { + w.writeMessage(m) + } + w.indent-- + w.WriteByte(ket) + case protoreflect.EnumKind: + if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil { + fmt.Fprint(w, ev.Name()) + } else { + fmt.Fprint(w, v.Enum()) + } + default: + fmt.Fprint(w, v.Interface()) + } + return nil +} + +// writeQuotedString writes a quoted string in the protocol buffer text format. +func (w *textWriter) writeQuotedString(s string) { + w.WriteByte('"') + for i := 0; i < len(s); i++ { + switch c := s[i]; c { + case '\n': + w.buf = append(w.buf, `\n`...) + case '\r': + w.buf = append(w.buf, `\r`...) + case '\t': + w.buf = append(w.buf, `\t`...) + case '"': + w.buf = append(w.buf, `\"`...) + case '\\': + w.buf = append(w.buf, `\\`...) + default: + if isPrint := c >= 0x20 && c < 0x7f; isPrint { + w.buf = append(w.buf, c) + } else { + w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...) + } + } + } + w.WriteByte('"') +} + +func (w *textWriter) writeUnknownFields(b []byte) { + if !w.compact { + fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b)) + } + + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return + } + b = b[n:] + + if wtyp == protowire.EndGroupType { + w.indent-- + w.Write(endBraceNewline) + continue + } + fmt.Fprint(w, num) + if wtyp != protowire.StartGroupType { + w.WriteByte(':') + } + if !w.compact || wtyp == protowire.StartGroupType { + w.WriteByte(' ') + } + switch wtyp { + case protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed32Type: + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.Fixed64Type: + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprint(w, v) + case protowire.BytesType: + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return + } + b = b[n:] + fmt.Fprintf(w, "%q", v) + case protowire.StartGroupType: + w.WriteByte('{') + w.indent++ + default: + fmt.Fprintf(w, "/* unknown wire type %d */", wtyp) + } + w.WriteByte('\n') + } +} + +// writeExtensions writes all the extensions in m. +func (w *textWriter) writeExtensions(m protoreflect.Message) error { + md := m.Descriptor() + if md.ExtensionRanges().Len() == 0 { + return nil + } + + type ext struct { + desc protoreflect.FieldDescriptor + val protoreflect.Value + } + var exts []ext + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + exts = append(exts, ext{fd, v}) + } + return true + }) + sort.Slice(exts, func(i, j int) bool { + return exts[i].desc.Number() < exts[j].desc.Number() + }) + + for _, ext := range exts { + // For message set, use the name of the message as the extension name. 
+ name := string(ext.desc.FullName()) + if isMessageSet(ext.desc.ContainingMessage()) { + name = strings.TrimSuffix(name, ".message_set_extension") + } + + if !ext.desc.IsList() { + if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil { + return err + } + } else { + lv := ext.val.List() + for i := 0; i < lv.Len(); i++ { + if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil { + return err + } + } + } + } + return nil +} + +func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error { + fmt.Fprintf(w, "[%s]:", name) + if !w.compact { + w.WriteByte(' ') + } + if err := w.writeSingularValue(v, fd); err != nil { + return err + } + w.WriteByte('\n') + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + for i := 0; i < w.indent*2; i++ { + w.buf = append(w.buf, ' ') + } + w.complete = false +} diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go new file mode 100644 index 0000000..d7c28da --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wire.go @@ -0,0 +1,78 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + protoV2 "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + if m == nil { + return 0 + } + mi := MessageV2(m) + return protoV2.Size(mi) +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + b, err := marshalAppend(nil, m, false) + if b == nil { + b = zeroBytes + } + return b, err +} + +var zeroBytes = make([]byte, 0, 0) + +func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) { + if m == nil { + return nil, ErrNil + } + mi := MessageV2(m) + nbuf, err := protoV2.MarshalOptions{ + Deterministic: deterministic, + AllowPartial: true, + }.MarshalAppend(buf, mi) + if err != nil { + return buf, err + } + if len(buf) == len(nbuf) { + if !mi.ProtoReflect().IsValid() { + return buf, ErrNil + } + } + return nbuf, checkRequiredNotSet(mi) +} + +// Unmarshal parses a wire-format message in b and places the decoded results in m. +// +// Unmarshal resets m before starting to unmarshal, so any existing data in m is always +// removed. Use UnmarshalMerge to preserve and append to existing data. +func Unmarshal(b []byte, m Message) error { + m.Reset() + return UnmarshalMerge(b, m) +} + +// UnmarshalMerge parses a wire-format message in b and places the decoded results in m. +func UnmarshalMerge(b []byte, m Message) error { + mi := MessageV2(m) + out, err := protoV2.UnmarshalOptions{ + AllowPartial: true, + Merge: true, + }.UnmarshalState(protoiface.UnmarshalInput{ + Buf: b, + Message: mi.ProtoReflect(), + }) + if err != nil { + return err + } + if out.Flags&protoiface.UnmarshalInitialized > 0 { + return nil + } + return checkRequiredNotSet(mi) +} diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go new file mode 100644 index 0000000..398e348 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/wrappers.go @@ -0,0 +1,34 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int stores v in a new int32 value and returns a pointer to it. +// +// Deprecated: Use Int32 instead. +func Int(v int) *int32 { return Int32(int32(v)) } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go new file mode 100644 index 0000000..e729dcf --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/any.go @@ -0,0 +1,165 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "fmt" + "strings" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + + anypb "github.com/golang/protobuf/ptypes/any" +) + +const urlPrefix = "type.googleapis.com/" + +// AnyMessageName returns the message name contained in an anypb.Any message. +// Most type assertions should use the Is function instead. +func AnyMessageName(any *anypb.Any) (string, error) { + name, err := anyMessageName(any) + return string(name), err +} +func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + name := protoreflect.FullName(any.TypeUrl) + if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { + name = name[i+len("/"):] + } + if !name.IsValid() { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return name, nil +} + +// MarshalAny marshals the given message m into an anypb.Any message. +func MarshalAny(m proto.Message) (*anypb.Any, error) { + switch dm := m.(type) { + case DynamicAny: + m = dm.Message + case *DynamicAny: + if dm == nil { + return nil, proto.ErrNil + } + m = dm.Message + } + b, err := proto.Marshal(m) + if err != nil { + return nil, err + } + return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil +} + +// Empty returns a new message of the type specified in an anypb.Any message. +// It returns protoregistry.NotFound if the corresponding message type could not +// be resolved in the global registry. +func Empty(any *anypb.Any) (proto.Message, error) { + name, err := anyMessageName(any) + if err != nil { + return nil, err + } + mt, err := protoregistry.GlobalTypes.FindMessageByName(name) + if err != nil { + return nil, err + } + return proto.MessageV1(mt.New().Interface()), nil +} + +// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message +// into the provided message m. 
It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
+//
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+ if dm, ok := m.(*DynamicAny); ok {
+ if dm.Message == nil {
+ var err error
+ dm.Message, err = Empty(any)
+ if err != nil {
+ return err
+ }
+ }
+ m = dm.Message
+ }
+
+ anyName, err := AnyMessageName(any)
+ if err != nil {
+ return err
+ }
+ msgName := proto.MessageName(m)
+ if anyName != msgName {
+ return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
+ }
+ return proto.Unmarshal(any.Value, m)
+}
+
+// Is reports whether the Any message contains a message of the specified type.
+func Is(any *anypb.Any, m proto.Message) bool {
+ if any == nil || m == nil {
+ return false
+ }
+ name := proto.MessageName(m)
+ if !strings.HasSuffix(any.TypeUrl, name) {
+ return false
+ }
+ return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+// var x ptypes.DynamicAny
+// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+// fmt.Printf("unmarshaled message: %v", x.Message)
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+ if m.Message == nil {
+ return "<nil>"
+ }
+ return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+ if m.Message == nil {
+ return
+ }
+ m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {
+ return
+}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+ if m.Message == nil {
+ return nil
+ }
+ return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+ return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+ return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+ return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+ return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+ return dynamicAny{t.MessageType.Zero()}
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..0ef27d3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/any/any.proto
+
+package any
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ anypb "google.golang.org/protobuf/types/known/anypb"
+ reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/any.proto.
+ +type Any = anypb.Any + +var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, + 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, + 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, + 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } +func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { + if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_any_any_proto = out.File + file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go new file mode 100644 index 0000000..fb9edd5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/doc.go @@ -0,0 +1,6 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ptypes provides functionality for interacting with well-known types. +package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go new file mode 100644 index 0000000..6110ae8 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "errors" + "fmt" + "time" + + durationpb "github.com/golang/protobuf/ptypes/duration" +) + +// Range of google.protobuf.Duration as specified in duration.proto. +// This is about 10,000 years in seconds. 
+const ( + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// Duration converts a durationpb.Duration to a time.Duration. +// Duration returns an error if dur is invalid or overflows a time.Duration. +func Duration(dur *durationpb.Duration) (time.Duration, error) { + if err := validateDuration(dur); err != nil { + return 0, err + } + d := time.Duration(dur.Seconds) * time.Second + if int64(d/time.Second) != dur.Seconds { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + if dur.Nanos != 0 { + d += time.Duration(dur.Nanos) * time.Nanosecond + if (d < 0) != (dur.Nanos < 0) { + return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a durationpb.Duration. +func DurationProto(d time.Duration) *durationpb.Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &durationpb.Duration{ + Seconds: int64(secs), + Nanos: int32(nanos), + } +} + +// validateDuration determines whether the durationpb.Duration is valid +// according to the definition in google/protobuf/duration.proto. +// A valid durpb.Duration may still be too large to fit into a time.Duration +// Note that the range of durationpb.Duration is about 10,000 years, +// while the range of time.Duration is about 290 years. +func validateDuration(dur *durationpb.Duration) error { + if dur == nil { + return errors.New("duration: nil Duration") + } + if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { + return fmt.Errorf("duration: %v: seconds out of range", dur) + } + if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { + return fmt.Errorf("duration: %v: nanos out of range", dur) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { + return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) + } + return nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go new file mode 100644 index 0000000..d0079ee --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -0,0 +1,63 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: github.com/golang/protobuf/ptypes/duration/duration.proto + +package duration + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/duration.proto. 
+ +type Duration = durationpb.Duration + +var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ + 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, + 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, + 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } +func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { + if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File + file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil +} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go new file mode 100644 index 0000000..026d0d4 --- /dev/null +++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -0,0 +1,103 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ptypes + +import ( + "errors" + "fmt" + "time" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" +) + +// Range of google.protobuf.Duration as specified in timestamp.proto. +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. 
+ // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+ maxValidSeconds = 253402300800
+)
+
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+func TimestampNow() *timestamppb.Timestamp {
+ ts, err := TimestampProto(time.Now())
+ if err != nil {
+ panic("ptypes: time.Now() out of Timestamp range")
+ }
+ return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+ ts := &timestamppb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+func TimestampString(ts *timestamppb.Timestamp) string {
+ t, err := Timestamp(ts)
+ if err != nil {
+ return fmt.Sprintf("(%v)", err)
+ }
+ return t.Format(time.RFC3339Nano)
+}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..a76f807
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,64 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + +package timestamp + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" + reflect "reflect" +) + +// Symbols defined in public import of google/protobuf/timestamp.proto. + +type Timestamp = timestamppb.Timestamp + +var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, + 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, + 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, + 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x33, +} + +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} +var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } +func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { + if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 0, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, + DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, + }.Build() + File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil + file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil +} diff --git a/vendor/github.com/google/gofuzz/.travis.yml b/vendor/github.com/google/gofuzz/.travis.yml new file mode 100644 index 0000000..f8684d9 --- /dev/null +++ b/vendor/github.com/google/gofuzz/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - 1.4 + - 1.3 + - 1.2 + - tip + +install: + - if ! 
go get code.google.com/p/go.tools/cmd/cover; then go get golang.org/x/tools/cmd/cover; fi + +script: + - go test -cover diff --git a/vendor/github.com/google/gofuzz/CONTRIBUTING.md b/vendor/github.com/google/gofuzz/CONTRIBUTING.md new file mode 100644 index 0000000..51cf5cd --- /dev/null +++ b/vendor/github.com/google/gofuzz/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# How to contribute # + +We'd love to accept your patches and contributions to this project. There are +a just a few small guidelines you need to follow. + + +## Contributor License Agreement ## + +Contributions to any Google project must be accompanied by a Contributor +License Agreement. This is not a copyright **assignment**, it simply gives +Google permission to use and redistribute your contributions as part of the +project. + + * If you are an individual writing original source code and you're sure you + own the intellectual property, then you'll need to sign an [individual + CLA][]. + + * If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a [corporate CLA][]. + +You generally only need to submit a CLA once, so if you've already submitted +one (even if it was for a different project), you probably don't need to do it +again. + +[individual CLA]: https://developers.google.com/open-source/cla/individual +[corporate CLA]: https://developers.google.com/open-source/cla/corporate + + +## Submitting a patch ## + + 1. It's generally best to start by opening a new issue describing the bug or + feature you're intending to fix. Even if you think it's relatively minor, + it's helpful to know what people are working on. Mention in the initial + issue that you are planning to work on that bug or feature so that it can + be assigned to you. + + 1. Follow the normal process of [forking][] the project, and setup a new + branch to work in. It's important that each group of changes be done in + separate branches in order to ensure that a pull request only includes the + commits related to that bug or feature. + + 1. Go makes it very simple to ensure properly formatted code, so always run + `go fmt` on your code before committing it. You should also run + [golint][] over your code. As noted in the [golint readme][], it's not + strictly necessary that your code be completely "lint-free", but this will + help you find common style issues. + + 1. Any significant changes should almost always be accompanied by tests. The + project already has good test coverage, so look at some of the existing + tests if you're unsure how to go about it. [gocov][] and [gocov-html][] + are invaluable tools for seeing which parts of your code aren't being + exercised by your tests. + + 1. Do your best to have [well-formed commit messages][] for each change. + This provides consistency throughout the project, and ensures that commit + messages are able to be formatted properly by various git tools. + + 1. Finally, push the commits to your fork and submit a [pull request][]. 
+ +[forking]: https://help.github.com/articles/fork-a-repo +[golint]: https://github.com/golang/lint +[golint readme]: https://github.com/golang/lint/blob/master/README +[gocov]: https://github.com/axw/gocov +[gocov-html]: https://github.com/matm/gocov-html +[well-formed commit messages]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html +[squash]: http://git-scm.com/book/en/Git-Tools-Rewriting-History#Squashing-Commits +[pull request]: https://help.github.com/articles/creating-a-pull-request diff --git a/vendor/github.com/ericchiang/k8s/LICENSE b/vendor/github.com/google/gofuzz/LICENSE similarity index 100% rename from vendor/github.com/ericchiang/k8s/LICENSE rename to vendor/github.com/google/gofuzz/LICENSE diff --git a/vendor/github.com/google/gofuzz/README.md b/vendor/github.com/google/gofuzz/README.md new file mode 100644 index 0000000..386c2a4 --- /dev/null +++ b/vendor/github.com/google/gofuzz/README.md @@ -0,0 +1,71 @@ +gofuzz +====== + +gofuzz is a library for populating go objects with random values. + +[![GoDoc](https://godoc.org/github.com/google/gofuzz?status.svg)](https://godoc.org/github.com/google/gofuzz) +[![Travis](https://travis-ci.org/google/gofuzz.svg?branch=master)](https://travis-ci.org/google/gofuzz) + +This is useful for testing: + +* Do your project's objects really serialize/unserialize correctly in all cases? +* Is there an incorrectly formatted object that will cause your project to panic? + +Import with ```import "github.com/google/gofuzz"``` + +You can use it on single variables: +```go +f := fuzz.New() +var myInt int +f.Fuzz(&myInt) // myInt gets a random value. +``` + +You can use it on maps: +```go +f := fuzz.New().NilChance(0).NumElements(1, 1) +var myMap map[ComplexKeyType]string +f.Fuzz(&myMap) // myMap will have exactly one element. +``` + +Customize the chance of getting a nil pointer: +```go +f := fuzz.New().NilChance(.5) +var fancyStruct struct { + A, B, C, D *string +} +f.Fuzz(&fancyStruct) // About half the pointers should be set. +``` + +You can even customize the randomization completely if needed: +```go +type MyEnum string +const ( + A MyEnum = "A" + B MyEnum = "B" +) +type MyInfo struct { + Type MyEnum + AInfo *string + BInfo *string +} + +f := fuzz.New().NilChance(0).Funcs( + func(e *MyInfo, c fuzz.Continue) { + switch c.Intn(2) { + case 0: + e.Type = A + c.Fuzz(&e.AInfo) + case 1: + e.Type = B + c.Fuzz(&e.BInfo) + } + }, +) + +var myObject MyInfo +f.Fuzz(&myObject) // Type will correspond to whether A or B info is set. +``` + +See more examples in ```example_test.go```. + +Happy testing! diff --git a/vendor/github.com/google/gofuzz/doc.go b/vendor/github.com/google/gofuzz/doc.go new file mode 100644 index 0000000..9f9956d --- /dev/null +++ b/vendor/github.com/google/gofuzz/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fuzz is a library for populating go objects with random values. 
+package fuzz diff --git a/vendor/github.com/google/gofuzz/fuzz.go b/vendor/github.com/google/gofuzz/fuzz.go new file mode 100644 index 0000000..da0a5f9 --- /dev/null +++ b/vendor/github.com/google/gofuzz/fuzz.go @@ -0,0 +1,506 @@ +/* +Copyright 2014 Google Inc. All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fuzz + +import ( + "fmt" + "math/rand" + "reflect" + "regexp" + "time" +) + +// fuzzFuncMap is a map from a type to a fuzzFunc that handles that type. +type fuzzFuncMap map[reflect.Type]reflect.Value + +// Fuzzer knows how to fill any object with random fields. +type Fuzzer struct { + fuzzFuncs fuzzFuncMap + defaultFuzzFuncs fuzzFuncMap + r *rand.Rand + nilChance float64 + minElements int + maxElements int + maxDepth int + skipFieldPatterns []*regexp.Regexp +} + +// New returns a new Fuzzer. Customize your Fuzzer further by calling Funcs, +// RandSource, NilChance, or NumElements in any order. +func New() *Fuzzer { + return NewWithSeed(time.Now().UnixNano()) +} + +func NewWithSeed(seed int64) *Fuzzer { + f := &Fuzzer{ + defaultFuzzFuncs: fuzzFuncMap{ + reflect.TypeOf(&time.Time{}): reflect.ValueOf(fuzzTime), + }, + + fuzzFuncs: fuzzFuncMap{}, + r: rand.New(rand.NewSource(seed)), + nilChance: .2, + minElements: 1, + maxElements: 10, + maxDepth: 100, + } + return f +} + +// Funcs adds each entry in fuzzFuncs as a custom fuzzing function. +// +// Each entry in fuzzFuncs must be a function taking two parameters. +// The first parameter must be a pointer or map. It is the variable that +// function will fill with random data. The second parameter must be a +// fuzz.Continue, which will provide a source of randomness and a way +// to automatically continue fuzzing smaller pieces of the first parameter. +// +// These functions are called sensibly, e.g., if you wanted custom string +// fuzzing, the function `func(s *string, c fuzz.Continue)` would get +// called and passed the address of strings. Maps and pointers will always +// be made/new'd for you, ignoring the NilChange option. For slices, it +// doesn't make much sense to pre-create them--Fuzzer doesn't know how +// long you want your slice--so take a pointer to a slice, and make it +// yourself. (If you don't want your map/pointer type pre-made, take a +// pointer to it, and make it yourself.) See the examples for a range of +// custom functions. +func (f *Fuzzer) Funcs(fuzzFuncs ...interface{}) *Fuzzer { + for i := range fuzzFuncs { + v := reflect.ValueOf(fuzzFuncs[i]) + if v.Kind() != reflect.Func { + panic("Need only funcs!") + } + t := v.Type() + if t.NumIn() != 2 || t.NumOut() != 0 { + panic("Need 2 in and 0 out params!") + } + argT := t.In(0) + switch argT.Kind() { + case reflect.Ptr, reflect.Map: + default: + panic("fuzzFunc must take pointer or map type") + } + if t.In(1) != reflect.TypeOf(Continue{}) { + panic("fuzzFunc's second parameter must be type fuzz.Continue") + } + f.fuzzFuncs[argT] = v + } + return f +} + +// RandSource causes f to get values from the given source of randomness. 
+// Use if you want deterministic fuzzing. +func (f *Fuzzer) RandSource(s rand.Source) *Fuzzer { + f.r = rand.New(s) + return f +} + +// NilChance sets the probability of creating a nil pointer, map, or slice to +// 'p'. 'p' should be between 0 (no nils) and 1 (all nils), inclusive. +func (f *Fuzzer) NilChance(p float64) *Fuzzer { + if p < 0 || p > 1 { + panic("p should be between 0 and 1, inclusive.") + } + f.nilChance = p + return f +} + +// NumElements sets the minimum and maximum number of elements that will be +// added to a non-nil map or slice. +func (f *Fuzzer) NumElements(atLeast, atMost int) *Fuzzer { + if atLeast > atMost { + panic("atLeast must be <= atMost") + } + if atLeast < 0 { + panic("atLeast must be >= 0") + } + f.minElements = atLeast + f.maxElements = atMost + return f +} + +func (f *Fuzzer) genElementCount() int { + if f.minElements == f.maxElements { + return f.minElements + } + return f.minElements + f.r.Intn(f.maxElements-f.minElements+1) +} + +func (f *Fuzzer) genShouldFill() bool { + return f.r.Float64() > f.nilChance +} + +// MaxDepth sets the maximum number of recursive fuzz calls that will be made +// before stopping. This includes struct members, pointers, and map and slice +// elements. +func (f *Fuzzer) MaxDepth(d int) *Fuzzer { + f.maxDepth = d + return f +} + +// Skip fields which match the supplied pattern. Call this multiple times if needed +// This is useful to skip XXX_ fields generated by protobuf +func (f *Fuzzer) SkipFieldsWithPattern(pattern *regexp.Regexp) *Fuzzer { + f.skipFieldPatterns = append(f.skipFieldPatterns, pattern) + return f +} + +// Fuzz recursively fills all of obj's fields with something random. First +// this tries to find a custom fuzz function (see Funcs). If there is no +// custom function this tests whether the object implements fuzz.Interface and, +// if so, calls Fuzz on it to fuzz itself. If that fails, this will see if +// there is a default fuzz function provided by this package. If all of that +// fails, this will generate random values for all primitive fields and then +// recurse for all non-primitives. +// +// This is safe for cyclic or tree-like structs, up to a limit. Use the +// MaxDepth method to adjust how deep you need it to recurse. +// +// obj must be a pointer. Only exported (public) fields can be set (thanks, +// golang :/ ) Intended for tests, so will panic on bad input or unimplemented +// fields. +func (f *Fuzzer) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, 0) +} + +// FuzzNoCustom is just like Fuzz, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +// Not safe for cyclic or tree-like structs! +// obj must be a pointer. Only exported (public) fields can be set (thanks, golang :/ ) +// Intended for tests, so will panic on bad input or unimplemented fields. +func (f *Fuzzer) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + f.fuzzWithContext(v, flagNoCustomFuzz) +} + +const ( + // Do not try to find a custom fuzz function. Does not apply recursively. 
+ flagNoCustomFuzz uint64 = 1 << iota +) + +func (f *Fuzzer) fuzzWithContext(v reflect.Value, flags uint64) { + fc := &fuzzerContext{fuzzer: f} + fc.doFuzz(v, flags) +} + +// fuzzerContext carries context about a single fuzzing run, which lets Fuzzer +// be thread-safe. +type fuzzerContext struct { + fuzzer *Fuzzer + curDepth int +} + +func (fc *fuzzerContext) doFuzz(v reflect.Value, flags uint64) { + if fc.curDepth >= fc.fuzzer.maxDepth { + return + } + fc.curDepth++ + defer func() { fc.curDepth-- }() + + if !v.CanSet() { + return + } + + if flags&flagNoCustomFuzz == 0 { + // Check for both pointer and non-pointer custom functions. + if v.CanAddr() && fc.tryCustom(v.Addr()) { + return + } + if fc.tryCustom(v) { + return + } + } + + if fn, ok := fillFuncMap[v.Kind()]; ok { + fn(v, fc.fuzzer.r) + return + } + switch v.Kind() { + case reflect.Map: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.MakeMap(v.Type())) + n := fc.fuzzer.genElementCount() + for i := 0; i < n; i++ { + key := reflect.New(v.Type().Key()).Elem() + fc.doFuzz(key, 0) + val := reflect.New(v.Type().Elem()).Elem() + fc.doFuzz(val, 0) + v.SetMapIndex(key, val) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Ptr: + if fc.fuzzer.genShouldFill() { + v.Set(reflect.New(v.Type().Elem())) + fc.doFuzz(v.Elem(), 0) + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Slice: + if fc.fuzzer.genShouldFill() { + n := fc.fuzzer.genElementCount() + v.Set(reflect.MakeSlice(v.Type(), n, n)) + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Array: + if fc.fuzzer.genShouldFill() { + n := v.Len() + for i := 0; i < n; i++ { + fc.doFuzz(v.Index(i), 0) + } + return + } + v.Set(reflect.Zero(v.Type())) + case reflect.Struct: + for i := 0; i < v.NumField(); i++ { + skipField := false + fieldName := v.Type().Field(i).Name + for _, pattern := range fc.fuzzer.skipFieldPatterns { + if pattern.MatchString(fieldName) { + skipField = true + break + } + } + if !skipField { + fc.doFuzz(v.Field(i), 0) + } + } + case reflect.Chan: + fallthrough + case reflect.Func: + fallthrough + case reflect.Interface: + fallthrough + default: + panic(fmt.Sprintf("Can't handle %#v", v.Interface())) + } +} + +// tryCustom searches for custom handlers, and returns true iff it finds a match +// and successfully randomizes v. +func (fc *fuzzerContext) tryCustom(v reflect.Value) bool { + // First: see if we have a fuzz function for it. + doCustom, ok := fc.fuzzer.fuzzFuncs[v.Type()] + if !ok { + // Second: see if it can fuzz itself. + if v.CanInterface() { + intf := v.Interface() + if fuzzable, ok := intf.(Interface); ok { + fuzzable.Fuzz(Continue{fc: fc, Rand: fc.fuzzer.r}) + return true + } + } + // Finally: see if there is a default fuzz function. + doCustom, ok = fc.fuzzer.defaultFuzzFuncs[v.Type()] + if !ok { + return false + } + } + + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.New(v.Type().Elem())) + } + case reflect.Map: + if v.IsNil() { + if !v.CanSet() { + return false + } + v.Set(reflect.MakeMap(v.Type())) + } + default: + return false + } + + doCustom.Call([]reflect.Value{v, reflect.ValueOf(Continue{ + fc: fc, + Rand: fc.fuzzer.r, + })}) + return true +} + +// Interface represents an object that knows how to fuzz itself. Any time we +// find a type that implements this interface we will delegate the act of +// fuzzing itself. 
+type Interface interface { + Fuzz(c Continue) +} + +// Continue can be passed to custom fuzzing functions to allow them to use +// the correct source of randomness and to continue fuzzing their members. +type Continue struct { + fc *fuzzerContext + + // For convenience, Continue implements rand.Rand via embedding. + // Use this for generating any randomness if you want your fuzzing + // to be repeatable for a given seed. + *rand.Rand +} + +// Fuzz continues fuzzing obj. obj must be a pointer. +func (c Continue) Fuzz(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, 0) +} + +// FuzzNoCustom continues fuzzing obj, except that any custom fuzz function for +// obj's type will not be called and obj will not be tested for fuzz.Interface +// conformance. This applies only to obj and not other instances of obj's +// type. +func (c Continue) FuzzNoCustom(obj interface{}) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + panic("needed ptr!") + } + v = v.Elem() + c.fc.doFuzz(v, flagNoCustomFuzz) +} + +// RandString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func (c Continue) RandString() string { + return randString(c.Rand) +} + +// RandUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func (c Continue) RandUint64() uint64 { + return randUint64(c.Rand) +} + +// RandBool returns true or false randomly. +func (c Continue) RandBool() bool { + return randBool(c.Rand) +} + +func fuzzInt(v reflect.Value, r *rand.Rand) { + v.SetInt(int64(randUint64(r))) +} + +func fuzzUint(v reflect.Value, r *rand.Rand) { + v.SetUint(randUint64(r)) +} + +func fuzzTime(t *time.Time, c Continue) { + var sec, nsec int64 + // Allow for about 1000 years of random time values, which keeps things + // like JSON parsing reasonably happy. + sec = c.Rand.Int63n(1000 * 365 * 24 * 60 * 60) + c.Fuzz(&nsec) + *t = time.Unix(sec, nsec) +} + +var fillFuncMap = map[reflect.Kind]func(reflect.Value, *rand.Rand){ + reflect.Bool: func(v reflect.Value, r *rand.Rand) { + v.SetBool(randBool(r)) + }, + reflect.Int: fuzzInt, + reflect.Int8: fuzzInt, + reflect.Int16: fuzzInt, + reflect.Int32: fuzzInt, + reflect.Int64: fuzzInt, + reflect.Uint: fuzzUint, + reflect.Uint8: fuzzUint, + reflect.Uint16: fuzzUint, + reflect.Uint32: fuzzUint, + reflect.Uint64: fuzzUint, + reflect.Uintptr: fuzzUint, + reflect.Float32: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(float64(r.Float32())) + }, + reflect.Float64: func(v reflect.Value, r *rand.Rand) { + v.SetFloat(r.Float64()) + }, + reflect.Complex64: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.Complex128: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, + reflect.String: func(v reflect.Value, r *rand.Rand) { + v.SetString(randString(r)) + }, + reflect.UnsafePointer: func(v reflect.Value, r *rand.Rand) { + panic("unimplemented") + }, +} + +// randBool returns true or false randomly. +func randBool(r *rand.Rand) bool { + if r.Int()&1 == 1 { + return true + } + return false +} + +type charRange struct { + first, last rune +} + +// choose returns a random unicode character from the given range, using the +// given randomness source. 
+func (r *charRange) choose(rand *rand.Rand) rune { + count := int64(r.last - r.first) + return r.first + rune(rand.Int63n(count)) +} + +var unicodeRanges = []charRange{ + {' ', '~'}, // ASCII characters + {'\u00a0', '\u02af'}, // Multi-byte encoded characters + {'\u4e00', '\u9fff'}, // Common CJK (even longer encodings) +} + +// randString makes a random string up to 20 characters long. The returned string +// may include a variety of (valid) UTF-8 encodings. +func randString(r *rand.Rand) string { + n := r.Intn(20) + runes := make([]rune, n) + for i := range runes { + runes[i] = unicodeRanges[r.Intn(len(unicodeRanges))].choose(r) + } + return string(runes) +} + +// randUint64 makes random 64 bit numbers. +// Weirdly, rand doesn't have a function that gives you 64 random bits. +func randUint64(r *rand.Rand) uint64 { + return uint64(r.Uint32())<<32 | uint64(r.Uint32()) +} diff --git a/vendor/github.com/google/gofuzz/go.mod b/vendor/github.com/google/gofuzz/go.mod new file mode 100644 index 0000000..8ec4fe9 --- /dev/null +++ b/vendor/github.com/google/gofuzz/go.mod @@ -0,0 +1,3 @@ +module github.com/google/gofuzz + +go 1.12 diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 0000000..6b0b127 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
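For orientation while reviewing the vendored gofuzz package above: a minimal usage sketch, illustrative only and not part of this patch, of the API defined in fuzz.go (New, NilChance, NumElements, Funcs, Fuzz, Continue). The MyConfig type and its fields are invented for the example.

    // Illustrative sketch only; MyConfig is a made-up type, not part of this patch.
    package main

    import (
        "fmt"

        fuzz "github.com/google/gofuzz"
    )

    type MyConfig struct {
        Name    string
        Retries int
        Tags    map[string]string
    }

    func main() {
        f := fuzz.New().
            NilChance(0.1).    // roughly 10% of maps/pointers/slices are left nil
            NumElements(1, 3). // non-nil maps/slices get 1 to 3 elements
            Funcs(func(s *string, c fuzz.Continue) {
                // custom string fuzzer, used for every string the Fuzzer fills
                *s = "tok-" + c.RandString()
            })

        var cfg MyConfig
        f.Fuzz(&cfg) // recursively fills cfg, honoring the custom func above
        fmt.Printf("%+v\n", cfg)
    }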
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go new file mode 100644 index 0000000..af6c0ee --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -0,0 +1,8789 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. 
+ +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v3" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := compiler.BoolForScalarNode(in) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + matched = true + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. +func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes := compiler.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. +func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value 
for name: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. 
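As a reviewing aid, illustrative only and not part of this patch: every generated constructor in OpenAPIv2.go follows the pattern just seen in NewApiKeySecurity, unpacking a yaml.Node mapping, checking required and allowed keys, filling the typed struct, and collecting x- vendor extensions. The sketch below drives one constructor directly, assuming the two-argument compiler.NewContext shown above accepts a nil parent for the root; the YAML literal and the context name are invented for the example.

    // Illustrative sketch only; the YAML literal and context name are assumptions.
    package main

    import (
        "fmt"

        openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
        "github.com/googleapis/gnostic/compiler"
        yaml "gopkg.in/yaml.v3"
    )

    func main() {
        src := []byte("type: apiKey\nname: X-API-Key\nin: header\n")

        var doc yaml.Node
        if err := yaml.Unmarshal(src, &doc); err != nil {
            panic(err)
        }
        // yaml.v3 wraps the parsed mapping in a document node; unwrap it.
        node := &doc
        if doc.Kind == yaml.DocumentNode && len(doc.Content) > 0 {
            node = doc.Content[0]
        }

        sec, err := openapi_v2.NewApiKeySecurity(node, compiler.NewContext("apiKeySecurity", nil))
        if err != nil {
            // missing or invalid keys come back as an aggregated compiler error group
            panic(err)
        }
        fmt.Println(sec.Type, sec.Name, sec.In) // apiKey X-API-Key header
    }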
+func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
+func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = 
append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if possible, returning an error if not. +func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
+func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
+func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := compiler.SequenceNodeForNode(v5) + if ok { + x.Schemes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := compiler.SequenceNodeForNode(v6) + if ok { + x.Consumes = compiler.StringArrayForSequenceNode(v) + } else { + message := 
fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := compiler.SequenceNodeForNode(v7) + if ok { + x.Produces = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := compiler.SequenceNodeForNode(v14) + if ok { + for _, item := range a.Content { + y, err := NewTag(item, compiler.NewContext("tags", context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = 
resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + 
pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := compiler.SequenceNodeForNode(v5) + if ok { + x.Required = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + 
errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. 
+func NewFormDataParameterSubSchema(in *yaml.Node, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := 
compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := compiler.IntForScalarNode(v16) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + 
if v17 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v17) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := compiler.IntForScalarNode(v19) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v21) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. 
+func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for 
maximum: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := compiler.IntForScalarNode(v10) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := compiler.IntForScalarNode(v11) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := compiler.IntForScalarNode(v13) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v16) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := 
compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = compiler.StringForScalarNode(v18) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. +func NewHeaderParameterSubSchema(in *yaml.Node, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil 
{ + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + v, ok := compiler.FloatForScalarNode(v12) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) + 
errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v16) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := compiler.IntForScalarNode(v17) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v19) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v20) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + v, ok := compiler.FloatForScalarNode(v21) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if 
handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. +func NewHeaders(in *yaml.Node, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in *yaml.Node, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 4; + v4 := 
compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, compiler.NewContext("license", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in *yaml.Node, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("", context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. 
+func NewJsonReference(in *yaml.Node, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. +func NewLicense(in *yaml.Node, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = 
string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. +func NewNamedAny(in *yaml.Node, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in *yaml.Node, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in *yaml.Node, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in *yaml.Node, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in *yaml.Node, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in *yaml.Node, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in *yaml.Node, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in *yaml.Node, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in *yaml.Node, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
+func NewNonBodyParameter(in *yaml.Node, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
+func NewOauth2AccessCodeSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, 
"tokenUrl") + if v4 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check 
for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. 
+func NewOauth2PasswordSecurity(in *yaml.Node, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if 
handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in *yaml.Node, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedString{} + pair.Name = k + pair.Value, _ = compiler.StringForScalarNode(v) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. +func NewOperation(in *yaml.Node, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := compiler.SequenceNodeForNode(v1) + if ok { + x.Tags = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, 
compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := compiler.SequenceNodeForNode(v6) + if ok { + x.Produces = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := compiler.SequenceNodeForNode(v7) + if ok { + x.Consumes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := compiler.SequenceNodeForNode(v8) + if ok { + for _, item := range a.Content { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := compiler.SequenceNodeForNode(v10) + if ok { + x.Schemes = compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := compiler.SequenceNodeForNode(v12) + if ok { + for _, item := range a.Content { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = 
append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. +func NewParameter(in *yaml.Node, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in *yaml.Node, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
+func NewParametersItem(in *yaml.Node, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. +func NewPathItem(in *yaml.Node, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 2; + v2 := compiler.MapValueForKey(m, "get") + if v2 != nil { + var err error + x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 3; + v3 := compiler.MapValueForKey(m, "put") + if v3 != nil { + var err error + x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 4; + v4 := compiler.MapValueForKey(m, "post") + if v4 != nil { + var err error + x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 5; + v5 := compiler.MapValueForKey(m, "delete") + if v5 != nil { + var err error + x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 6; + v6 := compiler.MapValueForKey(m, "options") + if v6 != nil { + var err error + x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 7; + v7 := compiler.MapValueForKey(m, "head") + if 
v7 != nil { + var err error + x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 8; + v8 := compiler.MapValueForKey(m, "patch") + if v8 != nil { + var err error + x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated ParametersItem parameters = 9; + v9 := compiler.MapValueForKey(m, "parameters") + if v9 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := compiler.SequenceNodeForNode(v9) + if ok { + for _, item := range a.Content { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. 
+func NewPathParameterSubSchema(in *yaml.Node, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = compiler.StringForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = 
compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + v, ok := compiler.FloatForScalarNode(v10) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + v, ok := compiler.FloatForScalarNode(v12) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v16) + if !ok { + message 
:= fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := compiler.IntForScalarNode(v17) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v19) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v20) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + v, ok := compiler.FloatForScalarNode(v21) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. 
+func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
+func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has 
unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + v, ok := compiler.FloatForScalarNode(v8) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := compiler.IntForScalarNode(v10) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := compiler.IntForScalarNode(v11) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := compiler.IntForScalarNode(v13) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v15) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v16) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + v, ok := compiler.FloatForScalarNode(v17) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated 
NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
+func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = compiler.BoolForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = compiler.StringForScalarNode(v6) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil 
{ + x.Format, ok = compiler.StringForScalarNode(v7) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = compiler.StringForScalarNode(v9) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + v, ok := compiler.FloatForScalarNode(v11) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + v, ok := compiler.FloatForScalarNode(v13) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := compiler.IntForScalarNode(v16) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = 
compiler.StringForScalarNode(v17) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := compiler.IntForScalarNode(v19) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v20) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v21) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + v, ok := compiler.FloatForScalarNode(v22) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. 
+func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "examples", "headers", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaItem schema = 2; + v2 := compiler.MapValueForKey(m, "schema") + if v2 != nil { + var err error + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // Headers headers = 3; + v3 := compiler.MapValueForKey(m, "headers") + if v3 != nil { + var err error + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) + if err != nil { + errors = append(errors, err) + } + } + // Examples examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
+func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) { + errors := make([]error, 0) + x := &ResponseDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponse additional_properties = 1; + // MAP: Response + x.AdditionalProperties = make([]*NamedResponse, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedResponse{} + pair.Name = k + var err error + pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. +func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) { + errors := make([]error, 0) + x := &ResponseValue{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
+func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedResponseValue response_code = 1; + // MAP: ResponseValue ^([0-9]{3})$|^(default)$ + x.ResponseCode = make([]*NamedResponseValue, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if pattern2.MatchString(k) { + pair := &NamedResponseValue{} + pair.Name = k + var err error + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseCode = append(x.ResponseCode, pair) + } + } + } + // repeated NamedAny vendor_extension = 2; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. 
+func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = compiler.StringForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + v, ok := compiler.FloatForScalarNode(v6) + if ok { + x.MultipleOf = v + } else { + message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + v, ok := compiler.FloatForScalarNode(v7) + if ok { + x.Maximum = v + } else { + message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 8; + v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + v9 := 
compiler.MapValueForKey(m, "minimum") + if v9 != nil { + v, ok := compiler.FloatForScalarNode(v9) + if ok { + x.Minimum = v + } else { + message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := compiler.IntForScalarNode(v11) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := compiler.IntForScalarNode(v12) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = compiler.StringForScalarNode(v13) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := compiler.IntForScalarNode(v14) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := compiler.IntForScalarNode(v15) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v15)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = compiler.BoolForScalarNode(v16) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := compiler.IntForScalarNode(v17) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := compiler.IntForScalarNode(v18) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := compiler.SequenceNodeForNode(v19) + if ok { + x.Required = 
compiler.StringArrayForSequenceNode(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := compiler.SequenceNodeForNode(v20) + if ok { + for _, item := range a.Content { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := compiler.SequenceNodeForNode(v24) + if ok { + for _, item := range a.Content { + y, err := NewSchema(item, compiler.NewContext("allOf", context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = compiler.StringForScalarNode(v26) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = compiler.BoolForScalarNode(v27) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < 
len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. +func NewSchemaItem(in *yaml.Node, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. +func NewSecurityDefinitions(in *yaml.Node, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
+func NewSecurityDefinitionsItem(in *yaml.Node, context *compiler.Context) (*SecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &SecurityDefinitionsItem{} + matched := false + // BasicAuthenticationSecurity basic_authentication_security = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // ApiKeySecurity api_key_security = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ImplicitSecurity oauth2_implicit_security = 3; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2PasswordSecurity oauth2_password_security = 4; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ApplicationSecurity oauth2_application_security = 5; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
+func NewSecurityRequirement(in *yaml.Node, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStringArray creates an object of type StringArray if possible, returning an error if not. +func NewStringArray(in *yaml.Node, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + x.Value = make([]string, 0) + for _, node := range in.Content { + s, _ := compiler.StringForScalarNode(node) + x.Value = append(x.Value, s) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTag creates an object of type Tag if possible, returning an error if not. +func NewTag(in *yaml.Node, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := 
compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. +func NewTypeItem(in *yaml.Node, context *compiler.Context) (*TypeItem, error) { + errors := make([]error, 0) + x := &TypeItem{} + v1 := in + switch v1.Kind { + case yaml.ScalarNode: + x.Value = make([]string, 0) + x.Value = append(x.Value, v1.Value) + case yaml.SequenceNode: + x.Value = make([]string, 0) + for _, v := range v1.Content { + value := v.Value + ok := v.Kind == yaml.ScalarNode + if ok { + x.Value = append(x.Value, value) + } else { + message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) + errors = append(errors, compiler.NewError(context, message)) + } + } + default: + message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. +func NewVendorExtension(in *yaml.Node, context *compiler.Context) (*VendorExtension, error) { + errors := make([]error, 0) + x := &VendorExtension{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewXml creates an object of type Xml if possible, returning an error if not. 
+func NewXml(in *yaml.Node, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = compiler.StringForScalarNode(v1) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = compiler.StringForScalarNode(v2) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %s", compiler.Display(v2)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = compiler.StringForScalarNode(v3) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %s", compiler.Display(v3)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = compiler.BoolForScalarNode(v4) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %s", compiler.Display(v4)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = compiler.BoolForScalarNode(v5) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %s", compiler.Display(v5)) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for i := 0; i < len(m.Content); i += 2 { + k, ok := compiler.StringForScalarNode(m.Content[i]) + if ok { + v := m.Content[i+1] + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.CallExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes := compiler.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. 
+func (m *AdditionalPropertiesItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. +func (m *Any) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ApiKeySecurity objects. +func (m *ApiKeySecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BodyParameter objects. +func (m *BodyParameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Default objects. +func (m *Default) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Definitions objects. +func (m *Definitions) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
+func (m *Document) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Examples objects. +func (m *Examples) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FileSchema objects. +func (m *FileSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Headers objects. +func (m *Headers) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. +func (m *Info) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. 
+func (m *ItemsItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside JsonReference objects. +func (m *JsonReference) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeader objects. +func (m *NamedHeader) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameter objects. +func (m *NamedParameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponse objects. +func (m *NamedResponse) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseValue objects. +func (m *NamedResponseValue) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchema objects. 
+func (m *NamedSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. +func (m *NamedString) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NonBodyParameter objects. +func (m *NonBodyParameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2Scopes objects. +func (m *Oauth2Scopes) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. +func (m *Operation) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*Parameter_BodyParameter) + if ok { + _, err := p.BodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*Parameter_NonBodyParameter) + if ok { + _, err := p.NonBodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterDefinitions objects. +func (m *ParameterDefinitions) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersItem objects. 
+func (m *ParametersItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. +func (m *PathItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathParameterSubSchema objects. +func (m *PathParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. 
+func (m *Paths) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PrimitivesItems objects. +func (m *PrimitivesItems) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside QueryParameterSubSchema objects. +func (m *QueryParameterSubSchema) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseDefinitions objects. 
+func (m *ResponseDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside ResponseValue objects.
+func (m *ResponseValue) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*ResponseValue_Response)
+		if ok {
+			_, err := p.Response.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*ResponseValue_JsonReference)
+		if ok {
+			info, err := p.JsonReference.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			} else if info != nil {
+				n, err := NewResponseValue(info, nil)
+				if err != nil {
+					return nil, err
+				} else if n != nil {
+					*m = *n
+					return nil, nil
+				}
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Responses objects.
+func (m *Responses) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.ResponseCode {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Schema objects.
+func (m *Schema) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	if m.XRef != "" {
+		info, err := compiler.ReadInfoForRef(root, m.XRef)
+		if err != nil {
+			return nil, err
+		}
+		if info != nil {
+			replacement, err := NewSchema(info, nil)
+			if err == nil {
+				*m = *replacement
+				return m.ResolveReferences(root)
+			}
+		}
+		return info, nil
+	}
+	if m.Default != nil {
+		_, err := m.Default.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.Enum {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.AdditionalProperties != nil {
+		_, err := m.AdditionalProperties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Type != nil {
+		_, err := m.Type.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Items != nil {
+		_, err := m.Items.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.AllOf {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	if m.Properties != nil {
+		_, err := m.Properties.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Xml != nil {
+		_, err := m.Xml.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.ExternalDocs != nil {
+		_, err := m.ExternalDocs.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	if m.Example != nil {
+		_, err := m.Example.ResolveReferences(root)
+		if err != nil {
+			errors = append(errors, err)
+		}
+	}
+	for _, item := range m.VendorExtension {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SchemaItem objects.
+func (m *SchemaItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SchemaItem_Schema)
+		if ok {
+			_, err := p.Schema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SchemaItem_FileSchema)
+		if ok {
+			_, err := p.FileSchema.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitions objects.
+func (m *SecurityDefinitions) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityDefinitionsItem objects.
+func (m *SecurityDefinitionsItem) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity)
+		if ok {
+			_, err := p.BasicAuthenticationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity)
+		if ok {
+			_, err := p.ApiKeySecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity)
+		if ok {
+			_, err := p.Oauth2ImplicitSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity)
+		if ok {
+			_, err := p.Oauth2PasswordSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity)
+		if ok {
+			_, err := p.Oauth2ApplicationSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	{
+		p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)
+		if ok {
+			_, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root)
+			if err != nil {
+				return nil, err
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside SecurityRequirement objects.
+func (m *SecurityRequirement) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	for _, item := range m.AdditionalProperties {
+		if item != nil {
+			_, err := item.ResolveReferences(root)
+			if err != nil {
+				errors = append(errors, err)
+			}
+		}
+	}
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside StringArray objects.
+func (m *StringArray) ResolveReferences(root string) (*yaml.Node, error) {
+	errors := make([]error, 0)
+	return nil, compiler.NewErrorGroupOrNil(errors)
+}
+
+// ResolveReferences resolves references found inside Tag objects.
+func (m *Tag) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside TypeItem objects. +func (m *TypeItem) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside VendorExtension objects. +func (m *VendorExtension) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (*yaml.Node, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return compiler.NewScalarNodeForBool(v1.Boolean) + } + return nil +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. +func (m *Any) ToRawInfo() *yaml.Node { + var err error + var node yaml.Node + err = yaml.Unmarshal([]byte(m.Yaml), &node) + if err == nil { + if node.Kind == yaml.DocumentNode { + return node.Content[0] + } + return &node + } else { + return nil + } + return nil +} + +// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. +func (m *ApiKeySecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. +func (m *BasicAuthenticationSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. +func (m *BodyParameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. 
+func (m *Contact) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Url != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + } + if m.Email != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("email")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Email)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Default suitable for JSON or YAML export. +func (m *Default) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. +func (m *Definitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. +func (m *Document) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("swagger")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Swagger)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("info")) + info.Content = append(info.Content, m.Info.ToRawInfo()) + if m.Host != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("host")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Host)) + } + if m.BasePath != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("basePath")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.BasePath)) + } + if len(m.Schemes) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) + } + if len(m.Consumes) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) + } + if len(m.Produces) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("paths")) + info.Content = append(info.Content, m.Paths.ToRawInfo()) + if m.Definitions != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("definitions")) + info.Content = append(info.Content, m.Definitions.ToRawInfo()) + } + if m.Parameters != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, m.Parameters.ToRawInfo()) + } + if m.Responses != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) + } + if len(m.Security) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Security { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) + } + if m.SecurityDefinitions != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("securityDefinitions")) + info.Content = append(info.Content, m.SecurityDefinitions.ToRawInfo()) + } + if len(m.Tags) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Tags { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, items) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Examples suitable for JSON or YAML export. +func (m *Examples) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. 
+func (m *FileSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Title != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if len(m.Required) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + if m.ReadOnly != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = append(info.Content, m.Example.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
+func (m *FormDataParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.AllowEmptyValue != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = 
append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. +func (m *Header) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
+func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, 
compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Headers suitable for JSON or YAML export. +func (m *Headers) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. +func (m *Info) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("version")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.TermsOfService != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService)) + } + if m.Contact != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("contact")) + info.Content = append(info.Content, m.Contact.ToRawInfo()) + } + if m.License != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("license")) + info.Content = append(info.Content, m.License.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. 
+func (m *ItemsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Schema) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Schema { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, items) + } + return info +} + +// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. +func (m *JsonReference) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. +func (m *License) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + if m.Url != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. +func (m *NamedHeader) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. +func (m *NamedParameter) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. 
+func (m *NamedPathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. +func (m *NamedResponse) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. +func (m *NamedResponseValue) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. +func (m *NamedSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. +func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Value != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value)) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. 
+func (m *NamedStringArray) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. +func (m *NonBodyParameter) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // NonBodyParameter + // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeaderParameterSubSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFormDataParameterSubSchema() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetQueryParameterSubSchema() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetPathParameterSubSchema() + if v3 != nil { + return v3.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. +func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. +func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. +func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. +func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) + if m.Scopes != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) + info.Content = append(info.Content, m.Scopes.ToRawInfo()) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. +func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. +func (m *Operation) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Tags) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags)) + } + if m.Summary != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.OperationId != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) + } + if len(m.Produces) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) + } + if len(m.Consumes) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) + info.Content = append(info.Content, m.Responses.ToRawInfo()) + if len(m.Schemes) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) + } + if m.Deprecated != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) + } + if len(m.Security) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Security { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) + info.Content = append(info.Content, items) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. +func (m *ParametersItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. 
+func (m *PathItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.XRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + } + if m.Get != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) + info.Content = append(info.Content, m.Get.ToRawInfo()) + } + if m.Put != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) + info.Content = append(info.Content, m.Put.ToRawInfo()) + } + if m.Post != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) + info.Content = append(info.Content, m.Post.ToRawInfo()) + } + if m.Delete != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) + info.Content = append(info.Content, m.Delete.ToRawInfo()) + } + if m.Options != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) + info.Content = append(info.Content, m.Options.ToRawInfo()) + } + if m.Head != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) + info.Content = append(info.Content, m.Head.ToRawInfo()) + } + if m.Patch != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) + info.Content = append(info.Content, m.Patch.ToRawInfo()) + } + if len(m.Parameters) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Parameters { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) + info.Content = append(info.Content, items) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. +func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. 
+ info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + 
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. +func (m *Paths) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.Path != nil { + for _, item := range m.Path { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. +func (m *PrimitivesItems) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = 
append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. +func (m *Properties) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
+func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Required != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) + } + if m.In != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.AllowEmptyValue != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) + } + if m.Type != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Items != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, m.Items.ToRawInfo()) + } + if m.CollectionFormat != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = 
append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. +func (m *Response) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + if m.Schema != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) + info.Content = append(info.Content, m.Schema.ToRawInfo()) + } + if m.Headers != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) + info.Content = append(info.Content, m.Headers.ToRawInfo()) + } + if m.Examples != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) + info.Content = append(info.Content, m.Examples.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. +func (m *ResponseDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. 
+func (m *Responses) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. +func (m *Schema) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.XRef != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) + } + if m.Format != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) + } + if m.Title != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) + } + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.Default != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) + info.Content = append(info.Content, m.Default.ToRawInfo()) + } + if m.MultipleOf != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) + } + if m.Maximum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) + } + if m.ExclusiveMaximum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) + } + if m.Minimum != 0.0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) + } + if m.ExclusiveMinimum != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) + } + if m.MaxLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) + } + if m.MinLength != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) + } + if m.Pattern != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) + } + if m.MaxItems != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) + } + if m.MinItems != 0 { + info.Content = append(info.Content, 
compiler.NewScalarNodeForString("minItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) + } + if m.UniqueItems != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) + } + if m.MaxProperties != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties)) + } + if m.MinProperties != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties")) + info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties)) + } + if len(m.Required) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) + } + if len(m.Enum) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.Enum { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) + info.Content = append(info.Content, items) + } + if m.AdditionalProperties != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties")) + info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo()) + } + if m.Type != nil { + if len(m.Type.Value) == 1 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0])) + } else { + info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value)) + } + } + if m.Items != nil { + items := compiler.NewSequenceNode() + for _, item := range m.Items.Schema { + items.Content = append(items.Content, item.ToRawInfo()) + } + if len(items.Content) == 1 { + items = items.Content[0] + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) + info.Content = append(info.Content, items) + } + if len(m.AllOf) != 0 { + items := compiler.NewSequenceNode() + for _, item := range m.AllOf { + items.Content = append(items.Content, item.ToRawInfo()) + } + info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf")) + info.Content = append(info.Content, items) + } + if m.Properties != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("properties")) + info.Content = append(info.Content, m.Properties.ToRawInfo()) + } + if m.Discriminator != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) + } + if m.ReadOnly != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) + } + if m.Xml != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("xml")) + info.Content = append(info.Content, m.Xml.ToRawInfo()) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.Example != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) + info.Content = 
append(info.Content, m.Example.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. +func (m *SchemaItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SchemaItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFileSchema() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. +func (m *SecurityDefinitions) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. +func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. 
+func (m *StringArray) ToRawInfo() *yaml.Node { + return compiler.NewSequenceNodeForStringArray(m.Value) +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + // always include this required field. + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + if m.Description != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) + } + if m.ExternalDocs != nil { + info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) + info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. +func (m *TypeItem) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if len(m.Value) != 0 { + info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) + info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value)) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. +func (m *VendorExtension) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. 
+func (m *Xml) ToRawInfo() *yaml.Node { + info := compiler.NewMappingNode() + if m == nil { + return info + } + if m.Name != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) + } + if m.Namespace != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) + } + if m.Prefix != "" { + info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) + info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) + } + if m.Attribute != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) + } + if m.Wrapped != false { + info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) + info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) + info.Content = append(info.Content, item.Value.ToRawInfo()) + } + } + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go new file mode 100644 index 0000000..559ddea --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -0,0 +1,7347 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.12.3 +// source: openapiv2/OpenAPIv2.proto + +package openapi_v2 + +import ( + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
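The ToRawInfo methods above build *yaml.Node mappings by hand, appending key/value pairs to info.Content and skipping optional fields that hold their zero value. A minimal sketch of rendering one of these nodes, assuming the vendored openapi_v2 import path from this patch and gopkg.in/yaml.v3 as the yaml package (both assumptions for illustration, not part of the vendored file):

package main

import (
	"fmt"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"gopkg.in/yaml.v3"
)

func main() {
	// Tag.ToRawInfo always emits "name" and only emits "description" when non-empty.
	tag := &openapi_v2.Tag{Name: "pets", Description: "Operations about pets"}
	node := tag.ToRawInfo() // *yaml.Node mapping node

	out, err := yaml.Marshal(node)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Roughly:
	// name: pets
	// description: Operations about pets
}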
+const _ = proto.ProtoPackageIsVersion4 + +type AdditionalPropertiesItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (x *AdditionalPropertiesItem) Reset() { + *x = AdditionalPropertiesItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AdditionalPropertiesItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AdditionalPropertiesItem) ProtoMessage() {} + +func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0} +} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (x *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +} + +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} + +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
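AdditionalPropertiesItem above is a oneof wrapper: its Oneof field holds either an *AdditionalPropertiesItem_Schema or an *AdditionalPropertiesItem_Boolean, and each typed getter type-asserts the branch and falls back to the zero value. A small sketch of that pattern (variable names are made up for illustration):

	item := &openapi_v2.AdditionalPropertiesItem{
		Oneof: &openapi_v2.AdditionalPropertiesItem_Boolean{Boolean: true},
	}

	// GetBoolean returns the value only when the boolean branch is set.
	allowed := item.GetBoolean() // true
	schema := item.GetSchema()   // nil, since the schema branch is not set
	_, _ = allowed, schema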
+func (*Any) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1} +} + +func (x *Any) GetValue() *any.Any { + if x != nil { + return x.Value + } + return nil +} + +func (x *Any) GetYaml() string { + if x != nil { + return x.Yaml + } + return "" +} + +type ApiKeySecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *ApiKeySecurity) Reset() { + *x = ApiKeySecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ApiKeySecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ApiKeySecurity) ProtoMessage() {} + +func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead. +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2} +} + +func (x *ApiKeySecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *ApiKeySecurity) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *ApiKeySecurity) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *ApiKeySecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *BasicAuthenticationSecurity) Reset() { + *x = BasicAuthenticationSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BasicAuthenticationSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BasicAuthenticationSecurity) ProtoMessage() {} + +func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// 
Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead. +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3} +} + +func (x *BasicAuthenticationSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *BasicAuthenticationSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type BodyParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *BodyParameter) Reset() { + *x = BodyParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *BodyParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BodyParameter) ProtoMessage() {} + +func (x *BodyParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead. +func (*BodyParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4} +} + +func (x *BodyParameter) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *BodyParameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *BodyParameter) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *BodyParameter) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *BodyParameter) GetSchema() *Schema { + if x != nil { + return x.Schema + } + return nil +} + +func (x *BodyParameter) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The identifying name of the contact person/organization. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the contact information. 
+ Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Contact) Reset() { + *x = Contact{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Contact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Contact) ProtoMessage() {} + +func (x *Contact) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Contact.ProtoReflect.Descriptor instead. +func (*Contact) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5} +} + +func (x *Contact) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Contact) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *Contact) GetEmail() string { + if x != nil { + return x.Email + } + return "" +} + +func (x *Contact) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Default struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Default) Reset() { + *x = Default{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Default) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Default) ProtoMessage() {} + +func (x *Default) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Default.ProtoReflect.Descriptor instead. +func (*Default) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6} +} + +func (x *Default) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. 
+type Definitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Definitions) Reset() { + *x = Definitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Definitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Definitions) ProtoMessage() {} + +func (x *Definitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Definitions.ProtoReflect.Descriptor instead. +func (*Definitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7} +} + +func (x *Definitions) GetAdditionalProperties() []*NamedSchema { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Document struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` + // A list of MIME types the API can produce. 
+ Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Document) Reset() { + *x = Document{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Document) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Document) ProtoMessage() {} + +func (x *Document) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Document.ProtoReflect.Descriptor instead. 
+func (*Document) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8} +} + +func (x *Document) GetSwagger() string { + if x != nil { + return x.Swagger + } + return "" +} + +func (x *Document) GetInfo() *Info { + if x != nil { + return x.Info + } + return nil +} + +func (x *Document) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *Document) GetBasePath() string { + if x != nil { + return x.BasePath + } + return "" +} + +func (x *Document) GetSchemes() []string { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Document) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Document) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Document) GetPaths() *Paths { + if x != nil { + return x.Paths + } + return nil +} + +func (x *Document) GetDefinitions() *Definitions { + if x != nil { + return x.Definitions + } + return nil +} + +func (x *Document) GetParameters() *ParameterDefinitions { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Document) GetResponses() *ResponseDefinitions { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Document) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Document) GetSecurityDefinitions() *SecurityDefinitions { + if x != nil { + return x.SecurityDefinitions + } + return nil +} + +func (x *Document) GetTags() []*Tag { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Document) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Document) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Examples struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Examples) Reset() { + *x = Examples{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Examples) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Examples) ProtoMessage() {} + +func (x *Examples) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Examples.ProtoReflect.Descriptor instead. 
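Each generated getter above checks its receiver for nil before dereferencing, so getters can be chained on a partially populated (or entirely nil) Document without explicit nil checks. A brief sketch under that assumption, using only getters defined in this file:

	var doc *openapi_v2.Document // deliberately nil

	// Safe: nil receivers yield zero values rather than panicking.
	swagger := doc.GetSwagger()             // ""
	defs := doc.GetDefinitions()            // nil
	props := defs.GetAdditionalProperties() // nil again, same guard
	_, _, _ = swagger, defs, props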
+func (*Examples) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9} +} + +func (x *Examples) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// information about external documentation +type ExternalDocs struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *ExternalDocs) Reset() { + *x = ExternalDocs{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExternalDocs) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExternalDocs) ProtoMessage() {} + +func (x *ExternalDocs) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. +func (*ExternalDocs) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10} +} + +func (x *ExternalDocs) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *ExternalDocs) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *ExternalDocs) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. 
+type FileSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *FileSchema) Reset() { + *x = FileSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FileSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FileSchema) ProtoMessage() {} + +func (x *FileSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead. +func (*FileSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11} +} + +func (x *FileSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *FileSchema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *FileSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *FileSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *FileSchema) GetRequired() []string { + if x != nil { + return x.Required + } + return nil +} + +func (x *FileSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *FileSchema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *FileSchema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *FileSchema) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *FileSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *FormDataParameterSubSchema) Reset() { + *x = FormDataParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *FormDataParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*FormDataParameterSubSchema) ProtoMessage() {} + +func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead. 
+func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12} +} + +func (x *FormDataParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *FormDataParameterSubSchema) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *FormDataParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *FormDataParameterSubSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue + } + return false +} + +func (x *FormDataParameterSubSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *FormDataParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *FormDataParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *FormDataParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *FormDataParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *FormDataParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *FormDataParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *FormDataParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *FormDataParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *FormDataParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Header struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 
`protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Header) Reset() { + *x = Header{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Header) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Header) ProtoMessage() {} + +func (x *Header) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
+func (*Header) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13} +} + +func (x *Header) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Header) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Header) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *Header) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *Header) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *Header) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *Header) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *Header) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *Header) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *Header) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *Header) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *Header) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *Header) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *Header) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Header) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *Header) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *Header) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *Header) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Header) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *HeaderParameterSubSchema) Reset() { + *x = HeaderParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HeaderParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HeaderParameterSubSchema) ProtoMessage() {} + +func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead. 
+func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14} +} + +func (x *HeaderParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *HeaderParameterSubSchema) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *HeaderParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *HeaderParameterSubSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *HeaderParameterSubSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *HeaderParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *HeaderParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *HeaderParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *HeaderParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *HeaderParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *HeaderParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *HeaderParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *HeaderParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *HeaderParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Headers struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Headers) Reset() { + *x = Headers{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Headers) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Headers) ProtoMessage() {} + +func (x *Headers) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Headers.ProtoReflect.Descriptor instead. +func (*Headers) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15} +} + +func (x *Headers) GetAdditionalProperties() []*NamedHeader { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The terms of service for the API. + TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Info) Reset() { + *x = Info{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Info) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Info) ProtoMessage() {} + +func (x *Info) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Info.ProtoReflect.Descriptor instead. 
+func (*Info) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16} +} + +func (x *Info) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Info) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Info) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Info) GetTermsOfService() string { + if x != nil { + return x.TermsOfService + } + return "" +} + +func (x *Info) GetContact() *Contact { + if x != nil { + return x.Contact + } + return nil +} + +func (x *Info) GetLicense() *License { + if x != nil { + return x.License + } + return nil +} + +func (x *Info) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type ItemsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` +} + +func (x *ItemsItem) Reset() { + *x = ItemsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ItemsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ItemsItem) ProtoMessage() {} + +func (x *ItemsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead. +func (*ItemsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17} +} + +func (x *ItemsItem) GetSchema() []*Schema { + if x != nil { + return x.Schema + } + return nil +} + +type JsonReference struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` +} + +func (x *JsonReference) Reset() { + *x = JsonReference{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *JsonReference) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*JsonReference) ProtoMessage() {} + +func (x *JsonReference) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead. 
+func (*JsonReference) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18} +} + +func (x *JsonReference) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *JsonReference) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +type License struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The URL pointing to the license. + Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *License) Reset() { + *x = License{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *License) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*License) ProtoMessage() {} + +func (x *License) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use License.ProtoReflect.Descriptor instead. +func (*License) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19} +} + +func (x *License) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *License) GetUrl() string { + if x != nil { + return x.Url + } + return "" +} + +func (x *License) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedAny) Reset() { + *x = NamedAny{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedAny) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedAny) ProtoMessage() {} + +func (x *NamedAny) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. 
+func (*NamedAny) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20} +} + +func (x *NamedAny) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedAny) GetValue() *Any { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedHeader) Reset() { + *x = NamedHeader{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedHeader) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedHeader) ProtoMessage() {} + +func (x *NamedHeader) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead. +func (*NamedHeader) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21} +} + +func (x *NamedHeader) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedHeader) GetValue() *Header { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedParameter) Reset() { + *x = NamedParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedParameter) ProtoMessage() {} + +func (x *NamedParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead. +func (*NamedParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22} +} + +func (x *NamedParameter) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedParameter) GetValue() *Parameter { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedPathItem) Reset() { + *x = NamedPathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedPathItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedPathItem) ProtoMessage() {} + +func (x *NamedPathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. +func (*NamedPathItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23} +} + +func (x *NamedPathItem) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedPathItem) GetValue() *PathItem { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedResponse) Reset() { + *x = NamedResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedResponse) ProtoMessage() {} + +func (x *NamedResponse) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead. +func (*NamedResponse) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24} +} + +func (x *NamedResponse) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedResponse) GetValue() *Response { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
+type NamedResponseValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedResponseValue) Reset() { + *x = NamedResponseValue{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedResponseValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedResponseValue) ProtoMessage() {} + +func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead. +func (*NamedResponseValue) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25} +} + +func (x *NamedResponseValue) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedResponseValue) GetValue() *ResponseValue { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedSchema) Reset() { + *x = NamedSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedSchema) ProtoMessage() {} + +func (x *NamedSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead. +func (*NamedSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26} +} + +func (x *NamedSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedSchema) GetValue() *Schema { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedSecurityDefinitionsItem) Reset() { + *x = NamedSecurityDefinitionsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedSecurityDefinitionsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} + +func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead. +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27} +} + +func (x *NamedSecurityDefinitionsItem) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if x != nil { + return x.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedString) Reset() { + *x = NamedString{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedString) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedString) ProtoMessage() {} + +func (x *NamedString) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. +func (*NamedString) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28} +} + +func (x *NamedString) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedString) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
+type NamedStringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Map key + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *NamedStringArray) Reset() { + *x = NamedStringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NamedStringArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NamedStringArray) ProtoMessage() {} + +func (x *NamedStringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. +func (*NamedStringArray) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29} +} + +func (x *NamedStringArray) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *NamedStringArray) GetValue() *StringArray { + if x != nil { + return x.Value + } + return nil +} + +type NonBodyParameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (x *NonBodyParameter) Reset() { + *x = NonBodyParameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NonBodyParameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NonBodyParameter) ProtoMessage() {} + +func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead. 
+func (*NonBodyParameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30} +} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` +} + +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +type Oauth2AccessCodeSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Oauth2AccessCodeSecurity) Reset() { + *x = Oauth2AccessCodeSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x 
*Oauth2AccessCodeSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} + +func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead. +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31} +} + +func (x *Oauth2AccessCodeSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetFlow() string { + if x != nil { + return x.Flow + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Oauth2ApplicationSecurity) Reset() { + *x = Oauth2ApplicationSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Oauth2ApplicationSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2ApplicationSecurity) ProtoMessage() {} + +func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32} +} + +func (x *Oauth2ApplicationSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Oauth2ApplicationSecurity) GetFlow() string { + if x != nil { + return x.Flow + } + return "" +} + +func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *Oauth2ApplicationSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *Oauth2ApplicationSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Oauth2ImplicitSecurity) Reset() { + *x = Oauth2ImplicitSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Oauth2ImplicitSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2ImplicitSecurity) ProtoMessage() {} + +func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33} +} + +func (x *Oauth2ImplicitSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Oauth2ImplicitSecurity) GetFlow() string { + if x != nil { + return x.Flow + } + return "" +} + +func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if x != nil { + return x.AuthorizationUrl + } + return "" +} + +func (x *Oauth2ImplicitSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Oauth2PasswordSecurity) Reset() { + *x = Oauth2PasswordSecurity{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Oauth2PasswordSecurity) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2PasswordSecurity) ProtoMessage() {} + +func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead. 
+func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34} +} + +func (x *Oauth2PasswordSecurity) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *Oauth2PasswordSecurity) GetFlow() string { + if x != nil { + return x.Flow + } + return "" +} + +func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if x != nil { + return x.Scopes + } + return nil +} + +func (x *Oauth2PasswordSecurity) GetTokenUrl() string { + if x != nil { + return x.TokenUrl + } + return "" +} + +func (x *Oauth2PasswordSecurity) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Oauth2Scopes) Reset() { + *x = Oauth2Scopes{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Oauth2Scopes) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Oauth2Scopes) ProtoMessage() {} + +func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead. +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35} +} + +func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Operation struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` + // The transfer protocol of the API. 
+ Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Operation) Reset() { + *x = Operation{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Operation) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Operation) ProtoMessage() {} + +func (x *Operation) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Operation.ProtoReflect.Descriptor instead. +func (*Operation) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{36} +} + +func (x *Operation) GetTags() []string { + if x != nil { + return x.Tags + } + return nil +} + +func (x *Operation) GetSummary() string { + if x != nil { + return x.Summary + } + return "" +} + +func (x *Operation) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Operation) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Operation) GetOperationId() string { + if x != nil { + return x.OperationId + } + return "" +} + +func (x *Operation) GetProduces() []string { + if x != nil { + return x.Produces + } + return nil +} + +func (x *Operation) GetConsumes() []string { + if x != nil { + return x.Consumes + } + return nil +} + +func (x *Operation) GetParameters() []*ParametersItem { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *Operation) GetResponses() *Responses { + if x != nil { + return x.Responses + } + return nil +} + +func (x *Operation) GetSchemes() []string { + if x != nil { + return x.Schemes + } + return nil +} + +func (x *Operation) GetDeprecated() bool { + if x != nil { + return x.Deprecated + } + return false +} + +func (x *Operation) GetSecurity() []*SecurityRequirement { + if x != nil { + return x.Security + } + return nil +} + +func (x *Operation) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Parameter struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (x *Parameter) Reset() { + *x = Parameter{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Parameter) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Parameter) ProtoMessage() {} + +func (x *Parameter) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if 
ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. +func (*Parameter) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{37} +} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := x.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (x *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := x.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` +} + +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} + +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ParameterDefinitions) Reset() { + *x = ParameterDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParameterDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParameterDefinitions) ProtoMessage() {} + +func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParameterDefinitions.ProtoReflect.Descriptor instead. 
+func (*ParameterDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{38} +} + +func (x *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ParametersItem) Reset() { + *x = ParametersItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ParametersItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ParametersItem) ProtoMessage() {} + +func (x *ParametersItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ParametersItem.ProtoReflect.Descriptor instead. +func (*ParametersItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{39} +} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ParametersItem) GetParameter() *Parameter { + if x, ok := x.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (x *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := x.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` +} + +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} + +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +type PathItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post,proto3" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete,proto3" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PathItem) Reset() { + *x = PathItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathItem) ProtoMessage() {} + +func (x *PathItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. +func (*PathItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{40} +} + +func (x *PathItem) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *PathItem) GetGet() *Operation { + if x != nil { + return x.Get + } + return nil +} + +func (x *PathItem) GetPut() *Operation { + if x != nil { + return x.Put + } + return nil +} + +func (x *PathItem) GetPost() *Operation { + if x != nil { + return x.Post + } + return nil +} + +func (x *PathItem) GetDelete() *Operation { + if x != nil { + return x.Delete + } + return nil +} + +func (x *PathItem) GetOptions() *Operation { + if x != nil { + return x.Options + } + return nil +} + +func (x *PathItem) GetHead() *Operation { + if x != nil { + return x.Head + } + return nil +} + +func (x *PathItem) GetPatch() *Operation { + if x != nil { + return x.Patch + } + return nil +} + +func (x *PathItem) GetParameters() []*ParametersItem { + if x != nil { + return x.Parameters + } + return nil +} + +func (x *PathItem) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PathParameterSubSchema) Reset() { + *x = PathParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PathParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PathParameterSubSchema) ProtoMessage() {} + +func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PathParameterSubSchema.ProtoReflect.Descriptor instead. 
+func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{41} +} + +func (x *PathParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *PathParameterSubSchema) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *PathParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *PathParameterSubSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *PathParameterSubSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PathParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *PathParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *PathParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *PathParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *PathParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *PathParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *PathParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *PathParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *PathParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *PathParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *PathParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *PathParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *PathParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *PathParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *PathParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *PathParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
+type Paths struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` +} + +func (x *Paths) Reset() { + *x = Paths{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Paths) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Paths) ProtoMessage() {} + +func (x *Paths) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Paths.ProtoReflect.Descriptor instead. +func (*Paths) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{42} +} + +func (x *Paths) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +func (x *Paths) GetPath() []*NamedPathItem { + if x != nil { + return x.Path + } + return nil +} + +type PrimitivesItems struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *PrimitivesItems) Reset() { + *x = PrimitivesItems{} + if protoimpl.UnsafeEnabled { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[43] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PrimitivesItems) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PrimitivesItems) ProtoMessage() {} + +func (x *PrimitivesItems) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PrimitivesItems.ProtoReflect.Descriptor instead. +func (*PrimitivesItems) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{43} +} + +func (x *PrimitivesItems) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *PrimitivesItems) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *PrimitivesItems) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *PrimitivesItems) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *PrimitivesItems) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *PrimitivesItems) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *PrimitivesItems) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *PrimitivesItems) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *PrimitivesItems) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *PrimitivesItems) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *PrimitivesItems) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *PrimitivesItems) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *PrimitivesItems) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *PrimitivesItems) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *PrimitivesItems) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *PrimitivesItems) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *PrimitivesItems) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *PrimitivesItems) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Properties struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *Properties) Reset() { + *x = Properties{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Properties) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Properties) ProtoMessage() {} + +func (x *Properties) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] + if 
protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Properties.ProtoReflect.Descriptor instead. +func (*Properties) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{44} +} + +func (x *Properties) GetAdditionalProperties() []*NamedSchema { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *QueryParameterSubSchema) Reset() { + *x = QueryParameterSubSchema{} + if protoimpl.UnsafeEnabled { + mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[45] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *QueryParameterSubSchema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*QueryParameterSubSchema) ProtoMessage() {} + +func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use QueryParameterSubSchema.ProtoReflect.Descriptor instead. +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{45} +} + +func (x *QueryParameterSubSchema) GetRequired() bool { + if x != nil { + return x.Required + } + return false +} + +func (x *QueryParameterSubSchema) GetIn() string { + if x != nil { + return x.In + } + return "" +} + +func (x *QueryParameterSubSchema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *QueryParameterSubSchema) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if x != nil { + return x.AllowEmptyValue + } + return false +} + +func (x *QueryParameterSubSchema) GetType() string { + if x != nil { + return x.Type + } + return "" +} + +func (x *QueryParameterSubSchema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if x != nil { + return x.Items + } + return nil +} + +func (x *QueryParameterSubSchema) GetCollectionFormat() string { + if x != nil { + return x.CollectionFormat + } + return "" +} + +func (x *QueryParameterSubSchema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *QueryParameterSubSchema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *QueryParameterSubSchema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *QueryParameterSubSchema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *QueryParameterSubSchema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *QueryParameterSubSchema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *QueryParameterSubSchema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *QueryParameterSubSchema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *QueryParameterSubSchema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *QueryParameterSubSchema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *QueryParameterSubSchema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type Response 
struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Response) Reset() { + *x = Response{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Response) ProtoMessage() {} + +func (x *Response) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Response.ProtoReflect.Descriptor instead. +func (*Response) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{46} +} + +func (x *Response) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Response) GetSchema() *SchemaItem { + if x != nil { + return x.Schema + } + return nil +} + +func (x *Response) GetHeaders() *Headers { + if x != nil { + return x.Headers + } + return nil +} + +func (x *Response) GetExamples() *Examples { + if x != nil { + return x.Examples + } + return nil +} + +func (x *Response) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// One or more JSON representations for responses +type ResponseDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *ResponseDefinitions) Reset() { + *x = ResponseDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseDefinitions) ProtoMessage() {} + +func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseDefinitions.ProtoReflect.Descriptor instead. 
+func (*ResponseDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{47} +} + +func (x *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (x *ResponseValue) Reset() { + *x = ResponseValue{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResponseValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResponseValue) ProtoMessage() {} + +func (x *ResponseValue) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResponseValue.ProtoReflect.Descriptor instead. +func (*ResponseValue) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{48} +} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *ResponseValue) GetResponse() *Response { + if x, ok := x.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (x *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := x.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` +} + +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} + +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +// Response objects names can either be any valid HTTP status code or 'default'. 
+type Responses struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Responses) Reset() { + *x = Responses{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Responses) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Responses) ProtoMessage() {} + +func (x *Responses) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Responses.ProtoReflect.Descriptor instead. +func (*Responses) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{49} +} + +func (x *Responses) GetResponseCode() []*NamedResponseValue { + if x != nil { + return x.ResponseCode + } + return nil +} + +func (x *Responses) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` + MinProperties int64 
`protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"` + Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Schema) Reset() { + *x = Schema{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Schema) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Schema) ProtoMessage() {} + +func (x *Schema) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Schema.ProtoReflect.Descriptor instead. 
+func (*Schema) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{50} +} + +func (x *Schema) GetXRef() string { + if x != nil { + return x.XRef + } + return "" +} + +func (x *Schema) GetFormat() string { + if x != nil { + return x.Format + } + return "" +} + +func (x *Schema) GetTitle() string { + if x != nil { + return x.Title + } + return "" +} + +func (x *Schema) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Schema) GetDefault() *Any { + if x != nil { + return x.Default + } + return nil +} + +func (x *Schema) GetMultipleOf() float64 { + if x != nil { + return x.MultipleOf + } + return 0 +} + +func (x *Schema) GetMaximum() float64 { + if x != nil { + return x.Maximum + } + return 0 +} + +func (x *Schema) GetExclusiveMaximum() bool { + if x != nil { + return x.ExclusiveMaximum + } + return false +} + +func (x *Schema) GetMinimum() float64 { + if x != nil { + return x.Minimum + } + return 0 +} + +func (x *Schema) GetExclusiveMinimum() bool { + if x != nil { + return x.ExclusiveMinimum + } + return false +} + +func (x *Schema) GetMaxLength() int64 { + if x != nil { + return x.MaxLength + } + return 0 +} + +func (x *Schema) GetMinLength() int64 { + if x != nil { + return x.MinLength + } + return 0 +} + +func (x *Schema) GetPattern() string { + if x != nil { + return x.Pattern + } + return "" +} + +func (x *Schema) GetMaxItems() int64 { + if x != nil { + return x.MaxItems + } + return 0 +} + +func (x *Schema) GetMinItems() int64 { + if x != nil { + return x.MinItems + } + return 0 +} + +func (x *Schema) GetUniqueItems() bool { + if x != nil { + return x.UniqueItems + } + return false +} + +func (x *Schema) GetMaxProperties() int64 { + if x != nil { + return x.MaxProperties + } + return 0 +} + +func (x *Schema) GetMinProperties() int64 { + if x != nil { + return x.MinProperties + } + return 0 +} + +func (x *Schema) GetRequired() []string { + if x != nil { + return x.Required + } + return nil +} + +func (x *Schema) GetEnum() []*Any { + if x != nil { + return x.Enum + } + return nil +} + +func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +func (x *Schema) GetType() *TypeItem { + if x != nil { + return x.Type + } + return nil +} + +func (x *Schema) GetItems() *ItemsItem { + if x != nil { + return x.Items + } + return nil +} + +func (x *Schema) GetAllOf() []*Schema { + if x != nil { + return x.AllOf + } + return nil +} + +func (x *Schema) GetProperties() *Properties { + if x != nil { + return x.Properties + } + return nil +} + +func (x *Schema) GetDiscriminator() string { + if x != nil { + return x.Discriminator + } + return "" +} + +func (x *Schema) GetReadOnly() bool { + if x != nil { + return x.ReadOnly + } + return false +} + +func (x *Schema) GetXml() *Xml { + if x != nil { + return x.Xml + } + return nil +} + +func (x *Schema) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Schema) GetExample() *Any { + if x != nil { + return x.Example + } + return nil +} + +func (x *Schema) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type SchemaItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func 
(x *SchemaItem) Reset() { + *x = SchemaItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SchemaItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SchemaItem) ProtoMessage() {} + +func (x *SchemaItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SchemaItem.ProtoReflect.Descriptor instead. +func (*SchemaItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{51} +} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SchemaItem) GetSchema() *Schema { + if x, ok := x.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (x *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := x.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` +} + +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} + +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +type SecurityDefinitions struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SecurityDefinitions) Reset() { + *x = SecurityDefinitions{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityDefinitions) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitions) ProtoMessage() {} + +func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead. 
+func (*SecurityDefinitions) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{52} +} + +func (x *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Types that are assignable to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (x *SecurityDefinitionsItem) Reset() { + *x = SecurityDefinitionsItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityDefinitionsItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityDefinitionsItem) ProtoMessage() {} + +func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityDefinitionsItem.ProtoReflect.Descriptor instead. +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{53} +} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (x *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (x *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity 
`protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"` +} + +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +type SecurityRequirement struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *SecurityRequirement) Reset() { + *x = SecurityRequirement{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SecurityRequirement) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SecurityRequirement) ProtoMessage() {} + +func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. 
+func (*SecurityRequirement) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{54} +} + +func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type StringArray struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` +} + +func (x *StringArray) Reset() { + *x = StringArray{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringArray) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringArray) ProtoMessage() {} + +func (x *StringArray) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. +func (*StringArray) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{55} +} + +func (x *StringArray) GetValue() []string { + if x != nil { + return x.Value + } + return nil +} + +type Tag struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Tag) Reset() { + *x = Tag{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Tag) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Tag) ProtoMessage() {} + +func (x *Tag) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
+func (*Tag) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{56} +} + +func (x *Tag) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Tag) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *Tag) GetExternalDocs() *ExternalDocs { + if x != nil { + return x.ExternalDocs + } + return nil +} + +func (x *Tag) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +type TypeItem struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` +} + +func (x *TypeItem) Reset() { + *x = TypeItem{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TypeItem) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TypeItem) ProtoMessage() {} + +func (x *TypeItem) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TypeItem.ProtoReflect.Descriptor instead. +func (*TypeItem) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{57} +} + +func (x *TypeItem) GetValue() []string { + if x != nil { + return x.Value + } + return nil +} + +// Any property starting with x- is valid. +type VendorExtension struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` +} + +func (x *VendorExtension) Reset() { + *x = VendorExtension{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *VendorExtension) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*VendorExtension) ProtoMessage() {} + +func (x *VendorExtension) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use VendorExtension.ProtoReflect.Descriptor instead. 
+func (*VendorExtension) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{58} +} + +func (x *VendorExtension) GetAdditionalProperties() []*NamedAny { + if x != nil { + return x.AdditionalProperties + } + return nil +} + +type Xml struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` +} + +func (x *Xml) Reset() { + *x = Xml{} + if protoimpl.UnsafeEnabled { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Xml) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Xml) ProtoMessage() {} + +func (x *Xml) ProtoReflect() protoreflect.Message { + mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Xml.ProtoReflect.Descriptor instead. +func (*Xml) Descriptor() ([]byte, []int) { + return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{59} +} + +func (x *Xml) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Xml) GetNamespace() string { + if x != nil { + return x.Namespace + } + return "" +} + +func (x *Xml) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *Xml) GetAttribute() bool { + if x != nil { + return x.Attribute + } + return false +} + +func (x *Xml) GetWrapped() bool { + if x != nil { + return x.Wrapped + } + return false +} + +func (x *Xml) GetVendorExtension() []*NamedAny { + if x != nil { + return x.VendorExtension + } + return nil +} + +var File_openapiv2_OpenAPIv2_proto protoreflect.FileDescriptor + +var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x4f, 0x70, 0x65, 0x6e, + 0x41, 0x50, 0x49, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x6d, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, + 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x07, + 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, + 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 
0x6f, + 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x41, 0x70, 0x69, + 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x73, 0x69, 0x63, + 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01, + 0x0a, 0x0d, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, + 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x06, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86, + 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, + 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, + 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, + 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44, + 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, + 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, + 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, + 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, + 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, + 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 
0x28, 0x0b, 0x32, 0x11, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x73, 0x52, 0x05, 0x70, 0x61, + 0x74, 0x68, 0x73, 0x12, 0x39, 0x0a, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x0b, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x40, + 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x12, 0x3d, 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x0b, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x12, + 0x3b, 0x0a, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, + 0x6e, 0x74, 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x52, 0x0a, 0x14, + 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x13, 0x73, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x23, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x67, 0x52, + 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, + 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, + 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, + 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x55, 0x0a, 0x08, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x73, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 
0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x83, 0x01, 0x0a, + 0x0c, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, + 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xff, 0x02, 0x0a, 0x0a, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, + 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x1a, 0x0a, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, + 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x1b, 0x0a, 0x09, + 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, + 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xab, 0x06, 0x0a, 0x1a, 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, + 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 
0x68, + 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, + 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 
0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0xab, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, + 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, + 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 
0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0c, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, + 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, + 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, + 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, + 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, + 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x11, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x13, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0xfd, 0x05, 0x0a, 0x18, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, + 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, + 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, + 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, + 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, 0x29, 0x0a, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, + 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, + 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, + 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, + 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, + 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x10, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, + 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, + 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, + 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, + 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, + 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, + 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x22, 0x57, 0x0a, 0x07, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x4c, 0x0a, 0x15, 0x61, + 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, + 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, 0x61, + 0x64, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 
0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa1, 0x02, 0x0a, 0x04, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x74, 0x69, 0x74, 0x6c, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x0a, 0x10, 0x74, 0x65, 0x72, 0x6d, 0x73, 0x5f, 0x6f, 0x66, + 0x5f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, + 0x74, 0x65, 0x72, 0x6d, 0x73, 0x4f, 0x66, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x2d, + 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6f, 0x6e, + 0x74, 0x61, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x2d, 0x0a, + 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x63, 0x65, + 0x6e, 0x73, 0x65, 0x52, 0x07, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x3f, 0x0a, 0x10, + 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x37, 0x0a, + 0x09, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x22, 0x44, 0x0a, 0x0d, 0x4a, 0x73, 0x6f, 0x6e, 0x52, 0x65, + 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x70, 0x0a, 0x07, + 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, + 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x45, + 0x0a, 0x08, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, + 0x0a, 0x05, 0x76, 
0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x48, 0x65, + 0x61, 0x64, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x51, 0x0a, 0x0e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4f, 0x0a, 0x0d, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x59, 0x0a, 0x12, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x2f, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x22, 0x4b, 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x28, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, + 0x6d, 0x0a, 0x1c, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, + 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x37, + 0x0a, 0x0b, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x55, 0x0a, 0x10, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x2d, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xb5, + 0x03, 0x0a, 0x10, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x12, 0x65, 0x0a, 0x1b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, + 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, + 0x52, 0x18, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x6c, 0x0a, 0x1e, 0x66, 0x6f, + 0x72, 0x6d, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x46, 0x6f, 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x1a, 0x66, 0x6f, + 0x72, 0x6d, 0x44, 0x61, 0x74, 0x61, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, + 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x62, 0x0a, 0x1a, 0x71, 0x75, 0x65, 0x72, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, 0x75, 0x62, 0x5f, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x23, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x48, 0x00, 0x52, 0x17, 0x71, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x5f, 0x0a, 0x19, + 0x70, 0x61, 0x74, 0x68, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x5f, 0x73, + 0x75, 0x62, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 
0x32, 0x2e, 0x50, 0x61, 0x74, + 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x16, 0x70, 0x61, 0x74, 0x68, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x42, 0x07, 0x0a, + 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xa1, 0x02, 0x0a, 0x18, 0x4f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, + 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, + 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, + 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, + 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, + 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x07, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf5, 0x01, 0x0a, 0x19, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, + 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, + 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, + 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, + 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, + 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 
0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x82, 0x02, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, + 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x52, + 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x75, 0x74, 0x68, 0x6f, + 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xf2, 0x01, 0x0a, 0x16, 0x4f, 0x61, 0x75, 0x74, + 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x30, 0x0a, 0x06, 0x73, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, + 0x6f, 0x70, 0x65, 0x73, 0x52, 0x06, 0x73, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x1b, 0x0a, 0x09, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x55, 0x72, 0x6c, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x5c, 0x0a, 0x0c, + 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x53, 0x63, 0x6f, 0x70, 0x65, 0x73, 0x12, 0x4c, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 
0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, + 0x72, 0x69, 0x6e, 0x67, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, + 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x9e, 0x04, 0x0a, 0x09, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x61, 0x67, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x04, 0x74, 0x61, 0x67, 0x73, 0x12, 0x18, 0x0a, 0x07, + 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, + 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, + 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, + 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, + 0x65, 0x73, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, + 0x18, 0x08, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, + 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x33, + 0x0a, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x73, 0x52, 0x09, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x18, 0x0a, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x65, 0x73, 0x12, 0x1e, 0x0a, + 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, + 0x08, 0x52, 0x0a, 0x64, 0x65, 0x70, 0x72, 0x65, 0x63, 0x61, 0x74, 0x65, 0x64, 0x12, 0x3b, 0x0a, + 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x65, 0x63, + 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, 0x65, 0x6e, 0x74, + 0x52, 0x08, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, + 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0d, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 
0x73, 0x69, 0x6f, 0x6e, 0x22, 0xa6, 0x01, 0x0a, 0x09, + 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x62, 0x6f, 0x64, + 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0d, + 0x62, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x4c, 0x0a, + 0x12, 0x6e, 0x6f, 0x6e, 0x5f, 0x62, 0x6f, 0x64, 0x79, 0x5f, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, + 0x74, 0x65, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x6e, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x10, 0x6e, 0x6f, 0x6e, 0x42, 0x6f, + 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x67, 0x0a, 0x14, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4f, 0x0a, 0x15, + 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, + 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x94, 0x01, + 0x0a, 0x0e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x12, 0x35, 0x0a, 0x09, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x48, 0x00, 0x52, 0x09, 0x70, 0x61, + 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0xcf, 0x03, 0x0a, 0x08, 0x50, 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, + 0x6d, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x52, 0x65, 0x66, 0x12, 0x27, 0x0a, 0x03, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x03, 0x67, 0x65, 0x74, 0x12, 0x27, 0x0a, + 0x03, 0x70, 0x75, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x03, 0x70, 0x75, 0x74, 0x12, 0x29, 0x0a, 0x04, 0x70, 0x6f, 0x73, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, + 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 
0x52, 0x04, 0x70, 0x6f, 0x73, + 0x74, 0x12, 0x2d, 0x0a, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x06, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x12, 0x2f, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x29, 0x0a, 0x04, 0x68, 0x65, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x68, 0x65, 0x61, 0x64, 0x12, 0x2b, 0x0a, 0x05, + 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x3a, 0x0a, 0x0a, 0x70, 0x61, 0x72, + 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, + 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfb, 0x05, 0x0a, 0x16, 0x50, 0x61, 0x74, 0x68, 0x50, + 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, + 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, + 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, + 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, + 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, + 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 
0x12, + 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, + 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, + 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, 0x20, 0x01, + 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, + 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, + 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, + 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, + 0x6e, 0x18, 0x10, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, + 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x11, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, + 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, + 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, + 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, + 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, + 0x66, 0x18, 0x15, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, + 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x16, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x77, 0x0a, 0x05, 0x50, 0x61, 0x74, 0x68, 0x73, 0x12, 0x3f, 0x0a, + 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x2d, + 0x0a, 0x04, 0x70, 
0x61, 0x74, 0x68, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x50, + 0x61, 0x74, 0x68, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x22, 0x92, 0x05, + 0x0a, 0x0f, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, + 0x73, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, + 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, + 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, + 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, + 0x6d, 0x75, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, + 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, + 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, + 0x67, 0x74, 0x68, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, + 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, + 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, + 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, + 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, + 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, + 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, + 0x6e, 0x75, 0x6d, 0x18, 0x10, 0x20, 0x03, 
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, + 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, + 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x12, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, + 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x22, 0x5a, 0x0a, 0x0a, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x12, 0x4c, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x17, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, + 0x65, 0x64, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, + 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xa8, + 0x06, 0x0a, 0x17, 0x51, 0x75, 0x65, 0x72, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, + 0x72, 0x53, 0x75, 0x62, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, + 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2a, 0x0a, 0x11, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x5f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, + 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x31, 0x0a, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x08, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x50, 0x72, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x76, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x73, + 0x52, 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x63, 0x6f, 0x6c, 0x6c, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x09, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x10, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x46, 0x6f, + 0x72, 0x6d, 0x61, 0x74, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x18, + 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x12, + 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 
0x0b, 0x20, 0x01, 0x28, 0x01, + 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0c, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, + 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, + 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x1d, 0x0a, + 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0f, 0x20, 0x01, 0x28, + 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x10, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x11, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, + 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x12, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x13, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, + 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, + 0x14, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x15, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, + 0x79, 0x52, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, + 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, 0x18, 0x16, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, + 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x4f, 0x66, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x17, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xfe, 0x01, 0x0a, 0x08, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2e, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, + 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x2d, 0x0a, 0x07, 0x68, 0x65, 0x61, 0x64, + 0x65, 0x72, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x52, 
0x07, + 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x30, 0x0a, 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x52, + 0x08, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x65, 0x0a, 0x13, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x4e, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x14, 0x61, 0x64, 0x64, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, + 0x73, 0x22, 0x90, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x48, 0x00, 0x52, 0x08, 0x72, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x42, 0x0a, 0x0e, 0x6a, 0x73, 0x6f, 0x6e, 0x5f, + 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x19, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4a, 0x73, 0x6f, + 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x48, 0x00, 0x52, 0x0d, 0x6a, 0x73, + 0x6f, 0x6e, 0x52, 0x65, 0x66, 0x65, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x6f, + 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x91, 0x01, 0x0a, 0x09, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x73, 0x12, 0x43, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, + 0x6f, 0x64, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, + 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, + 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xaf, 0x09, 0x0a, 0x06, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x12, 0x11, 0x0a, 0x04, 0x5f, 0x72, 0x65, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x52, 0x65, 0x66, 0x12, 0x16, 0x0a, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x12, 0x14, + 0x0a, 0x05, 0x74, 
0x69, 0x74, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, + 0x69, 0x74, 0x6c, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x29, 0x0a, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, + 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x07, 0x64, 0x65, 0x66, 0x61, 0x75, 0x6c, + 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, 0x5f, 0x6f, 0x66, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x6d, 0x75, 0x6c, 0x74, 0x69, 0x70, 0x6c, 0x65, + 0x4f, 0x66, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x5f, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, + 0x6d, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, + 0x76, 0x65, 0x4d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x69, 0x6e, + 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x09, 0x20, 0x01, 0x28, 0x01, 0x52, 0x07, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x12, 0x2b, 0x0a, 0x11, 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, + 0x5f, 0x6d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x08, 0x52, 0x10, + 0x65, 0x78, 0x63, 0x6c, 0x75, 0x73, 0x69, 0x76, 0x65, 0x4d, 0x69, 0x6e, 0x69, 0x6d, 0x75, 0x6d, + 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x61, 0x78, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x61, 0x78, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, + 0x1d, 0x0a, 0x0a, 0x6d, 0x69, 0x6e, 0x5f, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x09, 0x6d, 0x69, 0x6e, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x18, + 0x0a, 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x07, 0x70, 0x61, 0x74, 0x74, 0x65, 0x72, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x61, 0x78, 0x5f, + 0x69, 0x74, 0x65, 0x6d, 0x73, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x61, 0x78, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x6d, 0x69, 0x6e, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x6d, 0x69, 0x6e, 0x49, 0x74, 0x65, + 0x6d, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, 0x5f, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x75, 0x6e, 0x69, 0x71, 0x75, 0x65, + 0x49, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x61, 0x78, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x11, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, + 0x61, 0x78, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x25, 0x0a, 0x0e, + 0x6d, 0x69, 0x6e, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x12, + 0x20, 0x01, 0x28, 0x03, 0x52, 0x0d, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, + 0x69, 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x18, + 0x13, 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x12, + 0x23, 0x0a, 0x04, 0x65, 0x6e, 0x75, 0x6d, 0x18, 0x14, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x04, + 0x65, 0x6e, 0x75, 0x6d, 0x12, 0x59, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x15, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, + 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, + 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, + 0x28, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x49, + 0x74, 0x65, 0x6d, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2b, 0x0a, 0x05, 0x69, 0x74, 0x65, + 0x6d, 0x73, 0x18, 0x17, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x74, 0x65, 0x6d, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, + 0x05, 0x69, 0x74, 0x65, 0x6d, 0x73, 0x12, 0x29, 0x0a, 0x06, 0x61, 0x6c, 0x6c, 0x5f, 0x6f, 0x66, + 0x18, 0x18, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x05, 0x61, 0x6c, 0x6c, 0x4f, + 0x66, 0x12, 0x36, 0x0a, 0x0a, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, + 0x19, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0a, 0x70, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x12, 0x24, 0x0a, 0x0d, 0x64, 0x69, 0x73, + 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x1a, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x64, 0x69, 0x73, 0x63, 0x72, 0x69, 0x6d, 0x69, 0x6e, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x1b, 0x0a, 0x09, 0x72, 0x65, 0x61, 0x64, 0x5f, 0x6f, 0x6e, 0x6c, 0x79, 0x18, 0x1b, 0x20, 0x01, + 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x61, 0x64, 0x4f, 0x6e, 0x6c, 0x79, 0x12, 0x21, 0x0a, 0x03, + 0x78, 0x6d, 0x6c, 0x18, 0x1c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x58, 0x6d, 0x6c, 0x52, 0x03, 0x78, 0x6d, 0x6c, 0x12, + 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, + 0x18, 0x1d, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, + 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, + 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x29, + 0x0a, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x18, 0x1e, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x0f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x6e, 0x79, + 0x52, 0x07, 0x65, 0x78, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, + 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x1f, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, + 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x7e, 0x0a, 0x0a, 0x53, 0x63, + 0x68, 0x65, 0x6d, 0x61, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, 0x0a, 
0x06, 0x73, 0x63, 0x68, 0x65, + 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, + 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x39, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, + 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x13, 0x53, 0x65, + 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, + 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, + 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, + 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, + 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, + 0x22, 0xe9, 0x04, 0x0a, 0x17, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, + 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x6d, 0x0a, 0x1d, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, + 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x1b, + 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x10, 0x61, + 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, + 0x79, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x69, 0x6d, + 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, + 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x70, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, + 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, + 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, + 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x1b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x70, + 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, + 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, + 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, + 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, + 0x00, 0x52, 0x19, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x1b, + 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, + 0x64, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, + 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, + 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, + 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, + 0x69, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x13, + 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, + 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, + 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, + 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, + 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, + 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x03, + 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, + 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, + 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, + 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, + 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, + 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, + 0x4e, 0x61, 0x6d, 
0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, + 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x08, 0x54, 0x79, 0x70, + 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x0f, 0x56, + 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, + 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, + 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, + 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x03, 0x58, 0x6d, + 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, + 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, + 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, + 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, + 0x70, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, + 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, + 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, + 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3c, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, + 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x4f, + 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_openapiv2_OpenAPIv2_proto_rawDescOnce sync.Once + file_openapiv2_OpenAPIv2_proto_rawDescData = file_openapiv2_OpenAPIv2_proto_rawDesc +) + +func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { + file_openapiv2_OpenAPIv2_proto_rawDescOnce.Do(func() { + file_openapiv2_OpenAPIv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv2_OpenAPIv2_proto_rawDescData) + }) + return file_openapiv2_OpenAPIv2_proto_rawDescData +} + +var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) +var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ + (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem + (*Any)(nil), // 1: openapi.v2.Any + (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity + (*BasicAuthenticationSecurity)(nil), // 3: openapi.v2.BasicAuthenticationSecurity + (*BodyParameter)(nil), // 4: openapi.v2.BodyParameter + (*Contact)(nil), // 5: openapi.v2.Contact + 
(*Default)(nil), // 6: openapi.v2.Default + (*Definitions)(nil), // 7: openapi.v2.Definitions + (*Document)(nil), // 8: openapi.v2.Document + (*Examples)(nil), // 9: openapi.v2.Examples + (*ExternalDocs)(nil), // 10: openapi.v2.ExternalDocs + (*FileSchema)(nil), // 11: openapi.v2.FileSchema + (*FormDataParameterSubSchema)(nil), // 12: openapi.v2.FormDataParameterSubSchema + (*Header)(nil), // 13: openapi.v2.Header + (*HeaderParameterSubSchema)(nil), // 14: openapi.v2.HeaderParameterSubSchema + (*Headers)(nil), // 15: openapi.v2.Headers + (*Info)(nil), // 16: openapi.v2.Info + (*ItemsItem)(nil), // 17: openapi.v2.ItemsItem + (*JsonReference)(nil), // 18: openapi.v2.JsonReference + (*License)(nil), // 19: openapi.v2.License + (*NamedAny)(nil), // 20: openapi.v2.NamedAny + (*NamedHeader)(nil), // 21: openapi.v2.NamedHeader + (*NamedParameter)(nil), // 22: openapi.v2.NamedParameter + (*NamedPathItem)(nil), // 23: openapi.v2.NamedPathItem + (*NamedResponse)(nil), // 24: openapi.v2.NamedResponse + (*NamedResponseValue)(nil), // 25: openapi.v2.NamedResponseValue + (*NamedSchema)(nil), // 26: openapi.v2.NamedSchema + (*NamedSecurityDefinitionsItem)(nil), // 27: openapi.v2.NamedSecurityDefinitionsItem + (*NamedString)(nil), // 28: openapi.v2.NamedString + (*NamedStringArray)(nil), // 29: openapi.v2.NamedStringArray + (*NonBodyParameter)(nil), // 30: openapi.v2.NonBodyParameter + (*Oauth2AccessCodeSecurity)(nil), // 31: openapi.v2.Oauth2AccessCodeSecurity + (*Oauth2ApplicationSecurity)(nil), // 32: openapi.v2.Oauth2ApplicationSecurity + (*Oauth2ImplicitSecurity)(nil), // 33: openapi.v2.Oauth2ImplicitSecurity + (*Oauth2PasswordSecurity)(nil), // 34: openapi.v2.Oauth2PasswordSecurity + (*Oauth2Scopes)(nil), // 35: openapi.v2.Oauth2Scopes + (*Operation)(nil), // 36: openapi.v2.Operation + (*Parameter)(nil), // 37: openapi.v2.Parameter + (*ParameterDefinitions)(nil), // 38: openapi.v2.ParameterDefinitions + (*ParametersItem)(nil), // 39: openapi.v2.ParametersItem + (*PathItem)(nil), // 40: openapi.v2.PathItem + (*PathParameterSubSchema)(nil), // 41: openapi.v2.PathParameterSubSchema + (*Paths)(nil), // 42: openapi.v2.Paths + (*PrimitivesItems)(nil), // 43: openapi.v2.PrimitivesItems + (*Properties)(nil), // 44: openapi.v2.Properties + (*QueryParameterSubSchema)(nil), // 45: openapi.v2.QueryParameterSubSchema + (*Response)(nil), // 46: openapi.v2.Response + (*ResponseDefinitions)(nil), // 47: openapi.v2.ResponseDefinitions + (*ResponseValue)(nil), // 48: openapi.v2.ResponseValue + (*Responses)(nil), // 49: openapi.v2.Responses + (*Schema)(nil), // 50: openapi.v2.Schema + (*SchemaItem)(nil), // 51: openapi.v2.SchemaItem + (*SecurityDefinitions)(nil), // 52: openapi.v2.SecurityDefinitions + (*SecurityDefinitionsItem)(nil), // 53: openapi.v2.SecurityDefinitionsItem + (*SecurityRequirement)(nil), // 54: openapi.v2.SecurityRequirement + (*StringArray)(nil), // 55: openapi.v2.StringArray + (*Tag)(nil), // 56: openapi.v2.Tag + (*TypeItem)(nil), // 57: openapi.v2.TypeItem + (*VendorExtension)(nil), // 58: openapi.v2.VendorExtension + (*Xml)(nil), // 59: openapi.v2.Xml + (*any.Any)(nil), // 60: google.protobuf.Any +} +var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{ + 50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema + 60, // 1: openapi.v2.Any.value:type_name -> google.protobuf.Any + 20, // 2: openapi.v2.ApiKeySecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 3: openapi.v2.BasicAuthenticationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 50, 
// 4: openapi.v2.BodyParameter.schema:type_name -> openapi.v2.Schema + 20, // 5: openapi.v2.BodyParameter.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 6: openapi.v2.Contact.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 7: openapi.v2.Default.additional_properties:type_name -> openapi.v2.NamedAny + 26, // 8: openapi.v2.Definitions.additional_properties:type_name -> openapi.v2.NamedSchema + 16, // 9: openapi.v2.Document.info:type_name -> openapi.v2.Info + 42, // 10: openapi.v2.Document.paths:type_name -> openapi.v2.Paths + 7, // 11: openapi.v2.Document.definitions:type_name -> openapi.v2.Definitions + 38, // 12: openapi.v2.Document.parameters:type_name -> openapi.v2.ParameterDefinitions + 47, // 13: openapi.v2.Document.responses:type_name -> openapi.v2.ResponseDefinitions + 54, // 14: openapi.v2.Document.security:type_name -> openapi.v2.SecurityRequirement + 52, // 15: openapi.v2.Document.security_definitions:type_name -> openapi.v2.SecurityDefinitions + 56, // 16: openapi.v2.Document.tags:type_name -> openapi.v2.Tag + 10, // 17: openapi.v2.Document.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 18: openapi.v2.Document.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 19: openapi.v2.Examples.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 20: openapi.v2.ExternalDocs.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 21: openapi.v2.FileSchema.default:type_name -> openapi.v2.Any + 10, // 22: openapi.v2.FileSchema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 23: openapi.v2.FileSchema.example:type_name -> openapi.v2.Any + 20, // 24: openapi.v2.FileSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 25: openapi.v2.FormDataParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 26: openapi.v2.FormDataParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 27: openapi.v2.FormDataParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 28: openapi.v2.FormDataParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 29: openapi.v2.Header.items:type_name -> openapi.v2.PrimitivesItems + 1, // 30: openapi.v2.Header.default:type_name -> openapi.v2.Any + 1, // 31: openapi.v2.Header.enum:type_name -> openapi.v2.Any + 20, // 32: openapi.v2.Header.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 33: openapi.v2.HeaderParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 34: openapi.v2.HeaderParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 35: openapi.v2.HeaderParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 36: openapi.v2.HeaderParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 21, // 37: openapi.v2.Headers.additional_properties:type_name -> openapi.v2.NamedHeader + 5, // 38: openapi.v2.Info.contact:type_name -> openapi.v2.Contact + 19, // 39: openapi.v2.Info.license:type_name -> openapi.v2.License + 20, // 40: openapi.v2.Info.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 41: openapi.v2.ItemsItem.schema:type_name -> openapi.v2.Schema + 20, // 42: openapi.v2.License.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 43: openapi.v2.NamedAny.value:type_name -> openapi.v2.Any + 13, // 44: openapi.v2.NamedHeader.value:type_name -> openapi.v2.Header + 37, // 45: openapi.v2.NamedParameter.value:type_name -> openapi.v2.Parameter + 40, // 46: openapi.v2.NamedPathItem.value:type_name -> openapi.v2.PathItem + 46, // 47: openapi.v2.NamedResponse.value:type_name -> openapi.v2.Response 
+ 48, // 48: openapi.v2.NamedResponseValue.value:type_name -> openapi.v2.ResponseValue + 50, // 49: openapi.v2.NamedSchema.value:type_name -> openapi.v2.Schema + 53, // 50: openapi.v2.NamedSecurityDefinitionsItem.value:type_name -> openapi.v2.SecurityDefinitionsItem + 55, // 51: openapi.v2.NamedStringArray.value:type_name -> openapi.v2.StringArray + 14, // 52: openapi.v2.NonBodyParameter.header_parameter_sub_schema:type_name -> openapi.v2.HeaderParameterSubSchema + 12, // 53: openapi.v2.NonBodyParameter.form_data_parameter_sub_schema:type_name -> openapi.v2.FormDataParameterSubSchema + 45, // 54: openapi.v2.NonBodyParameter.query_parameter_sub_schema:type_name -> openapi.v2.QueryParameterSubSchema + 41, // 55: openapi.v2.NonBodyParameter.path_parameter_sub_schema:type_name -> openapi.v2.PathParameterSubSchema + 35, // 56: openapi.v2.Oauth2AccessCodeSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 57: openapi.v2.Oauth2AccessCodeSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 58: openapi.v2.Oauth2ApplicationSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 59: openapi.v2.Oauth2ApplicationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 60: openapi.v2.Oauth2ImplicitSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 61: openapi.v2.Oauth2ImplicitSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 35, // 62: openapi.v2.Oauth2PasswordSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes + 20, // 63: openapi.v2.Oauth2PasswordSecurity.vendor_extension:type_name -> openapi.v2.NamedAny + 28, // 64: openapi.v2.Oauth2Scopes.additional_properties:type_name -> openapi.v2.NamedString + 10, // 65: openapi.v2.Operation.external_docs:type_name -> openapi.v2.ExternalDocs + 39, // 66: openapi.v2.Operation.parameters:type_name -> openapi.v2.ParametersItem + 49, // 67: openapi.v2.Operation.responses:type_name -> openapi.v2.Responses + 54, // 68: openapi.v2.Operation.security:type_name -> openapi.v2.SecurityRequirement + 20, // 69: openapi.v2.Operation.vendor_extension:type_name -> openapi.v2.NamedAny + 4, // 70: openapi.v2.Parameter.body_parameter:type_name -> openapi.v2.BodyParameter + 30, // 71: openapi.v2.Parameter.non_body_parameter:type_name -> openapi.v2.NonBodyParameter + 22, // 72: openapi.v2.ParameterDefinitions.additional_properties:type_name -> openapi.v2.NamedParameter + 37, // 73: openapi.v2.ParametersItem.parameter:type_name -> openapi.v2.Parameter + 18, // 74: openapi.v2.ParametersItem.json_reference:type_name -> openapi.v2.JsonReference + 36, // 75: openapi.v2.PathItem.get:type_name -> openapi.v2.Operation + 36, // 76: openapi.v2.PathItem.put:type_name -> openapi.v2.Operation + 36, // 77: openapi.v2.PathItem.post:type_name -> openapi.v2.Operation + 36, // 78: openapi.v2.PathItem.delete:type_name -> openapi.v2.Operation + 36, // 79: openapi.v2.PathItem.options:type_name -> openapi.v2.Operation + 36, // 80: openapi.v2.PathItem.head:type_name -> openapi.v2.Operation + 36, // 81: openapi.v2.PathItem.patch:type_name -> openapi.v2.Operation + 39, // 82: openapi.v2.PathItem.parameters:type_name -> openapi.v2.ParametersItem + 20, // 83: openapi.v2.PathItem.vendor_extension:type_name -> openapi.v2.NamedAny + 43, // 84: openapi.v2.PathParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 85: openapi.v2.PathParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 86: openapi.v2.PathParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 87: 
openapi.v2.PathParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 88: openapi.v2.Paths.vendor_extension:type_name -> openapi.v2.NamedAny + 23, // 89: openapi.v2.Paths.path:type_name -> openapi.v2.NamedPathItem + 43, // 90: openapi.v2.PrimitivesItems.items:type_name -> openapi.v2.PrimitivesItems + 1, // 91: openapi.v2.PrimitivesItems.default:type_name -> openapi.v2.Any + 1, // 92: openapi.v2.PrimitivesItems.enum:type_name -> openapi.v2.Any + 20, // 93: openapi.v2.PrimitivesItems.vendor_extension:type_name -> openapi.v2.NamedAny + 26, // 94: openapi.v2.Properties.additional_properties:type_name -> openapi.v2.NamedSchema + 43, // 95: openapi.v2.QueryParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems + 1, // 96: openapi.v2.QueryParameterSubSchema.default:type_name -> openapi.v2.Any + 1, // 97: openapi.v2.QueryParameterSubSchema.enum:type_name -> openapi.v2.Any + 20, // 98: openapi.v2.QueryParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny + 51, // 99: openapi.v2.Response.schema:type_name -> openapi.v2.SchemaItem + 15, // 100: openapi.v2.Response.headers:type_name -> openapi.v2.Headers + 9, // 101: openapi.v2.Response.examples:type_name -> openapi.v2.Examples + 20, // 102: openapi.v2.Response.vendor_extension:type_name -> openapi.v2.NamedAny + 24, // 103: openapi.v2.ResponseDefinitions.additional_properties:type_name -> openapi.v2.NamedResponse + 46, // 104: openapi.v2.ResponseValue.response:type_name -> openapi.v2.Response + 18, // 105: openapi.v2.ResponseValue.json_reference:type_name -> openapi.v2.JsonReference + 25, // 106: openapi.v2.Responses.response_code:type_name -> openapi.v2.NamedResponseValue + 20, // 107: openapi.v2.Responses.vendor_extension:type_name -> openapi.v2.NamedAny + 1, // 108: openapi.v2.Schema.default:type_name -> openapi.v2.Any + 1, // 109: openapi.v2.Schema.enum:type_name -> openapi.v2.Any + 0, // 110: openapi.v2.Schema.additional_properties:type_name -> openapi.v2.AdditionalPropertiesItem + 57, // 111: openapi.v2.Schema.type:type_name -> openapi.v2.TypeItem + 17, // 112: openapi.v2.Schema.items:type_name -> openapi.v2.ItemsItem + 50, // 113: openapi.v2.Schema.all_of:type_name -> openapi.v2.Schema + 44, // 114: openapi.v2.Schema.properties:type_name -> openapi.v2.Properties + 59, // 115: openapi.v2.Schema.xml:type_name -> openapi.v2.Xml + 10, // 116: openapi.v2.Schema.external_docs:type_name -> openapi.v2.ExternalDocs + 1, // 117: openapi.v2.Schema.example:type_name -> openapi.v2.Any + 20, // 118: openapi.v2.Schema.vendor_extension:type_name -> openapi.v2.NamedAny + 50, // 119: openapi.v2.SchemaItem.schema:type_name -> openapi.v2.Schema + 11, // 120: openapi.v2.SchemaItem.file_schema:type_name -> openapi.v2.FileSchema + 27, // 121: openapi.v2.SecurityDefinitions.additional_properties:type_name -> openapi.v2.NamedSecurityDefinitionsItem + 3, // 122: openapi.v2.SecurityDefinitionsItem.basic_authentication_security:type_name -> openapi.v2.BasicAuthenticationSecurity + 2, // 123: openapi.v2.SecurityDefinitionsItem.api_key_security:type_name -> openapi.v2.ApiKeySecurity + 33, // 124: openapi.v2.SecurityDefinitionsItem.oauth2_implicit_security:type_name -> openapi.v2.Oauth2ImplicitSecurity + 34, // 125: openapi.v2.SecurityDefinitionsItem.oauth2_password_security:type_name -> openapi.v2.Oauth2PasswordSecurity + 32, // 126: openapi.v2.SecurityDefinitionsItem.oauth2_application_security:type_name -> openapi.v2.Oauth2ApplicationSecurity + 31, // 127: 
openapi.v2.SecurityDefinitionsItem.oauth2_access_code_security:type_name -> openapi.v2.Oauth2AccessCodeSecurity + 29, // 128: openapi.v2.SecurityRequirement.additional_properties:type_name -> openapi.v2.NamedStringArray + 10, // 129: openapi.v2.Tag.external_docs:type_name -> openapi.v2.ExternalDocs + 20, // 130: openapi.v2.Tag.vendor_extension:type_name -> openapi.v2.NamedAny + 20, // 131: openapi.v2.VendorExtension.additional_properties:type_name -> openapi.v2.NamedAny + 20, // 132: openapi.v2.Xml.vendor_extension:type_name -> openapi.v2.NamedAny + 133, // [133:133] is the sub-list for method output_type + 133, // [133:133] is the sub-list for method input_type + 133, // [133:133] is the sub-list for extension type_name + 133, // [133:133] is the sub-list for extension extendee + 0, // [0:133] is the sub-list for field type_name +} + +func init() { file_openapiv2_OpenAPIv2_proto_init() } +func file_openapiv2_OpenAPIv2_proto_init() { + if File_openapiv2_OpenAPIv2_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AdditionalPropertiesItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ApiKeySecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BasicAuthenticationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*BodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Contact); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Default); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Definitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Document); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Examples); i { + case 0: + return 
&v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExternalDocs); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FileSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*FormDataParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Header); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HeaderParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Headers); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Info); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ItemsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*JsonReference); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*License); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedAny); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedHeader); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + 
file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedPathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedSecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedString); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NamedStringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NonBodyParameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2AccessCodeSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ApplicationSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2ImplicitSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2PasswordSecurity); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Oauth2Scopes); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = 
func(v interface{}, i int) interface{} { + switch v := v.(*Operation); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Parameter); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParameterDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ParametersItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PathParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Paths); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PrimitivesItems); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Properties); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*QueryParameterSubSchema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Response); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResponseValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Responses); i { + case 0: + return &v.state + case 1: + 
return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Schema); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SchemaItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitions); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityDefinitionsItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SecurityRequirement); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringArray); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Tag); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TypeItem); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*VendorExtension); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Xml); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = 
[]interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } + file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc, + NumEnums: 0, + NumMessages: 60, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes, + DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs, + MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes, + }.Build() + File_openapiv2_OpenAPIv2_proto = out.File + file_openapiv2_OpenAPIv2_proto_rawDesc = nil + file_openapiv2_OpenAPIv2_proto_goTypes = nil + file_openapiv2_OpenAPIv2_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto new file mode 100644 index 0000000..00ac1b0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -0,0 +1,666 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +// The Go package name. 
+option go_package = "openapiv2;openapi_v2"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for responses +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md new file mode 100644 index 0000000..5276128 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -0,0 +1,14 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model and related code for +supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol +Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into +the Protocol Buffer-based datastructures generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler +generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer +compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go new file mode 100644 index 0000000..ddeed5c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go @@ -0,0 +1,26 @@ +// Copyright 2020 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapi_v2 + +import "github.com/googleapis/gnostic/compiler" + +// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. +func ParseDocument(b []byte) (*Document, error) { + info, err := compiler.ReadInfoFromBytes("", b) + if err != nil { + return nil, err + } + return NewDocument(info.Content[0], compiler.NewContextWithExtensions("$root", nil, nil)) +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json new file mode 100644 index 0000000..afa12b7 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json @@ -0,0 +1,1610 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." 
+ }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
+ }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for responses" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + 
"type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 0000000..ee9783d --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,4 @@ +# Compiler 
support code + +This directory contains compiler support code used by Gnostic and Gnostic +extensions. diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 0000000..1250a6c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,43 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Context contains state of the compiler as it traverses a document. +type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." + context.Name + } + return context.Name +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 0000000..7db2399 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return "ERROR " + err.Message + } + return "ERROR " + err.Context.Description() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. 
+type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extensions.go b/vendor/github.com/googleapis/gnostic/compiler/extensions.go new file mode 100644 index 0000000..20848a0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extensions.go @@ -0,0 +1,85 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + "strings" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + extensions "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v3" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// CallExtension calls a binary extension handler. +func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { + if context == nil || context.ExtensionHandlers == nil { + return false, nil, nil + } + handled = false + for _, handler := range *(context.ExtensionHandlers) { + response, err = handler.handle(in, extensionName) + if response == nil { + continue + } else { + handled = true + break + } + } + return handled, response, err +} + +func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + yamlData, _ := yaml.Marshal(in) + request := &extensions.ExtensionHandlerRequest{ + CompilerVersion: &extensions.Version{ + Major: 0, + Minor: 1, + Patch: 0, + }, + Wrapper: &extensions.Wrapper{ + Version: "unknown", // TODO: set this to the type/version of spec being parsed. + Yaml: string(yamlData), + ExtensionName: extensionName, + }, + } + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + if err != nil { + return nil, err + } + response := &extensions.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil || !response.Handled { + return nil, err + } + if len(response.Errors) != 0 { + return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. 
Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 0000000..a580f40 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,385 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "regexp" + "sort" + "strconv" + + "github.com/googleapis/gnostic/jsonschema" + "gopkg.in/yaml.v3" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a *yaml.Node if possible. +func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { + if in == nil { + return nil, false + } + return in, true +} + +// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice. +func SortedKeysForMap(m *yaml.Node) []string { + keys := make([]string, 0) + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + keys = append(keys, m.Content[i].Value) + } + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yamlv2.MapSlice contains a specified key. +func MapHasKey(m *yaml.Node, key string) bool { + if m == nil { + return false + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return true + } + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m *yaml.Node, key string) *yaml.Node { + if m == nil { + return nil + } + if m.Kind == yaml.MappingNode { + for i := 0; i < len(m.Content); i += 2 { + itemKey := m.Content[i].Value + if key == itemKey { + return m.Content[i+1] + } + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// SequenceNodeForNode returns a node if it is a SequenceNode. +func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { + if node.Kind != yaml.SequenceNode { + return nil, false + } + return node, true +} + +// BoolForScalarNode returns the bool value of a node. +func BoolForScalarNode(node *yaml.Node) (bool, bool) { + if node == nil { + return false, false + } + if node.Kind == yaml.DocumentNode { + return BoolForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return false, false + } + if node.Tag != "!!bool" { + return false, false + } + v, err := strconv.ParseBool(node.Value) + if err != nil { + return false, false + } + return v, true +} + +// IntForScalarNode returns the integer value of a node. 
+func IntForScalarNode(node *yaml.Node) (int64, bool) { + if node == nil { + return 0, false + } + if node.Kind == yaml.DocumentNode { + return IntForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0, false + } + if node.Tag != "!!int" { + return 0, false + } + v, err := strconv.ParseInt(node.Value, 10, 64) + if err != nil { + return 0, false + } + return v, true +} + +// FloatForScalarNode returns the float value of a node. +func FloatForScalarNode(node *yaml.Node) (float64, bool) { + if node == nil { + return 0.0, false + } + if node.Kind == yaml.DocumentNode { + return FloatForScalarNode(node.Content[0]) + } + if node.Kind != yaml.ScalarNode { + return 0.0, false + } + if (node.Tag != "!!int") && (node.Tag != "!!float") { + return 0.0, false + } + v, err := strconv.ParseFloat(node.Value, 64) + if err != nil { + return 0.0, false + } + return v, true +} + +// StringForScalarNode returns the string value of a node. +func StringForScalarNode(node *yaml.Node) (string, bool) { + if node == nil { + return "", false + } + if node.Kind == yaml.DocumentNode { + return StringForScalarNode(node.Content[0]) + } + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!int": + return node.Value, true + case "!!str": + return node.Value, true + case "!!null": + return "", true + default: + return "", false + } + default: + return "", false + } +} + +// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. +func StringArrayForSequenceNode(node *yaml.Node) []string { + stringArray := make([]string, 0) + for _, item := range node.Content { + v, ok := StringForScalarNode(item) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + if m == nil || m.Kind != yaml.MappingNode { + return invalidKeys + } + for i := 0; i < len(m.Content); i += 2 { + key := m.Content[i].Value + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + return invalidKeys +} + +// NewMappingNode creates a new Mapping node. +func NewMappingNode() *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: make([]*yaml.Node, 0), + } +} + +// NewSequenceNode creates a new Sequence node. +func NewSequenceNode() *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + return node +} + +// NewScalarNodeForString creates a new node to hold a string. +func NewScalarNodeForString(s string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: s, + } +} + +// NewSequenceNodeForStringArray creates a new node to hold an array of strings. 
+func NewSequenceNodeForStringArray(strings []string) *yaml.Node { + node := &yaml.Node{ + Kind: yaml.SequenceNode, + Content: make([]*yaml.Node, 0), + } + for _, s := range strings { + node.Content = append(node.Content, NewScalarNodeForString(s)) + } + return node +} + +// NewScalarNodeForBool creates a new node to hold a bool. +func NewScalarNodeForBool(b bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", b), + } +} + +// NewScalarNodeForFloat creates a new node to hold a float. +func NewScalarNodeForFloat(f float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%g", f), + } +} + +// NewScalarNodeForInt creates a new node to hold an integer. +func NewScalarNodeForInt(i int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", i), + } +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. +func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} + +// Description returns a human-readable represention of an item. +func Description(item interface{}) string { + value, ok := item.(*yaml.Node) + if ok { + return jsonschema.Render(value) + } + return fmt.Sprintf("%+v", item) +} + +// Display returns a description of a node for use in error messages. +func Display(node *yaml.Node) string { + switch node.Kind { + case yaml.ScalarNode: + switch node.Tag { + case "!!str": + return fmt.Sprintf("%s (string)", node.Value) + } + } + return fmt.Sprintf("%+v (%T)", node, node) +} + +// Marshal creates a yaml version of a structure in our preferred style +func Marshal(in *yaml.Node) []byte { + clearStyle(in) + //bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}}) + bytes, _ := yaml.Marshal(in) + + return bytes +} + +func clearStyle(node *yaml.Node) { + node.Style = 0 + for _, c := range node.Content { + clearStyle(c) + } +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 0000000..ce9fcc4 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 0000000..be0e8b4 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,307 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" + "sync" + + yaml "gopkg.in/yaml.v3" +) + +var verboseReader = false + +var fileCache map[string][]byte +var infoCache map[string]*yaml.Node + +var fileCacheEnable = true +var infoCacheEnable = true + +// These locks are used to synchronize accesses to the fileCache and infoCache +// maps (above). They are global state and can throw thread-related errors +// when modified from separate goroutines. The general strategy is to protect +// all public functions in this file with mutex Lock() calls. As a result, to +// avoid deadlock, these public functions should not call other public +// functions, so some public functions have private equivalents. +// In the future, we might consider replacing the maps with sync.Map and +// eliminating these mutexes. +var fileCacheMutex sync.Mutex +var infoCacheMutex sync.Mutex + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]*yaml.Node, 0) + } +} + +// EnableFileCache turns on file caching. +func EnableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCacheEnable = true +} + +// EnableInfoCache turns on parsed info caching. +func EnableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCacheEnable = true +} + +// DisableFileCache turns off file caching. +func DisableFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCacheEnable = false +} + +// DisableInfoCache turns off parsed info caching. +func DisableInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCacheEnable = false +} + +// RemoveFromFileCache removes an entry from the file cache. +func RemoveFromFileCache(fileurl string) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + if !fileCacheEnable { + return + } + initializeFileCache() + delete(fileCache, fileurl) +} + +// RemoveFromInfoCache removes an entry from the info cache. 
+func RemoveFromInfoCache(filename string) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + if !infoCacheEnable { + return + } + initializeInfoCache() + delete(infoCache, filename) +} + +// GetInfoCache returns the info cache map. +func GetInfoCache() map[string]*yaml.Node { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + if infoCache == nil { + initializeInfoCache() + } + return infoCache +} + +// ClearFileCache clears the file cache. +func ClearFileCache() { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + fileCache = make(map[string][]byte, 0) +} + +// ClearInfoCache clears the info cache. +func ClearInfoCache() { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + infoCache = make(map[string]*yaml.Node) +} + +// ClearCaches clears all caches. +func ClearCaches() { + ClearFileCache() + ClearInfoCache() +} + +// FetchFile gets a specified file from the local filesystem or a remote location. +func FetchFile(fileurl string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return fetchFile(fileurl) +} + +func fetchFile(fileurl string) ([]byte, error) { + var bytes []byte + initializeFileCache() + if fileCacheEnable { + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + if verboseReader { + log.Printf("Fetching %s", fileurl) + } + } + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + defer response.Body.Close() + if response.StatusCode != 200 { + return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) + } + bytes, err = ioutil.ReadAll(response.Body) + if fileCacheEnable && err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + return readBytesForFile(filename) +} + +func readBytesForFile(filename string) ([]byte, error) { + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := fetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a *yaml.Node. +func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + return readInfoFromBytes(filename, bytes) +} + +func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { + initializeInfoCache() + if infoCacheEnable { + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + } + var info yaml.Node + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + if infoCacheEnable && len(filename) > 0 { + infoCache[filename] = &info + } + return &info, nil +} + +// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
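For a rough sense of how the cached readers above are meant to be called, the following sketch (illustrative only; the openapi.yaml path is a hypothetical input, not something referenced by this patch) reads a file through the byte cache and then parses it through the info cache:

package main

import (
	"log"

	"github.com/googleapis/gnostic/compiler"
)

func main() {
	// ReadBytesForFile accepts a local path or a URL and consults the
	// package-level file cache guarded by fileCacheMutex.
	data, err := compiler.ReadBytesForFile("openapi.yaml") // hypothetical input
	if err != nil {
		log.Fatal(err)
	}
	// ReadInfoFromBytes parses the bytes into a *yaml.Node and caches the
	// result in the info cache under the same filename.
	info, err := compiler.ReadInfoFromBytes("openapi.yaml", data)
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("top-level YAML node has %d child node(s)", len(info.Content))
}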
+func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { + fileCacheMutex.Lock() + defer fileCacheMutex.Unlock() + infoCacheMutex.Lock() + defer infoCacheMutex.Unlock() + initializeInfoCache() + if infoCacheEnable { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + } + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = parts[0] + if _, err := url.ParseRequestURI(parts[0]); err != nil { + // It is not an URL, so the file is local + filename = basedir + parts[0] + } + } else { + filename = basefile + } + bytes, err := readBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := readInfoFromBytes(filename, bytes) + if info != nil && info.Kind == yaml.DocumentNode { + info = info.Content[0] + } + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if info == nil { + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m := info + if true { + found := false + for i := 0; i < len(m.Content); i += 2 { + if m.Content[i].Value == key { + info = m.Content[i+1] + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + if infoCacheEnable { + infoCache[ref] = info + } + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 0000000..4b5d63e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,13 @@ +# Extensions + +**Extension Support is experimental.** + +This directory contains support code for building Gnostic extensio handlers and +associated examples. + +Extension handlers can be used to compile vendor or specification extensions +into protocol buffer structures. + +Like plugins, extension handlers are built as separate executables. Extension +bodies are written to extension handlers as serialized +ExtensionHandlerRequests. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 0000000..4198fa1 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,465 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.12.3 +// source: extensions/extension.proto + +package gnostic_extension_v1 + +import ( + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// The version number of Gnostic. +type Version struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` +} + +func (x *Version) Reset() { + *x = Version{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Version) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Version) ProtoMessage() {} + +func (x *Version) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Version.ProtoReflect.Descriptor instead. +func (*Version) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{0} +} + +func (x *Version) GetMajor() int32 { + if x != nil { + return x.Major + } + return 0 +} + +func (x *Version) GetMinor() int32 { + if x != nil { + return x.Minor + } + return 0 +} + +func (x *Version) GetPatch() int32 { + if x != nil { + return x.Patch + } + return 0 +} + +func (x *Version) GetSuffix() string { + if x != nil { + return x.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The extension to process. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` + // The version number of Gnostic. 
+ CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` +} + +func (x *ExtensionHandlerRequest) Reset() { + *x = ExtensionHandlerRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionHandlerRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionHandlerRequest) ProtoMessage() {} + +func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{1} +} + +func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if x != nil { + return x.Wrapper + } + return nil +} + +func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if x != nil { + return x.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` + // Error message(s). If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` + // text output + Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *ExtensionHandlerResponse) Reset() { + *x = ExtensionHandlerResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExtensionHandlerResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExtensionHandlerResponse) ProtoMessage() {} + +func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. 
+func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{2} +} + +func (x *ExtensionHandlerResponse) GetHandled() bool { + if x != nil { + return x.Handled + } + return false +} + +func (x *ExtensionHandlerResponse) GetErrors() []string { + if x != nil { + return x.Errors + } + return nil +} + +func (x *ExtensionHandlerResponse) GetValue() *any.Any { + if x != nil { + return x.Value + } + return nil +} + +type Wrapper struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // version of the OpenAPI specification in which this extension was written. + Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` + // Name of the extension. + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` + // YAML-formatted extension value. + Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` +} + +func (x *Wrapper) Reset() { + *x = Wrapper{} + if protoimpl.UnsafeEnabled { + mi := &file_extensions_extension_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Wrapper) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Wrapper) ProtoMessage() {} + +func (x *Wrapper) ProtoReflect() protoreflect.Message { + mi := &file_extensions_extension_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. +func (*Wrapper) Descriptor() ([]byte, []int) { + return file_extensions_extension_proto_rawDescGZIP(), []int{3} +} + +func (x *Wrapper) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *Wrapper) GetExtensionName() string { + if x != nil { + return x.ExtensionName + } + return "" +} + +func (x *Wrapper) GetYaml() string { + if x != nil { + return x.Yaml + } + return "" +} + +var File_extensions_extension_proto protoreflect.FileDescriptor + +var file_extensions_extension_proto_rawDesc = []byte{ + 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, + 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, + 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, + 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, + 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, + 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, + 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, + 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, + 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, + 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x37, + 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, + 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, + 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, + 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, + 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, + 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, + 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, + 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4b, 0x0a, 0x0e, 0x6f, + 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, + 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, + 0x01, 0x5a, 0x1f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, + 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_extensions_extension_proto_rawDescOnce sync.Once + file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc +) + +func file_extensions_extension_proto_rawDescGZIP() []byte { + file_extensions_extension_proto_rawDescOnce.Do(func() { + file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) + }) + return file_extensions_extension_proto_rawDescData +} + +var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_extensions_extension_proto_goTypes = []interface{}{ + (*Version)(nil), // 0: gnostic.extension.v1.Version + (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest + (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse + (*Wrapper)(nil), // 3: 
gnostic.extension.v1.Wrapper + (*any.Any)(nil), // 4: google.protobuf.Any +} +var file_extensions_extension_proto_depIdxs = []int32{ + 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper + 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version + 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any + 3, // [3:3] is the sub-list for method output_type + 3, // [3:3] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_extensions_extension_proto_init() } +func file_extensions_extension_proto_init() { + if File_extensions_extension_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Version); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExtensionHandlerResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Wrapper); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_extensions_extension_proto_rawDesc, + NumEnums: 0, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_extensions_extension_proto_goTypes, + DependencyIndexes: file_extensions_extension_proto_depIdxs, + MessageInfos: file_extensions_extension_proto_msgTypes, + }.Build() + File_extensions_extension_proto = out.File + file_extensions_extension_proto_rawDesc = nil + file_extensions_extension_proto_goTypes = nil + file_extensions_extension_proto_depIdxs = nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 0000000..8ac1faf --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,96 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
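As a brief illustration of the request/response protocol these messages describe, a handler executable can delegate all of the stdin/stdout framing to the Main helper that extensions.go defines further down. The sketch below is illustrative only; the handleExtension name and its always-unhandled behaviour are assumptions, not part of the vendored code:

package main

import (
	"github.com/golang/protobuf/proto"
	gnostic_extension_v1 "github.com/googleapis/gnostic/extensions"
)

// handleExtension reports every extension as unhandled, so gnostic keeps its
// default treatment of the extension body.
func handleExtension(extensionName string, yamlInput string) (bool, proto.Message, error) {
	return false, nil, nil
}

func main() {
	// Main reads a serialized ExtensionHandlerRequest from stdin, invokes the
	// handler, and writes a serialized ExtensionHandlerResponse to stdout.
	gnostic_extension_v1.Main(handleExtension)
}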
+ +syntax = "proto3"; + +package gnostic.extension.v1; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "GnosticExtension"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.gnostic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +option objc_class_prefix = "GNX"; // "Gnostic Extension" + +// The Go package name. +option go_package = "extensions;gnostic_extension_v1"; + +// The version number of Gnostic. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The extension to process. + Wrapper wrapper = 1; + + // The version number of Gnostic. + Version compiler_version = 2; +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message(s). If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + repeated string errors = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension. + string extension_name = 2; + + // YAML-formatted extension value. + string yaml = 3; +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 0000000..ec8afd0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,64 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package gnostic_extension_v1 + +import ( + "io/ioutil" + "log" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +// Main implements the main program of an extension handler. +func Main(handler extensionHandler) { + // unpack the request + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + log.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + log.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + log.Println("Input error:", err.Error()) + os.Exit(1) + } + // call the handler + handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) + // respond with the output of the handler + response := &ExtensionHandlerResponse{ + Handled: false, // default assumption + Errors: make([]string, 0), + } + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } else if handled { + response.Handled = true + response.Value, err = ptypes.MarshalAny(output) + if err != nil { + response.Errors = append(response.Errors, err.Error()) + } + } + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/README.md b/vendor/github.com/googleapis/gnostic/jsonschema/README.md new file mode 100644 index 0000000..6793c51 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/README.md @@ -0,0 +1,4 @@ +# jsonschema + +This directory contains code for reading, writing, and manipulating JSON +schemas. diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/base.go b/vendor/github.com/googleapis/gnostic/jsonschema/base.go new file mode 100644 index 0000000..0af8b14 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/base.go @@ -0,0 +1,84 @@ + +// THIS FILE IS AUTOMATICALLY GENERATED. 
+ +package jsonschema + +import ( + "encoding/base64" +) + +func baseSchemaBytes() ([]byte, error){ + return base64.StdEncoding.DecodeString( +`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi +JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl +c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK +ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg +ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp +bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp +dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl +ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK +ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v +bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg +ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki +LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p +bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s +CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog +ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg +ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog +ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg +ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog +ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 +IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog +ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 +ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl +ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw +ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg +ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg +ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg +ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg +IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 +aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg +ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg +ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg +ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK +ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi +ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP +ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy +ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg +ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv +ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi +OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl +SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs 
+dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k +ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk +cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl +cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh +ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg +ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg +ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk +ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk +ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 +IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi +b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 +LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl +cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv +bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog +ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq +ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg +ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg +ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg +ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg +ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu +aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh +bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl +cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs +CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs +ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg +ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg +ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy +YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 +IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg +fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 +IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 +c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/display.go b/vendor/github.com/googleapis/gnostic/jsonschema/display.go new file mode 100644 index 0000000..028a760 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/display.go @@ -0,0 +1,229 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package jsonschema + +import ( + "fmt" + "strings" +) + +// +// DISPLAY +// The following methods display Schemas. +// + +// Description returns a string representation of a string or string array. +func (s *StringOrStringArray) Description() string { + if s.String != nil { + return *s.String + } + if s.StringArray != nil { + return strings.Join(*s.StringArray, ", ") + } + return "" +} + +// Returns a string representation of a Schema. +func (schema *Schema) String() string { + return schema.describeSchema("") +} + +// Helper: Returns a string representation of a Schema indented by a specified string. +func (schema *Schema) describeSchema(indent string) string { + result := "" + if schema.Schema != nil { + result += indent + "$schema: " + *(schema.Schema) + "\n" + } + if schema.ID != nil { + result += indent + "id: " + *(schema.ID) + "\n" + } + if schema.MultipleOf != nil { + result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) + } + if schema.Maximum != nil { + result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) + } + if schema.ExclusiveMaximum != nil { + result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) + } + if schema.ExclusiveMinimum != nil { + result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) + } + if schema.MinLength != nil { + result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) + } + if schema.Pattern != nil { + result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) + } + if schema.AdditionalItems != nil { + s := schema.AdditionalItems.Schema + if s != nil { + result += indent + "additionalItems:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalItems.Boolean) + result += indent + fmt.Sprintf("additionalItems: %+v\n", b) + } + } + if schema.Items != nil { + result += indent + "items:\n" + items := schema.Items + if items.SchemaArray != nil { + for i, s := range *(items.SchemaArray) { + result += indent + " " + fmt.Sprintf("%d", i) + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } else if items.Schema != nil { + result += items.Schema.describeSchema(indent + " " + " ") + } + } + if schema.MaxItems != nil { + result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) + } + if schema.MinItems != nil { + result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) + } + if schema.UniqueItems != nil { + result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) + } + if schema.MaxProperties != nil { + result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) + } + if schema.MinProperties != nil { + result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) + } + if schema.Required != nil { + result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) + } + if schema.AdditionalProperties != nil { + s := schema.AdditionalProperties.Schema + if s != nil { + result += indent + "additionalProperties:\n" + result += s.describeSchema(indent + " ") + } else { + b := *(schema.AdditionalProperties.Boolean) + result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) + } + } + if schema.Properties != nil { + result += indent + "properties:\n" + for _, pair := range *(schema.Properties) { + name := pair.Name 
+ s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.PatternProperties != nil { + result += indent + "patternProperties:\n" + for _, pair := range *(schema.PatternProperties) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Dependencies != nil { + result += indent + "dependencies:\n" + for _, pair := range *(schema.Dependencies) { + name := pair.Name + schemaOrStringArray := pair.Value + s := schemaOrStringArray.Schema + if s != nil { + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } else { + a := schemaOrStringArray.StringArray + if a != nil { + result += indent + " " + name + ":\n" + for _, s2 := range *a { + result += indent + " " + " " + s2 + "\n" + } + } + } + + } + } + if schema.Enumeration != nil { + result += indent + "enumeration:\n" + for _, value := range *(schema.Enumeration) { + if value.String != nil { + result += indent + " " + fmt.Sprintf("%+v\n", *value.String) + } else { + result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) + } + } + } + if schema.Type != nil { + result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) + } + if schema.AllOf != nil { + result += indent + "allOf:\n" + for _, s := range *(schema.AllOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.AnyOf != nil { + result += indent + "anyOf:\n" + for _, s := range *(schema.AnyOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.OneOf != nil { + result += indent + "oneOf:\n" + for _, s := range *(schema.OneOf) { + result += s.describeSchema(indent + " ") + result += indent + "-\n" + } + } + if schema.Not != nil { + result += indent + "not:\n" + result += schema.Not.describeSchema(indent + " ") + } + if schema.Definitions != nil { + result += indent + "definitions:\n" + for _, pair := range *(schema.Definitions) { + name := pair.Name + s := pair.Value + result += indent + " " + name + ":\n" + result += s.describeSchema(indent + " " + " ") + } + } + if schema.Title != nil { + result += indent + "title: " + *(schema.Title) + "\n" + } + if schema.Description != nil { + result += indent + "description: " + *(schema.Description) + "\n" + } + if schema.Default != nil { + result += indent + "default:\n" + result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) + } + if schema.Format != nil { + result += indent + "format: " + *(schema.Format) + "\n" + } + if schema.Ref != nil { + result += indent + "$ref: " + *(schema.Ref) + "\n" + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/models.go b/vendor/github.com/googleapis/gnostic/jsonschema/models.go new file mode 100644 index 0000000..4781bdc --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/models.go @@ -0,0 +1,228 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package jsonschema supports the reading, writing, and manipulation +// of JSON Schemas. +package jsonschema + +import "gopkg.in/yaml.v3" + +// The Schema struct models a JSON Schema and, because schemas are +// defined hierarchically, contains many references to itself. +// All fields are pointers and are nil if the associated values +// are not specified. +type Schema struct { + Schema *string // $schema + ID *string // id keyword used for $ref resolution scope + Ref *string // $ref, i.e. JSON Pointers + + // http://json-schema.org/latest/json-schema-validation.html + // 5.1. Validation keywords for numeric instances (number and integer) + MultipleOf *SchemaNumber + Maximum *SchemaNumber + ExclusiveMaximum *bool + Minimum *SchemaNumber + ExclusiveMinimum *bool + + // 5.2. Validation keywords for strings + MaxLength *int64 + MinLength *int64 + Pattern *string + + // 5.3. Validation keywords for arrays + AdditionalItems *SchemaOrBoolean + Items *SchemaOrSchemaArray + MaxItems *int64 + MinItems *int64 + UniqueItems *bool + + // 5.4. Validation keywords for objects + MaxProperties *int64 + MinProperties *int64 + Required *[]string + AdditionalProperties *SchemaOrBoolean + Properties *[]*NamedSchema + PatternProperties *[]*NamedSchema + Dependencies *[]*NamedSchemaOrStringArray + + // 5.5. Validation keywords for any instance type + Enumeration *[]SchemaEnumValue + Type *StringOrStringArray + AllOf *[]*Schema + AnyOf *[]*Schema + OneOf *[]*Schema + Not *Schema + Definitions *[]*NamedSchema + + // 6. Metadata keywords + Title *string + Description *string + Default *yaml.Node + + // 7. Semantic validation with "format" + Format *string +} + +// These helper structs represent "combination" types that generally can +// have values of one type or another. All are used to represent parts +// of Schemas. + +// SchemaNumber represents a value that can be either an Integer or a Float. +type SchemaNumber struct { + Integer *int64 + Float *float64 +} + +// NewSchemaNumberWithInteger creates and returns a new object +func NewSchemaNumberWithInteger(i int64) *SchemaNumber { + result := &SchemaNumber{} + result.Integer = &i + return result +} + +// NewSchemaNumberWithFloat creates and returns a new object +func NewSchemaNumberWithFloat(f float64) *SchemaNumber { + result := &SchemaNumber{} + result.Float = &f + return result +} + +// SchemaOrBoolean represents a value that can be either a Schema or a Boolean. +type SchemaOrBoolean struct { + Schema *Schema + Boolean *bool +} + +// NewSchemaOrBooleanWithSchema creates and returns a new object +func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Schema = s + return result +} + +// NewSchemaOrBooleanWithBoolean creates and returns a new object +func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { + result := &SchemaOrBoolean{} + result.Boolean = &b + return result +} + +// StringOrStringArray represents a value that can be either +// a String or an Array of Strings. 
+type StringOrStringArray struct { + String *string + StringArray *[]string +} + +// NewStringOrStringArrayWithString creates and returns a new object +func NewStringOrStringArrayWithString(s string) *StringOrStringArray { + result := &StringOrStringArray{} + result.String = &s + return result +} + +// NewStringOrStringArrayWithStringArray creates and returns a new object +func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { + result := &StringOrStringArray{} + result.StringArray = &a + return result +} + +// SchemaOrStringArray represents a value that can be either +// a Schema or an Array of Strings. +type SchemaOrStringArray struct { + Schema *Schema + StringArray *[]string +} + +// SchemaOrSchemaArray represents a value that can be either +// a Schema or an Array of Schemas. +type SchemaOrSchemaArray struct { + Schema *Schema + SchemaArray *[]*Schema +} + +// NewSchemaOrSchemaArrayWithSchema creates and returns a new object +func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.Schema = s + return result +} + +// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object +func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { + result := &SchemaOrSchemaArray{} + result.SchemaArray = &a + return result +} + +// SchemaEnumValue represents a value that can be part of an +// enumeration in a Schema. +type SchemaEnumValue struct { + String *string + Bool *bool +} + +// NamedSchema is a name-value pair that is used to emulate maps +// with ordered keys. +type NamedSchema struct { + Name string + Value *Schema +} + +// NewNamedSchema creates and returns a new object +func NewNamedSchema(name string, value *Schema) *NamedSchema { + return &NamedSchema{Name: name, Value: value} +} + +// NamedSchemaOrStringArray is a name-value pair that is used +// to emulate maps with ordered keys. +type NamedSchemaOrStringArray struct { + Name string + Value *SchemaOrStringArray +} + +// Access named subschemas by name + +func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { + if array == nil { + return nil + } + for _, pair := range *array { + if pair.Name == name { + return pair.Value + } + } + return nil +} + +// PropertyWithName returns the selected element. +func (s *Schema) PropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Properties, name) +} + +// PatternPropertyWithName returns the selected element. +func (s *Schema) PatternPropertyWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.PatternProperties, name) +} + +// DefinitionWithName returns the selected element. +func (s *Schema) DefinitionWithName(name string) *Schema { + return namedSchemaArrayElementWithName(s.Definitions, name) +} + +// AddProperty adds a named property. +func (s *Schema) AddProperty(name string, property *Schema) { + *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/operations.go b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go new file mode 100644 index 0000000..ba8dd4a --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go @@ -0,0 +1,394 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + "log" + "strings" +) + +// +// OPERATIONS +// The following methods perform operations on Schemas. +// + +// IsEmpty returns true if no members of the Schema are specified. +func (schema *Schema) IsEmpty() bool { + return (schema.Schema == nil) && + (schema.ID == nil) && + (schema.MultipleOf == nil) && + (schema.Maximum == nil) && + (schema.ExclusiveMaximum == nil) && + (schema.Minimum == nil) && + (schema.ExclusiveMinimum == nil) && + (schema.MaxLength == nil) && + (schema.MinLength == nil) && + (schema.Pattern == nil) && + (schema.AdditionalItems == nil) && + (schema.Items == nil) && + (schema.MaxItems == nil) && + (schema.MinItems == nil) && + (schema.UniqueItems == nil) && + (schema.MaxProperties == nil) && + (schema.MinProperties == nil) && + (schema.Required == nil) && + (schema.AdditionalProperties == nil) && + (schema.Properties == nil) && + (schema.PatternProperties == nil) && + (schema.Dependencies == nil) && + (schema.Enumeration == nil) && + (schema.Type == nil) && + (schema.AllOf == nil) && + (schema.AnyOf == nil) && + (schema.OneOf == nil) && + (schema.Not == nil) && + (schema.Definitions == nil) && + (schema.Title == nil) && + (schema.Description == nil) && + (schema.Default == nil) && + (schema.Format == nil) && + (schema.Ref == nil) +} + +// IsEqual returns true if two schemas are equal. +func (schema *Schema) IsEqual(schema2 *Schema) bool { + return schema.String() == schema2.String() +} + +// SchemaOperation represents a function that can be applied to a Schema. +type SchemaOperation func(schema *Schema, context string) + +// Applies a specified function to a Schema and all of the Schemas that it contains. 
+func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) { + + if schema.AdditionalItems != nil { + s := schema.AdditionalItems.Schema + if s != nil { + s.applyToSchemas(operation, "AdditionalItems") + } + } + + if schema.Items != nil { + if schema.Items.SchemaArray != nil { + for _, s := range *(schema.Items.SchemaArray) { + s.applyToSchemas(operation, "Items.SchemaArray") + } + } else if schema.Items.Schema != nil { + schema.Items.Schema.applyToSchemas(operation, "Items.Schema") + } + } + + if schema.AdditionalProperties != nil { + s := schema.AdditionalProperties.Schema + if s != nil { + s.applyToSchemas(operation, "AdditionalProperties") + } + } + + if schema.Properties != nil { + for _, pair := range *(schema.Properties) { + s := pair.Value + s.applyToSchemas(operation, "Properties") + } + } + if schema.PatternProperties != nil { + for _, pair := range *(schema.PatternProperties) { + s := pair.Value + s.applyToSchemas(operation, "PatternProperties") + } + } + + if schema.Dependencies != nil { + for _, pair := range *(schema.Dependencies) { + schemaOrStringArray := pair.Value + s := schemaOrStringArray.Schema + if s != nil { + s.applyToSchemas(operation, "Dependencies") + } + } + } + + if schema.AllOf != nil { + for _, s := range *(schema.AllOf) { + s.applyToSchemas(operation, "AllOf") + } + } + if schema.AnyOf != nil { + for _, s := range *(schema.AnyOf) { + s.applyToSchemas(operation, "AnyOf") + } + } + if schema.OneOf != nil { + for _, s := range *(schema.OneOf) { + s.applyToSchemas(operation, "OneOf") + } + } + if schema.Not != nil { + schema.Not.applyToSchemas(operation, "Not") + } + + if schema.Definitions != nil { + for _, pair := range *(schema.Definitions) { + s := pair.Value + s.applyToSchemas(operation, "Definitions") + } + } + + operation(schema, context) +} + +// CopyProperties copies all non-nil properties from the source Schema to the schema Schema. 
+func (schema *Schema) CopyProperties(source *Schema) { + if source.Schema != nil { + schema.Schema = source.Schema + } + if source.ID != nil { + schema.ID = source.ID + } + if source.MultipleOf != nil { + schema.MultipleOf = source.MultipleOf + } + if source.Maximum != nil { + schema.Maximum = source.Maximum + } + if source.ExclusiveMaximum != nil { + schema.ExclusiveMaximum = source.ExclusiveMaximum + } + if source.Minimum != nil { + schema.Minimum = source.Minimum + } + if source.ExclusiveMinimum != nil { + schema.ExclusiveMinimum = source.ExclusiveMinimum + } + if source.MaxLength != nil { + schema.MaxLength = source.MaxLength + } + if source.MinLength != nil { + schema.MinLength = source.MinLength + } + if source.Pattern != nil { + schema.Pattern = source.Pattern + } + if source.AdditionalItems != nil { + schema.AdditionalItems = source.AdditionalItems + } + if source.Items != nil { + schema.Items = source.Items + } + if source.MaxItems != nil { + schema.MaxItems = source.MaxItems + } + if source.MinItems != nil { + schema.MinItems = source.MinItems + } + if source.UniqueItems != nil { + schema.UniqueItems = source.UniqueItems + } + if source.MaxProperties != nil { + schema.MaxProperties = source.MaxProperties + } + if source.MinProperties != nil { + schema.MinProperties = source.MinProperties + } + if source.Required != nil { + schema.Required = source.Required + } + if source.AdditionalProperties != nil { + schema.AdditionalProperties = source.AdditionalProperties + } + if source.Properties != nil { + schema.Properties = source.Properties + } + if source.PatternProperties != nil { + schema.PatternProperties = source.PatternProperties + } + if source.Dependencies != nil { + schema.Dependencies = source.Dependencies + } + if source.Enumeration != nil { + schema.Enumeration = source.Enumeration + } + if source.Type != nil { + schema.Type = source.Type + } + if source.AllOf != nil { + schema.AllOf = source.AllOf + } + if source.AnyOf != nil { + schema.AnyOf = source.AnyOf + } + if source.OneOf != nil { + schema.OneOf = source.OneOf + } + if source.Not != nil { + schema.Not = source.Not + } + if source.Definitions != nil { + schema.Definitions = source.Definitions + } + if source.Title != nil { + schema.Title = source.Title + } + if source.Description != nil { + schema.Description = source.Description + } + if source.Default != nil { + schema.Default = source.Default + } + if source.Format != nil { + schema.Format = source.Format + } + if source.Ref != nil { + schema.Ref = source.Ref + } +} + +// TypeIs returns true if the Type of a Schema includes the specified type +func (schema *Schema) TypeIs(typeName string) bool { + if schema.Type != nil { + // the schema Type is either a string or an array of strings + if schema.Type.String != nil { + return (*(schema.Type.String) == typeName) + } else if schema.Type.StringArray != nil { + for _, n := range *(schema.Type.StringArray) { + if n == typeName { + return true + } + } + } + } + return false +} + +// ResolveRefs resolves "$ref" elements in a Schema and its children. +// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, +// the reference is kept and we expect downstream tools to separately model these +// referenced schemas. 
+func (schema *Schema) ResolveRefs() { + rootSchema := schema + count := 1 + for count > 0 { + count = 0 + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.Ref != nil { + resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref)) + if err != nil { + log.Printf("%+v", err) + } else if resolvedRef.TypeIs("object") { + // don't substitute for objects, we'll model the referenced schema with a class + } else if context == "OneOf" { + // don't substitute for references inside oneOf declarations + } else if resolvedRef.OneOf != nil { + // don't substitute for references that contain oneOf declarations + } else if resolvedRef.AdditionalProperties != nil { + // don't substitute for references that look like objects + } else { + schema.Ref = nil + schema.CopyProperties(resolvedRef) + count++ + } + } + }, "") + } +} + +// resolveJSONPointer resolves JSON pointers. +// This current implementation is very crude and custom for OpenAPI 2.0 schemas. +// It panics for any pointer that it is unable to resolve. +func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) { + parts := strings.Split(ref, "#") + if len(parts) == 2 { + documentName := parts[0] + "#" + if documentName == "#" && schema.ID != nil { + documentName = *(schema.ID) + } + path := parts[1] + document := schemas[documentName] + pathParts := strings.Split(path, "/") + + // we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases + if len(pathParts) == 1 { + return document, nil + } else if len(pathParts) == 3 { + switch pathParts[1] { + case "definitions": + dictionary := document.Definitions + for _, pair := range *dictionary { + if pair.Name == pathParts[2] { + result = pair.Value + } + } + case "properties": + dictionary := document.Properties + for _, pair := range *dictionary { + if pair.Name == pathParts[2] { + result = pair.Value + } + } + default: + break + } + } + } + if result == nil { + return nil, fmt.Errorf("unresolved pointer: %+v", ref) + } + return result, nil +} + +// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema. +func (schema *Schema) ResolveAllOfs() { + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.AllOf != nil { + for _, allOf := range *(schema.AllOf) { + schema.CopyProperties(allOf) + } + schema.AllOf = nil + } + }, "resolveAllOfs") +} + +// ResolveAnyOfs replaces all "anyOf" elements with "oneOf". 
+func (schema *Schema) ResolveAnyOfs() { + schema.applyToSchemas( + func(schema *Schema, context string) { + if schema.AnyOf != nil { + schema.OneOf = schema.AnyOf + schema.AnyOf = nil + } + }, "resolveAnyOfs") +} + +// return a pointer to a copy of a passed-in string +func stringptr(input string) (output *string) { + return &input +} + +// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperty(name string) { + *schema.Properties = append(*schema.Properties, + NewNamedSchema(name, + &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) +} + +// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition +func (schema *Schema) CopyOfficialSchemaProperties(names []string) { + for _, name := range names { + schema.CopyOfficialSchemaProperty(name) + } +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/reader.go b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go new file mode 100644 index 0000000..b8583d4 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go @@ -0,0 +1,442 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate go run generate-base.go + +package jsonschema + +import ( + "fmt" + "io/ioutil" + "strconv" + + "gopkg.in/yaml.v3" +) + +// This is a global map of all known Schemas. +// It is initialized when the first Schema is created and inserted. +var schemas map[string]*Schema + +// NewBaseSchema builds a schema object from an embedded json representation. +func NewBaseSchema() (schema *Schema, err error) { + b, err := baseSchemaBytes() + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(b, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromFile reads a schema from a file. +// Currently this assumes that schemas are stored in the source distribution of this project. +func NewSchemaFromFile(filename string) (schema *Schema, err error) { + file, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + var node yaml.Node + err = yaml.Unmarshal(file, &node) + if err != nil { + return nil, err + } + return NewSchemaFromObject(&node), nil +} + +// NewSchemaFromObject constructs a schema from a parsed JSON object. +// Due to the complexity of the schema representation, this is a +// custom reader and not the standard Go JSON reader (encoding/json). 
+func NewSchemaFromObject(jsonData *yaml.Node) *Schema { + switch jsonData.Kind { + case yaml.DocumentNode: + return NewSchemaFromObject(jsonData.Content[0]) + case yaml.MappingNode: + schema := &Schema{} + + for i := 0; i < len(jsonData.Content); i += 2 { + k := jsonData.Content[i].Value + v := jsonData.Content[i+1] + + switch k { + case "$schema": + schema.Schema = schema.stringValue(v) + case "id": + schema.ID = schema.stringValue(v) + + case "multipleOf": + schema.MultipleOf = schema.numberValue(v) + case "maximum": + schema.Maximum = schema.numberValue(v) + case "exclusiveMaximum": + schema.ExclusiveMaximum = schema.boolValue(v) + case "minimum": + schema.Minimum = schema.numberValue(v) + case "exclusiveMinimum": + schema.ExclusiveMinimum = schema.boolValue(v) + + case "maxLength": + schema.MaxLength = schema.intValue(v) + case "minLength": + schema.MinLength = schema.intValue(v) + case "pattern": + schema.Pattern = schema.stringValue(v) + + case "additionalItems": + schema.AdditionalItems = schema.schemaOrBooleanValue(v) + case "items": + schema.Items = schema.schemaOrSchemaArrayValue(v) + case "maxItems": + schema.MaxItems = schema.intValue(v) + case "minItems": + schema.MinItems = schema.intValue(v) + case "uniqueItems": + schema.UniqueItems = schema.boolValue(v) + + case "maxProperties": + schema.MaxProperties = schema.intValue(v) + case "minProperties": + schema.MinProperties = schema.intValue(v) + case "required": + schema.Required = schema.arrayOfStringsValue(v) + case "additionalProperties": + schema.AdditionalProperties = schema.schemaOrBooleanValue(v) + case "properties": + schema.Properties = schema.mapOfSchemasValue(v) + case "patternProperties": + schema.PatternProperties = schema.mapOfSchemasValue(v) + case "dependencies": + schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v) + + case "enum": + schema.Enumeration = schema.arrayOfEnumValuesValue(v) + + case "type": + schema.Type = schema.stringOrStringArrayValue(v) + case "allOf": + schema.AllOf = schema.arrayOfSchemasValue(v) + case "anyOf": + schema.AnyOf = schema.arrayOfSchemasValue(v) + case "oneOf": + schema.OneOf = schema.arrayOfSchemasValue(v) + case "not": + schema.Not = NewSchemaFromObject(v) + case "definitions": + schema.Definitions = schema.mapOfSchemasValue(v) + + case "title": + schema.Title = schema.stringValue(v) + case "description": + schema.Description = schema.stringValue(v) + + case "default": + schema.Default = v + + case "format": + schema.Format = schema.stringValue(v) + case "$ref": + schema.Ref = schema.stringValue(v) + default: + fmt.Printf("UNSUPPORTED (%s)\n", k) + } + } + + // insert schema in global map + if schema.ID != nil { + if schemas == nil { + schemas = make(map[string]*Schema, 0) + } + schemas[*(schema.ID)] = schema + } + return schema + + default: + fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) + return nil + } + + return nil +} + +// +// BUILDERS +// The following methods build elements of Schemas from interface{} values. +// Each returns nil if it is unable to build the desired element. +// + +// Gets the string value of an interface{} value if possible. +func (schema *Schema) stringValue(v *yaml.Node) *string { + switch v.Kind { + case yaml.ScalarNode: + return &v.Value + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the numeric value of an interface{} value if possible. 
+func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber { + number := &SchemaNumber{} + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!float": + v2, _ := strconv.ParseFloat(v.Value, 64) + number.Float = &v2 + return number + case "!!int": + v2, _ := strconv.ParseInt(v.Value, 10, 64) + number.Integer = &v2 + return number + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("stringValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the integer value of an interface{} value if possible. +func (schema *Schema) intValue(v *yaml.Node) *int64 { + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!float": + v2, _ := strconv.ParseFloat(v.Value, 64) + v3 := int64(v2) + return &v3 + case "!!int": + v2, _ := strconv.ParseInt(v.Value, 10, 64) + return &v2 + default: + fmt.Printf("intValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("intValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets the bool value of an interface{} value if possible. +func (schema *Schema) boolValue(v *yaml.Node) *bool { + switch v.Kind { + case yaml.ScalarNode: + switch v.Tag { + case "!!bool": + v2, _ := strconv.ParseBool(v.Value) + return &v2 + default: + fmt.Printf("boolValue: unexpected node %+v\n", v) + } + default: + fmt.Printf("boolValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a map of Schemas from an interface{} value if possible. +func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema { + switch v.Kind { + case yaml.MappingNode: + m := make([]*NamedSchema, 0) + for i := 0; i < len(v.Content); i += 2 { + k2 := v.Content[i].Value + v2 := v.Content[i+1] + pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)} + m = append(m, pair) + } + return &m + default: + fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of Schemas from an interface{} value if possible. +func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema { + switch v.Kind { + case yaml.SequenceNode: + m := make([]*Schema, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.MappingNode: + s := NewSchemaFromObject(v2) + m = append(m, s) + default: + fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2) + } + } + return &m + case yaml.MappingNode: + m := make([]*Schema, 0) + s := NewSchemaFromObject(v) + m = append(m, s) + return &m + default: + fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a Schema or an array of Schemas from an interface{} value if possible. +func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray { + switch v.Kind { + case yaml.SequenceNode: + m := make([]*Schema, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.MappingNode: + s := NewSchemaFromObject(v2) + m = append(m, s) + default: + fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2) + } + } + return &SchemaOrSchemaArray{SchemaArray: &m} + case yaml.MappingNode: + s := NewSchemaFromObject(v) + return &SchemaOrSchemaArray{Schema: s} + default: + fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of strings from an interface{} value if possible. 
+func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string { + switch v.Kind { + case yaml.ScalarNode: + a := []string{v.Value} + return &a + case yaml.SequenceNode: + a := make([]string, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + a = append(a, v2.Value) + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) + } + } + return &a + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets a string or an array of strings from an interface{} value if possible. +func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray { + switch v.Kind { + case yaml.ScalarNode: + s := &StringOrStringArray{} + s.String = &v.Value + return s + case yaml.SequenceNode: + a := make([]string, 0) + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + a = append(a, v2.Value) + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) + } + } + s := &StringOrStringArray{} + s.StringArray = &a + return s + default: + fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) + } + return nil +} + +// Gets an array of enum values from an interface{} value if possible. +func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue { + a := make([]SchemaEnumValue, 0) + switch v.Kind { + case yaml.SequenceNode: + for _, v2 := range v.Content { + switch v2.Kind { + case yaml.ScalarNode: + switch v2.Tag { + case "!!str": + a = append(a, SchemaEnumValue{String: &v2.Value}) + case "!!bool": + v3, _ := strconv.ParseBool(v2.Value) + a = append(a, SchemaEnumValue{Bool: &v3}) + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag) + } + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2) + } + } + default: + fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v) + } + return &a +} + +// Gets a map of schemas or string arrays from an interface{} value if possible. +func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray { + m := make([]*NamedSchemaOrStringArray, 0) + switch v.Kind { + case yaml.MappingNode: + for i := 0; i < len(v.Content); i += 2 { + k2 := v.Content[i].Value + v2 := v.Content[i+1] + switch v2.Kind { + case yaml.SequenceNode: + a := make([]string, 0) + for _, v3 := range v2.Content { + switch v3.Kind { + case yaml.ScalarNode: + a = append(a, v3.Value) + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3) + } + } + s := &SchemaOrStringArray{} + s.StringArray = &a + pair := &NamedSchemaOrStringArray{Name: k2, Value: s} + m = append(m, pair) + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2) + } + } + default: + fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v) + } + return &m +} + +// Gets a schema or a boolean value from an interface{} value if possible. 
+func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { + schemaOrBoolean := &SchemaOrBoolean{} + switch v.Kind { + case yaml.ScalarNode: + v2, _ := strconv.ParseBool(v.Value) + schemaOrBoolean.Boolean = &v2 + case yaml.MappingNode: + schemaOrBoolean.Schema = NewSchemaFromObject(v) + default: + fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) + } + return schemaOrBoolean +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/schema.json b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json new file mode 100644 index 0000000..85eb502 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json @@ -0,0 +1,150 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uri" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": 
"#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/writer.go b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go new file mode 100644 index 0000000..340dc5f --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go @@ -0,0 +1,369 @@ +// Copyright 2017 Google LLC. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package jsonschema + +import ( + "fmt" + + "gopkg.in/yaml.v3" +) + +const indentation = " " + +func renderMappingNode(node *yaml.Node, indent string) (result string) { + result = "{\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i += 2 { + // first print the key + key := node.Content[i].Value + result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) + // then the value + value := node.Content[i+1] + switch value.Kind { + case yaml.ScalarNode: + result += "\"" + value.Value + "\"" + case yaml.MappingNode: + result += renderMappingNode(value, innerIndent) + case yaml.SequenceNode: + result += renderSequenceNode(value, innerIndent) + default: + result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) + } + if i < len(node.Content)-2 { + result += "," + } + result += "\n" + } + + result += indent + "}" + return result +} + +func renderSequenceNode(node *yaml.Node, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i := 0; i < len(node.Content); i++ { + item := node.Content[i] + switch item.Kind { + case yaml.ScalarNode: + result += innerIndent + "\"" + item.Value + "\"" + case yaml.MappingNode: + result += innerIndent + renderMappingNode(item, innerIndent) + "" + default: + result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) + } + if i < len(node.Content)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +func renderStringArray(array []string, indent string) (result string) { + result = "[\n" + innerIndent := indent + indentation + for i, item := range array { + result += innerIndent + "\"" + item + "\"" + if i < len(array)-1 { + result += "," + } + result += "\n" + } + result += indent + "]" + return result +} + +// Render renders a yaml.Node as JSON +func Render(node *yaml.Node) string { + if node.Kind == yaml.DocumentNode { + if len(node.Content) == 1 { + return Render(node.Content[0]) + } + } else if node.Kind == yaml.MappingNode { + return renderMappingNode(node, "") + "\n" + } else if node.Kind == yaml.SequenceNode { + return renderSequenceNode(node, "") + "\n" + } + return "" +} + +func (object *SchemaNumber) nodeValue() *yaml.Node { + if object.Integer != nil { + return nodeForInt64(*object.Integer) + } else if object.Float != nil { + return nodeForFloat64(*object.Float) + } else { + return nil + } 
+} + +func (object *SchemaOrBoolean) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.Boolean != nil { + return nodeForBoolean(*object.Boolean) + } else { + return nil + } +} + +func nodeForStringArray(array []string) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, nodeForString(item)) + } + return nodeForSequence(content) +} + +func nodeForSchemaArray(array []*Schema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func (object *StringOrStringArray) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrStringArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.StringArray != nil { + return nodeForStringArray(*(object.StringArray)) + } else { + return nil + } +} + +func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { + if object.Schema != nil { + return object.Schema.nodeValue() + } else if object.SchemaArray != nil { + return nodeForSchemaArray(*(object.SchemaArray)) + } else { + return nil + } +} + +func (object *SchemaEnumValue) nodeValue() *yaml.Node { + if object.String != nil { + return nodeForString(*object.String) + } else if object.Bool != nil { + return nodeForBoolean(*object.Bool) + } else { + return nil + } +} + +func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, pair := range *(array) { + content = appendPair(content, pair.Name, pair.Value.nodeValue()) + } + return nodeForMapping(content) +} + +func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { + content := make([]*yaml.Node, 0) + for _, item := range *array { + content = append(content, item.nodeValue()) + } + return nodeForSequence(content) +} + +func nodeForMapping(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.MappingNode, + Content: content, + } +} + +func nodeForSequence(content []*yaml.Node) *yaml.Node { + return &yaml.Node{ + Kind: yaml.SequenceNode, + Content: content, + } +} + +func nodeForString(value string) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!str", + Value: value, + } +} + +func nodeForBoolean(value bool) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!bool", + Value: fmt.Sprintf("%t", value), + } +} + +func nodeForInt64(value int64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!int", + Value: fmt.Sprintf("%d", value), + } +} + +func nodeForFloat64(value float64) *yaml.Node { + return &yaml.Node{ + Kind: yaml.ScalarNode, + Tag: "!!float", + Value: fmt.Sprintf("%f", value), + } +} + +func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { + nodes = append(nodes, nodeForString(name)) + nodes = append(nodes, value) + return nodes +} + +func (schema *Schema) nodeValue() *yaml.Node { + n := &yaml.Node{Kind: yaml.MappingNode} + content := make([]*yaml.Node, 0) + if schema.Title != 
nil { + content = appendPair(content, "title", nodeForString(*schema.Title)) + } + if schema.ID != nil { + content = appendPair(content, "id", nodeForString(*schema.ID)) + } + if schema.Schema != nil { + content = appendPair(content, "$schema", nodeForString(*schema.Schema)) + } + if schema.Type != nil { + content = appendPair(content, "type", schema.Type.nodeValue()) + } + if schema.Items != nil { + content = appendPair(content, "items", schema.Items.nodeValue()) + } + if schema.Description != nil { + content = appendPair(content, "description", nodeForString(*schema.Description)) + } + if schema.Required != nil { + content = appendPair(content, "required", nodeForStringArray(*schema.Required)) + } + if schema.AdditionalProperties != nil { + content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) + } + if schema.PatternProperties != nil { + content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) + } + if schema.Properties != nil { + content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) + } + if schema.Dependencies != nil { + content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) + } + if schema.Ref != nil { + content = appendPair(content, "$ref", nodeForString(*schema.Ref)) + } + if schema.MultipleOf != nil { + content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) + } + if schema.Maximum != nil { + content = appendPair(content, "maximum", schema.Maximum.nodeValue()) + } + if schema.ExclusiveMaximum != nil { + content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) + } + if schema.Minimum != nil { + content = appendPair(content, "minimum", schema.Minimum.nodeValue()) + } + if schema.ExclusiveMinimum != nil { + content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) + } + if schema.MaxLength != nil { + content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) + } + if schema.MinLength != nil { + content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) + } + if schema.Pattern != nil { + content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) + } + if schema.AdditionalItems != nil { + content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) + } + if schema.MaxItems != nil { + content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) + } + if schema.MinItems != nil { + content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) + } + if schema.UniqueItems != nil { + content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) + } + if schema.MaxProperties != nil { + content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) + } + if schema.MinProperties != nil { + content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) + } + if schema.Enumeration != nil { + content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) + } + if schema.AllOf != nil { + content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) + } + if schema.AnyOf != nil { + content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) + } + if schema.OneOf != nil { + content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) + } + if schema.Not != nil { + content = appendPair(content, "not", schema.Not.nodeValue()) + } + if 
schema.Definitions != nil { + content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) + } + if schema.Default != nil { + // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) + } + if schema.Format != nil { + content = appendPair(content, "format", nodeForString(*schema.Format)) + } + n.Content = content + return n +} + +// JSONString returns a json representation of a schema. +func (schema *Schema) JSONString() string { + node := schema.nodeValue() + return Render(node) +} diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go index 95ae54f..29b31cf 100644 --- a/vendor/github.com/json-iterator/go/iter.go +++ b/vendor/github.com/json-iterator/go/iter.go @@ -74,6 +74,7 @@ type Iterator struct { buf []byte head int tail int + depth int captureStartedAt int captured []byte Error error @@ -88,6 +89,7 @@ func NewIterator(cfg API) *Iterator { buf: nil, head: 0, tail: 0, + depth: 0, } } @@ -99,6 +101,7 @@ func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { buf: make([]byte, bufSize), head: 0, tail: 0, + depth: 0, } } @@ -110,6 +113,7 @@ func ParseBytes(cfg API, input []byte) *Iterator { buf: input, head: 0, tail: len(input), + depth: 0, } } @@ -128,6 +132,7 @@ func (iter *Iterator) Reset(reader io.Reader) *Iterator { iter.reader = reader iter.head = 0 iter.tail = 0 + iter.depth = 0 return iter } @@ -137,6 +142,7 @@ func (iter *Iterator) ResetBytes(input []byte) *Iterator { iter.buf = input iter.head = 0 iter.tail = len(input) + iter.depth = 0 return iter } @@ -320,3 +326,24 @@ func (iter *Iterator) Read() interface{} { return nil } } + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go index 6188cb4..204fe0e 100644 --- a/vendor/github.com/json-iterator/go/iter_array.go +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -28,26 +28,32 @@ func (iter *Iterator) ReadArray() (ret bool) { func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { c := iter.nextToken() if c == '[' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c != ']' { iter.unreadByte() if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() for c == ',' { if !callback(iter) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != ']' { iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } - return true + return iter.decrementDepth() } if c == 'n' { iter.skipThreeBytes('u', 'l', 'l') diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go index 1c57576..b651371 100644 --- a/vendor/github.com/json-iterator/go/iter_object.go +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -112,6 +112,9 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { c := 
iter.nextToken() var field string if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() @@ -121,6 +124,7 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -131,20 +135,23 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { @@ -159,15 +166,20 @@ func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { c := iter.nextToken() if c == '{' { + if !iter.incrementDepth() { + return false + } c = iter.nextToken() if c == '"' { iter.unreadByte() field := iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() @@ -175,23 +187,27 @@ func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { field = iter.ReadString() if iter.nextToken() != ':' { iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() return false } if !callback(iter, field) { + iter.decrementDepth() return false } c = iter.nextToken() } if c != '}' { iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() return false } - return true + return iter.decrementDepth() } if c == '}' { - return true + return iter.decrementDepth() } iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() return false } if c == 'n' { diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go index 8fcdc3b..9303de4 100644 --- a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -22,6 +22,9 @@ func (iter *Iterator) skipNumber() { func (iter *Iterator) skipArray() { level := 1 + if !iter.incrementDepth() { + return + } for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -31,8 +34,14 @@ func (iter *Iterator) skipArray() { i = iter.head - 1 // it will be i++ soon case '[': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case ']': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { @@ -50,6 +59,10 @@ func (iter *Iterator) skipArray() { func (iter *Iterator) skipObject() { level := 1 + if !iter.incrementDepth() { + return + } + for { for i := iter.head; i < iter.tail; i++ { switch iter.buf[i] { @@ -59,8 +72,14 @@ func (iter *Iterator) skipObject() { i = iter.head - 1 // it will be i++ 
soon case '{': // If open symbol, increase level level++ + if !iter.incrementDepth() { + return + } case '}': // If close symbol, increase level level-- + if !iter.decrementDepth() { + return + } // If we have returned to the original level, we're done if level == 0 { diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go index 4459e20..74974ba 100644 --- a/vendor/github.com/json-iterator/go/reflect.go +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -60,6 +60,7 @@ func (b *ctx) append(prefix string) *ctx { // ReadVal copy the underlying JSON into go interface, same as json.Unmarshal func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth cacheKey := reflect2.RTypeOf(obj) decoder := iter.cfg.getDecoderFromCache(cacheKey) if decoder == nil { @@ -76,6 +77,10 @@ func (iter *Iterator) ReadVal(obj interface{}) { return } decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } } // WriteVal copy the go interface into underlying JSON, same as json.Marshal diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go index 05e8fbf..e27e8d1 100644 --- a/vendor/github.com/json-iterator/go/reflect_extension.go +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -341,10 +341,10 @@ func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { if ctx.onlyTaggedField && !hastag && !field.Anonymous() { continue } - tagParts := strings.Split(tag, ",") if tag == "-" { continue } + tagParts := strings.Split(tag, ",") if field.Anonymous() && (tag == "" || tagParts[0] == "") { if field.Type().Kind() == reflect.Struct { structDescriptor := describeStruct(ctx, field.Type()) diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go index 547b442..08e9a39 100644 --- a/vendor/github.com/json-iterator/go/reflect_map.go +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -249,6 +249,10 @@ type mapEncoder struct { } func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } stream.WriteObjectStart() iter := encoder.mapType.UnsafeIterate(ptr) for i := 0; iter.HasNext(); i++ { diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go index fea5071..3e21f37 100644 --- a/vendor/github.com/json-iterator/go/reflect_marshaler.go +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -3,8 +3,9 @@ package jsoniter import ( "encoding" "encoding/json" - "github.com/modern-go/reflect2" "unsafe" + + "github.com/modern-go/reflect2" ) var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() @@ -93,10 +94,17 @@ func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { stream.WriteNil() return } - bytes, err := json.Marshal(obj) + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() if err != nil { stream.Error = err } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } stream.Write(bytes) } } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 932641a..5ad5cc5 100644 --- 
a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -500,6 +500,9 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } var c byte for c = ','; c == ','; c = iter.nextToken() { decoder.decodeOneField(ptr, iter) @@ -510,6 +513,7 @@ func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if c != '}' { iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) } + iter.decrementDepth() } func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { @@ -571,6 +575,9 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { if iter.readFieldHash() == decoder.fieldHash { decoder.fieldDecoder.Decode(ptr, iter) @@ -584,6 +591,7 @@ func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type twoFieldsStructDecoder struct { @@ -598,6 +606,9 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -614,6 +625,7 @@ func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type threeFieldsStructDecoder struct { @@ -630,6 +642,9 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -648,6 +663,7 @@ func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fourFieldsStructDecoder struct { @@ -666,6 +682,9 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -686,6 +705,7 @@ func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type fiveFieldsStructDecoder struct { @@ -706,6 +726,9 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -728,6 +751,7 @@ func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sixFieldsStructDecoder struct { @@ -750,6 +774,9 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch 
iter.readFieldHash() { case decoder.fieldHash1: @@ -774,6 +801,7 @@ func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type sevenFieldsStructDecoder struct { @@ -798,6 +826,9 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -824,6 +855,7 @@ func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type eightFieldsStructDecoder struct { @@ -850,6 +882,9 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -878,6 +913,7 @@ func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterat if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type nineFieldsStructDecoder struct { @@ -906,6 +942,9 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -936,6 +975,7 @@ func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterato if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type tenFieldsStructDecoder struct { @@ -966,6 +1006,9 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if !iter.readObjectStart() { return } + if !iter.incrementDepth() { + return + } for { switch iter.readFieldHash() { case decoder.fieldHash1: @@ -998,6 +1041,7 @@ func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator if iter.Error != nil && iter.Error != io.EOF { iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) } + iter.decrementDepth() } type structFieldDecoder struct { diff --git a/vendor/golang.org/x/crypto/AUTHORS b/vendor/golang.org/x/crypto/AUTHORS new file mode 100644 index 0000000..2b00ddb --- /dev/null +++ b/vendor/golang.org/x/crypto/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/crypto/CONTRIBUTORS b/vendor/golang.org/x/crypto/CONTRIBUTORS new file mode 100644 index 0000000..1fbd3e9 --- /dev/null +++ b/vendor/golang.org/x/crypto/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/crypto/LICENSE b/vendor/golang.org/x/crypto/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/crypto/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/crypto/PATENTS b/vendor/golang.org/x/crypto/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/crypto/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/crypto/ssh/terminal/terminal.go b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go new file mode 100644 index 0000000..9d666ff --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/terminal.go @@ -0,0 +1,955 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package terminal + +import ( + "bytes" + "io" + "sync" + "unicode/utf8" +) + +// EscapeCodes contains escape sequences that can be written to the terminal in +// order to achieve different styles of text. +type EscapeCodes struct { + // Foreground colors + Black, Red, Green, Yellow, Blue, Magenta, Cyan, White []byte + + // Reset all attributes + Reset []byte +} + +var vt100EscapeCodes = EscapeCodes{ + Black: []byte{keyEscape, '[', '3', '0', 'm'}, + Red: []byte{keyEscape, '[', '3', '1', 'm'}, + Green: []byte{keyEscape, '[', '3', '2', 'm'}, + Yellow: []byte{keyEscape, '[', '3', '3', 'm'}, + Blue: []byte{keyEscape, '[', '3', '4', 'm'}, + Magenta: []byte{keyEscape, '[', '3', '5', 'm'}, + Cyan: []byte{keyEscape, '[', '3', '6', 'm'}, + White: []byte{keyEscape, '[', '3', '7', 'm'}, + + Reset: []byte{keyEscape, '[', '0', 'm'}, +} + +// Terminal contains the state for running a VT100 terminal that is capable of +// reading lines of input. +type Terminal struct { + // AutoCompleteCallback, if non-null, is called for each keypress with + // the full input line and the current position of the cursor (in + // bytes, as an index into |line|). If it returns ok=false, the key + // press is processed normally. Otherwise it returns a replacement line + // and the new cursor position. + AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool) + + // Escape contains a pointer to the escape codes for this terminal. + // It's always a valid pointer, although the escape codes themselves + // may be empty if the terminal doesn't support them. + Escape *EscapeCodes + + // lock protects the terminal and the state in this object from + // concurrent processing of a key press and a Write() call. + lock sync.Mutex + + c io.ReadWriter + prompt []rune + + // line is the current line being entered. + line []rune + // pos is the logical position of the cursor in line + pos int + // echo is true if local echo is enabled + echo bool + // pasteActive is true iff there is a bracketed paste operation in + // progress. + pasteActive bool + + // cursorX contains the current X value of the cursor where the left + // edge is 0. cursorY contains the row number where the first row of + // the current line is 0. + cursorX, cursorY int + // maxLine is the greatest value of cursorY so far. + maxLine int + + termWidth, termHeight int + + // outBuf contains the terminal data to be sent. + outBuf []byte + // remainder contains the remainder of any partial key sequences after + // a read. It aliases into inBuf. + remainder []byte + inBuf [256]byte + + // history contains previously entered commands so that they can be + // accessed with the up and down keys. + history stRingBuffer + // historyIndex stores the currently accessed history entry, where zero + // means the immediately previous entry. + historyIndex int + // When navigating up and down the history it's possible to return to + // the incomplete, initial line. That value is stored in + // historyPending. + historyPending string +} + +// NewTerminal runs a VT100 terminal on the given ReadWriter. If the ReadWriter is +// a local terminal, that terminal must first have been put into raw mode. +// prompt is a string that is written at the start of each input line (i.e. +// "> "). 
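As a rough usage sketch of the vendored Terminal API above (illustrative only, not taken from this patch): NewTerminal wants a single io.ReadWriter and a tty already in raw mode, so the `stdio` adapter below is an assumed helper that joins os.Stdin and os.Stdout, and MakeRaw/Restore come from the platform util files added later in this commit.

```go
package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

// stdio is an assumed helper: it joins os.Stdin and os.Stdout into the
// single io.ReadWriter that NewTerminal expects.
type stdio struct{}

func (stdio) Read(p []byte) (int, error)  { return os.Stdin.Read(p) }
func (stdio) Write(p []byte) (int, error) { return os.Stdout.Write(p) }

func main() {
	// Line editing only works once the local terminal is in raw mode.
	fd := int(os.Stdin.Fd())
	oldState, err := terminal.MakeRaw(fd)
	if err != nil {
		panic(err)
	}
	defer terminal.Restore(fd, oldState)

	t := terminal.NewTerminal(stdio{}, "> ")
	for {
		line, err := t.ReadLine()
		if err == io.EOF {
			return
		}
		if err != nil {
			panic(err)
		}
		// Terminal implements io.Writer, so output is redrawn around
		// the prompt and any partially typed input.
		fmt.Fprintf(t, "you typed: %q\n", line)
	}
}
```

Because Terminal also implements io.Writer, anything printed through `t` is interleaved correctly with the prompt and the line currently being edited.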
+func NewTerminal(c io.ReadWriter, prompt string) *Terminal { + return &Terminal{ + Escape: &vt100EscapeCodes, + c: c, + prompt: []rune(prompt), + termWidth: 80, + termHeight: 24, + echo: true, + historyIndex: -1, + } +} + +const ( + keyCtrlD = 4 + keyCtrlU = 21 + keyEnter = '\r' + keyEscape = 27 + keyBackspace = 127 + keyUnknown = 0xd800 /* UTF-16 surrogate area */ + iota + keyUp + keyDown + keyLeft + keyRight + keyAltLeft + keyAltRight + keyHome + keyEnd + keyDeleteWord + keyDeleteLine + keyClearScreen + keyPasteStart + keyPasteEnd +) + +var ( + crlf = []byte{'\r', '\n'} + pasteStart = []byte{keyEscape, '[', '2', '0', '0', '~'} + pasteEnd = []byte{keyEscape, '[', '2', '0', '1', '~'} +) + +// bytesToKey tries to parse a key sequence from b. If successful, it returns +// the key and the remainder of the input. Otherwise it returns utf8.RuneError. +func bytesToKey(b []byte, pasteActive bool) (rune, []byte) { + if len(b) == 0 { + return utf8.RuneError, nil + } + + if !pasteActive { + switch b[0] { + case 1: // ^A + return keyHome, b[1:] + case 5: // ^E + return keyEnd, b[1:] + case 8: // ^H + return keyBackspace, b[1:] + case 11: // ^K + return keyDeleteLine, b[1:] + case 12: // ^L + return keyClearScreen, b[1:] + case 23: // ^W + return keyDeleteWord, b[1:] + case 14: // ^N + return keyDown, b[1:] + case 16: // ^P + return keyUp, b[1:] + } + } + + if b[0] != keyEscape { + if !utf8.FullRune(b) { + return utf8.RuneError, b + } + r, l := utf8.DecodeRune(b) + return r, b[l:] + } + + if !pasteActive && len(b) >= 3 && b[0] == keyEscape && b[1] == '[' { + switch b[2] { + case 'A': + return keyUp, b[3:] + case 'B': + return keyDown, b[3:] + case 'C': + return keyRight, b[3:] + case 'D': + return keyLeft, b[3:] + case 'H': + return keyHome, b[3:] + case 'F': + return keyEnd, b[3:] + } + } + + if !pasteActive && len(b) >= 6 && b[0] == keyEscape && b[1] == '[' && b[2] == '1' && b[3] == ';' && b[4] == '3' { + switch b[5] { + case 'C': + return keyAltRight, b[6:] + case 'D': + return keyAltLeft, b[6:] + } + } + + if !pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteStart) { + return keyPasteStart, b[6:] + } + + if pasteActive && len(b) >= 6 && bytes.Equal(b[:6], pasteEnd) { + return keyPasteEnd, b[6:] + } + + // If we get here then we have a key that we don't recognise, or a + // partial sequence. It's not clear how one should find the end of a + // sequence without knowing them all, but it seems that [a-zA-Z~] only + // appears at the end of a sequence. + for i, c := range b[0:] { + if c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '~' { + return keyUnknown, b[i+1:] + } + } + + return utf8.RuneError, b +} + +// queue appends data to the end of t.outBuf +func (t *Terminal) queue(data []rune) { + t.outBuf = append(t.outBuf, []byte(string(data))...) +} + +var eraseUnderCursor = []rune{' ', keyEscape, '[', 'D'} +var space = []rune{' '} + +func isPrintable(key rune) bool { + isInSurrogateArea := key >= 0xd800 && key <= 0xdbff + return key >= 32 && !isInSurrogateArea +} + +// moveCursorToPos appends data to t.outBuf which will move the cursor to the +// given, logical position in the text. 
+func (t *Terminal) moveCursorToPos(pos int) { + if !t.echo { + return + } + + x := visualLength(t.prompt) + pos + y := x / t.termWidth + x = x % t.termWidth + + up := 0 + if y < t.cursorY { + up = t.cursorY - y + } + + down := 0 + if y > t.cursorY { + down = y - t.cursorY + } + + left := 0 + if x < t.cursorX { + left = t.cursorX - x + } + + right := 0 + if x > t.cursorX { + right = x - t.cursorX + } + + t.cursorX = x + t.cursorY = y + t.move(up, down, left, right) +} + +func (t *Terminal) move(up, down, left, right int) { + movement := make([]rune, 3*(up+down+left+right)) + m := movement + for i := 0; i < up; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'A' + m = m[3:] + } + for i := 0; i < down; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'B' + m = m[3:] + } + for i := 0; i < left; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'D' + m = m[3:] + } + for i := 0; i < right; i++ { + m[0] = keyEscape + m[1] = '[' + m[2] = 'C' + m = m[3:] + } + + t.queue(movement) +} + +func (t *Terminal) clearLineToRight() { + op := []rune{keyEscape, '[', 'K'} + t.queue(op) +} + +const maxLineLength = 4096 + +func (t *Terminal) setLine(newLine []rune, newPos int) { + if t.echo { + t.moveCursorToPos(0) + t.writeLine(newLine) + for i := len(newLine); i < len(t.line); i++ { + t.writeLine(space) + } + t.moveCursorToPos(newPos) + } + t.line = newLine + t.pos = newPos +} + +func (t *Terminal) advanceCursor(places int) { + t.cursorX += places + t.cursorY += t.cursorX / t.termWidth + if t.cursorY > t.maxLine { + t.maxLine = t.cursorY + } + t.cursorX = t.cursorX % t.termWidth + + if places > 0 && t.cursorX == 0 { + // Normally terminals will advance the current position + // when writing a character. But that doesn't happen + // for the last character in a line. However, when + // writing a character (except a new line) that causes + // a line wrap, the position will be advanced two + // places. + // + // So, if we are stopping at the end of a line, we + // need to write a newline so that our cursor can be + // advanced to the next line. + t.outBuf = append(t.outBuf, '\r', '\n') + } +} + +func (t *Terminal) eraseNPreviousChars(n int) { + if n == 0 { + return + } + + if t.pos < n { + n = t.pos + } + t.pos -= n + t.moveCursorToPos(t.pos) + + copy(t.line[t.pos:], t.line[n+t.pos:]) + t.line = t.line[:len(t.line)-n] + if t.echo { + t.writeLine(t.line[t.pos:]) + for i := 0; i < n; i++ { + t.queue(space) + } + t.advanceCursor(n) + t.moveCursorToPos(t.pos) + } +} + +// countToLeftWord returns then number of characters from the cursor to the +// start of the previous word. +func (t *Terminal) countToLeftWord() int { + if t.pos == 0 { + return 0 + } + + pos := t.pos - 1 + for pos > 0 { + if t.line[pos] != ' ' { + break + } + pos-- + } + for pos > 0 { + if t.line[pos] == ' ' { + pos++ + break + } + pos-- + } + + return t.pos - pos +} + +// countToRightWord returns then number of characters from the cursor to the +// start of the next word. +func (t *Terminal) countToRightWord() int { + pos := t.pos + for pos < len(t.line) { + if t.line[pos] == ' ' { + break + } + pos++ + } + for pos < len(t.line) { + if t.line[pos] != ' ' { + break + } + pos++ + } + return pos - t.pos +} + +// visualLength returns the number of visible glyphs in s. 
+func visualLength(runes []rune) int { + inEscapeSeq := false + length := 0 + + for _, r := range runes { + switch { + case inEscapeSeq: + if (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z') { + inEscapeSeq = false + } + case r == '\x1b': + inEscapeSeq = true + default: + length++ + } + } + + return length +} + +// handleKey processes the given key and, optionally, returns a line of text +// that the user has entered. +func (t *Terminal) handleKey(key rune) (line string, ok bool) { + if t.pasteActive && key != keyEnter { + t.addKeyToLine(key) + return + } + + switch key { + case keyBackspace: + if t.pos == 0 { + return + } + t.eraseNPreviousChars(1) + case keyAltLeft: + // move left by a word. + t.pos -= t.countToLeftWord() + t.moveCursorToPos(t.pos) + case keyAltRight: + // move right by a word. + t.pos += t.countToRightWord() + t.moveCursorToPos(t.pos) + case keyLeft: + if t.pos == 0 { + return + } + t.pos-- + t.moveCursorToPos(t.pos) + case keyRight: + if t.pos == len(t.line) { + return + } + t.pos++ + t.moveCursorToPos(t.pos) + case keyHome: + if t.pos == 0 { + return + } + t.pos = 0 + t.moveCursorToPos(t.pos) + case keyEnd: + if t.pos == len(t.line) { + return + } + t.pos = len(t.line) + t.moveCursorToPos(t.pos) + case keyUp: + entry, ok := t.history.NthPreviousEntry(t.historyIndex + 1) + if !ok { + return "", false + } + if t.historyIndex == -1 { + t.historyPending = string(t.line) + } + t.historyIndex++ + runes := []rune(entry) + t.setLine(runes, len(runes)) + case keyDown: + switch t.historyIndex { + case -1: + return + case 0: + runes := []rune(t.historyPending) + t.setLine(runes, len(runes)) + t.historyIndex-- + default: + entry, ok := t.history.NthPreviousEntry(t.historyIndex - 1) + if ok { + t.historyIndex-- + runes := []rune(entry) + t.setLine(runes, len(runes)) + } + } + case keyEnter: + t.moveCursorToPos(len(t.line)) + t.queue([]rune("\r\n")) + line = string(t.line) + ok = true + t.line = t.line[:0] + t.pos = 0 + t.cursorX = 0 + t.cursorY = 0 + t.maxLine = 0 + case keyDeleteWord: + // Delete zero or more spaces and then one or more characters. + t.eraseNPreviousChars(t.countToLeftWord()) + case keyDeleteLine: + // Delete everything from the current cursor position to the + // end of line. + for i := t.pos; i < len(t.line); i++ { + t.queue(space) + t.advanceCursor(1) + } + t.line = t.line[:t.pos] + t.moveCursorToPos(t.pos) + case keyCtrlD: + // Erase the character under the current position. + // The EOF case when the line is empty is handled in + // readLine(). + if t.pos < len(t.line) { + t.pos++ + t.eraseNPreviousChars(1) + } + case keyCtrlU: + t.eraseNPreviousChars(t.pos) + case keyClearScreen: + // Erases the screen and moves the cursor to the home position. + t.queue([]rune("\x1b[2J\x1b[H")) + t.queue(t.prompt) + t.cursorX, t.cursorY = 0, 0 + t.advanceCursor(visualLength(t.prompt)) + t.setLine(t.line, t.pos) + default: + if t.AutoCompleteCallback != nil { + prefix := string(t.line[:t.pos]) + suffix := string(t.line[t.pos:]) + + t.lock.Unlock() + newLine, newPos, completeOk := t.AutoCompleteCallback(prefix+suffix, len(prefix), key) + t.lock.Lock() + + if completeOk { + t.setLine([]rune(newLine), utf8.RuneCount([]byte(newLine)[:newPos])) + return + } + } + if !isPrintable(key) { + return + } + if len(t.line) == maxLineLength { + return + } + t.addKeyToLine(key) + } + return +} + +// addKeyToLine inserts the given key at the current position in the current +// line. 
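handleKey above hands unrecognised printable keys to AutoCompleteCallback, so a caller can plug in completion without touching the package. A minimal sketch, with `installCompleter` and its command list as assumed names:

```go
package completion

import (
	"strings"

	"golang.org/x/crypto/ssh/terminal"
)

// installCompleter is an assumed helper: pressing Tab completes the current
// line against a fixed command list; for any other key it returns ok == false
// so the terminal processes the key normally.
func installCompleter(t *terminal.Terminal) {
	commands := []string{"start", "status", "stop"}
	t.AutoCompleteCallback = func(line string, pos int, key rune) (string, int, bool) {
		if key != '\t' {
			return "", 0, false
		}
		for _, c := range commands {
			if strings.HasPrefix(c, line) {
				newLine := c + " "
				return newLine, len(newLine), true
			}
		}
		return "", 0, false
	}
}
```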
+func (t *Terminal) addKeyToLine(key rune) { + if len(t.line) == cap(t.line) { + newLine := make([]rune, len(t.line), 2*(1+len(t.line))) + copy(newLine, t.line) + t.line = newLine + } + t.line = t.line[:len(t.line)+1] + copy(t.line[t.pos+1:], t.line[t.pos:]) + t.line[t.pos] = key + if t.echo { + t.writeLine(t.line[t.pos:]) + } + t.pos++ + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) writeLine(line []rune) { + for len(line) != 0 { + remainingOnLine := t.termWidth - t.cursorX + todo := len(line) + if todo > remainingOnLine { + todo = remainingOnLine + } + t.queue(line[:todo]) + t.advanceCursor(visualLength(line[:todo])) + line = line[todo:] + } +} + +// writeWithCRLF writes buf to w but replaces all occurrences of \n with \r\n. +func writeWithCRLF(w io.Writer, buf []byte) (n int, err error) { + for len(buf) > 0 { + i := bytes.IndexByte(buf, '\n') + todo := len(buf) + if i >= 0 { + todo = i + } + + var nn int + nn, err = w.Write(buf[:todo]) + n += nn + if err != nil { + return n, err + } + buf = buf[todo:] + + if i >= 0 { + if _, err = w.Write(crlf); err != nil { + return n, err + } + n++ + buf = buf[1:] + } + } + + return n, nil +} + +func (t *Terminal) Write(buf []byte) (n int, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + if t.cursorX == 0 && t.cursorY == 0 { + // This is the easy case: there's nothing on the screen that we + // have to move out of the way. + return writeWithCRLF(t.c, buf) + } + + // We have a prompt and possibly user input on the screen. We + // have to clear it first. + t.move(0 /* up */, 0 /* down */, t.cursorX /* left */, 0 /* right */) + t.cursorX = 0 + t.clearLineToRight() + + for t.cursorY > 0 { + t.move(1 /* up */, 0, 0, 0) + t.cursorY-- + t.clearLineToRight() + } + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + + if n, err = writeWithCRLF(t.c, buf); err != nil { + return + } + + t.writeLine(t.prompt) + if t.echo { + t.writeLine(t.line) + } + + t.moveCursorToPos(t.pos) + + if _, err = t.c.Write(t.outBuf); err != nil { + return + } + t.outBuf = t.outBuf[:0] + return +} + +// ReadPassword temporarily changes the prompt and reads a password, without +// echo, from the terminal. +func (t *Terminal) ReadPassword(prompt string) (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + oldPrompt := t.prompt + t.prompt = []rune(prompt) + t.echo = false + + line, err = t.readLine() + + t.prompt = oldPrompt + t.echo = true + + return +} + +// ReadLine returns a line of input from the terminal. 
+func (t *Terminal) ReadLine() (line string, err error) { + t.lock.Lock() + defer t.lock.Unlock() + + return t.readLine() +} + +func (t *Terminal) readLine() (line string, err error) { + // t.lock must be held at this point + + if t.cursorX == 0 && t.cursorY == 0 { + t.writeLine(t.prompt) + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + } + + lineIsPasted := t.pasteActive + + for { + rest := t.remainder + lineOk := false + for !lineOk { + var key rune + key, rest = bytesToKey(rest, t.pasteActive) + if key == utf8.RuneError { + break + } + if !t.pasteActive { + if key == keyCtrlD { + if len(t.line) == 0 { + return "", io.EOF + } + } + if key == keyPasteStart { + t.pasteActive = true + if len(t.line) == 0 { + lineIsPasted = true + } + continue + } + } else if key == keyPasteEnd { + t.pasteActive = false + continue + } + if !t.pasteActive { + lineIsPasted = false + } + line, lineOk = t.handleKey(key) + } + if len(rest) > 0 { + n := copy(t.inBuf[:], rest) + t.remainder = t.inBuf[:n] + } else { + t.remainder = nil + } + t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + if lineOk { + if t.echo { + t.historyIndex = -1 + t.history.Add(line) + } + if lineIsPasted { + err = ErrPasteIndicator + } + return + } + + // t.remainder is a slice at the beginning of t.inBuf + // containing a partial key sequence + readBuf := t.inBuf[len(t.remainder):] + var n int + + t.lock.Unlock() + n, err = t.c.Read(readBuf) + t.lock.Lock() + + if err != nil { + return + } + + t.remainder = t.inBuf[:n+len(t.remainder)] + } +} + +// SetPrompt sets the prompt to be used when reading subsequent lines. +func (t *Terminal) SetPrompt(prompt string) { + t.lock.Lock() + defer t.lock.Unlock() + + t.prompt = []rune(prompt) +} + +func (t *Terminal) clearAndRepaintLinePlusNPrevious(numPrevLines int) { + // Move cursor to column zero at the start of the line. + t.move(t.cursorY, 0, t.cursorX, 0) + t.cursorX, t.cursorY = 0, 0 + t.clearLineToRight() + for t.cursorY < numPrevLines { + // Move down a line + t.move(0, 1, 0, 0) + t.cursorY++ + t.clearLineToRight() + } + // Move back to beginning. + t.move(t.cursorY, 0, 0, 0) + t.cursorX, t.cursorY = 0, 0 + + t.queue(t.prompt) + t.advanceCursor(visualLength(t.prompt)) + t.writeLine(t.line) + t.moveCursorToPos(t.pos) +} + +func (t *Terminal) SetSize(width, height int) error { + t.lock.Lock() + defer t.lock.Unlock() + + if width == 0 { + width = 1 + } + + oldWidth := t.termWidth + t.termWidth, t.termHeight = width, height + + switch { + case width == oldWidth: + // If the width didn't change then nothing else needs to be + // done. + return nil + case len(t.line) == 0 && t.cursorX == 0 && t.cursorY == 0: + // If there is nothing on current line and no prompt printed, + // just do nothing + return nil + case width < oldWidth: + // Some terminals (e.g. xterm) will truncate lines that were + // too long when shinking. Others, (e.g. gnome-terminal) will + // attempt to wrap them. For the former, repainting t.maxLine + // works great, but that behaviour goes badly wrong in the case + // of the latter because they have doubled every full line. + + // We assume that we are working on a terminal that wraps lines + // and adjust the cursor position based on every previous line + // wrapping and turning into two. This causes the prompt on + // xterms to move upwards, which isn't great, but it avoids a + // huge mess with gnome-terminal. 
+ if t.cursorX >= t.termWidth { + t.cursorX = t.termWidth - 1 + } + t.cursorY *= 2 + t.clearAndRepaintLinePlusNPrevious(t.maxLine * 2) + case width > oldWidth: + // If the terminal expands then our position calculations will + // be wrong in the future because we think the cursor is + // |t.pos| chars into the string, but there will be a gap at + // the end of any wrapped line. + // + // But the position will actually be correct until we move, so + // we can move back to the beginning and repaint everything. + t.clearAndRepaintLinePlusNPrevious(t.maxLine) + } + + _, err := t.c.Write(t.outBuf) + t.outBuf = t.outBuf[:0] + return err +} + +type pasteIndicatorError struct{} + +func (pasteIndicatorError) Error() string { + return "terminal: ErrPasteIndicator not correctly handled" +} + +// ErrPasteIndicator may be returned from ReadLine as the error, in addition +// to valid line data. It indicates that bracketed paste mode is enabled and +// that the returned line consists only of pasted data. Programs may wish to +// interpret pasted data more literally than typed data. +var ErrPasteIndicator = pasteIndicatorError{} + +// SetBracketedPasteMode requests that the terminal bracket paste operations +// with markers. Not all terminals support this but, if it is supported, then +// enabling this mode will stop any autocomplete callback from running due to +// pastes. Additionally, any lines that are completely pasted will be returned +// from ReadLine with the error set to ErrPasteIndicator. +func (t *Terminal) SetBracketedPasteMode(on bool) { + if on { + io.WriteString(t.c, "\x1b[?2004h") + } else { + io.WriteString(t.c, "\x1b[?2004l") + } +} + +// stRingBuffer is a ring buffer of strings. +type stRingBuffer struct { + // entries contains max elements. + entries []string + max int + // head contains the index of the element most recently added to the ring. + head int + // size contains the number of elements in the ring. + size int +} + +func (s *stRingBuffer) Add(a string) { + if s.entries == nil { + const defaultNumEntries = 100 + s.entries = make([]string, defaultNumEntries) + s.max = defaultNumEntries + } + + s.head = (s.head + 1) % s.max + s.entries[s.head] = a + if s.size < s.max { + s.size++ + } +} + +// NthPreviousEntry returns the value passed to the nth previous call to Add. +// If n is zero then the immediately prior value is returned, if one, then the +// next most recent, and so on. If such an element doesn't exist then ok is +// false. +func (s *stRingBuffer) NthPreviousEntry(n int) (value string, ok bool) { + if n >= s.size { + return "", false + } + index := s.head - n + if index < 0 { + index += s.max + } + return s.entries[index], true +} + +// readPasswordLine reads from reader until it finds \n or io.EOF. +// The slice returned does not include the \n. +// readPasswordLine also ignores any \r it finds. 
+func readPasswordLine(reader io.Reader) ([]byte, error) { + var buf [1]byte + var ret []byte + + for { + n, err := reader.Read(buf[:]) + if n > 0 { + switch buf[0] { + case '\n': + return ret, nil + case '\r': + // remove \r from passwords on Windows + default: + ret = append(ret, buf[0]) + } + continue + } + if err != nil { + if err == io.EOF && len(ret) > 0 { + return ret, nil + } + return ret, err + } + } +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util.go b/vendor/golang.org/x/crypto/ssh/terminal/util.go new file mode 100644 index 0000000..3911040 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util.go @@ -0,0 +1,114 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin dragonfly freebsd linux,!appengine netbsd openbsd + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + // This attempts to replicate the behaviour documented for cfmakeraw in + // the termios(3) manpage. + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return unix.IoctlSetTermios(fd, ioctlWriteTermios, &state.termios) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return -1, -1, err + } + return int(ws.Col), int(ws.Row), nil +} + +// passwordReader is an io.Reader that reads from a specific file descriptor. 
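The helpers in util.go are also usable on their own; a small illustrative sketch (not part of the patch) that checks for a tty and queries its size:

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fd := int(os.Stdout.Fd())
	if !terminal.IsTerminal(fd) {
		// e.g. output is piped or redirected
		fmt.Println("stdout is not a terminal; skipping size query")
		return
	}
	cols, rows, err := terminal.GetSize(fd)
	if err != nil {
		fmt.Fprintln(os.Stderr, "GetSize:", err)
		os.Exit(1)
	}
	fmt.Printf("terminal is %d columns x %d rows\n", cols, rows)
}
```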
+type passwordReader int + +func (r passwordReader) Read(buf []byte) (int, error) { + return unix.Read(int(r), buf) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + termios, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + if err != nil { + return nil, err + } + + newState := *termios + newState.Lflag &^= unix.ECHO + newState.Lflag |= unix.ICANON | unix.ISIG + newState.Iflag |= unix.ICRNL + if err := unix.IoctlSetTermios(fd, ioctlWriteTermios, &newState); err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, ioctlWriteTermios, termios) + + return readPasswordLine(passwordReader(fd)) +} diff --git a/vendor/golang.org/x/sys/windows/asm_windows_arm.s b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go similarity index 51% rename from vendor/golang.org/x/sys/windows/asm_windows_arm.s rename to vendor/golang.org/x/crypto/ssh/terminal/util_aix.go index 55d8b91..dfcd627 100644 --- a/vendor/golang.org/x/sys/windows/asm_windows_arm.s +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_aix.go @@ -2,10 +2,11 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -#include "textflag.h" +// +build aix -TEXT ·getprocaddress(SB),NOSPLIT,$0 - B syscall·getprocaddress(SB) +package terminal -TEXT ·loadlibrary(SB),NOSPLIT,$0 - B syscall·loadlibrary(SB) +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go new file mode 100644 index 0000000..cb23a59 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_bsd.go @@ -0,0 +1,12 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin dragonfly freebsd netbsd openbsd + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA +const ioctlWriteTermios = unix.TIOCSETA diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go new file mode 100644 index 0000000..5fadfe8 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_linux.go @@ -0,0 +1,10 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package terminal + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS +const ioctlWriteTermios = unix.TCSETS diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go new file mode 100644 index 0000000..9317ac7 --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_plan9.go @@ -0,0 +1,58 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. 
+// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "fmt" + "runtime" +) + +type State struct{} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + return false +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: MakeRaw not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + return nil, fmt.Errorf("terminal: GetState not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return fmt.Errorf("terminal: Restore not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + return 0, 0, fmt.Errorf("terminal: GetSize not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + return nil, fmt.Errorf("terminal: ReadPassword not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go new file mode 100644 index 0000000..3d5f06a --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_solaris.go @@ -0,0 +1,124 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build solaris + +package terminal // import "golang.org/x/crypto/ssh/terminal" + +import ( + "golang.org/x/sys/unix" + "io" + "syscall" +) + +// State contains the state of a terminal. +type State struct { + termios unix.Termios +} + +// IsTerminal returns whether the given file descriptor is a terminal. +func IsTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. 
+func ReadPassword(fd int) ([]byte, error) { + // see also: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libast/common/uwin/getpass.c + val, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + oldState := *val + + newState := oldState + newState.Lflag &^= syscall.ECHO + newState.Lflag |= syscall.ICANON | syscall.ISIG + newState.Iflag |= syscall.ICRNL + err = unix.IoctlSetTermios(fd, unix.TCSETS, &newState) + if err != nil { + return nil, err + } + + defer unix.IoctlSetTermios(fd, unix.TCSETS, &oldState) + + var buf [16]byte + var ret []byte + for { + n, err := syscall.Read(fd, buf[:]) + if err != nil { + return nil, err + } + if n == 0 { + if len(ret) == 0 { + return nil, io.EOF + } + break + } + if buf[n-1] == '\n' { + n-- + } + ret = append(ret, buf[:n]...) + if n < len(buf) { + break + } + } + + return ret, nil +} + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +// see http://cr.illumos.org/~webrev/andy_js/1060/ +func MakeRaw(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + termios.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON + termios.Oflag &^= unix.OPOST + termios.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN + termios.Cflag &^= unix.CSIZE | unix.PARENB + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := unix.IoctlSetTermios(fd, unix.TCSETS, termios); err != nil { + return nil, err + } + + return &oldState, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, oldState *State) error { + return unix.IoctlSetTermios(fd, unix.TCSETS, &oldState.termios) +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + termios, err := unix.IoctlGetTermios(fd, unix.TCGETS) + if err != nil { + return nil, err + } + + return &State{termios: *termios}, nil +} + +// GetSize returns the dimensions of the given terminal. +func GetSize(fd int) (width, height int, err error) { + ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ) + if err != nil { + return 0, 0, err + } + return int(ws.Col), int(ws.Row), nil +} diff --git a/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go new file mode 100644 index 0000000..5cfdf8f --- /dev/null +++ b/vendor/golang.org/x/crypto/ssh/terminal/util_windows.go @@ -0,0 +1,105 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows + +// Package terminal provides support functions for dealing with terminals, as +// commonly found on UNIX systems. +// +// Putting a terminal into raw mode is the most common requirement: +// +// oldState, err := terminal.MakeRaw(0) +// if err != nil { +// panic(err) +// } +// defer terminal.Restore(0, oldState) +package terminal + +import ( + "os" + + "golang.org/x/sys/windows" +) + +type State struct { + mode uint32 +} + +// IsTerminal returns whether the given file descriptor is a terminal. 
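Each platform file added here exports the same package-level ReadPassword(fd) helper, so a portable no-echo prompt looks roughly like this (illustrative only):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

func main() {
	fmt.Fprint(os.Stderr, "Password: ")
	// Echo is disabled for the read and the previous terminal state is
	// restored before ReadPassword returns; the trailing newline is not
	// part of the result.
	secret, err := terminal.ReadPassword(int(os.Stdin.Fd()))
	fmt.Fprintln(os.Stderr)
	if err != nil {
		fmt.Fprintln(os.Stderr, "ReadPassword:", err)
		os.Exit(1)
	}
	fmt.Fprintf(os.Stderr, "read %d bytes\n", len(secret))
}
```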
+func IsTerminal(fd int) bool { + var st uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &st) + return err == nil +} + +// MakeRaw put the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. +func MakeRaw(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + raw := st &^ (windows.ENABLE_ECHO_INPUT | windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), raw); err != nil { + return nil, err + } + return &State{st}, nil +} + +// GetState returns the current state of a terminal which may be useful to +// restore the terminal after a signal. +func GetState(fd int) (*State, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + return &State{st}, nil +} + +// Restore restores the terminal connected to the given file descriptor to a +// previous state. +func Restore(fd int, state *State) error { + return windows.SetConsoleMode(windows.Handle(fd), state.mode) +} + +// GetSize returns the visible dimensions of the given terminal. +// +// These dimensions don't include any scrollback buffer height. +func GetSize(fd int) (width, height int, err error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return 0, 0, err + } + return int(info.Window.Right - info.Window.Left + 1), int(info.Window.Bottom - info.Window.Top + 1), nil +} + +// ReadPassword reads a line of input from a terminal without local echo. This +// is commonly used for inputting passwords and other sensitive data. The slice +// returned does not include the \n. +func ReadPassword(fd int) ([]byte, error) { + var st uint32 + if err := windows.GetConsoleMode(windows.Handle(fd), &st); err != nil { + return nil, err + } + old := st + + st &^= (windows.ENABLE_ECHO_INPUT) + st |= (windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT | windows.ENABLE_PROCESSED_OUTPUT) + if err := windows.SetConsoleMode(windows.Handle(fd), st); err != nil { + return nil, err + } + + defer windows.SetConsoleMode(windows.Handle(fd), old) + + var h windows.Handle + p, _ := windows.GetCurrentProcess() + if err := windows.DuplicateHandle(p, windows.Handle(fd), p, &h, 0, false, windows.DUPLICATE_SAME_ACCESS); err != nil { + return nil, err + } + + f := os.NewFile(uintptr(h), "stdin") + defer f.Close() + return readPasswordLine(f) +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 0000000..a3c021d --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,56 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// As of Go 1.7 this package is available in the standard library under the +// name context. https://golang.org/pkg/context. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. 
The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "golang.org/x/net/context" + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} diff --git a/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go new file mode 100644 index 0000000..37dc0cf --- /dev/null +++ b/vendor/golang.org/x/net/context/ctxhttp/ctxhttp.go @@ -0,0 +1,71 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package ctxhttp provides helper functions for performing context-aware HTTP requests. +package ctxhttp // import "golang.org/x/net/context/ctxhttp" + +import ( + "context" + "io" + "net/http" + "net/url" + "strings" +) + +// Do sends an HTTP request with the provided http.Client and returns +// an HTTP response. +// +// If the client is nil, http.DefaultClient is used. +// +// The provided ctx must be non-nil. If it is canceled or times out, +// ctx.Err() will be returned. +func Do(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error) { + if client == nil { + client = http.DefaultClient + } + resp, err := client.Do(req.WithContext(ctx)) + // If we got an error, and the context has been canceled, + // the context's error is probably more useful. + if err != nil { + select { + case <-ctx.Done(): + err = ctx.Err() + default: + } + } + return resp, err +} + +// Get issues a GET request via the Do function. +func Get(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("GET", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Head issues a HEAD request via the Do function. 
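Combined with the context package above, ctxhttp gives plain net/http calls request-scoped cancellation. A brief sketch, where the URL and the 5-second timeout are placeholders:

```go
package main

import (
	"context"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/net/context/ctxhttp"
)

func main() {
	// If the deadline fires while the request is in flight, Get returns
	// ctx.Err() rather than the underlying transport error.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	resp, err := ctxhttp.Get(ctx, http.DefaultClient, "https://example.com/")
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("status:", resp.Status)
}
```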
+func Head(ctx context.Context, client *http.Client, url string) (*http.Response, error) { + req, err := http.NewRequest("HEAD", url, nil) + if err != nil { + return nil, err + } + return Do(ctx, client, req) +} + +// Post issues a POST request via the Do function. +func Post(ctx context.Context, client *http.Client, url string, bodyType string, body io.Reader) (*http.Response, error) { + req, err := http.NewRequest("POST", url, body) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", bodyType) + return Do(ctx, client, req) +} + +// PostForm issues a POST request via the Do function. +func PostForm(ctx context.Context, client *http.Client, url string, data url.Values) (*http.Response, error) { + return Post(ctx, client, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000..d20f52b --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 0000000..d88bd1d --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000..0f35592 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. +type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. 
The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. 
+type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 0000000..b105f80 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. 
+ // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 34d4293..5e01ce9 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -322,7 +322,7 @@ type ServeConnOpts struct { } func (o *ServeConnOpts) context() context.Context { - if o != nil && o.Context != nil { + if o.Context != nil { return o.Context } return context.Background() diff --git a/vendor/golang.org/x/net/http2/writesched_random.go b/vendor/golang.org/x/net/http2/writesched_random.go index 36d7919..9a7b9e5 100644 --- a/vendor/golang.org/x/net/http2/writesched_random.go +++ b/vendor/golang.org/x/net/http2/writesched_random.go @@ -19,7 +19,8 @@ type randomWriteScheduler struct { zero writeQueue // sq contains the stream-specific queues, keyed by stream ID. - // When a stream is idle or closed, it's deleted from the map. + // When a stream is idle, closed, or emptied, it's deleted + // from the map. sq map[uint32]*writeQueue // pool of empty queues for reuse. @@ -63,8 +64,12 @@ func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) { return ws.zero.shift(), true } // Iterate over all non-idle streams until finding one that can be consumed. - for _, q := range ws.sq { + for streamID, q := range ws.sq { if wr, ok := q.consume(math.MaxInt32); ok { + if q.empty() { + delete(ws.sq, streamID) + ws.queuePool.put(q) + } return wr, true } } diff --git a/vendor/golang.org/x/oauth2/.travis.yml b/vendor/golang.org/x/oauth2/.travis.yml new file mode 100644 index 0000000..fa139db --- /dev/null +++ b/vendor/golang.org/x/oauth2/.travis.yml @@ -0,0 +1,13 @@ +language: go + +go: + - tip + +install: + - export GOPATH="$HOME/gopath" + - mkdir -p "$GOPATH/src/golang.org/x" + - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2" + - go get -v -t -d golang.org/x/oauth2/... + +script: + - go test -v golang.org/x/oauth2/... diff --git a/vendor/golang.org/x/oauth2/AUTHORS b/vendor/golang.org/x/oauth2/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/oauth2/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. 
diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTING.md b/vendor/golang.org/x/oauth2/CONTRIBUTING.md new file mode 100644 index 0000000..dfbed62 --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTING.md @@ -0,0 +1,26 @@ +# Contributing to Go + +Go is an open source project. + +It is the work of hundreds of contributors. We appreciate your help! + +## Filing issues + +When [filing an issue](https://github.com/golang/oauth2/issues), make sure to answer these five questions: + +1. What version of Go are you using (`go version`)? +2. What operating system and processor architecture are you using? +3. What did you do? +4. What did you expect to see? +5. What did you see instead? + +General questions should go to the [golang-nuts mailing list](https://groups.google.com/group/golang-nuts) instead of the issue tracker. +The gophers there will answer or ask you to file an issue if you've tripped over a bug. + +## Contributing code + +Please read the [Contribution Guidelines](https://golang.org/doc/contribute.html) +before sending patches. + +Unless otherwise noted, the Go source files are distributed under +the BSD-style license found in the LICENSE file. diff --git a/vendor/golang.org/x/oauth2/CONTRIBUTORS b/vendor/golang.org/x/oauth2/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/oauth2/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/oauth2/LICENSE b/vendor/golang.org/x/oauth2/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/oauth2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md new file mode 100644 index 0000000..8cfd606 --- /dev/null +++ b/vendor/golang.org/x/oauth2/README.md @@ -0,0 +1,36 @@ +# OAuth2 for Go + +[![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) +[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) + +oauth2 package contains a client implementation for OAuth 2.0 spec. + +## Installation + +~~~~ +go get golang.org/x/oauth2 +~~~~ + +Or you can manually git clone the repository to +`$(go env GOPATH)/src/golang.org/x/oauth2`. + +See godoc for further documentation and examples. + +* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) +* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) + +## Policy for new packages + +We no longer accept new provider-specific packages in this repo if all +they do is add a single endpoint variable. If you just want to add a +single endpoint, add it to the +[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +package. + +## Report Issues / Send Patches + +This repository uses Gerrit for code changes. To learn how to submit changes to +this repository, see https://golang.org/doc/contribute.html. + +The main issue tracker for the oauth2 repository is located at +https://github.com/golang/oauth2/issues. diff --git a/vendor/golang.org/x/oauth2/go.mod b/vendor/golang.org/x/oauth2/go.mod new file mode 100644 index 0000000..b345781 --- /dev/null +++ b/vendor/golang.org/x/oauth2/go.mod @@ -0,0 +1,10 @@ +module golang.org/x/oauth2 + +go 1.11 + +require ( + cloud.google.com/go v0.34.0 + golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 // indirect + google.golang.org/appengine v1.4.0 +) diff --git a/vendor/golang.org/x/oauth2/go.sum b/vendor/golang.org/x/oauth2/go.sum new file mode 100644 index 0000000..6f0079b --- /dev/null +++ b/vendor/golang.org/x/oauth2/go.sum @@ -0,0 +1,12 @@ +cloud.google.com/go v0.34.0 h1:eOI3/cP2VTU6uZLDYAoic+eyzzB9YyGmJ7eIjl8rOPg= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e h1:bRhVy7zSSasaqNksaRZiA5EEI+Ei4I1nO5Jh72wfHlg= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go new file mode 100644 index 0000000..7434871 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -0,0 +1,13 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import "google.golang.org/appengine/urlfetch" + +func init() { + appengineClientHook = urlfetch.Client +} diff --git a/vendor/golang.org/x/oauth2/internal/doc.go b/vendor/golang.org/x/oauth2/internal/doc.go new file mode 100644 index 0000000..03265e8 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/doc.go @@ -0,0 +1,6 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package internal contains support packages for oauth2 package. +package internal diff --git a/vendor/golang.org/x/oauth2/internal/oauth2.go b/vendor/golang.org/x/oauth2/internal/oauth2.go new file mode 100644 index 0000000..c0ab196 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/oauth2.go @@ -0,0 +1,37 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" + "fmt" +) + +// ParseKey converts the binary contents of a private key file +// to an *rsa.PrivateKey. It detects whether the private key is in a +// PEM container or not. If so, it extracts the the private key +// from PEM container before conversion. It only supports PEM +// containers with no passphrase. +func ParseKey(key []byte) (*rsa.PrivateKey, error) { + block, _ := pem.Decode(key) + if block != nil { + key = block.Bytes + } + parsedKey, err := x509.ParsePKCS8PrivateKey(key) + if err != nil { + parsedKey, err = x509.ParsePKCS1PrivateKey(key) + if err != nil { + return nil, fmt.Errorf("private key should be a PEM or plain PKCS1 or PKCS8; parse error: %v", err) + } + } + parsed, ok := parsedKey.(*rsa.PrivateKey) + if !ok { + return nil, errors.New("private key is invalid") + } + return parsed, nil +} diff --git a/vendor/golang.org/x/oauth2/internal/token.go b/vendor/golang.org/x/oauth2/internal/token.go new file mode 100644 index 0000000..355c386 --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/token.go @@ -0,0 +1,294 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "mime" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/net/context/ctxhttp" +) + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// This type is a mirror of oauth2.Token and exists to break +// an otherwise-circular dependency. Other internal packages +// should convert this Token into an oauth2.Token before use. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. + TokenType string + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string + + // Expiry is the optional expiration time of the access token. 
+ // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time + + // Raw optionally contains extra metadata from the server + // when updating a token. + Raw interface{} +} + +// tokenJSON is the struct representing the HTTP response from OAuth2 +// providers returning a token in JSON form. +type tokenJSON struct { + AccessToken string `json:"access_token"` + TokenType string `json:"token_type"` + RefreshToken string `json:"refresh_token"` + ExpiresIn expirationTime `json:"expires_in"` // at least PayPal returns string, while most return number +} + +func (e *tokenJSON) expiry() (t time.Time) { + if v := e.ExpiresIn; v != 0 { + return time.Now().Add(time.Duration(v) * time.Second) + } + return +} + +type expirationTime int32 + +func (e *expirationTime) UnmarshalJSON(b []byte) error { + if len(b) == 0 || string(b) == "null" { + return nil + } + var n json.Number + err := json.Unmarshal(b, &n) + if err != nil { + return err + } + i, err := n.Int64() + if err != nil { + return err + } + if i > math.MaxInt32 { + i = math.MaxInt32 + } + *e = expirationTime(i) + return nil +} + +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. +func RegisterBrokenAuthHeaderProvider(tokenURL string) {} + +// AuthStyle is a copy of the golang.org/x/oauth2 package's AuthStyle type. +type AuthStyle int + +const ( + AuthStyleUnknown AuthStyle = 0 + AuthStyleInParams AuthStyle = 1 + AuthStyleInHeader AuthStyle = 2 +) + +// authStyleCache is the set of tokenURLs we've successfully used via +// RetrieveToken and which style auth we ended up using. +// It's called a cache, but it doesn't (yet?) shrink. It's expected that +// the set of OAuth2 servers a program contacts over time is fixed and +// small. +var authStyleCache struct { + sync.Mutex + m map[string]AuthStyle // keyed by tokenURL +} + +// ResetAuthCache resets the global authentication style cache used +// for AuthStyleUnknown token requests. +func ResetAuthCache() { + authStyleCache.Lock() + defer authStyleCache.Unlock() + authStyleCache.m = nil +} + +// lookupAuthStyle reports which auth style we last used with tokenURL +// when calling RetrieveToken and whether we have ever done so. +func lookupAuthStyle(tokenURL string) (style AuthStyle, ok bool) { + authStyleCache.Lock() + defer authStyleCache.Unlock() + style, ok = authStyleCache.m[tokenURL] + return +} + +// setAuthStyle adds an entry to authStyleCache, documented above. +func setAuthStyle(tokenURL string, v AuthStyle) { + authStyleCache.Lock() + defer authStyleCache.Unlock() + if authStyleCache.m == nil { + authStyleCache.m = make(map[string]AuthStyle) + } + authStyleCache.m[tokenURL] = v +} + +// newTokenRequest returns a new *http.Request to retrieve a new token +// from tokenURL using the provided clientID, clientSecret, and POST +// body parameters. +// +// inParams is whether the clientID & clientSecret should be encoded +// as the POST body. An 'inParams' value of true means to send it in +// the POST body (along with any values in v); false means to send it +// in the Authorization header. 
+func newTokenRequest(tokenURL, clientID, clientSecret string, v url.Values, authStyle AuthStyle) (*http.Request, error) { + if authStyle == AuthStyleInParams { + v = cloneURLValues(v) + if clientID != "" { + v.Set("client_id", clientID) + } + if clientSecret != "" { + v.Set("client_secret", clientSecret) + } + } + req, err := http.NewRequest("POST", tokenURL, strings.NewReader(v.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded") + if authStyle == AuthStyleInHeader { + req.SetBasicAuth(url.QueryEscape(clientID), url.QueryEscape(clientSecret)) + } + return req, nil +} + +func cloneURLValues(v url.Values) url.Values { + v2 := make(url.Values, len(v)) + for k, vv := range v { + v2[k] = append([]string(nil), vv...) + } + return v2 +} + +func RetrieveToken(ctx context.Context, clientID, clientSecret, tokenURL string, v url.Values, authStyle AuthStyle) (*Token, error) { + needsAuthStyleProbe := authStyle == 0 + if needsAuthStyleProbe { + if style, ok := lookupAuthStyle(tokenURL); ok { + authStyle = style + needsAuthStyleProbe = false + } else { + authStyle = AuthStyleInHeader // the first way we'll try + } + } + req, err := newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + if err != nil { + return nil, err + } + token, err := doTokenRoundTrip(ctx, req) + if err != nil && needsAuthStyleProbe { + // If we get an error, assume the server wants the + // clientID & clientSecret in a different form. + // See https://code.google.com/p/goauth2/issues/detail?id=31 for background. + // In summary: + // - Reddit only accepts client secret in the Authorization header + // - Dropbox accepts either it in URL param or Auth header, but not both. + // - Google only accepts URL param (not spec compliant?), not Auth header + // - Stripe only accepts client secret in Auth header with Bearer method, not Basic + // + // We used to maintain a big table in this code of all the sites and which way + // they went, but maintaining it didn't scale & got annoying. + // So just try both ways. + authStyle = AuthStyleInParams // the second way we'll try + req, _ = newTokenRequest(tokenURL, clientID, clientSecret, v, authStyle) + token, err = doTokenRoundTrip(ctx, req) + } + if needsAuthStyleProbe && err == nil { + setAuthStyle(tokenURL, authStyle) + } + // Don't overwrite `RefreshToken` with an empty value + // if this was a token refreshing request. 
+ if token != nil && token.RefreshToken == "" { + token.RefreshToken = v.Get("refresh_token") + } + return token, err +} + +func doTokenRoundTrip(ctx context.Context, req *http.Request) (*Token, error) { + r, err := ctxhttp.Do(ctx, ContextClient(ctx), req) + if err != nil { + return nil, err + } + body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20)) + r.Body.Close() + if err != nil { + return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err) + } + if code := r.StatusCode; code < 200 || code > 299 { + return nil, &RetrieveError{ + Response: r, + Body: body, + } + } + + var token *Token + content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type")) + switch content { + case "application/x-www-form-urlencoded", "text/plain": + vals, err := url.ParseQuery(string(body)) + if err != nil { + return nil, err + } + token = &Token{ + AccessToken: vals.Get("access_token"), + TokenType: vals.Get("token_type"), + RefreshToken: vals.Get("refresh_token"), + Raw: vals, + } + e := vals.Get("expires_in") + expires, _ := strconv.Atoi(e) + if expires != 0 { + token.Expiry = time.Now().Add(time.Duration(expires) * time.Second) + } + default: + var tj tokenJSON + if err = json.Unmarshal(body, &tj); err != nil { + return nil, err + } + token = &Token{ + AccessToken: tj.AccessToken, + TokenType: tj.TokenType, + RefreshToken: tj.RefreshToken, + Expiry: tj.expiry(), + Raw: make(map[string]interface{}), + } + json.Unmarshal(body, &token.Raw) // no error checks for optional fields + } + if token.AccessToken == "" { + return nil, errors.New("oauth2: server response missing access_token") + } + return token, nil +} + +type RetrieveError struct { + Response *http.Response + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/internal/transport.go b/vendor/golang.org/x/oauth2/internal/transport.go new file mode 100644 index 0000000..572074a --- /dev/null +++ b/vendor/golang.org/x/oauth2/internal/transport.go @@ -0,0 +1,33 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +import ( + "context" + "net/http" +) + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient ContextKey + +// ContextKey is just an empty struct. It exists so HTTPClient can be +// an immutable public variable with a unique type. It's immutable +// because nobody else can create a ContextKey, being unexported. +type ContextKey struct{} + +var appengineClientHook func(context.Context) *http.Client + +func ContextClient(ctx context.Context) *http.Client { + if ctx != nil { + if hc, ok := ctx.Value(HTTPClient).(*http.Client); ok { + return hc + } + } + if appengineClientHook != nil { + return appengineClientHook(ctx) + } + return http.DefaultClient +} diff --git a/vendor/golang.org/x/oauth2/oauth2.go b/vendor/golang.org/x/oauth2/oauth2.go new file mode 100644 index 0000000..291df5c --- /dev/null +++ b/vendor/golang.org/x/oauth2/oauth2.go @@ -0,0 +1,381 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package oauth2 provides support for making +// OAuth2 authorized and authenticated HTTP requests, +// as specified in RFC 6749. 
+// It can additionally grant authorization with Bearer JWT. +package oauth2 // import "golang.org/x/oauth2" + +import ( + "bytes" + "context" + "errors" + "net/http" + "net/url" + "strings" + "sync" + + "golang.org/x/oauth2/internal" +) + +// NoContext is the default context you should supply if not using +// your own context.Context (see https://golang.org/x/net/context). +// +// Deprecated: Use context.Background() or context.TODO() instead. +var NoContext = context.TODO() + +// RegisterBrokenAuthHeaderProvider previously did something. It is now a no-op. +// +// Deprecated: this function no longer does anything. Caller code that +// wants to avoid potential extra HTTP requests made during +// auto-probing of the provider's auth style should set +// Endpoint.AuthStyle. +func RegisterBrokenAuthHeaderProvider(tokenURL string) {} + +// Config describes a typical 3-legged OAuth2 flow, with both the +// client application information and the server's endpoint URLs. +// For the client credentials 2-legged OAuth2 flow, see the clientcredentials +// package (https://golang.org/x/oauth2/clientcredentials). +type Config struct { + // ClientID is the application's ID. + ClientID string + + // ClientSecret is the application's secret. + ClientSecret string + + // Endpoint contains the resource server's token endpoint + // URLs. These are constants specific to each server and are + // often available via site-specific packages, such as + // google.Endpoint or github.Endpoint. + Endpoint Endpoint + + // RedirectURL is the URL to redirect users going through + // the OAuth flow, after the resource owner's URLs. + RedirectURL string + + // Scope specifies optional requested permissions. + Scopes []string +} + +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. + // Token must be safe for concurrent use by multiple goroutines. + // The returned Token must not be modified. + Token() (*Token, error) +} + +// Endpoint represents an OAuth 2.0 provider's authorization and token +// endpoint URLs. +type Endpoint struct { + AuthURL string + TokenURL string + + // AuthStyle optionally specifies how the endpoint wants the + // client ID & client secret sent. The zero value means to + // auto-detect. + AuthStyle AuthStyle +} + +// AuthStyle represents how requests for tokens are authenticated +// to the server. +type AuthStyle int + +const ( + // AuthStyleAutoDetect means to auto-detect which authentication + // style the provider wants by trying both ways and caching + // the successful way for the future. + AuthStyleAutoDetect AuthStyle = 0 + + // AuthStyleInParams sends the "client_id" and "client_secret" + // in the POST body as application/x-www-form-urlencoded parameters. + AuthStyleInParams AuthStyle = 1 + + // AuthStyleInHeader sends the client_id and client_password + // using HTTP Basic Authorization. This is an optional style + // described in the OAuth2 RFC 6749 section 2.3.1. + AuthStyleInHeader AuthStyle = 2 +) + +var ( + // AccessTypeOnline and AccessTypeOffline are options passed + // to the Options.AuthCodeURL method. They modify the + // "access_type" field that gets sent in the URL returned by + // AuthCodeURL. + // + // Online is the default if neither is specified. If your + // application needs to refresh access tokens when the user + // is not present at the browser, then use offline. 
This will + // result in your application obtaining a refresh token the + // first time your application exchanges an authorization + // code for a user. + AccessTypeOnline AuthCodeOption = SetAuthURLParam("access_type", "online") + AccessTypeOffline AuthCodeOption = SetAuthURLParam("access_type", "offline") + + // ApprovalForce forces the users to view the consent dialog + // and confirm the permissions request at the URL returned + // from AuthCodeURL, even if they've already done so. + ApprovalForce AuthCodeOption = SetAuthURLParam("prompt", "consent") +) + +// An AuthCodeOption is passed to Config.AuthCodeURL. +type AuthCodeOption interface { + setValue(url.Values) +} + +type setParam struct{ k, v string } + +func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) } + +// SetAuthURLParam builds an AuthCodeOption which passes key/value parameters +// to a provider's authorization endpoint. +func SetAuthURLParam(key, value string) AuthCodeOption { + return setParam{key, value} +} + +// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page +// that asks for permissions for the required scopes explicitly. +// +// State is a token to protect the user from CSRF attacks. You must +// always provide a non-empty string and validate that it matches the +// the state query parameter on your redirect callback. +// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info. +// +// Opts may include AccessTypeOnline or AccessTypeOffline, as well +// as ApprovalForce. +// It can also be used to pass the PKCE challenge. +// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string { + var buf bytes.Buffer + buf.WriteString(c.Endpoint.AuthURL) + v := url.Values{ + "response_type": {"code"}, + "client_id": {c.ClientID}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + if state != "" { + // TODO(light): Docs say never to omit state; don't allow empty. + v.Set("state", state) + } + for _, opt := range opts { + opt.setValue(v) + } + if strings.Contains(c.Endpoint.AuthURL, "?") { + buf.WriteByte('&') + } else { + buf.WriteByte('?') + } + buf.WriteString(v.Encode()) + return buf.String() +} + +// PasswordCredentialsToken converts a resource owner username and password +// pair into a token. +// +// Per the RFC, this grant type should only be used "when there is a high +// degree of trust between the resource owner and the client (e.g., the client +// is part of the device operating system or a highly privileged application), +// and when other authorization grant types are not available." +// See https://tools.ietf.org/html/rfc6749#section-4.3 for more info. +// +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. +func (c *Config) PasswordCredentialsToken(ctx context.Context, username, password string) (*Token, error) { + v := url.Values{ + "grant_type": {"password"}, + "username": {username}, + "password": {password}, + } + if len(c.Scopes) > 0 { + v.Set("scope", strings.Join(c.Scopes, " ")) + } + return retrieveToken(ctx, c, v) +} + +// Exchange converts an authorization code into a token. +// +// It is used after a resource provider redirects the user back +// to the Redirect URI (the URL obtained from AuthCodeURL). +// +// The provided context optionally controls which HTTP client is used. See the HTTPClient variable. 
+// +// The code will be in the *http.Request.FormValue("code"). Before +// calling Exchange, be sure to validate FormValue("state"). +// +// Opts may include the PKCE verifier code if previously used in AuthCodeURL. +// See https://www.oauth.com/oauth2-servers/pkce/ for more info. +func (c *Config) Exchange(ctx context.Context, code string, opts ...AuthCodeOption) (*Token, error) { + v := url.Values{ + "grant_type": {"authorization_code"}, + "code": {code}, + } + if c.RedirectURL != "" { + v.Set("redirect_uri", c.RedirectURL) + } + for _, opt := range opts { + opt.setValue(v) + } + return retrieveToken(ctx, c, v) +} + +// Client returns an HTTP client using the provided token. +// The token will auto-refresh as necessary. The underlying +// HTTP transport will be obtained using the provided context. +// The returned client and its Transport should not be modified. +func (c *Config) Client(ctx context.Context, t *Token) *http.Client { + return NewClient(ctx, c.TokenSource(ctx, t)) +} + +// TokenSource returns a TokenSource that returns t until t expires, +// automatically refreshing it as necessary using the provided context. +// +// Most users will use Config.Client instead. +func (c *Config) TokenSource(ctx context.Context, t *Token) TokenSource { + tkr := &tokenRefresher{ + ctx: ctx, + conf: c, + } + if t != nil { + tkr.refreshToken = t.RefreshToken + } + return &reuseTokenSource{ + t: t, + new: tkr, + } +} + +// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token" +// HTTP requests to renew a token using a RefreshToken. +type tokenRefresher struct { + ctx context.Context // used to get HTTP requests + conf *Config + refreshToken string +} + +// WARNING: Token is not safe for concurrent access, as it +// updates the tokenRefresher's refreshToken field. +// Within this package, it is used by reuseTokenSource which +// synchronizes calls to this method with its own mutex. +func (tf *tokenRefresher) Token() (*Token, error) { + if tf.refreshToken == "" { + return nil, errors.New("oauth2: token expired and refresh token is not set") + } + + tk, err := retrieveToken(tf.ctx, tf.conf, url.Values{ + "grant_type": {"refresh_token"}, + "refresh_token": {tf.refreshToken}, + }) + + if err != nil { + return nil, err + } + if tf.refreshToken != tk.RefreshToken { + tf.refreshToken = tk.RefreshToken + } + return tk, err +} + +// reuseTokenSource is a TokenSource that holds a single token in memory +// and validates its expiry before each call to retrieve it with +// Token. If it's expired, it will be auto-refreshed using the +// new TokenSource. +type reuseTokenSource struct { + new TokenSource // called when t is expired. + + mu sync.Mutex // guards t + t *Token +} + +// Token returns the current token if it's still valid, else will +// refresh the current token (using r.Context for HTTP client +// information) and return the new one. +func (s *reuseTokenSource) Token() (*Token, error) { + s.mu.Lock() + defer s.mu.Unlock() + if s.t.Valid() { + return s.t, nil + } + t, err := s.new.Token() + if err != nil { + return nil, err + } + s.t = t + return t, nil +} + +// StaticTokenSource returns a TokenSource that always returns the same token. +// Because the provided token t is never refreshed, StaticTokenSource is only +// useful for tokens that never expire. +func StaticTokenSource(t *Token) TokenSource { + return staticTokenSource{t} +} + +// staticTokenSource is a TokenSource that always returns the same Token. 
+type staticTokenSource struct { + t *Token +} + +func (s staticTokenSource) Token() (*Token, error) { + return s.t, nil +} + +// HTTPClient is the context key to use with golang.org/x/net/context's +// WithValue function to associate an *http.Client value with a context. +var HTTPClient internal.ContextKey + +// NewClient creates an *http.Client from a Context and TokenSource. +// The returned client is not valid beyond the lifetime of the context. +// +// Note that if a custom *http.Client is provided via the Context it +// is used only for token acquisition and is not used to configure the +// *http.Client returned from NewClient. +// +// As a special case, if src is nil, a non-OAuth2 client is returned +// using the provided context. This exists to support related OAuth2 +// packages. +func NewClient(ctx context.Context, src TokenSource) *http.Client { + if src == nil { + return internal.ContextClient(ctx) + } + return &http.Client{ + Transport: &Transport{ + Base: internal.ContextClient(ctx).Transport, + Source: ReuseTokenSource(nil, src), + }, + } +} + +// ReuseTokenSource returns a TokenSource which repeatedly returns the +// same token as long as it's valid, starting with t. +// When its cached token is invalid, a new token is obtained from src. +// +// ReuseTokenSource is typically used to reuse tokens from a cache +// (such as a file on disk) between runs of a program, rather than +// obtaining new tokens unnecessarily. +// +// The initial token t may be nil, in which case the TokenSource is +// wrapped in a caching version if it isn't one already. This also +// means it's always safe to wrap ReuseTokenSource around any other +// TokenSource without adverse effects. +func ReuseTokenSource(t *Token, src TokenSource) TokenSource { + // Don't wrap a reuseTokenSource in itself. That would work, + // but cause an unnecessary number of mutex operations. + // Just build the equivalent one. + if rt, ok := src.(*reuseTokenSource); ok { + if t == nil { + // Just use it directly. + return rt + } + src = rt.new + } + return &reuseTokenSource{ + t: t, + new: src, + } +} diff --git a/vendor/golang.org/x/oauth2/token.go b/vendor/golang.org/x/oauth2/token.go new file mode 100644 index 0000000..8227203 --- /dev/null +++ b/vendor/golang.org/x/oauth2/token.go @@ -0,0 +1,178 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "context" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "golang.org/x/oauth2/internal" +) + +// expiryDelta determines how earlier a token should be considered +// expired than its actual expiration time. It is used to avoid late +// expirations due to client-server time mismatches. +const expiryDelta = 10 * time.Second + +// Token represents the credentials used to authorize +// the requests to access protected resources on the OAuth 2.0 +// provider's backend. +// +// Most users of this package should not access fields of Token +// directly. They're exported mostly for use by related packages +// implementing derivative OAuth2 flows. +type Token struct { + // AccessToken is the token that authorizes and authenticates + // the requests. + AccessToken string `json:"access_token"` + + // TokenType is the type of token. + // The Type method returns either this or "Bearer", the default. 
+ TokenType string `json:"token_type,omitempty"` + + // RefreshToken is a token that's used by the application + // (as opposed to the user) to refresh the access token + // if it expires. + RefreshToken string `json:"refresh_token,omitempty"` + + // Expiry is the optional expiration time of the access token. + // + // If zero, TokenSource implementations will reuse the same + // token forever and RefreshToken or equivalent + // mechanisms for that TokenSource will not be used. + Expiry time.Time `json:"expiry,omitempty"` + + // raw optionally contains extra metadata from the server + // when updating a token. + raw interface{} +} + +// Type returns t.TokenType if non-empty, else "Bearer". +func (t *Token) Type() string { + if strings.EqualFold(t.TokenType, "bearer") { + return "Bearer" + } + if strings.EqualFold(t.TokenType, "mac") { + return "MAC" + } + if strings.EqualFold(t.TokenType, "basic") { + return "Basic" + } + if t.TokenType != "" { + return t.TokenType + } + return "Bearer" +} + +// SetAuthHeader sets the Authorization header to r using the access +// token in t. +// +// This method is unnecessary when using Transport or an HTTP Client +// returned by this package. +func (t *Token) SetAuthHeader(r *http.Request) { + r.Header.Set("Authorization", t.Type()+" "+t.AccessToken) +} + +// WithExtra returns a new Token that's a clone of t, but using the +// provided raw extra map. This is only intended for use by packages +// implementing derivative OAuth2 flows. +func (t *Token) WithExtra(extra interface{}) *Token { + t2 := new(Token) + *t2 = *t + t2.raw = extra + return t2 +} + +// Extra returns an extra field. +// Extra fields are key-value pairs returned by the server as a +// part of the token retrieval response. +func (t *Token) Extra(key string) interface{} { + if raw, ok := t.raw.(map[string]interface{}); ok { + return raw[key] + } + + vals, ok := t.raw.(url.Values) + if !ok { + return nil + } + + v := vals.Get(key) + switch s := strings.TrimSpace(v); strings.Count(s, ".") { + case 0: // Contains no "."; try to parse as int + if i, err := strconv.ParseInt(s, 10, 64); err == nil { + return i + } + case 1: // Contains a single "."; try to parse as float + if f, err := strconv.ParseFloat(s, 64); err == nil { + return f + } + } + + return v +} + +// timeNow is time.Now but pulled out as a variable for tests. +var timeNow = time.Now + +// expired reports whether the token is expired. +// t must be non-nil. +func (t *Token) expired() bool { + if t.Expiry.IsZero() { + return false + } + return t.Expiry.Round(0).Add(-expiryDelta).Before(timeNow()) +} + +// Valid reports whether t is non-nil, has an AccessToken, and is not expired. +func (t *Token) Valid() bool { + return t != nil && t.AccessToken != "" && !t.expired() +} + +// tokenFromInternal maps an *internal.Token struct into +// a *Token struct. +func tokenFromInternal(t *internal.Token) *Token { + if t == nil { + return nil + } + return &Token{ + AccessToken: t.AccessToken, + TokenType: t.TokenType, + RefreshToken: t.RefreshToken, + Expiry: t.Expiry, + raw: t.Raw, + } +} + +// retrieveToken takes a *Config and uses that to retrieve an *internal.Token. +// This token is then mapped from *internal.Token into an *oauth2.Token which is returned along +// with an error.. 
+func retrieveToken(ctx context.Context, c *Config, v url.Values) (*Token, error) { + tk, err := internal.RetrieveToken(ctx, c.ClientID, c.ClientSecret, c.Endpoint.TokenURL, v, internal.AuthStyle(c.Endpoint.AuthStyle)) + if err != nil { + if rErr, ok := err.(*internal.RetrieveError); ok { + return nil, (*RetrieveError)(rErr) + } + return nil, err + } + return tokenFromInternal(tk), nil +} + +// RetrieveError is the error returned when the token endpoint returns a +// non-2XX HTTP status code. +type RetrieveError struct { + Response *http.Response + // Body is the body that was consumed by reading Response.Body. + // It may be truncated. + Body []byte +} + +func (r *RetrieveError) Error() string { + return fmt.Sprintf("oauth2: cannot fetch token: %v\nResponse: %s", r.Response.Status, r.Body) +} diff --git a/vendor/golang.org/x/oauth2/transport.go b/vendor/golang.org/x/oauth2/transport.go new file mode 100644 index 0000000..9065791 --- /dev/null +++ b/vendor/golang.org/x/oauth2/transport.go @@ -0,0 +1,89 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package oauth2 + +import ( + "errors" + "log" + "net/http" + "sync" +) + +// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests, +// wrapping a base RoundTripper and adding an Authorization header +// with a token from the supplied Sources. +// +// Transport is a low-level mechanism. Most code will use the +// higher-level Config.Client method instead. +type Transport struct { + // Source supplies the token to add to outgoing requests' + // Authorization headers. + Source TokenSource + + // Base is the base RoundTripper used to make HTTP requests. + // If nil, http.DefaultTransport is used. + Base http.RoundTripper +} + +// RoundTrip authorizes and authenticates the request with an +// access token from Transport's Source. +func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { + reqBodyClosed := false + if req.Body != nil { + defer func() { + if !reqBodyClosed { + req.Body.Close() + } + }() + } + + if t.Source == nil { + return nil, errors.New("oauth2: Transport's Source is nil") + } + token, err := t.Source.Token() + if err != nil { + return nil, err + } + + req2 := cloneRequest(req) // per RoundTripper contract + token.SetAuthHeader(req2) + + // req.Body is assumed to be closed by the base RoundTripper. + reqBodyClosed = true + return t.base().RoundTrip(req2) +} + +var cancelOnce sync.Once + +// CancelRequest does nothing. It used to be a legacy cancellation mechanism +// but now only it only logs on first use to warn that it's deprecated. +// +// Deprecated: use contexts for cancellation instead. +func (t *Transport) CancelRequest(req *http.Request) { + cancelOnce.Do(func() { + log.Printf("deprecated: golang.org/x/oauth2: Transport.CancelRequest no longer does anything; use contexts") + }) +} + +func (t *Transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + return r2 +} diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 14e4d5c..6e5c81a 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -7,6 +7,7 @@ package unix import ( + "math/bits" "unsafe" ) @@ -79,50 +80,7 @@ func (s *CPUSet) IsSet(cpu int) bool { func (s *CPUSet) Count() int { c := 0 for _, b := range s { - c += onesCount64(uint64(b)) + c += bits.OnesCount64(uint64(b)) } return c } - -// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64. -// Once this package can require Go 1.9, we can delete this -// and update the caller to use bits.OnesCount64. -func onesCount64(x uint64) int { - const m0 = 0x5555555555555555 // 01010101 ... - const m1 = 0x3333333333333333 // 00110011 ... - const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... - - // Unused in this function, but definitions preserved for - // documentation purposes: - // - // const m3 = 0x00ff00ff00ff00ff // etc. - // const m4 = 0x0000ffff0000ffff - // - // Implementation: Parallel summing of adjacent bits. - // See "Hacker's Delight", Chap. 5: Counting Bits. - // The following pattern shows the general approach: - // - // x = x>>1&(m0&m) + x&(m0&m) - // x = x>>2&(m1&m) + x&(m1&m) - // x = x>>4&(m2&m) + x&(m2&m) - // x = x>>8&(m3&m) + x&(m3&m) - // x = x>>16&(m4&m) + x&(m4&m) - // x = x>>32&(m5&m) + x&(m5&m) - // return int(x) - // - // Masking (& operations) can be left away when there's no - // danger that a field's sum will carry over into the next - // field: Since the result cannot be > 64, 8 bits is enough - // and we can ignore the masks for the shifts by 8 and up. - // Per "Hacker's Delight", the first line can be simplified - // more, but it saves at best one instruction, so we leave - // it alone for clarity. - const m = 1<<64 - 1 - x = x>>1&(m0&m) + x&(m0&m) - x = x>>2&(m1&m) + x&(m1&m) - x = (x>>4 + x) & (m2 & m) - x += x >> 8 - x += x >> 16 - x += x >> 32 - return int(x) & (1<<7 - 1) -} diff --git a/vendor/golang.org/x/sys/unix/ioctl.go b/vendor/golang.org/x/sys/unix/ioctl.go index f121a8d..3559e5d 100644 --- a/vendor/golang.org/x/sys/unix/ioctl.go +++ b/vendor/golang.org/x/sys/unix/ioctl.go @@ -6,7 +6,19 @@ package unix -import "runtime" +import ( + "runtime" + "unsafe" +) + +// ioctl itself should not be exposed directly, but additional get/set +// functions for specific types are permissible. + +// IoctlSetInt performs an ioctl operation which sets an integer value +// on fd, using the specified request number. +func IoctlSetInt(fd int, req uint, value int) error { + return ioctl(fd, req, uintptr(value)) +} // IoctlSetWinsize performs an ioctl on fd with a *Winsize argument. // @@ -14,7 +26,7 @@ import "runtime" func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // TODO: if we get the chance, remove the req parameter and // hardcode TIOCSWINSZ. - err := ioctlSetWinsize(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } @@ -24,7 +36,30 @@ func IoctlSetWinsize(fd int, req uint, value *Winsize) error { // The req value will usually be TCSETA or TIOCSETA. func IoctlSetTermios(fd int, req uint, value *Termios) error { // TODO: if we get the chance, remove the req parameter. 
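With the wrappers consolidated into vendor/golang.org/x/sys/unix/ioctl.go, the typed helpers (IoctlSetInt, IoctlSetWinsize, IoctlSetTermios, and the matching getters added just below) all route through the single ioctl function above. A small sketch of the getter side, assuming it runs attached to a terminal:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	fd := int(os.Stdout.Fd())

	// Ask the kernel for the terminal dimensions via the generic ioctl path.
	ws, err := unix.IoctlGetWinsize(fd, unix.TIOCGWINSZ)
	if err != nil {
		fmt.Println("not a terminal:", err)
		return
	}
	fmt.Printf("terminal is %d rows x %d columns\n", ws.Row, ws.Col)
}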
- err := ioctlSetTermios(fd, req, value) + err := ioctl(fd, req, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } + +// IoctlGetInt performs an ioctl operation which gets an integer value +// from fd, using the specified request number. +// +// A few ioctl requests use the return value as an output parameter; +// for those, IoctlRetInt should be used instead of this function. +func IoctlGetInt(fd int, req uint) (int, error) { + var value int + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return value, err +} + +func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { + var value Winsize + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} + +func IoctlGetTermios(fd int, req uint) (*Termios, error) { + var value Termios + err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) + return &value, err +} diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/golang.org/x/sys/unix/mkall.sh index 5a22eca..890ec46 100644 --- a/vendor/golang.org/x/sys/unix/mkall.sh +++ b/vendor/golang.org/x/sys/unix/mkall.sh @@ -212,9 +212,11 @@ esac echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in && gofmt -w zsyscall_$GOOSARCH.go && gofmt -w zsyscall_"$GOOSARCH"_gccgo.go && gofmt -w zsyscall_"$GOOSARCH"_gc.go " ; elif [ "$GOOS" == "darwin" ]; then # pre-1.12, direct syscalls - echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go"; + echo "$mksyscall -tags $GOOS,$GOARCH,!go1.12 $syscall_goos syscall_darwin_${GOARCH}.1_11.go $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.1_11.go"; # 1.12 and later, syscalls via libSystem echo "$mksyscall -tags $GOOS,$GOARCH,go1.12 $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; + # 1.13 and later, syscalls via libSystem (including syscallPtr) + echo "$mksyscall -tags $GOOS,$GOARCH,go1.13 syscall_darwin.1_13.go |gofmt >zsyscall_$GOOSARCH.1_13.go"; else echo "$mksyscall -tags $GOOS,$GOARCH $syscall_goos $GOOSARCH_in |gofmt >zsyscall_$GOOSARCH.go"; fi diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 14624b9..67b8482 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -60,6 +60,7 @@ includes_Darwin=' #include #include #include +#include #include #include #include @@ -80,6 +81,7 @@ includes_Darwin=' includes_DragonFly=' #include #include +#include #include #include #include @@ -103,6 +105,7 @@ includes_FreeBSD=' #include #include #include +#include #include #include #include @@ -179,24 +182,31 @@ struct ltchars { #include #include #include +#include #include #include #include #include +#include #include +#include #include +#include +#include +#include +#include +#include +#include +#include #include +#include #include #include #include #include #include #include -#include -#include -#include -#include -#include +#include #include #include #include @@ -206,26 +216,23 @@ struct ltchars { #include #include #include +#include #include +#include #include #include +#include #include -#include #include #include -#include -#include -#include #include -#include -#include +#include #include -#include +#include +#include +#include #include -#include -#include -#include -#include + #include #include @@ -264,6 +271,11 @@ struct ltchars { #define FS_KEY_DESC_PREFIX "fscrypt:" #define FS_KEY_DESC_PREFIX_SIZE 8 #define FS_MAX_KEY_SIZE 64 + +// The code generator produces -0x1 for (~0), but an unsigned value is necessary +// for the tipc_subscr timeout __u32 
field. +#undef TIPC_WAIT_FOREVER +#define TIPC_WAIT_FOREVER 0xffffffff ' includes_NetBSD=' @@ -273,6 +285,7 @@ includes_NetBSD=' #include #include #include +#include #include #include #include @@ -299,6 +312,7 @@ includes_OpenBSD=' #include #include #include +#include #include #include #include @@ -335,6 +349,7 @@ includes_OpenBSD=' includes_SunOS=' #include #include +#include #include #include #include @@ -427,6 +442,7 @@ ccflags="$@" $2 == "XCASE" || $2 == "ALTWERASE" || $2 == "NOKERNINFO" || + $2 == "NFDBITS" || $2 ~ /^PAR/ || $2 ~ /^SIG[^_]/ || $2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ || @@ -451,6 +467,7 @@ ccflags="$@" $2 ~ /^SYSCTL_VERS/ || $2 !~ "MNT_BITS" && $2 ~ /^(MS|MNT|UMOUNT)_/ || + $2 ~ /^NS_GET_/ || $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ || $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT)_/ || $2 ~ /^KEXEC_/ || @@ -506,6 +523,7 @@ ccflags="$@" $2 ~ /^XDP_/ || $2 ~ /^(HDIO|WIN|SMART)_/ || $2 ~ /^CRYPTO_/ || + $2 ~ /^TIPC_/ || $2 !~ "WMESGLEN" && $2 ~ /^W[A-Z0-9]+$/ || $2 ~/^PPPIOC/ || diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go b/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go new file mode 100644 index 0000000..5144dee --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_dragonfly.go @@ -0,0 +1,16 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package unix + +// Round the length of a raw sockaddr up to align it properly. +func cmsgAlignOf(salen int) int { + salign := SizeofPtr + if SizeofPtr == 8 && !supportsABI(_dragonflyABIChangeVersion) { + // 64-bit Dragonfly before the September 2019 ABI changes still requires + // 32-bit aligned access to network subsystem. + salign = 4 + } + return (salen + salign - 1) & ^(salign - 1) +} diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go index 6079eb4..8bf4570 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_linux.go @@ -17,7 +17,7 @@ func UnixCredentials(ucred *Ucred) []byte { h.Level = SOL_SOCKET h.Type = SCM_CREDENTIALS h.SetLen(CmsgLen(SizeofUcred)) - *((*Ucred)(cmsgData(h))) = *ucred + *(*Ucred)(h.data(0)) = *ucred return b } diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go index 062bcab..003916e 100644 --- a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix.go @@ -9,35 +9,9 @@ package unix import ( - "runtime" "unsafe" ) -// Round the length of a raw sockaddr up to align it properly. -func cmsgAlignOf(salen int) int { - salign := SizeofPtr - - switch runtime.GOOS { - case "aix": - // There is no alignment on AIX. - salign = 1 - case "darwin", "dragonfly", "solaris", "illumos": - // NOTE: It seems like 64-bit Darwin, DragonFly BSD, - // illumos, and Solaris kernels still require 32-bit - // aligned access to network subsystem. - if SizeofPtr == 8 { - salign = 4 - } - case "netbsd", "openbsd": - // NetBSD and OpenBSD armv7 require 64-bit alignment. - if runtime.GOARCH == "arm" { - salign = 8 - } - } - - return (salen + salign - 1) & ^(salign - 1) -} - // CmsgLen returns the value to store in the Len field of the Cmsghdr // structure, taking into account any necessary alignment. 
func CmsgLen(datalen int) int { @@ -50,8 +24,8 @@ func CmsgSpace(datalen int) int { return cmsgAlignOf(SizeofCmsghdr) + cmsgAlignOf(datalen) } -func cmsgData(h *Cmsghdr) unsafe.Pointer { - return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr))) +func (h *Cmsghdr) data(offset uintptr) unsafe.Pointer { + return unsafe.Pointer(uintptr(unsafe.Pointer(h)) + uintptr(cmsgAlignOf(SizeofCmsghdr)) + offset) } // SocketControlMessage represents a socket control message. @@ -94,10 +68,8 @@ func UnixRights(fds ...int) []byte { h.Level = SOL_SOCKET h.Type = SCM_RIGHTS h.SetLen(CmsgLen(datalen)) - data := cmsgData(h) - for _, fd := range fds { - *(*int32)(data) = int32(fd) - data = unsafe.Pointer(uintptr(data) + 4) + for i, fd := range fds { + *(*int32)(h.data(4 * uintptr(i))) = int32(fd) } return b } diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go new file mode 100644 index 0000000..7d08dae --- /dev/null +++ b/vendor/golang.org/x/sys/unix/sockcmsg_unix_other.go @@ -0,0 +1,38 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build aix darwin freebsd linux netbsd openbsd solaris + +package unix + +import ( + "runtime" +) + +// Round the length of a raw sockaddr up to align it properly. +func cmsgAlignOf(salen int) int { + salign := SizeofPtr + + // dragonfly needs to check ABI version at runtime, see cmsgAlignOf in + // sockcmsg_dragonfly.go + switch runtime.GOOS { + case "aix": + // There is no alignment on AIX. + salign = 1 + case "darwin", "illumos", "solaris": + // NOTE: It seems like 64-bit Darwin, Illumos and Solaris + // kernels still require 32-bit aligned access to network + // subsystem. + if SizeofPtr == 8 { + salign = 4 + } + case "netbsd", "openbsd": + // NetBSD and OpenBSD armv7 require 64-bit alignment. + if runtime.GOARCH == "arm" { + salign = 8 + } + } + + return (salen + salign - 1) & ^(salign - 1) +} diff --git a/vendor/golang.org/x/sys/unix/syscall_aix.go b/vendor/golang.org/x/sys/unix/syscall_aix.go index 1aa065f..9ad8a0d 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix.go @@ -350,49 +350,12 @@ func (w WaitStatus) Signal() Signal { func (w WaitStatus) Continued() bool { return w&0x01000000 != 0 } -func (w WaitStatus) CoreDump() bool { return w&0x200 != 0 } +func (w WaitStatus) CoreDump() bool { return w&0x80 == 0x80 } func (w WaitStatus) TrapCause() int { return -1 } //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
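The rewritten UnixRights and the new Cmsghdr.data helper above are normally consumed through Sendmsg/Recvmsg. A minimal sketch of passing an open descriptor across an AF_UNIX socket pair, with error handling trimmed to the essentials:

package main

import (
	"fmt"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	// A connected AF_UNIX pair; in practice one end would belong to another process.
	fds, err := unix.Socketpair(unix.AF_UNIX, unix.SOCK_STREAM, 0)
	if err != nil {
		fmt.Println("socketpair:", err)
		return
	}
	defer unix.Close(fds[0])
	defer unix.Close(fds[1])

	// Encode stdin's descriptor as an SCM_RIGHTS control message and send it.
	oob := unix.UnixRights(int(os.Stdin.Fd()))
	if err := unix.Sendmsg(fds[0], []byte("fd"), oob, nil, 0); err != nil {
		fmt.Println("sendmsg:", err)
	}
}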
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - // fcntl must never be called with cmd=F_DUP2FD because it doesn't work on AIX // There is no way to create a custom fcntl and to keep //sys fcntl easily, // Therefore, the programmer must call dup2 instead of fcntl in this case. diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go index bf05603..b3c8e33 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc.go @@ -29,6 +29,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go index 13d4321..9a6e024 100644 --- a/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/syscall_aix_ppc64.go @@ -29,6 +29,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/golang.org/x/sys/unix/syscall_bsd.go index 97a8eef..d52bcc4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_bsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_bsd.go @@ -237,7 +237,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil @@ -413,8 +413,6 @@ func Kevent(kq int, changes, events []Kevent_t, timeout *Timespec) (n int, err e return kevent(kq, change, len(changes), event, len(events), timeout) } -//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL - // sysctlmib translates name to mib number and appends any additional args. func sysctlmib(name string, args ...int) ([]_C_int, error) { // Translate name to mib number. diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go new file mode 100644 index 0000000..6a15cba --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_12.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,go1.12,!go1.13 + +package unix + +import ( + "unsafe" +) + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // To implement this using libSystem we'd need syscall_syscallPtr for + // fdopendir. However, syscallPtr was only added in Go 1.13, so we fall + // back to raw syscalls for this func on Go 1.12. 
+ var p unsafe.Pointer + if len(buf) > 0 { + p = unsafe.Pointer(&buf[0]) + } else { + p = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(p), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + return n, errnoErr(e1) + } + return n, nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go new file mode 100644 index 0000000..24960c3 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -0,0 +1,103 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,go1.13 + +package unix + +import "unsafe" + +//sys closedir(dir uintptr) (err error) +//sys readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) + +func fdopendir(fd int) (dir uintptr, err error) { + r0, _, e1 := syscall_syscallPtr(funcPC(libc_fdopendir_trampoline), uintptr(fd), 0, 0) + dir = uintptr(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_fdopendir_trampoline() + +//go:linkname libc_fdopendir libc_fdopendir +//go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + // Simulate Getdirentries using fdopendir/readdir_r/closedir. + const ptrSize = unsafe.Sizeof(uintptr(0)) + + // We store the number of entries to skip in the seek + // offset of fd. See issue #31368. + // It's not the full required semantics, but should handle the case + // of calling Getdirentries or ReadDirent repeatedly. + // It won't handle assigning the results of lseek to *basep, or handle + // the directory being edited underfoot. + skip, err := Seek(fd, 0, 1 /* SEEK_CUR */) + if err != nil { + return 0, err + } + + // We need to duplicate the incoming file descriptor + // because the caller expects to retain control of it, but + // fdopendir expects to take control of its argument. + // Just Dup'ing the file descriptor is not enough, as the + // result shares underlying state. Use Openat to make a really + // new file descriptor referring to the same directory. + fd2, err := Openat(fd, ".", O_RDONLY, 0) + if err != nil { + return 0, err + } + d, err := fdopendir(fd2) + if err != nil { + Close(fd2) + return 0, err + } + defer closedir(d) + + var cnt int64 + for { + var entry Dirent + var entryp *Dirent + e := readdir_r(d, &entry, &entryp) + if e != 0 { + return n, errnoErr(e) + } + if entryp == nil { + break + } + if skip > 0 { + skip-- + cnt++ + continue + } + reclen := int(entry.Reclen) + if reclen > len(buf) { + // Not enough room. Return for now. + // The counter will let us know where we should start up again. + // Note: this strategy for suspending in the middle and + // restarting is O(n^2) in the length of the directory. Oh well. + break + } + // Copy entry into return buffer. + s := struct { + ptr unsafe.Pointer + siz int + cap int + }{ptr: unsafe.Pointer(&entry), siz: reclen, cap: reclen} + copy(buf, *(*[]byte)(unsafe.Pointer(&s))) + buf = buf[reclen:] + n += reclen + cnt++ + } + // Set the seek offset of the input fd to record + // how many files we've already returned. 
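On Darwin this simulated Getdirentries is what the package's higher-level directory helpers end up calling. A typical way to list a directory through ReadDirent and ParseDirent, with an arbitrary 4 KiB buffer:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open(".", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		fmt.Println("open:", err)
		return
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := unix.ReadDirent(fd, buf)
		if err != nil || n <= 0 {
			break
		}
		// A negative max tells ParseDirent to consume every entry in buf.
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}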
+ _, err = Seek(fd, cnt, 0 /* SEEK_SET */) + if err != nil { + return n, err + } + + return n, nil +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go index 216b4ac..c5018a3 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go @@ -89,7 +89,6 @@ func direntNamlen(buf []byte) (uint64, bool) { return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen)) } -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) func PtraceAttach(pid int) (err error) { return ptrace(PT_ATTACH, pid, 0, 0) } func PtraceDetach(pid int) (err error) { return ptrace(PT_DETACH, pid, 0, 0) } @@ -340,43 +339,6 @@ func Kill(pid int, signum syscall.Signal) (err error) { return kill(pid, int(sig //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) @@ -498,7 +460,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sys Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go new file mode 100644 index 0000000..6b223f9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.1_11.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin,386,!go1.12 + +package unix + +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 489726f..dd756e7 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -10,6 +10,9 @@ import ( "syscall" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } @@ -43,6 +46,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -56,7 +63,6 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go new file mode 100644 index 0000000..68ebd6f --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.1_11.go @@ -0,0 +1,9 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin,amd64,!go1.12 + +package unix + +//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 914b89b..7f148c4 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -10,6 +10,9 @@ import ( "syscall" ) +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL +//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } @@ -43,6 +46,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -56,7 +63,6 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Fstat(fd int, stat *Stat_t) (err error) = SYS_FSTAT64 //sys Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) = SYS_FSTATAT64 //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 -//sys Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) = SYS_GETDIRENTRIES64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go new file mode 100644 index 0000000..c81510d --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.1_11.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build darwin,386,!go1.12 + +package unix + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return 0, ENOSYS +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index 4a284cf..58be02e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -8,6 +8,14 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } @@ -41,6 +49,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -58,7 +70,3 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Lstat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go new file mode 100644 index 0000000..01d4504 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.1_11.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,arm64,!go1.12 + +package unix + +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + return 0, ENOSYS +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 52dcd88..1ee931f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -10,6 +10,14 @@ import ( "syscall" ) +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ENOTSUP +} + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) error { + return ENOTSUP +} + func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: sec, Nsec: nsec} } @@ -43,6 +51,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } @@ -60,7 +72,3 @@ const SYS___SYSCTL = SYS_SYSCTL //sys Lstat(path string, stat *Stat_t) (err error) //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) - -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - return 0, ENOSYS -} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go index 4b4ae46..f34c86c 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_libSystem.go @@ -15,6 +15,7 @@ func syscall_syscall6X(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err func syscall_syscall9(fn, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err Errno) // 
32-bit only func syscall_rawSyscall(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, err Errno) +func syscall_syscallPtr(fn, a1, a2, a3 uintptr) (r1, r2 uintptr, err Errno) //go:linkname syscall_syscall syscall.syscall //go:linkname syscall_syscall6 syscall.syscall6 @@ -22,6 +23,7 @@ func syscall_rawSyscall6(fn, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2 uintptr, er //go:linkname syscall_syscall9 syscall.syscall9 //go:linkname syscall_rawSyscall syscall.rawSyscall //go:linkname syscall_rawSyscall6 syscall.rawSyscall6 +//go:linkname syscall_syscallPtr syscall.syscallPtr // Find the entry point for f. See comments in runtime/proc.go for the // function of the same name. diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go index 260a400..7387eb2 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly.go @@ -12,7 +12,25 @@ package unix -import "unsafe" +import ( + "sync" + "unsafe" +) + +// See version list in https://github.com/DragonFlyBSD/DragonFlyBSD/blob/master/sys/sys/param.h +var ( + osreldateOnce sync.Once + osreldate uint32 +) + +// First __DragonFly_version after September 2019 ABI changes +// http://lists.dragonflybsd.org/pipermail/users/2019-September/358280.html +const _dragonflyABIChangeVersion = 500705 + +func supportsABI(ver uint32) bool { + osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) + return osreldate >= ver +} // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets. type SockaddrDatalink struct { @@ -150,43 +168,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func sysctlUname(mib []_C_int, old *byte, oldlen *uintptr) error { err := sysctl(mib, old, oldlen, nil, 0) if err != nil { @@ -325,7 +306,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go index 9babb31..a6b4830 100644 --- a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/golang.org/x/sys/unix/syscall_freebsd.go index 329d240..25ac934 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd.go @@ -36,6 +36,8 @@ var ( // INO64_FIRST from /usr/src/lib/libc/sys/compat-ino64.h const _ino64First = 1200031 +//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL + func supportsABI(ver uint32) bool { osreldateOnce.Do(func() { osreldate, _ = SysctlUint32("kern.osreldate") }) return osreldate >= ver @@ -201,43 +203,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error { //sys ioctl(fd int, req uint, arg uintptr) (err error) -// ioctl itself should not be exposed directly, but additional get/set -// functions for specific types are permissible. - -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. 
-func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func Uname(uname *Utsname) error { mib := []_C_int{CTL_KERN, KERN_OSTYPE} n := unsafe.Sizeof(uname.Sysname) @@ -688,7 +653,7 @@ func PtraceSingleStep(pid int) (err error) { //sys Revoke(path string) (err error) //sys Rmdir(path string) (err error) //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK -//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) +//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) //sysnb Setegid(egid int) (err error) //sysnb Seteuid(euid int) (err error) //sysnb Setgid(gid int) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go index 21e0395..dcc5645 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go index 9c945a6..321c3ba 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go index 5cd6243..6977008 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go index a318054..dbbbfd6 100644 --- a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_freebsd_arm64.go @@ -33,6 +33,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = int32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 637b501..26903bc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -71,6 +71,17 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { // ioctl 
itself should not be exposed directly, but additional get/set // functions for specific types are permissible. +// IoctlRetInt performs an ioctl operation specified by req on a device +// associated with opened file descriptor fd, and returns a non-negative +// integer that is returned by the ioctl syscall. +func IoctlRetInt(fd int, req uint) (int, error) { + ret, _, err := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), 0) + if err != 0 { + return 0, err + } + return int(ret), nil +} + // IoctlSetPointerInt performs an ioctl operation which sets an // integer value on fd, using the specified request number. The ioctl // argument is called with a pointer to the integer value, rather than @@ -80,52 +91,18 @@ func IoctlSetPointerInt(fd int, req uint, value int) error { return ioctl(fd, req, uintptr(unsafe.Pointer(&v))) } -// IoctlSetInt performs an ioctl operation which sets an integer value -// on fd, using the specified request number. -func IoctlSetInt(fd int, req uint, value int) error { - return ioctl(fd, req, uintptr(value)) -} - -func ioctlSetWinsize(fd int, req uint, value *Winsize) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - -func ioctlSetTermios(fd int, req uint, value *Termios) error { - return ioctl(fd, req, uintptr(unsafe.Pointer(value))) -} - func IoctlSetRTCTime(fd int, value *RTCTime) error { err := ioctl(fd, RTC_SET_TIME, uintptr(unsafe.Pointer(value))) runtime.KeepAlive(value) return err } -// IoctlGetInt performs an ioctl operation which gets an integer value -// from fd, using the specified request number. -func IoctlGetInt(fd int, req uint) (int, error) { - var value int - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return value, err -} - func IoctlGetUint32(fd int, req uint) (uint32, error) { var value uint32 err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) return value, err } -func IoctlGetWinsize(fd int, req uint) (*Winsize, error) { - var value Winsize - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - -func IoctlGetTermios(fd int, req uint) (*Termios, error) { - var value Termios - err := ioctl(fd, req, uintptr(unsafe.Pointer(&value))) - return &value, err -} - func IoctlGetRTCTime(fd int) (*RTCTime, error) { var value RTCTime err := ioctl(fd, RTC_RD_TIME, uintptr(unsafe.Pointer(&value))) @@ -798,6 +775,70 @@ func (sa *SockaddrPPPoE) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrPPPoX, nil } +// SockaddrTIPC implements the Sockaddr interface for AF_TIPC type sockets. +// For more information on TIPC, see: http://tipc.sourceforge.net/. +type SockaddrTIPC struct { + // Scope is the publication scopes when binding service/service range. + // Should be set to TIPC_CLUSTER_SCOPE or TIPC_NODE_SCOPE. + Scope int + + // Addr is the type of address used to manipulate a socket. Addr must be + // one of: + // - *TIPCSocketAddr: "id" variant in the C addr union + // - *TIPCServiceRange: "nameseq" variant in the C addr union + // - *TIPCServiceName: "name" variant in the C addr union + // + // If nil, EINVAL will be returned when the structure is used. + Addr TIPCAddr + + raw RawSockaddrTIPC +} + +// TIPCAddr is implemented by types that can be used as an address for +// SockaddrTIPC. It is only implemented by *TIPCSocketAddr, *TIPCServiceRange, +// and *TIPCServiceName. 
+type TIPCAddr interface { + tipcAddrtype() uint8 + tipcAddr() [12]byte +} + +func (sa *TIPCSocketAddr) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCSocketAddr{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCSocketAddr) tipcAddrtype() uint8 { return TIPC_SOCKET_ADDR } + +func (sa *TIPCServiceRange) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceRange{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCServiceRange) tipcAddrtype() uint8 { return TIPC_SERVICE_RANGE } + +func (sa *TIPCServiceName) tipcAddr() [12]byte { + var out [12]byte + copy(out[:], (*(*[unsafe.Sizeof(TIPCServiceName{})]byte)(unsafe.Pointer(sa)))[:]) + return out +} + +func (sa *TIPCServiceName) tipcAddrtype() uint8 { return TIPC_SERVICE_ADDR } + +func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Addr == nil { + return nil, 0, EINVAL + } + + sa.raw.Family = AF_TIPC + sa.raw.Scope = int8(sa.Scope) + sa.raw.Addrtype = sa.Addr.tipcAddrtype() + sa.raw.Addr = sa.Addr.tipcAddr() + + return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -843,7 +884,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil @@ -923,6 +964,27 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { break } } + return sa, nil + case AF_TIPC: + pp := (*RawSockaddrTIPC)(unsafe.Pointer(rsa)) + + sa := &SockaddrTIPC{ + Scope: int(pp.Scope), + } + + // Determine which union variant is present in pp.Addr by checking + // pp.Addrtype. + switch pp.Addrtype { + case TIPC_SERVICE_RANGE: + sa.Addr = (*TIPCServiceRange)(unsafe.Pointer(&pp.Addr)) + case TIPC_SERVICE_ADDR: + sa.Addr = (*TIPCServiceName)(unsafe.Pointer(&pp.Addr)) + case TIPC_SOCKET_ADDR: + sa.Addr = (*TIPCSocketAddr)(unsafe.Pointer(&pp.Addr)) + default: + return nil, EINVAL + } + return sa, nil } return nil, EAFNOSUPPORT @@ -1160,6 +1222,34 @@ func KeyctlDHCompute(params *KeyctlDHParams, buffer []byte) (size int, err error return keyctlDH(KEYCTL_DH_COMPUTE, params, buffer) } +// KeyctlRestrictKeyring implements the KEYCTL_RESTRICT_KEYRING command. This +// command limits the set of keys that can be linked to the keyring, regardless +// of keyring permissions. The command requires the "setattr" permission. +// +// When called with an empty keyType the command locks the keyring, preventing +// any further keys from being linked to the keyring. +// +// The "asymmetric" keyType defines restrictions requiring key payloads to be +// DER encoded X.509 certificates signed by keys in another keyring. Restrictions +// for "asymmetric" include "builtin_trusted", "builtin_and_secondary_trusted", +// "key_or_keyring:", and "key_or_keyring::chain". +// +// As of Linux 4.12, only the "asymmetric" keyType defines type-specific +// restrictions. 
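As the KeyctlRestrictKeyring comment above notes, an empty keyType simply locks the keyring. A minimal Linux-only sketch against the session keyring; it assumes the calling process holds "setattr" permission on that keyring:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Lock the session keyring so no further keys can be linked to it.
	err := unix.KeyctlRestrictKeyring(unix.KEY_SPEC_SESSION_KEYRING, "", "")
	if err != nil {
		fmt.Println("keyctl restrict:", err)
		return
	}
	fmt.Println("session keyring locked")
}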
+// +// See the full documentation at: +// http://man7.org/linux/man-pages/man3/keyctl_restrict_keyring.3.html +// http://man7.org/linux/man-pages/man2/keyctl.2.html +func KeyctlRestrictKeyring(ringid int, keyType string, restriction string) error { + if keyType == "" { + return keyctlRestrictKeyring(KEYCTL_RESTRICT_KEYRING, ringid) + } + return keyctlRestrictKeyringByType(KEYCTL_RESTRICT_KEYRING, ringid, keyType, restriction) +} + +//sys keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) = SYS_KEYCTL +//sys keyctlRestrictKeyring(cmd int, arg2 int) (err error) = SYS_KEYCTL + func Recvmsg(fd int, p, oob []byte, flags int) (n, oobn int, recvflags int, from Sockaddr, err error) { var msg Msghdr var rsa RawSockaddrAny @@ -1403,8 +1493,12 @@ func PtraceSyscall(pid int, signal int) (err error) { func PtraceSingleStep(pid int) (err error) { return ptrace(PTRACE_SINGLESTEP, pid, 0, 0) } +func PtraceInterrupt(pid int) (err error) { return ptrace(PTRACE_INTERRUPT, pid, 0, 0) } + func PtraceAttach(pid int) (err error) { return ptrace(PTRACE_ATTACH, pid, 0, 0) } +func PtraceSeize(pid int) (err error) { return ptrace(PTRACE_SEIZE, pid, 0, 0) } + func PtraceDetach(pid int) (err error) { return ptrace(PTRACE_DETACH, pid, 0, 0) } //sys reboot(magic1 uint, magic2 uint, cmd int, arg string) (err error) @@ -1761,6 +1855,17 @@ func OpenByHandleAt(mountFD int, handle FileHandle, flags int) (fd int, err erro return openByHandleAt(mountFD, handle.fileHandle, flags) } +// Klogset wraps the sys_syslog system call; it sets console_loglevel to +// the value specified by arg and passes a dummy pointer to bufp. +func Klogset(typ int, arg int) (err error) { + var p unsafe.Pointer + _, _, errno := Syscall(SYS_SYSLOG, uintptr(typ), uintptr(p), uintptr(arg)) + if errno != 0 { + return errnoErr(errno) + } + return nil +} + /* * Unimplemented */ diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/golang.org/x/sys/unix/syscall_linux_386.go index e2f8cf6..e7fa665 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_386.go @@ -372,6 +372,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go index 87a3074..088ce0f 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go @@ -163,6 +163,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint64(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint64(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint64(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go index f626794..11930fc 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm.go @@ -252,6 +252,10 @@ func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } +func (msghdr *Msghdr) SetIovlen(length int) { + msghdr.Iovlen = uint32(length) +} + func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go 
b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
index cb20b15..251e2d9 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go
@@ -180,6 +180,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint64(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint64(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint64(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
index b3b21ec..7562fe9 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mips64x.go
@@ -208,6 +208,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint64(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint64(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint64(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
index 5144d4e..a939ff8 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_mipsx.go
@@ -220,6 +220,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
index 0a100b6..28d6d0f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go
@@ -91,6 +91,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint64(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint64(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint64(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
index 6230f64..6798c26 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_riscv64.go
@@ -179,6 +179,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint64(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint64(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint64(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
index f81dbdc..eb5cb1a 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_s390x.go
@@ -120,6 +120,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint64(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint64(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint64(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
index b695656..37321c1 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux_sparc64.go
@@ -107,6 +107,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint64(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint64(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint64(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
index 5ef3090..f95463e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd.go
@@ -18,6 +18,8 @@ import (
 	"unsafe"
 )
 
+//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
+
 // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
 type SockaddrDatalink struct {
 	Len uint8
@@ -187,43 +189,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error {
 
 //sys ioctl(fd int, req uint, arg uintptr) (err error)
 
-// ioctl itself should not be exposed directly, but additional get/set
-// functions for specific types are permissible.
-
-// IoctlSetInt performs an ioctl operation which sets an integer value
-// on fd, using the specified request number.
-func IoctlSetInt(fd int, req uint, value int) error {
-	return ioctl(fd, req, uintptr(value))
-}
-
-func ioctlSetWinsize(fd int, req uint, value *Winsize) error {
-	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-}
-
-func ioctlSetTermios(fd int, req uint, value *Termios) error {
-	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-}
-
-// IoctlGetInt performs an ioctl operation which gets an integer value
-// from fd, using the specified request number.
-func IoctlGetInt(fd int, req uint) (int, error) {
-	var value int
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return value, err
-}
-
-func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
-	var value Winsize
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return &value, err
-}
-
-func IoctlGetTermios(fd int, req uint) (*Termios, error) {
-	var value Termios
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return &value, err
-}
-
 func IoctlGetPtmget(fd int, req uint) (*Ptmget, error) {
 	var value Ptmget
 	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
@@ -365,7 +330,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys Revoke(path string) (err error)
 //sys Rmdir(path string) (err error)
 //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
-//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
 //sysnb Setegid(egid int) (err error)
 //sysnb Seteuid(euid int) (err error)
 //sysnb Setgid(gid int) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
index 24f74e5..24da8b5 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = int32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
index 6878bf7..25a0ac8 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = int32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
index dbbfcf7..21591ec 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = int32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
index f343446..8047496 100644
--- a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_netbsd_arm64.go
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = int32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
index 1a074b2..7fe65ef 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd.go
@@ -18,6 +18,8 @@ import (
 	"unsafe"
 )
 
+//sys sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) = SYS___SYSCTL
+
 // SockaddrDatalink implements the Sockaddr interface for AF_LINK type sockets.
 type SockaddrDatalink struct {
 	Len uint8
@@ -178,43 +180,6 @@ func setattrlistTimes(path string, times []Timespec, flags int) error {
 
 //sys ioctl(fd int, req uint, arg uintptr) (err error)
 
-// ioctl itself should not be exposed directly, but additional get/set
-// functions for specific types are permissible.
-
-// IoctlSetInt performs an ioctl operation which sets an integer value
-// on fd, using the specified request number.
-func IoctlSetInt(fd int, req uint, value int) error {
-	return ioctl(fd, req, uintptr(value))
-}
-
-func ioctlSetWinsize(fd int, req uint, value *Winsize) error {
-	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-}
-
-func ioctlSetTermios(fd int, req uint, value *Termios) error {
-	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-}
-
-// IoctlGetInt performs an ioctl operation which gets an integer value
-// from fd, using the specified request number.
-func IoctlGetInt(fd int, req uint) (int, error) {
-	var value int
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return value, err
-}
-
-func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
-	var value Winsize
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return &value, err
-}
-
-func IoctlGetTermios(fd int, req uint) (*Termios, error) {
-	var value Termios
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return &value, err
-}
-
 //sys ppoll(fds *PollFd, nfds int, timeout *Timespec, sigmask *Sigset_t) (n int, err error)
 
 func Ppoll(fds []PollFd, timeout *Timespec, sigmask *Sigset_t) (n int, err error) {
@@ -340,7 +305,7 @@ func Uname(uname *Utsname) error {
 //sys Revoke(path string) (err error)
 //sys Rmdir(path string) (err error)
 //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = SYS_LSEEK
-//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
 //sysnb Setegid(egid int) (err error)
 //sysnb Seteuid(euid int) (err error)
 //sysnb Setgid(gid int) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
index d62da60..42b5a0e 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
index 9a35334..6ea4b48 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
index 5d812aa..1c3d26f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm.go
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
index 0fb39cf..a8c458c 100644
--- a/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_openbsd_arm64.go
@@ -28,6 +28,10 @@ func (msghdr *Msghdr) SetControllen(length int) {
 	msghdr.Controllen = uint32(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = uint32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 0153a31..0e2a696 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -391,7 +391,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		for n < len(pp.Path) && pp.Path[n] != 0 {
 			n++
 		}
-		bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
+		bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n]
 		sa.Name = string(bytes)
 		return sa, nil
 
@@ -553,40 +553,10 @@ func Minor(dev uint64) uint32 {
 
 //sys ioctl(fd int, req uint, arg uintptr) (err error)
 
-func IoctlSetInt(fd int, req uint, value int) (err error) {
-	return ioctl(fd, req, uintptr(value))
-}
-
-func ioctlSetWinsize(fd int, req uint, value *Winsize) (err error) {
-	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-}
-
-func ioctlSetTermios(fd int, req uint, value *Termios) (err error) {
-	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
-}
-
 func IoctlSetTermio(fd int, req uint, value *Termio) (err error) {
 	return ioctl(fd, req, uintptr(unsafe.Pointer(value)))
 }
 
-func IoctlGetInt(fd int, req uint) (int, error) {
-	var value int
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return value, err
-}
-
-func IoctlGetWinsize(fd int, req uint) (*Winsize, error) {
-	var value Winsize
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return &value, err
-}
-
-func IoctlGetTermios(fd int, req uint) (*Termios, error) {
-	var value Termios
-	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
-	return &value, err
-}
-
 func IoctlGetTermio(fd int, req uint) (*Termio, error) {
 	var value Termio
 	err := ioctl(fd, req, uintptr(unsafe.Pointer(&value)))
@@ -679,7 +649,7 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
 //sys Renameat(olddirfd int, oldpath string, newdirfd int, newpath string) (err error)
 //sys Rmdir(path string) (err error)
 //sys Seek(fd int, offset int64, whence int) (newoffset int64, err error) = lseek
-//sys Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error)
+//sys Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error)
 //sysnb Setegid(egid int) (err error)
 //sysnb Seteuid(euid int) (err error)
 //sysnb Setgid(gid int) (err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
index 91c32dd..b22a34d 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go
@@ -18,6 +18,10 @@ func (iov *Iovec) SetLen(length int) {
 	iov.Len = uint64(length)
 }
 
+func (msghdr *Msghdr) SetIovlen(length int) {
+	msghdr.Iovlen = int32(length)
+}
+
 func (cmsg *Cmsghdr) SetLen(length int) {
 	cmsg.Len = uint32(length)
 }
diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
index 3b39d74..6217cdb 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go
@@ -3,7 +3,7 @@
 
 // +build 386,darwin
 
-// Created by cgo -godefs - DO NOT EDIT
+// Code generated by cmd/cgo -godefs; DO NOT EDIT.
// cgo -godefs -- -m32 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go index 8fe5547..e3ff2ee 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go @@ -3,7 +3,7 @@ // +build amd64,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go index 7a97777..3e41757 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go @@ -3,7 +3,7 @@ // +build arm,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go index 6d56d8a..cbd8ed1 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go @@ -3,7 +3,7 @@ // +build arm64,darwin -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -980,6 +980,7 @@ const ( NET_RT_MAXID = 0xa NET_RT_STAT = 0x4 NET_RT_TRASH = 0x5 + NFDBITS = 0x20 NL0 = 0x0 NL1 = 0x100 NL2 = 0x200 diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go index bbe6089..6130471 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go @@ -938,6 +938,7 @@ const ( NET_RT_FLAGS = 0x2 NET_RT_IFLIST = 0x3 NET_RT_MAXID = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go index d2bbaab..b72544f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go @@ -3,7 +3,7 @@ // +build 386,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go package unix @@ -1055,6 +1055,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go index 4f8db78..9f38267 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -m64 _const.go package unix @@ -1056,6 +1056,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go index 53e5de6..16db56a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go @@ -3,7 +3,7 @@ // +build arm,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- _const.go package unix @@ -1063,6 +1063,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go index d4a192f..1a1de34 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm64.go @@ -3,7 +3,7 @@ // +build arm64,freebsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -1056,6 +1056,7 @@ const ( NET_RT_IFLIST = 0x3 NET_RT_IFLISTL = 0x5 NET_RT_IFMALIST = 0x4 + NFDBITS = 0x40 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 5213d82..fcf5796 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -722,6 +726,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -987,6 +992,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1085,6 +1091,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1097,6 +1114,8 @@ const ( KEYCTL_INVALIDATE = 0x15 
KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1361,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x20 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1426,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1695,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1686,6 +1712,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1724,6 +1751,10 @@ const ( PTRACE_SINGLEBLOCK = 0x21 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 @@ -1784,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1857,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1881,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1888,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1900,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1908,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1994,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2132,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2432,6 +2469,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + 
TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2445,7 +2547,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2644,6 +2746,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2660,6 +2764,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 39b630c..5bcf3db 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -722,6 +726,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -987,6 +992,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1085,6 +1091,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 
KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1097,6 +1114,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1342,6 +1361,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1406,6 +1426,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1672,6 +1696,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1687,6 +1713,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1725,6 +1752,10 @@ const ( PTRACE_SINGLEBLOCK = 0x21 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 @@ -1785,7 +1816,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1858,6 +1889,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1882,6 +1914,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1889,7 +1922,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1901,6 +1934,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1909,8 +1943,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1995,6 +2029,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2133,6 +2169,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2433,6 +2470,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + 
TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2446,7 +2548,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2644,6 +2746,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2660,6 +2764,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index c59a1be..3e02dcf 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc 
ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1340,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x20 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1404,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1669,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1690,6 +1716,7 @@ const ( PTRACE_GETSIGMASK = 0x420a PTRACE_GETVFPREGS = 0x1b PTRACE_GETWMMXREGS = 0x12 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x16 PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 @@ -1730,6 +1757,10 @@ const ( PTRACE_SET_SYSCALL = 0x17 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 PT_DATA_ADDR = 0x10004 PT_TEXT_ADDR = 0x10000 @@ -1791,7 +1822,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1864,6 +1895,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1888,6 +1920,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1895,7 +1928,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1907,6 +1940,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1915,8 +1949,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL 
= 0x43 @@ -2001,6 +2035,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2139,6 +2175,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2439,6 +2476,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2452,7 +2554,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2650,6 +2752,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2666,6 +2770,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 5f35c19..2293f8b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - 
BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -561,6 +564,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -724,6 +728,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -989,6 +994,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1087,6 +1093,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1099,6 +1116,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1343,6 +1362,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1407,6 +1427,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1672,6 +1696,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1711,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1717,6 +1744,12 @@ const ( PTRACE_SETSIGMASK = 0x420b PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 + PTRACE_SYSEMU = 0x1f + PTRACE_SYSEMU_SINGLESTEP = 0x20 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1775,7 +1808,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1848,6 +1881,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1872,6 +1906,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1879,7 +1914,7 @@ const ( RTM_GETSTATS 
= 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1891,6 +1926,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1899,8 +1935,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1985,6 +2021,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2123,6 +2161,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2424,6 +2463,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2437,7 +2541,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2635,6 +2739,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2651,6 +2757,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index 7f1b7be..57742ea 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1340,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x20 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1404,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1669,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1683,6 +1709,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1726,6 +1753,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1784,7 +1815,7 @@ const ( 
RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1857,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1881,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1888,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1900,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1908,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1994,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2132,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2434,6 +2471,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2447,7 +2549,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = 
-0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2646,6 +2748,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2662,6 +2766,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 603d88b..33bfa6c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1340,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1404,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1669,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1683,6 +1709,7 @@ 
const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1726,6 +1753,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1784,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1857,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1881,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1888,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1900,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1908,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1994,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2132,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2434,6 +2471,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + 
TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2447,7 +2549,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2646,6 +2748,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2662,6 +2766,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index ed178f8..89fd414 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1340,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 
NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1404,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1669,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1683,6 +1709,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1726,6 +1753,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1784,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1857,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1881,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1888,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1900,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1908,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1994,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2132,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2434,6 +2471,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + 
TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2447,7 +2549,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2646,6 +2748,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2662,6 +2766,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 080b789..aabe5e4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + 
KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1340,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x20 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1404,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1669,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1683,6 +1709,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_GET_THREAD_AREA = 0x19 PTRACE_GET_THREAD_AREA_3264 = 0xc4 PTRACE_GET_WATCH_REGS = 0xd0 @@ -1726,6 +1753,10 @@ const ( PTRACE_SET_WATCH_REGS = 0xd1 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1784,7 +1815,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1857,6 +1888,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1881,6 +1913,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1888,7 +1921,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1900,6 +1933,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1908,8 +1942,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1994,6 +2028,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2132,6 +2168,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2434,6 +2471,71 @@ const ( TIOCSTI = 0x5472 TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + 
TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x8000 TPACKET_ALIGNMENT = 0x10 @@ -2447,7 +2549,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2646,6 +2748,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2662,6 +2766,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 961e8ea..2722791 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 
F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1339,6 +1358,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1405,6 +1425,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1695,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1690,6 +1716,7 @@ const ( PTRACE_GETVRREGS = 0x12 PTRACE_GETVSRREGS = 0x1b PTRACE_GET_DEBUGREG = 0x19 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1729,6 +1756,10 @@ const ( PTRACE_SINGLEBLOCK = 0x100 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1d PTRACE_SYSEMU_SINGLESTEP = 0x1e PTRACE_TRACEME = 0x0 @@ -1842,7 +1873,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1915,6 +1946,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1939,6 +1971,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1946,7 +1979,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1958,6 +1991,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1966,8 +2000,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2052,6 +2086,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 
0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2190,6 +2226,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2494,6 +2531,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x400000 TPACKET_ALIGNMENT = 0x10 @@ -2507,7 +2609,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2705,6 +2807,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2721,6 +2825,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0xc00 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 6e0538f..e33be41 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 
+ BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1339,6 +1358,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1405,6 +1425,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80000000 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1695,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1690,6 +1716,7 @@ const ( PTRACE_GETVRREGS = 0x12 PTRACE_GETVSRREGS = 0x1b PTRACE_GET_DEBUGREG = 0x19 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1729,6 +1756,10 @@ const ( PTRACE_SINGLEBLOCK = 0x100 PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_SYSEMU = 0x1d PTRACE_SYSEMU_SINGLESTEP = 0x1e PTRACE_TRACEME = 0x0 @@ -1842,7 +1873,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1915,6 +1946,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1939,6 +1971,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1946,7 +1979,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1958,6 +1991,7 @@ 
const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1966,8 +2000,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2052,6 +2086,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2190,6 +2226,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2494,6 +2531,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x400000 TPACKET_ALIGNMENT = 0x10 @@ -2507,7 +2609,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2705,6 +2807,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2721,6 +2825,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0xc00 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 06c0148..b9908d3 100644 --- 
a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1340,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1404,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1669,6 +1693,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1682,6 +1708,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1714,6 +1741,10 @@ const ( PTRACE_SETSIGMASK = 0x420b PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 QNX4_SUPER_MAGIC = 0x2f QNX6_SUPER_MAGIC = 0x68191122 @@ -1772,7 +1803,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1845,6 +1876,7 @@ 
const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1869,6 +1901,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1876,7 +1909,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1888,6 +1921,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1896,8 +1930,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -1982,6 +2016,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2120,6 +2156,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2420,6 +2457,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2433,7 +2535,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2631,6 +2733,8 @@ const ( 
XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2647,6 +2751,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 3987509..85647f4 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -253,6 +253,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -304,9 +305,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -460,6 +462,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -560,6 +563,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -721,6 +725,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x0 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -986,6 +991,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1084,6 +1090,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1096,6 +1113,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1340,6 +1359,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1404,6 +1424,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0xb703 + NS_GET_OWNER_UID = 0xb704 + NS_GET_PARENT = 0xb702 + NS_GET_USERNS = 0xb701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1671,6 +1695,8 @@ const ( PTRACE_DETACH = 0x11 PTRACE_DISABLE_TE = 0x5010 PTRACE_ENABLE_TE = 0x5009 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1685,6 +1711,7 @@ const ( PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a PTRACE_GET_LAST_BREAK = 0x5006 + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN 
= 0x4208 @@ -1728,6 +1755,10 @@ const ( PTRACE_SINGLEBLOCK = 0xc PTRACE_SINGLESTEP = 0x9 PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TE_ABORT_RAND = 0x5011 PTRACE_TRACEME = 0x0 PT_ACR0 = 0x90 @@ -1845,7 +1876,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1918,6 +1949,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1942,6 +1974,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1949,7 +1982,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1961,6 +1994,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1969,8 +2003,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2055,6 +2089,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2193,6 +2229,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x44 SO_DOMAIN = 0x27 SO_DONTROUTE = 0x5 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2493,6 +2530,71 @@ const ( TIOCSTI = 0x5412 TIOCSWINSZ = 0x5414 TIOCVHANGUP = 0x5437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 + TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + 
TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2506,7 +2608,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2704,6 +2806,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2720,6 +2824,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 8d80f99..c0095a5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -256,6 +256,7 @@ const ( BPF_F_STACK_BUILD_ID = 0x20 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_F_TEST_RND_HI32 = 0x4 BPF_F_TUNINFO_IPV6 = 0x1 BPF_F_USER_BUILD_ID = 0x800 BPF_F_USER_STACK = 0x100 @@ -307,9 +308,10 @@ const ( BPF_RET = 0x6 BPF_RSH = 0x70 BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 BPF_ST = 0x2 BPF_STX = 0x3 @@ -463,6 +465,7 @@ const ( DAXFS_MAGIC = 0x64646178 DEBUGFS_MAGIC = 0x64626720 DEVPTS_SUPER_MAGIC = 0x1cd1 + DMA_BUF_MAGIC = 0x444d4142 DT_BLK = 0x6 DT_CHR = 0x2 DT_DIR = 0x4 @@ -564,6 +567,7 @@ const ( ETH_P_IRDA = 0x17 ETH_P_LAT = 0x6004 ETH_P_LINK_CTL = 0x886c + ETH_P_LLDP = 0x88cc ETH_P_LOCALTALK = 0x9 ETH_P_LOOP = 0x60 ETH_P_LOOPBACK = 0x9000 @@ -725,6 +729,7 @@ const ( F_OFD_SETLKW = 0x26 F_OK = 0x0 F_RDLCK = 0x1 + F_SEAL_FUTURE_WRITE = 0x10 F_SEAL_GROW = 0x4 F_SEAL_SEAL = 0x1 F_SEAL_SHRINK = 0x2 @@ -990,6 +995,7 @@ const ( IPV6_RECVRTHDR = 0x38 IPV6_RECVTCLASS = 0x42 IPV6_ROUTER_ALERT = 0x16 + IPV6_ROUTER_ALERT_ISOLATE = 0x1e IPV6_RTHDR = 0x39 IPV6_RTHDRDSTOPTS = 0x37 IPV6_RTHDR_LOOSE = 0x0 @@ -1088,6 +1094,17 @@ const ( KEXEC_PRESERVE_CONTEXT = 0x2 KEXEC_SEGMENT_MAX = 0x10 KEYCTL_ASSUME_AUTHORITY = 0x10 + KEYCTL_CAPABILITIES = 0x1f + KEYCTL_CAPS0_BIG_KEY = 0x10 + KEYCTL_CAPS0_CAPABILITIES = 0x1 + KEYCTL_CAPS0_DIFFIE_HELLMAN = 0x4 + KEYCTL_CAPS0_INVALIDATE = 0x20 + KEYCTL_CAPS0_MOVE = 0x80 + KEYCTL_CAPS0_PERSISTENT_KEYRINGS = 0x2 + KEYCTL_CAPS0_PUBLIC_KEY = 0x8 + KEYCTL_CAPS0_RESTRICT_KEYRING = 0x40 + KEYCTL_CAPS1_NS_KEYRING_NAME = 0x1 + KEYCTL_CAPS1_NS_KEY_TAG = 0x2 KEYCTL_CHOWN = 0x4 KEYCTL_CLEAR = 0x7 KEYCTL_DESCRIBE = 0x6 @@ -1100,6 +1117,8 @@ const ( KEYCTL_INVALIDATE = 0x15 KEYCTL_JOIN_SESSION_KEYRING = 0x1 KEYCTL_LINK = 0x8 + KEYCTL_MOVE = 0x1e + KEYCTL_MOVE_EXCL = 0x1 KEYCTL_NEGATE = 0xd KEYCTL_PKEY_DECRYPT = 0x1a KEYCTL_PKEY_ENCRYPT = 0x19 @@ -1344,6 +1363,7 @@ const ( NETLINK_XFRM = 0x6 NETNSA_MAX = 0x5 NETNSA_NSID_NOT_ASSIGNED = -0x1 + NFDBITS = 0x40 NFNETLINK_V0 = 0x0 NFNLGRP_ACCT_QUOTA = 0x8 NFNLGRP_CONNTRACK_DESTROY = 0x3 @@ -1408,6 +1428,10 @@ const ( NLM_F_ROOT = 0x100 NOFLSH = 0x80 NSFS_MAGIC = 0x6e736673 + NS_GET_NSTYPE = 0x2000b703 + NS_GET_OWNER_UID = 0x2000b704 + NS_GET_PARENT = 0x2000b702 + 
NS_GET_USERNS = 0x2000b701 OCFS2_SUPER_MAGIC = 0x7461636f OCRNL = 0x8 OFDEL = 0x80 @@ -1673,6 +1697,8 @@ const ( PTRACE_ATTACH = 0x10 PTRACE_CONT = 0x7 PTRACE_DETACH = 0x11 + PTRACE_EVENTMSG_SYSCALL_ENTRY = 0x1 + PTRACE_EVENTMSG_SYSCALL_EXIT = 0x2 PTRACE_EVENT_CLONE = 0x3 PTRACE_EVENT_EXEC = 0x4 PTRACE_EVENT_EXIT = 0x6 @@ -1690,6 +1716,7 @@ const ( PTRACE_GETREGSET = 0x4204 PTRACE_GETSIGINFO = 0x4202 PTRACE_GETSIGMASK = 0x420a + PTRACE_GET_SYSCALL_INFO = 0x420e PTRACE_INTERRUPT = 0x4207 PTRACE_KILL = 0x8 PTRACE_LISTEN = 0x4208 @@ -1729,6 +1756,10 @@ const ( PTRACE_SINGLESTEP = 0x9 PTRACE_SPARC_DETACH = 0xb PTRACE_SYSCALL = 0x18 + PTRACE_SYSCALL_INFO_ENTRY = 0x1 + PTRACE_SYSCALL_INFO_EXIT = 0x2 + PTRACE_SYSCALL_INFO_NONE = 0x0 + PTRACE_SYSCALL_INFO_SECCOMP = 0x3 PTRACE_TRACEME = 0x0 PTRACE_WRITEDATA = 0x11 PTRACE_WRITETEXT = 0x13 @@ -1837,7 +1868,7 @@ const ( RTAX_UNSPEC = 0x0 RTAX_WINDOW = 0x3 RTA_ALIGNTO = 0x4 - RTA_MAX = 0x1d + RTA_MAX = 0x1e RTCF_DIRECTSRC = 0x4000000 RTCF_DOREDIRECT = 0x1000000 RTCF_LOG = 0x2000000 @@ -1910,6 +1941,7 @@ const ( RTM_DELMDB = 0x55 RTM_DELNEIGH = 0x1d RTM_DELNETCONF = 0x51 + RTM_DELNEXTHOP = 0x69 RTM_DELNSID = 0x59 RTM_DELQDISC = 0x25 RTM_DELROUTE = 0x19 @@ -1934,6 +1966,7 @@ const ( RTM_GETNEIGH = 0x1e RTM_GETNEIGHTBL = 0x42 RTM_GETNETCONF = 0x52 + RTM_GETNEXTHOP = 0x6a RTM_GETNSID = 0x5a RTM_GETQDISC = 0x26 RTM_GETROUTE = 0x1a @@ -1941,7 +1974,7 @@ const ( RTM_GETSTATS = 0x5e RTM_GETTCLASS = 0x2a RTM_GETTFILTER = 0x2e - RTM_MAX = 0x67 + RTM_MAX = 0x6b RTM_NEWACTION = 0x30 RTM_NEWADDR = 0x14 RTM_NEWADDRLABEL = 0x48 @@ -1953,6 +1986,7 @@ const ( RTM_NEWNEIGH = 0x1c RTM_NEWNEIGHTBL = 0x40 RTM_NEWNETCONF = 0x50 + RTM_NEWNEXTHOP = 0x68 RTM_NEWNSID = 0x58 RTM_NEWPREFIX = 0x34 RTM_NEWQDISC = 0x24 @@ -1961,8 +1995,8 @@ const ( RTM_NEWSTATS = 0x5c RTM_NEWTCLASS = 0x28 RTM_NEWTFILTER = 0x2c - RTM_NR_FAMILIES = 0x16 - RTM_NR_MSGTYPES = 0x58 + RTM_NR_FAMILIES = 0x17 + RTM_NR_MSGTYPES = 0x5c RTM_SETDCB = 0x4f RTM_SETLINK = 0x13 RTM_SETNEIGHTBL = 0x43 @@ -2047,6 +2081,8 @@ const ( SIOCDRARP = 0x8960 SIOCETHTOOL = 0x8946 SIOCGARP = 0x8954 + SIOCGETLINKNAME = 0x89e0 + SIOCGETNODEID = 0x89e1 SIOCGHWTSTAMP = 0x89b1 SIOCGIFADDR = 0x8915 SIOCGIFBR = 0x8940 @@ -2185,6 +2221,7 @@ const ( SO_DEBUG = 0x1 SO_DETACH_BPF = 0x1b SO_DETACH_FILTER = 0x1b + SO_DETACH_REUSEPORT_BPF = 0x47 SO_DOMAIN = 0x1029 SO_DONTROUTE = 0x10 SO_EE_CODE_TXTIME_INVALID_PARAM = 0x1 @@ -2482,6 +2519,71 @@ const ( TIOCSTOP = 0x2000746f TIOCSWINSZ = 0x80087467 TIOCVHANGUP = 0x20005437 + TIPC_ADDR_ID = 0x3 + TIPC_ADDR_MCAST = 0x1 + TIPC_ADDR_NAME = 0x2 + TIPC_ADDR_NAMESEQ = 0x1 + TIPC_CFG_SRV = 0x0 + TIPC_CLUSTER_BITS = 0xc + TIPC_CLUSTER_MASK = 0xfff000 + TIPC_CLUSTER_OFFSET = 0xc + TIPC_CLUSTER_SIZE = 0xfff + TIPC_CONN_SHUTDOWN = 0x5 + TIPC_CONN_TIMEOUT = 0x82 + TIPC_CRITICAL_IMPORTANCE = 0x3 + TIPC_DESTNAME = 0x3 + TIPC_DEST_DROPPABLE = 0x81 + TIPC_ERRINFO = 0x1 + TIPC_ERR_NO_NAME = 0x1 + TIPC_ERR_NO_NODE = 0x3 + TIPC_ERR_NO_PORT = 0x2 + TIPC_ERR_OVERLOAD = 0x4 + TIPC_GROUP_JOIN = 0x87 + TIPC_GROUP_LEAVE = 0x88 + TIPC_GROUP_LOOPBACK = 0x1 + TIPC_GROUP_MEMBER_EVTS = 0x2 + TIPC_HIGH_IMPORTANCE = 0x2 + TIPC_IMPORTANCE = 0x7f + TIPC_LINK_STATE = 0x2 + TIPC_LOW_IMPORTANCE = 0x0 + TIPC_MAX_BEARER_NAME = 0x20 + TIPC_MAX_IF_NAME = 0x10 + TIPC_MAX_LINK_NAME = 0x44 + TIPC_MAX_MEDIA_NAME = 0x10 + TIPC_MAX_USER_MSG_SIZE = 0x101d0 + TIPC_MCAST_BROADCAST = 0x85 + TIPC_MCAST_REPLICAST = 0x86 + TIPC_MEDIUM_IMPORTANCE = 0x1 + TIPC_NODEID_LEN = 0x10 + TIPC_NODE_BITS = 0xc + TIPC_NODE_MASK = 0xfff + TIPC_NODE_OFFSET = 0x0 
+ TIPC_NODE_RECVQ_DEPTH = 0x83 + TIPC_NODE_SIZE = 0xfff + TIPC_NODE_STATE = 0x0 + TIPC_OK = 0x0 + TIPC_PUBLISHED = 0x1 + TIPC_RESERVED_TYPES = 0x40 + TIPC_RETDATA = 0x2 + TIPC_SERVICE_ADDR = 0x2 + TIPC_SERVICE_RANGE = 0x1 + TIPC_SOCKET_ADDR = 0x3 + TIPC_SOCK_RECVQ_DEPTH = 0x84 + TIPC_SOCK_RECVQ_USED = 0x89 + TIPC_SRC_DROPPABLE = 0x80 + TIPC_SUBSCR_TIMEOUT = 0x3 + TIPC_SUB_CANCEL = 0x4 + TIPC_SUB_PORTS = 0x1 + TIPC_SUB_SERVICE = 0x2 + TIPC_TOP_SRV = 0x1 + TIPC_WAIT_FOREVER = 0xffffffff + TIPC_WITHDRAWN = 0x2 + TIPC_ZONE_BITS = 0x8 + TIPC_ZONE_CLUSTER_MASK = 0xfffff000 + TIPC_ZONE_MASK = 0xff000000 + TIPC_ZONE_OFFSET = 0x18 + TIPC_ZONE_SCOPE = 0x1 + TIPC_ZONE_SIZE = 0xff TMPFS_MAGIC = 0x1021994 TOSTOP = 0x100 TPACKET_ALIGNMENT = 0x10 @@ -2495,7 +2597,7 @@ const ( TP_STATUS_LOSING = 0x4 TP_STATUS_SENDING = 0x2 TP_STATUS_SEND_REQUEST = 0x1 - TP_STATUS_TS_RAW_HARDWARE = -0x80000000 + TP_STATUS_TS_RAW_HARDWARE = 0x80000000 TP_STATUS_TS_SOFTWARE = 0x20000000 TP_STATUS_TS_SYS_HARDWARE = 0x40000000 TP_STATUS_USER = 0x1 @@ -2693,6 +2795,8 @@ const ( XDP_FLAGS_SKB_MODE = 0x2 XDP_FLAGS_UPDATE_IF_NOEXIST = 0x1 XDP_MMAP_OFFSETS = 0x1 + XDP_OPTIONS = 0x8 + XDP_OPTIONS_ZEROCOPY = 0x1 XDP_PACKET_HEADROOM = 0x100 XDP_PGOFF_RX_RING = 0x0 XDP_PGOFF_TX_RING = 0x80000000 @@ -2709,6 +2813,7 @@ const ( XENFS_SUPER_MAGIC = 0xabba1974 XFS_SUPER_MAGIC = 0x58465342 XTABS = 0x1800 + Z3FOLD_MAGIC = 0x33 ZSMALLOC_MAGIC = 0x58295829 __TIOCFLUSH = 0x80047410 ) diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go index 78cc04e..96b9b8a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go @@ -3,7 +3,7 @@ // +build 386,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go package unix @@ -1085,6 +1085,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go index 92185e6..ed522a8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -1075,6 +1075,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go index 373ad45..c8d36fe 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go @@ -3,7 +3,7 @@ // +build arm,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
// cgo -godefs -- -marm _const.go package unix @@ -1065,6 +1065,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go index fb6c604..f1c146a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm64.go @@ -3,7 +3,7 @@ // +build arm64,netbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -1075,6 +1075,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_OIFLIST = 0x4 NET_RT_OOIFLIST = 0x3 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go index d8be045..5402bd5 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go @@ -3,7 +3,7 @@ // +build 386,openbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m32 _const.go package unix @@ -881,14 +881,15 @@ const ( MADV_SPACEAVAIL = 0x5 MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 - MAP_COPY = 0x4 + MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 + MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x1ff7 - MAP_HASSEMAPHORE = 0x200 - MAP_INHERIT = 0x80 + MAP_FLAGMASK = 0xfff7 + MAP_HASSEMAPHORE = 0x0 + MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 - MAP_INHERIT_DONATE_COPY = 0x3 MAP_INHERIT_NONE = 0x2 MAP_INHERIT_SHARE = 0x0 MAP_NOEXTEND = 0x100 @@ -896,7 +897,8 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x20 MAP_SHARED = 0x1 - MAP_TRYFIXED = 0x400 + MAP_STACK = 0x4000 + MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 MNT_ASYNC = 0x40 @@ -946,6 +948,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go index 1f9e8a2..ffaf2d2 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go @@ -3,7 +3,7 @@ // +build amd64,openbsd -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -920,10 +920,11 @@ const ( MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x7ff7 + MAP_FLAGMASK = 0xfff7 MAP_HASSEMAPHORE = 0x0 MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 @@ -990,6 +991,7 @@ const ( NET_RT_MAXID = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go index 79d5695..7aa796a 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm.go @@ -1,11 +1,11 @@ // mkerrors.sh // Code generated by the command above; see README.md. DO NOT EDIT. -// Created by cgo -godefs - DO NOT EDIT -// cgo -godefs -- _const.go - // +build arm,openbsd +// Code generated by cmd/cgo -godefs; DO NOT EDIT. 
+// cgo -godefs -- _const.go + package unix import "syscall" @@ -881,10 +881,11 @@ const ( MADV_WILLNEED = 0x3 MAP_ANON = 0x1000 MAP_ANONYMOUS = 0x1000 + MAP_CONCEAL = 0x8000 MAP_COPY = 0x2 MAP_FILE = 0x0 MAP_FIXED = 0x10 - MAP_FLAGMASK = 0x3ff7 + MAP_FLAGMASK = 0xfff7 MAP_HASSEMAPHORE = 0x0 MAP_INHERIT = 0x0 MAP_INHERIT_COPY = 0x1 @@ -896,6 +897,7 @@ const ( MAP_PRIVATE = 0x2 MAP_RENAME = 0x0 MAP_SHARED = 0x1 + MAP_STACK = 0x4000 MAP_TRYFIXED = 0x0 MCL_CURRENT = 0x1 MCL_FUTURE = 0x2 @@ -947,6 +949,7 @@ const ( NET_RT_MAXID = 0x6 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOTE_ATTRIB = 0x8 NOTE_CHILD = 0x4 diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go index ec5f92d..1792d3f 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_openbsd_arm64.go @@ -996,6 +996,7 @@ const ( NET_RT_MAXID = 0x7 NET_RT_STATS = 0x4 NET_RT_TABLE = 0x5 + NFDBITS = 0x20 NOFLSH = 0x80000000 NOKERNINFO = 0x2000000 NOTE_ATTRIB = 0x8 diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go index 22569db..46e054c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go @@ -3,7 +3,7 @@ // +build amd64,solaris -// Created by cgo -godefs - DO NOT EDIT +// Code generated by cmd/cgo -godefs; DO NOT EDIT. // cgo -godefs -- -m64 _const.go package unix @@ -666,6 +666,7 @@ const ( M_FLUSH = 0x86 NAME_MAX = 0xff NEWDEV = 0x1 + NFDBITS = 0x40 NL0 = 0x0 NL1 = 0x100 NLDLY = 0x100 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go index c4ec7ff..b42c1cb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.go +// go run mksyscall.go -l32 -tags darwin,386,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_386.1_11.go syscall_darwin_386.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build darwin,386,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1352,8 +1326,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1691,6 +1666,49 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 
0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) @@ -1738,23 +1756,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go new file mode 100644 index 0000000..e263fbd --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -l32 -tags darwin,386,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build darwin,386,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s new file mode 100644 index 0000000..00da1eb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go 386 +// Code generated by the command above; DO NOT EDIT. 
+ +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 23346dc..603c9f6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -527,21 +506,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -943,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { @@ -1872,8 +1851,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err 
error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2341,6 +2321,42 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc___sysctl_trampoline() + +//go:linkname libc___sysctl libc___sysctl +//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int32, usec int32, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) sec = int32(r0) @@ -2408,28 +2424,6 @@ func libc_fstatfs64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc___getdirentries64_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___getdirentries64_trampoline() - -//go:linkname libc___getdirentries64 libc___getdirentries64 -//go:cgo_import_dynamic libc___getdirentries64 __getdirentries64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s index 37b85b4..ece6f67 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -64,8 +62,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP 
libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -108,6 +104,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 @@ -264,6 +262,10 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc___sysctl(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 @@ -272,8 +274,6 @@ TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatat64(SB) TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0 - JMP libc___getdirentries64(SB) TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go index 2581e89..38b7cba 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.go +// go run mksyscall.go -tags darwin,amd64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_amd64.1_11.go syscall_darwin_amd64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build darwin,amd64,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1352,8 +1326,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -1691,6 +1666,49 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { + var _p0 unsafe.Pointer + if len(buf) > 0 { + _p0 = unsafe.Pointer(&buf[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) + n = int(r0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 
!= 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := RawSyscall(SYS_GETTIMEOFDAY, uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) @@ -1738,23 +1756,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := Syscall6(SYS_GETDIRENTRIES64, uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := Syscall(SYS_GETFSSTAT64, uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go new file mode 100644 index 0000000..314042a --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -tags darwin,amd64,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. + +// +build darwin,amd64,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s new file mode 100644 index 0000000..d671e83 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go amd64 +// Code generated by the command above; DO NOT EDIT. 
+ +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index c142e33..fda478e 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -527,21 +506,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1887,8 +1851,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } @@ -2356,6 +2321,42 @@ func writelen(fd int, buf *byte, nbuf int) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), 
uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc___sysctl_trampoline() + +//go:linkname libc___sysctl libc___sysctl +//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_ptrace_trampoline() + +//go:linkname libc_ptrace libc_ptrace +//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func gettimeofday(tp *Timeval) (sec int64, usec int32, err error) { r0, r1, e1 := syscall_rawSyscall(funcPC(libc_gettimeofday_trampoline), uintptr(unsafe.Pointer(tp)), 0, 0) sec = int64(r0) @@ -2423,28 +2424,6 @@ func libc_fstatfs64_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { - var _p0 unsafe.Pointer - if len(buf) > 0 { - _p0 = unsafe.Pointer(&buf[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - r0, _, e1 := syscall_syscall6(funcPC(libc___getdirentries64_trampoline), uintptr(fd), uintptr(_p0), uintptr(len(buf)), uintptr(unsafe.Pointer(basep)), 0, 0) - n = int(r0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___getdirentries64_trampoline() - -//go:linkname libc___getdirentries64 libc___getdirentries64 -//go:cgo_import_dynamic libc___getdirentries64 __getdirentries64 "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { r0, _, e1 := syscall_syscall(funcPC(libc_getfsstat64_trampoline), uintptr(buf), uintptr(size), uintptr(flags)) n = int(r0) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s index 1a39151..7c4d590 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -64,8 +62,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -266,6 +262,10 @@ TEXT ·libc_mmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_mmap(SB) TEXT ·libc_munmap_trampoline(SB),NOSPLIT,$0-0 JMP libc_munmap(SB) +TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 + JMP libc___sysctl(SB) +TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 + JMP libc_ptrace(SB) TEXT ·libc_gettimeofday_trampoline(SB),NOSPLIT,$0-0 JMP libc_gettimeofday(SB) TEXT ·libc_fstat64_trampoline(SB),NOSPLIT,$0-0 @@ -274,8 +274,6 @@ TEXT ·libc_fstatat64_trampoline(SB),NOSPLIT,$0-0 JMP 
libc_fstatat64(SB) TEXT ·libc_fstatfs64_trampoline(SB),NOSPLIT,$0-0 JMP libc_fstatfs64(SB) -TEXT ·libc___getdirentries64_trampoline(SB),NOSPLIT,$0-0 - JMP libc___getdirentries64(SB) TEXT ·libc_getfsstat64_trampoline(SB),NOSPLIT,$0-0 JMP libc_getfsstat64(SB) TEXT ·libc_lstat64_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go index f8caece..abb6918 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.go +// go run mksyscall.go -l32 -tags darwin,arm,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm.1_11.go syscall_darwin_arm.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build darwin,arm,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1352,8 +1326,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go new file mode 100644 index 0000000..f519ce9 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -l32 -tags darwin,arm,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. 
DO NOT EDIT. + +// +build darwin,arm,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s new file mode 100644 index 0000000..488e557 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go arm +// Code generated by the command above; DO NOT EDIT. + +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go index 01cffbf..163b391 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -527,21 +506,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := 
syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -943,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { @@ -1872,8 +1851,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s index 994056f..5bebb1b 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -64,8 +62,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -108,6 +104,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go index 3fd0f3c..b75c11d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_11.go @@ -1,4 +1,4 @@ -// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.go +// go run mksyscall.go -tags darwin,arm64,!go1.12 syscall_bsd.go syscall_darwin.go syscall_darwin_arm64.1_11.go syscall_darwin_arm64.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build darwin,arm64,!go1.12 @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,16 +361,6 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := Syscall6(SYS_GETATTRLIST, uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -1352,8 +1326,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go new file mode 100644 index 0000000..d64e6c8 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go @@ -0,0 +1,41 @@ +// go run mksyscall.go -tags darwin,arm64,go1.13 syscall_darwin.1_13.go +// Code generated by the command above; see README.md. DO NOT EDIT. 
+ +// +build darwin,arm64,go1.13 + +package unix + +import ( + "syscall" + "unsafe" +) + +var _ syscall.Errno + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func closedir(dir uintptr) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_closedir_trampoline), uintptr(dir), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_closedir_trampoline() + +//go:linkname libc_closedir libc_closedir +//go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { + r0, _, _ := syscall_syscall(funcPC(libc_readdir_r_trampoline), uintptr(dir), uintptr(unsafe.Pointer(entry)), uintptr(unsafe.Pointer(result))) + res = Errno(r0) + return +} + +func libc_readdir_r_trampoline() + +//go:linkname libc_readdir_r libc_readdir_r +//go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s new file mode 100644 index 0000000..b29dabb --- /dev/null +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.s @@ -0,0 +1,12 @@ +// go run mkasm_darwin.go arm64 +// Code generated by the command above; DO NOT EDIT. + +// +build go1.13 + +#include "textflag.h" +TEXT ·libc_fdopendir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_fdopendir(SB) +TEXT ·libc_closedir_trampoline(SB),NOSPLIT,$0-0 + JMP libc_closedir(SB) +TEXT ·libc_readdir_r_trampoline(SB),NOSPLIT,$0-0 + JMP libc_readdir_r(SB) diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 8f2691d..7c5bd51 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -304,27 +304,6 @@ func libc_kevent_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := syscall_syscall6(funcPC(libc___sysctl_trampoline), uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc___sysctl_trampoline() - -//go:linkname libc___sysctl libc___sysctl -//go:cgo_import_dynamic libc___sysctl __sysctl "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -527,21 +506,6 @@ func libc_munlockall_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -func libc_ptrace_trampoline() - -//go:linkname libc_ptrace libc_ptrace -//go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func getattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintptr, options int) (err error) { _, _, e1 := 
syscall_syscall6(funcPC(libc_getattrlist_trampoline), uintptr(unsafe.Pointer(path)), uintptr(list), uintptr(buf), uintptr(size), uintptr(options), 0) if e1 != 0 { @@ -943,6 +907,21 @@ func libc_chroot_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ClockGettime(clockid int32, time *Timespec) (err error) { + _, _, e1 := syscall_syscall(funcPC(libc_clock_gettime_trampoline), uintptr(clockid), uintptr(unsafe.Pointer(time)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +func libc_clock_gettime_trampoline() + +//go:linkname libc_clock_gettime libc_clock_gettime +//go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Close(fd int) (err error) { _, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0) if e1 != 0 { @@ -1872,8 +1851,9 @@ func libc_lseek_trampoline() // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := syscall_syscall6(funcPC(libc_select_trampoline), uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s index 61dc0d4..96ab987 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s @@ -40,8 +40,6 @@ TEXT ·libc_sendmsg_trampoline(SB),NOSPLIT,$0-0 JMP libc_sendmsg(SB) TEXT ·libc_kevent_trampoline(SB),NOSPLIT,$0-0 JMP libc_kevent(SB) -TEXT ·libc___sysctl_trampoline(SB),NOSPLIT,$0-0 - JMP libc___sysctl(SB) TEXT ·libc_utimes_trampoline(SB),NOSPLIT,$0-0 JMP libc_utimes(SB) TEXT ·libc_futimes_trampoline(SB),NOSPLIT,$0-0 @@ -64,8 +62,6 @@ TEXT ·libc_munlock_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlock(SB) TEXT ·libc_munlockall_trampoline(SB),NOSPLIT,$0-0 JMP libc_munlockall(SB) -TEXT ·libc_ptrace_trampoline(SB),NOSPLIT,$0-0 - JMP libc_ptrace(SB) TEXT ·libc_getattrlist_trampoline(SB),NOSPLIT,$0-0 JMP libc_getattrlist(SB) TEXT ·libc_pipe_trampoline(SB),NOSPLIT,$0-0 @@ -108,6 +104,8 @@ TEXT ·libc_chown_trampoline(SB),NOSPLIT,$0-0 JMP libc_chown(SB) TEXT ·libc_chroot_trampoline(SB),NOSPLIT,$0-0 JMP libc_chroot(SB) +TEXT ·libc_clock_gettime_trampoline(SB),NOSPLIT,$0-0 + JMP libc_clock_gettime(SB) TEXT ·libc_close_trampoline(SB),NOSPLIT,$0-0 JMP libc_close(SB) TEXT ·libc_dup_trampoline(SB),NOSPLIT,$0-0 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go index cdfe931..df199b3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go @@ -1272,8 +1272,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), 
uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go index a783306..e68185f 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go @@ -1606,8 +1606,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go index f995520..2f77f93 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,8 +361,14 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -387,8 +377,8 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), 
uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ -424,6 +414,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1606,8 +1606,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go index d681acd..e9a12c9 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,8 +361,14 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func pipe2(p *[2]_C_int, flags int) (err error) { - _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) if e1 != 0 { err = errnoErr(e1) } @@ -387,8 +377,8 @@ func pipe2(p *[2]_C_int, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func pipe2(p *[2]_C_int, flags int) (err error) { + _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { err = errnoErr(e1) } @@ 
-424,6 +414,16 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Access(path string, mode uint32) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -1606,8 +1606,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go index 5049b2e..27ab0fb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe2(p *[2]_C_int, flags int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE2, uintptr(unsafe.Pointer(p)), uintptr(flags), 0) if e1 != 0 { @@ -404,8 +404,8 @@ func Getcwd(buf []byte) (n int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data int) (err error) { - _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) +func ioctl(fd int, req uint, arg uintptr) (err error) { + _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) 
if e1 != 0 { err = errnoErr(e1) } @@ -414,8 +414,8 @@ func ptrace(request int, pid int, addr uintptr, data int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ioctl(fd int, req uint, arg uintptr) (err error) { - _, _, e1 := Syscall(SYS_IOCTL, uintptr(fd), uintptr(req), uintptr(arg)) +func ptrace(request int, pid int, addr uintptr, data int) (err error) { + _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) } @@ -1606,8 +1606,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index c5e46e4..fe5d462 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index da8819e..536abce 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + 
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 6ad9be6..37823cd 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index f883317..794f612 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 8eebc6c..1b34b55 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index ecf62a6..5714e25 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index 1ba0f7b..88a6b33 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE 
COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 20012b2..c09dbe3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index 2b520de..42f6c21 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index d9f044c..de2cd8d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ 
b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go index 9feed65..d51bf07 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_riscv64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index 0a65150..1e3a3cb 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go index e27f669..3c97008 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_sparc64.go @@ -305,6 +305,36 @@ func keyctlDH(cmd int, arg2 *KeyctlDHParams, buf []byte) (ret int, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func keyctlRestrictKeyringByType(cmd int, arg2 int, keyType string, restriction string) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(keyType) + if err != nil { + return + } + var _p1 *byte + _p1, err = BytePtrFromString(restriction) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), 0, 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + +func keyctlRestrictKeyring(cmd int, arg2 int) (err error) { + _, _, e1 := Syscall(SYS_KEYCTL, uintptr(cmd), uintptr(arg2), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := Syscall6(SYS_PTRACE, uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go index 7e05826..5ade42c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT + func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -1498,8 +1498,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go index d94d076..3e0bbc5 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -1498,8 +1498,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go index cf5bf3d..cb0af13 100644 --- 
a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -1498,8 +1498,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go index 243a931..6fd48d3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe() (fd1 int, fd2 int, err error) { r0, r1, e1 := RawSyscall(SYS_PIPE, 0, 0, 0) fd1 = int(r0) @@ -1498,8 +1498,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go index a9532d0..2938e41 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1304,8 +1304,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), 
uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go index 0cb9f01..22b79ab 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1304,8 +1304,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go index 6fc99b5..cb921f3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } 
- _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1304,8 +1304,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go index 27878a7..5a74380 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_openbsd_arm64.go @@ -214,22 +214,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { - var _p0 unsafe.Pointer - if len(mib) > 0 { - _p0 = unsafe.Pointer(&mib[0]) - } else { - _p0 = unsafe.Pointer(&_zero) - } - _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) - if e1 != 0 { - err = errnoErr(e1) - } - return -} - -// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT - func utimes(path string, timeval *[2]Timeval) (err error) { var _p0 *byte _p0, err = BytePtrFromString(path) @@ -377,6 +361,22 @@ func Munlockall() (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) (err error) { + var _p0 unsafe.Pointer + if len(mib) > 0 { + _p0 = unsafe.Pointer(&mib[0]) + } else { + _p0 = unsafe.Pointer(&_zero) + } + _, _, e1 := Syscall6(SYS___SYSCTL, uintptr(_p0), uintptr(len(mib)), uintptr(unsafe.Pointer(old)), uintptr(unsafe.Pointer(oldlen)), uintptr(unsafe.Pointer(new)), uintptr(newlen)) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func pipe(p *[2]_C_int) (err error) { _, _, e1 := RawSyscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0) if e1 != 0 { @@ -1304,8 +1304,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := Syscall6(SYS_SELECT, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := Syscall6(SYS_SELECT, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = errnoErr(e1) } diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go index 5f61476..a96165d 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go @@ -1478,8 +1478,9 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func Select(n int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (err error) { - _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(n), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) +func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err error) { + r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procSelect)), 5, uintptr(nfd), uintptr(unsafe.Pointer(r)), uintptr(unsafe.Pointer(w)), uintptr(unsafe.Pointer(e)), uintptr(unsafe.Pointer(timeout)), 0) + n = int(r0) if e1 != 0 { err = e1 } diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index e869c06..7aae554 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -429,4 +429,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 4917b8a..7968439 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -351,4 +351,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index f85fcb4..3c663c6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 678a119..753def9 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -296,4 +296,5 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go 
b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 222c9f9..ac86bd5 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -414,4 +414,5 @@ const ( SYS_FSCONFIG = 4431 SYS_FSMOUNT = 4432 SYS_FSPICK = 4433 + SYS_PIDFD_OPEN = 4434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 28e6d0e..1f5705b 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -344,4 +344,5 @@ const ( SYS_FSCONFIG = 5431 SYS_FSMOUNT = 5432 SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index e643c6f..d9ed953 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -344,4 +344,5 @@ const ( SYS_FSCONFIG = 5431 SYS_FSMOUNT = 5432 SYS_FSPICK = 5433 + SYS_PIDFD_OPEN = 5434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 01d93c4..94266b6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -414,4 +414,5 @@ const ( SYS_FSCONFIG = 4431 SYS_FSMOUNT = 4432 SYS_FSPICK = 4433 + SYS_PIDFD_OPEN = 4434 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 5744149..52e3da6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 21c8320..6141f90 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -393,4 +393,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index c1bb6d8..4f7261a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -295,4 +295,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index bc3cc6b..f47014a 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -358,4 +358,6 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 + SYS_CLONE3 = 435 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index 0a2841b..dd78abb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -373,4 +373,5 @@ const ( SYS_FSCONFIG = 431 SYS_FSMOUNT = 432 SYS_FSPICK = 433 + SYS_PIDFD_OPEN = 434 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 50bc412..16d62fa 
100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -425,6 +432,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -591,22 +599,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -614,6 +606,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -664,6 +657,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2468,6 +2468,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2521,3 +2557,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 
+ SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 055eaa7..97e9239 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +433,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -592,22 +600,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -615,6 +607,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -665,6 +658,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2481,6 +2481,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2535,3 +2571,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + 
TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 66019c9..05f978e 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -289,6 +289,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -429,6 +436,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -595,22 +603,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -618,6 +610,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -668,6 +661,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2459,6 +2459,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2512,3 +2548,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type 
uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 3104798..5086fce 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -593,22 +601,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -616,6 +608,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +659,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2460,6 +2460,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2514,3 +2550,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + 
+type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 46c8602..b6ddd8c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -288,6 +288,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -594,22 +602,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -617,6 +609,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +660,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2465,6 +2465,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2518,3 +2554,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 
Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index c2fe1a6..89f3e32 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -593,22 +601,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -616,6 +608,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +659,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2462,6 +2462,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID 
= 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2516,3 +2552,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index f1eb0d3..6b84cf7 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -593,22 +601,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -616,6 +608,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +659,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2462,6 +2462,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + 
RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2516,3 +2552,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index 8759bc3..bc50cd3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -288,6 +288,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x8 SizeofIPMreq = 0x8 @@ -594,22 +602,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -617,6 +609,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +660,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2465,6 +2465,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR 
= 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2518,3 +2554,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index a812005..0a1ec17 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -287,6 +287,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -594,22 +602,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -617,6 +609,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -667,6 +660,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2470,6 +2470,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 
) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2524,3 +2560,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 74b7a91..c7f045a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -287,6 +287,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -428,6 +435,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -594,22 +602,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -617,6 +609,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ 
-667,6 +660,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2470,6 +2470,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2524,3 +2560,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index 8344583..5d8d447 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -286,6 +286,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]uint8 @@ -427,6 +434,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -593,22 +601,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 
0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -616,6 +608,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -666,6 +659,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -808,6 +808,7 @@ type Ustat_t struct { type EpollEvent struct { Events uint32 + _ int32 Fd int32 Pad int32 } @@ -2487,6 +2488,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2541,3 +2578,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]uint8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]uint8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]uint8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index d8fc0bc..034875a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -285,6 +285,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -426,6 +433,7 @@ const ( SizeofSockaddrVM = 0x10 SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -592,22 +600,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 
- RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -615,6 +607,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -665,6 +658,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2484,6 +2484,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2538,3 +2574,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 5e0ab93..2f7ec8b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -289,6 +289,13 @@ type RawSockaddrXDP struct { type RawSockaddrPPPoX [0x1e]byte +type RawSockaddrTIPC struct { + Family uint16 + Addrtype uint8 + Scope int8 + Addr [12]byte +} + type RawSockaddr struct { Family uint16 Data [14]int8 @@ -430,6 +437,7 @@ const ( SizeofSockaddrVM = 0x10 
SizeofSockaddrXDP = 0x10 SizeofSockaddrPPPoX = 0x1e + SizeofSockaddrTIPC = 0x10 SizeofLinger = 0x8 SizeofIovec = 0x10 SizeofIPMreq = 0x8 @@ -596,22 +604,6 @@ const ( RTN_THROW = 0x9 RTN_NAT = 0xa RTN_XRESOLVE = 0xb - RTNLGRP_NONE = 0x0 - RTNLGRP_LINK = 0x1 - RTNLGRP_NOTIFY = 0x2 - RTNLGRP_NEIGH = 0x3 - RTNLGRP_TC = 0x4 - RTNLGRP_IPV4_IFADDR = 0x5 - RTNLGRP_IPV4_MROUTE = 0x6 - RTNLGRP_IPV4_ROUTE = 0x7 - RTNLGRP_IPV4_RULE = 0x8 - RTNLGRP_IPV6_IFADDR = 0x9 - RTNLGRP_IPV6_MROUTE = 0xa - RTNLGRP_IPV6_ROUTE = 0xb - RTNLGRP_IPV6_IFINFO = 0xc - RTNLGRP_IPV6_PREFIX = 0x12 - RTNLGRP_IPV6_RULE = 0x13 - RTNLGRP_ND_USEROPT = 0x14 SizeofNlMsghdr = 0x10 SizeofNlMsgerr = 0x14 SizeofRtGenmsg = 0x1 @@ -619,6 +611,7 @@ const ( SizeofRtAttr = 0x4 SizeofIfInfomsg = 0x10 SizeofIfAddrmsg = 0x8 + SizeofIfaCacheinfo = 0x10 SizeofRtMsg = 0xc SizeofRtNexthop = 0x8 SizeofNdUseroptmsg = 0x10 @@ -669,6 +662,13 @@ type IfAddrmsg struct { Index uint32 } +type IfaCacheinfo struct { + Prefered uint32 + Valid uint32 + Cstamp uint32 + Tstamp uint32 +} + type RtMsg struct { Family uint8 Dst_len uint8 @@ -2465,6 +2465,42 @@ const ( BPF_FD_TYPE_URETPROBE = 0x5 ) +const ( + RTNLGRP_NONE = 0x0 + RTNLGRP_LINK = 0x1 + RTNLGRP_NOTIFY = 0x2 + RTNLGRP_NEIGH = 0x3 + RTNLGRP_TC = 0x4 + RTNLGRP_IPV4_IFADDR = 0x5 + RTNLGRP_IPV4_MROUTE = 0x6 + RTNLGRP_IPV4_ROUTE = 0x7 + RTNLGRP_IPV4_RULE = 0x8 + RTNLGRP_IPV6_IFADDR = 0x9 + RTNLGRP_IPV6_MROUTE = 0xa + RTNLGRP_IPV6_ROUTE = 0xb + RTNLGRP_IPV6_IFINFO = 0xc + RTNLGRP_DECnet_IFADDR = 0xd + RTNLGRP_NOP2 = 0xe + RTNLGRP_DECnet_ROUTE = 0xf + RTNLGRP_DECnet_RULE = 0x10 + RTNLGRP_NOP4 = 0x11 + RTNLGRP_IPV6_PREFIX = 0x12 + RTNLGRP_IPV6_RULE = 0x13 + RTNLGRP_ND_USEROPT = 0x14 + RTNLGRP_PHONET_IFADDR = 0x15 + RTNLGRP_PHONET_ROUTE = 0x16 + RTNLGRP_DCB = 0x17 + RTNLGRP_IPV4_NETCONF = 0x18 + RTNLGRP_IPV6_NETCONF = 0x19 + RTNLGRP_MDB = 0x1a + RTNLGRP_MPLS_ROUTE = 0x1b + RTNLGRP_NSID = 0x1c + RTNLGRP_MPLS_NETCONF = 0x1d + RTNLGRP_IPV4_MROUTE_R = 0x1e + RTNLGRP_IPV6_MROUTE_R = 0x1f + RTNLGRP_NEXTHOP = 0x20 +) + type CapUserHeader struct { Version uint32 Pid int32 @@ -2519,3 +2555,72 @@ type LoopInfo64 struct { Encrypt_key [32]uint8 Init [2]uint64 } + +type TIPCSocketAddr struct { + Ref uint32 + Node uint32 +} + +type TIPCServiceRange struct { + Type uint32 + Lower uint32 + Upper uint32 +} + +type TIPCServiceName struct { + Type uint32 + Instance uint32 + Domain uint32 +} + +type TIPCSubscr struct { + Seq TIPCServiceRange + Timeout uint32 + Filter uint32 + Handle [8]int8 +} + +type TIPCEvent struct { + Event uint32 + Lower uint32 + Upper uint32 + Port TIPCSocketAddr + S TIPCSubscr +} + +type TIPCGroupReq struct { + Type uint32 + Instance uint32 + Scope uint32 + Flags uint32 +} + +type TIPCSIOCLNReq struct { + Peer uint32 + Id uint32 + Linkname [68]int8 +} + +type TIPCSIOCNodeIDReq struct { + Peer uint32 + Id [16]int8 +} + +const ( + TIPC_CLUSTER_SCOPE = 0x2 + TIPC_NODE_SCOPE = 0x3 +) + +const ( + SYSLOG_ACTION_CLOSE = 0 + SYSLOG_ACTION_OPEN = 1 + SYSLOG_ACTION_READ = 2 + SYSLOG_ACTION_READ_ALL = 3 + SYSLOG_ACTION_READ_CLEAR = 4 + SYSLOG_ACTION_CLEAR = 5 + SYSLOG_ACTION_CONSOLE_OFF = 6 + SYSLOG_ACTION_CONSOLE_ON = 7 + SYSLOG_ACTION_CONSOLE_LEVEL = 8 + SYSLOG_ACTION_SIZE_UNREAD = 9 + SYSLOG_ACTION_SIZE_BUFFER = 10 +) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_386.s b/vendor/golang.org/x/sys/windows/asm_windows_386.s deleted file mode 100644 index 21d994d..0000000 --- a/vendor/golang.org/x/sys/windows/asm_windows_386.s +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2009 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// -// System calls for 386, Windows are implemented in runtime/syscall_windows.goc -// - -TEXT ·getprocaddress(SB), 7, $0-16 - JMP syscall·getprocaddress(SB) - -TEXT ·loadlibrary(SB), 7, $0-12 - JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s b/vendor/golang.org/x/sys/windows/asm_windows_amd64.s deleted file mode 100644 index 5bfdf79..0000000 --- a/vendor/golang.org/x/sys/windows/asm_windows_amd64.s +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// -// System calls for amd64, Windows are implemented in runtime/syscall_windows.goc -// - -TEXT ·getprocaddress(SB), 7, $0-32 - JMP syscall·getprocaddress(SB) - -TEXT ·loadlibrary(SB), 7, $0-24 - JMP syscall·loadlibrary(SB) diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index ba67658..d777113 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -11,6 +11,18 @@ import ( "unsafe" ) +// We need to use LoadLibrary and GetProcAddress from the Go runtime, because +// the these symbols are loaded by the system linker and are required to +// dynamically load additional symbols. Note that in the Go runtime, these +// return syscall.Handle and syscall.Errno, but these are the same, in fact, +// as windows.Handle and windows.Errno, and we intend to keep these the same. + +//go:linkname syscall_loadlibrary syscall.loadlibrary +func syscall_loadlibrary(filename *uint16) (handle Handle, err Errno) + +//go:linkname syscall_getprocaddress syscall.getprocaddress +func syscall_getprocaddress(handle Handle, procname *uint8) (proc uintptr, err Errno) + // DLLError describes reasons for DLL load failures. type DLLError struct { Err error @@ -20,10 +32,6 @@ type DLLError struct { func (e *DLLError) Error() string { return e.Msg } -// Implemented in runtime/syscall_windows.goc; we provide jumps to them in our assembly file. -func loadlibrary(filename *uint16) (handle uintptr, err syscall.Errno) -func getprocaddress(handle uintptr, procname *uint8) (proc uintptr, err syscall.Errno) - // A DLL implements access to a single DLL. 
type DLL struct { Name string @@ -40,7 +48,7 @@ func LoadDLL(name string) (dll *DLL, err error) { if err != nil { return nil, err } - h, e := loadlibrary(namep) + h, e := syscall_loadlibrary(namep) if e != 0 { return nil, &DLLError{ Err: e, @@ -50,7 +58,7 @@ func LoadDLL(name string) (dll *DLL, err error) { } d := &DLL{ Name: name, - Handle: Handle(h), + Handle: h, } return d, nil } @@ -71,7 +79,7 @@ func (d *DLL) FindProc(name string) (proc *Proc, err error) { if err != nil { return nil, err } - a, e := getprocaddress(uintptr(d.Handle), namep) + a, e := syscall_getprocaddress(d.Handle, namep) if e != 0 { return nil, &DLLError{ Err: e, diff --git a/vendor/golang.org/x/sys/windows/mksyscall.go b/vendor/golang.org/x/sys/windows/mksyscall.go index 6277057..328e3b2 100644 --- a/vendor/golang.org/x/sys/windows/mksyscall.go +++ b/vendor/golang.org/x/sys/windows/mksyscall.go @@ -6,4 +6,4 @@ package windows -//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go +//go:generate go run golang.org/x/sys/windows/mkwinsyscall -output zsyscall_windows.go eventlog.go service.go syscall_windows.go security_windows.go diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 61b4964..d88ed91 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -9,14 +9,6 @@ import ( "unsafe" ) -const ( - STANDARD_RIGHTS_REQUIRED = 0xf0000 - STANDARD_RIGHTS_READ = 0x20000 - STANDARD_RIGHTS_WRITE = 0x20000 - STANDARD_RIGHTS_EXECUTE = 0x20000 - STANDARD_RIGHTS_ALL = 0x1F0000 -) - const ( NameUnknown = 0 NameFullyQualifiedDN = 1 @@ -235,16 +227,17 @@ func LookupSID(system, account string) (sid *SID, domain string, accType uint32, } } -// String converts SID to a string format -// suitable for display, storage, or transmission. -func (sid *SID) String() (string, error) { +// String converts SID to a string format suitable for display, storage, or transmission. +func (sid *SID) String() string { + // From https://docs.microsoft.com/en-us/windows/win32/secbiomet/general-constants + const SecurityMaxSidSize = 68 var s *uint16 e := ConvertSidToStringSid(sid, &s) if e != nil { - return "", e + return "" } defer LocalFree((Handle)(unsafe.Pointer(s))) - return UTF16ToString((*[256]uint16)(unsafe.Pointer(s))[:]), nil + return UTF16ToString((*[SecurityMaxSidSize]uint16)(unsafe.Pointer(s))[:]) } // Len returns the length, in bytes, of a valid security identifier SID. @@ -644,6 +637,8 @@ func (tml *Tokenmandatorylabel) Size() uint32 { //sys DuplicateTokenEx(existingToken Token, desiredAccess uint32, tokenAttributes *SecurityAttributes, impersonationLevel uint32, tokenType uint32, newToken *Token) (err error) = advapi32.DuplicateTokenEx //sys GetUserProfileDirectory(t Token, dir *uint16, dirLen *uint32) (err error) = userenv.GetUserProfileDirectoryW //sys getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemDirectoryW +//sys getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetWindowsDirectoryW +//sys getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) = kernel32.GetSystemWindowsDirectoryW // An access token contains the security information for a logon session. 
// The system creates an access token when a user logs on, and every @@ -654,21 +649,16 @@ func (tml *Tokenmandatorylabel) Size() uint32 { // system-related operations on the local computer. type Token Handle -// OpenCurrentProcessToken opens the access token -// associated with current process. It is a real -// token that needs to be closed, unlike -// GetCurrentProcessToken. +// OpenCurrentProcessToken opens an access token associated with current +// process with TOKEN_QUERY access. It is a real token that needs to be closed. +// +// Deprecated: Explicitly call OpenProcessToken(CurrentProcess(), ...) +// with the desired access instead, or use GetCurrentProcessToken for a +// TOKEN_QUERY token. func OpenCurrentProcessToken() (Token, error) { - p, e := GetCurrentProcess() - if e != nil { - return 0, e - } - var t Token - e = OpenProcessToken(p, TOKEN_QUERY, &t) - if e != nil { - return 0, e - } - return t, nil + var token Token + err := OpenProcessToken(CurrentProcess(), TOKEN_QUERY, &token) + return token, err } // GetCurrentProcessToken returns the access token associated with @@ -785,8 +775,8 @@ func (token Token) GetLinkedToken() (Token, error) { return linkedToken, nil } -// GetSystemDirectory retrieves path to current location of the system -// directory, which is typically, though not always, C:\Windows\System32. +// GetSystemDirectory retrieves the path to current location of the system +// directory, which is typically, though not always, `C:\Windows\System32`. func GetSystemDirectory() (string, error) { n := uint32(MAX_PATH) for { @@ -802,6 +792,42 @@ func GetSystemDirectory() (string, error) { } } +// GetWindowsDirectory retrieves the path to current location of the Windows +// directory, which is typically, though not always, `C:\Windows`. This may +// be a private user directory in the case that the application is running +// under a terminal server. +func GetWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + +// GetSystemWindowsDirectory retrieves the path to current location of the +// Windows directory, which is typically, though not always, `C:\Windows`. +func GetSystemWindowsDirectory() (string, error) { + n := uint32(MAX_PATH) + for { + b := make([]uint16, n) + l, e := getSystemWindowsDirectory(&b[0], n) + if e != nil { + return "", e + } + if l <= n { + return UTF16ToString(b[:l]), nil + } + n = l + } +} + // IsMember reports whether the access token t is a member of the provided SID. 
func (t Token) IsMember(sid *SID) (bool, error) { var b int32 @@ -852,3 +878,521 @@ type WTS_SESSION_INFO struct { //sys WTSQueryUserToken(session uint32, token *Token) (err error) = wtsapi32.WTSQueryUserToken //sys WTSEnumerateSessions(handle Handle, reserved uint32, version uint32, sessions **WTS_SESSION_INFO, count *uint32) (err error) = wtsapi32.WTSEnumerateSessionsW //sys WTSFreeMemory(ptr uintptr) = wtsapi32.WTSFreeMemory + +type ACL struct { + aclRevision byte + sbz1 byte + aclSize uint16 + aceCount uint16 + sbz2 uint16 +} + +type SECURITY_DESCRIPTOR struct { + revision byte + sbz1 byte + control SECURITY_DESCRIPTOR_CONTROL + owner *SID + group *SID + sacl *ACL + dacl *ACL +} + +type SecurityAttributes struct { + Length uint32 + SecurityDescriptor *SECURITY_DESCRIPTOR + InheritHandle uint32 +} + +type SE_OBJECT_TYPE uint32 + +// Constants for type SE_OBJECT_TYPE +const ( + SE_UNKNOWN_OBJECT_TYPE = 0 + SE_FILE_OBJECT = 1 + SE_SERVICE = 2 + SE_PRINTER = 3 + SE_REGISTRY_KEY = 4 + SE_LMSHARE = 5 + SE_KERNEL_OBJECT = 6 + SE_WINDOW_OBJECT = 7 + SE_DS_OBJECT = 8 + SE_DS_OBJECT_ALL = 9 + SE_PROVIDER_DEFINED_OBJECT = 10 + SE_WMIGUID_OBJECT = 11 + SE_REGISTRY_WOW64_32KEY = 12 + SE_REGISTRY_WOW64_64KEY = 13 +) + +type SECURITY_INFORMATION uint32 + +// Constants for type SECURITY_INFORMATION +const ( + OWNER_SECURITY_INFORMATION = 0x00000001 + GROUP_SECURITY_INFORMATION = 0x00000002 + DACL_SECURITY_INFORMATION = 0x00000004 + SACL_SECURITY_INFORMATION = 0x00000008 + LABEL_SECURITY_INFORMATION = 0x00000010 + ATTRIBUTE_SECURITY_INFORMATION = 0x00000020 + SCOPE_SECURITY_INFORMATION = 0x00000040 + BACKUP_SECURITY_INFORMATION = 0x00010000 + PROTECTED_DACL_SECURITY_INFORMATION = 0x80000000 + PROTECTED_SACL_SECURITY_INFORMATION = 0x40000000 + UNPROTECTED_DACL_SECURITY_INFORMATION = 0x20000000 + UNPROTECTED_SACL_SECURITY_INFORMATION = 0x10000000 +) + +type SECURITY_DESCRIPTOR_CONTROL uint16 + +// Constants for type SECURITY_DESCRIPTOR_CONTROL +const ( + SE_OWNER_DEFAULTED = 0x0001 + SE_GROUP_DEFAULTED = 0x0002 + SE_DACL_PRESENT = 0x0004 + SE_DACL_DEFAULTED = 0x0008 + SE_SACL_PRESENT = 0x0010 + SE_SACL_DEFAULTED = 0x0020 + SE_DACL_AUTO_INHERIT_REQ = 0x0100 + SE_SACL_AUTO_INHERIT_REQ = 0x0200 + SE_DACL_AUTO_INHERITED = 0x0400 + SE_SACL_AUTO_INHERITED = 0x0800 + SE_DACL_PROTECTED = 0x1000 + SE_SACL_PROTECTED = 0x2000 + SE_RM_CONTROL_VALID = 0x4000 + SE_SELF_RELATIVE = 0x8000 +) + +type ACCESS_MASK uint32 + +// Constants for type ACCESS_MASK +const ( + DELETE = 0x00010000 + READ_CONTROL = 0x00020000 + WRITE_DAC = 0x00040000 + WRITE_OWNER = 0x00080000 + SYNCHRONIZE = 0x00100000 + STANDARD_RIGHTS_REQUIRED = 0x000F0000 + STANDARD_RIGHTS_READ = READ_CONTROL + STANDARD_RIGHTS_WRITE = READ_CONTROL + STANDARD_RIGHTS_EXECUTE = READ_CONTROL + STANDARD_RIGHTS_ALL = 0x001F0000 + SPECIFIC_RIGHTS_ALL = 0x0000FFFF + ACCESS_SYSTEM_SECURITY = 0x01000000 + MAXIMUM_ALLOWED = 0x02000000 + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + GENERIC_EXECUTE = 0x20000000 + GENERIC_ALL = 0x10000000 +) + +type ACCESS_MODE uint32 + +// Constants for type ACCESS_MODE +const ( + NOT_USED_ACCESS = 0 + GRANT_ACCESS = 1 + SET_ACCESS = 2 + DENY_ACCESS = 3 + REVOKE_ACCESS = 4 + SET_AUDIT_SUCCESS = 5 + SET_AUDIT_FAILURE = 6 +) + +// Constants for AceFlags and Inheritance fields +const ( + NO_INHERITANCE = 0x0 + SUB_OBJECTS_ONLY_INHERIT = 0x1 + SUB_CONTAINERS_ONLY_INHERIT = 0x2 + SUB_CONTAINERS_AND_OBJECTS_INHERIT = 0x3 + INHERIT_NO_PROPAGATE = 0x4 + INHERIT_ONLY = 0x8 + INHERITED_ACCESS_ENTRY = 0x10 + INHERITED_PARENT = 0x10000000 + 
INHERITED_GRANDPARENT = 0x20000000 + OBJECT_INHERIT_ACE = 0x1 + CONTAINER_INHERIT_ACE = 0x2 + NO_PROPAGATE_INHERIT_ACE = 0x4 + INHERIT_ONLY_ACE = 0x8 + INHERITED_ACE = 0x10 + VALID_INHERIT_FLAGS = 0x1F +) + +type MULTIPLE_TRUSTEE_OPERATION uint32 + +// Constants for MULTIPLE_TRUSTEE_OPERATION +const ( + NO_MULTIPLE_TRUSTEE = 0 + TRUSTEE_IS_IMPERSONATE = 1 +) + +type TRUSTEE_FORM uint32 + +// Constants for TRUSTEE_FORM +const ( + TRUSTEE_IS_SID = 0 + TRUSTEE_IS_NAME = 1 + TRUSTEE_BAD_FORM = 2 + TRUSTEE_IS_OBJECTS_AND_SID = 3 + TRUSTEE_IS_OBJECTS_AND_NAME = 4 +) + +type TRUSTEE_TYPE uint32 + +// Constants for TRUSTEE_TYPE +const ( + TRUSTEE_IS_UNKNOWN = 0 + TRUSTEE_IS_USER = 1 + TRUSTEE_IS_GROUP = 2 + TRUSTEE_IS_DOMAIN = 3 + TRUSTEE_IS_ALIAS = 4 + TRUSTEE_IS_WELL_KNOWN_GROUP = 5 + TRUSTEE_IS_DELETED = 6 + TRUSTEE_IS_INVALID = 7 + TRUSTEE_IS_COMPUTER = 8 +) + +// Constants for ObjectsPresent field +const ( + ACE_OBJECT_TYPE_PRESENT = 0x1 + ACE_INHERITED_OBJECT_TYPE_PRESENT = 0x2 +) + +type EXPLICIT_ACCESS struct { + AccessPermissions ACCESS_MASK + AccessMode ACCESS_MODE + Inheritance uint32 + Trustee TRUSTEE +} + +// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions. +type TrusteeValue uintptr + +func TrusteeValueFromString(str string) TrusteeValue { + return TrusteeValue(unsafe.Pointer(StringToUTF16Ptr(str))) +} +func TrusteeValueFromSID(sid *SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(sid)) +} +func TrusteeValueFromObjectsAndSid(objectsAndSid *OBJECTS_AND_SID) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndSid)) +} +func TrusteeValueFromObjectsAndName(objectsAndName *OBJECTS_AND_NAME) TrusteeValue { + return TrusteeValue(unsafe.Pointer(objectsAndName)) +} + +type TRUSTEE struct { + MultipleTrustee *TRUSTEE + MultipleTrusteeOperation MULTIPLE_TRUSTEE_OPERATION + TrusteeForm TRUSTEE_FORM + TrusteeType TRUSTEE_TYPE + TrusteeValue TrusteeValue +} + +type OBJECTS_AND_SID struct { + ObjectsPresent uint32 + ObjectTypeGuid GUID + InheritedObjectTypeGuid GUID + Sid *SID +} + +type OBJECTS_AND_NAME struct { + ObjectsPresent uint32 + ObjectType SE_OBJECT_TYPE + ObjectTypeName *uint16 + InheritedObjectTypeName *uint16 + Name *uint16 +} + +//sys getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetSecurityInfo +//sys SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) = advapi32.SetSecurityInfo +//sys getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) = advapi32.GetNamedSecurityInfoW +//sys SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) = advapi32.SetNamedSecurityInfoW + +//sys buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) = advapi32.BuildSecurityDescriptorW +//sys initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) = 
advapi32.InitializeSecurityDescriptor + +//sys getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) = advapi32.GetSecurityDescriptorControl +//sys getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorDacl +//sys getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorSacl +//sys getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorOwner +//sys getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) = advapi32.GetSecurityDescriptorGroup +//sys getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) = advapi32.GetSecurityDescriptorLength +//sys getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) [failretval!=0] = advapi32.GetSecurityDescriptorRMControl +//sys isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) = advapi32.IsValidSecurityDescriptor + +//sys setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) = advapi32.SetSecurityDescriptorControl +//sys setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorDacl +//sys setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) = advapi32.SetSecurityDescriptorSacl +//sys setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) = advapi32.SetSecurityDescriptorOwner +//sys setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) = advapi32.SetSecurityDescriptorGroup +//sys setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) = advapi32.SetSecurityDescriptorRMControl + +//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW +//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW + +//sys makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) = advapi32.MakeAbsoluteSD +//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD + +//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW + +// Control returns the security descriptor control bits. +func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) { + err = getSecurityDescriptorControl(sd, &control, &revision) + return +} + +// SetControl sets the security descriptor control bits. 
+func (sd *SECURITY_DESCRIPTOR) SetControl(controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) error { + return setSecurityDescriptorControl(sd, controlBitsOfInterest, controlBitsToSet) +} + +// RMControl returns the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) RMControl() (control uint8, err error) { + err = getSecurityDescriptorRMControl(sd, &control) + return +} + +// SetRMControl sets the security descriptor resource manager control bits. +func (sd *SECURITY_DESCRIPTOR) SetRMControl(rmControl uint8) { + setSecurityDescriptorRMControl(sd, &rmControl) +} + +// DACL returns the security descriptor DACL and whether it was defaulted. The dacl return value may be nil +// if a DACL exists but is an "empty DACL", meaning fully permissive. If the DACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. +func (sd *SECURITY_DESCRIPTOR) DACL() (dacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorDacl(sd, &present, &dacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetDACL sets the absolute security descriptor DACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetDACL(dacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorDacl(absoluteSD, present, dacl, defaulted) +} + +// SACL returns the security descriptor SACL and whether it was defaulted. The sacl return value may be nil +// if a SACL exists but is an "empty SACL", meaning fully permissive. If the SACL does not exist, err returns +// ERROR_OBJECT_NOT_FOUND. +func (sd *SECURITY_DESCRIPTOR) SACL() (sacl *ACL, defaulted bool, err error) { + var present bool + err = getSecurityDescriptorSacl(sd, &present, &sacl, &defaulted) + if !present { + err = ERROR_OBJECT_NOT_FOUND + } + return +} + +// SetSACL sets the absolute security descriptor SACL. +func (absoluteSD *SECURITY_DESCRIPTOR) SetSACL(sacl *ACL, present, defaulted bool) error { + return setSecurityDescriptorSacl(absoluteSD, present, sacl, defaulted) +} + +// Owner returns the security descriptor owner and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Owner() (owner *SID, defaulted bool, err error) { + err = getSecurityDescriptorOwner(sd, &owner, &defaulted) + return +} + +// SetOwner sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetOwner(owner *SID, defaulted bool) error { + return setSecurityDescriptorOwner(absoluteSD, owner, defaulted) +} + +// Group returns the security descriptor group and whether it was defaulted. +func (sd *SECURITY_DESCRIPTOR) Group() (group *SID, defaulted bool, err error) { + err = getSecurityDescriptorGroup(sd, &group, &defaulted) + return +} + +// SetGroup sets the absolute security descriptor owner. +func (absoluteSD *SECURITY_DESCRIPTOR) SetGroup(group *SID, defaulted bool) error { + return setSecurityDescriptorGroup(absoluteSD, group, defaulted) +} + +// Length returns the length of the security descriptor. +func (sd *SECURITY_DESCRIPTOR) Length() uint32 { + return getSecurityDescriptorLength(sd) +} + +// IsValid returns whether the security descriptor is valid. +func (sd *SECURITY_DESCRIPTOR) IsValid() bool { + return isValidSecurityDescriptor(sd) +} + +// String returns the SDDL form of the security descriptor, with a function signature that can be +// used with %v formatting directives. 
+func (sd *SECURITY_DESCRIPTOR) String() string { + var sddl *uint16 + err := convertSecurityDescriptorToStringSecurityDescriptor(sd, 1, 0xff, &sddl, nil) + if err != nil { + return "" + } + defer LocalFree(Handle(unsafe.Pointer(sddl))) + return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(sddl))[:]) +} + +// ToAbsolute converts a self-relative security descriptor into an absolute one. +func (selfRelativeSD *SECURITY_DESCRIPTOR) ToAbsolute() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := selfRelativeSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE == 0 { + err = ERROR_INVALID_PARAMETER + return + } + var absoluteSDSize, daclSize, saclSize, ownerSize, groupSize uint32 + err = makeAbsoluteSD(selfRelativeSD, nil, &absoluteSDSize, + nil, &daclSize, nil, &saclSize, nil, &ownerSize, nil, &groupSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeAbsoluteSD is expected to fail, but it succeeds. + return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if absoluteSDSize > 0 { + absoluteSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, absoluteSDSize)[0])) + } + var ( + dacl *ACL + sacl *ACL + owner *SID + group *SID + ) + if daclSize > 0 { + dacl = (*ACL)(unsafe.Pointer(&make([]byte, daclSize)[0])) + } + if saclSize > 0 { + sacl = (*ACL)(unsafe.Pointer(&make([]byte, saclSize)[0])) + } + if ownerSize > 0 { + owner = (*SID)(unsafe.Pointer(&make([]byte, ownerSize)[0])) + } + if groupSize > 0 { + group = (*SID)(unsafe.Pointer(&make([]byte, groupSize)[0])) + } + err = makeAbsoluteSD(selfRelativeSD, absoluteSD, &absoluteSDSize, + dacl, &daclSize, sacl, &saclSize, owner, &ownerSize, group, &groupSize) + return +} + +// ToSelfRelative converts an absolute security descriptor into a self-relative one. +func (absoluteSD *SECURITY_DESCRIPTOR) ToSelfRelative() (selfRelativeSD *SECURITY_DESCRIPTOR, err error) { + control, _, err := absoluteSD.Control() + if err != nil { + return + } + if control&SE_SELF_RELATIVE != 0 { + err = ERROR_INVALID_PARAMETER + return + } + var selfRelativeSDSize uint32 + err = makeSelfRelativeSD(absoluteSD, nil, &selfRelativeSDSize) + switch err { + case ERROR_INSUFFICIENT_BUFFER: + case nil: + // makeSelfRelativeSD is expected to fail, but it succeeds. + return nil, ERROR_INTERNAL_ERROR + default: + return nil, err + } + if selfRelativeSDSize > 0 { + selfRelativeSD = (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&make([]byte, selfRelativeSDSize)[0])) + } + err = makeSelfRelativeSD(absoluteSD, selfRelativeSD, &selfRelativeSDSize) + return +} + +func (selfRelativeSD *SECURITY_DESCRIPTOR) copySelfRelativeSecurityDescriptor() *SECURITY_DESCRIPTOR { + sdBytes := make([]byte, selfRelativeSD.Length()) + copy(sdBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(selfRelativeSD))[:len(sdBytes)]) + return (*SECURITY_DESCRIPTOR)(unsafe.Pointer(&sdBytes[0])) +} + +// SecurityDescriptorFromString converts an SDDL string describing a security descriptor into a +// self-relative security descriptor object allocated on the Go heap. 
+func SecurityDescriptorFromString(sddl string) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &winHeapSD, nil) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetSecurityInfo queries the security information for a given handle and returns the self-relative security +// descriptor result on the Go heap. +func GetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getSecurityInfo(handle, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// GetNamedSecurityInfo queries the security information for a given named object and returns the self-relative security +// descriptor result on the Go heap. +func GetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + err = getNamedSecurityInfo(objectName, objectType, securityInformation, nil, nil, nil, nil, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// BuildSecurityDescriptor makes a new security descriptor using the input trustees, explicit access lists, and +// prior security descriptor to be merged, any of which can be nil, returning the self-relative security descriptor +// result on the Go heap. +func BuildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, accessEntries []EXPLICIT_ACCESS, auditEntries []EXPLICIT_ACCESS, mergedSecurityDescriptor *SECURITY_DESCRIPTOR) (sd *SECURITY_DESCRIPTOR, err error) { + var winHeapSD *SECURITY_DESCRIPTOR + var winHeapSDSize uint32 + var firstAccessEntry *EXPLICIT_ACCESS + if len(accessEntries) > 0 { + firstAccessEntry = &accessEntries[0] + } + var firstAuditEntry *EXPLICIT_ACCESS + if len(auditEntries) > 0 { + firstAuditEntry = &auditEntries[0] + } + err = buildSecurityDescriptor(owner, group, uint32(len(accessEntries)), firstAccessEntry, uint32(len(auditEntries)), firstAuditEntry, mergedSecurityDescriptor, &winHeapSDSize, &winHeapSD) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapSD))) + return winHeapSD.copySelfRelativeSecurityDescriptor(), nil +} + +// NewSecurityDescriptor creates and initializes a new absolute security descriptor. +func NewSecurityDescriptor() (absoluteSD *SECURITY_DESCRIPTOR, err error) { + absoluteSD = &SECURITY_DESCRIPTOR{} + err = initializeSecurityDescriptor(absoluteSD, 1) + return +} + +// ACLFromEntries returns a new ACL on the Go heap containing a list of explicit entries as well as those of another ACL. +// Both explicitEntries and mergedACL are optional and can be nil. 
+func ACLFromEntries(explicitEntries []EXPLICIT_ACCESS, mergedACL *ACL) (acl *ACL, err error) { + var firstExplicitEntry *EXPLICIT_ACCESS + if len(explicitEntries) > 0 { + firstExplicitEntry = &explicitEntries[0] + } + var winHeapACL *ACL + err = setEntriesInAcl(uint32(len(explicitEntries)), firstExplicitEntry, mergedACL, &winHeapACL) + if err != nil { + return + } + defer LocalFree(Handle(unsafe.Pointer(winHeapACL))) + aclBytes := make([]byte, winHeapACL.aclSize) + copy(aclBytes, (*[(1 << 31) - 1]byte)(unsafe.Pointer(winHeapACL))[:len(aclBytes)]) + return (*ACL)(unsafe.Pointer(&aclBytes[0])), nil +} diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 03383f1..847e00b 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -159,6 +159,10 @@ type SERVICE_DESCRIPTION struct { Description *uint16 } +type SERVICE_DELAYED_AUTO_START_INFO struct { + IsDelayedAutoStartUp uint32 +} + type SERVICE_STATUS_PROCESS struct { ServiceType uint32 CurrentState uint32 diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index b230509..034b5f4 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -57,6 +57,10 @@ const ( FILE_VOLUME_IS_COMPRESSED = 0x00008000 FILE_VOLUME_QUOTAS = 0x00000020 + // Flags for LockFileEx. + LOCKFILE_FAIL_IMMEDIATELY = 0x00000001 + LOCKFILE_EXCLUSIVE_LOCK = 0x00000002 + // Return values of SleepEx and other APC functions STATUS_USER_APC = 0x000000C0 WAIT_IO_COMPLETION = STATUS_USER_APC @@ -136,6 +140,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys LoadLibraryEx(libname string, zero Handle, flags uintptr) (handle Handle, err error) = LoadLibraryExW //sys FreeLibrary(handle Handle) (err error) //sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) +//sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW +//sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW //sys ExitProcess(exitcode uint32) @@ -160,6 +166,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys DeleteFile(path *uint16) (err error) = DeleteFileW //sys MoveFile(from *uint16, to *uint16) (err error) = MoveFileW //sys MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) = MoveFileExW +//sys LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) +//sys UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) //sys GetComputerName(buf *uint16, n *uint32) (err error) = GetComputerNameW //sys GetComputerNameEx(nametype uint32, buf *uint16, n *uint32) (err error) = GetComputerNameExW //sys SetEndOfFile(handle Handle) (err error) @@ -173,13 +181,11 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys CancelIoEx(s Handle, o *Overlapped) (err error) //sys CreateProcess(appName *uint16, commandLine *uint16, procSecurity *SecurityAttributes, threadSecurity *SecurityAttributes, inheritHandles bool, creationFlags uint32, env *uint16, currentDir *uint16, startupInfo *StartupInfo, outProcInfo *ProcessInformation) 
(err error) = CreateProcessW //sys OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (handle Handle, err error) -//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) = shell32.ShellExecuteW +//sys ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) [failretval<=32] = shell32.ShellExecuteW //sys shGetKnownFolderPath(id *KNOWNFOLDERID, flags uint32, token Token, path **uint16) (ret error) = shell32.SHGetKnownFolderPath //sys TerminateProcess(handle Handle, exitcode uint32) (err error) //sys GetExitCodeProcess(handle Handle, exitcode *uint32) (err error) //sys GetStartupInfo(startupInfo *StartupInfo) (err error) = GetStartupInfoW -//sys GetCurrentProcess() (pseudoHandle Handle, err error) -//sys GetCurrentThread() (pseudoHandle Handle, err error) //sys GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) //sys DuplicateHandle(hSourceProcessHandle Handle, hSourceHandle Handle, hTargetProcessHandle Handle, lpTargetHandle *Handle, dwDesiredAccess uint32, bInheritHandle bool, dwOptions uint32) (err error) //sys WaitForSingleObject(handle Handle, waitMilliseconds uint32) (event uint32, err error) [failretval==0xffffffff] @@ -257,6 +263,10 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetEvent(event Handle) (err error) = kernel32.SetEvent //sys ResetEvent(event Handle) (err error) = kernel32.ResetEvent //sys PulseEvent(event Handle) (err error) = kernel32.PulseEvent +//sys CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) = kernel32.CreateMutexW +//sys CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) = kernel32.CreateMutexExW +//sys OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) = kernel32.OpenMutexW +//sys ReleaseMutex(mutex Handle) (err error) = kernel32.ReleaseMutex //sys SleepEx(milliseconds uint32, alertable bool) (ret uint32) = kernel32.SleepEx //sys CreateJobObject(jobAttr *SecurityAttributes, name *uint16) (handle Handle, err error) = kernel32.CreateJobObjectW //sys AssignProcessToJobObject(job Handle, process Handle) (err error) = kernel32.AssignProcessToJobObject @@ -269,6 +279,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GenerateConsoleCtrlEvent(ctrlEvent uint32, processGroupID uint32) (err error) //sys GetProcessId(process Handle) (id uint32, err error) //sys OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (handle Handle, err error) +//sys SetProcessPriorityBoost(process Handle, disable bool) (err error) = kernel32.SetProcessPriorityBoost // Volume Management Functions //sys DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) = DefineDosDeviceW @@ -279,6 +290,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys FindNextVolumeMountPoint(findVolumeMountPoint Handle, volumeMountPoint *uint16, bufferLength uint32) (err error) = FindNextVolumeMountPointW //sys FindVolumeClose(findVolume Handle) (err error) //sys FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) +//sys GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) = GetDiskFreeSpaceExW //sys GetDriveType(rootPathName *uint16) (driveType uint32) = GetDriveTypeW 
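
The hunks above also vendor the new LOCKFILE_FAIL_IMMEDIATELY/LOCKFILE_EXCLUSIVE_LOCK flags and the LockFileEx/UnlockFileEx wrappers. As an aside (not part of the patch or the vendored files), a minimal usage sketch follows; the lock-file path and the single-byte lock range are illustrative assumptions only, and the surrounding calls (CreateFile, UTF16PtrFromString, CloseHandle) are the existing exported helpers of golang.org/x/sys/windows.

package main

import (
	"log"

	"golang.org/x/sys/windows"
)

func main() {
	// Hypothetical lock-file path used only for illustration.
	name, err := windows.UTF16PtrFromString(`C:\Temp\app.lock`)
	if err != nil {
		log.Fatal(err)
	}
	h, err := windows.CreateFile(name,
		windows.GENERIC_READ|windows.GENERIC_WRITE,
		windows.FILE_SHARE_READ|windows.FILE_SHARE_WRITE,
		nil, windows.OPEN_ALWAYS, windows.FILE_ATTRIBUTE_NORMAL, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer windows.CloseHandle(h)

	// Take an exclusive lock on the first byte of the file.
	// LOCKFILE_FAIL_IMMEDIATELY turns a contended lock into an error
	// instead of a blocking wait.
	ol := new(windows.Overlapped)
	err = windows.LockFileEx(h,
		windows.LOCKFILE_EXCLUSIVE_LOCK|windows.LOCKFILE_FAIL_IMMEDIATELY,
		0, 1, 0, ol)
	if err != nil {
		log.Fatalf("lock not acquired: %v", err)
	}
	defer windows.UnlockFileEx(h, 0, 1, 0, ol)

	// ... critical section while the byte-range lock is held ...
}

The same Overlapped value (carrying the lock offset) is reused for UnlockFileEx so the unlock targets exactly the range that was locked.
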
//sys GetLogicalDrives() (drivesBitMask uint32, err error) [failretval==0] //sys GetLogicalDriveStrings(bufferLength uint32, buffer *uint16) (n uint32, err error) [failretval==0] = GetLogicalDriveStringsW @@ -291,14 +303,47 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys SetVolumeLabel(rootPathName *uint16, volumeName *uint16) (err error) = SetVolumeLabelW //sys SetVolumeMountPoint(volumeMountPoint *uint16, volumeName *uint16) (err error) = SetVolumeMountPointW //sys MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret int32, err error) [failretval==0] = user32.MessageBoxW +//sys ExitWindowsEx(flags uint32, reason uint32) (err error) = user32.ExitWindowsEx +//sys InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) = advapi32.InitiateSystemShutdownExW +//sys SetProcessShutdownParameters(level uint32, flags uint32) (err error) = kernel32.SetProcessShutdownParameters +//sys GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) = kernel32.GetProcessShutdownParameters //sys clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) = ole32.CLSIDFromString //sys stringFromGUID2(rguid *GUID, lpsz *uint16, cchMax int32) (chars int32) = ole32.StringFromGUID2 //sys coCreateGuid(pguid *GUID) (ret error) = ole32.CoCreateGuid //sys CoTaskMemFree(address unsafe.Pointer) = ole32.CoTaskMemFree //sys rtlGetVersion(info *OsVersionInfoEx) (ret error) = ntdll.RtlGetVersion +//sys rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) = ntdll.RtlGetNtVersionNumbers // syscall interface implementation for other packages +// GetCurrentProcess returns the handle for the current process. +// It is a pseudo handle that does not need to be closed. +// The returned error is always nil. +// +// Deprecated: use CurrentProcess for the same Handle without the nil +// error. +func GetCurrentProcess() (Handle, error) { + return CurrentProcess(), nil +} + +// CurrentProcess returns the handle for the current process. +// It is a pseudo handle that does not need to be closed. +func CurrentProcess() Handle { return Handle(^uintptr(1 - 1)) } + +// GetCurrentThread returns the handle for the current thread. +// It is a pseudo handle that does not need to be closed. +// The returned error is always nil. +// +// Deprecated: use CurrentThread for the same Handle without the nil +// error. +func GetCurrentThread() (Handle, error) { + return CurrentThread(), nil +} + +// CurrentThread returns the handle for the current thread. +// It is a pseudo handle that does not need to be closed. +func CurrentThread() Handle { return Handle(^uintptr(2 - 1)) } + // GetProcAddressByOrdinal retrieves the address of the exported // function from module by ordinal. 
func GetProcAddressByOrdinal(module Handle, ordinal uintptr) (proc uintptr, err error) { @@ -365,7 +410,11 @@ func Open(path string, mode int, perm uint32) (fd Handle, err error) { default: createmode = OPEN_EXISTING } - h, e := CreateFile(pathp, access, sharemode, sa, createmode, FILE_ATTRIBUTE_NORMAL, 0) + var attrs uint32 = FILE_ATTRIBUTE_NORMAL + if perm&S_IWRITE == 0 { + attrs = FILE_ATTRIBUTE_READONLY + } + h, e := CreateFile(pathp, access, sharemode, sa, createmode, attrs, 0) return h, e } @@ -812,7 +861,7 @@ func (rsa *RawSockaddrAny) Sockaddr() (Sockaddr, error) { for n < len(pp.Path) && pp.Path[n] != 0 { n++ } - bytes := (*[10000]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] + bytes := (*[len(pp.Path)]byte)(unsafe.Pointer(&pp.Path[0]))[0:n] sa.Name = string(bytes) return sa, nil @@ -1306,8 +1355,8 @@ func (t Token) KnownFolderPath(folderID *KNOWNFOLDERID, flags uint32) (string, e return UTF16ToString((*[(1 << 30) - 1]uint16)(unsafe.Pointer(p))[:]), nil } -// RtlGetVersion returns the true version of the underlying operating system, ignoring -// any manifesting or compatibility layers on top of the win32 layer. +// RtlGetVersion returns the version of the underlying operating system, ignoring +// manifest semantics but is affected by the application compatibility layer. func RtlGetVersion() *OsVersionInfoEx { info := &OsVersionInfoEx{} info.osVersionInfoSize = uint32(unsafe.Sizeof(*info)) @@ -1318,3 +1367,11 @@ func RtlGetVersion() *OsVersionInfoEx { _ = rtlGetVersion(info) return info } + +// RtlGetNtVersionNumbers returns the version of the underlying operating system, +// ignoring manifest semantics and the application compatibility layer. +func RtlGetNtVersionNumbers() (majorVersion, minorVersion, buildNumber uint32) { + rtlGetNtVersionNumbers(&majorVersion, &minorVersion, &buildNumber) + buildNumber &= 0xffff + return +} diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 1e3947f..7f178bb 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -62,11 +62,6 @@ var signals = [...]string{ } const ( - GENERIC_READ = 0x80000000 - GENERIC_WRITE = 0x40000000 - GENERIC_EXECUTE = 0x20000000 - GENERIC_ALL = 0x10000000 - FILE_LIST_DIRECTORY = 0x00000001 FILE_APPEND_DATA = 0x00000004 FILE_WRITE_ATTRIBUTES = 0x00000100 @@ -158,13 +153,6 @@ const ( WAIT_OBJECT_0 = 0x00000000 WAIT_FAILED = 0xFFFFFFFF - // Standard access rights. - DELETE = 0x00010000 - READ_CONTROL = 0x00020000 - SYNCHRONIZE = 0x00100000 - WRITE_DAC = 0x00040000 - WRITE_OWNER = 0x00080000 - // Access rights for process. 
PROCESS_CREATE_PROCESS = 0x0080 PROCESS_CREATE_THREAD = 0x0002 @@ -483,12 +471,6 @@ func NsecToTimeval(nsec int64) (tv Timeval) { return } -type SecurityAttributes struct { - Length uint32 - SecurityDescriptor uintptr - InheritHandle uint32 -} - type Overlapped struct { Internal uintptr InternalHigh uintptr @@ -1190,6 +1172,28 @@ const ( REG_QWORD = REG_QWORD_LITTLE_ENDIAN ) +const ( + EVENT_MODIFY_STATE = 0x0002 + EVENT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 + + MUTANT_QUERY_STATE = 0x0001 + MUTANT_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | MUTANT_QUERY_STATE + + SEMAPHORE_MODIFY_STATE = 0x0002 + SEMAPHORE_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | 0x3 + + TIMER_QUERY_STATE = 0x0001 + TIMER_MODIFY_STATE = 0x0002 + TIMER_ALL_ACCESS = STANDARD_RIGHTS_REQUIRED | SYNCHRONIZE | TIMER_QUERY_STATE | TIMER_MODIFY_STATE + + MUTEX_MODIFY_STATE = MUTANT_QUERY_STATE + MUTEX_ALL_ACCESS = MUTANT_ALL_ACCESS + + CREATE_EVENT_MANUAL_RESET = 0x1 + CREATE_EVENT_INITIAL_SET = 0x2 + CREATE_MUTEX_INITIAL_OWNER = 0x1 +) + type AddrinfoW struct { Flags int32 Family int32 @@ -1666,3 +1670,75 @@ type OsVersionInfoEx struct { ProductType byte _ byte } + +const ( + EWX_LOGOFF = 0x00000000 + EWX_SHUTDOWN = 0x00000001 + EWX_REBOOT = 0x00000002 + EWX_FORCE = 0x00000004 + EWX_POWEROFF = 0x00000008 + EWX_FORCEIFHUNG = 0x00000010 + EWX_QUICKRESOLVE = 0x00000020 + EWX_RESTARTAPPS = 0x00000040 + EWX_HYBRID_SHUTDOWN = 0x00400000 + EWX_BOOTOPTIONS = 0x01000000 + + SHTDN_REASON_FLAG_COMMENT_REQUIRED = 0x01000000 + SHTDN_REASON_FLAG_DIRTY_PROBLEM_ID_REQUIRED = 0x02000000 + SHTDN_REASON_FLAG_CLEAN_UI = 0x04000000 + SHTDN_REASON_FLAG_DIRTY_UI = 0x08000000 + SHTDN_REASON_FLAG_USER_DEFINED = 0x40000000 + SHTDN_REASON_FLAG_PLANNED = 0x80000000 + SHTDN_REASON_MAJOR_OTHER = 0x00000000 + SHTDN_REASON_MAJOR_NONE = 0x00000000 + SHTDN_REASON_MAJOR_HARDWARE = 0x00010000 + SHTDN_REASON_MAJOR_OPERATINGSYSTEM = 0x00020000 + SHTDN_REASON_MAJOR_SOFTWARE = 0x00030000 + SHTDN_REASON_MAJOR_APPLICATION = 0x00040000 + SHTDN_REASON_MAJOR_SYSTEM = 0x00050000 + SHTDN_REASON_MAJOR_POWER = 0x00060000 + SHTDN_REASON_MAJOR_LEGACY_API = 0x00070000 + SHTDN_REASON_MINOR_OTHER = 0x00000000 + SHTDN_REASON_MINOR_NONE = 0x000000ff + SHTDN_REASON_MINOR_MAINTENANCE = 0x00000001 + SHTDN_REASON_MINOR_INSTALLATION = 0x00000002 + SHTDN_REASON_MINOR_UPGRADE = 0x00000003 + SHTDN_REASON_MINOR_RECONFIG = 0x00000004 + SHTDN_REASON_MINOR_HUNG = 0x00000005 + SHTDN_REASON_MINOR_UNSTABLE = 0x00000006 + SHTDN_REASON_MINOR_DISK = 0x00000007 + SHTDN_REASON_MINOR_PROCESSOR = 0x00000008 + SHTDN_REASON_MINOR_NETWORKCARD = 0x00000009 + SHTDN_REASON_MINOR_POWER_SUPPLY = 0x0000000a + SHTDN_REASON_MINOR_CORDUNPLUGGED = 0x0000000b + SHTDN_REASON_MINOR_ENVIRONMENT = 0x0000000c + SHTDN_REASON_MINOR_HARDWARE_DRIVER = 0x0000000d + SHTDN_REASON_MINOR_OTHERDRIVER = 0x0000000e + SHTDN_REASON_MINOR_BLUESCREEN = 0x0000000F + SHTDN_REASON_MINOR_SERVICEPACK = 0x00000010 + SHTDN_REASON_MINOR_HOTFIX = 0x00000011 + SHTDN_REASON_MINOR_SECURITYFIX = 0x00000012 + SHTDN_REASON_MINOR_SECURITY = 0x00000013 + SHTDN_REASON_MINOR_NETWORK_CONNECTIVITY = 0x00000014 + SHTDN_REASON_MINOR_WMI = 0x00000015 + SHTDN_REASON_MINOR_SERVICEPACK_UNINSTALL = 0x00000016 + SHTDN_REASON_MINOR_HOTFIX_UNINSTALL = 0x00000017 + SHTDN_REASON_MINOR_SECURITYFIX_UNINSTALL = 0x00000018 + SHTDN_REASON_MINOR_MMC = 0x00000019 + SHTDN_REASON_MINOR_SYSTEMRESTORE = 0x0000001a + SHTDN_REASON_MINOR_TERMSRV = 0x00000020 + SHTDN_REASON_MINOR_DC_PROMOTION = 0x00000021 + SHTDN_REASON_MINOR_DC_DEMOTION = 
0x00000022 + SHTDN_REASON_UNKNOWN = SHTDN_REASON_MINOR_NONE + SHTDN_REASON_LEGACY_API = SHTDN_REASON_MAJOR_LEGACY_API | SHTDN_REASON_FLAG_PLANNED + SHTDN_REASON_VALID_BIT_MASK = 0xc0ffffff + + SHUTDOWN_NORETRY = 0x1 +) + +// Flags used for GetModuleHandleEx +const ( + GET_MODULE_HANDLE_EX_FLAG_PIN = 1 + GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT = 2 + GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS = 4 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index d461bed..74d721e 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -51,261 +51,301 @@ var ( modnetapi32 = NewLazySystemDLL("netapi32.dll") modwtsapi32 = NewLazySystemDLL("wtsapi32.dll") - procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") - procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") - procReportEventW = modadvapi32.NewProc("ReportEventW") - procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") - procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") - procCreateServiceW = modadvapi32.NewProc("CreateServiceW") - procOpenServiceW = modadvapi32.NewProc("OpenServiceW") - procDeleteService = modadvapi32.NewProc("DeleteService") - procStartServiceW = modadvapi32.NewProc("StartServiceW") - procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") - procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") - procControlService = modadvapi32.NewProc("ControlService") - procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") - procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") - procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") - procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") - procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") - procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") - procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") - procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") - procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") - procGetLastError = modkernel32.NewProc("GetLastError") - procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") - procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") - procFreeLibrary = modkernel32.NewProc("FreeLibrary") - procGetProcAddress = modkernel32.NewProc("GetProcAddress") - procGetVersion = modkernel32.NewProc("GetVersion") - procFormatMessageW = modkernel32.NewProc("FormatMessageW") - procExitProcess = modkernel32.NewProc("ExitProcess") - procIsWow64Process = modkernel32.NewProc("IsWow64Process") - procCreateFileW = modkernel32.NewProc("CreateFileW") - procReadFile = modkernel32.NewProc("ReadFile") - procWriteFile = modkernel32.NewProc("WriteFile") - procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") - procSetFilePointer = modkernel32.NewProc("SetFilePointer") - procCloseHandle = modkernel32.NewProc("CloseHandle") - procGetStdHandle = modkernel32.NewProc("GetStdHandle") - procSetStdHandle = modkernel32.NewProc("SetStdHandle") - procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") - procFindNextFileW = modkernel32.NewProc("FindNextFileW") - procFindClose = modkernel32.NewProc("FindClose") - procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") - procGetFileInformationByHandleEx = 
modkernel32.NewProc("GetFileInformationByHandleEx") - procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") - procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") - procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") - procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") - procDeleteFileW = modkernel32.NewProc("DeleteFileW") - procMoveFileW = modkernel32.NewProc("MoveFileW") - procMoveFileExW = modkernel32.NewProc("MoveFileExW") - procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") - procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") - procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") - procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") - procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") - procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") - procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") - procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") - procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") - procCancelIo = modkernel32.NewProc("CancelIo") - procCancelIoEx = modkernel32.NewProc("CancelIoEx") - procCreateProcessW = modkernel32.NewProc("CreateProcessW") - procOpenProcess = modkernel32.NewProc("OpenProcess") - procShellExecuteW = modshell32.NewProc("ShellExecuteW") - procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") - procTerminateProcess = modkernel32.NewProc("TerminateProcess") - procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") - procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") - procGetCurrentProcess = modkernel32.NewProc("GetCurrentProcess") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") - procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") - procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") - procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") - procGetTempPathW = modkernel32.NewProc("GetTempPathW") - procCreatePipe = modkernel32.NewProc("CreatePipe") - procGetFileType = modkernel32.NewProc("GetFileType") - procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") - procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") - procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") - procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") - procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") - procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") - procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") - procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") - procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") - procGetTickCount64 = modkernel32.NewProc("GetTickCount64") - procSetFileTime = modkernel32.NewProc("SetFileTime") - procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") - procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") - procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") - procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") - procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") - procLocalFree = modkernel32.NewProc("LocalFree") - procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") - 
procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") - procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") - procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") - procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") - procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") - procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") - procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") - procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") - procVirtualLock = modkernel32.NewProc("VirtualLock") - procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") - procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") - procVirtualFree = modkernel32.NewProc("VirtualFree") - procVirtualProtect = modkernel32.NewProc("VirtualProtect") - procTransmitFile = modmswsock.NewProc("TransmitFile") - procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") - procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") - procCertOpenStore = modcrypt32.NewProc("CertOpenStore") - procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") - procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") - procCertCloseStore = modcrypt32.NewProc("CertCloseStore") - procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") - procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") - procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") - procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") - procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") - procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") - procRegCloseKey = modadvapi32.NewProc("RegCloseKey") - procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") - procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") - procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") - procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") - procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") - procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") - procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") - procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") - procReadConsoleW = modkernel32.NewProc("ReadConsoleW") - procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") - procProcess32FirstW = modkernel32.NewProc("Process32FirstW") - procProcess32NextW = modkernel32.NewProc("Process32NextW") - procThread32First = modkernel32.NewProc("Thread32First") - procThread32Next = modkernel32.NewProc("Thread32Next") - procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") - procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") - procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") - procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") - procCreateEventW = modkernel32.NewProc("CreateEventW") - procCreateEventExW = modkernel32.NewProc("CreateEventExW") - procOpenEventW = modkernel32.NewProc("OpenEventW") - procSetEvent = modkernel32.NewProc("SetEvent") - procResetEvent = modkernel32.NewProc("ResetEvent") - procPulseEvent = modkernel32.NewProc("PulseEvent") - procSleepEx = modkernel32.NewProc("SleepEx") - procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") - procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") - 
procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") - procSetErrorMode = modkernel32.NewProc("SetErrorMode") - procResumeThread = modkernel32.NewProc("ResumeThread") - procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") - procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") - procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") - procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") - procGetProcessId = modkernel32.NewProc("GetProcessId") - procOpenThread = modkernel32.NewProc("OpenThread") - procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") - procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") - procFindFirstVolumeW = modkernel32.NewProc("FindFirstVolumeW") - procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") - procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") - procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") - procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") - procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") - procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") - procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") - procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") - procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") - procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") - procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") - procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") - procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") - procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") - procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") - procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") - procMessageBoxW = moduser32.NewProc("MessageBoxW") - procCLSIDFromString = modole32.NewProc("CLSIDFromString") - procStringFromGUID2 = modole32.NewProc("StringFromGUID2") - procCoCreateGuid = modole32.NewProc("CoCreateGuid") - procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") - procRtlGetVersion = modntdll.NewProc("RtlGetVersion") - procWSAStartup = modws2_32.NewProc("WSAStartup") - procWSACleanup = modws2_32.NewProc("WSACleanup") - procWSAIoctl = modws2_32.NewProc("WSAIoctl") - procsocket = modws2_32.NewProc("socket") - procsetsockopt = modws2_32.NewProc("setsockopt") - procgetsockopt = modws2_32.NewProc("getsockopt") - procbind = modws2_32.NewProc("bind") - procconnect = modws2_32.NewProc("connect") - procgetsockname = modws2_32.NewProc("getsockname") - procgetpeername = modws2_32.NewProc("getpeername") - proclisten = modws2_32.NewProc("listen") - procshutdown = modws2_32.NewProc("shutdown") - procclosesocket = modws2_32.NewProc("closesocket") - procAcceptEx = modmswsock.NewProc("AcceptEx") - procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") - procWSARecv = modws2_32.NewProc("WSARecv") - procWSASend = modws2_32.NewProc("WSASend") - procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") - procWSASendTo = modws2_32.NewProc("WSASendTo") - procgethostbyname = modws2_32.NewProc("gethostbyname") - procgetservbyname = modws2_32.NewProc("getservbyname") - procntohs = modws2_32.NewProc("ntohs") - procgetprotobyname = modws2_32.NewProc("getprotobyname") - procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") - 
procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") - procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") - procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") - procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") - procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") - procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") - procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") - procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") - procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") - procGetACP = modkernel32.NewProc("GetACP") - procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") - procTranslateNameW = modsecur32.NewProc("TranslateNameW") - procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") - procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") - procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") - procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") - procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") - procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") - procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") - procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") - procGetLengthSid = modadvapi32.NewProc("GetLengthSid") - procCopySid = modadvapi32.NewProc("CopySid") - procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") - procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") - procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") - procFreeSid = modadvapi32.NewProc("FreeSid") - procEqualSid = modadvapi32.NewProc("EqualSid") - procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") - procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") - procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") - procIsValidSid = modadvapi32.NewProc("IsValidSid") - procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") - procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procSetThreadToken = modadvapi32.NewProc("SetThreadToken") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") - procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") - procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") - procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") - procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") - procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") - procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") - procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") - procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") - procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procRegisterEventSourceW = modadvapi32.NewProc("RegisterEventSourceW") + procDeregisterEventSource = modadvapi32.NewProc("DeregisterEventSource") + procReportEventW = modadvapi32.NewProc("ReportEventW") + procOpenSCManagerW = modadvapi32.NewProc("OpenSCManagerW") + procCloseServiceHandle = modadvapi32.NewProc("CloseServiceHandle") + procCreateServiceW = 
modadvapi32.NewProc("CreateServiceW") + procOpenServiceW = modadvapi32.NewProc("OpenServiceW") + procDeleteService = modadvapi32.NewProc("DeleteService") + procStartServiceW = modadvapi32.NewProc("StartServiceW") + procQueryServiceStatus = modadvapi32.NewProc("QueryServiceStatus") + procQueryServiceLockStatusW = modadvapi32.NewProc("QueryServiceLockStatusW") + procControlService = modadvapi32.NewProc("ControlService") + procStartServiceCtrlDispatcherW = modadvapi32.NewProc("StartServiceCtrlDispatcherW") + procSetServiceStatus = modadvapi32.NewProc("SetServiceStatus") + procChangeServiceConfigW = modadvapi32.NewProc("ChangeServiceConfigW") + procQueryServiceConfigW = modadvapi32.NewProc("QueryServiceConfigW") + procChangeServiceConfig2W = modadvapi32.NewProc("ChangeServiceConfig2W") + procQueryServiceConfig2W = modadvapi32.NewProc("QueryServiceConfig2W") + procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW") + procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") + procNotifyServiceStatusChangeW = modadvapi32.NewProc("NotifyServiceStatusChangeW") + procGetLastError = modkernel32.NewProc("GetLastError") + procLoadLibraryW = modkernel32.NewProc("LoadLibraryW") + procLoadLibraryExW = modkernel32.NewProc("LoadLibraryExW") + procFreeLibrary = modkernel32.NewProc("FreeLibrary") + procGetProcAddress = modkernel32.NewProc("GetProcAddress") + procGetModuleFileNameW = modkernel32.NewProc("GetModuleFileNameW") + procGetModuleHandleExW = modkernel32.NewProc("GetModuleHandleExW") + procGetVersion = modkernel32.NewProc("GetVersion") + procFormatMessageW = modkernel32.NewProc("FormatMessageW") + procExitProcess = modkernel32.NewProc("ExitProcess") + procIsWow64Process = modkernel32.NewProc("IsWow64Process") + procCreateFileW = modkernel32.NewProc("CreateFileW") + procReadFile = modkernel32.NewProc("ReadFile") + procWriteFile = modkernel32.NewProc("WriteFile") + procGetOverlappedResult = modkernel32.NewProc("GetOverlappedResult") + procSetFilePointer = modkernel32.NewProc("SetFilePointer") + procCloseHandle = modkernel32.NewProc("CloseHandle") + procGetStdHandle = modkernel32.NewProc("GetStdHandle") + procSetStdHandle = modkernel32.NewProc("SetStdHandle") + procFindFirstFileW = modkernel32.NewProc("FindFirstFileW") + procFindNextFileW = modkernel32.NewProc("FindNextFileW") + procFindClose = modkernel32.NewProc("FindClose") + procGetFileInformationByHandle = modkernel32.NewProc("GetFileInformationByHandle") + procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx") + procGetCurrentDirectoryW = modkernel32.NewProc("GetCurrentDirectoryW") + procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procCreateDirectoryW = modkernel32.NewProc("CreateDirectoryW") + procRemoveDirectoryW = modkernel32.NewProc("RemoveDirectoryW") + procDeleteFileW = modkernel32.NewProc("DeleteFileW") + procMoveFileW = modkernel32.NewProc("MoveFileW") + procMoveFileExW = modkernel32.NewProc("MoveFileExW") + procLockFileEx = modkernel32.NewProc("LockFileEx") + procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") + procGetComputerNameW = modkernel32.NewProc("GetComputerNameW") + procGetComputerNameExW = modkernel32.NewProc("GetComputerNameExW") + procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") + procGetSystemTimeAsFileTime = modkernel32.NewProc("GetSystemTimeAsFileTime") + procGetSystemTimePreciseAsFileTime = modkernel32.NewProc("GetSystemTimePreciseAsFileTime") + procGetTimeZoneInformation = modkernel32.NewProc("GetTimeZoneInformation") + 
procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort") + procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus") + procPostQueuedCompletionStatus = modkernel32.NewProc("PostQueuedCompletionStatus") + procCancelIo = modkernel32.NewProc("CancelIo") + procCancelIoEx = modkernel32.NewProc("CancelIoEx") + procCreateProcessW = modkernel32.NewProc("CreateProcessW") + procOpenProcess = modkernel32.NewProc("OpenProcess") + procShellExecuteW = modshell32.NewProc("ShellExecuteW") + procSHGetKnownFolderPath = modshell32.NewProc("SHGetKnownFolderPath") + procTerminateProcess = modkernel32.NewProc("TerminateProcess") + procGetExitCodeProcess = modkernel32.NewProc("GetExitCodeProcess") + procGetStartupInfoW = modkernel32.NewProc("GetStartupInfoW") + procGetProcessTimes = modkernel32.NewProc("GetProcessTimes") + procDuplicateHandle = modkernel32.NewProc("DuplicateHandle") + procWaitForSingleObject = modkernel32.NewProc("WaitForSingleObject") + procWaitForMultipleObjects = modkernel32.NewProc("WaitForMultipleObjects") + procGetTempPathW = modkernel32.NewProc("GetTempPathW") + procCreatePipe = modkernel32.NewProc("CreatePipe") + procGetFileType = modkernel32.NewProc("GetFileType") + procCryptAcquireContextW = modadvapi32.NewProc("CryptAcquireContextW") + procCryptReleaseContext = modadvapi32.NewProc("CryptReleaseContext") + procCryptGenRandom = modadvapi32.NewProc("CryptGenRandom") + procGetEnvironmentStringsW = modkernel32.NewProc("GetEnvironmentStringsW") + procFreeEnvironmentStringsW = modkernel32.NewProc("FreeEnvironmentStringsW") + procGetEnvironmentVariableW = modkernel32.NewProc("GetEnvironmentVariableW") + procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") + procCreateEnvironmentBlock = moduserenv.NewProc("CreateEnvironmentBlock") + procDestroyEnvironmentBlock = moduserenv.NewProc("DestroyEnvironmentBlock") + procGetTickCount64 = modkernel32.NewProc("GetTickCount64") + procSetFileTime = modkernel32.NewProc("SetFileTime") + procGetFileAttributesW = modkernel32.NewProc("GetFileAttributesW") + procSetFileAttributesW = modkernel32.NewProc("SetFileAttributesW") + procGetFileAttributesExW = modkernel32.NewProc("GetFileAttributesExW") + procGetCommandLineW = modkernel32.NewProc("GetCommandLineW") + procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") + procLocalFree = modkernel32.NewProc("LocalFree") + procSetHandleInformation = modkernel32.NewProc("SetHandleInformation") + procFlushFileBuffers = modkernel32.NewProc("FlushFileBuffers") + procGetFullPathNameW = modkernel32.NewProc("GetFullPathNameW") + procGetLongPathNameW = modkernel32.NewProc("GetLongPathNameW") + procGetShortPathNameW = modkernel32.NewProc("GetShortPathNameW") + procCreateFileMappingW = modkernel32.NewProc("CreateFileMappingW") + procMapViewOfFile = modkernel32.NewProc("MapViewOfFile") + procUnmapViewOfFile = modkernel32.NewProc("UnmapViewOfFile") + procFlushViewOfFile = modkernel32.NewProc("FlushViewOfFile") + procVirtualLock = modkernel32.NewProc("VirtualLock") + procVirtualUnlock = modkernel32.NewProc("VirtualUnlock") + procVirtualAlloc = modkernel32.NewProc("VirtualAlloc") + procVirtualFree = modkernel32.NewProc("VirtualFree") + procVirtualProtect = modkernel32.NewProc("VirtualProtect") + procTransmitFile = modmswsock.NewProc("TransmitFile") + procReadDirectoryChangesW = modkernel32.NewProc("ReadDirectoryChangesW") + procCertOpenSystemStoreW = modcrypt32.NewProc("CertOpenSystemStoreW") + procCertOpenStore = modcrypt32.NewProc("CertOpenStore") + 
procCertEnumCertificatesInStore = modcrypt32.NewProc("CertEnumCertificatesInStore") + procCertAddCertificateContextToStore = modcrypt32.NewProc("CertAddCertificateContextToStore") + procCertCloseStore = modcrypt32.NewProc("CertCloseStore") + procCertGetCertificateChain = modcrypt32.NewProc("CertGetCertificateChain") + procCertFreeCertificateChain = modcrypt32.NewProc("CertFreeCertificateChain") + procCertCreateCertificateContext = modcrypt32.NewProc("CertCreateCertificateContext") + procCertFreeCertificateContext = modcrypt32.NewProc("CertFreeCertificateContext") + procCertVerifyCertificateChainPolicy = modcrypt32.NewProc("CertVerifyCertificateChainPolicy") + procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") + procRegCloseKey = modadvapi32.NewProc("RegCloseKey") + procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") + procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") + procGetCurrentProcessId = modkernel32.NewProc("GetCurrentProcessId") + procGetConsoleMode = modkernel32.NewProc("GetConsoleMode") + procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") + procGetConsoleScreenBufferInfo = modkernel32.NewProc("GetConsoleScreenBufferInfo") + procWriteConsoleW = modkernel32.NewProc("WriteConsoleW") + procReadConsoleW = modkernel32.NewProc("ReadConsoleW") + procCreateToolhelp32Snapshot = modkernel32.NewProc("CreateToolhelp32Snapshot") + procProcess32FirstW = modkernel32.NewProc("Process32FirstW") + procProcess32NextW = modkernel32.NewProc("Process32NextW") + procThread32First = modkernel32.NewProc("Thread32First") + procThread32Next = modkernel32.NewProc("Thread32Next") + procDeviceIoControl = modkernel32.NewProc("DeviceIoControl") + procCreateSymbolicLinkW = modkernel32.NewProc("CreateSymbolicLinkW") + procCreateHardLinkW = modkernel32.NewProc("CreateHardLinkW") + procGetCurrentThreadId = modkernel32.NewProc("GetCurrentThreadId") + procCreateEventW = modkernel32.NewProc("CreateEventW") + procCreateEventExW = modkernel32.NewProc("CreateEventExW") + procOpenEventW = modkernel32.NewProc("OpenEventW") + procSetEvent = modkernel32.NewProc("SetEvent") + procResetEvent = modkernel32.NewProc("ResetEvent") + procPulseEvent = modkernel32.NewProc("PulseEvent") + procCreateMutexW = modkernel32.NewProc("CreateMutexW") + procCreateMutexExW = modkernel32.NewProc("CreateMutexExW") + procOpenMutexW = modkernel32.NewProc("OpenMutexW") + procReleaseMutex = modkernel32.NewProc("ReleaseMutex") + procSleepEx = modkernel32.NewProc("SleepEx") + procCreateJobObjectW = modkernel32.NewProc("CreateJobObjectW") + procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject") + procTerminateJobObject = modkernel32.NewProc("TerminateJobObject") + procSetErrorMode = modkernel32.NewProc("SetErrorMode") + procResumeThread = modkernel32.NewProc("ResumeThread") + procSetPriorityClass = modkernel32.NewProc("SetPriorityClass") + procGetPriorityClass = modkernel32.NewProc("GetPriorityClass") + procSetInformationJobObject = modkernel32.NewProc("SetInformationJobObject") + procGenerateConsoleCtrlEvent = modkernel32.NewProc("GenerateConsoleCtrlEvent") + procGetProcessId = modkernel32.NewProc("GetProcessId") + procOpenThread = modkernel32.NewProc("OpenThread") + procSetProcessPriorityBoost = modkernel32.NewProc("SetProcessPriorityBoost") + procDefineDosDeviceW = modkernel32.NewProc("DefineDosDeviceW") + procDeleteVolumeMountPointW = modkernel32.NewProc("DeleteVolumeMountPointW") + procFindFirstVolumeW = 
modkernel32.NewProc("FindFirstVolumeW") + procFindFirstVolumeMountPointW = modkernel32.NewProc("FindFirstVolumeMountPointW") + procFindNextVolumeW = modkernel32.NewProc("FindNextVolumeW") + procFindNextVolumeMountPointW = modkernel32.NewProc("FindNextVolumeMountPointW") + procFindVolumeClose = modkernel32.NewProc("FindVolumeClose") + procFindVolumeMountPointClose = modkernel32.NewProc("FindVolumeMountPointClose") + procGetDiskFreeSpaceExW = modkernel32.NewProc("GetDiskFreeSpaceExW") + procGetDriveTypeW = modkernel32.NewProc("GetDriveTypeW") + procGetLogicalDrives = modkernel32.NewProc("GetLogicalDrives") + procGetLogicalDriveStringsW = modkernel32.NewProc("GetLogicalDriveStringsW") + procGetVolumeInformationW = modkernel32.NewProc("GetVolumeInformationW") + procGetVolumeInformationByHandleW = modkernel32.NewProc("GetVolumeInformationByHandleW") + procGetVolumeNameForVolumeMountPointW = modkernel32.NewProc("GetVolumeNameForVolumeMountPointW") + procGetVolumePathNameW = modkernel32.NewProc("GetVolumePathNameW") + procGetVolumePathNamesForVolumeNameW = modkernel32.NewProc("GetVolumePathNamesForVolumeNameW") + procQueryDosDeviceW = modkernel32.NewProc("QueryDosDeviceW") + procSetVolumeLabelW = modkernel32.NewProc("SetVolumeLabelW") + procSetVolumeMountPointW = modkernel32.NewProc("SetVolumeMountPointW") + procMessageBoxW = moduser32.NewProc("MessageBoxW") + procExitWindowsEx = moduser32.NewProc("ExitWindowsEx") + procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procSetProcessShutdownParameters = modkernel32.NewProc("SetProcessShutdownParameters") + procGetProcessShutdownParameters = modkernel32.NewProc("GetProcessShutdownParameters") + procCLSIDFromString = modole32.NewProc("CLSIDFromString") + procStringFromGUID2 = modole32.NewProc("StringFromGUID2") + procCoCreateGuid = modole32.NewProc("CoCreateGuid") + procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") + procRtlGetVersion = modntdll.NewProc("RtlGetVersion") + procRtlGetNtVersionNumbers = modntdll.NewProc("RtlGetNtVersionNumbers") + procWSAStartup = modws2_32.NewProc("WSAStartup") + procWSACleanup = modws2_32.NewProc("WSACleanup") + procWSAIoctl = modws2_32.NewProc("WSAIoctl") + procsocket = modws2_32.NewProc("socket") + procsetsockopt = modws2_32.NewProc("setsockopt") + procgetsockopt = modws2_32.NewProc("getsockopt") + procbind = modws2_32.NewProc("bind") + procconnect = modws2_32.NewProc("connect") + procgetsockname = modws2_32.NewProc("getsockname") + procgetpeername = modws2_32.NewProc("getpeername") + proclisten = modws2_32.NewProc("listen") + procshutdown = modws2_32.NewProc("shutdown") + procclosesocket = modws2_32.NewProc("closesocket") + procAcceptEx = modmswsock.NewProc("AcceptEx") + procGetAcceptExSockaddrs = modmswsock.NewProc("GetAcceptExSockaddrs") + procWSARecv = modws2_32.NewProc("WSARecv") + procWSASend = modws2_32.NewProc("WSASend") + procWSARecvFrom = modws2_32.NewProc("WSARecvFrom") + procWSASendTo = modws2_32.NewProc("WSASendTo") + procgethostbyname = modws2_32.NewProc("gethostbyname") + procgetservbyname = modws2_32.NewProc("getservbyname") + procntohs = modws2_32.NewProc("ntohs") + procgetprotobyname = modws2_32.NewProc("getprotobyname") + procDnsQuery_W = moddnsapi.NewProc("DnsQuery_W") + procDnsRecordListFree = moddnsapi.NewProc("DnsRecordListFree") + procDnsNameCompare_W = moddnsapi.NewProc("DnsNameCompare_W") + procGetAddrInfoW = modws2_32.NewProc("GetAddrInfoW") + procFreeAddrInfoW = modws2_32.NewProc("FreeAddrInfoW") + procGetIfEntry = modiphlpapi.NewProc("GetIfEntry") + 
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo") + procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes") + procWSAEnumProtocolsW = modws2_32.NewProc("WSAEnumProtocolsW") + procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses") + procGetACP = modkernel32.NewProc("GetACP") + procMultiByteToWideChar = modkernel32.NewProc("MultiByteToWideChar") + procTranslateNameW = modsecur32.NewProc("TranslateNameW") + procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") + procNetUserGetInfo = modnetapi32.NewProc("NetUserGetInfo") + procNetGetJoinInformation = modnetapi32.NewProc("NetGetJoinInformation") + procNetApiBufferFree = modnetapi32.NewProc("NetApiBufferFree") + procLookupAccountSidW = modadvapi32.NewProc("LookupAccountSidW") + procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW") + procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW") + procConvertStringSidToSidW = modadvapi32.NewProc("ConvertStringSidToSidW") + procGetLengthSid = modadvapi32.NewProc("GetLengthSid") + procCopySid = modadvapi32.NewProc("CopySid") + procAllocateAndInitializeSid = modadvapi32.NewProc("AllocateAndInitializeSid") + procCreateWellKnownSid = modadvapi32.NewProc("CreateWellKnownSid") + procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") + procFreeSid = modadvapi32.NewProc("FreeSid") + procEqualSid = modadvapi32.NewProc("EqualSid") + procGetSidIdentifierAuthority = modadvapi32.NewProc("GetSidIdentifierAuthority") + procGetSidSubAuthorityCount = modadvapi32.NewProc("GetSidSubAuthorityCount") + procGetSidSubAuthority = modadvapi32.NewProc("GetSidSubAuthority") + procIsValidSid = modadvapi32.NewProc("IsValidSid") + procCheckTokenMembership = modadvapi32.NewProc("CheckTokenMembership") + procOpenProcessToken = modadvapi32.NewProc("OpenProcessToken") + procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") + procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") + procRevertToSelf = modadvapi32.NewProc("RevertToSelf") + procSetThreadToken = modadvapi32.NewProc("SetThreadToken") + procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") + procAdjustTokenPrivileges = modadvapi32.NewProc("AdjustTokenPrivileges") + procAdjustTokenGroups = modadvapi32.NewProc("AdjustTokenGroups") + procGetTokenInformation = modadvapi32.NewProc("GetTokenInformation") + procSetTokenInformation = modadvapi32.NewProc("SetTokenInformation") + procDuplicateTokenEx = modadvapi32.NewProc("DuplicateTokenEx") + procGetUserProfileDirectoryW = moduserenv.NewProc("GetUserProfileDirectoryW") + procGetSystemDirectoryW = modkernel32.NewProc("GetSystemDirectoryW") + procGetWindowsDirectoryW = modkernel32.NewProc("GetWindowsDirectoryW") + procGetSystemWindowsDirectoryW = modkernel32.NewProc("GetSystemWindowsDirectoryW") + procWTSQueryUserToken = modwtsapi32.NewProc("WTSQueryUserToken") + procWTSEnumerateSessionsW = modwtsapi32.NewProc("WTSEnumerateSessionsW") + procWTSFreeMemory = modwtsapi32.NewProc("WTSFreeMemory") + procGetSecurityInfo = modadvapi32.NewProc("GetSecurityInfo") + procSetSecurityInfo = modadvapi32.NewProc("SetSecurityInfo") + procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW") + procSetNamedSecurityInfoW = modadvapi32.NewProc("SetNamedSecurityInfoW") + procBuildSecurityDescriptorW = modadvapi32.NewProc("BuildSecurityDescriptorW") + procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") + procGetSecurityDescriptorControl = 
modadvapi32.NewProc("GetSecurityDescriptorControl") + procGetSecurityDescriptorDacl = modadvapi32.NewProc("GetSecurityDescriptorDacl") + procGetSecurityDescriptorSacl = modadvapi32.NewProc("GetSecurityDescriptorSacl") + procGetSecurityDescriptorOwner = modadvapi32.NewProc("GetSecurityDescriptorOwner") + procGetSecurityDescriptorGroup = modadvapi32.NewProc("GetSecurityDescriptorGroup") + procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength") + procGetSecurityDescriptorRMControl = modadvapi32.NewProc("GetSecurityDescriptorRMControl") + procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") + procSetSecurityDescriptorControl = modadvapi32.NewProc("SetSecurityDescriptorControl") + procSetSecurityDescriptorDacl = modadvapi32.NewProc("SetSecurityDescriptorDacl") + procSetSecurityDescriptorSacl = modadvapi32.NewProc("SetSecurityDescriptorSacl") + procSetSecurityDescriptorOwner = modadvapi32.NewProc("SetSecurityDescriptorOwner") + procSetSecurityDescriptorGroup = modadvapi32.NewProc("SetSecurityDescriptorGroup") + procSetSecurityDescriptorRMControl = modadvapi32.NewProc("SetSecurityDescriptorRMControl") + procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW") + procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW") + procMakeAbsoluteSD = modadvapi32.NewProc("MakeAbsoluteSD") + procMakeSelfRelativeSD = modadvapi32.NewProc("MakeSelfRelativeSD") + procSetEntriesInAclW = modadvapi32.NewProc("SetEntriesInAclW") ) func RegisterEventSource(uncServerName *uint16, sourceName *uint16) (handle Handle, err error) { @@ -646,6 +686,31 @@ func _GetProcAddress(module Handle, procname *byte) (proc uintptr, err error) { return } +func GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetModuleFileNameW.Addr(), 3, uintptr(module), uintptr(unsafe.Pointer(filename)), uintptr(size)) + n = uint32(r0) + if n == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) { + r1, _, e1 := syscall.Syscall(procGetModuleHandleExW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(moduleName)), uintptr(unsafe.Pointer(module))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetVersion() (ver uint32, err error) { r0, _, e1 := syscall.Syscall(procGetVersion.Addr(), 0, 0, 0, 0) ver = uint32(r0) @@ -682,7 +747,14 @@ func ExitProcess(exitcode uint32) { } func IsWow64Process(handle Handle, isWow64 *bool) (err error) { - r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(isWow64)), 0) + var _p0 uint32 + if *isWow64 { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procIsWow64Process.Addr(), 2, uintptr(handle), uintptr(unsafe.Pointer(&_p0)), 0) + *isWow64 = _p0 != 0 if r1 == 0 { if e1 != 0 { err = errnoErr(e1) @@ -952,6 +1024,30 @@ func MoveFileEx(from *uint16, to *uint16, flags uint32) (err error) { return } +func LockFileEx(file Handle, flags uint32, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procLockFileEx.Addr(), 6, uintptr(file), uintptr(flags), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), 
uintptr(unsafe.Pointer(overlapped))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func UnlockFileEx(file Handle, reserved uint32, bytesLow uint32, bytesHigh uint32, overlapped *Overlapped) (err error) { + r1, _, e1 := syscall.Syscall6(procUnlockFileEx.Addr(), 5, uintptr(file), uintptr(reserved), uintptr(bytesLow), uintptr(bytesHigh), uintptr(unsafe.Pointer(overlapped)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetComputerName(buf *uint16, n *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetComputerNameW.Addr(), 2, uintptr(unsafe.Pointer(buf)), uintptr(unsafe.Pointer(n)), 0) if r1 == 0 { @@ -1111,7 +1207,7 @@ func OpenProcess(desiredAccess uint32, inheritHandle bool, processId uint32) (ha func ShellExecute(hwnd Handle, verb *uint16, file *uint16, args *uint16, cwd *uint16, showCmd int32) (err error) { r1, _, e1 := syscall.Syscall6(procShellExecuteW.Addr(), 6, uintptr(hwnd), uintptr(unsafe.Pointer(verb)), uintptr(unsafe.Pointer(file)), uintptr(unsafe.Pointer(args)), uintptr(unsafe.Pointer(cwd)), uintptr(showCmd)) - if r1 == 0 { + if r1 <= 32 { if e1 != 0 { err = errnoErr(e1) } else { @@ -1165,32 +1261,6 @@ func GetStartupInfo(startupInfo *StartupInfo) (err error) { return } -func GetCurrentProcess() (pseudoHandle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentProcess.Addr(), 0, 0, 0, 0) - pseudoHandle = Handle(r0) - if pseudoHandle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func GetCurrentThread() (pseudoHandle Handle, err error) { - r0, _, e1 := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - pseudoHandle = Handle(r0) - if pseudoHandle == 0 { - if e1 != 0 { - err = errnoErr(e1) - } else { - err = syscall.EINVAL - } - } - return -} - func GetProcessTimes(handle Handle, creationTime *Filetime, exitTime *Filetime, kernelTime *Filetime, userTime *Filetime) (err error) { r1, _, e1 := syscall.Syscall6(procGetProcessTimes.Addr(), 5, uintptr(handle), uintptr(unsafe.Pointer(creationTime)), uintptr(unsafe.Pointer(exitTime)), uintptr(unsafe.Pointer(kernelTime)), uintptr(unsafe.Pointer(userTime)), 0) if r1 == 0 { @@ -2105,6 +2175,69 @@ func PulseEvent(event Handle) (err error) { return } +func CreateMutex(mutexAttrs *SecurityAttributes, initialOwner bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if initialOwner { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procCreateMutexW.Addr(), 3, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func CreateMutexEx(mutexAttrs *SecurityAttributes, name *uint16, flags uint32, desiredAccess uint32) (handle Handle, err error) { + r0, _, e1 := syscall.Syscall6(procCreateMutexExW.Addr(), 4, uintptr(unsafe.Pointer(mutexAttrs)), uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(desiredAccess), 0, 0) + handle = Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func OpenMutex(desiredAccess uint32, inheritHandle bool, name *uint16) (handle Handle, err error) { + var _p0 uint32 + if inheritHandle { + _p0 = 1 + } else { + _p0 = 0 + } + r0, _, e1 := syscall.Syscall(procOpenMutexW.Addr(), 3, uintptr(desiredAccess), uintptr(_p0), uintptr(unsafe.Pointer(name))) + handle = 
Handle(r0) + if handle == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func ReleaseMutex(mutex Handle) (err error) { + r1, _, e1 := syscall.Syscall(procReleaseMutex.Addr(), 1, uintptr(mutex), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func SleepEx(milliseconds uint32, alertable bool) (ret uint32) { var _p0 uint32 if alertable { @@ -2255,6 +2388,24 @@ func OpenThread(desiredAccess uint32, inheritHandle bool, threadId uint32) (hand return } +func SetProcessPriorityBoost(process Handle, disable bool) (err error) { + var _p0 uint32 + if disable { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetProcessPriorityBoost.Addr(), 2, uintptr(process), uintptr(_p0), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func DefineDosDevice(flags uint32, deviceName *uint16, targetPath *uint16) (err error) { r1, _, e1 := syscall.Syscall(procDefineDosDeviceW.Addr(), 3, uintptr(flags), uintptr(unsafe.Pointer(deviceName)), uintptr(unsafe.Pointer(targetPath))) if r1 == 0 { @@ -2353,6 +2504,18 @@ func FindVolumeMountPointClose(findVolumeMountPoint Handle) (err error) { return } +func GetDiskFreeSpaceEx(directoryName *uint16, freeBytesAvailableToCaller *uint64, totalNumberOfBytes *uint64, totalNumberOfFreeBytes *uint64) (err error) { + r1, _, e1 := syscall.Syscall6(procGetDiskFreeSpaceExW.Addr(), 4, uintptr(unsafe.Pointer(directoryName)), uintptr(unsafe.Pointer(freeBytesAvailableToCaller)), uintptr(unsafe.Pointer(totalNumberOfBytes)), uintptr(unsafe.Pointer(totalNumberOfFreeBytes)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func GetDriveType(rootPathName *uint16) (driveType uint32) { r0, _, _ := syscall.Syscall(procGetDriveTypeW.Addr(), 1, uintptr(unsafe.Pointer(rootPathName)), 0, 0) driveType = uint32(r0) @@ -2495,6 +2658,66 @@ func MessageBox(hwnd Handle, text *uint16, caption *uint16, boxtype uint32) (ret return } +func ExitWindowsEx(flags uint32, reason uint32) (err error) { + r1, _, e1 := syscall.Syscall(procExitWindowsEx.Addr(), 2, uintptr(flags), uintptr(reason), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint32, forceAppsClosed bool, rebootAfterShutdown bool, reason uint32) (err error) { + var _p0 uint32 + if forceAppsClosed { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if rebootAfterShutdown { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procInitiateSystemShutdownExW.Addr(), 6, uintptr(unsafe.Pointer(machineName)), uintptr(unsafe.Pointer(message)), uintptr(timeout), uintptr(_p0), uintptr(_p1), uintptr(reason)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func SetProcessShutdownParameters(level uint32, flags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetProcessShutdownParameters.Addr(), 2, uintptr(level), uintptr(flags), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func GetProcessShutdownParameters(level *uint32, flags *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetProcessShutdownParameters.Addr(), 2, uintptr(unsafe.Pointer(level)), uintptr(unsafe.Pointer(flags)), 0) + if r1 == 0 { + if e1 != 0 { + err = 
errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func clsidFromString(lpsz *uint16, pclsid *GUID) (ret error) { r0, _, _ := syscall.Syscall(procCLSIDFromString.Addr(), 2, uintptr(unsafe.Pointer(lpsz)), uintptr(unsafe.Pointer(pclsid)), 0) if r0 != 0 { @@ -2530,6 +2753,11 @@ func rtlGetVersion(info *OsVersionInfoEx) (ret error) { return } +func rtlGetNtVersionNumbers(majorVersion *uint32, minorVersion *uint32, buildNumber *uint32) { + syscall.Syscall(procRtlGetNtVersionNumbers.Addr(), 3, uintptr(unsafe.Pointer(majorVersion)), uintptr(unsafe.Pointer(minorVersion)), uintptr(unsafe.Pointer(buildNumber))) + return +} + func WSAStartup(verreq uint32, data *WSAData) (sockerr error) { r0, _, _ := syscall.Syscall(procWSAStartup.Addr(), 2, uintptr(verreq), uintptr(unsafe.Pointer(data)), 0) if r0 != 0 { @@ -3307,6 +3535,32 @@ func getSystemDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { return } +func getWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSystemWindowsDirectory(dir *uint16, dirLen uint32) (len uint32, err error) { + r0, _, e1 := syscall.Syscall(procGetSystemWindowsDirectoryW.Addr(), 2, uintptr(unsafe.Pointer(dir)), uintptr(dirLen), 0) + len = uint32(r0) + if len == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + func WTSQueryUserToken(session uint32, token *Token) (err error) { r1, _, e1 := syscall.Syscall(procWTSQueryUserToken.Addr(), 2, uintptr(session), uintptr(unsafe.Pointer(token)), 0) if r1 == 0 { @@ -3335,3 +3589,358 @@ func WTSFreeMemory(ptr uintptr) { syscall.Syscall(procWTSFreeMemory.Addr(), 1, uintptr(ptr), 0, 0) return } + +func getSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procGetSecurityInfo.Addr(), 8, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetSecurityInfo(handle Handle, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) { + syscall.Syscall9(procSetSecurityInfo.Addr(), 7, uintptr(handle), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + return +} + +func getNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _getNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl, sd) +} + +func _getNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner **SID, group **SID, dacl **ACL, sacl **ACL, sd **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := 
syscall.Syscall9(procGetNamedSecurityInfoW.Addr(), 8, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(sd)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func SetNamedSecurityInfo(objectName string, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + var _p0 *uint16 + _p0, ret = syscall.UTF16PtrFromString(objectName) + if ret != nil { + return + } + return _SetNamedSecurityInfo(_p0, objectType, securityInformation, owner, group, dacl, sacl) +} + +func _SetNamedSecurityInfo(objectName *uint16, objectType SE_OBJECT_TYPE, securityInformation SECURITY_INFORMATION, owner *SID, group *SID, dacl *ACL, sacl *ACL) (ret error) { + r0, _, _ := syscall.Syscall9(procSetNamedSecurityInfoW.Addr(), 7, uintptr(unsafe.Pointer(objectName)), uintptr(objectType), uintptr(securityInformation), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(sacl)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func buildSecurityDescriptor(owner *TRUSTEE, group *TRUSTEE, countAccessEntries uint32, accessEntries *EXPLICIT_ACCESS, countAuditEntries uint32, auditEntries *EXPLICIT_ACCESS, oldSecurityDescriptor *SECURITY_DESCRIPTOR, sizeNewSecurityDescriptor *uint32, newSecurityDescriptor **SECURITY_DESCRIPTOR) (ret error) { + r0, _, _ := syscall.Syscall9(procBuildSecurityDescriptorW.Addr(), 9, uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(group)), uintptr(countAccessEntries), uintptr(unsafe.Pointer(accessEntries)), uintptr(countAuditEntries), uintptr(unsafe.Pointer(auditEntries)), uintptr(unsafe.Pointer(oldSecurityDescriptor)), uintptr(unsafe.Pointer(sizeNewSecurityDescriptor)), uintptr(unsafe.Pointer(newSecurityDescriptor))) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func initializeSecurityDescriptor(absoluteSD *SECURITY_DESCRIPTOR, revision uint32) (err error) { + r1, _, e1 := syscall.Syscall(procInitializeSecurityDescriptor.Addr(), 2, uintptr(unsafe.Pointer(absoluteSD)), uintptr(revision), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, control *SECURITY_DESCRIPTOR_CONTROL, revision *uint32) (err error) { + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(control)), uintptr(unsafe.Pointer(revision))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent *bool, dacl **ACL, daclDefaulted *bool) (err error) { + var _p0 uint32 + if *daclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if *daclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *daclPresent = _p0 != 0 + *daclDefaulted = _p1 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent *bool, sacl **ACL, saclDefaulted *bool) (err error) 
{ + var _p0 uint32 + if *saclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if *saclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procGetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(&_p0)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(&_p1)), 0, 0) + *saclPresent = _p0 != 0 + *saclDefaulted = _p1 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner **SID, ownerDefaulted *bool) (err error) { + var _p0 uint32 + if *ownerDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(&_p0))) + *ownerDefaulted = _p0 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group **SID, groupDefaulted *bool) (err error) { + var _p0 uint32 + if *groupDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procGetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(&_p0))) + *groupDefaulted = _p0 != 0 + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func getSecurityDescriptorLength(sd *SECURITY_DESCRIPTOR) (len uint32) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + len = uint32(r0) + return +} + +func getSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) (ret error) { + r0, _, _ := syscall.Syscall(procGetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { + r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) + isValid = r0 != 0 + return +} + +func setSecurityDescriptorControl(sd *SECURITY_DESCRIPTOR, controlBitsOfInterest SECURITY_DESCRIPTOR_CONTROL, controlBitsToSet SECURITY_DESCRIPTOR_CONTROL) (err error) { + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorControl.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(controlBitsOfInterest), uintptr(controlBitsToSet)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorDacl(sd *SECURITY_DESCRIPTOR, daclPresent bool, dacl *ACL, daclDefaulted bool) (err error) { + var _p0 uint32 + if daclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if daclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorDacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), uintptr(_p0), uintptr(unsafe.Pointer(dacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorSacl(sd *SECURITY_DESCRIPTOR, saclPresent bool, sacl *ACL, saclDefaulted bool) (err error) { + var _p0 uint32 + if saclPresent { + _p0 = 1 + } else { + _p0 = 0 + } + var _p1 uint32 + if saclDefaulted { + _p1 = 1 + } else { + _p1 = 0 + } + r1, _, e1 := syscall.Syscall6(procSetSecurityDescriptorSacl.Addr(), 4, uintptr(unsafe.Pointer(sd)), 
uintptr(_p0), uintptr(unsafe.Pointer(sacl)), uintptr(_p1), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorOwner(sd *SECURITY_DESCRIPTOR, owner *SID, ownerDefaulted bool) (err error) { + var _p0 uint32 + if ownerDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorOwner.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(owner)), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorGroup(sd *SECURITY_DESCRIPTOR, group *SID, groupDefaulted bool) (err error) { + var _p0 uint32 + if groupDefaulted { + _p0 = 1 + } else { + _p0 = 0 + } + r1, _, e1 := syscall.Syscall(procSetSecurityDescriptorGroup.Addr(), 3, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(group)), uintptr(_p0)) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setSecurityDescriptorRMControl(sd *SECURITY_DESCRIPTOR, rmControl *uint8) { + syscall.Syscall(procSetSecurityDescriptorRMControl.Addr(), 2, uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(rmControl)), 0) + return +} + +func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(str) + if err != nil { + return + } + return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) +} + +func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd **SECURITY_DESCRIPTOR, size *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func convertSecurityDescriptorToStringSecurityDescriptor(sd *SECURITY_DESCRIPTOR, revision uint32, securityInformation SECURITY_INFORMATION, str **uint16, strLen *uint32) (err error) { + r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(securityInformation), uintptr(unsafe.Pointer(str)), uintptr(unsafe.Pointer(strLen)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func makeAbsoluteSD(selfRelativeSD *SECURITY_DESCRIPTOR, absoluteSD *SECURITY_DESCRIPTOR, absoluteSDSize *uint32, dacl *ACL, daclSize *uint32, sacl *ACL, saclSize *uint32, owner *SID, ownerSize *uint32, group *SID, groupSize *uint32) (err error) { + r1, _, e1 := syscall.Syscall12(procMakeAbsoluteSD.Addr(), 11, uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(absoluteSDSize)), uintptr(unsafe.Pointer(dacl)), uintptr(unsafe.Pointer(daclSize)), uintptr(unsafe.Pointer(sacl)), uintptr(unsafe.Pointer(saclSize)), uintptr(unsafe.Pointer(owner)), uintptr(unsafe.Pointer(ownerSize)), uintptr(unsafe.Pointer(group)), uintptr(unsafe.Pointer(groupSize)), 0) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) { + r1, _, 
e1 := syscall.Syscall(procMakeSelfRelativeSD.Addr(), 3, uintptr(unsafe.Pointer(absoluteSD)), uintptr(unsafe.Pointer(selfRelativeSD)), uintptr(unsafe.Pointer(selfRelativeSDSize))) + if r1 == 0 { + if e1 != 0 { + err = errnoErr(e1) + } else { + err = syscall.EINVAL + } + } + return +} + +func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) { + r0, _, _ := syscall.Syscall6(procSetEntriesInAclW.Addr(), 4, uintptr(countExplicitEntries), uintptr(unsafe.Pointer(explicitEntries)), uintptr(unsafe.Pointer(oldACL)), uintptr(unsafe.Pointer(newACL)), 0, 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} diff --git a/vendor/golang.org/x/text/transform/transform.go b/vendor/golang.org/x/text/transform/transform.go index fe47b9b..520b9ad 100644 --- a/vendor/golang.org/x/text/transform/transform.go +++ b/vendor/golang.org/x/text/transform/transform.go @@ -78,8 +78,8 @@ type SpanningTransformer interface { // considering the error err. // // A nil error means that all input bytes are known to be identical to the - // output produced by the Transformer. A nil error can be be returned - // regardless of whether atEOF is true. If err is nil, then then n must + // output produced by the Transformer. A nil error can be returned + // regardless of whether atEOF is true. If err is nil, then n must // equal len(src); the converse is not necessarily true. // // ErrEndOfSpan means that the Transformer output may differ from the @@ -493,7 +493,7 @@ func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err erro return dstL.n, srcL.p, err } -// Deprecated: use runes.Remove instead. +// Deprecated: Use runes.Remove instead. func RemoveFunc(f func(r rune) bool) Transformer { return removeF(f) } diff --git a/vendor/golang.org/x/text/unicode/bidi/bidi.go b/vendor/golang.org/x/text/unicode/bidi/bidi.go index 3fc4a62..e8edc54 100644 --- a/vendor/golang.org/x/text/unicode/bidi/bidi.go +++ b/vendor/golang.org/x/text/unicode/bidi/bidi.go @@ -6,7 +6,7 @@ // Package bidi contains functionality for bidirectional text support. // -// See http://www.unicode.org/reports/tr9. +// See https://www.unicode.org/reports/tr9. // // NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways // and without notice. diff --git a/vendor/golang.org/x/text/unicode/bidi/bracket.go b/vendor/golang.org/x/text/unicode/bidi/bracket.go index 601e259..1853939 100644 --- a/vendor/golang.org/x/text/unicode/bidi/bracket.go +++ b/vendor/golang.org/x/text/unicode/bidi/bracket.go @@ -12,7 +12,7 @@ import ( // This file contains a port of the reference implementation of the // Bidi Parentheses Algorithm: -// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java +// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java // // The implementation in this file covers definitions BD14-BD16 and rule N0 // of UAX#9. @@ -246,7 +246,7 @@ func (p *bracketPairer) getStrongTypeN0(index int) Class { // assuming the given embedding direction. // // It returns ON if no strong type is found. If a single strong type is found, -// it returns this this type. Otherwise it returns the embedding direction. +// it returns this type. Otherwise it returns the embedding direction. // // TODO: use separate type for "strong" directionality. 
func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class { diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index d4c1399..48d1440 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -7,7 +7,7 @@ package bidi import "log" // This implementation is a port based on the reference implementation found at: -// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ +// https://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/ // // described in Unicode Bidirectional Algorithm (UAX #9). // diff --git a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go index 2e1ff19..d8c94e1 100644 --- a/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/bidi/tables10.0.0.go @@ -1,6 +1,6 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.10 +// +build go1.10,!go1.13 package bidi diff --git a/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go new file mode 100644 index 0000000..022e3c6 --- /dev/null +++ b/vendor/golang.org/x/text/unicode/bidi/tables11.0.0.go @@ -0,0 +1,1887 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package bidi + +// UnicodeVersion is the Unicode version from which the tables in this package are derived. +const UnicodeVersion = "11.0.0" + +// xorMasks contains masks to be xor-ed with brackets to get the reverse +// version. +var xorMasks = []int32{ // 8 elements + 0, 1, 6, 7, 3, 15, 29, 63, +} // Size: 56 bytes + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookup(s []byte) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *bidiTrie) lookupUnsafe(s []byte) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *bidiTrie) lookupString(s string) (v uint8, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return bidiValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := bidiIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = bidiIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = bidiIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *bidiTrie) lookupStringUnsafe(s string) uint8 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return bidiValues[c0] + } + i := bidiIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = bidiIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// bidiTrie. Total size: 16512 bytes (16.12 KiB). Checksum: 2a9cf1317f2ffaa. +type bidiTrie struct{} + +func newBidiTrie(i int) *bidiTrie { + return &bidiTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *bidiTrie) lookupValue(n uint32, b byte) uint8 { + switch { + default: + return uint8(bidiValues[n<<6+uint32(b)]) + } +} + +// bidiValues: 234 blocks, 14976 entries, 14976 bytes +// The third block is the zero block. 
+var bidiValues = [14976]uint8{ + // Block 0x0, offset 0x0 + 0x00: 0x000b, 0x01: 0x000b, 0x02: 0x000b, 0x03: 0x000b, 0x04: 0x000b, 0x05: 0x000b, + 0x06: 0x000b, 0x07: 0x000b, 0x08: 0x000b, 0x09: 0x0008, 0x0a: 0x0007, 0x0b: 0x0008, + 0x0c: 0x0009, 0x0d: 0x0007, 0x0e: 0x000b, 0x0f: 0x000b, 0x10: 0x000b, 0x11: 0x000b, + 0x12: 0x000b, 0x13: 0x000b, 0x14: 0x000b, 0x15: 0x000b, 0x16: 0x000b, 0x17: 0x000b, + 0x18: 0x000b, 0x19: 0x000b, 0x1a: 0x000b, 0x1b: 0x000b, 0x1c: 0x0007, 0x1d: 0x0007, + 0x1e: 0x0007, 0x1f: 0x0008, 0x20: 0x0009, 0x21: 0x000a, 0x22: 0x000a, 0x23: 0x0004, + 0x24: 0x0004, 0x25: 0x0004, 0x26: 0x000a, 0x27: 0x000a, 0x28: 0x003a, 0x29: 0x002a, + 0x2a: 0x000a, 0x2b: 0x0003, 0x2c: 0x0006, 0x2d: 0x0003, 0x2e: 0x0006, 0x2f: 0x0006, + 0x30: 0x0002, 0x31: 0x0002, 0x32: 0x0002, 0x33: 0x0002, 0x34: 0x0002, 0x35: 0x0002, + 0x36: 0x0002, 0x37: 0x0002, 0x38: 0x0002, 0x39: 0x0002, 0x3a: 0x0006, 0x3b: 0x000a, + 0x3c: 0x000a, 0x3d: 0x000a, 0x3e: 0x000a, 0x3f: 0x000a, + // Block 0x1, offset 0x40 + 0x40: 0x000a, + 0x5b: 0x005a, 0x5c: 0x000a, 0x5d: 0x004a, + 0x5e: 0x000a, 0x5f: 0x000a, 0x60: 0x000a, + 0x7b: 0x005a, + 0x7c: 0x000a, 0x7d: 0x004a, 0x7e: 0x000a, 0x7f: 0x000b, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x000b, 0xc1: 0x000b, 0xc2: 0x000b, 0xc3: 0x000b, 0xc4: 0x000b, 0xc5: 0x0007, + 0xc6: 0x000b, 0xc7: 0x000b, 0xc8: 0x000b, 0xc9: 0x000b, 0xca: 0x000b, 0xcb: 0x000b, + 0xcc: 0x000b, 0xcd: 0x000b, 0xce: 0x000b, 0xcf: 0x000b, 0xd0: 0x000b, 0xd1: 0x000b, + 0xd2: 0x000b, 0xd3: 0x000b, 0xd4: 0x000b, 0xd5: 0x000b, 0xd6: 0x000b, 0xd7: 0x000b, + 0xd8: 0x000b, 0xd9: 0x000b, 0xda: 0x000b, 0xdb: 0x000b, 0xdc: 0x000b, 0xdd: 0x000b, + 0xde: 0x000b, 0xdf: 0x000b, 0xe0: 0x0006, 0xe1: 0x000a, 0xe2: 0x0004, 0xe3: 0x0004, + 0xe4: 0x0004, 0xe5: 0x0004, 0xe6: 0x000a, 0xe7: 0x000a, 0xe8: 0x000a, 0xe9: 0x000a, + 0xeb: 0x000a, 0xec: 0x000a, 0xed: 0x000b, 0xee: 0x000a, 0xef: 0x000a, + 0xf0: 0x0004, 0xf1: 0x0004, 0xf2: 0x0002, 0xf3: 0x0002, 0xf4: 0x000a, + 0xf6: 0x000a, 0xf7: 0x000a, 0xf8: 0x000a, 0xf9: 0x0002, 0xfb: 0x000a, + 0xfc: 0x000a, 0xfd: 0x000a, 0xfe: 0x000a, 0xff: 0x000a, + // Block 0x4, offset 0x100 + 0x117: 0x000a, + 0x137: 0x000a, + // Block 0x5, offset 0x140 + 0x179: 0x000a, 0x17a: 0x000a, + // Block 0x6, offset 0x180 + 0x182: 0x000a, 0x183: 0x000a, 0x184: 0x000a, 0x185: 0x000a, + 0x186: 0x000a, 0x187: 0x000a, 0x188: 0x000a, 0x189: 0x000a, 0x18a: 0x000a, 0x18b: 0x000a, + 0x18c: 0x000a, 0x18d: 0x000a, 0x18e: 0x000a, 0x18f: 0x000a, + 0x192: 0x000a, 0x193: 0x000a, 0x194: 0x000a, 0x195: 0x000a, 0x196: 0x000a, 0x197: 0x000a, + 0x198: 0x000a, 0x199: 0x000a, 0x19a: 0x000a, 0x19b: 0x000a, 0x19c: 0x000a, 0x19d: 0x000a, + 0x19e: 0x000a, 0x19f: 0x000a, + 0x1a5: 0x000a, 0x1a6: 0x000a, 0x1a7: 0x000a, 0x1a8: 0x000a, 0x1a9: 0x000a, + 0x1aa: 0x000a, 0x1ab: 0x000a, 0x1ac: 0x000a, 0x1ad: 0x000a, 0x1af: 0x000a, + 0x1b0: 0x000a, 0x1b1: 0x000a, 0x1b2: 0x000a, 0x1b3: 0x000a, 0x1b4: 0x000a, 0x1b5: 0x000a, + 0x1b6: 0x000a, 0x1b7: 0x000a, 0x1b8: 0x000a, 0x1b9: 0x000a, 0x1ba: 0x000a, 0x1bb: 0x000a, + 0x1bc: 0x000a, 0x1bd: 0x000a, 0x1be: 0x000a, 0x1bf: 0x000a, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x000c, 0x1c1: 0x000c, 0x1c2: 0x000c, 0x1c3: 0x000c, 0x1c4: 0x000c, 0x1c5: 0x000c, + 0x1c6: 0x000c, 0x1c7: 0x000c, 0x1c8: 0x000c, 0x1c9: 0x000c, 0x1ca: 0x000c, 0x1cb: 0x000c, + 0x1cc: 0x000c, 0x1cd: 0x000c, 0x1ce: 0x000c, 0x1cf: 0x000c, 0x1d0: 0x000c, 0x1d1: 0x000c, + 0x1d2: 0x000c, 0x1d3: 0x000c, 0x1d4: 0x000c, 0x1d5: 0x000c, 0x1d6: 0x000c, 0x1d7: 0x000c, + 0x1d8: 0x000c, 0x1d9: 0x000c, 0x1da: 0x000c, 0x1db: 
0x000c, 0x1dc: 0x000c, 0x1dd: 0x000c, + 0x1de: 0x000c, 0x1df: 0x000c, 0x1e0: 0x000c, 0x1e1: 0x000c, 0x1e2: 0x000c, 0x1e3: 0x000c, + 0x1e4: 0x000c, 0x1e5: 0x000c, 0x1e6: 0x000c, 0x1e7: 0x000c, 0x1e8: 0x000c, 0x1e9: 0x000c, + 0x1ea: 0x000c, 0x1eb: 0x000c, 0x1ec: 0x000c, 0x1ed: 0x000c, 0x1ee: 0x000c, 0x1ef: 0x000c, + 0x1f0: 0x000c, 0x1f1: 0x000c, 0x1f2: 0x000c, 0x1f3: 0x000c, 0x1f4: 0x000c, 0x1f5: 0x000c, + 0x1f6: 0x000c, 0x1f7: 0x000c, 0x1f8: 0x000c, 0x1f9: 0x000c, 0x1fa: 0x000c, 0x1fb: 0x000c, + 0x1fc: 0x000c, 0x1fd: 0x000c, 0x1fe: 0x000c, 0x1ff: 0x000c, + // Block 0x8, offset 0x200 + 0x200: 0x000c, 0x201: 0x000c, 0x202: 0x000c, 0x203: 0x000c, 0x204: 0x000c, 0x205: 0x000c, + 0x206: 0x000c, 0x207: 0x000c, 0x208: 0x000c, 0x209: 0x000c, 0x20a: 0x000c, 0x20b: 0x000c, + 0x20c: 0x000c, 0x20d: 0x000c, 0x20e: 0x000c, 0x20f: 0x000c, 0x210: 0x000c, 0x211: 0x000c, + 0x212: 0x000c, 0x213: 0x000c, 0x214: 0x000c, 0x215: 0x000c, 0x216: 0x000c, 0x217: 0x000c, + 0x218: 0x000c, 0x219: 0x000c, 0x21a: 0x000c, 0x21b: 0x000c, 0x21c: 0x000c, 0x21d: 0x000c, + 0x21e: 0x000c, 0x21f: 0x000c, 0x220: 0x000c, 0x221: 0x000c, 0x222: 0x000c, 0x223: 0x000c, + 0x224: 0x000c, 0x225: 0x000c, 0x226: 0x000c, 0x227: 0x000c, 0x228: 0x000c, 0x229: 0x000c, + 0x22a: 0x000c, 0x22b: 0x000c, 0x22c: 0x000c, 0x22d: 0x000c, 0x22e: 0x000c, 0x22f: 0x000c, + 0x234: 0x000a, 0x235: 0x000a, + 0x23e: 0x000a, + // Block 0x9, offset 0x240 + 0x244: 0x000a, 0x245: 0x000a, + 0x247: 0x000a, + // Block 0xa, offset 0x280 + 0x2b6: 0x000a, + // Block 0xb, offset 0x2c0 + 0x2c3: 0x000c, 0x2c4: 0x000c, 0x2c5: 0x000c, + 0x2c6: 0x000c, 0x2c7: 0x000c, 0x2c8: 0x000c, 0x2c9: 0x000c, + // Block 0xc, offset 0x300 + 0x30a: 0x000a, + 0x30d: 0x000a, 0x30e: 0x000a, 0x30f: 0x0004, 0x310: 0x0001, 0x311: 0x000c, + 0x312: 0x000c, 0x313: 0x000c, 0x314: 0x000c, 0x315: 0x000c, 0x316: 0x000c, 0x317: 0x000c, + 0x318: 0x000c, 0x319: 0x000c, 0x31a: 0x000c, 0x31b: 0x000c, 0x31c: 0x000c, 0x31d: 0x000c, + 0x31e: 0x000c, 0x31f: 0x000c, 0x320: 0x000c, 0x321: 0x000c, 0x322: 0x000c, 0x323: 0x000c, + 0x324: 0x000c, 0x325: 0x000c, 0x326: 0x000c, 0x327: 0x000c, 0x328: 0x000c, 0x329: 0x000c, + 0x32a: 0x000c, 0x32b: 0x000c, 0x32c: 0x000c, 0x32d: 0x000c, 0x32e: 0x000c, 0x32f: 0x000c, + 0x330: 0x000c, 0x331: 0x000c, 0x332: 0x000c, 0x333: 0x000c, 0x334: 0x000c, 0x335: 0x000c, + 0x336: 0x000c, 0x337: 0x000c, 0x338: 0x000c, 0x339: 0x000c, 0x33a: 0x000c, 0x33b: 0x000c, + 0x33c: 0x000c, 0x33d: 0x000c, 0x33e: 0x0001, 0x33f: 0x000c, + // Block 0xd, offset 0x340 + 0x340: 0x0001, 0x341: 0x000c, 0x342: 0x000c, 0x343: 0x0001, 0x344: 0x000c, 0x345: 0x000c, + 0x346: 0x0001, 0x347: 0x000c, 0x348: 0x0001, 0x349: 0x0001, 0x34a: 0x0001, 0x34b: 0x0001, + 0x34c: 0x0001, 0x34d: 0x0001, 0x34e: 0x0001, 0x34f: 0x0001, 0x350: 0x0001, 0x351: 0x0001, + 0x352: 0x0001, 0x353: 0x0001, 0x354: 0x0001, 0x355: 0x0001, 0x356: 0x0001, 0x357: 0x0001, + 0x358: 0x0001, 0x359: 0x0001, 0x35a: 0x0001, 0x35b: 0x0001, 0x35c: 0x0001, 0x35d: 0x0001, + 0x35e: 0x0001, 0x35f: 0x0001, 0x360: 0x0001, 0x361: 0x0001, 0x362: 0x0001, 0x363: 0x0001, + 0x364: 0x0001, 0x365: 0x0001, 0x366: 0x0001, 0x367: 0x0001, 0x368: 0x0001, 0x369: 0x0001, + 0x36a: 0x0001, 0x36b: 0x0001, 0x36c: 0x0001, 0x36d: 0x0001, 0x36e: 0x0001, 0x36f: 0x0001, + 0x370: 0x0001, 0x371: 0x0001, 0x372: 0x0001, 0x373: 0x0001, 0x374: 0x0001, 0x375: 0x0001, + 0x376: 0x0001, 0x377: 0x0001, 0x378: 0x0001, 0x379: 0x0001, 0x37a: 0x0001, 0x37b: 0x0001, + 0x37c: 0x0001, 0x37d: 0x0001, 0x37e: 0x0001, 0x37f: 0x0001, + // Block 0xe, offset 0x380 + 0x380: 0x0005, 0x381: 0x0005, 0x382: 0x0005, 
0x383: 0x0005, 0x384: 0x0005, 0x385: 0x0005, + 0x386: 0x000a, 0x387: 0x000a, 0x388: 0x000d, 0x389: 0x0004, 0x38a: 0x0004, 0x38b: 0x000d, + 0x38c: 0x0006, 0x38d: 0x000d, 0x38e: 0x000a, 0x38f: 0x000a, 0x390: 0x000c, 0x391: 0x000c, + 0x392: 0x000c, 0x393: 0x000c, 0x394: 0x000c, 0x395: 0x000c, 0x396: 0x000c, 0x397: 0x000c, + 0x398: 0x000c, 0x399: 0x000c, 0x39a: 0x000c, 0x39b: 0x000d, 0x39c: 0x000d, 0x39d: 0x000d, + 0x39e: 0x000d, 0x39f: 0x000d, 0x3a0: 0x000d, 0x3a1: 0x000d, 0x3a2: 0x000d, 0x3a3: 0x000d, + 0x3a4: 0x000d, 0x3a5: 0x000d, 0x3a6: 0x000d, 0x3a7: 0x000d, 0x3a8: 0x000d, 0x3a9: 0x000d, + 0x3aa: 0x000d, 0x3ab: 0x000d, 0x3ac: 0x000d, 0x3ad: 0x000d, 0x3ae: 0x000d, 0x3af: 0x000d, + 0x3b0: 0x000d, 0x3b1: 0x000d, 0x3b2: 0x000d, 0x3b3: 0x000d, 0x3b4: 0x000d, 0x3b5: 0x000d, + 0x3b6: 0x000d, 0x3b7: 0x000d, 0x3b8: 0x000d, 0x3b9: 0x000d, 0x3ba: 0x000d, 0x3bb: 0x000d, + 0x3bc: 0x000d, 0x3bd: 0x000d, 0x3be: 0x000d, 0x3bf: 0x000d, + // Block 0xf, offset 0x3c0 + 0x3c0: 0x000d, 0x3c1: 0x000d, 0x3c2: 0x000d, 0x3c3: 0x000d, 0x3c4: 0x000d, 0x3c5: 0x000d, + 0x3c6: 0x000d, 0x3c7: 0x000d, 0x3c8: 0x000d, 0x3c9: 0x000d, 0x3ca: 0x000d, 0x3cb: 0x000c, + 0x3cc: 0x000c, 0x3cd: 0x000c, 0x3ce: 0x000c, 0x3cf: 0x000c, 0x3d0: 0x000c, 0x3d1: 0x000c, + 0x3d2: 0x000c, 0x3d3: 0x000c, 0x3d4: 0x000c, 0x3d5: 0x000c, 0x3d6: 0x000c, 0x3d7: 0x000c, + 0x3d8: 0x000c, 0x3d9: 0x000c, 0x3da: 0x000c, 0x3db: 0x000c, 0x3dc: 0x000c, 0x3dd: 0x000c, + 0x3de: 0x000c, 0x3df: 0x000c, 0x3e0: 0x0005, 0x3e1: 0x0005, 0x3e2: 0x0005, 0x3e3: 0x0005, + 0x3e4: 0x0005, 0x3e5: 0x0005, 0x3e6: 0x0005, 0x3e7: 0x0005, 0x3e8: 0x0005, 0x3e9: 0x0005, + 0x3ea: 0x0004, 0x3eb: 0x0005, 0x3ec: 0x0005, 0x3ed: 0x000d, 0x3ee: 0x000d, 0x3ef: 0x000d, + 0x3f0: 0x000c, 0x3f1: 0x000d, 0x3f2: 0x000d, 0x3f3: 0x000d, 0x3f4: 0x000d, 0x3f5: 0x000d, + 0x3f6: 0x000d, 0x3f7: 0x000d, 0x3f8: 0x000d, 0x3f9: 0x000d, 0x3fa: 0x000d, 0x3fb: 0x000d, + 0x3fc: 0x000d, 0x3fd: 0x000d, 0x3fe: 0x000d, 0x3ff: 0x000d, + // Block 0x10, offset 0x400 + 0x400: 0x000d, 0x401: 0x000d, 0x402: 0x000d, 0x403: 0x000d, 0x404: 0x000d, 0x405: 0x000d, + 0x406: 0x000d, 0x407: 0x000d, 0x408: 0x000d, 0x409: 0x000d, 0x40a: 0x000d, 0x40b: 0x000d, + 0x40c: 0x000d, 0x40d: 0x000d, 0x40e: 0x000d, 0x40f: 0x000d, 0x410: 0x000d, 0x411: 0x000d, + 0x412: 0x000d, 0x413: 0x000d, 0x414: 0x000d, 0x415: 0x000d, 0x416: 0x000d, 0x417: 0x000d, + 0x418: 0x000d, 0x419: 0x000d, 0x41a: 0x000d, 0x41b: 0x000d, 0x41c: 0x000d, 0x41d: 0x000d, + 0x41e: 0x000d, 0x41f: 0x000d, 0x420: 0x000d, 0x421: 0x000d, 0x422: 0x000d, 0x423: 0x000d, + 0x424: 0x000d, 0x425: 0x000d, 0x426: 0x000d, 0x427: 0x000d, 0x428: 0x000d, 0x429: 0x000d, + 0x42a: 0x000d, 0x42b: 0x000d, 0x42c: 0x000d, 0x42d: 0x000d, 0x42e: 0x000d, 0x42f: 0x000d, + 0x430: 0x000d, 0x431: 0x000d, 0x432: 0x000d, 0x433: 0x000d, 0x434: 0x000d, 0x435: 0x000d, + 0x436: 0x000d, 0x437: 0x000d, 0x438: 0x000d, 0x439: 0x000d, 0x43a: 0x000d, 0x43b: 0x000d, + 0x43c: 0x000d, 0x43d: 0x000d, 0x43e: 0x000d, 0x43f: 0x000d, + // Block 0x11, offset 0x440 + 0x440: 0x000d, 0x441: 0x000d, 0x442: 0x000d, 0x443: 0x000d, 0x444: 0x000d, 0x445: 0x000d, + 0x446: 0x000d, 0x447: 0x000d, 0x448: 0x000d, 0x449: 0x000d, 0x44a: 0x000d, 0x44b: 0x000d, + 0x44c: 0x000d, 0x44d: 0x000d, 0x44e: 0x000d, 0x44f: 0x000d, 0x450: 0x000d, 0x451: 0x000d, + 0x452: 0x000d, 0x453: 0x000d, 0x454: 0x000d, 0x455: 0x000d, 0x456: 0x000c, 0x457: 0x000c, + 0x458: 0x000c, 0x459: 0x000c, 0x45a: 0x000c, 0x45b: 0x000c, 0x45c: 0x000c, 0x45d: 0x0005, + 0x45e: 0x000a, 0x45f: 0x000c, 0x460: 0x000c, 0x461: 0x000c, 0x462: 0x000c, 0x463: 0x000c, + 0x464: 
0x000c, 0x465: 0x000d, 0x466: 0x000d, 0x467: 0x000c, 0x468: 0x000c, 0x469: 0x000a, + 0x46a: 0x000c, 0x46b: 0x000c, 0x46c: 0x000c, 0x46d: 0x000c, 0x46e: 0x000d, 0x46f: 0x000d, + 0x470: 0x0002, 0x471: 0x0002, 0x472: 0x0002, 0x473: 0x0002, 0x474: 0x0002, 0x475: 0x0002, + 0x476: 0x0002, 0x477: 0x0002, 0x478: 0x0002, 0x479: 0x0002, 0x47a: 0x000d, 0x47b: 0x000d, + 0x47c: 0x000d, 0x47d: 0x000d, 0x47e: 0x000d, 0x47f: 0x000d, + // Block 0x12, offset 0x480 + 0x480: 0x000d, 0x481: 0x000d, 0x482: 0x000d, 0x483: 0x000d, 0x484: 0x000d, 0x485: 0x000d, + 0x486: 0x000d, 0x487: 0x000d, 0x488: 0x000d, 0x489: 0x000d, 0x48a: 0x000d, 0x48b: 0x000d, + 0x48c: 0x000d, 0x48d: 0x000d, 0x48e: 0x000d, 0x48f: 0x000d, 0x490: 0x000d, 0x491: 0x000c, + 0x492: 0x000d, 0x493: 0x000d, 0x494: 0x000d, 0x495: 0x000d, 0x496: 0x000d, 0x497: 0x000d, + 0x498: 0x000d, 0x499: 0x000d, 0x49a: 0x000d, 0x49b: 0x000d, 0x49c: 0x000d, 0x49d: 0x000d, + 0x49e: 0x000d, 0x49f: 0x000d, 0x4a0: 0x000d, 0x4a1: 0x000d, 0x4a2: 0x000d, 0x4a3: 0x000d, + 0x4a4: 0x000d, 0x4a5: 0x000d, 0x4a6: 0x000d, 0x4a7: 0x000d, 0x4a8: 0x000d, 0x4a9: 0x000d, + 0x4aa: 0x000d, 0x4ab: 0x000d, 0x4ac: 0x000d, 0x4ad: 0x000d, 0x4ae: 0x000d, 0x4af: 0x000d, + 0x4b0: 0x000c, 0x4b1: 0x000c, 0x4b2: 0x000c, 0x4b3: 0x000c, 0x4b4: 0x000c, 0x4b5: 0x000c, + 0x4b6: 0x000c, 0x4b7: 0x000c, 0x4b8: 0x000c, 0x4b9: 0x000c, 0x4ba: 0x000c, 0x4bb: 0x000c, + 0x4bc: 0x000c, 0x4bd: 0x000c, 0x4be: 0x000c, 0x4bf: 0x000c, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x000c, 0x4c1: 0x000c, 0x4c2: 0x000c, 0x4c3: 0x000c, 0x4c4: 0x000c, 0x4c5: 0x000c, + 0x4c6: 0x000c, 0x4c7: 0x000c, 0x4c8: 0x000c, 0x4c9: 0x000c, 0x4ca: 0x000c, 0x4cb: 0x000d, + 0x4cc: 0x000d, 0x4cd: 0x000d, 0x4ce: 0x000d, 0x4cf: 0x000d, 0x4d0: 0x000d, 0x4d1: 0x000d, + 0x4d2: 0x000d, 0x4d3: 0x000d, 0x4d4: 0x000d, 0x4d5: 0x000d, 0x4d6: 0x000d, 0x4d7: 0x000d, + 0x4d8: 0x000d, 0x4d9: 0x000d, 0x4da: 0x000d, 0x4db: 0x000d, 0x4dc: 0x000d, 0x4dd: 0x000d, + 0x4de: 0x000d, 0x4df: 0x000d, 0x4e0: 0x000d, 0x4e1: 0x000d, 0x4e2: 0x000d, 0x4e3: 0x000d, + 0x4e4: 0x000d, 0x4e5: 0x000d, 0x4e6: 0x000d, 0x4e7: 0x000d, 0x4e8: 0x000d, 0x4e9: 0x000d, + 0x4ea: 0x000d, 0x4eb: 0x000d, 0x4ec: 0x000d, 0x4ed: 0x000d, 0x4ee: 0x000d, 0x4ef: 0x000d, + 0x4f0: 0x000d, 0x4f1: 0x000d, 0x4f2: 0x000d, 0x4f3: 0x000d, 0x4f4: 0x000d, 0x4f5: 0x000d, + 0x4f6: 0x000d, 0x4f7: 0x000d, 0x4f8: 0x000d, 0x4f9: 0x000d, 0x4fa: 0x000d, 0x4fb: 0x000d, + 0x4fc: 0x000d, 0x4fd: 0x000d, 0x4fe: 0x000d, 0x4ff: 0x000d, + // Block 0x14, offset 0x500 + 0x500: 0x000d, 0x501: 0x000d, 0x502: 0x000d, 0x503: 0x000d, 0x504: 0x000d, 0x505: 0x000d, + 0x506: 0x000d, 0x507: 0x000d, 0x508: 0x000d, 0x509: 0x000d, 0x50a: 0x000d, 0x50b: 0x000d, + 0x50c: 0x000d, 0x50d: 0x000d, 0x50e: 0x000d, 0x50f: 0x000d, 0x510: 0x000d, 0x511: 0x000d, + 0x512: 0x000d, 0x513: 0x000d, 0x514: 0x000d, 0x515: 0x000d, 0x516: 0x000d, 0x517: 0x000d, + 0x518: 0x000d, 0x519: 0x000d, 0x51a: 0x000d, 0x51b: 0x000d, 0x51c: 0x000d, 0x51d: 0x000d, + 0x51e: 0x000d, 0x51f: 0x000d, 0x520: 0x000d, 0x521: 0x000d, 0x522: 0x000d, 0x523: 0x000d, + 0x524: 0x000d, 0x525: 0x000d, 0x526: 0x000c, 0x527: 0x000c, 0x528: 0x000c, 0x529: 0x000c, + 0x52a: 0x000c, 0x52b: 0x000c, 0x52c: 0x000c, 0x52d: 0x000c, 0x52e: 0x000c, 0x52f: 0x000c, + 0x530: 0x000c, 0x531: 0x000d, 0x532: 0x000d, 0x533: 0x000d, 0x534: 0x000d, 0x535: 0x000d, + 0x536: 0x000d, 0x537: 0x000d, 0x538: 0x000d, 0x539: 0x000d, 0x53a: 0x000d, 0x53b: 0x000d, + 0x53c: 0x000d, 0x53d: 0x000d, 0x53e: 0x000d, 0x53f: 0x000d, + // Block 0x15, offset 0x540 + 0x540: 0x0001, 0x541: 0x0001, 0x542: 0x0001, 0x543: 0x0001, 
0x544: 0x0001, 0x545: 0x0001, + 0x546: 0x0001, 0x547: 0x0001, 0x548: 0x0001, 0x549: 0x0001, 0x54a: 0x0001, 0x54b: 0x0001, + 0x54c: 0x0001, 0x54d: 0x0001, 0x54e: 0x0001, 0x54f: 0x0001, 0x550: 0x0001, 0x551: 0x0001, + 0x552: 0x0001, 0x553: 0x0001, 0x554: 0x0001, 0x555: 0x0001, 0x556: 0x0001, 0x557: 0x0001, + 0x558: 0x0001, 0x559: 0x0001, 0x55a: 0x0001, 0x55b: 0x0001, 0x55c: 0x0001, 0x55d: 0x0001, + 0x55e: 0x0001, 0x55f: 0x0001, 0x560: 0x0001, 0x561: 0x0001, 0x562: 0x0001, 0x563: 0x0001, + 0x564: 0x0001, 0x565: 0x0001, 0x566: 0x0001, 0x567: 0x0001, 0x568: 0x0001, 0x569: 0x0001, + 0x56a: 0x0001, 0x56b: 0x000c, 0x56c: 0x000c, 0x56d: 0x000c, 0x56e: 0x000c, 0x56f: 0x000c, + 0x570: 0x000c, 0x571: 0x000c, 0x572: 0x000c, 0x573: 0x000c, 0x574: 0x0001, 0x575: 0x0001, + 0x576: 0x000a, 0x577: 0x000a, 0x578: 0x000a, 0x579: 0x000a, 0x57a: 0x0001, 0x57b: 0x0001, + 0x57c: 0x0001, 0x57d: 0x000c, 0x57e: 0x0001, 0x57f: 0x0001, + // Block 0x16, offset 0x580 + 0x580: 0x0001, 0x581: 0x0001, 0x582: 0x0001, 0x583: 0x0001, 0x584: 0x0001, 0x585: 0x0001, + 0x586: 0x0001, 0x587: 0x0001, 0x588: 0x0001, 0x589: 0x0001, 0x58a: 0x0001, 0x58b: 0x0001, + 0x58c: 0x0001, 0x58d: 0x0001, 0x58e: 0x0001, 0x58f: 0x0001, 0x590: 0x0001, 0x591: 0x0001, + 0x592: 0x0001, 0x593: 0x0001, 0x594: 0x0001, 0x595: 0x0001, 0x596: 0x000c, 0x597: 0x000c, + 0x598: 0x000c, 0x599: 0x000c, 0x59a: 0x0001, 0x59b: 0x000c, 0x59c: 0x000c, 0x59d: 0x000c, + 0x59e: 0x000c, 0x59f: 0x000c, 0x5a0: 0x000c, 0x5a1: 0x000c, 0x5a2: 0x000c, 0x5a3: 0x000c, + 0x5a4: 0x0001, 0x5a5: 0x000c, 0x5a6: 0x000c, 0x5a7: 0x000c, 0x5a8: 0x0001, 0x5a9: 0x000c, + 0x5aa: 0x000c, 0x5ab: 0x000c, 0x5ac: 0x000c, 0x5ad: 0x000c, 0x5ae: 0x0001, 0x5af: 0x0001, + 0x5b0: 0x0001, 0x5b1: 0x0001, 0x5b2: 0x0001, 0x5b3: 0x0001, 0x5b4: 0x0001, 0x5b5: 0x0001, + 0x5b6: 0x0001, 0x5b7: 0x0001, 0x5b8: 0x0001, 0x5b9: 0x0001, 0x5ba: 0x0001, 0x5bb: 0x0001, + 0x5bc: 0x0001, 0x5bd: 0x0001, 0x5be: 0x0001, 0x5bf: 0x0001, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x0001, 0x5c1: 0x0001, 0x5c2: 0x0001, 0x5c3: 0x0001, 0x5c4: 0x0001, 0x5c5: 0x0001, + 0x5c6: 0x0001, 0x5c7: 0x0001, 0x5c8: 0x0001, 0x5c9: 0x0001, 0x5ca: 0x0001, 0x5cb: 0x0001, + 0x5cc: 0x0001, 0x5cd: 0x0001, 0x5ce: 0x0001, 0x5cf: 0x0001, 0x5d0: 0x0001, 0x5d1: 0x0001, + 0x5d2: 0x0001, 0x5d3: 0x0001, 0x5d4: 0x0001, 0x5d5: 0x0001, 0x5d6: 0x0001, 0x5d7: 0x0001, + 0x5d8: 0x0001, 0x5d9: 0x000c, 0x5da: 0x000c, 0x5db: 0x000c, 0x5dc: 0x0001, 0x5dd: 0x0001, + 0x5de: 0x0001, 0x5df: 0x0001, 0x5e0: 0x000d, 0x5e1: 0x000d, 0x5e2: 0x000d, 0x5e3: 0x000d, + 0x5e4: 0x000d, 0x5e5: 0x000d, 0x5e6: 0x000d, 0x5e7: 0x000d, 0x5e8: 0x000d, 0x5e9: 0x000d, + 0x5ea: 0x000d, 0x5eb: 0x000d, 0x5ec: 0x000d, 0x5ed: 0x000d, 0x5ee: 0x000d, 0x5ef: 0x000d, + 0x5f0: 0x0001, 0x5f1: 0x0001, 0x5f2: 0x0001, 0x5f3: 0x0001, 0x5f4: 0x0001, 0x5f5: 0x0001, + 0x5f6: 0x0001, 0x5f7: 0x0001, 0x5f8: 0x0001, 0x5f9: 0x0001, 0x5fa: 0x0001, 0x5fb: 0x0001, + 0x5fc: 0x0001, 0x5fd: 0x0001, 0x5fe: 0x0001, 0x5ff: 0x0001, + // Block 0x18, offset 0x600 + 0x600: 0x0001, 0x601: 0x0001, 0x602: 0x0001, 0x603: 0x0001, 0x604: 0x0001, 0x605: 0x0001, + 0x606: 0x0001, 0x607: 0x0001, 0x608: 0x0001, 0x609: 0x0001, 0x60a: 0x0001, 0x60b: 0x0001, + 0x60c: 0x0001, 0x60d: 0x0001, 0x60e: 0x0001, 0x60f: 0x0001, 0x610: 0x0001, 0x611: 0x0001, + 0x612: 0x0001, 0x613: 0x0001, 0x614: 0x0001, 0x615: 0x0001, 0x616: 0x0001, 0x617: 0x0001, + 0x618: 0x0001, 0x619: 0x0001, 0x61a: 0x0001, 0x61b: 0x0001, 0x61c: 0x0001, 0x61d: 0x0001, + 0x61e: 0x0001, 0x61f: 0x0001, 0x620: 0x000d, 0x621: 0x000d, 0x622: 0x000d, 0x623: 0x000d, + 0x624: 0x000d, 0x625: 
0x000d, 0x626: 0x000d, 0x627: 0x000d, 0x628: 0x000d, 0x629: 0x000d, + 0x62a: 0x000d, 0x62b: 0x000d, 0x62c: 0x000d, 0x62d: 0x000d, 0x62e: 0x000d, 0x62f: 0x000d, + 0x630: 0x000d, 0x631: 0x000d, 0x632: 0x000d, 0x633: 0x000d, 0x634: 0x000d, 0x635: 0x000d, + 0x636: 0x000d, 0x637: 0x000d, 0x638: 0x000d, 0x639: 0x000d, 0x63a: 0x000d, 0x63b: 0x000d, + 0x63c: 0x000d, 0x63d: 0x000d, 0x63e: 0x000d, 0x63f: 0x000d, + // Block 0x19, offset 0x640 + 0x640: 0x000d, 0x641: 0x000d, 0x642: 0x000d, 0x643: 0x000d, 0x644: 0x000d, 0x645: 0x000d, + 0x646: 0x000d, 0x647: 0x000d, 0x648: 0x000d, 0x649: 0x000d, 0x64a: 0x000d, 0x64b: 0x000d, + 0x64c: 0x000d, 0x64d: 0x000d, 0x64e: 0x000d, 0x64f: 0x000d, 0x650: 0x000d, 0x651: 0x000d, + 0x652: 0x000d, 0x653: 0x000c, 0x654: 0x000c, 0x655: 0x000c, 0x656: 0x000c, 0x657: 0x000c, + 0x658: 0x000c, 0x659: 0x000c, 0x65a: 0x000c, 0x65b: 0x000c, 0x65c: 0x000c, 0x65d: 0x000c, + 0x65e: 0x000c, 0x65f: 0x000c, 0x660: 0x000c, 0x661: 0x000c, 0x662: 0x0005, 0x663: 0x000c, + 0x664: 0x000c, 0x665: 0x000c, 0x666: 0x000c, 0x667: 0x000c, 0x668: 0x000c, 0x669: 0x000c, + 0x66a: 0x000c, 0x66b: 0x000c, 0x66c: 0x000c, 0x66d: 0x000c, 0x66e: 0x000c, 0x66f: 0x000c, + 0x670: 0x000c, 0x671: 0x000c, 0x672: 0x000c, 0x673: 0x000c, 0x674: 0x000c, 0x675: 0x000c, + 0x676: 0x000c, 0x677: 0x000c, 0x678: 0x000c, 0x679: 0x000c, 0x67a: 0x000c, 0x67b: 0x000c, + 0x67c: 0x000c, 0x67d: 0x000c, 0x67e: 0x000c, 0x67f: 0x000c, + // Block 0x1a, offset 0x680 + 0x680: 0x000c, 0x681: 0x000c, 0x682: 0x000c, + 0x6ba: 0x000c, + 0x6bc: 0x000c, + // Block 0x1b, offset 0x6c0 + 0x6c1: 0x000c, 0x6c2: 0x000c, 0x6c3: 0x000c, 0x6c4: 0x000c, 0x6c5: 0x000c, + 0x6c6: 0x000c, 0x6c7: 0x000c, 0x6c8: 0x000c, + 0x6cd: 0x000c, 0x6d1: 0x000c, + 0x6d2: 0x000c, 0x6d3: 0x000c, 0x6d4: 0x000c, 0x6d5: 0x000c, 0x6d6: 0x000c, 0x6d7: 0x000c, + 0x6e2: 0x000c, 0x6e3: 0x000c, + // Block 0x1c, offset 0x700 + 0x701: 0x000c, + 0x73c: 0x000c, + // Block 0x1d, offset 0x740 + 0x741: 0x000c, 0x742: 0x000c, 0x743: 0x000c, 0x744: 0x000c, + 0x74d: 0x000c, + 0x762: 0x000c, 0x763: 0x000c, + 0x772: 0x0004, 0x773: 0x0004, + 0x77b: 0x0004, + 0x77e: 0x000c, + // Block 0x1e, offset 0x780 + 0x781: 0x000c, 0x782: 0x000c, + 0x7bc: 0x000c, + // Block 0x1f, offset 0x7c0 + 0x7c1: 0x000c, 0x7c2: 0x000c, + 0x7c7: 0x000c, 0x7c8: 0x000c, 0x7cb: 0x000c, + 0x7cc: 0x000c, 0x7cd: 0x000c, 0x7d1: 0x000c, + 0x7f0: 0x000c, 0x7f1: 0x000c, 0x7f5: 0x000c, + // Block 0x20, offset 0x800 + 0x801: 0x000c, 0x802: 0x000c, 0x803: 0x000c, 0x804: 0x000c, 0x805: 0x000c, + 0x807: 0x000c, 0x808: 0x000c, + 0x80d: 0x000c, + 0x822: 0x000c, 0x823: 0x000c, + 0x831: 0x0004, + 0x83a: 0x000c, 0x83b: 0x000c, + 0x83c: 0x000c, 0x83d: 0x000c, 0x83e: 0x000c, 0x83f: 0x000c, + // Block 0x21, offset 0x840 + 0x841: 0x000c, + 0x87c: 0x000c, 0x87f: 0x000c, + // Block 0x22, offset 0x880 + 0x881: 0x000c, 0x882: 0x000c, 0x883: 0x000c, 0x884: 0x000c, + 0x88d: 0x000c, + 0x896: 0x000c, + 0x8a2: 0x000c, 0x8a3: 0x000c, + // Block 0x23, offset 0x8c0 + 0x8c2: 0x000c, + // Block 0x24, offset 0x900 + 0x900: 0x000c, + 0x90d: 0x000c, + 0x933: 0x000a, 0x934: 0x000a, 0x935: 0x000a, + 0x936: 0x000a, 0x937: 0x000a, 0x938: 0x000a, 0x939: 0x0004, 0x93a: 0x000a, + // Block 0x25, offset 0x940 + 0x940: 0x000c, 0x944: 0x000c, + 0x97e: 0x000c, 0x97f: 0x000c, + // Block 0x26, offset 0x980 + 0x980: 0x000c, + 0x986: 0x000c, 0x987: 0x000c, 0x988: 0x000c, 0x98a: 0x000c, 0x98b: 0x000c, + 0x98c: 0x000c, 0x98d: 0x000c, + 0x995: 0x000c, 0x996: 0x000c, + 0x9a2: 0x000c, 0x9a3: 0x000c, + 0x9b8: 0x000a, 0x9b9: 0x000a, 0x9ba: 0x000a, 0x9bb: 0x000a, + 0x9bc: 
0x000a, 0x9bd: 0x000a, 0x9be: 0x000a, + // Block 0x27, offset 0x9c0 + 0x9cc: 0x000c, 0x9cd: 0x000c, + 0x9e2: 0x000c, 0x9e3: 0x000c, + // Block 0x28, offset 0xa00 + 0xa00: 0x000c, 0xa01: 0x000c, + 0xa3b: 0x000c, + 0xa3c: 0x000c, + // Block 0x29, offset 0xa40 + 0xa41: 0x000c, 0xa42: 0x000c, 0xa43: 0x000c, 0xa44: 0x000c, + 0xa4d: 0x000c, + 0xa62: 0x000c, 0xa63: 0x000c, + // Block 0x2a, offset 0xa80 + 0xa8a: 0x000c, + 0xa92: 0x000c, 0xa93: 0x000c, 0xa94: 0x000c, 0xa96: 0x000c, + // Block 0x2b, offset 0xac0 + 0xaf1: 0x000c, 0xaf4: 0x000c, 0xaf5: 0x000c, + 0xaf6: 0x000c, 0xaf7: 0x000c, 0xaf8: 0x000c, 0xaf9: 0x000c, 0xafa: 0x000c, + 0xaff: 0x0004, + // Block 0x2c, offset 0xb00 + 0xb07: 0x000c, 0xb08: 0x000c, 0xb09: 0x000c, 0xb0a: 0x000c, 0xb0b: 0x000c, + 0xb0c: 0x000c, 0xb0d: 0x000c, 0xb0e: 0x000c, + // Block 0x2d, offset 0xb40 + 0xb71: 0x000c, 0xb74: 0x000c, 0xb75: 0x000c, + 0xb76: 0x000c, 0xb77: 0x000c, 0xb78: 0x000c, 0xb79: 0x000c, 0xb7b: 0x000c, + 0xb7c: 0x000c, + // Block 0x2e, offset 0xb80 + 0xb88: 0x000c, 0xb89: 0x000c, 0xb8a: 0x000c, 0xb8b: 0x000c, + 0xb8c: 0x000c, 0xb8d: 0x000c, + // Block 0x2f, offset 0xbc0 + 0xbd8: 0x000c, 0xbd9: 0x000c, + 0xbf5: 0x000c, + 0xbf7: 0x000c, 0xbf9: 0x000c, 0xbfa: 0x003a, 0xbfb: 0x002a, + 0xbfc: 0x003a, 0xbfd: 0x002a, + // Block 0x30, offset 0xc00 + 0xc31: 0x000c, 0xc32: 0x000c, 0xc33: 0x000c, 0xc34: 0x000c, 0xc35: 0x000c, + 0xc36: 0x000c, 0xc37: 0x000c, 0xc38: 0x000c, 0xc39: 0x000c, 0xc3a: 0x000c, 0xc3b: 0x000c, + 0xc3c: 0x000c, 0xc3d: 0x000c, 0xc3e: 0x000c, + // Block 0x31, offset 0xc40 + 0xc40: 0x000c, 0xc41: 0x000c, 0xc42: 0x000c, 0xc43: 0x000c, 0xc44: 0x000c, + 0xc46: 0x000c, 0xc47: 0x000c, + 0xc4d: 0x000c, 0xc4e: 0x000c, 0xc4f: 0x000c, 0xc50: 0x000c, 0xc51: 0x000c, + 0xc52: 0x000c, 0xc53: 0x000c, 0xc54: 0x000c, 0xc55: 0x000c, 0xc56: 0x000c, 0xc57: 0x000c, + 0xc59: 0x000c, 0xc5a: 0x000c, 0xc5b: 0x000c, 0xc5c: 0x000c, 0xc5d: 0x000c, + 0xc5e: 0x000c, 0xc5f: 0x000c, 0xc60: 0x000c, 0xc61: 0x000c, 0xc62: 0x000c, 0xc63: 0x000c, + 0xc64: 0x000c, 0xc65: 0x000c, 0xc66: 0x000c, 0xc67: 0x000c, 0xc68: 0x000c, 0xc69: 0x000c, + 0xc6a: 0x000c, 0xc6b: 0x000c, 0xc6c: 0x000c, 0xc6d: 0x000c, 0xc6e: 0x000c, 0xc6f: 0x000c, + 0xc70: 0x000c, 0xc71: 0x000c, 0xc72: 0x000c, 0xc73: 0x000c, 0xc74: 0x000c, 0xc75: 0x000c, + 0xc76: 0x000c, 0xc77: 0x000c, 0xc78: 0x000c, 0xc79: 0x000c, 0xc7a: 0x000c, 0xc7b: 0x000c, + 0xc7c: 0x000c, + // Block 0x32, offset 0xc80 + 0xc86: 0x000c, + // Block 0x33, offset 0xcc0 + 0xced: 0x000c, 0xcee: 0x000c, 0xcef: 0x000c, + 0xcf0: 0x000c, 0xcf2: 0x000c, 0xcf3: 0x000c, 0xcf4: 0x000c, 0xcf5: 0x000c, + 0xcf6: 0x000c, 0xcf7: 0x000c, 0xcf9: 0x000c, 0xcfa: 0x000c, + 0xcfd: 0x000c, 0xcfe: 0x000c, + // Block 0x34, offset 0xd00 + 0xd18: 0x000c, 0xd19: 0x000c, + 0xd1e: 0x000c, 0xd1f: 0x000c, 0xd20: 0x000c, + 0xd31: 0x000c, 0xd32: 0x000c, 0xd33: 0x000c, 0xd34: 0x000c, + // Block 0x35, offset 0xd40 + 0xd42: 0x000c, 0xd45: 0x000c, + 0xd46: 0x000c, + 0xd4d: 0x000c, + 0xd5d: 0x000c, + // Block 0x36, offset 0xd80 + 0xd9d: 0x000c, + 0xd9e: 0x000c, 0xd9f: 0x000c, + // Block 0x37, offset 0xdc0 + 0xdd0: 0x000a, 0xdd1: 0x000a, + 0xdd2: 0x000a, 0xdd3: 0x000a, 0xdd4: 0x000a, 0xdd5: 0x000a, 0xdd6: 0x000a, 0xdd7: 0x000a, + 0xdd8: 0x000a, 0xdd9: 0x000a, + // Block 0x38, offset 0xe00 + 0xe00: 0x000a, + // Block 0x39, offset 0xe40 + 0xe40: 0x0009, + 0xe5b: 0x007a, 0xe5c: 0x006a, + // Block 0x3a, offset 0xe80 + 0xe92: 0x000c, 0xe93: 0x000c, 0xe94: 0x000c, + 0xeb2: 0x000c, 0xeb3: 0x000c, 0xeb4: 0x000c, + // Block 0x3b, offset 0xec0 + 0xed2: 0x000c, 0xed3: 0x000c, + 0xef2: 0x000c, 
0xef3: 0x000c, + // Block 0x3c, offset 0xf00 + 0xf34: 0x000c, 0xf35: 0x000c, + 0xf37: 0x000c, 0xf38: 0x000c, 0xf39: 0x000c, 0xf3a: 0x000c, 0xf3b: 0x000c, + 0xf3c: 0x000c, 0xf3d: 0x000c, + // Block 0x3d, offset 0xf40 + 0xf46: 0x000c, 0xf49: 0x000c, 0xf4a: 0x000c, 0xf4b: 0x000c, + 0xf4c: 0x000c, 0xf4d: 0x000c, 0xf4e: 0x000c, 0xf4f: 0x000c, 0xf50: 0x000c, 0xf51: 0x000c, + 0xf52: 0x000c, 0xf53: 0x000c, + 0xf5b: 0x0004, 0xf5d: 0x000c, + 0xf70: 0x000a, 0xf71: 0x000a, 0xf72: 0x000a, 0xf73: 0x000a, 0xf74: 0x000a, 0xf75: 0x000a, + 0xf76: 0x000a, 0xf77: 0x000a, 0xf78: 0x000a, 0xf79: 0x000a, + // Block 0x3e, offset 0xf80 + 0xf80: 0x000a, 0xf81: 0x000a, 0xf82: 0x000a, 0xf83: 0x000a, 0xf84: 0x000a, 0xf85: 0x000a, + 0xf86: 0x000a, 0xf87: 0x000a, 0xf88: 0x000a, 0xf89: 0x000a, 0xf8a: 0x000a, 0xf8b: 0x000c, + 0xf8c: 0x000c, 0xf8d: 0x000c, 0xf8e: 0x000b, + // Block 0x3f, offset 0xfc0 + 0xfc5: 0x000c, + 0xfc6: 0x000c, + 0xfe9: 0x000c, + // Block 0x40, offset 0x1000 + 0x1020: 0x000c, 0x1021: 0x000c, 0x1022: 0x000c, + 0x1027: 0x000c, 0x1028: 0x000c, + 0x1032: 0x000c, + 0x1039: 0x000c, 0x103a: 0x000c, 0x103b: 0x000c, + // Block 0x41, offset 0x1040 + 0x1040: 0x000a, 0x1044: 0x000a, 0x1045: 0x000a, + // Block 0x42, offset 0x1080 + 0x109e: 0x000a, 0x109f: 0x000a, 0x10a0: 0x000a, 0x10a1: 0x000a, 0x10a2: 0x000a, 0x10a3: 0x000a, + 0x10a4: 0x000a, 0x10a5: 0x000a, 0x10a6: 0x000a, 0x10a7: 0x000a, 0x10a8: 0x000a, 0x10a9: 0x000a, + 0x10aa: 0x000a, 0x10ab: 0x000a, 0x10ac: 0x000a, 0x10ad: 0x000a, 0x10ae: 0x000a, 0x10af: 0x000a, + 0x10b0: 0x000a, 0x10b1: 0x000a, 0x10b2: 0x000a, 0x10b3: 0x000a, 0x10b4: 0x000a, 0x10b5: 0x000a, + 0x10b6: 0x000a, 0x10b7: 0x000a, 0x10b8: 0x000a, 0x10b9: 0x000a, 0x10ba: 0x000a, 0x10bb: 0x000a, + 0x10bc: 0x000a, 0x10bd: 0x000a, 0x10be: 0x000a, 0x10bf: 0x000a, + // Block 0x43, offset 0x10c0 + 0x10d7: 0x000c, + 0x10d8: 0x000c, 0x10db: 0x000c, + // Block 0x44, offset 0x1100 + 0x1116: 0x000c, + 0x1118: 0x000c, 0x1119: 0x000c, 0x111a: 0x000c, 0x111b: 0x000c, 0x111c: 0x000c, 0x111d: 0x000c, + 0x111e: 0x000c, 0x1120: 0x000c, 0x1122: 0x000c, + 0x1125: 0x000c, 0x1126: 0x000c, 0x1127: 0x000c, 0x1128: 0x000c, 0x1129: 0x000c, + 0x112a: 0x000c, 0x112b: 0x000c, 0x112c: 0x000c, + 0x1133: 0x000c, 0x1134: 0x000c, 0x1135: 0x000c, + 0x1136: 0x000c, 0x1137: 0x000c, 0x1138: 0x000c, 0x1139: 0x000c, 0x113a: 0x000c, 0x113b: 0x000c, + 0x113c: 0x000c, 0x113f: 0x000c, + // Block 0x45, offset 0x1140 + 0x1170: 0x000c, 0x1171: 0x000c, 0x1172: 0x000c, 0x1173: 0x000c, 0x1174: 0x000c, 0x1175: 0x000c, + 0x1176: 0x000c, 0x1177: 0x000c, 0x1178: 0x000c, 0x1179: 0x000c, 0x117a: 0x000c, 0x117b: 0x000c, + 0x117c: 0x000c, 0x117d: 0x000c, 0x117e: 0x000c, + // Block 0x46, offset 0x1180 + 0x1180: 0x000c, 0x1181: 0x000c, 0x1182: 0x000c, 0x1183: 0x000c, + 0x11b4: 0x000c, + 0x11b6: 0x000c, 0x11b7: 0x000c, 0x11b8: 0x000c, 0x11b9: 0x000c, 0x11ba: 0x000c, + 0x11bc: 0x000c, + // Block 0x47, offset 0x11c0 + 0x11c2: 0x000c, + 0x11eb: 0x000c, 0x11ec: 0x000c, 0x11ed: 0x000c, 0x11ee: 0x000c, 0x11ef: 0x000c, + 0x11f0: 0x000c, 0x11f1: 0x000c, 0x11f2: 0x000c, 0x11f3: 0x000c, + // Block 0x48, offset 0x1200 + 0x1200: 0x000c, 0x1201: 0x000c, + 0x1222: 0x000c, 0x1223: 0x000c, + 0x1224: 0x000c, 0x1225: 0x000c, 0x1228: 0x000c, 0x1229: 0x000c, + 0x122b: 0x000c, 0x122c: 0x000c, 0x122d: 0x000c, + // Block 0x49, offset 0x1240 + 0x1266: 0x000c, 0x1268: 0x000c, 0x1269: 0x000c, + 0x126d: 0x000c, 0x126f: 0x000c, + 0x1270: 0x000c, 0x1271: 0x000c, + // Block 0x4a, offset 0x1280 + 0x12ac: 0x000c, 0x12ad: 0x000c, 0x12ae: 0x000c, 0x12af: 0x000c, + 0x12b0: 0x000c, 0x12b1: 
0x000c, 0x12b2: 0x000c, 0x12b3: 0x000c, + 0x12b6: 0x000c, 0x12b7: 0x000c, + // Block 0x4b, offset 0x12c0 + 0x12d0: 0x000c, 0x12d1: 0x000c, + 0x12d2: 0x000c, 0x12d4: 0x000c, 0x12d5: 0x000c, 0x12d6: 0x000c, 0x12d7: 0x000c, + 0x12d8: 0x000c, 0x12d9: 0x000c, 0x12da: 0x000c, 0x12db: 0x000c, 0x12dc: 0x000c, 0x12dd: 0x000c, + 0x12de: 0x000c, 0x12df: 0x000c, 0x12e0: 0x000c, 0x12e2: 0x000c, 0x12e3: 0x000c, + 0x12e4: 0x000c, 0x12e5: 0x000c, 0x12e6: 0x000c, 0x12e7: 0x000c, 0x12e8: 0x000c, + 0x12ed: 0x000c, + 0x12f4: 0x000c, + 0x12f8: 0x000c, 0x12f9: 0x000c, + // Block 0x4c, offset 0x1300 + 0x1300: 0x000c, 0x1301: 0x000c, 0x1302: 0x000c, 0x1303: 0x000c, 0x1304: 0x000c, 0x1305: 0x000c, + 0x1306: 0x000c, 0x1307: 0x000c, 0x1308: 0x000c, 0x1309: 0x000c, 0x130a: 0x000c, 0x130b: 0x000c, + 0x130c: 0x000c, 0x130d: 0x000c, 0x130e: 0x000c, 0x130f: 0x000c, 0x1310: 0x000c, 0x1311: 0x000c, + 0x1312: 0x000c, 0x1313: 0x000c, 0x1314: 0x000c, 0x1315: 0x000c, 0x1316: 0x000c, 0x1317: 0x000c, + 0x1318: 0x000c, 0x1319: 0x000c, 0x131a: 0x000c, 0x131b: 0x000c, 0x131c: 0x000c, 0x131d: 0x000c, + 0x131e: 0x000c, 0x131f: 0x000c, 0x1320: 0x000c, 0x1321: 0x000c, 0x1322: 0x000c, 0x1323: 0x000c, + 0x1324: 0x000c, 0x1325: 0x000c, 0x1326: 0x000c, 0x1327: 0x000c, 0x1328: 0x000c, 0x1329: 0x000c, + 0x132a: 0x000c, 0x132b: 0x000c, 0x132c: 0x000c, 0x132d: 0x000c, 0x132e: 0x000c, 0x132f: 0x000c, + 0x1330: 0x000c, 0x1331: 0x000c, 0x1332: 0x000c, 0x1333: 0x000c, 0x1334: 0x000c, 0x1335: 0x000c, + 0x1336: 0x000c, 0x1337: 0x000c, 0x1338: 0x000c, 0x1339: 0x000c, 0x133b: 0x000c, + 0x133c: 0x000c, 0x133d: 0x000c, 0x133e: 0x000c, 0x133f: 0x000c, + // Block 0x4d, offset 0x1340 + 0x137d: 0x000a, 0x137f: 0x000a, + // Block 0x4e, offset 0x1380 + 0x1380: 0x000a, 0x1381: 0x000a, + 0x138d: 0x000a, 0x138e: 0x000a, 0x138f: 0x000a, + 0x139d: 0x000a, + 0x139e: 0x000a, 0x139f: 0x000a, + 0x13ad: 0x000a, 0x13ae: 0x000a, 0x13af: 0x000a, + 0x13bd: 0x000a, 0x13be: 0x000a, + // Block 0x4f, offset 0x13c0 + 0x13c0: 0x0009, 0x13c1: 0x0009, 0x13c2: 0x0009, 0x13c3: 0x0009, 0x13c4: 0x0009, 0x13c5: 0x0009, + 0x13c6: 0x0009, 0x13c7: 0x0009, 0x13c8: 0x0009, 0x13c9: 0x0009, 0x13ca: 0x0009, 0x13cb: 0x000b, + 0x13cc: 0x000b, 0x13cd: 0x000b, 0x13cf: 0x0001, 0x13d0: 0x000a, 0x13d1: 0x000a, + 0x13d2: 0x000a, 0x13d3: 0x000a, 0x13d4: 0x000a, 0x13d5: 0x000a, 0x13d6: 0x000a, 0x13d7: 0x000a, + 0x13d8: 0x000a, 0x13d9: 0x000a, 0x13da: 0x000a, 0x13db: 0x000a, 0x13dc: 0x000a, 0x13dd: 0x000a, + 0x13de: 0x000a, 0x13df: 0x000a, 0x13e0: 0x000a, 0x13e1: 0x000a, 0x13e2: 0x000a, 0x13e3: 0x000a, + 0x13e4: 0x000a, 0x13e5: 0x000a, 0x13e6: 0x000a, 0x13e7: 0x000a, 0x13e8: 0x0009, 0x13e9: 0x0007, + 0x13ea: 0x000e, 0x13eb: 0x000e, 0x13ec: 0x000e, 0x13ed: 0x000e, 0x13ee: 0x000e, 0x13ef: 0x0006, + 0x13f0: 0x0004, 0x13f1: 0x0004, 0x13f2: 0x0004, 0x13f3: 0x0004, 0x13f4: 0x0004, 0x13f5: 0x000a, + 0x13f6: 0x000a, 0x13f7: 0x000a, 0x13f8: 0x000a, 0x13f9: 0x000a, 0x13fa: 0x000a, 0x13fb: 0x000a, + 0x13fc: 0x000a, 0x13fd: 0x000a, 0x13fe: 0x000a, 0x13ff: 0x000a, + // Block 0x50, offset 0x1400 + 0x1400: 0x000a, 0x1401: 0x000a, 0x1402: 0x000a, 0x1403: 0x000a, 0x1404: 0x0006, 0x1405: 0x009a, + 0x1406: 0x008a, 0x1407: 0x000a, 0x1408: 0x000a, 0x1409: 0x000a, 0x140a: 0x000a, 0x140b: 0x000a, + 0x140c: 0x000a, 0x140d: 0x000a, 0x140e: 0x000a, 0x140f: 0x000a, 0x1410: 0x000a, 0x1411: 0x000a, + 0x1412: 0x000a, 0x1413: 0x000a, 0x1414: 0x000a, 0x1415: 0x000a, 0x1416: 0x000a, 0x1417: 0x000a, + 0x1418: 0x000a, 0x1419: 0x000a, 0x141a: 0x000a, 0x141b: 0x000a, 0x141c: 0x000a, 0x141d: 0x000a, + 0x141e: 0x000a, 0x141f: 0x0009, 0x1420: 
0x000b, 0x1421: 0x000b, 0x1422: 0x000b, 0x1423: 0x000b, + 0x1424: 0x000b, 0x1425: 0x000b, 0x1426: 0x000e, 0x1427: 0x000e, 0x1428: 0x000e, 0x1429: 0x000e, + 0x142a: 0x000b, 0x142b: 0x000b, 0x142c: 0x000b, 0x142d: 0x000b, 0x142e: 0x000b, 0x142f: 0x000b, + 0x1430: 0x0002, 0x1434: 0x0002, 0x1435: 0x0002, + 0x1436: 0x0002, 0x1437: 0x0002, 0x1438: 0x0002, 0x1439: 0x0002, 0x143a: 0x0003, 0x143b: 0x0003, + 0x143c: 0x000a, 0x143d: 0x009a, 0x143e: 0x008a, + // Block 0x51, offset 0x1440 + 0x1440: 0x0002, 0x1441: 0x0002, 0x1442: 0x0002, 0x1443: 0x0002, 0x1444: 0x0002, 0x1445: 0x0002, + 0x1446: 0x0002, 0x1447: 0x0002, 0x1448: 0x0002, 0x1449: 0x0002, 0x144a: 0x0003, 0x144b: 0x0003, + 0x144c: 0x000a, 0x144d: 0x009a, 0x144e: 0x008a, + 0x1460: 0x0004, 0x1461: 0x0004, 0x1462: 0x0004, 0x1463: 0x0004, + 0x1464: 0x0004, 0x1465: 0x0004, 0x1466: 0x0004, 0x1467: 0x0004, 0x1468: 0x0004, 0x1469: 0x0004, + 0x146a: 0x0004, 0x146b: 0x0004, 0x146c: 0x0004, 0x146d: 0x0004, 0x146e: 0x0004, 0x146f: 0x0004, + 0x1470: 0x0004, 0x1471: 0x0004, 0x1472: 0x0004, 0x1473: 0x0004, 0x1474: 0x0004, 0x1475: 0x0004, + 0x1476: 0x0004, 0x1477: 0x0004, 0x1478: 0x0004, 0x1479: 0x0004, 0x147a: 0x0004, 0x147b: 0x0004, + 0x147c: 0x0004, 0x147d: 0x0004, 0x147e: 0x0004, 0x147f: 0x0004, + // Block 0x52, offset 0x1480 + 0x1480: 0x0004, 0x1481: 0x0004, 0x1482: 0x0004, 0x1483: 0x0004, 0x1484: 0x0004, 0x1485: 0x0004, + 0x1486: 0x0004, 0x1487: 0x0004, 0x1488: 0x0004, 0x1489: 0x0004, 0x148a: 0x0004, 0x148b: 0x0004, + 0x148c: 0x0004, 0x148d: 0x0004, 0x148e: 0x0004, 0x148f: 0x0004, 0x1490: 0x000c, 0x1491: 0x000c, + 0x1492: 0x000c, 0x1493: 0x000c, 0x1494: 0x000c, 0x1495: 0x000c, 0x1496: 0x000c, 0x1497: 0x000c, + 0x1498: 0x000c, 0x1499: 0x000c, 0x149a: 0x000c, 0x149b: 0x000c, 0x149c: 0x000c, 0x149d: 0x000c, + 0x149e: 0x000c, 0x149f: 0x000c, 0x14a0: 0x000c, 0x14a1: 0x000c, 0x14a2: 0x000c, 0x14a3: 0x000c, + 0x14a4: 0x000c, 0x14a5: 0x000c, 0x14a6: 0x000c, 0x14a7: 0x000c, 0x14a8: 0x000c, 0x14a9: 0x000c, + 0x14aa: 0x000c, 0x14ab: 0x000c, 0x14ac: 0x000c, 0x14ad: 0x000c, 0x14ae: 0x000c, 0x14af: 0x000c, + 0x14b0: 0x000c, + // Block 0x53, offset 0x14c0 + 0x14c0: 0x000a, 0x14c1: 0x000a, 0x14c3: 0x000a, 0x14c4: 0x000a, 0x14c5: 0x000a, + 0x14c6: 0x000a, 0x14c8: 0x000a, 0x14c9: 0x000a, + 0x14d4: 0x000a, 0x14d6: 0x000a, 0x14d7: 0x000a, + 0x14d8: 0x000a, + 0x14de: 0x000a, 0x14df: 0x000a, 0x14e0: 0x000a, 0x14e1: 0x000a, 0x14e2: 0x000a, 0x14e3: 0x000a, + 0x14e5: 0x000a, 0x14e7: 0x000a, 0x14e9: 0x000a, + 0x14ee: 0x0004, + 0x14fa: 0x000a, 0x14fb: 0x000a, + // Block 0x54, offset 0x1500 + 0x1500: 0x000a, 0x1501: 0x000a, 0x1502: 0x000a, 0x1503: 0x000a, 0x1504: 0x000a, + 0x150a: 0x000a, 0x150b: 0x000a, + 0x150c: 0x000a, 0x150d: 0x000a, 0x1510: 0x000a, 0x1511: 0x000a, + 0x1512: 0x000a, 0x1513: 0x000a, 0x1514: 0x000a, 0x1515: 0x000a, 0x1516: 0x000a, 0x1517: 0x000a, + 0x1518: 0x000a, 0x1519: 0x000a, 0x151a: 0x000a, 0x151b: 0x000a, 0x151c: 0x000a, 0x151d: 0x000a, + 0x151e: 0x000a, 0x151f: 0x000a, + // Block 0x55, offset 0x1540 + 0x1549: 0x000a, 0x154a: 0x000a, 0x154b: 0x000a, + 0x1550: 0x000a, 0x1551: 0x000a, + 0x1552: 0x000a, 0x1553: 0x000a, 0x1554: 0x000a, 0x1555: 0x000a, 0x1556: 0x000a, 0x1557: 0x000a, + 0x1558: 0x000a, 0x1559: 0x000a, 0x155a: 0x000a, 0x155b: 0x000a, 0x155c: 0x000a, 0x155d: 0x000a, + 0x155e: 0x000a, 0x155f: 0x000a, 0x1560: 0x000a, 0x1561: 0x000a, 0x1562: 0x000a, 0x1563: 0x000a, + 0x1564: 0x000a, 0x1565: 0x000a, 0x1566: 0x000a, 0x1567: 0x000a, 0x1568: 0x000a, 0x1569: 0x000a, + 0x156a: 0x000a, 0x156b: 0x000a, 0x156c: 0x000a, 0x156d: 0x000a, 0x156e: 0x000a, 0x156f: 
0x000a, + 0x1570: 0x000a, 0x1571: 0x000a, 0x1572: 0x000a, 0x1573: 0x000a, 0x1574: 0x000a, 0x1575: 0x000a, + 0x1576: 0x000a, 0x1577: 0x000a, 0x1578: 0x000a, 0x1579: 0x000a, 0x157a: 0x000a, 0x157b: 0x000a, + 0x157c: 0x000a, 0x157d: 0x000a, 0x157e: 0x000a, 0x157f: 0x000a, + // Block 0x56, offset 0x1580 + 0x1580: 0x000a, 0x1581: 0x000a, 0x1582: 0x000a, 0x1583: 0x000a, 0x1584: 0x000a, 0x1585: 0x000a, + 0x1586: 0x000a, 0x1587: 0x000a, 0x1588: 0x000a, 0x1589: 0x000a, 0x158a: 0x000a, 0x158b: 0x000a, + 0x158c: 0x000a, 0x158d: 0x000a, 0x158e: 0x000a, 0x158f: 0x000a, 0x1590: 0x000a, 0x1591: 0x000a, + 0x1592: 0x000a, 0x1593: 0x000a, 0x1594: 0x000a, 0x1595: 0x000a, 0x1596: 0x000a, 0x1597: 0x000a, + 0x1598: 0x000a, 0x1599: 0x000a, 0x159a: 0x000a, 0x159b: 0x000a, 0x159c: 0x000a, 0x159d: 0x000a, + 0x159e: 0x000a, 0x159f: 0x000a, 0x15a0: 0x000a, 0x15a1: 0x000a, 0x15a2: 0x000a, 0x15a3: 0x000a, + 0x15a4: 0x000a, 0x15a5: 0x000a, 0x15a6: 0x000a, 0x15a7: 0x000a, 0x15a8: 0x000a, 0x15a9: 0x000a, + 0x15aa: 0x000a, 0x15ab: 0x000a, 0x15ac: 0x000a, 0x15ad: 0x000a, 0x15ae: 0x000a, 0x15af: 0x000a, + 0x15b0: 0x000a, 0x15b1: 0x000a, 0x15b2: 0x000a, 0x15b3: 0x000a, 0x15b4: 0x000a, 0x15b5: 0x000a, + 0x15b6: 0x000a, 0x15b7: 0x000a, 0x15b8: 0x000a, 0x15b9: 0x000a, 0x15ba: 0x000a, 0x15bb: 0x000a, + 0x15bc: 0x000a, 0x15bd: 0x000a, 0x15be: 0x000a, 0x15bf: 0x000a, + // Block 0x57, offset 0x15c0 + 0x15c0: 0x000a, 0x15c1: 0x000a, 0x15c2: 0x000a, 0x15c3: 0x000a, 0x15c4: 0x000a, 0x15c5: 0x000a, + 0x15c6: 0x000a, 0x15c7: 0x000a, 0x15c8: 0x000a, 0x15c9: 0x000a, 0x15ca: 0x000a, 0x15cb: 0x000a, + 0x15cc: 0x000a, 0x15cd: 0x000a, 0x15ce: 0x000a, 0x15cf: 0x000a, 0x15d0: 0x000a, 0x15d1: 0x000a, + 0x15d2: 0x0003, 0x15d3: 0x0004, 0x15d4: 0x000a, 0x15d5: 0x000a, 0x15d6: 0x000a, 0x15d7: 0x000a, + 0x15d8: 0x000a, 0x15d9: 0x000a, 0x15da: 0x000a, 0x15db: 0x000a, 0x15dc: 0x000a, 0x15dd: 0x000a, + 0x15de: 0x000a, 0x15df: 0x000a, 0x15e0: 0x000a, 0x15e1: 0x000a, 0x15e2: 0x000a, 0x15e3: 0x000a, + 0x15e4: 0x000a, 0x15e5: 0x000a, 0x15e6: 0x000a, 0x15e7: 0x000a, 0x15e8: 0x000a, 0x15e9: 0x000a, + 0x15ea: 0x000a, 0x15eb: 0x000a, 0x15ec: 0x000a, 0x15ed: 0x000a, 0x15ee: 0x000a, 0x15ef: 0x000a, + 0x15f0: 0x000a, 0x15f1: 0x000a, 0x15f2: 0x000a, 0x15f3: 0x000a, 0x15f4: 0x000a, 0x15f5: 0x000a, + 0x15f6: 0x000a, 0x15f7: 0x000a, 0x15f8: 0x000a, 0x15f9: 0x000a, 0x15fa: 0x000a, 0x15fb: 0x000a, + 0x15fc: 0x000a, 0x15fd: 0x000a, 0x15fe: 0x000a, 0x15ff: 0x000a, + // Block 0x58, offset 0x1600 + 0x1600: 0x000a, 0x1601: 0x000a, 0x1602: 0x000a, 0x1603: 0x000a, 0x1604: 0x000a, 0x1605: 0x000a, + 0x1606: 0x000a, 0x1607: 0x000a, 0x1608: 0x003a, 0x1609: 0x002a, 0x160a: 0x003a, 0x160b: 0x002a, + 0x160c: 0x000a, 0x160d: 0x000a, 0x160e: 0x000a, 0x160f: 0x000a, 0x1610: 0x000a, 0x1611: 0x000a, + 0x1612: 0x000a, 0x1613: 0x000a, 0x1614: 0x000a, 0x1615: 0x000a, 0x1616: 0x000a, 0x1617: 0x000a, + 0x1618: 0x000a, 0x1619: 0x000a, 0x161a: 0x000a, 0x161b: 0x000a, 0x161c: 0x000a, 0x161d: 0x000a, + 0x161e: 0x000a, 0x161f: 0x000a, 0x1620: 0x000a, 0x1621: 0x000a, 0x1622: 0x000a, 0x1623: 0x000a, + 0x1624: 0x000a, 0x1625: 0x000a, 0x1626: 0x000a, 0x1627: 0x000a, 0x1628: 0x000a, 0x1629: 0x009a, + 0x162a: 0x008a, 0x162b: 0x000a, 0x162c: 0x000a, 0x162d: 0x000a, 0x162e: 0x000a, 0x162f: 0x000a, + 0x1630: 0x000a, 0x1631: 0x000a, 0x1632: 0x000a, 0x1633: 0x000a, 0x1634: 0x000a, 0x1635: 0x000a, + // Block 0x59, offset 0x1640 + 0x167b: 0x000a, + 0x167c: 0x000a, 0x167d: 0x000a, 0x167e: 0x000a, 0x167f: 0x000a, + // Block 0x5a, offset 0x1680 + 0x1680: 0x000a, 0x1681: 0x000a, 0x1682: 0x000a, 0x1683: 0x000a, 
0x1684: 0x000a, 0x1685: 0x000a, + 0x1686: 0x000a, 0x1687: 0x000a, 0x1688: 0x000a, 0x1689: 0x000a, 0x168a: 0x000a, 0x168b: 0x000a, + 0x168c: 0x000a, 0x168d: 0x000a, 0x168e: 0x000a, 0x168f: 0x000a, 0x1690: 0x000a, 0x1691: 0x000a, + 0x1692: 0x000a, 0x1693: 0x000a, 0x1694: 0x000a, 0x1696: 0x000a, 0x1697: 0x000a, + 0x1698: 0x000a, 0x1699: 0x000a, 0x169a: 0x000a, 0x169b: 0x000a, 0x169c: 0x000a, 0x169d: 0x000a, + 0x169e: 0x000a, 0x169f: 0x000a, 0x16a0: 0x000a, 0x16a1: 0x000a, 0x16a2: 0x000a, 0x16a3: 0x000a, + 0x16a4: 0x000a, 0x16a5: 0x000a, 0x16a6: 0x000a, 0x16a7: 0x000a, 0x16a8: 0x000a, 0x16a9: 0x000a, + 0x16aa: 0x000a, 0x16ab: 0x000a, 0x16ac: 0x000a, 0x16ad: 0x000a, 0x16ae: 0x000a, 0x16af: 0x000a, + 0x16b0: 0x000a, 0x16b1: 0x000a, 0x16b2: 0x000a, 0x16b3: 0x000a, 0x16b4: 0x000a, 0x16b5: 0x000a, + 0x16b6: 0x000a, 0x16b7: 0x000a, 0x16b8: 0x000a, 0x16b9: 0x000a, 0x16ba: 0x000a, 0x16bb: 0x000a, + 0x16bc: 0x000a, 0x16bd: 0x000a, 0x16be: 0x000a, 0x16bf: 0x000a, + // Block 0x5b, offset 0x16c0 + 0x16c0: 0x000a, 0x16c1: 0x000a, 0x16c2: 0x000a, 0x16c3: 0x000a, 0x16c4: 0x000a, 0x16c5: 0x000a, + 0x16c6: 0x000a, 0x16c7: 0x000a, 0x16c8: 0x000a, 0x16c9: 0x000a, 0x16ca: 0x000a, 0x16cb: 0x000a, + 0x16cc: 0x000a, 0x16cd: 0x000a, 0x16ce: 0x000a, 0x16cf: 0x000a, 0x16d0: 0x000a, 0x16d1: 0x000a, + 0x16d2: 0x000a, 0x16d3: 0x000a, 0x16d4: 0x000a, 0x16d5: 0x000a, 0x16d6: 0x000a, 0x16d7: 0x000a, + 0x16d8: 0x000a, 0x16d9: 0x000a, 0x16da: 0x000a, 0x16db: 0x000a, 0x16dc: 0x000a, 0x16dd: 0x000a, + 0x16de: 0x000a, 0x16df: 0x000a, 0x16e0: 0x000a, 0x16e1: 0x000a, 0x16e2: 0x000a, 0x16e3: 0x000a, + 0x16e4: 0x000a, 0x16e5: 0x000a, 0x16e6: 0x000a, + // Block 0x5c, offset 0x1700 + 0x1700: 0x000a, 0x1701: 0x000a, 0x1702: 0x000a, 0x1703: 0x000a, 0x1704: 0x000a, 0x1705: 0x000a, + 0x1706: 0x000a, 0x1707: 0x000a, 0x1708: 0x000a, 0x1709: 0x000a, 0x170a: 0x000a, + 0x1720: 0x000a, 0x1721: 0x000a, 0x1722: 0x000a, 0x1723: 0x000a, + 0x1724: 0x000a, 0x1725: 0x000a, 0x1726: 0x000a, 0x1727: 0x000a, 0x1728: 0x000a, 0x1729: 0x000a, + 0x172a: 0x000a, 0x172b: 0x000a, 0x172c: 0x000a, 0x172d: 0x000a, 0x172e: 0x000a, 0x172f: 0x000a, + 0x1730: 0x000a, 0x1731: 0x000a, 0x1732: 0x000a, 0x1733: 0x000a, 0x1734: 0x000a, 0x1735: 0x000a, + 0x1736: 0x000a, 0x1737: 0x000a, 0x1738: 0x000a, 0x1739: 0x000a, 0x173a: 0x000a, 0x173b: 0x000a, + 0x173c: 0x000a, 0x173d: 0x000a, 0x173e: 0x000a, 0x173f: 0x000a, + // Block 0x5d, offset 0x1740 + 0x1740: 0x000a, 0x1741: 0x000a, 0x1742: 0x000a, 0x1743: 0x000a, 0x1744: 0x000a, 0x1745: 0x000a, + 0x1746: 0x000a, 0x1747: 0x000a, 0x1748: 0x0002, 0x1749: 0x0002, 0x174a: 0x0002, 0x174b: 0x0002, + 0x174c: 0x0002, 0x174d: 0x0002, 0x174e: 0x0002, 0x174f: 0x0002, 0x1750: 0x0002, 0x1751: 0x0002, + 0x1752: 0x0002, 0x1753: 0x0002, 0x1754: 0x0002, 0x1755: 0x0002, 0x1756: 0x0002, 0x1757: 0x0002, + 0x1758: 0x0002, 0x1759: 0x0002, 0x175a: 0x0002, 0x175b: 0x0002, + // Block 0x5e, offset 0x1780 + 0x17aa: 0x000a, 0x17ab: 0x000a, 0x17ac: 0x000a, 0x17ad: 0x000a, 0x17ae: 0x000a, 0x17af: 0x000a, + 0x17b0: 0x000a, 0x17b1: 0x000a, 0x17b2: 0x000a, 0x17b3: 0x000a, 0x17b4: 0x000a, 0x17b5: 0x000a, + 0x17b6: 0x000a, 0x17b7: 0x000a, 0x17b8: 0x000a, 0x17b9: 0x000a, 0x17ba: 0x000a, 0x17bb: 0x000a, + 0x17bc: 0x000a, 0x17bd: 0x000a, 0x17be: 0x000a, 0x17bf: 0x000a, + // Block 0x5f, offset 0x17c0 + 0x17c0: 0x000a, 0x17c1: 0x000a, 0x17c2: 0x000a, 0x17c3: 0x000a, 0x17c4: 0x000a, 0x17c5: 0x000a, + 0x17c6: 0x000a, 0x17c7: 0x000a, 0x17c8: 0x000a, 0x17c9: 0x000a, 0x17ca: 0x000a, 0x17cb: 0x000a, + 0x17cc: 0x000a, 0x17cd: 0x000a, 0x17ce: 0x000a, 0x17cf: 0x000a, 0x17d0: 
0x000a, 0x17d1: 0x000a, + 0x17d2: 0x000a, 0x17d3: 0x000a, 0x17d4: 0x000a, 0x17d5: 0x000a, 0x17d6: 0x000a, 0x17d7: 0x000a, + 0x17d8: 0x000a, 0x17d9: 0x000a, 0x17da: 0x000a, 0x17db: 0x000a, 0x17dc: 0x000a, 0x17dd: 0x000a, + 0x17de: 0x000a, 0x17df: 0x000a, 0x17e0: 0x000a, 0x17e1: 0x000a, 0x17e2: 0x000a, 0x17e3: 0x000a, + 0x17e4: 0x000a, 0x17e5: 0x000a, 0x17e6: 0x000a, 0x17e7: 0x000a, 0x17e8: 0x000a, 0x17e9: 0x000a, + 0x17ea: 0x000a, 0x17eb: 0x000a, 0x17ed: 0x000a, 0x17ee: 0x000a, 0x17ef: 0x000a, + 0x17f0: 0x000a, 0x17f1: 0x000a, 0x17f2: 0x000a, 0x17f3: 0x000a, 0x17f4: 0x000a, 0x17f5: 0x000a, + 0x17f6: 0x000a, 0x17f7: 0x000a, 0x17f8: 0x000a, 0x17f9: 0x000a, 0x17fa: 0x000a, 0x17fb: 0x000a, + 0x17fc: 0x000a, 0x17fd: 0x000a, 0x17fe: 0x000a, 0x17ff: 0x000a, + // Block 0x60, offset 0x1800 + 0x1800: 0x000a, 0x1801: 0x000a, 0x1802: 0x000a, 0x1803: 0x000a, 0x1804: 0x000a, 0x1805: 0x000a, + 0x1806: 0x000a, 0x1807: 0x000a, 0x1808: 0x000a, 0x1809: 0x000a, 0x180a: 0x000a, 0x180b: 0x000a, + 0x180c: 0x000a, 0x180d: 0x000a, 0x180e: 0x000a, 0x180f: 0x000a, 0x1810: 0x000a, 0x1811: 0x000a, + 0x1812: 0x000a, 0x1813: 0x000a, 0x1814: 0x000a, 0x1815: 0x000a, 0x1816: 0x000a, 0x1817: 0x000a, + 0x1818: 0x000a, 0x1819: 0x000a, 0x181a: 0x000a, 0x181b: 0x000a, 0x181c: 0x000a, 0x181d: 0x000a, + 0x181e: 0x000a, 0x181f: 0x000a, 0x1820: 0x000a, 0x1821: 0x000a, 0x1822: 0x000a, 0x1823: 0x000a, + 0x1824: 0x000a, 0x1825: 0x000a, 0x1826: 0x000a, 0x1827: 0x000a, 0x1828: 0x003a, 0x1829: 0x002a, + 0x182a: 0x003a, 0x182b: 0x002a, 0x182c: 0x003a, 0x182d: 0x002a, 0x182e: 0x003a, 0x182f: 0x002a, + 0x1830: 0x003a, 0x1831: 0x002a, 0x1832: 0x003a, 0x1833: 0x002a, 0x1834: 0x003a, 0x1835: 0x002a, + 0x1836: 0x000a, 0x1837: 0x000a, 0x1838: 0x000a, 0x1839: 0x000a, 0x183a: 0x000a, 0x183b: 0x000a, + 0x183c: 0x000a, 0x183d: 0x000a, 0x183e: 0x000a, 0x183f: 0x000a, + // Block 0x61, offset 0x1840 + 0x1840: 0x000a, 0x1841: 0x000a, 0x1842: 0x000a, 0x1843: 0x000a, 0x1844: 0x000a, 0x1845: 0x009a, + 0x1846: 0x008a, 0x1847: 0x000a, 0x1848: 0x000a, 0x1849: 0x000a, 0x184a: 0x000a, 0x184b: 0x000a, + 0x184c: 0x000a, 0x184d: 0x000a, 0x184e: 0x000a, 0x184f: 0x000a, 0x1850: 0x000a, 0x1851: 0x000a, + 0x1852: 0x000a, 0x1853: 0x000a, 0x1854: 0x000a, 0x1855: 0x000a, 0x1856: 0x000a, 0x1857: 0x000a, + 0x1858: 0x000a, 0x1859: 0x000a, 0x185a: 0x000a, 0x185b: 0x000a, 0x185c: 0x000a, 0x185d: 0x000a, + 0x185e: 0x000a, 0x185f: 0x000a, 0x1860: 0x000a, 0x1861: 0x000a, 0x1862: 0x000a, 0x1863: 0x000a, + 0x1864: 0x000a, 0x1865: 0x000a, 0x1866: 0x003a, 0x1867: 0x002a, 0x1868: 0x003a, 0x1869: 0x002a, + 0x186a: 0x003a, 0x186b: 0x002a, 0x186c: 0x003a, 0x186d: 0x002a, 0x186e: 0x003a, 0x186f: 0x002a, + 0x1870: 0x000a, 0x1871: 0x000a, 0x1872: 0x000a, 0x1873: 0x000a, 0x1874: 0x000a, 0x1875: 0x000a, + 0x1876: 0x000a, 0x1877: 0x000a, 0x1878: 0x000a, 0x1879: 0x000a, 0x187a: 0x000a, 0x187b: 0x000a, + 0x187c: 0x000a, 0x187d: 0x000a, 0x187e: 0x000a, 0x187f: 0x000a, + // Block 0x62, offset 0x1880 + 0x1880: 0x000a, 0x1881: 0x000a, 0x1882: 0x000a, 0x1883: 0x007a, 0x1884: 0x006a, 0x1885: 0x009a, + 0x1886: 0x008a, 0x1887: 0x00ba, 0x1888: 0x00aa, 0x1889: 0x009a, 0x188a: 0x008a, 0x188b: 0x007a, + 0x188c: 0x006a, 0x188d: 0x00da, 0x188e: 0x002a, 0x188f: 0x003a, 0x1890: 0x00ca, 0x1891: 0x009a, + 0x1892: 0x008a, 0x1893: 0x007a, 0x1894: 0x006a, 0x1895: 0x009a, 0x1896: 0x008a, 0x1897: 0x00ba, + 0x1898: 0x00aa, 0x1899: 0x000a, 0x189a: 0x000a, 0x189b: 0x000a, 0x189c: 0x000a, 0x189d: 0x000a, + 0x189e: 0x000a, 0x189f: 0x000a, 0x18a0: 0x000a, 0x18a1: 0x000a, 0x18a2: 0x000a, 0x18a3: 0x000a, + 0x18a4: 0x000a, 
0x18a5: 0x000a, 0x18a6: 0x000a, 0x18a7: 0x000a, 0x18a8: 0x000a, 0x18a9: 0x000a, + 0x18aa: 0x000a, 0x18ab: 0x000a, 0x18ac: 0x000a, 0x18ad: 0x000a, 0x18ae: 0x000a, 0x18af: 0x000a, + 0x18b0: 0x000a, 0x18b1: 0x000a, 0x18b2: 0x000a, 0x18b3: 0x000a, 0x18b4: 0x000a, 0x18b5: 0x000a, + 0x18b6: 0x000a, 0x18b7: 0x000a, 0x18b8: 0x000a, 0x18b9: 0x000a, 0x18ba: 0x000a, 0x18bb: 0x000a, + 0x18bc: 0x000a, 0x18bd: 0x000a, 0x18be: 0x000a, 0x18bf: 0x000a, + // Block 0x63, offset 0x18c0 + 0x18c0: 0x000a, 0x18c1: 0x000a, 0x18c2: 0x000a, 0x18c3: 0x000a, 0x18c4: 0x000a, 0x18c5: 0x000a, + 0x18c6: 0x000a, 0x18c7: 0x000a, 0x18c8: 0x000a, 0x18c9: 0x000a, 0x18ca: 0x000a, 0x18cb: 0x000a, + 0x18cc: 0x000a, 0x18cd: 0x000a, 0x18ce: 0x000a, 0x18cf: 0x000a, 0x18d0: 0x000a, 0x18d1: 0x000a, + 0x18d2: 0x000a, 0x18d3: 0x000a, 0x18d4: 0x000a, 0x18d5: 0x000a, 0x18d6: 0x000a, 0x18d7: 0x000a, + 0x18d8: 0x003a, 0x18d9: 0x002a, 0x18da: 0x003a, 0x18db: 0x002a, 0x18dc: 0x000a, 0x18dd: 0x000a, + 0x18de: 0x000a, 0x18df: 0x000a, 0x18e0: 0x000a, 0x18e1: 0x000a, 0x18e2: 0x000a, 0x18e3: 0x000a, + 0x18e4: 0x000a, 0x18e5: 0x000a, 0x18e6: 0x000a, 0x18e7: 0x000a, 0x18e8: 0x000a, 0x18e9: 0x000a, + 0x18ea: 0x000a, 0x18eb: 0x000a, 0x18ec: 0x000a, 0x18ed: 0x000a, 0x18ee: 0x000a, 0x18ef: 0x000a, + 0x18f0: 0x000a, 0x18f1: 0x000a, 0x18f2: 0x000a, 0x18f3: 0x000a, 0x18f4: 0x000a, 0x18f5: 0x000a, + 0x18f6: 0x000a, 0x18f7: 0x000a, 0x18f8: 0x000a, 0x18f9: 0x000a, 0x18fa: 0x000a, 0x18fb: 0x000a, + 0x18fc: 0x003a, 0x18fd: 0x002a, 0x18fe: 0x000a, 0x18ff: 0x000a, + // Block 0x64, offset 0x1900 + 0x1900: 0x000a, 0x1901: 0x000a, 0x1902: 0x000a, 0x1903: 0x000a, 0x1904: 0x000a, 0x1905: 0x000a, + 0x1906: 0x000a, 0x1907: 0x000a, 0x1908: 0x000a, 0x1909: 0x000a, 0x190a: 0x000a, 0x190b: 0x000a, + 0x190c: 0x000a, 0x190d: 0x000a, 0x190e: 0x000a, 0x190f: 0x000a, 0x1910: 0x000a, 0x1911: 0x000a, + 0x1912: 0x000a, 0x1913: 0x000a, 0x1914: 0x000a, 0x1915: 0x000a, 0x1916: 0x000a, 0x1917: 0x000a, + 0x1918: 0x000a, 0x1919: 0x000a, 0x191a: 0x000a, 0x191b: 0x000a, 0x191c: 0x000a, 0x191d: 0x000a, + 0x191e: 0x000a, 0x191f: 0x000a, 0x1920: 0x000a, 0x1921: 0x000a, 0x1922: 0x000a, 0x1923: 0x000a, + 0x1924: 0x000a, 0x1925: 0x000a, 0x1926: 0x000a, 0x1927: 0x000a, 0x1928: 0x000a, 0x1929: 0x000a, + 0x192a: 0x000a, 0x192b: 0x000a, 0x192c: 0x000a, 0x192d: 0x000a, 0x192e: 0x000a, 0x192f: 0x000a, + 0x1930: 0x000a, 0x1931: 0x000a, 0x1932: 0x000a, 0x1933: 0x000a, + 0x1936: 0x000a, 0x1937: 0x000a, 0x1938: 0x000a, 0x1939: 0x000a, 0x193a: 0x000a, 0x193b: 0x000a, + 0x193c: 0x000a, 0x193d: 0x000a, 0x193e: 0x000a, 0x193f: 0x000a, + // Block 0x65, offset 0x1940 + 0x1940: 0x000a, 0x1941: 0x000a, 0x1942: 0x000a, 0x1943: 0x000a, 0x1944: 0x000a, 0x1945: 0x000a, + 0x1946: 0x000a, 0x1947: 0x000a, 0x1948: 0x000a, 0x1949: 0x000a, 0x194a: 0x000a, 0x194b: 0x000a, + 0x194c: 0x000a, 0x194d: 0x000a, 0x194e: 0x000a, 0x194f: 0x000a, 0x1950: 0x000a, 0x1951: 0x000a, + 0x1952: 0x000a, 0x1953: 0x000a, 0x1954: 0x000a, 0x1955: 0x000a, + 0x1958: 0x000a, 0x1959: 0x000a, 0x195a: 0x000a, 0x195b: 0x000a, 0x195c: 0x000a, 0x195d: 0x000a, + 0x195e: 0x000a, 0x195f: 0x000a, 0x1960: 0x000a, 0x1961: 0x000a, 0x1962: 0x000a, 0x1963: 0x000a, + 0x1964: 0x000a, 0x1965: 0x000a, 0x1966: 0x000a, 0x1967: 0x000a, 0x1968: 0x000a, 0x1969: 0x000a, + 0x196a: 0x000a, 0x196b: 0x000a, 0x196c: 0x000a, 0x196d: 0x000a, 0x196e: 0x000a, 0x196f: 0x000a, + 0x1970: 0x000a, 0x1971: 0x000a, 0x1972: 0x000a, 0x1973: 0x000a, 0x1974: 0x000a, 0x1975: 0x000a, + 0x1976: 0x000a, 0x1977: 0x000a, 0x1978: 0x000a, 0x1979: 0x000a, 0x197a: 0x000a, 0x197b: 0x000a, + 0x197c: 
0x000a, 0x197d: 0x000a, 0x197e: 0x000a, 0x197f: 0x000a, + // Block 0x66, offset 0x1980 + 0x1980: 0x000a, 0x1981: 0x000a, 0x1982: 0x000a, 0x1983: 0x000a, 0x1984: 0x000a, 0x1985: 0x000a, + 0x1986: 0x000a, 0x1987: 0x000a, 0x1988: 0x000a, 0x198a: 0x000a, 0x198b: 0x000a, + 0x198c: 0x000a, 0x198d: 0x000a, 0x198e: 0x000a, 0x198f: 0x000a, 0x1990: 0x000a, 0x1991: 0x000a, + 0x1992: 0x000a, 0x1993: 0x000a, 0x1994: 0x000a, 0x1995: 0x000a, 0x1996: 0x000a, 0x1997: 0x000a, + 0x1998: 0x000a, 0x1999: 0x000a, 0x199a: 0x000a, 0x199b: 0x000a, 0x199c: 0x000a, 0x199d: 0x000a, + 0x199e: 0x000a, 0x199f: 0x000a, 0x19a0: 0x000a, 0x19a1: 0x000a, 0x19a2: 0x000a, 0x19a3: 0x000a, + 0x19a4: 0x000a, 0x19a5: 0x000a, 0x19a6: 0x000a, 0x19a7: 0x000a, 0x19a8: 0x000a, 0x19a9: 0x000a, + 0x19aa: 0x000a, 0x19ab: 0x000a, 0x19ac: 0x000a, 0x19ad: 0x000a, 0x19ae: 0x000a, 0x19af: 0x000a, + 0x19b0: 0x000a, 0x19b1: 0x000a, 0x19b2: 0x000a, 0x19b3: 0x000a, 0x19b4: 0x000a, 0x19b5: 0x000a, + 0x19b6: 0x000a, 0x19b7: 0x000a, 0x19b8: 0x000a, 0x19b9: 0x000a, 0x19ba: 0x000a, 0x19bb: 0x000a, + 0x19bc: 0x000a, 0x19bd: 0x000a, 0x19be: 0x000a, + // Block 0x67, offset 0x19c0 + 0x19e5: 0x000a, 0x19e6: 0x000a, 0x19e7: 0x000a, 0x19e8: 0x000a, 0x19e9: 0x000a, + 0x19ea: 0x000a, 0x19ef: 0x000c, + 0x19f0: 0x000c, 0x19f1: 0x000c, + 0x19f9: 0x000a, 0x19fa: 0x000a, 0x19fb: 0x000a, + 0x19fc: 0x000a, 0x19fd: 0x000a, 0x19fe: 0x000a, 0x19ff: 0x000a, + // Block 0x68, offset 0x1a00 + 0x1a3f: 0x000c, + // Block 0x69, offset 0x1a40 + 0x1a60: 0x000c, 0x1a61: 0x000c, 0x1a62: 0x000c, 0x1a63: 0x000c, + 0x1a64: 0x000c, 0x1a65: 0x000c, 0x1a66: 0x000c, 0x1a67: 0x000c, 0x1a68: 0x000c, 0x1a69: 0x000c, + 0x1a6a: 0x000c, 0x1a6b: 0x000c, 0x1a6c: 0x000c, 0x1a6d: 0x000c, 0x1a6e: 0x000c, 0x1a6f: 0x000c, + 0x1a70: 0x000c, 0x1a71: 0x000c, 0x1a72: 0x000c, 0x1a73: 0x000c, 0x1a74: 0x000c, 0x1a75: 0x000c, + 0x1a76: 0x000c, 0x1a77: 0x000c, 0x1a78: 0x000c, 0x1a79: 0x000c, 0x1a7a: 0x000c, 0x1a7b: 0x000c, + 0x1a7c: 0x000c, 0x1a7d: 0x000c, 0x1a7e: 0x000c, 0x1a7f: 0x000c, + // Block 0x6a, offset 0x1a80 + 0x1a80: 0x000a, 0x1a81: 0x000a, 0x1a82: 0x000a, 0x1a83: 0x000a, 0x1a84: 0x000a, 0x1a85: 0x000a, + 0x1a86: 0x000a, 0x1a87: 0x000a, 0x1a88: 0x000a, 0x1a89: 0x000a, 0x1a8a: 0x000a, 0x1a8b: 0x000a, + 0x1a8c: 0x000a, 0x1a8d: 0x000a, 0x1a8e: 0x000a, 0x1a8f: 0x000a, 0x1a90: 0x000a, 0x1a91: 0x000a, + 0x1a92: 0x000a, 0x1a93: 0x000a, 0x1a94: 0x000a, 0x1a95: 0x000a, 0x1a96: 0x000a, 0x1a97: 0x000a, + 0x1a98: 0x000a, 0x1a99: 0x000a, 0x1a9a: 0x000a, 0x1a9b: 0x000a, 0x1a9c: 0x000a, 0x1a9d: 0x000a, + 0x1a9e: 0x000a, 0x1a9f: 0x000a, 0x1aa0: 0x000a, 0x1aa1: 0x000a, 0x1aa2: 0x003a, 0x1aa3: 0x002a, + 0x1aa4: 0x003a, 0x1aa5: 0x002a, 0x1aa6: 0x003a, 0x1aa7: 0x002a, 0x1aa8: 0x003a, 0x1aa9: 0x002a, + 0x1aaa: 0x000a, 0x1aab: 0x000a, 0x1aac: 0x000a, 0x1aad: 0x000a, 0x1aae: 0x000a, 0x1aaf: 0x000a, + 0x1ab0: 0x000a, 0x1ab1: 0x000a, 0x1ab2: 0x000a, 0x1ab3: 0x000a, 0x1ab4: 0x000a, 0x1ab5: 0x000a, + 0x1ab6: 0x000a, 0x1ab7: 0x000a, 0x1ab8: 0x000a, 0x1ab9: 0x000a, 0x1aba: 0x000a, 0x1abb: 0x000a, + 0x1abc: 0x000a, 0x1abd: 0x000a, 0x1abe: 0x000a, 0x1abf: 0x000a, + // Block 0x6b, offset 0x1ac0 + 0x1ac0: 0x000a, 0x1ac1: 0x000a, 0x1ac2: 0x000a, 0x1ac3: 0x000a, 0x1ac4: 0x000a, 0x1ac5: 0x000a, + 0x1ac6: 0x000a, 0x1ac7: 0x000a, 0x1ac8: 0x000a, 0x1ac9: 0x000a, 0x1aca: 0x000a, 0x1acb: 0x000a, + 0x1acc: 0x000a, 0x1acd: 0x000a, 0x1ace: 0x000a, + // Block 0x6c, offset 0x1b00 + 0x1b00: 0x000a, 0x1b01: 0x000a, 0x1b02: 0x000a, 0x1b03: 0x000a, 0x1b04: 0x000a, 0x1b05: 0x000a, + 0x1b06: 0x000a, 0x1b07: 0x000a, 0x1b08: 0x000a, 0x1b09: 0x000a, 
0x1b0a: 0x000a, 0x1b0b: 0x000a, + 0x1b0c: 0x000a, 0x1b0d: 0x000a, 0x1b0e: 0x000a, 0x1b0f: 0x000a, 0x1b10: 0x000a, 0x1b11: 0x000a, + 0x1b12: 0x000a, 0x1b13: 0x000a, 0x1b14: 0x000a, 0x1b15: 0x000a, 0x1b16: 0x000a, 0x1b17: 0x000a, + 0x1b18: 0x000a, 0x1b19: 0x000a, 0x1b1b: 0x000a, 0x1b1c: 0x000a, 0x1b1d: 0x000a, + 0x1b1e: 0x000a, 0x1b1f: 0x000a, 0x1b20: 0x000a, 0x1b21: 0x000a, 0x1b22: 0x000a, 0x1b23: 0x000a, + 0x1b24: 0x000a, 0x1b25: 0x000a, 0x1b26: 0x000a, 0x1b27: 0x000a, 0x1b28: 0x000a, 0x1b29: 0x000a, + 0x1b2a: 0x000a, 0x1b2b: 0x000a, 0x1b2c: 0x000a, 0x1b2d: 0x000a, 0x1b2e: 0x000a, 0x1b2f: 0x000a, + 0x1b30: 0x000a, 0x1b31: 0x000a, 0x1b32: 0x000a, 0x1b33: 0x000a, 0x1b34: 0x000a, 0x1b35: 0x000a, + 0x1b36: 0x000a, 0x1b37: 0x000a, 0x1b38: 0x000a, 0x1b39: 0x000a, 0x1b3a: 0x000a, 0x1b3b: 0x000a, + 0x1b3c: 0x000a, 0x1b3d: 0x000a, 0x1b3e: 0x000a, 0x1b3f: 0x000a, + // Block 0x6d, offset 0x1b40 + 0x1b40: 0x000a, 0x1b41: 0x000a, 0x1b42: 0x000a, 0x1b43: 0x000a, 0x1b44: 0x000a, 0x1b45: 0x000a, + 0x1b46: 0x000a, 0x1b47: 0x000a, 0x1b48: 0x000a, 0x1b49: 0x000a, 0x1b4a: 0x000a, 0x1b4b: 0x000a, + 0x1b4c: 0x000a, 0x1b4d: 0x000a, 0x1b4e: 0x000a, 0x1b4f: 0x000a, 0x1b50: 0x000a, 0x1b51: 0x000a, + 0x1b52: 0x000a, 0x1b53: 0x000a, 0x1b54: 0x000a, 0x1b55: 0x000a, 0x1b56: 0x000a, 0x1b57: 0x000a, + 0x1b58: 0x000a, 0x1b59: 0x000a, 0x1b5a: 0x000a, 0x1b5b: 0x000a, 0x1b5c: 0x000a, 0x1b5d: 0x000a, + 0x1b5e: 0x000a, 0x1b5f: 0x000a, 0x1b60: 0x000a, 0x1b61: 0x000a, 0x1b62: 0x000a, 0x1b63: 0x000a, + 0x1b64: 0x000a, 0x1b65: 0x000a, 0x1b66: 0x000a, 0x1b67: 0x000a, 0x1b68: 0x000a, 0x1b69: 0x000a, + 0x1b6a: 0x000a, 0x1b6b: 0x000a, 0x1b6c: 0x000a, 0x1b6d: 0x000a, 0x1b6e: 0x000a, 0x1b6f: 0x000a, + 0x1b70: 0x000a, 0x1b71: 0x000a, 0x1b72: 0x000a, 0x1b73: 0x000a, + // Block 0x6e, offset 0x1b80 + 0x1b80: 0x000a, 0x1b81: 0x000a, 0x1b82: 0x000a, 0x1b83: 0x000a, 0x1b84: 0x000a, 0x1b85: 0x000a, + 0x1b86: 0x000a, 0x1b87: 0x000a, 0x1b88: 0x000a, 0x1b89: 0x000a, 0x1b8a: 0x000a, 0x1b8b: 0x000a, + 0x1b8c: 0x000a, 0x1b8d: 0x000a, 0x1b8e: 0x000a, 0x1b8f: 0x000a, 0x1b90: 0x000a, 0x1b91: 0x000a, + 0x1b92: 0x000a, 0x1b93: 0x000a, 0x1b94: 0x000a, 0x1b95: 0x000a, + 0x1bb0: 0x000a, 0x1bb1: 0x000a, 0x1bb2: 0x000a, 0x1bb3: 0x000a, 0x1bb4: 0x000a, 0x1bb5: 0x000a, + 0x1bb6: 0x000a, 0x1bb7: 0x000a, 0x1bb8: 0x000a, 0x1bb9: 0x000a, 0x1bba: 0x000a, 0x1bbb: 0x000a, + // Block 0x6f, offset 0x1bc0 + 0x1bc0: 0x0009, 0x1bc1: 0x000a, 0x1bc2: 0x000a, 0x1bc3: 0x000a, 0x1bc4: 0x000a, + 0x1bc8: 0x003a, 0x1bc9: 0x002a, 0x1bca: 0x003a, 0x1bcb: 0x002a, + 0x1bcc: 0x003a, 0x1bcd: 0x002a, 0x1bce: 0x003a, 0x1bcf: 0x002a, 0x1bd0: 0x003a, 0x1bd1: 0x002a, + 0x1bd2: 0x000a, 0x1bd3: 0x000a, 0x1bd4: 0x003a, 0x1bd5: 0x002a, 0x1bd6: 0x003a, 0x1bd7: 0x002a, + 0x1bd8: 0x003a, 0x1bd9: 0x002a, 0x1bda: 0x003a, 0x1bdb: 0x002a, 0x1bdc: 0x000a, 0x1bdd: 0x000a, + 0x1bde: 0x000a, 0x1bdf: 0x000a, 0x1be0: 0x000a, + 0x1bea: 0x000c, 0x1beb: 0x000c, 0x1bec: 0x000c, 0x1bed: 0x000c, + 0x1bf0: 0x000a, + 0x1bf6: 0x000a, 0x1bf7: 0x000a, + 0x1bfd: 0x000a, 0x1bfe: 0x000a, 0x1bff: 0x000a, + // Block 0x70, offset 0x1c00 + 0x1c19: 0x000c, 0x1c1a: 0x000c, 0x1c1b: 0x000a, 0x1c1c: 0x000a, + 0x1c20: 0x000a, + // Block 0x71, offset 0x1c40 + 0x1c7b: 0x000a, + // Block 0x72, offset 0x1c80 + 0x1c80: 0x000a, 0x1c81: 0x000a, 0x1c82: 0x000a, 0x1c83: 0x000a, 0x1c84: 0x000a, 0x1c85: 0x000a, + 0x1c86: 0x000a, 0x1c87: 0x000a, 0x1c88: 0x000a, 0x1c89: 0x000a, 0x1c8a: 0x000a, 0x1c8b: 0x000a, + 0x1c8c: 0x000a, 0x1c8d: 0x000a, 0x1c8e: 0x000a, 0x1c8f: 0x000a, 0x1c90: 0x000a, 0x1c91: 0x000a, + 0x1c92: 0x000a, 0x1c93: 0x000a, 
0x1c94: 0x000a, 0x1c95: 0x000a, 0x1c96: 0x000a, 0x1c97: 0x000a, + 0x1c98: 0x000a, 0x1c99: 0x000a, 0x1c9a: 0x000a, 0x1c9b: 0x000a, 0x1c9c: 0x000a, 0x1c9d: 0x000a, + 0x1c9e: 0x000a, 0x1c9f: 0x000a, 0x1ca0: 0x000a, 0x1ca1: 0x000a, 0x1ca2: 0x000a, 0x1ca3: 0x000a, + // Block 0x73, offset 0x1cc0 + 0x1cdd: 0x000a, + 0x1cde: 0x000a, + // Block 0x74, offset 0x1d00 + 0x1d10: 0x000a, 0x1d11: 0x000a, + 0x1d12: 0x000a, 0x1d13: 0x000a, 0x1d14: 0x000a, 0x1d15: 0x000a, 0x1d16: 0x000a, 0x1d17: 0x000a, + 0x1d18: 0x000a, 0x1d19: 0x000a, 0x1d1a: 0x000a, 0x1d1b: 0x000a, 0x1d1c: 0x000a, 0x1d1d: 0x000a, + 0x1d1e: 0x000a, 0x1d1f: 0x000a, + 0x1d3c: 0x000a, 0x1d3d: 0x000a, 0x1d3e: 0x000a, + // Block 0x75, offset 0x1d40 + 0x1d71: 0x000a, 0x1d72: 0x000a, 0x1d73: 0x000a, 0x1d74: 0x000a, 0x1d75: 0x000a, + 0x1d76: 0x000a, 0x1d77: 0x000a, 0x1d78: 0x000a, 0x1d79: 0x000a, 0x1d7a: 0x000a, 0x1d7b: 0x000a, + 0x1d7c: 0x000a, 0x1d7d: 0x000a, 0x1d7e: 0x000a, 0x1d7f: 0x000a, + // Block 0x76, offset 0x1d80 + 0x1d8c: 0x000a, 0x1d8d: 0x000a, 0x1d8e: 0x000a, 0x1d8f: 0x000a, + // Block 0x77, offset 0x1dc0 + 0x1df7: 0x000a, 0x1df8: 0x000a, 0x1df9: 0x000a, 0x1dfa: 0x000a, + // Block 0x78, offset 0x1e00 + 0x1e1e: 0x000a, 0x1e1f: 0x000a, + 0x1e3f: 0x000a, + // Block 0x79, offset 0x1e40 + 0x1e50: 0x000a, 0x1e51: 0x000a, + 0x1e52: 0x000a, 0x1e53: 0x000a, 0x1e54: 0x000a, 0x1e55: 0x000a, 0x1e56: 0x000a, 0x1e57: 0x000a, + 0x1e58: 0x000a, 0x1e59: 0x000a, 0x1e5a: 0x000a, 0x1e5b: 0x000a, 0x1e5c: 0x000a, 0x1e5d: 0x000a, + 0x1e5e: 0x000a, 0x1e5f: 0x000a, 0x1e60: 0x000a, 0x1e61: 0x000a, 0x1e62: 0x000a, 0x1e63: 0x000a, + 0x1e64: 0x000a, 0x1e65: 0x000a, 0x1e66: 0x000a, 0x1e67: 0x000a, 0x1e68: 0x000a, 0x1e69: 0x000a, + 0x1e6a: 0x000a, 0x1e6b: 0x000a, 0x1e6c: 0x000a, 0x1e6d: 0x000a, 0x1e6e: 0x000a, 0x1e6f: 0x000a, + 0x1e70: 0x000a, 0x1e71: 0x000a, 0x1e72: 0x000a, 0x1e73: 0x000a, 0x1e74: 0x000a, 0x1e75: 0x000a, + 0x1e76: 0x000a, 0x1e77: 0x000a, 0x1e78: 0x000a, 0x1e79: 0x000a, 0x1e7a: 0x000a, 0x1e7b: 0x000a, + 0x1e7c: 0x000a, 0x1e7d: 0x000a, 0x1e7e: 0x000a, 0x1e7f: 0x000a, + // Block 0x7a, offset 0x1e80 + 0x1e80: 0x000a, 0x1e81: 0x000a, 0x1e82: 0x000a, 0x1e83: 0x000a, 0x1e84: 0x000a, 0x1e85: 0x000a, + 0x1e86: 0x000a, + // Block 0x7b, offset 0x1ec0 + 0x1ecd: 0x000a, 0x1ece: 0x000a, 0x1ecf: 0x000a, + // Block 0x7c, offset 0x1f00 + 0x1f2f: 0x000c, + 0x1f30: 0x000c, 0x1f31: 0x000c, 0x1f32: 0x000c, 0x1f33: 0x000a, 0x1f34: 0x000c, 0x1f35: 0x000c, + 0x1f36: 0x000c, 0x1f37: 0x000c, 0x1f38: 0x000c, 0x1f39: 0x000c, 0x1f3a: 0x000c, 0x1f3b: 0x000c, + 0x1f3c: 0x000c, 0x1f3d: 0x000c, 0x1f3e: 0x000a, 0x1f3f: 0x000a, + // Block 0x7d, offset 0x1f40 + 0x1f5e: 0x000c, 0x1f5f: 0x000c, + // Block 0x7e, offset 0x1f80 + 0x1fb0: 0x000c, 0x1fb1: 0x000c, + // Block 0x7f, offset 0x1fc0 + 0x1fc0: 0x000a, 0x1fc1: 0x000a, 0x1fc2: 0x000a, 0x1fc3: 0x000a, 0x1fc4: 0x000a, 0x1fc5: 0x000a, + 0x1fc6: 0x000a, 0x1fc7: 0x000a, 0x1fc8: 0x000a, 0x1fc9: 0x000a, 0x1fca: 0x000a, 0x1fcb: 0x000a, + 0x1fcc: 0x000a, 0x1fcd: 0x000a, 0x1fce: 0x000a, 0x1fcf: 0x000a, 0x1fd0: 0x000a, 0x1fd1: 0x000a, + 0x1fd2: 0x000a, 0x1fd3: 0x000a, 0x1fd4: 0x000a, 0x1fd5: 0x000a, 0x1fd6: 0x000a, 0x1fd7: 0x000a, + 0x1fd8: 0x000a, 0x1fd9: 0x000a, 0x1fda: 0x000a, 0x1fdb: 0x000a, 0x1fdc: 0x000a, 0x1fdd: 0x000a, + 0x1fde: 0x000a, 0x1fdf: 0x000a, 0x1fe0: 0x000a, 0x1fe1: 0x000a, + // Block 0x80, offset 0x2000 + 0x2008: 0x000a, + // Block 0x81, offset 0x2040 + 0x2042: 0x000c, + 0x2046: 0x000c, 0x204b: 0x000c, + 0x2065: 0x000c, 0x2066: 0x000c, 0x2068: 0x000a, 0x2069: 0x000a, + 0x206a: 0x000a, 0x206b: 0x000a, + 0x2078: 0x0004, 
0x2079: 0x0004, + // Block 0x82, offset 0x2080 + 0x20b4: 0x000a, 0x20b5: 0x000a, + 0x20b6: 0x000a, 0x20b7: 0x000a, + // Block 0x83, offset 0x20c0 + 0x20c4: 0x000c, 0x20c5: 0x000c, + 0x20e0: 0x000c, 0x20e1: 0x000c, 0x20e2: 0x000c, 0x20e3: 0x000c, + 0x20e4: 0x000c, 0x20e5: 0x000c, 0x20e6: 0x000c, 0x20e7: 0x000c, 0x20e8: 0x000c, 0x20e9: 0x000c, + 0x20ea: 0x000c, 0x20eb: 0x000c, 0x20ec: 0x000c, 0x20ed: 0x000c, 0x20ee: 0x000c, 0x20ef: 0x000c, + 0x20f0: 0x000c, 0x20f1: 0x000c, + 0x20ff: 0x000c, + // Block 0x84, offset 0x2100 + 0x2126: 0x000c, 0x2127: 0x000c, 0x2128: 0x000c, 0x2129: 0x000c, + 0x212a: 0x000c, 0x212b: 0x000c, 0x212c: 0x000c, 0x212d: 0x000c, + // Block 0x85, offset 0x2140 + 0x2147: 0x000c, 0x2148: 0x000c, 0x2149: 0x000c, 0x214a: 0x000c, 0x214b: 0x000c, + 0x214c: 0x000c, 0x214d: 0x000c, 0x214e: 0x000c, 0x214f: 0x000c, 0x2150: 0x000c, 0x2151: 0x000c, + // Block 0x86, offset 0x2180 + 0x2180: 0x000c, 0x2181: 0x000c, 0x2182: 0x000c, + 0x21b3: 0x000c, + 0x21b6: 0x000c, 0x21b7: 0x000c, 0x21b8: 0x000c, 0x21b9: 0x000c, + 0x21bc: 0x000c, + // Block 0x87, offset 0x21c0 + 0x21e5: 0x000c, + // Block 0x88, offset 0x2200 + 0x2229: 0x000c, + 0x222a: 0x000c, 0x222b: 0x000c, 0x222c: 0x000c, 0x222d: 0x000c, 0x222e: 0x000c, + 0x2231: 0x000c, 0x2232: 0x000c, 0x2235: 0x000c, + 0x2236: 0x000c, + // Block 0x89, offset 0x2240 + 0x2243: 0x000c, + 0x224c: 0x000c, + 0x227c: 0x000c, + // Block 0x8a, offset 0x2280 + 0x22b0: 0x000c, 0x22b2: 0x000c, 0x22b3: 0x000c, 0x22b4: 0x000c, + 0x22b7: 0x000c, 0x22b8: 0x000c, + 0x22be: 0x000c, 0x22bf: 0x000c, + // Block 0x8b, offset 0x22c0 + 0x22c1: 0x000c, + 0x22ec: 0x000c, 0x22ed: 0x000c, + 0x22f6: 0x000c, + // Block 0x8c, offset 0x2300 + 0x2325: 0x000c, 0x2328: 0x000c, + 0x232d: 0x000c, + // Block 0x8d, offset 0x2340 + 0x235d: 0x0001, + 0x235e: 0x000c, 0x235f: 0x0001, 0x2360: 0x0001, 0x2361: 0x0001, 0x2362: 0x0001, 0x2363: 0x0001, + 0x2364: 0x0001, 0x2365: 0x0001, 0x2366: 0x0001, 0x2367: 0x0001, 0x2368: 0x0001, 0x2369: 0x0003, + 0x236a: 0x0001, 0x236b: 0x0001, 0x236c: 0x0001, 0x236d: 0x0001, 0x236e: 0x0001, 0x236f: 0x0001, + 0x2370: 0x0001, 0x2371: 0x0001, 0x2372: 0x0001, 0x2373: 0x0001, 0x2374: 0x0001, 0x2375: 0x0001, + 0x2376: 0x0001, 0x2377: 0x0001, 0x2378: 0x0001, 0x2379: 0x0001, 0x237a: 0x0001, 0x237b: 0x0001, + 0x237c: 0x0001, 0x237d: 0x0001, 0x237e: 0x0001, 0x237f: 0x0001, + // Block 0x8e, offset 0x2380 + 0x2380: 0x0001, 0x2381: 0x0001, 0x2382: 0x0001, 0x2383: 0x0001, 0x2384: 0x0001, 0x2385: 0x0001, + 0x2386: 0x0001, 0x2387: 0x0001, 0x2388: 0x0001, 0x2389: 0x0001, 0x238a: 0x0001, 0x238b: 0x0001, + 0x238c: 0x0001, 0x238d: 0x0001, 0x238e: 0x0001, 0x238f: 0x0001, 0x2390: 0x000d, 0x2391: 0x000d, + 0x2392: 0x000d, 0x2393: 0x000d, 0x2394: 0x000d, 0x2395: 0x000d, 0x2396: 0x000d, 0x2397: 0x000d, + 0x2398: 0x000d, 0x2399: 0x000d, 0x239a: 0x000d, 0x239b: 0x000d, 0x239c: 0x000d, 0x239d: 0x000d, + 0x239e: 0x000d, 0x239f: 0x000d, 0x23a0: 0x000d, 0x23a1: 0x000d, 0x23a2: 0x000d, 0x23a3: 0x000d, + 0x23a4: 0x000d, 0x23a5: 0x000d, 0x23a6: 0x000d, 0x23a7: 0x000d, 0x23a8: 0x000d, 0x23a9: 0x000d, + 0x23aa: 0x000d, 0x23ab: 0x000d, 0x23ac: 0x000d, 0x23ad: 0x000d, 0x23ae: 0x000d, 0x23af: 0x000d, + 0x23b0: 0x000d, 0x23b1: 0x000d, 0x23b2: 0x000d, 0x23b3: 0x000d, 0x23b4: 0x000d, 0x23b5: 0x000d, + 0x23b6: 0x000d, 0x23b7: 0x000d, 0x23b8: 0x000d, 0x23b9: 0x000d, 0x23ba: 0x000d, 0x23bb: 0x000d, + 0x23bc: 0x000d, 0x23bd: 0x000d, 0x23be: 0x000d, 0x23bf: 0x000d, + // Block 0x8f, offset 0x23c0 + 0x23c0: 0x000d, 0x23c1: 0x000d, 0x23c2: 0x000d, 0x23c3: 0x000d, 0x23c4: 0x000d, 0x23c5: 0x000d, + 0x23c6: 
0x000d, 0x23c7: 0x000d, 0x23c8: 0x000d, 0x23c9: 0x000d, 0x23ca: 0x000d, 0x23cb: 0x000d, + 0x23cc: 0x000d, 0x23cd: 0x000d, 0x23ce: 0x000d, 0x23cf: 0x000d, 0x23d0: 0x000d, 0x23d1: 0x000d, + 0x23d2: 0x000d, 0x23d3: 0x000d, 0x23d4: 0x000d, 0x23d5: 0x000d, 0x23d6: 0x000d, 0x23d7: 0x000d, + 0x23d8: 0x000d, 0x23d9: 0x000d, 0x23da: 0x000d, 0x23db: 0x000d, 0x23dc: 0x000d, 0x23dd: 0x000d, + 0x23de: 0x000d, 0x23df: 0x000d, 0x23e0: 0x000d, 0x23e1: 0x000d, 0x23e2: 0x000d, 0x23e3: 0x000d, + 0x23e4: 0x000d, 0x23e5: 0x000d, 0x23e6: 0x000d, 0x23e7: 0x000d, 0x23e8: 0x000d, 0x23e9: 0x000d, + 0x23ea: 0x000d, 0x23eb: 0x000d, 0x23ec: 0x000d, 0x23ed: 0x000d, 0x23ee: 0x000d, 0x23ef: 0x000d, + 0x23f0: 0x000d, 0x23f1: 0x000d, 0x23f2: 0x000d, 0x23f3: 0x000d, 0x23f4: 0x000d, 0x23f5: 0x000d, + 0x23f6: 0x000d, 0x23f7: 0x000d, 0x23f8: 0x000d, 0x23f9: 0x000d, 0x23fa: 0x000d, 0x23fb: 0x000d, + 0x23fc: 0x000d, 0x23fd: 0x000d, 0x23fe: 0x000a, 0x23ff: 0x000a, + // Block 0x90, offset 0x2400 + 0x2400: 0x000d, 0x2401: 0x000d, 0x2402: 0x000d, 0x2403: 0x000d, 0x2404: 0x000d, 0x2405: 0x000d, + 0x2406: 0x000d, 0x2407: 0x000d, 0x2408: 0x000d, 0x2409: 0x000d, 0x240a: 0x000d, 0x240b: 0x000d, + 0x240c: 0x000d, 0x240d: 0x000d, 0x240e: 0x000d, 0x240f: 0x000d, 0x2410: 0x000b, 0x2411: 0x000b, + 0x2412: 0x000b, 0x2413: 0x000b, 0x2414: 0x000b, 0x2415: 0x000b, 0x2416: 0x000b, 0x2417: 0x000b, + 0x2418: 0x000b, 0x2419: 0x000b, 0x241a: 0x000b, 0x241b: 0x000b, 0x241c: 0x000b, 0x241d: 0x000b, + 0x241e: 0x000b, 0x241f: 0x000b, 0x2420: 0x000b, 0x2421: 0x000b, 0x2422: 0x000b, 0x2423: 0x000b, + 0x2424: 0x000b, 0x2425: 0x000b, 0x2426: 0x000b, 0x2427: 0x000b, 0x2428: 0x000b, 0x2429: 0x000b, + 0x242a: 0x000b, 0x242b: 0x000b, 0x242c: 0x000b, 0x242d: 0x000b, 0x242e: 0x000b, 0x242f: 0x000b, + 0x2430: 0x000d, 0x2431: 0x000d, 0x2432: 0x000d, 0x2433: 0x000d, 0x2434: 0x000d, 0x2435: 0x000d, + 0x2436: 0x000d, 0x2437: 0x000d, 0x2438: 0x000d, 0x2439: 0x000d, 0x243a: 0x000d, 0x243b: 0x000d, + 0x243c: 0x000d, 0x243d: 0x000a, 0x243e: 0x000d, 0x243f: 0x000d, + // Block 0x91, offset 0x2440 + 0x2440: 0x000c, 0x2441: 0x000c, 0x2442: 0x000c, 0x2443: 0x000c, 0x2444: 0x000c, 0x2445: 0x000c, + 0x2446: 0x000c, 0x2447: 0x000c, 0x2448: 0x000c, 0x2449: 0x000c, 0x244a: 0x000c, 0x244b: 0x000c, + 0x244c: 0x000c, 0x244d: 0x000c, 0x244e: 0x000c, 0x244f: 0x000c, 0x2450: 0x000a, 0x2451: 0x000a, + 0x2452: 0x000a, 0x2453: 0x000a, 0x2454: 0x000a, 0x2455: 0x000a, 0x2456: 0x000a, 0x2457: 0x000a, + 0x2458: 0x000a, 0x2459: 0x000a, + 0x2460: 0x000c, 0x2461: 0x000c, 0x2462: 0x000c, 0x2463: 0x000c, + 0x2464: 0x000c, 0x2465: 0x000c, 0x2466: 0x000c, 0x2467: 0x000c, 0x2468: 0x000c, 0x2469: 0x000c, + 0x246a: 0x000c, 0x246b: 0x000c, 0x246c: 0x000c, 0x246d: 0x000c, 0x246e: 0x000c, 0x246f: 0x000c, + 0x2470: 0x000a, 0x2471: 0x000a, 0x2472: 0x000a, 0x2473: 0x000a, 0x2474: 0x000a, 0x2475: 0x000a, + 0x2476: 0x000a, 0x2477: 0x000a, 0x2478: 0x000a, 0x2479: 0x000a, 0x247a: 0x000a, 0x247b: 0x000a, + 0x247c: 0x000a, 0x247d: 0x000a, 0x247e: 0x000a, 0x247f: 0x000a, + // Block 0x92, offset 0x2480 + 0x2480: 0x000a, 0x2481: 0x000a, 0x2482: 0x000a, 0x2483: 0x000a, 0x2484: 0x000a, 0x2485: 0x000a, + 0x2486: 0x000a, 0x2487: 0x000a, 0x2488: 0x000a, 0x2489: 0x000a, 0x248a: 0x000a, 0x248b: 0x000a, + 0x248c: 0x000a, 0x248d: 0x000a, 0x248e: 0x000a, 0x248f: 0x000a, 0x2490: 0x0006, 0x2491: 0x000a, + 0x2492: 0x0006, 0x2494: 0x000a, 0x2495: 0x0006, 0x2496: 0x000a, 0x2497: 0x000a, + 0x2498: 0x000a, 0x2499: 0x009a, 0x249a: 0x008a, 0x249b: 0x007a, 0x249c: 0x006a, 0x249d: 0x009a, + 0x249e: 0x008a, 0x249f: 0x0004, 0x24a0: 0x000a, 
0x24a1: 0x000a, 0x24a2: 0x0003, 0x24a3: 0x0003, + 0x24a4: 0x000a, 0x24a5: 0x000a, 0x24a6: 0x000a, 0x24a8: 0x000a, 0x24a9: 0x0004, + 0x24aa: 0x0004, 0x24ab: 0x000a, + 0x24b0: 0x000d, 0x24b1: 0x000d, 0x24b2: 0x000d, 0x24b3: 0x000d, 0x24b4: 0x000d, 0x24b5: 0x000d, + 0x24b6: 0x000d, 0x24b7: 0x000d, 0x24b8: 0x000d, 0x24b9: 0x000d, 0x24ba: 0x000d, 0x24bb: 0x000d, + 0x24bc: 0x000d, 0x24bd: 0x000d, 0x24be: 0x000d, 0x24bf: 0x000d, + // Block 0x93, offset 0x24c0 + 0x24c0: 0x000d, 0x24c1: 0x000d, 0x24c2: 0x000d, 0x24c3: 0x000d, 0x24c4: 0x000d, 0x24c5: 0x000d, + 0x24c6: 0x000d, 0x24c7: 0x000d, 0x24c8: 0x000d, 0x24c9: 0x000d, 0x24ca: 0x000d, 0x24cb: 0x000d, + 0x24cc: 0x000d, 0x24cd: 0x000d, 0x24ce: 0x000d, 0x24cf: 0x000d, 0x24d0: 0x000d, 0x24d1: 0x000d, + 0x24d2: 0x000d, 0x24d3: 0x000d, 0x24d4: 0x000d, 0x24d5: 0x000d, 0x24d6: 0x000d, 0x24d7: 0x000d, + 0x24d8: 0x000d, 0x24d9: 0x000d, 0x24da: 0x000d, 0x24db: 0x000d, 0x24dc: 0x000d, 0x24dd: 0x000d, + 0x24de: 0x000d, 0x24df: 0x000d, 0x24e0: 0x000d, 0x24e1: 0x000d, 0x24e2: 0x000d, 0x24e3: 0x000d, + 0x24e4: 0x000d, 0x24e5: 0x000d, 0x24e6: 0x000d, 0x24e7: 0x000d, 0x24e8: 0x000d, 0x24e9: 0x000d, + 0x24ea: 0x000d, 0x24eb: 0x000d, 0x24ec: 0x000d, 0x24ed: 0x000d, 0x24ee: 0x000d, 0x24ef: 0x000d, + 0x24f0: 0x000d, 0x24f1: 0x000d, 0x24f2: 0x000d, 0x24f3: 0x000d, 0x24f4: 0x000d, 0x24f5: 0x000d, + 0x24f6: 0x000d, 0x24f7: 0x000d, 0x24f8: 0x000d, 0x24f9: 0x000d, 0x24fa: 0x000d, 0x24fb: 0x000d, + 0x24fc: 0x000d, 0x24fd: 0x000d, 0x24fe: 0x000d, 0x24ff: 0x000b, + // Block 0x94, offset 0x2500 + 0x2501: 0x000a, 0x2502: 0x000a, 0x2503: 0x0004, 0x2504: 0x0004, 0x2505: 0x0004, + 0x2506: 0x000a, 0x2507: 0x000a, 0x2508: 0x003a, 0x2509: 0x002a, 0x250a: 0x000a, 0x250b: 0x0003, + 0x250c: 0x0006, 0x250d: 0x0003, 0x250e: 0x0006, 0x250f: 0x0006, 0x2510: 0x0002, 0x2511: 0x0002, + 0x2512: 0x0002, 0x2513: 0x0002, 0x2514: 0x0002, 0x2515: 0x0002, 0x2516: 0x0002, 0x2517: 0x0002, + 0x2518: 0x0002, 0x2519: 0x0002, 0x251a: 0x0006, 0x251b: 0x000a, 0x251c: 0x000a, 0x251d: 0x000a, + 0x251e: 0x000a, 0x251f: 0x000a, 0x2520: 0x000a, + 0x253b: 0x005a, + 0x253c: 0x000a, 0x253d: 0x004a, 0x253e: 0x000a, 0x253f: 0x000a, + // Block 0x95, offset 0x2540 + 0x2540: 0x000a, + 0x255b: 0x005a, 0x255c: 0x000a, 0x255d: 0x004a, + 0x255e: 0x000a, 0x255f: 0x00fa, 0x2560: 0x00ea, 0x2561: 0x000a, 0x2562: 0x003a, 0x2563: 0x002a, + 0x2564: 0x000a, 0x2565: 0x000a, + // Block 0x96, offset 0x2580 + 0x25a0: 0x0004, 0x25a1: 0x0004, 0x25a2: 0x000a, 0x25a3: 0x000a, + 0x25a4: 0x000a, 0x25a5: 0x0004, 0x25a6: 0x0004, 0x25a8: 0x000a, 0x25a9: 0x000a, + 0x25aa: 0x000a, 0x25ab: 0x000a, 0x25ac: 0x000a, 0x25ad: 0x000a, 0x25ae: 0x000a, + 0x25b0: 0x000b, 0x25b1: 0x000b, 0x25b2: 0x000b, 0x25b3: 0x000b, 0x25b4: 0x000b, 0x25b5: 0x000b, + 0x25b6: 0x000b, 0x25b7: 0x000b, 0x25b8: 0x000b, 0x25b9: 0x000a, 0x25ba: 0x000a, 0x25bb: 0x000a, + 0x25bc: 0x000a, 0x25bd: 0x000a, 0x25be: 0x000b, 0x25bf: 0x000b, + // Block 0x97, offset 0x25c0 + 0x25c1: 0x000a, + // Block 0x98, offset 0x2600 + 0x2600: 0x000a, 0x2601: 0x000a, 0x2602: 0x000a, 0x2603: 0x000a, 0x2604: 0x000a, 0x2605: 0x000a, + 0x2606: 0x000a, 0x2607: 0x000a, 0x2608: 0x000a, 0x2609: 0x000a, 0x260a: 0x000a, 0x260b: 0x000a, + 0x260c: 0x000a, 0x2610: 0x000a, 0x2611: 0x000a, + 0x2612: 0x000a, 0x2613: 0x000a, 0x2614: 0x000a, 0x2615: 0x000a, 0x2616: 0x000a, 0x2617: 0x000a, + 0x2618: 0x000a, 0x2619: 0x000a, 0x261a: 0x000a, 0x261b: 0x000a, + 0x2620: 0x000a, + // Block 0x99, offset 0x2640 + 0x267d: 0x000c, + // Block 0x9a, offset 0x2680 + 0x26a0: 0x000c, 0x26a1: 0x0002, 0x26a2: 0x0002, 0x26a3: 0x0002, + 
0x26a4: 0x0002, 0x26a5: 0x0002, 0x26a6: 0x0002, 0x26a7: 0x0002, 0x26a8: 0x0002, 0x26a9: 0x0002, + 0x26aa: 0x0002, 0x26ab: 0x0002, 0x26ac: 0x0002, 0x26ad: 0x0002, 0x26ae: 0x0002, 0x26af: 0x0002, + 0x26b0: 0x0002, 0x26b1: 0x0002, 0x26b2: 0x0002, 0x26b3: 0x0002, 0x26b4: 0x0002, 0x26b5: 0x0002, + 0x26b6: 0x0002, 0x26b7: 0x0002, 0x26b8: 0x0002, 0x26b9: 0x0002, 0x26ba: 0x0002, 0x26bb: 0x0002, + // Block 0x9b, offset 0x26c0 + 0x26f6: 0x000c, 0x26f7: 0x000c, 0x26f8: 0x000c, 0x26f9: 0x000c, 0x26fa: 0x000c, + // Block 0x9c, offset 0x2700 + 0x2700: 0x0001, 0x2701: 0x0001, 0x2702: 0x0001, 0x2703: 0x0001, 0x2704: 0x0001, 0x2705: 0x0001, + 0x2706: 0x0001, 0x2707: 0x0001, 0x2708: 0x0001, 0x2709: 0x0001, 0x270a: 0x0001, 0x270b: 0x0001, + 0x270c: 0x0001, 0x270d: 0x0001, 0x270e: 0x0001, 0x270f: 0x0001, 0x2710: 0x0001, 0x2711: 0x0001, + 0x2712: 0x0001, 0x2713: 0x0001, 0x2714: 0x0001, 0x2715: 0x0001, 0x2716: 0x0001, 0x2717: 0x0001, + 0x2718: 0x0001, 0x2719: 0x0001, 0x271a: 0x0001, 0x271b: 0x0001, 0x271c: 0x0001, 0x271d: 0x0001, + 0x271e: 0x0001, 0x271f: 0x0001, 0x2720: 0x0001, 0x2721: 0x0001, 0x2722: 0x0001, 0x2723: 0x0001, + 0x2724: 0x0001, 0x2725: 0x0001, 0x2726: 0x0001, 0x2727: 0x0001, 0x2728: 0x0001, 0x2729: 0x0001, + 0x272a: 0x0001, 0x272b: 0x0001, 0x272c: 0x0001, 0x272d: 0x0001, 0x272e: 0x0001, 0x272f: 0x0001, + 0x2730: 0x0001, 0x2731: 0x0001, 0x2732: 0x0001, 0x2733: 0x0001, 0x2734: 0x0001, 0x2735: 0x0001, + 0x2736: 0x0001, 0x2737: 0x0001, 0x2738: 0x0001, 0x2739: 0x0001, 0x273a: 0x0001, 0x273b: 0x0001, + 0x273c: 0x0001, 0x273d: 0x0001, 0x273e: 0x0001, 0x273f: 0x0001, + // Block 0x9d, offset 0x2740 + 0x2740: 0x0001, 0x2741: 0x0001, 0x2742: 0x0001, 0x2743: 0x0001, 0x2744: 0x0001, 0x2745: 0x0001, + 0x2746: 0x0001, 0x2747: 0x0001, 0x2748: 0x0001, 0x2749: 0x0001, 0x274a: 0x0001, 0x274b: 0x0001, + 0x274c: 0x0001, 0x274d: 0x0001, 0x274e: 0x0001, 0x274f: 0x0001, 0x2750: 0x0001, 0x2751: 0x0001, + 0x2752: 0x0001, 0x2753: 0x0001, 0x2754: 0x0001, 0x2755: 0x0001, 0x2756: 0x0001, 0x2757: 0x0001, + 0x2758: 0x0001, 0x2759: 0x0001, 0x275a: 0x0001, 0x275b: 0x0001, 0x275c: 0x0001, 0x275d: 0x0001, + 0x275e: 0x0001, 0x275f: 0x000a, 0x2760: 0x0001, 0x2761: 0x0001, 0x2762: 0x0001, 0x2763: 0x0001, + 0x2764: 0x0001, 0x2765: 0x0001, 0x2766: 0x0001, 0x2767: 0x0001, 0x2768: 0x0001, 0x2769: 0x0001, + 0x276a: 0x0001, 0x276b: 0x0001, 0x276c: 0x0001, 0x276d: 0x0001, 0x276e: 0x0001, 0x276f: 0x0001, + 0x2770: 0x0001, 0x2771: 0x0001, 0x2772: 0x0001, 0x2773: 0x0001, 0x2774: 0x0001, 0x2775: 0x0001, + 0x2776: 0x0001, 0x2777: 0x0001, 0x2778: 0x0001, 0x2779: 0x0001, 0x277a: 0x0001, 0x277b: 0x0001, + 0x277c: 0x0001, 0x277d: 0x0001, 0x277e: 0x0001, 0x277f: 0x0001, + // Block 0x9e, offset 0x2780 + 0x2780: 0x0001, 0x2781: 0x000c, 0x2782: 0x000c, 0x2783: 0x000c, 0x2784: 0x0001, 0x2785: 0x000c, + 0x2786: 0x000c, 0x2787: 0x0001, 0x2788: 0x0001, 0x2789: 0x0001, 0x278a: 0x0001, 0x278b: 0x0001, + 0x278c: 0x000c, 0x278d: 0x000c, 0x278e: 0x000c, 0x278f: 0x000c, 0x2790: 0x0001, 0x2791: 0x0001, + 0x2792: 0x0001, 0x2793: 0x0001, 0x2794: 0x0001, 0x2795: 0x0001, 0x2796: 0x0001, 0x2797: 0x0001, + 0x2798: 0x0001, 0x2799: 0x0001, 0x279a: 0x0001, 0x279b: 0x0001, 0x279c: 0x0001, 0x279d: 0x0001, + 0x279e: 0x0001, 0x279f: 0x0001, 0x27a0: 0x0001, 0x27a1: 0x0001, 0x27a2: 0x0001, 0x27a3: 0x0001, + 0x27a4: 0x0001, 0x27a5: 0x0001, 0x27a6: 0x0001, 0x27a7: 0x0001, 0x27a8: 0x0001, 0x27a9: 0x0001, + 0x27aa: 0x0001, 0x27ab: 0x0001, 0x27ac: 0x0001, 0x27ad: 0x0001, 0x27ae: 0x0001, 0x27af: 0x0001, + 0x27b0: 0x0001, 0x27b1: 0x0001, 0x27b2: 0x0001, 0x27b3: 0x0001, 0x27b4: 0x0001, 
0x27b5: 0x0001, + 0x27b6: 0x0001, 0x27b7: 0x0001, 0x27b8: 0x000c, 0x27b9: 0x000c, 0x27ba: 0x000c, 0x27bb: 0x0001, + 0x27bc: 0x0001, 0x27bd: 0x0001, 0x27be: 0x0001, 0x27bf: 0x000c, + // Block 0x9f, offset 0x27c0 + 0x27c0: 0x0001, 0x27c1: 0x0001, 0x27c2: 0x0001, 0x27c3: 0x0001, 0x27c4: 0x0001, 0x27c5: 0x0001, + 0x27c6: 0x0001, 0x27c7: 0x0001, 0x27c8: 0x0001, 0x27c9: 0x0001, 0x27ca: 0x0001, 0x27cb: 0x0001, + 0x27cc: 0x0001, 0x27cd: 0x0001, 0x27ce: 0x0001, 0x27cf: 0x0001, 0x27d0: 0x0001, 0x27d1: 0x0001, + 0x27d2: 0x0001, 0x27d3: 0x0001, 0x27d4: 0x0001, 0x27d5: 0x0001, 0x27d6: 0x0001, 0x27d7: 0x0001, + 0x27d8: 0x0001, 0x27d9: 0x0001, 0x27da: 0x0001, 0x27db: 0x0001, 0x27dc: 0x0001, 0x27dd: 0x0001, + 0x27de: 0x0001, 0x27df: 0x0001, 0x27e0: 0x0001, 0x27e1: 0x0001, 0x27e2: 0x0001, 0x27e3: 0x0001, + 0x27e4: 0x0001, 0x27e5: 0x000c, 0x27e6: 0x000c, 0x27e7: 0x0001, 0x27e8: 0x0001, 0x27e9: 0x0001, + 0x27ea: 0x0001, 0x27eb: 0x0001, 0x27ec: 0x0001, 0x27ed: 0x0001, 0x27ee: 0x0001, 0x27ef: 0x0001, + 0x27f0: 0x0001, 0x27f1: 0x0001, 0x27f2: 0x0001, 0x27f3: 0x0001, 0x27f4: 0x0001, 0x27f5: 0x0001, + 0x27f6: 0x0001, 0x27f7: 0x0001, 0x27f8: 0x0001, 0x27f9: 0x0001, 0x27fa: 0x0001, 0x27fb: 0x0001, + 0x27fc: 0x0001, 0x27fd: 0x0001, 0x27fe: 0x0001, 0x27ff: 0x0001, + // Block 0xa0, offset 0x2800 + 0x2800: 0x0001, 0x2801: 0x0001, 0x2802: 0x0001, 0x2803: 0x0001, 0x2804: 0x0001, 0x2805: 0x0001, + 0x2806: 0x0001, 0x2807: 0x0001, 0x2808: 0x0001, 0x2809: 0x0001, 0x280a: 0x0001, 0x280b: 0x0001, + 0x280c: 0x0001, 0x280d: 0x0001, 0x280e: 0x0001, 0x280f: 0x0001, 0x2810: 0x0001, 0x2811: 0x0001, + 0x2812: 0x0001, 0x2813: 0x0001, 0x2814: 0x0001, 0x2815: 0x0001, 0x2816: 0x0001, 0x2817: 0x0001, + 0x2818: 0x0001, 0x2819: 0x0001, 0x281a: 0x0001, 0x281b: 0x0001, 0x281c: 0x0001, 0x281d: 0x0001, + 0x281e: 0x0001, 0x281f: 0x0001, 0x2820: 0x0001, 0x2821: 0x0001, 0x2822: 0x0001, 0x2823: 0x0001, + 0x2824: 0x0001, 0x2825: 0x0001, 0x2826: 0x0001, 0x2827: 0x0001, 0x2828: 0x0001, 0x2829: 0x0001, + 0x282a: 0x0001, 0x282b: 0x0001, 0x282c: 0x0001, 0x282d: 0x0001, 0x282e: 0x0001, 0x282f: 0x0001, + 0x2830: 0x0001, 0x2831: 0x0001, 0x2832: 0x0001, 0x2833: 0x0001, 0x2834: 0x0001, 0x2835: 0x0001, + 0x2836: 0x0001, 0x2837: 0x0001, 0x2838: 0x0001, 0x2839: 0x000a, 0x283a: 0x000a, 0x283b: 0x000a, + 0x283c: 0x000a, 0x283d: 0x000a, 0x283e: 0x000a, 0x283f: 0x000a, + // Block 0xa1, offset 0x2840 + 0x2840: 0x000d, 0x2841: 0x000d, 0x2842: 0x000d, 0x2843: 0x000d, 0x2844: 0x000d, 0x2845: 0x000d, + 0x2846: 0x000d, 0x2847: 0x000d, 0x2848: 0x000d, 0x2849: 0x000d, 0x284a: 0x000d, 0x284b: 0x000d, + 0x284c: 0x000d, 0x284d: 0x000d, 0x284e: 0x000d, 0x284f: 0x000d, 0x2850: 0x000d, 0x2851: 0x000d, + 0x2852: 0x000d, 0x2853: 0x000d, 0x2854: 0x000d, 0x2855: 0x000d, 0x2856: 0x000d, 0x2857: 0x000d, + 0x2858: 0x000d, 0x2859: 0x000d, 0x285a: 0x000d, 0x285b: 0x000d, 0x285c: 0x000d, 0x285d: 0x000d, + 0x285e: 0x000d, 0x285f: 0x000d, 0x2860: 0x000d, 0x2861: 0x000d, 0x2862: 0x000d, 0x2863: 0x000d, + 0x2864: 0x000c, 0x2865: 0x000c, 0x2866: 0x000c, 0x2867: 0x000c, 0x2868: 0x000d, 0x2869: 0x000d, + 0x286a: 0x000d, 0x286b: 0x000d, 0x286c: 0x000d, 0x286d: 0x000d, 0x286e: 0x000d, 0x286f: 0x000d, + 0x2870: 0x0005, 0x2871: 0x0005, 0x2872: 0x0005, 0x2873: 0x0005, 0x2874: 0x0005, 0x2875: 0x0005, + 0x2876: 0x0005, 0x2877: 0x0005, 0x2878: 0x0005, 0x2879: 0x0005, 0x287a: 0x000d, 0x287b: 0x000d, + 0x287c: 0x000d, 0x287d: 0x000d, 0x287e: 0x000d, 0x287f: 0x000d, + // Block 0xa2, offset 0x2880 + 0x2880: 0x0001, 0x2881: 0x0001, 0x2882: 0x0001, 0x2883: 0x0001, 0x2884: 0x0001, 0x2885: 0x0001, + 0x2886: 
0x0001, 0x2887: 0x0001, 0x2888: 0x0001, 0x2889: 0x0001, 0x288a: 0x0001, 0x288b: 0x0001, + 0x288c: 0x0001, 0x288d: 0x0001, 0x288e: 0x0001, 0x288f: 0x0001, 0x2890: 0x0001, 0x2891: 0x0001, + 0x2892: 0x0001, 0x2893: 0x0001, 0x2894: 0x0001, 0x2895: 0x0001, 0x2896: 0x0001, 0x2897: 0x0001, + 0x2898: 0x0001, 0x2899: 0x0001, 0x289a: 0x0001, 0x289b: 0x0001, 0x289c: 0x0001, 0x289d: 0x0001, + 0x289e: 0x0001, 0x289f: 0x0001, 0x28a0: 0x0005, 0x28a1: 0x0005, 0x28a2: 0x0005, 0x28a3: 0x0005, + 0x28a4: 0x0005, 0x28a5: 0x0005, 0x28a6: 0x0005, 0x28a7: 0x0005, 0x28a8: 0x0005, 0x28a9: 0x0005, + 0x28aa: 0x0005, 0x28ab: 0x0005, 0x28ac: 0x0005, 0x28ad: 0x0005, 0x28ae: 0x0005, 0x28af: 0x0005, + 0x28b0: 0x0005, 0x28b1: 0x0005, 0x28b2: 0x0005, 0x28b3: 0x0005, 0x28b4: 0x0005, 0x28b5: 0x0005, + 0x28b6: 0x0005, 0x28b7: 0x0005, 0x28b8: 0x0005, 0x28b9: 0x0005, 0x28ba: 0x0005, 0x28bb: 0x0005, + 0x28bc: 0x0005, 0x28bd: 0x0005, 0x28be: 0x0005, 0x28bf: 0x0001, + // Block 0xa3, offset 0x28c0 + 0x28c0: 0x0001, 0x28c1: 0x0001, 0x28c2: 0x0001, 0x28c3: 0x0001, 0x28c4: 0x0001, 0x28c5: 0x0001, + 0x28c6: 0x0001, 0x28c7: 0x0001, 0x28c8: 0x0001, 0x28c9: 0x0001, 0x28ca: 0x0001, 0x28cb: 0x0001, + 0x28cc: 0x0001, 0x28cd: 0x0001, 0x28ce: 0x0001, 0x28cf: 0x0001, 0x28d0: 0x0001, 0x28d1: 0x0001, + 0x28d2: 0x0001, 0x28d3: 0x0001, 0x28d4: 0x0001, 0x28d5: 0x0001, 0x28d6: 0x0001, 0x28d7: 0x0001, + 0x28d8: 0x0001, 0x28d9: 0x0001, 0x28da: 0x0001, 0x28db: 0x0001, 0x28dc: 0x0001, 0x28dd: 0x0001, + 0x28de: 0x0001, 0x28df: 0x0001, 0x28e0: 0x0001, 0x28e1: 0x0001, 0x28e2: 0x0001, 0x28e3: 0x0001, + 0x28e4: 0x0001, 0x28e5: 0x0001, 0x28e6: 0x0001, 0x28e7: 0x0001, 0x28e8: 0x0001, 0x28e9: 0x0001, + 0x28ea: 0x0001, 0x28eb: 0x0001, 0x28ec: 0x0001, 0x28ed: 0x0001, 0x28ee: 0x0001, 0x28ef: 0x0001, + 0x28f0: 0x000d, 0x28f1: 0x000d, 0x28f2: 0x000d, 0x28f3: 0x000d, 0x28f4: 0x000d, 0x28f5: 0x000d, + 0x28f6: 0x000d, 0x28f7: 0x000d, 0x28f8: 0x000d, 0x28f9: 0x000d, 0x28fa: 0x000d, 0x28fb: 0x000d, + 0x28fc: 0x000d, 0x28fd: 0x000d, 0x28fe: 0x000d, 0x28ff: 0x000d, + // Block 0xa4, offset 0x2900 + 0x2900: 0x000d, 0x2901: 0x000d, 0x2902: 0x000d, 0x2903: 0x000d, 0x2904: 0x000d, 0x2905: 0x000d, + 0x2906: 0x000c, 0x2907: 0x000c, 0x2908: 0x000c, 0x2909: 0x000c, 0x290a: 0x000c, 0x290b: 0x000c, + 0x290c: 0x000c, 0x290d: 0x000c, 0x290e: 0x000c, 0x290f: 0x000c, 0x2910: 0x000c, 0x2911: 0x000d, + 0x2912: 0x000d, 0x2913: 0x000d, 0x2914: 0x000d, 0x2915: 0x000d, 0x2916: 0x000d, 0x2917: 0x000d, + 0x2918: 0x000d, 0x2919: 0x000d, 0x291a: 0x000d, 0x291b: 0x000d, 0x291c: 0x000d, 0x291d: 0x000d, + 0x291e: 0x000d, 0x291f: 0x000d, 0x2920: 0x000d, 0x2921: 0x000d, 0x2922: 0x000d, 0x2923: 0x000d, + 0x2924: 0x000d, 0x2925: 0x000d, 0x2926: 0x000d, 0x2927: 0x000d, 0x2928: 0x000d, 0x2929: 0x000d, + 0x292a: 0x000d, 0x292b: 0x000d, 0x292c: 0x000d, 0x292d: 0x000d, 0x292e: 0x000d, 0x292f: 0x000d, + 0x2930: 0x0001, 0x2931: 0x0001, 0x2932: 0x0001, 0x2933: 0x0001, 0x2934: 0x0001, 0x2935: 0x0001, + 0x2936: 0x0001, 0x2937: 0x0001, 0x2938: 0x0001, 0x2939: 0x0001, 0x293a: 0x0001, 0x293b: 0x0001, + 0x293c: 0x0001, 0x293d: 0x0001, 0x293e: 0x0001, 0x293f: 0x0001, + // Block 0xa5, offset 0x2940 + 0x2941: 0x000c, + 0x2978: 0x000c, 0x2979: 0x000c, 0x297a: 0x000c, 0x297b: 0x000c, + 0x297c: 0x000c, 0x297d: 0x000c, 0x297e: 0x000c, 0x297f: 0x000c, + // Block 0xa6, offset 0x2980 + 0x2980: 0x000c, 0x2981: 0x000c, 0x2982: 0x000c, 0x2983: 0x000c, 0x2984: 0x000c, 0x2985: 0x000c, + 0x2986: 0x000c, + 0x2992: 0x000a, 0x2993: 0x000a, 0x2994: 0x000a, 0x2995: 0x000a, 0x2996: 0x000a, 0x2997: 0x000a, + 0x2998: 0x000a, 0x2999: 0x000a, 
0x299a: 0x000a, 0x299b: 0x000a, 0x299c: 0x000a, 0x299d: 0x000a, + 0x299e: 0x000a, 0x299f: 0x000a, 0x29a0: 0x000a, 0x29a1: 0x000a, 0x29a2: 0x000a, 0x29a3: 0x000a, + 0x29a4: 0x000a, 0x29a5: 0x000a, + 0x29bf: 0x000c, + // Block 0xa7, offset 0x29c0 + 0x29c0: 0x000c, 0x29c1: 0x000c, + 0x29f3: 0x000c, 0x29f4: 0x000c, 0x29f5: 0x000c, + 0x29f6: 0x000c, 0x29f9: 0x000c, 0x29fa: 0x000c, + // Block 0xa8, offset 0x2a00 + 0x2a00: 0x000c, 0x2a01: 0x000c, 0x2a02: 0x000c, + 0x2a27: 0x000c, 0x2a28: 0x000c, 0x2a29: 0x000c, + 0x2a2a: 0x000c, 0x2a2b: 0x000c, 0x2a2d: 0x000c, 0x2a2e: 0x000c, 0x2a2f: 0x000c, + 0x2a30: 0x000c, 0x2a31: 0x000c, 0x2a32: 0x000c, 0x2a33: 0x000c, 0x2a34: 0x000c, + // Block 0xa9, offset 0x2a40 + 0x2a73: 0x000c, + // Block 0xaa, offset 0x2a80 + 0x2a80: 0x000c, 0x2a81: 0x000c, + 0x2ab6: 0x000c, 0x2ab7: 0x000c, 0x2ab8: 0x000c, 0x2ab9: 0x000c, 0x2aba: 0x000c, 0x2abb: 0x000c, + 0x2abc: 0x000c, 0x2abd: 0x000c, 0x2abe: 0x000c, + // Block 0xab, offset 0x2ac0 + 0x2ac9: 0x000c, 0x2aca: 0x000c, 0x2acb: 0x000c, + 0x2acc: 0x000c, + // Block 0xac, offset 0x2b00 + 0x2b2f: 0x000c, + 0x2b30: 0x000c, 0x2b31: 0x000c, 0x2b34: 0x000c, + 0x2b36: 0x000c, 0x2b37: 0x000c, + 0x2b3e: 0x000c, + // Block 0xad, offset 0x2b40 + 0x2b5f: 0x000c, 0x2b63: 0x000c, + 0x2b64: 0x000c, 0x2b65: 0x000c, 0x2b66: 0x000c, 0x2b67: 0x000c, 0x2b68: 0x000c, 0x2b69: 0x000c, + 0x2b6a: 0x000c, + // Block 0xae, offset 0x2b80 + 0x2b80: 0x000c, + 0x2ba6: 0x000c, 0x2ba7: 0x000c, 0x2ba8: 0x000c, 0x2ba9: 0x000c, + 0x2baa: 0x000c, 0x2bab: 0x000c, 0x2bac: 0x000c, + 0x2bb0: 0x000c, 0x2bb1: 0x000c, 0x2bb2: 0x000c, 0x2bb3: 0x000c, 0x2bb4: 0x000c, + // Block 0xaf, offset 0x2bc0 + 0x2bf8: 0x000c, 0x2bf9: 0x000c, 0x2bfa: 0x000c, 0x2bfb: 0x000c, + 0x2bfc: 0x000c, 0x2bfd: 0x000c, 0x2bfe: 0x000c, 0x2bff: 0x000c, + // Block 0xb0, offset 0x2c00 + 0x2c02: 0x000c, 0x2c03: 0x000c, 0x2c04: 0x000c, + 0x2c06: 0x000c, + 0x2c1e: 0x000c, + // Block 0xb1, offset 0x2c40 + 0x2c73: 0x000c, 0x2c74: 0x000c, 0x2c75: 0x000c, + 0x2c76: 0x000c, 0x2c77: 0x000c, 0x2c78: 0x000c, 0x2c7a: 0x000c, + 0x2c7f: 0x000c, + // Block 0xb2, offset 0x2c80 + 0x2c80: 0x000c, 0x2c82: 0x000c, 0x2c83: 0x000c, + // Block 0xb3, offset 0x2cc0 + 0x2cf2: 0x000c, 0x2cf3: 0x000c, 0x2cf4: 0x000c, 0x2cf5: 0x000c, + 0x2cfc: 0x000c, 0x2cfd: 0x000c, 0x2cff: 0x000c, + // Block 0xb4, offset 0x2d00 + 0x2d00: 0x000c, + 0x2d1c: 0x000c, 0x2d1d: 0x000c, + // Block 0xb5, offset 0x2d40 + 0x2d73: 0x000c, 0x2d74: 0x000c, 0x2d75: 0x000c, + 0x2d76: 0x000c, 0x2d77: 0x000c, 0x2d78: 0x000c, 0x2d79: 0x000c, 0x2d7a: 0x000c, + 0x2d7d: 0x000c, 0x2d7f: 0x000c, + // Block 0xb6, offset 0x2d80 + 0x2d80: 0x000c, + 0x2da0: 0x000a, 0x2da1: 0x000a, 0x2da2: 0x000a, 0x2da3: 0x000a, + 0x2da4: 0x000a, 0x2da5: 0x000a, 0x2da6: 0x000a, 0x2da7: 0x000a, 0x2da8: 0x000a, 0x2da9: 0x000a, + 0x2daa: 0x000a, 0x2dab: 0x000a, 0x2dac: 0x000a, + // Block 0xb7, offset 0x2dc0 + 0x2deb: 0x000c, 0x2ded: 0x000c, + 0x2df0: 0x000c, 0x2df1: 0x000c, 0x2df2: 0x000c, 0x2df3: 0x000c, 0x2df4: 0x000c, 0x2df5: 0x000c, + 0x2df7: 0x000c, + // Block 0xb8, offset 0x2e00 + 0x2e1d: 0x000c, + 0x2e1e: 0x000c, 0x2e1f: 0x000c, 0x2e22: 0x000c, 0x2e23: 0x000c, + 0x2e24: 0x000c, 0x2e25: 0x000c, 0x2e27: 0x000c, 0x2e28: 0x000c, 0x2e29: 0x000c, + 0x2e2a: 0x000c, 0x2e2b: 0x000c, + // Block 0xb9, offset 0x2e40 + 0x2e6f: 0x000c, + 0x2e70: 0x000c, 0x2e71: 0x000c, 0x2e72: 0x000c, 0x2e73: 0x000c, 0x2e74: 0x000c, 0x2e75: 0x000c, + 0x2e76: 0x000c, 0x2e77: 0x000c, 0x2e79: 0x000c, 0x2e7a: 0x000c, + // Block 0xba, offset 0x2e80 + 0x2e81: 0x000c, 0x2e82: 0x000c, 0x2e83: 0x000c, 0x2e84: 0x000c, 
0x2e85: 0x000c, + 0x2e86: 0x000c, 0x2e89: 0x000c, 0x2e8a: 0x000c, + 0x2eb3: 0x000c, 0x2eb4: 0x000c, 0x2eb5: 0x000c, + 0x2eb6: 0x000c, 0x2eb7: 0x000c, 0x2eb8: 0x000c, 0x2ebb: 0x000c, + 0x2ebc: 0x000c, 0x2ebd: 0x000c, 0x2ebe: 0x000c, + // Block 0xbb, offset 0x2ec0 + 0x2ec7: 0x000c, + 0x2ed1: 0x000c, + 0x2ed2: 0x000c, 0x2ed3: 0x000c, 0x2ed4: 0x000c, 0x2ed5: 0x000c, 0x2ed6: 0x000c, + 0x2ed9: 0x000c, 0x2eda: 0x000c, 0x2edb: 0x000c, + // Block 0xbc, offset 0x2f00 + 0x2f0a: 0x000c, 0x2f0b: 0x000c, + 0x2f0c: 0x000c, 0x2f0d: 0x000c, 0x2f0e: 0x000c, 0x2f0f: 0x000c, 0x2f10: 0x000c, 0x2f11: 0x000c, + 0x2f12: 0x000c, 0x2f13: 0x000c, 0x2f14: 0x000c, 0x2f15: 0x000c, 0x2f16: 0x000c, + 0x2f18: 0x000c, 0x2f19: 0x000c, + // Block 0xbd, offset 0x2f40 + 0x2f70: 0x000c, 0x2f71: 0x000c, 0x2f72: 0x000c, 0x2f73: 0x000c, 0x2f74: 0x000c, 0x2f75: 0x000c, + 0x2f76: 0x000c, 0x2f78: 0x000c, 0x2f79: 0x000c, 0x2f7a: 0x000c, 0x2f7b: 0x000c, + 0x2f7c: 0x000c, 0x2f7d: 0x000c, + // Block 0xbe, offset 0x2f80 + 0x2f92: 0x000c, 0x2f93: 0x000c, 0x2f94: 0x000c, 0x2f95: 0x000c, 0x2f96: 0x000c, 0x2f97: 0x000c, + 0x2f98: 0x000c, 0x2f99: 0x000c, 0x2f9a: 0x000c, 0x2f9b: 0x000c, 0x2f9c: 0x000c, 0x2f9d: 0x000c, + 0x2f9e: 0x000c, 0x2f9f: 0x000c, 0x2fa0: 0x000c, 0x2fa1: 0x000c, 0x2fa2: 0x000c, 0x2fa3: 0x000c, + 0x2fa4: 0x000c, 0x2fa5: 0x000c, 0x2fa6: 0x000c, 0x2fa7: 0x000c, + 0x2faa: 0x000c, 0x2fab: 0x000c, 0x2fac: 0x000c, 0x2fad: 0x000c, 0x2fae: 0x000c, 0x2faf: 0x000c, + 0x2fb0: 0x000c, 0x2fb2: 0x000c, 0x2fb3: 0x000c, 0x2fb5: 0x000c, + 0x2fb6: 0x000c, + // Block 0xbf, offset 0x2fc0 + 0x2ff1: 0x000c, 0x2ff2: 0x000c, 0x2ff3: 0x000c, 0x2ff4: 0x000c, 0x2ff5: 0x000c, + 0x2ff6: 0x000c, 0x2ffa: 0x000c, + 0x2ffc: 0x000c, 0x2ffd: 0x000c, 0x2fff: 0x000c, + // Block 0xc0, offset 0x3000 + 0x3000: 0x000c, 0x3001: 0x000c, 0x3002: 0x000c, 0x3003: 0x000c, 0x3004: 0x000c, 0x3005: 0x000c, + 0x3007: 0x000c, + // Block 0xc1, offset 0x3040 + 0x3050: 0x000c, 0x3051: 0x000c, + 0x3055: 0x000c, 0x3057: 0x000c, + // Block 0xc2, offset 0x3080 + 0x30b3: 0x000c, 0x30b4: 0x000c, + // Block 0xc3, offset 0x30c0 + 0x30f0: 0x000c, 0x30f1: 0x000c, 0x30f2: 0x000c, 0x30f3: 0x000c, 0x30f4: 0x000c, + // Block 0xc4, offset 0x3100 + 0x3130: 0x000c, 0x3131: 0x000c, 0x3132: 0x000c, 0x3133: 0x000c, 0x3134: 0x000c, 0x3135: 0x000c, + 0x3136: 0x000c, + // Block 0xc5, offset 0x3140 + 0x314f: 0x000c, 0x3150: 0x000c, 0x3151: 0x000c, + 0x3152: 0x000c, + // Block 0xc6, offset 0x3180 + 0x319d: 0x000c, + 0x319e: 0x000c, 0x31a0: 0x000b, 0x31a1: 0x000b, 0x31a2: 0x000b, 0x31a3: 0x000b, + // Block 0xc7, offset 0x31c0 + 0x31e7: 0x000c, 0x31e8: 0x000c, 0x31e9: 0x000c, + 0x31f3: 0x000b, 0x31f4: 0x000b, 0x31f5: 0x000b, + 0x31f6: 0x000b, 0x31f7: 0x000b, 0x31f8: 0x000b, 0x31f9: 0x000b, 0x31fa: 0x000b, 0x31fb: 0x000c, + 0x31fc: 0x000c, 0x31fd: 0x000c, 0x31fe: 0x000c, 0x31ff: 0x000c, + // Block 0xc8, offset 0x3200 + 0x3200: 0x000c, 0x3201: 0x000c, 0x3202: 0x000c, 0x3205: 0x000c, + 0x3206: 0x000c, 0x3207: 0x000c, 0x3208: 0x000c, 0x3209: 0x000c, 0x320a: 0x000c, 0x320b: 0x000c, + 0x322a: 0x000c, 0x322b: 0x000c, 0x322c: 0x000c, 0x322d: 0x000c, + // Block 0xc9, offset 0x3240 + 0x3240: 0x000a, 0x3241: 0x000a, 0x3242: 0x000c, 0x3243: 0x000c, 0x3244: 0x000c, 0x3245: 0x000a, + // Block 0xca, offset 0x3280 + 0x3280: 0x000a, 0x3281: 0x000a, 0x3282: 0x000a, 0x3283: 0x000a, 0x3284: 0x000a, 0x3285: 0x000a, + 0x3286: 0x000a, 0x3287: 0x000a, 0x3288: 0x000a, 0x3289: 0x000a, 0x328a: 0x000a, 0x328b: 0x000a, + 0x328c: 0x000a, 0x328d: 0x000a, 0x328e: 0x000a, 0x328f: 0x000a, 0x3290: 0x000a, 0x3291: 0x000a, + 0x3292: 0x000a, 
0x3293: 0x000a, 0x3294: 0x000a, 0x3295: 0x000a, 0x3296: 0x000a, + // Block 0xcb, offset 0x32c0 + 0x32db: 0x000a, + // Block 0xcc, offset 0x3300 + 0x3315: 0x000a, + // Block 0xcd, offset 0x3340 + 0x334f: 0x000a, + // Block 0xce, offset 0x3380 + 0x3389: 0x000a, + // Block 0xcf, offset 0x33c0 + 0x33c3: 0x000a, + 0x33ce: 0x0002, 0x33cf: 0x0002, 0x33d0: 0x0002, 0x33d1: 0x0002, + 0x33d2: 0x0002, 0x33d3: 0x0002, 0x33d4: 0x0002, 0x33d5: 0x0002, 0x33d6: 0x0002, 0x33d7: 0x0002, + 0x33d8: 0x0002, 0x33d9: 0x0002, 0x33da: 0x0002, 0x33db: 0x0002, 0x33dc: 0x0002, 0x33dd: 0x0002, + 0x33de: 0x0002, 0x33df: 0x0002, 0x33e0: 0x0002, 0x33e1: 0x0002, 0x33e2: 0x0002, 0x33e3: 0x0002, + 0x33e4: 0x0002, 0x33e5: 0x0002, 0x33e6: 0x0002, 0x33e7: 0x0002, 0x33e8: 0x0002, 0x33e9: 0x0002, + 0x33ea: 0x0002, 0x33eb: 0x0002, 0x33ec: 0x0002, 0x33ed: 0x0002, 0x33ee: 0x0002, 0x33ef: 0x0002, + 0x33f0: 0x0002, 0x33f1: 0x0002, 0x33f2: 0x0002, 0x33f3: 0x0002, 0x33f4: 0x0002, 0x33f5: 0x0002, + 0x33f6: 0x0002, 0x33f7: 0x0002, 0x33f8: 0x0002, 0x33f9: 0x0002, 0x33fa: 0x0002, 0x33fb: 0x0002, + 0x33fc: 0x0002, 0x33fd: 0x0002, 0x33fe: 0x0002, 0x33ff: 0x0002, + // Block 0xd0, offset 0x3400 + 0x3400: 0x000c, 0x3401: 0x000c, 0x3402: 0x000c, 0x3403: 0x000c, 0x3404: 0x000c, 0x3405: 0x000c, + 0x3406: 0x000c, 0x3407: 0x000c, 0x3408: 0x000c, 0x3409: 0x000c, 0x340a: 0x000c, 0x340b: 0x000c, + 0x340c: 0x000c, 0x340d: 0x000c, 0x340e: 0x000c, 0x340f: 0x000c, 0x3410: 0x000c, 0x3411: 0x000c, + 0x3412: 0x000c, 0x3413: 0x000c, 0x3414: 0x000c, 0x3415: 0x000c, 0x3416: 0x000c, 0x3417: 0x000c, + 0x3418: 0x000c, 0x3419: 0x000c, 0x341a: 0x000c, 0x341b: 0x000c, 0x341c: 0x000c, 0x341d: 0x000c, + 0x341e: 0x000c, 0x341f: 0x000c, 0x3420: 0x000c, 0x3421: 0x000c, 0x3422: 0x000c, 0x3423: 0x000c, + 0x3424: 0x000c, 0x3425: 0x000c, 0x3426: 0x000c, 0x3427: 0x000c, 0x3428: 0x000c, 0x3429: 0x000c, + 0x342a: 0x000c, 0x342b: 0x000c, 0x342c: 0x000c, 0x342d: 0x000c, 0x342e: 0x000c, 0x342f: 0x000c, + 0x3430: 0x000c, 0x3431: 0x000c, 0x3432: 0x000c, 0x3433: 0x000c, 0x3434: 0x000c, 0x3435: 0x000c, + 0x3436: 0x000c, 0x343b: 0x000c, + 0x343c: 0x000c, 0x343d: 0x000c, 0x343e: 0x000c, 0x343f: 0x000c, + // Block 0xd1, offset 0x3440 + 0x3440: 0x000c, 0x3441: 0x000c, 0x3442: 0x000c, 0x3443: 0x000c, 0x3444: 0x000c, 0x3445: 0x000c, + 0x3446: 0x000c, 0x3447: 0x000c, 0x3448: 0x000c, 0x3449: 0x000c, 0x344a: 0x000c, 0x344b: 0x000c, + 0x344c: 0x000c, 0x344d: 0x000c, 0x344e: 0x000c, 0x344f: 0x000c, 0x3450: 0x000c, 0x3451: 0x000c, + 0x3452: 0x000c, 0x3453: 0x000c, 0x3454: 0x000c, 0x3455: 0x000c, 0x3456: 0x000c, 0x3457: 0x000c, + 0x3458: 0x000c, 0x3459: 0x000c, 0x345a: 0x000c, 0x345b: 0x000c, 0x345c: 0x000c, 0x345d: 0x000c, + 0x345e: 0x000c, 0x345f: 0x000c, 0x3460: 0x000c, 0x3461: 0x000c, 0x3462: 0x000c, 0x3463: 0x000c, + 0x3464: 0x000c, 0x3465: 0x000c, 0x3466: 0x000c, 0x3467: 0x000c, 0x3468: 0x000c, 0x3469: 0x000c, + 0x346a: 0x000c, 0x346b: 0x000c, 0x346c: 0x000c, + 0x3475: 0x000c, + // Block 0xd2, offset 0x3480 + 0x3484: 0x000c, + 0x349b: 0x000c, 0x349c: 0x000c, 0x349d: 0x000c, + 0x349e: 0x000c, 0x349f: 0x000c, 0x34a1: 0x000c, 0x34a2: 0x000c, 0x34a3: 0x000c, + 0x34a4: 0x000c, 0x34a5: 0x000c, 0x34a6: 0x000c, 0x34a7: 0x000c, 0x34a8: 0x000c, 0x34a9: 0x000c, + 0x34aa: 0x000c, 0x34ab: 0x000c, 0x34ac: 0x000c, 0x34ad: 0x000c, 0x34ae: 0x000c, 0x34af: 0x000c, + // Block 0xd3, offset 0x34c0 + 0x34c0: 0x000c, 0x34c1: 0x000c, 0x34c2: 0x000c, 0x34c3: 0x000c, 0x34c4: 0x000c, 0x34c5: 0x000c, + 0x34c6: 0x000c, 0x34c8: 0x000c, 0x34c9: 0x000c, 0x34ca: 0x000c, 0x34cb: 0x000c, + 0x34cc: 0x000c, 0x34cd: 0x000c, 0x34ce: 
0x000c, 0x34cf: 0x000c, 0x34d0: 0x000c, 0x34d1: 0x000c, + 0x34d2: 0x000c, 0x34d3: 0x000c, 0x34d4: 0x000c, 0x34d5: 0x000c, 0x34d6: 0x000c, 0x34d7: 0x000c, + 0x34d8: 0x000c, 0x34db: 0x000c, 0x34dc: 0x000c, 0x34dd: 0x000c, + 0x34de: 0x000c, 0x34df: 0x000c, 0x34e0: 0x000c, 0x34e1: 0x000c, 0x34e3: 0x000c, + 0x34e4: 0x000c, 0x34e6: 0x000c, 0x34e7: 0x000c, 0x34e8: 0x000c, 0x34e9: 0x000c, + 0x34ea: 0x000c, + // Block 0xd4, offset 0x3500 + 0x3500: 0x0001, 0x3501: 0x0001, 0x3502: 0x0001, 0x3503: 0x0001, 0x3504: 0x0001, 0x3505: 0x0001, + 0x3506: 0x0001, 0x3507: 0x0001, 0x3508: 0x0001, 0x3509: 0x0001, 0x350a: 0x0001, 0x350b: 0x0001, + 0x350c: 0x0001, 0x350d: 0x0001, 0x350e: 0x0001, 0x350f: 0x0001, 0x3510: 0x000c, 0x3511: 0x000c, + 0x3512: 0x000c, 0x3513: 0x000c, 0x3514: 0x000c, 0x3515: 0x000c, 0x3516: 0x000c, 0x3517: 0x0001, + 0x3518: 0x0001, 0x3519: 0x0001, 0x351a: 0x0001, 0x351b: 0x0001, 0x351c: 0x0001, 0x351d: 0x0001, + 0x351e: 0x0001, 0x351f: 0x0001, 0x3520: 0x0001, 0x3521: 0x0001, 0x3522: 0x0001, 0x3523: 0x0001, + 0x3524: 0x0001, 0x3525: 0x0001, 0x3526: 0x0001, 0x3527: 0x0001, 0x3528: 0x0001, 0x3529: 0x0001, + 0x352a: 0x0001, 0x352b: 0x0001, 0x352c: 0x0001, 0x352d: 0x0001, 0x352e: 0x0001, 0x352f: 0x0001, + 0x3530: 0x0001, 0x3531: 0x0001, 0x3532: 0x0001, 0x3533: 0x0001, 0x3534: 0x0001, 0x3535: 0x0001, + 0x3536: 0x0001, 0x3537: 0x0001, 0x3538: 0x0001, 0x3539: 0x0001, 0x353a: 0x0001, 0x353b: 0x0001, + 0x353c: 0x0001, 0x353d: 0x0001, 0x353e: 0x0001, 0x353f: 0x0001, + // Block 0xd5, offset 0x3540 + 0x3540: 0x0001, 0x3541: 0x0001, 0x3542: 0x0001, 0x3543: 0x0001, 0x3544: 0x000c, 0x3545: 0x000c, + 0x3546: 0x000c, 0x3547: 0x000c, 0x3548: 0x000c, 0x3549: 0x000c, 0x354a: 0x000c, 0x354b: 0x0001, + 0x354c: 0x0001, 0x354d: 0x0001, 0x354e: 0x0001, 0x354f: 0x0001, 0x3550: 0x0001, 0x3551: 0x0001, + 0x3552: 0x0001, 0x3553: 0x0001, 0x3554: 0x0001, 0x3555: 0x0001, 0x3556: 0x0001, 0x3557: 0x0001, + 0x3558: 0x0001, 0x3559: 0x0001, 0x355a: 0x0001, 0x355b: 0x0001, 0x355c: 0x0001, 0x355d: 0x0001, + 0x355e: 0x0001, 0x355f: 0x0001, 0x3560: 0x0001, 0x3561: 0x0001, 0x3562: 0x0001, 0x3563: 0x0001, + 0x3564: 0x0001, 0x3565: 0x0001, 0x3566: 0x0001, 0x3567: 0x0001, 0x3568: 0x0001, 0x3569: 0x0001, + 0x356a: 0x0001, 0x356b: 0x0001, 0x356c: 0x0001, 0x356d: 0x0001, 0x356e: 0x0001, 0x356f: 0x0001, + 0x3570: 0x0001, 0x3571: 0x0001, 0x3572: 0x0001, 0x3573: 0x0001, 0x3574: 0x0001, 0x3575: 0x0001, + 0x3576: 0x0001, 0x3577: 0x0001, 0x3578: 0x0001, 0x3579: 0x0001, 0x357a: 0x0001, 0x357b: 0x0001, + 0x357c: 0x0001, 0x357d: 0x0001, 0x357e: 0x0001, 0x357f: 0x0001, + // Block 0xd6, offset 0x3580 + 0x3580: 0x000d, 0x3581: 0x000d, 0x3582: 0x000d, 0x3583: 0x000d, 0x3584: 0x000d, 0x3585: 0x000d, + 0x3586: 0x000d, 0x3587: 0x000d, 0x3588: 0x000d, 0x3589: 0x000d, 0x358a: 0x000d, 0x358b: 0x000d, + 0x358c: 0x000d, 0x358d: 0x000d, 0x358e: 0x000d, 0x358f: 0x000d, 0x3590: 0x000d, 0x3591: 0x000d, + 0x3592: 0x000d, 0x3593: 0x000d, 0x3594: 0x000d, 0x3595: 0x000d, 0x3596: 0x000d, 0x3597: 0x000d, + 0x3598: 0x000d, 0x3599: 0x000d, 0x359a: 0x000d, 0x359b: 0x000d, 0x359c: 0x000d, 0x359d: 0x000d, + 0x359e: 0x000d, 0x359f: 0x000d, 0x35a0: 0x000d, 0x35a1: 0x000d, 0x35a2: 0x000d, 0x35a3: 0x000d, + 0x35a4: 0x000d, 0x35a5: 0x000d, 0x35a6: 0x000d, 0x35a7: 0x000d, 0x35a8: 0x000d, 0x35a9: 0x000d, + 0x35aa: 0x000d, 0x35ab: 0x000d, 0x35ac: 0x000d, 0x35ad: 0x000d, 0x35ae: 0x000d, 0x35af: 0x000d, + 0x35b0: 0x000a, 0x35b1: 0x000a, 0x35b2: 0x000d, 0x35b3: 0x000d, 0x35b4: 0x000d, 0x35b5: 0x000d, + 0x35b6: 0x000d, 0x35b7: 0x000d, 0x35b8: 0x000d, 0x35b9: 0x000d, 0x35ba: 0x000d, 
0x35bb: 0x000d, + 0x35bc: 0x000d, 0x35bd: 0x000d, 0x35be: 0x000d, 0x35bf: 0x000d, + // Block 0xd7, offset 0x35c0 + 0x35c0: 0x000a, 0x35c1: 0x000a, 0x35c2: 0x000a, 0x35c3: 0x000a, 0x35c4: 0x000a, 0x35c5: 0x000a, + 0x35c6: 0x000a, 0x35c7: 0x000a, 0x35c8: 0x000a, 0x35c9: 0x000a, 0x35ca: 0x000a, 0x35cb: 0x000a, + 0x35cc: 0x000a, 0x35cd: 0x000a, 0x35ce: 0x000a, 0x35cf: 0x000a, 0x35d0: 0x000a, 0x35d1: 0x000a, + 0x35d2: 0x000a, 0x35d3: 0x000a, 0x35d4: 0x000a, 0x35d5: 0x000a, 0x35d6: 0x000a, 0x35d7: 0x000a, + 0x35d8: 0x000a, 0x35d9: 0x000a, 0x35da: 0x000a, 0x35db: 0x000a, 0x35dc: 0x000a, 0x35dd: 0x000a, + 0x35de: 0x000a, 0x35df: 0x000a, 0x35e0: 0x000a, 0x35e1: 0x000a, 0x35e2: 0x000a, 0x35e3: 0x000a, + 0x35e4: 0x000a, 0x35e5: 0x000a, 0x35e6: 0x000a, 0x35e7: 0x000a, 0x35e8: 0x000a, 0x35e9: 0x000a, + 0x35ea: 0x000a, 0x35eb: 0x000a, + 0x35f0: 0x000a, 0x35f1: 0x000a, 0x35f2: 0x000a, 0x35f3: 0x000a, 0x35f4: 0x000a, 0x35f5: 0x000a, + 0x35f6: 0x000a, 0x35f7: 0x000a, 0x35f8: 0x000a, 0x35f9: 0x000a, 0x35fa: 0x000a, 0x35fb: 0x000a, + 0x35fc: 0x000a, 0x35fd: 0x000a, 0x35fe: 0x000a, 0x35ff: 0x000a, + // Block 0xd8, offset 0x3600 + 0x3600: 0x000a, 0x3601: 0x000a, 0x3602: 0x000a, 0x3603: 0x000a, 0x3604: 0x000a, 0x3605: 0x000a, + 0x3606: 0x000a, 0x3607: 0x000a, 0x3608: 0x000a, 0x3609: 0x000a, 0x360a: 0x000a, 0x360b: 0x000a, + 0x360c: 0x000a, 0x360d: 0x000a, 0x360e: 0x000a, 0x360f: 0x000a, 0x3610: 0x000a, 0x3611: 0x000a, + 0x3612: 0x000a, 0x3613: 0x000a, + 0x3620: 0x000a, 0x3621: 0x000a, 0x3622: 0x000a, 0x3623: 0x000a, + 0x3624: 0x000a, 0x3625: 0x000a, 0x3626: 0x000a, 0x3627: 0x000a, 0x3628: 0x000a, 0x3629: 0x000a, + 0x362a: 0x000a, 0x362b: 0x000a, 0x362c: 0x000a, 0x362d: 0x000a, 0x362e: 0x000a, + 0x3631: 0x000a, 0x3632: 0x000a, 0x3633: 0x000a, 0x3634: 0x000a, 0x3635: 0x000a, + 0x3636: 0x000a, 0x3637: 0x000a, 0x3638: 0x000a, 0x3639: 0x000a, 0x363a: 0x000a, 0x363b: 0x000a, + 0x363c: 0x000a, 0x363d: 0x000a, 0x363e: 0x000a, 0x363f: 0x000a, + // Block 0xd9, offset 0x3640 + 0x3641: 0x000a, 0x3642: 0x000a, 0x3643: 0x000a, 0x3644: 0x000a, 0x3645: 0x000a, + 0x3646: 0x000a, 0x3647: 0x000a, 0x3648: 0x000a, 0x3649: 0x000a, 0x364a: 0x000a, 0x364b: 0x000a, + 0x364c: 0x000a, 0x364d: 0x000a, 0x364e: 0x000a, 0x364f: 0x000a, 0x3651: 0x000a, + 0x3652: 0x000a, 0x3653: 0x000a, 0x3654: 0x000a, 0x3655: 0x000a, 0x3656: 0x000a, 0x3657: 0x000a, + 0x3658: 0x000a, 0x3659: 0x000a, 0x365a: 0x000a, 0x365b: 0x000a, 0x365c: 0x000a, 0x365d: 0x000a, + 0x365e: 0x000a, 0x365f: 0x000a, 0x3660: 0x000a, 0x3661: 0x000a, 0x3662: 0x000a, 0x3663: 0x000a, + 0x3664: 0x000a, 0x3665: 0x000a, 0x3666: 0x000a, 0x3667: 0x000a, 0x3668: 0x000a, 0x3669: 0x000a, + 0x366a: 0x000a, 0x366b: 0x000a, 0x366c: 0x000a, 0x366d: 0x000a, 0x366e: 0x000a, 0x366f: 0x000a, + 0x3670: 0x000a, 0x3671: 0x000a, 0x3672: 0x000a, 0x3673: 0x000a, 0x3674: 0x000a, 0x3675: 0x000a, + // Block 0xda, offset 0x3680 + 0x3680: 0x0002, 0x3681: 0x0002, 0x3682: 0x0002, 0x3683: 0x0002, 0x3684: 0x0002, 0x3685: 0x0002, + 0x3686: 0x0002, 0x3687: 0x0002, 0x3688: 0x0002, 0x3689: 0x0002, 0x368a: 0x0002, 0x368b: 0x000a, + 0x368c: 0x000a, + 0x36af: 0x000a, + // Block 0xdb, offset 0x36c0 + 0x36ea: 0x000a, 0x36eb: 0x000a, + // Block 0xdc, offset 0x3700 + 0x3720: 0x000a, 0x3721: 0x000a, 0x3722: 0x000a, 0x3723: 0x000a, + 0x3724: 0x000a, 0x3725: 0x000a, + // Block 0xdd, offset 0x3740 + 0x3740: 0x000a, 0x3741: 0x000a, 0x3742: 0x000a, 0x3743: 0x000a, 0x3744: 0x000a, 0x3745: 0x000a, + 0x3746: 0x000a, 0x3747: 0x000a, 0x3748: 0x000a, 0x3749: 0x000a, 0x374a: 0x000a, 0x374b: 0x000a, + 0x374c: 0x000a, 0x374d: 0x000a, 0x374e: 
0x000a, 0x374f: 0x000a, 0x3750: 0x000a, 0x3751: 0x000a, + 0x3752: 0x000a, 0x3753: 0x000a, 0x3754: 0x000a, + 0x3760: 0x000a, 0x3761: 0x000a, 0x3762: 0x000a, 0x3763: 0x000a, + 0x3764: 0x000a, 0x3765: 0x000a, 0x3766: 0x000a, 0x3767: 0x000a, 0x3768: 0x000a, 0x3769: 0x000a, + 0x376a: 0x000a, 0x376b: 0x000a, 0x376c: 0x000a, + 0x3770: 0x000a, 0x3771: 0x000a, 0x3772: 0x000a, 0x3773: 0x000a, 0x3774: 0x000a, 0x3775: 0x000a, + 0x3776: 0x000a, 0x3777: 0x000a, 0x3778: 0x000a, 0x3779: 0x000a, + // Block 0xde, offset 0x3780 + 0x3780: 0x000a, 0x3781: 0x000a, 0x3782: 0x000a, 0x3783: 0x000a, 0x3784: 0x000a, 0x3785: 0x000a, + 0x3786: 0x000a, 0x3787: 0x000a, 0x3788: 0x000a, 0x3789: 0x000a, 0x378a: 0x000a, 0x378b: 0x000a, + 0x378c: 0x000a, 0x378d: 0x000a, 0x378e: 0x000a, 0x378f: 0x000a, 0x3790: 0x000a, 0x3791: 0x000a, + 0x3792: 0x000a, 0x3793: 0x000a, 0x3794: 0x000a, 0x3795: 0x000a, 0x3796: 0x000a, 0x3797: 0x000a, + 0x3798: 0x000a, + // Block 0xdf, offset 0x37c0 + 0x37c0: 0x000a, 0x37c1: 0x000a, 0x37c2: 0x000a, 0x37c3: 0x000a, 0x37c4: 0x000a, 0x37c5: 0x000a, + 0x37c6: 0x000a, 0x37c7: 0x000a, 0x37c8: 0x000a, 0x37c9: 0x000a, 0x37ca: 0x000a, 0x37cb: 0x000a, + 0x37d0: 0x000a, 0x37d1: 0x000a, + 0x37d2: 0x000a, 0x37d3: 0x000a, 0x37d4: 0x000a, 0x37d5: 0x000a, 0x37d6: 0x000a, 0x37d7: 0x000a, + 0x37d8: 0x000a, 0x37d9: 0x000a, 0x37da: 0x000a, 0x37db: 0x000a, 0x37dc: 0x000a, 0x37dd: 0x000a, + 0x37de: 0x000a, 0x37df: 0x000a, 0x37e0: 0x000a, 0x37e1: 0x000a, 0x37e2: 0x000a, 0x37e3: 0x000a, + 0x37e4: 0x000a, 0x37e5: 0x000a, 0x37e6: 0x000a, 0x37e7: 0x000a, 0x37e8: 0x000a, 0x37e9: 0x000a, + 0x37ea: 0x000a, 0x37eb: 0x000a, 0x37ec: 0x000a, 0x37ed: 0x000a, 0x37ee: 0x000a, 0x37ef: 0x000a, + 0x37f0: 0x000a, 0x37f1: 0x000a, 0x37f2: 0x000a, 0x37f3: 0x000a, 0x37f4: 0x000a, 0x37f5: 0x000a, + 0x37f6: 0x000a, 0x37f7: 0x000a, 0x37f8: 0x000a, 0x37f9: 0x000a, 0x37fa: 0x000a, 0x37fb: 0x000a, + 0x37fc: 0x000a, 0x37fd: 0x000a, 0x37fe: 0x000a, 0x37ff: 0x000a, + // Block 0xe0, offset 0x3800 + 0x3800: 0x000a, 0x3801: 0x000a, 0x3802: 0x000a, 0x3803: 0x000a, 0x3804: 0x000a, 0x3805: 0x000a, + 0x3806: 0x000a, 0x3807: 0x000a, + 0x3810: 0x000a, 0x3811: 0x000a, + 0x3812: 0x000a, 0x3813: 0x000a, 0x3814: 0x000a, 0x3815: 0x000a, 0x3816: 0x000a, 0x3817: 0x000a, + 0x3818: 0x000a, 0x3819: 0x000a, + 0x3820: 0x000a, 0x3821: 0x000a, 0x3822: 0x000a, 0x3823: 0x000a, + 0x3824: 0x000a, 0x3825: 0x000a, 0x3826: 0x000a, 0x3827: 0x000a, 0x3828: 0x000a, 0x3829: 0x000a, + 0x382a: 0x000a, 0x382b: 0x000a, 0x382c: 0x000a, 0x382d: 0x000a, 0x382e: 0x000a, 0x382f: 0x000a, + 0x3830: 0x000a, 0x3831: 0x000a, 0x3832: 0x000a, 0x3833: 0x000a, 0x3834: 0x000a, 0x3835: 0x000a, + 0x3836: 0x000a, 0x3837: 0x000a, 0x3838: 0x000a, 0x3839: 0x000a, 0x383a: 0x000a, 0x383b: 0x000a, + 0x383c: 0x000a, 0x383d: 0x000a, 0x383e: 0x000a, 0x383f: 0x000a, + // Block 0xe1, offset 0x3840 + 0x3840: 0x000a, 0x3841: 0x000a, 0x3842: 0x000a, 0x3843: 0x000a, 0x3844: 0x000a, 0x3845: 0x000a, + 0x3846: 0x000a, 0x3847: 0x000a, + 0x3850: 0x000a, 0x3851: 0x000a, + 0x3852: 0x000a, 0x3853: 0x000a, 0x3854: 0x000a, 0x3855: 0x000a, 0x3856: 0x000a, 0x3857: 0x000a, + 0x3858: 0x000a, 0x3859: 0x000a, 0x385a: 0x000a, 0x385b: 0x000a, 0x385c: 0x000a, 0x385d: 0x000a, + 0x385e: 0x000a, 0x385f: 0x000a, 0x3860: 0x000a, 0x3861: 0x000a, 0x3862: 0x000a, 0x3863: 0x000a, + 0x3864: 0x000a, 0x3865: 0x000a, 0x3866: 0x000a, 0x3867: 0x000a, 0x3868: 0x000a, 0x3869: 0x000a, + 0x386a: 0x000a, 0x386b: 0x000a, 0x386c: 0x000a, 0x386d: 0x000a, + // Block 0xe2, offset 0x3880 + 0x3880: 0x000a, 0x3881: 0x000a, 0x3882: 0x000a, 0x3883: 0x000a, 0x3884: 
0x000a, 0x3885: 0x000a, + 0x3886: 0x000a, 0x3887: 0x000a, 0x3888: 0x000a, 0x3889: 0x000a, 0x388a: 0x000a, 0x388b: 0x000a, + 0x3890: 0x000a, 0x3891: 0x000a, + 0x3892: 0x000a, 0x3893: 0x000a, 0x3894: 0x000a, 0x3895: 0x000a, 0x3896: 0x000a, 0x3897: 0x000a, + 0x3898: 0x000a, 0x3899: 0x000a, 0x389a: 0x000a, 0x389b: 0x000a, 0x389c: 0x000a, 0x389d: 0x000a, + 0x389e: 0x000a, 0x389f: 0x000a, 0x38a0: 0x000a, 0x38a1: 0x000a, 0x38a2: 0x000a, 0x38a3: 0x000a, + 0x38a4: 0x000a, 0x38a5: 0x000a, 0x38a6: 0x000a, 0x38a7: 0x000a, 0x38a8: 0x000a, 0x38a9: 0x000a, + 0x38aa: 0x000a, 0x38ab: 0x000a, 0x38ac: 0x000a, 0x38ad: 0x000a, 0x38ae: 0x000a, 0x38af: 0x000a, + 0x38b0: 0x000a, 0x38b1: 0x000a, 0x38b2: 0x000a, 0x38b3: 0x000a, 0x38b4: 0x000a, 0x38b5: 0x000a, + 0x38b6: 0x000a, 0x38b7: 0x000a, 0x38b8: 0x000a, 0x38b9: 0x000a, 0x38ba: 0x000a, 0x38bb: 0x000a, + 0x38bc: 0x000a, 0x38bd: 0x000a, 0x38be: 0x000a, + // Block 0xe3, offset 0x38c0 + 0x38c0: 0x000a, 0x38c1: 0x000a, 0x38c2: 0x000a, 0x38c3: 0x000a, 0x38c4: 0x000a, 0x38c5: 0x000a, + 0x38c6: 0x000a, 0x38c7: 0x000a, 0x38c8: 0x000a, 0x38c9: 0x000a, 0x38ca: 0x000a, 0x38cb: 0x000a, + 0x38cc: 0x000a, 0x38cd: 0x000a, 0x38ce: 0x000a, 0x38cf: 0x000a, 0x38d0: 0x000a, 0x38d1: 0x000a, + 0x38d2: 0x000a, 0x38d3: 0x000a, 0x38d4: 0x000a, 0x38d5: 0x000a, 0x38d6: 0x000a, 0x38d7: 0x000a, + 0x38d8: 0x000a, 0x38d9: 0x000a, 0x38da: 0x000a, 0x38db: 0x000a, 0x38dc: 0x000a, 0x38dd: 0x000a, + 0x38de: 0x000a, 0x38df: 0x000a, 0x38e0: 0x000a, 0x38e1: 0x000a, 0x38e2: 0x000a, 0x38e3: 0x000a, + 0x38e4: 0x000a, 0x38e5: 0x000a, 0x38e6: 0x000a, 0x38e7: 0x000a, 0x38e8: 0x000a, 0x38e9: 0x000a, + 0x38ea: 0x000a, 0x38eb: 0x000a, 0x38ec: 0x000a, 0x38ed: 0x000a, 0x38ee: 0x000a, 0x38ef: 0x000a, + 0x38f0: 0x000a, 0x38f3: 0x000a, 0x38f4: 0x000a, 0x38f5: 0x000a, + 0x38f6: 0x000a, 0x38fa: 0x000a, + 0x38fc: 0x000a, 0x38fd: 0x000a, 0x38fe: 0x000a, 0x38ff: 0x000a, + // Block 0xe4, offset 0x3900 + 0x3900: 0x000a, 0x3901: 0x000a, 0x3902: 0x000a, 0x3903: 0x000a, 0x3904: 0x000a, 0x3905: 0x000a, + 0x3906: 0x000a, 0x3907: 0x000a, 0x3908: 0x000a, 0x3909: 0x000a, 0x390a: 0x000a, 0x390b: 0x000a, + 0x390c: 0x000a, 0x390d: 0x000a, 0x390e: 0x000a, 0x390f: 0x000a, 0x3910: 0x000a, 0x3911: 0x000a, + 0x3912: 0x000a, 0x3913: 0x000a, 0x3914: 0x000a, 0x3915: 0x000a, 0x3916: 0x000a, 0x3917: 0x000a, + 0x3918: 0x000a, 0x3919: 0x000a, 0x391a: 0x000a, 0x391b: 0x000a, 0x391c: 0x000a, 0x391d: 0x000a, + 0x391e: 0x000a, 0x391f: 0x000a, 0x3920: 0x000a, 0x3921: 0x000a, 0x3922: 0x000a, + 0x3930: 0x000a, 0x3931: 0x000a, 0x3932: 0x000a, 0x3933: 0x000a, 0x3934: 0x000a, 0x3935: 0x000a, + 0x3936: 0x000a, 0x3937: 0x000a, 0x3938: 0x000a, 0x3939: 0x000a, + // Block 0xe5, offset 0x3940 + 0x3940: 0x000a, 0x3941: 0x000a, 0x3942: 0x000a, + 0x3950: 0x000a, 0x3951: 0x000a, + 0x3952: 0x000a, 0x3953: 0x000a, 0x3954: 0x000a, 0x3955: 0x000a, 0x3956: 0x000a, 0x3957: 0x000a, + 0x3958: 0x000a, 0x3959: 0x000a, 0x395a: 0x000a, 0x395b: 0x000a, 0x395c: 0x000a, 0x395d: 0x000a, + 0x395e: 0x000a, 0x395f: 0x000a, 0x3960: 0x000a, 0x3961: 0x000a, 0x3962: 0x000a, 0x3963: 0x000a, + 0x3964: 0x000a, 0x3965: 0x000a, 0x3966: 0x000a, 0x3967: 0x000a, 0x3968: 0x000a, 0x3969: 0x000a, + 0x396a: 0x000a, 0x396b: 0x000a, 0x396c: 0x000a, 0x396d: 0x000a, 0x396e: 0x000a, 0x396f: 0x000a, + 0x3970: 0x000a, 0x3971: 0x000a, 0x3972: 0x000a, 0x3973: 0x000a, 0x3974: 0x000a, 0x3975: 0x000a, + 0x3976: 0x000a, 0x3977: 0x000a, 0x3978: 0x000a, 0x3979: 0x000a, 0x397a: 0x000a, 0x397b: 0x000a, + 0x397c: 0x000a, 0x397d: 0x000a, 0x397e: 0x000a, 0x397f: 0x000a, + // Block 0xe6, offset 0x3980 + 0x39a0: 
0x000a, 0x39a1: 0x000a, 0x39a2: 0x000a, 0x39a3: 0x000a, + 0x39a4: 0x000a, 0x39a5: 0x000a, 0x39a6: 0x000a, 0x39a7: 0x000a, 0x39a8: 0x000a, 0x39a9: 0x000a, + 0x39aa: 0x000a, 0x39ab: 0x000a, 0x39ac: 0x000a, 0x39ad: 0x000a, + // Block 0xe7, offset 0x39c0 + 0x39fe: 0x000b, 0x39ff: 0x000b, + // Block 0xe8, offset 0x3a00 + 0x3a00: 0x000b, 0x3a01: 0x000b, 0x3a02: 0x000b, 0x3a03: 0x000b, 0x3a04: 0x000b, 0x3a05: 0x000b, + 0x3a06: 0x000b, 0x3a07: 0x000b, 0x3a08: 0x000b, 0x3a09: 0x000b, 0x3a0a: 0x000b, 0x3a0b: 0x000b, + 0x3a0c: 0x000b, 0x3a0d: 0x000b, 0x3a0e: 0x000b, 0x3a0f: 0x000b, 0x3a10: 0x000b, 0x3a11: 0x000b, + 0x3a12: 0x000b, 0x3a13: 0x000b, 0x3a14: 0x000b, 0x3a15: 0x000b, 0x3a16: 0x000b, 0x3a17: 0x000b, + 0x3a18: 0x000b, 0x3a19: 0x000b, 0x3a1a: 0x000b, 0x3a1b: 0x000b, 0x3a1c: 0x000b, 0x3a1d: 0x000b, + 0x3a1e: 0x000b, 0x3a1f: 0x000b, 0x3a20: 0x000b, 0x3a21: 0x000b, 0x3a22: 0x000b, 0x3a23: 0x000b, + 0x3a24: 0x000b, 0x3a25: 0x000b, 0x3a26: 0x000b, 0x3a27: 0x000b, 0x3a28: 0x000b, 0x3a29: 0x000b, + 0x3a2a: 0x000b, 0x3a2b: 0x000b, 0x3a2c: 0x000b, 0x3a2d: 0x000b, 0x3a2e: 0x000b, 0x3a2f: 0x000b, + 0x3a30: 0x000b, 0x3a31: 0x000b, 0x3a32: 0x000b, 0x3a33: 0x000b, 0x3a34: 0x000b, 0x3a35: 0x000b, + 0x3a36: 0x000b, 0x3a37: 0x000b, 0x3a38: 0x000b, 0x3a39: 0x000b, 0x3a3a: 0x000b, 0x3a3b: 0x000b, + 0x3a3c: 0x000b, 0x3a3d: 0x000b, 0x3a3e: 0x000b, 0x3a3f: 0x000b, + // Block 0xe9, offset 0x3a40 + 0x3a40: 0x000c, 0x3a41: 0x000c, 0x3a42: 0x000c, 0x3a43: 0x000c, 0x3a44: 0x000c, 0x3a45: 0x000c, + 0x3a46: 0x000c, 0x3a47: 0x000c, 0x3a48: 0x000c, 0x3a49: 0x000c, 0x3a4a: 0x000c, 0x3a4b: 0x000c, + 0x3a4c: 0x000c, 0x3a4d: 0x000c, 0x3a4e: 0x000c, 0x3a4f: 0x000c, 0x3a50: 0x000c, 0x3a51: 0x000c, + 0x3a52: 0x000c, 0x3a53: 0x000c, 0x3a54: 0x000c, 0x3a55: 0x000c, 0x3a56: 0x000c, 0x3a57: 0x000c, + 0x3a58: 0x000c, 0x3a59: 0x000c, 0x3a5a: 0x000c, 0x3a5b: 0x000c, 0x3a5c: 0x000c, 0x3a5d: 0x000c, + 0x3a5e: 0x000c, 0x3a5f: 0x000c, 0x3a60: 0x000c, 0x3a61: 0x000c, 0x3a62: 0x000c, 0x3a63: 0x000c, + 0x3a64: 0x000c, 0x3a65: 0x000c, 0x3a66: 0x000c, 0x3a67: 0x000c, 0x3a68: 0x000c, 0x3a69: 0x000c, + 0x3a6a: 0x000c, 0x3a6b: 0x000c, 0x3a6c: 0x000c, 0x3a6d: 0x000c, 0x3a6e: 0x000c, 0x3a6f: 0x000c, + 0x3a70: 0x000b, 0x3a71: 0x000b, 0x3a72: 0x000b, 0x3a73: 0x000b, 0x3a74: 0x000b, 0x3a75: 0x000b, + 0x3a76: 0x000b, 0x3a77: 0x000b, 0x3a78: 0x000b, 0x3a79: 0x000b, 0x3a7a: 0x000b, 0x3a7b: 0x000b, + 0x3a7c: 0x000b, 0x3a7d: 0x000b, 0x3a7e: 0x000b, 0x3a7f: 0x000b, +} + +// bidiIndex: 24 blocks, 1536 entries, 1536 bytes +// Block 0 is the zero block. 
+var bidiIndex = [1536]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x01, 0xc3: 0x02, + 0xca: 0x03, 0xcb: 0x04, 0xcc: 0x05, 0xcd: 0x06, 0xce: 0x07, 0xcf: 0x08, + 0xd2: 0x09, 0xd6: 0x0a, 0xd7: 0x0b, + 0xd8: 0x0c, 0xd9: 0x0d, 0xda: 0x0e, 0xdb: 0x0f, 0xdc: 0x10, 0xdd: 0x11, 0xde: 0x12, 0xdf: 0x13, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, 0xe4: 0x06, + 0xea: 0x07, 0xef: 0x08, + 0xf0: 0x11, 0xf1: 0x12, 0xf2: 0x12, 0xf3: 0x14, 0xf4: 0x15, + // Block 0x4, offset 0x100 + 0x120: 0x14, 0x121: 0x15, 0x122: 0x16, 0x123: 0x17, 0x124: 0x18, 0x125: 0x19, 0x126: 0x1a, 0x127: 0x1b, + 0x128: 0x1c, 0x129: 0x1d, 0x12a: 0x1c, 0x12b: 0x1e, 0x12c: 0x1f, 0x12d: 0x20, 0x12e: 0x21, 0x12f: 0x22, + 0x130: 0x23, 0x131: 0x24, 0x132: 0x1a, 0x133: 0x25, 0x134: 0x26, 0x135: 0x27, 0x137: 0x28, + 0x138: 0x29, 0x139: 0x2a, 0x13a: 0x2b, 0x13b: 0x2c, 0x13c: 0x2d, 0x13d: 0x2e, 0x13e: 0x2f, 0x13f: 0x30, + // Block 0x5, offset 0x140 + 0x140: 0x31, 0x141: 0x32, 0x142: 0x33, + 0x14d: 0x34, 0x14e: 0x35, + 0x150: 0x36, + 0x15a: 0x37, 0x15c: 0x38, 0x15d: 0x39, 0x15e: 0x3a, 0x15f: 0x3b, + 0x160: 0x3c, 0x162: 0x3d, 0x164: 0x3e, 0x165: 0x3f, 0x167: 0x40, + 0x168: 0x41, 0x169: 0x42, 0x16a: 0x43, 0x16c: 0x44, 0x16d: 0x45, 0x16e: 0x46, 0x16f: 0x47, + 0x170: 0x48, 0x173: 0x49, 0x177: 0x4a, + 0x17e: 0x4b, 0x17f: 0x4c, + // Block 0x6, offset 0x180 + 0x180: 0x4d, 0x181: 0x4e, 0x182: 0x4f, 0x183: 0x50, 0x184: 0x51, 0x185: 0x52, 0x186: 0x53, 0x187: 0x54, + 0x188: 0x55, 0x189: 0x54, 0x18a: 0x54, 0x18b: 0x54, 0x18c: 0x56, 0x18d: 0x57, 0x18e: 0x58, 0x18f: 0x54, + 0x190: 0x59, 0x191: 0x5a, 0x192: 0x5b, 0x193: 0x5c, 0x194: 0x54, 0x195: 0x54, 0x196: 0x54, 0x197: 0x54, + 0x198: 0x54, 0x199: 0x54, 0x19a: 0x5d, 0x19b: 0x54, 0x19c: 0x54, 0x19d: 0x5e, 0x19e: 0x54, 0x19f: 0x5f, + 0x1a4: 0x54, 0x1a5: 0x54, 0x1a6: 0x60, 0x1a7: 0x61, + 0x1a8: 0x54, 0x1a9: 0x54, 0x1aa: 0x54, 0x1ab: 0x54, 0x1ac: 0x54, 0x1ad: 0x62, 0x1ae: 0x63, 0x1af: 0x64, + 0x1b3: 0x65, 0x1b5: 0x66, 0x1b7: 0x67, + 0x1b8: 0x68, 0x1b9: 0x69, 0x1ba: 0x6a, 0x1bb: 0x6b, 0x1bc: 0x54, 0x1bd: 0x54, 0x1be: 0x54, 0x1bf: 0x6c, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x6d, 0x1c2: 0x6e, 0x1c3: 0x6f, 0x1c7: 0x70, + 0x1c8: 0x71, 0x1c9: 0x72, 0x1ca: 0x73, 0x1cb: 0x74, 0x1cd: 0x75, 0x1cf: 0x76, + // Block 0x8, offset 0x200 + 0x237: 0x54, + // Block 0x9, offset 0x240 + 0x252: 0x77, 0x253: 0x78, + 0x258: 0x79, 0x259: 0x7a, 0x25a: 0x7b, 0x25b: 0x7c, 0x25c: 0x7d, 0x25e: 0x7e, + 0x260: 0x7f, 0x261: 0x80, 0x263: 0x81, 0x264: 0x82, 0x265: 0x83, 0x266: 0x84, 0x267: 0x85, + 0x268: 0x86, 0x269: 0x87, 0x26a: 0x88, 0x26b: 0x89, 0x26f: 0x8a, + // Block 0xa, offset 0x280 + 0x2ac: 0x8b, 0x2ad: 0x8c, 0x2ae: 0x0e, 0x2af: 0x0e, + 0x2b0: 0x0e, 0x2b1: 0x0e, 0x2b2: 0x0e, 0x2b3: 0x0e, 0x2b4: 0x8d, 0x2b5: 0x0e, 0x2b6: 0x0e, 0x2b7: 0x8e, + 0x2b8: 0x8f, 0x2b9: 0x90, 0x2ba: 0x0e, 0x2bb: 0x91, 0x2bc: 0x92, 0x2bd: 0x93, 0x2bf: 0x94, + // Block 0xb, offset 0x2c0 + 0x2c4: 0x95, 0x2c5: 0x54, 0x2c6: 0x96, 0x2c7: 0x97, + 0x2cb: 0x98, 0x2cd: 0x99, + 0x2e0: 0x9a, 0x2e1: 0x9a, 0x2e2: 0x9a, 0x2e3: 0x9a, 0x2e4: 0x9b, 0x2e5: 0x9a, 0x2e6: 0x9a, 0x2e7: 0x9a, + 0x2e8: 0x9c, 0x2e9: 0x9a, 0x2ea: 0x9a, 0x2eb: 0x9d, 0x2ec: 0x9e, 0x2ed: 0x9a, 0x2ee: 0x9a, 0x2ef: 0x9a, + 0x2f0: 0x9a, 0x2f1: 0x9a, 0x2f2: 0x9a, 0x2f3: 0x9a, 0x2f4: 0x9f, 0x2f5: 0x9a, 0x2f6: 0x9a, 0x2f7: 0x9a, + 0x2f8: 0x9a, 0x2f9: 0xa0, 0x2fa: 0x9a, 0x2fb: 0x9a, 0x2fc: 0xa1, 0x2fd: 0xa2, 0x2fe: 0x9a, 0x2ff: 0x9a, + // Block 0xc, offset 0x300 + 0x300: 0xa3, 0x301: 0xa4, 0x302: 0xa5, 0x304: 0xa6, 0x305: 0xa7, 
0x306: 0xa8, 0x307: 0xa9, + 0x308: 0xaa, 0x30b: 0xab, 0x30c: 0x26, 0x30d: 0xac, + 0x310: 0xad, 0x311: 0xae, 0x312: 0xaf, 0x313: 0xb0, 0x316: 0xb1, 0x317: 0xb2, + 0x318: 0xb3, 0x319: 0xb4, 0x31a: 0xb5, 0x31c: 0xb6, + 0x320: 0xb7, + 0x328: 0xb8, 0x329: 0xb9, 0x32a: 0xba, + 0x330: 0xbb, 0x332: 0xbc, 0x334: 0xbd, 0x335: 0xbe, 0x336: 0xbf, + 0x33b: 0xc0, + // Block 0xd, offset 0x340 + 0x36b: 0xc1, 0x36c: 0xc2, + 0x37e: 0xc3, + // Block 0xe, offset 0x380 + 0x3b2: 0xc4, + // Block 0xf, offset 0x3c0 + 0x3c5: 0xc5, 0x3c6: 0xc6, + 0x3c8: 0x54, 0x3c9: 0xc7, 0x3cc: 0x54, 0x3cd: 0xc8, + 0x3db: 0xc9, 0x3dc: 0xca, 0x3dd: 0xcb, 0x3de: 0xcc, 0x3df: 0xcd, + 0x3e8: 0xce, 0x3e9: 0xcf, 0x3ea: 0xd0, + // Block 0x10, offset 0x400 + 0x400: 0xd1, + 0x420: 0x9a, 0x421: 0x9a, 0x422: 0x9a, 0x423: 0xd2, 0x424: 0x9a, 0x425: 0xd3, 0x426: 0x9a, 0x427: 0x9a, + 0x428: 0x9a, 0x429: 0x9a, 0x42a: 0x9a, 0x42b: 0x9a, 0x42c: 0x9a, 0x42d: 0x9a, 0x42e: 0x9a, 0x42f: 0x9a, + 0x430: 0x9a, 0x431: 0xa1, 0x432: 0x0e, 0x433: 0x9a, 0x434: 0x9a, 0x435: 0x9a, 0x436: 0x9a, 0x437: 0x9a, + 0x438: 0x0e, 0x439: 0x0e, 0x43a: 0x0e, 0x43b: 0xd4, 0x43c: 0x9a, 0x43d: 0x9a, 0x43e: 0x9a, 0x43f: 0x9a, + // Block 0x11, offset 0x440 + 0x440: 0xd5, 0x441: 0x54, 0x442: 0xd6, 0x443: 0xd7, 0x444: 0xd8, 0x445: 0xd9, + 0x449: 0xda, 0x44c: 0x54, 0x44d: 0x54, 0x44e: 0x54, 0x44f: 0x54, + 0x450: 0x54, 0x451: 0x54, 0x452: 0x54, 0x453: 0x54, 0x454: 0x54, 0x455: 0x54, 0x456: 0x54, 0x457: 0x54, + 0x458: 0x54, 0x459: 0x54, 0x45a: 0x54, 0x45b: 0xdb, 0x45c: 0x54, 0x45d: 0x6b, 0x45e: 0x54, 0x45f: 0xdc, + 0x460: 0xdd, 0x461: 0xde, 0x462: 0xdf, 0x464: 0xe0, 0x465: 0xe1, 0x466: 0xe2, 0x467: 0xe3, + 0x469: 0xe4, + 0x47f: 0xe5, + // Block 0x12, offset 0x480 + 0x4bf: 0xe5, + // Block 0x13, offset 0x4c0 + 0x4d0: 0x09, 0x4d1: 0x0a, 0x4d6: 0x0b, + 0x4db: 0x0c, 0x4dd: 0x0d, 0x4de: 0x0e, 0x4df: 0x0f, + 0x4ef: 0x10, + 0x4ff: 0x10, + // Block 0x14, offset 0x500 + 0x50f: 0x10, + 0x51f: 0x10, + 0x52f: 0x10, + 0x53f: 0x10, + // Block 0x15, offset 0x540 + 0x540: 0xe6, 0x541: 0xe6, 0x542: 0xe6, 0x543: 0xe6, 0x544: 0x05, 0x545: 0x05, 0x546: 0x05, 0x547: 0xe7, + 0x548: 0xe6, 0x549: 0xe6, 0x54a: 0xe6, 0x54b: 0xe6, 0x54c: 0xe6, 0x54d: 0xe6, 0x54e: 0xe6, 0x54f: 0xe6, + 0x550: 0xe6, 0x551: 0xe6, 0x552: 0xe6, 0x553: 0xe6, 0x554: 0xe6, 0x555: 0xe6, 0x556: 0xe6, 0x557: 0xe6, + 0x558: 0xe6, 0x559: 0xe6, 0x55a: 0xe6, 0x55b: 0xe6, 0x55c: 0xe6, 0x55d: 0xe6, 0x55e: 0xe6, 0x55f: 0xe6, + 0x560: 0xe6, 0x561: 0xe6, 0x562: 0xe6, 0x563: 0xe6, 0x564: 0xe6, 0x565: 0xe6, 0x566: 0xe6, 0x567: 0xe6, + 0x568: 0xe6, 0x569: 0xe6, 0x56a: 0xe6, 0x56b: 0xe6, 0x56c: 0xe6, 0x56d: 0xe6, 0x56e: 0xe6, 0x56f: 0xe6, + 0x570: 0xe6, 0x571: 0xe6, 0x572: 0xe6, 0x573: 0xe6, 0x574: 0xe6, 0x575: 0xe6, 0x576: 0xe6, 0x577: 0xe6, + 0x578: 0xe6, 0x579: 0xe6, 0x57a: 0xe6, 0x57b: 0xe6, 0x57c: 0xe6, 0x57d: 0xe6, 0x57e: 0xe6, 0x57f: 0xe6, + // Block 0x16, offset 0x580 + 0x58f: 0x10, + 0x59f: 0x10, + 0x5a0: 0x13, + 0x5af: 0x10, + 0x5bf: 0x10, + // Block 0x17, offset 0x5c0 + 0x5cf: 0x10, +} + +// Total table size 16568 bytes (16KiB); checksum: F50EF68C diff --git a/vendor/golang.org/x/text/unicode/norm/composition.go b/vendor/golang.org/x/text/unicode/norm/composition.go index bab4c5d..e2087bc 100644 --- a/vendor/golang.org/x/text/unicode/norm/composition.go +++ b/vendor/golang.org/x/text/unicode/norm/composition.go @@ -407,7 +407,7 @@ func decomposeHangul(buf []byte, r rune) int { // decomposeHangul algorithmically decomposes a Hangul rune into // its Jamo components. 
-// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul. +// See https://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul. func (rb *reorderBuffer) decomposeHangul(r rune) { r -= hangulBase x := r % jamoTCount @@ -420,7 +420,7 @@ func (rb *reorderBuffer) decomposeHangul(r rune) { } // combineHangul algorithmically combines Jamo character components into Hangul. -// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul. +// See https://unicode.org/reports/tr15/#Hangul for details on combining Hangul. func (rb *reorderBuffer) combineHangul(s, i, k int) { b := rb.rune[:] bn := rb.nrune @@ -461,6 +461,10 @@ func (rb *reorderBuffer) combineHangul(s, i, k int) { // It should only be used to recompose a single segment, as it will not // handle alternations between Hangul and non-Hangul characters correctly. func (rb *reorderBuffer) compose() { + // Lazily load the map used by the combine func below, but do + // it outside of the loop. + recompMapOnce.Do(buildRecompMap) + // UAX #15, section X5 , including Corrigendum #5 // "In any character sequence beginning with starter S, a character C is // blocked from S if and only if there is some character B between S diff --git a/vendor/golang.org/x/text/unicode/norm/forminfo.go b/vendor/golang.org/x/text/unicode/norm/forminfo.go index e67e765..526c703 100644 --- a/vendor/golang.org/x/text/unicode/norm/forminfo.go +++ b/vendor/golang.org/x/text/unicode/norm/forminfo.go @@ -4,6 +4,8 @@ package norm +import "encoding/binary" + // This file contains Form-specific logic and wrappers for data in tables.go. // Rune info is stored in a separate trie per composing form. A composing form @@ -178,6 +180,17 @@ func (p Properties) TrailCCC() uint8 { return ccc[p.tccc] } +func buildRecompMap() { + recompMap = make(map[uint32]rune, len(recompMapPacked)/8) + var buf [8]byte + for i := 0; i < len(recompMapPacked); i += 8 { + copy(buf[:], recompMapPacked[i:i+8]) + key := binary.BigEndian.Uint32(buf[:4]) + val := binary.BigEndian.Uint32(buf[4:]) + recompMap[key] = rune(val) + } +} + // Recomposition // We use 32-bit keys instead of 64-bit for the two codepoint keys. // This clips off the bits of three entries, but we know this will not @@ -186,8 +199,14 @@ func (p Properties) TrailCCC() uint8 { // Note that the recomposition map for NFC and NFKC are identical. // combine returns the combined rune or 0 if it doesn't exist. +// +// The caller is responsible for calling +// recompMapOnce.Do(buildRecompMap) sometime before this is called. 
func combine(a, b rune) rune { key := uint32(uint16(a))<<16 + uint32(uint16(b)) + if recompMap == nil { + panic("caller error") // see func comment + } return recompMap[key] } diff --git a/vendor/golang.org/x/text/unicode/norm/iter.go b/vendor/golang.org/x/text/unicode/norm/iter.go index ce17f96..417c6b2 100644 --- a/vendor/golang.org/x/text/unicode/norm/iter.go +++ b/vendor/golang.org/x/text/unicode/norm/iter.go @@ -128,8 +128,9 @@ func (i *Iter) Next() []byte { func nextASCIIBytes(i *Iter) []byte { p := i.p + 1 if p >= i.rb.nsrc { + p0 := i.p i.setDone() - return i.rb.src.bytes[i.p:p] + return i.rb.src.bytes[p0:p] } if i.rb.src.bytes[p] < utf8.RuneSelf { p0 := i.p diff --git a/vendor/golang.org/x/text/unicode/norm/normalize.go b/vendor/golang.org/x/text/unicode/norm/normalize.go index e28ac64..95efcf2 100644 --- a/vendor/golang.org/x/text/unicode/norm/normalize.go +++ b/vendor/golang.org/x/text/unicode/norm/normalize.go @@ -29,8 +29,8 @@ import ( // proceed independently on both sides: // f(x) == append(f(x[0:n]), f(x[n:])...) // -// References: http://unicode.org/reports/tr15/ and -// http://unicode.org/notes/tn5/. +// References: https://unicode.org/reports/tr15/ and +// https://unicode.org/notes/tn5/. type Form int const ( diff --git a/vendor/golang.org/x/text/unicode/norm/readwriter.go b/vendor/golang.org/x/text/unicode/norm/readwriter.go index d926ee9..b38096f 100644 --- a/vendor/golang.org/x/text/unicode/norm/readwriter.go +++ b/vendor/golang.org/x/text/unicode/norm/readwriter.go @@ -60,8 +60,8 @@ func (w *normWriter) Close() error { } // Writer returns a new writer that implements Write(b) -// by writing f(b) to w. The returned writer may use an -// an internal buffer to maintain state across Write calls. +// by writing f(b) to w. The returned writer may use an +// internal buffer to maintain state across Write calls. // Calling its Close method writes any buffered data to w. func (f Form) Writer(w io.Writer) io.WriteCloser { wr := &normWriter{rb: reorderBuffer{}, w: w} diff --git a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go index 44dd397..26fbd55 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables10.0.0.go @@ -1,9 +1,11 @@ // Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. -// +build go1.10 +// +build go1.10,!go1.13 package norm +import "sync" + const ( // Version is the Unicode edition from which the tables are derived. 
Version = "10.0.0" @@ -6707,947 +6709,949 @@ var nfkcSparseValues = [869]valueRange{ } // recompMap: 7520 bytes (entries only) -var recompMap = map[uint32]rune{ - 0x00410300: 0x00C0, - 0x00410301: 0x00C1, - 0x00410302: 0x00C2, - 0x00410303: 0x00C3, - 0x00410308: 0x00C4, - 0x0041030A: 0x00C5, - 0x00430327: 0x00C7, - 0x00450300: 0x00C8, - 0x00450301: 0x00C9, - 0x00450302: 0x00CA, - 0x00450308: 0x00CB, - 0x00490300: 0x00CC, - 0x00490301: 0x00CD, - 0x00490302: 0x00CE, - 0x00490308: 0x00CF, - 0x004E0303: 0x00D1, - 0x004F0300: 0x00D2, - 0x004F0301: 0x00D3, - 0x004F0302: 0x00D4, - 0x004F0303: 0x00D5, - 0x004F0308: 0x00D6, - 0x00550300: 0x00D9, - 0x00550301: 0x00DA, - 0x00550302: 0x00DB, - 0x00550308: 0x00DC, - 0x00590301: 0x00DD, - 0x00610300: 0x00E0, - 0x00610301: 0x00E1, - 0x00610302: 0x00E2, - 0x00610303: 0x00E3, - 0x00610308: 0x00E4, - 0x0061030A: 0x00E5, - 0x00630327: 0x00E7, - 0x00650300: 0x00E8, - 0x00650301: 0x00E9, - 0x00650302: 0x00EA, - 0x00650308: 0x00EB, - 0x00690300: 0x00EC, - 0x00690301: 0x00ED, - 0x00690302: 0x00EE, - 0x00690308: 0x00EF, - 0x006E0303: 0x00F1, - 0x006F0300: 0x00F2, - 0x006F0301: 0x00F3, - 0x006F0302: 0x00F4, - 0x006F0303: 0x00F5, - 0x006F0308: 0x00F6, - 0x00750300: 0x00F9, - 0x00750301: 0x00FA, - 0x00750302: 0x00FB, - 0x00750308: 0x00FC, - 0x00790301: 0x00FD, - 0x00790308: 0x00FF, - 0x00410304: 0x0100, - 0x00610304: 0x0101, - 0x00410306: 0x0102, - 0x00610306: 0x0103, - 0x00410328: 0x0104, - 0x00610328: 0x0105, - 0x00430301: 0x0106, - 0x00630301: 0x0107, - 0x00430302: 0x0108, - 0x00630302: 0x0109, - 0x00430307: 0x010A, - 0x00630307: 0x010B, - 0x0043030C: 0x010C, - 0x0063030C: 0x010D, - 0x0044030C: 0x010E, - 0x0064030C: 0x010F, - 0x00450304: 0x0112, - 0x00650304: 0x0113, - 0x00450306: 0x0114, - 0x00650306: 0x0115, - 0x00450307: 0x0116, - 0x00650307: 0x0117, - 0x00450328: 0x0118, - 0x00650328: 0x0119, - 0x0045030C: 0x011A, - 0x0065030C: 0x011B, - 0x00470302: 0x011C, - 0x00670302: 0x011D, - 0x00470306: 0x011E, - 0x00670306: 0x011F, - 0x00470307: 0x0120, - 0x00670307: 0x0121, - 0x00470327: 0x0122, - 0x00670327: 0x0123, - 0x00480302: 0x0124, - 0x00680302: 0x0125, - 0x00490303: 0x0128, - 0x00690303: 0x0129, - 0x00490304: 0x012A, - 0x00690304: 0x012B, - 0x00490306: 0x012C, - 0x00690306: 0x012D, - 0x00490328: 0x012E, - 0x00690328: 0x012F, - 0x00490307: 0x0130, - 0x004A0302: 0x0134, - 0x006A0302: 0x0135, - 0x004B0327: 0x0136, - 0x006B0327: 0x0137, - 0x004C0301: 0x0139, - 0x006C0301: 0x013A, - 0x004C0327: 0x013B, - 0x006C0327: 0x013C, - 0x004C030C: 0x013D, - 0x006C030C: 0x013E, - 0x004E0301: 0x0143, - 0x006E0301: 0x0144, - 0x004E0327: 0x0145, - 0x006E0327: 0x0146, - 0x004E030C: 0x0147, - 0x006E030C: 0x0148, - 0x004F0304: 0x014C, - 0x006F0304: 0x014D, - 0x004F0306: 0x014E, - 0x006F0306: 0x014F, - 0x004F030B: 0x0150, - 0x006F030B: 0x0151, - 0x00520301: 0x0154, - 0x00720301: 0x0155, - 0x00520327: 0x0156, - 0x00720327: 0x0157, - 0x0052030C: 0x0158, - 0x0072030C: 0x0159, - 0x00530301: 0x015A, - 0x00730301: 0x015B, - 0x00530302: 0x015C, - 0x00730302: 0x015D, - 0x00530327: 0x015E, - 0x00730327: 0x015F, - 0x0053030C: 0x0160, - 0x0073030C: 0x0161, - 0x00540327: 0x0162, - 0x00740327: 0x0163, - 0x0054030C: 0x0164, - 0x0074030C: 0x0165, - 0x00550303: 0x0168, - 0x00750303: 0x0169, - 0x00550304: 0x016A, - 0x00750304: 0x016B, - 0x00550306: 0x016C, - 0x00750306: 0x016D, - 0x0055030A: 0x016E, - 0x0075030A: 0x016F, - 0x0055030B: 0x0170, - 0x0075030B: 0x0171, - 0x00550328: 0x0172, - 0x00750328: 0x0173, - 0x00570302: 0x0174, - 0x00770302: 0x0175, - 0x00590302: 0x0176, - 0x00790302: 0x0177, - 
0x00590308: 0x0178, - 0x005A0301: 0x0179, - 0x007A0301: 0x017A, - 0x005A0307: 0x017B, - 0x007A0307: 0x017C, - 0x005A030C: 0x017D, - 0x007A030C: 0x017E, - 0x004F031B: 0x01A0, - 0x006F031B: 0x01A1, - 0x0055031B: 0x01AF, - 0x0075031B: 0x01B0, - 0x0041030C: 0x01CD, - 0x0061030C: 0x01CE, - 0x0049030C: 0x01CF, - 0x0069030C: 0x01D0, - 0x004F030C: 0x01D1, - 0x006F030C: 0x01D2, - 0x0055030C: 0x01D3, - 0x0075030C: 0x01D4, - 0x00DC0304: 0x01D5, - 0x00FC0304: 0x01D6, - 0x00DC0301: 0x01D7, - 0x00FC0301: 0x01D8, - 0x00DC030C: 0x01D9, - 0x00FC030C: 0x01DA, - 0x00DC0300: 0x01DB, - 0x00FC0300: 0x01DC, - 0x00C40304: 0x01DE, - 0x00E40304: 0x01DF, - 0x02260304: 0x01E0, - 0x02270304: 0x01E1, - 0x00C60304: 0x01E2, - 0x00E60304: 0x01E3, - 0x0047030C: 0x01E6, - 0x0067030C: 0x01E7, - 0x004B030C: 0x01E8, - 0x006B030C: 0x01E9, - 0x004F0328: 0x01EA, - 0x006F0328: 0x01EB, - 0x01EA0304: 0x01EC, - 0x01EB0304: 0x01ED, - 0x01B7030C: 0x01EE, - 0x0292030C: 0x01EF, - 0x006A030C: 0x01F0, - 0x00470301: 0x01F4, - 0x00670301: 0x01F5, - 0x004E0300: 0x01F8, - 0x006E0300: 0x01F9, - 0x00C50301: 0x01FA, - 0x00E50301: 0x01FB, - 0x00C60301: 0x01FC, - 0x00E60301: 0x01FD, - 0x00D80301: 0x01FE, - 0x00F80301: 0x01FF, - 0x0041030F: 0x0200, - 0x0061030F: 0x0201, - 0x00410311: 0x0202, - 0x00610311: 0x0203, - 0x0045030F: 0x0204, - 0x0065030F: 0x0205, - 0x00450311: 0x0206, - 0x00650311: 0x0207, - 0x0049030F: 0x0208, - 0x0069030F: 0x0209, - 0x00490311: 0x020A, - 0x00690311: 0x020B, - 0x004F030F: 0x020C, - 0x006F030F: 0x020D, - 0x004F0311: 0x020E, - 0x006F0311: 0x020F, - 0x0052030F: 0x0210, - 0x0072030F: 0x0211, - 0x00520311: 0x0212, - 0x00720311: 0x0213, - 0x0055030F: 0x0214, - 0x0075030F: 0x0215, - 0x00550311: 0x0216, - 0x00750311: 0x0217, - 0x00530326: 0x0218, - 0x00730326: 0x0219, - 0x00540326: 0x021A, - 0x00740326: 0x021B, - 0x0048030C: 0x021E, - 0x0068030C: 0x021F, - 0x00410307: 0x0226, - 0x00610307: 0x0227, - 0x00450327: 0x0228, - 0x00650327: 0x0229, - 0x00D60304: 0x022A, - 0x00F60304: 0x022B, - 0x00D50304: 0x022C, - 0x00F50304: 0x022D, - 0x004F0307: 0x022E, - 0x006F0307: 0x022F, - 0x022E0304: 0x0230, - 0x022F0304: 0x0231, - 0x00590304: 0x0232, - 0x00790304: 0x0233, - 0x00A80301: 0x0385, - 0x03910301: 0x0386, - 0x03950301: 0x0388, - 0x03970301: 0x0389, - 0x03990301: 0x038A, - 0x039F0301: 0x038C, - 0x03A50301: 0x038E, - 0x03A90301: 0x038F, - 0x03CA0301: 0x0390, - 0x03990308: 0x03AA, - 0x03A50308: 0x03AB, - 0x03B10301: 0x03AC, - 0x03B50301: 0x03AD, - 0x03B70301: 0x03AE, - 0x03B90301: 0x03AF, - 0x03CB0301: 0x03B0, - 0x03B90308: 0x03CA, - 0x03C50308: 0x03CB, - 0x03BF0301: 0x03CC, - 0x03C50301: 0x03CD, - 0x03C90301: 0x03CE, - 0x03D20301: 0x03D3, - 0x03D20308: 0x03D4, - 0x04150300: 0x0400, - 0x04150308: 0x0401, - 0x04130301: 0x0403, - 0x04060308: 0x0407, - 0x041A0301: 0x040C, - 0x04180300: 0x040D, - 0x04230306: 0x040E, - 0x04180306: 0x0419, - 0x04380306: 0x0439, - 0x04350300: 0x0450, - 0x04350308: 0x0451, - 0x04330301: 0x0453, - 0x04560308: 0x0457, - 0x043A0301: 0x045C, - 0x04380300: 0x045D, - 0x04430306: 0x045E, - 0x0474030F: 0x0476, - 0x0475030F: 0x0477, - 0x04160306: 0x04C1, - 0x04360306: 0x04C2, - 0x04100306: 0x04D0, - 0x04300306: 0x04D1, - 0x04100308: 0x04D2, - 0x04300308: 0x04D3, - 0x04150306: 0x04D6, - 0x04350306: 0x04D7, - 0x04D80308: 0x04DA, - 0x04D90308: 0x04DB, - 0x04160308: 0x04DC, - 0x04360308: 0x04DD, - 0x04170308: 0x04DE, - 0x04370308: 0x04DF, - 0x04180304: 0x04E2, - 0x04380304: 0x04E3, - 0x04180308: 0x04E4, - 0x04380308: 0x04E5, - 0x041E0308: 0x04E6, - 0x043E0308: 0x04E7, - 0x04E80308: 0x04EA, - 0x04E90308: 0x04EB, - 0x042D0308: 
0x04EC, - 0x044D0308: 0x04ED, - 0x04230304: 0x04EE, - 0x04430304: 0x04EF, - 0x04230308: 0x04F0, - 0x04430308: 0x04F1, - 0x0423030B: 0x04F2, - 0x0443030B: 0x04F3, - 0x04270308: 0x04F4, - 0x04470308: 0x04F5, - 0x042B0308: 0x04F8, - 0x044B0308: 0x04F9, - 0x06270653: 0x0622, - 0x06270654: 0x0623, - 0x06480654: 0x0624, - 0x06270655: 0x0625, - 0x064A0654: 0x0626, - 0x06D50654: 0x06C0, - 0x06C10654: 0x06C2, - 0x06D20654: 0x06D3, - 0x0928093C: 0x0929, - 0x0930093C: 0x0931, - 0x0933093C: 0x0934, - 0x09C709BE: 0x09CB, - 0x09C709D7: 0x09CC, - 0x0B470B56: 0x0B48, - 0x0B470B3E: 0x0B4B, - 0x0B470B57: 0x0B4C, - 0x0B920BD7: 0x0B94, - 0x0BC60BBE: 0x0BCA, - 0x0BC70BBE: 0x0BCB, - 0x0BC60BD7: 0x0BCC, - 0x0C460C56: 0x0C48, - 0x0CBF0CD5: 0x0CC0, - 0x0CC60CD5: 0x0CC7, - 0x0CC60CD6: 0x0CC8, - 0x0CC60CC2: 0x0CCA, - 0x0CCA0CD5: 0x0CCB, - 0x0D460D3E: 0x0D4A, - 0x0D470D3E: 0x0D4B, - 0x0D460D57: 0x0D4C, - 0x0DD90DCA: 0x0DDA, - 0x0DD90DCF: 0x0DDC, - 0x0DDC0DCA: 0x0DDD, - 0x0DD90DDF: 0x0DDE, - 0x1025102E: 0x1026, - 0x1B051B35: 0x1B06, - 0x1B071B35: 0x1B08, - 0x1B091B35: 0x1B0A, - 0x1B0B1B35: 0x1B0C, - 0x1B0D1B35: 0x1B0E, - 0x1B111B35: 0x1B12, - 0x1B3A1B35: 0x1B3B, - 0x1B3C1B35: 0x1B3D, - 0x1B3E1B35: 0x1B40, - 0x1B3F1B35: 0x1B41, - 0x1B421B35: 0x1B43, - 0x00410325: 0x1E00, - 0x00610325: 0x1E01, - 0x00420307: 0x1E02, - 0x00620307: 0x1E03, - 0x00420323: 0x1E04, - 0x00620323: 0x1E05, - 0x00420331: 0x1E06, - 0x00620331: 0x1E07, - 0x00C70301: 0x1E08, - 0x00E70301: 0x1E09, - 0x00440307: 0x1E0A, - 0x00640307: 0x1E0B, - 0x00440323: 0x1E0C, - 0x00640323: 0x1E0D, - 0x00440331: 0x1E0E, - 0x00640331: 0x1E0F, - 0x00440327: 0x1E10, - 0x00640327: 0x1E11, - 0x0044032D: 0x1E12, - 0x0064032D: 0x1E13, - 0x01120300: 0x1E14, - 0x01130300: 0x1E15, - 0x01120301: 0x1E16, - 0x01130301: 0x1E17, - 0x0045032D: 0x1E18, - 0x0065032D: 0x1E19, - 0x00450330: 0x1E1A, - 0x00650330: 0x1E1B, - 0x02280306: 0x1E1C, - 0x02290306: 0x1E1D, - 0x00460307: 0x1E1E, - 0x00660307: 0x1E1F, - 0x00470304: 0x1E20, - 0x00670304: 0x1E21, - 0x00480307: 0x1E22, - 0x00680307: 0x1E23, - 0x00480323: 0x1E24, - 0x00680323: 0x1E25, - 0x00480308: 0x1E26, - 0x00680308: 0x1E27, - 0x00480327: 0x1E28, - 0x00680327: 0x1E29, - 0x0048032E: 0x1E2A, - 0x0068032E: 0x1E2B, - 0x00490330: 0x1E2C, - 0x00690330: 0x1E2D, - 0x00CF0301: 0x1E2E, - 0x00EF0301: 0x1E2F, - 0x004B0301: 0x1E30, - 0x006B0301: 0x1E31, - 0x004B0323: 0x1E32, - 0x006B0323: 0x1E33, - 0x004B0331: 0x1E34, - 0x006B0331: 0x1E35, - 0x004C0323: 0x1E36, - 0x006C0323: 0x1E37, - 0x1E360304: 0x1E38, - 0x1E370304: 0x1E39, - 0x004C0331: 0x1E3A, - 0x006C0331: 0x1E3B, - 0x004C032D: 0x1E3C, - 0x006C032D: 0x1E3D, - 0x004D0301: 0x1E3E, - 0x006D0301: 0x1E3F, - 0x004D0307: 0x1E40, - 0x006D0307: 0x1E41, - 0x004D0323: 0x1E42, - 0x006D0323: 0x1E43, - 0x004E0307: 0x1E44, - 0x006E0307: 0x1E45, - 0x004E0323: 0x1E46, - 0x006E0323: 0x1E47, - 0x004E0331: 0x1E48, - 0x006E0331: 0x1E49, - 0x004E032D: 0x1E4A, - 0x006E032D: 0x1E4B, - 0x00D50301: 0x1E4C, - 0x00F50301: 0x1E4D, - 0x00D50308: 0x1E4E, - 0x00F50308: 0x1E4F, - 0x014C0300: 0x1E50, - 0x014D0300: 0x1E51, - 0x014C0301: 0x1E52, - 0x014D0301: 0x1E53, - 0x00500301: 0x1E54, - 0x00700301: 0x1E55, - 0x00500307: 0x1E56, - 0x00700307: 0x1E57, - 0x00520307: 0x1E58, - 0x00720307: 0x1E59, - 0x00520323: 0x1E5A, - 0x00720323: 0x1E5B, - 0x1E5A0304: 0x1E5C, - 0x1E5B0304: 0x1E5D, - 0x00520331: 0x1E5E, - 0x00720331: 0x1E5F, - 0x00530307: 0x1E60, - 0x00730307: 0x1E61, - 0x00530323: 0x1E62, - 0x00730323: 0x1E63, - 0x015A0307: 0x1E64, - 0x015B0307: 0x1E65, - 0x01600307: 0x1E66, - 0x01610307: 0x1E67, - 0x1E620307: 0x1E68, - 
0x1E630307: 0x1E69, - 0x00540307: 0x1E6A, - 0x00740307: 0x1E6B, - 0x00540323: 0x1E6C, - 0x00740323: 0x1E6D, - 0x00540331: 0x1E6E, - 0x00740331: 0x1E6F, - 0x0054032D: 0x1E70, - 0x0074032D: 0x1E71, - 0x00550324: 0x1E72, - 0x00750324: 0x1E73, - 0x00550330: 0x1E74, - 0x00750330: 0x1E75, - 0x0055032D: 0x1E76, - 0x0075032D: 0x1E77, - 0x01680301: 0x1E78, - 0x01690301: 0x1E79, - 0x016A0308: 0x1E7A, - 0x016B0308: 0x1E7B, - 0x00560303: 0x1E7C, - 0x00760303: 0x1E7D, - 0x00560323: 0x1E7E, - 0x00760323: 0x1E7F, - 0x00570300: 0x1E80, - 0x00770300: 0x1E81, - 0x00570301: 0x1E82, - 0x00770301: 0x1E83, - 0x00570308: 0x1E84, - 0x00770308: 0x1E85, - 0x00570307: 0x1E86, - 0x00770307: 0x1E87, - 0x00570323: 0x1E88, - 0x00770323: 0x1E89, - 0x00580307: 0x1E8A, - 0x00780307: 0x1E8B, - 0x00580308: 0x1E8C, - 0x00780308: 0x1E8D, - 0x00590307: 0x1E8E, - 0x00790307: 0x1E8F, - 0x005A0302: 0x1E90, - 0x007A0302: 0x1E91, - 0x005A0323: 0x1E92, - 0x007A0323: 0x1E93, - 0x005A0331: 0x1E94, - 0x007A0331: 0x1E95, - 0x00680331: 0x1E96, - 0x00740308: 0x1E97, - 0x0077030A: 0x1E98, - 0x0079030A: 0x1E99, - 0x017F0307: 0x1E9B, - 0x00410323: 0x1EA0, - 0x00610323: 0x1EA1, - 0x00410309: 0x1EA2, - 0x00610309: 0x1EA3, - 0x00C20301: 0x1EA4, - 0x00E20301: 0x1EA5, - 0x00C20300: 0x1EA6, - 0x00E20300: 0x1EA7, - 0x00C20309: 0x1EA8, - 0x00E20309: 0x1EA9, - 0x00C20303: 0x1EAA, - 0x00E20303: 0x1EAB, - 0x1EA00302: 0x1EAC, - 0x1EA10302: 0x1EAD, - 0x01020301: 0x1EAE, - 0x01030301: 0x1EAF, - 0x01020300: 0x1EB0, - 0x01030300: 0x1EB1, - 0x01020309: 0x1EB2, - 0x01030309: 0x1EB3, - 0x01020303: 0x1EB4, - 0x01030303: 0x1EB5, - 0x1EA00306: 0x1EB6, - 0x1EA10306: 0x1EB7, - 0x00450323: 0x1EB8, - 0x00650323: 0x1EB9, - 0x00450309: 0x1EBA, - 0x00650309: 0x1EBB, - 0x00450303: 0x1EBC, - 0x00650303: 0x1EBD, - 0x00CA0301: 0x1EBE, - 0x00EA0301: 0x1EBF, - 0x00CA0300: 0x1EC0, - 0x00EA0300: 0x1EC1, - 0x00CA0309: 0x1EC2, - 0x00EA0309: 0x1EC3, - 0x00CA0303: 0x1EC4, - 0x00EA0303: 0x1EC5, - 0x1EB80302: 0x1EC6, - 0x1EB90302: 0x1EC7, - 0x00490309: 0x1EC8, - 0x00690309: 0x1EC9, - 0x00490323: 0x1ECA, - 0x00690323: 0x1ECB, - 0x004F0323: 0x1ECC, - 0x006F0323: 0x1ECD, - 0x004F0309: 0x1ECE, - 0x006F0309: 0x1ECF, - 0x00D40301: 0x1ED0, - 0x00F40301: 0x1ED1, - 0x00D40300: 0x1ED2, - 0x00F40300: 0x1ED3, - 0x00D40309: 0x1ED4, - 0x00F40309: 0x1ED5, - 0x00D40303: 0x1ED6, - 0x00F40303: 0x1ED7, - 0x1ECC0302: 0x1ED8, - 0x1ECD0302: 0x1ED9, - 0x01A00301: 0x1EDA, - 0x01A10301: 0x1EDB, - 0x01A00300: 0x1EDC, - 0x01A10300: 0x1EDD, - 0x01A00309: 0x1EDE, - 0x01A10309: 0x1EDF, - 0x01A00303: 0x1EE0, - 0x01A10303: 0x1EE1, - 0x01A00323: 0x1EE2, - 0x01A10323: 0x1EE3, - 0x00550323: 0x1EE4, - 0x00750323: 0x1EE5, - 0x00550309: 0x1EE6, - 0x00750309: 0x1EE7, - 0x01AF0301: 0x1EE8, - 0x01B00301: 0x1EE9, - 0x01AF0300: 0x1EEA, - 0x01B00300: 0x1EEB, - 0x01AF0309: 0x1EEC, - 0x01B00309: 0x1EED, - 0x01AF0303: 0x1EEE, - 0x01B00303: 0x1EEF, - 0x01AF0323: 0x1EF0, - 0x01B00323: 0x1EF1, - 0x00590300: 0x1EF2, - 0x00790300: 0x1EF3, - 0x00590323: 0x1EF4, - 0x00790323: 0x1EF5, - 0x00590309: 0x1EF6, - 0x00790309: 0x1EF7, - 0x00590303: 0x1EF8, - 0x00790303: 0x1EF9, - 0x03B10313: 0x1F00, - 0x03B10314: 0x1F01, - 0x1F000300: 0x1F02, - 0x1F010300: 0x1F03, - 0x1F000301: 0x1F04, - 0x1F010301: 0x1F05, - 0x1F000342: 0x1F06, - 0x1F010342: 0x1F07, - 0x03910313: 0x1F08, - 0x03910314: 0x1F09, - 0x1F080300: 0x1F0A, - 0x1F090300: 0x1F0B, - 0x1F080301: 0x1F0C, - 0x1F090301: 0x1F0D, - 0x1F080342: 0x1F0E, - 0x1F090342: 0x1F0F, - 0x03B50313: 0x1F10, - 0x03B50314: 0x1F11, - 0x1F100300: 0x1F12, - 0x1F110300: 0x1F13, - 0x1F100301: 0x1F14, - 0x1F110301: 
0x1F15, - 0x03950313: 0x1F18, - 0x03950314: 0x1F19, - 0x1F180300: 0x1F1A, - 0x1F190300: 0x1F1B, - 0x1F180301: 0x1F1C, - 0x1F190301: 0x1F1D, - 0x03B70313: 0x1F20, - 0x03B70314: 0x1F21, - 0x1F200300: 0x1F22, - 0x1F210300: 0x1F23, - 0x1F200301: 0x1F24, - 0x1F210301: 0x1F25, - 0x1F200342: 0x1F26, - 0x1F210342: 0x1F27, - 0x03970313: 0x1F28, - 0x03970314: 0x1F29, - 0x1F280300: 0x1F2A, - 0x1F290300: 0x1F2B, - 0x1F280301: 0x1F2C, - 0x1F290301: 0x1F2D, - 0x1F280342: 0x1F2E, - 0x1F290342: 0x1F2F, - 0x03B90313: 0x1F30, - 0x03B90314: 0x1F31, - 0x1F300300: 0x1F32, - 0x1F310300: 0x1F33, - 0x1F300301: 0x1F34, - 0x1F310301: 0x1F35, - 0x1F300342: 0x1F36, - 0x1F310342: 0x1F37, - 0x03990313: 0x1F38, - 0x03990314: 0x1F39, - 0x1F380300: 0x1F3A, - 0x1F390300: 0x1F3B, - 0x1F380301: 0x1F3C, - 0x1F390301: 0x1F3D, - 0x1F380342: 0x1F3E, - 0x1F390342: 0x1F3F, - 0x03BF0313: 0x1F40, - 0x03BF0314: 0x1F41, - 0x1F400300: 0x1F42, - 0x1F410300: 0x1F43, - 0x1F400301: 0x1F44, - 0x1F410301: 0x1F45, - 0x039F0313: 0x1F48, - 0x039F0314: 0x1F49, - 0x1F480300: 0x1F4A, - 0x1F490300: 0x1F4B, - 0x1F480301: 0x1F4C, - 0x1F490301: 0x1F4D, - 0x03C50313: 0x1F50, - 0x03C50314: 0x1F51, - 0x1F500300: 0x1F52, - 0x1F510300: 0x1F53, - 0x1F500301: 0x1F54, - 0x1F510301: 0x1F55, - 0x1F500342: 0x1F56, - 0x1F510342: 0x1F57, - 0x03A50314: 0x1F59, - 0x1F590300: 0x1F5B, - 0x1F590301: 0x1F5D, - 0x1F590342: 0x1F5F, - 0x03C90313: 0x1F60, - 0x03C90314: 0x1F61, - 0x1F600300: 0x1F62, - 0x1F610300: 0x1F63, - 0x1F600301: 0x1F64, - 0x1F610301: 0x1F65, - 0x1F600342: 0x1F66, - 0x1F610342: 0x1F67, - 0x03A90313: 0x1F68, - 0x03A90314: 0x1F69, - 0x1F680300: 0x1F6A, - 0x1F690300: 0x1F6B, - 0x1F680301: 0x1F6C, - 0x1F690301: 0x1F6D, - 0x1F680342: 0x1F6E, - 0x1F690342: 0x1F6F, - 0x03B10300: 0x1F70, - 0x03B50300: 0x1F72, - 0x03B70300: 0x1F74, - 0x03B90300: 0x1F76, - 0x03BF0300: 0x1F78, - 0x03C50300: 0x1F7A, - 0x03C90300: 0x1F7C, - 0x1F000345: 0x1F80, - 0x1F010345: 0x1F81, - 0x1F020345: 0x1F82, - 0x1F030345: 0x1F83, - 0x1F040345: 0x1F84, - 0x1F050345: 0x1F85, - 0x1F060345: 0x1F86, - 0x1F070345: 0x1F87, - 0x1F080345: 0x1F88, - 0x1F090345: 0x1F89, - 0x1F0A0345: 0x1F8A, - 0x1F0B0345: 0x1F8B, - 0x1F0C0345: 0x1F8C, - 0x1F0D0345: 0x1F8D, - 0x1F0E0345: 0x1F8E, - 0x1F0F0345: 0x1F8F, - 0x1F200345: 0x1F90, - 0x1F210345: 0x1F91, - 0x1F220345: 0x1F92, - 0x1F230345: 0x1F93, - 0x1F240345: 0x1F94, - 0x1F250345: 0x1F95, - 0x1F260345: 0x1F96, - 0x1F270345: 0x1F97, - 0x1F280345: 0x1F98, - 0x1F290345: 0x1F99, - 0x1F2A0345: 0x1F9A, - 0x1F2B0345: 0x1F9B, - 0x1F2C0345: 0x1F9C, - 0x1F2D0345: 0x1F9D, - 0x1F2E0345: 0x1F9E, - 0x1F2F0345: 0x1F9F, - 0x1F600345: 0x1FA0, - 0x1F610345: 0x1FA1, - 0x1F620345: 0x1FA2, - 0x1F630345: 0x1FA3, - 0x1F640345: 0x1FA4, - 0x1F650345: 0x1FA5, - 0x1F660345: 0x1FA6, - 0x1F670345: 0x1FA7, - 0x1F680345: 0x1FA8, - 0x1F690345: 0x1FA9, - 0x1F6A0345: 0x1FAA, - 0x1F6B0345: 0x1FAB, - 0x1F6C0345: 0x1FAC, - 0x1F6D0345: 0x1FAD, - 0x1F6E0345: 0x1FAE, - 0x1F6F0345: 0x1FAF, - 0x03B10306: 0x1FB0, - 0x03B10304: 0x1FB1, - 0x1F700345: 0x1FB2, - 0x03B10345: 0x1FB3, - 0x03AC0345: 0x1FB4, - 0x03B10342: 0x1FB6, - 0x1FB60345: 0x1FB7, - 0x03910306: 0x1FB8, - 0x03910304: 0x1FB9, - 0x03910300: 0x1FBA, - 0x03910345: 0x1FBC, - 0x00A80342: 0x1FC1, - 0x1F740345: 0x1FC2, - 0x03B70345: 0x1FC3, - 0x03AE0345: 0x1FC4, - 0x03B70342: 0x1FC6, - 0x1FC60345: 0x1FC7, - 0x03950300: 0x1FC8, - 0x03970300: 0x1FCA, - 0x03970345: 0x1FCC, - 0x1FBF0300: 0x1FCD, - 0x1FBF0301: 0x1FCE, - 0x1FBF0342: 0x1FCF, - 0x03B90306: 0x1FD0, - 0x03B90304: 0x1FD1, - 0x03CA0300: 0x1FD2, - 0x03B90342: 0x1FD6, - 0x03CA0342: 0x1FD7, - 
0x03990306: 0x1FD8, - 0x03990304: 0x1FD9, - 0x03990300: 0x1FDA, - 0x1FFE0300: 0x1FDD, - 0x1FFE0301: 0x1FDE, - 0x1FFE0342: 0x1FDF, - 0x03C50306: 0x1FE0, - 0x03C50304: 0x1FE1, - 0x03CB0300: 0x1FE2, - 0x03C10313: 0x1FE4, - 0x03C10314: 0x1FE5, - 0x03C50342: 0x1FE6, - 0x03CB0342: 0x1FE7, - 0x03A50306: 0x1FE8, - 0x03A50304: 0x1FE9, - 0x03A50300: 0x1FEA, - 0x03A10314: 0x1FEC, - 0x00A80300: 0x1FED, - 0x1F7C0345: 0x1FF2, - 0x03C90345: 0x1FF3, - 0x03CE0345: 0x1FF4, - 0x03C90342: 0x1FF6, - 0x1FF60345: 0x1FF7, - 0x039F0300: 0x1FF8, - 0x03A90300: 0x1FFA, - 0x03A90345: 0x1FFC, - 0x21900338: 0x219A, - 0x21920338: 0x219B, - 0x21940338: 0x21AE, - 0x21D00338: 0x21CD, - 0x21D40338: 0x21CE, - 0x21D20338: 0x21CF, - 0x22030338: 0x2204, - 0x22080338: 0x2209, - 0x220B0338: 0x220C, - 0x22230338: 0x2224, - 0x22250338: 0x2226, - 0x223C0338: 0x2241, - 0x22430338: 0x2244, - 0x22450338: 0x2247, - 0x22480338: 0x2249, - 0x003D0338: 0x2260, - 0x22610338: 0x2262, - 0x224D0338: 0x226D, - 0x003C0338: 0x226E, - 0x003E0338: 0x226F, - 0x22640338: 0x2270, - 0x22650338: 0x2271, - 0x22720338: 0x2274, - 0x22730338: 0x2275, - 0x22760338: 0x2278, - 0x22770338: 0x2279, - 0x227A0338: 0x2280, - 0x227B0338: 0x2281, - 0x22820338: 0x2284, - 0x22830338: 0x2285, - 0x22860338: 0x2288, - 0x22870338: 0x2289, - 0x22A20338: 0x22AC, - 0x22A80338: 0x22AD, - 0x22A90338: 0x22AE, - 0x22AB0338: 0x22AF, - 0x227C0338: 0x22E0, - 0x227D0338: 0x22E1, - 0x22910338: 0x22E2, - 0x22920338: 0x22E3, - 0x22B20338: 0x22EA, - 0x22B30338: 0x22EB, - 0x22B40338: 0x22EC, - 0x22B50338: 0x22ED, - 0x304B3099: 0x304C, - 0x304D3099: 0x304E, - 0x304F3099: 0x3050, - 0x30513099: 0x3052, - 0x30533099: 0x3054, - 0x30553099: 0x3056, - 0x30573099: 0x3058, - 0x30593099: 0x305A, - 0x305B3099: 0x305C, - 0x305D3099: 0x305E, - 0x305F3099: 0x3060, - 0x30613099: 0x3062, - 0x30643099: 0x3065, - 0x30663099: 0x3067, - 0x30683099: 0x3069, - 0x306F3099: 0x3070, - 0x306F309A: 0x3071, - 0x30723099: 0x3073, - 0x3072309A: 0x3074, - 0x30753099: 0x3076, - 0x3075309A: 0x3077, - 0x30783099: 0x3079, - 0x3078309A: 0x307A, - 0x307B3099: 0x307C, - 0x307B309A: 0x307D, - 0x30463099: 0x3094, - 0x309D3099: 0x309E, - 0x30AB3099: 0x30AC, - 0x30AD3099: 0x30AE, - 0x30AF3099: 0x30B0, - 0x30B13099: 0x30B2, - 0x30B33099: 0x30B4, - 0x30B53099: 0x30B6, - 0x30B73099: 0x30B8, - 0x30B93099: 0x30BA, - 0x30BB3099: 0x30BC, - 0x30BD3099: 0x30BE, - 0x30BF3099: 0x30C0, - 0x30C13099: 0x30C2, - 0x30C43099: 0x30C5, - 0x30C63099: 0x30C7, - 0x30C83099: 0x30C9, - 0x30CF3099: 0x30D0, - 0x30CF309A: 0x30D1, - 0x30D23099: 0x30D3, - 0x30D2309A: 0x30D4, - 0x30D53099: 0x30D6, - 0x30D5309A: 0x30D7, - 0x30D83099: 0x30D9, - 0x30D8309A: 0x30DA, - 0x30DB3099: 0x30DC, - 0x30DB309A: 0x30DD, - 0x30A63099: 0x30F4, - 0x30EF3099: 0x30F7, - 0x30F03099: 0x30F8, - 0x30F13099: 0x30F9, - 0x30F23099: 0x30FA, - 0x30FD3099: 0x30FE, - 0x109910BA: 0x1109A, - 0x109B10BA: 0x1109C, - 0x10A510BA: 0x110AB, - 0x11311127: 0x1112E, - 0x11321127: 0x1112F, - 0x1347133E: 0x1134B, - 0x13471357: 0x1134C, - 0x14B914BA: 0x114BB, - 0x14B914B0: 0x114BC, - 0x14B914BD: 0x114BE, - 0x15B815AF: 0x115BA, - 0x15B915AF: 0x115BB, -} +var recompMap map[uint32]rune +var recompMapOnce sync.Once -// Total size of tables: 53KB (54226 bytes) +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + 
"\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + 
"\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 53KB (54226 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go new file mode 100644 index 0000000..7297cce --- /dev/null +++ b/vendor/golang.org/x/text/unicode/norm/tables11.0.0.go @@ -0,0 +1,7693 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +// +build go1.13 + +package norm + +import "sync" + +const ( + // Version is the Unicode edition from which the tables are derived. 
+ Version = "11.0.0" + + // MaxTransformChunkSize indicates the maximum number of bytes that Transform + // may need to write atomically for any Form. Making a destination buffer at + // least this size ensures that Transform can always make progress and that + // the user does not need to grow the buffer on an ErrShortDst. + MaxTransformChunkSize = 35 + maxNonStarters*4 +) + +var ccc = [55]uint8{ + 0, 1, 7, 8, 9, 10, 11, 12, + 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, + 84, 91, 103, 107, 118, 122, 129, 130, + 132, 202, 214, 216, 218, 220, 222, 224, + 226, 228, 230, 232, 233, 234, 240, +} + +const ( + firstMulti = 0x186D + firstCCC = 0x2C9E + endMulti = 0x2F60 + firstLeadingCCC = 0x49AE + firstCCCZeroExcept = 0x4A78 + firstStarterWithNLead = 0x4A9F + lastDecomp = 0x4AA1 + maxDecomp = 0x8000 +) + +// decomps: 19105 bytes +var decomps = [...]byte{ + // Bytes 0 - 3f + 0x00, 0x41, 0x20, 0x41, 0x21, 0x41, 0x22, 0x41, + 0x23, 0x41, 0x24, 0x41, 0x25, 0x41, 0x26, 0x41, + 0x27, 0x41, 0x28, 0x41, 0x29, 0x41, 0x2A, 0x41, + 0x2B, 0x41, 0x2C, 0x41, 0x2D, 0x41, 0x2E, 0x41, + 0x2F, 0x41, 0x30, 0x41, 0x31, 0x41, 0x32, 0x41, + 0x33, 0x41, 0x34, 0x41, 0x35, 0x41, 0x36, 0x41, + 0x37, 0x41, 0x38, 0x41, 0x39, 0x41, 0x3A, 0x41, + 0x3B, 0x41, 0x3C, 0x41, 0x3D, 0x41, 0x3E, 0x41, + // Bytes 40 - 7f + 0x3F, 0x41, 0x40, 0x41, 0x41, 0x41, 0x42, 0x41, + 0x43, 0x41, 0x44, 0x41, 0x45, 0x41, 0x46, 0x41, + 0x47, 0x41, 0x48, 0x41, 0x49, 0x41, 0x4A, 0x41, + 0x4B, 0x41, 0x4C, 0x41, 0x4D, 0x41, 0x4E, 0x41, + 0x4F, 0x41, 0x50, 0x41, 0x51, 0x41, 0x52, 0x41, + 0x53, 0x41, 0x54, 0x41, 0x55, 0x41, 0x56, 0x41, + 0x57, 0x41, 0x58, 0x41, 0x59, 0x41, 0x5A, 0x41, + 0x5B, 0x41, 0x5C, 0x41, 0x5D, 0x41, 0x5E, 0x41, + // Bytes 80 - bf + 0x5F, 0x41, 0x60, 0x41, 0x61, 0x41, 0x62, 0x41, + 0x63, 0x41, 0x64, 0x41, 0x65, 0x41, 0x66, 0x41, + 0x67, 0x41, 0x68, 0x41, 0x69, 0x41, 0x6A, 0x41, + 0x6B, 0x41, 0x6C, 0x41, 0x6D, 0x41, 0x6E, 0x41, + 0x6F, 0x41, 0x70, 0x41, 0x71, 0x41, 0x72, 0x41, + 0x73, 0x41, 0x74, 0x41, 0x75, 0x41, 0x76, 0x41, + 0x77, 0x41, 0x78, 0x41, 0x79, 0x41, 0x7A, 0x41, + 0x7B, 0x41, 0x7C, 0x41, 0x7D, 0x41, 0x7E, 0x42, + // Bytes c0 - ff + 0xC2, 0xA2, 0x42, 0xC2, 0xA3, 0x42, 0xC2, 0xA5, + 0x42, 0xC2, 0xA6, 0x42, 0xC2, 0xAC, 0x42, 0xC2, + 0xB7, 0x42, 0xC3, 0x86, 0x42, 0xC3, 0xB0, 0x42, + 0xC4, 0xA6, 0x42, 0xC4, 0xA7, 0x42, 0xC4, 0xB1, + 0x42, 0xC5, 0x8B, 0x42, 0xC5, 0x93, 0x42, 0xC6, + 0x8E, 0x42, 0xC6, 0x90, 0x42, 0xC6, 0xAB, 0x42, + 0xC8, 0xA2, 0x42, 0xC8, 0xB7, 0x42, 0xC9, 0x90, + 0x42, 0xC9, 0x91, 0x42, 0xC9, 0x92, 0x42, 0xC9, + // Bytes 100 - 13f + 0x94, 0x42, 0xC9, 0x95, 0x42, 0xC9, 0x99, 0x42, + 0xC9, 0x9B, 0x42, 0xC9, 0x9C, 0x42, 0xC9, 0x9F, + 0x42, 0xC9, 0xA1, 0x42, 0xC9, 0xA3, 0x42, 0xC9, + 0xA5, 0x42, 0xC9, 0xA6, 0x42, 0xC9, 0xA8, 0x42, + 0xC9, 0xA9, 0x42, 0xC9, 0xAA, 0x42, 0xC9, 0xAB, + 0x42, 0xC9, 0xAD, 0x42, 0xC9, 0xAF, 0x42, 0xC9, + 0xB0, 0x42, 0xC9, 0xB1, 0x42, 0xC9, 0xB2, 0x42, + 0xC9, 0xB3, 0x42, 0xC9, 0xB4, 0x42, 0xC9, 0xB5, + // Bytes 140 - 17f + 0x42, 0xC9, 0xB8, 0x42, 0xC9, 0xB9, 0x42, 0xC9, + 0xBB, 0x42, 0xCA, 0x81, 0x42, 0xCA, 0x82, 0x42, + 0xCA, 0x83, 0x42, 0xCA, 0x89, 0x42, 0xCA, 0x8A, + 0x42, 0xCA, 0x8B, 0x42, 0xCA, 0x8C, 0x42, 0xCA, + 0x90, 0x42, 0xCA, 0x91, 0x42, 0xCA, 0x92, 0x42, + 0xCA, 0x95, 0x42, 0xCA, 0x9D, 0x42, 0xCA, 0x9F, + 0x42, 0xCA, 0xB9, 0x42, 0xCE, 0x91, 0x42, 0xCE, + 0x92, 0x42, 0xCE, 0x93, 0x42, 0xCE, 0x94, 0x42, + // Bytes 180 - 1bf + 0xCE, 0x95, 0x42, 0xCE, 0x96, 0x42, 0xCE, 0x97, + 0x42, 0xCE, 0x98, 0x42, 0xCE, 0x99, 0x42, 0xCE, 
+ 0x9A, 0x42, 0xCE, 0x9B, 0x42, 0xCE, 0x9C, 0x42, + 0xCE, 0x9D, 0x42, 0xCE, 0x9E, 0x42, 0xCE, 0x9F, + 0x42, 0xCE, 0xA0, 0x42, 0xCE, 0xA1, 0x42, 0xCE, + 0xA3, 0x42, 0xCE, 0xA4, 0x42, 0xCE, 0xA5, 0x42, + 0xCE, 0xA6, 0x42, 0xCE, 0xA7, 0x42, 0xCE, 0xA8, + 0x42, 0xCE, 0xA9, 0x42, 0xCE, 0xB1, 0x42, 0xCE, + // Bytes 1c0 - 1ff + 0xB2, 0x42, 0xCE, 0xB3, 0x42, 0xCE, 0xB4, 0x42, + 0xCE, 0xB5, 0x42, 0xCE, 0xB6, 0x42, 0xCE, 0xB7, + 0x42, 0xCE, 0xB8, 0x42, 0xCE, 0xB9, 0x42, 0xCE, + 0xBA, 0x42, 0xCE, 0xBB, 0x42, 0xCE, 0xBC, 0x42, + 0xCE, 0xBD, 0x42, 0xCE, 0xBE, 0x42, 0xCE, 0xBF, + 0x42, 0xCF, 0x80, 0x42, 0xCF, 0x81, 0x42, 0xCF, + 0x82, 0x42, 0xCF, 0x83, 0x42, 0xCF, 0x84, 0x42, + 0xCF, 0x85, 0x42, 0xCF, 0x86, 0x42, 0xCF, 0x87, + // Bytes 200 - 23f + 0x42, 0xCF, 0x88, 0x42, 0xCF, 0x89, 0x42, 0xCF, + 0x9C, 0x42, 0xCF, 0x9D, 0x42, 0xD0, 0xBD, 0x42, + 0xD1, 0x8A, 0x42, 0xD1, 0x8C, 0x42, 0xD7, 0x90, + 0x42, 0xD7, 0x91, 0x42, 0xD7, 0x92, 0x42, 0xD7, + 0x93, 0x42, 0xD7, 0x94, 0x42, 0xD7, 0x9B, 0x42, + 0xD7, 0x9C, 0x42, 0xD7, 0x9D, 0x42, 0xD7, 0xA2, + 0x42, 0xD7, 0xA8, 0x42, 0xD7, 0xAA, 0x42, 0xD8, + 0xA1, 0x42, 0xD8, 0xA7, 0x42, 0xD8, 0xA8, 0x42, + // Bytes 240 - 27f + 0xD8, 0xA9, 0x42, 0xD8, 0xAA, 0x42, 0xD8, 0xAB, + 0x42, 0xD8, 0xAC, 0x42, 0xD8, 0xAD, 0x42, 0xD8, + 0xAE, 0x42, 0xD8, 0xAF, 0x42, 0xD8, 0xB0, 0x42, + 0xD8, 0xB1, 0x42, 0xD8, 0xB2, 0x42, 0xD8, 0xB3, + 0x42, 0xD8, 0xB4, 0x42, 0xD8, 0xB5, 0x42, 0xD8, + 0xB6, 0x42, 0xD8, 0xB7, 0x42, 0xD8, 0xB8, 0x42, + 0xD8, 0xB9, 0x42, 0xD8, 0xBA, 0x42, 0xD9, 0x81, + 0x42, 0xD9, 0x82, 0x42, 0xD9, 0x83, 0x42, 0xD9, + // Bytes 280 - 2bf + 0x84, 0x42, 0xD9, 0x85, 0x42, 0xD9, 0x86, 0x42, + 0xD9, 0x87, 0x42, 0xD9, 0x88, 0x42, 0xD9, 0x89, + 0x42, 0xD9, 0x8A, 0x42, 0xD9, 0xAE, 0x42, 0xD9, + 0xAF, 0x42, 0xD9, 0xB1, 0x42, 0xD9, 0xB9, 0x42, + 0xD9, 0xBA, 0x42, 0xD9, 0xBB, 0x42, 0xD9, 0xBE, + 0x42, 0xD9, 0xBF, 0x42, 0xDA, 0x80, 0x42, 0xDA, + 0x83, 0x42, 0xDA, 0x84, 0x42, 0xDA, 0x86, 0x42, + 0xDA, 0x87, 0x42, 0xDA, 0x88, 0x42, 0xDA, 0x8C, + // Bytes 2c0 - 2ff + 0x42, 0xDA, 0x8D, 0x42, 0xDA, 0x8E, 0x42, 0xDA, + 0x91, 0x42, 0xDA, 0x98, 0x42, 0xDA, 0xA1, 0x42, + 0xDA, 0xA4, 0x42, 0xDA, 0xA6, 0x42, 0xDA, 0xA9, + 0x42, 0xDA, 0xAD, 0x42, 0xDA, 0xAF, 0x42, 0xDA, + 0xB1, 0x42, 0xDA, 0xB3, 0x42, 0xDA, 0xBA, 0x42, + 0xDA, 0xBB, 0x42, 0xDA, 0xBE, 0x42, 0xDB, 0x81, + 0x42, 0xDB, 0x85, 0x42, 0xDB, 0x86, 0x42, 0xDB, + 0x87, 0x42, 0xDB, 0x88, 0x42, 0xDB, 0x89, 0x42, + // Bytes 300 - 33f + 0xDB, 0x8B, 0x42, 0xDB, 0x8C, 0x42, 0xDB, 0x90, + 0x42, 0xDB, 0x92, 0x43, 0xE0, 0xBC, 0x8B, 0x43, + 0xE1, 0x83, 0x9C, 0x43, 0xE1, 0x84, 0x80, 0x43, + 0xE1, 0x84, 0x81, 0x43, 0xE1, 0x84, 0x82, 0x43, + 0xE1, 0x84, 0x83, 0x43, 0xE1, 0x84, 0x84, 0x43, + 0xE1, 0x84, 0x85, 0x43, 0xE1, 0x84, 0x86, 0x43, + 0xE1, 0x84, 0x87, 0x43, 0xE1, 0x84, 0x88, 0x43, + 0xE1, 0x84, 0x89, 0x43, 0xE1, 0x84, 0x8A, 0x43, + // Bytes 340 - 37f + 0xE1, 0x84, 0x8B, 0x43, 0xE1, 0x84, 0x8C, 0x43, + 0xE1, 0x84, 0x8D, 0x43, 0xE1, 0x84, 0x8E, 0x43, + 0xE1, 0x84, 0x8F, 0x43, 0xE1, 0x84, 0x90, 0x43, + 0xE1, 0x84, 0x91, 0x43, 0xE1, 0x84, 0x92, 0x43, + 0xE1, 0x84, 0x94, 0x43, 0xE1, 0x84, 0x95, 0x43, + 0xE1, 0x84, 0x9A, 0x43, 0xE1, 0x84, 0x9C, 0x43, + 0xE1, 0x84, 0x9D, 0x43, 0xE1, 0x84, 0x9E, 0x43, + 0xE1, 0x84, 0xA0, 0x43, 0xE1, 0x84, 0xA1, 0x43, + // Bytes 380 - 3bf + 0xE1, 0x84, 0xA2, 0x43, 0xE1, 0x84, 0xA3, 0x43, + 0xE1, 0x84, 0xA7, 0x43, 0xE1, 0x84, 0xA9, 0x43, + 0xE1, 0x84, 0xAB, 0x43, 0xE1, 0x84, 0xAC, 0x43, + 0xE1, 0x84, 0xAD, 0x43, 0xE1, 0x84, 0xAE, 0x43, + 0xE1, 0x84, 0xAF, 0x43, 0xE1, 0x84, 0xB2, 0x43, + 0xE1, 0x84, 0xB6, 0x43, 0xE1, 
0x85, 0x80, 0x43, + 0xE1, 0x85, 0x87, 0x43, 0xE1, 0x85, 0x8C, 0x43, + 0xE1, 0x85, 0x97, 0x43, 0xE1, 0x85, 0x98, 0x43, + // Bytes 3c0 - 3ff + 0xE1, 0x85, 0x99, 0x43, 0xE1, 0x85, 0xA0, 0x43, + 0xE1, 0x86, 0x84, 0x43, 0xE1, 0x86, 0x85, 0x43, + 0xE1, 0x86, 0x88, 0x43, 0xE1, 0x86, 0x91, 0x43, + 0xE1, 0x86, 0x92, 0x43, 0xE1, 0x86, 0x94, 0x43, + 0xE1, 0x86, 0x9E, 0x43, 0xE1, 0x86, 0xA1, 0x43, + 0xE1, 0x87, 0x87, 0x43, 0xE1, 0x87, 0x88, 0x43, + 0xE1, 0x87, 0x8C, 0x43, 0xE1, 0x87, 0x8E, 0x43, + 0xE1, 0x87, 0x93, 0x43, 0xE1, 0x87, 0x97, 0x43, + // Bytes 400 - 43f + 0xE1, 0x87, 0x99, 0x43, 0xE1, 0x87, 0x9D, 0x43, + 0xE1, 0x87, 0x9F, 0x43, 0xE1, 0x87, 0xB1, 0x43, + 0xE1, 0x87, 0xB2, 0x43, 0xE1, 0xB4, 0x82, 0x43, + 0xE1, 0xB4, 0x96, 0x43, 0xE1, 0xB4, 0x97, 0x43, + 0xE1, 0xB4, 0x9C, 0x43, 0xE1, 0xB4, 0x9D, 0x43, + 0xE1, 0xB4, 0xA5, 0x43, 0xE1, 0xB5, 0xBB, 0x43, + 0xE1, 0xB6, 0x85, 0x43, 0xE2, 0x80, 0x82, 0x43, + 0xE2, 0x80, 0x83, 0x43, 0xE2, 0x80, 0x90, 0x43, + // Bytes 440 - 47f + 0xE2, 0x80, 0x93, 0x43, 0xE2, 0x80, 0x94, 0x43, + 0xE2, 0x82, 0xA9, 0x43, 0xE2, 0x86, 0x90, 0x43, + 0xE2, 0x86, 0x91, 0x43, 0xE2, 0x86, 0x92, 0x43, + 0xE2, 0x86, 0x93, 0x43, 0xE2, 0x88, 0x82, 0x43, + 0xE2, 0x88, 0x87, 0x43, 0xE2, 0x88, 0x91, 0x43, + 0xE2, 0x88, 0x92, 0x43, 0xE2, 0x94, 0x82, 0x43, + 0xE2, 0x96, 0xA0, 0x43, 0xE2, 0x97, 0x8B, 0x43, + 0xE2, 0xA6, 0x85, 0x43, 0xE2, 0xA6, 0x86, 0x43, + // Bytes 480 - 4bf + 0xE2, 0xB5, 0xA1, 0x43, 0xE3, 0x80, 0x81, 0x43, + 0xE3, 0x80, 0x82, 0x43, 0xE3, 0x80, 0x88, 0x43, + 0xE3, 0x80, 0x89, 0x43, 0xE3, 0x80, 0x8A, 0x43, + 0xE3, 0x80, 0x8B, 0x43, 0xE3, 0x80, 0x8C, 0x43, + 0xE3, 0x80, 0x8D, 0x43, 0xE3, 0x80, 0x8E, 0x43, + 0xE3, 0x80, 0x8F, 0x43, 0xE3, 0x80, 0x90, 0x43, + 0xE3, 0x80, 0x91, 0x43, 0xE3, 0x80, 0x92, 0x43, + 0xE3, 0x80, 0x94, 0x43, 0xE3, 0x80, 0x95, 0x43, + // Bytes 4c0 - 4ff + 0xE3, 0x80, 0x96, 0x43, 0xE3, 0x80, 0x97, 0x43, + 0xE3, 0x82, 0xA1, 0x43, 0xE3, 0x82, 0xA2, 0x43, + 0xE3, 0x82, 0xA3, 0x43, 0xE3, 0x82, 0xA4, 0x43, + 0xE3, 0x82, 0xA5, 0x43, 0xE3, 0x82, 0xA6, 0x43, + 0xE3, 0x82, 0xA7, 0x43, 0xE3, 0x82, 0xA8, 0x43, + 0xE3, 0x82, 0xA9, 0x43, 0xE3, 0x82, 0xAA, 0x43, + 0xE3, 0x82, 0xAB, 0x43, 0xE3, 0x82, 0xAD, 0x43, + 0xE3, 0x82, 0xAF, 0x43, 0xE3, 0x82, 0xB1, 0x43, + // Bytes 500 - 53f + 0xE3, 0x82, 0xB3, 0x43, 0xE3, 0x82, 0xB5, 0x43, + 0xE3, 0x82, 0xB7, 0x43, 0xE3, 0x82, 0xB9, 0x43, + 0xE3, 0x82, 0xBB, 0x43, 0xE3, 0x82, 0xBD, 0x43, + 0xE3, 0x82, 0xBF, 0x43, 0xE3, 0x83, 0x81, 0x43, + 0xE3, 0x83, 0x83, 0x43, 0xE3, 0x83, 0x84, 0x43, + 0xE3, 0x83, 0x86, 0x43, 0xE3, 0x83, 0x88, 0x43, + 0xE3, 0x83, 0x8A, 0x43, 0xE3, 0x83, 0x8B, 0x43, + 0xE3, 0x83, 0x8C, 0x43, 0xE3, 0x83, 0x8D, 0x43, + // Bytes 540 - 57f + 0xE3, 0x83, 0x8E, 0x43, 0xE3, 0x83, 0x8F, 0x43, + 0xE3, 0x83, 0x92, 0x43, 0xE3, 0x83, 0x95, 0x43, + 0xE3, 0x83, 0x98, 0x43, 0xE3, 0x83, 0x9B, 0x43, + 0xE3, 0x83, 0x9E, 0x43, 0xE3, 0x83, 0x9F, 0x43, + 0xE3, 0x83, 0xA0, 0x43, 0xE3, 0x83, 0xA1, 0x43, + 0xE3, 0x83, 0xA2, 0x43, 0xE3, 0x83, 0xA3, 0x43, + 0xE3, 0x83, 0xA4, 0x43, 0xE3, 0x83, 0xA5, 0x43, + 0xE3, 0x83, 0xA6, 0x43, 0xE3, 0x83, 0xA7, 0x43, + // Bytes 580 - 5bf + 0xE3, 0x83, 0xA8, 0x43, 0xE3, 0x83, 0xA9, 0x43, + 0xE3, 0x83, 0xAA, 0x43, 0xE3, 0x83, 0xAB, 0x43, + 0xE3, 0x83, 0xAC, 0x43, 0xE3, 0x83, 0xAD, 0x43, + 0xE3, 0x83, 0xAF, 0x43, 0xE3, 0x83, 0xB0, 0x43, + 0xE3, 0x83, 0xB1, 0x43, 0xE3, 0x83, 0xB2, 0x43, + 0xE3, 0x83, 0xB3, 0x43, 0xE3, 0x83, 0xBB, 0x43, + 0xE3, 0x83, 0xBC, 0x43, 0xE3, 0x92, 0x9E, 0x43, + 0xE3, 0x92, 0xB9, 0x43, 0xE3, 0x92, 0xBB, 0x43, + // Bytes 5c0 - 5ff + 0xE3, 0x93, 0x9F, 0x43, 0xE3, 0x94, 0x95, 
0x43, + 0xE3, 0x9B, 0xAE, 0x43, 0xE3, 0x9B, 0xBC, 0x43, + 0xE3, 0x9E, 0x81, 0x43, 0xE3, 0xA0, 0xAF, 0x43, + 0xE3, 0xA1, 0xA2, 0x43, 0xE3, 0xA1, 0xBC, 0x43, + 0xE3, 0xA3, 0x87, 0x43, 0xE3, 0xA3, 0xA3, 0x43, + 0xE3, 0xA4, 0x9C, 0x43, 0xE3, 0xA4, 0xBA, 0x43, + 0xE3, 0xA8, 0xAE, 0x43, 0xE3, 0xA9, 0xAC, 0x43, + 0xE3, 0xAB, 0xA4, 0x43, 0xE3, 0xAC, 0x88, 0x43, + // Bytes 600 - 63f + 0xE3, 0xAC, 0x99, 0x43, 0xE3, 0xAD, 0x89, 0x43, + 0xE3, 0xAE, 0x9D, 0x43, 0xE3, 0xB0, 0x98, 0x43, + 0xE3, 0xB1, 0x8E, 0x43, 0xE3, 0xB4, 0xB3, 0x43, + 0xE3, 0xB6, 0x96, 0x43, 0xE3, 0xBA, 0xAC, 0x43, + 0xE3, 0xBA, 0xB8, 0x43, 0xE3, 0xBC, 0x9B, 0x43, + 0xE3, 0xBF, 0xBC, 0x43, 0xE4, 0x80, 0x88, 0x43, + 0xE4, 0x80, 0x98, 0x43, 0xE4, 0x80, 0xB9, 0x43, + 0xE4, 0x81, 0x86, 0x43, 0xE4, 0x82, 0x96, 0x43, + // Bytes 640 - 67f + 0xE4, 0x83, 0xA3, 0x43, 0xE4, 0x84, 0xAF, 0x43, + 0xE4, 0x88, 0x82, 0x43, 0xE4, 0x88, 0xA7, 0x43, + 0xE4, 0x8A, 0xA0, 0x43, 0xE4, 0x8C, 0x81, 0x43, + 0xE4, 0x8C, 0xB4, 0x43, 0xE4, 0x8D, 0x99, 0x43, + 0xE4, 0x8F, 0x95, 0x43, 0xE4, 0x8F, 0x99, 0x43, + 0xE4, 0x90, 0x8B, 0x43, 0xE4, 0x91, 0xAB, 0x43, + 0xE4, 0x94, 0xAB, 0x43, 0xE4, 0x95, 0x9D, 0x43, + 0xE4, 0x95, 0xA1, 0x43, 0xE4, 0x95, 0xAB, 0x43, + // Bytes 680 - 6bf + 0xE4, 0x97, 0x97, 0x43, 0xE4, 0x97, 0xB9, 0x43, + 0xE4, 0x98, 0xB5, 0x43, 0xE4, 0x9A, 0xBE, 0x43, + 0xE4, 0x9B, 0x87, 0x43, 0xE4, 0xA6, 0x95, 0x43, + 0xE4, 0xA7, 0xA6, 0x43, 0xE4, 0xA9, 0xAE, 0x43, + 0xE4, 0xA9, 0xB6, 0x43, 0xE4, 0xAA, 0xB2, 0x43, + 0xE4, 0xAC, 0xB3, 0x43, 0xE4, 0xAF, 0x8E, 0x43, + 0xE4, 0xB3, 0x8E, 0x43, 0xE4, 0xB3, 0xAD, 0x43, + 0xE4, 0xB3, 0xB8, 0x43, 0xE4, 0xB5, 0x96, 0x43, + // Bytes 6c0 - 6ff + 0xE4, 0xB8, 0x80, 0x43, 0xE4, 0xB8, 0x81, 0x43, + 0xE4, 0xB8, 0x83, 0x43, 0xE4, 0xB8, 0x89, 0x43, + 0xE4, 0xB8, 0x8A, 0x43, 0xE4, 0xB8, 0x8B, 0x43, + 0xE4, 0xB8, 0x8D, 0x43, 0xE4, 0xB8, 0x99, 0x43, + 0xE4, 0xB8, 0xA6, 0x43, 0xE4, 0xB8, 0xA8, 0x43, + 0xE4, 0xB8, 0xAD, 0x43, 0xE4, 0xB8, 0xB2, 0x43, + 0xE4, 0xB8, 0xB6, 0x43, 0xE4, 0xB8, 0xB8, 0x43, + 0xE4, 0xB8, 0xB9, 0x43, 0xE4, 0xB8, 0xBD, 0x43, + // Bytes 700 - 73f + 0xE4, 0xB8, 0xBF, 0x43, 0xE4, 0xB9, 0x81, 0x43, + 0xE4, 0xB9, 0x99, 0x43, 0xE4, 0xB9, 0x9D, 0x43, + 0xE4, 0xBA, 0x82, 0x43, 0xE4, 0xBA, 0x85, 0x43, + 0xE4, 0xBA, 0x86, 0x43, 0xE4, 0xBA, 0x8C, 0x43, + 0xE4, 0xBA, 0x94, 0x43, 0xE4, 0xBA, 0xA0, 0x43, + 0xE4, 0xBA, 0xA4, 0x43, 0xE4, 0xBA, 0xAE, 0x43, + 0xE4, 0xBA, 0xBA, 0x43, 0xE4, 0xBB, 0x80, 0x43, + 0xE4, 0xBB, 0x8C, 0x43, 0xE4, 0xBB, 0xA4, 0x43, + // Bytes 740 - 77f + 0xE4, 0xBC, 0x81, 0x43, 0xE4, 0xBC, 0x91, 0x43, + 0xE4, 0xBD, 0xA0, 0x43, 0xE4, 0xBE, 0x80, 0x43, + 0xE4, 0xBE, 0x86, 0x43, 0xE4, 0xBE, 0x8B, 0x43, + 0xE4, 0xBE, 0xAE, 0x43, 0xE4, 0xBE, 0xBB, 0x43, + 0xE4, 0xBE, 0xBF, 0x43, 0xE5, 0x80, 0x82, 0x43, + 0xE5, 0x80, 0xAB, 0x43, 0xE5, 0x81, 0xBA, 0x43, + 0xE5, 0x82, 0x99, 0x43, 0xE5, 0x83, 0x8F, 0x43, + 0xE5, 0x83, 0x9A, 0x43, 0xE5, 0x83, 0xA7, 0x43, + // Bytes 780 - 7bf + 0xE5, 0x84, 0xAA, 0x43, 0xE5, 0x84, 0xBF, 0x43, + 0xE5, 0x85, 0x80, 0x43, 0xE5, 0x85, 0x85, 0x43, + 0xE5, 0x85, 0x8D, 0x43, 0xE5, 0x85, 0x94, 0x43, + 0xE5, 0x85, 0xA4, 0x43, 0xE5, 0x85, 0xA5, 0x43, + 0xE5, 0x85, 0xA7, 0x43, 0xE5, 0x85, 0xA8, 0x43, + 0xE5, 0x85, 0xA9, 0x43, 0xE5, 0x85, 0xAB, 0x43, + 0xE5, 0x85, 0xAD, 0x43, 0xE5, 0x85, 0xB7, 0x43, + 0xE5, 0x86, 0x80, 0x43, 0xE5, 0x86, 0x82, 0x43, + // Bytes 7c0 - 7ff + 0xE5, 0x86, 0x8D, 0x43, 0xE5, 0x86, 0x92, 0x43, + 0xE5, 0x86, 0x95, 0x43, 0xE5, 0x86, 0x96, 0x43, + 0xE5, 0x86, 0x97, 0x43, 0xE5, 0x86, 0x99, 0x43, + 0xE5, 0x86, 0xA4, 0x43, 0xE5, 0x86, 0xAB, 0x43, + 0xE5, 0x86, 0xAC, 0x43, 
0xE5, 0x86, 0xB5, 0x43, + 0xE5, 0x86, 0xB7, 0x43, 0xE5, 0x87, 0x89, 0x43, + 0xE5, 0x87, 0x8C, 0x43, 0xE5, 0x87, 0x9C, 0x43, + 0xE5, 0x87, 0x9E, 0x43, 0xE5, 0x87, 0xA0, 0x43, + // Bytes 800 - 83f + 0xE5, 0x87, 0xB5, 0x43, 0xE5, 0x88, 0x80, 0x43, + 0xE5, 0x88, 0x83, 0x43, 0xE5, 0x88, 0x87, 0x43, + 0xE5, 0x88, 0x97, 0x43, 0xE5, 0x88, 0x9D, 0x43, + 0xE5, 0x88, 0xA9, 0x43, 0xE5, 0x88, 0xBA, 0x43, + 0xE5, 0x88, 0xBB, 0x43, 0xE5, 0x89, 0x86, 0x43, + 0xE5, 0x89, 0x8D, 0x43, 0xE5, 0x89, 0xB2, 0x43, + 0xE5, 0x89, 0xB7, 0x43, 0xE5, 0x8A, 0x89, 0x43, + 0xE5, 0x8A, 0x9B, 0x43, 0xE5, 0x8A, 0xA3, 0x43, + // Bytes 840 - 87f + 0xE5, 0x8A, 0xB3, 0x43, 0xE5, 0x8A, 0xB4, 0x43, + 0xE5, 0x8B, 0x87, 0x43, 0xE5, 0x8B, 0x89, 0x43, + 0xE5, 0x8B, 0x92, 0x43, 0xE5, 0x8B, 0x9E, 0x43, + 0xE5, 0x8B, 0xA4, 0x43, 0xE5, 0x8B, 0xB5, 0x43, + 0xE5, 0x8B, 0xB9, 0x43, 0xE5, 0x8B, 0xBA, 0x43, + 0xE5, 0x8C, 0x85, 0x43, 0xE5, 0x8C, 0x86, 0x43, + 0xE5, 0x8C, 0x95, 0x43, 0xE5, 0x8C, 0x97, 0x43, + 0xE5, 0x8C, 0x9A, 0x43, 0xE5, 0x8C, 0xB8, 0x43, + // Bytes 880 - 8bf + 0xE5, 0x8C, 0xBB, 0x43, 0xE5, 0x8C, 0xBF, 0x43, + 0xE5, 0x8D, 0x81, 0x43, 0xE5, 0x8D, 0x84, 0x43, + 0xE5, 0x8D, 0x85, 0x43, 0xE5, 0x8D, 0x89, 0x43, + 0xE5, 0x8D, 0x91, 0x43, 0xE5, 0x8D, 0x94, 0x43, + 0xE5, 0x8D, 0x9A, 0x43, 0xE5, 0x8D, 0x9C, 0x43, + 0xE5, 0x8D, 0xA9, 0x43, 0xE5, 0x8D, 0xB0, 0x43, + 0xE5, 0x8D, 0xB3, 0x43, 0xE5, 0x8D, 0xB5, 0x43, + 0xE5, 0x8D, 0xBD, 0x43, 0xE5, 0x8D, 0xBF, 0x43, + // Bytes 8c0 - 8ff + 0xE5, 0x8E, 0x82, 0x43, 0xE5, 0x8E, 0xB6, 0x43, + 0xE5, 0x8F, 0x83, 0x43, 0xE5, 0x8F, 0x88, 0x43, + 0xE5, 0x8F, 0x8A, 0x43, 0xE5, 0x8F, 0x8C, 0x43, + 0xE5, 0x8F, 0x9F, 0x43, 0xE5, 0x8F, 0xA3, 0x43, + 0xE5, 0x8F, 0xA5, 0x43, 0xE5, 0x8F, 0xAB, 0x43, + 0xE5, 0x8F, 0xAF, 0x43, 0xE5, 0x8F, 0xB1, 0x43, + 0xE5, 0x8F, 0xB3, 0x43, 0xE5, 0x90, 0x86, 0x43, + 0xE5, 0x90, 0x88, 0x43, 0xE5, 0x90, 0x8D, 0x43, + // Bytes 900 - 93f + 0xE5, 0x90, 0x8F, 0x43, 0xE5, 0x90, 0x9D, 0x43, + 0xE5, 0x90, 0xB8, 0x43, 0xE5, 0x90, 0xB9, 0x43, + 0xE5, 0x91, 0x82, 0x43, 0xE5, 0x91, 0x88, 0x43, + 0xE5, 0x91, 0xA8, 0x43, 0xE5, 0x92, 0x9E, 0x43, + 0xE5, 0x92, 0xA2, 0x43, 0xE5, 0x92, 0xBD, 0x43, + 0xE5, 0x93, 0xB6, 0x43, 0xE5, 0x94, 0x90, 0x43, + 0xE5, 0x95, 0x8F, 0x43, 0xE5, 0x95, 0x93, 0x43, + 0xE5, 0x95, 0x95, 0x43, 0xE5, 0x95, 0xA3, 0x43, + // Bytes 940 - 97f + 0xE5, 0x96, 0x84, 0x43, 0xE5, 0x96, 0x87, 0x43, + 0xE5, 0x96, 0x99, 0x43, 0xE5, 0x96, 0x9D, 0x43, + 0xE5, 0x96, 0xAB, 0x43, 0xE5, 0x96, 0xB3, 0x43, + 0xE5, 0x96, 0xB6, 0x43, 0xE5, 0x97, 0x80, 0x43, + 0xE5, 0x97, 0x82, 0x43, 0xE5, 0x97, 0xA2, 0x43, + 0xE5, 0x98, 0x86, 0x43, 0xE5, 0x99, 0x91, 0x43, + 0xE5, 0x99, 0xA8, 0x43, 0xE5, 0x99, 0xB4, 0x43, + 0xE5, 0x9B, 0x97, 0x43, 0xE5, 0x9B, 0x9B, 0x43, + // Bytes 980 - 9bf + 0xE5, 0x9B, 0xB9, 0x43, 0xE5, 0x9C, 0x96, 0x43, + 0xE5, 0x9C, 0x97, 0x43, 0xE5, 0x9C, 0x9F, 0x43, + 0xE5, 0x9C, 0xB0, 0x43, 0xE5, 0x9E, 0x8B, 0x43, + 0xE5, 0x9F, 0x8E, 0x43, 0xE5, 0x9F, 0xB4, 0x43, + 0xE5, 0xA0, 0x8D, 0x43, 0xE5, 0xA0, 0xB1, 0x43, + 0xE5, 0xA0, 0xB2, 0x43, 0xE5, 0xA1, 0x80, 0x43, + 0xE5, 0xA1, 0x9A, 0x43, 0xE5, 0xA1, 0x9E, 0x43, + 0xE5, 0xA2, 0xA8, 0x43, 0xE5, 0xA2, 0xAC, 0x43, + // Bytes 9c0 - 9ff + 0xE5, 0xA2, 0xB3, 0x43, 0xE5, 0xA3, 0x98, 0x43, + 0xE5, 0xA3, 0x9F, 0x43, 0xE5, 0xA3, 0xAB, 0x43, + 0xE5, 0xA3, 0xAE, 0x43, 0xE5, 0xA3, 0xB0, 0x43, + 0xE5, 0xA3, 0xB2, 0x43, 0xE5, 0xA3, 0xB7, 0x43, + 0xE5, 0xA4, 0x82, 0x43, 0xE5, 0xA4, 0x86, 0x43, + 0xE5, 0xA4, 0x8A, 0x43, 0xE5, 0xA4, 0x95, 0x43, + 0xE5, 0xA4, 0x9A, 0x43, 0xE5, 0xA4, 0x9C, 0x43, + 0xE5, 0xA4, 0xA2, 0x43, 0xE5, 0xA4, 0xA7, 0x43, + // Bytes 
a00 - a3f + 0xE5, 0xA4, 0xA9, 0x43, 0xE5, 0xA5, 0x84, 0x43, + 0xE5, 0xA5, 0x88, 0x43, 0xE5, 0xA5, 0x91, 0x43, + 0xE5, 0xA5, 0x94, 0x43, 0xE5, 0xA5, 0xA2, 0x43, + 0xE5, 0xA5, 0xB3, 0x43, 0xE5, 0xA7, 0x98, 0x43, + 0xE5, 0xA7, 0xAC, 0x43, 0xE5, 0xA8, 0x9B, 0x43, + 0xE5, 0xA8, 0xA7, 0x43, 0xE5, 0xA9, 0xA2, 0x43, + 0xE5, 0xA9, 0xA6, 0x43, 0xE5, 0xAA, 0xB5, 0x43, + 0xE5, 0xAC, 0x88, 0x43, 0xE5, 0xAC, 0xA8, 0x43, + // Bytes a40 - a7f + 0xE5, 0xAC, 0xBE, 0x43, 0xE5, 0xAD, 0x90, 0x43, + 0xE5, 0xAD, 0x97, 0x43, 0xE5, 0xAD, 0xA6, 0x43, + 0xE5, 0xAE, 0x80, 0x43, 0xE5, 0xAE, 0x85, 0x43, + 0xE5, 0xAE, 0x97, 0x43, 0xE5, 0xAF, 0x83, 0x43, + 0xE5, 0xAF, 0x98, 0x43, 0xE5, 0xAF, 0xA7, 0x43, + 0xE5, 0xAF, 0xAE, 0x43, 0xE5, 0xAF, 0xB3, 0x43, + 0xE5, 0xAF, 0xB8, 0x43, 0xE5, 0xAF, 0xBF, 0x43, + 0xE5, 0xB0, 0x86, 0x43, 0xE5, 0xB0, 0x8F, 0x43, + // Bytes a80 - abf + 0xE5, 0xB0, 0xA2, 0x43, 0xE5, 0xB0, 0xB8, 0x43, + 0xE5, 0xB0, 0xBF, 0x43, 0xE5, 0xB1, 0xA0, 0x43, + 0xE5, 0xB1, 0xA2, 0x43, 0xE5, 0xB1, 0xA4, 0x43, + 0xE5, 0xB1, 0xA5, 0x43, 0xE5, 0xB1, 0xAE, 0x43, + 0xE5, 0xB1, 0xB1, 0x43, 0xE5, 0xB2, 0x8D, 0x43, + 0xE5, 0xB3, 0x80, 0x43, 0xE5, 0xB4, 0x99, 0x43, + 0xE5, 0xB5, 0x83, 0x43, 0xE5, 0xB5, 0x90, 0x43, + 0xE5, 0xB5, 0xAB, 0x43, 0xE5, 0xB5, 0xAE, 0x43, + // Bytes ac0 - aff + 0xE5, 0xB5, 0xBC, 0x43, 0xE5, 0xB6, 0xB2, 0x43, + 0xE5, 0xB6, 0xBA, 0x43, 0xE5, 0xB7, 0x9B, 0x43, + 0xE5, 0xB7, 0xA1, 0x43, 0xE5, 0xB7, 0xA2, 0x43, + 0xE5, 0xB7, 0xA5, 0x43, 0xE5, 0xB7, 0xA6, 0x43, + 0xE5, 0xB7, 0xB1, 0x43, 0xE5, 0xB7, 0xBD, 0x43, + 0xE5, 0xB7, 0xBE, 0x43, 0xE5, 0xB8, 0xA8, 0x43, + 0xE5, 0xB8, 0xBD, 0x43, 0xE5, 0xB9, 0xA9, 0x43, + 0xE5, 0xB9, 0xB2, 0x43, 0xE5, 0xB9, 0xB4, 0x43, + // Bytes b00 - b3f + 0xE5, 0xB9, 0xBA, 0x43, 0xE5, 0xB9, 0xBC, 0x43, + 0xE5, 0xB9, 0xBF, 0x43, 0xE5, 0xBA, 0xA6, 0x43, + 0xE5, 0xBA, 0xB0, 0x43, 0xE5, 0xBA, 0xB3, 0x43, + 0xE5, 0xBA, 0xB6, 0x43, 0xE5, 0xBB, 0x89, 0x43, + 0xE5, 0xBB, 0x8A, 0x43, 0xE5, 0xBB, 0x92, 0x43, + 0xE5, 0xBB, 0x93, 0x43, 0xE5, 0xBB, 0x99, 0x43, + 0xE5, 0xBB, 0xAC, 0x43, 0xE5, 0xBB, 0xB4, 0x43, + 0xE5, 0xBB, 0xBE, 0x43, 0xE5, 0xBC, 0x84, 0x43, + // Bytes b40 - b7f + 0xE5, 0xBC, 0x8B, 0x43, 0xE5, 0xBC, 0x93, 0x43, + 0xE5, 0xBC, 0xA2, 0x43, 0xE5, 0xBD, 0x90, 0x43, + 0xE5, 0xBD, 0x93, 0x43, 0xE5, 0xBD, 0xA1, 0x43, + 0xE5, 0xBD, 0xA2, 0x43, 0xE5, 0xBD, 0xA9, 0x43, + 0xE5, 0xBD, 0xAB, 0x43, 0xE5, 0xBD, 0xB3, 0x43, + 0xE5, 0xBE, 0x8B, 0x43, 0xE5, 0xBE, 0x8C, 0x43, + 0xE5, 0xBE, 0x97, 0x43, 0xE5, 0xBE, 0x9A, 0x43, + 0xE5, 0xBE, 0xA9, 0x43, 0xE5, 0xBE, 0xAD, 0x43, + // Bytes b80 - bbf + 0xE5, 0xBF, 0x83, 0x43, 0xE5, 0xBF, 0x8D, 0x43, + 0xE5, 0xBF, 0x97, 0x43, 0xE5, 0xBF, 0xB5, 0x43, + 0xE5, 0xBF, 0xB9, 0x43, 0xE6, 0x80, 0x92, 0x43, + 0xE6, 0x80, 0x9C, 0x43, 0xE6, 0x81, 0xB5, 0x43, + 0xE6, 0x82, 0x81, 0x43, 0xE6, 0x82, 0x94, 0x43, + 0xE6, 0x83, 0x87, 0x43, 0xE6, 0x83, 0x98, 0x43, + 0xE6, 0x83, 0xA1, 0x43, 0xE6, 0x84, 0x88, 0x43, + 0xE6, 0x85, 0x84, 0x43, 0xE6, 0x85, 0x88, 0x43, + // Bytes bc0 - bff + 0xE6, 0x85, 0x8C, 0x43, 0xE6, 0x85, 0x8E, 0x43, + 0xE6, 0x85, 0xA0, 0x43, 0xE6, 0x85, 0xA8, 0x43, + 0xE6, 0x85, 0xBA, 0x43, 0xE6, 0x86, 0x8E, 0x43, + 0xE6, 0x86, 0x90, 0x43, 0xE6, 0x86, 0xA4, 0x43, + 0xE6, 0x86, 0xAF, 0x43, 0xE6, 0x86, 0xB2, 0x43, + 0xE6, 0x87, 0x9E, 0x43, 0xE6, 0x87, 0xB2, 0x43, + 0xE6, 0x87, 0xB6, 0x43, 0xE6, 0x88, 0x80, 0x43, + 0xE6, 0x88, 0x88, 0x43, 0xE6, 0x88, 0x90, 0x43, + // Bytes c00 - c3f + 0xE6, 0x88, 0x9B, 0x43, 0xE6, 0x88, 0xAE, 0x43, + 0xE6, 0x88, 0xB4, 0x43, 0xE6, 0x88, 0xB6, 0x43, + 0xE6, 0x89, 0x8B, 0x43, 0xE6, 0x89, 0x93, 0x43, + 0xE6, 0x89, 0x9D, 0x43, 
0xE6, 0x8A, 0x95, 0x43, + 0xE6, 0x8A, 0xB1, 0x43, 0xE6, 0x8B, 0x89, 0x43, + 0xE6, 0x8B, 0x8F, 0x43, 0xE6, 0x8B, 0x93, 0x43, + 0xE6, 0x8B, 0x94, 0x43, 0xE6, 0x8B, 0xBC, 0x43, + 0xE6, 0x8B, 0xBE, 0x43, 0xE6, 0x8C, 0x87, 0x43, + // Bytes c40 - c7f + 0xE6, 0x8C, 0xBD, 0x43, 0xE6, 0x8D, 0x90, 0x43, + 0xE6, 0x8D, 0x95, 0x43, 0xE6, 0x8D, 0xA8, 0x43, + 0xE6, 0x8D, 0xBB, 0x43, 0xE6, 0x8E, 0x83, 0x43, + 0xE6, 0x8E, 0xA0, 0x43, 0xE6, 0x8E, 0xA9, 0x43, + 0xE6, 0x8F, 0x84, 0x43, 0xE6, 0x8F, 0x85, 0x43, + 0xE6, 0x8F, 0xA4, 0x43, 0xE6, 0x90, 0x9C, 0x43, + 0xE6, 0x90, 0xA2, 0x43, 0xE6, 0x91, 0x92, 0x43, + 0xE6, 0x91, 0xA9, 0x43, 0xE6, 0x91, 0xB7, 0x43, + // Bytes c80 - cbf + 0xE6, 0x91, 0xBE, 0x43, 0xE6, 0x92, 0x9A, 0x43, + 0xE6, 0x92, 0x9D, 0x43, 0xE6, 0x93, 0x84, 0x43, + 0xE6, 0x94, 0xAF, 0x43, 0xE6, 0x94, 0xB4, 0x43, + 0xE6, 0x95, 0x8F, 0x43, 0xE6, 0x95, 0x96, 0x43, + 0xE6, 0x95, 0xAC, 0x43, 0xE6, 0x95, 0xB8, 0x43, + 0xE6, 0x96, 0x87, 0x43, 0xE6, 0x96, 0x97, 0x43, + 0xE6, 0x96, 0x99, 0x43, 0xE6, 0x96, 0xA4, 0x43, + 0xE6, 0x96, 0xB0, 0x43, 0xE6, 0x96, 0xB9, 0x43, + // Bytes cc0 - cff + 0xE6, 0x97, 0x85, 0x43, 0xE6, 0x97, 0xA0, 0x43, + 0xE6, 0x97, 0xA2, 0x43, 0xE6, 0x97, 0xA3, 0x43, + 0xE6, 0x97, 0xA5, 0x43, 0xE6, 0x98, 0x93, 0x43, + 0xE6, 0x98, 0xA0, 0x43, 0xE6, 0x99, 0x89, 0x43, + 0xE6, 0x99, 0xB4, 0x43, 0xE6, 0x9A, 0x88, 0x43, + 0xE6, 0x9A, 0x91, 0x43, 0xE6, 0x9A, 0x9C, 0x43, + 0xE6, 0x9A, 0xB4, 0x43, 0xE6, 0x9B, 0x86, 0x43, + 0xE6, 0x9B, 0xB0, 0x43, 0xE6, 0x9B, 0xB4, 0x43, + // Bytes d00 - d3f + 0xE6, 0x9B, 0xB8, 0x43, 0xE6, 0x9C, 0x80, 0x43, + 0xE6, 0x9C, 0x88, 0x43, 0xE6, 0x9C, 0x89, 0x43, + 0xE6, 0x9C, 0x97, 0x43, 0xE6, 0x9C, 0x9B, 0x43, + 0xE6, 0x9C, 0xA1, 0x43, 0xE6, 0x9C, 0xA8, 0x43, + 0xE6, 0x9D, 0x8E, 0x43, 0xE6, 0x9D, 0x93, 0x43, + 0xE6, 0x9D, 0x96, 0x43, 0xE6, 0x9D, 0x9E, 0x43, + 0xE6, 0x9D, 0xBB, 0x43, 0xE6, 0x9E, 0x85, 0x43, + 0xE6, 0x9E, 0x97, 0x43, 0xE6, 0x9F, 0xB3, 0x43, + // Bytes d40 - d7f + 0xE6, 0x9F, 0xBA, 0x43, 0xE6, 0xA0, 0x97, 0x43, + 0xE6, 0xA0, 0x9F, 0x43, 0xE6, 0xA0, 0xAA, 0x43, + 0xE6, 0xA1, 0x92, 0x43, 0xE6, 0xA2, 0x81, 0x43, + 0xE6, 0xA2, 0x85, 0x43, 0xE6, 0xA2, 0x8E, 0x43, + 0xE6, 0xA2, 0xA8, 0x43, 0xE6, 0xA4, 0x94, 0x43, + 0xE6, 0xA5, 0x82, 0x43, 0xE6, 0xA6, 0xA3, 0x43, + 0xE6, 0xA7, 0xAA, 0x43, 0xE6, 0xA8, 0x82, 0x43, + 0xE6, 0xA8, 0x93, 0x43, 0xE6, 0xAA, 0xA8, 0x43, + // Bytes d80 - dbf + 0xE6, 0xAB, 0x93, 0x43, 0xE6, 0xAB, 0x9B, 0x43, + 0xE6, 0xAC, 0x84, 0x43, 0xE6, 0xAC, 0xA0, 0x43, + 0xE6, 0xAC, 0xA1, 0x43, 0xE6, 0xAD, 0x94, 0x43, + 0xE6, 0xAD, 0xA2, 0x43, 0xE6, 0xAD, 0xA3, 0x43, + 0xE6, 0xAD, 0xB2, 0x43, 0xE6, 0xAD, 0xB7, 0x43, + 0xE6, 0xAD, 0xB9, 0x43, 0xE6, 0xAE, 0x9F, 0x43, + 0xE6, 0xAE, 0xAE, 0x43, 0xE6, 0xAE, 0xB3, 0x43, + 0xE6, 0xAE, 0xBA, 0x43, 0xE6, 0xAE, 0xBB, 0x43, + // Bytes dc0 - dff + 0xE6, 0xAF, 0x8B, 0x43, 0xE6, 0xAF, 0x8D, 0x43, + 0xE6, 0xAF, 0x94, 0x43, 0xE6, 0xAF, 0x9B, 0x43, + 0xE6, 0xB0, 0x8F, 0x43, 0xE6, 0xB0, 0x94, 0x43, + 0xE6, 0xB0, 0xB4, 0x43, 0xE6, 0xB1, 0x8E, 0x43, + 0xE6, 0xB1, 0xA7, 0x43, 0xE6, 0xB2, 0x88, 0x43, + 0xE6, 0xB2, 0xBF, 0x43, 0xE6, 0xB3, 0x8C, 0x43, + 0xE6, 0xB3, 0x8D, 0x43, 0xE6, 0xB3, 0xA5, 0x43, + 0xE6, 0xB3, 0xA8, 0x43, 0xE6, 0xB4, 0x96, 0x43, + // Bytes e00 - e3f + 0xE6, 0xB4, 0x9B, 0x43, 0xE6, 0xB4, 0x9E, 0x43, + 0xE6, 0xB4, 0xB4, 0x43, 0xE6, 0xB4, 0xBE, 0x43, + 0xE6, 0xB5, 0x81, 0x43, 0xE6, 0xB5, 0xA9, 0x43, + 0xE6, 0xB5, 0xAA, 0x43, 0xE6, 0xB5, 0xB7, 0x43, + 0xE6, 0xB5, 0xB8, 0x43, 0xE6, 0xB6, 0x85, 0x43, + 0xE6, 0xB7, 0x8B, 0x43, 0xE6, 0xB7, 0x9A, 0x43, + 0xE6, 0xB7, 0xAA, 0x43, 0xE6, 0xB7, 0xB9, 0x43, + 0xE6, 
0xB8, 0x9A, 0x43, 0xE6, 0xB8, 0xAF, 0x43, + // Bytes e40 - e7f + 0xE6, 0xB9, 0xAE, 0x43, 0xE6, 0xBA, 0x80, 0x43, + 0xE6, 0xBA, 0x9C, 0x43, 0xE6, 0xBA, 0xBA, 0x43, + 0xE6, 0xBB, 0x87, 0x43, 0xE6, 0xBB, 0x8B, 0x43, + 0xE6, 0xBB, 0x91, 0x43, 0xE6, 0xBB, 0x9B, 0x43, + 0xE6, 0xBC, 0x8F, 0x43, 0xE6, 0xBC, 0x94, 0x43, + 0xE6, 0xBC, 0xA2, 0x43, 0xE6, 0xBC, 0xA3, 0x43, + 0xE6, 0xBD, 0xAE, 0x43, 0xE6, 0xBF, 0x86, 0x43, + 0xE6, 0xBF, 0xAB, 0x43, 0xE6, 0xBF, 0xBE, 0x43, + // Bytes e80 - ebf + 0xE7, 0x80, 0x9B, 0x43, 0xE7, 0x80, 0x9E, 0x43, + 0xE7, 0x80, 0xB9, 0x43, 0xE7, 0x81, 0x8A, 0x43, + 0xE7, 0x81, 0xAB, 0x43, 0xE7, 0x81, 0xB0, 0x43, + 0xE7, 0x81, 0xB7, 0x43, 0xE7, 0x81, 0xBD, 0x43, + 0xE7, 0x82, 0x99, 0x43, 0xE7, 0x82, 0xAD, 0x43, + 0xE7, 0x83, 0x88, 0x43, 0xE7, 0x83, 0x99, 0x43, + 0xE7, 0x84, 0xA1, 0x43, 0xE7, 0x85, 0x85, 0x43, + 0xE7, 0x85, 0x89, 0x43, 0xE7, 0x85, 0xAE, 0x43, + // Bytes ec0 - eff + 0xE7, 0x86, 0x9C, 0x43, 0xE7, 0x87, 0x8E, 0x43, + 0xE7, 0x87, 0x90, 0x43, 0xE7, 0x88, 0x90, 0x43, + 0xE7, 0x88, 0x9B, 0x43, 0xE7, 0x88, 0xA8, 0x43, + 0xE7, 0x88, 0xAA, 0x43, 0xE7, 0x88, 0xAB, 0x43, + 0xE7, 0x88, 0xB5, 0x43, 0xE7, 0x88, 0xB6, 0x43, + 0xE7, 0x88, 0xBB, 0x43, 0xE7, 0x88, 0xBF, 0x43, + 0xE7, 0x89, 0x87, 0x43, 0xE7, 0x89, 0x90, 0x43, + 0xE7, 0x89, 0x99, 0x43, 0xE7, 0x89, 0x9B, 0x43, + // Bytes f00 - f3f + 0xE7, 0x89, 0xA2, 0x43, 0xE7, 0x89, 0xB9, 0x43, + 0xE7, 0x8A, 0x80, 0x43, 0xE7, 0x8A, 0x95, 0x43, + 0xE7, 0x8A, 0xAC, 0x43, 0xE7, 0x8A, 0xAF, 0x43, + 0xE7, 0x8B, 0x80, 0x43, 0xE7, 0x8B, 0xBC, 0x43, + 0xE7, 0x8C, 0xAA, 0x43, 0xE7, 0x8D, 0xB5, 0x43, + 0xE7, 0x8D, 0xBA, 0x43, 0xE7, 0x8E, 0x84, 0x43, + 0xE7, 0x8E, 0x87, 0x43, 0xE7, 0x8E, 0x89, 0x43, + 0xE7, 0x8E, 0x8B, 0x43, 0xE7, 0x8E, 0xA5, 0x43, + // Bytes f40 - f7f + 0xE7, 0x8E, 0xB2, 0x43, 0xE7, 0x8F, 0x9E, 0x43, + 0xE7, 0x90, 0x86, 0x43, 0xE7, 0x90, 0x89, 0x43, + 0xE7, 0x90, 0xA2, 0x43, 0xE7, 0x91, 0x87, 0x43, + 0xE7, 0x91, 0x9C, 0x43, 0xE7, 0x91, 0xA9, 0x43, + 0xE7, 0x91, 0xB1, 0x43, 0xE7, 0x92, 0x85, 0x43, + 0xE7, 0x92, 0x89, 0x43, 0xE7, 0x92, 0x98, 0x43, + 0xE7, 0x93, 0x8A, 0x43, 0xE7, 0x93, 0x9C, 0x43, + 0xE7, 0x93, 0xA6, 0x43, 0xE7, 0x94, 0x86, 0x43, + // Bytes f80 - fbf + 0xE7, 0x94, 0x98, 0x43, 0xE7, 0x94, 0x9F, 0x43, + 0xE7, 0x94, 0xA4, 0x43, 0xE7, 0x94, 0xA8, 0x43, + 0xE7, 0x94, 0xB0, 0x43, 0xE7, 0x94, 0xB2, 0x43, + 0xE7, 0x94, 0xB3, 0x43, 0xE7, 0x94, 0xB7, 0x43, + 0xE7, 0x94, 0xBB, 0x43, 0xE7, 0x94, 0xBE, 0x43, + 0xE7, 0x95, 0x99, 0x43, 0xE7, 0x95, 0xA5, 0x43, + 0xE7, 0x95, 0xB0, 0x43, 0xE7, 0x96, 0x8B, 0x43, + 0xE7, 0x96, 0x92, 0x43, 0xE7, 0x97, 0xA2, 0x43, + // Bytes fc0 - fff + 0xE7, 0x98, 0x90, 0x43, 0xE7, 0x98, 0x9D, 0x43, + 0xE7, 0x98, 0x9F, 0x43, 0xE7, 0x99, 0x82, 0x43, + 0xE7, 0x99, 0xA9, 0x43, 0xE7, 0x99, 0xB6, 0x43, + 0xE7, 0x99, 0xBD, 0x43, 0xE7, 0x9A, 0xAE, 0x43, + 0xE7, 0x9A, 0xBF, 0x43, 0xE7, 0x9B, 0x8A, 0x43, + 0xE7, 0x9B, 0x9B, 0x43, 0xE7, 0x9B, 0xA3, 0x43, + 0xE7, 0x9B, 0xA7, 0x43, 0xE7, 0x9B, 0xAE, 0x43, + 0xE7, 0x9B, 0xB4, 0x43, 0xE7, 0x9C, 0x81, 0x43, + // Bytes 1000 - 103f + 0xE7, 0x9C, 0x9E, 0x43, 0xE7, 0x9C, 0x9F, 0x43, + 0xE7, 0x9D, 0x80, 0x43, 0xE7, 0x9D, 0x8A, 0x43, + 0xE7, 0x9E, 0x8B, 0x43, 0xE7, 0x9E, 0xA7, 0x43, + 0xE7, 0x9F, 0x9B, 0x43, 0xE7, 0x9F, 0xA2, 0x43, + 0xE7, 0x9F, 0xB3, 0x43, 0xE7, 0xA1, 0x8E, 0x43, + 0xE7, 0xA1, 0xAB, 0x43, 0xE7, 0xA2, 0x8C, 0x43, + 0xE7, 0xA2, 0x91, 0x43, 0xE7, 0xA3, 0x8A, 0x43, + 0xE7, 0xA3, 0x8C, 0x43, 0xE7, 0xA3, 0xBB, 0x43, + // Bytes 1040 - 107f + 0xE7, 0xA4, 0xAA, 0x43, 0xE7, 0xA4, 0xBA, 0x43, + 0xE7, 0xA4, 0xBC, 0x43, 0xE7, 0xA4, 0xBE, 0x43, + 0xE7, 0xA5, 0x88, 
0x43, 0xE7, 0xA5, 0x89, 0x43, + 0xE7, 0xA5, 0x90, 0x43, 0xE7, 0xA5, 0x96, 0x43, + 0xE7, 0xA5, 0x9D, 0x43, 0xE7, 0xA5, 0x9E, 0x43, + 0xE7, 0xA5, 0xA5, 0x43, 0xE7, 0xA5, 0xBF, 0x43, + 0xE7, 0xA6, 0x81, 0x43, 0xE7, 0xA6, 0x8D, 0x43, + 0xE7, 0xA6, 0x8E, 0x43, 0xE7, 0xA6, 0x8F, 0x43, + // Bytes 1080 - 10bf + 0xE7, 0xA6, 0xAE, 0x43, 0xE7, 0xA6, 0xB8, 0x43, + 0xE7, 0xA6, 0xBE, 0x43, 0xE7, 0xA7, 0x8A, 0x43, + 0xE7, 0xA7, 0x98, 0x43, 0xE7, 0xA7, 0xAB, 0x43, + 0xE7, 0xA8, 0x9C, 0x43, 0xE7, 0xA9, 0x80, 0x43, + 0xE7, 0xA9, 0x8A, 0x43, 0xE7, 0xA9, 0x8F, 0x43, + 0xE7, 0xA9, 0xB4, 0x43, 0xE7, 0xA9, 0xBA, 0x43, + 0xE7, 0xAA, 0x81, 0x43, 0xE7, 0xAA, 0xB1, 0x43, + 0xE7, 0xAB, 0x8B, 0x43, 0xE7, 0xAB, 0xAE, 0x43, + // Bytes 10c0 - 10ff + 0xE7, 0xAB, 0xB9, 0x43, 0xE7, 0xAC, 0xA0, 0x43, + 0xE7, 0xAE, 0x8F, 0x43, 0xE7, 0xAF, 0x80, 0x43, + 0xE7, 0xAF, 0x86, 0x43, 0xE7, 0xAF, 0x89, 0x43, + 0xE7, 0xB0, 0xBE, 0x43, 0xE7, 0xB1, 0xA0, 0x43, + 0xE7, 0xB1, 0xB3, 0x43, 0xE7, 0xB1, 0xBB, 0x43, + 0xE7, 0xB2, 0x92, 0x43, 0xE7, 0xB2, 0xBE, 0x43, + 0xE7, 0xB3, 0x92, 0x43, 0xE7, 0xB3, 0x96, 0x43, + 0xE7, 0xB3, 0xA3, 0x43, 0xE7, 0xB3, 0xA7, 0x43, + // Bytes 1100 - 113f + 0xE7, 0xB3, 0xA8, 0x43, 0xE7, 0xB3, 0xB8, 0x43, + 0xE7, 0xB4, 0x80, 0x43, 0xE7, 0xB4, 0x90, 0x43, + 0xE7, 0xB4, 0xA2, 0x43, 0xE7, 0xB4, 0xAF, 0x43, + 0xE7, 0xB5, 0x82, 0x43, 0xE7, 0xB5, 0x9B, 0x43, + 0xE7, 0xB5, 0xA3, 0x43, 0xE7, 0xB6, 0xA0, 0x43, + 0xE7, 0xB6, 0xBE, 0x43, 0xE7, 0xB7, 0x87, 0x43, + 0xE7, 0xB7, 0xB4, 0x43, 0xE7, 0xB8, 0x82, 0x43, + 0xE7, 0xB8, 0x89, 0x43, 0xE7, 0xB8, 0xB7, 0x43, + // Bytes 1140 - 117f + 0xE7, 0xB9, 0x81, 0x43, 0xE7, 0xB9, 0x85, 0x43, + 0xE7, 0xBC, 0xB6, 0x43, 0xE7, 0xBC, 0xBE, 0x43, + 0xE7, 0xBD, 0x91, 0x43, 0xE7, 0xBD, 0xB2, 0x43, + 0xE7, 0xBD, 0xB9, 0x43, 0xE7, 0xBD, 0xBA, 0x43, + 0xE7, 0xBE, 0x85, 0x43, 0xE7, 0xBE, 0x8A, 0x43, + 0xE7, 0xBE, 0x95, 0x43, 0xE7, 0xBE, 0x9A, 0x43, + 0xE7, 0xBE, 0xBD, 0x43, 0xE7, 0xBF, 0xBA, 0x43, + 0xE8, 0x80, 0x81, 0x43, 0xE8, 0x80, 0x85, 0x43, + // Bytes 1180 - 11bf + 0xE8, 0x80, 0x8C, 0x43, 0xE8, 0x80, 0x92, 0x43, + 0xE8, 0x80, 0xB3, 0x43, 0xE8, 0x81, 0x86, 0x43, + 0xE8, 0x81, 0xA0, 0x43, 0xE8, 0x81, 0xAF, 0x43, + 0xE8, 0x81, 0xB0, 0x43, 0xE8, 0x81, 0xBE, 0x43, + 0xE8, 0x81, 0xBF, 0x43, 0xE8, 0x82, 0x89, 0x43, + 0xE8, 0x82, 0x8B, 0x43, 0xE8, 0x82, 0xAD, 0x43, + 0xE8, 0x82, 0xB2, 0x43, 0xE8, 0x84, 0x83, 0x43, + 0xE8, 0x84, 0xBE, 0x43, 0xE8, 0x87, 0x98, 0x43, + // Bytes 11c0 - 11ff + 0xE8, 0x87, 0xA3, 0x43, 0xE8, 0x87, 0xA8, 0x43, + 0xE8, 0x87, 0xAA, 0x43, 0xE8, 0x87, 0xAD, 0x43, + 0xE8, 0x87, 0xB3, 0x43, 0xE8, 0x87, 0xBC, 0x43, + 0xE8, 0x88, 0x81, 0x43, 0xE8, 0x88, 0x84, 0x43, + 0xE8, 0x88, 0x8C, 0x43, 0xE8, 0x88, 0x98, 0x43, + 0xE8, 0x88, 0x9B, 0x43, 0xE8, 0x88, 0x9F, 0x43, + 0xE8, 0x89, 0xAE, 0x43, 0xE8, 0x89, 0xAF, 0x43, + 0xE8, 0x89, 0xB2, 0x43, 0xE8, 0x89, 0xB8, 0x43, + // Bytes 1200 - 123f + 0xE8, 0x89, 0xB9, 0x43, 0xE8, 0x8A, 0x8B, 0x43, + 0xE8, 0x8A, 0x91, 0x43, 0xE8, 0x8A, 0x9D, 0x43, + 0xE8, 0x8A, 0xB1, 0x43, 0xE8, 0x8A, 0xB3, 0x43, + 0xE8, 0x8A, 0xBD, 0x43, 0xE8, 0x8B, 0xA5, 0x43, + 0xE8, 0x8B, 0xA6, 0x43, 0xE8, 0x8C, 0x9D, 0x43, + 0xE8, 0x8C, 0xA3, 0x43, 0xE8, 0x8C, 0xB6, 0x43, + 0xE8, 0x8D, 0x92, 0x43, 0xE8, 0x8D, 0x93, 0x43, + 0xE8, 0x8D, 0xA3, 0x43, 0xE8, 0x8E, 0xAD, 0x43, + // Bytes 1240 - 127f + 0xE8, 0x8E, 0xBD, 0x43, 0xE8, 0x8F, 0x89, 0x43, + 0xE8, 0x8F, 0x8A, 0x43, 0xE8, 0x8F, 0x8C, 0x43, + 0xE8, 0x8F, 0x9C, 0x43, 0xE8, 0x8F, 0xA7, 0x43, + 0xE8, 0x8F, 0xAF, 0x43, 0xE8, 0x8F, 0xB1, 0x43, + 0xE8, 0x90, 0xBD, 0x43, 0xE8, 0x91, 0x89, 0x43, + 0xE8, 0x91, 0x97, 0x43, 0xE8, 0x93, 
0xAE, 0x43, + 0xE8, 0x93, 0xB1, 0x43, 0xE8, 0x93, 0xB3, 0x43, + 0xE8, 0x93, 0xBC, 0x43, 0xE8, 0x94, 0x96, 0x43, + // Bytes 1280 - 12bf + 0xE8, 0x95, 0xA4, 0x43, 0xE8, 0x97, 0x8D, 0x43, + 0xE8, 0x97, 0xBA, 0x43, 0xE8, 0x98, 0x86, 0x43, + 0xE8, 0x98, 0x92, 0x43, 0xE8, 0x98, 0xAD, 0x43, + 0xE8, 0x98, 0xBF, 0x43, 0xE8, 0x99, 0x8D, 0x43, + 0xE8, 0x99, 0x90, 0x43, 0xE8, 0x99, 0x9C, 0x43, + 0xE8, 0x99, 0xA7, 0x43, 0xE8, 0x99, 0xA9, 0x43, + 0xE8, 0x99, 0xAB, 0x43, 0xE8, 0x9A, 0x88, 0x43, + 0xE8, 0x9A, 0xA9, 0x43, 0xE8, 0x9B, 0xA2, 0x43, + // Bytes 12c0 - 12ff + 0xE8, 0x9C, 0x8E, 0x43, 0xE8, 0x9C, 0xA8, 0x43, + 0xE8, 0x9D, 0xAB, 0x43, 0xE8, 0x9D, 0xB9, 0x43, + 0xE8, 0x9E, 0x86, 0x43, 0xE8, 0x9E, 0xBA, 0x43, + 0xE8, 0x9F, 0xA1, 0x43, 0xE8, 0xA0, 0x81, 0x43, + 0xE8, 0xA0, 0x9F, 0x43, 0xE8, 0xA1, 0x80, 0x43, + 0xE8, 0xA1, 0x8C, 0x43, 0xE8, 0xA1, 0xA0, 0x43, + 0xE8, 0xA1, 0xA3, 0x43, 0xE8, 0xA3, 0x82, 0x43, + 0xE8, 0xA3, 0x8F, 0x43, 0xE8, 0xA3, 0x97, 0x43, + // Bytes 1300 - 133f + 0xE8, 0xA3, 0x9E, 0x43, 0xE8, 0xA3, 0xA1, 0x43, + 0xE8, 0xA3, 0xB8, 0x43, 0xE8, 0xA3, 0xBA, 0x43, + 0xE8, 0xA4, 0x90, 0x43, 0xE8, 0xA5, 0x81, 0x43, + 0xE8, 0xA5, 0xA4, 0x43, 0xE8, 0xA5, 0xBE, 0x43, + 0xE8, 0xA6, 0x86, 0x43, 0xE8, 0xA6, 0x8B, 0x43, + 0xE8, 0xA6, 0x96, 0x43, 0xE8, 0xA7, 0x92, 0x43, + 0xE8, 0xA7, 0xA3, 0x43, 0xE8, 0xA8, 0x80, 0x43, + 0xE8, 0xAA, 0xA0, 0x43, 0xE8, 0xAA, 0xAA, 0x43, + // Bytes 1340 - 137f + 0xE8, 0xAA, 0xBF, 0x43, 0xE8, 0xAB, 0x8B, 0x43, + 0xE8, 0xAB, 0x92, 0x43, 0xE8, 0xAB, 0x96, 0x43, + 0xE8, 0xAB, 0xAD, 0x43, 0xE8, 0xAB, 0xB8, 0x43, + 0xE8, 0xAB, 0xBE, 0x43, 0xE8, 0xAC, 0x81, 0x43, + 0xE8, 0xAC, 0xB9, 0x43, 0xE8, 0xAD, 0x98, 0x43, + 0xE8, 0xAE, 0x80, 0x43, 0xE8, 0xAE, 0x8A, 0x43, + 0xE8, 0xB0, 0xB7, 0x43, 0xE8, 0xB1, 0x86, 0x43, + 0xE8, 0xB1, 0x88, 0x43, 0xE8, 0xB1, 0x95, 0x43, + // Bytes 1380 - 13bf + 0xE8, 0xB1, 0xB8, 0x43, 0xE8, 0xB2, 0x9D, 0x43, + 0xE8, 0xB2, 0xA1, 0x43, 0xE8, 0xB2, 0xA9, 0x43, + 0xE8, 0xB2, 0xAB, 0x43, 0xE8, 0xB3, 0x81, 0x43, + 0xE8, 0xB3, 0x82, 0x43, 0xE8, 0xB3, 0x87, 0x43, + 0xE8, 0xB3, 0x88, 0x43, 0xE8, 0xB3, 0x93, 0x43, + 0xE8, 0xB4, 0x88, 0x43, 0xE8, 0xB4, 0x9B, 0x43, + 0xE8, 0xB5, 0xA4, 0x43, 0xE8, 0xB5, 0xB0, 0x43, + 0xE8, 0xB5, 0xB7, 0x43, 0xE8, 0xB6, 0xB3, 0x43, + // Bytes 13c0 - 13ff + 0xE8, 0xB6, 0xBC, 0x43, 0xE8, 0xB7, 0x8B, 0x43, + 0xE8, 0xB7, 0xAF, 0x43, 0xE8, 0xB7, 0xB0, 0x43, + 0xE8, 0xBA, 0xAB, 0x43, 0xE8, 0xBB, 0x8A, 0x43, + 0xE8, 0xBB, 0x94, 0x43, 0xE8, 0xBC, 0xA6, 0x43, + 0xE8, 0xBC, 0xAA, 0x43, 0xE8, 0xBC, 0xB8, 0x43, + 0xE8, 0xBC, 0xBB, 0x43, 0xE8, 0xBD, 0xA2, 0x43, + 0xE8, 0xBE, 0x9B, 0x43, 0xE8, 0xBE, 0x9E, 0x43, + 0xE8, 0xBE, 0xB0, 0x43, 0xE8, 0xBE, 0xB5, 0x43, + // Bytes 1400 - 143f + 0xE8, 0xBE, 0xB6, 0x43, 0xE9, 0x80, 0xA3, 0x43, + 0xE9, 0x80, 0xB8, 0x43, 0xE9, 0x81, 0x8A, 0x43, + 0xE9, 0x81, 0xA9, 0x43, 0xE9, 0x81, 0xB2, 0x43, + 0xE9, 0x81, 0xBC, 0x43, 0xE9, 0x82, 0x8F, 0x43, + 0xE9, 0x82, 0x91, 0x43, 0xE9, 0x82, 0x94, 0x43, + 0xE9, 0x83, 0x8E, 0x43, 0xE9, 0x83, 0x9E, 0x43, + 0xE9, 0x83, 0xB1, 0x43, 0xE9, 0x83, 0xBD, 0x43, + 0xE9, 0x84, 0x91, 0x43, 0xE9, 0x84, 0x9B, 0x43, + // Bytes 1440 - 147f + 0xE9, 0x85, 0x89, 0x43, 0xE9, 0x85, 0x8D, 0x43, + 0xE9, 0x85, 0xAA, 0x43, 0xE9, 0x86, 0x99, 0x43, + 0xE9, 0x86, 0xB4, 0x43, 0xE9, 0x87, 0x86, 0x43, + 0xE9, 0x87, 0x8C, 0x43, 0xE9, 0x87, 0x8F, 0x43, + 0xE9, 0x87, 0x91, 0x43, 0xE9, 0x88, 0xB4, 0x43, + 0xE9, 0x88, 0xB8, 0x43, 0xE9, 0x89, 0xB6, 0x43, + 0xE9, 0x89, 0xBC, 0x43, 0xE9, 0x8B, 0x97, 0x43, + 0xE9, 0x8B, 0x98, 0x43, 0xE9, 0x8C, 0x84, 0x43, + // Bytes 1480 - 14bf + 0xE9, 0x8D, 0x8A, 0x43, 0xE9, 
0x8F, 0xB9, 0x43, + 0xE9, 0x90, 0x95, 0x43, 0xE9, 0x95, 0xB7, 0x43, + 0xE9, 0x96, 0x80, 0x43, 0xE9, 0x96, 0x8B, 0x43, + 0xE9, 0x96, 0xAD, 0x43, 0xE9, 0x96, 0xB7, 0x43, + 0xE9, 0x98, 0x9C, 0x43, 0xE9, 0x98, 0xAE, 0x43, + 0xE9, 0x99, 0x8B, 0x43, 0xE9, 0x99, 0x8D, 0x43, + 0xE9, 0x99, 0xB5, 0x43, 0xE9, 0x99, 0xB8, 0x43, + 0xE9, 0x99, 0xBC, 0x43, 0xE9, 0x9A, 0x86, 0x43, + // Bytes 14c0 - 14ff + 0xE9, 0x9A, 0xA3, 0x43, 0xE9, 0x9A, 0xB6, 0x43, + 0xE9, 0x9A, 0xB7, 0x43, 0xE9, 0x9A, 0xB8, 0x43, + 0xE9, 0x9A, 0xB9, 0x43, 0xE9, 0x9B, 0x83, 0x43, + 0xE9, 0x9B, 0xA2, 0x43, 0xE9, 0x9B, 0xA3, 0x43, + 0xE9, 0x9B, 0xA8, 0x43, 0xE9, 0x9B, 0xB6, 0x43, + 0xE9, 0x9B, 0xB7, 0x43, 0xE9, 0x9C, 0xA3, 0x43, + 0xE9, 0x9C, 0xB2, 0x43, 0xE9, 0x9D, 0x88, 0x43, + 0xE9, 0x9D, 0x91, 0x43, 0xE9, 0x9D, 0x96, 0x43, + // Bytes 1500 - 153f + 0xE9, 0x9D, 0x9E, 0x43, 0xE9, 0x9D, 0xA2, 0x43, + 0xE9, 0x9D, 0xA9, 0x43, 0xE9, 0x9F, 0x8B, 0x43, + 0xE9, 0x9F, 0x9B, 0x43, 0xE9, 0x9F, 0xA0, 0x43, + 0xE9, 0x9F, 0xAD, 0x43, 0xE9, 0x9F, 0xB3, 0x43, + 0xE9, 0x9F, 0xBF, 0x43, 0xE9, 0xA0, 0x81, 0x43, + 0xE9, 0xA0, 0x85, 0x43, 0xE9, 0xA0, 0x8B, 0x43, + 0xE9, 0xA0, 0x98, 0x43, 0xE9, 0xA0, 0xA9, 0x43, + 0xE9, 0xA0, 0xBB, 0x43, 0xE9, 0xA1, 0x9E, 0x43, + // Bytes 1540 - 157f + 0xE9, 0xA2, 0xA8, 0x43, 0xE9, 0xA3, 0x9B, 0x43, + 0xE9, 0xA3, 0x9F, 0x43, 0xE9, 0xA3, 0xA2, 0x43, + 0xE9, 0xA3, 0xAF, 0x43, 0xE9, 0xA3, 0xBC, 0x43, + 0xE9, 0xA4, 0xA8, 0x43, 0xE9, 0xA4, 0xA9, 0x43, + 0xE9, 0xA6, 0x96, 0x43, 0xE9, 0xA6, 0x99, 0x43, + 0xE9, 0xA6, 0xA7, 0x43, 0xE9, 0xA6, 0xAC, 0x43, + 0xE9, 0xA7, 0x82, 0x43, 0xE9, 0xA7, 0xB1, 0x43, + 0xE9, 0xA7, 0xBE, 0x43, 0xE9, 0xA9, 0xAA, 0x43, + // Bytes 1580 - 15bf + 0xE9, 0xAA, 0xA8, 0x43, 0xE9, 0xAB, 0x98, 0x43, + 0xE9, 0xAB, 0x9F, 0x43, 0xE9, 0xAC, 0x92, 0x43, + 0xE9, 0xAC, 0xA5, 0x43, 0xE9, 0xAC, 0xAF, 0x43, + 0xE9, 0xAC, 0xB2, 0x43, 0xE9, 0xAC, 0xBC, 0x43, + 0xE9, 0xAD, 0x9A, 0x43, 0xE9, 0xAD, 0xAF, 0x43, + 0xE9, 0xB1, 0x80, 0x43, 0xE9, 0xB1, 0x97, 0x43, + 0xE9, 0xB3, 0xA5, 0x43, 0xE9, 0xB3, 0xBD, 0x43, + 0xE9, 0xB5, 0xA7, 0x43, 0xE9, 0xB6, 0xB4, 0x43, + // Bytes 15c0 - 15ff + 0xE9, 0xB7, 0xBA, 0x43, 0xE9, 0xB8, 0x9E, 0x43, + 0xE9, 0xB9, 0xB5, 0x43, 0xE9, 0xB9, 0xBF, 0x43, + 0xE9, 0xBA, 0x97, 0x43, 0xE9, 0xBA, 0x9F, 0x43, + 0xE9, 0xBA, 0xA5, 0x43, 0xE9, 0xBA, 0xBB, 0x43, + 0xE9, 0xBB, 0x83, 0x43, 0xE9, 0xBB, 0x8D, 0x43, + 0xE9, 0xBB, 0x8E, 0x43, 0xE9, 0xBB, 0x91, 0x43, + 0xE9, 0xBB, 0xB9, 0x43, 0xE9, 0xBB, 0xBD, 0x43, + 0xE9, 0xBB, 0xBE, 0x43, 0xE9, 0xBC, 0x85, 0x43, + // Bytes 1600 - 163f + 0xE9, 0xBC, 0x8E, 0x43, 0xE9, 0xBC, 0x8F, 0x43, + 0xE9, 0xBC, 0x93, 0x43, 0xE9, 0xBC, 0x96, 0x43, + 0xE9, 0xBC, 0xA0, 0x43, 0xE9, 0xBC, 0xBB, 0x43, + 0xE9, 0xBD, 0x83, 0x43, 0xE9, 0xBD, 0x8A, 0x43, + 0xE9, 0xBD, 0x92, 0x43, 0xE9, 0xBE, 0x8D, 0x43, + 0xE9, 0xBE, 0x8E, 0x43, 0xE9, 0xBE, 0x9C, 0x43, + 0xE9, 0xBE, 0x9F, 0x43, 0xE9, 0xBE, 0xA0, 0x43, + 0xEA, 0x9C, 0xA7, 0x43, 0xEA, 0x9D, 0xAF, 0x43, + // Bytes 1640 - 167f + 0xEA, 0xAC, 0xB7, 0x43, 0xEA, 0xAD, 0x92, 0x44, + 0xF0, 0xA0, 0x84, 0xA2, 0x44, 0xF0, 0xA0, 0x94, + 0x9C, 0x44, 0xF0, 0xA0, 0x94, 0xA5, 0x44, 0xF0, + 0xA0, 0x95, 0x8B, 0x44, 0xF0, 0xA0, 0x98, 0xBA, + 0x44, 0xF0, 0xA0, 0xA0, 0x84, 0x44, 0xF0, 0xA0, + 0xA3, 0x9E, 0x44, 0xF0, 0xA0, 0xA8, 0xAC, 0x44, + 0xF0, 0xA0, 0xAD, 0xA3, 0x44, 0xF0, 0xA1, 0x93, + 0xA4, 0x44, 0xF0, 0xA1, 0x9A, 0xA8, 0x44, 0xF0, + // Bytes 1680 - 16bf + 0xA1, 0x9B, 0xAA, 0x44, 0xF0, 0xA1, 0xA7, 0x88, + 0x44, 0xF0, 0xA1, 0xAC, 0x98, 0x44, 0xF0, 0xA1, + 0xB4, 0x8B, 0x44, 0xF0, 0xA1, 0xB7, 0xA4, 0x44, + 0xF0, 0xA1, 0xB7, 0xA6, 0x44, 0xF0, 0xA2, 0x86, + 
0x83, 0x44, 0xF0, 0xA2, 0x86, 0x9F, 0x44, 0xF0, + 0xA2, 0x8C, 0xB1, 0x44, 0xF0, 0xA2, 0x9B, 0x94, + 0x44, 0xF0, 0xA2, 0xA1, 0x84, 0x44, 0xF0, 0xA2, + 0xA1, 0x8A, 0x44, 0xF0, 0xA2, 0xAC, 0x8C, 0x44, + // Bytes 16c0 - 16ff + 0xF0, 0xA2, 0xAF, 0xB1, 0x44, 0xF0, 0xA3, 0x80, + 0x8A, 0x44, 0xF0, 0xA3, 0x8A, 0xB8, 0x44, 0xF0, + 0xA3, 0x8D, 0x9F, 0x44, 0xF0, 0xA3, 0x8E, 0x93, + 0x44, 0xF0, 0xA3, 0x8E, 0x9C, 0x44, 0xF0, 0xA3, + 0x8F, 0x83, 0x44, 0xF0, 0xA3, 0x8F, 0x95, 0x44, + 0xF0, 0xA3, 0x91, 0xAD, 0x44, 0xF0, 0xA3, 0x9A, + 0xA3, 0x44, 0xF0, 0xA3, 0xA2, 0xA7, 0x44, 0xF0, + 0xA3, 0xAA, 0x8D, 0x44, 0xF0, 0xA3, 0xAB, 0xBA, + // Bytes 1700 - 173f + 0x44, 0xF0, 0xA3, 0xB2, 0xBC, 0x44, 0xF0, 0xA3, + 0xB4, 0x9E, 0x44, 0xF0, 0xA3, 0xBB, 0x91, 0x44, + 0xF0, 0xA3, 0xBD, 0x9E, 0x44, 0xF0, 0xA3, 0xBE, + 0x8E, 0x44, 0xF0, 0xA4, 0x89, 0xA3, 0x44, 0xF0, + 0xA4, 0x8B, 0xAE, 0x44, 0xF0, 0xA4, 0x8E, 0xAB, + 0x44, 0xF0, 0xA4, 0x98, 0x88, 0x44, 0xF0, 0xA4, + 0x9C, 0xB5, 0x44, 0xF0, 0xA4, 0xA0, 0x94, 0x44, + 0xF0, 0xA4, 0xB0, 0xB6, 0x44, 0xF0, 0xA4, 0xB2, + // Bytes 1740 - 177f + 0x92, 0x44, 0xF0, 0xA4, 0xBE, 0xA1, 0x44, 0xF0, + 0xA4, 0xBE, 0xB8, 0x44, 0xF0, 0xA5, 0x81, 0x84, + 0x44, 0xF0, 0xA5, 0x83, 0xB2, 0x44, 0xF0, 0xA5, + 0x83, 0xB3, 0x44, 0xF0, 0xA5, 0x84, 0x99, 0x44, + 0xF0, 0xA5, 0x84, 0xB3, 0x44, 0xF0, 0xA5, 0x89, + 0x89, 0x44, 0xF0, 0xA5, 0x90, 0x9D, 0x44, 0xF0, + 0xA5, 0x98, 0xA6, 0x44, 0xF0, 0xA5, 0x9A, 0x9A, + 0x44, 0xF0, 0xA5, 0x9B, 0x85, 0x44, 0xF0, 0xA5, + // Bytes 1780 - 17bf + 0xA5, 0xBC, 0x44, 0xF0, 0xA5, 0xAA, 0xA7, 0x44, + 0xF0, 0xA5, 0xAE, 0xAB, 0x44, 0xF0, 0xA5, 0xB2, + 0x80, 0x44, 0xF0, 0xA5, 0xB3, 0x90, 0x44, 0xF0, + 0xA5, 0xBE, 0x86, 0x44, 0xF0, 0xA6, 0x87, 0x9A, + 0x44, 0xF0, 0xA6, 0x88, 0xA8, 0x44, 0xF0, 0xA6, + 0x89, 0x87, 0x44, 0xF0, 0xA6, 0x8B, 0x99, 0x44, + 0xF0, 0xA6, 0x8C, 0xBE, 0x44, 0xF0, 0xA6, 0x93, + 0x9A, 0x44, 0xF0, 0xA6, 0x94, 0xA3, 0x44, 0xF0, + // Bytes 17c0 - 17ff + 0xA6, 0x96, 0xA8, 0x44, 0xF0, 0xA6, 0x9E, 0xA7, + 0x44, 0xF0, 0xA6, 0x9E, 0xB5, 0x44, 0xF0, 0xA6, + 0xAC, 0xBC, 0x44, 0xF0, 0xA6, 0xB0, 0xB6, 0x44, + 0xF0, 0xA6, 0xB3, 0x95, 0x44, 0xF0, 0xA6, 0xB5, + 0xAB, 0x44, 0xF0, 0xA6, 0xBC, 0xAC, 0x44, 0xF0, + 0xA6, 0xBE, 0xB1, 0x44, 0xF0, 0xA7, 0x83, 0x92, + 0x44, 0xF0, 0xA7, 0x8F, 0x8A, 0x44, 0xF0, 0xA7, + 0x99, 0xA7, 0x44, 0xF0, 0xA7, 0xA2, 0xAE, 0x44, + // Bytes 1800 - 183f + 0xF0, 0xA7, 0xA5, 0xA6, 0x44, 0xF0, 0xA7, 0xB2, + 0xA8, 0x44, 0xF0, 0xA7, 0xBB, 0x93, 0x44, 0xF0, + 0xA7, 0xBC, 0xAF, 0x44, 0xF0, 0xA8, 0x97, 0x92, + 0x44, 0xF0, 0xA8, 0x97, 0xAD, 0x44, 0xF0, 0xA8, + 0x9C, 0xAE, 0x44, 0xF0, 0xA8, 0xAF, 0xBA, 0x44, + 0xF0, 0xA8, 0xB5, 0xB7, 0x44, 0xF0, 0xA9, 0x85, + 0x85, 0x44, 0xF0, 0xA9, 0x87, 0x9F, 0x44, 0xF0, + 0xA9, 0x88, 0x9A, 0x44, 0xF0, 0xA9, 0x90, 0x8A, + // Bytes 1840 - 187f + 0x44, 0xF0, 0xA9, 0x92, 0x96, 0x44, 0xF0, 0xA9, + 0x96, 0xB6, 0x44, 0xF0, 0xA9, 0xAC, 0xB0, 0x44, + 0xF0, 0xAA, 0x83, 0x8E, 0x44, 0xF0, 0xAA, 0x84, + 0x85, 0x44, 0xF0, 0xAA, 0x88, 0x8E, 0x44, 0xF0, + 0xAA, 0x8A, 0x91, 0x44, 0xF0, 0xAA, 0x8E, 0x92, + 0x44, 0xF0, 0xAA, 0x98, 0x80, 0x42, 0x21, 0x21, + 0x42, 0x21, 0x3F, 0x42, 0x2E, 0x2E, 0x42, 0x30, + 0x2C, 0x42, 0x30, 0x2E, 0x42, 0x31, 0x2C, 0x42, + // Bytes 1880 - 18bf + 0x31, 0x2E, 0x42, 0x31, 0x30, 0x42, 0x31, 0x31, + 0x42, 0x31, 0x32, 0x42, 0x31, 0x33, 0x42, 0x31, + 0x34, 0x42, 0x31, 0x35, 0x42, 0x31, 0x36, 0x42, + 0x31, 0x37, 0x42, 0x31, 0x38, 0x42, 0x31, 0x39, + 0x42, 0x32, 0x2C, 0x42, 0x32, 0x2E, 0x42, 0x32, + 0x30, 0x42, 0x32, 0x31, 0x42, 0x32, 0x32, 0x42, + 0x32, 0x33, 0x42, 0x32, 0x34, 0x42, 0x32, 0x35, + 0x42, 0x32, 0x36, 
0x42, 0x32, 0x37, 0x42, 0x32, + // Bytes 18c0 - 18ff + 0x38, 0x42, 0x32, 0x39, 0x42, 0x33, 0x2C, 0x42, + 0x33, 0x2E, 0x42, 0x33, 0x30, 0x42, 0x33, 0x31, + 0x42, 0x33, 0x32, 0x42, 0x33, 0x33, 0x42, 0x33, + 0x34, 0x42, 0x33, 0x35, 0x42, 0x33, 0x36, 0x42, + 0x33, 0x37, 0x42, 0x33, 0x38, 0x42, 0x33, 0x39, + 0x42, 0x34, 0x2C, 0x42, 0x34, 0x2E, 0x42, 0x34, + 0x30, 0x42, 0x34, 0x31, 0x42, 0x34, 0x32, 0x42, + 0x34, 0x33, 0x42, 0x34, 0x34, 0x42, 0x34, 0x35, + // Bytes 1900 - 193f + 0x42, 0x34, 0x36, 0x42, 0x34, 0x37, 0x42, 0x34, + 0x38, 0x42, 0x34, 0x39, 0x42, 0x35, 0x2C, 0x42, + 0x35, 0x2E, 0x42, 0x35, 0x30, 0x42, 0x36, 0x2C, + 0x42, 0x36, 0x2E, 0x42, 0x37, 0x2C, 0x42, 0x37, + 0x2E, 0x42, 0x38, 0x2C, 0x42, 0x38, 0x2E, 0x42, + 0x39, 0x2C, 0x42, 0x39, 0x2E, 0x42, 0x3D, 0x3D, + 0x42, 0x3F, 0x21, 0x42, 0x3F, 0x3F, 0x42, 0x41, + 0x55, 0x42, 0x42, 0x71, 0x42, 0x43, 0x44, 0x42, + // Bytes 1940 - 197f + 0x44, 0x4A, 0x42, 0x44, 0x5A, 0x42, 0x44, 0x7A, + 0x42, 0x47, 0x42, 0x42, 0x47, 0x79, 0x42, 0x48, + 0x50, 0x42, 0x48, 0x56, 0x42, 0x48, 0x67, 0x42, + 0x48, 0x7A, 0x42, 0x49, 0x49, 0x42, 0x49, 0x4A, + 0x42, 0x49, 0x55, 0x42, 0x49, 0x56, 0x42, 0x49, + 0x58, 0x42, 0x4B, 0x42, 0x42, 0x4B, 0x4B, 0x42, + 0x4B, 0x4D, 0x42, 0x4C, 0x4A, 0x42, 0x4C, 0x6A, + 0x42, 0x4D, 0x42, 0x42, 0x4D, 0x43, 0x42, 0x4D, + // Bytes 1980 - 19bf + 0x44, 0x42, 0x4D, 0x56, 0x42, 0x4D, 0x57, 0x42, + 0x4E, 0x4A, 0x42, 0x4E, 0x6A, 0x42, 0x4E, 0x6F, + 0x42, 0x50, 0x48, 0x42, 0x50, 0x52, 0x42, 0x50, + 0x61, 0x42, 0x52, 0x73, 0x42, 0x53, 0x44, 0x42, + 0x53, 0x4D, 0x42, 0x53, 0x53, 0x42, 0x53, 0x76, + 0x42, 0x54, 0x4D, 0x42, 0x56, 0x49, 0x42, 0x57, + 0x43, 0x42, 0x57, 0x5A, 0x42, 0x57, 0x62, 0x42, + 0x58, 0x49, 0x42, 0x63, 0x63, 0x42, 0x63, 0x64, + // Bytes 19c0 - 19ff + 0x42, 0x63, 0x6D, 0x42, 0x64, 0x42, 0x42, 0x64, + 0x61, 0x42, 0x64, 0x6C, 0x42, 0x64, 0x6D, 0x42, + 0x64, 0x7A, 0x42, 0x65, 0x56, 0x42, 0x66, 0x66, + 0x42, 0x66, 0x69, 0x42, 0x66, 0x6C, 0x42, 0x66, + 0x6D, 0x42, 0x68, 0x61, 0x42, 0x69, 0x69, 0x42, + 0x69, 0x6A, 0x42, 0x69, 0x6E, 0x42, 0x69, 0x76, + 0x42, 0x69, 0x78, 0x42, 0x6B, 0x41, 0x42, 0x6B, + 0x56, 0x42, 0x6B, 0x57, 0x42, 0x6B, 0x67, 0x42, + // Bytes 1a00 - 1a3f + 0x6B, 0x6C, 0x42, 0x6B, 0x6D, 0x42, 0x6B, 0x74, + 0x42, 0x6C, 0x6A, 0x42, 0x6C, 0x6D, 0x42, 0x6C, + 0x6E, 0x42, 0x6C, 0x78, 0x42, 0x6D, 0x32, 0x42, + 0x6D, 0x33, 0x42, 0x6D, 0x41, 0x42, 0x6D, 0x56, + 0x42, 0x6D, 0x57, 0x42, 0x6D, 0x62, 0x42, 0x6D, + 0x67, 0x42, 0x6D, 0x6C, 0x42, 0x6D, 0x6D, 0x42, + 0x6D, 0x73, 0x42, 0x6E, 0x41, 0x42, 0x6E, 0x46, + 0x42, 0x6E, 0x56, 0x42, 0x6E, 0x57, 0x42, 0x6E, + // Bytes 1a40 - 1a7f + 0x6A, 0x42, 0x6E, 0x6D, 0x42, 0x6E, 0x73, 0x42, + 0x6F, 0x56, 0x42, 0x70, 0x41, 0x42, 0x70, 0x46, + 0x42, 0x70, 0x56, 0x42, 0x70, 0x57, 0x42, 0x70, + 0x63, 0x42, 0x70, 0x73, 0x42, 0x73, 0x72, 0x42, + 0x73, 0x74, 0x42, 0x76, 0x69, 0x42, 0x78, 0x69, + 0x43, 0x28, 0x31, 0x29, 0x43, 0x28, 0x32, 0x29, + 0x43, 0x28, 0x33, 0x29, 0x43, 0x28, 0x34, 0x29, + 0x43, 0x28, 0x35, 0x29, 0x43, 0x28, 0x36, 0x29, + // Bytes 1a80 - 1abf + 0x43, 0x28, 0x37, 0x29, 0x43, 0x28, 0x38, 0x29, + 0x43, 0x28, 0x39, 0x29, 0x43, 0x28, 0x41, 0x29, + 0x43, 0x28, 0x42, 0x29, 0x43, 0x28, 0x43, 0x29, + 0x43, 0x28, 0x44, 0x29, 0x43, 0x28, 0x45, 0x29, + 0x43, 0x28, 0x46, 0x29, 0x43, 0x28, 0x47, 0x29, + 0x43, 0x28, 0x48, 0x29, 0x43, 0x28, 0x49, 0x29, + 0x43, 0x28, 0x4A, 0x29, 0x43, 0x28, 0x4B, 0x29, + 0x43, 0x28, 0x4C, 0x29, 0x43, 0x28, 0x4D, 0x29, + // Bytes 1ac0 - 1aff + 0x43, 0x28, 0x4E, 0x29, 0x43, 0x28, 0x4F, 0x29, + 0x43, 0x28, 0x50, 0x29, 0x43, 0x28, 0x51, 0x29, + 0x43, 0x28, 
0x52, 0x29, 0x43, 0x28, 0x53, 0x29, + 0x43, 0x28, 0x54, 0x29, 0x43, 0x28, 0x55, 0x29, + 0x43, 0x28, 0x56, 0x29, 0x43, 0x28, 0x57, 0x29, + 0x43, 0x28, 0x58, 0x29, 0x43, 0x28, 0x59, 0x29, + 0x43, 0x28, 0x5A, 0x29, 0x43, 0x28, 0x61, 0x29, + 0x43, 0x28, 0x62, 0x29, 0x43, 0x28, 0x63, 0x29, + // Bytes 1b00 - 1b3f + 0x43, 0x28, 0x64, 0x29, 0x43, 0x28, 0x65, 0x29, + 0x43, 0x28, 0x66, 0x29, 0x43, 0x28, 0x67, 0x29, + 0x43, 0x28, 0x68, 0x29, 0x43, 0x28, 0x69, 0x29, + 0x43, 0x28, 0x6A, 0x29, 0x43, 0x28, 0x6B, 0x29, + 0x43, 0x28, 0x6C, 0x29, 0x43, 0x28, 0x6D, 0x29, + 0x43, 0x28, 0x6E, 0x29, 0x43, 0x28, 0x6F, 0x29, + 0x43, 0x28, 0x70, 0x29, 0x43, 0x28, 0x71, 0x29, + 0x43, 0x28, 0x72, 0x29, 0x43, 0x28, 0x73, 0x29, + // Bytes 1b40 - 1b7f + 0x43, 0x28, 0x74, 0x29, 0x43, 0x28, 0x75, 0x29, + 0x43, 0x28, 0x76, 0x29, 0x43, 0x28, 0x77, 0x29, + 0x43, 0x28, 0x78, 0x29, 0x43, 0x28, 0x79, 0x29, + 0x43, 0x28, 0x7A, 0x29, 0x43, 0x2E, 0x2E, 0x2E, + 0x43, 0x31, 0x30, 0x2E, 0x43, 0x31, 0x31, 0x2E, + 0x43, 0x31, 0x32, 0x2E, 0x43, 0x31, 0x33, 0x2E, + 0x43, 0x31, 0x34, 0x2E, 0x43, 0x31, 0x35, 0x2E, + 0x43, 0x31, 0x36, 0x2E, 0x43, 0x31, 0x37, 0x2E, + // Bytes 1b80 - 1bbf + 0x43, 0x31, 0x38, 0x2E, 0x43, 0x31, 0x39, 0x2E, + 0x43, 0x32, 0x30, 0x2E, 0x43, 0x3A, 0x3A, 0x3D, + 0x43, 0x3D, 0x3D, 0x3D, 0x43, 0x43, 0x6F, 0x2E, + 0x43, 0x46, 0x41, 0x58, 0x43, 0x47, 0x48, 0x7A, + 0x43, 0x47, 0x50, 0x61, 0x43, 0x49, 0x49, 0x49, + 0x43, 0x4C, 0x54, 0x44, 0x43, 0x4C, 0xC2, 0xB7, + 0x43, 0x4D, 0x48, 0x7A, 0x43, 0x4D, 0x50, 0x61, + 0x43, 0x4D, 0xCE, 0xA9, 0x43, 0x50, 0x50, 0x4D, + // Bytes 1bc0 - 1bff + 0x43, 0x50, 0x50, 0x56, 0x43, 0x50, 0x54, 0x45, + 0x43, 0x54, 0x45, 0x4C, 0x43, 0x54, 0x48, 0x7A, + 0x43, 0x56, 0x49, 0x49, 0x43, 0x58, 0x49, 0x49, + 0x43, 0x61, 0x2F, 0x63, 0x43, 0x61, 0x2F, 0x73, + 0x43, 0x61, 0xCA, 0xBE, 0x43, 0x62, 0x61, 0x72, + 0x43, 0x63, 0x2F, 0x6F, 0x43, 0x63, 0x2F, 0x75, + 0x43, 0x63, 0x61, 0x6C, 0x43, 0x63, 0x6D, 0x32, + 0x43, 0x63, 0x6D, 0x33, 0x43, 0x64, 0x6D, 0x32, + // Bytes 1c00 - 1c3f + 0x43, 0x64, 0x6D, 0x33, 0x43, 0x65, 0x72, 0x67, + 0x43, 0x66, 0x66, 0x69, 0x43, 0x66, 0x66, 0x6C, + 0x43, 0x67, 0x61, 0x6C, 0x43, 0x68, 0x50, 0x61, + 0x43, 0x69, 0x69, 0x69, 0x43, 0x6B, 0x48, 0x7A, + 0x43, 0x6B, 0x50, 0x61, 0x43, 0x6B, 0x6D, 0x32, + 0x43, 0x6B, 0x6D, 0x33, 0x43, 0x6B, 0xCE, 0xA9, + 0x43, 0x6C, 0x6F, 0x67, 0x43, 0x6C, 0xC2, 0xB7, + 0x43, 0x6D, 0x69, 0x6C, 0x43, 0x6D, 0x6D, 0x32, + // Bytes 1c40 - 1c7f + 0x43, 0x6D, 0x6D, 0x33, 0x43, 0x6D, 0x6F, 0x6C, + 0x43, 0x72, 0x61, 0x64, 0x43, 0x76, 0x69, 0x69, + 0x43, 0x78, 0x69, 0x69, 0x43, 0xC2, 0xB0, 0x43, + 0x43, 0xC2, 0xB0, 0x46, 0x43, 0xCA, 0xBC, 0x6E, + 0x43, 0xCE, 0xBC, 0x41, 0x43, 0xCE, 0xBC, 0x46, + 0x43, 0xCE, 0xBC, 0x56, 0x43, 0xCE, 0xBC, 0x57, + 0x43, 0xCE, 0xBC, 0x67, 0x43, 0xCE, 0xBC, 0x6C, + 0x43, 0xCE, 0xBC, 0x6D, 0x43, 0xCE, 0xBC, 0x73, + // Bytes 1c80 - 1cbf + 0x44, 0x28, 0x31, 0x30, 0x29, 0x44, 0x28, 0x31, + 0x31, 0x29, 0x44, 0x28, 0x31, 0x32, 0x29, 0x44, + 0x28, 0x31, 0x33, 0x29, 0x44, 0x28, 0x31, 0x34, + 0x29, 0x44, 0x28, 0x31, 0x35, 0x29, 0x44, 0x28, + 0x31, 0x36, 0x29, 0x44, 0x28, 0x31, 0x37, 0x29, + 0x44, 0x28, 0x31, 0x38, 0x29, 0x44, 0x28, 0x31, + 0x39, 0x29, 0x44, 0x28, 0x32, 0x30, 0x29, 0x44, + 0x30, 0xE7, 0x82, 0xB9, 0x44, 0x31, 0xE2, 0x81, + // Bytes 1cc0 - 1cff + 0x84, 0x44, 0x31, 0xE6, 0x97, 0xA5, 0x44, 0x31, + 0xE6, 0x9C, 0x88, 0x44, 0x31, 0xE7, 0x82, 0xB9, + 0x44, 0x32, 0xE6, 0x97, 0xA5, 0x44, 0x32, 0xE6, + 0x9C, 0x88, 0x44, 0x32, 0xE7, 0x82, 0xB9, 0x44, + 0x33, 0xE6, 0x97, 0xA5, 0x44, 0x33, 0xE6, 0x9C, + 0x88, 0x44, 0x33, 0xE7, 0x82, 
0xB9, 0x44, 0x34, + 0xE6, 0x97, 0xA5, 0x44, 0x34, 0xE6, 0x9C, 0x88, + 0x44, 0x34, 0xE7, 0x82, 0xB9, 0x44, 0x35, 0xE6, + // Bytes 1d00 - 1d3f + 0x97, 0xA5, 0x44, 0x35, 0xE6, 0x9C, 0x88, 0x44, + 0x35, 0xE7, 0x82, 0xB9, 0x44, 0x36, 0xE6, 0x97, + 0xA5, 0x44, 0x36, 0xE6, 0x9C, 0x88, 0x44, 0x36, + 0xE7, 0x82, 0xB9, 0x44, 0x37, 0xE6, 0x97, 0xA5, + 0x44, 0x37, 0xE6, 0x9C, 0x88, 0x44, 0x37, 0xE7, + 0x82, 0xB9, 0x44, 0x38, 0xE6, 0x97, 0xA5, 0x44, + 0x38, 0xE6, 0x9C, 0x88, 0x44, 0x38, 0xE7, 0x82, + 0xB9, 0x44, 0x39, 0xE6, 0x97, 0xA5, 0x44, 0x39, + // Bytes 1d40 - 1d7f + 0xE6, 0x9C, 0x88, 0x44, 0x39, 0xE7, 0x82, 0xB9, + 0x44, 0x56, 0x49, 0x49, 0x49, 0x44, 0x61, 0x2E, + 0x6D, 0x2E, 0x44, 0x6B, 0x63, 0x61, 0x6C, 0x44, + 0x70, 0x2E, 0x6D, 0x2E, 0x44, 0x76, 0x69, 0x69, + 0x69, 0x44, 0xD5, 0xA5, 0xD6, 0x82, 0x44, 0xD5, + 0xB4, 0xD5, 0xA5, 0x44, 0xD5, 0xB4, 0xD5, 0xAB, + 0x44, 0xD5, 0xB4, 0xD5, 0xAD, 0x44, 0xD5, 0xB4, + 0xD5, 0xB6, 0x44, 0xD5, 0xBE, 0xD5, 0xB6, 0x44, + // Bytes 1d80 - 1dbf + 0xD7, 0x90, 0xD7, 0x9C, 0x44, 0xD8, 0xA7, 0xD9, + 0xB4, 0x44, 0xD8, 0xA8, 0xD8, 0xAC, 0x44, 0xD8, + 0xA8, 0xD8, 0xAD, 0x44, 0xD8, 0xA8, 0xD8, 0xAE, + 0x44, 0xD8, 0xA8, 0xD8, 0xB1, 0x44, 0xD8, 0xA8, + 0xD8, 0xB2, 0x44, 0xD8, 0xA8, 0xD9, 0x85, 0x44, + 0xD8, 0xA8, 0xD9, 0x86, 0x44, 0xD8, 0xA8, 0xD9, + 0x87, 0x44, 0xD8, 0xA8, 0xD9, 0x89, 0x44, 0xD8, + 0xA8, 0xD9, 0x8A, 0x44, 0xD8, 0xAA, 0xD8, 0xAC, + // Bytes 1dc0 - 1dff + 0x44, 0xD8, 0xAA, 0xD8, 0xAD, 0x44, 0xD8, 0xAA, + 0xD8, 0xAE, 0x44, 0xD8, 0xAA, 0xD8, 0xB1, 0x44, + 0xD8, 0xAA, 0xD8, 0xB2, 0x44, 0xD8, 0xAA, 0xD9, + 0x85, 0x44, 0xD8, 0xAA, 0xD9, 0x86, 0x44, 0xD8, + 0xAA, 0xD9, 0x87, 0x44, 0xD8, 0xAA, 0xD9, 0x89, + 0x44, 0xD8, 0xAA, 0xD9, 0x8A, 0x44, 0xD8, 0xAB, + 0xD8, 0xAC, 0x44, 0xD8, 0xAB, 0xD8, 0xB1, 0x44, + 0xD8, 0xAB, 0xD8, 0xB2, 0x44, 0xD8, 0xAB, 0xD9, + // Bytes 1e00 - 1e3f + 0x85, 0x44, 0xD8, 0xAB, 0xD9, 0x86, 0x44, 0xD8, + 0xAB, 0xD9, 0x87, 0x44, 0xD8, 0xAB, 0xD9, 0x89, + 0x44, 0xD8, 0xAB, 0xD9, 0x8A, 0x44, 0xD8, 0xAC, + 0xD8, 0xAD, 0x44, 0xD8, 0xAC, 0xD9, 0x85, 0x44, + 0xD8, 0xAC, 0xD9, 0x89, 0x44, 0xD8, 0xAC, 0xD9, + 0x8A, 0x44, 0xD8, 0xAD, 0xD8, 0xAC, 0x44, 0xD8, + 0xAD, 0xD9, 0x85, 0x44, 0xD8, 0xAD, 0xD9, 0x89, + 0x44, 0xD8, 0xAD, 0xD9, 0x8A, 0x44, 0xD8, 0xAE, + // Bytes 1e40 - 1e7f + 0xD8, 0xAC, 0x44, 0xD8, 0xAE, 0xD8, 0xAD, 0x44, + 0xD8, 0xAE, 0xD9, 0x85, 0x44, 0xD8, 0xAE, 0xD9, + 0x89, 0x44, 0xD8, 0xAE, 0xD9, 0x8A, 0x44, 0xD8, + 0xB3, 0xD8, 0xAC, 0x44, 0xD8, 0xB3, 0xD8, 0xAD, + 0x44, 0xD8, 0xB3, 0xD8, 0xAE, 0x44, 0xD8, 0xB3, + 0xD8, 0xB1, 0x44, 0xD8, 0xB3, 0xD9, 0x85, 0x44, + 0xD8, 0xB3, 0xD9, 0x87, 0x44, 0xD8, 0xB3, 0xD9, + 0x89, 0x44, 0xD8, 0xB3, 0xD9, 0x8A, 0x44, 0xD8, + // Bytes 1e80 - 1ebf + 0xB4, 0xD8, 0xAC, 0x44, 0xD8, 0xB4, 0xD8, 0xAD, + 0x44, 0xD8, 0xB4, 0xD8, 0xAE, 0x44, 0xD8, 0xB4, + 0xD8, 0xB1, 0x44, 0xD8, 0xB4, 0xD9, 0x85, 0x44, + 0xD8, 0xB4, 0xD9, 0x87, 0x44, 0xD8, 0xB4, 0xD9, + 0x89, 0x44, 0xD8, 0xB4, 0xD9, 0x8A, 0x44, 0xD8, + 0xB5, 0xD8, 0xAD, 0x44, 0xD8, 0xB5, 0xD8, 0xAE, + 0x44, 0xD8, 0xB5, 0xD8, 0xB1, 0x44, 0xD8, 0xB5, + 0xD9, 0x85, 0x44, 0xD8, 0xB5, 0xD9, 0x89, 0x44, + // Bytes 1ec0 - 1eff + 0xD8, 0xB5, 0xD9, 0x8A, 0x44, 0xD8, 0xB6, 0xD8, + 0xAC, 0x44, 0xD8, 0xB6, 0xD8, 0xAD, 0x44, 0xD8, + 0xB6, 0xD8, 0xAE, 0x44, 0xD8, 0xB6, 0xD8, 0xB1, + 0x44, 0xD8, 0xB6, 0xD9, 0x85, 0x44, 0xD8, 0xB6, + 0xD9, 0x89, 0x44, 0xD8, 0xB6, 0xD9, 0x8A, 0x44, + 0xD8, 0xB7, 0xD8, 0xAD, 0x44, 0xD8, 0xB7, 0xD9, + 0x85, 0x44, 0xD8, 0xB7, 0xD9, 0x89, 0x44, 0xD8, + 0xB7, 0xD9, 0x8A, 0x44, 0xD8, 0xB8, 0xD9, 0x85, + // Bytes 1f00 - 1f3f + 0x44, 0xD8, 0xB9, 0xD8, 
0xAC, 0x44, 0xD8, 0xB9, + 0xD9, 0x85, 0x44, 0xD8, 0xB9, 0xD9, 0x89, 0x44, + 0xD8, 0xB9, 0xD9, 0x8A, 0x44, 0xD8, 0xBA, 0xD8, + 0xAC, 0x44, 0xD8, 0xBA, 0xD9, 0x85, 0x44, 0xD8, + 0xBA, 0xD9, 0x89, 0x44, 0xD8, 0xBA, 0xD9, 0x8A, + 0x44, 0xD9, 0x81, 0xD8, 0xAC, 0x44, 0xD9, 0x81, + 0xD8, 0xAD, 0x44, 0xD9, 0x81, 0xD8, 0xAE, 0x44, + 0xD9, 0x81, 0xD9, 0x85, 0x44, 0xD9, 0x81, 0xD9, + // Bytes 1f40 - 1f7f + 0x89, 0x44, 0xD9, 0x81, 0xD9, 0x8A, 0x44, 0xD9, + 0x82, 0xD8, 0xAD, 0x44, 0xD9, 0x82, 0xD9, 0x85, + 0x44, 0xD9, 0x82, 0xD9, 0x89, 0x44, 0xD9, 0x82, + 0xD9, 0x8A, 0x44, 0xD9, 0x83, 0xD8, 0xA7, 0x44, + 0xD9, 0x83, 0xD8, 0xAC, 0x44, 0xD9, 0x83, 0xD8, + 0xAD, 0x44, 0xD9, 0x83, 0xD8, 0xAE, 0x44, 0xD9, + 0x83, 0xD9, 0x84, 0x44, 0xD9, 0x83, 0xD9, 0x85, + 0x44, 0xD9, 0x83, 0xD9, 0x89, 0x44, 0xD9, 0x83, + // Bytes 1f80 - 1fbf + 0xD9, 0x8A, 0x44, 0xD9, 0x84, 0xD8, 0xA7, 0x44, + 0xD9, 0x84, 0xD8, 0xAC, 0x44, 0xD9, 0x84, 0xD8, + 0xAD, 0x44, 0xD9, 0x84, 0xD8, 0xAE, 0x44, 0xD9, + 0x84, 0xD9, 0x85, 0x44, 0xD9, 0x84, 0xD9, 0x87, + 0x44, 0xD9, 0x84, 0xD9, 0x89, 0x44, 0xD9, 0x84, + 0xD9, 0x8A, 0x44, 0xD9, 0x85, 0xD8, 0xA7, 0x44, + 0xD9, 0x85, 0xD8, 0xAC, 0x44, 0xD9, 0x85, 0xD8, + 0xAD, 0x44, 0xD9, 0x85, 0xD8, 0xAE, 0x44, 0xD9, + // Bytes 1fc0 - 1fff + 0x85, 0xD9, 0x85, 0x44, 0xD9, 0x85, 0xD9, 0x89, + 0x44, 0xD9, 0x85, 0xD9, 0x8A, 0x44, 0xD9, 0x86, + 0xD8, 0xAC, 0x44, 0xD9, 0x86, 0xD8, 0xAD, 0x44, + 0xD9, 0x86, 0xD8, 0xAE, 0x44, 0xD9, 0x86, 0xD8, + 0xB1, 0x44, 0xD9, 0x86, 0xD8, 0xB2, 0x44, 0xD9, + 0x86, 0xD9, 0x85, 0x44, 0xD9, 0x86, 0xD9, 0x86, + 0x44, 0xD9, 0x86, 0xD9, 0x87, 0x44, 0xD9, 0x86, + 0xD9, 0x89, 0x44, 0xD9, 0x86, 0xD9, 0x8A, 0x44, + // Bytes 2000 - 203f + 0xD9, 0x87, 0xD8, 0xAC, 0x44, 0xD9, 0x87, 0xD9, + 0x85, 0x44, 0xD9, 0x87, 0xD9, 0x89, 0x44, 0xD9, + 0x87, 0xD9, 0x8A, 0x44, 0xD9, 0x88, 0xD9, 0xB4, + 0x44, 0xD9, 0x8A, 0xD8, 0xAC, 0x44, 0xD9, 0x8A, + 0xD8, 0xAD, 0x44, 0xD9, 0x8A, 0xD8, 0xAE, 0x44, + 0xD9, 0x8A, 0xD8, 0xB1, 0x44, 0xD9, 0x8A, 0xD8, + 0xB2, 0x44, 0xD9, 0x8A, 0xD9, 0x85, 0x44, 0xD9, + 0x8A, 0xD9, 0x86, 0x44, 0xD9, 0x8A, 0xD9, 0x87, + // Bytes 2040 - 207f + 0x44, 0xD9, 0x8A, 0xD9, 0x89, 0x44, 0xD9, 0x8A, + 0xD9, 0x8A, 0x44, 0xD9, 0x8A, 0xD9, 0xB4, 0x44, + 0xDB, 0x87, 0xD9, 0xB4, 0x45, 0x28, 0xE1, 0x84, + 0x80, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x82, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x83, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x85, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x86, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x87, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x89, 0x29, 0x45, 0x28, + // Bytes 2080 - 20bf + 0xE1, 0x84, 0x8B, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x8C, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x8E, 0x29, + 0x45, 0x28, 0xE1, 0x84, 0x8F, 0x29, 0x45, 0x28, + 0xE1, 0x84, 0x90, 0x29, 0x45, 0x28, 0xE1, 0x84, + 0x91, 0x29, 0x45, 0x28, 0xE1, 0x84, 0x92, 0x29, + 0x45, 0x28, 0xE4, 0xB8, 0x80, 0x29, 0x45, 0x28, + 0xE4, 0xB8, 0x83, 0x29, 0x45, 0x28, 0xE4, 0xB8, + 0x89, 0x29, 0x45, 0x28, 0xE4, 0xB9, 0x9D, 0x29, + // Bytes 20c0 - 20ff + 0x45, 0x28, 0xE4, 0xBA, 0x8C, 0x29, 0x45, 0x28, + 0xE4, 0xBA, 0x94, 0x29, 0x45, 0x28, 0xE4, 0xBB, + 0xA3, 0x29, 0x45, 0x28, 0xE4, 0xBC, 0x81, 0x29, + 0x45, 0x28, 0xE4, 0xBC, 0x91, 0x29, 0x45, 0x28, + 0xE5, 0x85, 0xAB, 0x29, 0x45, 0x28, 0xE5, 0x85, + 0xAD, 0x29, 0x45, 0x28, 0xE5, 0x8A, 0xB4, 0x29, + 0x45, 0x28, 0xE5, 0x8D, 0x81, 0x29, 0x45, 0x28, + 0xE5, 0x8D, 0x94, 0x29, 0x45, 0x28, 0xE5, 0x90, + // Bytes 2100 - 213f + 0x8D, 0x29, 0x45, 0x28, 0xE5, 0x91, 0xBC, 0x29, + 0x45, 0x28, 0xE5, 0x9B, 0x9B, 0x29, 0x45, 0x28, + 0xE5, 0x9C, 0x9F, 0x29, 0x45, 0x28, 0xE5, 0xAD, + 0xA6, 0x29, 0x45, 0x28, 0xE6, 0x97, 0xA5, 
0x29, + 0x45, 0x28, 0xE6, 0x9C, 0x88, 0x29, 0x45, 0x28, + 0xE6, 0x9C, 0x89, 0x29, 0x45, 0x28, 0xE6, 0x9C, + 0xA8, 0x29, 0x45, 0x28, 0xE6, 0xA0, 0xAA, 0x29, + 0x45, 0x28, 0xE6, 0xB0, 0xB4, 0x29, 0x45, 0x28, + // Bytes 2140 - 217f + 0xE7, 0x81, 0xAB, 0x29, 0x45, 0x28, 0xE7, 0x89, + 0xB9, 0x29, 0x45, 0x28, 0xE7, 0x9B, 0xA3, 0x29, + 0x45, 0x28, 0xE7, 0xA4, 0xBE, 0x29, 0x45, 0x28, + 0xE7, 0xA5, 0x9D, 0x29, 0x45, 0x28, 0xE7, 0xA5, + 0xAD, 0x29, 0x45, 0x28, 0xE8, 0x87, 0xAA, 0x29, + 0x45, 0x28, 0xE8, 0x87, 0xB3, 0x29, 0x45, 0x28, + 0xE8, 0xB2, 0xA1, 0x29, 0x45, 0x28, 0xE8, 0xB3, + 0x87, 0x29, 0x45, 0x28, 0xE9, 0x87, 0x91, 0x29, + // Bytes 2180 - 21bf + 0x45, 0x30, 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x31, 0x30, 0xE6, + 0x9C, 0x88, 0x45, 0x31, 0x30, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x31, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x31, 0xE6, 0x9C, 0x88, 0x45, 0x31, 0x31, 0xE7, + 0x82, 0xB9, 0x45, 0x31, 0x32, 0xE6, 0x97, 0xA5, + 0x45, 0x31, 0x32, 0xE6, 0x9C, 0x88, 0x45, 0x31, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x33, 0xE6, + // Bytes 21c0 - 21ff + 0x97, 0xA5, 0x45, 0x31, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x35, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x35, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x36, 0xE6, 0x97, 0xA5, 0x45, 0x31, + 0x36, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x37, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x37, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x31, + // Bytes 2200 - 223f + 0x38, 0xE7, 0x82, 0xB9, 0x45, 0x31, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x31, 0x39, 0xE7, 0x82, 0xB9, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x32, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x33, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x34, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x35, + 0x45, 0x31, 0xE2, 0x81, 0x84, 0x36, 0x45, 0x31, + 0xE2, 0x81, 0x84, 0x37, 0x45, 0x31, 0xE2, 0x81, + 0x84, 0x38, 0x45, 0x31, 0xE2, 0x81, 0x84, 0x39, + // Bytes 2240 - 227f + 0x45, 0x32, 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x30, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x31, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x32, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x32, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x33, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0x33, 0xE7, 0x82, 0xB9, + 0x45, 0x32, 0x34, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x34, 0xE7, 0x82, 0xB9, 0x45, 0x32, 0x35, 0xE6, + // Bytes 2280 - 22bf + 0x97, 0xA5, 0x45, 0x32, 0x36, 0xE6, 0x97, 0xA5, + 0x45, 0x32, 0x37, 0xE6, 0x97, 0xA5, 0x45, 0x32, + 0x38, 0xE6, 0x97, 0xA5, 0x45, 0x32, 0x39, 0xE6, + 0x97, 0xA5, 0x45, 0x32, 0xE2, 0x81, 0x84, 0x33, + 0x45, 0x32, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + 0x30, 0xE6, 0x97, 0xA5, 0x45, 0x33, 0x31, 0xE6, + 0x97, 0xA5, 0x45, 0x33, 0xE2, 0x81, 0x84, 0x34, + 0x45, 0x33, 0xE2, 0x81, 0x84, 0x35, 0x45, 0x33, + // Bytes 22c0 - 22ff + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x34, 0xE2, 0x81, + 0x84, 0x35, 0x45, 0x35, 0xE2, 0x81, 0x84, 0x36, + 0x45, 0x35, 0xE2, 0x81, 0x84, 0x38, 0x45, 0x37, + 0xE2, 0x81, 0x84, 0x38, 0x45, 0x41, 0xE2, 0x88, + 0x95, 0x6D, 0x45, 0x56, 0xE2, 0x88, 0x95, 0x6D, + 0x45, 0x6D, 0xE2, 0x88, 0x95, 0x73, 0x46, 0x31, + 0xE2, 0x81, 0x84, 0x31, 0x30, 0x46, 0x43, 0xE2, + 0x88, 0x95, 0x6B, 0x67, 0x46, 0x6D, 0xE2, 0x88, + // Bytes 2300 - 233f + 0x95, 0x73, 0x32, 0x46, 0xD8, 0xA8, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xA8, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x85, + 0x46, 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x89, 0x46, + 0xD8, 0xAA, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAA, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, 0xD8, 0xAA, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD8, 0xAA, 0xD8, + 0xAE, 0xD9, 
0x85, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, + // Bytes 2340 - 237f + 0xD9, 0x89, 0x46, 0xD8, 0xAA, 0xD8, 0xAE, 0xD9, + 0x8A, 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAC, + 0x46, 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAD, 0x46, + 0xD8, 0xAA, 0xD9, 0x85, 0xD8, 0xAE, 0x46, 0xD8, + 0xAA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAA, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD8, 0xAC, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD8, + // Bytes 2380 - 23bf + 0xAD, 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x89, + 0x46, 0xD8, 0xAC, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD8, 0xAD, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD8, + 0xAD, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, 0xAD, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB3, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, 0xD8, 0xAC, + 0xD9, 0x89, 0x46, 0xD8, 0xB3, 0xD8, 0xAD, 0xD8, + 0xAC, 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x89, + // Bytes 23c0 - 23ff + 0x46, 0xD8, 0xB3, 0xD8, 0xAE, 0xD9, 0x8A, 0x46, + 0xD8, 0xB3, 0xD9, 0x85, 0xD8, 0xAC, 0x46, 0xD8, + 0xB3, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, 0xB3, + 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, + 0xAC, 0xD9, 0x8A, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, + 0xD9, 0x85, 0x46, 0xD8, 0xB4, 0xD8, 0xAD, 0xD9, + 0x8A, 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD8, 0xAE, + 0x46, 0xD8, 0xB4, 0xD9, 0x85, 0xD9, 0x85, 0x46, + // Bytes 2400 - 243f + 0xD8, 0xB5, 0xD8, 0xAD, 0xD8, 0xAD, 0x46, 0xD8, + 0xB5, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD8, 0xB5, + 0xD9, 0x84, 0xD9, 0x89, 0x46, 0xD8, 0xB5, 0xD9, + 0x84, 0xDB, 0x92, 0x46, 0xD8, 0xB5, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, + 0x89, 0x46, 0xD8, 0xB6, 0xD8, 0xAD, 0xD9, 0x8A, + 0x46, 0xD8, 0xB6, 0xD8, 0xAE, 0xD9, 0x85, 0x46, + 0xD8, 0xB7, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD8, + // Bytes 2440 - 247f + 0xB7, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD8, 0xB7, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD8, 0xB9, 0xD8, + 0xAC, 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, + 0xD9, 0x85, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD8, 0xB9, 0xD9, 0x85, 0xD9, 0x8A, + 0x46, 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x85, 0x46, + 0xD8, 0xBA, 0xD9, 0x85, 0xD9, 0x89, 0x46, 0xD8, + 0xBA, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x81, + // Bytes 2480 - 24bf + 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x81, 0xD9, + 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x82, 0xD9, 0x84, + 0xDB, 0x92, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD8, + 0xAD, 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x85, + 0x46, 0xD9, 0x82, 0xD9, 0x85, 0xD9, 0x8A, 0x46, + 0xD9, 0x83, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x83, 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x84, + 0xD8, 0xAC, 0xD8, 0xAC, 0x46, 0xD9, 0x84, 0xD8, + // Bytes 24c0 - 24ff + 0xAC, 0xD9, 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAC, + 0xD9, 0x8A, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, + 0x85, 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x89, + 0x46, 0xD9, 0x84, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, + 0xD9, 0x84, 0xD8, 0xAE, 0xD9, 0x85, 0x46, 0xD9, + 0x84, 0xD9, 0x85, 0xD8, 0xAD, 0x46, 0xD9, 0x84, + 0xD9, 0x85, 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD8, + 0xAC, 0xD8, 0xAD, 0x46, 0xD9, 0x85, 0xD8, 0xAC, + // Bytes 2500 - 253f + 0xD8, 0xAE, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, + 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAC, 0xD9, 0x8A, + 0x46, 0xD9, 0x85, 0xD8, 0xAD, 0xD8, 0xAC, 0x46, + 0xD9, 0x85, 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, + 0x85, 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x85, + 0xD8, 0xAE, 0xD8, 0xAC, 0x46, 0xD9, 0x85, 0xD8, + 0xAE, 0xD9, 0x85, 0x46, 0xD9, 0x85, 0xD8, 0xAE, + 0xD9, 0x8A, 0x46, 0xD9, 0x85, 0xD9, 0x85, 0xD9, + // Bytes 2540 - 257f + 0x8A, 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD8, 0xAD, + 0x46, 0xD9, 0x86, 0xD8, 0xAC, 0xD9, 0x85, 0x46, + 0xD9, 
0x86, 0xD8, 0xAC, 0xD9, 0x89, 0x46, 0xD9, + 0x86, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x86, + 0xD8, 0xAD, 0xD9, 0x85, 0x46, 0xD9, 0x86, 0xD8, + 0xAD, 0xD9, 0x89, 0x46, 0xD9, 0x86, 0xD8, 0xAD, + 0xD9, 0x8A, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, + 0x89, 0x46, 0xD9, 0x86, 0xD9, 0x85, 0xD9, 0x8A, + // Bytes 2580 - 25bf + 0x46, 0xD9, 0x87, 0xD9, 0x85, 0xD8, 0xAC, 0x46, + 0xD9, 0x87, 0xD9, 0x85, 0xD9, 0x85, 0x46, 0xD9, + 0x8A, 0xD8, 0xAC, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, + 0xD8, 0xAD, 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, + 0x85, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x85, + 0xD9, 0x8A, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, + 0xA7, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAC, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAD, 0x46, + // Bytes 25c0 - 25ff + 0xD9, 0x8A, 0xD9, 0x94, 0xD8, 0xAE, 0x46, 0xD9, + 0x8A, 0xD9, 0x94, 0xD8, 0xB1, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xD8, 0xB2, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xD9, 0x85, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xD9, 0x86, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, + 0x87, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x88, + 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x89, 0x46, + 0xD9, 0x8A, 0xD9, 0x94, 0xD9, 0x8A, 0x46, 0xD9, + // Bytes 2600 - 263f + 0x8A, 0xD9, 0x94, 0xDB, 0x86, 0x46, 0xD9, 0x8A, + 0xD9, 0x94, 0xDB, 0x87, 0x46, 0xD9, 0x8A, 0xD9, + 0x94, 0xDB, 0x88, 0x46, 0xD9, 0x8A, 0xD9, 0x94, + 0xDB, 0x90, 0x46, 0xD9, 0x8A, 0xD9, 0x94, 0xDB, + 0x95, 0x46, 0xE0, 0xB9, 0x8D, 0xE0, 0xB8, 0xB2, + 0x46, 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0x99, 0x46, + 0xE0, 0xBA, 0xAB, 0xE0, 0xBA, 0xA1, 0x46, 0xE0, + 0xBB, 0x8D, 0xE0, 0xBA, 0xB2, 0x46, 0xE0, 0xBD, + // Bytes 2640 - 267f + 0x80, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, 0xBD, 0x82, + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x8C, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBD, 0x91, 0xE0, 0xBE, + 0xB7, 0x46, 0xE0, 0xBD, 0x96, 0xE0, 0xBE, 0xB7, + 0x46, 0xE0, 0xBD, 0x9B, 0xE0, 0xBE, 0xB7, 0x46, + 0xE0, 0xBE, 0x90, 0xE0, 0xBE, 0xB5, 0x46, 0xE0, + 0xBE, 0x92, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, + 0x9C, 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA1, + // Bytes 2680 - 26bf + 0xE0, 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xA6, 0xE0, + 0xBE, 0xB7, 0x46, 0xE0, 0xBE, 0xAB, 0xE0, 0xBE, + 0xB7, 0x46, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x46, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0x46, + 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0x46, 0xE2, + 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x46, 0xE3, 0x81, + 0xBB, 0xE3, 0x81, 0x8B, 0x46, 0xE3, 0x82, 0x88, + 0xE3, 0x82, 0x8A, 0x46, 0xE3, 0x82, 0xAD, 0xE3, + // Bytes 26c0 - 26ff + 0x83, 0xAD, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x82, + 0xB3, 0x46, 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0x88, + 0x46, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xB3, 0x46, + 0xE3, 0x83, 0x8A, 0xE3, 0x83, 0x8E, 0x46, 0xE3, + 0x83, 0x9B, 0xE3, 0x83, 0xB3, 0x46, 0xE3, 0x83, + 0x9F, 0xE3, 0x83, 0xAA, 0x46, 0xE3, 0x83, 0xAA, + 0xE3, 0x83, 0xA9, 0x46, 0xE3, 0x83, 0xAC, 0xE3, + 0x83, 0xA0, 0x46, 0xE5, 0xA4, 0xA7, 0xE6, 0xAD, + // Bytes 2700 - 273f + 0xA3, 0x46, 0xE5, 0xB9, 0xB3, 0xE6, 0x88, 0x90, + 0x46, 0xE6, 0x98, 0x8E, 0xE6, 0xB2, 0xBB, 0x46, + 0xE6, 0x98, 0xAD, 0xE5, 0x92, 0x8C, 0x47, 0x72, + 0x61, 0x64, 0xE2, 0x88, 0x95, 0x73, 0x47, 0xE3, + 0x80, 0x94, 0x53, 0xE3, 0x80, 0x95, 0x48, 0x28, + 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x29, + 0x48, 0x28, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, + // Bytes 2740 - 277f + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x85, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x86, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x87, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x89, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 
0xA1, 0x29, 0x48, 0x28, + 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x29, 0x48, + 0x28, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xAE, 0x29, + // Bytes 2780 - 27bf + 0x48, 0x28, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, + 0x29, 0x48, 0x28, 0xE1, 0x84, 0x8F, 0xE1, 0x85, + 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x90, 0xE1, + 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, 0x91, + 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x28, 0xE1, 0x84, + 0x92, 0xE1, 0x85, 0xA1, 0x29, 0x48, 0x72, 0x61, + 0x64, 0xE2, 0x88, 0x95, 0x73, 0x32, 0x48, 0xD8, + 0xA7, 0xD9, 0x83, 0xD8, 0xA8, 0xD8, 0xB1, 0x48, + // Bytes 27c0 - 27ff + 0xD8, 0xA7, 0xD9, 0x84, 0xD9, 0x84, 0xD9, 0x87, + 0x48, 0xD8, 0xB1, 0xD8, 0xB3, 0xD9, 0x88, 0xD9, + 0x84, 0x48, 0xD8, 0xB1, 0xDB, 0x8C, 0xD8, 0xA7, + 0xD9, 0x84, 0x48, 0xD8, 0xB5, 0xD9, 0x84, 0xD8, + 0xB9, 0xD9, 0x85, 0x48, 0xD8, 0xB9, 0xD9, 0x84, + 0xD9, 0x8A, 0xD9, 0x87, 0x48, 0xD9, 0x85, 0xD8, + 0xAD, 0xD9, 0x85, 0xD8, 0xAF, 0x48, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x49, 0xE2, + // Bytes 2800 - 283f + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0x49, 0xE2, 0x80, 0xB5, 0xE2, 0x80, 0xB5, 0xE2, + 0x80, 0xB5, 0x49, 0xE2, 0x88, 0xAB, 0xE2, 0x88, + 0xAB, 0xE2, 0x88, 0xAB, 0x49, 0xE2, 0x88, 0xAE, + 0xE2, 0x88, 0xAE, 0xE2, 0x88, 0xAE, 0x49, 0xE3, + 0x80, 0x94, 0xE4, 0xB8, 0x89, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE4, 0xBA, 0x8C, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE5, 0x8B, + // Bytes 2840 - 287f + 0x9D, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE5, 0xAE, 0x89, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE6, 0x89, 0x93, 0xE3, 0x80, 0x95, + 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x95, 0x97, 0xE3, + 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, 0xE6, 0x9C, + 0xAC, 0xE3, 0x80, 0x95, 0x49, 0xE3, 0x80, 0x94, + 0xE7, 0x82, 0xB9, 0xE3, 0x80, 0x95, 0x49, 0xE3, + 0x80, 0x94, 0xE7, 0x9B, 0x97, 0xE3, 0x80, 0x95, + // Bytes 2880 - 28bf + 0x49, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x81, 0x49, 0xE3, 0x82, 0xA6, + 0xE3, 0x82, 0xA9, 0xE3, 0x83, 0xB3, 0x49, 0xE3, + 0x82, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB9, + 0x49, 0xE3, 0x82, 0xAA, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xA0, 0x49, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAA, 0x49, 0xE3, 0x82, 0xB1, + // Bytes 28c0 - 28ff + 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xB9, 0x49, 0xE3, + 0x82, 0xB3, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x8A, + 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, + 0x83, 0x81, 0x49, 0xE3, 0x82, 0xBB, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x49, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0xE3, 0x82, 0xB7, 0x49, 0xE3, + 0x83, 0x88, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x8E, 0xE3, 0x83, 0x83, 0xE3, + // Bytes 2900 - 293f + 0x83, 0x88, 0x49, 0xE3, 0x83, 0x8F, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0x84, 0x49, 0xE3, 0x83, 0x92, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, 0x49, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xB3, + 0x49, 0xE3, 0x83, 0x95, 0xE3, 0x83, 0xA9, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xBD, 0x49, 0xE3, 0x83, 0x98, + 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x84, 0x49, 0xE3, + // Bytes 2940 - 297f + 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0xAB, + 0x49, 0xE3, 0x83, 0x9B, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xB3, 0x49, 0xE3, 0x83, 0x9E, 0xE3, 0x82, + 0xA4, 0xE3, 0x83, 0xAB, 0x49, 0xE3, 0x83, 0x9E, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x8F, 0x49, 0xE3, + 0x83, 0x9E, 0xE3, 0x83, 0xAB, 0xE3, 0x82, 0xAF, + 0x49, 0xE3, 0x83, 0xA4, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0xAB, 0x49, 0xE3, 0x83, 0xA6, 0xE3, 0x82, + // Bytes 2980 - 29bf + 0xA2, 0xE3, 0x83, 
0xB3, 0x49, 0xE3, 0x83, 0xAF, + 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, 0x4C, 0xE2, + 0x80, 0xB2, 0xE2, 0x80, 0xB2, 0xE2, 0x80, 0xB2, + 0xE2, 0x80, 0xB2, 0x4C, 0xE2, 0x88, 0xAB, 0xE2, + 0x88, 0xAB, 0xE2, 0x88, 0xAB, 0xE2, 0x88, 0xAB, + 0x4C, 0xE3, 0x82, 0xA2, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA1, 0x4C, 0xE3, 0x82, + 0xA8, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xAB, 0xE3, + // Bytes 29c0 - 29ff + 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xB3, 0x4C, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x9E, 0x4C, 0xE3, 0x82, 0xAB, + 0xE3, 0x83, 0xA9, 0xE3, 0x83, 0x83, 0xE3, 0x83, + 0x88, 0x4C, 0xE3, 0x82, 0xAB, 0xE3, 0x83, 0xAD, + 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, 0x4C, 0xE3, + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0x8B, + // Bytes 2a00 - 2a3f + 0xE3, 0x83, 0xBC, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, + 0x83, 0xA5, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xBC, + 0x4C, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, + 0x83, 0xA9, 0xE3, 0x83, 0xA0, 0x4C, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xBC, 0xE3, + 0x83, 0x8D, 0x4C, 0xE3, 0x82, 0xB5, 0xE3, 0x82, + 0xA4, 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2a40 - 2a7f + 0xBC, 0xE3, 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0x84, 0x4C, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0x95, 0xE3, 0x82, 0xA3, 0xE3, 0x83, 0xBC, + 0xE3, 0x83, 0x88, 0x4C, 0xE3, 0x83, 0x98, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x82, 0xBF, + 0x4C, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, + // Bytes 2a80 - 2abf + 0x83, 0x8B, 0xE3, 0x83, 0x92, 0x4C, 0xE3, 0x83, + 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xB3, 0xE3, + 0x82, 0xB9, 0x4C, 0xE3, 0x83, 0x9B, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0xE3, 0x83, 0x88, 0x4C, + 0xE3, 0x83, 0x9E, 0xE3, 0x82, 0xA4, 0xE3, 0x82, + 0xAF, 0xE3, 0x83, 0xAD, 0x4C, 0xE3, 0x83, 0x9F, + 0xE3, 0x82, 0xAF, 0xE3, 0x83, 0xAD, 0xE3, 0x83, + 0xB3, 0x4C, 0xE3, 0x83, 0xA1, 0xE3, 0x83, 0xBC, + // Bytes 2ac0 - 2aff + 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x4C, 0xE3, + 0x83, 0xAA, 0xE3, 0x83, 0x83, 0xE3, 0x83, 0x88, + 0xE3, 0x83, 0xAB, 0x4C, 0xE3, 0x83, 0xAB, 0xE3, + 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0x4C, 0xE6, 0xA0, 0xAA, 0xE5, 0xBC, 0x8F, 0xE4, + 0xBC, 0x9A, 0xE7, 0xA4, 0xBE, 0x4E, 0x28, 0xE1, + 0x84, 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x92, + 0xE1, 0x85, 0xAE, 0x29, 0x4F, 0xD8, 0xAC, 0xD9, + // Bytes 2b00 - 2b3f + 0x84, 0x20, 0xD8, 0xAC, 0xD9, 0x84, 0xD8, 0xA7, + 0xD9, 0x84, 0xD9, 0x87, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xA2, + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x98, 0xE3, 0x82, + 0x9A, 0xE3, 0x82, 0xA2, 0x4F, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xAF, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x82, 0xB5, + // Bytes 2b40 - 2b7f + 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x81, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xA0, 0x4F, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x98, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0xBF, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0xAB, 0x4F, 0xE3, 0x83, 0x9B, + 0xE3, 0x82, 0x9A, 0xE3, 0x82, 0xA4, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0x4F, 0xE3, 0x83, 0x9E, + // Bytes 2b80 - 2bbf + 0xE3, 0x83, 0xB3, 0xE3, 0x82, 0xB7, 0xE3, 0x83, + 0xA7, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 0x83, 0xA1, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0x88, 0xE3, 0x83, 0xB3, 0x4F, 0xE3, 
0x83, 0xAB, + 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x95, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xAB, 0x51, 0x28, 0xE1, 0x84, + 0x8B, 0xE1, 0x85, 0xA9, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xA5, 0xE1, 0x86, 0xAB, 0x29, 0x52, 0xE3, + // Bytes 2bc0 - 2bff + 0x82, 0xAD, 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xAB, + 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xBC, 0x52, 0xE3, 0x82, 0xAD, 0xE3, 0x83, 0xAD, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + 0xA9, 0xE3, 0x83, 0xA0, 0x52, 0xE3, 0x82, 0xAD, + 0xE3, 0x83, 0xAD, 0xE3, 0x83, 0xA1, 0xE3, 0x83, + 0xBC, 0xE3, 0x83, 0x88, 0xE3, 0x83, 0xAB, 0x52, + 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0xE3, 0x83, + // Bytes 2c00 - 2c3f + 0xA9, 0xE3, 0x83, 0xA0, 0xE3, 0x83, 0x88, 0xE3, + 0x83, 0xB3, 0x52, 0xE3, 0x82, 0xAF, 0xE3, 0x83, + 0xAB, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0xE3, + 0x82, 0xA4, 0xE3, 0x83, 0xAD, 0x52, 0xE3, 0x83, + 0x8F, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, 0xE3, + 0x82, 0xBB, 0xE3, 0x83, 0xB3, 0xE3, 0x83, 0x88, + 0x52, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0xE3, + 0x82, 0xA2, 0xE3, 0x82, 0xB9, 0xE3, 0x83, 0x88, + // Bytes 2c40 - 2c7f + 0xE3, 0x83, 0xAB, 0x52, 0xE3, 0x83, 0x95, 0xE3, + 0x82, 0x99, 0xE3, 0x83, 0x83, 0xE3, 0x82, 0xB7, + 0xE3, 0x82, 0xA7, 0xE3, 0x83, 0xAB, 0x52, 0xE3, + 0x83, 0x9F, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0x8F, + 0xE3, 0x82, 0x99, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + 0xAB, 0x52, 0xE3, 0x83, 0xAC, 0xE3, 0x83, 0xB3, + 0xE3, 0x83, 0x88, 0xE3, 0x82, 0xB1, 0xE3, 0x82, + 0x99, 0xE3, 0x83, 0xB3, 0x61, 0xD8, 0xB5, 0xD9, + // Bytes 2c80 - 2cbf + 0x84, 0xD9, 0x89, 0x20, 0xD8, 0xA7, 0xD9, 0x84, + 0xD9, 0x84, 0xD9, 0x87, 0x20, 0xD8, 0xB9, 0xD9, + 0x84, 0xD9, 0x8A, 0xD9, 0x87, 0x20, 0xD9, 0x88, + 0xD8, 0xB3, 0xD9, 0x84, 0xD9, 0x85, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA6, 0xBE, 0x01, 0x06, 0xE0, + 0xA7, 0x87, 0xE0, 0xA7, 0x97, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAC, 0xBE, 0x01, 0x06, 0xE0, + 0xAD, 0x87, 0xE0, 0xAD, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2cc0 - 2cff + 0xAD, 0x87, 0xE0, 0xAD, 0x97, 0x01, 0x06, 0xE0, + 0xAE, 0x92, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xAF, 0x86, 0xE0, 0xAF, 0x97, 0x01, 0x06, 0xE0, + 0xAF, 0x87, 0xE0, 0xAE, 0xBE, 0x01, 0x06, 0xE0, + 0xB2, 0xBF, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x95, 0x01, 0x06, 0xE0, + 0xB3, 0x86, 0xE0, 0xB3, 0x96, 0x01, 0x06, 0xE0, + // Bytes 2d00 - 2d3f + 0xB5, 0x86, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB5, 0x86, 0xE0, 0xB5, 0x97, 0x01, 0x06, 0xE0, + 0xB5, 0x87, 0xE0, 0xB4, 0xBE, 0x01, 0x06, 0xE0, + 0xB7, 0x99, 0xE0, 0xB7, 0x9F, 0x01, 0x06, 0xE1, + 0x80, 0xA5, 0xE1, 0x80, 0xAE, 0x01, 0x06, 0xE1, + 0xAC, 0x85, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x87, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x89, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + // Bytes 2d40 - 2d7f + 0xAC, 0x8B, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x8D, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0x91, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBA, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBC, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBE, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAC, 0xBF, 0xE1, 0xAC, 0xB5, 0x01, 0x06, 0xE1, + 0xAD, 0x82, 0xE1, 0xAC, 0xB5, 0x01, 0x08, 0xF0, + // Bytes 2d80 - 2dbf + 0x91, 0x84, 0xB1, 0xF0, 0x91, 0x84, 0xA7, 0x01, + 0x08, 0xF0, 0x91, 0x84, 0xB2, 0xF0, 0x91, 0x84, + 0xA7, 0x01, 0x08, 0xF0, 0x91, 0x8D, 0x87, 0xF0, + 0x91, 0x8C, 0xBE, 0x01, 0x08, 0xF0, 0x91, 0x8D, + 0x87, 0xF0, 0x91, 0x8D, 0x97, 0x01, 0x08, 0xF0, + 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, 0xB0, 0x01, + 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, 0x91, 0x92, + 0xBA, 
0x01, 0x08, 0xF0, 0x91, 0x92, 0xB9, 0xF0, + // Bytes 2dc0 - 2dff + 0x91, 0x92, 0xBD, 0x01, 0x08, 0xF0, 0x91, 0x96, + 0xB8, 0xF0, 0x91, 0x96, 0xAF, 0x01, 0x08, 0xF0, + 0x91, 0x96, 0xB9, 0xF0, 0x91, 0x96, 0xAF, 0x01, + 0x09, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0xE0, + 0xB3, 0x95, 0x02, 0x09, 0xE0, 0xB7, 0x99, 0xE0, + 0xB7, 0x8F, 0xE0, 0xB7, 0x8A, 0x12, 0x44, 0x44, + 0x5A, 0xCC, 0x8C, 0xC9, 0x44, 0x44, 0x7A, 0xCC, + 0x8C, 0xC9, 0x44, 0x64, 0x7A, 0xCC, 0x8C, 0xC9, + // Bytes 2e00 - 2e3f + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x94, 0xC9, + 0x46, 0xD9, 0x84, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x46, 0xE1, 0x84, 0x80, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x82, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x83, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x85, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x86, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e40 - 2e7f + 0x46, 0xE1, 0x84, 0x87, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x89, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xAE, 0x01, + 0x46, 0xE1, 0x84, 0x8C, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8E, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x8F, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x90, 0xE1, 0x85, 0xA1, 0x01, + // Bytes 2e80 - 2ebf + 0x46, 0xE1, 0x84, 0x91, 0xE1, 0x85, 0xA1, 0x01, + 0x46, 0xE1, 0x84, 0x92, 0xE1, 0x85, 0xA1, 0x01, + 0x49, 0xE3, 0x83, 0xA1, 0xE3, 0x82, 0xAB, 0xE3, + 0x82, 0x99, 0x0D, 0x4C, 0xE1, 0x84, 0x8C, 0xE1, + 0x85, 0xAE, 0xE1, 0x84, 0x8B, 0xE1, 0x85, 0xB4, + 0x01, 0x4C, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, + 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, 0x4C, + 0xE3, 0x82, 0xB3, 0xE3, 0x83, 0xBC, 0xE3, 0x83, + // Bytes 2ec0 - 2eff + 0x9B, 0xE3, 0x82, 0x9A, 0x0D, 0x4C, 0xE3, 0x83, + 0xA4, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, 0xE3, + 0x82, 0x99, 0x0D, 0x4F, 0xE1, 0x84, 0x8E, 0xE1, + 0x85, 0xA1, 0xE1, 0x86, 0xB7, 0xE1, 0x84, 0x80, + 0xE1, 0x85, 0xA9, 0x01, 0x4F, 0xE3, 0x82, 0xA4, + 0xE3, 0x83, 0x8B, 0xE3, 0x83, 0xB3, 0xE3, 0x82, + 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, 0x82, + 0xB7, 0xE3, 0x83, 0xAA, 0xE3, 0x83, 0xB3, 0xE3, + // Bytes 2f00 - 2f3f + 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, 0x4F, 0xE3, + 0x83, 0x98, 0xE3, 0x82, 0x9A, 0xE3, 0x83, 0xBC, + 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, 0x4F, + 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0xE3, 0x83, + 0xB3, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x52, 0xE3, 0x82, 0xA8, 0xE3, 0x82, 0xB9, 0xE3, + 0x82, 0xAF, 0xE3, 0x83, 0xBC, 0xE3, 0x83, 0x88, + 0xE3, 0x82, 0x99, 0x0D, 0x52, 0xE3, 0x83, 0x95, + // Bytes 2f40 - 2f7f + 0xE3, 0x82, 0xA1, 0xE3, 0x83, 0xA9, 0xE3, 0x83, + 0x83, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x86, 0xE0, 0xB3, 0x86, 0xE0, 0xB3, 0x82, 0x01, + 0x86, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8F, 0x01, + 0x03, 0x3C, 0xCC, 0xB8, 0x05, 0x03, 0x3D, 0xCC, + 0xB8, 0x05, 0x03, 0x3E, 0xCC, 0xB8, 0x05, 0x03, + 0x41, 0xCC, 0x80, 0xC9, 0x03, 0x41, 0xCC, 0x81, + 0xC9, 0x03, 0x41, 0xCC, 0x83, 0xC9, 0x03, 0x41, + // Bytes 2f80 - 2fbf + 0xCC, 0x84, 0xC9, 0x03, 0x41, 0xCC, 0x89, 0xC9, + 0x03, 0x41, 0xCC, 0x8C, 0xC9, 0x03, 0x41, 0xCC, + 0x8F, 0xC9, 0x03, 0x41, 0xCC, 0x91, 0xC9, 0x03, + 0x41, 0xCC, 0xA5, 0xB5, 0x03, 0x41, 0xCC, 0xA8, + 0xA5, 0x03, 0x42, 0xCC, 0x87, 0xC9, 0x03, 0x42, + 0xCC, 0xA3, 0xB5, 0x03, 0x42, 0xCC, 0xB1, 0xB5, + 0x03, 0x43, 0xCC, 0x81, 0xC9, 0x03, 0x43, 0xCC, + 0x82, 0xC9, 0x03, 0x43, 0xCC, 0x87, 0xC9, 0x03, + // Bytes 2fc0 - 2fff + 0x43, 0xCC, 0x8C, 0xC9, 0x03, 0x44, 0xCC, 0x87, + 0xC9, 0x03, 0x44, 0xCC, 0x8C, 0xC9, 0x03, 0x44, + 
0xCC, 0xA3, 0xB5, 0x03, 0x44, 0xCC, 0xA7, 0xA5, + 0x03, 0x44, 0xCC, 0xAD, 0xB5, 0x03, 0x44, 0xCC, + 0xB1, 0xB5, 0x03, 0x45, 0xCC, 0x80, 0xC9, 0x03, + 0x45, 0xCC, 0x81, 0xC9, 0x03, 0x45, 0xCC, 0x83, + 0xC9, 0x03, 0x45, 0xCC, 0x86, 0xC9, 0x03, 0x45, + 0xCC, 0x87, 0xC9, 0x03, 0x45, 0xCC, 0x88, 0xC9, + // Bytes 3000 - 303f + 0x03, 0x45, 0xCC, 0x89, 0xC9, 0x03, 0x45, 0xCC, + 0x8C, 0xC9, 0x03, 0x45, 0xCC, 0x8F, 0xC9, 0x03, + 0x45, 0xCC, 0x91, 0xC9, 0x03, 0x45, 0xCC, 0xA8, + 0xA5, 0x03, 0x45, 0xCC, 0xAD, 0xB5, 0x03, 0x45, + 0xCC, 0xB0, 0xB5, 0x03, 0x46, 0xCC, 0x87, 0xC9, + 0x03, 0x47, 0xCC, 0x81, 0xC9, 0x03, 0x47, 0xCC, + 0x82, 0xC9, 0x03, 0x47, 0xCC, 0x84, 0xC9, 0x03, + 0x47, 0xCC, 0x86, 0xC9, 0x03, 0x47, 0xCC, 0x87, + // Bytes 3040 - 307f + 0xC9, 0x03, 0x47, 0xCC, 0x8C, 0xC9, 0x03, 0x47, + 0xCC, 0xA7, 0xA5, 0x03, 0x48, 0xCC, 0x82, 0xC9, + 0x03, 0x48, 0xCC, 0x87, 0xC9, 0x03, 0x48, 0xCC, + 0x88, 0xC9, 0x03, 0x48, 0xCC, 0x8C, 0xC9, 0x03, + 0x48, 0xCC, 0xA3, 0xB5, 0x03, 0x48, 0xCC, 0xA7, + 0xA5, 0x03, 0x48, 0xCC, 0xAE, 0xB5, 0x03, 0x49, + 0xCC, 0x80, 0xC9, 0x03, 0x49, 0xCC, 0x81, 0xC9, + 0x03, 0x49, 0xCC, 0x82, 0xC9, 0x03, 0x49, 0xCC, + // Bytes 3080 - 30bf + 0x83, 0xC9, 0x03, 0x49, 0xCC, 0x84, 0xC9, 0x03, + 0x49, 0xCC, 0x86, 0xC9, 0x03, 0x49, 0xCC, 0x87, + 0xC9, 0x03, 0x49, 0xCC, 0x89, 0xC9, 0x03, 0x49, + 0xCC, 0x8C, 0xC9, 0x03, 0x49, 0xCC, 0x8F, 0xC9, + 0x03, 0x49, 0xCC, 0x91, 0xC9, 0x03, 0x49, 0xCC, + 0xA3, 0xB5, 0x03, 0x49, 0xCC, 0xA8, 0xA5, 0x03, + 0x49, 0xCC, 0xB0, 0xB5, 0x03, 0x4A, 0xCC, 0x82, + 0xC9, 0x03, 0x4B, 0xCC, 0x81, 0xC9, 0x03, 0x4B, + // Bytes 30c0 - 30ff + 0xCC, 0x8C, 0xC9, 0x03, 0x4B, 0xCC, 0xA3, 0xB5, + 0x03, 0x4B, 0xCC, 0xA7, 0xA5, 0x03, 0x4B, 0xCC, + 0xB1, 0xB5, 0x03, 0x4C, 0xCC, 0x81, 0xC9, 0x03, + 0x4C, 0xCC, 0x8C, 0xC9, 0x03, 0x4C, 0xCC, 0xA7, + 0xA5, 0x03, 0x4C, 0xCC, 0xAD, 0xB5, 0x03, 0x4C, + 0xCC, 0xB1, 0xB5, 0x03, 0x4D, 0xCC, 0x81, 0xC9, + 0x03, 0x4D, 0xCC, 0x87, 0xC9, 0x03, 0x4D, 0xCC, + 0xA3, 0xB5, 0x03, 0x4E, 0xCC, 0x80, 0xC9, 0x03, + // Bytes 3100 - 313f + 0x4E, 0xCC, 0x81, 0xC9, 0x03, 0x4E, 0xCC, 0x83, + 0xC9, 0x03, 0x4E, 0xCC, 0x87, 0xC9, 0x03, 0x4E, + 0xCC, 0x8C, 0xC9, 0x03, 0x4E, 0xCC, 0xA3, 0xB5, + 0x03, 0x4E, 0xCC, 0xA7, 0xA5, 0x03, 0x4E, 0xCC, + 0xAD, 0xB5, 0x03, 0x4E, 0xCC, 0xB1, 0xB5, 0x03, + 0x4F, 0xCC, 0x80, 0xC9, 0x03, 0x4F, 0xCC, 0x81, + 0xC9, 0x03, 0x4F, 0xCC, 0x86, 0xC9, 0x03, 0x4F, + 0xCC, 0x89, 0xC9, 0x03, 0x4F, 0xCC, 0x8B, 0xC9, + // Bytes 3140 - 317f + 0x03, 0x4F, 0xCC, 0x8C, 0xC9, 0x03, 0x4F, 0xCC, + 0x8F, 0xC9, 0x03, 0x4F, 0xCC, 0x91, 0xC9, 0x03, + 0x50, 0xCC, 0x81, 0xC9, 0x03, 0x50, 0xCC, 0x87, + 0xC9, 0x03, 0x52, 0xCC, 0x81, 0xC9, 0x03, 0x52, + 0xCC, 0x87, 0xC9, 0x03, 0x52, 0xCC, 0x8C, 0xC9, + 0x03, 0x52, 0xCC, 0x8F, 0xC9, 0x03, 0x52, 0xCC, + 0x91, 0xC9, 0x03, 0x52, 0xCC, 0xA7, 0xA5, 0x03, + 0x52, 0xCC, 0xB1, 0xB5, 0x03, 0x53, 0xCC, 0x82, + // Bytes 3180 - 31bf + 0xC9, 0x03, 0x53, 0xCC, 0x87, 0xC9, 0x03, 0x53, + 0xCC, 0xA6, 0xB5, 0x03, 0x53, 0xCC, 0xA7, 0xA5, + 0x03, 0x54, 0xCC, 0x87, 0xC9, 0x03, 0x54, 0xCC, + 0x8C, 0xC9, 0x03, 0x54, 0xCC, 0xA3, 0xB5, 0x03, + 0x54, 0xCC, 0xA6, 0xB5, 0x03, 0x54, 0xCC, 0xA7, + 0xA5, 0x03, 0x54, 0xCC, 0xAD, 0xB5, 0x03, 0x54, + 0xCC, 0xB1, 0xB5, 0x03, 0x55, 0xCC, 0x80, 0xC9, + 0x03, 0x55, 0xCC, 0x81, 0xC9, 0x03, 0x55, 0xCC, + // Bytes 31c0 - 31ff + 0x82, 0xC9, 0x03, 0x55, 0xCC, 0x86, 0xC9, 0x03, + 0x55, 0xCC, 0x89, 0xC9, 0x03, 0x55, 0xCC, 0x8A, + 0xC9, 0x03, 0x55, 0xCC, 0x8B, 0xC9, 0x03, 0x55, + 0xCC, 0x8C, 0xC9, 0x03, 0x55, 0xCC, 0x8F, 0xC9, + 0x03, 0x55, 0xCC, 0x91, 0xC9, 0x03, 0x55, 0xCC, + 0xA3, 0xB5, 0x03, 
0x55, 0xCC, 0xA4, 0xB5, 0x03, + 0x55, 0xCC, 0xA8, 0xA5, 0x03, 0x55, 0xCC, 0xAD, + 0xB5, 0x03, 0x55, 0xCC, 0xB0, 0xB5, 0x03, 0x56, + // Bytes 3200 - 323f + 0xCC, 0x83, 0xC9, 0x03, 0x56, 0xCC, 0xA3, 0xB5, + 0x03, 0x57, 0xCC, 0x80, 0xC9, 0x03, 0x57, 0xCC, + 0x81, 0xC9, 0x03, 0x57, 0xCC, 0x82, 0xC9, 0x03, + 0x57, 0xCC, 0x87, 0xC9, 0x03, 0x57, 0xCC, 0x88, + 0xC9, 0x03, 0x57, 0xCC, 0xA3, 0xB5, 0x03, 0x58, + 0xCC, 0x87, 0xC9, 0x03, 0x58, 0xCC, 0x88, 0xC9, + 0x03, 0x59, 0xCC, 0x80, 0xC9, 0x03, 0x59, 0xCC, + 0x81, 0xC9, 0x03, 0x59, 0xCC, 0x82, 0xC9, 0x03, + // Bytes 3240 - 327f + 0x59, 0xCC, 0x83, 0xC9, 0x03, 0x59, 0xCC, 0x84, + 0xC9, 0x03, 0x59, 0xCC, 0x87, 0xC9, 0x03, 0x59, + 0xCC, 0x88, 0xC9, 0x03, 0x59, 0xCC, 0x89, 0xC9, + 0x03, 0x59, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, 0xCC, + 0x81, 0xC9, 0x03, 0x5A, 0xCC, 0x82, 0xC9, 0x03, + 0x5A, 0xCC, 0x87, 0xC9, 0x03, 0x5A, 0xCC, 0x8C, + 0xC9, 0x03, 0x5A, 0xCC, 0xA3, 0xB5, 0x03, 0x5A, + 0xCC, 0xB1, 0xB5, 0x03, 0x61, 0xCC, 0x80, 0xC9, + // Bytes 3280 - 32bf + 0x03, 0x61, 0xCC, 0x81, 0xC9, 0x03, 0x61, 0xCC, + 0x83, 0xC9, 0x03, 0x61, 0xCC, 0x84, 0xC9, 0x03, + 0x61, 0xCC, 0x89, 0xC9, 0x03, 0x61, 0xCC, 0x8C, + 0xC9, 0x03, 0x61, 0xCC, 0x8F, 0xC9, 0x03, 0x61, + 0xCC, 0x91, 0xC9, 0x03, 0x61, 0xCC, 0xA5, 0xB5, + 0x03, 0x61, 0xCC, 0xA8, 0xA5, 0x03, 0x62, 0xCC, + 0x87, 0xC9, 0x03, 0x62, 0xCC, 0xA3, 0xB5, 0x03, + 0x62, 0xCC, 0xB1, 0xB5, 0x03, 0x63, 0xCC, 0x81, + // Bytes 32c0 - 32ff + 0xC9, 0x03, 0x63, 0xCC, 0x82, 0xC9, 0x03, 0x63, + 0xCC, 0x87, 0xC9, 0x03, 0x63, 0xCC, 0x8C, 0xC9, + 0x03, 0x64, 0xCC, 0x87, 0xC9, 0x03, 0x64, 0xCC, + 0x8C, 0xC9, 0x03, 0x64, 0xCC, 0xA3, 0xB5, 0x03, + 0x64, 0xCC, 0xA7, 0xA5, 0x03, 0x64, 0xCC, 0xAD, + 0xB5, 0x03, 0x64, 0xCC, 0xB1, 0xB5, 0x03, 0x65, + 0xCC, 0x80, 0xC9, 0x03, 0x65, 0xCC, 0x81, 0xC9, + 0x03, 0x65, 0xCC, 0x83, 0xC9, 0x03, 0x65, 0xCC, + // Bytes 3300 - 333f + 0x86, 0xC9, 0x03, 0x65, 0xCC, 0x87, 0xC9, 0x03, + 0x65, 0xCC, 0x88, 0xC9, 0x03, 0x65, 0xCC, 0x89, + 0xC9, 0x03, 0x65, 0xCC, 0x8C, 0xC9, 0x03, 0x65, + 0xCC, 0x8F, 0xC9, 0x03, 0x65, 0xCC, 0x91, 0xC9, + 0x03, 0x65, 0xCC, 0xA8, 0xA5, 0x03, 0x65, 0xCC, + 0xAD, 0xB5, 0x03, 0x65, 0xCC, 0xB0, 0xB5, 0x03, + 0x66, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, 0x81, + 0xC9, 0x03, 0x67, 0xCC, 0x82, 0xC9, 0x03, 0x67, + // Bytes 3340 - 337f + 0xCC, 0x84, 0xC9, 0x03, 0x67, 0xCC, 0x86, 0xC9, + 0x03, 0x67, 0xCC, 0x87, 0xC9, 0x03, 0x67, 0xCC, + 0x8C, 0xC9, 0x03, 0x67, 0xCC, 0xA7, 0xA5, 0x03, + 0x68, 0xCC, 0x82, 0xC9, 0x03, 0x68, 0xCC, 0x87, + 0xC9, 0x03, 0x68, 0xCC, 0x88, 0xC9, 0x03, 0x68, + 0xCC, 0x8C, 0xC9, 0x03, 0x68, 0xCC, 0xA3, 0xB5, + 0x03, 0x68, 0xCC, 0xA7, 0xA5, 0x03, 0x68, 0xCC, + 0xAE, 0xB5, 0x03, 0x68, 0xCC, 0xB1, 0xB5, 0x03, + // Bytes 3380 - 33bf + 0x69, 0xCC, 0x80, 0xC9, 0x03, 0x69, 0xCC, 0x81, + 0xC9, 0x03, 0x69, 0xCC, 0x82, 0xC9, 0x03, 0x69, + 0xCC, 0x83, 0xC9, 0x03, 0x69, 0xCC, 0x84, 0xC9, + 0x03, 0x69, 0xCC, 0x86, 0xC9, 0x03, 0x69, 0xCC, + 0x89, 0xC9, 0x03, 0x69, 0xCC, 0x8C, 0xC9, 0x03, + 0x69, 0xCC, 0x8F, 0xC9, 0x03, 0x69, 0xCC, 0x91, + 0xC9, 0x03, 0x69, 0xCC, 0xA3, 0xB5, 0x03, 0x69, + 0xCC, 0xA8, 0xA5, 0x03, 0x69, 0xCC, 0xB0, 0xB5, + // Bytes 33c0 - 33ff + 0x03, 0x6A, 0xCC, 0x82, 0xC9, 0x03, 0x6A, 0xCC, + 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0x81, 0xC9, 0x03, + 0x6B, 0xCC, 0x8C, 0xC9, 0x03, 0x6B, 0xCC, 0xA3, + 0xB5, 0x03, 0x6B, 0xCC, 0xA7, 0xA5, 0x03, 0x6B, + 0xCC, 0xB1, 0xB5, 0x03, 0x6C, 0xCC, 0x81, 0xC9, + 0x03, 0x6C, 0xCC, 0x8C, 0xC9, 0x03, 0x6C, 0xCC, + 0xA7, 0xA5, 0x03, 0x6C, 0xCC, 0xAD, 0xB5, 0x03, + 0x6C, 0xCC, 0xB1, 0xB5, 0x03, 0x6D, 0xCC, 0x81, + // Bytes 3400 - 343f + 0xC9, 0x03, 
0x6D, 0xCC, 0x87, 0xC9, 0x03, 0x6D, + 0xCC, 0xA3, 0xB5, 0x03, 0x6E, 0xCC, 0x80, 0xC9, + 0x03, 0x6E, 0xCC, 0x81, 0xC9, 0x03, 0x6E, 0xCC, + 0x83, 0xC9, 0x03, 0x6E, 0xCC, 0x87, 0xC9, 0x03, + 0x6E, 0xCC, 0x8C, 0xC9, 0x03, 0x6E, 0xCC, 0xA3, + 0xB5, 0x03, 0x6E, 0xCC, 0xA7, 0xA5, 0x03, 0x6E, + 0xCC, 0xAD, 0xB5, 0x03, 0x6E, 0xCC, 0xB1, 0xB5, + 0x03, 0x6F, 0xCC, 0x80, 0xC9, 0x03, 0x6F, 0xCC, + // Bytes 3440 - 347f + 0x81, 0xC9, 0x03, 0x6F, 0xCC, 0x86, 0xC9, 0x03, + 0x6F, 0xCC, 0x89, 0xC9, 0x03, 0x6F, 0xCC, 0x8B, + 0xC9, 0x03, 0x6F, 0xCC, 0x8C, 0xC9, 0x03, 0x6F, + 0xCC, 0x8F, 0xC9, 0x03, 0x6F, 0xCC, 0x91, 0xC9, + 0x03, 0x70, 0xCC, 0x81, 0xC9, 0x03, 0x70, 0xCC, + 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x81, 0xC9, 0x03, + 0x72, 0xCC, 0x87, 0xC9, 0x03, 0x72, 0xCC, 0x8C, + 0xC9, 0x03, 0x72, 0xCC, 0x8F, 0xC9, 0x03, 0x72, + // Bytes 3480 - 34bf + 0xCC, 0x91, 0xC9, 0x03, 0x72, 0xCC, 0xA7, 0xA5, + 0x03, 0x72, 0xCC, 0xB1, 0xB5, 0x03, 0x73, 0xCC, + 0x82, 0xC9, 0x03, 0x73, 0xCC, 0x87, 0xC9, 0x03, + 0x73, 0xCC, 0xA6, 0xB5, 0x03, 0x73, 0xCC, 0xA7, + 0xA5, 0x03, 0x74, 0xCC, 0x87, 0xC9, 0x03, 0x74, + 0xCC, 0x88, 0xC9, 0x03, 0x74, 0xCC, 0x8C, 0xC9, + 0x03, 0x74, 0xCC, 0xA3, 0xB5, 0x03, 0x74, 0xCC, + 0xA6, 0xB5, 0x03, 0x74, 0xCC, 0xA7, 0xA5, 0x03, + // Bytes 34c0 - 34ff + 0x74, 0xCC, 0xAD, 0xB5, 0x03, 0x74, 0xCC, 0xB1, + 0xB5, 0x03, 0x75, 0xCC, 0x80, 0xC9, 0x03, 0x75, + 0xCC, 0x81, 0xC9, 0x03, 0x75, 0xCC, 0x82, 0xC9, + 0x03, 0x75, 0xCC, 0x86, 0xC9, 0x03, 0x75, 0xCC, + 0x89, 0xC9, 0x03, 0x75, 0xCC, 0x8A, 0xC9, 0x03, + 0x75, 0xCC, 0x8B, 0xC9, 0x03, 0x75, 0xCC, 0x8C, + 0xC9, 0x03, 0x75, 0xCC, 0x8F, 0xC9, 0x03, 0x75, + 0xCC, 0x91, 0xC9, 0x03, 0x75, 0xCC, 0xA3, 0xB5, + // Bytes 3500 - 353f + 0x03, 0x75, 0xCC, 0xA4, 0xB5, 0x03, 0x75, 0xCC, + 0xA8, 0xA5, 0x03, 0x75, 0xCC, 0xAD, 0xB5, 0x03, + 0x75, 0xCC, 0xB0, 0xB5, 0x03, 0x76, 0xCC, 0x83, + 0xC9, 0x03, 0x76, 0xCC, 0xA3, 0xB5, 0x03, 0x77, + 0xCC, 0x80, 0xC9, 0x03, 0x77, 0xCC, 0x81, 0xC9, + 0x03, 0x77, 0xCC, 0x82, 0xC9, 0x03, 0x77, 0xCC, + 0x87, 0xC9, 0x03, 0x77, 0xCC, 0x88, 0xC9, 0x03, + 0x77, 0xCC, 0x8A, 0xC9, 0x03, 0x77, 0xCC, 0xA3, + // Bytes 3540 - 357f + 0xB5, 0x03, 0x78, 0xCC, 0x87, 0xC9, 0x03, 0x78, + 0xCC, 0x88, 0xC9, 0x03, 0x79, 0xCC, 0x80, 0xC9, + 0x03, 0x79, 0xCC, 0x81, 0xC9, 0x03, 0x79, 0xCC, + 0x82, 0xC9, 0x03, 0x79, 0xCC, 0x83, 0xC9, 0x03, + 0x79, 0xCC, 0x84, 0xC9, 0x03, 0x79, 0xCC, 0x87, + 0xC9, 0x03, 0x79, 0xCC, 0x88, 0xC9, 0x03, 0x79, + 0xCC, 0x89, 0xC9, 0x03, 0x79, 0xCC, 0x8A, 0xC9, + 0x03, 0x79, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, 0xCC, + // Bytes 3580 - 35bf + 0x81, 0xC9, 0x03, 0x7A, 0xCC, 0x82, 0xC9, 0x03, + 0x7A, 0xCC, 0x87, 0xC9, 0x03, 0x7A, 0xCC, 0x8C, + 0xC9, 0x03, 0x7A, 0xCC, 0xA3, 0xB5, 0x03, 0x7A, + 0xCC, 0xB1, 0xB5, 0x04, 0xC2, 0xA8, 0xCC, 0x80, + 0xCA, 0x04, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x04, + 0xC2, 0xA8, 0xCD, 0x82, 0xCA, 0x04, 0xC3, 0x86, + 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0x86, 0xCC, 0x84, + 0xC9, 0x04, 0xC3, 0x98, 0xCC, 0x81, 0xC9, 0x04, + // Bytes 35c0 - 35ff + 0xC3, 0xA6, 0xCC, 0x81, 0xC9, 0x04, 0xC3, 0xA6, + 0xCC, 0x84, 0xC9, 0x04, 0xC3, 0xB8, 0xCC, 0x81, + 0xC9, 0x04, 0xC5, 0xBF, 0xCC, 0x87, 0xC9, 0x04, + 0xC6, 0xB7, 0xCC, 0x8C, 0xC9, 0x04, 0xCA, 0x92, + 0xCC, 0x8C, 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0x91, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0x91, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0x91, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0x91, 0xCD, 0x85, + // Bytes 3600 - 363f + 0xD9, 0x04, 0xCE, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0x95, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x97, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x97, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0x97, 0xCD, 
0x85, 0xD9, 0x04, + 0xCE, 0x99, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x99, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0x99, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0x9F, + // Bytes 3640 - 367f + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0x9F, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA1, 0xCC, 0x94, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA5, + 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x84, + 0xC9, 0x04, 0xCE, 0xA5, 0xCC, 0x86, 0xC9, 0x04, + 0xCE, 0xA5, 0xCC, 0x88, 0xC9, 0x04, 0xCE, 0xA9, + 0xCC, 0x80, 0xC9, 0x04, 0xCE, 0xA9, 0xCC, 0x81, + 0xC9, 0x04, 0xCE, 0xA9, 0xCD, 0x85, 0xD9, 0x04, + // Bytes 3680 - 36bf + 0xCE, 0xB1, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB1, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB1, 0xCD, 0x85, + 0xD9, 0x04, 0xCE, 0xB5, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xB5, 0xCC, 0x81, 0xC9, 0x04, 0xCE, 0xB7, + 0xCD, 0x85, 0xD9, 0x04, 0xCE, 0xB9, 0xCC, 0x80, + 0xC9, 0x04, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x04, + 0xCE, 0xB9, 0xCC, 0x84, 0xC9, 0x04, 0xCE, 0xB9, + 0xCC, 0x86, 0xC9, 0x04, 0xCE, 0xB9, 0xCD, 0x82, + // Bytes 36c0 - 36ff + 0xC9, 0x04, 0xCE, 0xBF, 0xCC, 0x80, 0xC9, 0x04, + 0xCE, 0xBF, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x81, + 0xCC, 0x93, 0xC9, 0x04, 0xCF, 0x81, 0xCC, 0x94, + 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x80, 0xC9, 0x04, + 0xCF, 0x85, 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x85, + 0xCC, 0x84, 0xC9, 0x04, 0xCF, 0x85, 0xCC, 0x86, + 0xC9, 0x04, 0xCF, 0x85, 0xCD, 0x82, 0xC9, 0x04, + 0xCF, 0x89, 0xCD, 0x85, 0xD9, 0x04, 0xCF, 0x92, + // Bytes 3700 - 373f + 0xCC, 0x81, 0xC9, 0x04, 0xCF, 0x92, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0x86, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0x90, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x90, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x93, 0xCC, 0x81, + 0xC9, 0x04, 0xD0, 0x95, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0x95, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0x95, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0x96, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3740 - 377f + 0xD0, 0x97, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x98, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x84, + 0xC9, 0x04, 0xD0, 0x98, 0xCC, 0x86, 0xC9, 0x04, + 0xD0, 0x98, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0x9A, + 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0x9E, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x84, 0xC9, 0x04, + 0xD0, 0xA3, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xA3, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xA3, 0xCC, 0x8B, + // Bytes 3780 - 37bf + 0xC9, 0x04, 0xD0, 0xA7, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xAB, 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xAD, + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB0, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB3, 0xCC, 0x81, 0xC9, 0x04, 0xD0, 0xB5, + 0xCC, 0x80, 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x86, + 0xC9, 0x04, 0xD0, 0xB5, 0xCC, 0x88, 0xC9, 0x04, + 0xD0, 0xB6, 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB6, + // Bytes 37c0 - 37ff + 0xCC, 0x88, 0xC9, 0x04, 0xD0, 0xB7, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x80, 0xC9, 0x04, + 0xD0, 0xB8, 0xCC, 0x84, 0xC9, 0x04, 0xD0, 0xB8, + 0xCC, 0x86, 0xC9, 0x04, 0xD0, 0xB8, 0xCC, 0x88, + 0xC9, 0x04, 0xD0, 0xBA, 0xCC, 0x81, 0xC9, 0x04, + 0xD0, 0xBE, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x83, + 0xCC, 0x84, 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x86, + 0xC9, 0x04, 0xD1, 0x83, 0xCC, 0x88, 0xC9, 0x04, + // Bytes 3800 - 383f + 0xD1, 0x83, 0xCC, 0x8B, 0xC9, 0x04, 0xD1, 0x87, + 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0x8B, 0xCC, 0x88, + 0xC9, 0x04, 0xD1, 0x8D, 0xCC, 0x88, 0xC9, 0x04, + 0xD1, 0x96, 0xCC, 0x88, 0xC9, 0x04, 0xD1, 0xB4, + 0xCC, 0x8F, 0xC9, 0x04, 0xD1, 0xB5, 0xCC, 0x8F, + 0xC9, 0x04, 0xD3, 0x98, 0xCC, 0x88, 0xC9, 0x04, + 0xD3, 0x99, 0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA8, + 
0xCC, 0x88, 0xC9, 0x04, 0xD3, 0xA9, 0xCC, 0x88, + // Bytes 3840 - 387f + 0xC9, 0x04, 0xD8, 0xA7, 0xD9, 0x93, 0xC9, 0x04, + 0xD8, 0xA7, 0xD9, 0x94, 0xC9, 0x04, 0xD8, 0xA7, + 0xD9, 0x95, 0xB5, 0x04, 0xD9, 0x88, 0xD9, 0x94, + 0xC9, 0x04, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, 0x04, + 0xDB, 0x81, 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x92, + 0xD9, 0x94, 0xC9, 0x04, 0xDB, 0x95, 0xD9, 0x94, + 0xC9, 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x41, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + // Bytes 3880 - 38bf + 0x41, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x41, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x41, 0xCC, + 0x86, 0xCC, 0x80, 0xCA, 0x05, 0x41, 0xCC, 0x86, + 0xCC, 0x81, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, + 0x83, 0xCA, 0x05, 0x41, 0xCC, 0x86, 0xCC, 0x89, + 0xCA, 0x05, 0x41, 0xCC, 0x87, 0xCC, 0x84, 0xCA, + 0x05, 0x41, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, + 0x41, 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x41, + // Bytes 38c0 - 38ff + 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x41, 0xCC, + 0xA3, 0xCC, 0x86, 0xCA, 0x05, 0x43, 0xCC, 0xA7, + 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, + 0x80, 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x81, + 0xCA, 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x83, 0xCA, + 0x05, 0x45, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, + 0x45, 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x45, + 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x45, 0xCC, + // Bytes 3900 - 393f + 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x45, 0xCC, 0xA7, + 0xCC, 0x86, 0xCA, 0x05, 0x49, 0xCC, 0x88, 0xCC, + 0x81, 0xCA, 0x05, 0x4C, 0xCC, 0xA3, 0xCC, 0x84, + 0xCA, 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, + 0x05, 0x4F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, + 0x4F, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x4F, + 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x4F, 0xCC, + 0x83, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x83, + // Bytes 3940 - 397f + 0xCC, 0x84, 0xCA, 0x05, 0x4F, 0xCC, 0x83, 0xCC, + 0x88, 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x80, + 0xCA, 0x05, 0x4F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, + 0x05, 0x4F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + 0x4F, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x4F, + 0xCC, 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x4F, 0xCC, + 0x9B, 0xCC, 0x81, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, + 0xCC, 0x83, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, + // Bytes 3980 - 39bf + 0x89, 0xCA, 0x05, 0x4F, 0xCC, 0x9B, 0xCC, 0xA3, + 0xB6, 0x05, 0x4F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, + 0x05, 0x4F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, + 0x52, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x53, + 0xCC, 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, + 0x8C, 0xCC, 0x87, 0xCA, 0x05, 0x53, 0xCC, 0xA3, + 0xCC, 0x87, 0xCA, 0x05, 0x55, 0xCC, 0x83, 0xCC, + 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x84, 0xCC, 0x88, + // Bytes 39c0 - 39ff + 0xCA, 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x05, 0x55, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, + 0x55, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x55, + 0xCC, 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x55, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x55, 0xCC, 0x9B, + 0xCC, 0x81, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x55, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + // Bytes 3a00 - 3a3f + 0x05, 0x61, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x61, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x61, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x61, 0xCC, + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x61, 0xCC, 0x86, + 0xCC, 0x80, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, + 0x81, 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x83, + 0xCA, 0x05, 0x61, 0xCC, 0x86, 0xCC, 0x89, 0xCA, + 0x05, 0x61, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, + // Bytes 3a40 - 3a7f + 0x61, 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x61, + 0xCC, 0x8A, 0xCC, 0x81, 0xCA, 0x05, 0x61, 0xCC, 
+ 0xA3, 0xCC, 0x82, 0xCA, 0x05, 0x61, 0xCC, 0xA3, + 0xCC, 0x86, 0xCA, 0x05, 0x63, 0xCC, 0xA7, 0xCC, + 0x81, 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x80, + 0xCA, 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x81, 0xCA, + 0x05, 0x65, 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, + 0x65, 0xCC, 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x65, + // Bytes 3a80 - 3abf + 0xCC, 0x84, 0xCC, 0x80, 0xCA, 0x05, 0x65, 0xCC, + 0x84, 0xCC, 0x81, 0xCA, 0x05, 0x65, 0xCC, 0xA3, + 0xCC, 0x82, 0xCA, 0x05, 0x65, 0xCC, 0xA7, 0xCC, + 0x86, 0xCA, 0x05, 0x69, 0xCC, 0x88, 0xCC, 0x81, + 0xCA, 0x05, 0x6C, 0xCC, 0xA3, 0xCC, 0x84, 0xCA, + 0x05, 0x6F, 0xCC, 0x82, 0xCC, 0x80, 0xCA, 0x05, + 0x6F, 0xCC, 0x82, 0xCC, 0x81, 0xCA, 0x05, 0x6F, + 0xCC, 0x82, 0xCC, 0x83, 0xCA, 0x05, 0x6F, 0xCC, + // Bytes 3ac0 - 3aff + 0x82, 0xCC, 0x89, 0xCA, 0x05, 0x6F, 0xCC, 0x83, + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, + 0x84, 0xCA, 0x05, 0x6F, 0xCC, 0x83, 0xCC, 0x88, + 0xCA, 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x80, 0xCA, + 0x05, 0x6F, 0xCC, 0x84, 0xCC, 0x81, 0xCA, 0x05, + 0x6F, 0xCC, 0x87, 0xCC, 0x84, 0xCA, 0x05, 0x6F, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x6F, 0xCC, + 0x9B, 0xCC, 0x80, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, + // Bytes 3b00 - 3b3f + 0xCC, 0x81, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, + 0x83, 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0x89, + 0xCA, 0x05, 0x6F, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, + 0x05, 0x6F, 0xCC, 0xA3, 0xCC, 0x82, 0xCA, 0x05, + 0x6F, 0xCC, 0xA8, 0xCC, 0x84, 0xCA, 0x05, 0x72, + 0xCC, 0xA3, 0xCC, 0x84, 0xCA, 0x05, 0x73, 0xCC, + 0x81, 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0x8C, + 0xCC, 0x87, 0xCA, 0x05, 0x73, 0xCC, 0xA3, 0xCC, + // Bytes 3b40 - 3b7f + 0x87, 0xCA, 0x05, 0x75, 0xCC, 0x83, 0xCC, 0x81, + 0xCA, 0x05, 0x75, 0xCC, 0x84, 0xCC, 0x88, 0xCA, + 0x05, 0x75, 0xCC, 0x88, 0xCC, 0x80, 0xCA, 0x05, + 0x75, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x05, 0x75, + 0xCC, 0x88, 0xCC, 0x84, 0xCA, 0x05, 0x75, 0xCC, + 0x88, 0xCC, 0x8C, 0xCA, 0x05, 0x75, 0xCC, 0x9B, + 0xCC, 0x80, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, + 0x81, 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x83, + // Bytes 3b80 - 3bbf + 0xCA, 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0x89, 0xCA, + 0x05, 0x75, 0xCC, 0x9B, 0xCC, 0xA3, 0xB6, 0x05, + 0xE1, 0xBE, 0xBF, 0xCC, 0x80, 0xCA, 0x05, 0xE1, + 0xBE, 0xBF, 0xCC, 0x81, 0xCA, 0x05, 0xE1, 0xBE, + 0xBF, 0xCD, 0x82, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, + 0xCC, 0x80, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCC, + 0x81, 0xCA, 0x05, 0xE1, 0xBF, 0xBE, 0xCD, 0x82, + 0xCA, 0x05, 0xE2, 0x86, 0x90, 0xCC, 0xB8, 0x05, + // Bytes 3bc0 - 3bff + 0x05, 0xE2, 0x86, 0x92, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x86, 0x94, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x87, 0x90, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, + 0x92, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x87, 0x94, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x83, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x88, 0x88, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x88, 0x8B, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x88, 0xA3, 0xCC, 0xB8, 0x05, 0x05, + // Bytes 3c00 - 3c3f + 0xE2, 0x88, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x88, 0xBC, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x85, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x88, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0x8D, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xA1, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 0xA4, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xA5, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + // Bytes 3c40 - 3c7f + 0x89, 0xB2, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, + 0xB3, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB6, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xB7, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x89, 0xBA, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x89, 0xBB, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x89, 
0xBC, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x89, 0xBD, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0x82, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + // Bytes 3c80 - 3cbf + 0x83, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x86, + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x87, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0x91, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0x92, 0xCC, 0xB8, 0x05, + 0x05, 0xE2, 0x8A, 0xA2, 0xCC, 0xB8, 0x05, 0x05, + 0xE2, 0x8A, 0xA8, 0xCC, 0xB8, 0x05, 0x05, 0xE2, + 0x8A, 0xA9, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, + 0xAB, 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB2, + // Bytes 3cc0 - 3cff + 0xCC, 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB3, 0xCC, + 0xB8, 0x05, 0x05, 0xE2, 0x8A, 0xB4, 0xCC, 0xB8, + 0x05, 0x05, 0xE2, 0x8A, 0xB5, 0xCC, 0xB8, 0x05, + 0x06, 0xCE, 0x91, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x91, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + // Bytes 3d00 - 3d3f + 0x06, 0xCE, 0x95, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x97, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3d40 - 3d7f + 0x06, 0xCE, 0x99, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0x9F, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xA5, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + // Bytes 3d80 - 3dbf + 0x06, 0xCE, 0xA9, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB1, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + // Bytes 3dc0 - 3dff + 0x06, 0xCE, 0xB5, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB5, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB7, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCE, 0xB7, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + // Bytes 3e00 - 3e3f + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + // Bytes 3e40 - 3e7f + 0x06, 0xCE, 0xB9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCE, 0xBF, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x88, 0xCD, 0x82, 0xCA, + // Bytes 3e80 - 3ebf + 0x06, 0xCF, 
0x85, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x93, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x80, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCC, 0x81, 0xCA, + 0x06, 0xCF, 0x85, 0xCC, 0x94, 0xCD, 0x82, 0xCA, + 0x06, 0xCF, 0x89, 0xCC, 0x80, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x81, 0xCD, 0x85, 0xDA, + // Bytes 3ec0 - 3eff + 0x06, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCC, 0x94, 0xCD, 0x85, 0xDA, + 0x06, 0xCF, 0x89, 0xCD, 0x82, 0xCD, 0x85, 0xDA, + 0x06, 0xE0, 0xA4, 0xA8, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB0, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xA4, 0xB3, 0xE0, 0xA4, 0xBC, 0x09, + 0x06, 0xE0, 0xB1, 0x86, 0xE0, 0xB1, 0x96, 0x85, + 0x06, 0xE0, 0xB7, 0x99, 0xE0, 0xB7, 0x8A, 0x11, + // Bytes 3f00 - 3f3f + 0x06, 0xE3, 0x81, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x91, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x93, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x95, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x97, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f40 - 3f7f + 0x06, 0xE3, 0x81, 0x99, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9B, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0x9F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA1, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA4, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xA8, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 3f80 - 3fbf + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xAF, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB2, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB5, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xB8, 0xE3, 0x82, 0x9A, 0x0D, + // Bytes 3fc0 - 3fff + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x81, 0xBB, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x82, 0x9D, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xA6, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4000 - 403f + 0x06, 0xE3, 0x82, 0xB3, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB5, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB7, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xB9, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBB, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x82, 0xBF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x81, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4040 - 407f + 0x06, 0xE3, 0x83, 0x84, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x86, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x88, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x8F, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x92, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 4080 - 40bf + 0x06, 0xE3, 0x83, 0x95, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x98, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 
0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0x9B, 0xE3, 0x82, 0x9A, 0x0D, + 0x06, 0xE3, 0x83, 0xAF, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB0, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xB1, 0xE3, 0x82, 0x99, 0x0D, + // Bytes 40c0 - 40ff + 0x06, 0xE3, 0x83, 0xB2, 0xE3, 0x82, 0x99, 0x0D, + 0x06, 0xE3, 0x83, 0xBD, 0xE3, 0x82, 0x99, 0x0D, + 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + // Bytes 4100 - 413f + 0x85, 0xDB, 0x08, 0xCE, 0x91, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0x97, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + // Bytes 4140 - 417f + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xA9, 0xCC, 0x94, 0xCD, + // Bytes 4180 - 41bf + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB1, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + // Bytes 41c0 - 41ff + 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x93, 0xCC, + 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, + 0x93, 0xCD, 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCD, 0x85, 0xDB, + 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCD, + 0x85, 0xDB, 0x08, 0xCE, 0xB7, 0xCC, 0x94, 0xCD, + 0x82, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + // Bytes 4200 - 423f + 0x93, 0xCC, 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCD, 0x85, 0xDB, + 0x08, 0xCF, 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCD, + 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, 0x94, 0xCC, + 0x80, 0xCD, 0x85, 0xDB, 0x08, 0xCF, 0x89, 0xCC, + 0x94, 0xCC, 0x81, 0xCD, 0x85, 0xDB, 0x08, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCD, 0x85, 0xDB, + 0x08, 0xF0, 0x91, 0x82, 0x99, 0xF0, 0x91, 0x82, + // Bytes 4240 - 427f + 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, 0x9B, 0xF0, + 0x91, 0x82, 0xBA, 0x09, 0x08, 0xF0, 0x91, 0x82, + 0xA5, 0xF0, 0x91, 0x82, 0xBA, 0x09, 0x42, 0xC2, + 0xB4, 0x01, 0x43, 0x20, 0xCC, 0x81, 0xC9, 0x43, + 0x20, 0xCC, 0x83, 0xC9, 0x43, 0x20, 0xCC, 0x84, + 0xC9, 0x43, 0x20, 0xCC, 0x85, 0xC9, 0x43, 0x20, + 0xCC, 0x86, 0xC9, 0x43, 0x20, 0xCC, 0x87, 0xC9, + 0x43, 0x20, 0xCC, 0x88, 0xC9, 0x43, 0x20, 0xCC, + // Bytes 4280 - 42bf + 0x8A, 0xC9, 0x43, 0x20, 0xCC, 0x8B, 0xC9, 0x43, + 0x20, 0xCC, 0x93, 0xC9, 0x43, 0x20, 0xCC, 0x94, + 0xC9, 0x43, 0x20, 0xCC, 0xA7, 0xA5, 0x43, 0x20, + 0xCC, 0xA8, 0xA5, 0x43, 0x20, 0xCC, 0xB3, 0xB5, + 0x43, 0x20, 0xCD, 0x82, 0xC9, 0x43, 0x20, 0xCD, + 0x85, 0xD9, 0x43, 0x20, 0xD9, 0x8B, 0x59, 0x43, + 0x20, 0xD9, 0x8C, 0x5D, 0x43, 0x20, 0xD9, 0x8D, + 
0x61, 0x43, 0x20, 0xD9, 0x8E, 0x65, 0x43, 0x20, + // Bytes 42c0 - 42ff + 0xD9, 0x8F, 0x69, 0x43, 0x20, 0xD9, 0x90, 0x6D, + 0x43, 0x20, 0xD9, 0x91, 0x71, 0x43, 0x20, 0xD9, + 0x92, 0x75, 0x43, 0x41, 0xCC, 0x8A, 0xC9, 0x43, + 0x73, 0xCC, 0x87, 0xC9, 0x44, 0x20, 0xE3, 0x82, + 0x99, 0x0D, 0x44, 0x20, 0xE3, 0x82, 0x9A, 0x0D, + 0x44, 0xC2, 0xA8, 0xCC, 0x81, 0xCA, 0x44, 0xCE, + 0x91, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0x95, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0x97, 0xCC, 0x81, 0xC9, + // Bytes 4300 - 433f + 0x44, 0xCE, 0x99, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0x9F, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xA5, 0xCC, 0x88, 0xC9, + 0x44, 0xCE, 0xA9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xB1, 0xCC, 0x81, 0xC9, 0x44, 0xCE, 0xB5, 0xCC, + 0x81, 0xC9, 0x44, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, + 0x44, 0xCE, 0xB9, 0xCC, 0x81, 0xC9, 0x44, 0xCE, + 0xBF, 0xCC, 0x81, 0xC9, 0x44, 0xCF, 0x85, 0xCC, + // Bytes 4340 - 437f + 0x81, 0xC9, 0x44, 0xCF, 0x89, 0xCC, 0x81, 0xC9, + 0x44, 0xD7, 0x90, 0xD6, 0xB7, 0x31, 0x44, 0xD7, + 0x90, 0xD6, 0xB8, 0x35, 0x44, 0xD7, 0x90, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x91, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x91, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x92, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x93, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x94, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x95, 0xD6, 0xB9, 0x39, 0x44, 0xD7, + // Bytes 4380 - 43bf + 0x95, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x96, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x98, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x99, 0xD6, 0xB4, 0x25, 0x44, 0xD7, + 0x99, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9A, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0x9B, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0x9B, 0xD6, 0xBF, 0x49, 0x44, 0xD7, + 0x9C, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0x9E, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA0, 0xD6, 0xBC, 0x41, + // Bytes 43c0 - 43ff + 0x44, 0xD7, 0xA1, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA3, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA4, 0xD6, 0xBF, 0x49, + 0x44, 0xD7, 0xA6, 0xD6, 0xBC, 0x41, 0x44, 0xD7, + 0xA7, 0xD6, 0xBC, 0x41, 0x44, 0xD7, 0xA8, 0xD6, + 0xBC, 0x41, 0x44, 0xD7, 0xA9, 0xD6, 0xBC, 0x41, + 0x44, 0xD7, 0xA9, 0xD7, 0x81, 0x4D, 0x44, 0xD7, + 0xA9, 0xD7, 0x82, 0x51, 0x44, 0xD7, 0xAA, 0xD6, + // Bytes 4400 - 443f + 0xBC, 0x41, 0x44, 0xD7, 0xB2, 0xD6, 0xB7, 0x31, + 0x44, 0xD8, 0xA7, 0xD9, 0x8B, 0x59, 0x44, 0xD8, + 0xA7, 0xD9, 0x93, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, + 0x94, 0xC9, 0x44, 0xD8, 0xA7, 0xD9, 0x95, 0xB5, + 0x44, 0xD8, 0xB0, 0xD9, 0xB0, 0x79, 0x44, 0xD8, + 0xB1, 0xD9, 0xB0, 0x79, 0x44, 0xD9, 0x80, 0xD9, + 0x8B, 0x59, 0x44, 0xD9, 0x80, 0xD9, 0x8E, 0x65, + 0x44, 0xD9, 0x80, 0xD9, 0x8F, 0x69, 0x44, 0xD9, + // Bytes 4440 - 447f + 0x80, 0xD9, 0x90, 0x6D, 0x44, 0xD9, 0x80, 0xD9, + 0x91, 0x71, 0x44, 0xD9, 0x80, 0xD9, 0x92, 0x75, + 0x44, 0xD9, 0x87, 0xD9, 0xB0, 0x79, 0x44, 0xD9, + 0x88, 0xD9, 0x94, 0xC9, 0x44, 0xD9, 0x89, 0xD9, + 0xB0, 0x79, 0x44, 0xD9, 0x8A, 0xD9, 0x94, 0xC9, + 0x44, 0xDB, 0x92, 0xD9, 0x94, 0xC9, 0x44, 0xDB, + 0x95, 0xD9, 0x94, 0xC9, 0x45, 0x20, 0xCC, 0x88, + 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCC, + // Bytes 4480 - 44bf + 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x88, 0xCD, 0x82, + 0xCA, 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x80, 0xCA, + 0x45, 0x20, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x45, + 0x20, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x45, 0x20, + 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x45, 0x20, 0xCC, + 0x94, 0xCC, 0x81, 0xCA, 0x45, 0x20, 0xCC, 0x94, + 0xCD, 0x82, 0xCA, 0x45, 0x20, 0xD9, 0x8C, 0xD9, + 0x91, 0x72, 0x45, 0x20, 0xD9, 0x8D, 0xD9, 0x91, + // Bytes 44c0 - 44ff + 0x72, 0x45, 0x20, 0xD9, 0x8E, 0xD9, 0x91, 0x72, + 0x45, 0x20, 0xD9, 0x8F, 0xD9, 0x91, 0x72, 0x45, 
+ 0x20, 0xD9, 0x90, 0xD9, 0x91, 0x72, 0x45, 0x20, + 0xD9, 0x91, 0xD9, 0xB0, 0x7A, 0x45, 0xE2, 0xAB, + 0x9D, 0xCC, 0xB8, 0x05, 0x46, 0xCE, 0xB9, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xCF, 0x85, 0xCC, + 0x88, 0xCC, 0x81, 0xCA, 0x46, 0xD7, 0xA9, 0xD6, + 0xBC, 0xD7, 0x81, 0x4E, 0x46, 0xD7, 0xA9, 0xD6, + // Bytes 4500 - 453f + 0xBC, 0xD7, 0x82, 0x52, 0x46, 0xD9, 0x80, 0xD9, + 0x8E, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x8F, 0xD9, 0x91, 0x72, 0x46, 0xD9, 0x80, 0xD9, + 0x90, 0xD9, 0x91, 0x72, 0x46, 0xE0, 0xA4, 0x95, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x96, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x97, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0x9C, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA1, + // Bytes 4540 - 457f + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xA2, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAB, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA4, 0xAF, + 0xE0, 0xA4, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA1, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xA2, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA6, 0xAF, + 0xE0, 0xA6, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x96, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x97, + // Bytes 4580 - 45bf + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0x9C, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xAB, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB2, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xA8, 0xB8, + 0xE0, 0xA8, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA1, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xAC, 0xA2, + 0xE0, 0xAC, 0xBC, 0x09, 0x46, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE0, 0xBE, 0xB3, + // Bytes 45c0 - 45ff + 0xE0, 0xBE, 0x80, 0x9D, 0x46, 0xE3, 0x83, 0x86, + 0xE3, 0x82, 0x99, 0x0D, 0x48, 0xF0, 0x9D, 0x85, + 0x97, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, 0x48, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xAD, + 0x48, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xAD, 0x48, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + 0x9D, 0x85, 0xA5, 0xAD, 0x49, 0xE0, 0xBE, 0xB2, + 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x49, + // Bytes 4600 - 463f + 0xE0, 0xBE, 0xB3, 0xE0, 0xBD, 0xB1, 0xE0, 0xBE, + 0x80, 0x9E, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, + 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, + 0x9D, 0x85, 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xB0, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, + 0x98, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + // Bytes 4640 - 467f + 0xB1, 0xAE, 0x4C, 0xF0, 0x9D, 0x85, 0x98, 0xF0, + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xB2, 0xAE, + 0x4C, 0xF0, 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, + 0xA5, 0xF0, 0x9D, 0x85, 0xAE, 0xAE, 0x4C, 0xF0, + 0x9D, 0x86, 0xB9, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, + 0x9D, 0x85, 0xAF, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, + 0xBA, 0xF0, 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, + 0xAE, 0xAE, 0x4C, 0xF0, 0x9D, 0x86, 0xBA, 0xF0, + // Bytes 4680 - 46bf + 0x9D, 0x85, 0xA5, 0xF0, 0x9D, 0x85, 0xAF, 0xAE, + 0x83, 0x41, 0xCC, 0x82, 0xC9, 0x83, 0x41, 0xCC, + 0x86, 0xC9, 0x83, 0x41, 0xCC, 0x87, 0xC9, 0x83, + 0x41, 0xCC, 0x88, 0xC9, 0x83, 0x41, 0xCC, 0x8A, + 0xC9, 0x83, 0x41, 0xCC, 0xA3, 0xB5, 0x83, 0x43, + 0xCC, 0xA7, 0xA5, 0x83, 0x45, 0xCC, 0x82, 0xC9, + 0x83, 0x45, 0xCC, 0x84, 0xC9, 0x83, 0x45, 0xCC, + 0xA3, 0xB5, 0x83, 0x45, 0xCC, 0xA7, 0xA5, 0x83, + // Bytes 46c0 - 46ff + 0x49, 0xCC, 0x88, 0xC9, 0x83, 0x4C, 0xCC, 0xA3, + 0xB5, 0x83, 0x4F, 0xCC, 0x82, 0xC9, 0x83, 0x4F, + 0xCC, 0x83, 0xC9, 0x83, 0x4F, 0xCC, 0x84, 0xC9, + 0x83, 0x4F, 0xCC, 0x87, 0xC9, 0x83, 0x4F, 0xCC, + 0x88, 0xC9, 0x83, 0x4F, 0xCC, 0x9B, 0xAD, 0x83, + 0x4F, 0xCC, 0xA3, 
0xB5, 0x83, 0x4F, 0xCC, 0xA8, + 0xA5, 0x83, 0x52, 0xCC, 0xA3, 0xB5, 0x83, 0x53, + 0xCC, 0x81, 0xC9, 0x83, 0x53, 0xCC, 0x8C, 0xC9, + // Bytes 4700 - 473f + 0x83, 0x53, 0xCC, 0xA3, 0xB5, 0x83, 0x55, 0xCC, + 0x83, 0xC9, 0x83, 0x55, 0xCC, 0x84, 0xC9, 0x83, + 0x55, 0xCC, 0x88, 0xC9, 0x83, 0x55, 0xCC, 0x9B, + 0xAD, 0x83, 0x61, 0xCC, 0x82, 0xC9, 0x83, 0x61, + 0xCC, 0x86, 0xC9, 0x83, 0x61, 0xCC, 0x87, 0xC9, + 0x83, 0x61, 0xCC, 0x88, 0xC9, 0x83, 0x61, 0xCC, + 0x8A, 0xC9, 0x83, 0x61, 0xCC, 0xA3, 0xB5, 0x83, + 0x63, 0xCC, 0xA7, 0xA5, 0x83, 0x65, 0xCC, 0x82, + // Bytes 4740 - 477f + 0xC9, 0x83, 0x65, 0xCC, 0x84, 0xC9, 0x83, 0x65, + 0xCC, 0xA3, 0xB5, 0x83, 0x65, 0xCC, 0xA7, 0xA5, + 0x83, 0x69, 0xCC, 0x88, 0xC9, 0x83, 0x6C, 0xCC, + 0xA3, 0xB5, 0x83, 0x6F, 0xCC, 0x82, 0xC9, 0x83, + 0x6F, 0xCC, 0x83, 0xC9, 0x83, 0x6F, 0xCC, 0x84, + 0xC9, 0x83, 0x6F, 0xCC, 0x87, 0xC9, 0x83, 0x6F, + 0xCC, 0x88, 0xC9, 0x83, 0x6F, 0xCC, 0x9B, 0xAD, + 0x83, 0x6F, 0xCC, 0xA3, 0xB5, 0x83, 0x6F, 0xCC, + // Bytes 4780 - 47bf + 0xA8, 0xA5, 0x83, 0x72, 0xCC, 0xA3, 0xB5, 0x83, + 0x73, 0xCC, 0x81, 0xC9, 0x83, 0x73, 0xCC, 0x8C, + 0xC9, 0x83, 0x73, 0xCC, 0xA3, 0xB5, 0x83, 0x75, + 0xCC, 0x83, 0xC9, 0x83, 0x75, 0xCC, 0x84, 0xC9, + 0x83, 0x75, 0xCC, 0x88, 0xC9, 0x83, 0x75, 0xCC, + 0x9B, 0xAD, 0x84, 0xCE, 0x91, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x91, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x95, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x95, 0xCC, + // Bytes 47c0 - 47ff + 0x94, 0xC9, 0x84, 0xCE, 0x97, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x97, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0x99, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0x99, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0x9F, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0x9F, 0xCC, 0x94, 0xC9, 0x84, 0xCE, + 0xA5, 0xCC, 0x94, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xA9, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCC, 0x80, 0xC9, 0x84, 0xCE, + // Bytes 4800 - 483f + 0xB1, 0xCC, 0x81, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, + 0x93, 0xC9, 0x84, 0xCE, 0xB1, 0xCC, 0x94, 0xC9, + 0x84, 0xCE, 0xB1, 0xCD, 0x82, 0xC9, 0x84, 0xCE, + 0xB5, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB5, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, 0x80, 0xC9, + 0x84, 0xCE, 0xB7, 0xCC, 0x81, 0xC9, 0x84, 0xCE, + 0xB7, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB7, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xB7, 0xCD, 0x82, 0xC9, + // Bytes 4840 - 487f + 0x84, 0xCE, 0xB9, 0xCC, 0x88, 0xC9, 0x84, 0xCE, + 0xB9, 0xCC, 0x93, 0xC9, 0x84, 0xCE, 0xB9, 0xCC, + 0x94, 0xC9, 0x84, 0xCE, 0xBF, 0xCC, 0x93, 0xC9, + 0x84, 0xCE, 0xBF, 0xCC, 0x94, 0xC9, 0x84, 0xCF, + 0x85, 0xCC, 0x88, 0xC9, 0x84, 0xCF, 0x85, 0xCC, + 0x93, 0xC9, 0x84, 0xCF, 0x85, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCC, 0x80, 0xC9, 0x84, 0xCF, + 0x89, 0xCC, 0x81, 0xC9, 0x84, 0xCF, 0x89, 0xCC, + // Bytes 4880 - 48bf + 0x93, 0xC9, 0x84, 0xCF, 0x89, 0xCC, 0x94, 0xC9, + 0x84, 0xCF, 0x89, 0xCD, 0x82, 0xC9, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x91, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + // Bytes 48c0 - 48ff + 0x97, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0x97, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + // Bytes 4900 - 493f + 0xA9, 0xCC, 
0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xA9, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + // Bytes 4940 - 497f + 0xB1, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB1, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCE, + 0xB7, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + // Bytes 4980 - 49bf + 0x89, 0xCC, 0x93, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x93, 0xCD, 0x82, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x80, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCC, 0x81, 0xCA, 0x86, 0xCF, + 0x89, 0xCC, 0x94, 0xCD, 0x82, 0xCA, 0x42, 0xCC, + 0x80, 0xC9, 0x32, 0x42, 0xCC, 0x81, 0xC9, 0x32, + 0x42, 0xCC, 0x93, 0xC9, 0x32, 0x43, 0xE1, 0x85, + // Bytes 49c0 - 49ff + 0xA1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA5, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xA6, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xA7, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xA8, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xA9, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAA, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAB, 0x01, 0x00, 0x43, + // Bytes 4a00 - 4a3f + 0xE1, 0x85, 0xAC, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xAE, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xAF, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB1, 0x01, 0x00, 0x43, 0xE1, 0x85, 0xB2, 0x01, + 0x00, 0x43, 0xE1, 0x85, 0xB3, 0x01, 0x00, 0x43, + 0xE1, 0x85, 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x85, + 0xB5, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xAA, 0x01, + // Bytes 4a40 - 4a7f + 0x00, 0x43, 0xE1, 0x86, 0xAC, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xAD, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB0, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB1, 0x01, + 0x00, 0x43, 0xE1, 0x86, 0xB2, 0x01, 0x00, 0x43, + 0xE1, 0x86, 0xB3, 0x01, 0x00, 0x43, 0xE1, 0x86, + 0xB4, 0x01, 0x00, 0x43, 0xE1, 0x86, 0xB5, 0x01, + 0x00, 0x44, 0xCC, 0x88, 0xCC, 0x81, 0xCA, 0x32, + 0x43, 0xE3, 0x82, 0x99, 0x0D, 0x03, 0x43, 0xE3, + // Bytes 4a80 - 4abf + 0x82, 0x9A, 0x0D, 0x03, 0x46, 0xE0, 0xBD, 0xB1, + 0xE0, 0xBD, 0xB2, 0x9E, 0x26, 0x46, 0xE0, 0xBD, + 0xB1, 0xE0, 0xBD, 0xB4, 0xA2, 0x26, 0x46, 0xE0, + 0xBD, 0xB1, 0xE0, 0xBE, 0x80, 0x9E, 0x26, 0x00, + 0x01, +} + +// lookup returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookup(s []byte) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. 
+ } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. +func (t *nfcTrie) lookupUnsafe(s []byte) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// lookupString returns the trie value for the first UTF-8 encoding in s and +// the width in bytes of this encoding. The size will be 0 if s does not +// hold enough bytes to complete the encoding. len(s) must be greater than 0. +func (t *nfcTrie) lookupString(s string) (v uint16, sz int) { + c0 := s[0] + switch { + case c0 < 0x80: // is ASCII + return nfcValues[c0], 1 + case c0 < 0xC2: + return 0, 1 // Illegal UTF-8: not a starter, not ASCII. + case c0 < 0xE0: // 2-byte UTF-8 + if len(s) < 2 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c1), 2 + case c0 < 0xF0: // 3-byte UTF-8 + if len(s) < 3 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c2), 3 + case c0 < 0xF8: // 4-byte UTF-8 + if len(s) < 4 { + return 0, 0 + } + i := nfcIndex[c0] + c1 := s[1] + if c1 < 0x80 || 0xC0 <= c1 { + return 0, 1 // Illegal UTF-8: not a continuation byte. + } + o := uint32(i)<<6 + uint32(c1) + i = nfcIndex[o] + c2 := s[2] + if c2 < 0x80 || 0xC0 <= c2 { + return 0, 2 // Illegal UTF-8: not a continuation byte. + } + o = uint32(i)<<6 + uint32(c2) + i = nfcIndex[o] + c3 := s[3] + if c3 < 0x80 || 0xC0 <= c3 { + return 0, 3 // Illegal UTF-8: not a continuation byte. + } + return t.lookupValue(uint32(i), c3), 4 + } + // Illegal rune + return 0, 1 +} + +// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s. +// s must start with a full and valid UTF-8 encoded rune. 
+func (t *nfcTrie) lookupStringUnsafe(s string) uint16 { + c0 := s[0] + if c0 < 0x80 { // is ASCII + return nfcValues[c0] + } + i := nfcIndex[c0] + if c0 < 0xE0 { // 2-byte UTF-8 + return t.lookupValue(uint32(i), s[1]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[1])] + if c0 < 0xF0 { // 3-byte UTF-8 + return t.lookupValue(uint32(i), s[2]) + } + i = nfcIndex[uint32(i)<<6+uint32(s[2])] + if c0 < 0xF8 { // 4-byte UTF-8 + return t.lookupValue(uint32(i), s[3]) + } + return 0 +} + +// nfcTrie. Total size: 10586 bytes (10.34 KiB). Checksum: dd926e82067bee11. +type nfcTrie struct{} + +func newNfcTrie(i int) *nfcTrie { + return &nfcTrie{} +} + +// lookupValue determines the type of block n and looks up the value for b. +func (t *nfcTrie) lookupValue(n uint32, b byte) uint16 { + switch { + case n < 46: + return uint16(nfcValues[n<<6+uint32(b)]) + default: + n -= 46 + return uint16(nfcSparse.lookup(n, b)) + } +} + +// nfcValues: 48 blocks, 3072 entries, 6144 bytes +// The third block is the zero block. +var nfcValues = [3072]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 
0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, + // Block 0x5, offset 0x140 + 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0xa000, + // Block 0x6, offset 0x180 + 0x184: 0x8100, 0x185: 0x8100, + 0x186: 0x8100, + 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 
0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x8100, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x8100, 0x285: 0x35a1, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x3721, 0x2c1: 0x372d, 0x2c3: 0x371b, + 0x2c6: 0xa000, 0x2c7: 0x3709, + 0x2cc: 0x375d, 0x2cd: 0x3745, 0x2ce: 0x376f, 0x2d0: 0xa000, + 0x2d3: 0xa000, 0x2d5: 0xa000, 0x2d6: 0xa000, 0x2d7: 0xa000, + 0x2d8: 0xa000, 0x2d9: 0x3751, 0x2da: 0xa000, + 0x2de: 0xa000, 0x2e3: 0xa000, + 0x2e7: 0xa000, + 0x2eb: 0xa000, 0x2ed: 0xa000, + 0x2f0: 0xa000, 0x2f3: 0xa000, 0x2f5: 0xa000, + 0x2f6: 0xa000, 0x2f7: 0xa000, 0x2f8: 0xa000, 0x2f9: 0x37d5, 0x2fa: 0xa000, + 0x2fe: 0xa000, + // Block 0xc, offset 0x300 + 0x301: 0x3733, 0x302: 0x37b7, + 0x310: 0x370f, 0x311: 0x3793, + 0x312: 0x3715, 0x313: 0x3799, 0x316: 0x3727, 0x317: 0x37ab, + 0x318: 0xa000, 0x319: 0xa000, 0x31a: 0x3829, 0x31b: 0x382f, 0x31c: 0x3739, 0x31d: 0x37bd, + 0x31e: 0x373f, 0x31f: 0x37c3, 0x322: 0x374b, 0x323: 0x37cf, + 0x324: 0x3757, 0x325: 0x37db, 0x326: 0x3763, 0x327: 0x37e7, 0x328: 0xa000, 0x329: 0xa000, + 0x32a: 0x3835, 0x32b: 0x383b, 0x32c: 0x378d, 0x32d: 0x3811, 0x32e: 0x3769, 0x32f: 0x37ed, + 0x330: 0x3775, 0x331: 0x37f9, 0x332: 0x377b, 0x333: 0x37ff, 0x334: 0x3781, 0x335: 0x3805, + 0x338: 0x3787, 0x339: 0x380b, + // Block 0xd, offset 0x340 + 0x351: 0x812d, + 0x352: 0x8132, 0x353: 0x8132, 0x354: 0x8132, 0x355: 0x8132, 0x356: 0x812d, 0x357: 0x8132, + 0x358: 0x8132, 0x359: 0x8132, 0x35a: 0x812e, 0x35b: 0x812d, 0x35c: 0x8132, 0x35d: 0x8132, + 0x35e: 0x8132, 0x35f: 0x8132, 0x360: 0x8132, 0x361: 0x8132, 0x362: 0x812d, 0x363: 0x812d, + 0x364: 0x812d, 0x365: 0x812d, 0x366: 0x812d, 0x367: 0x812d, 0x368: 0x8132, 0x369: 0x8132, + 0x36a: 0x812d, 0x36b: 0x8132, 0x36c: 0x8132, 0x36d: 0x812e, 0x36e: 0x8131, 0x36f: 0x8132, + 0x370: 0x8105, 0x371: 0x8106, 0x372: 0x8107, 0x373: 0x8108, 0x374: 0x8109, 0x375: 0x810a, + 0x376: 0x810b, 0x377: 0x810c, 0x378: 0x810d, 0x379: 0x810e, 0x37a: 0x810e, 0x37b: 0x810f, + 0x37c: 0x8110, 0x37d: 0x8111, 0x37f: 0x8112, + // Block 0xe, offset 0x380 + 0x388: 0xa000, 0x38a: 0xa000, 0x38b: 0x8116, + 0x38c: 0x8117, 0x38d: 0x8118, 0x38e: 0x8119, 0x38f: 0x811a, 0x390: 0x811b, 0x391: 0x811c, + 0x392: 0x811d, 0x393: 0x9932, 0x394: 0x9932, 0x395: 0x992d, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x8132, 0x39b: 0x8132, 0x39c: 0x812d, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x812d, + 0x3b0: 0x811e, + // Block 0xf, offset 0x3c0 + 0x3d3: 0x812d, 0x3d4: 0x8132, 0x3d5: 0x8132, 0x3d6: 0x8132, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 
0x3db: 0x8132, 0x3dc: 0x8132, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x8132, 0x3e0: 0x8132, 0x3e1: 0x8132, 0x3e3: 0x812d, + 0x3e4: 0x8132, 0x3e5: 0x8132, 0x3e6: 0x812d, 0x3e7: 0x8132, 0x3e8: 0x8132, 0x3e9: 0x812d, + 0x3ea: 0x8132, 0x3eb: 0x8132, 0x3ec: 0x8132, 0x3ed: 0x812d, 0x3ee: 0x812d, 0x3ef: 0x812d, + 0x3f0: 0x8116, 0x3f1: 0x8117, 0x3f2: 0x8118, 0x3f3: 0x8132, 0x3f4: 0x8132, 0x3f5: 0x8132, + 0x3f6: 0x812d, 0x3f7: 0x8132, 0x3f8: 0x8132, 0x3f9: 0x812d, 0x3fa: 0x812d, 0x3fb: 0x8132, + 0x3fc: 0x8132, 0x3fd: 0x8132, 0x3fe: 0x8132, 0x3ff: 0x8132, + // Block 0x10, offset 0x400 + 0x405: 0xa000, + 0x406: 0x2d26, 0x407: 0xa000, 0x408: 0x2d2e, 0x409: 0xa000, 0x40a: 0x2d36, 0x40b: 0xa000, + 0x40c: 0x2d3e, 0x40d: 0xa000, 0x40e: 0x2d46, 0x411: 0xa000, + 0x412: 0x2d4e, + 0x434: 0x8102, 0x435: 0x9900, + 0x43a: 0xa000, 0x43b: 0x2d56, + 0x43c: 0xa000, 0x43d: 0x2d5e, 0x43e: 0xa000, 0x43f: 0xa000, + // Block 0x11, offset 0x440 + 0x440: 0x8132, 0x441: 0x8132, 0x442: 0x812d, 0x443: 0x8132, 0x444: 0x8132, 0x445: 0x8132, + 0x446: 0x8132, 0x447: 0x8132, 0x448: 0x8132, 0x449: 0x8132, 0x44a: 0x812d, 0x44b: 0x8132, + 0x44c: 0x8132, 0x44d: 0x8135, 0x44e: 0x812a, 0x44f: 0x812d, 0x450: 0x8129, 0x451: 0x8132, + 0x452: 0x8132, 0x453: 0x8132, 0x454: 0x8132, 0x455: 0x8132, 0x456: 0x8132, 0x457: 0x8132, + 0x458: 0x8132, 0x459: 0x8132, 0x45a: 0x8132, 0x45b: 0x8132, 0x45c: 0x8132, 0x45d: 0x8132, + 0x45e: 0x8132, 0x45f: 0x8132, 0x460: 0x8132, 0x461: 0x8132, 0x462: 0x8132, 0x463: 0x8132, + 0x464: 0x8132, 0x465: 0x8132, 0x466: 0x8132, 0x467: 0x8132, 0x468: 0x8132, 0x469: 0x8132, + 0x46a: 0x8132, 0x46b: 0x8132, 0x46c: 0x8132, 0x46d: 0x8132, 0x46e: 0x8132, 0x46f: 0x8132, + 0x470: 0x8132, 0x471: 0x8132, 0x472: 0x8132, 0x473: 0x8132, 0x474: 0x8132, 0x475: 0x8132, + 0x476: 0x8133, 0x477: 0x8131, 0x478: 0x8131, 0x479: 0x812d, 0x47b: 0x8132, + 0x47c: 0x8134, 0x47d: 0x812d, 0x47e: 0x8132, 0x47f: 0x812d, + // Block 0x12, offset 0x480 + 0x480: 0x2f97, 0x481: 0x32a3, 0x482: 0x2fa1, 0x483: 0x32ad, 0x484: 0x2fa6, 0x485: 0x32b2, + 0x486: 0x2fab, 0x487: 0x32b7, 0x488: 0x38cc, 0x489: 0x3a5b, 0x48a: 0x2fc4, 0x48b: 0x32d0, + 0x48c: 0x2fce, 0x48d: 0x32da, 0x48e: 0x2fdd, 0x48f: 0x32e9, 0x490: 0x2fd3, 0x491: 0x32df, + 0x492: 0x2fd8, 0x493: 0x32e4, 0x494: 0x38ef, 0x495: 0x3a7e, 0x496: 0x38f6, 0x497: 0x3a85, + 0x498: 0x3019, 0x499: 0x3325, 0x49a: 0x301e, 0x49b: 0x332a, 0x49c: 0x3904, 0x49d: 0x3a93, + 0x49e: 0x3023, 0x49f: 0x332f, 0x4a0: 0x3032, 0x4a1: 0x333e, 0x4a2: 0x3050, 0x4a3: 0x335c, + 0x4a4: 0x305f, 0x4a5: 0x336b, 0x4a6: 0x3055, 0x4a7: 0x3361, 0x4a8: 0x3064, 0x4a9: 0x3370, + 0x4aa: 0x3069, 0x4ab: 0x3375, 0x4ac: 0x30af, 0x4ad: 0x33bb, 0x4ae: 0x390b, 0x4af: 0x3a9a, + 0x4b0: 0x30b9, 0x4b1: 0x33ca, 0x4b2: 0x30c3, 0x4b3: 0x33d4, 0x4b4: 0x30cd, 0x4b5: 0x33de, + 0x4b6: 0x46c4, 0x4b7: 0x4755, 0x4b8: 0x3912, 0x4b9: 0x3aa1, 0x4ba: 0x30e6, 0x4bb: 0x33f7, + 0x4bc: 0x30e1, 0x4bd: 0x33f2, 0x4be: 0x30eb, 0x4bf: 0x33fc, + // Block 0x13, offset 0x4c0 + 0x4c0: 0x30f0, 0x4c1: 0x3401, 0x4c2: 0x30f5, 0x4c3: 0x3406, 0x4c4: 0x3109, 0x4c5: 0x341a, + 0x4c6: 0x3113, 0x4c7: 0x3424, 0x4c8: 0x3122, 0x4c9: 0x3433, 0x4ca: 0x311d, 0x4cb: 0x342e, + 0x4cc: 0x3935, 0x4cd: 0x3ac4, 0x4ce: 0x3943, 0x4cf: 0x3ad2, 0x4d0: 0x394a, 0x4d1: 0x3ad9, + 0x4d2: 0x3951, 0x4d3: 0x3ae0, 0x4d4: 0x314f, 0x4d5: 0x3460, 0x4d6: 0x3154, 0x4d7: 0x3465, + 0x4d8: 0x315e, 0x4d9: 0x346f, 0x4da: 0x46f1, 0x4db: 0x4782, 0x4dc: 0x3997, 0x4dd: 0x3b26, + 0x4de: 0x3177, 0x4df: 0x3488, 0x4e0: 0x3181, 0x4e1: 0x3492, 0x4e2: 0x4700, 0x4e3: 0x4791, + 0x4e4: 0x399e, 0x4e5: 0x3b2d, 0x4e6: 0x39a5, 0x4e7: 0x3b34, 
0x4e8: 0x39ac, 0x4e9: 0x3b3b, + 0x4ea: 0x3190, 0x4eb: 0x34a1, 0x4ec: 0x319a, 0x4ed: 0x34b0, 0x4ee: 0x31ae, 0x4ef: 0x34c4, + 0x4f0: 0x31a9, 0x4f1: 0x34bf, 0x4f2: 0x31ea, 0x4f3: 0x3500, 0x4f4: 0x31f9, 0x4f5: 0x350f, + 0x4f6: 0x31f4, 0x4f7: 0x350a, 0x4f8: 0x39b3, 0x4f9: 0x3b42, 0x4fa: 0x39ba, 0x4fb: 0x3b49, + 0x4fc: 0x31fe, 0x4fd: 0x3514, 0x4fe: 0x3203, 0x4ff: 0x3519, + // Block 0x14, offset 0x500 + 0x500: 0x3208, 0x501: 0x351e, 0x502: 0x320d, 0x503: 0x3523, 0x504: 0x321c, 0x505: 0x3532, + 0x506: 0x3217, 0x507: 0x352d, 0x508: 0x3221, 0x509: 0x353c, 0x50a: 0x3226, 0x50b: 0x3541, + 0x50c: 0x322b, 0x50d: 0x3546, 0x50e: 0x3249, 0x50f: 0x3564, 0x510: 0x3262, 0x511: 0x3582, + 0x512: 0x3271, 0x513: 0x3591, 0x514: 0x3276, 0x515: 0x3596, 0x516: 0x337a, 0x517: 0x34a6, + 0x518: 0x3537, 0x519: 0x3573, 0x51b: 0x35d1, + 0x520: 0x46a1, 0x521: 0x4732, 0x522: 0x2f83, 0x523: 0x328f, + 0x524: 0x3878, 0x525: 0x3a07, 0x526: 0x3871, 0x527: 0x3a00, 0x528: 0x3886, 0x529: 0x3a15, + 0x52a: 0x387f, 0x52b: 0x3a0e, 0x52c: 0x38be, 0x52d: 0x3a4d, 0x52e: 0x3894, 0x52f: 0x3a23, + 0x530: 0x388d, 0x531: 0x3a1c, 0x532: 0x38a2, 0x533: 0x3a31, 0x534: 0x389b, 0x535: 0x3a2a, + 0x536: 0x38c5, 0x537: 0x3a54, 0x538: 0x46b5, 0x539: 0x4746, 0x53a: 0x3000, 0x53b: 0x330c, + 0x53c: 0x2fec, 0x53d: 0x32f8, 0x53e: 0x38da, 0x53f: 0x3a69, + // Block 0x15, offset 0x540 + 0x540: 0x38d3, 0x541: 0x3a62, 0x542: 0x38e8, 0x543: 0x3a77, 0x544: 0x38e1, 0x545: 0x3a70, + 0x546: 0x38fd, 0x547: 0x3a8c, 0x548: 0x3091, 0x549: 0x339d, 0x54a: 0x30a5, 0x54b: 0x33b1, + 0x54c: 0x46e7, 0x54d: 0x4778, 0x54e: 0x3136, 0x54f: 0x3447, 0x550: 0x3920, 0x551: 0x3aaf, + 0x552: 0x3919, 0x553: 0x3aa8, 0x554: 0x392e, 0x555: 0x3abd, 0x556: 0x3927, 0x557: 0x3ab6, + 0x558: 0x3989, 0x559: 0x3b18, 0x55a: 0x396d, 0x55b: 0x3afc, 0x55c: 0x3966, 0x55d: 0x3af5, + 0x55e: 0x397b, 0x55f: 0x3b0a, 0x560: 0x3974, 0x561: 0x3b03, 0x562: 0x3982, 0x563: 0x3b11, + 0x564: 0x31e5, 0x565: 0x34fb, 0x566: 0x31c7, 0x567: 0x34dd, 0x568: 0x39e4, 0x569: 0x3b73, + 0x56a: 0x39dd, 0x56b: 0x3b6c, 0x56c: 0x39f2, 0x56d: 0x3b81, 0x56e: 0x39eb, 0x56f: 0x3b7a, + 0x570: 0x39f9, 0x571: 0x3b88, 0x572: 0x3230, 0x573: 0x354b, 0x574: 0x3258, 0x575: 0x3578, + 0x576: 0x3253, 0x577: 0x356e, 0x578: 0x323f, 0x579: 0x355a, + // Block 0x16, offset 0x580 + 0x580: 0x4804, 0x581: 0x480a, 0x582: 0x491e, 0x583: 0x4936, 0x584: 0x4926, 0x585: 0x493e, + 0x586: 0x492e, 0x587: 0x4946, 0x588: 0x47aa, 0x589: 0x47b0, 0x58a: 0x488e, 0x58b: 0x48a6, + 0x58c: 0x4896, 0x58d: 0x48ae, 0x58e: 0x489e, 0x58f: 0x48b6, 0x590: 0x4816, 0x591: 0x481c, + 0x592: 0x3db8, 0x593: 0x3dc8, 0x594: 0x3dc0, 0x595: 0x3dd0, + 0x598: 0x47b6, 0x599: 0x47bc, 0x59a: 0x3ce8, 0x59b: 0x3cf8, 0x59c: 0x3cf0, 0x59d: 0x3d00, + 0x5a0: 0x482e, 0x5a1: 0x4834, 0x5a2: 0x494e, 0x5a3: 0x4966, + 0x5a4: 0x4956, 0x5a5: 0x496e, 0x5a6: 0x495e, 0x5a7: 0x4976, 0x5a8: 0x47c2, 0x5a9: 0x47c8, + 0x5aa: 0x48be, 0x5ab: 0x48d6, 0x5ac: 0x48c6, 0x5ad: 0x48de, 0x5ae: 0x48ce, 0x5af: 0x48e6, + 0x5b0: 0x4846, 0x5b1: 0x484c, 0x5b2: 0x3e18, 0x5b3: 0x3e30, 0x5b4: 0x3e20, 0x5b5: 0x3e38, + 0x5b6: 0x3e28, 0x5b7: 0x3e40, 0x5b8: 0x47ce, 0x5b9: 0x47d4, 0x5ba: 0x3d18, 0x5bb: 0x3d30, + 0x5bc: 0x3d20, 0x5bd: 0x3d38, 0x5be: 0x3d28, 0x5bf: 0x3d40, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x4852, 0x5c1: 0x4858, 0x5c2: 0x3e48, 0x5c3: 0x3e58, 0x5c4: 0x3e50, 0x5c5: 0x3e60, + 0x5c8: 0x47da, 0x5c9: 0x47e0, 0x5ca: 0x3d48, 0x5cb: 0x3d58, + 0x5cc: 0x3d50, 0x5cd: 0x3d60, 0x5d0: 0x4864, 0x5d1: 0x486a, + 0x5d2: 0x3e80, 0x5d3: 0x3e98, 0x5d4: 0x3e88, 0x5d5: 0x3ea0, 0x5d6: 0x3e90, 0x5d7: 0x3ea8, + 0x5d9: 0x47e6, 0x5db: 0x3d68, 0x5dd: 
0x3d70, + 0x5df: 0x3d78, 0x5e0: 0x487c, 0x5e1: 0x4882, 0x5e2: 0x497e, 0x5e3: 0x4996, + 0x5e4: 0x4986, 0x5e5: 0x499e, 0x5e6: 0x498e, 0x5e7: 0x49a6, 0x5e8: 0x47ec, 0x5e9: 0x47f2, + 0x5ea: 0x48ee, 0x5eb: 0x4906, 0x5ec: 0x48f6, 0x5ed: 0x490e, 0x5ee: 0x48fe, 0x5ef: 0x4916, + 0x5f0: 0x47f8, 0x5f1: 0x431e, 0x5f2: 0x3691, 0x5f3: 0x4324, 0x5f4: 0x4822, 0x5f5: 0x432a, + 0x5f6: 0x36a3, 0x5f7: 0x4330, 0x5f8: 0x36c1, 0x5f9: 0x4336, 0x5fa: 0x36d9, 0x5fb: 0x433c, + 0x5fc: 0x4870, 0x5fd: 0x4342, + // Block 0x18, offset 0x600 + 0x600: 0x3da0, 0x601: 0x3da8, 0x602: 0x4184, 0x603: 0x41a2, 0x604: 0x418e, 0x605: 0x41ac, + 0x606: 0x4198, 0x607: 0x41b6, 0x608: 0x3cd8, 0x609: 0x3ce0, 0x60a: 0x40d0, 0x60b: 0x40ee, + 0x60c: 0x40da, 0x60d: 0x40f8, 0x60e: 0x40e4, 0x60f: 0x4102, 0x610: 0x3de8, 0x611: 0x3df0, + 0x612: 0x41c0, 0x613: 0x41de, 0x614: 0x41ca, 0x615: 0x41e8, 0x616: 0x41d4, 0x617: 0x41f2, + 0x618: 0x3d08, 0x619: 0x3d10, 0x61a: 0x410c, 0x61b: 0x412a, 0x61c: 0x4116, 0x61d: 0x4134, + 0x61e: 0x4120, 0x61f: 0x413e, 0x620: 0x3ec0, 0x621: 0x3ec8, 0x622: 0x41fc, 0x623: 0x421a, + 0x624: 0x4206, 0x625: 0x4224, 0x626: 0x4210, 0x627: 0x422e, 0x628: 0x3d80, 0x629: 0x3d88, + 0x62a: 0x4148, 0x62b: 0x4166, 0x62c: 0x4152, 0x62d: 0x4170, 0x62e: 0x415c, 0x62f: 0x417a, + 0x630: 0x3685, 0x631: 0x367f, 0x632: 0x3d90, 0x633: 0x368b, 0x634: 0x3d98, + 0x636: 0x4810, 0x637: 0x3db0, 0x638: 0x35f5, 0x639: 0x35ef, 0x63a: 0x35e3, 0x63b: 0x42ee, + 0x63c: 0x35fb, 0x63d: 0x8100, 0x63e: 0x01d3, 0x63f: 0xa100, + // Block 0x19, offset 0x640 + 0x640: 0x8100, 0x641: 0x35a7, 0x642: 0x3dd8, 0x643: 0x369d, 0x644: 0x3de0, + 0x646: 0x483a, 0x647: 0x3df8, 0x648: 0x3601, 0x649: 0x42f4, 0x64a: 0x360d, 0x64b: 0x42fa, + 0x64c: 0x3619, 0x64d: 0x3b8f, 0x64e: 0x3b96, 0x64f: 0x3b9d, 0x650: 0x36b5, 0x651: 0x36af, + 0x652: 0x3e00, 0x653: 0x44e4, 0x656: 0x36bb, 0x657: 0x3e10, + 0x658: 0x3631, 0x659: 0x362b, 0x65a: 0x361f, 0x65b: 0x4300, 0x65d: 0x3ba4, + 0x65e: 0x3bab, 0x65f: 0x3bb2, 0x660: 0x36eb, 0x661: 0x36e5, 0x662: 0x3e68, 0x663: 0x44ec, + 0x664: 0x36cd, 0x665: 0x36d3, 0x666: 0x36f1, 0x667: 0x3e78, 0x668: 0x3661, 0x669: 0x365b, + 0x66a: 0x364f, 0x66b: 0x430c, 0x66c: 0x3649, 0x66d: 0x359b, 0x66e: 0x42e8, 0x66f: 0x0081, + 0x672: 0x3eb0, 0x673: 0x36f7, 0x674: 0x3eb8, + 0x676: 0x4888, 0x677: 0x3ed0, 0x678: 0x363d, 0x679: 0x4306, 0x67a: 0x366d, 0x67b: 0x4318, + 0x67c: 0x3679, 0x67d: 0x4256, 0x67e: 0xa100, + // Block 0x1a, offset 0x680 + 0x681: 0x3c06, 0x683: 0xa000, 0x684: 0x3c0d, 0x685: 0xa000, + 0x687: 0x3c14, 0x688: 0xa000, 0x689: 0x3c1b, + 0x68d: 0xa000, + 0x6a0: 0x2f65, 0x6a1: 0xa000, 0x6a2: 0x3c29, + 0x6a4: 0xa000, 0x6a5: 0xa000, + 0x6ad: 0x3c22, 0x6ae: 0x2f60, 0x6af: 0x2f6a, + 0x6b0: 0x3c30, 0x6b1: 0x3c37, 0x6b2: 0xa000, 0x6b3: 0xa000, 0x6b4: 0x3c3e, 0x6b5: 0x3c45, + 0x6b6: 0xa000, 0x6b7: 0xa000, 0x6b8: 0x3c4c, 0x6b9: 0x3c53, 0x6ba: 0xa000, 0x6bb: 0xa000, + 0x6bc: 0xa000, 0x6bd: 0xa000, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3c5a, 0x6c1: 0x3c61, 0x6c2: 0xa000, 0x6c3: 0xa000, 0x6c4: 0x3c76, 0x6c5: 0x3c7d, + 0x6c6: 0xa000, 0x6c7: 0xa000, 0x6c8: 0x3c84, 0x6c9: 0x3c8b, + 0x6d1: 0xa000, + 0x6d2: 0xa000, + 0x6e2: 0xa000, + 0x6e8: 0xa000, 0x6e9: 0xa000, + 0x6eb: 0xa000, 0x6ec: 0x3ca0, 0x6ed: 0x3ca7, 0x6ee: 0x3cae, 0x6ef: 0x3cb5, + 0x6f2: 0xa000, 0x6f3: 0xa000, 0x6f4: 0xa000, 0x6f5: 0xa000, + // Block 0x1c, offset 0x700 + 0x706: 0xa000, 0x70b: 0xa000, + 0x70c: 0x3f08, 0x70d: 0xa000, 0x70e: 0x3f10, 0x70f: 0xa000, 0x710: 0x3f18, 0x711: 0xa000, + 0x712: 0x3f20, 0x713: 0xa000, 0x714: 0x3f28, 0x715: 0xa000, 0x716: 0x3f30, 0x717: 0xa000, + 0x718: 0x3f38, 0x719: 
0xa000, 0x71a: 0x3f40, 0x71b: 0xa000, 0x71c: 0x3f48, 0x71d: 0xa000, + 0x71e: 0x3f50, 0x71f: 0xa000, 0x720: 0x3f58, 0x721: 0xa000, 0x722: 0x3f60, + 0x724: 0xa000, 0x725: 0x3f68, 0x726: 0xa000, 0x727: 0x3f70, 0x728: 0xa000, 0x729: 0x3f78, + 0x72f: 0xa000, + 0x730: 0x3f80, 0x731: 0x3f88, 0x732: 0xa000, 0x733: 0x3f90, 0x734: 0x3f98, 0x735: 0xa000, + 0x736: 0x3fa0, 0x737: 0x3fa8, 0x738: 0xa000, 0x739: 0x3fb0, 0x73a: 0x3fb8, 0x73b: 0xa000, + 0x73c: 0x3fc0, 0x73d: 0x3fc8, + // Block 0x1d, offset 0x740 + 0x754: 0x3f00, + 0x759: 0x9903, 0x75a: 0x9903, 0x75b: 0x8100, 0x75c: 0x8100, 0x75d: 0xa000, + 0x75e: 0x3fd0, + 0x766: 0xa000, + 0x76b: 0xa000, 0x76c: 0x3fe0, 0x76d: 0xa000, 0x76e: 0x3fe8, 0x76f: 0xa000, + 0x770: 0x3ff0, 0x771: 0xa000, 0x772: 0x3ff8, 0x773: 0xa000, 0x774: 0x4000, 0x775: 0xa000, + 0x776: 0x4008, 0x777: 0xa000, 0x778: 0x4010, 0x779: 0xa000, 0x77a: 0x4018, 0x77b: 0xa000, + 0x77c: 0x4020, 0x77d: 0xa000, 0x77e: 0x4028, 0x77f: 0xa000, + // Block 0x1e, offset 0x780 + 0x780: 0x4030, 0x781: 0xa000, 0x782: 0x4038, 0x784: 0xa000, 0x785: 0x4040, + 0x786: 0xa000, 0x787: 0x4048, 0x788: 0xa000, 0x789: 0x4050, + 0x78f: 0xa000, 0x790: 0x4058, 0x791: 0x4060, + 0x792: 0xa000, 0x793: 0x4068, 0x794: 0x4070, 0x795: 0xa000, 0x796: 0x4078, 0x797: 0x4080, + 0x798: 0xa000, 0x799: 0x4088, 0x79a: 0x4090, 0x79b: 0xa000, 0x79c: 0x4098, 0x79d: 0x40a0, + 0x7af: 0xa000, + 0x7b0: 0xa000, 0x7b1: 0xa000, 0x7b2: 0xa000, 0x7b4: 0x3fd8, + 0x7b7: 0x40a8, 0x7b8: 0x40b0, 0x7b9: 0x40b8, 0x7ba: 0x40c0, + 0x7bd: 0xa000, 0x7be: 0x40c8, + // Block 0x1f, offset 0x7c0 + 0x7c0: 0x1377, 0x7c1: 0x0cfb, 0x7c2: 0x13d3, 0x7c3: 0x139f, 0x7c4: 0x0e57, 0x7c5: 0x06eb, + 0x7c6: 0x08df, 0x7c7: 0x162b, 0x7c8: 0x162b, 0x7c9: 0x0a0b, 0x7ca: 0x145f, 0x7cb: 0x0943, + 0x7cc: 0x0a07, 0x7cd: 0x0bef, 0x7ce: 0x0fcf, 0x7cf: 0x115f, 0x7d0: 0x1297, 0x7d1: 0x12d3, + 0x7d2: 0x1307, 0x7d3: 0x141b, 0x7d4: 0x0d73, 0x7d5: 0x0dff, 0x7d6: 0x0eab, 0x7d7: 0x0f43, + 0x7d8: 0x125f, 0x7d9: 0x1447, 0x7da: 0x1573, 0x7db: 0x070f, 0x7dc: 0x08b3, 0x7dd: 0x0d87, + 0x7de: 0x0ecf, 0x7df: 0x1293, 0x7e0: 0x15c3, 0x7e1: 0x0ab3, 0x7e2: 0x0e77, 0x7e3: 0x1283, + 0x7e4: 0x1317, 0x7e5: 0x0c23, 0x7e6: 0x11bb, 0x7e7: 0x12df, 0x7e8: 0x0b1f, 0x7e9: 0x0d0f, + 0x7ea: 0x0e17, 0x7eb: 0x0f1b, 0x7ec: 0x1427, 0x7ed: 0x074f, 0x7ee: 0x07e7, 0x7ef: 0x0853, + 0x7f0: 0x0c8b, 0x7f1: 0x0d7f, 0x7f2: 0x0ecb, 0x7f3: 0x0fef, 0x7f4: 0x1177, 0x7f5: 0x128b, + 0x7f6: 0x12a3, 0x7f7: 0x13c7, 0x7f8: 0x14ef, 0x7f9: 0x15a3, 0x7fa: 0x15bf, 0x7fb: 0x102b, + 0x7fc: 0x106b, 0x7fd: 0x1123, 0x7fe: 0x1243, 0x7ff: 0x147b, + // Block 0x20, offset 0x800 + 0x800: 0x15cb, 0x801: 0x134b, 0x802: 0x09c7, 0x803: 0x0b3b, 0x804: 0x10db, 0x805: 0x119b, + 0x806: 0x0eff, 0x807: 0x1033, 0x808: 0x1397, 0x809: 0x14e7, 0x80a: 0x09c3, 0x80b: 0x0a8f, + 0x80c: 0x0d77, 0x80d: 0x0e2b, 0x80e: 0x0e5f, 0x80f: 0x1113, 0x810: 0x113b, 0x811: 0x14a7, + 0x812: 0x084f, 0x813: 0x11a7, 0x814: 0x07f3, 0x815: 0x07ef, 0x816: 0x1097, 0x817: 0x1127, + 0x818: 0x125b, 0x819: 0x14af, 0x81a: 0x1367, 0x81b: 0x0c27, 0x81c: 0x0d73, 0x81d: 0x1357, + 0x81e: 0x06f7, 0x81f: 0x0a63, 0x820: 0x0b93, 0x821: 0x0f2f, 0x822: 0x0faf, 0x823: 0x0873, + 0x824: 0x103b, 0x825: 0x075f, 0x826: 0x0b77, 0x827: 0x06d7, 0x828: 0x0deb, 0x829: 0x0ca3, + 0x82a: 0x110f, 0x82b: 0x08c7, 0x82c: 0x09b3, 0x82d: 0x0ffb, 0x82e: 0x1263, 0x82f: 0x133b, + 0x830: 0x0db7, 0x831: 0x13f7, 0x832: 0x0de3, 0x833: 0x0c37, 0x834: 0x121b, 0x835: 0x0c57, + 0x836: 0x0fab, 0x837: 0x072b, 0x838: 0x07a7, 0x839: 0x07eb, 0x83a: 0x0d53, 0x83b: 0x10fb, + 0x83c: 0x11f3, 0x83d: 0x1347, 0x83e: 0x145b, 0x83f: 0x085b, + // 
Block 0x21, offset 0x840 + 0x840: 0x090f, 0x841: 0x0a17, 0x842: 0x0b2f, 0x843: 0x0cbf, 0x844: 0x0e7b, 0x845: 0x103f, + 0x846: 0x1497, 0x847: 0x157b, 0x848: 0x15cf, 0x849: 0x15e7, 0x84a: 0x0837, 0x84b: 0x0cf3, + 0x84c: 0x0da3, 0x84d: 0x13eb, 0x84e: 0x0afb, 0x84f: 0x0bd7, 0x850: 0x0bf3, 0x851: 0x0c83, + 0x852: 0x0e6b, 0x853: 0x0eb7, 0x854: 0x0f67, 0x855: 0x108b, 0x856: 0x112f, 0x857: 0x1193, + 0x858: 0x13db, 0x859: 0x126b, 0x85a: 0x1403, 0x85b: 0x147f, 0x85c: 0x080f, 0x85d: 0x083b, + 0x85e: 0x0923, 0x85f: 0x0ea7, 0x860: 0x12f3, 0x861: 0x133b, 0x862: 0x0b1b, 0x863: 0x0b8b, + 0x864: 0x0c4f, 0x865: 0x0daf, 0x866: 0x10d7, 0x867: 0x0f23, 0x868: 0x073b, 0x869: 0x097f, + 0x86a: 0x0a63, 0x86b: 0x0ac7, 0x86c: 0x0b97, 0x86d: 0x0f3f, 0x86e: 0x0f5b, 0x86f: 0x116b, + 0x870: 0x118b, 0x871: 0x1463, 0x872: 0x14e3, 0x873: 0x14f3, 0x874: 0x152f, 0x875: 0x0753, + 0x876: 0x107f, 0x877: 0x144f, 0x878: 0x14cb, 0x879: 0x0baf, 0x87a: 0x0717, 0x87b: 0x0777, + 0x87c: 0x0a67, 0x87d: 0x0a87, 0x87e: 0x0caf, 0x87f: 0x0d73, + // Block 0x22, offset 0x880 + 0x880: 0x0ec3, 0x881: 0x0fcb, 0x882: 0x1277, 0x883: 0x1417, 0x884: 0x1623, 0x885: 0x0ce3, + 0x886: 0x14a3, 0x887: 0x0833, 0x888: 0x0d2f, 0x889: 0x0d3b, 0x88a: 0x0e0f, 0x88b: 0x0e47, + 0x88c: 0x0f4b, 0x88d: 0x0fa7, 0x88e: 0x1027, 0x88f: 0x110b, 0x890: 0x153b, 0x891: 0x07af, + 0x892: 0x0c03, 0x893: 0x14b3, 0x894: 0x0767, 0x895: 0x0aab, 0x896: 0x0e2f, 0x897: 0x13df, + 0x898: 0x0b67, 0x899: 0x0bb7, 0x89a: 0x0d43, 0x89b: 0x0f2f, 0x89c: 0x14bb, 0x89d: 0x0817, + 0x89e: 0x08ff, 0x89f: 0x0a97, 0x8a0: 0x0cd3, 0x8a1: 0x0d1f, 0x8a2: 0x0d5f, 0x8a3: 0x0df3, + 0x8a4: 0x0f47, 0x8a5: 0x0fbb, 0x8a6: 0x1157, 0x8a7: 0x12f7, 0x8a8: 0x1303, 0x8a9: 0x1457, + 0x8aa: 0x14d7, 0x8ab: 0x0883, 0x8ac: 0x0e4b, 0x8ad: 0x0903, 0x8ae: 0x0ec7, 0x8af: 0x0f6b, + 0x8b0: 0x1287, 0x8b1: 0x14bf, 0x8b2: 0x15ab, 0x8b3: 0x15d3, 0x8b4: 0x0d37, 0x8b5: 0x0e27, + 0x8b6: 0x11c3, 0x8b7: 0x10b7, 0x8b8: 0x10c3, 0x8b9: 0x10e7, 0x8ba: 0x0f17, 0x8bb: 0x0e9f, + 0x8bc: 0x1363, 0x8bd: 0x0733, 0x8be: 0x122b, 0x8bf: 0x081b, + // Block 0x23, offset 0x8c0 + 0x8c0: 0x080b, 0x8c1: 0x0b0b, 0x8c2: 0x0c2b, 0x8c3: 0x10f3, 0x8c4: 0x0a53, 0x8c5: 0x0e03, + 0x8c6: 0x0cef, 0x8c7: 0x13e7, 0x8c8: 0x12e7, 0x8c9: 0x14ab, 0x8ca: 0x1323, 0x8cb: 0x0b27, + 0x8cc: 0x0787, 0x8cd: 0x095b, 0x8d0: 0x09af, + 0x8d2: 0x0cdf, 0x8d5: 0x07f7, 0x8d6: 0x0f1f, 0x8d7: 0x0fe3, + 0x8d8: 0x1047, 0x8d9: 0x1063, 0x8da: 0x1067, 0x8db: 0x107b, 0x8dc: 0x14fb, 0x8dd: 0x10eb, + 0x8de: 0x116f, 0x8e0: 0x128f, 0x8e2: 0x1353, + 0x8e5: 0x1407, 0x8e6: 0x1433, + 0x8ea: 0x154f, 0x8eb: 0x1553, 0x8ec: 0x1557, 0x8ed: 0x15bb, 0x8ee: 0x142b, 0x8ef: 0x14c7, + 0x8f0: 0x0757, 0x8f1: 0x077b, 0x8f2: 0x078f, 0x8f3: 0x084b, 0x8f4: 0x0857, 0x8f5: 0x0897, + 0x8f6: 0x094b, 0x8f7: 0x0967, 0x8f8: 0x096f, 0x8f9: 0x09ab, 0x8fa: 0x09b7, 0x8fb: 0x0a93, + 0x8fc: 0x0a9b, 0x8fd: 0x0ba3, 0x8fe: 0x0bcb, 0x8ff: 0x0bd3, + // Block 0x24, offset 0x900 + 0x900: 0x0beb, 0x901: 0x0c97, 0x902: 0x0cc7, 0x903: 0x0ce7, 0x904: 0x0d57, 0x905: 0x0e1b, + 0x906: 0x0e37, 0x907: 0x0e67, 0x908: 0x0ebb, 0x909: 0x0edb, 0x90a: 0x0f4f, 0x90b: 0x102f, + 0x90c: 0x104b, 0x90d: 0x1053, 0x90e: 0x104f, 0x90f: 0x1057, 0x910: 0x105b, 0x911: 0x105f, + 0x912: 0x1073, 0x913: 0x1077, 0x914: 0x109b, 0x915: 0x10af, 0x916: 0x10cb, 0x917: 0x112f, + 0x918: 0x1137, 0x919: 0x113f, 0x91a: 0x1153, 0x91b: 0x117b, 0x91c: 0x11cb, 0x91d: 0x11ff, + 0x91e: 0x11ff, 0x91f: 0x1267, 0x920: 0x130f, 0x921: 0x1327, 0x922: 0x135b, 0x923: 0x135f, + 0x924: 0x13a3, 0x925: 0x13a7, 0x926: 0x13ff, 0x927: 0x1407, 0x928: 0x14db, 0x929: 0x151f, + 0x92a: 0x1537, 0x92b: 
0x0b9b, 0x92c: 0x171e, 0x92d: 0x11e3, + 0x930: 0x06df, 0x931: 0x07e3, 0x932: 0x07a3, 0x933: 0x074b, 0x934: 0x078b, 0x935: 0x07b7, + 0x936: 0x0847, 0x937: 0x0863, 0x938: 0x094b, 0x939: 0x0937, 0x93a: 0x0947, 0x93b: 0x0963, + 0x93c: 0x09af, 0x93d: 0x09bf, 0x93e: 0x0a03, 0x93f: 0x0a0f, + // Block 0x25, offset 0x940 + 0x940: 0x0a2b, 0x941: 0x0a3b, 0x942: 0x0b23, 0x943: 0x0b2b, 0x944: 0x0b5b, 0x945: 0x0b7b, + 0x946: 0x0bab, 0x947: 0x0bc3, 0x948: 0x0bb3, 0x949: 0x0bd3, 0x94a: 0x0bc7, 0x94b: 0x0beb, + 0x94c: 0x0c07, 0x94d: 0x0c5f, 0x94e: 0x0c6b, 0x94f: 0x0c73, 0x950: 0x0c9b, 0x951: 0x0cdf, + 0x952: 0x0d0f, 0x953: 0x0d13, 0x954: 0x0d27, 0x955: 0x0da7, 0x956: 0x0db7, 0x957: 0x0e0f, + 0x958: 0x0e5b, 0x959: 0x0e53, 0x95a: 0x0e67, 0x95b: 0x0e83, 0x95c: 0x0ebb, 0x95d: 0x1013, + 0x95e: 0x0edf, 0x95f: 0x0f13, 0x960: 0x0f1f, 0x961: 0x0f5f, 0x962: 0x0f7b, 0x963: 0x0f9f, + 0x964: 0x0fc3, 0x965: 0x0fc7, 0x966: 0x0fe3, 0x967: 0x0fe7, 0x968: 0x0ff7, 0x969: 0x100b, + 0x96a: 0x1007, 0x96b: 0x1037, 0x96c: 0x10b3, 0x96d: 0x10cb, 0x96e: 0x10e3, 0x96f: 0x111b, + 0x970: 0x112f, 0x971: 0x114b, 0x972: 0x117b, 0x973: 0x122f, 0x974: 0x1257, 0x975: 0x12cb, + 0x976: 0x1313, 0x977: 0x131f, 0x978: 0x1327, 0x979: 0x133f, 0x97a: 0x1353, 0x97b: 0x1343, + 0x97c: 0x135b, 0x97d: 0x1357, 0x97e: 0x134f, 0x97f: 0x135f, + // Block 0x26, offset 0x980 + 0x980: 0x136b, 0x981: 0x13a7, 0x982: 0x13e3, 0x983: 0x1413, 0x984: 0x144b, 0x985: 0x146b, + 0x986: 0x14b7, 0x987: 0x14db, 0x988: 0x14fb, 0x989: 0x150f, 0x98a: 0x151f, 0x98b: 0x152b, + 0x98c: 0x1537, 0x98d: 0x158b, 0x98e: 0x162b, 0x98f: 0x16b5, 0x990: 0x16b0, 0x991: 0x16e2, + 0x992: 0x0607, 0x993: 0x062f, 0x994: 0x0633, 0x995: 0x1764, 0x996: 0x1791, 0x997: 0x1809, + 0x998: 0x1617, 0x999: 0x1627, + // Block 0x27, offset 0x9c0 + 0x9c0: 0x06fb, 0x9c1: 0x06f3, 0x9c2: 0x0703, 0x9c3: 0x1647, 0x9c4: 0x0747, 0x9c5: 0x0757, + 0x9c6: 0x075b, 0x9c7: 0x0763, 0x9c8: 0x076b, 0x9c9: 0x076f, 0x9ca: 0x077b, 0x9cb: 0x0773, + 0x9cc: 0x05b3, 0x9cd: 0x165b, 0x9ce: 0x078f, 0x9cf: 0x0793, 0x9d0: 0x0797, 0x9d1: 0x07b3, + 0x9d2: 0x164c, 0x9d3: 0x05b7, 0x9d4: 0x079f, 0x9d5: 0x07bf, 0x9d6: 0x1656, 0x9d7: 0x07cf, + 0x9d8: 0x07d7, 0x9d9: 0x0737, 0x9da: 0x07df, 0x9db: 0x07e3, 0x9dc: 0x1831, 0x9dd: 0x07ff, + 0x9de: 0x0807, 0x9df: 0x05bf, 0x9e0: 0x081f, 0x9e1: 0x0823, 0x9e2: 0x082b, 0x9e3: 0x082f, + 0x9e4: 0x05c3, 0x9e5: 0x0847, 0x9e6: 0x084b, 0x9e7: 0x0857, 0x9e8: 0x0863, 0x9e9: 0x0867, + 0x9ea: 0x086b, 0x9eb: 0x0873, 0x9ec: 0x0893, 0x9ed: 0x0897, 0x9ee: 0x089f, 0x9ef: 0x08af, + 0x9f0: 0x08b7, 0x9f1: 0x08bb, 0x9f2: 0x08bb, 0x9f3: 0x08bb, 0x9f4: 0x166a, 0x9f5: 0x0e93, + 0x9f6: 0x08cf, 0x9f7: 0x08d7, 0x9f8: 0x166f, 0x9f9: 0x08e3, 0x9fa: 0x08eb, 0x9fb: 0x08f3, + 0x9fc: 0x091b, 0x9fd: 0x0907, 0x9fe: 0x0913, 0x9ff: 0x0917, + // Block 0x28, offset 0xa00 + 0xa00: 0x091f, 0xa01: 0x0927, 0xa02: 0x092b, 0xa03: 0x0933, 0xa04: 0x093b, 0xa05: 0x093f, + 0xa06: 0x093f, 0xa07: 0x0947, 0xa08: 0x094f, 0xa09: 0x0953, 0xa0a: 0x095f, 0xa0b: 0x0983, + 0xa0c: 0x0967, 0xa0d: 0x0987, 0xa0e: 0x096b, 0xa0f: 0x0973, 0xa10: 0x080b, 0xa11: 0x09cf, + 0xa12: 0x0997, 0xa13: 0x099b, 0xa14: 0x099f, 0xa15: 0x0993, 0xa16: 0x09a7, 0xa17: 0x09a3, + 0xa18: 0x09bb, 0xa19: 0x1674, 0xa1a: 0x09d7, 0xa1b: 0x09db, 0xa1c: 0x09e3, 0xa1d: 0x09ef, + 0xa1e: 0x09f7, 0xa1f: 0x0a13, 0xa20: 0x1679, 0xa21: 0x167e, 0xa22: 0x0a1f, 0xa23: 0x0a23, + 0xa24: 0x0a27, 0xa25: 0x0a1b, 0xa26: 0x0a2f, 0xa27: 0x05c7, 0xa28: 0x05cb, 0xa29: 0x0a37, + 0xa2a: 0x0a3f, 0xa2b: 0x0a3f, 0xa2c: 0x1683, 0xa2d: 0x0a5b, 0xa2e: 0x0a5f, 0xa2f: 0x0a63, + 0xa30: 0x0a6b, 0xa31: 0x1688, 0xa32: 0x0a73, 
0xa33: 0x0a77, 0xa34: 0x0b4f, 0xa35: 0x0a7f, + 0xa36: 0x05cf, 0xa37: 0x0a8b, 0xa38: 0x0a9b, 0xa39: 0x0aa7, 0xa3a: 0x0aa3, 0xa3b: 0x1692, + 0xa3c: 0x0aaf, 0xa3d: 0x1697, 0xa3e: 0x0abb, 0xa3f: 0x0ab7, + // Block 0x29, offset 0xa40 + 0xa40: 0x0abf, 0xa41: 0x0acf, 0xa42: 0x0ad3, 0xa43: 0x05d3, 0xa44: 0x0ae3, 0xa45: 0x0aeb, + 0xa46: 0x0aef, 0xa47: 0x0af3, 0xa48: 0x05d7, 0xa49: 0x169c, 0xa4a: 0x05db, 0xa4b: 0x0b0f, + 0xa4c: 0x0b13, 0xa4d: 0x0b17, 0xa4e: 0x0b1f, 0xa4f: 0x1863, 0xa50: 0x0b37, 0xa51: 0x16a6, + 0xa52: 0x16a6, 0xa53: 0x11d7, 0xa54: 0x0b47, 0xa55: 0x0b47, 0xa56: 0x05df, 0xa57: 0x16c9, + 0xa58: 0x179b, 0xa59: 0x0b57, 0xa5a: 0x0b5f, 0xa5b: 0x05e3, 0xa5c: 0x0b73, 0xa5d: 0x0b83, + 0xa5e: 0x0b87, 0xa5f: 0x0b8f, 0xa60: 0x0b9f, 0xa61: 0x05eb, 0xa62: 0x05e7, 0xa63: 0x0ba3, + 0xa64: 0x16ab, 0xa65: 0x0ba7, 0xa66: 0x0bbb, 0xa67: 0x0bbf, 0xa68: 0x0bc3, 0xa69: 0x0bbf, + 0xa6a: 0x0bcf, 0xa6b: 0x0bd3, 0xa6c: 0x0be3, 0xa6d: 0x0bdb, 0xa6e: 0x0bdf, 0xa6f: 0x0be7, + 0xa70: 0x0beb, 0xa71: 0x0bef, 0xa72: 0x0bfb, 0xa73: 0x0bff, 0xa74: 0x0c17, 0xa75: 0x0c1f, + 0xa76: 0x0c2f, 0xa77: 0x0c43, 0xa78: 0x16ba, 0xa79: 0x0c3f, 0xa7a: 0x0c33, 0xa7b: 0x0c4b, + 0xa7c: 0x0c53, 0xa7d: 0x0c67, 0xa7e: 0x16bf, 0xa7f: 0x0c6f, + // Block 0x2a, offset 0xa80 + 0xa80: 0x0c63, 0xa81: 0x0c5b, 0xa82: 0x05ef, 0xa83: 0x0c77, 0xa84: 0x0c7f, 0xa85: 0x0c87, + 0xa86: 0x0c7b, 0xa87: 0x05f3, 0xa88: 0x0c97, 0xa89: 0x0c9f, 0xa8a: 0x16c4, 0xa8b: 0x0ccb, + 0xa8c: 0x0cff, 0xa8d: 0x0cdb, 0xa8e: 0x05ff, 0xa8f: 0x0ce7, 0xa90: 0x05fb, 0xa91: 0x05f7, + 0xa92: 0x07c3, 0xa93: 0x07c7, 0xa94: 0x0d03, 0xa95: 0x0ceb, 0xa96: 0x11ab, 0xa97: 0x0663, + 0xa98: 0x0d0f, 0xa99: 0x0d13, 0xa9a: 0x0d17, 0xa9b: 0x0d2b, 0xa9c: 0x0d23, 0xa9d: 0x16dd, + 0xa9e: 0x0603, 0xa9f: 0x0d3f, 0xaa0: 0x0d33, 0xaa1: 0x0d4f, 0xaa2: 0x0d57, 0xaa3: 0x16e7, + 0xaa4: 0x0d5b, 0xaa5: 0x0d47, 0xaa6: 0x0d63, 0xaa7: 0x0607, 0xaa8: 0x0d67, 0xaa9: 0x0d6b, + 0xaaa: 0x0d6f, 0xaab: 0x0d7b, 0xaac: 0x16ec, 0xaad: 0x0d83, 0xaae: 0x060b, 0xaaf: 0x0d8f, + 0xab0: 0x16f1, 0xab1: 0x0d93, 0xab2: 0x060f, 0xab3: 0x0d9f, 0xab4: 0x0dab, 0xab5: 0x0db7, + 0xab6: 0x0dbb, 0xab7: 0x16f6, 0xab8: 0x168d, 0xab9: 0x16fb, 0xaba: 0x0ddb, 0xabb: 0x1700, + 0xabc: 0x0de7, 0xabd: 0x0def, 0xabe: 0x0ddf, 0xabf: 0x0dfb, + // Block 0x2b, offset 0xac0 + 0xac0: 0x0e0b, 0xac1: 0x0e1b, 0xac2: 0x0e0f, 0xac3: 0x0e13, 0xac4: 0x0e1f, 0xac5: 0x0e23, + 0xac6: 0x1705, 0xac7: 0x0e07, 0xac8: 0x0e3b, 0xac9: 0x0e3f, 0xaca: 0x0613, 0xacb: 0x0e53, + 0xacc: 0x0e4f, 0xacd: 0x170a, 0xace: 0x0e33, 0xacf: 0x0e6f, 0xad0: 0x170f, 0xad1: 0x1714, + 0xad2: 0x0e73, 0xad3: 0x0e87, 0xad4: 0x0e83, 0xad5: 0x0e7f, 0xad6: 0x0617, 0xad7: 0x0e8b, + 0xad8: 0x0e9b, 0xad9: 0x0e97, 0xada: 0x0ea3, 0xadb: 0x1651, 0xadc: 0x0eb3, 0xadd: 0x1719, + 0xade: 0x0ebf, 0xadf: 0x1723, 0xae0: 0x0ed3, 0xae1: 0x0edf, 0xae2: 0x0ef3, 0xae3: 0x1728, + 0xae4: 0x0f07, 0xae5: 0x0f0b, 0xae6: 0x172d, 0xae7: 0x1732, 0xae8: 0x0f27, 0xae9: 0x0f37, + 0xaea: 0x061b, 0xaeb: 0x0f3b, 0xaec: 0x061f, 0xaed: 0x061f, 0xaee: 0x0f53, 0xaef: 0x0f57, + 0xaf0: 0x0f5f, 0xaf1: 0x0f63, 0xaf2: 0x0f6f, 0xaf3: 0x0623, 0xaf4: 0x0f87, 0xaf5: 0x1737, + 0xaf6: 0x0fa3, 0xaf7: 0x173c, 0xaf8: 0x0faf, 0xaf9: 0x16a1, 0xafa: 0x0fbf, 0xafb: 0x1741, + 0xafc: 0x1746, 0xafd: 0x174b, 0xafe: 0x0627, 0xaff: 0x062b, + // Block 0x2c, offset 0xb00 + 0xb00: 0x0ff7, 0xb01: 0x1755, 0xb02: 0x1750, 0xb03: 0x175a, 0xb04: 0x175f, 0xb05: 0x0fff, + 0xb06: 0x1003, 0xb07: 0x1003, 0xb08: 0x100b, 0xb09: 0x0633, 0xb0a: 0x100f, 0xb0b: 0x0637, + 0xb0c: 0x063b, 0xb0d: 0x1769, 0xb0e: 0x1023, 0xb0f: 0x102b, 0xb10: 0x1037, 0xb11: 0x063f, + 0xb12: 
0x176e, 0xb13: 0x105b, 0xb14: 0x1773, 0xb15: 0x1778, 0xb16: 0x107b, 0xb17: 0x1093, + 0xb18: 0x0643, 0xb19: 0x109b, 0xb1a: 0x109f, 0xb1b: 0x10a3, 0xb1c: 0x177d, 0xb1d: 0x1782, + 0xb1e: 0x1782, 0xb1f: 0x10bb, 0xb20: 0x0647, 0xb21: 0x1787, 0xb22: 0x10cf, 0xb23: 0x10d3, + 0xb24: 0x064b, 0xb25: 0x178c, 0xb26: 0x10ef, 0xb27: 0x064f, 0xb28: 0x10ff, 0xb29: 0x10f7, + 0xb2a: 0x1107, 0xb2b: 0x1796, 0xb2c: 0x111f, 0xb2d: 0x0653, 0xb2e: 0x112b, 0xb2f: 0x1133, + 0xb30: 0x1143, 0xb31: 0x0657, 0xb32: 0x17a0, 0xb33: 0x17a5, 0xb34: 0x065b, 0xb35: 0x17aa, + 0xb36: 0x115b, 0xb37: 0x17af, 0xb38: 0x1167, 0xb39: 0x1173, 0xb3a: 0x117b, 0xb3b: 0x17b4, + 0xb3c: 0x17b9, 0xb3d: 0x118f, 0xb3e: 0x17be, 0xb3f: 0x1197, + // Block 0x2d, offset 0xb40 + 0xb40: 0x16ce, 0xb41: 0x065f, 0xb42: 0x11af, 0xb43: 0x11b3, 0xb44: 0x0667, 0xb45: 0x11b7, + 0xb46: 0x0a33, 0xb47: 0x17c3, 0xb48: 0x17c8, 0xb49: 0x16d3, 0xb4a: 0x16d8, 0xb4b: 0x11d7, + 0xb4c: 0x11db, 0xb4d: 0x13f3, 0xb4e: 0x066b, 0xb4f: 0x1207, 0xb50: 0x1203, 0xb51: 0x120b, + 0xb52: 0x083f, 0xb53: 0x120f, 0xb54: 0x1213, 0xb55: 0x1217, 0xb56: 0x121f, 0xb57: 0x17cd, + 0xb58: 0x121b, 0xb59: 0x1223, 0xb5a: 0x1237, 0xb5b: 0x123b, 0xb5c: 0x1227, 0xb5d: 0x123f, + 0xb5e: 0x1253, 0xb5f: 0x1267, 0xb60: 0x1233, 0xb61: 0x1247, 0xb62: 0x124b, 0xb63: 0x124f, + 0xb64: 0x17d2, 0xb65: 0x17dc, 0xb66: 0x17d7, 0xb67: 0x066f, 0xb68: 0x126f, 0xb69: 0x1273, + 0xb6a: 0x127b, 0xb6b: 0x17f0, 0xb6c: 0x127f, 0xb6d: 0x17e1, 0xb6e: 0x0673, 0xb6f: 0x0677, + 0xb70: 0x17e6, 0xb71: 0x17eb, 0xb72: 0x067b, 0xb73: 0x129f, 0xb74: 0x12a3, 0xb75: 0x12a7, + 0xb76: 0x12ab, 0xb77: 0x12b7, 0xb78: 0x12b3, 0xb79: 0x12bf, 0xb7a: 0x12bb, 0xb7b: 0x12cb, + 0xb7c: 0x12c3, 0xb7d: 0x12c7, 0xb7e: 0x12cf, 0xb7f: 0x067f, + // Block 0x2e, offset 0xb80 + 0xb80: 0x12d7, 0xb81: 0x12db, 0xb82: 0x0683, 0xb83: 0x12eb, 0xb84: 0x12ef, 0xb85: 0x17f5, + 0xb86: 0x12fb, 0xb87: 0x12ff, 0xb88: 0x0687, 0xb89: 0x130b, 0xb8a: 0x05bb, 0xb8b: 0x17fa, + 0xb8c: 0x17ff, 0xb8d: 0x068b, 0xb8e: 0x068f, 0xb8f: 0x1337, 0xb90: 0x134f, 0xb91: 0x136b, + 0xb92: 0x137b, 0xb93: 0x1804, 0xb94: 0x138f, 0xb95: 0x1393, 0xb96: 0x13ab, 0xb97: 0x13b7, + 0xb98: 0x180e, 0xb99: 0x1660, 0xb9a: 0x13c3, 0xb9b: 0x13bf, 0xb9c: 0x13cb, 0xb9d: 0x1665, + 0xb9e: 0x13d7, 0xb9f: 0x13e3, 0xba0: 0x1813, 0xba1: 0x1818, 0xba2: 0x1423, 0xba3: 0x142f, + 0xba4: 0x1437, 0xba5: 0x181d, 0xba6: 0x143b, 0xba7: 0x1467, 0xba8: 0x1473, 0xba9: 0x1477, + 0xbaa: 0x146f, 0xbab: 0x1483, 0xbac: 0x1487, 0xbad: 0x1822, 0xbae: 0x1493, 0xbaf: 0x0693, + 0xbb0: 0x149b, 0xbb1: 0x1827, 0xbb2: 0x0697, 0xbb3: 0x14d3, 0xbb4: 0x0ac3, 0xbb5: 0x14eb, + 0xbb6: 0x182c, 0xbb7: 0x1836, 0xbb8: 0x069b, 0xbb9: 0x069f, 0xbba: 0x1513, 0xbbb: 0x183b, + 0xbbc: 0x06a3, 0xbbd: 0x1840, 0xbbe: 0x152b, 0xbbf: 0x152b, + // Block 0x2f, offset 0xbc0 + 0xbc0: 0x1533, 0xbc1: 0x1845, 0xbc2: 0x154b, 0xbc3: 0x06a7, 0xbc4: 0x155b, 0xbc5: 0x1567, + 0xbc6: 0x156f, 0xbc7: 0x1577, 0xbc8: 0x06ab, 0xbc9: 0x184a, 0xbca: 0x158b, 0xbcb: 0x15a7, + 0xbcc: 0x15b3, 0xbcd: 0x06af, 0xbce: 0x06b3, 0xbcf: 0x15b7, 0xbd0: 0x184f, 0xbd1: 0x06b7, + 0xbd2: 0x1854, 0xbd3: 0x1859, 0xbd4: 0x185e, 0xbd5: 0x15db, 0xbd6: 0x06bb, 0xbd7: 0x15ef, + 0xbd8: 0x15f7, 0xbd9: 0x15fb, 0xbda: 0x1603, 0xbdb: 0x160b, 0xbdc: 0x1613, 0xbdd: 0x1868, +} + +// nfcIndex: 22 blocks, 1408 entries, 1408 bytes +// Block 0 is the zero block. 
+var nfcIndex = [1408]uint8{ + // Block 0x0, offset 0x0 + // Block 0x1, offset 0x40 + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc2: 0x2e, 0xc3: 0x01, 0xc4: 0x02, 0xc5: 0x03, 0xc6: 0x2f, 0xc7: 0x04, + 0xc8: 0x05, 0xca: 0x30, 0xcb: 0x31, 0xcc: 0x06, 0xcd: 0x07, 0xce: 0x08, 0xcf: 0x32, + 0xd0: 0x09, 0xd1: 0x33, 0xd2: 0x34, 0xd3: 0x0a, 0xd6: 0x0b, 0xd7: 0x35, + 0xd8: 0x36, 0xd9: 0x0c, 0xdb: 0x37, 0xdc: 0x38, 0xdd: 0x39, 0xdf: 0x3a, + 0xe0: 0x02, 0xe1: 0x03, 0xe2: 0x04, 0xe3: 0x05, + 0xea: 0x06, 0xeb: 0x07, 0xec: 0x08, 0xed: 0x09, 0xef: 0x0a, + 0xf0: 0x13, + // Block 0x4, offset 0x100 + 0x120: 0x3b, 0x121: 0x3c, 0x123: 0x0d, 0x124: 0x3d, 0x125: 0x3e, 0x126: 0x3f, 0x127: 0x40, + 0x128: 0x41, 0x129: 0x42, 0x12a: 0x43, 0x12b: 0x44, 0x12c: 0x3f, 0x12d: 0x45, 0x12e: 0x46, 0x12f: 0x47, + 0x131: 0x48, 0x132: 0x49, 0x133: 0x4a, 0x134: 0x4b, 0x135: 0x4c, 0x137: 0x4d, + 0x138: 0x4e, 0x139: 0x4f, 0x13a: 0x50, 0x13b: 0x51, 0x13c: 0x52, 0x13d: 0x53, 0x13e: 0x54, 0x13f: 0x55, + // Block 0x5, offset 0x140 + 0x140: 0x56, 0x142: 0x57, 0x144: 0x58, 0x145: 0x59, 0x146: 0x5a, 0x147: 0x5b, + 0x14d: 0x5c, + 0x15c: 0x5d, 0x15f: 0x5e, + 0x162: 0x5f, 0x164: 0x60, + 0x168: 0x61, 0x169: 0x62, 0x16a: 0x63, 0x16c: 0x0e, 0x16d: 0x64, 0x16e: 0x65, 0x16f: 0x66, + 0x170: 0x67, 0x173: 0x68, 0x177: 0x0f, + 0x178: 0x10, 0x179: 0x11, 0x17a: 0x12, 0x17b: 0x13, 0x17c: 0x14, 0x17d: 0x15, 0x17e: 0x16, 0x17f: 0x17, + // Block 0x6, offset 0x180 + 0x180: 0x69, 0x183: 0x6a, 0x184: 0x6b, 0x186: 0x6c, 0x187: 0x6d, + 0x188: 0x6e, 0x189: 0x18, 0x18a: 0x19, 0x18b: 0x6f, 0x18c: 0x70, + 0x1ab: 0x71, + 0x1b3: 0x72, 0x1b5: 0x73, 0x1b7: 0x74, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x75, 0x1c1: 0x1a, 0x1c2: 0x1b, 0x1c3: 0x1c, 0x1c4: 0x76, 0x1c5: 0x77, + 0x1c9: 0x78, 0x1cc: 0x79, 0x1cd: 0x7a, + // Block 0x8, offset 0x200 + 0x219: 0x7b, 0x21a: 0x7c, 0x21b: 0x7d, + 0x220: 0x7e, 0x223: 0x7f, 0x224: 0x80, 0x225: 0x81, 0x226: 0x82, 0x227: 0x83, + 0x22a: 0x84, 0x22b: 0x85, 0x22f: 0x86, + 0x230: 0x87, 0x231: 0x88, 0x232: 0x89, 0x233: 0x8a, 0x234: 0x8b, 0x235: 0x8c, 0x236: 0x8d, 0x237: 0x87, + 0x238: 0x88, 0x239: 0x89, 0x23a: 0x8a, 0x23b: 0x8b, 0x23c: 0x8c, 0x23d: 0x8d, 0x23e: 0x87, 0x23f: 0x88, + // Block 0x9, offset 0x240 + 0x240: 0x89, 0x241: 0x8a, 0x242: 0x8b, 0x243: 0x8c, 0x244: 0x8d, 0x245: 0x87, 0x246: 0x88, 0x247: 0x89, + 0x248: 0x8a, 0x249: 0x8b, 0x24a: 0x8c, 0x24b: 0x8d, 0x24c: 0x87, 0x24d: 0x88, 0x24e: 0x89, 0x24f: 0x8a, + 0x250: 0x8b, 0x251: 0x8c, 0x252: 0x8d, 0x253: 0x87, 0x254: 0x88, 0x255: 0x89, 0x256: 0x8a, 0x257: 0x8b, + 0x258: 0x8c, 0x259: 0x8d, 0x25a: 0x87, 0x25b: 0x88, 0x25c: 0x89, 0x25d: 0x8a, 0x25e: 0x8b, 0x25f: 0x8c, + 0x260: 0x8d, 0x261: 0x87, 0x262: 0x88, 0x263: 0x89, 0x264: 0x8a, 0x265: 0x8b, 0x266: 0x8c, 0x267: 0x8d, + 0x268: 0x87, 0x269: 0x88, 0x26a: 0x89, 0x26b: 0x8a, 0x26c: 0x8b, 0x26d: 0x8c, 0x26e: 0x8d, 0x26f: 0x87, + 0x270: 0x88, 0x271: 0x89, 0x272: 0x8a, 0x273: 0x8b, 0x274: 0x8c, 0x275: 0x8d, 0x276: 0x87, 0x277: 0x88, + 0x278: 0x89, 0x279: 0x8a, 0x27a: 0x8b, 0x27b: 0x8c, 0x27c: 0x8d, 0x27d: 0x87, 0x27e: 0x88, 0x27f: 0x89, + // Block 0xa, offset 0x280 + 0x280: 0x8a, 0x281: 0x8b, 0x282: 0x8c, 0x283: 0x8d, 0x284: 0x87, 0x285: 0x88, 0x286: 0x89, 0x287: 0x8a, + 0x288: 0x8b, 0x289: 0x8c, 0x28a: 0x8d, 0x28b: 0x87, 0x28c: 0x88, 0x28d: 0x89, 0x28e: 0x8a, 0x28f: 0x8b, + 0x290: 0x8c, 0x291: 0x8d, 0x292: 0x87, 0x293: 0x88, 0x294: 0x89, 0x295: 0x8a, 0x296: 0x8b, 0x297: 0x8c, + 0x298: 0x8d, 0x299: 0x87, 0x29a: 0x88, 0x29b: 0x89, 0x29c: 0x8a, 0x29d: 0x8b, 0x29e: 0x8c, 0x29f: 0x8d, + 0x2a0: 0x87, 0x2a1: 0x88, 0x2a2: 0x89, 
0x2a3: 0x8a, 0x2a4: 0x8b, 0x2a5: 0x8c, 0x2a6: 0x8d, 0x2a7: 0x87, + 0x2a8: 0x88, 0x2a9: 0x89, 0x2aa: 0x8a, 0x2ab: 0x8b, 0x2ac: 0x8c, 0x2ad: 0x8d, 0x2ae: 0x87, 0x2af: 0x88, + 0x2b0: 0x89, 0x2b1: 0x8a, 0x2b2: 0x8b, 0x2b3: 0x8c, 0x2b4: 0x8d, 0x2b5: 0x87, 0x2b6: 0x88, 0x2b7: 0x89, + 0x2b8: 0x8a, 0x2b9: 0x8b, 0x2ba: 0x8c, 0x2bb: 0x8d, 0x2bc: 0x87, 0x2bd: 0x88, 0x2be: 0x89, 0x2bf: 0x8a, + // Block 0xb, offset 0x2c0 + 0x2c0: 0x8b, 0x2c1: 0x8c, 0x2c2: 0x8d, 0x2c3: 0x87, 0x2c4: 0x88, 0x2c5: 0x89, 0x2c6: 0x8a, 0x2c7: 0x8b, + 0x2c8: 0x8c, 0x2c9: 0x8d, 0x2ca: 0x87, 0x2cb: 0x88, 0x2cc: 0x89, 0x2cd: 0x8a, 0x2ce: 0x8b, 0x2cf: 0x8c, + 0x2d0: 0x8d, 0x2d1: 0x87, 0x2d2: 0x88, 0x2d3: 0x89, 0x2d4: 0x8a, 0x2d5: 0x8b, 0x2d6: 0x8c, 0x2d7: 0x8d, + 0x2d8: 0x87, 0x2d9: 0x88, 0x2da: 0x89, 0x2db: 0x8a, 0x2dc: 0x8b, 0x2dd: 0x8c, 0x2de: 0x8e, + // Block 0xc, offset 0x300 + 0x324: 0x1d, 0x325: 0x1e, 0x326: 0x1f, 0x327: 0x20, + 0x328: 0x21, 0x329: 0x22, 0x32a: 0x23, 0x32b: 0x24, 0x32c: 0x8f, 0x32d: 0x90, 0x32e: 0x91, + 0x331: 0x92, 0x332: 0x93, 0x333: 0x94, 0x334: 0x95, + 0x338: 0x96, 0x339: 0x97, 0x33a: 0x98, 0x33b: 0x99, 0x33e: 0x9a, 0x33f: 0x9b, + // Block 0xd, offset 0x340 + 0x347: 0x9c, + 0x34b: 0x9d, 0x34d: 0x9e, + 0x368: 0x9f, 0x36b: 0xa0, + 0x374: 0xa1, + 0x37d: 0xa2, + // Block 0xe, offset 0x380 + 0x381: 0xa3, 0x382: 0xa4, 0x384: 0xa5, 0x385: 0x82, 0x387: 0xa6, + 0x388: 0xa7, 0x38b: 0xa8, 0x38c: 0xa9, 0x38d: 0xaa, + 0x391: 0xab, 0x392: 0xac, 0x393: 0xad, 0x396: 0xae, 0x397: 0xaf, + 0x398: 0x73, 0x39a: 0xb0, 0x39c: 0xb1, + 0x3a0: 0xb2, + 0x3a8: 0xb3, 0x3a9: 0xb4, 0x3aa: 0xb5, + 0x3b0: 0x73, 0x3b5: 0xb6, 0x3b6: 0xb7, + // Block 0xf, offset 0x3c0 + 0x3eb: 0xb8, 0x3ec: 0xb9, + // Block 0x10, offset 0x400 + 0x432: 0xba, + // Block 0x11, offset 0x440 + 0x445: 0xbb, 0x446: 0xbc, 0x447: 0xbd, + 0x449: 0xbe, + // Block 0x12, offset 0x480 + 0x480: 0xbf, + 0x4a3: 0xc0, 0x4a5: 0xc1, + // Block 0x13, offset 0x4c0 + 0x4c8: 0xc2, + // Block 0x14, offset 0x500 + 0x520: 0x25, 0x521: 0x26, 0x522: 0x27, 0x523: 0x28, 0x524: 0x29, 0x525: 0x2a, 0x526: 0x2b, 0x527: 0x2c, + 0x528: 0x2d, + // Block 0x15, offset 0x540 + 0x550: 0x0b, 0x551: 0x0c, 0x556: 0x0d, + 0x55b: 0x0e, 0x55d: 0x0f, 0x55e: 0x10, 0x55f: 0x11, + 0x56f: 0x12, +} + +// nfcSparseOffset: 149 entries, 298 bytes +var nfcSparseOffset = []uint16{0x0, 0x5, 0x9, 0xb, 0xd, 0x18, 0x28, 0x2a, 0x2f, 0x3a, 0x49, 0x56, 0x5e, 0x63, 0x68, 0x6a, 0x72, 0x79, 0x7c, 0x84, 0x88, 0x8c, 0x8e, 0x90, 0x99, 0x9d, 0xa4, 0xa9, 0xac, 0xb6, 0xb9, 0xc0, 0xc8, 0xcb, 0xcd, 0xcf, 0xd1, 0xd6, 0xe7, 0xf3, 0xf5, 0xfb, 0xfd, 0xff, 0x101, 0x103, 0x105, 0x107, 0x10a, 0x10d, 0x10f, 0x112, 0x115, 0x119, 0x11e, 0x127, 0x129, 0x12c, 0x12e, 0x139, 0x13d, 0x14b, 0x14e, 0x154, 0x15a, 0x165, 0x169, 0x16b, 0x16d, 0x16f, 0x171, 0x173, 0x179, 0x17d, 0x17f, 0x181, 0x189, 0x18d, 0x190, 0x192, 0x194, 0x196, 0x199, 0x19b, 0x19d, 0x19f, 0x1a1, 0x1a7, 0x1aa, 0x1ac, 0x1b3, 0x1b9, 0x1bf, 0x1c7, 0x1cd, 0x1d3, 0x1d9, 0x1dd, 0x1eb, 0x1f4, 0x1f7, 0x1fa, 0x1fc, 0x1ff, 0x201, 0x205, 0x20a, 0x20c, 0x20e, 0x213, 0x219, 0x21b, 0x21d, 0x21f, 0x225, 0x228, 0x22a, 0x230, 0x233, 0x23b, 0x242, 0x245, 0x248, 0x24a, 0x24d, 0x255, 0x259, 0x260, 0x263, 0x269, 0x26b, 0x26e, 0x270, 0x273, 0x275, 0x277, 0x279, 0x27c, 0x27e, 0x280, 0x282, 0x284, 0x291, 0x29b, 0x29d, 0x29f, 0x2a5, 0x2a7, 0x2aa} + +// nfcSparseValues: 684 entries, 2736 bytes +var nfcSparseValues = [684]valueRange{ + // Block 0x0, offset 0x0 + {value: 0x0000, lo: 0x04}, + {value: 0xa100, lo: 0xa8, hi: 0xa8}, + {value: 0x8100, lo: 0xaf, hi: 0xaf}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + 
{value: 0x8100, lo: 0xb8, hi: 0xb8}, + // Block 0x1, offset 0x5 + {value: 0x0091, lo: 0x03}, + {value: 0x46e2, lo: 0xa0, hi: 0xa1}, + {value: 0x4714, lo: 0xaf, hi: 0xb0}, + {value: 0xa000, lo: 0xb7, hi: 0xb7}, + // Block 0x2, offset 0x9 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + // Block 0x3, offset 0xb + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x98, hi: 0x9d}, + // Block 0x4, offset 0xd + {value: 0x0006, lo: 0x0a}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x85, hi: 0x85}, + {value: 0xa000, lo: 0x89, hi: 0x89}, + {value: 0x4840, lo: 0x8a, hi: 0x8a}, + {value: 0x485e, lo: 0x8b, hi: 0x8b}, + {value: 0x36c7, lo: 0x8c, hi: 0x8c}, + {value: 0x36df, lo: 0x8d, hi: 0x8d}, + {value: 0x4876, lo: 0x8e, hi: 0x8e}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x36fd, lo: 0x93, hi: 0x94}, + // Block 0x5, offset 0x18 + {value: 0x0000, lo: 0x0f}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0xa000, lo: 0x8d, hi: 0x8d}, + {value: 0x37a5, lo: 0x90, hi: 0x90}, + {value: 0x37b1, lo: 0x91, hi: 0x91}, + {value: 0x379f, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x96, hi: 0x96}, + {value: 0x3817, lo: 0x97, hi: 0x97}, + {value: 0x37e1, lo: 0x9c, hi: 0x9c}, + {value: 0x37c9, lo: 0x9d, hi: 0x9d}, + {value: 0x37f3, lo: 0x9e, hi: 0x9e}, + {value: 0xa000, lo: 0xb4, hi: 0xb5}, + {value: 0x381d, lo: 0xb6, hi: 0xb6}, + {value: 0x3823, lo: 0xb7, hi: 0xb7}, + // Block 0x6, offset 0x28 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x83, hi: 0x87}, + // Block 0x7, offset 0x2a + {value: 0x0001, lo: 0x04}, + {value: 0x8113, lo: 0x81, hi: 0x82}, + {value: 0x8132, lo: 0x84, hi: 0x84}, + {value: 0x812d, lo: 0x85, hi: 0x85}, + {value: 0x810d, lo: 0x87, hi: 0x87}, + // Block 0x8, offset 0x2f + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x97}, + {value: 0x8119, lo: 0x98, hi: 0x98}, + {value: 0x811a, lo: 0x99, hi: 0x99}, + {value: 0x811b, lo: 0x9a, hi: 0x9a}, + {value: 0x3841, lo: 0xa2, hi: 0xa2}, + {value: 0x3847, lo: 0xa3, hi: 0xa3}, + {value: 0x3853, lo: 0xa4, hi: 0xa4}, + {value: 0x384d, lo: 0xa5, hi: 0xa5}, + {value: 0x3859, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xa7, hi: 0xa7}, + // Block 0x9, offset 0x3a + {value: 0x0000, lo: 0x0e}, + {value: 0x386b, lo: 0x80, hi: 0x80}, + {value: 0xa000, lo: 0x81, hi: 0x81}, + {value: 0x385f, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x3865, lo: 0x93, hi: 0x93}, + {value: 0xa000, lo: 0x95, hi: 0x95}, + {value: 0x8132, lo: 0x96, hi: 0x9c}, + {value: 0x8132, lo: 0x9f, hi: 0xa2}, + {value: 0x812d, lo: 0xa3, hi: 0xa3}, + {value: 0x8132, lo: 0xa4, hi: 0xa4}, + {value: 0x8132, lo: 0xa7, hi: 0xa8}, + {value: 0x812d, lo: 0xaa, hi: 0xaa}, + {value: 0x8132, lo: 0xab, hi: 0xac}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + // Block 0xa, offset 0x49 + {value: 0x0000, lo: 0x0c}, + {value: 0x811f, lo: 0x91, hi: 0x91}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x812d, lo: 0xb1, hi: 0xb1}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb5, hi: 0xb6}, + {value: 0x812d, lo: 0xb7, hi: 0xb9}, + {value: 0x8132, lo: 0xba, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbc}, + {value: 0x8132, lo: 0xbd, hi: 0xbd}, + {value: 0x812d, lo: 0xbe, hi: 0xbe}, + {value: 0x8132, lo: 0xbf, hi: 0xbf}, + // Block 0xb, offset 0x56 + {value: 0x0005, lo: 0x07}, + {value: 0x8132, lo: 0x80, hi: 0x80}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x812d, lo: 0x82, hi: 0x83}, + 
{value: 0x812d, lo: 0x84, hi: 0x85}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x812d, lo: 0x88, hi: 0x89}, + {value: 0x8132, lo: 0x8a, hi: 0x8a}, + // Block 0xc, offset 0x5e + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xab, hi: 0xb1}, + {value: 0x812d, lo: 0xb2, hi: 0xb2}, + {value: 0x8132, lo: 0xb3, hi: 0xb3}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0xd, offset 0x63 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0x96, hi: 0x99}, + {value: 0x8132, lo: 0x9b, hi: 0xa3}, + {value: 0x8132, lo: 0xa5, hi: 0xa7}, + {value: 0x8132, lo: 0xa9, hi: 0xad}, + // Block 0xe, offset 0x68 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x99, hi: 0x9b}, + // Block 0xf, offset 0x6a + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0xa8, hi: 0xa8}, + {value: 0x3ed8, lo: 0xa9, hi: 0xa9}, + {value: 0xa000, lo: 0xb0, hi: 0xb0}, + {value: 0x3ee0, lo: 0xb1, hi: 0xb1}, + {value: 0xa000, lo: 0xb3, hi: 0xb3}, + {value: 0x3ee8, lo: 0xb4, hi: 0xb4}, + {value: 0x9902, lo: 0xbc, hi: 0xbc}, + // Block 0x10, offset 0x72 + {value: 0x0008, lo: 0x06}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x91, hi: 0x91}, + {value: 0x812d, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x93, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x94}, + {value: 0x451c, lo: 0x98, hi: 0x9f}, + // Block 0x11, offset 0x79 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x12, offset 0x7c + {value: 0x0008, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2c9e, lo: 0x8b, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x455c, lo: 0x9c, hi: 0x9d}, + {value: 0x456c, lo: 0x9f, hi: 0x9f}, + {value: 0x8132, lo: 0xbe, hi: 0xbe}, + // Block 0x13, offset 0x84 + {value: 0x0000, lo: 0x03}, + {value: 0x4594, lo: 0xb3, hi: 0xb3}, + {value: 0x459c, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x14, offset 0x88 + {value: 0x0008, lo: 0x03}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x4574, lo: 0x99, hi: 0x9b}, + {value: 0x458c, lo: 0x9e, hi: 0x9e}, + // Block 0x15, offset 0x8c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + // Block 0x16, offset 0x8e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + // Block 0x17, offset 0x90 + {value: 0x0000, lo: 0x08}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2cb6, lo: 0x88, hi: 0x88}, + {value: 0x2cae, lo: 0x8b, hi: 0x8b}, + {value: 0x2cbe, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x96, hi: 0x97}, + {value: 0x45a4, lo: 0x9c, hi: 0x9c}, + {value: 0x45ac, lo: 0x9d, hi: 0x9d}, + // Block 0x18, offset 0x99 + {value: 0x0000, lo: 0x03}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0x2cc6, lo: 0x94, hi: 0x94}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x19, offset 0x9d + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cce, lo: 0x8a, hi: 0x8a}, + {value: 0x2cde, lo: 0x8b, hi: 0x8b}, + {value: 0x2cd6, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1a, offset 0xa4 + {value: 0x1801, lo: 0x04}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x3ef0, lo: 0x88, hi: 0x88}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x8120, lo: 0x95, hi: 0x96}, + // Block 0x1b, offset 0xa9 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbc, hi: 0xbc}, + {value: 0xa000, lo: 0xbf, hi: 0xbf}, + // Block 0x1c, offset 0xac + {value: 0x0000, lo: 0x09}, 
+ {value: 0x2ce6, lo: 0x80, hi: 0x80}, + {value: 0x9900, lo: 0x82, hi: 0x82}, + {value: 0xa000, lo: 0x86, hi: 0x86}, + {value: 0x2cee, lo: 0x87, hi: 0x87}, + {value: 0x2cf6, lo: 0x88, hi: 0x88}, + {value: 0x2f50, lo: 0x8a, hi: 0x8a}, + {value: 0x2dd8, lo: 0x8b, hi: 0x8b}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x95, hi: 0x96}, + // Block 0x1d, offset 0xb6 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x1e, offset 0xb9 + {value: 0x0000, lo: 0x06}, + {value: 0xa000, lo: 0x86, hi: 0x87}, + {value: 0x2cfe, lo: 0x8a, hi: 0x8a}, + {value: 0x2d0e, lo: 0x8b, hi: 0x8b}, + {value: 0x2d06, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + // Block 0x1f, offset 0xc0 + {value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x20, offset 0xc8 + {value: 0x0000, lo: 0x02}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x21, offset 0xcb + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x22, offset 0xcd + {value: 0x0000, lo: 0x01}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x23, offset 0xcf + {value: 0x0000, lo: 0x01}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + // Block 0x24, offset 0xd1 + {value: 0x0000, lo: 0x04}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x25, offset 0xd6 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x8200, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x8200, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x26, offset 0xe7 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x27, offset 0xf3 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x28, offset 0xf5 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x29, offset 0xfb + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x2a, offset 0xfd + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0xff + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 
0xb5}, + // Block 0x2c, offset 0x101 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x103 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x105 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x107 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x10a + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x10f + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x112 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x115 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x119 + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x11e + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x127 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x129 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x12c + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x12e + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x139 + {value: 0x0004, lo: 0x03}, + {value: 0x0433, lo: 0x80, hi: 0x81}, + {value: 0x8100, lo: 0x97, hi: 0x97}, + {value: 0x8100, lo: 0xbe, hi: 0xbe}, + // Block 0x3c, offset 0x13d + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x3d, offset 0x14b + {value: 0x427b, lo: 0x02}, + {value: 0x01b8, lo: 0xa6, hi: 0xa6}, + {value: 0x0057, lo: 0xaa, hi: 0xab}, + // Block 0x3e, offset 0x14e + {value: 0x0007, lo: 0x05}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + 
{value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x3f, offset 0x154 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x40, offset 0x15a + {value: 0x6408, lo: 0x0a}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x41, offset 0x165 + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x42, offset 0x169 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x43, offset 0x16b + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x44, offset 0x16d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x45, offset 0x16f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x46, offset 0x171 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x47, offset 0x173 + {value: 0x0000, lo: 0x05}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xaf}, + // Block 0x48, offset 0x179 + {value: 0x0000, lo: 0x03}, + {value: 0x4a9f, lo: 0xb3, hi: 0xb3}, + {value: 0x4a9f, lo: 0xb5, hi: 0xb6}, + {value: 0x4a9f, lo: 0xba, hi: 0xbf}, + // Block 0x49, offset 0x17d + {value: 0x0000, lo: 0x01}, + {value: 0x4a9f, lo: 0x8f, hi: 0xa3}, + // Block 0x4a, offset 0x17f + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xae, hi: 0xbe}, + // Block 0x4b, offset 0x181 + {value: 0x0000, lo: 0x07}, + {value: 0x8100, lo: 0x84, hi: 0x84}, + {value: 0x8100, lo: 0x87, hi: 0x87}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + {value: 0x8100, lo: 0x9e, hi: 0x9e}, + {value: 0x8100, lo: 0xa1, hi: 0xa1}, + {value: 0x8100, lo: 0xb2, hi: 0xb2}, + {value: 0x8100, lo: 0xbb, hi: 0xbb}, + // Block 0x4c, offset 0x189 + {value: 0x0000, lo: 0x03}, + {value: 0x8100, lo: 0x80, hi: 0x80}, + {value: 0x8100, lo: 0x8b, hi: 0x8b}, + {value: 0x8100, lo: 0x8e, hi: 0x8e}, + // Block 0x4d, offset 0x18d + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x4e, offset 0x190 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x4f, offset 0x192 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x50, offset 0x194 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x51, offset 0x196 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x52, offset 0x199 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x53, offset 0x19b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x54, offset 0x19d + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x55, offset 0x19f + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 
0x56, offset 0x1a1 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x57, offset 0x1a7 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x58, offset 0x1aa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x59, offset 0x1ac + {value: 0x0000, lo: 0x06}, + {value: 0xe500, lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x5a, offset 0x1b3 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x5b, offset 0x1b9 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x5c, offset 0x1bf + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x5d, offset 0x1c7 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x5e, offset 0x1cd + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x5f, offset 0x1d3 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x60, offset 0x1d9 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x61, offset 0x1dd + {value: 0x0006, lo: 0x0d}, + {value: 0x4390, lo: 0x9d, hi: 0x9d}, + {value: 0x8115, lo: 0x9e, hi: 0x9e}, + {value: 0x4402, lo: 0x9f, hi: 0x9f}, + {value: 0x43f0, lo: 0xaa, hi: 0xab}, + {value: 0x44f4, lo: 0xac, hi: 0xac}, + {value: 0x44fc, lo: 0xad, hi: 0xad}, + {value: 0x4348, lo: 0xae, hi: 0xb1}, + {value: 0x4366, lo: 0xb2, hi: 0xb4}, + {value: 0x437e, lo: 0xb5, hi: 0xb6}, + {value: 0x438a, lo: 0xb8, hi: 0xb8}, + {value: 0x4396, lo: 0xb9, hi: 0xbb}, + {value: 0x43ae, lo: 0xbc, hi: 0xbc}, + {value: 0x43b4, lo: 0xbe, hi: 0xbe}, + // Block 0x62, offset 0x1eb + {value: 0x0006, lo: 0x08}, + {value: 0x43ba, lo: 0x80, hi: 0x81}, + {value: 0x43c6, lo: 0x83, hi: 0x84}, + {value: 0x43d8, lo: 0x86, hi: 0x89}, + {value: 0x43fc, lo: 0x8a, hi: 0x8a}, + {value: 0x4378, lo: 0x8b, hi: 0x8b}, + {value: 0x4360, lo: 0x8c, hi: 0x8c}, + {value: 0x43a8, lo: 0x8d, hi: 0x8d}, + {value: 0x43d2, lo: 0x8e, hi: 0x8e}, + // Block 0x63, offset 0x1f4 + {value: 0x0000, lo: 0x02}, + {value: 
0x8100, lo: 0xa4, hi: 0xa5}, + {value: 0x8100, lo: 0xb0, hi: 0xb1}, + // Block 0x64, offset 0x1f7 + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x9b, hi: 0x9d}, + {value: 0x8200, lo: 0x9e, hi: 0xa3}, + // Block 0x65, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x90, hi: 0x90}, + // Block 0x66, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8100, lo: 0x99, hi: 0x99}, + {value: 0x8200, lo: 0xb2, hi: 0xb4}, + // Block 0x67, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xbc, hi: 0xbd}, + // Block 0x68, offset 0x201 + {value: 0x0000, lo: 0x03}, + {value: 0x8132, lo: 0xa0, hi: 0xa6}, + {value: 0x812d, lo: 0xa7, hi: 0xad}, + {value: 0x8132, lo: 0xae, hi: 0xaf}, + // Block 0x69, offset 0x205 + {value: 0x0000, lo: 0x04}, + {value: 0x8100, lo: 0x89, hi: 0x8c}, + {value: 0x8100, lo: 0xb0, hi: 0xb2}, + {value: 0x8100, lo: 0xb4, hi: 0xb4}, + {value: 0x8100, lo: 0xb6, hi: 0xbf}, + // Block 0x6a, offset 0x20a + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0x81, hi: 0x8c}, + // Block 0x6b, offset 0x20c + {value: 0x0000, lo: 0x01}, + {value: 0x8100, lo: 0xb5, hi: 0xba}, + // Block 0x6c, offset 0x20e + {value: 0x0000, lo: 0x04}, + {value: 0x4a9f, lo: 0x9e, hi: 0x9f}, + {value: 0x4a9f, lo: 0xa3, hi: 0xa3}, + {value: 0x4a9f, lo: 0xa5, hi: 0xa6}, + {value: 0x4a9f, lo: 0xaa, hi: 0xaf}, + // Block 0x6d, offset 0x213 + {value: 0x0000, lo: 0x05}, + {value: 0x4a9f, lo: 0x82, hi: 0x87}, + {value: 0x4a9f, lo: 0x8a, hi: 0x8f}, + {value: 0x4a9f, lo: 0x92, hi: 0x97}, + {value: 0x4a9f, lo: 0x9a, hi: 0x9c}, + {value: 0x8100, lo: 0xa3, hi: 0xa3}, + // Block 0x6e, offset 0x219 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6f, offset 0x21b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x70, offset 0x21d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x71, offset 0x21f + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x225 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x73, offset 0x228 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x74, offset 0x22a + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x75, offset 0x230 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x76, offset 0x233 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x77, offset 0x23b + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x78, offset 0x242 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x79, offset 
0x245 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x7a, offset 0x248 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x7b, offset 0x24a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x7c, offset 0x24d + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x7d, offset 0x255 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7e, offset 0x259 + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7f, offset 0x260 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x80, offset 0x263 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x81, offset 0x269 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x82, offset 0x26b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x83, offset 0x26e + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x84, offset 0x270 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x85, offset 0x273 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x86, offset 0x275 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x87, offset 0x277 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x88, offset 0x279 + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x89, offset 0x27c + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x8a, offset 0x27e + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x8b, offset 0x280 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x8c, offset 0x282 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8d, offset 0x284 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8e, offset 0x291 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, 
lo: 0xaa, hi: 0xad},
+	{value: 0x45e0, lo: 0xbb, hi: 0xbb},
+	{value: 0x45ea, lo: 0xbc, hi: 0xbc},
+	{value: 0x4650, lo: 0xbd, hi: 0xbd},
+	{value: 0x466c, lo: 0xbe, hi: 0xbe},
+	{value: 0x465e, lo: 0xbf, hi: 0xbf},
+	// Block 0x8f, offset 0x29b
+	{value: 0x0000, lo: 0x01},
+	{value: 0x467a, lo: 0x80, hi: 0x80},
+	// Block 0x90, offset 0x29d
+	{value: 0x0000, lo: 0x01},
+	{value: 0x8132, lo: 0x82, hi: 0x84},
+	// Block 0x91, offset 0x29f
+	{value: 0x0000, lo: 0x05},
+	{value: 0x8132, lo: 0x80, hi: 0x86},
+	{value: 0x8132, lo: 0x88, hi: 0x98},
+	{value: 0x8132, lo: 0x9b, hi: 0xa1},
+	{value: 0x8132, lo: 0xa3, hi: 0xa4},
+	{value: 0x8132, lo: 0xa6, hi: 0xaa},
+	// Block 0x92, offset 0x2a5
+	{value: 0x0000, lo: 0x01},
+	{value: 0x812d, lo: 0x90, hi: 0x96},
+	// Block 0x93, offset 0x2a7
+	{value: 0x0000, lo: 0x02},
+	{value: 0x8132, lo: 0x84, hi: 0x89},
+	{value: 0x8102, lo: 0x8a, hi: 0x8a},
+	// Block 0x94, offset 0x2aa
+	{value: 0x0000, lo: 0x01},
+	{value: 0x8100, lo: 0x93, hi: 0x93},
+}
+
+// lookup returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookup(s []byte) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return nfkcValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = nfkcIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupUnsafe(s []byte) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return nfkcValues[c0]
+	}
+	i := nfkcIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// lookupString returns the trie value for the first UTF-8 encoding in s and
+// the width in bytes of this encoding. The size will be 0 if s does not
+// hold enough bytes to complete the encoding. len(s) must be greater than 0.
+func (t *nfkcTrie) lookupString(s string) (v uint16, sz int) {
+	c0 := s[0]
+	switch {
+	case c0 < 0x80: // is ASCII
+		return nfkcValues[c0], 1
+	case c0 < 0xC2:
+		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
+	case c0 < 0xE0: // 2-byte UTF-8
+		if len(s) < 2 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c1), 2
+	case c0 < 0xF0: // 3-byte UTF-8
+		if len(s) < 3 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c2), 3
+	case c0 < 0xF8: // 4-byte UTF-8
+		if len(s) < 4 {
+			return 0, 0
+		}
+		i := nfkcIndex[c0]
+		c1 := s[1]
+		if c1 < 0x80 || 0xC0 <= c1 {
+			return 0, 1 // Illegal UTF-8: not a continuation byte.
+		}
+		o := uint32(i)<<6 + uint32(c1)
+		i = nfkcIndex[o]
+		c2 := s[2]
+		if c2 < 0x80 || 0xC0 <= c2 {
+			return 0, 2 // Illegal UTF-8: not a continuation byte.
+		}
+		o = uint32(i)<<6 + uint32(c2)
+		i = nfkcIndex[o]
+		c3 := s[3]
+		if c3 < 0x80 || 0xC0 <= c3 {
+			return 0, 3 // Illegal UTF-8: not a continuation byte.
+		}
+		return t.lookupValue(uint32(i), c3), 4
+	}
+	// Illegal rune
+	return 0, 1
+}
+
+// lookupStringUnsafe returns the trie value for the first UTF-8 encoding in s.
+// s must start with a full and valid UTF-8 encoded rune.
+func (t *nfkcTrie) lookupStringUnsafe(s string) uint16 {
+	c0 := s[0]
+	if c0 < 0x80 { // is ASCII
+		return nfkcValues[c0]
+	}
+	i := nfkcIndex[c0]
+	if c0 < 0xE0 { // 2-byte UTF-8
+		return t.lookupValue(uint32(i), s[1])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[1])]
+	if c0 < 0xF0 { // 3-byte UTF-8
+		return t.lookupValue(uint32(i), s[2])
+	}
+	i = nfkcIndex[uint32(i)<<6+uint32(s[2])]
+	if c0 < 0xF8 { // 4-byte UTF-8
+		return t.lookupValue(uint32(i), s[3])
+	}
+	return 0
+}
+
+// nfkcTrie. Total size: 17248 bytes (16.84 KiB). Checksum: 4fb368372b6b1b27.
+type nfkcTrie struct{}
+
+func newNfkcTrie(i int) *nfkcTrie {
+	return &nfkcTrie{}
+}
+
+// lookupValue determines the type of block n and looks up the value for b.
+func (t *nfkcTrie) lookupValue(n uint32, b byte) uint16 {
+	switch {
+	case n < 92:
+		return uint16(nfkcValues[n<<6+uint32(b)])
+	default:
+		n -= 92
+		return uint16(nfkcSparse.lookup(n, b))
+	}
+}
+
+// nfkcValues: 94 blocks, 6016 entries, 12032 bytes
+// The third block is the zero block.
+var nfkcValues = [6016]uint16{ + // Block 0x0, offset 0x0 + 0x3c: 0xa000, 0x3d: 0xa000, 0x3e: 0xa000, + // Block 0x1, offset 0x40 + 0x41: 0xa000, 0x42: 0xa000, 0x43: 0xa000, 0x44: 0xa000, 0x45: 0xa000, + 0x46: 0xa000, 0x47: 0xa000, 0x48: 0xa000, 0x49: 0xa000, 0x4a: 0xa000, 0x4b: 0xa000, + 0x4c: 0xa000, 0x4d: 0xa000, 0x4e: 0xa000, 0x4f: 0xa000, 0x50: 0xa000, + 0x52: 0xa000, 0x53: 0xa000, 0x54: 0xa000, 0x55: 0xa000, 0x56: 0xa000, 0x57: 0xa000, + 0x58: 0xa000, 0x59: 0xa000, 0x5a: 0xa000, + 0x61: 0xa000, 0x62: 0xa000, 0x63: 0xa000, + 0x64: 0xa000, 0x65: 0xa000, 0x66: 0xa000, 0x67: 0xa000, 0x68: 0xa000, 0x69: 0xa000, + 0x6a: 0xa000, 0x6b: 0xa000, 0x6c: 0xa000, 0x6d: 0xa000, 0x6e: 0xa000, 0x6f: 0xa000, + 0x70: 0xa000, 0x72: 0xa000, 0x73: 0xa000, 0x74: 0xa000, 0x75: 0xa000, + 0x76: 0xa000, 0x77: 0xa000, 0x78: 0xa000, 0x79: 0xa000, 0x7a: 0xa000, + // Block 0x2, offset 0x80 + // Block 0x3, offset 0xc0 + 0xc0: 0x2f6f, 0xc1: 0x2f74, 0xc2: 0x4688, 0xc3: 0x2f79, 0xc4: 0x4697, 0xc5: 0x469c, + 0xc6: 0xa000, 0xc7: 0x46a6, 0xc8: 0x2fe2, 0xc9: 0x2fe7, 0xca: 0x46ab, 0xcb: 0x2ffb, + 0xcc: 0x306e, 0xcd: 0x3073, 0xce: 0x3078, 0xcf: 0x46bf, 0xd1: 0x3104, + 0xd2: 0x3127, 0xd3: 0x312c, 0xd4: 0x46c9, 0xd5: 0x46ce, 0xd6: 0x46dd, + 0xd8: 0xa000, 0xd9: 0x31b3, 0xda: 0x31b8, 0xdb: 0x31bd, 0xdc: 0x470f, 0xdd: 0x3235, + 0xe0: 0x327b, 0xe1: 0x3280, 0xe2: 0x4719, 0xe3: 0x3285, + 0xe4: 0x4728, 0xe5: 0x472d, 0xe6: 0xa000, 0xe7: 0x4737, 0xe8: 0x32ee, 0xe9: 0x32f3, + 0xea: 0x473c, 0xeb: 0x3307, 0xec: 0x337f, 0xed: 0x3384, 0xee: 0x3389, 0xef: 0x4750, + 0xf1: 0x3415, 0xf2: 0x3438, 0xf3: 0x343d, 0xf4: 0x475a, 0xf5: 0x475f, + 0xf6: 0x476e, 0xf8: 0xa000, 0xf9: 0x34c9, 0xfa: 0x34ce, 0xfb: 0x34d3, + 0xfc: 0x47a0, 0xfd: 0x3550, 0xff: 0x3569, + // Block 0x4, offset 0x100 + 0x100: 0x2f7e, 0x101: 0x328a, 0x102: 0x468d, 0x103: 0x471e, 0x104: 0x2f9c, 0x105: 0x32a8, + 0x106: 0x2fb0, 0x107: 0x32bc, 0x108: 0x2fb5, 0x109: 0x32c1, 0x10a: 0x2fba, 0x10b: 0x32c6, + 0x10c: 0x2fbf, 0x10d: 0x32cb, 0x10e: 0x2fc9, 0x10f: 0x32d5, + 0x112: 0x46b0, 0x113: 0x4741, 0x114: 0x2ff1, 0x115: 0x32fd, 0x116: 0x2ff6, 0x117: 0x3302, + 0x118: 0x3014, 0x119: 0x3320, 0x11a: 0x3005, 0x11b: 0x3311, 0x11c: 0x302d, 0x11d: 0x3339, + 0x11e: 0x3037, 0x11f: 0x3343, 0x120: 0x303c, 0x121: 0x3348, 0x122: 0x3046, 0x123: 0x3352, + 0x124: 0x304b, 0x125: 0x3357, 0x128: 0x307d, 0x129: 0x338e, + 0x12a: 0x3082, 0x12b: 0x3393, 0x12c: 0x3087, 0x12d: 0x3398, 0x12e: 0x30aa, 0x12f: 0x33b6, + 0x130: 0x308c, 0x132: 0x195d, 0x133: 0x19e7, 0x134: 0x30b4, 0x135: 0x33c0, + 0x136: 0x30c8, 0x137: 0x33d9, 0x139: 0x30d2, 0x13a: 0x33e3, 0x13b: 0x30dc, + 0x13c: 0x33ed, 0x13d: 0x30d7, 0x13e: 0x33e8, 0x13f: 0x1bac, + // Block 0x5, offset 0x140 + 0x140: 0x1c34, 0x143: 0x30ff, 0x144: 0x3410, 0x145: 0x3118, + 0x146: 0x3429, 0x147: 0x310e, 0x148: 0x341f, 0x149: 0x1c5c, + 0x14c: 0x46d3, 0x14d: 0x4764, 0x14e: 0x3131, 0x14f: 0x3442, 0x150: 0x313b, 0x151: 0x344c, + 0x154: 0x3159, 0x155: 0x346a, 0x156: 0x3172, 0x157: 0x3483, + 0x158: 0x3163, 0x159: 0x3474, 0x15a: 0x46f6, 0x15b: 0x4787, 0x15c: 0x317c, 0x15d: 0x348d, + 0x15e: 0x318b, 0x15f: 0x349c, 0x160: 0x46fb, 0x161: 0x478c, 0x162: 0x31a4, 0x163: 0x34ba, + 0x164: 0x3195, 0x165: 0x34ab, 0x168: 0x4705, 0x169: 0x4796, + 0x16a: 0x470a, 0x16b: 0x479b, 0x16c: 0x31c2, 0x16d: 0x34d8, 0x16e: 0x31cc, 0x16f: 0x34e2, + 0x170: 0x31d1, 0x171: 0x34e7, 0x172: 0x31ef, 0x173: 0x3505, 0x174: 0x3212, 0x175: 0x3528, + 0x176: 0x323a, 0x177: 0x3555, 0x178: 0x324e, 0x179: 0x325d, 0x17a: 0x357d, 0x17b: 0x3267, + 0x17c: 0x3587, 0x17d: 0x326c, 0x17e: 0x358c, 0x17f: 0x00a7, + // Block 0x6, 
offset 0x180 + 0x184: 0x2dee, 0x185: 0x2df4, + 0x186: 0x2dfa, 0x187: 0x1972, 0x188: 0x1975, 0x189: 0x1a08, 0x18a: 0x1987, 0x18b: 0x198a, + 0x18c: 0x1a3e, 0x18d: 0x2f88, 0x18e: 0x3294, 0x18f: 0x3096, 0x190: 0x33a2, 0x191: 0x3140, + 0x192: 0x3451, 0x193: 0x31d6, 0x194: 0x34ec, 0x195: 0x39cf, 0x196: 0x3b5e, 0x197: 0x39c8, + 0x198: 0x3b57, 0x199: 0x39d6, 0x19a: 0x3b65, 0x19b: 0x39c1, 0x19c: 0x3b50, + 0x19e: 0x38b0, 0x19f: 0x3a3f, 0x1a0: 0x38a9, 0x1a1: 0x3a38, 0x1a2: 0x35b3, 0x1a3: 0x35c5, + 0x1a6: 0x3041, 0x1a7: 0x334d, 0x1a8: 0x30be, 0x1a9: 0x33cf, + 0x1aa: 0x46ec, 0x1ab: 0x477d, 0x1ac: 0x3990, 0x1ad: 0x3b1f, 0x1ae: 0x35d7, 0x1af: 0x35dd, + 0x1b0: 0x33c5, 0x1b1: 0x1942, 0x1b2: 0x1945, 0x1b3: 0x19cf, 0x1b4: 0x3028, 0x1b5: 0x3334, + 0x1b8: 0x30fa, 0x1b9: 0x340b, 0x1ba: 0x38b7, 0x1bb: 0x3a46, + 0x1bc: 0x35ad, 0x1bd: 0x35bf, 0x1be: 0x35b9, 0x1bf: 0x35cb, + // Block 0x7, offset 0x1c0 + 0x1c0: 0x2f8d, 0x1c1: 0x3299, 0x1c2: 0x2f92, 0x1c3: 0x329e, 0x1c4: 0x300a, 0x1c5: 0x3316, + 0x1c6: 0x300f, 0x1c7: 0x331b, 0x1c8: 0x309b, 0x1c9: 0x33a7, 0x1ca: 0x30a0, 0x1cb: 0x33ac, + 0x1cc: 0x3145, 0x1cd: 0x3456, 0x1ce: 0x314a, 0x1cf: 0x345b, 0x1d0: 0x3168, 0x1d1: 0x3479, + 0x1d2: 0x316d, 0x1d3: 0x347e, 0x1d4: 0x31db, 0x1d5: 0x34f1, 0x1d6: 0x31e0, 0x1d7: 0x34f6, + 0x1d8: 0x3186, 0x1d9: 0x3497, 0x1da: 0x319f, 0x1db: 0x34b5, + 0x1de: 0x305a, 0x1df: 0x3366, + 0x1e6: 0x4692, 0x1e7: 0x4723, 0x1e8: 0x46ba, 0x1e9: 0x474b, + 0x1ea: 0x395f, 0x1eb: 0x3aee, 0x1ec: 0x393c, 0x1ed: 0x3acb, 0x1ee: 0x46d8, 0x1ef: 0x4769, + 0x1f0: 0x3958, 0x1f1: 0x3ae7, 0x1f2: 0x3244, 0x1f3: 0x355f, + // Block 0x8, offset 0x200 + 0x200: 0x9932, 0x201: 0x9932, 0x202: 0x9932, 0x203: 0x9932, 0x204: 0x9932, 0x205: 0x8132, + 0x206: 0x9932, 0x207: 0x9932, 0x208: 0x9932, 0x209: 0x9932, 0x20a: 0x9932, 0x20b: 0x9932, + 0x20c: 0x9932, 0x20d: 0x8132, 0x20e: 0x8132, 0x20f: 0x9932, 0x210: 0x8132, 0x211: 0x9932, + 0x212: 0x8132, 0x213: 0x9932, 0x214: 0x9932, 0x215: 0x8133, 0x216: 0x812d, 0x217: 0x812d, + 0x218: 0x812d, 0x219: 0x812d, 0x21a: 0x8133, 0x21b: 0x992b, 0x21c: 0x812d, 0x21d: 0x812d, + 0x21e: 0x812d, 0x21f: 0x812d, 0x220: 0x812d, 0x221: 0x8129, 0x222: 0x8129, 0x223: 0x992d, + 0x224: 0x992d, 0x225: 0x992d, 0x226: 0x992d, 0x227: 0x9929, 0x228: 0x9929, 0x229: 0x812d, + 0x22a: 0x812d, 0x22b: 0x812d, 0x22c: 0x812d, 0x22d: 0x992d, 0x22e: 0x992d, 0x22f: 0x812d, + 0x230: 0x992d, 0x231: 0x992d, 0x232: 0x812d, 0x233: 0x812d, 0x234: 0x8101, 0x235: 0x8101, + 0x236: 0x8101, 0x237: 0x8101, 0x238: 0x9901, 0x239: 0x812d, 0x23a: 0x812d, 0x23b: 0x812d, + 0x23c: 0x812d, 0x23d: 0x8132, 0x23e: 0x8132, 0x23f: 0x8132, + // Block 0x9, offset 0x240 + 0x240: 0x49ae, 0x241: 0x49b3, 0x242: 0x9932, 0x243: 0x49b8, 0x244: 0x4a71, 0x245: 0x9936, + 0x246: 0x8132, 0x247: 0x812d, 0x248: 0x812d, 0x249: 0x812d, 0x24a: 0x8132, 0x24b: 0x8132, + 0x24c: 0x8132, 0x24d: 0x812d, 0x24e: 0x812d, 0x250: 0x8132, 0x251: 0x8132, + 0x252: 0x8132, 0x253: 0x812d, 0x254: 0x812d, 0x255: 0x812d, 0x256: 0x812d, 0x257: 0x8132, + 0x258: 0x8133, 0x259: 0x812d, 0x25a: 0x812d, 0x25b: 0x8132, 0x25c: 0x8134, 0x25d: 0x8135, + 0x25e: 0x8135, 0x25f: 0x8134, 0x260: 0x8135, 0x261: 0x8135, 0x262: 0x8134, 0x263: 0x8132, + 0x264: 0x8132, 0x265: 0x8132, 0x266: 0x8132, 0x267: 0x8132, 0x268: 0x8132, 0x269: 0x8132, + 0x26a: 0x8132, 0x26b: 0x8132, 0x26c: 0x8132, 0x26d: 0x8132, 0x26e: 0x8132, 0x26f: 0x8132, + 0x274: 0x0170, + 0x27a: 0x42a5, + 0x27e: 0x0037, + // Block 0xa, offset 0x280 + 0x284: 0x425a, 0x285: 0x447b, + 0x286: 0x35e9, 0x287: 0x00ce, 0x288: 0x3607, 0x289: 0x3613, 0x28a: 0x3625, + 0x28c: 0x3643, 0x28e: 0x3655, 
0x28f: 0x3673, 0x290: 0x3e08, 0x291: 0xa000, + 0x295: 0xa000, 0x297: 0xa000, + 0x299: 0xa000, + 0x29f: 0xa000, 0x2a1: 0xa000, + 0x2a5: 0xa000, 0x2a9: 0xa000, + 0x2aa: 0x3637, 0x2ab: 0x3667, 0x2ac: 0x47fe, 0x2ad: 0x3697, 0x2ae: 0x4828, 0x2af: 0x36a9, + 0x2b0: 0x3e70, 0x2b1: 0xa000, 0x2b5: 0xa000, + 0x2b7: 0xa000, 0x2b9: 0xa000, + 0x2bf: 0xa000, + // Block 0xb, offset 0x2c0 + 0x2c1: 0xa000, 0x2c5: 0xa000, + 0x2c9: 0xa000, 0x2ca: 0x4840, 0x2cb: 0x485e, + 0x2cc: 0x36c7, 0x2cd: 0x36df, 0x2ce: 0x4876, 0x2d0: 0x01be, 0x2d1: 0x01d0, + 0x2d2: 0x01ac, 0x2d3: 0x430c, 0x2d4: 0x4312, 0x2d5: 0x01fa, 0x2d6: 0x01e8, + 0x2f0: 0x01d6, 0x2f1: 0x01eb, 0x2f2: 0x01ee, 0x2f4: 0x0188, 0x2f5: 0x01c7, + 0x2f9: 0x01a6, + // Block 0xc, offset 0x300 + 0x300: 0x3721, 0x301: 0x372d, 0x303: 0x371b, + 0x306: 0xa000, 0x307: 0x3709, + 0x30c: 0x375d, 0x30d: 0x3745, 0x30e: 0x376f, 0x310: 0xa000, + 0x313: 0xa000, 0x315: 0xa000, 0x316: 0xa000, 0x317: 0xa000, + 0x318: 0xa000, 0x319: 0x3751, 0x31a: 0xa000, + 0x31e: 0xa000, 0x323: 0xa000, + 0x327: 0xa000, + 0x32b: 0xa000, 0x32d: 0xa000, + 0x330: 0xa000, 0x333: 0xa000, 0x335: 0xa000, + 0x336: 0xa000, 0x337: 0xa000, 0x338: 0xa000, 0x339: 0x37d5, 0x33a: 0xa000, + 0x33e: 0xa000, + // Block 0xd, offset 0x340 + 0x341: 0x3733, 0x342: 0x37b7, + 0x350: 0x370f, 0x351: 0x3793, + 0x352: 0x3715, 0x353: 0x3799, 0x356: 0x3727, 0x357: 0x37ab, + 0x358: 0xa000, 0x359: 0xa000, 0x35a: 0x3829, 0x35b: 0x382f, 0x35c: 0x3739, 0x35d: 0x37bd, + 0x35e: 0x373f, 0x35f: 0x37c3, 0x362: 0x374b, 0x363: 0x37cf, + 0x364: 0x3757, 0x365: 0x37db, 0x366: 0x3763, 0x367: 0x37e7, 0x368: 0xa000, 0x369: 0xa000, + 0x36a: 0x3835, 0x36b: 0x383b, 0x36c: 0x378d, 0x36d: 0x3811, 0x36e: 0x3769, 0x36f: 0x37ed, + 0x370: 0x3775, 0x371: 0x37f9, 0x372: 0x377b, 0x373: 0x37ff, 0x374: 0x3781, 0x375: 0x3805, + 0x378: 0x3787, 0x379: 0x380b, + // Block 0xe, offset 0x380 + 0x387: 0x1d61, + 0x391: 0x812d, + 0x392: 0x8132, 0x393: 0x8132, 0x394: 0x8132, 0x395: 0x8132, 0x396: 0x812d, 0x397: 0x8132, + 0x398: 0x8132, 0x399: 0x8132, 0x39a: 0x812e, 0x39b: 0x812d, 0x39c: 0x8132, 0x39d: 0x8132, + 0x39e: 0x8132, 0x39f: 0x8132, 0x3a0: 0x8132, 0x3a1: 0x8132, 0x3a2: 0x812d, 0x3a3: 0x812d, + 0x3a4: 0x812d, 0x3a5: 0x812d, 0x3a6: 0x812d, 0x3a7: 0x812d, 0x3a8: 0x8132, 0x3a9: 0x8132, + 0x3aa: 0x812d, 0x3ab: 0x8132, 0x3ac: 0x8132, 0x3ad: 0x812e, 0x3ae: 0x8131, 0x3af: 0x8132, + 0x3b0: 0x8105, 0x3b1: 0x8106, 0x3b2: 0x8107, 0x3b3: 0x8108, 0x3b4: 0x8109, 0x3b5: 0x810a, + 0x3b6: 0x810b, 0x3b7: 0x810c, 0x3b8: 0x810d, 0x3b9: 0x810e, 0x3ba: 0x810e, 0x3bb: 0x810f, + 0x3bc: 0x8110, 0x3bd: 0x8111, 0x3bf: 0x8112, + // Block 0xf, offset 0x3c0 + 0x3c8: 0xa000, 0x3ca: 0xa000, 0x3cb: 0x8116, + 0x3cc: 0x8117, 0x3cd: 0x8118, 0x3ce: 0x8119, 0x3cf: 0x811a, 0x3d0: 0x811b, 0x3d1: 0x811c, + 0x3d2: 0x811d, 0x3d3: 0x9932, 0x3d4: 0x9932, 0x3d5: 0x992d, 0x3d6: 0x812d, 0x3d7: 0x8132, + 0x3d8: 0x8132, 0x3d9: 0x8132, 0x3da: 0x8132, 0x3db: 0x8132, 0x3dc: 0x812d, 0x3dd: 0x8132, + 0x3de: 0x8132, 0x3df: 0x812d, + 0x3f0: 0x811e, 0x3f5: 0x1d84, + 0x3f6: 0x2013, 0x3f7: 0x204f, 0x3f8: 0x204a, + // Block 0x10, offset 0x400 + 0x413: 0x812d, 0x414: 0x8132, 0x415: 0x8132, 0x416: 0x8132, 0x417: 0x8132, + 0x418: 0x8132, 0x419: 0x8132, 0x41a: 0x8132, 0x41b: 0x8132, 0x41c: 0x8132, 0x41d: 0x8132, + 0x41e: 0x8132, 0x41f: 0x8132, 0x420: 0x8132, 0x421: 0x8132, 0x423: 0x812d, + 0x424: 0x8132, 0x425: 0x8132, 0x426: 0x812d, 0x427: 0x8132, 0x428: 0x8132, 0x429: 0x812d, + 0x42a: 0x8132, 0x42b: 0x8132, 0x42c: 0x8132, 0x42d: 0x812d, 0x42e: 0x812d, 0x42f: 0x812d, + 0x430: 0x8116, 0x431: 0x8117, 0x432: 0x8118, 0x433: 
0x8132, 0x434: 0x8132, 0x435: 0x8132, + 0x436: 0x812d, 0x437: 0x8132, 0x438: 0x8132, 0x439: 0x812d, 0x43a: 0x812d, 0x43b: 0x8132, + 0x43c: 0x8132, 0x43d: 0x8132, 0x43e: 0x8132, 0x43f: 0x8132, + // Block 0x11, offset 0x440 + 0x445: 0xa000, + 0x446: 0x2d26, 0x447: 0xa000, 0x448: 0x2d2e, 0x449: 0xa000, 0x44a: 0x2d36, 0x44b: 0xa000, + 0x44c: 0x2d3e, 0x44d: 0xa000, 0x44e: 0x2d46, 0x451: 0xa000, + 0x452: 0x2d4e, + 0x474: 0x8102, 0x475: 0x9900, + 0x47a: 0xa000, 0x47b: 0x2d56, + 0x47c: 0xa000, 0x47d: 0x2d5e, 0x47e: 0xa000, 0x47f: 0xa000, + // Block 0x12, offset 0x480 + 0x480: 0x0069, 0x481: 0x006b, 0x482: 0x006f, 0x483: 0x0083, 0x484: 0x00f5, 0x485: 0x00f8, + 0x486: 0x0413, 0x487: 0x0085, 0x488: 0x0089, 0x489: 0x008b, 0x48a: 0x0104, 0x48b: 0x0107, + 0x48c: 0x010a, 0x48d: 0x008f, 0x48f: 0x0097, 0x490: 0x009b, 0x491: 0x00e0, + 0x492: 0x009f, 0x493: 0x00fe, 0x494: 0x0417, 0x495: 0x041b, 0x496: 0x00a1, 0x497: 0x00a9, + 0x498: 0x00ab, 0x499: 0x0423, 0x49a: 0x012b, 0x49b: 0x00ad, 0x49c: 0x0427, 0x49d: 0x01be, + 0x49e: 0x01c1, 0x49f: 0x01c4, 0x4a0: 0x01fa, 0x4a1: 0x01fd, 0x4a2: 0x0093, 0x4a3: 0x00a5, + 0x4a4: 0x00ab, 0x4a5: 0x00ad, 0x4a6: 0x01be, 0x4a7: 0x01c1, 0x4a8: 0x01eb, 0x4a9: 0x01fa, + 0x4aa: 0x01fd, + 0x4b8: 0x020c, + // Block 0x13, offset 0x4c0 + 0x4db: 0x00fb, 0x4dc: 0x0087, 0x4dd: 0x0101, + 0x4de: 0x00d4, 0x4df: 0x010a, 0x4e0: 0x008d, 0x4e1: 0x010d, 0x4e2: 0x0110, 0x4e3: 0x0116, + 0x4e4: 0x011c, 0x4e5: 0x011f, 0x4e6: 0x0122, 0x4e7: 0x042b, 0x4e8: 0x016a, 0x4e9: 0x0128, + 0x4ea: 0x042f, 0x4eb: 0x016d, 0x4ec: 0x0131, 0x4ed: 0x012e, 0x4ee: 0x0134, 0x4ef: 0x0137, + 0x4f0: 0x013a, 0x4f1: 0x013d, 0x4f2: 0x0140, 0x4f3: 0x014c, 0x4f4: 0x014f, 0x4f5: 0x00ec, + 0x4f6: 0x0152, 0x4f7: 0x0155, 0x4f8: 0x041f, 0x4f9: 0x0158, 0x4fa: 0x015b, 0x4fb: 0x00b5, + 0x4fc: 0x015e, 0x4fd: 0x0161, 0x4fe: 0x0164, 0x4ff: 0x01d0, + // Block 0x14, offset 0x500 + 0x500: 0x8132, 0x501: 0x8132, 0x502: 0x812d, 0x503: 0x8132, 0x504: 0x8132, 0x505: 0x8132, + 0x506: 0x8132, 0x507: 0x8132, 0x508: 0x8132, 0x509: 0x8132, 0x50a: 0x812d, 0x50b: 0x8132, + 0x50c: 0x8132, 0x50d: 0x8135, 0x50e: 0x812a, 0x50f: 0x812d, 0x510: 0x8129, 0x511: 0x8132, + 0x512: 0x8132, 0x513: 0x8132, 0x514: 0x8132, 0x515: 0x8132, 0x516: 0x8132, 0x517: 0x8132, + 0x518: 0x8132, 0x519: 0x8132, 0x51a: 0x8132, 0x51b: 0x8132, 0x51c: 0x8132, 0x51d: 0x8132, + 0x51e: 0x8132, 0x51f: 0x8132, 0x520: 0x8132, 0x521: 0x8132, 0x522: 0x8132, 0x523: 0x8132, + 0x524: 0x8132, 0x525: 0x8132, 0x526: 0x8132, 0x527: 0x8132, 0x528: 0x8132, 0x529: 0x8132, + 0x52a: 0x8132, 0x52b: 0x8132, 0x52c: 0x8132, 0x52d: 0x8132, 0x52e: 0x8132, 0x52f: 0x8132, + 0x530: 0x8132, 0x531: 0x8132, 0x532: 0x8132, 0x533: 0x8132, 0x534: 0x8132, 0x535: 0x8132, + 0x536: 0x8133, 0x537: 0x8131, 0x538: 0x8131, 0x539: 0x812d, 0x53b: 0x8132, + 0x53c: 0x8134, 0x53d: 0x812d, 0x53e: 0x8132, 0x53f: 0x812d, + // Block 0x15, offset 0x540 + 0x540: 0x2f97, 0x541: 0x32a3, 0x542: 0x2fa1, 0x543: 0x32ad, 0x544: 0x2fa6, 0x545: 0x32b2, + 0x546: 0x2fab, 0x547: 0x32b7, 0x548: 0x38cc, 0x549: 0x3a5b, 0x54a: 0x2fc4, 0x54b: 0x32d0, + 0x54c: 0x2fce, 0x54d: 0x32da, 0x54e: 0x2fdd, 0x54f: 0x32e9, 0x550: 0x2fd3, 0x551: 0x32df, + 0x552: 0x2fd8, 0x553: 0x32e4, 0x554: 0x38ef, 0x555: 0x3a7e, 0x556: 0x38f6, 0x557: 0x3a85, + 0x558: 0x3019, 0x559: 0x3325, 0x55a: 0x301e, 0x55b: 0x332a, 0x55c: 0x3904, 0x55d: 0x3a93, + 0x55e: 0x3023, 0x55f: 0x332f, 0x560: 0x3032, 0x561: 0x333e, 0x562: 0x3050, 0x563: 0x335c, + 0x564: 0x305f, 0x565: 0x336b, 0x566: 0x3055, 0x567: 0x3361, 0x568: 0x3064, 0x569: 0x3370, + 0x56a: 0x3069, 0x56b: 0x3375, 0x56c: 0x30af, 0x56d: 
0x33bb, 0x56e: 0x390b, 0x56f: 0x3a9a, + 0x570: 0x30b9, 0x571: 0x33ca, 0x572: 0x30c3, 0x573: 0x33d4, 0x574: 0x30cd, 0x575: 0x33de, + 0x576: 0x46c4, 0x577: 0x4755, 0x578: 0x3912, 0x579: 0x3aa1, 0x57a: 0x30e6, 0x57b: 0x33f7, + 0x57c: 0x30e1, 0x57d: 0x33f2, 0x57e: 0x30eb, 0x57f: 0x33fc, + // Block 0x16, offset 0x580 + 0x580: 0x30f0, 0x581: 0x3401, 0x582: 0x30f5, 0x583: 0x3406, 0x584: 0x3109, 0x585: 0x341a, + 0x586: 0x3113, 0x587: 0x3424, 0x588: 0x3122, 0x589: 0x3433, 0x58a: 0x311d, 0x58b: 0x342e, + 0x58c: 0x3935, 0x58d: 0x3ac4, 0x58e: 0x3943, 0x58f: 0x3ad2, 0x590: 0x394a, 0x591: 0x3ad9, + 0x592: 0x3951, 0x593: 0x3ae0, 0x594: 0x314f, 0x595: 0x3460, 0x596: 0x3154, 0x597: 0x3465, + 0x598: 0x315e, 0x599: 0x346f, 0x59a: 0x46f1, 0x59b: 0x4782, 0x59c: 0x3997, 0x59d: 0x3b26, + 0x59e: 0x3177, 0x59f: 0x3488, 0x5a0: 0x3181, 0x5a1: 0x3492, 0x5a2: 0x4700, 0x5a3: 0x4791, + 0x5a4: 0x399e, 0x5a5: 0x3b2d, 0x5a6: 0x39a5, 0x5a7: 0x3b34, 0x5a8: 0x39ac, 0x5a9: 0x3b3b, + 0x5aa: 0x3190, 0x5ab: 0x34a1, 0x5ac: 0x319a, 0x5ad: 0x34b0, 0x5ae: 0x31ae, 0x5af: 0x34c4, + 0x5b0: 0x31a9, 0x5b1: 0x34bf, 0x5b2: 0x31ea, 0x5b3: 0x3500, 0x5b4: 0x31f9, 0x5b5: 0x350f, + 0x5b6: 0x31f4, 0x5b7: 0x350a, 0x5b8: 0x39b3, 0x5b9: 0x3b42, 0x5ba: 0x39ba, 0x5bb: 0x3b49, + 0x5bc: 0x31fe, 0x5bd: 0x3514, 0x5be: 0x3203, 0x5bf: 0x3519, + // Block 0x17, offset 0x5c0 + 0x5c0: 0x3208, 0x5c1: 0x351e, 0x5c2: 0x320d, 0x5c3: 0x3523, 0x5c4: 0x321c, 0x5c5: 0x3532, + 0x5c6: 0x3217, 0x5c7: 0x352d, 0x5c8: 0x3221, 0x5c9: 0x353c, 0x5ca: 0x3226, 0x5cb: 0x3541, + 0x5cc: 0x322b, 0x5cd: 0x3546, 0x5ce: 0x3249, 0x5cf: 0x3564, 0x5d0: 0x3262, 0x5d1: 0x3582, + 0x5d2: 0x3271, 0x5d3: 0x3591, 0x5d4: 0x3276, 0x5d5: 0x3596, 0x5d6: 0x337a, 0x5d7: 0x34a6, + 0x5d8: 0x3537, 0x5d9: 0x3573, 0x5da: 0x1be0, 0x5db: 0x42d7, + 0x5e0: 0x46a1, 0x5e1: 0x4732, 0x5e2: 0x2f83, 0x5e3: 0x328f, + 0x5e4: 0x3878, 0x5e5: 0x3a07, 0x5e6: 0x3871, 0x5e7: 0x3a00, 0x5e8: 0x3886, 0x5e9: 0x3a15, + 0x5ea: 0x387f, 0x5eb: 0x3a0e, 0x5ec: 0x38be, 0x5ed: 0x3a4d, 0x5ee: 0x3894, 0x5ef: 0x3a23, + 0x5f0: 0x388d, 0x5f1: 0x3a1c, 0x5f2: 0x38a2, 0x5f3: 0x3a31, 0x5f4: 0x389b, 0x5f5: 0x3a2a, + 0x5f6: 0x38c5, 0x5f7: 0x3a54, 0x5f8: 0x46b5, 0x5f9: 0x4746, 0x5fa: 0x3000, 0x5fb: 0x330c, + 0x5fc: 0x2fec, 0x5fd: 0x32f8, 0x5fe: 0x38da, 0x5ff: 0x3a69, + // Block 0x18, offset 0x600 + 0x600: 0x38d3, 0x601: 0x3a62, 0x602: 0x38e8, 0x603: 0x3a77, 0x604: 0x38e1, 0x605: 0x3a70, + 0x606: 0x38fd, 0x607: 0x3a8c, 0x608: 0x3091, 0x609: 0x339d, 0x60a: 0x30a5, 0x60b: 0x33b1, + 0x60c: 0x46e7, 0x60d: 0x4778, 0x60e: 0x3136, 0x60f: 0x3447, 0x610: 0x3920, 0x611: 0x3aaf, + 0x612: 0x3919, 0x613: 0x3aa8, 0x614: 0x392e, 0x615: 0x3abd, 0x616: 0x3927, 0x617: 0x3ab6, + 0x618: 0x3989, 0x619: 0x3b18, 0x61a: 0x396d, 0x61b: 0x3afc, 0x61c: 0x3966, 0x61d: 0x3af5, + 0x61e: 0x397b, 0x61f: 0x3b0a, 0x620: 0x3974, 0x621: 0x3b03, 0x622: 0x3982, 0x623: 0x3b11, + 0x624: 0x31e5, 0x625: 0x34fb, 0x626: 0x31c7, 0x627: 0x34dd, 0x628: 0x39e4, 0x629: 0x3b73, + 0x62a: 0x39dd, 0x62b: 0x3b6c, 0x62c: 0x39f2, 0x62d: 0x3b81, 0x62e: 0x39eb, 0x62f: 0x3b7a, + 0x630: 0x39f9, 0x631: 0x3b88, 0x632: 0x3230, 0x633: 0x354b, 0x634: 0x3258, 0x635: 0x3578, + 0x636: 0x3253, 0x637: 0x356e, 0x638: 0x323f, 0x639: 0x355a, + // Block 0x19, offset 0x640 + 0x640: 0x4804, 0x641: 0x480a, 0x642: 0x491e, 0x643: 0x4936, 0x644: 0x4926, 0x645: 0x493e, + 0x646: 0x492e, 0x647: 0x4946, 0x648: 0x47aa, 0x649: 0x47b0, 0x64a: 0x488e, 0x64b: 0x48a6, + 0x64c: 0x4896, 0x64d: 0x48ae, 0x64e: 0x489e, 0x64f: 0x48b6, 0x650: 0x4816, 0x651: 0x481c, + 0x652: 0x3db8, 0x653: 0x3dc8, 0x654: 0x3dc0, 0x655: 0x3dd0, + 0x658: 0x47b6, 
0x659: 0x47bc, 0x65a: 0x3ce8, 0x65b: 0x3cf8, 0x65c: 0x3cf0, 0x65d: 0x3d00, + 0x660: 0x482e, 0x661: 0x4834, 0x662: 0x494e, 0x663: 0x4966, + 0x664: 0x4956, 0x665: 0x496e, 0x666: 0x495e, 0x667: 0x4976, 0x668: 0x47c2, 0x669: 0x47c8, + 0x66a: 0x48be, 0x66b: 0x48d6, 0x66c: 0x48c6, 0x66d: 0x48de, 0x66e: 0x48ce, 0x66f: 0x48e6, + 0x670: 0x4846, 0x671: 0x484c, 0x672: 0x3e18, 0x673: 0x3e30, 0x674: 0x3e20, 0x675: 0x3e38, + 0x676: 0x3e28, 0x677: 0x3e40, 0x678: 0x47ce, 0x679: 0x47d4, 0x67a: 0x3d18, 0x67b: 0x3d30, + 0x67c: 0x3d20, 0x67d: 0x3d38, 0x67e: 0x3d28, 0x67f: 0x3d40, + // Block 0x1a, offset 0x680 + 0x680: 0x4852, 0x681: 0x4858, 0x682: 0x3e48, 0x683: 0x3e58, 0x684: 0x3e50, 0x685: 0x3e60, + 0x688: 0x47da, 0x689: 0x47e0, 0x68a: 0x3d48, 0x68b: 0x3d58, + 0x68c: 0x3d50, 0x68d: 0x3d60, 0x690: 0x4864, 0x691: 0x486a, + 0x692: 0x3e80, 0x693: 0x3e98, 0x694: 0x3e88, 0x695: 0x3ea0, 0x696: 0x3e90, 0x697: 0x3ea8, + 0x699: 0x47e6, 0x69b: 0x3d68, 0x69d: 0x3d70, + 0x69f: 0x3d78, 0x6a0: 0x487c, 0x6a1: 0x4882, 0x6a2: 0x497e, 0x6a3: 0x4996, + 0x6a4: 0x4986, 0x6a5: 0x499e, 0x6a6: 0x498e, 0x6a7: 0x49a6, 0x6a8: 0x47ec, 0x6a9: 0x47f2, + 0x6aa: 0x48ee, 0x6ab: 0x4906, 0x6ac: 0x48f6, 0x6ad: 0x490e, 0x6ae: 0x48fe, 0x6af: 0x4916, + 0x6b0: 0x47f8, 0x6b1: 0x431e, 0x6b2: 0x3691, 0x6b3: 0x4324, 0x6b4: 0x4822, 0x6b5: 0x432a, + 0x6b6: 0x36a3, 0x6b7: 0x4330, 0x6b8: 0x36c1, 0x6b9: 0x4336, 0x6ba: 0x36d9, 0x6bb: 0x433c, + 0x6bc: 0x4870, 0x6bd: 0x4342, + // Block 0x1b, offset 0x6c0 + 0x6c0: 0x3da0, 0x6c1: 0x3da8, 0x6c2: 0x4184, 0x6c3: 0x41a2, 0x6c4: 0x418e, 0x6c5: 0x41ac, + 0x6c6: 0x4198, 0x6c7: 0x41b6, 0x6c8: 0x3cd8, 0x6c9: 0x3ce0, 0x6ca: 0x40d0, 0x6cb: 0x40ee, + 0x6cc: 0x40da, 0x6cd: 0x40f8, 0x6ce: 0x40e4, 0x6cf: 0x4102, 0x6d0: 0x3de8, 0x6d1: 0x3df0, + 0x6d2: 0x41c0, 0x6d3: 0x41de, 0x6d4: 0x41ca, 0x6d5: 0x41e8, 0x6d6: 0x41d4, 0x6d7: 0x41f2, + 0x6d8: 0x3d08, 0x6d9: 0x3d10, 0x6da: 0x410c, 0x6db: 0x412a, 0x6dc: 0x4116, 0x6dd: 0x4134, + 0x6de: 0x4120, 0x6df: 0x413e, 0x6e0: 0x3ec0, 0x6e1: 0x3ec8, 0x6e2: 0x41fc, 0x6e3: 0x421a, + 0x6e4: 0x4206, 0x6e5: 0x4224, 0x6e6: 0x4210, 0x6e7: 0x422e, 0x6e8: 0x3d80, 0x6e9: 0x3d88, + 0x6ea: 0x4148, 0x6eb: 0x4166, 0x6ec: 0x4152, 0x6ed: 0x4170, 0x6ee: 0x415c, 0x6ef: 0x417a, + 0x6f0: 0x3685, 0x6f1: 0x367f, 0x6f2: 0x3d90, 0x6f3: 0x368b, 0x6f4: 0x3d98, + 0x6f6: 0x4810, 0x6f7: 0x3db0, 0x6f8: 0x35f5, 0x6f9: 0x35ef, 0x6fa: 0x35e3, 0x6fb: 0x42ee, + 0x6fc: 0x35fb, 0x6fd: 0x4287, 0x6fe: 0x01d3, 0x6ff: 0x4287, + // Block 0x1c, offset 0x700 + 0x700: 0x42a0, 0x701: 0x4482, 0x702: 0x3dd8, 0x703: 0x369d, 0x704: 0x3de0, + 0x706: 0x483a, 0x707: 0x3df8, 0x708: 0x3601, 0x709: 0x42f4, 0x70a: 0x360d, 0x70b: 0x42fa, + 0x70c: 0x3619, 0x70d: 0x4489, 0x70e: 0x4490, 0x70f: 0x4497, 0x710: 0x36b5, 0x711: 0x36af, + 0x712: 0x3e00, 0x713: 0x44e4, 0x716: 0x36bb, 0x717: 0x3e10, + 0x718: 0x3631, 0x719: 0x362b, 0x71a: 0x361f, 0x71b: 0x4300, 0x71d: 0x449e, + 0x71e: 0x44a5, 0x71f: 0x44ac, 0x720: 0x36eb, 0x721: 0x36e5, 0x722: 0x3e68, 0x723: 0x44ec, + 0x724: 0x36cd, 0x725: 0x36d3, 0x726: 0x36f1, 0x727: 0x3e78, 0x728: 0x3661, 0x729: 0x365b, + 0x72a: 0x364f, 0x72b: 0x430c, 0x72c: 0x3649, 0x72d: 0x4474, 0x72e: 0x447b, 0x72f: 0x0081, + 0x732: 0x3eb0, 0x733: 0x36f7, 0x734: 0x3eb8, + 0x736: 0x4888, 0x737: 0x3ed0, 0x738: 0x363d, 0x739: 0x4306, 0x73a: 0x366d, 0x73b: 0x4318, + 0x73c: 0x3679, 0x73d: 0x425a, 0x73e: 0x428c, + // Block 0x1d, offset 0x740 + 0x740: 0x1bd8, 0x741: 0x1bdc, 0x742: 0x0047, 0x743: 0x1c54, 0x745: 0x1be8, + 0x746: 0x1bec, 0x747: 0x00e9, 0x749: 0x1c58, 0x74a: 0x008f, 0x74b: 0x0051, + 0x74c: 0x0051, 0x74d: 0x0051, 0x74e: 0x0091, 
[Generated table data elided: this hunk adds the auto-generated Unicode normalization lookup tables of the golang.org/x/text/unicode/norm package (apparently vendored as a transitive dependency of the k8s client). The omitted data comprises the remaining blocks of the NFKC value table, nfkcIndex (22 blocks, 1408 uint8 entries; block 0 is the zero block), nfkcSparseOffset (162 uint16 entries, 324 bytes), and the opening blocks of nfkcSparseValues (871 valueRange entries, 3484 bytes); the sparse value entries continue below. The data is machine-generated and is not hand-edited in this patch.]
{value: 0x6bea, lo: 0x07}, + {value: 0x9904, lo: 0x8a, hi: 0x8a}, + {value: 0x9900, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x3ef8, lo: 0x9a, hi: 0x9a}, + {value: 0x2f58, lo: 0x9c, hi: 0x9c}, + {value: 0x2de3, lo: 0x9d, hi: 0x9d}, + {value: 0x2d16, lo: 0x9e, hi: 0x9f}, + // Block 0x1f, offset 0xd5 + {value: 0x0000, lo: 0x03}, + {value: 0x2621, lo: 0xb3, hi: 0xb3}, + {value: 0x8122, lo: 0xb8, hi: 0xb9}, + {value: 0x8104, lo: 0xba, hi: 0xba}, + // Block 0x20, offset 0xd9 + {value: 0x0000, lo: 0x01}, + {value: 0x8123, lo: 0x88, hi: 0x8b}, + // Block 0x21, offset 0xdb + {value: 0x0000, lo: 0x02}, + {value: 0x2636, lo: 0xb3, hi: 0xb3}, + {value: 0x8124, lo: 0xb8, hi: 0xb9}, + // Block 0x22, offset 0xde + {value: 0x0000, lo: 0x03}, + {value: 0x8125, lo: 0x88, hi: 0x8b}, + {value: 0x2628, lo: 0x9c, hi: 0x9c}, + {value: 0x262f, lo: 0x9d, hi: 0x9d}, + // Block 0x23, offset 0xe2 + {value: 0x0000, lo: 0x05}, + {value: 0x030b, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x98, hi: 0x99}, + {value: 0x812d, lo: 0xb5, hi: 0xb5}, + {value: 0x812d, lo: 0xb7, hi: 0xb7}, + {value: 0x812b, lo: 0xb9, hi: 0xb9}, + // Block 0x24, offset 0xe8 + {value: 0x0000, lo: 0x10}, + {value: 0x2644, lo: 0x83, hi: 0x83}, + {value: 0x264b, lo: 0x8d, hi: 0x8d}, + {value: 0x2652, lo: 0x92, hi: 0x92}, + {value: 0x2659, lo: 0x97, hi: 0x97}, + {value: 0x2660, lo: 0x9c, hi: 0x9c}, + {value: 0x263d, lo: 0xa9, hi: 0xa9}, + {value: 0x8126, lo: 0xb1, hi: 0xb1}, + {value: 0x8127, lo: 0xb2, hi: 0xb2}, + {value: 0x4a84, lo: 0xb3, hi: 0xb3}, + {value: 0x8128, lo: 0xb4, hi: 0xb4}, + {value: 0x4a8d, lo: 0xb5, hi: 0xb5}, + {value: 0x45b4, lo: 0xb6, hi: 0xb6}, + {value: 0x45f4, lo: 0xb7, hi: 0xb7}, + {value: 0x45bc, lo: 0xb8, hi: 0xb8}, + {value: 0x45ff, lo: 0xb9, hi: 0xb9}, + {value: 0x8127, lo: 0xba, hi: 0xbd}, + // Block 0x25, offset 0xf9 + {value: 0x0000, lo: 0x0b}, + {value: 0x8127, lo: 0x80, hi: 0x80}, + {value: 0x4a96, lo: 0x81, hi: 0x81}, + {value: 0x8132, lo: 0x82, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0x86, hi: 0x87}, + {value: 0x266e, lo: 0x93, hi: 0x93}, + {value: 0x2675, lo: 0x9d, hi: 0x9d}, + {value: 0x267c, lo: 0xa2, hi: 0xa2}, + {value: 0x2683, lo: 0xa7, hi: 0xa7}, + {value: 0x268a, lo: 0xac, hi: 0xac}, + {value: 0x2667, lo: 0xb9, hi: 0xb9}, + // Block 0x26, offset 0x105 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x86, hi: 0x86}, + // Block 0x27, offset 0x107 + {value: 0x0000, lo: 0x05}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x2d1e, lo: 0xa6, hi: 0xa6}, + {value: 0x9900, lo: 0xae, hi: 0xae}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x28, offset 0x10d + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + // Block 0x29, offset 0x10f + {value: 0x0000, lo: 0x01}, + {value: 0x030f, lo: 0xbc, hi: 0xbc}, + // Block 0x2a, offset 0x111 + {value: 0x0000, lo: 0x01}, + {value: 0xa000, lo: 0x80, hi: 0x92}, + // Block 0x2b, offset 0x113 + {value: 0x0000, lo: 0x01}, + {value: 0xb900, lo: 0xa1, hi: 0xb5}, + // Block 0x2c, offset 0x115 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0xa8, hi: 0xbf}, + // Block 0x2d, offset 0x117 + {value: 0x0000, lo: 0x01}, + {value: 0x9900, lo: 0x80, hi: 0x82}, + // Block 0x2e, offset 0x119 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x9d, hi: 0x9f}, + // Block 0x2f, offset 0x11b + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x94, hi: 0x94}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x30, offset 0x11e + {value: 0x0000, lo: 0x02}, + {value: 
0x8104, lo: 0x92, hi: 0x92}, + {value: 0x8132, lo: 0x9d, hi: 0x9d}, + // Block 0x31, offset 0x121 + {value: 0x0000, lo: 0x01}, + {value: 0x8131, lo: 0xa9, hi: 0xa9}, + // Block 0x32, offset 0x123 + {value: 0x0004, lo: 0x02}, + {value: 0x812e, lo: 0xb9, hi: 0xba}, + {value: 0x812d, lo: 0xbb, hi: 0xbb}, + // Block 0x33, offset 0x126 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x97, hi: 0x97}, + {value: 0x812d, lo: 0x98, hi: 0x98}, + // Block 0x34, offset 0x129 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0xa0, hi: 0xa0}, + {value: 0x8132, lo: 0xb5, hi: 0xbc}, + {value: 0x812d, lo: 0xbf, hi: 0xbf}, + // Block 0x35, offset 0x12d + {value: 0x0000, lo: 0x04}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + {value: 0x812d, lo: 0xb5, hi: 0xba}, + {value: 0x8132, lo: 0xbb, hi: 0xbc}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x36, offset 0x132 + {value: 0x0000, lo: 0x08}, + {value: 0x2d66, lo: 0x80, hi: 0x80}, + {value: 0x2d6e, lo: 0x81, hi: 0x81}, + {value: 0xa000, lo: 0x82, hi: 0x82}, + {value: 0x2d76, lo: 0x83, hi: 0x83}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xab, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xac}, + {value: 0x8132, lo: 0xad, hi: 0xb3}, + // Block 0x37, offset 0x13b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xaa, hi: 0xab}, + // Block 0x38, offset 0x13d + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xa6, hi: 0xa6}, + {value: 0x8104, lo: 0xb2, hi: 0xb3}, + // Block 0x39, offset 0x140 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x3a, offset 0x142 + {value: 0x0000, lo: 0x0a}, + {value: 0x8132, lo: 0x90, hi: 0x92}, + {value: 0x8101, lo: 0x94, hi: 0x94}, + {value: 0x812d, lo: 0x95, hi: 0x99}, + {value: 0x8132, lo: 0x9a, hi: 0x9b}, + {value: 0x812d, lo: 0x9c, hi: 0x9f}, + {value: 0x8132, lo: 0xa0, hi: 0xa0}, + {value: 0x8101, lo: 0xa2, hi: 0xa8}, + {value: 0x812d, lo: 0xad, hi: 0xad}, + {value: 0x8132, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb8, hi: 0xb9}, + // Block 0x3b, offset 0x14d + {value: 0x0002, lo: 0x0a}, + {value: 0x0043, lo: 0xac, hi: 0xac}, + {value: 0x00d1, lo: 0xad, hi: 0xad}, + {value: 0x0045, lo: 0xae, hi: 0xae}, + {value: 0x0049, lo: 0xb0, hi: 0xb1}, + {value: 0x00e6, lo: 0xb2, hi: 0xb2}, + {value: 0x004f, lo: 0xb3, hi: 0xba}, + {value: 0x005f, lo: 0xbc, hi: 0xbc}, + {value: 0x00ef, lo: 0xbd, hi: 0xbd}, + {value: 0x0061, lo: 0xbe, hi: 0xbe}, + {value: 0x0065, lo: 0xbf, hi: 0xbf}, + // Block 0x3c, offset 0x158 + {value: 0x0000, lo: 0x0d}, + {value: 0x0001, lo: 0x80, hi: 0x8a}, + {value: 0x043b, lo: 0x91, hi: 0x91}, + {value: 0x429b, lo: 0x97, hi: 0x97}, + {value: 0x001d, lo: 0xa4, hi: 0xa4}, + {value: 0x1873, lo: 0xa5, hi: 0xa5}, + {value: 0x1b5c, lo: 0xa6, hi: 0xa6}, + {value: 0x0001, lo: 0xaf, hi: 0xaf}, + {value: 0x2691, lo: 0xb3, hi: 0xb3}, + {value: 0x27fe, lo: 0xb4, hi: 0xb4}, + {value: 0x2698, lo: 0xb6, hi: 0xb6}, + {value: 0x2808, lo: 0xb7, hi: 0xb7}, + {value: 0x186d, lo: 0xbc, hi: 0xbc}, + {value: 0x4269, lo: 0xbe, hi: 0xbe}, + // Block 0x3d, offset 0x166 + {value: 0x0002, lo: 0x0d}, + {value: 0x1933, lo: 0x87, hi: 0x87}, + {value: 0x1930, lo: 0x88, hi: 0x88}, + {value: 0x1870, lo: 0x89, hi: 0x89}, + {value: 0x298e, lo: 0x97, hi: 0x97}, + {value: 0x0001, lo: 0x9f, hi: 0x9f}, + {value: 0x0021, lo: 0xb0, hi: 0xb0}, + {value: 0x0093, lo: 0xb1, hi: 0xb1}, + {value: 0x0029, lo: 0xb4, hi: 0xb9}, + {value: 0x0017, lo: 0xba, hi: 0xba}, + {value: 0x0467, lo: 0xbb, hi: 0xbb}, + {value: 0x003b, lo: 0xbc, hi: 0xbc}, + {value: 0x0011, lo: 0xbd, hi: 0xbe}, + {value: 0x009d, lo: 0xbf, 
hi: 0xbf}, + // Block 0x3e, offset 0x174 + {value: 0x0002, lo: 0x0f}, + {value: 0x0021, lo: 0x80, hi: 0x89}, + {value: 0x0017, lo: 0x8a, hi: 0x8a}, + {value: 0x0467, lo: 0x8b, hi: 0x8b}, + {value: 0x003b, lo: 0x8c, hi: 0x8c}, + {value: 0x0011, lo: 0x8d, hi: 0x8e}, + {value: 0x0083, lo: 0x90, hi: 0x90}, + {value: 0x008b, lo: 0x91, hi: 0x91}, + {value: 0x009f, lo: 0x92, hi: 0x92}, + {value: 0x00b1, lo: 0x93, hi: 0x93}, + {value: 0x0104, lo: 0x94, hi: 0x94}, + {value: 0x0091, lo: 0x95, hi: 0x95}, + {value: 0x0097, lo: 0x96, hi: 0x99}, + {value: 0x00a1, lo: 0x9a, hi: 0x9a}, + {value: 0x00a7, lo: 0x9b, hi: 0x9c}, + {value: 0x1999, lo: 0xa8, hi: 0xa8}, + // Block 0x3f, offset 0x184 + {value: 0x0000, lo: 0x0d}, + {value: 0x8132, lo: 0x90, hi: 0x91}, + {value: 0x8101, lo: 0x92, hi: 0x93}, + {value: 0x8132, lo: 0x94, hi: 0x97}, + {value: 0x8101, lo: 0x98, hi: 0x9a}, + {value: 0x8132, lo: 0x9b, hi: 0x9c}, + {value: 0x8132, lo: 0xa1, hi: 0xa1}, + {value: 0x8101, lo: 0xa5, hi: 0xa6}, + {value: 0x8132, lo: 0xa7, hi: 0xa7}, + {value: 0x812d, lo: 0xa8, hi: 0xa8}, + {value: 0x8132, lo: 0xa9, hi: 0xa9}, + {value: 0x8101, lo: 0xaa, hi: 0xab}, + {value: 0x812d, lo: 0xac, hi: 0xaf}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + // Block 0x40, offset 0x192 + {value: 0x0007, lo: 0x06}, + {value: 0x2180, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + {value: 0x3bb9, lo: 0x9a, hi: 0x9b}, + {value: 0x3bc7, lo: 0xae, hi: 0xae}, + // Block 0x41, offset 0x199 + {value: 0x000e, lo: 0x05}, + {value: 0x3bce, lo: 0x8d, hi: 0x8e}, + {value: 0x3bd5, lo: 0x8f, hi: 0x8f}, + {value: 0xa000, lo: 0x90, hi: 0x90}, + {value: 0xa000, lo: 0x92, hi: 0x92}, + {value: 0xa000, lo: 0x94, hi: 0x94}, + // Block 0x42, offset 0x19f + {value: 0x0173, lo: 0x0e}, + {value: 0xa000, lo: 0x83, hi: 0x83}, + {value: 0x3be3, lo: 0x84, hi: 0x84}, + {value: 0xa000, lo: 0x88, hi: 0x88}, + {value: 0x3bea, lo: 0x89, hi: 0x89}, + {value: 0xa000, lo: 0x8b, hi: 0x8b}, + {value: 0x3bf1, lo: 0x8c, hi: 0x8c}, + {value: 0xa000, lo: 0xa3, hi: 0xa3}, + {value: 0x3bf8, lo: 0xa4, hi: 0xa4}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x3bff, lo: 0xa6, hi: 0xa6}, + {value: 0x269f, lo: 0xac, hi: 0xad}, + {value: 0x26a6, lo: 0xaf, hi: 0xaf}, + {value: 0x281c, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xbc, hi: 0xbc}, + // Block 0x43, offset 0x1ae + {value: 0x0007, lo: 0x03}, + {value: 0x3c68, lo: 0xa0, hi: 0xa1}, + {value: 0x3c92, lo: 0xa2, hi: 0xa3}, + {value: 0x3cbc, lo: 0xaa, hi: 0xad}, + // Block 0x44, offset 0x1b2 + {value: 0x0004, lo: 0x01}, + {value: 0x048b, lo: 0xa9, hi: 0xaa}, + // Block 0x45, offset 0x1b4 + {value: 0x0002, lo: 0x03}, + {value: 0x0057, lo: 0x80, hi: 0x8f}, + {value: 0x0083, lo: 0x90, hi: 0xa9}, + {value: 0x0021, lo: 0xaa, hi: 0xaa}, + // Block 0x46, offset 0x1b8 + {value: 0x0000, lo: 0x01}, + {value: 0x299b, lo: 0x8c, hi: 0x8c}, + // Block 0x47, offset 0x1ba + {value: 0x0263, lo: 0x02}, + {value: 0x1b8c, lo: 0xb4, hi: 0xb4}, + {value: 0x192d, lo: 0xb5, hi: 0xb6}, + // Block 0x48, offset 0x1bd + {value: 0x0000, lo: 0x01}, + {value: 0x44dd, lo: 0x9c, hi: 0x9c}, + // Block 0x49, offset 0x1bf + {value: 0x0000, lo: 0x02}, + {value: 0x0095, lo: 0xbc, hi: 0xbc}, + {value: 0x006d, lo: 0xbd, hi: 0xbd}, + // Block 0x4a, offset 0x1c2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xaf, hi: 0xb1}, + // Block 0x4b, offset 0x1c4 + {value: 0x0000, lo: 0x02}, + {value: 0x047f, lo: 0xaf, hi: 0xaf}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x4c, offset 
0x1c7 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa0, hi: 0xbf}, + // Block 0x4d, offset 0x1c9 + {value: 0x0000, lo: 0x01}, + {value: 0x0dc3, lo: 0x9f, hi: 0x9f}, + // Block 0x4e, offset 0x1cb + {value: 0x0000, lo: 0x01}, + {value: 0x162f, lo: 0xb3, hi: 0xb3}, + // Block 0x4f, offset 0x1cd + {value: 0x0004, lo: 0x0b}, + {value: 0x1597, lo: 0x80, hi: 0x82}, + {value: 0x15af, lo: 0x83, hi: 0x83}, + {value: 0x15c7, lo: 0x84, hi: 0x85}, + {value: 0x15d7, lo: 0x86, hi: 0x89}, + {value: 0x15eb, lo: 0x8a, hi: 0x8c}, + {value: 0x15ff, lo: 0x8d, hi: 0x8d}, + {value: 0x1607, lo: 0x8e, hi: 0x8e}, + {value: 0x160f, lo: 0x8f, hi: 0x90}, + {value: 0x161b, lo: 0x91, hi: 0x93}, + {value: 0x162b, lo: 0x94, hi: 0x94}, + {value: 0x1633, lo: 0x95, hi: 0x95}, + // Block 0x50, offset 0x1d9 + {value: 0x0004, lo: 0x09}, + {value: 0x0001, lo: 0x80, hi: 0x80}, + {value: 0x812c, lo: 0xaa, hi: 0xaa}, + {value: 0x8131, lo: 0xab, hi: 0xab}, + {value: 0x8133, lo: 0xac, hi: 0xac}, + {value: 0x812e, lo: 0xad, hi: 0xad}, + {value: 0x812f, lo: 0xae, hi: 0xae}, + {value: 0x812f, lo: 0xaf, hi: 0xaf}, + {value: 0x04b3, lo: 0xb6, hi: 0xb6}, + {value: 0x0887, lo: 0xb8, hi: 0xba}, + // Block 0x51, offset 0x1e3 + {value: 0x0006, lo: 0x09}, + {value: 0x0313, lo: 0xb1, hi: 0xb1}, + {value: 0x0317, lo: 0xb2, hi: 0xb2}, + {value: 0x4a3b, lo: 0xb3, hi: 0xb3}, + {value: 0x031b, lo: 0xb4, hi: 0xb4}, + {value: 0x4a41, lo: 0xb5, hi: 0xb6}, + {value: 0x031f, lo: 0xb7, hi: 0xb7}, + {value: 0x0323, lo: 0xb8, hi: 0xb8}, + {value: 0x0327, lo: 0xb9, hi: 0xb9}, + {value: 0x4a4d, lo: 0xba, hi: 0xbf}, + // Block 0x52, offset 0x1ed + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xaf, hi: 0xaf}, + {value: 0x8132, lo: 0xb4, hi: 0xbd}, + // Block 0x53, offset 0x1f0 + {value: 0x0000, lo: 0x03}, + {value: 0x020f, lo: 0x9c, hi: 0x9c}, + {value: 0x0212, lo: 0x9d, hi: 0x9d}, + {value: 0x8132, lo: 0x9e, hi: 0x9f}, + // Block 0x54, offset 0x1f4 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb1}, + // Block 0x55, offset 0x1f6 + {value: 0x0000, lo: 0x01}, + {value: 0x163b, lo: 0xb0, hi: 0xb0}, + // Block 0x56, offset 0x1f8 + {value: 0x000c, lo: 0x01}, + {value: 0x00d7, lo: 0xb8, hi: 0xb9}, + // Block 0x57, offset 0x1fa + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + // Block 0x58, offset 0x1fc + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x84, hi: 0x84}, + {value: 0x8132, lo: 0xa0, hi: 0xb1}, + // Block 0x59, offset 0x1ff + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xab, hi: 0xad}, + // Block 0x5a, offset 0x201 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x93, hi: 0x93}, + // Block 0x5b, offset 0x203 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0xb3, hi: 0xb3}, + // Block 0x5c, offset 0x205 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + // Block 0x5d, offset 0x207 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0xb0, hi: 0xb0}, + {value: 0x8132, lo: 0xb2, hi: 0xb3}, + {value: 0x812d, lo: 0xb4, hi: 0xb4}, + {value: 0x8132, lo: 0xb7, hi: 0xb8}, + {value: 0x8132, lo: 0xbe, hi: 0xbf}, + // Block 0x5e, offset 0x20d + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x81, hi: 0x81}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + // Block 0x5f, offset 0x210 + {value: 0x0008, lo: 0x03}, + {value: 0x1637, lo: 0x9c, hi: 0x9d}, + {value: 0x0125, lo: 0x9e, hi: 0x9e}, + {value: 0x1643, lo: 0x9f, hi: 0x9f}, + // Block 0x60, offset 0x214 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xad, hi: 0xad}, + // Block 0x61, offset 0x216 + {value: 0x0000, lo: 0x06}, + {value: 0xe500, 
lo: 0x80, hi: 0x80}, + {value: 0xc600, lo: 0x81, hi: 0x9b}, + {value: 0xe500, lo: 0x9c, hi: 0x9c}, + {value: 0xc600, lo: 0x9d, hi: 0xb7}, + {value: 0xe500, lo: 0xb8, hi: 0xb8}, + {value: 0xc600, lo: 0xb9, hi: 0xbf}, + // Block 0x62, offset 0x21d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x93}, + {value: 0xe500, lo: 0x94, hi: 0x94}, + {value: 0xc600, lo: 0x95, hi: 0xaf}, + {value: 0xe500, lo: 0xb0, hi: 0xb0}, + {value: 0xc600, lo: 0xb1, hi: 0xbf}, + // Block 0x63, offset 0x223 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8b}, + {value: 0xe500, lo: 0x8c, hi: 0x8c}, + {value: 0xc600, lo: 0x8d, hi: 0xa7}, + {value: 0xe500, lo: 0xa8, hi: 0xa8}, + {value: 0xc600, lo: 0xa9, hi: 0xbf}, + // Block 0x64, offset 0x229 + {value: 0x0000, lo: 0x07}, + {value: 0xc600, lo: 0x80, hi: 0x83}, + {value: 0xe500, lo: 0x84, hi: 0x84}, + {value: 0xc600, lo: 0x85, hi: 0x9f}, + {value: 0xe500, lo: 0xa0, hi: 0xa0}, + {value: 0xc600, lo: 0xa1, hi: 0xbb}, + {value: 0xe500, lo: 0xbc, hi: 0xbc}, + {value: 0xc600, lo: 0xbd, hi: 0xbf}, + // Block 0x65, offset 0x231 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x97}, + {value: 0xe500, lo: 0x98, hi: 0x98}, + {value: 0xc600, lo: 0x99, hi: 0xb3}, + {value: 0xe500, lo: 0xb4, hi: 0xb4}, + {value: 0xc600, lo: 0xb5, hi: 0xbf}, + // Block 0x66, offset 0x237 + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x8f}, + {value: 0xe500, lo: 0x90, hi: 0x90}, + {value: 0xc600, lo: 0x91, hi: 0xab}, + {value: 0xe500, lo: 0xac, hi: 0xac}, + {value: 0xc600, lo: 0xad, hi: 0xbf}, + // Block 0x67, offset 0x23d + {value: 0x0000, lo: 0x05}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + {value: 0xe500, lo: 0xa4, hi: 0xa4}, + {value: 0xc600, lo: 0xa5, hi: 0xbf}, + // Block 0x68, offset 0x243 + {value: 0x0000, lo: 0x03}, + {value: 0xc600, lo: 0x80, hi: 0x87}, + {value: 0xe500, lo: 0x88, hi: 0x88}, + {value: 0xc600, lo: 0x89, hi: 0xa3}, + // Block 0x69, offset 0x247 + {value: 0x0002, lo: 0x01}, + {value: 0x0003, lo: 0x81, hi: 0xbf}, + // Block 0x6a, offset 0x249 + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xbd, hi: 0xbd}, + // Block 0x6b, offset 0x24b + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0xa0, hi: 0xa0}, + // Block 0x6c, offset 0x24d + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb6, hi: 0xba}, + // Block 0x6d, offset 0x24f + {value: 0x002c, lo: 0x05}, + {value: 0x812d, lo: 0x8d, hi: 0x8d}, + {value: 0x8132, lo: 0x8f, hi: 0x8f}, + {value: 0x8132, lo: 0xb8, hi: 0xb8}, + {value: 0x8101, lo: 0xb9, hi: 0xba}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x6e, offset 0x255 + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0xa5, hi: 0xa5}, + {value: 0x812d, lo: 0xa6, hi: 0xa6}, + // Block 0x6f, offset 0x258 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xa4, hi: 0xa7}, + // Block 0x70, offset 0x25a + {value: 0x0000, lo: 0x05}, + {value: 0x812d, lo: 0x86, hi: 0x87}, + {value: 0x8132, lo: 0x88, hi: 0x8a}, + {value: 0x812d, lo: 0x8b, hi: 0x8b}, + {value: 0x8132, lo: 0x8c, hi: 0x8c}, + {value: 0x812d, lo: 0x8d, hi: 0x90}, + // Block 0x71, offset 0x260 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x86, hi: 0x86}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x72, offset 0x263 + {value: 0x17fe, lo: 0x07}, + {value: 0xa000, lo: 0x99, hi: 0x99}, + {value: 0x4238, lo: 0x9a, hi: 0x9a}, + {value: 0xa000, lo: 0x9b, hi: 0x9b}, + {value: 0x4242, lo: 0x9c, hi: 0x9c}, + {value: 0xa000, lo: 0xa5, hi: 0xa5}, + {value: 0x424c, lo: 0xab, hi: 
0xab}, + {value: 0x8104, lo: 0xb9, hi: 0xba}, + // Block 0x73, offset 0x26b + {value: 0x0000, lo: 0x06}, + {value: 0x8132, lo: 0x80, hi: 0x82}, + {value: 0x9900, lo: 0xa7, hi: 0xa7}, + {value: 0x2d7e, lo: 0xae, hi: 0xae}, + {value: 0x2d88, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb1, hi: 0xb2}, + {value: 0x8104, lo: 0xb3, hi: 0xb4}, + // Block 0x74, offset 0x272 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x80, hi: 0x80}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x75, offset 0x275 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb5, hi: 0xb5}, + {value: 0x8102, lo: 0xb6, hi: 0xb6}, + // Block 0x76, offset 0x278 + {value: 0x0002, lo: 0x01}, + {value: 0x8102, lo: 0xa9, hi: 0xaa}, + // Block 0x77, offset 0x27a + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0xbb, hi: 0xbc}, + {value: 0x9900, lo: 0xbe, hi: 0xbe}, + // Block 0x78, offset 0x27d + {value: 0x0000, lo: 0x07}, + {value: 0xa000, lo: 0x87, hi: 0x87}, + {value: 0x2d92, lo: 0x8b, hi: 0x8b}, + {value: 0x2d9c, lo: 0x8c, hi: 0x8c}, + {value: 0x8104, lo: 0x8d, hi: 0x8d}, + {value: 0x9900, lo: 0x97, hi: 0x97}, + {value: 0x8132, lo: 0xa6, hi: 0xac}, + {value: 0x8132, lo: 0xb0, hi: 0xb4}, + // Block 0x79, offset 0x285 + {value: 0x0000, lo: 0x03}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x86, hi: 0x86}, + {value: 0x8132, lo: 0x9e, hi: 0x9e}, + // Block 0x7a, offset 0x289 + {value: 0x6b5a, lo: 0x06}, + {value: 0x9900, lo: 0xb0, hi: 0xb0}, + {value: 0xa000, lo: 0xb9, hi: 0xb9}, + {value: 0x9900, lo: 0xba, hi: 0xba}, + {value: 0x2db0, lo: 0xbb, hi: 0xbb}, + {value: 0x2da6, lo: 0xbc, hi: 0xbd}, + {value: 0x2dba, lo: 0xbe, hi: 0xbe}, + // Block 0x7b, offset 0x290 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0x82, hi: 0x82}, + {value: 0x8102, lo: 0x83, hi: 0x83}, + // Block 0x7c, offset 0x293 + {value: 0x0000, lo: 0x05}, + {value: 0x9900, lo: 0xaf, hi: 0xaf}, + {value: 0xa000, lo: 0xb8, hi: 0xb9}, + {value: 0x2dc4, lo: 0xba, hi: 0xba}, + {value: 0x2dce, lo: 0xbb, hi: 0xbb}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7d, offset 0x299 + {value: 0x0000, lo: 0x01}, + {value: 0x8102, lo: 0x80, hi: 0x80}, + // Block 0x7e, offset 0x29b + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xbf, hi: 0xbf}, + // Block 0x7f, offset 0x29d + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb6, hi: 0xb6}, + {value: 0x8102, lo: 0xb7, hi: 0xb7}, + // Block 0x80, offset 0x2a0 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xab, hi: 0xab}, + // Block 0x81, offset 0x2a2 + {value: 0x0000, lo: 0x02}, + {value: 0x8104, lo: 0xb9, hi: 0xb9}, + {value: 0x8102, lo: 0xba, hi: 0xba}, + // Block 0x82, offset 0x2a5 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0xb4, hi: 0xb4}, + // Block 0x83, offset 0x2a7 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x87, hi: 0x87}, + // Block 0x84, offset 0x2a9 + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x99, hi: 0x99}, + // Block 0x85, offset 0x2ab + {value: 0x0000, lo: 0x02}, + {value: 0x8102, lo: 0x82, hi: 0x82}, + {value: 0x8104, lo: 0x84, hi: 0x85}, + // Block 0x86, offset 0x2ae + {value: 0x0000, lo: 0x01}, + {value: 0x8104, lo: 0x97, hi: 0x97}, + // Block 0x87, offset 0x2b0 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0xb0, hi: 0xb4}, + // Block 0x88, offset 0x2b2 + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0xb0, hi: 0xb6}, + // Block 0x89, offset 0x2b4 + {value: 0x0000, lo: 0x01}, + {value: 0x8101, lo: 0x9e, hi: 0x9e}, + // Block 0x8a, offset 0x2b6 + {value: 0x0000, lo: 0x0c}, + {value: 0x45cc, lo: 0x9e, hi: 0x9e}, + {value: 0x45d6, lo: 
0x9f, hi: 0x9f}, + {value: 0x460a, lo: 0xa0, hi: 0xa0}, + {value: 0x4618, lo: 0xa1, hi: 0xa1}, + {value: 0x4626, lo: 0xa2, hi: 0xa2}, + {value: 0x4634, lo: 0xa3, hi: 0xa3}, + {value: 0x4642, lo: 0xa4, hi: 0xa4}, + {value: 0x812b, lo: 0xa5, hi: 0xa6}, + {value: 0x8101, lo: 0xa7, hi: 0xa9}, + {value: 0x8130, lo: 0xad, hi: 0xad}, + {value: 0x812b, lo: 0xae, hi: 0xb2}, + {value: 0x812d, lo: 0xbb, hi: 0xbf}, + // Block 0x8b, offset 0x2c3 + {value: 0x0000, lo: 0x09}, + {value: 0x812d, lo: 0x80, hi: 0x82}, + {value: 0x8132, lo: 0x85, hi: 0x89}, + {value: 0x812d, lo: 0x8a, hi: 0x8b}, + {value: 0x8132, lo: 0xaa, hi: 0xad}, + {value: 0x45e0, lo: 0xbb, hi: 0xbb}, + {value: 0x45ea, lo: 0xbc, hi: 0xbc}, + {value: 0x4650, lo: 0xbd, hi: 0xbd}, + {value: 0x466c, lo: 0xbe, hi: 0xbe}, + {value: 0x465e, lo: 0xbf, hi: 0xbf}, + // Block 0x8c, offset 0x2cd + {value: 0x0000, lo: 0x01}, + {value: 0x467a, lo: 0x80, hi: 0x80}, + // Block 0x8d, offset 0x2cf + {value: 0x0000, lo: 0x01}, + {value: 0x8132, lo: 0x82, hi: 0x84}, + // Block 0x8e, offset 0x2d1 + {value: 0x0002, lo: 0x03}, + {value: 0x0043, lo: 0x80, hi: 0x99}, + {value: 0x0083, lo: 0x9a, hi: 0xb3}, + {value: 0x0043, lo: 0xb4, hi: 0xbf}, + // Block 0x8f, offset 0x2d5 + {value: 0x0002, lo: 0x04}, + {value: 0x005b, lo: 0x80, hi: 0x8d}, + {value: 0x0083, lo: 0x8e, hi: 0x94}, + {value: 0x0093, lo: 0x96, hi: 0xa7}, + {value: 0x0043, lo: 0xa8, hi: 0xbf}, + // Block 0x90, offset 0x2da + {value: 0x0002, lo: 0x0b}, + {value: 0x0073, lo: 0x80, hi: 0x81}, + {value: 0x0083, lo: 0x82, hi: 0x9b}, + {value: 0x0043, lo: 0x9c, hi: 0x9c}, + {value: 0x0047, lo: 0x9e, hi: 0x9f}, + {value: 0x004f, lo: 0xa2, hi: 0xa2}, + {value: 0x0055, lo: 0xa5, hi: 0xa6}, + {value: 0x005d, lo: 0xa9, hi: 0xac}, + {value: 0x0067, lo: 0xae, hi: 0xb5}, + {value: 0x0083, lo: 0xb6, hi: 0xb9}, + {value: 0x008d, lo: 0xbb, hi: 0xbb}, + {value: 0x0091, lo: 0xbd, hi: 0xbf}, + // Block 0x91, offset 0x2e6 + {value: 0x0002, lo: 0x04}, + {value: 0x0097, lo: 0x80, hi: 0x83}, + {value: 0x00a1, lo: 0x85, hi: 0x8f}, + {value: 0x0043, lo: 0x90, hi: 0xa9}, + {value: 0x0083, lo: 0xaa, hi: 0xbf}, + // Block 0x92, offset 0x2eb + {value: 0x0002, lo: 0x08}, + {value: 0x00af, lo: 0x80, hi: 0x83}, + {value: 0x0043, lo: 0x84, hi: 0x85}, + {value: 0x0049, lo: 0x87, hi: 0x8a}, + {value: 0x0055, lo: 0x8d, hi: 0x94}, + {value: 0x0067, lo: 0x96, hi: 0x9c}, + {value: 0x0083, lo: 0x9e, hi: 0xb7}, + {value: 0x0043, lo: 0xb8, hi: 0xb9}, + {value: 0x0049, lo: 0xbb, hi: 0xbe}, + // Block 0x93, offset 0x2f4 + {value: 0x0002, lo: 0x05}, + {value: 0x0053, lo: 0x80, hi: 0x84}, + {value: 0x005f, lo: 0x86, hi: 0x86}, + {value: 0x0067, lo: 0x8a, hi: 0x90}, + {value: 0x0083, lo: 0x92, hi: 0xab}, + {value: 0x0043, lo: 0xac, hi: 0xbf}, + // Block 0x94, offset 0x2fa + {value: 0x0002, lo: 0x04}, + {value: 0x006b, lo: 0x80, hi: 0x85}, + {value: 0x0083, lo: 0x86, hi: 0x9f}, + {value: 0x0043, lo: 0xa0, hi: 0xb9}, + {value: 0x0083, lo: 0xba, hi: 0xbf}, + // Block 0x95, offset 0x2ff + {value: 0x0002, lo: 0x03}, + {value: 0x008f, lo: 0x80, hi: 0x93}, + {value: 0x0043, lo: 0x94, hi: 0xad}, + {value: 0x0083, lo: 0xae, hi: 0xbf}, + // Block 0x96, offset 0x303 + {value: 0x0002, lo: 0x04}, + {value: 0x00a7, lo: 0x80, hi: 0x87}, + {value: 0x0043, lo: 0x88, hi: 0xa1}, + {value: 0x0083, lo: 0xa2, hi: 0xbb}, + {value: 0x0043, lo: 0xbc, hi: 0xbf}, + // Block 0x97, offset 0x308 + {value: 0x0002, lo: 0x03}, + {value: 0x004b, lo: 0x80, hi: 0x95}, + {value: 0x0083, lo: 0x96, hi: 0xaf}, + {value: 0x0043, lo: 0xb0, hi: 0xbf}, + // Block 0x98, offset 0x30c + {value: 
0x0003, lo: 0x0f}, + {value: 0x01b8, lo: 0x80, hi: 0x80}, + {value: 0x045f, lo: 0x81, hi: 0x81}, + {value: 0x01bb, lo: 0x82, hi: 0x9a}, + {value: 0x045b, lo: 0x9b, hi: 0x9b}, + {value: 0x01c7, lo: 0x9c, hi: 0x9c}, + {value: 0x01d0, lo: 0x9d, hi: 0x9d}, + {value: 0x01d6, lo: 0x9e, hi: 0x9e}, + {value: 0x01fa, lo: 0x9f, hi: 0x9f}, + {value: 0x01eb, lo: 0xa0, hi: 0xa0}, + {value: 0x01e8, lo: 0xa1, hi: 0xa1}, + {value: 0x0173, lo: 0xa2, hi: 0xb2}, + {value: 0x0188, lo: 0xb3, hi: 0xb3}, + {value: 0x01a6, lo: 0xb4, hi: 0xba}, + {value: 0x045f, lo: 0xbb, hi: 0xbb}, + {value: 0x01bb, lo: 0xbc, hi: 0xbf}, + // Block 0x99, offset 0x31c + {value: 0x0003, lo: 0x0d}, + {value: 0x01c7, lo: 0x80, hi: 0x94}, + {value: 0x045b, lo: 0x95, hi: 0x95}, + {value: 0x01c7, lo: 0x96, hi: 0x96}, + {value: 0x01d0, lo: 0x97, hi: 0x97}, + {value: 0x01d6, lo: 0x98, hi: 0x98}, + {value: 0x01fa, lo: 0x99, hi: 0x99}, + {value: 0x01eb, lo: 0x9a, hi: 0x9a}, + {value: 0x01e8, lo: 0x9b, hi: 0x9b}, + {value: 0x0173, lo: 0x9c, hi: 0xac}, + {value: 0x0188, lo: 0xad, hi: 0xad}, + {value: 0x01a6, lo: 0xae, hi: 0xb4}, + {value: 0x045f, lo: 0xb5, hi: 0xb5}, + {value: 0x01bb, lo: 0xb6, hi: 0xbf}, + // Block 0x9a, offset 0x32a + {value: 0x0003, lo: 0x0d}, + {value: 0x01d9, lo: 0x80, hi: 0x8e}, + {value: 0x045b, lo: 0x8f, hi: 0x8f}, + {value: 0x01c7, lo: 0x90, hi: 0x90}, + {value: 0x01d0, lo: 0x91, hi: 0x91}, + {value: 0x01d6, lo: 0x92, hi: 0x92}, + {value: 0x01fa, lo: 0x93, hi: 0x93}, + {value: 0x01eb, lo: 0x94, hi: 0x94}, + {value: 0x01e8, lo: 0x95, hi: 0x95}, + {value: 0x0173, lo: 0x96, hi: 0xa6}, + {value: 0x0188, lo: 0xa7, hi: 0xa7}, + {value: 0x01a6, lo: 0xa8, hi: 0xae}, + {value: 0x045f, lo: 0xaf, hi: 0xaf}, + {value: 0x01bb, lo: 0xb0, hi: 0xbf}, + // Block 0x9b, offset 0x338 + {value: 0x0003, lo: 0x0d}, + {value: 0x01eb, lo: 0x80, hi: 0x88}, + {value: 0x045b, lo: 0x89, hi: 0x89}, + {value: 0x01c7, lo: 0x8a, hi: 0x8a}, + {value: 0x01d0, lo: 0x8b, hi: 0x8b}, + {value: 0x01d6, lo: 0x8c, hi: 0x8c}, + {value: 0x01fa, lo: 0x8d, hi: 0x8d}, + {value: 0x01eb, lo: 0x8e, hi: 0x8e}, + {value: 0x01e8, lo: 0x8f, hi: 0x8f}, + {value: 0x0173, lo: 0x90, hi: 0xa0}, + {value: 0x0188, lo: 0xa1, hi: 0xa1}, + {value: 0x01a6, lo: 0xa2, hi: 0xa8}, + {value: 0x045f, lo: 0xa9, hi: 0xa9}, + {value: 0x01bb, lo: 0xaa, hi: 0xbf}, + // Block 0x9c, offset 0x346 + {value: 0x0000, lo: 0x05}, + {value: 0x8132, lo: 0x80, hi: 0x86}, + {value: 0x8132, lo: 0x88, hi: 0x98}, + {value: 0x8132, lo: 0x9b, hi: 0xa1}, + {value: 0x8132, lo: 0xa3, hi: 0xa4}, + {value: 0x8132, lo: 0xa6, hi: 0xaa}, + // Block 0x9d, offset 0x34c + {value: 0x0000, lo: 0x01}, + {value: 0x812d, lo: 0x90, hi: 0x96}, + // Block 0x9e, offset 0x34e + {value: 0x0000, lo: 0x02}, + {value: 0x8132, lo: 0x84, hi: 0x89}, + {value: 0x8102, lo: 0x8a, hi: 0x8a}, + // Block 0x9f, offset 0x351 + {value: 0x0002, lo: 0x09}, + {value: 0x0063, lo: 0x80, hi: 0x89}, + {value: 0x1951, lo: 0x8a, hi: 0x8a}, + {value: 0x1981, lo: 0x8b, hi: 0x8b}, + {value: 0x199c, lo: 0x8c, hi: 0x8c}, + {value: 0x19a2, lo: 0x8d, hi: 0x8d}, + {value: 0x1bc0, lo: 0x8e, hi: 0x8e}, + {value: 0x19ae, lo: 0x8f, hi: 0x8f}, + {value: 0x197b, lo: 0xaa, hi: 0xaa}, + {value: 0x197e, lo: 0xab, hi: 0xab}, + // Block 0xa0, offset 0x35b + {value: 0x0000, lo: 0x01}, + {value: 0x193f, lo: 0x90, hi: 0x90}, + // Block 0xa1, offset 0x35d + {value: 0x0028, lo: 0x09}, + {value: 0x2862, lo: 0x80, hi: 0x80}, + {value: 0x2826, lo: 0x81, hi: 0x81}, + {value: 0x2830, lo: 0x82, hi: 0x82}, + {value: 0x2844, lo: 0x83, hi: 0x84}, + {value: 0x284e, lo: 0x85, hi: 0x86}, + 
{value: 0x283a, lo: 0x87, hi: 0x87}, + {value: 0x2858, lo: 0x88, hi: 0x88}, + {value: 0x0b6f, lo: 0x90, hi: 0x90}, + {value: 0x08e7, lo: 0x91, hi: 0x91}, +} + +// recompMap: 7520 bytes (entries only) +var recompMap map[uint32]rune +var recompMapOnce sync.Once + +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + "\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + 
"\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + "\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 53KB (54514 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go index a01274a..9429069 100644 --- a/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go +++ b/vendor/golang.org/x/text/unicode/norm/tables9.0.0.go @@ -4,6 +4,8 @@ package norm +import "sync" + const ( // Version is the Unicode edition from which the tables are derived. 
Version = "9.0.0" @@ -6687,947 +6689,949 @@ var nfkcSparseValues = [875]valueRange{ } // recompMap: 7520 bytes (entries only) -var recompMap = map[uint32]rune{ - 0x00410300: 0x00C0, - 0x00410301: 0x00C1, - 0x00410302: 0x00C2, - 0x00410303: 0x00C3, - 0x00410308: 0x00C4, - 0x0041030A: 0x00C5, - 0x00430327: 0x00C7, - 0x00450300: 0x00C8, - 0x00450301: 0x00C9, - 0x00450302: 0x00CA, - 0x00450308: 0x00CB, - 0x00490300: 0x00CC, - 0x00490301: 0x00CD, - 0x00490302: 0x00CE, - 0x00490308: 0x00CF, - 0x004E0303: 0x00D1, - 0x004F0300: 0x00D2, - 0x004F0301: 0x00D3, - 0x004F0302: 0x00D4, - 0x004F0303: 0x00D5, - 0x004F0308: 0x00D6, - 0x00550300: 0x00D9, - 0x00550301: 0x00DA, - 0x00550302: 0x00DB, - 0x00550308: 0x00DC, - 0x00590301: 0x00DD, - 0x00610300: 0x00E0, - 0x00610301: 0x00E1, - 0x00610302: 0x00E2, - 0x00610303: 0x00E3, - 0x00610308: 0x00E4, - 0x0061030A: 0x00E5, - 0x00630327: 0x00E7, - 0x00650300: 0x00E8, - 0x00650301: 0x00E9, - 0x00650302: 0x00EA, - 0x00650308: 0x00EB, - 0x00690300: 0x00EC, - 0x00690301: 0x00ED, - 0x00690302: 0x00EE, - 0x00690308: 0x00EF, - 0x006E0303: 0x00F1, - 0x006F0300: 0x00F2, - 0x006F0301: 0x00F3, - 0x006F0302: 0x00F4, - 0x006F0303: 0x00F5, - 0x006F0308: 0x00F6, - 0x00750300: 0x00F9, - 0x00750301: 0x00FA, - 0x00750302: 0x00FB, - 0x00750308: 0x00FC, - 0x00790301: 0x00FD, - 0x00790308: 0x00FF, - 0x00410304: 0x0100, - 0x00610304: 0x0101, - 0x00410306: 0x0102, - 0x00610306: 0x0103, - 0x00410328: 0x0104, - 0x00610328: 0x0105, - 0x00430301: 0x0106, - 0x00630301: 0x0107, - 0x00430302: 0x0108, - 0x00630302: 0x0109, - 0x00430307: 0x010A, - 0x00630307: 0x010B, - 0x0043030C: 0x010C, - 0x0063030C: 0x010D, - 0x0044030C: 0x010E, - 0x0064030C: 0x010F, - 0x00450304: 0x0112, - 0x00650304: 0x0113, - 0x00450306: 0x0114, - 0x00650306: 0x0115, - 0x00450307: 0x0116, - 0x00650307: 0x0117, - 0x00450328: 0x0118, - 0x00650328: 0x0119, - 0x0045030C: 0x011A, - 0x0065030C: 0x011B, - 0x00470302: 0x011C, - 0x00670302: 0x011D, - 0x00470306: 0x011E, - 0x00670306: 0x011F, - 0x00470307: 0x0120, - 0x00670307: 0x0121, - 0x00470327: 0x0122, - 0x00670327: 0x0123, - 0x00480302: 0x0124, - 0x00680302: 0x0125, - 0x00490303: 0x0128, - 0x00690303: 0x0129, - 0x00490304: 0x012A, - 0x00690304: 0x012B, - 0x00490306: 0x012C, - 0x00690306: 0x012D, - 0x00490328: 0x012E, - 0x00690328: 0x012F, - 0x00490307: 0x0130, - 0x004A0302: 0x0134, - 0x006A0302: 0x0135, - 0x004B0327: 0x0136, - 0x006B0327: 0x0137, - 0x004C0301: 0x0139, - 0x006C0301: 0x013A, - 0x004C0327: 0x013B, - 0x006C0327: 0x013C, - 0x004C030C: 0x013D, - 0x006C030C: 0x013E, - 0x004E0301: 0x0143, - 0x006E0301: 0x0144, - 0x004E0327: 0x0145, - 0x006E0327: 0x0146, - 0x004E030C: 0x0147, - 0x006E030C: 0x0148, - 0x004F0304: 0x014C, - 0x006F0304: 0x014D, - 0x004F0306: 0x014E, - 0x006F0306: 0x014F, - 0x004F030B: 0x0150, - 0x006F030B: 0x0151, - 0x00520301: 0x0154, - 0x00720301: 0x0155, - 0x00520327: 0x0156, - 0x00720327: 0x0157, - 0x0052030C: 0x0158, - 0x0072030C: 0x0159, - 0x00530301: 0x015A, - 0x00730301: 0x015B, - 0x00530302: 0x015C, - 0x00730302: 0x015D, - 0x00530327: 0x015E, - 0x00730327: 0x015F, - 0x0053030C: 0x0160, - 0x0073030C: 0x0161, - 0x00540327: 0x0162, - 0x00740327: 0x0163, - 0x0054030C: 0x0164, - 0x0074030C: 0x0165, - 0x00550303: 0x0168, - 0x00750303: 0x0169, - 0x00550304: 0x016A, - 0x00750304: 0x016B, - 0x00550306: 0x016C, - 0x00750306: 0x016D, - 0x0055030A: 0x016E, - 0x0075030A: 0x016F, - 0x0055030B: 0x0170, - 0x0075030B: 0x0171, - 0x00550328: 0x0172, - 0x00750328: 0x0173, - 0x00570302: 0x0174, - 0x00770302: 0x0175, - 0x00590302: 0x0176, - 0x00790302: 0x0177, - 
0x00590308: 0x0178, - 0x005A0301: 0x0179, - 0x007A0301: 0x017A, - 0x005A0307: 0x017B, - 0x007A0307: 0x017C, - 0x005A030C: 0x017D, - 0x007A030C: 0x017E, - 0x004F031B: 0x01A0, - 0x006F031B: 0x01A1, - 0x0055031B: 0x01AF, - 0x0075031B: 0x01B0, - 0x0041030C: 0x01CD, - 0x0061030C: 0x01CE, - 0x0049030C: 0x01CF, - 0x0069030C: 0x01D0, - 0x004F030C: 0x01D1, - 0x006F030C: 0x01D2, - 0x0055030C: 0x01D3, - 0x0075030C: 0x01D4, - 0x00DC0304: 0x01D5, - 0x00FC0304: 0x01D6, - 0x00DC0301: 0x01D7, - 0x00FC0301: 0x01D8, - 0x00DC030C: 0x01D9, - 0x00FC030C: 0x01DA, - 0x00DC0300: 0x01DB, - 0x00FC0300: 0x01DC, - 0x00C40304: 0x01DE, - 0x00E40304: 0x01DF, - 0x02260304: 0x01E0, - 0x02270304: 0x01E1, - 0x00C60304: 0x01E2, - 0x00E60304: 0x01E3, - 0x0047030C: 0x01E6, - 0x0067030C: 0x01E7, - 0x004B030C: 0x01E8, - 0x006B030C: 0x01E9, - 0x004F0328: 0x01EA, - 0x006F0328: 0x01EB, - 0x01EA0304: 0x01EC, - 0x01EB0304: 0x01ED, - 0x01B7030C: 0x01EE, - 0x0292030C: 0x01EF, - 0x006A030C: 0x01F0, - 0x00470301: 0x01F4, - 0x00670301: 0x01F5, - 0x004E0300: 0x01F8, - 0x006E0300: 0x01F9, - 0x00C50301: 0x01FA, - 0x00E50301: 0x01FB, - 0x00C60301: 0x01FC, - 0x00E60301: 0x01FD, - 0x00D80301: 0x01FE, - 0x00F80301: 0x01FF, - 0x0041030F: 0x0200, - 0x0061030F: 0x0201, - 0x00410311: 0x0202, - 0x00610311: 0x0203, - 0x0045030F: 0x0204, - 0x0065030F: 0x0205, - 0x00450311: 0x0206, - 0x00650311: 0x0207, - 0x0049030F: 0x0208, - 0x0069030F: 0x0209, - 0x00490311: 0x020A, - 0x00690311: 0x020B, - 0x004F030F: 0x020C, - 0x006F030F: 0x020D, - 0x004F0311: 0x020E, - 0x006F0311: 0x020F, - 0x0052030F: 0x0210, - 0x0072030F: 0x0211, - 0x00520311: 0x0212, - 0x00720311: 0x0213, - 0x0055030F: 0x0214, - 0x0075030F: 0x0215, - 0x00550311: 0x0216, - 0x00750311: 0x0217, - 0x00530326: 0x0218, - 0x00730326: 0x0219, - 0x00540326: 0x021A, - 0x00740326: 0x021B, - 0x0048030C: 0x021E, - 0x0068030C: 0x021F, - 0x00410307: 0x0226, - 0x00610307: 0x0227, - 0x00450327: 0x0228, - 0x00650327: 0x0229, - 0x00D60304: 0x022A, - 0x00F60304: 0x022B, - 0x00D50304: 0x022C, - 0x00F50304: 0x022D, - 0x004F0307: 0x022E, - 0x006F0307: 0x022F, - 0x022E0304: 0x0230, - 0x022F0304: 0x0231, - 0x00590304: 0x0232, - 0x00790304: 0x0233, - 0x00A80301: 0x0385, - 0x03910301: 0x0386, - 0x03950301: 0x0388, - 0x03970301: 0x0389, - 0x03990301: 0x038A, - 0x039F0301: 0x038C, - 0x03A50301: 0x038E, - 0x03A90301: 0x038F, - 0x03CA0301: 0x0390, - 0x03990308: 0x03AA, - 0x03A50308: 0x03AB, - 0x03B10301: 0x03AC, - 0x03B50301: 0x03AD, - 0x03B70301: 0x03AE, - 0x03B90301: 0x03AF, - 0x03CB0301: 0x03B0, - 0x03B90308: 0x03CA, - 0x03C50308: 0x03CB, - 0x03BF0301: 0x03CC, - 0x03C50301: 0x03CD, - 0x03C90301: 0x03CE, - 0x03D20301: 0x03D3, - 0x03D20308: 0x03D4, - 0x04150300: 0x0400, - 0x04150308: 0x0401, - 0x04130301: 0x0403, - 0x04060308: 0x0407, - 0x041A0301: 0x040C, - 0x04180300: 0x040D, - 0x04230306: 0x040E, - 0x04180306: 0x0419, - 0x04380306: 0x0439, - 0x04350300: 0x0450, - 0x04350308: 0x0451, - 0x04330301: 0x0453, - 0x04560308: 0x0457, - 0x043A0301: 0x045C, - 0x04380300: 0x045D, - 0x04430306: 0x045E, - 0x0474030F: 0x0476, - 0x0475030F: 0x0477, - 0x04160306: 0x04C1, - 0x04360306: 0x04C2, - 0x04100306: 0x04D0, - 0x04300306: 0x04D1, - 0x04100308: 0x04D2, - 0x04300308: 0x04D3, - 0x04150306: 0x04D6, - 0x04350306: 0x04D7, - 0x04D80308: 0x04DA, - 0x04D90308: 0x04DB, - 0x04160308: 0x04DC, - 0x04360308: 0x04DD, - 0x04170308: 0x04DE, - 0x04370308: 0x04DF, - 0x04180304: 0x04E2, - 0x04380304: 0x04E3, - 0x04180308: 0x04E4, - 0x04380308: 0x04E5, - 0x041E0308: 0x04E6, - 0x043E0308: 0x04E7, - 0x04E80308: 0x04EA, - 0x04E90308: 0x04EB, - 0x042D0308: 
0x04EC, - 0x044D0308: 0x04ED, - 0x04230304: 0x04EE, - 0x04430304: 0x04EF, - 0x04230308: 0x04F0, - 0x04430308: 0x04F1, - 0x0423030B: 0x04F2, - 0x0443030B: 0x04F3, - 0x04270308: 0x04F4, - 0x04470308: 0x04F5, - 0x042B0308: 0x04F8, - 0x044B0308: 0x04F9, - 0x06270653: 0x0622, - 0x06270654: 0x0623, - 0x06480654: 0x0624, - 0x06270655: 0x0625, - 0x064A0654: 0x0626, - 0x06D50654: 0x06C0, - 0x06C10654: 0x06C2, - 0x06D20654: 0x06D3, - 0x0928093C: 0x0929, - 0x0930093C: 0x0931, - 0x0933093C: 0x0934, - 0x09C709BE: 0x09CB, - 0x09C709D7: 0x09CC, - 0x0B470B56: 0x0B48, - 0x0B470B3E: 0x0B4B, - 0x0B470B57: 0x0B4C, - 0x0B920BD7: 0x0B94, - 0x0BC60BBE: 0x0BCA, - 0x0BC70BBE: 0x0BCB, - 0x0BC60BD7: 0x0BCC, - 0x0C460C56: 0x0C48, - 0x0CBF0CD5: 0x0CC0, - 0x0CC60CD5: 0x0CC7, - 0x0CC60CD6: 0x0CC8, - 0x0CC60CC2: 0x0CCA, - 0x0CCA0CD5: 0x0CCB, - 0x0D460D3E: 0x0D4A, - 0x0D470D3E: 0x0D4B, - 0x0D460D57: 0x0D4C, - 0x0DD90DCA: 0x0DDA, - 0x0DD90DCF: 0x0DDC, - 0x0DDC0DCA: 0x0DDD, - 0x0DD90DDF: 0x0DDE, - 0x1025102E: 0x1026, - 0x1B051B35: 0x1B06, - 0x1B071B35: 0x1B08, - 0x1B091B35: 0x1B0A, - 0x1B0B1B35: 0x1B0C, - 0x1B0D1B35: 0x1B0E, - 0x1B111B35: 0x1B12, - 0x1B3A1B35: 0x1B3B, - 0x1B3C1B35: 0x1B3D, - 0x1B3E1B35: 0x1B40, - 0x1B3F1B35: 0x1B41, - 0x1B421B35: 0x1B43, - 0x00410325: 0x1E00, - 0x00610325: 0x1E01, - 0x00420307: 0x1E02, - 0x00620307: 0x1E03, - 0x00420323: 0x1E04, - 0x00620323: 0x1E05, - 0x00420331: 0x1E06, - 0x00620331: 0x1E07, - 0x00C70301: 0x1E08, - 0x00E70301: 0x1E09, - 0x00440307: 0x1E0A, - 0x00640307: 0x1E0B, - 0x00440323: 0x1E0C, - 0x00640323: 0x1E0D, - 0x00440331: 0x1E0E, - 0x00640331: 0x1E0F, - 0x00440327: 0x1E10, - 0x00640327: 0x1E11, - 0x0044032D: 0x1E12, - 0x0064032D: 0x1E13, - 0x01120300: 0x1E14, - 0x01130300: 0x1E15, - 0x01120301: 0x1E16, - 0x01130301: 0x1E17, - 0x0045032D: 0x1E18, - 0x0065032D: 0x1E19, - 0x00450330: 0x1E1A, - 0x00650330: 0x1E1B, - 0x02280306: 0x1E1C, - 0x02290306: 0x1E1D, - 0x00460307: 0x1E1E, - 0x00660307: 0x1E1F, - 0x00470304: 0x1E20, - 0x00670304: 0x1E21, - 0x00480307: 0x1E22, - 0x00680307: 0x1E23, - 0x00480323: 0x1E24, - 0x00680323: 0x1E25, - 0x00480308: 0x1E26, - 0x00680308: 0x1E27, - 0x00480327: 0x1E28, - 0x00680327: 0x1E29, - 0x0048032E: 0x1E2A, - 0x0068032E: 0x1E2B, - 0x00490330: 0x1E2C, - 0x00690330: 0x1E2D, - 0x00CF0301: 0x1E2E, - 0x00EF0301: 0x1E2F, - 0x004B0301: 0x1E30, - 0x006B0301: 0x1E31, - 0x004B0323: 0x1E32, - 0x006B0323: 0x1E33, - 0x004B0331: 0x1E34, - 0x006B0331: 0x1E35, - 0x004C0323: 0x1E36, - 0x006C0323: 0x1E37, - 0x1E360304: 0x1E38, - 0x1E370304: 0x1E39, - 0x004C0331: 0x1E3A, - 0x006C0331: 0x1E3B, - 0x004C032D: 0x1E3C, - 0x006C032D: 0x1E3D, - 0x004D0301: 0x1E3E, - 0x006D0301: 0x1E3F, - 0x004D0307: 0x1E40, - 0x006D0307: 0x1E41, - 0x004D0323: 0x1E42, - 0x006D0323: 0x1E43, - 0x004E0307: 0x1E44, - 0x006E0307: 0x1E45, - 0x004E0323: 0x1E46, - 0x006E0323: 0x1E47, - 0x004E0331: 0x1E48, - 0x006E0331: 0x1E49, - 0x004E032D: 0x1E4A, - 0x006E032D: 0x1E4B, - 0x00D50301: 0x1E4C, - 0x00F50301: 0x1E4D, - 0x00D50308: 0x1E4E, - 0x00F50308: 0x1E4F, - 0x014C0300: 0x1E50, - 0x014D0300: 0x1E51, - 0x014C0301: 0x1E52, - 0x014D0301: 0x1E53, - 0x00500301: 0x1E54, - 0x00700301: 0x1E55, - 0x00500307: 0x1E56, - 0x00700307: 0x1E57, - 0x00520307: 0x1E58, - 0x00720307: 0x1E59, - 0x00520323: 0x1E5A, - 0x00720323: 0x1E5B, - 0x1E5A0304: 0x1E5C, - 0x1E5B0304: 0x1E5D, - 0x00520331: 0x1E5E, - 0x00720331: 0x1E5F, - 0x00530307: 0x1E60, - 0x00730307: 0x1E61, - 0x00530323: 0x1E62, - 0x00730323: 0x1E63, - 0x015A0307: 0x1E64, - 0x015B0307: 0x1E65, - 0x01600307: 0x1E66, - 0x01610307: 0x1E67, - 0x1E620307: 0x1E68, - 
0x1E630307: 0x1E69, - 0x00540307: 0x1E6A, - 0x00740307: 0x1E6B, - 0x00540323: 0x1E6C, - 0x00740323: 0x1E6D, - 0x00540331: 0x1E6E, - 0x00740331: 0x1E6F, - 0x0054032D: 0x1E70, - 0x0074032D: 0x1E71, - 0x00550324: 0x1E72, - 0x00750324: 0x1E73, - 0x00550330: 0x1E74, - 0x00750330: 0x1E75, - 0x0055032D: 0x1E76, - 0x0075032D: 0x1E77, - 0x01680301: 0x1E78, - 0x01690301: 0x1E79, - 0x016A0308: 0x1E7A, - 0x016B0308: 0x1E7B, - 0x00560303: 0x1E7C, - 0x00760303: 0x1E7D, - 0x00560323: 0x1E7E, - 0x00760323: 0x1E7F, - 0x00570300: 0x1E80, - 0x00770300: 0x1E81, - 0x00570301: 0x1E82, - 0x00770301: 0x1E83, - 0x00570308: 0x1E84, - 0x00770308: 0x1E85, - 0x00570307: 0x1E86, - 0x00770307: 0x1E87, - 0x00570323: 0x1E88, - 0x00770323: 0x1E89, - 0x00580307: 0x1E8A, - 0x00780307: 0x1E8B, - 0x00580308: 0x1E8C, - 0x00780308: 0x1E8D, - 0x00590307: 0x1E8E, - 0x00790307: 0x1E8F, - 0x005A0302: 0x1E90, - 0x007A0302: 0x1E91, - 0x005A0323: 0x1E92, - 0x007A0323: 0x1E93, - 0x005A0331: 0x1E94, - 0x007A0331: 0x1E95, - 0x00680331: 0x1E96, - 0x00740308: 0x1E97, - 0x0077030A: 0x1E98, - 0x0079030A: 0x1E99, - 0x017F0307: 0x1E9B, - 0x00410323: 0x1EA0, - 0x00610323: 0x1EA1, - 0x00410309: 0x1EA2, - 0x00610309: 0x1EA3, - 0x00C20301: 0x1EA4, - 0x00E20301: 0x1EA5, - 0x00C20300: 0x1EA6, - 0x00E20300: 0x1EA7, - 0x00C20309: 0x1EA8, - 0x00E20309: 0x1EA9, - 0x00C20303: 0x1EAA, - 0x00E20303: 0x1EAB, - 0x1EA00302: 0x1EAC, - 0x1EA10302: 0x1EAD, - 0x01020301: 0x1EAE, - 0x01030301: 0x1EAF, - 0x01020300: 0x1EB0, - 0x01030300: 0x1EB1, - 0x01020309: 0x1EB2, - 0x01030309: 0x1EB3, - 0x01020303: 0x1EB4, - 0x01030303: 0x1EB5, - 0x1EA00306: 0x1EB6, - 0x1EA10306: 0x1EB7, - 0x00450323: 0x1EB8, - 0x00650323: 0x1EB9, - 0x00450309: 0x1EBA, - 0x00650309: 0x1EBB, - 0x00450303: 0x1EBC, - 0x00650303: 0x1EBD, - 0x00CA0301: 0x1EBE, - 0x00EA0301: 0x1EBF, - 0x00CA0300: 0x1EC0, - 0x00EA0300: 0x1EC1, - 0x00CA0309: 0x1EC2, - 0x00EA0309: 0x1EC3, - 0x00CA0303: 0x1EC4, - 0x00EA0303: 0x1EC5, - 0x1EB80302: 0x1EC6, - 0x1EB90302: 0x1EC7, - 0x00490309: 0x1EC8, - 0x00690309: 0x1EC9, - 0x00490323: 0x1ECA, - 0x00690323: 0x1ECB, - 0x004F0323: 0x1ECC, - 0x006F0323: 0x1ECD, - 0x004F0309: 0x1ECE, - 0x006F0309: 0x1ECF, - 0x00D40301: 0x1ED0, - 0x00F40301: 0x1ED1, - 0x00D40300: 0x1ED2, - 0x00F40300: 0x1ED3, - 0x00D40309: 0x1ED4, - 0x00F40309: 0x1ED5, - 0x00D40303: 0x1ED6, - 0x00F40303: 0x1ED7, - 0x1ECC0302: 0x1ED8, - 0x1ECD0302: 0x1ED9, - 0x01A00301: 0x1EDA, - 0x01A10301: 0x1EDB, - 0x01A00300: 0x1EDC, - 0x01A10300: 0x1EDD, - 0x01A00309: 0x1EDE, - 0x01A10309: 0x1EDF, - 0x01A00303: 0x1EE0, - 0x01A10303: 0x1EE1, - 0x01A00323: 0x1EE2, - 0x01A10323: 0x1EE3, - 0x00550323: 0x1EE4, - 0x00750323: 0x1EE5, - 0x00550309: 0x1EE6, - 0x00750309: 0x1EE7, - 0x01AF0301: 0x1EE8, - 0x01B00301: 0x1EE9, - 0x01AF0300: 0x1EEA, - 0x01B00300: 0x1EEB, - 0x01AF0309: 0x1EEC, - 0x01B00309: 0x1EED, - 0x01AF0303: 0x1EEE, - 0x01B00303: 0x1EEF, - 0x01AF0323: 0x1EF0, - 0x01B00323: 0x1EF1, - 0x00590300: 0x1EF2, - 0x00790300: 0x1EF3, - 0x00590323: 0x1EF4, - 0x00790323: 0x1EF5, - 0x00590309: 0x1EF6, - 0x00790309: 0x1EF7, - 0x00590303: 0x1EF8, - 0x00790303: 0x1EF9, - 0x03B10313: 0x1F00, - 0x03B10314: 0x1F01, - 0x1F000300: 0x1F02, - 0x1F010300: 0x1F03, - 0x1F000301: 0x1F04, - 0x1F010301: 0x1F05, - 0x1F000342: 0x1F06, - 0x1F010342: 0x1F07, - 0x03910313: 0x1F08, - 0x03910314: 0x1F09, - 0x1F080300: 0x1F0A, - 0x1F090300: 0x1F0B, - 0x1F080301: 0x1F0C, - 0x1F090301: 0x1F0D, - 0x1F080342: 0x1F0E, - 0x1F090342: 0x1F0F, - 0x03B50313: 0x1F10, - 0x03B50314: 0x1F11, - 0x1F100300: 0x1F12, - 0x1F110300: 0x1F13, - 0x1F100301: 0x1F14, - 0x1F110301: 
0x1F15, - 0x03950313: 0x1F18, - 0x03950314: 0x1F19, - 0x1F180300: 0x1F1A, - 0x1F190300: 0x1F1B, - 0x1F180301: 0x1F1C, - 0x1F190301: 0x1F1D, - 0x03B70313: 0x1F20, - 0x03B70314: 0x1F21, - 0x1F200300: 0x1F22, - 0x1F210300: 0x1F23, - 0x1F200301: 0x1F24, - 0x1F210301: 0x1F25, - 0x1F200342: 0x1F26, - 0x1F210342: 0x1F27, - 0x03970313: 0x1F28, - 0x03970314: 0x1F29, - 0x1F280300: 0x1F2A, - 0x1F290300: 0x1F2B, - 0x1F280301: 0x1F2C, - 0x1F290301: 0x1F2D, - 0x1F280342: 0x1F2E, - 0x1F290342: 0x1F2F, - 0x03B90313: 0x1F30, - 0x03B90314: 0x1F31, - 0x1F300300: 0x1F32, - 0x1F310300: 0x1F33, - 0x1F300301: 0x1F34, - 0x1F310301: 0x1F35, - 0x1F300342: 0x1F36, - 0x1F310342: 0x1F37, - 0x03990313: 0x1F38, - 0x03990314: 0x1F39, - 0x1F380300: 0x1F3A, - 0x1F390300: 0x1F3B, - 0x1F380301: 0x1F3C, - 0x1F390301: 0x1F3D, - 0x1F380342: 0x1F3E, - 0x1F390342: 0x1F3F, - 0x03BF0313: 0x1F40, - 0x03BF0314: 0x1F41, - 0x1F400300: 0x1F42, - 0x1F410300: 0x1F43, - 0x1F400301: 0x1F44, - 0x1F410301: 0x1F45, - 0x039F0313: 0x1F48, - 0x039F0314: 0x1F49, - 0x1F480300: 0x1F4A, - 0x1F490300: 0x1F4B, - 0x1F480301: 0x1F4C, - 0x1F490301: 0x1F4D, - 0x03C50313: 0x1F50, - 0x03C50314: 0x1F51, - 0x1F500300: 0x1F52, - 0x1F510300: 0x1F53, - 0x1F500301: 0x1F54, - 0x1F510301: 0x1F55, - 0x1F500342: 0x1F56, - 0x1F510342: 0x1F57, - 0x03A50314: 0x1F59, - 0x1F590300: 0x1F5B, - 0x1F590301: 0x1F5D, - 0x1F590342: 0x1F5F, - 0x03C90313: 0x1F60, - 0x03C90314: 0x1F61, - 0x1F600300: 0x1F62, - 0x1F610300: 0x1F63, - 0x1F600301: 0x1F64, - 0x1F610301: 0x1F65, - 0x1F600342: 0x1F66, - 0x1F610342: 0x1F67, - 0x03A90313: 0x1F68, - 0x03A90314: 0x1F69, - 0x1F680300: 0x1F6A, - 0x1F690300: 0x1F6B, - 0x1F680301: 0x1F6C, - 0x1F690301: 0x1F6D, - 0x1F680342: 0x1F6E, - 0x1F690342: 0x1F6F, - 0x03B10300: 0x1F70, - 0x03B50300: 0x1F72, - 0x03B70300: 0x1F74, - 0x03B90300: 0x1F76, - 0x03BF0300: 0x1F78, - 0x03C50300: 0x1F7A, - 0x03C90300: 0x1F7C, - 0x1F000345: 0x1F80, - 0x1F010345: 0x1F81, - 0x1F020345: 0x1F82, - 0x1F030345: 0x1F83, - 0x1F040345: 0x1F84, - 0x1F050345: 0x1F85, - 0x1F060345: 0x1F86, - 0x1F070345: 0x1F87, - 0x1F080345: 0x1F88, - 0x1F090345: 0x1F89, - 0x1F0A0345: 0x1F8A, - 0x1F0B0345: 0x1F8B, - 0x1F0C0345: 0x1F8C, - 0x1F0D0345: 0x1F8D, - 0x1F0E0345: 0x1F8E, - 0x1F0F0345: 0x1F8F, - 0x1F200345: 0x1F90, - 0x1F210345: 0x1F91, - 0x1F220345: 0x1F92, - 0x1F230345: 0x1F93, - 0x1F240345: 0x1F94, - 0x1F250345: 0x1F95, - 0x1F260345: 0x1F96, - 0x1F270345: 0x1F97, - 0x1F280345: 0x1F98, - 0x1F290345: 0x1F99, - 0x1F2A0345: 0x1F9A, - 0x1F2B0345: 0x1F9B, - 0x1F2C0345: 0x1F9C, - 0x1F2D0345: 0x1F9D, - 0x1F2E0345: 0x1F9E, - 0x1F2F0345: 0x1F9F, - 0x1F600345: 0x1FA0, - 0x1F610345: 0x1FA1, - 0x1F620345: 0x1FA2, - 0x1F630345: 0x1FA3, - 0x1F640345: 0x1FA4, - 0x1F650345: 0x1FA5, - 0x1F660345: 0x1FA6, - 0x1F670345: 0x1FA7, - 0x1F680345: 0x1FA8, - 0x1F690345: 0x1FA9, - 0x1F6A0345: 0x1FAA, - 0x1F6B0345: 0x1FAB, - 0x1F6C0345: 0x1FAC, - 0x1F6D0345: 0x1FAD, - 0x1F6E0345: 0x1FAE, - 0x1F6F0345: 0x1FAF, - 0x03B10306: 0x1FB0, - 0x03B10304: 0x1FB1, - 0x1F700345: 0x1FB2, - 0x03B10345: 0x1FB3, - 0x03AC0345: 0x1FB4, - 0x03B10342: 0x1FB6, - 0x1FB60345: 0x1FB7, - 0x03910306: 0x1FB8, - 0x03910304: 0x1FB9, - 0x03910300: 0x1FBA, - 0x03910345: 0x1FBC, - 0x00A80342: 0x1FC1, - 0x1F740345: 0x1FC2, - 0x03B70345: 0x1FC3, - 0x03AE0345: 0x1FC4, - 0x03B70342: 0x1FC6, - 0x1FC60345: 0x1FC7, - 0x03950300: 0x1FC8, - 0x03970300: 0x1FCA, - 0x03970345: 0x1FCC, - 0x1FBF0300: 0x1FCD, - 0x1FBF0301: 0x1FCE, - 0x1FBF0342: 0x1FCF, - 0x03B90306: 0x1FD0, - 0x03B90304: 0x1FD1, - 0x03CA0300: 0x1FD2, - 0x03B90342: 0x1FD6, - 0x03CA0342: 0x1FD7, - 
0x03990306: 0x1FD8, - 0x03990304: 0x1FD9, - 0x03990300: 0x1FDA, - 0x1FFE0300: 0x1FDD, - 0x1FFE0301: 0x1FDE, - 0x1FFE0342: 0x1FDF, - 0x03C50306: 0x1FE0, - 0x03C50304: 0x1FE1, - 0x03CB0300: 0x1FE2, - 0x03C10313: 0x1FE4, - 0x03C10314: 0x1FE5, - 0x03C50342: 0x1FE6, - 0x03CB0342: 0x1FE7, - 0x03A50306: 0x1FE8, - 0x03A50304: 0x1FE9, - 0x03A50300: 0x1FEA, - 0x03A10314: 0x1FEC, - 0x00A80300: 0x1FED, - 0x1F7C0345: 0x1FF2, - 0x03C90345: 0x1FF3, - 0x03CE0345: 0x1FF4, - 0x03C90342: 0x1FF6, - 0x1FF60345: 0x1FF7, - 0x039F0300: 0x1FF8, - 0x03A90300: 0x1FFA, - 0x03A90345: 0x1FFC, - 0x21900338: 0x219A, - 0x21920338: 0x219B, - 0x21940338: 0x21AE, - 0x21D00338: 0x21CD, - 0x21D40338: 0x21CE, - 0x21D20338: 0x21CF, - 0x22030338: 0x2204, - 0x22080338: 0x2209, - 0x220B0338: 0x220C, - 0x22230338: 0x2224, - 0x22250338: 0x2226, - 0x223C0338: 0x2241, - 0x22430338: 0x2244, - 0x22450338: 0x2247, - 0x22480338: 0x2249, - 0x003D0338: 0x2260, - 0x22610338: 0x2262, - 0x224D0338: 0x226D, - 0x003C0338: 0x226E, - 0x003E0338: 0x226F, - 0x22640338: 0x2270, - 0x22650338: 0x2271, - 0x22720338: 0x2274, - 0x22730338: 0x2275, - 0x22760338: 0x2278, - 0x22770338: 0x2279, - 0x227A0338: 0x2280, - 0x227B0338: 0x2281, - 0x22820338: 0x2284, - 0x22830338: 0x2285, - 0x22860338: 0x2288, - 0x22870338: 0x2289, - 0x22A20338: 0x22AC, - 0x22A80338: 0x22AD, - 0x22A90338: 0x22AE, - 0x22AB0338: 0x22AF, - 0x227C0338: 0x22E0, - 0x227D0338: 0x22E1, - 0x22910338: 0x22E2, - 0x22920338: 0x22E3, - 0x22B20338: 0x22EA, - 0x22B30338: 0x22EB, - 0x22B40338: 0x22EC, - 0x22B50338: 0x22ED, - 0x304B3099: 0x304C, - 0x304D3099: 0x304E, - 0x304F3099: 0x3050, - 0x30513099: 0x3052, - 0x30533099: 0x3054, - 0x30553099: 0x3056, - 0x30573099: 0x3058, - 0x30593099: 0x305A, - 0x305B3099: 0x305C, - 0x305D3099: 0x305E, - 0x305F3099: 0x3060, - 0x30613099: 0x3062, - 0x30643099: 0x3065, - 0x30663099: 0x3067, - 0x30683099: 0x3069, - 0x306F3099: 0x3070, - 0x306F309A: 0x3071, - 0x30723099: 0x3073, - 0x3072309A: 0x3074, - 0x30753099: 0x3076, - 0x3075309A: 0x3077, - 0x30783099: 0x3079, - 0x3078309A: 0x307A, - 0x307B3099: 0x307C, - 0x307B309A: 0x307D, - 0x30463099: 0x3094, - 0x309D3099: 0x309E, - 0x30AB3099: 0x30AC, - 0x30AD3099: 0x30AE, - 0x30AF3099: 0x30B0, - 0x30B13099: 0x30B2, - 0x30B33099: 0x30B4, - 0x30B53099: 0x30B6, - 0x30B73099: 0x30B8, - 0x30B93099: 0x30BA, - 0x30BB3099: 0x30BC, - 0x30BD3099: 0x30BE, - 0x30BF3099: 0x30C0, - 0x30C13099: 0x30C2, - 0x30C43099: 0x30C5, - 0x30C63099: 0x30C7, - 0x30C83099: 0x30C9, - 0x30CF3099: 0x30D0, - 0x30CF309A: 0x30D1, - 0x30D23099: 0x30D3, - 0x30D2309A: 0x30D4, - 0x30D53099: 0x30D6, - 0x30D5309A: 0x30D7, - 0x30D83099: 0x30D9, - 0x30D8309A: 0x30DA, - 0x30DB3099: 0x30DC, - 0x30DB309A: 0x30DD, - 0x30A63099: 0x30F4, - 0x30EF3099: 0x30F7, - 0x30F03099: 0x30F8, - 0x30F13099: 0x30F9, - 0x30F23099: 0x30FA, - 0x30FD3099: 0x30FE, - 0x109910BA: 0x1109A, - 0x109B10BA: 0x1109C, - 0x10A510BA: 0x110AB, - 0x11311127: 0x1112E, - 0x11321127: 0x1112F, - 0x1347133E: 0x1134B, - 0x13471357: 0x1134C, - 0x14B914BA: 0x114BB, - 0x14B914B0: 0x114BC, - 0x14B914BD: 0x114BE, - 0x15B815AF: 0x115BA, - 0x15B915AF: 0x115BB, -} +var recompMap map[uint32]rune +var recompMapOnce sync.Once -// Total size of tables: 53KB (54006 bytes) +const recompMapPacked = "" + + "\x00A\x03\x00\x00\x00\x00\xc0" + // 0x00410300: 0x000000C0 + "\x00A\x03\x01\x00\x00\x00\xc1" + // 0x00410301: 0x000000C1 + "\x00A\x03\x02\x00\x00\x00\xc2" + // 0x00410302: 0x000000C2 + "\x00A\x03\x03\x00\x00\x00\xc3" + // 0x00410303: 0x000000C3 + "\x00A\x03\b\x00\x00\x00\xc4" + // 0x00410308: 0x000000C4 + 
"\x00A\x03\n\x00\x00\x00\xc5" + // 0x0041030A: 0x000000C5 + "\x00C\x03'\x00\x00\x00\xc7" + // 0x00430327: 0x000000C7 + "\x00E\x03\x00\x00\x00\x00\xc8" + // 0x00450300: 0x000000C8 + "\x00E\x03\x01\x00\x00\x00\xc9" + // 0x00450301: 0x000000C9 + "\x00E\x03\x02\x00\x00\x00\xca" + // 0x00450302: 0x000000CA + "\x00E\x03\b\x00\x00\x00\xcb" + // 0x00450308: 0x000000CB + "\x00I\x03\x00\x00\x00\x00\xcc" + // 0x00490300: 0x000000CC + "\x00I\x03\x01\x00\x00\x00\xcd" + // 0x00490301: 0x000000CD + "\x00I\x03\x02\x00\x00\x00\xce" + // 0x00490302: 0x000000CE + "\x00I\x03\b\x00\x00\x00\xcf" + // 0x00490308: 0x000000CF + "\x00N\x03\x03\x00\x00\x00\xd1" + // 0x004E0303: 0x000000D1 + "\x00O\x03\x00\x00\x00\x00\xd2" + // 0x004F0300: 0x000000D2 + "\x00O\x03\x01\x00\x00\x00\xd3" + // 0x004F0301: 0x000000D3 + "\x00O\x03\x02\x00\x00\x00\xd4" + // 0x004F0302: 0x000000D4 + "\x00O\x03\x03\x00\x00\x00\xd5" + // 0x004F0303: 0x000000D5 + "\x00O\x03\b\x00\x00\x00\xd6" + // 0x004F0308: 0x000000D6 + "\x00U\x03\x00\x00\x00\x00\xd9" + // 0x00550300: 0x000000D9 + "\x00U\x03\x01\x00\x00\x00\xda" + // 0x00550301: 0x000000DA + "\x00U\x03\x02\x00\x00\x00\xdb" + // 0x00550302: 0x000000DB + "\x00U\x03\b\x00\x00\x00\xdc" + // 0x00550308: 0x000000DC + "\x00Y\x03\x01\x00\x00\x00\xdd" + // 0x00590301: 0x000000DD + "\x00a\x03\x00\x00\x00\x00\xe0" + // 0x00610300: 0x000000E0 + "\x00a\x03\x01\x00\x00\x00\xe1" + // 0x00610301: 0x000000E1 + "\x00a\x03\x02\x00\x00\x00\xe2" + // 0x00610302: 0x000000E2 + "\x00a\x03\x03\x00\x00\x00\xe3" + // 0x00610303: 0x000000E3 + "\x00a\x03\b\x00\x00\x00\xe4" + // 0x00610308: 0x000000E4 + "\x00a\x03\n\x00\x00\x00\xe5" + // 0x0061030A: 0x000000E5 + "\x00c\x03'\x00\x00\x00\xe7" + // 0x00630327: 0x000000E7 + "\x00e\x03\x00\x00\x00\x00\xe8" + // 0x00650300: 0x000000E8 + "\x00e\x03\x01\x00\x00\x00\xe9" + // 0x00650301: 0x000000E9 + "\x00e\x03\x02\x00\x00\x00\xea" + // 0x00650302: 0x000000EA + "\x00e\x03\b\x00\x00\x00\xeb" + // 0x00650308: 0x000000EB + "\x00i\x03\x00\x00\x00\x00\xec" + // 0x00690300: 0x000000EC + "\x00i\x03\x01\x00\x00\x00\xed" + // 0x00690301: 0x000000ED + "\x00i\x03\x02\x00\x00\x00\xee" + // 0x00690302: 0x000000EE + "\x00i\x03\b\x00\x00\x00\xef" + // 0x00690308: 0x000000EF + "\x00n\x03\x03\x00\x00\x00\xf1" + // 0x006E0303: 0x000000F1 + "\x00o\x03\x00\x00\x00\x00\xf2" + // 0x006F0300: 0x000000F2 + "\x00o\x03\x01\x00\x00\x00\xf3" + // 0x006F0301: 0x000000F3 + "\x00o\x03\x02\x00\x00\x00\xf4" + // 0x006F0302: 0x000000F4 + "\x00o\x03\x03\x00\x00\x00\xf5" + // 0x006F0303: 0x000000F5 + "\x00o\x03\b\x00\x00\x00\xf6" + // 0x006F0308: 0x000000F6 + "\x00u\x03\x00\x00\x00\x00\xf9" + // 0x00750300: 0x000000F9 + "\x00u\x03\x01\x00\x00\x00\xfa" + // 0x00750301: 0x000000FA + "\x00u\x03\x02\x00\x00\x00\xfb" + // 0x00750302: 0x000000FB + "\x00u\x03\b\x00\x00\x00\xfc" + // 0x00750308: 0x000000FC + "\x00y\x03\x01\x00\x00\x00\xfd" + // 0x00790301: 0x000000FD + "\x00y\x03\b\x00\x00\x00\xff" + // 0x00790308: 0x000000FF + "\x00A\x03\x04\x00\x00\x01\x00" + // 0x00410304: 0x00000100 + "\x00a\x03\x04\x00\x00\x01\x01" + // 0x00610304: 0x00000101 + "\x00A\x03\x06\x00\x00\x01\x02" + // 0x00410306: 0x00000102 + "\x00a\x03\x06\x00\x00\x01\x03" + // 0x00610306: 0x00000103 + "\x00A\x03(\x00\x00\x01\x04" + // 0x00410328: 0x00000104 + "\x00a\x03(\x00\x00\x01\x05" + // 0x00610328: 0x00000105 + "\x00C\x03\x01\x00\x00\x01\x06" + // 0x00430301: 0x00000106 + "\x00c\x03\x01\x00\x00\x01\a" + // 0x00630301: 0x00000107 + "\x00C\x03\x02\x00\x00\x01\b" + // 0x00430302: 0x00000108 + "\x00c\x03\x02\x00\x00\x01\t" + // 0x00630302: 0x00000109 + 
"\x00C\x03\a\x00\x00\x01\n" + // 0x00430307: 0x0000010A + "\x00c\x03\a\x00\x00\x01\v" + // 0x00630307: 0x0000010B + "\x00C\x03\f\x00\x00\x01\f" + // 0x0043030C: 0x0000010C + "\x00c\x03\f\x00\x00\x01\r" + // 0x0063030C: 0x0000010D + "\x00D\x03\f\x00\x00\x01\x0e" + // 0x0044030C: 0x0000010E + "\x00d\x03\f\x00\x00\x01\x0f" + // 0x0064030C: 0x0000010F + "\x00E\x03\x04\x00\x00\x01\x12" + // 0x00450304: 0x00000112 + "\x00e\x03\x04\x00\x00\x01\x13" + // 0x00650304: 0x00000113 + "\x00E\x03\x06\x00\x00\x01\x14" + // 0x00450306: 0x00000114 + "\x00e\x03\x06\x00\x00\x01\x15" + // 0x00650306: 0x00000115 + "\x00E\x03\a\x00\x00\x01\x16" + // 0x00450307: 0x00000116 + "\x00e\x03\a\x00\x00\x01\x17" + // 0x00650307: 0x00000117 + "\x00E\x03(\x00\x00\x01\x18" + // 0x00450328: 0x00000118 + "\x00e\x03(\x00\x00\x01\x19" + // 0x00650328: 0x00000119 + "\x00E\x03\f\x00\x00\x01\x1a" + // 0x0045030C: 0x0000011A + "\x00e\x03\f\x00\x00\x01\x1b" + // 0x0065030C: 0x0000011B + "\x00G\x03\x02\x00\x00\x01\x1c" + // 0x00470302: 0x0000011C + "\x00g\x03\x02\x00\x00\x01\x1d" + // 0x00670302: 0x0000011D + "\x00G\x03\x06\x00\x00\x01\x1e" + // 0x00470306: 0x0000011E + "\x00g\x03\x06\x00\x00\x01\x1f" + // 0x00670306: 0x0000011F + "\x00G\x03\a\x00\x00\x01 " + // 0x00470307: 0x00000120 + "\x00g\x03\a\x00\x00\x01!" + // 0x00670307: 0x00000121 + "\x00G\x03'\x00\x00\x01\"" + // 0x00470327: 0x00000122 + "\x00g\x03'\x00\x00\x01#" + // 0x00670327: 0x00000123 + "\x00H\x03\x02\x00\x00\x01$" + // 0x00480302: 0x00000124 + "\x00h\x03\x02\x00\x00\x01%" + // 0x00680302: 0x00000125 + "\x00I\x03\x03\x00\x00\x01(" + // 0x00490303: 0x00000128 + "\x00i\x03\x03\x00\x00\x01)" + // 0x00690303: 0x00000129 + "\x00I\x03\x04\x00\x00\x01*" + // 0x00490304: 0x0000012A + "\x00i\x03\x04\x00\x00\x01+" + // 0x00690304: 0x0000012B + "\x00I\x03\x06\x00\x00\x01," + // 0x00490306: 0x0000012C + "\x00i\x03\x06\x00\x00\x01-" + // 0x00690306: 0x0000012D + "\x00I\x03(\x00\x00\x01." 
+ // 0x00490328: 0x0000012E + "\x00i\x03(\x00\x00\x01/" + // 0x00690328: 0x0000012F + "\x00I\x03\a\x00\x00\x010" + // 0x00490307: 0x00000130 + "\x00J\x03\x02\x00\x00\x014" + // 0x004A0302: 0x00000134 + "\x00j\x03\x02\x00\x00\x015" + // 0x006A0302: 0x00000135 + "\x00K\x03'\x00\x00\x016" + // 0x004B0327: 0x00000136 + "\x00k\x03'\x00\x00\x017" + // 0x006B0327: 0x00000137 + "\x00L\x03\x01\x00\x00\x019" + // 0x004C0301: 0x00000139 + "\x00l\x03\x01\x00\x00\x01:" + // 0x006C0301: 0x0000013A + "\x00L\x03'\x00\x00\x01;" + // 0x004C0327: 0x0000013B + "\x00l\x03'\x00\x00\x01<" + // 0x006C0327: 0x0000013C + "\x00L\x03\f\x00\x00\x01=" + // 0x004C030C: 0x0000013D + "\x00l\x03\f\x00\x00\x01>" + // 0x006C030C: 0x0000013E + "\x00N\x03\x01\x00\x00\x01C" + // 0x004E0301: 0x00000143 + "\x00n\x03\x01\x00\x00\x01D" + // 0x006E0301: 0x00000144 + "\x00N\x03'\x00\x00\x01E" + // 0x004E0327: 0x00000145 + "\x00n\x03'\x00\x00\x01F" + // 0x006E0327: 0x00000146 + "\x00N\x03\f\x00\x00\x01G" + // 0x004E030C: 0x00000147 + "\x00n\x03\f\x00\x00\x01H" + // 0x006E030C: 0x00000148 + "\x00O\x03\x04\x00\x00\x01L" + // 0x004F0304: 0x0000014C + "\x00o\x03\x04\x00\x00\x01M" + // 0x006F0304: 0x0000014D + "\x00O\x03\x06\x00\x00\x01N" + // 0x004F0306: 0x0000014E + "\x00o\x03\x06\x00\x00\x01O" + // 0x006F0306: 0x0000014F + "\x00O\x03\v\x00\x00\x01P" + // 0x004F030B: 0x00000150 + "\x00o\x03\v\x00\x00\x01Q" + // 0x006F030B: 0x00000151 + "\x00R\x03\x01\x00\x00\x01T" + // 0x00520301: 0x00000154 + "\x00r\x03\x01\x00\x00\x01U" + // 0x00720301: 0x00000155 + "\x00R\x03'\x00\x00\x01V" + // 0x00520327: 0x00000156 + "\x00r\x03'\x00\x00\x01W" + // 0x00720327: 0x00000157 + "\x00R\x03\f\x00\x00\x01X" + // 0x0052030C: 0x00000158 + "\x00r\x03\f\x00\x00\x01Y" + // 0x0072030C: 0x00000159 + "\x00S\x03\x01\x00\x00\x01Z" + // 0x00530301: 0x0000015A + "\x00s\x03\x01\x00\x00\x01[" + // 0x00730301: 0x0000015B + "\x00S\x03\x02\x00\x00\x01\\" + // 0x00530302: 0x0000015C + "\x00s\x03\x02\x00\x00\x01]" + // 0x00730302: 0x0000015D + "\x00S\x03'\x00\x00\x01^" + // 0x00530327: 0x0000015E + "\x00s\x03'\x00\x00\x01_" + // 0x00730327: 0x0000015F + "\x00S\x03\f\x00\x00\x01`" + // 0x0053030C: 0x00000160 + "\x00s\x03\f\x00\x00\x01a" + // 0x0073030C: 0x00000161 + "\x00T\x03'\x00\x00\x01b" + // 0x00540327: 0x00000162 + "\x00t\x03'\x00\x00\x01c" + // 0x00740327: 0x00000163 + "\x00T\x03\f\x00\x00\x01d" + // 0x0054030C: 0x00000164 + "\x00t\x03\f\x00\x00\x01e" + // 0x0074030C: 0x00000165 + "\x00U\x03\x03\x00\x00\x01h" + // 0x00550303: 0x00000168 + "\x00u\x03\x03\x00\x00\x01i" + // 0x00750303: 0x00000169 + "\x00U\x03\x04\x00\x00\x01j" + // 0x00550304: 0x0000016A + "\x00u\x03\x04\x00\x00\x01k" + // 0x00750304: 0x0000016B + "\x00U\x03\x06\x00\x00\x01l" + // 0x00550306: 0x0000016C + "\x00u\x03\x06\x00\x00\x01m" + // 0x00750306: 0x0000016D + "\x00U\x03\n\x00\x00\x01n" + // 0x0055030A: 0x0000016E + "\x00u\x03\n\x00\x00\x01o" + // 0x0075030A: 0x0000016F + "\x00U\x03\v\x00\x00\x01p" + // 0x0055030B: 0x00000170 + "\x00u\x03\v\x00\x00\x01q" + // 0x0075030B: 0x00000171 + "\x00U\x03(\x00\x00\x01r" + // 0x00550328: 0x00000172 + "\x00u\x03(\x00\x00\x01s" + // 0x00750328: 0x00000173 + "\x00W\x03\x02\x00\x00\x01t" + // 0x00570302: 0x00000174 + "\x00w\x03\x02\x00\x00\x01u" + // 0x00770302: 0x00000175 + "\x00Y\x03\x02\x00\x00\x01v" + // 0x00590302: 0x00000176 + "\x00y\x03\x02\x00\x00\x01w" + // 0x00790302: 0x00000177 + "\x00Y\x03\b\x00\x00\x01x" + // 0x00590308: 0x00000178 + "\x00Z\x03\x01\x00\x00\x01y" + // 0x005A0301: 0x00000179 + "\x00z\x03\x01\x00\x00\x01z" + // 0x007A0301: 0x0000017A + 
"\x00Z\x03\a\x00\x00\x01{" + // 0x005A0307: 0x0000017B + "\x00z\x03\a\x00\x00\x01|" + // 0x007A0307: 0x0000017C + "\x00Z\x03\f\x00\x00\x01}" + // 0x005A030C: 0x0000017D + "\x00z\x03\f\x00\x00\x01~" + // 0x007A030C: 0x0000017E + "\x00O\x03\x1b\x00\x00\x01\xa0" + // 0x004F031B: 0x000001A0 + "\x00o\x03\x1b\x00\x00\x01\xa1" + // 0x006F031B: 0x000001A1 + "\x00U\x03\x1b\x00\x00\x01\xaf" + // 0x0055031B: 0x000001AF + "\x00u\x03\x1b\x00\x00\x01\xb0" + // 0x0075031B: 0x000001B0 + "\x00A\x03\f\x00\x00\x01\xcd" + // 0x0041030C: 0x000001CD + "\x00a\x03\f\x00\x00\x01\xce" + // 0x0061030C: 0x000001CE + "\x00I\x03\f\x00\x00\x01\xcf" + // 0x0049030C: 0x000001CF + "\x00i\x03\f\x00\x00\x01\xd0" + // 0x0069030C: 0x000001D0 + "\x00O\x03\f\x00\x00\x01\xd1" + // 0x004F030C: 0x000001D1 + "\x00o\x03\f\x00\x00\x01\xd2" + // 0x006F030C: 0x000001D2 + "\x00U\x03\f\x00\x00\x01\xd3" + // 0x0055030C: 0x000001D3 + "\x00u\x03\f\x00\x00\x01\xd4" + // 0x0075030C: 0x000001D4 + "\x00\xdc\x03\x04\x00\x00\x01\xd5" + // 0x00DC0304: 0x000001D5 + "\x00\xfc\x03\x04\x00\x00\x01\xd6" + // 0x00FC0304: 0x000001D6 + "\x00\xdc\x03\x01\x00\x00\x01\xd7" + // 0x00DC0301: 0x000001D7 + "\x00\xfc\x03\x01\x00\x00\x01\xd8" + // 0x00FC0301: 0x000001D8 + "\x00\xdc\x03\f\x00\x00\x01\xd9" + // 0x00DC030C: 0x000001D9 + "\x00\xfc\x03\f\x00\x00\x01\xda" + // 0x00FC030C: 0x000001DA + "\x00\xdc\x03\x00\x00\x00\x01\xdb" + // 0x00DC0300: 0x000001DB + "\x00\xfc\x03\x00\x00\x00\x01\xdc" + // 0x00FC0300: 0x000001DC + "\x00\xc4\x03\x04\x00\x00\x01\xde" + // 0x00C40304: 0x000001DE + "\x00\xe4\x03\x04\x00\x00\x01\xdf" + // 0x00E40304: 0x000001DF + "\x02&\x03\x04\x00\x00\x01\xe0" + // 0x02260304: 0x000001E0 + "\x02'\x03\x04\x00\x00\x01\xe1" + // 0x02270304: 0x000001E1 + "\x00\xc6\x03\x04\x00\x00\x01\xe2" + // 0x00C60304: 0x000001E2 + "\x00\xe6\x03\x04\x00\x00\x01\xe3" + // 0x00E60304: 0x000001E3 + "\x00G\x03\f\x00\x00\x01\xe6" + // 0x0047030C: 0x000001E6 + "\x00g\x03\f\x00\x00\x01\xe7" + // 0x0067030C: 0x000001E7 + "\x00K\x03\f\x00\x00\x01\xe8" + // 0x004B030C: 0x000001E8 + "\x00k\x03\f\x00\x00\x01\xe9" + // 0x006B030C: 0x000001E9 + "\x00O\x03(\x00\x00\x01\xea" + // 0x004F0328: 0x000001EA + "\x00o\x03(\x00\x00\x01\xeb" + // 0x006F0328: 0x000001EB + "\x01\xea\x03\x04\x00\x00\x01\xec" + // 0x01EA0304: 0x000001EC + "\x01\xeb\x03\x04\x00\x00\x01\xed" + // 0x01EB0304: 0x000001ED + "\x01\xb7\x03\f\x00\x00\x01\xee" + // 0x01B7030C: 0x000001EE + "\x02\x92\x03\f\x00\x00\x01\xef" + // 0x0292030C: 0x000001EF + "\x00j\x03\f\x00\x00\x01\xf0" + // 0x006A030C: 0x000001F0 + "\x00G\x03\x01\x00\x00\x01\xf4" + // 0x00470301: 0x000001F4 + "\x00g\x03\x01\x00\x00\x01\xf5" + // 0x00670301: 0x000001F5 + "\x00N\x03\x00\x00\x00\x01\xf8" + // 0x004E0300: 0x000001F8 + "\x00n\x03\x00\x00\x00\x01\xf9" + // 0x006E0300: 0x000001F9 + "\x00\xc5\x03\x01\x00\x00\x01\xfa" + // 0x00C50301: 0x000001FA + "\x00\xe5\x03\x01\x00\x00\x01\xfb" + // 0x00E50301: 0x000001FB + "\x00\xc6\x03\x01\x00\x00\x01\xfc" + // 0x00C60301: 0x000001FC + "\x00\xe6\x03\x01\x00\x00\x01\xfd" + // 0x00E60301: 0x000001FD + "\x00\xd8\x03\x01\x00\x00\x01\xfe" + // 0x00D80301: 0x000001FE + "\x00\xf8\x03\x01\x00\x00\x01\xff" + // 0x00F80301: 0x000001FF + "\x00A\x03\x0f\x00\x00\x02\x00" + // 0x0041030F: 0x00000200 + "\x00a\x03\x0f\x00\x00\x02\x01" + // 0x0061030F: 0x00000201 + "\x00A\x03\x11\x00\x00\x02\x02" + // 0x00410311: 0x00000202 + "\x00a\x03\x11\x00\x00\x02\x03" + // 0x00610311: 0x00000203 + "\x00E\x03\x0f\x00\x00\x02\x04" + // 0x0045030F: 0x00000204 + "\x00e\x03\x0f\x00\x00\x02\x05" + // 0x0065030F: 0x00000205 + 
"\x00E\x03\x11\x00\x00\x02\x06" + // 0x00450311: 0x00000206 + "\x00e\x03\x11\x00\x00\x02\a" + // 0x00650311: 0x00000207 + "\x00I\x03\x0f\x00\x00\x02\b" + // 0x0049030F: 0x00000208 + "\x00i\x03\x0f\x00\x00\x02\t" + // 0x0069030F: 0x00000209 + "\x00I\x03\x11\x00\x00\x02\n" + // 0x00490311: 0x0000020A + "\x00i\x03\x11\x00\x00\x02\v" + // 0x00690311: 0x0000020B + "\x00O\x03\x0f\x00\x00\x02\f" + // 0x004F030F: 0x0000020C + "\x00o\x03\x0f\x00\x00\x02\r" + // 0x006F030F: 0x0000020D + "\x00O\x03\x11\x00\x00\x02\x0e" + // 0x004F0311: 0x0000020E + "\x00o\x03\x11\x00\x00\x02\x0f" + // 0x006F0311: 0x0000020F + "\x00R\x03\x0f\x00\x00\x02\x10" + // 0x0052030F: 0x00000210 + "\x00r\x03\x0f\x00\x00\x02\x11" + // 0x0072030F: 0x00000211 + "\x00R\x03\x11\x00\x00\x02\x12" + // 0x00520311: 0x00000212 + "\x00r\x03\x11\x00\x00\x02\x13" + // 0x00720311: 0x00000213 + "\x00U\x03\x0f\x00\x00\x02\x14" + // 0x0055030F: 0x00000214 + "\x00u\x03\x0f\x00\x00\x02\x15" + // 0x0075030F: 0x00000215 + "\x00U\x03\x11\x00\x00\x02\x16" + // 0x00550311: 0x00000216 + "\x00u\x03\x11\x00\x00\x02\x17" + // 0x00750311: 0x00000217 + "\x00S\x03&\x00\x00\x02\x18" + // 0x00530326: 0x00000218 + "\x00s\x03&\x00\x00\x02\x19" + // 0x00730326: 0x00000219 + "\x00T\x03&\x00\x00\x02\x1a" + // 0x00540326: 0x0000021A + "\x00t\x03&\x00\x00\x02\x1b" + // 0x00740326: 0x0000021B + "\x00H\x03\f\x00\x00\x02\x1e" + // 0x0048030C: 0x0000021E + "\x00h\x03\f\x00\x00\x02\x1f" + // 0x0068030C: 0x0000021F + "\x00A\x03\a\x00\x00\x02&" + // 0x00410307: 0x00000226 + "\x00a\x03\a\x00\x00\x02'" + // 0x00610307: 0x00000227 + "\x00E\x03'\x00\x00\x02(" + // 0x00450327: 0x00000228 + "\x00e\x03'\x00\x00\x02)" + // 0x00650327: 0x00000229 + "\x00\xd6\x03\x04\x00\x00\x02*" + // 0x00D60304: 0x0000022A + "\x00\xf6\x03\x04\x00\x00\x02+" + // 0x00F60304: 0x0000022B + "\x00\xd5\x03\x04\x00\x00\x02," + // 0x00D50304: 0x0000022C + "\x00\xf5\x03\x04\x00\x00\x02-" + // 0x00F50304: 0x0000022D + "\x00O\x03\a\x00\x00\x02." 
+ // 0x004F0307: 0x0000022E + "\x00o\x03\a\x00\x00\x02/" + // 0x006F0307: 0x0000022F + "\x02.\x03\x04\x00\x00\x020" + // 0x022E0304: 0x00000230 + "\x02/\x03\x04\x00\x00\x021" + // 0x022F0304: 0x00000231 + "\x00Y\x03\x04\x00\x00\x022" + // 0x00590304: 0x00000232 + "\x00y\x03\x04\x00\x00\x023" + // 0x00790304: 0x00000233 + "\x00\xa8\x03\x01\x00\x00\x03\x85" + // 0x00A80301: 0x00000385 + "\x03\x91\x03\x01\x00\x00\x03\x86" + // 0x03910301: 0x00000386 + "\x03\x95\x03\x01\x00\x00\x03\x88" + // 0x03950301: 0x00000388 + "\x03\x97\x03\x01\x00\x00\x03\x89" + // 0x03970301: 0x00000389 + "\x03\x99\x03\x01\x00\x00\x03\x8a" + // 0x03990301: 0x0000038A + "\x03\x9f\x03\x01\x00\x00\x03\x8c" + // 0x039F0301: 0x0000038C + "\x03\xa5\x03\x01\x00\x00\x03\x8e" + // 0x03A50301: 0x0000038E + "\x03\xa9\x03\x01\x00\x00\x03\x8f" + // 0x03A90301: 0x0000038F + "\x03\xca\x03\x01\x00\x00\x03\x90" + // 0x03CA0301: 0x00000390 + "\x03\x99\x03\b\x00\x00\x03\xaa" + // 0x03990308: 0x000003AA + "\x03\xa5\x03\b\x00\x00\x03\xab" + // 0x03A50308: 0x000003AB + "\x03\xb1\x03\x01\x00\x00\x03\xac" + // 0x03B10301: 0x000003AC + "\x03\xb5\x03\x01\x00\x00\x03\xad" + // 0x03B50301: 0x000003AD + "\x03\xb7\x03\x01\x00\x00\x03\xae" + // 0x03B70301: 0x000003AE + "\x03\xb9\x03\x01\x00\x00\x03\xaf" + // 0x03B90301: 0x000003AF + "\x03\xcb\x03\x01\x00\x00\x03\xb0" + // 0x03CB0301: 0x000003B0 + "\x03\xb9\x03\b\x00\x00\x03\xca" + // 0x03B90308: 0x000003CA + "\x03\xc5\x03\b\x00\x00\x03\xcb" + // 0x03C50308: 0x000003CB + "\x03\xbf\x03\x01\x00\x00\x03\xcc" + // 0x03BF0301: 0x000003CC + "\x03\xc5\x03\x01\x00\x00\x03\xcd" + // 0x03C50301: 0x000003CD + "\x03\xc9\x03\x01\x00\x00\x03\xce" + // 0x03C90301: 0x000003CE + "\x03\xd2\x03\x01\x00\x00\x03\xd3" + // 0x03D20301: 0x000003D3 + "\x03\xd2\x03\b\x00\x00\x03\xd4" + // 0x03D20308: 0x000003D4 + "\x04\x15\x03\x00\x00\x00\x04\x00" + // 0x04150300: 0x00000400 + "\x04\x15\x03\b\x00\x00\x04\x01" + // 0x04150308: 0x00000401 + "\x04\x13\x03\x01\x00\x00\x04\x03" + // 0x04130301: 0x00000403 + "\x04\x06\x03\b\x00\x00\x04\a" + // 0x04060308: 0x00000407 + "\x04\x1a\x03\x01\x00\x00\x04\f" + // 0x041A0301: 0x0000040C + "\x04\x18\x03\x00\x00\x00\x04\r" + // 0x04180300: 0x0000040D + "\x04#\x03\x06\x00\x00\x04\x0e" + // 0x04230306: 0x0000040E + "\x04\x18\x03\x06\x00\x00\x04\x19" + // 0x04180306: 0x00000419 + "\x048\x03\x06\x00\x00\x049" + // 0x04380306: 0x00000439 + "\x045\x03\x00\x00\x00\x04P" + // 0x04350300: 0x00000450 + "\x045\x03\b\x00\x00\x04Q" + // 0x04350308: 0x00000451 + "\x043\x03\x01\x00\x00\x04S" + // 0x04330301: 0x00000453 + "\x04V\x03\b\x00\x00\x04W" + // 0x04560308: 0x00000457 + "\x04:\x03\x01\x00\x00\x04\\" + // 0x043A0301: 0x0000045C + "\x048\x03\x00\x00\x00\x04]" + // 0x04380300: 0x0000045D + "\x04C\x03\x06\x00\x00\x04^" + // 0x04430306: 0x0000045E + "\x04t\x03\x0f\x00\x00\x04v" + // 0x0474030F: 0x00000476 + "\x04u\x03\x0f\x00\x00\x04w" + // 0x0475030F: 0x00000477 + "\x04\x16\x03\x06\x00\x00\x04\xc1" + // 0x04160306: 0x000004C1 + "\x046\x03\x06\x00\x00\x04\xc2" + // 0x04360306: 0x000004C2 + "\x04\x10\x03\x06\x00\x00\x04\xd0" + // 0x04100306: 0x000004D0 + "\x040\x03\x06\x00\x00\x04\xd1" + // 0x04300306: 0x000004D1 + "\x04\x10\x03\b\x00\x00\x04\xd2" + // 0x04100308: 0x000004D2 + "\x040\x03\b\x00\x00\x04\xd3" + // 0x04300308: 0x000004D3 + "\x04\x15\x03\x06\x00\x00\x04\xd6" + // 0x04150306: 0x000004D6 + "\x045\x03\x06\x00\x00\x04\xd7" + // 0x04350306: 0x000004D7 + "\x04\xd8\x03\b\x00\x00\x04\xda" + // 0x04D80308: 0x000004DA + "\x04\xd9\x03\b\x00\x00\x04\xdb" + // 0x04D90308: 0x000004DB + 
"\x04\x16\x03\b\x00\x00\x04\xdc" + // 0x04160308: 0x000004DC + "\x046\x03\b\x00\x00\x04\xdd" + // 0x04360308: 0x000004DD + "\x04\x17\x03\b\x00\x00\x04\xde" + // 0x04170308: 0x000004DE + "\x047\x03\b\x00\x00\x04\xdf" + // 0x04370308: 0x000004DF + "\x04\x18\x03\x04\x00\x00\x04\xe2" + // 0x04180304: 0x000004E2 + "\x048\x03\x04\x00\x00\x04\xe3" + // 0x04380304: 0x000004E3 + "\x04\x18\x03\b\x00\x00\x04\xe4" + // 0x04180308: 0x000004E4 + "\x048\x03\b\x00\x00\x04\xe5" + // 0x04380308: 0x000004E5 + "\x04\x1e\x03\b\x00\x00\x04\xe6" + // 0x041E0308: 0x000004E6 + "\x04>\x03\b\x00\x00\x04\xe7" + // 0x043E0308: 0x000004E7 + "\x04\xe8\x03\b\x00\x00\x04\xea" + // 0x04E80308: 0x000004EA + "\x04\xe9\x03\b\x00\x00\x04\xeb" + // 0x04E90308: 0x000004EB + "\x04-\x03\b\x00\x00\x04\xec" + // 0x042D0308: 0x000004EC + "\x04M\x03\b\x00\x00\x04\xed" + // 0x044D0308: 0x000004ED + "\x04#\x03\x04\x00\x00\x04\xee" + // 0x04230304: 0x000004EE + "\x04C\x03\x04\x00\x00\x04\xef" + // 0x04430304: 0x000004EF + "\x04#\x03\b\x00\x00\x04\xf0" + // 0x04230308: 0x000004F0 + "\x04C\x03\b\x00\x00\x04\xf1" + // 0x04430308: 0x000004F1 + "\x04#\x03\v\x00\x00\x04\xf2" + // 0x0423030B: 0x000004F2 + "\x04C\x03\v\x00\x00\x04\xf3" + // 0x0443030B: 0x000004F3 + "\x04'\x03\b\x00\x00\x04\xf4" + // 0x04270308: 0x000004F4 + "\x04G\x03\b\x00\x00\x04\xf5" + // 0x04470308: 0x000004F5 + "\x04+\x03\b\x00\x00\x04\xf8" + // 0x042B0308: 0x000004F8 + "\x04K\x03\b\x00\x00\x04\xf9" + // 0x044B0308: 0x000004F9 + "\x06'\x06S\x00\x00\x06\"" + // 0x06270653: 0x00000622 + "\x06'\x06T\x00\x00\x06#" + // 0x06270654: 0x00000623 + "\x06H\x06T\x00\x00\x06$" + // 0x06480654: 0x00000624 + "\x06'\x06U\x00\x00\x06%" + // 0x06270655: 0x00000625 + "\x06J\x06T\x00\x00\x06&" + // 0x064A0654: 0x00000626 + "\x06\xd5\x06T\x00\x00\x06\xc0" + // 0x06D50654: 0x000006C0 + "\x06\xc1\x06T\x00\x00\x06\xc2" + // 0x06C10654: 0x000006C2 + "\x06\xd2\x06T\x00\x00\x06\xd3" + // 0x06D20654: 0x000006D3 + "\t(\t<\x00\x00\t)" + // 0x0928093C: 0x00000929 + "\t0\t<\x00\x00\t1" + // 0x0930093C: 0x00000931 + "\t3\t<\x00\x00\t4" + // 0x0933093C: 0x00000934 + "\t\xc7\t\xbe\x00\x00\t\xcb" + // 0x09C709BE: 0x000009CB + "\t\xc7\t\xd7\x00\x00\t\xcc" + // 0x09C709D7: 0x000009CC + "\vG\vV\x00\x00\vH" + // 0x0B470B56: 0x00000B48 + "\vG\v>\x00\x00\vK" + // 0x0B470B3E: 0x00000B4B + "\vG\vW\x00\x00\vL" + // 0x0B470B57: 0x00000B4C + "\v\x92\v\xd7\x00\x00\v\x94" + // 0x0B920BD7: 0x00000B94 + "\v\xc6\v\xbe\x00\x00\v\xca" + // 0x0BC60BBE: 0x00000BCA + "\v\xc7\v\xbe\x00\x00\v\xcb" + // 0x0BC70BBE: 0x00000BCB + "\v\xc6\v\xd7\x00\x00\v\xcc" + // 0x0BC60BD7: 0x00000BCC + "\fF\fV\x00\x00\fH" + // 0x0C460C56: 0x00000C48 + "\f\xbf\f\xd5\x00\x00\f\xc0" + // 0x0CBF0CD5: 0x00000CC0 + "\f\xc6\f\xd5\x00\x00\f\xc7" + // 0x0CC60CD5: 0x00000CC7 + "\f\xc6\f\xd6\x00\x00\f\xc8" + // 0x0CC60CD6: 0x00000CC8 + "\f\xc6\f\xc2\x00\x00\f\xca" + // 0x0CC60CC2: 0x00000CCA + "\f\xca\f\xd5\x00\x00\f\xcb" + // 0x0CCA0CD5: 0x00000CCB + "\rF\r>\x00\x00\rJ" + // 0x0D460D3E: 0x00000D4A + "\rG\r>\x00\x00\rK" + // 0x0D470D3E: 0x00000D4B + "\rF\rW\x00\x00\rL" + // 0x0D460D57: 0x00000D4C + "\r\xd9\r\xca\x00\x00\r\xda" + // 0x0DD90DCA: 0x00000DDA + "\r\xd9\r\xcf\x00\x00\r\xdc" + // 0x0DD90DCF: 0x00000DDC + "\r\xdc\r\xca\x00\x00\r\xdd" + // 0x0DDC0DCA: 0x00000DDD + "\r\xd9\r\xdf\x00\x00\r\xde" + // 0x0DD90DDF: 0x00000DDE + "\x10%\x10.\x00\x00\x10&" + // 0x1025102E: 0x00001026 + "\x1b\x05\x1b5\x00\x00\x1b\x06" + // 0x1B051B35: 0x00001B06 + "\x1b\a\x1b5\x00\x00\x1b\b" + // 0x1B071B35: 0x00001B08 + "\x1b\t\x1b5\x00\x00\x1b\n" + // 0x1B091B35: 0x00001B0A + 
"\x1b\v\x1b5\x00\x00\x1b\f" + // 0x1B0B1B35: 0x00001B0C + "\x1b\r\x1b5\x00\x00\x1b\x0e" + // 0x1B0D1B35: 0x00001B0E + "\x1b\x11\x1b5\x00\x00\x1b\x12" + // 0x1B111B35: 0x00001B12 + "\x1b:\x1b5\x00\x00\x1b;" + // 0x1B3A1B35: 0x00001B3B + "\x1b<\x1b5\x00\x00\x1b=" + // 0x1B3C1B35: 0x00001B3D + "\x1b>\x1b5\x00\x00\x1b@" + // 0x1B3E1B35: 0x00001B40 + "\x1b?\x1b5\x00\x00\x1bA" + // 0x1B3F1B35: 0x00001B41 + "\x1bB\x1b5\x00\x00\x1bC" + // 0x1B421B35: 0x00001B43 + "\x00A\x03%\x00\x00\x1e\x00" + // 0x00410325: 0x00001E00 + "\x00a\x03%\x00\x00\x1e\x01" + // 0x00610325: 0x00001E01 + "\x00B\x03\a\x00\x00\x1e\x02" + // 0x00420307: 0x00001E02 + "\x00b\x03\a\x00\x00\x1e\x03" + // 0x00620307: 0x00001E03 + "\x00B\x03#\x00\x00\x1e\x04" + // 0x00420323: 0x00001E04 + "\x00b\x03#\x00\x00\x1e\x05" + // 0x00620323: 0x00001E05 + "\x00B\x031\x00\x00\x1e\x06" + // 0x00420331: 0x00001E06 + "\x00b\x031\x00\x00\x1e\a" + // 0x00620331: 0x00001E07 + "\x00\xc7\x03\x01\x00\x00\x1e\b" + // 0x00C70301: 0x00001E08 + "\x00\xe7\x03\x01\x00\x00\x1e\t" + // 0x00E70301: 0x00001E09 + "\x00D\x03\a\x00\x00\x1e\n" + // 0x00440307: 0x00001E0A + "\x00d\x03\a\x00\x00\x1e\v" + // 0x00640307: 0x00001E0B + "\x00D\x03#\x00\x00\x1e\f" + // 0x00440323: 0x00001E0C + "\x00d\x03#\x00\x00\x1e\r" + // 0x00640323: 0x00001E0D + "\x00D\x031\x00\x00\x1e\x0e" + // 0x00440331: 0x00001E0E + "\x00d\x031\x00\x00\x1e\x0f" + // 0x00640331: 0x00001E0F + "\x00D\x03'\x00\x00\x1e\x10" + // 0x00440327: 0x00001E10 + "\x00d\x03'\x00\x00\x1e\x11" + // 0x00640327: 0x00001E11 + "\x00D\x03-\x00\x00\x1e\x12" + // 0x0044032D: 0x00001E12 + "\x00d\x03-\x00\x00\x1e\x13" + // 0x0064032D: 0x00001E13 + "\x01\x12\x03\x00\x00\x00\x1e\x14" + // 0x01120300: 0x00001E14 + "\x01\x13\x03\x00\x00\x00\x1e\x15" + // 0x01130300: 0x00001E15 + "\x01\x12\x03\x01\x00\x00\x1e\x16" + // 0x01120301: 0x00001E16 + "\x01\x13\x03\x01\x00\x00\x1e\x17" + // 0x01130301: 0x00001E17 + "\x00E\x03-\x00\x00\x1e\x18" + // 0x0045032D: 0x00001E18 + "\x00e\x03-\x00\x00\x1e\x19" + // 0x0065032D: 0x00001E19 + "\x00E\x030\x00\x00\x1e\x1a" + // 0x00450330: 0x00001E1A + "\x00e\x030\x00\x00\x1e\x1b" + // 0x00650330: 0x00001E1B + "\x02(\x03\x06\x00\x00\x1e\x1c" + // 0x02280306: 0x00001E1C + "\x02)\x03\x06\x00\x00\x1e\x1d" + // 0x02290306: 0x00001E1D + "\x00F\x03\a\x00\x00\x1e\x1e" + // 0x00460307: 0x00001E1E + "\x00f\x03\a\x00\x00\x1e\x1f" + // 0x00660307: 0x00001E1F + "\x00G\x03\x04\x00\x00\x1e " + // 0x00470304: 0x00001E20 + "\x00g\x03\x04\x00\x00\x1e!" + // 0x00670304: 0x00001E21 + "\x00H\x03\a\x00\x00\x1e\"" + // 0x00480307: 0x00001E22 + "\x00h\x03\a\x00\x00\x1e#" + // 0x00680307: 0x00001E23 + "\x00H\x03#\x00\x00\x1e$" + // 0x00480323: 0x00001E24 + "\x00h\x03#\x00\x00\x1e%" + // 0x00680323: 0x00001E25 + "\x00H\x03\b\x00\x00\x1e&" + // 0x00480308: 0x00001E26 + "\x00h\x03\b\x00\x00\x1e'" + // 0x00680308: 0x00001E27 + "\x00H\x03'\x00\x00\x1e(" + // 0x00480327: 0x00001E28 + "\x00h\x03'\x00\x00\x1e)" + // 0x00680327: 0x00001E29 + "\x00H\x03.\x00\x00\x1e*" + // 0x0048032E: 0x00001E2A + "\x00h\x03.\x00\x00\x1e+" + // 0x0068032E: 0x00001E2B + "\x00I\x030\x00\x00\x1e," + // 0x00490330: 0x00001E2C + "\x00i\x030\x00\x00\x1e-" + // 0x00690330: 0x00001E2D + "\x00\xcf\x03\x01\x00\x00\x1e." 
+ // 0x00CF0301: 0x00001E2E + "\x00\xef\x03\x01\x00\x00\x1e/" + // 0x00EF0301: 0x00001E2F + "\x00K\x03\x01\x00\x00\x1e0" + // 0x004B0301: 0x00001E30 + "\x00k\x03\x01\x00\x00\x1e1" + // 0x006B0301: 0x00001E31 + "\x00K\x03#\x00\x00\x1e2" + // 0x004B0323: 0x00001E32 + "\x00k\x03#\x00\x00\x1e3" + // 0x006B0323: 0x00001E33 + "\x00K\x031\x00\x00\x1e4" + // 0x004B0331: 0x00001E34 + "\x00k\x031\x00\x00\x1e5" + // 0x006B0331: 0x00001E35 + "\x00L\x03#\x00\x00\x1e6" + // 0x004C0323: 0x00001E36 + "\x00l\x03#\x00\x00\x1e7" + // 0x006C0323: 0x00001E37 + "\x1e6\x03\x04\x00\x00\x1e8" + // 0x1E360304: 0x00001E38 + "\x1e7\x03\x04\x00\x00\x1e9" + // 0x1E370304: 0x00001E39 + "\x00L\x031\x00\x00\x1e:" + // 0x004C0331: 0x00001E3A + "\x00l\x031\x00\x00\x1e;" + // 0x006C0331: 0x00001E3B + "\x00L\x03-\x00\x00\x1e<" + // 0x004C032D: 0x00001E3C + "\x00l\x03-\x00\x00\x1e=" + // 0x006C032D: 0x00001E3D + "\x00M\x03\x01\x00\x00\x1e>" + // 0x004D0301: 0x00001E3E + "\x00m\x03\x01\x00\x00\x1e?" + // 0x006D0301: 0x00001E3F + "\x00M\x03\a\x00\x00\x1e@" + // 0x004D0307: 0x00001E40 + "\x00m\x03\a\x00\x00\x1eA" + // 0x006D0307: 0x00001E41 + "\x00M\x03#\x00\x00\x1eB" + // 0x004D0323: 0x00001E42 + "\x00m\x03#\x00\x00\x1eC" + // 0x006D0323: 0x00001E43 + "\x00N\x03\a\x00\x00\x1eD" + // 0x004E0307: 0x00001E44 + "\x00n\x03\a\x00\x00\x1eE" + // 0x006E0307: 0x00001E45 + "\x00N\x03#\x00\x00\x1eF" + // 0x004E0323: 0x00001E46 + "\x00n\x03#\x00\x00\x1eG" + // 0x006E0323: 0x00001E47 + "\x00N\x031\x00\x00\x1eH" + // 0x004E0331: 0x00001E48 + "\x00n\x031\x00\x00\x1eI" + // 0x006E0331: 0x00001E49 + "\x00N\x03-\x00\x00\x1eJ" + // 0x004E032D: 0x00001E4A + "\x00n\x03-\x00\x00\x1eK" + // 0x006E032D: 0x00001E4B + "\x00\xd5\x03\x01\x00\x00\x1eL" + // 0x00D50301: 0x00001E4C + "\x00\xf5\x03\x01\x00\x00\x1eM" + // 0x00F50301: 0x00001E4D + "\x00\xd5\x03\b\x00\x00\x1eN" + // 0x00D50308: 0x00001E4E + "\x00\xf5\x03\b\x00\x00\x1eO" + // 0x00F50308: 0x00001E4F + "\x01L\x03\x00\x00\x00\x1eP" + // 0x014C0300: 0x00001E50 + "\x01M\x03\x00\x00\x00\x1eQ" + // 0x014D0300: 0x00001E51 + "\x01L\x03\x01\x00\x00\x1eR" + // 0x014C0301: 0x00001E52 + "\x01M\x03\x01\x00\x00\x1eS" + // 0x014D0301: 0x00001E53 + "\x00P\x03\x01\x00\x00\x1eT" + // 0x00500301: 0x00001E54 + "\x00p\x03\x01\x00\x00\x1eU" + // 0x00700301: 0x00001E55 + "\x00P\x03\a\x00\x00\x1eV" + // 0x00500307: 0x00001E56 + "\x00p\x03\a\x00\x00\x1eW" + // 0x00700307: 0x00001E57 + "\x00R\x03\a\x00\x00\x1eX" + // 0x00520307: 0x00001E58 + "\x00r\x03\a\x00\x00\x1eY" + // 0x00720307: 0x00001E59 + "\x00R\x03#\x00\x00\x1eZ" + // 0x00520323: 0x00001E5A + "\x00r\x03#\x00\x00\x1e[" + // 0x00720323: 0x00001E5B + "\x1eZ\x03\x04\x00\x00\x1e\\" + // 0x1E5A0304: 0x00001E5C + "\x1e[\x03\x04\x00\x00\x1e]" + // 0x1E5B0304: 0x00001E5D + "\x00R\x031\x00\x00\x1e^" + // 0x00520331: 0x00001E5E + "\x00r\x031\x00\x00\x1e_" + // 0x00720331: 0x00001E5F + "\x00S\x03\a\x00\x00\x1e`" + // 0x00530307: 0x00001E60 + "\x00s\x03\a\x00\x00\x1ea" + // 0x00730307: 0x00001E61 + "\x00S\x03#\x00\x00\x1eb" + // 0x00530323: 0x00001E62 + "\x00s\x03#\x00\x00\x1ec" + // 0x00730323: 0x00001E63 + "\x01Z\x03\a\x00\x00\x1ed" + // 0x015A0307: 0x00001E64 + "\x01[\x03\a\x00\x00\x1ee" + // 0x015B0307: 0x00001E65 + "\x01`\x03\a\x00\x00\x1ef" + // 0x01600307: 0x00001E66 + "\x01a\x03\a\x00\x00\x1eg" + // 0x01610307: 0x00001E67 + "\x1eb\x03\a\x00\x00\x1eh" + // 0x1E620307: 0x00001E68 + "\x1ec\x03\a\x00\x00\x1ei" + // 0x1E630307: 0x00001E69 + "\x00T\x03\a\x00\x00\x1ej" + // 0x00540307: 0x00001E6A + "\x00t\x03\a\x00\x00\x1ek" + // 0x00740307: 0x00001E6B + 
"\x00T\x03#\x00\x00\x1el" + // 0x00540323: 0x00001E6C + "\x00t\x03#\x00\x00\x1em" + // 0x00740323: 0x00001E6D + "\x00T\x031\x00\x00\x1en" + // 0x00540331: 0x00001E6E + "\x00t\x031\x00\x00\x1eo" + // 0x00740331: 0x00001E6F + "\x00T\x03-\x00\x00\x1ep" + // 0x0054032D: 0x00001E70 + "\x00t\x03-\x00\x00\x1eq" + // 0x0074032D: 0x00001E71 + "\x00U\x03$\x00\x00\x1er" + // 0x00550324: 0x00001E72 + "\x00u\x03$\x00\x00\x1es" + // 0x00750324: 0x00001E73 + "\x00U\x030\x00\x00\x1et" + // 0x00550330: 0x00001E74 + "\x00u\x030\x00\x00\x1eu" + // 0x00750330: 0x00001E75 + "\x00U\x03-\x00\x00\x1ev" + // 0x0055032D: 0x00001E76 + "\x00u\x03-\x00\x00\x1ew" + // 0x0075032D: 0x00001E77 + "\x01h\x03\x01\x00\x00\x1ex" + // 0x01680301: 0x00001E78 + "\x01i\x03\x01\x00\x00\x1ey" + // 0x01690301: 0x00001E79 + "\x01j\x03\b\x00\x00\x1ez" + // 0x016A0308: 0x00001E7A + "\x01k\x03\b\x00\x00\x1e{" + // 0x016B0308: 0x00001E7B + "\x00V\x03\x03\x00\x00\x1e|" + // 0x00560303: 0x00001E7C + "\x00v\x03\x03\x00\x00\x1e}" + // 0x00760303: 0x00001E7D + "\x00V\x03#\x00\x00\x1e~" + // 0x00560323: 0x00001E7E + "\x00v\x03#\x00\x00\x1e\u007f" + // 0x00760323: 0x00001E7F + "\x00W\x03\x00\x00\x00\x1e\x80" + // 0x00570300: 0x00001E80 + "\x00w\x03\x00\x00\x00\x1e\x81" + // 0x00770300: 0x00001E81 + "\x00W\x03\x01\x00\x00\x1e\x82" + // 0x00570301: 0x00001E82 + "\x00w\x03\x01\x00\x00\x1e\x83" + // 0x00770301: 0x00001E83 + "\x00W\x03\b\x00\x00\x1e\x84" + // 0x00570308: 0x00001E84 + "\x00w\x03\b\x00\x00\x1e\x85" + // 0x00770308: 0x00001E85 + "\x00W\x03\a\x00\x00\x1e\x86" + // 0x00570307: 0x00001E86 + "\x00w\x03\a\x00\x00\x1e\x87" + // 0x00770307: 0x00001E87 + "\x00W\x03#\x00\x00\x1e\x88" + // 0x00570323: 0x00001E88 + "\x00w\x03#\x00\x00\x1e\x89" + // 0x00770323: 0x00001E89 + "\x00X\x03\a\x00\x00\x1e\x8a" + // 0x00580307: 0x00001E8A + "\x00x\x03\a\x00\x00\x1e\x8b" + // 0x00780307: 0x00001E8B + "\x00X\x03\b\x00\x00\x1e\x8c" + // 0x00580308: 0x00001E8C + "\x00x\x03\b\x00\x00\x1e\x8d" + // 0x00780308: 0x00001E8D + "\x00Y\x03\a\x00\x00\x1e\x8e" + // 0x00590307: 0x00001E8E + "\x00y\x03\a\x00\x00\x1e\x8f" + // 0x00790307: 0x00001E8F + "\x00Z\x03\x02\x00\x00\x1e\x90" + // 0x005A0302: 0x00001E90 + "\x00z\x03\x02\x00\x00\x1e\x91" + // 0x007A0302: 0x00001E91 + "\x00Z\x03#\x00\x00\x1e\x92" + // 0x005A0323: 0x00001E92 + "\x00z\x03#\x00\x00\x1e\x93" + // 0x007A0323: 0x00001E93 + "\x00Z\x031\x00\x00\x1e\x94" + // 0x005A0331: 0x00001E94 + "\x00z\x031\x00\x00\x1e\x95" + // 0x007A0331: 0x00001E95 + "\x00h\x031\x00\x00\x1e\x96" + // 0x00680331: 0x00001E96 + "\x00t\x03\b\x00\x00\x1e\x97" + // 0x00740308: 0x00001E97 + "\x00w\x03\n\x00\x00\x1e\x98" + // 0x0077030A: 0x00001E98 + "\x00y\x03\n\x00\x00\x1e\x99" + // 0x0079030A: 0x00001E99 + "\x01\u007f\x03\a\x00\x00\x1e\x9b" + // 0x017F0307: 0x00001E9B + "\x00A\x03#\x00\x00\x1e\xa0" + // 0x00410323: 0x00001EA0 + "\x00a\x03#\x00\x00\x1e\xa1" + // 0x00610323: 0x00001EA1 + "\x00A\x03\t\x00\x00\x1e\xa2" + // 0x00410309: 0x00001EA2 + "\x00a\x03\t\x00\x00\x1e\xa3" + // 0x00610309: 0x00001EA3 + "\x00\xc2\x03\x01\x00\x00\x1e\xa4" + // 0x00C20301: 0x00001EA4 + "\x00\xe2\x03\x01\x00\x00\x1e\xa5" + // 0x00E20301: 0x00001EA5 + "\x00\xc2\x03\x00\x00\x00\x1e\xa6" + // 0x00C20300: 0x00001EA6 + "\x00\xe2\x03\x00\x00\x00\x1e\xa7" + // 0x00E20300: 0x00001EA7 + "\x00\xc2\x03\t\x00\x00\x1e\xa8" + // 0x00C20309: 0x00001EA8 + "\x00\xe2\x03\t\x00\x00\x1e\xa9" + // 0x00E20309: 0x00001EA9 + "\x00\xc2\x03\x03\x00\x00\x1e\xaa" + // 0x00C20303: 0x00001EAA + "\x00\xe2\x03\x03\x00\x00\x1e\xab" + // 0x00E20303: 0x00001EAB + 
"\x1e\xa0\x03\x02\x00\x00\x1e\xac" + // 0x1EA00302: 0x00001EAC + "\x1e\xa1\x03\x02\x00\x00\x1e\xad" + // 0x1EA10302: 0x00001EAD + "\x01\x02\x03\x01\x00\x00\x1e\xae" + // 0x01020301: 0x00001EAE + "\x01\x03\x03\x01\x00\x00\x1e\xaf" + // 0x01030301: 0x00001EAF + "\x01\x02\x03\x00\x00\x00\x1e\xb0" + // 0x01020300: 0x00001EB0 + "\x01\x03\x03\x00\x00\x00\x1e\xb1" + // 0x01030300: 0x00001EB1 + "\x01\x02\x03\t\x00\x00\x1e\xb2" + // 0x01020309: 0x00001EB2 + "\x01\x03\x03\t\x00\x00\x1e\xb3" + // 0x01030309: 0x00001EB3 + "\x01\x02\x03\x03\x00\x00\x1e\xb4" + // 0x01020303: 0x00001EB4 + "\x01\x03\x03\x03\x00\x00\x1e\xb5" + // 0x01030303: 0x00001EB5 + "\x1e\xa0\x03\x06\x00\x00\x1e\xb6" + // 0x1EA00306: 0x00001EB6 + "\x1e\xa1\x03\x06\x00\x00\x1e\xb7" + // 0x1EA10306: 0x00001EB7 + "\x00E\x03#\x00\x00\x1e\xb8" + // 0x00450323: 0x00001EB8 + "\x00e\x03#\x00\x00\x1e\xb9" + // 0x00650323: 0x00001EB9 + "\x00E\x03\t\x00\x00\x1e\xba" + // 0x00450309: 0x00001EBA + "\x00e\x03\t\x00\x00\x1e\xbb" + // 0x00650309: 0x00001EBB + "\x00E\x03\x03\x00\x00\x1e\xbc" + // 0x00450303: 0x00001EBC + "\x00e\x03\x03\x00\x00\x1e\xbd" + // 0x00650303: 0x00001EBD + "\x00\xca\x03\x01\x00\x00\x1e\xbe" + // 0x00CA0301: 0x00001EBE + "\x00\xea\x03\x01\x00\x00\x1e\xbf" + // 0x00EA0301: 0x00001EBF + "\x00\xca\x03\x00\x00\x00\x1e\xc0" + // 0x00CA0300: 0x00001EC0 + "\x00\xea\x03\x00\x00\x00\x1e\xc1" + // 0x00EA0300: 0x00001EC1 + "\x00\xca\x03\t\x00\x00\x1e\xc2" + // 0x00CA0309: 0x00001EC2 + "\x00\xea\x03\t\x00\x00\x1e\xc3" + // 0x00EA0309: 0x00001EC3 + "\x00\xca\x03\x03\x00\x00\x1e\xc4" + // 0x00CA0303: 0x00001EC4 + "\x00\xea\x03\x03\x00\x00\x1e\xc5" + // 0x00EA0303: 0x00001EC5 + "\x1e\xb8\x03\x02\x00\x00\x1e\xc6" + // 0x1EB80302: 0x00001EC6 + "\x1e\xb9\x03\x02\x00\x00\x1e\xc7" + // 0x1EB90302: 0x00001EC7 + "\x00I\x03\t\x00\x00\x1e\xc8" + // 0x00490309: 0x00001EC8 + "\x00i\x03\t\x00\x00\x1e\xc9" + // 0x00690309: 0x00001EC9 + "\x00I\x03#\x00\x00\x1e\xca" + // 0x00490323: 0x00001ECA + "\x00i\x03#\x00\x00\x1e\xcb" + // 0x00690323: 0x00001ECB + "\x00O\x03#\x00\x00\x1e\xcc" + // 0x004F0323: 0x00001ECC + "\x00o\x03#\x00\x00\x1e\xcd" + // 0x006F0323: 0x00001ECD + "\x00O\x03\t\x00\x00\x1e\xce" + // 0x004F0309: 0x00001ECE + "\x00o\x03\t\x00\x00\x1e\xcf" + // 0x006F0309: 0x00001ECF + "\x00\xd4\x03\x01\x00\x00\x1e\xd0" + // 0x00D40301: 0x00001ED0 + "\x00\xf4\x03\x01\x00\x00\x1e\xd1" + // 0x00F40301: 0x00001ED1 + "\x00\xd4\x03\x00\x00\x00\x1e\xd2" + // 0x00D40300: 0x00001ED2 + "\x00\xf4\x03\x00\x00\x00\x1e\xd3" + // 0x00F40300: 0x00001ED3 + "\x00\xd4\x03\t\x00\x00\x1e\xd4" + // 0x00D40309: 0x00001ED4 + "\x00\xf4\x03\t\x00\x00\x1e\xd5" + // 0x00F40309: 0x00001ED5 + "\x00\xd4\x03\x03\x00\x00\x1e\xd6" + // 0x00D40303: 0x00001ED6 + "\x00\xf4\x03\x03\x00\x00\x1e\xd7" + // 0x00F40303: 0x00001ED7 + "\x1e\xcc\x03\x02\x00\x00\x1e\xd8" + // 0x1ECC0302: 0x00001ED8 + "\x1e\xcd\x03\x02\x00\x00\x1e\xd9" + // 0x1ECD0302: 0x00001ED9 + "\x01\xa0\x03\x01\x00\x00\x1e\xda" + // 0x01A00301: 0x00001EDA + "\x01\xa1\x03\x01\x00\x00\x1e\xdb" + // 0x01A10301: 0x00001EDB + "\x01\xa0\x03\x00\x00\x00\x1e\xdc" + // 0x01A00300: 0x00001EDC + "\x01\xa1\x03\x00\x00\x00\x1e\xdd" + // 0x01A10300: 0x00001EDD + "\x01\xa0\x03\t\x00\x00\x1e\xde" + // 0x01A00309: 0x00001EDE + "\x01\xa1\x03\t\x00\x00\x1e\xdf" + // 0x01A10309: 0x00001EDF + "\x01\xa0\x03\x03\x00\x00\x1e\xe0" + // 0x01A00303: 0x00001EE0 + "\x01\xa1\x03\x03\x00\x00\x1e\xe1" + // 0x01A10303: 0x00001EE1 + "\x01\xa0\x03#\x00\x00\x1e\xe2" + // 0x01A00323: 0x00001EE2 + "\x01\xa1\x03#\x00\x00\x1e\xe3" + // 0x01A10323: 0x00001EE3 + 
"\x00U\x03#\x00\x00\x1e\xe4" + // 0x00550323: 0x00001EE4 + "\x00u\x03#\x00\x00\x1e\xe5" + // 0x00750323: 0x00001EE5 + "\x00U\x03\t\x00\x00\x1e\xe6" + // 0x00550309: 0x00001EE6 + "\x00u\x03\t\x00\x00\x1e\xe7" + // 0x00750309: 0x00001EE7 + "\x01\xaf\x03\x01\x00\x00\x1e\xe8" + // 0x01AF0301: 0x00001EE8 + "\x01\xb0\x03\x01\x00\x00\x1e\xe9" + // 0x01B00301: 0x00001EE9 + "\x01\xaf\x03\x00\x00\x00\x1e\xea" + // 0x01AF0300: 0x00001EEA + "\x01\xb0\x03\x00\x00\x00\x1e\xeb" + // 0x01B00300: 0x00001EEB + "\x01\xaf\x03\t\x00\x00\x1e\xec" + // 0x01AF0309: 0x00001EEC + "\x01\xb0\x03\t\x00\x00\x1e\xed" + // 0x01B00309: 0x00001EED + "\x01\xaf\x03\x03\x00\x00\x1e\xee" + // 0x01AF0303: 0x00001EEE + "\x01\xb0\x03\x03\x00\x00\x1e\xef" + // 0x01B00303: 0x00001EEF + "\x01\xaf\x03#\x00\x00\x1e\xf0" + // 0x01AF0323: 0x00001EF0 + "\x01\xb0\x03#\x00\x00\x1e\xf1" + // 0x01B00323: 0x00001EF1 + "\x00Y\x03\x00\x00\x00\x1e\xf2" + // 0x00590300: 0x00001EF2 + "\x00y\x03\x00\x00\x00\x1e\xf3" + // 0x00790300: 0x00001EF3 + "\x00Y\x03#\x00\x00\x1e\xf4" + // 0x00590323: 0x00001EF4 + "\x00y\x03#\x00\x00\x1e\xf5" + // 0x00790323: 0x00001EF5 + "\x00Y\x03\t\x00\x00\x1e\xf6" + // 0x00590309: 0x00001EF6 + "\x00y\x03\t\x00\x00\x1e\xf7" + // 0x00790309: 0x00001EF7 + "\x00Y\x03\x03\x00\x00\x1e\xf8" + // 0x00590303: 0x00001EF8 + "\x00y\x03\x03\x00\x00\x1e\xf9" + // 0x00790303: 0x00001EF9 + "\x03\xb1\x03\x13\x00\x00\x1f\x00" + // 0x03B10313: 0x00001F00 + "\x03\xb1\x03\x14\x00\x00\x1f\x01" + // 0x03B10314: 0x00001F01 + "\x1f\x00\x03\x00\x00\x00\x1f\x02" + // 0x1F000300: 0x00001F02 + "\x1f\x01\x03\x00\x00\x00\x1f\x03" + // 0x1F010300: 0x00001F03 + "\x1f\x00\x03\x01\x00\x00\x1f\x04" + // 0x1F000301: 0x00001F04 + "\x1f\x01\x03\x01\x00\x00\x1f\x05" + // 0x1F010301: 0x00001F05 + "\x1f\x00\x03B\x00\x00\x1f\x06" + // 0x1F000342: 0x00001F06 + "\x1f\x01\x03B\x00\x00\x1f\a" + // 0x1F010342: 0x00001F07 + "\x03\x91\x03\x13\x00\x00\x1f\b" + // 0x03910313: 0x00001F08 + "\x03\x91\x03\x14\x00\x00\x1f\t" + // 0x03910314: 0x00001F09 + "\x1f\b\x03\x00\x00\x00\x1f\n" + // 0x1F080300: 0x00001F0A + "\x1f\t\x03\x00\x00\x00\x1f\v" + // 0x1F090300: 0x00001F0B + "\x1f\b\x03\x01\x00\x00\x1f\f" + // 0x1F080301: 0x00001F0C + "\x1f\t\x03\x01\x00\x00\x1f\r" + // 0x1F090301: 0x00001F0D + "\x1f\b\x03B\x00\x00\x1f\x0e" + // 0x1F080342: 0x00001F0E + "\x1f\t\x03B\x00\x00\x1f\x0f" + // 0x1F090342: 0x00001F0F + "\x03\xb5\x03\x13\x00\x00\x1f\x10" + // 0x03B50313: 0x00001F10 + "\x03\xb5\x03\x14\x00\x00\x1f\x11" + // 0x03B50314: 0x00001F11 + "\x1f\x10\x03\x00\x00\x00\x1f\x12" + // 0x1F100300: 0x00001F12 + "\x1f\x11\x03\x00\x00\x00\x1f\x13" + // 0x1F110300: 0x00001F13 + "\x1f\x10\x03\x01\x00\x00\x1f\x14" + // 0x1F100301: 0x00001F14 + "\x1f\x11\x03\x01\x00\x00\x1f\x15" + // 0x1F110301: 0x00001F15 + "\x03\x95\x03\x13\x00\x00\x1f\x18" + // 0x03950313: 0x00001F18 + "\x03\x95\x03\x14\x00\x00\x1f\x19" + // 0x03950314: 0x00001F19 + "\x1f\x18\x03\x00\x00\x00\x1f\x1a" + // 0x1F180300: 0x00001F1A + "\x1f\x19\x03\x00\x00\x00\x1f\x1b" + // 0x1F190300: 0x00001F1B + "\x1f\x18\x03\x01\x00\x00\x1f\x1c" + // 0x1F180301: 0x00001F1C + "\x1f\x19\x03\x01\x00\x00\x1f\x1d" + // 0x1F190301: 0x00001F1D + "\x03\xb7\x03\x13\x00\x00\x1f " + // 0x03B70313: 0x00001F20 + "\x03\xb7\x03\x14\x00\x00\x1f!" 
+ // 0x03B70314: 0x00001F21 + "\x1f \x03\x00\x00\x00\x1f\"" + // 0x1F200300: 0x00001F22 + "\x1f!\x03\x00\x00\x00\x1f#" + // 0x1F210300: 0x00001F23 + "\x1f \x03\x01\x00\x00\x1f$" + // 0x1F200301: 0x00001F24 + "\x1f!\x03\x01\x00\x00\x1f%" + // 0x1F210301: 0x00001F25 + "\x1f \x03B\x00\x00\x1f&" + // 0x1F200342: 0x00001F26 + "\x1f!\x03B\x00\x00\x1f'" + // 0x1F210342: 0x00001F27 + "\x03\x97\x03\x13\x00\x00\x1f(" + // 0x03970313: 0x00001F28 + "\x03\x97\x03\x14\x00\x00\x1f)" + // 0x03970314: 0x00001F29 + "\x1f(\x03\x00\x00\x00\x1f*" + // 0x1F280300: 0x00001F2A + "\x1f)\x03\x00\x00\x00\x1f+" + // 0x1F290300: 0x00001F2B + "\x1f(\x03\x01\x00\x00\x1f," + // 0x1F280301: 0x00001F2C + "\x1f)\x03\x01\x00\x00\x1f-" + // 0x1F290301: 0x00001F2D + "\x1f(\x03B\x00\x00\x1f." + // 0x1F280342: 0x00001F2E + "\x1f)\x03B\x00\x00\x1f/" + // 0x1F290342: 0x00001F2F + "\x03\xb9\x03\x13\x00\x00\x1f0" + // 0x03B90313: 0x00001F30 + "\x03\xb9\x03\x14\x00\x00\x1f1" + // 0x03B90314: 0x00001F31 + "\x1f0\x03\x00\x00\x00\x1f2" + // 0x1F300300: 0x00001F32 + "\x1f1\x03\x00\x00\x00\x1f3" + // 0x1F310300: 0x00001F33 + "\x1f0\x03\x01\x00\x00\x1f4" + // 0x1F300301: 0x00001F34 + "\x1f1\x03\x01\x00\x00\x1f5" + // 0x1F310301: 0x00001F35 + "\x1f0\x03B\x00\x00\x1f6" + // 0x1F300342: 0x00001F36 + "\x1f1\x03B\x00\x00\x1f7" + // 0x1F310342: 0x00001F37 + "\x03\x99\x03\x13\x00\x00\x1f8" + // 0x03990313: 0x00001F38 + "\x03\x99\x03\x14\x00\x00\x1f9" + // 0x03990314: 0x00001F39 + "\x1f8\x03\x00\x00\x00\x1f:" + // 0x1F380300: 0x00001F3A + "\x1f9\x03\x00\x00\x00\x1f;" + // 0x1F390300: 0x00001F3B + "\x1f8\x03\x01\x00\x00\x1f<" + // 0x1F380301: 0x00001F3C + "\x1f9\x03\x01\x00\x00\x1f=" + // 0x1F390301: 0x00001F3D + "\x1f8\x03B\x00\x00\x1f>" + // 0x1F380342: 0x00001F3E + "\x1f9\x03B\x00\x00\x1f?" + // 0x1F390342: 0x00001F3F + "\x03\xbf\x03\x13\x00\x00\x1f@" + // 0x03BF0313: 0x00001F40 + "\x03\xbf\x03\x14\x00\x00\x1fA" + // 0x03BF0314: 0x00001F41 + "\x1f@\x03\x00\x00\x00\x1fB" + // 0x1F400300: 0x00001F42 + "\x1fA\x03\x00\x00\x00\x1fC" + // 0x1F410300: 0x00001F43 + "\x1f@\x03\x01\x00\x00\x1fD" + // 0x1F400301: 0x00001F44 + "\x1fA\x03\x01\x00\x00\x1fE" + // 0x1F410301: 0x00001F45 + "\x03\x9f\x03\x13\x00\x00\x1fH" + // 0x039F0313: 0x00001F48 + "\x03\x9f\x03\x14\x00\x00\x1fI" + // 0x039F0314: 0x00001F49 + "\x1fH\x03\x00\x00\x00\x1fJ" + // 0x1F480300: 0x00001F4A + "\x1fI\x03\x00\x00\x00\x1fK" + // 0x1F490300: 0x00001F4B + "\x1fH\x03\x01\x00\x00\x1fL" + // 0x1F480301: 0x00001F4C + "\x1fI\x03\x01\x00\x00\x1fM" + // 0x1F490301: 0x00001F4D + "\x03\xc5\x03\x13\x00\x00\x1fP" + // 0x03C50313: 0x00001F50 + "\x03\xc5\x03\x14\x00\x00\x1fQ" + // 0x03C50314: 0x00001F51 + "\x1fP\x03\x00\x00\x00\x1fR" + // 0x1F500300: 0x00001F52 + "\x1fQ\x03\x00\x00\x00\x1fS" + // 0x1F510300: 0x00001F53 + "\x1fP\x03\x01\x00\x00\x1fT" + // 0x1F500301: 0x00001F54 + "\x1fQ\x03\x01\x00\x00\x1fU" + // 0x1F510301: 0x00001F55 + "\x1fP\x03B\x00\x00\x1fV" + // 0x1F500342: 0x00001F56 + "\x1fQ\x03B\x00\x00\x1fW" + // 0x1F510342: 0x00001F57 + "\x03\xa5\x03\x14\x00\x00\x1fY" + // 0x03A50314: 0x00001F59 + "\x1fY\x03\x00\x00\x00\x1f[" + // 0x1F590300: 0x00001F5B + "\x1fY\x03\x01\x00\x00\x1f]" + // 0x1F590301: 0x00001F5D + "\x1fY\x03B\x00\x00\x1f_" + // 0x1F590342: 0x00001F5F + "\x03\xc9\x03\x13\x00\x00\x1f`" + // 0x03C90313: 0x00001F60 + "\x03\xc9\x03\x14\x00\x00\x1fa" + // 0x03C90314: 0x00001F61 + "\x1f`\x03\x00\x00\x00\x1fb" + // 0x1F600300: 0x00001F62 + "\x1fa\x03\x00\x00\x00\x1fc" + // 0x1F610300: 0x00001F63 + "\x1f`\x03\x01\x00\x00\x1fd" + // 0x1F600301: 0x00001F64 + "\x1fa\x03\x01\x00\x00\x1fe" + 
// 0x1F610301: 0x00001F65 + "\x1f`\x03B\x00\x00\x1ff" + // 0x1F600342: 0x00001F66 + "\x1fa\x03B\x00\x00\x1fg" + // 0x1F610342: 0x00001F67 + "\x03\xa9\x03\x13\x00\x00\x1fh" + // 0x03A90313: 0x00001F68 + "\x03\xa9\x03\x14\x00\x00\x1fi" + // 0x03A90314: 0x00001F69 + "\x1fh\x03\x00\x00\x00\x1fj" + // 0x1F680300: 0x00001F6A + "\x1fi\x03\x00\x00\x00\x1fk" + // 0x1F690300: 0x00001F6B + "\x1fh\x03\x01\x00\x00\x1fl" + // 0x1F680301: 0x00001F6C + "\x1fi\x03\x01\x00\x00\x1fm" + // 0x1F690301: 0x00001F6D + "\x1fh\x03B\x00\x00\x1fn" + // 0x1F680342: 0x00001F6E + "\x1fi\x03B\x00\x00\x1fo" + // 0x1F690342: 0x00001F6F + "\x03\xb1\x03\x00\x00\x00\x1fp" + // 0x03B10300: 0x00001F70 + "\x03\xb5\x03\x00\x00\x00\x1fr" + // 0x03B50300: 0x00001F72 + "\x03\xb7\x03\x00\x00\x00\x1ft" + // 0x03B70300: 0x00001F74 + "\x03\xb9\x03\x00\x00\x00\x1fv" + // 0x03B90300: 0x00001F76 + "\x03\xbf\x03\x00\x00\x00\x1fx" + // 0x03BF0300: 0x00001F78 + "\x03\xc5\x03\x00\x00\x00\x1fz" + // 0x03C50300: 0x00001F7A + "\x03\xc9\x03\x00\x00\x00\x1f|" + // 0x03C90300: 0x00001F7C + "\x1f\x00\x03E\x00\x00\x1f\x80" + // 0x1F000345: 0x00001F80 + "\x1f\x01\x03E\x00\x00\x1f\x81" + // 0x1F010345: 0x00001F81 + "\x1f\x02\x03E\x00\x00\x1f\x82" + // 0x1F020345: 0x00001F82 + "\x1f\x03\x03E\x00\x00\x1f\x83" + // 0x1F030345: 0x00001F83 + "\x1f\x04\x03E\x00\x00\x1f\x84" + // 0x1F040345: 0x00001F84 + "\x1f\x05\x03E\x00\x00\x1f\x85" + // 0x1F050345: 0x00001F85 + "\x1f\x06\x03E\x00\x00\x1f\x86" + // 0x1F060345: 0x00001F86 + "\x1f\a\x03E\x00\x00\x1f\x87" + // 0x1F070345: 0x00001F87 + "\x1f\b\x03E\x00\x00\x1f\x88" + // 0x1F080345: 0x00001F88 + "\x1f\t\x03E\x00\x00\x1f\x89" + // 0x1F090345: 0x00001F89 + "\x1f\n\x03E\x00\x00\x1f\x8a" + // 0x1F0A0345: 0x00001F8A + "\x1f\v\x03E\x00\x00\x1f\x8b" + // 0x1F0B0345: 0x00001F8B + "\x1f\f\x03E\x00\x00\x1f\x8c" + // 0x1F0C0345: 0x00001F8C + "\x1f\r\x03E\x00\x00\x1f\x8d" + // 0x1F0D0345: 0x00001F8D + "\x1f\x0e\x03E\x00\x00\x1f\x8e" + // 0x1F0E0345: 0x00001F8E + "\x1f\x0f\x03E\x00\x00\x1f\x8f" + // 0x1F0F0345: 0x00001F8F + "\x1f \x03E\x00\x00\x1f\x90" + // 0x1F200345: 0x00001F90 + "\x1f!\x03E\x00\x00\x1f\x91" + // 0x1F210345: 0x00001F91 + "\x1f\"\x03E\x00\x00\x1f\x92" + // 0x1F220345: 0x00001F92 + "\x1f#\x03E\x00\x00\x1f\x93" + // 0x1F230345: 0x00001F93 + "\x1f$\x03E\x00\x00\x1f\x94" + // 0x1F240345: 0x00001F94 + "\x1f%\x03E\x00\x00\x1f\x95" + // 0x1F250345: 0x00001F95 + "\x1f&\x03E\x00\x00\x1f\x96" + // 0x1F260345: 0x00001F96 + "\x1f'\x03E\x00\x00\x1f\x97" + // 0x1F270345: 0x00001F97 + "\x1f(\x03E\x00\x00\x1f\x98" + // 0x1F280345: 0x00001F98 + "\x1f)\x03E\x00\x00\x1f\x99" + // 0x1F290345: 0x00001F99 + "\x1f*\x03E\x00\x00\x1f\x9a" + // 0x1F2A0345: 0x00001F9A + "\x1f+\x03E\x00\x00\x1f\x9b" + // 0x1F2B0345: 0x00001F9B + "\x1f,\x03E\x00\x00\x1f\x9c" + // 0x1F2C0345: 0x00001F9C + "\x1f-\x03E\x00\x00\x1f\x9d" + // 0x1F2D0345: 0x00001F9D + "\x1f.\x03E\x00\x00\x1f\x9e" + // 0x1F2E0345: 0x00001F9E + "\x1f/\x03E\x00\x00\x1f\x9f" + // 0x1F2F0345: 0x00001F9F + "\x1f`\x03E\x00\x00\x1f\xa0" + // 0x1F600345: 0x00001FA0 + "\x1fa\x03E\x00\x00\x1f\xa1" + // 0x1F610345: 0x00001FA1 + "\x1fb\x03E\x00\x00\x1f\xa2" + // 0x1F620345: 0x00001FA2 + "\x1fc\x03E\x00\x00\x1f\xa3" + // 0x1F630345: 0x00001FA3 + "\x1fd\x03E\x00\x00\x1f\xa4" + // 0x1F640345: 0x00001FA4 + "\x1fe\x03E\x00\x00\x1f\xa5" + // 0x1F650345: 0x00001FA5 + "\x1ff\x03E\x00\x00\x1f\xa6" + // 0x1F660345: 0x00001FA6 + "\x1fg\x03E\x00\x00\x1f\xa7" + // 0x1F670345: 0x00001FA7 + "\x1fh\x03E\x00\x00\x1f\xa8" + // 0x1F680345: 0x00001FA8 + "\x1fi\x03E\x00\x00\x1f\xa9" + // 0x1F690345: 
0x00001FA9 + "\x1fj\x03E\x00\x00\x1f\xaa" + // 0x1F6A0345: 0x00001FAA + "\x1fk\x03E\x00\x00\x1f\xab" + // 0x1F6B0345: 0x00001FAB + "\x1fl\x03E\x00\x00\x1f\xac" + // 0x1F6C0345: 0x00001FAC + "\x1fm\x03E\x00\x00\x1f\xad" + // 0x1F6D0345: 0x00001FAD + "\x1fn\x03E\x00\x00\x1f\xae" + // 0x1F6E0345: 0x00001FAE + "\x1fo\x03E\x00\x00\x1f\xaf" + // 0x1F6F0345: 0x00001FAF + "\x03\xb1\x03\x06\x00\x00\x1f\xb0" + // 0x03B10306: 0x00001FB0 + "\x03\xb1\x03\x04\x00\x00\x1f\xb1" + // 0x03B10304: 0x00001FB1 + "\x1fp\x03E\x00\x00\x1f\xb2" + // 0x1F700345: 0x00001FB2 + "\x03\xb1\x03E\x00\x00\x1f\xb3" + // 0x03B10345: 0x00001FB3 + "\x03\xac\x03E\x00\x00\x1f\xb4" + // 0x03AC0345: 0x00001FB4 + "\x03\xb1\x03B\x00\x00\x1f\xb6" + // 0x03B10342: 0x00001FB6 + "\x1f\xb6\x03E\x00\x00\x1f\xb7" + // 0x1FB60345: 0x00001FB7 + "\x03\x91\x03\x06\x00\x00\x1f\xb8" + // 0x03910306: 0x00001FB8 + "\x03\x91\x03\x04\x00\x00\x1f\xb9" + // 0x03910304: 0x00001FB9 + "\x03\x91\x03\x00\x00\x00\x1f\xba" + // 0x03910300: 0x00001FBA + "\x03\x91\x03E\x00\x00\x1f\xbc" + // 0x03910345: 0x00001FBC + "\x00\xa8\x03B\x00\x00\x1f\xc1" + // 0x00A80342: 0x00001FC1 + "\x1ft\x03E\x00\x00\x1f\xc2" + // 0x1F740345: 0x00001FC2 + "\x03\xb7\x03E\x00\x00\x1f\xc3" + // 0x03B70345: 0x00001FC3 + "\x03\xae\x03E\x00\x00\x1f\xc4" + // 0x03AE0345: 0x00001FC4 + "\x03\xb7\x03B\x00\x00\x1f\xc6" + // 0x03B70342: 0x00001FC6 + "\x1f\xc6\x03E\x00\x00\x1f\xc7" + // 0x1FC60345: 0x00001FC7 + "\x03\x95\x03\x00\x00\x00\x1f\xc8" + // 0x03950300: 0x00001FC8 + "\x03\x97\x03\x00\x00\x00\x1f\xca" + // 0x03970300: 0x00001FCA + "\x03\x97\x03E\x00\x00\x1f\xcc" + // 0x03970345: 0x00001FCC + "\x1f\xbf\x03\x00\x00\x00\x1f\xcd" + // 0x1FBF0300: 0x00001FCD + "\x1f\xbf\x03\x01\x00\x00\x1f\xce" + // 0x1FBF0301: 0x00001FCE + "\x1f\xbf\x03B\x00\x00\x1f\xcf" + // 0x1FBF0342: 0x00001FCF + "\x03\xb9\x03\x06\x00\x00\x1f\xd0" + // 0x03B90306: 0x00001FD0 + "\x03\xb9\x03\x04\x00\x00\x1f\xd1" + // 0x03B90304: 0x00001FD1 + "\x03\xca\x03\x00\x00\x00\x1f\xd2" + // 0x03CA0300: 0x00001FD2 + "\x03\xb9\x03B\x00\x00\x1f\xd6" + // 0x03B90342: 0x00001FD6 + "\x03\xca\x03B\x00\x00\x1f\xd7" + // 0x03CA0342: 0x00001FD7 + "\x03\x99\x03\x06\x00\x00\x1f\xd8" + // 0x03990306: 0x00001FD8 + "\x03\x99\x03\x04\x00\x00\x1f\xd9" + // 0x03990304: 0x00001FD9 + "\x03\x99\x03\x00\x00\x00\x1f\xda" + // 0x03990300: 0x00001FDA + "\x1f\xfe\x03\x00\x00\x00\x1f\xdd" + // 0x1FFE0300: 0x00001FDD + "\x1f\xfe\x03\x01\x00\x00\x1f\xde" + // 0x1FFE0301: 0x00001FDE + "\x1f\xfe\x03B\x00\x00\x1f\xdf" + // 0x1FFE0342: 0x00001FDF + "\x03\xc5\x03\x06\x00\x00\x1f\xe0" + // 0x03C50306: 0x00001FE0 + "\x03\xc5\x03\x04\x00\x00\x1f\xe1" + // 0x03C50304: 0x00001FE1 + "\x03\xcb\x03\x00\x00\x00\x1f\xe2" + // 0x03CB0300: 0x00001FE2 + "\x03\xc1\x03\x13\x00\x00\x1f\xe4" + // 0x03C10313: 0x00001FE4 + "\x03\xc1\x03\x14\x00\x00\x1f\xe5" + // 0x03C10314: 0x00001FE5 + "\x03\xc5\x03B\x00\x00\x1f\xe6" + // 0x03C50342: 0x00001FE6 + "\x03\xcb\x03B\x00\x00\x1f\xe7" + // 0x03CB0342: 0x00001FE7 + "\x03\xa5\x03\x06\x00\x00\x1f\xe8" + // 0x03A50306: 0x00001FE8 + "\x03\xa5\x03\x04\x00\x00\x1f\xe9" + // 0x03A50304: 0x00001FE9 + "\x03\xa5\x03\x00\x00\x00\x1f\xea" + // 0x03A50300: 0x00001FEA + "\x03\xa1\x03\x14\x00\x00\x1f\xec" + // 0x03A10314: 0x00001FEC + "\x00\xa8\x03\x00\x00\x00\x1f\xed" + // 0x00A80300: 0x00001FED + "\x1f|\x03E\x00\x00\x1f\xf2" + // 0x1F7C0345: 0x00001FF2 + "\x03\xc9\x03E\x00\x00\x1f\xf3" + // 0x03C90345: 0x00001FF3 + "\x03\xce\x03E\x00\x00\x1f\xf4" + // 0x03CE0345: 0x00001FF4 + "\x03\xc9\x03B\x00\x00\x1f\xf6" + // 0x03C90342: 0x00001FF6 + 
"\x1f\xf6\x03E\x00\x00\x1f\xf7" + // 0x1FF60345: 0x00001FF7 + "\x03\x9f\x03\x00\x00\x00\x1f\xf8" + // 0x039F0300: 0x00001FF8 + "\x03\xa9\x03\x00\x00\x00\x1f\xfa" + // 0x03A90300: 0x00001FFA + "\x03\xa9\x03E\x00\x00\x1f\xfc" + // 0x03A90345: 0x00001FFC + "!\x90\x038\x00\x00!\x9a" + // 0x21900338: 0x0000219A + "!\x92\x038\x00\x00!\x9b" + // 0x21920338: 0x0000219B + "!\x94\x038\x00\x00!\xae" + // 0x21940338: 0x000021AE + "!\xd0\x038\x00\x00!\xcd" + // 0x21D00338: 0x000021CD + "!\xd4\x038\x00\x00!\xce" + // 0x21D40338: 0x000021CE + "!\xd2\x038\x00\x00!\xcf" + // 0x21D20338: 0x000021CF + "\"\x03\x038\x00\x00\"\x04" + // 0x22030338: 0x00002204 + "\"\b\x038\x00\x00\"\t" + // 0x22080338: 0x00002209 + "\"\v\x038\x00\x00\"\f" + // 0x220B0338: 0x0000220C + "\"#\x038\x00\x00\"$" + // 0x22230338: 0x00002224 + "\"%\x038\x00\x00\"&" + // 0x22250338: 0x00002226 + "\"<\x038\x00\x00\"A" + // 0x223C0338: 0x00002241 + "\"C\x038\x00\x00\"D" + // 0x22430338: 0x00002244 + "\"E\x038\x00\x00\"G" + // 0x22450338: 0x00002247 + "\"H\x038\x00\x00\"I" + // 0x22480338: 0x00002249 + "\x00=\x038\x00\x00\"`" + // 0x003D0338: 0x00002260 + "\"a\x038\x00\x00\"b" + // 0x22610338: 0x00002262 + "\"M\x038\x00\x00\"m" + // 0x224D0338: 0x0000226D + "\x00<\x038\x00\x00\"n" + // 0x003C0338: 0x0000226E + "\x00>\x038\x00\x00\"o" + // 0x003E0338: 0x0000226F + "\"d\x038\x00\x00\"p" + // 0x22640338: 0x00002270 + "\"e\x038\x00\x00\"q" + // 0x22650338: 0x00002271 + "\"r\x038\x00\x00\"t" + // 0x22720338: 0x00002274 + "\"s\x038\x00\x00\"u" + // 0x22730338: 0x00002275 + "\"v\x038\x00\x00\"x" + // 0x22760338: 0x00002278 + "\"w\x038\x00\x00\"y" + // 0x22770338: 0x00002279 + "\"z\x038\x00\x00\"\x80" + // 0x227A0338: 0x00002280 + "\"{\x038\x00\x00\"\x81" + // 0x227B0338: 0x00002281 + "\"\x82\x038\x00\x00\"\x84" + // 0x22820338: 0x00002284 + "\"\x83\x038\x00\x00\"\x85" + // 0x22830338: 0x00002285 + "\"\x86\x038\x00\x00\"\x88" + // 0x22860338: 0x00002288 + "\"\x87\x038\x00\x00\"\x89" + // 0x22870338: 0x00002289 + "\"\xa2\x038\x00\x00\"\xac" + // 0x22A20338: 0x000022AC + "\"\xa8\x038\x00\x00\"\xad" + // 0x22A80338: 0x000022AD + "\"\xa9\x038\x00\x00\"\xae" + // 0x22A90338: 0x000022AE + "\"\xab\x038\x00\x00\"\xaf" + // 0x22AB0338: 0x000022AF + "\"|\x038\x00\x00\"\xe0" + // 0x227C0338: 0x000022E0 + "\"}\x038\x00\x00\"\xe1" + // 0x227D0338: 0x000022E1 + "\"\x91\x038\x00\x00\"\xe2" + // 0x22910338: 0x000022E2 + "\"\x92\x038\x00\x00\"\xe3" + // 0x22920338: 0x000022E3 + "\"\xb2\x038\x00\x00\"\xea" + // 0x22B20338: 0x000022EA + "\"\xb3\x038\x00\x00\"\xeb" + // 0x22B30338: 0x000022EB + "\"\xb4\x038\x00\x00\"\xec" + // 0x22B40338: 0x000022EC + "\"\xb5\x038\x00\x00\"\xed" + // 0x22B50338: 0x000022ED + "0K0\x99\x00\x000L" + // 0x304B3099: 0x0000304C + "0M0\x99\x00\x000N" + // 0x304D3099: 0x0000304E + "0O0\x99\x00\x000P" + // 0x304F3099: 0x00003050 + "0Q0\x99\x00\x000R" + // 0x30513099: 0x00003052 + "0S0\x99\x00\x000T" + // 0x30533099: 0x00003054 + "0U0\x99\x00\x000V" + // 0x30553099: 0x00003056 + "0W0\x99\x00\x000X" + // 0x30573099: 0x00003058 + "0Y0\x99\x00\x000Z" + // 0x30593099: 0x0000305A + "0[0\x99\x00\x000\\" + // 0x305B3099: 0x0000305C + "0]0\x99\x00\x000^" + // 0x305D3099: 0x0000305E + "0_0\x99\x00\x000`" + // 0x305F3099: 0x00003060 + "0a0\x99\x00\x000b" + // 0x30613099: 0x00003062 + "0d0\x99\x00\x000e" + // 0x30643099: 0x00003065 + "0f0\x99\x00\x000g" + // 0x30663099: 0x00003067 + "0h0\x99\x00\x000i" + // 0x30683099: 0x00003069 + "0o0\x99\x00\x000p" + // 0x306F3099: 0x00003070 + "0o0\x9a\x00\x000q" + // 0x306F309A: 0x00003071 + "0r0\x99\x00\x000s" + // 
0x30723099: 0x00003073 + "0r0\x9a\x00\x000t" + // 0x3072309A: 0x00003074 + "0u0\x99\x00\x000v" + // 0x30753099: 0x00003076 + "0u0\x9a\x00\x000w" + // 0x3075309A: 0x00003077 + "0x0\x99\x00\x000y" + // 0x30783099: 0x00003079 + "0x0\x9a\x00\x000z" + // 0x3078309A: 0x0000307A + "0{0\x99\x00\x000|" + // 0x307B3099: 0x0000307C + "0{0\x9a\x00\x000}" + // 0x307B309A: 0x0000307D + "0F0\x99\x00\x000\x94" + // 0x30463099: 0x00003094 + "0\x9d0\x99\x00\x000\x9e" + // 0x309D3099: 0x0000309E + "0\xab0\x99\x00\x000\xac" + // 0x30AB3099: 0x000030AC + "0\xad0\x99\x00\x000\xae" + // 0x30AD3099: 0x000030AE + "0\xaf0\x99\x00\x000\xb0" + // 0x30AF3099: 0x000030B0 + "0\xb10\x99\x00\x000\xb2" + // 0x30B13099: 0x000030B2 + "0\xb30\x99\x00\x000\xb4" + // 0x30B33099: 0x000030B4 + "0\xb50\x99\x00\x000\xb6" + // 0x30B53099: 0x000030B6 + "0\xb70\x99\x00\x000\xb8" + // 0x30B73099: 0x000030B8 + "0\xb90\x99\x00\x000\xba" + // 0x30B93099: 0x000030BA + "0\xbb0\x99\x00\x000\xbc" + // 0x30BB3099: 0x000030BC + "0\xbd0\x99\x00\x000\xbe" + // 0x30BD3099: 0x000030BE + "0\xbf0\x99\x00\x000\xc0" + // 0x30BF3099: 0x000030C0 + "0\xc10\x99\x00\x000\xc2" + // 0x30C13099: 0x000030C2 + "0\xc40\x99\x00\x000\xc5" + // 0x30C43099: 0x000030C5 + "0\xc60\x99\x00\x000\xc7" + // 0x30C63099: 0x000030C7 + "0\xc80\x99\x00\x000\xc9" + // 0x30C83099: 0x000030C9 + "0\xcf0\x99\x00\x000\xd0" + // 0x30CF3099: 0x000030D0 + "0\xcf0\x9a\x00\x000\xd1" + // 0x30CF309A: 0x000030D1 + "0\xd20\x99\x00\x000\xd3" + // 0x30D23099: 0x000030D3 + "0\xd20\x9a\x00\x000\xd4" + // 0x30D2309A: 0x000030D4 + "0\xd50\x99\x00\x000\xd6" + // 0x30D53099: 0x000030D6 + "0\xd50\x9a\x00\x000\xd7" + // 0x30D5309A: 0x000030D7 + "0\xd80\x99\x00\x000\xd9" + // 0x30D83099: 0x000030D9 + "0\xd80\x9a\x00\x000\xda" + // 0x30D8309A: 0x000030DA + "0\xdb0\x99\x00\x000\xdc" + // 0x30DB3099: 0x000030DC + "0\xdb0\x9a\x00\x000\xdd" + // 0x30DB309A: 0x000030DD + "0\xa60\x99\x00\x000\xf4" + // 0x30A63099: 0x000030F4 + "0\xef0\x99\x00\x000\xf7" + // 0x30EF3099: 0x000030F7 + "0\xf00\x99\x00\x000\xf8" + // 0x30F03099: 0x000030F8 + "0\xf10\x99\x00\x000\xf9" + // 0x30F13099: 0x000030F9 + "0\xf20\x99\x00\x000\xfa" + // 0x30F23099: 0x000030FA + "0\xfd0\x99\x00\x000\xfe" + // 0x30FD3099: 0x000030FE + "\x10\x99\x10\xba\x00\x01\x10\x9a" + // 0x109910BA: 0x0001109A + "\x10\x9b\x10\xba\x00\x01\x10\x9c" + // 0x109B10BA: 0x0001109C + "\x10\xa5\x10\xba\x00\x01\x10\xab" + // 0x10A510BA: 0x000110AB + "\x111\x11'\x00\x01\x11." + // 0x11311127: 0x0001112E + "\x112\x11'\x00\x01\x11/" + // 0x11321127: 0x0001112F + "\x13G\x13>\x00\x01\x13K" + // 0x1347133E: 0x0001134B + "\x13G\x13W\x00\x01\x13L" + // 0x13471357: 0x0001134C + "\x14\xb9\x14\xba\x00\x01\x14\xbb" + // 0x14B914BA: 0x000114BB + "\x14\xb9\x14\xb0\x00\x01\x14\xbc" + // 0x14B914B0: 0x000114BC + "\x14\xb9\x14\xbd\x00\x01\x14\xbe" + // 0x14B914BD: 0x000114BE + "\x15\xb8\x15\xaf\x00\x01\x15\xba" + // 0x15B815AF: 0x000115BA + "\x15\xb9\x15\xaf\x00\x01\x15\xbb" + // 0x15B915AF: 0x000115BB + "" + // Total size of tables: 53KB (54006 bytes) diff --git a/vendor/golang.org/x/text/unicode/norm/transform.go b/vendor/golang.org/x/text/unicode/norm/transform.go index 9f47efb..a1d366a 100644 --- a/vendor/golang.org/x/text/unicode/norm/transform.go +++ b/vendor/golang.org/x/text/unicode/norm/transform.go @@ -18,7 +18,6 @@ func (Form) Reset() {} // Users should either catch ErrShortDst and allow dst to grow or have dst be at // least of size MaxTransformChunkSize to be guaranteed of progress. 
func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) { - n := 0 // Cap the maximum number of src bytes to check. b := src eof := atEOF @@ -27,13 +26,14 @@ func (f Form) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) eof = false b = b[:ns] } - i, ok := formTable[f].quickSpan(inputBytes(b), n, len(b), eof) - n += copy(dst[n:], b[n:i]) + i, ok := formTable[f].quickSpan(inputBytes(b), 0, len(b), eof) + n := copy(dst, b[:i]) if !ok { nDst, nSrc, err = f.transform(dst[n:], src[n:], atEOF) return nDst + n, nSrc + n, err } - if n < len(src) && !atEOF { + + if err == nil && n < len(src) && !atEOF { err = transform.ErrShortSrc } return n, n, err @@ -79,7 +79,7 @@ func (f Form) transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) nSrc += n nDst += n if ok { - if n < rb.nsrc && !atEOF { + if err == nil && n < rb.nsrc && !atEOF { err = transform.ErrShortSrc } return nDst, nSrc, err diff --git a/vendor/golang.org/x/time/AUTHORS b/vendor/golang.org/x/time/AUTHORS new file mode 100644 index 0000000..15167cd --- /dev/null +++ b/vendor/golang.org/x/time/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/vendor/golang.org/x/time/CONTRIBUTORS b/vendor/golang.org/x/time/CONTRIBUTORS new file mode 100644 index 0000000..1c4577e --- /dev/null +++ b/vendor/golang.org/x/time/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/golang.org/x/time/LICENSE b/vendor/golang.org/x/time/LICENSE new file mode 100644 index 0000000..6a66aea --- /dev/null +++ b/vendor/golang.org/x/time/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
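Note on the transform.go hunks above: Form.Transform now runs quickSpan from offset 0, copies the already-normalized prefix straight into dst, and reports transform.ErrShortSrc only when no other error occurred. A minimal sketch of how callers typically exercise that path through the x/text transform package (illustrative only, not part of this patch; assumes the vendored golang.org/x/text packages shown above):

// Illustrative only -- not part of the patch.
package main

import (
	"fmt"

	"golang.org/x/text/transform"
	"golang.org/x/text/unicode/norm"
)

func main() {
	// norm.NFC is a Form; transform.String drives Form.Transform
	// (the method patched above) under the hood.
	s, _, err := transform.String(norm.NFC, "cafe\u0301") // "e" + combining acute accent
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // "café", composed into a single code point
}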
diff --git a/vendor/golang.org/x/time/PATENTS b/vendor/golang.org/x/time/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/golang.org/x/time/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go new file mode 100644 index 0000000..a98fe77 --- /dev/null +++ b/vendor/golang.org/x/time/rate/rate.go @@ -0,0 +1,402 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package rate provides a rate limiter. +package rate + +import ( + "context" + "fmt" + "math" + "sync" + "time" +) + +// Limit defines the maximum frequency of some events. +// Limit is represented as number of events per second. +// A zero Limit allows no events. +type Limit float64 + +// Inf is the infinite rate limit; it allows all events (even if burst is zero). +const Inf = Limit(math.MaxFloat64) + +// Every converts a minimum time interval between events to a Limit. +func Every(interval time.Duration) Limit { + if interval <= 0 { + return Inf + } + return 1 / Limit(interval.Seconds()) +} + +// A Limiter controls how frequently events are allowed to happen. +// It implements a "token bucket" of size b, initially full and refilled +// at rate r tokens per second. +// Informally, in any large enough time interval, the Limiter limits the +// rate to r tokens per second, with a maximum burst size of b events. +// As a special case, if r == Inf (the infinite rate), b is ignored. +// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. 
+// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +type Limiter struct { + mu sync.Mutex + limit Limit + burst int + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.burst +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow is shorthand for AllowN(time.Now(), 1). +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time now. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(now time.Time, n int) bool { + return lim.reserveN(now, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(1<<63 - 1) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(now time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(now) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). +func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) + return +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. 
+func (r *Reservation) CancelAt(now time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + now, _, tokens := r.lim.advance(now) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = now + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(now) { + r.lim.lastEvent = prevEvent + } + } + + return +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// The returned Reservation’s OK() method returns false if n exceeds the Limiter's burst size. +// Usage example: +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { + r := lim.reserveN(now, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + lim.mu.Lock() + burst := lim.burst + limit := lim.limit + lim.mu.Unlock() + + if n > burst && limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + now := time.Now() + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(now) + } + // Reserve + r := lim.reserveN(now, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait if necessary + delay := r.DelayFrom(now) + if delay == 0 { + return nil + } + t := time.NewTimer(delay) + defer t.Stop() + select { + case <-t.C: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). 
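The doc comments above spell out the Allow / Reserve / Wait semantics of this vendored token-bucket limiter. A small usage sketch of the two most common entry points, Allow for drop-on-overload and Wait for blocking with a deadline — illustrative only, not part of the vendored file:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// Token bucket refilled at 5 tokens/second (one every 200ms), burst of 2.
	lim := rate.NewLimiter(rate.Every(200*time.Millisecond), 2)

	// Allow: non-blocking check; drop or skip work when no token is available.
	for i := 0; i < 4; i++ {
		fmt.Println("allow:", lim.Allow())
	}

	// Wait: block until a token is available or the context expires.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		fmt.Println("wait failed:", err)
		return
	}
	fmt.Println("did rate-limited work")
}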
+func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.limit = newLimit +} + +// SetBurst is shorthand for SetBurstAt(time.Now(), newBurst). +func (lim *Limiter) SetBurst(newBurst int) { + lim.SetBurstAt(time.Now(), newBurst) +} + +// SetBurstAt sets a new burst size for the limiter. +func (lim *Limiter) SetBurstAt(now time.Time, newBurst int) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.burst = newBurst +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + + if lim.limit == Inf { + lim.mu.Unlock() + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: now, + } + } + + now, last, tokens := lim.advance(now) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = now.Add(waitDuration) + } + + // Update state + if ok { + lim.last = now + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } else { + lim.last = last + } + + lim.mu.Unlock() + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +// advance requires that lim.mu is held. +func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { + last := lim.last + if now.Before(last) { + last = now + } + + // Avoid making delta overflow below when last is very old. + maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) + elapsed := now.Sub(last) + if elapsed > maxElapsed { + elapsed = maxElapsed + } + + // Calculate the new number of tokens, due to time that passed. + delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + + return now, last, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + seconds := tokens / float64(limit) + return time.Nanosecond * time.Duration(1e9*seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. 
+func (limit Limit) tokensFromDuration(d time.Duration) float64 { + // Split the integer and fractional parts ourself to minimize rounding errors. + // See golang.org/issues/34861. + sec := float64(d/time.Second) * float64(limit) + nsec := float64(d%time.Second) * float64(limit) + return sec + nsec/1e9 +} diff --git a/vendor/google.golang.org/appengine/LICENSE b/vendor/google.golang.org/appengine/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/google.golang.org/appengine/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/google.golang.org/appengine/internal/api.go b/vendor/google.golang.org/appengine/internal/api.go new file mode 100644 index 0000000..bbc1cb9 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api.go @@ -0,0 +1,671 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "bytes" + "errors" + "fmt" + "io/ioutil" + "log" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + logpb "google.golang.org/appengine/internal/log" + remotepb "google.golang.org/appengine/internal/remote_api" +) + +const ( + apiPath = "/rpc_http" + defaultTicketSuffix = "/default.20150612t184001.0" +) + +var ( + // Incoming headers. + ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket") + dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo") + traceHeader = http.CanonicalHeaderKey("X-Cloud-Trace-Context") + curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace") + userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP") + remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr") + + // Outgoing headers. 
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint") + apiEndpointHeaderValue = []string{"app-engine-apis"} + apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method") + apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"} + apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline") + apiContentType = http.CanonicalHeaderKey("Content-Type") + apiContentTypeValue = []string{"application/octet-stream"} + logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count") + + apiHTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + Dial: limitDial, + }, + } + + defaultTicketOnce sync.Once + defaultTicket string + backgroundContextOnce sync.Once + backgroundContext netcontext.Context +) + +func apiURL() *url.URL { + host, port := "appengine.googleapis.internal", "10001" + if h := os.Getenv("API_HOST"); h != "" { + host = h + } + if p := os.Getenv("API_PORT"); p != "" { + port = p + } + return &url.URL{ + Scheme: "http", + Host: host + ":" + port, + Path: apiPath, + } +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + c := &context{ + req: r, + outHeader: w.Header(), + apiURL: apiURL(), + } + r = r.WithContext(withContext(r.Context(), c)) + c.req = r + + stopFlushing := make(chan int) + + // Patch up RemoteAddr so it looks reasonable. + if addr := r.Header.Get(userIPHeader); addr != "" { + r.RemoteAddr = addr + } else if addr = r.Header.Get(remoteAddrHeader); addr != "" { + r.RemoteAddr = addr + } else { + // Should not normally reach here, but pick a sensible default anyway. + r.RemoteAddr = "127.0.0.1" + } + // The address in the headers will most likely be of these forms: + // 123.123.123.123 + // 2001:db8::1 + // net/http.Request.RemoteAddr is specified to be in "IP:port" form. + if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil { + // Assume the remote address is only a host; add a default port. + r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80") + } + + // Start goroutine responsible for flushing app logs. + // This is done after adding c to ctx.m (and stopped before removing it) + // because flushing logs requires making an API call. + go c.logFlusher(stopFlushing) + + executeRequestSafely(c, r) + c.outHeader = nil // make sure header changes aren't respected any more + + stopFlushing <- 1 // any logging beyond this point will be dropped + + // Flush any pending logs asynchronously. + c.pendingLogs.Lock() + flushes := c.pendingLogs.flushes + if len(c.pendingLogs.lines) > 0 { + flushes++ + } + c.pendingLogs.Unlock() + flushed := make(chan struct{}) + go func() { + defer close(flushed) + // Force a log flush, because with very short requests we + // may not ever flush logs. + c.flushLog(true) + }() + w.Header().Set(logFlushHeader, strconv.Itoa(flushes)) + + // Avoid nil Write call if c.Write is never called. + if c.outCode != 0 { + w.WriteHeader(c.outCode) + } + if c.outBody != nil { + w.Write(c.outBody) + } + // Wait for the last flush to complete before returning, + // otherwise the security ticket will not be valid. 
+ <-flushed +} + +func executeRequestSafely(c *context, r *http.Request) { + defer func() { + if x := recover(); x != nil { + logf(c, 4, "%s", renderPanic(x)) // 4 == critical + c.outCode = 500 + } + }() + + http.DefaultServeMux.ServeHTTP(c, r) +} + +func renderPanic(x interface{}) string { + buf := make([]byte, 16<<10) // 16 KB should be plenty + buf = buf[:runtime.Stack(buf, false)] + + // Remove the first few stack frames: + // this func + // the recover closure in the caller + // That will root the stack trace at the site of the panic. + const ( + skipStart = "internal.renderPanic" + skipFrames = 2 + ) + start := bytes.Index(buf, []byte(skipStart)) + p := start + for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ { + p = bytes.IndexByte(buf[p+1:], '\n') + p + 1 + if p < 0 { + break + } + } + if p >= 0 { + // buf[start:p+1] is the block to remove. + // Copy buf[p+1:] over buf[start:] and shrink buf. + copy(buf[start:], buf[p+1:]) + buf = buf[:len(buf)-(p+1-start)] + } + + // Add panic heading. + head := fmt.Sprintf("panic: %v\n\n", x) + if len(head) > len(buf) { + // Extremely unlikely to happen. + return head + } + copy(buf[len(head):], buf) + copy(buf, head) + + return string(buf) +} + +// context represents the context of an in-flight HTTP request. +// It implements the appengine.Context and http.ResponseWriter interfaces. +type context struct { + req *http.Request + + outCode int + outHeader http.Header + outBody []byte + + pendingLogs struct { + sync.Mutex + lines []*logpb.UserAppLogLine + flushes int + } + + apiURL *url.URL +} + +var contextKey = "holds a *context" + +// jointContext joins two contexts in a superficial way. +// It takes values and timeouts from a base context, and only values from another context. +type jointContext struct { + base netcontext.Context + valuesOnly netcontext.Context +} + +func (c jointContext) Deadline() (time.Time, bool) { + return c.base.Deadline() +} + +func (c jointContext) Done() <-chan struct{} { + return c.base.Done() +} + +func (c jointContext) Err() error { + return c.base.Err() +} + +func (c jointContext) Value(key interface{}) interface{} { + if val := c.base.Value(key); val != nil { + return val + } + return c.valuesOnly.Value(key) +} + +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. +func fromContext(ctx netcontext.Context) *context { + c, _ := ctx.Value(&contextKey).(*context) + return c +} + +func withContext(parent netcontext.Context, c *context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + if ns := c.req.Header.Get(curNamespaceHeader); ns != "" { + ctx = withNamespace(ctx, ns) + } + return ctx +} + +func toContext(c *context) netcontext.Context { + return withContext(netcontext.Background(), c) +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + return c.req.Header + } + return nil +} + +func ReqContext(req *http.Request) netcontext.Context { + return req.Context() +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + return jointContext{ + base: parent, + valuesOnly: req.Context(), + } +} + +// DefaultTicket returns a ticket used for background context or dev_appserver. 
+func DefaultTicket() string { + defaultTicketOnce.Do(func() { + if IsDevAppServer() { + defaultTicket = "testapp" + defaultTicketSuffix + return + } + appID := partitionlessAppID() + escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1) + majVersion := VersionID(nil) + if i := strings.Index(majVersion, "."); i > 0 { + majVersion = majVersion[:i] + } + defaultTicket = fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(nil), majVersion, InstanceID()) + }) + return defaultTicket +} + +func BackgroundContext() netcontext.Context { + backgroundContextOnce.Do(func() { + // Compute background security ticket. + ticket := DefaultTicket() + + c := &context{ + req: &http.Request{ + Header: http.Header{ + ticketHeader: []string{ticket}, + }, + }, + apiURL: apiURL(), + } + backgroundContext = toContext(c) + + // TODO(dsymonds): Wire up the shutdown handler to do a final flush. + go c.logFlusher(make(chan int)) + }) + + return backgroundContext +} + +// RegisterTestRequest registers the HTTP request req for testing, such that +// any API calls are sent to the provided URL. It returns a closure to delete +// the registration. +// It should only be used by aetest package. +func RegisterTestRequest(req *http.Request, apiURL *url.URL, decorate func(netcontext.Context) netcontext.Context) (*http.Request, func()) { + c := &context{ + req: req, + apiURL: apiURL, + } + ctx := withContext(decorate(req.Context()), c) + req = req.WithContext(ctx) + c.req = req + return req, func() {} +} + +var errTimeout = &CallError{ + Detail: "Deadline exceeded", + Code: int32(remotepb.RpcError_CANCELLED), + Timeout: true, +} + +func (c *context) Header() http.Header { return c.outHeader } + +// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status +// codes do not permit a response body (nor response entity headers such as +// Content-Length, Content-Type, etc). +func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == 204: + return false + case status == 304: + return false + } + return true +} + +func (c *context) Write(b []byte) (int, error) { + if c.outCode == 0 { + c.WriteHeader(http.StatusOK) + } + if len(b) > 0 && !bodyAllowedForStatus(c.outCode) { + return 0, http.ErrBodyNotAllowed + } + c.outBody = append(c.outBody, b...) + return len(b), nil +} + +func (c *context) WriteHeader(code int) { + if c.outCode != 0 { + logf(c, 3, "WriteHeader called multiple times on request.") // error level + return + } + c.outCode = code +} + +func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) { + hreq := &http.Request{ + Method: "POST", + URL: c.apiURL, + Header: http.Header{ + apiEndpointHeader: apiEndpointHeaderValue, + apiMethodHeader: apiMethodHeaderValue, + apiContentType: apiContentTypeValue, + apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)}, + }, + Body: ioutil.NopCloser(bytes.NewReader(body)), + ContentLength: int64(len(body)), + Host: c.apiURL.Host, + } + if info := c.req.Header.Get(dapperHeader); info != "" { + hreq.Header.Set(dapperHeader, info) + } + if info := c.req.Header.Get(traceHeader); info != "" { + hreq.Header.Set(traceHeader, info) + } + + tr := apiHTTPClient.Transport.(*http.Transport) + + var timedOut int32 // atomic; set to 1 if timed out + t := time.AfterFunc(timeout, func() { + atomic.StoreInt32(&timedOut, 1) + tr.CancelRequest(hreq) + }) + defer t.Stop() + defer func() { + // Check if timeout was exceeded. 
+ if atomic.LoadInt32(&timedOut) != 0 { + err = errTimeout + } + }() + + hresp, err := apiHTTPClient.Do(hreq) + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge HTTP failed: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + defer hresp.Body.Close() + hrespBody, err := ioutil.ReadAll(hresp.Body) + if hresp.StatusCode != 200 { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + if err != nil { + return nil, &CallError{ + Detail: fmt.Sprintf("service bridge response bad: %v", err), + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return hrespBody, nil +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errNotAppEngineContext + } + + // Apply transaction modifications if we're in a transaction. + if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + // Default RPC timeout is 60s. + timeout := 60 * time.Second + if deadline, ok := ctx.Deadline(); ok { + timeout = deadline.Sub(time.Now()) + } + + data, err := proto.Marshal(in) + if err != nil { + return err + } + + ticket := c.req.Header.Get(ticketHeader) + // Use a test ticket under test environment. + if ticket == "" { + if appid := ctx.Value(&appIDOverrideKey); appid != nil { + ticket = appid.(string) + defaultTicketSuffix + } + } + // Fall back to use background ticket when the request ticket is not available in Flex or dev_appserver. + if ticket == "" { + ticket = DefaultTicket() + } + req := &remotepb.Request{ + ServiceName: &service, + Method: &method, + Request: data, + RequestId: &ticket, + } + hreqBody, err := proto.Marshal(req) + if err != nil { + return err + } + + hrespBody, err := c.post(hreqBody, timeout) + if err != nil { + return err + } + + res := &remotepb.Response{} + if err := proto.Unmarshal(hrespBody, res); err != nil { + return err + } + if res.RpcError != nil { + ce := &CallError{ + Detail: res.RpcError.GetDetail(), + Code: *res.RpcError.Code, + } + switch remotepb.RpcError_ErrorCode(ce.Code) { + case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED: + ce.Timeout = true + } + return ce + } + if res.ApplicationError != nil { + return &APIError{ + Service: *req.ServiceName, + Detail: res.ApplicationError.GetDetail(), + Code: *res.ApplicationError.Code, + } + } + if res.Exception != nil || res.JavaException != nil { + // This shouldn't happen, but let's be defensive. + return &CallError{ + Detail: "service bridge returned exception", + Code: int32(remotepb.RpcError_UNKNOWN), + } + } + return proto.Unmarshal(res.Response, out) +} + +func (c *context) Request() *http.Request { + return c.req +} + +func (c *context) addLogLine(ll *logpb.UserAppLogLine) { + // Truncate long log lines. + // TODO(dsymonds): Check if this is still necessary. 
+ const lim = 8 << 10 + if len(*ll.Message) > lim { + suffix := fmt.Sprintf("...(length %d)", len(*ll.Message)) + ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix) + } + + c.pendingLogs.Lock() + c.pendingLogs.lines = append(c.pendingLogs.lines, ll) + c.pendingLogs.Unlock() +} + +var logLevelName = map[int64]string{ + 0: "DEBUG", + 1: "INFO", + 2: "WARNING", + 3: "ERROR", + 4: "CRITICAL", +} + +func logf(c *context, level int64, format string, args ...interface{}) { + if c == nil { + panic("not an App Engine context") + } + s := fmt.Sprintf(format, args...) + s = strings.TrimRight(s, "\n") // Remove any trailing newline characters. + c.addLogLine(&logpb.UserAppLogLine{ + TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3), + Level: &level, + Message: &s, + }) + // Only duplicate log to stderr if not running on App Engine second generation + if !IsSecondGen() { + log.Print(logLevelName[level] + ": " + s) + } +} + +// flushLog attempts to flush any pending logs to the appserver. +// It should not be called concurrently. +func (c *context) flushLog(force bool) (flushed bool) { + c.pendingLogs.Lock() + // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious. + n, rem := 0, 30<<20 + for ; n < len(c.pendingLogs.lines); n++ { + ll := c.pendingLogs.lines[n] + // Each log line will require about 3 bytes of overhead. + nb := proto.Size(ll) + 3 + if nb > rem { + break + } + rem -= nb + } + lines := c.pendingLogs.lines[:n] + c.pendingLogs.lines = c.pendingLogs.lines[n:] + c.pendingLogs.Unlock() + + if len(lines) == 0 && !force { + // Nothing to flush. + return false + } + + rescueLogs := false + defer func() { + if rescueLogs { + c.pendingLogs.Lock() + c.pendingLogs.lines = append(lines, c.pendingLogs.lines...) + c.pendingLogs.Unlock() + } + }() + + buf, err := proto.Marshal(&logpb.UserAppLogGroup{ + LogLine: lines, + }) + if err != nil { + log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err) + rescueLogs = true + return false + } + + req := &logpb.FlushRequest{ + Logs: buf, + } + res := &basepb.VoidProto{} + c.pendingLogs.Lock() + c.pendingLogs.flushes++ + c.pendingLogs.Unlock() + if err := Call(toContext(c), "logservice", "Flush", req, res); err != nil { + log.Printf("internal.flushLog: Flush RPC: %v", err) + rescueLogs = true + return false + } + return true +} + +const ( + // Log flushing parameters. + flushInterval = 1 * time.Second + forceFlushInterval = 60 * time.Second +) + +func (c *context) logFlusher(stop <-chan int) { + lastFlush := time.Now() + tick := time.NewTicker(flushInterval) + for { + select { + case <-stop: + // Request finished. + tick.Stop() + return + case <-tick.C: + force := time.Now().Sub(lastFlush) > forceFlushInterval + if c.flushLog(force) { + lastFlush = time.Now() + } + } + } +} + +func ContextForTesting(req *http.Request) netcontext.Context { + return toContext(&context{req: req}) +} diff --git a/vendor/google.golang.org/appengine/internal/api_classic.go b/vendor/google.golang.org/appengine/internal/api_classic.go new file mode 100644 index 0000000..f0f40b2 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_classic.go @@ -0,0 +1,169 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. 
+ +// +build appengine + +package internal + +import ( + "errors" + "fmt" + "net/http" + "time" + + "appengine" + "appengine_internal" + basepb "appengine_internal/base" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var contextKey = "holds an appengine.Context" + +// fromContext returns the App Engine context or nil if ctx is not +// derived from an App Engine context. +func fromContext(ctx netcontext.Context) appengine.Context { + c, _ := ctx.Value(&contextKey).(appengine.Context) + return c +} + +// This is only for classic App Engine adapters. +func ClassicContextFromContext(ctx netcontext.Context) (appengine.Context, error) { + c := fromContext(ctx) + if c == nil { + return nil, errNotAppEngineContext + } + return c, nil +} + +func withContext(parent netcontext.Context, c appengine.Context) netcontext.Context { + ctx := netcontext.WithValue(parent, &contextKey, c) + + s := &basepb.StringProto{} + c.Call("__go__", "GetNamespace", &basepb.VoidProto{}, s, nil) + if ns := s.GetValue(); ns != "" { + ctx = NamespacedContext(ctx, ns) + } + + return ctx +} + +func IncomingHeaders(ctx netcontext.Context) http.Header { + if c := fromContext(ctx); c != nil { + if req, ok := c.Request().(*http.Request); ok { + return req.Header + } + } + return nil +} + +func ReqContext(req *http.Request) netcontext.Context { + return WithContext(netcontext.Background(), req) +} + +func WithContext(parent netcontext.Context, req *http.Request) netcontext.Context { + c := appengine.NewContext(req) + return withContext(parent, c) +} + +type testingContext struct { + appengine.Context + + req *http.Request +} + +func (t *testingContext) FullyQualifiedAppID() string { return "dev~testcontext" } +func (t *testingContext) Call(service, method string, _, _ appengine_internal.ProtoMessage, _ *appengine_internal.CallOptions) error { + if service == "__go__" && method == "GetNamespace" { + return nil + } + return fmt.Errorf("testingContext: unsupported Call") +} +func (t *testingContext) Request() interface{} { return t.req } + +func ContextForTesting(req *http.Request) netcontext.Context { + return withContext(netcontext.Background(), &testingContext{req: req}) +} + +func Call(ctx netcontext.Context, service, method string, in, out proto.Message) error { + if ns := NamespaceFromContext(ctx); ns != "" { + if fn, ok := NamespaceMods[service]; ok { + fn(in, ns) + } + } + + if f, ctx, ok := callOverrideFromContext(ctx); ok { + return f(ctx, service, method, in, out) + } + + // Handle already-done contexts quickly. + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + c := fromContext(ctx) + if c == nil { + // Give a good error message rather than a panic lower down. + return errNotAppEngineContext + } + + // Apply transaction modifications if we're in a transaction. 
+ if t := transactionFromContext(ctx); t != nil { + if t.finished { + return errors.New("transaction context has expired") + } + applyTransaction(in, &t.transaction) + } + + var opts *appengine_internal.CallOptions + if d, ok := ctx.Deadline(); ok { + opts = &appengine_internal.CallOptions{ + Timeout: d.Sub(time.Now()), + } + } + + err := c.Call(service, method, in, out, opts) + switch v := err.(type) { + case *appengine_internal.APIError: + return &APIError{ + Service: v.Service, + Detail: v.Detail, + Code: v.Code, + } + case *appengine_internal.CallError: + return &CallError{ + Detail: v.Detail, + Code: v.Code, + Timeout: v.Timeout, + } + } + return err +} + +func handleHTTP(w http.ResponseWriter, r *http.Request) { + panic("handleHTTP called; this should be impossible") +} + +func logf(c appengine.Context, level int64, format string, args ...interface{}) { + var fn func(format string, args ...interface{}) + switch level { + case 0: + fn = c.Debugf + case 1: + fn = c.Infof + case 2: + fn = c.Warningf + case 3: + fn = c.Errorf + case 4: + fn = c.Criticalf + default: + // This shouldn't happen. + fn = c.Criticalf + } + fn(format, args...) +} diff --git a/vendor/google.golang.org/appengine/internal/api_common.go b/vendor/google.golang.org/appengine/internal/api_common.go new file mode 100644 index 0000000..e0c0b21 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/api_common.go @@ -0,0 +1,123 @@ +// Copyright 2015 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "errors" + "os" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" +) + +var errNotAppEngineContext = errors.New("not an App Engine context") + +type CallOverrideFunc func(ctx netcontext.Context, service, method string, in, out proto.Message) error + +var callOverrideKey = "holds []CallOverrideFunc" + +func WithCallOverride(ctx netcontext.Context, f CallOverrideFunc) netcontext.Context { + // We avoid appending to any existing call override + // so we don't risk overwriting a popped stack below. + var cofs []CallOverrideFunc + if uf, ok := ctx.Value(&callOverrideKey).([]CallOverrideFunc); ok { + cofs = append(cofs, uf...) + } + cofs = append(cofs, f) + return netcontext.WithValue(ctx, &callOverrideKey, cofs) +} + +func callOverrideFromContext(ctx netcontext.Context) (CallOverrideFunc, netcontext.Context, bool) { + cofs, _ := ctx.Value(&callOverrideKey).([]CallOverrideFunc) + if len(cofs) == 0 { + return nil, nil, false + } + // We found a list of overrides; grab the last, and reconstitute a + // context that will hide it. 
+ f := cofs[len(cofs)-1] + ctx = netcontext.WithValue(ctx, &callOverrideKey, cofs[:len(cofs)-1]) + return f, ctx, true +} + +type logOverrideFunc func(level int64, format string, args ...interface{}) + +var logOverrideKey = "holds a logOverrideFunc" + +func WithLogOverride(ctx netcontext.Context, f logOverrideFunc) netcontext.Context { + return netcontext.WithValue(ctx, &logOverrideKey, f) +} + +var appIDOverrideKey = "holds a string, being the full app ID" + +func WithAppIDOverride(ctx netcontext.Context, appID string) netcontext.Context { + return netcontext.WithValue(ctx, &appIDOverrideKey, appID) +} + +var namespaceKey = "holds the namespace string" + +func withNamespace(ctx netcontext.Context, ns string) netcontext.Context { + return netcontext.WithValue(ctx, &namespaceKey, ns) +} + +func NamespaceFromContext(ctx netcontext.Context) string { + // If there's no namespace, return the empty string. + ns, _ := ctx.Value(&namespaceKey).(string) + return ns +} + +// FullyQualifiedAppID returns the fully-qualified application ID. +// This may contain a partition prefix (e.g. "s~" for High Replication apps), +// or a domain prefix (e.g. "example.com:"). +func FullyQualifiedAppID(ctx netcontext.Context) string { + if id, ok := ctx.Value(&appIDOverrideKey).(string); ok { + return id + } + return fullyQualifiedAppID(ctx) +} + +func Logf(ctx netcontext.Context, level int64, format string, args ...interface{}) { + if f, ok := ctx.Value(&logOverrideKey).(logOverrideFunc); ok { + f(level, format, args...) + return + } + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + logf(c, level, format, args...) +} + +// NamespacedContext wraps a Context to support namespaces. +func NamespacedContext(ctx netcontext.Context, namespace string) netcontext.Context { + return withNamespace(ctx, namespace) +} + +// SetTestEnv sets the env variables for testing background ticket in Flex. +func SetTestEnv() func() { + var environ = []struct { + key, value string + }{ + {"GAE_LONG_APP_ID", "my-app-id"}, + {"GAE_MINOR_VERSION", "067924799508853122"}, + {"GAE_MODULE_INSTANCE", "0"}, + {"GAE_MODULE_NAME", "default"}, + {"GAE_MODULE_VERSION", "20150612t184001"}, + } + + for _, v := range environ { + old := os.Getenv(v.key) + os.Setenv(v.key, v.value) + v.value = old + } + return func() { // Restore old environment after the test completes. + for _, v := range environ { + if v.value == "" { + os.Unsetenv(v.key) + continue + } + os.Setenv(v.key, v.value) + } + } +} diff --git a/vendor/google.golang.org/appengine/internal/app_id.go b/vendor/google.golang.org/appengine/internal/app_id.go new file mode 100644 index 0000000..11df8c0 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/app_id.go @@ -0,0 +1,28 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +import ( + "strings" +) + +func parseFullAppID(appid string) (partition, domain, displayID string) { + if i := strings.Index(appid, "~"); i != -1 { + partition, appid = appid[:i], appid[i+1:] + } + if i := strings.Index(appid, ":"); i != -1 { + domain, appid = appid[:i], appid[i+1:] + } + return partition, domain, appid +} + +// appID returns "appid" or "domain.com:appid". 
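The parseFullAppID function in app_id.go above splits an App Engine app ID into partition, domain, and display parts. A standalone sketch restating that logic purely to show the expected splits (the vendored function remains the source of truth; the sample IDs are hypothetical):

package main

import (
	"fmt"
	"strings"
)

// parseFullAppID restates the vendored helper for illustration:
// "partition~domain:display" is split on "~" and then on ":".
func parseFullAppID(appid string) (partition, domain, displayID string) {
	if i := strings.Index(appid, "~"); i != -1 {
		partition, appid = appid[:i], appid[i+1:]
	}
	if i := strings.Index(appid, ":"); i != -1 {
		domain, appid = appid[:i], appid[i+1:]
	}
	return partition, domain, appid
}

func main() {
	for _, id := range []string{"myapp", "example.com:myapp", "s~example.com:myapp"} {
		p, d, disp := parseFullAppID(id)
		fmt.Printf("%-22s -> partition=%q domain=%q display=%q\n", id, p, d, disp)
	}
}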
+func appID(fullAppID string) string { + _, dom, dis := parseFullAppID(fullAppID) + if dom != "" { + return dom + ":" + dis + } + return dis +} diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.pb.go b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go new file mode 100644 index 0000000..db4777e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/base/api_base.pb.go @@ -0,0 +1,308 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/base/api_base.proto + +package base + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type StringProto struct { + Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StringProto) Reset() { *m = StringProto{} } +func (m *StringProto) String() string { return proto.CompactTextString(m) } +func (*StringProto) ProtoMessage() {} +func (*StringProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{0} +} +func (m *StringProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StringProto.Unmarshal(m, b) +} +func (m *StringProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StringProto.Marshal(b, m, deterministic) +} +func (dst *StringProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_StringProto.Merge(dst, src) +} +func (m *StringProto) XXX_Size() int { + return xxx_messageInfo_StringProto.Size(m) +} +func (m *StringProto) XXX_DiscardUnknown() { + xxx_messageInfo_StringProto.DiscardUnknown(m) +} + +var xxx_messageInfo_StringProto proto.InternalMessageInfo + +func (m *StringProto) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Integer32Proto struct { + Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Integer32Proto) Reset() { *m = Integer32Proto{} } +func (m *Integer32Proto) String() string { return proto.CompactTextString(m) } +func (*Integer32Proto) ProtoMessage() {} +func (*Integer32Proto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{1} +} +func (m *Integer32Proto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Integer32Proto.Unmarshal(m, b) +} +func (m *Integer32Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Integer32Proto.Marshal(b, m, deterministic) +} +func (dst *Integer32Proto) XXX_Merge(src proto.Message) { + xxx_messageInfo_Integer32Proto.Merge(dst, src) +} +func (m *Integer32Proto) XXX_Size() int { + return xxx_messageInfo_Integer32Proto.Size(m) +} +func (m *Integer32Proto) XXX_DiscardUnknown() { + xxx_messageInfo_Integer32Proto.DiscardUnknown(m) +} + +var xxx_messageInfo_Integer32Proto proto.InternalMessageInfo + +func (m 
*Integer32Proto) GetValue() int32 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Integer64Proto struct { + Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Integer64Proto) Reset() { *m = Integer64Proto{} } +func (m *Integer64Proto) String() string { return proto.CompactTextString(m) } +func (*Integer64Proto) ProtoMessage() {} +func (*Integer64Proto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{2} +} +func (m *Integer64Proto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Integer64Proto.Unmarshal(m, b) +} +func (m *Integer64Proto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Integer64Proto.Marshal(b, m, deterministic) +} +func (dst *Integer64Proto) XXX_Merge(src proto.Message) { + xxx_messageInfo_Integer64Proto.Merge(dst, src) +} +func (m *Integer64Proto) XXX_Size() int { + return xxx_messageInfo_Integer64Proto.Size(m) +} +func (m *Integer64Proto) XXX_DiscardUnknown() { + xxx_messageInfo_Integer64Proto.DiscardUnknown(m) +} + +var xxx_messageInfo_Integer64Proto proto.InternalMessageInfo + +func (m *Integer64Proto) GetValue() int64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BoolProto struct { + Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BoolProto) Reset() { *m = BoolProto{} } +func (m *BoolProto) String() string { return proto.CompactTextString(m) } +func (*BoolProto) ProtoMessage() {} +func (*BoolProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{3} +} +func (m *BoolProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BoolProto.Unmarshal(m, b) +} +func (m *BoolProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BoolProto.Marshal(b, m, deterministic) +} +func (dst *BoolProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoolProto.Merge(dst, src) +} +func (m *BoolProto) XXX_Size() int { + return xxx_messageInfo_BoolProto.Size(m) +} +func (m *BoolProto) XXX_DiscardUnknown() { + xxx_messageInfo_BoolProto.DiscardUnknown(m) +} + +var xxx_messageInfo_BoolProto proto.InternalMessageInfo + +func (m *BoolProto) GetValue() bool { + if m != nil && m.Value != nil { + return *m.Value + } + return false +} + +type DoubleProto struct { + Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DoubleProto) Reset() { *m = DoubleProto{} } +func (m *DoubleProto) String() string { return proto.CompactTextString(m) } +func (*DoubleProto) ProtoMessage() {} +func (*DoubleProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{4} +} +func (m *DoubleProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DoubleProto.Unmarshal(m, b) +} +func (m *DoubleProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DoubleProto.Marshal(b, m, deterministic) +} +func (dst *DoubleProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_DoubleProto.Merge(dst, src) +} +func (m *DoubleProto) XXX_Size() int { + return 
xxx_messageInfo_DoubleProto.Size(m) +} +func (m *DoubleProto) XXX_DiscardUnknown() { + xxx_messageInfo_DoubleProto.DiscardUnknown(m) +} + +var xxx_messageInfo_DoubleProto proto.InternalMessageInfo + +func (m *DoubleProto) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type BytesProto struct { + Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BytesProto) Reset() { *m = BytesProto{} } +func (m *BytesProto) String() string { return proto.CompactTextString(m) } +func (*BytesProto) ProtoMessage() {} +func (*BytesProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{5} +} +func (m *BytesProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BytesProto.Unmarshal(m, b) +} +func (m *BytesProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BytesProto.Marshal(b, m, deterministic) +} +func (dst *BytesProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_BytesProto.Merge(dst, src) +} +func (m *BytesProto) XXX_Size() int { + return xxx_messageInfo_BytesProto.Size(m) +} +func (m *BytesProto) XXX_DiscardUnknown() { + xxx_messageInfo_BytesProto.DiscardUnknown(m) +} + +var xxx_messageInfo_BytesProto proto.InternalMessageInfo + +func (m *BytesProto) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type VoidProto struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VoidProto) Reset() { *m = VoidProto{} } +func (m *VoidProto) String() string { return proto.CompactTextString(m) } +func (*VoidProto) ProtoMessage() {} +func (*VoidProto) Descriptor() ([]byte, []int) { + return fileDescriptor_api_base_9d49f8792e0c1140, []int{6} +} +func (m *VoidProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_VoidProto.Unmarshal(m, b) +} +func (m *VoidProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_VoidProto.Marshal(b, m, deterministic) +} +func (dst *VoidProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_VoidProto.Merge(dst, src) +} +func (m *VoidProto) XXX_Size() int { + return xxx_messageInfo_VoidProto.Size(m) +} +func (m *VoidProto) XXX_DiscardUnknown() { + xxx_messageInfo_VoidProto.DiscardUnknown(m) +} + +var xxx_messageInfo_VoidProto proto.InternalMessageInfo + +func init() { + proto.RegisterType((*StringProto)(nil), "appengine.base.StringProto") + proto.RegisterType((*Integer32Proto)(nil), "appengine.base.Integer32Proto") + proto.RegisterType((*Integer64Proto)(nil), "appengine.base.Integer64Proto") + proto.RegisterType((*BoolProto)(nil), "appengine.base.BoolProto") + proto.RegisterType((*DoubleProto)(nil), "appengine.base.DoubleProto") + proto.RegisterType((*BytesProto)(nil), "appengine.base.BytesProto") + proto.RegisterType((*VoidProto)(nil), "appengine.base.VoidProto") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/base/api_base.proto", fileDescriptor_api_base_9d49f8792e0c1140) +} + +var fileDescriptor_api_base_9d49f8792e0c1140 = []byte{ + // 199 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0xcf, 0x3f, 0x4b, 0xc6, 0x30, + 0x10, 0x06, 0x70, 0x5a, 0xad, 0xb4, 0x57, 0xe9, 0x20, 0x0e, 0x1d, 0xb5, 0x05, 0x71, 0x4a, 0x40, + 0x45, 0x9c, 0x83, 0x8b, 0x9b, 0x28, 0x38, 0xb8, 0x48, 
0x8a, 0xc7, 0x11, 0x08, 0xb9, 0x90, 0xa6,
+	0x82, 0xdf, 0x5e, 0xda, 0xd2, 0xfa, 0xc2, 0x9b, 0xed, 0xfe, 0xfc, 0xe0, 0xe1, 0x81, 0x27, 0x62,
+	0x26, 0x8b, 0x82, 0xd8, 0x6a, 0x47, 0x82, 0x03, 0x49, 0xed, 0x3d, 0x3a, 0x32, 0x0e, 0xa5, 0x71,
+	0x11, 0x83, 0xd3, 0x56, 0x0e, 0x7a, 0x44, 0xa9, 0xbd, 0xf9, 0x9a, 0x07, 0xe1, 0x03, 0x47, 0xbe,
+	0x68, 0x76, 0x27, 0xe6, 0x6b, 0xd7, 0x43, 0xfd, 0x1e, 0x83, 0x71, 0xf4, 0xba, 0xbc, 0x2f, 0xa1,
+	0xf8, 0xd1, 0x76, 0xc2, 0x36, 0xbb, 0xca, 0x6f, 0xab, 0xb7, 0x75, 0xe9, 0x6e, 0xa0, 0x79, 0x71,
+	0x11, 0x09, 0xc3, 0xfd, 0x5d, 0xc2, 0x15, 0xc7, 0xee, 0xf1, 0x21, 0xe1, 0x4e, 0x36, 0x77, 0x0d,
+	0x95, 0x62, 0xb6, 0x09, 0x52, 0x6e, 0xa4, 0x87, 0xfa, 0x99, 0xa7, 0xc1, 0x62, 0x02, 0x65, 0xff,
+	0x79, 0xa0, 0x7e, 0x23, 0x8e, 0xab, 0x69, 0x0f, 0xcd, 0xb9, 0xca, 0xcb, 0xdd, 0xd5, 0x50, 0x7d,
+	0xb0, 0xf9, 0x5e, 0x98, 0x3a, 0xfb, 0x3c, 0x9d, 0x9b, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xba,
+	0x37, 0x25, 0xea, 0x44, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/appengine/internal/base/api_base.proto b/vendor/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 0000000..56cd7a3
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+  required string value = 1;
+}
+
+message Integer32Proto {
+  required int32 value = 1;
+}
+
+message Integer64Proto {
+  required int64 value = 1;
+}
+
+message BoolProto {
+  required bool value = 1;
+}
+
+message DoubleProto {
+  required double value = 1;
+}
+
+message BytesProto {
+  required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 0000000..2fb7482
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,4367 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type Property_Meaning int32 + +const ( + Property_NO_MEANING Property_Meaning = 0 + Property_BLOB Property_Meaning = 14 + Property_TEXT Property_Meaning = 15 + Property_BYTESTRING Property_Meaning = 16 + Property_ATOM_CATEGORY Property_Meaning = 1 + Property_ATOM_LINK Property_Meaning = 2 + Property_ATOM_TITLE Property_Meaning = 3 + Property_ATOM_CONTENT Property_Meaning = 4 + Property_ATOM_SUMMARY Property_Meaning = 5 + Property_ATOM_AUTHOR Property_Meaning = 6 + Property_GD_WHEN Property_Meaning = 7 + Property_GD_EMAIL Property_Meaning = 8 + Property_GEORSS_POINT Property_Meaning = 9 + Property_GD_IM Property_Meaning = 10 + Property_GD_PHONENUMBER Property_Meaning = 11 + Property_GD_POSTALADDRESS Property_Meaning = 12 + Property_GD_RATING Property_Meaning = 13 + Property_BLOBKEY Property_Meaning = 17 + Property_ENTITY_PROTO Property_Meaning = 19 + Property_INDEX_VALUE Property_Meaning = 18 +) + +var Property_Meaning_name = map[int32]string{ + 0: "NO_MEANING", + 14: "BLOB", + 15: "TEXT", + 16: "BYTESTRING", + 1: "ATOM_CATEGORY", + 2: "ATOM_LINK", + 3: "ATOM_TITLE", + 4: "ATOM_CONTENT", + 5: "ATOM_SUMMARY", + 6: "ATOM_AUTHOR", + 7: "GD_WHEN", + 8: "GD_EMAIL", + 9: "GEORSS_POINT", + 10: "GD_IM", + 11: "GD_PHONENUMBER", + 12: "GD_POSTALADDRESS", + 13: "GD_RATING", + 17: "BLOBKEY", + 19: "ENTITY_PROTO", + 18: "INDEX_VALUE", +} +var Property_Meaning_value = map[string]int32{ + "NO_MEANING": 0, + "BLOB": 14, + "TEXT": 15, + "BYTESTRING": 16, + "ATOM_CATEGORY": 1, + "ATOM_LINK": 2, + "ATOM_TITLE": 3, + "ATOM_CONTENT": 4, + "ATOM_SUMMARY": 5, + "ATOM_AUTHOR": 6, + "GD_WHEN": 7, + "GD_EMAIL": 8, + "GEORSS_POINT": 9, + "GD_IM": 10, + "GD_PHONENUMBER": 11, + "GD_POSTALADDRESS": 12, + "GD_RATING": 13, + "BLOBKEY": 17, + "ENTITY_PROTO": 19, + "INDEX_VALUE": 18, +} + +func (x Property_Meaning) Enum() *Property_Meaning { + p := new(Property_Meaning) + *p = x + return p +} +func (x Property_Meaning) String() string { + return proto.EnumName(Property_Meaning_name, int32(x)) +} +func (x *Property_Meaning) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning") + if err != nil { + return err + } + *x = Property_Meaning(value) + return nil +} +func (Property_Meaning) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 0} +} + +type Property_FtsTokenizationOption int32 + +const ( + Property_HTML Property_FtsTokenizationOption = 1 + Property_ATOM Property_FtsTokenizationOption = 2 +) + +var Property_FtsTokenizationOption_name = map[int32]string{ + 1: "HTML", + 2: "ATOM", +} +var Property_FtsTokenizationOption_value = map[string]int32{ + "HTML": 1, + "ATOM": 2, +} + +func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption { + p := new(Property_FtsTokenizationOption) + *p = x + return p +} +func (x Property_FtsTokenizationOption) String() string { + return proto.EnumName(Property_FtsTokenizationOption_name, int32(x)) +} +func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption") + if err != nil { + return err + } + *x = Property_FtsTokenizationOption(value) + return nil +} +func (Property_FtsTokenizationOption) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2, 1} +} + +type EntityProto_Kind int32 + +const ( + 
EntityProto_GD_CONTACT EntityProto_Kind = 1 + EntityProto_GD_EVENT EntityProto_Kind = 2 + EntityProto_GD_MESSAGE EntityProto_Kind = 3 +) + +var EntityProto_Kind_name = map[int32]string{ + 1: "GD_CONTACT", + 2: "GD_EVENT", + 3: "GD_MESSAGE", +} +var EntityProto_Kind_value = map[string]int32{ + "GD_CONTACT": 1, + "GD_EVENT": 2, + "GD_MESSAGE": 3, +} + +func (x EntityProto_Kind) Enum() *EntityProto_Kind { + p := new(EntityProto_Kind) + *p = x + return p +} +func (x EntityProto_Kind) String() string { + return proto.EnumName(EntityProto_Kind_name, int32(x)) +} +func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind") + if err != nil { + return err + } + *x = EntityProto_Kind(value) + return nil +} +func (EntityProto_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6, 0} +} + +type Index_Property_Direction int32 + +const ( + Index_Property_ASCENDING Index_Property_Direction = 1 + Index_Property_DESCENDING Index_Property_Direction = 2 +) + +var Index_Property_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Index_Property_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Index_Property_Direction) Enum() *Index_Property_Direction { + p := new(Index_Property_Direction) + *p = x + return p +} +func (x Index_Property_Direction) String() string { + return proto.EnumName(Index_Property_Direction_name, int32(x)) +} +func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction") + if err != nil { + return err + } + *x = Index_Property_Direction(value) + return nil +} +func (Index_Property_Direction) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0, 0} +} + +type CompositeIndex_State int32 + +const ( + CompositeIndex_WRITE_ONLY CompositeIndex_State = 1 + CompositeIndex_READ_WRITE CompositeIndex_State = 2 + CompositeIndex_DELETED CompositeIndex_State = 3 + CompositeIndex_ERROR CompositeIndex_State = 4 +) + +var CompositeIndex_State_name = map[int32]string{ + 1: "WRITE_ONLY", + 2: "READ_WRITE", + 3: "DELETED", + 4: "ERROR", +} +var CompositeIndex_State_value = map[string]int32{ + "WRITE_ONLY": 1, + "READ_WRITE": 2, + "DELETED": 3, + "ERROR": 4, +} + +func (x CompositeIndex_State) Enum() *CompositeIndex_State { + p := new(CompositeIndex_State) + *p = x + return p +} +func (x CompositeIndex_State) String() string { + return proto.EnumName(CompositeIndex_State_name, int32(x)) +} +func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State") + if err != nil { + return err + } + *x = CompositeIndex_State(value) + return nil +} +func (CompositeIndex_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9, 0} +} + +type Snapshot_Status int32 + +const ( + Snapshot_INACTIVE Snapshot_Status = 0 + Snapshot_ACTIVE Snapshot_Status = 1 +) + +var Snapshot_Status_name = map[int32]string{ + 0: "INACTIVE", + 1: "ACTIVE", +} +var Snapshot_Status_value = map[string]int32{ + "INACTIVE": 0, + "ACTIVE": 1, +} + +func (x Snapshot_Status) Enum() *Snapshot_Status { + p := new(Snapshot_Status) + *p = x + return p +} +func (x Snapshot_Status) String() string { + return proto.EnumName(Snapshot_Status_name, 
int32(x)) +} +func (x *Snapshot_Status) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status") + if err != nil { + return err + } + *x = Snapshot_Status(value) + return nil +} +func (Snapshot_Status) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12, 0} +} + +type Query_Hint int32 + +const ( + Query_ORDER_FIRST Query_Hint = 1 + Query_ANCESTOR_FIRST Query_Hint = 2 + Query_FILTER_FIRST Query_Hint = 3 +) + +var Query_Hint_name = map[int32]string{ + 1: "ORDER_FIRST", + 2: "ANCESTOR_FIRST", + 3: "FILTER_FIRST", +} +var Query_Hint_value = map[string]int32{ + "ORDER_FIRST": 1, + "ANCESTOR_FIRST": 2, + "FILTER_FIRST": 3, +} + +func (x Query_Hint) Enum() *Query_Hint { + p := new(Query_Hint) + *p = x + return p +} +func (x Query_Hint) String() string { + return proto.EnumName(Query_Hint_name, int32(x)) +} +func (x *Query_Hint) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint") + if err != nil { + return err + } + *x = Query_Hint(value) + return nil +} +func (Query_Hint) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} +} + +type Query_Filter_Operator int32 + +const ( + Query_Filter_LESS_THAN Query_Filter_Operator = 1 + Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2 + Query_Filter_GREATER_THAN Query_Filter_Operator = 3 + Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4 + Query_Filter_EQUAL Query_Filter_Operator = 5 + Query_Filter_IN Query_Filter_Operator = 6 + Query_Filter_EXISTS Query_Filter_Operator = 7 +) + +var Query_Filter_Operator_name = map[int32]string{ + 1: "LESS_THAN", + 2: "LESS_THAN_OR_EQUAL", + 3: "GREATER_THAN", + 4: "GREATER_THAN_OR_EQUAL", + 5: "EQUAL", + 6: "IN", + 7: "EXISTS", +} +var Query_Filter_Operator_value = map[string]int32{ + "LESS_THAN": 1, + "LESS_THAN_OR_EQUAL": 2, + "GREATER_THAN": 3, + "GREATER_THAN_OR_EQUAL": 4, + "EQUAL": 5, + "IN": 6, + "EXISTS": 7, +} + +func (x Query_Filter_Operator) Enum() *Query_Filter_Operator { + p := new(Query_Filter_Operator) + *p = x + return p +} +func (x Query_Filter_Operator) String() string { + return proto.EnumName(Query_Filter_Operator_name, int32(x)) +} +func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator") + if err != nil { + return err + } + *x = Query_Filter_Operator(value) + return nil +} +func (Query_Filter_Operator) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0, 0} +} + +type Query_Order_Direction int32 + +const ( + Query_Order_ASCENDING Query_Order_Direction = 1 + Query_Order_DESCENDING Query_Order_Direction = 2 +) + +var Query_Order_Direction_name = map[int32]string{ + 1: "ASCENDING", + 2: "DESCENDING", +} +var Query_Order_Direction_value = map[string]int32{ + "ASCENDING": 1, + "DESCENDING": 2, +} + +func (x Query_Order_Direction) Enum() *Query_Order_Direction { + p := new(Query_Order_Direction) + *p = x + return p +} +func (x Query_Order_Direction) String() string { + return proto.EnumName(Query_Order_Direction_name, int32(x)) +} +func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction") + if err != nil { + return err + } + *x = Query_Order_Direction(value) + return nil +} +func (Query_Order_Direction) 
EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1, 0} +} + +type Error_ErrorCode int32 + +const ( + Error_BAD_REQUEST Error_ErrorCode = 1 + Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2 + Error_INTERNAL_ERROR Error_ErrorCode = 3 + Error_NEED_INDEX Error_ErrorCode = 4 + Error_TIMEOUT Error_ErrorCode = 5 + Error_PERMISSION_DENIED Error_ErrorCode = 6 + Error_BIGTABLE_ERROR Error_ErrorCode = 7 + Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8 + Error_CAPABILITY_DISABLED Error_ErrorCode = 9 + Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10 + Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11 +) + +var Error_ErrorCode_name = map[int32]string{ + 1: "BAD_REQUEST", + 2: "CONCURRENT_TRANSACTION", + 3: "INTERNAL_ERROR", + 4: "NEED_INDEX", + 5: "TIMEOUT", + 6: "PERMISSION_DENIED", + 7: "BIGTABLE_ERROR", + 8: "COMMITTED_BUT_STILL_APPLYING", + 9: "CAPABILITY_DISABLED", + 10: "TRY_ALTERNATE_BACKEND", + 11: "SAFE_TIME_TOO_OLD", +} +var Error_ErrorCode_value = map[string]int32{ + "BAD_REQUEST": 1, + "CONCURRENT_TRANSACTION": 2, + "INTERNAL_ERROR": 3, + "NEED_INDEX": 4, + "TIMEOUT": 5, + "PERMISSION_DENIED": 6, + "BIGTABLE_ERROR": 7, + "COMMITTED_BUT_STILL_APPLYING": 8, + "CAPABILITY_DISABLED": 9, + "TRY_ALTERNATE_BACKEND": 10, + "SAFE_TIME_TOO_OLD": 11, +} + +func (x Error_ErrorCode) Enum() *Error_ErrorCode { + p := new(Error_ErrorCode) + *p = x + return p +} +func (x Error_ErrorCode) String() string { + return proto.EnumName(Error_ErrorCode_name, int32(x)) +} +func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode") + if err != nil { + return err + } + *x = Error_ErrorCode(value) + return nil +} +func (Error_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19, 0} +} + +type PutRequest_AutoIdPolicy int32 + +const ( + PutRequest_CURRENT PutRequest_AutoIdPolicy = 0 + PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1 +) + +var PutRequest_AutoIdPolicy_name = map[int32]string{ + 0: "CURRENT", + 1: "SEQUENTIAL", +} +var PutRequest_AutoIdPolicy_value = map[string]int32{ + "CURRENT": 0, + "SEQUENTIAL": 1, +} + +func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy { + p := new(PutRequest_AutoIdPolicy) + *p = x + return p +} +func (x PutRequest_AutoIdPolicy) String() string { + return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x)) +} +func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy") + if err != nil { + return err + } + *x = PutRequest_AutoIdPolicy(value) + return nil +} +func (PutRequest_AutoIdPolicy) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23, 0} +} + +type BeginTransactionRequest_TransactionMode int32 + +const ( + BeginTransactionRequest_UNKNOWN BeginTransactionRequest_TransactionMode = 0 + BeginTransactionRequest_READ_ONLY BeginTransactionRequest_TransactionMode = 1 + BeginTransactionRequest_READ_WRITE BeginTransactionRequest_TransactionMode = 2 +) + +var BeginTransactionRequest_TransactionMode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "READ_ONLY", + 2: "READ_WRITE", +} +var BeginTransactionRequest_TransactionMode_value = map[string]int32{ + "UNKNOWN": 0, + "READ_ONLY": 1, + "READ_WRITE": 2, +} + +func (x BeginTransactionRequest_TransactionMode) Enum() *BeginTransactionRequest_TransactionMode { + p := 
new(BeginTransactionRequest_TransactionMode) + *p = x + return p +} +func (x BeginTransactionRequest_TransactionMode) String() string { + return proto.EnumName(BeginTransactionRequest_TransactionMode_name, int32(x)) +} +func (x *BeginTransactionRequest_TransactionMode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_TransactionMode_value, data, "BeginTransactionRequest_TransactionMode") + if err != nil { + return err + } + *x = BeginTransactionRequest_TransactionMode(value) + return nil +} +func (BeginTransactionRequest_TransactionMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36, 0} +} + +type Action struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Action) Reset() { *m = Action{} } +func (m *Action) String() string { return proto.CompactTextString(m) } +func (*Action) ProtoMessage() {} +func (*Action) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{0} +} +func (m *Action) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Action.Unmarshal(m, b) +} +func (m *Action) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Action.Marshal(b, m, deterministic) +} +func (dst *Action) XXX_Merge(src proto.Message) { + xxx_messageInfo_Action.Merge(dst, src) +} +func (m *Action) XXX_Size() int { + return xxx_messageInfo_Action.Size(m) +} +func (m *Action) XXX_DiscardUnknown() { + xxx_messageInfo_Action.DiscardUnknown(m) +} + +var xxx_messageInfo_Action proto.InternalMessageInfo + +type PropertyValue struct { + Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"` + BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"` + StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"` + DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"` + Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue,json=pointvalue" json:"pointvalue,omitempty"` + Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue,json=uservalue" json:"uservalue,omitempty"` + Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue,json=referencevalue" json:"referencevalue,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue) Reset() { *m = PropertyValue{} } +func (m *PropertyValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue) ProtoMessage() {} +func (*PropertyValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1} +} +func (m *PropertyValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue.Unmarshal(m, b) +} +func (m *PropertyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue.Marshal(b, m, deterministic) +} +func (dst *PropertyValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue.Merge(dst, src) +} +func (m *PropertyValue) XXX_Size() int { + return xxx_messageInfo_PropertyValue.Size(m) +} +func (m *PropertyValue) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue proto.InternalMessageInfo + +func (m *PropertyValue) GetInt64Value() 
int64 { + if m != nil && m.Int64Value != nil { + return *m.Int64Value + } + return 0 +} + +func (m *PropertyValue) GetBooleanValue() bool { + if m != nil && m.BooleanValue != nil { + return *m.BooleanValue + } + return false +} + +func (m *PropertyValue) GetStringValue() string { + if m != nil && m.StringValue != nil { + return *m.StringValue + } + return "" +} + +func (m *PropertyValue) GetDoubleValue() float64 { + if m != nil && m.DoubleValue != nil { + return *m.DoubleValue + } + return 0 +} + +func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue { + if m != nil { + return m.Pointvalue + } + return nil +} + +func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue { + if m != nil { + return m.Uservalue + } + return nil +} + +func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue { + if m != nil { + return m.Referencevalue + } + return nil +} + +type PropertyValue_PointValue struct { + X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"` + Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} } +func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_PointValue) ProtoMessage() {} +func (*PropertyValue_PointValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 0} +} +func (m *PropertyValue_PointValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue_PointValue.Unmarshal(m, b) +} +func (m *PropertyValue_PointValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue_PointValue.Marshal(b, m, deterministic) +} +func (dst *PropertyValue_PointValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue_PointValue.Merge(dst, src) +} +func (m *PropertyValue_PointValue) XXX_Size() int { + return xxx_messageInfo_PropertyValue_PointValue.Size(m) +} +func (m *PropertyValue_PointValue) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue_PointValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue_PointValue proto.InternalMessageInfo + +func (m *PropertyValue_PointValue) GetX() float64 { + if m != nil && m.X != nil { + return *m.X + } + return 0 +} + +func (m *PropertyValue_PointValue) GetY() float64 { + if m != nil && m.Y != nil { + return *m.Y + } + return 0 +} + +type PropertyValue_UserValue struct { + Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} } +func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_UserValue) ProtoMessage() {} +func (*PropertyValue_UserValue) Descriptor() ([]byte, []int) { + return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 1} +} +func (m *PropertyValue_UserValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue_UserValue.Unmarshal(m, b) +} +func (m *PropertyValue_UserValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue_UserValue.Marshal(b, m, deterministic) +} +func (dst *PropertyValue_UserValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue_UserValue.Merge(dst, src) +} +func (m *PropertyValue_UserValue) XXX_Size() int { + return xxx_messageInfo_PropertyValue_UserValue.Size(m) +} +func (m *PropertyValue_UserValue) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue_UserValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue_UserValue proto.InternalMessageInfo + +func (m *PropertyValue_UserValue) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *PropertyValue_UserValue) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *PropertyValue_UserValue) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *PropertyValue_UserValue) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type PropertyValue_ReferenceValue struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` + Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement,json=pathelement" json:"pathelement,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} } +func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue) ProtoMessage() {} +func (*PropertyValue_ReferenceValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2} +} +func (m *PropertyValue_ReferenceValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue_ReferenceValue.Unmarshal(m, b) +} +func (m *PropertyValue_ReferenceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue_ReferenceValue.Marshal(b, m, deterministic) +} +func (dst *PropertyValue_ReferenceValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue_ReferenceValue.Merge(dst, src) +} +func (m *PropertyValue_ReferenceValue) XXX_Size() int { + return xxx_messageInfo_PropertyValue_ReferenceValue.Size(m) +} +func (m *PropertyValue_ReferenceValue) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue_ReferenceValue.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue_ReferenceValue proto.InternalMessageInfo + +func (m *PropertyValue_ReferenceValue) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *PropertyValue_ReferenceValue) GetPathelement() 
[]*PropertyValue_ReferenceValue_PathElement { + if m != nil { + return m.Pathelement + } + return nil +} + +type PropertyValue_ReferenceValue_PathElement struct { + Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PropertyValue_ReferenceValue_PathElement) Reset() { + *m = PropertyValue_ReferenceValue_PathElement{} +} +func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) } +func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {} +func (*PropertyValue_ReferenceValue_PathElement) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{1, 2, 0} +} +func (m *PropertyValue_ReferenceValue_PathElement) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Unmarshal(m, b) +} +func (m *PropertyValue_ReferenceValue_PathElement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Marshal(b, m, deterministic) +} +func (dst *PropertyValue_ReferenceValue_PathElement) XXX_Merge(src proto.Message) { + xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Merge(dst, src) +} +func (m *PropertyValue_ReferenceValue_PathElement) XXX_Size() int { + return xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.Size(m) +} +func (m *PropertyValue_ReferenceValue_PathElement) XXX_DiscardUnknown() { + xxx_messageInfo_PropertyValue_ReferenceValue_PathElement.DiscardUnknown(m) +} + +var xxx_messageInfo_PropertyValue_ReferenceValue_PathElement proto.InternalMessageInfo + +func (m *PropertyValue_ReferenceValue_PathElement) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *PropertyValue_ReferenceValue_PathElement) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Property struct { + Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"` + MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri,json=meaningUri" json:"meaning_uri,omitempty"` + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"` + Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"` + Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"` + FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,json=ftsTokenizationOption,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"` + Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Property) Reset() { *m = Property{} } +func (m *Property) String() string { return proto.CompactTextString(m) } +func (*Property) ProtoMessage() {} +func (*Property) Descriptor() ([]byte, []int) { + return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{2} +} +func (m *Property) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Property.Unmarshal(m, b) +} +func (m *Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Property.Marshal(b, m, deterministic) +} +func (dst *Property) XXX_Merge(src proto.Message) { + xxx_messageInfo_Property.Merge(dst, src) +} +func (m *Property) XXX_Size() int { + return xxx_messageInfo_Property.Size(m) +} +func (m *Property) XXX_DiscardUnknown() { + xxx_messageInfo_Property.DiscardUnknown(m) +} + +var xxx_messageInfo_Property proto.InternalMessageInfo + +const Default_Property_Meaning Property_Meaning = Property_NO_MEANING +const Default_Property_Searchable bool = false +const Default_Property_Locale string = "en" + +func (m *Property) GetMeaning() Property_Meaning { + if m != nil && m.Meaning != nil { + return *m.Meaning + } + return Default_Property_Meaning +} + +func (m *Property) GetMeaningUri() string { + if m != nil && m.MeaningUri != nil { + return *m.MeaningUri + } + return "" +} + +func (m *Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Property) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +func (m *Property) GetMultiple() bool { + if m != nil && m.Multiple != nil { + return *m.Multiple + } + return false +} + +func (m *Property) GetSearchable() bool { + if m != nil && m.Searchable != nil { + return *m.Searchable + } + return Default_Property_Searchable +} + +func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption { + if m != nil && m.FtsTokenizationOption != nil { + return *m.FtsTokenizationOption + } + return Property_HTML +} + +func (m *Property) GetLocale() string { + if m != nil && m.Locale != nil { + return *m.Locale + } + return Default_Property_Locale +} + +type Path struct { + Element []*Path_Element `protobuf:"group,1,rep,name=Element,json=element" json:"element,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Path) Reset() { *m = Path{} } +func (m *Path) String() string { return proto.CompactTextString(m) } +func (*Path) ProtoMessage() {} +func (*Path) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3} +} +func (m *Path) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Path.Unmarshal(m, b) +} +func (m *Path) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Path.Marshal(b, m, deterministic) +} +func (dst *Path) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path.Merge(dst, src) +} +func (m *Path) XXX_Size() int { + return xxx_messageInfo_Path.Size(m) +} +func (m *Path) XXX_DiscardUnknown() { + xxx_messageInfo_Path.DiscardUnknown(m) +} + +var xxx_messageInfo_Path proto.InternalMessageInfo + +func (m *Path) GetElement() []*Path_Element { + if m != nil { + return m.Element + } + return nil +} + +type Path_Element struct { + Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"` + Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"` + Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Path_Element) Reset() { *m = Path_Element{} } +func (m *Path_Element) String() string { return proto.CompactTextString(m) } +func (*Path_Element) 
ProtoMessage() {} +func (*Path_Element) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{3, 0} +} +func (m *Path_Element) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Path_Element.Unmarshal(m, b) +} +func (m *Path_Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Path_Element.Marshal(b, m, deterministic) +} +func (dst *Path_Element) XXX_Merge(src proto.Message) { + xxx_messageInfo_Path_Element.Merge(dst, src) +} +func (m *Path_Element) XXX_Size() int { + return xxx_messageInfo_Path_Element.Size(m) +} +func (m *Path_Element) XXX_DiscardUnknown() { + xxx_messageInfo_Path_Element.DiscardUnknown(m) +} + +var xxx_messageInfo_Path_Element proto.InternalMessageInfo + +func (m *Path_Element) GetType() string { + if m != nil && m.Type != nil { + return *m.Type + } + return "" +} + +func (m *Path_Element) GetId() int64 { + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *Path_Element) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +type Reference struct { + App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,20,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` + Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Reference) Reset() { *m = Reference{} } +func (m *Reference) String() string { return proto.CompactTextString(m) } +func (*Reference) ProtoMessage() {} +func (*Reference) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{4} +} +func (m *Reference) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Reference.Unmarshal(m, b) +} +func (m *Reference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Reference.Marshal(b, m, deterministic) +} +func (dst *Reference) XXX_Merge(src proto.Message) { + xxx_messageInfo_Reference.Merge(dst, src) +} +func (m *Reference) XXX_Size() int { + return xxx_messageInfo_Reference.Size(m) +} +func (m *Reference) XXX_DiscardUnknown() { + xxx_messageInfo_Reference.DiscardUnknown(m) +} + +var xxx_messageInfo_Reference proto.InternalMessageInfo + +func (m *Reference) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Reference) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Reference) GetPath() *Path { + if m != nil { + return m.Path + } + return nil +} + +type User struct { + Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"` + AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain,json=authDomain" json:"auth_domain,omitempty"` + Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"` + FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity,json=federatedIdentity" json:"federated_identity,omitempty"` + FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider,json=federatedProvider" json:"federated_provider,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *User) Reset() { *m = User{} } +func (m *User) String() string { return proto.CompactTextString(m) } +func (*User) ProtoMessage() {} +func (*User) Descriptor() ([]byte, []int) { 
+ return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{5} +} +func (m *User) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_User.Unmarshal(m, b) +} +func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_User.Marshal(b, m, deterministic) +} +func (dst *User) XXX_Merge(src proto.Message) { + xxx_messageInfo_User.Merge(dst, src) +} +func (m *User) XXX_Size() int { + return xxx_messageInfo_User.Size(m) +} +func (m *User) XXX_DiscardUnknown() { + xxx_messageInfo_User.DiscardUnknown(m) +} + +var xxx_messageInfo_User proto.InternalMessageInfo + +func (m *User) GetEmail() string { + if m != nil && m.Email != nil { + return *m.Email + } + return "" +} + +func (m *User) GetAuthDomain() string { + if m != nil && m.AuthDomain != nil { + return *m.AuthDomain + } + return "" +} + +func (m *User) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *User) GetFederatedIdentity() string { + if m != nil && m.FederatedIdentity != nil { + return *m.FederatedIdentity + } + return "" +} + +func (m *User) GetFederatedProvider() string { + if m != nil && m.FederatedProvider != nil { + return *m.FederatedProvider + } + return "" +} + +type EntityProto struct { + Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"` + EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group,json=entityGroup" json:"entity_group,omitempty"` + Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"` + Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"` + KindUri *string `protobuf:"bytes,5,opt,name=kind_uri,json=kindUri" json:"kind_uri,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property,json=rawProperty" json:"raw_property,omitempty"` + Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *EntityProto) Reset() { *m = EntityProto{} } +func (m *EntityProto) String() string { return proto.CompactTextString(m) } +func (*EntityProto) ProtoMessage() {} +func (*EntityProto) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{6} +} +func (m *EntityProto) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_EntityProto.Unmarshal(m, b) +} +func (m *EntityProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_EntityProto.Marshal(b, m, deterministic) +} +func (dst *EntityProto) XXX_Merge(src proto.Message) { + xxx_messageInfo_EntityProto.Merge(dst, src) +} +func (m *EntityProto) XXX_Size() int { + return xxx_messageInfo_EntityProto.Size(m) +} +func (m *EntityProto) XXX_DiscardUnknown() { + xxx_messageInfo_EntityProto.DiscardUnknown(m) +} + +var xxx_messageInfo_EntityProto proto.InternalMessageInfo + +func (m *EntityProto) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *EntityProto) GetEntityGroup() *Path { + if m != nil { + return m.EntityGroup + } + return nil +} + +func (m *EntityProto) GetOwner() *User { + if m != nil { + return m.Owner + } + return nil +} + +func (m *EntityProto) GetKind() EntityProto_Kind { + if m != nil && m.Kind != nil { + return *m.Kind + } + return EntityProto_GD_CONTACT +} + +func (m *EntityProto) GetKindUri() string { + if m != nil && 
m.KindUri != nil { + return *m.KindUri + } + return "" +} + +func (m *EntityProto) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +func (m *EntityProto) GetRawProperty() []*Property { + if m != nil { + return m.RawProperty + } + return nil +} + +func (m *EntityProto) GetRank() int32 { + if m != nil && m.Rank != nil { + return *m.Rank + } + return 0 +} + +type CompositeProperty struct { + IndexId *int64 `protobuf:"varint,1,req,name=index_id,json=indexId" json:"index_id,omitempty"` + Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompositeProperty) Reset() { *m = CompositeProperty{} } +func (m *CompositeProperty) String() string { return proto.CompactTextString(m) } +func (*CompositeProperty) ProtoMessage() {} +func (*CompositeProperty) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{7} +} +func (m *CompositeProperty) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompositeProperty.Unmarshal(m, b) +} +func (m *CompositeProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompositeProperty.Marshal(b, m, deterministic) +} +func (dst *CompositeProperty) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompositeProperty.Merge(dst, src) +} +func (m *CompositeProperty) XXX_Size() int { + return xxx_messageInfo_CompositeProperty.Size(m) +} +func (m *CompositeProperty) XXX_DiscardUnknown() { + xxx_messageInfo_CompositeProperty.DiscardUnknown(m) +} + +var xxx_messageInfo_CompositeProperty proto.InternalMessageInfo + +func (m *CompositeProperty) GetIndexId() int64 { + if m != nil && m.IndexId != nil { + return *m.IndexId + } + return 0 +} + +func (m *CompositeProperty) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Index struct { + EntityType *string `protobuf:"bytes,1,req,name=entity_type,json=entityType" json:"entity_type,omitempty"` + Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"` + Property []*Index_Property `protobuf:"group,2,rep,name=Property,json=property" json:"property,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Index) Reset() { *m = Index{} } +func (m *Index) String() string { return proto.CompactTextString(m) } +func (*Index) ProtoMessage() {} +func (*Index) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8} +} +func (m *Index) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Index.Unmarshal(m, b) +} +func (m *Index) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Index.Marshal(b, m, deterministic) +} +func (dst *Index) XXX_Merge(src proto.Message) { + xxx_messageInfo_Index.Merge(dst, src) +} +func (m *Index) XXX_Size() int { + return xxx_messageInfo_Index.Size(m) +} +func (m *Index) XXX_DiscardUnknown() { + xxx_messageInfo_Index.DiscardUnknown(m) +} + +var xxx_messageInfo_Index proto.InternalMessageInfo + +func (m *Index) GetEntityType() string { + if m != nil && m.EntityType != nil { + return *m.EntityType + } + return "" +} + +func (m *Index) GetAncestor() bool { + if m != nil && m.Ancestor != nil { + return *m.Ancestor + } + return false +} + +func (m *Index) GetProperty() []*Index_Property { + if m != nil { + return m.Property + } + return nil +} + 
+type Index_Property struct { + Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"` + Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Index_Property) Reset() { *m = Index_Property{} } +func (m *Index_Property) String() string { return proto.CompactTextString(m) } +func (*Index_Property) ProtoMessage() {} +func (*Index_Property) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{8, 0} +} +func (m *Index_Property) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Index_Property.Unmarshal(m, b) +} +func (m *Index_Property) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Index_Property.Marshal(b, m, deterministic) +} +func (dst *Index_Property) XXX_Merge(src proto.Message) { + xxx_messageInfo_Index_Property.Merge(dst, src) +} +func (m *Index_Property) XXX_Size() int { + return xxx_messageInfo_Index_Property.Size(m) +} +func (m *Index_Property) XXX_DiscardUnknown() { + xxx_messageInfo_Index_Property.DiscardUnknown(m) +} + +var xxx_messageInfo_Index_Property proto.InternalMessageInfo + +const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING + +func (m *Index_Property) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Index_Property) GetDirection() Index_Property_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Index_Property_Direction +} + +type CompositeIndex struct { + AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` + Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"` + Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"` + State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"` + OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,json=onlyUseIfRequired,def=0" json:"only_use_if_required,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompositeIndex) Reset() { *m = CompositeIndex{} } +func (m *CompositeIndex) String() string { return proto.CompactTextString(m) } +func (*CompositeIndex) ProtoMessage() {} +func (*CompositeIndex) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{9} +} +func (m *CompositeIndex) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompositeIndex.Unmarshal(m, b) +} +func (m *CompositeIndex) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompositeIndex.Marshal(b, m, deterministic) +} +func (dst *CompositeIndex) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompositeIndex.Merge(dst, src) +} +func (m *CompositeIndex) XXX_Size() int { + return xxx_messageInfo_CompositeIndex.Size(m) +} +func (m *CompositeIndex) XXX_DiscardUnknown() { + xxx_messageInfo_CompositeIndex.DiscardUnknown(m) +} + +var xxx_messageInfo_CompositeIndex proto.InternalMessageInfo + +const Default_CompositeIndex_OnlyUseIfRequired bool = false + +func (m *CompositeIndex) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *CompositeIndex) GetId() int64 
{ + if m != nil && m.Id != nil { + return *m.Id + } + return 0 +} + +func (m *CompositeIndex) GetDefinition() *Index { + if m != nil { + return m.Definition + } + return nil +} + +func (m *CompositeIndex) GetState() CompositeIndex_State { + if m != nil && m.State != nil { + return *m.State + } + return CompositeIndex_WRITE_ONLY +} + +func (m *CompositeIndex) GetOnlyUseIfRequired() bool { + if m != nil && m.OnlyUseIfRequired != nil { + return *m.OnlyUseIfRequired + } + return Default_CompositeIndex_OnlyUseIfRequired +} + +type IndexPostfix struct { + IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value,json=indexValue" json:"index_value,omitempty"` + Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexPostfix) Reset() { *m = IndexPostfix{} } +func (m *IndexPostfix) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix) ProtoMessage() {} +func (*IndexPostfix) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10} +} +func (m *IndexPostfix) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexPostfix.Unmarshal(m, b) +} +func (m *IndexPostfix) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexPostfix.Marshal(b, m, deterministic) +} +func (dst *IndexPostfix) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexPostfix.Merge(dst, src) +} +func (m *IndexPostfix) XXX_Size() int { + return xxx_messageInfo_IndexPostfix.Size(m) +} +func (m *IndexPostfix) XXX_DiscardUnknown() { + xxx_messageInfo_IndexPostfix.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexPostfix proto.InternalMessageInfo + +const Default_IndexPostfix_Before bool = true + +func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue { + if m != nil { + return m.IndexValue + } + return nil +} + +func (m *IndexPostfix) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *IndexPostfix) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPostfix_Before +} + +type IndexPostfix_IndexValue struct { + PropertyName *string `protobuf:"bytes,1,req,name=property_name,json=propertyName" json:"property_name,omitempty"` + Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} } +func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) } +func (*IndexPostfix_IndexValue) ProtoMessage() {} +func (*IndexPostfix_IndexValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{10, 0} +} +func (m *IndexPostfix_IndexValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexPostfix_IndexValue.Unmarshal(m, b) +} +func (m *IndexPostfix_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexPostfix_IndexValue.Marshal(b, m, deterministic) +} +func (dst *IndexPostfix_IndexValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexPostfix_IndexValue.Merge(dst, src) +} +func (m *IndexPostfix_IndexValue) XXX_Size() int { + return xxx_messageInfo_IndexPostfix_IndexValue.Size(m) 
+} +func (m *IndexPostfix_IndexValue) XXX_DiscardUnknown() { + xxx_messageInfo_IndexPostfix_IndexValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexPostfix_IndexValue proto.InternalMessageInfo + +func (m *IndexPostfix_IndexValue) GetPropertyName() string { + if m != nil && m.PropertyName != nil { + return *m.PropertyName + } + return "" +} + +func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type IndexPosition struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IndexPosition) Reset() { *m = IndexPosition{} } +func (m *IndexPosition) String() string { return proto.CompactTextString(m) } +func (*IndexPosition) ProtoMessage() {} +func (*IndexPosition) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{11} +} +func (m *IndexPosition) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IndexPosition.Unmarshal(m, b) +} +func (m *IndexPosition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IndexPosition.Marshal(b, m, deterministic) +} +func (dst *IndexPosition) XXX_Merge(src proto.Message) { + xxx_messageInfo_IndexPosition.Merge(dst, src) +} +func (m *IndexPosition) XXX_Size() int { + return xxx_messageInfo_IndexPosition.Size(m) +} +func (m *IndexPosition) XXX_DiscardUnknown() { + xxx_messageInfo_IndexPosition.DiscardUnknown(m) +} + +var xxx_messageInfo_IndexPosition proto.InternalMessageInfo + +const Default_IndexPosition_Before bool = true + +func (m *IndexPosition) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *IndexPosition) GetBefore() bool { + if m != nil && m.Before != nil { + return *m.Before + } + return Default_IndexPosition_Before +} + +type Snapshot struct { + Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{12} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Snapshot.Unmarshal(m, b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) +} +func (dst *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(dst, src) +} +func (m *Snapshot) XXX_Size() int { + return xxx_messageInfo_Snapshot.Size(m) +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetTs() int64 { + if m != nil && m.Ts != nil { + return *m.Ts + } + return 0 +} + +type InternalHeader struct { + Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InternalHeader) Reset() { *m = InternalHeader{} } +func (m *InternalHeader) String() string { return proto.CompactTextString(m) } +func 
(*InternalHeader) ProtoMessage() {} +func (*InternalHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{13} +} +func (m *InternalHeader) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InternalHeader.Unmarshal(m, b) +} +func (m *InternalHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InternalHeader.Marshal(b, m, deterministic) +} +func (dst *InternalHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_InternalHeader.Merge(dst, src) +} +func (m *InternalHeader) XXX_Size() int { + return xxx_messageInfo_InternalHeader.Size(m) +} +func (m *InternalHeader) XXX_DiscardUnknown() { + xxx_messageInfo_InternalHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_InternalHeader proto.InternalMessageInfo + +func (m *InternalHeader) GetQos() string { + if m != nil && m.Qos != nil { + return *m.Qos + } + return "" +} + +type Transaction struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"` + App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"` + MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Transaction) Reset() { *m = Transaction{} } +func (m *Transaction) String() string { return proto.CompactTextString(m) } +func (*Transaction) ProtoMessage() {} +func (*Transaction) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{14} +} +func (m *Transaction) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Transaction.Unmarshal(m, b) +} +func (m *Transaction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Transaction.Marshal(b, m, deterministic) +} +func (dst *Transaction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Transaction.Merge(dst, src) +} +func (m *Transaction) XXX_Size() int { + return xxx_messageInfo_Transaction.Size(m) +} +func (m *Transaction) XXX_DiscardUnknown() { + xxx_messageInfo_Transaction.DiscardUnknown(m) +} + +var xxx_messageInfo_Transaction proto.InternalMessageInfo + +const Default_Transaction_MarkChanges bool = false + +func (m *Transaction) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Transaction) GetHandle() uint64 { + if m != nil && m.Handle != nil { + return *m.Handle + } + return 0 +} + +func (m *Transaction) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *Transaction) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_Transaction_MarkChanges +} + +type Query struct { + Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + NameSpace *string `protobuf:"bytes,29,opt,name=name_space,json=nameSpace" json:"name_space,omitempty"` + Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"` + Filter []*Query_Filter `protobuf:"group,4,rep,name=Filter,json=filter" json:"filter,omitempty"` + SearchQuery *string `protobuf:"bytes,8,opt,name=search_query,json=searchQuery" json:"search_query,omitempty"` + Order []*Query_Order 
`protobuf:"group,9,rep,name=Order,json=order" json:"order,omitempty"` + Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"` + Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"` + EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor,json=endCompiledCursor" json:"end_compiled_cursor,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` + RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,json=requirePerfectPlan,def=0" json:"require_perfect_plan,omitempty"` + KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,json=keysOnly,def=0" json:"keys_only,omitempty"` + Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"` + Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"` + FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"` + PropertyName []string `protobuf:"bytes,33,rep,name=property_name,json=propertyName" json:"property_name,omitempty"` + GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name,json=groupByPropertyName" json:"group_by_property_name,omitempty"` + Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"` + MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds,json=minSafeTimeSeconds" json:"min_safe_time_seconds,omitempty"` + SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name,json=safeReplicaName" json:"safe_replica_name,omitempty"` + PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,json=persistOffset,def=0" json:"persist_offset,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} +func (*Query) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15} +} +func (m *Query) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Query.Unmarshal(m, b) +} +func (m *Query) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Query.Marshal(b, m, deterministic) +} +func (dst *Query) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query.Merge(dst, src) +} +func (m *Query) XXX_Size() int { + return xxx_messageInfo_Query.Size(m) +} +func (m *Query) XXX_DiscardUnknown() { + xxx_messageInfo_Query.DiscardUnknown(m) +} + +var xxx_messageInfo_Query proto.InternalMessageInfo + +const Default_Query_Offset int32 = 0 +const Default_Query_RequirePerfectPlan bool = false +const Default_Query_KeysOnly bool = false +const Default_Query_Compile bool = false +const Default_Query_PersistOffset bool = false + +func (m *Query) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *Query) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + 
return "" +} + +func (m *Query) GetNameSpace() string { + if m != nil && m.NameSpace != nil { + return *m.NameSpace + } + return "" +} + +func (m *Query) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *Query) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +func (m *Query) GetFilter() []*Query_Filter { + if m != nil { + return m.Filter + } + return nil +} + +func (m *Query) GetSearchQuery() string { + if m != nil && m.SearchQuery != nil { + return *m.SearchQuery + } + return "" +} + +func (m *Query) GetOrder() []*Query_Order { + if m != nil { + return m.Order + } + return nil +} + +func (m *Query) GetHint() Query_Hint { + if m != nil && m.Hint != nil { + return *m.Hint + } + return Query_ORDER_FIRST +} + +func (m *Query) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *Query) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_Query_Offset +} + +func (m *Query) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *Query) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *Query) GetEndCompiledCursor() *CompiledCursor { + if m != nil { + return m.EndCompiledCursor + } + return nil +} + +func (m *Query) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *Query) GetRequirePerfectPlan() bool { + if m != nil && m.RequirePerfectPlan != nil { + return *m.RequirePerfectPlan + } + return Default_Query_RequirePerfectPlan +} + +func (m *Query) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return Default_Query_KeysOnly +} + +func (m *Query) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *Query) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_Query_Compile +} + +func (m *Query) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *Query) GetStrong() bool { + if m != nil && m.Strong != nil { + return *m.Strong + } + return false +} + +func (m *Query) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *Query) GetGroupByPropertyName() []string { + if m != nil { + return m.GroupByPropertyName + } + return nil +} + +func (m *Query) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return false +} + +func (m *Query) GetMinSafeTimeSeconds() int64 { + if m != nil && m.MinSafeTimeSeconds != nil { + return *m.MinSafeTimeSeconds + } + return 0 +} + +func (m *Query) GetSafeReplicaName() []string { + if m != nil { + return m.SafeReplicaName + } + return nil +} + +func (m *Query) GetPersistOffset() bool { + if m != nil && m.PersistOffset != nil { + return *m.PersistOffset + } + return Default_Query_PersistOffset +} + +type Query_Filter struct { + Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"` + Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query_Filter) Reset() { *m = Query_Filter{} } +func (m *Query_Filter) String() string { return 
proto.CompactTextString(m) } +func (*Query_Filter) ProtoMessage() {} +func (*Query_Filter) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 0} +} +func (m *Query_Filter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Query_Filter.Unmarshal(m, b) +} +func (m *Query_Filter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Query_Filter.Marshal(b, m, deterministic) +} +func (dst *Query_Filter) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query_Filter.Merge(dst, src) +} +func (m *Query_Filter) XXX_Size() int { + return xxx_messageInfo_Query_Filter.Size(m) +} +func (m *Query_Filter) XXX_DiscardUnknown() { + xxx_messageInfo_Query_Filter.DiscardUnknown(m) +} + +var xxx_messageInfo_Query_Filter proto.InternalMessageInfo + +func (m *Query_Filter) GetOp() Query_Filter_Operator { + if m != nil && m.Op != nil { + return *m.Op + } + return Query_Filter_LESS_THAN +} + +func (m *Query_Filter) GetProperty() []*Property { + if m != nil { + return m.Property + } + return nil +} + +type Query_Order struct { + Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"` + Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Query_Order) Reset() { *m = Query_Order{} } +func (m *Query_Order) String() string { return proto.CompactTextString(m) } +func (*Query_Order) ProtoMessage() {} +func (*Query_Order) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{15, 1} +} +func (m *Query_Order) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Query_Order.Unmarshal(m, b) +} +func (m *Query_Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Query_Order.Marshal(b, m, deterministic) +} +func (dst *Query_Order) XXX_Merge(src proto.Message) { + xxx_messageInfo_Query_Order.Merge(dst, src) +} +func (m *Query_Order) XXX_Size() int { + return xxx_messageInfo_Query_Order.Size(m) +} +func (m *Query_Order) XXX_DiscardUnknown() { + xxx_messageInfo_Query_Order.DiscardUnknown(m) +} + +var xxx_messageInfo_Query_Order proto.InternalMessageInfo + +const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING + +func (m *Query_Order) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *Query_Order) GetDirection() Query_Order_Direction { + if m != nil && m.Direction != nil { + return *m.Direction + } + return Default_Query_Order_Direction +} + +type CompiledQuery struct { + Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan,json=primaryscan" json:"primaryscan,omitempty"` + Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan,json=mergejoinscan" json:"mergejoinscan,omitempty"` + IndexDef *Index `protobuf:"bytes,21,opt,name=index_def,json=indexDef" json:"index_def,omitempty"` + Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"` + Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"` + KeysOnly *bool `protobuf:"varint,12,req,name=keys_only,json=keysOnly" json:"keys_only,omitempty"` + PropertyName []string `protobuf:"bytes,24,rep,name=property_name,json=propertyName" json:"property_name,omitempty"` + DistinctInfixSize *int32 
`protobuf:"varint,25,opt,name=distinct_infix_size,json=distinctInfixSize" json:"distinct_infix_size,omitempty"` + Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter,json=entityfilter" json:"entityfilter,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledQuery) Reset() { *m = CompiledQuery{} } +func (m *CompiledQuery) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery) ProtoMessage() {} +func (*CompiledQuery) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16} +} +func (m *CompiledQuery) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledQuery.Unmarshal(m, b) +} +func (m *CompiledQuery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledQuery.Marshal(b, m, deterministic) +} +func (dst *CompiledQuery) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledQuery.Merge(dst, src) +} +func (m *CompiledQuery) XXX_Size() int { + return xxx_messageInfo_CompiledQuery.Size(m) +} +func (m *CompiledQuery) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledQuery.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledQuery proto.InternalMessageInfo + +const Default_CompiledQuery_Offset int32 = 0 + +func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan { + if m != nil { + return m.Primaryscan + } + return nil +} + +func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan { + if m != nil { + return m.Mergejoinscan + } + return nil +} + +func (m *CompiledQuery) GetIndexDef() *Index { + if m != nil { + return m.IndexDef + } + return nil +} + +func (m *CompiledQuery) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_CompiledQuery_Offset +} + +func (m *CompiledQuery) GetLimit() int32 { + if m != nil && m.Limit != nil { + return *m.Limit + } + return 0 +} + +func (m *CompiledQuery) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + +func (m *CompiledQuery) GetPropertyName() []string { + if m != nil { + return m.PropertyName + } + return nil +} + +func (m *CompiledQuery) GetDistinctInfixSize() int32 { + if m != nil && m.DistinctInfixSize != nil { + return *m.DistinctInfixSize + } + return 0 +} + +func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter { + if m != nil { + return m.Entityfilter + } + return nil +} + +type CompiledQuery_PrimaryScan struct { + IndexName *string `protobuf:"bytes,2,opt,name=index_name,json=indexName" json:"index_name,omitempty"` + StartKey *string `protobuf:"bytes,3,opt,name=start_key,json=startKey" json:"start_key,omitempty"` + StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive,json=startInclusive" json:"start_inclusive,omitempty"` + EndKey *string `protobuf:"bytes,5,opt,name=end_key,json=endKey" json:"end_key,omitempty"` + EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive,json=endInclusive" json:"end_inclusive,omitempty"` + StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value,json=startPostfixValue" json:"start_postfix_value,omitempty"` + EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value,json=endPostfixValue" json:"end_postfix_value,omitempty"` + EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us,json=endUnappliedLogTimestampUs" json:"end_unapplied_log_timestamp_us,omitempty"` + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} } +func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_PrimaryScan) ProtoMessage() {} +func (*CompiledQuery_PrimaryScan) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 0} +} +func (m *CompiledQuery_PrimaryScan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledQuery_PrimaryScan.Unmarshal(m, b) +} +func (m *CompiledQuery_PrimaryScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledQuery_PrimaryScan.Marshal(b, m, deterministic) +} +func (dst *CompiledQuery_PrimaryScan) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledQuery_PrimaryScan.Merge(dst, src) +} +func (m *CompiledQuery_PrimaryScan) XXX_Size() int { + return xxx_messageInfo_CompiledQuery_PrimaryScan.Size(m) +} +func (m *CompiledQuery_PrimaryScan) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledQuery_PrimaryScan.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledQuery_PrimaryScan proto.InternalMessageInfo + +func (m *CompiledQuery_PrimaryScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetEndKey() string { + if m != nil && m.EndKey != nil { + return *m.EndKey + } + return "" +} + +func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool { + if m != nil && m.EndInclusive != nil { + return *m.EndInclusive + } + return false +} + +func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string { + if m != nil { + return m.StartPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string { + if m != nil { + return m.EndPostfixValue + } + return nil +} + +func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 { + if m != nil && m.EndUnappliedLogTimestampUs != nil { + return *m.EndUnappliedLogTimestampUs + } + return 0 +} + +type CompiledQuery_MergeJoinScan struct { + IndexName *string `protobuf:"bytes,8,req,name=index_name,json=indexName" json:"index_name,omitempty"` + PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value,json=prefixValue" json:"prefix_value,omitempty"` + ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,json=valuePrefix,def=0" json:"value_prefix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} } +func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_MergeJoinScan) ProtoMessage() {} +func (*CompiledQuery_MergeJoinScan) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 1} +} +func (m *CompiledQuery_MergeJoinScan) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledQuery_MergeJoinScan.Unmarshal(m, b) +} +func (m *CompiledQuery_MergeJoinScan) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
return xxx_messageInfo_CompiledQuery_MergeJoinScan.Marshal(b, m, deterministic) +} +func (dst *CompiledQuery_MergeJoinScan) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledQuery_MergeJoinScan.Merge(dst, src) +} +func (m *CompiledQuery_MergeJoinScan) XXX_Size() int { + return xxx_messageInfo_CompiledQuery_MergeJoinScan.Size(m) +} +func (m *CompiledQuery_MergeJoinScan) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledQuery_MergeJoinScan.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledQuery_MergeJoinScan proto.InternalMessageInfo + +const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false + +func (m *CompiledQuery_MergeJoinScan) GetIndexName() string { + if m != nil && m.IndexName != nil { + return *m.IndexName + } + return "" +} + +func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string { + if m != nil { + return m.PrefixValue + } + return nil +} + +func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool { + if m != nil && m.ValuePrefix != nil { + return *m.ValuePrefix + } + return Default_CompiledQuery_MergeJoinScan_ValuePrefix +} + +type CompiledQuery_EntityFilter struct { + Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"` + Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"` + Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} } +func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) } +func (*CompiledQuery_EntityFilter) ProtoMessage() {} +func (*CompiledQuery_EntityFilter) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{16, 2} +} +func (m *CompiledQuery_EntityFilter) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledQuery_EntityFilter.Unmarshal(m, b) +} +func (m *CompiledQuery_EntityFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledQuery_EntityFilter.Marshal(b, m, deterministic) +} +func (dst *CompiledQuery_EntityFilter) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledQuery_EntityFilter.Merge(dst, src) +} +func (m *CompiledQuery_EntityFilter) XXX_Size() int { + return xxx_messageInfo_CompiledQuery_EntityFilter.Size(m) +} +func (m *CompiledQuery_EntityFilter) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledQuery_EntityFilter.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledQuery_EntityFilter proto.InternalMessageInfo + +const Default_CompiledQuery_EntityFilter_Distinct bool = false + +func (m *CompiledQuery_EntityFilter) GetDistinct() bool { + if m != nil && m.Distinct != nil { + return *m.Distinct + } + return Default_CompiledQuery_EntityFilter_Distinct +} + +func (m *CompiledQuery_EntityFilter) GetKind() string { + if m != nil && m.Kind != nil { + return *m.Kind + } + return "" +} + +func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference { + if m != nil { + return m.Ancestor + } + return nil +} + +type CompiledCursor struct { + Position *CompiledCursor_Position `protobuf:"group,2,opt,name=Position,json=position" json:"position,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledCursor) Reset() { *m = CompiledCursor{} } +func (m *CompiledCursor) String() string { return proto.CompactTextString(m) } +func 
(*CompiledCursor) ProtoMessage() {} +func (*CompiledCursor) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17} +} +func (m *CompiledCursor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledCursor.Unmarshal(m, b) +} +func (m *CompiledCursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledCursor.Marshal(b, m, deterministic) +} +func (dst *CompiledCursor) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledCursor.Merge(dst, src) +} +func (m *CompiledCursor) XXX_Size() int { + return xxx_messageInfo_CompiledCursor.Size(m) +} +func (m *CompiledCursor) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledCursor.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledCursor proto.InternalMessageInfo + +func (m *CompiledCursor) GetPosition() *CompiledCursor_Position { + if m != nil { + return m.Position + } + return nil +} + +type CompiledCursor_Position struct { + StartKey *string `protobuf:"bytes,27,opt,name=start_key,json=startKey" json:"start_key,omitempty"` + Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue,json=indexvalue" json:"indexvalue,omitempty"` + Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"` + StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,json=startInclusive,def=1" json:"start_inclusive,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} } +func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position) ProtoMessage() {} +func (*CompiledCursor_Position) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0} +} +func (m *CompiledCursor_Position) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledCursor_Position.Unmarshal(m, b) +} +func (m *CompiledCursor_Position) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledCursor_Position.Marshal(b, m, deterministic) +} +func (dst *CompiledCursor_Position) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledCursor_Position.Merge(dst, src) +} +func (m *CompiledCursor_Position) XXX_Size() int { + return xxx_messageInfo_CompiledCursor_Position.Size(m) +} +func (m *CompiledCursor_Position) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledCursor_Position.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledCursor_Position proto.InternalMessageInfo + +const Default_CompiledCursor_Position_StartInclusive bool = true + +func (m *CompiledCursor_Position) GetStartKey() string { + if m != nil && m.StartKey != nil { + return *m.StartKey + } + return "" +} + +func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue { + if m != nil { + return m.Indexvalue + } + return nil +} + +func (m *CompiledCursor_Position) GetKey() *Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *CompiledCursor_Position) GetStartInclusive() bool { + if m != nil && m.StartInclusive != nil { + return *m.StartInclusive + } + return Default_CompiledCursor_Position_StartInclusive +} + +type CompiledCursor_Position_IndexValue struct { + Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"` + Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} } +func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) } +func (*CompiledCursor_Position_IndexValue) ProtoMessage() {} +func (*CompiledCursor_Position_IndexValue) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{17, 0, 0} +} +func (m *CompiledCursor_Position_IndexValue) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompiledCursor_Position_IndexValue.Unmarshal(m, b) +} +func (m *CompiledCursor_Position_IndexValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompiledCursor_Position_IndexValue.Marshal(b, m, deterministic) +} +func (dst *CompiledCursor_Position_IndexValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompiledCursor_Position_IndexValue.Merge(dst, src) +} +func (m *CompiledCursor_Position_IndexValue) XXX_Size() int { + return xxx_messageInfo_CompiledCursor_Position_IndexValue.Size(m) +} +func (m *CompiledCursor_Position_IndexValue) XXX_DiscardUnknown() { + xxx_messageInfo_CompiledCursor_Position_IndexValue.DiscardUnknown(m) +} + +var xxx_messageInfo_CompiledCursor_Position_IndexValue proto.InternalMessageInfo + +func (m *CompiledCursor_Position_IndexValue) GetProperty() string { + if m != nil && m.Property != nil { + return *m.Property + } + return "" +} + +func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue { + if m != nil { + return m.Value + } + return nil +} + +type Cursor struct { + Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"` + App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cursor) Reset() { *m = Cursor{} } +func (m *Cursor) String() string { return proto.CompactTextString(m) } +func (*Cursor) ProtoMessage() {} +func (*Cursor) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{18} +} +func (m *Cursor) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cursor.Unmarshal(m, b) +} +func (m *Cursor) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cursor.Marshal(b, m, deterministic) +} +func (dst *Cursor) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cursor.Merge(dst, src) +} +func (m *Cursor) XXX_Size() int { + return xxx_messageInfo_Cursor.Size(m) +} +func (m *Cursor) XXX_DiscardUnknown() { + xxx_messageInfo_Cursor.DiscardUnknown(m) +} + +var xxx_messageInfo_Cursor proto.InternalMessageInfo + +func (m *Cursor) GetCursor() uint64 { + if m != nil && m.Cursor != nil { + return *m.Cursor + } + return 0 +} + +func (m *Cursor) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +type Error struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{19} +} +func (m *Error) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Error.Unmarshal(m, b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + 
return xxx_messageInfo_Error.Marshal(b, m, deterministic) +} +func (dst *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(dst, src) +} +func (m *Error) XXX_Size() int { + return xxx_messageInfo_Error.Size(m) +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +type Cost struct { + IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes,json=indexWrites" json:"index_writes,omitempty"` + IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes,json=indexWriteBytes" json:"index_write_bytes,omitempty"` + EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes,json=entityWrites" json:"entity_writes,omitempty"` + EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes,json=entityWriteBytes" json:"entity_write_bytes,omitempty"` + Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost,json=commitcost" json:"commitcost,omitempty"` + ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta,json=approximateStorageDelta" json:"approximate_storage_delta,omitempty"` + IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates,json=idSequenceUpdates" json:"id_sequence_updates,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cost) Reset() { *m = Cost{} } +func (m *Cost) String() string { return proto.CompactTextString(m) } +func (*Cost) ProtoMessage() {} +func (*Cost) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20} +} +func (m *Cost) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cost.Unmarshal(m, b) +} +func (m *Cost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cost.Marshal(b, m, deterministic) +} +func (dst *Cost) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cost.Merge(dst, src) +} +func (m *Cost) XXX_Size() int { + return xxx_messageInfo_Cost.Size(m) +} +func (m *Cost) XXX_DiscardUnknown() { + xxx_messageInfo_Cost.DiscardUnknown(m) +} + +var xxx_messageInfo_Cost proto.InternalMessageInfo + +func (m *Cost) GetIndexWrites() int32 { + if m != nil && m.IndexWrites != nil { + return *m.IndexWrites + } + return 0 +} + +func (m *Cost) GetIndexWriteBytes() int32 { + if m != nil && m.IndexWriteBytes != nil { + return *m.IndexWriteBytes + } + return 0 +} + +func (m *Cost) GetEntityWrites() int32 { + if m != nil && m.EntityWrites != nil { + return *m.EntityWrites + } + return 0 +} + +func (m *Cost) GetEntityWriteBytes() int32 { + if m != nil && m.EntityWriteBytes != nil { + return *m.EntityWriteBytes + } + return 0 +} + +func (m *Cost) GetCommitcost() *Cost_CommitCost { + if m != nil { + return m.Commitcost + } + return nil +} + +func (m *Cost) GetApproximateStorageDelta() int32 { + if m != nil && m.ApproximateStorageDelta != nil { + return *m.ApproximateStorageDelta + } + return 0 +} + +func (m *Cost) GetIdSequenceUpdates() int32 { + if m != nil && m.IdSequenceUpdates != nil { + return *m.IdSequenceUpdates + } + return 0 +} + +type Cost_CommitCost struct { + RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts,json=requestedEntityPuts" json:"requested_entity_puts,omitempty"` + RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes,json=requestedEntityDeletes" json:"requested_entity_deletes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized 
[]byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} } +func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) } +func (*Cost_CommitCost) ProtoMessage() {} +func (*Cost_CommitCost) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{20, 0} +} +func (m *Cost_CommitCost) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Cost_CommitCost.Unmarshal(m, b) +} +func (m *Cost_CommitCost) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Cost_CommitCost.Marshal(b, m, deterministic) +} +func (dst *Cost_CommitCost) XXX_Merge(src proto.Message) { + xxx_messageInfo_Cost_CommitCost.Merge(dst, src) +} +func (m *Cost_CommitCost) XXX_Size() int { + return xxx_messageInfo_Cost_CommitCost.Size(m) +} +func (m *Cost_CommitCost) XXX_DiscardUnknown() { + xxx_messageInfo_Cost_CommitCost.DiscardUnknown(m) +} + +var xxx_messageInfo_Cost_CommitCost proto.InternalMessageInfo + +func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 { + if m != nil && m.RequestedEntityPuts != nil { + return *m.RequestedEntityPuts + } + return 0 +} + +func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 { + if m != nil && m.RequestedEntityDeletes != nil { + return *m.RequestedEntityDeletes + } + return 0 +} + +type GetRequest struct { + Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms,json=failoverMs" json:"failover_ms,omitempty"` + Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"` + AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,json=allowDeferred,def=0" json:"allow_deferred,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{21} +} +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) +} +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) +} +func (dst *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(dst, src) +} +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +const Default_GetRequest_AllowDeferred bool = false + +func (m *GetRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *GetRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *GetRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *GetRequest) GetFailoverMs() int64 { + if m != nil && m.FailoverMs != nil { + return *m.FailoverMs + } + return 0 +} + +func (m *GetRequest) GetStrong() bool { + if m != nil && m.Strong != nil { + return 
*m.Strong + } + return false +} + +func (m *GetRequest) GetAllowDeferred() bool { + if m != nil && m.AllowDeferred != nil { + return *m.AllowDeferred + } + return Default_GetRequest_AllowDeferred +} + +type GetResponse struct { + Entity []*GetResponse_Entity `protobuf:"group,1,rep,name=Entity,json=entity" json:"entity,omitempty"` + Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"` + InOrder *bool `protobuf:"varint,6,opt,name=in_order,json=inOrder,def=1" json:"in_order,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22} +} +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (dst *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(dst, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +const Default_GetResponse_InOrder bool = true + +func (m *GetResponse) GetEntity() []*GetResponse_Entity { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse) GetDeferred() []*Reference { + if m != nil { + return m.Deferred + } + return nil +} + +func (m *GetResponse) GetInOrder() bool { + if m != nil && m.InOrder != nil { + return *m.InOrder + } + return Default_GetResponse_InOrder +} + +type GetResponse_Entity struct { + Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"` + Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"` + Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} } +func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) } +func (*GetResponse_Entity) ProtoMessage() {} +func (*GetResponse_Entity) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{22, 0} +} +func (m *GetResponse_Entity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse_Entity.Unmarshal(m, b) +} +func (m *GetResponse_Entity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse_Entity.Marshal(b, m, deterministic) +} +func (dst *GetResponse_Entity) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse_Entity.Merge(dst, src) +} +func (m *GetResponse_Entity) XXX_Size() int { + return xxx_messageInfo_GetResponse_Entity.Size(m) +} +func (m *GetResponse_Entity) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse_Entity.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse_Entity proto.InternalMessageInfo + +func (m *GetResponse_Entity) GetEntity() *EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *GetResponse_Entity) GetKey() *Reference { + if m != nil { 
+ return m.Key + } + return nil +} + +func (m *GetResponse_Entity) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +type PutRequest struct { + Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"` + Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"` + Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,json=autoIdPolicy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{23} +} +func (m *PutRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PutRequest.Unmarshal(m, b) +} +func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) +} +func (dst *PutRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutRequest.Merge(dst, src) +} +func (m *PutRequest) XXX_Size() int { + return xxx_messageInfo_PutRequest.Size(m) +} +func (m *PutRequest) XXX_DiscardUnknown() { + xxx_messageInfo_PutRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PutRequest proto.InternalMessageInfo + +const Default_PutRequest_Trusted bool = false +const Default_PutRequest_Force bool = false +const Default_PutRequest_MarkChanges bool = false +const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT + +func (m *PutRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *PutRequest) GetEntity() []*EntityProto { + if m != nil { + return m.Entity + } + return nil +} + +func (m *PutRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *PutRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *PutRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_PutRequest_Trusted +} + +func (m *PutRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_PutRequest_Force +} + +func (m *PutRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_PutRequest_MarkChanges +} + +func (m *PutRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy { + if m != nil && m.AutoIdPolicy != nil { + return *m.AutoIdPolicy + } + return 
Default_PutRequest_AutoIdPolicy +} + +type PutResponse struct { + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{24} +} +func (m *PutResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PutResponse.Unmarshal(m, b) +} +func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) +} +func (dst *PutResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutResponse.Merge(dst, src) +} +func (m *PutResponse) XXX_Size() int { + return xxx_messageInfo_PutResponse.Size(m) +} +func (m *PutResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PutResponse proto.InternalMessageInfo + +func (m *PutResponse) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *PutResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type TouchRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"` + CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index,json=compositeIndex" json:"composite_index,omitempty"` + Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TouchRequest) Reset() { *m = TouchRequest{} } +func (m *TouchRequest) String() string { return proto.CompactTextString(m) } +func (*TouchRequest) ProtoMessage() {} +func (*TouchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{25} +} +func (m *TouchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TouchRequest.Unmarshal(m, b) +} +func (m *TouchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TouchRequest.Marshal(b, m, deterministic) +} +func (dst *TouchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TouchRequest.Merge(dst, src) +} +func (m *TouchRequest) XXX_Size() int { + return xxx_messageInfo_TouchRequest.Size(m) +} +func (m *TouchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TouchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TouchRequest proto.InternalMessageInfo + +const Default_TouchRequest_Force bool = false + +func (m *TouchRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *TouchRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil +} + +func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex { + if m != nil { + return m.CompositeIndex + } + return nil +} + +func (m *TouchRequest) 
GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_TouchRequest_Force +} + +func (m *TouchRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type TouchResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *TouchResponse) Reset() { *m = TouchResponse{} } +func (m *TouchResponse) String() string { return proto.CompactTextString(m) } +func (*TouchResponse) ProtoMessage() {} +func (*TouchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{26} +} +func (m *TouchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_TouchResponse.Unmarshal(m, b) +} +func (m *TouchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_TouchResponse.Marshal(b, m, deterministic) +} +func (dst *TouchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_TouchResponse.Merge(dst, src) +} +func (m *TouchResponse) XXX_Size() int { + return xxx_messageInfo_TouchResponse.Size(m) +} +func (m *TouchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_TouchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_TouchResponse proto.InternalMessageInfo + +func (m *TouchResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type DeleteRequest struct { + Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"` + Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"` + Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"` + Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"` + Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"` + MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,json=markChanges,def=0" json:"mark_changes,omitempty"` + Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{27} +} +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) +} +func (m *DeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) +} +func (dst *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(dst, src) +} +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +const Default_DeleteRequest_Trusted bool = false +const Default_DeleteRequest_Force bool = false +const Default_DeleteRequest_MarkChanges bool = false + +func (m *DeleteRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *DeleteRequest) GetKey() []*Reference { + if m != nil { + return m.Key + } + return nil 
+} + +func (m *DeleteRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *DeleteRequest) GetTrusted() bool { + if m != nil && m.Trusted != nil { + return *m.Trusted + } + return Default_DeleteRequest_Trusted +} + +func (m *DeleteRequest) GetForce() bool { + if m != nil && m.Force != nil { + return *m.Force + } + return Default_DeleteRequest_Force +} + +func (m *DeleteRequest) GetMarkChanges() bool { + if m != nil && m.MarkChanges != nil { + return *m.MarkChanges + } + return Default_DeleteRequest_MarkChanges +} + +func (m *DeleteRequest) GetSnapshot() []*Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +type DeleteResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{28} +} +func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteResponse.Unmarshal(m, b) +} +func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) +} +func (dst *DeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteResponse.Merge(dst, src) +} +func (m *DeleteResponse) XXX_Size() int { + return xxx_messageInfo_DeleteResponse.Size(m) +} +func (m *DeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo + +func (m *DeleteResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *DeleteResponse) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type NextRequest struct { + Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"` + Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"` + Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"` + Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"` + Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NextRequest) Reset() { *m = NextRequest{} } +func (m *NextRequest) String() string { return proto.CompactTextString(m) } +func (*NextRequest) ProtoMessage() {} +func (*NextRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{29} +} +func (m *NextRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NextRequest.Unmarshal(m, b) +} +func (m *NextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NextRequest.Marshal(b, m, deterministic) +} +func (dst *NextRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NextRequest.Merge(dst, src) +} +func (m *NextRequest) XXX_Size() int { + return xxx_messageInfo_NextRequest.Size(m) +} +func (m *NextRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NextRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_NextRequest proto.InternalMessageInfo + +const Default_NextRequest_Offset int32 = 0 +const Default_NextRequest_Compile bool = false + +func (m *NextRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *NextRequest) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *NextRequest) GetCount() int32 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *NextRequest) GetOffset() int32 { + if m != nil && m.Offset != nil { + return *m.Offset + } + return Default_NextRequest_Offset +} + +func (m *NextRequest) GetCompile() bool { + if m != nil && m.Compile != nil { + return *m.Compile + } + return Default_NextRequest_Compile +} + +type QueryResult struct { + Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"` + Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"` + SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results,json=skippedResults" json:"skipped_results,omitempty"` + MoreResults *bool `protobuf:"varint,3,req,name=more_results,json=moreResults" json:"more_results,omitempty"` + KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only,json=keysOnly" json:"keys_only,omitempty"` + IndexOnly *bool `protobuf:"varint,9,opt,name=index_only,json=indexOnly" json:"index_only,omitempty"` + SmallOps *bool `protobuf:"varint,10,opt,name=small_ops,json=smallOps" json:"small_ops,omitempty"` + CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query,json=compiledQuery" json:"compiled_query,omitempty"` + CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor,json=compiledCursor" json:"compiled_cursor,omitempty"` + Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"` + Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *QueryResult) Reset() { *m = QueryResult{} } +func (m *QueryResult) String() string { return proto.CompactTextString(m) } +func (*QueryResult) ProtoMessage() {} +func (*QueryResult) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{30} +} +func (m *QueryResult) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_QueryResult.Unmarshal(m, b) +} +func (m *QueryResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_QueryResult.Marshal(b, m, deterministic) +} +func (dst *QueryResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryResult.Merge(dst, src) +} +func (m *QueryResult) XXX_Size() int { + return xxx_messageInfo_QueryResult.Size(m) +} +func (m *QueryResult) XXX_DiscardUnknown() { + xxx_messageInfo_QueryResult.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryResult proto.InternalMessageInfo + +func (m *QueryResult) GetCursor() *Cursor { + if m != nil { + return m.Cursor + } + return nil +} + +func (m *QueryResult) GetResult() []*EntityProto { + if m != nil { + return m.Result + } + return nil +} + +func (m *QueryResult) GetSkippedResults() int32 { + if m != nil && m.SkippedResults != nil { + return *m.SkippedResults + } + return 0 +} + +func (m *QueryResult) GetMoreResults() bool { + if m != nil && m.MoreResults != nil { + return *m.MoreResults + } + return false +} + +func (m *QueryResult) GetKeysOnly() bool { + if m != nil && m.KeysOnly != nil { + return *m.KeysOnly + } + return false +} + 
+func (m *QueryResult) GetIndexOnly() bool { + if m != nil && m.IndexOnly != nil { + return *m.IndexOnly + } + return false +} + +func (m *QueryResult) GetSmallOps() bool { + if m != nil && m.SmallOps != nil { + return *m.SmallOps + } + return false +} + +func (m *QueryResult) GetCompiledQuery() *CompiledQuery { + if m != nil { + return m.CompiledQuery + } + return nil +} + +func (m *QueryResult) GetCompiledCursor() *CompiledCursor { + if m != nil { + return m.CompiledCursor + } + return nil +} + +func (m *QueryResult) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +func (m *QueryResult) GetVersion() []int64 { + if m != nil { + return m.Version + } + return nil +} + +type AllocateIdsRequest struct { + Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"` + ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key,json=modelKey" json:"model_key,omitempty"` + Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"` + Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} } +func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsRequest) ProtoMessage() {} +func (*AllocateIdsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{31} +} +func (m *AllocateIdsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocateIdsRequest.Unmarshal(m, b) +} +func (m *AllocateIdsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocateIdsRequest.Marshal(b, m, deterministic) +} +func (dst *AllocateIdsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocateIdsRequest.Merge(dst, src) +} +func (m *AllocateIdsRequest) XXX_Size() int { + return xxx_messageInfo_AllocateIdsRequest.Size(m) +} +func (m *AllocateIdsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AllocateIdsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocateIdsRequest proto.InternalMessageInfo + +func (m *AllocateIdsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AllocateIdsRequest) GetModelKey() *Reference { + if m != nil { + return m.ModelKey + } + return nil +} + +func (m *AllocateIdsRequest) GetSize() int64 { + if m != nil && m.Size != nil { + return *m.Size + } + return 0 +} + +func (m *AllocateIdsRequest) GetMax() int64 { + if m != nil && m.Max != nil { + return *m.Max + } + return 0 +} + +func (m *AllocateIdsRequest) GetReserve() []*Reference { + if m != nil { + return m.Reserve + } + return nil +} + +type AllocateIdsResponse struct { + Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"` + End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"` + Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} } +func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) } +func (*AllocateIdsResponse) ProtoMessage() {} +func (*AllocateIdsResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{32} +} +func (m *AllocateIdsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AllocateIdsResponse.Unmarshal(m, b) +} +func (m *AllocateIdsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AllocateIdsResponse.Marshal(b, m, deterministic) +} +func (dst *AllocateIdsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllocateIdsResponse.Merge(dst, src) +} +func (m *AllocateIdsResponse) XXX_Size() int { + return xxx_messageInfo_AllocateIdsResponse.Size(m) +} +func (m *AllocateIdsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AllocateIdsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AllocateIdsResponse proto.InternalMessageInfo + +func (m *AllocateIdsResponse) GetStart() int64 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *AllocateIdsResponse) GetEnd() int64 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + +func (m *AllocateIdsResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +type CompositeIndices struct { + Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompositeIndices) Reset() { *m = CompositeIndices{} } +func (m *CompositeIndices) String() string { return proto.CompactTextString(m) } +func (*CompositeIndices) ProtoMessage() {} +func (*CompositeIndices) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{33} +} +func (m *CompositeIndices) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompositeIndices.Unmarshal(m, b) +} +func (m *CompositeIndices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompositeIndices.Marshal(b, m, deterministic) +} +func (dst *CompositeIndices) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompositeIndices.Merge(dst, src) +} +func (m *CompositeIndices) XXX_Size() int { + return xxx_messageInfo_CompositeIndices.Size(m) +} +func (m *CompositeIndices) XXX_DiscardUnknown() { + xxx_messageInfo_CompositeIndices.DiscardUnknown(m) +} + +var xxx_messageInfo_CompositeIndices proto.InternalMessageInfo + +func (m *CompositeIndices) GetIndex() []*CompositeIndex { + if m != nil { + return m.Index + } + return nil +} + +type AddActionsRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"` + Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} } +func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) } +func (*AddActionsRequest) ProtoMessage() {} +func (*AddActionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{34} +} +func (m *AddActionsRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddActionsRequest.Unmarshal(m, b) +} +func (m *AddActionsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddActionsRequest.Marshal(b, m, deterministic) +} +func (dst *AddActionsRequest) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_AddActionsRequest.Merge(dst, src) +} +func (m *AddActionsRequest) XXX_Size() int { + return xxx_messageInfo_AddActionsRequest.Size(m) +} +func (m *AddActionsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_AddActionsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_AddActionsRequest proto.InternalMessageInfo + +func (m *AddActionsRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *AddActionsRequest) GetTransaction() *Transaction { + if m != nil { + return m.Transaction + } + return nil +} + +func (m *AddActionsRequest) GetAction() []*Action { + if m != nil { + return m.Action + } + return nil +} + +type AddActionsResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} } +func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) } +func (*AddActionsResponse) ProtoMessage() {} +func (*AddActionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{35} +} +func (m *AddActionsResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_AddActionsResponse.Unmarshal(m, b) +} +func (m *AddActionsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_AddActionsResponse.Marshal(b, m, deterministic) +} +func (dst *AddActionsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AddActionsResponse.Merge(dst, src) +} +func (m *AddActionsResponse) XXX_Size() int { + return xxx_messageInfo_AddActionsResponse.Size(m) +} +func (m *AddActionsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AddActionsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AddActionsResponse proto.InternalMessageInfo + +type BeginTransactionRequest struct { + Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"` + App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"` + AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,json=allowMultipleEg,def=0" json:"allow_multiple_eg,omitempty"` + DatabaseId *string `protobuf:"bytes,4,opt,name=database_id,json=databaseId" json:"database_id,omitempty"` + Mode *BeginTransactionRequest_TransactionMode `protobuf:"varint,5,opt,name=mode,enum=appengine.BeginTransactionRequest_TransactionMode,def=0" json:"mode,omitempty"` + PreviousTransaction *Transaction `protobuf:"bytes,7,opt,name=previous_transaction,json=previousTransaction" json:"previous_transaction,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} } +func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) } +func (*BeginTransactionRequest) ProtoMessage() {} +func (*BeginTransactionRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{36} +} +func (m *BeginTransactionRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BeginTransactionRequest.Unmarshal(m, b) +} +func (m *BeginTransactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BeginTransactionRequest.Marshal(b, m, deterministic) +} +func (dst *BeginTransactionRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BeginTransactionRequest.Merge(dst, src) +} +func (m *BeginTransactionRequest) XXX_Size() 
int { + return xxx_messageInfo_BeginTransactionRequest.Size(m) +} +func (m *BeginTransactionRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BeginTransactionRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BeginTransactionRequest proto.InternalMessageInfo + +const Default_BeginTransactionRequest_AllowMultipleEg bool = false +const Default_BeginTransactionRequest_Mode BeginTransactionRequest_TransactionMode = BeginTransactionRequest_UNKNOWN + +func (m *BeginTransactionRequest) GetHeader() *InternalHeader { + if m != nil { + return m.Header + } + return nil +} + +func (m *BeginTransactionRequest) GetApp() string { + if m != nil && m.App != nil { + return *m.App + } + return "" +} + +func (m *BeginTransactionRequest) GetAllowMultipleEg() bool { + if m != nil && m.AllowMultipleEg != nil { + return *m.AllowMultipleEg + } + return Default_BeginTransactionRequest_AllowMultipleEg +} + +func (m *BeginTransactionRequest) GetDatabaseId() string { + if m != nil && m.DatabaseId != nil { + return *m.DatabaseId + } + return "" +} + +func (m *BeginTransactionRequest) GetMode() BeginTransactionRequest_TransactionMode { + if m != nil && m.Mode != nil { + return *m.Mode + } + return Default_BeginTransactionRequest_Mode +} + +func (m *BeginTransactionRequest) GetPreviousTransaction() *Transaction { + if m != nil { + return m.PreviousTransaction + } + return nil +} + +type CommitResponse struct { + Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"` + Version []*CommitResponse_Version `protobuf:"group,3,rep,name=Version,json=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitResponse) Reset() { *m = CommitResponse{} } +func (m *CommitResponse) String() string { return proto.CompactTextString(m) } +func (*CommitResponse) ProtoMessage() {} +func (*CommitResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37} +} +func (m *CommitResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CommitResponse.Unmarshal(m, b) +} +func (m *CommitResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CommitResponse.Marshal(b, m, deterministic) +} +func (dst *CommitResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse.Merge(dst, src) +} +func (m *CommitResponse) XXX_Size() int { + return xxx_messageInfo_CommitResponse.Size(m) +} +func (m *CommitResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitResponse proto.InternalMessageInfo + +func (m *CommitResponse) GetCost() *Cost { + if m != nil { + return m.Cost + } + return nil +} + +func (m *CommitResponse) GetVersion() []*CommitResponse_Version { + if m != nil { + return m.Version + } + return nil +} + +type CommitResponse_Version struct { + RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key,json=rootEntityKey" json:"root_entity_key,omitempty"` + Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} } +func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) } +func (*CommitResponse_Version) ProtoMessage() {} +func (*CommitResponse_Version) Descriptor() ([]byte, []int) { + return 
fileDescriptor_datastore_v3_83b17b80c34f6179, []int{37, 0} +} +func (m *CommitResponse_Version) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CommitResponse_Version.Unmarshal(m, b) +} +func (m *CommitResponse_Version) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CommitResponse_Version.Marshal(b, m, deterministic) +} +func (dst *CommitResponse_Version) XXX_Merge(src proto.Message) { + xxx_messageInfo_CommitResponse_Version.Merge(dst, src) +} +func (m *CommitResponse_Version) XXX_Size() int { + return xxx_messageInfo_CommitResponse_Version.Size(m) +} +func (m *CommitResponse_Version) XXX_DiscardUnknown() { + xxx_messageInfo_CommitResponse_Version.DiscardUnknown(m) +} + +var xxx_messageInfo_CommitResponse_Version proto.InternalMessageInfo + +func (m *CommitResponse_Version) GetRootEntityKey() *Reference { + if m != nil { + return m.RootEntityKey + } + return nil +} + +func (m *CommitResponse_Version) GetVersion() int64 { + if m != nil && m.Version != nil { + return *m.Version + } + return 0 +} + +func init() { + proto.RegisterType((*Action)(nil), "appengine.Action") + proto.RegisterType((*PropertyValue)(nil), "appengine.PropertyValue") + proto.RegisterType((*PropertyValue_PointValue)(nil), "appengine.PropertyValue.PointValue") + proto.RegisterType((*PropertyValue_UserValue)(nil), "appengine.PropertyValue.UserValue") + proto.RegisterType((*PropertyValue_ReferenceValue)(nil), "appengine.PropertyValue.ReferenceValue") + proto.RegisterType((*PropertyValue_ReferenceValue_PathElement)(nil), "appengine.PropertyValue.ReferenceValue.PathElement") + proto.RegisterType((*Property)(nil), "appengine.Property") + proto.RegisterType((*Path)(nil), "appengine.Path") + proto.RegisterType((*Path_Element)(nil), "appengine.Path.Element") + proto.RegisterType((*Reference)(nil), "appengine.Reference") + proto.RegisterType((*User)(nil), "appengine.User") + proto.RegisterType((*EntityProto)(nil), "appengine.EntityProto") + proto.RegisterType((*CompositeProperty)(nil), "appengine.CompositeProperty") + proto.RegisterType((*Index)(nil), "appengine.Index") + proto.RegisterType((*Index_Property)(nil), "appengine.Index.Property") + proto.RegisterType((*CompositeIndex)(nil), "appengine.CompositeIndex") + proto.RegisterType((*IndexPostfix)(nil), "appengine.IndexPostfix") + proto.RegisterType((*IndexPostfix_IndexValue)(nil), "appengine.IndexPostfix.IndexValue") + proto.RegisterType((*IndexPosition)(nil), "appengine.IndexPosition") + proto.RegisterType((*Snapshot)(nil), "appengine.Snapshot") + proto.RegisterType((*InternalHeader)(nil), "appengine.InternalHeader") + proto.RegisterType((*Transaction)(nil), "appengine.Transaction") + proto.RegisterType((*Query)(nil), "appengine.Query") + proto.RegisterType((*Query_Filter)(nil), "appengine.Query.Filter") + proto.RegisterType((*Query_Order)(nil), "appengine.Query.Order") + proto.RegisterType((*CompiledQuery)(nil), "appengine.CompiledQuery") + proto.RegisterType((*CompiledQuery_PrimaryScan)(nil), "appengine.CompiledQuery.PrimaryScan") + proto.RegisterType((*CompiledQuery_MergeJoinScan)(nil), "appengine.CompiledQuery.MergeJoinScan") + proto.RegisterType((*CompiledQuery_EntityFilter)(nil), "appengine.CompiledQuery.EntityFilter") + proto.RegisterType((*CompiledCursor)(nil), "appengine.CompiledCursor") + proto.RegisterType((*CompiledCursor_Position)(nil), "appengine.CompiledCursor.Position") + proto.RegisterType((*CompiledCursor_Position_IndexValue)(nil), "appengine.CompiledCursor.Position.IndexValue") + 
proto.RegisterType((*Cursor)(nil), "appengine.Cursor") + proto.RegisterType((*Error)(nil), "appengine.Error") + proto.RegisterType((*Cost)(nil), "appengine.Cost") + proto.RegisterType((*Cost_CommitCost)(nil), "appengine.Cost.CommitCost") + proto.RegisterType((*GetRequest)(nil), "appengine.GetRequest") + proto.RegisterType((*GetResponse)(nil), "appengine.GetResponse") + proto.RegisterType((*GetResponse_Entity)(nil), "appengine.GetResponse.Entity") + proto.RegisterType((*PutRequest)(nil), "appengine.PutRequest") + proto.RegisterType((*PutResponse)(nil), "appengine.PutResponse") + proto.RegisterType((*TouchRequest)(nil), "appengine.TouchRequest") + proto.RegisterType((*TouchResponse)(nil), "appengine.TouchResponse") + proto.RegisterType((*DeleteRequest)(nil), "appengine.DeleteRequest") + proto.RegisterType((*DeleteResponse)(nil), "appengine.DeleteResponse") + proto.RegisterType((*NextRequest)(nil), "appengine.NextRequest") + proto.RegisterType((*QueryResult)(nil), "appengine.QueryResult") + proto.RegisterType((*AllocateIdsRequest)(nil), "appengine.AllocateIdsRequest") + proto.RegisterType((*AllocateIdsResponse)(nil), "appengine.AllocateIdsResponse") + proto.RegisterType((*CompositeIndices)(nil), "appengine.CompositeIndices") + proto.RegisterType((*AddActionsRequest)(nil), "appengine.AddActionsRequest") + proto.RegisterType((*AddActionsResponse)(nil), "appengine.AddActionsResponse") + proto.RegisterType((*BeginTransactionRequest)(nil), "appengine.BeginTransactionRequest") + proto.RegisterType((*CommitResponse)(nil), "appengine.CommitResponse") + proto.RegisterType((*CommitResponse_Version)(nil), "appengine.CommitResponse.Version") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/datastore/datastore_v3.proto", fileDescriptor_datastore_v3_83b17b80c34f6179) +} + +var fileDescriptor_datastore_v3_83b17b80c34f6179 = []byte{ + // 4156 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x5a, 0xcd, 0x73, 0xe3, 0x46, + 0x76, 0x37, 0xc1, 0xef, 0x47, 0x89, 0x82, 0x5a, 0xf3, 0xc1, 0xa1, 0x3f, 0x46, 0xc6, 0xac, 0x6d, + 0xd9, 0x6b, 0x73, 0x6c, 0xf9, 0x23, 0x5b, 0x4a, 0x76, 0x1d, 0x4a, 0xc4, 0x68, 0x90, 0xa1, 0x48, + 0xb9, 0x09, 0xd9, 0x9e, 0x5c, 0x50, 0x18, 0xa2, 0x29, 0x21, 0x43, 0x02, 0x30, 0x00, 0x6a, 0x46, + 0x93, 0xe4, 0x90, 0x4b, 0x2a, 0x55, 0x5b, 0xa9, 0x1c, 0x92, 0x4a, 0x25, 0xf9, 0x07, 0x72, 0xc8, + 0x39, 0x95, 0xaa, 0x54, 0xf6, 0x98, 0x5b, 0x0e, 0x7b, 0xc9, 0x31, 0x95, 0x73, 0xf2, 0x27, 0x24, + 0x39, 0xa4, 0xfa, 0x75, 0x03, 0x02, 0x28, 0x4a, 0x23, 0x6d, 0xf6, 0x90, 0x13, 0xd1, 0xef, 0xfd, + 0xba, 0xf1, 0xfa, 0xf5, 0xfb, 0x6c, 0x10, 0xba, 0xc7, 0xbe, 0x7f, 0x3c, 0x65, 0x9d, 0x63, 0x7f, + 0x6a, 0x7b, 0xc7, 0x1d, 0x3f, 0x3c, 0x7e, 0x68, 0x07, 0x01, 0xf3, 0x8e, 0x5d, 0x8f, 0x3d, 0x74, + 0xbd, 0x98, 0x85, 0x9e, 0x3d, 0x7d, 0xe8, 0xd8, 0xb1, 0x1d, 0xc5, 0x7e, 0xc8, 0xce, 0x9f, 0xac, + 0xd3, 0xcf, 0x3b, 0x41, 0xe8, 0xc7, 0x3e, 0xa9, 0xa7, 0x13, 0xb4, 0x1a, 0x54, 0xba, 0xe3, 0xd8, + 0xf5, 0x3d, 0xed, 0x1f, 0x2b, 0xb0, 0x7a, 0x18, 0xfa, 0x01, 0x0b, 0xe3, 0xb3, 0x6f, 0xed, 0xe9, + 0x9c, 0x91, 0x77, 0x00, 0x5c, 0x2f, 0xfe, 0xea, 0x0b, 0x1c, 0xb5, 0x0a, 0x9b, 0x85, 0xad, 0x22, + 0xcd, 0x50, 0x88, 0x06, 0x2b, 0xcf, 0x7c, 0x7f, 0xca, 0x6c, 0x4f, 0x20, 0x94, 0xcd, 0xc2, 0x56, + 0x8d, 0xe6, 0x68, 0x64, 0x13, 0x1a, 0x51, 0x1c, 0xba, 0xde, 0xb1, 0x80, 0x14, 0x37, 0x0b, 0x5b, + 0x75, 0x9a, 0x25, 0x71, 0x84, 0xe3, 0xcf, 0x9f, 0x4d, 0x99, 0x40, 0x94, 0x36, 0x0b, 0x5b, 0x05, + 0x9a, 0x25, 0x91, 0x3d, 0x80, 0xc0, 0x77, 0xbd, 0xf8, 0x14, 
0x01, 0xe5, 0xcd, 0xc2, 0x16, 0x6c, + 0x3f, 0xe8, 0xa4, 0x7b, 0xe8, 0xe4, 0xa4, 0xee, 0x1c, 0x72, 0x28, 0x3e, 0xd2, 0xcc, 0x34, 0xf2, + 0xdb, 0x50, 0x9f, 0x47, 0x2c, 0x14, 0x6b, 0xd4, 0x70, 0x0d, 0xed, 0xd2, 0x35, 0x8e, 0x22, 0x16, + 0x8a, 0x25, 0xce, 0x27, 0x91, 0x21, 0x34, 0x43, 0x36, 0x61, 0x21, 0xf3, 0xc6, 0x4c, 0x2c, 0xb3, + 0x82, 0xcb, 0x7c, 0x70, 0xe9, 0x32, 0x34, 0x81, 0x8b, 0xb5, 0x16, 0xa6, 0xb7, 0xb7, 0x00, 0xce, + 0x85, 0x25, 0x2b, 0x50, 0x78, 0xd9, 0xaa, 0x6c, 0x2a, 0x5b, 0x05, 0x5a, 0x78, 0xc9, 0x47, 0x67, + 0xad, 0xaa, 0x18, 0x9d, 0xb5, 0xff, 0xa9, 0x00, 0xf5, 0x54, 0x26, 0x72, 0x0b, 0xca, 0x6c, 0x66, + 0xbb, 0xd3, 0x56, 0x7d, 0x53, 0xd9, 0xaa, 0x53, 0x31, 0x20, 0xf7, 0xa1, 0x61, 0xcf, 0xe3, 0x13, + 0xcb, 0xf1, 0x67, 0xb6, 0xeb, 0xb5, 0x00, 0x79, 0xc0, 0x49, 0x3d, 0xa4, 0x90, 0x36, 0xd4, 0x3c, + 0x77, 0xfc, 0xdc, 0xb3, 0x67, 0xac, 0xd5, 0xc0, 0x73, 0x48, 0xc7, 0xe4, 0x13, 0x20, 0x13, 0xe6, + 0xb0, 0xd0, 0x8e, 0x99, 0x63, 0xb9, 0x0e, 0xf3, 0x62, 0x37, 0x3e, 0x6b, 0xdd, 0x46, 0xd4, 0x7a, + 0xca, 0x31, 0x24, 0x23, 0x0f, 0x0f, 0x42, 0xff, 0xd4, 0x75, 0x58, 0xd8, 0xba, 0xb3, 0x00, 0x3f, + 0x94, 0x8c, 0xf6, 0xbf, 0x17, 0xa0, 0x99, 0xd7, 0x05, 0x51, 0xa1, 0x68, 0x07, 0x41, 0x6b, 0x15, + 0xa5, 0xe4, 0x8f, 0xe4, 0x6d, 0x00, 0x2e, 0x8a, 0x15, 0x05, 0xf6, 0x98, 0xb5, 0x6e, 0xe1, 0x5a, + 0x75, 0x4e, 0x19, 0x71, 0x02, 0x39, 0x82, 0x46, 0x60, 0xc7, 0x27, 0x6c, 0xca, 0x66, 0xcc, 0x8b, + 0x5b, 0xcd, 0xcd, 0xe2, 0x16, 0x6c, 0x7f, 0x7e, 0x4d, 0xd5, 0x77, 0x0e, 0xed, 0xf8, 0x44, 0x17, + 0x53, 0x69, 0x76, 0x9d, 0xb6, 0x0e, 0x8d, 0x0c, 0x8f, 0x10, 0x28, 0xc5, 0x67, 0x01, 0x6b, 0xad, + 0xa1, 0x5c, 0xf8, 0x4c, 0x9a, 0xa0, 0xb8, 0x4e, 0x4b, 0x45, 0xf3, 0x57, 0x5c, 0x87, 0x63, 0x50, + 0x87, 0xeb, 0x28, 0x22, 0x3e, 0x6b, 0xff, 0x51, 0x86, 0x5a, 0x22, 0x00, 0xe9, 0x42, 0x75, 0xc6, + 0x6c, 0xcf, 0xf5, 0x8e, 0xd1, 0x69, 0x9a, 0xdb, 0x6f, 0x2e, 0x11, 0xb3, 0x73, 0x20, 0x20, 0x3b, + 0x30, 0x18, 0x5a, 0x07, 0x7a, 0x77, 0x60, 0x0c, 0xf6, 0x69, 0x32, 0x8f, 0x1f, 0xa6, 0x7c, 0xb4, + 0xe6, 0xa1, 0x8b, 0x9e, 0x55, 0xa7, 0x20, 0x49, 0x47, 0xa1, 0x9b, 0x0a, 0x51, 0x14, 0x82, 0xe2, + 0x21, 0x76, 0xa0, 0x9c, 0xb8, 0x88, 0xb2, 0xd5, 0xd8, 0x6e, 0x5d, 0xa6, 0x1c, 0x2a, 0x60, 0xdc, + 0x20, 0x66, 0xf3, 0x69, 0xec, 0x06, 0x53, 0xee, 0x76, 0xca, 0x56, 0x8d, 0xa6, 0x63, 0xf2, 0x1e, + 0x40, 0xc4, 0xec, 0x70, 0x7c, 0x62, 0x3f, 0x9b, 0xb2, 0x56, 0x85, 0x7b, 0xf6, 0x4e, 0x79, 0x62, + 0x4f, 0x23, 0x46, 0x33, 0x0c, 0x62, 0xc3, 0xdd, 0x49, 0x1c, 0x59, 0xb1, 0xff, 0x9c, 0x79, 0xee, + 0x2b, 0x9b, 0x07, 0x12, 0xcb, 0x0f, 0xf8, 0x0f, 0xfa, 0x58, 0x73, 0xfb, 0xc3, 0x65, 0x5b, 0x7f, + 0x14, 0x47, 0x66, 0x66, 0xc6, 0x10, 0x27, 0xd0, 0xdb, 0x93, 0x65, 0x64, 0xd2, 0x86, 0xca, 0xd4, + 0x1f, 0xdb, 0x53, 0xd6, 0xaa, 0x73, 0x2d, 0xec, 0x28, 0xcc, 0xa3, 0x92, 0xa2, 0xfd, 0xb3, 0x02, + 0x55, 0xa9, 0x47, 0xd2, 0x84, 0x8c, 0x26, 0xd5, 0x37, 0x48, 0x0d, 0x4a, 0xbb, 0xfd, 0xe1, 0xae, + 0xda, 0xe4, 0x4f, 0xa6, 0xfe, 0xbd, 0xa9, 0xae, 0x71, 0xcc, 0xee, 0x53, 0x53, 0x1f, 0x99, 0x94, + 0x63, 0x54, 0xb2, 0x0e, 0xab, 0x5d, 0x73, 0x78, 0x60, 0xed, 0x75, 0x4d, 0x7d, 0x7f, 0x48, 0x9f, + 0xaa, 0x05, 0xb2, 0x0a, 0x75, 0x24, 0xf5, 0x8d, 0xc1, 0x13, 0x55, 0xe1, 0x33, 0x70, 0x68, 0x1a, + 0x66, 0x5f, 0x57, 0x8b, 0x44, 0x85, 0x15, 0x31, 0x63, 0x38, 0x30, 0xf5, 0x81, 0xa9, 0x96, 0x52, + 0xca, 0xe8, 0xe8, 0xe0, 0xa0, 0x4b, 0x9f, 0xaa, 0x65, 0xb2, 0x06, 0x0d, 0xa4, 0x74, 0x8f, 0xcc, + 0xc7, 0x43, 0xaa, 0x56, 0x48, 0x03, 0xaa, 0xfb, 0x3d, 0xeb, 0xbb, 0xc7, 0xfa, 0x40, 0xad, 0x92, + 0x15, 0xa8, 0xed, 0xf7, 0x2c, 0xfd, 0xa0, 0x6b, 0xf4, 0xd5, 0x1a, 0x9f, 0xbd, 0xaf, 
0x0f, 0xe9, + 0x68, 0x64, 0x1d, 0x0e, 0x8d, 0x81, 0xa9, 0xd6, 0x49, 0x1d, 0xca, 0xfb, 0x3d, 0xcb, 0x38, 0x50, + 0x81, 0x10, 0x68, 0xee, 0xf7, 0xac, 0xc3, 0xc7, 0xc3, 0x81, 0x3e, 0x38, 0x3a, 0xd8, 0xd5, 0xa9, + 0xda, 0x20, 0xb7, 0x40, 0xe5, 0xb4, 0xe1, 0xc8, 0xec, 0xf6, 0xbb, 0xbd, 0x1e, 0xd5, 0x47, 0x23, + 0x75, 0x85, 0x4b, 0xbd, 0xdf, 0xb3, 0x68, 0xd7, 0xe4, 0xfb, 0x5a, 0xe5, 0x2f, 0xe4, 0x7b, 0x7f, + 0xa2, 0x3f, 0x55, 0xd7, 0xf9, 0x2b, 0xf4, 0x81, 0x69, 0x98, 0x4f, 0xad, 0x43, 0x3a, 0x34, 0x87, + 0xea, 0x06, 0x17, 0xd0, 0x18, 0xf4, 0xf4, 0xef, 0xad, 0x6f, 0xbb, 0xfd, 0x23, 0x5d, 0x25, 0xda, + 0x8f, 0xe1, 0xf6, 0xd2, 0x33, 0xe1, 0xaa, 0x7b, 0x6c, 0x1e, 0xf4, 0xd5, 0x02, 0x7f, 0xe2, 0x9b, + 0x52, 0x15, 0xed, 0x0f, 0xa0, 0xc4, 0x5d, 0x86, 0x7c, 0x06, 0xd5, 0xc4, 0x1b, 0x0b, 0xe8, 0x8d, + 0x77, 0xb3, 0x67, 0x6d, 0xc7, 0x27, 0x9d, 0xc4, 0xe3, 0x12, 0x5c, 0xbb, 0x0b, 0xd5, 0x45, 0x4f, + 0x53, 0x2e, 0x78, 0x5a, 0xf1, 0x82, 0xa7, 0x95, 0x32, 0x9e, 0x66, 0x43, 0x3d, 0xf5, 0xed, 0x9b, + 0x47, 0x91, 0x07, 0x50, 0xe2, 0xde, 0xdf, 0x6a, 0xa2, 0x87, 0xac, 0x2d, 0x08, 0x4c, 0x91, 0xa9, + 0xfd, 0x43, 0x01, 0x4a, 0x3c, 0xda, 0x9e, 0x07, 0xda, 0xc2, 0x15, 0x81, 0x56, 0xb9, 0x32, 0xd0, + 0x16, 0xaf, 0x15, 0x68, 0x2b, 0x37, 0x0b, 0xb4, 0xd5, 0x4b, 0x02, 0xad, 0xf6, 0x67, 0x45, 0x68, + 0xe8, 0x38, 0xf3, 0x10, 0x13, 0xfd, 0xfb, 0x50, 0x7c, 0xce, 0xce, 0x50, 0x3f, 0x8d, 0xed, 0x5b, + 0x99, 0xdd, 0xa6, 0x2a, 0xa4, 0x1c, 0x40, 0xb6, 0x61, 0x45, 0xbc, 0xd0, 0x3a, 0x0e, 0xfd, 0x79, + 0xd0, 0x52, 0x97, 0xab, 0xa7, 0x21, 0x40, 0xfb, 0x1c, 0x43, 0xde, 0x83, 0xb2, 0xff, 0xc2, 0x63, + 0x21, 0xc6, 0xc1, 0x3c, 0x98, 0x2b, 0x8f, 0x0a, 0x2e, 0x79, 0x08, 0xa5, 0xe7, 0xae, 0xe7, 0xe0, + 0x19, 0xe6, 0x23, 0x61, 0x46, 0xd0, 0xce, 0x13, 0xd7, 0x73, 0x28, 0x02, 0xc9, 0x3d, 0xa8, 0xf1, + 0x5f, 0x8c, 0x7b, 0x65, 0xdc, 0x68, 0x95, 0x8f, 0x79, 0xd0, 0x7b, 0x08, 0xb5, 0x40, 0xc6, 0x10, + 0x4c, 0x00, 0x8d, 0xed, 0x8d, 0x25, 0xe1, 0x85, 0xa6, 0x20, 0xf2, 0x15, 0xac, 0x84, 0xf6, 0x0b, + 0x2b, 0x9d, 0xb4, 0x76, 0xf9, 0xa4, 0x46, 0x68, 0xbf, 0x48, 0x23, 0x38, 0x81, 0x52, 0x68, 0x7b, + 0xcf, 0x5b, 0x64, 0xb3, 0xb0, 0x55, 0xa6, 0xf8, 0xac, 0x7d, 0x01, 0x25, 0x2e, 0x25, 0x8f, 0x08, + 0xfb, 0x3d, 0xf4, 0xff, 0xee, 0x9e, 0xa9, 0x16, 0x12, 0x7f, 0xfe, 0x96, 0x47, 0x03, 0x45, 0x72, + 0x0f, 0xf4, 0xd1, 0xa8, 0xbb, 0xaf, 0xab, 0x45, 0xad, 0x07, 0xeb, 0x7b, 0xfe, 0x2c, 0xf0, 0x23, + 0x37, 0x66, 0xe9, 0xf2, 0xf7, 0xa0, 0xe6, 0x7a, 0x0e, 0x7b, 0x69, 0xb9, 0x0e, 0x9a, 0x56, 0x91, + 0x56, 0x71, 0x6c, 0x38, 0xdc, 0xe4, 0x4e, 0x65, 0x31, 0x55, 0xe4, 0x26, 0x87, 0x03, 0xed, 0x2f, + 0x15, 0x28, 0x1b, 0x1c, 0xc1, 0x8d, 0x4f, 0x9e, 0x14, 0x7a, 0x8f, 0x30, 0x4c, 0x10, 0x24, 0x93, + 0xfb, 0x50, 0x1b, 0x6a, 0xb6, 0x37, 0x66, 0xbc, 0xe2, 0xc3, 0x3c, 0x50, 0xa3, 0xe9, 0x98, 0x7c, + 0x99, 0xd1, 0x9f, 0x82, 0x2e, 0x7b, 0x2f, 0xa3, 0x0a, 0x7c, 0xc1, 0x12, 0x2d, 0xb6, 0xff, 0xaa, + 0x90, 0x49, 0x6e, 0xcb, 0x12, 0x4f, 0x1f, 0xea, 0x8e, 0x1b, 0x32, 0xac, 0x23, 0xe5, 0x41, 0x3f, + 0xb8, 0x74, 0xe1, 0x4e, 0x2f, 0x81, 0xee, 0xd4, 0xbb, 0xa3, 0x3d, 0x7d, 0xd0, 0xe3, 0x99, 0xef, + 0x7c, 0x01, 0xed, 0x23, 0xa8, 0xa7, 0x10, 0x0c, 0xc7, 0x09, 0x48, 0x2d, 0x70, 0xf5, 0xf6, 0xf4, + 0x74, 0xac, 0x68, 0x7f, 0xad, 0x40, 0x33, 0xd5, 0xaf, 0xd0, 0xd0, 0x6d, 0xa8, 0xd8, 0x41, 0x90, + 0xa8, 0xb6, 0x4e, 0xcb, 0x76, 0x10, 0x18, 0x8e, 0x8c, 0x2d, 0x0a, 0x6a, 0x9b, 0xc7, 0x96, 0x4f, + 0x01, 0x1c, 0x36, 0x71, 0x3d, 0x17, 0x85, 0x2e, 0xa2, 0xc1, 0xab, 0x8b, 0x42, 0xd3, 0x0c, 0x86, + 0x7c, 0x09, 0xe5, 0x28, 0xb6, 0x63, 0x91, 0x2b, 0x9b, 0xdb, 0xf7, 0x33, 0xe0, 0xbc, 0x08, 0x9d, + 0x11, 0x87, 
0x51, 0x81, 0x26, 0x5f, 0xc1, 0x2d, 0xdf, 0x9b, 0x9e, 0x59, 0xf3, 0x88, 0x59, 0xee, + 0xc4, 0x0a, 0xd9, 0x0f, 0x73, 0x37, 0x64, 0x4e, 0x3e, 0xa7, 0xae, 0x73, 0xc8, 0x51, 0xc4, 0x8c, + 0x09, 0x95, 0x7c, 0xed, 0x6b, 0x28, 0xe3, 0x3a, 0x7c, 0xcf, 0xdf, 0x51, 0xc3, 0xd4, 0xad, 0xe1, + 0xa0, 0xff, 0x54, 0xe8, 0x80, 0xea, 0xdd, 0x9e, 0x85, 0x44, 0x55, 0xe1, 0xc1, 0xbe, 0xa7, 0xf7, + 0x75, 0x53, 0xef, 0xa9, 0x45, 0x9e, 0x3d, 0x74, 0x4a, 0x87, 0x54, 0x2d, 0x69, 0xff, 0x53, 0x80, + 0x15, 0x94, 0xe7, 0xd0, 0x8f, 0xe2, 0x89, 0xfb, 0x92, 0xec, 0x41, 0x43, 0x98, 0xdd, 0xa9, 0x2c, + 0xe8, 0xb9, 0x33, 0x68, 0x8b, 0x7b, 0x96, 0x68, 0x31, 0x90, 0x75, 0xb4, 0x9b, 0x3e, 0x27, 0x21, + 0x45, 0x41, 0xa7, 0xbf, 0x22, 0xa4, 0xbc, 0x05, 0x95, 0x67, 0x6c, 0xe2, 0x87, 0x22, 0x04, 0xd6, + 0x76, 0x4a, 0x71, 0x38, 0x67, 0x54, 0xd2, 0xda, 0x36, 0xc0, 0xf9, 0xfa, 0xe4, 0x01, 0xac, 0x26, + 0xc6, 0x66, 0xa1, 0x71, 0x89, 0x93, 0x5b, 0x49, 0x88, 0x83, 0x5c, 0x75, 0xa3, 0x5c, 0xab, 0xba, + 0xd1, 0xbe, 0x86, 0xd5, 0x64, 0x3f, 0xe2, 0xfc, 0x54, 0x21, 0x79, 0x01, 0x63, 0xca, 0x82, 0x8c, + 0xca, 0x45, 0x19, 0xb5, 0x9f, 0x41, 0x6d, 0xe4, 0xd9, 0x41, 0x74, 0xe2, 0xc7, 0xdc, 0x7a, 0xe2, + 0x48, 0xfa, 0xaa, 0x12, 0x47, 0x9a, 0x06, 0x15, 0x7e, 0x38, 0xf3, 0x88, 0xbb, 0xbf, 0x31, 0xe8, + 0xee, 0x99, 0xc6, 0xb7, 0xba, 0xfa, 0x06, 0x01, 0xa8, 0xc8, 0xe7, 0x82, 0xa6, 0x41, 0xd3, 0x90, + 0xed, 0xd8, 0x63, 0x66, 0x3b, 0x2c, 0xe4, 0x12, 0xfc, 0xe0, 0x47, 0x89, 0x04, 0x3f, 0xf8, 0x91, + 0xf6, 0x17, 0x05, 0x68, 0x98, 0xa1, 0xed, 0x45, 0xb6, 0x30, 0xf7, 0xcf, 0xa0, 0x72, 0x82, 0x58, + 0x74, 0xa3, 0xc6, 0x82, 0x7f, 0x66, 0x17, 0xa3, 0x12, 0x48, 0xee, 0x40, 0xe5, 0xc4, 0xf6, 0x9c, + 0xa9, 0xd0, 0x5a, 0x85, 0xca, 0x51, 0x92, 0x1b, 0x95, 0xf3, 0xdc, 0xb8, 0x05, 0x2b, 0x33, 0x3b, + 0x7c, 0x6e, 0x8d, 0x4f, 0x6c, 0xef, 0x98, 0x45, 0xf2, 0x60, 0xa4, 0x05, 0x36, 0x38, 0x6b, 0x4f, + 0x70, 0xb4, 0xbf, 0x5f, 0x81, 0xf2, 0x37, 0x73, 0x16, 0x9e, 0x65, 0x04, 0xfa, 0xe0, 0xba, 0x02, + 0xc9, 0x17, 0x17, 0x2e, 0x4b, 0xca, 0x6f, 0x2f, 0x26, 0x65, 0x22, 0x53, 0x84, 0xc8, 0x95, 0x22, + 0x0b, 0x7c, 0x9a, 0x09, 0x63, 0xeb, 0x57, 0xd8, 0xda, 0x79, 0x70, 0x7b, 0x08, 0x95, 0x89, 0x3b, + 0x8d, 0x51, 0x75, 0x8b, 0xd5, 0x08, 0xee, 0xa5, 0xf3, 0x08, 0xd9, 0x54, 0xc2, 0xc8, 0xbb, 0xb0, + 0x22, 0x2a, 0x59, 0xeb, 0x07, 0xce, 0xc6, 0x82, 0x95, 0xf7, 0xa6, 0x48, 0x13, 0xbb, 0xff, 0x18, + 0xca, 0x7e, 0xc8, 0x37, 0x5f, 0xc7, 0x25, 0xef, 0x5c, 0x58, 0x72, 0xc8, 0xb9, 0x54, 0x80, 0xc8, + 0x87, 0x50, 0x3a, 0x71, 0xbd, 0x18, 0xb3, 0x46, 0x73, 0xfb, 0xf6, 0x05, 0xf0, 0x63, 0xd7, 0x8b, + 0x29, 0x42, 0x78, 0x98, 0x1f, 0xfb, 0x73, 0x2f, 0x6e, 0xdd, 0xc5, 0x0c, 0x23, 0x06, 0xe4, 0x1e, + 0x54, 0xfc, 0xc9, 0x24, 0x62, 0x31, 0x76, 0x96, 0xe5, 0x9d, 0xc2, 0xa7, 0x54, 0x12, 0xf8, 0x84, + 0xa9, 0x3b, 0x73, 0x63, 0xec, 0x43, 0xca, 0x54, 0x0c, 0xc8, 0x2e, 0xac, 0x8d, 0xfd, 0x59, 0xe0, + 0x4e, 0x99, 0x63, 0x8d, 0xe7, 0x61, 0xe4, 0x87, 0xad, 0x77, 0x2e, 0x1c, 0xd3, 0x9e, 0x44, 0xec, + 0x21, 0x80, 0x36, 0xc7, 0xb9, 0x31, 0x31, 0x60, 0x83, 0x79, 0x8e, 0xb5, 0xb8, 0xce, 0xfd, 0xd7, + 0xad, 0xb3, 0xce, 0x3c, 0x27, 0x4f, 0x4a, 0xc4, 0xc1, 0x48, 0x68, 0x61, 0xcc, 0x68, 0x6d, 0x60, + 0x90, 0xb9, 0x77, 0x69, 0xac, 0x14, 0xe2, 0x64, 0xc2, 0xf7, 0x6f, 0xc0, 0x2d, 0x19, 0x22, 0xad, + 0x80, 0x85, 0x13, 0x36, 0x8e, 0xad, 0x60, 0x6a, 0x7b, 0x58, 0xca, 0xa5, 0xc6, 0x4a, 0x24, 0xe4, + 0x50, 0x20, 0x0e, 0xa7, 0xb6, 0x47, 0x34, 0xa8, 0x3f, 0x67, 0x67, 0x91, 0xc5, 0x23, 0x29, 0x76, + 0xae, 0x29, 0xba, 0xc6, 0xe9, 0x43, 0x6f, 0x7a, 0x46, 0x7e, 0x02, 0x8d, 0xf8, 0xdc, 0xdb, 0xb0, + 0x61, 0x6d, 0xe4, 0x4e, 0x35, 0xe3, 
0x8b, 0x34, 0x0b, 0x25, 0xf7, 0xa1, 0x2a, 0x35, 0xd4, 0xba, + 0x97, 0x5d, 0x3b, 0xa1, 0xf2, 0xc4, 0x3c, 0xb1, 0xdd, 0xa9, 0x7f, 0xca, 0x42, 0x6b, 0x16, 0xb5, + 0xda, 0xe2, 0xb6, 0x24, 0x21, 0x1d, 0x44, 0xdc, 0x4f, 0xa3, 0x38, 0xf4, 0xbd, 0xe3, 0xd6, 0x26, + 0xde, 0x93, 0xc8, 0xd1, 0xc5, 0xe0, 0xf7, 0x2e, 0x66, 0xfe, 0x7c, 0xf0, 0xfb, 0x1c, 0xee, 0x60, + 0x65, 0x66, 0x3d, 0x3b, 0xb3, 0xf2, 0x68, 0x0d, 0xd1, 0x1b, 0xc8, 0xdd, 0x3d, 0x3b, 0xcc, 0x4e, + 0x6a, 0x43, 0xcd, 0x71, 0xa3, 0xd8, 0xf5, 0xc6, 0x71, 0xab, 0x85, 0xef, 0x4c, 0xc7, 0xe4, 0x33, + 0xb8, 0x3d, 0x73, 0x3d, 0x2b, 0xb2, 0x27, 0xcc, 0x8a, 0x5d, 0xee, 0x9b, 0x6c, 0xec, 0x7b, 0x4e, + 0xd4, 0x7a, 0x80, 0x82, 0x93, 0x99, 0xeb, 0x8d, 0xec, 0x09, 0x33, 0xdd, 0x19, 0x1b, 0x09, 0x0e, + 0xf9, 0x08, 0xd6, 0x11, 0x1e, 0xb2, 0x60, 0xea, 0x8e, 0x6d, 0xf1, 0xfa, 0x1f, 0xe1, 0xeb, 0xd7, + 0x38, 0x83, 0x0a, 0x3a, 0xbe, 0xfa, 0x63, 0x68, 0x06, 0x2c, 0x8c, 0xdc, 0x28, 0xb6, 0xa4, 0x45, + 0xbf, 0x97, 0xd5, 0xda, 0xaa, 0x64, 0x0e, 0x91, 0xd7, 0xfe, 0xcf, 0x02, 0x54, 0x84, 0x73, 0x92, + 0x4f, 0x41, 0xf1, 0x03, 0xbc, 0x06, 0x69, 0x6e, 0x6f, 0x5e, 0xe2, 0xc1, 0x9d, 0x61, 0xc0, 0xeb, + 0x5e, 0x3f, 0xa4, 0x8a, 0x1f, 0xdc, 0xb8, 0x28, 0xd4, 0xfe, 0x10, 0x6a, 0xc9, 0x02, 0xbc, 0xbc, + 0xe8, 0xeb, 0xa3, 0x91, 0x65, 0x3e, 0xee, 0x0e, 0xd4, 0x02, 0xb9, 0x03, 0x24, 0x1d, 0x5a, 0x43, + 0x6a, 0xe9, 0xdf, 0x1c, 0x75, 0xfb, 0xaa, 0x82, 0x5d, 0x1a, 0xd5, 0xbb, 0xa6, 0x4e, 0x05, 0xb2, + 0x48, 0xee, 0xc1, 0xed, 0x2c, 0xe5, 0x1c, 0x5c, 0xc2, 0x14, 0x8c, 0x8f, 0x65, 0x52, 0x01, 0xc5, + 0x18, 0xa8, 0x15, 0x9e, 0x16, 0xf4, 0xef, 0x8d, 0x91, 0x39, 0x52, 0xab, 0xed, 0xbf, 0x29, 0x40, + 0x19, 0xc3, 0x06, 0x3f, 0x9f, 0x54, 0x72, 0x71, 0x5d, 0x73, 0x5e, 0xb9, 0x1a, 0xd9, 0x92, 0xaa, + 0x81, 0x01, 0x65, 0x73, 0x79, 0xf4, 0xf9, 0xb5, 0xd6, 0x53, 0x3f, 0x85, 0x12, 0x8f, 0x52, 0xbc, + 0x43, 0x1c, 0xd2, 0x9e, 0x4e, 0xad, 0x47, 0x06, 0x1d, 0xf1, 0x2a, 0x97, 0x40, 0xb3, 0x3b, 0xd8, + 0xd3, 0x47, 0xe6, 0x30, 0xa1, 0xa1, 0x56, 0x1e, 0x19, 0x7d, 0x33, 0x45, 0x15, 0xb5, 0x9f, 0xd7, + 0x60, 0x35, 0x89, 0x09, 0x22, 0x82, 0x3e, 0x82, 0x46, 0x10, 0xba, 0x33, 0x3b, 0x3c, 0x8b, 0xc6, + 0xb6, 0x87, 0x49, 0x01, 0xb6, 0x7f, 0xb4, 0x24, 0xaa, 0x88, 0x1d, 0x1d, 0x0a, 0xec, 0x68, 0x6c, + 0x7b, 0x34, 0x3b, 0x91, 0xf4, 0x61, 0x75, 0xc6, 0xc2, 0x63, 0xf6, 0x7b, 0xbe, 0xeb, 0xe1, 0x4a, + 0x55, 0x8c, 0xc8, 0xef, 0x5f, 0xba, 0xd2, 0x01, 0x47, 0xff, 0x8e, 0xef, 0x7a, 0xb8, 0x56, 0x7e, + 0x32, 0xf9, 0x04, 0xea, 0xa2, 0x12, 0x72, 0xd8, 0x04, 0x63, 0xc5, 0xb2, 0xda, 0x4f, 0xd4, 0xe8, + 0x3d, 0x36, 0xc9, 0xc4, 0x65, 0xb8, 0x34, 0x2e, 0x37, 0xb2, 0x71, 0xf9, 0xcd, 0x6c, 0x2c, 0x5a, + 0x11, 0x55, 0x78, 0x1a, 0x84, 0x2e, 0x38, 0x7c, 0x6b, 0x89, 0xc3, 0x77, 0x60, 0x23, 0xf1, 0x55, + 0xcb, 0xf5, 0x26, 0xee, 0x4b, 0x2b, 0x72, 0x5f, 0x89, 0xd8, 0x53, 0xa6, 0xeb, 0x09, 0xcb, 0xe0, + 0x9c, 0x91, 0xfb, 0x8a, 0x11, 0x23, 0xe9, 0xe0, 0x64, 0x0e, 0x5c, 0xc5, 0xab, 0xc9, 0xf7, 0x2e, + 0x55, 0x8f, 0x68, 0xbe, 0x64, 0x46, 0xcc, 0x4d, 0x6d, 0xff, 0x52, 0x81, 0x46, 0xe6, 0x1c, 0x78, + 0xf6, 0x16, 0xca, 0x42, 0x61, 0xc5, 0x55, 0x94, 0x50, 0x1f, 0x4a, 0xfa, 0x26, 0xd4, 0xa3, 0xd8, + 0x0e, 0x63, 0x8b, 0x17, 0x57, 0xb2, 0xdd, 0x45, 0xc2, 0x13, 0x76, 0x46, 0x3e, 0x80, 0x35, 0xc1, + 0x74, 0xbd, 0xf1, 0x74, 0x1e, 0xb9, 0xa7, 0xa2, 0x99, 0xaf, 0xd1, 0x26, 0x92, 0x8d, 0x84, 0x4a, + 0xee, 0x42, 0x95, 0x67, 0x21, 0xbe, 0x86, 0x68, 0xfa, 0x2a, 0xcc, 0x73, 0xf8, 0x0a, 0x0f, 0x60, + 0x95, 0x33, 0xce, 0xe7, 0x57, 0xc4, 0x2d, 0x33, 0xf3, 0x9c, 0xf3, 0xd9, 0x1d, 0xd8, 0x10, 0xaf, + 0x09, 0x44, 0xf1, 0x2a, 0x2b, 0xdc, 0x3b, 0xa8, 0xd8, 0x75, 
0x64, 0xc9, 0xb2, 0x56, 0x14, 0x9c, + 0x1f, 0x01, 0xcf, 0x5e, 0x0b, 0xe8, 0xbb, 0x22, 0x94, 0x31, 0xcf, 0xc9, 0x61, 0x77, 0xe1, 0x1d, + 0x8e, 0x9d, 0x7b, 0x76, 0x10, 0x4c, 0x5d, 0xe6, 0x58, 0x53, 0xff, 0x18, 0x43, 0x66, 0x14, 0xdb, + 0xb3, 0xc0, 0x9a, 0x47, 0xad, 0x0d, 0x0c, 0x99, 0x6d, 0xe6, 0x39, 0x47, 0x09, 0xa8, 0xef, 0x1f, + 0x9b, 0x09, 0xe4, 0x28, 0x6a, 0xff, 0x3e, 0xac, 0xe6, 0xec, 0x71, 0x41, 0xa7, 0x35, 0x74, 0xfe, + 0x8c, 0x4e, 0xdf, 0x85, 0x95, 0x20, 0x64, 0xe7, 0xa2, 0xd5, 0x51, 0xb4, 0x86, 0xa0, 0x09, 0xb1, + 0xb6, 0x60, 0x05, 0x79, 0x96, 0x20, 0xe6, 0xf3, 0x63, 0x03, 0x59, 0x87, 0xc8, 0x69, 0xbf, 0x80, + 0x95, 0xec, 0x69, 0x93, 0x77, 0x33, 0x69, 0xa1, 0x99, 0xcb, 0x93, 0x69, 0x76, 0x48, 0x2a, 0xb2, + 0xf5, 0x4b, 0x2a, 0x32, 0x72, 0x9d, 0x8a, 0x4c, 0xfb, 0x2f, 0xd9, 0x9c, 0x65, 0x2a, 0x84, 0x9f, + 0x41, 0x2d, 0x90, 0xf5, 0x38, 0x5a, 0x52, 0xfe, 0x12, 0x3e, 0x0f, 0xee, 0x24, 0x95, 0x3b, 0x4d, + 0xe7, 0xb4, 0xff, 0x56, 0x81, 0x5a, 0x5a, 0xd0, 0xe7, 0x2c, 0xef, 0xcd, 0x05, 0xcb, 0x3b, 0x90, + 0x1a, 0x16, 0x0a, 0x7c, 0x1b, 0xa3, 0xc5, 0x27, 0xaf, 0x7f, 0xd7, 0xc5, 0xb6, 0xe7, 0x34, 0xdb, + 0xf6, 0x6c, 0xbe, 0xae, 0xed, 0xf9, 0xe4, 0xa2, 0xc1, 0xbf, 0x95, 0xe9, 0x2d, 0x16, 0xcc, 0xbe, + 0xfd, 0x7d, 0xae, 0x0f, 0xca, 0x26, 0x84, 0x77, 0xc4, 0x7e, 0xd2, 0x84, 0x90, 0xb6, 0x3f, 0xf7, + 0xaf, 0xd7, 0xfe, 0x6c, 0x43, 0x45, 0xea, 0xfc, 0x0e, 0x54, 0x64, 0x4d, 0x27, 0x1b, 0x04, 0x31, + 0x3a, 0x6f, 0x10, 0x0a, 0xb2, 0x4e, 0xd7, 0x7e, 0xae, 0x40, 0x59, 0x0f, 0x43, 0x3f, 0xd4, 0xfe, + 0x48, 0x81, 0x3a, 0x3e, 0xed, 0xf9, 0x0e, 0xe3, 0xd9, 0x60, 0xb7, 0xdb, 0xb3, 0xa8, 0xfe, 0xcd, + 0x91, 0x8e, 0xd9, 0xa0, 0x0d, 0x77, 0xf6, 0x86, 0x83, 0xbd, 0x23, 0x4a, 0xf5, 0x81, 0x69, 0x99, + 0xb4, 0x3b, 0x18, 0xf1, 0xb6, 0x67, 0x38, 0x50, 0x15, 0x9e, 0x29, 0x8c, 0x81, 0xa9, 0xd3, 0x41, + 0xb7, 0x6f, 0x89, 0x56, 0xb4, 0x88, 0x77, 0xb3, 0xba, 0xde, 0xb3, 0xf0, 0xd6, 0x51, 0x2d, 0xf1, + 0x96, 0xd5, 0x34, 0x0e, 0xf4, 0xe1, 0x91, 0xa9, 0x96, 0xc9, 0x6d, 0x58, 0x3f, 0xd4, 0xe9, 0x81, + 0x31, 0x1a, 0x19, 0xc3, 0x81, 0xd5, 0xd3, 0x07, 0x86, 0xde, 0x53, 0x2b, 0x7c, 0x9d, 0x5d, 0x63, + 0xdf, 0xec, 0xee, 0xf6, 0x75, 0xb9, 0x4e, 0x95, 0x6c, 0xc2, 0x5b, 0x7b, 0xc3, 0x83, 0x03, 0xc3, + 0x34, 0xf5, 0x9e, 0xb5, 0x7b, 0x64, 0x5a, 0x23, 0xd3, 0xe8, 0xf7, 0xad, 0xee, 0xe1, 0x61, 0xff, + 0x29, 0x4f, 0x60, 0x35, 0x72, 0x17, 0x36, 0xf6, 0xba, 0x87, 0xdd, 0x5d, 0xa3, 0x6f, 0x98, 0x4f, + 0xad, 0x9e, 0x31, 0xe2, 0xf3, 0x7b, 0x6a, 0x9d, 0x27, 0x6c, 0x93, 0x3e, 0xb5, 0xba, 0x7d, 0x14, + 0xcd, 0xd4, 0xad, 0xdd, 0xee, 0xde, 0x13, 0x7d, 0xd0, 0x53, 0x81, 0x0b, 0x30, 0xea, 0x3e, 0xd2, + 0x2d, 0x2e, 0x92, 0x65, 0x0e, 0x87, 0xd6, 0xb0, 0xdf, 0x53, 0x1b, 0xda, 0xbf, 0x14, 0xa1, 0xb4, + 0xe7, 0x47, 0x31, 0xf7, 0x46, 0xe1, 0xac, 0x2f, 0x42, 0x37, 0x66, 0xa2, 0x7f, 0x2b, 0x53, 0xd1, + 0x4b, 0x7f, 0x87, 0x24, 0x1e, 0x50, 0x32, 0x10, 0xeb, 0xd9, 0x19, 0xc7, 0x29, 0x88, 0x5b, 0x3b, + 0xc7, 0xed, 0x72, 0xb2, 0x88, 0x68, 0x78, 0x85, 0x23, 0xd7, 0x2b, 0x22, 0x4e, 0x06, 0x61, 0xb9, + 0xe0, 0xc7, 0x40, 0xb2, 0x20, 0xb9, 0x62, 0x09, 0x91, 0x6a, 0x06, 0x29, 0x96, 0xdc, 0x01, 0x18, + 0xfb, 0xb3, 0x99, 0x1b, 0x8f, 0xfd, 0x28, 0x96, 0x5f, 0xc8, 0xda, 0x39, 0x63, 0x8f, 0x62, 0x6e, + 0xf1, 0x33, 0x37, 0xe6, 0x8f, 0x34, 0x83, 0x26, 0x3b, 0x70, 0xcf, 0x0e, 0x82, 0xd0, 0x7f, 0xe9, + 0xce, 0xec, 0x98, 0x59, 0xdc, 0x73, 0xed, 0x63, 0x66, 0x39, 0x6c, 0x1a, 0xdb, 0xd8, 0x13, 0x95, + 0xe9, 0xdd, 0x0c, 0x60, 0x24, 0xf8, 0x3d, 0xce, 0xe6, 0x71, 0xd7, 0x75, 0xac, 0x88, 0xfd, 0x30, + 0xe7, 0x1e, 0x60, 0xcd, 0x03, 0xc7, 0xe6, 0x62, 0xd6, 0x45, 0x96, 0x72, 0x9d, 0x91, 
0xe4, 0x1c, + 0x09, 0x46, 0xfb, 0x15, 0xc0, 0xb9, 0x14, 0x64, 0x1b, 0x6e, 0xf3, 0x3a, 0x9e, 0x45, 0x31, 0x73, + 0x2c, 0xb9, 0xdb, 0x60, 0x1e, 0x47, 0x18, 0xe2, 0xcb, 0x74, 0x23, 0x65, 0xca, 0x9b, 0xc2, 0x79, + 0x1c, 0x91, 0x9f, 0x40, 0xeb, 0xc2, 0x1c, 0x87, 0x4d, 0x19, 0x7f, 0x6d, 0x15, 0xa7, 0xdd, 0x59, + 0x98, 0xd6, 0x13, 0x5c, 0xed, 0x4f, 0x14, 0x80, 0x7d, 0x16, 0x53, 0xc1, 0xcd, 0x34, 0xb6, 0x95, + 0xeb, 0x36, 0xb6, 0xef, 0x27, 0x17, 0x08, 0xc5, 0xab, 0x63, 0xc0, 0x42, 0x97, 0xa1, 0xdc, 0xa4, + 0xcb, 0xc8, 0x35, 0x11, 0xc5, 0x2b, 0x9a, 0x88, 0x52, 0xae, 0x89, 0xf8, 0x18, 0x9a, 0xf6, 0x74, + 0xea, 0xbf, 0xe0, 0x05, 0x0d, 0x0b, 0x43, 0xe6, 0xa0, 0x11, 0x9c, 0xd7, 0xdb, 0xc8, 0xec, 0x49, + 0x9e, 0xf6, 0xe7, 0x0a, 0x34, 0x50, 0x15, 0x51, 0xe0, 0x7b, 0x11, 0x23, 0x5f, 0x42, 0x45, 0x5e, + 0x44, 0x8b, 0x8b, 0xfc, 0xb7, 0x33, 0xb2, 0x66, 0x70, 0xb2, 0x68, 0xa0, 0x12, 0xcc, 0x33, 0x42, + 0xe6, 0x75, 0x97, 0x2b, 0x25, 0x45, 0x91, 0xfb, 0x50, 0x73, 0x3d, 0x4b, 0xb4, 0xd4, 0x95, 0x4c, + 0x58, 0xac, 0xba, 0x1e, 0xd6, 0xb2, 0xed, 0x57, 0x50, 0x11, 0x2f, 0x21, 0x9d, 0x54, 0xa6, 0x8b, + 0xfa, 0xcb, 0xdc, 0x1c, 0xa7, 0xc2, 0xc8, 0xc3, 0x29, 0xbd, 0x2e, 0x40, 0xb7, 0xa0, 0x7a, 0xca, + 0x9b, 0x0f, 0xbc, 0xf4, 0xe3, 0xea, 0x4d, 0x86, 0xda, 0x1f, 0x97, 0x00, 0x0e, 0xe7, 0x4b, 0x0c, + 0xa4, 0x71, 0x5d, 0x03, 0xe9, 0xe4, 0xf4, 0xf8, 0x7a, 0x99, 0x7f, 0x75, 0x43, 0x59, 0xd2, 0x69, + 0x17, 0x6f, 0xda, 0x69, 0xdf, 0x87, 0x6a, 0x1c, 0xce, 0xb9, 0xa3, 0x08, 0x63, 0x4a, 0x5b, 0x5a, + 0x49, 0x25, 0x6f, 0x42, 0x79, 0xe2, 0x87, 0x63, 0x86, 0x8e, 0x95, 0xb2, 0x05, 0xed, 0xc2, 0x65, + 0x52, 0xed, 0xb2, 0xcb, 0x24, 0xde, 0xa0, 0x45, 0xf2, 0x1e, 0x0d, 0x0b, 0x99, 0x7c, 0x83, 0x96, + 0x5c, 0xb1, 0xd1, 0x14, 0x44, 0xbe, 0x81, 0xa6, 0x3d, 0x8f, 0x7d, 0xcb, 0xe5, 0x15, 0xda, 0xd4, + 0x1d, 0x9f, 0x61, 0xd9, 0xdd, 0xcc, 0x7f, 0xaf, 0x4f, 0x0f, 0xaa, 0xd3, 0x9d, 0xc7, 0xbe, 0xe1, + 0x1c, 0x22, 0x72, 0xa7, 0x2a, 0x93, 0x12, 0x5d, 0xb1, 0x33, 0x64, 0xed, 0xc7, 0xb0, 0x92, 0x85, + 0xf1, 0x04, 0x24, 0x81, 0xea, 0x1b, 0x3c, 0x3b, 0x8d, 0x78, 0x6a, 0x1b, 0x98, 0x46, 0xb7, 0xaf, + 0x16, 0xb4, 0x18, 0x1a, 0xb8, 0xbc, 0xf4, 0x8e, 0xeb, 0xba, 0xfd, 0x03, 0x28, 0x61, 0xf8, 0x55, + 0x2e, 0x7c, 0x0f, 0xc1, 0x98, 0x8b, 0xcc, 0xbc, 0xf9, 0x15, 0xb3, 0xe6, 0xf7, 0xdf, 0x05, 0x58, + 0x31, 0xfd, 0xf9, 0xf8, 0xe4, 0xa2, 0x01, 0xc2, 0xaf, 0x3b, 0x42, 0x2d, 0x31, 0x1f, 0xe5, 0xa6, + 0xe6, 0x93, 0x5a, 0x47, 0x71, 0x89, 0x75, 0xdc, 0xf4, 0xcc, 0xb5, 0x2f, 0x60, 0x55, 0x6e, 0x5e, + 0x6a, 0x3d, 0xd1, 0x66, 0xe1, 0x0a, 0x6d, 0x6a, 0xbf, 0x50, 0x60, 0x55, 0xc4, 0xf7, 0xff, 0xbb, + 0xd2, 0x2a, 0x37, 0x0c, 0xeb, 0xe5, 0x1b, 0x5d, 0x1e, 0xfd, 0xbf, 0xf4, 0x34, 0x6d, 0x08, 0xcd, + 0x44, 0x7d, 0x37, 0x50, 0xfb, 0x15, 0x46, 0xfc, 0x8b, 0x02, 0x34, 0x06, 0xec, 0xe5, 0x92, 0x20, + 0x5a, 0xbe, 0xee, 0x71, 0x7c, 0x98, 0x2b, 0x57, 0x1b, 0xdb, 0xeb, 0x59, 0x19, 0xc4, 0xd5, 0x63, + 0x52, 0xc1, 0xa6, 0xb7, 0xa8, 0xca, 0xf2, 0x5b, 0xd4, 0xd2, 0x62, 0xb7, 0x9e, 0xb9, 0xc5, 0x2b, + 0x2e, 0xbb, 0xc5, 0xd3, 0xfe, 0xad, 0x08, 0x0d, 0x6c, 0x90, 0x29, 0x8b, 0xe6, 0xd3, 0x38, 0x27, + 0x4c, 0xe1, 0x6a, 0x61, 0x3a, 0x50, 0x09, 0x71, 0x92, 0x74, 0xa5, 0x4b, 0x83, 0xbf, 0x40, 0x61, + 0x6b, 0xfc, 0xdc, 0x0d, 0x02, 0xe6, 0x58, 0x82, 0x92, 0x14, 0x30, 0x4d, 0x49, 0x16, 0x22, 0x44, + 0xbc, 0xfc, 0x9c, 0xf9, 0x21, 0x4b, 0x51, 0x45, 0xbc, 0x4f, 0x68, 0x70, 0x5a, 0x02, 0xc9, 0xdd, + 0x37, 0x88, 0xca, 0xe0, 0xfc, 0xbe, 0x21, 0xed, 0x35, 0x91, 0x5b, 0x47, 0xae, 0xe8, 0x35, 0x91, + 0xcd, 0xbb, 0xa8, 0x99, 0x3d, 0x9d, 0x5a, 0x7e, 0x10, 0xa1, 0xd3, 0xd4, 0x68, 0x0d, 0x09, 0xc3, + 0x20, 0x22, 
0x5f, 0x43, 0x7a, 0x5d, 0x2c, 0x6f, 0xc9, 0xc5, 0x39, 0xb6, 0x2e, 0xbb, 0x58, 0xa0, + 0xab, 0xe3, 0xdc, 0xfd, 0xcf, 0x92, 0x1b, 0xea, 0xca, 0x4d, 0x6f, 0xa8, 0x1f, 0x42, 0x59, 0xc4, + 0xa8, 0xda, 0xeb, 0x62, 0x94, 0xc0, 0x65, 0xed, 0xb3, 0x91, 0xb7, 0xcf, 0x5f, 0x16, 0x80, 0x74, + 0xa7, 0x53, 0x7f, 0x6c, 0xc7, 0xcc, 0x70, 0xa2, 0x8b, 0x66, 0x7a, 0xed, 0xcf, 0x2e, 0x9f, 0x41, + 0x7d, 0xe6, 0x3b, 0x6c, 0x6a, 0x25, 0xdf, 0x94, 0x2e, 0xad, 0x7e, 0x10, 0xc6, 0x5b, 0x52, 0x02, + 0x25, 0xbc, 0xc4, 0x51, 0xb0, 0xee, 0xc0, 0x67, 0xde, 0x84, 0xcd, 0xec, 0x97, 0xb2, 0x14, 0xe1, + 0x8f, 0xa4, 0x03, 0xd5, 0x90, 0x45, 0x2c, 0x3c, 0x65, 0x57, 0x16, 0x55, 0x09, 0x48, 0x7b, 0x06, + 0x1b, 0xb9, 0x1d, 0x49, 0x47, 0xbe, 0x85, 0x5f, 0x2b, 0xc3, 0x58, 0x7e, 0xb4, 0x12, 0x03, 0xfe, + 0x3a, 0xe6, 0x25, 0x9f, 0x41, 0xf9, 0x63, 0xea, 0xf0, 0xc5, 0xab, 0xe2, 0xec, 0x1e, 0xa8, 0x59, + 0x4d, 0xbb, 0x63, 0x0c, 0x36, 0xf2, 0x54, 0x0a, 0xd7, 0x3b, 0x15, 0xed, 0xef, 0x0a, 0xb0, 0xde, + 0x75, 0x1c, 0xf1, 0x77, 0xc3, 0x25, 0xaa, 0x2f, 0x5e, 0x57, 0xf5, 0x0b, 0x81, 0x58, 0x84, 0x89, + 0x6b, 0x05, 0xe2, 0x0f, 0xa1, 0x92, 0xd6, 0x5a, 0xc5, 0x05, 0x77, 0x16, 0x72, 0x51, 0x09, 0xd0, + 0x6e, 0x01, 0xc9, 0x0a, 0x2b, 0xb4, 0xaa, 0xfd, 0x69, 0x11, 0xee, 0xee, 0xb2, 0x63, 0xd7, 0xcb, + 0xbe, 0xe2, 0x57, 0xdf, 0xc9, 0xc5, 0x4f, 0x65, 0x9f, 0xc1, 0xba, 0x28, 0xe4, 0x93, 0x7f, 0x62, + 0x59, 0xec, 0x58, 0x7e, 0x9d, 0x94, 0xb1, 0x6a, 0x0d, 0xf9, 0x07, 0x92, 0xad, 0xe3, 0x7f, 0xc5, + 0x1c, 0x3b, 0xb6, 0x9f, 0xd9, 0x11, 0xb3, 0x5c, 0x47, 0xfe, 0x59, 0x06, 0x12, 0x92, 0xe1, 0x90, + 0x21, 0x94, 0xb8, 0x0d, 0xa2, 0xeb, 0x36, 0xb7, 0xb7, 0x33, 0x62, 0x5d, 0xb2, 0x95, 0xac, 0x02, + 0x0f, 0x7c, 0x87, 0xed, 0x54, 0x8f, 0x06, 0x4f, 0x06, 0xc3, 0xef, 0x06, 0x14, 0x17, 0x22, 0x06, + 0xdc, 0x0a, 0x42, 0x76, 0xea, 0xfa, 0xf3, 0xc8, 0xca, 0x9e, 0x44, 0xf5, 0xca, 0x94, 0xb8, 0x91, + 0xcc, 0xc9, 0x10, 0xb5, 0x9f, 0xc2, 0xda, 0xc2, 0xcb, 0x78, 0x6d, 0x26, 0x5f, 0xa7, 0xbe, 0x41, + 0x56, 0xa1, 0x8e, 0x1f, 0xbb, 0x97, 0x7f, 0xfb, 0xd6, 0xfe, 0xb5, 0x80, 0x57, 0x4c, 0x33, 0x37, + 0xbe, 0x59, 0x06, 0xfb, 0xcd, 0x7c, 0x06, 0x83, 0xed, 0x77, 0xf3, 0xe6, 0x9b, 0x59, 0xb0, 0xf3, + 0xad, 0x00, 0xa6, 0x41, 0xa4, 0x6d, 0x43, 0x55, 0xd2, 0xc8, 0x6f, 0xc1, 0x5a, 0xe8, 0xfb, 0x71, + 0xd2, 0x89, 0x8a, 0x0e, 0xe4, 0xf2, 0x3f, 0xdb, 0xac, 0x72, 0xb0, 0x48, 0x06, 0x4f, 0xf2, 0xbd, + 0x48, 0x59, 0xfc, 0x0d, 0x44, 0x0e, 0x77, 0x1b, 0xbf, 0x5b, 0x4f, 0xff, 0xb7, 0xfb, 0xbf, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x35, 0x9f, 0x30, 0x98, 0xf2, 0x2b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto new file mode 100644 index 0000000..497b4d9 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/datastore/datastore_v3.proto @@ -0,0 +1,551 @@ +syntax = "proto2"; +option go_package = "datastore"; + +package appengine; + +message Action{} + +message PropertyValue { + optional int64 int64Value = 1; + optional bool booleanValue = 2; + optional string stringValue = 3; + optional double doubleValue = 4; + + optional group PointValue = 5 { + required double x = 6; + required double y = 7; + } + + optional group UserValue = 8 { + required string email = 9; + required string auth_domain = 10; + optional string nickname = 11; + optional string federated_identity = 21; + optional string federated_provider = 22; + } + + optional group ReferenceValue = 12 { + required string app = 13; + optional string name_space = 20; + repeated group PathElement = 14 { + required 
string type = 15; + optional int64 id = 16; + optional string name = 17; + } + } +} + +message Property { + enum Meaning { + NO_MEANING = 0; + BLOB = 14; + TEXT = 15; + BYTESTRING = 16; + + ATOM_CATEGORY = 1; + ATOM_LINK = 2; + ATOM_TITLE = 3; + ATOM_CONTENT = 4; + ATOM_SUMMARY = 5; + ATOM_AUTHOR = 6; + + GD_WHEN = 7; + GD_EMAIL = 8; + GEORSS_POINT = 9; + GD_IM = 10; + + GD_PHONENUMBER = 11; + GD_POSTALADDRESS = 12; + + GD_RATING = 13; + + BLOBKEY = 17; + ENTITY_PROTO = 19; + + INDEX_VALUE = 18; + }; + + optional Meaning meaning = 1 [default = NO_MEANING]; + optional string meaning_uri = 2; + + required string name = 3; + + required PropertyValue value = 5; + + required bool multiple = 4; + + optional bool searchable = 6 [default=false]; + + enum FtsTokenizationOption { + HTML = 1; + ATOM = 2; + } + + optional FtsTokenizationOption fts_tokenization_option = 8; + + optional string locale = 9 [default = "en"]; +} + +message Path { + repeated group Element = 1 { + required string type = 2; + optional int64 id = 3; + optional string name = 4; + } +} + +message Reference { + required string app = 13; + optional string name_space = 20; + required Path path = 14; +} + +message User { + required string email = 1; + required string auth_domain = 2; + optional string nickname = 3; + optional string federated_identity = 6; + optional string federated_provider = 7; +} + +message EntityProto { + required Reference key = 13; + required Path entity_group = 16; + optional User owner = 17; + + enum Kind { + GD_CONTACT = 1; + GD_EVENT = 2; + GD_MESSAGE = 3; + } + optional Kind kind = 4; + optional string kind_uri = 5; + + repeated Property property = 14; + repeated Property raw_property = 15; + + optional int32 rank = 18; +} + +message CompositeProperty { + required int64 index_id = 1; + repeated string value = 2; +} + +message Index { + required string entity_type = 1; + required bool ancestor = 5; + repeated group Property = 2 { + required string name = 3; + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + optional Direction direction = 4 [default = ASCENDING]; + } +} + +message CompositeIndex { + required string app_id = 1; + required int64 id = 2; + required Index definition = 3; + + enum State { + WRITE_ONLY = 1; + READ_WRITE = 2; + DELETED = 3; + ERROR = 4; + } + required State state = 4; + + optional bool only_use_if_required = 6 [default = false]; +} + +message IndexPostfix { + message IndexValue { + required string property_name = 1; + required PropertyValue value = 2; + } + + repeated IndexValue index_value = 1; + + optional Reference key = 2; + + optional bool before = 3 [default=true]; +} + +message IndexPosition { + optional string key = 1; + + optional bool before = 2 [default=true]; +} + +message Snapshot { + enum Status { + INACTIVE = 0; + ACTIVE = 1; + } + + required int64 ts = 1; +} + +message InternalHeader { + optional string qos = 1; +} + +message Transaction { + optional InternalHeader header = 4; + required fixed64 handle = 1; + required string app = 2; + optional bool mark_changes = 3 [default = false]; +} + +message Query { + optional InternalHeader header = 39; + + required string app = 1; + optional string name_space = 29; + + optional string kind = 3; + optional Reference ancestor = 17; + + repeated group Filter = 4 { + enum Operator { + LESS_THAN = 1; + LESS_THAN_OR_EQUAL = 2; + GREATER_THAN = 3; + GREATER_THAN_OR_EQUAL = 4; + EQUAL = 5; + IN = 6; + EXISTS = 7; + } + + required Operator op = 6; + repeated Property property = 14; + } + + optional string search_query = 8; 
+ + repeated group Order = 9 { + enum Direction { + ASCENDING = 1; + DESCENDING = 2; + } + + required string property = 10; + optional Direction direction = 11 [default = ASCENDING]; + } + + enum Hint { + ORDER_FIRST = 1; + ANCESTOR_FIRST = 2; + FILTER_FIRST = 3; + } + optional Hint hint = 18; + + optional int32 count = 23; + + optional int32 offset = 12 [default = 0]; + + optional int32 limit = 16; + + optional CompiledCursor compiled_cursor = 30; + optional CompiledCursor end_compiled_cursor = 31; + + repeated CompositeIndex composite_index = 19; + + optional bool require_perfect_plan = 20 [default = false]; + + optional bool keys_only = 21 [default = false]; + + optional Transaction transaction = 22; + + optional bool compile = 25 [default = false]; + + optional int64 failover_ms = 26; + + optional bool strong = 32; + + repeated string property_name = 33; + + repeated string group_by_property_name = 34; + + optional bool distinct = 24; + + optional int64 min_safe_time_seconds = 35; + + repeated string safe_replica_name = 36; + + optional bool persist_offset = 37 [default=false]; +} + +message CompiledQuery { + required group PrimaryScan = 1 { + optional string index_name = 2; + + optional string start_key = 3; + optional bool start_inclusive = 4; + optional string end_key = 5; + optional bool end_inclusive = 6; + + repeated string start_postfix_value = 22; + repeated string end_postfix_value = 23; + + optional int64 end_unapplied_log_timestamp_us = 19; + } + + repeated group MergeJoinScan = 7 { + required string index_name = 8; + + repeated string prefix_value = 9; + + optional bool value_prefix = 20 [default=false]; + } + + optional Index index_def = 21; + + optional int32 offset = 10 [default = 0]; + + optional int32 limit = 11; + + required bool keys_only = 12; + + repeated string property_name = 24; + + optional int32 distinct_infix_size = 25; + + optional group EntityFilter = 13 { + optional bool distinct = 14 [default=false]; + + optional string kind = 17; + optional Reference ancestor = 18; + } +} + +message CompiledCursor { + optional group Position = 2 { + optional string start_key = 27; + + repeated group IndexValue = 29 { + optional string property = 30; + required PropertyValue value = 31; + } + + optional Reference key = 32; + + optional bool start_inclusive = 28 [default=true]; + } +} + +message Cursor { + required fixed64 cursor = 1; + + optional string app = 2; +} + +message Error { + enum ErrorCode { + BAD_REQUEST = 1; + CONCURRENT_TRANSACTION = 2; + INTERNAL_ERROR = 3; + NEED_INDEX = 4; + TIMEOUT = 5; + PERMISSION_DENIED = 6; + BIGTABLE_ERROR = 7; + COMMITTED_BUT_STILL_APPLYING = 8; + CAPABILITY_DISABLED = 9; + TRY_ALTERNATE_BACKEND = 10; + SAFE_TIME_TOO_OLD = 11; + } +} + +message Cost { + optional int32 index_writes = 1; + optional int32 index_write_bytes = 2; + optional int32 entity_writes = 3; + optional int32 entity_write_bytes = 4; + optional group CommitCost = 5 { + optional int32 requested_entity_puts = 6; + optional int32 requested_entity_deletes = 7; + }; + optional int32 approximate_storage_delta = 8; + optional int32 id_sequence_updates = 9; +} + +message GetRequest { + optional InternalHeader header = 6; + + repeated Reference key = 1; + optional Transaction transaction = 2; + + optional int64 failover_ms = 3; + + optional bool strong = 4; + + optional bool allow_deferred = 5 [default=false]; +} + +message GetResponse { + repeated group Entity = 1 { + optional EntityProto entity = 2; + optional Reference key = 4; + + optional int64 version = 3; + } + + 
repeated Reference deferred = 5; + + optional bool in_order = 6 [default=true]; +} + +message PutRequest { + optional InternalHeader header = 11; + + repeated EntityProto entity = 1; + optional Transaction transaction = 2; + repeated CompositeIndex composite_index = 3; + + optional bool trusted = 4 [default = false]; + + optional bool force = 7 [default = false]; + + optional bool mark_changes = 8 [default = false]; + repeated Snapshot snapshot = 9; + + enum AutoIdPolicy { + CURRENT = 0; + SEQUENTIAL = 1; + } + optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT]; +} + +message PutResponse { + repeated Reference key = 1; + optional Cost cost = 2; + repeated int64 version = 3; +} + +message TouchRequest { + optional InternalHeader header = 10; + + repeated Reference key = 1; + repeated CompositeIndex composite_index = 2; + optional bool force = 3 [default = false]; + repeated Snapshot snapshot = 9; +} + +message TouchResponse { + optional Cost cost = 1; +} + +message DeleteRequest { + optional InternalHeader header = 10; + + repeated Reference key = 6; + optional Transaction transaction = 5; + + optional bool trusted = 4 [default = false]; + + optional bool force = 7 [default = false]; + + optional bool mark_changes = 8 [default = false]; + repeated Snapshot snapshot = 9; +} + +message DeleteResponse { + optional Cost cost = 1; + repeated int64 version = 3; +} + +message NextRequest { + optional InternalHeader header = 5; + + required Cursor cursor = 1; + optional int32 count = 2; + + optional int32 offset = 4 [default = 0]; + + optional bool compile = 3 [default = false]; +} + +message QueryResult { + optional Cursor cursor = 1; + + repeated EntityProto result = 2; + + optional int32 skipped_results = 7; + + required bool more_results = 3; + + optional bool keys_only = 4; + + optional bool index_only = 9; + + optional bool small_ops = 10; + + optional CompiledQuery compiled_query = 5; + + optional CompiledCursor compiled_cursor = 6; + + repeated CompositeIndex index = 8; + + repeated int64 version = 11; +} + +message AllocateIdsRequest { + optional InternalHeader header = 4; + + optional Reference model_key = 1; + + optional int64 size = 2; + + optional int64 max = 3; + + repeated Reference reserve = 5; +} + +message AllocateIdsResponse { + required int64 start = 1; + required int64 end = 2; + optional Cost cost = 3; +} + +message CompositeIndices { + repeated CompositeIndex index = 1; +} + +message AddActionsRequest { + optional InternalHeader header = 3; + + required Transaction transaction = 1; + repeated Action action = 2; +} + +message AddActionsResponse { +} + +message BeginTransactionRequest { + optional InternalHeader header = 3; + + required string app = 1; + optional bool allow_multiple_eg = 2 [default = false]; + optional string database_id = 4; + + enum TransactionMode { + UNKNOWN = 0; + READ_ONLY = 1; + READ_WRITE = 2; + } + optional TransactionMode mode = 5 [default = UNKNOWN]; + + optional Transaction previous_transaction = 7; +} + +message CommitResponse { + optional Cost cost = 1; + + repeated group Version = 3 { + required Reference root_entity_key = 4; + required int64 version = 5; + } +} diff --git a/vendor/google.golang.org/appengine/internal/identity.go b/vendor/google.golang.org/appengine/internal/identity.go new file mode 100644 index 0000000..9b4134e --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity.go @@ -0,0 +1,55 @@ +// Copyright 2011 Google Inc. All rights reserved. 
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+	"os"
+
+	netcontext "golang.org/x/net/context"
+)
+
+var (
+	// This is set to true in identity_classic.go, which is behind the appengine build tag.
+	// The appengine build tag is set for the first generation runtimes (<= Go 1.9) but not
+	// the second generation runtimes (>= Go 1.11), so this indicates whether we're on a
+	// first-gen runtime. See IsStandard below for the second-gen check.
+	appengineStandard bool
+
+	// This is set to true in identity_flex.go, which is behind the appenginevm build tag.
+	appengineFlex bool
+)
+
+// AppID is the implementation of the wrapper function of the same name in
+// ../identity.go. See that file for commentary.
+func AppID(c netcontext.Context) string {
+	return appID(FullyQualifiedAppID(c))
+}
+
+// IsStandard is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsStandard() bool {
+	// appengineStandard will be true for first-gen runtimes (<= Go 1.9) but not
+	// second-gen (>= Go 1.11).
+	return appengineStandard || IsSecondGen()
+}
+
+// IsSecondGen is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsSecondGen() bool {
+	// Second-gen runtimes set $GAE_ENV so we use that to check if we're on a second-gen runtime.
+	return os.Getenv("GAE_ENV") == "standard"
+}
+
+// IsFlex is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsFlex() bool {
+	return appengineFlex
+}
+
+// IsAppEngine is the implementation of the wrapper function of the same name in
+// ../appengine.go. See that file for commentary.
+func IsAppEngine() bool {
+	return IsStandard() || IsFlex()
+}
diff --git a/vendor/google.golang.org/appengine/internal/identity_classic.go b/vendor/google.golang.org/appengine/internal/identity_classic.go
new file mode 100644
index 0000000..4e979f4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/identity_classic.go
@@ -0,0 +1,61 @@
+// Copyright 2015 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+ +// +build appengine + +package internal + +import ( + "appengine" + + netcontext "golang.org/x/net/context" +) + +func init() { + appengineStandard = true +} + +func DefaultVersionHostname(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.DefaultVersionHostname(c) +} + +func Datacenter(_ netcontext.Context) string { return appengine.Datacenter() } +func ServerSoftware() string { return appengine.ServerSoftware() } +func InstanceID() string { return appengine.InstanceID() } +func IsDevAppServer() bool { return appengine.IsDevAppServer() } + +func RequestID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.RequestID(c) +} + +func ModuleName(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.ModuleName(c) +} +func VersionID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return appengine.VersionID(c) +} + +func fullyQualifiedAppID(ctx netcontext.Context) string { + c := fromContext(ctx) + if c == nil { + panic(errNotAppEngineContext) + } + return c.FullyQualifiedAppID() +} diff --git a/vendor/google.golang.org/appengine/internal/identity_flex.go b/vendor/google.golang.org/appengine/internal/identity_flex.go new file mode 100644 index 0000000..d5e2e7b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_flex.go @@ -0,0 +1,11 @@ +// Copyright 2018 Google LLC. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appenginevm + +package internal + +func init() { + appengineFlex = true +} diff --git a/vendor/google.golang.org/appengine/internal/identity_vm.go b/vendor/google.golang.org/appengine/internal/identity_vm.go new file mode 100644 index 0000000..5d80672 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/identity_vm.go @@ -0,0 +1,134 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "log" + "net/http" + "os" + "strings" + + netcontext "golang.org/x/net/context" +) + +// These functions are implementations of the wrapper functions +// in ../appengine/identity.go. See that file for commentary. + +const ( + hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname" + hRequestLogId = "X-AppEngine-Request-Log-Id" + hDatacenter = "X-AppEngine-Datacenter" +) + +func ctxHeaders(ctx netcontext.Context) http.Header { + c := fromContext(ctx) + if c == nil { + return nil + } + return c.Request().Header +} + +func DefaultVersionHostname(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hDefaultVersionHostname) +} + +func RequestID(ctx netcontext.Context) string { + return ctxHeaders(ctx).Get(hRequestLogId) +} + +func Datacenter(ctx netcontext.Context) string { + if dc := ctxHeaders(ctx).Get(hDatacenter); dc != "" { + return dc + } + // If the header isn't set, read zone from the metadata service. 
+ // It has the format projects/[NUMERIC_PROJECT_ID]/zones/[ZONE] + zone, err := getMetadata("instance/zone") + if err != nil { + log.Printf("Datacenter: %v", err) + return "" + } + parts := strings.Split(string(zone), "/") + if len(parts) == 0 { + return "" + } + return parts[len(parts)-1] +} + +func ServerSoftware() string { + // TODO(dsymonds): Remove fallback when we've verified this. + if s := os.Getenv("SERVER_SOFTWARE"); s != "" { + return s + } + if s := os.Getenv("GAE_ENV"); s != "" { + return s + } + return "Google App Engine/1.x.x" +} + +// TODO(dsymonds): Remove the metadata fetches. + +func ModuleName(_ netcontext.Context) string { + if s := os.Getenv("GAE_MODULE_NAME"); s != "" { + return s + } + if s := os.Getenv("GAE_SERVICE"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_name")) +} + +func VersionID(_ netcontext.Context) string { + if s1, s2 := os.Getenv("GAE_MODULE_VERSION"), os.Getenv("GAE_MINOR_VERSION"); s1 != "" && s2 != "" { + return s1 + "." + s2 + } + if s1, s2 := os.Getenv("GAE_VERSION"), os.Getenv("GAE_DEPLOYMENT_ID"); s1 != "" && s2 != "" { + return s1 + "." + s2 + } + return string(mustGetMetadata("instance/attributes/gae_backend_version")) + "." + string(mustGetMetadata("instance/attributes/gae_backend_minor_version")) +} + +func InstanceID() string { + if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" { + return s + } + if s := os.Getenv("GAE_INSTANCE"); s != "" { + return s + } + return string(mustGetMetadata("instance/attributes/gae_backend_instance")) +} + +func partitionlessAppID() string { + // gae_project has everything except the partition prefix. + if appID := os.Getenv("GAE_LONG_APP_ID"); appID != "" { + return appID + } + if project := os.Getenv("GOOGLE_CLOUD_PROJECT"); project != "" { + return project + } + return string(mustGetMetadata("instance/attributes/gae_project")) +} + +func fullyQualifiedAppID(_ netcontext.Context) string { + if s := os.Getenv("GAE_APPLICATION"); s != "" { + return s + } + appID := partitionlessAppID() + + part := os.Getenv("GAE_PARTITION") + if part == "" { + part = string(mustGetMetadata("instance/attributes/gae_partition")) + } + + if part != "" { + appID = part + "~" + appID + } + return appID +} + +func IsDevAppServer() bool { + return os.Getenv("RUN_WITH_DEVAPPSERVER") != "" +} diff --git a/vendor/google.golang.org/appengine/internal/internal.go b/vendor/google.golang.org/appengine/internal/internal.go new file mode 100644 index 0000000..051ea39 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/internal.go @@ -0,0 +1,110 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package internal provides support for package appengine. +// +// Programs should not use this package directly. Its API is not stable. +// Use packages appengine and appengine/* instead. +package internal + +import ( + "fmt" + + "github.com/golang/protobuf/proto" + + remotepb "google.golang.org/appengine/internal/remote_api" +) + +// errorCodeMaps is a map of service name to the error code map for the service. +var errorCodeMaps = make(map[string]map[int32]string) + +// RegisterErrorCodeMap is called from API implementations to register their +// error code map. This should only be called from init functions. 
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+	errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+	service string
+	code    int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+	timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+	Service string
+	Detail  string
+	Code    int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+	if e.Code == 0 {
+		if e.Detail == "" {
+			return "APIError <empty>"
+		}
+		return e.Detail
+	}
+	s := fmt.Sprintf("API error %d", e.Code)
+	if m, ok := errorCodeMaps[e.Service]; ok {
+		s += " (" + e.Service + ": " + m[e.Code] + ")"
+	} else {
+		// Shouldn't happen, but provide a bit more detail if it does.
+		s = e.Service + " " + s
+	}
+	if e.Detail != "" {
+		s += ": " + e.Detail
+	}
+	return s
+}
+
+func (e *APIError) IsTimeout() bool {
+	return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+	Detail string
+	Code   int32
+	// TODO: Remove this if we get a distinguishable error code.
+	Timeout bool
+}
+
+func (e *CallError) Error() string {
+	var msg string
+	switch remotepb.RpcError_ErrorCode(e.Code) {
+	case remotepb.RpcError_UNKNOWN:
+		return e.Detail
+	case remotepb.RpcError_OVER_QUOTA:
+		msg = "Over quota"
+	case remotepb.RpcError_CAPABILITY_DISABLED:
+		msg = "Capability disabled"
+	case remotepb.RpcError_CANCELLED:
+		msg = "Canceled"
+	default:
+		msg = fmt.Sprintf("Call error %d", e.Code)
+	}
+	s := msg + ": " + e.Detail
+	if e.Timeout {
+		s += " (timeout)"
+	}
+	return s
+}
+
+func (e *CallError) IsTimeout() bool {
+	return e.Timeout
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.pb.go b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 0000000..8545ac4
--- /dev/null
+++ b/vendor/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,1313 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import fmt "fmt"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type LogServiceError_ErrorCode int32 + +const ( + LogServiceError_OK LogServiceError_ErrorCode = 0 + LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1 + LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2 +) + +var LogServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_REQUEST", + 2: "STORAGE_ERROR", +} +var LogServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_REQUEST": 1, + "STORAGE_ERROR": 2, +} + +func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode { + p := new(LogServiceError_ErrorCode) + *p = x + return p +} +func (x LogServiceError_ErrorCode) String() string { + return proto.EnumName(LogServiceError_ErrorCode_name, int32(x)) +} +func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode") + if err != nil { + return err + } + *x = LogServiceError_ErrorCode(value) + return nil +} +func (LogServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{0, 0} +} + +type LogServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogServiceError) Reset() { *m = LogServiceError{} } +func (m *LogServiceError) String() string { return proto.CompactTextString(m) } +func (*LogServiceError) ProtoMessage() {} +func (*LogServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{0} +} +func (m *LogServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogServiceError.Unmarshal(m, b) +} +func (m *LogServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogServiceError.Marshal(b, m, deterministic) +} +func (dst *LogServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogServiceError.Merge(dst, src) +} +func (m *LogServiceError) XXX_Size() int { + return xxx_messageInfo_LogServiceError.Size(m) +} +func (m *LogServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_LogServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_LogServiceError proto.InternalMessageInfo + +type UserAppLogLine struct { + TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec,json=timestampUsec" json:"timestamp_usec,omitempty"` + Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} } +func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) } +func (*UserAppLogLine) ProtoMessage() {} +func (*UserAppLogLine) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{1} +} +func (m *UserAppLogLine) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserAppLogLine.Unmarshal(m, b) +} +func (m *UserAppLogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserAppLogLine.Marshal(b, m, deterministic) +} +func (dst *UserAppLogLine) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserAppLogLine.Merge(dst, src) +} +func (m *UserAppLogLine) XXX_Size() int { + return xxx_messageInfo_UserAppLogLine.Size(m) +} +func (m 
*UserAppLogLine) XXX_DiscardUnknown() { + xxx_messageInfo_UserAppLogLine.DiscardUnknown(m) +} + +var xxx_messageInfo_UserAppLogLine proto.InternalMessageInfo + +func (m *UserAppLogLine) GetTimestampUsec() int64 { + if m != nil && m.TimestampUsec != nil { + return *m.TimestampUsec + } + return 0 +} + +func (m *UserAppLogLine) GetLevel() int64 { + if m != nil && m.Level != nil { + return *m.Level + } + return 0 +} + +func (m *UserAppLogLine) GetMessage() string { + if m != nil && m.Message != nil { + return *m.Message + } + return "" +} + +type UserAppLogGroup struct { + LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line,json=logLine" json:"log_line,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} } +func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) } +func (*UserAppLogGroup) ProtoMessage() {} +func (*UserAppLogGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{2} +} +func (m *UserAppLogGroup) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_UserAppLogGroup.Unmarshal(m, b) +} +func (m *UserAppLogGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_UserAppLogGroup.Marshal(b, m, deterministic) +} +func (dst *UserAppLogGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserAppLogGroup.Merge(dst, src) +} +func (m *UserAppLogGroup) XXX_Size() int { + return xxx_messageInfo_UserAppLogGroup.Size(m) +} +func (m *UserAppLogGroup) XXX_DiscardUnknown() { + xxx_messageInfo_UserAppLogGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_UserAppLogGroup proto.InternalMessageInfo + +func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine { + if m != nil { + return m.LogLine + } + return nil +} + +type FlushRequest struct { + Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} +func (*FlushRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{3} +} +func (m *FlushRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FlushRequest.Unmarshal(m, b) +} +func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic) +} +func (dst *FlushRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlushRequest.Merge(dst, src) +} +func (m *FlushRequest) XXX_Size() int { + return xxx_messageInfo_FlushRequest.Size(m) +} +func (m *FlushRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FlushRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FlushRequest proto.InternalMessageInfo + +func (m *FlushRequest) GetLogs() []byte { + if m != nil { + return m.Logs + } + return nil +} + +type SetStatusRequest struct { + Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} } +func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) } +func (*SetStatusRequest) ProtoMessage() {} +func 
(*SetStatusRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{4} +} +func (m *SetStatusRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetStatusRequest.Unmarshal(m, b) +} +func (m *SetStatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetStatusRequest.Marshal(b, m, deterministic) +} +func (dst *SetStatusRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetStatusRequest.Merge(dst, src) +} +func (m *SetStatusRequest) XXX_Size() int { + return xxx_messageInfo_SetStatusRequest.Size(m) +} +func (m *SetStatusRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetStatusRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetStatusRequest proto.InternalMessageInfo + +func (m *SetStatusRequest) GetStatus() string { + if m != nil && m.Status != nil { + return *m.Status + } + return "" +} + +type LogOffset struct { + RequestId []byte `protobuf:"bytes,1,opt,name=request_id,json=requestId" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogOffset) Reset() { *m = LogOffset{} } +func (m *LogOffset) String() string { return proto.CompactTextString(m) } +func (*LogOffset) ProtoMessage() {} +func (*LogOffset) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{5} +} +func (m *LogOffset) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogOffset.Unmarshal(m, b) +} +func (m *LogOffset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogOffset.Marshal(b, m, deterministic) +} +func (dst *LogOffset) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogOffset.Merge(dst, src) +} +func (m *LogOffset) XXX_Size() int { + return xxx_messageInfo_LogOffset.Size(m) +} +func (m *LogOffset) XXX_DiscardUnknown() { + xxx_messageInfo_LogOffset.DiscardUnknown(m) +} + +var xxx_messageInfo_LogOffset proto.InternalMessageInfo + +func (m *LogOffset) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +type LogLine struct { + Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"` + Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"` + LogMessage *string `protobuf:"bytes,3,req,name=log_message,json=logMessage" json:"log_message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogLine) Reset() { *m = LogLine{} } +func (m *LogLine) String() string { return proto.CompactTextString(m) } +func (*LogLine) ProtoMessage() {} +func (*LogLine) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{6} +} +func (m *LogLine) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogLine.Unmarshal(m, b) +} +func (m *LogLine) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogLine.Marshal(b, m, deterministic) +} +func (dst *LogLine) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogLine.Merge(dst, src) +} +func (m *LogLine) XXX_Size() int { + return xxx_messageInfo_LogLine.Size(m) +} +func (m *LogLine) XXX_DiscardUnknown() { + xxx_messageInfo_LogLine.DiscardUnknown(m) +} + +var xxx_messageInfo_LogLine proto.InternalMessageInfo + +func (m *LogLine) GetTime() int64 { + if m != nil && m.Time != nil { + return *m.Time + } + return 0 +} + +func (m *LogLine) GetLevel() int32 { + if m != nil && m.Level != nil { + 
return *m.Level + } + return 0 +} + +func (m *LogLine) GetLogMessage() string { + if m != nil && m.LogMessage != nil { + return *m.LogMessage + } + return "" +} + +type RequestLog struct { + AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` + ModuleId *string `protobuf:"bytes,37,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,req,name=version_id,json=versionId" json:"version_id,omitempty"` + RequestId []byte `protobuf:"bytes,3,req,name=request_id,json=requestId" json:"request_id,omitempty"` + Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"` + Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"` + Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"` + StartTime *int64 `protobuf:"varint,6,req,name=start_time,json=startTime" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,7,req,name=end_time,json=endTime" json:"end_time,omitempty"` + Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"` + Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"` + Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"` + Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"` + HttpVersion *string `protobuf:"bytes,12,req,name=http_version,json=httpVersion" json:"http_version,omitempty"` + Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"` + ResponseSize *int64 `protobuf:"varint,14,req,name=response_size,json=responseSize" json:"response_size,omitempty"` + Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"` + UserAgent *string `protobuf:"bytes,16,opt,name=user_agent,json=userAgent" json:"user_agent,omitempty"` + UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry,json=urlMapEntry" json:"url_map_entry,omitempty"` + Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"` + ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles,json=apiMcycles" json:"api_mcycles,omitempty"` + Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"` + Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"` + TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name,json=taskQueueName" json:"task_queue_name,omitempty"` + TaskName *string `protobuf:"bytes,23,opt,name=task_name,json=taskName" json:"task_name,omitempty"` + WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request,json=wasLoadingRequest" json:"was_loading_request,omitempty"` + PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time,json=pendingTime" json:"pending_time,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,json=replicaIndex,def=-1" json:"replica_index,omitempty"` + Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"` + CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key,json=cloneKey" json:"clone_key,omitempty"` + Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"` + LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete,json=linesIncomplete" json:"lines_incomplete,omitempty"` + AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release,json=appEngineRelease" json:"app_engine_release,omitempty"` + ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason,json=exitReason" json:"exit_reason,omitempty"` + 
WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time,json=wasThrottledForTime" json:"was_throttled_for_time,omitempty"` + WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests,json=wasThrottledForRequests" json:"was_throttled_for_requests,omitempty"` + ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` + ServerName []byte `protobuf:"bytes,34,opt,name=server_name,json=serverName" json:"server_name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestLog) Reset() { *m = RequestLog{} } +func (m *RequestLog) String() string { return proto.CompactTextString(m) } +func (*RequestLog) ProtoMessage() {} +func (*RequestLog) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{7} +} +func (m *RequestLog) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_RequestLog.Unmarshal(m, b) +} +func (m *RequestLog) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RequestLog.Marshal(b, m, deterministic) +} +func (dst *RequestLog) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestLog.Merge(dst, src) +} +func (m *RequestLog) XXX_Size() int { + return xxx_messageInfo_RequestLog.Size(m) +} +func (m *RequestLog) XXX_DiscardUnknown() { + xxx_messageInfo_RequestLog.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestLog proto.InternalMessageInfo + +const Default_RequestLog_ModuleId string = "default" +const Default_RequestLog_ReplicaIndex int32 = -1 +const Default_RequestLog_Finished bool = true + +func (m *RequestLog) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *RequestLog) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_RequestLog_ModuleId +} + +func (m *RequestLog) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *RequestLog) GetRequestId() []byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *RequestLog) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *RequestLog) GetIp() string { + if m != nil && m.Ip != nil { + return *m.Ip + } + return "" +} + +func (m *RequestLog) GetNickname() string { + if m != nil && m.Nickname != nil { + return *m.Nickname + } + return "" +} + +func (m *RequestLog) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *RequestLog) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *RequestLog) GetLatency() int64 { + if m != nil && m.Latency != nil { + return *m.Latency + } + return 0 +} + +func (m *RequestLog) GetMcycles() int64 { + if m != nil && m.Mcycles != nil { + return *m.Mcycles + } + return 0 +} + +func (m *RequestLog) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *RequestLog) GetResource() string { + if m != nil && m.Resource != nil { + return *m.Resource + } + return "" +} + +func (m *RequestLog) GetHttpVersion() string { + if m != nil && m.HttpVersion != nil { + return *m.HttpVersion + } + return "" +} + +func (m *RequestLog) GetStatus() int32 { + if m != nil && m.Status != nil { + return *m.Status + } + return 0 +} + +func (m *RequestLog) GetResponseSize() int64 { + if m 
!= nil && m.ResponseSize != nil { + return *m.ResponseSize + } + return 0 +} + +func (m *RequestLog) GetReferrer() string { + if m != nil && m.Referrer != nil { + return *m.Referrer + } + return "" +} + +func (m *RequestLog) GetUserAgent() string { + if m != nil && m.UserAgent != nil { + return *m.UserAgent + } + return "" +} + +func (m *RequestLog) GetUrlMapEntry() string { + if m != nil && m.UrlMapEntry != nil { + return *m.UrlMapEntry + } + return "" +} + +func (m *RequestLog) GetCombined() string { + if m != nil && m.Combined != nil { + return *m.Combined + } + return "" +} + +func (m *RequestLog) GetApiMcycles() int64 { + if m != nil && m.ApiMcycles != nil { + return *m.ApiMcycles + } + return 0 +} + +func (m *RequestLog) GetHost() string { + if m != nil && m.Host != nil { + return *m.Host + } + return "" +} + +func (m *RequestLog) GetCost() float64 { + if m != nil && m.Cost != nil { + return *m.Cost + } + return 0 +} + +func (m *RequestLog) GetTaskQueueName() string { + if m != nil && m.TaskQueueName != nil { + return *m.TaskQueueName + } + return "" +} + +func (m *RequestLog) GetTaskName() string { + if m != nil && m.TaskName != nil { + return *m.TaskName + } + return "" +} + +func (m *RequestLog) GetWasLoadingRequest() bool { + if m != nil && m.WasLoadingRequest != nil { + return *m.WasLoadingRequest + } + return false +} + +func (m *RequestLog) GetPendingTime() int64 { + if m != nil && m.PendingTime != nil { + return *m.PendingTime + } + return 0 +} + +func (m *RequestLog) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return Default_RequestLog_ReplicaIndex +} + +func (m *RequestLog) GetFinished() bool { + if m != nil && m.Finished != nil { + return *m.Finished + } + return Default_RequestLog_Finished +} + +func (m *RequestLog) GetCloneKey() []byte { + if m != nil { + return m.CloneKey + } + return nil +} + +func (m *RequestLog) GetLine() []*LogLine { + if m != nil { + return m.Line + } + return nil +} + +func (m *RequestLog) GetLinesIncomplete() bool { + if m != nil && m.LinesIncomplete != nil { + return *m.LinesIncomplete + } + return false +} + +func (m *RequestLog) GetAppEngineRelease() []byte { + if m != nil { + return m.AppEngineRelease + } + return nil +} + +func (m *RequestLog) GetExitReason() int32 { + if m != nil && m.ExitReason != nil { + return *m.ExitReason + } + return 0 +} + +func (m *RequestLog) GetWasThrottledForTime() bool { + if m != nil && m.WasThrottledForTime != nil { + return *m.WasThrottledForTime + } + return false +} + +func (m *RequestLog) GetWasThrottledForRequests() bool { + if m != nil && m.WasThrottledForRequests != nil { + return *m.WasThrottledForRequests + } + return false +} + +func (m *RequestLog) GetThrottledTime() int64 { + if m != nil && m.ThrottledTime != nil { + return *m.ThrottledTime + } + return 0 +} + +func (m *RequestLog) GetServerName() []byte { + if m != nil { + return m.ServerName + } + return nil +} + +type LogModuleVersion struct { + ModuleId *string `protobuf:"bytes,1,opt,name=module_id,json=moduleId,def=default" json:"module_id,omitempty"` + VersionId *string `protobuf:"bytes,2,opt,name=version_id,json=versionId" json:"version_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} } +func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) } +func (*LogModuleVersion) ProtoMessage() {} +func (*LogModuleVersion) 
Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{8} +} +func (m *LogModuleVersion) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogModuleVersion.Unmarshal(m, b) +} +func (m *LogModuleVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogModuleVersion.Marshal(b, m, deterministic) +} +func (dst *LogModuleVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogModuleVersion.Merge(dst, src) +} +func (m *LogModuleVersion) XXX_Size() int { + return xxx_messageInfo_LogModuleVersion.Size(m) +} +func (m *LogModuleVersion) XXX_DiscardUnknown() { + xxx_messageInfo_LogModuleVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_LogModuleVersion proto.InternalMessageInfo + +const Default_LogModuleVersion_ModuleId string = "default" + +func (m *LogModuleVersion) GetModuleId() string { + if m != nil && m.ModuleId != nil { + return *m.ModuleId + } + return Default_LogModuleVersion_ModuleId +} + +func (m *LogModuleVersion) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +type LogReadRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` + ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version,json=moduleVersion" json:"module_version,omitempty"` + StartTime *int64 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + EndTime *int64 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"` + RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id,json=requestId" json:"request_id,omitempty"` + MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level,json=minimumLogLevel" json:"minimum_log_level,omitempty"` + IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete,json=includeIncomplete" json:"include_incomplete,omitempty"` + Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"` + CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex,json=combinedLogRegex" json:"combined_log_regex,omitempty"` + HostRegex *string `protobuf:"bytes,15,opt,name=host_regex,json=hostRegex" json:"host_regex,omitempty"` + ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index,json=replicaIndex" json:"replica_index,omitempty"` + IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs,json=includeAppLogs" json:"include_app_logs,omitempty"` + AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request,json=appLogsPerRequest" json:"app_logs_per_request,omitempty"` + IncludeHost *bool `protobuf:"varint,11,opt,name=include_host,json=includeHost" json:"include_host,omitempty"` + IncludeAll *bool `protobuf:"varint,12,opt,name=include_all,json=includeAll" json:"include_all,omitempty"` + CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator,json=cacheIterator" json:"cache_iterator,omitempty"` + NumShards *int32 `protobuf:"varint,18,opt,name=num_shards,json=numShards" json:"num_shards,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogReadRequest) Reset() { *m = LogReadRequest{} } +func (m *LogReadRequest) String() string { return proto.CompactTextString(m) } +func 
(*LogReadRequest) ProtoMessage() {} +func (*LogReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{9} +} +func (m *LogReadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogReadRequest.Unmarshal(m, b) +} +func (m *LogReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogReadRequest.Marshal(b, m, deterministic) +} +func (dst *LogReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogReadRequest.Merge(dst, src) +} +func (m *LogReadRequest) XXX_Size() int { + return xxx_messageInfo_LogReadRequest.Size(m) +} +func (m *LogReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LogReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LogReadRequest proto.InternalMessageInfo + +func (m *LogReadRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogReadRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion { + if m != nil { + return m.ModuleVersion + } + return nil +} + +func (m *LogReadRequest) GetStartTime() int64 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogReadRequest) GetEndTime() int64 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogReadRequest) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadRequest) GetRequestId() [][]byte { + if m != nil { + return m.RequestId + } + return nil +} + +func (m *LogReadRequest) GetMinimumLogLevel() int32 { + if m != nil && m.MinimumLogLevel != nil { + return *m.MinimumLogLevel + } + return 0 +} + +func (m *LogReadRequest) GetIncludeIncomplete() bool { + if m != nil && m.IncludeIncomplete != nil { + return *m.IncludeIncomplete + } + return false +} + +func (m *LogReadRequest) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogReadRequest) GetCombinedLogRegex() string { + if m != nil && m.CombinedLogRegex != nil { + return *m.CombinedLogRegex + } + return "" +} + +func (m *LogReadRequest) GetHostRegex() string { + if m != nil && m.HostRegex != nil { + return *m.HostRegex + } + return "" +} + +func (m *LogReadRequest) GetReplicaIndex() int32 { + if m != nil && m.ReplicaIndex != nil { + return *m.ReplicaIndex + } + return 0 +} + +func (m *LogReadRequest) GetIncludeAppLogs() bool { + if m != nil && m.IncludeAppLogs != nil { + return *m.IncludeAppLogs + } + return false +} + +func (m *LogReadRequest) GetAppLogsPerRequest() int32 { + if m != nil && m.AppLogsPerRequest != nil { + return *m.AppLogsPerRequest + } + return 0 +} + +func (m *LogReadRequest) GetIncludeHost() bool { + if m != nil && m.IncludeHost != nil { + return *m.IncludeHost + } + return false +} + +func (m *LogReadRequest) GetIncludeAll() bool { + if m != nil && m.IncludeAll != nil { + return *m.IncludeAll + } + return false +} + +func (m *LogReadRequest) GetCacheIterator() bool { + if m != nil && m.CacheIterator != nil { + return *m.CacheIterator + } + return false +} + +func (m *LogReadRequest) GetNumShards() int32 { + if m != nil && m.NumShards != nil { + return *m.NumShards + } + return 0 +} + +type LogReadResponse struct { + Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"` + Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"` + LastEndTime *int64 
`protobuf:"varint,3,opt,name=last_end_time,json=lastEndTime" json:"last_end_time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogReadResponse) Reset() { *m = LogReadResponse{} } +func (m *LogReadResponse) String() string { return proto.CompactTextString(m) } +func (*LogReadResponse) ProtoMessage() {} +func (*LogReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{10} +} +func (m *LogReadResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogReadResponse.Unmarshal(m, b) +} +func (m *LogReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogReadResponse.Marshal(b, m, deterministic) +} +func (dst *LogReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogReadResponse.Merge(dst, src) +} +func (m *LogReadResponse) XXX_Size() int { + return xxx_messageInfo_LogReadResponse.Size(m) +} +func (m *LogReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LogReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LogReadResponse proto.InternalMessageInfo + +func (m *LogReadResponse) GetLog() []*RequestLog { + if m != nil { + return m.Log + } + return nil +} + +func (m *LogReadResponse) GetOffset() *LogOffset { + if m != nil { + return m.Offset + } + return nil +} + +func (m *LogReadResponse) GetLastEndTime() int64 { + if m != nil && m.LastEndTime != nil { + return *m.LastEndTime + } + return 0 +} + +type LogUsageRecord struct { + VersionId *string `protobuf:"bytes,1,opt,name=version_id,json=versionId" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,2,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,3,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"` + TotalSize *int64 `protobuf:"varint,5,opt,name=total_size,json=totalSize" json:"total_size,omitempty"` + Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} } +func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) } +func (*LogUsageRecord) ProtoMessage() {} +func (*LogUsageRecord) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{11} +} +func (m *LogUsageRecord) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogUsageRecord.Unmarshal(m, b) +} +func (m *LogUsageRecord) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogUsageRecord.Marshal(b, m, deterministic) +} +func (dst *LogUsageRecord) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogUsageRecord.Merge(dst, src) +} +func (m *LogUsageRecord) XXX_Size() int { + return xxx_messageInfo_LogUsageRecord.Size(m) +} +func (m *LogUsageRecord) XXX_DiscardUnknown() { + xxx_messageInfo_LogUsageRecord.DiscardUnknown(m) +} + +var xxx_messageInfo_LogUsageRecord proto.InternalMessageInfo + +func (m *LogUsageRecord) GetVersionId() string { + if m != nil && m.VersionId != nil { + return *m.VersionId + } + return "" +} + +func (m *LogUsageRecord) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRecord) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return 
*m.EndTime + } + return 0 +} + +func (m *LogUsageRecord) GetCount() int64 { + if m != nil && m.Count != nil { + return *m.Count + } + return 0 +} + +func (m *LogUsageRecord) GetTotalSize() int64 { + if m != nil && m.TotalSize != nil { + return *m.TotalSize + } + return 0 +} + +func (m *LogUsageRecord) GetRecords() int32 { + if m != nil && m.Records != nil { + return *m.Records + } + return 0 +} + +type LogUsageRequest struct { + AppId *string `protobuf:"bytes,1,req,name=app_id,json=appId" json:"app_id,omitempty"` + VersionId []string `protobuf:"bytes,2,rep,name=version_id,json=versionId" json:"version_id,omitempty"` + StartTime *int32 `protobuf:"varint,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + EndTime *int32 `protobuf:"varint,4,opt,name=end_time,json=endTime" json:"end_time,omitempty"` + ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,json=resolutionHours,def=1" json:"resolution_hours,omitempty"` + CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions,json=combineVersions" json:"combine_versions,omitempty"` + UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version,json=usageVersion" json:"usage_version,omitempty"` + VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only,json=versionsOnly" json:"versions_only,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} } +func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) } +func (*LogUsageRequest) ProtoMessage() {} +func (*LogUsageRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{12} +} +func (m *LogUsageRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogUsageRequest.Unmarshal(m, b) +} +func (m *LogUsageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogUsageRequest.Marshal(b, m, deterministic) +} +func (dst *LogUsageRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogUsageRequest.Merge(dst, src) +} +func (m *LogUsageRequest) XXX_Size() int { + return xxx_messageInfo_LogUsageRequest.Size(m) +} +func (m *LogUsageRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LogUsageRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LogUsageRequest proto.InternalMessageInfo + +const Default_LogUsageRequest_ResolutionHours uint32 = 1 + +func (m *LogUsageRequest) GetAppId() string { + if m != nil && m.AppId != nil { + return *m.AppId + } + return "" +} + +func (m *LogUsageRequest) GetVersionId() []string { + if m != nil { + return m.VersionId + } + return nil +} + +func (m *LogUsageRequest) GetStartTime() int32 { + if m != nil && m.StartTime != nil { + return *m.StartTime + } + return 0 +} + +func (m *LogUsageRequest) GetEndTime() int32 { + if m != nil && m.EndTime != nil { + return *m.EndTime + } + return 0 +} + +func (m *LogUsageRequest) GetResolutionHours() uint32 { + if m != nil && m.ResolutionHours != nil { + return *m.ResolutionHours + } + return Default_LogUsageRequest_ResolutionHours +} + +func (m *LogUsageRequest) GetCombineVersions() bool { + if m != nil && m.CombineVersions != nil { + return *m.CombineVersions + } + return false +} + +func (m *LogUsageRequest) GetUsageVersion() int32 { + if m != nil && m.UsageVersion != nil { + return *m.UsageVersion + } + return 0 +} + +func (m *LogUsageRequest) GetVersionsOnly() bool { + if m != nil && m.VersionsOnly != nil { + return *m.VersionsOnly + } + 
return false +} + +type LogUsageResponse struct { + Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"` + Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} } +func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) } +func (*LogUsageResponse) ProtoMessage() {} +func (*LogUsageResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_log_service_f054fd4b5012319d, []int{13} +} +func (m *LogUsageResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LogUsageResponse.Unmarshal(m, b) +} +func (m *LogUsageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LogUsageResponse.Marshal(b, m, deterministic) +} +func (dst *LogUsageResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LogUsageResponse.Merge(dst, src) +} +func (m *LogUsageResponse) XXX_Size() int { + return xxx_messageInfo_LogUsageResponse.Size(m) +} +func (m *LogUsageResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LogUsageResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LogUsageResponse proto.InternalMessageInfo + +func (m *LogUsageResponse) GetUsage() []*LogUsageRecord { + if m != nil { + return m.Usage + } + return nil +} + +func (m *LogUsageResponse) GetSummary() *LogUsageRecord { + if m != nil { + return m.Summary + } + return nil +} + +func init() { + proto.RegisterType((*LogServiceError)(nil), "appengine.LogServiceError") + proto.RegisterType((*UserAppLogLine)(nil), "appengine.UserAppLogLine") + proto.RegisterType((*UserAppLogGroup)(nil), "appengine.UserAppLogGroup") + proto.RegisterType((*FlushRequest)(nil), "appengine.FlushRequest") + proto.RegisterType((*SetStatusRequest)(nil), "appengine.SetStatusRequest") + proto.RegisterType((*LogOffset)(nil), "appengine.LogOffset") + proto.RegisterType((*LogLine)(nil), "appengine.LogLine") + proto.RegisterType((*RequestLog)(nil), "appengine.RequestLog") + proto.RegisterType((*LogModuleVersion)(nil), "appengine.LogModuleVersion") + proto.RegisterType((*LogReadRequest)(nil), "appengine.LogReadRequest") + proto.RegisterType((*LogReadResponse)(nil), "appengine.LogReadResponse") + proto.RegisterType((*LogUsageRecord)(nil), "appengine.LogUsageRecord") + proto.RegisterType((*LogUsageRequest)(nil), "appengine.LogUsageRequest") + proto.RegisterType((*LogUsageResponse)(nil), "appengine.LogUsageResponse") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/log/log_service.proto", fileDescriptor_log_service_f054fd4b5012319d) +} + +var fileDescriptor_log_service_f054fd4b5012319d = []byte{ + // 1553 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x72, 0xdb, 0xc6, + 0x15, 0x2e, 0x48, 0x51, 0x24, 0x0f, 0x49, 0x91, 0x5a, 0xcb, 0xce, 0xda, 0xae, 0x6b, 0x1a, 0x4e, + 0x1c, 0xd6, 0x93, 0x48, 0x93, 0xa4, 0x57, 0xca, 0x95, 0xd3, 0x2a, 0x8e, 0x26, 0xb4, 0xd5, 0x40, + 0x72, 0x3a, 0xd3, 0x1b, 0x0c, 0x0a, 0x1c, 0x81, 0x18, 0x2f, 0xb1, 0xc8, 0xee, 0xc2, 0x91, 0x72, + 0xdb, 0xdb, 0x3e, 0x46, 0x1f, 0xa2, 0xaf, 0xd2, 0xb7, 0xe9, 0xec, 0xd9, 0x05, 0x44, 0x2a, 0x4d, + 0xc6, 0x33, 0xb9, 0xe0, 0x10, 0xfb, 0x9d, 0x83, 0xdd, 0xf3, 0xf3, 0x9d, 0x6f, 0x01, 0xc7, 0xb9, + 0x94, 0xb9, 0xc0, 0xc3, 0x5c, 0x8a, 0xa4, 0xcc, 0x0f, 0xa5, 0xca, 0x8f, 0x92, 0xaa, 0xc2, 0x32, + 0x2f, 0x4a, 0x3c, 0x2a, 
0x4a, 0x83, 0xaa, 0x4c, 0xc4, 0x91, 0x90, 0xb9, 0xfd, 0xc5, 0x1a, 0xd5, + 0xbb, 0x22, 0xc5, 0xc3, 0x4a, 0x49, 0x23, 0xd9, 0xb0, 0xf5, 0x0c, 0x5f, 0xc3, 0x74, 0x29, 0xf3, + 0x73, 0x67, 0x3e, 0x51, 0x4a, 0xaa, 0xf0, 0x4b, 0x18, 0xd2, 0xc3, 0x9f, 0x65, 0x86, 0x6c, 0x17, + 0x3a, 0x67, 0xdf, 0xce, 0x7e, 0xc7, 0xee, 0xc0, 0xf4, 0xf4, 0xf5, 0xf7, 0x2f, 0x96, 0xa7, 0x7f, + 0x89, 0xa3, 0x93, 0xef, 0xde, 0x9c, 0x9c, 0x5f, 0xcc, 0x02, 0xb6, 0x0f, 0x93, 0xf3, 0x8b, 0xb3, + 0xe8, 0xc5, 0xcb, 0x93, 0xf8, 0x24, 0x8a, 0xce, 0xa2, 0x59, 0x27, 0xcc, 0x61, 0xef, 0x8d, 0x46, + 0xf5, 0xa2, 0xaa, 0x96, 0x32, 0x5f, 0x16, 0x25, 0xb2, 0x8f, 0x60, 0xcf, 0x14, 0x6b, 0xd4, 0x26, + 0x59, 0x57, 0x71, 0xad, 0x31, 0xe5, 0xc1, 0xbc, 0xb3, 0xe8, 0x46, 0x93, 0x16, 0x7d, 0xa3, 0x31, + 0x65, 0x07, 0xd0, 0x13, 0xf8, 0x0e, 0x05, 0xef, 0x90, 0xd5, 0x2d, 0x18, 0x87, 0xfe, 0x1a, 0xb5, + 0x4e, 0x72, 0xe4, 0xdd, 0x79, 0x67, 0x31, 0x8c, 0x9a, 0x65, 0xf8, 0x12, 0xa6, 0x37, 0x07, 0xbd, + 0x54, 0xb2, 0xae, 0xd8, 0x9f, 0x60, 0x60, 0x73, 0x15, 0x45, 0x89, 0xbc, 0x33, 0xef, 0x2e, 0x46, + 0x9f, 0xdf, 0x3f, 0x6c, 0x33, 0x3d, 0xdc, 0x0e, 0x2b, 0xea, 0x0b, 0xf7, 0x10, 0x86, 0x30, 0xfe, + 0x5a, 0xd4, 0x7a, 0x15, 0xe1, 0x0f, 0x35, 0x6a, 0xc3, 0x18, 0xec, 0x08, 0x99, 0x6b, 0x1e, 0xcc, + 0x83, 0xc5, 0x38, 0xa2, 0xe7, 0xf0, 0x39, 0xcc, 0xce, 0xd1, 0x9c, 0x9b, 0xc4, 0xd4, 0xba, 0xf1, + 0xbb, 0x07, 0xbb, 0x9a, 0x00, 0xca, 0x67, 0x18, 0xf9, 0x55, 0xf8, 0x1c, 0x86, 0x4b, 0x99, 0x9f, + 0x5d, 0x5e, 0x6a, 0x34, 0xec, 0x11, 0x80, 0x72, 0xfe, 0x71, 0x91, 0xf9, 0x2d, 0x87, 0x1e, 0x39, + 0xcd, 0xc2, 0x0b, 0xe8, 0x37, 0x65, 0x62, 0xb0, 0x63, 0x0b, 0xe2, 0x8b, 0x43, 0xcf, 0xdb, 0x35, + 0xe9, 0x35, 0x35, 0x79, 0x0c, 0x23, 0x9b, 0xe6, 0x76, 0x5d, 0x40, 0xc8, 0xfc, 0x95, 0x2f, 0xcd, + 0x3f, 0x01, 0xc0, 0x47, 0xb9, 0x94, 0x39, 0xbb, 0x0b, 0xbb, 0x49, 0x55, 0xb9, 0xf3, 0xad, 0x6b, + 0x2f, 0xa9, 0xaa, 0xd3, 0x8c, 0x7d, 0x08, 0xc3, 0xb5, 0xcc, 0x6a, 0x81, 0xd6, 0xf2, 0xd1, 0x3c, + 0x58, 0x0c, 0x8f, 0xfb, 0x19, 0x5e, 0x26, 0xb5, 0x30, 0xd1, 0xc0, 0x59, 0x4e, 0x33, 0x9b, 0xc0, + 0x3b, 0x54, 0xba, 0x90, 0xa5, 0x75, 0xeb, 0xd0, 0x06, 0x43, 0x8f, 0x38, 0xf3, 0x46, 0x7e, 0x36, + 0x94, 0xcd, 0xfc, 0xd8, 0x27, 0xb0, 0x2b, 0xa9, 0x10, 0xfc, 0xe9, 0x3c, 0x58, 0x8c, 0x3e, 0x3f, + 0xd8, 0xe8, 0x47, 0x5b, 0xa4, 0xc8, 0xfb, 0xb0, 0x3d, 0xe8, 0x14, 0x15, 0xdf, 0xa1, 0x33, 0x3a, + 0x45, 0xc5, 0x1e, 0xc0, 0xa0, 0x2c, 0xd2, 0xb7, 0x65, 0xb2, 0x46, 0xde, 0xb3, 0x01, 0x46, 0xed, + 0xda, 0x1e, 0xac, 0x4d, 0xa2, 0x4c, 0x4c, 0x45, 0xdb, 0xa5, 0xa2, 0x0d, 0x09, 0xb9, 0xb0, 0x95, + 0xbb, 0x0f, 0x03, 0x2c, 0x33, 0x67, 0xec, 0x93, 0xb1, 0x8f, 0x65, 0x46, 0x26, 0x0e, 0x7d, 0x91, + 0x18, 0x2c, 0xd3, 0x6b, 0x3e, 0x70, 0x16, 0xbf, 0x24, 0xb2, 0xa5, 0xd7, 0xa9, 0x40, 0xcd, 0x87, + 0xce, 0xe2, 0x97, 0xb6, 0xd7, 0x6b, 0x34, 0x2b, 0x99, 0x71, 0x70, 0xbd, 0x76, 0x2b, 0x1b, 0xa1, + 0x42, 0x2d, 0x6b, 0x95, 0x22, 0x1f, 0x91, 0xa5, 0x5d, 0xb3, 0x27, 0x30, 0x5e, 0x19, 0x53, 0xc5, + 0xbe, 0x58, 0x7c, 0x4c, 0xf6, 0x91, 0xc5, 0xbe, 0x77, 0xd0, 0x06, 0x85, 0x26, 0xd4, 0x60, 0xbf, + 0x62, 0x4f, 0x61, 0xa2, 0x50, 0x57, 0xb2, 0xd4, 0x18, 0xeb, 0xe2, 0x27, 0xe4, 0x7b, 0x14, 0xce, + 0xb8, 0x01, 0xcf, 0x8b, 0x9f, 0xd0, 0x9d, 0x7d, 0x89, 0x4a, 0xa1, 0xe2, 0x53, 0x57, 0x9d, 0x66, + 0x6d, 0xab, 0x53, 0x6b, 0x54, 0x71, 0x92, 0x63, 0x69, 0xf8, 0x8c, 0xac, 0x43, 0x8b, 0xbc, 0xb0, + 0x00, 0x0b, 0x61, 0x52, 0x2b, 0x11, 0xaf, 0x93, 0x2a, 0xc6, 0xd2, 0xa8, 0x6b, 0xbe, 0xef, 0x62, + 0xab, 0x95, 0x78, 0x95, 0x54, 0x27, 0x16, 0xb2, 0xdb, 0xa7, 0x72, 0xfd, 0x8f, 0xa2, 0xc4, 0x8c, + 0x33, 0x97, 0x5a, 0xb3, 0xb6, 0x0c, 0x4c, 0xaa, 
0x22, 0x6e, 0x8a, 0x75, 0x67, 0x1e, 0x2c, 0xba, + 0x11, 0x24, 0x55, 0xf1, 0xca, 0xd7, 0x8b, 0xc1, 0xce, 0x4a, 0x6a, 0xc3, 0x0f, 0xe8, 0x64, 0x7a, + 0xb6, 0x58, 0x6a, 0xb1, 0xbb, 0xf3, 0x60, 0x11, 0x44, 0xf4, 0xcc, 0x9e, 0xc1, 0xd4, 0x24, 0xfa, + 0x6d, 0xfc, 0x43, 0x8d, 0x35, 0xc6, 0xd4, 0xe8, 0x7b, 0xf4, 0xca, 0xc4, 0xc2, 0xdf, 0x59, 0xf4, + 0xb5, 0xed, 0xf6, 0x43, 0x18, 0x92, 0x1f, 0x79, 0x7c, 0xe0, 0x92, 0xb5, 0x00, 0x19, 0x0f, 0xe1, + 0xce, 0x8f, 0x89, 0x8e, 0x85, 0x4c, 0xb2, 0xa2, 0xcc, 0x63, 0xcf, 0x3e, 0xce, 0xe7, 0xc1, 0x62, + 0x10, 0xed, 0xff, 0x98, 0xe8, 0xa5, 0xb3, 0x34, 0x83, 0xfb, 0x04, 0xc6, 0x15, 0x96, 0xe4, 0x4b, + 0xfc, 0xb8, 0x4f, 0xe1, 0x8f, 0x3c, 0x46, 0x1c, 0xf9, 0xd8, 0x36, 0xa0, 0x12, 0x45, 0x9a, 0xc4, + 0x45, 0x99, 0xe1, 0x15, 0x7f, 0x30, 0x0f, 0x16, 0xbd, 0xe3, 0xce, 0xa7, 0x9f, 0xd9, 0x26, 0x90, + 0xe1, 0xd4, 0xe2, 0x6c, 0x0e, 0x83, 0xcb, 0xa2, 0x2c, 0xf4, 0x0a, 0x33, 0xfe, 0xd0, 0x1e, 0x78, + 0xbc, 0x63, 0x54, 0x8d, 0x51, 0x8b, 0xda, 0xd0, 0x53, 0x21, 0x4b, 0x8c, 0xdf, 0xe2, 0x35, 0xff, + 0x3d, 0x09, 0xc0, 0x80, 0x80, 0x6f, 0xf1, 0x9a, 0x3d, 0x83, 0x1d, 0x52, 0xab, 0x47, 0xa4, 0x56, + 0x6c, 0x7b, 0x3a, 0x48, 0xa6, 0xc8, 0xce, 0xfe, 0x08, 0x33, 0xfb, 0xaf, 0xe3, 0xa2, 0x4c, 0xe5, + 0xba, 0x12, 0x68, 0x90, 0x7f, 0x48, 0xf9, 0x4d, 0x09, 0x3f, 0x6d, 0x61, 0xf6, 0x09, 0x30, 0x3b, + 0xed, 0x6e, 0x9b, 0x58, 0xa1, 0xc0, 0x44, 0x23, 0x7f, 0x46, 0x07, 0xcf, 0x92, 0xaa, 0x3a, 0x21, + 0x43, 0xe4, 0x70, 0xdb, 0x49, 0xbc, 0x2a, 0x4c, 0xac, 0x30, 0xd1, 0xb2, 0xe4, 0x7f, 0xb0, 0x69, + 0x46, 0x60, 0xa1, 0x88, 0x10, 0xf6, 0x05, 0xdc, 0xb3, 0xc5, 0x35, 0x2b, 0x25, 0x8d, 0x11, 0x98, + 0xc5, 0x97, 0x52, 0xb9, 0xb2, 0x3d, 0xa6, 0xf3, 0x6d, 0xe9, 0x2f, 0x1a, 0xe3, 0xd7, 0x52, 0x51, + 0xf9, 0xbe, 0x84, 0x07, 0x3f, 0x7f, 0xc9, 0xf7, 0x45, 0xf3, 0x39, 0xbd, 0xf8, 0xc1, 0xad, 0x17, + 0x7d, 0x77, 0x34, 0xdd, 0x17, 0xed, 0x8b, 0x74, 0xd2, 0x13, 0x6a, 0xd0, 0xa4, 0x45, 0xe9, 0x8c, + 0xc7, 0x30, 0xb2, 0x97, 0x1a, 0x2a, 0x47, 0x8a, 0x90, 0x12, 0x04, 0x07, 0x59, 0x5a, 0x84, 0x7f, + 0x83, 0xd9, 0x52, 0xe6, 0xaf, 0x48, 0xc8, 0x9a, 0x81, 0xdb, 0xd2, 0xbc, 0xe0, 0x7d, 0x35, 0x2f, + 0xd8, 0xd2, 0xbc, 0xf0, 0xbf, 0x3d, 0xd8, 0x5b, 0xca, 0x3c, 0xc2, 0x24, 0x6b, 0x28, 0xf5, 0x0b, + 0x12, 0x7b, 0x7b, 0xa3, 0xee, 0xb6, 0x78, 0x7e, 0x05, 0x7b, 0x3e, 0x9a, 0x46, 0x23, 0xee, 0x10, + 0x0f, 0x1e, 0x6e, 0xf3, 0x60, 0x2b, 0x85, 0x68, 0xb2, 0xde, 0xca, 0x68, 0x5b, 0x07, 0xbb, 0x54, + 0xa9, 0x5f, 0xd0, 0xc1, 0x1d, 0x32, 0xb6, 0x3a, 0x78, 0xa3, 0xcd, 0xbd, 0xf7, 0xd0, 0xe6, 0x6d, + 0xa1, 0xdf, 0x9d, 0x77, 0xb7, 0x85, 0xfe, 0x39, 0xec, 0xaf, 0x8b, 0xb2, 0x58, 0xd7, 0xeb, 0x98, + 0xae, 0x60, 0xba, 0xb5, 0xfa, 0xc4, 0xa6, 0xa9, 0x37, 0x58, 0x46, 0xd3, 0xfd, 0xf5, 0x29, 0xb0, + 0xa2, 0x4c, 0x45, 0x9d, 0xe1, 0x26, 0x9d, 0x07, 0x6e, 0x5c, 0xbd, 0x65, 0x83, 0xd0, 0x07, 0xd0, + 0x4b, 0x65, 0x5d, 0x1a, 0x3e, 0xa4, 0xf8, 0xdd, 0xc2, 0xd2, 0xbc, 0x91, 0x23, 0x3a, 0x51, 0x61, + 0x8e, 0x57, 0x7c, 0x8f, 0x7a, 0x35, 0x6b, 0x2c, 0xd4, 0xa5, 0x1c, 0xaf, 0x6c, 0xf4, 0x56, 0x83, + 0xbc, 0x97, 0x53, 0xcb, 0xa1, 0x45, 0x9c, 0xf9, 0xe9, 0xed, 0x71, 0x9f, 0x51, 0xe4, 0xdb, 0xa3, + 0xbe, 0x80, 0x59, 0x13, 0xb6, 0xed, 0x35, 0x7d, 0x23, 0x00, 0x05, 0xbd, 0xe7, 0x71, 0xf7, 0x75, + 0xa1, 0xd9, 0x11, 0x1c, 0x34, 0x1e, 0x71, 0x85, 0x2d, 0xf3, 0xf9, 0x3e, 0xed, 0xba, 0x9f, 0x38, + 0xb7, 0xbf, 0xa2, 0xda, 0x50, 0xa4, 0x66, 0x6b, 0x92, 0xcd, 0x11, 0x6d, 0x3b, 0xf2, 0xd8, 0x37, + 0x56, 0x29, 0x1f, 0xc3, 0xa8, 0x3d, 0x5d, 0x08, 0x3e, 0x26, 0x0f, 0x68, 0x0e, 0x16, 0xc2, 0x8e, + 0x4d, 0x9a, 0xa4, 0x2b, 0x8c, 0x0b, 0x83, 0x2a, 0x31, 0x52, 0xf1, 0x09, 
0xf9, 0x4c, 0x08, 0x3d, + 0xf5, 0xa0, 0xad, 0x44, 0x59, 0xaf, 0x63, 0xbd, 0x4a, 0x54, 0xa6, 0x39, 0xa3, 0x88, 0x86, 0x65, + 0xbd, 0x3e, 0x27, 0x20, 0xfc, 0x57, 0x40, 0xdf, 0x83, 0x8e, 0xdb, 0xee, 0xb2, 0x61, 0x1f, 0x43, + 0x57, 0xc8, 0x9c, 0x07, 0xc4, 0xcd, 0xbb, 0x1b, 0x2c, 0xb9, 0xf9, 0xc6, 0x88, 0xac, 0xc7, 0x06, + 0xa3, 0x3a, 0xef, 0xc1, 0xa8, 0x10, 0x26, 0x22, 0xd1, 0x26, 0x6e, 0xf9, 0xe9, 0xc8, 0x3b, 0xb2, + 0xe0, 0x89, 0xe3, 0x68, 0xf8, 0x9f, 0x80, 0x46, 0xed, 0x8d, 0xfd, 0xac, 0x89, 0x30, 0x95, 0xea, + 0xf6, 0x4c, 0x05, 0xb7, 0x86, 0xf3, 0xd6, 0x3c, 0x74, 0x5c, 0x7e, 0xff, 0x7f, 0x1e, 0xba, 0x64, + 0x6c, 0xe7, 0xa1, 0xe5, 0xd9, 0xce, 0x26, 0xcf, 0x1e, 0x01, 0x18, 0x69, 0x12, 0xe1, 0xee, 0xe1, + 0x9e, 0x9b, 0x2f, 0x42, 0xe8, 0x12, 0xe6, 0xd0, 0x57, 0x14, 0x97, 0xe6, 0xbb, 0x6e, 0x3b, 0xbf, + 0x0c, 0xff, 0xdd, 0xa1, 0x4a, 0xfa, 0xd0, 0x7f, 0x8b, 0x4c, 0xfc, 0x7c, 0xc4, 0x7b, 0xbf, 0x36, + 0xe2, 0xbd, 0xcd, 0x11, 0x9f, 0xd9, 0xcf, 0x11, 0x51, 0x1b, 0xbb, 0xf7, 0x4a, 0xd6, 0x4a, 0x53, + 0x0a, 0x93, 0xe3, 0xe0, 0xb3, 0x68, 0x7a, 0x63, 0xfa, 0xc6, 0x5a, 0xec, 0x25, 0xe3, 0x07, 0xa7, + 0xd1, 0x23, 0x97, 0xd4, 0x20, 0x9a, 0x7a, 0xdc, 0x8b, 0x0e, 0x7d, 0xa0, 0xd4, 0x36, 0xb1, 0x56, + 0xb8, 0xdc, 0xa8, 0x8f, 0x09, 0x6c, 0xa4, 0xe9, 0x29, 0x4c, 0x9a, 0x7d, 0x62, 0x59, 0x8a, 0x6b, + 0x3f, 0xe2, 0xe3, 0x06, 0x3c, 0x2b, 0xc5, 0x75, 0x78, 0x45, 0x2a, 0xed, 0xab, 0xe4, 0x09, 0x77, + 0x04, 0x3d, 0xda, 0xc8, 0x53, 0xee, 0xfe, 0x36, 0x8d, 0x36, 0xc8, 0x10, 0x39, 0x3f, 0xf6, 0x05, + 0xf4, 0x75, 0xbd, 0x5e, 0x27, 0xea, 0xda, 0x33, 0xef, 0x57, 0x5e, 0x69, 0x3c, 0xbf, 0xea, 0xfd, + 0xdd, 0x92, 0xf6, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x70, 0xd9, 0xa0, 0xf8, 0x48, 0x0d, 0x00, + 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/log/log_service.proto b/vendor/google.golang.org/appengine/internal/log/log_service.proto new file mode 100644 index 0000000..8981dc4 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/log/log_service.proto @@ -0,0 +1,150 @@ +syntax = "proto2"; +option go_package = "log"; + +package appengine; + +message LogServiceError { + enum ErrorCode { + OK = 0; + INVALID_REQUEST = 1; + STORAGE_ERROR = 2; + } +} + +message UserAppLogLine { + required int64 timestamp_usec = 1; + required int64 level = 2; + required string message = 3; +} + +message UserAppLogGroup { + repeated UserAppLogLine log_line = 2; +} + +message FlushRequest { + optional bytes logs = 1; +} + +message SetStatusRequest { + required string status = 1; +} + + +message LogOffset { + optional bytes request_id = 1; +} + +message LogLine { + required int64 time = 1; + required int32 level = 2; + required string log_message = 3; +} + +message RequestLog { + required string app_id = 1; + optional string module_id = 37 [default="default"]; + required string version_id = 2; + required bytes request_id = 3; + optional LogOffset offset = 35; + required string ip = 4; + optional string nickname = 5; + required int64 start_time = 6; + required int64 end_time = 7; + required int64 latency = 8; + required int64 mcycles = 9; + required string method = 10; + required string resource = 11; + required string http_version = 12; + required int32 status = 13; + required int64 response_size = 14; + optional string referrer = 15; + optional string user_agent = 16; + required string url_map_entry = 17; + required string combined = 18; + optional int64 api_mcycles = 19; + optional string host = 20; + optional double cost = 21; + + optional string task_queue_name = 22; + optional string task_name = 23; + + optional bool 
was_loading_request = 24; + optional int64 pending_time = 25; + optional int32 replica_index = 26 [default = -1]; + optional bool finished = 27 [default = true]; + optional bytes clone_key = 28; + + repeated LogLine line = 29; + + optional bool lines_incomplete = 36; + optional bytes app_engine_release = 38; + + optional int32 exit_reason = 30; + optional bool was_throttled_for_time = 31; + optional bool was_throttled_for_requests = 32; + optional int64 throttled_time = 33; + + optional bytes server_name = 34; +} + +message LogModuleVersion { + optional string module_id = 1 [default="default"]; + optional string version_id = 2; +} + +message LogReadRequest { + required string app_id = 1; + repeated string version_id = 2; + repeated LogModuleVersion module_version = 19; + + optional int64 start_time = 3; + optional int64 end_time = 4; + optional LogOffset offset = 5; + repeated bytes request_id = 6; + + optional int32 minimum_log_level = 7; + optional bool include_incomplete = 8; + optional int64 count = 9; + + optional string combined_log_regex = 14; + optional string host_regex = 15; + optional int32 replica_index = 16; + + optional bool include_app_logs = 10; + optional int32 app_logs_per_request = 17; + optional bool include_host = 11; + optional bool include_all = 12; + optional bool cache_iterator = 13; + optional int32 num_shards = 18; +} + +message LogReadResponse { + repeated RequestLog log = 1; + optional LogOffset offset = 2; + optional int64 last_end_time = 3; +} + +message LogUsageRecord { + optional string version_id = 1; + optional int32 start_time = 2; + optional int32 end_time = 3; + optional int64 count = 4; + optional int64 total_size = 5; + optional int32 records = 6; +} + +message LogUsageRequest { + required string app_id = 1; + repeated string version_id = 2; + optional int32 start_time = 3; + optional int32 end_time = 4; + optional uint32 resolution_hours = 5 [default = 1]; + optional bool combine_versions = 6; + optional int32 usage_version = 7; + optional bool versions_only = 8; +} + +message LogUsageResponse { + repeated LogUsageRecord usage = 1; + optional LogUsageRecord summary = 2; +} diff --git a/vendor/google.golang.org/appengine/internal/main.go b/vendor/google.golang.org/appengine/internal/main.go new file mode 100644 index 0000000..1e76531 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main.go @@ -0,0 +1,16 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build appengine + +package internal + +import ( + "appengine_internal" +) + +func Main() { + MainPath = "" + appengine_internal.Main() +} diff --git a/vendor/google.golang.org/appengine/internal/main_common.go b/vendor/google.golang.org/appengine/internal/main_common.go new file mode 100644 index 0000000..357dce4 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main_common.go @@ -0,0 +1,7 @@ +package internal + +// MainPath stores the file path of the main package. On App Engine Standard +// using Go version 1.9 and below, this will be unset. On App Engine Flex and +// App Engine Standard second-gen (Go 1.11 and above), this will be the +// filepath to package main. 
+var MainPath string diff --git a/vendor/google.golang.org/appengine/internal/main_vm.go b/vendor/google.golang.org/appengine/internal/main_vm.go new file mode 100644 index 0000000..ddb79a3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/main_vm.go @@ -0,0 +1,69 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// +build !appengine + +package internal + +import ( + "io" + "log" + "net/http" + "net/url" + "os" + "path/filepath" + "runtime" +) + +func Main() { + MainPath = filepath.Dir(findMainPath()) + installHealthChecker(http.DefaultServeMux) + + port := "8080" + if s := os.Getenv("PORT"); s != "" { + port = s + } + + host := "" + if IsDevAppServer() { + host = "127.0.0.1" + } + if err := http.ListenAndServe(host+":"+port, http.HandlerFunc(handleHTTP)); err != nil { + log.Fatalf("http.ListenAndServe: %v", err) + } +} + +// Find the path to package main by looking at the root Caller. +func findMainPath() string { + pc := make([]uintptr, 100) + n := runtime.Callers(2, pc) + frames := runtime.CallersFrames(pc[:n]) + for { + frame, more := frames.Next() + // Tests won't have package main, instead they have testing.tRunner + if frame.Function == "main.main" || frame.Function == "testing.tRunner" { + return frame.File + } + if !more { + break + } + } + return "" +} + +func installHealthChecker(mux *http.ServeMux) { + // If no health check handler has been installed by this point, add a trivial one. + const healthPath = "/_ah/health" + hreq := &http.Request{ + Method: "GET", + URL: &url.URL{ + Path: healthPath, + }, + } + if _, pat := mux.Handler(hreq); pat != healthPath { + mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "ok") + }) + } +} diff --git a/vendor/google.golang.org/appengine/internal/metadata.go b/vendor/google.golang.org/appengine/internal/metadata.go new file mode 100644 index 0000000..c4ba63b --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/metadata.go @@ -0,0 +1,60 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file has code for accessing metadata. +// +// References: +// https://cloud.google.com/compute/docs/metadata + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" +) + +const ( + metadataHost = "metadata" + metadataPath = "/computeMetadata/v1/" +) + +var ( + metadataRequestHeaders = http.Header{ + "Metadata-Flavor": []string{"Google"}, + } +) + +// TODO(dsymonds): Do we need to support default values, like Python? +func mustGetMetadata(key string) []byte { + b, err := getMetadata(key) + if err != nil { + panic(fmt.Sprintf("Metadata fetch failed for '%s': %v", key, err)) + } + return b +} + +func getMetadata(key string) ([]byte, error) { + // TODO(dsymonds): May need to use url.Parse to support keys with query args. 
+ req := &http.Request{ + Method: "GET", + URL: &url.URL{ + Scheme: "http", + Host: metadataHost, + Path: metadataPath + key, + }, + Header: metadataRequestHeaders, + Host: metadataHost, + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} diff --git a/vendor/google.golang.org/appengine/internal/net.go b/vendor/google.golang.org/appengine/internal/net.go new file mode 100644 index 0000000..3b94cf0 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/net.go @@ -0,0 +1,56 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements a network dialer that limits the number of concurrent connections. +// It is only used for API calls. + +import ( + "log" + "net" + "runtime" + "sync" + "time" +) + +var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable. + +func limitRelease() { + // non-blocking + select { + case <-limitSem: + default: + // This should not normally happen. + log.Print("appengine: unbalanced limitSem release!") + } +} + +func limitDial(network, addr string) (net.Conn, error) { + limitSem <- 1 + + // Dial with a timeout in case the API host is MIA. + // The connection should normally be very fast. + conn, err := net.DialTimeout(network, addr, 500*time.Millisecond) + if err != nil { + limitRelease() + return nil, err + } + lc := &limitConn{Conn: conn} + runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required + return lc, nil +} + +type limitConn struct { + close sync.Once + net.Conn +} + +func (lc *limitConn) Close() error { + defer lc.close.Do(func() { + limitRelease() + runtime.SetFinalizer(lc, nil) + }) + return lc.Conn.Close() +} diff --git a/vendor/google.golang.org/appengine/internal/regen.sh b/vendor/google.golang.org/appengine/internal/regen.sh new file mode 100644 index 0000000..2fdb546 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/regen.sh @@ -0,0 +1,40 @@ +#!/bin/bash -e +# +# This script rebuilds the generated code for the protocol buffers. +# To run this you will need protoc and goprotobuf installed; +# see https://github.com/golang/protobuf for instructions. + +PKG=google.golang.org/appengine + +function die() { + echo 1>&2 $* + exit 1 +} + +# Sanity check that the right tools are accessible. +for tool in go protoc protoc-gen-go; do + q=$(which $tool) || die "didn't find $tool" + echo 1>&2 "$tool: $q" +done + +echo -n 1>&2 "finding package dir... " +pkgdir=$(go list -f '{{.Dir}}' $PKG) +echo 1>&2 $pkgdir +base=$(echo $pkgdir | sed "s,/$PKG\$,,") +echo 1>&2 "base: $base" +cd $base + +# Run protoc once per package. +for dir in $(find $PKG/internal -name '*.proto' | xargs dirname | sort | uniq); do + echo 1>&2 "* $dir" + protoc --go_out=. $dir/*.proto +done + +for f in $(find $PKG/internal -name '*.pb.go'); do + # Remove proto.RegisterEnum calls. + # These cause duplicate registration panics when these packages + # are used on classic App Engine. proto.RegisterEnum only affects + # parsing the text format; we don't care about that. 
+ # https://code.google.com/p/googleappengine/issues/detail?id=11670#c17 + sed -i '/proto.RegisterEnum/d' $f +done diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go new file mode 100644 index 0000000..8d782a3 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.pb.go @@ -0,0 +1,361 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/remote_api/remote_api.proto + +package remote_api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type RpcError_ErrorCode int32 + +const ( + RpcError_UNKNOWN RpcError_ErrorCode = 0 + RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1 + RpcError_PARSE_ERROR RpcError_ErrorCode = 2 + RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3 + RpcError_OVER_QUOTA RpcError_ErrorCode = 4 + RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5 + RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6 + RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7 + RpcError_BAD_REQUEST RpcError_ErrorCode = 8 + RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9 + RpcError_CANCELLED RpcError_ErrorCode = 10 + RpcError_REPLAY_ERROR RpcError_ErrorCode = 11 + RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12 +) + +var RpcError_ErrorCode_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CALL_NOT_FOUND", + 2: "PARSE_ERROR", + 3: "SECURITY_VIOLATION", + 4: "OVER_QUOTA", + 5: "REQUEST_TOO_LARGE", + 6: "CAPABILITY_DISABLED", + 7: "FEATURE_DISABLED", + 8: "BAD_REQUEST", + 9: "RESPONSE_TOO_LARGE", + 10: "CANCELLED", + 11: "REPLAY_ERROR", + 12: "DEADLINE_EXCEEDED", +} +var RpcError_ErrorCode_value = map[string]int32{ + "UNKNOWN": 0, + "CALL_NOT_FOUND": 1, + "PARSE_ERROR": 2, + "SECURITY_VIOLATION": 3, + "OVER_QUOTA": 4, + "REQUEST_TOO_LARGE": 5, + "CAPABILITY_DISABLED": 6, + "FEATURE_DISABLED": 7, + "BAD_REQUEST": 8, + "RESPONSE_TOO_LARGE": 9, + "CANCELLED": 10, + "REPLAY_ERROR": 11, + "DEADLINE_EXCEEDED": 12, +} + +func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode { + p := new(RpcError_ErrorCode) + *p = x + return p +} +func (x RpcError_ErrorCode) String() string { + return proto.EnumName(RpcError_ErrorCode_name, int32(x)) +} +func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode") + if err != nil { + return err + } + *x = RpcError_ErrorCode(value) + return nil +} +func (RpcError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{2, 0} +} + +type Request struct { + ServiceName *string `protobuf:"bytes,2,req,name=service_name,json=serviceName" json:"service_name,omitempty"` + Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"` + Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"` + RequestId *string `protobuf:"bytes,5,opt,name=request_id,json=requestId" json:"request_id,omitempty"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{0} +} +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (dst *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(dst, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetServiceName() string { + if m != nil && m.ServiceName != nil { + return *m.ServiceName + } + return "" +} + +func (m *Request) GetMethod() string { + if m != nil && m.Method != nil { + return *m.Method + } + return "" +} + +func (m *Request) GetRequest() []byte { + if m != nil { + return m.Request + } + return nil +} + +func (m *Request) GetRequestId() string { + if m != nil && m.RequestId != nil { + return *m.RequestId + } + return "" +} + +type ApplicationError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ApplicationError) Reset() { *m = ApplicationError{} } +func (m *ApplicationError) String() string { return proto.CompactTextString(m) } +func (*ApplicationError) ProtoMessage() {} +func (*ApplicationError) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{1} +} +func (m *ApplicationError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ApplicationError.Unmarshal(m, b) +} +func (m *ApplicationError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ApplicationError.Marshal(b, m, deterministic) +} +func (dst *ApplicationError) XXX_Merge(src proto.Message) { + xxx_messageInfo_ApplicationError.Merge(dst, src) +} +func (m *ApplicationError) XXX_Size() int { + return xxx_messageInfo_ApplicationError.Size(m) +} +func (m *ApplicationError) XXX_DiscardUnknown() { + xxx_messageInfo_ApplicationError.DiscardUnknown(m) +} + +var xxx_messageInfo_ApplicationError proto.InternalMessageInfo + +func (m *ApplicationError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *ApplicationError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type RpcError struct { + Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"` + Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RpcError) Reset() { *m = RpcError{} } +func (m *RpcError) String() string { return proto.CompactTextString(m) } +func (*RpcError) ProtoMessage() {} +func (*RpcError) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{2} +} +func (m *RpcError) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_RpcError.Unmarshal(m, b) +} +func (m *RpcError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_RpcError.Marshal(b, m, deterministic) +} +func (dst *RpcError) XXX_Merge(src proto.Message) { + xxx_messageInfo_RpcError.Merge(dst, src) +} +func (m *RpcError) XXX_Size() int { + return xxx_messageInfo_RpcError.Size(m) +} +func (m *RpcError) XXX_DiscardUnknown() { + xxx_messageInfo_RpcError.DiscardUnknown(m) +} + +var xxx_messageInfo_RpcError proto.InternalMessageInfo + +func (m *RpcError) GetCode() int32 { + if m != nil && m.Code != nil { + return *m.Code + } + return 0 +} + +func (m *RpcError) GetDetail() string { + if m != nil && m.Detail != nil { + return *m.Detail + } + return "" +} + +type Response struct { + Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"` + Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"` + ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error,json=applicationError" json:"application_error,omitempty"` + JavaException []byte `protobuf:"bytes,4,opt,name=java_exception,json=javaException" json:"java_exception,omitempty"` + RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error,json=rpcError" json:"rpc_error,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { + return fileDescriptor_remote_api_1978114ec33a273d, []int{3} +} +func (m *Response) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Response.Unmarshal(m, b) +} +func (m *Response) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Response.Marshal(b, m, deterministic) +} +func (dst *Response) XXX_Merge(src proto.Message) { + xxx_messageInfo_Response.Merge(dst, src) +} +func (m *Response) XXX_Size() int { + return xxx_messageInfo_Response.Size(m) +} +func (m *Response) XXX_DiscardUnknown() { + xxx_messageInfo_Response.DiscardUnknown(m) +} + +var xxx_messageInfo_Response proto.InternalMessageInfo + +func (m *Response) GetResponse() []byte { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetException() []byte { + if m != nil { + return m.Exception + } + return nil +} + +func (m *Response) GetApplicationError() *ApplicationError { + if m != nil { + return m.ApplicationError + } + return nil +} + +func (m *Response) GetJavaException() []byte { + if m != nil { + return m.JavaException + } + return nil +} + +func (m *Response) GetRpcError() *RpcError { + if m != nil { + return m.RpcError + } + return nil +} + +func init() { + proto.RegisterType((*Request)(nil), "remote_api.Request") + proto.RegisterType((*ApplicationError)(nil), "remote_api.ApplicationError") + proto.RegisterType((*RpcError)(nil), "remote_api.RpcError") + proto.RegisterType((*Response)(nil), "remote_api.Response") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/remote_api/remote_api.proto", fileDescriptor_remote_api_1978114ec33a273d) +} + +var fileDescriptor_remote_api_1978114ec33a273d = []byte{ + // 531 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x51, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xb1, 0x9b, 0x34, 0xf1, 0xc4, 0x2d, 0xdb, 0xa5, 0x14, 0x0b, 0x15, 0x29, 0x44, 
0x42, + 0xca, 0x53, 0x2a, 0x38, 0x00, 0x62, 0x63, 0x6f, 0x91, 0x85, 0x65, 0xa7, 0x6b, 0xbb, 0x50, 0x5e, + 0x56, 0x2b, 0x67, 0x65, 0x8c, 0x12, 0xaf, 0xd9, 0x98, 0x8a, 0x17, 0x6e, 0xc0, 0xb5, 0x38, 0x0c, + 0xb7, 0x40, 0x36, 0x6e, 0x63, 0xf5, 0x89, 0xb7, 0x7f, 0x7e, 0x7b, 0xe6, 0x1b, 0xcd, 0xcc, 0xc2, + 0xbb, 0x5c, 0xa9, 0x7c, 0x23, 0x17, 0xb9, 0xda, 0x88, 0x32, 0x5f, 0x28, 0x9d, 0x5f, 0x88, 0xaa, + 0x92, 0x65, 0x5e, 0x94, 0xf2, 0xa2, 0x28, 0x6b, 0xa9, 0x4b, 0xb1, 0xb9, 0xd0, 0x72, 0xab, 0x6a, + 0xc9, 0x45, 0x55, 0xf4, 0xe4, 0xa2, 0xd2, 0xaa, 0x56, 0x18, 0xf6, 0xce, 0xec, 0x27, 0x8c, 0x98, + 0xfc, 0xf6, 0x5d, 0xee, 0x6a, 0xfc, 0x12, 0xec, 0x9d, 0xd4, 0xb7, 0x45, 0x26, 0x79, 0x29, 0xb6, + 0xd2, 0x31, 0xa7, 0xe6, 0xdc, 0x62, 0x93, 0xce, 0x0b, 0xc5, 0x56, 0xe2, 0x33, 0x38, 0xdc, 0xca, + 0xfa, 0x8b, 0x5a, 0x3b, 0x07, 0xed, 0xc7, 0x2e, 0xc2, 0x0e, 0x8c, 0xf4, 0xbf, 0x2a, 0xce, 0x60, + 0x6a, 0xce, 0x6d, 0x76, 0x17, 0xe2, 0x17, 0x00, 0x9d, 0xe4, 0xc5, 0xda, 0x19, 0x4e, 0x8d, 0xb9, + 0xc5, 0xac, 0xce, 0xf1, 0xd7, 0xb3, 0xb7, 0x80, 0x48, 0x55, 0x6d, 0x8a, 0x4c, 0xd4, 0x85, 0x2a, + 0xa9, 0xd6, 0x4a, 0x63, 0x0c, 0x83, 0x4c, 0xad, 0xa5, 0x63, 0x4c, 0xcd, 0xf9, 0x90, 0xb5, 0xba, + 0x01, 0xaf, 0x65, 0x2d, 0x8a, 0x4d, 0xd7, 0x55, 0x17, 0xcd, 0x7e, 0x9b, 0x30, 0x66, 0x55, 0xf6, + 0x7f, 0x89, 0x46, 0x2f, 0xf1, 0x97, 0x09, 0x56, 0x9b, 0xe5, 0x36, 0x7f, 0x4d, 0x60, 0x94, 0x86, + 0x1f, 0xc2, 0xe8, 0x63, 0x88, 0x1e, 0x61, 0x0c, 0xc7, 0x2e, 0x09, 0x02, 0x1e, 0x46, 0x09, 0xbf, + 0x8c, 0xd2, 0xd0, 0x43, 0x06, 0x7e, 0x0c, 0x93, 0x15, 0x61, 0x31, 0xe5, 0x94, 0xb1, 0x88, 0x21, + 0x13, 0x9f, 0x01, 0x8e, 0xa9, 0x9b, 0x32, 0x3f, 0xb9, 0xe1, 0xd7, 0x7e, 0x14, 0x90, 0xc4, 0x8f, + 0x42, 0x74, 0x80, 0x8f, 0x01, 0xa2, 0x6b, 0xca, 0xf8, 0x55, 0x1a, 0x25, 0x04, 0x0d, 0xf0, 0x53, + 0x38, 0x61, 0xf4, 0x2a, 0xa5, 0x71, 0xc2, 0x93, 0x28, 0xe2, 0x01, 0x61, 0xef, 0x29, 0x1a, 0xe2, + 0x67, 0xf0, 0xc4, 0x25, 0x2b, 0xb2, 0xf4, 0x83, 0xa6, 0x80, 0xe7, 0xc7, 0x64, 0x19, 0x50, 0x0f, + 0x1d, 0xe2, 0x53, 0x40, 0x97, 0x94, 0x24, 0x29, 0xa3, 0x7b, 0x77, 0xd4, 0xe0, 0x97, 0xc4, 0xe3, + 0x5d, 0x25, 0x34, 0x6e, 0xf0, 0x8c, 0xc6, 0xab, 0x28, 0x8c, 0x69, 0xaf, 0xae, 0x85, 0x8f, 0xc0, + 0x72, 0x49, 0xe8, 0xd2, 0xa0, 0xc9, 0x03, 0x8c, 0xc0, 0x66, 0x74, 0x15, 0x90, 0x9b, 0xae, 0xef, + 0x49, 0xd3, 0x8f, 0x47, 0x89, 0x17, 0xf8, 0x21, 0xe5, 0xf4, 0x93, 0x4b, 0xa9, 0x47, 0x3d, 0x64, + 0xcf, 0xfe, 0x18, 0x30, 0x66, 0x72, 0x57, 0xa9, 0x72, 0x27, 0xf1, 0x73, 0x18, 0xeb, 0x4e, 0x3b, + 0xc6, 0xd4, 0x98, 0xdb, 0xec, 0x3e, 0xc6, 0xe7, 0x60, 0xc9, 0x1f, 0x99, 0xac, 0x9a, 0x75, 0xb5, + 0x23, 0xb5, 0xd9, 0xde, 0xc0, 0x3e, 0x9c, 0x88, 0xfd, 0x3a, 0xb9, 0x6c, 0x06, 0xec, 0x1c, 0x4c, + 0x8d, 0xf9, 0xe4, 0xcd, 0xf9, 0xa2, 0x77, 0x87, 0x0f, 0x77, 0xce, 0x90, 0x78, 0x78, 0x05, 0xaf, + 0xe0, 0xf8, 0xab, 0xb8, 0x15, 0x7c, 0x4f, 0x1b, 0xb4, 0xb4, 0xa3, 0xc6, 0xa5, 0xf7, 0xc4, 0xd7, + 0x60, 0xe9, 0x2a, 0xeb, 0x48, 0xc3, 0x96, 0x74, 0xda, 0x27, 0xdd, 0x1d, 0x07, 0x1b, 0xeb, 0x4e, + 0x2d, 0xed, 0xcf, 0xbd, 0x07, 0xf0, 0x37, 0x00, 0x00, 0xff, 0xff, 0x38, 0xd1, 0x0f, 0x22, 0x4f, + 0x03, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto new file mode 100644 index 0000000..f21763a --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/remote_api/remote_api.proto @@ -0,0 +1,44 @@ +syntax = "proto2"; +option go_package = "remote_api"; + +package remote_api; + +message Request { + required string service_name = 2; + required string method = 3; + required 
bytes request = 4; + optional string request_id = 5; +} + +message ApplicationError { + required int32 code = 1; + required string detail = 2; +} + +message RpcError { + enum ErrorCode { + UNKNOWN = 0; + CALL_NOT_FOUND = 1; + PARSE_ERROR = 2; + SECURITY_VIOLATION = 3; + OVER_QUOTA = 4; + REQUEST_TOO_LARGE = 5; + CAPABILITY_DISABLED = 6; + FEATURE_DISABLED = 7; + BAD_REQUEST = 8; + RESPONSE_TOO_LARGE = 9; + CANCELLED = 10; + REPLAY_ERROR = 11; + DEADLINE_EXCEEDED = 12; + } + required int32 code = 1; + optional string detail = 2; +} + +message Response { + optional bytes response = 1; + optional bytes exception = 2; + optional ApplicationError application_error = 3; + optional bytes java_exception = 4; + optional RpcError rpc_error = 5; +} diff --git a/vendor/google.golang.org/appengine/internal/transaction.go b/vendor/google.golang.org/appengine/internal/transaction.go new file mode 100644 index 0000000..9006ae6 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/transaction.go @@ -0,0 +1,115 @@ +// Copyright 2014 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +package internal + +// This file implements hooks for applying datastore transactions. + +import ( + "errors" + "reflect" + + "github.com/golang/protobuf/proto" + netcontext "golang.org/x/net/context" + + basepb "google.golang.org/appengine/internal/base" + pb "google.golang.org/appengine/internal/datastore" +) + +var transactionSetters = make(map[reflect.Type]reflect.Value) + +// RegisterTransactionSetter registers a function that sets transaction information +// in a protocol buffer message. f should be a function with two arguments, +// the first being a protocol buffer type, and the second being *datastore.Transaction. +func RegisterTransactionSetter(f interface{}) { + v := reflect.ValueOf(f) + transactionSetters[v.Type().In(0)] = v +} + +// applyTransaction applies the transaction t to message pb +// by using the relevant setter passed to RegisterTransactionSetter. +func applyTransaction(pb proto.Message, t *pb.Transaction) { + v := reflect.ValueOf(pb) + if f, ok := transactionSetters[v.Type()]; ok { + f.Call([]reflect.Value{v, reflect.ValueOf(t)}) + } +} + +var transactionKey = "used for *Transaction" + +func transactionFromContext(ctx netcontext.Context) *transaction { + t, _ := ctx.Value(&transactionKey).(*transaction) + return t +} + +func withTransaction(ctx netcontext.Context, t *transaction) netcontext.Context { + return netcontext.WithValue(ctx, &transactionKey, t) +} + +type transaction struct { + transaction pb.Transaction + finished bool +} + +var ErrConcurrentTransaction = errors.New("internal: concurrent transaction") + +func RunTransactionOnce(c netcontext.Context, f func(netcontext.Context) error, xg bool, readOnly bool, previousTransaction *pb.Transaction) (*pb.Transaction, error) { + if transactionFromContext(c) != nil { + return nil, errors.New("nested transactions are not supported") + } + + // Begin the transaction. 
+ t := &transaction{} + req := &pb.BeginTransactionRequest{ + App: proto.String(FullyQualifiedAppID(c)), + } + if xg { + req.AllowMultipleEg = proto.Bool(true) + } + if previousTransaction != nil { + req.PreviousTransaction = previousTransaction + } + if readOnly { + req.Mode = pb.BeginTransactionRequest_READ_ONLY.Enum() + } else { + req.Mode = pb.BeginTransactionRequest_READ_WRITE.Enum() + } + if err := Call(c, "datastore_v3", "BeginTransaction", req, &t.transaction); err != nil { + return nil, err + } + + // Call f, rolling back the transaction if f returns a non-nil error, or panics. + // The panic is not recovered. + defer func() { + if t.finished { + return + } + t.finished = true + // Ignore the error return value, since we are already returning a non-nil + // error (or we're panicking). + Call(c, "datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}) + }() + if err := f(withTransaction(c, t)); err != nil { + return &t.transaction, err + } + t.finished = true + + // Commit the transaction. + res := &pb.CommitResponse{} + err := Call(c, "datastore_v3", "Commit", &t.transaction, res) + if ae, ok := err.(*APIError); ok { + /* TODO: restore this conditional + if appengine.IsDevAppServer() { + */ + // The Python Dev AppServer raises an ApplicationError with error code 2 (which is + // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.". + if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." { + return &t.transaction, ErrConcurrentTransaction + } + if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) { + return &t.transaction, ErrConcurrentTransaction + } + } + return &t.transaction, err +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go new file mode 100644 index 0000000..5f72775 --- /dev/null +++ b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go @@ -0,0 +1,527 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto + +package urlfetch + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type URLFetchServiceError_ErrorCode int32 + +const ( + URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0 + URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1 + URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2 + URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3 + URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4 + URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5 + URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6 + URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7 + URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8 + URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9 + URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10 + URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11 + URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12 +) + +var URLFetchServiceError_ErrorCode_name = map[int32]string{ + 0: "OK", + 1: "INVALID_URL", + 2: "FETCH_ERROR", + 3: "UNSPECIFIED_ERROR", + 4: "RESPONSE_TOO_LARGE", + 5: "DEADLINE_EXCEEDED", + 6: "SSL_CERTIFICATE_ERROR", + 7: "DNS_ERROR", + 8: "CLOSED", + 9: "INTERNAL_TRANSIENT_ERROR", + 10: "TOO_MANY_REDIRECTS", + 11: "MALFORMED_REPLY", + 12: "CONNECTION_ERROR", +} +var URLFetchServiceError_ErrorCode_value = map[string]int32{ + "OK": 0, + "INVALID_URL": 1, + "FETCH_ERROR": 2, + "UNSPECIFIED_ERROR": 3, + "RESPONSE_TOO_LARGE": 4, + "DEADLINE_EXCEEDED": 5, + "SSL_CERTIFICATE_ERROR": 6, + "DNS_ERROR": 7, + "CLOSED": 8, + "INTERNAL_TRANSIENT_ERROR": 9, + "TOO_MANY_REDIRECTS": 10, + "MALFORMED_REPLY": 11, + "CONNECTION_ERROR": 12, +} + +func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode { + p := new(URLFetchServiceError_ErrorCode) + *p = x + return p +} +func (x URLFetchServiceError_ErrorCode) String() string { + return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x)) +} +func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode") + if err != nil { + return err + } + *x = URLFetchServiceError_ErrorCode(value) + return nil +} +func (URLFetchServiceError_ErrorCode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0, 0} +} + +type URLFetchRequest_RequestMethod int32 + +const ( + URLFetchRequest_GET URLFetchRequest_RequestMethod = 1 + URLFetchRequest_POST URLFetchRequest_RequestMethod = 2 + URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3 + URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4 + URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5 + URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6 +) + +var URLFetchRequest_RequestMethod_name = map[int32]string{ + 1: "GET", + 2: "POST", + 3: "HEAD", + 4: "PUT", + 5: "DELETE", + 6: "PATCH", +} +var URLFetchRequest_RequestMethod_value = map[string]int32{ + "GET": 1, + "POST": 2, + "HEAD": 3, + "PUT": 4, + "DELETE": 5, + "PATCH": 6, +} + +func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod { + p := new(URLFetchRequest_RequestMethod) + *p = x + return p +} +func (x URLFetchRequest_RequestMethod) String() string { + return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x)) +} +func (x 
*URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod") + if err != nil { + return err + } + *x = URLFetchRequest_RequestMethod(value) + return nil +} +func (URLFetchRequest_RequestMethod) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} +} + +type URLFetchServiceError struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} } +func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) } +func (*URLFetchServiceError) ProtoMessage() {} +func (*URLFetchServiceError) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{0} +} +func (m *URLFetchServiceError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchServiceError.Unmarshal(m, b) +} +func (m *URLFetchServiceError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchServiceError.Marshal(b, m, deterministic) +} +func (dst *URLFetchServiceError) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchServiceError.Merge(dst, src) +} +func (m *URLFetchServiceError) XXX_Size() int { + return xxx_messageInfo_URLFetchServiceError.Size(m) +} +func (m *URLFetchServiceError) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchServiceError.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchServiceError proto.InternalMessageInfo + +type URLFetchRequest struct { + Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,name=Method,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"` + Url *string `protobuf:"bytes,2,req,name=Url" json:"Url,omitempty"` + Header []*URLFetchRequest_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` + Payload []byte `protobuf:"bytes,6,opt,name=Payload" json:"Payload,omitempty"` + FollowRedirects *bool `protobuf:"varint,7,opt,name=FollowRedirects,def=1" json:"FollowRedirects,omitempty"` + Deadline *float64 `protobuf:"fixed64,8,opt,name=Deadline" json:"Deadline,omitempty"` + MustValidateServerCertificate *bool `protobuf:"varint,9,opt,name=MustValidateServerCertificate,def=1" json:"MustValidateServerCertificate,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} } +func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest) ProtoMessage() {} +func (*URLFetchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1} +} +func (m *URLFetchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchRequest.Unmarshal(m, b) +} +func (m *URLFetchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchRequest.Marshal(b, m, deterministic) +} +func (dst *URLFetchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchRequest.Merge(dst, src) +} +func (m *URLFetchRequest) XXX_Size() int { + return xxx_messageInfo_URLFetchRequest.Size(m) +} +func (m *URLFetchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchRequest proto.InternalMessageInfo + +const 
Default_URLFetchRequest_FollowRedirects bool = true +const Default_URLFetchRequest_MustValidateServerCertificate bool = true + +func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod { + if m != nil && m.Method != nil { + return *m.Method + } + return URLFetchRequest_GET +} + +func (m *URLFetchRequest) GetUrl() string { + if m != nil && m.Url != nil { + return *m.Url + } + return "" +} + +func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +func (m *URLFetchRequest) GetFollowRedirects() bool { + if m != nil && m.FollowRedirects != nil { + return *m.FollowRedirects + } + return Default_URLFetchRequest_FollowRedirects +} + +func (m *URLFetchRequest) GetDeadline() float64 { + if m != nil && m.Deadline != nil { + return *m.Deadline + } + return 0 +} + +func (m *URLFetchRequest) GetMustValidateServerCertificate() bool { + if m != nil && m.MustValidateServerCertificate != nil { + return *m.MustValidateServerCertificate + } + return Default_URLFetchRequest_MustValidateServerCertificate +} + +type URLFetchRequest_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} } +func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchRequest_Header) ProtoMessage() {} +func (*URLFetchRequest_Header) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{1, 0} +} +func (m *URLFetchRequest_Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchRequest_Header.Unmarshal(m, b) +} +func (m *URLFetchRequest_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchRequest_Header.Marshal(b, m, deterministic) +} +func (dst *URLFetchRequest_Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchRequest_Header.Merge(dst, src) +} +func (m *URLFetchRequest_Header) XXX_Size() int { + return xxx_messageInfo_URLFetchRequest_Header.Size(m) +} +func (m *URLFetchRequest_Header) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchRequest_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchRequest_Header proto.InternalMessageInfo + +func (m *URLFetchRequest_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchRequest_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type URLFetchResponse struct { + Content []byte `protobuf:"bytes,1,opt,name=Content" json:"Content,omitempty"` + StatusCode *int32 `protobuf:"varint,2,req,name=StatusCode" json:"StatusCode,omitempty"` + Header []*URLFetchResponse_Header `protobuf:"group,3,rep,name=Header,json=header" json:"header,omitempty"` + ContentWasTruncated *bool `protobuf:"varint,6,opt,name=ContentWasTruncated,def=0" json:"ContentWasTruncated,omitempty"` + ExternalBytesSent *int64 `protobuf:"varint,7,opt,name=ExternalBytesSent" json:"ExternalBytesSent,omitempty"` + ExternalBytesReceived *int64 `protobuf:"varint,8,opt,name=ExternalBytesReceived" json:"ExternalBytesReceived,omitempty"` + FinalUrl *string `protobuf:"bytes,9,opt,name=FinalUrl" 
json:"FinalUrl,omitempty"` + ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,name=ApiCpuMilliseconds,def=0" json:"ApiCpuMilliseconds,omitempty"` + ApiBytesSent *int64 `protobuf:"varint,11,opt,name=ApiBytesSent,def=0" json:"ApiBytesSent,omitempty"` + ApiBytesReceived *int64 `protobuf:"varint,12,opt,name=ApiBytesReceived,def=0" json:"ApiBytesReceived,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} } +func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse) ProtoMessage() {} +func (*URLFetchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2} +} +func (m *URLFetchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchResponse.Unmarshal(m, b) +} +func (m *URLFetchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchResponse.Marshal(b, m, deterministic) +} +func (dst *URLFetchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchResponse.Merge(dst, src) +} +func (m *URLFetchResponse) XXX_Size() int { + return xxx_messageInfo_URLFetchResponse.Size(m) +} +func (m *URLFetchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchResponse proto.InternalMessageInfo + +const Default_URLFetchResponse_ContentWasTruncated bool = false +const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0 +const Default_URLFetchResponse_ApiBytesSent int64 = 0 +const Default_URLFetchResponse_ApiBytesReceived int64 = 0 + +func (m *URLFetchResponse) GetContent() []byte { + if m != nil { + return m.Content + } + return nil +} + +func (m *URLFetchResponse) GetStatusCode() int32 { + if m != nil && m.StatusCode != nil { + return *m.StatusCode + } + return 0 +} + +func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header { + if m != nil { + return m.Header + } + return nil +} + +func (m *URLFetchResponse) GetContentWasTruncated() bool { + if m != nil && m.ContentWasTruncated != nil { + return *m.ContentWasTruncated + } + return Default_URLFetchResponse_ContentWasTruncated +} + +func (m *URLFetchResponse) GetExternalBytesSent() int64 { + if m != nil && m.ExternalBytesSent != nil { + return *m.ExternalBytesSent + } + return 0 +} + +func (m *URLFetchResponse) GetExternalBytesReceived() int64 { + if m != nil && m.ExternalBytesReceived != nil { + return *m.ExternalBytesReceived + } + return 0 +} + +func (m *URLFetchResponse) GetFinalUrl() string { + if m != nil && m.FinalUrl != nil { + return *m.FinalUrl + } + return "" +} + +func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 { + if m != nil && m.ApiCpuMilliseconds != nil { + return *m.ApiCpuMilliseconds + } + return Default_URLFetchResponse_ApiCpuMilliseconds +} + +func (m *URLFetchResponse) GetApiBytesSent() int64 { + if m != nil && m.ApiBytesSent != nil { + return *m.ApiBytesSent + } + return Default_URLFetchResponse_ApiBytesSent +} + +func (m *URLFetchResponse) GetApiBytesReceived() int64 { + if m != nil && m.ApiBytesReceived != nil { + return *m.ApiBytesReceived + } + return Default_URLFetchResponse_ApiBytesReceived +} + +type URLFetchResponse_Header struct { + Key *string `protobuf:"bytes,4,req,name=Key" json:"Key,omitempty"` + Value *string `protobuf:"bytes,5,req,name=Value" json:"Value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized 
[]byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} } +func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) } +func (*URLFetchResponse_Header) ProtoMessage() {} +func (*URLFetchResponse_Header) Descriptor() ([]byte, []int) { + return fileDescriptor_urlfetch_service_b245a7065f33bced, []int{2, 0} +} +func (m *URLFetchResponse_Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URLFetchResponse_Header.Unmarshal(m, b) +} +func (m *URLFetchResponse_Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URLFetchResponse_Header.Marshal(b, m, deterministic) +} +func (dst *URLFetchResponse_Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_URLFetchResponse_Header.Merge(dst, src) +} +func (m *URLFetchResponse_Header) XXX_Size() int { + return xxx_messageInfo_URLFetchResponse_Header.Size(m) +} +func (m *URLFetchResponse_Header) XXX_DiscardUnknown() { + xxx_messageInfo_URLFetchResponse_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_URLFetchResponse_Header proto.InternalMessageInfo + +func (m *URLFetchResponse_Header) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *URLFetchResponse_Header) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +func init() { + proto.RegisterType((*URLFetchServiceError)(nil), "appengine.URLFetchServiceError") + proto.RegisterType((*URLFetchRequest)(nil), "appengine.URLFetchRequest") + proto.RegisterType((*URLFetchRequest_Header)(nil), "appengine.URLFetchRequest.Header") + proto.RegisterType((*URLFetchResponse)(nil), "appengine.URLFetchResponse") + proto.RegisterType((*URLFetchResponse_Header)(nil), "appengine.URLFetchResponse.Header") +} + +func init() { + proto.RegisterFile("google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto", fileDescriptor_urlfetch_service_b245a7065f33bced) +} + +var fileDescriptor_urlfetch_service_b245a7065f33bced = []byte{ + // 770 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xdd, 0x6e, 0xe3, 0x54, + 0x10, 0xc6, 0x76, 0x7e, 0xa7, 0x5d, 0x7a, 0x76, 0xb6, 0x45, 0x66, 0xb5, 0xa0, 0x10, 0x09, 0x29, + 0x17, 0x90, 0x2e, 0x2b, 0x24, 0x44, 0xaf, 0x70, 0xed, 0x93, 0xad, 0xa9, 0x63, 0x47, 0xc7, 0x4e, + 0x61, 0xb9, 0xb1, 0xac, 0x78, 0x9a, 0x5a, 0xb2, 0xec, 0x60, 0x9f, 0x2c, 0xf4, 0x35, 0x78, 0x0d, + 0xde, 0x87, 0xa7, 0xe1, 0x02, 0x9d, 0xc4, 0xc9, 0x6e, 0xbb, 0xd1, 0x4a, 0x5c, 0x65, 0xe6, 0x9b, + 0xef, 0xcc, 0x99, 0x7c, 0xdf, 0xf8, 0x80, 0xb3, 0x2c, 0xcb, 0x65, 0x4e, 0xe3, 0x65, 0x99, 0x27, + 0xc5, 0x72, 0x5c, 0x56, 0xcb, 0xf3, 0x64, 0xb5, 0xa2, 0x62, 0x99, 0x15, 0x74, 0x9e, 0x15, 0x92, + 0xaa, 0x22, 0xc9, 0xcf, 0xd7, 0x55, 0x7e, 0x4b, 0x72, 0x71, 0xb7, 0x0f, 0xe2, 0x9a, 0xaa, 0xb7, + 0xd9, 0x82, 0xc6, 0xab, 0xaa, 0x94, 0x25, 0xf6, 0xf7, 0x67, 0x86, 0x7f, 0xeb, 0x70, 0x3a, 0x17, + 0xde, 0x44, 0xb1, 0xc2, 0x2d, 0x89, 0x57, 0x55, 0x59, 0x0d, 0xff, 0xd2, 0xa1, 0xbf, 0x89, 0xec, + 0x32, 0x25, 0xec, 0x80, 0x1e, 0x5c, 0xb3, 0x4f, 0xf0, 0x04, 0x8e, 0x5c, 0xff, 0xc6, 0xf2, 0x5c, + 0x27, 0x9e, 0x0b, 0x8f, 0x69, 0x0a, 0x98, 0xf0, 0xc8, 0xbe, 0x8a, 0xb9, 0x10, 0x81, 0x60, 0x3a, + 0x9e, 0xc1, 0xd3, 0xb9, 0x1f, 0xce, 0xb8, 0xed, 0x4e, 0x5c, 0xee, 0x34, 0xb0, 0x81, 0x9f, 0x01, + 0x0a, 0x1e, 0xce, 0x02, 0x3f, 0xe4, 0x71, 0x14, 0x04, 0xb1, 0x67, 0x89, 0xd7, 0x9c, 0xb5, 0x14, + 0xdd, 0xe1, 0x96, 0xe3, 0xb9, 0x3e, 0x8f, 0xf9, 0xaf, 
0x36, 0xe7, 0x0e, 0x77, 0x58, 0x1b, 0x3f, + 0x87, 0xb3, 0x30, 0xf4, 0x62, 0x9b, 0x8b, 0xc8, 0x9d, 0xb8, 0xb6, 0x15, 0xf1, 0xa6, 0x53, 0x07, + 0x9f, 0x40, 0xdf, 0xf1, 0xc3, 0x26, 0xed, 0x22, 0x40, 0xc7, 0xf6, 0x82, 0x90, 0x3b, 0xac, 0x87, + 0x2f, 0xc0, 0x74, 0xfd, 0x88, 0x0b, 0xdf, 0xf2, 0xe2, 0x48, 0x58, 0x7e, 0xe8, 0x72, 0x3f, 0x6a, + 0x98, 0x7d, 0x35, 0x82, 0xba, 0x79, 0x6a, 0xf9, 0x6f, 0x62, 0xc1, 0x1d, 0x57, 0x70, 0x3b, 0x0a, + 0x19, 0xe0, 0x33, 0x38, 0x99, 0x5a, 0xde, 0x24, 0x10, 0x53, 0xee, 0xc4, 0x82, 0xcf, 0xbc, 0x37, + 0xec, 0x08, 0x4f, 0x81, 0xd9, 0x81, 0xef, 0x73, 0x3b, 0x72, 0x03, 0xbf, 0x69, 0x71, 0x3c, 0xfc, + 0xc7, 0x80, 0x93, 0x9d, 0x5a, 0x82, 0x7e, 0x5f, 0x53, 0x2d, 0xf1, 0x27, 0xe8, 0x4c, 0x49, 0xde, + 0x95, 0xa9, 0xa9, 0x0d, 0xf4, 0xd1, 0xa7, 0xaf, 0x46, 0xe3, 0xbd, 0xba, 0xe3, 0x47, 0xdc, 0x71, + 0xf3, 0xbb, 0xe5, 0x8b, 0xe6, 0x1c, 0x32, 0x30, 0xe6, 0x55, 0x6e, 0xea, 0x03, 0x7d, 0xd4, 0x17, + 0x2a, 0xc4, 0x1f, 0xa1, 0x73, 0x47, 0x49, 0x4a, 0x95, 0x69, 0x0c, 0x8c, 0x11, 0xbc, 0xfa, 0xea, + 0x23, 0x3d, 0xaf, 0x36, 0x44, 0xd1, 0x1c, 0xc0, 0x17, 0xd0, 0x9d, 0x25, 0xf7, 0x79, 0x99, 0xa4, + 0x66, 0x67, 0xa0, 0x8d, 0x8e, 0x2f, 0xf5, 0x9e, 0x26, 0x76, 0x10, 0x8e, 0xe1, 0x64, 0x52, 0xe6, + 0x79, 0xf9, 0x87, 0xa0, 0x34, 0xab, 0x68, 0x21, 0x6b, 0xb3, 0x3b, 0xd0, 0x46, 0xbd, 0x8b, 0x96, + 0xac, 0xd6, 0x24, 0x1e, 0x17, 0xf1, 0x39, 0xf4, 0x1c, 0x4a, 0xd2, 0x3c, 0x2b, 0xc8, 0xec, 0x0d, + 0xb4, 0x91, 0x26, 0xf6, 0x39, 0xfe, 0x0c, 0x5f, 0x4c, 0xd7, 0xb5, 0xbc, 0x49, 0xf2, 0x2c, 0x4d, + 0x24, 0xa9, 0xed, 0xa1, 0xca, 0xa6, 0x4a, 0x66, 0xb7, 0xd9, 0x22, 0x91, 0x64, 0xf6, 0xdf, 0xeb, + 0xfc, 0x71, 0xea, 0xf3, 0x97, 0xd0, 0xd9, 0xfe, 0x0f, 0x25, 0xc6, 0x35, 0xdd, 0x9b, 0xad, 0xad, + 0x18, 0xd7, 0x74, 0x8f, 0xa7, 0xd0, 0xbe, 0x49, 0xf2, 0x35, 0x99, 0xed, 0x0d, 0xb6, 0x4d, 0x86, + 0x1e, 0x3c, 0x79, 0xa0, 0x26, 0x76, 0xc1, 0x78, 0xcd, 0x23, 0xa6, 0x61, 0x0f, 0x5a, 0xb3, 0x20, + 0x8c, 0x98, 0xae, 0xa2, 0x2b, 0x6e, 0x39, 0xcc, 0x50, 0xc5, 0xd9, 0x3c, 0x62, 0x2d, 0xb5, 0x2e, + 0x0e, 0xf7, 0x78, 0xc4, 0x59, 0x1b, 0xfb, 0xd0, 0x9e, 0x59, 0x91, 0x7d, 0xc5, 0x3a, 0xc3, 0x7f, + 0x0d, 0x60, 0xef, 0x84, 0xad, 0x57, 0x65, 0x51, 0x13, 0x9a, 0xd0, 0xb5, 0xcb, 0x42, 0x52, 0x21, + 0x4d, 0x4d, 0x49, 0x29, 0x76, 0x29, 0x7e, 0x09, 0x10, 0xca, 0x44, 0xae, 0x6b, 0xf5, 0x71, 0x6c, + 0x8c, 0x6b, 0x8b, 0xf7, 0x10, 0xbc, 0x78, 0xe4, 0xdf, 0xf0, 0xa0, 0x7f, 0xdb, 0x6b, 0x1e, 0x1b, + 0xf8, 0x03, 0x3c, 0x6b, 0xae, 0xf9, 0x25, 0xa9, 0xa3, 0x6a, 0x5d, 0x28, 0x81, 0xb6, 0x66, 0xf6, + 0x2e, 0xda, 0xb7, 0x49, 0x5e, 0x93, 0x38, 0xc4, 0xc0, 0x6f, 0xe0, 0x29, 0xff, 0x73, 0xfb, 0x02, + 0x5c, 0xde, 0x4b, 0xaa, 0x43, 0x35, 0xb8, 0x72, 0xd7, 0x10, 0x1f, 0x16, 0xf0, 0x7b, 0x38, 0x7b, + 0x00, 0x0a, 0x5a, 0x50, 0xf6, 0x96, 0xd2, 0x8d, 0xcd, 0x86, 0x38, 0x5c, 0x54, 0xfb, 0x30, 0xc9, + 0x8a, 0x24, 0x57, 0xfb, 0xaa, 0xec, 0xed, 0x8b, 0x7d, 0x8e, 0xdf, 0x01, 0x5a, 0xab, 0xcc, 0x5e, + 0xad, 0xa7, 0x59, 0x9e, 0x67, 0x35, 0x2d, 0xca, 0x22, 0xad, 0x4d, 0x50, 0xed, 0x2e, 0xb4, 0x97, + 0xe2, 0x40, 0x11, 0xbf, 0x86, 0x63, 0x6b, 0x95, 0xbd, 0x9b, 0xf6, 0x68, 0x47, 0x7e, 0x00, 0xe3, + 0xb7, 0xc0, 0x76, 0xf9, 0x7e, 0xcc, 0xe3, 0x1d, 0xf5, 0x83, 0xd2, 0xff, 0x5f, 0xa6, 0x4b, 0xf8, + 0xad, 0xb7, 0x7b, 0x2a, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x1d, 0x9f, 0x6d, 0x24, 0x63, 0x05, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto new file mode 100644 index 0000000..f695edf --- /dev/null +++ 
b/vendor/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto @@ -0,0 +1,64 @@ +syntax = "proto2"; +option go_package = "urlfetch"; + +package appengine; + +message URLFetchServiceError { + enum ErrorCode { + OK = 0; + INVALID_URL = 1; + FETCH_ERROR = 2; + UNSPECIFIED_ERROR = 3; + RESPONSE_TOO_LARGE = 4; + DEADLINE_EXCEEDED = 5; + SSL_CERTIFICATE_ERROR = 6; + DNS_ERROR = 7; + CLOSED = 8; + INTERNAL_TRANSIENT_ERROR = 9; + TOO_MANY_REDIRECTS = 10; + MALFORMED_REPLY = 11; + CONNECTION_ERROR = 12; + } +} + +message URLFetchRequest { + enum RequestMethod { + GET = 1; + POST = 2; + HEAD = 3; + PUT = 4; + DELETE = 5; + PATCH = 6; + } + required RequestMethod Method = 1; + required string Url = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bytes Payload = 6 [ctype=CORD]; + + optional bool FollowRedirects = 7 [default=true]; + + optional double Deadline = 8; + + optional bool MustValidateServerCertificate = 9 [default=true]; +} + +message URLFetchResponse { + optional bytes Content = 1; + required int32 StatusCode = 2; + repeated group Header = 3 { + required string Key = 4; + required string Value = 5; + } + optional bool ContentWasTruncated = 6 [default=false]; + optional int64 ExternalBytesSent = 7; + optional int64 ExternalBytesReceived = 8; + + optional string FinalUrl = 9; + + optional int64 ApiCpuMilliseconds = 10 [default=0]; + optional int64 ApiBytesSent = 11 [default=0]; + optional int64 ApiBytesReceived = 12 [default=0]; +} diff --git a/vendor/google.golang.org/appengine/urlfetch/urlfetch.go b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go new file mode 100644 index 0000000..6ffe1e6 --- /dev/null +++ b/vendor/google.golang.org/appengine/urlfetch/urlfetch.go @@ -0,0 +1,210 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by the Apache 2.0 +// license that can be found in the LICENSE file. + +// Package urlfetch provides an http.RoundTripper implementation +// for fetching URLs via App Engine's urlfetch service. +package urlfetch // import "google.golang.org/appengine/urlfetch" + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "golang.org/x/net/context" + + "google.golang.org/appengine/internal" + pb "google.golang.org/appengine/internal/urlfetch" +) + +// Transport is an implementation of http.RoundTripper for +// App Engine. Users should generally create an http.Client using +// this transport and use the Client rather than using this transport +// directly. +type Transport struct { + Context context.Context + + // Controls whether the application checks the validity of SSL certificates + // over HTTPS connections. A value of false (the default) instructs the + // application to send a request to the server only if the certificate is + // valid and signed by a trusted certificate authority (CA), and also + // includes a hostname that matches the certificate. A value of true + // instructs the application to perform no certificate validation. + AllowInvalidServerCertificate bool +} + +// Verify statically that *Transport implements http.RoundTripper. +var _ http.RoundTripper = (*Transport)(nil) + +// Client returns an *http.Client using a default urlfetch Transport. This +// client will have the default deadline of 5 seconds, and will check the +// validity of SSL certificates. 
+// +// Any deadline of the provided context will be used for requests through this client; +// if the client does not have a deadline then a 5 second default is used. +func Client(ctx context.Context) *http.Client { + return &http.Client{ + Transport: &Transport{ + Context: ctx, + }, + } +} + +type bodyReader struct { + content []byte + truncated bool + closed bool +} + +// ErrTruncatedBody is the error returned after the final Read() from a +// response's Body if the body has been truncated by App Engine's proxy. +var ErrTruncatedBody = errors.New("urlfetch: truncated body") + +func statusCodeToText(code int) string { + if t := http.StatusText(code); t != "" { + return t + } + return strconv.Itoa(code) +} + +func (br *bodyReader) Read(p []byte) (n int, err error) { + if br.closed { + if br.truncated { + return 0, ErrTruncatedBody + } + return 0, io.EOF + } + n = copy(p, br.content) + if n > 0 { + br.content = br.content[n:] + return + } + if br.truncated { + br.closed = true + return 0, ErrTruncatedBody + } + return 0, io.EOF +} + +func (br *bodyReader) Close() error { + br.closed = true + br.content = nil + return nil +} + +// A map of the URL Fetch-accepted methods that take a request body. +var methodAcceptsRequestBody = map[string]bool{ + "POST": true, + "PUT": true, + "PATCH": true, +} + +// urlString returns a valid string given a URL. This function is necessary because +// the String method of URL doesn't correctly handle URLs with non-empty Opaque values. +// See http://code.google.com/p/go/issues/detail?id=4860. +func urlString(u *url.URL) string { + if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") { + return u.String() + } + aux := *u + aux.Opaque = "//" + aux.Host + aux.Opaque + return aux.String() +} + +// RoundTrip issues a single HTTP request and returns its response. Per the +// http.RoundTripper interface, RoundTrip only returns an error if there +// was an unsupported request or the URL Fetch proxy fails. +// Note that HTTP response codes such as 5xx, 403, 404, etc are not +// errors as far as the transport is concerned and will be returned +// with err set to nil. +func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) { + methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method] + if !ok { + return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method) + } + + method := pb.URLFetchRequest_RequestMethod(methNum) + + freq := &pb.URLFetchRequest{ + Method: &method, + Url: proto.String(urlString(req.URL)), + FollowRedirects: proto.Bool(false), // http.Client's responsibility + MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate), + } + if deadline, ok := t.Context.Deadline(); ok { + freq.Deadline = proto.Float64(deadline.Sub(time.Now()).Seconds()) + } + + for k, vals := range req.Header { + for _, val := range vals { + freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{ + Key: proto.String(k), + Value: proto.String(val), + }) + } + } + if methodAcceptsRequestBody[req.Method] && req.Body != nil { + // Avoid a []byte copy if req.Body has a Bytes method. 
+ switch b := req.Body.(type) { + case interface { + Bytes() []byte + }: + freq.Payload = b.Bytes() + default: + freq.Payload, err = ioutil.ReadAll(req.Body) + if err != nil { + return nil, err + } + } + } + + fres := &pb.URLFetchResponse{} + if err := internal.Call(t.Context, "urlfetch", "Fetch", freq, fres); err != nil { + return nil, err + } + + res = &http.Response{} + res.StatusCode = int(*fres.StatusCode) + res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode)) + res.Header = make(http.Header) + res.Request = req + + // Faked: + res.ProtoMajor = 1 + res.ProtoMinor = 1 + res.Proto = "HTTP/1.1" + res.Close = true + + for _, h := range fres.Header { + hkey := http.CanonicalHeaderKey(*h.Key) + hval := *h.Value + if hkey == "Content-Length" { + // Will get filled in below for all but HEAD requests. + if req.Method == "HEAD" { + res.ContentLength, _ = strconv.ParseInt(hval, 10, 64) + } + continue + } + res.Header.Add(hkey, hval) + } + + if req.Method != "HEAD" { + res.ContentLength = int64(len(fres.Content)) + } + + truncated := fres.GetContentWasTruncated() + res.Body = &bodyReader{content: fres.Content, truncated: truncated} + return +} + +func init() { + internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name) + internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED)) +} diff --git a/vendor/google.golang.org/protobuf/AUTHORS b/vendor/google.golang.org/protobuf/AUTHORS new file mode 100644 index 0000000..2b00ddb --- /dev/null +++ b/vendor/google.golang.org/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at https://tip.golang.org/AUTHORS. diff --git a/vendor/google.golang.org/protobuf/CONTRIBUTORS b/vendor/google.golang.org/protobuf/CONTRIBUTORS new file mode 100644 index 0000000..1fbd3e9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. +# The master list of contributors is in the main Go distribution, +# visible at https://tip.golang.org/CONTRIBUTORS. diff --git a/vendor/google.golang.org/protobuf/LICENSE b/vendor/google.golang.org/protobuf/LICENSE new file mode 100644 index 0000000..49ea0f9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2018 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/google.golang.org/protobuf/PATENTS b/vendor/google.golang.org/protobuf/PATENTS new file mode 100644 index 0000000..7330990 --- /dev/null +++ b/vendor/google.golang.org/protobuf/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go new file mode 100644 index 0000000..77dbe1b --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -0,0 +1,789 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/set" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Unmarshal reads the given []byte into the given proto.Message. +func Unmarshal(b []byte, m proto.Message) error { + return UnmarshalOptions{}.Unmarshal(b, m) +} + +// UnmarshalOptions is a configurable textproto format unmarshaler. +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial accepts input for messages that will result in missing + // required fields. 
If AllowPartial is false (the default), Unmarshal will + // return error if there are any missing required fields. + AllowPartial bool + + // DiscardUnknown specifies whether to ignore unknown fields when parsing. + // An unknown field is any field whose field name or field number does not + // resolve to any known or extension field in the message. + // By default, unmarshal rejects unknown fields as an error. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling + // google.protobuf.Any messages or extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.MessageTypeResolver + protoregistry.ExtensionTypeResolver + } +} + +// Unmarshal reads the given []byte and populates the given proto.Message using options in +// UnmarshalOptions object. +func (o UnmarshalOptions) Unmarshal(b []byte, m proto.Message) error { + proto.Reset(m) + + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + dec := decoder{text.NewDecoder(b), o} + if err := dec.unmarshalMessage(m.ProtoReflect(), false); err != nil { + return err + } + if o.AllowPartial { + return nil + } + return proto.CheckInitialized(m) +} + +type decoder struct { + *text.Decoder + opts UnmarshalOptions +} + +// newError returns an error object with position info. +func (d decoder) newError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("(line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unexpectedTokenError returns a syntax error for the given unexpected token. +func (d decoder) unexpectedTokenError(tok text.Token) error { + return d.syntaxError(tok.Pos(), "unexpected token: %s", tok.RawString()) +} + +// syntaxError returns a syntax error for given position. +func (d decoder) syntaxError(pos int, f string, x ...interface{}) error { + line, column := d.Position(pos) + head := fmt.Sprintf("syntax error (line %d:%d): ", line, column) + return errors.New(head+f, x...) +} + +// unmarshalMessage unmarshals into the given protoreflect.Message. +func (d decoder) unmarshalMessage(m pref.Message, checkDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if messageDesc.FullName() == "google.protobuf.Any" { + return d.unmarshalAny(m, checkDelims) + } + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + + var seenNums set.Ints + var seenOneofs set.Ints + fieldDescs := messageDesc.Fields() + + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch typ := tok.Kind(); typ { + case text.Name: + // Continue below. + case text.EOF: + if checkDelims { + return text.ErrUnexpectedEOF + } + return nil + default: + if checkDelims && typ == text.MessageClose { + return nil + } + return d.unexpectedTokenError(tok) + } + + // Resolve the field descriptor. + var name pref.Name + var fd pref.FieldDescriptor + var xt pref.ExtensionType + var xtErr error + var isFieldNumberName bool + + switch tok.NameKind() { + case text.IdentName: + name = pref.Name(tok.IdentName()) + fd = fieldDescs.ByName(name) + if fd == nil { + // The proto name of a group field is in all lowercase, + // while the textproto field name is the group message name. 
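As a usage sketch for the decoder above: the public entry points are Unmarshal and UnmarshalOptions.Unmarshal, and any registered message type works as a target. The example assumes the well-known Duration type from google.golang.org/protobuf/types/known/durationpb (shipped with the same module, though not part of this hunk); the input values are illustrative.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	in := []byte("seconds: 90 nanos: 500000000")

	d := &durationpb.Duration{}
	// DiscardUnknown and AllowPartial are the UnmarshalOptions fields defined
	// above; the defaults reject unknown fields and verify required fields.
	opts := prototext.UnmarshalOptions{DiscardUnknown: true}
	if err := opts.Unmarshal(in, d); err != nil {
		panic(err)
	}
	fmt.Println(d.AsDuration()) // 1m30.5s
}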
+ gd := fieldDescs.ByName(pref.Name(strings.ToLower(string(name)))) + if gd != nil && gd.Kind() == pref.GroupKind && gd.Message().Name() == name { + fd = gd + } + } else if fd.Kind() == pref.GroupKind && fd.Message().Name() != name { + fd = nil // reset since field name is actually the message name + } + + case text.TypeName: + // Handle extensions only. This code path is not for Any. + xt, xtErr = d.findExtension(pref.FullName(tok.TypeName())) + + case text.FieldNumber: + isFieldNumberName = true + num := pref.FieldNumber(tok.FieldNumber()) + if !num.IsValid() { + return d.newError(tok.Pos(), "invalid field number: %d", num) + } + fd = fieldDescs.ByNumber(num) + if fd == nil { + xt, xtErr = d.opts.Resolver.FindExtensionByNumber(messageDesc.FullName(), num) + } + } + + if xt != nil { + fd = xt.TypeDescriptor() + if !messageDesc.ExtensionRanges().Has(fd.Number()) || fd.ContainingMessage().FullName() != messageDesc.FullName() { + return d.newError(tok.Pos(), "message %v cannot be extended by %v", messageDesc.FullName(), fd.FullName()) + } + } else if xtErr != nil && xtErr != protoregistry.NotFound { + return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) + } + if flags.ProtoLegacy { + if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { + fd = nil // reset since the weak reference is not linked in + } + } + + // Handle unknown fields. + if fd == nil { + if d.opts.DiscardUnknown || messageDesc.ReservedNames().Has(name) { + d.skipValue() + continue + } + return d.newError(tok.Pos(), "unknown field: %v", tok.RawString()) + } + + // Handle fields identified by field number. + if isFieldNumberName { + // TODO: Add an option to permit parsing field numbers. + // + // This requires careful thought as the MarshalOptions.EmitUnknown + // option allows formatting unknown fields as the field number and the + // best-effort textual representation of the field value. In that case, + // it may not be possible to unmarshal the value from a parser that does + // have information about the unknown field. + return d.newError(tok.Pos(), "cannot specify field by number: %v", tok.RawString()) + } + + switch { + case fd.IsList(): + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + list := m.Mutable(fd).List() + if err := d.unmarshalList(fd, list); err != nil { + return err + } + + case fd.IsMap(): + mmap := m.Mutable(fd).Map() + if err := d.unmarshalMap(fd, mmap); err != nil { + return err + } + + default: + kind := fd.Kind() + if kind != pref.MessageKind && kind != pref.GroupKind && !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + // If field is a oneof, check if it has already been set. + if od := fd.ContainingOneof(); od != nil { + idx := uint64(od.Index()) + if seenOneofs.Has(idx) { + return d.newError(tok.Pos(), "error parsing %q, oneof %v is already set", tok.RawString(), od.FullName()) + } + seenOneofs.Set(idx) + } + + num := uint64(fd.Number()) + if seenNums.Has(num) { + return d.newError(tok.Pos(), "non-repeated field %q is repeated", tok.RawString()) + } + + if err := d.unmarshalSingular(fd, m); err != nil { + return err + } + seenNums.Set(num) + } + } + + return nil +} + +// findExtension returns protoreflect.ExtensionType from the Resolver if found. 
+func (d decoder) findExtension(xtName pref.FullName) (pref.ExtensionType, error) { + xt, err := d.opts.Resolver.FindExtensionByName(xtName) + if err == nil { + return xt, nil + } + return messageset.FindMessageSetExtension(d.opts.Resolver, xtName) +} + +// unmarshalSingular unmarshals a non-repeated field value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalSingular(fd pref.FieldDescriptor, m pref.Message) error { + var val pref.Value + var err error + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + val = m.NewField(fd) + err = d.unmarshalMessage(val.Message(), true) + default: + val, err = d.unmarshalScalar(fd) + } + if err == nil { + m.Set(fd, val) + } + return err +} + +// unmarshalScalar unmarshals a scalar/enum protoreflect.Value specified by the +// given FieldDescriptor. +func (d decoder) unmarshalScalar(fd pref.FieldDescriptor) (pref.Value, error) { + tok, err := d.Read() + if err != nil { + return pref.Value{}, err + } + + if tok.Kind() != text.Scalar { + return pref.Value{}, d.unexpectedTokenError(tok) + } + + kind := fd.Kind() + switch kind { + case pref.BoolKind: + if b, ok := tok.Bool(); ok { + return pref.ValueOfBool(b), nil + } + + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if n, ok := tok.Int32(); ok { + return pref.ValueOfInt32(n), nil + } + + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if n, ok := tok.Int64(); ok { + return pref.ValueOfInt64(n), nil + } + + case pref.Uint32Kind, pref.Fixed32Kind: + if n, ok := tok.Uint32(); ok { + return pref.ValueOfUint32(n), nil + } + + case pref.Uint64Kind, pref.Fixed64Kind: + if n, ok := tok.Uint64(); ok { + return pref.ValueOfUint64(n), nil + } + + case pref.FloatKind: + if n, ok := tok.Float32(); ok { + return pref.ValueOfFloat32(n), nil + } + + case pref.DoubleKind: + if n, ok := tok.Float64(); ok { + return pref.ValueOfFloat64(n), nil + } + + case pref.StringKind: + if s, ok := tok.String(); ok { + if strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return pref.Value{}, d.newError(tok.Pos(), "contains invalid UTF-8") + } + return pref.ValueOfString(s), nil + } + + case pref.BytesKind: + if b, ok := tok.String(); ok { + return pref.ValueOfBytes([]byte(b)), nil + } + + case pref.EnumKind: + if lit, ok := tok.Enum(); ok { + // Lookup EnumNumber based on name. + if enumVal := fd.Enum().Values().ByName(pref.Name(lit)); enumVal != nil { + return pref.ValueOfEnum(enumVal.Number()), nil + } + } + if num, ok := tok.Int32(); ok { + return pref.ValueOfEnum(pref.EnumNumber(num)), nil + } + + default: + panic(fmt.Sprintf("invalid scalar kind %v", kind)) + } + + return pref.Value{}, d.newError(tok.Pos(), "invalid value for %v type: %v", kind, tok.RawString()) +} + +// unmarshalList unmarshals into given protoreflect.List. A list value can +// either be in [] syntax or simply just a single scalar/message value. 
+func (d decoder) unmarshalList(fd pref.FieldDescriptor, list pref.List) error { + tok, err := d.Peek() + if err != nil { + return err + } + + switch fd.Kind() { + case pref.MessageKind, pref.GroupKind: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.MessageOpen: + pval := list.NewElement() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return err + } + list.Append(pval) + return nil + } + + default: + switch tok.Kind() { + case text.ListOpen: + d.Read() + for { + tok, err := d.Peek() + if err != nil { + return err + } + + switch tok.Kind() { + case text.ListClose: + d.Read() + return nil + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + default: + return d.unexpectedTokenError(tok) + } + } + + case text.Scalar: + pval, err := d.unmarshalScalar(fd) + if err != nil { + return err + } + list.Append(pval) + return nil + } + } + + return d.unexpectedTokenError(tok) +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMap(fd pref.FieldDescriptor, mmap pref.Map) error { + // Determine ahead whether map entry is a scalar type or a message type in + // order to call the appropriate unmarshalMapValue func inside + // unmarshalMapEntry. + var unmarshalMapValue func() (pref.Value, error) + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + unmarshalMapValue = func() (pref.Value, error) { + pval := mmap.NewValue() + if err := d.unmarshalMessage(pval.Message(), true); err != nil { + return pref.Value{}, err + } + return pval, nil + } + default: + unmarshalMapValue = func() (pref.Value, error) { + return d.unmarshalScalar(fd.MapValue()) + } + } + + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageOpen: + return d.unmarshalMapEntry(fd, mmap, unmarshalMapValue) + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + if err := d.unmarshalMapEntry(fd, mmap, unmarshalMapValue); err != nil { + return err + } + default: + return d.unexpectedTokenError(tok) + } + } + + default: + return d.unexpectedTokenError(tok) + } +} + +// unmarshalMap unmarshals into given protoreflect.Map. A map value is a +// textproto message containing {key: , value: }. +func (d decoder) unmarshalMapEntry(fd pref.FieldDescriptor, mmap pref.Map, unmarshalMapValue func() (pref.Value, error)) error { + var key pref.MapKey + var pval pref.Value +Loop: + for { + // Read field name. + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.Name: + if tok.NameKind() != text.IdentName { + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", tok.RawString()) + } + d.skipValue() + continue Loop + } + // Continue below. 
+ case text.MessageClose: + break Loop + default: + return d.unexpectedTokenError(tok) + } + + name := tok.IdentName() + switch name { + case "key": + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + if key.IsValid() { + return d.newError(tok.Pos(), `map entry "key" cannot be repeated`) + } + val, err := d.unmarshalScalar(fd.MapKey()) + if err != nil { + return err + } + key = val.MapKey() + + case "value": + if kind := fd.MapValue().Kind(); (kind != pref.MessageKind) && (kind != pref.GroupKind) { + if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + } + if pval.IsValid() { + return d.newError(tok.Pos(), `map entry "value" cannot be repeated`) + } + pval, err = unmarshalMapValue() + if err != nil { + return err + } + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "unknown map entry field %q", name) + } + d.skipValue() + } + } + + if !key.IsValid() { + key = fd.MapKey().Default().MapKey() + } + if !pval.IsValid() { + switch fd.MapValue().Kind() { + case pref.MessageKind, pref.GroupKind: + // If value field is not set for message/group types, construct an + // empty one as default. + pval = mmap.NewValue() + default: + pval = fd.MapValue().Default() + } + } + mmap.Set(key, pval) + return nil +} + +// unmarshalAny unmarshals an Any textproto. It can either be in expanded form +// or non-expanded form. +func (d decoder) unmarshalAny(m pref.Message, checkDelims bool) error { + var typeURL string + var bValue []byte + + // hasFields tracks which valid fields have been seen in the loop below in + // order to flag an error if there are duplicates or conflicts. It may + // contain the strings "type_url", "value" and "expanded". The literal + // "expanded" is used to indicate that the expanded form has been + // encountered already. + hasFields := map[string]bool{} + + if checkDelims { + tok, err := d.Read() + if err != nil { + return err + } + + if tok.Kind() != text.MessageOpen { + return d.unexpectedTokenError(tok) + } + } + +Loop: + for { + // Read field name. Can only have 3 possible field names, i.e. type_url, + // value and type URL name inside []. + tok, err := d.Read() + if err != nil { + return err + } + if typ := tok.Kind(); typ != text.Name { + if checkDelims { + if typ == text.MessageClose { + break Loop + } + } else if typ == text.EOF { + break Loop + } + return d.unexpectedTokenError(tok) + } + + switch tok.NameKind() { + case text.IdentName: + // Both type_url and value fields require field separator :. 
+ if !tok.HasSeparator() { + return d.syntaxError(tok.Pos(), "missing field separator :") + } + + switch tok.IdentName() { + case "type_url": + if hasFields["type_url"] { + return d.newError(tok.Pos(), "duplicate Any type_url field") + } + if hasFields["expanded"] { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + var ok bool + typeURL, ok = tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid Any type_url: %v", tok.RawString()) + } + hasFields["type_url"] = true + + case "value": + if hasFields["value"] { + return d.newError(tok.Pos(), "duplicate Any value field") + } + if hasFields["expanded"] { + return d.newError(tok.Pos(), "conflict with [%s] field", typeURL) + } + tok, err := d.Read() + if err != nil { + return err + } + s, ok := tok.String() + if !ok { + return d.newError(tok.Pos(), "invalid Any value: %v", tok.RawString()) + } + bValue = []byte(s) + hasFields["value"] = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + } + } + + case text.TypeName: + if hasFields["expanded"] { + return d.newError(tok.Pos(), "cannot have more than one type") + } + if hasFields["type_url"] { + return d.newError(tok.Pos(), "conflict with type_url field") + } + typeURL = tok.TypeName() + var err error + bValue, err = d.unmarshalExpandedAny(typeURL, tok.Pos()) + if err != nil { + return err + } + hasFields["expanded"] = true + + default: + if !d.opts.DiscardUnknown { + return d.newError(tok.Pos(), "invalid field name %q in google.protobuf.Any message", tok.RawString()) + } + } + } + + fds := m.Descriptor().Fields() + if len(typeURL) > 0 { + m.Set(fds.ByNumber(fieldnum.Any_TypeUrl), pref.ValueOfString(typeURL)) + } + if len(bValue) > 0 { + m.Set(fds.ByNumber(fieldnum.Any_Value), pref.ValueOfBytes(bValue)) + } + return nil +} + +func (d decoder) unmarshalExpandedAny(typeURL string, pos int) ([]byte, error) { + mt, err := d.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return nil, d.newError(pos, "unable to resolve message [%v]: %v", typeURL, err) + } + // Create new message for the embedded message type and unmarshal the value + // field into it. + m := mt.New() + if err := d.unmarshalMessage(m, true); err != nil { + return nil, err + } + // Serialize the embedded message and return the resulting bytes. + b, err := proto.MarshalOptions{ + AllowPartial: true, // Never check required fields inside an Any. + Deterministic: true, + }.Marshal(m.Interface()) + if err != nil { + return nil, d.newError(pos, "error in marshaling message into Any.value: %v", err) + } + return b, nil +} + +// skipValue makes the decoder parse a field value in order to advance the read +// to the next field. It relies on Read returning an error if the types are not +// in valid sequence. +func (d decoder) skipValue() error { + tok, err := d.Read() + if err != nil { + return err + } + // Only need to continue reading for messages and lists. + switch tok.Kind() { + case text.MessageOpen: + return d.skipMessageValue() + + case text.ListOpen: + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.ListClose: + return nil + case text.MessageOpen: + return d.skipMessageValue() + default: + // Skip items. This will not validate whether skipped values are + // of the same type or not, same behavior as C++ + // TextFormat::Parser::AllowUnknownField(true) version 3.8.0. 
+ if err := d.skipValue(); err != nil { + return err + } + } + } + } + return nil +} + +// skipMessageValue makes the decoder parse and skip over all fields in a +// message. It assumes that the previous read type is MessageOpen. +func (d decoder) skipMessageValue() error { + for { + tok, err := d.Read() + if err != nil { + return err + } + switch tok.Kind() { + case text.MessageClose: + return nil + case text.Name: + if err := d.skipValue(); err != nil { + return err + } + } + } +} diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/doc.go b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go new file mode 100644 index 0000000..162b4f9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package prototext marshals and unmarshals protocol buffer messages as the +// textproto format. +package prototext diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/encode.go b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go new file mode 100644 index 0000000..dece229 --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/prototext/encode.go @@ -0,0 +1,426 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package prototext + +import ( + "fmt" + "sort" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/encoding/text" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +const defaultIndent = " " + +// Format formats the message as a multiline string. +// This function is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func Format(m proto.Message) string { + return MarshalOptions{Multiline: true}.Format(m) +} + +// Marshal writes the given proto.Message in textproto format using default +// options. Do not depend on the output being stable. It may change over time +// across different versions of the program. +func Marshal(m proto.Message) ([]byte, error) { + return MarshalOptions{}.Marshal(m) +} + +// MarshalOptions is a configurable text format marshaler. +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Multiline specifies whether the marshaler should format the output in + // indented-form with every textual element on a new line. + // If Indent is an empty string, then an arbitrary indent is chosen. + Multiline bool + + // Indent specifies the set of indentation characters to use in a multiline + // formatted output such that every entry is preceded by Indent and + // terminated by a newline. If non-empty, then Multiline is treated as true. + // Indent can only be composed of space or tab characters. 
+ Indent string + + // EmitASCII specifies whether to format strings and bytes as ASCII only + // as opposed to using UTF-8 encoding when possible. + EmitASCII bool + + // allowInvalidUTF8 specifies whether to permit the encoding of strings + // with invalid UTF-8. This is unexported as it is intended to only + // be specified by the Format method. + allowInvalidUTF8 bool + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return error if there are any missing required fields. + AllowPartial bool + + // EmitUnknown specifies whether to emit unknown fields in the output. + // If specified, the unmarshaler may be unable to parse the output. + // The default is to exclude unknown fields. + EmitUnknown bool + + // Resolver is used for looking up types when expanding google.protobuf.Any + // messages. If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + protoregistry.ExtensionTypeResolver + protoregistry.MessageTypeResolver + } +} + +// Format formats the message as a string. +// This method is only intended for human consumption and ignores errors. +// Do not depend on the output being stable. It may change over time across +// different versions of the program. +func (o MarshalOptions) Format(m proto.Message) string { + if m == nil || !m.ProtoReflect().IsValid() { + return "" // invalid syntax, but okay since this is for debugging + } + o.allowInvalidUTF8 = true + o.AllowPartial = true + o.EmitUnknown = true + b, _ := o.Marshal(m) + return string(b) +} + +// Marshal writes the given proto.Message in textproto format using options in +// MarshalOptions object. Do not depend on the output being stable. It may +// change over time across different versions of the program. +func (o MarshalOptions) Marshal(m proto.Message) ([]byte, error) { + var delims = [2]byte{'{', '}'} + + if o.Multiline && o.Indent == "" { + o.Indent = defaultIndent + } + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + + internalEnc, err := text.NewEncoder(o.Indent, delims, o.EmitASCII) + if err != nil { + return nil, err + } + + // Treat nil message interface as an empty message, + // in which case there is nothing to output. + if m == nil { + return []byte{}, nil + } + + enc := encoder{internalEnc, o} + err = enc.marshalMessage(m.ProtoReflect(), false) + if err != nil { + return nil, err + } + out := enc.Bytes() + if len(o.Indent) > 0 && len(out) > 0 { + out = append(out, '\n') + } + if o.AllowPartial { + return out, nil + } + return out, proto.CheckInitialized(m) +} + +type encoder struct { + *text.Encoder + opts MarshalOptions +} + +// marshalMessage marshals the given protoreflect.Message. +func (e encoder) marshalMessage(m pref.Message, inclDelims bool) error { + messageDesc := m.Descriptor() + if !flags.ProtoLegacy && messageset.IsMessageSet(messageDesc) { + return errors.New("no support for proto1 MessageSets") + } + + if inclDelims { + e.StartMessage() + defer e.EndMessage() + } + + // Handle Any expansion. + if messageDesc.FullName() == "google.protobuf.Any" { + if e.marshalAny(m) { + return nil + } + // If unable to expand, continue on to marshal Any as a regular message. + } + + // Marshal known fields. 
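The marshaling direction mirrors this: MarshalOptions.Marshal (or Format for debug output) drives the encoder in this file. A small sketch, again assuming the well-known durationpb type and illustrative values; per the comments above, the exact output is deliberately unstable, so the expected output shown is only approximate.

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	d := durationpb.New(90*time.Second + 500*time.Millisecond)

	// Multiline and Indent are the MarshalOptions fields documented above.
	out, err := prototext.MarshalOptions{Multiline: true, Indent: "  "}.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out))
	// Approximately:
	//   seconds: 90
	//   nanos: 500000000
}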
+ fieldDescs := messageDesc.Fields() + size := fieldDescs.Len() + for i := 0; i < size; { + fd := fieldDescs.Get(i) + if od := fd.ContainingOneof(); od != nil { + fd = m.WhichOneof(od) + i += od.Fields().Len() + } else { + i++ + } + + if fd == nil || !m.Has(fd) { + continue + } + + name := fd.Name() + // Use type name for group field name. + if fd.Kind() == pref.GroupKind { + name = fd.Message().Name() + } + val := m.Get(fd) + if err := e.marshalField(string(name), val, fd); err != nil { + return err + } + } + + // Marshal extensions. + if err := e.marshalExtensions(m); err != nil { + return err + } + + // Marshal unknown fields. + if e.opts.EmitUnknown { + e.marshalUnknown(m.GetUnknown()) + } + + return nil +} + +// marshalField marshals the given field with protoreflect.Value. +func (e encoder) marshalField(name string, val pref.Value, fd pref.FieldDescriptor) error { + switch { + case fd.IsList(): + return e.marshalList(name, val.List(), fd) + case fd.IsMap(): + return e.marshalMap(name, val.Map(), fd) + default: + e.WriteName(name) + return e.marshalSingular(val, fd) + } +} + +// marshalSingular marshals the given non-repeated field value. This includes +// all scalar types, enums, messages, and groups. +func (e encoder) marshalSingular(val pref.Value, fd pref.FieldDescriptor) error { + kind := fd.Kind() + switch kind { + case pref.BoolKind: + e.WriteBool(val.Bool()) + + case pref.StringKind: + s := val.String() + if !e.opts.allowInvalidUTF8 && strs.EnforceUTF8(fd) && !utf8.ValidString(s) { + return errors.InvalidUTF8(string(fd.FullName())) + } + e.WriteString(s) + + case pref.Int32Kind, pref.Int64Kind, + pref.Sint32Kind, pref.Sint64Kind, + pref.Sfixed32Kind, pref.Sfixed64Kind: + e.WriteInt(val.Int()) + + case pref.Uint32Kind, pref.Uint64Kind, + pref.Fixed32Kind, pref.Fixed64Kind: + e.WriteUint(val.Uint()) + + case pref.FloatKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 32) + + case pref.DoubleKind: + // Encoder.WriteFloat handles the special numbers NaN and infinites. + e.WriteFloat(val.Float(), 64) + + case pref.BytesKind: + e.WriteString(string(val.Bytes())) + + case pref.EnumKind: + num := val.Enum() + if desc := fd.Enum().Values().ByNumber(num); desc != nil { + e.WriteLiteral(string(desc.Name())) + } else { + // Use numeric value if there is no enum description. + e.WriteInt(int64(num)) + } + + case pref.MessageKind, pref.GroupKind: + return e.marshalMessage(val.Message(), true) + + default: + panic(fmt.Sprintf("%v has unknown kind: %v", fd.FullName(), kind)) + } + return nil +} + +// marshalList marshals the given protoreflect.List as multiple name-value fields. +func (e encoder) marshalList(name string, list pref.List, fd pref.FieldDescriptor) error { + size := list.Len() + for i := 0; i < size; i++ { + e.WriteName(name) + if err := e.marshalSingular(list.Get(i), fd); err != nil { + return err + } + } + return nil +} + +// marshalMap marshals the given protoreflect.Map as multiple name-value fields. 
+func (e encoder) marshalMap(name string, mmap pref.Map, fd pref.FieldDescriptor) error { + var err error + mapsort.Range(mmap, fd.MapKey().Kind(), func(key pref.MapKey, val pref.Value) bool { + e.WriteName(name) + e.StartMessage() + defer e.EndMessage() + + e.WriteName("key") + err = e.marshalSingular(key.Value(), fd.MapKey()) + if err != nil { + return false + } + + e.WriteName("value") + err = e.marshalSingular(val, fd.MapValue()) + if err != nil { + return false + } + return true + }) + return err +} + +// marshalExtensions marshals extension fields. +func (e encoder) marshalExtensions(m pref.Message) error { + type entry struct { + key string + value pref.Value + desc pref.FieldDescriptor + } + + // Get a sorted list based on field key first. + var entries []entry + m.Range(func(fd pref.FieldDescriptor, v pref.Value) bool { + if !fd.IsExtension() { + return true + } + // For MessageSet extensions, the name used is the parent message. + name := fd.FullName() + if messageset.IsMessageSetExtension(fd) { + name = name.Parent() + } + entries = append(entries, entry{ + key: string(name), + value: v, + desc: fd, + }) + return true + }) + // Sort extensions lexicographically. + sort.Slice(entries, func(i, j int) bool { + return entries[i].key < entries[j].key + }) + + // Write out sorted list. + for _, entry := range entries { + // Extension field name is the proto field name enclosed in []. + name := "[" + entry.key + "]" + if err := e.marshalField(name, entry.value, entry.desc); err != nil { + return err + } + } + return nil +} + +// marshalUnknown parses the given []byte and marshals fields out. +// This function assumes proper encoding in the given []byte. +func (e encoder) marshalUnknown(b []byte) { + const dec = 10 + const hex = 16 + for len(b) > 0 { + num, wtype, n := protowire.ConsumeTag(b) + b = b[n:] + e.WriteName(strconv.FormatInt(int64(num), dec)) + + switch wtype { + case protowire.VarintType: + var v uint64 + v, n = protowire.ConsumeVarint(b) + e.WriteUint(v) + case protowire.Fixed32Type: + var v uint32 + v, n = protowire.ConsumeFixed32(b) + e.WriteLiteral("0x" + strconv.FormatUint(uint64(v), hex)) + case protowire.Fixed64Type: + var v uint64 + v, n = protowire.ConsumeFixed64(b) + e.WriteLiteral("0x" + strconv.FormatUint(v, hex)) + case protowire.BytesType: + var v []byte + v, n = protowire.ConsumeBytes(b) + e.WriteString(string(v)) + case protowire.StartGroupType: + e.StartMessage() + var v []byte + v, n = protowire.ConsumeGroup(num, b) + e.marshalUnknown(v) + e.EndMessage() + default: + panic(fmt.Sprintf("prototext: error parsing unknown field wire type: %v", wtype)) + } + + b = b[n:] + } +} + +// marshalAny marshals the given google.protobuf.Any message in expanded form. +// It returns true if it was able to marshal, else false. +func (e encoder) marshalAny(any pref.Message) bool { + // Construct the embedded message. + fds := any.Descriptor().Fields() + fdType := fds.ByNumber(fieldnum.Any_TypeUrl) + typeURL := any.Get(fdType).String() + mt, err := e.opts.Resolver.FindMessageByURL(typeURL) + if err != nil { + return false + } + m := mt.New().Interface() + + // Unmarshal bytes into embedded message. + fdValue := fds.ByNumber(fieldnum.Any_Value) + value := any.Get(fdValue) + err = proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: e.opts.Resolver, + }.Unmarshal(value.Bytes(), m) + if err != nil { + return false + } + + // Get current encoder position. If marshaling fails, reset encoder output + // back to this position. 
+ pos := e.Snapshot() + + // Field name is the proto field name enclosed in []. + e.WriteName("[" + typeURL + "]") + err = e.marshalMessage(m.ProtoReflect(), true) + if err != nil { + e.Reset(pos) + return false + } + return true +} diff --git a/vendor/google.golang.org/protobuf/encoding/protowire/wire.go b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go new file mode 100644 index 0000000..a427f8b --- /dev/null +++ b/vendor/google.golang.org/protobuf/encoding/protowire/wire.go @@ -0,0 +1,538 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protowire parses and formats the raw wire encoding. +// See https://developers.google.com/protocol-buffers/docs/encoding. +// +// For marshaling and unmarshaling entire protobuf messages, +// use the "google.golang.org/protobuf/proto" package instead. +package protowire + +import ( + "io" + "math" + "math/bits" + + "google.golang.org/protobuf/internal/errors" +) + +// Number represents the field number. +type Number int32 + +const ( + MinValidNumber Number = 1 + FirstReservedNumber Number = 19000 + LastReservedNumber Number = 19999 + MaxValidNumber Number = 1<<29 - 1 +) + +// IsValid reports whether the field number is semantically valid. +// +// Note that while numbers within the reserved range are semantically invalid, +// they are syntactically valid in the wire format. +// Implementations may treat records with reserved field numbers as unknown. +func (n Number) IsValid() bool { + return MinValidNumber <= n && n < FirstReservedNumber || LastReservedNumber < n && n <= MaxValidNumber +} + +// Type represents the wire type. +type Type int8 + +const ( + VarintType Type = 0 + Fixed32Type Type = 5 + Fixed64Type Type = 1 + BytesType Type = 2 + StartGroupType Type = 3 + EndGroupType Type = 4 +) + +const ( + _ = -iota + errCodeTruncated + errCodeFieldNumber + errCodeOverflow + errCodeReserved + errCodeEndGroup +) + +var ( + errFieldNumber = errors.New("invalid field number") + errOverflow = errors.New("variable length integer overflow") + errReserved = errors.New("cannot parse reserved wire type") + errEndGroup = errors.New("mismatching end group marker") + errParse = errors.New("parse error") +) + +// ParseError converts an error code into an error value. +// This returns nil if n is a non-negative number. +func ParseError(n int) error { + if n >= 0 { + return nil + } + switch n { + case errCodeTruncated: + return io.ErrUnexpectedEOF + case errCodeFieldNumber: + return errFieldNumber + case errCodeOverflow: + return errOverflow + case errCodeReserved: + return errReserved + case errCodeEndGroup: + return errEndGroup + default: + return errParse + } +} + +// ConsumeField parses an entire field record (both tag and value) and returns +// the field number, the wire type, and the total length. +// This returns a negative length upon an error (see ParseError). +// +// The total length includes the tag header and the end group marker (if the +// field is a group). +func ConsumeField(b []byte) (Number, Type, int) { + num, typ, n := ConsumeTag(b) + if n < 0 { + return 0, 0, n // forward error code + } + m := ConsumeFieldValue(num, typ, b[n:]) + if m < 0 { + return 0, 0, m // forward error code + } + return num, typ, n + m +} + +// ConsumeFieldValue parses a field value and returns its length. +// This assumes that the field Number and wire Type have already been parsed. 
+// This returns a negative length upon an error (see ParseError). +// +// When parsing a group, the length includes the end group marker and +// the end group is verified to match the starting field number. +func ConsumeFieldValue(num Number, typ Type, b []byte) (n int) { + switch typ { + case VarintType: + _, n = ConsumeVarint(b) + return n + case Fixed32Type: + _, n = ConsumeFixed32(b) + return n + case Fixed64Type: + _, n = ConsumeFixed64(b) + return n + case BytesType: + _, n = ConsumeBytes(b) + return n + case StartGroupType: + n0 := len(b) + for { + num2, typ2, n := ConsumeTag(b) + if n < 0 { + return n // forward error code + } + b = b[n:] + if typ2 == EndGroupType { + if num != num2 { + return errCodeEndGroup + } + return n0 - len(b) + } + + n = ConsumeFieldValue(num2, typ2, b) + if n < 0 { + return n // forward error code + } + b = b[n:] + } + case EndGroupType: + return errCodeEndGroup + default: + return errCodeReserved + } +} + +// AppendTag encodes num and typ as a varint-encoded tag and appends it to b. +func AppendTag(b []byte, num Number, typ Type) []byte { + return AppendVarint(b, EncodeTag(num, typ)) +} + +// ConsumeTag parses b as a varint-encoded tag, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeTag(b []byte) (Number, Type, int) { + v, n := ConsumeVarint(b) + if n < 0 { + return 0, 0, n // forward error code + } + num, typ := DecodeTag(v) + if num < MinValidNumber { + return 0, 0, errCodeFieldNumber + } + return num, typ, n +} + +func SizeTag(num Number) int { + return SizeVarint(EncodeTag(num, 0)) // wire type has no effect on size +} + +// AppendVarint appends v to b as a varint-encoded uint64. +func AppendVarint(b []byte, v uint64) []byte { + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte((v>>0)&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +// ConsumeVarint parses b as a varint-encoded uint64, reporting its length. 
+// This returns a negative length upon an error (see ParseError). +func ConsumeVarint(b []byte) (v uint64, n int) { + var y uint64 + if len(b) <= 0 { + return 0, errCodeTruncated + } + v = uint64(b[0]) + if v < 0x80 { + return v, 1 + } + v -= 0x80 + + if len(b) <= 1 { + return 0, errCodeTruncated + } + y = uint64(b[1]) + v += y << 7 + if y < 0x80 { + return v, 2 + } + v -= 0x80 << 7 + + if len(b) <= 2 { + return 0, errCodeTruncated + } + y = uint64(b[2]) + v += y << 14 + if y < 0x80 { + return v, 3 + } + v -= 0x80 << 14 + + if len(b) <= 3 { + return 0, errCodeTruncated + } + y = uint64(b[3]) + v += y << 21 + if y < 0x80 { + return v, 4 + } + v -= 0x80 << 21 + + if len(b) <= 4 { + return 0, errCodeTruncated + } + y = uint64(b[4]) + v += y << 28 + if y < 0x80 { + return v, 5 + } + v -= 0x80 << 28 + + if len(b) <= 5 { + return 0, errCodeTruncated + } + y = uint64(b[5]) + v += y << 35 + if y < 0x80 { + return v, 6 + } + v -= 0x80 << 35 + + if len(b) <= 6 { + return 0, errCodeTruncated + } + y = uint64(b[6]) + v += y << 42 + if y < 0x80 { + return v, 7 + } + v -= 0x80 << 42 + + if len(b) <= 7 { + return 0, errCodeTruncated + } + y = uint64(b[7]) + v += y << 49 + if y < 0x80 { + return v, 8 + } + v -= 0x80 << 49 + + if len(b) <= 8 { + return 0, errCodeTruncated + } + y = uint64(b[8]) + v += y << 56 + if y < 0x80 { + return v, 9 + } + v -= 0x80 << 56 + + if len(b) <= 9 { + return 0, errCodeTruncated + } + y = uint64(b[9]) + v += y << 63 + if y < 2 { + return v, 10 + } + return 0, errCodeOverflow +} + +// SizeVarint returns the encoded size of a varint. +// The size is guaranteed to be within 1 and 10, inclusive. +func SizeVarint(v uint64) int { + // This computes 1 + (bits.Len64(v)-1)/7. + // 9/64 is a good enough approximation of 1/7 + return int(9*uint32(bits.Len64(v))+64) / 64 +} + +// AppendFixed32 appends v to b as a little-endian uint32. +func AppendFixed32(b []byte, v uint32) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24)) +} + +// ConsumeFixed32 parses b as a little-endian uint32, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed32(b []byte) (v uint32, n int) { + if len(b) < 4 { + return 0, errCodeTruncated + } + v = uint32(b[0])<<0 | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + return v, 4 +} + +// SizeFixed32 returns the encoded size of a fixed32; which is always 4. +func SizeFixed32() int { + return 4 +} + +// AppendFixed64 appends v to b as a little-endian uint64. +func AppendFixed64(b []byte, v uint64) []byte { + return append(b, + byte(v>>0), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) +} + +// ConsumeFixed64 parses b as a little-endian uint64, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeFixed64(b []byte) (v uint64, n int) { + if len(b) < 8 { + return 0, errCodeTruncated + } + v = uint64(b[0])<<0 | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + return v, 8 +} + +// SizeFixed64 returns the encoded size of a fixed64; which is always 8. +func SizeFixed64() int { + return 8 +} + +// AppendBytes appends v to b as a length-prefixed bytes value. +func AppendBytes(b []byte, v []byte) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeBytes parses b as a length-prefixed bytes value, reporting its length. 
+// This returns a negative length upon an error (see ParseError). +func ConsumeBytes(b []byte) (v []byte, n int) { + m, n := ConsumeVarint(b) + if n < 0 { + return nil, n // forward error code + } + if m > uint64(len(b[n:])) { + return nil, errCodeTruncated + } + return b[n:][:m], n + int(m) +} + +// SizeBytes returns the encoded size of a length-prefixed bytes value, +// given only the length. +func SizeBytes(n int) int { + return SizeVarint(uint64(n)) + n +} + +// AppendString appends v to b as a length-prefixed bytes value. +func AppendString(b []byte, v string) []byte { + return append(AppendVarint(b, uint64(len(v))), v...) +} + +// ConsumeString parses b as a length-prefixed bytes value, reporting its length. +// This returns a negative length upon an error (see ParseError). +func ConsumeString(b []byte) (v string, n int) { + bb, n := ConsumeBytes(b) + return string(bb), n +} + +// AppendGroup appends v to b as group value, with a trailing end group marker. +// The value v must not contain the end marker. +func AppendGroup(b []byte, num Number, v []byte) []byte { + return AppendVarint(append(b, v...), EncodeTag(num, EndGroupType)) +} + +// ConsumeGroup parses b as a group value until the trailing end group marker, +// and verifies that the end marker matches the provided num. The value v +// does not contain the end marker, while the length does contain the end marker. +// This returns a negative length upon an error (see ParseError). +func ConsumeGroup(num Number, b []byte) (v []byte, n int) { + n = ConsumeFieldValue(num, StartGroupType, b) + if n < 0 { + return nil, n // forward error code + } + b = b[:n] + + // Truncate off end group marker, but need to handle denormalized varints. + // Assuming end marker is never 0 (which is always the case since + // EndGroupType is non-zero), we can truncate all trailing bytes where the + // lower 7 bits are all zero (implying that the varint is denormalized). + for len(b) > 0 && b[len(b)-1]&0x7f == 0 { + b = b[:len(b)-1] + } + b = b[:len(b)-SizeTag(num)] + return b, n +} + +// SizeGroup returns the encoded size of a group, given only the length. +func SizeGroup(num Number, n int) int { + return n + SizeTag(num) +} + +// DecodeTag decodes the field Number and wire Type from its unified form. +// The Number is -1 if the decoded field number overflows int32. +// Other than overflow, this does not check for field number validity. +func DecodeTag(x uint64) (Number, Type) { + // NOTE: MessageSet allows for larger field numbers than normal. + if x>>3 > uint64(math.MaxInt32) { + return -1, 0 + } + return Number(x >> 3), Type(x & 7) +} + +// EncodeTag encodes the field Number and wire Type into its unified form. +func EncodeTag(num Number, typ Type) uint64 { + return uint64(num)<<3 | uint64(typ&7) +} + +// DecodeZigZag decodes a zig-zag-encoded uint64 as an int64. +// Input: {…, 5, 3, 1, 0, 2, 4, 6, …} +// Output: {…, -3, -2, -1, 0, +1, +2, +3, …} +func DecodeZigZag(x uint64) int64 { + return int64(x>>1) ^ int64(x)<<63>>63 +} + +// EncodeZigZag encodes an int64 as a zig-zag-encoded uint64. +// Input: {…, -3, -2, -1, 0, +1, +2, +3, …} +// Output: {…, 5, 3, 1, 0, 2, 4, 6, …} +func EncodeZigZag(x int64) uint64 { + return uint64(x<<1) ^ uint64(x>>63) +} + +// DecodeBool decodes a uint64 as a bool. +// Input: { 0, 1, 2, …} +// Output: {false, true, true, …} +func DecodeBool(x uint64) bool { + return x != 0 +} + +// EncodeBool encodes a bool as a uint64. 
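Everything needed to hand-roll a wire-format record with this package appears above: the Append* helpers build bytes, the Consume* helpers walk them back, and ParseError turns a negative length into an error value. A self-contained sketch (field numbers and values are arbitrary):

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode field 1 as a zig-zag varint (sint64 style) and field 2 as a
	// length-prefixed string.
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.VarintType)
	b = protowire.AppendVarint(b, protowire.EncodeZigZag(-5))
	b = protowire.AppendTag(b, 2, protowire.BytesType)
	b = protowire.AppendString(b, "hi")

	// Walk the records back out with ConsumeField; a negative length is an
	// error code that ParseError converts into an error.
	for len(b) > 0 {
		num, typ, n := protowire.ConsumeField(b)
		if n < 0 {
			panic(protowire.ParseError(n))
		}
		fmt.Printf("field %d, wire type %d, %d bytes\n", num, typ, n)
		b = b[n:]
	}
}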
+// Input: {false, true} +// Output: { 0, 1} +func EncodeBool(x bool) uint64 { + if x { + return 1 + } + return 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go new file mode 100644 index 0000000..e7af0fe --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descfmt/stringer.go @@ -0,0 +1,316 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descfmt provides functionality to format descriptors. +package descfmt + +import ( + "fmt" + "io" + "reflect" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type list interface { + Len() int + pragma.DoNotImplement +} + +func FormatList(s fmt.State, r rune, vs list) { + io.WriteString(s, formatListOpt(vs, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatListOpt(vs list, isRoot, allowMulti bool) string { + start, end := "[", "]" + if isRoot { + var name string + switch vs.(type) { + case pref.Names: + name = "Names" + case pref.FieldNumbers: + name = "FieldNumbers" + case pref.FieldRanges: + name = "FieldRanges" + case pref.EnumRanges: + name = "EnumRanges" + case pref.FileImports: + name = "FileImports" + case pref.Descriptor: + name = reflect.ValueOf(vs).MethodByName("Get").Type().Out(0).Name() + "s" + } + start, end = name+"{", "}" + } + + var ss []string + switch vs := vs.(type) { + case pref.Names: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldNumbers: + for i := 0; i < vs.Len(); i++ { + ss = append(ss, fmt.Sprint(vs.Get(i))) + } + return start + joinStrings(ss, false) + end + case pref.FieldRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0]+1 == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], r[1])) // enum ranges are end exclusive + } + } + return start + joinStrings(ss, false) + end + case pref.EnumRanges: + for i := 0; i < vs.Len(); i++ { + r := vs.Get(i) + if r[0] == r[1] { + ss = append(ss, fmt.Sprintf("%d", r[0])) + } else { + ss = append(ss, fmt.Sprintf("%d:%d", r[0], int64(r[1])+1)) // enum ranges are end inclusive + } + } + return start + joinStrings(ss, false) + end + case pref.FileImports: + for i := 0; i < vs.Len(); i++ { + var rs records + rs.Append(reflect.ValueOf(vs.Get(i)), "Path", "Package", "IsPublic", "IsWeak") + ss = append(ss, "{"+rs.Join()+"}") + } + return start + joinStrings(ss, allowMulti) + end + default: + _, isEnumValue := vs.(pref.EnumValueDescriptors) + for i := 0; i < vs.Len(); i++ { + m := reflect.ValueOf(vs).MethodByName("Get") + v := m.Call([]reflect.Value{reflect.ValueOf(i)})[0].Interface() + ss = append(ss, formatDescOpt(v.(pref.Descriptor), false, allowMulti && !isEnumValue)) + } + return start + joinStrings(ss, allowMulti && isEnumValue) + end + } +} + +// descriptorAccessors is a list of accessors to print for each descriptor. +// +// Do not print all accessors since some contain redundant information, +// while others are pointers that we do not want to follow since the descriptor +// is actually a cyclic graph. +// +// Using a list allows us to print the accessors in a sensible order. 
+var descriptorAccessors = map[reflect.Type][]string{ + reflect.TypeOf((*pref.FileDescriptor)(nil)).Elem(): {"Path", "Package", "Imports", "Messages", "Enums", "Extensions", "Services"}, + reflect.TypeOf((*pref.MessageDescriptor)(nil)).Elem(): {"IsMapEntry", "Fields", "Oneofs", "ReservedNames", "ReservedRanges", "RequiredNumbers", "ExtensionRanges", "Messages", "Enums", "Extensions"}, + reflect.TypeOf((*pref.FieldDescriptor)(nil)).Elem(): {"Number", "Cardinality", "Kind", "HasJSONName", "JSONName", "HasPresence", "IsExtension", "IsPacked", "IsWeak", "IsList", "IsMap", "MapKey", "MapValue", "HasDefault", "Default", "ContainingOneof", "ContainingMessage", "Message", "Enum"}, + reflect.TypeOf((*pref.OneofDescriptor)(nil)).Elem(): {"Fields"}, // not directly used; must keep in sync with formatDescOpt + reflect.TypeOf((*pref.EnumDescriptor)(nil)).Elem(): {"Values", "ReservedNames", "ReservedRanges"}, + reflect.TypeOf((*pref.EnumValueDescriptor)(nil)).Elem(): {"Number"}, + reflect.TypeOf((*pref.ServiceDescriptor)(nil)).Elem(): {"Methods"}, + reflect.TypeOf((*pref.MethodDescriptor)(nil)).Elem(): {"Input", "Output", "IsStreamingClient", "IsStreamingServer"}, +} + +func FormatDesc(s fmt.State, r rune, t pref.Descriptor) { + io.WriteString(s, formatDescOpt(t, true, r == 'v' && (s.Flag('+') || s.Flag('#')))) +} +func formatDescOpt(t pref.Descriptor, isRoot, allowMulti bool) string { + rv := reflect.ValueOf(t) + rt := rv.MethodByName("ProtoType").Type().In(0) + + start, end := "{", "}" + if isRoot { + start = rt.Name() + "{" + } + + _, isFile := t.(pref.FileDescriptor) + rs := records{allowMulti: allowMulti} + if t.IsPlaceholder() { + if isFile { + rs.Append(rv, "Path", "Package", "IsPlaceholder") + } else { + rs.Append(rv, "FullName", "IsPlaceholder") + } + } else { + switch { + case isFile: + rs.Append(rv, "Syntax") + case isRoot: + rs.Append(rv, "Syntax", "FullName") + default: + rs.Append(rv, "Name") + } + switch t := t.(type) { + case pref.FieldDescriptor: + for _, s := range descriptorAccessors[rt] { + switch s { + case "MapKey": + if k := t.MapKey(); k != nil { + rs.recs = append(rs.recs, [2]string{"MapKey", k.Kind().String()}) + } + case "MapValue": + if v := t.MapValue(); v != nil { + switch v.Kind() { + case pref.EnumKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Enum().FullName())}) + case pref.MessageKind, pref.GroupKind: + rs.recs = append(rs.recs, [2]string{"MapValue", string(v.Message().FullName())}) + default: + rs.recs = append(rs.recs, [2]string{"MapValue", v.Kind().String()}) + } + } + case "ContainingOneof": + if od := t.ContainingOneof(); od != nil { + rs.recs = append(rs.recs, [2]string{"Oneof", string(od.Name())}) + } + case "ContainingMessage": + if t.IsExtension() { + rs.recs = append(rs.recs, [2]string{"Extendee", string(t.ContainingMessage().FullName())}) + } + case "Message": + if !t.IsMap() { + rs.Append(rv, s) + } + default: + rs.Append(rv, s) + } + } + case pref.OneofDescriptor: + var ss []string + fs := t.Fields() + for i := 0; i < fs.Len(); i++ { + ss = append(ss, string(fs.Get(i).Name())) + } + if len(ss) > 0 { + rs.recs = append(rs.recs, [2]string{"Fields", "[" + joinStrings(ss, false) + "]"}) + } + default: + rs.Append(rv, descriptorAccessors[rt]...) 
+ } + if rv.MethodByName("GoType").IsValid() { + rs.Append(rv, "GoType") + } + } + return start + rs.Join() + end +} + +type records struct { + recs [][2]string + allowMulti bool +} + +func (rs *records) Append(v reflect.Value, accessors ...string) { + for _, a := range accessors { + var rv reflect.Value + if m := v.MethodByName(a); m.IsValid() { + rv = m.Call(nil)[0] + } + if v.Kind() == reflect.Struct && !rv.IsValid() { + rv = v.FieldByName(a) + } + if !rv.IsValid() { + panic(fmt.Sprintf("unknown accessor: %v.%s", v.Type(), a)) + } + if _, ok := rv.Interface().(pref.Value); ok { + rv = rv.MethodByName("Interface").Call(nil)[0] + if !rv.IsNil() { + rv = rv.Elem() + } + } + + // Ignore zero values. + var isZero bool + switch rv.Kind() { + case reflect.Interface, reflect.Slice: + isZero = rv.IsNil() + case reflect.Bool: + isZero = rv.Bool() == false + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + isZero = rv.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + isZero = rv.Uint() == 0 + case reflect.String: + isZero = rv.String() == "" + } + if n, ok := rv.Interface().(list); ok { + isZero = n.Len() == 0 + } + if isZero { + continue + } + + // Format the value. + var s string + v := rv.Interface() + switch v := v.(type) { + case list: + s = formatListOpt(v, false, rs.allowMulti) + case pref.FieldDescriptor, pref.OneofDescriptor, pref.EnumValueDescriptor, pref.MethodDescriptor: + s = string(v.(pref.Descriptor).Name()) + case pref.Descriptor: + s = string(v.FullName()) + case string: + s = strconv.Quote(v) + case []byte: + s = fmt.Sprintf("%q", v) + default: + s = fmt.Sprint(v) + } + rs.recs = append(rs.recs, [2]string{a, s}) + } +} + +func (rs *records) Join() string { + var ss []string + + // In single line mode, simply join all records with commas. + if !rs.allowMulti { + for _, r := range rs.recs { + ss = append(ss, r[0]+formatColon(0)+r[1]) + } + return joinStrings(ss, false) + } + + // In allowMulti line mode, align single line records for more readable output. + var maxLen int + flush := func(i int) { + for _, r := range rs.recs[len(ss):i] { + ss = append(ss, r[0]+formatColon(maxLen-len(r[0]))+r[1]) + } + maxLen = 0 + } + for i, r := range rs.recs { + if isMulti := strings.Contains(r[1], "\n"); isMulti { + flush(i) + ss = append(ss, r[0]+formatColon(0)+strings.Join(strings.Split(r[1], "\n"), "\n\t")) + } else if maxLen < len(r[0]) { + maxLen = len(r[0]) + } + } + flush(len(rs.recs)) + return joinStrings(ss, true) +} + +func formatColon(padding int) string { + // Deliberately introduce instability into the debug output to + // discourage users from performing string comparisons. + // This provides us flexibility to change the output in the future. + if detrand.Bool() { + return ":" + strings.Repeat(" ", 1+padding) // use non-breaking spaces (U+00a0) + } else { + return ":" + strings.Repeat(" ", 1+padding) // use regular spaces (U+0020) + } +} + +func joinStrings(ss []string, isMulti bool) string { + if len(ss) == 0 { + return "" + } + if isMulti { + return "\n\t" + strings.Join(ss, "\n\t") + "\n" + } + return strings.Join(ss, ", ") +} diff --git a/vendor/google.golang.org/protobuf/internal/descopts/options.go b/vendor/google.golang.org/protobuf/internal/descopts/options.go new file mode 100644 index 0000000..8401be8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/descopts/options.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. 
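In practice this formatter is reached through fmt: the concrete descriptor types in this module implement fmt.Formatter by delegating to FormatDesc, so printing any protoreflect descriptor should exercise the code above. A sketch, assuming the well-known durationpb type is linked in; the exact output is intentionally unstable (see formatColon), so the comments only indicate its shape.

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	md := (&durationpb.Duration{}).ProtoReflect().Descriptor()
	fmt.Printf("%v\n", md)  // single-line MessageDescriptor{...} summary
	fmt.Printf("%+v\n", md) // multi-line form (allowMulti)
}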
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descopts contains the nil pointers to concrete descriptor options. +// +// This package exists as a form of reverse dependency injection so that certain +// packages (e.g., internal/filedesc and internal/filetype can avoid a direct +// dependency on the descriptor proto package). +package descopts + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +// These variables are set by the init function in descriptor.pb.go via logic +// in internal/filetype. In other words, so long as the descriptor proto package +// is linked in, these variables will be populated. +// +// Each variable is populated with a nil pointer to the options struct. +var ( + File pref.ProtoMessage + Enum pref.ProtoMessage + EnumValue pref.ProtoMessage + Message pref.ProtoMessage + Field pref.ProtoMessage + Oneof pref.ProtoMessage + ExtensionRange pref.ProtoMessage + Service pref.ProtoMessage + Method pref.ProtoMessage +) diff --git a/vendor/google.golang.org/protobuf/internal/detrand/rand.go b/vendor/google.golang.org/protobuf/internal/detrand/rand.go new file mode 100644 index 0000000..a904dd1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/detrand/rand.go @@ -0,0 +1,61 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package detrand provides deterministically random functionality. +// +// The pseudo-randomness of these functions is seeded by the program binary +// itself and guarantees that the output does not change within a program, +// while ensuring that the output is unstable across different builds. +package detrand + +import ( + "encoding/binary" + "hash/fnv" + "os" +) + +// Disable disables detrand such that all functions returns the zero value. +// This function is not concurrent-safe and must be called during program init. +func Disable() { + randSeed = 0 +} + +// Bool returns a deterministically random boolean. +func Bool() bool { + return randSeed%2 == 1 +} + +// randSeed is a best-effort at an approximate hash of the Go binary. +var randSeed = binaryHash() + +func binaryHash() uint64 { + // Open the Go binary. + s, err := os.Executable() + if err != nil { + return 0 + } + f, err := os.Open(s) + if err != nil { + return 0 + } + defer f.Close() + + // Hash the size and several samples of the Go binary. + const numSamples = 8 + var buf [64]byte + h := fnv.New64() + fi, err := f.Stat() + if err != nil { + return 0 + } + binary.LittleEndian.PutUint64(buf[:8], uint64(fi.Size())) + h.Write(buf[:8]) + for i := int64(0); i < numSamples; i++ { + if _, err := f.ReadAt(buf[:], i*fi.Size()/numSamples); err != nil { + return 0 + } + h.Write(buf[:]) + } + return h.Sum64() +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go new file mode 100644 index 0000000..fdd9b13 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/defval/default.go @@ -0,0 +1,213 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package defval marshals and unmarshals textual forms of default values. 
+// +// This package handles both the form historically used in Go struct field tags +// and also the form used by google.protobuf.FieldDescriptorProto.default_value +// since they differ in superficial ways. +package defval + +import ( + "fmt" + "math" + "strconv" + + ptext "google.golang.org/protobuf/internal/encoding/text" + errors "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Format is the serialization format used to represent the default value. +type Format int + +const ( + _ Format = iota + + // Descriptor uses the serialization format that protoc uses with the + // google.protobuf.FieldDescriptorProto.default_value field. + Descriptor + + // GoTag uses the historical serialization format in Go struct field tags. + GoTag +) + +// Unmarshal deserializes the default string s according to the given kind k. +// When k is an enum, a list of enum value descriptors must be provided. +func Unmarshal(s string, k pref.Kind, evs pref.EnumValueDescriptors, f Format) (pref.Value, pref.EnumValueDescriptor, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + switch s { + case "1": + return pref.ValueOfBool(true), nil, nil + case "0": + return pref.ValueOfBool(false), nil, nil + } + } else { + switch s { + case "true": + return pref.ValueOfBool(true), nil, nil + case "false": + return pref.ValueOfBool(false), nil, nil + } + } + case pref.EnumKind: + if f == GoTag { + // Go tags use the numeric form of the enum value. + if n, err := strconv.ParseInt(s, 10, 32); err == nil { + if ev := evs.ByNumber(pref.EnumNumber(n)); ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + } else { + // Descriptor default_value use the enum identifier. + ev := evs.ByName(pref.Name(s)) + if ev != nil { + return pref.ValueOfEnum(ev.Number()), ev, nil + } + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if v, err := strconv.ParseInt(s, 10, 32); err == nil { + return pref.ValueOfInt32(int32(v)), nil, nil + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if v, err := strconv.ParseInt(s, 10, 64); err == nil { + return pref.ValueOfInt64(int64(v)), nil, nil + } + case pref.Uint32Kind, pref.Fixed32Kind: + if v, err := strconv.ParseUint(s, 10, 32); err == nil { + return pref.ValueOfUint32(uint32(v)), nil, nil + } + case pref.Uint64Kind, pref.Fixed64Kind: + if v, err := strconv.ParseUint(s, 10, 64); err == nil { + return pref.ValueOfUint64(uint64(v)), nil, nil + } + case pref.FloatKind, pref.DoubleKind: + var v float64 + var err error + switch s { + case "-inf": + v = math.Inf(-1) + case "inf": + v = math.Inf(+1) + case "nan": + v = math.NaN() + default: + v, err = strconv.ParseFloat(s, 64) + } + if err == nil { + if k == pref.FloatKind { + return pref.ValueOfFloat32(float32(v)), nil, nil + } else { + return pref.ValueOfFloat64(float64(v)), nil, nil + } + } + case pref.StringKind: + // String values are already unescaped and can be used as is. + return pref.ValueOfString(s), nil, nil + case pref.BytesKind: + if b, ok := unmarshalBytes(s); ok { + return pref.ValueOfBytes(b), nil, nil + } + } + return pref.Value{}, nil, errors.New("could not parse value for %v: %q", k, s) +} + +// Marshal serializes v as the default string according to the given kind k. +// When specifying the Descriptor format for an enum kind, the associated +// enum value descriptor must be provided. 
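+//
+// A hedged round-trip sketch (illustrative, not upstream code): for a boolean
+// field descriptor fd (an assumed descriptor) whose default is true, the two
+// formats serialize as
+//
+//	s1, _ := Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), GoTag)      // "1"
+//	s2, _ := Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), Descriptor) // "true"
+//
+// and Unmarshal accepts either form back when given the matching Format.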
+func Marshal(v pref.Value, ev pref.EnumValueDescriptor, k pref.Kind, f Format) (string, error) { + switch k { + case pref.BoolKind: + if f == GoTag { + if v.Bool() { + return "1", nil + } else { + return "0", nil + } + } else { + if v.Bool() { + return "true", nil + } else { + return "false", nil + } + } + case pref.EnumKind: + if f == GoTag { + return strconv.FormatInt(int64(v.Enum()), 10), nil + } else { + return string(ev.Name()), nil + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind, pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return strconv.FormatInt(v.Int(), 10), nil + case pref.Uint32Kind, pref.Fixed32Kind, pref.Uint64Kind, pref.Fixed64Kind: + return strconv.FormatUint(v.Uint(), 10), nil + case pref.FloatKind, pref.DoubleKind: + f := v.Float() + switch { + case math.IsInf(f, -1): + return "-inf", nil + case math.IsInf(f, +1): + return "inf", nil + case math.IsNaN(f): + return "nan", nil + default: + if k == pref.FloatKind { + return strconv.FormatFloat(f, 'g', -1, 32), nil + } else { + return strconv.FormatFloat(f, 'g', -1, 64), nil + } + } + case pref.StringKind: + // String values are serialized as is without any escaping. + return v.String(), nil + case pref.BytesKind: + if s, ok := marshalBytes(v.Bytes()); ok { + return s, nil + } + } + return "", errors.New("could not format value for %v: %v", k, v) +} + +// unmarshalBytes deserializes bytes by applying C unescaping. +func unmarshalBytes(s string) ([]byte, bool) { + // Bytes values use the same escaping as the text format, + // however they lack the surrounding double quotes. + v, err := ptext.UnmarshalString(`"` + s + `"`) + if err != nil { + return nil, false + } + return []byte(v), true +} + +// marshalBytes serializes bytes by using C escaping. +// To match the exact output of protoc, this is identical to the +// CEscape function in strutil.cc of the protoc source code. +func marshalBytes(b []byte) (string, bool) { + var s []byte + for _, c := range b { + switch c { + case '\n': + s = append(s, `\n`...) + case '\r': + s = append(s, `\r`...) + case '\t': + s = append(s, `\t`...) + case '"': + s = append(s, `\"`...) + case '\'': + s = append(s, `\'`...) + case '\\': + s = append(s, `\\`...) + default: + if printableASCII := c >= 0x20 && c <= 0x7e; printableASCII { + s = append(s, c) + } else { + s = append(s, fmt.Sprintf(`\%03o`, c)...) + } + } + } + return string(s), true +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go new file mode 100644 index 0000000..b1eeea5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/messageset/messageset.go @@ -0,0 +1,258 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package messageset encodes and decodes the obsolete MessageSet wire format. +package messageset + +import ( + "math" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// The MessageSet wire format is equivalent to a message defiend as follows, +// where each Item defines an extension field with a field number of 'type_id' +// and content of 'message'. MessageSet extensions must be non-repeated message +// fields. 
+// +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// } +// } +const ( + FieldItem = protowire.Number(1) + FieldTypeID = protowire.Number(2) + FieldMessage = protowire.Number(3) +) + +// ExtensionName is the field name for extensions of MessageSet. +// +// A valid MessageSet extension must be of the form: +// message MyMessage { +// extend proto2.bridge.MessageSet { +// optional MyMessage message_set_extension = 1234; +// } +// ... +// } +const ExtensionName = "message_set_extension" + +// IsMessageSet returns whether the message uses the MessageSet wire format. +func IsMessageSet(md pref.MessageDescriptor) bool { + xmd, ok := md.(interface{ IsMessageSet() bool }) + return ok && xmd.IsMessageSet() +} + +// IsMessageSetExtension reports this field extends a MessageSet. +func IsMessageSetExtension(fd pref.FieldDescriptor) bool { + if fd.Name() != ExtensionName { + return false + } + if fd.FullName().Parent() != fd.Message().FullName() { + return false + } + return IsMessageSet(fd.ContainingMessage()) +} + +// FindMessageSetExtension locates a MessageSet extension field by name. +// In text and JSON formats, the extension name used is the message itself. +// The extension field name is derived by appending ExtensionName. +func FindMessageSetExtension(r preg.ExtensionTypeResolver, s pref.FullName) (pref.ExtensionType, error) { + name := s.Append(ExtensionName) + xt, err := r.FindExtensionByName(name) + if err != nil { + if err == preg.NotFound { + return nil, err + } + return nil, errors.Wrap(err, "%q", name) + } + if !IsMessageSetExtension(xt.TypeDescriptor()) { + return nil, preg.NotFound + } + return xt, nil +} + +// SizeField returns the size of a MessageSet item field containing an extension +// with the given field number, not counting the contents of the message subfield. +func SizeField(num protowire.Number) int { + return 2*protowire.SizeTag(FieldItem) + protowire.SizeTag(FieldTypeID) + protowire.SizeVarint(uint64(num)) +} + +// Unmarshal parses a MessageSet. +// +// It calls fn with the type ID and value of each item in the MessageSet. +// Unknown fields are discarded. +// +// If wantLen is true, the item values include the varint length prefix. +// This is ugly, but simplifies the fast-path decoder in internal/impl. +func Unmarshal(b []byte, wantLen bool, fn func(typeID protowire.Number, value []byte) error) error { + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + if num != FieldItem || wtyp != protowire.StartGroupType { + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return protowire.ParseError(n) + } + b = b[n:] + continue + } + typeID, value, n, err := ConsumeFieldValue(b, wantLen) + if err != nil { + return err + } + b = b[n:] + if typeID == 0 { + continue + } + if err := fn(typeID, value); err != nil { + return err + } + } + return nil +} + +// ConsumeFieldValue parses b as a MessageSet item field value until and including +// the trailing end group marker. It assumes the start group tag has already been parsed. +// It returns the contents of the type_id and message subfields and the total +// item length. +// +// If wantLen is true, the returned message value includes the length prefix. 
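+//
+// Illustrative use (a sketch, not upstream code): once the caller has stripped
+// the start-group tag for FieldItem, the remaining bytes b can be parsed with
+//
+//	typeID, msg, n, err := ConsumeFieldValue(b, false)
+//
+// where typeID is the extension field number, msg the embedded message bytes,
+// and n the total item length including the trailing end-group marker.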
+func ConsumeFieldValue(b []byte, wantLen bool) (typeid protowire.Number, message []byte, n int, err error) { + ilen := len(b) + for { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + switch { + case num == FieldItem && wtyp == protowire.EndGroupType: + if wantLen && len(message) == 0 { + // The message field was missing, which should never happen. + // Be prepared for this case anyway. + message = protowire.AppendVarint(message, 0) + } + return typeid, message, ilen - len(b), nil + case num == FieldTypeID && wtyp == protowire.VarintType: + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + if v < 1 || v > math.MaxInt32 { + return 0, nil, 0, errors.New("invalid type_id in message set") + } + typeid = protowire.Number(v) + case num == FieldMessage && wtyp == protowire.BytesType: + m, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + if message == nil { + if wantLen { + message = b[:n:n] + } else { + message = m[:len(m):len(m)] + } + } else { + // This case should never happen in practice, but handle it for + // correctness: The MessageSet item contains multiple message + // fields, which need to be merged. + // + // In the case where we're returning the length, this becomes + // quite inefficient since we need to strip the length off + // the existing data and reconstruct it with the combined length. + if wantLen { + _, nn := protowire.ConsumeVarint(message) + m0 := message[nn:] + message = nil + message = protowire.AppendVarint(message, uint64(len(m0)+len(m))) + message = append(message, m0...) + message = append(message, m...) + } else { + message = append(message, m...) + } + } + b = b[n:] + default: + // We have no place to put it, so we just ignore unknown fields. + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, nil, 0, protowire.ParseError(n) + } + b = b[n:] + } + } +} + +// AppendFieldStart appends the start of a MessageSet item field containing +// an extension with the given number. The caller must add the message +// subfield (including the tag). +func AppendFieldStart(b []byte, num protowire.Number) []byte { + b = protowire.AppendTag(b, FieldItem, protowire.StartGroupType) + b = protowire.AppendTag(b, FieldTypeID, protowire.VarintType) + b = protowire.AppendVarint(b, uint64(num)) + return b +} + +// AppendFieldEnd appends the trailing end group marker for a MessageSet item field. +func AppendFieldEnd(b []byte) []byte { + return protowire.AppendTag(b, FieldItem, protowire.EndGroupType) +} + +// SizeUnknown returns the size of an unknown fields section in MessageSet format. +// +// See AppendUnknown. +func SizeUnknown(unknown []byte) (size int) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return 0 + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return 0 + } + unknown = unknown[n:] + size += SizeField(num) + protowire.SizeTag(FieldMessage) + n + } + return size +} + +// AppendUnknown appends unknown fields to b in MessageSet format. +// +// For historic reasons, unresolved items in a MessageSet are stored in a +// message's unknown fields section in non-MessageSet format. That is, an +// unknown item with typeID T and value V appears in the unknown fields as +// a field with number T and value V. +// +// This function converts the unknown fields back into MessageSet form. 
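+//
+// A minimal sketch (assumed inputs, not upstream code) of the expected wire
+// shape: an unknown item with field number 123 and payload p converts as
+//
+//	var unknown []byte
+//	unknown = protowire.AppendTag(unknown, 123, protowire.BytesType)
+//	unknown = protowire.AppendBytes(unknown, p)
+//	out, err := AppendUnknown(nil, unknown)
+//
+// which should yield a FieldItem group whose type_id is 123 and whose message
+// subfield carries p.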
+func AppendUnknown(b, unknown []byte) ([]byte, error) { + for len(unknown) > 0 { + num, typ, n := protowire.ConsumeTag(unknown) + if n < 0 || typ != protowire.BytesType { + return nil, errors.New("invalid data in message set unknown fields") + } + unknown = unknown[n:] + _, n = protowire.ConsumeBytes(unknown) + if n < 0 { + return nil, errors.New("invalid data in message set unknown fields") + } + b = AppendFieldStart(b, num) + b = protowire.AppendTag(b, FieldMessage, protowire.BytesType) + b = append(b, unknown[:n]...) + b = AppendFieldEnd(b) + unknown = unknown[n:] + } + return b, nil +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go new file mode 100644 index 0000000..16c02d7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -0,0 +1,207 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package tag marshals and unmarshals the legacy struct tags as generated +// by historical versions of protoc-gen-go. +package tag + +import ( + "reflect" + "strconv" + "strings" + + defval "google.golang.org/protobuf/internal/encoding/defval" + fdesc "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var byteType = reflect.TypeOf(byte(0)) + +// Unmarshal decodes the tag into a prototype.Field. +// +// The goType is needed to determine the original protoreflect.Kind since the +// tag does not record sufficient information to determine that. +// The type is the underlying field type (e.g., a repeated field may be +// represented by []T, but the Go type passed in is just T). +// A list of enum value descriptors must be provided for enum fields. +// This does not populate the Enum or Message (except for weak message). +// +// This function is a best effort attempt; parsing errors are ignored. 
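+//
+// Illustrative sketch (hypothetical tag string, not upstream code): decoding a
+// legacy tag for an optional proto3 int32 field,
+//
+//	fd := Unmarshal("varint,2,opt,name=page_number,json=pageNumber,proto3", reflect.TypeOf(int32(0)), nil)
+//
+// is expected to yield fd.Number() == 2, fd.Kind() == protoreflect.Int32Kind
+// and fd.Cardinality() == protoreflect.Optional.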
+func Unmarshal(tag string, goType reflect.Type, evs pref.EnumValueDescriptors) pref.FieldDescriptor { + f := new(fdesc.Field) + f.L0.ParentFile = fdesc.SurrogateProto2 + for len(tag) > 0 { + i := strings.IndexByte(tag, ',') + if i < 0 { + i = len(tag) + } + switch s := tag[:i]; { + case strings.HasPrefix(s, "name="): + f.L0.FullName = pref.FullName(s[len("name="):]) + case strings.Trim(s, "0123456789") == "": + n, _ := strconv.ParseUint(s, 10, 32) + f.L1.Number = pref.FieldNumber(n) + case s == "opt": + f.L1.Cardinality = pref.Optional + case s == "req": + f.L1.Cardinality = pref.Required + case s == "rep": + f.L1.Cardinality = pref.Repeated + case s == "varint": + switch goType.Kind() { + case reflect.Bool: + f.L1.Kind = pref.BoolKind + case reflect.Int32: + f.L1.Kind = pref.Int32Kind + case reflect.Int64: + f.L1.Kind = pref.Int64Kind + case reflect.Uint32: + f.L1.Kind = pref.Uint32Kind + case reflect.Uint64: + f.L1.Kind = pref.Uint64Kind + } + case s == "zigzag32": + if goType.Kind() == reflect.Int32 { + f.L1.Kind = pref.Sint32Kind + } + case s == "zigzag64": + if goType.Kind() == reflect.Int64 { + f.L1.Kind = pref.Sint64Kind + } + case s == "fixed32": + switch goType.Kind() { + case reflect.Int32: + f.L1.Kind = pref.Sfixed32Kind + case reflect.Uint32: + f.L1.Kind = pref.Fixed32Kind + case reflect.Float32: + f.L1.Kind = pref.FloatKind + } + case s == "fixed64": + switch goType.Kind() { + case reflect.Int64: + f.L1.Kind = pref.Sfixed64Kind + case reflect.Uint64: + f.L1.Kind = pref.Fixed64Kind + case reflect.Float64: + f.L1.Kind = pref.DoubleKind + } + case s == "bytes": + switch { + case goType.Kind() == reflect.String: + f.L1.Kind = pref.StringKind + case goType.Kind() == reflect.Slice && goType.Elem() == byteType: + f.L1.Kind = pref.BytesKind + default: + f.L1.Kind = pref.MessageKind + } + case s == "group": + f.L1.Kind = pref.GroupKind + case strings.HasPrefix(s, "enum="): + f.L1.Kind = pref.EnumKind + case strings.HasPrefix(s, "json="): + jsonName := s[len("json="):] + if jsonName != strs.JSONCamelCase(string(f.L0.FullName.Name())) { + f.L1.JSONName.Init(jsonName) + } + case s == "packed": + f.L1.HasPacked = true + f.L1.IsPacked = true + case strings.HasPrefix(s, "weak="): + f.L1.IsWeak = true + f.L1.Message = fdesc.PlaceholderMessage(pref.FullName(s[len("weak="):])) + case strings.HasPrefix(s, "def="): + // The default tag is special in that everything afterwards is the + // default regardless of the presence of commas. + s, i = tag[len("def="):], len(tag) + v, ev, _ := defval.Unmarshal(s, f.L1.Kind, evs, defval.GoTag) + f.L1.Default = fdesc.DefaultValue(v, ev) + case s == "proto3": + f.L0.ParentFile = fdesc.SurrogateProto3 + } + tag = strings.TrimPrefix(tag[i:], ",") + } + + // The generator uses the group message name instead of the field name. + // We obtain the real field name by lowercasing the group name. + if f.L1.Kind == pref.GroupKind { + f.L0.FullName = pref.FullName(strings.ToLower(string(f.L0.FullName))) + } + return f +} + +// Marshal encodes the protoreflect.FieldDescriptor as a tag. +// +// The enumName must be provided if the kind is an enum. +// Historically, the formulation of the enum "name" was the proto package +// dot-concatenated with the generated Go identifier for the enum type. +// Depending on the context on how Marshal is called, there are different ways +// through which that information is determined. As such it is the caller's +// responsibility to provide a function to obtain that information. 
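+//
+// Hedged expectation (not a guarantee from upstream): Marshal is intended to be
+// the inverse of Unmarshal above, so for the int32 sketch a call such as
+//
+//	Marshal(fd, "")
+//
+// should reproduce a tag of the form "varint,2,opt,name=page_number,json=pageNumber,proto3".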
+func Marshal(fd pref.FieldDescriptor, enumName string) string { + var tag []string + switch fd.Kind() { + case pref.BoolKind, pref.EnumKind, pref.Int32Kind, pref.Uint32Kind, pref.Int64Kind, pref.Uint64Kind: + tag = append(tag, "varint") + case pref.Sint32Kind: + tag = append(tag, "zigzag32") + case pref.Sint64Kind: + tag = append(tag, "zigzag64") + case pref.Sfixed32Kind, pref.Fixed32Kind, pref.FloatKind: + tag = append(tag, "fixed32") + case pref.Sfixed64Kind, pref.Fixed64Kind, pref.DoubleKind: + tag = append(tag, "fixed64") + case pref.StringKind, pref.BytesKind, pref.MessageKind: + tag = append(tag, "bytes") + case pref.GroupKind: + tag = append(tag, "group") + } + tag = append(tag, strconv.Itoa(int(fd.Number()))) + switch fd.Cardinality() { + case pref.Optional: + tag = append(tag, "opt") + case pref.Required: + tag = append(tag, "req") + case pref.Repeated: + tag = append(tag, "rep") + } + if fd.IsPacked() { + tag = append(tag, "packed") + } + name := string(fd.Name()) + if fd.Kind() == pref.GroupKind { + // The name of the FieldDescriptor for a group field is + // lowercased. To find the original capitalization, we + // look in the field's MessageType. + name = string(fd.Message().Name()) + } + tag = append(tag, "name="+name) + if jsonName := fd.JSONName(); jsonName != "" && jsonName != name && !fd.IsExtension() { + // NOTE: The jsonName != name condition is suspect, but it preserve + // the exact same semantics from the previous generator. + tag = append(tag, "json="+jsonName) + } + if fd.IsWeak() { + tag = append(tag, "weak="+string(fd.Message().FullName())) + } + // The previous implementation does not tag extension fields as proto3, + // even when the field is defined in a proto3 file. Match that behavior + // for consistency. + if fd.Syntax() == pref.Proto3 && !fd.IsExtension() { + tag = append(tag, "proto3") + } + if fd.Kind() == pref.EnumKind && enumName != "" { + tag = append(tag, "enum="+enumName) + } + if fd.ContainingOneof() != nil { + tag = append(tag, "oneof") + } + // This must appear last in the tag, since commas in strings aren't escaped. + if fd.HasDefault() { + def, _ := defval.Marshal(fd.Default(), fd.DefaultEnumValue(), fd.Kind(), defval.GoTag) + tag = append(tag, "def="+def) + } + return strings.Join(tag, ",") +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go new file mode 100644 index 0000000..eb10ea1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode.go @@ -0,0 +1,665 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "io" + "regexp" + "strconv" + "unicode/utf8" + + "google.golang.org/protobuf/internal/errors" +) + +// Decoder is a token-based textproto decoder. +type Decoder struct { + // lastCall is last method called, either readCall or peekCall. + // Initial value is readCall. + lastCall call + + // lastToken contains the last read token. + lastToken Token + + // lastErr contains the last read error. + lastErr error + + // openStack is a stack containing the byte characters for MessageOpen and + // ListOpen kinds. The top of stack represents the message or the list that + // the current token is nested in. An empty stack means the current token is + // at the top level message. The characters '{' and '<' both represent the + // MessageOpen kind. 
+ openStack []byte + + // orig is used in reporting line and column. + orig []byte + // in contains the unconsumed input. + in []byte +} + +// NewDecoder returns a Decoder to read the given []byte. +func NewDecoder(b []byte) *Decoder { + return &Decoder{orig: b, in: b} +} + +// ErrUnexpectedEOF means that EOF was encountered in the middle of the input. +var ErrUnexpectedEOF = errors.New("%v", io.ErrUnexpectedEOF) + +// call specifies which Decoder method was invoked. +type call uint8 + +const ( + readCall call = iota + peekCall +) + +// Peek looks ahead and returns the next token and error without advancing a read. +func (d *Decoder) Peek() (Token, error) { + defer func() { d.lastCall = peekCall }() + if d.lastCall == readCall { + d.lastToken, d.lastErr = d.Read() + } + return d.lastToken, d.lastErr +} + +// Read returns the next token. +// It will return an error if there is no valid token. +func (d *Decoder) Read() (Token, error) { + defer func() { d.lastCall = readCall }() + if d.lastCall == peekCall { + return d.lastToken, d.lastErr + } + + tok, err := d.parseNext(d.lastToken.kind) + if err != nil { + return Token{}, err + } + + switch tok.kind { + case comma, semicolon: + tok, err = d.parseNext(tok.kind) + if err != nil { + return Token{}, err + } + } + d.lastToken = tok + return tok, nil +} + +const ( + mismatchedFmt = "mismatched close character %q" + unexpectedFmt = "unexpected character %q" +) + +// parseNext parses the next Token based on given last kind. +func (d *Decoder) parseNext(lastKind Kind) (Token, error) { + // Trim leading spaces. + d.consume(0) + isEOF := false + if len(d.in) == 0 { + isEOF = true + } + + switch lastKind { + case EOF: + return d.consumeToken(EOF, 0, 0), nil + + case bof: + // Start of top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case Name: + // Next token can be MessageOpen, ListOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + case '[': + d.pushOpenStack(ch) + return d.consumeToken(ListOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case Scalar: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch d.in[0] { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + _, closeCh := d.currentOpenKind() + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case MessageClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case ListOpen: + // Next token can be ListClose or comma + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case ',': + return d.consumeToken(comma, 1, 0), nil + default: + return Token{}, d.newSyntaxError(unexpectedFmt, ch) + } + } + + case ListOpen: + // Next token can be ListClose, MessageStart or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case ']': + d.popOpenStack() + return d.consumeToken(ListClose, 1, 0), nil + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + + case ListClose: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. + // Next token can be EOF, comma, semicolon or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + switch ch := d.in[0]; ch { + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + case MessageOpen: + // Next token can be MessageClose, comma, semicolon or Name. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + case ',': + return d.consumeToken(comma, 1, 0), nil + case ';': + return d.consumeToken(semicolon, 1, 0), nil + default: + return d.parseFieldName() + } + + default: + // It is not possible to have this case. Let it panic below. + } + + case comma, semicolon: + openKind, closeCh := d.currentOpenKind() + switch openKind { + case bof: + // Top level message. Next token can be EOF or Name. + if isEOF { + return d.consumeToken(EOF, 0, 0), nil + } + return d.parseFieldName() + + case MessageOpen: + // Next token can be MessageClose or Name. 
+ if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case closeCh: + d.popOpenStack() + return d.consumeToken(MessageClose, 1, 0), nil + case otherCloseChar[closeCh]: + return Token{}, d.newSyntaxError(mismatchedFmt, ch) + default: + return d.parseFieldName() + } + + case ListOpen: + if lastKind == semicolon { + // It is not be possible to have this case as logic here + // should not have produced a semicolon Token when inside a + // list. Let it panic below. + break + } + // Next token can be MessageOpen or Scalar. + if isEOF { + return Token{}, ErrUnexpectedEOF + } + switch ch := d.in[0]; ch { + case '{', '<': + d.pushOpenStack(ch) + return d.consumeToken(MessageOpen, 1, 0), nil + default: + return d.parseScalar() + } + } + } + + line, column := d.Position(len(d.orig) - len(d.in)) + panic(fmt.Sprintf("Decoder.parseNext: bug at handling line %d:%d with lastKind=%v", line, column, lastKind)) +} + +var otherCloseChar = map[byte]byte{ + '}': '>', + '>': '}', +} + +// currentOpenKind indicates whether current position is inside a message, list +// or top-level message by returning MessageOpen, ListOpen or bof respectively. +// If the returned kind is either a MessageOpen or ListOpen, it also returns the +// corresponding closing character. +func (d *Decoder) currentOpenKind() (Kind, byte) { + if len(d.openStack) == 0 { + return bof, 0 + } + openCh := d.openStack[len(d.openStack)-1] + switch openCh { + case '{': + return MessageOpen, '}' + case '<': + return MessageOpen, '>' + case '[': + return ListOpen, ']' + } + panic(fmt.Sprintf("Decoder: openStack contains invalid byte %s", string(openCh))) +} + +func (d *Decoder) pushOpenStack(ch byte) { + d.openStack = append(d.openStack, ch) +} + +func (d *Decoder) popOpenStack() { + d.openStack = d.openStack[:len(d.openStack)-1] +} + +// parseFieldName parses field name and separator. +func (d *Decoder) parseFieldName() (tok Token, err error) { + defer func() { + if err == nil && d.tryConsumeChar(':') { + tok.attrs |= hasSeparator + } + }() + + // Extension or Any type URL. + if d.in[0] == '[' { + return d.parseTypeName() + } + + // Identifier. + if size := parseIdent(d.in, false); size > 0 { + return d.consumeToken(Name, size, uint8(IdentName)), nil + } + + // Field number. Identify if input is a valid number that is not negative + // and is decimal integer within 32-bit range. + if num := parseNumber(d.in); num.size > 0 { + if !num.neg && num.kind == numDec { + if _, err := strconv.ParseInt(string(d.in[:num.size]), 10, 32); err == nil { + return d.consumeToken(Name, num.size, uint8(FieldNumber)), nil + } + } + return Token{}, d.newSyntaxError("invalid field number: %s", d.in[:num.size]) + } + + return Token{}, d.newSyntaxError("invalid field name: %s", errRegexp.Find(d.in)) +} + +// parseTypeName parses Any type URL or extension field name. The name is +// enclosed in [ and ] characters. The C++ parser does not handle many legal URL +// strings. This implementation is more liberal and allows for the pattern +// ^[-_a-zA-Z0-9]+([./][-_a-zA-Z0-9]+)*`). Whitespaces and comments are allowed +// in between [ ], '.', '/' and the sub names. +func (d *Decoder) parseTypeName() (Token, error) { + startPos := len(d.orig) - len(d.in) + // Use alias s to advance first in order to use d.in for error handling. + // Caller already checks for [ as first character. 
+ s := consume(d.in[1:], 0) + if len(s) == 0 { + return Token{}, ErrUnexpectedEOF + } + + var name []byte + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + var closed bool + for len(s) > 0 && !closed { + switch { + case s[0] == ']': + s = s[1:] + closed = true + + case s[0] == '/', s[0] == '.': + if len(name) > 0 && (name[len(name)-1] == '/' || name[len(name)-1] == '.') { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)+1]) + } + name = append(name, s[0]) + s = s[1:] + s = consume(s, 0) + for len(s) > 0 && isTypeNameChar(s[0]) { + name = append(name, s[0]) + s = s[1:] + } + s = consume(s, 0) + + default: + return Token{}, d.newSyntaxError( + "invalid type URL/extension field name: %s", d.orig[startPos:len(d.orig)-len(s)+1]) + } + } + + if !closed { + return Token{}, ErrUnexpectedEOF + } + + // First character cannot be '.'. Last character cannot be '.' or '/'. + size := len(name) + if size == 0 || name[0] == '.' || name[size-1] == '.' || name[size-1] == '/' { + return Token{}, d.newSyntaxError("invalid type URL/extension field name: %s", + d.orig[startPos:len(d.orig)-len(s)]) + } + + d.in = s + endPos := len(d.orig) - len(d.in) + d.consume(0) + + return Token{ + kind: Name, + attrs: uint8(TypeName), + pos: startPos, + raw: d.orig[startPos:endPos], + str: string(name), + }, nil +} + +func isTypeNameChar(b byte) bool { + return (b == '-' || b == '_' || + ('0' <= b && b <= '9') || + ('a' <= b && b <= 'z') || + ('A' <= b && b <= 'Z')) +} + +func isWhiteSpace(b byte) bool { + switch b { + case ' ', '\n', '\r', '\t': + return true + default: + return false + } +} + +// parseIdent parses an unquoted proto identifier and returns size. +// If allowNeg is true, it allows '-' to be the first character in the +// identifier. This is used when parsing literal values like -infinity, etc. +// Regular expression matches an identifier: `^[_a-zA-Z][_a-zA-Z0-9]*` +func parseIdent(input []byte, allowNeg bool) int { + var size int + + s := input + if len(s) == 0 { + return 0 + } + + if allowNeg && s[0] == '-' { + s = s[1:] + size++ + if len(s) == 0 { + return 0 + } + } + + switch { + case s[0] == '_', + 'a' <= s[0] && s[0] <= 'z', + 'A' <= s[0] && s[0] <= 'Z': + s = s[1:] + size++ + default: + return 0 + } + + for len(s) > 0 && (s[0] == '_' || + 'a' <= s[0] && s[0] <= 'z' || + 'A' <= s[0] && s[0] <= 'Z' || + '0' <= s[0] && s[0] <= '9') { + s = s[1:] + size++ + } + + if len(s) > 0 && !isDelim(s[0]) { + return 0 + } + + return size +} + +// parseScalar parses for a string, literal or number value. +func (d *Decoder) parseScalar() (Token, error) { + if d.in[0] == '"' || d.in[0] == '\'' { + return d.parseStringValue() + } + + if tok, ok := d.parseLiteralValue(); ok { + return tok, nil + } + + if tok, ok := d.parseNumberValue(); ok { + return tok, nil + } + + return Token{}, d.newSyntaxError("invalid scalar value: %s", errRegexp.Find(d.in)) +} + +// parseLiteralValue parses a literal value. A literal value is used for +// bools, special floats and enums. This function simply identifies that the +// field value is a literal. +func (d *Decoder) parseLiteralValue() (Token, bool) { + size := parseIdent(d.in, true) + if size == 0 { + return Token{}, false + } + return d.consumeToken(Scalar, size, literalValue), true +} + +// consumeToken constructs a Token for given Kind from d.in and consumes given +// size-length from it. 
+func (d *Decoder) consumeToken(kind Kind, size int, attrs uint8) Token { + // Important to compute raw and pos before consuming. + tok := Token{ + kind: kind, + attrs: attrs, + pos: len(d.orig) - len(d.in), + raw: d.in[:size], + } + d.consume(size) + return tok +} + +// newSyntaxError returns a syntax error with line and column information for +// current position. +func (d *Decoder) newSyntaxError(f string, x ...interface{}) error { + e := errors.New(f, x...) + line, column := d.Position(len(d.orig) - len(d.in)) + return errors.New("syntax error (line %d:%d): %v", line, column, e) +} + +// Position returns line and column number of given index of the original input. +// It will panic if index is out of range. +func (d *Decoder) Position(idx int) (line int, column int) { + b := d.orig[:idx] + line = bytes.Count(b, []byte("\n")) + 1 + if i := bytes.LastIndexByte(b, '\n'); i >= 0 { + b = b[i+1:] + } + column = utf8.RuneCount(b) + 1 // ignore multi-rune characters + return line, column +} + +func (d *Decoder) tryConsumeChar(c byte) bool { + if len(d.in) > 0 && d.in[0] == c { + d.consume(1) + return true + } + return false +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func (d *Decoder) consume(n int) { + d.in = consume(d.in, n) + return +} + +// consume consumes n bytes of input and any subsequent whitespace or comments. +func consume(b []byte, n int) []byte { + b = b[n:] + for len(b) > 0 { + switch b[0] { + case ' ', '\n', '\r', '\t': + b = b[1:] + case '#': + if i := bytes.IndexByte(b, '\n'); i >= 0 { + b = b[i+len("\n"):] + } else { + b = nil + } + default: + return b + } + } + return b +} + +// Any sequence that looks like a non-delimiter (for error reporting). +var errRegexp = regexp.MustCompile(`^([-+._a-zA-Z0-9\/]+|.)`) + +// isDelim returns true if given byte is a delimiter character. +func isDelim(c byte) bool { + return !(c == '-' || c == '+' || c == '.' || c == '_' || + ('a' <= c && c <= 'z') || + ('A' <= c && c <= 'Z') || + ('0' <= c && c <= '9')) +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go new file mode 100644 index 0000000..f2d90b7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_number.go @@ -0,0 +1,190 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +// parseNumberValue parses a number from the input and returns a Token object. +func (d *Decoder) parseNumberValue() (Token, bool) { + in := d.in + num := parseNumber(in) + if num.size == 0 { + return Token{}, false + } + numAttrs := num.kind + if num.neg { + numAttrs |= isNegative + } + strSize := num.size + last := num.size - 1 + if num.kind == numFloat && (d.in[last] == 'f' || d.in[last] == 'F') { + strSize = last + } + tok := Token{ + kind: Scalar, + attrs: numberValue, + pos: len(d.orig) - len(d.in), + raw: d.in[:num.size], + str: string(d.in[:strSize]), + numAttrs: numAttrs, + } + d.consume(num.size) + return tok, true +} + +const ( + numDec uint8 = (1 << iota) / 2 + numHex + numOct + numFloat +) + +// number is the result of parsing out a valid number from parseNumber. It +// contains data for doing float or integer conversion via the strconv package +// in conjunction with the input bytes. 
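+//
+// For instance (an illustrative expectation, not an upstream test case),
+// parseNumber([]byte("-0x1F ")) should report a negative hex number:
+//
+//	number{kind: numHex, neg: true, size: 5}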
+type number struct { + kind uint8 + neg bool + size int +} + +// parseNumber constructs a number object from given input. It allows for the +// following patterns: +// integer: ^-?([1-9][0-9]*|0[xX][0-9a-fA-F]+|0[0-7]*) +// float: ^-?((0|[1-9][0-9]*)?([.][0-9]*)?([eE][+-]?[0-9]+)?[fF]?) +// It also returns the number of parsed bytes for the given number, 0 if it is +// not a number. +func parseNumber(input []byte) number { + kind := numDec + var size int + var neg bool + + s := input + if len(s) == 0 { + return number{} + } + + // Optional - + if s[0] == '-' { + neg = true + s = s[1:] + size++ + if len(s) == 0 { + return number{} + } + } + + // C++ allows for whitespace and comments in between the negative sign and + // the rest of the number. This logic currently does not but is consistent + // with v1. + + switch { + case s[0] == '0': + if len(s) > 1 { + switch { + case s[1] == 'x' || s[1] == 'X': + // Parse as hex number. + kind = numHex + n := 2 + s = s[2:] + for len(s) > 0 && (('0' <= s[0] && s[0] <= '9') || + ('a' <= s[0] && s[0] <= 'f') || + ('A' <= s[0] && s[0] <= 'F')) { + s = s[1:] + n++ + } + if n == 2 { + return number{} + } + size += n + + case '0' <= s[1] && s[1] <= '7': + // Parse as octal number. + kind = numOct + n := 2 + s = s[2:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '7' { + s = s[1:] + n++ + } + size += n + } + + if kind&(numHex|numOct) > 0 { + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + return number{kind: kind, neg: neg, size: size} + } + } + s = s[1:] + size++ + + case '1' <= s[0] && s[0] <= '9': + n := 1 + s = s[1:] + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + + case s[0] == '.': + // Set kind to numFloat to signify the intent to parse as float. And + // that it needs to have other digits after '.'. + kind = numFloat + + default: + return number{} + } + + // . followed by 0 or more digits. + if len(s) > 0 && s[0] == '.' { + n := 1 + s = s[1:] + // If decimal point was before any digits, it should be followed by + // other digits. + if len(s) == 0 && kind == numFloat { + return number{} + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + kind = numFloat + } + + // e or E followed by an optional - or + and 1 or more digits. + if len(s) >= 2 && (s[0] == 'e' || s[0] == 'E') { + kind = numFloat + s = s[1:] + n := 1 + if s[0] == '+' || s[0] == '-' { + s = s[1:] + n++ + if len(s) == 0 { + return number{} + } + } + for len(s) > 0 && '0' <= s[0] && s[0] <= '9' { + s = s[1:] + n++ + } + size += n + } + + // Optional suffix f or F for floats. + if len(s) > 0 && (s[0] == 'f' || s[0] == 'F') { + kind = numFloat + s = s[1:] + size++ + } + + // Check that next byte is a delimiter or it is at the end. + if len(s) > 0 && !isDelim(s[0]) { + return number{} + } + + return number{kind: kind, neg: neg, size: size} +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go new file mode 100644 index 0000000..d4d3490 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_string.go @@ -0,0 +1,161 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package text + +import ( + "bytes" + "strconv" + "strings" + "unicode" + "unicode/utf16" + "unicode/utf8" + + "google.golang.org/protobuf/internal/strs" +) + +// parseStringValue parses string field token. +// This differs from parseString since the text format allows +// multiple back-to-back string literals where they are semantically treated +// as a single large string with all values concatenated. +// +// E.g., `"foo" "bar" "baz"` => "foobarbaz" +func (d *Decoder) parseStringValue() (Token, error) { + // Note that the ending quote is sufficient to unambiguously mark the end + // of a string. Thus, the text grammar does not require intervening + // whitespace or control characters in-between strings. + // Thus, the following is valid: + // `"foo"'bar'"baz"` => "foobarbaz" + in0 := d.in + var ss []string + for len(d.in) > 0 && (d.in[0] == '"' || d.in[0] == '\'') { + s, err := d.parseString() + if err != nil { + return Token{}, err + } + ss = append(ss, s) + } + // d.in already points to the end of the value at this point. + return Token{ + kind: Scalar, + attrs: stringValue, + pos: len(d.orig) - len(in0), + raw: in0[:len(in0)-len(d.in)], + str: strings.Join(ss, ""), + }, nil +} + +// parseString parses a string value enclosed in " or '. +func (d *Decoder) parseString() (string, error) { + in := d.in + if len(in) == 0 { + return "", ErrUnexpectedEOF + } + quote := in[0] + in = in[1:] + i := indexNeedEscapeInBytes(in) + in, out := in[i:], in[:i:i] // set cap to prevent mutations + for len(in) > 0 { + switch r, n := utf8.DecodeRune(in); { + case r == utf8.RuneError && n == 1: + return "", d.newSyntaxError("invalid UTF-8 detected") + case r == 0 || r == '\n': + return "", d.newSyntaxError("invalid character %q in string", r) + case r == rune(quote): + in = in[1:] + d.consume(len(d.in) - len(in)) + return string(out), nil + case r == '\\': + if len(in) < 2 { + return "", ErrUnexpectedEOF + } + switch r := in[1]; r { + case '"', '\'', '\\', '?': + in, out = in[2:], append(out, r) + case 'a': + in, out = in[2:], append(out, '\a') + case 'b': + in, out = in[2:], append(out, '\b') + case 'n': + in, out = in[2:], append(out, '\n') + case 'r': + in, out = in[2:], append(out, '\r') + case 't': + in, out = in[2:], append(out, '\t') + case 'v': + in, out = in[2:], append(out, '\v') + case 'f': + in, out = in[2:], append(out, '\f') + case '0', '1', '2', '3', '4', '5', '6', '7': + // One, two, or three octal characters. + n := len(in[1:]) - len(bytes.TrimLeft(in[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(string(in[1:1+n]), 8, 8) + if err != nil { + return "", d.newSyntaxError("invalid octal escape code %q in string", in[:1+n]) + } + in, out = in[1+n:], append(out, byte(v)) + case 'x': + // One or two hexadecimal characters. 
+ n := len(in[2:]) - len(bytes.TrimLeft(in[2:], "0123456789abcdefABCDEF")) + if n > 2 { + n = 2 + } + v, err := strconv.ParseUint(string(in[2:2+n]), 16, 8) + if err != nil { + return "", d.newSyntaxError("invalid hex escape code %q in string", in[:2+n]) + } + in, out = in[2+n:], append(out, byte(v)) + case 'u', 'U': + // Four or eight hexadecimal characters + n := 6 + if r == 'U' { + n = 10 + } + if len(in) < n { + return "", ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:n]), 16, 32) + if utf8.MaxRune < v || err != nil { + return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:n]) + } + in = in[n:] + + r := rune(v) + if utf16.IsSurrogate(r) { + if len(in) < 6 { + return "", ErrUnexpectedEOF + } + v, err := strconv.ParseUint(string(in[2:6]), 16, 16) + r = utf16.DecodeRune(r, rune(v)) + if in[0] != '\\' || in[1] != 'u' || r == unicode.ReplacementChar || err != nil { + return "", d.newSyntaxError("invalid Unicode escape code %q in string", in[:6]) + } + in = in[6:] + } + out = append(out, string(r)...) + default: + return "", d.newSyntaxError("invalid escape code %q in string", in[:2]) + } + default: + i := indexNeedEscapeInBytes(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + return "", ErrUnexpectedEOF +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInBytes(b []byte) int { return indexNeedEscapeInString(strs.UnsafeString(b)) } + +// UnmarshalString returns an unescaped string given a textproto string value. +// String value needs to contain single or double quotes. This is only used by +// internal/encoding/defval package for unmarshaling bytes. +func UnmarshalString(s string) (string, error) { + d := NewDecoder([]byte(s)) + return d.parseString() +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go new file mode 100644 index 0000000..83d2b0d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/decode_token.go @@ -0,0 +1,373 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "bytes" + "fmt" + "math" + "strconv" + "strings" + + "google.golang.org/protobuf/internal/flags" +) + +// Kind represents a token kind expressible in the textproto format. +type Kind uint8 + +// Kind values. +const ( + Invalid Kind = iota + EOF + Name // Name indicates the field name. + Scalar // Scalar are scalar values, e.g. "string", 47, ENUM_LITERAL, true. + MessageOpen + MessageClose + ListOpen + ListClose + + // comma and semi-colon are only for parsing in between values and should not be exposed. + comma + semicolon + + // bof indicates beginning of file, which is the default token + // kind at the beginning of parsing. + bof = Invalid +) + +func (t Kind) String() string { + switch t { + case Invalid: + return "" + case EOF: + return "eof" + case Scalar: + return "scalar" + case Name: + return "name" + case MessageOpen: + return "{" + case MessageClose: + return "}" + case ListOpen: + return "[" + case ListClose: + return "]" + case comma: + return "," + case semicolon: + return ";" + default: + return fmt.Sprintf("", uint8(t)) + } +} + +// NameKind represents different types of field names. +type NameKind uint8 + +// NameKind values. 
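+//
+// For example (illustrative input only): in the textproto
+//
+//	foo: 1 [my.pkg.ext] { } 3: true
+//
+// the three Name tokens have kinds IdentName, TypeName and FieldNumber
+// respectively.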
+const ( + IdentName NameKind = iota + 1 + TypeName + FieldNumber +) + +func (t NameKind) String() string { + switch t { + case IdentName: + return "IdentName" + case TypeName: + return "TypeName" + case FieldNumber: + return "FieldNumber" + default: + return fmt.Sprintf("", uint8(t)) + } +} + +// Bit mask in Token.attrs to indicate if a Name token is followed by the +// separator char ':'. The field name separator char is optional for message +// field or repeated message field, but required for all other types. Decoder +// simply indicates whether a Name token is followed by separator or not. It is +// up to the prototext package to validate. +const hasSeparator = 1 << 7 + +// Scalar value types. +const ( + numberValue = iota + 1 + stringValue + literalValue +) + +// Bit mask in Token.numAttrs to indicate that the number is a negative. +const isNegative = 1 << 7 + +// Token provides a parsed token kind and value. Values are provided by the +// different accessor methods. +type Token struct { + // Kind of the Token object. + kind Kind + // attrs contains metadata for the following Kinds: + // Name: hasSeparator bit and one of NameKind. + // Scalar: one of numberValue, stringValue, literalValue. + attrs uint8 + // numAttrs contains metadata for numberValue: + // - highest bit is whether negative or positive. + // - lower bits indicate one of numDec, numHex, numOct, numFloat. + numAttrs uint8 + // pos provides the position of the token in the original input. + pos int + // raw bytes of the serialized token. + // This is a subslice into the original input. + raw []byte + // str contains parsed string for the following: + // - stringValue of Scalar kind + // - numberValue of Scalar kind + // - TypeName of Name kind + str string +} + +// Kind returns the token kind. +func (t Token) Kind() Kind { + return t.kind +} + +// RawString returns the read value in string. +func (t Token) RawString() string { + return string(t.raw) +} + +// Pos returns the token position from the input. +func (t Token) Pos() int { + return t.pos +} + +// NameKind returns IdentName, TypeName or FieldNumber. +// It panics if type is not Name. +func (t Token) NameKind() NameKind { + if t.kind == Name { + return NameKind(t.attrs &^ hasSeparator) + } + panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) +} + +// HasSeparator returns true if the field name is followed by the separator char +// ':', else false. It panics if type is not Name. +func (t Token) HasSeparator() bool { + if t.kind == Name { + return t.attrs&hasSeparator != 0 + } + panic(fmt.Sprintf("Token is not a Name type: %s", t.kind)) +} + +// IdentName returns the value for IdentName type. +func (t Token) IdentName() string { + if t.kind == Name && t.attrs&uint8(IdentName) != 0 { + return string(t.raw) + } + panic(fmt.Sprintf("Token is not an IdentName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) +} + +// TypeName returns the value for TypeName type. +func (t Token) TypeName() string { + if t.kind == Name && t.attrs&uint8(TypeName) != 0 { + return t.str + } + panic(fmt.Sprintf("Token is not a TypeName: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) +} + +// FieldNumber returns the value for FieldNumber type. It returns a +// non-negative int32 value. Caller will still need to validate for the correct +// field number range. 
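+//
+// For example (illustrative): for the input `3: true`, the Name token's
+// FieldNumber() is 3.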
+func (t Token) FieldNumber() int32 { + if t.kind != Name || t.attrs&uint8(FieldNumber) == 0 { + panic(fmt.Sprintf("Token is not a FieldNumber: %s:%s", t.kind, NameKind(t.attrs&^hasSeparator))) + } + // Following should not return an error as it had already been called right + // before this Token was constructed. + num, _ := strconv.ParseInt(string(t.raw), 10, 32) + return int32(num) +} + +// String returns the string value for a Scalar type. +func (t Token) String() (string, bool) { + if t.kind != Scalar || t.attrs != stringValue { + return "", false + } + return t.str, true +} + +// Enum returns the literal value for a Scalar type for use as enum literals. +func (t Token) Enum() (string, bool) { + if t.kind != Scalar || t.attrs != literalValue || (len(t.raw) > 0 && t.raw[0] == '-') { + return "", false + } + return string(t.raw), true +} + +// Bool returns the bool value for a Scalar type. +func (t Token) Bool() (bool, bool) { + if t.kind != Scalar { + return false, false + } + switch t.attrs { + case literalValue: + if b, ok := boolLits[string(t.raw)]; ok { + return b, true + } + case numberValue: + // Unsigned integer representation of 0 or 1 is permitted: 00, 0x0, 01, + // 0x1, etc. + n, err := strconv.ParseUint(t.str, 0, 64) + if err == nil { + switch n { + case 0: + return false, true + case 1: + return true, true + } + } + } + return false, false +} + +// These exact boolean literals are the ones supported in C++. +var boolLits = map[string]bool{ + "t": true, + "true": true, + "True": true, + "f": false, + "false": false, + "False": false, +} + +// Uint64 returns the uint64 value for a Scalar type. +func (t Token) Uint64() (uint64, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 64) + if err != nil { + return 0, false + } + return n, true +} + +// Uint32 returns the uint32 value for a Scalar type. +func (t Token) Uint32() (uint32, bool) { + if t.kind != Scalar || t.attrs != numberValue || + t.numAttrs&isNegative > 0 || t.numAttrs&numFloat > 0 { + return 0, false + } + n, err := strconv.ParseUint(t.str, 0, 32) + if err != nil { + return 0, false + } + return uint32(n), true +} + +// Int64 returns the int64 value for a Scalar type. +func (t Token) Int64() (int64, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 64); err == nil { + return n, true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 64); err == nil { + return int64(n), true + } + } + return 0, false +} + +// Int32 returns the int32 value for a Scalar type. +func (t Token) Int32() (int32, bool) { + if t.kind != Scalar || t.attrs != numberValue || t.numAttrs&numFloat > 0 { + return 0, false + } + if n, err := strconv.ParseInt(t.str, 0, 32); err == nil { + return int32(n), true + } + // C++ accepts large positive hex numbers as negative values. + // This feature is here for proto1 backwards compatibility purposes. + if flags.ProtoLegacy && (t.numAttrs == numHex) { + if n, err := strconv.ParseUint(t.str, 0, 32); err == nil { + return int32(n), true + } + } + return 0, false +} + +// Float64 returns the float64 value for a Scalar type. 
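+//
+// For instance (illustrative): the literal token `inf` yields (+Inf, true), and
+// an out-of-range decimal such as 1e999 also reports ok, saturating to +Inf.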
+func (t Token) Float64() (float64, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return f, true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + return n, true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return n, true + } + } + return 0, false +} + +// Float32 returns the float32 value for a Scalar type. +func (t Token) Float32() (float32, bool) { + if t.kind != Scalar { + return 0, false + } + switch t.attrs { + case literalValue: + if f, ok := floatLits[strings.ToLower(string(t.raw))]; ok { + return float32(f), true + } + case numberValue: + n, err := strconv.ParseFloat(t.str, 64) + if err == nil { + // Overflows are treated as (-)infinity. + return float32(n), true + } + nerr := err.(*strconv.NumError) + if nerr.Err == strconv.ErrRange { + return float32(n), true + } + } + return 0, false +} + +// These are the supported float literals which C++ permits case-insensitive +// variants of these. +var floatLits = map[string]float64{ + "nan": math.NaN(), + "inf": math.Inf(1), + "infinity": math.Inf(1), + "-inf": math.Inf(-1), + "-infinity": math.Inf(-1), +} + +// TokenEquals returns true if given Tokens are equal, else false. +func TokenEquals(x, y Token) bool { + return x.kind == y.kind && + x.attrs == y.attrs && + x.numAttrs == y.numAttrs && + x.pos == y.pos && + bytes.Equal(x.raw, y.raw) && + x.str == y.str +} diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go new file mode 100644 index 0000000..0ce8d6f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/doc.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package text implements the text format for protocol buffers. +// This package has no semantic understanding for protocol buffers and is only +// a parser and composer for the format. +// +// There is no formal specification for the protobuf text format, as such the +// C++ implementation (see google::protobuf::TextFormat) is the reference +// implementation of the text format. +// +// This package is neither a superset nor a subset of the C++ implementation. +// This implementation permits a more liberal grammar in some cases to be +// backwards compatible with the historical Go implementation. +// Future parsings unique to Go should not be added. +// Some grammars allowed by the C++ implementation are deliberately +// not implemented here because they are considered a bug by the protobuf team +// and should not be replicated. +// +// The Go implementation should implement a sufficient amount of the C++ +// grammar such that the default text serialization by C++ can be parsed by Go. +// However, just because the C++ parser accepts some input does not mean that +// the Go implementation should as well. 
+// +// The text format is almost a superset of JSON except: +// * message keys are not quoted strings, but identifiers +// * the top-level value must be a message without the delimiters +package text diff --git a/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go new file mode 100644 index 0000000..c4ba1c5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/encoding/text/encode.go @@ -0,0 +1,267 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package text + +import ( + "math" + "math/bits" + "strconv" + "strings" + "unicode/utf8" + + "google.golang.org/protobuf/internal/detrand" + "google.golang.org/protobuf/internal/errors" +) + +// encType represents an encoding type. +type encType uint8 + +const ( + _ encType = (1 << iota) / 2 + name + scalar + messageOpen + messageClose +) + +// Encoder provides methods to write out textproto constructs and values. The user is +// responsible for producing valid sequences of constructs and values. +type Encoder struct { + encoderState + + indent string + newline string // set to "\n" if len(indent) > 0 + delims [2]byte + outputASCII bool +} + +type encoderState struct { + lastType encType + indents []byte + out []byte +} + +// NewEncoder returns an Encoder. +// +// If indent is a non-empty string, it causes every entry in a List or Message +// to be preceded by the indent and trailed by a newline. +// +// If delims is not the zero value, it controls the delimiter characters used +// for messages (e.g., "{}" vs "<>"). +// +// If outputASCII is true, strings will be serialized in such a way that +// multi-byte UTF-8 sequences are escaped. This property ensures that the +// overall output is ASCII (as opposed to UTF-8). +func NewEncoder(indent string, delims [2]byte, outputASCII bool) (*Encoder, error) { + e := &Encoder{} + if len(indent) > 0 { + if strings.Trim(indent, " \t") != "" { + return nil, errors.New("indent may only be composed of space and tab characters") + } + e.indent = indent + e.newline = "\n" + } + switch delims { + case [2]byte{0, 0}: + e.delims = [2]byte{'{', '}'} + case [2]byte{'{', '}'}, [2]byte{'<', '>'}: + e.delims = delims + default: + return nil, errors.New("delimiters may only be \"{}\" or \"<>\"") + } + e.outputASCII = outputASCII + + return e, nil +} + +// Bytes returns the content of the written bytes. +func (e *Encoder) Bytes() []byte { + return e.out +} + +// StartMessage writes out the '{' or '<' symbol. +func (e *Encoder) StartMessage() { + e.prepareNext(messageOpen) + e.out = append(e.out, e.delims[0]) +} + +// EndMessage writes out the '}' or '>' symbol. +func (e *Encoder) EndMessage() { + e.prepareNext(messageClose) + e.out = append(e.out, e.delims[1]) +} + +// WriteName writes out the field name and the separator ':'. +func (e *Encoder) WriteName(s string) { + e.prepareNext(name) + e.out = append(e.out, s...) + e.out = append(e.out, ':') +} + +// WriteBool writes out the given boolean value. +func (e *Encoder) WriteBool(b bool) { + if b { + e.WriteLiteral("true") + } else { + e.WriteLiteral("false") + } +} + +// WriteString writes out the given string value. 
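The public surface for this internal encoder is the prototext package. A minimal usage sketch, assuming the google.golang.org/protobuf module (encoding/prototext and the wrapperspb well-known types) is available; Indent selects the multi-line form and EmitASCII corresponds to the outputASCII flag documented above, and exact whitespace may vary between runs because the encoder deliberately randomizes it:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/prototext"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Indent enables the indented multi-line output; EmitASCII escapes any
	// non-ASCII runes so the result is plain ASCII.
	opts := prototext.MarshalOptions{Indent: "  ", EmitASCII: true}
	out, err := opts.Marshal(wrapperspb.String("héllo"))
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // e.g. value: "h\u00e9llo"
}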
+func (e *Encoder) WriteString(s string) { + e.prepareNext(scalar) + e.out = appendString(e.out, s, e.outputASCII) +} + +func appendString(out []byte, in string, outputASCII bool) []byte { + out = append(out, '"') + i := indexNeedEscapeInString(in) + in, out = in[i:], append(out, in[:i]...) + for len(in) > 0 { + switch r, n := utf8.DecodeRuneInString(in); { + case r == utf8.RuneError && n == 1: + // We do not report invalid UTF-8 because strings in the text format + // are used to represent both the proto string and bytes type. + r = rune(in[0]) + fallthrough + case r < ' ' || r == '"' || r == '\\': + out = append(out, '\\') + switch r { + case '"', '\\': + out = append(out, byte(r)) + case '\n': + out = append(out, 'n') + case '\r': + out = append(out, 'r') + case '\t': + out = append(out, 't') + default: + out = append(out, 'x') + out = append(out, "00"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + case outputASCII && r >= utf8.RuneSelf: + out = append(out, '\\') + if r <= math.MaxUint16 { + out = append(out, 'u') + out = append(out, "0000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } else { + out = append(out, 'U') + out = append(out, "00000000"[1+(bits.Len32(uint32(r))-1)/4:]...) + out = strconv.AppendUint(out, uint64(r), 16) + } + in = in[n:] + default: + i := indexNeedEscapeInString(in[n:]) + in, out = in[n+i:], append(out, in[:n+i]...) + } + } + out = append(out, '"') + return out +} + +// indexNeedEscapeInString returns the index of the character that needs +// escaping. If no characters need escaping, this returns the input length. +func indexNeedEscapeInString(s string) int { + for i := 0; i < len(s); i++ { + if c := s[i]; c < ' ' || c == '"' || c == '\'' || c == '\\' || c >= utf8.RuneSelf { + return i + } + } + return len(s) +} + +// WriteFloat writes out the given float value for given bitSize. +func (e *Encoder) WriteFloat(n float64, bitSize int) { + e.prepareNext(scalar) + e.out = appendFloat(e.out, n, bitSize) +} + +func appendFloat(out []byte, n float64, bitSize int) []byte { + switch { + case math.IsNaN(n): + return append(out, "nan"...) + case math.IsInf(n, +1): + return append(out, "inf"...) + case math.IsInf(n, -1): + return append(out, "-inf"...) + default: + return strconv.AppendFloat(out, n, 'g', -1, bitSize) + } +} + +// WriteInt writes out the given signed integer value. +func (e *Encoder) WriteInt(n int64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatInt(n, 10)...) +} + +// WriteUint writes out the given unsigned integer value. +func (e *Encoder) WriteUint(n uint64) { + e.prepareNext(scalar) + e.out = append(e.out, strconv.FormatUint(n, 10)...) +} + +// WriteLiteral writes out the given string as a literal value without quotes. +// This is used for writing enum literal strings. +func (e *Encoder) WriteLiteral(s string) { + e.prepareNext(scalar) + e.out = append(e.out, s...) +} + +// prepareNext adds possible space and indentation for the next value based +// on last encType and indent option. It also updates e.lastType to next. +func (e *Encoder) prepareNext(next encType) { + defer func() { + e.lastType = next + }() + + // Single line. + if len(e.indent) == 0 { + // Add space after each field before the next one. + if e.lastType&(scalar|messageClose) != 0 && next == name { + e.out = append(e.out, ' ') + // Add a random extra space to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + } + return + } + + // Multi-line. 
+ switch { + case e.lastType == name: + e.out = append(e.out, ' ') + // Add a random extra space after name: to make output unstable. + if detrand.Bool() { + e.out = append(e.out, ' ') + } + + case e.lastType == messageOpen && next != messageClose: + e.indents = append(e.indents, e.indent...) + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + + case e.lastType&(scalar|messageClose) != 0: + if next == messageClose { + e.indents = e.indents[:len(e.indents)-len(e.indent)] + } + e.out = append(e.out, '\n') + e.out = append(e.out, e.indents...) + } +} + +// Snapshot returns the current snapshot for use in Reset. +func (e *Encoder) Snapshot() encoderState { + return e.encoderState +} + +// Reset resets the Encoder to the given encoderState from a Snapshot. +func (e *Encoder) Reset(es encoderState) { + e.encoderState = es +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/errors.go b/vendor/google.golang.org/protobuf/internal/errors/errors.go new file mode 100644 index 0000000..20c17b3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/errors.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errors implements functions to manipulate errors. +package errors + +import ( + "errors" + "fmt" + + "google.golang.org/protobuf/internal/detrand" +) + +// Error is a sentinel matching all errors produced by this package. +var Error = errors.New("protobuf error") + +// New formats a string according to the format specifier and arguments and +// returns an error that has a "proto" prefix. +func New(f string, x ...interface{}) error { + return &prefixError{s: format(f, x...)} +} + +type prefixError struct{ s string } + +var prefix = func() string { + // Deliberately introduce instability into the error message string to + // discourage users from performing error string comparisons. + if detrand.Bool() { + return "proto: " // use non-breaking spaces (U+00a0) + } else { + return "proto: " // use regular spaces (U+0020) + } +}() + +func (e *prefixError) Error() string { + return prefix + e.s +} + +func (e *prefixError) Unwrap() error { + return Error +} + +// Wrap returns an error that has a "proto" prefix, the formatted string described +// by the format specifier and arguments, and a suffix of err. The error wraps err. +func Wrap(err error, f string, x ...interface{}) error { + return &wrapError{ + s: format(f, x...), + err: err, + } +} + +type wrapError struct { + s string + err error +} + +func (e *wrapError) Error() string { + return format("%v%v: %v", prefix, e.s, e.err) +} + +func (e *wrapError) Unwrap() error { + return e.err +} + +func (e *wrapError) Is(target error) bool { + return target == Error +} + +func format(f string, x ...interface{}) string { + // avoid "proto: " prefix when chaining + for i := 0; i < len(x); i++ { + switch e := x[i].(type) { + case *prefixError: + x[i] = e.s + case *wrapError: + x[i] = format("%v: %v", e.s, e.err) + } + } + return fmt.Sprintf(f, x...) 
+} + +func InvalidUTF8(name string) error { + return New("field %v contains invalid UTF-8", name) +} + +func RequiredNotSet(name string) error { + return New("required field %v not set", name) +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go112.go b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go new file mode 100644 index 0000000..f90e909 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go112.go @@ -0,0 +1,39 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.13 + +package errors + +import "reflect" + +// Is is a copy of Go 1.13's errors.Is for use with older Go versions. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + if err = unwrap(err); err == nil { + return false + } + } +} + +func unwrap(err error) error { + u, ok := err.(interface { + Unwrap() error + }) + if !ok { + return nil + } + return u.Unwrap() +} diff --git a/vendor/google.golang.org/protobuf/internal/errors/is_go113.go b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go new file mode 100644 index 0000000..dc05f41 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/errors/is_go113.go @@ -0,0 +1,12 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.13 + +package errors + +import "errors" + +// Is is errors.Is. +func Is(err, target error) bool { return errors.Is(err, target) } diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go new file mode 100644 index 0000000..74c5fef --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/any_gen.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.Any. +const ( + Any_TypeUrl = 1 // optional string + Any_Value = 2 // optional bytes +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go new file mode 100644 index 0000000..9a6b5f2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/api_gen.go @@ -0,0 +1,35 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.Api. +const ( + Api_Name = 1 // optional string + Api_Methods = 2 // repeated google.protobuf.Method + Api_Options = 3 // repeated google.protobuf.Option + Api_Version = 4 // optional string + Api_SourceContext = 5 // optional google.protobuf.SourceContext + Api_Mixins = 6 // repeated google.protobuf.Mixin + Api_Syntax = 7 // optional google.protobuf.Syntax +) + +// Field numbers for google.protobuf.Method. 
+const ( + Method_Name = 1 // optional string + Method_RequestTypeUrl = 2 // optional string + Method_RequestStreaming = 3 // optional bool + Method_ResponseTypeUrl = 4 // optional string + Method_ResponseStreaming = 5 // optional bool + Method_Options = 6 // repeated google.protobuf.Option + Method_Syntax = 7 // optional google.protobuf.Syntax +) + +// Field numbers for google.protobuf.Mixin. +const ( + Mixin_Name = 1 // optional string + Mixin_Root = 2 // optional string +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go new file mode 100644 index 0000000..6e37b59 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/descriptor_gen.go @@ -0,0 +1,240 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.FileDescriptorSet. +const ( + FileDescriptorSet_File = 1 // repeated google.protobuf.FileDescriptorProto +) + +// Field numbers for google.protobuf.FileDescriptorProto. +const ( + FileDescriptorProto_Name = 1 // optional string + FileDescriptorProto_Package = 2 // optional string + FileDescriptorProto_Dependency = 3 // repeated string + FileDescriptorProto_PublicDependency = 10 // repeated int32 + FileDescriptorProto_WeakDependency = 11 // repeated int32 + FileDescriptorProto_MessageType = 4 // repeated google.protobuf.DescriptorProto + FileDescriptorProto_EnumType = 5 // repeated google.protobuf.EnumDescriptorProto + FileDescriptorProto_Service = 6 // repeated google.protobuf.ServiceDescriptorProto + FileDescriptorProto_Extension = 7 // repeated google.protobuf.FieldDescriptorProto + FileDescriptorProto_Options = 8 // optional google.protobuf.FileOptions + FileDescriptorProto_SourceCodeInfo = 9 // optional google.protobuf.SourceCodeInfo + FileDescriptorProto_Syntax = 12 // optional string +) + +// Field numbers for google.protobuf.DescriptorProto. +const ( + DescriptorProto_Name = 1 // optional string + DescriptorProto_Field = 2 // repeated google.protobuf.FieldDescriptorProto + DescriptorProto_Extension = 6 // repeated google.protobuf.FieldDescriptorProto + DescriptorProto_NestedType = 3 // repeated google.protobuf.DescriptorProto + DescriptorProto_EnumType = 4 // repeated google.protobuf.EnumDescriptorProto + DescriptorProto_ExtensionRange = 5 // repeated google.protobuf.DescriptorProto.ExtensionRange + DescriptorProto_OneofDecl = 8 // repeated google.protobuf.OneofDescriptorProto + DescriptorProto_Options = 7 // optional google.protobuf.MessageOptions + DescriptorProto_ReservedRange = 9 // repeated google.protobuf.DescriptorProto.ReservedRange + DescriptorProto_ReservedName = 10 // repeated string +) + +// Field numbers for google.protobuf.DescriptorProto.ExtensionRange. +const ( + DescriptorProto_ExtensionRange_Start = 1 // optional int32 + DescriptorProto_ExtensionRange_End = 2 // optional int32 + DescriptorProto_ExtensionRange_Options = 3 // optional google.protobuf.ExtensionRangeOptions +) + +// Field numbers for google.protobuf.DescriptorProto.ReservedRange. +const ( + DescriptorProto_ReservedRange_Start = 1 // optional int32 + DescriptorProto_ReservedRange_End = 2 // optional int32 +) + +// Field numbers for google.protobuf.ExtensionRangeOptions. 
+const ( + ExtensionRangeOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.FieldDescriptorProto. +const ( + FieldDescriptorProto_Name = 1 // optional string + FieldDescriptorProto_Number = 3 // optional int32 + FieldDescriptorProto_Label = 4 // optional google.protobuf.FieldDescriptorProto.Label + FieldDescriptorProto_Type = 5 // optional google.protobuf.FieldDescriptorProto.Type + FieldDescriptorProto_TypeName = 6 // optional string + FieldDescriptorProto_Extendee = 2 // optional string + FieldDescriptorProto_DefaultValue = 7 // optional string + FieldDescriptorProto_OneofIndex = 9 // optional int32 + FieldDescriptorProto_JsonName = 10 // optional string + FieldDescriptorProto_Options = 8 // optional google.protobuf.FieldOptions + FieldDescriptorProto_Proto3Optional = 17 // optional bool +) + +// Field numbers for google.protobuf.OneofDescriptorProto. +const ( + OneofDescriptorProto_Name = 1 // optional string + OneofDescriptorProto_Options = 2 // optional google.protobuf.OneofOptions +) + +// Field numbers for google.protobuf.EnumDescriptorProto. +const ( + EnumDescriptorProto_Name = 1 // optional string + EnumDescriptorProto_Value = 2 // repeated google.protobuf.EnumValueDescriptorProto + EnumDescriptorProto_Options = 3 // optional google.protobuf.EnumOptions + EnumDescriptorProto_ReservedRange = 4 // repeated google.protobuf.EnumDescriptorProto.EnumReservedRange + EnumDescriptorProto_ReservedName = 5 // repeated string +) + +// Field numbers for google.protobuf.EnumDescriptorProto.EnumReservedRange. +const ( + EnumDescriptorProto_EnumReservedRange_Start = 1 // optional int32 + EnumDescriptorProto_EnumReservedRange_End = 2 // optional int32 +) + +// Field numbers for google.protobuf.EnumValueDescriptorProto. +const ( + EnumValueDescriptorProto_Name = 1 // optional string + EnumValueDescriptorProto_Number = 2 // optional int32 + EnumValueDescriptorProto_Options = 3 // optional google.protobuf.EnumValueOptions +) + +// Field numbers for google.protobuf.ServiceDescriptorProto. +const ( + ServiceDescriptorProto_Name = 1 // optional string + ServiceDescriptorProto_Method = 2 // repeated google.protobuf.MethodDescriptorProto + ServiceDescriptorProto_Options = 3 // optional google.protobuf.ServiceOptions +) + +// Field numbers for google.protobuf.MethodDescriptorProto. +const ( + MethodDescriptorProto_Name = 1 // optional string + MethodDescriptorProto_InputType = 2 // optional string + MethodDescriptorProto_OutputType = 3 // optional string + MethodDescriptorProto_Options = 4 // optional google.protobuf.MethodOptions + MethodDescriptorProto_ClientStreaming = 5 // optional bool + MethodDescriptorProto_ServerStreaming = 6 // optional bool +) + +// Field numbers for google.protobuf.FileOptions. 
+const ( + FileOptions_JavaPackage = 1 // optional string + FileOptions_JavaOuterClassname = 8 // optional string + FileOptions_JavaMultipleFiles = 10 // optional bool + FileOptions_JavaGenerateEqualsAndHash = 20 // optional bool + FileOptions_JavaStringCheckUtf8 = 27 // optional bool + FileOptions_OptimizeFor = 9 // optional google.protobuf.FileOptions.OptimizeMode + FileOptions_GoPackage = 11 // optional string + FileOptions_CcGenericServices = 16 // optional bool + FileOptions_JavaGenericServices = 17 // optional bool + FileOptions_PyGenericServices = 18 // optional bool + FileOptions_PhpGenericServices = 42 // optional bool + FileOptions_Deprecated = 23 // optional bool + FileOptions_CcEnableArenas = 31 // optional bool + FileOptions_ObjcClassPrefix = 36 // optional string + FileOptions_CsharpNamespace = 37 // optional string + FileOptions_SwiftPrefix = 39 // optional string + FileOptions_PhpClassPrefix = 40 // optional string + FileOptions_PhpNamespace = 41 // optional string + FileOptions_PhpMetadataNamespace = 44 // optional string + FileOptions_RubyPackage = 45 // optional string + FileOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.MessageOptions. +const ( + MessageOptions_MessageSetWireFormat = 1 // optional bool + MessageOptions_NoStandardDescriptorAccessor = 2 // optional bool + MessageOptions_Deprecated = 3 // optional bool + MessageOptions_MapEntry = 7 // optional bool + MessageOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.FieldOptions. +const ( + FieldOptions_Ctype = 1 // optional google.protobuf.FieldOptions.CType + FieldOptions_Packed = 2 // optional bool + FieldOptions_Jstype = 6 // optional google.protobuf.FieldOptions.JSType + FieldOptions_Lazy = 5 // optional bool + FieldOptions_Deprecated = 3 // optional bool + FieldOptions_Weak = 10 // optional bool + FieldOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.OneofOptions. +const ( + OneofOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.EnumOptions. +const ( + EnumOptions_AllowAlias = 2 // optional bool + EnumOptions_Deprecated = 3 // optional bool + EnumOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.EnumValueOptions. +const ( + EnumValueOptions_Deprecated = 1 // optional bool + EnumValueOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.ServiceOptions. +const ( + ServiceOptions_Deprecated = 33 // optional bool + ServiceOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.MethodOptions. +const ( + MethodOptions_Deprecated = 33 // optional bool + MethodOptions_IdempotencyLevel = 34 // optional google.protobuf.MethodOptions.IdempotencyLevel + MethodOptions_UninterpretedOption = 999 // repeated google.protobuf.UninterpretedOption +) + +// Field numbers for google.protobuf.UninterpretedOption. 
+const ( + UninterpretedOption_Name = 2 // repeated google.protobuf.UninterpretedOption.NamePart + UninterpretedOption_IdentifierValue = 3 // optional string + UninterpretedOption_PositiveIntValue = 4 // optional uint64 + UninterpretedOption_NegativeIntValue = 5 // optional int64 + UninterpretedOption_DoubleValue = 6 // optional double + UninterpretedOption_StringValue = 7 // optional bytes + UninterpretedOption_AggregateValue = 8 // optional string +) + +// Field numbers for google.protobuf.UninterpretedOption.NamePart. +const ( + UninterpretedOption_NamePart_NamePart = 1 // required string + UninterpretedOption_NamePart_IsExtension = 2 // required bool +) + +// Field numbers for google.protobuf.SourceCodeInfo. +const ( + SourceCodeInfo_Location = 1 // repeated google.protobuf.SourceCodeInfo.Location +) + +// Field numbers for google.protobuf.SourceCodeInfo.Location. +const ( + SourceCodeInfo_Location_Path = 1 // repeated int32 + SourceCodeInfo_Location_Span = 2 // repeated int32 + SourceCodeInfo_Location_LeadingComments = 3 // optional string + SourceCodeInfo_Location_TrailingComments = 4 // optional string + SourceCodeInfo_Location_LeadingDetachedComments = 6 // repeated string +) + +// Field numbers for google.protobuf.GeneratedCodeInfo. +const ( + GeneratedCodeInfo_Annotation = 1 // repeated google.protobuf.GeneratedCodeInfo.Annotation +) + +// Field numbers for google.protobuf.GeneratedCodeInfo.Annotation. +const ( + GeneratedCodeInfo_Annotation_Path = 1 // repeated int32 + GeneratedCodeInfo_Annotation_SourceFile = 2 // optional string + GeneratedCodeInfo_Annotation_Begin = 3 // optional int32 + GeneratedCodeInfo_Annotation_End = 4 // optional int32 +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go new file mode 100644 index 0000000..e597885 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/doc.go @@ -0,0 +1,7 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fieldnum contains constants for field numbers of fields in messages +// declared in descriptor.proto and any of the well-known types. +package fieldnum diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go new file mode 100644 index 0000000..8816c73 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/duration_gen.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.Duration. +const ( + Duration_Seconds = 1 // optional int64 + Duration_Nanos = 2 // optional int32 +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go new file mode 100644 index 0000000..b5130a6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/empty_gen.go @@ -0,0 +1,10 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.Empty. 
+const () diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go new file mode 100644 index 0000000..7e3bfa2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/field_mask_gen.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.FieldMask. +const ( + FieldMask_Paths = 1 // repeated string +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go new file mode 100644 index 0000000..241972b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/source_context_gen.go @@ -0,0 +1,12 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.SourceContext. +const ( + SourceContext_FileName = 1 // optional string +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go new file mode 100644 index 0000000..c460aab --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/struct_gen.go @@ -0,0 +1,33 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.Struct. +const ( + Struct_Fields = 1 // repeated google.protobuf.Struct.FieldsEntry +) + +// Field numbers for google.protobuf.Struct.FieldsEntry. +const ( + Struct_FieldsEntry_Key = 1 // optional string + Struct_FieldsEntry_Value = 2 // optional google.protobuf.Value +) + +// Field numbers for google.protobuf.Value. +const ( + Value_NullValue = 1 // optional google.protobuf.NullValue + Value_NumberValue = 2 // optional double + Value_StringValue = 3 // optional string + Value_BoolValue = 4 // optional bool + Value_StructValue = 5 // optional google.protobuf.Struct + Value_ListValue = 6 // optional google.protobuf.ListValue +) + +// Field numbers for google.protobuf.ListValue. +const ( + ListValue_Values = 1 // repeated google.protobuf.Value +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go new file mode 100644 index 0000000..b4346fb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/timestamp_gen.go @@ -0,0 +1,13 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.Timestamp. 
+const ( + Timestamp_Seconds = 1 // optional int64 + Timestamp_Nanos = 2 // optional int32 +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go new file mode 100644 index 0000000..b392e95 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/type_gen.go @@ -0,0 +1,53 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.Type. +const ( + Type_Name = 1 // optional string + Type_Fields = 2 // repeated google.protobuf.Field + Type_Oneofs = 3 // repeated string + Type_Options = 4 // repeated google.protobuf.Option + Type_SourceContext = 5 // optional google.protobuf.SourceContext + Type_Syntax = 6 // optional google.protobuf.Syntax +) + +// Field numbers for google.protobuf.Field. +const ( + Field_Kind = 1 // optional google.protobuf.Field.Kind + Field_Cardinality = 2 // optional google.protobuf.Field.Cardinality + Field_Number = 3 // optional int32 + Field_Name = 4 // optional string + Field_TypeUrl = 6 // optional string + Field_OneofIndex = 7 // optional int32 + Field_Packed = 8 // optional bool + Field_Options = 9 // repeated google.protobuf.Option + Field_JsonName = 10 // optional string + Field_DefaultValue = 11 // optional string +) + +// Field numbers for google.protobuf.Enum. +const ( + Enum_Name = 1 // optional string + Enum_Enumvalue = 2 // repeated google.protobuf.EnumValue + Enum_Options = 3 // repeated google.protobuf.Option + Enum_SourceContext = 4 // optional google.protobuf.SourceContext + Enum_Syntax = 5 // optional google.protobuf.Syntax +) + +// Field numbers for google.protobuf.EnumValue. +const ( + EnumValue_Name = 1 // optional string + EnumValue_Number = 2 // optional int32 + EnumValue_Options = 3 // repeated google.protobuf.Option +) + +// Field numbers for google.protobuf.Option. +const ( + Option_Name = 1 // optional string + Option_Value = 2 // optional google.protobuf.Any +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go new file mode 100644 index 0000000..42f846a --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldnum/wrappers_gen.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-protos. DO NOT EDIT. + +package fieldnum + +// Field numbers for google.protobuf.DoubleValue. +const ( + DoubleValue_Value = 1 // optional double +) + +// Field numbers for google.protobuf.FloatValue. +const ( + FloatValue_Value = 1 // optional float +) + +// Field numbers for google.protobuf.Int64Value. +const ( + Int64Value_Value = 1 // optional int64 +) + +// Field numbers for google.protobuf.UInt64Value. +const ( + UInt64Value_Value = 1 // optional uint64 +) + +// Field numbers for google.protobuf.Int32Value. +const ( + Int32Value_Value = 1 // optional int32 +) + +// Field numbers for google.protobuf.UInt32Value. +const ( + UInt32Value_Value = 1 // optional uint32 +) + +// Field numbers for google.protobuf.BoolValue. +const ( + BoolValue_Value = 1 // optional bool +) + +// Field numbers for google.protobuf.StringValue. 
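These generated constants are meant to be paired with the wire-format helpers in the public protowire package, which is the pattern filedesc.Builder uses further below when it walks a raw descriptor. A minimal sketch of that pairing, using a locally defined stand-in constant because this internal fieldnum package is not importable from outside the module:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

// stringValueValue is a stand-in for fieldnum.StringValue_Value (field 1).
const stringValueValue = 1

func main() {
	// Hand-encode google.protobuf.StringValue{value: "hello"}: a tag for
	// field 1 with the length-delimited wire type, then the payload.
	b := protowire.AppendTag(nil, stringValueValue, protowire.BytesType)
	b = protowire.AppendString(b, "hello")

	// Walk it back the same way: read the tag, then consume the value
	// for that wire type.
	num, typ, n := protowire.ConsumeTag(b)
	v, _ := protowire.ConsumeString(b[n:])
	fmt.Println(num, typ, v) // 1 2 hello
}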
+const ( + StringValue_Value = 1 // optional string +) + +// Field numbers for google.protobuf.BytesValue. +const ( + BytesValue_Value = 1 // optional bytes +) diff --git a/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go new file mode 100644 index 0000000..517c4e2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/fieldsort/fieldsort.go @@ -0,0 +1,40 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package fieldsort defines an ordering of fields. +// +// The ordering defined by this package matches the historic behavior of the proto +// package, placing extensions first and oneofs last. +// +// There is no guarantee about stability of the wire encoding, and users should not +// depend on the order defined in this package as it is subject to change without +// notice. +package fieldsort + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Less returns true if field a comes before field j in ordered wire marshal output. +func Less(a, b protoreflect.FieldDescriptor) bool { + ea := a.IsExtension() + eb := b.IsExtension() + oa := a.ContainingOneof() + ob := b.ContainingOneof() + switch { + case ea != eb: + return ea + case oa != nil && ob != nil: + if oa == ob { + return a.Number() < b.Number() + } + return oa.Index() < ob.Index() + case oa != nil && !oa.IsSynthetic(): + return false + case ob != nil && !ob.IsSynthetic(): + return true + default: + return a.Number() < b.Number() + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/build.go b/vendor/google.golang.org/protobuf/internal/filedesc/build.go new file mode 100644 index 0000000..462d384 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/build.go @@ -0,0 +1,155 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filedesc provides functionality for constructing descriptors. +package filedesc + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder construct a protoreflect.FileDescriptor from the raw descriptor. +type Builder struct { + // GoPackagePath is the Go package path that is invoking this builder. + GoPackagePath string + + // RawDescriptor is the wire-encoded bytes of FileDescriptorProto + // and must be populated. + RawDescriptor []byte + + // NumEnums is the total number of enums declared in the file. + NumEnums int32 + // NumMessages is the total number of messages declared in the file. + // It includes the implicit message declarations for map entries. + NumMessages int32 + // NumExtensions is the total number of extensions declared in the file. + NumExtensions int32 + // NumServices is the total number of services declared in the file. + NumServices int32 + + // TypeResolver resolves extension field types for descriptor options. + // If nil, it uses protoregistry.GlobalTypes. + TypeResolver interface { + preg.ExtensionTypeResolver + } + + // FileRegistry is use to lookup file, enum, and message dependencies. + // Once constructed, the file descriptor is registered here. 
+ // If nil, it uses protoregistry.GlobalFiles. + FileRegistry interface { + FindFileByPath(string) (protoreflect.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +} + +// resolverByIndex is an interface Builder.FileRegistry may implement. +// If so, it permits looking up an enum or message dependency based on the +// sub-list and element index into filetype.Builder.DependencyIndexes. +type resolverByIndex interface { + FindEnumByIndex(int32, int32, []Enum, []Message) pref.EnumDescriptor + FindMessageByIndex(int32, int32, []Enum, []Message) pref.MessageDescriptor +} + +// Indexes of each sub-list in filetype.Builder.DependencyIndexes. +const ( + listFieldDeps int32 = iota + listExtTargets + listExtDeps + listMethInDeps + listMethOutDeps +) + +// Out is the output of the Builder. +type Out struct { + File pref.FileDescriptor + + // Enums is all enum descriptors in "flattened ordering". + Enums []Enum + // Messages is all message descriptors in "flattened ordering". + // It includes the implicit message declarations for map entries. + Messages []Message + // Extensions is all extension descriptors in "flattened ordering". + Extensions []Extension + // Service is all service descriptors in "flattened ordering". + Services []Service +} + +// Build constructs a FileDescriptor given the parameters set in Builder. +// It assumes that the inputs are well-formed and panics if any inconsistencies +// are encountered. +// +// If NumEnums+NumMessages+NumExtensions+NumServices is zero, +// then Build automatically derives them from the raw descriptor. +func (db Builder) Build() (out Out) { + // Populate the counts if uninitialized. + if db.NumEnums+db.NumMessages+db.NumExtensions+db.NumServices == 0 { + db.unmarshalCounts(db.RawDescriptor, true) + } + + // Initialize resolvers and registries if unpopulated. + if db.TypeResolver == nil { + db.TypeResolver = preg.GlobalTypes + } + if db.FileRegistry == nil { + db.FileRegistry = preg.GlobalFiles + } + + fd := newRawFile(db) + out.File = fd + out.Enums = fd.allEnums + out.Messages = fd.allMessages + out.Extensions = fd.allExtensions + out.Services = fd.allServices + + if err := db.FileRegistry.RegisterFile(fd); err != nil { + panic(err) + } + return out +} + +// unmarshalCounts counts the number of enum, message, extension, and service +// declarations in the raw message, which is either a FileDescriptorProto +// or a MessageDescriptorProto depending on whether isFile is set. 
+func (db *Builder) unmarshalCounts(b []byte, isFile bool) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + if isFile { + switch num { + case fieldnum.FileDescriptorProto_EnumType: + db.NumEnums++ + case fieldnum.FileDescriptorProto_MessageType: + db.unmarshalCounts(v, false) + db.NumMessages++ + case fieldnum.FileDescriptorProto_Extension: + db.NumExtensions++ + case fieldnum.FileDescriptorProto_Service: + db.NumServices++ + } + } else { + switch num { + case fieldnum.DescriptorProto_EnumType: + db.NumEnums++ + case fieldnum.DescriptorProto_NestedType: + db.unmarshalCounts(v, false) + db.NumMessages++ + case fieldnum.DescriptorProto_Extension: + db.NumExtensions++ + } + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go new file mode 100644 index 0000000..2540bef --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -0,0 +1,613 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "bytes" + "fmt" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/defval" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// The types in this file may have a suffix: +// • L0: Contains fields common to all descriptors (except File) and +// must be initialized up front. +// • L1: Contains fields specific to a descriptor and +// must be initialized up front. +// • L2: Contains fields that are lazily initialized when constructing +// from the raw file descriptor. When constructing as a literal, the L2 +// fields must be initialized up front. +// +// The types are exported so that packages like reflect/protodesc can +// directly construct descriptors. 
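The L0/L1/L2 layering described above is what keeps descriptor access cheap: L1 data is populated when the File is built, while anything reached through lazyInit is decoded from the raw descriptor on first use. A minimal sketch of how that shows up through the public protoreflect API, assuming the generated descriptorpb package is available (any generated message would do):

package main

import (
	"fmt"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	// Every generated message can return the FileDescriptor of its .proto
	// file; internally that descriptor is backed by the File type below.
	var fd protoreflect.FileDescriptor = (&descriptorpb.FileDescriptorProto{}).
		ProtoReflect().Descriptor().ParentFile()

	fmt.Println(fd.Path())    // eagerly initialized (L1)
	fmt.Println(fd.Package()) // eagerly initialized (L1)
	// Imports live in L2, so this call triggers the lazy decode.
	fmt.Println(fd.Imports().Len())
}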
+ +type ( + File struct { + fileRaw + L1 FileL1 + + once uint32 // atomically set if L2 is valid + mu sync.Mutex // protects L2 + L2 *FileL2 + } + FileL1 struct { + Syntax pref.Syntax + Path string + Package pref.FullName + + Enums Enums + Messages Messages + Extensions Extensions + Services Services + } + FileL2 struct { + Options func() pref.ProtoMessage + Imports FileImports + Locations SourceLocations + } +) + +func (fd *File) ParentFile() pref.FileDescriptor { return fd } +func (fd *File) Parent() pref.Descriptor { return nil } +func (fd *File) Index() int { return 0 } +func (fd *File) Syntax() pref.Syntax { return fd.L1.Syntax } +func (fd *File) Name() pref.Name { return fd.L1.Package.Name() } +func (fd *File) FullName() pref.FullName { return fd.L1.Package } +func (fd *File) IsPlaceholder() bool { return false } +func (fd *File) Options() pref.ProtoMessage { + if f := fd.lazyInit().Options; f != nil { + return f() + } + return descopts.File +} +func (fd *File) Path() string { return fd.L1.Path } +func (fd *File) Package() pref.FullName { return fd.L1.Package } +func (fd *File) Imports() pref.FileImports { return &fd.lazyInit().Imports } +func (fd *File) Enums() pref.EnumDescriptors { return &fd.L1.Enums } +func (fd *File) Messages() pref.MessageDescriptors { return &fd.L1.Messages } +func (fd *File) Extensions() pref.ExtensionDescriptors { return &fd.L1.Extensions } +func (fd *File) Services() pref.ServiceDescriptors { return &fd.L1.Services } +func (fd *File) SourceLocations() pref.SourceLocations { return &fd.lazyInit().Locations } +func (fd *File) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *File) ProtoType(pref.FileDescriptor) {} +func (fd *File) ProtoInternal(pragma.DoNotImplement) {} + +func (fd *File) lazyInit() *FileL2 { + if atomic.LoadUint32(&fd.once) == 0 { + fd.lazyInitOnce() + } + return fd.L2 +} + +func (fd *File) lazyInitOnce() { + fd.mu.Lock() + if fd.L2 == nil { + fd.lazyRawInit() // recursively initializes all L2 structures + } + atomic.StoreUint32(&fd.once, 1) + fd.mu.Unlock() +} + +// ProtoLegacyRawDesc is a pseudo-internal API for allowing the v1 code +// to be able to retrieve the raw descriptor. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *File) ProtoLegacyRawDesc() []byte { + return fd.builder.RawDescriptor +} + +// GoPackagePath is a pseudo-internal API for determining the Go package path +// that this file descriptor is declared in. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. 
+func (fd *File) GoPackagePath() string { + return fd.builder.GoPackagePath +} + +type ( + Enum struct { + Base + L1 EnumL1 + L2 *EnumL2 // protected by fileDesc.once + } + EnumL1 struct { + eagerValues bool // controls whether EnumL2.Values is already populated + } + EnumL2 struct { + Options func() pref.ProtoMessage + Values EnumValues + ReservedNames Names + ReservedRanges EnumRanges + } + + EnumValue struct { + Base + L1 EnumValueL1 + } + EnumValueL1 struct { + Options func() pref.ProtoMessage + Number pref.EnumNumber + } +) + +func (ed *Enum) Options() pref.ProtoMessage { + if f := ed.lazyInit().Options; f != nil { + return f() + } + return descopts.Enum +} +func (ed *Enum) Values() pref.EnumValueDescriptors { + if ed.L1.eagerValues { + return &ed.L2.Values + } + return &ed.lazyInit().Values +} +func (ed *Enum) ReservedNames() pref.Names { return &ed.lazyInit().ReservedNames } +func (ed *Enum) ReservedRanges() pref.EnumRanges { return &ed.lazyInit().ReservedRanges } +func (ed *Enum) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *Enum) ProtoType(pref.EnumDescriptor) {} +func (ed *Enum) lazyInit() *EnumL2 { + ed.L0.ParentFile.lazyInit() // implicitly initializes L2 + return ed.L2 +} + +func (ed *EnumValue) Options() pref.ProtoMessage { + if f := ed.L1.Options; f != nil { + return f() + } + return descopts.EnumValue +} +func (ed *EnumValue) Number() pref.EnumNumber { return ed.L1.Number } +func (ed *EnumValue) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, ed) } +func (ed *EnumValue) ProtoType(pref.EnumValueDescriptor) {} + +type ( + Message struct { + Base + L1 MessageL1 + L2 *MessageL2 // protected by fileDesc.once + } + MessageL1 struct { + Enums Enums + Messages Messages + Extensions Extensions + IsMapEntry bool // promoted from google.protobuf.MessageOptions + IsMessageSet bool // promoted from google.protobuf.MessageOptions + } + MessageL2 struct { + Options func() pref.ProtoMessage + Fields Fields + Oneofs Oneofs + ReservedNames Names + ReservedRanges FieldRanges + RequiredNumbers FieldNumbers // must be consistent with Fields.Cardinality + ExtensionRanges FieldRanges + ExtensionRangeOptions []func() pref.ProtoMessage // must be same length as ExtensionRanges + } + + Field struct { + Base + L1 FieldL1 + } + FieldL1 struct { + Options func() pref.ProtoMessage + Number pref.FieldNumber + Cardinality pref.Cardinality // must be consistent with Message.RequiredNumbers + Kind pref.Kind + JSONName jsonName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsWeak bool // promoted from google.protobuf.FieldOptions + HasPacked bool // promoted from google.protobuf.FieldOptions + IsPacked bool // promoted from google.protobuf.FieldOptions + HasEnforceUTF8 bool // promoted from google.protobuf.FieldOptions + EnforceUTF8 bool // promoted from google.protobuf.FieldOptions + Default defaultValue + ContainingOneof pref.OneofDescriptor // must be consistent with Message.Oneofs.Fields + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } + + Oneof struct { + Base + L1 OneofL1 + } + OneofL1 struct { + Options func() pref.ProtoMessage + Fields OneofFields // must be consistent with Message.Fields.ContainingOneof + } +) + +func (md *Message) Options() pref.ProtoMessage { + if f := md.lazyInit().Options; f != nil { + return f() + } + return descopts.Message +} +func (md *Message) IsMapEntry() bool { return md.L1.IsMapEntry } +func (md *Message) Fields() pref.FieldDescriptors { return &md.lazyInit().Fields } +func (md *Message) 
Oneofs() pref.OneofDescriptors { return &md.lazyInit().Oneofs } +func (md *Message) ReservedNames() pref.Names { return &md.lazyInit().ReservedNames } +func (md *Message) ReservedRanges() pref.FieldRanges { return &md.lazyInit().ReservedRanges } +func (md *Message) RequiredNumbers() pref.FieldNumbers { return &md.lazyInit().RequiredNumbers } +func (md *Message) ExtensionRanges() pref.FieldRanges { return &md.lazyInit().ExtensionRanges } +func (md *Message) ExtensionRangeOptions(i int) pref.ProtoMessage { + if f := md.lazyInit().ExtensionRangeOptions[i]; f != nil { + return f() + } + return descopts.ExtensionRange +} +func (md *Message) Enums() pref.EnumDescriptors { return &md.L1.Enums } +func (md *Message) Messages() pref.MessageDescriptors { return &md.L1.Messages } +func (md *Message) Extensions() pref.ExtensionDescriptors { return &md.L1.Extensions } +func (md *Message) ProtoType(pref.MessageDescriptor) {} +func (md *Message) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Message) lazyInit() *MessageL2 { + md.L0.ParentFile.lazyInit() // implicitly initializes L2 + return md.L2 +} + +// IsMessageSet is a pseudo-internal API for checking whether a message +// should serialize in the proto1 message format. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (md *Message) IsMessageSet() bool { + return md.L1.IsMessageSet +} + +func (fd *Field) Options() pref.ProtoMessage { + if f := fd.L1.Options; f != nil { + return f() + } + return descopts.Field +} +func (fd *Field) Number() pref.FieldNumber { return fd.L1.Number } +func (fd *Field) Cardinality() pref.Cardinality { return fd.L1.Cardinality } +func (fd *Field) Kind() pref.Kind { return fd.L1.Kind } +func (fd *Field) HasJSONName() bool { return fd.L1.JSONName.has } +func (fd *Field) JSONName() string { return fd.L1.JSONName.get(fd) } +func (fd *Field) HasPresence() bool { + return fd.L1.Cardinality != pref.Repeated && (fd.L0.ParentFile.L1.Syntax == pref.Proto2 || fd.L1.Message != nil || fd.L1.ContainingOneof != nil) +} +func (fd *Field) HasOptionalKeyword() bool { + return (fd.L0.ParentFile.L1.Syntax == pref.Proto2 && fd.L1.Cardinality == pref.Optional && fd.L1.ContainingOneof == nil) || fd.L1.IsProto3Optional +} +func (fd *Field) IsPacked() bool { + if !fd.L1.HasPacked && fd.L0.ParentFile.L1.Syntax != pref.Proto2 && fd.L1.Cardinality == pref.Repeated { + switch fd.L1.Kind { + case pref.StringKind, pref.BytesKind, pref.MessageKind, pref.GroupKind: + default: + return true + } + } + return fd.L1.IsPacked +} +func (fd *Field) IsExtension() bool { return false } +func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsList() bool { return fd.Cardinality() == pref.Repeated && !fd.IsMap() } +func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } +func (fd *Field) MapKey() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(1) +} +func (fd *Field) MapValue() pref.FieldDescriptor { + if !fd.IsMap() { + return nil + } + return fd.Message().Fields().ByNumber(2) +} +func (fd *Field) HasDefault() bool { return fd.L1.Default.has } +func (fd *Field) Default() pref.Value { return fd.L1.Default.get(fd) } +func (fd *Field) DefaultEnumValue() pref.EnumValueDescriptor { return fd.L1.Default.enum } +func (fd *Field) ContainingOneof() pref.OneofDescriptor { return fd.L1.ContainingOneof } +func (fd *Field) ContainingMessage() pref.MessageDescriptor { + return 
fd.L0.Parent.(pref.MessageDescriptor) +} +func (fd *Field) Enum() pref.EnumDescriptor { + return fd.L1.Enum +} +func (fd *Field) Message() pref.MessageDescriptor { + if fd.L1.IsWeak { + if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + } + return fd.L1.Message +} +func (fd *Field) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, fd) } +func (fd *Field) ProtoType(pref.FieldDescriptor) {} + +// EnforceUTF8 is a pseudo-internal API to determine whether to enforce UTF-8 +// validation for the string field. This exists for Google-internal use only +// since proto3 did not enforce UTF-8 validity prior to the open-source release. +// If this method does not exist, the default is to enforce valid UTF-8. +// +// WARNING: This method is exempt from the compatibility promise and may be +// removed in the future without warning. +func (fd *Field) EnforceUTF8() bool { + if fd.L1.HasEnforceUTF8 { + return fd.L1.EnforceUTF8 + } + return fd.L0.ParentFile.L1.Syntax == pref.Proto3 +} + +func (od *Oneof) IsSynthetic() bool { + return od.L0.ParentFile.L1.Syntax == pref.Proto3 && len(od.L1.Fields.List) == 1 && od.L1.Fields.List[0].HasOptionalKeyword() +} +func (od *Oneof) Options() pref.ProtoMessage { + if f := od.L1.Options; f != nil { + return f() + } + return descopts.Oneof +} +func (od *Oneof) Fields() pref.FieldDescriptors { return &od.L1.Fields } +func (od *Oneof) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, od) } +func (od *Oneof) ProtoType(pref.OneofDescriptor) {} + +type ( + Extension struct { + Base + L1 ExtensionL1 + L2 *ExtensionL2 // protected by fileDesc.once + } + ExtensionL1 struct { + Number pref.FieldNumber + Extendee pref.MessageDescriptor + Cardinality pref.Cardinality + Kind pref.Kind + } + ExtensionL2 struct { + Options func() pref.ProtoMessage + JSONName jsonName + IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto + IsPacked bool // promoted from google.protobuf.FieldOptions + Default defaultValue + Enum pref.EnumDescriptor + Message pref.MessageDescriptor + } +) + +func (xd *Extension) Options() pref.ProtoMessage { + if f := xd.lazyInit().Options; f != nil { + return f() + } + return descopts.Field +} +func (xd *Extension) Number() pref.FieldNumber { return xd.L1.Number } +func (xd *Extension) Cardinality() pref.Cardinality { return xd.L1.Cardinality } +func (xd *Extension) Kind() pref.Kind { return xd.L1.Kind } +func (xd *Extension) HasJSONName() bool { return xd.lazyInit().JSONName.has } +func (xd *Extension) JSONName() string { return xd.lazyInit().JSONName.get(xd) } +func (xd *Extension) HasPresence() bool { return xd.L1.Cardinality != pref.Repeated } +func (xd *Extension) HasOptionalKeyword() bool { + return (xd.L0.ParentFile.L1.Syntax == pref.Proto2 && xd.L1.Cardinality == pref.Optional) || xd.lazyInit().IsProto3Optional +} +func (xd *Extension) IsPacked() bool { return xd.lazyInit().IsPacked } +func (xd *Extension) IsExtension() bool { return true } +func (xd *Extension) IsWeak() bool { return false } +func (xd *Extension) IsList() bool { return xd.Cardinality() == pref.Repeated } +func (xd *Extension) IsMap() bool { return false } +func (xd *Extension) MapKey() pref.FieldDescriptor { return nil } +func (xd *Extension) MapValue() pref.FieldDescriptor { return nil } +func (xd *Extension) HasDefault() bool { return xd.lazyInit().Default.has } +func (xd *Extension) Default() pref.Value { return xd.lazyInit().Default.get(xd) } +func (xd *Extension) 
DefaultEnumValue() pref.EnumValueDescriptor { return xd.lazyInit().Default.enum } +func (xd *Extension) ContainingOneof() pref.OneofDescriptor { return nil } +func (xd *Extension) ContainingMessage() pref.MessageDescriptor { return xd.L1.Extendee } +func (xd *Extension) Enum() pref.EnumDescriptor { return xd.lazyInit().Enum } +func (xd *Extension) Message() pref.MessageDescriptor { return xd.lazyInit().Message } +func (xd *Extension) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, xd) } +func (xd *Extension) ProtoType(pref.FieldDescriptor) {} +func (xd *Extension) ProtoInternal(pragma.DoNotImplement) {} +func (xd *Extension) lazyInit() *ExtensionL2 { + xd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return xd.L2 +} + +type ( + Service struct { + Base + L1 ServiceL1 + L2 *ServiceL2 // protected by fileDesc.once + } + ServiceL1 struct{} + ServiceL2 struct { + Options func() pref.ProtoMessage + Methods Methods + } + + Method struct { + Base + L1 MethodL1 + } + MethodL1 struct { + Options func() pref.ProtoMessage + Input pref.MessageDescriptor + Output pref.MessageDescriptor + IsStreamingClient bool + IsStreamingServer bool + } +) + +func (sd *Service) Options() pref.ProtoMessage { + if f := sd.lazyInit().Options; f != nil { + return f() + } + return descopts.Service +} +func (sd *Service) Methods() pref.MethodDescriptors { return &sd.lazyInit().Methods } +func (sd *Service) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, sd) } +func (sd *Service) ProtoType(pref.ServiceDescriptor) {} +func (sd *Service) ProtoInternal(pragma.DoNotImplement) {} +func (sd *Service) lazyInit() *ServiceL2 { + sd.L0.ParentFile.lazyInit() // implicitly initializes L2 + return sd.L2 +} + +func (md *Method) Options() pref.ProtoMessage { + if f := md.L1.Options; f != nil { + return f() + } + return descopts.Method +} +func (md *Method) Input() pref.MessageDescriptor { return md.L1.Input } +func (md *Method) Output() pref.MessageDescriptor { return md.L1.Output } +func (md *Method) IsStreamingClient() bool { return md.L1.IsStreamingClient } +func (md *Method) IsStreamingServer() bool { return md.L1.IsStreamingServer } +func (md *Method) Format(s fmt.State, r rune) { descfmt.FormatDesc(s, r, md) } +func (md *Method) ProtoType(pref.MethodDescriptor) {} +func (md *Method) ProtoInternal(pragma.DoNotImplement) {} + +// Surrogate files are can be used to create standalone descriptors +// where the syntax is only information derived from the parent file. 
+var ( + SurrogateProto2 = &File{L1: FileL1{Syntax: pref.Proto2}, L2: &FileL2{}} + SurrogateProto3 = &File{L1: FileL1{Syntax: pref.Proto3}, L2: &FileL2{}} +) + +type ( + Base struct { + L0 BaseL0 + } + BaseL0 struct { + FullName pref.FullName // must be populated + ParentFile *File // must be populated + Parent pref.Descriptor + Index int + } +) + +func (d *Base) Name() pref.Name { return d.L0.FullName.Name() } +func (d *Base) FullName() pref.FullName { return d.L0.FullName } +func (d *Base) ParentFile() pref.FileDescriptor { + if d.L0.ParentFile == SurrogateProto2 || d.L0.ParentFile == SurrogateProto3 { + return nil // surrogate files are not real parents + } + return d.L0.ParentFile +} +func (d *Base) Parent() pref.Descriptor { return d.L0.Parent } +func (d *Base) Index() int { return d.L0.Index } +func (d *Base) Syntax() pref.Syntax { return d.L0.ParentFile.Syntax() } +func (d *Base) IsPlaceholder() bool { return false } +func (d *Base) ProtoInternal(pragma.DoNotImplement) {} + +type jsonName struct { + has bool + once sync.Once + name string +} + +// Init initializes the name. It is exported for use by other internal packages. +func (js *jsonName) Init(s string) { + js.has = true + js.name = s +} + +func (js *jsonName) get(fd pref.FieldDescriptor) string { + if !js.has { + js.once.Do(func() { + js.name = strs.JSONCamelCase(string(fd.Name())) + }) + } + return js.name +} + +func DefaultValue(v pref.Value, ev pref.EnumValueDescriptor) defaultValue { + dv := defaultValue{has: v.IsValid(), val: v, enum: ev} + if b, ok := v.Interface().([]byte); ok { + // Store a copy of the default bytes, so that we can detect + // accidental mutations of the original value. + dv.bytes = append([]byte(nil), b...) + } + return dv +} + +func unmarshalDefault(b []byte, k pref.Kind, pf *File, ed pref.EnumDescriptor) defaultValue { + var evs pref.EnumValueDescriptors + if k == pref.EnumKind { + // If the enum is declared within the same file, be careful not to + // blindly call the Values method, lest we bind ourselves in a deadlock. + if e, ok := ed.(*Enum); ok && e.L0.ParentFile == pf { + evs = &e.L2.Values + } else { + evs = ed.Values() + } + + // If we are unable to resolve the enum dependency, use a placeholder + // enum value since we will not be able to parse the default value. + if ed.IsPlaceholder() && pref.Name(b).IsValid() { + v := pref.ValueOfEnum(0) + ev := PlaceholderEnumValue(ed.FullName().Parent().Append(pref.Name(b))) + return DefaultValue(v, ev) + } + } + + v, ev, err := defval.Unmarshal(string(b), k, evs, defval.Descriptor) + if err != nil { + panic(err) + } + return DefaultValue(v, ev) +} + +type defaultValue struct { + has bool + val pref.Value + enum pref.EnumValueDescriptor + bytes []byte +} + +func (dv *defaultValue) get(fd pref.FieldDescriptor) pref.Value { + // Return the zero value as the default if unpopulated. 
+ if !dv.has { + if fd.Cardinality() == pref.Repeated { + return pref.Value{} + } + switch fd.Kind() { + case pref.BoolKind: + return pref.ValueOfBool(false) + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + return pref.ValueOfInt32(0) + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + return pref.ValueOfInt64(0) + case pref.Uint32Kind, pref.Fixed32Kind: + return pref.ValueOfUint32(0) + case pref.Uint64Kind, pref.Fixed64Kind: + return pref.ValueOfUint64(0) + case pref.FloatKind: + return pref.ValueOfFloat32(0) + case pref.DoubleKind: + return pref.ValueOfFloat64(0) + case pref.StringKind: + return pref.ValueOfString("") + case pref.BytesKind: + return pref.ValueOfBytes(nil) + case pref.EnumKind: + if evs := fd.Enum().Values(); evs.Len() > 0 { + return pref.ValueOfEnum(evs.Get(0).Number()) + } + return pref.ValueOfEnum(0) + } + } + + if len(dv.bytes) > 0 && !bytes.Equal(dv.bytes, dv.val.Bytes()) { + // TODO: Avoid panic if we're running with the race detector + // and instead spawn a goroutine that periodically resets + // this value back to the original to induce a race. + panic(fmt.Sprintf("detected mutation on the default bytes for %v", fd.FullName())) + } + return dv.val +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go new file mode 100644 index 0000000..c0cddf8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_init.go @@ -0,0 +1,471 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// fileRaw is a data struct used when initializing a file descriptor from +// a raw FileDescriptorProto. +type fileRaw struct { + builder Builder + allEnums []Enum + allMessages []Message + allExtensions []Extension + allServices []Service +} + +func newRawFile(db Builder) *File { + fd := &File{fileRaw: fileRaw{builder: db}} + fd.initDecls(db.NumEnums, db.NumMessages, db.NumExtensions, db.NumServices) + fd.unmarshalSeed(db.RawDescriptor) + + // Extended message targets are eagerly resolved since registration + // needs this information at program init time. + for i := range fd.allExtensions { + xd := &fd.allExtensions[i] + xd.L1.Extendee = fd.resolveMessageDependency(xd.L1.Extendee, listExtTargets, int32(i)) + } + + fd.checkDecls() + return fd +} + +// initDecls pre-allocates slices for the exact number of enums, messages +// (including map entries), extensions, and services declared in the proto file. +// This is done to avoid regrowing the slice, which would change the address +// for any previously seen declaration. +// +// The alloc methods "allocates" slices by pulling from the capacity. 
+func (fd *File) initDecls(numEnums, numMessages, numExtensions, numServices int32) { + fd.allEnums = make([]Enum, 0, numEnums) + fd.allMessages = make([]Message, 0, numMessages) + fd.allExtensions = make([]Extension, 0, numExtensions) + fd.allServices = make([]Service, 0, numServices) +} + +func (fd *File) allocEnums(n int) []Enum { + total := len(fd.allEnums) + es := fd.allEnums[total : total+n] + fd.allEnums = fd.allEnums[:total+n] + return es +} +func (fd *File) allocMessages(n int) []Message { + total := len(fd.allMessages) + ms := fd.allMessages[total : total+n] + fd.allMessages = fd.allMessages[:total+n] + return ms +} +func (fd *File) allocExtensions(n int) []Extension { + total := len(fd.allExtensions) + xs := fd.allExtensions[total : total+n] + fd.allExtensions = fd.allExtensions[:total+n] + return xs +} +func (fd *File) allocServices(n int) []Service { + total := len(fd.allServices) + xs := fd.allServices[total : total+n] + fd.allServices = fd.allServices[:total+n] + return xs +} + +// checkDecls performs a sanity check that the expected number of expected +// declarations matches the number that were found in the descriptor proto. +func (fd *File) checkDecls() { + switch { + case len(fd.allEnums) != cap(fd.allEnums): + case len(fd.allMessages) != cap(fd.allMessages): + case len(fd.allExtensions) != cap(fd.allExtensions): + case len(fd.allServices) != cap(fd.allServices): + default: + return + } + panic("mismatching cardinality") +} + +func (fd *File) unmarshalSeed(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions, numServices int + var posEnums, posMessages, posExtensions, posServices int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.FileDescriptorProto_Syntax: + switch string(v) { + case "proto2": + fd.L1.Syntax = pref.Proto2 + case "proto3": + fd.L1.Syntax = pref.Proto3 + default: + panic("invalid syntax") + } + case fieldnum.FileDescriptorProto_Name: + fd.L1.Path = sb.MakeString(v) + case fieldnum.FileDescriptorProto_Package: + fd.L1.Package = pref.FullName(sb.MakeString(v)) + case fieldnum.FileDescriptorProto_EnumType: + if prevField != fieldnum.FileDescriptorProto_EnumType { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case fieldnum.FileDescriptorProto_MessageType: + if prevField != fieldnum.FileDescriptorProto_MessageType { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case fieldnum.FileDescriptorProto_Extension: + if prevField != fieldnum.FileDescriptorProto_Extension { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case fieldnum.FileDescriptorProto_Service: + if prevField != fieldnum.FileDescriptorProto_Service { + if numServices > 0 { + panic("non-contiguous repeated field") + } + posServices = len(b0) - len(b) - n - m + } + numServices++ + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // If syntax is missing, it is assumed to be proto2. 
+ if fd.L1.Syntax == 0 { + fd.L1.Syntax = pref.Proto2 + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". + if numEnums > 0 { + fd.L1.Enums.List = fd.allocEnums(numEnums) + } + if numMessages > 0 { + fd.L1.Messages.List = fd.allocMessages(numMessages) + } + if numExtensions > 0 { + fd.L1.Extensions.List = fd.allocExtensions(numExtensions) + } + if numServices > 0 { + fd.L1.Services.List = fd.allocServices(numServices) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range fd.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Enums.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range fd.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Messages.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range fd.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Extensions.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } + if numServices > 0 { + b := b0[posServices:] + for i := range fd.L1.Services.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + fd.L1.Services.List[i].unmarshalSeed(v, sb, fd, fd, i) + b = b[n+m:] + } + } +} + +func (ed *Enum) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + ed.L0.ParentFile = pf + ed.L0.Parent = pd + ed.L0.Index = i + + var numValues int + for b := b; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.EnumDescriptorProto_Name: + ed.L0.FullName = appendFullName(sb, pd.FullName(), v) + case fieldnum.EnumDescriptorProto_Value: + numValues++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + + // Only construct enum value descriptors for top-level enums since + // they are needed for registration. 
+ if pd != pf { + return + } + ed.L1.eagerValues = true + ed.L2 = new(EnumL2) + ed.L2.Values.List = make([]EnumValue, numValues) + for i := 0; len(b) > 0; { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.EnumDescriptorProto_Value: + ed.L2.Values.List[i].unmarshalFull(v, sb, pf, ed, i) + i++ + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (md *Message) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var prevField pref.FieldNumber + var numEnums, numMessages, numExtensions int + var posEnums, posMessages, posExtensions int + b0 := b + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.DescriptorProto_Name: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case fieldnum.DescriptorProto_EnumType: + if prevField != fieldnum.DescriptorProto_EnumType { + if numEnums > 0 { + panic("non-contiguous repeated field") + } + posEnums = len(b0) - len(b) - n - m + } + numEnums++ + case fieldnum.DescriptorProto_NestedType: + if prevField != fieldnum.DescriptorProto_NestedType { + if numMessages > 0 { + panic("non-contiguous repeated field") + } + posMessages = len(b0) - len(b) - n - m + } + numMessages++ + case fieldnum.DescriptorProto_Extension: + if prevField != fieldnum.DescriptorProto_Extension { + if numExtensions > 0 { + panic("non-contiguous repeated field") + } + posExtensions = len(b0) - len(b) - n - m + } + numExtensions++ + case fieldnum.DescriptorProto_Options: + md.unmarshalSeedOptions(v) + } + prevField = num + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + prevField = -1 // ignore known field numbers of unknown wire type + } + } + + // Must allocate all declarations before parsing each descriptor type + // to ensure we handled all descriptors in "flattened ordering". 
+ if numEnums > 0 { + md.L1.Enums.List = pf.allocEnums(numEnums) + } + if numMessages > 0 { + md.L1.Messages.List = pf.allocMessages(numMessages) + } + if numExtensions > 0 { + md.L1.Extensions.List = pf.allocExtensions(numExtensions) + } + + if numEnums > 0 { + b := b0[posEnums:] + for i := range md.L1.Enums.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Enums.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numMessages > 0 { + b := b0[posMessages:] + for i := range md.L1.Messages.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Messages.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } + if numExtensions > 0 { + b := b0[posExtensions:] + for i := range md.L1.Extensions.List { + _, n := protowire.ConsumeVarint(b) + v, m := protowire.ConsumeBytes(b[n:]) + md.L1.Extensions.List[i].unmarshalSeed(v, sb, pf, md, i) + b = b[n+m:] + } + } +} + +func (md *Message) unmarshalSeedOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.MessageOptions_MapEntry: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case fieldnum.MessageOptions_MessageSetWireFormat: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (xd *Extension) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + xd.L0.ParentFile = pf + xd.L0.Parent = pd + xd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.FieldDescriptorProto_Number: + xd.L1.Number = pref.FieldNumber(v) + case fieldnum.FieldDescriptorProto_Label: + xd.L1.Cardinality = pref.Cardinality(v) + case fieldnum.FieldDescriptorProto_Type: + xd.L1.Kind = pref.Kind(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.FieldDescriptorProto_Name: + xd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case fieldnum.FieldDescriptorProto_Extendee: + xd.L1.Extendee = PlaceholderMessage(makeFullName(sb, v)) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalSeed(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + sd.L0.ParentFile = pf + sd.L0.Parent = pd + sd.L0.Index = i + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.ServiceDescriptorProto_Name: + sd.L0.FullName = appendFullName(sb, pd.FullName(), v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +var nameBuilderPool = sync.Pool{ + New: func() interface{} { return new(strs.Builder) }, +} + +func getBuilder() *strs.Builder { + return nameBuilderPool.Get().(*strs.Builder) +} +func putBuilder(b *strs.Builder) { + nameBuilderPool.Put(b) +} + +// makeFullName converts b to a protoreflect.FullName, +// where b must start with a leading dot. +func makeFullName(sb *strs.Builder, b []byte) pref.FullName { + if len(b) == 0 || b[0] != '.' 
{ + panic("name reference must be fully qualified") + } + return pref.FullName(sb.MakeString(b[1:])) +} + +func appendFullName(sb *strs.Builder, prefix pref.FullName, suffix []byte) pref.FullName { + return sb.AppendFullName(prefix, pref.Name(strs.UnsafeString(suffix))) +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go new file mode 100644 index 0000000..bc21594 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -0,0 +1,704 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/fieldnum" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func (fd *File) lazyRawInit() { + fd.unmarshalFull(fd.builder.RawDescriptor) + fd.resolveMessages() + fd.resolveExtensions() + fd.resolveServices() +} + +func (file *File) resolveMessages() { + var depIdx int32 + for i := range file.allMessages { + md := &file.allMessages[i] + + // Resolve message field dependencies. + for j := range md.L2.Fields.List { + fd := &md.L2.Fields.List[j] + + // Weak fields are resolved upon actual use. + if fd.L1.IsWeak { + continue + } + + // Resolve message field dependency. + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = file.resolveEnumDependency(fd.L1.Enum, listFieldDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = file.resolveMessageDependency(fd.L1.Message, listFieldDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := fd.L1.Default.val; v.IsValid() { + fd.L1.Default = unmarshalDefault(v.Bytes(), fd.L1.Kind, file, fd.L1.Enum) + } + } + } +} + +func (file *File) resolveExtensions() { + var depIdx int32 + for i := range file.allExtensions { + xd := &file.allExtensions[i] + + // Resolve extension field dependency. + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = file.resolveEnumDependency(xd.L2.Enum, listExtDeps, depIdx) + depIdx++ + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = file.resolveMessageDependency(xd.L2.Message, listExtDeps, depIdx) + depIdx++ + } + + // Default is resolved here since it depends on Enum being resolved. + if v := xd.L2.Default.val; v.IsValid() { + xd.L2.Default = unmarshalDefault(v.Bytes(), xd.L1.Kind, file, xd.L2.Enum) + } + } +} + +func (file *File) resolveServices() { + var depIdx int32 + for i := range file.allServices { + sd := &file.allServices[i] + + // Resolve method dependencies. 
+ for j := range sd.L2.Methods.List { + md := &sd.L2.Methods.List[j] + md.L1.Input = file.resolveMessageDependency(md.L1.Input, listMethInDeps, depIdx) + md.L1.Output = file.resolveMessageDependency(md.L1.Output, listMethOutDeps, depIdx) + depIdx++ + } + } +} + +func (file *File) resolveEnumDependency(ed pref.EnumDescriptor, i, j int32) pref.EnumDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if ed2 := r.FindEnumByIndex(i, j, file.allEnums, file.allMessages); ed2 != nil { + return ed2 + } + } + for i := range file.allEnums { + if ed2 := &file.allEnums[i]; ed2.L0.FullName == ed.FullName() { + return ed2 + } + } + if d, _ := r.FindDescriptorByName(ed.FullName()); d != nil { + return d.(pref.EnumDescriptor) + } + return ed +} + +func (file *File) resolveMessageDependency(md pref.MessageDescriptor, i, j int32) pref.MessageDescriptor { + r := file.builder.FileRegistry + if r, ok := r.(resolverByIndex); ok { + if md2 := r.FindMessageByIndex(i, j, file.allEnums, file.allMessages); md2 != nil { + return md2 + } + } + for i := range file.allMessages { + if md2 := &file.allMessages[i]; md2.L0.FullName == md.FullName() { + return md2 + } + } + if d, _ := r.FindDescriptorByName(md.FullName()); d != nil { + return d.(pref.MessageDescriptor) + } + return md +} + +func (fd *File) unmarshalFull(b []byte) { + sb := getBuilder() + defer putBuilder(sb) + + var enumIdx, messageIdx, extensionIdx, serviceIdx int + var rawOptions []byte + fd.L2 = new(FileL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.FileDescriptorProto_PublicDependency: + fd.L2.Imports[v].IsPublic = true + case fieldnum.FileDescriptorProto_WeakDependency: + fd.L2.Imports[v].IsWeak = true + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.FileDescriptorProto_Dependency: + path := sb.MakeString(v) + imp, _ := fd.builder.FileRegistry.FindFileByPath(path) + if imp == nil { + imp = PlaceholderFile(path) + } + fd.L2.Imports = append(fd.L2.Imports, pref.FileImport{FileDescriptor: imp}) + case fieldnum.FileDescriptorProto_EnumType: + fd.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case fieldnum.FileDescriptorProto_MessageType: + fd.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case fieldnum.FileDescriptorProto_Extension: + fd.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + extensionIdx++ + case fieldnum.FileDescriptorProto_Service: + fd.L1.Services.List[serviceIdx].unmarshalFull(v, sb) + serviceIdx++ + case fieldnum.FileDescriptorProto_Options: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + fd.L2.Options = fd.builder.optionsUnmarshaler(&descopts.File, rawOptions) +} + +func (ed *Enum) unmarshalFull(b []byte, sb *strs.Builder) { + var rawValues [][]byte + var rawOptions []byte + if !ed.L1.eagerValues { + ed.L2 = new(EnumL2) + } + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.EnumDescriptorProto_Value: + rawValues = append(rawValues, v) + case fieldnum.EnumDescriptorProto_ReservedName: + ed.L2.ReservedNames.List = append(ed.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case fieldnum.EnumDescriptorProto_ReservedRange: + 
ed.L2.ReservedRanges.List = append(ed.L2.ReservedRanges.List, unmarshalEnumReservedRange(v)) + case fieldnum.EnumDescriptorProto_Options: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if !ed.L1.eagerValues && len(rawValues) > 0 { + ed.L2.Values.List = make([]EnumValue, len(rawValues)) + for i, b := range rawValues { + ed.L2.Values.List[i].unmarshalFull(b, sb, ed.L0.ParentFile, ed, i) + } + } + ed.L2.Options = ed.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Enum, rawOptions) +} + +func unmarshalEnumReservedRange(b []byte) (r [2]pref.EnumNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.EnumDescriptorProto_EnumReservedRange_Start: + r[0] = pref.EnumNumber(v) + case fieldnum.EnumDescriptorProto_EnumReservedRange_End: + r[1] = pref.EnumNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func (vd *EnumValue) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + vd.L0.ParentFile = pf + vd.L0.Parent = pd + vd.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.EnumValueDescriptorProto_Number: + vd.L1.Number = pref.EnumNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.EnumValueDescriptorProto_Name: + // NOTE: Enum values are in the same scope as the enum parent. + vd.L0.FullName = appendFullName(sb, pd.Parent().FullName(), v) + case fieldnum.EnumValueDescriptorProto_Options: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + vd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.EnumValue, rawOptions) +} + +func (md *Message) unmarshalFull(b []byte, sb *strs.Builder) { + var rawFields, rawOneofs [][]byte + var enumIdx, messageIdx, extensionIdx int + var rawOptions []byte + md.L2 = new(MessageL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.DescriptorProto_Field: + rawFields = append(rawFields, v) + case fieldnum.DescriptorProto_OneofDecl: + rawOneofs = append(rawOneofs, v) + case fieldnum.DescriptorProto_ReservedName: + md.L2.ReservedNames.List = append(md.L2.ReservedNames.List, pref.Name(sb.MakeString(v))) + case fieldnum.DescriptorProto_ReservedRange: + md.L2.ReservedRanges.List = append(md.L2.ReservedRanges.List, unmarshalMessageReservedRange(v)) + case fieldnum.DescriptorProto_ExtensionRange: + r, rawOptions := unmarshalMessageExtensionRange(v) + opts := md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.ExtensionRange, rawOptions) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, r) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, opts) + case fieldnum.DescriptorProto_EnumType: + md.L1.Enums.List[enumIdx].unmarshalFull(v, sb) + enumIdx++ + case fieldnum.DescriptorProto_NestedType: + md.L1.Messages.List[messageIdx].unmarshalFull(v, sb) + messageIdx++ + case fieldnum.DescriptorProto_Extension: + md.L1.Extensions.List[extensionIdx].unmarshalFull(v, sb) + 
extensionIdx++ + case fieldnum.DescriptorProto_Options: + md.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawFields) > 0 || len(rawOneofs) > 0 { + md.L2.Fields.List = make([]Field, len(rawFields)) + md.L2.Oneofs.List = make([]Oneof, len(rawOneofs)) + for i, b := range rawFields { + fd := &md.L2.Fields.List[i] + fd.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + if fd.L1.Cardinality == pref.Required { + md.L2.RequiredNumbers.List = append(md.L2.RequiredNumbers.List, fd.L1.Number) + } + } + for i, b := range rawOneofs { + od := &md.L2.Oneofs.List[i] + od.unmarshalFull(b, sb, md.L0.ParentFile, md, i) + } + } + md.L2.Options = md.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Message, rawOptions) +} + +func (md *Message) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.MessageOptions_MapEntry: + md.L1.IsMapEntry = protowire.DecodeBool(v) + case fieldnum.MessageOptions_MessageSetWireFormat: + md.L1.IsMessageSet = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func unmarshalMessageReservedRange(b []byte) (r [2]pref.FieldNumber) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.DescriptorProto_ReservedRange_Start: + r[0] = pref.FieldNumber(v) + case fieldnum.DescriptorProto_ReservedRange_End: + r[1] = pref.FieldNumber(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r +} + +func unmarshalMessageExtensionRange(b []byte) (r [2]pref.FieldNumber, rawOptions []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.DescriptorProto_ExtensionRange_Start: + r[0] = pref.FieldNumber(v) + case fieldnum.DescriptorProto_ExtensionRange_End: + r[1] = pref.FieldNumber(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.DescriptorProto_ExtensionRange_Options: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + return r, rawOptions +} + +func (fd *Field) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + fd.L0.ParentFile = pf + fd.L0.Parent = pd + fd.L0.Index = i + + var rawTypeName []byte + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.FieldDescriptorProto_Number: + fd.L1.Number = pref.FieldNumber(v) + case fieldnum.FieldDescriptorProto_Label: + fd.L1.Cardinality = pref.Cardinality(v) + case fieldnum.FieldDescriptorProto_Type: + fd.L1.Kind = pref.Kind(v) + case fieldnum.FieldDescriptorProto_OneofIndex: + // In Message.unmarshalFull, we allocate slices for both + // the field and oneof descriptors before unmarshaling either + // of them. This ensures pointers to slice elements are stable. 
+ od := &pd.(*Message).L2.Oneofs.List[v] + od.L1.Fields.List = append(od.L1.Fields.List, fd) + if fd.L1.ContainingOneof != nil { + panic("oneof type already set") + } + fd.L1.ContainingOneof = od + case fieldnum.FieldDescriptorProto_Proto3Optional: + fd.L1.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.FieldDescriptorProto_Name: + fd.L0.FullName = appendFullName(sb, pd.FullName(), v) + case fieldnum.FieldDescriptorProto_JsonName: + fd.L1.JSONName.Init(sb.MakeString(v)) + case fieldnum.FieldDescriptorProto_DefaultValue: + fd.L1.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveMessages + case fieldnum.FieldDescriptorProto_TypeName: + rawTypeName = v + case fieldnum.FieldDescriptorProto_Options: + fd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch fd.L1.Kind { + case pref.EnumKind: + fd.L1.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + fd.L1.Message = PlaceholderMessage(name) + } + } + fd.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (fd *Field) unmarshalOptions(b []byte) { + const FieldOptions_EnforceUTF8 = 13 + + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.FieldOptions_Packed: + fd.L1.HasPacked = true + fd.L1.IsPacked = protowire.DecodeBool(v) + case fieldnum.FieldOptions_Weak: + fd.L1.IsWeak = protowire.DecodeBool(v) + case FieldOptions_EnforceUTF8: + fd.L1.HasEnforceUTF8 = true + fd.L1.EnforceUTF8 = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (od *Oneof) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + od.L0.ParentFile = pf + od.L0.Parent = pd + od.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.OneofDescriptorProto_Name: + od.L0.FullName = appendFullName(sb, pd.FullName(), v) + case fieldnum.OneofDescriptorProto_Options: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + od.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Oneof, rawOptions) +} + +func (xd *Extension) unmarshalFull(b []byte, sb *strs.Builder) { + var rawTypeName []byte + var rawOptions []byte + xd.L2 = new(ExtensionL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.FieldDescriptorProto_Proto3Optional: + xd.L2.IsProto3Optional = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.FieldDescriptorProto_JsonName: + xd.L2.JSONName.Init(sb.MakeString(v)) + case fieldnum.FieldDescriptorProto_DefaultValue: + xd.L2.Default.val = pref.ValueOfBytes(v) // temporarily store as bytes; later resolved in resolveExtensions + case fieldnum.FieldDescriptorProto_TypeName: + rawTypeName = v + case 
fieldnum.FieldDescriptorProto_Options: + xd.unmarshalOptions(v) + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if rawTypeName != nil { + name := makeFullName(sb, rawTypeName) + switch xd.L1.Kind { + case pref.EnumKind: + xd.L2.Enum = PlaceholderEnum(name) + case pref.MessageKind, pref.GroupKind: + xd.L2.Message = PlaceholderMessage(name) + } + } + xd.L2.Options = xd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Field, rawOptions) +} + +func (xd *Extension) unmarshalOptions(b []byte) { + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.FieldOptions_Packed: + xd.L2.IsPacked = protowire.DecodeBool(v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } +} + +func (sd *Service) unmarshalFull(b []byte, sb *strs.Builder) { + var rawMethods [][]byte + var rawOptions []byte + sd.L2 = new(ServiceL2) + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.ServiceDescriptorProto_Method: + rawMethods = append(rawMethods, v) + case fieldnum.ServiceDescriptorProto_Options: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + if len(rawMethods) > 0 { + sd.L2.Methods.List = make([]Method, len(rawMethods)) + for i, b := range rawMethods { + sd.L2.Methods.List[i].unmarshalFull(b, sb, sd.L0.ParentFile, sd, i) + } + } + sd.L2.Options = sd.L0.ParentFile.builder.optionsUnmarshaler(&descopts.Service, rawOptions) +} + +func (md *Method) unmarshalFull(b []byte, sb *strs.Builder, pf *File, pd pref.Descriptor, i int) { + md.L0.ParentFile = pf + md.L0.Parent = pd + md.L0.Index = i + + var rawOptions []byte + for len(b) > 0 { + num, typ, n := protowire.ConsumeTag(b) + b = b[n:] + switch typ { + case protowire.VarintType: + v, m := protowire.ConsumeVarint(b) + b = b[m:] + switch num { + case fieldnum.MethodDescriptorProto_ClientStreaming: + md.L1.IsStreamingClient = protowire.DecodeBool(v) + case fieldnum.MethodDescriptorProto_ServerStreaming: + md.L1.IsStreamingServer = protowire.DecodeBool(v) + } + case protowire.BytesType: + v, m := protowire.ConsumeBytes(b) + b = b[m:] + switch num { + case fieldnum.MethodDescriptorProto_Name: + md.L0.FullName = appendFullName(sb, pd.FullName(), v) + case fieldnum.MethodDescriptorProto_InputType: + md.L1.Input = PlaceholderMessage(makeFullName(sb, v)) + case fieldnum.MethodDescriptorProto_OutputType: + md.L1.Output = PlaceholderMessage(makeFullName(sb, v)) + case fieldnum.MethodDescriptorProto_Options: + rawOptions = appendOptions(rawOptions, v) + } + default: + m := protowire.ConsumeFieldValue(num, typ, b) + b = b[m:] + } + } + md.L1.Options = pf.builder.optionsUnmarshaler(&descopts.Method, rawOptions) +} + +// appendOptions appends src to dst, where the returned slice is never nil. +// This is necessary to distinguish between empty and unpopulated options. +func appendOptions(dst, src []byte) []byte { + if dst == nil { + dst = []byte{} + } + return append(dst, src...) +} + +// optionsUnmarshaler constructs a lazy unmarshal function for an options message. +// +// The type of message to unmarshal to is passed as a pointer since the +// vars in descopts may not yet be populated at the time this function is called. 
+func (db *Builder) optionsUnmarshaler(p *pref.ProtoMessage, b []byte) func() pref.ProtoMessage { + if b == nil { + return nil + } + var opts pref.ProtoMessage + var once sync.Once + return func() pref.ProtoMessage { + once.Do(func() { + if *p == nil { + panic("Descriptor.Options called without importing the descriptor package") + } + opts = reflect.New(reflect.TypeOf(*p).Elem()).Interface().(pref.ProtoMessage) + if err := (proto.UnmarshalOptions{ + AllowPartial: true, + Resolver: db.TypeResolver, + }).Unmarshal(b, opts); err != nil { + panic(err) + } + }) + return opts + } +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go new file mode 100644 index 0000000..1b7089b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list.go @@ -0,0 +1,286 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "fmt" + "math" + "sort" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type FileImports []pref.FileImport + +func (p *FileImports) Len() int { return len(*p) } +func (p *FileImports) Get(i int) pref.FileImport { return (*p)[i] } +func (p *FileImports) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FileImports) ProtoInternal(pragma.DoNotImplement) {} + +type Names struct { + List []pref.Name + once sync.Once + has map[pref.Name]int // protected by once +} + +func (p *Names) Len() int { return len(p.List) } +func (p *Names) Get(i int) pref.Name { return p.List[i] } +func (p *Names) Has(s pref.Name) bool { return p.lazyInit().has[s] > 0 } +func (p *Names) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *Names) ProtoInternal(pragma.DoNotImplement) {} +func (p *Names) lazyInit() *Names { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.Name]int, len(p.List)) + for _, s := range p.List { + p.has[s] = p.has[s] + 1 + } + } + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *Names) CheckValid() error { + for s, n := range p.lazyInit().has { + switch { + case n > 1: + return errors.New("duplicate name: %q", s) + case false && !s.IsValid(): + // NOTE: The C++ implementation does not validate the identifier. + // See https://github.com/protocolbuffers/protobuf/issues/6335. 
+ return errors.New("invalid name: %q", s) + } + } + return nil +} + +type EnumRanges struct { + List [][2]pref.EnumNumber // start inclusive; end inclusive + once sync.Once + sorted [][2]pref.EnumNumber // protected by once +} + +func (p *EnumRanges) Len() int { return len(p.List) } +func (p *EnumRanges) Get(i int) [2]pref.EnumNumber { return p.List[i] } +func (p *EnumRanges) Has(n pref.EnumNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := enumRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *EnumRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *EnumRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumRanges) lazyInit() *EnumRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of names with an error message +// that completes the sentence: "ranges is invalid because it has ..." +func (p *EnumRanges) CheckValid() error { + var rp enumRange + for i, r := range p.lazyInit().sorted { + r := enumRange(r) + switch { + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +type enumRange [2]protoreflect.EnumNumber + +func (r enumRange) Start() protoreflect.EnumNumber { return r[0] } // inclusive +func (r enumRange) End() protoreflect.EnumNumber { return r[1] } // inclusive +func (r enumRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldRanges struct { + List [][2]pref.FieldNumber // start inclusive; end exclusive + once sync.Once + sorted [][2]pref.FieldNumber // protected by once +} + +func (p *FieldRanges) Len() int { return len(p.List) } +func (p *FieldRanges) Get(i int) [2]pref.FieldNumber { return p.List[i] } +func (p *FieldRanges) Has(n pref.FieldNumber) bool { + for ls := p.lazyInit().sorted; len(ls) > 0; { + i := len(ls) / 2 + switch r := fieldRange(ls[i]); { + case n < r.Start(): + ls = ls[:i] // search lower + case n > r.End(): + ls = ls[i+1:] // search upper + default: + return true + } + } + return false +} +func (p *FieldRanges) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldRanges) ProtoInternal(pragma.DoNotImplement) {} +func (p *FieldRanges) lazyInit() *FieldRanges { + p.once.Do(func() { + p.sorted = append(p.sorted, p.List...) + sort.Slice(p.sorted, func(i, j int) bool { + return p.sorted[i][0] < p.sorted[j][0] + }) + }) + return p +} + +// CheckValid reports any errors with the set of ranges with an error message +// that completes the sentence: "ranges is invalid because it has ..." 
+func (p *FieldRanges) CheckValid(isMessageSet bool) error { + var rp fieldRange + for i, r := range p.lazyInit().sorted { + r := fieldRange(r) + switch { + case !isValidFieldNumber(r.Start(), isMessageSet): + return errors.New("invalid field number: %d", r.Start()) + case !isValidFieldNumber(r.End(), isMessageSet): + return errors.New("invalid field number: %d", r.End()) + case !(r.Start() <= r.End()): + return errors.New("invalid range: %v", r) + case !(rp.End() < r.Start()) && i > 0: + return errors.New("overlapping ranges: %v with %v", rp, r) + } + rp = r + } + return nil +} + +// isValidFieldNumber reports whether the field number is valid. +// Unlike the FieldNumber.IsValid method, it allows ranges that cover the +// reserved number range. +func isValidFieldNumber(n protoreflect.FieldNumber, isMessageSet bool) bool { + if isMessageSet { + return protowire.MinValidNumber <= n && n <= math.MaxInt32 + } + return protowire.MinValidNumber <= n && n <= protowire.MaxValidNumber +} + +// CheckOverlap reports an error if p and q overlap. +func (p *FieldRanges) CheckOverlap(q *FieldRanges) error { + rps := p.lazyInit().sorted + rqs := q.lazyInit().sorted + for pi, qi := 0, 0; pi < len(rps) && qi < len(rqs); { + rp := fieldRange(rps[pi]) + rq := fieldRange(rqs[qi]) + if !(rp.End() < rq.Start() || rq.End() < rp.Start()) { + return errors.New("overlapping ranges: %v with %v", rp, rq) + } + if rp.Start() < rq.Start() { + pi++ + } else { + qi++ + } + } + return nil +} + +type fieldRange [2]protoreflect.FieldNumber + +func (r fieldRange) Start() protoreflect.FieldNumber { return r[0] } // inclusive +func (r fieldRange) End() protoreflect.FieldNumber { return r[1] - 1 } // inclusive +func (r fieldRange) String() string { + if r.Start() == r.End() { + return fmt.Sprintf("%d", r.Start()) + } + return fmt.Sprintf("%d to %d", r.Start(), r.End()) +} + +type FieldNumbers struct { + List []pref.FieldNumber + once sync.Once + has map[pref.FieldNumber]struct{} // protected by once +} + +func (p *FieldNumbers) Len() int { return len(p.List) } +func (p *FieldNumbers) Get(i int) pref.FieldNumber { return p.List[i] } +func (p *FieldNumbers) Has(n pref.FieldNumber) bool { + p.once.Do(func() { + if len(p.List) > 0 { + p.has = make(map[pref.FieldNumber]struct{}, len(p.List)) + for _, n := range p.List { + p.has[n] = struct{}{} + } + } + }) + _, ok := p.has[n] + return ok +} +func (p *FieldNumbers) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *FieldNumbers) ProtoInternal(pragma.DoNotImplement) {} + +type OneofFields struct { + List []pref.FieldDescriptor + once sync.Once + byName map[pref.Name]pref.FieldDescriptor // protected by once + byJSON map[string]pref.FieldDescriptor // protected by once + byNum map[pref.FieldNumber]pref.FieldDescriptor // protected by once +} + +func (p *OneofFields) Len() int { return len(p.List) } +func (p *OneofFields) Get(i int) pref.FieldDescriptor { return p.List[i] } +func (p *OneofFields) ByName(s pref.Name) pref.FieldDescriptor { return p.lazyInit().byName[s] } +func (p *OneofFields) ByJSONName(s string) pref.FieldDescriptor { return p.lazyInit().byJSON[s] } +func (p *OneofFields) ByNumber(n pref.FieldNumber) pref.FieldDescriptor { return p.lazyInit().byNum[n] } +func (p *OneofFields) Format(s fmt.State, r rune) { descfmt.FormatList(s, r, p) } +func (p *OneofFields) ProtoInternal(pragma.DoNotImplement) {} + +func (p *OneofFields) lazyInit() *OneofFields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[pref.Name]pref.FieldDescriptor, 
len(p.List)) + p.byJSON = make(map[string]pref.FieldDescriptor, len(p.List)) + p.byNum = make(map[pref.FieldNumber]pref.FieldDescriptor, len(p.List)) + for _, f := range p.List { + // Field names and numbers are guaranteed to be unique. + p.byName[f.Name()] = f + p.byJSON[f.JSONName()] = f + p.byNum[f.Number()] = f + } + } + }) + return p +} + +type SourceLocations struct { + List []pref.SourceLocation +} + +func (p *SourceLocations) Len() int { return len(p.List) } +func (p *SourceLocations) Get(i int) pref.SourceLocation { return p.List[i] } +func (p *SourceLocations) ProtoInternal(pragma.DoNotImplement) {} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go new file mode 100644 index 0000000..6a8825e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_list_gen.go @@ -0,0 +1,345 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package filedesc + +import ( + "fmt" + "sync" + + "google.golang.org/protobuf/internal/descfmt" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +type Enums struct { + List []Enum + once sync.Once + byName map[protoreflect.Name]*Enum // protected by once +} + +func (p *Enums) Len() int { + return len(p.List) +} +func (p *Enums) Get(i int) protoreflect.EnumDescriptor { + return &p.List[i] +} +func (p *Enums) ByName(s protoreflect.Name) protoreflect.EnumDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Enums) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Enums) ProtoInternal(pragma.DoNotImplement) {} +func (p *Enums) lazyInit() *Enums { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Enum, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type EnumValues struct { + List []EnumValue + once sync.Once + byName map[protoreflect.Name]*EnumValue // protected by once + byNum map[protoreflect.EnumNumber]*EnumValue // protected by once +} + +func (p *EnumValues) Len() int { + return len(p.List) +} +func (p *EnumValues) Get(i int) protoreflect.EnumValueDescriptor { + return &p.List[i] +} +func (p *EnumValues) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *EnumValues) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *EnumValues) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *EnumValues) ProtoInternal(pragma.DoNotImplement) {} +func (p *EnumValues) lazyInit() *EnumValues { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*EnumValue, len(p.List)) + p.byNum = make(map[protoreflect.EnumNumber]*EnumValue, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Messages struct { + List []Message + once sync.Once + byName map[protoreflect.Name]*Message // protected by once +} + +func (p 
*Messages) Len() int { + return len(p.List) +} +func (p *Messages) Get(i int) protoreflect.MessageDescriptor { + return &p.List[i] +} +func (p *Messages) ByName(s protoreflect.Name) protoreflect.MessageDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Messages) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Messages) ProtoInternal(pragma.DoNotImplement) {} +func (p *Messages) lazyInit() *Messages { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Message, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Fields struct { + List []Field + once sync.Once + byName map[protoreflect.Name]*Field // protected by once + byJSON map[string]*Field // protected by once + byNum map[protoreflect.FieldNumber]*Field // protected by once +} + +func (p *Fields) Len() int { + return len(p.List) +} +func (p *Fields) Get(i int) protoreflect.FieldDescriptor { + return &p.List[i] +} +func (p *Fields) ByName(s protoreflect.Name) protoreflect.FieldDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByJSONName(s string) protoreflect.FieldDescriptor { + if d := p.lazyInit().byJSON[s]; d != nil { + return d + } + return nil +} +func (p *Fields) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor { + if d := p.lazyInit().byNum[n]; d != nil { + return d + } + return nil +} +func (p *Fields) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Fields) ProtoInternal(pragma.DoNotImplement) {} +func (p *Fields) lazyInit() *Fields { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Field, len(p.List)) + p.byJSON = make(map[string]*Field, len(p.List)) + p.byNum = make(map[protoreflect.FieldNumber]*Field, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + if _, ok := p.byJSON[d.JSONName()]; !ok { + p.byJSON[d.JSONName()] = d + } + if _, ok := p.byNum[d.Number()]; !ok { + p.byNum[d.Number()] = d + } + } + } + }) + return p +} + +type Oneofs struct { + List []Oneof + once sync.Once + byName map[protoreflect.Name]*Oneof // protected by once +} + +func (p *Oneofs) Len() int { + return len(p.List) +} +func (p *Oneofs) Get(i int) protoreflect.OneofDescriptor { + return &p.List[i] +} +func (p *Oneofs) ByName(s protoreflect.Name) protoreflect.OneofDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Oneofs) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Oneofs) ProtoInternal(pragma.DoNotImplement) {} +func (p *Oneofs) lazyInit() *Oneofs { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Oneof, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Extensions struct { + List []Extension + once sync.Once + byName map[protoreflect.Name]*Extension // protected by once +} + +func (p *Extensions) Len() int { + return len(p.List) +} +func (p *Extensions) Get(i int) protoreflect.ExtensionDescriptor { + return &p.List[i] +} +func (p *Extensions) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Extensions) Format(s 
fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Extensions) ProtoInternal(pragma.DoNotImplement) {} +func (p *Extensions) lazyInit() *Extensions { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Extension, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Services struct { + List []Service + once sync.Once + byName map[protoreflect.Name]*Service // protected by once +} + +func (p *Services) Len() int { + return len(p.List) +} +func (p *Services) Get(i int) protoreflect.ServiceDescriptor { + return &p.List[i] +} +func (p *Services) ByName(s protoreflect.Name) protoreflect.ServiceDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Services) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Services) ProtoInternal(pragma.DoNotImplement) {} +func (p *Services) lazyInit() *Services { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Service, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} + +type Methods struct { + List []Method + once sync.Once + byName map[protoreflect.Name]*Method // protected by once +} + +func (p *Methods) Len() int { + return len(p.List) +} +func (p *Methods) Get(i int) protoreflect.MethodDescriptor { + return &p.List[i] +} +func (p *Methods) ByName(s protoreflect.Name) protoreflect.MethodDescriptor { + if d := p.lazyInit().byName[s]; d != nil { + return d + } + return nil +} +func (p *Methods) Format(s fmt.State, r rune) { + descfmt.FormatList(s, r, p) +} +func (p *Methods) ProtoInternal(pragma.DoNotImplement) {} +func (p *Methods) lazyInit() *Methods { + p.once.Do(func() { + if len(p.List) > 0 { + p.byName = make(map[protoreflect.Name]*Method, len(p.List)) + for i := range p.List { + d := &p.List[i] + if _, ok := p.byName[d.Name()]; !ok { + p.byName[d.Name()] = d + } + } + } + }) + return p +} diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go new file mode 100644 index 0000000..dbf2c60 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filedesc/placeholder.go @@ -0,0 +1,107 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package filedesc + +import ( + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +var ( + emptyNames = new(Names) + emptyEnumRanges = new(EnumRanges) + emptyFieldRanges = new(FieldRanges) + emptyFieldNumbers = new(FieldNumbers) + emptySourceLocations = new(SourceLocations) + + emptyFiles = new(FileImports) + emptyMessages = new(Messages) + emptyFields = new(Fields) + emptyOneofs = new(Oneofs) + emptyEnums = new(Enums) + emptyEnumValues = new(EnumValues) + emptyExtensions = new(Extensions) + emptyServices = new(Services) +) + +// PlaceholderFile is a placeholder, representing only the file path. 
+type PlaceholderFile string + +func (f PlaceholderFile) ParentFile() pref.FileDescriptor { return f } +func (f PlaceholderFile) Parent() pref.Descriptor { return nil } +func (f PlaceholderFile) Index() int { return 0 } +func (f PlaceholderFile) Syntax() pref.Syntax { return 0 } +func (f PlaceholderFile) Name() pref.Name { return "" } +func (f PlaceholderFile) FullName() pref.FullName { return "" } +func (f PlaceholderFile) IsPlaceholder() bool { return true } +func (f PlaceholderFile) Options() pref.ProtoMessage { return descopts.File } +func (f PlaceholderFile) Path() string { return string(f) } +func (f PlaceholderFile) Package() pref.FullName { return "" } +func (f PlaceholderFile) Imports() pref.FileImports { return emptyFiles } +func (f PlaceholderFile) Messages() pref.MessageDescriptors { return emptyMessages } +func (f PlaceholderFile) Enums() pref.EnumDescriptors { return emptyEnums } +func (f PlaceholderFile) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (f PlaceholderFile) Services() pref.ServiceDescriptors { return emptyServices } +func (f PlaceholderFile) SourceLocations() pref.SourceLocations { return emptySourceLocations } +func (f PlaceholderFile) ProtoType(pref.FileDescriptor) { return } +func (f PlaceholderFile) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnum is a placeholder, representing only the full name. +type PlaceholderEnum pref.FullName + +func (e PlaceholderEnum) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnum) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnum) Index() int { return 0 } +func (e PlaceholderEnum) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnum) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnum) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnum) IsPlaceholder() bool { return true } +func (e PlaceholderEnum) Options() pref.ProtoMessage { return descopts.Enum } +func (e PlaceholderEnum) Values() pref.EnumValueDescriptors { return emptyEnumValues } +func (e PlaceholderEnum) ReservedNames() pref.Names { return emptyNames } +func (e PlaceholderEnum) ReservedRanges() pref.EnumRanges { return emptyEnumRanges } +func (e PlaceholderEnum) ProtoType(pref.EnumDescriptor) { return } +func (e PlaceholderEnum) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderEnumValue is a placeholder, representing only the full name. +type PlaceholderEnumValue pref.FullName + +func (e PlaceholderEnumValue) ParentFile() pref.FileDescriptor { return nil } +func (e PlaceholderEnumValue) Parent() pref.Descriptor { return nil } +func (e PlaceholderEnumValue) Index() int { return 0 } +func (e PlaceholderEnumValue) Syntax() pref.Syntax { return 0 } +func (e PlaceholderEnumValue) Name() pref.Name { return pref.FullName(e).Name() } +func (e PlaceholderEnumValue) FullName() pref.FullName { return pref.FullName(e) } +func (e PlaceholderEnumValue) IsPlaceholder() bool { return true } +func (e PlaceholderEnumValue) Options() pref.ProtoMessage { return descopts.EnumValue } +func (e PlaceholderEnumValue) Number() pref.EnumNumber { return 0 } +func (e PlaceholderEnumValue) ProtoType(pref.EnumValueDescriptor) { return } +func (e PlaceholderEnumValue) ProtoInternal(pragma.DoNotImplement) { return } + +// PlaceholderMessage is a placeholder, representing only the full name. 
+type PlaceholderMessage pref.FullName + +func (m PlaceholderMessage) ParentFile() pref.FileDescriptor { return nil } +func (m PlaceholderMessage) Parent() pref.Descriptor { return nil } +func (m PlaceholderMessage) Index() int { return 0 } +func (m PlaceholderMessage) Syntax() pref.Syntax { return 0 } +func (m PlaceholderMessage) Name() pref.Name { return pref.FullName(m).Name() } +func (m PlaceholderMessage) FullName() pref.FullName { return pref.FullName(m) } +func (m PlaceholderMessage) IsPlaceholder() bool { return true } +func (m PlaceholderMessage) Options() pref.ProtoMessage { return descopts.Message } +func (m PlaceholderMessage) IsMapEntry() bool { return false } +func (m PlaceholderMessage) Fields() pref.FieldDescriptors { return emptyFields } +func (m PlaceholderMessage) Oneofs() pref.OneofDescriptors { return emptyOneofs } +func (m PlaceholderMessage) ReservedNames() pref.Names { return emptyNames } +func (m PlaceholderMessage) ReservedRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) RequiredNumbers() pref.FieldNumbers { return emptyFieldNumbers } +func (m PlaceholderMessage) ExtensionRanges() pref.FieldRanges { return emptyFieldRanges } +func (m PlaceholderMessage) ExtensionRangeOptions(int) pref.ProtoMessage { panic("index out of range") } +func (m PlaceholderMessage) Messages() pref.MessageDescriptors { return emptyMessages } +func (m PlaceholderMessage) Enums() pref.EnumDescriptors { return emptyEnums } +func (m PlaceholderMessage) Extensions() pref.ExtensionDescriptors { return emptyExtensions } +func (m PlaceholderMessage) ProtoType(pref.MessageDescriptor) { return } +func (m PlaceholderMessage) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go new file mode 100644 index 0000000..0a0dd35 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -0,0 +1,297 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package filetype provides functionality for wrapping descriptors +// with Go type information. +package filetype + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + fdesc "google.golang.org/protobuf/internal/filedesc" + pimpl "google.golang.org/protobuf/internal/impl" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +// Builder constructs type descriptors from a raw file descriptor +// and associated Go types for each enum and message declaration. +// +// +// Flattened Ordering +// +// The protobuf type system represents declarations as a tree. Certain nodes in +// the tree require us to either associate it with a concrete Go type or to +// resolve a dependency, which is information that must be provided separately +// since it cannot be derived from the file descriptor alone. +// +// However, representing a tree as Go literals is difficult to simply do in a +// space and time efficient way. Thus, we store them as a flattened list of +// objects where the serialization order from the tree-based form is important. 
+// +// The "flattened ordering" is defined as a tree traversal of all enum, message, +// extension, and service declarations using the following algorithm: +// +// def VisitFileDecls(fd): +// for e in fd.Enums: yield e +// for m in fd.Messages: yield m +// for x in fd.Extensions: yield x +// for s in fd.Services: yield s +// for m in fd.Messages: yield from VisitMessageDecls(m) +// +// def VisitMessageDecls(md): +// for e in md.Enums: yield e +// for m in md.Messages: yield m +// for x in md.Extensions: yield x +// for m in md.Messages: yield from VisitMessageDecls(m) +// +// The traversal starts at the root file descriptor and yields each direct +// declaration within each node before traversing into sub-declarations +// that children themselves may have. +type Builder struct { + // File is the underlying file descriptor builder. + File fdesc.Builder + + // GoTypes is a unique set of the Go types for all declarations and + // dependencies. Each type is represented as a zero value of the Go type. + // + // Declarations are Go types generated for enums and messages directly + // declared (not publicly imported) in the proto source file. + // Messages for map entries are accounted for, but represented by nil. + // Enum declarations in "flattened ordering" come first, followed by + // message declarations in "flattened ordering". + // + // Dependencies are Go types for enums or messages referenced by + // message fields (excluding weak fields), for parent extended messages of + // extension fields, for enums or messages referenced by extension fields, + // and for input and output messages referenced by service methods. + // Dependencies must come after declarations, but the ordering of + // dependencies themselves is unspecified. + GoTypes []interface{} + + // DependencyIndexes is an ordered list of indexes into GoTypes for the + // dependencies of messages, extensions, or services. + // + // There are 5 sub-lists in "flattened ordering" concatenated back-to-back: + // 0. Message field dependencies: list of the enum or message type + // referred to by every message field. + // 1. Extension field targets: list of the extended parent message of + // every extension. + // 2. Extension field dependencies: list of the enum or message type + // referred to by every extension field. + // 3. Service method inputs: list of the input message type + // referred to by every service method. + // 4. Service method outputs: list of the output message type + // referred to by every service method. + // + // The offset into DependencyIndexes for the start of each sub-list + // is appended to the end in reverse order. + DependencyIndexes []int32 + + // EnumInfos is a list of enum infos in "flattened ordering". + EnumInfos []pimpl.EnumInfo + + // MessageInfos is a list of message infos in "flattened ordering". + // If provided, the GoType and PBType for each element is populated. + // + // Requirement: len(MessageInfos) == len(Build.Messages) + MessageInfos []pimpl.MessageInfo + + // ExtensionInfos is a list of extension infos in "flattened ordering". + // Each element is initialized and registered with the protoregistry package. + // + // Requirement: len(LegacyExtensions) == len(Build.Extensions) + ExtensionInfos []pimpl.ExtensionInfo + + // TypeRegistry is the registry to register each type descriptor. + // If nil, it uses protoregistry.GlobalTypes. 
+ TypeRegistry interface { + RegisterMessage(pref.MessageType) error + RegisterEnum(pref.EnumType) error + RegisterExtension(pref.ExtensionType) error + } +} + +// Out is the output of the builder. +type Out struct { + File pref.FileDescriptor +} + +func (tb Builder) Build() (out Out) { + // Replace the resolver with one that resolves dependencies by index, + // which is faster and more reliable than relying on the global registry. + if tb.File.FileRegistry == nil { + tb.File.FileRegistry = preg.GlobalFiles + } + tb.File.FileRegistry = &resolverByIndex{ + goTypes: tb.GoTypes, + depIdxs: tb.DependencyIndexes, + fileRegistry: tb.File.FileRegistry, + } + + // Initialize registry if unpopulated. + if tb.TypeRegistry == nil { + tb.TypeRegistry = preg.GlobalTypes + } + + fbOut := tb.File.Build() + out.File = fbOut.File + + // Process enums. + enumGoTypes := tb.GoTypes[:len(fbOut.Enums)] + if len(tb.EnumInfos) != len(fbOut.Enums) { + panic("mismatching enum lengths") + } + if len(fbOut.Enums) > 0 { + for i := range fbOut.Enums { + tb.EnumInfos[i] = pimpl.EnumInfo{ + GoReflectType: reflect.TypeOf(enumGoTypes[i]), + Desc: &fbOut.Enums[i], + } + // Register enum types. + if err := tb.TypeRegistry.RegisterEnum(&tb.EnumInfos[i]); err != nil { + panic(err) + } + } + } + + // Process messages. + messageGoTypes := tb.GoTypes[len(fbOut.Enums):][:len(fbOut.Messages)] + if len(tb.MessageInfos) != len(fbOut.Messages) { + panic("mismatching message lengths") + } + if len(fbOut.Messages) > 0 { + for i := range fbOut.Messages { + if messageGoTypes[i] == nil { + continue // skip map entry + } + + tb.MessageInfos[i].GoReflectType = reflect.TypeOf(messageGoTypes[i]) + tb.MessageInfos[i].Desc = &fbOut.Messages[i] + + // Register message types. + if err := tb.TypeRegistry.RegisterMessage(&tb.MessageInfos[i]); err != nil { + panic(err) + } + } + + // As a special-case for descriptor.proto, + // locally register concrete message type for the options. + if out.File.Path() == "google/protobuf/descriptor.proto" && out.File.Package() == "google.protobuf" { + for i := range fbOut.Messages { + switch fbOut.Messages[i].Name() { + case "FileOptions": + descopts.File = messageGoTypes[i].(pref.ProtoMessage) + case "EnumOptions": + descopts.Enum = messageGoTypes[i].(pref.ProtoMessage) + case "EnumValueOptions": + descopts.EnumValue = messageGoTypes[i].(pref.ProtoMessage) + case "MessageOptions": + descopts.Message = messageGoTypes[i].(pref.ProtoMessage) + case "FieldOptions": + descopts.Field = messageGoTypes[i].(pref.ProtoMessage) + case "OneofOptions": + descopts.Oneof = messageGoTypes[i].(pref.ProtoMessage) + case "ExtensionRangeOptions": + descopts.ExtensionRange = messageGoTypes[i].(pref.ProtoMessage) + case "ServiceOptions": + descopts.Service = messageGoTypes[i].(pref.ProtoMessage) + case "MethodOptions": + descopts.Method = messageGoTypes[i].(pref.ProtoMessage) + } + } + } + } + + // Process extensions. + if len(tb.ExtensionInfos) != len(fbOut.Extensions) { + panic("mismatching extension lengths") + } + var depIdx int32 + for i := range fbOut.Extensions { + // For enum and message kinds, determine the referent Go type so + // that we can construct their constructors. 
+ const listExtDeps = 2 + var goType reflect.Type + switch fbOut.Extensions[i].L1.Kind { + case pref.EnumKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + case pref.MessageKind, pref.GroupKind: + j := depIdxs.Get(tb.DependencyIndexes, listExtDeps, depIdx) + goType = reflect.TypeOf(tb.GoTypes[j]) + depIdx++ + default: + goType = goTypeForPBKind[fbOut.Extensions[i].L1.Kind] + } + if fbOut.Extensions[i].IsList() { + goType = reflect.SliceOf(goType) + } + + pimpl.InitExtensionInfo(&tb.ExtensionInfos[i], &fbOut.Extensions[i], goType) + + // Register extension types. + if err := tb.TypeRegistry.RegisterExtension(&tb.ExtensionInfos[i]); err != nil { + panic(err) + } + } + + return out +} + +var goTypeForPBKind = map[pref.Kind]reflect.Type{ + pref.BoolKind: reflect.TypeOf(bool(false)), + pref.Int32Kind: reflect.TypeOf(int32(0)), + pref.Sint32Kind: reflect.TypeOf(int32(0)), + pref.Sfixed32Kind: reflect.TypeOf(int32(0)), + pref.Int64Kind: reflect.TypeOf(int64(0)), + pref.Sint64Kind: reflect.TypeOf(int64(0)), + pref.Sfixed64Kind: reflect.TypeOf(int64(0)), + pref.Uint32Kind: reflect.TypeOf(uint32(0)), + pref.Fixed32Kind: reflect.TypeOf(uint32(0)), + pref.Uint64Kind: reflect.TypeOf(uint64(0)), + pref.Fixed64Kind: reflect.TypeOf(uint64(0)), + pref.FloatKind: reflect.TypeOf(float32(0)), + pref.DoubleKind: reflect.TypeOf(float64(0)), + pref.StringKind: reflect.TypeOf(string("")), + pref.BytesKind: reflect.TypeOf([]byte(nil)), +} + +type depIdxs []int32 + +// Get retrieves the jth element of the ith sub-list. +func (x depIdxs) Get(i, j int32) int32 { + return x[x[int32(len(x))-i-1]+j] +} + +type ( + resolverByIndex struct { + goTypes []interface{} + depIdxs depIdxs + fileRegistry + } + fileRegistry interface { + FindFileByPath(string) (pref.FileDescriptor, error) + FindDescriptorByName(pref.FullName) (pref.Descriptor, error) + RegisterFile(pref.FileDescriptor) error + } +) + +func (r *resolverByIndex) FindEnumByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.EnumDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); int(depIdx) < len(es)+len(ms) { + return &es[depIdx] + } else { + return pimpl.Export{}.EnumDescriptorOf(r.goTypes[depIdx]) + } +} + +func (r *resolverByIndex) FindMessageByIndex(i, j int32, es []fdesc.Enum, ms []fdesc.Message) pref.MessageDescriptor { + if depIdx := int(r.depIdxs.Get(i, j)); depIdx < len(es)+len(ms) { + return &ms[depIdx-len(es)] + } else { + return pimpl.Export{}.MessageDescriptorOf(r.goTypes[depIdx]) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go new file mode 100644 index 0000000..58372dd --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -0,0 +1,24 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package flags provides a set of flags controlled by build tags. +package flags + +// ProtoLegacy specifies whether to enable support for legacy functionality +// such as MessageSets, weak fields, and various other obscure behavior +// that is necessary to maintain backwards compatibility with proto1 or +// the pre-release variants of proto2 and proto3. +// +// This is disabled by default unless built with the "protolegacy" tag. +// +// WARNING: The compatibility agreement covers nothing provided by this flag. 
+// As such, functionality may suddenly be removed or changed at our discretion. +const ProtoLegacy = protoLegacy + +// LazyUnmarshalExtensions specifies whether to lazily unmarshal extensions. +// +// Lazy extension unmarshaling validates the contents of message-valued +// extension fields at unmarshal time, but defers creating the message +// structure until the extension is first accessed. +const LazyUnmarshalExtensions = ProtoLegacy diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go new file mode 100644 index 0000000..a72995f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_disable.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !protolegacy + +package flags + +const protoLegacy = false diff --git a/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go new file mode 100644 index 0000000..772e2f0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/flags/proto_legacy_enable.go @@ -0,0 +1,9 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build protolegacy + +package flags + +const protoLegacy = true diff --git a/vendor/google.golang.org/protobuf/internal/genname/name.go b/vendor/google.golang.org/protobuf/internal/genname/name.go new file mode 100644 index 0000000..f45509f --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/genname/name.go @@ -0,0 +1,25 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package genname contains constants for generated names. +package genname + +const ( + State = "state" + + SizeCache = "sizeCache" + SizeCacheA = "XXX_sizecache" + + WeakFields = "weakFields" + WeakFieldsA = "XXX_weak" + + UnknownFields = "unknownFields" + UnknownFieldsA = "XXX_unrecognized" + + ExtensionFields = "extensionFields" + ExtensionFieldsA = "XXX_InternalExtensions" + ExtensionFieldsB = "XXX_extensions" + + WeakFieldPrefix = "XXX_weak_" +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/api_export.go b/vendor/google.golang.org/protobuf/internal/impl/api_export.go new file mode 100644 index 0000000..4d22c96 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/api_export.go @@ -0,0 +1,170 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + + "google.golang.org/protobuf/encoding/prototext" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// Export is a zero-length named type that exists only to export a set of +// functions that we do not want to appear in godoc. +type Export struct{} + +// enum is any enum type generated by protoc-gen-go +// and must be a named int32 type. +type enum = interface{} + +// EnumOf returns the protoreflect.Enum interface over e. +// It returns nil if e is nil. 
+func (Export) EnumOf(e enum) pref.Enum { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e + default: + return legacyWrapEnum(reflect.ValueOf(e)) + } +} + +// EnumDescriptorOf returns the protoreflect.EnumDescriptor for e. +// It returns nil if e is nil. +func (Export) EnumDescriptorOf(e enum) pref.EnumDescriptor { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Descriptor() + default: + return LegacyLoadEnumDesc(reflect.TypeOf(e)) + } +} + +// EnumTypeOf returns the protoreflect.EnumType for e. +// It returns nil if e is nil. +func (Export) EnumTypeOf(e enum) pref.EnumType { + switch e := e.(type) { + case nil: + return nil + case pref.Enum: + return e.Type() + default: + return legacyLoadEnumType(reflect.TypeOf(e)) + } +} + +// EnumStringOf returns the enum value as a string, either as the name if +// the number is resolvable, or the number formatted as a string. +func (Export) EnumStringOf(ed pref.EnumDescriptor, n pref.EnumNumber) string { + ev := ed.Values().ByNumber(n) + if ev != nil { + return string(ev.Name()) + } + return strconv.Itoa(int(n)) +} + +// message is any message type generated by protoc-gen-go +// and must be a pointer to a named struct type. +type message = interface{} + +// legacyMessageWrapper wraps a v2 message as a v1 message. +type legacyMessageWrapper struct{ m pref.ProtoMessage } + +func (m legacyMessageWrapper) Reset() { proto.Reset(m.m) } +func (m legacyMessageWrapper) String() string { return Export{}.MessageStringOf(m.m) } +func (m legacyMessageWrapper) ProtoMessage() {} + +// ProtoMessageV1Of converts either a v1 or v2 message to a v1 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV1Of(m message) piface.MessageV1 { + switch mv := m.(type) { + case nil: + return nil + case piface.MessageV1: + return mv + case unwrapper: + return Export{}.ProtoMessageV1Of(mv.protoUnwrap()) + case pref.ProtoMessage: + return legacyMessageWrapper{mv} + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +func (Export) protoMessageV2Of(m message) pref.ProtoMessage { + switch mv := m.(type) { + case nil: + return nil + case pref.ProtoMessage: + return mv + case legacyMessageWrapper: + return mv.m + case piface.MessageV1: + return nil + default: + panic(fmt.Sprintf("message %T is neither a v1 or v2 Message", m)) + } +} + +// ProtoMessageV2Of converts either a v1 or v2 message to a v2 message. +// It returns nil if m is nil. +func (Export) ProtoMessageV2Of(m message) pref.ProtoMessage { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv + } + return legacyWrapMessage(reflect.ValueOf(m)).Interface() +} + +// MessageOf returns the protoreflect.Message interface over m. +// It returns nil if m is nil. +func (Export) MessageOf(m message) pref.Message { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect() + } + return legacyWrapMessage(reflect.ValueOf(m)) +} + +// MessageDescriptorOf returns the protoreflect.MessageDescriptor for m. +// It returns nil if m is nil. +func (Export) MessageDescriptorOf(m message) pref.MessageDescriptor { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Descriptor() + } + return LegacyLoadMessageDesc(reflect.TypeOf(m)) +} + +// MessageTypeOf returns the protoreflect.MessageType for m. +// It returns nil if m is nil. 
+func (Export) MessageTypeOf(m message) pref.MessageType { + if m == nil { + return nil + } + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageInfo(reflect.TypeOf(m), "") +} + +// MessageStringOf returns the message value as a string, +// which is the message serialized in the protobuf text format. +func (Export) MessageStringOf(m pref.ProtoMessage) string { + return prototext.MarshalOptions{Multiline: false}.Format(m) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/checkinit.go b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go new file mode 100644 index 0000000..b82341e --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/checkinit.go @@ -0,0 +1,141 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (mi *MessageInfo) checkInitialized(in piface.CheckInitializedInput) (piface.CheckInitializedOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + return piface.CheckInitializedOutput{}, mi.checkInitializedPointer(p) +} + +func (mi *MessageInfo) checkInitializedPointer(p pointer) error { + mi.init() + if !mi.needsInitCheck { + return nil + } + if p.IsNil() { + for _, f := range mi.orderedCoderFields { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + } + return nil + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + if err := mi.isInitExtensions(e); err != nil { + return err + } + } + for _, f := range mi.orderedCoderFields { + if !f.isRequired && f.funcs.isInit == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + if f.isRequired { + return errors.RequiredNotSet(string(mi.Desc.Fields().ByNumber(f.num).FullName())) + } + continue + } + if f.funcs.isInit == nil { + continue + } + if err := f.funcs.isInit(fptr, f); err != nil { + return err + } + } + return nil +} + +func (mi *MessageInfo) isInitExtensions(ext *map[int32]ExtensionField) error { + if ext == nil { + return nil + } + for _, x := range *ext { + ei := getExtensionFieldInfo(x.Type()) + if ei.funcs.isInit == nil { + continue + } + v := x.Value() + if !v.IsValid() { + continue + } + if err := ei.funcs.isInit(v); err != nil { + return err + } + } + return nil +} + +var ( + needsInitCheckMu sync.Mutex + needsInitCheckMap sync.Map +) + +// needsInitCheck reports whether a message needs to be checked for partial initialization. +// +// It returns true if the message transitively includes any required or extension fields. +func needsInitCheck(md pref.MessageDescriptor) bool { + if v, ok := needsInitCheckMap.Load(md); ok { + if has, ok := v.(bool); ok { + return has + } + } + needsInitCheckMu.Lock() + defer needsInitCheckMu.Unlock() + return needsInitCheckLocked(md) +} + +func needsInitCheckLocked(md pref.MessageDescriptor) (has bool) { + if v, ok := needsInitCheckMap.Load(md); ok { + // If has is true, we've previously determined that this message + // needs init checks. 
+ // + // If has is false, we've previously determined that it can never + // be uninitialized. + // + // If has is not a bool, we've just encountered a cycle in the + // message graph. In this case, it is safe to return false: If + // the message does have required fields, we'll detect them later + // in the graph traversal. + has, ok := v.(bool) + return ok && has + } + needsInitCheckMap.Store(md, struct{}{}) // avoid cycles while descending into this message + defer func() { + needsInitCheckMap.Store(md, has) + }() + if md.RequiredNumbers().Len() > 0 { + return true + } + if md.ExtensionRanges().Len() > 0 { + return true + } + for i := 0; i < md.Fields().Len(); i++ { + fd := md.Fields().Get(i) + // Map keys are never messages, so just consider the map value. + if fd.IsMap() { + fd = fd.MapValue() + } + fmd := fd.Message() + if fmd != nil && needsInitCheckLocked(fmd) { + return true + } + } + return false +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go new file mode 100644 index 0000000..08d3517 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_extension.go @@ -0,0 +1,223 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sync" + "sync/atomic" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type extensionFieldInfo struct { + wiretag uint64 + tagsize int + unmarshalNeedsValue bool + funcs valueCoderFuncs + validation validationInfo +} + +var legacyExtensionFieldInfoCache sync.Map // map[protoreflect.ExtensionType]*extensionFieldInfo + +func getExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := xt.(*ExtensionInfo); ok { + xi.lazyInit() + return xi.info + } + return legacyLoadExtensionFieldInfo(xt) +} + +// legacyLoadExtensionFieldInfo dynamically loads a *ExtensionInfo for xt. +func legacyLoadExtensionFieldInfo(xt pref.ExtensionType) *extensionFieldInfo { + if xi, ok := legacyExtensionFieldInfoCache.Load(xt); ok { + return xi.(*extensionFieldInfo) + } + e := makeExtensionFieldInfo(xt.TypeDescriptor()) + if e, ok := legacyMessageTypeCache.LoadOrStore(xt, e); ok { + return e.(*extensionFieldInfo) + } + return e +} + +func makeExtensionFieldInfo(xd pref.ExtensionDescriptor) *extensionFieldInfo { + var wiretag uint64 + if !xd.IsPacked() { + wiretag = protowire.EncodeTag(xd.Number(), wireTypes[xd.Kind()]) + } else { + wiretag = protowire.EncodeTag(xd.Number(), protowire.BytesType) + } + e := &extensionFieldInfo{ + wiretag: wiretag, + tagsize: protowire.SizeVarint(wiretag), + funcs: encoderFuncsForValue(xd), + } + // Does the unmarshal function need a value passed to it? + // This is true for composite types, where we pass in a message, list, or map to fill in, + // and for enums, where we pass in a prototype value to specify the concrete enum type. 
+ switch xd.Kind() { + case pref.MessageKind, pref.GroupKind, pref.EnumKind: + e.unmarshalNeedsValue = true + default: + if xd.Cardinality() == pref.Repeated { + e.unmarshalNeedsValue = true + } + } + return e +} + +type lazyExtensionValue struct { + atomicOnce uint32 // atomically set if value is valid + mu sync.Mutex + xi *extensionFieldInfo + value pref.Value + b []byte + fn func() pref.Value +} + +type ExtensionField struct { + typ pref.ExtensionType + + // value is either the value of GetValue, + // or a *lazyExtensionValue that then returns the value of GetValue. + value pref.Value + lazy *lazyExtensionValue +} + +func (f *ExtensionField) appendLazyBytes(xt pref.ExtensionType, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, b []byte) { + if f.lazy == nil { + f.lazy = &lazyExtensionValue{xi: xi} + } + f.typ = xt + f.lazy.xi = xi + f.lazy.b = protowire.AppendTag(f.lazy.b, num, wtyp) + f.lazy.b = append(f.lazy.b, b...) +} + +func (f *ExtensionField) canLazy(xt pref.ExtensionType) bool { + if f.typ == nil { + return true + } + if f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + return true + } + return false +} + +func (f *ExtensionField) lazyInit() { + f.lazy.mu.Lock() + defer f.lazy.mu.Unlock() + if atomic.LoadUint32(&f.lazy.atomicOnce) == 1 { + return + } + if f.lazy.xi != nil { + b := f.lazy.b + val := f.typ.New() + for len(b) > 0 { + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + panic(errors.New("bad tag in lazy extension decoding")) + } + b = b[n:] + } + num := protowire.Number(tag >> 3) + wtyp := protowire.Type(tag & 7) + var out unmarshalOutput + var err error + val, out, err = f.lazy.xi.funcs.unmarshal(b, val, num, wtyp, lazyUnmarshalOptions) + if err != nil { + panic(errors.New("decode failure in lazy extension decoding: %v", err)) + } + b = b[out.n:] + } + f.lazy.value = val + } else { + f.lazy.value = f.lazy.fn() + } + f.lazy.xi = nil + f.lazy.fn = nil + f.lazy.b = nil + atomic.StoreUint32(&f.lazy.atomicOnce, 1) +} + +// Set sets the type and value of the extension field. +// This must not be called concurrently. +func (f *ExtensionField) Set(t pref.ExtensionType, v pref.Value) { + f.typ = t + f.value = v + f.lazy = nil +} + +// SetLazy sets the type and a value that is to be lazily evaluated upon first use. +// This must not be called concurrently. +func (f *ExtensionField) SetLazy(t pref.ExtensionType, fn func() pref.Value) { + f.typ = t + f.lazy = &lazyExtensionValue{fn: fn} +} + +// Value returns the value of the extension field. +// This may be called concurrently. +func (f *ExtensionField) Value() pref.Value { + if f.lazy != nil { + if atomic.LoadUint32(&f.lazy.atomicOnce) == 0 { + f.lazyInit() + } + return f.lazy.value + } + return f.value +} + +// Type returns the type of the extension field. +// This may be called concurrently. +func (f ExtensionField) Type() pref.ExtensionType { + return f.typ +} + +// IsSet returns whether the extension field is set. +// This may be called concurrently. +func (f ExtensionField) IsSet() bool { + return f.typ != nil +} + +// IsLazy reports whether a field is lazily encoded. +// It is exported for testing. 
+func IsLazy(m pref.Message, fd pref.FieldDescriptor) bool { + var mi *MessageInfo + var p pointer + switch m := m.(type) { + case *messageState: + mi = m.messageInfo() + p = m.pointer() + case *messageReflectWrapper: + mi = m.messageInfo() + p = m.pointer() + default: + return false + } + xd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + return false + } + xt := xd.Type() + ext := mi.extensionMap(p) + if ext == nil { + return false + } + f, ok := (*ext)[int32(fd.Number())] + if !ok { + return false + } + return f.typ == xt && f.lazy != nil && atomic.LoadUint32(&f.lazy.atomicOnce) == 0 +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go new file mode 100644 index 0000000..c00744d --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -0,0 +1,828 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "sync" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type errInvalidUTF8 struct{} + +func (errInvalidUTF8) Error() string { return "string field contains invalid UTF-8" } +func (errInvalidUTF8) InvalidUTF8() bool { return true } + +// initOneofFieldCoders initializes the fast-path functions for the fields in a oneof. +// +// For size, marshal, and isInit operations, functions are set only on the first field +// in the oneof. The functions are called when the oneof is non-nil, and will dispatch +// to the appropriate field-specific function as necessary. +// +// The unmarshal function is set on each field individually as usual. +func (mi *MessageInfo) initOneofFieldCoders(od pref.OneofDescriptor, si structInfo) { + fs := si.oneofsByName[od.Name()] + ft := fs.Type + oneofFields := make(map[reflect.Type]*coderFieldInfo) + needIsInit := false + fields := od.Fields() + for i, lim := 0, fields.Len(); i < lim; i++ { + fd := od.Fields().Get(i) + num := fd.Number() + // Make a copy of the original coderFieldInfo for use in unmarshaling. + // + // oneofFields[oneofType].funcs.marshal is the field-specific marshal function. + // + // mi.coderFields[num].marshal is set on only the first field in the oneof, + // and dispatches to the field-specific marshaler in oneofFields. 
+ cf := *mi.coderFields[num] + ot := si.oneofWrappersByNumber[num] + cf.ft = ot.Field(0).Type + cf.mi, cf.funcs = fieldCoder(fd, cf.ft) + oneofFields[ot] = &cf + if cf.funcs.isInit != nil { + needIsInit = true + } + mi.coderFields[num].funcs.unmarshal = func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + var vw reflect.Value // pointer to wrapper type + vi := p.AsValueOf(ft).Elem() // oneof field value of interface kind + if !vi.IsNil() && !vi.Elem().IsNil() && vi.Elem().Elem().Type() == ot { + vw = vi.Elem() + } else { + vw = reflect.New(ot) + } + out, err := cf.funcs.unmarshal(b, pointerOfValue(vw).Apply(zeroOffset), wtyp, &cf, opts) + if err != nil { + return out, err + } + vi.Set(vw) + return out, nil + } + } + getInfo := func(p pointer) (pointer, *coderFieldInfo) { + v := p.AsValueOf(ft).Elem() + if v.IsNil() { + return pointer{}, nil + } + v = v.Elem() // interface -> *struct + if v.IsNil() { + return pointer{}, nil + } + return pointerOfValue(v).Apply(zeroOffset), oneofFields[v.Elem().Type()] + } + first := mi.coderFields[od.Fields().Get(0).Number()] + first.funcs.size = func(p pointer, _ *coderFieldInfo, opts marshalOptions) int { + p, info := getInfo(p) + if info == nil || info.funcs.size == nil { + return 0 + } + return info.funcs.size(p, info, opts) + } + first.funcs.marshal = func(b []byte, p pointer, _ *coderFieldInfo, opts marshalOptions) ([]byte, error) { + p, info := getInfo(p) + if info == nil || info.funcs.marshal == nil { + return b, nil + } + return info.funcs.marshal(b, p, info, opts) + } + first.funcs.merge = func(dst, src pointer, _ *coderFieldInfo, opts mergeOptions) { + srcp, srcinfo := getInfo(src) + if srcinfo == nil || srcinfo.funcs.merge == nil { + return + } + dstp, dstinfo := getInfo(dst) + if dstinfo != srcinfo { + dst.AsValueOf(ft).Elem().Set(reflect.New(src.AsValueOf(ft).Elem().Elem().Elem().Type())) + dstp = pointerOfValue(dst.AsValueOf(ft).Elem().Elem()).Apply(zeroOffset) + } + srcinfo.funcs.merge(dstp, srcp, srcinfo, opts) + } + if needIsInit { + first.funcs.isInit = func(p pointer, _ *coderFieldInfo) error { + p, info := getInfo(p) + if info == nil || info.funcs.isInit == nil { + return nil + } + return info.funcs.isInit(p, info) + } + } +} + +func makeWeakMessageFieldCoder(fd pref.FieldDescriptor) pointerCoderFuncs { + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + }) + } + + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m, ok := p.WeakFields().get(f.num) + if !ok { + return 0 + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m, ok := p.WeakFields().get(f.num) + if !ok { + return b, nil + } + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + fs := p.WeakFields() + m, ok := fs.get(f.num) + if !ok { + lazyInit() + if messageType == nil { + return unmarshalOutput{}, errUnknown + } + m = 
messageType.New().Interface() + fs.set(f.num, m) + } + return consumeMessage(b, m, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m, ok := p.WeakFields().get(f.num) + if !ok { + return nil + } + return proto.CheckInitialized(m) + }, + merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + sm, ok := src.WeakFields().get(f.num) + if !ok { + return + } + dm, ok := dst.WeakFields().get(f.num) + if !ok { + lazyInit() + if messageType == nil { + panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) + } + dm = messageType.New().Interface() + dst.WeakFields().set(f.num, dm) + } + opts.Merge(dm, sm) + }, + } +} + +func makeMessageFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageInfo, + marshal: appendMessageInfo, + unmarshal: consumeMessageInfo, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeMessage(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendMessage(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeMessage(b, asMessage(mp), wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeMessageInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return protowire.SizeBytes(f.mi.sizePointer(p.Elem(), opts)) + f.tagsize +} + +func appendMessageInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(f.mi.sizePointer(p.Elem(), opts))) + return f.mi.marshalAppendPointer(b, p.Elem(), opts) +} + +func consumeMessageInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + o, err := f.mi.unmarshalPointer(v, p.Elem(), 0, opts) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageInfo(p pointer, f *coderFieldInfo) error { + return f.mi.checkInitializedPointer(p.Elem()) +} + +func sizeMessage(m proto.Message, tagsize int, _ marshalOptions) int { + return protowire.SizeBytes(proto.Size(m)) + tagsize +} + +func appendMessage(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(proto.Size(m))) + return opts.Options().MarshalAppend(b, m) +} + +func consumeMessage(b []byte, m proto.Message, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } 
+ v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeMessageValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeMessage(m, tagsize, opts) +} + +func appendMessageValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendMessage(b, m, wiretag, opts) +} + +func consumeMessageValue(b []byte, v pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeMessage(b, m, wtyp, opts) + return v, out, err +} + +func isInitMessageValue(v pref.Value) error { + m := v.Message().Interface() + return proto.CheckInitialized(m) +} + +var coderMessageValue = valueCoderFuncs{ + size: sizeMessageValue, + marshal: appendMessageValue, + unmarshal: consumeMessageValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func sizeGroupValue(v pref.Value, tagsize int, opts marshalOptions) int { + m := v.Message().Interface() + return sizeGroup(m, tagsize, opts) +} + +func appendGroupValue(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + m := v.Message().Interface() + return appendGroup(b, m, wiretag, opts) +} + +func consumeGroupValue(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) { + m := v.Message().Interface() + out, err := consumeGroup(b, m, num, wtyp, opts) + return v, out, err +} + +var coderGroupValue = valueCoderFuncs{ + size: sizeGroupValue, + marshal: appendGroupValue, + unmarshal: consumeGroupValue, + isInit: isInitMessageValue, + merge: mergeMessageValue, +} + +func makeGroupFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupType, + marshal: appendGroupType, + unmarshal: consumeGroupType, + merge: mergeMessage, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageInfo + } + return funcs + } else { + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + m := asMessage(p.AsValueOf(ft).Elem()) + return sizeGroup(m, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + m := asMessage(p.AsValueOf(ft).Elem()) + return appendGroup(b, m, f.wiretag, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft).Elem() + if mp.IsNil() { + mp.Set(reflect.New(ft.Elem())) + } + return consumeGroup(b, asMessage(mp), num, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + m := asMessage(p.AsValueOf(ft).Elem()) + return proto.CheckInitialized(m) + }, + merge: mergeMessage, + } + } +} + +func sizeGroupType(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return 2*f.tagsize + f.mi.sizePointer(p.Elem(), opts) +} + +func appendGroupType(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err := 
f.mi.marshalAppendPointer(b, p.Elem(), opts) + b = protowire.AppendVarint(b, f.wiretag+1) // end group + return b, err +} + +func consumeGroupType(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + if p.Elem().IsNil() { + p.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + return f.mi.unmarshalPointer(b, p.Elem(), f.num, opts) +} + +func sizeGroup(m proto.Message, tagsize int, _ marshalOptions) int { + return 2*tagsize + proto.Size(m) +} + +func appendGroup(b []byte, m proto.Message, wiretag uint64, opts marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) // start group + b, err := opts.Options().MarshalAppend(b, m) + b = protowire.AppendVarint(b, wiretag+1) // end group + return b, err +} + +func consumeGroup(b []byte, m proto.Message, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, protowire.ParseError(n) + } + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.ProtoReflect(), + }) + if err != nil { + return out, err + } + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func makeMessageSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeMessageSliceInfo, + marshal: appendMessageSliceInfo, + unmarshal: consumeMessageSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMessageSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMessageSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeMessageSlice(b, p, ft, wtyp, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeMessageSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += protowire.SizeBytes(f.mi.sizePointer(v, opts)) + f.tagsize + } + return n +} + +func appendMessageSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + siz := f.mi.sizePointer(v, opts) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + o, err := f.mi.unmarshalPointer(v, mp, 0, opts) + if err != nil { + 
return out, err + } + p.AppendPointerSlice(mp) + out.n = n + out.initialized = o.initialized + return out, nil +} + +func isInitMessageSliceInfo(p pointer, f *coderFieldInfo) error { + s := p.PointerSlice() + for _, v := range s { + if err := f.mi.checkInitializedPointer(v); err != nil { + return err + } + } + return nil +} + +func sizeMessageSlice(p pointer, goType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSlice(b []byte, p pointer, wiretag uint64, goType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSlice(b []byte, p pointer, goType reflect.Type, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func isInitMessageSlice(p pointer, goType reflect.Type) error { + s := p.PointerSlice() + for _, v := range s { + m := asMessage(v.AsValueOf(goType.Elem())) + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +// Slices of messages + +func sizeMessageSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += protowire.SizeBytes(proto.Size(m)) + tagsize + } + return n +} + +func appendMessageSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) + siz := proto.Size(m) + b = protowire.AppendVarint(b, uint64(siz)) + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + } + return b, nil +} + +func consumeMessageSliceValue(b []byte, listv pref.Value, _ protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return pref.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return pref.Value{}, out, protowire.ParseError(n) + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: v, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +func isInitMessageSliceValue(listv pref.Value) error { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + m := 
list.Get(i).Message().Interface() + if err := proto.CheckInitialized(m); err != nil { + return err + } + } + return nil +} + +var coderMessageSliceValue = valueCoderFuncs{ + size: sizeMessageSliceValue, + marshal: appendMessageSliceValue, + unmarshal: consumeMessageSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func sizeGroupSliceValue(listv pref.Value, tagsize int, opts marshalOptions) int { + list := listv.List() + n := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSliceValue(b []byte, listv pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) { + list := listv.List() + mopts := opts.Options() + for i, llen := 0, list.Len(); i < llen; i++ { + m := list.Get(i).Message().Interface() + b = protowire.AppendVarint(b, wiretag) // start group + var err error + b, err = mopts.MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceValue(b []byte, listv pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (_ pref.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.StartGroupType { + return pref.Value{}, out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return pref.Value{}, out, protowire.ParseError(n) + } + m := list.NewElement() + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: m.Message(), + }) + if err != nil { + return pref.Value{}, out, err + } + list.Append(m) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return listv, out, nil +} + +var coderGroupSliceValue = valueCoderFuncs{ + size: sizeGroupSliceValue, + marshal: appendGroupSliceValue, + unmarshal: consumeGroupSliceValue, + isInit: isInitMessageSliceValue, + merge: mergeMessageListValue, +} + +func makeGroupSliceFieldCoder(fd pref.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { + num := fd.Number() + if mi := getMessageInfo(ft); mi != nil { + funcs := pointerCoderFuncs{ + size: sizeGroupSliceInfo, + marshal: appendGroupSliceInfo, + unmarshal: consumeGroupSliceInfo, + merge: mergeMessageSlice, + } + if needsInitCheck(mi.Desc) { + funcs.isInit = isInitMessageSliceInfo + } + return funcs + } + return pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeGroupSlice(p, ft, f.tagsize, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendGroupSlice(b, p, f.wiretag, ft, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + return consumeGroupSlice(b, p, num, wtyp, ft, opts) + }, + isInit: func(p pointer, f *coderFieldInfo) error { + return isInitMessageSlice(p, ft) + }, + merge: mergeMessageSlice, + } +} + +func sizeGroupSlice(p pointer, messageType reflect.Type, tagsize int, _ marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + n += 2*tagsize + proto.Size(m) + } + return n +} + +func appendGroupSlice(b []byte, p pointer, wiretag uint64, messageType reflect.Type, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + m := asMessage(v.AsValueOf(messageType.Elem())) + b = protowire.AppendVarint(b, 
wiretag) // start group + b, err = opts.Options().MarshalAppend(b, m) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSlice(b []byte, p pointer, num protowire.Number, wtyp protowire.Type, goType reflect.Type, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.StartGroupType { + return out, errUnknown + } + b, n := protowire.ConsumeGroup(num, b) + if n < 0 { + return out, protowire.ParseError(n) + } + mp := reflect.New(goType.Elem()) + o, err := opts.Options().UnmarshalState(piface.UnmarshalInput{ + Buf: b, + Message: asMessage(mp).ProtoReflect(), + }) + if err != nil { + return out, err + } + p.AppendPointerSlice(pointerOfValue(mp)) + out.n = n + out.initialized = o.Flags&piface.UnmarshalInitialized != 0 + return out, nil +} + +func sizeGroupSliceInfo(p pointer, f *coderFieldInfo, opts marshalOptions) int { + s := p.PointerSlice() + n := 0 + for _, v := range s { + n += 2*f.tagsize + f.mi.sizePointer(v, opts) + } + return n +} + +func appendGroupSliceInfo(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.PointerSlice() + var err error + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) // start group + b, err = f.mi.marshalAppendPointer(b, v, opts) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, f.wiretag+1) // end group + } + return b, nil +} + +func consumeGroupSliceInfo(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + if wtyp != protowire.StartGroupType { + return unmarshalOutput{}, errUnknown + } + m := reflect.New(f.mi.GoReflectType.Elem()).Interface() + mp := pointerOfIface(m) + out, err := f.mi.unmarshalPointer(b, mp, f.num, opts) + if err != nil { + return out, err + } + p.AppendPointerSlice(mp) + return out, nil +} + +func asMessage(v reflect.Value) pref.ProtoMessage { + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return m + } + return legacyWrapMessage(v).Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go new file mode 100644 index 0000000..ff198d0 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_gen.go @@ -0,0 +1,5637 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// sizeBool returns the size of wire encoding a bool pointer as a Bool. +func sizeBool(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bool() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBool wire encodes a bool pointer as a Bool. +func appendBool(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bool() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBool wire decodes a bool pointer as a Bool. 
+func consumeBool(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bool() = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBool = pointerCoderFuncs{ + size: sizeBool, + marshal: appendBool, + unmarshal: consumeBool, + merge: mergeBool, +} + +// sizeBoolNoZero returns the size of wire encoding a bool pointer as a Bool. +// The zero value is not encoded. +func sizeBoolNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bool() + if v == false { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolNoZero wire encodes a bool pointer as a Bool. +// The zero value is not encoded. +func appendBoolNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bool() + if v == false { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +var coderBoolNoZero = pointerCoderFuncs{ + size: sizeBoolNoZero, + marshal: appendBoolNoZero, + unmarshal: consumeBool, + merge: mergeBoolNoZero, +} + +// sizeBoolPtr returns the size of wire encoding a *bool pointer as a Bool. +// It panics if the pointer is nil. +func sizeBoolPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.BoolPtr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) +} + +// appendBoolPtr wire encodes a *bool pointer as a Bool. +// It panics if the pointer is nil. +func appendBoolPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.BoolPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + return b, nil +} + +// consumeBoolPtr wire decodes a *bool pointer as a Bool. +func consumeBoolPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.BoolPtr() + if *vp == nil { + *vp = new(bool) + } + **vp = protowire.DecodeBool(v) + out.n = n + return out, nil +} + +var coderBoolPtr = pointerCoderFuncs{ + size: sizeBoolPtr, + marshal: appendBoolPtr, + unmarshal: consumeBoolPtr, + merge: mergeBoolPtr, +} + +// sizeBoolSlice returns the size of wire encoding a []bool pointer as a repeated Bool. +func sizeBoolSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BoolSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeBool(v)) + } + return size +} + +// appendBoolSlice encodes a []bool pointer as a repeated Bool. 
+func appendBoolSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +// consumeBoolSlice wire decodes a []bool pointer as a repeated Bool. +func consumeBoolSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BoolSlice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, protowire.DecodeBool(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, protowire.DecodeBool(v)) + out.n = n + return out, nil +} + +var coderBoolSlice = pointerCoderFuncs{ + size: sizeBoolSlice, + marshal: appendBoolSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolPackedSlice returns the size of wire encoding a []bool pointer as a packed repeated Bool. +func sizeBoolPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BoolSlice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSlice encodes a []bool pointer as a packed repeated Bool. +func appendBoolPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BoolSlice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeBool(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeBool(v)) + } + return b, nil +} + +var coderBoolPackedSlice = pointerCoderFuncs{ + size: sizeBoolPackedSlice, + marshal: appendBoolPackedSlice, + unmarshal: consumeBoolSlice, + merge: mergeBoolSlice, +} + +// sizeBoolValue returns the size of wire encoding a bool value as a Bool. +func sizeBoolValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) +} + +// appendBoolValue encodes a bool value as a Bool. +func appendBoolValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + return b, nil +} + +// consumeBoolValue decodes a bool value as a Bool. 
+func consumeBoolValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), out, nil +} + +var coderBoolValue = valueCoderFuncs{ + size: sizeBoolValue, + marshal: appendBoolValue, + unmarshal: consumeBoolValue, + merge: mergeScalarValue, +} + +// sizeBoolSliceValue returns the size of wire encoding a []bool value as a repeated Bool. +func sizeBoolSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return size +} + +// appendBoolSliceValue encodes a []bool value as a repeated Bool. +func appendBoolSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +// consumeBoolSliceValue wire decodes a []bool value as a repeated Bool. +func consumeBoolSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + out.n = n + return listv, out, nil +} + +var coderBoolSliceValue = valueCoderFuncs{ + size: sizeBoolSliceValue, + marshal: appendBoolSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeBoolPackedSliceValue returns the size of wire encoding a []bool value as a packed repeated Bool. 
+func sizeBoolPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendBoolPackedSliceValue encodes a []bool value as a packed repeated Bool. +func appendBoolPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + } + return b, nil +} + +var coderBoolPackedSliceValue = valueCoderFuncs{ + size: sizeBoolPackedSliceValue, + marshal: appendBoolPackedSliceValue, + unmarshal: consumeBoolSliceValue, + merge: mergeListValue, +} + +// sizeEnumValue returns the size of wire encoding a value as a Enum. +func sizeEnumValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Enum())) +} + +// appendEnumValue encodes a value as a Enum. +func appendEnumValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + return b, nil +} + +// consumeEnumValue decodes a value as a Enum. +func consumeEnumValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), out, nil +} + +var coderEnumValue = valueCoderFuncs{ + size: sizeEnumValue, + marshal: appendEnumValue, + unmarshal: consumeEnumValue, + merge: mergeScalarValue, +} + +// sizeEnumSliceValue returns the size of wire encoding a [] value as a repeated Enum. +func sizeEnumSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Enum())) + } + return size +} + +// appendEnumSliceValue encodes a [] value as a repeated Enum. +func appendEnumSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +// consumeEnumSliceValue wire decodes a [] value as a repeated Enum. 
+func consumeEnumSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + out.n = n + return listv, out, nil +} + +var coderEnumSliceValue = valueCoderFuncs{ + size: sizeEnumSliceValue, + marshal: appendEnumSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeEnumPackedSliceValue returns the size of wire encoding a [] value as a packed repeated Enum. +func sizeEnumPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendEnumPackedSliceValue encodes a [] value as a packed repeated Enum. +func appendEnumPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Enum())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Enum())) + } + return b, nil +} + +var coderEnumPackedSliceValue = valueCoderFuncs{ + size: sizeEnumPackedSliceValue, + marshal: appendEnumPackedSliceValue, + unmarshal: consumeEnumSliceValue, + merge: mergeListValue, +} + +// sizeInt32 returns the size of wire encoding a int32 pointer as a Int32. +func sizeInt32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32 wire encodes a int32 pointer as a Int32. +func appendInt32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32 wire decodes a int32 pointer as a Int32. 
+func consumeInt32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderInt32 = pointerCoderFuncs{ + size: sizeInt32, + marshal: appendInt32, + unmarshal: consumeInt32, + merge: mergeInt32, +} + +// sizeInt32NoZero returns the size of wire encoding a int32 pointer as a Int32. +// The zero value is not encoded. +func sizeInt32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32NoZero wire encodes a int32 pointer as a Int32. +// The zero value is not encoded. +func appendInt32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt32NoZero = pointerCoderFuncs{ + size: sizeInt32NoZero, + marshal: appendInt32NoZero, + unmarshal: consumeInt32, + merge: mergeInt32NoZero, +} + +// sizeInt32Ptr returns the size of wire encoding a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func sizeInt32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt32Ptr wire encodes a *int32 pointer as a Int32. +// It panics if the pointer is nil. +func appendInt32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt32Ptr wire decodes a *int32 pointer as a Int32. +func consumeInt32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderInt32Ptr = pointerCoderFuncs{ + size: sizeInt32Ptr, + marshal: appendInt32Ptr, + unmarshal: consumeInt32Ptr, + merge: mergeInt32Ptr, +} + +// sizeInt32Slice returns the size of wire encoding a []int32 pointer as a repeated Int32. +func sizeInt32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt32Slice encodes a []int32 pointer as a repeated Int32. 
+func appendInt32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt32Slice wire decodes a []int32 pointer as a repeated Int32. +func consumeInt32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderInt32Slice = pointerCoderFuncs{ + size: sizeInt32Slice, + marshal: appendInt32Slice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Int32. +func sizeInt32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSlice encodes a []int32 pointer as a packed repeated Int32. +func appendInt32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt32PackedSlice = pointerCoderFuncs{ + size: sizeInt32PackedSlice, + marshal: appendInt32PackedSlice, + unmarshal: consumeInt32Slice, + merge: mergeInt32Slice, +} + +// sizeInt32Value returns the size of wire encoding a int32 value as a Int32. +func sizeInt32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) +} + +// appendInt32Value encodes a int32 value as a Int32. +func appendInt32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + return b, nil +} + +// consumeInt32Value decodes a int32 value as a Int32. 
+func consumeInt32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderInt32Value = valueCoderFuncs{ + size: sizeInt32Value, + marshal: appendInt32Value, + unmarshal: consumeInt32Value, + merge: mergeScalarValue, +} + +// sizeInt32SliceValue returns the size of wire encoding a []int32 value as a repeated Int32. +func sizeInt32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(int32(v.Int()))) + } + return size +} + +// appendInt32SliceValue encodes a []int32 value as a repeated Int32. +func appendInt32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +// consumeInt32SliceValue wire decodes a []int32 value as a repeated Int32. +func consumeInt32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderInt32SliceValue = valueCoderFuncs{ + size: sizeInt32SliceValue, + marshal: appendInt32SliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeInt32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Int32. 
+func sizeInt32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt32PackedSliceValue encodes a []int32 value as a packed repeated Int32. +func appendInt32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(int32(v.Int()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + } + return b, nil +} + +var coderInt32PackedSliceValue = valueCoderFuncs{ + size: sizeInt32PackedSliceValue, + marshal: appendInt32PackedSliceValue, + unmarshal: consumeInt32SliceValue, + merge: mergeListValue, +} + +// sizeSint32 returns the size of wire encoding a int32 pointer as a Sint32. +func sizeSint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32 wire encodes a int32 pointer as a Sint32. +func appendSint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32 wire decodes a int32 pointer as a Sint32. +func consumeSint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32 = pointerCoderFuncs{ + size: sizeSint32, + marshal: appendSint32, + unmarshal: consumeSint32, + merge: mergeInt32, +} + +// sizeSint32NoZero returns the size of wire encoding a int32 pointer as a Sint32. +// The zero value is not encoded. +func sizeSint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32NoZero wire encodes a int32 pointer as a Sint32. +// The zero value is not encoded. +func appendSint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +var coderSint32NoZero = pointerCoderFuncs{ + size: sizeSint32NoZero, + marshal: appendSint32NoZero, + unmarshal: consumeSint32, + merge: mergeInt32NoZero, +} + +// sizeSint32Ptr returns the size of wire encoding a *int32 pointer as a Sint32. +// It panics if the pointer is nil. 
+func sizeSint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int32Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) +} + +// appendSint32Ptr wire encodes a *int32 pointer as a Sint32. +// It panics if the pointer is nil. +func appendSint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + return b, nil +} + +// consumeSint32Ptr wire decodes a *int32 pointer as a Sint32. +func consumeSint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(protowire.DecodeZigZag(v & math.MaxUint32)) + out.n = n + return out, nil +} + +var coderSint32Ptr = pointerCoderFuncs{ + size: sizeSint32Ptr, + marshal: appendSint32Ptr, + unmarshal: consumeSint32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSint32Slice returns the size of wire encoding a []int32 pointer as a repeated Sint32. +func sizeSint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return size +} + +// appendSint32Slice encodes a []int32 pointer as a repeated Sint32. +func appendSint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +// consumeSint32Slice wire decodes a []int32 pointer as a repeated Sint32. 
+func consumeSint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(protowire.DecodeZigZag(v&math.MaxUint32))) + out.n = n + return out, nil +} + +var coderSint32Slice = pointerCoderFuncs{ + size: sizeSint32Slice, + marshal: appendSint32Slice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sint32. +func sizeSint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSlice encodes a []int32 pointer as a packed repeated Sint32. +func appendSint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(v))) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(v))) + } + return b, nil +} + +var coderSint32PackedSlice = pointerCoderFuncs{ + size: sizeSint32PackedSlice, + marshal: appendSint32PackedSlice, + unmarshal: consumeSint32Slice, + merge: mergeInt32Slice, +} + +// sizeSint32Value returns the size of wire encoding a int32 value as a Sint32. +func sizeSint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) +} + +// appendSint32Value encodes a int32 value as a Sint32. +func appendSint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + return b, nil +} + +// consumeSint32Value decodes a int32 value as a Sint32. 
+func consumeSint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), out, nil +} + +var coderSint32Value = valueCoderFuncs{ + size: sizeSint32Value, + marshal: appendSint32Value, + unmarshal: consumeSint32Value, + merge: mergeScalarValue, +} + +// sizeSint32SliceValue returns the size of wire encoding a []int32 value as a repeated Sint32. +func sizeSint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return size +} + +// appendSint32SliceValue encodes a []int32 value as a repeated Sint32. +func appendSint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +// consumeSint32SliceValue wire decodes a []int32 value as a repeated Sint32. +func consumeSint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + out.n = n + return listv, out, nil +} + +var coderSint32SliceValue = valueCoderFuncs{ + size: sizeSint32SliceValue, + marshal: appendSint32SliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeSint32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sint32. 
+func sizeSint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint32PackedSliceValue encodes a []int32 value as a packed repeated Sint32. +func appendSint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + } + return b, nil +} + +var coderSint32PackedSliceValue = valueCoderFuncs{ + size: sizeSint32PackedSliceValue, + marshal: appendSint32PackedSliceValue, + unmarshal: consumeSint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32 returns the size of wire encoding a uint32 pointer as a Uint32. +func sizeUint32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32 wire encodes a uint32 pointer as a Uint32. +func appendUint32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32 wire decodes a uint32 pointer as a Uint32. +func consumeUint32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint32() = uint32(v) + out.n = n + return out, nil +} + +var coderUint32 = pointerCoderFuncs{ + size: sizeUint32, + marshal: appendUint32, + unmarshal: consumeUint32, + merge: mergeUint32, +} + +// sizeUint32NoZero returns the size of wire encoding a uint32 pointer as a Uint32. +// The zero value is not encoded. +func sizeUint32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32NoZero wire encodes a uint32 pointer as a Uint32. +// The zero value is not encoded. +func appendUint32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderUint32NoZero = pointerCoderFuncs{ + size: sizeUint32NoZero, + marshal: appendUint32NoZero, + unmarshal: consumeUint32, + merge: mergeUint32NoZero, +} + +// sizeUint32Ptr returns the size of wire encoding a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. 
+func sizeUint32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Uint32Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendUint32Ptr wire encodes a *uint32 pointer as a Uint32. +// It panics if the pointer is nil. +func appendUint32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeUint32Ptr wire decodes a *uint32 pointer as a Uint32. +func consumeUint32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = uint32(v) + out.n = n + return out, nil +} + +var coderUint32Ptr = pointerCoderFuncs{ + size: sizeUint32Ptr, + marshal: appendUint32Ptr, + unmarshal: consumeUint32Ptr, + merge: mergeUint32Ptr, +} + +// sizeUint32Slice returns the size of wire encoding a []uint32 pointer as a repeated Uint32. +func sizeUint32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendUint32Slice encodes a []uint32 pointer as a repeated Uint32. +func appendUint32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeUint32Slice wire decodes a []uint32 pointer as a repeated Uint32. +func consumeUint32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, uint32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, uint32(v)) + out.n = n + return out, nil +} + +var coderUint32Slice = pointerCoderFuncs{ + size: sizeUint32Slice, + marshal: appendUint32Slice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Uint32. 
+func sizeUint32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSlice encodes a []uint32 pointer as a packed repeated Uint32. +func appendUint32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderUint32PackedSlice = pointerCoderFuncs{ + size: sizeUint32PackedSlice, + marshal: appendUint32PackedSlice, + unmarshal: consumeUint32Slice, + merge: mergeUint32Slice, +} + +// sizeUint32Value returns the size of wire encoding a uint32 value as a Uint32. +func sizeUint32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) +} + +// appendUint32Value encodes a uint32 value as a Uint32. +func appendUint32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + return b, nil +} + +// consumeUint32Value decodes a uint32 value as a Uint32. +func consumeUint32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderUint32Value = valueCoderFuncs{ + size: sizeUint32Value, + marshal: appendUint32Value, + unmarshal: consumeUint32Value, + merge: mergeScalarValue, +} + +// sizeUint32SliceValue returns the size of wire encoding a []uint32 value as a repeated Uint32. +func sizeUint32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return size +} + +// appendUint32SliceValue encodes a []uint32 value as a repeated Uint32. +func appendUint32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +// consumeUint32SliceValue wire decodes a []uint32 value as a repeated Uint32. 
+func consumeUint32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderUint32SliceValue = valueCoderFuncs{ + size: sizeUint32SliceValue, + marshal: appendUint32SliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeUint32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Uint32. +func sizeUint32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint32PackedSliceValue encodes a []uint32 value as a packed repeated Uint32. +func appendUint32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(uint32(v.Uint()))) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + } + return b, nil +} + +var coderUint32PackedSliceValue = valueCoderFuncs{ + size: sizeUint32PackedSliceValue, + marshal: appendUint32PackedSliceValue, + unmarshal: consumeUint32SliceValue, + merge: mergeListValue, +} + +// sizeInt64 returns the size of wire encoding a int64 pointer as a Int64. +func sizeInt64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64 wire encodes a int64 pointer as a Int64. +func appendInt64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64 wire decodes a int64 pointer as a Int64. 
+func consumeInt64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderInt64 = pointerCoderFuncs{ + size: sizeInt64, + marshal: appendInt64, + unmarshal: consumeInt64, + merge: mergeInt64, +} + +// sizeInt64NoZero returns the size of wire encoding a int64 pointer as a Int64. +// The zero value is not encoded. +func sizeInt64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64NoZero wire encodes a int64 pointer as a Int64. +// The zero value is not encoded. +func appendInt64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +var coderInt64NoZero = pointerCoderFuncs{ + size: sizeInt64NoZero, + marshal: appendInt64NoZero, + unmarshal: consumeInt64, + merge: mergeInt64NoZero, +} + +// sizeInt64Ptr returns the size of wire encoding a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func sizeInt64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +// appendInt64Ptr wire encodes a *int64 pointer as a Int64. +// It panics if the pointer is nil. +func appendInt64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +// consumeInt64Ptr wire decodes a *int64 pointer as a Int64. +func consumeInt64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderInt64Ptr = pointerCoderFuncs{ + size: sizeInt64Ptr, + marshal: appendInt64Ptr, + unmarshal: consumeInt64Ptr, + merge: mergeInt64Ptr, +} + +// sizeInt64Slice returns the size of wire encoding a []int64 pointer as a repeated Int64. +func sizeInt64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(uint64(v)) + } + return size +} + +// appendInt64Slice encodes a []int64 pointer as a repeated Int64. 
+func appendInt64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +// consumeInt64Slice wire decodes a []int64 pointer as a repeated Int64. +func consumeInt64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderInt64Slice = pointerCoderFuncs{ + size: sizeInt64Slice, + marshal: appendInt64Slice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Int64. +func sizeInt64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSlice encodes a []int64 pointer as a packed repeated Int64. +func appendInt64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(uint64(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, uint64(v)) + } + return b, nil +} + +var coderInt64PackedSlice = pointerCoderFuncs{ + size: sizeInt64PackedSlice, + marshal: appendInt64PackedSlice, + unmarshal: consumeInt64Slice, + merge: mergeInt64Slice, +} + +// sizeInt64Value returns the size of wire encoding a int64 value as a Int64. +func sizeInt64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(uint64(v.Int())) +} + +// appendInt64Value encodes a int64 value as a Int64. +func appendInt64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + return b, nil +} + +// consumeInt64Value decodes a int64 value as a Int64. 
+func consumeInt64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderInt64Value = valueCoderFuncs{ + size: sizeInt64Value, + marshal: appendInt64Value, + unmarshal: consumeInt64Value, + merge: mergeScalarValue, +} + +// sizeInt64SliceValue returns the size of wire encoding a []int64 value as a repeated Int64. +func sizeInt64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(uint64(v.Int())) + } + return size +} + +// appendInt64SliceValue encodes a []int64 value as a repeated Int64. +func appendInt64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +// consumeInt64SliceValue wire decodes a []int64 value as a repeated Int64. +func consumeInt64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderInt64SliceValue = valueCoderFuncs{ + size: sizeInt64SliceValue, + marshal: appendInt64SliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeInt64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Int64. 
+func sizeInt64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendInt64PackedSliceValue encodes a []int64 value as a packed repeated Int64. +func appendInt64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(uint64(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, uint64(v.Int())) + } + return b, nil +} + +var coderInt64PackedSliceValue = valueCoderFuncs{ + size: sizeInt64PackedSliceValue, + marshal: appendInt64PackedSliceValue, + unmarshal: consumeInt64SliceValue, + merge: mergeListValue, +} + +// sizeSint64 returns the size of wire encoding a int64 pointer as a Sint64. +func sizeSint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64 wire encodes a int64 pointer as a Sint64. +func appendSint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64 wire decodes a int64 pointer as a Sint64. +func consumeSint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64 = pointerCoderFuncs{ + size: sizeSint64, + marshal: appendSint64, + unmarshal: consumeSint64, + merge: mergeInt64, +} + +// sizeSint64NoZero returns the size of wire encoding a int64 pointer as a Sint64. +// The zero value is not encoded. +func sizeSint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64NoZero wire encodes a int64 pointer as a Sint64. +// The zero value is not encoded. +func appendSint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +var coderSint64NoZero = pointerCoderFuncs{ + size: sizeSint64NoZero, + marshal: appendSint64NoZero, + unmarshal: consumeSint64, + merge: mergeInt64NoZero, +} + +// sizeSint64Ptr returns the size of wire encoding a *int64 pointer as a Sint64. +// It panics if the pointer is nil. 
+func sizeSint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Int64Ptr() + return f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) +} + +// appendSint64Ptr wire encodes a *int64 pointer as a Sint64. +// It panics if the pointer is nil. +func appendSint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + return b, nil +} + +// consumeSint64Ptr wire decodes a *int64 pointer as a Sint64. +func consumeSint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = protowire.DecodeZigZag(v) + out.n = n + return out, nil +} + +var coderSint64Ptr = pointerCoderFuncs{ + size: sizeSint64Ptr, + marshal: appendSint64Ptr, + unmarshal: consumeSint64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSint64Slice returns the size of wire encoding a []int64 pointer as a repeated Sint64. +func sizeSint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return size +} + +// appendSint64Slice encodes a []int64 pointer as a repeated Sint64. +func appendSint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +// consumeSint64Slice wire decodes a []int64 pointer as a repeated Sint64. +func consumeSint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, protowire.DecodeZigZag(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, protowire.DecodeZigZag(v)) + out.n = n + return out, nil +} + +var coderSint64Slice = pointerCoderFuncs{ + size: sizeSint64Slice, + marshal: appendSint64Slice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sint64. 
+func sizeSint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSlice encodes a []int64 pointer as a packed repeated Sint64. +func appendSint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(protowire.EncodeZigZag(v)) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v)) + } + return b, nil +} + +var coderSint64PackedSlice = pointerCoderFuncs{ + size: sizeSint64PackedSlice, + marshal: appendSint64PackedSlice, + unmarshal: consumeSint64Slice, + merge: mergeInt64Slice, +} + +// sizeSint64Value returns the size of wire encoding a int64 value as a Sint64. +func sizeSint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) +} + +// appendSint64Value encodes a int64 value as a Sint64. +func appendSint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + return b, nil +} + +// consumeSint64Value decodes a int64 value as a Sint64. +func consumeSint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), out, nil +} + +var coderSint64Value = valueCoderFuncs{ + size: sizeSint64Value, + marshal: appendSint64Value, + unmarshal: consumeSint64Value, + merge: mergeScalarValue, +} + +// sizeSint64SliceValue returns the size of wire encoding a []int64 value as a repeated Sint64. +func sizeSint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return size +} + +// appendSint64SliceValue encodes a []int64 value as a repeated Sint64. +func appendSint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +// consumeSint64SliceValue wire decodes a []int64 value as a repeated Sint64. 
+func consumeSint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + out.n = n + return listv, out, nil +} + +var coderSint64SliceValue = valueCoderFuncs{ + size: sizeSint64SliceValue, + marshal: appendSint64SliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeSint64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sint64. +func sizeSint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendSint64PackedSliceValue encodes a []int64 value as a packed repeated Sint64. +func appendSint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + } + return b, nil +} + +var coderSint64PackedSliceValue = valueCoderFuncs{ + size: sizeSint64PackedSliceValue, + marshal: appendSint64PackedSliceValue, + unmarshal: consumeSint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64 returns the size of wire encoding a uint64 pointer as a Uint64. +func sizeUint64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64 wire encodes a uint64 pointer as a Uint64. +func appendUint64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64 wire decodes a uint64 pointer as a Uint64. 
+func consumeUint64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderUint64 = pointerCoderFuncs{ + size: sizeUint64, + marshal: appendUint64, + unmarshal: consumeUint64, + merge: mergeUint64, +} + +// sizeUint64NoZero returns the size of wire encoding a uint64 pointer as a Uint64. +// The zero value is not encoded. +func sizeUint64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64NoZero wire encodes a uint64 pointer as a Uint64. +// The zero value is not encoded. +func appendUint64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +var coderUint64NoZero = pointerCoderFuncs{ + size: sizeUint64NoZero, + marshal: appendUint64NoZero, + unmarshal: consumeUint64, + merge: mergeUint64NoZero, +} + +// sizeUint64Ptr returns the size of wire encoding a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func sizeUint64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.Uint64Ptr() + return f.tagsize + protowire.SizeVarint(v) +} + +// appendUint64Ptr wire encodes a *uint64 pointer as a Uint64. +// It panics if the pointer is nil. +func appendUint64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + return b, nil +} + +// consumeUint64Ptr wire decodes a *uint64 pointer as a Uint64. +func consumeUint64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderUint64Ptr = pointerCoderFuncs{ + size: sizeUint64Ptr, + marshal: appendUint64Ptr, + unmarshal: consumeUint64Ptr, + merge: mergeUint64Ptr, +} + +// sizeUint64Slice returns the size of wire encoding a []uint64 pointer as a repeated Uint64. +func sizeUint64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + for _, v := range s { + size += f.tagsize + protowire.SizeVarint(v) + } + return size +} + +// appendUint64Slice encodes a []uint64 pointer as a repeated Uint64. 
+func appendUint64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +// consumeUint64Slice wire decodes a []uint64 pointer as a repeated Uint64. +func consumeUint64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderUint64Slice = pointerCoderFuncs{ + size: sizeUint64Slice, + marshal: appendUint64Slice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Uint64. +func sizeUint64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + return f.tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSlice encodes a []uint64 pointer as a packed repeated Uint64. +func appendUint64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for _, v := range s { + n += protowire.SizeVarint(v) + } + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendVarint(b, v) + } + return b, nil +} + +var coderUint64PackedSlice = pointerCoderFuncs{ + size: sizeUint64PackedSlice, + marshal: appendUint64PackedSlice, + unmarshal: consumeUint64Slice, + merge: mergeUint64Slice, +} + +// sizeUint64Value returns the size of wire encoding a uint64 value as a Uint64. +func sizeUint64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeVarint(v.Uint()) +} + +// appendUint64Value encodes a uint64 value as a Uint64. +func appendUint64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + return b, nil +} + +// consumeUint64Value decodes a uint64 value as a Uint64. 
+func consumeUint64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderUint64Value = valueCoderFuncs{ + size: sizeUint64Value, + marshal: appendUint64Value, + unmarshal: consumeUint64Value, + merge: mergeScalarValue, +} + +// sizeUint64SliceValue returns the size of wire encoding a []uint64 value as a repeated Uint64. +func sizeUint64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeVarint(v.Uint()) + } + return size +} + +// appendUint64SliceValue encodes a []uint64 value as a repeated Uint64. +func appendUint64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +// consumeUint64SliceValue wire decodes a []uint64 value as a repeated Uint64. +func consumeUint64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.VarintType { + return protoreflect.Value{}, out, errUnknown + } + var v uint64 + var n int + if len(b) >= 1 && b[0] < 0x80 { + v = uint64(b[0]) + n = 1 + } else if len(b) >= 2 && b[1] < 128 { + v = uint64(b[0]&0x7f) + uint64(b[1])<<7 + n = 2 + } else { + v, n = protowire.ConsumeVarint(b) + } + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderUint64SliceValue = valueCoderFuncs{ + size: sizeUint64SliceValue, + marshal: appendUint64SliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeUint64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Uint64. 
+func sizeUint64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := 0 + for i, llen := 0, llen; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + return tagsize + protowire.SizeBytes(n) +} + +// appendUint64PackedSliceValue encodes a []uint64 value as a packed repeated Uint64. +func appendUint64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := 0 + for i := 0; i < llen; i++ { + v := list.Get(i) + n += protowire.SizeVarint(v.Uint()) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, v.Uint()) + } + return b, nil +} + +var coderUint64PackedSliceValue = valueCoderFuncs{ + size: sizeUint64PackedSliceValue, + marshal: appendUint64PackedSliceValue, + unmarshal: consumeUint64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32 returns the size of wire encoding a int32 pointer as a Sfixed32. +func sizeSfixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32 wire encodes a int32 pointer as a Sfixed32. +func appendSfixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32 wire decodes a int32 pointer as a Sfixed32. +func consumeSfixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int32() = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32 = pointerCoderFuncs{ + size: sizeSfixed32, + marshal: appendSfixed32, + unmarshal: consumeSfixed32, + merge: mergeInt32, +} + +// sizeSfixed32NoZero returns the size of wire encoding a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func sizeSfixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32NoZero wire encodes a int32 pointer as a Sfixed32. +// The zero value is not encoded. +func appendSfixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +var coderSfixed32NoZero = pointerCoderFuncs{ + size: sizeSfixed32NoZero, + marshal: appendSfixed32NoZero, + unmarshal: consumeSfixed32, + merge: mergeInt32NoZero, +} + +// sizeSfixed32Ptr returns the size of wire encoding a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. +func sizeSfixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Ptr wire encodes a *int32 pointer as a Sfixed32. +// It panics if the pointer is nil. 
+func appendSfixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + return b, nil +} + +// consumeSfixed32Ptr wire decodes a *int32 pointer as a Sfixed32. +func consumeSfixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int32Ptr() + if *vp == nil { + *vp = new(int32) + } + **vp = int32(v) + out.n = n + return out, nil +} + +var coderSfixed32Ptr = pointerCoderFuncs{ + size: sizeSfixed32Ptr, + marshal: appendSfixed32Ptr, + unmarshal: consumeSfixed32Ptr, + merge: mergeInt32Ptr, +} + +// sizeSfixed32Slice returns the size of wire encoding a []int32 pointer as a repeated Sfixed32. +func sizeSfixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32Slice encodes a []int32 pointer as a repeated Sfixed32. +func appendSfixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +// consumeSfixed32Slice wire decodes a []int32 pointer as a repeated Sfixed32. +func consumeSfixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int32(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int32(v)) + out.n = n + return out, nil +} + +var coderSfixed32Slice = pointerCoderFuncs{ + size: sizeSfixed32Slice, + marshal: appendSfixed32Slice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32PackedSlice returns the size of wire encoding a []int32 pointer as a packed repeated Sfixed32. +func sizeSfixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSlice encodes a []int32 pointer as a packed repeated Sfixed32. 
+func appendSfixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, uint32(v)) + } + return b, nil +} + +var coderSfixed32PackedSlice = pointerCoderFuncs{ + size: sizeSfixed32PackedSlice, + marshal: appendSfixed32PackedSlice, + unmarshal: consumeSfixed32Slice, + merge: mergeInt32Slice, +} + +// sizeSfixed32Value returns the size of wire encoding a int32 value as a Sfixed32. +func sizeSfixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendSfixed32Value encodes a int32 value as a Sfixed32. +func appendSfixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + return b, nil +} + +// consumeSfixed32Value decodes a int32 value as a Sfixed32. +func consumeSfixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt32(int32(v)), out, nil +} + +var coderSfixed32Value = valueCoderFuncs{ + size: sizeSfixed32Value, + marshal: appendSfixed32Value, + unmarshal: consumeSfixed32Value, + merge: mergeScalarValue, +} + +// sizeSfixed32SliceValue returns the size of wire encoding a []int32 value as a repeated Sfixed32. +func sizeSfixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendSfixed32SliceValue encodes a []int32 value as a repeated Sfixed32. +func appendSfixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +// consumeSfixed32SliceValue wire decodes a []int32 value as a repeated Sfixed32. 
+func consumeSfixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed32SliceValue = valueCoderFuncs{ + size: sizeSfixed32SliceValue, + marshal: appendSfixed32SliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeSfixed32PackedSliceValue returns the size of wire encoding a []int32 value as a packed repeated Sfixed32. +func sizeSfixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed32PackedSliceValue encodes a []int32 value as a packed repeated Sfixed32. +func appendSfixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Int())) + } + return b, nil +} + +var coderSfixed32PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed32PackedSliceValue, + marshal: appendSfixed32PackedSliceValue, + unmarshal: consumeSfixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32 returns the size of wire encoding a uint32 pointer as a Fixed32. +func sizeFixed32(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32 wire encodes a uint32 pointer as a Fixed32. +func appendFixed32(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32 wire decodes a uint32 pointer as a Fixed32. +func consumeFixed32(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint32() = v + out.n = n + return out, nil +} + +var coderFixed32 = pointerCoderFuncs{ + size: sizeFixed32, + marshal: appendFixed32, + unmarshal: consumeFixed32, + merge: mergeUint32, +} + +// sizeFixed32NoZero returns the size of wire encoding a uint32 pointer as a Fixed32. +// The zero value is not encoded. 
+func sizeFixed32NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint32() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32NoZero wire encodes a uint32 pointer as a Fixed32. +// The zero value is not encoded. +func appendFixed32NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint32() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +var coderFixed32NoZero = pointerCoderFuncs{ + size: sizeFixed32NoZero, + marshal: appendFixed32NoZero, + unmarshal: consumeFixed32, + merge: mergeUint32NoZero, +} + +// sizeFixed32Ptr returns the size of wire encoding a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func sizeFixed32Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFixed32Ptr wire encodes a *uint32 pointer as a Fixed32. +// It panics if the pointer is nil. +func appendFixed32Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + return b, nil +} + +// consumeFixed32Ptr wire decodes a *uint32 pointer as a Fixed32. +func consumeFixed32Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint32Ptr() + if *vp == nil { + *vp = new(uint32) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed32Ptr = pointerCoderFuncs{ + size: sizeFixed32Ptr, + marshal: appendFixed32Ptr, + unmarshal: consumeFixed32Ptr, + merge: mergeUint32Ptr, +} + +// sizeFixed32Slice returns the size of wire encoding a []uint32 pointer as a repeated Fixed32. +func sizeFixed32Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32Slice encodes a []uint32 pointer as a repeated Fixed32. +func appendFixed32Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +// consumeFixed32Slice wire decodes a []uint32 pointer as a repeated Fixed32. 
+func consumeFixed32Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed32Slice = pointerCoderFuncs{ + size: sizeFixed32Slice, + marshal: appendFixed32Slice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32PackedSlice returns the size of wire encoding a []uint32 pointer as a packed repeated Fixed32. +func sizeFixed32PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSlice encodes a []uint32 pointer as a packed repeated Fixed32. +func appendFixed32PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, v) + } + return b, nil +} + +var coderFixed32PackedSlice = pointerCoderFuncs{ + size: sizeFixed32PackedSlice, + marshal: appendFixed32PackedSlice, + unmarshal: consumeFixed32Slice, + merge: mergeUint32Slice, +} + +// sizeFixed32Value returns the size of wire encoding a uint32 value as a Fixed32. +func sizeFixed32Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFixed32Value encodes a uint32 value as a Fixed32. +func appendFixed32Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + return b, nil +} + +// consumeFixed32Value decodes a uint32 value as a Fixed32. +func consumeFixed32Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint32(uint32(v)), out, nil +} + +var coderFixed32Value = valueCoderFuncs{ + size: sizeFixed32Value, + marshal: appendFixed32Value, + unmarshal: consumeFixed32Value, + merge: mergeScalarValue, +} + +// sizeFixed32SliceValue returns the size of wire encoding a []uint32 value as a repeated Fixed32. +func sizeFixed32SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFixed32SliceValue encodes a []uint32 value as a repeated Fixed32. 
+func appendFixed32SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +// consumeFixed32SliceValue wire decodes a []uint32 value as a repeated Fixed32. +func consumeFixed32SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + out.n = n + return listv, out, nil +} + +var coderFixed32SliceValue = valueCoderFuncs{ + size: sizeFixed32SliceValue, + marshal: appendFixed32SliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFixed32PackedSliceValue returns the size of wire encoding a []uint32 value as a packed repeated Fixed32. +func sizeFixed32PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed32PackedSliceValue encodes a []uint32 value as a packed repeated Fixed32. +func appendFixed32PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, uint32(v.Uint())) + } + return b, nil +} + +var coderFixed32PackedSliceValue = valueCoderFuncs{ + size: sizeFixed32PackedSliceValue, + marshal: appendFixed32PackedSliceValue, + unmarshal: consumeFixed32SliceValue, + merge: mergeListValue, +} + +// sizeFloat returns the size of wire encoding a float32 pointer as a Float. +func sizeFloat(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloat wire encodes a float32 pointer as a Float. +func appendFloat(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float32() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloat wire decodes a float32 pointer as a Float. 
+func consumeFloat(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Float32() = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloat = pointerCoderFuncs{ + size: sizeFloat, + marshal: appendFloat, + unmarshal: consumeFloat, + merge: mergeFloat32, +} + +// sizeFloatNoZero returns the size of wire encoding a float32 pointer as a Float. +// The zero value is not encoded. +func sizeFloatNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatNoZero wire encodes a float32 pointer as a Float. +// The zero value is not encoded. +func appendFloatNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float32() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +var coderFloatNoZero = pointerCoderFuncs{ + size: sizeFloatNoZero, + marshal: appendFloatNoZero, + unmarshal: consumeFloat, + merge: mergeFloat32NoZero, +} + +// sizeFloatPtr returns the size of wire encoding a *float32 pointer as a Float. +// It panics if the pointer is nil. +func sizeFloatPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed32() +} + +// appendFloatPtr wire encodes a *float32 pointer as a Float. +// It panics if the pointer is nil. +func appendFloatPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Float32Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + return b, nil +} + +// consumeFloatPtr wire decodes a *float32 pointer as a Float. +func consumeFloatPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Float32Ptr() + if *vp == nil { + *vp = new(float32) + } + **vp = math.Float32frombits(v) + out.n = n + return out, nil +} + +var coderFloatPtr = pointerCoderFuncs{ + size: sizeFloatPtr, + marshal: appendFloatPtr, + unmarshal: consumeFloatPtr, + merge: mergeFloat32Ptr, +} + +// sizeFloatSlice returns the size of wire encoding a []float32 pointer as a repeated Float. +func sizeFloatSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float32Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSlice encodes a []float32 pointer as a repeated Float. +func appendFloatSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +// consumeFloatSlice wire decodes a []float32 pointer as a repeated Float. 
+func consumeFloatSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float32Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, math.Float32frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed32Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, math.Float32frombits(v)) + out.n = n + return out, nil +} + +var coderFloatSlice = pointerCoderFuncs{ + size: sizeFloatSlice, + marshal: appendFloatSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatPackedSlice returns the size of wire encoding a []float32 pointer as a packed repeated Float. +func sizeFloatPackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float32Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed32() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSlice encodes a []float32 pointer as a packed repeated Float. +func appendFloatPackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float32Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed32(b, math.Float32bits(v)) + } + return b, nil +} + +var coderFloatPackedSlice = pointerCoderFuncs{ + size: sizeFloatPackedSlice, + marshal: appendFloatPackedSlice, + unmarshal: consumeFloatSlice, + merge: mergeFloat32Slice, +} + +// sizeFloatValue returns the size of wire encoding a float32 value as a Float. +func sizeFloatValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed32() +} + +// appendFloatValue encodes a float32 value as a Float. +func appendFloatValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + return b, nil +} + +// consumeFloatValue decodes a float32 value as a Float. +func consumeFloatValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), out, nil +} + +var coderFloatValue = valueCoderFuncs{ + size: sizeFloatValue, + marshal: appendFloatValue, + unmarshal: consumeFloatValue, + merge: mergeScalarValue, +} + +// sizeFloatSliceValue returns the size of wire encoding a []float32 value as a repeated Float. +func sizeFloatSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed32()) + return size +} + +// appendFloatSliceValue encodes a []float32 value as a repeated Float. 
+func appendFloatSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +// consumeFloatSliceValue wire decodes a []float32 value as a repeated Float. +func consumeFloatSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed32Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + out.n = n + return listv, out, nil +} + +var coderFloatSliceValue = valueCoderFuncs{ + size: sizeFloatSliceValue, + marshal: appendFloatSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeFloatPackedSliceValue returns the size of wire encoding a []float32 value as a packed repeated Float. +func sizeFloatPackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed32() + return tagsize + protowire.SizeBytes(n) +} + +// appendFloatPackedSliceValue encodes a []float32 value as a packed repeated Float. +func appendFloatPackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed32() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + } + return b, nil +} + +var coderFloatPackedSliceValue = valueCoderFuncs{ + size: sizeFloatPackedSliceValue, + marshal: appendFloatPackedSliceValue, + unmarshal: consumeFloatSliceValue, + merge: mergeListValue, +} + +// sizeSfixed64 returns the size of wire encoding a int64 pointer as a Sfixed64. +func sizeSfixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64 wire encodes a int64 pointer as a Sfixed64. +func appendSfixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64 wire decodes a int64 pointer as a Sfixed64. 
+func consumeSfixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Int64() = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64 = pointerCoderFuncs{ + size: sizeSfixed64, + marshal: appendSfixed64, + unmarshal: consumeSfixed64, + merge: mergeInt64, +} + +// sizeSfixed64NoZero returns the size of wire encoding a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func sizeSfixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Int64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64NoZero wire encodes a int64 pointer as a Sfixed64. +// The zero value is not encoded. +func appendSfixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Int64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +var coderSfixed64NoZero = pointerCoderFuncs{ + size: sizeSfixed64NoZero, + marshal: appendSfixed64NoZero, + unmarshal: consumeSfixed64, + merge: mergeInt64NoZero, +} + +// sizeSfixed64Ptr returns the size of wire encoding a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func sizeSfixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Ptr wire encodes a *int64 pointer as a Sfixed64. +// It panics if the pointer is nil. +func appendSfixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Int64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + return b, nil +} + +// consumeSfixed64Ptr wire decodes a *int64 pointer as a Sfixed64. +func consumeSfixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Int64Ptr() + if *vp == nil { + *vp = new(int64) + } + **vp = int64(v) + out.n = n + return out, nil +} + +var coderSfixed64Ptr = pointerCoderFuncs{ + size: sizeSfixed64Ptr, + marshal: appendSfixed64Ptr, + unmarshal: consumeSfixed64Ptr, + merge: mergeInt64Ptr, +} + +// sizeSfixed64Slice returns the size of wire encoding a []int64 pointer as a repeated Sfixed64. +func sizeSfixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64Slice encodes a []int64 pointer as a repeated Sfixed64. +func appendSfixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +// consumeSfixed64Slice wire decodes a []int64 pointer as a repeated Sfixed64. 
+func consumeSfixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Int64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, int64(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, int64(v)) + out.n = n + return out, nil +} + +var coderSfixed64Slice = pointerCoderFuncs{ + size: sizeSfixed64Slice, + marshal: appendSfixed64Slice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64PackedSlice returns the size of wire encoding a []int64 pointer as a packed repeated Sfixed64. +func sizeSfixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Int64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSlice encodes a []int64 pointer as a packed repeated Sfixed64. +func appendSfixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Int64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, uint64(v)) + } + return b, nil +} + +var coderSfixed64PackedSlice = pointerCoderFuncs{ + size: sizeSfixed64PackedSlice, + marshal: appendSfixed64PackedSlice, + unmarshal: consumeSfixed64Slice, + merge: mergeInt64Slice, +} + +// sizeSfixed64Value returns the size of wire encoding a int64 value as a Sfixed64. +func sizeSfixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendSfixed64Value encodes a int64 value as a Sfixed64. +func appendSfixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + return b, nil +} + +// consumeSfixed64Value decodes a int64 value as a Sfixed64. +func consumeSfixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfInt64(int64(v)), out, nil +} + +var coderSfixed64Value = valueCoderFuncs{ + size: sizeSfixed64Value, + marshal: appendSfixed64Value, + unmarshal: consumeSfixed64Value, + merge: mergeScalarValue, +} + +// sizeSfixed64SliceValue returns the size of wire encoding a []int64 value as a repeated Sfixed64. +func sizeSfixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendSfixed64SliceValue encodes a []int64 value as a repeated Sfixed64. 
+func appendSfixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +// consumeSfixed64SliceValue wire decodes a []int64 value as a repeated Sfixed64. +func consumeSfixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + out.n = n + return listv, out, nil +} + +var coderSfixed64SliceValue = valueCoderFuncs{ + size: sizeSfixed64SliceValue, + marshal: appendSfixed64SliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeSfixed64PackedSliceValue returns the size of wire encoding a []int64 value as a packed repeated Sfixed64. +func sizeSfixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendSfixed64PackedSliceValue encodes a []int64 value as a packed repeated Sfixed64. +func appendSfixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, uint64(v.Int())) + } + return b, nil +} + +var coderSfixed64PackedSliceValue = valueCoderFuncs{ + size: sizeSfixed64PackedSliceValue, + marshal: appendSfixed64PackedSliceValue, + unmarshal: consumeSfixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64 returns the size of wire encoding a uint64 pointer as a Fixed64. +func sizeFixed64(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64 wire encodes a uint64 pointer as a Fixed64. +func appendFixed64(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64 wire decodes a uint64 pointer as a Fixed64. 
+func consumeFixed64(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Uint64() = v + out.n = n + return out, nil +} + +var coderFixed64 = pointerCoderFuncs{ + size: sizeFixed64, + marshal: appendFixed64, + unmarshal: consumeFixed64, + merge: mergeUint64, +} + +// sizeFixed64NoZero returns the size of wire encoding a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func sizeFixed64NoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Uint64() + if v == 0 { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64NoZero wire encodes a uint64 pointer as a Fixed64. +// The zero value is not encoded. +func appendFixed64NoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Uint64() + if v == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +var coderFixed64NoZero = pointerCoderFuncs{ + size: sizeFixed64NoZero, + marshal: appendFixed64NoZero, + unmarshal: consumeFixed64, + merge: mergeUint64NoZero, +} + +// sizeFixed64Ptr returns the size of wire encoding a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func sizeFixed64Ptr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendFixed64Ptr wire encodes a *uint64 pointer as a Fixed64. +// It panics if the pointer is nil. +func appendFixed64Ptr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Uint64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + return b, nil +} + +// consumeFixed64Ptr wire decodes a *uint64 pointer as a Fixed64. +func consumeFixed64Ptr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Uint64Ptr() + if *vp == nil { + *vp = new(uint64) + } + **vp = v + out.n = n + return out, nil +} + +var coderFixed64Ptr = pointerCoderFuncs{ + size: sizeFixed64Ptr, + marshal: appendFixed64Ptr, + unmarshal: consumeFixed64Ptr, + merge: mergeUint64Ptr, +} + +// sizeFixed64Slice returns the size of wire encoding a []uint64 pointer as a repeated Fixed64. +func sizeFixed64Slice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64Slice encodes a []uint64 pointer as a repeated Fixed64. +func appendFixed64Slice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +// consumeFixed64Slice wire decodes a []uint64 pointer as a repeated Fixed64. 
+func consumeFixed64Slice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Uint64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, v) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderFixed64Slice = pointerCoderFuncs{ + size: sizeFixed64Slice, + marshal: appendFixed64Slice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64PackedSlice returns the size of wire encoding a []uint64 pointer as a packed repeated Fixed64. +func sizeFixed64PackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Uint64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSlice encodes a []uint64 pointer as a packed repeated Fixed64. +func appendFixed64PackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Uint64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, v) + } + return b, nil +} + +var coderFixed64PackedSlice = pointerCoderFuncs{ + size: sizeFixed64PackedSlice, + marshal: appendFixed64PackedSlice, + unmarshal: consumeFixed64Slice, + merge: mergeUint64Slice, +} + +// sizeFixed64Value returns the size of wire encoding a uint64 value as a Fixed64. +func sizeFixed64Value(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendFixed64Value encodes a uint64 value as a Fixed64. +func appendFixed64Value(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + return b, nil +} + +// consumeFixed64Value decodes a uint64 value as a Fixed64. +func consumeFixed64Value(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfUint64(v), out, nil +} + +var coderFixed64Value = valueCoderFuncs{ + size: sizeFixed64Value, + marshal: appendFixed64Value, + unmarshal: consumeFixed64Value, + merge: mergeScalarValue, +} + +// sizeFixed64SliceValue returns the size of wire encoding a []uint64 value as a repeated Fixed64. +func sizeFixed64SliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendFixed64SliceValue encodes a []uint64 value as a repeated Fixed64. 
+func appendFixed64SliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +// consumeFixed64SliceValue wire decodes a []uint64 value as a repeated Fixed64. +func consumeFixed64SliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + out.n = n + return listv, out, nil +} + +var coderFixed64SliceValue = valueCoderFuncs{ + size: sizeFixed64SliceValue, + marshal: appendFixed64SliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeFixed64PackedSliceValue returns the size of wire encoding a []uint64 value as a packed repeated Fixed64. +func sizeFixed64PackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendFixed64PackedSliceValue encodes a []uint64 value as a packed repeated Fixed64. +func appendFixed64PackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, v.Uint()) + } + return b, nil +} + +var coderFixed64PackedSliceValue = valueCoderFuncs{ + size: sizeFixed64PackedSliceValue, + marshal: appendFixed64PackedSliceValue, + unmarshal: consumeFixed64SliceValue, + merge: mergeListValue, +} + +// sizeDouble returns the size of wire encoding a float64 pointer as a Double. +func sizeDouble(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + + return f.tagsize + protowire.SizeFixed64() +} + +// appendDouble wire encodes a float64 pointer as a Double. +func appendDouble(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float64() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDouble wire decodes a float64 pointer as a Double. 
+func consumeDouble(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Float64() = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDouble = pointerCoderFuncs{ + size: sizeDouble, + marshal: appendDouble, + unmarshal: consumeDouble, + merge: mergeFloat64, +} + +// sizeDoubleNoZero returns the size of wire encoding a float64 pointer as a Double. +// The zero value is not encoded. +func sizeDoubleNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return 0 + } + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoubleNoZero wire encodes a float64 pointer as a Double. +// The zero value is not encoded. +func appendDoubleNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Float64() + if v == 0 && !math.Signbit(float64(v)) { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +var coderDoubleNoZero = pointerCoderFuncs{ + size: sizeDoubleNoZero, + marshal: appendDoubleNoZero, + unmarshal: consumeDouble, + merge: mergeFloat64NoZero, +} + +// sizeDoublePtr returns the size of wire encoding a *float64 pointer as a Double. +// It panics if the pointer is nil. +func sizeDoublePtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + return f.tagsize + protowire.SizeFixed64() +} + +// appendDoublePtr wire encodes a *float64 pointer as a Double. +// It panics if the pointer is nil. +func appendDoublePtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.Float64Ptr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + return b, nil +} + +// consumeDoublePtr wire decodes a *float64 pointer as a Double. +func consumeDoublePtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.Float64Ptr() + if *vp == nil { + *vp = new(float64) + } + **vp = math.Float64frombits(v) + out.n = n + return out, nil +} + +var coderDoublePtr = pointerCoderFuncs{ + size: sizeDoublePtr, + marshal: appendDoublePtr, + unmarshal: consumeDoublePtr, + merge: mergeFloat64Ptr, +} + +// sizeDoubleSlice returns the size of wire encoding a []float64 pointer as a repeated Double. +func sizeDoubleSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float64Slice() + size = len(s) * (f.tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSlice encodes a []float64 pointer as a repeated Double. +func appendDoubleSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +// consumeDoubleSlice wire decodes a []float64 pointer as a repeated Double. 
+func consumeDoubleSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.Float64Slice() + if wtyp == protowire.BytesType { + s := *sp + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + s = append(s, math.Float64frombits(v)) + b = b[n:] + } + *sp = s + out.n = n + return out, nil + } + if wtyp != protowire.Fixed64Type { + return out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, math.Float64frombits(v)) + out.n = n + return out, nil +} + +var coderDoubleSlice = pointerCoderFuncs{ + size: sizeDoubleSlice, + marshal: appendDoubleSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoublePackedSlice returns the size of wire encoding a []float64 pointer as a packed repeated Double. +func sizeDoublePackedSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.Float64Slice() + if len(s) == 0 { + return 0 + } + n := len(s) * protowire.SizeFixed64() + return f.tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSlice encodes a []float64 pointer as a packed repeated Double. +func appendDoublePackedSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.Float64Slice() + if len(s) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := len(s) * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for _, v := range s { + b = protowire.AppendFixed64(b, math.Float64bits(v)) + } + return b, nil +} + +var coderDoublePackedSlice = pointerCoderFuncs{ + size: sizeDoublePackedSlice, + marshal: appendDoublePackedSlice, + unmarshal: consumeDoubleSlice, + merge: mergeFloat64Slice, +} + +// sizeDoubleValue returns the size of wire encoding a float64 value as a Double. +func sizeDoubleValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeFixed64() +} + +// appendDoubleValue encodes a float64 value as a Double. +func appendDoubleValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + return b, nil +} + +// consumeDoubleValue decodes a float64 value as a Double. +func consumeDoubleValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), out, nil +} + +var coderDoubleValue = valueCoderFuncs{ + size: sizeDoubleValue, + marshal: appendDoubleValue, + unmarshal: consumeDoubleValue, + merge: mergeScalarValue, +} + +// sizeDoubleSliceValue returns the size of wire encoding a []float64 value as a repeated Double. +func sizeDoubleSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + size = list.Len() * (tagsize + protowire.SizeFixed64()) + return size +} + +// appendDoubleSliceValue encodes a []float64 value as a repeated Double. 
+func appendDoubleSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +// consumeDoubleSliceValue wire decodes a []float64 value as a repeated Double. +func consumeDoubleSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + b = b[n:] + } + out.n = n + return listv, out, nil + } + if wtyp != protowire.Fixed64Type { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + out.n = n + return listv, out, nil +} + +var coderDoubleSliceValue = valueCoderFuncs{ + size: sizeDoubleSliceValue, + marshal: appendDoubleSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeDoublePackedSliceValue returns the size of wire encoding a []float64 value as a packed repeated Double. +func sizeDoublePackedSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return 0 + } + n := llen * protowire.SizeFixed64() + return tagsize + protowire.SizeBytes(n) +} + +// appendDoublePackedSliceValue encodes a []float64 value as a packed repeated Double. +func appendDoublePackedSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + llen := list.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, wiretag) + n := llen * protowire.SizeFixed64() + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + v := list.Get(i) + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + } + return b, nil +} + +var coderDoublePackedSliceValue = valueCoderFuncs{ + size: sizeDoublePackedSliceValue, + marshal: appendDoublePackedSliceValue, + unmarshal: consumeDoubleSliceValue, + merge: mergeListValue, +} + +// sizeString returns the size of wire encoding a string pointer as a String. +func sizeString(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.String() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendString wire encodes a string pointer as a String. +func appendString(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeString wire decodes a string pointer as a String. 
+func consumeString(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.String() = v + out.n = n + return out, nil +} + +var coderString = pointerCoderFuncs{ + size: sizeString, + marshal: appendString, + unmarshal: consumeString, + merge: mergeString, +} + +// appendStringValidateUTF8 wire encodes a string pointer as a String. +func appendStringValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValidateUTF8 wire decodes a string pointer as a String. +func consumeStringValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + *p.String() = v + out.n = n + return out, nil +} + +var coderStringValidateUTF8 = pointerCoderFuncs{ + size: sizeString, + marshal: appendStringValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeString, +} + +// sizeStringNoZero returns the size of wire encoding a string pointer as a String. +// The zero value is not encoded. +func sizeStringNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.String() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringNoZero wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +var coderStringNoZero = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZero, + unmarshal: consumeString, + merge: mergeStringNoZero, +} + +// appendStringNoZeroValidateUTF8 wire encodes a string pointer as a String. +// The zero value is not encoded. +func appendStringNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.String() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +var coderStringNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeStringNoZero, + marshal: appendStringNoZeroValidateUTF8, + unmarshal: consumeStringValidateUTF8, + merge: mergeStringNoZero, +} + +// sizeStringPtr returns the size of wire encoding a *string pointer as a String. +// It panics if the pointer is nil. +func sizeStringPtr(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := **p.StringPtr() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendStringPtr wire encodes a *string pointer as a String. +// It panics if the pointer is nil. 
+func appendStringPtr(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + return b, nil +} + +// consumeStringPtr wire decodes a *string pointer as a String. +func consumeStringPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = v + out.n = n + return out, nil +} + +var coderStringPtr = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtr, + unmarshal: consumeStringPtr, + merge: mergeStringPtr, +} + +// appendStringPtrValidateUTF8 wire encodes a *string pointer as a String. +// It panics if the pointer is nil. +func appendStringPtrValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := **p.StringPtr() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringPtrValidateUTF8 wire decodes a *string pointer as a String. +func consumeStringPtrValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + vp := p.StringPtr() + if *vp == nil { + *vp = new(string) + } + **vp = v + out.n = n + return out, nil +} + +var coderStringPtrValidateUTF8 = pointerCoderFuncs{ + size: sizeStringPtr, + marshal: appendStringPtrValidateUTF8, + unmarshal: consumeStringPtrValidateUTF8, + merge: mergeStringPtr, +} + +// sizeStringSlice returns the size of wire encoding a []string pointer as a repeated String. +func sizeStringSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.StringSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendStringSlice encodes a []string pointer as a repeated String. +func appendStringSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + } + return b, nil +} + +// consumeStringSlice wire decodes a []string pointer as a repeated String. +func consumeStringSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderStringSlice = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSlice, + unmarshal: consumeStringSlice, + merge: mergeStringSlice, +} + +// appendStringSliceValidateUTF8 encodes a []string pointer as a repeated String. 
+func appendStringSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.StringSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendString(b, v) + if !utf8.ValidString(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeStringSliceValidateUTF8 wire decodes a []string pointer as a repeated String. +func consumeStringSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.StringSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return out, errInvalidUTF8{} + } + *sp = append(*sp, v) + out.n = n + return out, nil +} + +var coderStringSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeStringSlice, + marshal: appendStringSliceValidateUTF8, + unmarshal: consumeStringSliceValidateUTF8, + merge: mergeStringSlice, +} + +// sizeStringValue returns the size of wire encoding a string value as a String. +func sizeStringValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.String())) +} + +// appendStringValue encodes a string value as a String. +func appendStringValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + return b, nil +} + +// consumeStringValue decodes a string value as a String. +func consumeStringValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValue = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValue, + unmarshal: consumeStringValue, + merge: mergeScalarValue, +} + +// appendStringValueValidateUTF8 encodes a string value as a String. +func appendStringValueValidateUTF8(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + if !utf8.ValidString(v.String()) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeStringValueValidateUTF8 decodes a string value as a String. +func consumeStringValueValidateUTF8(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + if !utf8.ValidString(v) { + return protoreflect.Value{}, out, errInvalidUTF8{} + } + out.n = n + return protoreflect.ValueOfString(string(v)), out, nil +} + +var coderStringValueValidateUTF8 = valueCoderFuncs{ + size: sizeStringValue, + marshal: appendStringValueValidateUTF8, + unmarshal: consumeStringValueValidateUTF8, + merge: mergeScalarValue, +} + +// sizeStringSliceValue returns the size of wire encoding a []string value as a repeated String. 
+func sizeStringSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.String())) + } + return size +} + +// appendStringSliceValue encodes a []string value as a repeated String. +func appendStringSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendString(b, v.String()) + } + return b, nil +} + +// consumeStringSliceValue wire decodes a []string value as a repeated String. +func consumeStringSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeString(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfString(string(v))) + out.n = n + return listv, out, nil +} + +var coderStringSliceValue = valueCoderFuncs{ + size: sizeStringSliceValue, + marshal: appendStringSliceValue, + unmarshal: consumeStringSliceValue, + merge: mergeListValue, +} + +// sizeBytes returns the size of wire encoding a []byte pointer as a Bytes. +func sizeBytes(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bytes() + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytes wire encodes a []byte pointer as a Bytes. +func appendBytes(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytes wire decodes a []byte pointer as a Bytes. +func consumeBytes(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bytes() = append(emptyBuf[:], v...) + out.n = n + return out, nil +} + +var coderBytes = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytes, + unmarshal: consumeBytes, + merge: mergeBytes, +} + +// appendBytesValidateUTF8 wire encodes a []byte pointer as a Bytes. +func appendBytesValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(emptyBuf[:], v...) 
+ out.n = n + return out, nil +} + +var coderBytesValidateUTF8 = pointerCoderFuncs{ + size: sizeBytes, + marshal: appendBytesValidateUTF8, + unmarshal: consumeBytesValidateUTF8, + merge: mergeBytes, +} + +// sizeBytesNoZero returns the size of wire encoding a []byte pointer as a Bytes. +// The zero value is not encoded. +func sizeBytesNoZero(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := *p.Bytes() + if len(v) == 0 { + return 0 + } + return f.tagsize + protowire.SizeBytes(len(v)) +} + +// appendBytesNoZero wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZero(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + return b, nil +} + +// consumeBytesNoZero wire decodes a []byte pointer as a Bytes. +// The zero value is not decoded. +func consumeBytesNoZero(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZero = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZero, + unmarshal: consumeBytesNoZero, + merge: mergeBytesNoZero, +} + +// appendBytesNoZeroValidateUTF8 wire encodes a []byte pointer as a Bytes. +// The zero value is not encoded. +func appendBytesNoZeroValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + v := *p.Bytes() + if len(v) == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + return b, nil +} + +// consumeBytesNoZeroValidateUTF8 wire decodes a []byte pointer as a Bytes. +func consumeBytesNoZeroValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *p.Bytes() = append(([]byte)(nil), v...) + out.n = n + return out, nil +} + +var coderBytesNoZeroValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesNoZero, + marshal: appendBytesNoZeroValidateUTF8, + unmarshal: consumeBytesNoZeroValidateUTF8, + merge: mergeBytesNoZero, +} + +// sizeBytesSlice returns the size of wire encoding a [][]byte pointer as a repeated Bytes. +func sizeBytesSlice(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + s := *p.BytesSlice() + for _, v := range s { + size += f.tagsize + protowire.SizeBytes(len(v)) + } + return size +} + +// appendBytesSlice encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSlice(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + } + return b, nil +} + +// consumeBytesSlice wire decodes a [][]byte pointer as a repeated Bytes. 
+func consumeBytesSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSlice = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSlice, + unmarshal: consumeBytesSlice, + merge: mergeBytesSlice, +} + +// appendBytesSliceValidateUTF8 encodes a [][]byte pointer as a repeated Bytes. +func appendBytesSliceValidateUTF8(b []byte, p pointer, f *coderFieldInfo, _ marshalOptions) ([]byte, error) { + s := *p.BytesSlice() + for _, v := range s { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendBytes(b, v) + if !utf8.Valid(v) { + return b, errInvalidUTF8{} + } + } + return b, nil +} + +// consumeBytesSliceValidateUTF8 wire decodes a [][]byte pointer as a repeated Bytes. +func consumeBytesSliceValidateUTF8(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + sp := p.BytesSlice() + if wtyp != protowire.BytesType { + return out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !utf8.Valid(v) { + return out, errInvalidUTF8{} + } + *sp = append(*sp, append(emptyBuf[:], v...)) + out.n = n + return out, nil +} + +var coderBytesSliceValidateUTF8 = pointerCoderFuncs{ + size: sizeBytesSlice, + marshal: appendBytesSliceValidateUTF8, + unmarshal: consumeBytesSliceValidateUTF8, + merge: mergeBytesSlice, +} + +// sizeBytesValue returns the size of wire encoding a []byte value as a Bytes. +func sizeBytesValue(v protoreflect.Value, tagsize int, _ marshalOptions) int { + return tagsize + protowire.SizeBytes(len(v.Bytes())) +} + +// appendBytesValue encodes a []byte value as a Bytes. +func appendBytesValue(b []byte, v protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + return b, nil +} + +// consumeBytesValue decodes a []byte value as a Bytes. +func consumeBytesValue(b []byte, _ protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + out.n = n + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), out, nil +} + +var coderBytesValue = valueCoderFuncs{ + size: sizeBytesValue, + marshal: appendBytesValue, + unmarshal: consumeBytesValue, + merge: mergeBytesValue, +} + +// sizeBytesSliceValue returns the size of wire encoding a [][]byte value as a repeated Bytes. +func sizeBytesSliceValue(listv protoreflect.Value, tagsize int, _ marshalOptions) (size int) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + size += tagsize + protowire.SizeBytes(len(v.Bytes())) + } + return size +} + +// appendBytesSliceValue encodes a [][]byte value as a repeated Bytes. 
+func appendBytesSliceValue(b []byte, listv protoreflect.Value, wiretag uint64, _ marshalOptions) ([]byte, error) { + list := listv.List() + for i, llen := 0, list.Len(); i < llen; i++ { + v := list.Get(i) + b = protowire.AppendVarint(b, wiretag) + b = protowire.AppendBytes(b, v.Bytes()) + } + return b, nil +} + +// consumeBytesSliceValue wire decodes a [][]byte value as a repeated Bytes. +func consumeBytesSliceValue(b []byte, listv protoreflect.Value, _ protowire.Number, wtyp protowire.Type, _ unmarshalOptions) (_ protoreflect.Value, out unmarshalOutput, err error) { + list := listv.List() + if wtyp != protowire.BytesType { + return protoreflect.Value{}, out, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return protoreflect.Value{}, out, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + out.n = n + return listv, out, nil +} + +var coderBytesSliceValue = valueCoderFuncs{ + size: sizeBytesSliceValue, + marshal: appendBytesSliceValue, + unmarshal: consumeBytesSliceValue, + merge: mergeBytesListValue, +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go new file mode 100644 index 0000000..35a67c2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -0,0 +1,388 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "errors" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapInfo struct { + goType reflect.Type + keyWiretag uint64 + valWiretag uint64 + keyFuncs valueCoderFuncs + valFuncs valueCoderFuncs + keyZero pref.Value + keyKind pref.Kind + conv *mapConverter +} + +func encoderFuncsForMap(fd pref.FieldDescriptor, ft reflect.Type) (valueMessage *MessageInfo, funcs pointerCoderFuncs) { + // TODO: Consider generating specialized map coders. 
+ keyField := fd.MapKey() + valField := fd.MapValue() + keyWiretag := protowire.EncodeTag(1, wireTypes[keyField.Kind()]) + valWiretag := protowire.EncodeTag(2, wireTypes[valField.Kind()]) + keyFuncs := encoderFuncsForValue(keyField) + valFuncs := encoderFuncsForValue(valField) + conv := newMapConverter(ft, fd) + + mapi := &mapInfo{ + goType: ft, + keyWiretag: keyWiretag, + valWiretag: valWiretag, + keyFuncs: keyFuncs, + valFuncs: valFuncs, + keyZero: keyField.Default(), + keyKind: keyField.Kind(), + conv: conv, + } + if valField.Kind() == pref.MessageKind { + valueMessage = getMessageInfo(ft.Elem()) + } + + funcs = pointerCoderFuncs{ + size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { + return sizeMap(p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendMap(b, p.AsValueOf(ft).Elem(), mapi, f, opts) + }, + unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { + mp := p.AsValueOf(ft) + if mp.Elem().IsNil() { + mp.Elem().Set(reflect.MakeMap(mapi.goType)) + } + if f.mi == nil { + return consumeMap(b, mp.Elem(), wtyp, mapi, f, opts) + } else { + return consumeMapOfMessage(b, mp.Elem(), wtyp, mapi, f, opts) + } + }, + } + switch valField.Kind() { + case pref.MessageKind: + funcs.merge = mergeMapOfMessage + case pref.BytesKind: + funcs.merge = mergeMapOfBytes + default: + funcs.merge = mergeMap + } + if valFuncs.isInit != nil { + funcs.isInit = func(p pointer, f *coderFieldInfo) error { + return isInitMap(p.AsValueOf(ft).Elem(), mapi, f) + } + } + return valueMessage, funcs +} + +const ( + mapKeyTagSize = 1 // field 1, tag size 1. + mapValTagSize = 1 // field 2, tag size 2. 
+) + +func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) int { + if mapv.Len() == 0 { + return 0 + } + n := 0 + iter := mapRange(mapv) + for iter.Next() { + key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() + keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + var valSize int + value := mapi.conv.valConv.PBValueOf(iter.Value()) + if f.mi == nil { + valSize = mapi.valFuncs.size(value, mapValTagSize, opts) + } else { + p := pointerOfValue(iter.Value()) + valSize += mapValTagSize + valSize += protowire.SizeBytes(f.mi.sizePointer(p, opts)) + } + n += f.tagsize + protowire.SizeBytes(keySize+valSize) + } + return n +} + +func consumeMap(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var ( + key = mapi.keyZero + val = mapi.conv.valConv.New() + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return out, errors.New("invalid field number") + } + b = b[n:] + err := errUnknown + switch num { + case 1: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case 2: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.valFuncs.unmarshal(b, val, num, wtyp, opts) + if err != nil { + break + } + val = v + n = o.n + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), mapi.conv.valConv.GoValueOf(val)) + out.n = n + return out, nil +} + +func consumeMapOfMessage(b []byte, mapv reflect.Value, wtyp protowire.Type, mapi *mapInfo, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.BytesType { + return out, errUnknown + } + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var ( + key = mapi.keyZero + val = reflect.New(f.mi.GoReflectType.Elem()) + ) + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return out, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return out, errors.New("invalid field number") + } + b = b[n:] + err := errUnknown + switch num { + case 1: + var v pref.Value + var o unmarshalOutput + v, o, err = mapi.keyFuncs.unmarshal(b, key, num, wtyp, opts) + if err != nil { + break + } + key = v + n = o.n + case 2: + if wtyp != protowire.BytesType { + break + } + var v []byte + v, n = protowire.ConsumeBytes(b) + if n < 0 { + return out, protowire.ParseError(n) + } + var o unmarshalOutput + o, err = f.mi.unmarshalPointer(v, pointerOfValue(val), 0, opts) + if o.initialized { + // Consider this map item initialized so long as we see + // an initialized value. 
+ out.initialized = true + } + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + } else if err != nil { + return out, err + } + b = b[n:] + } + mapv.SetMapIndex(mapi.conv.keyConv.GoValueOf(key), val) + out.n = n + return out, nil +} + +func appendMapItem(b []byte, keyrv, valrv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if f.mi == nil { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := mapi.conv.valConv.PBValueOf(valrv) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapi.valFuncs.size(val, mapValTagSize, opts) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + return mapi.valFuncs.marshal(b, val, mapi.valWiretag, opts) + } else { + key := mapi.conv.keyConv.PBValueOf(keyrv).MapKey() + val := pointerOfValue(valrv) + valSize := f.mi.sizePointer(val, opts) + size := 0 + size += mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) + size += mapValTagSize + protowire.SizeBytes(valSize) + b = protowire.AppendVarint(b, uint64(size)) + b, err := mapi.keyFuncs.marshal(b, key.Value(), mapi.keyWiretag, opts) + if err != nil { + return nil, err + } + b = protowire.AppendVarint(b, mapi.valWiretag) + b = protowire.AppendVarint(b, uint64(valSize)) + return f.mi.marshalAppendPointer(b, val, opts) + } +} + +func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if mapv.Len() == 0 { + return b, nil + } + if opts.Deterministic() { + return appendMapDeterministic(b, mapv, mapi, f, opts) + } + iter := mapRange(mapv) + for iter.Next() { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, iter.Key(), iter.Value(), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func appendMapDeterministic(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + keys := mapv.MapKeys() + sort.Slice(keys, func(i, j int) bool { + switch keys[i].Kind() { + case reflect.Bool: + return !keys[i].Bool() && keys[j].Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return keys[i].Int() < keys[j].Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return keys[i].Uint() < keys[j].Uint() + case reflect.Float32, reflect.Float64: + return keys[i].Float() < keys[j].Float() + case reflect.String: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keys[i].Kind().String()) + } + }) + for _, key := range keys { + var err error + b = protowire.AppendVarint(b, f.wiretag) + b, err = appendMapItem(b, key, mapv.MapIndex(key), mapi, f, opts) + if err != nil { + return b, err + } + } + return b, nil +} + +func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { + if mi := f.mi; mi != nil { + mi.init() + if !mi.needsInitCheck { + return nil + } + iter := mapRange(mapv) + for iter.Next() { + val := pointerOfValue(iter.Value()) + if err := mi.checkInitializedPointer(val); err != nil { + return err + } + } + } else { + iter := mapRange(mapv) + for iter.Next() { + val := mapi.conv.valConv.PBValueOf(iter.Value()) + if err := mapi.valFuncs.isInit(val); err != nil { + return err + } + } + } + return nil +} + +func mergeMap(dst, src pointer, f *coderFieldInfo, 
opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), iter.Value()) + } +} + +func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) + } +} + +func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + dstm := dst.AsValueOf(f.ft).Elem() + srcm := src.AsValueOf(f.ft).Elem() + if srcm.Len() == 0 { + return + } + if dstm.IsNil() { + dstm.Set(reflect.MakeMap(f.ft)) + } + iter := mapRange(srcm) + for iter.Next() { + val := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(val), pointerOfValue(iter.Value()), opts) + } else { + opts.Merge(asMessage(val), asMessage(iter.Value())) + } + dstm.SetMapIndex(iter.Key(), val) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go new file mode 100644 index 0000000..2706bb6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go @@ -0,0 +1,37 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.12 + +package impl + +import "reflect" + +type mapIter struct { + v reflect.Value + keys []reflect.Value +} + +// mapRange provides a less-efficient equivalent to +// the Go 1.12 reflect.Value.MapRange method. +func mapRange(v reflect.Value) *mapIter { + return &mapIter{v: v} +} + +func (i *mapIter) Next() bool { + if i.keys == nil { + i.keys = i.v.MapKeys() + } else { + i.keys = i.keys[1:] + } + return len(i.keys) > 0 +} + +func (i *mapIter) Key() reflect.Value { + return i.keys[0] +} + +func (i *mapIter) Value() reflect.Value { + return i.v.MapIndex(i.keys[0]) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go new file mode 100644 index 0000000..1533ef6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.12 + +package impl + +import "reflect" + +func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go new file mode 100644 index 0000000..0e176d5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -0,0 +1,159 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/fieldsort" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// coderMessageInfo contains per-message information used by the fast-path functions. +// This is a different type from MessageInfo to keep MessageInfo as general-purpose as +// possible. +type coderMessageInfo struct { + methods piface.Methods + + orderedCoderFields []*coderFieldInfo + denseCoderFields []*coderFieldInfo + coderFields map[protowire.Number]*coderFieldInfo + sizecacheOffset offset + unknownOffset offset + extensionOffset offset + needsInitCheck bool + isMessageSet bool + numRequiredFields uint8 +} + +type coderFieldInfo struct { + funcs pointerCoderFuncs // fast-path per-field functions + mi *MessageInfo // field's message + ft reflect.Type + validation validationInfo // information used by message validation + num pref.FieldNumber // field number + offset offset // struct field offset + wiretag uint64 // field tag (number + wire type) + tagsize int // size of the varint-encoded tag + isPointer bool // true if IsNil may be called on the struct field + isRequired bool // true if field is required +} + +func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { + mi.sizecacheOffset = si.sizecacheOffset + mi.unknownOffset = si.unknownOffset + mi.extensionOffset = si.extensionOffset + + mi.coderFields = make(map[protowire.Number]*coderFieldInfo) + fields := mi.Desc.Fields() + preallocFields := make([]coderFieldInfo, fields.Len()) + for i := 0; i < fields.Len(); i++ { + fd := fields.Get(i) + + fs := si.fieldsByNumber[fd.Number()] + isOneof := fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic() + if isOneof { + fs = si.oneofsByName[fd.ContainingOneof().Name()] + } + ft := fs.Type + var wiretag uint64 + if !fd.IsPacked() { + wiretag = protowire.EncodeTag(fd.Number(), wireTypes[fd.Kind()]) + } else { + wiretag = protowire.EncodeTag(fd.Number(), protowire.BytesType) + } + var fieldOffset offset + var funcs pointerCoderFuncs + var childMessage *MessageInfo + switch { + case isOneof: + fieldOffset = offsetOf(fs, mi.Exporter) + case fd.IsWeak(): + fieldOffset = si.weakOffset + funcs = makeWeakMessageFieldCoder(fd) + default: + fieldOffset = offsetOf(fs, mi.Exporter) + childMessage, funcs = fieldCoder(fd, ft) + } + cf := &preallocFields[i] + *cf = coderFieldInfo{ + num: fd.Number(), + offset: fieldOffset, + wiretag: wiretag, + ft: ft, + tagsize: protowire.SizeVarint(wiretag), + funcs: funcs, + mi: childMessage, + validation: newFieldValidationInfo(mi, si, fd, ft), + isPointer: fd.Cardinality() == pref.Repeated || fd.HasPresence(), + isRequired: fd.Cardinality() == pref.Required, + } + mi.orderedCoderFields = append(mi.orderedCoderFields, cf) + mi.coderFields[cf.num] = cf + } + for i, oneofs := 0, mi.Desc.Oneofs(); i < oneofs.Len(); i++ { + if od := oneofs.Get(i); !od.IsSynthetic() { + mi.initOneofFieldCoders(od, si) + } + } + if messageset.IsMessageSet(mi.Desc) { + if !mi.extensionOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no extensions field", mi.Desc.FullName())) + } + if !mi.unknownOffset.IsValid() { + panic(fmt.Sprintf("%v: MessageSet with no unknown field", mi.Desc.FullName())) + } + mi.isMessageSet = true + } + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + return mi.orderedCoderFields[i].num 
< mi.orderedCoderFields[j].num + }) + + var maxDense pref.FieldNumber + for _, cf := range mi.orderedCoderFields { + if cf.num >= 16 && cf.num >= 2*maxDense { + break + } + maxDense = cf.num + } + mi.denseCoderFields = make([]*coderFieldInfo, maxDense+1) + for _, cf := range mi.orderedCoderFields { + if int(cf.num) >= len(mi.denseCoderFields) { + break + } + mi.denseCoderFields[cf.num] = cf + } + + // To preserve compatibility with historic wire output, marshal oneofs last. + if mi.Desc.Oneofs().Len() > 0 { + sort.Slice(mi.orderedCoderFields, func(i, j int) bool { + fi := fields.ByNumber(mi.orderedCoderFields[i].num) + fj := fields.ByNumber(mi.orderedCoderFields[j].num) + return fieldsort.Less(fi, fj) + }) + } + + mi.needsInitCheck = needsInitCheck(mi.Desc) + if mi.methods.Marshal == nil && mi.methods.Size == nil { + mi.methods.Flags |= piface.SupportMarshalDeterministic + mi.methods.Marshal = mi.marshal + mi.methods.Size = mi.size + } + if mi.methods.Unmarshal == nil { + mi.methods.Flags |= piface.SupportUnmarshalDiscardUnknown + mi.methods.Unmarshal = mi.unmarshal + } + if mi.methods.CheckInitialized == nil { + mi.methods.CheckInitialized = mi.checkInitialized + } + if mi.methods.Merge == nil { + mi.methods.Merge = mi.merge + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go new file mode 100644 index 0000000..cfb68e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_messageset.go @@ -0,0 +1,120 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" +) + +func sizeMessageSet(mi *MessageInfo, p pointer, opts marshalOptions) (size int) { + if !flags.ProtoLegacy { + return 0 + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + for _, x := range ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + num, _ := protowire.DecodeTag(xi.wiretag) + size += messageset.SizeField(num) + size += xi.funcs.size(x.Value(), protowire.SizeTag(messageset.FieldMessage), opts) + } + + unknown := *p.Apply(mi.unknownOffset).Bytes() + size += messageset.SizeUnknown(unknown) + + return size +} + +func marshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts marshalOptions) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + + ext := *p.Apply(mi.extensionOffset).Extensions() + switch len(ext) { + case 0: + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + for _, x := range ext { + var err error + b, err = marshalMessageSetField(mi, b, x, opts) + if err != nil { + return b, err + } + } + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. 
+ keys := make([]int, 0, len(ext)) + for k := range ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + var err error + b, err = marshalMessageSetField(mi, b, ext[int32(k)], opts) + if err != nil { + return b, err + } + } + } + + unknown := *p.Apply(mi.unknownOffset).Bytes() + b, err := messageset.AppendUnknown(b, unknown) + if err != nil { + return b, err + } + + return b, nil +} + +func marshalMessageSetField(mi *MessageInfo, b []byte, x ExtensionField, opts marshalOptions) ([]byte, error) { + xi := getExtensionFieldInfo(x.Type()) + num, _ := protowire.DecodeTag(xi.wiretag) + b = messageset.AppendFieldStart(b, num) + b, err := xi.funcs.marshal(b, x.Value(), protowire.EncodeTag(messageset.FieldMessage, protowire.BytesType), opts) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func unmarshalMessageSet(mi *MessageInfo, b []byte, p pointer, opts unmarshalOptions) (out unmarshalOutput, err error) { + if !flags.ProtoLegacy { + return out, errors.New("no support for message_set_wire_format") + } + + ep := p.Apply(mi.extensionOffset).Extensions() + if *ep == nil { + *ep = make(map[int32]ExtensionField) + } + ext := *ep + unknown := p.Apply(mi.unknownOffset).Bytes() + initialized := true + err = messageset.Unmarshal(b, true, func(num protowire.Number, v []byte) error { + o, err := mi.unmarshalExtension(v, num, protowire.BytesType, ext, opts) + if err == errUnknown { + *unknown = protowire.AppendTag(*unknown, num, protowire.BytesType) + *unknown = append(*unknown, v...) + return nil + } + if !o.initialized { + initialized = false + } + return err + }) + out.n = len(b) + out.initialized = initialized + return out, err +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go new file mode 100644 index 0000000..86f7dc3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_reflect.go @@ -0,0 +1,209 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build purego appengine + +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/encoding/protowire" +) + +func sizeEnum(p pointer, f *coderFieldInfo, _ marshalOptions) (size int) { + v := p.v.Elem().Int() + return f.tagsize + protowire.SizeVarint(uint64(v)) +} + +func appendEnum(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + v := p.v.Elem().Int() + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(v)) + return b, nil +} + +func consumeEnum(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, _ unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + p.v.Elem().SetInt(int64(v)) + out.n = n + return out, nil +} + +func mergeEnum(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(src.v.Elem()) +} + +var coderEnum = pointerCoderFuncs{ + size: sizeEnum, + marshal: appendEnum, + unmarshal: consumeEnum, + merge: mergeEnum, +} + +func sizeEnumNoZero(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + if p.v.Elem().Int() == 0 { + return 0 + } + return sizeEnum(p, f, opts) +} + +func appendEnumNoZero(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + if p.v.Elem().Int() == 0 { + return b, nil + } + return appendEnum(b, p, f, opts) +} + +func mergeEnumNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if src.v.Elem().Int() != 0 { + dst.v.Elem().Set(src.v.Elem()) + } +} + +var coderEnumNoZero = pointerCoderFuncs{ + size: sizeEnumNoZero, + marshal: appendEnumNoZero, + unmarshal: consumeEnum, + merge: mergeEnumNoZero, +} + +func sizeEnumPtr(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + return sizeEnum(pointer{p.v.Elem()}, f, opts) +} + +func appendEnumPtr(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + return appendEnum(b, pointer{p.v.Elem()}, f, opts) +} + +func consumeEnumPtr(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + if wtyp != protowire.VarintType { + return out, errUnknown + } + if p.v.Elem().IsNil() { + p.v.Elem().Set(reflect.New(p.v.Elem().Type().Elem())) + } + return consumeEnum(b, pointer{p.v.Elem()}, wtyp, f, opts) +} + +func mergeEnumPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + if !src.v.Elem().IsNil() { + v := reflect.New(dst.v.Type().Elem().Elem()) + v.Elem().Set(src.v.Elem().Elem()) + dst.v.Elem().Set(v) + } +} + +var coderEnumPtr = pointerCoderFuncs{ + size: sizeEnumPtr, + marshal: appendEnumPtr, + unmarshal: consumeEnumPtr, + merge: mergeEnumPtr, +} + +func sizeEnumSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + size += protowire.SizeVarint(uint64(s.Index(i).Int())) + f.tagsize + } + return size +} + +func appendEnumSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + for i, llen := 0, s.Len(); i < llen; i++ { + b = protowire.AppendVarint(b, f.wiretag) + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +func consumeEnumSlice(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (out unmarshalOutput, err error) { + s := p.v.Elem() + if wtyp == protowire.BytesType { + b, n := protowire.ConsumeBytes(b) + if n < 0 { + return 
out, protowire.ParseError(n) + } + for len(b) > 0 { + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + b = b[n:] + } + out.n = n + return out, nil + } + if wtyp != protowire.VarintType { + return out, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + rv := reflect.New(s.Type().Elem()).Elem() + rv.SetInt(int64(v)) + s.Set(reflect.Append(s, rv)) + out.n = n + return out, nil +} + +func mergeEnumSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + dst.v.Elem().Set(reflect.AppendSlice(dst.v.Elem(), src.v.Elem())) +} + +var coderEnumSlice = pointerCoderFuncs{ + size: sizeEnumSlice, + marshal: appendEnumSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} + +func sizeEnumPackedSlice(p pointer, f *coderFieldInfo, opts marshalOptions) (size int) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return 0 + } + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + return f.tagsize + protowire.SizeBytes(n) +} + +func appendEnumPackedSlice(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { + s := p.v.Elem() + llen := s.Len() + if llen == 0 { + return b, nil + } + b = protowire.AppendVarint(b, f.wiretag) + n := 0 + for i := 0; i < llen; i++ { + n += protowire.SizeVarint(uint64(s.Index(i).Int())) + } + b = protowire.AppendVarint(b, uint64(n)) + for i := 0; i < llen; i++ { + b = protowire.AppendVarint(b, uint64(s.Index(i).Int())) + } + return b, nil +} + +var coderEnumPackedSlice = pointerCoderFuncs{ + size: sizeEnumPackedSlice, + marshal: appendEnumPackedSlice, + unmarshal: consumeEnumSlice, + merge: mergeEnumSlice, +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go new file mode 100644 index 0000000..e899712 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_tables.go @@ -0,0 +1,557 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// pointerCoderFuncs is a set of pointer encoding functions. +type pointerCoderFuncs struct { + mi *MessageInfo + size func(p pointer, f *coderFieldInfo, opts marshalOptions) int + marshal func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) + isInit func(p pointer, f *coderFieldInfo) error + merge func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) +} + +// valueCoderFuncs is a set of protoreflect.Value encoding functions. 
+type valueCoderFuncs struct { + size func(v pref.Value, tagsize int, opts marshalOptions) int + marshal func(b []byte, v pref.Value, wiretag uint64, opts marshalOptions) ([]byte, error) + unmarshal func(b []byte, v pref.Value, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (pref.Value, unmarshalOutput, error) + isInit func(v pref.Value) error + merge func(dst, src pref.Value, opts mergeOptions) pref.Value +} + +// fieldCoder returns pointer functions for a field, used for operating on +// struct fields. +func fieldCoder(fd pref.FieldDescriptor, ft reflect.Type) (*MessageInfo, pointerCoderFuncs) { + switch { + case fd.IsMap(): + return encoderFuncsForMap(fd, ft) + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + // Repeated fields (not packed). + if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Slice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Slice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Slice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Slice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Slice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Slice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Slice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Slice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Slice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Slice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleSlice + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringSliceValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesSliceValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringSlice + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesSlice + } + case pref.MessageKind: + return getMessageInfo(ft), makeMessageSliceFieldCoder(fd, ft) + case pref.GroupKind: + return getMessageInfo(ft), makeGroupSliceFieldCoder(fd, ft) + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + // Packed repeated fields. + // + // Only repeated fields of primitive numeric types + // (Varint, Fixed32, or Fixed64 wire type) can be packed. 
+ if ft.Kind() != reflect.Slice { + break + } + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPackedSlice + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPackedSlice + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32PackedSlice + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32PackedSlice + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32PackedSlice + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64PackedSlice + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64PackedSlice + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64PackedSlice + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32PackedSlice + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32PackedSlice + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPackedSlice + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64PackedSlice + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64PackedSlice + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePackedSlice + } + } + case fd.Kind() == pref.MessageKind: + return getMessageInfo(ft), makeMessageFieldCoder(fd, ft) + case fd.Kind() == pref.GroupKind: + return getMessageInfo(ft), makeGroupFieldCoder(fd, ft) + case fd.Syntax() == pref.Proto3 && fd.ContainingOneof() == nil: + // Populated oneof fields always encode even if set to the zero value, + // which normally are not encoded in proto3. 
+ switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolNoZero + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumNoZero + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32NoZero + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32NoZero + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32NoZero + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64NoZero + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64NoZero + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64NoZero + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32NoZero + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32NoZero + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatNoZero + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64NoZero + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64NoZero + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoubleNoZero + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringNoZeroValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesNoZeroValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringNoZero + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytesNoZero + } + } + case ft.Kind() == reflect.Ptr: + ft := ft.Elem() + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBoolPtr + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnumPtr + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32Ptr + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32Ptr + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32Ptr + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64Ptr + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64Ptr + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64Ptr + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32Ptr + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32Ptr + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloatPtr + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64Ptr + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64Ptr + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDoublePtr + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringPtrValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + case 
pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderStringPtr + } + } + default: + switch fd.Kind() { + case pref.BoolKind: + if ft.Kind() == reflect.Bool { + return nil, coderBool + } + case pref.EnumKind: + if ft.Kind() == reflect.Int32 { + return nil, coderEnum + } + case pref.Int32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderInt32 + } + case pref.Sint32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSint32 + } + case pref.Uint32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderUint32 + } + case pref.Int64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderInt64 + } + case pref.Sint64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSint64 + } + case pref.Uint64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderUint64 + } + case pref.Sfixed32Kind: + if ft.Kind() == reflect.Int32 { + return nil, coderSfixed32 + } + case pref.Fixed32Kind: + if ft.Kind() == reflect.Uint32 { + return nil, coderFixed32 + } + case pref.FloatKind: + if ft.Kind() == reflect.Float32 { + return nil, coderFloat + } + case pref.Sfixed64Kind: + if ft.Kind() == reflect.Int64 { + return nil, coderSfixed64 + } + case pref.Fixed64Kind: + if ft.Kind() == reflect.Uint64 { + return nil, coderFixed64 + } + case pref.DoubleKind: + if ft.Kind() == reflect.Float64 { + return nil, coderDouble + } + case pref.StringKind: + if ft.Kind() == reflect.String && strs.EnforceUTF8(fd) { + return nil, coderStringValidateUTF8 + } + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 && strs.EnforceUTF8(fd) { + return nil, coderBytesValidateUTF8 + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + case pref.BytesKind: + if ft.Kind() == reflect.String { + return nil, coderString + } + if ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 { + return nil, coderBytes + } + } + } + panic(fmt.Sprintf("invalid type: no encoder for %v %v %v/%v", fd.FullName(), fd.Cardinality(), fd.Kind(), ft)) +} + +// encoderFuncsForValue returns value functions for a field, used for +// extension values and map encoding. +func encoderFuncsForValue(fd pref.FieldDescriptor) valueCoderFuncs { + switch { + case fd.Cardinality() == pref.Repeated && !fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolSliceValue + case pref.EnumKind: + return coderEnumSliceValue + case pref.Int32Kind: + return coderInt32SliceValue + case pref.Sint32Kind: + return coderSint32SliceValue + case pref.Uint32Kind: + return coderUint32SliceValue + case pref.Int64Kind: + return coderInt64SliceValue + case pref.Sint64Kind: + return coderSint64SliceValue + case pref.Uint64Kind: + return coderUint64SliceValue + case pref.Sfixed32Kind: + return coderSfixed32SliceValue + case pref.Fixed32Kind: + return coderFixed32SliceValue + case pref.FloatKind: + return coderFloatSliceValue + case pref.Sfixed64Kind: + return coderSfixed64SliceValue + case pref.Fixed64Kind: + return coderFixed64SliceValue + case pref.DoubleKind: + return coderDoubleSliceValue + case pref.StringKind: + // We don't have a UTF-8 validating coder for repeated string fields. + // Value coders are used for extensions and maps. + // Extensions are never proto3, and maps never contain lists. 
+ return coderStringSliceValue + case pref.BytesKind: + return coderBytesSliceValue + case pref.MessageKind: + return coderMessageSliceValue + case pref.GroupKind: + return coderGroupSliceValue + } + case fd.Cardinality() == pref.Repeated && fd.IsPacked(): + switch fd.Kind() { + case pref.BoolKind: + return coderBoolPackedSliceValue + case pref.EnumKind: + return coderEnumPackedSliceValue + case pref.Int32Kind: + return coderInt32PackedSliceValue + case pref.Sint32Kind: + return coderSint32PackedSliceValue + case pref.Uint32Kind: + return coderUint32PackedSliceValue + case pref.Int64Kind: + return coderInt64PackedSliceValue + case pref.Sint64Kind: + return coderSint64PackedSliceValue + case pref.Uint64Kind: + return coderUint64PackedSliceValue + case pref.Sfixed32Kind: + return coderSfixed32PackedSliceValue + case pref.Fixed32Kind: + return coderFixed32PackedSliceValue + case pref.FloatKind: + return coderFloatPackedSliceValue + case pref.Sfixed64Kind: + return coderSfixed64PackedSliceValue + case pref.Fixed64Kind: + return coderFixed64PackedSliceValue + case pref.DoubleKind: + return coderDoublePackedSliceValue + } + default: + switch fd.Kind() { + default: + case pref.BoolKind: + return coderBoolValue + case pref.EnumKind: + return coderEnumValue + case pref.Int32Kind: + return coderInt32Value + case pref.Sint32Kind: + return coderSint32Value + case pref.Uint32Kind: + return coderUint32Value + case pref.Int64Kind: + return coderInt64Value + case pref.Sint64Kind: + return coderSint64Value + case pref.Uint64Kind: + return coderUint64Value + case pref.Sfixed32Kind: + return coderSfixed32Value + case pref.Fixed32Kind: + return coderFixed32Value + case pref.FloatKind: + return coderFloatValue + case pref.Sfixed64Kind: + return coderSfixed64Value + case pref.Fixed64Kind: + return coderFixed64Value + case pref.DoubleKind: + return coderDoubleValue + case pref.StringKind: + if strs.EnforceUTF8(fd) { + return coderStringValueValidateUTF8 + } + return coderStringValue + case pref.BytesKind: + return coderBytesValue + case pref.MessageKind: + return coderMessageValue + case pref.GroupKind: + return coderGroupValue + } + } + panic(fmt.Sprintf("invalid field: no encoder for %v %v %v", fd.FullName(), fd.Cardinality(), fd.Kind())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go new file mode 100644 index 0000000..e118af1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_unsafe.go @@ -0,0 +1,17 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package impl + +// When using unsafe pointers, we can just treat enum values as int32s. + +var ( + coderEnumNoZero = coderInt32NoZero + coderEnum = coderInt32 + coderEnumPtr = coderInt32Ptr + coderEnumSlice = coderInt32Slice + coderEnumPackedSlice = coderInt32PackedSlice +) diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert.go b/vendor/google.golang.org/protobuf/internal/impl/convert.go new file mode 100644 index 0000000..36a90df --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert.go @@ -0,0 +1,467 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// unwrapper unwraps the value to the underlying value. +// This is implemented by List and Map. +type unwrapper interface { + protoUnwrap() interface{} +} + +// A Converter coverts to/from Go reflect.Value types and protobuf protoreflect.Value types. +type Converter interface { + // PBValueOf converts a reflect.Value to a protoreflect.Value. + PBValueOf(reflect.Value) pref.Value + + // GoValueOf converts a protoreflect.Value to a reflect.Value. + GoValueOf(pref.Value) reflect.Value + + // IsValidPB returns whether a protoreflect.Value is compatible with this type. + IsValidPB(pref.Value) bool + + // IsValidGo returns whether a reflect.Value is compatible with this type. + IsValidGo(reflect.Value) bool + + // New returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns a new mutable value. + New() pref.Value + + // Zero returns a new field value. + // For scalars, it returns the default value of the field. + // For composite types, it returns an immutable, empty value. + Zero() pref.Value +} + +// NewConverter matches a Go type with a protobuf field and returns a Converter +// that converts between the two. Enums must be a named int32 kind that +// implements protoreflect.Enum, and messages must be pointer to a named +// struct type that implements protoreflect.ProtoMessage. +// +// This matcher deliberately supports a wider range of Go types than what +// protoc-gen-go historically generated to be able to automatically wrap some +// v1 messages generated by other forks of protoc-gen-go. +func NewConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case fd.IsList(): + return newListConverter(t, fd) + case fd.IsMap(): + return newMapConverter(t, fd) + default: + return newSingularConverter(t, fd) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +var ( + boolType = reflect.TypeOf(bool(false)) + int32Type = reflect.TypeOf(int32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint32Type = reflect.TypeOf(uint32(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float32Type = reflect.TypeOf(float32(0)) + float64Type = reflect.TypeOf(float64(0)) + stringType = reflect.TypeOf(string("")) + bytesType = reflect.TypeOf([]byte(nil)) + byteType = reflect.TypeOf(byte(0)) +) + +var ( + boolZero = pref.ValueOfBool(false) + int32Zero = pref.ValueOfInt32(0) + int64Zero = pref.ValueOfInt64(0) + uint32Zero = pref.ValueOfUint32(0) + uint64Zero = pref.ValueOfUint64(0) + float32Zero = pref.ValueOfFloat32(0) + float64Zero = pref.ValueOfFloat64(0) + stringZero = pref.ValueOfString("") + bytesZero = pref.ValueOfBytes(nil) +) + +func newSingularConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + defVal := func(fd pref.FieldDescriptor, zero pref.Value) pref.Value { + if fd.Cardinality() == pref.Repeated { + // Default isn't defined for repeated fields. 
+ return zero + } + return fd.Default() + } + switch fd.Kind() { + case pref.BoolKind: + if t.Kind() == reflect.Bool { + return &boolConverter{t, defVal(fd, boolZero)} + } + case pref.Int32Kind, pref.Sint32Kind, pref.Sfixed32Kind: + if t.Kind() == reflect.Int32 { + return &int32Converter{t, defVal(fd, int32Zero)} + } + case pref.Int64Kind, pref.Sint64Kind, pref.Sfixed64Kind: + if t.Kind() == reflect.Int64 { + return &int64Converter{t, defVal(fd, int64Zero)} + } + case pref.Uint32Kind, pref.Fixed32Kind: + if t.Kind() == reflect.Uint32 { + return &uint32Converter{t, defVal(fd, uint32Zero)} + } + case pref.Uint64Kind, pref.Fixed64Kind: + if t.Kind() == reflect.Uint64 { + return &uint64Converter{t, defVal(fd, uint64Zero)} + } + case pref.FloatKind: + if t.Kind() == reflect.Float32 { + return &float32Converter{t, defVal(fd, float32Zero)} + } + case pref.DoubleKind: + if t.Kind() == reflect.Float64 { + return &float64Converter{t, defVal(fd, float64Zero)} + } + case pref.StringKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &stringConverter{t, defVal(fd, stringZero)} + } + case pref.BytesKind: + if t.Kind() == reflect.String || (t.Kind() == reflect.Slice && t.Elem() == byteType) { + return &bytesConverter{t, defVal(fd, bytesZero)} + } + case pref.EnumKind: + // Handle enums, which must be a named int32 type. + if t.Kind() == reflect.Int32 { + return newEnumConverter(t, fd) + } + case pref.MessageKind, pref.GroupKind: + return newMessageConverter(t) + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type boolConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *boolConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfBool(v.Bool()) +} +func (c *boolConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bool()).Convert(c.goType) +} +func (c *boolConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(bool) + return ok +} +func (c *boolConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *boolConverter) New() pref.Value { return c.def } +func (c *boolConverter) Zero() pref.Value { return c.def } + +type int32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt32(int32(v.Int())) +} +func (c *int32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int32(v.Int())).Convert(c.goType) +} +func (c *int32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(int32) + return ok +} +func (c *int32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int32Converter) New() pref.Value { return c.def } +func (c *int32Converter) Zero() pref.Value { return c.def } + +type int64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *int64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfInt64(int64(v.Int())) +} +func (c *int64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(int64(v.Int())).Convert(c.goType) +} +func (c *int64Converter) IsValidPB(v 
pref.Value) bool { + _, ok := v.Interface().(int64) + return ok +} +func (c *int64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *int64Converter) New() pref.Value { return c.def } +func (c *int64Converter) Zero() pref.Value { return c.def } + +type uint32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint32(uint32(v.Uint())) +} +func (c *uint32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint32(v.Uint())).Convert(c.goType) +} +func (c *uint32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint32) + return ok +} +func (c *uint32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint32Converter) New() pref.Value { return c.def } +func (c *uint32Converter) Zero() pref.Value { return c.def } + +type uint64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *uint64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfUint64(uint64(v.Uint())) +} +func (c *uint64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(uint64(v.Uint())).Convert(c.goType) +} +func (c *uint64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(uint64) + return ok +} +func (c *uint64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *uint64Converter) New() pref.Value { return c.def } +func (c *uint64Converter) Zero() pref.Value { return c.def } + +type float32Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float32Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat32(float32(v.Float())) +} +func (c *float32Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float32(v.Float())).Convert(c.goType) +} +func (c *float32Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float32) + return ok +} +func (c *float32Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float32Converter) New() pref.Value { return c.def } +func (c *float32Converter) Zero() pref.Value { return c.def } + +type float64Converter struct { + goType reflect.Type + def pref.Value +} + +func (c *float64Converter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfFloat64(float64(v.Float())) +} +func (c *float64Converter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(float64(v.Float())).Convert(c.goType) +} +func (c *float64Converter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(float64) + return ok +} +func (c *float64Converter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *float64Converter) New() pref.Value { return c.def } +func (c *float64Converter) Zero() pref.Value { return c.def } + +type stringConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *stringConverter) PBValueOf(v reflect.Value) pref.Value { + if 
v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfString(v.Convert(stringType).String()) +} +func (c *stringConverter) GoValueOf(v pref.Value) reflect.Value { + // pref.Value.String never panics, so we go through an interface + // conversion here to check the type. + s := v.Interface().(string) + if c.goType.Kind() == reflect.Slice && s == "" { + return reflect.Zero(c.goType) // ensure empty string is []byte(nil) + } + return reflect.ValueOf(s).Convert(c.goType) +} +func (c *stringConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(string) + return ok +} +func (c *stringConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *stringConverter) New() pref.Value { return c.def } +func (c *stringConverter) Zero() pref.Value { return c.def } + +type bytesConverter struct { + goType reflect.Type + def pref.Value +} + +func (c *bytesConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if c.goType.Kind() == reflect.String && v.Len() == 0 { + return pref.ValueOfBytes(nil) // ensure empty string is []byte(nil) + } + return pref.ValueOfBytes(v.Convert(bytesType).Bytes()) +} +func (c *bytesConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Bytes()).Convert(c.goType) +} +func (c *bytesConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().([]byte) + return ok +} +func (c *bytesConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} +func (c *bytesConverter) New() pref.Value { return c.def } +func (c *bytesConverter) Zero() pref.Value { return c.def } + +type enumConverter struct { + goType reflect.Type + def pref.Value +} + +func newEnumConverter(goType reflect.Type, fd pref.FieldDescriptor) Converter { + var def pref.Value + if fd.Cardinality() == pref.Repeated { + def = pref.ValueOfEnum(fd.Enum().Values().Get(0).Number()) + } else { + def = fd.Default() + } + return &enumConverter{goType, def} +} + +func (c *enumConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfEnum(pref.EnumNumber(v.Int())) +} + +func (c *enumConverter) GoValueOf(v pref.Value) reflect.Value { + return reflect.ValueOf(v.Enum()).Convert(c.goType) +} + +func (c *enumConverter) IsValidPB(v pref.Value) bool { + _, ok := v.Interface().(pref.EnumNumber) + return ok +} + +func (c *enumConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *enumConverter) New() pref.Value { + return c.def +} + +func (c *enumConverter) Zero() pref.Value { + return c.def +} + +type messageConverter struct { + goType reflect.Type +} + +func newMessageConverter(goType reflect.Type) Converter { + return &messageConverter{goType} +} + +func (c *messageConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + if m, ok := v.Interface().(pref.ProtoMessage); ok { + return pref.ValueOfMessage(m.ProtoReflect()) + } + return pref.ValueOfMessage(legacyWrapMessage(v)) +} + +func (c *messageConverter) GoValueOf(v pref.Value) reflect.Value { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = 
reflect.ValueOf(m.Interface()) + } + if rv.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", rv.Type(), c.goType)) + } + return rv +} + +func (c *messageConverter) IsValidPB(v pref.Value) bool { + m := v.Message() + var rv reflect.Value + if u, ok := m.(unwrapper); ok { + rv = reflect.ValueOf(u.protoUnwrap()) + } else { + rv = reflect.ValueOf(m.Interface()) + } + return rv.Type() == c.goType +} + +func (c *messageConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *messageConverter) New() pref.Value { + return c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *messageConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_list.go b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go new file mode 100644 index 0000000..6fccab5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_list.go @@ -0,0 +1,141 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +func newListConverter(t reflect.Type, fd pref.FieldDescriptor) Converter { + switch { + case t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Slice: + return &listPtrConverter{t, newSingularConverter(t.Elem().Elem(), fd)} + case t.Kind() == reflect.Slice: + return &listConverter{t, newSingularConverter(t.Elem(), fd)} + } + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) +} + +type listConverter struct { + goType reflect.Type // []T + c Converter +} + +func (c *listConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + pv := reflect.New(c.goType) + pv.Elem().Set(v) + return pref.ValueOfList(&listReflect{pv, c.c}) +} + +func (c *listConverter) GoValueOf(v pref.Value) reflect.Value { + rv := v.List().(*listReflect).v + if rv.IsNil() { + return reflect.Zero(c.goType) + } + return rv.Elem() +} + +func (c *listConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type().Elem() == c.goType +} + +func (c *listConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listConverter) New() pref.Value { + return pref.ValueOfList(&listReflect{reflect.New(c.goType), c.c}) +} + +func (c *listConverter) Zero() pref.Value { + return pref.ValueOfList(&listReflect{reflect.Zero(reflect.PtrTo(c.goType)), c.c}) +} + +type listPtrConverter struct { + goType reflect.Type // *[]T + c Converter +} + +func (c *listPtrConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfList(&listReflect{v, c.c}) +} + +func (c *listPtrConverter) GoValueOf(v pref.Value) reflect.Value { + return v.List().(*listReflect).v +} + +func (c *listPtrConverter) IsValidPB(v pref.Value) bool { + list, ok := v.Interface().(*listReflect) + if !ok { + return false + } + return list.v.Type() == c.goType +} + +func (c *listPtrConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *listPtrConverter) New() pref.Value { + return 
c.PBValueOf(reflect.New(c.goType.Elem())) +} + +func (c *listPtrConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type listReflect struct { + v reflect.Value // *[]T + conv Converter +} + +func (ls *listReflect) Len() int { + if ls.v.IsNil() { + return 0 + } + return ls.v.Elem().Len() +} +func (ls *listReflect) Get(i int) pref.Value { + return ls.conv.PBValueOf(ls.v.Elem().Index(i)) +} +func (ls *listReflect) Set(i int, v pref.Value) { + ls.v.Elem().Index(i).Set(ls.conv.GoValueOf(v)) +} +func (ls *listReflect) Append(v pref.Value) { + ls.v.Elem().Set(reflect.Append(ls.v.Elem(), ls.conv.GoValueOf(v))) +} +func (ls *listReflect) AppendMutable() pref.Value { + if _, ok := ls.conv.(*messageConverter); !ok { + panic("invalid AppendMutable on list with non-message type") + } + v := ls.NewElement() + ls.Append(v) + return v +} +func (ls *listReflect) Truncate(i int) { + ls.v.Elem().Set(ls.v.Elem().Slice(0, i)) +} +func (ls *listReflect) NewElement() pref.Value { + return ls.conv.New() +} +func (ls *listReflect) IsValid() bool { + return !ls.v.IsNil() +} +func (ls *listReflect) protoUnwrap() interface{} { + return ls.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go new file mode 100644 index 0000000..de06b25 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -0,0 +1,121 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type mapConverter struct { + goType reflect.Type // map[K]V + keyConv, valConv Converter +} + +func newMapConverter(t reflect.Type, fd pref.FieldDescriptor) *mapConverter { + if t.Kind() != reflect.Map { + panic(fmt.Sprintf("invalid Go type %v for field %v", t, fd.FullName())) + } + return &mapConverter{ + goType: t, + keyConv: newSingularConverter(t.Key(), fd.MapKey()), + valConv: newSingularConverter(t.Elem(), fd.MapValue()), + } +} + +func (c *mapConverter) PBValueOf(v reflect.Value) pref.Value { + if v.Type() != c.goType { + panic(fmt.Sprintf("invalid type: got %v, want %v", v.Type(), c.goType)) + } + return pref.ValueOfMap(&mapReflect{v, c.keyConv, c.valConv}) +} + +func (c *mapConverter) GoValueOf(v pref.Value) reflect.Value { + return v.Map().(*mapReflect).v +} + +func (c *mapConverter) IsValidPB(v pref.Value) bool { + mapv, ok := v.Interface().(*mapReflect) + if !ok { + return false + } + return mapv.v.Type() == c.goType +} + +func (c *mapConverter) IsValidGo(v reflect.Value) bool { + return v.IsValid() && v.Type() == c.goType +} + +func (c *mapConverter) New() pref.Value { + return c.PBValueOf(reflect.MakeMap(c.goType)) +} + +func (c *mapConverter) Zero() pref.Value { + return c.PBValueOf(reflect.Zero(c.goType)) +} + +type mapReflect struct { + v reflect.Value // map[K]V + keyConv Converter + valConv Converter +} + +func (ms *mapReflect) Len() int { + return ms.v.Len() +} +func (ms *mapReflect) Has(k pref.MapKey) bool { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + return rv.IsValid() +} +func (ms *mapReflect) Get(k pref.MapKey) pref.Value { + rk := ms.keyConv.GoValueOf(k.Value()) + rv := ms.v.MapIndex(rk) + if !rv.IsValid() { + return pref.Value{} + } + return ms.valConv.PBValueOf(rv) +} +func (ms *mapReflect) Set(k pref.MapKey, v pref.Value) { + rk := 
ms.keyConv.GoValueOf(k.Value()) + rv := ms.valConv.GoValueOf(v) + ms.v.SetMapIndex(rk, rv) +} +func (ms *mapReflect) Clear(k pref.MapKey) { + rk := ms.keyConv.GoValueOf(k.Value()) + ms.v.SetMapIndex(rk, reflect.Value{}) +} +func (ms *mapReflect) Mutable(k pref.MapKey) pref.Value { + if _, ok := ms.valConv.(*messageConverter); !ok { + panic("invalid Mutable on map with non-message value type") + } + v := ms.Get(k) + if !v.IsValid() { + v = ms.NewValue() + ms.Set(k, v) + } + return v +} +func (ms *mapReflect) Range(f func(pref.MapKey, pref.Value) bool) { + iter := mapRange(ms.v) + for iter.Next() { + k := ms.keyConv.PBValueOf(iter.Key()).MapKey() + v := ms.valConv.PBValueOf(iter.Value()) + if !f(k, v) { + return + } + } +} +func (ms *mapReflect) NewValue() pref.Value { + return ms.valConv.New() +} +func (ms *mapReflect) IsValid() bool { + return !ms.v.IsNil() +} +func (ms *mapReflect) protoUnwrap() interface{} { + return ms.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/decode.go b/vendor/google.golang.org/protobuf/internal/impl/decode.go new file mode 100644 index 0000000..85ba1d3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/decode.go @@ -0,0 +1,274 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "math/bits" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/proto" + "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type unmarshalOptions struct { + flags protoiface.UnmarshalInputFlags + resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +func (o unmarshalOptions) Options() proto.UnmarshalOptions { + return proto.UnmarshalOptions{ + Merge: true, + AllowPartial: true, + DiscardUnknown: o.DiscardUnknown(), + Resolver: o.resolver, + } +} + +func (o unmarshalOptions) DiscardUnknown() bool { return o.flags&piface.UnmarshalDiscardUnknown != 0 } + +func (o unmarshalOptions) IsDefault() bool { + return o.flags == 0 && o.resolver == preg.GlobalTypes +} + +var lazyUnmarshalOptions = unmarshalOptions{ + resolver: preg.GlobalTypes, +} + +type unmarshalOutput struct { + n int // number of bytes consumed + initialized bool +} + +// unmarshal is protoreflect.Methods.Unmarshal. 
+func (mi *MessageInfo) unmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + out, err := mi.unmarshalPointer(in.Buf, p, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + var flags piface.UnmarshalOutputFlags + if out.initialized { + flags |= piface.UnmarshalInitialized + } + return piface.UnmarshalOutput{ + Flags: flags, + }, err +} + +// errUnknown is returned during unmarshaling to indicate a parse error that +// should result in a field being placed in the unknown fields section (for example, +// when the wire type doesn't match) as opposed to the entire unmarshal operation +// failing (for example, when a field extends past the available input). +// +// This is a sentinel error which should never be visible to the user. +var errUnknown = errors.New("unknown") + +func (mi *MessageInfo) unmarshalPointer(b []byte, p pointer, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, err error) { + mi.init() + if flags.ProtoLegacy && mi.isMessageSet { + return unmarshalMessageSet(mi, b, p, opts) + } + initialized := true + var requiredMask uint64 + var exts *map[int32]ExtensionField + start := len(b) + for len(b) > 0 { + // Parse the tag (field number and wire type). + var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, protowire.ParseError(n) + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, errors.New("invalid field number") + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if num != groupTag { + return out, errors.New("mismatching end group marker") + } + groupTag = 0 + break + } + + var f *coderFieldInfo + if int(num) < len(mi.denseCoderFields) { + f = mi.denseCoderFields[num] + } else { + f = mi.coderFields[num] + } + var n int + err := errUnknown + switch { + case f != nil: + if f.funcs.unmarshal == nil { + break + } + var o unmarshalOutput + o, err = f.funcs.unmarshal(b, p.Apply(f.offset), wtyp, f, opts) + n = o.n + if err != nil { + break + } + requiredMask |= f.validation.requiredBit + if f.funcs.isInit != nil && !o.initialized { + initialized = false + } + default: + // Possible extension. + if exts == nil && mi.extensionOffset.IsValid() { + exts = p.Apply(mi.extensionOffset).Extensions() + if *exts == nil { + *exts = make(map[int32]ExtensionField) + } + } + if exts == nil { + break + } + var o unmarshalOutput + o, err = mi.unmarshalExtension(b, num, wtyp, *exts, opts) + if err != nil { + break + } + n = o.n + if !o.initialized { + initialized = false + } + } + if err != nil { + if err != errUnknown { + return out, err + } + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, protowire.ParseError(n) + } + if !opts.DiscardUnknown() && mi.unknownOffset.IsValid() { + u := p.Apply(mi.unknownOffset).Bytes() + *u = protowire.AppendTag(*u, num, wtyp) + *u = append(*u, b[:n]...) 
+ } + } + b = b[n:] + } + if groupTag != 0 { + return out, errors.New("missing end group marker") + } + if mi.numRequiredFields > 0 && bits.OnesCount64(requiredMask) != int(mi.numRequiredFields) { + initialized = false + } + if initialized { + out.initialized = true + } + out.n = start - len(b) + return out, nil +} + +func (mi *MessageInfo) unmarshalExtension(b []byte, num protowire.Number, wtyp protowire.Type, exts map[int32]ExtensionField, opts unmarshalOptions) (out unmarshalOutput, err error) { + x := exts[int32(num)] + xt := x.Type() + if xt == nil { + var err error + xt, err = opts.resolver.FindExtensionByNumber(mi.Desc.FullName(), num) + if err != nil { + if err == preg.NotFound { + return out, errUnknown + } + return out, errors.New("%v: unable to resolve extension %v: %v", mi.Desc.FullName(), num, err) + } + } + xi := getExtensionFieldInfo(xt) + if xi.funcs.unmarshal == nil { + return out, errUnknown + } + if flags.LazyUnmarshalExtensions { + if opts.IsDefault() && x.canLazy(xt) { + out, valid := skipExtension(b, xi, num, wtyp, opts) + switch valid { + case ValidationValid: + if out.initialized { + x.appendLazyBytes(xt, xi, num, wtyp, b[:out.n]) + exts[int32(num)] = x + return out, nil + } + case ValidationInvalid: + return out, errors.New("invalid wire format") + case ValidationUnknown: + } + } + } + ival := x.Value() + if !ival.IsValid() && xi.unmarshalNeedsValue { + // Create a new message, list, or map value to fill in. + // For enums, create a prototype value to let the unmarshal func know the + // concrete type. + ival = xt.New() + } + v, out, err := xi.funcs.unmarshal(b, ival, num, wtyp, opts) + if err != nil { + return out, err + } + if xi.funcs.isInit == nil { + out.initialized = true + } + x.Set(xt, v) + exts[int32(num)] = x + return out, nil +} + +func skipExtension(b []byte, xi *extensionFieldInfo, num protowire.Number, wtyp protowire.Type, opts unmarshalOptions) (out unmarshalOutput, _ ValidationStatus) { + if xi.validation.mi == nil { + return out, ValidationUnknown + } + xi.validation.mi.init() + switch xi.validation.typ { + case validationTypeMessage: + if wtyp != protowire.BytesType { + return out, ValidationUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(v, 0, opts) + out.n = n + return out, st + case validationTypeGroup: + if wtyp != protowire.StartGroupType { + return out, ValidationUnknown + } + out, st := xi.validation.mi.validate(b, num, opts) + return out, st + default: + return out, ValidationUnknown + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/encode.go b/vendor/google.golang.org/protobuf/internal/impl/encode.go new file mode 100644 index 0000000..8c8a794 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/encode.go @@ -0,0 +1,199 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "math" + "sort" + "sync/atomic" + + "google.golang.org/protobuf/internal/flags" + proto "google.golang.org/protobuf/proto" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type marshalOptions struct { + flags piface.MarshalInputFlags +} + +func (o marshalOptions) Options() proto.MarshalOptions { + return proto.MarshalOptions{ + AllowPartial: true, + Deterministic: o.Deterministic(), + UseCachedSize: o.UseCachedSize(), + } +} + +func (o marshalOptions) Deterministic() bool { return o.flags&piface.MarshalDeterministic != 0 } +func (o marshalOptions) UseCachedSize() bool { return o.flags&piface.MarshalUseCachedSize != 0 } + +// size is protoreflect.Methods.Size. +func (mi *MessageInfo) size(in piface.SizeInput) piface.SizeOutput { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + size := mi.sizePointer(p, marshalOptions{ + flags: in.Flags, + }) + return piface.SizeOutput{Size: size} +} + +func (mi *MessageInfo) sizePointer(p pointer, opts marshalOptions) (size int) { + mi.init() + if p.IsNil() { + return 0 + } + if opts.UseCachedSize() && mi.sizecacheOffset.IsValid() { + if size := atomic.LoadInt32(p.Apply(mi.sizecacheOffset).Int32()); size >= 0 { + return int(size) + } + } + return mi.sizePointerSlow(p, opts) +} + +func (mi *MessageInfo) sizePointerSlow(p pointer, opts marshalOptions) (size int) { + if flags.ProtoLegacy && mi.isMessageSet { + size = sizeMessageSet(mi, p, opts) + if mi.sizecacheOffset.IsValid() { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + return size + } + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + size += mi.sizeExtensions(e, opts) + } + for _, f := range mi.orderedCoderFields { + if f.funcs.size == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + size += f.funcs.size(fptr, f, opts) + } + if mi.unknownOffset.IsValid() { + u := *p.Apply(mi.unknownOffset).Bytes() + size += len(u) + } + if mi.sizecacheOffset.IsValid() { + if size > math.MaxInt32 { + // The size is too large for the int32 sizecache field. + // We will need to recompute the size when encoding; + // unfortunately expensive, but better than invalid output. + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), -1) + } else { + atomic.StoreInt32(p.Apply(mi.sizecacheOffset).Int32(), int32(size)) + } + } + return size +} + +// marshal is protoreflect.Methods.Marshal. +func (mi *MessageInfo) marshal(in piface.MarshalInput) (out piface.MarshalOutput, err error) { + var p pointer + if ms, ok := in.Message.(*messageState); ok { + p = ms.pointer() + } else { + p = in.Message.(*messageReflectWrapper).pointer() + } + b, err := mi.marshalAppendPointer(in.Buf, p, marshalOptions{ + flags: in.Flags, + }) + return piface.MarshalOutput{Buf: b}, err +} + +func (mi *MessageInfo) marshalAppendPointer(b []byte, p pointer, opts marshalOptions) ([]byte, error) { + mi.init() + if p.IsNil() { + return b, nil + } + if flags.ProtoLegacy && mi.isMessageSet { + return marshalMessageSet(mi, b, p, opts) + } + var err error + // The old marshaler encodes extensions at beginning. + if mi.extensionOffset.IsValid() { + e := p.Apply(mi.extensionOffset).Extensions() + // TODO: Special handling for MessageSet? 
+ b, err = mi.appendExtensions(b, e, opts) + if err != nil { + return b, err + } + } + for _, f := range mi.orderedCoderFields { + if f.funcs.marshal == nil { + continue + } + fptr := p.Apply(f.offset) + if f.isPointer && fptr.Elem().IsNil() { + continue + } + b, err = f.funcs.marshal(b, fptr, f, opts) + if err != nil { + return b, err + } + } + if mi.unknownOffset.IsValid() && !mi.isMessageSet { + u := *p.Apply(mi.unknownOffset).Bytes() + b = append(b, u...) + } + return b, nil +} + +func (mi *MessageInfo) sizeExtensions(ext *map[int32]ExtensionField, opts marshalOptions) (n int) { + if ext == nil { + return 0 + } + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + if xi.funcs.size == nil { + continue + } + n += xi.funcs.size(x.Value(), xi.tagsize, opts) + } + return n +} + +func (mi *MessageInfo) appendExtensions(b []byte, ext *map[int32]ExtensionField, opts marshalOptions) ([]byte, error) { + if ext == nil { + return b, nil + } + + switch len(*ext) { + case 0: + return b, nil + case 1: + // Fast-path for one extension: Don't bother sorting the keys. + var err error + for _, x := range *ext { + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + } + return b, err + default: + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(*ext)) + for k := range *ext { + keys = append(keys, int(k)) + } + sort.Ints(keys) + var err error + for _, k := range keys { + x := (*ext)[int32(k)] + xi := getExtensionFieldInfo(x.Type()) + b, err = xi.funcs.marshal(b, x.Value(), xi.wiretag, opts) + if err != nil { + return b, err + } + } + return b, nil + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/enum.go b/vendor/google.golang.org/protobuf/internal/impl/enum.go new file mode 100644 index 0000000..8c1eab4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/enum.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type EnumInfo struct { + GoReflectType reflect.Type // int32 kind + Desc pref.EnumDescriptor +} + +func (t *EnumInfo) New(n pref.EnumNumber) pref.Enum { + return reflect.ValueOf(n).Convert(t.GoReflectType).Interface().(pref.Enum) +} +func (t *EnumInfo) Descriptor() pref.EnumDescriptor { return t.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/extension.go b/vendor/google.golang.org/protobuf/internal/impl/extension.go new file mode 100644 index 0000000..e904fd9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/extension.go @@ -0,0 +1,156 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "reflect" + "sync" + "sync/atomic" + + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ExtensionInfo implements ExtensionType. +// +// This type contains a number of exported fields for legacy compatibility. +// The only non-deprecated use of this type is through the methods of the +// ExtensionType interface. +type ExtensionInfo struct { + // An ExtensionInfo may exist in several stages of initialization. 
+ // + // extensionInfoUninitialized: Some or all of the legacy exported + // fields may be set, but none of the unexported fields have been + // initialized. This is the starting state for an ExtensionInfo + // in legacy generated code. + // + // extensionInfoDescInit: The desc field is set, but other unexported fields + // may not be initialized. Legacy exported fields may or may not be set. + // This is the starting state for an ExtensionInfo in newly generated code. + // + // extensionInfoFullInit: The ExtensionInfo is fully initialized. + // This state is only entered after lazy initialization is complete. + init uint32 + mu sync.Mutex + + goType reflect.Type + desc extensionTypeDescriptor + conv Converter + info *extensionFieldInfo // for fast-path method implementations + + // ExtendedType is a typed nil-pointer to the parent message type that + // is being extended. It is possible for this to be unpopulated in v2 + // since the message may no longer implement the MessageV1 interface. + // + // Deprecated: Use the ExtendedType method instead. + ExtendedType piface.MessageV1 + + // ExtensionType is the zero value of the extension type. + // + // For historical reasons, reflect.TypeOf(ExtensionType) and the + // type returned by InterfaceOf may not be identical. + // + // Deprecated: Use InterfaceOf(xt.Zero()) instead. + ExtensionType interface{} + + // Field is the field number of the extension. + // + // Deprecated: Use the Descriptor().Number method instead. + Field int32 + + // Name is the fully qualified name of extension. + // + // Deprecated: Use the Descriptor().FullName method instead. + Name string + + // Tag is the protobuf struct tag used in the v1 API. + // + // Deprecated: Do not use. + Tag string + + // Filename is the proto filename in which the extension is defined. + // + // Deprecated: Use Descriptor().ParentFile().Path() instead. + Filename string +} + +// Stages of initialization: See the ExtensionInfo.init field. 
+const ( + extensionInfoUninitialized = 0 + extensionInfoDescInit = 1 + extensionInfoFullInit = 2 +) + +func InitExtensionInfo(xi *ExtensionInfo, xd pref.ExtensionDescriptor, goType reflect.Type) { + xi.goType = goType + xi.desc = extensionTypeDescriptor{xd, xi} + xi.init = extensionInfoDescInit +} + +func (xi *ExtensionInfo) New() pref.Value { + return xi.lazyInit().New() +} +func (xi *ExtensionInfo) Zero() pref.Value { + return xi.lazyInit().Zero() +} +func (xi *ExtensionInfo) ValueOf(v interface{}) pref.Value { + return xi.lazyInit().PBValueOf(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) InterfaceOf(v pref.Value) interface{} { + return xi.lazyInit().GoValueOf(v).Interface() +} +func (xi *ExtensionInfo) IsValidValue(v pref.Value) bool { + return xi.lazyInit().IsValidPB(v) +} +func (xi *ExtensionInfo) IsValidInterface(v interface{}) bool { + return xi.lazyInit().IsValidGo(reflect.ValueOf(v)) +} +func (xi *ExtensionInfo) TypeDescriptor() pref.ExtensionTypeDescriptor { + if atomic.LoadUint32(&xi.init) < extensionInfoDescInit { + xi.lazyInitSlow() + } + return &xi.desc +} + +func (xi *ExtensionInfo) lazyInit() Converter { + if atomic.LoadUint32(&xi.init) < extensionInfoFullInit { + xi.lazyInitSlow() + } + return xi.conv +} + +func (xi *ExtensionInfo) lazyInitSlow() { + xi.mu.Lock() + defer xi.mu.Unlock() + + if xi.init == extensionInfoFullInit { + return + } + defer atomic.StoreUint32(&xi.init, extensionInfoFullInit) + + if xi.desc.ExtensionDescriptor == nil { + xi.initFromLegacy() + } + if !xi.desc.ExtensionDescriptor.IsPlaceholder() { + if xi.ExtensionType == nil { + xi.initToLegacy() + } + xi.conv = NewConverter(xi.goType, xi.desc.ExtensionDescriptor) + xi.info = makeExtensionFieldInfo(xi.desc.ExtensionDescriptor) + xi.info.validation = newValidationInfo(xi.desc.ExtensionDescriptor, xi.goType) + } +} + +type extensionTypeDescriptor struct { + pref.ExtensionDescriptor + xi *ExtensionInfo +} + +func (xtd *extensionTypeDescriptor) Type() pref.ExtensionType { + return xtd.xi +} +func (xtd *extensionTypeDescriptor) Descriptor() pref.ExtensionDescriptor { + return xtd.ExtensionDescriptor +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go new file mode 100644 index 0000000..f7d7ffb --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_enum.go @@ -0,0 +1,219 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// legacyEnumName returns the name of enums used in legacy code. +// It is neither the protobuf full name nor the qualified Go name, +// but rather an odd hybrid of both. +func legacyEnumName(ed pref.EnumDescriptor) string { + var protoPkg string + enumName := string(ed.FullName()) + if fd := ed.ParentFile(); fd != nil { + protoPkg = string(fd.Package()) + enumName = strings.TrimPrefix(enumName, protoPkg+".") + } + if protoPkg == "" { + return strs.GoCamelCase(enumName) + } + return protoPkg + "." + strs.GoCamelCase(enumName) +} + +// legacyWrapEnum wraps v as a protoreflect.Enum, +// where v must be a int32 kind and not implement the v2 API already. 
+func legacyWrapEnum(v reflect.Value) pref.Enum { + et := legacyLoadEnumType(v.Type()) + return et.New(pref.EnumNumber(v.Int())) +} + +var legacyEnumTypeCache sync.Map // map[reflect.Type]protoreflect.EnumType + +// legacyLoadEnumType dynamically loads a protoreflect.EnumType for t, +// where t must be an int32 kind and not implement the v2 API already. +func legacyLoadEnumType(t reflect.Type) pref.EnumType { + // Fast-path: check if a EnumType is cached for this concrete type. + if et, ok := legacyEnumTypeCache.Load(t); ok { + return et.(pref.EnumType) + } + + // Slow-path: derive enum descriptor and initialize EnumType. + var et pref.EnumType + ed := LegacyLoadEnumDesc(t) + et = &legacyEnumType{ + desc: ed, + goType: t, + } + if et, ok := legacyEnumTypeCache.LoadOrStore(t, et); ok { + return et.(pref.EnumType) + } + return et +} + +type legacyEnumType struct { + desc pref.EnumDescriptor + goType reflect.Type + m sync.Map // map[protoreflect.EnumNumber]proto.Enum +} + +func (t *legacyEnumType) New(n pref.EnumNumber) pref.Enum { + if e, ok := t.m.Load(n); ok { + return e.(pref.Enum) + } + e := &legacyEnumWrapper{num: n, pbTyp: t, goTyp: t.goType} + t.m.Store(n, e) + return e +} +func (t *legacyEnumType) Descriptor() pref.EnumDescriptor { + return t.desc +} + +type legacyEnumWrapper struct { + num pref.EnumNumber + pbTyp pref.EnumType + goTyp reflect.Type +} + +func (e *legacyEnumWrapper) Descriptor() pref.EnumDescriptor { + return e.pbTyp.Descriptor() +} +func (e *legacyEnumWrapper) Type() pref.EnumType { + return e.pbTyp +} +func (e *legacyEnumWrapper) Number() pref.EnumNumber { + return e.num +} +func (e *legacyEnumWrapper) ProtoReflect() pref.Enum { + return e +} +func (e *legacyEnumWrapper) protoUnwrap() interface{} { + v := reflect.New(e.goTyp).Elem() + v.SetInt(int64(e.num)) + return v.Interface() +} + +var ( + _ pref.Enum = (*legacyEnumWrapper)(nil) + _ unwrapper = (*legacyEnumWrapper)(nil) +) + +var legacyEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// LegacyLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must be an int32 kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := legacyEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: initialize EnumDescriptor from the raw descriptor. + ev := reflect.Zero(t).Interface() + if _, ok := ev.(pref.Enum); ok { + panic(fmt.Sprintf("%v already implements proto.Enum", t)) + } + edV1, ok := ev.(enumV1) + if !ok { + return aberrantLoadEnumDesc(t) + } + b, idxs := edV1.EnumDescriptor() + + var ed pref.EnumDescriptor + if len(idxs) == 1 { + ed = legacyLoadFileDesc(b).Enums().Get(idxs[0]) + } else { + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1 : len(idxs)-1] { + md = md.Messages().Get(i) + } + ed = md.Enums().Get(idxs[len(idxs)-1]) + } + if ed, ok := legacyEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(protoreflect.EnumDescriptor) + } + return ed +} + +var aberrantEnumDescCache sync.Map // map[reflect.Type]protoreflect.EnumDescriptor + +// aberrantLoadEnumDesc returns an EnumDescriptor derived from the Go type, +// which must not implement protoreflect.Enum or enumV1. +// +// If the type does not implement enumV1, then there is no reliable +// way to derive the original protobuf type information. 
+// We are unable to use the global enum registry since it is +// unfortunately keyed by the protobuf full name, which we also do not know. +// Thus, this produces some bogus enum descriptor based on the Go type name. +func aberrantLoadEnumDesc(t reflect.Type) pref.EnumDescriptor { + // Fast-path: check if an EnumDescriptor is cached for this concrete type. + if ed, ok := aberrantEnumDescCache.Load(t); ok { + return ed.(pref.EnumDescriptor) + } + + // Slow-path: construct a bogus, but unique EnumDescriptor. + ed := &filedesc.Enum{L2: new(filedesc.EnumL2)} + ed.L0.FullName = AberrantDeriveFullName(t) // e.g., github_com.user.repo.MyEnum + ed.L0.ParentFile = filedesc.SurrogateProto3 + ed.L2.Values.List = append(ed.L2.Values.List, filedesc.EnumValue{}) + + // TODO: Use the presence of a UnmarshalJSON method to determine proto2? + + vd := &ed.L2.Values.List[0] + vd.L0.FullName = ed.L0.FullName + "_UNKNOWN" // e.g., github_com.user.repo.MyEnum_UNKNOWN + vd.L0.ParentFile = ed.L0.ParentFile + vd.L0.Parent = ed + + // TODO: We could use the String method to obtain some enum value names by + // starting at 0 and print the enum until it produces invalid identifiers. + // An exhaustive query is clearly impractical, but can be best-effort. + + if ed, ok := aberrantEnumDescCache.LoadOrStore(t, ed); ok { + return ed.(pref.EnumDescriptor) + } + return ed +} + +// AberrantDeriveFullName derives a fully qualified protobuf name for the given Go type +// The provided name is not guaranteed to be stable nor universally unique. +// It should be sufficiently unique within a program. +// +// This is exported for testing purposes. +func AberrantDeriveFullName(t reflect.Type) pref.FullName { + sanitize := func(r rune) rune { + switch { + case r == '/': + return '.' + case 'a' <= r && r <= 'z', 'A' <= r && r <= 'Z', '0' <= r && r <= '9': + return r + default: + return '_' + } + } + prefix := strings.Map(sanitize, t.PkgPath()) + suffix := strings.Map(sanitize, t.Name()) + if suffix == "" { + suffix = fmt.Sprintf("UnknownX%X", reflect.ValueOf(t).Pointer()) + } + + ss := append(strings.Split(prefix, "."), suffix) + for i, s := range ss { + if s == "" || ('0' <= s[0] && s[0] <= '9') { + ss[i] = "x" + s + } + } + return pref.FullName(strings.Join(ss, ".")) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go new file mode 100644 index 0000000..c3d741c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_export.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "encoding/binary" + "encoding/json" + "hash/crc32" + "math" + "reflect" + + "google.golang.org/protobuf/internal/errors" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// These functions exist to support exported APIs in generated protobufs. +// While these are deprecated, they cannot be removed for compatibility reasons. + +// LegacyEnumName returns the name of enums used in legacy code. +func (Export) LegacyEnumName(ed pref.EnumDescriptor) string { + return legacyEnumName(ed) +} + +// LegacyMessageTypeOf returns the protoreflect.MessageType for m, +// with name used as the message name if necessary. 
+func (Export) LegacyMessageTypeOf(m piface.MessageV1, name pref.FullName) pref.MessageType { + if mv := (Export{}).protoMessageV2Of(m); mv != nil { + return mv.ProtoReflect().Type() + } + return legacyLoadMessageInfo(reflect.TypeOf(m), name) +} + +// UnmarshalJSONEnum unmarshals an enum from a JSON-encoded input. +// The input can either be a string representing the enum value by name, +// or a number representing the enum number itself. +func (Export) UnmarshalJSONEnum(ed pref.EnumDescriptor, b []byte) (pref.EnumNumber, error) { + if b[0] == '"' { + var name pref.Name + if err := json.Unmarshal(b, &name); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + ev := ed.Values().ByName(name) + if ev == nil { + return 0, errors.New("invalid value for enum %v: %s", ed.FullName(), name) + } + return ev.Number(), nil + } else { + var num pref.EnumNumber + if err := json.Unmarshal(b, &num); err != nil { + return 0, errors.New("invalid input for enum %v: %s", ed.FullName(), b) + } + return num, nil + } +} + +// CompressGZIP compresses the input as a GZIP-encoded file. +// The current implementation does no compression. +func (Export) CompressGZIP(in []byte) (out []byte) { + // RFC 1952, section 2.3.1. + var gzipHeader = [10]byte{0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff} + + // RFC 1951, section 3.2.4. + var blockHeader [5]byte + const maxBlockSize = math.MaxUint16 + numBlocks := 1 + len(in)/maxBlockSize + + // RFC 1952, section 2.3.1. + var gzipFooter [8]byte + binary.LittleEndian.PutUint32(gzipFooter[0:4], crc32.ChecksumIEEE(in)) + binary.LittleEndian.PutUint32(gzipFooter[4:8], uint32(len(in))) + + // Encode the input without compression using raw DEFLATE blocks. + out = make([]byte, 0, len(gzipHeader)+len(blockHeader)*numBlocks+len(in)+len(gzipFooter)) + out = append(out, gzipHeader[:]...) + for blockHeader[0] == 0 { + blockSize := maxBlockSize + if blockSize > len(in) { + blockHeader[0] = 0x01 // final bit per RFC 1951, section 3.2.3. + blockSize = len(in) + } + binary.LittleEndian.PutUint16(blockHeader[1:3], uint16(blockSize)^0x0000) + binary.LittleEndian.PutUint16(blockHeader[3:5], uint16(blockSize)^0xffff) + out = append(out, blockHeader[:]...) + out = append(out, in[:blockSize]...) + in = in[blockSize:] + } + out = append(out, gzipFooter[:]...) + return out +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go new file mode 100644 index 0000000..61757ce --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_extension.go @@ -0,0 +1,175 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "reflect" + + "google.golang.org/protobuf/internal/descopts" + "google.golang.org/protobuf/internal/encoding/messageset" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +func (xi *ExtensionInfo) initToLegacy() { + xd := xi.desc + var parent piface.MessageV1 + messageName := xd.ContainingMessage().FullName() + if mt, _ := preg.GlobalTypes.FindMessageByName(messageName); mt != nil { + // Create a new parent message and unwrap it if possible. + mv := mt.New().Interface() + t := reflect.TypeOf(mv) + if mv, ok := mv.(unwrapper); ok { + t = reflect.TypeOf(mv.protoUnwrap()) + } + + // Check whether the message implements the legacy v1 Message interface. + mz := reflect.Zero(t).Interface() + if mz, ok := mz.(piface.MessageV1); ok { + parent = mz + } + } + + // Determine the v1 extension type, which is unfortunately not the same as + // the v2 ExtensionType.GoType. + extType := xi.goType + switch extType.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + extType = reflect.PtrTo(extType) // T -> *T for singular scalar fields + } + + // Reconstruct the legacy enum full name. + var enumName string + if xd.Kind() == pref.EnumKind { + enumName = legacyEnumName(xd.Enum()) + } + + // Derive the proto file that the extension was declared within. + var filename string + if fd := xd.ParentFile(); fd != nil { + filename = fd.Path() + } + + // For MessageSet extensions, the name used is the parent message. + name := xd.FullName() + if messageset.IsMessageSetExtension(xd) { + name = name.Parent() + } + + xi.ExtendedType = parent + xi.ExtensionType = reflect.Zero(extType).Interface() + xi.Field = int32(xd.Number()) + xi.Name = string(name) + xi.Tag = ptag.Marshal(xd, enumName) + xi.Filename = filename +} + +// initFromLegacy initializes an ExtensionInfo from +// the contents of the deprecated exported fields of the type. +func (xi *ExtensionInfo) initFromLegacy() { + // The v1 API returns "type incomplete" descriptors where only the + // field number is specified. In such a case, use a placeholder. + if xi.ExtendedType == nil || xi.ExtensionType == nil { + xd := placeholderExtension{ + name: pref.FullName(xi.Name), + number: pref.FieldNumber(xi.Field), + } + xi.desc = extensionTypeDescriptor{xd, xi} + return + } + + // Resolve enum or message dependencies. + var ed pref.EnumDescriptor + var md pref.MessageDescriptor + t := reflect.TypeOf(xi.ExtensionType) + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + ed = v.Descriptor() + case enumV1: + ed = LegacyLoadEnumDesc(t) + case pref.ProtoMessage: + md = v.ProtoReflect().Descriptor() + case messageV1: + md = LegacyLoadMessageDesc(t) + } + + // Derive basic field information from the struct tag. + var evs pref.EnumValueDescriptors + if ed != nil { + evs = ed.Values() + } + fd := ptag.Unmarshal(xi.Tag, t, evs).(*filedesc.Field) + + // Construct a v2 ExtensionType. 
+ xd := &filedesc.Extension{L2: new(filedesc.ExtensionL2)} + xd.L0.ParentFile = filedesc.SurrogateProto2 + xd.L0.FullName = pref.FullName(xi.Name) + xd.L1.Number = pref.FieldNumber(xi.Field) + xd.L1.Cardinality = fd.L1.Cardinality + xd.L1.Kind = fd.L1.Kind + xd.L2.IsPacked = fd.L1.IsPacked + xd.L2.Default = fd.L1.Default + xd.L1.Extendee = Export{}.MessageDescriptorOf(xi.ExtendedType) + xd.L2.Enum = ed + xd.L2.Message = md + + // Derive real extension field name for MessageSets. + if messageset.IsMessageSet(xd.L1.Extendee) && md.FullName() == xd.L0.FullName { + xd.L0.FullName = xd.L0.FullName.Append(messageset.ExtensionName) + } + + tt := reflect.TypeOf(xi.ExtensionType) + if isOptional { + tt = tt.Elem() + } + xi.goType = tt + xi.desc = extensionTypeDescriptor{xd, xi} +} + +type placeholderExtension struct { + name pref.FullName + number pref.FieldNumber +} + +func (x placeholderExtension) ParentFile() pref.FileDescriptor { return nil } +func (x placeholderExtension) Parent() pref.Descriptor { return nil } +func (x placeholderExtension) Index() int { return 0 } +func (x placeholderExtension) Syntax() pref.Syntax { return 0 } +func (x placeholderExtension) Name() pref.Name { return x.name.Name() } +func (x placeholderExtension) FullName() pref.FullName { return x.name } +func (x placeholderExtension) IsPlaceholder() bool { return true } +func (x placeholderExtension) Options() pref.ProtoMessage { return descopts.Field } +func (x placeholderExtension) Number() pref.FieldNumber { return x.number } +func (x placeholderExtension) Cardinality() pref.Cardinality { return 0 } +func (x placeholderExtension) Kind() pref.Kind { return 0 } +func (x placeholderExtension) HasJSONName() bool { return false } +func (x placeholderExtension) JSONName() string { return "" } +func (x placeholderExtension) HasPresence() bool { return false } +func (x placeholderExtension) HasOptionalKeyword() bool { return false } +func (x placeholderExtension) IsExtension() bool { return true } +func (x placeholderExtension) IsWeak() bool { return false } +func (x placeholderExtension) IsPacked() bool { return false } +func (x placeholderExtension) IsList() bool { return false } +func (x placeholderExtension) IsMap() bool { return false } +func (x placeholderExtension) MapKey() pref.FieldDescriptor { return nil } +func (x placeholderExtension) MapValue() pref.FieldDescriptor { return nil } +func (x placeholderExtension) HasDefault() bool { return false } +func (x placeholderExtension) Default() pref.Value { return pref.Value{} } +func (x placeholderExtension) DefaultEnumValue() pref.EnumValueDescriptor { return nil } +func (x placeholderExtension) ContainingOneof() pref.OneofDescriptor { return nil } +func (x placeholderExtension) ContainingMessage() pref.MessageDescriptor { return nil } +func (x placeholderExtension) Enum() pref.EnumDescriptor { return nil } +func (x placeholderExtension) Message() pref.MessageDescriptor { return nil } +func (x placeholderExtension) ProtoType(pref.FieldDescriptor) { return } +func (x placeholderExtension) ProtoInternal(pragma.DoNotImplement) { return } diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go new file mode 100644 index 0000000..9ab0910 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_file.go @@ -0,0 +1,81 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "sync" + + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// Every enum and message type generated by protoc-gen-go since commit 2fc053c5 +// on February 25th, 2016 has had a method to get the raw descriptor. +// Types that were not generated by protoc-gen-go or were generated prior +// to that version are not supported. +// +// The []byte returned is the encoded form of a FileDescriptorProto message +// compressed using GZIP. The []int is the path from the top-level file +// to the specific message or enum declaration. +type ( + enumV1 interface { + EnumDescriptor() ([]byte, []int) + } + messageV1 interface { + Descriptor() ([]byte, []int) + } +) + +var legacyFileDescCache sync.Map // map[*byte]protoreflect.FileDescriptor + +// legacyLoadFileDesc unmarshals b as a compressed FileDescriptorProto message. +// +// This assumes that b is immutable and that b does not refer to part of a +// concatenated series of GZIP files (which would require shenanigans that +// rely on the concatenation properties of both protobufs and GZIP). +// File descriptors generated by protoc-gen-go do not rely on that property. +func legacyLoadFileDesc(b []byte) protoreflect.FileDescriptor { + // Fast-path: check whether we already have a cached file descriptor. + if fd, ok := legacyFileDescCache.Load(&b[0]); ok { + return fd.(protoreflect.FileDescriptor) + } + + // Slow-path: decompress and unmarshal the file descriptor proto. + zr, err := gzip.NewReader(bytes.NewReader(b)) + if err != nil { + panic(err) + } + b2, err := ioutil.ReadAll(zr) + if err != nil { + panic(err) + } + + fd := filedesc.Builder{ + RawDescriptor: b2, + FileRegistry: resolverOnly{protoregistry.GlobalFiles}, // do not register back to global registry + }.Build().File + if fd, ok := legacyFileDescCache.LoadOrStore(&b[0], fd); ok { + return fd.(protoreflect.FileDescriptor) + } + return fd +} + +type resolverOnly struct { + reg *protoregistry.Files +} + +func (r resolverOnly) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + return r.reg.FindFileByPath(path) +} +func (r resolverOnly) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + return r.reg.FindDescriptorByName(name) +} +func (resolverOnly) RegisterFile(protoreflect.FileDescriptor) error { + return nil +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go new file mode 100644 index 0000000..06c68e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -0,0 +1,502 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package impl + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "google.golang.org/protobuf/internal/descopts" + ptag "google.golang.org/protobuf/internal/encoding/tag" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// legacyWrapMessage wraps v as a protoreflect.Message, +// where v must be a *struct kind and not implement the v2 API already. +func legacyWrapMessage(v reflect.Value) pref.Message { + typ := v.Type() + if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Struct { + return aberrantMessage{v: v} + } + mt := legacyLoadMessageInfo(typ, "") + return mt.MessageOf(v.Interface()) +} + +var legacyMessageTypeCache sync.Map // map[reflect.Type]*MessageInfo + +// legacyLoadMessageInfo dynamically loads a *MessageInfo for t, +// where t must be a *struct kind and not implement the v2 API already. +// The provided name is used if it cannot be determined from the message. +func legacyLoadMessageInfo(t reflect.Type, name pref.FullName) *MessageInfo { + // Fast-path: check if a MessageInfo is cached for this concrete type. + if mt, ok := legacyMessageTypeCache.Load(t); ok { + return mt.(*MessageInfo) + } + + // Slow-path: derive message descriptor and initialize MessageInfo. + mi := &MessageInfo{ + Desc: legacyLoadMessageDesc(t, name), + GoReflectType: t, + } + + v := reflect.Zero(t).Interface() + if _, ok := v.(legacyMarshaler); ok { + mi.methods.Marshal = legacyMarshal + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + mi.methods.Flags |= piface.SupportMarshalDeterministic + } + if _, ok := v.(legacyUnmarshaler); ok { + mi.methods.Unmarshal = legacyUnmarshal + } + if _, ok := v.(legacyMerger); ok { + mi.methods.Merge = legacyMerge + } + + if mi, ok := legacyMessageTypeCache.LoadOrStore(t, mi); ok { + return mi.(*MessageInfo) + } + return mi +} + +var legacyMessageDescCache sync.Map // map[reflect.Type]protoreflect.MessageDescriptor + +// LegacyLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must be a *struct kind and not implement the v2 API already. +// +// This is exported for testing purposes. +func LegacyLoadMessageDesc(t reflect.Type) pref.MessageDescriptor { + return legacyLoadMessageDesc(t, "") +} +func legacyLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if a MessageDescriptor is cached for this concrete type. + if mi, ok := legacyMessageDescCache.Load(t); ok { + return mi.(pref.MessageDescriptor) + } + + // Slow-path: initialize MessageDescriptor from the raw descriptor. + mv := reflect.Zero(t).Interface() + if _, ok := mv.(pref.ProtoMessage); ok { + panic(fmt.Sprintf("%v already implements proto.Message", t)) + } + mdV1, ok := mv.(messageV1) + if !ok { + return aberrantLoadMessageDesc(t, name) + } + + // If this is a dynamic message type where there isn't a 1-1 mapping between + // Go and protobuf types, calling the Descriptor method on the zero value of + // the message type isn't likely to work. If it panics, swallow the panic and + // continue as if the Descriptor method wasn't present. 
+ b, idxs := func() ([]byte, []int) { + defer func() { + recover() + }() + return mdV1.Descriptor() + }() + if b == nil { + return aberrantLoadMessageDesc(t, name) + } + + // If the Go type has no fields, then this might be a proto3 empty message + // from before the size cache was added. If there are any fields, check to + // see that at least one of them looks like something we generated. + if nfield := t.Elem().NumField(); nfield > 0 { + hasProtoField := false + for i := 0; i < nfield; i++ { + f := t.Elem().Field(i) + if f.Tag.Get("protobuf") != "" || f.Tag.Get("protobuf_oneof") != "" || strings.HasPrefix(f.Name, "XXX_") { + hasProtoField = true + break + } + } + if !hasProtoField { + return aberrantLoadMessageDesc(t, name) + } + } + + md := legacyLoadFileDesc(b).Messages().Get(idxs[0]) + for _, i := range idxs[1:] { + md = md.Messages().Get(i) + } + if name != "" && md.FullName() != name { + panic(fmt.Sprintf("mismatching message name: got %v, want %v", md.FullName(), name)) + } + if md, ok := legacyMessageDescCache.LoadOrStore(t, md); ok { + return md.(protoreflect.MessageDescriptor) + } + return md +} + +var ( + aberrantMessageDescLock sync.Mutex + aberrantMessageDescCache map[reflect.Type]protoreflect.MessageDescriptor +) + +// aberrantLoadMessageDesc returns an MessageDescriptor derived from the Go type, +// which must not implement protoreflect.ProtoMessage or messageV1. +// +// This is a best-effort derivation of the message descriptor using the protobuf +// tags on the struct fields. +func aberrantLoadMessageDesc(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + aberrantMessageDescLock.Lock() + defer aberrantMessageDescLock.Unlock() + if aberrantMessageDescCache == nil { + aberrantMessageDescCache = make(map[reflect.Type]protoreflect.MessageDescriptor) + } + return aberrantLoadMessageDescReentrant(t, name) +} +func aberrantLoadMessageDescReentrant(t reflect.Type, name pref.FullName) pref.MessageDescriptor { + // Fast-path: check if an MessageDescriptor is cached for this concrete type. + if md, ok := aberrantMessageDescCache[t]; ok { + return md + } + + // Slow-path: construct a descriptor from the Go struct type (best-effort). + // Cache the MessageDescriptor early on so that we can resolve internal + // cyclic references. + md := &filedesc.Message{L2: new(filedesc.MessageL2)} + md.L0.FullName = aberrantDeriveMessageName(t, name) + md.L0.ParentFile = filedesc.SurrogateProto2 + aberrantMessageDescCache[t] = md + + if t.Kind() != reflect.Ptr || t.Elem().Kind() != reflect.Struct { + return md + } + + // Try to determine if the message is using proto3 by checking scalars. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + switch f.Type.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + md.L0.ParentFile = filedesc.SurrogateProto3 + } + for _, s := range strings.Split(tag, ",") { + if s == "proto3" { + md.L0.ParentFile = filedesc.SurrogateProto3 + } + } + } + } + + // Obtain a list of oneof wrapper types. 
+ var oneofWrappers []reflect.Type + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := t.MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + for _, v := range vs { + oneofWrappers = append(oneofWrappers, reflect.TypeOf(v)) + } + } + } + } + } + + // Obtain a list of the extension ranges. + if fn, ok := t.MethodByName("ExtensionRangeArray"); ok { + vs := fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0] + for i := 0; i < vs.Len(); i++ { + v := vs.Index(i) + md.L2.ExtensionRanges.List = append(md.L2.ExtensionRanges.List, [2]pref.FieldNumber{ + pref.FieldNumber(v.FieldByName("Start").Int()), + pref.FieldNumber(v.FieldByName("End").Int() + 1), + }) + md.L2.ExtensionRangeOptions = append(md.L2.ExtensionRangeOptions, nil) + } + } + + // Derive the message fields by inspecting the struct fields. + for i := 0; i < t.Elem().NumField(); i++ { + f := t.Elem().Field(i) + if tag := f.Tag.Get("protobuf"); tag != "" { + tagKey := f.Tag.Get("protobuf_key") + tagVal := f.Tag.Get("protobuf_val") + aberrantAppendField(md, f.Type, tag, tagKey, tagVal) + } + if tag := f.Tag.Get("protobuf_oneof"); tag != "" { + n := len(md.L2.Oneofs.List) + md.L2.Oneofs.List = append(md.L2.Oneofs.List, filedesc.Oneof{}) + od := &md.L2.Oneofs.List[n] + od.L0.FullName = md.FullName().Append(pref.Name(tag)) + od.L0.ParentFile = md.L0.ParentFile + od.L0.Parent = md + od.L0.Index = n + + for _, t := range oneofWrappers { + if t.Implements(f.Type) { + f := t.Elem().Field(0) + if tag := f.Tag.Get("protobuf"); tag != "" { + aberrantAppendField(md, f.Type, tag, "", "") + fd := &md.L2.Fields.List[len(md.L2.Fields.List)-1] + fd.L1.ContainingOneof = od + od.L1.Fields.List = append(od.L1.Fields.List, fd) + } + } + } + } + } + + return md +} + +func aberrantDeriveMessageName(t reflect.Type, name pref.FullName) pref.FullName { + if name.IsValid() { + return name + } + func() { + defer func() { recover() }() // swallow possible nil panics + if m, ok := reflect.Zero(t).Interface().(interface{ XXX_MessageName() string }); ok { + name = pref.FullName(m.XXX_MessageName()) + } + }() + if name.IsValid() { + return name + } + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + return AberrantDeriveFullName(t) +} + +func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, tagVal string) { + t := goType + isOptional := t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct + isRepeated := t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 + if isOptional || isRepeated { + t = t.Elem() + } + fd := ptag.Unmarshal(tag, t, placeholderEnumValues{}).(*filedesc.Field) + + // Append field descriptor to the message. + n := len(md.L2.Fields.List) + md.L2.Fields.List = append(md.L2.Fields.List, *fd) + fd = &md.L2.Fields.List[n] + fd.L0.FullName = md.FullName().Append(fd.Name()) + fd.L0.ParentFile = md.L0.ParentFile + fd.L0.Parent = md + fd.L0.Index = n + + if fd.L1.IsWeak || fd.L1.HasPacked { + fd.L1.Options = func() pref.ProtoMessage { + opts := descopts.Field.ProtoReflect().New() + if fd.L1.IsWeak { + opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) + } + if fd.L1.HasPacked { + opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.IsPacked)) + } + return opts.Interface() + } + } + + // Populate Enum and Message. 
+ if fd.Enum() == nil && fd.Kind() == pref.EnumKind { + switch v := reflect.Zero(t).Interface().(type) { + case pref.Enum: + fd.L1.Enum = v.Descriptor() + default: + fd.L1.Enum = LegacyLoadEnumDesc(t) + } + } + if fd.Message() == nil && (fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind) { + switch v := reflect.Zero(t).Interface().(type) { + case pref.ProtoMessage: + fd.L1.Message = v.ProtoReflect().Descriptor() + case messageV1: + fd.L1.Message = LegacyLoadMessageDesc(t) + default: + if t.Kind() == reflect.Map { + n := len(md.L1.Messages.List) + md.L1.Messages.List = append(md.L1.Messages.List, filedesc.Message{L2: new(filedesc.MessageL2)}) + md2 := &md.L1.Messages.List[n] + md2.L0.FullName = md.FullName().Append(pref.Name(strs.MapEntryName(string(fd.Name())))) + md2.L0.ParentFile = md.L0.ParentFile + md2.L0.Parent = md + md2.L0.Index = n + + md2.L1.IsMapEntry = true + md2.L2.Options = func() pref.ProtoMessage { + opts := descopts.Message.ProtoReflect().New() + opts.Set(opts.Descriptor().Fields().ByName("map_entry"), protoreflect.ValueOfBool(true)) + return opts.Interface() + } + + aberrantAppendField(md2, t.Key(), tagKey, "", "") + aberrantAppendField(md2, t.Elem(), tagVal, "", "") + + fd.L1.Message = md2 + break + } + fd.L1.Message = aberrantLoadMessageDescReentrant(t, "") + } + } +} + +type placeholderEnumValues struct { + protoreflect.EnumValueDescriptors +} + +func (placeholderEnumValues) ByNumber(n pref.EnumNumber) pref.EnumValueDescriptor { + return filedesc.PlaceholderEnumValue(pref.FullName(fmt.Sprintf("UNKNOWN_%d", n))) +} + +// legacyMarshaler is the proto.Marshaler interface superseded by protoiface.Methoder. +type legacyMarshaler interface { + Marshal() ([]byte, error) +} + +// legacyUnmarshaler is the proto.Unmarshaler interface superseded by protoiface.Methoder. +type legacyUnmarshaler interface { + Unmarshal([]byte) error +} + +// legacyMerger is the proto.Merger interface superseded by protoiface.Methoder. +type legacyMerger interface { + Merge(protoiface.MessageV1) +} + +var legacyProtoMethods = &piface.Methods{ + Marshal: legacyMarshal, + Unmarshal: legacyUnmarshal, + Merge: legacyMerge, + + // We have no way to tell whether the type's Marshal method + // supports deterministic serialization or not, but this + // preserves the v1 implementation's behavior of always + // calling Marshal methods when present. + Flags: piface.SupportMarshalDeterministic, +} + +func legacyMarshal(in piface.MarshalInput) (piface.MarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + marshaler, ok := v.(legacyMarshaler) + if !ok { + return piface.MarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + out, err := marshaler.Marshal() + if in.Buf != nil { + out = append(in.Buf, out...) 
+ } + return piface.MarshalOutput{ + Buf: out, + }, err +} + +func legacyUnmarshal(in piface.UnmarshalInput) (piface.UnmarshalOutput, error) { + v := in.Message.(unwrapper).protoUnwrap() + unmarshaler, ok := v.(legacyUnmarshaler) + if !ok { + return piface.UnmarshalOutput{}, errors.New("%T does not implement Marshal", v) + } + return piface.UnmarshalOutput{}, unmarshaler.Unmarshal(in.Buf) +} + +func legacyMerge(in piface.MergeInput) piface.MergeOutput { + dstv := in.Destination.(unwrapper).protoUnwrap() + merger, ok := dstv.(legacyMerger) + if !ok { + return piface.MergeOutput{} + } + merger.Merge(Export{}.ProtoMessageV1Of(in.Source)) + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +// aberrantMessageType implements MessageType for all types other than pointer-to-struct. +type aberrantMessageType struct { + t reflect.Type +} + +func (mt aberrantMessageType) New() pref.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) Zero() pref.Message { + return aberrantMessage{reflect.Zero(mt.t)} +} +func (mt aberrantMessageType) GoType() reflect.Type { + return mt.t +} +func (mt aberrantMessageType) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(mt.t) +} + +// aberrantMessage implements Message for all types other than pointer-to-struct. +// +// When the underlying type implements legacyMarshaler or legacyUnmarshaler, +// the aberrant Message can be marshaled or unmarshaled. Otherwise, there is +// not much that can be done with values of this type. +type aberrantMessage struct { + v reflect.Value +} + +func (m aberrantMessage) ProtoReflect() pref.Message { + return m +} + +func (m aberrantMessage) Descriptor() pref.MessageDescriptor { + return LegacyLoadMessageDesc(m.v.Type()) +} +func (m aberrantMessage) Type() pref.MessageType { + return aberrantMessageType{m.v.Type()} +} +func (m aberrantMessage) New() pref.Message { + return aberrantMessage{reflect.Zero(m.v.Type())} +} +func (m aberrantMessage) Interface() pref.ProtoMessage { + return m +} +func (m aberrantMessage) Range(f func(pref.FieldDescriptor, pref.Value) bool) { +} +func (m aberrantMessage) Has(pref.FieldDescriptor) bool { + panic("invalid field descriptor") +} +func (m aberrantMessage) Clear(pref.FieldDescriptor) { + panic("invalid field descriptor") +} +func (m aberrantMessage) Get(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) Set(pref.FieldDescriptor, pref.Value) { + panic("invalid field descriptor") +} +func (m aberrantMessage) Mutable(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) NewField(pref.FieldDescriptor) pref.Value { + panic("invalid field descriptor") +} +func (m aberrantMessage) WhichOneof(pref.OneofDescriptor) pref.FieldDescriptor { + panic("invalid oneof descriptor") +} +func (m aberrantMessage) GetUnknown() pref.RawFields { + return nil +} +func (m aberrantMessage) SetUnknown(pref.RawFields) { + // SetUnknown discards its input on messages which don't support unknown field storage. +} +func (m aberrantMessage) IsValid() bool { + // An invalid message is a read-only, empty message. Since we don't know anything + // about the alleged contents of this message, we can't say with confidence that + // it is invalid in this sense. Therefore, report it as valid. 
+ return true +} +func (m aberrantMessage) ProtoMethods() *piface.Methods { + return legacyProtoMethods +} +func (m aberrantMessage) protoUnwrap() interface{} { + return m.v.Interface() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge.go b/vendor/google.golang.org/protobuf/internal/impl/merge.go new file mode 100644 index 0000000..cdc4267 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge.go @@ -0,0 +1,176 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/proto" + pref "google.golang.org/protobuf/reflect/protoreflect" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +type mergeOptions struct{} + +func (o mergeOptions) Merge(dst, src proto.Message) { + proto.Merge(dst, src) +} + +// merge is protoreflect.Methods.Merge. +func (mi *MessageInfo) merge(in piface.MergeInput) piface.MergeOutput { + dp, ok := mi.getPointer(in.Destination) + if !ok { + return piface.MergeOutput{} + } + sp, ok := mi.getPointer(in.Source) + if !ok { + return piface.MergeOutput{} + } + mi.mergePointer(dp, sp, mergeOptions{}) + return piface.MergeOutput{Flags: piface.MergeComplete} +} + +func (mi *MessageInfo) mergePointer(dst, src pointer, opts mergeOptions) { + mi.init() + if dst.IsNil() { + panic(fmt.Sprintf("invalid value: merging into nil message")) + } + if src.IsNil() { + return + } + for _, f := range mi.orderedCoderFields { + if f.funcs.merge == nil { + continue + } + sfptr := src.Apply(f.offset) + if f.isPointer && sfptr.Elem().IsNil() { + continue + } + f.funcs.merge(dst.Apply(f.offset), sfptr, f, opts) + } + if mi.extensionOffset.IsValid() { + sext := src.Apply(mi.extensionOffset).Extensions() + dext := dst.Apply(mi.extensionOffset).Extensions() + if *dext == nil { + *dext = make(map[int32]ExtensionField) + } + for num, sx := range *sext { + xt := sx.Type() + xi := getExtensionFieldInfo(xt) + if xi.funcs.merge == nil { + continue + } + dx := (*dext)[num] + var dv pref.Value + if dx.Type() == sx.Type() { + dv = dx.Value() + } + if !dv.IsValid() && xi.unmarshalNeedsValue { + dv = xt.New() + } + dv = xi.funcs.merge(dv, sx.Value(), opts) + dx.Set(sx.Type(), dv) + (*dext)[num] = dx + } + } + if mi.unknownOffset.IsValid() { + du := dst.Apply(mi.unknownOffset).Bytes() + su := src.Apply(mi.unknownOffset).Bytes() + if len(*su) > 0 { + *du = append(*du, *su...) + } + } +} + +func mergeScalarValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return src +} + +func mergeBytesValue(dst, src pref.Value, opts mergeOptions) pref.Value { + return pref.ValueOfBytes(append(emptyBuf[:], src.Bytes()...)) +} + +func mergeListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + dstl.Append(srcl.Get(i)) + } + return dst +} + +func mergeBytesListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sb := srcl.Get(i).Bytes() + db := append(emptyBuf[:], sb...) 
+ dstl.Append(pref.ValueOfBytes(db)) + } + return dst +} + +func mergeMessageListValue(dst, src pref.Value, opts mergeOptions) pref.Value { + dstl := dst.List() + srcl := src.List() + for i, llen := 0, srcl.Len(); i < llen; i++ { + sm := srcl.Get(i).Message() + dm := proto.Clone(sm.Interface()).ProtoReflect() + dstl.Append(pref.ValueOfMessage(dm)) + } + return dst +} + +func mergeMessageValue(dst, src pref.Value, opts mergeOptions) pref.Value { + opts.Merge(dst.Message().Interface(), src.Message().Interface()) + return dst +} + +func mergeMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + if f.mi != nil { + if dst.Elem().IsNil() { + dst.SetPointer(pointerOfValue(reflect.New(f.mi.GoReflectType.Elem()))) + } + f.mi.mergePointer(dst.Elem(), src.Elem(), opts) + } else { + dm := dst.AsValueOf(f.ft).Elem() + sm := src.AsValueOf(f.ft).Elem() + if dm.IsNil() { + dm.Set(reflect.New(f.ft.Elem())) + } + opts.Merge(asMessage(dm), asMessage(sm)) + } +} + +func mergeMessageSlice(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { + for _, sp := range src.PointerSlice() { + dm := reflect.New(f.ft.Elem().Elem()) + if f.mi != nil { + f.mi.mergePointer(pointerOfValue(dm), sp, opts) + } else { + opts.Merge(asMessage(dm), asMessage(sp.AsValueOf(f.ft.Elem().Elem()))) + } + dst.AppendPointerSlice(pointerOfValue(dm)) + } +} + +func mergeBytes(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bytes() = append(emptyBuf[:], *src.Bytes()...) +} + +func mergeBytesNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bytes() + if len(v) > 0 { + *dst.Bytes() = append(emptyBuf[:], v...) + } +} + +func mergeBytesSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BytesSlice() + for _, v := range *src.BytesSlice() { + *ds = append(*ds, append(emptyBuf[:], v...)) + } +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go new file mode 100644 index 0000000..8816c27 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/merge_gen.go @@ -0,0 +1,209 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package impl + +import () + +func mergeBool(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Bool() = *src.Bool() +} + +func mergeBoolNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Bool() + if v != false { + *dst.Bool() = v + } +} + +func mergeBoolPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.BoolPtr() + if p != nil { + v := *p + *dst.BoolPtr() = &v + } +} + +func mergeBoolSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.BoolSlice() + ss := src.BoolSlice() + *ds = append(*ds, *ss...) +} + +func mergeInt32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int32() = *src.Int32() +} + +func mergeInt32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int32() + if v != 0 { + *dst.Int32() = v + } +} + +func mergeInt32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int32Ptr() + if p != nil { + v := *p + *dst.Int32Ptr() = &v + } +} + +func mergeInt32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int32Slice() + ss := src.Int32Slice() + *ds = append(*ds, *ss...) 
+} + +func mergeUint32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint32() = *src.Uint32() +} + +func mergeUint32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint32() + if v != 0 { + *dst.Uint32() = v + } +} + +func mergeUint32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint32Ptr() + if p != nil { + v := *p + *dst.Uint32Ptr() = &v + } +} + +func mergeUint32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint32Slice() + ss := src.Uint32Slice() + *ds = append(*ds, *ss...) +} + +func mergeInt64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Int64() = *src.Int64() +} + +func mergeInt64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Int64() + if v != 0 { + *dst.Int64() = v + } +} + +func mergeInt64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Int64Ptr() + if p != nil { + v := *p + *dst.Int64Ptr() = &v + } +} + +func mergeInt64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Int64Slice() + ss := src.Int64Slice() + *ds = append(*ds, *ss...) +} + +func mergeUint64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Uint64() = *src.Uint64() +} + +func mergeUint64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Uint64() + if v != 0 { + *dst.Uint64() = v + } +} + +func mergeUint64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Uint64Ptr() + if p != nil { + v := *p + *dst.Uint64Ptr() = &v + } +} + +func mergeUint64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Uint64Slice() + ss := src.Uint64Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat32(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float32() = *src.Float32() +} + +func mergeFloat32NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float32() + if v != 0 { + *dst.Float32() = v + } +} + +func mergeFloat32Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float32Ptr() + if p != nil { + v := *p + *dst.Float32Ptr() = &v + } +} + +func mergeFloat32Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float32Slice() + ss := src.Float32Slice() + *ds = append(*ds, *ss...) +} + +func mergeFloat64(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.Float64() = *src.Float64() +} + +func mergeFloat64NoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.Float64() + if v != 0 { + *dst.Float64() = v + } +} + +func mergeFloat64Ptr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.Float64Ptr() + if p != nil { + v := *p + *dst.Float64Ptr() = &v + } +} + +func mergeFloat64Slice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.Float64Slice() + ss := src.Float64Slice() + *ds = append(*ds, *ss...) +} + +func mergeString(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + *dst.String() = *src.String() +} + +func mergeStringNoZero(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + v := *src.String() + if v != "" { + *dst.String() = v + } +} + +func mergeStringPtr(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + p := *src.StringPtr() + if p != nil { + v := *p + *dst.StringPtr() = &v + } +} + +func mergeStringSlice(dst, src pointer, _ *coderFieldInfo, _ mergeOptions) { + ds := dst.StringSlice() + ss := src.StringSlice() + *ds = append(*ds, *ss...) 
+} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go new file mode 100644 index 0000000..7dd994b --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -0,0 +1,215 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + + "google.golang.org/protobuf/internal/genname" + "google.golang.org/protobuf/reflect/protoreflect" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// MessageInfo provides protobuf related functionality for a given Go type +// that represents a message. A given instance of MessageInfo is tied to +// exactly one Go type, which must be a pointer to a struct type. +// +// The exported fields must be populated before any methods are called +// and cannot be mutated after set. +type MessageInfo struct { + // GoReflectType is the underlying message Go type and must be populated. + GoReflectType reflect.Type // pointer to struct + + // Desc is the underlying message descriptor type and must be populated. + Desc pref.MessageDescriptor + + // Exporter must be provided in a purego environment in order to provide + // access to unexported fields. + Exporter exporter + + // OneofWrappers is list of pointers to oneof wrapper struct types. + OneofWrappers []interface{} + + initMu sync.Mutex // protects all unexported fields + initDone uint32 + + reflectMessageInfo // for reflection implementation + coderMessageInfo // for fast-path method implementations +} + +// exporter is a function that returns a reference to the ith field of v, +// where v is a pointer to a struct. It returns nil if it does not support +// exporting the requested field (e.g., already exported). +type exporter func(v interface{}, i int) interface{} + +// getMessageInfo returns the MessageInfo for any message type that +// is generated by our implementation of protoc-gen-go (for v2 and on). +// If it is unable to obtain a MessageInfo, it returns nil. +func getMessageInfo(mt reflect.Type) *MessageInfo { + m, ok := reflect.Zero(mt).Interface().(pref.ProtoMessage) + if !ok { + return nil + } + mr, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *MessageInfo }) + if !ok { + return nil + } + return mr.ProtoMessageInfo() +} + +func (mi *MessageInfo) init() { + // This function is called in the hot path. Inline the sync.Once logic, + // since allocating a closure for Once.Do is expensive. + // Keep init small to ensure that it can be inlined. + if atomic.LoadUint32(&mi.initDone) == 0 { + mi.initOnce() + } +} + +func (mi *MessageInfo) initOnce() { + mi.initMu.Lock() + defer mi.initMu.Unlock() + if mi.initDone == 1 { + return + } + + t := mi.GoReflectType + if t.Kind() != reflect.Ptr && t.Elem().Kind() != reflect.Struct { + panic(fmt.Sprintf("got %v, want *struct kind", t)) + } + t = t.Elem() + + si := mi.makeStructInfo(t) + mi.makeReflectFuncs(t, si) + mi.makeCoderMethods(t, si) + + atomic.StoreUint32(&mi.initDone, 1) +} + +// getPointer returns the pointer for a message, which should be of +// the type of the MessageInfo. If the message is of a different type, +// it returns ok==false. 
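The merge helpers above implement the documented proto.Merge semantics: singular scalars are only copied when set (the *NoZero variants skip zero values on fields without presence), repeated fields are appended, and bytes are deep-copied rather than aliased. A minimal sketch of the observable behavior, assuming the well-known-type packages structpb and wrapperspb from this same module are available to the build (they are not necessarily part of this vendor tree):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/structpb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	// Repeated fields append: dst ends up holding "a", "b", "c".
	dst, _ := structpb.NewList([]interface{}{"a"})
	src, _ := structpb.NewList([]interface{}{"b", "c"})
	proto.Merge(dst, src)
	fmt.Println(len(dst.Values)) // 3

	// A zero scalar without presence counts as "not set", so merging it is a
	// no-op (the *NoZero paths above): d keeps its value of 7.
	d, s := wrapperspb.Int32(7), wrapperspb.Int32(0)
	proto.Merge(d, s)
	fmt.Println(d.Value) // 7
}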
+func (mi *MessageInfo) getPointer(m pref.Message) (p pointer, ok bool) { + switch m := m.(type) { + case *messageState: + return m.pointer(), m.messageInfo() == mi + case *messageReflectWrapper: + return m.pointer(), m.messageInfo() == mi + } + return pointer{}, false +} + +type ( + SizeCache = int32 + WeakFields = map[int32]protoreflect.ProtoMessage + UnknownFields = []byte + ExtensionFields = map[int32]ExtensionField +) + +var ( + sizecacheType = reflect.TypeOf(SizeCache(0)) + weakFieldsType = reflect.TypeOf(WeakFields(nil)) + unknownFieldsType = reflect.TypeOf(UnknownFields(nil)) + extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) +) + +type structInfo struct { + sizecacheOffset offset + weakOffset offset + unknownOffset offset + extensionOffset offset + + fieldsByNumber map[pref.FieldNumber]reflect.StructField + oneofsByName map[pref.Name]reflect.StructField + oneofWrappersByType map[reflect.Type]pref.FieldNumber + oneofWrappersByNumber map[pref.FieldNumber]reflect.Type +} + +func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { + si := structInfo{ + sizecacheOffset: invalidOffset, + weakOffset: invalidOffset, + unknownOffset: invalidOffset, + extensionOffset: invalidOffset, + + fieldsByNumber: map[pref.FieldNumber]reflect.StructField{}, + oneofsByName: map[pref.Name]reflect.StructField{}, + oneofWrappersByType: map[reflect.Type]pref.FieldNumber{}, + oneofWrappersByNumber: map[pref.FieldNumber]reflect.Type{}, + } + +fieldLoop: + for i := 0; i < t.NumField(); i++ { + switch f := t.Field(i); f.Name { + case genname.SizeCache, genname.SizeCacheA: + if f.Type == sizecacheType { + si.sizecacheOffset = offsetOf(f, mi.Exporter) + } + case genname.WeakFields, genname.WeakFieldsA: + if f.Type == weakFieldsType { + si.weakOffset = offsetOf(f, mi.Exporter) + } + case genname.UnknownFields, genname.UnknownFieldsA: + if f.Type == unknownFieldsType { + si.unknownOffset = offsetOf(f, mi.Exporter) + } + case genname.ExtensionFields, genname.ExtensionFieldsA, genname.ExtensionFieldsB: + if f.Type == extensionFieldsType { + si.extensionOffset = offsetOf(f, mi.Exporter) + } + default: + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.fieldsByNumber[pref.FieldNumber(n)] = f + continue fieldLoop + } + } + if s := f.Tag.Get("protobuf_oneof"); len(s) > 0 { + si.oneofsByName[pref.Name(s)] = f + continue fieldLoop + } + } + } + + // Derive a mapping of oneof wrappers to fields. 
+ oneofWrappers := mi.OneofWrappers + for _, method := range []string{"XXX_OneofFuncs", "XXX_OneofWrappers"} { + if fn, ok := reflect.PtrTo(t).MethodByName(method); ok { + for _, v := range fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))}) { + if vs, ok := v.Interface().([]interface{}); ok { + oneofWrappers = vs + } + } + } + } + for _, v := range oneofWrappers { + tf := reflect.TypeOf(v).Elem() + f := tf.Field(0) + for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { + if len(s) > 0 && strings.Trim(s, "0123456789") == "" { + n, _ := strconv.ParseUint(s, 10, 64) + si.oneofWrappersByType[tf] = pref.FieldNumber(n) + si.oneofWrappersByNumber[pref.FieldNumber(n)] = tf + break + } + } + } + + return si +} + +func (mi *MessageInfo) New() protoreflect.Message { + return mi.MessageOf(reflect.New(mi.GoReflectType.Elem()).Interface()) +} +func (mi *MessageInfo) Zero() protoreflect.Message { + return mi.MessageOf(reflect.Zero(mi.GoReflectType).Interface()) +} +func (mi *MessageInfo) Descriptor() protoreflect.MessageDescriptor { return mi.Desc } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go new file mode 100644 index 0000000..0f4b8db --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -0,0 +1,364 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "reflect" + + "google.golang.org/protobuf/internal/pragma" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type reflectMessageInfo struct { + fields map[pref.FieldNumber]*fieldInfo + oneofs map[pref.Name]*oneofInfo + + // denseFields is a subset of fields where: + // 0 < fieldDesc.Number() < len(denseFields) + // It provides faster access to the fieldInfo, but may be incomplete. + denseFields []*fieldInfo + + // rangeInfos is a list of all fields (not belonging to a oneof) and oneofs. + rangeInfos []interface{} // either *fieldInfo or *oneofInfo + + getUnknown func(pointer) pref.RawFields + setUnknown func(pointer, pref.RawFields) + extensionMap func(pointer) *extensionMap + + nilMessage atomicNilMessage +} + +// makeReflectFuncs generates the set of functions to support reflection. +func (mi *MessageInfo) makeReflectFuncs(t reflect.Type, si structInfo) { + mi.makeKnownFieldsFunc(si) + mi.makeUnknownFieldsFunc(t, si) + mi.makeExtensionFieldsFunc(t, si) +} + +// makeKnownFieldsFunc generates functions for operations that can be performed +// on each protobuf message field. It takes in a reflect.Type representing the +// Go struct and matches message fields with struct fields. +// +// This code assumes that the struct is well-formed and panics if there are +// any discrepancies. 
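makeStructInfo above recovers field numbers purely from the protobuf struct tags emitted by protoc-gen-go: the field number is the lone all-digit element of the comma-separated tag. A standalone sketch of that same tag scan, using a hypothetical hand-written struct rather than generated code:

package main

import (
	"fmt"
	"reflect"
	"strconv"
	"strings"
)

// example is a hypothetical struct mimicking one generated message field.
type example struct {
	Name string `protobuf:"bytes,1,opt,name=name,proto3"`
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("Name")
	for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
		// Only the field number survives trimming all decimal digits.
		if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
			n, _ := strconv.ParseUint(s, 10, 64)
			fmt.Println("field number:", n) // 1
		}
	}
}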
+func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { + mi.fields = map[pref.FieldNumber]*fieldInfo{} + md := mi.Desc + fds := md.Fields() + for i := 0; i < fds.Len(); i++ { + fd := fds.Get(i) + fs := si.fieldsByNumber[fd.Number()] + var fi fieldInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) + case fd.IsMap(): + fi = fieldInfoForMap(fd, fs, mi.Exporter) + case fd.IsList(): + fi = fieldInfoForList(fd, fs, mi.Exporter) + case fd.IsWeak(): + fi = fieldInfoForWeakMessage(fd, si.weakOffset) + case fd.Kind() == pref.MessageKind || fd.Kind() == pref.GroupKind: + fi = fieldInfoForMessage(fd, fs, mi.Exporter) + default: + fi = fieldInfoForScalar(fd, fs, mi.Exporter) + } + mi.fields[fd.Number()] = &fi + } + + mi.oneofs = map[pref.Name]*oneofInfo{} + for i := 0; i < md.Oneofs().Len(); i++ { + od := md.Oneofs().Get(i) + mi.oneofs[od.Name()] = makeOneofInfo(od, si, mi.Exporter) + } + + mi.denseFields = make([]*fieldInfo, fds.Len()*2) + for i := 0; i < fds.Len(); i++ { + if fd := fds.Get(i); int(fd.Number()) < len(mi.denseFields) { + mi.denseFields[fd.Number()] = mi.fields[fd.Number()] + } + } + + for i := 0; i < fds.Len(); { + fd := fds.Get(i) + if od := fd.ContainingOneof(); od != nil && !od.IsSynthetic() { + mi.rangeInfos = append(mi.rangeInfos, mi.oneofs[od.Name()]) + i += od.Fields().Len() + } else { + mi.rangeInfos = append(mi.rangeInfos, mi.fields[fd.Number()]) + i++ + } + } +} + +func (mi *MessageInfo) makeUnknownFieldsFunc(t reflect.Type, si structInfo) { + mi.getUnknown = func(pointer) pref.RawFields { return nil } + mi.setUnknown = func(pointer, pref.RawFields) { return } + if si.unknownOffset.IsValid() { + mi.getUnknown = func(p pointer) pref.RawFields { + if p.IsNil() { + return nil + } + rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) + return pref.RawFields(*rv.Interface().(*[]byte)) + } + mi.setUnknown = func(p pointer, b pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + rv := p.Apply(si.unknownOffset).AsValueOf(unknownFieldsType) + *rv.Interface().(*[]byte) = []byte(b) + } + } else { + mi.getUnknown = func(pointer) pref.RawFields { + return nil + } + mi.setUnknown = func(p pointer, _ pref.RawFields) { + if p.IsNil() { + panic("invalid SetUnknown on nil Message") + } + } + } +} + +func (mi *MessageInfo) makeExtensionFieldsFunc(t reflect.Type, si structInfo) { + if si.extensionOffset.IsValid() { + mi.extensionMap = func(p pointer) *extensionMap { + if p.IsNil() { + return (*extensionMap)(nil) + } + v := p.Apply(si.extensionOffset).AsValueOf(extensionFieldsType) + return (*extensionMap)(v.Interface().(*map[int32]ExtensionField)) + } + } else { + mi.extensionMap = func(pointer) *extensionMap { + return (*extensionMap)(nil) + } + } +} + +type extensionMap map[int32]ExtensionField + +func (m *extensionMap) Range(f func(pref.FieldDescriptor, pref.Value) bool) { + if m != nil { + for _, x := range *m { + xd := x.Type().TypeDescriptor() + v := x.Value() + if xd.IsList() && v.List().Len() == 0 { + continue + } + if !f(xd, v) { + return + } + } + } +} +func (m *extensionMap) Has(xt pref.ExtensionType) (ok bool) { + if m == nil { + return false + } + xd := xt.TypeDescriptor() + x, ok := (*m)[int32(xd.Number())] + if !ok { + return false + } + switch { + case xd.IsList(): + return x.Value().List().Len() > 0 + case xd.IsMap(): + return x.Value().Map().Len() > 0 + case 
xd.Message() != nil: + return x.Value().Message().IsValid() + } + return true +} +func (m *extensionMap) Clear(xt pref.ExtensionType) { + delete(*m, int32(xt.TypeDescriptor().Number())) +} +func (m *extensionMap) Get(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if m != nil { + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + } + return xt.Zero() +} +func (m *extensionMap) Set(xt pref.ExtensionType, v pref.Value) { + xd := xt.TypeDescriptor() + isValid := true + switch { + case !xt.IsValidValue(v): + isValid = false + case xd.IsList(): + isValid = v.List().IsValid() + case xd.IsMap(): + isValid = v.Map().IsValid() + case xd.Message() != nil: + isValid = v.Message().IsValid() + } + if !isValid { + panic(fmt.Sprintf("%v: assigning invalid value", xt.TypeDescriptor().FullName())) + } + + if *m == nil { + *m = make(map[int32]ExtensionField) + } + var x ExtensionField + x.Set(xt, v) + (*m)[int32(xd.Number())] = x +} +func (m *extensionMap) Mutable(xt pref.ExtensionType) pref.Value { + xd := xt.TypeDescriptor() + if xd.Kind() != pref.MessageKind && xd.Kind() != pref.GroupKind && !xd.IsList() && !xd.IsMap() { + panic("invalid Mutable on field with non-composite type") + } + if x, ok := (*m)[int32(xd.Number())]; ok { + return x.Value() + } + v := xt.New() + m.Set(xt, v) + return v +} + +// MessageState is a data structure that is nested as the first field in a +// concrete message. It provides a way to implement the ProtoReflect method +// in an allocation-free way without needing to have a shadow Go type generated +// for every message type. This technique only works using unsafe. +// +// +// Example generated code: +// +// type M struct { +// state protoimpl.MessageState +// +// Field1 int32 +// Field2 string +// Field3 *BarMessage +// ... +// } +// +// func (m *M) ProtoReflect() protoreflect.Message { +// mi := &file_fizz_buzz_proto_msgInfos[5] +// if protoimpl.UnsafeEnabled && m != nil { +// ms := protoimpl.X.MessageStateOf(Pointer(m)) +// if ms.LoadMessageInfo() == nil { +// ms.StoreMessageInfo(mi) +// } +// return ms +// } +// return mi.MessageOf(m) +// } +// +// The MessageState type holds a *MessageInfo, which must be atomically set to +// the message info associated with a given message instance. +// By unsafely converting a *M into a *MessageState, the MessageState object +// has access to all the information needed to implement protobuf reflection. +// It has access to the message info as its first field, and a pointer to the +// MessageState is identical to a pointer to the concrete message value. +// +// +// Requirements: +// • The type M must implement protoreflect.ProtoMessage. +// • The address of m must not be nil. +// • The address of m and the address of m.state must be equal, +// even though they are different Go types. +type MessageState struct { + pragma.NoUnkeyedLiterals + pragma.DoNotCompare + pragma.DoNotCopy + + atomicMessageInfo *MessageInfo +} + +type messageState MessageState + +var ( + _ pref.Message = (*messageState)(nil) + _ unwrapper = (*messageState)(nil) +) + +// messageDataType is a tuple of a pointer to the message data and +// a pointer to the message type. It is a generalized way of providing a +// reflective view over a message instance. The disadvantage of this approach +// is the need to allocate this tuple of 16B. 
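The unknown-field accessors built by makeUnknownFieldsFunc above are what back GetUnknown/SetUnknown on generated messages: fields the descriptor does not recognize are preserved as raw wire bytes rather than dropped. A minimal round trip, assuming the emptypb and protowire packages from this same module are available to the build:

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/emptypb"
)

func main() {
	// Encode field number 1 as a varint; google.protobuf.Empty declares no fields.
	raw := protowire.AppendTag(nil, 1, protowire.VarintType)
	raw = protowire.AppendVarint(raw, 42)

	m := &emptypb.Empty{}
	if err := proto.Unmarshal(raw, m); err != nil {
		panic(err)
	}
	// The unrecognized field is retained verbatim in the unknown-field storage.
	fmt.Printf("%x\n", m.ProtoReflect().GetUnknown()) // 082a
}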
+type messageDataType struct { + p pointer + mi *MessageInfo +} + +type ( + messageReflectWrapper messageDataType + messageIfaceWrapper messageDataType +) + +var ( + _ pref.Message = (*messageReflectWrapper)(nil) + _ unwrapper = (*messageReflectWrapper)(nil) + _ pref.ProtoMessage = (*messageIfaceWrapper)(nil) + _ unwrapper = (*messageIfaceWrapper)(nil) +) + +// MessageOf returns a reflective view over a message. The input must be a +// pointer to a named Go struct. If the provided type has a ProtoReflect method, +// it must be implemented by calling this method. +func (mi *MessageInfo) MessageOf(m interface{}) pref.Message { + // TODO: Switch the input to be an opaque Pointer. + if reflect.TypeOf(m) != mi.GoReflectType { + panic(fmt.Sprintf("type mismatch: got %T, want %v", m, mi.GoReflectType)) + } + p := pointerOfIface(m) + if p.IsNil() { + return mi.nilMessage.Init(mi) + } + return &messageReflectWrapper{p, mi} +} + +func (m *messageReflectWrapper) pointer() pointer { return m.p } +func (m *messageReflectWrapper) messageInfo() *MessageInfo { return m.mi } + +func (m *messageIfaceWrapper) ProtoReflect() pref.Message { + return (*messageReflectWrapper)(m) +} +func (m *messageIfaceWrapper) protoUnwrap() interface{} { + return m.p.AsIfaceOf(m.mi.GoReflectType.Elem()) +} + +// checkField verifies that the provided field descriptor is valid. +// Exactly one of the returned values is populated. +func (mi *MessageInfo) checkField(fd pref.FieldDescriptor) (*fieldInfo, pref.ExtensionType) { + var fi *fieldInfo + if n := fd.Number(); 0 < n && int(n) < len(mi.denseFields) { + fi = mi.denseFields[n] + } else { + fi = mi.fields[n] + } + if fi != nil { + if fi.fieldDesc != fd { + if got, want := fd.FullName(), fi.fieldDesc.FullName(); got != want { + panic(fmt.Sprintf("mismatching field: got %v, want %v", got, want)) + } + panic(fmt.Sprintf("mismatching field: %v", fd.FullName())) + } + return fi, nil + } + + if fd.IsExtension() { + if got, want := fd.ContainingMessage().FullName(), mi.Desc.FullName(); got != want { + // TODO: Should this be exact containing message descriptor match? + panic(fmt.Sprintf("extension %v has mismatching containing message: got %v, want %v", fd.FullName(), got, want)) + } + if !mi.Desc.ExtensionRanges().Has(fd.Number()) { + panic(fmt.Sprintf("extension %v extends %v outside the extension range", fd.FullName(), mi.Desc.FullName())) + } + xtd, ok := fd.(pref.ExtensionTypeDescriptor) + if !ok { + panic(fmt.Sprintf("extension %v does not implement protoreflect.ExtensionTypeDescriptor", fd.FullName())) + } + return nil, xtd.Type() + } + panic(fmt.Sprintf("field %v is invalid", fd.FullName())) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go new file mode 100644 index 0000000..23124a8 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -0,0 +1,466 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "reflect" + "sync" + + "google.golang.org/protobuf/internal/flags" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" +) + +type fieldInfo struct { + fieldDesc pref.FieldDescriptor + + // These fields are used for protobuf reflection support. 
+ has func(pointer) bool + clear func(pointer) + get func(pointer) pref.Value + set func(pointer, pref.Value) + mutable func(pointer) pref.Value + newMessage func() pref.Message + newField func() pref.Value +} + +func fieldInfoForOneof(fd pref.FieldDescriptor, fs reflect.StructField, x exporter, ot reflect.Type) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Interface { + panic(fmt.Sprintf("field %v has invalid type: got %v, want interface kind", fd.FullName(), ft)) + } + if ot.Kind() != reflect.Struct { + panic(fmt.Sprintf("field %v has invalid type: got %v, want struct kind", fd.FullName(), ot)) + } + if !reflect.PtrTo(ot).Implements(ft) { + panic(fmt.Sprintf("field %v has invalid type: %v does not implement %v", fd.FullName(), ot, ft)) + } + conv := NewConverter(ot.Field(0).Type, fd) + isMessage := fd.Message() != nil + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + // NOTE: The logic below intentionally assumes that oneof fields are + // well-formatted. That is, the oneof interface never contains a + // typed nil pointer to one of the wrapper structs. + + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return false + } + return true + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot { + // NOTE: We intentionally don't check for rv.Elem().IsNil() + // so that (*OneofWrapperType)(nil) gets cleared to nil. + return + } + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + return conv.Zero() + } + rv = rv.Elem().Elem().Field(0) + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + rv.Set(conv.GoValueOf(v)) + }, + mutable: func(p pointer) pref.Value { + if !isMessage { + panic(fmt.Sprintf("field %v with invalid Mutable call on field with non-composite type", fd.FullName())) + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() || rv.Elem().Type().Elem() != ot || rv.Elem().IsNil() { + rv.Set(reflect.New(ot)) + } + rv = rv.Elem().Elem().Field(0) + if rv.IsNil() { + rv.Set(conv.GoValueOf(pref.ValueOfMessage(conv.New().Message()))) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForMap(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Map { + panic(fmt.Sprintf("field %v has invalid type: got %v, want map kind", fd.FullName(), ft)) + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("map field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if v.IsNil() { + v.Set(reflect.MakeMap(fs.Type)) + } + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForList(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + if ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want slice kind", fd.FullName(), ft)) + } + conv := NewConverter(reflect.PtrTo(ft), fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return rv.Len() > 0 + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type) + if rv.Elem().Len() == 0 { + return conv.Zero() + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + pv := conv.GoValueOf(v) + if pv.IsNil() { + panic(fmt.Sprintf("list field %v cannot be set with read-only value", fd.FullName())) + } + rv.Set(pv.Elem()) + }, + mutable: func(p pointer) pref.Value { + v := p.Apply(fieldOffset).AsValueOf(fs.Type) + return conv.PBValueOf(v) + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +var ( + nilBytes = reflect.ValueOf([]byte(nil)) + emptyBytes = reflect.ValueOf([]byte{}) +) + +func fieldInfoForScalar(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + nullable := fd.HasPresence() + isBytes := ft.Kind() == reflect.Slice && ft.Elem().Kind() == reflect.Uint8 + if nullable { + if ft.Kind() != reflect.Ptr && ft.Kind() != reflect.Slice { + panic(fmt.Sprintf("field %v has invalid type: got %v, want pointer", fd.FullName(), ft)) + } + if ft.Kind() == reflect.Ptr { + ft = ft.Elem() + } + } + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? 
+ fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + return !rv.IsNil() + } + switch rv.Kind() { + case reflect.Bool: + return rv.Bool() + case reflect.Int32, reflect.Int64: + return rv.Int() != 0 + case reflect.Uint32, reflect.Uint64: + return rv.Uint() != 0 + case reflect.Float32, reflect.Float64: + return rv.Float() != 0 || math.Signbit(rv.Float()) + case reflect.String, reflect.Slice: + return rv.Len() > 0 + default: + panic(fmt.Sprintf("field %v has invalid type: %v", fd.FullName(), rv.Type())) // should never happen + } + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable { + if rv.IsNil() { + return conv.Zero() + } + if rv.Kind() == reflect.Ptr { + rv = rv.Elem() + } + } + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if nullable && rv.Kind() == reflect.Ptr { + if rv.IsNil() { + rv.Set(reflect.New(ft)) + } + rv = rv.Elem() + } + rv.Set(conv.GoValueOf(v)) + if isBytes && rv.Len() == 0 { + if nullable { + rv.Set(emptyBytes) // preserve presence + } else { + rv.Set(nilBytes) // do not preserve presence + } + } + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +func fieldInfoForWeakMessage(fd pref.FieldDescriptor, weakOffset offset) fieldInfo { + if !flags.ProtoLegacy { + panic("no support for proto1 weak fields") + } + + var once sync.Once + var messageType pref.MessageType + lazyInit := func() { + once.Do(func() { + messageName := fd.Message().FullName() + messageType, _ = preg.GlobalTypes.FindMessageByName(messageName) + if messageType == nil { + panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) + } + }) + } + + num := fd.Number() + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + _, ok := p.Apply(weakOffset).WeakFields().get(num) + return ok + }, + clear: func(p pointer) { + p.Apply(weakOffset).WeakFields().clear(num) + }, + get: func(p pointer) pref.Value { + lazyInit() + if p.IsNil() { + return pref.ValueOfMessage(messageType.Zero()) + } + m, ok := p.Apply(weakOffset).WeakFields().get(num) + if !ok { + return pref.ValueOfMessage(messageType.Zero()) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + set: func(p pointer, v pref.Value) { + lazyInit() + m := v.Message() + if m.Descriptor() != messageType.Descriptor() { + if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) + } + panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) + } + p.Apply(weakOffset).WeakFields().set(num, m.Interface()) + }, + mutable: func(p pointer) pref.Value { + lazyInit() + fs := p.Apply(weakOffset).WeakFields() + m, ok := fs.get(num) + if !ok { + m = messageType.New().Interface() + fs.set(num, m) + } + return pref.ValueOfMessage(m.ProtoReflect()) + }, + newMessage: func() pref.Message { + lazyInit() + return messageType.New() + }, + newField: func() pref.Value { + lazyInit() + return pref.ValueOfMessage(messageType.New()) + 
}, + } +} + +func fieldInfoForMessage(fd pref.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { + ft := fs.Type + conv := NewConverter(ft, fd) + + // TODO: Implement unsafe fast path? + fieldOffset := offsetOf(fs, x) + return fieldInfo{ + fieldDesc: fd, + has: func(p pointer) bool { + if p.IsNil() { + return false + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return !rv.IsNil() + }, + clear: func(p pointer) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(reflect.Zero(rv.Type())) + }, + get: func(p pointer) pref.Value { + if p.IsNil() { + return conv.Zero() + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + return conv.PBValueOf(rv) + }, + set: func(p pointer, v pref.Value) { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + rv.Set(conv.GoValueOf(v)) + if rv.IsNil() { + panic(fmt.Sprintf("field %v has invalid nil pointer", fd.FullName())) + } + }, + mutable: func(p pointer) pref.Value { + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + rv.Set(conv.GoValueOf(conv.New())) + } + return conv.PBValueOf(rv) + }, + newMessage: func() pref.Message { + return conv.New().Message() + }, + newField: func() pref.Value { + return conv.New() + }, + } +} + +type oneofInfo struct { + oneofDesc pref.OneofDescriptor + which func(pointer) pref.FieldNumber +} + +func makeOneofInfo(od pref.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fs := si.fieldsByNumber[od.Fields().Get(0).Number()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { // valid on either *T or []byte + return 0 + } + return od.Fields().Get(0).Number() + } + } else { + fs := si.oneofsByName[od.Name()] + fieldOffset := offsetOf(fs, x) + oi.which = func(p pointer) pref.FieldNumber { + if p.IsNil() { + return 0 + } + rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() + if rv.IsNil() { + return 0 + } + rv = rv.Elem() + if rv.IsNil() { + return 0 + } + return si.oneofWrappersByType[rv.Type().Elem()] + } + } + return oi +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go new file mode 100644 index 0000000..741d6e5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_gen.go @@ -0,0 +1,249 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. 
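makeOneofInfo above resolves which member of a oneof is populated by inspecting the wrapper type stored in the oneof interface field; that is what WhichOneof reports through reflection. A small sketch against google.protobuf.Value, whose fields live in a oneof named "kind" (assuming the structpb package from this module is available):

package main

import (
	"fmt"

	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	m := structpb.NewStringValue("hello").ProtoReflect()
	od := m.Descriptor().Oneofs().ByName("kind")
	fd := m.WhichOneof(od) // nil if no member of the oneof is set
	fmt.Println(fd.Name()) // string_value
}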
+ +package impl + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +func (m *messageState) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageState) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageState) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageState) Interface() protoreflect.ProtoMessage { + return m.protoUnwrap().(protoreflect.ProtoMessage) +} +func (m *messageState) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageState) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageState) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageState) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageState) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageState) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageState) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageState) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageState) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageState) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageState) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + 
string(m.Descriptor().FullName())) +} +func (m *messageState) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageState) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageState) IsValid() bool { + return !m.pointer().IsNil() +} + +func (m *messageReflectWrapper) Descriptor() protoreflect.MessageDescriptor { + return m.messageInfo().Desc +} +func (m *messageReflectWrapper) Type() protoreflect.MessageType { + return m.messageInfo() +} +func (m *messageReflectWrapper) New() protoreflect.Message { + return m.messageInfo().New() +} +func (m *messageReflectWrapper) Interface() protoreflect.ProtoMessage { + if m, ok := m.protoUnwrap().(protoreflect.ProtoMessage); ok { + return m + } + return (*messageIfaceWrapper)(m) +} +func (m *messageReflectWrapper) protoUnwrap() interface{} { + return m.pointer().AsIfaceOf(m.messageInfo().GoReflectType.Elem()) +} +func (m *messageReflectWrapper) ProtoMethods() *protoiface.Methods { + m.messageInfo().init() + return &m.messageInfo().methods +} + +// ProtoMessageInfo is a pseudo-internal API for allowing the v1 code +// to be able to retrieve a v2 MessageInfo struct. +// +// WARNING: This method is exempt from the compatibility promise and +// may be removed in the future without warning. +func (m *messageReflectWrapper) ProtoMessageInfo() *MessageInfo { + return m.messageInfo() +} + +func (m *messageReflectWrapper) Range(f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + m.messageInfo().init() + for _, ri := range m.messageInfo().rangeInfos { + switch ri := ri.(type) { + case *fieldInfo: + if ri.has(m.pointer()) { + if !f(ri.fieldDesc, ri.get(m.pointer())) { + return + } + } + case *oneofInfo: + if n := ri.which(m.pointer()); n > 0 { + fi := m.messageInfo().fields[n] + if !f(fi.fieldDesc, fi.get(m.pointer())) { + return + } + } + } + } + m.messageInfo().extensionMap(m.pointer()).Range(f) +} +func (m *messageReflectWrapper) Has(fd protoreflect.FieldDescriptor) bool { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.has(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Has(xt) + } +} +func (m *messageReflectWrapper) Clear(fd protoreflect.FieldDescriptor) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.clear(m.pointer()) + } else { + m.messageInfo().extensionMap(m.pointer()).Clear(xt) + } +} +func (m *messageReflectWrapper) Get(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.get(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Get(xt) + } +} +func (m *messageReflectWrapper) Set(fd protoreflect.FieldDescriptor, v protoreflect.Value) { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + fi.set(m.pointer(), v) + } else { + m.messageInfo().extensionMap(m.pointer()).Set(xt, v) + } +} +func (m *messageReflectWrapper) Mutable(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := m.messageInfo().checkField(fd); fi != nil { + return fi.mutable(m.pointer()) + } else { + return m.messageInfo().extensionMap(m.pointer()).Mutable(xt) + } +} +func (m *messageReflectWrapper) NewField(fd protoreflect.FieldDescriptor) protoreflect.Value { + m.messageInfo().init() + if fi, xt := 
m.messageInfo().checkField(fd); fi != nil { + return fi.newField() + } else { + return xt.New() + } +} +func (m *messageReflectWrapper) WhichOneof(od protoreflect.OneofDescriptor) protoreflect.FieldDescriptor { + m.messageInfo().init() + if oi := m.messageInfo().oneofs[od.Name()]; oi != nil && oi.oneofDesc == od { + return od.Fields().ByNumber(oi.which(m.pointer())) + } + panic("invalid oneof descriptor " + string(od.FullName()) + " for message " + string(m.Descriptor().FullName())) +} +func (m *messageReflectWrapper) GetUnknown() protoreflect.RawFields { + m.messageInfo().init() + return m.messageInfo().getUnknown(m.pointer()) +} +func (m *messageReflectWrapper) SetUnknown(b protoreflect.RawFields) { + m.messageInfo().init() + m.messageInfo().setUnknown(m.pointer(), b) +} +func (m *messageReflectWrapper) IsValid() bool { + return !m.pointer().IsNil() +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go new file mode 100644 index 0000000..67b4ede --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_reflect.go @@ -0,0 +1,177 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package impl + +import ( + "fmt" + "reflect" + "sync" +) + +const UnsafeEnabled = false + +// Pointer is an opaque pointer type. +type Pointer interface{} + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the field index into a struct. +type offset struct { + index int + export exporter +} + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + if len(f.Index) != 1 { + panic("embedded structs are not supported") + } + if f.PkgPath == "" { + return offset{index: f.Index[0]} // field is already exported + } + if x == nil { + panic("exporter must be provided for unexported field") + } + return offset{index: f.Index[0], export: x} +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f.index >= 0 } + +// invalidOffset is an invalid field offset. +var invalidOffset = offset{index: -1} + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset{index: 0} + +// pointer is an abstract representation of a pointer to a struct or field. +type pointer struct{ v reflect.Value } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointerOfIface(p) +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{v: v} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + return pointer{v: reflect.ValueOf(v)} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.v.IsNil() +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The current pointer must be pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if f.export != nil { + if v := reflect.ValueOf(f.export(p.v.Interface(), f.index)); v.IsValid() { + return pointer{v: v} + } + } + return pointer{v: p.v.Elem().Field(f.index).Addr()} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. 
+// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + if got := p.v.Type().Elem(); got != t { + panic(fmt.Sprintf("invalid type: got %v, want %v", got, t)) + } + return p.v +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return p.v.Interface().(*bool) } +func (p pointer) BoolPtr() **bool { return p.v.Interface().(**bool) } +func (p pointer) BoolSlice() *[]bool { return p.v.Interface().(*[]bool) } +func (p pointer) Int32() *int32 { return p.v.Interface().(*int32) } +func (p pointer) Int32Ptr() **int32 { return p.v.Interface().(**int32) } +func (p pointer) Int32Slice() *[]int32 { return p.v.Interface().(*[]int32) } +func (p pointer) Int64() *int64 { return p.v.Interface().(*int64) } +func (p pointer) Int64Ptr() **int64 { return p.v.Interface().(**int64) } +func (p pointer) Int64Slice() *[]int64 { return p.v.Interface().(*[]int64) } +func (p pointer) Uint32() *uint32 { return p.v.Interface().(*uint32) } +func (p pointer) Uint32Ptr() **uint32 { return p.v.Interface().(**uint32) } +func (p pointer) Uint32Slice() *[]uint32 { return p.v.Interface().(*[]uint32) } +func (p pointer) Uint64() *uint64 { return p.v.Interface().(*uint64) } +func (p pointer) Uint64Ptr() **uint64 { return p.v.Interface().(**uint64) } +func (p pointer) Uint64Slice() *[]uint64 { return p.v.Interface().(*[]uint64) } +func (p pointer) Float32() *float32 { return p.v.Interface().(*float32) } +func (p pointer) Float32Ptr() **float32 { return p.v.Interface().(**float32) } +func (p pointer) Float32Slice() *[]float32 { return p.v.Interface().(*[]float32) } +func (p pointer) Float64() *float64 { return p.v.Interface().(*float64) } +func (p pointer) Float64Ptr() **float64 { return p.v.Interface().(**float64) } +func (p pointer) Float64Slice() *[]float64 { return p.v.Interface().(*[]float64) } +func (p pointer) String() *string { return p.v.Interface().(*string) } +func (p pointer) StringPtr() **string { return p.v.Interface().(**string) } +func (p pointer) StringSlice() *[]string { return p.v.Interface().(*[]string) } +func (p pointer) Bytes() *[]byte { return p.v.Interface().(*[]byte) } +func (p pointer) BytesSlice() *[][]byte { return p.v.Interface().(*[][]byte) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.v.Interface().(*WeakFields)) } +func (p pointer) Extensions() *map[int32]ExtensionField { + return p.v.Interface().(*map[int32]ExtensionField) +} + +func (p pointer) Elem() pointer { + return pointer{v: p.v.Elem()} +} + +// PointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) PointerSlice() []pointer { + // TODO: reconsider this + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + sp := p.v.Elem() + sp.Set(reflect.Append(sp, v.v)) +} + +// SetPointer sets *p to v. 
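In this purego variant an offset is just a struct field index, and Apply resolves it with package reflect instead of pointer arithmetic. The essence of that resolution, shown on a hypothetical struct rather than a real generated message:

package main

import (
	"fmt"
	"reflect"
)

// msg stands in for a message struct; only its field layout matters here.
type msg struct {
	A int32
	B int32
}

func main() {
	m := &msg{A: 1, B: 2}
	p := reflect.ValueOf(m)
	// Equivalent of pointer.Apply(offset{index: 1}): take the address of field 1.
	f := p.Elem().Field(1).Addr()
	*(f.Interface().(*int32)) = 42 // same shape as the typed accessors, e.g. Int32()
	fmt.Println(m.B)               // 42
}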
+func (p pointer) SetPointer(v pointer) { + p.v.Elem().Set(v.v) +} + +func (Export) MessageStateOf(p Pointer) *messageState { panic("not supported") } +func (ms *messageState) pointer() pointer { panic("not supported") } +func (ms *messageState) messageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) LoadMessageInfo() *MessageInfo { panic("not supported") } +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { panic("not supported") } + +type atomicNilMessage struct { + once sync.Once + m messageReflectWrapper +} + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + m.once.Do(func() { + m.m.p = pointerOfIface(reflect.Zero(mi.GoReflectType).Interface()) + m.m.mi = mi + }) + return &m.m +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go new file mode 100644 index 0000000..088aa85 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -0,0 +1,173 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package impl + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const UnsafeEnabled = true + +// Pointer is an opaque pointer type. +type Pointer unsafe.Pointer + +// offset represents the offset to a struct field, accessible from a pointer. +// The offset is the byte offset to the field from the start of the struct. +type offset uintptr + +// offsetOf returns a field offset for the struct field. +func offsetOf(f reflect.StructField, x exporter) offset { + return offset(f.Offset) +} + +// IsValid reports whether the offset is valid. +func (f offset) IsValid() bool { return f != invalidOffset } + +// invalidOffset is an invalid field offset. +var invalidOffset = ^offset(0) + +// zeroOffset is a noop when calling pointer.Apply. +var zeroOffset = offset(0) + +// pointer is a pointer to a message struct or field. +type pointer struct{ p unsafe.Pointer } + +// pointerOf returns p as a pointer. +func pointerOf(p Pointer) pointer { + return pointer{p: unsafe.Pointer(p)} +} + +// pointerOfValue returns v as a pointer. +func pointerOfValue(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// pointerOfIface returns the pointer portion of an interface. +func pointerOfIface(v interface{}) pointer { + type ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } + return pointer{p: (*ifaceHeader)(unsafe.Pointer(&v)).Data} +} + +// IsNil reports whether the pointer is nil. +func (p pointer) IsNil() bool { + return p.p == nil +} + +// Apply adds an offset to the pointer to derive a new pointer +// to a specified field. The pointer must be valid and pointing at a struct. +func (p pointer) Apply(f offset) pointer { + if p.IsNil() { + panic("invalid nil pointer") + } + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +// AsValueOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to reflect.ValueOf(p.AsIfaceOf(t)) +func (p pointer) AsValueOf(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +// AsIfaceOf treats p as a pointer to an object of type t and returns the value. +// It is equivalent to p.AsValueOf(t).Interface() +func (p pointer) AsIfaceOf(t reflect.Type) interface{} { + // TODO: Use tricky unsafe magic to directly create ifaceHeader. 
+ return p.AsValueOf(t).Interface() +} + +func (p pointer) Bool() *bool { return (*bool)(p.p) } +func (p pointer) BoolPtr() **bool { return (**bool)(p.p) } +func (p pointer) BoolSlice() *[]bool { return (*[]bool)(p.p) } +func (p pointer) Int32() *int32 { return (*int32)(p.p) } +func (p pointer) Int32Ptr() **int32 { return (**int32)(p.p) } +func (p pointer) Int32Slice() *[]int32 { return (*[]int32)(p.p) } +func (p pointer) Int64() *int64 { return (*int64)(p.p) } +func (p pointer) Int64Ptr() **int64 { return (**int64)(p.p) } +func (p pointer) Int64Slice() *[]int64 { return (*[]int64)(p.p) } +func (p pointer) Uint32() *uint32 { return (*uint32)(p.p) } +func (p pointer) Uint32Ptr() **uint32 { return (**uint32)(p.p) } +func (p pointer) Uint32Slice() *[]uint32 { return (*[]uint32)(p.p) } +func (p pointer) Uint64() *uint64 { return (*uint64)(p.p) } +func (p pointer) Uint64Ptr() **uint64 { return (**uint64)(p.p) } +func (p pointer) Uint64Slice() *[]uint64 { return (*[]uint64)(p.p) } +func (p pointer) Float32() *float32 { return (*float32)(p.p) } +func (p pointer) Float32Ptr() **float32 { return (**float32)(p.p) } +func (p pointer) Float32Slice() *[]float32 { return (*[]float32)(p.p) } +func (p pointer) Float64() *float64 { return (*float64)(p.p) } +func (p pointer) Float64Ptr() **float64 { return (**float64)(p.p) } +func (p pointer) Float64Slice() *[]float64 { return (*[]float64)(p.p) } +func (p pointer) String() *string { return (*string)(p.p) } +func (p pointer) StringPtr() **string { return (**string)(p.p) } +func (p pointer) StringSlice() *[]string { return (*[]string)(p.p) } +func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } +func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } +func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } +func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } + +func (p pointer) Elem() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// PointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) PointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// AppendPointerSlice appends v to p, which must be a []*T. +func (p pointer) AppendPointerSlice(v pointer) { + *(*[]pointer)(p.p) = append(*(*[]pointer)(p.p), v) +} + +// SetPointer sets *p to v. +func (p pointer) SetPointer(v pointer) { + *(*unsafe.Pointer)(p.p) = (unsafe.Pointer)(v.p) +} + +// Static check that MessageState does not exceed the size of a pointer. +const _ = uint(unsafe.Sizeof(unsafe.Pointer(nil)) - unsafe.Sizeof(MessageState{})) + +func (Export) MessageStateOf(p Pointer) *messageState { + // Super-tricky - see documentation on MessageState. + return (*messageState)(unsafe.Pointer(p)) +} +func (ms *messageState) pointer() pointer { + // Super-tricky - see documentation on MessageState. 
+ return pointer{p: unsafe.Pointer(ms)} +} +func (ms *messageState) messageInfo() *MessageInfo { + mi := ms.LoadMessageInfo() + if mi == nil { + panic("invalid nil message info; this suggests memory corruption due to a race or shallow copy on the message struct") + } + return mi +} +func (ms *messageState) LoadMessageInfo() *MessageInfo { + return (*MessageInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)))) +} +func (ms *messageState) StoreMessageInfo(mi *MessageInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(&ms.atomicMessageInfo)), unsafe.Pointer(mi)) +} + +type atomicNilMessage struct{ p unsafe.Pointer } // p is a *messageReflectWrapper + +func (m *atomicNilMessage) Init(mi *MessageInfo) *messageReflectWrapper { + if p := atomic.LoadPointer(&m.p); p != nil { + return (*messageReflectWrapper)(p) + } + w := &messageReflectWrapper{mi: mi} + atomic.CompareAndSwapPointer(&m.p, nil, (unsafe.Pointer)(w)) + return (*messageReflectWrapper)(atomic.LoadPointer(&m.p)) +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go new file mode 100644 index 0000000..57de9cc --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -0,0 +1,575 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + "math" + "math/bits" + "reflect" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/strs" + pref "google.golang.org/protobuf/reflect/protoreflect" + preg "google.golang.org/protobuf/reflect/protoregistry" + piface "google.golang.org/protobuf/runtime/protoiface" +) + +// ValidationStatus is the result of validating the wire-format encoding of a message. +type ValidationStatus int + +const ( + // ValidationUnknown indicates that unmarshaling the message might succeed or fail. + // The validator was unable to render a judgement. + // + // The only causes of this status are an aberrant message type appearing somewhere + // in the message or a failure in the extension resolver. + ValidationUnknown ValidationStatus = iota + 1 + + // ValidationInvalid indicates that unmarshaling the message will fail. + ValidationInvalid + + // ValidationValid indicates that unmarshaling the message will succeed. + ValidationValid +) + +func (v ValidationStatus) String() string { + switch v { + case ValidationUnknown: + return "ValidationUnknown" + case ValidationInvalid: + return "ValidationInvalid" + case ValidationValid: + return "ValidationValid" + default: + return fmt.Sprintf("ValidationStatus(%d)", int(v)) + } +} + +// Validate determines whether the contents of the buffer are a valid wire encoding +// of the message type. +// +// This function is exposed for testing. 
+func Validate(mt pref.MessageType, in piface.UnmarshalInput) (out piface.UnmarshalOutput, _ ValidationStatus) { + mi, ok := mt.(*MessageInfo) + if !ok { + return out, ValidationUnknown + } + if in.Resolver == nil { + in.Resolver = preg.GlobalTypes + } + o, st := mi.validate(in.Buf, 0, unmarshalOptions{ + flags: in.Flags, + resolver: in.Resolver, + }) + if o.initialized { + out.Flags |= piface.UnmarshalInitialized + } + return out, st +} + +type validationInfo struct { + mi *MessageInfo + typ validationType + keyType, valType validationType + + // For non-required fields, requiredBit is 0. + // + // For required fields, requiredBit's nth bit is set, where n is a + // unique index in the range [0, MessageInfo.numRequiredFields). + // + // If there are more than 64 required fields, requiredBit is 0. + requiredBit uint64 +} + +type validationType uint8 + +const ( + validationTypeOther validationType = iota + validationTypeMessage + validationTypeGroup + validationTypeMap + validationTypeRepeatedVarint + validationTypeRepeatedFixed32 + validationTypeRepeatedFixed64 + validationTypeVarint + validationTypeFixed32 + validationTypeFixed64 + validationTypeBytes + validationTypeUTF8String + validationTypeMessageSetItem +) + +func newFieldValidationInfo(mi *MessageInfo, si structInfo, fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ot, ok := si.oneofWrappersByNumber[fd.Number()]; ok { + vi.mi = getMessageInfo(ot.Field(0).Type) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + } + default: + vi = newValidationInfo(fd, ft) + } + if fd.Cardinality() == pref.Required { + // Avoid overflow. The required field check is done with a 64-bit mask, with + // any message containing more than 64 required fields always reported as + // potentially uninitialized, so it is not important to get a precise count + // of the required fields past 64. 
+ if mi.numRequiredFields < math.MaxUint8 { + mi.numRequiredFields++ + vi.requiredBit = 1 << (mi.numRequiredFields - 1) + } + } + return vi +} + +func newValidationInfo(fd pref.FieldDescriptor, ft reflect.Type) validationInfo { + var vi validationInfo + switch { + case fd.IsList(): + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + if ft.Kind() == reflect.Slice { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeRepeatedVarint + case protowire.Fixed32Type: + vi.typ = validationTypeRepeatedFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeRepeatedFixed64 + } + } + case fd.IsMap(): + vi.typ = validationTypeMap + switch fd.MapKey().Kind() { + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.keyType = validationTypeUTF8String + } + } + switch fd.MapValue().Kind() { + case pref.MessageKind: + vi.valType = validationTypeMessage + if ft.Kind() == reflect.Map { + vi.mi = getMessageInfo(ft.Elem()) + } + case pref.StringKind: + if strs.EnforceUTF8(fd) { + vi.valType = validationTypeUTF8String + } + } + default: + switch fd.Kind() { + case pref.MessageKind: + vi.typ = validationTypeMessage + if !fd.IsWeak() { + vi.mi = getMessageInfo(ft) + } + case pref.GroupKind: + vi.typ = validationTypeGroup + vi.mi = getMessageInfo(ft) + case pref.StringKind: + vi.typ = validationTypeBytes + if strs.EnforceUTF8(fd) { + vi.typ = validationTypeUTF8String + } + default: + switch wireTypes[fd.Kind()] { + case protowire.VarintType: + vi.typ = validationTypeVarint + case protowire.Fixed32Type: + vi.typ = validationTypeFixed32 + case protowire.Fixed64Type: + vi.typ = validationTypeFixed64 + case protowire.BytesType: + vi.typ = validationTypeBytes + } + } + } + return vi +} + +func (mi *MessageInfo) validate(b []byte, groupTag protowire.Number, opts unmarshalOptions) (out unmarshalOutput, result ValidationStatus) { + mi.init() + type validationState struct { + typ validationType + keyType, valType validationType + endGroup protowire.Number + mi *MessageInfo + tail []byte + requiredMask uint64 + } + + // Pre-allocate some slots to avoid repeated slice reallocation. + states := make([]validationState, 0, 16) + states = append(states, validationState{ + typ: validationTypeMessage, + mi: mi, + }) + if groupTag > 0 { + states[0].typ = validationTypeGroup + states[0].endGroup = groupTag + } + initialized := true + start := len(b) +State: + for len(states) > 0 { + st := &states[len(states)-1] + for len(b) > 0 { + // Parse the tag (field number and wire type). 
+ var tag uint64 + if b[0] < 0x80 { + tag = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + tag = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + tag, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + var num protowire.Number + if n := tag >> 3; n < uint64(protowire.MinValidNumber) || n > uint64(protowire.MaxValidNumber) { + return out, ValidationInvalid + } else { + num = protowire.Number(n) + } + wtyp := protowire.Type(tag & 7) + + if wtyp == protowire.EndGroupType { + if st.endGroup == num { + goto PopState + } + return out, ValidationInvalid + } + var vi validationInfo + switch { + case st.typ == validationTypeMap: + switch num { + case 1: + vi.typ = st.keyType + case 2: + vi.typ = st.valType + vi.mi = st.mi + vi.requiredBit = 1 + } + case flags.ProtoLegacy && st.mi.isMessageSet: + switch num { + case messageset.FieldItem: + vi.typ = validationTypeMessageSetItem + } + default: + var f *coderFieldInfo + if int(num) < len(st.mi.denseCoderFields) { + f = st.mi.denseCoderFields[num] + } else { + f = st.mi.coderFields[num] + } + if f != nil { + vi = f.validation + if vi.typ == validationTypeMessage && vi.mi == nil { + // Probable weak field. + // + // TODO: Consider storing the results of this lookup somewhere + // rather than recomputing it on every validation. + fd := st.mi.Desc.Fields().ByNumber(num) + if fd == nil || !fd.IsWeak() { + break + } + messageName := fd.Message().FullName() + messageType, err := preg.GlobalTypes.FindMessageByName(messageName) + switch err { + case nil: + vi.mi, _ = messageType.(*MessageInfo) + case preg.NotFound: + vi.typ = validationTypeBytes + default: + return out, ValidationUnknown + } + } + break + } + // Possible extension field. + // + // TODO: We should return ValidationUnknown when: + // 1. The resolver is not frozen. (More extensions may be added to it.) + // 2. The resolver returns preg.NotFound. + // In this case, a type added to the resolver in the future could cause + // unmarshaling to begin failing. Supporting this requires some way to + // determine if the resolver is frozen. + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), num) + if err != nil && err != preg.NotFound { + return out, ValidationUnknown + } + if err == nil { + vi = getExtensionFieldInfo(xt).validation + } + } + if vi.requiredBit != 0 { + // Check that the field has a compatible wire type. + // We only need to consider non-repeated field types, + // since repeated fields (and maps) can never be required. 
+ ok := false + switch vi.typ { + case validationTypeVarint: + ok = wtyp == protowire.VarintType + case validationTypeFixed32: + ok = wtyp == protowire.Fixed32Type + case validationTypeFixed64: + ok = wtyp == protowire.Fixed64Type + case validationTypeBytes, validationTypeUTF8String, validationTypeMessage: + ok = wtyp == protowire.BytesType + case validationTypeGroup: + ok = wtyp == protowire.StartGroupType + } + if ok { + st.requiredMask |= vi.requiredBit + } + } + + switch wtyp { + case protowire.VarintType: + if len(b) >= 10 { + switch { + case b[0] < 0x80: + b = b[1:] + case b[1] < 0x80: + b = b[2:] + case b[2] < 0x80: + b = b[3:] + case b[3] < 0x80: + b = b[4:] + case b[4] < 0x80: + b = b[5:] + case b[5] < 0x80: + b = b[6:] + case b[6] < 0x80: + b = b[7:] + case b[7] < 0x80: + b = b[8:] + case b[8] < 0x80: + b = b[9:] + case b[9] < 0x80 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } else { + switch { + case len(b) > 0 && b[0] < 0x80: + b = b[1:] + case len(b) > 1 && b[1] < 0x80: + b = b[2:] + case len(b) > 2 && b[2] < 0x80: + b = b[3:] + case len(b) > 3 && b[3] < 0x80: + b = b[4:] + case len(b) > 4 && b[4] < 0x80: + b = b[5:] + case len(b) > 5 && b[5] < 0x80: + b = b[6:] + case len(b) > 6 && b[6] < 0x80: + b = b[7:] + case len(b) > 7 && b[7] < 0x80: + b = b[8:] + case len(b) > 8 && b[8] < 0x80: + b = b[9:] + case len(b) > 9 && b[9] < 2: + b = b[10:] + default: + return out, ValidationInvalid + } + } + continue State + case protowire.BytesType: + var size uint64 + if len(b) >= 1 && b[0] < 0x80 { + size = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + size = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + size, n = protowire.ConsumeVarint(b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + if size > uint64(len(b)) { + return out, ValidationInvalid + } + v := b[:size] + b = b[size:] + switch vi.typ { + case validationTypeMessage: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + fallthrough + case validationTypeMap: + if vi.mi != nil { + vi.mi.init() + } + states = append(states, validationState{ + typ: vi.typ, + keyType: vi.keyType, + valType: vi.valType, + mi: vi.mi, + tail: b, + }) + b = v + continue State + case validationTypeRepeatedVarint: + // Packed field. + for len(v) > 0 { + _, n := protowire.ConsumeVarint(v) + if n < 0 { + return out, ValidationInvalid + } + v = v[n:] + } + case validationTypeRepeatedFixed32: + // Packed field. + if len(v)%4 != 0 { + return out, ValidationInvalid + } + case validationTypeRepeatedFixed64: + // Packed field. 
+ if len(v)%8 != 0 { + return out, ValidationInvalid + } + case validationTypeUTF8String: + if !utf8.Valid(v) { + return out, ValidationInvalid + } + } + case protowire.Fixed32Type: + if len(b) < 4 { + return out, ValidationInvalid + } + b = b[4:] + case protowire.Fixed64Type: + if len(b) < 8 { + return out, ValidationInvalid + } + b = b[8:] + case protowire.StartGroupType: + switch { + case vi.typ == validationTypeGroup: + if vi.mi == nil { + return out, ValidationUnknown + } + vi.mi.init() + states = append(states, validationState{ + typ: validationTypeGroup, + mi: vi.mi, + endGroup: num, + }) + continue State + case flags.ProtoLegacy && vi.typ == validationTypeMessageSetItem: + typeid, v, n, err := messageset.ConsumeFieldValue(b, false) + if err != nil { + return out, ValidationInvalid + } + xt, err := opts.resolver.FindExtensionByNumber(st.mi.Desc.FullName(), typeid) + switch { + case err == preg.NotFound: + b = b[n:] + case err != nil: + return out, ValidationUnknown + default: + xvi := getExtensionFieldInfo(xt).validation + if xvi.mi != nil { + xvi.mi.init() + } + states = append(states, validationState{ + typ: xvi.typ, + mi: xvi.mi, + tail: b[n:], + }) + b = v + continue State + } + default: + n := protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return out, ValidationInvalid + } + b = b[n:] + } + default: + return out, ValidationInvalid + } + } + if st.endGroup != 0 { + return out, ValidationInvalid + } + if len(b) != 0 { + return out, ValidationInvalid + } + b = st.tail + PopState: + numRequiredFields := 0 + switch st.typ { + case validationTypeMessage, validationTypeGroup: + numRequiredFields = int(st.mi.numRequiredFields) + case validationTypeMap: + // If this is a map field with a message value that contains + // required fields, require that the value be present. + if st.mi != nil && st.mi.numRequiredFields > 0 { + numRequiredFields = 1 + } + } + // If there are more than 64 required fields, this check will + // always fail and we will report that the message is potentially + // uninitialized. + if numRequiredFields > 0 && bits.OnesCount64(st.requiredMask) != numRequiredFields { + initialized = false + } + states = states[:len(states)-1] + } + out.n = start - len(b) + if initialized { + out.initialized = true + } + return out, ValidationValid +} diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go new file mode 100644 index 0000000..009cbef --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/impl/weak.go @@ -0,0 +1,74 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package impl + +import ( + "fmt" + + pref "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +// weakFields adds methods to the exported WeakFields type for internal use. +// +// The exported type is an alias to an unnamed type, so methods can't be +// defined directly on it. 
+type weakFields WeakFields + +func (w weakFields) get(num pref.FieldNumber) (pref.ProtoMessage, bool) { + m, ok := w[int32(num)] + return m, ok +} + +func (w *weakFields) set(num pref.FieldNumber, m pref.ProtoMessage) { + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} + +func (w *weakFields) clear(num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) HasWeak(w WeakFields, num pref.FieldNumber) bool { + _, ok := w[int32(num)] + return ok +} + +func (Export) ClearWeak(w *WeakFields, num pref.FieldNumber) { + delete(*w, int32(num)) +} + +func (Export) GetWeak(w WeakFields, num pref.FieldNumber, name pref.FullName) pref.ProtoMessage { + if m, ok := w[int32(num)]; ok { + return m + } + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + return mt.Zero().Interface() +} + +func (Export) SetWeak(w *WeakFields, num pref.FieldNumber, name pref.FullName, m pref.ProtoMessage) { + if m != nil { + mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) + if mt == nil { + panic(fmt.Sprintf("message %v for weak field is not linked in", name)) + } + if mt != m.ProtoReflect().Type() { + panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) + } + } + if m == nil || !m.ProtoReflect().IsValid() { + delete(*w, int32(num)) + return + } + if *w == nil { + *w = make(weakFields) + } + (*w)[int32(num)] = m +} diff --git a/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go new file mode 100644 index 0000000..a3de1cf --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/mapsort/mapsort.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package mapsort provides sorted access to maps. +package mapsort + +import ( + "sort" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Range iterates over every map entry in sorted key order, +// calling f for each key and value encountered. +func Range(mapv protoreflect.Map, keyKind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { + var keys []protoreflect.MapKey + mapv.Range(func(key protoreflect.MapKey, _ protoreflect.Value) bool { + keys = append(keys, key) + return true + }) + sort.Slice(keys, func(i, j int) bool { + switch keyKind { + case protoreflect.BoolKind: + return !keys[i].Bool() && keys[j].Bool() + case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, + protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind: + return keys[i].Int() < keys[j].Int() + case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, + protoreflect.Uint64Kind, protoreflect.Fixed64Kind: + return keys[i].Uint() < keys[j].Uint() + case protoreflect.StringKind: + return keys[i].String() < keys[j].String() + default: + panic("invalid kind: " + keyKind.String()) + } + }) + for _, key := range keys { + if !f(key, mapv.Get(key)) { + break + } + } +} diff --git a/vendor/google.golang.org/protobuf/internal/pragma/pragma.go b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go new file mode 100644 index 0000000..49dc4fc --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/pragma/pragma.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package pragma provides types that can be embedded into a struct to
+// statically enforce or prevent certain language properties.
+package pragma
+
+import "sync"
+
+// NoUnkeyedLiterals can be embedded in a struct to prevent unkeyed literals.
+type NoUnkeyedLiterals struct{}
+
+// DoNotImplement can be embedded in an interface to prevent trivial
+// implementations of the interface.
+//
+// This is useful to prevent unauthorized implementations of an interface
+// so that it can be extended in the future for any protobuf language changes.
+type DoNotImplement interface{ ProtoInternal(DoNotImplement) }
+
+// DoNotCompare can be embedded in a struct to prevent comparability.
+type DoNotCompare [0]func()
+
+// DoNotCopy can be embedded in a struct to help prevent shallow copies.
+// This does not rely on a Go language feature, but rather a special case
+// within the vet checker.
+//
+// See https://golang.org/issues/8005.
+type DoNotCopy [0]sync.Mutex
diff --git a/vendor/google.golang.org/protobuf/internal/set/ints.go b/vendor/google.golang.org/protobuf/internal/set/ints.go
new file mode 100644
index 0000000..d3d7f89
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/set/ints.go
@@ -0,0 +1,58 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package set provides simple set data structures for uint64s.
+package set
+
+import "math/bits"
+
+// int64s represents a set of integers within the range of 0..63.
+type int64s uint64
+
+func (bs *int64s) Len() int {
+	return bits.OnesCount64(uint64(*bs))
+}
+func (bs *int64s) Has(n uint64) bool {
+	return uint64(*bs)&(uint64(1)<<n) > 0
+}
+func (bs *int64s) Set(n uint64) {
+	*(*uint64)(bs) |= uint64(1) << n
+}
+func (bs *int64s) Clear(n uint64) {
+	*(*uint64)(bs) &^= uint64(1) << n
+}
+
+// Ints represents a set of integers within the range of 0..math.MaxUint64.
+type Ints struct {
+	lo int64s
+	hi map[uint64]struct{}
+}
+
+func (bs *Ints) Len() int {
+	return bs.lo.Len() + len(bs.hi)
+}
+func (bs *Ints) Has(n uint64) bool {
+	if n < 64 {
+		return bs.lo.Has(n)
+	}
+	_, ok := bs.hi[n]
+	return ok
+}
+func (bs *Ints) Set(n uint64) {
+	if n < 64 {
+		bs.lo.Set(n)
+		return
+	}
+	if bs.hi == nil {
+		bs.hi = make(map[uint64]struct{})
+	}
+	bs.hi[n] = struct{}{}
+}
+func (bs *Ints) Clear(n uint64) {
+	if n < 64 {
+		bs.lo.Clear(n)
+		return
+	}
+	delete(bs.hi, n)
+}
diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings.go b/vendor/google.golang.org/protobuf/internal/strs/strings.go
new file mode 100644
index 0000000..0b74e76
--- /dev/null
+++ b/vendor/google.golang.org/protobuf/internal/strs/strings.go
@@ -0,0 +1,196 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package strs provides string manipulation functionality specific to protobuf.
+package strs
+
+import (
+	"go/token"
+	"strings"
+	"unicode"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/internal/flags"
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// EnforceUTF8 reports whether to enforce strict UTF-8 validation.
+func EnforceUTF8(fd protoreflect.FieldDescriptor) bool { + if flags.ProtoLegacy { + if fd, ok := fd.(interface{ EnforceUTF8() bool }); ok { + return fd.EnforceUTF8() + } + } + return fd.Syntax() == protoreflect.Proto3 +} + +// GoCamelCase camel-cases a protobuf name for use as a Go identifier. +// +// If there is an interior underscore followed by a lower case letter, +// drop the underscore and convert the letter to upper case. +func GoCamelCase(s string) string { + // Invariant: if the next letter is lower case, it must be converted + // to upper case. + // That is, we process a word at a time, where words are marked by _ or + // upper case letter. Digits are treated as words. + var b []byte + for i := 0; i < len(s); i++ { + c := s[i] + switch { + case c == '.' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '.' in ".{{lowercase}}". + case c == '.': + b = append(b, '_') // convert '.' to '_' + case c == '_' && (i == 0 || s[i-1] == '.'): + // Convert initial '_' to ensure we start with a capital letter. + // Do the same for '_' after '.' to match historic behavior. + b = append(b, 'X') // convert '_' to 'X' + case c == '_' && i+1 < len(s) && isASCIILower(s[i+1]): + // Skip over '_' in "_{{lowercase}}". + case isASCIIDigit(c): + b = append(b, c) + default: + // Assume we have a letter now - if not, it's a bogus identifier. + // The next word is a sequence of characters that must start upper case. + if isASCIILower(c) { + c -= 'a' - 'A' // convert lowercase to uppercase + } + b = append(b, c) + + // Accept lower case sequence that follows. + for ; i+1 < len(s) && isASCIILower(s[i+1]); i++ { + b = append(b, s[i+1]) + } + } + } + return string(b) +} + +// GoSanitized converts a string to a valid Go identifier. +func GoSanitized(s string) string { + // Sanitize the input to the set of valid characters, + // which must be '_' or be in the Unicode L or N categories. + s = strings.Map(func(r rune) rune { + if unicode.IsLetter(r) || unicode.IsDigit(r) { + return r + } + return '_' + }, s) + + // Prepend '_' in the event of a Go keyword conflict or if + // the identifier is invalid (does not start in the Unicode L category). + r, _ := utf8.DecodeRuneInString(s) + if token.Lookup(s).IsKeyword() || !unicode.IsLetter(r) { + return "_" + s + } + return s +} + +// JSONCamelCase converts a snake_case identifier to a camelCase identifier, +// according to the protobuf JSON specification. +func JSONCamelCase(s string) string { + var b []byte + var wasUnderscore bool + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if c != '_' { + if wasUnderscore && isASCIILower(c) { + c -= 'a' - 'A' // convert to uppercase + } + b = append(b, c) + } + wasUnderscore = c == '_' + } + return string(b) +} + +// JSONSnakeCase converts a camelCase identifier to a snake_case identifier, +// according to the protobuf JSON specification. +func JSONSnakeCase(s string) string { + var b []byte + for i := 0; i < len(s); i++ { // proto identifiers are always ASCII + c := s[i] + if isASCIIUpper(c) { + b = append(b, '_') + c += 'a' - 'A' // convert to lowercase + } + b = append(b, c) + } + return string(b) +} + +// MapEntryName derives the name of the map entry message given the field name. 
+// See protoc v3.8.0: src/google/protobuf/descriptor.cc:254-276,6057 +func MapEntryName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(c)) + } + } + b = append(b, "Entry"...) + return string(b) +} + +// EnumValueName derives the camel-cased enum value name. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:297-313 +func EnumValueName(s string) string { + var b []byte + upperNext := true + for _, c := range s { + switch { + case c == '_': + upperNext = true + case upperNext: + b = append(b, byte(unicode.ToUpper(c))) + upperNext = false + default: + b = append(b, byte(unicode.ToLower(c))) + upperNext = false + } + } + return string(b) +} + +// TrimEnumPrefix trims the enum name prefix from an enum value name, +// where the prefix is all lowercase without underscores. +// See protoc v3.8.0: src/google/protobuf/descriptor.cc:330-375 +func TrimEnumPrefix(s, prefix string) string { + s0 := s // original input + for len(s) > 0 && len(prefix) > 0 { + if s[0] == '_' { + s = s[1:] + continue + } + if unicode.ToLower(rune(s[0])) != rune(prefix[0]) { + return s0 // no prefix match + } + s, prefix = s[1:], prefix[1:] + } + if len(prefix) > 0 { + return s0 // no prefix match + } + s = strings.TrimLeft(s, "_") + if len(s) == 0 { + return s0 // avoid returning empty string + } + return s +} + +func isASCIILower(c byte) bool { + return 'a' <= c && c <= 'z' +} +func isASCIIUpper(c byte) bool { + return 'A' <= c && c <= 'Z' +} +func isASCIIDigit(c byte) bool { + return '0' <= c && c <= '9' +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go new file mode 100644 index 0000000..85e074c --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_pure.go @@ -0,0 +1,27 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package strs + +import pref "google.golang.org/protobuf/reflect/protoreflect" + +func UnsafeString(b []byte) string { + return string(b) +} + +func UnsafeBytes(s string) []byte { + return []byte(s) +} + +type Builder struct{} + +func (*Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + return prefix.Append(name) +} + +func (*Builder) MakeString(b []byte) string { + return string(b) +} diff --git a/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go new file mode 100644 index 0000000..2160c70 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/strs/strings_unsafe.go @@ -0,0 +1,94 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !purego,!appengine + +package strs + +import ( + "unsafe" + + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } +) + +// UnsafeString returns an unsafe string reference of b. +// The caller must treat the input slice as immutable. +// +// WARNING: Use carefully. 
The returned result must not leak to the end user +// unless the input slice is provably immutable. +func UnsafeString(b []byte) (s string) { + src := (*sliceHeader)(unsafe.Pointer(&b)) + dst := (*stringHeader)(unsafe.Pointer(&s)) + dst.Data = src.Data + dst.Len = src.Len + return s +} + +// UnsafeBytes returns an unsafe bytes slice reference of s. +// The caller must treat returned slice as immutable. +// +// WARNING: Use carefully. The returned result must not leak to the end user. +func UnsafeBytes(s string) (b []byte) { + src := (*stringHeader)(unsafe.Pointer(&s)) + dst := (*sliceHeader)(unsafe.Pointer(&b)) + dst.Data = src.Data + dst.Len = src.Len + dst.Cap = src.Len + return b +} + +// Builder builds a set of strings with shared lifetime. +// This differs from strings.Builder, which is for building a single string. +type Builder struct { + buf []byte +} + +// AppendFullName is equivalent to protoreflect.FullName.Append, +// but optimized for large batches where each name has a shared lifetime. +func (sb *Builder) AppendFullName(prefix pref.FullName, name pref.Name) pref.FullName { + n := len(prefix) + len(".") + len(name) + if len(prefix) == 0 { + n -= len(".") + } + sb.grow(n) + sb.buf = append(sb.buf, prefix...) + sb.buf = append(sb.buf, '.') + sb.buf = append(sb.buf, name...) + return pref.FullName(sb.last(n)) +} + +// MakeString is equivalent to string(b), but optimized for large batches +// with a shared lifetime. +func (sb *Builder) MakeString(b []byte) string { + sb.grow(len(b)) + sb.buf = append(sb.buf, b...) + return sb.last(len(b)) +} + +func (sb *Builder) grow(n int) { + if cap(sb.buf)-len(sb.buf) >= n { + return + } + + // Unlike strings.Builder, we do not need to copy over the contents + // of the old buffer since our builder provides no API for + // retrieving previously created strings. + sb.buf = make([]byte, 2*(cap(sb.buf)+n)) +} + +func (sb *Builder) last(n int) string { + return UnsafeString(sb.buf[len(sb.buf)-n:]) +} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go new file mode 100644 index 0000000..4088e59 --- /dev/null +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -0,0 +1,79 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package version records versioning information about this module. +package version + +import ( + "fmt" + "strings" +) + +// These constants determine the current version of this module. +// +// +// For our release process, we enforce the following rules: +// * Tagged releases use a tag that is identical to String. +// * Tagged releases never reference a commit where the String +// contains "devel". +// * The set of all commits in this repository where String +// does not contain "devel" must have a unique String. +// +// +// Steps for tagging a new release: +// 1. Create a new CL. +// +// 2. Update Minor, Patch, and/or PreRelease as necessary. +// PreRelease must not contain the string "devel". +// +// 3. Since the last released minor version, have there been any changes to +// generator that relies on new functionality in the runtime? +// If yes, then increment RequiredGenerated. +// +// 4. Since the last released minor version, have there been any changes to +// the runtime that removes support for old .pb.go source code? +// If yes, then increment SupportMinimum. +// +// 5. 
Send out the CL for review and submit it. +// Note that the next CL in step 8 must be submitted after this CL +// without any other CLs in-between. +// +// 6. Tag a new version, where the tag is is the current String. +// +// 7. Write release notes for all notable changes +// between this release and the last release. +// +// 8. Create a new CL. +// +// 9. Update PreRelease to include the string "devel". +// For example: "" -> "devel" or "rc.1" -> "rc.1.devel" +// +// 10. Send out the CL for review and submit it. +const ( + Major = 1 + Minor = 23 + Patch = 0 + PreRelease = "" +) + +// String formats the version string for this module in semver format. +// +// Examples: +// v1.20.1 +// v1.21.0-rc.1 +func String() string { + v := fmt.Sprintf("v%d.%d.%d", Major, Minor, Patch) + if PreRelease != "" { + v += "-" + PreRelease + + // TODO: Add metadata about the commit or build hash. + // See https://golang.org/issue/29814 + // See https://golang.org/issue/33533 + var metadata string + if strings.Contains(PreRelease, "devel") && metadata != "" { + v += "+" + metadata + } + } + return v +} diff --git a/vendor/google.golang.org/protobuf/proto/checkinit.go b/vendor/google.golang.org/protobuf/proto/checkinit.go new file mode 100644 index 0000000..3e9a6a2 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/checkinit.go @@ -0,0 +1,71 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// CheckInitialized returns an error if any required fields in m are not set. +func CheckInitialized(m Message) error { + // Treat a nil message interface as an "untyped" empty message, + // which we assume to have no required fields. + if m == nil { + return nil + } + + return checkInitialized(m.ProtoReflect()) +} + +// CheckInitialized returns an error if any required fields in m are not set. 
+func checkInitialized(m protoreflect.Message) error { + if methods := protoMethods(m); methods != nil && methods.CheckInitialized != nil { + _, err := methods.CheckInitialized(protoiface.CheckInitializedInput{ + Message: m, + }) + return err + } + return checkInitializedSlow(m) +} + +func checkInitializedSlow(m protoreflect.Message) error { + md := m.Descriptor() + fds := md.Fields() + for i, nums := 0, md.RequiredNumbers(); i < nums.Len(); i++ { + fd := fds.ByNumber(nums.Get(i)) + if !m.Has(fd) { + return errors.RequiredNotSet(string(fd.FullName())) + } + } + var err error + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + if fd.Message() == nil { + return true + } + for i, list := 0, v.List(); i < list.Len() && err == nil; i++ { + err = checkInitialized(list.Get(i).Message()) + } + case fd.IsMap(): + if fd.MapValue().Message() == nil { + return true + } + v.Map().Range(func(key protoreflect.MapKey, v protoreflect.Value) bool { + err = checkInitialized(v.Message()) + return err == nil + }) + default: + if fd.Message() == nil { + return true + } + err = checkInitialized(v.Message()) + } + return err == nil + }) + return err +} diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go new file mode 100644 index 0000000..1282147 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -0,0 +1,270 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" + "google.golang.org/protobuf/runtime/protoiface" +) + +// UnmarshalOptions configures the unmarshaler. +// +// Example usage: +// err := UnmarshalOptions{DiscardUnknown: true}.Unmarshal(b, m) +type UnmarshalOptions struct { + pragma.NoUnkeyedLiterals + + // Merge merges the input into the destination message. + // The default behavior is to always reset the message before unmarshaling, + // unless Merge is specified. + Merge bool + + // AllowPartial accepts input for messages that will result in missing + // required fields. If AllowPartial is false (the default), Unmarshal will + // return an error if there are any missing required fields. + AllowPartial bool + + // If DiscardUnknown is set, unknown fields are ignored. + DiscardUnknown bool + + // Resolver is used for looking up types when unmarshaling extension fields. + // If nil, this defaults to using protoregistry.GlobalTypes. + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +// Unmarshal parses the wire-format message in b and places the result in m. +func Unmarshal(b []byte, m Message) error { + _, err := UnmarshalOptions{}.unmarshal(b, m.ProtoReflect()) + return err +} + +// Unmarshal parses the wire-format message in b and places the result in m. 
+func (o UnmarshalOptions) Unmarshal(b []byte, m Message) error { + _, err := o.unmarshal(b, m.ProtoReflect()) + return err +} + +// UnmarshalState parses a wire-format message and places the result in m. +// +// This method permits fine-grained control over the unmarshaler. +// Most users should use Unmarshal instead. +func (o UnmarshalOptions) UnmarshalState(in protoiface.UnmarshalInput) (protoiface.UnmarshalOutput, error) { + return o.unmarshal(in.Buf, in.Message) +} + +func (o UnmarshalOptions) unmarshal(b []byte, m protoreflect.Message) (out protoiface.UnmarshalOutput, err error) { + if o.Resolver == nil { + o.Resolver = protoregistry.GlobalTypes + } + if !o.Merge { + Reset(m.Interface()) // TODO + } + allowPartial := o.AllowPartial + o.Merge = true + o.AllowPartial = true + methods := protoMethods(m) + if methods != nil && methods.Unmarshal != nil && + !(o.DiscardUnknown && methods.Flags&protoiface.SupportUnmarshalDiscardUnknown == 0) { + in := protoiface.UnmarshalInput{ + Message: m, + Buf: b, + Resolver: o.Resolver, + } + if o.DiscardUnknown { + in.Flags |= protoiface.UnmarshalDiscardUnknown + } + out, err = methods.Unmarshal(in) + } else { + err = o.unmarshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial || (out.Flags&protoiface.UnmarshalInitialized != 0) { + return out, nil + } + return out, checkInitialized(m) +} + +func (o UnmarshalOptions) unmarshalMessage(b []byte, m protoreflect.Message) error { + _, err := o.unmarshal(b, m) + return err +} + +func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) error { + md := m.Descriptor() + if messageset.IsMessageSet(md) { + return unmarshalMessageSet(b, m, o) + } + fields := md.Fields() + for len(b) > 0 { + // Parse the tag (field number and wire type). + num, wtyp, tagLen := protowire.ConsumeTag(b) + if tagLen < 0 { + return protowire.ParseError(tagLen) + } + if num > protowire.MaxValidNumber { + return errors.New("invalid field number") + } + + // Find the field descriptor for this field number. + fd := fields.ByNumber(num) + if fd == nil && md.ExtensionRanges().Has(num) { + extType, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err != nil && err != protoregistry.NotFound { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + if extType != nil { + fd = extType.TypeDescriptor() + } + } + var err error + if fd == nil { + err = errUnknown + } else if flags.ProtoLegacy { + if fd.IsWeak() && fd.Message().IsPlaceholder() { + err = errUnknown // weak referent is not linked in + } + } + + // Parse the field value. 
+ var valLen int + switch { + case err != nil: + case fd.IsList(): + valLen, err = o.unmarshalList(b[tagLen:], wtyp, m.Mutable(fd).List(), fd) + case fd.IsMap(): + valLen, err = o.unmarshalMap(b[tagLen:], wtyp, m.Mutable(fd).Map(), fd) + default: + valLen, err = o.unmarshalSingular(b[tagLen:], wtyp, m, fd) + } + if err != nil { + if err != errUnknown { + return err + } + valLen = protowire.ConsumeFieldValue(num, wtyp, b[tagLen:]) + if valLen < 0 { + return protowire.ParseError(valLen) + } + if !o.DiscardUnknown { + m.SetUnknown(append(m.GetUnknown(), b[:tagLen+valLen]...)) + } + } + b = b[tagLen+valLen:] + } + return nil +} + +func (o UnmarshalOptions) unmarshalSingular(b []byte, wtyp protowire.Type, m protoreflect.Message, fd protoreflect.FieldDescriptor) (n int, err error) { + v, n, err := o.unmarshalScalar(b, wtyp, fd) + if err != nil { + return 0, err + } + switch fd.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + m2 := m.Mutable(fd).Message() + if err := o.unmarshalMessage(v.Bytes(), m2); err != nil { + return n, err + } + default: + // Non-message scalars replace the previous value. + m.Set(fd, v) + } + return n, nil +} + +func (o UnmarshalOptions) unmarshalMap(b []byte, wtyp protowire.Type, mapv protoreflect.Map, fd protoreflect.FieldDescriptor) (n int, err error) { + if wtyp != protowire.BytesType { + return 0, errUnknown + } + b, n = protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + var ( + keyField = fd.MapKey() + valField = fd.MapValue() + key protoreflect.Value + val protoreflect.Value + haveKey bool + haveVal bool + ) + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + val = mapv.NewValue() + } + // Map entries are represented as a two-element message with fields + // containing the key and value. + for len(b) > 0 { + num, wtyp, n := protowire.ConsumeTag(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + if num > protowire.MaxValidNumber { + return 0, errors.New("invalid field number") + } + b = b[n:] + err = errUnknown + switch num { + case 1: + key, n, err = o.unmarshalScalar(b, wtyp, keyField) + if err != nil { + break + } + haveKey = true + case 2: + var v protoreflect.Value + v, n, err = o.unmarshalScalar(b, wtyp, valField) + if err != nil { + break + } + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + if err := o.unmarshalMessage(v.Bytes(), val.Message()); err != nil { + return 0, err + } + default: + val = v + } + haveVal = true + } + if err == errUnknown { + n = protowire.ConsumeFieldValue(num, wtyp, b) + if n < 0 { + return 0, protowire.ParseError(n) + } + } else if err != nil { + return 0, err + } + b = b[n:] + } + // Every map entry should have entries for key and value, but this is not strictly required. + if !haveKey { + key = keyField.Default() + } + if !haveVal { + switch valField.Kind() { + case protoreflect.GroupKind, protoreflect.MessageKind: + default: + val = valField.Default() + } + } + mapv.Set(key.MapKey(), val) + return n, nil +} + +// errUnknown is used internally to indicate fields which should be added +// to the unknown field set of a message. It is never returned from an exported +// function. 
+var errUnknown = errors.New("BUG: internal error (unknown)") diff --git a/vendor/google.golang.org/protobuf/proto/decode_gen.go b/vendor/google.golang.org/protobuf/proto/decode_gen.go new file mode 100644 index 0000000..d6dc904 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/decode_gen.go @@ -0,0 +1,603 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// unmarshalScalar decodes a value of the given kind. +// +// Message values are decoded into a []byte which aliases the input data. +func (o UnmarshalOptions) unmarshalScalar(b []byte, wtyp protowire.Type, fd protoreflect.FieldDescriptor) (val protoreflect.Value, n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBool(protowire.DecodeBool(v)), n, nil + case protoreflect.EnumKind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfEnum(protoreflect.EnumNumber(v)), n, nil + case protoreflect.Int32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Sint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32))), n, nil + case protoreflect.Uint32Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.Int64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Sint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(protowire.DecodeZigZag(v)), n, nil + case protoreflect.Uint64Kind: + if wtyp != protowire.VarintType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.Sfixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt32(int32(v)), n, nil + case protoreflect.Fixed32Kind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) 
+ if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint32(uint32(v)), n, nil + case protoreflect.FloatKind: + if wtyp != protowire.Fixed32Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v))), n, nil + case protoreflect.Sfixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfInt64(int64(v)), n, nil + case protoreflect.Fixed64Kind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfUint64(v), n, nil + case protoreflect.DoubleKind: + if wtyp != protowire.Fixed64Type { + return val, 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfFloat64(math.Float64frombits(v)), n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return protoreflect.Value{}, 0, errors.InvalidUTF8(string(fd.FullName())) + } + return protoreflect.ValueOfString(string(v)), n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(append(emptyBuf[:], v...)), n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(v), n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return val, 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return val, 0, protowire.ParseError(n) + } + return protoreflect.ValueOfBytes(v), n, nil + default: + return val, 0, errUnknown + } +} + +func (o UnmarshalOptions) unmarshalList(b []byte, wtyp protowire.Type, list protoreflect.List, fd protoreflect.FieldDescriptor) (n int, err error) { + switch fd.Kind() { + case protoreflect.BoolKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBool(protowire.DecodeBool(v))) + return n, nil + case protoreflect.EnumKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, 
n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfEnum(protoreflect.EnumNumber(v))) + return n, nil + case protoreflect.Int32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Sint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(protowire.DecodeZigZag(v & math.MaxUint32)))) + return n, nil + case protoreflect.Uint32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.Int64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Sint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + } + return n, nil + } + if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(protowire.DecodeZigZag(v))) + return n, nil + case protoreflect.Uint64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeVarint(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + 
if wtyp != protowire.VarintType { + return 0, errUnknown + } + v, n := protowire.ConsumeVarint(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.Sfixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt32(int32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt32(int32(v))) + return n, nil + case protoreflect.Fixed32Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint32(uint32(v))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint32(uint32(v))) + return n, nil + case protoreflect.FloatKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed32(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + } + return n, nil + } + if wtyp != protowire.Fixed32Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed32(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat32(math.Float32frombits(uint32(v)))) + return n, nil + case protoreflect.Sfixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfInt64(int64(v))) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfInt64(int64(v))) + return n, nil + case protoreflect.Fixed64Kind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfUint64(v)) + } + return n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfUint64(v)) + return n, nil + case protoreflect.DoubleKind: + if wtyp == protowire.BytesType { + buf, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + for len(buf) > 0 { + v, n := protowire.ConsumeFixed64(buf) + if n < 0 { + return 0, protowire.ParseError(n) + } + buf = buf[n:] + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + } + return 
n, nil + } + if wtyp != protowire.Fixed64Type { + return 0, errUnknown + } + v, n := protowire.ConsumeFixed64(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfFloat64(math.Float64frombits(v))) + return n, nil + case protoreflect.StringKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + if strs.EnforceUTF8(fd) && !utf8.Valid(v) { + return 0, errors.InvalidUTF8(string(fd.FullName())) + } + list.Append(protoreflect.ValueOfString(string(v))) + return n, nil + case protoreflect.BytesKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + list.Append(protoreflect.ValueOfBytes(append(emptyBuf[:], v...))) + return n, nil + case protoreflect.MessageKind: + if wtyp != protowire.BytesType { + return 0, errUnknown + } + v, n := protowire.ConsumeBytes(b) + if n < 0 { + return 0, protowire.ParseError(n) + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + case protoreflect.GroupKind: + if wtyp != protowire.StartGroupType { + return 0, errUnknown + } + v, n := protowire.ConsumeGroup(fd.Number(), b) + if n < 0 { + return 0, protowire.ParseError(n) + } + m := list.NewElement() + if err := o.unmarshalMessage(v, m.Message()); err != nil { + return 0, err + } + list.Append(m) + return n, nil + default: + return 0, errUnknown + } +} + +// We append to an empty array rather than a nil []byte to get non-nil zero-length byte slices. +var emptyBuf [0]byte diff --git a/vendor/google.golang.org/protobuf/proto/doc.go b/vendor/google.golang.org/protobuf/proto/doc.go new file mode 100644 index 0000000..c52d8c4 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/doc.go @@ -0,0 +1,94 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proto provides functions operating on protocol buffer messages. +// +// For documentation on protocol buffers in general, see: +// +// https://developers.google.com/protocol-buffers +// +// For a tutorial on using protocol buffers with Go, see: +// +// https://developers.google.com/protocol-buffers/docs/gotutorial +// +// For a guide to generated Go protocol buffer code, see: +// +// https://developers.google.com/protocol-buffers/docs/reference/go-generated +// +// +// Binary serialization +// +// This package contains functions to convert to and from the wire format, +// an efficient binary serialization of protocol buffers. +// +// • Size reports the size of a message in the wire format. +// +// • Marshal converts a message to the wire format. +// The MarshalOptions type provides more control over wire marshaling. +// +// • Unmarshal converts a message from the wire format. +// The UnmarshalOptions type provides more control over wire unmarshaling. +// +// +// Basic message operations +// +// • Clone makes a deep copy of a message. +// +// • Merge merges the content of a message into another. +// +// • Equal compares two messages. For more control over comparisons +// and detailed reporting of differences, see package +// "google.golang.org/protobuf/testing/protocmp". +// +// • Reset clears the content of a message. +// +// • CheckInitialized reports whether all required fields in a message are set. 
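// A minimal sketch (not part of the vendored file) of the packed-repeated
// decoding that unmarshalList performs above: one BytesType record is
// consumed, then varints are read until the inner buffer is empty. Field
// number 1 is an arbitrary choice for the example.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
)

func main() {
	// Encode [3, 270, 86942] as a packed varint field with field number 1.
	var payload []byte
	for _, v := range []uint64{3, 270, 86942} {
		payload = protowire.AppendVarint(payload, v)
	}
	var b []byte
	b = protowire.AppendTag(b, 1, protowire.BytesType)
	b = protowire.AppendBytes(b, payload)

	// Decode it the same way the packed branches above do.
	_, wtyp, n := protowire.ConsumeTag(b)
	if n < 0 || wtyp != protowire.BytesType {
		panic("unexpected wire type")
	}
	buf, m := protowire.ConsumeBytes(b[n:])
	if m < 0 {
		panic(protowire.ParseError(m))
	}
	for len(buf) > 0 {
		v, k := protowire.ConsumeVarint(buf)
		if k < 0 {
			panic(protowire.ParseError(k))
		}
		buf = buf[k:]
		fmt.Println(v) // 3, 270, 86942
	}
}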
+// +// +// Optional scalar constructors +// +// The API for some generated messages represents optional scalar fields +// as pointers to a value. For example, an optional string field has the +// Go type *string. +// +// • Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, and String +// take a value and return a pointer to a new instance of it, +// to simplify construction of optional field values. +// +// Generated enum types usually have an Enum method which performs the +// same operation. +// +// Optional scalar fields are only supported in proto2. +// +// +// Extension accessors +// +// • HasExtension, GetExtension, SetExtension, and ClearExtension +// access extension field values in a protocol buffer message. +// +// Extension fields are only supported in proto2. +// +// +// Related packages +// +// • Package "google.golang.org/protobuf/encoding/protojson" converts messages to +// and from JSON. +// +// • Package "google.golang.org/protobuf/encoding/prototext" converts messages to +// and from the text format. +// +// • Package "google.golang.org/protobuf/reflect/protoreflect" provides a +// reflection interface for protocol buffer data types. +// +// • Package "google.golang.org/protobuf/testing/protocmp" provides features +// to compare protocol buffer messages with the "github.com/google/go-cmp/cmp" +// package. +// +// • Package "google.golang.org/protobuf/types/dynamicpb" provides a dynamic +// message type, suitable for working with messages where the protocol buffer +// type is only known at runtime. +// +// This module contains additional packages for more specialized use cases. +// Consult the individual package documentation for details. +package proto diff --git a/vendor/google.golang.org/protobuf/proto/encode.go b/vendor/google.golang.org/protobuf/proto/encode.go new file mode 100644 index 0000000..456bfda --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode.go @@ -0,0 +1,343 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "sort" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/fieldsort" + "google.golang.org/protobuf/internal/mapsort" + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// MarshalOptions configures the marshaler. +// +// Example usage: +// b, err := MarshalOptions{Deterministic: true}.Marshal(m) +type MarshalOptions struct { + pragma.NoUnkeyedLiterals + + // AllowPartial allows messages that have missing required fields to marshal + // without returning an error. If AllowPartial is false (the default), + // Marshal will return an error if there are any missing required fields. + AllowPartial bool + + // Deterministic controls whether the same message will always be + // serialized to the same bytes within the same binary. + // + // Setting this option guarantees that repeated serialization of + // the same message will return the same bytes, and that different + // processes of the same binary (which may be executing on different + // machines) will serialize equal messages to the same bytes. + // It has no effect on the resulting size of the encoded message compared + // to a non-deterministic marshal. 
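// A minimal sketch (not part of the vendored file) of the optional scalar
// constructors documented above, using the proto2 descriptorpb message as an
// example of pointer-typed optional fields; it assumes the
// google.golang.org/protobuf/types/descriptorpb package is available.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	fd := &descriptorpb.FieldDescriptorProto{
		Name:   proto.String("id"), // *string
		Number: proto.Int32(1),     // *int32
	}
	fmt.Println(fd.GetName(), fd.GetNumber()) // getters dereference safely
}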
+ // + // Note that the deterministic serialization is NOT canonical across + // languages. It is not guaranteed to remain stable over time. It is + // unstable across different builds with schema changes due to unknown + // fields. Users who need canonical serialization (e.g., persistent + // storage in a canonical form, fingerprinting, etc.) must define + // their own canonicalization specification and implement their own + // serializer rather than relying on this API. + // + // If deterministic serialization is requested, map entries will be + // sorted by keys in lexographical order. This is an implementation + // detail and subject to change. + Deterministic bool + + // UseCachedSize indicates that the result of a previous Size call + // may be reused. + // + // Setting this option asserts that: + // + // 1. Size has previously been called on this message with identical + // options (except for UseCachedSize itself). + // + // 2. The message and all its submessages have not changed in any + // way since the Size call. + // + // If either of these invariants is violated, + // the results are undefined and may include panics or corrupted output. + // + // Implementations MAY take this option into account to provide + // better performance, but there is no guarantee that they will do so. + // There is absolutely no guarantee that Size followed by Marshal with + // UseCachedSize set will perform equivalently to Marshal alone. + UseCachedSize bool +} + +// Marshal returns the wire-format encoding of m. +func Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := MarshalOptions{}.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// Marshal returns the wire-format encoding of m. +func (o MarshalOptions) Marshal(m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to output. + if m == nil { + return nil, nil + } + + out, err := o.marshal(nil, m.ProtoReflect()) + if len(out.Buf) == 0 && err == nil { + out.Buf = emptyBytesForMessage(m) + } + return out.Buf, err +} + +// emptyBytesForMessage returns a nil buffer if and only if m is invalid, +// otherwise it returns a non-nil empty buffer. +// +// This is to assist the edge-case where user-code does the following: +// m1.OptionalBytes, _ = proto.Marshal(m2) +// where they expect the proto2 "optional_bytes" field to be populated +// if any only if m2 is a valid message. +func emptyBytesForMessage(m Message) []byte { + if m == nil || !m.ProtoReflect().IsValid() { + return nil + } + return emptyBuf[:] +} + +// MarshalAppend appends the wire-format encoding of m to b, +// returning the result. +func (o MarshalOptions) MarshalAppend(b []byte, m Message) ([]byte, error) { + // Treat nil message interface as an empty message; nothing to append. + if m == nil { + return b, nil + } + + out, err := o.marshal(b, m.ProtoReflect()) + return out.Buf, err +} + +// MarshalState returns the wire-format encoding of a message. +// +// This method permits fine-grained control over the marshaler. +// Most users should use Marshal instead. 
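// A minimal sketch (not part of the vendored file) of Marshal/Unmarshal and of
// MarshalOptions, assuming the durationpb well-known type is available.
// Deterministic mainly affects map-entry ordering and is a no-op for this
// scalar-only message.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	in := durationpb.New(90 * time.Second)

	b, err := proto.MarshalOptions{Deterministic: true}.Marshal(in)
	if err != nil {
		panic(err)
	}

	out := &durationpb.Duration{}
	if err := proto.Unmarshal(b, out); err != nil {
		panic(err)
	}
	fmt.Println(out.AsDuration()) // 1m30s
}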
+func (o MarshalOptions) MarshalState(in protoiface.MarshalInput) (protoiface.MarshalOutput, error) { + return o.marshal(in.Buf, in.Message) +} + +func (o MarshalOptions) marshal(b []byte, m protoreflect.Message) (out protoiface.MarshalOutput, err error) { + allowPartial := o.AllowPartial + o.AllowPartial = true + if methods := protoMethods(m); methods != nil && methods.Marshal != nil && + !(o.Deterministic && methods.Flags&protoiface.SupportMarshalDeterministic == 0) { + in := protoiface.MarshalInput{ + Message: m, + Buf: b, + } + if o.Deterministic { + in.Flags |= protoiface.MarshalDeterministic + } + if o.UseCachedSize { + in.Flags |= protoiface.MarshalUseCachedSize + } + if methods.Size != nil { + sout := methods.Size(protoiface.SizeInput{ + Message: m, + Flags: in.Flags, + }) + if cap(b) < len(b)+sout.Size { + in.Buf = make([]byte, len(b), growcap(cap(b), len(b)+sout.Size)) + copy(in.Buf, b) + } + in.Flags |= protoiface.MarshalUseCachedSize + } + out, err = methods.Marshal(in) + } else { + out.Buf, err = o.marshalMessageSlow(b, m) + } + if err != nil { + return out, err + } + if allowPartial { + return out, nil + } + return out, checkInitialized(m) +} + +func (o MarshalOptions) marshalMessage(b []byte, m protoreflect.Message) ([]byte, error) { + out, err := o.marshal(b, m) + return out.Buf, err +} + +// growcap scales up the capacity of a slice. +// +// Given a slice with a current capacity of oldcap and a desired +// capacity of wantcap, growcap returns a new capacity >= wantcap. +// +// The algorithm is mostly identical to the one used by append as of Go 1.14. +func growcap(oldcap, wantcap int) (newcap int) { + if wantcap > oldcap*2 { + newcap = wantcap + } else if oldcap < 1024 { + // The Go 1.14 runtime takes this case when len(s) < 1024, + // not when cap(s) < 1024. The difference doesn't seem + // significant here. + newcap = oldcap * 2 + } else { + newcap = oldcap + for 0 < newcap && newcap < wantcap { + newcap += newcap / 4 + } + if newcap <= 0 { + newcap = wantcap + } + } + return newcap +} + +func (o MarshalOptions) marshalMessageSlow(b []byte, m protoreflect.Message) ([]byte, error) { + if messageset.IsMessageSet(m.Descriptor()) { + return marshalMessageSet(b, m, o) + } + // There are many choices for what order we visit fields in. The default one here + // is chosen for reasonable efficiency and simplicity given the protoreflect API. + // It is not deterministic, since Message.Range does not return fields in any + // defined order. + // + // When using deterministic serialization, we sort the known fields. + var err error + o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = o.marshalField(b, fd, v) + return err == nil + }) + if err != nil { + return b, err + } + b = append(b, m.GetUnknown()...) + return b, nil +} + +// rangeFields visits fields in a defined order when deterministic serialization is enabled. 
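// A minimal sketch (not part of the vendored file) of the size-then-marshal
// pattern that the fast path above optimizes for: compute the size,
// pre-allocate, then append into the same buffer. Assumes the timestamppb
// well-known type is available.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	m := timestamppb.Now()
	opts := proto.MarshalOptions{}

	sz := opts.Size(m)
	buf := make([]byte, 0, sz) // enough capacity for the marshal to append in place

	buf, err := opts.MarshalAppend(buf, m)
	if err != nil {
		panic(err)
	}
	fmt.Println(sz == len(buf)) // true
}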
+func (o MarshalOptions) rangeFields(m protoreflect.Message, f func(protoreflect.FieldDescriptor, protoreflect.Value) bool) { + if !o.Deterministic { + m.Range(f) + return + } + var fds []protoreflect.FieldDescriptor + m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + fds = append(fds, fd) + return true + }) + sort.Slice(fds, func(a, b int) bool { + return fieldsort.Less(fds[a], fds[b]) + }) + for _, fd := range fds { + if !f(fd, m.Get(fd)) { + break + } + } +} + +func (o MarshalOptions) marshalField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value) ([]byte, error) { + switch { + case fd.IsList(): + return o.marshalList(b, fd, value.List()) + case fd.IsMap(): + return o.marshalMap(b, fd, value.Map()) + default: + b = protowire.AppendTag(b, fd.Number(), wireTypes[fd.Kind()]) + return o.marshalSingular(b, fd, value) + } +} + +func (o MarshalOptions) marshalList(b []byte, fd protoreflect.FieldDescriptor, list protoreflect.List) ([]byte, error) { + if fd.IsPacked() && list.Len() > 0 { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + b, pos := appendSpeculativeLength(b) + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + b = finishSpeculativeLength(b, pos) + return b, nil + } + + kind := fd.Kind() + for i, llen := 0, list.Len(); i < llen; i++ { + var err error + b = protowire.AppendTag(b, fd.Number(), wireTypes[kind]) + b, err = o.marshalSingular(b, fd, list.Get(i)) + if err != nil { + return b, err + } + } + return b, nil +} + +func (o MarshalOptions) marshalMap(b []byte, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) ([]byte, error) { + keyf := fd.MapKey() + valf := fd.MapValue() + var err error + o.rangeMap(mapv, keyf.Kind(), func(key protoreflect.MapKey, value protoreflect.Value) bool { + b = protowire.AppendTag(b, fd.Number(), protowire.BytesType) + var pos int + b, pos = appendSpeculativeLength(b) + + b, err = o.marshalField(b, keyf, key.Value()) + if err != nil { + return false + } + b, err = o.marshalField(b, valf, value) + if err != nil { + return false + } + b = finishSpeculativeLength(b, pos) + return true + }) + return b, err +} + +func (o MarshalOptions) rangeMap(mapv protoreflect.Map, kind protoreflect.Kind, f func(protoreflect.MapKey, protoreflect.Value) bool) { + if !o.Deterministic { + mapv.Range(f) + return + } + mapsort.Range(mapv, kind, f) +} + +// When encoding length-prefixed fields, we speculatively set aside some number of bytes +// for the length, encode the data, and then encode the length (shifting the data if necessary +// to make room). +const speculativeLength = 1 + +func appendSpeculativeLength(b []byte) ([]byte, int) { + pos := len(b) + b = append(b, "\x00\x00\x00\x00"[:speculativeLength]...) + return b, pos +} + +func finishSpeculativeLength(b []byte, pos int) []byte { + mlen := len(b) - pos - speculativeLength + msiz := protowire.SizeVarint(uint64(mlen)) + if msiz != speculativeLength { + for i := 0; i < msiz-speculativeLength; i++ { + b = append(b, 0) + } + copy(b[pos+msiz:], b[pos+speculativeLength:]) + b = b[:pos+msiz+mlen] + } + protowire.AppendVarint(b[:pos], uint64(mlen)) + return b +} diff --git a/vendor/google.golang.org/protobuf/proto/encode_gen.go b/vendor/google.golang.org/protobuf/proto/encode_gen.go new file mode 100644 index 0000000..185dacf --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/encode_gen.go @@ -0,0 +1,97 @@ +// Copyright 2018 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "math" + "unicode/utf8" + + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/strs" + "google.golang.org/protobuf/reflect/protoreflect" +) + +var wireTypes = map[protoreflect.Kind]protowire.Type{ + protoreflect.BoolKind: protowire.VarintType, + protoreflect.EnumKind: protowire.VarintType, + protoreflect.Int32Kind: protowire.VarintType, + protoreflect.Sint32Kind: protowire.VarintType, + protoreflect.Uint32Kind: protowire.VarintType, + protoreflect.Int64Kind: protowire.VarintType, + protoreflect.Sint64Kind: protowire.VarintType, + protoreflect.Uint64Kind: protowire.VarintType, + protoreflect.Sfixed32Kind: protowire.Fixed32Type, + protoreflect.Fixed32Kind: protowire.Fixed32Type, + protoreflect.FloatKind: protowire.Fixed32Type, + protoreflect.Sfixed64Kind: protowire.Fixed64Type, + protoreflect.Fixed64Kind: protowire.Fixed64Type, + protoreflect.DoubleKind: protowire.Fixed64Type, + protoreflect.StringKind: protowire.BytesType, + protoreflect.BytesKind: protowire.BytesType, + protoreflect.MessageKind: protowire.BytesType, + protoreflect.GroupKind: protowire.StartGroupType, +} + +func (o MarshalOptions) marshalSingular(b []byte, fd protoreflect.FieldDescriptor, v protoreflect.Value) ([]byte, error) { + switch fd.Kind() { + case protoreflect.BoolKind: + b = protowire.AppendVarint(b, protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + b = protowire.AppendVarint(b, uint64(v.Enum())) + case protoreflect.Int32Kind: + b = protowire.AppendVarint(b, uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + b = protowire.AppendVarint(b, uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + b = protowire.AppendVarint(b, uint64(v.Int())) + case protoreflect.Sint64Kind: + b = protowire.AppendVarint(b, protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + b = protowire.AppendVarint(b, v.Uint()) + case protoreflect.Sfixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Int())) + case protoreflect.Fixed32Kind: + b = protowire.AppendFixed32(b, uint32(v.Uint())) + case protoreflect.FloatKind: + b = protowire.AppendFixed32(b, math.Float32bits(float32(v.Float()))) + case protoreflect.Sfixed64Kind: + b = protowire.AppendFixed64(b, uint64(v.Int())) + case protoreflect.Fixed64Kind: + b = protowire.AppendFixed64(b, v.Uint()) + case protoreflect.DoubleKind: + b = protowire.AppendFixed64(b, math.Float64bits(v.Float())) + case protoreflect.StringKind: + if strs.EnforceUTF8(fd) && !utf8.ValidString(v.String()) { + return b, errors.InvalidUTF8(string(fd.FullName())) + } + b = protowire.AppendString(b, v.String()) + case protoreflect.BytesKind: + b = protowire.AppendBytes(b, v.Bytes()) + case protoreflect.MessageKind: + var pos int + var err error + b, pos = appendSpeculativeLength(b) + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = finishSpeculativeLength(b, pos) + case protoreflect.GroupKind: + var err error + b, err = o.marshalMessage(b, v.Message()) + if err != nil { + return b, err + } + b = protowire.AppendVarint(b, protowire.EncodeTag(fd.Number(), protowire.EndGroupType)) + default: + return b, errors.New("invalid kind %v", fd.Kind()) + } + return b, 
nil +} diff --git a/vendor/google.golang.org/protobuf/proto/equal.go b/vendor/google.golang.org/protobuf/proto/equal.go new file mode 100644 index 0000000..10902bd --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/equal.go @@ -0,0 +1,154 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "bytes" + "math" + "reflect" + + "google.golang.org/protobuf/encoding/protowire" + pref "google.golang.org/protobuf/reflect/protoreflect" +) + +// Equal reports whether two messages are equal. +// If two messages marshal to the same bytes under deterministic serialization, +// then Equal is guaranteed to report true. +// +// Two messages are equal if they belong to the same message descriptor, +// have the same set of populated known and extension field values, +// and the same set of unknown fields values. If either of the top-level +// messages are invalid, then Equal reports true only if both are invalid. +// +// Scalar values are compared with the equivalent of the == operator in Go, +// except bytes values which are compared using bytes.Equal and +// floating point values which specially treat NaNs as equal. +// Message values are compared by recursively calling Equal. +// Lists are equal if each element value is also equal. +// Maps are equal if they have the same set of keys, where the pair of values +// for each key is also equal. +func Equal(x, y Message) bool { + if x == nil || y == nil { + return x == nil && y == nil + } + mx := x.ProtoReflect() + my := y.ProtoReflect() + if mx.IsValid() != my.IsValid() { + return false + } + return equalMessage(mx, my) +} + +// equalMessage compares two messages. +func equalMessage(mx, my pref.Message) bool { + if mx.Descriptor() != my.Descriptor() { + return false + } + + nx := 0 + equal := true + mx.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + nx++ + vy := my.Get(fd) + equal = my.Has(fd) && equalField(fd, vx, vy) + return equal + }) + if !equal { + return false + } + ny := 0 + my.Range(func(fd pref.FieldDescriptor, vx pref.Value) bool { + ny++ + return true + }) + if nx != ny { + return false + } + + return equalUnknown(mx.GetUnknown(), my.GetUnknown()) +} + +// equalField compares two fields. +func equalField(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch { + case fd.IsList(): + return equalList(fd, x.List(), y.List()) + case fd.IsMap(): + return equalMap(fd, x.Map(), y.Map()) + default: + return equalValue(fd, x, y) + } +} + +// equalMap compares two maps. +func equalMap(fd pref.FieldDescriptor, x, y pref.Map) bool { + if x.Len() != y.Len() { + return false + } + equal := true + x.Range(func(k pref.MapKey, vx pref.Value) bool { + vy := y.Get(k) + equal = y.Has(k) && equalValue(fd.MapValue(), vx, vy) + return equal + }) + return equal +} + +// equalList compares two lists. +func equalList(fd pref.FieldDescriptor, x, y pref.List) bool { + if x.Len() != y.Len() { + return false + } + for i := x.Len() - 1; i >= 0; i-- { + if !equalValue(fd, x.Get(i), y.Get(i)) { + return false + } + } + return true +} + +// equalValue compares two singular values. 
+func equalValue(fd pref.FieldDescriptor, x, y pref.Value) bool { + switch { + case fd.Message() != nil: + return equalMessage(x.Message(), y.Message()) + case fd.Kind() == pref.BytesKind: + return bytes.Equal(x.Bytes(), y.Bytes()) + case fd.Kind() == pref.FloatKind, fd.Kind() == pref.DoubleKind: + fx := x.Float() + fy := y.Float() + if math.IsNaN(fx) || math.IsNaN(fy) { + return math.IsNaN(fx) && math.IsNaN(fy) + } + return fx == fy + default: + return x.Interface() == y.Interface() + } +} + +// equalUnknown compares unknown fields by direct comparison on the raw bytes +// of each individual field number. +func equalUnknown(x, y pref.RawFields) bool { + if len(x) != len(y) { + return false + } + if bytes.Equal([]byte(x), []byte(y)) { + return true + } + + mx := make(map[pref.FieldNumber]pref.RawFields) + my := make(map[pref.FieldNumber]pref.RawFields) + for len(x) > 0 { + fnum, _, n := protowire.ConsumeField(x) + mx[fnum] = append(mx[fnum], x[:n]...) + x = x[n:] + } + for len(y) > 0 { + fnum, _, n := protowire.ConsumeField(y) + my[fnum] = append(my[fnum], y[:n]...) + y = y[n:] + } + return reflect.DeepEqual(mx, my) +} diff --git a/vendor/google.golang.org/protobuf/proto/extension.go b/vendor/google.golang.org/protobuf/proto/extension.go new file mode 100644 index 0000000..5f293cd --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/extension.go @@ -0,0 +1,92 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" +) + +// HasExtension reports whether an extension field is populated. +// It returns false if m is invalid or if xt does not extend m. +func HasExtension(m Message, xt protoreflect.ExtensionType) bool { + // Treat nil message interface as an empty message; no populated fields. + if m == nil { + return false + } + + // As a special-case, we reports invalid or mismatching descriptors + // as always not being populated (since they aren't). + if xt == nil || m.ProtoReflect().Descriptor() != xt.TypeDescriptor().ContainingMessage() { + return false + } + + return m.ProtoReflect().Has(xt.TypeDescriptor()) +} + +// ClearExtension clears an extension field such that subsequent +// HasExtension calls return false. +// It panics if m is invalid or if xt does not extend m. +func ClearExtension(m Message, xt protoreflect.ExtensionType) { + m.ProtoReflect().Clear(xt.TypeDescriptor()) +} + +// GetExtension retrieves the value for an extension field. +// If the field is unpopulated, it returns the default value for +// scalars and an immutable, empty value for lists or messages. +// It panics if xt does not extend m. +func GetExtension(m Message, xt protoreflect.ExtensionType) interface{} { + // Treat nil message interface as an empty message; return the default. + if m == nil { + return xt.InterfaceOf(xt.Zero()) + } + + return xt.InterfaceOf(m.ProtoReflect().Get(xt.TypeDescriptor())) +} + +// SetExtension stores the value of an extension field. +// It panics if m is invalid, xt does not extend m, or if type of v +// is invalid for the specified extension field. +func SetExtension(m Message, xt protoreflect.ExtensionType, v interface{}) { + xd := xt.TypeDescriptor() + pv := xt.ValueOf(v) + + // Specially treat an invalid list, map, or message as clear. 
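// A minimal sketch (not part of the vendored file) of the Equal semantics
// implemented above: scalar fields compare by value and NaN is treated as
// equal to NaN. Assumes the wrapperspb well-known types are available.
package main

import (
	"fmt"
	"math"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	x := wrapperspb.Double(math.NaN())
	y := wrapperspb.Double(math.NaN())
	fmt.Println(proto.Equal(x, y))                    // true: NaNs compare equal here
	fmt.Println(proto.Equal(x, wrapperspb.Double(1))) // false
}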
+ isValid := true + switch { + case xd.IsList(): + isValid = pv.List().IsValid() + case xd.IsMap(): + isValid = pv.Map().IsValid() + case xd.Message() != nil: + isValid = pv.Message().IsValid() + } + if !isValid { + m.ProtoReflect().Clear(xd) + return + } + + m.ProtoReflect().Set(xd, pv) +} + +// RangeExtensions iterates over every populated extension field in m in an +// undefined order, calling f for each extension type and value encountered. +// It returns immediately if f returns false. +// While iterating, mutating operations may only be performed +// on the current extension field. +func RangeExtensions(m Message, f func(protoreflect.ExtensionType, interface{}) bool) { + // Treat nil message interface as an empty message; nothing to range over. + if m == nil { + return + } + + m.ProtoReflect().Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + if fd.IsExtension() { + xt := fd.(protoreflect.ExtensionTypeDescriptor).Type() + vi := xt.InterfaceOf(v) + return f(xt, vi) + } + return true + }) +} diff --git a/vendor/google.golang.org/protobuf/proto/merge.go b/vendor/google.golang.org/protobuf/proto/merge.go new file mode 100644 index 0000000..d761ab3 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/merge.go @@ -0,0 +1,139 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Merge merges src into dst, which must be a message with the same descriptor. +// +// Populated scalar fields in src are copied to dst, while populated +// singular messages in src are merged into dst by recursively calling Merge. +// The elements of every list field in src is appended to the corresponded +// list fields in dst. The entries of every map field in src is copied into +// the corresponding map field in dst, possibly replacing existing entries. +// The unknown fields of src are appended to the unknown fields of dst. +// +// It is semantically equivalent to unmarshaling the encoded form of src +// into dst with the UnmarshalOptions.Merge option specified. +func Merge(dst, src Message) { + // TODO: Should nil src be treated as semantically equivalent to a + // untyped, read-only, empty message? What about a nil dst? + + dstMsg, srcMsg := dst.ProtoReflect(), src.ProtoReflect() + if dstMsg.Descriptor() != srcMsg.Descriptor() { + if got, want := dstMsg.Descriptor().FullName(), srcMsg.Descriptor().FullName(); got != want { + panic(fmt.Sprintf("descriptor mismatch: %v != %v", got, want)) + } + panic("descriptor mismatch") + } + mergeOptions{}.mergeMessage(dstMsg, srcMsg) +} + +// Clone returns a deep copy of m. +// If the top-level message is invalid, it returns an invalid message as well. +func Clone(m Message) Message { + // NOTE: Most usages of Clone assume the following properties: + // t := reflect.TypeOf(m) + // t == reflect.TypeOf(m.ProtoReflect().New().Interface()) + // t == reflect.TypeOf(m.ProtoReflect().Type().Zero().Interface()) + // + // Embedding protobuf messages breaks this since the parent type will have + // a forwarded ProtoReflect method, but the Interface method will return + // the underlying embedded message type. 
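// A minimal sketch (not part of the vendored file) of the extension accessors
// above. The extended message and extension type (examplepb.Envelope,
// examplepb.E_Priority, assumed to be an optional int32 extension) are
// hypothetical generated identifiers, stand-ins for whatever proto2 extension
// a real project defines.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"

	"example.com/gen/examplepb" // hypothetical generated package
)

func main() {
	m := &examplepb.Envelope{}

	proto.SetExtension(m, examplepb.E_Priority, int32(5))
	fmt.Println(proto.HasExtension(m, examplepb.E_Priority)) // true
	fmt.Println(proto.GetExtension(m, examplepb.E_Priority)) // 5

	proto.ClearExtension(m, examplepb.E_Priority)
	fmt.Println(proto.HasExtension(m, examplepb.E_Priority)) // false
}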
+ if m == nil { + return nil + } + src := m.ProtoReflect() + if !src.IsValid() { + return src.Type().Zero().Interface() + } + dst := src.New() + mergeOptions{}.mergeMessage(dst, src) + return dst.Interface() +} + +// mergeOptions provides a namespace for merge functions, and can be +// exported in the future if we add user-visible merge options. +type mergeOptions struct{} + +func (o mergeOptions) mergeMessage(dst, src protoreflect.Message) { + methods := protoMethods(dst) + if methods != nil && methods.Merge != nil { + in := protoiface.MergeInput{ + Destination: dst, + Source: src, + } + out := methods.Merge(in) + if out.Flags&protoiface.MergeComplete != 0 { + return + } + } + + if !dst.IsValid() { + panic(fmt.Sprintf("cannot merge into invalid %v message", dst.Descriptor().FullName())) + } + + src.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + switch { + case fd.IsList(): + o.mergeList(dst.Mutable(fd).List(), v.List(), fd) + case fd.IsMap(): + o.mergeMap(dst.Mutable(fd).Map(), v.Map(), fd.MapValue()) + case fd.Message() != nil: + o.mergeMessage(dst.Mutable(fd).Message(), v.Message()) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(fd, o.cloneBytes(v)) + default: + dst.Set(fd, v) + } + return true + }) + + if len(src.GetUnknown()) > 0 { + dst.SetUnknown(append(dst.GetUnknown(), src.GetUnknown()...)) + } +} + +func (o mergeOptions) mergeList(dst, src protoreflect.List, fd protoreflect.FieldDescriptor) { + // Merge semantics appends to the end of the existing list. + for i, n := 0, src.Len(); i < n; i++ { + switch v := src.Get(i); { + case fd.Message() != nil: + dstv := dst.NewElement() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Append(dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Append(o.cloneBytes(v)) + default: + dst.Append(v) + } + } +} + +func (o mergeOptions) mergeMap(dst, src protoreflect.Map, fd protoreflect.FieldDescriptor) { + // Merge semantics replaces, rather than merges into existing entries. + src.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool { + switch { + case fd.Message() != nil: + dstv := dst.NewValue() + o.mergeMessage(dstv.Message(), v.Message()) + dst.Set(k, dstv) + case fd.Kind() == protoreflect.BytesKind: + dst.Set(k, o.cloneBytes(v)) + default: + dst.Set(k, v) + } + return true + }) +} + +func (o mergeOptions) cloneBytes(v protoreflect.Value) protoreflect.Value { + return protoreflect.ValueOfBytes(append([]byte{}, v.Bytes()...)) +} diff --git a/vendor/google.golang.org/protobuf/proto/messageset.go b/vendor/google.golang.org/protobuf/proto/messageset.go new file mode 100644 index 0000000..b6b3de5 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/messageset.go @@ -0,0 +1,88 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
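// A minimal sketch (not part of the vendored file) of the Merge and Clone
// semantics described above: repeated fields are appended, and Clone yields an
// independent deep copy. Assumes the fieldmaskpb well-known type is available.
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	dst := &fieldmaskpb.FieldMask{Paths: []string{"a"}}
	src := &fieldmaskpb.FieldMask{Paths: []string{"b", "c"}}

	proto.Merge(dst, src)
	fmt.Println(dst.Paths) // [a b c]: list fields are appended

	cp := proto.Clone(dst).(*fieldmaskpb.FieldMask)
	cp.Paths[0] = "z"
	fmt.Println(dst.Paths[0]) // still "a": Clone is a deep copy
}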
+ +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/internal/flags" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/reflect/protoregistry" +) + +func sizeMessageSet(m protoreflect.Message) (size int) { + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += messageset.SizeField(fd.Number()) + size += protowire.SizeTag(messageset.FieldMessage) + size += protowire.SizeBytes(sizeMessage(v.Message())) + return true + }) + size += messageset.SizeUnknown(m.GetUnknown()) + return size +} + +func marshalMessageSet(b []byte, m protoreflect.Message, o MarshalOptions) ([]byte, error) { + if !flags.ProtoLegacy { + return b, errors.New("no support for message_set_wire_format") + } + var err error + o.rangeFields(m, func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + b, err = marshalMessageSetField(b, fd, v, o) + return err == nil + }) + if err != nil { + return b, err + } + return messageset.AppendUnknown(b, m.GetUnknown()) +} + +func marshalMessageSetField(b []byte, fd protoreflect.FieldDescriptor, value protoreflect.Value, o MarshalOptions) ([]byte, error) { + b = messageset.AppendFieldStart(b, fd.Number()) + b = protowire.AppendTag(b, messageset.FieldMessage, protowire.BytesType) + b = protowire.AppendVarint(b, uint64(o.Size(value.Message().Interface()))) + b, err := o.marshalMessage(b, value.Message()) + if err != nil { + return b, err + } + b = messageset.AppendFieldEnd(b) + return b, nil +} + +func unmarshalMessageSet(b []byte, m protoreflect.Message, o UnmarshalOptions) error { + if !flags.ProtoLegacy { + return errors.New("no support for message_set_wire_format") + } + return messageset.Unmarshal(b, false, func(num protowire.Number, v []byte) error { + err := unmarshalMessageSetField(m, num, v, o) + if err == errUnknown { + unknown := m.GetUnknown() + unknown = protowire.AppendTag(unknown, num, protowire.BytesType) + unknown = protowire.AppendBytes(unknown, v) + m.SetUnknown(unknown) + return nil + } + return err + }) +} + +func unmarshalMessageSetField(m protoreflect.Message, num protowire.Number, v []byte, o UnmarshalOptions) error { + md := m.Descriptor() + if !md.ExtensionRanges().Has(num) { + return errUnknown + } + xt, err := o.Resolver.FindExtensionByNumber(md.FullName(), num) + if err == protoregistry.NotFound { + return errUnknown + } + if err != nil { + return errors.New("%v: unable to resolve extension %v: %v", md.FullName(), num, err) + } + xd := xt.TypeDescriptor() + if err := o.unmarshalMessage(v, m.Mutable(xd).Message()); err != nil { + return err + } + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/proto.go b/vendor/google.golang.org/protobuf/proto/proto.go new file mode 100644 index 0000000..ca14b09 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Message is the top-level interface that all messages must implement. +// It provides access to a reflective view of a message. 
+// Any implementation of this interface may be used with all functions in the +// protobuf module that accept a Message, except where otherwise specified. +// +// This is the v2 interface definition for protobuf messages. +// The v1 interface definition is "github.com/golang/protobuf/proto".Message. +// +// To convert a v1 message to a v2 message, +// use "github.com/golang/protobuf/proto".MessageV2. +// To convert a v2 message to a v1 message, +// use "github.com/golang/protobuf/proto".MessageV1. +type Message = protoreflect.ProtoMessage + +// Error matches all errors produced by packages in the protobuf module. +// +// That is, errors.Is(err, Error) reports whether an error is produced +// by this module. +var Error error + +func init() { + Error = errors.Error +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_methods.go b/vendor/google.golang.org/protobuf/proto/proto_methods.go new file mode 100644 index 0000000..d8dd604 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_methods.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +// +build !protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = true + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return m.ProtoMethods() +} diff --git a/vendor/google.golang.org/protobuf/proto/proto_reflect.go b/vendor/google.golang.org/protobuf/proto/proto_reflect.go new file mode 100644 index 0000000..b103d43 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/proto_reflect.go @@ -0,0 +1,19 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The protoreflect build tag disables use of fast-path methods. +// +build protoreflect + +package proto + +import ( + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +const hasProtoMethods = false + +func protoMethods(m protoreflect.Message) *protoiface.Methods { + return nil +} diff --git a/vendor/google.golang.org/protobuf/proto/reset.go b/vendor/google.golang.org/protobuf/proto/reset.go new file mode 100644 index 0000000..3d7f894 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/reset.go @@ -0,0 +1,43 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "fmt" + + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Reset clears every field in the message. +// The resulting message shares no observable memory with its previous state +// other than the memory for the message itself. +func Reset(m Message) { + if mr, ok := m.(interface{ Reset() }); ok && hasProtoMethods { + mr.Reset() + return + } + resetMessage(m.ProtoReflect()) +} + +func resetMessage(m protoreflect.Message) { + if !m.IsValid() { + panic(fmt.Sprintf("cannot reset invalid %v message", m.Descriptor().FullName())) + } + + // Clear all known fields. + fds := m.Descriptor().Fields() + for i := 0; i < fds.Len(); i++ { + m.Clear(fds.Get(i)) + } + + // Clear extension fields. 
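// A minimal sketch (not part of the vendored file) of the package-level Error
// sentinel and of Reset, assuming the durationpb well-known type is available.
// Any error produced by this module matches proto.Error via errors.Is.
package main

import (
	"errors"
	"fmt"
	"time"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := durationpb.New(5 * time.Second)
	proto.Reset(m)
	fmt.Println(m.GetSeconds()) // 0: every field cleared

	err := proto.Unmarshal([]byte{0xff}, m)  // truncated tag: a wire-format error
	fmt.Println(errors.Is(err, proto.Error)) // true
}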
+ m.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool { + m.Clear(fd) + return true + }) + + // Clear unknown fields. + m.SetUnknown(nil) +} diff --git a/vendor/google.golang.org/protobuf/proto/size.go b/vendor/google.golang.org/protobuf/proto/size.go new file mode 100644 index 0000000..11ba841 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size.go @@ -0,0 +1,94 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/internal/encoding/messageset" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoiface" +) + +// Size returns the size in bytes of the wire-format encoding of m. +func Size(m Message) int { + return MarshalOptions{}.Size(m) +} + +// Size returns the size in bytes of the wire-format encoding of m. +func (o MarshalOptions) Size(m Message) int { + // Treat a nil message interface as an empty message; nothing to output. + if m == nil { + return 0 + } + + return sizeMessage(m.ProtoReflect()) +} + +func sizeMessage(m protoreflect.Message) (size int) { + methods := protoMethods(m) + if methods != nil && methods.Size != nil { + out := methods.Size(protoiface.SizeInput{ + Message: m, + }) + return out.Size + } + if methods != nil && methods.Marshal != nil { + // This is not efficient, but we don't have any choice. + // This case is mainly used for legacy types with a Marshal method. + out, _ := methods.Marshal(protoiface.MarshalInput{ + Message: m, + }) + return len(out.Buf) + } + return sizeMessageSlow(m) +} + +func sizeMessageSlow(m protoreflect.Message) (size int) { + if messageset.IsMessageSet(m.Descriptor()) { + return sizeMessageSet(m) + } + m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool { + size += sizeField(fd, v) + return true + }) + size += len(m.GetUnknown()) + return size +} + +func sizeField(fd protoreflect.FieldDescriptor, value protoreflect.Value) (size int) { + num := fd.Number() + switch { + case fd.IsList(): + return sizeList(num, fd, value.List()) + case fd.IsMap(): + return sizeMap(num, fd, value.Map()) + default: + return protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), value) + } +} + +func sizeList(num protowire.Number, fd protoreflect.FieldDescriptor, list protoreflect.List) (size int) { + if fd.IsPacked() && list.Len() > 0 { + content := 0 + for i, llen := 0, list.Len(); i < llen; i++ { + content += sizeSingular(num, fd.Kind(), list.Get(i)) + } + return protowire.SizeTag(num) + protowire.SizeBytes(content) + } + + for i, llen := 0, list.Len(); i < llen; i++ { + size += protowire.SizeTag(num) + sizeSingular(num, fd.Kind(), list.Get(i)) + } + return size +} + +func sizeMap(num protowire.Number, fd protoreflect.FieldDescriptor, mapv protoreflect.Map) (size int) { + mapv.Range(func(key protoreflect.MapKey, value protoreflect.Value) bool { + size += protowire.SizeTag(num) + size += protowire.SizeBytes(sizeField(fd.MapKey(), key.Value()) + sizeField(fd.MapValue(), value)) + return true + }) + return size +} diff --git a/vendor/google.golang.org/protobuf/proto/size_gen.go b/vendor/google.golang.org/protobuf/proto/size_gen.go new file mode 100644 index 0000000..1118460 --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/size_gen.go @@ -0,0 +1,55 @@ +// Copyright 2018 The Go Authors. All rights reserved. 
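// A minimal sketch (not part of the vendored file) of the size arithmetic
// above for a single string field: a field's wire size is its tag plus a
// length-prefixed payload. Assumes the wrapperspb well-known types are
// available; field number 1 is StringValue's only field.
package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protowire"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

func main() {
	m := wrapperspb.String("hello")

	want := protowire.SizeTag(1) + protowire.SizeBytes(len("hello"))
	fmt.Println(proto.Size(m) == want) // true: 1 tag byte + 1 length byte + 5 payload bytes
}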
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Code generated by generate-types. DO NOT EDIT. + +package proto + +import ( + "google.golang.org/protobuf/encoding/protowire" + "google.golang.org/protobuf/reflect/protoreflect" +) + +func sizeSingular(num protowire.Number, kind protoreflect.Kind, v protoreflect.Value) int { + switch kind { + case protoreflect.BoolKind: + return protowire.SizeVarint(protowire.EncodeBool(v.Bool())) + case protoreflect.EnumKind: + return protowire.SizeVarint(uint64(v.Enum())) + case protoreflect.Int32Kind: + return protowire.SizeVarint(uint64(int32(v.Int()))) + case protoreflect.Sint32Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(int64(int32(v.Int())))) + case protoreflect.Uint32Kind: + return protowire.SizeVarint(uint64(uint32(v.Uint()))) + case protoreflect.Int64Kind: + return protowire.SizeVarint(uint64(v.Int())) + case protoreflect.Sint64Kind: + return protowire.SizeVarint(protowire.EncodeZigZag(v.Int())) + case protoreflect.Uint64Kind: + return protowire.SizeVarint(v.Uint()) + case protoreflect.Sfixed32Kind: + return protowire.SizeFixed32() + case protoreflect.Fixed32Kind: + return protowire.SizeFixed32() + case protoreflect.FloatKind: + return protowire.SizeFixed32() + case protoreflect.Sfixed64Kind: + return protowire.SizeFixed64() + case protoreflect.Fixed64Kind: + return protowire.SizeFixed64() + case protoreflect.DoubleKind: + return protowire.SizeFixed64() + case protoreflect.StringKind: + return protowire.SizeBytes(len(v.String())) + case protoreflect.BytesKind: + return protowire.SizeBytes(len(v.Bytes())) + case protoreflect.MessageKind: + return protowire.SizeBytes(sizeMessage(v.Message())) + case protoreflect.GroupKind: + return protowire.SizeGroup(num, sizeMessage(v.Message())) + default: + return 0 + } +} diff --git a/vendor/google.golang.org/protobuf/proto/wrappers.go b/vendor/google.golang.org/protobuf/proto/wrappers.go new file mode 100644 index 0000000..653b12c --- /dev/null +++ b/vendor/google.golang.org/protobuf/proto/wrappers.go @@ -0,0 +1,29 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proto + +// Bool stores v in a new bool value and returns a pointer to it. +func Bool(v bool) *bool { return &v } + +// Int32 stores v in a new int32 value and returns a pointer to it. +func Int32(v int32) *int32 { return &v } + +// Int64 stores v in a new int64 value and returns a pointer to it. +func Int64(v int64) *int64 { return &v } + +// Float32 stores v in a new float32 value and returns a pointer to it. +func Float32(v float32) *float32 { return &v } + +// Float64 stores v in a new float64 value and returns a pointer to it. +func Float64(v float64) *float64 { return &v } + +// Uint32 stores v in a new uint32 value and returns a pointer to it. +func Uint32(v uint32) *uint32 { return &v } + +// Uint64 stores v in a new uint64 value and returns a pointer to it. +func Uint64(v uint64) *uint64 { return &v } + +// String stores v in a new string value and returns a pointer to it. +func String(v string) *string { return &v } diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go new file mode 100644 index 0000000..6be5d16 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/methods.go @@ -0,0 +1,77 @@ +// Copyright 2020 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "google.golang.org/protobuf/internal/pragma" +) + +// The following types are used by the fast-path Message.ProtoMethods method. +// +// To avoid polluting the public protoreflect API with types used only by +// low-level implementations, the canonical definitions of these types are +// in the runtime/protoiface package. The definitions here and in protoiface +// must be kept in sync. +type ( + methods = struct { + pragma.NoUnkeyedLiterals + Flags supportFlags + Size func(sizeInput) sizeOutput + Marshal func(marshalInput) (marshalOutput, error) + Unmarshal func(unmarshalInput) (unmarshalOutput, error) + Merge func(mergeInput) mergeOutput + CheckInitialized func(checkInitializedInput) (checkInitializedOutput, error) + } + supportFlags = uint64 + sizeInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Flags uint8 + } + sizeOutput = struct { + pragma.NoUnkeyedLiterals + Size int + } + marshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + } + marshalOutput = struct { + pragma.NoUnkeyedLiterals + Buf []byte + } + unmarshalInput = struct { + pragma.NoUnkeyedLiterals + Message Message + Buf []byte + Flags uint8 + Resolver interface { + FindExtensionByName(field FullName) (ExtensionType, error) + FindExtensionByNumber(message FullName, field FieldNumber) (ExtensionType, error) + } + } + unmarshalOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + mergeInput = struct { + pragma.NoUnkeyedLiterals + Source Message + Destination Message + } + mergeOutput = struct { + pragma.NoUnkeyedLiterals + Flags uint8 + } + checkInitializedInput = struct { + pragma.NoUnkeyedLiterals + Message Message + } + checkInitializedOutput = struct { + pragma.NoUnkeyedLiterals + } +) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go new file mode 100644 index 0000000..b669a4e --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/proto.go @@ -0,0 +1,478 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoreflect provides interfaces to dynamically manipulate messages. +// +// This package includes type descriptors which describe the structure of types +// defined in proto source files and value interfaces which provide the +// ability to examine and manipulate the contents of messages. +// +// +// Protocol Buffer Descriptors +// +// Protobuf descriptors (e.g., EnumDescriptor or MessageDescriptor) +// are immutable objects that represent protobuf type information. +// They are wrappers around the messages declared in descriptor.proto. +// Protobuf descriptors alone lack any information regarding Go types. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Descriptor and ProtoReflect.Descriptor accessors respectively +// return the protobuf descriptor for the values. +// +// The protobuf descriptor interfaces are not meant to be implemented by +// user code since they might need to be extended in the future to support +// additions to the protobuf language. +// The "google.golang.org/protobuf/reflect/protodesc" package converts between +// google.protobuf.DescriptorProto messages and protobuf descriptors. 
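// A minimal sketch (not part of the vendored file) of the reflective view
// described above: obtain the message descriptor and walk the populated
// fields. Assumes the durationpb well-known type is available.
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	m := durationpb.New(3 * time.Second).ProtoReflect()

	fmt.Println(m.Descriptor().FullName()) // google.protobuf.Duration

	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
		fmt.Println(fd.Name(), "=", v.Interface()) // only populated fields: seconds = 3
		return true
	})
}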
+// +// +// Go Type Descriptors +// +// A type descriptor (e.g., EnumType or MessageType) is a constructor for +// a concrete Go type that represents the associated protobuf descriptor. +// There is commonly a one-to-one relationship between protobuf descriptors and +// Go type descriptors, but it can potentially be a one-to-many relationship. +// +// Enums and messages generated by this module implement Enum and ProtoMessage, +// where the Type and ProtoReflect.Type accessors respectively +// return the protobuf descriptor for the values. +// +// The "google.golang.org/protobuf/types/dynamicpb" package can be used to +// create Go type descriptors from protobuf descriptors. +// +// +// Value Interfaces +// +// The Enum and Message interfaces provide a reflective view over an +// enum or message instance. For enums, it provides the ability to retrieve +// the enum value number for any concrete enum type. For messages, it provides +// the ability to access or manipulate fields of the message. +// +// To convert a proto.Message to a protoreflect.Message, use the +// former's ProtoReflect method. Since the ProtoReflect method is new to the +// v2 message interface, it may not be present on older message implementations. +// The "github.com/golang/protobuf/proto".MessageReflect function can be used +// to obtain a reflective view on older messages. +// +// +// Relationships +// +// The following diagrams demonstrate the relationships between +// various types declared in this package. +// +// +// ┌───────────────────────────────────┐ +// V │ +// ┌────────────── New(n) ─────────────┐ │ +// │ │ │ +// │ ┌──── Descriptor() ──┐ │ ┌── Number() ──┐ │ +// │ │ V V │ V │ +// ╔════════════╗ ╔════════════════╗ ╔════════╗ ╔════════════╗ +// ║ EnumType ║ ║ EnumDescriptor ║ ║ Enum ║ ║ EnumNumber ║ +// ╚════════════╝ ╚════════════════╝ ╚════════╝ ╚════════════╝ +// Λ Λ │ │ +// │ └─── Descriptor() ──┘ │ +// │ │ +// └────────────────── Type() ───────┘ +// +// • An EnumType describes a concrete Go enum type. +// It has an EnumDescriptor and can construct an Enum instance. +// +// • An EnumDescriptor describes an abstract protobuf enum type. +// +// • An Enum is a concrete enum instance. Generated enums implement Enum. +// +// +// ┌──────────────── New() ─────────────────┐ +// │ │ +// │ ┌─── Descriptor() ─────┐ │ ┌── Interface() ───┐ +// │ │ V V │ V +// ╔═════════════╗ ╔═══════════════════╗ ╔═════════╗ ╔══════════════╗ +// ║ MessageType ║ ║ MessageDescriptor ║ ║ Message ║ ║ ProtoMessage ║ +// ╚═════════════╝ ╚═══════════════════╝ ╚═════════╝ ╚══════════════╝ +// Λ Λ │ │ Λ │ +// │ └──── Descriptor() ────┘ │ └─ ProtoReflect() ─┘ +// │ │ +// └─────────────────── Type() ─────────┘ +// +// • A MessageType describes a concrete Go message type. +// It has a MessageDescriptor and can construct a Message instance. +// +// • A MessageDescriptor describes an abstract protobuf message type. +// +// • A Message is a concrete message instance. Generated messages implement +// ProtoMessage, which can convert to/from a Message. +// +// +// ┌── TypeDescriptor() ──┐ ┌───── Descriptor() ─────┐ +// │ V │ V +// ╔═══════════════╗ ╔═════════════════════════╗ ╔═════════════════════╗ +// ║ ExtensionType ║ ║ ExtensionTypeDescriptor ║ ║ ExtensionDescriptor ║ +// ╚═══════════════╝ ╚═════════════════════════╝ ╚═════════════════════╝ +// Λ │ │ Λ │ Λ +// └─────── Type() ───────┘ │ └─── may implement ────┘ │ +// │ │ +// └────── implements ────────┘ +// +// • An ExtensionType describes a concrete Go implementation of an extension. 
+// It has an ExtensionTypeDescriptor and can convert to/from
+// abstract Values and Go values.
+//
+// • An ExtensionTypeDescriptor is an ExtensionDescriptor
+// which also has an ExtensionType.
+//
+// • An ExtensionDescriptor describes an abstract protobuf extension field and
+// may not always be an ExtensionTypeDescriptor.
+package protoreflect
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/internal/pragma"
+)
+
+type doNotImplement pragma.DoNotImplement
+
+// ProtoMessage is the top-level interface that all proto messages implement.
+// This is declared in the protoreflect package to avoid a cyclic dependency;
+// use the proto.Message type instead, which aliases this type.
+type ProtoMessage interface{ ProtoReflect() Message }
+
+// Syntax is the language version of the proto file.
+type Syntax syntax
+
+type syntax int8 // keep exact type opaque as the int type may change
+
+const (
+	Proto2 Syntax = 2
+	Proto3 Syntax = 3
+)
+
+// IsValid reports whether the syntax is valid.
+func (s Syntax) IsValid() bool {
+	switch s {
+	case Proto2, Proto3:
+		return true
+	default:
+		return false
+	}
+}
+
+// String returns s as a proto source identifier (e.g., "proto2").
+func (s Syntax) String() string {
+	switch s {
+	case Proto2:
+		return "proto2"
+	case Proto3:
+		return "proto3"
+	default:
+		return fmt.Sprintf("<unknown:%d>", s)
+	}
+}
+
+// GoString returns s as a Go source identifier (e.g., "Proto2").
+func (s Syntax) GoString() string {
+	switch s {
+	case Proto2:
+		return "Proto2"
+	case Proto3:
+		return "Proto3"
+	default:
+		return fmt.Sprintf("Syntax(%d)", s)
+	}
+}
+
+// Cardinality determines whether a field is optional, required, or repeated.
+type Cardinality cardinality
+
+type cardinality int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Cardinality enumeration.
+const (
+	Optional Cardinality = 1 // appears zero or one times
+	Required Cardinality = 2 // appears exactly one time; invalid with Proto3
+	Repeated Cardinality = 3 // appears zero or more times
+)
+
+// IsValid reports whether the cardinality is valid.
+func (c Cardinality) IsValid() bool {
+	switch c {
+	case Optional, Required, Repeated:
+		return true
+	default:
+		return false
+	}
+}
+
+// String returns c as a proto source identifier (e.g., "optional").
+func (c Cardinality) String() string {
+	switch c {
+	case Optional:
+		return "optional"
+	case Required:
+		return "required"
+	case Repeated:
+		return "repeated"
+	default:
+		return fmt.Sprintf("<unknown:%d>", c)
+	}
+}
+
+// GoString returns c as a Go source identifier (e.g., "Optional").
+func (c Cardinality) GoString() string {
+	switch c {
+	case Optional:
+		return "Optional"
+	case Required:
+		return "Required"
+	case Repeated:
+		return "Repeated"
+	default:
+		return fmt.Sprintf("Cardinality(%d)", c)
+	}
+}
+
+// Kind indicates the basic proto kind of a field.
+type Kind kind
+
+type kind int8 // keep exact type opaque as the int type may change
+
+// Constants as defined by the google.protobuf.Field.Kind enumeration.
+const (
+	BoolKind     Kind = 8
+	EnumKind     Kind = 14
+	Int32Kind    Kind = 5
+	Sint32Kind   Kind = 17
+	Uint32Kind   Kind = 13
+	Int64Kind    Kind = 3
+	Sint64Kind   Kind = 18
+	Uint64Kind   Kind = 4
+	Sfixed32Kind Kind = 15
+	Fixed32Kind  Kind = 7
+	FloatKind    Kind = 2
+	Sfixed64Kind Kind = 16
+	Fixed64Kind  Kind = 6
+	DoubleKind   Kind = 1
+	StringKind   Kind = 9
+	BytesKind    Kind = 12
+	MessageKind  Kind = 11
+	GroupKind    Kind = 10
+)
+
+// IsValid reports whether the kind is valid.
+func (k Kind) IsValid() bool {
+	switch k {
+	case BoolKind, EnumKind,
+		Int32Kind, Sint32Kind, Uint32Kind,
+		Int64Kind, Sint64Kind, Uint64Kind,
+		Sfixed32Kind, Fixed32Kind, FloatKind,
+		Sfixed64Kind, Fixed64Kind, DoubleKind,
+		StringKind, BytesKind, MessageKind, GroupKind:
+		return true
+	default:
+		return false
+	}
+}
+
+// String returns k as a proto source identifier (e.g., "bool").
+func (k Kind) String() string {
+	switch k {
+	case BoolKind:
+		return "bool"
+	case EnumKind:
+		return "enum"
+	case Int32Kind:
+		return "int32"
+	case Sint32Kind:
+		return "sint32"
+	case Uint32Kind:
+		return "uint32"
+	case Int64Kind:
+		return "int64"
+	case Sint64Kind:
+		return "sint64"
+	case Uint64Kind:
+		return "uint64"
+	case Sfixed32Kind:
+		return "sfixed32"
+	case Fixed32Kind:
+		return "fixed32"
+	case FloatKind:
+		return "float"
+	case Sfixed64Kind:
+		return "sfixed64"
+	case Fixed64Kind:
+		return "fixed64"
+	case DoubleKind:
+		return "double"
+	case StringKind:
+		return "string"
+	case BytesKind:
+		return "bytes"
+	case MessageKind:
+		return "message"
+	case GroupKind:
+		return "group"
+	default:
+		return fmt.Sprintf("<unknown:%d>", k)
+	}
+}
+
+// GoString returns k as a Go source identifier (e.g., "BoolKind").
+func (k Kind) GoString() string {
+	switch k {
+	case BoolKind:
+		return "BoolKind"
+	case EnumKind:
+		return "EnumKind"
+	case Int32Kind:
+		return "Int32Kind"
+	case Sint32Kind:
+		return "Sint32Kind"
+	case Uint32Kind:
+		return "Uint32Kind"
+	case Int64Kind:
+		return "Int64Kind"
+	case Sint64Kind:
+		return "Sint64Kind"
+	case Uint64Kind:
+		return "Uint64Kind"
+	case Sfixed32Kind:
+		return "Sfixed32Kind"
+	case Fixed32Kind:
+		return "Fixed32Kind"
+	case FloatKind:
+		return "FloatKind"
+	case Sfixed64Kind:
+		return "Sfixed64Kind"
+	case Fixed64Kind:
+		return "Fixed64Kind"
+	case DoubleKind:
+		return "DoubleKind"
+	case StringKind:
+		return "StringKind"
+	case BytesKind:
+		return "BytesKind"
+	case MessageKind:
+		return "MessageKind"
+	case GroupKind:
+		return "GroupKind"
+	default:
+		return fmt.Sprintf("Kind(%d)", k)
+	}
+}
+
+// FieldNumber is the field number in a message.
+type FieldNumber = protowire.Number
+
+// FieldNumbers represent a list of field numbers.
+type FieldNumbers interface {
+	// Len reports the number of fields in the list.
+	Len() int
+	// Get returns the ith field number. It panics if out of bounds.
+	Get(i int) FieldNumber
+	// Has reports whether n is within the list of fields.
+	Has(n FieldNumber) bool
+
+	doNotImplement
+}
+
+// FieldRanges represent a list of field number ranges.
+type FieldRanges interface {
+	// Len reports the number of ranges in the list.
+	Len() int
+	// Get returns the ith range. It panics if out of bounds.
+	Get(i int) [2]FieldNumber // start inclusive; end exclusive
+	// Has reports whether n is within any of the ranges.
+	Has(n FieldNumber) bool
+
+	doNotImplement
+}
+
+// EnumNumber is the numeric value for an enum.
+type EnumNumber int32
+
+// EnumRanges represent a list of enum number ranges.
+type EnumRanges interface {
+	// Len reports the number of ranges in the list.
+ Len() int + // Get returns the ith range. It panics if out of bounds. + Get(i int) [2]EnumNumber // start inclusive; end inclusive + // Has reports whether n is within any of the ranges. + Has(n EnumNumber) bool + + doNotImplement +} + +var ( + regexName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*$`) + regexFullName = regexp.MustCompile(`^[_a-zA-Z][_a-zA-Z0-9]*(\.[_a-zA-Z][_a-zA-Z0-9]*)*$`) +) + +// Name is the short name for a proto declaration. This is not the name +// as used in Go source code, which might not be identical to the proto name. +type Name string // e.g., "Kind" + +// IsValid reports whether n is a syntactically valid name. +// An empty name is invalid. +func (n Name) IsValid() bool { + return regexName.MatchString(string(n)) +} + +// Names represent a list of names. +type Names interface { + // Len reports the number of names in the list. + Len() int + // Get returns the ith name. It panics if out of bounds. + Get(i int) Name + // Has reports whether s matches any names in the list. + Has(s Name) bool + + doNotImplement +} + +// FullName is a qualified name that uniquely identifies a proto declaration. +// A qualified name is the concatenation of the proto package along with the +// fully-declared name (i.e., name of parent preceding the name of the child), +// with a '.' delimiter placed between each Name. +// +// This should not have any leading or trailing dots. +type FullName string // e.g., "google.protobuf.Field.Kind" + +// IsValid reports whether n is a syntactically valid full name. +// An empty full name is invalid. +func (n FullName) IsValid() bool { + return regexFullName.MatchString(string(n)) +} + +// Name returns the short name, which is the last identifier segment. +// A single segment FullName is the Name itself. +func (n FullName) Name() Name { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return Name(n[i+1:]) + } + return Name(n) +} + +// Parent returns the full name with the trailing identifier removed. +// A single segment FullName has no parent. +func (n FullName) Parent() FullName { + if i := strings.LastIndexByte(string(n), '.'); i >= 0 { + return n[:i] + } + return "" +} + +// Append returns the qualified name appended with the provided short name. +// +// Invariant: n == n.Parent().Append(n.Name()) // assuming n is valid +func (n FullName) Append(s Name) FullName { + if n == "" { + return FullName(s) + } + return n + "." + FullName(s) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go new file mode 100644 index 0000000..32ea3d9 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/source.go @@ -0,0 +1,52 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// SourceLocations is a list of source locations. +type SourceLocations interface { + // Len reports the number of source locations in the proto file. + Len() int + // Get returns the ith SourceLocation. It panics if out of bounds. + Get(int) SourceLocation + + doNotImplement + + // TODO: Add ByPath and ByDescriptor helper methods. +} + +// SourceLocation describes a source location and +// corresponds with the google.protobuf.SourceCodeInfo.Location message. +type SourceLocation struct { + // Path is the path to the declaration from the root file descriptor. + // The contents of this slice must not be mutated. 
+ Path SourcePath + + // StartLine and StartColumn are the zero-indexed starting location + // in the source file for the declaration. + StartLine, StartColumn int + // EndLine and EndColumn are the zero-indexed ending location + // in the source file for the declaration. + // In the descriptor.proto, the end line may be omitted if it is identical + // to the start line. Here, it is always populated. + EndLine, EndColumn int + + // LeadingDetachedComments are the leading detached comments + // for the declaration. The contents of this slice must not be mutated. + LeadingDetachedComments []string + // LeadingComments is the leading attached comment for the declaration. + LeadingComments string + // TrailingComments is the trailing attached comment for the declaration. + TrailingComments string +} + +// SourcePath identifies part of a file descriptor for a source location. +// The SourcePath is a sequence of either field numbers or indexes into +// a repeated field that form a path starting from the root file descriptor. +// +// See google.protobuf.SourceCodeInfo.Location.path. +type SourcePath []int32 + +// TODO: Add SourcePath.String method to pretty-print the path. For example: +// ".message_type[6].nested_type[15].field[3]" diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go new file mode 100644 index 0000000..5be14a7 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -0,0 +1,631 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +// Descriptor provides a set of accessors that are common to every descriptor. +// Each descriptor type wraps the equivalent google.protobuf.XXXDescriptorProto, +// but provides efficient lookup and immutability. +// +// Each descriptor is comparable. Equality implies that the two types are +// exactly identical. However, it is possible for the same semantically +// identical proto type to be represented by multiple type descriptors. +// +// For example, suppose we have t1 and t2 which are both MessageDescriptors. +// If t1 == t2, then the types are definitely equal and all accessors return +// the same information. However, if t1 != t2, then it is still possible that +// they still represent the same proto type (e.g., t1.FullName == t2.FullName). +// This can occur if a descriptor type is created dynamically, or multiple +// versions of the same proto type are accidentally linked into the Go binary. +type Descriptor interface { + // ParentFile returns the parent file descriptor that this descriptor + // is declared within. The parent file for the file descriptor is itself. + // + // Support for this functionality is optional and may return nil. + ParentFile() FileDescriptor + + // Parent returns the parent containing this descriptor declaration. 
+ // The following shows the mapping from child type to possible parent types: + // + // ╔═════════════════════╤═══════════════════════════════════╗ + // ║ Child type │ Possible parent types ║ + // ╠═════════════════════╪═══════════════════════════════════╣ + // ║ FileDescriptor │ nil ║ + // ║ MessageDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ FieldDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ OneofDescriptor │ MessageDescriptor ║ + // ║ EnumDescriptor │ FileDescriptor, MessageDescriptor ║ + // ║ EnumValueDescriptor │ EnumDescriptor ║ + // ║ ServiceDescriptor │ FileDescriptor ║ + // ║ MethodDescriptor │ ServiceDescriptor ║ + // ╚═════════════════════╧═══════════════════════════════════╝ + // + // Support for this functionality is optional and may return nil. + Parent() Descriptor + + // Index returns the index of this descriptor within its parent. + // It returns 0 if the descriptor does not have a parent or if the parent + // is unknown. + Index() int + + // Syntax is the protobuf syntax. + Syntax() Syntax // e.g., Proto2 or Proto3 + + // Name is the short name of the declaration (i.e., FullName.Name). + Name() Name // e.g., "Any" + + // FullName is the fully-qualified name of the declaration. + // + // The FullName is a concatenation of the full name of the type that this + // type is declared within and the declaration name. For example, + // field "foo_field" in message "proto.package.MyMessage" is + // uniquely identified as "proto.package.MyMessage.foo_field". + // Enum values are an exception to the rule (see EnumValueDescriptor). + FullName() FullName // e.g., "google.protobuf.Any" + + // IsPlaceholder reports whether type information is missing since a + // dependency is not resolved, in which case only name information is known. + // + // Placeholder types may only be returned by the following accessors + // as a result of unresolved dependencies or weak imports: + // + // ╔═══════════════════════════════════╤═════════════════════╗ + // ║ Accessor │ Descriptor ║ + // ╠═══════════════════════════════════╪═════════════════════╣ + // ║ FileImports.FileDescriptor │ FileDescriptor ║ + // ║ FieldDescriptor.Enum │ EnumDescriptor ║ + // ║ FieldDescriptor.Message │ MessageDescriptor ║ + // ║ FieldDescriptor.DefaultEnumValue │ EnumValueDescriptor ║ + // ║ FieldDescriptor.ContainingMessage │ MessageDescriptor ║ + // ║ MethodDescriptor.Input │ MessageDescriptor ║ + // ║ MethodDescriptor.Output │ MessageDescriptor ║ + // ╚═══════════════════════════════════╧═════════════════════╝ + // + // If true, only Name and FullName are valid. + // For FileDescriptor, the Path is also valid. + IsPlaceholder() bool + + // Options returns the descriptor options. The caller must not modify + // the returned value. + // + // To avoid a dependency cycle, this function returns a proto.Message value. 
+ // The proto message type returned for each descriptor type is as follows: + // ╔═════════════════════╤══════════════════════════════════════════╗ + // ║ Go type │ Protobuf message type ║ + // ╠═════════════════════╪══════════════════════════════════════════╣ + // ║ FileDescriptor │ google.protobuf.FileOptions ║ + // ║ EnumDescriptor │ google.protobuf.EnumOptions ║ + // ║ EnumValueDescriptor │ google.protobuf.EnumValueOptions ║ + // ║ MessageDescriptor │ google.protobuf.MessageOptions ║ + // ║ FieldDescriptor │ google.protobuf.FieldOptions ║ + // ║ OneofDescriptor │ google.protobuf.OneofOptions ║ + // ║ ServiceDescriptor │ google.protobuf.ServiceOptions ║ + // ║ MethodDescriptor │ google.protobuf.MethodOptions ║ + // ╚═════════════════════╧══════════════════════════════════════════╝ + // + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + Options() ProtoMessage + + doNotImplement +} + +// FileDescriptor describes the types in a complete proto file and +// corresponds with the google.protobuf.FileDescriptorProto message. +// +// Top-level declarations: +// EnumDescriptor, MessageDescriptor, FieldDescriptor, and/or ServiceDescriptor. +type FileDescriptor interface { + Descriptor // Descriptor.FullName is identical to Package + + // Path returns the file name, relative to the source tree root. + Path() string // e.g., "path/to/file.proto" + // Package returns the protobuf package namespace. + Package() FullName // e.g., "google.protobuf" + + // Imports is a list of imported proto files. + Imports() FileImports + + // Enums is a list of the top-level enum declarations. + Enums() EnumDescriptors + // Messages is a list of the top-level message declarations. + Messages() MessageDescriptors + // Extensions is a list of the top-level extension declarations. + Extensions() ExtensionDescriptors + // Services is a list of the top-level service declarations. + Services() ServiceDescriptors + + // SourceLocations is a list of source locations. + SourceLocations() SourceLocations + + isFileDescriptor +} +type isFileDescriptor interface{ ProtoType(FileDescriptor) } + +// FileImports is a list of file imports. +type FileImports interface { + // Len reports the number of files imported by this proto file. + Len() int + // Get returns the ith FileImport. It panics if out of bounds. + Get(i int) FileImport + + doNotImplement +} + +// FileImport is the declaration for a proto file import. +type FileImport struct { + // FileDescriptor is the file type for the given import. + // It is a placeholder descriptor if IsWeak is set or if a dependency has + // not been regenerated to implement the new reflection APIs. + FileDescriptor + + // IsPublic reports whether this is a public import, which causes this file + // to alias declarations within the imported file. The intended use cases + // for this feature is the ability to move proto files without breaking + // existing dependencies. + // + // The current file and the imported file must be within proto package. + IsPublic bool + + // IsWeak reports whether this is a weak import, which does not impose + // a direct dependency on the target file. + // + // Weak imports are a legacy proto1 feature. Equivalent behavior is + // achieved using proto2 extension fields or proto3 Any messages. + IsWeak bool +} + +// MessageDescriptor describes a message and +// corresponds with the google.protobuf.DescriptorProto message. 
+// +// Nested declarations: +// FieldDescriptor, OneofDescriptor, FieldDescriptor, EnumDescriptor, +// and/or MessageDescriptor. +type MessageDescriptor interface { + Descriptor + + // IsMapEntry indicates that this is an auto-generated message type to + // represent the entry type for a map field. + // + // Map entry messages have only two fields: + // • a "key" field with a field number of 1 + // • a "value" field with a field number of 2 + // The key and value types are determined by these two fields. + // + // If IsMapEntry is true, it implies that FieldDescriptor.IsMap is true + // for some field with this message type. + IsMapEntry() bool + + // Fields is a list of nested field declarations. + Fields() FieldDescriptors + // Oneofs is a list of nested oneof declarations. + Oneofs() OneofDescriptors + + // ReservedNames is a list of reserved field names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of field numbers. + ReservedRanges() FieldRanges + // RequiredNumbers is a list of required field numbers. + // In Proto3, it is always an empty list. + RequiredNumbers() FieldNumbers + // ExtensionRanges is the field ranges used for extension fields. + // In Proto3, it is always an empty ranges. + ExtensionRanges() FieldRanges + // ExtensionRangeOptions returns the ith extension range options. + // + // To avoid a dependency cycle, this method returns a proto.Message value, + // which always contains a google.protobuf.ExtensionRangeOptions message. + // This method returns a typed nil-pointer if no options are present. + // The caller must import the descriptorpb package to use this. + ExtensionRangeOptions(i int) ProtoMessage + + // Enums is a list of nested enum declarations. + Enums() EnumDescriptors + // Messages is a list of nested message declarations. + Messages() MessageDescriptors + // Extensions is a list of nested extension declarations. + Extensions() ExtensionDescriptors + + isMessageDescriptor +} +type isMessageDescriptor interface{ ProtoType(MessageDescriptor) } + +// MessageType encapsulates a MessageDescriptor with a concrete Go implementation. +type MessageType interface { + // New returns a newly allocated empty message. + New() Message + + // Zero returns an empty, read-only message. + Zero() Message + + // Descriptor returns the message descriptor. + // + // Invariant: t.Descriptor() == t.New().Descriptor() + Descriptor() MessageDescriptor +} + +// MessageDescriptors is a list of message declarations. +type MessageDescriptors interface { + // Len reports the number of messages. + Len() int + // Get returns the ith MessageDescriptor. It panics if out of bounds. + Get(i int) MessageDescriptor + // ByName returns the MessageDescriptor for a message named s. + // It returns nil if not found. + ByName(s Name) MessageDescriptor + + doNotImplement +} + +// FieldDescriptor describes a field within a message and +// corresponds with the google.protobuf.FieldDescriptorProto message. +// +// It is used for both normal fields defined within the parent message +// (e.g., MessageDescriptor.Fields) and fields that extend some remote message +// (e.g., FileDescriptor.Extensions or MessageDescriptor.Extensions). +type FieldDescriptor interface { + Descriptor + + // Number reports the unique number for this field. + Number() FieldNumber + // Cardinality reports the cardinality for this field. + Cardinality() Cardinality + // Kind reports the basic kind for this field. + Kind() Kind + + // HasJSONName reports whether this field has an explicitly set JSON name. 
+ HasJSONName() bool + + // JSONName reports the name used for JSON serialization. + // It is usually the camel-cased form of the field name. + JSONName() string + + // HasPresence reports whether the field distinguishes between unpopulated + // and default values. + HasPresence() bool + + // IsExtension reports whether this is an extension field. If false, + // then Parent and ContainingMessage refer to the same message. + // Otherwise, ContainingMessage and Parent likely differ. + IsExtension() bool + + // HasOptionalKeyword reports whether the "optional" keyword was explicitly + // specified in the source .proto file. + HasOptionalKeyword() bool + + // IsWeak reports whether this is a weak field, which does not impose a + // direct dependency on the target type. + // If true, then Message returns a placeholder type. + IsWeak() bool + + // IsPacked reports whether repeated primitive numeric kinds should be + // serialized using a packed encoding. + // If true, then it implies Cardinality is Repeated. + IsPacked() bool + + // IsList reports whether this field represents a list, + // where the value type for the associated field is a List. + // It is equivalent to checking whether Cardinality is Repeated and + // that IsMap reports false. + IsList() bool + + // IsMap reports whether this field represents a map, + // where the value type for the associated field is a Map. + // It is equivalent to checking whether Cardinality is Repeated, + // that the Kind is MessageKind, and that Message.IsMapEntry reports true. + IsMap() bool + + // MapKey returns the field descriptor for the key in the map entry. + // It returns nil if IsMap reports false. + MapKey() FieldDescriptor + + // MapValue returns the field descriptor for the value in the map entry. + // It returns nil if IsMap reports false. + MapValue() FieldDescriptor + + // HasDefault reports whether this field has a default value. + HasDefault() bool + + // Default returns the default value for scalar fields. + // For proto2, it is the default value as specified in the proto file, + // or the zero value if unspecified. + // For proto3, it is always the zero value of the scalar. + // The Value type is determined by the Kind. + Default() Value + + // DefaultEnumValue returns the enum value descriptor for the default value + // of an enum field, and is nil for any other kind of field. + DefaultEnumValue() EnumValueDescriptor + + // ContainingOneof is the containing oneof that this field belongs to, + // and is nil if this field is not part of a oneof. + ContainingOneof() OneofDescriptor + + // ContainingMessage is the containing message that this field belongs to. + // For extension fields, this may not necessarily be the parent message + // that the field is declared within. + ContainingMessage() MessageDescriptor + + // Enum is the enum descriptor if Kind is EnumKind. + // It returns nil for any other Kind. + Enum() EnumDescriptor + + // Message is the message descriptor if Kind is + // MessageKind or GroupKind. It returns nil for any other Kind. + Message() MessageDescriptor + + isFieldDescriptor +} +type isFieldDescriptor interface{ ProtoType(FieldDescriptor) } + +// FieldDescriptors is a list of field declarations. +type FieldDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith FieldDescriptor. It panics if out of bounds. + Get(i int) FieldDescriptor + // ByName returns the FieldDescriptor for a field named s. + // It returns nil if not found. 
+ ByName(s Name) FieldDescriptor + // ByJSONName returns the FieldDescriptor for a field with s as the JSON name. + // It returns nil if not found. + ByJSONName(s string) FieldDescriptor + // ByNumber returns the FieldDescriptor for a field numbered n. + // It returns nil if not found. + ByNumber(n FieldNumber) FieldDescriptor + + doNotImplement +} + +// OneofDescriptor describes a oneof field set within a given message and +// corresponds with the google.protobuf.OneofDescriptorProto message. +type OneofDescriptor interface { + Descriptor + + // IsSynthetic reports whether this is a synthetic oneof created to support + // proto3 optional semantics. If true, Fields contains exactly one field + // with HasOptionalKeyword specified. + IsSynthetic() bool + + // Fields is a list of fields belonging to this oneof. + Fields() FieldDescriptors + + isOneofDescriptor +} +type isOneofDescriptor interface{ ProtoType(OneofDescriptor) } + +// OneofDescriptors is a list of oneof declarations. +type OneofDescriptors interface { + // Len reports the number of oneof fields. + Len() int + // Get returns the ith OneofDescriptor. It panics if out of bounds. + Get(i int) OneofDescriptor + // ByName returns the OneofDescriptor for a oneof named s. + // It returns nil if not found. + ByName(s Name) OneofDescriptor + + doNotImplement +} + +// ExtensionDescriptor is an alias of FieldDescriptor for documentation. +type ExtensionDescriptor = FieldDescriptor + +// ExtensionTypeDescriptor is an ExtensionDescriptor with an associated ExtensionType. +type ExtensionTypeDescriptor interface { + ExtensionDescriptor + + // Type returns the associated ExtensionType. + Type() ExtensionType + + // Descriptor returns the plain ExtensionDescriptor without the + // associated ExtensionType. + Descriptor() ExtensionDescriptor +} + +// ExtensionDescriptors is a list of field declarations. +type ExtensionDescriptors interface { + // Len reports the number of fields. + Len() int + // Get returns the ith ExtensionDescriptor. It panics if out of bounds. + Get(i int) ExtensionDescriptor + // ByName returns the ExtensionDescriptor for a field named s. + // It returns nil if not found. + ByName(s Name) ExtensionDescriptor + + doNotImplement +} + +// ExtensionType encapsulates an ExtensionDescriptor with a concrete +// Go implementation. The nested field descriptor must be for a extension field. +// +// While a normal field is a member of the parent message that it is declared +// within (see Descriptor.Parent), an extension field is a member of some other +// target message (see ExtensionDescriptor.Extendee) and may have no +// relationship with the parent. However, the full name of an extension field is +// relative to the parent that it is declared within. +// +// For example: +// syntax = "proto2"; +// package example; +// message FooMessage { +// extensions 100 to max; +// } +// message BarMessage { +// extends FooMessage { optional BarMessage bar_field = 100; } +// } +// +// Field "bar_field" is an extension of FooMessage, but its full name is +// "example.BarMessage.bar_field" instead of "example.FooMessage.bar_field". +type ExtensionType interface { + // New returns a new value for the field. + // For scalars, this returns the default value in native Go form. + New() Value + + // Zero returns a new value for the field. + // For scalars, this returns the default value in native Go form. + // For composite types, this returns an empty, read-only message, list, or map. 
+ Zero() Value + + // TypeDescriptor returns the extension type descriptor. + TypeDescriptor() ExtensionTypeDescriptor + + // ValueOf wraps the input and returns it as a Value. + // ValueOf panics if the input value is invalid or not the appropriate type. + // + // ValueOf is more extensive than protoreflect.ValueOf for a given field's + // value as it has more type information available. + ValueOf(interface{}) Value + + // InterfaceOf completely unwraps the Value to the underlying Go type. + // InterfaceOf panics if the input is nil or does not represent the + // appropriate underlying Go type. For composite types, it panics if the + // value is not mutable. + // + // InterfaceOf is able to unwrap the Value further than Value.Interface + // as it has more type information available. + InterfaceOf(Value) interface{} + + // IsValidValue reports whether the Value is valid to assign to the field. + IsValidValue(Value) bool + + // IsValidInterface reports whether the input is valid to assign to the field. + IsValidInterface(interface{}) bool +} + +// EnumDescriptor describes an enum and +// corresponds with the google.protobuf.EnumDescriptorProto message. +// +// Nested declarations: +// EnumValueDescriptor. +type EnumDescriptor interface { + Descriptor + + // Values is a list of nested enum value declarations. + Values() EnumValueDescriptors + + // ReservedNames is a list of reserved enum names. + ReservedNames() Names + // ReservedRanges is a list of reserved ranges of enum numbers. + ReservedRanges() EnumRanges + + isEnumDescriptor +} +type isEnumDescriptor interface{ ProtoType(EnumDescriptor) } + +// EnumType encapsulates an EnumDescriptor with a concrete Go implementation. +type EnumType interface { + // New returns an instance of this enum type with its value set to n. + New(n EnumNumber) Enum + + // Descriptor returns the enum descriptor. + // + // Invariant: t.Descriptor() == t.New(0).Descriptor() + Descriptor() EnumDescriptor +} + +// EnumDescriptors is a list of enum declarations. +type EnumDescriptors interface { + // Len reports the number of enum types. + Len() int + // Get returns the ith EnumDescriptor. It panics if out of bounds. + Get(i int) EnumDescriptor + // ByName returns the EnumDescriptor for an enum named s. + // It returns nil if not found. + ByName(s Name) EnumDescriptor + + doNotImplement +} + +// EnumValueDescriptor describes an enum value and +// corresponds with the google.protobuf.EnumValueDescriptorProto message. +// +// All other proto declarations are in the namespace of the parent. +// However, enum values do not follow this rule and are within the namespace +// of the parent's parent (i.e., they are a sibling of the containing enum). +// Thus, a value named "FOO_VALUE" declared within an enum uniquely identified +// as "proto.package.MyEnum" has a full name of "proto.package.FOO_VALUE". +type EnumValueDescriptor interface { + Descriptor + + // Number returns the enum value as an integer. + Number() EnumNumber + + isEnumValueDescriptor +} +type isEnumValueDescriptor interface{ ProtoType(EnumValueDescriptor) } + +// EnumValueDescriptors is a list of enum value declarations. +type EnumValueDescriptors interface { + // Len reports the number of enum values. + Len() int + // Get returns the ith EnumValueDescriptor. It panics if out of bounds. + Get(i int) EnumValueDescriptor + // ByName returns the EnumValueDescriptor for the enum value named s. + // It returns nil if not found. 
+ ByName(s Name) EnumValueDescriptor + // ByNumber returns the EnumValueDescriptor for the enum value numbered n. + // If multiple have the same number, the first one defined is returned + // It returns nil if not found. + ByNumber(n EnumNumber) EnumValueDescriptor + + doNotImplement +} + +// ServiceDescriptor describes a service and +// corresponds with the google.protobuf.ServiceDescriptorProto message. +// +// Nested declarations: MethodDescriptor. +type ServiceDescriptor interface { + Descriptor + + // Methods is a list of nested message declarations. + Methods() MethodDescriptors + + isServiceDescriptor +} +type isServiceDescriptor interface{ ProtoType(ServiceDescriptor) } + +// ServiceDescriptors is a list of service declarations. +type ServiceDescriptors interface { + // Len reports the number of services. + Len() int + // Get returns the ith ServiceDescriptor. It panics if out of bounds. + Get(i int) ServiceDescriptor + // ByName returns the ServiceDescriptor for a service named s. + // It returns nil if not found. + ByName(s Name) ServiceDescriptor + + doNotImplement +} + +// MethodDescriptor describes a method and +// corresponds with the google.protobuf.MethodDescriptorProto message. +type MethodDescriptor interface { + Descriptor + + // Input is the input message descriptor. + Input() MessageDescriptor + // Output is the output message descriptor. + Output() MessageDescriptor + // IsStreamingClient reports whether the client streams multiple messages. + IsStreamingClient() bool + // IsStreamingServer reports whether the server streams multiple messages. + IsStreamingServer() bool + + isMethodDescriptor +} +type isMethodDescriptor interface{ ProtoType(MethodDescriptor) } + +// MethodDescriptors is a list of method declarations. +type MethodDescriptors interface { + // Len reports the number of methods. + Len() int + // Get returns the ith MethodDescriptor. It panics if out of bounds. + Get(i int) MethodDescriptor + // ByName returns the MethodDescriptor for a service method named s. + // It returns nil if not found. + ByName(s Name) MethodDescriptor + + doNotImplement +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go new file mode 100644 index 0000000..f319810 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value.go @@ -0,0 +1,285 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import "google.golang.org/protobuf/encoding/protowire" + +// Enum is a reflection interface for a concrete enum value, +// which provides type information and a getter for the enum number. +// Enum does not provide a mutable API since enums are commonly backed by +// Go constants, which are not addressable. +type Enum interface { + // Descriptor returns enum descriptor, which contains only the protobuf + // type information for the enum. + Descriptor() EnumDescriptor + + // Type returns the enum type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the enum descriptor be used instead. + Type() EnumType + + // Number returns the enum value as an integer. + Number() EnumNumber +} + +// Message is a reflective interface for a concrete message value, +// encapsulating both type and value information for the message. 
+// +// Accessor/mutators for individual fields are keyed by FieldDescriptor. +// For non-extension fields, the descriptor must exactly match the +// field known by the parent message. +// For extension fields, the descriptor must implement ExtensionTypeDescriptor, +// extend the parent message (i.e., have the same message FullName), and +// be within the parent's extension range. +// +// Each field Value can be a scalar or a composite type (Message, List, or Map). +// See Value for the Go types associated with a FieldDescriptor. +// Providing a Value that is invalid or of an incorrect type panics. +type Message interface { + // Descriptor returns message descriptor, which contains only the protobuf + // type information for the message. + Descriptor() MessageDescriptor + + // Type returns the message type, which encapsulates both Go and protobuf + // type information. If the Go type information is not needed, + // it is recommended that the message descriptor be used instead. + Type() MessageType + + // New returns a newly allocated and mutable empty message. + New() Message + + // Interface unwraps the message reflection interface and + // returns the underlying ProtoMessage interface. + Interface() ProtoMessage + + // Range iterates over every populated field in an undefined order, + // calling f for each field descriptor and value encountered. + // Range returns immediately if f returns false. + // While iterating, mutating operations may only be performed + // on the current field descriptor. + Range(f func(FieldDescriptor, Value) bool) + + // Has reports whether a field is populated. + // + // Some fields have the property of nullability where it is possible to + // distinguish between the default value of a field and whether the field + // was explicitly populated with the default value. Singular message fields, + // member fields of a oneof, and proto2 scalar fields are nullable. Such + // fields are populated only if explicitly set. + // + // In other cases (aside from the nullable cases above), + // a proto3 scalar field is populated if it contains a non-zero value, and + // a repeated field is populated if it is non-empty. + Has(FieldDescriptor) bool + + // Clear clears the field such that a subsequent Has call reports false. + // + // Clearing an extension field clears both the extension type and value + // associated with the given field number. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(FieldDescriptor) + + // Get retrieves the value for a field. + // + // For unpopulated scalars, it returns the default value, where + // the default value of a bytes scalar is guaranteed to be a copy. + // For unpopulated composite types, it returns an empty, read-only view + // of the value; to obtain a mutable reference, use Mutable. + Get(FieldDescriptor) Value + + // Set stores the value for a field. + // + // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType. + // When setting a composite type, it is unspecified whether the stored value + // aliases the source's memory in any way. If the composite value is an + // empty, read-only value, then it panics. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(FieldDescriptor, Value) + + // Mutable returns a mutable reference to a composite type. + // + // If the field is unpopulated, it may allocate a composite value. 
+ // For a field belonging to a oneof, it implicitly clears any other field + // that may be currently set within the same oneof. + // For extension fields, it implicitly stores the provided ExtensionType + // if not already stored. + // It panics if the field does not contain a composite type. + // + // Mutable is a mutating operation and unsafe for concurrent use. + Mutable(FieldDescriptor) Value + + // NewField returns a new value that is assignable to the field + // for the given descriptor. For scalars, this returns the default value. + // For lists, maps, and messages, this returns a new, empty, mutable value. + NewField(FieldDescriptor) Value + + // WhichOneof reports which field within the oneof is populated, + // returning nil if none are populated. + // It panics if the oneof descriptor does not belong to this message. + WhichOneof(OneofDescriptor) FieldDescriptor + + // GetUnknown retrieves the entire list of unknown fields. + // The caller may only mutate the contents of the RawFields + // if the mutated bytes are stored back into the message with SetUnknown. + GetUnknown() RawFields + + // SetUnknown stores an entire list of unknown fields. + // The raw fields must be syntactically valid according to the wire format. + // An implementation may panic if this is not the case. + // Once stored, the caller must not mutate the content of the RawFields. + // An empty RawFields may be passed to clear the fields. + // + // SetUnknown is a mutating operation and unsafe for concurrent use. + SetUnknown(RawFields) + + // IsValid reports whether the message is valid. + // + // An invalid message is an empty, read-only value. + // + // An invalid message often corresponds to a nil pointer of the concrete + // message type, but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool + + // ProtoMethods returns optional fast-path implementions of various operations. + // This method may return nil. + // + // The returned methods type is identical to + // "google.golang.org/protobuf/runtime/protoiface".Methods. + // Consult the protoiface package documentation for details. + ProtoMethods() *methods +} + +// RawFields is the raw bytes for an ordered sequence of fields. +// Each field contains both the tag (representing field number and wire type), +// and also the wire data itself. +type RawFields []byte + +// IsValid reports whether b is syntactically correct wire format. +func (b RawFields) IsValid() bool { + for len(b) > 0 { + _, _, n := protowire.ConsumeField(b) + if n < 0 { + return false + } + b = b[n:] + } + return true +} + +// List is a zero-indexed, ordered list. +// The element Value type is determined by FieldDescriptor.Kind. +// Providing a Value that is invalid or of an incorrect type panics. +type List interface { + // Len reports the number of entries in the List. + // Get, Set, and Truncate panic with out of bound indexes. + Len() int + + // Get retrieves the value at the given index. + // It never returns an invalid value. + Get(int) Value + + // Set stores a value for the given index. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(int, Value) + + // Append appends the provided value to the end of the list. 
+ // When appending a composite type, it is unspecified whether the appended + // value aliases the source's memory in any way. + // + // Append is a mutating operation and unsafe for concurrent use. + Append(Value) + + // AppendMutable appends a new, empty, mutable message value to the end + // of the list and returns it. + // It panics if the list does not contain a message type. + AppendMutable() Value + + // Truncate truncates the list to a smaller length. + // + // Truncate is a mutating operation and unsafe for concurrent use. + Truncate(int) + + // NewElement returns a new value for a list element. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewElement() Value + + // IsValid reports whether the list is valid. + // + // An invalid list is an empty, read-only value. + // + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. + IsValid() bool +} + +// Map is an unordered, associative map. +// The entry MapKey type is determined by FieldDescriptor.MapKey.Kind. +// The entry Value type is determined by FieldDescriptor.MapValue.Kind. +// Providing a MapKey or Value that is invalid or of an incorrect type panics. +type Map interface { + // Len reports the number of elements in the map. + Len() int + + // Range iterates over every map entry in an undefined order, + // calling f for each key and value encountered. + // Range calls f Len times unless f returns false, which stops iteration. + // While iterating, mutating operations may only be performed + // on the current map key. + Range(f func(MapKey, Value) bool) + + // Has reports whether an entry with the given key is in the map. + Has(MapKey) bool + + // Clear clears the entry associated with they given key. + // The operation does nothing if there is no entry associated with the key. + // + // Clear is a mutating operation and unsafe for concurrent use. + Clear(MapKey) + + // Get retrieves the value for an entry with the given key. + // It returns an invalid value for non-existent entries. + Get(MapKey) Value + + // Set stores the value for an entry with the given key. + // It panics when given a key or value that is invalid or the wrong type. + // When setting a composite type, it is unspecified whether the set + // value aliases the source's memory in any way. + // + // Set is a mutating operation and unsafe for concurrent use. + Set(MapKey, Value) + + // Mutable retrieves a mutable reference to the entry for the given key. + // If no entry exists for the key, it creates a new, empty, mutable value + // and stores it as the entry for the key. + // It panics if the map value is not a message. + Mutable(MapKey) Value + + // NewValue returns a new value assignable as a map value. + // For enums, this returns the first enum value. + // For other scalars, this returns the zero value. + // For messages, this returns a new, empty, mutable value. + NewValue() Value + + // IsValid reports whether the map is valid. + // + // An invalid map is an empty, read-only value. + // + // An invalid message often corresponds to a nil Go map value, + // but the details are implementation dependent. + // Validity is not part of the protobuf data model, and may not + // be preserved in marshaling or other operations. 
+ IsValid() bool +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go new file mode 100644 index 0000000..918e685 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_pure.go @@ -0,0 +1,59 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build purego appengine + +package protoreflect + +import "google.golang.org/protobuf/internal/pragma" + +type valueType int + +const ( + nilType valueType = iota + boolType + int32Type + int64Type + uint32Type + uint64Type + float32Type + float64Type + stringType + bytesType + enumType + ifaceType +) + +// value is a union where only one type can be represented at a time. +// This uses a distinct field for each type. This is type safe in Go, but +// occupies more memory than necessary (72B). +type value struct { + pragma.DoNotCompare // 0B + + typ valueType // 8B + num uint64 // 8B + str string // 16B + bin []byte // 24B + iface interface{} // 16B +} + +func valueOfString(v string) Value { + return Value{typ: stringType, str: v} +} +func valueOfBytes(v []byte) Value { + return Value{typ: bytesType, bin: v} +} +func valueOfIface(v interface{}) Value { + return Value{typ: ifaceType, iface: v} +} + +func (v Value) getString() string { + return v.str +} +func (v Value) getBytes() []byte { + return v.bin +} +func (v Value) getIface() interface{} { + return v.iface +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go new file mode 100644 index 0000000..f334f71 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_union.go @@ -0,0 +1,409 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoreflect + +import ( + "fmt" + "math" +) + +// Value is a union where only one Go type may be set at a time. +// The Value is used to represent all possible values a field may take. +// The following shows which Go type is used to represent each proto Kind: +// +// ╔════════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠════════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ float32 │ FloatKind ║ +// ║ float64 │ DoubleKind ║ +// ║ string │ StringKind ║ +// ║ []byte │ BytesKind ║ +// ║ EnumNumber │ EnumKind ║ +// ║ Message │ MessageKind, GroupKind ║ +// ╚════════════╧═════════════════════════════════════╝ +// +// Multiple protobuf Kinds may be represented by a single Go type if the type +// can losslessly represent the information for the proto kind. For example, +// Int64Kind, Sint64Kind, and Sfixed64Kind are all represented by int64, +// but use different integer encoding methods. +// +// The List or Map types are used if the field cardinality is repeated. +// A field is a List if FieldDescriptor.IsList reports true. +// A field is a Map if FieldDescriptor.IsMap reports true. +// +// Converting to/from a Value and a concrete Go value panics on type mismatch. 
+// For example, ValueOf("hello").Int() panics because this attempts to +// retrieve an int64 from a string. +type Value value + +// The protoreflect API uses a custom Value union type instead of interface{} +// to keep the future open for performance optimizations. Using an interface{} +// always incurs an allocation for primitives (e.g., int64) since it needs to +// be boxed on the heap (as interfaces can only contain pointers natively). +// Instead, we represent the Value union as a flat struct that internally keeps +// track of which type is set. Using unsafe, the Value union can be reduced +// down to 24B, which is identical in size to a slice. +// +// The latest compiler (Go1.11) currently suffers from some limitations: +// • With inlining, the compiler should be able to statically prove that +// only one of these switch cases are taken and inline one specific case. +// See https://golang.org/issue/22310. + +// ValueOf returns a Value initialized with the concrete value stored in v. +// This panics if the type does not match one of the allowed types in the +// Value union. +func ValueOf(v interface{}) Value { + switch v := v.(type) { + case nil: + return Value{} + case bool: + return ValueOfBool(v) + case int32: + return ValueOfInt32(v) + case int64: + return ValueOfInt64(v) + case uint32: + return ValueOfUint32(v) + case uint64: + return ValueOfUint64(v) + case float32: + return ValueOfFloat32(v) + case float64: + return ValueOfFloat64(v) + case string: + return ValueOfString(v) + case []byte: + return ValueOfBytes(v) + case EnumNumber: + return ValueOfEnum(v) + case Message, List, Map: + return valueOfIface(v) + default: + panic(fmt.Sprintf("invalid type: %T", v)) + } +} + +// ValueOfBool returns a new boolean value. +func ValueOfBool(v bool) Value { + if v { + return Value{typ: boolType, num: 1} + } else { + return Value{typ: boolType, num: 0} + } +} + +// ValueOfInt32 returns a new int32 value. +func ValueOfInt32(v int32) Value { + return Value{typ: int32Type, num: uint64(v)} +} + +// ValueOfInt64 returns a new int64 value. +func ValueOfInt64(v int64) Value { + return Value{typ: int64Type, num: uint64(v)} +} + +// ValueOfUint32 returns a new uint32 value. +func ValueOfUint32(v uint32) Value { + return Value{typ: uint32Type, num: uint64(v)} +} + +// ValueOfUint64 returns a new uint64 value. +func ValueOfUint64(v uint64) Value { + return Value{typ: uint64Type, num: v} +} + +// ValueOfFloat32 returns a new float32 value. +func ValueOfFloat32(v float32) Value { + return Value{typ: float32Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfFloat64 returns a new float64 value. +func ValueOfFloat64(v float64) Value { + return Value{typ: float64Type, num: uint64(math.Float64bits(float64(v)))} +} + +// ValueOfString returns a new string value. +func ValueOfString(v string) Value { + return valueOfString(v) +} + +// ValueOfBytes returns a new bytes value. +func ValueOfBytes(v []byte) Value { + return valueOfBytes(v[:len(v):len(v)]) +} + +// ValueOfEnum returns a new enum value. +func ValueOfEnum(v EnumNumber) Value { + return Value{typ: enumType, num: uint64(v)} +} + +// ValueOfMessage returns a new Message value. +func ValueOfMessage(v Message) Value { + return valueOfIface(v) +} + +// ValueOfList returns a new List value. +func ValueOfList(v List) Value { + return valueOfIface(v) +} + +// ValueOfMap returns a new Map value. +func ValueOfMap(v Map) Value { + return valueOfIface(v) +} + +// IsValid reports whether v is populated with a value. 
+func (v Value) IsValid() bool {
+ return v.typ != nilType
+}
+
+// Interface returns v as an interface{}.
+//
+// Invariant: v == ValueOf(v).Interface()
+func (v Value) Interface() interface{} {
+ switch v.typ {
+ case nilType:
+ return nil
+ case boolType:
+ return v.Bool()
+ case int32Type:
+ return int32(v.Int())
+ case int64Type:
+ return int64(v.Int())
+ case uint32Type:
+ return uint32(v.Uint())
+ case uint64Type:
+ return uint64(v.Uint())
+ case float32Type:
+ return float32(v.Float())
+ case float64Type:
+ return float64(v.Float())
+ case stringType:
+ return v.String()
+ case bytesType:
+ return v.Bytes()
+ case enumType:
+ return v.Enum()
+ default:
+ return v.getIface()
+ }
+}
+
+func (v Value) typeName() string {
+ switch v.typ {
+ case nilType:
+ return "nil"
+ case boolType:
+ return "bool"
+ case int32Type:
+ return "int32"
+ case int64Type:
+ return "int64"
+ case uint32Type:
+ return "uint32"
+ case uint64Type:
+ return "uint64"
+ case float32Type:
+ return "float32"
+ case float64Type:
+ return "float64"
+ case stringType:
+ return "string"
+ case bytesType:
+ return "bytes"
+ case enumType:
+ return "enum"
+ default:
+ switch v := v.getIface().(type) {
+ case Message:
+ return "message"
+ case List:
+ return "list"
+ case Map:
+ return "map"
+ default:
+ return fmt.Sprintf("<unknown: %v>", v)
+ }
+ }
+}
+
+func (v Value) panicMessage(what string) string {
+ return fmt.Sprintf("type mismatch: cannot convert %v to %s", v.typeName(), what)
+}
+
+// Bool returns v as a bool and panics if the type is not a bool.
+func (v Value) Bool() bool {
+ switch v.typ {
+ case boolType:
+ return v.num > 0
+ default:
+ panic(v.panicMessage("bool"))
+ }
+}
+
+// Int returns v as a int64 and panics if the type is not a int32 or int64.
+func (v Value) Int() int64 {
+ switch v.typ {
+ case int32Type, int64Type:
+ return int64(v.num)
+ default:
+ panic(v.panicMessage("int"))
+ }
+}
+
+// Uint returns v as a uint64 and panics if the type is not a uint32 or uint64.
+func (v Value) Uint() uint64 {
+ switch v.typ {
+ case uint32Type, uint64Type:
+ return uint64(v.num)
+ default:
+ panic(v.panicMessage("uint"))
+ }
+}
+
+// Float returns v as a float64 and panics if the type is not a float32 or float64.
+func (v Value) Float() float64 {
+ switch v.typ {
+ case float32Type, float64Type:
+ return math.Float64frombits(uint64(v.num))
+ default:
+ panic(v.panicMessage("float"))
+ }
+}
+
+// String returns v as a string. Since this method implements fmt.Stringer,
+// this returns the formatted string value for any non-string type.
+func (v Value) String() string {
+ switch v.typ {
+ case stringType:
+ return v.getString()
+ default:
+ return fmt.Sprint(v.Interface())
+ }
+}
+
+// Bytes returns v as a []byte and panics if the type is not a []byte.
+func (v Value) Bytes() []byte {
+ switch v.typ {
+ case bytesType:
+ return v.getBytes()
+ default:
+ panic(v.panicMessage("bytes"))
+ }
+}
+
+// Enum returns v as a EnumNumber and panics if the type is not a EnumNumber.
+func (v Value) Enum() EnumNumber {
+ switch v.typ {
+ case enumType:
+ return EnumNumber(v.num)
+ default:
+ panic(v.panicMessage("enum"))
+ }
+}
+
+// Message returns v as a Message and panics if the type is not a Message.
+func (v Value) Message() Message {
+ switch vi := v.getIface().(type) {
+ case Message:
+ return vi
+ default:
+ panic(v.panicMessage("message"))
+ }
+}
+
+// List returns v as a List and panics if the type is not a List.
+func (v Value) List() List { + switch vi := v.getIface().(type) { + case List: + return vi + default: + panic(v.panicMessage("list")) + } +} + +// Map returns v as a Map and panics if the type is not a Map. +func (v Value) Map() Map { + switch vi := v.getIface().(type) { + case Map: + return vi + default: + panic(v.panicMessage("map")) + } +} + +// MapKey returns v as a MapKey and panics for invalid MapKey types. +func (v Value) MapKey() MapKey { + switch v.typ { + case boolType, int32Type, int64Type, uint32Type, uint64Type, stringType: + return MapKey(v) + default: + panic(v.panicMessage("map key")) + } +} + +// MapKey is used to index maps, where the Go type of the MapKey must match +// the specified key Kind (see MessageDescriptor.IsMapEntry). +// The following shows what Go type is used to represent each proto Kind: +// +// ╔═════════╤═════════════════════════════════════╗ +// ║ Go type │ Protobuf kind ║ +// ╠═════════╪═════════════════════════════════════╣ +// ║ bool │ BoolKind ║ +// ║ int32 │ Int32Kind, Sint32Kind, Sfixed32Kind ║ +// ║ int64 │ Int64Kind, Sint64Kind, Sfixed64Kind ║ +// ║ uint32 │ Uint32Kind, Fixed32Kind ║ +// ║ uint64 │ Uint64Kind, Fixed64Kind ║ +// ║ string │ StringKind ║ +// ╚═════════╧═════════════════════════════════════╝ +// +// A MapKey is constructed and accessed through a Value: +// k := ValueOf("hash").MapKey() // convert string to MapKey +// s := k.String() // convert MapKey to string +// +// The MapKey is a strict subset of valid types used in Value; +// converting a Value to a MapKey with an invalid type panics. +type MapKey value + +// IsValid reports whether k is populated with a value. +func (k MapKey) IsValid() bool { + return Value(k).IsValid() +} + +// Interface returns k as an interface{}. +func (k MapKey) Interface() interface{} { + return Value(k).Interface() +} + +// Bool returns k as a bool and panics if the type is not a bool. +func (k MapKey) Bool() bool { + return Value(k).Bool() +} + +// Int returns k as a int64 and panics if the type is not a int32 or int64. +func (k MapKey) Int() int64 { + return Value(k).Int() +} + +// Uint returns k as a uint64 and panics if the type is not a uint32 or uint64. +func (k MapKey) Uint() uint64 { + return Value(k).Uint() +} + +// String returns k as a string. Since this method implements fmt.Stringer, +// this returns the formatted string value for any non-string type. +func (k MapKey) String() string { + return Value(k).String() +} + +// Value returns k as a Value. +func (k MapKey) Value() Value { + return Value(k) +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go new file mode 100644 index 0000000..c45debd --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/value_unsafe.go @@ -0,0 +1,98 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !purego,!appengine + +package protoreflect + +import ( + "unsafe" + + "google.golang.org/protobuf/internal/pragma" +) + +type ( + stringHeader struct { + Data unsafe.Pointer + Len int + } + sliceHeader struct { + Data unsafe.Pointer + Len int + Cap int + } + ifaceHeader struct { + Type unsafe.Pointer + Data unsafe.Pointer + } +) + +var ( + nilType = typeOf(nil) + boolType = typeOf(*new(bool)) + int32Type = typeOf(*new(int32)) + int64Type = typeOf(*new(int64)) + uint32Type = typeOf(*new(uint32)) + uint64Type = typeOf(*new(uint64)) + float32Type = typeOf(*new(float32)) + float64Type = typeOf(*new(float64)) + stringType = typeOf(*new(string)) + bytesType = typeOf(*new([]byte)) + enumType = typeOf(*new(EnumNumber)) +) + +// typeOf returns a pointer to the Go type information. +// The pointer is comparable and equal if and only if the types are identical. +func typeOf(t interface{}) unsafe.Pointer { + return (*ifaceHeader)(unsafe.Pointer(&t)).Type +} + +// value is a union where only one type can be represented at a time. +// The struct is 24B large on 64-bit systems and requires the minimum storage +// necessary to represent each possible type. +// +// The Go GC needs to be able to scan variables containing pointers. +// As such, pointers and non-pointers cannot be intermixed. +type value struct { + pragma.DoNotCompare // 0B + + // typ stores the type of the value as a pointer to the Go type. + typ unsafe.Pointer // 8B + + // ptr stores the data pointer for a String, Bytes, or interface value. + ptr unsafe.Pointer // 8B + + // num stores a Bool, Int32, Int64, Uint32, Uint64, Float32, Float64, or + // Enum value as a raw uint64. + // + // It is also used to store the length of a String or Bytes value; + // the capacity is ignored. + num uint64 // 8B +} + +func valueOfString(v string) Value { + p := (*stringHeader)(unsafe.Pointer(&v)) + return Value{typ: stringType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfBytes(v []byte) Value { + p := (*sliceHeader)(unsafe.Pointer(&v)) + return Value{typ: bytesType, ptr: p.Data, num: uint64(len(v))} +} +func valueOfIface(v interface{}) Value { + p := (*ifaceHeader)(unsafe.Pointer(&v)) + return Value{typ: p.Type, ptr: p.Data} +} + +func (v Value) getString() (x string) { + *(*stringHeader)(unsafe.Pointer(&x)) = stringHeader{Data: v.ptr, Len: int(v.num)} + return x +} +func (v Value) getBytes() (x []byte) { + *(*sliceHeader)(unsafe.Pointer(&x)) = sliceHeader{Data: v.ptr, Len: int(v.num), Cap: int(v.num)} + return x +} +func (v Value) getIface() (x interface{}) { + *(*ifaceHeader)(unsafe.Pointer(&x)) = ifaceHeader{Type: v.typ, Data: v.ptr} + return x +} diff --git a/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go new file mode 100644 index 0000000..43f16c6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/reflect/protoregistry/registry.go @@ -0,0 +1,768 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoregistry provides data structures to register and lookup +// protobuf descriptor types. +// +// The Files registry contains file descriptors and provides the ability +// to iterate over the files or lookup a specific descriptor within the files. +// Files only contains protobuf descriptors and has no understanding of Go +// type information that may be associated with each descriptor. 
+// +// The Types registry contains descriptor types for which there is a known +// Go type associated with that descriptor. It provides the ability to iterate +// over the registered types or lookup a type by name. +package protoregistry + +import ( + "fmt" + "log" + "strings" + "sync" + + "google.golang.org/protobuf/internal/errors" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// ignoreConflict reports whether to ignore a registration conflict +// given the descriptor being registered and the error. +// It is a variable so that the behavior is easily overridden in another file. +var ignoreConflict = func(d protoreflect.Descriptor, err error) bool { + log.Printf(""+ + "WARNING: %v\n"+ + "A future release will panic on registration conflicts. See:\n"+ + "https://developers.google.com/protocol-buffers/docs/reference/go/faq#namespace-conflict\n"+ + "\n", err) + return true +} + +var globalMutex sync.RWMutex + +// GlobalFiles is a global registry of file descriptors. +var GlobalFiles *Files = new(Files) + +// GlobalTypes is the registry used by default for type lookups +// unless a local registry is provided by the user. +var GlobalTypes *Types = new(Types) + +// NotFound is a sentinel error value to indicate that the type was not found. +// +// Since registry lookup can happen in the critical performance path, resolvers +// must return this exact error value, not an error wrapping it. +var NotFound = errors.New("not found") + +// Files is a registry for looking up or iterating over files and the +// descriptors contained within them. +// The Find and Range methods are safe for concurrent use. +type Files struct { + // The map of descsByName contains: + // EnumDescriptor + // EnumValueDescriptor + // MessageDescriptor + // ExtensionDescriptor + // ServiceDescriptor + // *packageDescriptor + // + // Note that files are stored as a slice, since a package may contain + // multiple files. Only top-level declarations are registered. + // Note that enum values are in the top-level since that are in the same + // scope as the parent enum. + descsByName map[protoreflect.FullName]interface{} + filesByPath map[string]protoreflect.FileDescriptor +} + +type packageDescriptor struct { + files []protoreflect.FileDescriptor +} + +// RegisterFile registers the provided file descriptor. +// +// If any descriptor within the file conflicts with the descriptor of any +// previously registered file (e.g., two enums with the same full name), +// then the file is not registered and an error is returned. +// +// It is permitted for multiple files to have the same file path. 
+func (r *Files) RegisterFile(file protoreflect.FileDescriptor) error { + if r == GlobalFiles { + globalMutex.Lock() + defer globalMutex.Unlock() + } + if r.descsByName == nil { + r.descsByName = map[protoreflect.FullName]interface{}{ + "": &packageDescriptor{}, + } + r.filesByPath = make(map[string]protoreflect.FileDescriptor) + } + path := file.Path() + if prev := r.filesByPath[path]; prev != nil { + err := errors.New("file %q is already registered", file.Path()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + switch prev := r.descsByName[name]; prev.(type) { + case nil, *packageDescriptor: + default: + err := errors.New("file %q has a package name conflict over %v", file.Path(), name) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(file, err) { + err = nil + } + return err + } + } + var err error + var hasConflict bool + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + if prev := r.descsByName[d.FullName()]; prev != nil { + hasConflict = true + err = errors.New("file %q has a name conflict over %v", file.Path(), d.FullName()) + err = amendErrorWithCaller(err, prev, file) + if r == GlobalFiles && ignoreConflict(d, err) { + err = nil + } + } + }) + if hasConflict { + return err + } + + for name := file.Package(); name != ""; name = name.Parent() { + if r.descsByName[name] == nil { + r.descsByName[name] = &packageDescriptor{} + } + } + p := r.descsByName[file.Package()].(*packageDescriptor) + p.files = append(p.files, file) + rangeTopLevelDescriptors(file, func(d protoreflect.Descriptor) { + r.descsByName[d.FullName()] = d + }) + r.filesByPath[path] = file + return nil +} + +// FindDescriptorByName looks up a descriptor by the full name. +// +// This returns (nil, NotFound) if not found. 
+func (r *Files) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + prefix := name + suffix := nameSuffix("") + for prefix != "" { + if d, ok := r.descsByName[prefix]; ok { + switch d := d.(type) { + case protoreflect.EnumDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.EnumValueDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.MessageDescriptor: + if d.FullName() == name { + return d, nil + } + if d := findDescriptorInMessage(d, suffix); d != nil && d.FullName() == name { + return d, nil + } + case protoreflect.ExtensionDescriptor: + if d.FullName() == name { + return d, nil + } + case protoreflect.ServiceDescriptor: + if d.FullName() == name { + return d, nil + } + if d := d.Methods().ByName(suffix.Pop()); d != nil && d.FullName() == name { + return d, nil + } + } + return nil, NotFound + } + prefix = prefix.Parent() + suffix = nameSuffix(name[len(prefix)+len("."):]) + } + return nil, NotFound +} + +func findDescriptorInMessage(md protoreflect.MessageDescriptor, suffix nameSuffix) protoreflect.Descriptor { + name := suffix.Pop() + if suffix == "" { + if ed := md.Enums().ByName(name); ed != nil { + return ed + } + for i := md.Enums().Len() - 1; i >= 0; i-- { + if vd := md.Enums().Get(i).Values().ByName(name); vd != nil { + return vd + } + } + if xd := md.Extensions().ByName(name); xd != nil { + return xd + } + if fd := md.Fields().ByName(name); fd != nil { + return fd + } + if od := md.Oneofs().ByName(name); od != nil { + return od + } + } + if md := md.Messages().ByName(name); md != nil { + if suffix == "" { + return md + } + return findDescriptorInMessage(md, suffix) + } + return nil +} + +type nameSuffix string + +func (s *nameSuffix) Pop() (name protoreflect.Name) { + if i := strings.IndexByte(string(*s), '.'); i >= 0 { + name, *s = protoreflect.Name((*s)[:i]), (*s)[i+1:] + } else { + name, *s = protoreflect.Name((*s)), "" + } + return name +} + +// FindFileByPath looks up a file by the path. +// +// This returns (nil, NotFound) if not found. +func (r *Files) FindFileByPath(path string) (protoreflect.FileDescriptor, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if fd, ok := r.filesByPath[path]; ok { + return fd, nil + } + return nil, NotFound +} + +// NumFiles reports the number of registered files. +func (r *Files) NumFiles() int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.filesByPath) +} + +// RangeFiles iterates over all registered files while f returns true. +// The iteration order is undefined. +func (r *Files) RangeFiles(f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, file := range r.filesByPath { + if !f(file) { + return + } + } +} + +// NumFilesByPackage reports the number of registered files in a proto package. 
+func (r *Files) NumFilesByPackage(name protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return 0 + } + return len(p.files) +} + +// RangeFilesByPackage iterates over all registered files in a given proto package +// while f returns true. The iteration order is undefined. +func (r *Files) RangeFilesByPackage(name protoreflect.FullName, f func(protoreflect.FileDescriptor) bool) { + if r == nil { + return + } + if r == GlobalFiles { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + p, ok := r.descsByName[name].(*packageDescriptor) + if !ok { + return + } + for _, file := range p.files { + if !f(file) { + return + } + } +} + +// rangeTopLevelDescriptors iterates over all top-level descriptors in a file +// which will be directly entered into the registry. +func rangeTopLevelDescriptors(fd protoreflect.FileDescriptor, f func(protoreflect.Descriptor)) { + eds := fd.Enums() + for i := eds.Len() - 1; i >= 0; i-- { + f(eds.Get(i)) + vds := eds.Get(i).Values() + for i := vds.Len() - 1; i >= 0; i-- { + f(vds.Get(i)) + } + } + mds := fd.Messages() + for i := mds.Len() - 1; i >= 0; i-- { + f(mds.Get(i)) + } + xds := fd.Extensions() + for i := xds.Len() - 1; i >= 0; i-- { + f(xds.Get(i)) + } + sds := fd.Services() + for i := sds.Len() - 1; i >= 0; i-- { + f(sds.Get(i)) + } +} + +// MessageTypeResolver is an interface for looking up messages. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type MessageTypeResolver interface { + // FindMessageByName looks up a message by its full name. + // E.g., "google.protobuf.Any" + // + // This return (nil, NotFound) if not found. + FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) + + // FindMessageByURL looks up a message by a URL identifier. + // See documentation on google.protobuf.Any.type_url for the URL format. + // + // This returns (nil, NotFound) if not found. + FindMessageByURL(url string) (protoreflect.MessageType, error) +} + +// ExtensionTypeResolver is an interface for looking up extensions. +// +// A compliant implementation must deterministically return the same type +// if no error is encountered. +// +// The Types type implements this interface. +type ExtensionTypeResolver interface { + // FindExtensionByName looks up a extension field by the field's full name. + // Note that this is the full name of the field as determined by + // where the extension is declared and is unrelated to the full name of the + // message being extended. + // + // This returns (nil, NotFound) if not found. + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + + // FindExtensionByNumber looks up a extension field by the field number + // within some parent message, identified by full name. + // + // This returns (nil, NotFound) if not found. + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) +} + +var ( + _ MessageTypeResolver = (*Types)(nil) + _ ExtensionTypeResolver = (*Types)(nil) +) + +// Types is a registry for looking up or iterating over descriptor types. +// The Find and Range methods are safe for concurrent use. 
+type Types struct { + typesByName typesByName + extensionsByMessage extensionsByMessage + + numEnums int + numMessages int + numExtensions int +} + +type ( + typesByName map[protoreflect.FullName]interface{} + extensionsByMessage map[protoreflect.FullName]extensionsByNumber + extensionsByNumber map[protoreflect.FieldNumber]protoreflect.ExtensionType +) + +// RegisterMessage registers the provided message type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterMessage(mt protoreflect.MessageType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + md := mt.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("message", md, mt); err != nil { + return err + } + r.numMessages++ + return nil +} + +// RegisterEnum registers the provided enum type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterEnum(et protoreflect.EnumType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + ed := et.Descriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + if err := r.register("enum", ed, et); err != nil { + return err + } + r.numEnums++ + return nil +} + +// RegisterExtension registers the provided extension type. +// +// If a naming conflict occurs, the type is not registered and an error is returned. +func (r *Types) RegisterExtension(xt protoreflect.ExtensionType) error { + // Under rare circumstances getting the descriptor might recursively + // examine the registry, so fetch it before locking. + // + // A known case where this can happen: Fetching the TypeDescriptor for a + // legacy ExtensionDesc can consult the global registry. + xd := xt.TypeDescriptor() + + if r == GlobalTypes { + globalMutex.Lock() + defer globalMutex.Unlock() + } + + field := xd.Number() + message := xd.ContainingMessage().FullName() + if prev := r.extensionsByMessage[message][field]; prev != nil { + err := errors.New("extension number %d is already registered on message %v", field, message) + err = amendErrorWithCaller(err, prev, xt) + if !(r == GlobalTypes && ignoreConflict(xd, err)) { + return err + } + } + + if err := r.register("extension", xd, xt); err != nil { + return err + } + if r.extensionsByMessage == nil { + r.extensionsByMessage = make(extensionsByMessage) + } + if r.extensionsByMessage[message] == nil { + r.extensionsByMessage[message] = make(extensionsByNumber) + } + r.extensionsByMessage[message][field] = xt + r.numExtensions++ + return nil +} + +func (r *Types) register(kind string, desc protoreflect.Descriptor, typ interface{}) error { + name := desc.FullName() + prev := r.typesByName[name] + if prev != nil { + err := errors.New("%v %v is already registered", kind, name) + err = amendErrorWithCaller(err, prev, typ) + if !(r == GlobalTypes && ignoreConflict(desc, err)) { + return err + } + } + if r.typesByName == nil { + r.typesByName = make(typesByName) + } + r.typesByName[name] = typ + return nil +} + +// FindEnumByName looks up an enum by its full name. +// E.g., "google.protobuf.Field.Kind". +// +// This returns (nil, NotFound) if not found. 
+func (r *Types) FindEnumByName(enum protoreflect.FullName) (protoreflect.EnumType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[enum]; v != nil { + if et, _ := v.(protoreflect.EnumType); et != nil { + return et, nil + } + return nil, errors.New("found wrong type: got %v, want enum", typeName(v)) + } + return nil, NotFound +} + +// FindMessageByName looks up a message by its full name. +// E.g., "google.protobuf.Any" +// +// This return (nil, NotFound) if not found. +func (r *Types) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) { + // The full name by itself is a valid URL. + return r.FindMessageByURL(string(message)) +} + +// FindMessageByURL looks up a message by a URL identifier. +// See documentation on google.protobuf.Any.type_url for the URL format. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindMessageByURL(url string) (protoreflect.MessageType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + message := protoreflect.FullName(url) + if i := strings.LastIndexByte(url, '/'); i >= 0 { + message = message[i+len("/"):] + } + + if v := r.typesByName[message]; v != nil { + if mt, _ := v.(protoreflect.MessageType); mt != nil { + return mt, nil + } + return nil, errors.New("found wrong type: got %v, want message", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByName looks up a extension field by the field's full name. +// Note that this is the full name of the field as determined by +// where the extension is declared and is unrelated to the full name of the +// message being extended. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if v := r.typesByName[field]; v != nil { + if xt, _ := v.(protoreflect.ExtensionType); xt != nil { + return xt, nil + } + return nil, errors.New("found wrong type: got %v, want extension", typeName(v)) + } + return nil, NotFound +} + +// FindExtensionByNumber looks up a extension field by the field number +// within some parent message, identified by full name. +// +// This returns (nil, NotFound) if not found. +func (r *Types) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) { + if r == nil { + return nil, NotFound + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + if xt, ok := r.extensionsByMessage[message][field]; ok { + return xt, nil + } + return nil, NotFound +} + +// NumEnums reports the number of registered enums. +func (r *Types) NumEnums() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numEnums +} + +// RangeEnums iterates over all registered enums while f returns true. +// Iteration order is undefined. +func (r *Types) RangeEnums(f func(protoreflect.EnumType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if et, ok := typ.(protoreflect.EnumType); ok { + if !f(et) { + return + } + } + } +} + +// NumMessages reports the number of registered messages. 
+func (r *Types) NumMessages() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numMessages +} + +// RangeMessages iterates over all registered messages while f returns true. +// Iteration order is undefined. +func (r *Types) RangeMessages(f func(protoreflect.MessageType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if mt, ok := typ.(protoreflect.MessageType); ok { + if !f(mt) { + return + } + } + } +} + +// NumExtensions reports the number of registered extensions. +func (r *Types) NumExtensions() int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return r.numExtensions +} + +// RangeExtensions iterates over all registered extensions while f returns true. +// Iteration order is undefined. +func (r *Types) RangeExtensions(f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, typ := range r.typesByName { + if xt, ok := typ.(protoreflect.ExtensionType); ok { + if !f(xt) { + return + } + } + } +} + +// NumExtensionsByMessage reports the number of registered extensions for +// a given message type. +func (r *Types) NumExtensionsByMessage(message protoreflect.FullName) int { + if r == nil { + return 0 + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + return len(r.extensionsByMessage[message]) +} + +// RangeExtensionsByMessage iterates over all registered extensions filtered +// by a given message type while f returns true. Iteration order is undefined. +func (r *Types) RangeExtensionsByMessage(message protoreflect.FullName, f func(protoreflect.ExtensionType) bool) { + if r == nil { + return + } + if r == GlobalTypes { + globalMutex.RLock() + defer globalMutex.RUnlock() + } + for _, xt := range r.extensionsByMessage[message] { + if !f(xt) { + return + } + } +} + +func typeName(t interface{}) string { + switch t.(type) { + case protoreflect.EnumType: + return "enum" + case protoreflect.MessageType: + return "message" + case protoreflect.ExtensionType: + return "extension" + default: + return fmt.Sprintf("%T", t) + } +} + +func amendErrorWithCaller(err error, prev, curr interface{}) error { + prevPkg := goPackage(prev) + currPkg := goPackage(curr) + if prevPkg == "" || currPkg == "" || prevPkg == currPkg { + return err + } + return errors.New("%s\n\tpreviously from: %q\n\tcurrently from: %q", err, prevPkg, currPkg) +} + +func goPackage(v interface{}) string { + switch d := v.(type) { + case protoreflect.EnumType: + v = d.Descriptor() + case protoreflect.MessageType: + v = d.Descriptor() + case protoreflect.ExtensionType: + v = d.TypeDescriptor() + } + if d, ok := v.(protoreflect.Descriptor); ok { + v = d.ParentFile() + } + if d, ok := v.(interface{ GoPackagePath() string }); ok { + return d.GoPackagePath() + } + return "" +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go new file mode 100644 index 0000000..c587276 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/legacy.go @@ -0,0 +1,15 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package protoiface + +type MessageV1 interface { + Reset() + String() string + ProtoMessage() +} + +type ExtensionRangeV1 struct { + Start, End int32 // both inclusive +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go new file mode 100644 index 0000000..32c04f6 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoiface/methods.go @@ -0,0 +1,167 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoiface contains types referenced or implemented by messages. +// +// WARNING: This package should only be imported by message implementations. +// The functionality found in this package should be accessed through +// higher-level abstractions provided by the proto package. +package protoiface + +import ( + "google.golang.org/protobuf/internal/pragma" + "google.golang.org/protobuf/reflect/protoreflect" +) + +// Methods is a set of optional fast-path implementations of various operations. +type Methods = struct { + pragma.NoUnkeyedLiterals + + // Flags indicate support for optional features. + Flags SupportFlags + + // Size returns the size in bytes of the wire-format encoding of a message. + // Marshal must be provided if a custom Size is provided. + Size func(SizeInput) SizeOutput + + // Marshal formats a message in the wire-format encoding to the provided buffer. + // Size should be provided if a custom Marshal is provided. + // It must not return an error for a partial message. + Marshal func(MarshalInput) (MarshalOutput, error) + + // Unmarshal parses the wire-format encoding and merges the result into a message. + // It must not reset the target message or return an error for a partial message. + Unmarshal func(UnmarshalInput) (UnmarshalOutput, error) + + // Merge merges the contents of a source message into a destination message. + Merge func(MergeInput) MergeOutput + + // CheckInitialized returns an error if any required fields in the message are not set. + CheckInitialized func(CheckInitializedInput) (CheckInitializedOutput, error) +} + +// SupportFlags indicate support for optional features. +type SupportFlags = uint64 + +const ( + // SupportMarshalDeterministic reports whether MarshalOptions.Deterministic is supported. + SupportMarshalDeterministic SupportFlags = 1 << iota + + // SupportUnmarshalDiscardUnknown reports whether UnmarshalOptions.DiscardUnknown is supported. + SupportUnmarshalDiscardUnknown +) + +// SizeInput is input to the Size method. +type SizeInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Flags MarshalInputFlags +} + +// SizeOutput is output from the Size method. +type SizeOutput = struct { + pragma.NoUnkeyedLiterals + + Size int +} + +// MarshalInput is input to the Marshal method. +type MarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // output is appended to this buffer + Flags MarshalInputFlags +} + +// MarshalOutput is output from the Marshal method. +type MarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Buf []byte // contains marshaled message +} + +// MarshalInputFlags configure the marshaler. +// Most flags correspond to fields in proto.MarshalOptions. +type MarshalInputFlags = uint8 + +const ( + MarshalDeterministic MarshalInputFlags = 1 << iota + MarshalUseCachedSize +) + +// UnmarshalInput is input to the Unmarshal method. 
+type UnmarshalInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message + Buf []byte // input buffer + Flags UnmarshalInputFlags + Resolver interface { + FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) + FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) + } +} + +// UnmarshalOutput is output from the Unmarshal method. +type UnmarshalOutput = struct { + pragma.NoUnkeyedLiterals + + Flags UnmarshalOutputFlags +} + +// UnmarshalInputFlags configure the unmarshaler. +// Most flags correspond to fields in proto.UnmarshalOptions. +type UnmarshalInputFlags = uint8 + +const ( + UnmarshalDiscardUnknown UnmarshalInputFlags = 1 << iota +) + +// UnmarshalOutputFlags are output from the Unmarshal method. +type UnmarshalOutputFlags = uint8 + +const ( + // UnmarshalInitialized may be set on return if all required fields are known to be set. + // If unset, then it does not necessarily indicate that the message is uninitialized, + // only that its status could not be confirmed. + UnmarshalInitialized UnmarshalOutputFlags = 1 << iota +) + +// MergeInput is input to the Merge method. +type MergeInput = struct { + pragma.NoUnkeyedLiterals + + Source protoreflect.Message + Destination protoreflect.Message +} + +// MergeOutput is output from the Merge method. +type MergeOutput = struct { + pragma.NoUnkeyedLiterals + + Flags MergeOutputFlags +} + +// MergeOutputFlags are output from the Merge method. +type MergeOutputFlags = uint8 + +const ( + // MergeComplete reports whether the merge was performed. + // If unset, the merger must have made no changes to the destination. + MergeComplete MergeOutputFlags = 1 << iota +) + +// CheckInitializedInput is input to the CheckInitialized method. +type CheckInitializedInput = struct { + pragma.NoUnkeyedLiterals + + Message protoreflect.Message +} + +// CheckInitializedOutput is output from the CheckInitialized method. +type CheckInitializedOutput = struct { + pragma.NoUnkeyedLiterals +} diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go new file mode 100644 index 0000000..4a1ab7f --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/impl.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package protoimpl contains the default implementation for messages +// generated by protoc-gen-go. +// +// WARNING: This package should only ever be imported by generated messages. +// The compatibility agreement covers nothing except for functionality needed +// to keep existing generated messages operational. Breakages that occur due +// to unauthorized usages of this package are not the author's responsibility. +package protoimpl + +import ( + "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/filetype" + "google.golang.org/protobuf/internal/impl" +) + +// UnsafeEnabled specifies whether package unsafe can be used. +const UnsafeEnabled = impl.UnsafeEnabled + +type ( + // Types used by generated code in init functions. + DescBuilder = filedesc.Builder + TypeBuilder = filetype.Builder + + // Types used by generated code to implement EnumType, MessageType, and ExtensionType. 
+ EnumInfo = impl.EnumInfo + MessageInfo = impl.MessageInfo + ExtensionInfo = impl.ExtensionInfo + + // Types embedded in generated messages. + MessageState = impl.MessageState + SizeCache = impl.SizeCache + WeakFields = impl.WeakFields + UnknownFields = impl.UnknownFields + ExtensionFields = impl.ExtensionFields + ExtensionFieldV1 = impl.ExtensionField + + Pointer = impl.Pointer +) + +var X impl.Export diff --git a/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go new file mode 100644 index 0000000..ff094e1 --- /dev/null +++ b/vendor/google.golang.org/protobuf/runtime/protoimpl/version.go @@ -0,0 +1,56 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package protoimpl + +import ( + "google.golang.org/protobuf/internal/version" +) + +const ( + // MaxVersion is the maximum supported version for generated .pb.go files. + // It is always the current version of the module. + MaxVersion = version.Minor + + // GenVersion is the runtime version required by generated .pb.go files. + // This is incremented when generated code relies on new functionality + // in the runtime. + GenVersion = 20 + + // MinVersion is the minimum supported version for generated .pb.go files. + // This is incremented when the runtime drops support for old code. + MinVersion = 0 +) + +// EnforceVersion is used by code generated by protoc-gen-go +// to statically enforce minimum and maximum versions of this package. +// A compilation failure implies either that: +// * the runtime package is too old and needs to be updated OR +// * the generated code is too old and needs to be regenerated. +// +// The runtime package can be upgraded by running: +// go get google.golang.org/protobuf +// +// The generated code can be regenerated by running: +// protoc --go_out=${PROTOC_GEN_GO_ARGS} ${PROTO_FILES} +// +// Example usage by generated code: +// const ( +// // Verify that this generated code is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(genVersion - protoimpl.MinVersion) +// // Verify that runtime/protoimpl is sufficiently up-to-date. +// _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - genVersion) +// ) +// +// The genVersion is the current minor version used to generated the code. +// This compile-time check relies on negative integer overflow of a uint +// being a compilation failure (guaranteed by the Go specification). +type EnforceVersion uint + +// This enforces the following invariant: +// MinVersion ≤ GenVersion ≤ MaxVersion +const ( + _ = EnforceVersion(GenVersion - MinVersion) + _ = EnforceVersion(MaxVersion - GenVersion) +) diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go new file mode 100644 index 0000000..5f9498e --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -0,0 +1,287 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/any.proto
+
+package anypb
+
+import (
+ protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+ protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+ reflect "reflect"
+ sync "sync"
+)
+
+// `Any` contains an arbitrary serialized protocol buffer message along with a
+// URL that describes the type of the serialized message.
+//
+// Protobuf library provides support to pack/unpack Any values in the form
+// of utility functions or additional generated methods of the Any type.
+//
+// Example 1: Pack and unpack a message in C++.
+//
+// Foo foo = ...;
+// Any any;
+// any.PackFrom(foo);
+// ...
+// if (any.UnpackTo(&foo)) {
+// ...
+// }
+//
+// Example 2: Pack and unpack a message in Java.
+//
+// Foo foo = ...;
+// Any any = Any.pack(foo);
+// ...
+// if (any.is(Foo.class)) {
+// foo = any.unpack(Foo.class);
+// }
+//
+// Example 3: Pack and unpack a message in Python.
+//
+// foo = Foo(...)
+// any = Any()
+// any.Pack(foo)
+// ...
+// if any.Is(Foo.DESCRIPTOR):
+// any.Unpack(foo)
+// ...
+//
+// Example 4: Pack and unpack a message in Go
+//
+// foo := &pb.Foo{...}
+// any, err := ptypes.MarshalAny(foo)
+// ...
+// foo := &pb.Foo{}
+// if err := ptypes.UnmarshalAny(any, foo); err != nil {
+// ...
+// }
+//
+// The pack methods provided by protobuf library will by default use
+// 'type.googleapis.com/full.type.name' as the type URL and the unpack
+// methods only use the fully qualified type name after the last '/'
+// in the type URL, for example "foo.bar.com/x/y.z" will yield type
+// name "y.z".
+//
+//
+// JSON
+// ====
+// The JSON representation of an `Any` value uses the regular
+// representation of the deserialized, embedded message, with an
+// additional field `@type` which contains the type URL. Example:
+//
+// package google.profile;
+// message Person {
+// string first_name = 1;
+// string last_name = 2;
+// }
+//
+// {
+// "@type": "type.googleapis.com/google.profile.Person",
+// "firstName": <string>,
+// "lastName": <string>
+// }
+//
+// If the embedded message type is well-known and has a custom JSON
+// representation, that representation will be embedded adding a field
+// `value` which holds the custom JSON in addition to the `@type`
+// field.
Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. This string must contain at least + // one "/" character. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). + // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *Any) Reset() { + *x = Any{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_any_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Any) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Any) ProtoMessage() {} + +func (x *Any) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_any_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
+func (*Any) Descriptor() ([]byte, []int) { + return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} +} + +func (x *Any) GetTypeUrl() string { + if x != nil { + return x.TypeUrl + } + return "" +} + +func (x *Any) GetValue() []byte { + if x != nil { + return x.Value + } + return nil +} + +var File_google_protobuf_any_proto protoreflect.FileDescriptor + +var file_google_protobuf_any_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, + 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, + 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, + 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, + 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_any_proto_rawDescOnce sync.Once + file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc +) + +func file_google_protobuf_any_proto_rawDescGZIP() []byte { + file_google_protobuf_any_proto_rawDescOnce.Do(func() { + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + }) + return file_google_protobuf_any_proto_rawDescData +} + +var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_any_proto_goTypes = []interface{}{ + (*Any)(nil), // 0: google.protobuf.Any +} +var file_google_protobuf_any_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_any_proto_init() } +func file_google_protobuf_any_proto_init() { + if File_google_protobuf_any_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Any); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_any_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_any_proto_goTypes, + DependencyIndexes: file_google_protobuf_any_proto_depIdxs, + 
MessageInfos: file_google_protobuf_any_proto_msgTypes, + }.Build() + File_google_protobuf_any_proto = out.File + file_google_protobuf_any_proto_rawDesc = nil + file_google_protobuf_any_proto_goTypes = nil + file_google_protobuf_any_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go new file mode 100644 index 0000000..3997c60 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go @@ -0,0 +1,249 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package durationpb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (duration.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (x *Duration) Reset() { + *x = Duration{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Duration) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Duration) ProtoMessage() {} + +func (x *Duration) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_duration_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
+func (*Duration) Descriptor() ([]byte, []int) { + return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} +} + +func (x *Duration) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Duration) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_duration_proto protoreflect.FileDescriptor + +var file_google_protobuf_duration_proto_rawDesc = []byte{ + 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, + 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, + 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, + 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_duration_proto_rawDescOnce sync.Once + file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc +) + +func file_google_protobuf_duration_proto_rawDescGZIP() []byte { + file_google_protobuf_duration_proto_rawDescOnce.Do(func() { + file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) + }) + return file_google_protobuf_duration_proto_rawDescData +} + +var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_protobuf_duration_proto_goTypes = []interface{}{ + (*Duration)(nil), // 0: google.protobuf.Duration +} +var file_google_protobuf_duration_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_duration_proto_init() } +func file_google_protobuf_duration_proto_init() { + if File_google_protobuf_duration_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Duration); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_duration_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_duration_proto_goTypes, + DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, + MessageInfos: file_google_protobuf_duration_proto_msgTypes, + }.Build() + File_google_protobuf_duration_proto = out.File + file_google_protobuf_duration_proto_rawDesc = nil + file_google_protobuf_duration_proto_goTypes = nil + file_google_protobuf_duration_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go new file mode 100644 index 0000000..6fe6d42 --- /dev/null +++ b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go @@ -0,0 +1,271 @@ +// Protocol Buffers - Google's data interchange format +// Copyright 2008 Google Inc. All rights reserved. +// https://developers.google.com/protocol-buffers/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package timestamppb + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +// A Timestamp represents a point in time independent of any time zone or local +// calendar, encoded as a count of seconds and fractions of seconds at +// nanosecond resolution. The count is relative to an epoch at UTC midnight on +// January 1, 1970, in the proleptic Gregorian calendar which extends the +// Gregorian calendar backwards to year one. +// +// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap +// second table is needed for interpretation, using a [24-hour linear +// smear](https://developers.google.com/time/smear). +// +// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By +// restricting to that range, we ensure that we can convert to and from [RFC +// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. +// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard +// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using +// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with +// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use +// the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. 
Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (x *Timestamp) Reset() { + *x = Timestamp{} + if protoimpl.UnsafeEnabled { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Timestamp) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Timestamp) ProtoMessage() {} + +func (x *Timestamp) ProtoReflect() protoreflect.Message { + mi := &file_google_protobuf_timestamp_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. +func (*Timestamp) Descriptor() ([]byte, []int) { + return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} +} + +func (x *Timestamp) GetSeconds() int64 { + if x != nil { + return x.Seconds + } + return 0 +} + +func (x *Timestamp) GetNanos() int32 { + if x != nil { + return x.Nanos + } + return 0 +} + +var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor + +var file_google_protobuf_timestamp_proto_rawDesc = []byte{ + 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, + 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, + 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, + 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, + 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, + 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_protobuf_timestamp_proto_rawDescOnce sync.Once + file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc +) + +func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { + file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { + file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) + }) + return file_google_protobuf_timestamp_proto_rawDescData +} + +var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var 
file_google_protobuf_timestamp_proto_goTypes = []interface{}{ + (*Timestamp)(nil), // 0: google.protobuf.Timestamp +} +var file_google_protobuf_timestamp_proto_depIdxs = []int32{ + 0, // [0:0] is the sub-list for method output_type + 0, // [0:0] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_google_protobuf_timestamp_proto_init() } +func file_google_protobuf_timestamp_proto_init() { + if File_google_protobuf_timestamp_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Timestamp); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_protobuf_timestamp_proto_goTypes, + DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, + MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, + }.Build() + File_google_protobuf_timestamp_proto = out.File + file_google_protobuf_timestamp_proto_rawDesc = nil + file_google_protobuf_timestamp_proto_goTypes = nil + file_google_protobuf_timestamp_proto_depIdxs = nil +} diff --git a/vendor/gopkg.in/inf.v0/LICENSE b/vendor/gopkg.in/inf.v0/LICENSE new file mode 100644 index 0000000..87a5ced --- /dev/null +++ b/vendor/gopkg.in/inf.v0/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2012 Péter Surányi. Portions Copyright (c) 2009 The Go +Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
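The Duration and Timestamp doc comments vendored above describe the same seconds/nanos representation, but give the normalization rules only in pseudo code. As a quick illustration (not part of the vendored files), here is a minimal Go sketch of that arithmetic using nothing but the generated struct fields and getters that appear in this patch; the `elapsed` helper and the sample values are hypothetical:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
)

// elapsed mirrors "Example 1" from the Duration doc comment: subtract the two
// Timestamps field by field, then normalize so seconds and nanos carry the
// same sign.
func elapsed(start, end *timestamppb.Timestamp) *durationpb.Duration {
	d := &durationpb.Duration{
		Seconds: end.GetSeconds() - start.GetSeconds(),
		Nanos:   end.GetNanos() - start.GetNanos(),
	}
	if d.Seconds < 0 && d.Nanos > 0 {
		d.Seconds++
		d.Nanos -= 1000000000
	} else if d.Seconds > 0 && d.Nanos < 0 {
		d.Seconds--
		d.Nanos += 1000000000
	}
	return d
}

func main() {
	// Sample values for illustration only.
	base := time.Date(2020, 7, 27, 8, 15, 0, 0, time.UTC)
	start := &timestamppb.Timestamp{Seconds: base.Unix()}
	end := &timestamppb.Timestamp{Seconds: base.Unix() + 3, Nanos: 1}

	d := elapsed(start, end)
	// Per the JSON mapping described in the doc comment, this Duration would
	// serialize as "3.000000001s".
	fmt.Printf("seconds=%d nanos=%d\n", d.GetSeconds(), d.GetNanos())
}
```

The sketch relies only on the two generated structs and their getters as they appear in the vendored files above, so it does not assume any helper APIs beyond what this change adds.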
diff --git a/vendor/gopkg.in/inf.v0/dec.go b/vendor/gopkg.in/inf.v0/dec.go new file mode 100644 index 0000000..26548b6 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/dec.go @@ -0,0 +1,615 @@ +// Package inf (type inf.Dec) implements "infinite-precision" decimal +// arithmetic. +// "Infinite precision" describes two characteristics: practically unlimited +// precision for decimal number representation and no support for calculating +// with any specific fixed precision. +// (Although there is no practical limit on precision, inf.Dec can only +// represent finite decimals.) +// +// This package is currently in experimental stage and the API may change. +// +// This package does NOT support: +// - rounding to specific precisions (as opposed to specific decimal positions) +// - the notion of context (each rounding must be explicit) +// - NaN and Inf values, and distinguishing between positive and negative zero +// - conversions to and from float32/64 types +// +// Features considered for possible addition: +// + formatting options +// + Exp method +// + combined operations such as AddRound/MulAdd etc +// + exchanging data in decimal32/64/128 formats +// +package inf // import "gopkg.in/inf.v0" + +// TODO: +// - avoid excessive deep copying (quo and rounders) + +import ( + "fmt" + "io" + "math/big" + "strings" +) + +// A Dec represents a signed arbitrary-precision decimal. +// It is a combination of a sign, an arbitrary-precision integer coefficient +// value, and a signed fixed-precision exponent value. +// The sign and the coefficient value are handled together as a signed value +// and referred to as the unscaled value. +// (Positive and negative zero values are not distinguished.) +// Since the exponent is most commonly non-positive, it is handled in negated +// form and referred to as scale. +// +// The mathematical value of a Dec equals: +// +// unscaled * 10**(-scale) +// +// Note that different Dec representations may have equal mathematical values. +// +// unscaled scale String() +// ------------------------- +// 0 0 "0" +// 0 2 "0.00" +// 0 -2 "0" +// 1 0 "1" +// 100 2 "1.00" +// 10 0 "10" +// 1 -1 "10" +// +// The zero value for a Dec represents the value 0 with scale 0. +// +// Operations are typically performed through the *Dec type. +// The semantics of the assignment operation "=" for "bare" Dec values is +// undefined and should not be relied on. +// +// Methods are typically of the form: +// +// func (z *Dec) Op(x, y *Dec) *Dec +// +// and implement operations z = x Op y with the result as receiver; if it +// is one of the operands it may be overwritten (and its memory reused). +// To enable chaining of operations, the result is also returned. Methods +// returning a result other than *Dec take one of the operands as the receiver. +// +// A "bare" Quo method (quotient / division operation) is not provided, as the +// result is not always a finite decimal and thus in general cannot be +// represented as a Dec. +// Instead, in the common case when rounding is (potentially) necessary, +// QuoRound should be used with a Scale and a Rounder. +// QuoExact or QuoRound with RoundExact can be used in the special cases when it +// is known that the result is always a finite decimal. +// +type Dec struct { + unscaled big.Int + scale Scale +} + +// Scale represents the type used for the scale of a Dec. +type Scale int32 + +const scaleSize = 4 // bytes in a Scale value + +// Scaler represents a method for obtaining the scale to use for the result of +// an operation on x and y. 
+type scaler interface { + Scale(x *Dec, y *Dec) Scale +} + +var bigInt = [...]*big.Int{ + big.NewInt(0), big.NewInt(1), big.NewInt(2), big.NewInt(3), big.NewInt(4), + big.NewInt(5), big.NewInt(6), big.NewInt(7), big.NewInt(8), big.NewInt(9), + big.NewInt(10), +} + +var exp10cache [64]big.Int = func() [64]big.Int { + e10, e10i := [64]big.Int{}, bigInt[1] + for i := range e10 { + e10[i].Set(e10i) + e10i = new(big.Int).Mul(e10i, bigInt[10]) + } + return e10 +}() + +// NewDec allocates and returns a new Dec set to the given int64 unscaled value +// and scale. +func NewDec(unscaled int64, scale Scale) *Dec { + return new(Dec).SetUnscaled(unscaled).SetScale(scale) +} + +// NewDecBig allocates and returns a new Dec set to the given *big.Int unscaled +// value and scale. +func NewDecBig(unscaled *big.Int, scale Scale) *Dec { + return new(Dec).SetUnscaledBig(unscaled).SetScale(scale) +} + +// Scale returns the scale of x. +func (x *Dec) Scale() Scale { + return x.scale +} + +// Unscaled returns the unscaled value of x for u and true for ok when the +// unscaled value can be represented as int64; otherwise it returns an undefined +// int64 value for u and false for ok. Use x.UnscaledBig().Int64() to avoid +// checking the validity of the value when the check is known to be redundant. +func (x *Dec) Unscaled() (u int64, ok bool) { + u = x.unscaled.Int64() + var i big.Int + ok = i.SetInt64(u).Cmp(&x.unscaled) == 0 + return +} + +// UnscaledBig returns the unscaled value of x as *big.Int. +func (x *Dec) UnscaledBig() *big.Int { + return &x.unscaled +} + +// SetScale sets the scale of z, with the unscaled value unchanged, and returns +// z. +// The mathematical value of the Dec changes as if it was multiplied by +// 10**(oldscale-scale). +func (z *Dec) SetScale(scale Scale) *Dec { + z.scale = scale + return z +} + +// SetUnscaled sets the unscaled value of z, with the scale unchanged, and +// returns z. +func (z *Dec) SetUnscaled(unscaled int64) *Dec { + z.unscaled.SetInt64(unscaled) + return z +} + +// SetUnscaledBig sets the unscaled value of z, with the scale unchanged, and +// returns z. +func (z *Dec) SetUnscaledBig(unscaled *big.Int) *Dec { + z.unscaled.Set(unscaled) + return z +} + +// Set sets z to the value of x and returns z. +// It does nothing if z == x. +func (z *Dec) Set(x *Dec) *Dec { + if z != x { + z.SetUnscaledBig(x.UnscaledBig()) + z.SetScale(x.Scale()) + } + return z +} + +// Sign returns: +// +// -1 if x < 0 +// 0 if x == 0 +// +1 if x > 0 +// +func (x *Dec) Sign() int { + return x.UnscaledBig().Sign() +} + +// Neg sets z to -x and returns z. +func (z *Dec) Neg(x *Dec) *Dec { + z.SetScale(x.Scale()) + z.UnscaledBig().Neg(x.UnscaledBig()) + return z +} + +// Cmp compares x and y and returns: +// +// -1 if x < y +// 0 if x == y +// +1 if x > y +// +func (x *Dec) Cmp(y *Dec) int { + xx, yy := upscale(x, y) + return xx.UnscaledBig().Cmp(yy.UnscaledBig()) +} + +// Abs sets z to |x| (the absolute value of x) and returns z. +func (z *Dec) Abs(x *Dec) *Dec { + z.SetScale(x.Scale()) + z.UnscaledBig().Abs(x.UnscaledBig()) + return z +} + +// Add sets z to the sum x+y and returns z. +// The scale of z is the greater of the scales of x and y. +func (z *Dec) Add(x, y *Dec) *Dec { + xx, yy := upscale(x, y) + z.SetScale(xx.Scale()) + z.UnscaledBig().Add(xx.UnscaledBig(), yy.UnscaledBig()) + return z +} + +// Sub sets z to the difference x-y and returns z. +// The scale of z is the greater of the scales of x and y. 
+func (z *Dec) Sub(x, y *Dec) *Dec { + xx, yy := upscale(x, y) + z.SetScale(xx.Scale()) + z.UnscaledBig().Sub(xx.UnscaledBig(), yy.UnscaledBig()) + return z +} + +// Mul sets z to the product x*y and returns z. +// The scale of z is the sum of the scales of x and y. +func (z *Dec) Mul(x, y *Dec) *Dec { + z.SetScale(x.Scale() + y.Scale()) + z.UnscaledBig().Mul(x.UnscaledBig(), y.UnscaledBig()) + return z +} + +// Round sets z to the value of x rounded to Scale s using Rounder r, and +// returns z. +func (z *Dec) Round(x *Dec, s Scale, r Rounder) *Dec { + return z.QuoRound(x, NewDec(1, 0), s, r) +} + +// QuoRound sets z to the quotient x/y, rounded using the given Rounder to the +// specified scale. +// +// If the rounder is RoundExact but the result can not be expressed exactly at +// the specified scale, QuoRound returns nil, and the value of z is undefined. +// +// There is no corresponding Div method; the equivalent can be achieved through +// the choice of Rounder used. +// +func (z *Dec) QuoRound(x, y *Dec, s Scale, r Rounder) *Dec { + return z.quo(x, y, sclr{s}, r) +} + +func (z *Dec) quo(x, y *Dec, s scaler, r Rounder) *Dec { + scl := s.Scale(x, y) + var zzz *Dec + if r.UseRemainder() { + zz, rA, rB := new(Dec).quoRem(x, y, scl, true, new(big.Int), new(big.Int)) + zzz = r.Round(new(Dec), zz, rA, rB) + } else { + zz, _, _ := new(Dec).quoRem(x, y, scl, false, nil, nil) + zzz = r.Round(new(Dec), zz, nil, nil) + } + if zzz == nil { + return nil + } + return z.Set(zzz) +} + +// QuoExact sets z to the quotient x/y and returns z when x/y is a finite +// decimal. Otherwise it returns nil and the value of z is undefined. +// +// The scale of a non-nil result is "x.Scale() - y.Scale()" or greater; it is +// calculated so that the remainder will be zero whenever x/y is a finite +// decimal. +func (z *Dec) QuoExact(x, y *Dec) *Dec { + return z.quo(x, y, scaleQuoExact{}, RoundExact) +} + +// quoRem sets z to the quotient x/y with the scale s, and if useRem is true, +// it sets remNum and remDen to the numerator and denominator of the remainder. +// It returns z, remNum and remDen. +// +// The remainder is normalized to the range -1 < r < 1 to simplify rounding; +// that is, the results satisfy the following equation: +// +// x / y = z + (remNum/remDen) * 10**(-z.Scale()) +// +// See Rounder for more details about rounding. 
+// +func (z *Dec) quoRem(x, y *Dec, s Scale, useRem bool, + remNum, remDen *big.Int) (*Dec, *big.Int, *big.Int) { + // difference (required adjustment) compared to "canonical" result scale + shift := s - (x.Scale() - y.Scale()) + // pointers to adjusted unscaled dividend and divisor + var ix, iy *big.Int + switch { + case shift > 0: + // increased scale: decimal-shift dividend left + ix = new(big.Int).Mul(x.UnscaledBig(), exp10(shift)) + iy = y.UnscaledBig() + case shift < 0: + // decreased scale: decimal-shift divisor left + ix = x.UnscaledBig() + iy = new(big.Int).Mul(y.UnscaledBig(), exp10(-shift)) + default: + ix = x.UnscaledBig() + iy = y.UnscaledBig() + } + // save a copy of iy in case it to be overwritten with the result + iy2 := iy + if iy == z.UnscaledBig() { + iy2 = new(big.Int).Set(iy) + } + // set scale + z.SetScale(s) + // set unscaled + if useRem { + // Int division + _, intr := z.UnscaledBig().QuoRem(ix, iy, new(big.Int)) + // set remainder + remNum.Set(intr) + remDen.Set(iy2) + } else { + z.UnscaledBig().Quo(ix, iy) + } + return z, remNum, remDen +} + +type sclr struct{ s Scale } + +func (s sclr) Scale(x, y *Dec) Scale { + return s.s +} + +type scaleQuoExact struct{} + +func (sqe scaleQuoExact) Scale(x, y *Dec) Scale { + rem := new(big.Rat).SetFrac(x.UnscaledBig(), y.UnscaledBig()) + f2, f5 := factor2(rem.Denom()), factor(rem.Denom(), bigInt[5]) + var f10 Scale + if f2 > f5 { + f10 = Scale(f2) + } else { + f10 = Scale(f5) + } + return x.Scale() - y.Scale() + f10 +} + +func factor(n *big.Int, p *big.Int) int { + // could be improved for large factors + d, f := n, 0 + for { + dd, dm := new(big.Int).DivMod(d, p, new(big.Int)) + if dm.Sign() == 0 { + f++ + d = dd + } else { + break + } + } + return f +} + +func factor2(n *big.Int) int { + // could be improved for large factors + f := 0 + for ; n.Bit(f) == 0; f++ { + } + return f +} + +func upscale(a, b *Dec) (*Dec, *Dec) { + if a.Scale() == b.Scale() { + return a, b + } + if a.Scale() > b.Scale() { + bb := b.rescale(a.Scale()) + return a, bb + } + aa := a.rescale(b.Scale()) + return aa, b +} + +func exp10(x Scale) *big.Int { + if int(x) < len(exp10cache) { + return &exp10cache[int(x)] + } + return new(big.Int).Exp(bigInt[10], big.NewInt(int64(x)), nil) +} + +func (x *Dec) rescale(newScale Scale) *Dec { + shift := newScale - x.Scale() + switch { + case shift < 0: + e := exp10(-shift) + return NewDecBig(new(big.Int).Quo(x.UnscaledBig(), e), newScale) + case shift > 0: + e := exp10(shift) + return NewDecBig(new(big.Int).Mul(x.UnscaledBig(), e), newScale) + } + return x +} + +var zeros = []byte("00000000000000000000000000000000" + + "00000000000000000000000000000000") +var lzeros = Scale(len(zeros)) + +func appendZeros(s []byte, n Scale) []byte { + for i := Scale(0); i < n; i += lzeros { + if n > i+lzeros { + s = append(s, zeros...) + } else { + s = append(s, zeros[0:n-i]...) + } + } + return s +} + +func (x *Dec) String() string { + if x == nil { + return "" + } + scale := x.Scale() + s := []byte(x.UnscaledBig().String()) + if scale <= 0 { + if scale != 0 && x.unscaled.Sign() != 0 { + s = appendZeros(s, -scale) + } + return string(s) + } + negbit := Scale(-((x.Sign() - 1) / 2)) + // scale > 0 + lens := Scale(len(s)) + if lens-negbit <= scale { + ss := make([]byte, 0, scale+2) + if negbit == 1 { + ss = append(ss, '-') + } + ss = append(ss, '0', '.') + ss = appendZeros(ss, scale-lens+negbit) + ss = append(ss, s[negbit:]...) 
+ return string(ss) + } + // lens > scale + ss := make([]byte, 0, lens+1) + ss = append(ss, s[:lens-scale]...) + ss = append(ss, '.') + ss = append(ss, s[lens-scale:]...) + return string(ss) +} + +// Format is a support routine for fmt.Formatter. It accepts the decimal +// formats 'd' and 'f', and handles both equivalently. +// Width, precision, flags and bases 2, 8, 16 are not supported. +func (x *Dec) Format(s fmt.State, ch rune) { + if ch != 'd' && ch != 'f' && ch != 'v' && ch != 's' { + fmt.Fprintf(s, "%%!%c(dec.Dec=%s)", ch, x.String()) + return + } + fmt.Fprintf(s, x.String()) +} + +func (z *Dec) scan(r io.RuneScanner) (*Dec, error) { + unscaled := make([]byte, 0, 256) // collects chars of unscaled as bytes + dp, dg := -1, -1 // indexes of decimal point, first digit +loop: + for { + ch, _, err := r.ReadRune() + if err == io.EOF { + break loop + } + if err != nil { + return nil, err + } + switch { + case ch == '+' || ch == '-': + if len(unscaled) > 0 || dp >= 0 { // must be first character + r.UnreadRune() + break loop + } + case ch == '.': + if dp >= 0 { + r.UnreadRune() + break loop + } + dp = len(unscaled) + continue // don't add to unscaled + case ch >= '0' && ch <= '9': + if dg == -1 { + dg = len(unscaled) + } + default: + r.UnreadRune() + break loop + } + unscaled = append(unscaled, byte(ch)) + } + if dg == -1 { + return nil, fmt.Errorf("no digits read") + } + if dp >= 0 { + z.SetScale(Scale(len(unscaled) - dp)) + } else { + z.SetScale(0) + } + _, ok := z.UnscaledBig().SetString(string(unscaled), 10) + if !ok { + return nil, fmt.Errorf("invalid decimal: %s", string(unscaled)) + } + return z, nil +} + +// SetString sets z to the value of s, interpreted as a decimal (base 10), +// and returns z and a boolean indicating success. The scale of z is the +// number of digits after the decimal point (including any trailing 0s), +// or 0 if there is no decimal point. If SetString fails, the value of z +// is undefined but the returned value is nil. +func (z *Dec) SetString(s string) (*Dec, bool) { + r := strings.NewReader(s) + _, err := z.scan(r) + if err != nil { + return nil, false + } + _, _, err = r.ReadRune() + if err != io.EOF { + return nil, false + } + // err == io.EOF => scan consumed all of s + return z, true +} + +// Scan is a support routine for fmt.Scanner; it sets z to the value of +// the scanned number. It accepts the decimal formats 'd' and 'f', and +// handles both equivalently. Bases 2, 8, 16 are not supported. +// The scale of z is the number of digits after the decimal point +// (including any trailing 0s), or 0 if there is no decimal point. +func (z *Dec) Scan(s fmt.ScanState, ch rune) error { + if ch != 'd' && ch != 'f' && ch != 's' && ch != 'v' { + return fmt.Errorf("Dec.Scan: invalid verb '%c'", ch) + } + s.SkipSpace() + _, err := z.scan(s) + return err +} + +// Gob encoding version +const decGobVersion byte = 1 + +func scaleBytes(s Scale) []byte { + buf := make([]byte, scaleSize) + i := scaleSize + for j := 0; j < scaleSize; j++ { + i-- + buf[i] = byte(s) + s >>= 8 + } + return buf +} + +func scale(b []byte) (s Scale) { + for j := 0; j < scaleSize; j++ { + s <<= 8 + s |= Scale(b[j]) + } + return +} + +// GobEncode implements the gob.GobEncoder interface. +func (x *Dec) GobEncode() ([]byte, error) { + buf, err := x.UnscaledBig().GobEncode() + if err != nil { + return nil, err + } + buf = append(append(buf, scaleBytes(x.Scale())...), decGobVersion) + return buf, nil +} + +// GobDecode implements the gob.GobDecoder interface. 
+func (z *Dec) GobDecode(buf []byte) error { + if len(buf) == 0 { + return fmt.Errorf("Dec.GobDecode: no data") + } + b := buf[len(buf)-1] + if b != decGobVersion { + return fmt.Errorf("Dec.GobDecode: encoding version %d not supported", b) + } + l := len(buf) - scaleSize - 1 + err := z.UnscaledBig().GobDecode(buf[:l]) + if err != nil { + return err + } + z.SetScale(scale(buf[l : l+scaleSize])) + return nil +} + +// MarshalText implements the encoding.TextMarshaler interface. +func (x *Dec) MarshalText() ([]byte, error) { + return []byte(x.String()), nil +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +func (z *Dec) UnmarshalText(data []byte) error { + _, ok := z.SetString(string(data)) + if !ok { + return fmt.Errorf("invalid inf.Dec") + } + return nil +} diff --git a/vendor/gopkg.in/inf.v0/rounder.go b/vendor/gopkg.in/inf.v0/rounder.go new file mode 100644 index 0000000..3a97ef5 --- /dev/null +++ b/vendor/gopkg.in/inf.v0/rounder.go @@ -0,0 +1,145 @@ +package inf + +import ( + "math/big" +) + +// Rounder represents a method for rounding the (possibly infinite decimal) +// result of a division to a finite Dec. It is used by Dec.Round() and +// Dec.Quo(). +// +// See the Example for results of using each Rounder with some sample values. +// +type Rounder rounder + +// See http://speleotrove.com/decimal/damodel.html#refround for more detailed +// definitions of these rounding modes. +var ( + RoundDown Rounder // towards 0 + RoundUp Rounder // away from 0 + RoundFloor Rounder // towards -infinity + RoundCeil Rounder // towards +infinity + RoundHalfDown Rounder // to nearest; towards 0 if same distance + RoundHalfUp Rounder // to nearest; away from 0 if same distance + RoundHalfEven Rounder // to nearest; even last digit if same distance +) + +// RoundExact is to be used in the case when rounding is not necessary. +// When used with Quo or Round, it returns the result verbatim when it can be +// expressed exactly with the given precision, and it returns nil otherwise. +// QuoExact is a shorthand for using Quo with RoundExact. +var RoundExact Rounder + +type rounder interface { + + // When UseRemainder() returns true, the Round() method is passed the + // remainder of the division, expressed as the numerator and denominator of + // a rational. + UseRemainder() bool + + // Round sets the rounded value of a quotient to z, and returns z. + // quo is rounded down (truncated towards zero) to the scale obtained from + // the Scaler in Quo(). + // + // When the remainder is not used, remNum and remDen are nil. + // When used, the remainder is normalized between -1 and 1; that is: + // + // -|remDen| < remNum < |remDen| + // + // remDen has the same sign as y, and remNum is zero or has the same sign + // as x. 
+ Round(z, quo *Dec, remNum, remDen *big.Int) *Dec +} + +type rndr struct { + useRem bool + round func(z, quo *Dec, remNum, remDen *big.Int) *Dec +} + +func (r rndr) UseRemainder() bool { + return r.useRem +} + +func (r rndr) Round(z, quo *Dec, remNum, remDen *big.Int) *Dec { + return r.round(z, quo, remNum, remDen) +} + +var intSign = []*big.Int{big.NewInt(-1), big.NewInt(0), big.NewInt(1)} + +func roundHalf(f func(c int, odd uint) (roundUp bool)) func(z, q *Dec, rA, rB *big.Int) *Dec { + return func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + brA, brB := rA.BitLen(), rB.BitLen() + if brA < brB-1 { + // brA < brB-1 => |rA| < |rB/2| + return z + } + roundUp := false + srA, srB := rA.Sign(), rB.Sign() + s := srA * srB + if brA == brB-1 { + rA2 := new(big.Int).Lsh(rA, 1) + if s < 0 { + rA2.Neg(rA2) + } + roundUp = f(rA2.Cmp(rB)*srB, z.UnscaledBig().Bit(0)) + } else { + // brA > brB-1 => |rA| > |rB/2| + roundUp = true + } + if roundUp { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[s+1]) + } + return z + } +} + +func init() { + RoundExact = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + if rA.Sign() != 0 { + return nil + } + return z.Set(q) + }} + RoundDown = rndr{false, + func(z, q *Dec, rA, rB *big.Int) *Dec { + return z.Set(q) + }} + RoundUp = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign() != 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[rA.Sign()*rB.Sign()+1]) + } + return z + }} + RoundFloor = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign()*rB.Sign() < 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[0]) + } + return z + }} + RoundCeil = rndr{true, + func(z, q *Dec, rA, rB *big.Int) *Dec { + z.Set(q) + if rA.Sign()*rB.Sign() > 0 { + z.UnscaledBig().Add(z.UnscaledBig(), intSign[2]) + } + return z + }} + RoundHalfDown = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c > 0 + })} + RoundHalfUp = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c >= 0 + })} + RoundHalfEven = rndr{true, roundHalf( + func(c int, odd uint) bool { + return c > 0 || c == 0 && odd == 1 + })} +} diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml index 9f55693..055480b 100644 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -1,12 +1,16 @@ language: go go: - - 1.4 - - 1.5 - - 1.6 - - 1.7 - - 1.8 - - 1.9 - - tip + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go index e4e56e2..129bc2a 100644 --- a/vendor/gopkg.in/yaml.v2/decode.go +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -229,6 +229,10 @@ type decoder struct { mapType reflect.Type terrors []string strict bool + + decodeCount int + aliasCount int + aliasDepth int } var ( @@ -314,7 +318,43 @@ func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unm return out, false, false } +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - 
alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } switch n.kind { case documentNode: return d.document(n, out) @@ -353,7 +393,9 @@ func (d *decoder) alias(n *node, out reflect.Value) (good bool) { failf("anchor '%s' value contains itself", n.value) } d.aliases[n] = true + d.aliasDepth++ good = d.unmarshal(n.alias, out) + d.aliasDepth-- delete(d.aliases, n) return good } @@ -746,8 +788,7 @@ func (d *decoder) merge(n *node, out reflect.Value) { case mappingNode: d.unmarshal(n, out) case aliasNode: - an, ok := d.doc.anchors[n.value] - if ok && an.kind != mappingNode { + if n.alias != nil && n.alias.kind != mappingNode { failWantMap() } d.unmarshal(n, out) @@ -756,8 +797,7 @@ func (d *decoder) merge(n *node, out reflect.Value) { for i := len(n.children) - 1; i >= 0; i-- { ni := n.children[i] if ni.kind == aliasNode { - an, ok := d.doc.anchors[ni.value] - if ok && an.kind != mappingNode { + if ni.alias != nil && ni.alias.kind != mappingNode { failWantMap() } } else if ni.kind != mappingNode { diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go index 6c151db..4120e0c 100644 --- a/vendor/gopkg.in/yaml.v2/resolve.go +++ b/vendor/gopkg.in/yaml.v2/resolve.go @@ -81,7 +81,7 @@ func resolvableTag(tag string) bool { return false } -var yamlStyleFloat = regexp.MustCompile(`^[-+]?[0-9]*\.?[0-9]+([eE][-+][0-9]+)?$`) +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) func resolve(tag string, in string) (rtag string, out interface{}) { if !resolvableTag(tag) { diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go index 077fd1d..0b9bb60 100644 --- a/vendor/gopkg.in/yaml.v2/scannerc.go +++ b/vendor/gopkg.in/yaml.v2/scannerc.go @@ -626,30 +626,17 @@ func trace(args ...interface{}) func() { func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { // While we need more tokens to fetch, do it. for { - // Check if we really need to fetch more tokens. - need_more_tokens := false - - if parser.tokens_head == len(parser.tokens) { - // Queue is empty. - need_more_tokens = true - } else { - // Check if any potential simple key may occupy the head position. - if !yaml_parser_stale_simple_keys(parser) { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. 
+ head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { return false + } else if !valid { + break } - - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - if simple_key.possible && simple_key.token_number == parser.tokens_parsed { - need_more_tokens = true - break - } - } - } - - // We are finished. - if !need_more_tokens { - break } // Fetch the next token. if !yaml_parser_fetch_next_token(parser) { @@ -678,11 +665,6 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { return false } - // Remove obsolete potential simple keys. - if !yaml_parser_stale_simple_keys(parser) { - return false - } - // Check the indentation level against the current column. if !yaml_parser_unroll_indent(parser, parser.mark.column) { return false @@ -837,29 +819,30 @@ func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { "found character that cannot start any token") } -// Check the list of potential simple keys and remove the positions that -// cannot contain simple keys anymore. -func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool { - // Check for a potential simple key for each flow level. - for i := range parser.simple_keys { - simple_key := &parser.simple_keys[i] - - // The specification requires that a simple key - // - // - is limited to a single line, - // - is shorter than 1024 characters. - if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) { - - // Check if the potential simple key to be removed is required. - if simple_key.required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. 
+ if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") } + simple_key.possible = false + return false, true } - return true + return true, true } // Check if a simple key may start at the current position and add it if @@ -879,13 +862,14 @@ func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { possible: true, required: required, token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, } - simple_key.mark = parser.mark if !yaml_parser_remove_simple_key(parser) { return false } parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 } return true } @@ -900,19 +884,33 @@ func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { "while scanning a simple key", parser.simple_keys[i].mark, "could not find expected ':'") } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) } - // Remove the key from the stack. - parser.simple_keys[i].possible = false return true } +// max_flow_level limits the flow_level +const max_flow_level = 10000 + // Increase the flow level and resize the simple key list if needed. func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) // Increase the flow level. parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } return true } @@ -920,11 +918,16 @@ func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { if parser.flow_level > 0 { parser.flow_level-- - parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1] + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] } return true } +// max_indents limits the indents stack size +const max_indents = 10000 + // Push the current indentation level to the stack and set the new level // the current column is greater than the indentation level. In this case, // append or insert the specified token into the token queue. @@ -939,6 +942,11 @@ func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml // indentation level. parser.indents = append(parser.indents, parser.indent) parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } // Create a token and insert it into the queue. token := yaml_token_t{ @@ -989,6 +997,8 @@ func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { // Initialize the simple key stack. 
parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + parser.simple_keys_by_tok = make(map[int]int) + // A simple key is allowed at the beginning of the stream. parser.simple_key_allowed = true @@ -1270,7 +1280,11 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool { simple_key := &parser.simple_keys[len(parser.simple_keys)-1] // Have we found a simple key? - if simple_key.possible { + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + // Create the KEY token and insert it into the queue. token := yaml_token_t{ typ: yaml_KEY_TOKEN, @@ -1288,6 +1302,7 @@ func yaml_parser_fetch_value(parser *yaml_parser_t) bool { // Remove the simple key. simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) // A simple key cannot follow another simple key. parser.simple_key_allowed = false diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index de85aa4..89650e2 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -89,7 +89,7 @@ func UnmarshalStrict(in []byte, out interface{}) (err error) { return unmarshal(in, out, true) } -// A Decorder reads and decodes YAML values from an input stream. +// A Decoder reads and decodes YAML values from an input stream. type Decoder struct { strict bool parser *parser diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go index e25cee5..f6a9c8e 100644 --- a/vendor/gopkg.in/yaml.v2/yamlh.go +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -579,6 +579,7 @@ type yaml_parser_t struct { simple_key_allowed bool // May a simple key occur at the current position? simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number // Parser stuff diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml new file mode 100644 index 0000000..a130fe8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/.travis.yml @@ -0,0 +1,17 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "1.14.x" + - "tip" + +go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE new file mode 100644 index 0000000..2683e4b --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/LICENSE @@ -0,0 +1,50 @@ + +This project is covered by two different licenses: MIT and Apache. + +#### MIT License #### + +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original MIT license, with the additional +copyright staring in 2011 when the project was ported over: + + apic.go emitterc.go parserc.go readerc.go scannerc.go + writerc.go yamlh.go yamlprivateh.go + +Copyright (c) 2006-2010 Kirill Simonov +Copyright (c) 2006-2011 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +### Apache License ### + +All the remaining project files are covered by the Apache license: + +Copyright (c) 2011-2019 Canonical Ltd + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md new file mode 100644 index 0000000..08eb1ba --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/README.md @@ -0,0 +1,150 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.2, but preserves some behavior +from 1.1 for backwards compatibility. + +Specifically, as of v3 of the yaml package: + + - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being + decoded into a typed bool value. Otherwise they behave as a string. Booleans + in YAML 1.2 are _true/false_ only. + - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_ + as specified in YAML 1.2, because most parsers still use the old format. + Octals in the _0o777_ format are supported though, so new files work. + - Does not support base-60 floats. These are gone from YAML 1.2, and were + actually never supported by this package as it's clearly a poor choice. + +The package also supports anchors, tags, and map merging.
Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v3*. + +To install it, run: + + go get gopkg.in/yaml.v3 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) + +API stability +------------- + +The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v3" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go new file mode 100644 index 0000000..ae7d049 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/apic.go @@ -0,0 +1,747 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. 
+ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. +func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + best_width: -1, + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. 
+func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. +// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? 
octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +// Create ALIAS. +func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + anchor: anchor, + } + return true +} + +// Create SCALAR. +func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. +// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. 
+// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go new file mode 100644 index 0000000..21c0dac --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/decode.go @@ -0,0 +1,948 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. + +type parser struct { + parser yaml_parser_t + event yaml_event_t + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + if len(b) == 0 { + b = []byte{'\n'} + } + yaml_parser_set_input_string(&p.parser, b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{} + if !yaml_parser_initialize(&p.parser) { + panic("failed to initialize YAML emitter") + } + yaml_parser_set_input_reader(&p.parser, r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(yaml_STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.typ != yaml_NO_EVENT { + yaml_event_delete(&p.event) + } + yaml_parser_delete(&p.parser) +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e yaml_event_type_t) { + if p.event.typ == yaml_NO_EVENT { + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + } + if p.event.typ == yaml_STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.typ != e { + p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) + p.fail() + } + yaml_event_delete(&p.event) + p.event.typ = yaml_NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. 
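newParserFromReader above is the streaming entry point behind the package's public Decoder. A small sketch of how that is typically used, decoding one document per call until io.EOF; this is illustrative only and not part of the vendored source.

```Go
package main

import (
	"errors"
	"fmt"
	"io"
	"log"
	"strings"

	"gopkg.in/yaml.v3"
)

func main() {
	// Two YAML documents in one stream; each Decode call consumes one.
	stream := strings.NewReader("name: master\n---\nname: replica\n")

	dec := yaml.NewDecoder(stream)
	for {
		var doc map[string]string
		err := dec.Decode(&doc)
		if errors.Is(err, io.EOF) {
			break
		}
		if err != nil {
			log.Fatalf("error: %v", err)
		}
		fmt.Println(doc["name"])
	}
}
```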
+func (p *parser) peek() yaml_event_type_t { + if p.event.typ != yaml_NO_EVENT { + return p.event.typ + } + if !yaml_parser_parse(&p.parser, &p.event) { + p.fail() + } + return p.event.typ +} + +func (p *parser) fail() { + var where string + var line int + if p.parser.context_mark.line != 0 { + line = p.parser.context_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } else if p.parser.problem_mark.line != 0 { + line = p.parser.problem_mark.line + // Scanner errors don't iterate line before returning error + if p.parser.error == yaml_SCANNER_ERROR { + line++ + } + } + if line != 0 { + where = "line " + strconv.Itoa(line) + ": " + } + var msg string + if len(p.parser.problem) > 0 { + msg = p.parser.problem + } else { + msg = "unknown problem parsing YAML content" + } + failf("%s%s", where, msg) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case yaml_SCALAR_EVENT: + return p.scalar() + case yaml_ALIAS_EVENT: + return p.alias() + case yaml_MAPPING_START_EVENT: + return p.mapping() + case yaml_SEQUENCE_START_EVENT: + return p.sequence() + case yaml_DOCUMENT_START_EVENT: + return p.document() + case yaml_STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + case yaml_TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" 
{ + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.start_mark.line + 1 + n.Column = p.event.start_mark.column + 1 + n.HeadComment = string(p.event.head_comment) + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == yaml_DOCUMENT_END_EVENT { + n.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + var parsedStyle = p.event.scalar_style() + var nodeStyle Style + switch { + case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + var nodeValue = string(p.event.value) + var nodeTag = string(p.event.tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.tag), "") + if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.tag), "") + block := true + if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. 
+ if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == yaml_TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.foot_comment) + } + p.expect(yaml_TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.line_comment) + n.FootComment = string(p.event.foot_comment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []string + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]interface{}{}) + generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = generalMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
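callUnmarshaler above is the hook through which a type's own UnmarshalYAML method receives the raw *Node. A hedged sketch of implementing that interface from the user side follows; it is illustrative only and not part of the vendored files, and the Port type with its host:port handling is invented for the example.

```Go
package main

import (
	"fmt"
	"log"
	"strconv"
	"strings"

	"gopkg.in/yaml.v3"
)

// Port accepts either a bare integer or a "host:port" string.
type Port int

// UnmarshalYAML implements the node-based Unmarshaler interface that
// callUnmarshaler invokes during decoding.
func (p *Port) UnmarshalYAML(value *yaml.Node) error {
	var asInt int
	if err := value.Decode(&asInt); err == nil {
		*p = Port(asInt)
		return nil
	}
	var asString string
	if err := value.Decode(&asString); err != nil {
		return err
	}
	parts := strings.SplitN(asString, ":", 2)
	if len(parts) != 2 {
		return fmt.Errorf("port %q: expected an int or host:port", asString)
	}
	n, err := strconv.Atoi(parts[1])
	if err != nil {
		return err
	}
	*p = Port(n)
	return nil
}

func main() {
	var cfg struct {
		Redis Port `yaml:"redis"`
	}
	if err := yaml.Unmarshal([]byte("redis: localhost:6379"), &cfg); err != nil {
		log.Fatalf("error: %v", err)
	}
	fmt.Println(cfg.Redis) // 6379
}
```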
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag || n.Kind == 0 && n.IsZero() { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. 
+ isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Ptr: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i interface{}) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + d.merge(n.Content[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) { + out.SetMapIndex(k, e) + } + } + } + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + if n.Content[i].ShortTag() != strTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + d.merge(n.Content[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = 
out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *Node, out reflect.Value) { + switch n.Kind { + case MappingNode: + d.unmarshal(n, out) + case AliasNode: + if n.Alias != nil && n.Alias.Kind != MappingNode { + failWantMap() + } + d.unmarshal(n, out) + case SequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.Content) - 1; i >= 0; i-- { + ni := n.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go new file mode 100644 index 0000000..c29217e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/emitterc.go @@ -0,0 +1,2022 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. 
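The isMerge / merge handling at the end of decode.go above is what implements the classic `<<` merge-key pattern. A short sketch of the behaviour it produces (illustrative only, not part of the vendored files; the config keys are made up):

```Go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

var doc = []byte(`
defaults: &defaults
  host: localhost
  port: 6379

sentinel:
  <<: *defaults
  port: 26379
`)

func main() {
	var cfg struct {
		Sentinel map[string]interface{} `yaml:"sentinel"`
	}
	if err := yaml.Unmarshal(doc, &cfg); err != nil {
		log.Fatalf("error: %v", err)
	}
	// Keys from *defaults are merged in, and the explicit port wins.
	fmt.Println(cfg.Sentinel["host"], cfg.Sentinel["port"]) // localhost 26379
}
```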
+func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return true +} + +// Copy a character from a string into buffer. +func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + } + } + return true +} + +// State dispatcher. 
+func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) + + case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) + + case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical || true { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if len(emitter.head_comment) > 0 { + if !yaml_emitter_process_head_comment(emitter) { + return false 
+ } + if !put_break(emitter) { + return false + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + // [Go] Force document foot separation. + emitter.foot_indent = 0 + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.foot_indent = -1 + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. 
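+// A flow sequence is rendered as "[item, item, ...]": the first call writes the
+// opening '[' and raises the flow level, items after the first are separated by
+// ',', and SEQUENCE-END pops the saved indent/state and writes the closing ']'.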
+func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + if emitter.canonical && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.column == 0 || emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a flow key node. 
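+// A flow mapping is rendered as "{key: value, ...}": '{' opens on the first call,
+// MAPPING-END closes with '}', and outside canonical mode keys accepted by
+// yaml_emitter_check_simple_key are emitted inline, while other keys fall back
+// to the explicit '?' indicator.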
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first && !trail { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if !yaml_emitter_process_head_comment(emitter) { + return false + } + + if emitter.column == 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) + } else { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + } + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block item node. 
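+// Block sequence items are written one per line as "- item": the indent is raised
+// once on the first call, each item is introduced by a '-' indicator, and
+// SEQUENCE-END restores the previous indent and emitter state.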
+func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +// Expect a block key node. +func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if !yaml_emitter_process_head_comment(emitter) { + return false + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if len(emitter.line_comment) > 0 { + // [Go] A line comment was provided for the key. That's unusual as the + // scanner associates line comments with the value. Either way, + // save the line comment and render it appropriately later. + emitter.key_line_comment = emitter.line_comment + emitter.line_comment = nil + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + if len(emitter.key_line_comment) > 0 { + // [Go] A line comment was previously provided for the key. Handle it before + // the value so the inline comments are placed correctly. + if yaml_emitter_silent_nil_event(emitter, event) && len(emitter.line_comment) == 0 { + // Nothing other than the line comment will be written on the line. + emitter.line_comment = emitter.key_line_comment + emitter.key_line_comment = nil + } else { + // An actual value is coming, so emit the comment line. 
+ emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + if !yaml_emitter_process_line_comment(emitter) { + return false + } + emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment + // Indent in unless it's a block that will reindent anyway. + if event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || (event.typ != yaml_MAPPING_START_EVENT && event.typ != yaml_SEQUENCE_START_EVENT) { + emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { + return false + } + if !yaml_emitter_process_line_comment(emitter) { + return false + } + if !yaml_emitter_process_foot_comment(emitter) { + return false + } + return true +} + +func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
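+// After writing any anchor and tag, a SEQUENCE-START event selects the next
+// emitter state: flow style is used when already inside a flow collection, in
+// canonical mode, when the event requests flow style, or when the sequence is
+// empty; otherwise block style is used.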
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
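+// The requested style is downgraded until the analyzed scalar allows it: plain
+// scalars that the analysis marks as unsafe become single-quoted, disallowed
+// single-quoted or block scalars become double-quoted, and canonical output or
+// a multiline value in a simple key always forces double quotes.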
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
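+// Dispatches on the style chosen by yaml_emitter_select_scalar_style to the
+// plain, single-quoted, double-quoted, literal or folded writer; an unknown
+// style is a programming error and panics.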
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Write a head comment. +func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { + if len(emitter.tail_comment) > 0 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { + return false + } + emitter.tail_comment = emitter.tail_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + } + + if len(emitter.head_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.head_comment) { + return false + } + emitter.head_comment = emitter.head_comment[:0] + return true +} + +// Write an line comment. +func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { + if len(emitter.line_comment) == 0 { + return true + } + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !yaml_emitter_write_comment(emitter, emitter.line_comment) { + return false + } + emitter.line_comment = emitter.line_comment[:0] + return true +} + +// Write a foot comment. +func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { + if len(emitter.foot_comment) == 0 { + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { + return false + } + emitter.foot_comment = emitter.foot_comment[:0] + emitter.foot_indent = emitter.indent + if emitter.foot_indent < 0 { + emitter.foot_indent = 0 + } + return true +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' 
{ + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. +func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + tab_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if value[i] == '\t' { + tab_characters = true + } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. + preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || tab_characters || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. 
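+// Clears the cached anchor, tag and scalar data, copies any comments carried by
+// the event, and then runs the anchor/tag/scalar analyzers that are relevant for
+// the event type (ALIAS, SCALAR, SEQUENCE-START or MAPPING-START).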
+func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + if len(event.head_comment) > 0 { + emitter.head_comment = event.head_comment + } + if len(event.line_comment) > 0 { + emitter.line_comment = event.line_comment + } + if len(event.foot_comment) > 0 { + emitter.foot_comment = event.foot_comment + } + if len(event.tail_comment) > 0 { + emitter.tail_comment = event.tail_comment + } + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. +func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + if emitter.foot_indent == indent { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + //emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if len(value) > 0 && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if !put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + 
return false + } + //emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + //emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + //emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} + +func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { + breaks := false + pound := false + for i := 0; i < len(comment); { + if is_break(comment, i) { + if !write_break(emitter, comment, &i) { + return false + } + //emitter.indention = true + breaks = true + pound = false + } else { + if breaks && !yaml_emitter_write_indent(emitter) { + return false + } + if !pound { + if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { + return false + } + pound = true + } + if !write(emitter, comment, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + if !breaks && !put_break(emitter) { + return false + } + + emitter.whitespace = true + //emitter.indention = true + return true +} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go new file mode 100644 index 0000000..45e8d1e --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/encode.go @@ -0,0 +1,572 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
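+//
+// This file converts Go values into emitter events: the encoder type wraps a
+// yaml_emitter_t, walks values via reflection (marshal, structv, mapv, slicev,
+// stringv, ...), and feeds the resulting scalar, mapping and sequence events to
+// yaml_emitter_emit.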
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.best_indent = e.indent + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) 
+ } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshalled output valid for YAML 1.1 +// parsing. 
+func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + if e.flow { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = yaml_LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. + implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.event.head_comment = head + e.event.line_comment = line + e.event.foot_comment = foot + e.event.tail_comment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. 
+ if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + var tag = node.Tag + var stag = shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + yaml_document_end_event_initialize(&e.event, true) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := yaml_BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := yaml_BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.tail_comment = []byte(tail) + e.event.head_comment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + yaml_mapping_end_event_initialize(&e.event) + e.event.tail_comment = []byte(tail) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case AliasNode: + yaml_alias_event_initialize(&e.event, []byte(node.Value)) + e.event.head_comment = []byte(node.HeadComment) + e.event.line_comment = []byte(node.LineComment) + e.event.foot_comment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := yaml_PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = yaml_LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = yaml_FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case forceQuoting: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod new file mode 100644 index 0000000..f407ea3 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v3" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go new file mode 100644 index 0000000..ac66fcc --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/parserc.go @@ -0,0 +1,1249 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + token := &parser.tokens[parser.tokens_head] + yaml_parser_unfold_comments(parser, token) + return token + } + return nil +} + +// yaml_parser_unfold_comments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { + for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.typ == yaml_BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.head_comment) > 0 { + parser.head_comment = append(parser.head_comment, '\n') + } + parser.head_comment = append(parser.head_comment, comment.head...) + } + if len(comment.foot) > 0 { + if len(parser.foot_comment) > 0 { + parser.foot_comment = append(parser.foot_comment, '\n') + } + parser.foot_comment = append(parser.foot_comment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.line_comment) > 0 { + parser.line_comment = append(parser.line_comment, '\n') + } + parser.line_comment = append(parser.line_comment, comment.line...) + } + *comment = yaml_comment_t{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. 
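+// As a sketch of how these fields surface to callers: the problem text and
+// problem_mark recorded here are what the public API ultimately reports, so a
+// stray token typically produces an error along the lines of
+// "yaml: line N: did not find expected key". The exact formatting is handled
+// by the higher-level yaml package, not by this file.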
+func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. +func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + var head_comment []byte + if len(parser.head_comment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.head_comment) - 1; i > 0; i-- { + if parser.head_comment[i] == '\n' { + if i == len(parser.head_comment)-1 { + head_comment = parser.head_comment[:i] + parser.head_comment = parser.head_comment[i+1:] + break + } else if parser.head_comment[i-1] == '\n' { + head_comment = parser.head_comment[:i-1] + parser.head_comment = parser.head_comment[i+1:] + break + } + } + } + } + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + + head_comment: head_comment, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected ", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. 
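+		// As a concrete example of the branches above: in the stream
+		// "a: 1\n---\nb: 2\n" the first document is parsed through the
+		// implicit branch (no "---" indicator), the second through the
+		// explicit branch because of "---", and a trailing "..." line would
+		// later be consumed as an explicit end in parse_document_end.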
+ parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + yaml_parser_set_event_comments(parser, event) + if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { + event.foot_comment = event.head_comment + event.head_comment = nil + } + return true +} + +func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { + event.head_comment = parser.head_comment + event.line_comment = parser.line_comment + event.foot_comment = parser.foot_comment + parser.head_comment = nil + parser.line_comment = nil + parser.foot_comment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + yaml_parser_set_event_comments(parser, event) + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.head_comment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: 
false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + prior_head_len := len(parser.head_comment) + skip_token(parser) + yaml_parser_split_stem_comment(parser, prior_head_len) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. 
+// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { + if stem_len == 0 { + return + } + + token := peek_token(parser) + if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.head_comment[:stem_len] + if len(parser.head_comment) == stem_len { + parser.head_comment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) + } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = yaml_event_t{ + typ: yaml_TAIL_COMMENT_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + foot_comment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? 
+// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + yaml_parser_set_event_comments(parser, event) + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. +func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
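+// For example, given the directives
+//
+//	%YAML 1.1
+//	%TAG !e! tag:example.com,2000:app/
+//	---
+//	- !e!foo "bar"
+//
+// this function records the handle "!e!" with prefix
+// "tag:example.com,2000:app/", so the node tag !e!foo later resolves to
+// "tag:example.com,2000:app/foo" in yaml_parser_parse_node; the default
+// handles "!" and "!!" (expanding to "!" and "tag:yaml.org,2002:") are
+// always appended as well.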
+func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go new file mode 100644 index 0000000..b7de0a8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/readerc.go @@ -0,0 +1,434 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. 
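+//
+// Putting the pieces above together: a reader whose first bytes are
+// 0xFF 0xFE is detected by yaml_parser_determine_encoding as UTF-16LE via
+// its byte order mark, and the decoding loop below transcodes the input so
+// that parser.buffer always holds UTF-8 regardless of the source encoding.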
+func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. + if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. 
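+				// (Worked example for this decoder: the two-byte sequence
+				// 0xC3 0xA9, "é", has leading octet 0xC3, so width is 2 and
+				// the initial value is 0xC3&0x1F = 0x03; folding in the
+				// trailing octet gives (0x03<<6)+(0xA9&0x3F) = 0xE9, i.e.
+				// U+00E9, which passes the width/value consistency check.)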
+ if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. 
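+					// For instance, U+1F600 is encoded in UTF-16 as the
+					// surrogate pair 0xD83D 0xDE00; the formula below
+					// reconstructs 0x10000 + ((0xD83D&0x3FF)<<10) +
+					// (0xDE00&0x3FF) = 0x1F600.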
+ if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. + if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go new file mode 100644 index 0000000..64ae888 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/resolve.go @@ -0,0 +1,326 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value interface{} + tag string +} + +var resolveTable = make([]byte, 256) +var resolveMap = make(map[string]resolveMapItem) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + var resolveMapList = []struct { + v interface{} + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +var longTags = make(map[string]string) +var shortTags = make(map[string]string) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out interface{}) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. 
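+	// A few concrete resolutions performed by the code below, assuming no
+	// explicit tag: "true" and "FALSE" hit the resolve map as !!bool, "~" and
+	// "null" as !!null, "3.14" parses as !!float, "0x1A" and "0o14" as !!int
+	// (26 and 12), "2001-12-14 21:59:43.10" matches a timestamp format, and
+	// "yes"/"on" stay !!str because they are not in the resolve map.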
+ + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. + floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + // Octals as introduced in version 1.2 of the spec. + // Octals from the 1.1 spec, spelled as 0777, are still + // decoded by default in v3 as well for compatibility. + // May be dropped in v4 depending on how usage evolves. + if strings.HasPrefix(plain, "0o") { + intv, err := strconv.ParseInt(plain[2:], 8, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 8, 64) + if err == nil { + return intTag, uintv + } + } else if strings.HasPrefix(plain, "-0o") { + intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. 
+} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go new file mode 100644 index 0000000..d9a539c --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/scannerc.go @@ -0,0 +1,3028 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. 
+// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. +// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The correspoding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. 
+// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). However YAML has some syntax pecularities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. 
Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + if !is_blank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.newlines++ + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. 
+ *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? 
+ if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. + comment_mark = parser.tokens[len(parser.tokens)-1].start_mark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !yaml_parser_scan_line_comment(parser, comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. + + required := parser.flow_level == 0 && parser.indent == parser.mark.column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := yaml_simple_key_t{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !yaml_parser_remove_simple_key(parser) { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return yaml_parser_set_scanner_error(parser, + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return yaml_parser_set_scanner_error(parser, + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. +func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return yaml_parser_set_scanner_error(parser, + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. 
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.index + } + + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: block_mark, + end_mark: block_mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. 
+ token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1, parser.mark) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + skip(parser) + skip(parser) + skip(parser) + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { + + // The indicators '[' and '{' may start a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // Increase the flow level. + if !yaml_parser_increase_flow_level(parser) { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset any potential simple key on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Decrease the flow level. + if !yaml_parser_decrease_flow_level(parser) { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. 
+ start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_FLOW_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_ENTRY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the KEY token. +func yaml_parser_fetch_key(parser *yaml_parser_t) bool { + + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not nessesary simple). + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the VALUE token. +func yaml_parser_fetch_value(parser *yaml_parser_t) bool { + + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { + return false + + } else if valid { + + // Create the KEY token and insert it into the queue. + token := yaml_token_t{ + typ: yaml_KEY_TOKEN, + start_mark: simple_key.mark, + end_mark: simple_key.mark, + } + yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. 
+ if !yaml_parser_roll_indent(parser, simple_key.mark.column, + simple_key.token_number, + yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return yaml_parser_set_scanner_error(parser, "", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. + if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + skip(parser) + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. 
+func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.line == parser.mark.line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !yaml_parser_scan_comments(parser, scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? 
+ if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +// +func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + var s []byte + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^ +func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { + // Eat whitespaces. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the major version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + skip(parser) + + // Consume the minor version number. + if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { + + // Repeat while the next character is digit. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var value, length int8 + for is_digit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. 
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. 
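+		// An escape such as "%C3%A9" is decoded by yaml_parser_scan_uri_escapes
+		// below into the raw octets of the UTF-8 sequence it represents.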
+ if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. 
+ + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + // TODO Test this and then re-enable it. + //if !yaml_parser_scan_line_comment(parser, start_mark) { + // return false + //} + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. 
+ if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if is_z(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. 
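+		// This loop applies the quoting rules: a doubled '' escapes a single
+		// quote in single-quoted scalars, while double-quoted scalars support
+		// backslash escapes such as \n, \t, \xNN, \uNNNN and \UNNNNNNNN.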
+ leading_blanks := false + for !is_blankz(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + skip(parser) + skip(parser) + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + skip(parser) + skip_line(parser) + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. + switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + skip(parser) + skip(parser) + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !is_hex(parser.buffer, parser.buffer_pos+k) { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "did not find expected hexdecimal number") + return false + } + value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. + if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. 
+ s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. 
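+			// Folding: a single line break between plain-scalar lines becomes
+			// one space, while each additional consecutive break is preserved
+			// as a newline.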
+ if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
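+	// If the scalar consumed trailing line breaks, the scanner now sits at the
+	// start of a line, where a simple key may begin.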
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { + if parser.newlines > 0 { + return true + } + + var start_mark yaml_mark_t + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = read(parser, text) + } else { + skip(parser) + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { + token := parser.tokens[len(parser.tokens)-1] + + if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + var token_mark = token.start_mark + var start_mark yaml_mark_t + + var recent_empty = false + var first_empty = parser.newlines <= 1 + + var line = parser.mark.line + var column = parser.mark.column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + var foot_line = -1 + if scan_mark.line > 0 { + foot_line = parser.mark.line-parser.newlines+1 + if parser.newlines == 0 && parser.mark.column > 1 { + foot_line++ + } + } + + var peek = 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { + break + } + column++ + if is_blank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { + // Got line break or terminator. + if !recent_empty { + if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + if len(text) > 0 { + if start_mark.column-1 < parser.indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !is_break(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, + foot: text, + }) + scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = yaml_mark_t{parser.mark.index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. + seen := parser.mark.index+peek + for { + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_breakz(parser.buffer, parser.buffer_pos) { + if parser.mark.index >= seen { + break + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } else if parser.mark.index >= seen { + text = read(parser, text) + } else { + skip(parser) + } + } + + peek = 0 + column = 0 + line = parser.mark.line + } + + if len(text) > 0 { + parser.comments = append(parser.comments, yaml_comment_t{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go new file mode 100644 index 0000000..9210ece --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. 
+func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go new file mode 100644 index 0000000..b8a116b --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/writerc.go @@ -0,0 +1,48 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. +func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go new file mode 100644 index 0000000..56e8a84 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yaml.go @@ -0,0 +1,693 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" + "unicode/utf8" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + parser *parser + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. 
+func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder() + d.knownFields = dec.knownFields + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Decode decodes the node and stores its data into the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v interface{}) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +// +func Marshal(in interface{}) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. 
+func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v interface{}) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. +func (n *Node) Encode(v interface{}) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + +// SetIndent changes the used indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(yamlError{err}) +} + +func failf(format string, args ...interface{}) { + panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []string +} + +func (e *TypeError) Error() string { + return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) +} + +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, colums, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data plesantly, and to preserve comments near the data they +// describe, though. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. 
+// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +// +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the apperance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. + // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted represenation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. 
+func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if strings.Contains(n.Value, "\n") { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. +type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex +var unmarshalerType reflect.Type + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Ptr: + ftype := field.Type + for ftype.Kind() == reflect.Ptr { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PtrTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) 
+ } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. +type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go new file mode 100644 index 0000000..7c6d007 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlh.go @@ -0,0 +1,807 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 + + yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. 
+ yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). + encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. 
+ yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. + yaml_TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", + yaml_TAIL_COMMENT_EVENT: "tail comment", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. 
+ yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) + +// This structure holds information about a potential simple key. +type yaml_simple_key_t struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark yaml_mark_t // The position mark. +} + +// The states of the parser. 
+type yaml_parser_state_t int + +const ( + yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota + + yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. + yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. + yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. + yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. + yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + yaml_PARSE_END_STATE // Expect nothing. 
+) + +func (ps yaml_parser_state_t) String() string { + switch ps { + case yaml_PARSE_STREAM_START_STATE: + return "yaml_PARSE_STREAM_START_STATE" + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_START_STATE: + return "yaml_PARSE_DOCUMENT_START_STATE" + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return "yaml_PARSE_DOCUMENT_CONTENT_STATE" + case yaml_PARSE_DOCUMENT_END_STATE: + return "yaml_PARSE_DOCUMENT_END_STATE" + case yaml_PARSE_BLOCK_NODE_STATE: + return "yaml_PARSE_BLOCK_NODE_STATE" + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" + case yaml_PARSE_FLOW_NODE_STATE: + return "yaml_PARSE_FLOW_NODE_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case yaml_PARSE_END_STATE: + return "yaml_PARSE_END_STATE" + } + return "" +} + +// This structure holds aliases data. +type yaml_alias_data_t struct { + anchor []byte // The anchor. + index int // The node id. + mark yaml_mark_t // The anchor mark. +} + +// The parser structure. +// +// All members are internal. Manage the structure using the +// yaml_parser_ family of functions. +type yaml_parser_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + + problem string // Error description. + + // The byte about which the problem occurred. + problem_offset int + problem_value int + problem_mark yaml_mark_t + + // The error context. + context string + context_mark yaml_mark_t + + // Reader stuff + + read_handler yaml_read_handler_t // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. 
+ + encoding yaml_encoding_t // The input encoding. + + offset int // The offset of the current position (in bytes). + mark yaml_mark_t // The mark of the current position. + + // Comments + + head_comment []byte // The current head comments + line_comment []byte // The current line comments + foot_comment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []yaml_comment_t // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []yaml_token_t // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []yaml_simple_key_t // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state yaml_parser_state_t // The current parser state. + states []yaml_parser_state_t // The parser states stack. + marks []yaml_mark_t // The stack of marks. + tag_directives []yaml_tag_directive_t // The list of TAG directives. + + // Dumper stuff + + aliases []yaml_alias_data_t // The alias data. + + document *yaml_document_t // The currently parsed document. +} + +type yaml_comment_t struct { + + scan_mark yaml_mark_t // Position where scanning for comments started + token_mark yaml_mark_t // Position after which tokens will be associated with this comment + start_mark yaml_mark_t // Position of '#' comment mark + end_mark yaml_mark_t // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// The prototype of a write handler. +// +// The write handler is called when the emitter needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// yaml_emitter_set_output(). +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +// +type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error + +type yaml_emitter_state_t int + +// The emitter states. +const ( + // Expect STREAM-START. + yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota + + yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
+ yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. + yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + yaml_EMIT_END_STATE // Expect nothing. +) + +// The emitter structure. +// +// All members are internal. Manage the structure using the @c yaml_emitter_ +// family of functions. +type yaml_emitter_t struct { + + // Error handling + + error yaml_error_type_t // Error type. + problem string // Error description. + + // Writer stuff + + write_handler yaml_write_handler_t // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding yaml_encoding_t // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + best_indent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break yaml_break_t // The preferred line break. + + state yaml_emitter_state_t // The current emitter state. + states []yaml_emitter_state_t // The stack of states. + + events []yaml_event_t // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []yaml_tag_directive_t // The list of tag directives. + + indent int // The current indentation level. + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? + sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + open_ended bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. 
+ scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style yaml_scalar_style_t // The output style. + } + + // Comments + head_comment []byte + line_comment []byte + foot_comment []byte + tail_comment []byte + + key_line_comment []byte + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *yaml_document_t // The currently emitted document. +} diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go new file mode 100644 index 0000000..e88f9c5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v3/yamlprivateh.go @@ -0,0 +1,198 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package yaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. + initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func is_alpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a digit. +func is_digit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. 
+func as_digit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func is_hex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func as_hex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func is_ascii(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func is_printable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func is_z(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func is_bom(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func is_space(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func is_tab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). +func is_blank(b []byte, i int) bool { + //return is_space(b, i) || is_tab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func is_break(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func is_crlf(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( + // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( + // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. 
+func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( + // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/k8s.io/api/LICENSE b/vendor/k8s.io/api/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/k8s.io/api/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go new file mode 100644 index 0000000..0a40726 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -0,0 +1,26 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=admissionregistration.k8s.io + +// Package v1beta1 is the v1beta1 version of the API. +// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the +// new dynamic admission controller configuration. +package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go new file mode 100644 index 0000000..c98aa74 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -0,0 +1,3444 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } +func (*MutatingWebhook) ProtoMessage() {} +func (*MutatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{0} +} +func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhook.Merge(m, src) +} +func (m *MutatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo + +func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } +func (*MutatingWebhookConfiguration) ProtoMessage() {} +func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{1} +} +func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) +} +func (m *MutatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo + +func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } +func (*MutatingWebhookConfigurationList) ProtoMessage() {} +func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{2} +} +func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) +} +func (m *MutatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { + 
xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{3} +} +func (m *Rule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Rule) XXX_Merge(src proto.Message) { + xxx_messageInfo_Rule.Merge(m, src) +} +func (m *Rule) XXX_Size() int { + return m.Size() +} +func (m *Rule) XXX_DiscardUnknown() { + xxx_messageInfo_Rule.DiscardUnknown(m) +} + +var xxx_messageInfo_Rule proto.InternalMessageInfo + +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{4} +} +func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuleWithOperations) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuleWithOperations.Merge(m, src) +} +func (m *RuleWithOperations) XXX_Size() int { + return m.Size() +} +func (m *RuleWithOperations) XXX_DiscardUnknown() { + xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) +} + +var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{5} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } +func (*ValidatingWebhook) ProtoMessage() {} +func (*ValidatingWebhook) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{6} +} +func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhook.Merge(m, src) +} +func (m *ValidatingWebhook) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhook) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo + +func (m *ValidatingWebhookConfiguration) Reset() { 
*m = ValidatingWebhookConfiguration{} } +func (*ValidatingWebhookConfiguration) ProtoMessage() {} +func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{7} +} +func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) +} +func (m *ValidatingWebhookConfiguration) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo + +func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } +func (*ValidatingWebhookConfigurationList) ProtoMessage() {} +func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{8} +} +func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) +} +func (m *ValidatingWebhookConfigurationList) XXX_Size() int { + return m.Size() +} +func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { + xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) +} + +var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_abeea74cbc46f55a, []int{9} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") + proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") + proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") + proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") + 
proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") + proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") + proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") + proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) +} + +var fileDescriptor_abeea74cbc46f55a = []byte{ + // 1114 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x23, 0x45, + 0x13, 0xce, 0xc4, 0xf6, 0xda, 0x6e, 0x27, 0xbb, 0x9b, 0x7e, 0x5f, 0x76, 0x4d, 0x58, 0x79, 0x2c, + 0x1f, 0x90, 0x25, 0xd8, 0x99, 0x4d, 0x40, 0x08, 0x16, 0x10, 0x8a, 0x03, 0x0b, 0x91, 0x92, 0xdd, + 0xd0, 0xd9, 0x0f, 0x89, 0x0f, 0x69, 0xdb, 0xe3, 0xb2, 0xdd, 0xd8, 0x9e, 0x1e, 0x4d, 0xf7, 0x38, + 0xe4, 0xc6, 0x4f, 0xe0, 0x2f, 0x70, 0xe2, 0x57, 0x70, 0xe0, 0x16, 0x6e, 0x7b, 0xdc, 0x0b, 0x23, + 0x32, 0x9c, 0x38, 0x70, 0xe0, 0x9a, 0x13, 0x9a, 0x9e, 0xf6, 0xf8, 0x2b, 0x59, 0x4c, 0x90, 0xf6, + 0x94, 0xdb, 0xf4, 0x53, 0x5d, 0x4f, 0x75, 0xd5, 0x54, 0xd5, 0x83, 0x3e, 0xef, 0xbd, 0x2b, 0x2c, + 0xc6, 0xed, 0x5e, 0xd0, 0x04, 0xdf, 0x05, 0x09, 0xc2, 0x1e, 0x82, 0xdb, 0xe2, 0xbe, 0xad, 0x0d, + 0xd4, 0x63, 0x36, 0x6d, 0x0d, 0x98, 0x10, 0x8c, 0xbb, 0x3e, 0x74, 0x98, 0x90, 0x3e, 0x95, 0x8c, + 0xbb, 0xf6, 0x70, 0xa3, 0x09, 0x92, 0x6e, 0xd8, 0x1d, 0x70, 0xc1, 0xa7, 0x12, 0x5a, 0x96, 0xe7, + 0x73, 0xc9, 0x71, 0x3d, 0xf1, 0xb4, 0xa8, 0xc7, 0xac, 0x33, 0x3d, 0x2d, 0xed, 0xb9, 0x7e, 0xbb, + 0xc3, 0x64, 0x37, 0x68, 0x5a, 0x0e, 0x1f, 0xd8, 0x1d, 0xde, 0xe1, 0xb6, 0x22, 0x68, 0x06, 0x6d, + 0x75, 0x52, 0x07, 0xf5, 0x95, 0x10, 0xaf, 0xbf, 0x3d, 0x7e, 0xd2, 0x80, 0x3a, 0x5d, 0xe6, 0x82, + 0x7f, 0x64, 0x7b, 0xbd, 0x4e, 0x0c, 0x08, 0x7b, 0x00, 0x92, 0xda, 0xc3, 0xb9, 0xe7, 0xac, 0xdb, + 0xe7, 0x79, 0xf9, 0x81, 0x2b, 0xd9, 0x00, 0xe6, 0x1c, 0xde, 0xf9, 0x27, 0x07, 0xe1, 0x74, 0x61, + 0x40, 0x67, 0xfd, 0x6a, 0xbf, 0xe4, 0xd1, 0xb5, 0xbd, 0x40, 0x52, 0xc9, 0xdc, 0xce, 0x13, 0x68, + 0x76, 0x39, 0xef, 0xe1, 0x2a, 0xca, 0xba, 0x74, 0x00, 0x65, 0xa3, 0x6a, 0xd4, 0x8b, 0x8d, 0x95, + 0xe3, 0xd0, 0x5c, 0x8a, 0x42, 0x33, 0x7b, 0x9f, 0x0e, 0x80, 0x28, 0x0b, 0x3e, 0x44, 0x2b, 0x4e, + 0x9f, 0x81, 0x2b, 0xb7, 0xb9, 0xdb, 0x66, 0x9d, 0xf2, 0x72, 0xd5, 0xa8, 0x97, 0x36, 0x3f, 0xb4, + 0x16, 0x2d, 0xa2, 0xa5, 0x43, 0x6d, 0x4f, 0x90, 0x34, 0xfe, 0xaf, 0x03, 0xad, 0x4c, 0xa2, 0x64, + 0x2a, 0x10, 0xa6, 0x28, 0xe7, 0x07, 0x7d, 0x10, 0xe5, 0x4c, 0x35, 0x53, 0x2f, 0x6d, 0x7e, 0xb0, + 0x78, 0x44, 0x12, 0xf4, 0xe1, 0x09, 0x93, 0xdd, 0x07, 0x1e, 0x24, 0x16, 0xd1, 0x58, 0xd5, 0x01, + 0x73, 0xb1, 0x4d, 0x90, 0x84, 0x19, 0xef, 0xa2, 0xd5, 0x36, 0x65, 0xfd, 0xc0, 0x87, 0x7d, 0xde, + 0x67, 0xce, 0x51, 0x39, 0xab, 0xca, 0xf0, 0x7a, 0x14, 0x9a, 0xab, 0xf7, 0x26, 0x0d, 0xa7, 0xa1, + 0xb9, 0x36, 0x05, 0x3c, 0x3c, 0xf2, 0x80, 0x4c, 0x3b, 0xe3, 0x8f, 0x51, 0x69, 0x40, 0xa5, 0xd3, + 0xd5, 0x5c, 0x45, 0xc5, 0x55, 0x8b, 0x42, 0xb3, 0xb4, 0x37, 0x86, 0x4f, 0x43, 0xf3, 0xda, 0xc4, + 0x51, 0xf1, 0x4c, 0xba, 0xe1, 0x6f, 0xd1, 0x5a, 0x5c, 0x77, 0xe1, 0x51, 0x07, 0x0e, 0xa0, 0x0f, + 0x8e, 0xe4, 0x7e, 0x39, 0xa7, 0x8a, 
0xfe, 0xd6, 0x44, 0x09, 0xd2, 0x3f, 0x6f, 0x79, 0xbd, 0x4e, + 0x0c, 0x08, 0x2b, 0x6e, 0x30, 0x6b, 0xb8, 0x61, 0xed, 0xd2, 0x26, 0xf4, 0x47, 0xae, 0x8d, 0x57, + 0xa2, 0xd0, 0x5c, 0xbb, 0x3f, 0xcb, 0x48, 0xe6, 0x83, 0x60, 0x8e, 0xae, 0xf2, 0xe6, 0x37, 0xe0, + 0xc8, 0x34, 0x6c, 0xe9, 0xe2, 0x61, 0x71, 0x14, 0x9a, 0x57, 0x1f, 0x4c, 0xd1, 0x91, 0x19, 0xfa, + 0xb8, 0x60, 0x82, 0xb5, 0xe0, 0x93, 0x76, 0x1b, 0x1c, 0x29, 0xca, 0x57, 0xc6, 0x05, 0x3b, 0x18, + 0xc3, 0x71, 0xc1, 0xc6, 0xc7, 0xed, 0x3e, 0x15, 0x82, 0x4c, 0xba, 0xe1, 0xbb, 0xe8, 0x6a, 0xdc, + 0xf5, 0x3c, 0x90, 0x07, 0xe0, 0x70, 0xb7, 0x25, 0xca, 0xf9, 0xaa, 0x51, 0xcf, 0x25, 0x2f, 0x78, + 0x38, 0x65, 0x21, 0x33, 0x37, 0xf1, 0x23, 0x74, 0x33, 0x6d, 0x25, 0x02, 0x43, 0x06, 0x87, 0x8f, + 0xc1, 0x8f, 0x0f, 0xa2, 0x5c, 0xa8, 0x66, 0xea, 0xc5, 0xc6, 0x6b, 0x51, 0x68, 0xde, 0xdc, 0x3a, + 0xfb, 0x0a, 0x39, 0xcf, 0x17, 0x3f, 0x45, 0xd8, 0x07, 0xe6, 0x0e, 0xb9, 0xa3, 0xda, 0x4f, 0x37, + 0x04, 0x52, 0xf9, 0xdd, 0x89, 0x42, 0x13, 0x93, 0x39, 0xeb, 0x69, 0x68, 0xde, 0x98, 0x47, 0x55, + 0x7b, 0x9c, 0xc1, 0x55, 0xfb, 0xd5, 0x40, 0xb7, 0x66, 0x66, 0x39, 0x19, 0x9b, 0x20, 0xe9, 0x78, + 0xfc, 0x14, 0x15, 0xe2, 0x1f, 0xd3, 0xa2, 0x92, 0xaa, 0xe1, 0x2e, 0x6d, 0xde, 0x59, 0xec, 0x37, + 0x26, 0xff, 0x6c, 0x0f, 0x24, 0x6d, 0x60, 0x3d, 0x34, 0x68, 0x8c, 0x91, 0x94, 0x15, 0x7f, 0x89, + 0x0a, 0x3a, 0xb2, 0x28, 0x2f, 0xab, 0x11, 0x7d, 0x6f, 0xf1, 0x11, 0x9d, 0x79, 0x7b, 0x23, 0x1b, + 0x87, 0x22, 0x85, 0x43, 0x4d, 0x58, 0xfb, 0xd3, 0x40, 0xd5, 0x17, 0xe5, 0xb7, 0xcb, 0x84, 0xc4, + 0x5f, 0xcd, 0xe5, 0x68, 0x2d, 0xd8, 0xaa, 0x4c, 0x24, 0x19, 0x5e, 0xd7, 0x19, 0x16, 0x46, 0xc8, + 0x44, 0x7e, 0x3d, 0x94, 0x63, 0x12, 0x06, 0xa3, 0xe4, 0xee, 0x5d, 0x38, 0xb9, 0xa9, 0x87, 0x8f, + 0x37, 0xd1, 0x4e, 0x4c, 0x4e, 0x92, 0x18, 0xb5, 0x9f, 0x0d, 0x94, 0x8d, 0x57, 0x13, 0x7e, 0x03, + 0x15, 0xa9, 0xc7, 0x3e, 0xf5, 0x79, 0xe0, 0x89, 0xb2, 0xa1, 0x7a, 0x70, 0x35, 0x0a, 0xcd, 0xe2, + 0xd6, 0xfe, 0x4e, 0x02, 0x92, 0xb1, 0x1d, 0x6f, 0xa0, 0x12, 0xf5, 0x58, 0xda, 0xb2, 0xcb, 0xea, + 0xfa, 0xb5, 0x78, 0x80, 0xb6, 0xf6, 0x77, 0xd2, 0x36, 0x9d, 0xbc, 0x13, 0xf3, 0xfb, 0x20, 0x78, + 0xe0, 0x3b, 0x7a, 0xb3, 0x6a, 0x7e, 0x32, 0x02, 0xc9, 0xd8, 0x8e, 0xdf, 0x44, 0x39, 0xe1, 0x70, + 0x0f, 0xf4, 0x5e, 0xbc, 0x11, 0x3f, 0xfb, 0x20, 0x06, 0x4e, 0x43, 0xb3, 0xa8, 0x3e, 0x54, 0x83, + 0x26, 0x97, 0x6a, 0x3f, 0x1a, 0x08, 0xcf, 0xaf, 0x5e, 0xfc, 0x11, 0x42, 0x3c, 0x3d, 0xe9, 0x94, + 0x4c, 0xd5, 0x55, 0x29, 0x7a, 0x1a, 0x9a, 0xab, 0xe9, 0x49, 0x51, 0x4e, 0xb8, 0xe0, 0x7d, 0x94, + 0x8d, 0xd7, 0xb5, 0x56, 0x1e, 0xeb, 0xdf, 0xe9, 0xc0, 0x58, 0xd3, 0xe2, 0x13, 0x51, 0x4c, 0xb5, + 0x1f, 0x0c, 0x74, 0xfd, 0x00, 0xfc, 0x21, 0x73, 0x80, 0x40, 0x1b, 0x7c, 0x70, 0x1d, 0xc0, 0x36, + 0x2a, 0xa6, 0x3b, 0x51, 0xeb, 0xe1, 0x9a, 0xf6, 0x2d, 0xa6, 0xfb, 0x93, 0x8c, 0xef, 0xa4, 0xda, + 0xb9, 0x7c, 0xae, 0x76, 0xde, 0x42, 0x59, 0x8f, 0xca, 0x6e, 0x39, 0xa3, 0x6e, 0x14, 0x62, 0xeb, + 0x3e, 0x95, 0x5d, 0xa2, 0x50, 0x65, 0xe5, 0xbe, 0x54, 0xc5, 0xcd, 0x69, 0x2b, 0xf7, 0x25, 0x51, + 0x68, 0xed, 0x8f, 0x2b, 0x68, 0xed, 0x31, 0xed, 0xb3, 0xd6, 0xa5, 0x5e, 0x5f, 0xea, 0xf5, 0x82, + 0x7a, 0x8d, 0x2e, 0xf5, 0xfa, 0x22, 0x7a, 0x5d, 0x3b, 0x31, 0x50, 0x65, 0x6e, 0xd6, 0x5e, 0xb6, + 0x9e, 0x7e, 0x3d, 0xa7, 0xa7, 0xef, 0x2f, 0x3e, 0x42, 0x73, 0xaf, 0x9f, 0x53, 0xd4, 0xbf, 0x0c, + 0x54, 0x7b, 0x71, 0x8e, 0x2f, 0x41, 0x53, 0x07, 0xd3, 0x9a, 0xfa, 0xd9, 0x7f, 0x48, 0x70, 0x11, + 0x55, 0xfd, 0xc9, 0x40, 0xff, 0x3b, 0x63, 0x9d, 0xe1, 0x57, 0x51, 0x26, 0xf0, 0xfb, 0x7a, 0x2d, + 0xe7, 0xa3, 0xd0, 0xcc, 0x3c, 0x22, 0xbb, 0x24, 0xc6, 0x30, 
0x45, 0x79, 0x91, 0x28, 0x83, 0x4e, + 0xff, 0xee, 0xe2, 0x6f, 0x9c, 0x95, 0x94, 0x46, 0x29, 0x0a, 0xcd, 0xfc, 0x08, 0x1d, 0xf1, 0xe2, + 0x3a, 0x2a, 0x38, 0xb4, 0x11, 0xb8, 0x2d, 0xad, 0x69, 0x2b, 0x8d, 0x95, 0xb8, 0x5c, 0xdb, 0x5b, + 0x09, 0x46, 0x52, 0x6b, 0xe3, 0xf6, 0xf1, 0x49, 0x65, 0xe9, 0xd9, 0x49, 0x65, 0xe9, 0xf9, 0x49, + 0x65, 0xe9, 0xbb, 0xa8, 0x62, 0x1c, 0x47, 0x15, 0xe3, 0x59, 0x54, 0x31, 0x9e, 0x47, 0x15, 0xe3, + 0xb7, 0xa8, 0x62, 0x7c, 0xff, 0x7b, 0x65, 0xe9, 0x8b, 0xbc, 0x8e, 0xff, 0x77, 0x00, 0x00, 0x00, + 0xff, 0xff, 0xae, 0x27, 0x2b, 0xcb, 0x2c, 0x0f, 0x00, 0x00, +} + +func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.ReinvocationPolicy != nil { + i -= len(*m.ReinvocationPolicy) + copy(dAtA[i:], *m.ReinvocationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scope != nil { + i -= len(*m.Scope) + copy(dAtA[i:], *m.Scope) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) + i-- + dAtA[i] = 0x22 + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIVersions) > 0 { + for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIVersions[iNdEx]) + copy(dAtA[i:], m.APIVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Operations) > 0 { + for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Operations[iNdEx]) + copy(dAtA[i:], m.Operations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ObjectSelector != nil { + { + size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.MatchPolicy != nil { + i -= len(*m.MatchPolicy) + copy(dAtA[i:], *m.MatchPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) + i-- + dAtA[i] = 0x4a + } + if len(m.AdmissionReviewVersions) > 0 { + for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AdmissionReviewVersions[iNdEx]) + copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) + i-- + dAtA[i] = 0x42 + } + } + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x38 + } + if m.SideEffects != nil { + i -= len(*m.SideEffects) + copy(dAtA[i:], *m.SideEffects) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i-- + dAtA[i] = 0x32 + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.FailurePolicy != 
nil { + i -= len(*m.FailurePolicy) + copy(dAtA[i:], *m.FailurePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Webhooks) > 0 { + for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0x1a + } + if m.CABundle != nil { + i -= 
len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x12 + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MutatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReinvocationPolicy != nil { + l = len(*m.ReinvocationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MutatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MutatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Scope != nil { + l = len(*m.Scope) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuleWithOperations) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Operations) > 0 { + for _, s := range m.Operations { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Rule.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + 
n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *ValidatingWebhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + if len(m.AdmissionReviewVersions) > 0 { + for _, s := range m.AdmissionReviewVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.MatchPolicy != nil { + l = len(*m.MatchPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ObjectSelector != nil { + l = m.ObjectSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ValidatingWebhookConfiguration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ValidatingWebhookConfigurationList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *MutatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&MutatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + 
`ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]MutatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&MutatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *MutatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]MutatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `Scope:` + valueToStringGenerated(this.Scope) + `,`, + `}`, + }, "") + return s +} +func (this *RuleWithOperations) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuleWithOperations{`, + `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, + `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhook) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]RuleWithOperations{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ValidatingWebhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, + 
`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, + `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, + `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfiguration) String() string { + if this == nil { + return "nil" + } + repeatedStringForWebhooks := "[]ValidatingWebhook{" + for _, f := range this.Webhooks { + repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," + } + repeatedStringForWebhooks += "}" + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + repeatedStringForWebhooks + `,`, + `}`, + }, "") + return s +} +func (this *ValidatingWebhookConfigurationList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ValidatingWebhookConfiguration{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) + m.ReinvocationPolicy = &s + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, MutatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScopeType(dAtA[iNdEx:postIndex]) + m.Scope = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MatchPolicyType(dAtA[iNdEx:postIndex]) + m.MatchPolicy = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ObjectSelector == nil { + m.ObjectSelector = &v1.LabelSelector{} + } + if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
+ if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto new file mode 100644 index 0000000..086cbcc --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -0,0 +1,487 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +message MutatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + repeated string admissionReviewVersions = 8; + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". 
+ // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + optional string reinvocationPolicy = 10; +} + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. +message MutatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated MutatingWebhook Webhooks = 2; +} + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +message MutatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of MutatingWebhookConfiguration. + repeated MutatingWebhookConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. 
+ // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + optional string scope = 4; +} + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +message RuleWithOperations { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string operations = 1; + + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + optional Rule rule = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +message ValidatingWebhook { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + optional string name = 1; + + // ClientConfig defines how to communicate with the hook. + // Required + optional WebhookClientConfig clientConfig = 2; + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + repeated RuleWithOperations rules = 3; + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + optional string failurePolicy = 4; + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. 
+ // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + optional string matchPolicy = 9; + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. + // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + optional string sideEffects = 6; + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + optional int32 timeoutSeconds = 7; + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. 
API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. + // +optional + repeated string admissionReviewVersions = 8; +} + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ValidatingWebhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; +} + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + optional string url = 3; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 1; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. 
+ // +optional + optional bytes caBundle = 2; +} + diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go new file mode 100644 index 0000000..d126da9 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "admissionregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ValidatingWebhookConfiguration{}, + &ValidatingWebhookConfigurationList{}, + &MutatingWebhookConfiguration{}, + &MutatingWebhookConfigurationList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go new file mode 100644 index 0000000..37a993e --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -0,0 +1,559 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. 
+ APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + + // scope specifies the scope of this rule. + // Valid values are "Cluster", "Namespaced", and "*" + // "Cluster" means that only cluster-scoped resources will match this rule. + // Namespace API objects are cluster-scoped. + // "Namespaced" means that only namespaced resources will match this rule. + // "*" means that there are no scope restrictions. + // Subresources match the scope of their parent resource. + // Default is "*". + // + // +optional + Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` +} + +type ScopeType string + +const ( + // ClusterScope means that scope is limited to cluster-scoped objects. + // Namespace objects are cluster-scoped. + ClusterScope ScopeType = "Cluster" + // NamespacedScope means that scope is limited to namespaced objects. + NamespacedScope ScopeType = "Namespaced" + // AllScopes means that all scopes are included. + AllScopes ScopeType = "*" +) + +type FailurePolicyType string + +const ( + // Ignore means that an error calling the webhook is ignored. + Ignore FailurePolicyType = "Ignore" + // Fail means that an error calling the webhook causes the admission to fail. + Fail FailurePolicyType = "Fail" +) + +// MatchPolicyType specifies the type of match policy +type MatchPolicyType string + +const ( + // Exact means requests should only be sent to the webhook if they exactly match a given rule + Exact MatchPolicyType = "Exact" + // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. + Equivalent MatchPolicyType = "Equivalent" +) + +type SideEffectClass string + +const ( + // SideEffectClassUnknown means that no information is known about the side effects of calling the webhook. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassUnknown SideEffectClass = "Unknown" + // SideEffectClassNone means that calling the webhook will have no side effects. + SideEffectClassNone SideEffectClass = "None" + // SideEffectClassSome means that calling the webhook will possibly have side effects. + // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. + SideEffectClassSome SideEffectClass = "Some" + // SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the + // request being reviewed has the dry-run attribute, the side effects will be suppressed. 
+ SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. +type ValidatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +type ValidatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of ValidatingWebhookConfiguration. + Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. +// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. +type MutatingWebhookConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. +type MutatingWebhookConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of MutatingWebhookConfiguration. + Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. +type ValidatingWebhook struct { + // The name of the admission webhook. 
+ // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. 
+ // +optional + AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` +} + +// MutatingWebhook describes an admission webhook and the resources and operations it applies to. +type MutatingWebhook struct { + // The name of the admission webhook. + // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where + // "imagepolicy" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // ClientConfig defines how to communicate with the hook. + // Required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` + + // Rules describes what operations on what resources/subresources the webhook cares about. + // The webhook cares about an operation if it matches _any_ Rule. + // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks + // from putting the cluster in a state which cannot be recovered from without completely + // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called + // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. + Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + + // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - + // allowed values are Ignore or Fail. Defaults to Ignore. + // +optional + FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` + + // matchPolicy defines how the "rules" list is used to match incoming requests. + // Allowed values are "Exact" or "Equivalent". + // + // - Exact: match a request only if it exactly matches a specified rule. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. + // + // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. + // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, + // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, + // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. + // + // Defaults to "Exact" + // +optional + MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` + + // NamespaceSelector decides whether to run the webhook on an object based + // on whether the namespace for that object matches the selector. If the + // object itself is a namespace, the matching is performed on + // object.metadata.labels. If the object is another cluster scoped resource, + // it never skips the webhook. 
+ // + // For example, to run the webhook on any objects whose namespace is not + // associated with "runlevel" of "0" or "1"; you will set the selector as + // follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "runlevel", + // "operator": "NotIn", + // "values": [ + // "0", + // "1" + // ] + // } + // ] + // } + // + // If instead you want to only run the webhook on any objects whose + // namespace is associated with the "environment" of "prod" or "staging"; + // you will set the selector as follows: + // "namespaceSelector": { + // "matchExpressions": [ + // { + // "key": "environment", + // "operator": "In", + // "values": [ + // "prod", + // "staging" + // ] + // } + // ] + // } + // + // See + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + // for more examples of label selectors. + // + // Default to the empty LabelSelector, which matches everything. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` + + // ObjectSelector decides whether to run the webhook based on if the + // object has matching labels. objectSelector is evaluated against both + // the oldObject and newObject that would be sent to the webhook, and + // is considered to match if either object matches the selector. A null + // object (oldObject in the case of create, or newObject in the case of + // delete) or an object that cannot have labels (like a + // DeploymentRollback or a PodProxyOptions object) is not considered to + // match. + // Use the object selector only if the webhook is opt-in, because end + // users may skip the admission webhook by setting the labels. + // Default to the empty LabelSelector, which matches everything. + // +optional + ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` + + // SideEffects states whether this webhook has side effects. + // Acceptable values are: Unknown, None, Some, NoneOnDryRun + // Webhooks with side effects MUST implement a reconciliation system, since a request may be + // rejected by a future step in the admission change and the side effects therefore need to be undone. + // Requests with the dryRun attribute will be auto-rejected if they match a webhook with + // sideEffects == Unknown or Some. Defaults to Unknown. + // +optional + SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` + + // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, + // the webhook call will be ignored or the API call will fail based on the + // failure policy. + // The timeout value must be between 1 and 30 seconds. + // Default to 30 seconds. + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` + + // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` + // versions the Webhook expects. API server will try to use first version in + // the list which it supports. If none of the versions specified in this list + // supported by API server, validation will fail for this object. + // If a persisted webhook configuration specifies allowed versions and does not + // include any versions known to the API Server, calls to the webhook will fail + // and be subject to the failure policy. + // Default to `['v1beta1']`. 
+ // +optional + AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` + + // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. + // Allowed values are "Never" and "IfNeeded". + // + // Never: the webhook will not be called more than once in a single admission evaluation. + // + // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation + // if the object being admitted is modified by other admission plugins after the initial webhook call. + // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. + // Note: + // * the number of additional invocations is not guaranteed to be exactly one. + // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. + // * webhooks that use this option may be reordered to minimize the number of additional invocations. + // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. + // + // Defaults to "Never". + // +optional + ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` +} + +// ReinvocationPolicyType specifies what type of policy the admission hook uses. +type ReinvocationPolicyType string + +const ( + // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a + // single admission evaluation. + NeverReinvocationPolicy ReinvocationPolicyType = "Never" + // IfNeededReinvocationPolicy indicates that the webhook may be called at least one + // additional time as part of the admission evaluation if the object being admitted is + // modified by other admission plugins after the initial webhook call. + IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" +) + +// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make +// sure that all the tuple expansions are valid. +type RuleWithOperations struct { + // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * + // for all operations. + // If '*' is present, the length of the slice must be one. + // Required. + Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` + // Rule is embedded, it describes other criteria of the rule, like + // APIGroups, APIVersions, Resources, etc. + Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` +} + +type OperationType string + +// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. +const ( + OperationAll OperationType = "*" + Create OperationType = "CREATE" + Update OperationType = "UPDATE" + Delete OperationType = "DELETE" + Connect OperationType = "CONNECT" +) + +// WebhookClientConfig contains the information to make a TLS +// connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. 
The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + // `name` is the name of the service. + // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..d9fb5af --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,151 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_MutatingWebhook = map[string]string{ + "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", + "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. 
* if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", +} + +func (MutatingWebhook) SwaggerDoc() map[string]string { + return map_MutatingWebhook +} + +var map_MutatingWebhookConfiguration = map[string]string{ + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfiguration +} + +var map_MutatingWebhookConfigurationList = map[string]string{ + "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of MutatingWebhookConfiguration.", +} + +func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_MutatingWebhookConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", + "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +var map_RuleWithOperations = map[string]string{ + "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", + "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. 
Required.", +} + +func (RuleWithOperations) SwaggerDoc() map[string]string { + return map_RuleWithOperations +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_ValidatingWebhook = map[string]string{ + "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", + "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", + "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", +} + +func (ValidatingWebhook) SwaggerDoc() map[string]string { + return map_ValidatingWebhook +} + +var map_ValidatingWebhookConfiguration = map[string]string{ + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. Deprecated in v1.16, planned for removal in v1.19. 
Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", +} + +func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfiguration +} + +var map_ValidatingWebhookConfigurationList = map[string]string{ + "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ValidatingWebhookConfiguration.", +} + +func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { + return map_ValidatingWebhookConfigurationList +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..c4570d0 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,396 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ReinvocationPolicy != nil { + in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy + *out = new(ReinvocationPolicyType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. +func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { + if in == nil { + return nil + } + out := new(MutatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]MutatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. +func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(MutatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MutatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. +func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(MutatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Rule) DeepCopyInto(out *Rule) { + *out = *in + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Scope != nil { + in, out := &in.Scope, &out.Scope + *out = new(ScopeType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { + if in == nil { + return nil + } + out := new(Rule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { + *out = *in + if in.Operations != nil { + in, out := &in.Operations, &out.Operations + *out = make([]OperationType, len(*in)) + copy(*out, *in) + } + in.Rule.DeepCopyInto(&out.Rule) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. +func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { + if in == nil { + return nil + } + out := new(RuleWithOperations) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. 
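+// For illustration (variable names are arbitrary; original is assumed to be a
+// *ServiceReference), DeepCopy is typically used to obtain an independent copy
+// before mutating it:
+//
+//   copied := original.DeepCopy()
+//   copied.Path = nil // leaves original.Path untouched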
+func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.MatchPolicy != nil { + in, out := &in.MatchPolicy, &out.MatchPolicy + *out = new(MatchPolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ObjectSelector != nil { + in, out := &in.ObjectSelector, &out.ObjectSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + if in.AdmissionReviewVersions != nil { + in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. +func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { + if in == nil { + return nil + } + out := new(ValidatingWebhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Webhooks != nil { + in, out := &in.Webhooks, &out.Webhooks + *out = make([]ValidatingWebhook, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. +func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfiguration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
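+// For illustration (names are arbitrary; src is assumed to be a
+// *ValidatingWebhookConfigurationList), DeepCopyInto fills a value the caller
+// has already allocated:
+//
+//   var dst ValidatingWebhookConfigurationList
+//   src.DeepCopyInto(&dst)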
+func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ValidatingWebhookConfiguration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. +func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { + if in == nil { + return nil + } + out := new(ValidatingWebhookConfigurationList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go new file mode 100644 index 0000000..61dc97b --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go new file mode 100644 index 0000000..6ef25f5 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -0,0 +1,8238 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m 
*DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{3} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) 
Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{11} +} +func (m 
*DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{22} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{23} +} +func (m 
*StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{24} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{25} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{26} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_e1014cab6f31e43b, []int{27} +} +func (m *StatefulSetUpdateStrategy) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b) +} + +var fileDescriptor_e1014cab6f31e43b = []byte{ + // 2031 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 
0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, + 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, + 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, + 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0xcd, 0x53, 0xf5, 0x7e, 0xaf, 0x7f, 0x55, + 0xf5, 0xab, 0x7a, 0xaf, 0xab, 0x0d, 0xee, 0x1d, 0x7f, 0xdb, 0x53, 0x75, 0xbb, 0x7d, 0xec, 0x1f, + 0x12, 0xd7, 0x22, 0x94, 0x78, 0xed, 0x21, 0xb1, 0x34, 0xdb, 0x6d, 0x8b, 0x0e, 0xec, 0xe8, 0x6d, + 0xec, 0x38, 0x5e, 0x7b, 0x78, 0xbb, 0x3d, 0x20, 0x16, 0x71, 0x31, 0x25, 0x9a, 0xea, 0xb8, 0x36, + 0xb5, 0x21, 0x0c, 0x30, 0x2a, 0x76, 0x74, 0x95, 0x61, 0xd4, 0xe1, 0xed, 0xab, 0xb7, 0x06, 0x3a, + 0x3d, 0xf2, 0x0f, 0xd5, 0xbe, 0x6d, 0xb6, 0x07, 0xf6, 0xc0, 0x6e, 0x73, 0xe8, 0xa1, 0xff, 0x88, + 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0xe2, 0x31, 0x7d, 0xdb, 0x25, 0x92, 0xc7, + 0x5c, 0xbd, 0x1b, 0x63, 0x4c, 0xdc, 0x3f, 0xd2, 0x2d, 0xe2, 0x8e, 0xda, 0xce, 0xf1, 0x80, 0x35, + 0x78, 0x6d, 0x93, 0x50, 0x2c, 0x8b, 0x6a, 0x17, 0x45, 0xb9, 0xbe, 0x45, 0x75, 0x93, 0xe4, 0x02, + 0x5e, 0x9b, 0x14, 0xe0, 0xf5, 0x8f, 0x88, 0x89, 0x73, 0x71, 0xaf, 0x14, 0xc5, 0xf9, 0x54, 0x37, + 0xda, 0xba, 0x45, 0x3d, 0xea, 0x66, 0x83, 0x5a, 0xff, 0x51, 0x00, 0xec, 0xda, 0x16, 0x75, 0x6d, + 0xc3, 0x20, 0x2e, 0x22, 0x43, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x14, 0xd4, 0xd8, 0x78, 0x34, 0x4c, + 0x71, 0x5d, 0xb9, 0xae, 0x6c, 0x2c, 0xdc, 0xf9, 0x96, 0x1a, 0x4f, 0x72, 0x44, 0xaf, 0x3a, 0xc7, + 0x03, 0xd6, 0xe0, 0xa9, 0x0c, 0xad, 0x0e, 0x6f, 0xab, 0xbb, 0x87, 0xef, 0x92, 0x3e, 0xed, 0x11, + 0x8a, 0x3b, 0xf0, 0xc9, 0x49, 0x73, 0x66, 0x7c, 0xd2, 0x04, 0x71, 0x1b, 0x8a, 0x58, 0xe1, 0x2e, + 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x55, 0xc8, 0x2e, 0x06, 0xad, 0x22, 0xfc, 0xb3, 0x37, 0x1e, + 0x53, 0x62, 0xb1, 0xf4, 0x3a, 0x97, 0x04, 0x75, 0x65, 0x0b, 0x53, 0x8c, 0x38, 0x11, 0x7c, 0x19, + 0xd4, 0x5c, 0x91, 0x7e, 0xbd, 0x7c, 0x5d, 0xd9, 0x28, 0x77, 0x2e, 0x0b, 0x54, 0x2d, 0x1c, 0x16, + 0x8a, 0x10, 0xad, 0xbf, 0x2a, 0x60, 0x2d, 0x3f, 0xee, 0x6d, 0xdd, 0xa3, 0xf0, 0x9d, 0xdc, 0xd8, + 0xd5, 0xe9, 0xc6, 0xce, 0xa2, 0xf9, 0xc8, 0xa3, 0x07, 0x87, 0x2d, 0x89, 0x71, 0xbf, 0x0d, 0xaa, + 0x3a, 0x25, 0xa6, 0x57, 0x2f, 0x5d, 0x2f, 0x6f, 0x2c, 0xdc, 0xb9, 0xa1, 0xe6, 0x6b, 0x57, 0xcd, + 0x27, 0xd6, 0x59, 0x14, 0x94, 0xd5, 0xb7, 0x58, 0x30, 0x0a, 0x38, 0x5a, 0xff, 0x55, 0xc0, 0xfc, + 0x16, 0x26, 0xa6, 0x6d, 0xed, 0x13, 0x7a, 0x01, 0x8b, 0xd6, 0x05, 0x15, 0xcf, 0x21, 0x7d, 0xb1, + 0x68, 0x5f, 0x93, 0xe5, 0x1e, 0xa5, 0xb3, 0xef, 0x90, 0x7e, 0xbc, 0x50, 0xec, 0x17, 0xe2, 0xc1, + 0xf0, 0x6d, 0x30, 0xeb, 0x51, 0x4c, 0x7d, 0x8f, 0x2f, 0xd3, 0xc2, 0x9d, 0xaf, 0x9f, 0x4e, 0xc3, + 0xa1, 0x9d, 0x25, 0x41, 0x34, 0x1b, 0xfc, 0x46, 0x82, 0xa2, 0xf5, 0xaf, 0x12, 0x80, 0x11, 0xb6, + 0x6b, 0x5b, 0x9a, 0x4e, 0x59, 0xfd, 0xbe, 0x0e, 0x2a, 0x74, 0xe4, 0x10, 0x3e, 0x0d, 0xf3, 0x9d, + 0x1b, 0x61, 0x16, 0xf7, 0x47, 0x0e, 0xf9, 0xf8, 0xa4, 0xb9, 0x96, 0x8f, 0x60, 0x3d, 0x88, 0xc7, + 0xc0, 0xed, 0x28, 0xbf, 0x12, 0x8f, 0xbe, 0x9b, 0x7e, 0xf4, 0xc7, 0x27, 0x4d, 0xc9, 0x61, 0xa1, + 0x46, 0x4c, 0xe9, 0x04, 0xe1, 0x10, 0x40, 0x03, 0x7b, 0xf4, 0xbe, 0x8b, 0x2d, 0x2f, 0x78, 0x92, + 0x6e, 0x12, 0x31, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x16, 0xd1, 0xb9, 0x2a, 0xb2, 0x80, 0xdb, 0x39, + 0x36, 0x24, 0x79, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 
0x28, 0xa2, + 0x09, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x22, 0x98, 0x33, 0x89, 0xe7, 0xe1, 0x01, 0xa9, 0x57, + 0x39, 0x70, 0x59, 0x00, 0xe7, 0x7a, 0x41, 0x33, 0x0a, 0xfb, 0x5b, 0xbf, 0x57, 0xc0, 0x62, 0x34, + 0x73, 0x17, 0xb0, 0x55, 0x3a, 0xe9, 0xad, 0xf2, 0xfc, 0xa9, 0x75, 0x52, 0xb0, 0x43, 0xde, 0x2b, + 0x27, 0x72, 0x66, 0x45, 0x08, 0x7f, 0x02, 0x6a, 0x1e, 0x31, 0x48, 0x9f, 0xda, 0xae, 0xc8, 0xf9, + 0x95, 0x29, 0x73, 0xc6, 0x87, 0xc4, 0xd8, 0x17, 0xa1, 0x9d, 0x4b, 0x2c, 0xe9, 0xf0, 0x17, 0x8a, + 0x28, 0xe1, 0x8f, 0x40, 0x8d, 0x12, 0xd3, 0x31, 0x30, 0x25, 0x62, 0x9b, 0xa4, 0xea, 0x9b, 0x95, + 0x0b, 0x23, 0xdb, 0xb3, 0xb5, 0xfb, 0x02, 0xc6, 0x37, 0x4a, 0x34, 0x0f, 0x61, 0x2b, 0x8a, 0x68, + 0xe0, 0x31, 0x58, 0xf2, 0x1d, 0x8d, 0x21, 0x29, 0x3b, 0xba, 0x07, 0x23, 0x51, 0x3e, 0x37, 0x4f, + 0x9d, 0x90, 0x83, 0x54, 0x48, 0x67, 0x4d, 0x3c, 0x60, 0x29, 0xdd, 0x8e, 0x32, 0xd4, 0x70, 0x13, + 0x2c, 0x9b, 0xba, 0x85, 0x08, 0xd6, 0x46, 0xfb, 0xa4, 0x6f, 0x5b, 0x9a, 0xc7, 0x0b, 0xa8, 0xda, + 0x59, 0x17, 0x04, 0xcb, 0xbd, 0x74, 0x37, 0xca, 0xe2, 0xe1, 0x36, 0x58, 0x0d, 0xcf, 0xd9, 0x1f, + 0xe8, 0x1e, 0xb5, 0xdd, 0xd1, 0xb6, 0x6e, 0xea, 0xb4, 0x3e, 0xcb, 0x79, 0xea, 0xe3, 0x93, 0xe6, + 0x2a, 0x92, 0xf4, 0x23, 0x69, 0x54, 0xeb, 0x37, 0xb3, 0x60, 0x39, 0x73, 0x1a, 0xc0, 0x07, 0x60, + 0xad, 0xef, 0xbb, 0x2e, 0xb1, 0xe8, 0x8e, 0x6f, 0x1e, 0x12, 0x77, 0xbf, 0x7f, 0x44, 0x34, 0xdf, + 0x20, 0x1a, 0x5f, 0xd1, 0x6a, 0xa7, 0x21, 0x72, 0x5d, 0xeb, 0x4a, 0x51, 0xa8, 0x20, 0x1a, 0xfe, + 0x10, 0x40, 0x8b, 0x37, 0xf5, 0x74, 0xcf, 0x8b, 0x38, 0x4b, 0x9c, 0x33, 0xda, 0x80, 0x3b, 0x39, + 0x04, 0x92, 0x44, 0xb1, 0x1c, 0x35, 0xe2, 0xe9, 0x2e, 0xd1, 0xb2, 0x39, 0x96, 0xd3, 0x39, 0x6e, + 0x49, 0x51, 0xa8, 0x20, 0x1a, 0xbe, 0x0a, 0x16, 0x82, 0xa7, 0xf1, 0x39, 0x17, 0x8b, 0xb3, 0x22, + 0xc8, 0x16, 0x76, 0xe2, 0x2e, 0x94, 0xc4, 0xb1, 0xa1, 0xd9, 0x87, 0x1e, 0x71, 0x87, 0x44, 0x7b, + 0x33, 0xf0, 0x00, 0x4c, 0x28, 0xab, 0x5c, 0x28, 0xa3, 0xa1, 0xed, 0xe6, 0x10, 0x48, 0x12, 0xc5, + 0x86, 0x16, 0x54, 0x4d, 0x6e, 0x68, 0xb3, 0xe9, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0x20, 0x9a, 0xd5, + 0x5e, 0x90, 0xf2, 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x5d, 0x7b, 0x3b, 0xe9, + 0x6e, 0x94, 0xc5, 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c, + 0x27, 0x48, 0xae, 0xec, 0x64, 0x01, 0x28, 0x1f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78, + 0x3d, 0x76, 0x6d, 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06, + 0x09, 0x1f, 0x02, 0xd0, 0x0f, 0xe5, 0xc0, 0xab, 0x83, 0x62, 0xa1, 0xcf, 0xeb, 0x50, 0x2c, 0xc0, + 0x51, 0x93, 0x87, 0x12, 0x6c, 0xad, 0xf7, 0x14, 0xb0, 0x5e, 0xb0, 0xc7, 0xe1, 0xf7, 0x52, 0xaa, + 0x77, 0x33, 0xa3, 0x7a, 0xd7, 0x0a, 0xc2, 0x12, 0xd2, 0xd7, 0x07, 0x8b, 0xcc, 0x77, 0xe8, 0xd6, + 0x20, 0x80, 0x88, 0x13, 0xec, 0x25, 0x59, 0xee, 0x28, 0x09, 0x8c, 0x8f, 0xe1, 0x2b, 0xe3, 0x93, + 0xe6, 0x62, 0xaa, 0x0f, 0xa5, 0x39, 0x5b, 0xbf, 0x2c, 0x01, 0xb0, 0x45, 0x1c, 0xc3, 0x1e, 0x99, + 0xc4, 0xba, 0x08, 0xd7, 0xb2, 0x95, 0x72, 0x2d, 0x2d, 0xe9, 0x42, 0x44, 0xf9, 0x14, 0xda, 0x96, + 0xed, 0x8c, 0x6d, 0xf9, 0xc6, 0x04, 0x9e, 0xd3, 0x7d, 0xcb, 0x3f, 0xca, 0x60, 0x25, 0x06, 0xc7, + 0xc6, 0xe5, 0x5e, 0x6a, 0x09, 0x5f, 0xc8, 0x2c, 0xe1, 0xba, 0x24, 0xe4, 0x53, 0x73, 0x2e, 0xef, + 0x82, 0x25, 0xe6, 0x2b, 0x82, 0x55, 0xe3, 0xae, 0x65, 0xf6, 0xcc, 0xae, 0x25, 0x52, 0x9d, 0xed, + 0x14, 0x13, 0xca, 0x30, 0x17, 0xb8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4b, 0xfa, 0x83, 0x02, 0x96, 0xe2, + 0x65, 0xba, 0x00, 0x9b, 0xd4, 0x4d, 0xdb, 0xa4, 0xc6, 0xe9, 0x75, 0x59, 0xe0, 0x93, 0xfe, 0x5e, + 0x49, 0x66, 
0xcd, 0x8d, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x21, 0xab, 0x97, + 0x82, 0x97, 0xa9, 0xa0, 0x0d, 0x45, 0xbd, 0x29, 0x4b, 0x55, 0xfa, 0x74, 0x2d, 0x55, 0xf9, 0x93, + 0xb1, 0x54, 0xf7, 0x41, 0xcd, 0x0b, 0xcd, 0x54, 0x85, 0x53, 0xde, 0x98, 0xb4, 0x9d, 0x85, 0x8f, + 0x8a, 0x58, 0x23, 0x07, 0x15, 0x31, 0xc9, 0xbc, 0x53, 0xf5, 0xb3, 0xf4, 0x4e, 0xac, 0xbc, 0x1d, + 0xec, 0x7b, 0x44, 0xe3, 0x5b, 0xa9, 0x16, 0x97, 0xf7, 0x1e, 0x6f, 0x45, 0xa2, 0x17, 0x1e, 0x80, + 0x75, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x2d, 0x82, 0x35, 0x43, 0xb7, 0x48, 0x38, 0x80, 0x40, + 0xf5, 0xae, 0x8d, 0x4f, 0x9a, 0xeb, 0x7b, 0x72, 0x08, 0x2a, 0x8a, 0x6d, 0xfd, 0xb9, 0x02, 0x2e, + 0x67, 0x4f, 0xc4, 0x02, 0x23, 0xa2, 0x9c, 0xcb, 0x88, 0xbc, 0x9c, 0x28, 0xd1, 0xc0, 0xa5, 0x25, + 0xde, 0xf9, 0x73, 0x65, 0xba, 0x09, 0x96, 0x85, 0xf1, 0x08, 0x3b, 0x85, 0x15, 0x8b, 0x96, 0xe7, + 0x20, 0xdd, 0x8d, 0xb2, 0x78, 0x78, 0x0f, 0x2c, 0xba, 0xdc, 0x5b, 0x85, 0x04, 0x81, 0x3f, 0xf9, + 0x8a, 0x20, 0x58, 0x44, 0xc9, 0x4e, 0x94, 0xc6, 0x32, 0x6f, 0x12, 0x5b, 0x8e, 0x90, 0xa0, 0x92, + 0xf6, 0x26, 0x9b, 0x59, 0x00, 0xca, 0xc7, 0xc0, 0x1e, 0x58, 0xf1, 0xad, 0x3c, 0x55, 0x50, 0x6b, + 0xd7, 0x04, 0xd5, 0xca, 0x41, 0x1e, 0x82, 0x64, 0x71, 0xf0, 0xc7, 0x29, 0xbb, 0x32, 0xcb, 0x4f, + 0x91, 0x17, 0x4e, 0xdf, 0x0e, 0x53, 0xfb, 0x15, 0x89, 0x8f, 0xaa, 0x4d, 0xeb, 0xa3, 0x5a, 0x7f, + 0x52, 0x00, 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0xb9, 0x88, 0x84, 0x44, 0x6a, 0x72, 0x87, 0x73, + 0x73, 0xb2, 0xc3, 0x89, 0x4f, 0xd0, 0xe9, 0x2c, 0x8e, 0x98, 0xde, 0x8b, 0xb9, 0x98, 0x99, 0xc2, + 0xe2, 0xc4, 0xf9, 0x3c, 0x9b, 0xc5, 0x49, 0xf0, 0x9c, 0x6e, 0x71, 0xfe, 0x5d, 0x02, 0x2b, 0x31, + 0x78, 0x6a, 0x8b, 0x23, 0x09, 0xf9, 0xf2, 0x72, 0x66, 0x3a, 0xdb, 0x11, 0x4f, 0xdd, 0xff, 0x89, + 0xed, 0x88, 0x13, 0x2a, 0xb0, 0x1d, 0xbf, 0x2b, 0x25, 0xb3, 0x3e, 0xa3, 0xed, 0xf8, 0x04, 0xae, + 0x2a, 0x3e, 0x77, 0xce, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, 0x29, 0x1d, 0x54, 0x26, + 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x8f, 0x21, 0x21, 0x86, 0x81, 0x82, 0x7e, + 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0xbc, 0x2c, 0x56, 0x9e, 0x55, 0x16, 0xab, + 0xe7, 0x90, 0x45, 0xb9, 0xb3, 0x28, 0x9f, 0xcb, 0x59, 0x4c, 0xad, 0x89, 0x92, 0xe3, 0x6a, 0xe2, + 0x3b, 0xfc, 0xaf, 0x15, 0xb0, 0x26, 0x7f, 0x7d, 0x86, 0x06, 0x58, 0x32, 0xf1, 0xe3, 0xe4, 0xe5, + 0xc5, 0x24, 0xc1, 0xf0, 0xa9, 0x6e, 0xa8, 0xc1, 0xd7, 0x1d, 0xf5, 0x2d, 0x8b, 0xee, 0xba, 0xfb, + 0xd4, 0xd5, 0xad, 0x41, 0x20, 0xb0, 0xbd, 0x14, 0x17, 0xca, 0x70, 0xb7, 0x3e, 0x54, 0xc0, 0x7a, + 0x81, 0xca, 0x5d, 0x6c, 0x26, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, 0xbe, 0xef, 0x0e, 0x42, 0x49, + 0x3e, 0xfb, 0x73, 0xf8, 0x2e, 0xec, 0x09, 0x16, 0x14, 0xf1, 0xb5, 0x76, 0xc1, 0xf5, 0xd4, 0x20, + 0xd9, 0xa6, 0x21, 0x8f, 0x7c, 0x83, 0xef, 0x1f, 0xe1, 0x29, 0x6e, 0x82, 0x79, 0x07, 0xbb, 0x54, + 0x8f, 0xcc, 0x68, 0xb5, 0xb3, 0x38, 0x3e, 0x69, 0xce, 0xef, 0x85, 0x8d, 0x28, 0xee, 0x6f, 0xfd, + 0xaa, 0x04, 0x16, 0x12, 0x24, 0x17, 0xa0, 0xef, 0x6f, 0xa4, 0xf4, 0x5d, 0xfa, 0xc5, 0x24, 0x39, + 0xaa, 0x22, 0x81, 0xef, 0x65, 0x04, 0xfe, 0x9b, 0x93, 0x88, 0x4e, 0x57, 0xf8, 0x8f, 0x4a, 0x60, + 0x35, 0x81, 0x8e, 0x25, 0xfe, 0x3b, 0x29, 0x89, 0xdf, 0xc8, 0x48, 0x7c, 0x5d, 0x16, 0xf3, 0xa5, + 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x31, 0x77, 0x17, 0x20, 0xf2, 0x5b, 0x69, 0x91, + 0x6f, 0x4e, 0xa8, 0x97, 0x02, 0x95, 0x7f, 0x52, 0x4d, 0xe5, 0xfd, 0x85, 0xbf, 0x5d, 0xf8, 0x39, + 0x58, 0x1d, 0xda, 0x86, 0x6f, 0x92, 0xae, 0x81, 0x75, 0x33, 0x04, 0x30, 0x55, 0x64, 0x93, 0xf8, + 0xa2, 0x94, 0x9e, 0xb8, 0x9e, 0xee, 
0x51, 0x62, 0xd1, 0x07, 0x71, 0x64, 0xac, 0xc5, 0x0f, 0x24, + 0x74, 0x48, 0xfa, 0x10, 0xf8, 0x2a, 0x58, 0x60, 0x6a, 0xa6, 0xf7, 0xc9, 0x0e, 0x36, 0xc3, 0x9a, + 0x8a, 0xbe, 0x0f, 0xec, 0xc7, 0x5d, 0x28, 0x89, 0x83, 0x47, 0x60, 0xc5, 0xb1, 0xb5, 0x1e, 0xb6, + 0xf0, 0x80, 0xb0, 0xf3, 0x7f, 0xcf, 0x36, 0xf4, 0xfe, 0x88, 0xdf, 0x3b, 0xcc, 0x77, 0x5e, 0x0b, + 0xdf, 0x29, 0xf7, 0xf2, 0x10, 0xe6, 0xd9, 0x25, 0xcd, 0x7c, 0x3f, 0xcb, 0x28, 0xa1, 0x99, 0xfb, + 0x9c, 0x35, 0x97, 0xfb, 0x1f, 0x00, 0x59, 0x71, 0x9d, 0xf3, 0x83, 0x56, 0xd1, 0x8d, 0x4a, 0xed, + 0x5c, 0x5f, 0xa3, 0x3e, 0xaa, 0x80, 0x2b, 0xb9, 0x03, 0xf2, 0x33, 0xbc, 0xd3, 0xc8, 0x39, 0xaf, + 0xf2, 0x19, 0x9c, 0xd7, 0x26, 0x58, 0x16, 0x1f, 0xc2, 0x32, 0xc6, 0x2d, 0x32, 0xd0, 0xdd, 0x74, + 0x37, 0xca, 0xe2, 0x65, 0x77, 0x2a, 0xd5, 0x33, 0xde, 0xa9, 0x24, 0xb3, 0x10, 0xff, 0xbf, 0x11, + 0x54, 0x5d, 0x3e, 0x0b, 0xf1, 0x6f, 0x1c, 0x59, 0x3c, 0xfc, 0x6e, 0x58, 0x52, 0x11, 0xc3, 0x1c, + 0x67, 0xc8, 0xd4, 0x48, 0x44, 0x90, 0x41, 0x3f, 0xd3, 0xc7, 0x9e, 0x77, 0x24, 0x1f, 0x7b, 0x36, + 0x26, 0x94, 0xf2, 0xf4, 0x56, 0xf1, 0x6f, 0x0a, 0x78, 0xae, 0x70, 0x0f, 0xc0, 0xcd, 0x94, 0xce, + 0xde, 0xca, 0xe8, 0xec, 0xf3, 0x85, 0x81, 0x09, 0xb1, 0x35, 0xe5, 0x17, 0x22, 0x77, 0x27, 0x5e, + 0x88, 0x48, 0x5c, 0xd4, 0xe4, 0x9b, 0x91, 0xce, 0xc6, 0x93, 0xa7, 0x8d, 0x99, 0xf7, 0x9f, 0x36, + 0x66, 0x3e, 0x78, 0xda, 0x98, 0xf9, 0xc5, 0xb8, 0xa1, 0x3c, 0x19, 0x37, 0x94, 0xf7, 0xc7, 0x0d, + 0xe5, 0x83, 0x71, 0x43, 0xf9, 0xe7, 0xb8, 0xa1, 0xfc, 0xf6, 0xc3, 0xc6, 0xcc, 0xc3, 0xd2, 0xf0, + 0xf6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x59, 0xb3, 0x11, 0xc0, 0x12, 0x26, 0x00, 0x00, +} + +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } 
+ i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
+} + +func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 
0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + 
return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Partition != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a + if len(m.VolumeClaimTemplates) > 0 { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if 
m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func 
(m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + +func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", 
"runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DaemonSetStatus{`, + 
`CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", 
this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), 
"LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateStatefulSetStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, + `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, + `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberMisscheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) + } + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
m.NumberReady |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) + } + m.UpdatedNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) + } + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) + } + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated 
+ } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = 
StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= int32(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, 
ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto new file mode 100644 index 0000000..6c55279 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -0,0 +1,701 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.apps.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + +// DaemonSet represents the configuration of a daemon set. 
+message DaemonSet { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional DaemonSetSpec spec = 2; + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional DaemonSetStatus status = 3; +} + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of daemon sets. + repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. +message DaemonSetSpec { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 currentNumberScheduled = 1; + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 numberMisscheduled = 2; + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 desiredNumberScheduled = 3; + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if type = "RollingUpdate". + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; +} + +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. 
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + // +patchStrategy=retainKeys + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. 
+ // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +message ReplicaSetStatus { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replica set. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicaSetCondition conditions = 6; +} + +// Spec to control the desired behavior of daemon set rolling update. +message RollingUpdateDaemonSet { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. 
+ // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new ReplicaSet can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +message RollingUpdateStatefulSetStrategy { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + optional int32 partition = 1; +} + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. 
+message StatefulSetSpec { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. 
+ // +optional + optional int64 observedGeneration = 1; + + // replicas is the number of Pods created by the StatefulSet controller. + optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; +} + diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go new file mode 100644 index 0000000..0271010 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/register.go @@ -0,0 +1,60 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &StatefulSet{}, + &StatefulSetList{}, + &DaemonSet{}, + &DaemonSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go new file mode 100644 index 0000000..e003a0c --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -0,0 +1,826 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + ControllerRevisionHashLabelKey = "controller-revision-hash" + StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + DeprecatedRollbackTo = "deprecated.deployment.rollback.to" + DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" +) + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. 
+ // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodManagementPolicyType defines the policy for creating pods under a stateful set. +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement PodManagementPolicyType = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet controller. +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" + // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version + // tracking and ordered rolling restarts are disabled. Pods are recreated + // from the StatefulSetSpec when they are manually deleted. When a scale + // operation is performed with this strategy,specification version indicated + // by the StatefulSet's currentRevision. + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" +) + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. 
+ // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. 
+type StatefulSetStatus struct { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // replicas is the number of Pods created by the StatefulSet controller. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSetList is a collection of StatefulSets. 
+type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. 
+ ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets + // to select new pods (and old pods being select by new ReplicaSet). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new ReplicaSet can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. 
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. 
+ // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // DefaultDaemonSetUniqueLabelKey is the default label key that is added + // to existing DaemonSet pods to distinguish between old and new + // DaemonSet pods during DaemonSet template updates. + DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of daemon sets. + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. 
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	// +optional
+	Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"`
+
+	// Minimum number of seconds for which a newly created pod should be ready
+	// without any of its container crashing, for it to be considered available.
+	// Defaults to 0 (pod will be considered available as soon as it is ready)
+	// +optional
+	MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"`
+
+	// Selector is a label query over pods that should match the replica count.
+	// Label keys and values that must match in order to be controlled by this replica set.
+	// It must match the pod template's labels.
+	// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+	Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"`
+
+	// Template is the object that describes the pod that will be created if
+	// insufficient replicas are detected.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+	// +optional
+	Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"`
+}
+
+// ReplicaSetStatus represents the current status of a ReplicaSet.
+type ReplicaSetStatus struct {
+	// Replicas is the most recently observed number of replicas.
+	// More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller
+	Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"`
+
+	// The number of pods that have labels matching the labels of the pod template of the replicaset.
+	// +optional
+	FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"`
+
+	// The number of ready replicas for this replica set.
+	// +optional
+	ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"`
+
+	// The number of available replicas (ready for at least minReadySeconds) for this replica set.
+	// +optional
+	AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"`
+
+	// ObservedGeneration reflects the generation of the most recently observed ReplicaSet.
+	// +optional
+	ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"`
+
+	// Represents the latest available observations of a replica set's current state.
+	// +optional
+	// +patchMergeKey=type
+	// +patchStrategy=merge
+	Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"`
+}
+
+type ReplicaSetConditionType string
+
+// These are valid conditions of a replica set.
+const (
+	// ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created
+	// due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted
+	// due to kubelet being down or finalizers are failing.
+	ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure"
+)
+
+// ReplicaSetCondition describes the state of a replica set at a certain point.
+type ReplicaSetCondition struct {
+	// Type of replica set condition.
+ Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..3f0299d --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -0,0 +1,365 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ControllerRevision = map[string]string{ + "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + +var map_DaemonSet = map[string]string{ + "": "DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet +} + +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "A list of daemon sets.", +} + +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList +} + +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", +} + +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec +} + +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", +} + +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus +} + +var map_DaemonSetUpdateStrategy = map[string]string{ + "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + +var map_Deployment = map[string]string{ + "": "Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. 
It must match the pod template's labels.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "paused": "Indicates that the deployment is paused.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_ReplicaSet = map[string]string{ + "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+}
+
+func (ReplicaSet) SwaggerDoc() map[string]string {
+	return map_ReplicaSet
+}
+
+var map_ReplicaSetCondition = map[string]string{
+	"":                   "ReplicaSetCondition describes the state of a replica set at a certain point.",
+	"type":               "Type of replica set condition.",
+	"status":             "Status of the condition, one of True, False, Unknown.",
+	"lastTransitionTime": "The last time the condition transitioned from one status to another.",
+	"reason":             "The reason for the condition's last transition.",
+	"message":            "A human readable message indicating details about the transition.",
+}
+
+func (ReplicaSetCondition) SwaggerDoc() map[string]string {
+	return map_ReplicaSetCondition
+}
+
+var map_ReplicaSetList = map[string]string{
+	"":         "ReplicaSetList is a collection of ReplicaSets.",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+	"items":    "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
+}
+
+func (ReplicaSetList) SwaggerDoc() map[string]string {
+	return map_ReplicaSetList
+}
+
+var map_ReplicaSetSpec = map[string]string{
+	"":                "ReplicaSetSpec is the specification of a ReplicaSet.",
+	"replicas":        "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller",
+	"minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)",
+	"selector":        "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors",
+	"template":        "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template",
+}
+
+func (ReplicaSetSpec) SwaggerDoc() map[string]string {
+	return map_ReplicaSetSpec
+}
+
+var map_ReplicaSetStatus = map[string]string{
+	"":                     "ReplicaSetStatus represents the current status of a ReplicaSet.",
+	"replicas":             "Replicas is the most recently observed number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RollingUpdateStatefulSetStrategy = map[string]string{ + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", +} + +func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { + return map_RollingUpdateStatefulSetStrategy +} + +var map_StatefulSet = map[string]string{ + "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. 
This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..7b7ff38 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -0,0 +1,772 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. 
+func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. 
+func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. +func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]corev1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. +func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. 
+func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go new file mode 100644 index 0000000..9072bab --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go new file mode 100644 index 0000000..f81b559 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -0,0 +1,6246 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{2} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{3} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{4} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{5} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{6} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{7} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = 
DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{8} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{9} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() +} +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{10} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{11} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + 
xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{12} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{13} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{14} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{15} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{16} +} +func (m *StatefulSetCondition) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{17} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{18} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{19} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_2a07313e8f66e805, []int{20} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + 
return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta1.ControllerRevisionList") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") + proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.apps.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus.SelectorEntry") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2a07313e8f66e805) +} + +var fileDescriptor_2a07313e8f66e805 = []byte{ + // 1855 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35, + 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, + 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab, + 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, + 
0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, + 0x77, 0xdb, 0x6d, 0xa4, 0xb5, 0x04, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, + 0xc0, 0x0f, 0xce, 0xde, 0xf3, 0x54, 0xdd, 0xee, 0x9d, 0xf9, 0x03, 0xea, 0x5a, 0x94, 0x51, 0xaf, + 0x37, 0xa6, 0xd6, 0xd0, 0x76, 0x7b, 0x52, 0x40, 0x1c, 0xbd, 0x47, 0x1c, 0xc7, 0xeb, 0x8d, 0x6f, + 0x0f, 0x28, 0x23, 0xb7, 0x7b, 0x23, 0x6a, 0x51, 0x97, 0x30, 0x3a, 0x54, 0x1d, 0xd7, 0x66, 0x36, + 0x5a, 0x0b, 0x14, 0x55, 0xe2, 0xe8, 0x2a, 0x57, 0x54, 0xa5, 0xe2, 0xfa, 0xf6, 0x48, 0x67, 0x8f, + 0xfc, 0x81, 0xaa, 0xd9, 0x66, 0x6f, 0x64, 0x8f, 0xec, 0x9e, 0xd0, 0x1f, 0xf8, 0x0f, 0xc5, 0x97, + 0xf8, 0x10, 0xbf, 0x02, 0x9c, 0xf5, 0x6e, 0xc2, 0xa1, 0x66, 0xbb, 0xb4, 0x37, 0xce, 0xf9, 0x5a, + 0x7f, 0x27, 0xd6, 0x31, 0x89, 0xf6, 0x48, 0xb7, 0xa8, 0x3b, 0xe9, 0x39, 0x67, 0x23, 0xbe, 0xe0, + 0xf5, 0x4c, 0xca, 0x48, 0x91, 0x55, 0xaf, 0xcc, 0xca, 0xf5, 0x2d, 0xa6, 0x9b, 0x34, 0x67, 0xf0, + 0xee, 0x65, 0x06, 0x9e, 0xf6, 0x88, 0x9a, 0x24, 0x67, 0xf7, 0x76, 0x99, 0x9d, 0xcf, 0x74, 0xa3, + 0xa7, 0x5b, 0xcc, 0x63, 0x6e, 0xd6, 0xa8, 0xfb, 0x2f, 0x05, 0xd0, 0x9e, 0x6d, 0x31, 0xd7, 0x36, + 0x0c, 0xea, 0x62, 0x3a, 0xd6, 0x3d, 0xdd, 0xb6, 0xd0, 0xe7, 0xd0, 0xe4, 0xe7, 0x19, 0x12, 0x46, + 0xda, 0xca, 0xa6, 0xb2, 0xb5, 0xb8, 0xf3, 0x96, 0x1a, 0xdf, 0x74, 0x04, 0xaf, 0x3a, 0x67, 0x23, + 0xbe, 0xe0, 0xa9, 0x5c, 0x5b, 0x1d, 0xdf, 0x56, 0x0f, 0x07, 0x5f, 0x50, 0x8d, 0x1d, 0x50, 0x46, + 0xfa, 0xe8, 0xc9, 0xf9, 0xc6, 0xcc, 0xf4, 0x7c, 0x03, 0xe2, 0x35, 0x1c, 0xa1, 0xa2, 0x43, 0xa8, + 0x0b, 0xf4, 0x9a, 0x40, 0xdf, 0x2e, 0x45, 0x97, 0x87, 0x56, 0x31, 0xf9, 0xc5, 0xfd, 0xc7, 0x8c, + 0x5a, 0x7c, 0x7b, 0xfd, 0x17, 0x24, 0x74, 0xfd, 0x1e, 0x61, 0x04, 0x0b, 0x20, 0xf4, 0x26, 0x34, + 0x5d, 0xb9, 0xfd, 0xf6, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbf, 0x21, 0xb5, 0x9a, 0xe1, 0xb1, 0x70, + 0xa4, 0xd1, 0x7d, 0xa2, 0xc0, 0xad, 0xfc, 0xb9, 0xf7, 0x75, 0x8f, 0xa1, 0x9f, 0xe5, 0xce, 0xae, + 0x56, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 0x8f, 0x1c, 0x87, 0x2b, 0x89, 0x73, 0x1f, 0x41, 0x43, 0x67, + 0xd4, 0xf4, 0xda, 0xb5, 0xcd, 0xd9, 0xad, 0xc5, 0x9d, 0x37, 0xd4, 0x92, 0x00, 0x56, 0xf3, 0xbb, + 0xeb, 0x2f, 0x49, 0xdc, 0xc6, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0xaf, 0x6b, 0x00, 0xf7, 0xa8, + 0x63, 0xd8, 0x13, 0x93, 0x5a, 0xec, 0x1a, 0x9e, 0xee, 0x01, 0xd4, 0x3d, 0x87, 0x6a, 0xf2, 0xe9, + 0x5e, 0x2d, 0x3d, 0x41, 0xbc, 0xa9, 0x63, 0x87, 0x6a, 0xf1, 0xa3, 0xf1, 0x2f, 0x2c, 0x20, 0xd0, + 0x27, 0x30, 0xe7, 0x31, 0xc2, 0x7c, 0x4f, 0x3c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, 0x41, + 0xbf, 0x25, 0xe1, 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9d, 0x85, 0x95, 0x58, 0x79, 0xcf, + 0xb6, 0x86, 0x3a, 0xe3, 0x21, 0x7d, 0x17, 0xea, 0x6c, 0xe2, 0x50, 0x71, 0x27, 0x0b, 0xfd, 0x57, + 0xc3, 0xcd, 0x9c, 0x4c, 0x1c, 0xfa, 0xec, 0x7c, 0x63, 0xad, 0xc0, 0x84, 0x8b, 0xb0, 0x30, 0x42, + 0xfb, 0xd1, 0x3e, 0x6b, 0xc2, 0xfc, 0x9d, 0xb4, 0xf3, 0x67, 0xe7, 0x1b, 0x05, 0x05, 0x44, 0x8d, + 0x90, 0xd2, 0x5b, 0x44, 0x5f, 0x40, 0xcb, 0x20, 0x1e, 0x3b, 0x75, 0x86, 0x84, 0xd1, 0x13, 0xdd, + 0xa4, 0xed, 0x39, 0x71, 0xfa, 0xd7, 0xab, 0x3d, 0x14, 0xb7, 0xe8, 0xdf, 0x92, 0x3b, 0x68, 0xed, + 0xa7, 0x90, 0x70, 0x06, 0x19, 0x8d, 0x01, 0xf1, 0x95, 0x13, 0x97, 0x58, 0x5e, 0x70, 0x2a, 0xee, + 0x6f, 0xfe, 0xca, 0xfe, 0xd6, 0xa5, 0x3f, 0xb4, 0x9f, 0x43, 0xc3, 0x05, 0x1e, 0xd0, 0x2b, 0x30, + 0xe7, 0x52, 0xe2, 0xd9, 0x56, 0xbb, 0x2e, 0x6e, 0x2c, 0x7a, 0x2e, 0x2c, 0x56, 0xb1, 0x94, 0xa2, + 0xd7, 0x60, 0xde, 0xa4, 0x9e, 0x47, 0x46, 0xb4, 0xdd, 0x10, 0x8a, 0xcb, 0x52, 0x71, 0xfe, 0x20, + 0x58, 0xc6, 0xa1, 0xbc, 
0xfb, 0x7b, 0x05, 0x5a, 0xf1, 0x33, 0x5d, 0x43, 0xae, 0x7e, 0x94, 0xce, + 0xd5, 0xef, 0x56, 0x08, 0xce, 0x92, 0x1c, 0xfd, 0x7b, 0x0d, 0x50, 0xac, 0x84, 0x6d, 0xc3, 0x18, + 0x10, 0xed, 0x0c, 0x6d, 0x42, 0xdd, 0x22, 0x66, 0x18, 0x93, 0x51, 0x82, 0xfc, 0x88, 0x98, 0x14, + 0x0b, 0x09, 0xfa, 0x52, 0x01, 0xe4, 0x8b, 0xd7, 0x1c, 0xee, 0x5a, 0x96, 0xcd, 0x08, 0xbf, 0xe0, + 0x70, 0x43, 0x7b, 0x15, 0x36, 0x14, 0xfa, 0x52, 0x4f, 0x73, 0x28, 0xf7, 0x2d, 0xe6, 0x4e, 0xe2, + 0x87, 0xcd, 0x2b, 0xe0, 0x02, 0xd7, 0xe8, 0xa7, 0x00, 0xae, 0xc4, 0x3c, 0xb1, 0x65, 0xda, 0x96, + 0xd7, 0x80, 0xd0, 0xfd, 0x9e, 0x6d, 0x3d, 0xd4, 0x47, 0x71, 0x61, 0xc1, 0x11, 0x04, 0x4e, 0xc0, + 0xad, 0xdf, 0x87, 0xb5, 0x92, 0x7d, 0xa2, 0x1b, 0x30, 0x7b, 0x46, 0x27, 0xc1, 0x55, 0x61, 0xfe, + 0x13, 0xad, 0x42, 0x63, 0x4c, 0x0c, 0x9f, 0x06, 0x39, 0x89, 0x83, 0x8f, 0x3b, 0xb5, 0xf7, 0x94, + 0xee, 0xef, 0x1a, 0xc9, 0x48, 0xe1, 0xf5, 0x06, 0x6d, 0xf1, 0xf6, 0xe0, 0x18, 0xba, 0x46, 0x3c, + 0x81, 0xd1, 0xe8, 0xbf, 0x10, 0xb4, 0x86, 0x60, 0x0d, 0x47, 0x52, 0xf4, 0x73, 0x68, 0x7a, 0xd4, + 0xa0, 0x1a, 0xb3, 0x5d, 0x59, 0xe2, 0xde, 0xae, 0x18, 0x53, 0x64, 0x40, 0x8d, 0x63, 0x69, 0x1a, + 0xc0, 0x87, 0x5f, 0x38, 0x82, 0x44, 0x9f, 0x40, 0x93, 0x51, 0xd3, 0x31, 0x08, 0xa3, 0xf2, 0xf6, + 0x52, 0x71, 0xc5, 0x6b, 0x07, 0x07, 0x3b, 0xb2, 0x87, 0x27, 0x52, 0x4d, 0x54, 0xcf, 0x28, 0x4e, + 0xc3, 0x55, 0x1c, 0xc1, 0xa0, 0x9f, 0x40, 0xd3, 0x63, 0xbc, 0xab, 0x8f, 0x26, 0x22, 0xdb, 0x2e, + 0x6a, 0x2b, 0xc9, 0x3a, 0x1a, 0x98, 0xc4, 0xd0, 0xe1, 0x0a, 0x8e, 0xe0, 0xd0, 0x2e, 0x2c, 0x9b, + 0xba, 0x85, 0x29, 0x19, 0x4e, 0x8e, 0xa9, 0x66, 0x5b, 0x43, 0x4f, 0xa4, 0x69, 0xa3, 0xbf, 0x26, + 0x8d, 0x96, 0x0f, 0xd2, 0x62, 0x9c, 0xd5, 0x47, 0xfb, 0xb0, 0x1a, 0xb6, 0xdd, 0x8f, 0x74, 0x8f, + 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xa3, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, 0xe2, + 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x75, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x86, 0x35, 0xe3, 0xba, + 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x1f, 0xa7, 0xc2, 0xb4, 0x79, 0xb5, 0x30, 0x6d, 0x95, 0x87, + 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, 0xa2, + 0xe1, 0xcd, 0x2c, 0x88, 0x13, 0xbd, 0x34, 0x3d, 0xdf, 0x58, 0x3b, 0x2a, 0x56, 0xc1, 0x65, 0xb6, + 0xdd, 0x3f, 0xd5, 0xe1, 0x46, 0xb6, 0xc7, 0xa1, 0x8f, 0x01, 0xd9, 0x03, 0x8f, 0xba, 0x63, 0x3a, + 0xfc, 0x30, 0x18, 0xdc, 0xf8, 0x74, 0xa3, 0x88, 0xe9, 0x26, 0xca, 0xdb, 0xc3, 0x9c, 0x06, 0x2e, + 0xb0, 0x0a, 0xe6, 0x23, 0x99, 0x00, 0x35, 0xb1, 0xd1, 0xc4, 0x7c, 0x94, 0x4b, 0x82, 0x5d, 0x58, + 0x96, 0xb9, 0x1f, 0x0a, 0x45, 0xb0, 0x26, 0xde, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74, 0x17, + 0x96, 0x5c, 0x1e, 0x07, 0x11, 0xc0, 0xbc, 0x00, 0xf8, 0x96, 0x04, 0x58, 0xc2, 0x49, 0x21, 0x4e, + 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x75, 0x01, 0xf0, + 0xa2, 0x04, 0xb8, 0xb9, 0x9b, 0x55, 0xc0, 0x79, 0x1b, 0x74, 0x00, 0x2b, 0xbe, 0x95, 0x87, 0x0a, + 0x82, 0xf8, 0x25, 0x09, 0xb5, 0x72, 0x9a, 0x57, 0xc1, 0x45, 0x76, 0xe8, 0x73, 0x00, 0x2d, 0xec, + 0xea, 0x5e, 0x7b, 0x4e, 0x94, 0xe1, 0x37, 0x2b, 0x24, 0x5b, 0x34, 0x0a, 0xc4, 0x25, 0x30, 0x5a, + 0xf2, 0x70, 0x02, 0x13, 0xdd, 0x81, 0x96, 0x66, 0x1b, 0x86, 0x88, 0xfc, 0x3d, 0xdb, 0xb7, 0x98, + 0x08, 0xde, 0x46, 0x1f, 0xf1, 0x66, 0xbf, 0x97, 0x92, 0xe0, 0x8c, 0x66, 0xf7, 0x8f, 0x4a, 0xb2, + 0xcd, 0x84, 0xe9, 0x8c, 0xee, 0xa4, 0x46, 0x9f, 0x57, 0x32, 0xa3, 0xcf, 0xad, 0xbc, 0x45, 0x62, + 0xf2, 0xd1, 0x61, 0x89, 0x07, 0xbf, 0x6e, 0x8d, 0x82, 0x07, 0x97, 0x25, 0xf1, 0xad, 0x0b, 0x53, + 0x29, 0xd2, 0x4e, 0x34, 0xc6, 0x9b, 0xe2, 0xcd, 
0x93, 0x42, 0x9c, 0x46, 0xee, 0x7e, 0x00, 0xad, + 0x74, 0x1e, 0xa6, 0x66, 0x7a, 0xe5, 0xd2, 0x99, 0xfe, 0x1b, 0x05, 0xd6, 0x4a, 0xbc, 0x23, 0x03, + 0x5a, 0x26, 0x79, 0x9c, 0x78, 0xe6, 0x4b, 0x67, 0x63, 0xce, 0x9a, 0xd4, 0x80, 0x35, 0xa9, 0x0f, + 0x2c, 0x76, 0xe8, 0x1e, 0x33, 0x57, 0xb7, 0x46, 0xc1, 0x3b, 0x1c, 0xa4, 0xb0, 0x70, 0x06, 0x1b, + 0x7d, 0x06, 0x4d, 0x93, 0x3c, 0x3e, 0xf6, 0xdd, 0x51, 0xd1, 0x7d, 0x55, 0xf3, 0x23, 0xfa, 0xc7, + 0x81, 0x44, 0xc1, 0x11, 0x5e, 0xf7, 0x10, 0x36, 0x53, 0x87, 0xe4, 0xa5, 0x82, 0x3e, 0xf4, 0x8d, + 0x63, 0x1a, 0x3f, 0xf8, 0x1b, 0xb0, 0xe0, 0x10, 0x97, 0xe9, 0x51, 0xb9, 0x68, 0xf4, 0x97, 0xa6, + 0xe7, 0x1b, 0x0b, 0x47, 0xe1, 0x22, 0x8e, 0xe5, 0xdd, 0x7f, 0x2b, 0xd0, 0x38, 0xd6, 0x88, 0x41, + 0xaf, 0x81, 0x3a, 0xdc, 0x4b, 0x51, 0x87, 0x6e, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, + 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, + 0xe5, 0xb2, 0x2a, 0xd9, 0xfd, 0x6d, 0x0d, 0x16, 0x13, 0x2e, 0xae, 0x66, 0xcd, 0xaf, 0x3b, 0x31, + 0x68, 0xf0, 0x4a, 0xb2, 0x53, 0xe5, 0x20, 0x6a, 0x38, 0x54, 0x04, 0xf3, 0x5b, 0xdc, 0xbd, 0xf3, + 0xb3, 0xc6, 0x07, 0xd0, 0x62, 0xc4, 0x1d, 0x51, 0x16, 0xca, 0xc4, 0x85, 0x2d, 0xc4, 0xe4, 0xe1, + 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, + 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, + 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x47, 0x0d, 0x56, 0x13, + 0xda, 0x31, 0x37, 0xfd, 0x7e, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, + 0xd3, 0x62, 0xc2, 0x38, 0xfb, 0xbf, 0x48, 0x18, 0xff, 0xa0, 0xc0, 0x72, 0xe2, 0xee, 0xae, 0x81, + 0x31, 0x3e, 0x48, 0x33, 0xc6, 0x97, 0xab, 0x04, 0x4d, 0x09, 0x65, 0xfc, 0x73, 0x23, 0xb5, 0xf9, + 0xff, 0x7b, 0x12, 0xf3, 0x4b, 0x58, 0x1d, 0xdb, 0x86, 0x6f, 0xd2, 0x3d, 0x83, 0xe8, 0x66, 0xa8, + 0xc0, 0x87, 0xbe, 0xd9, 0xec, 0x1f, 0x43, 0x11, 0x3c, 0x75, 0x3d, 0xdd, 0x63, 0xd4, 0x62, 0x9f, + 0xc6, 0x96, 0xfd, 0x6f, 0x4b, 0x27, 0xab, 0x9f, 0x16, 0xc0, 0xe1, 0x42, 0x27, 0xe8, 0x7b, 0xb0, + 0xc8, 0x07, 0x66, 0x5d, 0xa3, 0x9c, 0x7b, 0xcb, 0xc0, 0x5a, 0x91, 0x40, 0x8b, 0xc7, 0xb1, 0x08, + 0x27, 0xf5, 0xd0, 0x23, 0x58, 0x71, 0xec, 0xe1, 0x01, 0xb1, 0xc8, 0x88, 0xf2, 0x31, 0xe3, 0xc8, + 0x36, 0x74, 0x6d, 0x22, 0x98, 0xcd, 0x42, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c, 0xe3, + 0x14, 0x21, 0xbf, 0x2c, 0x92, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0x65, 0xbb, 0x97, 0x44, 0x2f, + 0xf8, 0x0b, 0x67, 0xa7, 0x4a, 0x84, 0x9d, 0xa6, 0x2c, 0xe3, 0xea, 0x9f, 0x5e, 0xc7, 0x19, 0x0f, + 0xa5, 0xc4, 0xad, 0xf9, 0xdf, 0x10, 0xb7, 0xee, 0x3f, 0xeb, 0x70, 0x33, 0x57, 0x2a, 0xd1, 0x0f, + 0x2f, 0x60, 0x38, 0xb7, 0x9e, 0x1b, 0xbb, 0xc9, 0x51, 0x93, 0xd9, 0x2b, 0x50, 0x93, 0x5d, 0x58, + 0xd6, 0x7c, 0xd7, 0xa5, 0x16, 0xcb, 0x10, 0x93, 0x88, 0x1a, 0xed, 0xa5, 0xc5, 0x38, 0xab, 0x5f, + 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, 0xbb, 0xfc, 0x2e, 0xe4, + 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, 0x83, 0xd3, 0x94, 0x14, + 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x47, 0x81, 0xc8, 0xf1, 0xed, + 0x2a, 0xb1, 0x5c, 0x99, 0x48, 0x75, 0xff, 0xa2, 0xc0, 0x8b, 0xa5, 0x49, 0x80, 0x76, 0x53, 0x2d, + 0x77, 0x3b, 0xd3, 0x72, 0xbf, 0x53, 0x6a, 0x98, 0xe8, 0xbb, 0x6e, 0x31, 0x35, 0x7a, 0xbf, 0x1a, + 0x35, 0x2a, 0x98, 0xdb, 0x2f, 0xe7, 0x48, 0xfd, 0xed, 0x27, 0x4f, 0x3b, 0x33, 0x5f, 0x3d, 0xed, + 0xcc, 0x7c, 0xfd, 0xb4, 0x33, 0xf3, 0xab, 0x69, 0x47, 0x79, 0x32, 0xed, 
0x28, 0x5f, 0x4d, 0x3b, + 0xca, 0xd7, 0xd3, 0x8e, 0xf2, 0xb7, 0x69, 0x47, 0xf9, 0xcd, 0x37, 0x9d, 0x99, 0xcf, 0xe6, 0xa5, + 0xc7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x99, 0x8d, 0x1e, 0xaf, 0x61, 0x1b, 0x00, 0x00, +} + +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.UpdatedAnnotations) > 0 { + keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) + for k := range m.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 + } + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = 
encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Partition != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Scale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], 
m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a + if len(m.VolumeClaimTemplates) > 0 { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] 
= 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if 
len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + +func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + 
`Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", 
this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateStatefulSetStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, + `Partition:` + valueToStringGenerated(this.Partition) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *StatefulSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StatefulSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StatefulSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" + for _, f := range this.VolumeClaimTemplates { + repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeClaimTemplates += "}" + s := strings.Join([]string{`&StatefulSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]StatefulSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&StatefulSetStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, + `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *StatefulSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, + 
`Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ControllerRevision) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ControllerRevision{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end 
group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy 
< 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.ObservedGeneration = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { 
+ return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto new file mode 100644 index 0000000..694f657 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -0,0 +1,484 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.apps.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. 
ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DEPRECATED. +// DeploymentRollback stores the information required to rollback a deployment. +message DeploymentRollback { + // Required: This must match the Name of a deployment. + optional string name = 1; + + // The annotations to be updated to a deployment + // +optional + map<string, string> updatedAnnotations = 2; + + // The config of this deployment rollback. + optional RollbackConfig rollbackTo = 3; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. 
+ // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + // +patchStrategy=retainKeys + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 2. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // DEPRECATED. + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + optional RollbackConfig rollbackTo = 8; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + // +optional + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". 
Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// DEPRECATED. +message RollbackConfig { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + optional int64 revision = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new ReplicaSet can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +message RollingUpdateStatefulSetStrategy { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + optional int32 partition = 1; +} + +// Scale represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + optional ScaleSpec spec = 2; + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// ScaleSpec describes the attributes of a scale subresource +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// ScaleStatus represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. 
+ optional int32 replicas = 1; + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map<string, string> selector = 2; + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + optional string targetSelector = 3; +} + +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for +// more information. +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. +message StatefulSetSpec { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + optional int32 replicas = 1; + + // selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. 
+ optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 1; + + // replicas is the number of Pods created by the StatefulSet controller. + optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). 
+ optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; +} + diff --git a/vendor/k8s.io/api/apps/v1beta1/register.go b/vendor/k8s.io/api/apps/v1beta1/register.go new file mode 100644 index 0000000..5b16819 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &Scale{}, + &StatefulSet{}, + &StatefulSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go new file mode 100644 index 0000000..b77fcf7 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -0,0 +1,567 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + ControllerRevisionHashLabelKey = "controller-revision-hash" + StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" +) + +// ScaleSpec describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
+ // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for +// more information. +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodManagementPolicyType defines the policy for creating pods under a stateful set. +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement PodManagementPolicyType = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet controller. +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" + // OnDeleteStatefulSetStrategyType triggers the legacy behavior. 
Version + // tracking and ordered rolling restarts are disabled. Pods are recreated + // from the StatefulSetSpec when they are manually deleted. When a scale + // operation is performed with this strategy,specification version indicated + // by the StatefulSet's currentRevision. + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" +) + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // selector is a label query over pods that should match the replica count. + // If empty, defaulted to labels on the pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. 
+ // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // replicas is the number of Pods created by the StatefulSet controller. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. 
+ // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 2. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // DEPRECATED. + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + // +optional + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED. +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta `json:",inline"` + // Required: This must match the Name of a deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // The config of this deployment rollback. + RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` +} + +// DEPRECATED. +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets + // to select new pods (and old pods being select by new ReplicaSet). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. 
+ RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new ReplicaSet can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. 
+ // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. 
+// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..504b858 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,273 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ControllerRevision = map[string]string{ + "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. 
Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + +var map_Deployment = map[string]string{ + "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentRollback = map[string]string{ + "": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.", + "name": "Required: This must match the Name of a deployment.", + "updatedAnnotations": "The annotations to be updated to a deployment", + "rollbackTo": "The config of this deployment rollback.", +} + +func (DeploymentRollback) SwaggerDoc() map[string]string { + return map_DeploymentRollback +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. 
This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", + "paused": "Indicates that the deployment is paused.", + "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_RollbackConfig = map[string]string{ + "": "DEPRECATED.", + "revision": "The revision to rollback to. 
If set to 0, rollback to the last revision.", +} + +func (RollbackConfig) SwaggerDoc() map[string]string { + return map_RollbackConfig +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RollingUpdateStatefulSetStrategy = map[string]string{ + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned.", +} + +func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { + return map_RollingUpdateStatefulSetStrategy +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. 
This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +var map_StatefulSet = map[string]string{ + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. 
Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. 
It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..fb27612 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,594 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RollbackTo = in.RollbackTo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. +func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { + if in == nil { + return nil + } + out := new(DeploymentRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. +func (in *RollbackConfig) DeepCopy() *RollbackConfig { + if in == nil { + return nil + } + out := new(RollbackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. +func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. +func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. +func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]corev1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. +func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go new file mode 100644 index 0000000..9f49986 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v1beta2 // import "k8s.io/api/apps/v1beta2" diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go new file mode 100644 index 0000000..8a9f200 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -0,0 +1,9014 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto + +package v1beta2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{0} +} +func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevision) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevision.Merge(m, src) +} +func (m *ControllerRevision) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevision) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevision.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo + +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{1} +} +func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ControllerRevisionList.Merge(m, src) +} +func (m *ControllerRevisionList) XXX_Size() int { + return m.Size() +} +func (m *ControllerRevisionList) XXX_DiscardUnknown() { + xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) +} + +var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{2} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet 
proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{3} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{4} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{5} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{6} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} 
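
For context on the generated accessors above: each of these vendored types satisfies the gogo/protobuf Message interface (Reset, String, ProtoMessage), so it can be round-tripped with proto.Marshal / proto.Unmarshal, which delegate to the generated XXX_Marshal and XXX_Unmarshal methods. A minimal sketch of such a round trip follows; it is illustrative only, not part of the vendored file, and the field values are assumptions chosen for the example.

package main

import (
	"fmt"

	proto "github.com/gogo/protobuf/proto"
	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	// Encode a condition using the generated wire codec (values are illustrative).
	in := &appsv1beta2.DaemonSetCondition{
		Type:    "Available",
		Status:  "True",
		Reason:  "Example",
		Message: "illustrative round-trip of a generated message",
	}
	raw, err := proto.Marshal(in) // calls the generated XXX_Marshal
	if err != nil {
		panic(err)
	}

	// Decode into a fresh value via the generated XXX_Unmarshal.
	out := &appsv1beta2.DaemonSetCondition{}
	if err := proto.Unmarshal(raw, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Type, out.Status)
}
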
+func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{7} +} +func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{8} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{9} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{10} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, 
[]int{11} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{12} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{13} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{14} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{15} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{16} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{17} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{18} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{19} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{20} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + +func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } +func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} +func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{21} +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) +} +func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{22} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{23} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{24} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{25} +} +func (m *StatefulSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSet.Merge(m, src) +} +func (m *StatefulSet) XXX_Size() int { + return m.Size() +} +func (m *StatefulSet) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSet.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSet proto.InternalMessageInfo + +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{26} +} +func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetCondition.Merge(m, src) +} +func (m *StatefulSetCondition) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo + +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{27} +} +func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetList) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_StatefulSetList.Merge(m, src) +} +func (m *StatefulSetList) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetList) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo + +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{28} +} +func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetSpec.Merge(m, src) +} +func (m *StatefulSetSpec) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo + +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{29} +} +func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetStatus.Merge(m, src) +} +func (m *StatefulSetStatus) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_42fe616264472f7e, []int{30} +} +func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) +} +func (m *StatefulSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") + proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1beta2.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1beta2.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1beta2.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1beta2.DaemonSetSpec") + 
proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1beta2.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta2.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta2.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta2.DeploymentList") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta2.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta2.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta2.DeploymentStrategy") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1beta2.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetStatus") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDeployment") + proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateStatefulSetStrategy") + proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus.SelectorEntry") + proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") + proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") + proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") + proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") + proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") + proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_42fe616264472f7e) +} + +var fileDescriptor_42fe616264472f7e = []byte{ + // 2171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, + 0xf9, 0xd6, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0xf4, 0x93, 0x36, 0xf2, 0xaf, 0x2b, 0x63, + 0x13, 0x38, 0x4a, 0x6c, 0xcd, 0xda, 0xca, 0x07, 0x12, 0xbb, 0x68, 0xab, 0x95, 0x52, 0xdb, 0x81, + 0xbe, 0x42, 0x59, 0x06, 0x1a, 0xb4, 0xa8, 0xa9, 0x5d, 0x7a, 0x35, 0xd1, 0x7c, 0x61, 0x86, 0xb3, + 0xf5, 0xa2, 0x97, 0x9e, 0x0a, 0x14, 0x28, 0xd0, 0xf6, 0xda, 0x7f, 0xa2, 0xb7, 0xa2, 0x68, 0x6f, + 0x45, 0x50, 0xf8, 0x52, 0x20, 0xe8, 0x25, 0x39, 0x09, 0xf5, 0xe6, 0x54, 0x14, 0xbd, 0x14, 0xe8, + 0x25, 0x40, 0x81, 0x82, 0x1c, 0xce, 0x07, 0xe7, 0xc3, 0x3b, 0x52, 0x1c, 0xa5, 0x29, 0x72, 0xd3, + 0x92, 0xcf, 0xfb, 0xf0, 0x7d, 0xc9, 0x97, 0x7c, 0x1f, 0x72, 0x04, 0xbe, 0x73, 0xfc, 0x96, 0xab, + 0x6a, 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, + 0x25, 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 
0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x43, 0x42, 0xf1, 0x5a, 0xab, + 0x47, 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, + 0xa6, 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, + 0x3d, 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc4, 0x7f, 0xf1, 0x1f, 0xfc, 0x2f, 0x9f, 0x67, + 0xa9, 0x19, 0x1b, 0xb0, 0x63, 0x39, 0xa4, 0xd5, 0xbf, 0x99, 0x1c, 0x6b, 0xe9, 0xf5, 0x08, 0x63, + 0xe0, 0xce, 0x91, 0x66, 0x12, 0x67, 0xd0, 0xb2, 0x8f, 0x7b, 0xac, 0xc1, 0x6d, 0x19, 0x84, 0xe2, + 0x2c, 0xab, 0x56, 0x9e, 0x95, 0xe3, 0x99, 0x54, 0x33, 0x48, 0xca, 0xe0, 0xcd, 0x51, 0x06, 0x6e, + 0xe7, 0x88, 0x18, 0x38, 0x65, 0xf7, 0x5a, 0x9e, 0x9d, 0x47, 0x35, 0xbd, 0xa5, 0x99, 0xd4, 0xa5, + 0x4e, 0xd2, 0xa8, 0xf9, 0x2f, 0x05, 0xc0, 0x0d, 0xcb, 0xa4, 0x8e, 0xa5, 0xeb, 0xc4, 0x41, 0xa4, + 0xaf, 0xb9, 0x9a, 0x65, 0xc2, 0x87, 0xa0, 0xc6, 0xe2, 0xe9, 0x62, 0x8a, 0xeb, 0xca, 0x15, 0x65, + 0x65, 0x6a, 0xed, 0x86, 0x1a, 0xcd, 0x74, 0x48, 0xaf, 0xda, 0xc7, 0x3d, 0xd6, 0xe0, 0xaa, 0x0c, + 0xad, 0xf6, 0x6f, 0xaa, 0xbb, 0x87, 0x1f, 0x90, 0x0e, 0xdd, 0x26, 0x14, 0xb7, 0xe1, 0x93, 0x93, + 0xe5, 0xb1, 0xe1, 0xc9, 0x32, 0x88, 0xda, 0x50, 0xc8, 0x0a, 0x77, 0x41, 0x85, 0xb3, 0x97, 0x38, + 0xfb, 0x6a, 0x2e, 0xbb, 0x08, 0x5a, 0x45, 0xf8, 0x47, 0xef, 0x3c, 0xa6, 0xc4, 0x64, 0xee, 0xb5, + 0x2f, 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x1d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, + 0x7c, 0x45, 0x59, 0x29, 0xb7, 0x2f, 0x0a, 0x54, 0x2d, 0x08, 0x0b, 0x85, 0x88, 0xe6, 0x13, 0x05, + 0x2c, 0xa4, 0xe3, 0xde, 0xd2, 0x5c, 0x0a, 0xbf, 0x9f, 0x8a, 0x5d, 0x2d, 0x16, 0x3b, 0xb3, 0xe6, + 0x91, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0xb8, 0xf7, 0x40, 0x55, 0xa3, 0xc4, 0x70, 0xeb, 0xa5, 0x2b, + 0xe5, 0x95, 0xa9, 0xb5, 0x6b, 0x6a, 0x4e, 0x02, 0xab, 0x69, 0xef, 0xda, 0xd3, 0x82, 0xb7, 0x7a, + 0x8f, 0x31, 0x20, 0x9f, 0xa8, 0xf9, 0xb3, 0x12, 0x98, 0xdc, 0xc4, 0xc4, 0xb0, 0xcc, 0x7d, 0x42, + 0xcf, 0x61, 0xe5, 0xee, 0x82, 0x8a, 0x6b, 0x93, 0x8e, 0x58, 0xb9, 0xab, 0xb9, 0x01, 0x84, 0x3e, + 0xed, 0xdb, 0xa4, 0x13, 0x2d, 0x19, 0xfb, 0x85, 0x38, 0x03, 0xdc, 0x03, 0xe3, 0x2e, 0xc5, 0xd4, + 0x73, 0xf9, 0x82, 0x4d, 0xad, 0xad, 0x14, 0xe0, 0xe2, 0xf8, 0xf6, 0x8c, 0x60, 0x1b, 0xf7, 0x7f, + 0x23, 0xc1, 0xd3, 0xfc, 0x5b, 0x09, 0xc0, 0x10, 0xbb, 0x61, 0x99, 0x5d, 0x8d, 0xb2, 0x74, 0xbe, + 0x05, 0x2a, 0x74, 0x60, 0x13, 0x3e, 0x21, 0x93, 0xed, 0xab, 0x81, 0x2b, 0xf7, 0x07, 0x36, 0xf9, + 0xec, 0x64, 0x79, 0x21, 0x6d, 0xc1, 0x7a, 0x10, 0xb7, 0x81, 0x5b, 0xa1, 0x93, 0x25, 0x6e, 0xfd, + 0xba, 0x3c, 0xf4, 0x67, 0x27, 0xcb, 0x19, 0x67, 0x87, 0x1a, 0x32, 0xc9, 0x0e, 0xc2, 0x3e, 0x80, + 0x3a, 0x76, 0xe9, 0x7d, 0x07, 0x9b, 0xae, 0x3f, 0x92, 0x66, 0x10, 0x11, 0xfe, 0xab, 0xc5, 0x16, + 0x8a, 0x59, 0xb4, 0x97, 0x84, 0x17, 0x70, 0x2b, 0xc5, 0x86, 0x32, 0x46, 0x80, 0x57, 0xc1, 0xb8, + 0x43, 0xb0, 0x6b, 0x99, 0xf5, 0x0a, 0x8f, 0x22, 0x9c, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0xaf, + 0x80, 0x09, 0x83, 0xb8, 0x2e, 0xee, 0x91, 0x7a, 0x95, 0x03, 0x67, 0x05, 0x70, 0x62, 0xdb, 0x6f, + 0x46, 0x41, 0x7f, 0xf3, 0xb7, 0x0a, 0x98, 0x0e, 0x67, 0xee, 0x1c, 0x76, 0xce, 0x1d, 0x79, 0xe7, + 0x34, 0x47, 0x27, 0x4b, 0xce, 0x86, 0xf9, 0xb0, 0x1c, 0x73, 0x9c, 0xa5, 0x23, 0xfc, 0x01, 0xa8, + 0xb9, 0x44, 0x27, 0x1d, 0x6a, 0x39, 0xc2, 0xf1, 0xd7, 0x0a, 0x3a, 0x8e, 0x0f, 0x89, 0xbe, 0x2f, + 0x4c, 0xdb, 0x17, 0x98, 0xe7, 0xc1, 0x2f, 0x14, 0x52, 0xc2, 0xf7, 0x40, 0x8d, 0x12, 0xc3, 0xd6, + 0x31, 0x25, 0x62, 0xd7, 0xbc, 0x18, 0x77, 0x9e, 0xe5, 0x0c, 0x23, 0xdb, 0xb3, 0xba, 0xf7, 0x05, + 0x8c, 0x6f, 0x99, 0x70, 0x32, 0x82, 0x56, 0x14, 0xd2, 0x40, 
0x1b, 0xcc, 0x78, 0x76, 0x97, 0x21, + 0x29, 0x3b, 0xce, 0x7b, 0x03, 0x91, 0x43, 0x37, 0x46, 0xcf, 0xca, 0x81, 0x64, 0xd7, 0x5e, 0x10, + 0xa3, 0xcc, 0xc8, 0xed, 0x28, 0xc1, 0x0f, 0xd7, 0xc1, 0xac, 0xa1, 0x99, 0x88, 0xe0, 0xee, 0x60, + 0x9f, 0x74, 0x2c, 0xb3, 0xeb, 0xf2, 0x54, 0xaa, 0xb6, 0x17, 0x05, 0xc1, 0xec, 0xb6, 0xdc, 0x8d, + 0x92, 0x78, 0xb8, 0x05, 0xe6, 0x83, 0x03, 0xf8, 0xae, 0xe6, 0x52, 0xcb, 0x19, 0x6c, 0x69, 0x86, + 0x46, 0xeb, 0xe3, 0x9c, 0xa7, 0x3e, 0x3c, 0x59, 0x9e, 0x47, 0x19, 0xfd, 0x28, 0xd3, 0xaa, 0xf9, + 0xab, 0x71, 0x30, 0x9b, 0x38, 0x17, 0xe0, 0x03, 0xb0, 0xd0, 0xf1, 0x1c, 0x87, 0x98, 0x74, 0xc7, + 0x33, 0x0e, 0x89, 0xb3, 0xdf, 0x39, 0x22, 0x5d, 0x4f, 0x27, 0x5d, 0xbe, 0xac, 0xd5, 0x76, 0x43, + 0xf8, 0xba, 0xb0, 0x91, 0x89, 0x42, 0x39, 0xd6, 0xf0, 0x5d, 0x00, 0x4d, 0xde, 0xb4, 0xad, 0xb9, + 0x6e, 0xc8, 0x59, 0xe2, 0x9c, 0xe1, 0x56, 0xdc, 0x49, 0x21, 0x50, 0x86, 0x15, 0xf3, 0xb1, 0x4b, + 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f, 0x65, 0xd9, 0xc7, 0xcd, 0x4c, 0x14, 0xca, 0xb1, 0x86, 0x6f, + 0x80, 0x29, 0x7f, 0x34, 0x3e, 0xe7, 0x62, 0x71, 0xe6, 0x04, 0xd9, 0xd4, 0x4e, 0xd4, 0x85, 0xe2, + 0x38, 0x16, 0x9a, 0x75, 0xe8, 0x12, 0xa7, 0x4f, 0xba, 0x77, 0x7c, 0x71, 0xc0, 0x2a, 0x68, 0x95, + 0x57, 0xd0, 0x30, 0xb4, 0xdd, 0x14, 0x02, 0x65, 0x58, 0xb1, 0xd0, 0xfc, 0xac, 0x49, 0x85, 0x36, + 0x2e, 0x87, 0x76, 0x90, 0x89, 0x42, 0x39, 0xd6, 0x2c, 0xf7, 0x7c, 0x97, 0xd7, 0xfb, 0x58, 0xd3, + 0xf1, 0xa1, 0x4e, 0xea, 0x13, 0x72, 0xee, 0xed, 0xc8, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x25, + 0xbf, 0xe9, 0xc0, 0xc4, 0x21, 0x49, 0x8d, 0x93, 0xbc, 0x20, 0x48, 0x2e, 0xed, 0x24, 0x01, 0x28, + 0x6d, 0x03, 0x6f, 0x81, 0x99, 0x8e, 0xa5, 0xeb, 0x3c, 0x1f, 0x37, 0x2c, 0xcf, 0xa4, 0xf5, 0x49, + 0xce, 0x02, 0xd9, 0x1e, 0xda, 0x90, 0x7a, 0x50, 0x02, 0x09, 0x7f, 0x08, 0x40, 0x27, 0x28, 0x0c, + 0x6e, 0x1d, 0x8c, 0x50, 0x00, 0xe9, 0xb2, 0x14, 0x55, 0xe6, 0xb0, 0xc9, 0x45, 0x31, 0xca, 0xe6, + 0x87, 0x0a, 0x58, 0xcc, 0xd9, 0xe8, 0xf0, 0xdb, 0x52, 0x11, 0xbc, 0x96, 0x28, 0x82, 0x97, 0x73, + 0xcc, 0x62, 0x95, 0xf0, 0x08, 0x4c, 0x33, 0x41, 0xa2, 0x99, 0x3d, 0x1f, 0x22, 0xce, 0xb2, 0x56, + 0x6e, 0x00, 0x28, 0x8e, 0x8e, 0x4e, 0xe5, 0x4b, 0xc3, 0x93, 0xe5, 0x69, 0xa9, 0x0f, 0xc9, 0xc4, + 0xcd, 0x9f, 0x97, 0x00, 0xd8, 0x24, 0xb6, 0x6e, 0x0d, 0x0c, 0x62, 0x9e, 0x87, 0xa6, 0xb9, 0x27, + 0x69, 0x9a, 0x97, 0xf3, 0x97, 0x24, 0x74, 0x2a, 0x57, 0xd4, 0xbc, 0x97, 0x10, 0x35, 0xaf, 0x14, + 0x21, 0x7b, 0xb6, 0xaa, 0xf9, 0xb8, 0x0c, 0xe6, 0x22, 0x70, 0x24, 0x6b, 0x6e, 0x4b, 0x2b, 0xfa, + 0x72, 0x62, 0x45, 0x17, 0x33, 0x4c, 0xbe, 0x30, 0x5d, 0xf3, 0x01, 0x98, 0x61, 0xaa, 0xc3, 0x5f, + 0x3f, 0xae, 0x69, 0xc6, 0x4f, 0xad, 0x69, 0xc2, 0x4a, 0xb4, 0x25, 0x31, 0xa1, 0x04, 0x73, 0x8e, + 0x86, 0x9a, 0xf8, 0x2a, 0x6a, 0xa8, 0xdf, 0x29, 0x60, 0x26, 0x5a, 0xa6, 0x73, 0x10, 0x51, 0x77, + 0x65, 0x11, 0xf5, 0x62, 0x81, 0xe4, 0xcc, 0x51, 0x51, 0x1f, 0x57, 0xe2, 0xae, 0x73, 0x19, 0xb5, + 0xc2, 0xae, 0x60, 0xb6, 0xae, 0x75, 0xb0, 0x2b, 0xea, 0xed, 0x05, 0xff, 0xfa, 0xe5, 0xb7, 0xa1, + 0xb0, 0x57, 0x12, 0x5c, 0xa5, 0x2f, 0x56, 0x70, 0x95, 0x9f, 0x8f, 0xe0, 0xfa, 0x1e, 0xa8, 0xb9, + 0x81, 0xd4, 0xaa, 0x70, 0xca, 0x6b, 0x85, 0x36, 0xb6, 0x50, 0x59, 0x21, 0x75, 0xa8, 0xaf, 0x42, + 0xba, 0x2c, 0x65, 0x55, 0xfd, 0x32, 0x95, 0x15, 0x4b, 0x74, 0x1b, 0x7b, 0x2e, 0xe9, 0xf2, 0x4d, + 0x55, 0x8b, 0x12, 0x7d, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, 0xed, 0x58, 0x3d, 0x87, + 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x10, 0x80, 0x5f, 0x13, 0x2f, 0x0f, 0x4f, 0x96, + 0x17, 0xf7, 0xb2, 0x21, 0x28, 0xcf, 0xb6, 0xf9, 0xc7, 0x0a, 0xb8, 0x98, 0x3c, 0x1b, 
0x73, 0x64, + 0x8a, 0x72, 0x26, 0x99, 0x72, 0x3d, 0x96, 0xa7, 0xbe, 0x86, 0x8b, 0x3d, 0x15, 0xa4, 0x72, 0x75, + 0x1d, 0xcc, 0x0a, 0x59, 0x12, 0x74, 0x0a, 0xa1, 0x16, 0x2e, 0xcf, 0x81, 0xdc, 0x8d, 0x92, 0x78, + 0x78, 0x1b, 0x4c, 0x3b, 0x5c, 0x79, 0x05, 0x04, 0xbe, 0x7a, 0xf9, 0x3f, 0x41, 0x30, 0x8d, 0xe2, + 0x9d, 0x48, 0xc6, 0x32, 0xe5, 0x12, 0x09, 0x92, 0x80, 0xa0, 0x22, 0x2b, 0x97, 0xf5, 0x24, 0x00, + 0xa5, 0x6d, 0xe0, 0x36, 0x98, 0xf3, 0xcc, 0x34, 0x95, 0x9f, 0x6b, 0x97, 0x05, 0xd5, 0xdc, 0x41, + 0x1a, 0x82, 0xb2, 0xec, 0xe0, 0x43, 0x49, 0xcc, 0x8c, 0xf3, 0xf3, 0xe4, 0x7a, 0x81, 0x3d, 0x51, + 0x58, 0xcd, 0x64, 0x48, 0xad, 0x5a, 0x51, 0xa9, 0xd5, 0xfc, 0x83, 0x02, 0x60, 0x7a, 0x1f, 0x8e, + 0x7c, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, + 0x5a, 0x4c, 0x00, 0x89, 0x89, 0x3e, 0x9f, 0x47, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, + 0xa0, 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0xdf, 0x4b, 0x60, 0x2e, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, + 0x5f, 0x3f, 0xec, 0x14, 0x13, 0x25, 0xd1, 0xd4, 0xfd, 0x37, 0x89, 0x92, 0xc8, 0xab, 0x1c, 0x51, + 0xf2, 0x9b, 0x52, 0xdc, 0xf5, 0x53, 0x8a, 0x92, 0xe7, 0xf0, 0xc2, 0xf1, 0x95, 0xd3, 0x35, 0xcd, + 0x3f, 0x95, 0xc1, 0xc5, 0xe4, 0x3e, 0x94, 0x0a, 0xa4, 0x32, 0xb2, 0x40, 0xee, 0x81, 0xf9, 0x47, + 0x9e, 0xae, 0x0f, 0x78, 0x0c, 0xb1, 0x2a, 0xe9, 0x97, 0xd6, 0xff, 0x17, 0x96, 0xf3, 0xdf, 0xcd, + 0xc0, 0xa0, 0x4c, 0xcb, 0x74, 0xbd, 0xac, 0x7c, 0xde, 0x7a, 0x59, 0x3d, 0x43, 0xbd, 0xcc, 0x96, + 0x1c, 0xe5, 0x33, 0x49, 0x8e, 0xd3, 0x15, 0xcb, 0x8c, 0x83, 0x6b, 0xe4, 0xd5, 0xff, 0xa7, 0x0a, + 0x58, 0xc8, 0xbe, 0x70, 0x43, 0x1d, 0xcc, 0x18, 0xf8, 0x71, 0xfc, 0xe1, 0x63, 0x54, 0x11, 0xf1, + 0xa8, 0xa6, 0xab, 0xfe, 0x27, 0x23, 0xf5, 0x9e, 0x49, 0x77, 0x9d, 0x7d, 0xea, 0x68, 0x66, 0xcf, + 0xaf, 0xbc, 0xdb, 0x12, 0x17, 0x4a, 0x70, 0x37, 0x3f, 0x55, 0xc0, 0x62, 0x4e, 0xe5, 0x3b, 0x5f, + 0x4f, 0xe0, 0xfb, 0xa0, 0x66, 0xe0, 0xc7, 0xfb, 0x9e, 0xd3, 0xcb, 0xaa, 0xd5, 0xc5, 0xc6, 0xe1, + 0x5b, 0x71, 0x5b, 0xb0, 0xa0, 0x90, 0xaf, 0xb9, 0x0b, 0xae, 0x48, 0x41, 0xb2, 0x9d, 0x43, 0x1e, + 0x79, 0x3a, 0xdf, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x93, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, + 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x6f, 0x05, 0x54, 0xf7, + 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, 0xff, 0x92, 0xce, 0xfd, 0xc9, 0x2d, + 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xd2, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0xb7, 0xc1, 0x64, 0x38, 0xdc, + 0xe9, 0x0e, 0xa0, 0xe6, 0xaf, 0x4b, 0x60, 0x2a, 0x36, 0xc4, 0x29, 0x8f, 0xaf, 0x87, 0xd2, 0x99, + 0xcd, 0x36, 0xe6, 0x5a, 0x91, 0x40, 0xd4, 0xe0, 0x7c, 0x7e, 0xc7, 0xa4, 0x4e, 0xfc, 0x82, 0x97, + 0x3e, 0xb6, 0xbf, 0x05, 0x66, 0x28, 0x76, 0x7a, 0x84, 0x06, 0x7d, 0x7c, 0xc2, 0x26, 0xa3, 0x07, + 0x8f, 0xfb, 0x52, 0x2f, 0x4a, 0xa0, 0x97, 0x6e, 0x83, 0x69, 0x69, 0x30, 0x78, 0x11, 0x94, 0x8f, + 0xc9, 0xc0, 0x97, 0x3d, 0x88, 0xfd, 0x09, 0xe7, 0x41, 0xb5, 0x8f, 0x75, 0xcf, 0xcf, 0xf3, 0x49, + 0xe4, 0xff, 0xb8, 0x55, 0x7a, 0x4b, 0x69, 0xfe, 0x82, 0x4d, 0x4e, 0x94, 0x9c, 0xe7, 0x90, 0x5d, + 0xef, 0x4a, 0xd9, 0x95, 0xff, 0x51, 0x2f, 0xbe, 0x65, 0xf2, 0x72, 0x0c, 0x25, 0x72, 0xec, 0xd5, + 0x42, 0x6c, 0xcf, 0xce, 0xb4, 0x7f, 0x94, 0xc0, 0x7c, 0x0c, 0x1d, 0xc9, 0xc9, 0x6f, 0x4a, 0x72, + 0x72, 0x25, 0x21, 0x27, 0xeb, 0x59, 0x36, 0x5f, 0xeb, 0xc9, 0xd1, 0x7a, 0xf2, 0xf7, 0x0a, 0x98, + 0x8d, 0xcd, 0xdd, 0x39, 0x08, 0xca, 0x7b, 0xb2, 0xa0, 0x7c, 0xa9, 0x48, 0xd2, 0xe4, 0x28, 0xca, + 0x3f, 0x57, 0x25, 0xe7, 0xff, 0xe7, 0xdf, 0xb9, 0x7e, 0x0c, 0xe6, 0xfb, 0x96, 0xee, 0x19, 0x64, + 0x43, 0xc7, 
0x9a, 0x11, 0x00, 0x98, 0x02, 0x2b, 0x27, 0xef, 0x72, 0x21, 0x3d, 0x71, 0x5c, 0xcd, + 0xa5, 0xc4, 0xa4, 0x0f, 0x22, 0xcb, 0x48, 0xf7, 0x3d, 0xc8, 0xa0, 0x43, 0x99, 0x83, 0xc0, 0x37, + 0xc0, 0x14, 0x53, 0x4e, 0x5a, 0x87, 0xec, 0x60, 0x23, 0x48, 0xac, 0xf0, 0x13, 0xd6, 0x7e, 0xd4, + 0x85, 0xe2, 0x38, 0x78, 0x04, 0xe6, 0x6c, 0xab, 0xbb, 0x8d, 0x4d, 0xdc, 0x23, 0x4c, 0x66, 0xec, + 0x59, 0xba, 0xd6, 0x19, 0xf0, 0xc7, 0xaf, 0xc9, 0xf6, 0x9b, 0xc1, 0xc3, 0xc6, 0x5e, 0x1a, 0xc2, + 0x2e, 0x89, 0x19, 0xcd, 0x7c, 0x53, 0x67, 0x51, 0x42, 0x27, 0xf5, 0xd9, 0xd5, 0x7f, 0x76, 0x5e, + 0x2b, 0x92, 0x61, 0x67, 0xfc, 0xf0, 0x9a, 0xf7, 0xb6, 0x57, 0x3b, 0xd3, 0x57, 0xd3, 0x7f, 0x56, + 0xc0, 0xa5, 0xd4, 0x51, 0xf9, 0x25, 0xbe, 0xae, 0xa5, 0xa4, 0x7e, 0xf9, 0x14, 0x52, 0x7f, 0x1d, + 0xcc, 0x8a, 0x0f, 0xb6, 0x89, 0x9b, 0x42, 0x78, 0x63, 0xdb, 0x90, 0xbb, 0x51, 0x12, 0x9f, 0xf5, + 0xba, 0x57, 0x3d, 0xe5, 0xeb, 0x5e, 0xdc, 0x0b, 0xf1, 0x0f, 0x48, 0x7e, 0xea, 0xa5, 0xbd, 0x10, + 0xff, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, 0xb2, 0x42, 0x38, 0x90, 0x7a, + 0x51, 0x02, 0xfd, 0xb9, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, 0x45, 0xf2, 0xb9, 0xf8, 0xdd, + 0xe4, 0x2f, 0x0a, 0x78, 0x21, 0x77, 0x23, 0xc0, 0x75, 0xa9, 0xec, 0xae, 0x26, 0xca, 0xee, 0x37, + 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0x7e, 0x9a, 0x7b, 0xbb, 0xd8, 0xd3, 0x5c, 0x86, 0x76, 0x1f, + 0xfd, 0x46, 0xd7, 0x5e, 0x7d, 0xf2, 0xb4, 0x31, 0xf6, 0xd1, 0xd3, 0xc6, 0xd8, 0x27, 0x4f, 0x1b, + 0x63, 0x3f, 0x19, 0x36, 0x94, 0x27, 0xc3, 0x86, 0xf2, 0xd1, 0xb0, 0xa1, 0x7c, 0x32, 0x6c, 0x28, + 0x7f, 0x1d, 0x36, 0x94, 0x5f, 0x7e, 0xda, 0x18, 0x7b, 0x7f, 0x42, 0x8c, 0xf8, 0x9f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x70, 0x2d, 0x26, 0x9d, 0xec, 0x28, 0x00, 0x00, +} + +func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x18 + { + size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if 
err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Partition != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Scale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *StatefulSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x40 + } + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + i -= len(m.PodManagementPolicy) + copy(dAtA[i:], m.PodManagementPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i-- + dAtA[i] = 0x32 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0x2a + if len(m.VolumeClaimTemplates) > 0 { + for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i -= len(m.UpdateRevision) + copy(dAtA[i:], m.UpdateRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i-- + dAtA[i] = 0x3a + i -= len(m.CurrentRevision) + copy(dAtA[i:], m.CurrentRevision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ControllerRevision) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Data.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *ControllerRevisionList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + 
sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Partition != nil { + n += 1 + sovGenerated(uint64(*m.Partition)) + } + return n +} + +func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatefulSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeClaimTemplates) > 0 { + for _, e := range m.VolumeClaimTemplates { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodManagementPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *StatefulSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + l = len(m.CurrentRevision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UpdateRevision) + n += 1 + l + sovGenerated(uint64(l)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StatefulSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n 
+} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ControllerRevision) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ControllerRevision{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *ControllerRevisionList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ControllerRevision{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ControllerRevisionList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", 
[vendored generated code: gogo/protobuf String() and Unmarshal() implementations from the vendored apps/v1 generated.pb.go (k8s.io/api), covering ControllerRevision, DaemonSet, Deployment, ReplicaSet, StatefulSet and Scale together with their List/Spec/Status/Condition/UpdateStrategy variants, the RollingUpdate* strategy types, and the valueToStringGenerated helper; the machine-generated varint and length-delimited field decoding continues in the remainder of the vendored file]
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } 
+ iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
RollingUpdateStatefulSetStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Partition = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StatefulSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 
0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) + if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", 
wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UpdateRevision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, StatefulSetCondition{}) + if err := 
m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto new file mode 100644 index 0000000..17e4397 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -0,0 +1,752 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.apps.v1beta2; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta2"; + +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +message ControllerRevision { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Data is the serialized representation of the state. + optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; + + // Revision indicates the revision of the state represented by Data. + optional int64 revision = 3; +} + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +message ControllerRevisionList { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of ControllerRevisions + repeated ControllerRevision items = 2; +} + +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for +// more information. +// DaemonSet represents the configuration of a daemon set. +message DaemonSet { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional DaemonSetSpec spec = 2; + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional DaemonSetStatus status = 3; +} + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of daemon sets. + repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. +message DaemonSetSpec { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). 
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 currentNumberScheduled = 1; + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 numberMisscheduled = 2; + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 desiredNumberScheduled = 3; + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if type = "RollingUpdate". + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. 
+ // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; +} + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for +// more information. +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + // +patchStrategy=retainKeys + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused. + // +optional + optional bool paused = 7; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. 
+ optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. 
+ optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +message ReplicaSetStatus { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replica set. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicaSetCondition conditions = 6; +} + +// Spec to control the desired behavior of daemon set rolling update. 
+message RollingUpdateDaemonSet { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new ReplicaSet can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +message RollingUpdateStatefulSetStrategy { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + optional int32 partition = 1; +} + +// Scale represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. 
+ // +optional + optional ScaleSpec spec = 2; + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// ScaleSpec describes the attributes of a scale subresource +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// ScaleStatus represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map<string, string> selector = 2; + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + optional string targetSelector = 3; +} + +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for +// more information. +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +message StatefulSet { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired identities of pods in this set. + // +optional + optional StatefulSetSpec spec = 2; + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + optional StatefulSetStatus status = 3; +} + +// StatefulSetCondition describes the state of a statefulset at a certain point. +message StatefulSetCondition { + // Type of statefulset condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// StatefulSetList is a collection of StatefulSets. +message StatefulSetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated StatefulSet items = 2; +} + +// A StatefulSetSpec is the specification of a StatefulSet. +message StatefulSetSpec { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. 
+ // +optional + optional int32 replicas = 1; + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; + + // serviceName is the name of the service that governs this StatefulSet. + // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + optional string serviceName = 5; + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + optional string podManagementPolicy = 6; + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + optional StatefulSetUpdateStrategy updateStrategy = 7; + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + optional int32 revisionHistoryLimit = 8; +} + +// StatefulSetStatus represents the current state of a StatefulSet. +message StatefulSetStatus { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + optional int64 observedGeneration = 1; + + // replicas is the number of Pods created by the StatefulSet controller. + optional int32 replicas = 2; + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. 
+ optional int32 readyReplicas = 3; + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + optional int32 currentReplicas = 4; + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + optional int32 updatedReplicas = 5; + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). + optional string currentRevision = 6; + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + optional string updateRevision = 7; + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated StatefulSetCondition conditions = 10; +} + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +message StatefulSetUpdateStrategy { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + optional string type = 1; + + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. + // +optional + optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; +} + diff --git a/vendor/k8s.io/api/apps/v1beta2/register.go b/vendor/k8s.io/api/apps/v1beta2/register.go new file mode 100644 index 0000000..2784ee3 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/register.go @@ -0,0 +1,61 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "apps" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. 
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &Scale{}, + &StatefulSet{}, + &StatefulSetList{}, + &DaemonSet{}, + &DaemonSetList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &ControllerRevision{}, + &ControllerRevisionList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go new file mode 100644 index 0000000..d358455 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -0,0 +1,876 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + ControllerRevisionHashLabelKey = "controller-revision-hash" + StatefulSetRevisionLabel = ControllerRevisionHashLabelKey + DeprecatedRollbackTo = "deprecated.deployment.rollback.to" + DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" + StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" +) + +// ScaleSpec describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. 
+type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for +// more information. +// StatefulSet represents a set of pods with consistent identities. +// Identities are defined as: +// - Network: A single stable DNS and hostname. +// - Storage: As many VolumeClaims as requested. +// The StatefulSet guarantees that a given network identity will always +// map to the same storage identity. +type StatefulSet struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired identities of pods in this set. + // +optional + Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current status of Pods in this StatefulSet. This data + // may be out of date by some window of time. + // +optional + Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PodManagementPolicyType defines the policy for creating pods under a stateful set. +type PodManagementPolicyType string + +const ( + // OrderedReadyPodManagement will create pods in strictly increasing order on + // scale up and strictly decreasing order on scale down, progressing only when + // the previous pod is ready or terminated. At most one pod will be changed + // at any time. + OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" + // ParallelPodManagement will create and delete pods as soon as the stateful set + // replica count is changed, and will not wait for pods to be ready or complete + // termination. + ParallelPodManagement PodManagementPolicyType = "Parallel" +) + +// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet +// controller will use to perform updates. It includes any additional parameters +// necessary to perform the update for the indicated strategy. +type StatefulSetUpdateStrategy struct { + // Type indicates the type of the StatefulSetUpdateStrategy. + // Default is RollingUpdate. + // +optional + Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` + // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. 
+ // +optional + RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +// StatefulSetUpdateStrategyType is a string enumeration type that enumerates +// all possible update strategies for the StatefulSet controller. +type StatefulSetUpdateStrategyType string + +const ( + // RollingUpdateStatefulSetStrategyType indicates that update will be + // applied to all Pods in the StatefulSet with respect to the StatefulSet + // ordering constraints. When a scale operation is performed with this + // strategy, new Pods will be created from the specification version indicated + // by the StatefulSet's updateRevision. + RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" + // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version + // tracking and ordered rolling restarts are disabled. Pods are recreated + // from the StatefulSetSpec when they are manually deleted. When a scale + // operation is performed with this strategy,specification version indicated + // by the StatefulSet's currentRevision. + OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" +) + +// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. +type RollingUpdateStatefulSetStrategy struct { + // Partition indicates the ordinal at which the StatefulSet should be + // partitioned. + // Default value is 0. + // +optional + Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` +} + +// A StatefulSetSpec is the specification of a StatefulSet. +type StatefulSetSpec struct { + // replicas is the desired number of replicas of the given Template. + // These are replicas in the sense that they are instantiations of the + // same Template, but individual replicas also have a consistent identity. + // If unspecified, defaults to 1. + // TODO: Consider a rename of this field. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // selector is a label query over pods that should match the replica count. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // template is the object that describes the pod that will be created if + // insufficient replicas are detected. Each pod stamped out by the StatefulSet + // will fulfill this Template, but have a unique identity from the rest + // of the StatefulSet. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // volumeClaimTemplates is a list of claims that pods are allowed to reference. + // The StatefulSet controller is responsible for mapping network identities to + // claims in a way that maintains the identity of a pod. Every claim in + // this list must have at least one matching (by name) volumeMount in one + // container in the template. A claim in this list takes precedence over + // any volumes in the template, with the same name. + // TODO: Define the behavior if a claim already exists with the same name. + // +optional + VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` + + // serviceName is the name of the service that governs this StatefulSet. 
+ // This service must exist before the StatefulSet, and is responsible for + // the network identity of the set. Pods get DNS/hostnames that follow the + // pattern: pod-specific-string.serviceName.default.svc.cluster.local + // where "pod-specific-string" is managed by the StatefulSet controller. + ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` + + // podManagementPolicy controls how pods are created during initial scale up, + // when replacing pods on nodes, or when scaling down. The default policy is + // `OrderedReady`, where pods are created in increasing order (pod-0, then + // pod-1, etc) and the controller will wait until each pod is ready before + // continuing. When scaling down, the pods are removed in the opposite order. + // The alternative policy is `Parallel` which will create pods in parallel + // to match the desired scale without waiting, and on scale down will delete + // all pods at once. + // +optional + PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` + + // updateStrategy indicates the StatefulSetUpdateStrategy that will be + // employed to update Pods in the StatefulSet when a revision is made to + // Template. + UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` + + // revisionHistoryLimit is the maximum number of revisions that will + // be maintained in the StatefulSet's revision history. The revision history + // consists of all revisions not represented by a currently applied + // StatefulSetSpec version. The default value is 10. + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` +} + +// StatefulSetStatus represents the current state of a StatefulSet. +type StatefulSetStatus struct { + // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the + // StatefulSet's generation, which is updated on mutation by the API Server. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // replicas is the number of Pods created by the StatefulSet controller. + Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` + + // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` + + // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by currentRevision. + CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` + + // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version + // indicated by updateRevision. + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` + + // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the + // sequence [0,currentReplicas). 
+ CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` + + // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence + // [replicas-updatedReplicas,replicas) + UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` + + // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller + // uses this field as a collision avoidance mechanism when it needs to create the name for the + // newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a statefulset's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type StatefulSetConditionType string + +// StatefulSetCondition describes the state of a statefulset at a certain point. +type StatefulSetCondition struct { + // Type of statefulset condition. + Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StatefulSetList is a collection of StatefulSets. +type StatefulSetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for +// more information. +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. 
+ // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // It must match the pod template's labels. + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. Defaults to 600s. + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets + // to select new pods (and old pods being select by new ReplicaSet). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. 
+type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // Defaults to 25%. + // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet + // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // Defaults to 25%. + // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new ReplicaSet can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. 
+ // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. 
+ // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` +} + +// DaemonSetStatus represents the current status of a daemon set. 
+type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. 
+ // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for +// more information. +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // DefaultDaemonSetUniqueLabelKey is the default label key that is added + // to existing DaemonSet pods to distinguish between old and new + // DaemonSet pods during DaemonSet template updates. + DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of daemon sets. + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the replica count. + // Label keys and values that must match in order to be controlled by this replica set. + // It must match the pod template's labels. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replica set. 
+ // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the +// release notes for more information. +// ControllerRevision implements an immutable snapshot of state data. Clients +// are responsible for serializing and deserializing the objects that contain +// their internal state. +// Once a ControllerRevision has been successfully created, it can not be updated. +// The API Server will fail validation of all requests that attempt to mutate +// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both +// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, +// it may be subject to name and representation changes in future releases, and clients should not +// depend on its stability. It is primarily for internal use by controllers. +type ControllerRevision struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Data is the serialized representation of the state. + Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` + + // Revision indicates the revision of the state represented by Data. + Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ControllerRevisionList is a resource containing a list of ControllerRevision objects. +type ControllerRevisionList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ControllerRevisions + Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go new file mode 100644 index 0000000..822158a --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -0,0 +1,396 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_ControllerRevision = map[string]string{ + "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "data": "Data is the serialized representation of the state.", + "revision": "Revision indicates the revision of the state represented by Data.", +} + +func (ControllerRevision) SwaggerDoc() map[string]string { + return map_ControllerRevision +} + +var map_ControllerRevisionList = map[string]string{ + "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of ControllerRevisions", +} + +func (ControllerRevisionList) SwaggerDoc() map[string]string { + return map_ControllerRevisionList +} + +var map_DaemonSet = map[string]string{ + "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet +} + +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "A list of daemon sets.", +} + +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList +} + +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", +} + +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec +} + +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", +} + +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus +} + +var map_DaemonSetUpdateStrategy = map[string]string{ + "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + +var map_Deployment = map[string]string{ + "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for more information. 
Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", + "paused": "Indicates that the deployment is paused.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_ReplicaSet = map[string]string{ + "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ReplicaSet) SwaggerDoc() map[string]string { + return map_ReplicaSet +} + +var map_ReplicaSetCondition = map[string]string{ + "": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "Type of replica set condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicaSetCondition) SwaggerDoc() map[string]string { + return map_ReplicaSetCondition +} + +var map_ReplicaSetList = map[string]string{ + "": "ReplicaSetList is a collection of ReplicaSets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", +} + +func (ReplicaSetList) SwaggerDoc() map[string]string { + return map_ReplicaSetList +} + +var map_ReplicaSetSpec = map[string]string{ + "": "ReplicaSetSpec is the specification of a ReplicaSet.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", +} + +func (ReplicaSetSpec) SwaggerDoc() map[string]string { + return map_ReplicaSetSpec +} + +var map_ReplicaSetStatus = map[string]string{ + "": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "replicas": "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RollingUpdateStatefulSetStrategy = map[string]string{ + "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", + "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", +} + +func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { + return map_RollingUpdateStatefulSetStrategy +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +var map_StatefulSet = map[string]string{ + "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", + "spec": "Spec defines the desired identities of pods in this set.", + "status": "Status is the current status of Pods in this StatefulSet. 
This data may be out of date by some window of time.", +} + +func (StatefulSet) SwaggerDoc() map[string]string { + return map_StatefulSet +} + +var map_StatefulSetCondition = map[string]string{ + "": "StatefulSetCondition describes the state of a statefulset at a certain point.", + "type": "Type of statefulset condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (StatefulSetCondition) SwaggerDoc() map[string]string { + return map_StatefulSetCondition +} + +var map_StatefulSetList = map[string]string{ + "": "StatefulSetList is a collection of StatefulSets.", +} + +func (StatefulSetList) SwaggerDoc() map[string]string { + return map_StatefulSetList +} + +var map_StatefulSetSpec = map[string]string{ + "": "A StatefulSetSpec is the specification of a StatefulSet.", + "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", + "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", + "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", + "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", + "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", + "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", + "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. 
The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", +} + +func (StatefulSetSpec) SwaggerDoc() map[string]string { + return map_StatefulSetSpec +} + +var map_StatefulSetStatus = map[string]string{ + "": "StatefulSetStatus represents the current state of a StatefulSet.", + "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", + "replicas": "replicas is the number of Pods created by the StatefulSet controller.", + "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", + "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", + "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", + "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", + "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", + "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a statefulset's current state.", +} + +func (StatefulSetStatus) SwaggerDoc() map[string]string { + return map_StatefulSetStatus +} + +var map_StatefulSetUpdateStrategy = map[string]string{ + "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", + "type": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.", + "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", +} + +func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_StatefulSetUpdateStrategy +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000..127bf09 --- /dev/null +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -0,0 +1,839 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1beta2 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Data.DeepCopyInto(&out.Data) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. +func (in *ControllerRevision) DeepCopy() *ControllerRevision { + if in == nil { + return nil + } + out := new(ControllerRevision) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevision) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ControllerRevision, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. +func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { + if in == nil { + return nil + } + out := new(ControllerRevisionList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. +func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. +func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { + *out = *in + if in.Partition != nil { + in, out := &in.Partition, &out.Partition + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. 
+func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { + if in == nil { + return nil + } + out := new(RollingUpdateStatefulSetStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. +func (in *StatefulSet) DeepCopy() *StatefulSet { + if in == nil { + return nil + } + out := new(StatefulSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. 
+func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { + if in == nil { + return nil + } + out := new(StatefulSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StatefulSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. +func (in *StatefulSetList) DeepCopy() *StatefulSetList { + if in == nil { + return nil + } + out := new(StatefulSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StatefulSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + if in.VolumeClaimTemplates != nil { + in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates + *out = make([]corev1.PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. +func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { + if in == nil { + return nil + } + out := new(StatefulSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]StatefulSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. +func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { + if in == nil { + return nil + } + out := new(StatefulSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateStatefulSetStrategy) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. +func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { + if in == nil { + return nil + } + out := new(StatefulSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go new file mode 100644 index 0000000..ae8f767 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=auditregistration.k8s.io + +package v1alpha1 // import "k8s.io/api/auditregistration/v1alpha1" diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go new file mode 100644 index 0000000..f8eec3d --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go @@ -0,0 +1,2030 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AuditSink) Reset() { *m = AuditSink{} } +func (*AuditSink) ProtoMessage() {} +func (*AuditSink) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{0} +} +func (m *AuditSink) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSink) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSink.Merge(m, src) +} +func (m *AuditSink) XXX_Size() int { + return m.Size() +} +func (m *AuditSink) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSink.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditSink proto.InternalMessageInfo + +func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } +func (*AuditSinkList) ProtoMessage() {} +func (*AuditSinkList) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{1} +} +func (m *AuditSinkList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkList) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkList.Merge(m, src) +} +func (m *AuditSinkList) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkList) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkList.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditSinkList proto.InternalMessageInfo + +func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } +func (*AuditSinkSpec) ProtoMessage() {} +func (*AuditSinkSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{2} +} +func (m *AuditSinkSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditSinkSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AuditSinkSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditSinkSpec.Merge(m, src) +} +func (m *AuditSinkSpec) XXX_Size() int { + return m.Size() +} +func (m *AuditSinkSpec) XXX_DiscardUnknown() { + xxx_messageInfo_AuditSinkSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditSinkSpec proto.InternalMessageInfo + +func (m *Policy) Reset() { *m = Policy{} } +func (*Policy) ProtoMessage() {} +func (*Policy) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{3} +} +func (m *Policy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Policy) XXX_Merge(src proto.Message) { + xxx_messageInfo_Policy.Merge(m, src) +} +func (m *Policy) XXX_Size() int { + return m.Size() +} +func (m *Policy) XXX_DiscardUnknown() { + xxx_messageInfo_Policy.DiscardUnknown(m) +} + +var xxx_messageInfo_Policy proto.InternalMessageInfo + +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{4} +} +func (m *ServiceReference) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceReference.Merge(m, src) +} +func (m *ServiceReference) XXX_Size() int { + return m.Size() +} +func (m *ServiceReference) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceReference proto.InternalMessageInfo + +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{5} +} +func (m *Webhook) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Webhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Webhook) XXX_Merge(src proto.Message) { + xxx_messageInfo_Webhook.Merge(m, src) +} +func (m *Webhook) XXX_Size() int { + return m.Size() +} +func (m *Webhook) XXX_DiscardUnknown() { + xxx_messageInfo_Webhook.DiscardUnknown(m) +} + +var xxx_messageInfo_Webhook proto.InternalMessageInfo + +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{6} +} +func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookClientConfig.Merge(m, src) +} +func (m *WebhookClientConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookClientConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo + +func (m *WebhookThrottleConfig) Reset() { *m = WebhookThrottleConfig{} } +func (*WebhookThrottleConfig) ProtoMessage() {} +func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_642d3597c6afa8ba, []int{7} +} +func (m *WebhookThrottleConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WebhookThrottleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WebhookThrottleConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_WebhookThrottleConfig.Merge(m, src) +} +func (m *WebhookThrottleConfig) XXX_Size() int { + return m.Size() +} +func (m *WebhookThrottleConfig) XXX_DiscardUnknown() { + xxx_messageInfo_WebhookThrottleConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_WebhookThrottleConfig proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") + proto.RegisterType((*AuditSinkList)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkList") + proto.RegisterType((*AuditSinkSpec)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkSpec") + proto.RegisterType((*Policy)(nil), "k8s.io.api.auditregistration.v1alpha1.Policy") + 
proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.auditregistration.v1alpha1.ServiceReference") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.auditregistration.v1alpha1.Webhook") + proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") + proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptor_642d3597c6afa8ba) +} + +var fileDescriptor_642d3597c6afa8ba = []byte{ + // 765 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x13, 0x47, + 0x14, 0xf6, 0xc6, 0x76, 0x6c, 0x4f, 0x9c, 0x36, 0x9d, 0xb4, 0x95, 0x1b, 0x55, 0x6b, 0x6b, 0xa5, + 0x4a, 0x91, 0xda, 0xcc, 0x36, 0x55, 0xd4, 0x56, 0x88, 0x4b, 0x36, 0x27, 0xa4, 0x10, 0xc2, 0x98, + 0x80, 0x40, 0x08, 0x31, 0x5e, 0x3f, 0xef, 0x0e, 0xb6, 0x77, 0x97, 0xdd, 0x59, 0xa3, 0xdc, 0xf8, + 0x09, 0xfc, 0x05, 0xfe, 0x06, 0x37, 0x24, 0x90, 0x72, 0xcc, 0x31, 0xa7, 0x88, 0x98, 0x03, 0xff, + 0x81, 0x13, 0x9a, 0xd9, 0x59, 0xdb, 0xc4, 0x41, 0x38, 0xb7, 0x79, 0xdf, 0x7b, 0xdf, 0xf7, 0xbe, + 0xf7, 0xde, 0xa0, 0x83, 0xfe, 0xff, 0x09, 0xe1, 0xa1, 0xdd, 0x4f, 0x3b, 0x10, 0x07, 0x20, 0x20, + 0xb1, 0x47, 0x10, 0x74, 0xc3, 0xd8, 0xd6, 0x09, 0x16, 0x71, 0x9b, 0xa5, 0x5d, 0x2e, 0x62, 0xf0, + 0x78, 0x22, 0x62, 0x26, 0x78, 0x18, 0xd8, 0xa3, 0x6d, 0x36, 0x88, 0x7c, 0xb6, 0x6d, 0x7b, 0x10, + 0x40, 0xcc, 0x04, 0x74, 0x49, 0x14, 0x87, 0x22, 0xc4, 0x7f, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0xe6, + 0x68, 0x24, 0xa7, 0x6d, 0x6c, 0x79, 0x5c, 0xf8, 0x69, 0x87, 0xb8, 0xe1, 0xd0, 0xf6, 0x42, 0x2f, + 0xb4, 0x15, 0xbb, 0x93, 0xf6, 0x54, 0xa4, 0x02, 0xf5, 0xca, 0x54, 0x37, 0x76, 0xa6, 0x66, 0x86, + 0xcc, 0xf5, 0x79, 0x00, 0xf1, 0xb1, 0x1d, 0xf5, 0x3d, 0x09, 0x24, 0xf6, 0x10, 0x04, 0xb3, 0x47, + 0x73, 0x5e, 0x36, 0xec, 0x6f, 0xb1, 0xe2, 0x34, 0x10, 0x7c, 0x08, 0x73, 0x84, 0x7f, 0xbf, 0x47, + 0x48, 0x5c, 0x1f, 0x86, 0xec, 0x32, 0xcf, 0x7a, 0x6f, 0xa0, 0xda, 0xae, 0x1c, 0xb6, 0xcd, 0x83, + 0x3e, 0x7e, 0x8a, 0xaa, 0xd2, 0x51, 0x97, 0x09, 0xd6, 0x30, 0x5a, 0xc6, 0xe6, 0xca, 0x3f, 0x7f, + 0x93, 0xe9, 0x56, 0x26, 0xc2, 0x24, 0xea, 0x7b, 0x12, 0x48, 0x88, 0xac, 0x26, 0xa3, 0x6d, 0x72, + 0xa7, 0xf3, 0x0c, 0x5c, 0x71, 0x1b, 0x04, 0x73, 0xf0, 0xc9, 0x79, 0xb3, 0x30, 0x3e, 0x6f, 0xa2, + 0x29, 0x46, 0x27, 0xaa, 0xf8, 0x3e, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x25, 0xa5, 0xbe, 0x43, 0x16, + 0xda, 0x39, 0x99, 0x38, 0x6c, 0x47, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x23, 0xaa, 0xf4, 0xac, + 0x77, 0x06, 0x5a, 0x9d, 0x54, 0xed, 0xf3, 0x44, 0xe0, 0xc7, 0x73, 0xb3, 0x90, 0xc5, 0x66, 0x91, + 0x6c, 0x35, 0xc9, 0x9a, 0xee, 0x53, 0xcd, 0x91, 0x99, 0x39, 0x8e, 0x50, 0x99, 0x0b, 0x18, 0x26, + 0x8d, 0xa5, 0x56, 0xf1, 0xd2, 0x9a, 0x16, 0x1a, 0xc4, 0x59, 0xd5, 0xe2, 0xe5, 0x5b, 0x52, 0x86, + 0x66, 0x6a, 0xd6, 0xdb, 0xd9, 0x31, 0xe4, 0x78, 0xf8, 0x08, 0x2d, 0x47, 0xe1, 0x80, 0xbb, 0xc7, + 0x7a, 0x88, 0xad, 0x05, 0x3b, 0x1d, 0x2a, 0x92, 0xf3, 0x83, 0x6e, 0xb3, 0x9c, 0xc5, 0x54, 0x8b, + 0xe1, 0x87, 0xa8, 0xf2, 0x02, 0x3a, 0x7e, 0x18, 0xf6, 0xf5, 0x29, 0xc8, 0x82, 0xba, 0x0f, 0x32, + 0x96, 0xf3, 0xa3, 0x16, 0xae, 0x68, 0x80, 0xe6, 0x7a, 0x96, 0x8b, 0x74, 0x33, 0xfc, 0x17, 0x2a, + 0x0f, 0x60, 0x04, 0x03, 0x65, 0xbd, 0xe6, 0xfc, 0x9a, 0x8f, 0xbc, 0x2f, 0xc1, 0xcf, 0xf9, 0x83, + 0x66, 0x45, 0xf8, 0x4f, 0xb4, 0x9c, 0x08, 0xe6, 0x41, 0xb6, 0xd3, 0x9a, 0xb3, 0x2e, 0x6d, 0xb7, + 0x15, 0x22, 0x6b, 0xd5, 0x8b, 0xea, 
0x12, 0xeb, 0xb5, 0x81, 0xd6, 0xda, 0x10, 0x8f, 0xb8, 0x0b, + 0x14, 0x7a, 0x10, 0x43, 0xe0, 0x02, 0xb6, 0x51, 0x2d, 0x60, 0x43, 0x48, 0x22, 0xe6, 0x82, 0xee, + 0xf9, 0x93, 0xee, 0x59, 0x3b, 0xc8, 0x13, 0x74, 0x5a, 0x83, 0x5b, 0xa8, 0x24, 0x03, 0xb5, 0x82, + 0xda, 0xf4, 0x5f, 0xc9, 0x5a, 0xaa, 0x32, 0xf8, 0x77, 0x54, 0x8a, 0x98, 0xf0, 0x1b, 0x45, 0x55, + 0x51, 0x95, 0xd9, 0x43, 0x26, 0x7c, 0xaa, 0x50, 0x95, 0x0d, 0x63, 0xd1, 0x28, 0xb5, 0x8c, 0xcd, + 0xb2, 0xce, 0x86, 0xb1, 0xa0, 0x0a, 0xb5, 0x3e, 0x19, 0x28, 0xdf, 0x0e, 0xee, 0xa1, 0xaa, 0xf0, + 0xe3, 0x50, 0x88, 0x01, 0xe8, 0x43, 0xde, 0xbc, 0xde, 0xc2, 0xef, 0x69, 0xf6, 0x5e, 0x18, 0xf4, + 0xb8, 0xe7, 0xd4, 0xe5, 0xbf, 0xcc, 0x31, 0x3a, 0xd1, 0xc6, 0x02, 0xd5, 0xdd, 0x01, 0x87, 0x40, + 0x64, 0x75, 0xfa, 0xb8, 0x37, 0xae, 0xd7, 0x6b, 0x6f, 0x46, 0xc1, 0xf9, 0x59, 0x6f, 0xa5, 0x3e, + 0x8b, 0xd2, 0xaf, 0xba, 0x58, 0x6f, 0x0c, 0xb4, 0x7e, 0x05, 0x17, 0xff, 0x86, 0x8a, 0x69, 0x9c, + 0x9f, 0xbf, 0x32, 0x3e, 0x6f, 0x16, 0x8f, 0xe8, 0x3e, 0x95, 0x18, 0x7e, 0x82, 0x2a, 0x49, 0x76, + 0x3f, 0xed, 0xf1, 0xbf, 0x05, 0x3d, 0x5e, 0xbe, 0xba, 0xb3, 0x22, 0x7f, 0x61, 0x8e, 0xe6, 0xa2, + 0x78, 0x13, 0x55, 0x5d, 0xe6, 0xa4, 0x41, 0x77, 0x00, 0xea, 0x78, 0xf5, 0x6c, 0x65, 0x7b, 0xbb, + 0x19, 0x46, 0x27, 0x59, 0xab, 0x8d, 0x7e, 0xb9, 0x72, 0xc7, 0xd2, 0xfd, 0xf3, 0x28, 0x51, 0xee, + 0x8b, 0x99, 0xfb, 0xbb, 0x87, 0x6d, 0x2a, 0x31, 0xdc, 0x44, 0xe5, 0x4e, 0x1a, 0x27, 0x42, 0x79, + 0x2f, 0x3a, 0x35, 0xf9, 0xab, 0x1d, 0x09, 0xd0, 0x0c, 0x77, 0xc8, 0xc9, 0x85, 0x59, 0x38, 0xbd, + 0x30, 0x0b, 0x67, 0x17, 0x66, 0xe1, 0xe5, 0xd8, 0x34, 0x4e, 0xc6, 0xa6, 0x71, 0x3a, 0x36, 0x8d, + 0xb3, 0xb1, 0x69, 0x7c, 0x18, 0x9b, 0xc6, 0xab, 0x8f, 0x66, 0xe1, 0x51, 0x35, 0x9f, 0xea, 0x4b, + 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6c, 0xff, 0x86, 0xcd, 0x06, 0x00, 0x00, +} + +func (m *AuditSink) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSink) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i 
= encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditSinkSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Policy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Policy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Stages) > 0 { + for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Stages[iNdEx]) + copy(dAtA[i:], m.Stages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stages[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) + i-- + dAtA[i] = 0x20 + } + if m.Path != nil { + i -= len(*m.Path) + copy(dAtA[i:], *m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Webhook) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Webhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Throttle != 
nil { + { + size, err := m.Throttle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CABundle != nil { + i -= len(m.CABundle) + copy(dAtA[i:], m.CABundle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i-- + dAtA[i] = 0x1a + } + if m.Service != nil { + { + size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.URL != nil { + i -= len(*m.URL) + copy(dAtA[i:], *m.URL) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WebhookThrottleConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Burst != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) + i-- + dAtA[i] = 0x10 + } + if m.QPS != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AuditSink) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AuditSinkList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AuditSinkSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Policy.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Webhook.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Policy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Stages) > 0 { + for _, s := range m.Stages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Path != nil { + l = len(*m.Path) + n += 1 + l 
+ sovGenerated(uint64(l)) + } + if m.Port != nil { + n += 1 + sovGenerated(uint64(*m.Port)) + } + return n +} + +func (m *Webhook) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Throttle != nil { + l = m.Throttle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WebhookClientConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.URL != nil { + l = len(*m.URL) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Service != nil { + l = m.Service.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CABundle != nil { + l = len(m.CABundle) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *WebhookThrottleConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.QPS != nil { + n += 1 + sovGenerated(uint64(*m.QPS)) + } + if m.Burst != nil { + n += 1 + sovGenerated(uint64(*m.Burst)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AuditSink) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSink{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *AuditSinkList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]AuditSink{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AuditSink", "AuditSink", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&AuditSinkList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *AuditSinkSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AuditSinkSpec{`, + `Policy:` + strings.Replace(strings.Replace(this.Policy.String(), "Policy", "Policy", 1), `&`, ``, 1) + `,`, + `Webhook:` + strings.Replace(strings.Replace(this.Webhook.String(), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Policy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Policy{`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `Stages:` + fmt.Sprintf("%v", this.Stages) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceReference{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Path:` + valueToStringGenerated(this.Path) + `,`, + `Port:` + valueToStringGenerated(this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *Webhook) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Webhook{`, + `Throttle:` + strings.Replace(this.Throttle.String(), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 
1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookClientConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookClientConfig{`, + `URL:` + valueToStringGenerated(this.URL) + `,`, + `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, + `}`, + }, "") + return s +} +func (this *WebhookThrottleConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WebhookThrottleConfig{`, + `QPS:` + valueToStringGenerated(this.QPS) + `,`, + `Burst:` + valueToStringGenerated(this.Burst) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AuditSink) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSink: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSink: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditSinkList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSinkList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSinkList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, AuditSink{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditSinkSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditSinkSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Policy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Policy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = Level(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Stages = append(m.Stages, Stage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { 
+ return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Port = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *Webhook) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Throttle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Throttle == nil { + m.Throttle = &WebhookThrottleConfig{} + } + if err := m.Throttle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.URL = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Service == nil { + m.Service = &ServiceReference{} + } + if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) + if m.CABundle == nil { + m.CABundle = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WebhookThrottleConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WebhookThrottleConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field QPS", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.QPS = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Burst", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Burst = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto new file mode 100644 index 0000000..674debe --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto @@ -0,0 +1,162 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.auditregistration.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". 
+option go_package = "v1alpha1"; + +// AuditSink represents a cluster level audit sink +message AuditSink { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the audit configuration spec + optional AuditSinkSpec spec = 2; +} + +// AuditSinkList is a list of AuditSink items. +message AuditSinkList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of audit configurations. + repeated AuditSink items = 2; +} + +// AuditSinkSpec holds the spec for the audit sink +message AuditSinkSpec { + // Policy defines the policy for selecting which events should be sent to the webhook + // required + optional Policy policy = 1; + + // Webhook to send events + // required + optional Webhook webhook = 2; +} + +// Policy defines the configuration of how audit events are logged +message Policy { + // The Level that all requests are recorded at. + // available options: None, Metadata, Request, RequestResponse + // required + optional string level = 1; + + // Stages is a list of stages for which events are created. + // +optional + repeated string stages = 2; +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +message ServiceReference { + // `namespace` is the namespace of the service. + // Required + optional string namespace = 1; + + // `name` is the name of the service. + // Required + optional string name = 2; + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + optional string path = 3; + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + optional int32 port = 4; +} + +// Webhook holds the configuration of the webhook +message Webhook { + // Throttle holds the options for throttling the webhook + // +optional + optional WebhookThrottleConfig throttle = 1; + + // ClientConfig holds the connection parameters for the webhook + // required + optional WebhookClientConfig clientConfig = 2; +} + +// WebhookClientConfig contains the information to make a connection with the webhook +message WebhookClientConfig { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. 
+ // + // +optional + optional string url = 1; + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + optional ServiceReference service = 2; + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + optional bytes caBundle = 3; +} + +// WebhookThrottleConfig holds the configuration for throttling events +message WebhookThrottleConfig { + // ThrottleQPS maximum number of batches per second + // default 10 QPS + // +optional + optional int64 qps = 1; + + // ThrottleBurst is the maximum number of events sent at the same moment + // default 15 QPS + // +optional + optional int64 burst = 2; +} + diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/register.go b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go new file mode 100644 index 0000000..d627160 --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "auditregistration.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &AuditSink{}, + &AuditSinkList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go new file mode 100644 index 0000000..a0fb48c --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go @@ -0,0 +1,198 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Level defines the amount of information logged during auditing +type Level string + +// Valid audit levels +const ( + // LevelNone disables auditing + LevelNone Level = "None" + // LevelMetadata provides the basic level of auditing. + LevelMetadata Level = "Metadata" + // LevelRequest provides Metadata level of auditing, and additionally + // logs the request object (does not apply for non-resource requests). + LevelRequest Level = "Request" + // LevelRequestResponse provides Request level of auditing, and additionally + // logs the response object (does not apply for non-resource requests and watches). + LevelRequestResponse Level = "RequestResponse" +) + +// Stage defines the stages in request handling during which audit events may be generated. +type Stage string + +// Valid audit stages. +const ( + // The stage for events generated after the audit handler receives the request, but before it + // is delegated down the handler chain. + StageRequestReceived = "RequestReceived" + // The stage for events generated after the response headers are sent, but before the response body + // is sent. This stage is only generated for long-running requests (e.g. watch). + StageResponseStarted = "ResponseStarted" + // The stage for events generated after the response body has been completed, and no more bytes + // will be sent. + StageResponseComplete = "ResponseComplete" + // The stage for events generated when a panic occurred. + StagePanic = "Panic" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSink represents a cluster level audit sink +type AuditSink struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the audit configuration spec + Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// AuditSinkSpec holds the spec for the audit sink +type AuditSinkSpec struct { + // Policy defines the policy for selecting which events should be sent to the webhook + // required + Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"` + + // Webhook to send events + // required + Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// AuditSinkList is a list of AuditSink items. +type AuditSinkList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of audit configurations. + Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Policy defines the configuration of how audit events are logged +type Policy struct { + // The Level that all requests are recorded at. 
+ // available options: None, Metadata, Request, RequestResponse + // required + Level Level `json:"level" protobuf:"bytes,1,opt,name=level"` + + // Stages is a list of stages for which events are created. + // +optional + Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"` +} + +// Webhook holds the configuration of the webhook +type Webhook struct { + // Throttle holds the options for throttling the webhook + // +optional + Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"` + + // ClientConfig holds the connection parameters for the webhook + // required + ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` +} + +// WebhookThrottleConfig holds the configuration for throttling events +type WebhookThrottleConfig struct { + // ThrottleQPS maximum number of batches per second + // default 10 QPS + // +optional + QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"` + + // ThrottleBurst is the maximum number of events sent at the same moment + // default 15 QPS + // +optional + Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"` +} + +// WebhookClientConfig contains the information to make a connection with the webhook +type WebhookClientConfig struct { + // `url` gives the location of the webhook, in standard URL form + // (`scheme://host:port/path`). Exactly one of `url` or `service` + // must be specified. + // + // The `host` should not refer to a service running in the cluster; use + // the `service` field instead. The host might be resolved via external + // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve + // in-cluster DNS as that would be a layering violation). `host` may + // also be an IP address. + // + // Please note that using `localhost` or `127.0.0.1` as a `host` is + // risky unless you take great care to run this webhook on all hosts + // which run an apiserver which might need to make calls to this + // webhook. Such installs are likely to be non-portable, i.e., not easy + // to turn up in a new cluster. + // + // The scheme must be "https"; the URL must begin with "https://". + // + // A path is optional, and if present may be any string permissible in + // a URL. You may use the path to pass an arbitrary string to the + // webhook, for example, a cluster identifier. + // + // Attempting to use a user or basic auth e.g. "user:password@" is not + // allowed. Fragments ("#...") and query parameters ("?...") are not + // allowed, either. + // + // +optional + URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` + + // `service` is a reference to the service for this webhook. Either + // `service` or `url` must be specified. + // + // If the webhook is running within the cluster, then you should use `service`. + // + // +optional + Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` + + // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. + // If unspecified, system trust roots on the apiserver are used. + // +optional + CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"` +} + +// ServiceReference holds a reference to Service.legacy.k8s.io +type ServiceReference struct { + // `namespace` is the namespace of the service. + // Required + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // `name` is the name of the service. 
+ // Required + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // `path` is an optional URL path which will be sent in any request to + // this service. + // +optional + Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` + + // If specified, the port on the service that hosting webhook. + // Default to 443 for backward compatibility. + // `port` should be a valid port number (1-65535, inclusive). + // +optional + Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` +} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000..1a86f4d --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,111 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AuditSink = map[string]string{ + "": "AuditSink represents a cluster level audit sink", + "spec": "Spec defines the audit configuration spec", +} + +func (AuditSink) SwaggerDoc() map[string]string { + return map_AuditSink +} + +var map_AuditSinkList = map[string]string{ + "": "AuditSinkList is a list of AuditSink items.", + "items": "List of audit configurations.", +} + +func (AuditSinkList) SwaggerDoc() map[string]string { + return map_AuditSinkList +} + +var map_AuditSinkSpec = map[string]string{ + "": "AuditSinkSpec holds the spec for the audit sink", + "policy": "Policy defines the policy for selecting which events should be sent to the webhook required", + "webhook": "Webhook to send events required", +} + +func (AuditSinkSpec) SwaggerDoc() map[string]string { + return map_AuditSinkSpec +} + +var map_Policy = map[string]string{ + "": "Policy defines the configuration of how audit events are logged", + "level": "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required", + "stages": "Stages is a list of stages for which events are created.", +} + +func (Policy) SwaggerDoc() map[string]string { + return map_Policy +} + +var map_ServiceReference = map[string]string{ + "": "ServiceReference holds a reference to Service.legacy.k8s.io", + "namespace": "`namespace` is the namespace of the service. Required", + "name": "`name` is the name of the service. 
Required", + "path": "`path` is an optional URL path which will be sent in any request to this service.", + "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", +} + +func (ServiceReference) SwaggerDoc() map[string]string { + return map_ServiceReference +} + +var map_Webhook = map[string]string{ + "": "Webhook holds the configuration of the webhook", + "throttle": "Throttle holds the options for throttling the webhook", + "clientConfig": "ClientConfig holds the connection parameters for the webhook required", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + +var map_WebhookClientConfig = map[string]string{ + "": "WebhookClientConfig contains the information to make a connection with the webhook", + "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", +} + +func (WebhookClientConfig) SwaggerDoc() map[string]string { + return map_WebhookClientConfig +} + +var map_WebhookThrottleConfig = map[string]string{ + "": "WebhookThrottleConfig holds the configuration for throttling events", + "qps": "ThrottleQPS maximum number of batches per second default 10 QPS", + "burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS", +} + +func (WebhookThrottleConfig) SwaggerDoc() map[string]string { + return map_WebhookThrottleConfig +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..621a19e --- /dev/null +++ b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,229 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSink) DeepCopyInto(out *AuditSink) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink. +func (in *AuditSink) DeepCopy() *AuditSink { + if in == nil { + return nil + } + out := new(AuditSink) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSink) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]AuditSink, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList. +func (in *AuditSinkList) DeepCopy() *AuditSinkList { + if in == nil { + return nil + } + out := new(AuditSinkList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *AuditSinkList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) { + *out = *in + in.Policy.DeepCopyInto(&out.Policy) + in.Webhook.DeepCopyInto(&out.Webhook) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec. +func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec { + if in == nil { + return nil + } + out := new(AuditSinkSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Policy) DeepCopyInto(out *Policy) { + *out = *in + if in.Stages != nil { + in, out := &in.Stages, &out.Stages + *out = make([]Stage, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. +func (in *Policy) DeepCopy() *Policy { + if in == nil { + return nil + } + out := new(Policy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { + *out = *in + if in.Path != nil { + in, out := &in.Path, &out.Path + *out = new(string) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. +func (in *ServiceReference) DeepCopy() *ServiceReference { + if in == nil { + return nil + } + out := new(ServiceReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + if in.Throttle != nil { + in, out := &in.Throttle, &out.Throttle + *out = new(WebhookThrottleConfig) + (*in).DeepCopyInto(*out) + } + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { + *out = *in + if in.URL != nil { + in, out := &in.URL, &out.URL + *out = new(string) + **out = **in + } + if in.Service != nil { + in, out := &in.Service, &out.Service + *out = new(ServiceReference) + (*in).DeepCopyInto(*out) + } + if in.CABundle != nil { + in, out := &in.CABundle, &out.CABundle + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. +func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { + if in == nil { + return nil + } + out := new(WebhookClientConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) { + *out = *in + if in.QPS != nil { + in, out := &in.QPS, &out.QPS + *out = new(int64) + **out = **in + } + if in.Burst != nil { + in, out := &in.Burst, &out.Burst + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig. +func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig { + if in == nil { + return nil + } + out := new(WebhookThrottleConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go new file mode 100644 index 0000000..1614265 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +groupName=authentication.k8s.io +// +k8s:openapi-gen=true + +package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go new file mode 100644 index 0000000..6524f8c --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -0,0 +1,2581 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } +func (*BoundObjectReference) ProtoMessage() {} +func (*BoundObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{0} +} +func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *BoundObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_BoundObjectReference.Merge(m, src) +} +func (m *BoundObjectReference) XXX_Size() int { + return m.Size() +} +func (m *BoundObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_BoundObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{1} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{2} +} +func (m *TokenRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequest.Merge(m, src) +} +func (m *TokenRequest) XXX_Size() int { + return m.Size() +} +func (m *TokenRequest) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequest proto.InternalMessageInfo + +func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } +func (*TokenRequestSpec) ProtoMessage() {} +func (*TokenRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{3} +} +func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestSpec.Merge(m, src) +} +func (m *TokenRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo + +func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } +func 
(*TokenRequestStatus) ProtoMessage() {} +func (*TokenRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{4} +} +func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenRequestStatus.Merge(m, src) +} +func (m *TokenRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{5} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReview proto.InternalMessageInfo + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{6} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{7} +} +func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_2953ea822e7ffe1e, []int{8} +} +func (m *UserInfo) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UserInfo proto.InternalMessageInfo + +func init() { + proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue") + proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest") + proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec") + proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus") + proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) +} + +var fileDescriptor_2953ea822e7ffe1e = []byte{ + // 903 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0xf3, 0xa3, 0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x20, 0x2e, 0x5e, 0x09, + 0x55, 0xc0, 0xda, 0x9b, 0x08, 0xc1, 0x6a, 0x91, 0x90, 0x6a, 0x1a, 0x41, 0x84, 0x60, 0x57, 0xb3, + 0xdb, 0x82, 0x38, 0x31, 0xb1, 0x5f, 0x53, 0x13, 0x3c, 0x36, 0xf6, 0x38, 0x6c, 0x6e, 0xfb, 0x27, + 0x70, 0x04, 0x89, 0x03, 0x7f, 0x04, 0x12, 0xff, 0x42, 0x8f, 0x2b, 0x4e, 0x3d, 0xa0, 0x88, 0x9a, + 0x2b, 0x47, 0x4e, 0x9c, 0xd0, 0x8c, 0xa7, 0x71, 0x9c, 0xb4, 0x69, 0x4e, 0x7b, 0x8b, 0xdf, 0xfb, + 0xde, 0xf7, 0xde, 0xfb, 0xe6, 0xcb, 0x0c, 0xea, 0x8d, 0x1e, 0xc4, 0xa6, 0x17, 0x58, 0xa3, 0x64, + 0x00, 0x11, 0x03, 0x0e, 0xb1, 0x35, 0x06, 0xe6, 0x06, 0x91, 0xa5, 0x12, 0x34, 0xf4, 0x2c, 0x9a, + 0xf0, 0x53, 0x60, 0xdc, 0x73, 0x28, 0xf7, 0x02, 0x66, 0x8d, 0x3b, 0xd6, 0x10, 0x18, 0x44, 0x94, + 0x83, 0x6b, 0x86, 0x51, 0xc0, 0x03, 0xfc, 0x7a, 0x86, 0x36, 0x69, 0xe8, 0x99, 0x45, 0xb4, 0x39, + 0xee, 0xec, 0xde, 0x1b, 0x7a, 0xfc, 0x34, 0x19, 0x98, 0x4e, 0xe0, 0x5b, 0xc3, 0x60, 0x18, 0x58, + 0xb2, 0x68, 0x90, 0x9c, 0xc8, 0x2f, 0xf9, 0x21, 0x7f, 0x65, 0x64, 0xbb, 0xef, 0xe5, 0xad, 0x7d, + 0xea, 0x9c, 0x7a, 0x0c, 0xa2, 0x89, 0x15, 0x8e, 0x86, 0x22, 0x10, 0x5b, 0x3e, 0x70, 0x7a, 0xc5, + 0x08, 0xbb, 0xd6, 0x75, 0x55, 0x51, 0xc2, 0xb8, 0xe7, 0xc3, 0x52, 0xc1, 0xfb, 0x37, 0x15, 0xc4, + 0xce, 0x29, 0xf8, 0x74, 0xb1, 0xce, 0xf8, 0x43, 0x43, 0xaf, 0xda, 0x41, 0xc2, 0xdc, 0x47, 0x83, + 0x6f, 0xc1, 0xe1, 0x04, 0x4e, 0x20, 0x02, 0xe6, 0x00, 0xde, 0x43, 0xd5, 0x91, 0xc7, 0xdc, 0x96, + 0xb6, 0xa7, 0xed, 0x37, 0xec, 0x5b, 0x67, 0x53, 0xbd, 0x94, 0x4e, 0xf5, 0xea, 0x67, 0x1e, 0x73, + 0x89, 0xcc, 0xe0, 0x2e, 0x42, 0xf4, 0x71, 0xff, 0x18, 0xa2, 0xd8, 0x0b, 0x58, 0xab, 0x2c, 0x71, + 0x58, 0xe1, 0xd0, 0xc1, 0x2c, 
0x43, 0xe6, 0x50, 0x82, 0x95, 0x51, 0x1f, 0x5a, 0x95, 0x22, 0xeb, + 0x17, 0xd4, 0x07, 0x22, 0x33, 0xd8, 0x46, 0x95, 0xa4, 0x7f, 0xd8, 0xaa, 0x4a, 0xc0, 0x7d, 0x05, + 0xa8, 0x1c, 0xf5, 0x0f, 0xff, 0x9b, 0xea, 0x6f, 0x5e, 0xb7, 0x24, 0x9f, 0x84, 0x10, 0x9b, 0x47, + 0xfd, 0x43, 0x22, 0x8a, 0x8d, 0x0f, 0x10, 0xea, 0x3d, 0xe3, 0x11, 0x3d, 0xa6, 0xdf, 0x25, 0x80, + 0x75, 0x54, 0xf3, 0x38, 0xf8, 0x71, 0x4b, 0xdb, 0xab, 0xec, 0x37, 0xec, 0x46, 0x3a, 0xd5, 0x6b, + 0x7d, 0x11, 0x20, 0x59, 0xfc, 0x61, 0xfd, 0xa7, 0x5f, 0xf5, 0xd2, 0xf3, 0x3f, 0xf7, 0x4a, 0xc6, + 0x2f, 0x65, 0x74, 0xeb, 0x69, 0x30, 0x02, 0x46, 0xe0, 0xfb, 0x04, 0x62, 0x8e, 0xbf, 0x41, 0x75, + 0x71, 0x44, 0x2e, 0xe5, 0x54, 0x2a, 0xd1, 0xec, 0xde, 0x37, 0x73, 0x77, 0xcc, 0x86, 0x30, 0xc3, + 0xd1, 0x50, 0x04, 0x62, 0x53, 0xa0, 0xcd, 0x71, 0xc7, 0xcc, 0xe4, 0xfc, 0x1c, 0x38, 0xcd, 0x35, + 0xc9, 0x63, 0x64, 0xc6, 0x8a, 0x1f, 0xa3, 0x6a, 0x1c, 0x82, 0x23, 0xf5, 0x6b, 0x76, 0x4d, 0x73, + 0x95, 0xf7, 0xcc, 0xf9, 0xd9, 0x9e, 0x84, 0xe0, 0xe4, 0x0a, 0x8a, 0x2f, 0x22, 0x99, 0xf0, 0x57, + 0x68, 0x23, 0xe6, 0x94, 0x27, 0xb1, 0x54, 0xb9, 0x38, 0xf1, 0x4d, 0x9c, 0xb2, 0xce, 0xde, 0x52, + 0xac, 0x1b, 0xd9, 0x37, 0x51, 0x7c, 0xc6, 0xbf, 0x1a, 0xda, 0x5e, 0x1c, 0x01, 0xbf, 0x83, 0x1a, + 0x34, 0x71, 0x3d, 0x61, 0x9a, 0x4b, 0x89, 0x37, 0xd3, 0xa9, 0xde, 0x38, 0xb8, 0x0c, 0x92, 0x3c, + 0x8f, 0x3f, 0x46, 0x3b, 0xf0, 0x2c, 0xf4, 0x22, 0xd9, 0xfd, 0x09, 0x38, 0x01, 0x73, 0x63, 0x79, + 0xd6, 0x15, 0xfb, 0x4e, 0x3a, 0xd5, 0x77, 0x7a, 0x8b, 0x49, 0xb2, 0x8c, 0xc7, 0x0c, 0x6d, 0x0d, + 0x0a, 0x96, 0x55, 0x8b, 0x76, 0x57, 0x2f, 0x7a, 0x95, 0xcd, 0x6d, 0x9c, 0x4e, 0xf5, 0xad, 0x62, + 0x86, 0x2c, 0xb0, 0x1b, 0xbf, 0x69, 0x08, 0x2f, 0xab, 0x84, 0xef, 0xa2, 0x1a, 0x17, 0x51, 0xf5, + 0x17, 0xd9, 0x54, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x13, 0x74, 0x3b, 0x5f, 0xe0, 0xa9, 0xe7, + 0x43, 0xcc, 0xa9, 0x1f, 0xaa, 0xd3, 0x7e, 0x7b, 0x3d, 0x2f, 0x89, 0x32, 0xfb, 0x35, 0x45, 0x7f, + 0xbb, 0xb7, 0x4c, 0x47, 0xae, 0xea, 0x61, 0xfc, 0x5c, 0x46, 0x4d, 0x35, 0xf6, 0xd8, 0x83, 0x1f, + 0x5e, 0x82, 0x97, 0x1f, 0x15, 0xbc, 0x7c, 0x6f, 0x2d, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x97, + 0x0b, 0x56, 0xb6, 0xd6, 0xa7, 0x5c, 0xed, 0x64, 0x07, 0xbd, 0xb2, 0xd0, 0x7f, 0xbd, 0xe3, 0x2c, + 0x98, 0xbd, 0xbc, 0xda, 0xec, 0xc6, 0x3f, 0x1a, 0xda, 0x59, 0x1a, 0x09, 0x7f, 0x88, 0x36, 0xe7, + 0x26, 0x87, 0xec, 0x86, 0xad, 0xdb, 0x77, 0x54, 0xbf, 0xcd, 0x83, 0xf9, 0x24, 0x29, 0x62, 0xf1, + 0xa7, 0xa8, 0x9a, 0xc4, 0x10, 0x29, 0x85, 0xdf, 0x5a, 0x2d, 0xc7, 0x51, 0x0c, 0x51, 0x9f, 0x9d, + 0x04, 0xb9, 0xb4, 0x22, 0x42, 0x24, 0x43, 0x71, 0x93, 0xea, 0x0d, 0x7f, 0xdb, 0xbb, 0xa8, 0x06, + 0x51, 0x14, 0x44, 0xea, 0xde, 0x9e, 0x69, 0xd3, 0x13, 0x41, 0x92, 0xe5, 0x8c, 0xdf, 0xcb, 0xa8, + 0x7e, 0xd9, 0x12, 0xbf, 0x8b, 0xea, 0xa2, 0x8d, 0xbc, 0xec, 0x33, 0x41, 0xb7, 0x55, 0x91, 0xc4, + 0x88, 0x38, 0x99, 0x21, 0xf0, 0x1b, 0xa8, 0x92, 0x78, 0xae, 0x7a, 0x43, 0x9a, 0x73, 0x97, 0x3e, + 0x11, 0x71, 0x6c, 0xa0, 0x8d, 0x61, 0x14, 0x24, 0xa1, 0xb0, 0x81, 0x18, 0x14, 0x89, 0x13, 0xfd, + 0x44, 0x46, 0x88, 0xca, 0xe0, 0x63, 0x54, 0x03, 0x71, 0xe7, 0xcb, 0x5d, 0x9a, 0xdd, 0xce, 0x7a, + 0xd2, 0x98, 0xf2, 0x9d, 0xe8, 0x31, 0x1e, 0x4d, 0xe6, 0xb6, 0x12, 0x31, 0x92, 0xd1, 0xed, 0x0e, + 0xd4, 0x5b, 0x22, 0x31, 0x78, 0x1b, 0x55, 0x46, 0x30, 0xc9, 0x36, 0x22, 0xe2, 0x27, 0xfe, 0x08, + 0xd5, 0xc6, 0xe2, 0x99, 0x51, 0x47, 0xb2, 0xbf, 0xba, 0x6f, 0xfe, 0x2c, 0x91, 0xac, 0xec, 0x61, + 0xf9, 0x81, 0x66, 0xef, 0x9f, 0x5d, 0xb4, 0x4b, 0x2f, 0x2e, 0xda, 0xa5, 0xf3, 0x8b, 0x76, 0xe9, + 0x79, 0xda, 0xd6, 0xce, 0xd2, 0xb6, 0xf6, 0x22, 0x6d, 
0x6b, 0xe7, 0x69, 0x5b, 0xfb, 0x2b, 0x6d, + 0x6b, 0x3f, 0xfe, 0xdd, 0x2e, 0x7d, 0x5d, 0x1e, 0x77, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x8c, + 0x44, 0x87, 0xd0, 0xe2, 0x08, 0x00, 0x00, +} + +func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BoundObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TokenRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ExpirationSeconds != nil { + i = 
encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x20 + } + if m.BoundObjectRef != nil { + { + size, err := m.BoundObjectRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TokenReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- + if m.Authenticated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *UserInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *BoundObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + 
l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenRequestSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.BoundObjectRef != nil { + l = m.BoundObjectRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + return n +} + +func (m *TokenRequestStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ExpirationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *BoundObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoundObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *TokenRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenRequestSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequestSpec{`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, + `BoundObjectRef:` + strings.Replace(this.BoundObjectRef.String(), "BoundObjectReference", "BoundObjectReference", 1) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *TokenRequestStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenRequestStatus{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 
3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoundObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoundObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l 
{ + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BoundObjectRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BoundObjectRef == nil { + m.BoundObjectRef = &BoundObjectReference{} + } + if err := m.BoundObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 
7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenRequestStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ExpirationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 
{ + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + 
} + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } 
+ if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto new file mode 100644 index 0000000..db7be17 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -0,0 +1,182 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.authentication.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// BoundObjectReference is a reference to an object that a token is bound to. +message BoundObjectReference { + // Kind of the referent. Valid kinds are 'Pod' and 'Secret'. + // +optional + optional string kind = 1; + + // API version of the referent. + // +optional + optional string aPIVersion = 2; + + // Name of the referent. 
+ // +optional + optional string name = 3; + + // UID of the referent. + // +optional + optional string uID = 4; +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// TokenRequest requests a token for a given service account. +message TokenRequest { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + optional TokenRequestSpec spec = 2; + + // +optional + optional TokenRequestStatus status = 3; +} + +// TokenRequestSpec contains client provided parameters of a token request. +message TokenRequestSpec { + // Audiences are the intendend audiences of the token. A recipient of a + // token must identitfy themself with an identifier in the list of + // audiences of the token, and otherwise should reject the token. A + // token issued for multiple audiences may be used to authenticate + // against any of the audiences listed but implies a high degree of + // trust between the target audiences. + repeated string audiences = 1; + + // ExpirationSeconds is the requested duration of validity of the request. The + // token issuer may return a token with a different validity duration so a + // client needs to check the 'expiration' field in a response. + // +optional + optional int64 expirationSeconds = 4; + + // BoundObjectRef is a reference to an object that the token will be bound to. + // The token will only be valid for as long as the bound object exists. + // NOTE: The API server's TokenReview endpoint will validate the + // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds + // small if you want prompt revocation. + // +optional + optional BoundObjectReference boundObjectRef = 3; +} + +// TokenRequestStatus is the result of a token request. +message TokenRequestStatus { + // Token is the opaque bearer token. + optional string token = 1; + + // ExpirationTimestamp is the time of expiration of the returned token. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; +} + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +message TokenReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional TokenReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + optional TokenReviewStatus status = 3; +} + +// TokenReviewSpec is a description of the token authentication request. +message TokenReviewSpec { + // Token is the opaque bearer token. + // +optional + optional string token = 1; + + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + repeated string audiences = 2; +} + +// TokenReviewStatus is the result of the token authentication request. +message TokenReviewStatus { + // Authenticated indicates that the token was associated with a known user. 
+ // +optional + optional bool authenticated = 1; + + // User is the UserInfo associated with the provided token. + // +optional + optional UserInfo user = 2; + + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + repeated string audiences = 4; + + // Error indicates that the token couldn't be checked + // +optional + optional string error = 3; +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +message UserInfo { + // The name that uniquely identifies this user among all active users. + // +optional + optional string username = 1; + + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + optional string uid = 2; + + // The names of groups this user is a part of. + // +optional + repeated string groups = 3; + + // Any additional information provided by the authenticator. + // +optional + map extra = 4; +} + diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go new file mode 100644 index 0000000..c522e4a --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + &TokenRequest{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go new file mode 100644 index 0000000..668b720 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -0,0 +1,189 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiplied times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key will be every after the prefix. + // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple + // times to have multiple elements in the slice under a single key + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` +} + +// TokenReviewStatus is the result of the token authentication request. 
+type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TokenRequest requests a token for a given service account. +type TokenRequest struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Spec TokenRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + // +optional + Status TokenRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenRequestSpec contains client provided parameters of a token request. +type TokenRequestSpec struct { + // Audiences are the intendend audiences of the token. A recipient of a + // token must identitfy themself with an identifier in the list of + // audiences of the token, and otherwise should reject the token. A + // token issued for multiple audiences may be used to authenticate + // against any of the audiences listed but implies a high degree of + // trust between the target audiences. + Audiences []string `json:"audiences" protobuf:"bytes,1,rep,name=audiences"` + + // ExpirationSeconds is the requested duration of validity of the request. 
The + // token issuer may return a token with a different validity duration so a + // client needs to check the 'expiration' field in a response. + // +optional + ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"` + + // BoundObjectRef is a reference to an object that the token will be bound to. + // The token will only be valid for as long as the bound object exists. + // NOTE: The API server's TokenReview endpoint will validate the + // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds + // small if you want prompt revocation. + // +optional + BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"` +} + +// TokenRequestStatus is the result of a token request. +type TokenRequestStatus struct { + // Token is the opaque bearer token. + Token string `json:"token" protobuf:"bytes,1,opt,name=token"` + // ExpirationTimestamp is the time of expiration of the returned token. + ExpirationTimestamp metav1.Time `json:"expirationTimestamp" protobuf:"bytes,2,opt,name=expirationTimestamp"` +} + +// BoundObjectReference is a reference to an object that a token is bound to. +type BoundObjectReference struct { + // Kind of the referent. Valid kinds are 'Pod' and 'Secret'. + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=aPIVersion"` + + // Name of the referent. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"` +} diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..09f6b92 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -0,0 +1,115 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_BoundObjectReference = map[string]string{ + "": "BoundObjectReference is a reference to an object that a token is bound to.", + "kind": "Kind of the referent. 
Valid kinds are 'Pod' and 'Secret'.", + "apiVersion": "API version of the referent.", + "name": "Name of the referent.", + "uid": "UID of the referent.", +} + +func (BoundObjectReference) SwaggerDoc() map[string]string { + return map_BoundObjectReference +} + +var map_TokenRequest = map[string]string{ + "": "TokenRequest requests a token for a given service account.", +} + +func (TokenRequest) SwaggerDoc() map[string]string { + return map_TokenRequest +} + +var map_TokenRequestSpec = map[string]string{ + "": "TokenRequestSpec contains client provided parameters of a token request.", + "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", + "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", + "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", +} + +func (TokenRequestSpec) SwaggerDoc() map[string]string { + return map_TokenRequestSpec +} + +var map_TokenRequestStatus = map[string]string{ + "": "TokenRequestStatus is the result of a token request.", + "token": "Token is the opaque bearer token.", + "expirationTimestamp": "ExpirationTimestamp is the time of expiration of the returned token.", +} + +func (TokenRequestStatus) SwaggerDoc() map[string]string { + return map_TokenRequestStatus +} + +var map_TokenReview = map[string]string{ + "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", +} + +func (TokenReview) SwaggerDoc() map[string]string { + return map_TokenReview +} + +var map_TokenReviewSpec = map[string]string{ + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", + "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", +} + +func (TokenReviewSpec) SwaggerDoc() map[string]string { + return map_TokenReviewSpec +} + +var map_TokenReviewStatus = map[string]string{ + "": "TokenReviewStatus is the result of the token authentication request.", + "authenticated": "Authenticated indicates that the token was associated with a known user.", + "user": "User is the UserInfo associated with the provided token.", + "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. 
An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", + "error": "Error indicates that the token couldn't be checked", +} + +func (TokenReviewStatus) SwaggerDoc() map[string]string { + return map_TokenReviewStatus +} + +var map_UserInfo = map[string]string{ + "": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "username": "The name that uniquely identifies this user among all active users.", + "uid": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "The names of groups this user is a part of.", + "extra": "Any additional information provided by the authenticator.", +} + +func (UserInfo) SwaggerDoc() map[string]string { + return map_UserInfo +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..aca99c4 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -0,0 +1,244 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *BoundObjectReference) DeepCopyInto(out *BoundObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoundObjectReference. +func (in *BoundObjectReference) DeepCopy() *BoundObjectReference { + if in == nil { + return nil + } + out := new(BoundObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. +func (in *TokenRequest) DeepCopy() *TokenRequest { + if in == nil { + return nil + } + out := new(TokenRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TokenRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequestSpec) DeepCopyInto(out *TokenRequestSpec) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + if in.BoundObjectRef != nil { + in, out := &in.BoundObjectRef, &out.BoundObjectRef + *out = new(BoundObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestSpec. +func (in *TokenRequestSpec) DeepCopy() *TokenRequestSpec { + if in == nil { + return nil + } + out := new(TokenRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenRequestStatus) DeepCopyInto(out *TokenRequestStatus) { + *out = *in + in.ExpirationTimestamp.DeepCopyInto(&out.ExpirationTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestStatus. +func (in *TokenRequestStatus) DeepCopy() *TokenRequestStatus { + if in == nil { + return nil + } + out := new(TokenRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReview) DeepCopyInto(out *TokenReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview. +func (in *TokenReview) DeepCopy() *TokenReview { + if in == nil { + return nil + } + out := new(TokenReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TokenReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec. 
+func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { + if in == nil { + return nil + } + out := new(TokenReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { + *out = *in + in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus. +func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus { + if in == nil { + return nil + } + out := new(TokenReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserInfo) DeepCopyInto(out *UserInfo) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo. +func (in *UserInfo) DeepCopy() *UserInfo { + if in == nil { + return nil + } + out := new(UserInfo) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go new file mode 100644 index 0000000..185a224 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +groupName=authentication.k8s.io +// +k8s:openapi-gen=true + +package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go new file mode 100644 index 0000000..6c391db --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -0,0 +1,1558 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{1} +} +func (m *TokenReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReview.Merge(m, src) +} +func (m *TokenReview) XXX_Size() int { + return m.Size() +} +func (m *TokenReview) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReview.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReview proto.InternalMessageInfo + +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{2} +} +func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewSpec.Merge(m, src) +} +func (m *TokenReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo + +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{3} +} +func (m 
*TokenReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_TokenReviewStatus.Merge(m, src) +} +func (m *TokenReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *TokenReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo + +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_77c9b20d3ad27844, []int{4} +} +func (m *UserInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UserInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_UserInfo.Merge(m, src) +} +func (m *UserInfo) XXX_Size() int { + return m.Size() +} +func (m *UserInfo) XXX_DiscardUnknown() { + xxx_messageInfo_UserInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_UserInfo proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") + proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") + proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") + proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") + proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.UserInfo.ExtraEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_77c9b20d3ad27844) +} + +var fileDescriptor_77c9b20d3ad27844 = []byte{ + // 663 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xb6, 0xf3, 0x53, 0x92, 0x0d, 0x81, 0xb2, 0x12, 0x52, 0x14, 0x09, 0xa7, 0x84, 0x4b, 0xa5, + 0xd2, 0x35, 0xad, 0xaa, 0x52, 0x95, 0x53, 0x0d, 0x15, 0x2a, 0x52, 0x85, 0xb4, 0xb4, 0x1c, 0x80, + 0x03, 0x1b, 0x67, 0xea, 0x98, 0xe0, 0x1f, 0xad, 0xd7, 0x81, 0xde, 0xfa, 0x08, 0x1c, 0x39, 0x22, + 0xf1, 0x24, 0xdc, 0x7a, 0xec, 0xb1, 0x07, 0x14, 0x51, 0xf3, 0x04, 0xbc, 0x01, 0xda, 0xf5, 0xb6, + 0x4e, 0x1b, 0x41, 0xdb, 0x9b, 0xf7, 0x9b, 0xf9, 0xbe, 0x99, 0xf9, 0xc6, 0x83, 0x5e, 0x0c, 0xd7, + 0x12, 0xe2, 0x47, 0xf6, 0x30, 0xed, 0x01, 0x0f, 0x41, 0x40, 0x62, 0x8f, 0x20, 0xec, 0x47, 0xdc, + 0xd6, 0x01, 0x16, 0xfb, 0x36, 0x4b, 0xc5, 0x00, 0x42, 0xe1, 0xbb, 0x4c, 0xf8, 0x51, 0x68, 0x8f, + 0x96, 0x7a, 0x20, 0xd8, 0x92, 0xed, 0x41, 0x08, 0x9c, 0x09, 0xe8, 0x93, 0x98, 0x47, 0x22, 0xc2, + 0xf7, 0x73, 0x0a, 0x61, 0xb1, 0x4f, 0xce, 0x53, 0x88, 0xa6, 0xb4, 0x17, 0x3d, 0x5f, 0x0c, 0xd2, + 0x1e, 0x71, 0xa3, 0xc0, 0xf6, 0x22, 0x2f, 0xb2, 0x15, 0xb3, 0x97, 0xee, 0xa9, 0x97, 0x7a, 0xa8, + 0xaf, 0x5c, 0xb1, 0xbd, 0x52, 0x34, 0x11, 0x30, 0x77, 0xe0, 0x87, 0xc0, 0xf7, 0xed, 0x78, 0xe8, + 0x49, 0x20, 0xb1, 0x03, 0x10, 0xcc, 0x1e, 0x4d, 0xf5, 0xd1, 0xb6, 0xff, 0xc5, 0xe2, 0x69, 0x28, + 0xfc, 0x00, 0xa6, 
0x08, 0xab, 0x97, 0x11, 0x12, 0x77, 0x00, 0x01, 0xbb, 0xc8, 0xeb, 0x3e, 0x46, + 0x68, 0xf3, 0xb3, 0xe0, 0xec, 0x35, 0xfb, 0x98, 0x02, 0xee, 0xa0, 0xaa, 0x2f, 0x20, 0x48, 0x5a, + 0xe6, 0x5c, 0x79, 0xbe, 0xee, 0xd4, 0xb3, 0x71, 0xa7, 0xba, 0x25, 0x01, 0x9a, 0xe3, 0xeb, 0xb5, + 0xaf, 0xdf, 0x3a, 0xc6, 0xc1, 0xcf, 0x39, 0xa3, 0xfb, 0xbd, 0x84, 0x1a, 0x3b, 0xd1, 0x10, 0x42, + 0x0a, 0x23, 0x1f, 0x3e, 0xe1, 0xf7, 0xa8, 0x26, 0x87, 0xe9, 0x33, 0xc1, 0x5a, 0xe6, 0x9c, 0x39, + 0xdf, 0x58, 0x7e, 0x44, 0x0a, 0x33, 0xcf, 0x7a, 0x22, 0xf1, 0xd0, 0x93, 0x40, 0x42, 0x64, 0x36, + 0x19, 0x2d, 0x91, 0x97, 0xbd, 0x0f, 0xe0, 0x8a, 0x6d, 0x10, 0xcc, 0xc1, 0x87, 0xe3, 0x8e, 0x91, + 0x8d, 0x3b, 0xa8, 0xc0, 0xe8, 0x99, 0x2a, 0xde, 0x41, 0x95, 0x24, 0x06, 0xb7, 0x55, 0x52, 0xea, + 0xcb, 0xe4, 0xd2, 0x55, 0x91, 0x89, 0xfe, 0x5e, 0xc5, 0xe0, 0x3a, 0x37, 0xb5, 0x7e, 0x45, 0xbe, + 0xa8, 0x52, 0xc3, 0xef, 0xd0, 0x4c, 0x22, 0x98, 0x48, 0x93, 0x56, 0x59, 0xe9, 0xae, 0x5c, 0x53, + 0x57, 0x71, 0x9d, 0x5b, 0x5a, 0x79, 0x26, 0x7f, 0x53, 0xad, 0xd9, 0x75, 0xd1, 0xed, 0x0b, 0x4d, + 0xe0, 0x07, 0xa8, 0x2a, 0x24, 0xa4, 0x5c, 0xaa, 0x3b, 0x4d, 0xcd, 0xac, 0xe6, 0x79, 0x79, 0x0c, + 0x2f, 0xa0, 0x3a, 0x4b, 0xfb, 0x3e, 0x84, 0x2e, 0x24, 0xad, 0x92, 0x5a, 0x46, 0x33, 0x1b, 0x77, + 0xea, 0x1b, 0xa7, 0x20, 0x2d, 0xe2, 0xdd, 0x3f, 0x26, 0xba, 0x33, 0xd5, 0x12, 0x7e, 0x82, 0x9a, + 0x13, 0xed, 0x43, 0x5f, 0xd5, 0xab, 0x39, 0x77, 0x75, 0xbd, 0xe6, 0xc6, 0x64, 0x90, 0x9e, 0xcf, + 0xc5, 0xdb, 0xa8, 0x92, 0x26, 0xc0, 0xb5, 0xd7, 0x0b, 0x57, 0xf0, 0x64, 0x37, 0x01, 0xbe, 0x15, + 0xee, 0x45, 0x85, 0xc9, 0x12, 0xa1, 0x4a, 0xe6, 0xfc, 0x38, 0x95, 0xff, 0x8f, 0x23, 0x0d, 0x02, + 0xce, 0x23, 0xae, 0x16, 0x32, 0x61, 0xd0, 0xa6, 0x04, 0x69, 0x1e, 0xeb, 0xfe, 0x28, 0xa1, 0xda, + 0x69, 0x49, 0xfc, 0x10, 0xd5, 0x64, 0x99, 0x90, 0x05, 0xa0, 0x5d, 0x9d, 0xd5, 0x24, 0x95, 0x23, + 0x71, 0x7a, 0x96, 0x81, 0xef, 0xa1, 0x72, 0xea, 0xf7, 0xd5, 0x68, 0x75, 0xa7, 0xa1, 0x13, 0xcb, + 0xbb, 0x5b, 0xcf, 0xa8, 0xc4, 0x71, 0x17, 0xcd, 0x78, 0x3c, 0x4a, 0x63, 0xf9, 0x43, 0xc8, 0x46, + 0x91, 0x5c, 0xeb, 0x73, 0x85, 0x50, 0x1d, 0xc1, 0x6f, 0x51, 0x15, 0xe4, 0xd5, 0xa8, 0x59, 0x1a, + 0xcb, 0xab, 0xd7, 0xf0, 0x87, 0xa8, 0x73, 0xdb, 0x0c, 0x05, 0xdf, 0x9f, 0x18, 0x4d, 0x62, 0x34, + 0xd7, 0x6c, 0x7b, 0xfa, 0x24, 0x55, 0x0e, 0x9e, 0x45, 0xe5, 0x21, 0xec, 0xe7, 0x63, 0x51, 0xf9, + 0x89, 0x9f, 0xa2, 0xea, 0x48, 0x5e, 0xab, 0x5e, 0xce, 0xe2, 0x15, 0x8a, 0x17, 0x27, 0x4e, 0x73, + 0xee, 0x7a, 0x69, 0xcd, 0x74, 0x16, 0x0f, 0x4f, 0x2c, 0xe3, 0xe8, 0xc4, 0x32, 0x8e, 0x4f, 0x2c, + 0xe3, 0x20, 0xb3, 0xcc, 0xc3, 0xcc, 0x32, 0x8f, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x57, 0x66, + 0x99, 0x5f, 0x7e, 0x5b, 0xc6, 0x9b, 0x1b, 0x5a, 0xe4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, + 0xbb, 0x89, 0x53, 0x68, 0x05, 0x00, 0x00, +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TokenReview) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Token) + copy(dAtA[i:], m.Token) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Audiences) > 0 { + for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Audiences[iNdEx]) + copy(dAtA[i:], m.Audiences[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x1a + { + size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i-- + if m.Authenticated { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *UserInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Extra) > 0 { + 
keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TokenReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Token) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TokenReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + l = m.User.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *UserInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *TokenReview) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&TokenReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewSpec{`, + `Token:` + fmt.Sprintf("%v", this.Token) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, + `}`, + }, "") + return s +} +func (this *TokenReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TokenReviewStatus{`, + `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, + `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, + `}`, + }, "") + return s +} +func (this *UserInfo) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&UserInfo{`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift 
:= uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Token = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Authenticated = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UserInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto new file mode 100644 index 0000000..caf2a6a --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -0,0 +1,118 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.authentication.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. +message TokenReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional TokenReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + optional TokenReviewStatus status = 3; +} + +// TokenReviewSpec is a description of the token authentication request. +message TokenReviewSpec { + // Token is the opaque bearer token. + // +optional + optional string token = 1; + + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + repeated string audiences = 2; +} + +// TokenReviewStatus is the result of the token authentication request. +message TokenReviewStatus { + // Authenticated indicates that the token was associated with a known user. + // +optional + optional bool authenticated = 1; + + // User is the UserInfo associated with the provided token. + // +optional + optional UserInfo user = 2; + + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + repeated string audiences = 4; + + // Error indicates that the token couldn't be checked + // +optional + optional string error = 3; +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +message UserInfo { + // The name that uniquely identifies this user among all active users. + // +optional + optional string username = 1; + + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + optional string uid = 2; + + // The names of groups this user is a part of. 
+ // +optional + repeated string groups = 3; + + // Any additional information provided by the authenticator. + // +optional + map extra = 4; +} + diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go new file mode 100644 index 0000000..ed23e50 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/register.go @@ -0,0 +1,51 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TokenReview{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go new file mode 100644 index 0000000..0083fb0 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -0,0 +1,110 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TokenReview attempts to authenticate a token to a known user. +// Note: TokenReview requests may be cached by the webhook token authenticator +// plugin in the kube-apiserver. 
+type TokenReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request can be authenticated. + // +optional + Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// TokenReviewSpec is a description of the token authentication request. +type TokenReviewSpec struct { + // Token is the opaque bearer token. + // +optional + Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` + // Audiences is a list of the identifiers that the resource server presented + // with the token identifies as. Audience-aware token authenticators will + // verify that the token was intended for at least one of the audiences in + // this list. If no audiences are provided, the audience will default to the + // audience of the Kubernetes apiserver. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` +} + +// TokenReviewStatus is the result of the token authentication request. +type TokenReviewStatus struct { + // Authenticated indicates that the token was associated with a known user. + // +optional + Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` + // User is the UserInfo associated with the provided token. + // +optional + User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` + // Audiences are audience identifiers chosen by the authenticator that are + // compatible with both the TokenReview and token. An identifier is any + // identifier in the intersection of the TokenReviewSpec audiences and the + // token's audiences. A client of the TokenReview API that sets the + // spec.audiences field should validate that a compatible audience identifier + // is returned in the status.audiences field to ensure that the TokenReview + // server is audience aware. If a TokenReview returns an empty + // status.audience field where status.authenticated is "true", the token is + // valid against the audience of the Kubernetes API server. + // +optional + Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` + // Error indicates that the token couldn't be checked + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` +} + +// UserInfo holds the information about the user needed to implement the +// user.Info interface. +type UserInfo struct { + // The name that uniquely identifies this user among all active users. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` + // A unique value that identifies this user across time. If this user is + // deleted and another user by the same name is added, they will have + // different UIDs. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` + // The names of groups this user is a part of. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` + // Any additional information provided by the authenticator. 
+ // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..8c9acfb --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,74 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_TokenReview = map[string]string{ + "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request can be authenticated.", +} + +func (TokenReview) SwaggerDoc() map[string]string { + return map_TokenReview +} + +var map_TokenReviewSpec = map[string]string{ + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", + "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", +} + +func (TokenReviewSpec) SwaggerDoc() map[string]string { + return map_TokenReviewSpec +} + +var map_TokenReviewStatus = map[string]string{ + "": "TokenReviewStatus is the result of the token authentication request.", + "authenticated": "Authenticated indicates that the token was associated with a known user.", + "user": "User is the UserInfo associated with the provided token.", + "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. 
A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", + "error": "Error indicates that the token couldn't be checked", +} + +func (TokenReviewStatus) SwaggerDoc() map[string]string { + return map_TokenReviewStatus +} + +var map_UserInfo = map[string]string{ + "": "UserInfo holds the information about the user needed to implement the user.Info interface.", + "username": "The name that uniquely identifies this user among all active users.", + "uid": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", + "groups": "The names of groups this user is a part of.", + "extra": "Any additional information provided by the authenticator.", +} + +func (UserInfo) SwaggerDoc() map[string]string { + return map_UserInfo +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..a5d82a8 --- /dev/null +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,152 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReview) DeepCopyInto(out *TokenReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview. +func (in *TokenReview) DeepCopy() *TokenReview { + if in == nil { + return nil + } + out := new(TokenReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TokenReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { + *out = *in + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec. +func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { + if in == nil { + return nil + } + out := new(TokenReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { + *out = *in + in.User.DeepCopyInto(&out.User) + if in.Audiences != nil { + in, out := &in.Audiences, &out.Audiences + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus. +func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus { + if in == nil { + return nil + } + out := new(TokenReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UserInfo) DeepCopyInto(out *UserInfo) { + *out = *in + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo. +func (in *UserInfo) DeepCopy() *UserInfo { + if in == nil { + return nil + } + out := new(UserInfo) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go new file mode 100644 index 0000000..cf100e6 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=authorization.k8s.io + +package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go new file mode 100644 index 0000000..dbc0bdc --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -0,0 +1,4087 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo + +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{5} +} +func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo + +func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } +func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} +func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) 
Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) +} +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo + +func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } +func (*SubjectRulesReviewStatus) ProtoMessage() {} +func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e50da13573e369bd, []int{13} +} +func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + 
xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes") + proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1.ResourceAttributes") + proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1.ResourceRule") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReview") + proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview") + proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec.ExtraEntry") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus") + proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptor_e50da13573e369bd) +} + +var fileDescriptor_e50da13573e369bd = []byte{ + // 1140 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, + 0x17, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xda, 0xd1, + 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, + 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, + 0xc2, 0xa9, 0x12, 0xff, 0x00, 0x47, 0x0e, 0x1c, 0xf8, 0x0f, 0xb8, 0x20, 0x71, 0xe3, 0xc0, 0x01, + 0xe5, 0xd8, 0x63, 0x91, 0x90, 0x45, 0x96, 0x33, 0xff, 0x03, 0x9a, 0xd9, 0xb1, 0x77, 0x9d, 0xac, + 0xdd, 0x84, 0x03, 0xbd, 0xf4, 0xb6, 0xfb, 0x3e, 0x9f, 0xf7, 0xe6, 0xcd, 0xfb, 0x35, 0x0f, 0x6c, + 0x1f, 0x6d, 0x32, 0xcb, 0xf5, 0xed, 0xa3, 0xb0, 0x49, 0xa8, 0x47, 0x38, 0x61, 0x76, 0x9f, 0x78, + 0x2d, 0x9f, 0xda, 0x0a, 0xc0, 0x81, 0x6b, 0xe3, 0x90, 0x77, 0x7c, 0xea, 0x7e, 0x8d, 0xb9, 0xeb, + 0x7b, 0x76, 0x7f, 0xdd, 0x6e, 0x13, 0x8f, 0x50, 0xcc, 0x49, 0xcb, 0x0a, 0xa8, 0xcf, 0x7d, 0x78, + 0x2b, 0x26, 0x5b, 0x38, 0x70, 0xad, 0x31, 0xb2, 0xd5, 0x5f, 0x5f, 0x79, 0xaf, 0xed, 0xf2, 0x4e, + 0xd8, 0xb4, 0x1c, 0xbf, 0x67, 0xb7, 0xfd, 0xb6, 0x6f, 0x4b, 0x9d, 0x66, 0x78, 0x28, 0xff, 0xe4, + 0x8f, 0xfc, 0x8a, 0x6d, 0xad, 0xdc, 0x4d, 0x0e, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x9e, 0xd8, + 0xc1, 0x51, 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x78, 0xb0, 0x62, 0x4f, 0xd2, 0xa2, 0xa1, + 0xc7, 0xdd, 0x1e, 0xb9, 0xa0, 0x70, 0xff, 0x55, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xaf, 0x67, + 0x6e, 0x00, 0xb0, 0xf3, 0x15, 0xa7, 0xf8, 0x00, 0x77, 0x43, 0x02, 0x6b, 0xa0, 0xe8, 0x72, 0xd2, + 
0x63, 0x86, 0xb6, 0x9a, 0x5f, 0x2b, 0x37, 0xca, 0xd1, 0xa0, 0x56, 0xdc, 0x15, 0x02, 0x14, 0xcb, + 0x1f, 0x94, 0xbe, 0xfb, 0xa1, 0x96, 0x7b, 0xfe, 0xc7, 0x6a, 0xce, 0xfc, 0x49, 0x07, 0xc6, 0x23, + 0xdf, 0xc1, 0xdd, 0xbd, 0xb0, 0xf9, 0x05, 0x71, 0xf8, 0x96, 0xe3, 0x10, 0xc6, 0x10, 0xe9, 0xbb, + 0xe4, 0x18, 0x7e, 0x0e, 0x4a, 0xe2, 0x66, 0x2d, 0xcc, 0xb1, 0xa1, 0xad, 0x6a, 0x6b, 0x95, 0xfa, + 0xfb, 0x56, 0x12, 0xd3, 0x91, 0x83, 0x56, 0x70, 0xd4, 0x16, 0x02, 0x66, 0x09, 0xb6, 0xd5, 0x5f, + 0xb7, 0x3e, 0x92, 0xb6, 0x1e, 0x13, 0x8e, 0x1b, 0xf0, 0x74, 0x50, 0xcb, 0x45, 0x83, 0x1a, 0x48, + 0x64, 0x68, 0x64, 0x15, 0x1e, 0x80, 0x02, 0x0b, 0x88, 0x63, 0xe8, 0xd2, 0xfa, 0x5d, 0x6b, 0x4a, + 0xc6, 0xac, 0x0c, 0x0f, 0xf7, 0x02, 0xe2, 0x34, 0xae, 0xa9, 0x13, 0x0a, 0xe2, 0x0f, 0x49, 0x7b, + 0xf0, 0x33, 0x30, 0xc3, 0x38, 0xe6, 0x21, 0x33, 0xf2, 0xd2, 0xf2, 0xfd, 0x2b, 0x5b, 0x96, 0xda, + 0x8d, 0xff, 0x29, 0xdb, 0x33, 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc0, 0xd2, 0x13, 0xdf, 0x43, + 0x84, 0xf9, 0x21, 0x75, 0xc8, 0x16, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x0a, 0x0a, 0x01, + 0xe6, 0x1d, 0x19, 0xae, 0x72, 0xe2, 0xda, 0x53, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, + 0x29, 0xaf, 0x9c, 0x62, 0x1c, 0x10, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x04, 0xf3, 0x29, 0xe3, 0x28, + 0xec, 0xca, 0x8c, 0x0a, 0x68, 0x2c, 0xa3, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x82, 0x79, 0x2f, + 0xd1, 0xd9, 0x47, 0x8f, 0x98, 0xa1, 0x4b, 0xea, 0x62, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, + 0xe7, 0x9a, 0xbf, 0xe8, 0x00, 0x66, 0xdc, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, + 0xd4, 0x95, 0xae, 0x2b, 0x87, 0xcb, 0x4f, 0x86, 0x00, 0x4a, 0x38, 0xaf, 0xbe, 0x1c, 0x7c, 0x0b, + 0x14, 0xdb, 0xd4, 0x0f, 0x03, 0x99, 0x98, 0x72, 0x63, 0x4e, 0x51, 0x8a, 0x1f, 0x0a, 0x21, 0x8a, + 0x31, 0x78, 0x1b, 0xcc, 0xf6, 0x09, 0x65, 0xae, 0xef, 0x19, 0x05, 0x49, 0x9b, 0x57, 0xb4, 0xd9, + 0x83, 0x58, 0x8c, 0x86, 0x38, 0xbc, 0x03, 0x4a, 0x54, 0x39, 0x6e, 0x14, 0x25, 0x77, 0x41, 0x71, + 0x4b, 0xa3, 0x08, 0x8e, 0x18, 0xf0, 0x1e, 0xa8, 0xb0, 0xb0, 0x39, 0x52, 0x98, 0x91, 0x0a, 0x8b, + 0x4a, 0xa1, 0xb2, 0x97, 0x40, 0x28, 0xcd, 0x13, 0xd7, 0x12, 0x77, 0x34, 0x66, 0xc7, 0xaf, 0x25, + 0x42, 0x80, 0x24, 0x62, 0xfe, 0xaa, 0x81, 0x6b, 0x57, 0xcb, 0xd8, 0xbb, 0xa0, 0x8c, 0x03, 0x57, + 0x5e, 0x7b, 0x98, 0xab, 0x39, 0x11, 0xd7, 0xad, 0xa7, 0xbb, 0xb1, 0x10, 0x25, 0xb8, 0x20, 0x0f, + 0x9d, 0x11, 0x25, 0x3d, 0x22, 0x0f, 0x8f, 0x64, 0x28, 0xc1, 0xe1, 0x06, 0x98, 0x1b, 0xfe, 0xc8, + 0x24, 0x19, 0x05, 0xa9, 0x70, 0x3d, 0x1a, 0xd4, 0xe6, 0x50, 0x1a, 0x40, 0xe3, 0x3c, 0xf3, 0x67, + 0x1d, 0x2c, 0xef, 0x91, 0xee, 0xe1, 0xeb, 0x99, 0x05, 0xcf, 0xc6, 0x66, 0xc1, 0xe6, 0xf4, 0x8e, + 0xcd, 0xf6, 0xf2, 0xb5, 0xcd, 0x83, 0xef, 0x75, 0x70, 0x6b, 0x8a, 0x4f, 0xf0, 0x18, 0x40, 0x7a, + 0xa1, 0xbd, 0x54, 0x1c, 0xed, 0xa9, 0xbe, 0x5c, 0xec, 0xca, 0xc6, 0x8d, 0x68, 0x50, 0xcb, 0xe8, + 0x56, 0x94, 0x71, 0x04, 0xfc, 0x46, 0x03, 0x4b, 0x5e, 0xd6, 0xa4, 0x52, 0x61, 0xae, 0x4f, 0x3d, + 0x3c, 0x73, 0xc6, 0x35, 0x6e, 0x46, 0x83, 0x5a, 0xf6, 0xf8, 0x43, 0xd9, 0x67, 0x89, 0x57, 0xe6, + 0x46, 0x2a, 0x3c, 0xa2, 0x41, 0xfe, 0xbb, 0xba, 0xfa, 0x78, 0xac, 0xae, 0x36, 0x2e, 0x5b, 0x57, + 0x29, 0x27, 0x27, 0x96, 0xd5, 0xa7, 0xe7, 0xca, 0xea, 0xde, 0x65, 0xca, 0x2a, 0x6d, 0x78, 0x7a, + 0x55, 0x3d, 0x06, 0x2b, 0x93, 0x1d, 0xba, 0xf2, 0x70, 0x36, 0x7f, 0xd4, 0xc1, 0xe2, 0x9b, 0x67, + 0xfe, 0x2a, 0x6d, 0xfd, 0x5b, 0x01, 0x2c, 0xbf, 0x69, 0xe9, 0x49, 0x8b, 0x4e, 0xc8, 0x08, 0x55, + 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x50, 0x24, 0x11, 0x68, 0x82, 0x99, 0x76, 0xfc, 0xba, 0xc5, + 0xef, 0x0f, 0x10, 0x01, 
0x56, 0x4f, 0x9b, 0x42, 0x60, 0x0b, 0x14, 0x89, 0xd8, 0x5b, 0x8d, 0xe2, + 0x6a, 0x7e, 0xad, 0x52, 0xff, 0xe0, 0xdf, 0x54, 0x86, 0x25, 0x37, 0xdf, 0x1d, 0x8f, 0xd3, 0x93, + 0x64, 0x9d, 0x90, 0x32, 0x14, 0x1b, 0x87, 0xff, 0x07, 0xf9, 0xd0, 0x6d, 0xa9, 0xd7, 0xbe, 0xa2, + 0x28, 0xf9, 0xfd, 0xdd, 0x6d, 0x24, 0xe4, 0x2b, 0x58, 0x2d, 0xcf, 0xd2, 0x04, 0x5c, 0x00, 0xf9, + 0x23, 0x72, 0x12, 0x37, 0x14, 0x12, 0x9f, 0xf0, 0x21, 0x28, 0xf6, 0xc5, 0x5e, 0xad, 0xe2, 0xfb, + 0xce, 0x54, 0x27, 0x93, 0x35, 0x1c, 0xc5, 0x5a, 0x0f, 0xf4, 0x4d, 0xcd, 0xfc, 0x5d, 0x03, 0x37, + 0x27, 0x96, 0x9f, 0x58, 0x77, 0x70, 0xb7, 0xeb, 0x1f, 0x93, 0x96, 0x3c, 0xb6, 0x94, 0xac, 0x3b, + 0x5b, 0xb1, 0x18, 0x0d, 0x71, 0xf8, 0x36, 0x98, 0x69, 0x11, 0xcf, 0x25, 0x2d, 0xb9, 0x18, 0x95, + 0x92, 0xca, 0xdd, 0x96, 0x52, 0xa4, 0x50, 0xc1, 0xa3, 0x04, 0x33, 0xdf, 0x53, 0xab, 0xd8, 0x88, + 0x87, 0xa4, 0x14, 0x29, 0x14, 0x6e, 0x81, 0x79, 0x22, 0xdc, 0x94, 0xfe, 0xef, 0x50, 0xea, 0x0f, + 0x33, 0xba, 0xac, 0x14, 0xe6, 0x77, 0xc6, 0x61, 0x74, 0x9e, 0x6f, 0xfe, 0xad, 0x03, 0x63, 0xd2, + 0x68, 0x83, 0x87, 0xc9, 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2f, 0xd5, 0x20, 0x42, + 0xa3, 0xb1, 0xa4, 0x1c, 0x99, 0x4b, 0x4b, 0x53, 0xab, 0x8b, 0xfc, 0x85, 0x14, 0x2c, 0x78, 0xe3, + 0x3b, 0x73, 0xbc, 0x54, 0x55, 0xea, 0x77, 0x2e, 0xdb, 0x0e, 0xf2, 0x34, 0x43, 0x9d, 0xb6, 0x70, + 0x0e, 0x60, 0xe8, 0x82, 0x7d, 0x58, 0x07, 0xc0, 0xf5, 0x1c, 0xbf, 0x17, 0x74, 0x09, 0x27, 0x32, + 0x6c, 0xa5, 0x64, 0x0e, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xc5, 0xbb, 0x70, 0xb5, 0x78, 0x37, + 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, + 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, + 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x99, 0x87, 0xb8, 0x24, + 0x47, 0x0f, 0x00, 0x00, +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i 
-= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if 
err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SelfSubjectRulesReview) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + 
keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if len(m.NonResourceRules) > 0 { + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m 
*SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceRule{`, + 
`Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", 
"SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&SubjectRulesReviewStatus{`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
NonResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + 
iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { 
+ return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourceRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourceRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incomplete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
m.Incomplete = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto new file mode 100644 index 0000000..f68a04e --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/generated.proto @@ -0,0 +1,272 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.authorization.v1;
+
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1";
+
+// ExtraValue masks the value so protobuf can generate
+// +protobuf.nullable=true
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+message ExtraValue {
+  // items, if empty, will result in an empty slice
+
+  repeated string items = 1;
+}
+
+// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace.
+// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions
+// checking.
+message LocalSubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace
+  // you made the request against. If empty, it is defaulted.
+  optional SubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface
+message NonResourceAttributes {
+  // Path is the URL path of the request
+  // +optional
+  optional string path = 1;
+
+  // Verb is the standard HTTP verb
+  // +optional
+  optional string verb = 2;
+}
+
+// NonResourceRule holds information that describes a rule for the non-resource
+message NonResourceRule {
+  // Verbs is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all.
+  repeated string verbs = 1;
+
+  // NonResourceURLs is a set of partial URLs that a user should have access to. *s are allowed, but only as the full,
+  // final step in the path. "*" means all.
+  // +optional
+  repeated string nonResourceURLs = 2;
+}
+
+// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface
+message ResourceAttributes {
+  // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces
+  // "" (empty) is defaulted for LocalSubjectAccessReviews
+  // "" (empty) is empty for cluster-scoped resources
+  // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview
+  // +optional
+  optional string namespace = 1;
+
+  // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all.
+  // +optional
+  optional string verb = 2;
+
+  // Group is the API Group of the Resource. "*" means all.
+  // +optional
+  optional string group = 3;
+
+  // Version is the API Version of the Resource. "*" means all.
+  // +optional
+  optional string version = 4;
+
+  // Resource is one of the existing resource types. "*" means all.
+  // +optional
+  optional string resource = 5;
+
+  // Subresource is one of the existing resource types. "" means none.
+  // +optional
+  optional string subresource = 6;
+
+  // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all.
+  // +optional
+  optional string name = 7;
+}
+
+// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant,
+// may contain duplicates, and possibly be incomplete.
+message ResourceRule {
+  // Verbs is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all.
+  repeated string verbs = 1;
+
+  // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of
+  // the enumerated resources in any API group will be allowed. "*" means all.
+  // +optional
+  repeated string apiGroups = 2;
+
+  // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups.
+  // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups.
+  // +optional
+  repeated string resources = 3;
+
+  // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all.
+  // +optional
+  repeated string resourceNames = 4;
+}
+
+// SelfSubjectAccessReview checks whether or not the current user can perform an action. Not filling in a
+// spec.namespace means "in all namespaces". Self is a special case, because users should always be able
+// to check whether they can perform an action.
+message SelfSubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated. user and groups must be empty
+  optional SelfSubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+message SelfSubjectAccessReviewSpec {
+  // ResourceAuthorizationAttributes describes information for a resource access request
+  // +optional
+  optional ResourceAttributes resourceAttributes = 1;
+
+  // NonResourceAttributes describes information for a non-resource access request
+  // +optional
+  optional NonResourceAttributes nonResourceAttributes = 2;
+}
+
+// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
+// The returned list of actions may be incomplete depending on the server's authorization mode,
+// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions,
+// or to quickly let an end user reason about their permissions. It should NOT be used by external systems to
+// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns.
+// SubjectAccessReview and LocalAccessReview are the correct way to defer authorization decisions to the API server.
+message SelfSubjectRulesReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated.
+  optional SelfSubjectRulesReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates the set of actions a user can perform.
+  // +optional
+  optional SubjectRulesReviewStatus status = 3;
+}
+
+message SelfSubjectRulesReviewSpec {
+  // Namespace to evaluate rules for. Required.
+  optional string namespace = 1;
+}
+
+// SubjectAccessReview checks whether or not a user or group can perform an action.
+message SubjectAccessReview {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Spec holds information about the request being evaluated
+  optional SubjectAccessReviewSpec spec = 2;
+
+  // Status is filled in by the server and indicates whether the request is allowed or not
+  // +optional
+  optional SubjectAccessReviewStatus status = 3;
+}
+
+// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes
+// and NonResourceAuthorizationAttributes must be set
+message SubjectAccessReviewSpec {
+  // ResourceAuthorizationAttributes describes information for a resource access request
+  // +optional
+  optional ResourceAttributes resourceAttributes = 1;
+
+  // NonResourceAttributes describes information for a non-resource access request
+  // +optional
+  optional NonResourceAttributes nonResourceAttributes = 2;
+
+  // User is the user you're testing for.
+  // If you specify "User" but not "Groups", then it is interpreted as "What if User were not a member of any groups?"
+  // +optional
+  optional string user = 3;
+
+  // Groups is the groups you're testing for.
+  // +optional
+  repeated string groups = 4;
+
+  // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer
+  // it needs to be reflected here.
+  // +optional
+  map<string, ExtraValue> extra = 5;
+
+  // UID information about the requesting user.
+  // +optional
+  optional string uid = 6;
+}
+
+// SubjectAccessReviewStatus
+message SubjectAccessReviewStatus {
+  // Allowed is required. True if the action would be allowed, false otherwise.
+  optional bool allowed = 1;
+
+  // Denied is optional. True if the action would be denied, otherwise
+  // false. If both allowed is false and denied is false, then the
+  // authorizer has no opinion on whether to authorize the action. Denied
+  // may not be true if Allowed is true.
+  // +optional
+  optional bool denied = 4;
+
+  // Reason is optional. It indicates why a request was allowed or denied.
+  // +optional
+  optional string reason = 2;
+
+  // EvaluationError is an indication that some error occurred during the authorization check.
+  // It is entirely possible to get an error and be able to continue to determine authorization status in spite of it.
+  // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.
+  // +optional
+  optional string evaluationError = 3;
+}
+
+// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on
+// the set of authorizers the server is configured with and any errors experienced during evaluation.
+// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission,
+// even if that list is incomplete.
+message SubjectRulesReviewStatus {
+  // ResourceRules is the list of actions the subject is allowed to perform on resources.
+  // The list ordering isn't significant, may contain duplicates, and possibly be incomplete.
+  repeated ResourceRule resourceRules = 1;
+
+  // NonResourceRules is the list of actions the subject is allowed to perform on non-resources.
+ // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + repeated NonResourceRule nonResourceRules = 2; + + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + optional bool incomplete = 3; + + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + optional string evaluationError = 4; +} + diff --git a/vendor/k8s.io/api/authorization/v1/register.go b/vendor/k8s.io/api/authorization/v1/register.go new file mode 100644 index 0000000..5931198 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectRulesReview{}, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go new file mode 100644 index 0000000..be8913e --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -0,0 +1,268 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectAccessReview checks whether or not a user or group can perform an action. +type SubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. user and groups must be empty + Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` + // Group is the API Group of the Resource. "*" means all. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // Version is the API Version of the Resource. "*" means all. + // +optional + Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` + // Resource is one of the existing resource types. "*" means all. + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"` + // Subresource is one of the existing resource types. "" means none. + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"` + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Verb is the standard HTTP verb + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` + + // User is the user you're testing for. + // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` + // Groups is the groups you're testing for. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` + // UID information about the requesting user. 
+ // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. +// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. +type SelfSubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. + Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates the set of actions a user can perform. 
+ // +optional + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +type SelfSubjectRulesReviewSpec struct { + // Namespace to evaluate rules for. Required. + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` +} + +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. +type SubjectRulesReviewStatus struct { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + Incomplete bool `json:"incomplete" protobuf:"bytes,3,rep,name=incomplete"` + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` +} + +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +type ResourceRule struct { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` +} + +// NonResourceRule holds information that describes a rule for the non-resource +type NonResourceRule struct { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. 
+ Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` +} diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..8445f71 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go @@ -0,0 +1,173 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NonResourceAttributes = map[string]string{ + "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "path": "Path is the URL path of the request", + "verb": "Verb is the standard HTTP verb", +} + +func (NonResourceAttributes) SwaggerDoc() map[string]string { + return map_NonResourceAttributes +} + +var map_NonResourceRule = map[string]string{ + "": "NonResourceRule holds information that describes a rule for the non-resource", + "verbs": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. 
\"*\" means all.", +} + +func (NonResourceRule) SwaggerDoc() map[string]string { + return map_NonResourceRule +} + +var map_ResourceAttributes = map[string]string{ + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", +} + +func (ResourceAttributes) SwaggerDoc() map[string]string { + return map_ResourceAttributes +} + +var map_ResourceRule = map[string]string{ + "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", + "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", +} + +func (ResourceRule) SwaggerDoc() map[string]string { + return map_ResourceRule +} + +var map_SelfSubjectAccessReview = map[string]string{ + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReview +} + +var map_SelfSubjectAccessReviewSpec = map[string]string{ + "": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", +} + +func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReviewSpec +} + +var map_SelfSubjectRulesReview = map[string]string{ + "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", + "spec": "Spec holds information about the request being evaluated.", + "status": "Status is filled in by the server and indicates the set of actions a user can perform.", +} + +func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReview +} + +var map_SelfSubjectRulesReviewSpec = map[string]string{ + "namespace": "Namespace to evaluate rules for. Required.", +} + +func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReviewSpec +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewSpec = map[string]string{ + "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", + "user": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups", + "groups": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", + "uid": "UID information about the requesting user.", +} + +func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewSpec +} + +var map_SubjectAccessReviewStatus = map[string]string{ + "": "SubjectAccessReviewStatus", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", + "reason": "Reason is optional. 
It indicates why a request was allowed or denied.", + "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewStatus +} + +var map_SubjectRulesReviewStatus = map[string]string{ + "": "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.", + "resourceRules": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "nonResourceRules": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "incomplete": "Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.", + "evaluationError": "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.", +} + +func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..1d11b38 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go @@ -0,0 +1,385 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. 
+func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. +func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { + if in == nil { + return nil + } + out := new(LocalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes. +func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes { + if in == nil { + return nil + } + out := new(NonResourceAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule. +func (in *NonResourceRule) DeepCopy() *NonResourceRule { + if in == nil { + return nil + } + out := new(NonResourceRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes. +func (in *ResourceAttributes) DeepCopy() *ResourceAttributes { + if in == nil { + return nil + } + out := new(ResourceAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceRule) DeepCopyInto(out *ResourceRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule. +func (in *ResourceRule) DeepCopy() *ResourceRule { + if in == nil { + return nil + } + out := new(ResourceRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview. +func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview { + if in == nil { + return nil + } + out := new(SelfSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) { + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec. +func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectAccessReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. +func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. +func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. +func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { + if in == nil { + return nil + } + out := new(SubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec. +func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec { + if in == nil { + return nil + } + out := new(SubjectAccessReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus. +func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus { + if in == nil { + return nil + } + out := new(SubjectAccessReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { + *out = *in + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourceRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourceRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. +func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { + if in == nil { + return nil + } + out := new(SubjectRulesReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go new file mode 100644 index 0000000..7046f11 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=authorization.k8s.io + +package v1beta1 // import "k8s.io/api/authorization/v1beta1" diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go new file mode 100644 index 0000000..647c0c5 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -0,0 +1,4087 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{0} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } +func (*LocalSubjectAccessReview) ProtoMessage() {} +func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{1} +} +func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) +} +func (m *LocalSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo + +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{2} +} +func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceAttributes.Merge(m, src) +} +func (m *NonResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *NonResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo + +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{3} +} +func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NonResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NonResourceRule.Merge(m, src) +} +func (m *NonResourceRule) XXX_Size() int { + return m.Size() +} +func (m *NonResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_NonResourceRule.DiscardUnknown(m) +} + +var 
xxx_messageInfo_NonResourceRule proto.InternalMessageInfo + +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{4} +} +func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceAttributes.Merge(m, src) +} +func (m *ResourceAttributes) XXX_Size() int { + return m.Size() +} +func (m *ResourceAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo + +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{5} +} +func (m *ResourceRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRule.Merge(m, src) +} +func (m *ResourceRule) XXX_Size() int { + return m.Size() +} +func (m *ResourceRule) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRule.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRule proto.InternalMessageInfo + +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{6} +} +func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) +} +func (m *SelfSubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo + +func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } +func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} +func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{7} +} +func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) +} +func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectAccessReviewSpec) 
XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{8} +} +func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) +} +func (m *SelfSubjectRulesReview) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo + +func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } +func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} +func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{9} +} +func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) +} +func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{10} +} +func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReview.Merge(m, src) +} +func (m *SubjectAccessReview) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReview) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo + +func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } +func (*SubjectAccessReviewSpec) ProtoMessage() {} +func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{11} +} +func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) +} +func (m *SubjectAccessReviewSpec) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo + +func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } +func (*SubjectAccessReviewStatus) ProtoMessage() {} +func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{12} +} +func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) +} +func (m *SubjectAccessReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo + +func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } +func (*SubjectRulesReviewStatus) ProtoMessage() {} +func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_43130d8376f09103, []int{13} +} +func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) +} +func (m *SubjectRulesReviewStatus) XXX_Size() int { + return m.Size() +} +func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { + xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") + proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") + proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.NonResourceAttributes") + proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1beta1.NonResourceRule") + proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.ResourceAttributes") + proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1beta1.ResourceRule") + proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReview") + proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReviewSpec") + proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReview") + proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") + proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") + 
proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec.ExtraEntry") + proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") + proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_43130d8376f09103) +} + +var fileDescriptor_43130d8376f09103 = []byte{ + // 1141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, + 0x05, 0xd1, 0xee, 0x92, 0xa8, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, 0x44, + 0xc9, 0x81, 0x4a, 0xc0, 0xec, 0x7a, 0x62, 0x2f, 0xb6, 0x77, 0x97, 0x99, 0x59, 0x87, 0x20, 0x0e, + 0x3d, 0x72, 0xe4, 0xc8, 0x91, 0x13, 0xdf, 0x81, 0x0b, 0x12, 0x9c, 0x72, 0xec, 0x31, 0x48, 0xc8, + 0x22, 0xcb, 0x87, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, + 0xdb, 0x79, 0xbf, 0xdf, 0x7b, 0xf3, 0xde, 0x9b, 0xf7, 0xde, 0x3e, 0xb0, 0xdb, 0x79, 0xc8, 0x0c, + 0xc7, 0x33, 0x3b, 0x81, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0x66, 0x9f, 0xb8, 0x4d, 0x8f, 0x9a, 0x0a, + 0xc0, 0xbe, 0x63, 0xe2, 0x80, 0xb7, 0x3d, 0xea, 0x7c, 0x87, 0xb9, 0xe3, 0xb9, 0x66, 0x7f, 0xcd, + 0x22, 0x1c, 0xaf, 0x99, 0x2d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x1a, 0x3e, 0xf5, 0xb8, 0x07, 0x6b, + 0x91, 0x86, 0x81, 0x7d, 0xc7, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xca, 0xfd, 0x96, 0xc3, 0xdb, 0x81, + 0x65, 0xd8, 0x5e, 0xcf, 0x6c, 0x79, 0x2d, 0xcf, 0x94, 0x8a, 0x56, 0x70, 0x28, 0x4f, 0xf2, 0x20, + 0xbf, 0x22, 0x83, 0x2b, 0x0f, 0x62, 0x17, 0x7a, 0xd8, 0x6e, 0x3b, 0x2e, 0xa1, 0xc7, 0xa6, 0xdf, + 0x69, 0x09, 0x01, 0x33, 0x7b, 0x84, 0x63, 0xb3, 0x7f, 0xc1, 0x8d, 0x15, 0xf3, 0x32, 0x2d, 0x1a, + 0xb8, 0xdc, 0xe9, 0x91, 0x0b, 0x0a, 0x1f, 0x4e, 0x52, 0x60, 0x76, 0x9b, 0xf4, 0xf0, 0x79, 0xbd, + 0xfa, 0x06, 0x00, 0x3b, 0xdf, 0x72, 0x8a, 0x0f, 0x70, 0x37, 0x20, 0xb0, 0x0a, 0x0a, 0x0e, 0x27, + 0x3d, 0xa6, 0x6b, 0xb5, 0xdc, 0x6a, 0xa9, 0x51, 0x0a, 0x07, 0xd5, 0xc2, 0xae, 0x10, 0xa0, 0x48, + 0xbe, 0x59, 0xfc, 0xe9, 0xe7, 0x6a, 0xe6, 0xc5, 0x5f, 0xb5, 0x4c, 0xfd, 0xb7, 0x2c, 0xd0, 0x1f, + 0x7b, 0x36, 0xee, 0xee, 0x05, 0xd6, 0xd7, 0xc4, 0xe6, 0x5b, 0xb6, 0x4d, 0x18, 0x43, 0xa4, 0xef, + 0x90, 0x23, 0xf8, 0x15, 0x28, 0x8a, 0xc8, 0x9a, 0x98, 0x63, 0x5d, 0xab, 0x69, 0xab, 0xe5, 0xf5, + 0xf7, 0x8d, 0x38, 0xb1, 0x23, 0x07, 0x0d, 0xbf, 0xd3, 0x12, 0x02, 0x66, 0x08, 0xb6, 0xd1, 0x5f, + 0x33, 0x3e, 0x93, 0xb6, 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, + 0x65, 0x68, 0x64, 0x15, 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xeb, 0x1f, 0x19, 0x93, + 0x9e, 0xcd, 0x48, 0x71, 0x73, 0xcf, 0x27, 0x76, 0xe3, 0x96, 0xba, 0x26, 0x2f, 0x4e, 0x48, 0x1a, + 0x85, 0x36, 0x98, 0x61, 0x1c, 0xf3, 0x80, 0xe9, 0x39, 0x69, 0xfe, 0xe3, 0x9b, 0x99, 0x97, 0x26, + 0x1a, 0x6f, 0xa8, 0x0b, 0x66, 0xa2, 0x33, 0x52, 0xa6, 0xeb, 0xcf, 0xc1, 0xd2, 0x53, 0xcf, 0x45, + 0x84, 0x79, 0x01, 0xb5, 0xc9, 0x16, 0xe7, 0xd4, 0xb1, 0x02, 0x4e, 0x18, 0xac, 0x81, 0xbc, 0x8f, + 0x79, 0x5b, 0x26, 0xae, 0x14, 0xfb, 0xf7, 0x0c, 0xf3, 0x36, 0x92, 0x88, 0x60, 0xf4, 0x09, 0xb5, + 0x64, 0xf0, 0x09, 0xc6, 0x01, 0xa1, 0x16, 0x92, 0x48, 0xfd, 0x1b, 0x30, 0x9f, 0x30, 
0x8e, 0x82, + 0xae, 0x7c, 0x5b, 0x01, 0x8d, 0xbd, 0xad, 0xd0, 0x60, 0x28, 0x92, 0xc3, 0x47, 0x60, 0xde, 0x8d, + 0x75, 0xf6, 0xd1, 0x63, 0xa6, 0x67, 0x25, 0x75, 0x31, 0x1c, 0x54, 0x93, 0xe6, 0x04, 0x84, 0xce, + 0x73, 0x45, 0x41, 0xc0, 0x94, 0x68, 0x4c, 0x50, 0x72, 0x71, 0x8f, 0x30, 0x1f, 0xdb, 0x44, 0x85, + 0x74, 0x5b, 0x39, 0x5c, 0x7a, 0x3a, 0x04, 0x50, 0xcc, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, + 0xd4, 0x0b, 0x7c, 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0xa2, 0x14, 0x3e, 0x15, 0x42, 0x14, 0x61, 0xf0, + 0x5d, 0x30, 0xdb, 0x27, 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0x6d, 0x5e, 0xd1, 0x66, 0x0f, 0x22, + 0x31, 0x1a, 0xe2, 0xf0, 0x1e, 0x28, 0x52, 0xe5, 0xb8, 0x5e, 0x90, 0xdc, 0x05, 0xc5, 0x2d, 0x8e, + 0x32, 0x38, 0x62, 0xc0, 0x0f, 0x40, 0x99, 0x05, 0xd6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, + 0xf2, 0x5e, 0x0c, 0xa1, 0x24, 0x4f, 0x84, 0x25, 0x62, 0xd4, 0x67, 0xc7, 0xc3, 0x12, 0x29, 0x40, + 0x12, 0xa9, 0xff, 0xa1, 0x81, 0x5b, 0xd3, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x86, 0x3d, + 0x7c, 0xab, 0x39, 0x91, 0xd7, 0xad, 0x67, 0xbb, 0x91, 0x10, 0xc5, 0xb8, 0x20, 0x0f, 0x9d, 0x11, + 0x75, 0x3d, 0x22, 0x0f, 0xaf, 0x64, 0x28, 0xc6, 0xe1, 0x06, 0x98, 0x1b, 0x1e, 0xe4, 0x23, 0xe9, + 0x79, 0xa9, 0x70, 0x3b, 0x1c, 0x54, 0xe7, 0x50, 0x12, 0x40, 0xe3, 0xbc, 0xfa, 0xef, 0x59, 0xb0, + 0xbc, 0x47, 0xba, 0x87, 0xaf, 0x66, 0x2a, 0x7c, 0x39, 0x36, 0x15, 0x1e, 0x5d, 0xa3, 0x6d, 0xd3, + 0x5d, 0x7d, 0xb5, 0x93, 0xe1, 0x97, 0x2c, 0x78, 0xf3, 0x0a, 0xc7, 0xe0, 0xf7, 0x00, 0xd2, 0x0b, + 0x8d, 0xa6, 0x32, 0xfa, 0x60, 0xb2, 0x43, 0x17, 0x9b, 0xb4, 0x71, 0x27, 0x1c, 0x54, 0x53, 0x9a, + 0x17, 0xa5, 0xdc, 0x03, 0x7f, 0xd0, 0xc0, 0x92, 0x9b, 0x36, 0xb8, 0x54, 0xd6, 0x37, 0x26, 0x7b, + 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xa2, 0xf4, 0x0b, 0xc5, 0xc8, 0xb9, + 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xaf, 0xd6, 0xbe, 0x18, 0xab, 0xb5, 0x4f, 0xa6, 0xaa, 0xb5, + 0x84, 0xa7, 0x97, 0x96, 0x9a, 0x75, 0xae, 0xd4, 0x36, 0xaf, 0x5d, 0x6a, 0x49, 0xeb, 0x57, 0x57, + 0xda, 0x13, 0xb0, 0x72, 0xb9, 0x57, 0x53, 0x8f, 0xee, 0xfa, 0xaf, 0x59, 0xb0, 0xf8, 0x7a, 0x1d, + 0xb8, 0x59, 0xd3, 0x9f, 0xe6, 0xc1, 0xf2, 0xeb, 0x86, 0xbf, 0xba, 0xe1, 0xc5, 0x4f, 0x34, 0x60, + 0x84, 0xaa, 0x1f, 0xff, 0xe8, 0xad, 0xf6, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x36, 0xdc, 0x0d, 0xa2, + 0x1f, 0x16, 0x10, 0x99, 0x56, 0xff, 0x42, 0xb5, 0x18, 0x38, 0xa0, 0x40, 0xc4, 0xc6, 0xab, 0x17, + 0x6a, 0xb9, 0xd5, 0xf2, 0xfa, 0xf6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, 0x8e, 0xcb, 0xe9, 0x71, + 0xbc, 0x83, 0x48, 0x19, 0x8a, 0x6e, 0x80, 0x6f, 0x81, 0x5c, 0xe0, 0x34, 0xd5, 0x8a, 0x50, 0x56, + 0x94, 0xdc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, + 0x3a, 0xe4, 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0xfa, 0x62, 0x2d, 0x57, 0x79, 0xbe, + 0x37, 0xd9, 0xd3, 0x78, 0x95, 0x47, 0x91, 0xea, 0x66, 0xf6, 0xa1, 0x56, 0xff, 0x53, 0x03, 0x77, + 0x2f, 0x2d, 0x48, 0xb1, 0x28, 0xe1, 0x6e, 0xd7, 0x3b, 0x22, 0x4d, 0x79, 0x77, 0x31, 0x5e, 0x94, + 0xb6, 0x22, 0x31, 0x1a, 0xe2, 0xf0, 0x1d, 0x30, 0xd3, 0x24, 0xae, 0x43, 0x9a, 0x72, 0xa5, 0x2a, + 0xc6, 0xb5, 0xbc, 0x2d, 0xa5, 0x48, 0xa1, 0x82, 0x47, 0x09, 0x66, 0x9e, 0xab, 0x96, 0xb8, 0x11, + 0x0f, 0x49, 0x29, 0x52, 0x28, 0xdc, 0x02, 0xf3, 0x44, 0xb8, 0x29, 0x83, 0xd8, 0xa1, 0xd4, 0x1b, + 0xbe, 0xec, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0x7e, 0xfd, 0xdf, 0x2c, 0xd0, 0x2f, + 0x1b, 0x7b, 0xb0, 0x13, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xdd, 0xb8, 0x7e, 0xcb, 0x08, + 0xb5, 0xc6, 0x92, 0xf2, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, + 0xbe, 0x72, 
0x47, 0x3b, 0x59, 0x79, 0x7d, 0x6d, 0xaa, 0x06, 0x91, 0x57, 0xea, 0xea, 0xca, 0x85, + 0x73, 0x00, 0x43, 0x17, 0x2e, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, + 0x26, 0xb0, 0x18, 0x4f, 0xcb, 0xdd, 0x11, 0x82, 0x12, 0xac, 0xb4, 0xcc, 0xe7, 0xa7, 0xcb, 0x7c, + 0xe3, 0xfe, 0xc9, 0x59, 0x25, 0xf3, 0xf2, 0xac, 0x92, 0x39, 0x3d, 0xab, 0x64, 0x5e, 0x84, 0x15, + 0xed, 0x24, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x1a, 0x56, 0xb4, 0xbf, 0xc3, 0x8a, 0xf6, 0xe3, + 0x3f, 0x95, 0xcc, 0xe7, 0xb3, 0x2a, 0xc2, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc9, 0xa5, + 0x34, 0xa4, 0x0f, 0x00, 0x00, +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) 
+} + +func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x3a + i -= len(m.Subresource) + copy(dAtA[i:], m.Subresource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i-- + dAtA[i] = 0x32 + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x2a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x22 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x1a + i -= len(m.Verb) + copy(dAtA[i:], m.Verb) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return 
nil, err + } + return dAtA[:n], nil +} + +func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + if m.NonResourceAttributes != nil { + { + size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + 
i-- + dAtA[i] = 0x12 + } + if m.ResourceAttributes != nil { + { + size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Denied { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i-- + if m.Allowed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.EvaluationError) + copy(dAtA[i:], m.EvaluationError) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i-- + dAtA[i] = 0x22 + i-- + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + if len(m.NonResourceRules) > 0 { + for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.ResourceRules) > 0 { + for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NonResourceRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Verb) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Subresource) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SelfSubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SelfSubjectRulesReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SelfSubjectRulesReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReview) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ResourceAttributes != nil { + l = m.ResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NonResourceAttributes != nil { + l = m.NonResourceAttributes.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SubjectAccessReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *SubjectRulesReviewStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, e := range m.ResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceRules) > 0 { + for _, e := range m.NonResourceRules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + l = len(m.EvaluationError) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *LocalSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalSubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceAttributes{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `}`, + }, "") + return s +} +func (this *NonResourceRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NonResourceRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceAttributes) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceAttributes{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReview{`, + 
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SelfSubjectRulesReviewSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReview) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReview{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&SubjectAccessReviewSpec{`, + `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Extra:` + mapStringForExtra + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectAccessReviewStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SubjectAccessReviewStatus{`, + 
`Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, + `}`, + }, "") + return s +} +func (this *SubjectRulesReviewStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForResourceRules := "[]ResourceRule{" + for _, f := range this.ResourceRules { + repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForResourceRules += "}" + repeatedStringForNonResourceRules := "[]NonResourceRule{" + for _, f := range this.NonResourceRules { + repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," + } + repeatedStringForNonResourceRules += "}" + s := strings.Join([]string{`&SubjectRulesReviewStatus{`, + `ResourceRules:` + repeatedStringForResourceRules + `,`, + `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, + `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NonResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NonResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NonResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verb = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subresource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: ResourceRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: 
SelfSubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceAttributes == nil { + m.ResourceAttributes = &ResourceAttributes{} + } + if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NonResourceAttributes == nil { + m.NonResourceAttributes = &NonResourceAttributes{} + } + if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectAccessReviewStatus) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Allowed = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Denied = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceRules = append(m.ResourceRules, ResourceRule{}) + if err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceRules = append(m.NonResourceRules, NonResourceRule{}) + if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Incomplete", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Incomplete = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EvaluationError = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l 
{ + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto new file mode 100644 index 0000000..3876a3e --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.proto @@ -0,0 +1,272 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.authorization.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +message LocalSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. 
If empty, it is defaulted. + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +message NonResourceAttributes { + // Path is the URL path of the request + // +optional + optional string path = 1; + + // Verb is the standard HTTP verb + // +optional + optional string verb = 2; +} + +// NonResourceRule holds information that describes a rule for the non-resource +message NonResourceRule { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + repeated string verbs = 1; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + repeated string nonResourceURLs = 2; +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +message ResourceAttributes { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + optional string namespace = 1; + + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + optional string verb = 2; + + // Group is the API Group of the Resource. "*" means all. + // +optional + optional string group = 3; + + // Version is the API Version of the Resource. "*" means all. + // +optional + optional string version = 4; + + // Resource is one of the existing resource types. "*" means all. + // +optional + optional string resource = 5; + + // Subresource is one of the existing resource types. "" means none. + // +optional + optional string subresource = 6; + + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + optional string name = 7; +} + +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +message ResourceRule { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. 
+ // +optional + repeated string resourceNames = 4; +} + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +message SelfSubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. user and groups must be empty + optional SelfSubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SelfSubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; +} + +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. +// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. +message SelfSubjectRulesReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated. + optional SelfSubjectRulesReviewSpec spec = 2; + + // Status is filled in by the server and indicates the set of actions a user can perform. + // +optional + optional SubjectRulesReviewStatus status = 3; +} + +message SelfSubjectRulesReviewSpec { + // Namespace to evaluate rules for. Required. + optional string namespace = 1; +} + +// SubjectAccessReview checks whether or not a user or group can perform an action. +message SubjectAccessReview { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec holds information about the request being evaluated + optional SubjectAccessReviewSpec spec = 2; + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + optional SubjectAccessReviewStatus status = 3; +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +message SubjectAccessReviewSpec { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + optional ResourceAttributes resourceAttributes = 1; + + // NonResourceAttributes describes information for a non-resource access request + // +optional + optional NonResourceAttributes nonResourceAttributes = 2; + + // User is the user you're testing for. 
+ // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + // +optional + optional string user = 3; + + // Groups is the groups you're testing for. + // +optional + repeated string group = 4; + + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + map<string, ExtraValue> extra = 5; + + // UID information about the requesting user. + // +optional + optional string uid = 6; +} + +// SubjectAccessReviewStatus +message SubjectAccessReviewStatus { + // Allowed is required. True if the action would be allowed, false otherwise. + optional bool allowed = 1; + + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + optional bool denied = 4; + + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + optional string reason = 2; + + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + optional string evaluationError = 3; +} + +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. +message SubjectRulesReviewStatus { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + repeated ResourceRule resourceRules = 1; + + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + repeated NonResourceRule nonResourceRules = 2; + + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + optional bool incomplete = 3; + + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + optional string evaluationError = 4; +} + diff --git a/vendor/k8s.io/api/authorization/v1beta1/register.go b/vendor/k8s.io/api/authorization/v1beta1/register.go new file mode 100644 index 0000000..84255dd --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &SelfSubjectRulesReview{}, + &SelfSubjectAccessReview{}, + &SubjectAccessReview{}, + &LocalSubjectAccessReview{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go new file mode 100644 index 0000000..cf117d2 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -0,0 +1,268 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SubjectAccessReview checks whether or not a user or group can perform an action. 
+type SubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a +// spec.namespace means "in all namespaces". Self is a special case, because users should always be able +// to check whether they can perform an action +type SelfSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. user and groups must be empty + Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. +// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions +// checking. +type LocalSubjectAccessReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace + // you made the request against. If empty, it is defaulted. + Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates whether the request is allowed or not + // +optional + Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface +type ResourceAttributes struct { + // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces + // "" (empty) is defaulted for LocalSubjectAccessReviews + // "" (empty) is empty for cluster-scoped resources + // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` + // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` + // Group is the API Group of the Resource. "*" means all. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` + // Version is the API Version of the Resource. "*" means all. 
+ // +optional + Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` + // Resource is one of the existing resource types. "*" means all. + // +optional + Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"` + // Subresource is one of the existing resource types. "" means none. + // +optional + Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"` + // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` +} + +// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface +type NonResourceAttributes struct { + // Path is the URL path of the request + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Verb is the standard HTTP verb + // +optional + Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` +} + +// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` + + // User is the user you're testing for. + // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` + // Groups is the groups you're testing for. + // +optional + Groups []string `json:"group,omitempty" protobuf:"bytes,4,rep,name=group"` + // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer + // it needs a reflection here. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` + // UID information about the requesting user. + // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid"` +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes +// and NonResourceAuthorizationAttributes must be set +type SelfSubjectAccessReviewSpec struct { + // ResourceAuthorizationAttributes describes information for a resource access request + // +optional + ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` + // NonResourceAttributes describes information for a non-resource access request + // +optional + NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` +} + +// SubjectAccessReviewStatus +type SubjectAccessReviewStatus struct { + // Allowed is required. 
True if the action would be allowed, false otherwise. + Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` + // Denied is optional. True if the action would be denied, otherwise + // false. If both allowed is false and denied is false, then the + // authorizer has no opinion on whether to authorize the action. Denied + // may not be true if Allowed is true. + // +optional + Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` + // Reason is optional. It indicates why a request was allowed or denied. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // EvaluationError is an indication that some error occurred during the authorization check. + // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. + // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:onlyVerbs=create +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. +// The returned list of actions may be incomplete depending on the server's authorization mode, +// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, +// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to +// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. +// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. +type SelfSubjectRulesReview struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec holds information about the request being evaluated. + Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status is filled in by the server and indicates the set of actions a user can perform. + // +optional + Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +type SelfSubjectRulesReviewSpec struct { + // Namespace to evaluate rules for. Required. + Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` +} + +// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on +// the set of authorizers the server is configured with and any errors experienced during evaluation. +// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, +// even if that list is incomplete. +type SubjectRulesReviewStatus struct { + // ResourceRules is the list of actions the subject is allowed to perform on resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. + ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` + // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. + // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. 
+ NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` + // Incomplete is true when the rules returned by this call are incomplete. This is most commonly + // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. + Incomplete bool `json:"incomplete" protobuf:"bytes,3,rep,name=incomplete"` + // EvaluationError can appear in combination with Rules. It indicates an error occurred during + // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that + // ResourceRules and/or NonResourceRules may be incomplete. + // +optional + EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` +} + +// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, +// may contain duplicates, and possibly be incomplete. +type ResourceRule struct { + // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. "*" means all. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. + // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` +} + +// NonResourceRule holds information that describes a rule for the non-resource +type NonResourceRule struct { + // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, + // final step in the path. "*" means all. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` +} diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..3ae6e72 --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,173 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_LocalSubjectAccessReview = map[string]string{ + "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", + "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { + return map_LocalSubjectAccessReview +} + +var map_NonResourceAttributes = map[string]string{ + "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", + "path": "Path is the URL path of the request", + "verb": "Verb is the standard HTTP verb", +} + +func (NonResourceAttributes) SwaggerDoc() map[string]string { + return map_NonResourceAttributes +} + +var map_NonResourceRule = map[string]string{ + "": "NonResourceRule holds information that describes a rule for the non-resource", + "verbs": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \"*\" means all.", +} + +func (NonResourceRule) SwaggerDoc() map[string]string { + return map_NonResourceRule +} + +var map_ResourceAttributes = map[string]string{ + "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", + "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", + "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "group": "Group is the API Group of the Resource. \"*\" means all.", + "version": "Version is the API Version of the Resource. \"*\" means all.", + "resource": "Resource is one of the existing resource types. \"*\" means all.", + "subresource": "Subresource is one of the existing resource types. \"\" means none.", + "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". 
\"\" (empty) means all.", +} + +func (ResourceAttributes) SwaggerDoc() map[string]string { + return map_ResourceAttributes +} + +var map_ResourceRule = map[string]string{ + "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", + "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", +} + +func (ResourceRule) SwaggerDoc() map[string]string { + return map_ResourceRule +} + +var map_SelfSubjectAccessReview = map[string]string{ + "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", + "spec": "Spec holds information about the request being evaluated. user and groups must be empty", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReview +} + +var map_SelfSubjectAccessReviewSpec = map[string]string{ + "": "SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", +} + +func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectAccessReviewSpec +} + +var map_SelfSubjectRulesReview = map[string]string{ + "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", + "spec": "Spec holds information about the request being evaluated.", + "status": "Status is filled in by the server and indicates the set of actions a user can perform.", +} + +func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReview +} + +var map_SelfSubjectRulesReviewSpec = map[string]string{ + "namespace": "Namespace to evaluate rules for. 
Required.", +} + +func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { + return map_SelfSubjectRulesReviewSpec +} + +var map_SubjectAccessReview = map[string]string{ + "": "SubjectAccessReview checks whether or not a user or group can perform an action.", + "spec": "Spec holds information about the request being evaluated", + "status": "Status is filled in by the server and indicates whether the request is allowed or not", +} + +func (SubjectAccessReview) SwaggerDoc() map[string]string { + return map_SubjectAccessReview +} + +var map_SubjectAccessReviewSpec = map[string]string{ + "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", + "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", + "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", + "user": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups", + "group": "Groups is the groups you're testing for.", + "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", + "uid": "UID information about the requesting user.", +} + +func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewSpec +} + +var map_SubjectAccessReviewStatus = map[string]string{ + "": "SubjectAccessReviewStatus", + "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", + "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", + "reason": "Reason is optional. It indicates why a request was allowed or denied.", + "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", +} + +func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectAccessReviewStatus +} + +var map_SubjectRulesReviewStatus = map[string]string{ + "": "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.", + "resourceRules": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "nonResourceRules": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", + "incomplete": "Incomplete is true when the rules returned by this call are incomplete. 
This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.", + "evaluationError": "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.", +} + +func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { + return map_SubjectRulesReviewStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..58b2dfe --- /dev/null +++ b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,385 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. +func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { + if in == nil { + return nil + } + out := new(LocalSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes. 
+func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes { + if in == nil { + return nil + } + out := new(NonResourceAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule. +func (in *NonResourceRule) DeepCopy() *NonResourceRule { + if in == nil { + return nil + } + out := new(NonResourceRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes. +func (in *ResourceAttributes) DeepCopy() *ResourceAttributes { + if in == nil { + return nil + } + out := new(ResourceAttributes) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRule) DeepCopyInto(out *ResourceRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule. +func (in *ResourceRule) DeepCopy() *ResourceRule { + if in == nil { + return nil + } + out := new(ResourceRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview. +func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview { + if in == nil { + return nil + } + out := new(SelfSubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) { + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec. +func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectAccessReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. +func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. +func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { + if in == nil { + return nil + } + out := new(SelfSubjectRulesReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. +func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { + if in == nil { + return nil + } + out := new(SubjectAccessReview) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { + *out = *in + if in.ResourceAttributes != nil { + in, out := &in.ResourceAttributes, &out.ResourceAttributes + *out = new(ResourceAttributes) + **out = **in + } + if in.NonResourceAttributes != nil { + in, out := &in.NonResourceAttributes, &out.NonResourceAttributes + *out = new(NonResourceAttributes) + **out = **in + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec. +func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec { + if in == nil { + return nil + } + out := new(SubjectAccessReviewSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus. +func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus { + if in == nil { + return nil + } + out := new(SubjectAccessReviewStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { + *out = *in + if in.ResourceRules != nil { + in, out := &in.ResourceRules, &out.ResourceRules + *out = make([]ResourceRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NonResourceRules != nil { + in, out := &in.NonResourceRules, &out.NonResourceRules + *out = make([]NonResourceRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. +func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { + if in == nil { + return nil + } + out := new(SubjectRulesReviewStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go new file mode 100644 index 0000000..8c9c09b --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go new file mode 100644 index 0000000..1e3d890 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go @@ -0,0 +1,5570 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } +func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} +func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func 
(*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{10} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2bb1f2101a7f10e2, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_2bb1f2101a7f10e2, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{16} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{17} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_2bb1f2101a7f10e2, []int{18} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), 
"k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_2bb1f2101a7f10e2) +} + +var fileDescriptor_2bb1f2101a7f10e2 = []byte{ + // 1516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, + 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, + 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, + 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, + 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, + 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, + 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, + 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, + 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, + 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, + 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, + 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, + 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, + 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, + 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, + 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, + 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, + 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, + 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, + 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 
0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, + 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, + 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, + 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, + 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, + 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, + 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, + 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, + 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, + 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, + 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, + 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, + 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, + 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, + 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, + 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, + 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, + 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, + 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, + 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, + 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, + 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, + 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, + 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, + 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, + 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, + 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, + 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, + 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, + 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, + 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, + 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, + 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, + 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, + 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, + 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, + 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 
0x0b, 0x3f, 0x5a, + 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, + 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, + 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, + 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, + 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, + 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, + 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, + 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, + 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, + 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, + 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, + 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, + 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, + 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, + 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, + 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, + 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, + 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, + 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, + 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, + 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, + 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, + 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, + 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, + 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, + 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, + 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, + 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, + 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, + 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, + 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, + 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, + 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, + 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, + 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, + 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, + 0xf6, 
0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, + 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, + 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, +} + +func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TargetValue != nil { + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.MetricSelector != nil { + { + size, err := 
m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err 
:= m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TargetCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x20 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 + if m.MinReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentCPUUtilizationPercentage != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) + i-- + dAtA[i] = 0x28 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MetricSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + 
+func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Selector) + copy(dAtA[i:], m.Selector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + if m.MetricSelector != nil { + l = m.MetricSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetValue != nil { + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + if m.MetricSelector != nil { + l = m.MetricSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageValue != nil { + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if 
m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.TargetCPUUtilizationPercentage)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) + } + return n +} + +func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricSource) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + l = len(m.Selector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), 
`&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `TargetCPUUtilizationPercentage:` + valueToStringGenerated(this.TargetCPUUtilizationPercentage) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + 
strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + 
valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricSelector == nil { + m.MetricSelector = &v1.LabelSelector{} + } + if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetValue == nil { + m.TargetValue = &resource.Quantity{} + } + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricSelector == nil { + m.MetricSelector = &v1.LabelSelector{} + } + if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CurrentAverageValue == nil { + m.CurrentAverageValue = &resource.Quantity{} + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentCPUUtilizationPercentage = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricSource{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricStatus{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx 
= postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Selector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < 
l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto new file mode 100644 index 0000000..f50ed9d --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -0,0 +1,419 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.autoscaling.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
+message CrossVersionObjectReference {
+  // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+  optional string kind = 1;
+
+  // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
+  optional string name = 2;
+
+  // API version of the referent
+  // +optional
+  optional string apiVersion = 3;
+}
+
+// ExternalMetricSource indicates how to scale on a metric not associated with
+// any Kubernetes object (for example length of queue in cloud
+// messaging service, or QPS from loadbalancer running outside of cluster).
+message ExternalMetricSource {
+  // metricName is the name of the metric in question.
+  optional string metricName = 1;
+
+  // metricSelector is used to identify a specific time series
+  // within a given metric.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
+
+  // targetValue is the target value of the metric (as a quantity).
+  // Mutually exclusive with TargetAverageValue.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
+
+  // targetAverageValue is the target per-pod value of global metric (as a quantity).
+  // Mutually exclusive with TargetValue.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4;
+}
+
+// ExternalMetricStatus indicates the current value of a global metric
+// not associated with any Kubernetes object.
+message ExternalMetricStatus {
+  // metricName is the name of a metric used for autoscaling in
+  // metric system.
+  optional string metricName = 1;
+
+  // metricSelector is used to identify a specific time series
+  // within a given metric.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2;
+
+  // currentValue is the current value of the metric (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
+
+  // currentAverageValue is the current value of metric averaged over autoscaled pods.
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4;
+}
+
+// configuration of a horizontal pod autoscaler.
+message HorizontalPodAutoscaler {
+  // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
+  // +optional
+  optional HorizontalPodAutoscalerSpec spec = 2;
+
+  // current information about the autoscaler.
+  // +optional
+  optional HorizontalPodAutoscalerStatus status = 3;
+}
+
+// HorizontalPodAutoscalerCondition describes the state of
+// a HorizontalPodAutoscaler at a certain point.
+message HorizontalPodAutoscalerCondition {
+  // type describes the current condition
+  optional string type = 1;
+
+  // status is the status of the condition (True, False, Unknown)
+  optional string status = 2;
+
+  // lastTransitionTime is the last time the condition transitioned from
+  // one status to another
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
+
+  // reason is the reason for the condition's last transition.
+  // +optional
+  optional string reason = 4;
+
+  // message is a human-readable explanation containing details about
+  // the transition
+  // +optional
+  optional string message = 5;
+}
+
+// list of horizontal pod autoscaler objects.
+message HorizontalPodAutoscalerList {
+  // Standard list metadata.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // list of horizontal pod autoscaler objects.
+  repeated HorizontalPodAutoscaler items = 2;
+}
+
+// specification of a horizontal pod autoscaler.
+message HorizontalPodAutoscalerSpec {
+  // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption
+  // and will set the desired number of pods by using its Scale subresource.
+  optional CrossVersionObjectReference scaleTargetRef = 1;
+
+  // minReplicas is the lower limit for the number of replicas to which the autoscaler
+  // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the
+  // alpha feature gate HPAScaleToZero is enabled and at least one Object or External
+  // metric is configured. Scaling is active as long as at least one metric value is
+  // available.
+  // +optional
+  optional int32 minReplicas = 2;
+
+  // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.
+  optional int32 maxReplicas = 3;
+
+  // target average CPU utilization (represented as a percentage of requested CPU) over all the pods;
+  // if not specified the default autoscaling policy will be used.
+  // +optional
+  optional int32 targetCPUUtilizationPercentage = 4;
+}
+
+// current status of a horizontal pod autoscaler
+message HorizontalPodAutoscalerStatus {
+  // most recent generation observed by this autoscaler.
+  // +optional
+  optional int64 observedGeneration = 1;
+
+  // last time the HorizontalPodAutoscaler scaled the number of pods;
+  // used by the autoscaler to control how often the number of pods is changed.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
+
+  // current number of replicas of pods managed by this autoscaler.
+  optional int32 currentReplicas = 3;
+
+  // desired number of replicas of pods managed by this autoscaler.
+  optional int32 desiredReplicas = 4;
+
+  // current average CPU utilization over all pods, represented as a percentage of requested CPU,
+  // e.g. 70 means that an average pod is using now 70% of its requested CPU.
+  // +optional
+  optional int32 currentCPUUtilizationPercentage = 5;
+}
+
+// MetricSpec specifies how to scale based on a single metric
+// (only `type` and one other matching field should be set at once).
+message MetricSpec {
+  // type is the type of metric source. It should be one of "Object",
+  // "Pods" or "Resource", each mapping to a matching field in the object.
+  optional string type = 1;
+
+  // object refers to a metric describing a single kubernetes object
+  // (for example, hits-per-second on an Ingress object).
+  // +optional
+  optional ObjectMetricSource object = 2;
+
+  // pods refers to a metric describing each pod in the current scale target
+  // (for example, transactions-processed-per-second). The values will be
+  // averaged together before being compared to the target value.
+  // +optional
+  optional PodsMetricSource pods = 3;
+
+  // resource refers to a resource metric (such as those specified in
+  // requests and limits) known to Kubernetes describing each pod in the
+  // current scale target (e.g. CPU or memory). Such metrics are built in to
+  // Kubernetes, and have special scaling options on top of those available
+  // to normal per-pod metrics using the "pods" source.
+  // +optional
+  optional ResourceMetricSource resource = 4;
+
+  // external refers to a global metric that is not associated
+  // with any Kubernetes object. It allows autoscaling based on information
+  // coming from components running outside of cluster
+  // (for example length of queue in cloud messaging service, or
+  // QPS from loadbalancer running outside of cluster).
+  // +optional
+  optional ExternalMetricSource external = 5;
+}
+
+// MetricStatus describes the last-read state of a single metric.
+message MetricStatus {
+  // type is the type of metric source. It will be one of "Object",
+  // "Pods" or "Resource", each corresponds to a matching field in the object.
+  optional string type = 1;
+
+  // object refers to a metric describing a single kubernetes object
+  // (for example, hits-per-second on an Ingress object).
+  // +optional
+  optional ObjectMetricStatus object = 2;
+
+  // pods refers to a metric describing each pod in the current scale target
+  // (for example, transactions-processed-per-second). The values will be
+  // averaged together before being compared to the target value.
+  // +optional
+  optional PodsMetricStatus pods = 3;
+
+  // resource refers to a resource metric (such as those specified in
+  // requests and limits) known to Kubernetes describing each pod in the
+  // current scale target (e.g. CPU or memory). Such metrics are built in to
+  // Kubernetes, and have special scaling options on top of those available
+  // to normal per-pod metrics using the "pods" source.
+  // +optional
+  optional ResourceMetricStatus resource = 4;
+
+  // external refers to a global metric that is not associated
+  // with any Kubernetes object. It allows autoscaling based on information
+  // coming from components running outside of cluster
+  // (for example length of queue in cloud messaging service, or
+  // QPS from loadbalancer running outside of cluster).
+  // +optional
+  optional ExternalMetricStatus external = 5;
+}
+
+// ObjectMetricSource indicates how to scale on a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+message ObjectMetricSource {
+  // target is the described Kubernetes object.
+  optional CrossVersionObjectReference target = 1;
+
+  // metricName is the name of the metric in question.
+  optional string metricName = 2;
+
+  // targetValue is the target value of the metric (as a quantity).
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric.
+  // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // averageValue is the target value of the average of the
+  // metric across all relevant pods (as a quantity)
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
+}
+
+// ObjectMetricStatus indicates the current value of a metric describing a
+// kubernetes object (for example, hits-per-second on an Ingress object).
+message ObjectMetricStatus {
+  // target is the described Kubernetes object.
+  optional CrossVersionObjectReference target = 1;
+
+  // metricName is the name of the metric in question.
+  optional string metricName = 2;
+
+  // currentValue is the current value of the metric (as a quantity).
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4;
+
+  // averageValue is the current value of the average of the
+  // metric across all relevant pods (as a quantity)
+  // +optional
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5;
+}
+
+// PodsMetricSource indicates how to scale on a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+// The values will be averaged together before being compared to the target
+// value.
+message PodsMetricSource {
+  // metricName is the name of the metric in question
+  optional string metricName = 1;
+
+  // targetAverageValue is the target value of the average of the
+  // metric across all relevant pods (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
+}
+
+// PodsMetricStatus indicates the current value of a metric describing each pod in
+// the current scale target (for example, transactions-processed-per-second).
+message PodsMetricStatus {
+  // metricName is the name of the metric in question
+  optional string metricName = 1;
+
+  // currentAverageValue is the current value of the average of the
+  // metric across all relevant pods (as a quantity)
+  optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2;
+
+  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
+  // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
+  // When unset, just the metricName will be used to gather metrics.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3;
+}
+
+// ResourceMetricSource indicates how to scale on a resource metric known to
+// Kubernetes, as specified in requests and limits, describing each pod in the
+// current scale target (e.g. CPU or memory). The values will be averaged
+// together before being compared to the target. Such metrics are built in to
+// Kubernetes, and have special scaling options on top of those available to
+// normal per-pod metrics using the "pods" source. Only one "target" type
+// should be set.
+message ResourceMetricSource {
+  // name is the name of the resource in question.
+  optional string name = 1;
+
+  // targetAverageUtilization is the target value of the average of the
+  // resource metric across all relevant pods, represented as a percentage of
+  // the requested value of the resource for the pods.
+ // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; +} + +// Scale represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + optional ScaleSpec spec = 2; + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// ScaleSpec describes the attributes of a scale subresource. +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// ScaleStatus represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + optional string selector = 2; +} + diff --git a/vendor/k8s.io/api/autoscaling/v1/register.go b/vendor/k8s.io/api/autoscaling/v1/register.go new file mode 100644 index 0000000..8dfe361 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + &Scale{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go new file mode 100644 index 0000000..55b2a0d --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -0,0 +1,432 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// specification of a horizontal pod autoscaler. +type HorizontalPodAutoscalerSpec struct { + // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption + // and will set the desired number of pods by using its Scale subresource. 
+ ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; + // if not specified the default autoscaling policy will be used. + // +optional + TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` +} + +// current status of a horizontal pod autoscaler +type HorizontalPodAutoscalerStatus struct { + // most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // last time the HorizontalPodAutoscaler scaled the number of pods; + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // current number of replicas of pods managed by this autoscaler. + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desired number of replicas of pods managed by this autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // current average CPU utilization over all pods, represented as a percentage of requested CPU, + // e.g. 70 means that an average pod is using now 70% of its requested CPU. + // +optional + CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// configuration of a horizontal pod autoscaler. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Scale represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// ScaleSpec describes the attributes of a scale subresource. +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// ScaleStatus represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. This is same + // as the label selector but in the string format to avoid introspection + // by clients. The string will be in the same format as the query-param syntax. + // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` +} + +// the types below are used in the alpha metrics annotation + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +const ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" + // ExternalMetricSourceType is a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). 
+ ExternalMetricSourceType MetricSourceType = "External" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric. + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. 
+type PodsMetricSource struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +type ExternalMetricSource struct { + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). + // Mutually exclusive with TargetAverageValue. + // +optional + TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). + // Mutually exclusive with TargetValue. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. 
+ Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +const ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. + AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // type describes the current condition + Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). 
+type ObjectMetricStatus struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. 
+ CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +type ExternalMetricStatus struct { + // metricName is the name of a metric used for autoscaling in + // metric system. + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // currentValue is the current value of the metric (as a quantity) + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. + // +optional + CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` +} diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..129ce2b --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -0,0 +1,250 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
+var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_ExternalMetricSource = map[string]string{ + "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "metricName": "metricName is the name of the metric in question.", + "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", + "targetValue": "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.", + "targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.", +} + +func (ExternalMetricSource) SwaggerDoc() map[string]string { + return map_ExternalMetricSource +} + +var map_ExternalMetricStatus = map[string]string{ + "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", + "metricName": "metricName is the name of a metric used for autoscaling in metric system.", + "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", + "currentValue": "currentValue is the current value of the metric (as a quantity)", + "currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.", +} + +func (ExternalMetricStatus) SwaggerDoc() map[string]string { + return map_ExternalMetricStatus +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "configuration of a horizontal pod autoscaler.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerCondition = map[string]string{ + "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", + "type": "type describes the current condition", + "status": "status is the status of the condition (True, False, Unknown)", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", + "reason": "reason is the reason for the condition's last transition.", + "message": "message is a human-readable explanation containing details about the transition", +} + +func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerCondition +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "list of horizontal pod autoscaler objects.", + "metadata": "Standard list metadata.", + "items": "list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "specification of a horizontal pod autoscaler.", + "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", + "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "current status of a horizontal pod autoscaler", + "observedGeneration": "most recent generation observed by this autoscaler.", + "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "current number of replicas of pods managed by this autoscaler.", + "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", + "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. 
It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric. 
When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.", + "averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", + "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +var map_Scale = map[string]string{ + "": "Scale represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "ScaleSpec describes the attributes of a scale subresource.", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "ScaleStatus represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. 
More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..ddb6011 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -0,0 +1,515 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { + *out = *in + if in.MetricSelector != nil { + in, out := &in.MetricSelector, &out.MetricSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + x := (*in).DeepCopy() + *out = &x + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. +func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { + if in == nil { + return nil + } + out := new(ExternalMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { + *out = *in + if in.MetricSelector != nil { + in, out := &in.MetricSelector, &out.MetricSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + out.CurrentValue = in.CurrentValue.DeepCopy() + if in.CurrentAverageValue != nil { + in, out := &in.CurrentAverageValue, &out.CurrentAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. 
+func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { + if in == nil { + return nil + } + out := new(ExternalMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. +func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.TargetCPUUtilizationPercentage != nil { + in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. 
+func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = (*in).DeepCopy() + } + if in.CurrentCPUUtilizationPercentage != nil { + in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. +func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.Target = in.Target + out.TargetValue = in.TargetValue.DeepCopy() + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + out.Target = in.Target + out.CurrentValue = in.CurrentValue.DeepCopy() + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. +func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. 
+func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go new file mode 100644 index 0000000..2cc9f11 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go new file mode 100644 index 0000000..e129e41 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -0,0 +1,5095 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto + +package v2beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{3} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } +func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} +func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{4} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{5} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{6} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func 
(*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{7} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{8} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{9} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func (m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{10} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_26c1bfc7a52d0478, []int{11} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{12} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{13} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_26c1bfc7a52d0478, []int{14} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return 
fileDescriptor_26c1bfc7a52d0478, []int{15} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2beta1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_26c1bfc7a52d0478) +} + +var fileDescriptor_26c1bfc7a52d0478 = []byte{ + // 1475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, + 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, + 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, + 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, + 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, + 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, + 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, + 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, + 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 
0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, + 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, + 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, + 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, + 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, + 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, + 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, + 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, + 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, + 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, + 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, + 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, + 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, + 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, + 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, + 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, + 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, + 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, + 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, + 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, + 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, + 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, + 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, + 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, + 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, + 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, + 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, + 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, + 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, + 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, + 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, + 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, + 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, + 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, + 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, + 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, + 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 
0xf8, 0x3c, 0x58, + 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, + 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, + 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, + 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, + 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, + 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, + 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, + 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, + 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, + 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, + 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, + 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, + 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, + 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, + 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, + 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, + 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, + 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, + 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, + 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, + 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, + 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, + 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, + 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, + 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, + 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, + 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, + 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, + 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, + 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, + 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, + 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, + 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, + 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, + 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, + 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, + 0x30, 
0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, + 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, + 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, + 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, + 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, + 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, + 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, + 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, + 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, + 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, + 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, + 0x18, 0x00, 0x00, +} + +func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.TargetValue != nil { + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CurrentAverageValue != nil { + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.MetricSelector != nil { + { + size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] 
= 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 + if m.MinReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.CurrentMetrics) > 0 { + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, 
uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MetricSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + { + size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + 
} + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.MetricName) + copy(dAtA[i:], m.MetricName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TargetAverageValue != nil { + { + size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.TargetAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.CurrentAverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { 
+ offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + if m.MetricSelector != nil { + l = m.MetricSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetValue != nil { + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + if m.MetricSelector != nil { + l = m.MetricSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageValue != nil { + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, e := range m.CurrentMetrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MetricName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) + } + if m.TargetAverageValue != nil { + l = m.TargetAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.CurrentAverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) + } + l = m.CurrentAverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + 
return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s 
+} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", 
"CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricSource: 
wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricSelector == nil { + m.MetricSelector = &v1.LabelSelector{} + } + if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetValue == nil { + m.TargetValue = &resource.Quantity{} + } + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MetricSelector == nil { + m.MetricSelector = &v1.LabelSelector{} + } + if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CurrentAverageValue == nil { + m.CurrentAverageValue = 
&resource.Quantity{} + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx 
+ skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, MetricSpec{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) + if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, HorizontalPodAutoscalerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 
{ + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricSource{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = 
MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricStatus{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + 
} + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MetricName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TargetAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetAverageValue == nil { + m.TargetAverageValue = &resource.Quantity{} + } + if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CurrentAverageUtilization = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 
0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto new file mode 100644 index 0000000..f90f93f --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -0,0 +1,400 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.autoscaling.v2beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2beta1"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
+message CrossVersionObjectReference { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +// Exactly one "target" type should be set. +message ExternalMetricSource { + // metricName is the name of the metric in question. + optional string metricName = 1; + + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + + // targetValue is the target value of the metric (as a quantity). + // Mutually exclusive with TargetAverageValue. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + + // targetAverageValue is the target per-pod value of global metric (as a quantity). + // Mutually exclusive with TargetValue. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +message ExternalMetricStatus { + // metricName is the name of a metric used for autoscaling in + // metric system. + optional string metricName = 1; + + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; + + // currentValue is the current value of the metric (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + + // currentAverageValue is the current value of metric averaged over autoscaled pods. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; +} + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +message HorizontalPodAutoscaler { + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // status is the current information about the autoscaler. + // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. 
+message HorizontalPodAutoscalerCondition { + // type describes the current condition + optional string type = 1; + + // status is the status of the condition (True, False, Unknown) + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is the reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable explanation containing details about + // the transition + // +optional + optional string message = 5; +} + +// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // metadata is the standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +message HorizontalPodAutoscalerSpec { + // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + optional int32 minReplicas = 2; + + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + optional int32 maxReplicas = 3; + + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + repeated MetricSpec metrics = 4; +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +message HorizontalPodAutoscalerStatus { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + optional int32 currentReplicas = 3; + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + optional int32 desiredReplicas = 4; + + // currentMetrics is the last read state of the metrics used by this autoscaler. 
+ // +optional + repeated MetricStatus currentMetrics = 5; + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + repeated HorizontalPodAutoscalerCondition conditions = 6; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricSource external = 5; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricStatus external = 5; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). 
+message ObjectMetricSource { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // targetValue is the target value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping + // When unset, just the metricName will be used to gather metrics. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricStatus { + // target is the described Kubernetes object. + optional CrossVersionObjectReference target = 1; + + // metricName is the name of the metric in question. + optional string metricName = 2; + + // currentValue is the current value of the metric (as a quantity). + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // averageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metricName is the name of the metric in question + optional string metricName = 1; + + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping + // When unset, just the metricName will be used to gather metrics. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). 
+message PodsMetricStatus { + // metricName is the name of the metric in question + optional string metricName = 1; + + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 targetAverageUtilization = 2; + + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // name is the name of the resource in question. + optional string name = 1; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + optional int32 currentAverageUtilization = 2; + + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; +} + diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/register.go b/vendor/k8s.io/api/autoscaling/v2beta1/register.go new file mode 100644 index 0000000..12d697f --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go new file mode 100644 index 0000000..53a53a3 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -0,0 +1,408 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. 
+type HorizontalPodAutoscalerSpec struct { + // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // +optional + Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +const ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" + // ExternalMetricSourceType is a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + ExternalMetricSourceType MetricSourceType = "External" +) + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). 
+ // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricSource struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // targetValue is the target value of the metric (as a quantity). + TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // targetAverageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping + // When unset, just the metricName will be used to gather metrics. 
+ // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // targetAverageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` + // targetAverageValue is the target value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +// Exactly one "target" type should be set. +type ExternalMetricSource struct { + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // targetValue is the target value of the metric (as a quantity). + // Mutually exclusive with TargetAverageValue. + // +optional + TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` + // targetAverageValue is the target per-pod value of global metric (as a quantity). + // Mutually exclusive with TargetValue. + // +optional + TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"` +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. 
+ CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // currentMetrics is the last read state of the metrics used by this autoscaler. + // +optional + CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` +} + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +const ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. + AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // type describes the current condition + Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. 
+ // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +type ObjectMetricStatus struct { + // target is the described Kubernetes object. + Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` + + // metricName is the name of the metric in question. + MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` + // currentValue is the current value of the metric (as a quantity). + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` + // averageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metricName is the name of the metric in question + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // currentAverageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). 
Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. It will only be + // present if `targetAverageValue` was set in the corresponding metric + // specification. + // +optional + CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` + // currentAverageValue is the current value of the average of the + // resource metric across all relevant pods, as a raw value (instead of as + // a percentage of the request), similar to the "pods" metric source type. + // It will always be set, regardless of the corresponding metric specification. + CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +type ExternalMetricStatus struct { + // metricName is the name of a metric used for autoscaling in + // metric system. + MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` + // metricSelector is used to identify a specific time series + // within a given metric. + // +optional + MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` + // currentValue is the current value of the metric (as a quantity) + CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` + // currentAverageValue is the current value of metric averaged over autoscaled pods. + // +optional + CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. 
+ // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of horizontal pod autoscaler objects. + Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..a0d5f53 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -0,0 +1,221 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_ExternalMetricSource = map[string]string{ + "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \"target\" type should be set.", + "metricName": "metricName is the name of the metric in question.", + "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", + "targetValue": "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.", + "targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). 
Mutually exclusive with TargetValue.", +} + +func (ExternalMetricSource) SwaggerDoc() map[string]string { + return map_ExternalMetricSource +} + +var map_ExternalMetricStatus = map[string]string{ + "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", + "metricName": "metricName is the name of a metric used for autoscaling in metric system.", + "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", + "currentValue": "currentValue is the current value of the metric (as a quantity)", + "currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.", +} + +func (ExternalMetricStatus) SwaggerDoc() map[string]string { + return map_ExternalMetricStatus +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerCondition = map[string]string{ + "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", + "type": "type describes the current condition", + "status": "status is the status of the condition (True, False, Unknown)", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", + "reason": "reason is the reason for the condition's last transition.", + "message": "message is a human-readable explanation containing details about the transition", +} + +func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerCondition +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects.", + "metadata": "metadata is the standard list metadata.", + "items": "items is the list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. 
It cannot be less that minReplicas.", + "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", + "conditions": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. 
It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "targetValue": "targetValue is the target value of the metric (as a quantity).", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.", + "averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target is the described Kubernetes object.", + "metricName": "metricName is the name of the metric in question.", + "currentValue": "currentValue is the current value of the metric (as a quantity).", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", + "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). 
The values will be averaged together before being compared to the target value.", + "metricName": "metricName is the name of the metric in question", + "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metricName": "metricName is the name of the metric in question", + "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", + "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", + "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
It will only be present if `targetAverageValue` was set in the corresponding metric specification.", + "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..c51e05b --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -0,0 +1,466 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { + *out = *in + if in.MetricSelector != nil { + in, out := &in.MetricSelector, &out.MetricSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.TargetValue != nil { + in, out := &in.TargetValue, &out.TargetValue + x := (*in).DeepCopy() + *out = &x + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. +func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { + if in == nil { + return nil + } + out := new(ExternalMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { + *out = *in + if in.MetricSelector != nil { + in, out := &in.MetricSelector, &out.MetricSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + out.CurrentValue = in.CurrentValue.DeepCopy() + if in.CurrentAverageValue != nil { + in, out := &in.CurrentAverageValue, &out.CurrentAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. +func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { + if in == nil { + return nil + } + out := new(ExternalMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. +func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. +func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]HorizontalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. +func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.Target = in.Target + out.TargetValue = in.TargetValue.DeepCopy() + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + out.Target = in.Target + out.CurrentValue = in.CurrentValue.DeepCopy() + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + out.TargetAverageValue = in.TargetAverageValue.DeepCopy() + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. +func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + if in.TargetAverageUtilization != nil { + in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization + *out = new(int32) + **out = **in + } + if in.TargetAverageValue != nil { + in, out := &in.TargetAverageValue, &out.TargetAverageValue + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. +func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + if in.CurrentAverageUtilization != nil { + in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization + *out = new(int32) + **out = **in + } + out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go new file mode 100644 index 0000000..6d275f6 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v2beta2 // import "k8s.io/api/autoscaling/v2beta2" diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go new file mode 100644 index 0000000..c69d6cb --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -0,0 +1,6062 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto + +package v2beta2 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } +func (*CrossVersionObjectReference) ProtoMessage() {} +func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{0} +} +func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) +} +func (m *CrossVersionObjectReference) XXX_Size() int { + return m.Size() +} +func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo + +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{1} +} +func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricSource.Merge(m, src) +} +func (m *ExternalMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo + +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{2} +} +func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExternalMetricStatus.Merge(m, src) +} +func (m *ExternalMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ExternalMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo + +func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } +func (*HPAScalingPolicy) ProtoMessage() {} +func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{3} +} +func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HPAScalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HPAScalingPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_HPAScalingPolicy.Merge(m, src) +} +func (m *HPAScalingPolicy) XXX_Size() int { + return m.Size() +} +func (m *HPAScalingPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_HPAScalingPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo + +func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } +func (*HPAScalingRules) ProtoMessage() {} +func (*HPAScalingRules) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{4} +} +func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HPAScalingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HPAScalingRules) XXX_Merge(src proto.Message) { + xxx_messageInfo_HPAScalingRules.Merge(m, src) +} +func (m *HPAScalingRules) XXX_Size() int { + return m.Size() +} +func (m *HPAScalingRules) XXX_DiscardUnknown() { + xxx_messageInfo_HPAScalingRules.DiscardUnknown(m) +} + +var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo + +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{5} +} +func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) +} +func (m *HorizontalPodAutoscaler) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } +func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} +func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{6} +} +func 
(m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerBehavior.Merge(m, src) +} +func (m *HorizontalPodAutoscalerBehavior) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerBehavior) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerBehavior.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } +func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} +func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{7} +} +func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) +} +func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } +func (*HorizontalPodAutoscalerList) ProtoMessage() {} +func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{8} +} +func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) +} +func (m *HorizontalPodAutoscalerList) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } +func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} +func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{9} +} +func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) +} +func (m 
*HorizontalPodAutoscalerSpec) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo + +func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } +func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} +func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{10} +} +func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) +} +func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { + return m.Size() +} +func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo + +func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } +func (*MetricIdentifier) ProtoMessage() {} +func (*MetricIdentifier) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{11} +} +func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricIdentifier) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricIdentifier.Merge(m, src) +} +func (m *MetricIdentifier) XXX_Size() int { + return m.Size() +} +func (m *MetricIdentifier) XXX_DiscardUnknown() { + xxx_messageInfo_MetricIdentifier.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo + +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{12} +} +func (m *MetricSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricSpec.Merge(m, src) +} +func (m *MetricSpec) XXX_Size() int { + return m.Size() +} +func (m *MetricSpec) XXX_DiscardUnknown() { + xxx_messageInfo_MetricSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricSpec proto.InternalMessageInfo + +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{13} +} +func (m *MetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricStatus.Merge(m, src) +} +func 
(m *MetricStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricStatus proto.InternalMessageInfo + +func (m *MetricTarget) Reset() { *m = MetricTarget{} } +func (*MetricTarget) ProtoMessage() {} +func (*MetricTarget) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{14} +} +func (m *MetricTarget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricTarget) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricTarget.Merge(m, src) +} +func (m *MetricTarget) XXX_Size() int { + return m.Size() +} +func (m *MetricTarget) XXX_DiscardUnknown() { + xxx_messageInfo_MetricTarget.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricTarget proto.InternalMessageInfo + +func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } +func (*MetricValueStatus) ProtoMessage() {} +func (*MetricValueStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{15} +} +func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MetricValueStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_MetricValueStatus.Merge(m, src) +} +func (m *MetricValueStatus) XXX_Size() int { + return m.Size() +} +func (m *MetricValueStatus) XXX_DiscardUnknown() { + xxx_messageInfo_MetricValueStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo + +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{16} +} +func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricSource.Merge(m, src) +} +func (m *ObjectMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo + +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{17} +} +func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMetricStatus.Merge(m, src) +} +func (m *ObjectMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ObjectMetricStatus) 
XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo + +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{18} +} +func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricSource.Merge(m, src) +} +func (m *PodsMetricSource) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo + +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{19} +} +func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodsMetricStatus.Merge(m, src) +} +func (m *PodsMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *PodsMetricStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo + +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{20} +} +func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricSource.Merge(m, src) +} +func (m *ResourceMetricSource) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricSource) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo + +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_592ad94d7d6be24f, []int{21} +} +func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceMetricStatus.Merge(m, src) +} +func (m *ResourceMetricStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceMetricStatus) 
XXX_DiscardUnknown() { + xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus") + proto.RegisterType((*HPAScalingPolicy)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingPolicy") + proto.RegisterType((*HPAScalingRules)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingRules") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerBehavior)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricIdentifier)(nil), "k8s.io.api.autoscaling.v2beta2.MetricIdentifier") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2beta2.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.MetricStatus") + proto.RegisterType((*MetricTarget)(nil), "k8s.io.api.autoscaling.v2beta2.MetricTarget") + proto.RegisterType((*MetricValueStatus)(nil), "k8s.io.api.autoscaling.v2beta2.MetricValueStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_592ad94d7d6be24f) +} + +var fileDescriptor_592ad94d7d6be24f = []byte{ + // 1657 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdb, 0x6f, 0x1b, 0x45, + 0x17, 0xcf, 0xda, 0xce, 0x6d, 0x9c, 0x5b, 0xa7, 0xfd, 0x5a, 0x2b, 0xd5, 0x67, 0x47, 0xfb, 0x55, + 0x1f, 0x50, 0xd1, 0x35, 0x31, 0x01, 0x2a, 0x55, 0x08, 0xe2, 0x14, 0xda, 0xaa, 0x49, 0x1b, 0xc6, + 0x69, 0x40, 0x28, 0xad, 0x18, 0xef, 0x4e, 0x9c, 0x21, 0xf6, 0xae, 0xb5, 0xb3, 0x76, 0x9b, 0x22, + 0x21, 0x5e, 0x78, 0x47, 0x20, 0x5e, 0xf9, 0x03, 0x10, 0x42, 0xe2, 0x05, 0x89, 0x47, 0x2e, 0xaa, + 0x2a, 0x84, 0x50, 0xdf, 0x28, 0x2f, 0x16, 0x35, 0xff, 0x45, 0x9e, 0xd0, 0x5c, 0x76, 0xbd, 0xbb, + 0x76, 0x62, 0x27, 0x4a, 0x8a, 0xfa, 0xb6, 0x33, 0xe7, 0x9c, 0xdf, 0x99, 0x39, 0xf7, 0x59, 0x70, + 0x65, 0xfb, 0x22, 0x33, 0xa8, 0x93, 0xdf, 0x6e, 0x94, 0x89, 0x6b, 0x13, 0x8f, 0xb0, 0x7c, 0x93, + 0xd8, 0x96, 
0xe3, 0xe6, 0x15, 0x01, 0xd7, 0x69, 0x1e, 0x37, 0x3c, 0x87, 0x99, 0xb8, 0x4a, 0xed, + 0x4a, 0xbe, 0x59, 0x28, 0x13, 0x0f, 0x17, 0xf2, 0x15, 0x62, 0x13, 0x17, 0x7b, 0xc4, 0x32, 0xea, + 0xae, 0xe3, 0x39, 0x30, 0x2b, 0xf9, 0x0d, 0x5c, 0xa7, 0x46, 0x88, 0xdf, 0x50, 0xfc, 0xb3, 0x17, + 0x2a, 0xd4, 0xdb, 0x6a, 0x94, 0x0d, 0xd3, 0xa9, 0xe5, 0x2b, 0x4e, 0xc5, 0xc9, 0x0b, 0xb1, 0x72, + 0x63, 0x53, 0xac, 0xc4, 0x42, 0x7c, 0x49, 0xb8, 0x59, 0x3d, 0xa4, 0xde, 0x74, 0x5c, 0x92, 0x6f, + 0xce, 0xc7, 0x55, 0xce, 0x2e, 0x74, 0x78, 0x6a, 0xd8, 0xdc, 0xa2, 0x36, 0x71, 0x77, 0xf2, 0xf5, + 0xed, 0x8a, 0x10, 0x72, 0x09, 0x73, 0x1a, 0xae, 0x49, 0x0e, 0x24, 0xc5, 0xf2, 0x35, 0xe2, 0xe1, + 0x5e, 0xba, 0xf2, 0x7b, 0x49, 0xb9, 0x0d, 0xdb, 0xa3, 0xb5, 0x6e, 0x35, 0xaf, 0xf6, 0x13, 0x60, + 0xe6, 0x16, 0xa9, 0xe1, 0xb8, 0x9c, 0xfe, 0xa5, 0x06, 0xce, 0x2e, 0xb9, 0x0e, 0x63, 0xeb, 0xc4, + 0x65, 0xd4, 0xb1, 0x6f, 0x96, 0x3f, 0x24, 0xa6, 0x87, 0xc8, 0x26, 0x71, 0x89, 0x6d, 0x12, 0x38, + 0x07, 0x52, 0xdb, 0xd4, 0xb6, 0x32, 0xda, 0x9c, 0xf6, 0xfc, 0x78, 0x71, 0xe2, 0x61, 0x2b, 0x37, + 0xd4, 0x6e, 0xe5, 0x52, 0xd7, 0xa9, 0x6d, 0x21, 0x41, 0xe1, 0x1c, 0x36, 0xae, 0x91, 0x4c, 0x22, + 0xca, 0x71, 0x03, 0xd7, 0x08, 0x12, 0x14, 0x58, 0x00, 0x00, 0xd7, 0xa9, 0x52, 0x90, 0x49, 0x0a, + 0x3e, 0xa8, 0xf8, 0xc0, 0xe2, 0xea, 0x35, 0x45, 0x41, 0x21, 0x2e, 0xfd, 0x81, 0x06, 0x4e, 0xbd, + 0x75, 0xcf, 0x23, 0xae, 0x8d, 0xab, 0x2b, 0xc4, 0x73, 0xa9, 0x59, 0x12, 0xf6, 0x85, 0xef, 0x81, + 0x91, 0x9a, 0x58, 0x8b, 0x23, 0xa5, 0x0b, 0x2f, 0x19, 0xfb, 0x47, 0x82, 0x21, 0xa5, 0xaf, 0x59, + 0xc4, 0xf6, 0xe8, 0x26, 0x25, 0x6e, 0x71, 0x4a, 0xa9, 0x1e, 0x91, 0x14, 0xa4, 0xf0, 0xe0, 0x1a, + 0x18, 0xf1, 0xb0, 0x5b, 0x21, 0x9e, 0xb8, 0x4a, 0xba, 0xf0, 0xe2, 0x60, 0xc8, 0x6b, 0x42, 0xa6, + 0x83, 0x2a, 0xd7, 0x48, 0x61, 0xe9, 0xbf, 0x77, 0x5f, 0xc4, 0xc3, 0x5e, 0x83, 0x1d, 0xe3, 0x45, + 0x36, 0xc0, 0xa8, 0xd9, 0x70, 0x5d, 0x62, 0xfb, 0x37, 0x99, 0x1f, 0x0c, 0x7a, 0x1d, 0x57, 0x1b, + 0x44, 0x9e, 0xae, 0x38, 0xad, 0xb0, 0x47, 0x97, 0x24, 0x12, 0xf2, 0x21, 0xf5, 0x6f, 0x35, 0x30, + 0x73, 0x75, 0x75, 0xb1, 0x24, 0x21, 0x56, 0x9d, 0x2a, 0x35, 0x77, 0xe0, 0x45, 0x90, 0xf2, 0x76, + 0xea, 0x44, 0x85, 0xc9, 0x39, 0x3f, 0x08, 0xd6, 0x76, 0xea, 0x64, 0xb7, 0x95, 0x3b, 0x15, 0xe7, + 0xe7, 0xfb, 0x48, 0x48, 0xc0, 0xff, 0x81, 0xe1, 0x26, 0xd7, 0x2b, 0x8e, 0x3a, 0x5c, 0x9c, 0x54, + 0xa2, 0xc3, 0xe2, 0x30, 0x48, 0xd2, 0xe0, 0x25, 0x30, 0x59, 0x27, 0x2e, 0x75, 0xac, 0x12, 0x31, + 0x1d, 0xdb, 0x62, 0x22, 0x88, 0x86, 0x8b, 0xff, 0x51, 0xcc, 0x93, 0xab, 0x61, 0x22, 0x8a, 0xf2, + 0xea, 0x5f, 0x25, 0xc0, 0x74, 0xe7, 0x00, 0xa8, 0x51, 0x25, 0x0c, 0xde, 0x01, 0xb3, 0xcc, 0xc3, + 0x65, 0x5a, 0xa5, 0xf7, 0xb1, 0x47, 0x1d, 0xfb, 0x5d, 0x6a, 0x5b, 0xce, 0xdd, 0x28, 0x7a, 0xb6, + 0xdd, 0xca, 0xcd, 0x96, 0xf6, 0xe4, 0x42, 0xfb, 0x20, 0xc0, 0xeb, 0x60, 0x82, 0x91, 0x2a, 0x31, + 0x3d, 0x79, 0x5f, 0x65, 0x97, 0xe7, 0xda, 0xad, 0xdc, 0x44, 0x29, 0xb4, 0xbf, 0xdb, 0xca, 0x9d, + 0x8c, 0x18, 0x46, 0x12, 0x51, 0x44, 0x18, 0xde, 0x01, 0x63, 0x75, 0xfe, 0x45, 0x09, 0xcb, 0x24, + 0xe6, 0x92, 0x83, 0xc4, 0x4a, 0xdc, 0xe0, 0xc5, 0x19, 0x65, 0xaa, 0xb1, 0x55, 0x85, 0x84, 0x02, + 0x4c, 0xfd, 0xc7, 0x04, 0x38, 0x73, 0xd5, 0x71, 0xe9, 0x7d, 0xc7, 0xf6, 0x70, 0x75, 0xd5, 0xb1, + 0x16, 0x15, 0x22, 0x71, 0xe1, 0x07, 0x60, 0x8c, 0xd7, 0x28, 0x0b, 0x7b, 0xb8, 0x47, 0x9c, 0x06, + 0xa5, 0xc6, 0xa8, 0x6f, 0x57, 0xf8, 0x06, 0x33, 0x38, 0xb7, 0xd1, 0x9c, 0x37, 0x64, 0x21, 0x59, + 0x21, 0x1e, 0xee, 0xe4, 0x7a, 0x67, 0x0f, 0x05, 0xa8, 0xf0, 0x36, 0x48, 0xb1, 0x3a, 0x31, 0x55, + 0xa8, 0x5e, 0xea, 0x7b, 0xb3, 0xde, 
0x07, 0x2d, 0xd5, 0x89, 0xd9, 0x29, 0x3e, 0x7c, 0x85, 0x04, + 0x2c, 0x24, 0x60, 0x84, 0x89, 0x90, 0x16, 0x5e, 0x4d, 0x17, 0x5e, 0x3f, 0xac, 0x02, 0x99, 0x17, + 0x41, 0xce, 0xc9, 0x35, 0x52, 0xe0, 0xfa, 0x1f, 0x1a, 0xc8, 0xed, 0x21, 0x59, 0x24, 0x5b, 0xb8, + 0x49, 0x1d, 0x17, 0xae, 0x83, 0x51, 0xb1, 0x73, 0xab, 0xae, 0x4c, 0x99, 0x1f, 0xdc, 0x8d, 0x22, + 0x6c, 0x8b, 0x69, 0x9e, 0x91, 0x25, 0x89, 0x81, 0x7c, 0x30, 0xb8, 0x01, 0xc6, 0xc5, 0xe7, 0x65, + 0xe7, 0xae, 0xad, 0xcc, 0x78, 0x60, 0xe4, 0xc9, 0x76, 0x2b, 0x37, 0x5e, 0xf2, 0x51, 0x50, 0x07, + 0x50, 0xff, 0x34, 0x09, 0xe6, 0xf6, 0xb8, 0xd9, 0x92, 0x63, 0x5b, 0x94, 0x07, 0x3f, 0xbc, 0x1a, + 0xc9, 0xff, 0x85, 0x58, 0xfe, 0x9f, 0xeb, 0x27, 0x1f, 0xaa, 0x07, 0xcb, 0x81, 0xbf, 0x12, 0x11, + 0x2c, 0x65, 0xf0, 0xdd, 0x56, 0xae, 0x47, 0xaf, 0x36, 0x02, 0xa4, 0xa8, 0x5b, 0x60, 0x13, 0xc0, + 0x2a, 0x66, 0xde, 0x9a, 0x8b, 0x6d, 0x26, 0x35, 0xd1, 0x1a, 0x51, 0x91, 0x70, 0x7e, 0xb0, 0x40, + 0xe6, 0x12, 0xc5, 0x59, 0x75, 0x0a, 0xb8, 0xdc, 0x85, 0x86, 0x7a, 0x68, 0x80, 0xff, 0x07, 0x23, + 0x2e, 0xc1, 0xcc, 0xb1, 0x33, 0x29, 0x71, 0x8b, 0x20, 0x6c, 0x90, 0xd8, 0x45, 0x8a, 0x0a, 0x5f, + 0x00, 0xa3, 0x35, 0xc2, 0x18, 0xae, 0x90, 0xcc, 0xb0, 0x60, 0x0c, 0xea, 0xee, 0x8a, 0xdc, 0x46, + 0x3e, 0x5d, 0xff, 0x53, 0x03, 0x67, 0xf7, 0xb0, 0xe3, 0x32, 0x65, 0x1e, 0xdc, 0xe8, 0xca, 0x54, + 0x63, 0xb0, 0x0b, 0x72, 0x69, 0x91, 0xa7, 0x41, 0x8d, 0xf0, 0x77, 0x42, 0x59, 0xba, 0x01, 0x86, + 0xa9, 0x47, 0x6a, 0x7e, 0x01, 0x7a, 0xed, 0x90, 0x59, 0xd4, 0xa9, 0xef, 0xd7, 0x38, 0x1a, 0x92, + 0xa0, 0xfa, 0x83, 0xe4, 0x9e, 0x77, 0xe3, 0xa9, 0x0c, 0x3f, 0x02, 0x53, 0x62, 0xa5, 0x7a, 0x2b, + 0xd9, 0x54, 0x37, 0xec, 0x5b, 0x2d, 0xf6, 0x19, 0x6d, 0x8a, 0xa7, 0xd5, 0x51, 0xa6, 0x4a, 0x11, + 0x68, 0x14, 0x53, 0x05, 0xe7, 0x41, 0xba, 0x46, 0x6d, 0x44, 0xea, 0x55, 0x6a, 0x62, 0xa6, 0xfa, + 0xd4, 0x74, 0xbb, 0x95, 0x4b, 0xaf, 0x74, 0xb6, 0x51, 0x98, 0x07, 0xbe, 0x02, 0xd2, 0x35, 0x7c, + 0x2f, 0x10, 0x91, 0xfd, 0xe4, 0xa4, 0xd2, 0x97, 0x5e, 0xe9, 0x90, 0x50, 0x98, 0x0f, 0xde, 0xe2, + 0xd1, 0xc0, 0x3b, 0x31, 0xcb, 0xa4, 0x84, 0x99, 0xcf, 0x0f, 0xd6, 0xb8, 0x45, 0xf1, 0x0b, 0x45, + 0x8e, 0x80, 0x40, 0x3e, 0x16, 0xa4, 0x60, 0xac, 0xac, 0x6a, 0x90, 0x88, 0xb2, 0x74, 0xe1, 0x8d, + 0xc3, 0xba, 0x4f, 0xc1, 0x14, 0x27, 0x78, 0x98, 0xf8, 0x2b, 0x14, 0xc0, 0xeb, 0xdf, 0xa7, 0xc0, + 0x7f, 0xf7, 0x2d, 0xa0, 0xf0, 0x6d, 0x00, 0x9d, 0x32, 0x23, 0x6e, 0x93, 0x58, 0x57, 0xe4, 0x2c, + 0xca, 0x87, 0x42, 0xee, 0xce, 0x64, 0xf1, 0x34, 0xcf, 0xb0, 0x9b, 0x5d, 0x54, 0xd4, 0x43, 0x02, + 0x9a, 0x60, 0x92, 0xe7, 0x9d, 0xf4, 0x1d, 0x55, 0xf3, 0xe7, 0xc1, 0x92, 0xfa, 0x04, 0x1f, 0x1d, + 0x96, 0xc3, 0x20, 0x28, 0x8a, 0x09, 0x17, 0xc1, 0xb4, 0x1a, 0x7b, 0x62, 0xbe, 0x3c, 0xa3, 0x8c, + 0x3d, 0xbd, 0x14, 0x25, 0xa3, 0x38, 0x3f, 0x87, 0xb0, 0x08, 0xa3, 0x2e, 0xb1, 0x02, 0x88, 0x54, + 0x14, 0xe2, 0x72, 0x94, 0x8c, 0xe2, 0xfc, 0xb0, 0x0a, 0xa6, 0x14, 0xaa, 0x72, 0x6d, 0x66, 0x58, + 0x44, 0xc7, 0x80, 0x03, 0xaa, 0xea, 0x5c, 0x41, 0xb8, 0x2f, 0x45, 0xb0, 0x50, 0x0c, 0x1b, 0x7a, + 0x00, 0x98, 0x7e, 0x35, 0x65, 0x99, 0x11, 0xa1, 0xe9, 0xcd, 0x43, 0xc6, 0x4b, 0x50, 0x96, 0x3b, + 0x33, 0x40, 0xb0, 0xc5, 0x50, 0x48, 0x8f, 0xfe, 0x85, 0x06, 0x66, 0xe2, 0x03, 0x6e, 0xf0, 0xb4, + 0xd0, 0xf6, 0x7c, 0x5a, 0xdc, 0x06, 0x63, 0x72, 0x54, 0x72, 0x5c, 0x15, 0x00, 0x2f, 0x0f, 0x58, + 0xf4, 0x70, 0x99, 0x54, 0x4b, 0x4a, 0x54, 0x86, 0xb3, 0xbf, 0x42, 0x01, 0xa4, 0xfe, 0x75, 0x12, + 0x80, 0x4e, 0x8a, 0xc1, 0x85, 0x48, 0x97, 0x9b, 0x8b, 0x75, 0xb9, 0x99, 0xf0, 0x3b, 0x25, 0xd4, + 0xd1, 0xd6, 0xc1, 0x88, 0x23, 0x4a, 0x8f, 0x3a, 0x61, 0xa1, 
0x9f, 0x31, 0x83, 0x31, 0x29, 0x40, + 0x2b, 0x02, 0xde, 0x3b, 0x54, 0x01, 0x53, 0x68, 0xf0, 0x06, 0x48, 0xd5, 0x1d, 0xcb, 0x9f, 0x6b, + 0xfa, 0x8e, 0x84, 0xab, 0x8e, 0xc5, 0x22, 0x98, 0x63, 0xfc, 0xec, 0x7c, 0x17, 0x09, 0x1c, 0x3e, + 0x66, 0xfa, 0xaf, 0x58, 0x11, 0xa2, 0xe9, 0xc2, 0x42, 0x3f, 0x4c, 0xa4, 0xf8, 0x23, 0xb8, 0xc2, + 0x98, 0x3e, 0x05, 0x05, 0x98, 0x1c, 0x9f, 0xa8, 0x87, 0x90, 0x2a, 0x43, 0x7d, 0xf1, 0x7b, 0xbd, + 0x00, 0x25, 0xbe, 0x4f, 0x41, 0x01, 0xa6, 0xfe, 0x4d, 0x12, 0x4c, 0x44, 0x5e, 0x58, 0xff, 0x86, + 0xbb, 0x64, 0xae, 0x1d, 0xad, 0xbb, 0x24, 0xe6, 0xd1, 0xbb, 0x4b, 0xe2, 0x1e, 0x9f, 0xbb, 0x42, + 0xf8, 0x3d, 0xdc, 0xf5, 0x73, 0xc2, 0x77, 0x97, 0x6c, 0xb5, 0x83, 0xb9, 0x4b, 0xf2, 0x86, 0xdc, + 0x75, 0x33, 0xfc, 0x7e, 0xec, 0x33, 0xf3, 0x18, 0xfe, 0xe5, 0x8c, 0x77, 0x1a, 0xd8, 0xf6, 0xa8, + 0xb7, 0x53, 0x1c, 0xef, 0x7a, 0x6b, 0x5a, 0x60, 0x02, 0x37, 0x89, 0x8b, 0x2b, 0x44, 0x6c, 0x2b, + 0x7f, 0x1d, 0x14, 0x77, 0x86, 0x3f, 0xf5, 0x16, 0x43, 0x38, 0x28, 0x82, 0xca, 0xdb, 0xa0, 0x5a, + 0xdf, 0xf2, 0x82, 0x37, 0xa4, 0xea, 0x0c, 0xa2, 0x0d, 0x2e, 0x76, 0x51, 0x51, 0x0f, 0x09, 0xfd, + 0xf3, 0x04, 0x38, 0xd1, 0xf5, 0x7a, 0xef, 0x18, 0x45, 0x3b, 0x26, 0xa3, 0x24, 0x9e, 0xa2, 0x51, + 0x92, 0x07, 0x36, 0xca, 0x2f, 0x09, 0x00, 0xbb, 0x8b, 0x28, 0xfc, 0x58, 0xb4, 0x62, 0xd3, 0xa5, + 0x65, 0x62, 0x49, 0xf2, 0x51, 0x8c, 0x91, 0xe1, 0x3e, 0x1e, 0xc6, 0x46, 0x71, 0x65, 0xc7, 0xf3, + 0x83, 0x29, 0xf4, 0x1f, 0x29, 0x79, 0xb4, 0xff, 0x91, 0xf4, 0xdf, 0xe2, 0x66, 0x7c, 0xa6, 0x7f, + 0x5c, 0xf5, 0x72, 0x7f, 0xf2, 0x29, 0xba, 0x5f, 0xff, 0x49, 0x03, 0x33, 0xf1, 0x26, 0xfc, 0xcc, + 0xfd, 0xce, 0xfc, 0x35, 0x7a, 0x89, 0x67, 0xfb, 0x57, 0xe6, 0x77, 0x1a, 0x38, 0xd5, 0x6b, 0x84, + 0x81, 0x4b, 0x91, 0xc1, 0x33, 0x1f, 0x1e, 0x3c, 0x77, 0x5b, 0xb9, 0x5c, 0x8f, 0x1f, 0x10, 0x3e, + 0x4c, 0x68, 0x36, 0x3d, 0x1e, 0x07, 0xfc, 0xd0, 0x7d, 0x66, 0xe9, 0x84, 0x23, 0x39, 0xf3, 0xb1, + 0xda, 0xbb, 0x78, 0xe1, 0xe1, 0x93, 0xec, 0xd0, 0xa3, 0x27, 0xd9, 0xa1, 0xc7, 0x4f, 0xb2, 0x43, + 0x9f, 0xb4, 0xb3, 0xda, 0xc3, 0x76, 0x56, 0x7b, 0xd4, 0xce, 0x6a, 0x8f, 0xdb, 0x59, 0xed, 0xaf, + 0x76, 0x56, 0xfb, 0xec, 0xef, 0xec, 0xd0, 0xfb, 0xa3, 0x0a, 0xfa, 0x9f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x79, 0xae, 0x08, 0x04, 0x2d, 0x1a, 0x00, 0x00, +} + +func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HPAScalingPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HPAScalingPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HPAScalingPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HPAScalingRules) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HPAScalingRules) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StabilizationWindowSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds)) + i-- + dAtA[i] = 0x18 + } + if len(m.Policies) > 0 { + for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Policies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.SelectPolicy != nil { + i -= len(*m.SelectPolicy) + copy(dAtA[i:], *m.SelectPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelectPolicy))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerBehavior) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerBehavior) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerBehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ScaleDown != nil { + { + size, err := m.ScaleDown.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ScaleUp != nil { + { + size, err := m.ScaleUp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Behavior != nil { + { + size, err := m.Behavior.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Metrics) > 0 { + for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + i-- + dAtA[i] = 0x18 + if m.MinReplicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if len(m.CurrentMetrics) > 0 { + for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + i-- + dAtA[i] = 0x18 + if m.LastScaleTime != nil { + { + size, err := 
m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ObservedGeneration != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.External != nil { + { + size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Pods != nil { + { + size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Object != nil { + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricTarget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x20 + } + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MetricValueStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AverageUtilization != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) + i-- + dAtA[i] = 0x18 + } + if m.AverageValue != nil { + { + size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Value != nil { + { + size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return 
dAtA[:n], nil +} + +func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CrossVersionObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExternalMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HPAScalingPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + return n +} + +func (m *HPAScalingRules) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SelectPolicy != nil { + l = len(*m.SelectPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Policies) > 0 { + for _, e := range m.Policies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StabilizationWindowSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds)) + } + return n +} + +func (m *HorizontalPodAutoscaler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerBehavior) Size() (n int) { + if 
m == nil { + return 0 + } + var l int + _ = l + if m.ScaleUp != nil { + l = m.ScaleUp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ScaleDown != nil { + l = m.ScaleDown.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscalerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *HorizontalPodAutoscalerList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HorizontalPodAutoscalerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ScaleTargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.MinReplicas != nil { + n += 1 + sovGenerated(uint64(*m.MinReplicas)) + } + n += 1 + sovGenerated(uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, e := range m.Metrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Behavior != nil { + l = m.Behavior.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HorizontalPodAutoscalerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ObservedGeneration != nil { + n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + l = m.LastScaleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.CurrentReplicas)) + n += 1 + sovGenerated(uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, e := range m.CurrentMetrics { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *MetricIdentifier) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Object != nil { + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Pods != nil { + l = m.Pods.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.External != nil { + l = m.External.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *MetricTarget) Size() (n int) { + if m == nil 
{ + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.AverageUtilization)) + } + return n +} + +func (m *MetricValueStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageValue != nil { + l = m.AverageValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AverageUtilization != nil { + n += 1 + sovGenerated(uint64(*m.AverageUtilization)) + } + return n +} + +func (m *ObjectMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DescribedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DescribedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodsMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Metric.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceMetricStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Current.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CrossVersionObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CrossVersionObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricSource{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ExternalMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExternalMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 
1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HPAScalingPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HPAScalingPolicy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *HPAScalingRules) String() string { + if this == nil { + return "nil" + } + repeatedStringForPolicies := "[]HPAScalingPolicy{" + for _, f := range this.Policies { + repeatedStringForPolicies += strings.Replace(strings.Replace(f.String(), "HPAScalingPolicy", "HPAScalingPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForPolicies += "}" + s := strings.Join([]string{`&HPAScalingRules{`, + `SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`, + `Policies:` + repeatedStringForPolicies + `,`, + `StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscaler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscaler{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerBehavior) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerBehavior{`, + `ScaleUp:` + strings.Replace(this.ScaleUp.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, + `ScaleDown:` + strings.Replace(this.ScaleDown.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]HorizontalPodAutoscaler{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForMetrics := "[]MetricSpec{" + for _, f := range 
this.Metrics { + repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," + } + repeatedStringForMetrics += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, + `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, + `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, + `Metrics:` + repeatedStringForMetrics + `,`, + `Behavior:` + strings.Replace(this.Behavior.String(), "HorizontalPodAutoscalerBehavior", "HorizontalPodAutoscalerBehavior", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HorizontalPodAutoscalerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForCurrentMetrics := "[]MetricStatus{" + for _, f := range this.CurrentMetrics { + repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForCurrentMetrics += "}" + repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, + `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, + `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, + `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, + `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *MetricIdentifier) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricIdentifier{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricSpec{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *MetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricStatus{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + 
`,`, + `}`, + }, "") + return s +} +func (this *MetricTarget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricTarget{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, + `}`, + }, "") + return s +} +func (this *MetricValueStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&MetricValueStatus{`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricSource{`, + `DescribedObject:` + strings.Replace(strings.Replace(this.DescribedObject.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `DescribedObject:` + strings.Replace(strings.Replace(this.DescribedObject.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricSource{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodsMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ResourceMetricStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HPAScalingPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HPAScalingPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HPAScalingPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + m.PeriodSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PeriodSeconds |= int32(b&0x7F) << shift + if b < 
0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HPAScalingRules: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HPAScalingRules: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelectPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ScalingPolicySelect(dAtA[iNdEx:postIndex]) + m.SelectPolicy = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Policies = append(m.Policies, HPAScalingPolicy{}) + if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StabilizationWindowSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StabilizationWindowSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleUp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleUp == nil { + m.ScaleUp = &HPAScalingRules{} + } + if err := m.ScaleUp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleDown", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleDown == nil { + m.ScaleDown = &HPAScalingRules{} + } + if err := m.ScaleDown.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, HorizontalPodAutoscaler{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.MinReplicas = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) + } + m.MaxReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metrics = append(m.Metrics, MetricSpec{}) + if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Behavior", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Behavior == nil { + m.Behavior = &HorizontalPodAutoscalerBehavior{} + } + if err := m.Behavior.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ObservedGeneration = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScaleTime == nil { + m.LastScaleTime = &v1.Time{} + } + if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) + } + m.CurrentReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) + } + m.DesiredReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) + if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, HorizontalPodAutoscalerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if 
(iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricIdentifier: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricIdentifier: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricSource{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricSource{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricSource{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricSource{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + 
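+	// Illustrative aside (not generated output): unknown field numbers fall
+	// through to this default branch, where skipGenerated walks past their
+	// wire-encoded payload; if the field cannot be skipped cleanly (bad
+	// varint or truncated data), the error is returned here unchanged.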
return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Object == nil { + m.Object = &ObjectMetricStatus{} + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pods == nil { + m.Pods = &PodsMetricStatus{} + } + if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &ResourceMetricStatus{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.External == nil { + m.External = &ExternalMetricStatus{} + } + if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricTarget) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricTarget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = MetricTargetType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &resource.Quantity{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AverageUtilization = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MetricValueStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MetricValueStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &resource.Quantity{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex 
> l { + return io.ErrUnexpectedEOF + } + if m.AverageValue == nil { + m.AverageValue = &resource.Quantity{} + } + if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AverageUtilization", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AverageUtilization = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DescribedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DescribedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DescribedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DescribedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
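+	// Illustrative aside (not generated output): running off the end of the
+	// buffer in the middle of a varint means the message was truncated, so
+	// the decoder reports io.ErrUnexpectedEOF rather than guessing at the
+	// missing bytes.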
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + 
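+	// Illustrative aside (not generated output): Target is a length-delimited
+	// (wire type 2) sub-message, so the msglen varint read above bounds the
+	// window dAtA[iNdEx:postIndex] handed to MetricTarget's own Unmarshal;
+	// a nested decode failure propagates from here unchanged.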
return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } 
+ case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto new file mode 100644 index 0000000..24dc588 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -0,0 +1,438 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.autoscaling.v2beta2; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2beta2"; + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +message CrossVersionObjectReference { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + optional string kind = 1; + + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 2; + + // API version of the referent + // +optional + optional string apiVersion = 3; +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +message ExternalMetricSource { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. 
+message ExternalMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + +// HPAScalingPolicy is a single policy which must hold true for a specified past interval. +message HPAScalingPolicy { + // Type is used to specify the scaling policy. + optional string type = 1; + + // Value contains the amount of change which is permitted by the policy. + // It must be greater than zero + optional int32 value = 2; + + // PeriodSeconds specifies the window of time for which the policy should hold true. + // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + optional int32 periodSeconds = 3; +} + +// HPAScalingRules configures the scaling behavior for one direction. +// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. +// They can limit the scaling velocity by specifying scaling policies. +// They can prevent flapping by specifying the stabilization window, so that the +// number of replicas is not set instantly, instead, the safest value from the stabilization +// window is chosen. +message HPAScalingRules { + // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // considered while scaling up or scaling down. + // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + // If not set, use the default values: + // - For scale up: 0 (i.e. no stabilization is done). + // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + // +optional + optional int32 stabilizationWindowSeconds = 3; + + // selectPolicy is used to specify which policy should be used. + // If not set, the default value MaxPolicySelect is used. + // +optional + optional string selectPolicy = 1; + + // policies is a list of potential scaling polices which can be used during scaling. + // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + // +optional + repeated HPAScalingPolicy policies = 2; +} + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +message HorizontalPodAutoscaler { + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + optional HorizontalPodAutoscalerSpec spec = 2; + + // status is the current information about the autoscaler. + // +optional + optional HorizontalPodAutoscalerStatus status = 3; +} + +// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target +// in both Up and Down directions (scaleUp and scaleDown fields respectively). +message HorizontalPodAutoscalerBehavior { + // scaleUp is scaling policy for scaling Up. + // If not set, the default value is the higher of: + // * increase no more than 4 pods per 60 seconds + // * double the number of pods per 60 seconds + // No stabilization is used. 
+ // +optional + optional HPAScalingRules scaleUp = 1; + + // scaleDown is scaling policy for scaling Down. + // If not set, the default value is to allow to scale down to minReplicas pods, with a + // 300 second stabilization window (i.e., the highest recommendation for + // the last 300sec is used). + // +optional + optional HPAScalingRules scaleDown = 2; +} + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +message HorizontalPodAutoscalerCondition { + // type describes the current condition + optional string type = 1; + + // status is the status of the condition (True, False, Unknown) + optional string status = 2; + + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // reason is the reason for the condition's last transition. + // +optional + optional string reason = 4; + + // message is a human-readable explanation containing details about + // the transition + // +optional + optional string message = 5; +} + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +message HorizontalPodAutoscalerList { + // metadata is the standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of horizontal pod autoscaler objects. + repeated HorizontalPodAutoscaler items = 2; +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +message HorizontalPodAutoscalerSpec { + // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. + optional CrossVersionObjectReference scaleTargetRef = 1; + + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + optional int32 minReplicas = 2; + + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + optional int32 maxReplicas = 3; + + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // If not set, the default metric will be set to 80% average CPU utilization. + // +optional + repeated MetricSpec metrics = 4; + + // behavior configures the scaling behavior of the target + // in both Up and Down directions (scaleUp and scaleDown fields respectively). + // If not set, the default HPAScalingRules for scale up and scale down are used. + // +optional + optional HorizontalPodAutoscalerBehavior behavior = 5; +} + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. 
+message HorizontalPodAutoscalerStatus { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + optional int64 observedGeneration = 1; + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + optional int32 currentReplicas = 3; + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + optional int32 desiredReplicas = 4; + + // currentMetrics is the last read state of the metrics used by this autoscaler. + // +optional + repeated MetricStatus currentMetrics = 5; + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + repeated HorizontalPodAutoscalerCondition conditions = 6; +} + +// MetricIdentifier defines the name and optionally selector for a metric +message MetricIdentifier { + // name is the name of the given metric + optional string name = 1; + + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +message MetricSpec { + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. + optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricSource object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricSource pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricSource resource = 4; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricSource external = 5; +} + +// MetricStatus describes the last-read state of a single metric. +message MetricStatus { + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. 
+ optional string type = 1; + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + optional ObjectMetricStatus object = 2; + + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + optional PodsMetricStatus pods = 3; + + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + optional ResourceMetricStatus resource = 4; + + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + optional ExternalMetricStatus external = 5; +} + +// MetricTarget defines the target value, average value, or average utilization of a specific metric +message MetricTarget { + // type represents whether the metric type is Utilization, Value, or AverageValue + optional string type = 1; + + // value is the target value of the metric (as a quantity). + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; + + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3; + + // averageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // Currently only valid for Resource metric source type + // +optional + optional int32 averageUtilization = 4; +} + +// MetricValueStatus holds the current value for a metric +message MetricValueStatus { + // value is the current value of the metric (as a quantity). + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1; + + // averageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2; + + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + optional int32 averageUtilization = 3; +} + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). +message ObjectMetricSource { + optional CrossVersionObjectReference describedObject = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; + + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 3; +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). 
+message ObjectMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; + + optional CrossVersionObjectReference describedObject = 3; +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +message PodsMetricSource { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +message PodsMetricStatus { + // metric identifies the target metric by name and selector + optional MetricIdentifier metric = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +message ResourceMetricSource { + // name is the name of the resource in question. + optional string name = 1; + + // target specifies the target value for the given metric + optional MetricTarget target = 2; +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +message ResourceMetricStatus { + // Name is the name of the resource in question. + optional string name = 1; + + // current contains the current value for the given metric + optional MetricValueStatus current = 2; +} + diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/register.go b/vendor/k8s.io/api/autoscaling/v2beta2/register.go new file mode 100644 index 0000000..eb1265c --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta2/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2beta2 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "autoscaling" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta2"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &HorizontalPodAutoscaler{}, + &HorizontalPodAutoscalerList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go new file mode 100644 index 0000000..614caeb --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -0,0 +1,480 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +package v2beta2 + +import ( + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscaler is the configuration for a horizontal pod +// autoscaler, which automatically manages the replica count of any resource +// implementing the scale subresource based on the metrics specified. +type HorizontalPodAutoscaler struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification for the behaviour of the autoscaler. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // status is the current information about the autoscaler. + // +optional + Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. +type HorizontalPodAutoscalerSpec struct { + // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics + // should be collected, as well as to actually change the replica count. 
+ ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` + // minReplicas is the lower limit for the number of replicas to which the autoscaler + // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the + // alpha feature gate HPAScaleToZero is enabled and at least one Object or External + // metric is configured. Scaling is active as long as at least one metric value is + // available. + // +optional + MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` + // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. + // It cannot be less that minReplicas. + MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` + // metrics contains the specifications for which to use to calculate the + // desired replica count (the maximum replica count across all metrics will + // be used). The desired replica count is calculated multiplying the + // ratio between the target value and the current value by the current + // number of pods. Ergo, metrics used must decrease as the pod count is + // increased, and vice-versa. See the individual metric source types for + // more information about how each type of metric must respond. + // If not set, the default metric will be set to 80% average CPU utilization. + // +optional + Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` + + // behavior configures the scaling behavior of the target + // in both Up and Down directions (scaleUp and scaleDown fields respectively). + // If not set, the default HPAScalingRules for scale up and scale down are used. + // +optional + Behavior *HorizontalPodAutoscalerBehavior `json:"behavior,omitempty" protobuf:"bytes,5,opt,name=behavior"` +} + +// CrossVersionObjectReference contains enough information to let you identify the referred resource. +type CrossVersionObjectReference struct { + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + // API version of the referent + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` +} + +// MetricSpec specifies how to scale based on a single metric +// (only `type` and one other matching field should be set at once). +type MetricSpec struct { + // type is the type of metric source. It should be one of "Object", + // "Pods" or "Resource", each mapping to a matching field in the object. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. 
+ // +optional + Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target +// in both Up and Down directions (scaleUp and scaleDown fields respectively). +type HorizontalPodAutoscalerBehavior struct { + // scaleUp is scaling policy for scaling Up. + // If not set, the default value is the higher of: + // * increase no more than 4 pods per 60 seconds + // * double the number of pods per 60 seconds + // No stabilization is used. + // +optional + ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` + // scaleDown is scaling policy for scaling Down. + // If not set, the default value is to allow to scale down to minReplicas pods, with a + // 300 second stabilization window (i.e., the highest recommendation for + // the last 300sec is used). + // +optional + ScaleDown *HPAScalingRules `json:"scaleDown,omitempty" protobuf:"bytes,2,opt,name=scaleDown"` +} + +// ScalingPolicySelect is used to specify which policy should be used while scaling in a certain direction +type ScalingPolicySelect string + +const ( + // MaxPolicySelect selects the policy with the highest possible change. + MaxPolicySelect ScalingPolicySelect = "Max" + // MinPolicySelect selects the policy with the lowest possible change. + MinPolicySelect ScalingPolicySelect = "Min" + // DisabledPolicySelect disables the scaling in this direction. + DisabledPolicySelect ScalingPolicySelect = "Disabled" +) + +// HPAScalingRules configures the scaling behavior for one direction. +// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. +// They can limit the scaling velocity by specifying scaling policies. +// They can prevent flapping by specifying the stabilization window, so that the +// number of replicas is not set instantly, instead, the safest value from the stabilization +// window is chosen. +type HPAScalingRules struct { + // StabilizationWindowSeconds is the number of seconds for which past recommendations should be + // considered while scaling up or scaling down. + // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). + // If not set, use the default values: + // - For scale up: 0 (i.e. no stabilization is done). + // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). + // +optional + StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` + // selectPolicy is used to specify which policy should be used. 
+ // If not set, the default value MaxPolicySelect is used. + // +optional + SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` + // policies is a list of potential scaling polices which can be used during scaling. + // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid + // +optional + Policies []HPAScalingPolicy `json:"policies,omitempty" protobuf:"bytes,2,rep,name=policies"` +} + +// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions. +type HPAScalingPolicyType string + +const ( + // PodsScalingPolicy is a policy used to specify a change in absolute number of pods. + PodsScalingPolicy HPAScalingPolicyType = "Pods" + // PercentScalingPolicy is a policy used to specify a relative amount of change with respect to + // the current number of pods. + PercentScalingPolicy HPAScalingPolicyType = "Percent" +) + +// HPAScalingPolicy is a single policy which must hold true for a specified past interval. +type HPAScalingPolicy struct { + // Type is used to specify the scaling policy. + Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` + // Value contains the amount of change which is permitted by the policy. + // It must be greater than zero + Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` + // PeriodSeconds specifies the window of time for which the policy should hold true. + // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). + PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` +} + +// MetricSourceType indicates the type of metric. +type MetricSourceType string + +const ( + // ObjectMetricSourceType is a metric describing a kubernetes object + // (for example, hits-per-second on an Ingress object). + ObjectMetricSourceType MetricSourceType = "Object" + // PodsMetricSourceType is a metric describing each pod in the current scale + // target (for example, transactions-processed-per-second). The values + // will be averaged together before being compared to the target value. + PodsMetricSourceType MetricSourceType = "Pods" + // ResourceMetricSourceType is a resource metric known to Kubernetes, as + // specified in requests and limits, describing each pod in the current + // scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics (the "pods" source). + ResourceMetricSourceType MetricSourceType = "Resource" + // ExternalMetricSourceType is a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + ExternalMetricSourceType MetricSourceType = "External" +) + +// ObjectMetricSource indicates how to scale on a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). 
+type ObjectMetricSource struct { + DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` +} + +// PodsMetricSource indicates how to scale on a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +// The values will be averaged together before being compared to the target +// value. +type PodsMetricSource struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` +} + +// ResourceMetricSource indicates how to scale on a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). The values will be averaged +// together before being compared to the target. Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. Only one "target" type +// should be set. +type ResourceMetricSource struct { + // name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` +} + +// ExternalMetricSource indicates how to scale on a metric not associated with +// any Kubernetes object (for example length of queue in cloud +// messaging service, or QPS from loadbalancer running outside of cluster). +type ExternalMetricSource struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // target specifies the target value for the given metric + Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` +} + +// MetricIdentifier defines the name and optionally selector for a metric +type MetricIdentifier struct { + // name is the name of the given metric + Name string `json:"name" protobuf:"bytes,1,name=name"` + // selector is the string-encoded form of a standard kubernetes label selector for the given metric + // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. + // When unset, just the metricName will be used to gather metrics. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,name=selector"` +} + +// MetricTarget defines the target value, average value, or average utilization of a specific metric +type MetricTarget struct { + // type represents whether the metric type is Utilization, Value, or AverageValue + Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` + // value is the target value of the metric (as a quantity). 
+ // +optional + Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // averageValue is the target value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` + // averageUtilization is the target value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // Currently only valid for Resource metric source type + // +optional + AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,4,opt,name=averageUtilization"` +} + +// MetricTargetType specifies the type of metric being targeted, and should be either +// "Value", "AverageValue", or "Utilization" +type MetricTargetType string + +const ( + // UtilizationMetricType declares a MetricTarget is an AverageUtilization value + UtilizationMetricType MetricTargetType = "Utilization" + // ValueMetricType declares a MetricTarget is a raw value + ValueMetricType MetricTargetType = "Value" + // AverageValueMetricType declares a MetricTarget is an + AverageValueMetricType MetricTargetType = "AverageValue" +) + +// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. +type HorizontalPodAutoscalerStatus struct { + // observedGeneration is the most recent generation observed by this autoscaler. + // +optional + ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, + // used by the autoscaler to control how often the number of pods is changed. + // +optional + LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` + + // currentReplicas is current number of replicas of pods managed by this autoscaler, + // as last seen by the autoscaler. + CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` + + // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, + // as last calculated by the autoscaler. + DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` + + // currentMetrics is the last read state of the metrics used by this autoscaler. + // +optional + CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` + + // conditions is the set of conditions required for this autoscaler to scale its target, + // and indicates whether or not those conditions are met. + Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` +} + +// HorizontalPodAutoscalerConditionType are the valid conditions of +// a HorizontalPodAutoscaler. +type HorizontalPodAutoscalerConditionType string + +const ( + // ScalingActive indicates that the HPA controller is able to scale if necessary: + // it's correctly configured, can fetch the desired metrics, and isn't disabled. + ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" + // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, + // such as being in a backoff window, or being unable to access/update the target scale. 
+ AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" + // ScalingLimited indicates that the calculated scale based on metrics would be above or + // below the range for the HPA, and has thus been capped. + ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" +) + +// HorizontalPodAutoscalerCondition describes the state of +// a HorizontalPodAutoscaler at a certain point. +type HorizontalPodAutoscalerCondition struct { + // type describes the current condition + Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` + // status is the status of the condition (True, False, Unknown) + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` + // lastTransitionTime is the last time the condition transitioned from + // one status to another + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // reason is the reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // message is a human-readable explanation containing details about + // the transition + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// MetricStatus describes the last-read state of a single metric. +type MetricStatus struct { + // type is the type of metric source. It will be one of "Object", + // "Pods" or "Resource", each corresponds to a matching field in the object. + Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` + + // object refers to a metric describing a single kubernetes object + // (for example, hits-per-second on an Ingress object). + // +optional + Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` + // pods refers to a metric describing each pod in the current scale target + // (for example, transactions-processed-per-second). The values will be + // averaged together before being compared to the target value. + // +optional + Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` + // resource refers to a resource metric (such as those specified in + // requests and limits) known to Kubernetes describing each pod in the + // current scale target (e.g. CPU or memory). Such metrics are built in to + // Kubernetes, and have special scaling options on top of those available + // to normal per-pod metrics using the "pods" source. + // +optional + Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` + // external refers to a global metric that is not associated + // with any Kubernetes object. It allows autoscaling based on information + // coming from components running outside of cluster + // (for example length of queue in cloud messaging service, or + // QPS from loadbalancer running outside of cluster). + // +optional + External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` +} + +// ObjectMetricStatus indicates the current value of a metric describing a +// kubernetes object (for example, hits-per-second on an Ingress object). 
+type ObjectMetricStatus struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` + + DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"` +} + +// PodsMetricStatus indicates the current value of a metric describing each pod in +// the current scale target (for example, transactions-processed-per-second). +type PodsMetricStatus struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` +} + +// ResourceMetricStatus indicates the current value of a resource metric known to +// Kubernetes, as specified in requests and limits, describing each pod in the +// current scale target (e.g. CPU or memory). Such metrics are built in to +// Kubernetes, and have special scaling options on top of those available to +// normal per-pod metrics using the "pods" source. +type ResourceMetricStatus struct { + // Name is the name of the resource in question. + Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` +} + +// ExternalMetricStatus indicates the current value of a global metric +// not associated with any Kubernetes object. +type ExternalMetricStatus struct { + // metric identifies the target metric by name and selector + Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` + // current contains the current value for the given metric + Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` +} + +// MetricValueStatus holds the current value for a metric +type MetricValueStatus struct { + // value is the current value of the metric (as a quantity). + // +optional + Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` + // averageValue is the current value of the average of the + // metric across all relevant pods (as a quantity) + // +optional + AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` + // currentAverageUtilization is the current value of the average of the + // resource metric across all relevant pods, represented as a percentage of + // the requested value of the resource for the pods. + // +optional + AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,3,opt,name=averageUtilization"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. +type HorizontalPodAutoscalerList struct { + metav1.TypeMeta `json:",inline"` + // metadata is the standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of horizontal pod autoscaler objects. 
+ Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go new file mode 100644 index 0000000..3f38880 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -0,0 +1,273 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2beta2 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CrossVersionObjectReference = map[string]string{ + "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", + "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "apiVersion": "API version of the referent", +} + +func (CrossVersionObjectReference) SwaggerDoc() map[string]string { + return map_CrossVersionObjectReference +} + +var map_ExternalMetricSource = map[string]string{ + "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", + "metric": "metric identifies the target metric by name and selector", + "target": "target specifies the target value for the given metric", +} + +func (ExternalMetricSource) SwaggerDoc() map[string]string { + return map_ExternalMetricSource +} + +var map_ExternalMetricStatus = map[string]string{ + "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", + "metric": "metric identifies the target metric by name and selector", + "current": "current contains the current value for the given metric", +} + +func (ExternalMetricStatus) SwaggerDoc() map[string]string { + return map_ExternalMetricStatus +} + +var map_HPAScalingPolicy = map[string]string{ + "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", + "type": "Type is used to specify the scaling policy.", + "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", + "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. 
PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", +} + +func (HPAScalingPolicy) SwaggerDoc() map[string]string { + return map_HPAScalingPolicy +} + +var map_HPAScalingRules = map[string]string{ + "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", + "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", + "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.", + "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", +} + +func (HPAScalingRules) SwaggerDoc() map[string]string { + return map_HPAScalingRules +} + +var map_HorizontalPodAutoscaler = map[string]string{ + "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "status is the current information about the autoscaler.", +} + +func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscaler +} + +var map_HorizontalPodAutoscalerBehavior = map[string]string{ + "": "HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).", + "scaleUp": "scaleUp is scaling policy for scaling Up. If not set, the default value is the higher of:\n * increase no more than 4 pods per 60 seconds\n * double the number of pods per 60 seconds\nNo stabilization is used.", + "scaleDown": "scaleDown is scaling policy for scaling Down. 
If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used).", +} + +func (HorizontalPodAutoscalerBehavior) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerBehavior +} + +var map_HorizontalPodAutoscalerCondition = map[string]string{ + "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", + "type": "type describes the current condition", + "status": "status is the status of the condition (True, False, Unknown)", + "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", + "reason": "reason is the reason for the condition's last transition.", + "message": "message is a human-readable explanation containing details about the transition", +} + +func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerCondition +} + +var map_HorizontalPodAutoscalerList = map[string]string{ + "": "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.", + "metadata": "metadata is the standard list metadata.", + "items": "items is the list of horizontal pod autoscaler objects.", +} + +func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerList +} + +var map_HorizontalPodAutoscalerSpec = map[string]string{ + "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", + "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", + "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. If not set, the default metric will be set to 80% average CPU utilization.", + "behavior": "behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). 
If not set, the default HPAScalingRules for scale up and scale down are used.", +} + +func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerSpec +} + +var map_HorizontalPodAutoscalerStatus = map[string]string{ + "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", + "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", + "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", + "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", + "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", + "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", + "conditions": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", +} + +func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { + return map_HorizontalPodAutoscalerStatus +} + +var map_MetricIdentifier = map[string]string{ + "": "MetricIdentifier defines the name and optionally selector for a metric", + "name": "name is the name of the given metric", + "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", +} + +func (MetricIdentifier) SwaggerDoc() map[string]string { + return map_MetricIdentifier +} + +var map_MetricSpec = map[string]string{ + "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", + "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricSpec) SwaggerDoc() map[string]string { + return map_MetricSpec +} + +var map_MetricStatus = map[string]string{ + "": "MetricStatus describes the last-read state of a single metric.", + "type": "type is the type of metric source. 
It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", + "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", + "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", +} + +func (MetricStatus) SwaggerDoc() map[string]string { + return map_MetricStatus +} + +var map_MetricTarget = map[string]string{ + "": "MetricTarget defines the target value, average value, or average utilization of a specific metric", + "type": "type represents whether the metric type is Utilization, Value, or AverageValue", + "value": "value is the target value of the metric (as a quantity).", + "averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)", + "averageUtilization": "averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
Currently only valid for Resource metric source type", +} + +func (MetricTarget) SwaggerDoc() map[string]string { + return map_MetricTarget +} + +var map_MetricValueStatus = map[string]string{ + "": "MetricValueStatus holds the current value for a metric", + "value": "value is the current value of the metric (as a quantity).", + "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", + "averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", +} + +func (MetricValueStatus) SwaggerDoc() map[string]string { + return map_MetricValueStatus +} + +var map_ObjectMetricSource = map[string]string{ + "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "target": "target specifies the target value for the given metric", + "metric": "metric identifies the target metric by name and selector", +} + +func (ObjectMetricSource) SwaggerDoc() map[string]string { + return map_ObjectMetricSource +} + +var map_ObjectMetricStatus = map[string]string{ + "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", + "metric": "metric identifies the target metric by name and selector", + "current": "current contains the current value for the given metric", +} + +func (ObjectMetricStatus) SwaggerDoc() map[string]string { + return map_ObjectMetricStatus +} + +var map_PodsMetricSource = map[string]string{ + "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", + "metric": "metric identifies the target metric by name and selector", + "target": "target specifies the target value for the given metric", +} + +func (PodsMetricSource) SwaggerDoc() map[string]string { + return map_PodsMetricSource +} + +var map_PodsMetricStatus = map[string]string{ + "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", + "metric": "metric identifies the target metric by name and selector", + "current": "current contains the current value for the given metric", +} + +func (PodsMetricStatus) SwaggerDoc() map[string]string { + return map_PodsMetricStatus +} + +var map_ResourceMetricSource = map[string]string{ + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", + "target": "target specifies the target value for the given metric", +} + +func (ResourceMetricSource) SwaggerDoc() map[string]string { + return map_ResourceMetricSource +} + +var map_ResourceMetricStatus = map[string]string{ + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "Name is the name of the resource in question.", + "current": "current contains the current value for the given metric", +} + +func (ResourceMetricStatus) SwaggerDoc() map[string]string { + return map_ResourceMetricStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go new file mode 100644 index 0000000..ca26fe9 --- /dev/null +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -0,0 +1,565 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2beta2 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. +func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { + if in == nil { + return nil + } + out := new(CrossVersionObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. +func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { + if in == nil { + return nil + } + out := new(ExternalMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. +func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { + if in == nil { + return nil + } + out := new(ExternalMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPAScalingPolicy) DeepCopyInto(out *HPAScalingPolicy) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingPolicy. +func (in *HPAScalingPolicy) DeepCopy() *HPAScalingPolicy { + if in == nil { + return nil + } + out := new(HPAScalingPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) { + *out = *in + if in.StabilizationWindowSeconds != nil { + in, out := &in.StabilizationWindowSeconds, &out.StabilizationWindowSeconds + *out = new(int32) + **out = **in + } + if in.SelectPolicy != nil { + in, out := &in.SelectPolicy, &out.SelectPolicy + *out = new(ScalingPolicySelect) + **out = **in + } + if in.Policies != nil { + in, out := &in.Policies, &out.Policies + *out = make([]HPAScalingPolicy, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingRules. +func (in *HPAScalingRules) DeepCopy() *HPAScalingRules { + if in == nil { + return nil + } + out := new(HPAScalingRules) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. +func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscaler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerBehavior) DeepCopyInto(out *HorizontalPodAutoscalerBehavior) { + *out = *in + if in.ScaleUp != nil { + in, out := &in.ScaleUp, &out.ScaleUp + *out = new(HPAScalingRules) + (*in).DeepCopyInto(*out) + } + if in.ScaleDown != nil { + in, out := &in.ScaleDown, &out.ScaleDown + *out = new(HPAScalingRules) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerBehavior. 
+func (in *HorizontalPodAutoscalerBehavior) DeepCopy() *HorizontalPodAutoscalerBehavior { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerBehavior) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. +func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]HorizontalPodAutoscaler, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. +func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { + *out = *in + out.ScaleTargetRef = in.ScaleTargetRef + if in.MinReplicas != nil { + in, out := &in.MinReplicas, &out.MinReplicas + *out = new(int32) + **out = **in + } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = make([]MetricSpec, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Behavior != nil { + in, out := &in.Behavior, &out.Behavior + *out = new(HorizontalPodAutoscalerBehavior) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. +func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { + *out = *in + if in.ObservedGeneration != nil { + in, out := &in.ObservedGeneration, &out.ObservedGeneration + *out = new(int64) + **out = **in + } + if in.LastScaleTime != nil { + in, out := &in.LastScaleTime, &out.LastScaleTime + *out = (*in).DeepCopy() + } + if in.CurrentMetrics != nil { + in, out := &in.CurrentMetrics, &out.CurrentMetrics + *out = make([]MetricStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]HorizontalPodAutoscalerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. +func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { + if in == nil { + return nil + } + out := new(HorizontalPodAutoscalerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier. +func (in *MetricIdentifier) DeepCopy() *MetricIdentifier { + if in == nil { + return nil + } + out := new(MetricIdentifier) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricSource) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricSource) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. +func (in *MetricSpec) DeepCopy() *MetricSpec { + if in == nil { + return nil + } + out := new(MetricSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { + *out = *in + if in.Object != nil { + in, out := &in.Object, &out.Object + *out = new(ObjectMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Pods != nil { + in, out := &in.Pods, &out.Pods + *out = new(PodsMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(ResourceMetricStatus) + (*in).DeepCopyInto(*out) + } + if in.External != nil { + in, out := &in.External, &out.External + *out = new(ExternalMetricStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. 
+func (in *MetricStatus) DeepCopy() *MetricStatus { + if in == nil { + return nil + } + out := new(MetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricTarget) DeepCopyInto(out *MetricTarget) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + x := (*in).DeepCopy() + *out = &x + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + if in.AverageUtilization != nil { + in, out := &in.AverageUtilization, &out.AverageUtilization + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget. +func (in *MetricTarget) DeepCopy() *MetricTarget { + if in == nil { + return nil + } + out := new(MetricTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + x := (*in).DeepCopy() + *out = &x + } + if in.AverageValue != nil { + in, out := &in.AverageValue, &out.AverageValue + x := (*in).DeepCopy() + *out = &x + } + if in.AverageUtilization != nil { + in, out := &in.AverageUtilization, &out.AverageUtilization + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus. +func (in *MetricValueStatus) DeepCopy() *MetricValueStatus { + if in == nil { + return nil + } + out := new(MetricValueStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { + *out = *in + out.DescribedObject = in.DescribedObject + in.Target.DeepCopyInto(&out.Target) + in.Metric.DeepCopyInto(&out.Metric) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. +func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { + if in == nil { + return nil + } + out := new(ObjectMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + out.DescribedObject = in.DescribedObject + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. +func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { + if in == nil { + return nil + } + out := new(ObjectMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. 
+func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { + if in == nil { + return nil + } + out := new(PodsMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { + *out = *in + in.Metric.DeepCopyInto(&out.Metric) + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. +func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { + if in == nil { + return nil + } + out := new(PodsMetricStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { + *out = *in + in.Target.DeepCopyInto(&out.Target) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. +func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { + if in == nil { + return nil + } + out := new(ResourceMetricSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { + *out = *in + in.Current.DeepCopyInto(&out.Current) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. +func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { + if in == nil { + return nil + } + out := new(ResourceMetricStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go new file mode 100644 index 0000000..c4a8db6 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go new file mode 100644 index 0000000..35944e7 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -0,0 +1,1854 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Job) Reset() { *m = Job{} } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{0} +} +func (m *Job) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Job) XXX_Merge(src proto.Message) { + xxx_messageInfo_Job.Merge(m, src) +} +func (m *Job) XXX_Size() int { + return m.Size() +} +func (m *Job) XXX_DiscardUnknown() { + xxx_messageInfo_Job.DiscardUnknown(m) +} + +var xxx_messageInfo_Job proto.InternalMessageInfo + +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (*JobCondition) ProtoMessage() {} +func (*JobCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{1} +} +func (m *JobCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobCondition.Merge(m, src) +} +func (m *JobCondition) XXX_Size() int { + return m.Size() +} +func (m *JobCondition) XXX_DiscardUnknown() { + xxx_messageInfo_JobCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_JobCondition proto.InternalMessageInfo + +func (m *JobList) Reset() { *m = JobList{} } +func (*JobList) ProtoMessage() {} +func (*JobList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{2} +} +func (m *JobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobList.Merge(m, src) +} +func (m *JobList) XXX_Size() int { + return m.Size() +} +func (m *JobList) XXX_DiscardUnknown() { + xxx_messageInfo_JobList.DiscardUnknown(m) +} + +var xxx_messageInfo_JobList proto.InternalMessageInfo + +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (*JobSpec) ProtoMessage() {} +func (*JobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{3} +} +func (m *JobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *JobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobSpec.Merge(m, src) +} +func (m *JobSpec) XXX_Size() int { + return m.Size() +} +func (m *JobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobSpec proto.InternalMessageInfo + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b52da57c93de713, []int{4} +} +func (m *JobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobStatus.Merge(m, src) +} +func (m *JobStatus) XXX_Size() int { + return m.Size() +} +func (m *JobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_JobStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_JobStatus proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") + proto.RegisterType((*JobCondition)(nil), "k8s.io.api.batch.v1.JobCondition") + proto.RegisterType((*JobList)(nil), "k8s.io.api.batch.v1.JobList") + proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") + proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptor_3b52da57c93de713) +} + +var fileDescriptor_3b52da57c93de713 = []byte{ + // 929 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, + 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, + 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, + 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, + 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xcf, + 0x3d, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, + 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, + 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, + 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, + 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, + 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, + 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, + 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, + 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, + 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x0b, 0x95, 0x96, 0x66, 0x3c, 0x19, 0x49, + 0x80, 0x9b, 0x92, 0x6d, 0xce, 0x8e, 0xcd, 0x67, 0xce, 0xcf, 0xe0, 0x8a, 0x4b, 0x10, 0xd4, 0xc6, + 0x37, 0xa9, 0xb1, 0x96, 0xa5, 0x06, 0xaa, 0x30, 0x52, 0xba, 0xe2, 0xaf, 0xd0, 0x06, 0x8f, 0xc1, + 0xed, 0xac, 0x2b, 0xf7, 0x77, 0xcc, 0x25, 0xef, 0x6f, 0x5e, 
0x30, 0xa7, 0x1f, 0x83, 0x6b, 0xef, + 0x14, 0x4e, 0x1b, 0xf2, 0x44, 0x94, 0x0e, 0x9f, 0xa3, 0x4d, 0x2e, 0xa8, 0x98, 0xf2, 0x4e, 0x43, + 0x39, 0xe8, 0x2b, 0x1d, 0x14, 0xcb, 0xde, 0x2b, 0x3c, 0x36, 0xf3, 0x33, 0x29, 0xd4, 0xdd, 0x3f, + 0x1b, 0x68, 0xe7, 0x82, 0x39, 0x67, 0x2c, 0xf2, 0x7c, 0xe1, 0xb3, 0x08, 0x9f, 0xa2, 0x0d, 0x71, + 0x1d, 0x83, 0xfa, 0xec, 0xb6, 0xfd, 0x64, 0x5e, 0x7a, 0x70, 0x1d, 0xc3, 0xab, 0xd4, 0xd8, 0xaf, + 0x73, 0x25, 0x46, 0x14, 0x1b, 0xf7, 0xca, 0x76, 0xd6, 0x95, 0xee, 0x74, 0xb1, 0xdc, 0xab, 0xd4, + 0x58, 0x92, 0x0e, 0xb3, 0x74, 0x5a, 0x6c, 0x0a, 0x8f, 0xd0, 0x6e, 0x40, 0xb9, 0xb8, 0x4a, 0x98, + 0x03, 0x03, 0x3f, 0x84, 0xe2, 0x1b, 0x3f, 0x7c, 0xd8, 0x0c, 0xa4, 0xc2, 0x3e, 0x28, 0x1a, 0xd8, + 0xed, 0xd5, 0x8d, 0xc8, 0xa2, 0x2f, 0x9e, 0x21, 0x2c, 0x81, 0x41, 0x42, 0x23, 0x9e, 0x7f, 0x92, + 0xac, 0xb6, 0xf1, 0xda, 0xd5, 0x0e, 0x8b, 0x6a, 0xb8, 0x77, 0xcf, 0x8d, 0x2c, 0xa9, 0x80, 0xdf, + 0x47, 0x9b, 0x09, 0x50, 0xce, 0xa2, 0x4e, 0x53, 0x3d, 0x57, 0x39, 0x1d, 0xa2, 0x50, 0x52, 0xdc, + 0xe2, 0x0f, 0xd0, 0x56, 0x08, 0x9c, 0xd3, 0x11, 0x74, 0x36, 0x15, 0xf1, 0x51, 0x41, 0xdc, 0xba, + 0xcc, 0x61, 0x32, 0xbf, 0xef, 0xfe, 0xae, 0xa1, 0xad, 0x0b, 0xe6, 0xf4, 0x7c, 0x2e, 0xf0, 0x0f, + 0xf7, 0xe2, 0x6b, 0x3e, 0xec, 0x63, 0xa4, 0x5a, 0x85, 0x77, 0xbf, 0xa8, 0xd3, 0x9a, 0x23, 0xb5, + 0xe8, 0x7e, 0x89, 0x9a, 0xbe, 0x80, 0x50, 0x8e, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, + 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, + 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, + 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x1f, 0x33, + 0x8f, 0x5b, 0x21, 0x39, 0xab, 0x60, 0x52, 0xe7, 0xe0, 0x67, 0xe8, 0x80, 0xba, 0xc2, 0x9f, 0xc1, + 0xd7, 0x40, 0xbd, 0xc0, 0x8f, 0xa0, 0x0f, 0x2e, 0x8b, 0xbc, 0xfc, 0xaf, 0xd3, 0xb0, 0xdf, 0xca, + 0x52, 0xe3, 0xe0, 0xe9, 0x32, 0x02, 0x59, 0xae, 0xc3, 0xa7, 0x68, 0xc7, 0xa1, 0xee, 0x84, 0x0d, + 0x87, 0x3d, 0x3f, 0xf4, 0x45, 0x67, 0x4b, 0x35, 0xb1, 0x9f, 0xa5, 0xc6, 0x8e, 0x5d, 0xc3, 0xc9, + 0x02, 0x0b, 0xff, 0x88, 0x5a, 0x1c, 0x02, 0x70, 0x05, 0x4b, 0x8a, 0x88, 0x7d, 0xf2, 0xc0, 0xa9, + 0x50, 0x07, 0x82, 0x7e, 0x21, 0xb5, 0x77, 0xe4, 0x58, 0xe6, 0x27, 0x52, 0x5a, 0xe2, 0xcf, 0xd1, + 0x5e, 0x48, 0xa3, 0x29, 0x2d, 0x99, 0x2a, 0x5b, 0x2d, 0x1b, 0x67, 0xa9, 0xb1, 0x77, 0xb9, 0x70, + 0x43, 0xee, 0x30, 0xf1, 0xb7, 0xa8, 0x25, 0x20, 0x8c, 0x03, 0x2a, 0xf2, 0xa0, 0x6d, 0x9f, 0xbc, + 0x57, 0x9f, 0xaa, 0xfc, 0xbf, 0xca, 0x46, 0xae, 0x98, 0x37, 0x28, 0x68, 0x6a, 0x31, 0x95, 0x29, + 0x99, 0xa3, 0xa4, 0xb4, 0xc1, 0xcf, 0xd1, 0x63, 0x21, 0x82, 0xe2, 0xc5, 0x9e, 0x0e, 0x05, 0x24, + 0xe7, 0x7e, 0xe4, 0xf3, 0x31, 0x78, 0x9d, 0x96, 0x7a, 0xae, 0xb7, 0xb3, 0xd4, 0x78, 0x3c, 0x18, + 0xf4, 0x96, 0x51, 0xc8, 0x2a, 0x6d, 0xf7, 0xb7, 0x06, 0x6a, 0x97, 0x5b, 0x0d, 0x3f, 0x47, 0xc8, + 0x9d, 0xef, 0x10, 0xde, 0xd1, 0x54, 0x1e, 0xdf, 0x5d, 0x95, 0xc7, 0x72, 0xdb, 0x54, 0xab, 0xb9, + 0x84, 0x38, 0xa9, 0x19, 0xe1, 0xef, 0x50, 0x9b, 0x0b, 0x9a, 0x08, 0xb5, 0x0d, 0xd6, 0x5f, 0x7b, + 0x1b, 0xec, 0x66, 0xa9, 0xd1, 0xee, 0xcf, 0x0d, 0x48, 0xe5, 0x85, 0x87, 0x68, 0xaf, 0x0a, 0xe6, + 0xff, 0xdc, 0x6c, 0x6a, 0x9e, 0x67, 0x0b, 0x2e, 0xe4, 0x8e, 0xab, 0xdc, 0x2f, 0x79, 0x72, 0x55, + 0xd0, 0x9a, 0xd5, 0x7e, 0xc9, 0x63, 0x4e, 0x8a, 0x5b, 0x6c, 0xa1, 0x36, 0x9f, 0xba, 0x2e, 0x80, + 0x07, 0x9e, 0x8a, 0x4b, 0xd3, 0x7e, 0xa3, 0xa0, 0xb6, 0xfb, 0xf3, 0x0b, 0x52, 0x71, 0xa4, 0xf1, + 0x90, 0xfa, 0x01, 0x78, 0x2a, 0x26, 0x35, 0xe3, 0x73, 0x85, 0x92, 0xe2, 0xd6, 0x3e, 
0xba, 0xb9, + 0xd5, 0xd7, 0x5e, 0xdc, 0xea, 0x6b, 0x2f, 0x6f, 0xf5, 0xb5, 0x5f, 0x32, 0x5d, 0xbb, 0xc9, 0x74, + 0xed, 0x45, 0xa6, 0x6b, 0x2f, 0x33, 0x5d, 0xfb, 0x2b, 0xd3, 0xb5, 0x5f, 0xff, 0xd6, 0xd7, 0xbe, + 0x5f, 0x9f, 0x1d, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x73, 0xe7, 0x7a, 0xb8, 0x08, 0x00, + 0x00, +} + +func (m *Job) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Job) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JobCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JobList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JobSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TTLSecondsAfterFinished != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) + i-- + dAtA[i] = 0x40 + } + if m.BackoffLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) + i-- + dAtA[i] = 0x38 + } + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if m.ManualSelector != nil { + i-- + if *m.ManualSelector { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x18 + } + if m.Completions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + i-- + dAtA[i] = 0x10 + } + if m.Parallelism != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *JobStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) + i-- + dAtA[i] = 0x20 + if m.CompletionTime != nil { + { + size, err := m.CompletionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.StartTime != nil { + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = 
uint8(v) + return base +} +func (m *Job) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *JobSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Parallelism != nil { + n += 1 + sovGenerated(uint64(*m.Parallelism)) + } + if m.Completions != nil { + n += 1 + sovGenerated(uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ManualSelector != nil { + n += 2 + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.BackoffLimit != nil { + n += 1 + sovGenerated(uint64(*m.BackoffLimit)) + } + if m.TTLSecondsAfterFinished != nil { + n += 1 + sovGenerated(uint64(*m.TTLSecondsAfterFinished)) + } + return n +} + +func (m *JobStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CompletionTime != nil { + l = m.CompletionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Active)) + n += 1 + sovGenerated(uint64(m.Succeeded)) + n += 1 + sovGenerated(uint64(m.Failed)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Job) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Job{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", 
"v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *JobList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Job{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Job", "Job", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&JobList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *JobSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobSpec{`, + `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, + `Completions:` + valueToStringGenerated(this.Completions) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, + `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, + `}`, + }, "") + return s +} +func (this *JobStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]JobCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "JobCondition", "JobCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&JobStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "v1.Time", 1) + `,`, + `Active:` + fmt.Sprintf("%v", this.Active) + `,`, + `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, + `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Job) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Job: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = JobConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 
{ + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Job{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Parallelism = &v + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Completions = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ManualSelector = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.BackoffLimit = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTLSecondsAfterFinished", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTLSecondsAfterFinished = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, JobCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &v1.Time{} + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CompletionTime == nil { + m.CompletionTime = &v1.Time{} + } + if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + m.Active = 0 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Active |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) + } + m.Succeeded = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Succeeded |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) + } + m.Failed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Failed |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto new file mode 100644 index 0000000..75de45f --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -0,0 +1,184 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
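All of the hand-rolled Unmarshal methods above, as well as skipGenerated, decode field headers and integer fields with the same base-128 varint loop: accumulate seven bits per byte until a byte with the high bit clear is seen, then split the result into field number and wire type. The following standalone sketch isolates that loop; decodeVarint is an illustrative helper, not part of the vendored code, and the sample bytes are chosen to match the Job message above (tag 0x1a is field 3, wire type 2, i.e. Status).

package main

import "fmt"

// decodeVarint reads a protobuf base-128 varint, the same loop the
// generated Unmarshal methods use for every field header and varint field.
func decodeVarint(b []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		if n >= len(b) {
			return 0, 0 // truncated input
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	return v, n
}

func main() {
	// 0x1a encodes field 3, wire type 2 (length-delimited), e.g. Job.Status.
	wire, _ := decodeVarint([]byte{0x1a})
	fmt.Printf("field=%d wireType=%d\n", wire>>3, wire&0x7)

	// 0xac 0x02 is the two-byte varint for 300.
	v, _ := decodeVarint([]byte{0xac, 0x02})
	fmt.Println(v) // 300
}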
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.batch.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Job represents the configuration of a single job. +message Job { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of a job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional JobSpec spec = 2; + + // Current status of a job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional JobStatus status = 3; +} + +// JobCondition describes current state of a job. +message JobCondition { + // Type of job condition, Complete or Failed. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition was checked. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transit from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// JobList is a collection of jobs. +message JobList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of Jobs. + repeated Job items = 2; +} + +// JobSpec describes how the job execution will look like. +message JobSpec { + // Specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + optional int32 parallelism = 1; + + // Specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. 
Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + optional int32 completions = 2; + + // Specifies the duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + // +optional + optional int64 activeDeadlineSeconds = 3; + + // Specifies the number of retries before marking this job failed. + // Defaults to 6 + // +optional + optional int32 backoffLimit = 7; + + // A label query over pods that should match the pod count. + // Normally, the system sets this field for you. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // manualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector + // +optional + optional bool manualSelector = 5; + + // Describes the pod that will be created when executing a job. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + optional k8s.io.api.core.v1.PodTemplateSpec template = 6; + + // ttlSecondsAfterFinished limits the lifetime of a Job that has finished + // execution (either Complete or Failed). If this field is set, + // ttlSecondsAfterFinished after the Job finishes, it is eligible to be + // automatically deleted. When the Job is being deleted, its lifecycle + // guarantees (e.g. finalizers) will be honored. If this field is unset, + // the Job won't be automatically deleted. If this field is set to zero, + // the Job becomes eligible to be deleted immediately after it finishes. + // This field is alpha-level and is only honored by servers that enable the + // TTLAfterFinished feature. + // +optional + optional int32 ttlSecondsAfterFinished = 8; +} + +// JobStatus represents the current state of a Job. +message JobStatus { + // The latest available observations of an object's current state. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated JobCondition conditions = 1; + + // Represents time when the job was acknowledged by the job controller. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; + + // Represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; + + // The number of actively running pods. + // +optional + optional int32 active = 4; + + // The number of pods which reached phase Succeeded. + // +optional + optional int32 succeeded = 5; + + // The number of pods which reached phase Failed. + // +optional + optional int32 failed = 6; +} + diff --git a/vendor/k8s.io/api/batch/v1/register.go b/vendor/k8s.io/api/batch/v1/register.go new file mode 100644 index 0000000..32fa51f --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Job{}, + &JobList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go new file mode 100644 index 0000000..646a3cd --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -0,0 +1,193 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Job represents the configuration of a single job. +type Job struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
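As an aside on the register.go hunk above: SchemeBuilder and AddToScheme are what client machinery calls to teach a runtime.Scheme the batch/v1 kinds. A minimal usage sketch, assuming the vendored k8s.io/api and k8s.io/apimachinery packages resolve in this module:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register Job and JobList (and the meta/v1 list kinds) into a fresh scheme.
	scheme := runtime.NewScheme()
	if err := batchv1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The scheme can now map a Go object back to its Group/Version/Kind.
	gvks, _, err := scheme.ObjectKinds(&batchv1.Job{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // expected to include batch/v1, Kind=Job
}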
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of a job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Current status of a job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// JobList is a collection of jobs. +type JobList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of Jobs. + Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// JobSpec describes how the job execution will look like. +type JobSpec struct { + + // Specifies the maximum desired number of pods the job should + // run at any given time. The actual number of pods running in steady state will + // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), + // i.e. when the work left to do is less than max parallelism. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` + + // Specifies the desired number of successfully finished pods the + // job should be run with. Setting to nil means that the success of any + // pod signals the success of all pods, and allows parallelism to have any positive + // value. Setting to 1 means that parallelism is limited to 1 and the success of that + // pod signals the success of the job. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` + + // Specifies the duration in seconds relative to the startTime that the job may be active + // before the system tries to terminate it; value must be positive integer + // +optional + ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` + + // Specifies the number of retries before marking this job failed. + // Defaults to 6 + // +optional + BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` + + // TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed + // Optional number of failed pods to retain. + // +optional + // FailedPodsLimit *int32 `json:"failedPodsLimit,omitempty" protobuf:"varint,9,opt,name=failedPodsLimit"` + + // A label query over pods that should match the pod count. + // Normally, the system sets this field for you. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + + // manualSelector controls generation of pod labels and pod selectors. + // Leave `manualSelector` unset unless you are certain what you are doing. + // When false or unset, the system pick labels unique to this job + // and appends those labels to the pod template. When true, + // the user is responsible for picking unique labels and specifying + // the selector. Failure to pick a unique label may cause this + // and other jobs to not function correctly. However, You may see + // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` + // API. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector + // +optional + ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` + + // Describes the pod that will be created when executing a job. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` + + // ttlSecondsAfterFinished limits the lifetime of a Job that has finished + // execution (either Complete or Failed). If this field is set, + // ttlSecondsAfterFinished after the Job finishes, it is eligible to be + // automatically deleted. When the Job is being deleted, its lifecycle + // guarantees (e.g. finalizers) will be honored. If this field is unset, + // the Job won't be automatically deleted. If this field is set to zero, + // the Job becomes eligible to be deleted immediately after it finishes. + // This field is alpha-level and is only honored by servers that enable the + // TTLAfterFinished feature. + // +optional + TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` +} + +// JobStatus represents the current state of a Job. +type JobStatus struct { + // The latest available observations of an object's current state. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` + + // Represents time when the job was acknowledged by the job controller. + // It is not guaranteed to be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` + + // Represents time when the job was completed. It is not guaranteed to + // be set in happens-before order across separate operations. + // It is represented in RFC3339 form and is in UTC. + // +optional + CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` + + // The number of actively running pods. + // +optional + Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` + + // The number of pods which reached phase Succeeded. + // +optional + Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` + + // The number of pods which reached phase Failed. 
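The struct tags in types.go drive both encodings: the json tags (with omitempty on the pointer fields) are what encoding/json consumes, while the protobuf tags carry the field numbers and wire types handled by the generated Marshal/Unmarshal code. A small round-trip sketch, again assuming the vendored packages are importable:

package main

import (
	"encoding/json"
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	parallelism := int32(2)
	job := batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{Name: "example"},
		Spec: batchv1.JobSpec{
			// Optional fields are pointers so that omitempty can drop them
			// entirely when they are unset.
			Parallelism: &parallelism,
		},
	}

	out, err := json.MarshalIndent(job, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}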
+ // +optional + Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` +} + +type JobConditionType string + +// These are valid conditions of a job. +const ( + // JobComplete means the job has completed its execution. + JobComplete JobConditionType = "Complete" + // JobFailed means the job has failed its execution. + JobFailed JobConditionType = "Failed" +) + +// JobCondition describes current state of a job. +type JobCondition struct { + // Type of job condition, Complete or Failed. + Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition was checked. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..0120e07 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Job = map[string]string{ + "": "Job represents the configuration of a single job.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a job. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Job) SwaggerDoc() map[string]string { + return map_Job +} + +var map_JobCondition = map[string]string{ + "": "JobCondition describes current state of a job.", + "type": "Type of job condition, Complete or Failed.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastProbeTime": "Last time the condition was checked.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (JobCondition) SwaggerDoc() map[string]string { + return map_JobCondition +} + +var map_JobList = map[string]string{ + "": "JobList is a collection of jobs.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of Jobs.", +} + +func (JobList) SwaggerDoc() map[string]string { + return map_JobList +} + +var map_JobSpec = map[string]string{ + "": "JobSpec describes how the job execution will look like.", + "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", + "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", + "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", + "template": "Describes the pod that will be created when executing a job. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.", +} + +func (JobSpec) SwaggerDoc() map[string]string { + return map_JobSpec +} + +var map_JobStatus = map[string]string{ + "": "JobStatus represents the current state of a Job.", + "conditions": "The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", + "startTime": "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", + "active": "The number of actively running pods.", + "succeeded": "The number of pods which reached phase Succeeded.", + "failed": "The number of pods which reached phase Failed.", +} + +func (JobStatus) SwaggerDoc() map[string]string { + return map_JobStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..beba55a --- /dev/null +++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go @@ -0,0 +1,188 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Job) DeepCopyInto(out *Job) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job. +func (in *Job) DeepCopy() *Job { + if in == nil { + return nil + } + out := new(Job) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
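The map_* tables above are surfaced through the generated SwaggerDoc() methods, which (per the file's own comment) go-restful based servers use to attach field documentation. They can also be read directly; a tiny sketch:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	// SwaggerDoc returns the generated field-description map; the empty key
	// holds the type-level description.
	docs := batchv1.JobSpec{}.SwaggerDoc()
	fmt.Println(docs[""])
	fmt.Println(docs["backoffLimit"])
}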
+func (in *Job) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobCondition) DeepCopyInto(out *JobCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobCondition. +func (in *JobCondition) DeepCopy() *JobCondition { + if in == nil { + return nil + } + out := new(JobCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobList) DeepCopyInto(out *JobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Job, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList. +func (in *JobList) DeepCopy() *JobList { + if in == nil { + return nil + } + out := new(JobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobSpec) DeepCopyInto(out *JobSpec) { + *out = *in + if in.Parallelism != nil { + in, out := &in.Parallelism, &out.Parallelism + *out = new(int32) + **out = **in + } + if in.Completions != nil { + in, out := &in.Completions, &out.Completions + *out = new(int32) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.BackoffLimit != nil { + in, out := &in.BackoffLimit, &out.BackoffLimit + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.ManualSelector != nil { + in, out := &in.ManualSelector, &out.ManualSelector + *out = new(bool) + **out = **in + } + in.Template.DeepCopyInto(&out.Template) + if in.TTLSecondsAfterFinished != nil { + in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec. +func (in *JobSpec) DeepCopy() *JobSpec { + if in == nil { + return nil + } + out := new(JobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *JobStatus) DeepCopyInto(out *JobStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]JobCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.CompletionTime != nil { + in, out := &in.CompletionTime, &out.CompletionTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. +func (in *JobStatus) DeepCopy() *JobStatus { + if in == nil { + return nil + } + out := new(JobStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go new file mode 100644 index 0000000..258ff02 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v1beta1 // import "k8s.io/api/batch/v1beta1" diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go new file mode 100644 index 0000000..69c4054 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go @@ -0,0 +1,1743 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
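The zz_generated.deepcopy.go functions above exist so that a copied object never aliases the original, including through pointer fields such as JobSpec.BackoffLimit, which DeepCopyInto allocates anew. A short sketch of that guarantee, assuming the vendored packages build:

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
)

func main() {
	limit := int32(6)
	orig := &batchv1.Job{Spec: batchv1.JobSpec{BackoffLimit: &limit}}

	// DeepCopy clones pointer fields too, so mutating the copy must not
	// leak back into the original object.
	cp := orig.DeepCopy()
	*cp.Spec.BackoffLimit = 1

	fmt.Println(*orig.Spec.BackoffLimit, *cp.Spec.BackoffLimit) // 6 1
}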
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJob proto.InternalMessageInfo + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJobList proto.InternalMessageInfo + +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo + +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_e57b277b05179ae7, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob") + proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList") + proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec") + proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_e57b277b05179ae7) +} + +var fileDescriptor_e57b277b05179ae7 = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, + 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, + 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, + 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, + 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, + 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, + 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, + 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, + 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, + 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, + 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, + 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, + 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, + 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 
0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, + 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, + 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, + 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, + 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, + 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, + 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, + 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, + 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, + 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, + 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, + 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, + 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, + 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, + 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, + 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, + 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, + 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, + 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, + 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, + 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, + 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, + 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, + 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, + 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, + 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, + 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, + 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, + 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, + 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, + 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, + 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, + 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, + 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, + 0x07, 0x00, 0x00, +} + +func (m *CronJob) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJob) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CronJobList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 + } + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.Suspend != nil { + i-- + if *m.Suspend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *JobTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CronJob) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if 
m.StartingDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) + } + l = len(m.ConcurrencyPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Suspend != nil { + n += 2 + } + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) + } + return n +} + +func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Active) > 0 { + for _, e := range m.Active { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastScheduleTime != nil { + l = m.LastScheduleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CronJob) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJob{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CronJobList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CronJobSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobSpec{`, + `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, + `StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, + `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, + `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, + `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, + `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += 
fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" + s := strings.Join([]string{`&CronJobStatus{`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CronJob) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CronJob{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) + } + var v int32 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulJobsHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedJobsHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, v11.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScheduleTime == nil { + m.LastScheduleTime = &v1.Time{} + } + if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto new file mode 100644 index 0000000..995b4f3 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -0,0 +1,137 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.batch.v1beta1; + +import "k8s.io/api/batch/v1/generated.proto"; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// CronJob represents the configuration of a single cron job. +message CronJob { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of a cron job, including the schedule. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional CronJobSpec spec = 2; + + // Current status of a cron job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional CronJobStatus status = 3; +} + +// CronJobList is a collection of cron jobs. +message CronJobList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CronJobs. + repeated CronJob items = 2; +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +message CronJobSpec { + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + optional string schedule = 1; + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + optional int64 startingDeadlineSeconds = 2; + + // Specifies how to treat concurrent executions of a Job. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one + // +optional + optional string concurrencyPolicy = 3; + + // This flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + optional bool suspend = 4; + + // Specifies the job that will be created when executing a CronJob. + optional JobTemplateSpec jobTemplate = 5; + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 3. + // +optional + optional int32 successfulJobsHistoryLimit = 6; + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. 
+ // Defaults to 1. + // +optional + optional int32 failedJobsHistoryLimit = 7; +} + +// CronJobStatus represents the current state of a cron job. +message CronJobStatus { + // A list of pointers to currently running jobs. + // +optional + repeated k8s.io.api.core.v1.ObjectReference active = 1; + + // Information when was the last time the job was successfully scheduled. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; +} + +// JobTemplate describes a template for creating copies of a predefined pod. +message JobTemplate { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Defines jobs that will be created from this template. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional JobTemplateSpec template = 2; +} + +// JobTemplateSpec describes the data a Job should have when created from a template +message JobTemplateSpec { + // Standard object's metadata of the jobs created from this template. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional k8s.io.api.batch.v1.JobSpec spec = 2; +} + diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go new file mode 100644 index 0000000..226de49 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &JobTemplate{}, + &CronJob{}, + &CronJobList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go new file mode 100644 index 0000000..2978747 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -0,0 +1,158 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + batchv1 "k8s.io/api/batch/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Defines jobs that will be created from this template. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CronJob represents the configuration of a single cron job. +type CronJob struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of a cron job, including the schedule. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Current status of a cron job. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CronJobList is a collection of cron jobs. +type CronJobList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CronJobs. + Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +type CronJobSpec struct { + + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` + + // Specifies how to treat concurrent executions of a Job. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one + // +optional + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` + + // This flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` + + // Specifies the job that will be created when executing a CronJob. + JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 3. + // +optional + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 1. + // +optional + FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` +} + +// ConcurrencyPolicy describes how the job will be handled. +// Only one of the following concurrent policies may be specified. +// If none of the following policies is specified, the default one +// is AllowConcurrent. +type ConcurrencyPolicy string + +const ( + // AllowConcurrent allows CronJobs to run concurrently. + AllowConcurrent ConcurrencyPolicy = "Allow" + + // ForbidConcurrent forbids concurrent runs, skipping next run if previous + // hasn't finished yet. + ForbidConcurrent ConcurrencyPolicy = "Forbid" + + // ReplaceConcurrent cancels currently running job and replaces it with a new one. + ReplaceConcurrent ConcurrencyPolicy = "Replace" +) + +// CronJobStatus represents the current state of a cron job. 
+type CronJobStatus struct { + // A list of pointers to currently running jobs. + // +optional + Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` + + // Information when was the last time the job was successfully scheduled. + // +optional + LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` +} diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..ecc9144 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,96 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CronJob = map[string]string{ + "": "CronJob represents the configuration of a single cron job.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (CronJob) SwaggerDoc() map[string]string { + return map_CronJob +} + +var map_CronJobList = map[string]string{ + "": "CronJobList is a collection of cron jobs.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CronJobs.", +} + +func (CronJobList) SwaggerDoc() map[string]string { + return map_CronJobList +} + +var map_CronJobSpec = map[string]string{ + "": "CronJobSpec describes how the job execution will look like and when it will actually run.", + "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. 
Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", + "jobTemplate": "Specifies the job that will be created when executing a CronJob.", + "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", + "failedJobsHistoryLimit": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", +} + +func (CronJobSpec) SwaggerDoc() map[string]string { + return map_CronJobSpec +} + +var map_CronJobStatus = map[string]string{ + "": "CronJobStatus represents the current state of a cron job.", + "active": "A list of pointers to currently running jobs.", + "lastScheduleTime": "Information when was the last time the job was successfully scheduled.", +} + +func (CronJobStatus) SwaggerDoc() map[string]string { + return map_CronJobStatus +} + +var map_JobTemplate = map[string]string{ + "": "JobTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (JobTemplate) SwaggerDoc() map[string]string { + return map_JobTemplate +} + +var map_JobTemplateSpec = map[string]string{ + "": "JobTemplateSpec describes the data a Job should have when created from a template", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (JobTemplateSpec) SwaggerDoc() map[string]string { + return map_JobTemplateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..7c9dcb7 --- /dev/null +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,194 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJob) DeepCopyInto(out *CronJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob. +func (in *CronJob) DeepCopy() *CronJob { + if in == nil { + return nil + } + out := new(CronJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobList) DeepCopyInto(out *CronJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList. +func (in *CronJobList) DeepCopy() *CronJobList { + if in == nil { + return nil + } + out := new(CronJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) { + *out = *in + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + in.JobTemplate.DeepCopyInto(&out.JobTemplate) + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec. +func (in *CronJobSpec) DeepCopy() *CronJobSpec { + if in == nil { + return nil + } + out := new(CronJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus. 
+func (in *CronJobStatus) DeepCopy() *CronJobStatus { + if in == nil { + return nil + } + out := new(CronJobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. +func (in *JobTemplate) DeepCopy() *JobTemplate { + if in == nil { + return nil + } + out := new(JobTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec. +func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { + if in == nil { + return nil + } + out := new(JobTemplateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go new file mode 100644 index 0000000..3044b0c --- /dev/null +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go new file mode 100644 index 0000000..3e58dbb --- /dev/null +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -0,0 +1,1743 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto + +package v2alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{0} +} +func (m *CronJob) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJob) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJob.Merge(m, src) +} +func (m *CronJob) XXX_Size() int { + return m.Size() +} +func (m *CronJob) XXX_DiscardUnknown() { + xxx_messageInfo_CronJob.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJob proto.InternalMessageInfo + +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{1} +} +func (m *CronJobList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobList.Merge(m, src) +} +func (m *CronJobList) XXX_Size() int { + return m.Size() +} +func (m *CronJobList) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobList.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJobList proto.InternalMessageInfo + +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{2} +} +func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobSpec.Merge(m, src) +} +func (m *CronJobSpec) XXX_Size() int { + return m.Size() +} +func (m *CronJobSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo + +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{3} +} +func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CronJobStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CronJobStatus.Merge(m, src) +} +func (m *CronJobStatus) XXX_Size() int { + return m.Size() +} +func (m *CronJobStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CronJobStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo + +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{4} +} +func (m *JobTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplate.Merge(m, src) +} +func (m *JobTemplate) XXX_Size() int { + return m.Size() +} +func (m *JobTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplate proto.InternalMessageInfo + +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5495a0550fe29c46, []int{5} +} +func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_JobTemplateSpec.Merge(m, src) +} +func (m *JobTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *JobTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") + proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v2alpha1.CronJobList") + proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v2alpha1.CronJobSpec") + proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v2alpha1.CronJobStatus") + proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") + proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptor_5495a0550fe29c46) +} + +var fileDescriptor_5495a0550fe29c46 = []byte{ + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, + 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, + 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, + 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, + 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, + 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, + 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, + 
0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, + 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, + 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, + 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, + 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, + 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, + 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, + 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, + 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, + 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, + 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, + 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, + 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, + 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, + 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, + 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, + 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, + 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, + 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, + 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, + 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, + 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, + 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, + 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, + 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, + 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, + 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, + 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, + 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, + 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, + 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, + 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, + 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, + 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, + 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, + 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, + 0x8c, 0x46, 0x2a, 0xfb, 
0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, + 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, + 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, + 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, + 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, + 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, +} + +func (m *CronJob) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CronJobList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FailedJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + i-- + dAtA[i] = 0x38 + } + if m.SuccessfulJobsHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + { + size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + if m.Suspend != nil { + i-- + if *m.Suspend { + dAtA[i] = 1 + } else { + 
dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + i -= len(m.ConcurrencyPolicy) + copy(dAtA[i:], m.ConcurrencyPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i-- + dAtA[i] = 0x1a + if m.StartingDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Schedule) + copy(dAtA[i:], m.Schedule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LastScheduleTime != nil { + { + size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.Active) > 0 { + for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *JobTemplate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} 
+func (m *CronJob) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CronJobList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CronJobSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Schedule) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartingDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) + } + l = len(m.ConcurrencyPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.Suspend != nil { + n += 2 + } + l = m.JobTemplate.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.SuccessfulJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) + } + return n +} + +func (m *CronJobStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Active) > 0 { + for _, e := range m.Active { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LastScheduleTime != nil { + l = m.LastScheduleTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *JobTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *JobTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CronJob) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJob{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CronJob{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CronJobList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CronJobSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CronJobSpec{`, + `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, + `StartingDeadlineSeconds:` + 
valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, + `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, + `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, + `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, + `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *CronJobStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForActive := "[]ObjectReference{" + for _, f := range this.Active { + repeatedStringForActive += fmt.Sprintf("%v", f) + "," + } + repeatedStringForActive += "}" + s := strings.Join([]string{`&CronJobStatus{`, + `Active:` + repeatedStringForActive + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *JobTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&JobTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CronJob) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJob: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } 
+ var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Items = append(m.Items, CronJob{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Schedule = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StartingDeadlineSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Suspend = &b + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SuccessfulJobsHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FailedJobsHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CronJobStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Active = append(m.Active, v11.ObjectReference{}) + if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LastScheduleTime == nil { + m.LastScheduleTime = &v1.Time{} + } + if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = 
fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto new file mode 100644 index 0000000..0bba13b --- /dev/null +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -0,0 +1,135 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.batch.v2alpha1; + +import "k8s.io/api/batch/v1/generated.proto"; +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v2alpha1"; + +// CronJob represents the configuration of a single cron job. +message CronJob { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of a cron job, including the schedule. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional CronJobSpec spec = 2; + + // Current status of a cron job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional CronJobStatus status = 3; +} + +// CronJobList is a collection of cron jobs. +message CronJobList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CronJobs. + repeated CronJob items = 2; +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +message CronJobSpec { + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + optional string schedule = 1; + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + optional int64 startingDeadlineSeconds = 2; + + // Specifies how to treat concurrent executions of a Job. 
+ // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one + // +optional + optional string concurrencyPolicy = 3; + + // This flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + optional bool suspend = 4; + + // Specifies the job that will be created when executing a CronJob. + optional JobTemplateSpec jobTemplate = 5; + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 successfulJobsHistoryLimit = 6; + + // The number of failed finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + optional int32 failedJobsHistoryLimit = 7; +} + +// CronJobStatus represents the current state of a cron job. +message CronJobStatus { + // A list of pointers to currently running jobs. + // +optional + repeated k8s.io.api.core.v1.ObjectReference active = 1; + + // Information when was the last time the job was successfully scheduled. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; +} + +// JobTemplate describes a template for creating copies of a predefined pod. +message JobTemplate { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Defines jobs that will be created from this template. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional JobTemplateSpec template = 2; +} + +// JobTemplateSpec describes the data a Job should have when created from a template +message JobTemplateSpec { + // Standard object's metadata of the jobs created from this template. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional k8s.io.api.batch.v1.JobSpec spec = 2; +} + diff --git a/vendor/k8s.io/api/batch/v2alpha1/register.go b/vendor/k8s.io/api/batch/v2alpha1/register.go new file mode 100644 index 0000000..ac7fa50 --- /dev/null +++ b/vendor/k8s.io/api/batch/v2alpha1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v2alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "batch" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &JobTemplate{}, + &CronJob{}, + &CronJobList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go new file mode 100644 index 0000000..465e614 --- /dev/null +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -0,0 +1,156 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v2alpha1 + +import ( + batchv1 "k8s.io/api/batch/v1" + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// JobTemplate describes a template for creating copies of a predefined pod. +type JobTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Defines jobs that will be created from this template. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// JobTemplateSpec describes the data a Job should have when created from a template +type JobTemplateSpec struct { + // Standard object's metadata of the jobs created from this template. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the job. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CronJob represents the configuration of a single cron job. +type CronJob struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of a cron job, including the schedule. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Current status of a cron job. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CronJobList is a collection of cron jobs. +type CronJobList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CronJobs. + Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CronJobSpec describes how the job execution will look like and when it will actually run. +type CronJobSpec struct { + + // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. + Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` + + // Optional deadline in seconds for starting the job if it misses scheduled + // time for any reason. Missed jobs executions will be counted as failed ones. + // +optional + StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` + + // Specifies how to treat concurrent executions of a Job. + // Valid values are: + // - "Allow" (default): allows CronJobs to run concurrently; + // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; + // - "Replace": cancels currently running job and replaces it with a new one + // +optional + ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` + + // This flag tells the controller to suspend subsequent executions, it does + // not apply to already started executions. Defaults to false. + // +optional + Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` + + // Specifies the job that will be created when executing a CronJob. + JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` + + // The number of successful finished jobs to retain. + // This is a pointer to distinguish between explicit zero and not specified. + // +optional + SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` + + // The number of failed finished jobs to retain. 
+	// This is a pointer to distinguish between explicit zero and not specified.
+	// +optional
+	FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"`
+}
+
+// ConcurrencyPolicy describes how the job will be handled.
+// Only one of the following concurrent policies may be specified.
+// If none of the following policies is specified, the default one
+// is AllowConcurrent.
+type ConcurrencyPolicy string
+
+const (
+	// AllowConcurrent allows CronJobs to run concurrently.
+	AllowConcurrent ConcurrencyPolicy = "Allow"
+
+	// ForbidConcurrent forbids concurrent runs, skipping next run if previous
+	// hasn't finished yet.
+	ForbidConcurrent ConcurrencyPolicy = "Forbid"
+
+	// ReplaceConcurrent cancels currently running job and replaces it with a new one.
+	ReplaceConcurrent ConcurrencyPolicy = "Replace"
+)
+
+// CronJobStatus represents the current state of a cron job.
+type CronJobStatus struct {
+	// A list of pointers to currently running jobs.
+	// +optional
+	Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"`
+
+	// Information when was the last time the job was successfully scheduled.
+	// +optional
+	LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"`
+}
diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go
new file mode 100644
index 0000000..bc80eca
--- /dev/null
+++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go
@@ -0,0 +1,96 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v2alpha1
+
+// This file contains a collection of methods that can be used from go-restful to
+// generate Swagger API documentation for its models. Please read this PR for more
+// information on the implementation: https://github.com/emicklei/go-restful/pull/215
+//
+// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
+// they are on one line! For multiple line or blocks that you want to ignore use ---.
+// Any context after a --- is ignored.
+//
+// Those methods can be generated by using hack/update-generated-swagger-docs.sh
+
+// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
+var map_CronJob = map[string]string{
+	"":         "CronJob represents the configuration of a single cron job.",
+	"metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"spec":     "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+	"status":   "Current status of a cron job.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (CronJob) SwaggerDoc() map[string]string { + return map_CronJob +} + +var map_CronJobList = map[string]string{ + "": "CronJobList is a collection of cron jobs.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CronJobs.", +} + +func (CronJobList) SwaggerDoc() map[string]string { + return map_CronJobList +} + +var map_CronJobSpec = map[string]string{ + "": "CronJobSpec describes how the job execution will look like and when it will actually run.", + "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", + "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", + "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", + "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", + "jobTemplate": "Specifies the job that will be created when executing a CronJob.", + "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", + "failedJobsHistoryLimit": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", +} + +func (CronJobSpec) SwaggerDoc() map[string]string { + return map_CronJobSpec +} + +var map_CronJobStatus = map[string]string{ + "": "CronJobStatus represents the current state of a cron job.", + "active": "A list of pointers to currently running jobs.", + "lastScheduleTime": "Information when was the last time the job was successfully scheduled.", +} + +func (CronJobStatus) SwaggerDoc() map[string]string { + return map_CronJobStatus +} + +var map_JobTemplate = map[string]string{ + "": "JobTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (JobTemplate) SwaggerDoc() map[string]string { + return map_JobTemplate +} + +var map_JobTemplateSpec = map[string]string{ + "": "JobTemplateSpec describes the data a Job should have when created from a template", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (JobTemplateSpec) SwaggerDoc() map[string]string { + return map_JobTemplateSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..1b03f67 --- /dev/null +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -0,0 +1,194 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJob) DeepCopyInto(out *CronJob) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob. +func (in *CronJob) DeepCopy() *CronJob { + if in == nil { + return nil + } + out := new(CronJob) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronJob) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobList) DeepCopyInto(out *CronJobList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CronJob, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList. +func (in *CronJobList) DeepCopy() *CronJobList { + if in == nil { + return nil + } + out := new(CronJobList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CronJobList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) { + *out = *in + if in.StartingDeadlineSeconds != nil { + in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.Suspend != nil { + in, out := &in.Suspend, &out.Suspend + *out = new(bool) + **out = **in + } + in.JobTemplate.DeepCopyInto(&out.JobTemplate) + if in.SuccessfulJobsHistoryLimit != nil { + in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit + *out = new(int32) + **out = **in + } + if in.FailedJobsHistoryLimit != nil { + in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec. +func (in *CronJobSpec) DeepCopy() *CronJobSpec { + if in == nil { + return nil + } + out := new(CronJobSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) { + *out = *in + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = make([]v1.ObjectReference, len(*in)) + copy(*out, *in) + } + if in.LastScheduleTime != nil { + in, out := &in.LastScheduleTime, &out.LastScheduleTime + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus. +func (in *CronJobStatus) DeepCopy() *CronJobStatus { + if in == nil { + return nil + } + out := new(CronJobStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. +func (in *JobTemplate) DeepCopy() *JobTemplate { + if in == nil { + return nil + } + out := new(JobTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *JobTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec. +func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { + if in == nil { + return nil + } + out := new(JobTemplateSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go new file mode 100644 index 0000000..9055248 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=certificates.k8s.io + +package v1beta1 // import "k8s.io/api/certificates/v1beta1" diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go new file mode 100644 index 0000000..24fa4bf --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -0,0 +1,1956 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } +func (*CertificateSigningRequest) ProtoMessage() {} +func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{0} +} +func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequest.Merge(m, src) +} +func (m *CertificateSigningRequest) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo + +func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } +func (*CertificateSigningRequestCondition) ProtoMessage() {} +func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{1} +} +func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestCondition.Merge(m, src) +} +func (m *CertificateSigningRequestCondition) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestCondition) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo + +func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } +func (*CertificateSigningRequestList) ProtoMessage() {} +func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{2} +} +func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestList.Merge(m, src) +} +func (m *CertificateSigningRequestList) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestList) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestList.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo + +func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } +func (*CertificateSigningRequestSpec) ProtoMessage() {} +func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{3} +} +func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*CertificateSigningRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestSpec.Merge(m, src) +} +func (m *CertificateSigningRequestSpec) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo + +func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } +func (*CertificateSigningRequestStatus) ProtoMessage() {} +func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{4} +} +func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CertificateSigningRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CertificateSigningRequestStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_CertificateSigningRequestStatus.Merge(m, src) +} +func (m *CertificateSigningRequestStatus) XXX_Size() int { + return m.Size() +} +func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() { + xxx_messageInfo_CertificateSigningRequestStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo + +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { + return fileDescriptor_09d156762b8218ef, []int{5} +} +func (m *ExtraValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExtraValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExtraValue.Merge(m, src) +} +func (m *ExtraValue) XXX_Size() int { + return m.Size() +} +func (m *ExtraValue) XXX_DiscardUnknown() { + xxx_messageInfo_ExtraValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ExtraValue proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") + proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") + proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") + proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") + proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry") + proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") + proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_09d156762b8218ef) +} + +var fileDescriptor_09d156762b8218ef = []byte{ + // 824 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xf6, 0xfa, 0xdb, 0xe3, 0x90, 0x56, 0x23, 0x54, 0x2d, 0x91, 0xba, 0x1b, 0xad, 0x00, 0x85, + 0x8f, 0xce, 0x92, 0x0a, 0x41, 0x94, 0x03, 0x82, 0x0d, 0x15, 0x44, 0xb4, 0x20, 0x4d, 0x1a, 0x0e, + 0x08, 0x89, 0x8e, 0xd7, 0x6f, 0x37, 0x53, 0x77, 0x3f, 0xd8, 0x99, 0x35, 0xf8, 0xd6, 0x9f, 0xc0, + 0x91, 0x0b, 0x12, 0x3f, 0x27, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x59, 0xc4, 0xdc, 0xf9, 0x01, 0x3d, + 0xa1, 0x99, 0x1d, 0x7b, 0x8d, 0x23, 0xd7, 0x55, 0x73, 0xdb, 0xf7, 0x79, 0xdf, 0xe7, 0x79, 0x3f, + 0x67, 0xd1, 0x97, 0xa3, 0x03, 0x41, 0x78, 0xea, 0x8f, 0x8a, 0x01, 0xe4, 0x09, 0x48, 0x10, 0xfe, + 0x18, 0x92, 0x61, 0x9a, 0xfb, 0xc6, 0xc1, 0x32, 0xee, 0x87, 0x90, 0x4b, 0xfe, 0x90, 0x87, 0x4c, + 0xbb, 0xf7, 0x07, 0x20, 0xd9, 0xbe, 0x1f, 0x41, 0x02, 0x39, 0x93, 0x30, 0x24, 0x59, 0x9e, 0xca, + 0x14, 0xbb, 0x25, 0x81, 0xb0, 0x8c, 0x93, 0x65, 0x02, 0x31, 0x84, 0x9d, 0x5b, 0x11, 0x97, 0x67, + 0xc5, 0x80, 0x84, 0x69, 0xec, 0x47, 0x69, 0x94, 0xfa, 0x9a, 0x37, 0x28, 0x1e, 0x6a, 0x4b, 0x1b, + 0xfa, 0xab, 0xd4, 0xdb, 0xf9, 0xb0, 0x2a, 0x20, 0x66, 0xe1, 0x19, 0x4f, 0x20, 0x9f, 0xf8, 0xd9, + 0x28, 0x52, 0x80, 0xf0, 0x63, 0x90, 0xcc, 0x1f, 0x5f, 0xaa, 0x62, 0xc7, 0x5f, 0xc7, 0xca, 0x8b, + 0x44, 0xf2, 0x18, 0x2e, 0x11, 0x3e, 0xda, 0x44, 0x10, 0xe1, 0x19, 0xc4, 0x6c, 0x95, 0xe7, 0xfd, + 0x51, 0x47, 0x6f, 0x1c, 0x55, 0x6d, 0x9e, 0xf0, 0x28, 0xe1, 0x49, 0x44, 0xe1, 0xc7, 0x02, 0x84, + 0xc4, 0x0f, 0x50, 0x57, 0x55, 0x38, 0x64, 0x92, 0xd9, 0xd6, 0xae, 0xb5, 0xd7, 0xbf, 0xfd, 0x01, + 0xa9, 0xe6, 0xb3, 0x48, 0x44, 0xb2, 0x51, 0xa4, 0x00, 0x41, 0x54, 0x34, 0x19, 0xef, 0x93, 0x6f, + 0x06, 0x8f, 0x20, 0x94, 0xf7, 0x40, 0xb2, 0x00, 0x9f, 0x4f, 0xdd, 0xda, 0x6c, 0xea, 0xa2, 0x0a, + 0xa3, 0x0b, 0x55, 0xfc, 0x00, 0x35, 0x45, 0x06, 0xa1, 0x5d, 0xd7, 0xea, 0x9f, 0x90, 0x0d, 0xd3, + 0x27, 0x6b, 0x6b, 0x3d, 0xc9, 0x20, 0x0c, 0xb6, 0x4c, 0xae, 0xa6, 0xb2, 0xa8, 0x56, 0xc6, 0x67, + 0xa8, 0x2d, 0x24, 0x93, 0x85, 0xb0, 0x1b, 0x3a, 0xc7, 0xa7, 0x57, 0xc8, 0xa1, 0x75, 0x82, 0x6d, + 0x93, 0xa5, 0x5d, 0xda, 0xd4, 0xe8, 0x7b, 0xbf, 0xd5, 0x91, 0xb7, 0x96, 0x7b, 0x94, 0x26, 0x43, + 0x2e, 0x79, 0x9a, 0xe0, 0x03, 0xd4, 0x94, 0x93, 0x0c, 0xf4, 0x40, 0x7b, 0xc1, 0x9b, 0xf3, 0x92, + 0xef, 0x4f, 0x32, 0x78, 0x3e, 0x75, 0x5f, 0x5f, 0x8d, 0x57, 0x38, 0xd5, 0x0c, 0xfc, 0x36, 0x6a, + 0xe7, 0xc0, 0x44, 0x9a, 0xe8, 0x71, 0xf5, 0xaa, 0x42, 0xa8, 0x46, 0xa9, 0xf1, 0xe2, 0x77, 0x50, + 0x27, 0x06, 0x21, 0x58, 0x04, 0xba, 0xe7, 0x5e, 0x70, 0xcd, 0x04, 0x76, 0xee, 0x95, 0x30, 0x9d, + 0xfb, 0xf1, 0x23, 0xb4, 0xfd, 0x98, 0x09, 0x79, 0x9a, 0x0d, 0x99, 0x84, 0xfb, 0x3c, 0x06, 0xbb, + 0xa9, 0xa7, 0xf4, 0xee, 0xcb, 0xed, 0x59, 0x31, 0x82, 0x1b, 0x46, 0x7d, 0xfb, 0xee, 0xff, 0x94, + 0xe8, 0x8a, 0xb2, 0x37, 0xb5, 0xd0, 0xcd, 0xb5, 0xf3, 0xb9, 0xcb, 0x85, 0xc4, 0xdf, 0x5f, 0xba, + 0x37, 0xf2, 0x72, 0x75, 0x28, 0xb6, 0xbe, 0xb6, 0xeb, 0xa6, 0x96, 0xee, 0x1c, 0x59, 0xba, 0xb5, + 0x1f, 0x50, 0x8b, 0x4b, 0x88, 0x85, 0x5d, 0xdf, 0x6d, 0xec, 0xf5, 0x6f, 0x1f, 0xbe, 0xfa, 0x21, + 0x04, 0xaf, 0x99, 0x34, 0xad, 0x63, 0x25, 0x48, 0x4b, 0x5d, 0xef, 0xdf, 0xc6, 0x0b, 0x1a, 0x54, + 0x27, 0x89, 0xdf, 0x42, 0x9d, 0xbc, 0x34, 0x75, 0x7f, 0x5b, 0x41, 0x5f, 0x6d, 0xc5, 0x44, 0xd0, + 0xb9, 0x0f, 0x13, 0x84, 0x04, 0x8f, 0x12, 0xc8, 0xbf, 0x66, 0x31, 0xd8, 0x9d, 0x72, 0xd9, 0xea, + 0x0d, 0x9d, 0x2c, 0x50, 0xba, 0x14, 0x81, 0x09, 0x6a, 0x17, 0x6a, 0x9d, 0xc2, 0x6e, 0xed, 0x36, + 0xf6, 0x7a, 0xc1, 0x0d, 0x75, 0x14, 0xa7, 0x1a, 0x79, 0x3e, 0x75, 0xbb, 0x5f, 0xc1, 
0x44, 0x1b, + 0xd4, 0x44, 0xe1, 0xf7, 0x51, 0xb7, 0x10, 0x90, 0x27, 0x4a, 0xbd, 0x3c, 0xa5, 0xc5, 0xdc, 0x4e, + 0x0d, 0x4e, 0x17, 0x11, 0xf8, 0x26, 0x6a, 0x14, 0x7c, 0x68, 0x4e, 0xa9, 0x6f, 0x02, 0x1b, 0xa7, + 0xc7, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x75, 0x72, 0xa4, + 0x92, 0x7f, 0xa1, 0x11, 0x6a, 0x3c, 0x38, 0x41, 0x2d, 0xf8, 0x59, 0xe6, 0xcc, 0x6e, 0xeb, 0xd1, + 0x1f, 0x5f, 0xed, 0x9d, 0x93, 0x3b, 0x4a, 0xeb, 0x4e, 0x22, 0xf3, 0x49, 0xb5, 0x09, 0x8d, 0xd1, + 0x32, 0xcd, 0x0e, 0x20, 0x54, 0xc5, 0xe0, 0xeb, 0xa8, 0x31, 0x82, 0x49, 0xf9, 0xe0, 0xa8, 0xfa, + 0xc4, 0x9f, 0xa1, 0xd6, 0x98, 0x3d, 0x2e, 0xc0, 0xfc, 0x77, 0xde, 0xdb, 0x58, 0x8f, 0x56, 0xfb, + 0x56, 0x51, 0x68, 0xc9, 0x3c, 0xac, 0x1f, 0x58, 0xde, 0x9f, 0x16, 0x72, 0x37, 0xfc, 0x2d, 0xf0, + 0x4f, 0x08, 0x85, 0xf3, 0xb7, 0x2c, 0x6c, 0x4b, 0xf7, 0x7f, 0xf4, 0xea, 0xfd, 0x2f, 0xfe, 0x0b, + 0xd5, 0x8f, 0x75, 0x01, 0x09, 0xba, 0x94, 0x0a, 0xef, 0xa3, 0xfe, 0x92, 0xb4, 0xee, 0x74, 0x2b, + 0xb8, 0x36, 0x9b, 0xba, 0xfd, 0x25, 0x71, 0xba, 0x1c, 0xe3, 0x7d, 0x6c, 0xc6, 0xa6, 0x1b, 0xc5, + 0xee, 0xfc, 0xbd, 0x58, 0x7a, 0xaf, 0xbd, 0xd5, 0x7b, 0x3f, 0xec, 0xfe, 0xfa, 0xbb, 0x5b, 0x7b, + 0xf2, 0xd7, 0x6e, 0x2d, 0xb8, 0x75, 0x7e, 0xe1, 0xd4, 0x9e, 0x5e, 0x38, 0xb5, 0x67, 0x17, 0x4e, + 0xed, 0xc9, 0xcc, 0xb1, 0xce, 0x67, 0x8e, 0xf5, 0x74, 0xe6, 0x58, 0xcf, 0x66, 0x8e, 0xf5, 0xf7, + 0xcc, 0xb1, 0x7e, 0xf9, 0xc7, 0xa9, 0x7d, 0xd7, 0x31, 0xdd, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, + 0x69, 0x8d, 0xc8, 0xd3, 0xaf, 0x07, 0x00, 0x00, +} + +func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Reason) + copy(dAtA[i:], 
m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SignerName != nil { + i -= len(*m.SignerName) + copy(dAtA[i:], *m.SignerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName))) + i-- + dAtA[i] = 0x3a + } + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { + v := m.Extra[string(keysForExtra[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForExtra[iNdEx]) + copy(dAtA[i:], keysForExtra[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.Usages) > 0 { + for iNdEx := len(m.Usages) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Usages[iNdEx]) + copy(dAtA[i:], m.Usages[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Usages[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Groups[iNdEx]) + copy(dAtA[i:], m.Groups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x1a + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x12 + if m.Request != nil { + i -= len(m.Request) + copy(dAtA[i:], m.Request) + i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Certificate != nil { + i -= len(m.Certificate) + copy(dAtA[i:], m.Certificate) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i-- + dAtA[i] = 0x12 + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m ExtraValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CertificateSigningRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CertificateSigningRequestCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CertificateSigningRequestList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CertificateSigningRequestSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Request != nil { + l = len(m.Request) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Username) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Usages) > 0 { + for _, s := range 
m.Usages { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Extra) > 0 { + for k, v := range m.Extra { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.SignerName != nil { + l = len(*m.SignerName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CertificateSigningRequestStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Certificate != nil { + l = len(m.Certificate) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m ExtraValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CertificateSigningRequest) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequest{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CertificateSigningRequestCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CertificateSigningRequest{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CertificateSigningRequestList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestSpec) String() string { + if this == nil { + return "nil" + } + keysForExtra := make([]string, 0, len(this.Extra)) + for k := range this.Extra { + keysForExtra = append(keysForExtra, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + mapStringForExtra := "map[string]ExtraValue{" + for _, k := range keysForExtra { + mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) + } + mapStringForExtra += "}" + s := strings.Join([]string{`&CertificateSigningRequestSpec{`, + `Request:` + valueToStringGenerated(this.Request) + `,`, + 
`Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, + `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, + `Extra:` + mapStringForExtra + `,`, + `SignerName:` + valueToStringGenerated(this.SignerName) + `,`, + `}`, + }, "") + return s +} +func (this *CertificateSigningRequestStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]CertificateSigningRequestCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&CertificateSigningRequestStatus{`, + `Conditions:` + repeatedStringForConditions + `,`, + `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = RequestConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CertificateSigningRequest{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Request = append(m.Request[:0], dAtA[iNdEx:postIndex]...) + if m.Request == nil { + m.Request = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usages", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Usages = append(m.Usages, KeyUsage(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Extra == nil { + m.Extra = make(map[string]ExtraValue) + } + var mapkey string + mapvalue := &ExtraValue{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Extra[mapkey] = *mapvalue + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field SignerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SignerName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Certificate == nil { + m.Certificate = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExtraValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, 
nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto new file mode 100644 index 0000000..78d2dbc --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -0,0 +1,134 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.certificates.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Describes a certificate signing request +message CertificateSigningRequest { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The certificate request itself and any additional information. + // +optional + optional CertificateSigningRequestSpec spec = 2; + + // Derived information about the request. + // +optional + optional CertificateSigningRequestStatus status = 3; +} + +message CertificateSigningRequestCondition { + // request approval state, currently Approved or Denied. + optional string type = 1; + + // brief reason for the request state + // +optional + optional string reason = 2; + + // human readable message with details about the request state + // +optional + optional string message = 3; + + // timestamp for the last update to this condition + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4; +} + +message CertificateSigningRequestList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated CertificateSigningRequest items = 2; +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +message CertificateSigningRequestSpec { + // Base64-encoded PKCS#10 CSR data + optional bytes request = 1; + + // Requested signer for the request. It is a qualified name in the form: + // `scope-hostname.io/name`. + // If empty, it will be defaulted: + // 1. If it's a kubelet client certificate, it is assigned + // "kubernetes.io/kube-apiserver-client-kubelet". + // 2. If it's a kubelet serving certificate, it is assigned + // "kubernetes.io/kubelet-serving". + // 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown". + // Distribution of trust for signers happens out of band. + // You can select on this field using `spec.signerName`. 
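
[Editorial note — illustration only, not part of the vendored patch content.] The generated Unmarshal methods above all decode protobuf keys and lengths with the same base-128 varint loop: 7 payload bits per byte, high bit as a continuation flag, and the decoded key then split into fieldNum = key>>3 and wireType = key&0x7. A minimal standalone sketch of that loop (the byte values in main are just an example input):

package main

import (
	"errors"
	"fmt"
)

// readVarint consumes one unsigned varint starting at dAtA[i] and returns the
// decoded value plus the index of the next unread byte, mirroring the shift/or
// loop used by the generated code above.
func readVarint(dAtA []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(dAtA) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := dAtA[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of this varint
			break
		}
	}
	return v, i, nil
}

func main() {
	// 0xAC 0x02 encodes 300: (0xAC & 0x7F) + (0x02 << 7) = 44 + 256.
	v, next, err := readVarint([]byte{0xAC, 0x02}, 0)
	fmt.Println(v, next, err) // 300 2 <nil>
}
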
+ // +optional + optional string signerName = 7; + + // allowedUsages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + repeated string usages = 5; + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + optional string username = 2; + + // UID information about the requesting user. + // See user.Info interface for details. + // +optional + optional string uid = 3; + + // Group information about the requesting user. + // See user.Info interface for details. + // +optional + repeated string groups = 4; + + // Extra information about the requesting user. + // See user.Info interface for details. + // +optional + map extra = 6; +} + +message CertificateSigningRequestStatus { + // Conditions applied to the request, such as approval or denial. + // +optional + repeated CertificateSigningRequestCondition conditions = 1; + + // If request was approved, the controller will place the issued certificate here. + // +optional + optional bytes certificate = 2; +} + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message ExtraValue { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + diff --git a/vendor/k8s.io/api/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go new file mode 100644 index 0000000..b4f3af9 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1beta1/register.go @@ -0,0 +1,59 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "certificates.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &CertificateSigningRequest{}, + &CertificateSigningRequestList{}, + ) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go new file mode 100644 index 0000000..5a46e63 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -0,0 +1,190 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Describes a certificate signing request +type CertificateSigningRequest struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The certificate request itself and any additional information. + // +optional + Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Derived information about the request. + // +optional + Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// This information is immutable after the request is created. Only the Request +// and Usages fields can be set on creation, other fields are derived by +// Kubernetes and cannot be modified by users. +type CertificateSigningRequestSpec struct { + // Base64-encoded PKCS#10 CSR data + Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"` + + // Requested signer for the request. It is a qualified name in the form: + // `scope-hostname.io/name`. + // If empty, it will be defaulted: + // 1. If it's a kubelet client certificate, it is assigned + // "kubernetes.io/kube-apiserver-client-kubelet". + // 2. If it's a kubelet serving certificate, it is assigned + // "kubernetes.io/kubelet-serving". + // 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown". + // Distribution of trust for signers happens out of band. + // You can select on this field using `spec.signerName`. + // +optional + SignerName *string `json:"signerName,omitempty" protobuf:"bytes,7,opt,name=signerName"` + + // allowedUsages specifies a set of usage contexts the key will be + // valid for. + // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 + // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 + Usages []KeyUsage `json:"usages,omitempty" protobuf:"bytes,5,opt,name=usages"` + + // Information about the requesting user. + // See user.Info interface for details. + // +optional + Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"` + // UID information about the requesting user. + // See user.Info interface for details. 
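
[Editorial note — illustration only, not part of the vendored patch content.] The register.go above exposes the certificates/v1beta1 kinds through SchemeBuilder/AddToScheme. A hedged sketch of how a consumer would typically wire that into a runtime.Scheme, assuming only the vendored k8s.io/api and k8s.io/apimachinery packages added by this patch:

package main

import (
	"log"

	certsv1beta1 "k8s.io/api/certificates/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := certsv1beta1.AddToScheme(scheme); err != nil {
		log.Fatalf("register certificates/v1beta1: %v", err)
	}
	// The scheme now recognizes CertificateSigningRequest and
	// CertificateSigningRequestList under certificates.k8s.io/v1beta1.
	gvk := certsv1beta1.SchemeGroupVersion.WithKind("CertificateSigningRequest")
	log.Printf("recognized: %v", scheme.Recognizes(gvk))
}
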
+ // +optional + UID string `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` + // Group information about the requesting user. + // See user.Info interface for details. + // +optional + Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` + // Extra information about the requesting user. + // See user.Info interface for details. + // +optional + Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"` +} + +// Built in signerName values that are honoured by kube-controller-manager. +// None of these usages are related to ServiceAccount token secrets +// `.data[ca.crt]` in any way. +const ( + // Signs certificates that will be honored as client-certs by the + // kube-apiserver. Never auto-approved by kube-controller-manager. + KubeAPIServerClientSignerName = "kubernetes.io/kube-apiserver-client" + + // Signs client certificates that will be honored as client-certs by the + // kube-apiserver for a kubelet. + // May be auto-approved by kube-controller-manager. + KubeAPIServerClientKubeletSignerName = "kubernetes.io/kube-apiserver-client-kubelet" + + // Signs serving certificates that are honored as a valid kubelet serving + // certificate by the kube-apiserver, but has no other guarantees. + KubeletServingSignerName = "kubernetes.io/kubelet-serving" + + // Has no guarantees for trust at all. Some distributions may honor these + // as client certs, but that behavior is not standard kubernetes behavior. + LegacyUnknownSignerName = "kubernetes.io/legacy-unknown" +) + +// ExtraValue masks the value so protobuf can generate +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type ExtraValue []string + +func (t ExtraValue) String() string { + return fmt.Sprintf("%v", []string(t)) +} + +type CertificateSigningRequestStatus struct { + // Conditions applied to the request, such as approval or denial. + // +optional + Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` + + // If request was approved, the controller will place the issued certificate here. + // +optional + Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` +} + +type RequestConditionType string + +// These are the possible conditions for a certificate request. +const ( + CertificateApproved RequestConditionType = "Approved" + CertificateDenied RequestConditionType = "Denied" +) + +type CertificateSigningRequestCondition struct { + // request approval state, currently Approved or Denied. + Type RequestConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RequestConditionType"` + // brief reason for the request state + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` + // human readable message with details about the request state + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // timestamp for the last update to this condition + // +optional + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +type CertificateSigningRequestList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + Items []CertificateSigningRequest `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// KeyUsages specifies valid usage contexts for keys. 
+// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 +// https://tools.ietf.org/html/rfc5280#section-4.2.1.12 +type KeyUsage string + +const ( + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommitment KeyUsage = "content commitment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapeSGC KeyUsage = "netscape sgc" +) diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..a2edb45 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
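
[Editorial note — illustration only, not part of the vendored patch content.] With types.go in place, the vendored structs compose like any other Kubernetes API object. A minimal sketch of building a CertificateSigningRequest in memory; the object name and the PEM placeholder are made up for illustration, everything else comes from the types above:

package main

import (
	"fmt"

	certsv1beta1 "k8s.io/api/certificates/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	signer := certsv1beta1.KubeAPIServerClientSignerName
	csr := certsv1beta1.CertificateSigningRequest{
		ObjectMeta: metav1.ObjectMeta{Name: "example-csr"}, // hypothetical name
		Spec: certsv1beta1.CertificateSigningRequestSpec{
			// Placeholder PEM; a real request would carry bytes produced by an
			// actual CSR generator.
			Request:    []byte("-----BEGIN CERTIFICATE REQUEST-----\n...\n-----END CERTIFICATE REQUEST-----\n"),
			SignerName: &signer,
			Usages: []certsv1beta1.KeyUsage{
				certsv1beta1.UsageDigitalSignature,
				certsv1beta1.UsageClientAuth,
			},
		},
	}
	fmt.Println(csr.Name, *csr.Spec.SignerName)
}
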
+var map_CertificateSigningRequest = map[string]string{ + "": "Describes a certificate signing request", + "spec": "The certificate request itself and any additional information.", + "status": "Derived information about the request.", +} + +func (CertificateSigningRequest) SwaggerDoc() map[string]string { + return map_CertificateSigningRequest +} + +var map_CertificateSigningRequestCondition = map[string]string{ + "type": "request approval state, currently Approved or Denied.", + "reason": "brief reason for the request state", + "message": "human readable message with details about the request state", + "lastUpdateTime": "timestamp for the last update to this condition", +} + +func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestCondition +} + +var map_CertificateSigningRequestSpec = map[string]string{ + "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", + "request": "Base64-encoded PKCS#10 CSR data", + "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", + "username": "Information about the requesting user. See user.Info interface for details.", + "uid": "UID information about the requesting user. See user.Info interface for details.", + "groups": "Group information about the requesting user. See user.Info interface for details.", + "extra": "Extra information about the requesting user. See user.Info interface for details.", +} + +func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestSpec +} + +var map_CertificateSigningRequestStatus = map[string]string{ + "conditions": "Conditions applied to the request, such as approval or denial.", + "certificate": "If request was approved, the controller will place the issued certificate here.", +} + +func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string { + return map_CertificateSigningRequestStatus +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..11d0f77 --- /dev/null +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,202 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
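
[Editorial note — illustration only, not part of the vendored patch content.] The SwaggerDoc maps above are plain string lookups keyed by JSON field name, for example:

package main

import (
	"fmt"

	certsv1beta1 "k8s.io/api/certificates/v1beta1"
)

func main() {
	doc := certsv1beta1.CertificateSigningRequestSpec{}.SwaggerDoc()
	fmt.Println(doc["request"]) // "Base64-encoded PKCS#10 CSR data"
}
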
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequest. +func (in *CertificateSigningRequest) DeepCopy() *CertificateSigningRequest { + if in == nil { + return nil + } + out := new(CertificateSigningRequest) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateSigningRequest) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestCondition) DeepCopyInto(out *CertificateSigningRequestCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestCondition. +func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequestCondition { + if in == nil { + return nil + } + out := new(CertificateSigningRequestCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CertificateSigningRequest, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestList. +func (in *CertificateSigningRequestList) DeepCopy() *CertificateSigningRequestList { + if in == nil { + return nil + } + out := new(CertificateSigningRequestList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CertificateSigningRequestList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningRequestSpec) { + *out = *in + if in.Request != nil { + in, out := &in.Request, &out.Request + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.SignerName != nil { + in, out := &in.SignerName, &out.SignerName + *out = new(string) + **out = **in + } + if in.Usages != nil { + in, out := &in.Usages, &out.Usages + *out = make([]KeyUsage, len(*in)) + copy(*out, *in) + } + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Extra != nil { + in, out := &in.Extra, &out.Extra + *out = make(map[string]ExtraValue, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestSpec. +func (in *CertificateSigningRequestSpec) DeepCopy() *CertificateSigningRequestSpec { + if in == nil { + return nil + } + out := new(CertificateSigningRequestSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CertificateSigningRequestStatus) DeepCopyInto(out *CertificateSigningRequestStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]CertificateSigningRequestCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Certificate != nil { + in, out := &in.Certificate, &out.Certificate + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestStatus. +func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequestStatus { + if in == nil { + return nil + } + out := new(CertificateSigningRequestStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in ExtraValue) DeepCopyInto(out *ExtraValue) { + { + in := &in + *out = make(ExtraValue, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. +func (in ExtraValue) DeepCopy() ExtraValue { + if in == nil { + return nil + } + out := new(ExtraValue) + in.DeepCopyInto(out) + return *out +} diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go new file mode 100644 index 0000000..fc2f4f2 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
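
[Editorial note — illustration only, not part of the vendored patch content.] The generated deep-copy functions above exist so that a copy does not alias the original's slices and maps. A small sketch of the difference that makes; the username and group values are just example data:

package main

import (
	"fmt"

	certsv1beta1 "k8s.io/api/certificates/v1beta1"
)

func main() {
	orig := certsv1beta1.CertificateSigningRequestSpec{
		Username: "jane",                           // hypothetical user
		Groups:   []string{"system:authenticated"}, // example group
	}
	cp := orig.DeepCopy()    // allocates fresh backing arrays, not shared with orig
	cp.Groups[0] = "mutated" // mutating the copy leaves the original intact
	fmt.Println(orig.Groups[0]) // still "system:authenticated"
}
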
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=coordination.k8s.io + +package v1 // import "k8s.io/api/coordination/v1" diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go new file mode 100644 index 0000000..22c3d62 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/generated.pb.go @@ -0,0 +1,976 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo + +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_929e1148ad9baca3, []int{2} +} +func (m *LeaseSpec) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease") + proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList") + proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptor_929e1148ad9baca3) +} + +var fileDescriptor_929e1148ad9baca3 = []byte{ + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, + 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, + 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, + 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, + 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, + 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, + 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, + 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, + 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, + 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, + 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, + 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, + 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, + 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, + 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, + 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, + 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, + 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, + 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, + 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, + 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, + 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, + 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, + 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, + 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 0xd9, 0x03, 
0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, + 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, + 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, + 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, + 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, + 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, + 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, + 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, + 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, +} + +func (m *Lease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Lease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 + } + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AcquireTime != nil { + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } 
+ i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 + } + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HolderIdentity != nil { + l = len(*m.HolderIdentity) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LeaseDurationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.LeaseDurationSeconds)) + } + if m.AcquireTime != nil { + l = m.AcquireTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LeaseTransitions != nil { + n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Lease) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lease{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaseList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LeaseList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LeaseSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseSpec{`, + `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, + `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `}`, + }, "") + return s +} +func 
valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Lease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Lease{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HolderIdentity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HolderIdentity = &s + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseDurationSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LeaseDurationSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcquireTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AcquireTime == nil { + m.AcquireTime = &v1.MicroTime{} + } + if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RenewTime == nil { + m.RenewTime = &v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseTransitions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LeaseTransitions = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto new file mode 100644 index 0000000..4206746 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/generated.proto @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.coordination.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Lease defines a lease concept. +message Lease { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional LeaseSpec spec = 2; +} + +// LeaseList is a list of Lease objects. +message LeaseList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated Lease items = 2; +} + +// LeaseSpec is a specification of a Lease. +message LeaseSpec { + // holderIdentity contains the identity of the holder of a current lease. + // +optional + optional string holderIdentity = 1; + + // leaseDurationSeconds is a duration that candidates for a lease need + // to wait to force acquire it. This is measure against time of last + // observed RenewTime. + // +optional + optional int32 leaseDurationSeconds = 2; + + // acquireTime is a time when the current lease was acquired. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + + // renewTime is a time when the current holder of a lease has last + // updated the lease. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + + // leaseTransitions is the number of transitions of a lease between + // holders. + // +optional + optional int32 leaseTransitions = 5; +} + diff --git a/vendor/k8s.io/api/coordination/v1/register.go b/vendor/k8s.io/api/coordination/v1/register.go new file mode 100644 index 0000000..95b987b --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "coordination.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Lease{}, + &LeaseList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go new file mode 100644 index 0000000..7a5605a --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Lease defines a lease concept. +type Lease struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseSpec is a specification of a Lease. +type LeaseSpec struct { + // holderIdentity contains the identity of the holder of a current lease. + // +optional + HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` + // leaseDurationSeconds is a duration that candidates for a lease need + // to wait to force acquire it. This is measure against time of last + // observed RenewTime. 
+ // +optional + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` + // acquireTime is a time when the current lease was acquired. + // +optional + AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty" protobuf:"bytes,3,opt,name=acquireTime"` + // renewTime is a time when the current holder of a lease has last + // updated the lease. + // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,4,opt,name=renewTime"` + // leaseTransitions is the number of transitions of a lease between + // holders. + // +optional + LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LeaseList is a list of Lease objects. +type LeaseList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..0f14404 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Lease = map[string]string{ + "": "Lease defines a lease concept.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Lease) SwaggerDoc() map[string]string { + return map_Lease +} + +var map_LeaseList = map[string]string{ + "": "LeaseList is a list of Lease objects.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (LeaseList) SwaggerDoc() map[string]string { + return map_LeaseList +} + +var map_LeaseSpec = map[string]string{ + "": "LeaseSpec is a specification of a Lease.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "acquireTime": "acquireTime is a time when the current lease was acquired.", + "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", + "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", +} + +func (LeaseSpec) SwaggerDoc() map[string]string { + return map_LeaseSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..2dd7edd --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lease) DeepCopyInto(out *Lease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease. +func (in *Lease) DeepCopy() *Lease { + if in == nil { + return nil + } + out := new(Lease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Lease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseList) DeepCopyInto(out *LeaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Lease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList. 
+func (in *LeaseList) DeepCopy() *LeaseList { + if in == nil { + return nil + } + out := new(LeaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { + *out = *in + if in.HolderIdentity != nil { + in, out := &in.HolderIdentity, &out.HolderIdentity + *out = new(string) + **out = **in + } + if in.LeaseDurationSeconds != nil { + in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds + *out = new(int32) + **out = **in + } + if in.AcquireTime != nil { + in, out := &in.AcquireTime, &out.AcquireTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + if in.LeaseTransitions != nil { + in, out := &in.LeaseTransitions, &out.LeaseTransitions + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec. +func (in *LeaseSpec) DeepCopy() *LeaseSpec { + if in == nil { + return nil + } + out := new(LeaseSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go new file mode 100644 index 0000000..304732d --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=coordination.k8s.io + +package v1beta1 // import "k8s.io/api/coordination/v1beta1" diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go new file mode 100644 index 0000000..57a314c --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -0,0 +1,976 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
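Editor's note: the coordination/v1 files vendored above define the Lease resource (used by Kubernetes for leader election and node heartbeats) together with its scheme registration and deepcopy helpers. As an illustration only (not part of this patch), the sketch below registers the vendored types into a runtime.Scheme and builds a Lease in memory, assuming the k8s.io/api and k8s.io/apimachinery packages shown in this patch are importable; the lease name, namespace, and holder identity are hypothetical.

package main

import (
	"fmt"
	"time"

	coordinationv1 "k8s.io/api/coordination/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the vendored coordination/v1 types so generic machinery can
	// resolve their group/version/kind.
	scheme := runtime.NewScheme()
	if err := coordinationv1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	holder := "redispool-0" // hypothetical holder identity
	duration := int32(15)
	now := metav1.NewMicroTime(time.Now())

	lease := &coordinationv1.Lease{
		ObjectMeta: metav1.ObjectMeta{Name: "redispool-leader", Namespace: "default"},
		Spec: coordinationv1.LeaseSpec{
			HolderIdentity:       &holder,
			LeaseDurationSeconds: &duration,
			AcquireTime:          &now,
			RenewTime:            &now,
		},
	}

	// Look up the GroupVersionKind the scheme now knows for this Go type.
	gvks, _, err := scheme.ObjectKinds(lease)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0], *lease.Spec.HolderIdentity) // coordination.k8s.io/v1, Kind=Lease redispool-0
}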
+// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{0} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{1} +} +func (m *LeaseList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseList.Merge(m, src) +} +func (m *LeaseList) XXX_Size() int { + return m.Size() +} +func (m *LeaseList) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseList.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseList proto.InternalMessageInfo + +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_daca6bcd2ff63a80, []int{2} +} +func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LeaseSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseSpec.Merge(m, src) +} +func (m *LeaseSpec) XXX_Size() int { + return m.Size() +} +func (m *LeaseSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease") + proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList") + proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_daca6bcd2ff63a80) +} + +var fileDescriptor_daca6bcd2ff63a80 = []byte{ + // 540 bytes of a 
gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, + 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, + 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, + 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, + 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, + 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, + 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, + 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, + 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, + 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, + 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, + 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, + 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, + 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, + 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, + 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, + 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, + 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, + 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, + 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, + 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, + 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, + 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, + 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, + 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, + 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, + 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, + 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, + 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, + 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, + 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, + 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, +} + +func (m *Lease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil 
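Editor's note: the Marshal, Size, and Unmarshal methods in this generated file all follow the protobuf wire format: each field is prefixed with a varint key of (fieldNumber<<3 | wireType), and the sovGenerated helper computes how many 7-bit bytes a varint needs. A standalone sketch of that arithmetic (not part of the vendored file):

package main

import (
	"fmt"
	"math/bits"
)

// varintLen mirrors the generated sovGenerated helper: the number of bytes
// needed to encode x as a protobuf varint (7 payload bits per byte).
func varintLen(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	// 0x12 is the key byte the generated Marshal writes before Spec and Items:
	// field number 2, wire type 2 (length-delimited).
	key := uint64(0x12)
	fmt.Println("field:", key>>3, "wireType:", key&0x7) // field: 2 wireType: 2
	fmt.Println("varint bytes for 300:", varintLen(300)) // 2
}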
+} + +func (m *Lease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LeaseTransitions != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) + i-- + dAtA[i] = 0x28 + } + if m.RenewTime != nil { + { + size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AcquireTime != nil { + { + size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.LeaseDurationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) + i-- + dAtA[i] = 0x10 + } + if m.HolderIdentity != nil { + i -= len(*m.HolderIdentity) + copy(dAtA[i:], *m.HolderIdentity) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LeaseList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ 
= l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LeaseSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HolderIdentity != nil { + l = len(*m.HolderIdentity) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LeaseDurationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.LeaseDurationSeconds)) + } + if m.AcquireTime != nil { + l = m.AcquireTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RenewTime != nil { + l = m.RenewTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LeaseTransitions != nil { + n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Lease) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lease{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LeaseList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Lease{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LeaseList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LeaseSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LeaseSpec{`, + `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, + `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Lease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Lease{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HolderIdentity", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.HolderIdentity = &s + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseDurationSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LeaseDurationSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AcquireTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AcquireTime == nil { + m.AcquireTime = &v1.MicroTime{} + } + if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RenewTime == nil { + m.RenewTime = &v1.MicroTime{} + } + if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseTransitions", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LeaseTransitions = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto new file mode 100644 index 0000000..cfc2711 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.coordination.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// Lease defines a lease concept. +message Lease { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional LeaseSpec spec = 2; +} + +// LeaseList is a list of Lease objects. +message LeaseList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated Lease items = 2; +} + +// LeaseSpec is a specification of a Lease. +message LeaseSpec { + // holderIdentity contains the identity of the holder of a current lease. + // +optional + optional string holderIdentity = 1; + + // leaseDurationSeconds is a duration that candidates for a lease need + // to wait to force acquire it. This is measure against time of last + // observed RenewTime. + // +optional + optional int32 leaseDurationSeconds = 2; + + // acquireTime is a time when the current lease was acquired. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; + + // renewTime is a time when the current holder of a lease has last + // updated the lease. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; + + // leaseTransitions is the number of transitions of a lease between + // holders. + // +optional + optional int32 leaseTransitions = 5; +} + diff --git a/vendor/k8s.io/api/coordination/v1beta1/register.go b/vendor/k8s.io/api/coordination/v1beta1/register.go new file mode 100644 index 0000000..85efaa6 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "coordination.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Lease{}, + &LeaseList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go new file mode 100644 index 0000000..da88f67 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Lease defines a lease concept. +type Lease struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the Lease. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// LeaseSpec is a specification of a Lease. +type LeaseSpec struct { + // holderIdentity contains the identity of the holder of a current lease. + // +optional + HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` + // leaseDurationSeconds is a duration that candidates for a lease need + // to wait to force acquire it. This is measure against time of last + // observed RenewTime. + // +optional + LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` + // acquireTime is a time when the current lease was acquired. 
+ // +optional + AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty" protobuf:"bytes,3,opt,name=acquireTime"` + // renewTime is a time when the current holder of a lease has last + // updated the lease. + // +optional + RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,4,opt,name=renewTime"` + // leaseTransitions is the number of transitions of a lease between + // holders. + // +optional + LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LeaseList is a list of Lease objects. +type LeaseList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..f557d26 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,63 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Lease = map[string]string{ + "": "Lease defines a lease concept.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Lease) SwaggerDoc() map[string]string { + return map_Lease +} + +var map_LeaseList = map[string]string{ + "": "LeaseList is a list of Lease objects.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (LeaseList) SwaggerDoc() map[string]string { + return map_LeaseList +} + +var map_LeaseSpec = map[string]string{ + "": "LeaseSpec is a specification of a Lease.", + "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", + "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", + "acquireTime": "acquireTime is a time when the current lease was acquired.", + "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", + "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", +} + +func (LeaseSpec) SwaggerDoc() map[string]string { + return map_LeaseSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..de69621 --- /dev/null +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,124 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lease) DeepCopyInto(out *Lease) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease. +func (in *Lease) DeepCopy() *Lease { + if in == nil { + return nil + } + out := new(Lease) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Lease) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseList) DeepCopyInto(out *LeaseList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Lease, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList. 
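Editor's note: the generated deepcopy helpers in this file return fully independent copies; pointer fields such as HolderIdentity are reallocated rather than aliased. A minimal sketch (illustrative only, using the vendored v1beta1 package):

package main

import (
	"fmt"

	coordinationv1beta1 "k8s.io/api/coordination/v1beta1"
)

func main() {
	holder := "node-a"
	orig := &coordinationv1beta1.LeaseSpec{HolderIdentity: &holder}

	cp := orig.DeepCopy()
	*cp.HolderIdentity = "node-b" // mutating the copy leaves the original intact

	fmt.Println(*orig.HolderIdentity, *cp.HolderIdentity) // node-a node-b
}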
+func (in *LeaseList) DeepCopy() *LeaseList { + if in == nil { + return nil + } + out := new(LeaseList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LeaseList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { + *out = *in + if in.HolderIdentity != nil { + in, out := &in.HolderIdentity, &out.HolderIdentity + *out = new(string) + **out = **in + } + if in.LeaseDurationSeconds != nil { + in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds + *out = new(int32) + **out = **in + } + if in.AcquireTime != nil { + in, out := &in.AcquireTime, &out.AcquireTime + *out = (*in).DeepCopy() + } + if in.RenewTime != nil { + in, out := &in.RenewTime, &out.RenewTime + *out = (*in).DeepCopy() + } + if in.LeaseTransitions != nil { + in, out := &in.LeaseTransitions, &out.LeaseTransitions + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec. +func (in *LeaseSpec) DeepCopy() *LeaseSpec { + if in == nil { + return nil + } + out := new(LeaseSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go new file mode 100644 index 0000000..edc9b4d --- /dev/null +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file should be consistent with pkg/api/annotation_key_constants.go. + +package v1 + +const ( + // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy + // webhook backend fails. + ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" + + // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation + PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" + + // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods + MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" + + // TolerationsAnnotationKey represents the key of tolerations data (json serialized) + // in the Annotations of a Pod. + TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" + + // TaintsAnnotationKey represents the key of taints data (json serialized) + // in the Annotations of a Node. + TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" + + // SeccompPodAnnotationKey represents the key of a seccomp profile applied + // to all containers of a pod. 
+ SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" + + // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied + // to one container of a pod. + SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" + + // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime. + SeccompProfileRuntimeDefault string = "runtime/default" + + // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. + // This is now deprecated and should be replaced by SeccompProfileRuntimeDefault. + DeprecatedSeccompProfileDockerDefault string = "docker/default" + + // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) + // in the Annotations of a Node. + PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" + + // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache + // an object (e.g. secret, config map) before fetching it again from apiserver. + // This annotation can be attached to node. + ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" + + // annotation key prefix used to identify non-convertible json paths. + NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" + + kubectlPrefix = "kubectl.kubernetes.io/" + + // LastAppliedConfigAnnotation is the annotation used to store the previous + // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. + LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" + + // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers + // + // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to + // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow + // access only from the CIDRs currently allocated to MIT & the USPS. + // + // Not all cloud providers support this annotation, though AWS & GCE do. + AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" + + // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that + // represents the timestamp (stored as RFC 3339 date-time string, e.g. '2018-10-22T19:32:52.1Z') + // of the last change, of some Pod or Service object, that triggered the endpoints object change. + // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints + // controller at T1, and the Endpoints object was changed at T2, the + // EndpointsLastChangeTriggerTime would be set to T0. + // + // The "endpoints change trigger" here means any Pod or Service change that resulted in the + // Endpoints object change. + // + // Given the definition of the "endpoints change trigger", please note that this annotation will + // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the + // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's + // already set). 
+ // + // This annotation will be used to compute the in-cluster network programming latency SLI, see + // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md + EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" + + // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated + // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. + // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or + // CSI Backend for a volume plugin on a specific node. + MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" +) diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go new file mode 100644 index 0000000..1bdf0b2 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package + +// Package v1 is the v1 version of the core API. +package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go new file mode 100644 index 0000000..8e58752 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -0,0 +1,66605 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + resource "k8s.io/apimachinery/pkg/api/resource" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } +func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} +func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{0} +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.Merge(m, src) +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AWSElasticBlockStoreVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AWSElasticBlockStoreVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo + +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{1} +} +func (m *Affinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Affinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Affinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Affinity.Merge(m, src) +} +func (m *Affinity) XXX_Size() int { + return m.Size() +} +func (m *Affinity) XXX_DiscardUnknown() { + xxx_messageInfo_Affinity.DiscardUnknown(m) +} + +var xxx_messageInfo_Affinity proto.InternalMessageInfo + +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{2} +} +func (m *AttachedVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttachedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AttachedVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttachedVolume.Merge(m, src) +} +func (m *AttachedVolume) XXX_Size() int { + return m.Size() +} +func (m *AttachedVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AttachedVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo + +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{3} +} +func (m *AvoidPods) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AvoidPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AvoidPods) XXX_Merge(src proto.Message) { + xxx_messageInfo_AvoidPods.Merge(m, src) +} +func (m *AvoidPods) XXX_Size() int { + return m.Size() +} +func (m *AvoidPods) XXX_DiscardUnknown() { + xxx_messageInfo_AvoidPods.DiscardUnknown(m) +} + +var xxx_messageInfo_AvoidPods proto.InternalMessageInfo + +func (m 
*AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{4} +} +func (m *AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureDiskVolumeSource.Merge(m, src) +} +func (m *AzureDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureDiskVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo + +func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } +func (*AzureFilePersistentVolumeSource) ProtoMessage() {} +func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{5} +} +func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureFilePersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFilePersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFilePersistentVolumeSource.Merge(m, src) +} +func (m *AzureFilePersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFilePersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFilePersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo + +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{6} +} +func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AzureFileVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AzureFileVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_AzureFileVolumeSource.Merge(m, src) +} +func (m *AzureFileVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *AzureFileVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_AzureFileVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo + +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{7} +} +func (m *Binding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Binding) XXX_Merge(src proto.Message) { + xxx_messageInfo_Binding.Merge(m, src) +} +func (m *Binding) XXX_Size() int { + return m.Size() +} +func (m *Binding) XXX_DiscardUnknown() { + 
xxx_messageInfo_Binding.DiscardUnknown(m) +} + +var xxx_messageInfo_Binding proto.InternalMessageInfo + +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{8} +} +func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIPersistentVolumeSource.Merge(m, src) +} +func (m *CSIPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo + +func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } +func (*CSIVolumeSource) ProtoMessage() {} +func (*CSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{9} +} +func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIVolumeSource.Merge(m, src) +} +func (m *CSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CSIVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo + +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{10} +} +func (m *Capabilities) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Capabilities) XXX_Merge(src proto.Message) { + xxx_messageInfo_Capabilities.Merge(m, src) +} +func (m *Capabilities) XXX_Size() int { + return m.Size() +} +func (m *Capabilities) XXX_DiscardUnknown() { + xxx_messageInfo_Capabilities.DiscardUnknown(m) +} + +var xxx_messageInfo_Capabilities proto.InternalMessageInfo + +func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } +func (*CephFSPersistentVolumeSource) ProtoMessage() {} +func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{11} +} +func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSPersistentVolumeSource.Merge(m, src) +} +func (m *CephFSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m 
*CephFSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CephFSPersistentVolumeSource proto.InternalMessageInfo + +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{12} +} +func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CephFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CephFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CephFSVolumeSource.Merge(m, src) +} +func (m *CephFSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CephFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CephFSVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo + +func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } +func (*CinderPersistentVolumeSource) ProtoMessage() {} +func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{13} +} +func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderPersistentVolumeSource.Merge(m, src) +} +func (m *CinderPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo + +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{14} +} +func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CinderVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CinderVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_CinderVolumeSource.Merge(m, src) +} +func (m *CinderVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *CinderVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_CinderVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo + +func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } +func (*ClientIPConfig) ProtoMessage() {} +func (*ClientIPConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{15} +} +func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClientIPConfig) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_ClientIPConfig.Merge(m, src) +} +func (m *ClientIPConfig) XXX_Size() int { + return m.Size() +} +func (m *ClientIPConfig) XXX_DiscardUnknown() { + xxx_messageInfo_ClientIPConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo + +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{16} +} +func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentCondition.Merge(m, src) +} +func (m *ComponentCondition) XXX_Size() int { + return m.Size() +} +func (m *ComponentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo + +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{17} +} +func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatus.Merge(m, src) +} +func (m *ComponentStatus) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo + +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{18} +} +func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ComponentStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ComponentStatusList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ComponentStatusList.Merge(m, src) +} +func (m *ComponentStatusList) XXX_Size() int { + return m.Size() +} +func (m *ComponentStatusList) XXX_DiscardUnknown() { + xxx_messageInfo_ComponentStatusList.DiscardUnknown(m) +} + +var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo + +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{19} +} +func (m *ConfigMap) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMap) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMap.Merge(m, src) +} +func (m *ConfigMap) XXX_Size() int { + return m.Size() +} 
+func (m *ConfigMap) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMap.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMap proto.InternalMessageInfo + +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } +func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{20} +} +func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapEnvSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapEnvSource.Merge(m, src) +} +func (m *ConfigMapEnvSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapEnvSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo + +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{21} +} +func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapKeySelector.Merge(m, src) +} +func (m *ConfigMapKeySelector) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapKeySelector.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo + +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{22} +} +func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapList.Merge(m, src) +} +func (m *ConfigMapList) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapList) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapList.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo + +func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } +func (*ConfigMapNodeConfigSource) ProtoMessage() {} +func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{23} +} +func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapNodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapNodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapNodeConfigSource.Merge(m, src) +} +func (m *ConfigMapNodeConfigSource) XXX_Size() int { + return m.Size() 
+} +func (m *ConfigMapNodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapNodeConfigSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo + +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{24} +} +func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapProjection.Merge(m, src) +} +func (m *ConfigMapProjection) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo + +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{25} +} +func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ConfigMapVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ConfigMapVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConfigMapVolumeSource.Merge(m, src) +} +func (m *ConfigMapVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ConfigMapVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ConfigMapVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo + +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{26} +} +func (m *Container) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Container) XXX_Merge(src proto.Message) { + xxx_messageInfo_Container.Merge(m, src) +} +func (m *Container) XXX_Size() int { + return m.Size() +} +func (m *Container) XXX_DiscardUnknown() { + xxx_messageInfo_Container.DiscardUnknown(m) +} + +var xxx_messageInfo_Container proto.InternalMessageInfo + +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{27} +} +func (m *ContainerImage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerImage) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerImage.Merge(m, src) +} +func (m *ContainerImage) XXX_Size() int { + return m.Size() +} +func (m *ContainerImage) XXX_DiscardUnknown() { + 
xxx_messageInfo_ContainerImage.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerImage proto.InternalMessageInfo + +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{28} +} +func (m *ContainerPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerPort.Merge(m, src) +} +func (m *ContainerPort) XXX_Size() int { + return m.Size() +} +func (m *ContainerPort) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerPort.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerPort proto.InternalMessageInfo + +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{29} +} +func (m *ContainerState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerState.Merge(m, src) +} +func (m *ContainerState) XXX_Size() int { + return m.Size() +} +func (m *ContainerState) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerState.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerState proto.InternalMessageInfo + +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{30} +} +func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateRunning) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateRunning.Merge(m, src) +} +func (m *ContainerStateRunning) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateRunning) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo + +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (*ContainerStateTerminated) ProtoMessage() {} +func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{31} +} +func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateTerminated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateTerminated) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateTerminated.Merge(m, src) +} +func (m *ContainerStateTerminated) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateTerminated) XXX_DiscardUnknown() { + 
xxx_messageInfo_ContainerStateTerminated.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo + +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (*ContainerStateWaiting) ProtoMessage() {} +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{32} +} +func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStateWaiting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStateWaiting) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStateWaiting.Merge(m, src) +} +func (m *ContainerStateWaiting) XXX_Size() int { + return m.Size() +} +func (m *ContainerStateWaiting) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStateWaiting.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo + +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{33} +} +func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerStatus.Merge(m, src) +} +func (m *ContainerStatus) XXX_Size() int { + return m.Size() +} +func (m *ContainerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo + +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (*DaemonEndpoint) ProtoMessage() {} +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{34} +} +func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonEndpoint) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonEndpoint.Merge(m, src) +} +func (m *DaemonEndpoint) XXX_Size() int { + return m.Size() +} +func (m *DaemonEndpoint) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonEndpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo + +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{35} +} +func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIProjection.Merge(m, src) +} +func (m *DownwardAPIProjection) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIProjection) XXX_DiscardUnknown() { + 
xxx_messageInfo_DownwardAPIProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo + +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (*DownwardAPIVolumeFile) ProtoMessage() {} +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{36} +} +func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIVolumeFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIVolumeFile) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIVolumeFile.Merge(m, src) +} +func (m *DownwardAPIVolumeFile) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIVolumeFile) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIVolumeFile.DiscardUnknown(m) +} + +var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo + +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (*DownwardAPIVolumeSource) ProtoMessage() {} +func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{37} +} +func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DownwardAPIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DownwardAPIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_DownwardAPIVolumeSource.Merge(m, src) +} +func (m *DownwardAPIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *DownwardAPIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_DownwardAPIVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo + +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (*EmptyDirVolumeSource) ProtoMessage() {} +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{38} +} +func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EmptyDirVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EmptyDirVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EmptyDirVolumeSource.Merge(m, src) +} +func (m *EmptyDirVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *EmptyDirVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_EmptyDirVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo + +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (*EndpointAddress) ProtoMessage() {} +func (*EndpointAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{39} +} +func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointAddress.Merge(m, src) +} +func (m 
*EndpointAddress) XXX_Size() int { + return m.Size() +} +func (m *EndpointAddress) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo + +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{40} +} +func (m *EndpointPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointPort.Merge(m, src) +} +func (m *EndpointPort) XXX_Size() int { + return m.Size() +} +func (m *EndpointPort) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointPort.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointPort proto.InternalMessageInfo + +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (*EndpointSubset) ProtoMessage() {} +func (*EndpointSubset) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{41} +} +func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointSubset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointSubset) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointSubset.Merge(m, src) +} +func (m *EndpointSubset) XXX_Size() int { + return m.Size() +} +func (m *EndpointSubset) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointSubset.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo + +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (*Endpoints) ProtoMessage() {} +func (*Endpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{42} +} +func (m *Endpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Endpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_Endpoints.Merge(m, src) +} +func (m *Endpoints) XXX_Size() int { + return m.Size() +} +func (m *Endpoints) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoints.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoints proto.InternalMessageInfo + +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (*EndpointsList) ProtoMessage() {} +func (*EndpointsList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{43} +} +func (m *EndpointsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EndpointsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EndpointsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EndpointsList.Merge(m, src) +} +func (m *EndpointsList) XXX_Size() int { + return m.Size() +} +func (m *EndpointsList) XXX_DiscardUnknown() { + xxx_messageInfo_EndpointsList.DiscardUnknown(m) +} + +var xxx_messageInfo_EndpointsList proto.InternalMessageInfo + +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } 
+func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{44} +} +func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvFromSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvFromSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvFromSource.Merge(m, src) +} +func (m *EnvFromSource) XXX_Size() int { + return m.Size() +} +func (m *EnvFromSource) XXX_DiscardUnknown() { + xxx_messageInfo_EnvFromSource.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo + +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (*EnvVar) ProtoMessage() {} +func (*EnvVar) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{45} +} +func (m *EnvVar) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvVar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvVar) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvVar.Merge(m, src) +} +func (m *EnvVar) XXX_Size() int { + return m.Size() +} +func (m *EnvVar) XXX_DiscardUnknown() { + xxx_messageInfo_EnvVar.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvVar proto.InternalMessageInfo + +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (*EnvVarSource) ProtoMessage() {} +func (*EnvVarSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{46} +} +func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EnvVarSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EnvVarSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EnvVarSource.Merge(m, src) +} +func (m *EnvVarSource) XXX_Size() int { + return m.Size() +} +func (m *EnvVarSource) XXX_DiscardUnknown() { + xxx_messageInfo_EnvVarSource.DiscardUnknown(m) +} + +var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo + +func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } +func (*EphemeralContainer) ProtoMessage() {} +func (*EphemeralContainer) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{47} +} +func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainer) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainer.Merge(m, src) +} +func (m *EphemeralContainer) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainer) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainer.DiscardUnknown(m) +} + +var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo + +func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } +func (*EphemeralContainerCommon) ProtoMessage() {} +func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{48} +} +func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *EphemeralContainerCommon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainerCommon) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainerCommon.Merge(m, src) +} +func (m *EphemeralContainerCommon) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainerCommon) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainerCommon.DiscardUnknown(m) +} + +var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo + +func (m *EphemeralContainers) Reset() { *m = EphemeralContainers{} } +func (*EphemeralContainers) ProtoMessage() {} +func (*EphemeralContainers) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{49} +} +func (m *EphemeralContainers) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EphemeralContainers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EphemeralContainers) XXX_Merge(src proto.Message) { + xxx_messageInfo_EphemeralContainers.Merge(m, src) +} +func (m *EphemeralContainers) XXX_Size() int { + return m.Size() +} +func (m *EphemeralContainers) XXX_DiscardUnknown() { + xxx_messageInfo_EphemeralContainers.DiscardUnknown(m) +} + +var xxx_messageInfo_EphemeralContainers proto.InternalMessageInfo + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{50} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{51} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{52} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_EventSeries proto.InternalMessageInfo + +func (m *EventSource) Reset() { *m = EventSource{} } +func (*EventSource) ProtoMessage() {} +func (*EventSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{53} +} +func (m *EventSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSource.Merge(m, src) +} +func (m *EventSource) XXX_Size() int { + return m.Size() +} +func (m *EventSource) XXX_DiscardUnknown() { + xxx_messageInfo_EventSource.DiscardUnknown(m) +} + +var xxx_messageInfo_EventSource proto.InternalMessageInfo + +func (m *ExecAction) Reset() { *m = ExecAction{} } +func (*ExecAction) ProtoMessage() {} +func (*ExecAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{54} +} +func (m *ExecAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExecAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExecAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_ExecAction.Merge(m, src) +} +func (m *ExecAction) XXX_Size() int { + return m.Size() +} +func (m *ExecAction) XXX_DiscardUnknown() { + xxx_messageInfo_ExecAction.DiscardUnknown(m) +} + +var xxx_messageInfo_ExecAction proto.InternalMessageInfo + +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (*FCVolumeSource) ProtoMessage() {} +func (*FCVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{55} +} +func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FCVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FCVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FCVolumeSource.Merge(m, src) +} +func (m *FCVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FCVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FCVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo + +func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } +func (*FlexPersistentVolumeSource) ProtoMessage() {} +func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{56} +} +func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlexPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlexPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlexPersistentVolumeSource.Merge(m, src) +} +func (m *FlexPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m 
*FlexPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlexPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo + +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (*FlexVolumeSource) ProtoMessage() {} +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{57} +} +func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlexVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlexVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlexVolumeSource.Merge(m, src) +} +func (m *FlexVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlexVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlexVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo + +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func (*FlockerVolumeSource) ProtoMessage() {} +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{58} +} +func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FlockerVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FlockerVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlockerVolumeSource.Merge(m, src) +} +func (m *FlockerVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *FlockerVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_FlockerVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo + +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} +func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{59} +} +func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GCEPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GCEPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GCEPersistentDiskVolumeSource.Merge(m, src) +} +func (m *GCEPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GCEPersistentDiskVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo + +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (*GitRepoVolumeSource) ProtoMessage() {} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{60} +} +func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GitRepoVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GitRepoVolumeSource) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_GitRepoVolumeSource.Merge(m, src) +} +func (m *GitRepoVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GitRepoVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GitRepoVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo + +func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } +func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} +func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{61} +} +func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GlusterfsPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GlusterfsPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GlusterfsPersistentVolumeSource.Merge(m, src) +} +func (m *GlusterfsPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GlusterfsPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GlusterfsPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo + +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (*GlusterfsVolumeSource) ProtoMessage() {} +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{62} +} +func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GlusterfsVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GlusterfsVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GlusterfsVolumeSource.Merge(m, src) +} +func (m *GlusterfsVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *GlusterfsVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_GlusterfsVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo + +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (*HTTPGetAction) ProtoMessage() {} +func (*HTTPGetAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{63} +} +func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPGetAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPGetAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPGetAction.Merge(m, src) +} +func (m *HTTPGetAction) XXX_Size() int { + return m.Size() +} +func (m *HTTPGetAction) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPGetAction.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo + +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (*HTTPHeader) ProtoMessage() {} +func (*HTTPHeader) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{64} +} +func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + 
if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPHeader) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPHeader.Merge(m, src) +} +func (m *HTTPHeader) XXX_Size() int { + return m.Size() +} +func (m *HTTPHeader) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPHeader.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo + +func (m *Handler) Reset() { *m = Handler{} } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{65} +} +func (m *Handler) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Handler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Handler) XXX_Merge(src proto.Message) { + xxx_messageInfo_Handler.Merge(m, src) +} +func (m *Handler) XXX_Size() int { + return m.Size() +} +func (m *Handler) XXX_DiscardUnknown() { + xxx_messageInfo_Handler.DiscardUnknown(m) +} + +var xxx_messageInfo_Handler proto.InternalMessageInfo + +func (m *HostAlias) Reset() { *m = HostAlias{} } +func (*HostAlias) ProtoMessage() {} +func (*HostAlias) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{66} +} +func (m *HostAlias) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostAlias) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostAlias.Merge(m, src) +} +func (m *HostAlias) XXX_Size() int { + return m.Size() +} +func (m *HostAlias) XXX_DiscardUnknown() { + xxx_messageInfo_HostAlias.DiscardUnknown(m) +} + +var xxx_messageInfo_HostAlias proto.InternalMessageInfo + +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (*HostPathVolumeSource) ProtoMessage() {} +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{67} +} +func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPathVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPathVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPathVolumeSource.Merge(m, src) +} +func (m *HostPathVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *HostPathVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_HostPathVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo + +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{68} +} +func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ISCSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ISCSIPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ISCSIPersistentVolumeSource.Merge(m, src) +} +func (m 
*ISCSIPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ISCSIPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ISCSIPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo + +func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } +func (*ISCSIVolumeSource) ProtoMessage() {} +func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{69} +} +func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ISCSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ISCSIVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ISCSIVolumeSource.Merge(m, src) +} +func (m *ISCSIVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ISCSIVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo + +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (*KeyToPath) ProtoMessage() {} +func (*KeyToPath) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{70} +} +func (m *KeyToPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *KeyToPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *KeyToPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_KeyToPath.Merge(m, src) +} +func (m *KeyToPath) XXX_Size() int { + return m.Size() +} +func (m *KeyToPath) XXX_DiscardUnknown() { + xxx_messageInfo_KeyToPath.DiscardUnknown(m) +} + +var xxx_messageInfo_KeyToPath proto.InternalMessageInfo + +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (*Lifecycle) ProtoMessage() {} +func (*Lifecycle) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{71} +} +func (m *Lifecycle) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lifecycle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Lifecycle) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lifecycle.Merge(m, src) +} +func (m *Lifecycle) XXX_Size() int { + return m.Size() +} +func (m *Lifecycle) XXX_DiscardUnknown() { + xxx_messageInfo_Lifecycle.DiscardUnknown(m) +} + +var xxx_messageInfo_Lifecycle proto.InternalMessageInfo + +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (*LimitRange) ProtoMessage() {} +func (*LimitRange) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{72} +} +func (m *LimitRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRange.Merge(m, src) +} +func (m *LimitRange) XXX_Size() int { + return m.Size() +} +func (m *LimitRange) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRange.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitRange proto.InternalMessageInfo + +func (m *LimitRangeItem) Reset() { *m = 
LimitRangeItem{} } +func (*LimitRangeItem) ProtoMessage() {} +func (*LimitRangeItem) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{73} +} +func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeItem) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeItem.Merge(m, src) +} +func (m *LimitRangeItem) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeItem) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeItem.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo + +func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } +func (*LimitRangeList) ProtoMessage() {} +func (*LimitRangeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{74} +} +func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeList.Merge(m, src) +} +func (m *LimitRangeList) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeList) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeList.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo + +func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } +func (*LimitRangeSpec) ProtoMessage() {} +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{75} +} +func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LimitRangeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LimitRangeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_LimitRangeSpec.Merge(m, src) +} +func (m *LimitRangeSpec) XXX_Size() int { + return m.Size() +} +func (m *LimitRangeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_LimitRangeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{76} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} + +var xxx_messageInfo_List proto.InternalMessageInfo + +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (*LoadBalancerIngress) ProtoMessage() {} +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{77} +} +func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*LoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LoadBalancerIngress) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerIngress.Merge(m, src) +} +func (m *LoadBalancerIngress) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerIngress) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerIngress.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo + +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (*LoadBalancerStatus) ProtoMessage() {} +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{78} +} +func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LoadBalancerStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_LoadBalancerStatus.Merge(m, src) +} +func (m *LoadBalancerStatus) XXX_Size() int { + return m.Size() +} +func (m *LoadBalancerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_LoadBalancerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo + +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{79} +} +func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalObjectReference.Merge(m, src) +} +func (m *LocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *LocalObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_LocalObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo + +func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } +func (*LocalVolumeSource) ProtoMessage() {} +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{80} +} +func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LocalVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LocalVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_LocalVolumeSource.Merge(m, src) +} +func (m *LocalVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *LocalVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_LocalVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo + +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (*NFSVolumeSource) ProtoMessage() {} +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{81} +} +func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *NFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NFSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NFSVolumeSource.Merge(m, src) +} +func (m *NFSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *NFSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_NFSVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo + +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{82} +} +func (m *Namespace) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Namespace) XXX_Merge(src proto.Message) { + xxx_messageInfo_Namespace.Merge(m, src) +} +func (m *Namespace) XXX_Size() int { + return m.Size() +} +func (m *Namespace) XXX_DiscardUnknown() { + xxx_messageInfo_Namespace.DiscardUnknown(m) +} + +var xxx_messageInfo_Namespace proto.InternalMessageInfo + +func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } +func (*NamespaceCondition) ProtoMessage() {} +func (*NamespaceCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{83} +} +func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceCondition.Merge(m, src) +} +func (m *NamespaceCondition) XXX_Size() int { + return m.Size() +} +func (m *NamespaceCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo + +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{84} +} +func (m *NamespaceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceList.Merge(m, src) +} +func (m *NamespaceList) XXX_Size() int { + return m.Size() +} +func (m *NamespaceList) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceList.DiscardUnknown(m) +} + +var xxx_messageInfo_NamespaceList proto.InternalMessageInfo + +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{85} +} +func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*NamespaceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceSpec.Merge(m, src) +} +func (m *NamespaceSpec) XXX_Size() int { + return m.Size() +} +func (m *NamespaceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo + +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func (*NamespaceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{86} +} +func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NamespaceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NamespaceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NamespaceStatus.Merge(m, src) +} +func (m *NamespaceStatus) XXX_Size() int { + return m.Size() +} +func (m *NamespaceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NamespaceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo + +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{87} +} +func (m *Node) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Node) XXX_Merge(src proto.Message) { + xxx_messageInfo_Node.Merge(m, src) +} +func (m *Node) XXX_Size() int { + return m.Size() +} +func (m *Node) XXX_DiscardUnknown() { + xxx_messageInfo_Node.DiscardUnknown(m) +} + +var xxx_messageInfo_Node proto.InternalMessageInfo + +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{88} +} +func (m *NodeAddress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAddress) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAddress.Merge(m, src) +} +func (m *NodeAddress) XXX_Size() int { + return m.Size() +} +func (m *NodeAddress) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAddress.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeAddress proto.InternalMessageInfo + +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{89} +} +func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeAffinity.Merge(m, src) +} +func (m *NodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *NodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_NodeAffinity.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo + +func (m *NodeCondition) Reset() 
{ *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{90} +} +func (m *NodeCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeCondition.Merge(m, src) +} +func (m *NodeCondition) XXX_Size() int { + return m.Size() +} +func (m *NodeCondition) XXX_DiscardUnknown() { + xxx_messageInfo_NodeCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeCondition proto.InternalMessageInfo + +func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } +func (*NodeConfigSource) ProtoMessage() {} +func (*NodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{91} +} +func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigSource.Merge(m, src) +} +func (m *NodeConfigSource) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigSource) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigSource.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo + +func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } +func (*NodeConfigStatus) ProtoMessage() {} +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{92} +} +func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeConfigStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeConfigStatus.Merge(m, src) +} +func (m *NodeConfigStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeConfigStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeConfigStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo + +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{93} +} +func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeDaemonEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeDaemonEndpoints) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeDaemonEndpoints.Merge(m, src) +} +func (m *NodeDaemonEndpoints) XXX_Size() int { + return m.Size() +} +func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { + xxx_messageInfo_NodeDaemonEndpoints.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo + +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_83c10c24ec417dc9, []int{94} +} +func (m *NodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeList.Merge(m, src) +} +func (m *NodeList) XXX_Size() int { + return m.Size() +} +func (m *NodeList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeList.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeList proto.InternalMessageInfo + +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{95} +} +func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeProxyOptions.Merge(m, src) +} +func (m *NodeProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *NodeProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_NodeProxyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo + +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{96} +} +func (m *NodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeResources.Merge(m, src) +} +func (m *NodeResources) XXX_Size() int { + return m.Size() +} +func (m *NodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_NodeResources.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeResources proto.InternalMessageInfo + +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func (*NodeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{97} +} +func (m *NodeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelector.Merge(m, src) +} +func (m *NodeSelector) XXX_Size() int { + return m.Size() +} +func (m *NodeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeSelector proto.InternalMessageInfo + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{98} +} +func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorRequirement) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorRequirement.Merge(m, src) +} +func (m *NodeSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *NodeSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo + +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{99} +} +func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSelectorTerm.Merge(m, src) +} +func (m *NodeSelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *NodeSelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSelectorTerm.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo + +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{100} +} +func (m *NodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeSpec.Merge(m, src) +} +func (m *NodeSpec) XXX_Size() int { + return m.Size() +} +func (m *NodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeSpec proto.InternalMessageInfo + +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{101} +} +func (m *NodeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeStatus.Merge(m, src) +} +func (m *NodeStatus) XXX_Size() int { + return m.Size() +} +func (m *NodeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_NodeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeStatus proto.InternalMessageInfo + +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{102} +} +func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeSystemInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeSystemInfo) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_NodeSystemInfo.Merge(m, src) +} +func (m *NodeSystemInfo) XXX_Size() int { + return m.Size() +} +func (m *NodeSystemInfo) XXX_DiscardUnknown() { + xxx_messageInfo_NodeSystemInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo + +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{103} +} +func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectFieldSelector.Merge(m, src) +} +func (m *ObjectFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ObjectFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectFieldSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo + +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{104} +} +func (m *ObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectReference.Merge(m, src) +} +func (m *ObjectReference) XXX_Size() int { + return m.Size() +} +func (m *ObjectReference) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectReference proto.InternalMessageInfo + +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{105} +} +func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolume.Merge(m, src) +} +func (m *PersistentVolume) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolume) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolume proto.InternalMessageInfo + +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{106} +} +func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeClaim) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_PersistentVolumeClaim.Merge(m, src) +} +func (m *PersistentVolumeClaim) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaim) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaim.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo + +func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } +func (*PersistentVolumeClaimCondition) ProtoMessage() {} +func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{107} +} +func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeClaimCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimCondition.Merge(m, src) +} +func (m *PersistentVolumeClaimCondition) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{108} +} +func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeClaimList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimList.Merge(m, src) +} +func (m *PersistentVolumeClaimList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimList.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{109} +} +func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeClaimSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimSpec.Merge(m, src) +} +func (m *PersistentVolumeClaimSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, 
[]int{110} +} +func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeClaimStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimStatus.Merge(m, src) +} +func (m *PersistentVolumeClaimStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{111} +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeClaimVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeClaimVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeClaimVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo + +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{112} +} +func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeList.Merge(m, src) +} +func (m *PersistentVolumeList) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeList) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeList.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo + +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{113} +} +func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSource.Merge(m, src) +} +func (m *PersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSource) XXX_DiscardUnknown() { + 
xxx_messageInfo_PersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeSource proto.InternalMessageInfo + +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{114} +} +func (m *PersistentVolumeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeSpec.Merge(m, src) +} +func (m *PersistentVolumeSpec) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeSpec proto.InternalMessageInfo + +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{115} +} +func (m *PersistentVolumeStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PersistentVolumeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PersistentVolumeStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PersistentVolumeStatus.Merge(m, src) +} +func (m *PersistentVolumeStatus) XXX_Size() int { + return m.Size() +} +func (m *PersistentVolumeStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PersistentVolumeStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PersistentVolumeStatus proto.InternalMessageInfo + +func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{116} +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.Merge(m, src) +} +func (m *PhotonPersistentDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PhotonPersistentDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PhotonPersistentDiskVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PhotonPersistentDiskVolumeSource proto.InternalMessageInfo + +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{117} +} +func (m *Pod) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Pod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Pod) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Pod.Merge(m, src) +} +func (m *Pod) XXX_Size() int { + return m.Size() +} +func (m *Pod) XXX_DiscardUnknown() { + xxx_messageInfo_Pod.DiscardUnknown(m) +} + +var xxx_messageInfo_Pod proto.InternalMessageInfo + +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{118} +} +func (m *PodAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinity.Merge(m, src) +} +func (m *PodAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinity.DiscardUnknown(m) +} + +var xxx_messageInfo_PodAffinity proto.InternalMessageInfo + +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{119} +} +func (m *PodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAffinityTerm.Merge(m, src) +} +func (m *PodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *PodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PodAffinityTerm.DiscardUnknown(m) +} + +var xxx_messageInfo_PodAffinityTerm proto.InternalMessageInfo + +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{120} +} +func (m *PodAntiAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAntiAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodAntiAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAntiAffinity.Merge(m, src) +} +func (m *PodAntiAffinity) XXX_Size() int { + return m.Size() +} +func (m *PodAntiAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_PodAntiAffinity.DiscardUnknown(m) +} + +var xxx_messageInfo_PodAntiAffinity proto.InternalMessageInfo + +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{121} +} +func (m *PodAttachOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodAttachOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodAttachOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodAttachOptions.Merge(m, src) +} +func (m *PodAttachOptions) XXX_Size() int { + return m.Size() +} +func (m *PodAttachOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodAttachOptions.DiscardUnknown(m) +} + +var 
xxx_messageInfo_PodAttachOptions proto.InternalMessageInfo + +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{122} +} +func (m *PodCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodCondition.Merge(m, src) +} +func (m *PodCondition) XXX_Size() int { + return m.Size() +} +func (m *PodCondition) XXX_DiscardUnknown() { + xxx_messageInfo_PodCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_PodCondition proto.InternalMessageInfo + +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{123} +} +func (m *PodDNSConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDNSConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfig.Merge(m, src) +} +func (m *PodDNSConfig) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfig) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDNSConfig proto.InternalMessageInfo + +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{124} +} +func (m *PodDNSConfigOption) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDNSConfigOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDNSConfigOption) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDNSConfigOption.Merge(m, src) +} +func (m *PodDNSConfigOption) XXX_Size() int { + return m.Size() +} +func (m *PodDNSConfigOption) XXX_DiscardUnknown() { + xxx_messageInfo_PodDNSConfigOption.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDNSConfigOption proto.InternalMessageInfo + +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{125} +} +func (m *PodExecOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodExecOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodExecOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodExecOptions.Merge(m, src) +} +func (m *PodExecOptions) XXX_Size() int { + return m.Size() +} +func (m *PodExecOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodExecOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_PodExecOptions proto.InternalMessageInfo + +func (m *PodIP) Reset() { *m = PodIP{} } +func (*PodIP) ProtoMessage() {} +func (*PodIP) Descriptor() ([]byte, []int) { + 
return fileDescriptor_83c10c24ec417dc9, []int{126} +} +func (m *PodIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodIP.Merge(m, src) +} +func (m *PodIP) XXX_Size() int { + return m.Size() +} +func (m *PodIP) XXX_DiscardUnknown() { + xxx_messageInfo_PodIP.DiscardUnknown(m) +} + +var xxx_messageInfo_PodIP proto.InternalMessageInfo + +func (m *PodList) Reset() { *m = PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{127} +} +func (m *PodList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodList.Merge(m, src) +} +func (m *PodList) XXX_Size() int { + return m.Size() +} +func (m *PodList) XXX_DiscardUnknown() { + xxx_messageInfo_PodList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodList proto.InternalMessageInfo + +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{128} +} +func (m *PodLogOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodLogOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodLogOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodLogOptions.Merge(m, src) +} +func (m *PodLogOptions) XXX_Size() int { + return m.Size() +} +func (m *PodLogOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodLogOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_PodLogOptions proto.InternalMessageInfo + +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{129} +} +func (m *PodPortForwardOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPortForwardOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPortForwardOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPortForwardOptions.Merge(m, src) +} +func (m *PodPortForwardOptions) XXX_Size() int { + return m.Size() +} +func (m *PodPortForwardOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodPortForwardOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPortForwardOptions proto.InternalMessageInfo + +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{130} +} +func (m *PodProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodProxyOptions.Merge(m, src) +} +func (m *PodProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *PodProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_PodProxyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_PodProxyOptions proto.InternalMessageInfo + +func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } +func (*PodReadinessGate) ProtoMessage() {} +func (*PodReadinessGate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{131} +} +func (m *PodReadinessGate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodReadinessGate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodReadinessGate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodReadinessGate.Merge(m, src) +} +func (m *PodReadinessGate) XXX_Size() int { + return m.Size() +} +func (m *PodReadinessGate) XXX_DiscardUnknown() { + xxx_messageInfo_PodReadinessGate.DiscardUnknown(m) +} + +var xxx_messageInfo_PodReadinessGate proto.InternalMessageInfo + +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{132} +} +func (m *PodSecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityContext.Merge(m, src) +} +func (m *PodSecurityContext) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityContext.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityContext proto.InternalMessageInfo + +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{133} +} +func (m *PodSignature) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSignature) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSignature) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSignature.Merge(m, src) +} +func (m *PodSignature) XXX_Size() int { + return m.Size() +} +func (m *PodSignature) XXX_DiscardUnknown() { + xxx_messageInfo_PodSignature.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSignature proto.InternalMessageInfo + +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{134} +} +func (m *PodSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSpec.Merge(m, 
src) +} +func (m *PodSpec) XXX_Size() int { + return m.Size() +} +func (m *PodSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSpec proto.InternalMessageInfo + +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{135} +} +func (m *PodStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatus.Merge(m, src) +} +func (m *PodStatus) XXX_Size() int { + return m.Size() +} +func (m *PodStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodStatus proto.InternalMessageInfo + +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{136} +} +func (m *PodStatusResult) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodStatusResult) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodStatusResult) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodStatusResult.Merge(m, src) +} +func (m *PodStatusResult) XXX_Size() int { + return m.Size() +} +func (m *PodStatusResult) XXX_DiscardUnknown() { + xxx_messageInfo_PodStatusResult.DiscardUnknown(m) +} + +var xxx_messageInfo_PodStatusResult proto.InternalMessageInfo + +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{137} +} +func (m *PodTemplate) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodTemplate) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplate.Merge(m, src) +} +func (m *PodTemplate) XXX_Size() int { + return m.Size() +} +func (m *PodTemplate) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplate.DiscardUnknown(m) +} + +var xxx_messageInfo_PodTemplate proto.InternalMessageInfo + +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{138} +} +func (m *PodTemplateList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodTemplateList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateList.Merge(m, src) +} +func (m *PodTemplateList) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateList) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodTemplateList proto.InternalMessageInfo + +func (m *PodTemplateSpec) Reset() { *m = 
PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{139} +} +func (m *PodTemplateSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodTemplateSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodTemplateSpec.Merge(m, src) +} +func (m *PodTemplateSpec) XXX_Size() int { + return m.Size() +} +func (m *PodTemplateSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodTemplateSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodTemplateSpec proto.InternalMessageInfo + +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{140} +} +func (m *PortworxVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PortworxVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PortworxVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_PortworxVolumeSource.Merge(m, src) +} +func (m *PortworxVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *PortworxVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_PortworxVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_PortworxVolumeSource proto.InternalMessageInfo + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{141} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) +} + +var xxx_messageInfo_Preconditions proto.InternalMessageInfo + +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{142} +} +func (m *PreferAvoidPodsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferAvoidPodsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PreferAvoidPodsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferAvoidPodsEntry.Merge(m, src) +} +func (m *PreferAvoidPodsEntry) XXX_Size() int { + return m.Size() +} +func (m *PreferAvoidPodsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_PreferAvoidPodsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_PreferAvoidPodsEntry proto.InternalMessageInfo + +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func 
(*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{143} +} +func (m *PreferredSchedulingTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PreferredSchedulingTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PreferredSchedulingTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_PreferredSchedulingTerm.Merge(m, src) +} +func (m *PreferredSchedulingTerm) XXX_Size() int { + return m.Size() +} +func (m *PreferredSchedulingTerm) XXX_DiscardUnknown() { + xxx_messageInfo_PreferredSchedulingTerm.DiscardUnknown(m) +} + +var xxx_messageInfo_PreferredSchedulingTerm proto.InternalMessageInfo + +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{144} +} +func (m *Probe) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Probe) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Probe) XXX_Merge(src proto.Message) { + xxx_messageInfo_Probe.Merge(m, src) +} +func (m *Probe) XXX_Size() int { + return m.Size() +} +func (m *Probe) XXX_DiscardUnknown() { + xxx_messageInfo_Probe.DiscardUnknown(m) +} + +var xxx_messageInfo_Probe proto.InternalMessageInfo + +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{145} +} +func (m *ProjectedVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProjectedVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProjectedVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProjectedVolumeSource.Merge(m, src) +} +func (m *ProjectedVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ProjectedVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ProjectedVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ProjectedVolumeSource proto.InternalMessageInfo + +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{146} +} +func (m *QuobyteVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QuobyteVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *QuobyteVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_QuobyteVolumeSource.Merge(m, src) +} +func (m *QuobyteVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *QuobyteVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_QuobyteVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_QuobyteVolumeSource proto.InternalMessageInfo + +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (*RBDPersistentVolumeSource) 
ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{147} +} +func (m *RBDPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RBDPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDPersistentVolumeSource.Merge(m, src) +} +func (m *RBDPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_RBDPersistentVolumeSource proto.InternalMessageInfo + +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{148} +} +func (m *RBDVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RBDVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RBDVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_RBDVolumeSource.Merge(m, src) +} +func (m *RBDVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *RBDVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_RBDVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_RBDVolumeSource proto.InternalMessageInfo + +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{149} +} +func (m *RangeAllocation) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RangeAllocation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RangeAllocation) XXX_Merge(src proto.Message) { + xxx_messageInfo_RangeAllocation.Merge(m, src) +} +func (m *RangeAllocation) XXX_Size() int { + return m.Size() +} +func (m *RangeAllocation) XXX_DiscardUnknown() { + xxx_messageInfo_RangeAllocation.DiscardUnknown(m) +} + +var xxx_messageInfo_RangeAllocation proto.InternalMessageInfo + +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{150} +} +func (m *ReplicationController) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationController) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationController) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationController.Merge(m, src) +} +func (m *ReplicationController) XXX_Size() int { + return m.Size() +} +func (m *ReplicationController) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationController.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationController proto.InternalMessageInfo + +func (m *ReplicationControllerCondition) Reset() { *m = 
ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{151} +} +func (m *ReplicationControllerCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationControllerCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerCondition.Merge(m, src) +} +func (m *ReplicationControllerCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerCondition proto.InternalMessageInfo + +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{152} +} +func (m *ReplicationControllerList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationControllerList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerList.Merge(m, src) +} +func (m *ReplicationControllerList) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerList proto.InternalMessageInfo + +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} +func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{153} +} +func (m *ReplicationControllerSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationControllerSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerSpec.Merge(m, src) +} +func (m *ReplicationControllerSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerSpec proto.InternalMessageInfo + +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() {} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{154} +} +func (m *ReplicationControllerStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicationControllerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicationControllerStatus) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicationControllerStatus.Merge(m, src) +} +func (m *ReplicationControllerStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicationControllerStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicationControllerStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicationControllerStatus proto.InternalMessageInfo + +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{155} +} +func (m *ResourceFieldSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceFieldSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceFieldSelector.Merge(m, src) +} +func (m *ResourceFieldSelector) XXX_Size() int { + return m.Size() +} +func (m *ResourceFieldSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceFieldSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceFieldSelector proto.InternalMessageInfo + +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{156} +} +func (m *ResourceQuota) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuota) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceQuota) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuota.Merge(m, src) +} +func (m *ResourceQuota) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuota) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuota.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceQuota proto.InternalMessageInfo + +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{157} +} +func (m *ResourceQuotaList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceQuotaList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaList.Merge(m, src) +} +func (m *ResourceQuotaList) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaList) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaList.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceQuotaList proto.InternalMessageInfo + +func (m *ResourceQuotaSpec) Reset() { *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{158} +} +func (m *ResourceQuotaSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m 
*ResourceQuotaSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaSpec.Merge(m, src) +} +func (m *ResourceQuotaSpec) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceQuotaSpec proto.InternalMessageInfo + +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{159} +} +func (m *ResourceQuotaStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceQuotaStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceQuotaStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceQuotaStatus.Merge(m, src) +} +func (m *ResourceQuotaStatus) XXX_Size() int { + return m.Size() +} +func (m *ResourceQuotaStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceQuotaStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceQuotaStatus proto.InternalMessageInfo + +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{160} +} +func (m *ResourceRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ResourceRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceRequirements.Merge(m, src) +} +func (m *ResourceRequirements) XXX_Size() int { + return m.Size() +} +func (m *ResourceRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceRequirements.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceRequirements proto.InternalMessageInfo + +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{161} +} +func (m *SELinuxOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxOptions.Merge(m, src) +} +func (m *SELinuxOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxOptions proto.InternalMessageInfo + +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{162} +} +func (m *ScaleIOPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { 
+ return nil, err + } + return b[:n], nil +} +func (m *ScaleIOPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOPersistentVolumeSource.Merge(m, src) +} +func (m *ScaleIOPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleIOPersistentVolumeSource proto.InternalMessageInfo + +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{163} +} +func (m *ScaleIOVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleIOVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleIOVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleIOVolumeSource.Merge(m, src) +} +func (m *ScaleIOVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *ScaleIOVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleIOVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleIOVolumeSource proto.InternalMessageInfo + +func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } +func (*ScopeSelector) ProtoMessage() {} +func (*ScopeSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{164} +} +func (m *ScopeSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScopeSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopeSelector.Merge(m, src) +} +func (m *ScopeSelector) XXX_Size() int { + return m.Size() +} +func (m *ScopeSelector) XXX_DiscardUnknown() { + xxx_messageInfo_ScopeSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopeSelector proto.InternalMessageInfo + +func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } +func (*ScopedResourceSelectorRequirement) ProtoMessage() {} +func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{165} +} +func (m *ScopedResourceSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScopedResourceSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScopedResourceSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScopedResourceSelectorRequirement.Merge(m, src) +} +func (m *ScopedResourceSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *ScopedResourceSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_ScopedResourceSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo + +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{166} +} +func (m *Secret) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Secret) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Secret) XXX_Merge(src proto.Message) { + xxx_messageInfo_Secret.Merge(m, src) +} +func (m *Secret) XXX_Size() int { + return m.Size() +} +func (m *Secret) XXX_DiscardUnknown() { + xxx_messageInfo_Secret.DiscardUnknown(m) +} + +var xxx_messageInfo_Secret proto.InternalMessageInfo + +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{167} +} +func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretEnvSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretEnvSource.Merge(m, src) +} +func (m *SecretEnvSource) XXX_Size() int { + return m.Size() +} +func (m *SecretEnvSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretEnvSource.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo + +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{168} +} +func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretKeySelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretKeySelector.Merge(m, src) +} +func (m *SecretKeySelector) XXX_Size() int { + return m.Size() +} +func (m *SecretKeySelector) XXX_DiscardUnknown() { + xxx_messageInfo_SecretKeySelector.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo + +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{169} +} +func (m *SecretList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretList) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretList.Merge(m, src) +} +func (m *SecretList) XXX_Size() int { + return m.Size() +} +func (m *SecretList) XXX_DiscardUnknown() { + xxx_messageInfo_SecretList.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretList proto.InternalMessageInfo + +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{170} +} +func (m *SecretProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretProjection) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_SecretProjection.Merge(m, src) +} +func (m *SecretProjection) XXX_Size() int { + return m.Size() +} +func (m *SecretProjection) XXX_DiscardUnknown() { + xxx_messageInfo_SecretProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretProjection proto.InternalMessageInfo + +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{171} +} +func (m *SecretReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretReference.Merge(m, src) +} +func (m *SecretReference) XXX_Size() int { + return m.Size() +} +func (m *SecretReference) XXX_DiscardUnknown() { + xxx_messageInfo_SecretReference.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretReference proto.InternalMessageInfo + +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{172} +} +func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretVolumeSource.Merge(m, src) +} +func (m *SecretVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *SecretVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_SecretVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo + +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{173} +} +func (m *SecurityContext) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecurityContext) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecurityContext.Merge(m, src) +} +func (m *SecurityContext) XXX_Size() int { + return m.Size() +} +func (m *SecurityContext) XXX_DiscardUnknown() { + xxx_messageInfo_SecurityContext.DiscardUnknown(m) +} + +var xxx_messageInfo_SecurityContext proto.InternalMessageInfo + +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{174} +} +func (m *SerializedReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SerializedReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SerializedReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_SerializedReference.Merge(m, src) 
+} +func (m *SerializedReference) XXX_Size() int { + return m.Size() +} +func (m *SerializedReference) XXX_DiscardUnknown() { + xxx_messageInfo_SerializedReference.DiscardUnknown(m) +} + +var xxx_messageInfo_SerializedReference proto.InternalMessageInfo + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{175} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(m, src) +} +func (m *Service) XXX_Size() int { + return m.Size() +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{176} +} +func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccount) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccount.Merge(m, src) +} +func (m *ServiceAccount) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccount) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccount.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo + +func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{177} +} +func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountList.Merge(m, src) +} +func (m *ServiceAccountList) XXX_Size() int { + return m.Size() +} +func (m *ServiceAccountList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo + +func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } +func (*ServiceAccountTokenProjection) ProtoMessage() {} +func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{178} +} +func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceAccountTokenProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceAccountTokenProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceAccountTokenProjection.Merge(m, src) +} +func (m *ServiceAccountTokenProjection) XXX_Size() int { + return m.Size() +} 
+func (m *ServiceAccountTokenProjection) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceAccountTokenProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo + +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{179} +} +func (m *ServiceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceList.Merge(m, src) +} +func (m *ServiceList) XXX_Size() int { + return m.Size() +} +func (m *ServiceList) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceList.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceList proto.InternalMessageInfo + +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{180} +} +func (m *ServicePort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServicePort) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServicePort.Merge(m, src) +} +func (m *ServicePort) XXX_Size() int { + return m.Size() +} +func (m *ServicePort) XXX_DiscardUnknown() { + xxx_messageInfo_ServicePort.DiscardUnknown(m) +} + +var xxx_messageInfo_ServicePort proto.InternalMessageInfo + +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{181} +} +func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceProxyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceProxyOptions.Merge(m, src) +} +func (m *ServiceProxyOptions) XXX_Size() int { + return m.Size() +} +func (m *ServiceProxyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceProxyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo + +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{182} +} +func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceSpec.Merge(m, src) +} +func (m *ServiceSpec) XXX_Size() int { + return m.Size() +} +func (m *ServiceSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo + +func (m 
*ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{183} +} +func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServiceStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceStatus.Merge(m, src) +} +func (m *ServiceStatus) XXX_Size() int { + return m.Size() +} +func (m *ServiceStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo + +func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } +func (*SessionAffinityConfig) ProtoMessage() {} +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{184} +} +func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_SessionAffinityConfig.Merge(m, src) +} +func (m *SessionAffinityConfig) XXX_Size() int { + return m.Size() +} +func (m *SessionAffinityConfig) XXX_DiscardUnknown() { + xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo + +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{185} +} +func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageOSPersistentVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSPersistentVolumeSource.Merge(m, src) +} +func (m *StorageOSPersistentVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *StorageOSPersistentVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSPersistentVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo + +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{186} +} +func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageOSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageOSVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageOSVolumeSource.Merge(m, src) +} +func (m *StorageOSVolumeSource) XXX_Size() int { + return m.Size() +} +func (m 
*StorageOSVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_StorageOSVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo + +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{187} +} +func (m *Sysctl) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Sysctl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Sysctl) XXX_Merge(src proto.Message) { + xxx_messageInfo_Sysctl.Merge(m, src) +} +func (m *Sysctl) XXX_Size() int { + return m.Size() +} +func (m *Sysctl) XXX_DiscardUnknown() { + xxx_messageInfo_Sysctl.DiscardUnknown(m) +} + +var xxx_messageInfo_Sysctl proto.InternalMessageInfo + +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{188} +} +func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TCPSocketAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TCPSocketAction) XXX_Merge(src proto.Message) { + xxx_messageInfo_TCPSocketAction.Merge(m, src) +} +func (m *TCPSocketAction) XXX_Size() int { + return m.Size() +} +func (m *TCPSocketAction) XXX_DiscardUnknown() { + xxx_messageInfo_TCPSocketAction.DiscardUnknown(m) +} + +var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo + +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{189} +} +func (m *Taint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Taint) XXX_Merge(src proto.Message) { + xxx_messageInfo_Taint.Merge(m, src) +} +func (m *Taint) XXX_Size() int { + return m.Size() +} +func (m *Taint) XXX_DiscardUnknown() { + xxx_messageInfo_Taint.DiscardUnknown(m) +} + +var xxx_messageInfo_Taint proto.InternalMessageInfo + +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{190} +} +func (m *Toleration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Toleration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Toleration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Toleration.Merge(m, src) +} +func (m *Toleration) XXX_Size() int { + return m.Size() +} +func (m *Toleration) XXX_DiscardUnknown() { + xxx_messageInfo_Toleration.DiscardUnknown(m) +} + +var xxx_messageInfo_Toleration proto.InternalMessageInfo + +func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } +func (*TopologySelectorLabelRequirement) ProtoMessage() {} +func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { + return 
fileDescriptor_83c10c24ec417dc9, []int{191} +} +func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorLabelRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TopologySelectorLabelRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorLabelRequirement.Merge(m, src) +} +func (m *TopologySelectorLabelRequirement) XXX_Size() int { + return m.Size() +} +func (m *TopologySelectorLabelRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorLabelRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo + +func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } +func (*TopologySelectorTerm) ProtoMessage() {} +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{192} +} +func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TopologySelectorTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySelectorTerm.Merge(m, src) +} +func (m *TopologySelectorTerm) XXX_Size() int { + return m.Size() +} +func (m *TopologySelectorTerm) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySelectorTerm.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo + +func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } +func (*TopologySpreadConstraint) ProtoMessage() {} +func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{193} +} +func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TopologySpreadConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TopologySpreadConstraint) XXX_Merge(src proto.Message) { + xxx_messageInfo_TopologySpreadConstraint.Merge(m, src) +} +func (m *TopologySpreadConstraint) XXX_Size() int { + return m.Size() +} +func (m *TopologySpreadConstraint) XXX_DiscardUnknown() { + xxx_messageInfo_TopologySpreadConstraint.DiscardUnknown(m) +} + +var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo + +func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } +func (*TypedLocalObjectReference) ProtoMessage() {} +func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{194} +} +func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypedLocalObjectReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypedLocalObjectReference.Merge(m, src) +} +func (m *TypedLocalObjectReference) XXX_Size() int { + return m.Size() +} +func (m *TypedLocalObjectReference) XXX_DiscardUnknown() { + 
xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m) +} + +var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo + +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{195} +} +func (m *Volume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Volume) XXX_Merge(src proto.Message) { + xxx_messageInfo_Volume.Merge(m, src) +} +func (m *Volume) XXX_Size() int { + return m.Size() +} +func (m *Volume) XXX_DiscardUnknown() { + xxx_messageInfo_Volume.DiscardUnknown(m) +} + +var xxx_messageInfo_Volume proto.InternalMessageInfo + +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{196} +} +func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeDevice) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeDevice.Merge(m, src) +} +func (m *VolumeDevice) XXX_Size() int { + return m.Size() +} +func (m *VolumeDevice) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeDevice.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo + +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{197} +} +func (m *VolumeMount) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeMount) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeMount.Merge(m, src) +} +func (m *VolumeMount) XXX_Size() int { + return m.Size() +} +func (m *VolumeMount) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeMount.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeMount proto.InternalMessageInfo + +func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } +func (*VolumeNodeAffinity) ProtoMessage() {} +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{198} +} +func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeNodeAffinity) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeAffinity.Merge(m, src) +} +func (m *VolumeNodeAffinity) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeAffinity) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeAffinity.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo + +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() 
([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{199} +} +func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeProjection) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeProjection.Merge(m, src) +} +func (m *VolumeProjection) XXX_Size() int { + return m.Size() +} +func (m *VolumeProjection) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeProjection.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo + +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{200} +} +func (m *VolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeSource.Merge(m, src) +} +func (m *VolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeSource proto.InternalMessageInfo + +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{201} +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.Merge(m, src) +} +func (m *VsphereVirtualDiskVolumeSource) XXX_Size() int { + return m.Size() +} +func (m *VsphereVirtualDiskVolumeSource) XXX_DiscardUnknown() { + xxx_messageInfo_VsphereVirtualDiskVolumeSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VsphereVirtualDiskVolumeSource proto.InternalMessageInfo + +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{202} +} +func (m *WeightedPodAffinityTerm) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WeightedPodAffinityTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WeightedPodAffinityTerm) XXX_Merge(src proto.Message) { + xxx_messageInfo_WeightedPodAffinityTerm.Merge(m, src) +} +func (m *WeightedPodAffinityTerm) XXX_Size() int { + return m.Size() +} +func (m *WeightedPodAffinityTerm) XXX_DiscardUnknown() { + xxx_messageInfo_WeightedPodAffinityTerm.DiscardUnknown(m) +} + +var xxx_messageInfo_WeightedPodAffinityTerm proto.InternalMessageInfo + +func (m 
*WindowsSecurityContextOptions) Reset() { *m = WindowsSecurityContextOptions{} } +func (*WindowsSecurityContextOptions) ProtoMessage() {} +func (*WindowsSecurityContextOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_83c10c24ec417dc9, []int{203} +} +func (m *WindowsSecurityContextOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WindowsSecurityContextOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WindowsSecurityContextOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_WindowsSecurityContextOptions.Merge(m, src) +} +func (m *WindowsSecurityContextOptions) XXX_Size() int { + return m.Size() +} +func (m *WindowsSecurityContextOptions) XXX_DiscardUnknown() { + xxx_messageInfo_WindowsSecurityContextOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_WindowsSecurityContextOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*CSIVolumeSource)(nil), "k8s.io.api.core.v1.CSIVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.CSIVolumeSource.VolumeAttributesEntry") + proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") + proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") + proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") + proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.ConfigMap.BinaryDataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ConfigMap.DataEntry") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") + 
proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") + proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") + proto.RegisterType((*EphemeralContainer)(nil), "k8s.io.api.core.v1.EphemeralContainer") + proto.RegisterType((*EphemeralContainerCommon)(nil), "k8s.io.api.core.v1.EphemeralContainerCommon") + proto.RegisterType((*EphemeralContainers)(nil), "k8s.io.api.core.v1.EphemeralContainers") + proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") + proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource.OptionsEntry") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.FlexVolumeSource.OptionsEntry") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + 
proto.RegisterType((*GlusterfsPersistentVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsPersistentVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.DefaultRequestEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MaxLimitRequestRatioEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.LimitRangeItem.MinEntry") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") + proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") + proto.RegisterType((*NamespaceCondition)(nil), "k8s.io.api.core.v1.NamespaceCondition") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") + proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") + proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeResources.CapacityEntry") + 
proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.AllocatableEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.NodeStatus.CapacityEntry") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus.CapacityEntry") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") + proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec.CapacityEntry") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodIP)(nil), "k8s.io.api.core.v1.PodIP") + proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") + proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodSecurityContext)(nil), 
"k8s.io.api.core.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.PodSpec.NodeSelectorEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.PodSpec.OverheadEntry") + proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ReplicationControllerSpec.SelectorEntry") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec.HardEntry") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.HardEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus.UsedEntry") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.LimitsEntry") + proto.RegisterMapType((ResourceList)(nil), "k8s.io.api.core.v1.ResourceRequirements.RequestsEntry") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") + 
proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") + proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") + proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") + proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") + proto.RegisterMapType((map[string][]byte)(nil), "k8s.io.api.core.v1.Secret.DataEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.Secret.StringDataEntry") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") + proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") + proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.core.v1.ServiceSpec.SelectorEntry") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") + proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") + proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") + proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") + proto.RegisterType((*TopologySpreadConstraint)(nil), "k8s.io.api.core.v1.TopologySpreadConstraint") + proto.RegisterType((*TypedLocalObjectReference)(nil), "k8s.io.api.core.v1.TypedLocalObjectReference") + proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), 
"k8s.io.api.core.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") + proto.RegisterType((*WindowsSecurityContextOptions)(nil), "k8s.io.api.core.v1.WindowsSecurityContextOptions") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptor_83c10c24ec417dc9) +} + +var fileDescriptor_83c10c24ec417dc9 = []byte{ + // 13727 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0xbd, 0x7b, 0x70, 0x24, 0x49, + 0x5a, 0x18, 0x7e, 0xd5, 0xad, 0x47, 0xf7, 0xa7, 0x77, 0xce, 0x63, 0x35, 0xda, 0x99, 0xd1, 0x6c, + 0xed, 0xdd, 0xec, 0xec, 0xed, 0xae, 0xe6, 0xf6, 0x75, 0xbb, 0xdc, 0xde, 0x2d, 0x48, 0x6a, 0x69, + 0xa6, 0x77, 0x46, 0x9a, 0xde, 0x6c, 0xcd, 0xcc, 0xdd, 0xb1, 0x77, 0xbf, 0x2b, 0x75, 0xa5, 0xa4, + 0x3a, 0x75, 0x57, 0xf5, 0x56, 0x55, 0x6b, 0x46, 0xfb, 0x83, 0x30, 0x3e, 0x9e, 0x67, 0xc0, 0x71, + 0x76, 0x10, 0x7e, 0x00, 0x41, 0x38, 0x30, 0x0e, 0xc0, 0xd8, 0x0e, 0x63, 0x30, 0x60, 0x0e, 0x1b, + 0x0c, 0xb6, 0x03, 0xfb, 0x0f, 0x8c, 0x09, 0xdb, 0x47, 0x04, 0x61, 0x19, 0x06, 0x87, 0x89, 0xfb, + 0xc3, 0x40, 0x18, 0xfc, 0x87, 0x65, 0xc2, 0x38, 0xf2, 0x59, 0x99, 0xd5, 0x55, 0xdd, 0xad, 0x59, + 0x8d, 0x6e, 0xb9, 0xd8, 0xff, 0xba, 0xf3, 0xfb, 0xf2, 0xcb, 0xac, 0x7c, 0x7c, 0xf9, 0xe5, 0x97, + 0xdf, 0x03, 0x5e, 0xdb, 0x7d, 0x35, 0x5a, 0xf0, 0x82, 0xab, 0xbb, 0x9d, 0x4d, 0x12, 0xfa, 0x24, + 0x26, 0xd1, 0xd5, 0x3d, 0xe2, 0xbb, 0x41, 0x78, 0x55, 0x00, 0x9c, 0xb6, 0x77, 0xb5, 0x11, 0x84, + 0xe4, 0xea, 0xde, 0xf3, 0x57, 0xb7, 0x89, 0x4f, 0x42, 0x27, 0x26, 0xee, 0x42, 0x3b, 0x0c, 0xe2, + 0x00, 0x21, 0x8e, 0xb3, 0xe0, 0xb4, 0xbd, 0x05, 0x8a, 0xb3, 0xb0, 0xf7, 0xfc, 0xdc, 0x73, 0xdb, + 0x5e, 0xbc, 0xd3, 0xd9, 0x5c, 0x68, 0x04, 0xad, 0xab, 0xdb, 0xc1, 0x76, 0x70, 0x95, 0xa1, 0x6e, + 0x76, 0xb6, 0xd8, 0x3f, 0xf6, 0x87, 0xfd, 0xe2, 0x24, 0xe6, 0x5e, 0x4a, 0x9a, 0x69, 0x39, 0x8d, + 0x1d, 0xcf, 0x27, 0xe1, 0xfe, 0xd5, 0xf6, 0xee, 0x36, 0x6b, 0x37, 0x24, 0x51, 0xd0, 0x09, 0x1b, + 0x24, 0xdd, 0x70, 0xcf, 0x5a, 0xd1, 0xd5, 0x16, 0x89, 0x9d, 0x8c, 0xee, 0xce, 0x5d, 0xcd, 0xab, + 0x15, 0x76, 0xfc, 0xd8, 0x6b, 0x75, 0x37, 0xf3, 0xd1, 0x7e, 0x15, 0xa2, 0xc6, 0x0e, 0x69, 0x39, + 0x5d, 0xf5, 0x5e, 0xcc, 0xab, 0xd7, 0x89, 0xbd, 0xe6, 0x55, 0xcf, 0x8f, 0xa3, 0x38, 0x4c, 0x57, + 0xb2, 0xbf, 0x62, 0xc1, 0xa5, 0xc5, 0xbb, 0xf5, 0x95, 0xa6, 0x13, 0xc5, 0x5e, 0x63, 0xa9, 0x19, + 0x34, 0x76, 0xeb, 0x71, 0x10, 0x92, 0x3b, 0x41, 0xb3, 0xd3, 0x22, 0x75, 0x36, 0x10, 0xe8, 0x59, + 0x28, 0xed, 0xb1, 0xff, 0xd5, 0xca, 0xac, 0x75, 0xc9, 0xba, 0x52, 0x5e, 0x9a, 0xfe, 0xf5, 0x83, + 0xf9, 0x0f, 0x3c, 0x38, 0x98, 0x2f, 0xdd, 0x11, 0xe5, 0x58, 0x61, 0xa0, 0xcb, 0x30, 0xb2, 0x15, + 0x6d, 0xec, 0xb7, 0xc9, 0x6c, 0x81, 0xe1, 0x4e, 0x0a, 0xdc, 0x91, 0xd5, 0x3a, 0x2d, 0xc5, 0x02, + 0x8a, 0xae, 0x42, 0xb9, 0xed, 0x84, 0xb1, 0x17, 0x7b, 0x81, 0x3f, 0x5b, 0xbc, 0x64, 0x5d, 0x19, + 0x5e, 0x9a, 0x11, 0xa8, 0xe5, 0x9a, 0x04, 0xe0, 0x04, 0x87, 0x76, 0x23, 0x24, 0x8e, 0x7b, 0xcb, + 0x6f, 0xee, 0xcf, 0x0e, 0x5d, 0xb2, 0xae, 0x94, 0x92, 0x6e, 0x60, 0x51, 0x8e, 0x15, 0x86, 0xfd, + 0x83, 0x05, 0x28, 0x2d, 0x6e, 0x6d, 0x79, 0xbe, 0x17, 0xef, 0xa3, 0x3b, 0x30, 0xee, 0x07, 0x2e, + 0x91, 0xff, 0xd9, 0x57, 0x8c, 0xbd, 0x70, 0x69, 0xa1, 0x7b, 0x29, 0x2d, 0xac, 0x6b, 0x78, 0x4b, + 0xd3, 0x0f, 0x0e, 0xe6, 0xc7, 0xf5, 0x12, 0x6c, 0xd0, 0x41, 0x18, 0xc6, 0xda, 0x81, 0xab, 0xc8, + 0x16, 0x18, 
0xd9, 0xf9, 0x2c, 0xb2, 0xb5, 0x04, 0x6d, 0x69, 0xea, 0xc1, 0xc1, 0xfc, 0x98, 0x56, + 0x80, 0x75, 0x22, 0x68, 0x13, 0xa6, 0xe8, 0x5f, 0x3f, 0xf6, 0x14, 0xdd, 0x22, 0xa3, 0xfb, 0x64, + 0x1e, 0x5d, 0x0d, 0x75, 0xe9, 0xd4, 0x83, 0x83, 0xf9, 0xa9, 0x54, 0x21, 0x4e, 0x13, 0xb4, 0xdf, + 0x81, 0xc9, 0xc5, 0x38, 0x76, 0x1a, 0x3b, 0xc4, 0xe5, 0x33, 0x88, 0x5e, 0x82, 0x21, 0xdf, 0x69, + 0x11, 0x31, 0xbf, 0x97, 0xc4, 0xc0, 0x0e, 0xad, 0x3b, 0x2d, 0x72, 0x78, 0x30, 0x3f, 0x7d, 0xdb, + 0xf7, 0xde, 0xee, 0x88, 0x55, 0x41, 0xcb, 0x30, 0xc3, 0x46, 0x2f, 0x00, 0xb8, 0x64, 0xcf, 0x6b, + 0x90, 0x9a, 0x13, 0xef, 0x88, 0xf9, 0x46, 0xa2, 0x2e, 0x54, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0x7d, + 0x28, 0x2f, 0xee, 0x05, 0x9e, 0x5b, 0x0b, 0xdc, 0x08, 0xed, 0xc2, 0x54, 0x3b, 0x24, 0x5b, 0x24, + 0x54, 0x45, 0xb3, 0xd6, 0xa5, 0xe2, 0x95, 0xb1, 0x17, 0xae, 0x64, 0x7e, 0xac, 0x89, 0xba, 0xe2, + 0xc7, 0xe1, 0xfe, 0xd2, 0x63, 0xa2, 0xbd, 0xa9, 0x14, 0x14, 0xa7, 0x29, 0xdb, 0xff, 0xaa, 0x00, + 0x67, 0x16, 0xdf, 0xe9, 0x84, 0xa4, 0xe2, 0x45, 0xbb, 0xe9, 0x15, 0xee, 0x7a, 0xd1, 0xee, 0x7a, + 0x32, 0x02, 0x6a, 0x69, 0x55, 0x44, 0x39, 0x56, 0x18, 0xe8, 0x39, 0x18, 0xa5, 0xbf, 0x6f, 0xe3, + 0xaa, 0xf8, 0xe4, 0x53, 0x02, 0x79, 0xac, 0xe2, 0xc4, 0x4e, 0x85, 0x83, 0xb0, 0xc4, 0x41, 0x6b, + 0x30, 0xd6, 0x60, 0x1b, 0x72, 0x7b, 0x2d, 0x70, 0x09, 0x9b, 0xcc, 0xf2, 0xd2, 0x33, 0x14, 0x7d, + 0x39, 0x29, 0x3e, 0x3c, 0x98, 0x9f, 0xe5, 0x7d, 0x13, 0x24, 0x34, 0x18, 0xd6, 0xeb, 0x23, 0x5b, + 0xed, 0xaf, 0x21, 0x46, 0x09, 0x32, 0xf6, 0xd6, 0x15, 0x6d, 0xab, 0x0c, 0xb3, 0xad, 0x32, 0x9e, + 0xbd, 0x4d, 0xd0, 0xf3, 0x30, 0xb4, 0xeb, 0xf9, 0xee, 0xec, 0x08, 0xa3, 0x75, 0x81, 0xce, 0xf9, + 0x0d, 0xcf, 0x77, 0x0f, 0x0f, 0xe6, 0x67, 0x8c, 0xee, 0xd0, 0x42, 0xcc, 0x50, 0xed, 0x3f, 0xb1, + 0x60, 0x9e, 0xc1, 0x56, 0xbd, 0x26, 0xa9, 0x91, 0x30, 0xf2, 0xa2, 0x98, 0xf8, 0xb1, 0x31, 0xa0, + 0x2f, 0x00, 0x44, 0xa4, 0x11, 0x92, 0x58, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c, + 0xca, 0x10, 0xa2, 0x1d, 0x27, 0x64, 0xeb, 0x4b, 0x0c, 0xac, 0x62, 0x08, 0x75, 0x09, 0xc0, 0x09, + 0x8e, 0xc1, 0x10, 0x8a, 0xfd, 0x18, 0x02, 0xfa, 0x04, 0x4c, 0x25, 0x8d, 0x45, 0x6d, 0xa7, 0x21, + 0x07, 0x90, 0x6d, 0x99, 0xba, 0x09, 0xc2, 0x69, 0x5c, 0xfb, 0xef, 0x5b, 0x62, 0xf1, 0xd0, 0xaf, + 0x7e, 0x8f, 0x7f, 0xab, 0xfd, 0x0b, 0x16, 0x8c, 0x2e, 0x79, 0xbe, 0xeb, 0xf9, 0xdb, 0xe8, 0x73, + 0x50, 0xa2, 0x67, 0x93, 0xeb, 0xc4, 0x8e, 0xe0, 0x7b, 0x1f, 0xd1, 0xf6, 0x96, 0x3a, 0x2a, 0x16, + 0xda, 0xbb, 0xdb, 0xb4, 0x20, 0x5a, 0xa0, 0xd8, 0x74, 0xb7, 0xdd, 0xda, 0xfc, 0x3c, 0x69, 0xc4, + 0x6b, 0x24, 0x76, 0x92, 0xcf, 0x49, 0xca, 0xb0, 0xa2, 0x8a, 0x6e, 0xc0, 0x48, 0xec, 0x84, 0xdb, + 0x24, 0x16, 0x0c, 0x30, 0x93, 0x51, 0xf1, 0x9a, 0x98, 0xee, 0x48, 0xe2, 0x37, 0x48, 0x72, 0x2c, + 0x6c, 0xb0, 0xaa, 0x58, 0x90, 0xb0, 0xbf, 0x7f, 0x14, 0xce, 0x2d, 0xd7, 0xab, 0x39, 0xeb, 0xea, + 0x32, 0x8c, 0xb8, 0xa1, 0xb7, 0x47, 0x42, 0x31, 0xce, 0x8a, 0x4a, 0x85, 0x95, 0x62, 0x01, 0x45, + 0xaf, 0xc2, 0x38, 0x3f, 0x90, 0xae, 0x3b, 0xbe, 0xdb, 0x94, 0x43, 0x7c, 0x5a, 0x60, 0x8f, 0xdf, + 0xd1, 0x60, 0xd8, 0xc0, 0x3c, 0xe2, 0xa2, 0xba, 0x9c, 0xda, 0x8c, 0x79, 0x87, 0xdd, 0x17, 0x2d, + 0x98, 0xe6, 0xcd, 0x2c, 0xc6, 0x71, 0xe8, 0x6d, 0x76, 0x62, 0x12, 0xcd, 0x0e, 0x33, 0x4e, 0xb7, + 0x9c, 0x35, 0x5a, 0xb9, 0x23, 0xb0, 0x70, 0x27, 0x45, 0x85, 0x33, 0xc1, 0x59, 0xd1, 0xee, 0x74, + 0x1a, 0x8c, 0xbb, 0x9a, 0x45, 0xdf, 0x6e, 0xc1, 0x5c, 0x23, 0xf0, 0xe3, 0x30, 0x68, 0x36, 0x49, + 0x58, 0xeb, 0x6c, 0x36, 0xbd, 0x68, 0x87, 0xaf, 0x53, 0x4c, 0xb6, 0x18, 0x27, 0xc8, 0x99, 0x43, + 0x85, 0x24, 0xe6, 0xf0, 0xe2, 0x83, 
0x83, 0xf9, 0xb9, 0xe5, 0x5c, 0x52, 0xb8, 0x47, 0x33, 0x68, + 0x17, 0x10, 0x3d, 0x4a, 0xeb, 0xb1, 0xb3, 0x4d, 0x92, 0xc6, 0x47, 0x07, 0x6f, 0xfc, 0xec, 0x83, + 0x83, 0x79, 0xb4, 0xde, 0x45, 0x02, 0x67, 0x90, 0x45, 0x6f, 0xc3, 0x69, 0x5a, 0xda, 0xf5, 0xad, + 0xa5, 0xc1, 0x9b, 0x9b, 0x7d, 0x70, 0x30, 0x7f, 0x7a, 0x3d, 0x83, 0x08, 0xce, 0x24, 0x8d, 0xbe, + 0xcd, 0x82, 0x73, 0xc9, 0xe7, 0xaf, 0xdc, 0x6f, 0x3b, 0xbe, 0x9b, 0x34, 0x5c, 0x1e, 0xbc, 0x61, + 0xca, 0x93, 0xcf, 0x2d, 0xe7, 0x51, 0xc2, 0xf9, 0x8d, 0xcc, 0x2d, 0xc3, 0x99, 0xcc, 0xd5, 0x82, + 0xa6, 0xa1, 0xb8, 0x4b, 0xb8, 0x14, 0x54, 0xc6, 0xf4, 0x27, 0x3a, 0x0d, 0xc3, 0x7b, 0x4e, 0xb3, + 0x23, 0x36, 0x0a, 0xe6, 0x7f, 0x3e, 0x56, 0x78, 0xd5, 0xb2, 0xff, 0x75, 0x11, 0xa6, 0x96, 0xeb, + 0xd5, 0x87, 0xda, 0x85, 0xfa, 0x31, 0x54, 0xe8, 0x79, 0x0c, 0x25, 0x87, 0x5a, 0x31, 0xf7, 0x50, + 0xfb, 0x4b, 0x19, 0x5b, 0x68, 0x88, 0x6d, 0xa1, 0x6f, 0xc8, 0xd9, 0x42, 0xc7, 0xbc, 0x71, 0xf6, + 0x72, 0x56, 0xd1, 0x30, 0x9b, 0xcc, 0x4c, 0x89, 0xe5, 0x66, 0xd0, 0x70, 0x9a, 0x69, 0xd6, 0x77, + 0xc4, 0xa5, 0x74, 0x3c, 0xf3, 0xd8, 0x80, 0xf1, 0x65, 0xa7, 0xed, 0x6c, 0x7a, 0x4d, 0x2f, 0xf6, + 0x48, 0x84, 0x9e, 0x82, 0xa2, 0xe3, 0xba, 0x4c, 0xda, 0x2a, 0x2f, 0x9d, 0x79, 0x70, 0x30, 0x5f, + 0x5c, 0x74, 0xe9, 0xb1, 0x0f, 0x0a, 0x6b, 0x1f, 0x53, 0x0c, 0xf4, 0x61, 0x18, 0x72, 0xc3, 0xa0, + 0x3d, 0x5b, 0x60, 0x98, 0x74, 0xd7, 0x0d, 0x55, 0xc2, 0xa0, 0x9d, 0x42, 0x65, 0x38, 0xf6, 0xaf, + 0x14, 0xe0, 0xfc, 0x32, 0x69, 0xef, 0xac, 0xd6, 0x73, 0xf8, 0xf7, 0x15, 0x28, 0xb5, 0x02, 0xdf, + 0x8b, 0x83, 0x30, 0x12, 0x4d, 0xb3, 0x15, 0xb1, 0x26, 0xca, 0xb0, 0x82, 0xa2, 0x4b, 0x30, 0xd4, + 0x4e, 0x84, 0xca, 0x71, 0x29, 0x90, 0x32, 0x71, 0x92, 0x41, 0x28, 0x46, 0x27, 0x22, 0xa1, 0x58, + 0x31, 0x0a, 0xe3, 0x76, 0x44, 0x42, 0xcc, 0x20, 0xc9, 0xc9, 0x4c, 0xcf, 0x6c, 0xc1, 0xa1, 0x53, + 0x27, 0x33, 0x85, 0x60, 0x0d, 0x0b, 0xd5, 0xa0, 0x1c, 0xa5, 0x66, 0x76, 0xa0, 0x6d, 0x3a, 0xc1, + 0x8e, 0x6e, 0x35, 0x93, 0x09, 0x11, 0xe3, 0x44, 0x19, 0xe9, 0x7b, 0x74, 0x7f, 0xb9, 0x00, 0x88, + 0x0f, 0xe1, 0x5f, 0xb0, 0x81, 0xbb, 0xdd, 0x3d, 0x70, 0x83, 0x6f, 0x89, 0xe3, 0x1a, 0xbd, 0x3f, + 0xb5, 0xe0, 0xfc, 0xb2, 0xe7, 0xbb, 0x24, 0xcc, 0x59, 0x80, 0x8f, 0xe6, 0x2e, 0x7b, 0x34, 0xa1, + 0xc1, 0x58, 0x62, 0x43, 0xc7, 0xb0, 0xc4, 0xec, 0x3f, 0xb2, 0x00, 0xf1, 0xcf, 0x7e, 0xcf, 0x7d, + 0xec, 0xed, 0xee, 0x8f, 0x3d, 0x86, 0x65, 0x61, 0xdf, 0x84, 0xc9, 0xe5, 0xa6, 0x47, 0xfc, 0xb8, + 0x5a, 0x5b, 0x0e, 0xfc, 0x2d, 0x6f, 0x1b, 0x7d, 0x0c, 0x26, 0x63, 0xaf, 0x45, 0x82, 0x4e, 0x5c, + 0x27, 0x8d, 0xc0, 0x67, 0x37, 0x49, 0xeb, 0xca, 0xf0, 0x12, 0x7a, 0x70, 0x30, 0x3f, 0xb9, 0x61, + 0x40, 0x70, 0x0a, 0xd3, 0xfe, 0x1d, 0x3a, 0x7e, 0x41, 0xab, 0x1d, 0xf8, 0xc4, 0x8f, 0x97, 0x03, + 0xdf, 0xe5, 0x1a, 0x87, 0x8f, 0xc1, 0x50, 0x4c, 0xc7, 0x83, 0x8f, 0xdd, 0x65, 0xb9, 0x51, 0xe8, + 0x28, 0x1c, 0x1e, 0xcc, 0x9f, 0xed, 0xae, 0xc1, 0xc6, 0x89, 0xd5, 0x41, 0xdf, 0x00, 0x23, 0x51, + 0xec, 0xc4, 0x9d, 0x48, 0x8c, 0xe6, 0x13, 0x72, 0x34, 0xeb, 0xac, 0xf4, 0xf0, 0x60, 0x7e, 0x4a, + 0x55, 0xe3, 0x45, 0x58, 0x54, 0x40, 0x4f, 0xc3, 0x68, 0x8b, 0x44, 0x91, 0xb3, 0x2d, 0x4f, 0xc3, + 0x29, 0x51, 0x77, 0x74, 0x8d, 0x17, 0x63, 0x09, 0x47, 0x4f, 0xc2, 0x30, 0x09, 0xc3, 0x20, 0x14, + 0x7b, 0x74, 0x42, 0x20, 0x0e, 0xaf, 0xd0, 0x42, 0xcc, 0x61, 0xf6, 0xbf, 0xb7, 0x60, 0x4a, 0xf5, + 0x95, 0xb7, 0x75, 0x02, 0xb7, 0x82, 0x4f, 0x03, 0x34, 0xe4, 0x07, 0x46, 0xec, 0xf4, 0x18, 0x7b, + 0xe1, 0x72, 0xe6, 0x41, 0xdd, 0x35, 0x8c, 0x09, 0x65, 0x55, 0x14, 0x61, 0x8d, 0x9a, 0xfd, 0xcf, + 0x2d, 0x38, 0x95, 0xfa, 0xa2, 0x9b, 0x5e, 0x14, 0xa3, 0xb7, 
0xba, 0xbe, 0x6a, 0x61, 0xb0, 0xaf, + 0xa2, 0xb5, 0xd9, 0x37, 0xa9, 0xa5, 0x2c, 0x4b, 0xb4, 0x2f, 0xba, 0x0e, 0xc3, 0x5e, 0x4c, 0x5a, + 0xf2, 0x63, 0x9e, 0xec, 0xf9, 0x31, 0xbc, 0x57, 0xc9, 0x8c, 0x54, 0x69, 0x4d, 0xcc, 0x09, 0xd8, + 0xbf, 0x52, 0x84, 0x32, 0x5f, 0xb6, 0x6b, 0x4e, 0xfb, 0x04, 0xe6, 0xe2, 0x19, 0x28, 0x7b, 0xad, + 0x56, 0x27, 0x76, 0x36, 0x05, 0x3b, 0x2f, 0xf1, 0xad, 0x55, 0x95, 0x85, 0x38, 0x81, 0xa3, 0x2a, + 0x0c, 0xb1, 0xae, 0xf0, 0xaf, 0x7c, 0x2a, 0xfb, 0x2b, 0x45, 0xdf, 0x17, 0x2a, 0x4e, 0xec, 0x70, + 0x49, 0x4a, 0x9d, 0x23, 0xb4, 0x08, 0x33, 0x12, 0xc8, 0x01, 0xd8, 0xf4, 0x7c, 0x27, 0xdc, 0xa7, + 0x65, 0xb3, 0x45, 0x46, 0xf0, 0xb9, 0xde, 0x04, 0x97, 0x14, 0x3e, 0x27, 0xab, 0x3e, 0x2c, 0x01, + 0x60, 0x8d, 0xe8, 0xdc, 0x2b, 0x50, 0x56, 0xc8, 0x47, 0x11, 0x88, 0xe6, 0x3e, 0x01, 0x53, 0xa9, + 0xb6, 0xfa, 0x55, 0x1f, 0xd7, 0xe5, 0xa9, 0x5f, 0x64, 0x2c, 0x43, 0xf4, 0x7a, 0xc5, 0xdf, 0x13, + 0x2c, 0xf7, 0x1d, 0x38, 0xdd, 0xcc, 0xe0, 0x64, 0x62, 0x5e, 0x07, 0xe7, 0x7c, 0xe7, 0xc5, 0x67, + 0x9f, 0xce, 0x82, 0xe2, 0xcc, 0x36, 0xa8, 0x8c, 0x10, 0xb4, 0xe9, 0x06, 0x71, 0x9a, 0xba, 0xb8, + 0x7d, 0x4b, 0x94, 0x61, 0x05, 0xa5, 0xfc, 0xee, 0xb4, 0xea, 0xfc, 0x0d, 0xb2, 0x5f, 0x27, 0x4d, + 0xd2, 0x88, 0x83, 0xf0, 0x6b, 0xda, 0xfd, 0x0b, 0x7c, 0xf4, 0x39, 0xbb, 0x1c, 0x13, 0x04, 0x8a, + 0x37, 0xc8, 0x3e, 0x9f, 0x0a, 0xfd, 0xeb, 0x8a, 0x3d, 0xbf, 0xee, 0xa7, 0x2d, 0x98, 0x50, 0x5f, + 0x77, 0x02, 0x7c, 0x61, 0xc9, 0xe4, 0x0b, 0x17, 0x7a, 0x2e, 0xf0, 0x1c, 0x8e, 0xf0, 0xe5, 0x02, + 0x9c, 0x53, 0x38, 0xf4, 0x6e, 0xc0, 0xff, 0x88, 0x55, 0x75, 0x15, 0xca, 0xbe, 0xd2, 0x5a, 0x59, + 0xa6, 0xba, 0x28, 0xd1, 0x59, 0x25, 0x38, 0x54, 0xc4, 0xf3, 0x13, 0xd5, 0xd2, 0xb8, 0xae, 0xce, + 0x15, 0xaa, 0xdb, 0x25, 0x28, 0x76, 0x3c, 0x57, 0x1c, 0x30, 0x1f, 0x91, 0xa3, 0x7d, 0xbb, 0x5a, + 0x39, 0x3c, 0x98, 0x7f, 0x22, 0xef, 0x29, 0x81, 0x9e, 0x6c, 0xd1, 0xc2, 0xed, 0x6a, 0x05, 0xd3, + 0xca, 0x68, 0x11, 0xa6, 0xe4, 0x6b, 0xc9, 0x1d, 0x2a, 0x6e, 0x05, 0xbe, 0x38, 0x87, 0x94, 0x4e, + 0x16, 0x9b, 0x60, 0x9c, 0xc6, 0x47, 0x15, 0x98, 0xde, 0xed, 0x6c, 0x92, 0x26, 0x89, 0xf9, 0x07, + 0xdf, 0x20, 0x5c, 0x63, 0x59, 0x4e, 0x6e, 0x66, 0x37, 0x52, 0x70, 0xdc, 0x55, 0xc3, 0xfe, 0x73, + 0x76, 0x1e, 0x88, 0xd1, 0xab, 0x85, 0x01, 0x5d, 0x58, 0x94, 0xfa, 0xd7, 0x72, 0x39, 0x0f, 0xb2, + 0x2a, 0x6e, 0x90, 0xfd, 0x8d, 0x80, 0x4a, 0xe6, 0xd9, 0xab, 0xc2, 0x58, 0xf3, 0x43, 0x3d, 0xd7, + 0xfc, 0xcf, 0x16, 0xe0, 0x8c, 0x1a, 0x01, 0x43, 0x08, 0xfc, 0x8b, 0x3e, 0x06, 0xcf, 0xc3, 0x98, + 0x4b, 0xb6, 0x9c, 0x4e, 0x33, 0x56, 0xea, 0xf3, 0x61, 0xfe, 0x84, 0x52, 0x49, 0x8a, 0xb1, 0x8e, + 0x73, 0x84, 0x61, 0xfb, 0x5f, 0x63, 0xec, 0x20, 0x8e, 0x1d, 0xba, 0xc6, 0xd5, 0xae, 0xb1, 0x72, + 0x77, 0xcd, 0x93, 0x30, 0xec, 0xb5, 0xa8, 0x60, 0x56, 0x30, 0xe5, 0xad, 0x2a, 0x2d, 0xc4, 0x1c, + 0x86, 0x3e, 0x04, 0xa3, 0x8d, 0xa0, 0xd5, 0x72, 0x7c, 0x97, 0x1d, 0x79, 0xe5, 0xa5, 0x31, 0x2a, + 0xbb, 0x2d, 0xf3, 0x22, 0x2c, 0x61, 0xe8, 0x3c, 0x0c, 0x39, 0xe1, 0x36, 0xd7, 0x61, 0x94, 0x97, + 0x4a, 0xb4, 0xa5, 0xc5, 0x70, 0x3b, 0xc2, 0xac, 0x94, 0x5e, 0xc1, 0xee, 0x05, 0xe1, 0xae, 0xe7, + 0x6f, 0x57, 0xbc, 0x50, 0x6c, 0x09, 0x75, 0x16, 0xde, 0x55, 0x10, 0xac, 0x61, 0xa1, 0x55, 0x18, + 0x6e, 0x07, 0x61, 0x1c, 0xcd, 0x8e, 0xb0, 0xe1, 0x7e, 0x22, 0x87, 0x11, 0xf1, 0xaf, 0xad, 0x05, + 0x61, 0x9c, 0x7c, 0x00, 0xfd, 0x17, 0x61, 0x5e, 0x1d, 0xdd, 0x84, 0x51, 0xe2, 0xef, 0xad, 0x86, + 0x41, 0x6b, 0xf6, 0x54, 0x3e, 0xa5, 0x15, 0x8e, 0xc2, 0x97, 0x59, 0x22, 0xa3, 0x8a, 0x62, 0x2c, + 0x49, 0xa0, 0x6f, 0x80, 0x22, 0xf1, 0xf7, 0x66, 0x47, 0x19, 0xa5, 0xb9, 0x1c, 0x4a, 
0x77, 0x9c, + 0x30, 0xe1, 0xf9, 0x2b, 0xfe, 0x1e, 0xa6, 0x75, 0xd0, 0xa7, 0xa0, 0x2c, 0x19, 0x46, 0x24, 0x94, + 0x75, 0x99, 0x0b, 0x56, 0xb2, 0x19, 0x4c, 0xde, 0xee, 0x78, 0x21, 0x69, 0x11, 0x3f, 0x8e, 0x12, + 0x0e, 0x29, 0xa1, 0x11, 0x4e, 0xa8, 0xa1, 0x4f, 0x49, 0x0d, 0xf1, 0x5a, 0xd0, 0xf1, 0xe3, 0x68, + 0xb6, 0xcc, 0xba, 0x97, 0xf9, 0x76, 0x77, 0x27, 0xc1, 0x4b, 0xab, 0x90, 0x79, 0x65, 0x6c, 0x90, + 0x42, 0x9f, 0x81, 0x09, 0xfe, 0x9f, 0xbf, 0x80, 0x45, 0xb3, 0x67, 0x18, 0xed, 0x4b, 0xf9, 0xb4, + 0x39, 0xe2, 0xd2, 0x19, 0x41, 0x7c, 0x42, 0x2f, 0x8d, 0xb0, 0x49, 0x0d, 0x61, 0x98, 0x68, 0x7a, + 0x7b, 0xc4, 0x27, 0x51, 0x54, 0x0b, 0x83, 0x4d, 0x32, 0x0b, 0x6c, 0x60, 0xce, 0x65, 0xbf, 0x98, + 0x05, 0x9b, 0x64, 0x69, 0x86, 0xd2, 0xbc, 0xa9, 0xd7, 0xc1, 0x26, 0x09, 0x74, 0x1b, 0x26, 0xe9, + 0x8d, 0xcd, 0x4b, 0x88, 0x8e, 0xf5, 0x23, 0xca, 0xee, 0x55, 0xd8, 0xa8, 0x84, 0x53, 0x44, 0xd0, + 0x2d, 0x18, 0x8f, 0x62, 0x27, 0x8c, 0x3b, 0x6d, 0x4e, 0xf4, 0x6c, 0x3f, 0xa2, 0xec, 0xc1, 0xb5, + 0xae, 0x55, 0xc1, 0x06, 0x01, 0xf4, 0x06, 0x94, 0x9b, 0xde, 0x16, 0x69, 0xec, 0x37, 0x9a, 0x64, + 0x76, 0x9c, 0x51, 0xcb, 0x64, 0x2a, 0x37, 0x25, 0x12, 0x97, 0x73, 0xd5, 0x5f, 0x9c, 0x54, 0x47, + 0x77, 0xe0, 0x6c, 0x4c, 0xc2, 0x96, 0xe7, 0x3b, 0x94, 0x19, 0x88, 0xab, 0x15, 0x7b, 0xc8, 0x9c, + 0x60, 0xbb, 0xed, 0xa2, 0x98, 0x8d, 0xb3, 0x1b, 0x99, 0x58, 0x38, 0xa7, 0x36, 0xba, 0x0f, 0xb3, + 0x19, 0x90, 0xa0, 0xe9, 0x35, 0xf6, 0x67, 0x4f, 0x33, 0xca, 0x1f, 0x17, 0x94, 0x67, 0x37, 0x72, + 0xf0, 0x0e, 0x7b, 0xc0, 0x70, 0x2e, 0x75, 0x74, 0x0b, 0xa6, 0x18, 0x07, 0xaa, 0x75, 0x9a, 0x4d, + 0xd1, 0xe0, 0x24, 0x6b, 0xf0, 0x43, 0xf2, 0x3c, 0xae, 0x9a, 0xe0, 0xc3, 0x83, 0x79, 0x48, 0xfe, + 0xe1, 0x74, 0x6d, 0xb4, 0xc9, 0xde, 0xcc, 0x3a, 0xa1, 0x17, 0xef, 0x53, 0xbe, 0x41, 0xee, 0xc7, + 0xb3, 0x53, 0x3d, 0xf5, 0x15, 0x3a, 0xaa, 0x7a, 0x58, 0xd3, 0x0b, 0x71, 0x9a, 0x20, 0x65, 0xa9, + 0x51, 0xec, 0x7a, 0xfe, 0xec, 0x34, 0xbf, 0x97, 0x48, 0x8e, 0x54, 0xa7, 0x85, 0x98, 0xc3, 0xd8, + 0x7b, 0x19, 0xfd, 0x71, 0x8b, 0x9e, 0x5c, 0x33, 0x0c, 0x31, 0x79, 0x2f, 0x93, 0x00, 0x9c, 0xe0, + 0x50, 0x61, 0x32, 0x8e, 0xf7, 0x67, 0x11, 0x43, 0x55, 0x8c, 0x65, 0x63, 0xe3, 0x53, 0x98, 0x96, + 0xdb, 0x9b, 0x30, 0xa9, 0x18, 0x21, 0x1b, 0x13, 0x34, 0x0f, 0xc3, 0x4c, 0x7c, 0x12, 0xda, 0xb5, + 0x32, 0xed, 0x02, 0x13, 0xad, 0x30, 0x2f, 0x67, 0x5d, 0xf0, 0xde, 0x21, 0x4b, 0xfb, 0x31, 0xe1, + 0x77, 0xfa, 0xa2, 0xd6, 0x05, 0x09, 0xc0, 0x09, 0x8e, 0xfd, 0x7f, 0xb9, 0x18, 0x9a, 0x70, 0xdb, + 0x01, 0xce, 0x97, 0x67, 0xa1, 0xb4, 0x13, 0x44, 0x31, 0xc5, 0x66, 0x6d, 0x0c, 0x27, 0x82, 0xe7, + 0x75, 0x51, 0x8e, 0x15, 0x06, 0x7a, 0x0d, 0x26, 0x1a, 0x7a, 0x03, 0xe2, 0x70, 0x54, 0x6c, 0xc4, + 0x68, 0x1d, 0x9b, 0xb8, 0xe8, 0x55, 0x28, 0x31, 0x1b, 0x90, 0x46, 0xd0, 0x14, 0x52, 0x9b, 0x3c, + 0xe1, 0x4b, 0x35, 0x51, 0x7e, 0xa8, 0xfd, 0xc6, 0x0a, 0x1b, 0x5d, 0x86, 0x11, 0xda, 0x85, 0x6a, + 0x4d, 0x1c, 0x4b, 0x4a, 0x51, 0x74, 0x9d, 0x95, 0x62, 0x01, 0xb5, 0xff, 0x7a, 0x41, 0x1b, 0x65, + 0x7a, 0x1f, 0x26, 0xa8, 0x06, 0xa3, 0xf7, 0x1c, 0x2f, 0xf6, 0xfc, 0x6d, 0x21, 0x7f, 0x3c, 0xdd, + 0xf3, 0x8c, 0x62, 0x95, 0xee, 0xf2, 0x0a, 0xfc, 0x14, 0x15, 0x7f, 0xb0, 0x24, 0x43, 0x29, 0x86, + 0x1d, 0xdf, 0xa7, 0x14, 0x0b, 0x83, 0x52, 0xc4, 0xbc, 0x02, 0xa7, 0x28, 0xfe, 0x60, 0x49, 0x06, + 0xbd, 0x05, 0x20, 0x77, 0x18, 0x71, 0x85, 0xed, 0xc5, 0xb3, 0xfd, 0x89, 0x6e, 0xa8, 0x3a, 0x4b, + 0x93, 0xf4, 0x8c, 0x4e, 0xfe, 0x63, 0x8d, 0x9e, 0x1d, 0x33, 0x39, 0xad, 0xbb, 0x33, 0xe8, 0x9b, + 0xe9, 0x12, 0x77, 0xc2, 0x98, 0xb8, 0x8b, 0xb1, 0x18, 0x9c, 0x0f, 0x0f, 0x76, 0x49, 0xd9, 0xf0, + 0x5a, 0x44, 
0xdf, 0x0e, 0x82, 0x08, 0x4e, 0xe8, 0xd9, 0x3f, 0x5f, 0x84, 0xd9, 0xbc, 0xee, 0xd2, + 0x45, 0x47, 0xee, 0x7b, 0xf1, 0x32, 0x15, 0xaf, 0x2c, 0x73, 0xd1, 0xad, 0x88, 0x72, 0xac, 0x30, + 0xe8, 0xec, 0x47, 0xde, 0xb6, 0xbc, 0x63, 0x0e, 0x27, 0xb3, 0x5f, 0x67, 0xa5, 0x58, 0x40, 0x29, + 0x5e, 0x48, 0x9c, 0x48, 0x18, 0xf7, 0x68, 0xab, 0x04, 0xb3, 0x52, 0x2c, 0xa0, 0xba, 0xb6, 0x6b, + 0xa8, 0x8f, 0xb6, 0xcb, 0x18, 0xa2, 0xe1, 0xe3, 0x1d, 0x22, 0xf4, 0x59, 0x80, 0x2d, 0xcf, 0xf7, + 0xa2, 0x1d, 0x46, 0x7d, 0xe4, 0xc8, 0xd4, 0x95, 0x70, 0xb6, 0xaa, 0xa8, 0x60, 0x8d, 0x22, 0x7a, + 0x19, 0xc6, 0xd4, 0x06, 0xac, 0x56, 0xd8, 0x4b, 0xa7, 0x66, 0x39, 0x92, 0x70, 0xa3, 0x0a, 0xd6, + 0xf1, 0xec, 0xcf, 0xa7, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0x83, 0x8e, 0x6f, 0xa1, 0xf7, + 0xf8, 0xda, 0x5f, 0x2d, 0xc2, 0x94, 0xd1, 0x58, 0x27, 0x1a, 0x80, 0x67, 0x5d, 0xa3, 0x0c, 0xdc, + 0x89, 0x89, 0xd8, 0x7f, 0x76, 0xff, 0xad, 0xa2, 0x33, 0x79, 0xba, 0x03, 0x78, 0x7d, 0xf4, 0x59, + 0x28, 0x37, 0x9d, 0x88, 0x69, 0xce, 0x88, 0xd8, 0x77, 0x83, 0x10, 0x4b, 0x2e, 0x26, 0x4e, 0x14, + 0x6b, 0xa7, 0x26, 0xa7, 0x9d, 0x90, 0xa4, 0x27, 0x0d, 0x95, 0x4f, 0xa4, 0xf5, 0x98, 0xea, 0x04, + 0x15, 0x62, 0xf6, 0x31, 0x87, 0xa1, 0x57, 0x61, 0x3c, 0x24, 0x6c, 0x55, 0x2c, 0x53, 0x69, 0x8e, + 0x2d, 0xb3, 0xe1, 0x44, 0xec, 0xc3, 0x1a, 0x0c, 0x1b, 0x98, 0xc9, 0xdd, 0x60, 0xa4, 0xc7, 0xdd, + 0xe0, 0x69, 0x18, 0x65, 0x3f, 0xd4, 0x0a, 0x50, 0xb3, 0x51, 0xe5, 0xc5, 0x58, 0xc2, 0xd3, 0x0b, + 0xa6, 0x34, 0xd8, 0x82, 0xa1, 0xb7, 0x0f, 0xb1, 0xa8, 0xd9, 0x2b, 0x73, 0x89, 0x73, 0x39, 0xb1, + 0xe4, 0xb1, 0x84, 0xd9, 0x1f, 0x86, 0xc9, 0x8a, 0x43, 0x5a, 0x81, 0xbf, 0xe2, 0xbb, 0xed, 0xc0, + 0xf3, 0x63, 0x34, 0x0b, 0x43, 0xec, 0x10, 0xe1, 0x2c, 0x60, 0x88, 0x36, 0x84, 0x87, 0xe8, 0x85, + 0xc0, 0xde, 0x86, 0x33, 0x95, 0xe0, 0x9e, 0x7f, 0xcf, 0x09, 0xdd, 0xc5, 0x5a, 0x55, 0xbb, 0x5f, + 0xaf, 0xcb, 0xfb, 0x1d, 0x37, 0xda, 0xca, 0x64, 0xbd, 0x5a, 0x4d, 0x2e, 0xd6, 0xae, 0x7a, 0x4d, + 0x92, 0xa3, 0x05, 0xf9, 0x9b, 0x05, 0xa3, 0xa5, 0x04, 0x5f, 0xbd, 0x6a, 0x59, 0xb9, 0xaf, 0x5a, + 0x6f, 0x42, 0x69, 0xcb, 0x23, 0x4d, 0x17, 0x93, 0x2d, 0xb1, 0x12, 0x9f, 0xca, 0xb7, 0x43, 0x59, + 0xa5, 0x98, 0x52, 0xeb, 0xc5, 0x6f, 0x87, 0xab, 0xa2, 0x32, 0x56, 0x64, 0xd0, 0x2e, 0x4c, 0xcb, + 0x0b, 0x83, 0x84, 0x8a, 0x75, 0xf9, 0x74, 0xaf, 0x5b, 0x88, 0x49, 0xfc, 0xf4, 0x83, 0x83, 0xf9, + 0x69, 0x9c, 0x22, 0x83, 0xbb, 0x08, 0xd3, 0xeb, 0x60, 0x8b, 0x72, 0xe0, 0x21, 0x36, 0xfc, 0xec, + 0x3a, 0xc8, 0x6e, 0xb6, 0xac, 0xd4, 0xfe, 0x61, 0x0b, 0x1e, 0xeb, 0x1a, 0x19, 0x71, 0xc3, 0x3f, + 0xe6, 0x59, 0x48, 0xdf, 0xb8, 0x0b, 0xfd, 0x6f, 0xdc, 0xf6, 0x3f, 0xb0, 0xe0, 0xf4, 0x4a, 0xab, + 0x1d, 0xef, 0x57, 0x3c, 0xf3, 0x09, 0xea, 0x15, 0x18, 0x69, 0x11, 0xd7, 0xeb, 0xb4, 0xc4, 0xcc, + 0xcd, 0x4b, 0x2e, 0xb5, 0xc6, 0x4a, 0x0f, 0x0f, 0xe6, 0x27, 0xea, 0x71, 0x10, 0x3a, 0xdb, 0x84, + 0x17, 0x60, 0x81, 0xce, 0x78, 0xbd, 0xf7, 0x0e, 0xb9, 0xe9, 0xb5, 0x3c, 0x69, 0x57, 0xd4, 0x53, + 0x67, 0xb7, 0x20, 0x07, 0x74, 0xe1, 0xcd, 0x8e, 0xe3, 0xc7, 0x5e, 0xbc, 0x2f, 0x5e, 0x8f, 0x24, + 0x11, 0x9c, 0xd0, 0xb3, 0xbf, 0x62, 0xc1, 0x94, 0x5c, 0xf7, 0x8b, 0xae, 0x1b, 0x92, 0x28, 0x42, + 0x73, 0x50, 0xf0, 0xda, 0xa2, 0x97, 0x20, 0x7a, 0x59, 0xa8, 0xd6, 0x70, 0xc1, 0x6b, 0x4b, 0xb1, + 0x8c, 0x31, 0xc2, 0xa2, 0xf9, 0x90, 0x76, 0x5d, 0x94, 0x63, 0x85, 0x81, 0xae, 0x40, 0xc9, 0x0f, + 0x5c, 0x6e, 0xdb, 0xc5, 0x8f, 0x34, 0xb6, 0xc0, 0xd6, 0x45, 0x19, 0x56, 0x50, 0x54, 0x83, 0x32, + 0x37, 0x7b, 0x4a, 0x16, 0xed, 0x40, 0xc6, 0x53, 0xec, 0xcb, 0x36, 0x64, 0x4d, 0x9c, 0x10, 0xb1, + 0x7f, 0xd9, 0x82, 0x71, 0xf9, 0x65, 
0x03, 0xca, 0x9c, 0x74, 0x6b, 0x25, 0xf2, 0x66, 0xb2, 0xb5, + 0xa8, 0xcc, 0xc8, 0x20, 0x86, 0xa8, 0x58, 0x3c, 0x92, 0xa8, 0xf8, 0x3c, 0x8c, 0x39, 0xed, 0x76, + 0xcd, 0x94, 0x33, 0xd9, 0x52, 0x5a, 0x4c, 0x8a, 0xb1, 0x8e, 0x63, 0xff, 0x50, 0x01, 0x26, 0xe5, + 0x17, 0xd4, 0x3b, 0x9b, 0x11, 0x89, 0xd1, 0x06, 0x94, 0x1d, 0x3e, 0x4b, 0x44, 0x2e, 0xf2, 0x27, + 0xb3, 0xf5, 0x08, 0xc6, 0x94, 0x26, 0x07, 0xfe, 0xa2, 0xac, 0x8d, 0x13, 0x42, 0xa8, 0x09, 0x33, + 0x7e, 0x10, 0x33, 0xe6, 0xaf, 0xe0, 0xbd, 0x9e, 0x76, 0xd2, 0xd4, 0xcf, 0x09, 0xea, 0x33, 0xeb, + 0x69, 0x2a, 0xb8, 0x9b, 0x30, 0x5a, 0x91, 0xba, 0x99, 0x62, 0xbe, 0x32, 0x40, 0x9f, 0xb8, 0x6c, + 0xd5, 0x8c, 0xfd, 0x4b, 0x16, 0x94, 0x25, 0xda, 0x49, 0xbc, 0xe2, 0xad, 0xc1, 0x68, 0xc4, 0x26, + 0x41, 0x0e, 0x8d, 0xdd, 0xab, 0xe3, 0x7c, 0xbe, 0x92, 0x33, 0x8d, 0xff, 0x8f, 0xb0, 0xa4, 0xc1, + 0x54, 0xf3, 0xaa, 0xfb, 0xef, 0x11, 0xd5, 0xbc, 0xea, 0x4f, 0xce, 0xa1, 0xf4, 0x07, 0xac, 0xcf, + 0x9a, 0xae, 0x8b, 0x8a, 0x5e, 0xed, 0x90, 0x6c, 0x79, 0xf7, 0xd3, 0xa2, 0x57, 0x8d, 0x95, 0x62, + 0x01, 0x45, 0x6f, 0xc1, 0x78, 0x43, 0xea, 0x64, 0x93, 0x1d, 0x7e, 0xb9, 0xe7, 0xfb, 0x80, 0x7a, + 0x4a, 0xe2, 0xba, 0x90, 0x65, 0xad, 0x3e, 0x36, 0xa8, 0x99, 0x66, 0x04, 0xc5, 0x7e, 0x66, 0x04, + 0x09, 0xdd, 0xfc, 0x47, 0xf5, 0x1f, 0xb1, 0x60, 0x84, 0xeb, 0xe2, 0x06, 0x53, 0x85, 0x6a, 0x2f, + 0x6b, 0xc9, 0xd8, 0xdd, 0xa1, 0x85, 0xe2, 0xa5, 0x0c, 0xad, 0x41, 0x99, 0xfd, 0x60, 0xba, 0xc4, + 0x62, 0xbe, 0xd5, 0x3d, 0x6f, 0x55, 0xef, 0xe0, 0x1d, 0x59, 0x0d, 0x27, 0x14, 0xec, 0x1f, 0x28, + 0x52, 0xee, 0x96, 0xa0, 0x1a, 0x87, 0xbe, 0xf5, 0xe8, 0x0e, 0xfd, 0xc2, 0xa3, 0x3a, 0xf4, 0xb7, + 0x61, 0xaa, 0xa1, 0xbd, 0xc3, 0x25, 0x33, 0x79, 0xa5, 0xe7, 0x22, 0xd1, 0x9e, 0xec, 0xb8, 0x96, + 0x65, 0xd9, 0x24, 0x82, 0xd3, 0x54, 0xd1, 0x37, 0xc3, 0x38, 0x9f, 0x67, 0xd1, 0x0a, 0xb7, 0xc4, + 0xf8, 0x50, 0xfe, 0x7a, 0xd1, 0x9b, 0xe0, 0x5a, 0x39, 0xad, 0x3a, 0x36, 0x88, 0xd9, 0x7f, 0x6c, + 0x01, 0x5a, 0x69, 0xef, 0x90, 0x16, 0x09, 0x9d, 0x66, 0xa2, 0x4e, 0xff, 0x2b, 0x16, 0xcc, 0x92, + 0xae, 0xe2, 0xe5, 0xa0, 0xd5, 0x12, 0x97, 0x96, 0x9c, 0x7b, 0xf5, 0x4a, 0x4e, 0x1d, 0xe5, 0x96, + 0x30, 0x9b, 0x87, 0x81, 0x73, 0xdb, 0x43, 0x6b, 0x70, 0x8a, 0x9f, 0x92, 0x0a, 0xa0, 0xd9, 0x5e, + 0x3f, 0x2e, 0x08, 0x9f, 0xda, 0xe8, 0x46, 0xc1, 0x59, 0xf5, 0xec, 0xef, 0x18, 0x87, 0xdc, 0x5e, + 0xbc, 0xff, 0x8e, 0xf0, 0xfe, 0x3b, 0xc2, 0xfb, 0xef, 0x08, 0xef, 0xbf, 0x23, 0xbc, 0xff, 0x8e, + 0xf0, 0x75, 0xff, 0x8e, 0xf0, 0x87, 0x16, 0x9c, 0xea, 0x3e, 0x06, 0x4e, 0x42, 0x30, 0xef, 0xc0, + 0xa9, 0xee, 0xb3, 0xae, 0xa7, 0x9d, 0x5d, 0x77, 0x3f, 0x93, 0x73, 0x2f, 0xe3, 0x1b, 0x70, 0x16, + 0x7d, 0xfb, 0xe7, 0x4b, 0x30, 0xbc, 0xb2, 0x47, 0xfc, 0xf8, 0x04, 0x3e, 0xb1, 0x01, 0x93, 0x9e, + 0xbf, 0x17, 0x34, 0xf7, 0x88, 0xcb, 0xe1, 0x47, 0xb9, 0x22, 0x9f, 0x15, 0xa4, 0x27, 0xab, 0x06, + 0x09, 0x9c, 0x22, 0xf9, 0x28, 0xd4, 0xd4, 0xd7, 0x60, 0x84, 0x9f, 0x0e, 0x42, 0x47, 0x9d, 0x79, + 0x18, 0xb0, 0x41, 0x14, 0x67, 0x5e, 0xa2, 0x42, 0xe7, 0xa7, 0x8f, 0xa8, 0x8e, 0x3e, 0x0f, 0x93, + 0x5b, 0x5e, 0x18, 0xc5, 0x1b, 0x5e, 0x8b, 0x44, 0xb1, 0xd3, 0x6a, 0x3f, 0x84, 0x5a, 0x5a, 0x8d, + 0xc3, 0xaa, 0x41, 0x09, 0xa7, 0x28, 0xa3, 0x6d, 0x98, 0x68, 0x3a, 0x7a, 0x53, 0xa3, 0x47, 0x6e, + 0x4a, 0x1d, 0x3b, 0x37, 0x75, 0x42, 0xd8, 0xa4, 0x4b, 0xf7, 0x69, 0x83, 0x69, 0x56, 0x4b, 0x4c, + 0xdf, 0xa0, 0xf6, 0x29, 0x57, 0xa9, 0x72, 0x18, 0x95, 0xa0, 0x98, 0xe5, 0x6d, 0xd9, 0x94, 0xa0, + 0x34, 0xfb, 0xda, 0xcf, 0x41, 0x99, 0xd0, 0x21, 0xa4, 0x84, 0xc5, 0xc9, 0x75, 0x75, 0xb0, 0xbe, + 0xae, 0x79, 0x8d, 0x30, 0x30, 0x1f, 0x04, 0x56, 0x24, 0x25, 
0x9c, 0x10, 0x45, 0xcb, 0x30, 0x12, + 0x91, 0xd0, 0x23, 0x91, 0x38, 0xc3, 0x7a, 0x4c, 0x23, 0x43, 0xe3, 0x4e, 0x2b, 0xfc, 0x37, 0x16, + 0x55, 0xe9, 0xf2, 0x72, 0x98, 0xae, 0x94, 0x9d, 0x32, 0xda, 0xf2, 0x5a, 0x64, 0xa5, 0x58, 0x40, + 0xd1, 0x1b, 0x30, 0x1a, 0x92, 0x26, 0x7b, 0x71, 0x9a, 0x18, 0x7c, 0x91, 0xf3, 0x07, 0x2c, 0x5e, + 0x0f, 0x4b, 0x02, 0xe8, 0x06, 0xa0, 0x90, 0x50, 0x09, 0xcc, 0xf3, 0xb7, 0x95, 0x3d, 0xaa, 0xe0, + 0xe0, 0x6a, 0xc7, 0xe3, 0x04, 0x43, 0xfa, 0x0f, 0xe1, 0x8c, 0x6a, 0xe8, 0x1a, 0xcc, 0xa8, 0xd2, + 0xaa, 0x1f, 0xc5, 0x0e, 0xe5, 0x9c, 0x53, 0x8c, 0x96, 0x52, 0x80, 0xe0, 0x34, 0x02, 0xee, 0xae, + 0x63, 0xff, 0xa4, 0x05, 0x7c, 0x9c, 0x4f, 0xe0, 0xda, 0xff, 0xba, 0x79, 0xed, 0x3f, 0x97, 0x3b, + 0x73, 0x39, 0x57, 0xfe, 0x07, 0x16, 0x8c, 0x69, 0x33, 0x9b, 0xac, 0x59, 0xab, 0xc7, 0x9a, 0xed, + 0xc0, 0x34, 0x5d, 0xe9, 0xb7, 0x36, 0x23, 0x12, 0xee, 0x11, 0x97, 0x2d, 0xcc, 0xc2, 0xc3, 0x2d, + 0x4c, 0x65, 0xfb, 0x76, 0x33, 0x45, 0x10, 0x77, 0x35, 0x81, 0x5e, 0x91, 0xcf, 0x2f, 0x45, 0xc3, + 0xce, 0x9c, 0x3f, 0xad, 0x1c, 0x1e, 0xcc, 0x4f, 0x6b, 0x1f, 0xa2, 0x3f, 0xb7, 0xd8, 0x9f, 0x93, + 0xdf, 0xa8, 0x6c, 0x0c, 0x1b, 0x6a, 0xb1, 0xa4, 0x6c, 0x0c, 0xd5, 0x72, 0xc0, 0x09, 0x0e, 0xdd, + 0xa3, 0x3b, 0x41, 0x14, 0xa7, 0x6d, 0x0c, 0xaf, 0x07, 0x51, 0x8c, 0x19, 0xc4, 0x7e, 0x11, 0x60, + 0xe5, 0x3e, 0x69, 0xf0, 0xa5, 0xae, 0x5f, 0x67, 0xac, 0xfc, 0xeb, 0x8c, 0xfd, 0x5b, 0x16, 0x4c, + 0xae, 0x2e, 0x1b, 0x4a, 0xe4, 0x05, 0x00, 0x7e, 0x07, 0xbb, 0x7b, 0x77, 0x5d, 0x3e, 0xd0, 0xf3, + 0x37, 0x56, 0x55, 0x8a, 0x35, 0x0c, 0x74, 0x0e, 0x8a, 0xcd, 0x8e, 0x2f, 0x14, 0x9a, 0xa3, 0xf4, + 0xc0, 0xbe, 0xd9, 0xf1, 0x31, 0x2d, 0xd3, 0x9c, 0x1c, 0x8a, 0x03, 0x3b, 0x39, 0xf4, 0x0d, 0x36, + 0x80, 0xe6, 0x61, 0xf8, 0xde, 0x3d, 0xcf, 0xe5, 0x2e, 0x9d, 0xc2, 0x78, 0xe0, 0xee, 0xdd, 0x6a, + 0x25, 0xc2, 0xbc, 0xdc, 0xfe, 0x52, 0x11, 0xe6, 0x56, 0x9b, 0xe4, 0xfe, 0xbb, 0x74, 0x6b, 0x1d, + 0xd4, 0x45, 0xe3, 0x68, 0xaa, 0xa1, 0xa3, 0xba, 0xe1, 0xf4, 0x1f, 0x8f, 0x2d, 0x18, 0xe5, 0x26, + 0x76, 0xd2, 0xc9, 0xf5, 0xb5, 0xac, 0xd6, 0xf3, 0x07, 0x64, 0x81, 0x9b, 0xea, 0x09, 0x1f, 0x3d, + 0x75, 0xd2, 0x8a, 0x52, 0x2c, 0x89, 0xcf, 0x7d, 0x0c, 0xc6, 0x75, 0xcc, 0x23, 0x39, 0xc4, 0xfd, + 0xe5, 0x22, 0x4c, 0xd3, 0x1e, 0x3c, 0xd2, 0x89, 0xb8, 0xdd, 0x3d, 0x11, 0xc7, 0xed, 0x14, 0xd5, + 0x7f, 0x36, 0xde, 0x4a, 0xcf, 0xc6, 0xf3, 0x79, 0xb3, 0x71, 0xd2, 0x73, 0xf0, 0xed, 0x16, 0x9c, + 0x5a, 0x6d, 0x06, 0x8d, 0xdd, 0x94, 0xe3, 0xd2, 0xcb, 0x30, 0x46, 0xf9, 0x78, 0x64, 0xf8, 0xd4, + 0x1b, 0x51, 0x16, 0x04, 0x08, 0xeb, 0x78, 0x5a, 0xb5, 0xdb, 0xb7, 0xab, 0x95, 0xac, 0xe0, 0x0c, + 0x02, 0x84, 0x75, 0x3c, 0xfb, 0x37, 0x2c, 0xb8, 0x70, 0x6d, 0x79, 0x25, 0x59, 0x8a, 0x5d, 0xf1, + 0x21, 0x2e, 0xc3, 0x48, 0xdb, 0xd5, 0xba, 0x92, 0x28, 0x7c, 0x2b, 0xac, 0x17, 0x02, 0xfa, 0x5e, + 0x89, 0x7d, 0xf2, 0x13, 0x16, 0x9c, 0xba, 0xe6, 0xc5, 0xf4, 0x58, 0x4e, 0x47, 0x2a, 0xa0, 0xe7, + 0x72, 0xe4, 0xc5, 0x41, 0xb8, 0x9f, 0x8e, 0x54, 0x80, 0x15, 0x04, 0x6b, 0x58, 0xbc, 0xe5, 0x3d, + 0x8f, 0x19, 0x77, 0x17, 0xcc, 0xa7, 0x2f, 0x2c, 0xca, 0xb1, 0xc2, 0xa0, 0x1f, 0xe6, 0x7a, 0x21, + 0xd3, 0x1a, 0xee, 0x0b, 0x0e, 0xab, 0x3e, 0xac, 0x22, 0x01, 0x38, 0xc1, 0xa1, 0x17, 0xa8, 0xf9, + 0x6b, 0xcd, 0x4e, 0x14, 0x93, 0x70, 0x2b, 0xca, 0xe1, 0x8e, 0x2f, 0x42, 0x99, 0x48, 0x1d, 0xbd, + 0xe8, 0xb5, 0x12, 0x35, 0x95, 0xf2, 0x9e, 0x07, 0x4c, 0x50, 0x78, 0x03, 0xb8, 0x41, 0x1e, 0xcd, + 0x8f, 0x6d, 0x15, 0x10, 0xd1, 0xdb, 0xd2, 0x23, 0x48, 0x30, 0x57, 0xf4, 0x95, 0x2e, 0x28, 0xce, + 0xa8, 0x61, 0xff, 0xb0, 0x05, 0x67, 0xd4, 0x07, 0xbf, 0xe7, 0x3e, 0xd3, 0xfe, 0x99, 
0x02, 0x4c, + 0x5c, 0xdf, 0xd8, 0xa8, 0x5d, 0x23, 0xb1, 0x38, 0xb6, 0xfb, 0xbf, 0xbc, 0x63, 0xed, 0x01, 0xb1, + 0xd7, 0x2d, 0xb0, 0x13, 0x7b, 0xcd, 0x05, 0x1e, 0x88, 0x68, 0xa1, 0xea, 0xc7, 0xb7, 0xc2, 0x7a, + 0x1c, 0x7a, 0xfe, 0x76, 0xe6, 0x93, 0xa3, 0x14, 0x2e, 0x8a, 0x79, 0xc2, 0x05, 0x7a, 0x11, 0x46, + 0x58, 0x24, 0x24, 0x39, 0x09, 0x8f, 0xab, 0x4b, 0x14, 0x2b, 0x3d, 0x3c, 0x98, 0x2f, 0xdf, 0xc6, + 0x55, 0xfe, 0x07, 0x0b, 0x54, 0x74, 0x1b, 0xc6, 0x76, 0xe2, 0xb8, 0x7d, 0x9d, 0x38, 0x2e, 0xbd, + 0x2d, 0x73, 0x76, 0x78, 0x31, 0x8b, 0x1d, 0xd2, 0x41, 0xe0, 0x68, 0x09, 0x07, 0x49, 0xca, 0x22, + 0xac, 0xd3, 0xb1, 0xeb, 0x00, 0x09, 0xec, 0x98, 0xde, 0x4e, 0xec, 0xdf, 0xb7, 0x60, 0x94, 0x07, + 0xa5, 0x08, 0xd1, 0xc7, 0x61, 0x88, 0xdc, 0x27, 0x0d, 0x21, 0x2a, 0x67, 0x76, 0x38, 0x91, 0xb4, + 0xb8, 0x0e, 0x98, 0xfe, 0xc7, 0xac, 0x16, 0xba, 0x0e, 0xa3, 0xb4, 0xb7, 0xd7, 0x54, 0x84, 0x8e, + 0x27, 0xf2, 0xbe, 0x58, 0x4d, 0x3b, 0x17, 0xce, 0x44, 0x11, 0x96, 0xd5, 0xd9, 0x83, 0x75, 0xa3, + 0x5d, 0xa7, 0x1c, 0x3b, 0xee, 0x25, 0x58, 0x6c, 0x2c, 0xd7, 0x38, 0x92, 0xa0, 0xc6, 0x1f, 0xac, + 0x65, 0x21, 0x4e, 0x88, 0xd8, 0x1b, 0x50, 0xa6, 0x93, 0xba, 0xd8, 0xf4, 0x9c, 0xde, 0x6f, 0xf0, + 0xcf, 0x40, 0x59, 0xbe, 0xb0, 0x47, 0xc2, 0x19, 0x9d, 0x51, 0x95, 0x0f, 0xf0, 0x11, 0x4e, 0xe0, + 0xf6, 0x16, 0x9c, 0x66, 0xf6, 0x92, 0x4e, 0xbc, 0x63, 0xec, 0xb1, 0xfe, 0x8b, 0xf9, 0x59, 0x71, + 0xf3, 0xe4, 0x33, 0x33, 0xab, 0xf9, 0x7b, 0x8e, 0x4b, 0x8a, 0xc9, 0x2d, 0xd4, 0xfe, 0xea, 0x10, + 0x3c, 0x5e, 0xad, 0xe7, 0xc7, 0x2b, 0x79, 0x15, 0xc6, 0xb9, 0x5c, 0x4a, 0x97, 0xb6, 0xd3, 0x14, + 0xed, 0x2a, 0xe5, 0xef, 0x86, 0x06, 0xc3, 0x06, 0x26, 0xba, 0x00, 0x45, 0xef, 0x6d, 0x3f, 0xed, + 0x0d, 0x55, 0x7d, 0x73, 0x1d, 0xd3, 0x72, 0x0a, 0xa6, 0x22, 0x2e, 0x3f, 0x3b, 0x14, 0x58, 0x89, + 0xb9, 0xaf, 0xc3, 0xa4, 0x17, 0x35, 0x22, 0xaf, 0xea, 0x53, 0x3e, 0xa3, 0x71, 0x2a, 0xa5, 0x15, + 0xa1, 0x9d, 0x56, 0x50, 0x9c, 0xc2, 0xd6, 0x0e, 0xb2, 0xe1, 0x81, 0xc5, 0xe4, 0xbe, 0xde, 0xd9, + 0xf4, 0x06, 0xd0, 0x66, 0x5f, 0x17, 0x31, 0x2d, 0xbe, 0xb8, 0x01, 0xf0, 0x0f, 0x8e, 0xb0, 0x84, + 0xd1, 0x2b, 0x67, 0x63, 0xc7, 0x69, 0x2f, 0x76, 0xe2, 0x9d, 0x8a, 0x17, 0x35, 0x82, 0x3d, 0x12, + 0xee, 0x33, 0x6d, 0x41, 0x29, 0xb9, 0x72, 0x2a, 0xc0, 0xf2, 0xf5, 0xc5, 0x1a, 0xc5, 0xc4, 0xdd, + 0x75, 0xd0, 0x22, 0x4c, 0xc9, 0xc2, 0x3a, 0x89, 0xd8, 0x11, 0x36, 0xc6, 0xc8, 0x28, 0xff, 0x24, + 0x51, 0xac, 0x88, 0xa4, 0xf1, 0x4d, 0x49, 0x1a, 0x8e, 0x43, 0x92, 0x7e, 0x05, 0x26, 0x3c, 0xdf, + 0x8b, 0x3d, 0x27, 0x0e, 0xf8, 0x13, 0x14, 0x57, 0x0c, 0x30, 0xdd, 0x7a, 0x55, 0x07, 0x60, 0x13, + 0xcf, 0xfe, 0x6f, 0x43, 0x30, 0xc3, 0xa6, 0xed, 0xfd, 0x15, 0xf6, 0xf5, 0xb4, 0xc2, 0x6e, 0x77, + 0xaf, 0xb0, 0xe3, 0xb8, 0x22, 0x3c, 0xf4, 0x32, 0xfb, 0x3c, 0x94, 0x95, 0x4b, 0x96, 0xf4, 0xc9, + 0xb4, 0x72, 0x7c, 0x32, 0xfb, 0x4b, 0x1f, 0xd2, 0xaa, 0xad, 0x98, 0x69, 0xd5, 0xf6, 0xb7, 0x2d, + 0x48, 0xde, 0x54, 0xd0, 0x75, 0x28, 0xb7, 0x03, 0x66, 0xac, 0x19, 0x4a, 0x0b, 0xe8, 0xc7, 0x33, + 0x0f, 0x2a, 0x7e, 0x28, 0xf2, 0x8f, 0xaf, 0xc9, 0x1a, 0x38, 0xa9, 0x8c, 0x96, 0x60, 0xb4, 0x1d, + 0x92, 0x7a, 0xcc, 0xc2, 0x96, 0xf4, 0xa5, 0xc3, 0xd7, 0x08, 0xc7, 0xc7, 0xb2, 0xa2, 0xfd, 0xb3, + 0x16, 0x00, 0x37, 0x1c, 0x73, 0xfc, 0x6d, 0x72, 0x02, 0xea, 0xee, 0x0a, 0x0c, 0x45, 0x6d, 0xd2, + 0xe8, 0x65, 0x46, 0x9b, 0xf4, 0xa7, 0xde, 0x26, 0x8d, 0x64, 0xc0, 0xe9, 0x3f, 0xcc, 0x6a, 0xdb, + 0xdf, 0x09, 0x30, 0x99, 0xa0, 0x55, 0x63, 0xd2, 0x42, 0xcf, 0x19, 0x61, 0x0c, 0xce, 0xa5, 0xc2, + 0x18, 0x94, 0x19, 0xb6, 0xa6, 0x59, 0xfd, 0x3c, 0x14, 0x5b, 0xce, 0x7d, 0xa1, 0x3a, 0x7b, 0xa6, + 0x77, 0x37, 
0x28, 0xfd, 0x85, 0x35, 0xe7, 0x3e, 0xbf, 0x24, 0x3e, 0x23, 0x17, 0xc8, 0x9a, 0x73, + 0xff, 0x90, 0x1b, 0xcb, 0x32, 0x26, 0x75, 0xd3, 0x8b, 0xe2, 0x2f, 0xfc, 0xd7, 0xe4, 0x3f, 0x5b, + 0x76, 0xb4, 0x11, 0xd6, 0x96, 0xe7, 0x0b, 0x9b, 0xa8, 0x81, 0xda, 0xf2, 0xfc, 0x74, 0x5b, 0x9e, + 0x3f, 0x40, 0x5b, 0x9e, 0x8f, 0xde, 0x81, 0x51, 0x61, 0xb2, 0x28, 0xc2, 0x06, 0x5d, 0x1d, 0xa0, + 0x3d, 0x61, 0xf1, 0xc8, 0xdb, 0xbc, 0x2a, 0x2f, 0xc1, 0xa2, 0xb4, 0x6f, 0xbb, 0xb2, 0x41, 0xf4, + 0x37, 0x2c, 0x98, 0x14, 0xbf, 0x31, 0x79, 0xbb, 0x43, 0xa2, 0x58, 0xc8, 0x9e, 0x1f, 0x1d, 0xbc, + 0x0f, 0xa2, 0x22, 0xef, 0xca, 0x47, 0x25, 0x9b, 0x35, 0x81, 0x7d, 0x7b, 0x94, 0xea, 0x05, 0xfa, + 0x47, 0x16, 0x9c, 0x6e, 0x39, 0xf7, 0x79, 0x8b, 0xbc, 0x0c, 0x3b, 0xb1, 0x17, 0x88, 0xa7, 0xff, + 0x8f, 0x0f, 0x36, 0xfd, 0x5d, 0xd5, 0x79, 0x27, 0xe5, 0xfb, 0xe4, 0xe9, 0x2c, 0x94, 0xbe, 0x5d, + 0xcd, 0xec, 0xd7, 0xdc, 0x16, 0x94, 0xe4, 0x7a, 0xcb, 0x50, 0x35, 0x54, 0x74, 0xc1, 0xfa, 0xc8, + 0x16, 0xa3, 0x7a, 0x78, 0x00, 0xda, 0x8e, 0x58, 0x6b, 0x8f, 0xb4, 0x9d, 0xcf, 0xc3, 0xb8, 0xbe, + 0xc6, 0x1e, 0x69, 0x5b, 0x6f, 0xc3, 0xa9, 0x8c, 0xb5, 0xf4, 0x48, 0x9b, 0xbc, 0x07, 0xe7, 0x72, + 0xd7, 0xc7, 0xa3, 0x6c, 0xd8, 0xfe, 0x19, 0x4b, 0xe7, 0x83, 0x27, 0xf0, 0xe6, 0xb0, 0x6c, 0xbe, + 0x39, 0x5c, 0xec, 0xbd, 0x73, 0x72, 0x1e, 0x1e, 0xde, 0xd2, 0x3b, 0x4d, 0xb9, 0x3a, 0x7a, 0x03, + 0x46, 0x9a, 0xb4, 0x44, 0x1a, 0xbe, 0xda, 0xfd, 0x77, 0x64, 0x22, 0x4b, 0xb1, 0xf2, 0x08, 0x0b, + 0x0a, 0xf6, 0x2f, 0x58, 0x30, 0x74, 0x02, 0x23, 0x81, 0xcd, 0x91, 0x78, 0x2e, 0x97, 0xb4, 0x88, + 0x68, 0xbc, 0x80, 0x9d, 0x7b, 0x2b, 0xf7, 0x63, 0xe2, 0x47, 0xec, 0xaa, 0x98, 0x39, 0x30, 0xff, + 0x1f, 0x9c, 0xba, 0x19, 0x38, 0xee, 0x92, 0xd3, 0x74, 0xfc, 0x06, 0x09, 0xab, 0xfe, 0xf6, 0x91, + 0x8c, 0xb6, 0x0b, 0xfd, 0x8c, 0xb6, 0xed, 0x1d, 0x40, 0x7a, 0x03, 0xc2, 0xfb, 0x05, 0xc3, 0xa8, + 0xc7, 0x9b, 0x12, 0xc3, 0xff, 0x54, 0xb6, 0x68, 0xd6, 0xd5, 0x33, 0xcd, 0xaf, 0x83, 0x17, 0x60, + 0x49, 0xc8, 0x7e, 0x15, 0x32, 0x5d, 0xe8, 0xfb, 0xab, 0x0d, 0xec, 0x4f, 0xc1, 0x0c, 0xab, 0x79, + 0xc4, 0x2b, 0xad, 0x9d, 0xd2, 0x4a, 0x66, 0x04, 0xd7, 0xb3, 0xbf, 0x68, 0xc1, 0xd4, 0x7a, 0x2a, + 0xe6, 0xd8, 0x65, 0xf6, 0x00, 0x9a, 0xa1, 0x0c, 0xaf, 0xb3, 0x52, 0x2c, 0xa0, 0xc7, 0xae, 0x83, + 0xfa, 0x73, 0x0b, 0x92, 0xa8, 0x16, 0x27, 0x20, 0x78, 0x2d, 0x1b, 0x82, 0x57, 0xa6, 0x6e, 0x44, + 0x75, 0x27, 0x4f, 0xee, 0x42, 0x37, 0x54, 0xbc, 0xa7, 0x1e, 0x6a, 0x91, 0x84, 0x0c, 0x8f, 0x0e, + 0x34, 0x69, 0x06, 0x85, 0x92, 0x11, 0xa0, 0xec, 0xff, 0x5c, 0x00, 0xa4, 0x70, 0x07, 0x8e, 0x47, + 0xd5, 0x5d, 0xe3, 0x78, 0xe2, 0x51, 0xed, 0x01, 0x62, 0x4f, 0xf8, 0xa1, 0xe3, 0x47, 0x9c, 0xac, + 0x27, 0xb4, 0x6e, 0x47, 0xb3, 0x0f, 0x98, 0x13, 0x4d, 0xa2, 0x9b, 0x5d, 0xd4, 0x70, 0x46, 0x0b, + 0x9a, 0x69, 0xc6, 0xf0, 0xa0, 0xa6, 0x19, 0x23, 0x7d, 0x3c, 0xdc, 0x7e, 0xda, 0x82, 0x09, 0x35, + 0x4c, 0xef, 0x11, 0xfb, 0x73, 0xd5, 0x9f, 0x1c, 0xd6, 0x57, 0xd3, 0xba, 0xcc, 0x8e, 0x84, 0x6f, + 0x64, 0x9e, 0x8a, 0x4e, 0xd3, 0x7b, 0x87, 0xa8, 0x68, 0x80, 0xf3, 0xc2, 0xf3, 0x50, 0x94, 0x1e, + 0x1e, 0xcc, 0x4f, 0xa8, 0x7f, 0x3c, 0xfa, 0x70, 0x52, 0xc5, 0xfe, 0x31, 0xba, 0xd9, 0xcd, 0xa5, + 0x88, 0x5e, 0x86, 0xe1, 0xf6, 0x8e, 0x13, 0x91, 0x94, 0x9f, 0xce, 0x70, 0x8d, 0x16, 0x1e, 0x1e, + 0xcc, 0x4f, 0xaa, 0x0a, 0xac, 0x04, 0x73, 0xec, 0xc1, 0xa3, 0x7c, 0x75, 0x2f, 0xce, 0xbe, 0x51, + 0xbe, 0xfe, 0xd8, 0x82, 0xa1, 0xf5, 0xc0, 0x3d, 0x09, 0x16, 0xf0, 0xba, 0xc1, 0x02, 0xce, 0xe7, + 0x05, 0x86, 0xcf, 0xdd, 0xfd, 0xab, 0xa9, 0xdd, 0x7f, 0x31, 0x97, 0x42, 0xef, 0x8d, 0xdf, 0x82, + 0x31, 0x16, 0x6e, 0x5e, 0xf8, 0x24, 
0xbd, 0x68, 0x6c, 0xf8, 0xf9, 0xd4, 0x86, 0x9f, 0xd2, 0x50, + 0xb5, 0x9d, 0xfe, 0x34, 0x8c, 0x0a, 0x27, 0x97, 0xb4, 0xc3, 0xa7, 0xc0, 0xc5, 0x12, 0x6e, 0xff, + 0x48, 0x11, 0x8c, 0xf0, 0xf6, 0xe8, 0x97, 0x2c, 0x58, 0x08, 0xb9, 0xf1, 0xab, 0x5b, 0xe9, 0x84, + 0x9e, 0xbf, 0x5d, 0x6f, 0xec, 0x10, 0xb7, 0xd3, 0xf4, 0xfc, 0xed, 0xea, 0xb6, 0x1f, 0xa8, 0xe2, + 0x95, 0xfb, 0xa4, 0xd1, 0x61, 0xcf, 0x57, 0x7d, 0x62, 0xe9, 0x2b, 0x23, 0xf2, 0x17, 0x1e, 0x1c, + 0xcc, 0x2f, 0xe0, 0x23, 0xd1, 0xc6, 0x47, 0xec, 0x0b, 0xfa, 0x0d, 0x0b, 0xae, 0xf2, 0xa8, 0xef, + 0x83, 0xf7, 0xbf, 0xc7, 0x3d, 0xb7, 0x26, 0x49, 0x25, 0x44, 0x36, 0x48, 0xd8, 0x5a, 0x7a, 0x45, + 0x0c, 0xe8, 0xd5, 0xda, 0xd1, 0xda, 0xc2, 0x47, 0xed, 0x9c, 0xfd, 0x2f, 0x8b, 0x30, 0x21, 0xa2, + 0x41, 0x89, 0x33, 0xe0, 0x65, 0x63, 0x49, 0x3c, 0x91, 0x5a, 0x12, 0x33, 0x06, 0xf2, 0xf1, 0xb0, + 0xff, 0x08, 0x66, 0x28, 0x73, 0xbe, 0x4e, 0x9c, 0x30, 0xde, 0x24, 0x0e, 0xb7, 0xb8, 0x2a, 0x1e, + 0x99, 0xfb, 0x2b, 0xc5, 0xda, 0xcd, 0x34, 0x31, 0xdc, 0x4d, 0xff, 0xeb, 0xe9, 0xcc, 0xf1, 0x61, + 0xba, 0x2b, 0xa0, 0xd7, 0xa7, 0xa1, 0xac, 0x3c, 0x34, 0x04, 0xd3, 0xe9, 0x1d, 0x17, 0x2f, 0x4d, + 0x81, 0x2b, 0xbf, 0x12, 0xef, 0xa0, 0x84, 0x9c, 0xfd, 0x8f, 0x0b, 0x46, 0x83, 0x7c, 0x12, 0xd7, + 0xa1, 0xe4, 0x44, 0x91, 0xb7, 0xed, 0x13, 0x57, 0xec, 0xd8, 0x0f, 0xe6, 0xed, 0x58, 0xa3, 0x19, + 0xe6, 0x25, 0xb3, 0x28, 0x6a, 0x62, 0x45, 0x03, 0x5d, 0xe7, 0x76, 0x6d, 0x7b, 0xf2, 0xa6, 0x36, + 0x18, 0x35, 0x90, 0x96, 0x6f, 0x7b, 0x04, 0x8b, 0xfa, 0xe8, 0x33, 0xdc, 0xf0, 0xf0, 0x86, 0x1f, + 0xdc, 0xf3, 0xaf, 0x05, 0x81, 0x8c, 0xb8, 0x30, 0x18, 0xc1, 0x19, 0x69, 0x6e, 0xa8, 0xaa, 0x63, + 0x93, 0xda, 0x60, 0x11, 0x32, 0xbf, 0x05, 0x4e, 0x51, 0xd2, 0xa6, 0x43, 0x74, 0x84, 0x08, 0x4c, + 0x89, 0x50, 0x63, 0xb2, 0x4c, 0x8c, 0x5d, 0xe6, 0x25, 0xcc, 0xac, 0x9d, 0x68, 0x80, 0x6f, 0x98, + 0x24, 0x70, 0x9a, 0xa6, 0xfd, 0xe3, 0x16, 0x30, 0xe7, 0xd0, 0x13, 0x90, 0x47, 0x3e, 0x61, 0xca, + 0x23, 0xb3, 0x79, 0x83, 0x9c, 0x23, 0x8a, 0xbc, 0xc4, 0x57, 0x56, 0x2d, 0x0c, 0xee, 0xef, 0x0b, + 0xa3, 0x8f, 0xfe, 0xf7, 0x0f, 0xfb, 0xff, 0x58, 0x9c, 0x89, 0x29, 0xff, 0x09, 0xf4, 0xad, 0x50, + 0x6a, 0x38, 0x6d, 0xa7, 0xc1, 0x73, 0xb1, 0xe4, 0xea, 0xe2, 0x8c, 0x4a, 0x0b, 0xcb, 0xa2, 0x06, + 0xd7, 0x2d, 0xc9, 0x90, 0x75, 0x25, 0x59, 0xdc, 0x57, 0x9f, 0xa4, 0x9a, 0x9c, 0xdb, 0x85, 0x09, + 0x83, 0xd8, 0x23, 0x55, 0x44, 0x7c, 0x2b, 0x3f, 0x62, 0x55, 0x88, 0xc5, 0x16, 0xcc, 0xf8, 0xda, + 0x7f, 0x7a, 0xa0, 0xc8, 0xcb, 0xe5, 0x07, 0xfb, 0x1d, 0xa2, 0xec, 0xf4, 0xd1, 0xfc, 0x4e, 0x53, + 0x64, 0x70, 0x37, 0x65, 0xfb, 0x47, 0x2d, 0x78, 0x4c, 0x47, 0xd4, 0x5c, 0x5b, 0xfa, 0x69, 0xf7, + 0x2b, 0x50, 0x0a, 0xda, 0x24, 0x74, 0xe2, 0x20, 0x14, 0xa7, 0xc6, 0x15, 0x39, 0xe8, 0xb7, 0x44, + 0xf9, 0xa1, 0x88, 0x64, 0x2e, 0xa9, 0xcb, 0x72, 0xac, 0x6a, 0xd2, 0xdb, 0x27, 0x1b, 0x8c, 0x48, + 0x38, 0x31, 0x31, 0x1e, 0xc0, 0x1e, 0xba, 0x23, 0x2c, 0x20, 0xf6, 0x57, 0x2d, 0xbe, 0xb0, 0xf4, + 0xae, 0xa3, 0xb7, 0x61, 0xba, 0xe5, 0xc4, 0x8d, 0x9d, 0x95, 0xfb, 0xed, 0x90, 0xbf, 0x95, 0xc8, + 0x71, 0x7a, 0xa6, 0xdf, 0x38, 0x69, 0x1f, 0x99, 0xd8, 0x52, 0xae, 0xa5, 0x88, 0xe1, 0x2e, 0xf2, + 0x68, 0x13, 0xc6, 0x58, 0x19, 0xf3, 0xcf, 0x8b, 0x7a, 0x89, 0x06, 0x79, 0xad, 0x29, 0x5b, 0x81, + 0xb5, 0x84, 0x0e, 0xd6, 0x89, 0xda, 0x3f, 0x55, 0xe4, 0xbb, 0x9d, 0x89, 0xf2, 0x4f, 0xc3, 0x68, + 0x3b, 0x70, 0x97, 0xab, 0x15, 0x2c, 0x66, 0x41, 0x1d, 0x23, 0x35, 0x5e, 0x8c, 0x25, 0x1c, 0x5d, + 0x81, 0x92, 0xf8, 0x29, 0xdf, 0xb6, 0x18, 0x6f, 0x16, 0x78, 0x11, 0x56, 0x50, 0xf4, 0x02, 0x40, + 0x3b, 0x0c, 0xf6, 0x3c, 0x97, 0xc5, 0x8d, 0x28, 0x9a, 0x66, 
0x3e, 0x35, 0x05, 0xc1, 0x1a, 0x16, + 0x7a, 0x0d, 0x26, 0x3a, 0x7e, 0xc4, 0xc5, 0x11, 0x2d, 0x4a, 0xac, 0x32, 0x40, 0xb9, 0xad, 0x03, + 0xb1, 0x89, 0x8b, 0x16, 0x61, 0x24, 0x76, 0x98, 0xd9, 0xca, 0x70, 0xbe, 0xbd, 0xed, 0x06, 0xc5, + 0xd0, 0xd3, 0x7e, 0xd0, 0x0a, 0x58, 0x54, 0x44, 0x9f, 0x96, 0xae, 0xb2, 0x9c, 0xb1, 0x0b, 0x43, + 0xf7, 0xc1, 0x0e, 0x01, 0xcd, 0x51, 0x56, 0x18, 0xd0, 0x1b, 0xb4, 0xd0, 0xc7, 0x00, 0xc8, 0xfd, + 0x98, 0x84, 0xbe, 0xd3, 0x54, 0x56, 0x61, 0x4a, 0x2e, 0xa8, 0x04, 0xeb, 0x41, 0x7c, 0x3b, 0x22, + 0x2b, 0x0a, 0x03, 0x6b, 0xd8, 0xf6, 0x6f, 0x94, 0x01, 0x12, 0xb9, 0x1d, 0xbd, 0xd3, 0xc5, 0xb8, + 0x9e, 0xed, 0x2d, 0xe9, 0x1f, 0x1f, 0xd7, 0x42, 0xdf, 0x65, 0xc1, 0x98, 0xd3, 0x6c, 0x06, 0x0d, + 0x87, 0xc7, 0xf1, 0x2d, 0xf4, 0x66, 0x9c, 0xa2, 0xfd, 0xc5, 0xa4, 0x06, 0xef, 0xc2, 0x8b, 0x72, + 0x85, 0x6a, 0x90, 0xbe, 0xbd, 0xd0, 0x1b, 0x46, 0x1f, 0x91, 0x57, 0xc5, 0xa2, 0x31, 0x94, 0xea, + 0xaa, 0x58, 0x66, 0x67, 0x84, 0x7e, 0x4b, 0xbc, 0x6d, 0xdc, 0x12, 0x87, 0xf2, 0x7d, 0x01, 0x0d, + 0xf1, 0xb5, 0xdf, 0x05, 0x11, 0xd5, 0xf4, 0xb8, 0x00, 0xc3, 0xf9, 0x8e, 0x77, 0xda, 0x3d, 0xa9, + 0x4f, 0x4c, 0x80, 0xcf, 0xc3, 0x94, 0x6b, 0x0a, 0x01, 0x62, 0x25, 0x3e, 0x95, 0x47, 0x37, 0x25, + 0x33, 0x24, 0xc7, 0x7e, 0x0a, 0x80, 0xd3, 0x84, 0x51, 0x8d, 0x87, 0x89, 0xa8, 0xfa, 0x5b, 0x81, + 0x70, 0xb6, 0xb0, 0x73, 0xe7, 0x72, 0x3f, 0x8a, 0x49, 0x8b, 0x62, 0x26, 0xa7, 0xfb, 0xba, 0xa8, + 0x8b, 0x15, 0x15, 0xf4, 0x06, 0x8c, 0x30, 0xcf, 0xab, 0x68, 0xb6, 0x94, 0xaf, 0x2b, 0x36, 0xe3, + 0x9e, 0x25, 0x1b, 0x92, 0xfd, 0x8d, 0xb0, 0xa0, 0x80, 0xae, 0x4b, 0xbf, 0xc6, 0xa8, 0xea, 0xdf, + 0x8e, 0x08, 0xf3, 0x6b, 0x2c, 0x2f, 0x7d, 0x30, 0x71, 0x59, 0xe4, 0xe5, 0x99, 0xc9, 0xc1, 0x8c, + 0x9a, 0x54, 0x8a, 0x12, 0xff, 0x65, 0xce, 0xb1, 0x59, 0xc8, 0xef, 0x9e, 0x99, 0x97, 0x2c, 0x19, + 0xce, 0x3b, 0x26, 0x09, 0x9c, 0xa6, 0x49, 0x25, 0x52, 0xbe, 0xeb, 0x85, 0xbb, 0x46, 0x3f, 0xde, + 0xc1, 0x2f, 0xe2, 0xec, 0x34, 0xe2, 0x25, 0x58, 0xd4, 0x3f, 0x51, 0xf1, 0x60, 0xce, 0x87, 0xe9, + 0xf4, 0x16, 0x7d, 0xa4, 0xe2, 0xc8, 0xef, 0x0f, 0xc1, 0xa4, 0xb9, 0xa4, 0xd0, 0x55, 0x28, 0x0b, + 0x22, 0x2a, 0x4f, 0x80, 0xda, 0x25, 0x6b, 0x12, 0x80, 0x13, 0x1c, 0x96, 0x1e, 0x82, 0x55, 0xd7, + 0xcc, 0x6c, 0x93, 0xf4, 0x10, 0x0a, 0x82, 0x35, 0x2c, 0x7a, 0xb1, 0xda, 0x0c, 0x82, 0x58, 0x1d, + 0x48, 0x6a, 0xdd, 0x2d, 0xb1, 0x52, 0x2c, 0xa0, 0xf4, 0x20, 0xda, 0x25, 0xa1, 0x4f, 0x9a, 0x66, + 0x44, 0x61, 0x75, 0x10, 0xdd, 0xd0, 0x81, 0xd8, 0xc4, 0xa5, 0xc7, 0x69, 0x10, 0xb1, 0x85, 0x2c, + 0xae, 0x6f, 0x89, 0xd9, 0x72, 0x9d, 0xbb, 0x56, 0x4b, 0x38, 0xfa, 0x14, 0x3c, 0xa6, 0xa2, 0x26, + 0x61, 0xfe, 0x0e, 0x21, 0x5b, 0x1c, 0x31, 0xb4, 0x2d, 0x8f, 0x2d, 0x67, 0xa3, 0xe1, 0xbc, 0xfa, + 0xe8, 0x75, 0x98, 0x14, 0x22, 0xbe, 0xa4, 0x38, 0x6a, 0x9a, 0xc6, 0xdc, 0x30, 0xa0, 0x38, 0x85, + 0x2d, 0x63, 0x22, 0x33, 0x29, 0x5b, 0x52, 0x28, 0x75, 0xc7, 0x44, 0xd6, 0xe1, 0xb8, 0xab, 0x06, + 0x5a, 0x84, 0x29, 0x2e, 0x83, 0x79, 0xfe, 0x36, 0x9f, 0x13, 0xe1, 0x4d, 0xa5, 0xb6, 0xd4, 0x2d, + 0x13, 0x8c, 0xd3, 0xf8, 0xe8, 0x55, 0x18, 0x77, 0xc2, 0xc6, 0x8e, 0x17, 0x93, 0x46, 0xdc, 0x09, + 0xb9, 0x9b, 0x95, 0x66, 0x5b, 0xb4, 0xa8, 0xc1, 0xb0, 0x81, 0x69, 0xbf, 0x03, 0xa7, 0x32, 0x62, + 0x2e, 0xd0, 0x85, 0xe3, 0xb4, 0x3d, 0xf9, 0x4d, 0x29, 0x03, 0xe4, 0xc5, 0x5a, 0x55, 0x7e, 0x8d, + 0x86, 0x45, 0x57, 0x27, 0x8b, 0xcd, 0xa0, 0xa5, 0x18, 0x54, 0xab, 0x73, 0x55, 0x02, 0x70, 0x82, + 0x63, 0xff, 0xcf, 0x02, 0x4c, 0x65, 0xbc, 0xad, 0xb0, 0x34, 0x77, 0xa9, 0x4b, 0x4a, 0x92, 0xd5, + 0xce, 0x0c, 0xb1, 0x5d, 0x38, 0x42, 0x88, 0xed, 0x62, 0xbf, 0x10, 0xdb, 0x43, 0xef, 
0x26, 0xc4, + 0xb6, 0x39, 0x62, 0xc3, 0x03, 0x8d, 0x58, 0x46, 0x58, 0xee, 0x91, 0x23, 0x86, 0xe5, 0x36, 0x06, + 0x7d, 0x74, 0x80, 0x41, 0xff, 0x81, 0x02, 0x4c, 0xa7, 0x6d, 0x20, 0x4f, 0x40, 0x6f, 0xfb, 0x86, + 0xa1, 0xb7, 0xcd, 0x4e, 0x1a, 0x99, 0xb6, 0xcc, 0xcc, 0xd3, 0xe1, 0xe2, 0x94, 0x0e, 0xf7, 0xc3, + 0x03, 0x51, 0xeb, 0xad, 0xcf, 0xfd, 0xbb, 0x05, 0x38, 0x93, 0xae, 0xb2, 0xdc, 0x74, 0xbc, 0xd6, + 0x09, 0x8c, 0xcd, 0x2d, 0x63, 0x6c, 0x9e, 0x1b, 0xe4, 0x6b, 0x58, 0xd7, 0x72, 0x07, 0xe8, 0x6e, + 0x6a, 0x80, 0xae, 0x0e, 0x4e, 0xb2, 0xf7, 0x28, 0x7d, 0xa5, 0x08, 0x17, 0x33, 0xeb, 0x25, 0x6a, + 0xcf, 0x55, 0x43, 0xed, 0xf9, 0x42, 0x4a, 0xed, 0x69, 0xf7, 0xae, 0x7d, 0x3c, 0x7a, 0x50, 0xe1, + 0x21, 0xcb, 0x02, 0x08, 0x3c, 0xa4, 0x0e, 0xd4, 0xf0, 0x90, 0x55, 0x84, 0xb0, 0x49, 0xf7, 0xeb, + 0x49, 0xf7, 0xf9, 0x6f, 0x2d, 0x38, 0x97, 0x39, 0x37, 0x27, 0xa0, 0xeb, 0x5a, 0x37, 0x75, 0x5d, + 0x4f, 0x0f, 0xbc, 0x5a, 0x73, 0x94, 0x5f, 0xbf, 0x36, 0x94, 0xf3, 0x2d, 0xec, 0x26, 0x7f, 0x0b, + 0xc6, 0x9c, 0x46, 0x83, 0x44, 0xd1, 0x5a, 0xe0, 0xaa, 0x28, 0xc2, 0xcf, 0xb1, 0x7b, 0x56, 0x52, + 0x7c, 0x78, 0x30, 0x3f, 0x97, 0x26, 0x91, 0x80, 0xb1, 0x4e, 0x01, 0x7d, 0x06, 0x4a, 0x91, 0x38, + 0x37, 0xc5, 0xdc, 0xbf, 0x38, 0xe0, 0xe0, 0x38, 0x9b, 0xa4, 0x69, 0x86, 0x39, 0x52, 0x9a, 0x0a, + 0x45, 0xd2, 0x0c, 0x89, 0x52, 0x38, 0xd6, 0x90, 0x28, 0x2f, 0x00, 0xec, 0xa9, 0xcb, 0x40, 0x5a, + 0xff, 0xa0, 0x5d, 0x13, 0x34, 0x2c, 0xf4, 0x4d, 0x30, 0x1d, 0xf1, 0x38, 0x80, 0xcb, 0x4d, 0x27, + 0x62, 0x6e, 0x2e, 0x62, 0x15, 0xb2, 0x50, 0x4a, 0xf5, 0x14, 0x0c, 0x77, 0x61, 0xa3, 0x55, 0xd9, + 0x2a, 0x0b, 0x5a, 0xc8, 0x17, 0xe6, 0xe5, 0xa4, 0x45, 0x91, 0x64, 0xf7, 0x74, 0x7a, 0xf8, 0xd9, + 0xc0, 0x6b, 0x35, 0xd1, 0x67, 0x00, 0xe8, 0xf2, 0x11, 0x7a, 0x88, 0xd1, 0x7c, 0xe6, 0x49, 0xb9, + 0x8a, 0x9b, 0x69, 0x95, 0xcb, 0x7c, 0x53, 0x2b, 0x8a, 0x08, 0xd6, 0x08, 0xda, 0x3f, 0x30, 0x04, + 0x8f, 0xf7, 0xe0, 0x91, 0x68, 0xd1, 0x7c, 0x87, 0x7d, 0x26, 0x7d, 0xb9, 0x9e, 0xcb, 0xac, 0x6c, + 0xdc, 0xb6, 0x53, 0x4b, 0xb1, 0xf0, 0xae, 0x97, 0xe2, 0xf7, 0x5a, 0x9a, 0xda, 0x83, 0xdb, 0x6a, + 0x7e, 0xe2, 0x88, 0xbc, 0xff, 0x18, 0xf5, 0x20, 0x5b, 0x19, 0xca, 0x84, 0x17, 0x06, 0xee, 0xce, + 0xc0, 0xda, 0x85, 0x93, 0xd5, 0x12, 0x7f, 0xc1, 0x82, 0x27, 0x32, 0xfb, 0x6b, 0x58, 0xe4, 0x5c, + 0x85, 0x72, 0x83, 0x16, 0x6a, 0xae, 0x88, 0x89, 0x8f, 0xb6, 0x04, 0xe0, 0x04, 0xc7, 0x30, 0xbc, + 0x29, 0xf4, 0x35, 0xbc, 0xf9, 0x65, 0x0b, 0xba, 0xf6, 0xc7, 0x09, 0x30, 0xea, 0xaa, 0xc9, 0xa8, + 0x3f, 0x38, 0xc8, 0x5c, 0xe6, 0xf0, 0xe8, 0x3f, 0x9a, 0x82, 0xb3, 0x39, 0xae, 0x38, 0x7b, 0x30, + 0xb3, 0xdd, 0x20, 0xa6, 0x93, 0xa7, 0xf8, 0x98, 0x4c, 0x7f, 0xd8, 0x9e, 0x1e, 0xa1, 0x2c, 0x63, + 0xe6, 0x4c, 0x17, 0x0a, 0xee, 0x6e, 0x02, 0x7d, 0xc1, 0x82, 0xd3, 0xce, 0xbd, 0xa8, 0x2b, 0xc5, + 0xbe, 0x58, 0x33, 0x2f, 0x65, 0x2a, 0x41, 0xfa, 0xa4, 0xe4, 0xe7, 0x29, 0x44, 0xb3, 0xb0, 0x70, + 0x66, 0x5b, 0x08, 0x8b, 0xb8, 0xf2, 0x54, 0x9c, 0xef, 0xe1, 0x86, 0x9c, 0xe5, 0x33, 0xc5, 0x4f, + 0x10, 0x09, 0xc1, 0x8a, 0x0e, 0xfa, 0x1c, 0x94, 0xb7, 0xa5, 0x23, 0x63, 0xc6, 0x09, 0x95, 0x0c, + 0x64, 0x6f, 0xf7, 0x4e, 0xfe, 0x92, 0xa9, 0x90, 0x70, 0x42, 0x14, 0xbd, 0x0e, 0x45, 0x7f, 0x2b, + 0xea, 0x95, 0x85, 0x33, 0x65, 0xb2, 0xc6, 0x9d, 0xfd, 0xd7, 0x57, 0xeb, 0x98, 0x56, 0x44, 0xd7, + 0xa1, 0x18, 0x6e, 0xba, 0x42, 0x83, 0x97, 0xc9, 0xc3, 0xf1, 0x52, 0x25, 0xa7, 0x57, 0x8c, 0x12, + 0x5e, 0xaa, 0x60, 0x4a, 0x02, 0xd5, 0x60, 0x98, 0xf9, 0xaf, 0x88, 0xf3, 0x20, 0x53, 0xf2, 0xed, + 0xe1, 0x07, 0xc6, 0x23, 0x02, 0x30, 0x04, 0xcc, 0x09, 0xa1, 0x0d, 0x18, 0x69, 0xb0, 0x8c, 0x8d, + 0x22, 0x1e, 
0xd9, 0x47, 0x32, 0x75, 0x75, 0x3d, 0x52, 0x59, 0x0a, 0xd5, 0x15, 0xc3, 0xc0, 0x82, + 0x16, 0xa3, 0x4a, 0xda, 0x3b, 0x5b, 0x91, 0xc8, 0x30, 0x9c, 0x4d, 0xb5, 0x47, 0x86, 0x56, 0x41, + 0x95, 0x61, 0x60, 0x41, 0x0b, 0x7d, 0x0c, 0x0a, 0x5b, 0x0d, 0xe1, 0x9b, 0x92, 0xa9, 0xb4, 0x33, + 0xe3, 0x35, 0x2c, 0x8d, 0x3c, 0x38, 0x98, 0x2f, 0xac, 0x2e, 0xe3, 0xc2, 0x56, 0x03, 0xad, 0xc3, + 0xe8, 0x16, 0xf7, 0xf0, 0x16, 0x7a, 0xb9, 0xa7, 0xb2, 0x9d, 0xcf, 0xbb, 0x9c, 0xc0, 0xb9, 0x5b, + 0x86, 0x00, 0x60, 0x49, 0x84, 0x85, 0x69, 0x57, 0x9e, 0xea, 0x22, 0x74, 0xd7, 0xc2, 0xd1, 0xa2, + 0x0b, 0xf0, 0xf3, 0x39, 0xf1, 0x77, 0xc7, 0x1a, 0x45, 0xba, 0xaa, 0x1d, 0x99, 0xe6, 0x5d, 0x84, + 0x62, 0xc9, 0x5c, 0xd5, 0x7d, 0x32, 0xe0, 0xf3, 0x55, 0xad, 0x90, 0x70, 0x42, 0x14, 0xed, 0xc2, + 0xc4, 0x5e, 0xd4, 0xde, 0x21, 0x72, 0x4b, 0xb3, 0xc8, 0x2c, 0x39, 0x47, 0xd8, 0x1d, 0x81, 0xe8, + 0x85, 0x71, 0xc7, 0x69, 0x76, 0x71, 0x21, 0xf6, 0xfc, 0x7d, 0x47, 0x27, 0x86, 0x4d, 0xda, 0x74, + 0xf8, 0xdf, 0xee, 0x04, 0x9b, 0xfb, 0x31, 0x11, 0x11, 0xb7, 0x32, 0x87, 0xff, 0x4d, 0x8e, 0xd2, + 0x3d, 0xfc, 0x02, 0x80, 0x25, 0x11, 0x74, 0x47, 0x0c, 0x0f, 0xe3, 0x9e, 0xd3, 0xf9, 0x61, 0x31, + 0x17, 0x25, 0x52, 0xce, 0xa0, 0x30, 0x6e, 0x99, 0x90, 0x62, 0x5c, 0xb2, 0xbd, 0x13, 0xc4, 0x81, + 0x9f, 0xe2, 0xd0, 0x33, 0xf9, 0x5c, 0xb2, 0x96, 0x81, 0xdf, 0xcd, 0x25, 0xb3, 0xb0, 0x70, 0x66, + 0x5b, 0xc8, 0x85, 0xc9, 0x76, 0x10, 0xc6, 0xf7, 0x82, 0x50, 0xae, 0x2f, 0xd4, 0x43, 0xaf, 0x60, + 0x60, 0x8a, 0x16, 0x59, 0x30, 0x3b, 0x13, 0x82, 0x53, 0x34, 0xd1, 0x27, 0x61, 0x34, 0x6a, 0x38, + 0x4d, 0x52, 0xbd, 0x35, 0x7b, 0x2a, 0xff, 0xf8, 0xa9, 0x73, 0x94, 0x9c, 0xd5, 0xc5, 0x63, 0xba, + 0x73, 0x14, 0x2c, 0xc9, 0xa1, 0x55, 0x18, 0x66, 0x69, 0xb8, 0x58, 0x78, 0xb8, 0x9c, 0xe8, 0x9e, + 0x5d, 0x06, 0xc4, 0x9c, 0x37, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0xc4, 0xeb, 0x20, 0x9a, + 0x3d, 0x93, 0xbf, 0x07, 0x84, 0x54, 0x7e, 0xab, 0xde, 0x6b, 0x0f, 0x28, 0x24, 0x9c, 0x10, 0xa5, + 0x9c, 0x99, 0x72, 0xd3, 0xb3, 0x3d, 0x2c, 0x5f, 0x72, 0x79, 0x29, 0xe3, 0xcc, 0x94, 0x93, 0x52, + 0x12, 0xf6, 0xef, 0x8e, 0x76, 0xcb, 0x2c, 0xec, 0x42, 0xf6, 0x1d, 0x56, 0xd7, 0x5b, 0xdd, 0x47, + 0x07, 0xd5, 0x0f, 0x1d, 0xa3, 0xb4, 0xfa, 0x05, 0x0b, 0xce, 0xb6, 0x33, 0x3f, 0x44, 0x08, 0x00, + 0x83, 0xa9, 0x99, 0xf8, 0xa7, 0xab, 0x50, 0x82, 0xd9, 0x70, 0x9c, 0xd3, 0x52, 0xfa, 0x46, 0x50, + 0x7c, 0xd7, 0x37, 0x82, 0x35, 0x28, 0x31, 0x21, 0xb3, 0x4f, 0x06, 0xe3, 0xf4, 0xc5, 0x88, 0x89, + 0x12, 0xcb, 0xa2, 0x22, 0x56, 0x24, 0xd0, 0xf7, 0x59, 0x70, 0x21, 0xdd, 0x75, 0x4c, 0x18, 0x58, + 0xc4, 0x1f, 0xe4, 0x77, 0xc1, 0x55, 0xf1, 0xfd, 0x17, 0x6a, 0xbd, 0x90, 0x0f, 0xfb, 0x21, 0xe0, + 0xde, 0x8d, 0xa1, 0x4a, 0xc6, 0x65, 0x74, 0xc4, 0x54, 0xc0, 0x0f, 0x70, 0x21, 0x7d, 0x09, 0xc6, + 0x5b, 0x41, 0xc7, 0x8f, 0x85, 0xa1, 0x8c, 0x78, 0xb4, 0x67, 0x8f, 0xd5, 0x6b, 0x5a, 0x39, 0x36, + 0xb0, 0x52, 0xd7, 0xd8, 0xd2, 0x43, 0x5f, 0x63, 0xdf, 0x82, 0x71, 0x5f, 0xb3, 0xec, 0x14, 0xf2, + 0xc0, 0xe5, 0xfc, 0xd8, 0xa1, 0xba, 0x1d, 0x28, 0xef, 0xa5, 0x5e, 0x82, 0x0d, 0x6a, 0x27, 0x7b, + 0x37, 0xfa, 0x49, 0x2b, 0x43, 0xa8, 0xe7, 0xb7, 0xe5, 0x8f, 0x9b, 0xb7, 0xe5, 0xcb, 0xe9, 0xdb, + 0x72, 0x97, 0xf2, 0xd5, 0xb8, 0x28, 0x0f, 0x9e, 0x1a, 0x65, 0xd0, 0x30, 0x81, 0x76, 0x13, 0x2e, + 0xf5, 0x3b, 0x96, 0x98, 0xc5, 0x94, 0xab, 0x9e, 0xda, 0x12, 0x8b, 0x29, 0xb7, 0x5a, 0xc1, 0x0c, + 0x32, 0x68, 0x1c, 0x19, 0xfb, 0x7f, 0x58, 0x50, 0xac, 0x05, 0xee, 0x09, 0x28, 0x93, 0x3f, 0x61, + 0x28, 0x93, 0x1f, 0xcf, 0x3e, 0x10, 0xdd, 0x5c, 0xd5, 0xf1, 0x4a, 0x4a, 0x75, 0x7c, 0x21, 0x8f, + 0x40, 0x6f, 0x45, 0xf1, 0x8f, 0x15, 
0x61, 0xac, 0x16, 0xb8, 0xca, 0x5c, 0xf9, 0xd7, 0x1e, 0xc6, + 0x5c, 0x39, 0x37, 0xc0, 0xbf, 0x46, 0x99, 0x19, 0x5a, 0x49, 0x1f, 0xcb, 0xbf, 0x60, 0x56, 0xcb, + 0x77, 0x89, 0xb7, 0xbd, 0x13, 0x13, 0x37, 0xfd, 0x39, 0x27, 0x67, 0xb5, 0xfc, 0xdf, 0x2d, 0x98, + 0x4a, 0xb5, 0x8e, 0x9a, 0x30, 0xd1, 0xd4, 0x15, 0x93, 0x62, 0x9d, 0x3e, 0x94, 0x4e, 0x53, 0x58, + 0x7d, 0x6a, 0x45, 0xd8, 0x24, 0x8e, 0x16, 0x00, 0xd4, 0x4b, 0x9d, 0xd4, 0x80, 0x31, 0xa9, 0x5f, + 0x3d, 0xe5, 0x45, 0x58, 0xc3, 0x40, 0x2f, 0xc3, 0x58, 0x1c, 0xb4, 0x83, 0x66, 0xb0, 0xbd, 0x7f, + 0x83, 0xc8, 0xc8, 0x45, 0xca, 0x96, 0x6b, 0x23, 0x01, 0x61, 0x1d, 0xcf, 0xfe, 0x89, 0x22, 0xff, + 0x50, 0x3f, 0xf6, 0xde, 0x5f, 0x93, 0xef, 0xed, 0x35, 0xf9, 0x15, 0x0b, 0xa6, 0x69, 0xeb, 0xcc, + 0x5c, 0x44, 0x1e, 0xb6, 0x2a, 0x66, 0xb0, 0xd5, 0x23, 0x66, 0xf0, 0x65, 0xca, 0xbb, 0xdc, 0xa0, + 0x13, 0x0b, 0x0d, 0x9a, 0xc6, 0x9c, 0x68, 0x29, 0x16, 0x50, 0x81, 0x47, 0xc2, 0x50, 0xb8, 0xb8, + 0xe9, 0x78, 0x24, 0x0c, 0xb1, 0x80, 0xca, 0x90, 0xc2, 0x43, 0xd9, 0x21, 0x85, 0x79, 0x1c, 0x46, + 0x61, 0x58, 0x20, 0xc4, 0x1e, 0x2d, 0x0e, 0xa3, 0xb4, 0x38, 0x48, 0x70, 0xec, 0x9f, 0x29, 0xc2, + 0x78, 0x2d, 0x70, 0x93, 0xb7, 0xb2, 0x97, 0x8c, 0xb7, 0xb2, 0x4b, 0xa9, 0xb7, 0xb2, 0x69, 0x1d, + 0xf7, 0xfd, 0x97, 0xb1, 0xaf, 0xd5, 0xcb, 0xd8, 0xbf, 0xb0, 0xd8, 0xac, 0x55, 0xd6, 0xeb, 0xdc, + 0xfa, 0x08, 0x3d, 0x0f, 0x63, 0x8c, 0x21, 0x31, 0x9f, 0x4a, 0xf9, 0x80, 0xc4, 0x52, 0xe5, 0xac, + 0x27, 0xc5, 0x58, 0xc7, 0x41, 0x57, 0xa0, 0x14, 0x11, 0x27, 0x6c, 0xec, 0x28, 0x1e, 0x27, 0x5e, + 0x7b, 0x78, 0x19, 0x56, 0x50, 0xf4, 0x66, 0x12, 0x02, 0xb0, 0x98, 0xef, 0xa3, 0xa5, 0xf7, 0x87, + 0x6f, 0x91, 0xfc, 0xb8, 0x7f, 0xf6, 0x5d, 0x40, 0xdd, 0xf8, 0x03, 0xc4, 0xbe, 0x9a, 0x37, 0x63, + 0x5f, 0x95, 0xbb, 0xe2, 0x5e, 0xfd, 0x99, 0x05, 0x93, 0xb5, 0xc0, 0xa5, 0x5b, 0xf7, 0xeb, 0x69, + 0x9f, 0xea, 0xf1, 0x4f, 0x47, 0x7a, 0xc4, 0x3f, 0x7d, 0x12, 0x86, 0x6b, 0x81, 0x5b, 0xad, 0xf5, + 0xf2, 0x6d, 0xb6, 0xff, 0x9e, 0x05, 0xa3, 0xb5, 0xc0, 0x3d, 0x01, 0xe5, 0xfc, 0xc7, 0x4d, 0xe5, + 0xfc, 0x63, 0x39, 0xeb, 0x26, 0x47, 0x1f, 0xff, 0x77, 0x86, 0x60, 0x82, 0xf6, 0x33, 0xd8, 0x96, + 0x53, 0x69, 0x0c, 0x9b, 0x35, 0xc0, 0xb0, 0x51, 0x59, 0x38, 0x68, 0x36, 0x83, 0x7b, 0xe9, 0x69, + 0x5d, 0x65, 0xa5, 0x58, 0x40, 0xd1, 0xb3, 0x50, 0x6a, 0x87, 0x64, 0xcf, 0x0b, 0x84, 0x90, 0xa9, + 0x3d, 0x75, 0xd4, 0x44, 0x39, 0x56, 0x18, 0xf4, 0x72, 0x16, 0x79, 0x7e, 0x83, 0xd4, 0x49, 0x23, + 0xf0, 0x5d, 0xae, 0xbf, 0x2e, 0x8a, 0xb4, 0x01, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x5d, 0x28, 0xb3, + 0xff, 0x8c, 0xed, 0x1c, 0x3d, 0x01, 0xa5, 0x48, 0x48, 0x26, 0x08, 0xe0, 0x84, 0x16, 0x7a, 0x01, + 0x20, 0x96, 0x11, 0xb2, 0x23, 0x11, 0xe7, 0x48, 0x09, 0xe4, 0x2a, 0x76, 0x76, 0x84, 0x35, 0x2c, + 0xf4, 0x0c, 0x94, 0x63, 0xc7, 0x6b, 0xde, 0xf4, 0x7c, 0x12, 0x31, 0xbd, 0x74, 0x51, 0xe6, 0x05, + 0x13, 0x85, 0x38, 0x81, 0x53, 0x81, 0x88, 0x05, 0x01, 0xe0, 0xe9, 0x6b, 0x4b, 0x0c, 0x9b, 0x09, + 0x44, 0x37, 0x55, 0x29, 0xd6, 0x30, 0xd0, 0x0e, 0x9c, 0xf7, 0x7c, 0x16, 0x62, 0x9f, 0xd4, 0x77, + 0xbd, 0xf6, 0xc6, 0xcd, 0xfa, 0x1d, 0x12, 0x7a, 0x5b, 0xfb, 0x4b, 0x4e, 0x63, 0x97, 0xf8, 0x32, + 0xb5, 0xe0, 0x07, 0x45, 0x17, 0xcf, 0x57, 0x7b, 0xe0, 0xe2, 0x9e, 0x94, 0xec, 0x57, 0xe1, 0x4c, + 0x2d, 0x70, 0x6b, 0x41, 0x18, 0xaf, 0x06, 0xe1, 0x3d, 0x27, 0x74, 0xe5, 0x4a, 0x99, 0x97, 0x59, + 0x48, 0x28, 0x2b, 0x1c, 0xe6, 0x8c, 0xc2, 0xc8, 0x85, 0xf5, 0x22, 0x13, 0xbe, 0x8e, 0xe8, 0x8c, + 0xd2, 0x60, 0x62, 0x80, 0xca, 0x37, 0x71, 0xcd, 0x89, 0x09, 0xba, 0xc5, 0xf2, 0xe8, 0x26, 0x27, + 0xa2, 0xa8, 0xfe, 0xb4, 0x96, 0x47, 0x37, 0x01, 0x66, 0x1e, 
0xa1, 0x66, 0x7d, 0xfb, 0xaf, 0x0d, + 0x33, 0xe6, 0x98, 0xca, 0x59, 0x80, 0x3e, 0x0b, 0x93, 0x11, 0xb9, 0xe9, 0xf9, 0x9d, 0xfb, 0x52, + 0x27, 0xd0, 0xc3, 0x9d, 0xa8, 0xbe, 0xa2, 0x63, 0x72, 0xcd, 0xa2, 0x59, 0x86, 0x53, 0xd4, 0x50, + 0x0b, 0x26, 0xef, 0x79, 0xbe, 0x1b, 0xdc, 0x8b, 0x24, 0xfd, 0x52, 0xbe, 0x82, 0xf1, 0x2e, 0xc7, + 0x4c, 0xf5, 0xd1, 0x68, 0xee, 0xae, 0x41, 0x0c, 0xa7, 0x88, 0xd3, 0x05, 0x18, 0x76, 0xfc, 0xc5, + 0xe8, 0x76, 0x44, 0x42, 0x91, 0x11, 0x99, 0x2d, 0x40, 0x2c, 0x0b, 0x71, 0x02, 0xa7, 0x0b, 0x90, + 0xfd, 0xb9, 0x16, 0x06, 0x1d, 0x1e, 0xc7, 0x5e, 0x2c, 0x40, 0xac, 0x4a, 0xb1, 0x86, 0x41, 0x37, + 0x28, 0xfb, 0xb7, 0x1e, 0xf8, 0x38, 0x08, 0x62, 0xb9, 0xa5, 0x59, 0x0e, 0x4e, 0xad, 0x1c, 0x1b, + 0x58, 0x68, 0x15, 0x50, 0xd4, 0x69, 0xb7, 0x9b, 0xcc, 0x4e, 0xc1, 0x69, 0x32, 0x52, 0xfc, 0x8d, + 0xb8, 0xc8, 0xa3, 0x74, 0xd6, 0xbb, 0xa0, 0x38, 0xa3, 0x06, 0xe5, 0xd5, 0x5b, 0xa2, 0xab, 0xc3, + 0xac, 0xab, 0xfc, 0x31, 0xa2, 0xce, 0xfb, 0x29, 0x61, 0x68, 0x05, 0x46, 0xa3, 0xfd, 0xa8, 0x11, + 0x8b, 0x70, 0x63, 0x39, 0x69, 0x69, 0xea, 0x0c, 0x45, 0xcb, 0x8a, 0xc6, 0xab, 0x60, 0x59, 0x17, + 0x35, 0xe0, 0x94, 0xa0, 0xb8, 0xbc, 0xe3, 0xf8, 0x2a, 0xc9, 0x07, 0x37, 0xd7, 0x7c, 0xfe, 0xc1, + 0xc1, 0xfc, 0x29, 0xd1, 0xb2, 0x0e, 0x3e, 0x3c, 0x98, 0x3f, 0x5b, 0x0b, 0xdc, 0x0c, 0x08, 0xce, + 0xa2, 0x66, 0x7f, 0x2b, 0x93, 0x37, 0x58, 0x92, 0xde, 0xb8, 0x13, 0x12, 0xd4, 0x82, 0x89, 0x36, + 0x5b, 0xc6, 0x22, 0xfa, 0xbb, 0x58, 0x8b, 0x2f, 0x0d, 0xa8, 0x38, 0xb8, 0x47, 0xd9, 0xb4, 0x52, + 0xec, 0xb1, 0x1b, 0x59, 0x4d, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x57, 0xce, 0xb2, 0x13, 0xab, 0xce, + 0xb5, 0x01, 0xa3, 0xc2, 0x7a, 0x5b, 0x5c, 0x7d, 0xe6, 0xf2, 0xd5, 0x52, 0xc9, 0xb0, 0x09, 0x0b, + 0x70, 0x2c, 0xeb, 0xa2, 0xcf, 0xc0, 0x24, 0xbd, 0x49, 0x68, 0xd9, 0x2f, 0x4e, 0xe7, 0x7b, 0xd9, + 0x27, 0x49, 0x2f, 0xb4, 0xcc, 0x10, 0x7a, 0x65, 0x9c, 0x22, 0x86, 0xde, 0x64, 0x76, 0x06, 0x66, + 0x62, 0x8d, 0x3e, 0xa4, 0x75, 0x93, 0x02, 0x49, 0x56, 0x23, 0x92, 0x97, 0xb4, 0xc3, 0x7e, 0xb4, + 0x49, 0x3b, 0xd0, 0x4d, 0x98, 0x10, 0x99, 0x6a, 0xc5, 0xca, 0x2a, 0x1a, 0xda, 0xb2, 0x09, 0xac, + 0x03, 0x0f, 0xd3, 0x05, 0xd8, 0xac, 0x8c, 0xb6, 0xe1, 0x82, 0x96, 0x39, 0xe6, 0x5a, 0xe8, 0xb0, + 0x27, 0x6f, 0x8f, 0xb1, 0x3b, 0xed, 0x2c, 0x7d, 0xe2, 0xc1, 0xc1, 0xfc, 0x85, 0x8d, 0x5e, 0x88, + 0xb8, 0x37, 0x1d, 0x74, 0x0b, 0xce, 0x70, 0x1f, 0xd1, 0x0a, 0x71, 0xdc, 0xa6, 0xe7, 0xab, 0xc3, + 0x9a, 0x6f, 0xc9, 0x73, 0x0f, 0x0e, 0xe6, 0xcf, 0x2c, 0x66, 0x21, 0xe0, 0xec, 0x7a, 0xe8, 0xe3, + 0x50, 0x76, 0xfd, 0x48, 0x8c, 0xc1, 0x88, 0x91, 0x9c, 0xa7, 0x5c, 0x59, 0xaf, 0xab, 0xef, 0x4f, + 0xfe, 0xe0, 0xa4, 0x02, 0xda, 0xe6, 0x1a, 0x55, 0xa5, 0xc0, 0x18, 0xed, 0x8a, 0x6e, 0x93, 0x56, + 0x85, 0x19, 0x5e, 0x62, 0xfc, 0x29, 0x41, 0x19, 0x4f, 0x1b, 0x0e, 0x64, 0x06, 0x61, 0xf4, 0x06, + 0x20, 0x2a, 0xe1, 0x7b, 0x0d, 0xb2, 0xd8, 0x60, 0xa9, 0x05, 0x98, 0x02, 0xba, 0x64, 0xfa, 0x2d, + 0xd5, 0xbb, 0x30, 0x70, 0x46, 0x2d, 0x74, 0x9d, 0x1e, 0x39, 0x7a, 0xa9, 0xe0, 0x2a, 0x2a, 0x95, + 0x5a, 0x85, 0xb4, 0x43, 0xd2, 0x70, 0x62, 0xe2, 0x9a, 0x14, 0x71, 0xaa, 0x1e, 0x72, 0xe1, 0xbc, + 0xd3, 0x89, 0x03, 0xa6, 0xac, 0x36, 0x51, 0x37, 0x82, 0x5d, 0xe2, 0xb3, 0x77, 0xa2, 0xd2, 0xd2, + 0x25, 0x2a, 0x0d, 0x2c, 0xf6, 0xc0, 0xc3, 0x3d, 0xa9, 0x50, 0x29, 0x4e, 0xe5, 0x4e, 0x05, 0x33, + 0x68, 0x4f, 0x46, 0xfe, 0xd4, 0x97, 0x61, 0x6c, 0x27, 0x88, 0xe2, 0x75, 0x12, 0xdf, 0x0b, 0xc2, + 0x5d, 0x11, 0x7a, 0x31, 0x09, 0xd7, 0x9b, 0x80, 0xb0, 0x8e, 0x47, 0xaf, 0x69, 0xcc, 0x8a, 0xa1, + 0x5a, 0x61, 0x0f, 0xc8, 0xa5, 0x84, 0xc7, 0x5c, 0xe7, 0xc5, 0x58, 0xc2, 0x25, 0x6a, 
0xb5, 0xb6, + 0xcc, 0x1e, 0x83, 0x53, 0xa8, 0xd5, 0xda, 0x32, 0x96, 0x70, 0xba, 0x5c, 0xa3, 0x1d, 0x27, 0x24, + 0xb5, 0x30, 0x68, 0x90, 0x48, 0x0b, 0x12, 0xfd, 0x38, 0x0f, 0x2c, 0x49, 0x97, 0x6b, 0x3d, 0x0b, + 0x01, 0x67, 0xd7, 0x43, 0xa4, 0x3b, 0x6b, 0xd2, 0x64, 0xbe, 0x16, 0xbf, 0x5b, 0xde, 0x18, 0x30, + 0x71, 0x92, 0x0f, 0xd3, 0x2a, 0x5f, 0x13, 0x0f, 0x25, 0x19, 0xcd, 0x4e, 0xb1, 0xb5, 0x3d, 0x78, + 0x1c, 0x4a, 0xf5, 0x2e, 0x52, 0x4d, 0x51, 0xc2, 0x5d, 0xb4, 0x8d, 0xb8, 0x4c, 0xd3, 0x7d, 0x93, + 0xe9, 0x5e, 0x85, 0x72, 0xd4, 0xd9, 0x74, 0x83, 0x96, 0xe3, 0xf9, 0xec, 0x31, 0x58, 0xbb, 0x2f, + 0xd4, 0x25, 0x00, 0x27, 0x38, 0x68, 0x15, 0x4a, 0x8e, 0x7c, 0xf4, 0x40, 0xf9, 0xe1, 0x3c, 0xd4, + 0x53, 0x07, 0xf7, 0x70, 0x97, 0xcf, 0x1c, 0xaa, 0x2e, 0x7a, 0x0d, 0x26, 0x84, 0x8f, 0xa3, 0x48, + 0x15, 0x78, 0xca, 0x74, 0x44, 0xa9, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xdb, 0x30, 0x16, 0x07, 0x4d, + 0xe6, 0x4d, 0x41, 0xc5, 0xb0, 0xb3, 0xf9, 0x21, 0xc1, 0x36, 0x14, 0x9a, 0xae, 0x6f, 0x54, 0x55, + 0xb1, 0x4e, 0x07, 0x6d, 0xf0, 0xf5, 0xce, 0x82, 0x25, 0x93, 0x68, 0xf6, 0xb1, 0xfc, 0x33, 0x49, + 0xc5, 0x54, 0x36, 0xb7, 0x83, 0xa8, 0x89, 0x75, 0x32, 0xe8, 0x1a, 0xcc, 0xb4, 0x43, 0x2f, 0x60, + 0x6b, 0x42, 0xbd, 0x77, 0xcd, 0x9a, 0x29, 0x5e, 0x6a, 0x69, 0x04, 0xdc, 0x5d, 0x87, 0xb9, 0xa8, + 0x8a, 0xc2, 0xd9, 0x73, 0x3c, 0x9b, 0x30, 0xbf, 0x7e, 0xf1, 0x32, 0xac, 0xa0, 0x68, 0x8d, 0x71, + 0x62, 0xae, 0x39, 0x98, 0x9d, 0xcb, 0x8f, 0x20, 0xa2, 0x6b, 0x18, 0xb8, 0x70, 0xa9, 0xfe, 0xe2, + 0x84, 0x02, 0x72, 0xb5, 0xb4, 0x73, 0x54, 0xa2, 0x8f, 0x66, 0xcf, 0xf7, 0x30, 0x25, 0x4b, 0x89, + 0xff, 0x89, 0x40, 0x60, 0x14, 0x47, 0x38, 0x45, 0x13, 0x7d, 0x13, 0x4c, 0x8b, 0x88, 0x65, 0xc9, + 0x30, 0x5d, 0x48, 0x6c, 0x54, 0x71, 0x0a, 0x86, 0xbb, 0xb0, 0x79, 0x10, 0x79, 0x67, 0xb3, 0x49, + 0x04, 0xeb, 0xbb, 0xe9, 0xf9, 0xbb, 0xd1, 0xec, 0x45, 0xc6, 0x1f, 0x44, 0x10, 0xf9, 0x34, 0x14, + 0x67, 0xd4, 0x40, 0x1b, 0x30, 0xdd, 0x0e, 0x09, 0x69, 0x31, 0x41, 0x5c, 0x9c, 0x67, 0xf3, 0xdc, + 0x43, 0x9b, 0xf6, 0xa4, 0x96, 0x82, 0x1d, 0x66, 0x94, 0xe1, 0x2e, 0x0a, 0xe8, 0x1e, 0x94, 0x82, + 0x3d, 0x12, 0xee, 0x10, 0xc7, 0x9d, 0xbd, 0xd4, 0xc3, 0x66, 0x5a, 0x1c, 0x6e, 0xb7, 0x04, 0x6e, + 0xea, 0x8d, 0x5c, 0x16, 0xf7, 0x7f, 0x23, 0x97, 0x8d, 0xa1, 0xef, 0xb7, 0xe0, 0x9c, 0x54, 0xab, + 0xd7, 0xdb, 0x74, 0xd4, 0x97, 0x03, 0x3f, 0x8a, 0x43, 0xee, 0x53, 0xfc, 0x44, 0xbe, 0x9f, 0xed, + 0x46, 0x4e, 0x25, 0xa5, 0xbc, 0x3c, 0x97, 0x87, 0x11, 0xe1, 0xfc, 0x16, 0xe7, 0xbe, 0x11, 0x66, + 0xba, 0x4e, 0xee, 0xa3, 0xe4, 0xb5, 0x98, 0xdb, 0x85, 0x09, 0x63, 0x74, 0x1e, 0xe9, 0xf3, 0xe8, + 0xbf, 0x19, 0x85, 0xb2, 0x7a, 0x3a, 0x43, 0x57, 0xcd, 0x17, 0xd1, 0x73, 0xe9, 0x17, 0xd1, 0x12, + 0xbd, 0x32, 0xeb, 0x8f, 0xa0, 0x1b, 0x19, 0x11, 0x9c, 0xf2, 0xf6, 0xe2, 0xe0, 0xae, 0xb9, 0x9a, + 0x26, 0xb4, 0x38, 0xf0, 0xd3, 0xea, 0x50, 0x4f, 0xe5, 0xea, 0x35, 0x98, 0xf1, 0x03, 0x26, 0x2e, + 0x12, 0x57, 0xca, 0x02, 0xec, 0xc8, 0x2f, 0xeb, 0x21, 0x11, 0x52, 0x08, 0xb8, 0xbb, 0x0e, 0x6d, + 0x90, 0x9f, 0xd9, 0x69, 0x6d, 0x2e, 0x3f, 0xd2, 0xb1, 0x80, 0xa2, 0x27, 0x61, 0xb8, 0x1d, 0xb8, + 0xd5, 0x9a, 0x10, 0x15, 0xb5, 0x1c, 0xa7, 0x6e, 0xb5, 0x86, 0x39, 0x0c, 0x2d, 0xc2, 0x08, 0xfb, + 0x11, 0xcd, 0x8e, 0xe7, 0xfb, 0xbe, 0xb3, 0x1a, 0x5a, 0xd6, 0x10, 0x56, 0x01, 0x8b, 0x8a, 0x4c, + 0xab, 0x44, 0xe5, 0x6b, 0xa6, 0x55, 0x1a, 0x7d, 0x48, 0xad, 0x92, 0x24, 0x80, 0x13, 0x5a, 0xe8, + 0x3e, 0x9c, 0x31, 0xee, 0x34, 0x7c, 0x89, 0x90, 0x48, 0xf8, 0xdf, 0x3e, 0xd9, 0xf3, 0x32, 0x23, + 0x9e, 0x62, 0x2f, 0x88, 0x4e, 0x9f, 0xa9, 0x66, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0xcc, 0x34, + 0xba, 0x5a, 
0x2d, 0x0d, 0xde, 0xaa, 0x9a, 0xd0, 0xee, 0x16, 0xbb, 0x09, 0xa3, 0xd7, 0xa0, 0xf4, + 0x76, 0x10, 0x31, 0x36, 0x2b, 0xc4, 0x5b, 0xe9, 0xbc, 0x59, 0x7a, 0xf3, 0x56, 0x9d, 0x95, 0x1f, + 0x1e, 0xcc, 0x8f, 0xd5, 0x02, 0x57, 0xfe, 0xc5, 0xaa, 0x02, 0xfa, 0x6e, 0x0b, 0xe6, 0xba, 0x2f, + 0x4d, 0xaa, 0xd3, 0x13, 0x83, 0x77, 0xda, 0x16, 0x8d, 0xce, 0xad, 0xe4, 0x92, 0xc3, 0x3d, 0x9a, + 0xb2, 0x7f, 0x91, 0x3f, 0x9b, 0x8a, 0xc7, 0x15, 0x12, 0x75, 0x9a, 0x27, 0x91, 0x65, 0x71, 0xc5, + 0x78, 0xf7, 0x79, 0xe8, 0xa7, 0xf9, 0x5f, 0xb5, 0xd8, 0xd3, 0xfc, 0x06, 0x69, 0xb5, 0x9b, 0x4e, + 0x7c, 0x12, 0xbe, 0x7f, 0x6f, 0x42, 0x29, 0x16, 0xad, 0xf5, 0x4a, 0x0c, 0xa9, 0x75, 0x8a, 0x99, + 0x27, 0x28, 0x61, 0x53, 0x96, 0x62, 0x45, 0xc6, 0xfe, 0xa7, 0x7c, 0x06, 0x24, 0xe4, 0x04, 0xd4, + 0xeb, 0x15, 0x53, 0xbd, 0x3e, 0xdf, 0xe7, 0x0b, 0x72, 0xd4, 0xec, 0xff, 0xc4, 0xec, 0x37, 0x53, + 0xb2, 0xbc, 0xd7, 0x6d, 0x42, 0xec, 0x1f, 0xb4, 0xe0, 0x74, 0x96, 0x11, 0x25, 0xbd, 0x20, 0x70, + 0x15, 0x8f, 0xb2, 0x91, 0x51, 0x23, 0x78, 0x47, 0x94, 0x63, 0x85, 0x31, 0x70, 0xce, 0xa5, 0xa3, + 0xc5, 0x20, 0xbd, 0x05, 0x13, 0xb5, 0x90, 0x68, 0x07, 0xda, 0xeb, 0xdc, 0x99, 0x97, 0xf7, 0xe7, + 0xd9, 0x23, 0x3b, 0xf2, 0xda, 0x3f, 0x55, 0x80, 0xd3, 0xfc, 0x91, 0x7b, 0x71, 0x2f, 0xf0, 0xdc, + 0x5a, 0xe0, 0x8a, 0x7c, 0x59, 0x9f, 0x86, 0xf1, 0xb6, 0xa6, 0x97, 0xeb, 0x15, 0x4f, 0x4f, 0xd7, + 0xdf, 0x25, 0x9a, 0x04, 0xbd, 0x14, 0x1b, 0xb4, 0x90, 0x0b, 0xe3, 0x64, 0xcf, 0x6b, 0xa8, 0x97, + 0xd2, 0xc2, 0x91, 0x0f, 0x17, 0xd5, 0xca, 0x8a, 0x46, 0x07, 0x1b, 0x54, 0x1f, 0x41, 0x0a, 0x55, + 0xfb, 0x87, 0x2c, 0x78, 0x2c, 0x27, 0xfa, 0x1e, 0x6d, 0xee, 0x1e, 0x33, 0x27, 0x10, 0xd9, 0x18, + 0x55, 0x73, 0xdc, 0xc8, 0x00, 0x0b, 0x28, 0xfa, 0x24, 0x00, 0x37, 0x12, 0xa0, 0x37, 0xd4, 0x7e, + 0x61, 0xca, 0x8c, 0x08, 0x4b, 0x5a, 0xb0, 0x1c, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x1f, 0x2f, 0xc2, + 0x30, 0xcf, 0x23, 0xbd, 0x0a, 0xa3, 0x3b, 0x3c, 0x8b, 0xc0, 0x20, 0x09, 0x0b, 0x12, 0xdd, 0x01, + 0x2f, 0xc0, 0xb2, 0x32, 0x5a, 0x83, 0x53, 0x3c, 0x0b, 0x43, 0xb3, 0x42, 0x9a, 0xce, 0xbe, 0x54, + 0x74, 0xf1, 0x0c, 0x86, 0x4a, 0xe1, 0x57, 0xed, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0xeb, 0x30, 0x49, + 0x2f, 0x1e, 0x41, 0x27, 0x96, 0x94, 0x78, 0xfe, 0x05, 0x75, 0xd3, 0xd9, 0x30, 0xa0, 0x38, 0x85, + 0x4d, 0xef, 0xbe, 0xed, 0x2e, 0x95, 0xde, 0x70, 0x72, 0xf7, 0x35, 0xd5, 0x78, 0x26, 0x2e, 0xb3, + 0x9e, 0xec, 0x30, 0x5b, 0xd1, 0x8d, 0x9d, 0x90, 0x44, 0x3b, 0x41, 0xd3, 0x65, 0x82, 0xd6, 0xb0, + 0x66, 0x3d, 0x99, 0x82, 0xe3, 0xae, 0x1a, 0x94, 0xca, 0x96, 0xe3, 0x35, 0x3b, 0x21, 0x49, 0xa8, + 0x8c, 0x98, 0x54, 0x56, 0x53, 0x70, 0xdc, 0x55, 0x83, 0xae, 0xa3, 0x33, 0xb5, 0x30, 0xa0, 0xcc, + 0x4b, 0x86, 0x14, 0x51, 0x26, 0xb1, 0xa3, 0xd2, 0xfb, 0xb1, 0x47, 0xf0, 0x2d, 0x61, 0x34, 0xc8, + 0x29, 0x18, 0xef, 0xe1, 0x75, 0xe1, 0xf7, 0x28, 0xa9, 0xa0, 0xe7, 0x61, 0x4c, 0xc4, 0xd6, 0x67, + 0x96, 0x9b, 0x7c, 0xea, 0xd8, 0xfb, 0x7d, 0x25, 0x29, 0xc6, 0x3a, 0x8e, 0xfd, 0x3d, 0x05, 0x38, + 0x95, 0x61, 0x7a, 0xcf, 0x59, 0xd5, 0xb6, 0x17, 0xc5, 0x2a, 0x4b, 0x9b, 0xc6, 0xaa, 0x78, 0x39, + 0x56, 0x18, 0x74, 0x3f, 0x70, 0x66, 0x98, 0x66, 0x80, 0xc2, 0xb4, 0x55, 0x40, 0x8f, 0x98, 0xef, + 0xec, 0x12, 0x0c, 0x75, 0x22, 0x22, 0xc3, 0xe6, 0x29, 0xfe, 0xcd, 0x9e, 0x75, 0x18, 0x84, 0x8a, + 0xc7, 0xdb, 0xea, 0x85, 0x44, 0x13, 0x8f, 0xf9, 0x1b, 0x09, 0x87, 0xd1, 0xce, 0xc5, 0xc4, 0x77, + 0xfc, 0x58, 0x08, 0xd1, 0x49, 0xfc, 0x27, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0x52, 0x11, 0xce, 0xe5, + 0x3a, 0xe3, 0xd0, 0xae, 0xb7, 0x02, 0xdf, 0x8b, 0x03, 0x65, 0x18, 0xc1, 0x63, 0x3e, 0x91, 0xf6, + 0xce, 0x9a, 0x28, 0xc7, 0x0a, 0x03, 
0x5d, 0x86, 0x61, 0xa6, 0x74, 0xea, 0xca, 0x57, 0xb7, 0x54, + 0xe1, 0x41, 0x40, 0x38, 0x78, 0xe0, 0x5c, 0xa0, 0x4f, 0xc2, 0x50, 0x3b, 0x08, 0x9a, 0x69, 0xa6, + 0x45, 0xbb, 0x1b, 0x04, 0x4d, 0xcc, 0x80, 0xe8, 0x43, 0x62, 0xbc, 0x52, 0x96, 0x00, 0xd8, 0x71, + 0x83, 0x48, 0x1b, 0xb4, 0xa7, 0x61, 0x74, 0x97, 0xec, 0x87, 0x9e, 0xbf, 0x9d, 0xb6, 0x10, 0xb9, + 0xc1, 0x8b, 0xb1, 0x84, 0x9b, 0xa9, 0x87, 0x46, 0x8f, 0x3b, 0x89, 0x67, 0xa9, 0xef, 0x11, 0xf8, + 0xbd, 0x45, 0x98, 0xc2, 0x4b, 0x95, 0xf7, 0x27, 0xe2, 0x76, 0xf7, 0x44, 0x1c, 0x77, 0x12, 0xcf, + 0xfe, 0xb3, 0xf1, 0x73, 0x16, 0x4c, 0xb1, 0x08, 0xff, 0x22, 0x5a, 0x90, 0x17, 0xf8, 0x27, 0x20, + 0xe2, 0x3d, 0x09, 0xc3, 0x21, 0x6d, 0x34, 0x9d, 0xa8, 0x8e, 0xf5, 0x04, 0x73, 0x18, 0x3a, 0x0f, + 0x43, 0xac, 0x0b, 0x74, 0xf2, 0xc6, 0x79, 0x8e, 0x9f, 0x8a, 0x13, 0x3b, 0x98, 0x95, 0xb2, 0x10, + 0x18, 0x98, 0xb4, 0x9b, 0x1e, 0xef, 0x74, 0xf2, 0x24, 0xf8, 0xde, 0x08, 0x81, 0x91, 0xd9, 0xb5, + 0x77, 0x17, 0x02, 0x23, 0x9b, 0x64, 0xef, 0xeb, 0xd3, 0x1f, 0x16, 0xe0, 0x62, 0x66, 0xbd, 0x81, + 0x43, 0x60, 0xf4, 0xae, 0xfd, 0x28, 0x23, 0xc1, 0x17, 0x4f, 0xd0, 0xfe, 0x6e, 0x68, 0x50, 0x09, + 0x73, 0x78, 0x80, 0xc8, 0x14, 0x99, 0x43, 0xf6, 0x1e, 0x89, 0x4c, 0x91, 0xd9, 0xb7, 0x9c, 0xeb, + 0xdf, 0x9f, 0x17, 0x72, 0xbe, 0x85, 0x5d, 0x04, 0xaf, 0x50, 0x3e, 0xc3, 0x80, 0x91, 0x90, 0x98, + 0xc7, 0x39, 0x8f, 0xe1, 0x65, 0x58, 0x41, 0xd1, 0x22, 0x4c, 0xb5, 0x3c, 0x9f, 0x32, 0x9f, 0x7d, + 0x53, 0xf0, 0x53, 0x81, 0x83, 0xd6, 0x4c, 0x30, 0x4e, 0xe3, 0x23, 0x4f, 0x8b, 0x5a, 0x51, 0xc8, + 0x4f, 0xfd, 0x9c, 0xdb, 0xdb, 0x05, 0xf3, 0xb9, 0x54, 0x8d, 0x62, 0x46, 0x04, 0x8b, 0x35, 0xed, + 0xfe, 0x5f, 0x1c, 0xfc, 0xfe, 0x3f, 0x9e, 0x7d, 0xf7, 0x9f, 0x7b, 0x0d, 0x26, 0x1e, 0x5a, 0xe1, + 0x6b, 0x7f, 0xa5, 0x08, 0x8f, 0xf7, 0xd8, 0xf6, 0x9c, 0xd7, 0x1b, 0x73, 0xa0, 0xf1, 0xfa, 0xae, + 0x79, 0xa8, 0xc1, 0xe9, 0xad, 0x4e, 0xb3, 0xb9, 0xcf, 0x4c, 0xdc, 0x89, 0x2b, 0x31, 0x84, 0x4c, + 0x79, 0x5e, 0x66, 0x55, 0x5a, 0xcd, 0xc0, 0xc1, 0x99, 0x35, 0xa9, 0x40, 0x4f, 0x4f, 0x92, 0x7d, + 0x45, 0x2a, 0x25, 0xd0, 0x63, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0d, 0x66, 0x9c, 0x3d, 0xc7, 0xe3, + 0xa1, 0x3f, 0x25, 0x01, 0x2e, 0xd1, 0x2b, 0x3d, 0xdd, 0x62, 0x1a, 0x01, 0x77, 0xd7, 0x41, 0x6f, + 0x00, 0x0a, 0x44, 0xea, 0xfa, 0x6b, 0xc4, 0x17, 0xaf, 0x5a, 0x6c, 0xee, 0x8a, 0x09, 0x4b, 0xb8, + 0xd5, 0x85, 0x81, 0x33, 0x6a, 0xa5, 0xa2, 0x40, 0x8c, 0xe4, 0x47, 0x81, 0xe8, 0xcd, 0x17, 0xfb, + 0x26, 0x21, 0xf8, 0x2f, 0x16, 0x3d, 0xbe, 0xb8, 0x90, 0x6f, 0x06, 0x33, 0x7b, 0x8d, 0x59, 0x8d, + 0x71, 0x1d, 0x9e, 0x16, 0x90, 0xe1, 0x8c, 0x66, 0x35, 0x96, 0x00, 0xb1, 0x89, 0xcb, 0x17, 0x44, + 0x94, 0xf8, 0x01, 0x1a, 0x22, 0xbe, 0x88, 0xb8, 0xa2, 0x30, 0xd0, 0xa7, 0x60, 0xd4, 0xf5, 0xf6, + 0xbc, 0x28, 0x08, 0xc5, 0x4a, 0x3f, 0xe2, 0x73, 0x41, 0xc2, 0x07, 0x2b, 0x9c, 0x0c, 0x96, 0xf4, + 0xec, 0xef, 0x2d, 0xc0, 0x84, 0x6c, 0xf1, 0xcd, 0x4e, 0x10, 0x3b, 0x27, 0x70, 0x2c, 0x5f, 0x33, + 0x8e, 0xe5, 0x0f, 0xf5, 0x0a, 0x3b, 0xc3, 0xba, 0x94, 0x7b, 0x1c, 0xdf, 0x4a, 0x1d, 0xc7, 0x4f, + 0xf5, 0x27, 0xd5, 0xfb, 0x18, 0xfe, 0x67, 0x16, 0xcc, 0x18, 0xf8, 0x27, 0x70, 0x1a, 0xac, 0x9a, + 0xa7, 0xc1, 0x13, 0x7d, 0xbf, 0x21, 0xe7, 0x14, 0xf8, 0xce, 0x62, 0xaa, 0xef, 0x8c, 0xfb, 0xbf, + 0x0d, 0x43, 0x3b, 0x4e, 0xe8, 0xf6, 0x0a, 0xb3, 0xdd, 0x55, 0x69, 0xe1, 0xba, 0x13, 0x8a, 0x67, + 0xbd, 0x67, 0x55, 0xe6, 0x65, 0x27, 0xec, 0xff, 0xa4, 0xc7, 0x9a, 0x42, 0xaf, 0xc2, 0x48, 0xd4, + 0x08, 0xda, 0xca, 0x28, 0xfd, 0x12, 0xcf, 0xca, 0x4c, 0x4b, 0x0e, 0x0f, 0xe6, 0x91, 0xd9, 0x1c, + 0x2d, 0xc6, 0x02, 0x1f, 0x7d, 0x1a, 0x26, 0xd8, 0x2f, 0x65, 
0x63, 0x53, 0xcc, 0x4f, 0xc9, 0x53, + 0xd7, 0x11, 0xb9, 0x01, 0x9a, 0x51, 0x84, 0x4d, 0x52, 0x73, 0xdb, 0x50, 0x56, 0x9f, 0xf5, 0x48, + 0xdf, 0xe3, 0xfe, 0x43, 0x11, 0x4e, 0x65, 0xac, 0x39, 0x14, 0x19, 0x33, 0xf1, 0xfc, 0x80, 0x4b, + 0xf5, 0x5d, 0xce, 0x45, 0xc4, 0x6e, 0x43, 0xae, 0x58, 0x5b, 0x03, 0x37, 0x7a, 0x3b, 0x22, 0xe9, + 0x46, 0x69, 0x51, 0xff, 0x46, 0x69, 0x63, 0x27, 0x36, 0xd4, 0xb4, 0x21, 0xd5, 0xd3, 0x47, 0x3a, + 0xa7, 0x7f, 0x52, 0x84, 0xd3, 0x59, 0x91, 0xb0, 0xd0, 0xb7, 0xa4, 0xd2, 0xb3, 0xbd, 0x34, 0x68, + 0x0c, 0x2d, 0x9e, 0xb3, 0x8d, 0xeb, 0x80, 0x97, 0x16, 0xcc, 0x84, 0x6d, 0x7d, 0x87, 0x59, 0xb4, + 0xc9, 0x7c, 0xdc, 0x43, 0x9e, 0x56, 0x4f, 0xb2, 0x8f, 0x8f, 0x0e, 0xdc, 0x01, 0x91, 0x8f, 0x2f, + 0x4a, 0xbd, 0xdf, 0xcb, 0xe2, 0xfe, 0xef, 0xf7, 0xb2, 0xe5, 0x39, 0x0f, 0xc6, 0xb4, 0xaf, 0x79, + 0xa4, 0x33, 0xbe, 0x4b, 0x4f, 0x2b, 0xad, 0xdf, 0x8f, 0x74, 0xd6, 0x7f, 0xc8, 0x82, 0x94, 0xc9, + 0xb5, 0x52, 0x8b, 0x59, 0xb9, 0x6a, 0xb1, 0x4b, 0x30, 0x14, 0x06, 0x4d, 0x92, 0xce, 0x86, 0x86, + 0x83, 0x26, 0xc1, 0x0c, 0x42, 0x31, 0xe2, 0x44, 0xd9, 0x31, 0xae, 0x5f, 0xe4, 0xc4, 0x15, 0xed, + 0x49, 0x18, 0x6e, 0x92, 0x3d, 0xd2, 0x4c, 0x27, 0xad, 0xb8, 0x49, 0x0b, 0x31, 0x87, 0xd9, 0x3f, + 0x37, 0x04, 0x17, 0x7a, 0x46, 0x89, 0xa0, 0xd7, 0xa1, 0x6d, 0x27, 0x26, 0xf7, 0x9c, 0xfd, 0x74, + 0x74, 0xf9, 0x6b, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0x8a, 0xe1, 0x41, 0x62, 0x53, 0x4a, 0x44, 0x11, + 0x1b, 0x56, 0x40, 0x4d, 0xa5, 0x54, 0xf1, 0x38, 0x94, 0x52, 0x2f, 0x00, 0x44, 0x51, 0x93, 0x1b, + 0xbe, 0xb8, 0xc2, 0xdb, 0x26, 0x09, 0x26, 0x5c, 0xbf, 0x29, 0x20, 0x58, 0xc3, 0x42, 0x15, 0x98, + 0x6e, 0x87, 0x41, 0xcc, 0x75, 0xb2, 0x15, 0x6e, 0x1b, 0x36, 0x6c, 0x3a, 0xe8, 0xd7, 0x52, 0x70, + 0xdc, 0x55, 0x03, 0xbd, 0x0c, 0x63, 0xc2, 0x69, 0xbf, 0x16, 0x04, 0x4d, 0xa1, 0x06, 0x52, 0xe6, + 0x52, 0xf5, 0x04, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x53, 0xf4, 0x8e, 0x66, 0x56, 0xe3, 0xca, 0x5e, + 0x0d, 0x2f, 0x15, 0x15, 0xaf, 0x34, 0x50, 0x54, 0xbc, 0x44, 0x31, 0x56, 0x1e, 0xf8, 0x6d, 0x0b, + 0xfa, 0xaa, 0x92, 0x7e, 0x7a, 0x08, 0x4e, 0x89, 0x85, 0xf3, 0xa8, 0x97, 0xcb, 0xed, 0xee, 0xe5, + 0x72, 0x1c, 0xaa, 0xb3, 0xf7, 0xd7, 0xcc, 0x49, 0xaf, 0x99, 0xef, 0xb3, 0xc0, 0x14, 0xaf, 0xd0, + 0xff, 0x9f, 0x9b, 0x9e, 0xe3, 0xe5, 0x5c, 0x71, 0xcd, 0x95, 0x07, 0xc8, 0xbb, 0x4c, 0xd4, 0x61, + 0xff, 0x27, 0x0b, 0x9e, 0xe8, 0x4b, 0x11, 0xad, 0x40, 0x99, 0xc9, 0x80, 0xda, 0xed, 0xec, 0x29, + 0x65, 0x3b, 0x2a, 0x01, 0x39, 0x22, 0x69, 0x52, 0x13, 0xad, 0x74, 0xe5, 0x41, 0x79, 0x3a, 0x23, + 0x0f, 0xca, 0x19, 0x63, 0x78, 0x1e, 0x32, 0x11, 0xca, 0x1f, 0x14, 0x61, 0x84, 0xaf, 0xf8, 0x13, + 0xb8, 0x86, 0x3d, 0x03, 0x65, 0xaf, 0xd5, 0xea, 0xf0, 0x6c, 0x12, 0xc3, 0xdc, 0xb3, 0x92, 0x0e, + 0x4d, 0x55, 0x16, 0xe2, 0x04, 0x8e, 0x56, 0x85, 0x92, 0xb7, 0x47, 0x8c, 0x3e, 0xde, 0xf1, 0x85, + 0x8a, 0x13, 0x3b, 0x5c, 0xa6, 0x50, 0x47, 0x5b, 0xa2, 0x0e, 0x46, 0x9f, 0x05, 0x88, 0xe2, 0xd0, + 0xf3, 0xb7, 0x69, 0x99, 0x88, 0xde, 0xf8, 0xe1, 0x1e, 0xd4, 0xea, 0x0a, 0x99, 0xd3, 0x4c, 0xb6, + 0xb9, 0x02, 0x60, 0x8d, 0x22, 0x5a, 0x30, 0x0e, 0xd7, 0xb9, 0x94, 0x96, 0x14, 0x38, 0xd5, 0xe4, + 0xa8, 0x9d, 0x7b, 0x05, 0xca, 0x8a, 0x78, 0x3f, 0x95, 0xcf, 0xb8, 0x2e, 0x89, 0x7c, 0x02, 0xa6, + 0x52, 0x7d, 0x3b, 0x92, 0xc6, 0xe8, 0xe7, 0x2d, 0x98, 0xe2, 0x9d, 0x59, 0xf1, 0xf7, 0x04, 0x03, + 0x7e, 0x07, 0x4e, 0x37, 0x33, 0x18, 0xa1, 0x98, 0xfe, 0xc1, 0x19, 0xa7, 0xd2, 0x10, 0x65, 0x41, + 0x71, 0x66, 0x1b, 0xe8, 0x0a, 0x5d, 0xe4, 0x94, 0xd1, 0x39, 0x4d, 0xe1, 0x68, 0x39, 0xce, 0x17, + 0x38, 0x2f, 0xc3, 0x0a, 0x6a, 0xff, 0xb6, 0x05, 0x33, 0xbc, 0xe7, 0x37, 0xc8, 0xbe, 
0x62, 0x07, + 0x5f, 0xcb, 0xbe, 0x8b, 0x3c, 0x46, 0x85, 0x9c, 0x3c, 0x46, 0xfa, 0xa7, 0x15, 0x7b, 0x7e, 0xda, + 0x4f, 0x59, 0x20, 0x56, 0xc8, 0x09, 0xdc, 0xfb, 0xbf, 0xd1, 0xbc, 0xf7, 0xcf, 0xe5, 0x6f, 0x82, + 0x9c, 0x0b, 0xff, 0x9f, 0x59, 0x30, 0xcd, 0x11, 0x92, 0x07, 0xea, 0xaf, 0xe9, 0x3c, 0x0c, 0x92, + 0xed, 0xf4, 0x06, 0xd9, 0xdf, 0x08, 0x6a, 0x4e, 0xbc, 0x93, 0xfd, 0x51, 0xc6, 0x64, 0x0d, 0xf5, + 0x9c, 0x2c, 0x57, 0x6e, 0xa0, 0x23, 0xa4, 0x50, 0x3e, 0x72, 0x98, 0x7f, 0xfb, 0xab, 0x16, 0x20, + 0xde, 0x8c, 0x21, 0x2b, 0x51, 0x09, 0x84, 0x95, 0x6a, 0x67, 0x4b, 0xc2, 0x9a, 0x14, 0x04, 0x6b, + 0x58, 0xc7, 0x32, 0x3c, 0x29, 0x2b, 0x83, 0x62, 0x7f, 0x2b, 0x83, 0x23, 0x8c, 0xe8, 0x1f, 0x0c, + 0x43, 0xda, 0x5d, 0x04, 0xdd, 0x81, 0xf1, 0x86, 0xd3, 0x76, 0x36, 0xbd, 0xa6, 0x17, 0x7b, 0x24, + 0xea, 0x65, 0x9e, 0xb4, 0xac, 0xe1, 0x89, 0x77, 0x61, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x02, 0x40, + 0x3b, 0xf4, 0xf6, 0xbc, 0x26, 0xd9, 0x66, 0xea, 0x09, 0xe6, 0xda, 0xcd, 0x6d, 0x6e, 0x64, 0x29, + 0xd6, 0x30, 0x32, 0x7c, 0x67, 0x8b, 0x8f, 0xd8, 0x77, 0x16, 0x4e, 0xcc, 0x77, 0x76, 0xe8, 0x48, + 0xbe, 0xb3, 0xa5, 0x23, 0xfb, 0xce, 0x0e, 0x0f, 0xe4, 0x3b, 0x8b, 0xe1, 0xac, 0x14, 0xf7, 0xe8, + 0xff, 0x55, 0xaf, 0x49, 0x84, 0x8c, 0xcf, 0xfd, 0xd1, 0xe7, 0x1e, 0x1c, 0xcc, 0x9f, 0xc5, 0x99, + 0x18, 0x38, 0xa7, 0x26, 0xfa, 0x24, 0xcc, 0x3a, 0xcd, 0x66, 0x70, 0x4f, 0x4d, 0xea, 0x4a, 0xd4, + 0x70, 0x9a, 0x5c, 0xef, 0x3f, 0xca, 0xa8, 0x9e, 0x7f, 0x70, 0x30, 0x3f, 0xbb, 0x98, 0x83, 0x83, + 0x73, 0x6b, 0xa3, 0x8f, 0x43, 0xb9, 0x1d, 0x06, 0x8d, 0x35, 0xcd, 0xa7, 0xed, 0x22, 0x1d, 0xc0, + 0x9a, 0x2c, 0x3c, 0x3c, 0x98, 0x9f, 0x50, 0x7f, 0xd8, 0x81, 0x9f, 0x54, 0xb0, 0x77, 0xe1, 0x54, + 0x9d, 0x84, 0x1e, 0x4b, 0x88, 0xec, 0x26, 0xfc, 0x63, 0x03, 0xca, 0x61, 0x8a, 0x63, 0x0e, 0x14, + 0xd7, 0x4e, 0x8b, 0x87, 0x2e, 0x39, 0x64, 0x42, 0xc8, 0xfe, 0xdf, 0x16, 0x8c, 0x0a, 0xf7, 0x8d, + 0x13, 0x90, 0xea, 0x16, 0x0d, 0xe5, 0xfa, 0x7c, 0xf6, 0xa9, 0xc2, 0x3a, 0x93, 0xab, 0x56, 0xaf, + 0xa6, 0xd4, 0xea, 0x4f, 0xf4, 0x22, 0xd2, 0x5b, 0xa1, 0xfe, 0xb7, 0x8a, 0x30, 0x69, 0xfa, 0xf9, + 0x9d, 0xc0, 0x10, 0xac, 0xc3, 0x68, 0x24, 0x1c, 0xd9, 0x0a, 0xf9, 0xe6, 0xdb, 0xe9, 0x49, 0x4c, + 0x4c, 0xbb, 0x84, 0xeb, 0x9a, 0x24, 0x92, 0xe9, 0x21, 0x57, 0x7c, 0x84, 0x1e, 0x72, 0xfd, 0x5c, + 0x2d, 0x87, 0x8e, 0xc3, 0xd5, 0xd2, 0xfe, 0x32, 0x3b, 0xd9, 0xf4, 0xf2, 0x13, 0x10, 0x7a, 0xae, + 0x99, 0x67, 0xa0, 0xdd, 0x63, 0x65, 0x89, 0x4e, 0xe5, 0x08, 0x3f, 0x3f, 0x6b, 0xc1, 0x85, 0x8c, + 0xaf, 0xd2, 0x24, 0xa1, 0x67, 0xa1, 0xe4, 0x74, 0x5c, 0x4f, 0xed, 0x65, 0xed, 0x89, 0x6d, 0x51, + 0x94, 0x63, 0x85, 0x81, 0x96, 0x61, 0x86, 0xdc, 0x6f, 0x7b, 0xfc, 0x75, 0x51, 0xb7, 0xbf, 0x2c, + 0xf2, 0x58, 0xdf, 0x2b, 0x69, 0x20, 0xee, 0xc6, 0x57, 0xe1, 0x27, 0x8a, 0xb9, 0xe1, 0x27, 0xfe, + 0xa1, 0x05, 0x63, 0xca, 0x95, 0xeb, 0x91, 0x8f, 0xf6, 0x37, 0x99, 0xa3, 0xfd, 0x78, 0x8f, 0xd1, + 0xce, 0x19, 0xe6, 0xdf, 0x2a, 0xa8, 0xfe, 0xd6, 0x82, 0x30, 0x1e, 0x40, 0xc2, 0x7a, 0x15, 0x4a, + 0xed, 0x30, 0x88, 0x83, 0x46, 0xd0, 0x14, 0x02, 0xd6, 0xf9, 0x24, 0x0e, 0x0b, 0x2f, 0x3f, 0xd4, + 0x7e, 0x63, 0x85, 0x4d, 0x65, 0x1b, 0xa7, 0xdd, 0x96, 0x00, 0x69, 0x96, 0xc5, 0xa2, 0x94, 0x26, + 0xc5, 0x58, 0xc7, 0x61, 0x03, 0x1e, 0x84, 0xb1, 0x90, 0x83, 0x92, 0x01, 0x0f, 0xc2, 0x18, 0x33, + 0x08, 0x72, 0x01, 0x62, 0x27, 0xdc, 0x26, 0x31, 0x2d, 0x13, 0xa1, 0xa2, 0xf2, 0xf9, 0x4d, 0x27, + 0xf6, 0x9a, 0x0b, 0x9e, 0x1f, 0x47, 0x71, 0xb8, 0x50, 0xf5, 0xe3, 0x5b, 0x21, 0xbf, 0xe2, 0x69, + 0xb1, 0x58, 0x14, 0x2d, 0xac, 0xd1, 0x95, 0x6e, 0xcb, 0xac, 0x8d, 0x61, 0xf3, 0x7d, 0x7f, 0x5d, + 0x94, 0x63, 
0x85, 0x61, 0xbf, 0xc2, 0x4e, 0x1f, 0x36, 0xa6, 0x47, 0x0b, 0x5e, 0xf2, 0x8b, 0x65, + 0x35, 0x1b, 0xec, 0x71, 0xaf, 0xa2, 0x87, 0x48, 0xe9, 0xcd, 0xec, 0x69, 0xc3, 0xba, 0x0b, 0x53, + 0x12, 0x47, 0x05, 0x7d, 0x73, 0x97, 0xcd, 0xc6, 0x73, 0x7d, 0x4e, 0x8d, 0x23, 0x58, 0x69, 0xb0, + 0x94, 0x05, 0x2c, 0xa0, 0x7b, 0xb5, 0x26, 0xf6, 0x85, 0x96, 0xb2, 0x40, 0x00, 0x70, 0x82, 0x83, + 0xae, 0x8a, 0x0b, 0x3c, 0x57, 0x7d, 0x3f, 0x9e, 0xba, 0xc0, 0xcb, 0xcf, 0xd7, 0x94, 0xe5, 0xcf, + 0xc3, 0x98, 0x4a, 0xd8, 0x59, 0xe3, 0x79, 0x20, 0xc5, 0xb2, 0x59, 0x49, 0x8a, 0xb1, 0x8e, 0x83, + 0x36, 0x60, 0x2a, 0xe2, 0xaa, 0x24, 0x15, 0x1f, 0x95, 0xab, 0xe4, 0x3e, 0x2c, 0x0d, 0x5d, 0xea, + 0x26, 0xf8, 0x90, 0x15, 0x71, 0x6e, 0x23, 0x5d, 0x85, 0xd3, 0x24, 0xd0, 0xeb, 0x30, 0xd9, 0x0c, + 0x1c, 0x77, 0xc9, 0x69, 0x3a, 0x7e, 0x83, 0x7d, 0x6f, 0xc9, 0xcc, 0xfb, 0x76, 0xd3, 0x80, 0xe2, + 0x14, 0x36, 0x15, 0x96, 0xf4, 0x12, 0x11, 0xd3, 0xd7, 0xf1, 0xb7, 0x49, 0x24, 0xd2, 0x2f, 0x32, + 0x61, 0xe9, 0x66, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x85, 0x71, 0xf9, 0xf9, 0x9a, 0x67, 0x7d, + 0x62, 0xdb, 0xaf, 0xc1, 0xb0, 0x81, 0x89, 0xee, 0xc1, 0x19, 0xf9, 0x7f, 0x23, 0x74, 0xb6, 0xb6, + 0xbc, 0x86, 0x70, 0x37, 0xe5, 0x8e, 0x77, 0x8b, 0xd2, 0x3b, 0x6c, 0x25, 0x0b, 0xe9, 0xf0, 0x60, + 0xfe, 0x92, 0x18, 0xb5, 0x4c, 0x38, 0x9b, 0xc4, 0x6c, 0xfa, 0x68, 0x0d, 0x4e, 0xed, 0x10, 0xa7, + 0x19, 0xef, 0x2c, 0xef, 0x90, 0xc6, 0xae, 0xdc, 0x44, 0xcc, 0x5f, 0x5f, 0xb3, 0x88, 0xbf, 0xde, + 0x8d, 0x82, 0xb3, 0xea, 0xa1, 0xb7, 0x60, 0xb6, 0xdd, 0xd9, 0x6c, 0x7a, 0xd1, 0xce, 0x7a, 0x10, + 0x33, 0x6b, 0x17, 0x95, 0xff, 0x53, 0x38, 0xf6, 0xab, 0x88, 0x08, 0xb5, 0x1c, 0x3c, 0x9c, 0x4b, + 0x01, 0xbd, 0x03, 0x67, 0x52, 0x8b, 0x41, 0xb8, 0x36, 0x4f, 0xe6, 0x47, 0x48, 0xaf, 0x67, 0x55, + 0x10, 0x51, 0x02, 0xb2, 0x40, 0x38, 0xbb, 0x09, 0xf4, 0x12, 0x94, 0xbc, 0xf6, 0xaa, 0xd3, 0xf2, + 0x9a, 0xfb, 0x2c, 0xc4, 0x7b, 0x99, 0x85, 0x3d, 0x2f, 0x55, 0x6b, 0xbc, 0xec, 0x50, 0xfb, 0x8d, + 0x15, 0x26, 0xbd, 0x22, 0x68, 0x81, 0x2c, 0xa3, 0xd9, 0xe9, 0xc4, 0x98, 0x57, 0x8b, 0x76, 0x19, + 0x61, 0x03, 0xeb, 0xdd, 0xd9, 0x48, 0xbd, 0x4d, 0x2b, 0x6b, 0x32, 0x23, 0xfa, 0x1c, 0x8c, 0xeb, + 0x2b, 0x56, 0x9c, 0x7f, 0x97, 0xb3, 0x45, 0x2a, 0x6d, 0x65, 0x73, 0x89, 0x53, 0xad, 0x5e, 0x1d, + 0x86, 0x0d, 0x8a, 0x36, 0x81, 0xec, 0xb1, 0x44, 0x37, 0xa1, 0xd4, 0x68, 0x7a, 0xc4, 0x8f, 0xab, + 0xb5, 0x5e, 0x31, 0x98, 0x96, 0x05, 0x8e, 0x98, 0x1c, 0x11, 0xbe, 0x9a, 0x97, 0x61, 0x45, 0xc1, + 0xfe, 0x95, 0x02, 0xcc, 0xf7, 0x89, 0x85, 0x9e, 0x52, 0xe5, 0x5b, 0x03, 0xa9, 0xf2, 0x17, 0x65, + 0xe6, 0xd4, 0xf5, 0x94, 0xca, 0x22, 0x95, 0x15, 0x35, 0x51, 0x5c, 0xa4, 0xf1, 0x07, 0x36, 0xad, + 0xd6, 0x5f, 0x03, 0x86, 0xfa, 0x3a, 0x07, 0x18, 0xaf, 0x80, 0xc3, 0x83, 0xdf, 0x93, 0x72, 0x5f, + 0x74, 0xec, 0x2f, 0x17, 0xe0, 0x8c, 0x1a, 0xc2, 0xaf, 0xdf, 0x81, 0xbb, 0xdd, 0x3d, 0x70, 0xc7, + 0xf0, 0x1e, 0x66, 0xdf, 0x82, 0x11, 0x1e, 0x54, 0x6a, 0x00, 0xf9, 0xec, 0x49, 0x33, 0xfe, 0xa2, + 0x12, 0x09, 0x8c, 0x18, 0x8c, 0xdf, 0x6d, 0xc1, 0xd4, 0xc6, 0x72, 0xad, 0x1e, 0x34, 0x76, 0x49, + 0xbc, 0xc8, 0xe5, 0x69, 0x2c, 0x64, 0x2d, 0xeb, 0x21, 0x65, 0xa8, 0x2c, 0xe9, 0xec, 0x12, 0x0c, + 0xed, 0x04, 0x51, 0x9c, 0x7e, 0x2c, 0xbf, 0x1e, 0x44, 0x31, 0x66, 0x10, 0xfb, 0x77, 0x2c, 0x18, + 0x66, 0xb9, 0xc2, 0xfb, 0x65, 0xab, 0x1f, 0xe4, 0xbb, 0xd0, 0xcb, 0x30, 0x42, 0xb6, 0xb6, 0x48, + 0x23, 0x16, 0xb3, 0x2a, 0xbd, 0x9b, 0x47, 0x56, 0x58, 0x29, 0x15, 0x30, 0x58, 0x63, 0xfc, 0x2f, + 0x16, 0xc8, 0xe8, 0x2e, 0x94, 0x63, 0xaf, 0x45, 0x16, 0x5d, 0x57, 0x3c, 0x37, 0x3e, 0x84, 0x87, + 0xf6, 0x86, 0x24, 0x80, 0x13, 0x5a, 
0xf6, 0x97, 0x0a, 0x00, 0x49, 0xb4, 0x8f, 0x7e, 0x9f, 0xb8, + 0xd4, 0xf5, 0x10, 0x75, 0x39, 0xe3, 0x21, 0x0a, 0x25, 0x04, 0x33, 0x5e, 0xa1, 0xd4, 0x30, 0x15, + 0x07, 0x1a, 0xa6, 0xa1, 0xa3, 0x0c, 0xd3, 0x32, 0xcc, 0x24, 0xd1, 0x4a, 0xcc, 0x60, 0x4d, 0xec, + 0x0e, 0xb5, 0x91, 0x06, 0xe2, 0x6e, 0x7c, 0x9b, 0xc0, 0x25, 0x15, 0xb4, 0x41, 0x9c, 0x35, 0xcc, + 0x9a, 0x55, 0x7f, 0xd8, 0xeb, 0x33, 0x4e, 0xc9, 0x4b, 0x5b, 0x21, 0xf7, 0xa5, 0xed, 0x47, 0x2d, + 0x38, 0x9d, 0x6e, 0x87, 0xb9, 0x17, 0x7e, 0xd1, 0x82, 0x33, 0xec, 0xbd, 0x91, 0xb5, 0xda, 0xfd, + 0xba, 0xf9, 0x52, 0xcf, 0x40, 0x14, 0x39, 0x3d, 0x4e, 0xdc, 0xe8, 0xd7, 0xb2, 0x48, 0xe3, 0xec, + 0x16, 0xed, 0xff, 0x58, 0x80, 0xd9, 0xbc, 0x08, 0x16, 0xcc, 0xd8, 0xdd, 0xb9, 0x5f, 0xdf, 0x25, + 0xf7, 0x84, 0x49, 0x71, 0x62, 0xec, 0xce, 0x8b, 0xb1, 0x84, 0xa7, 0xc3, 0x5b, 0x17, 0x06, 0x0b, + 0x6f, 0x8d, 0x76, 0x60, 0xe6, 0xde, 0x0e, 0xf1, 0x6f, 0xfb, 0x91, 0x13, 0x7b, 0xd1, 0x96, 0xc7, + 0x1e, 0x0a, 0xf9, 0xba, 0xf9, 0x98, 0x34, 0xfc, 0xbd, 0x9b, 0x46, 0x38, 0x3c, 0x98, 0xbf, 0x60, + 0x14, 0x24, 0x5d, 0xe6, 0x8c, 0x04, 0x77, 0x13, 0xed, 0x8e, 0x0e, 0x3e, 0xf4, 0x08, 0xa3, 0x83, + 0xdb, 0x5f, 0xb4, 0xe0, 0x5c, 0x6e, 0xf6, 0x3e, 0x74, 0x05, 0x4a, 0x4e, 0xdb, 0xe3, 0xba, 0x56, + 0xc1, 0x46, 0x99, 0xce, 0xa0, 0x56, 0xe5, 0x9a, 0x56, 0x05, 0x55, 0x59, 0x85, 0x0b, 0xb9, 0x59, + 0x85, 0xfb, 0x26, 0x09, 0xb6, 0xbf, 0xcb, 0x02, 0xe1, 0xa8, 0x37, 0x00, 0xef, 0xfe, 0xb4, 0x4c, + 0xca, 0x6e, 0x64, 0x10, 0xb9, 0x94, 0xef, 0xb9, 0x28, 0xf2, 0x86, 0x28, 0x59, 0xc9, 0xc8, 0x16, + 0x62, 0xd0, 0xb2, 0x5d, 0x10, 0xd0, 0x0a, 0x61, 0x9a, 0xca, 0xfe, 0xbd, 0x79, 0x01, 0xc0, 0x65, + 0xb8, 0x5a, 0x6a, 0x66, 0x75, 0x32, 0x57, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xbf, 0x2b, 0xc0, 0x98, + 0xcc, 0x58, 0xd1, 0xf1, 0x07, 0xd1, 0x27, 0x1c, 0x29, 0x85, 0x1d, 0xcb, 0x65, 0x4e, 0x09, 0xd7, + 0x12, 0x35, 0x4c, 0x92, 0xcb, 0x5c, 0x02, 0x70, 0x82, 0x43, 0x77, 0x51, 0xd4, 0xd9, 0x64, 0xe8, + 0x29, 0xb7, 0xb2, 0x3a, 0x2f, 0xc6, 0x12, 0x8e, 0x3e, 0x09, 0xd3, 0xbc, 0x5e, 0x18, 0xb4, 0x9d, + 0x6d, 0xae, 0xc4, 0x1e, 0x56, 0xfe, 0xe0, 0xd3, 0x6b, 0x29, 0xd8, 0xe1, 0xc1, 0xfc, 0xe9, 0x74, + 0x19, 0x7b, 0x9d, 0xe9, 0xa2, 0xc2, 0xcc, 0x43, 0x78, 0x23, 0x74, 0xf7, 0x77, 0x59, 0x95, 0x24, + 0x20, 0xac, 0xe3, 0xd9, 0x9f, 0x03, 0xd4, 0x9d, 0xbb, 0x03, 0xbd, 0xc1, 0x6d, 0x02, 0xbd, 0x90, + 0xb8, 0xbd, 0x5e, 0x6b, 0x74, 0xaf, 0x67, 0xe9, 0x11, 0xc2, 0x6b, 0x61, 0x55, 0xdf, 0xfe, 0xab, + 0x45, 0x98, 0x4e, 0xfb, 0xc0, 0xa2, 0xeb, 0x30, 0xc2, 0x45, 0x0f, 0x41, 0xbe, 0x87, 0x31, 0x80, + 0xe6, 0x39, 0xcb, 0x98, 0xb0, 0x90, 0x5e, 0x44, 0x7d, 0xf4, 0x16, 0x8c, 0xb9, 0xc1, 0x3d, 0xff, + 0x9e, 0x13, 0xba, 0x8b, 0xb5, 0xaa, 0x58, 0xce, 0x99, 0xb7, 0xa5, 0x4a, 0x82, 0xa6, 0x7b, 0xe3, + 0xb2, 0x87, 0xaf, 0x04, 0x84, 0x75, 0x72, 0x68, 0x83, 0x85, 0x1a, 0xde, 0xf2, 0xb6, 0xd7, 0x9c, + 0x76, 0x2f, 0x03, 0xf1, 0x65, 0x89, 0xa4, 0x51, 0x9e, 0x10, 0xf1, 0x88, 0x39, 0x00, 0x27, 0x84, + 0xd0, 0xb7, 0xc0, 0xa9, 0x28, 0x47, 0x27, 0x9b, 0x97, 0xca, 0xa9, 0x97, 0x9a, 0x72, 0xe9, 0x31, + 0x7a, 0x8f, 0xcd, 0xd2, 0xde, 0x66, 0x35, 0x63, 0xff, 0xea, 0x29, 0x30, 0x36, 0xb1, 0x91, 0xd9, + 0xcf, 0x3a, 0xa6, 0xcc, 0x7e, 0x18, 0x4a, 0xa4, 0xd5, 0x8e, 0xf7, 0x2b, 0x5e, 0xd8, 0x2b, 0x35, + 0xec, 0x8a, 0xc0, 0xe9, 0xa6, 0x29, 0x21, 0x58, 0xd1, 0xc9, 0x4e, 0xbf, 0x58, 0xfc, 0x1a, 0xa6, + 0x5f, 0x1c, 0x3a, 0xc1, 0xf4, 0x8b, 0xeb, 0x30, 0xba, 0xed, 0xc5, 0x98, 0xb4, 0x03, 0x21, 0xf4, + 0x67, 0xae, 0xc3, 0x6b, 0x1c, 0xa5, 0x3b, 0xd1, 0x97, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa1, 0x76, + 0xe0, 0x48, 0xfe, 0x9d, 0xb9, 0xfb, 0xd5, 0x3a, 0x73, 0x0f, 
0x8a, 0x24, 0x8b, 0xa3, 0x0f, 0x9b, + 0x64, 0x71, 0x55, 0xa6, 0x46, 0x2c, 0xe5, 0x7b, 0x73, 0xb0, 0xcc, 0x87, 0x7d, 0x12, 0x22, 0xde, + 0xd1, 0xd3, 0x49, 0x96, 0xf3, 0x39, 0x81, 0xca, 0x14, 0x39, 0x60, 0x12, 0xc9, 0xef, 0xb2, 0xe0, + 0x4c, 0x3b, 0x2b, 0xb3, 0xaa, 0x78, 0xe0, 0x7d, 0x79, 0xe0, 0xd4, 0xb1, 0x46, 0x83, 0x4c, 0x51, + 0x93, 0x89, 0x86, 0xb3, 0x9b, 0xa3, 0x03, 0x1d, 0x6e, 0xba, 0x22, 0x0b, 0xe2, 0x93, 0x39, 0xd9, + 0x28, 0x7b, 0xe4, 0xa0, 0xdc, 0xc8, 0xc8, 0x7c, 0xf8, 0xc1, 0xbc, 0xcc, 0x87, 0x03, 0xe7, 0x3b, + 0x7c, 0x43, 0xe5, 0xa1, 0x9c, 0xc8, 0x5f, 0x4a, 0x3c, 0xcb, 0x64, 0xdf, 0xec, 0x93, 0x6f, 0xa8, + 0xec, 0x93, 0x3d, 0xe2, 0x54, 0xf2, 0xdc, 0x92, 0x7d, 0x73, 0x4e, 0x6a, 0x79, 0x23, 0xa7, 0x8e, + 0x27, 0x6f, 0xa4, 0x71, 0xd4, 0xf0, 0xd4, 0x85, 0xcf, 0xf4, 0x39, 0x6a, 0x0c, 0xba, 0xbd, 0x0f, + 0x1b, 0x9e, 0x23, 0x73, 0xe6, 0xa1, 0x72, 0x64, 0xde, 0xd1, 0x73, 0x4e, 0xa2, 0x3e, 0x49, 0x15, + 0x29, 0xd2, 0x80, 0x99, 0x26, 0xef, 0xe8, 0x07, 0xe0, 0xa9, 0x7c, 0xba, 0xea, 0x9c, 0xeb, 0xa6, + 0x9b, 0x79, 0x04, 0x76, 0x65, 0xb0, 0x3c, 0x7d, 0x32, 0x19, 0x2c, 0xcf, 0x1c, 0x7b, 0x06, 0xcb, + 0xb3, 0x27, 0x90, 0xc1, 0xf2, 0xb1, 0x13, 0xcc, 0x60, 0x79, 0x87, 0x59, 0x45, 0xf0, 0x70, 0x27, + 0x22, 0xae, 0x66, 0x76, 0x0c, 0xc7, 0xac, 0x98, 0x28, 0xfc, 0xe3, 0x14, 0x08, 0x27, 0xa4, 0x32, + 0x32, 0x63, 0xce, 0x3e, 0x82, 0xcc, 0x98, 0xeb, 0x49, 0x66, 0xcc, 0x73, 0xf9, 0x53, 0x9d, 0x61, + 0xba, 0x9e, 0x93, 0x0f, 0xf3, 0x8e, 0x9e, 0xc7, 0xf2, 0xf1, 0x1e, 0xaa, 0xf8, 0x2c, 0xc5, 0x63, + 0x8f, 0xec, 0x95, 0xaf, 0xf3, 0xec, 0x95, 0xe7, 0xf3, 0x39, 0x79, 0xfa, 0xb8, 0x33, 0x73, 0x56, + 0x7e, 0x4f, 0x01, 0x2e, 0xf6, 0xde, 0x17, 0x89, 0xd6, 0xb3, 0x96, 0xbc, 0x08, 0xa6, 0xb4, 0x9e, + 0xfc, 0x6e, 0x95, 0x60, 0x0d, 0x1c, 0x09, 0xeb, 0x1a, 0xcc, 0x28, 0xdb, 0xf4, 0xa6, 0xd7, 0xd8, + 0xd7, 0xd2, 0xf4, 0x2b, 0x7f, 0xde, 0x7a, 0x1a, 0x01, 0x77, 0xd7, 0x41, 0x8b, 0x30, 0x65, 0x14, + 0x56, 0x2b, 0xe2, 0x0e, 0xa5, 0xd4, 0xac, 0x75, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0x4f, 0x5a, 0xf0, + 0x58, 0x4e, 0x72, 0xa8, 0x81, 0x03, 0x3d, 0x6d, 0xc1, 0x54, 0xdb, 0xac, 0xda, 0x27, 0x1e, 0x9c, + 0x91, 0x82, 0x4a, 0xf5, 0x35, 0x05, 0xc0, 0x69, 0xa2, 0xf6, 0x9f, 0x5a, 0x70, 0xa1, 0xa7, 0xe5, + 0x17, 0xc2, 0x70, 0x76, 0xbb, 0x15, 0x39, 0xcb, 0x21, 0x71, 0x89, 0x1f, 0x7b, 0x4e, 0xb3, 0xde, + 0x26, 0x0d, 0x4d, 0x6f, 0xcd, 0x4c, 0xa8, 0xae, 0xad, 0xd5, 0x17, 0xbb, 0x31, 0x70, 0x4e, 0x4d, + 0xb4, 0x0a, 0xa8, 0x1b, 0x22, 0x66, 0x98, 0xc5, 0x8c, 0xed, 0xa6, 0x87, 0x33, 0x6a, 0xa0, 0x57, + 0x60, 0x42, 0x59, 0x94, 0x69, 0x33, 0xce, 0x18, 0x30, 0xd6, 0x01, 0xd8, 0xc4, 0x5b, 0xba, 0xf2, + 0xeb, 0xbf, 0x77, 0xf1, 0x03, 0xbf, 0xf9, 0x7b, 0x17, 0x3f, 0xf0, 0xdb, 0xbf, 0x77, 0xf1, 0x03, + 0xdf, 0xf6, 0xe0, 0xa2, 0xf5, 0xeb, 0x0f, 0x2e, 0x5a, 0xbf, 0xf9, 0xe0, 0xa2, 0xf5, 0xdb, 0x0f, + 0x2e, 0x5a, 0xbf, 0xfb, 0xe0, 0xa2, 0xf5, 0xa5, 0xdf, 0xbf, 0xf8, 0x81, 0x4f, 0x17, 0xf6, 0x9e, + 0xff, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x2b, 0xc8, 0x61, 0xd8, 0xfd, 0x00, 0x00, +} + +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AWSElasticBlockStoreVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else 
{ + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Affinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PodAntiAffinity != nil { + { + size, err := m.PodAntiAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.PodAffinity != nil { + { + size, err := m.PodAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttachedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AvoidPods) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AvoidPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for iNdEx := len(m.PreferAvoidPods) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferAvoidPods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} 
+ +func (m *AzureDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Kind != nil { + i -= len(*m.Kind) + copy(dAtA[i:], *m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i-- + dAtA[i] = 0x32 + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x22 + } + if m.CachingMode != nil { + i -= len(*m.CachingMode) + copy(dAtA[i:], *m.CachingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.DataDiskURI) + copy(dAtA[i:], m.DataDiskURI) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) + i-- + dAtA[i] = 0x12 + i -= len(m.DiskName) + copy(dAtA[i:], m.DiskName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AzureFilePersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretNamespace != nil { + i -= len(*m.SecretNamespace) + copy(dAtA[i:], *m.SecretNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AzureFileVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.ShareName) + copy(dAtA[i:], m.ShareName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i-- + dAtA[i] = 0x12 + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Binding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Binding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Binding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l 
+ { + size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ControllerExpandSecretRef != nil { + { + size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.NodeStageSecretRef != nil { + { + size, err := m.NodeStageSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.ControllerPublishSecretRef != nil { + { + size, err := m.ControllerPublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.VolumeHandle) + copy(dAtA[i:], m.VolumeHandle) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodePublishSecretRef != nil { + { + size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.VolumeAttributes) > 0 { + keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) + for k := range m.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { + v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForVolumeAttributes[iNdEx]) + copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x1a + } + if m.ReadOnly != nil { + i-- + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Capabilities) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Capabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Drop) > 0 { + for iNdEx := len(m.Drop) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Drop[iNdEx]) + copy(dAtA[i:], m.Drop[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Drop[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Add) > 0 { + for iNdEx := len(m.Add) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Add[iNdEx]) + copy(dAtA[i:], m.Add[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Add[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], 
m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CephFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.SecretFile) + copy(dAtA[i:], m.SecretFile) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) + i-- + dAtA[i] = 0x22 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x1a + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + if len(m.Monitors) > 0 { + for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Monitors[iNdEx]) + copy(dAtA[i:], m.Monitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ComponentStatusList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMap) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Immutable != nil { + i-- + if *m.Immutable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.BinaryData) > 0 { + keysForBinaryData := make([]string, 0, len(m.BinaryData)) + for k := range m.BinaryData { + keysForBinaryData = append(keysForBinaryData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + for iNdEx := len(keysForBinaryData) - 1; iNdEx >= 0; iNdEx-- { + v := m.BinaryData[string(keysForBinaryData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(keysForBinaryData[iNdEx]) + copy(dAtA[i:], keysForBinaryData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBinaryData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*ConfigMapEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapNodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.KubeletConfigKey) + copy(dAtA[i:], m.KubeletConfigKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) + i-- + dAtA[i] = 0x2a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x1a + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ConfigMapVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Container) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Container) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + i -= 
len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := 
len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerImage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) + i-- + dAtA[i] = 0x10 + if len(m.Names) > 0 { + for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Names[iNdEx]) + copy(dAtA[i:], m.Names[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ContainerPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Terminated != nil { + { + size, err := m.Terminated.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Running != nil { + { + size, err := m.Running.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Waiting != nil { + { + size, err := m.Waiting.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func 
(m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateTerminated) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x3a + { + size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStateWaiting) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Started != nil { + i-- + if *m.Started { + dAtA[i] = 1 + } 
else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + } + i -= len(m.ContainerID) + copy(dAtA[i:], m.ContainerID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i-- + dAtA[i] = 0x42 + i -= len(m.ImageID) + copy(dAtA[i:], m.ImageID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i-- + dAtA[i] = 0x3a + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) + i-- + dAtA[i] = 0x28 + i-- + if m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + { + size, err := m.LastTerminationState.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonEndpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x20 + } + if m.ResourceFieldRef != nil { + { + size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DownwardAPIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EmptyDirVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SizeLimit != nil { + { + size, err := m.SizeLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Medium) + copy(dAtA[i:], m.Medium) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeName != nil { + i -= len(*m.NodeName) + copy(dAtA[i:], *m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i-- + dAtA[i] = 0x22 + } + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1a + if m.TargetRef != nil { + { + size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AppProtocol != nil { + i -= len(*m.AppProtocol) + copy(dAtA[i:], *m.AppProtocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) + i-- + dAtA[i] = 0x22 + } + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointSubset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.NotReadyAddresses) > 0 { + for iNdEx := len(m.NotReadyAddresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NotReadyAddresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Endpoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Subsets) > 0 { + for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EndpointsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EndpointsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ 
= l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvFromSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ConfigMapRef != nil { + { + size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Prefix) + copy(dAtA[i:], m.Prefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EnvVar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvVar) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ValueFrom != nil { + { + size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretKeyRef != nil { + { + size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ConfigMapKeyRef != nil { + { + size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.ResourceFieldRef != nil { + { + size, err := 
m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.FieldRef != nil { + { + size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *EphemeralContainer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EphemeralContainer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TargetContainerName) + copy(dAtA[i:], m.TargetContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetContainerName))) + i-- + dAtA[i] = 0x12 + { + size, err := m.EphemeralContainerCommon.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EphemeralContainerCommon) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EphemeralContainerCommon) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.StartupProbe != nil { + { + size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.VolumeDevices) > 0 { + for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + i -= len(m.TerminationMessagePolicy) + copy(dAtA[i:], m.TerminationMessagePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x90 + i-- + if m.StdinOnce { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x88 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + if m.SecurityContext != nil { + { + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + i -= len(m.ImagePullPolicy) + copy(dAtA[i:], m.ImagePullPolicy) 
+ i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) + i-- + dAtA[i] = 0x72 + i -= len(m.TerminationMessagePath) + copy(dAtA[i:], m.TerminationMessagePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) + i-- + dAtA[i] = 0x6a + if m.Lifecycle != nil { + { + size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.ReadinessProbe != nil { + { + size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.LivenessProbe != nil { + { + size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.WorkingDir) + copy(dAtA[i:], m.WorkingDir) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) + i-- + dAtA[i] = 0x2a + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EphemeralContainers) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EphemeralContainers) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EphemeralContainers) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x7a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x72 + if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x62 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x4a + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x40 + { + size, err := m.LastTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.FirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.InvolvedObject.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *EventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + i -= len(m.Component) + copy(dAtA[i:], m.Component) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ExecAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.WWIDs) > 0 { + for iNdEx := len(m.WWIDs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.WWIDs[iNdEx]) + copy(dAtA[i:], m.WWIDs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WWIDs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + if m.Lun != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) + i-- + dAtA[i] = 0x10 + } + if len(m.TargetWWNs) > 0 { + for iNdEx := len(m.TargetWWNs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TargetWWNs[iNdEx]) + copy(dAtA[i:], m.TargetWWNs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetWWNs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlexVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { + v := m.Options[string(keysForOptions[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForOptions[iNdEx]) + copy(dAtA[i:], keysForOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FlockerVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DatasetUUID) + copy(dAtA[i:], m.DatasetUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) + i-- + dAtA[i] = 0x12 + i -= len(m.DatasetName) + copy(dAtA[i:], m.DatasetName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PDName) + copy(dAtA[i:], m.PDName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GitRepoVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Directory) + copy(dAtA[i:], m.Directory) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Directory))) + i-- + dAtA[i] = 0x1a + i -= len(m.Revision) + copy(dAtA[i:], m.Revision) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i-- + dAtA[i] = 0x12 + i -= len(m.Repository) + copy(dAtA[i:], m.Repository) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EndpointsNamespace != nil { + i -= len(*m.EndpointsNamespace) + copy(dAtA[i:], *m.EndpointsNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) + i-- + dAtA[i] = 0x22 + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GlusterfsVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.EndpointsName) + copy(dAtA[i:], m.EndpointsName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPGetAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.HTTPHeaders) > 0 { + for iNdEx := len(m.HTTPHeaders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HTTPHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.Scheme) + copy(dAtA[i:], m.Scheme) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) + i-- + dAtA[i] = 0x22 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Handler) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Handler) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Handler) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TCPSocket != nil { + { + size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.HTTPGet != nil { + { + size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Exec != nil { + { + size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *HostAlias) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Hostnames) > 0 { + for iNdEx := len(m.Hostnames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hostnames[iNdEx]) + copy(dAtA[i:], m.Hostnames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostnames[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPathVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Type != nil { + i -= len(*m.Type) + copy(dAtA[i:], *m.Type) + 
i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 + } + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InitiatorName != nil { + i -= len(*m.InitiatorName) + copy(dAtA[i:], *m.InitiatorName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i-- + dAtA[i] = 0x62 + } + i-- + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + i-- + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.Portals) > 0 { + for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { + i -= 
len(m.Portals[iNdEx]) + copy(dAtA[i:], m.Portals[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x2a + i -= len(m.ISCSIInterface) + copy(dAtA[i:], m.ISCSIInterface) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + i-- + dAtA[i] = 0x18 + i -= len(m.IQN) + copy(dAtA[i:], m.IQN) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i-- + dAtA[i] = 0x12 + i -= len(m.TargetPortal) + copy(dAtA[i:], m.TargetPortal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *KeyToPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *KeyToPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Mode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + i-- + dAtA[i] = 0x18 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Lifecycle) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreStop != nil { + { + size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PostStart != nil { + { + size, err := m.PostStart.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *LimitRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, 
nil +} + +func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MaxLimitRequestRatio) > 0 { + keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) + for k := range m.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + for iNdEx := len(keysForMaxLimitRequestRatio) - 1; iNdEx >= 0; iNdEx-- { + v := m.MaxLimitRequestRatio[ResourceName(keysForMaxLimitRequestRatio[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMaxLimitRequestRatio[iNdEx]) + copy(dAtA[i:], keysForMaxLimitRequestRatio[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMaxLimitRequestRatio[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x32 + } + } + if len(m.DefaultRequest) > 0 { + keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) + for k := range m.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + for iNdEx := len(keysForDefaultRequest) - 1; iNdEx >= 0; iNdEx-- { + v := m.DefaultRequest[ResourceName(keysForDefaultRequest[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefaultRequest[iNdEx]) + copy(dAtA[i:], keysForDefaultRequest[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefaultRequest[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Default) > 0 { + keysForDefault := make([]string, 0, len(m.Default)) + for k := range m.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + for iNdEx := len(keysForDefault) - 1; iNdEx >= 0; iNdEx-- { + v := m.Default[ResourceName(keysForDefault[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDefault[iNdEx]) + copy(dAtA[i:], keysForDefault[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefault[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Min) > 0 { + keysForMin := make([]string, 0, len(m.Min)) + for k := range m.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + for iNdEx := len(keysForMin) - 1; iNdEx >= 0; iNdEx-- { + v := m.Min[ResourceName(keysForMin[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMin[iNdEx]) + copy(dAtA[i:], keysForMin[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMin[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Max) > 0 { + keysForMax := make([]string, 0, len(m.Max)) + for k := range m.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + for iNdEx := len(keysForMax) - 1; iNdEx >= 0; iNdEx-- { + v := m.Max[ResourceName(keysForMax[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForMax[iNdEx]) + copy(dAtA[i:], keysForMax[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMax[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Limits) > 0 { + for iNdEx := len(m.Limits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Limits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *List) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *List) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x12 + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FSType != nil { + i -= len(*m.FSType) + copy(dAtA[i:], *m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x12 + i -= len(m.Server) + copy(dAtA[i:], m.Server) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Namespace) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamespaceCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamespaceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NamespaceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + 
return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Address) + copy(dAtA[i:], m.Address) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NodeCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastHeartbeatTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + 
var l int + _ = l + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + return len(dAtA) - i, nil +} + +func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Error) + copy(dAtA[i:], m.Error) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i-- + dAtA[i] = 0x22 + if m.LastKnownGood != nil { + { + size, err := m.LastKnownGood.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Active != nil { + { + size, err := m.Active.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Assigned != nil { + { + size, err := m.Assigned.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.KubeletEndpoint.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { 
+ size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for iNdEx := len(m.NodeSelectorTerms) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.NodeSelectorTerms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeSelectorTerm) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MatchFields) > 0 { + for iNdEx := len(m.MatchFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodCIDRs) > 0 { + for iNdEx := len(m.PodCIDRs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PodCIDRs[iNdEx]) + copy(dAtA[i:], m.PodCIDRs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDRs[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + if m.ConfigSource != nil { + { + size, err := m.ConfigSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Taints) > 0 { + for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + i-- + if m.Unschedulable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.ProviderID) + copy(dAtA[i:], m.ProviderID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) + i-- + dAtA[i] = 0x1a + i -= len(m.DoNotUseExternalID) + copy(dAtA[i:], m.DoNotUseExternalID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUseExternalID))) + i-- + dAtA[i] = 0x12 + i -= len(m.PodCIDR) + copy(dAtA[i:], m.PodCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Config != nil { + { + size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x5a + } + if len(m.VolumesAttached) > 0 { + for iNdEx := len(m.VolumesAttached) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumesAttached[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if len(m.VolumesInUse) > 0 { + for iNdEx := len(m.VolumesInUse) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumesInUse[iNdEx]) + copy(dAtA[i:], m.VolumesInUse[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumesInUse[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + if len(m.Images) > 0 { + for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + { + size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.DaemonEndpoints.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.Addresses) > 0 { + for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0x1a + if len(m.Allocatable) > 0 { + keysForAllocatable := make([]string, 0, len(m.Allocatable)) + for k := range m.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- { + v := m.Allocatable[ResourceName(keysForAllocatable[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForAllocatable[iNdEx]) + copy(dAtA[i:], keysForAllocatable[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatable[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = 
encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Architecture) + copy(dAtA[i:], m.Architecture) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i-- + dAtA[i] = 0x52 + i -= len(m.OperatingSystem) + copy(dAtA[i:], m.OperatingSystem) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) + i-- + dAtA[i] = 0x4a + i -= len(m.KubeProxyVersion) + copy(dAtA[i:], m.KubeProxyVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) + i-- + dAtA[i] = 0x42 + i -= len(m.KubeletVersion) + copy(dAtA[i:], m.KubeletVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) + i-- + dAtA[i] = 0x3a + i -= len(m.ContainerRuntimeVersion) + copy(dAtA[i:], m.ContainerRuntimeVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.OSImage) + copy(dAtA[i:], m.OSImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) + i-- + dAtA[i] = 0x2a + i -= len(m.KernelVersion) + copy(dAtA[i:], m.KernelVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) + i-- + dAtA[i] = 0x22 + i -= len(m.BootID) + copy(dAtA[i:], m.BootID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) + i-- + dAtA[i] = 0x1a + i -= len(m.SystemUUID) + copy(dAtA[i:], m.SystemUUID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) + i-- + dAtA[i] = 0x12 + i -= len(m.MachineID) + copy(dAtA[i:], m.MachineID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.FieldPath) + copy(dAtA[i:], m.FieldPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i-- + dAtA[i] = 
0x3a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + 
i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DataSource != nil { + { + size, err := m.DataSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.VolumeMode != nil { + i -= len(*m.VolumeMode) + copy(dAtA[i:], *m.VolumeMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i-- + dAtA[i] = 0x32 + } + if m.StorageClassName != nil { + i -= len(*m.StorageClassName) + copy(dAtA[i:], *m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i-- + dAtA[i] = 0x2a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeClaimVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.ClaimName) + copy(dAtA[i:], m.ClaimName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.Local != nil { + { + size, err := m.Local.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if m.FlexVolume 
!= nil { + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.Cinder != nil { + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.NodeAffinity != nil { + { + size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.VolumeMode != nil { + i -= len(*m.VolumeMode) + copy(dAtA[i:], *m.VolumeMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i-- + dAtA[i] = 0x42 + } + if len(m.MountOptions) 
> 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.StorageClassName) + copy(dAtA[i:], m.StorageClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) + i-- + dAtA[i] = 0x32 + i -= len(m.PersistentVolumeReclaimPolicy) + copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i-- + dAtA[i] = 0x2a + if m.ClaimRef != nil { + { + size, err := m.ClaimRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.AccessModes) > 0 { + for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AccessModes[iNdEx]) + copy(dAtA[i:], m.AccessModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.PersistentVolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { + v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForCapacity[iNdEx]) + copy(dAtA[i:], keysForCapacity[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PhotonPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := 
len(dAtA) + _ = i + var l int + _ = l + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.PdID) + copy(dAtA[i:], m.PdID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Pod) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Pod) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x1a + if len(m.Namespaces) > 0 { + for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Namespaces[iNdEx]) + copy(dAtA[i:], m.Namespaces[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) + i-- + dAtA[i] = 
0x12 + } + } + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *PodCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x32 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x2a + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err 
:= m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Options) > 0 { + for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Searches) > 0 { + for iNdEx := len(m.Searches) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Searches[iNdEx]) + copy(dAtA[i:], m.Searches[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Searches[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Nameservers) > 0 { + for iNdEx := len(m.Nameservers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Nameservers[iNdEx]) + copy(dAtA[i:], m.Nameservers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Nameservers[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDNSConfigOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Value != nil { + i -= len(*m.Value) + copy(dAtA[i:], *m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i-- + dAtA[i] = 0x12 + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0x2a + i-- + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i-- + if m.Stderr { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Stdout { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *PodIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.InsecureSkipTLSVerifyBackend { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + if m.LimitBytes != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + i-- + dAtA[i] = 0x40 + } + if m.TailLines != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + i-- + dAtA[i] = 0x38 + } + i-- + if m.Timestamps { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if m.SinceTime != nil { + { + size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.SinceSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + i-- + dAtA[i] = 0x20 + } + i-- + if m.Previous { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i-- + if m.Follow { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Container) + copy(dAtA[i:], m.Container) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPortForwardOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.Ports[iNdEx])) + i-- + dAtA[i] = 0x8 + } + } + return len(dAtA) - i, nil +} + +func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ConditionType) + copy(dAtA[i:], m.ConditionType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FSGroupChangePolicy != nil { + i -= len(*m.FSGroupChangePolicy) + copy(dAtA[i:], *m.FSGroupChangePolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSGroupChangePolicy))) + i-- + dAtA[i] = 0x4a + } + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if len(m.Sysctls) > 0 { + for iNdEx := len(m.Sysctls) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sysctls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x30 + } + if m.FSGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) + i-- + dAtA[i] = 0x28 + } + if len(m.SupplementalGroups) > 0 { + for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) + i-- + dAtA[i] = 0x20 + } + } + if m.RunAsNonRoot 
!= nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x10 + } + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSignature) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PodController != nil { + { + size, err := m.PodController.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EphemeralContainers) > 0 { + for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x92 + } + } + if len(m.TopologySpreadConstraints) > 0 { + for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x8a + } + } + if len(m.Overhead) > 0 { + keysForOverhead := make([]string, 0, len(m.Overhead)) + for k := range m.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + for iNdEx := len(keysForOverhead) - 1; iNdEx >= 0; iNdEx-- { + v := m.Overhead[ResourceName(keysForOverhead[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForOverhead[iNdEx]) + copy(dAtA[i:], keysForOverhead[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOverhead[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x2 + i-- + dAtA[i] = 0x82 + } + } + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xfa + } + if m.EnableServiceLinks != nil { + i-- + if *m.EnableServiceLinks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + 
dAtA[i] = 0x1 + i-- + dAtA[i] = 0xf0 + } + if m.RuntimeClassName != nil { + i -= len(*m.RuntimeClassName) + copy(dAtA[i:], *m.RuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xea + } + if len(m.ReadinessGates) > 0 { + for iNdEx := len(m.ReadinessGates) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ReadinessGates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + } + if m.ShareProcessNamespace != nil { + i-- + if *m.ShareProcessNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd8 + } + if m.DNSConfig != nil { + { + size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.Priority != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc8 + } + i -= len(m.PriorityClassName) + copy(dAtA[i:], m.PriorityClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + if len(m.HostAliases) > 0 { + for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + } + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa8 + } + if len(m.InitContainers) > 0 { + for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + i -= len(m.SchedulerName) + copy(dAtA[i:], m.SchedulerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + if m.Affinity != nil { + { + size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + i -= len(m.Subdomain) + copy(dAtA[i:], m.Subdomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + i -= len(m.Hostname) + copy(dAtA[i:], m.Hostname) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + } + if m.SecurityContext != nil { + 
{ + size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x60 + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x58 + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x52 + i -= len(m.DeprecatedServiceAccount) + copy(dAtA[i:], m.DeprecatedServiceAccount) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i-- + dAtA[i] = 0x4a + i -= len(m.ServiceAccountName) + copy(dAtA[i:], m.ServiceAccountName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i-- + dAtA[i] = 0x42 + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.DNSPolicy) + copy(dAtA[i:], m.DNSPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) + i-- + dAtA[i] = 0x32 + if m.ActiveDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + i-- + dAtA[i] = 0x28 + } + if m.TerminationGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.RestartPolicy) + copy(dAtA[i:], m.RestartPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i-- + dAtA[i] = 0x1a + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PodStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.EphemeralContainerStatuses) > 0 { + for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.EphemeralContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if len(m.PodIPs) > 0 { + for iNdEx := len(m.PodIPs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.PodIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + } + i -= len(m.NominatedNodeName) + copy(dAtA[i:], m.NominatedNodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) + i-- + dAtA[i] = 0x5a + if len(m.InitContainerStatuses) > 0 { + for iNdEx := len(m.InitContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.InitContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + i -= len(m.QOSClass) + copy(dAtA[i:], m.QOSClass) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) + i-- + dAtA[i] = 0x4a + if len(m.ContainerStatuses) > 0 { + for iNdEx := len(m.ContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.StartTime != nil { + { + size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.PodIP) + copy(dAtA[i:], m.PodIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i-- + dAtA[i] = 0x32 + i -= len(m.HostIP) + copy(dAtA[i:], m.HostIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Phase) + copy(dAtA[i:], m.Phase) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodStatusResult) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodTemplate) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplateList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PortworxVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeID) + copy(dAtA[i:], m.VolumeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) 
+ i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreferAvoidPodsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x22 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x1a + { + size, err := m.EvictionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.PodSignature.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PreferredSchedulingTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Preference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *Probe) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Probe) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Probe) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, 
uint64(m.InitialDelaySeconds)) + i-- + dAtA[i] = 0x10 + { + size, err := m.Handler.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProjectedVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x10 + } + if len(m.Sources) > 0 { + for iNdEx := len(m.Sources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Sources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QuobyteVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Tenant) + copy(dAtA[i:], m.Tenant) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Tenant))) + i-- + dAtA[i] = 0x32 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x2a + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0x22 + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.Volume) + copy(dAtA[i:], m.Volume) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) + i-- + dAtA[i] = 0x12 + i -= len(m.Registry) + copy(dAtA[i:], m.Registry) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RBDPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.Keyring) + copy(dAtA[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i-- + dAtA[i] = 0x32 + i -= len(m.RadosUser) + copy(dAtA[i:], m.RadosUser) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i-- 
+ dAtA[i] = 0x2a + i -= len(m.RBDPool) + copy(dAtA[i:], m.RBDPool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i-- + dAtA[i] = 0x22 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.RBDImage) + copy(dAtA[i:], m.RBDImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i-- + dAtA[i] = 0x12 + if len(m.CephMonitors) > 0 { + for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CephMonitors[iNdEx]) + copy(dAtA[i:], m.CephMonitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RBDVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.Keyring) + copy(dAtA[i:], m.Keyring) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i-- + dAtA[i] = 0x32 + i -= len(m.RadosUser) + copy(dAtA[i:], m.RadosUser) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i-- + dAtA[i] = 0x2a + i -= len(m.RBDPool) + copy(dAtA[i:], m.RBDPool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i-- + dAtA[i] = 0x22 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.RBDImage) + copy(dAtA[i:], m.RBDImage) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i-- + dAtA[i] = 0x12 + if len(m.CephMonitors) > 0 { + for iNdEx := len(m.CephMonitors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.CephMonitors[iNdEx]) + copy(dAtA[i:], m.CephMonitors[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CephMonitors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RangeAllocation) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Data != nil { + i -= len(m.Data) + copy(dAtA[i:], m.Data) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i-- + dAtA[i] = 0x1a + } + i -= len(m.Range) + copy(dAtA[i:], m.Range) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicationController) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationController) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func 
(m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + if m.Template != nil { + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicationControllerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Divisor.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= 
len(m.ContainerName) + copy(dAtA[i:], m.ContainerName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuota) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ScopeSelector != nil { + { + size, err := m.ScopeSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Scopes) > 0 { + for iNdEx := len(m.Scopes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Scopes[iNdEx]) + copy(dAtA[i:], m.Scopes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scopes[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hard[ResourceName(keysForHard[iNdEx])] + baseI := i + { + size, err := 
(&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHard[iNdEx]) + copy(dAtA[i:], keysForHard[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceQuotaStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Used) > 0 { + keysForUsed := make([]string, 0, len(m.Used)) + for k := range m.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + for iNdEx := len(keysForUsed) - 1; iNdEx >= 0; iNdEx-- { + v := m.Used[ResourceName(keysForUsed[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsed[iNdEx]) + copy(dAtA[i:], keysForUsed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for iNdEx := len(keysForHard) - 1; iNdEx >= 0; iNdEx-- { + v := m.Hard[ResourceName(keysForHard[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForHard[iNdEx]) + copy(dAtA[i:], keysForHard[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForHard[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for iNdEx := len(keysForRequests) - 1; iNdEx >= 0; iNdEx-- { + v := m.Requests[ResourceName(keysForRequests[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForRequests[iNdEx]) + 
copy(dAtA[i:], keysForRequests[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForRequests[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for iNdEx := len(keysForLimits) - 1; iNdEx >= 0; iNdEx-- { + v := m.Limits[ResourceName(keysForLimits[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForLimits[iNdEx]) + copy(dAtA[i:], keysForLimits[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLimits[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Level) + copy(dAtA[i:], m.Level) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i-- + dAtA[i] = 0x22 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + i -= len(m.Role) + copy(dAtA[i:], m.Role) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i-- + dAtA[i] = 0x12 + i -= len(m.User) + copy(dAtA[i:], m.User) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleIOPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x4a + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x42 + i -= len(m.StorageMode) + copy(dAtA[i:], m.StorageMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i-- + dAtA[i] = 0x3a + i -= len(m.StoragePool) + copy(dAtA[i:], m.StoragePool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i-- + dAtA[i] = 0x32 + i -= len(m.ProtectionDomain) + copy(dAtA[i:], m.ProtectionDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i-- + dAtA[i] = 0x2a + i-- + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := 
m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.System) + copy(dAtA[i:], m.System) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i-- + dAtA[i] = 0x12 + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleIOVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x50 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x4a + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0x42 + i -= len(m.StorageMode) + copy(dAtA[i:], m.StorageMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i-- + dAtA[i] = 0x3a + i -= len(m.StoragePool) + copy(dAtA[i:], m.StoragePool) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i-- + dAtA[i] = 0x32 + i -= len(m.ProtectionDomain) + copy(dAtA[i:], m.ProtectionDomain) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i-- + dAtA[i] = 0x2a + i-- + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.System) + copy(dAtA[i:], m.System) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i-- + dAtA[i] = 0x12 + i -= len(m.Gateway) + copy(dAtA[i:], m.Gateway) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScopedResourceSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.ScopeName) + copy(dAtA[i:], m.ScopeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Secret) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Secret) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Immutable != nil { + i-- + if *m.Immutable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.StringData) > 0 { + keysForStringData := make([]string, 0, len(m.StringData)) + for k := range m.StringData { + keysForStringData = append(keysForStringData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + for iNdEx := len(keysForStringData) - 1; iNdEx >= 0; iNdEx-- { + v := m.StringData[string(keysForStringData[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForStringData[iNdEx]) + copy(dAtA[i:], keysForStringData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForStringData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x1a + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { + v := m.Data[string(keysForData[iNdEx])] + baseI := i + if v != nil { + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + } + i -= len(keysForData[iNdEx]) + copy(dAtA[i:], keysForData[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0x12 + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional != nil { + i-- + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if m.DefaultMode != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + i-- + dAtA[i] = 0x18 + } + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SecurityContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.WindowsOptions != nil { + { + size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.ProcMount != nil { + i -= len(*m.ProcMount) + copy(dAtA[i:], *m.ProcMount) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) + i-- + dAtA[i] = 0x4a + } + if m.RunAsGroup != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + i-- + dAtA[i] = 0x40 + } + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.ReadOnlyRootFilesystem != nil { + i-- + if *m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if m.RunAsNonRoot != nil { + i-- + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if m.RunAsUser != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + i-- + dAtA[i] = 0x20 + } + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Privileged != nil { + i-- + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.Capabilities != nil { + { + size, err := m.Capabilities.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SerializedReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SerializedReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AutomountServiceAccountToken != nil { + i-- + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + } + if len(m.ImagePullSecrets) > 0 { + for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Secrets) > 0 { + for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceAccountTokenProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0x1a + if m.ExpirationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + i-- + dAtA[i] = 0x10 + } + i -= len(m.Audience) + copy(dAtA[i:], m.Audience) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServicePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AppProtocol != nil { + i -= 
len(*m.AppProtocol) + copy(dAtA[i:], *m.AppProtocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) + i-- + dAtA[i] = 0x32 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) + i-- + dAtA[i] = 0x28 + { + size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x18 + i -= len(m.Protocol) + copy(dAtA[i:], m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.TopologyKeys) > 0 { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + } + if m.IPFamily != nil { + i -= len(*m.IPFamily) + copy(dAtA[i:], *m.IPFamily) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) + i-- + dAtA[i] = 0x7a + } + if m.SessionAffinityConfig != nil { + { + size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + i-- + if m.PublishNotReadyAddresses { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x68 + i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) + i-- + dAtA[i] = 0x60 + i -= len(m.ExternalTrafficPolicy) + copy(dAtA[i:], m.ExternalTrafficPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) + i-- + dAtA[i] = 0x5a + i -= len(m.ExternalName) + copy(dAtA[i:], m.ExternalName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) + i-- + dAtA[i] = 0x52 + if len(m.LoadBalancerSourceRanges) > 0 { + for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.LoadBalancerSourceRanges[iNdEx]) + copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx]))) + i-- + dAtA[i] = 0x4a + } + } + i -= len(m.LoadBalancerIP) + copy(dAtA[i:], m.LoadBalancerIP) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) + i-- + dAtA[i] = 0x42 + i -= len(m.SessionAffinity) + copy(dAtA[i:], m.SessionAffinity) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) + i-- + dAtA[i] = 0x3a + if len(m.ExternalIPs) > 0 { + for iNdEx := len(m.ExternalIPs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ExternalIPs[iNdEx]) + copy(dAtA[i:], m.ExternalIPs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalIPs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x22 + i -= len(m.ClusterIP) + copy(dAtA[i:], m.ClusterIP) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClientIP != nil { + { + size, err := m.ClientIP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSPersistentVolumeSource) 
MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageOSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageOSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SecretRef != nil { + { + size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x20 + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x1a + i -= len(m.VolumeNamespace) + copy(dAtA[i:], m.VolumeNamespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumeName) + copy(dAtA[i:], m.VolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Sysctl) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Sysctl) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TCPSocketAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Host) + copy(dAtA[i:], 
m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Taint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Taint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Taint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TimeAdded != nil { + { + size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x1a + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Toleration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.TolerationSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) + i-- + dAtA[i] = 0x28 + } + i -= len(m.Effect) + copy(dAtA[i:], m.Effect) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i-- + dAtA[i] = 0x22 + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologySelectorLabelRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + 
dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologySelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MatchLabelExpressions) > 0 { + for iNdEx := len(m.MatchLabelExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchLabelExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *TopologySpreadConstraint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TopologySpreadConstraint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.LabelSelector != nil { + { + size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.WhenUnsatisfiable) + copy(dAtA[i:], m.WhenUnsatisfiable) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenUnsatisfiable))) + i-- + dAtA[i] = 0x1a + i -= len(m.TopologyKey) + copy(dAtA[i:], m.TopologyKey) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSkew)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + if m.APIGroup != nil { + i -= len(*m.APIGroup) + copy(dAtA[i:], *m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Volume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.VolumeSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Name) 
+ copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.DevicePath) + copy(dAtA[i:], m.DevicePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.SubPathExpr) + copy(dAtA[i:], m.SubPathExpr) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) + i-- + dAtA[i] = 0x32 + if m.MountPropagation != nil { + i -= len(*m.MountPropagation) + copy(dAtA[i:], *m.MountPropagation) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) + i-- + dAtA[i] = 0x2a + } + i -= len(m.SubPath) + copy(dAtA[i:], m.SubPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i-- + dAtA[i] = 0x22 + i -= len(m.MountPath) + copy(dAtA[i:], m.MountPath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i-- + dAtA[i] = 0x1a + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Required != nil { + { + size, err := m.Required.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ServiceAccountToken != nil { + { + size, err := 
m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CSI != nil { + { + size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xe2 + } + if m.StorageOS != nil { + { + size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xda + } + if m.Projected != nil { + { + size, err := m.Projected.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xd2 + } + if m.ScaleIO != nil { + { + size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xca + } + if m.PortworxVolume != nil { + { + size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if m.PhotonPersistentDisk != nil { + { + size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + if m.AzureDisk != nil { + { + size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if m.Quobyte != nil { + { + size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + if m.VsphereVolume != nil { + { + size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + if 
m.ConfigMap != nil { + { + size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + if m.AzureFile != nil { + { + size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + if m.FC != nil { + { + size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + if m.DownwardAPI != nil { + { + size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + if m.Flocker != nil { + { + size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + if m.CephFS != nil { + { + size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + if m.Cinder != nil { + { + size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + if m.FlexVolume != nil { + { + size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + } + if m.RBD != nil { + { + size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + } + if m.PersistentVolumeClaim != nil { + { + size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if m.Glusterfs != nil { + { + size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if m.ISCSI != nil { + { + size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + if m.NFS != nil { + { + size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + if m.Secret != nil { + { + size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if m.GitRepo != nil { + { + size, err := m.GitRepo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if m.AWSElasticBlockStore != nil { + { + size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + 
} + i-- + dAtA[i] = 0x22 + } + if m.GCEPersistentDisk != nil { + { + size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.EmptyDir != nil { + { + size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.HostPath != nil { + { + size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VsphereVirtualDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.StoragePolicyID) + copy(dAtA[i:], m.StoragePolicyID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) + i-- + dAtA[i] = 0x22 + i -= len(m.StoragePolicyName) + copy(dAtA[i:], m.StoragePolicyName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) + i-- + dAtA[i] = 0x1a + i -= len(m.FSType) + copy(dAtA[i:], m.FSType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i-- + dAtA[i] = 0x12 + i -= len(m.VolumePath) + copy(dAtA[i:], m.VolumePath) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WeightedPodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.PodAffinityTerm.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RunAsUserName != nil { + i -= len(*m.RunAsUserName) + copy(dAtA[i:], *m.RunAsUserName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RunAsUserName))) + i-- + dAtA[i] = 0x1a + } + if m.GMSACredentialSpec != nil { + i -= len(*m.GMSACredentialSpec) + copy(dAtA[i:], *m.GMSACredentialSpec) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec))) + i-- + dAtA[i] = 0x12 + } + if m.GMSACredentialSpecName != nil { + i -= len(*m.GMSACredentialSpecName) + copy(dAtA[i:], *m.GMSACredentialSpecName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *Affinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AttachedVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AvoidPods) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *AzureDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AzureFilePersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretNamespace != nil { + l = len(*m.SecretNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *AzureFileVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Binding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + 
mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ControllerPublishSecretRef != nil { + l = m.ControllerPublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeStageSecretRef != nil { + l = m.NodeStageSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ControllerExpandSecretRef != nil { + l = m.ControllerExpandSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CSIVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + if m.ReadOnly != nil { + n += 2 + } + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Capabilities) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Drop) > 0 { + for _, s := range m.Drop { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CephFSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *CephFSVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Monitors) > 0 { + for _, s := range m.Monitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SecretFile) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *CinderPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CinderVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClientIPConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.TimeoutSeconds != nil { + n += 1 + 
sovGenerated(uint64(*m.TimeoutSeconds)) + } + return n +} + +func (m *ComponentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ComponentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ComponentStatusList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMap) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.BinaryData) > 0 { + for k, v := range m.BinaryData { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Immutable != nil { + n += 2 + } + return n +} + +func (m *ConfigMapEnvSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapKeySelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ConfigMapNodeConfigSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletConfigKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ConfigMapProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *ConfigMapVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l 
= e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *Container) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.StartupProbe != nil { + l = m.StartupProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContainerImage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Names) > 0 { + for _, s := range m.Names { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.SizeBytes)) + return n +} + +func (m *ContainerPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HostPort)) + n += 1 + sovGenerated(uint64(m.ContainerPort)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Waiting != nil { + l = m.Waiting.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Running != nil { + l = m.Running.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Terminated != nil { + l = m.Terminated.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ContainerStateRunning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateTerminated) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ExitCode)) + n += 1 + sovGenerated(uint64(m.Signal)) + l = 
len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.StartedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FinishedAt.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStateWaiting) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ContainerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.State.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTerminationState.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 1 + sovGenerated(uint64(m.RestartCount)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImageID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerID) + n += 1 + l + sovGenerated(uint64(l)) + if m.Started != nil { + n += 2 + } + return n +} + +func (m *DaemonEndpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Port)) + return n +} + +func (m *DownwardAPIProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DownwardAPIVolumeFile) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + return n +} + +func (m *DownwardAPIVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *EmptyDirVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Medium) + n += 1 + l + sovGenerated(uint64(l)) + if m.SizeLimit != nil { + l = m.SizeLimit.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if m.TargetRef != nil { + l = m.TargetRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + if m.NodeName != nil { + l = len(*m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + if m.AppProtocol != nil { + l = len(*m.AppProtocol) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EndpointSubset) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if 
len(m.NotReadyAddresses) > 0 { + for _, e := range m.NotReadyAddresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Endpoints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subsets) > 0 { + for _, e := range m.Subsets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EndpointsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EnvFromSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Prefix) + n += 1 + l + sovGenerated(uint64(l)) + if m.ConfigMapRef != nil { + l = m.ConfigMapRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVar) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + if m.ValueFrom != nil { + l = m.ValueFrom.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EnvVarSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.FieldRef != nil { + l = m.FieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceFieldRef != nil { + l = m.ResourceFieldRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMapKeyRef != nil { + l = m.ConfigMapKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SecretKeyRef != nil { + l = m.SecretKeyRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EphemeralContainer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.EphemeralContainerCommon.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.TargetContainerName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EphemeralContainerCommon) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Image) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.WorkingDir) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.LivenessProbe != nil { + l = m.LivenessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadinessProbe != nil { + l = m.ReadinessProbe.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Lifecycle != nil { + l = m.Lifecycle.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = 
len(m.TerminationMessagePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ImagePullPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 3 + n += 3 + n += 3 + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TerminationMessagePolicy) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.VolumeDevices) > 0 { + for _, e := range m.VolumeDevices { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.StartupProbe != nil { + l = m.StartupProbe.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *EphemeralContainers) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.InvolvedObject.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Count)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *EventSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Component) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ExecAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FCVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Lun != nil { + n += 1 + sovGenerated(uint64(*m.Lun)) + } + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.WWIDs) > 0 { + for _, s := range m.WWIDs { + l = 
len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *FlexPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *FlexVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if len(m.Options) > 0 { + for k, v := range m.Options { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *FlockerVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.DatasetName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DatasetUUID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GCEPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PDName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n +} + +func (m *GitRepoVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Repository) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Revision) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Directory) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GlusterfsPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.EndpointsNamespace != nil { + l = len(*m.EndpointsNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GlusterfsVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EndpointsName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *HTTPGetAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Scheme) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.HTTPHeaders) > 0 { + for _, e := range m.HTTPHeaders { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPHeader) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Handler) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Exec != nil { + l = m.Exec.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if m.HTTPGet != nil { + l = m.HTTPGet.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.TCPSocket != nil { + l = m.TCPSocket.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HostAlias) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostPathVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Type != nil { + l = len(*m.Type) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ISCSIPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ISCSIVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.TargetPortal) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.IQN) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Lun)) + l = len(m.ISCSIInterface) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Portals) > 0 { + for _, s := range m.Portals { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.InitiatorName != nil { + l = len(*m.InitiatorName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *KeyToPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.Mode != nil { + n += 1 + sovGenerated(uint64(*m.Mode)) + } + return n +} + +func (m *Lifecycle) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PostStart != nil { + l = m.PostStart.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PreStop != nil { + l = m.PreStop.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *LimitRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LimitRangeItem) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Max) > 0 { + for k, v := range m.Max { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Min) > 0 { + for k, v := range m.Min { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + 
len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Default) > 0 { + for k, v := range m.Default { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.DefaultRequest) > 0 { + for k, v := range m.DefaultRequest { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MaxLimitRequestRatio) > 0 { + for k, v := range m.MaxLimitRequestRatio { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *LimitRangeList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LimitRangeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for _, e := range m.Limits { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *List) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LoadBalancerIngress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Hostname) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LoadBalancerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LocalObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LocalVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NFSVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Server) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Namespace) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func (m *NamespaceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NamespaceStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Node) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAddress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Address) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastHeartbeatTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeConfigSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NodeConfigStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Assigned != nil { + l = m.Assigned.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Active != nil { + l = m.Active.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.LastKnownGood != nil { + l = m.LastKnownGood.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Error) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeDaemonEndpoints) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.KubeletEndpoint.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + 
l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelectorTerms) > 0 { + for _, e := range m.NodeSelectorTerms { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSelectorTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.MatchFields) > 0 { + for _, e := range m.MatchFields { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PodCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DoNotUseExternalID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ProviderID) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if len(m.Taints) > 0 { + for _, e := range m.Taints { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ConfigSource != nil { + l = m.ConfigSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.PodCIDRs) > 0 { + for _, s := range m.PodCIDRs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NodeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Allocatable) > 0 { + for k, v := range m.Allocatable { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + return n +} + +func (m *NodeSystemInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectFieldSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaim) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaimCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PersistentVolumeClaimList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeClaimSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + if 
m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DataSource != nil { + l = m.DataSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *PersistentVolumeList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n 
+} + +func (m *PersistentVolumeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PersistentVolumeStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Pod) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAntiAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodAttachOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func (m *PodCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDNSConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDNSConfigOption) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodExecOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IP) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodLogOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + n += 2 + return n +} + +func (m *PodPortForwardOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n +} + +func (m *PodProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodReadinessGate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ConditionType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) + } 
+ } + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if len(m.Sysctls) > 0 { + for _, e := range m.Sysctls { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FSGroupChangePolicy != nil { + l = len(*m.FSGroupChangePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSignature) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) + } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ShareProcessNamespace != nil { + n += 3 + } + if len(m.ReadinessGates) > 0 { + for _, e := range m.ReadinessGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) + n += 2 + l + sovGenerated(uint64(l)) + } + if m.EnableServiceLinks != nil { + n += 3 + } + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 2 + l + sovGenerated(uint64(l)) + } + if 
len(m.Overhead) > 0 { + for k, v := range m.Overhead { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.TopologySpreadConstraints) > 0 { + for _, e := range m.TopologySpreadConstraints { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.EphemeralContainers) > 0 { + for _, e := range m.EphemeralContainers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.NominatedNodeName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.PodIPs) > 0 { + for _, e := range m.PodIPs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EphemeralContainerStatuses) > 0 { + for _, e := range m.EphemeralContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodStatusResult) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplate) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodTemplateList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodTemplateSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PortworxVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PreferAvoidPodsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PreferredSchedulingTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Probe) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n +} + +func (m *ProjectedVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + return n +} + +func (m *QuobyteVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Tenant) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RBDPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RBDVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n +} + +func (m *RangeAllocation) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicationController) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) 
+ n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicationControllerList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicationControllerSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicationControllerStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceFieldSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuota) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ResourceQuotaList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ResourceQuotaSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ScopeSelector != nil { + l = m.ScopeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ResourceQuotaStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Used) > 0 { + for k, v := range 
m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *ResourceRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *SELinuxOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScaleIOVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ScopeSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ScopedResourceSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ScopeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Secret) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + 
} + } + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.Immutable != nil { + n += 2 + } + return n +} + +func (m *SecretEnvSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretKeySelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *SecretProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecretReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) + } + if m.Optional != nil { + n += 2 + } + return n +} + +func (m *SecurityContext) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Privileged != nil { + n += 2 + } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + n += 2 + } + if m.ReadOnlyRootFilesystem != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 2 + } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + } + if m.ProcMount != nil { + l = len(*m.ProcMount) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.WindowsOptions != nil { + l = m.WindowsOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SerializedReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Service) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceAccount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ 
= l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AutomountServiceAccountToken != nil { + n += 2 + } + return n +} + +func (m *ServiceAccountList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceAccountTokenProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Audience) + n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) + } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServicePort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) + if m.AppProtocol != nil { + l = len(*m.AppProtocol) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ServiceProxyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ServiceSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + n += 2 + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPFamily != nil { + l = len(*m.IPFamily) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServiceStatus) Size() (n int) { + if m == 
nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SessionAffinityConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ClientIP != nil { + l = m.ClientIP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSPersistentVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *StorageOSVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeNamespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Sysctl) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TCPSocketAction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Taint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Toleration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } + return n +} + +func (m *TopologySelectorLabelRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySelectorTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchLabelExpressions) > 0 { + for _, e := range m.MatchLabelExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TopologySpreadConstraint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.MaxSkew)) + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.WhenUnsatisfiable) + n += 1 + l + sovGenerated(uint64(l)) + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypedLocalObjectReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + 
sovGenerated(uint64(l)) + } + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Volume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.VolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeDevice) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeMount) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.MountPath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SubPath) + n += 1 + l + sovGenerated(uint64(l)) + if m.MountPropagation != nil { + l = len(*m.MountPropagation) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.SubPathExpr) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeAffinity) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Required != nil { + l = m.Required.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeProjection) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccountToken != nil { + l = m.ServiceAccountToken.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + 
sovGenerated(uint64(l)) + } + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.VolumePath) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePolicyID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WeightedPodAffinityTerm) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *WindowsSecurityContextOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GMSACredentialSpecName != nil { + l = len(*m.GMSACredentialSpecName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.GMSACredentialSpec != nil { + l = len(*m.GMSACredentialSpec) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RunAsUserName != nil { + l = len(*m.RunAsUserName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(this.PodAffinity.String(), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(this.PodAntiAffinity.String(), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferAvoidPods 
:= "[]PreferAvoidPodsEntry{" + for _, f := range this.PreferAvoidPods { + repeatedStringForPreferAvoidPods += strings.Replace(strings.Replace(f.String(), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferAvoidPods += "}" + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + repeatedStringForPreferAvoidPods + `,`, + `}`, + }, "") + return s +} +func (this *AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `Kind:` + valueToStringGenerated(this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFilePersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `ControllerPublishSecretRef:` + strings.Replace(this.ControllerPublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ControllerExpandSecretRef:` + 
strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) + } + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Capabilities) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderPersistentVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClientIPConfig) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ClientIPConfig{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ComponentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ComponentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ComponentStatus{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMap) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForBinaryData := make([]string, 0, len(this.BinaryData)) + for k := range this.BinaryData { + keysForBinaryData = append(keysForBinaryData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + mapStringForBinaryData := "map[string][]byte{" + for _, k := range keysForBinaryData { + mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) + } + mapStringForBinaryData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `BinaryData:` + mapStringForBinaryData + `,`, + `Immutable:` + valueToStringGenerated(this.Immutable) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + 
strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ConfigMap{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapNodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" 
+ repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(this.Waiting.String(), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(this.Running.String(), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(this.Terminated.String(), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.StartedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FinishedAt), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `Started:` + valueToStringGenerated(this.Started) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DownwardAPIVolumeFile{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += 
"}" + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(this.TargetRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" + } + repeatedStringForAddresses := "[]EndpointAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForNotReadyAddresses := "[]EndpointAddress{" + for _, f := range this.NotReadyAddresses { + repeatedStringForNotReadyAddresses += strings.Replace(strings.Replace(f.String(), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForNotReadyAddresses += "}" + repeatedStringForPorts := "[]EndpointPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + repeatedStringForAddresses + `,`, + `NotReadyAddresses:` + repeatedStringForNotReadyAddresses + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubsets := "[]EndpointSubset{" + for _, f := range this.Subsets { + repeatedStringForSubsets += strings.Replace(strings.Replace(f.String(), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + "," + } + repeatedStringForSubsets += "}" + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + repeatedStringForSubsets + `,`, + `}`, + }, "") + return s +} +func (this *EndpointsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Endpoints{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Endpoints", "Endpoints", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EndpointsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 
1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(this.ConfigMapRef.String(), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVar) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(this.ValueFrom.String(), "EnvVarSource", "EnvVarSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(this.FieldRef.String(), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(this.ResourceFieldRef.String(), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(this.ConfigMapKeyRef.String(), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(this.SecretKeyRef.String(), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EphemeralContainer{`, + `EphemeralContainerCommon:` + strings.Replace(strings.Replace(this.EphemeralContainerCommon.String(), "EphemeralContainerCommon", "EphemeralContainerCommon", 1), `&`, ``, 1) + `,`, + `TargetContainerName:` + fmt.Sprintf("%v", this.TargetContainerName) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainerCommon) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ContainerPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += strings.Replace(strings.Replace(f.String(), "EnvVar", "EnvVar", 1), `&`, ``, 1) + "," + } + repeatedStringForEnv += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += strings.Replace(strings.Replace(f.String(), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeMounts += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += strings.Replace(strings.Replace(f.String(), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + "," + } + repeatedStringForEnvFrom += "}" + repeatedStringForVolumeDevices := "[]VolumeDevice{" + for _, f := range this.VolumeDevices { + repeatedStringForVolumeDevices += strings.Replace(strings.Replace(f.String(), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumeDevices += "}" + s := strings.Join([]string{`&EphemeralContainerCommon{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", 
this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + repeatedStringForPorts + `,`, + `Env:` + repeatedStringForEnv + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `LivenessProbe:` + strings.Replace(this.LivenessProbe.String(), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(this.ReadinessProbe.String(), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(this.Lifecycle.String(), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + repeatedStringForVolumeDevices + `,`, + `StartupProbe:` + strings.Replace(this.StartupProbe.String(), "Probe", "Probe", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EphemeralContainers) String() string { + if this == nil { + return "nil" + } + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + s := strings.Join([]string{`&EphemeralContainers{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.FirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(this.Related.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + 
`ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, + `}`, + }, "") + return s +} +func (this *FlexPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" + } + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) + } + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", 
"LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s +} +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsPersistentVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `EndpointsNamespace:` + valueToStringGenerated(this.EndpointsNamespace) + `,`, + `}`, + }, "") + return s +} +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" + } + repeatedStringForHTTPHeaders := "[]HTTPHeader{" + for _, f := range this.HTTPHeaders { + repeatedStringForHTTPHeaders += strings.Replace(strings.Replace(f.String(), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + "," + } + repeatedStringForHTTPHeaders += "}" + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + repeatedStringForHTTPHeaders + `,`, + `}`, + }, "") + return s +} +func (this *HTTPHeader) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Handler) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(this.Exec.String(), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(this.HTTPGet.String(), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(this.TCPSocket.String(), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s +} +func (this *HostAlias) String() string { + if this == nil { + return "nil" + } + s 
:= strings.Join([]string{`&HostAlias{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, + `}`, + }, "") + return s +} +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s +} +func (this *KeyToPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *Lifecycle) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(this.PostStart.String(), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(this.PreStop.String(), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" + } + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + 
mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) + } + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin := "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) + } + mapStringForMin += "}" + keysForDefault := make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) + } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) + } + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) + } + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]LimitRange{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "LimitRange", "LimitRange", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForLimits := "[]LimitRangeItem{" + for _, f := range this.Limits { + repeatedStringForLimits += strings.Replace(strings.Replace(f.String(), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + "," + } + repeatedStringForLimits += "}" + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + repeatedStringForLimits + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + 
repeatedStringForItems += "}" + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s +} +func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]LoadBalancerIngress{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + repeatedStringForIngress + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *LocalVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LocalVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Namespace{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Namespace", "Namespace", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceSpec) String() string { + if this == 
nil { + return "nil" + } + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s +} +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NamespaceCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NamespaceCondition", "NamespaceCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *Node) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Node{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]PreferredSchedulingTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(this.RequiredDuringSchedulingIgnoredDuringExecution.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastHeartbeatTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigSource{`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeConfigStatus{`, + `Assigned:` + strings.Replace(this.Assigned.String(), "NodeConfigSource", 
"NodeConfigSource", 1) + `,`, + `Active:` + strings.Replace(this.Active.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `LastKnownGood:` + strings.Replace(this.LastKnownGood.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s +} +func (this *NodeDaemonEndpoints) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Node{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Node", "Node", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForNodeSelectorTerms := "[]NodeSelectorTerm{" + for _, f := range this.NodeSelectorTerms { + repeatedStringForNodeSelectorTerms += strings.Replace(strings.Replace(f.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForNodeSelectorTerms += "}" + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + repeatedStringForNodeSelectorTerms + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]NodeSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + repeatedStringForMatchFields := "[]NodeSelectorRequirement{" + for _, f := range this.MatchFields { + repeatedStringForMatchFields += strings.Replace(strings.Replace(f.String(), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + "," + } + 
repeatedStringForMatchFields += "}" + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `MatchFields:` + repeatedStringForMatchFields + `,`, + `}`, + }, "") + return s +} +func (this *NodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTaints := "[]Taint{" + for _, f := range this.Taints { + repeatedStringForTaints += strings.Replace(strings.Replace(f.String(), "Taint", "Taint", 1), `&`, ``, 1) + "," + } + repeatedStringForTaints += "}" + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `DoNotUseExternalID:` + fmt.Sprintf("%v", this.DoNotUseExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, + `Taints:` + repeatedStringForTaints + `,`, + `ConfigSource:` + strings.Replace(this.ConfigSource.String(), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `PodCIDRs:` + fmt.Sprintf("%v", this.PodCIDRs) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]NodeCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForAddresses := "[]NodeAddress{" + for _, f := range this.Addresses { + repeatedStringForAddresses += strings.Replace(strings.Replace(f.String(), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + "," + } + repeatedStringForAddresses += "}" + repeatedStringForImages := "[]ContainerImage{" + for _, f := range this.Images { + repeatedStringForImages += strings.Replace(strings.Replace(f.String(), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + "," + } + repeatedStringForImages += "}" + repeatedStringForVolumesAttached := "[]AttachedVolume{" + for _, f := range this.VolumesAttached { + repeatedStringForVolumesAttached += strings.Replace(strings.Replace(f.String(), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumesAttached += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) + } + mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Addresses:` + repeatedStringForAddresses + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + 
strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + repeatedStringForImages + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + repeatedStringForVolumesAttached + `,`, + `Config:` + strings.Replace(this.Config.String(), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + 
`LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolumeClaim{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `DataSource:` + strings.Replace(this.DataSource.String(), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PersistentVolumeClaimCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PersistentVolume{" + for _, f := range this.Items { + repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsPersistentVolumeSource", "GlusterfsPersistentVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, + `Local:` + strings.Replace(this.Local.String(), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" + } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = 
append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(this.ClaimRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + 
repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `}`, + }, "") + return s +} +func (this *PodAntiAffinity) String() string { + if this == nil { + return "nil" + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution := "[]PodAffinityTerm{" + for _, f := range this.RequiredDuringSchedulingIgnoredDuringExecution { + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution += "}" + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution := "[]WeightedPodAffinityTerm{" + for _, f := range this.PreferredDuringSchedulingIgnoredDuringExecution { + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += strings.Replace(strings.Replace(f.String(), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + "," + } + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution += "}" + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForRequiredDuringSchedulingIgnoredDuringExecution + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + repeatedStringForPreferredDuringSchedulingIgnoredDuringExecution + `,`, + `}`, + }, "") + return s +} +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s +} +func (this *PodCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" + } + repeatedStringForOptions := "[]PodDNSConfigOption{" + for _, f := range this.Options { + repeatedStringForOptions += strings.Replace(strings.Replace(f.String(), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + "," + } + repeatedStringForOptions += "}" + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + repeatedStringForOptions + `,`, + `}`, + }, "") + return s +} +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *PodIP) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodIP{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `}`, + }, "") + return s +} +func (this *PodList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Pod{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Pod", "Pod", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `InsecureSkipTLSVerifyBackend:` + fmt.Sprintf("%v", this.InsecureSkipTLSVerifyBackend) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodReadinessGate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodReadinessGate{`, + `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" + } + repeatedStringForSysctls := "[]Sysctl{" + for _, f := range this.Sysctls { + repeatedStringForSysctls += strings.Replace(strings.Replace(f.String(), "Sysctl", "Sysctl", 1), `&`, ``, 1) + "," + } + repeatedStringForSysctls += "}" + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + 
valueToStringGenerated(this.FSGroup) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `Sysctls:` + repeatedStringForSysctls + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `FSGroupChangePolicy:` + valueToStringGenerated(this.FSGroupChangePolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PodSignature) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "v1.OwnerReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForContainers := "[]Container{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + repeatedStringForInitContainers := "[]Container{" + for _, f := range this.InitContainers { + repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainers += "}" + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," + } + repeatedStringForTolerations += "}" + repeatedStringForHostAliases := "[]HostAlias{" + for _, f := range this.HostAliases { + repeatedStringForHostAliases += strings.Replace(strings.Replace(f.String(), "HostAlias", "HostAlias", 1), `&`, ``, 1) + "," + } + repeatedStringForHostAliases += "}" + repeatedStringForReadinessGates := "[]PodReadinessGate{" + for _, f := range this.ReadinessGates { + repeatedStringForReadinessGates += strings.Replace(strings.Replace(f.String(), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + "," + } + repeatedStringForReadinessGates += "}" + repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" + for _, f := range this.TopologySpreadConstraints { + repeatedStringForTopologySpreadConstraints += strings.Replace(strings.Replace(f.String(), "TopologySpreadConstraint", "TopologySpreadConstraint", 1), `&`, ``, 1) + "," + } + repeatedStringForTopologySpreadConstraints += "}" + repeatedStringForEphemeralContainers := "[]EphemeralContainer{" + for _, f := range this.EphemeralContainers { + repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainers += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + 
mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + keysForOverhead := make([]string, 0, len(this.Overhead)) + for k := range this.Overhead { + keysForOverhead = append(keysForOverhead, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) + mapStringForOverhead := "ResourceList{" + for _, k := range keysForOverhead { + mapStringForOverhead += fmt.Sprintf("%v: %v,", k, this.Overhead[ResourceName(k)]) + } + mapStringForOverhead += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + repeatedStringForVolumes + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "PodSecurityContext", "PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(this.Affinity.String(), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + repeatedStringForInitContainers + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `HostAliases:` + repeatedStringForHostAliases + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(this.DNSConfig.String(), "PodDNSConfig", "PodDNSConfig", 1) + `,`, + `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, + `ReadinessGates:` + repeatedStringForReadinessGates + `,`, + `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `Overhead:` + mapStringForOverhead + `,`, + `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, + `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]PodCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PodCondition", "PodCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + repeatedStringForContainerStatuses := "[]ContainerStatus{" + for _, f := range this.ContainerStatuses { + 
repeatedStringForContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForContainerStatuses += "}" + repeatedStringForInitContainerStatuses := "[]ContainerStatus{" + for _, f := range this.InitContainerStatuses { + repeatedStringForInitContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForInitContainerStatuses += "}" + repeatedStringForPodIPs := "[]PodIP{" + for _, f := range this.PodIPs { + repeatedStringForPodIPs += strings.Replace(strings.Replace(f.String(), "PodIP", "PodIP", 1), `&`, ``, 1) + "," + } + repeatedStringForPodIPs += "}" + repeatedStringForEphemeralContainerStatuses := "[]ContainerStatus{" + for _, f := range this.EphemeralContainerStatuses { + repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," + } + repeatedStringForEphemeralContainerStatuses += "}" + s := strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, + `ContainerStatuses:` + repeatedStringForContainerStatuses + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + repeatedStringForInitContainerStatuses + `,`, + `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, + `PodIPs:` + repeatedStringForPodIPs + `,`, + `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, + `}`, + }, "") + return s +} +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplate) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodTemplate{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, + `EvictionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvictionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForSources := "[]VolumeProjection{" + for _, f := range this.Sources { + repeatedStringForSources += strings.Replace(strings.Replace(f.String(), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + "," + } + repeatedStringForSources += "}" + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + repeatedStringForSources + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, + `}`, + }, "") + return s +} +func (this *RBDPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDPersistentVolumeSource{`, + 
`CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ReplicationController{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, 
len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ReplicationControllerCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Divisor), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ResourceQuota{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := 
range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + s := strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `ScopeSelector:` + strings.Replace(this.ScopeSelector.String(), "ScopeSelector", "ScopeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" + } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) + } + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k := range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) + } + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s +} +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" + } + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) + } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) + } + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + 
`ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ScopeSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]ScopedResourceSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + s := strings.Join([]string{`&ScopeSelector{`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `}`, + }, "") + return s +} +func (this *ScopedResourceSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, + `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" + } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) + } + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) + } + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `Immutable:` + valueToStringGenerated(this.Immutable) + `,`, + `}`, + }, "") + return s +} +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil 
{ + return "nil" + } + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Secret{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Secret", "Secret", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *SecretProjection) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecretReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]KeyToPath{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + repeatedStringForItems + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(this.Capabilities.String(), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, + `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, + `}`, + }, "") + return s +} +func 
(this *SerializedReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" + } + repeatedStringForSecrets := "[]ObjectReference{" + for _, f := range this.Secrets { + repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForSecrets += "}" + repeatedStringForImagePullSecrets := "[]LocalObjectReference{" + for _, f := range this.ImagePullSecrets { + repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," + } + repeatedStringForImagePullSecrets += "}" + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + repeatedStringForSecrets + `,`, + `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ServiceAccount{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountTokenProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceAccountTokenProjection{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Service{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Service", "Service", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ServicePort) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]ServicePort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + repeatedStringForPorts + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, + `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, + `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SessionAffinityConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SessionAffinityConfig{`, + `ClientIP:` + strings.Replace(this.ClientIP.String(), "ClientIPConfig", "ClientIPConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSPersistentVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + 
`,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageOSVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *Taint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorLabelRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *TopologySelectorTerm) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchLabelExpressions := "[]TopologySelectorLabelRequirement{" + for _, f := range this.MatchLabelExpressions { + repeatedStringForMatchLabelExpressions += strings.Replace(strings.Replace(f.String(), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchLabelExpressions += "}" + s := strings.Join([]string{`&TopologySelectorTerm{`, + `MatchLabelExpressions:` + repeatedStringForMatchLabelExpressions + `,`, + `}`, + }, "") + return s +} +func (this *TopologySpreadConstraint) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TopologySpreadConstraint{`, + `MaxSkew:` + fmt.Sprintf("%v", this.MaxSkew) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TypedLocalObjectReference) 
String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypedLocalObjectReference{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Volume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeMount) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, + `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeAffinity) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeAffinity{`, + `Required:` + strings.Replace(this.Required.String(), "NodeSelector", "NodeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(this.EmptyDir.String(), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(this.GitRepo.String(), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + strings.Replace(this.Secret.String(), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + 
`PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(this.RBD.String(), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(this.Cinder.String(), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(this.Projected.String(), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, + `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, + `}`, + }, "") + return s +} +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *WindowsSecurityContextOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WindowsSecurityContextOptions{`, + `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, + `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, + `RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`, + `}`, + }, "") + return s +} +func 
valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Affinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + 
for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AvoidPods) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := 
m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DataDiskURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) + m.Kind = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.SecretNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field ShareName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShareName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Binding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Binding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerPublishSecretRef == nil { + m.ControllerPublishSecretRef = &SecretReference{} + } + if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeStageSecretRef == nil { + m.NodeStageSecretRef = &SecretReference{} + } + if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &SecretReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ControllerExpandSecretRef == nil { + m.ControllerExpandSecretRef = &SecretReference{} + } + if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ReadOnly = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.VolumeAttributes[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &LocalObjectReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Capabilities) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 
{ + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretFile = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v 
!= 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := 
m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { 
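+	// Allocate the optional SecretRef message on first use before unmarshaling the nested bytes into it.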
+ m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Error = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l 
{ + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ComponentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ComponentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMap) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.BinaryData == nil { + m.BinaryData = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.BinaryData[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Immutable = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ConfigMap{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Container) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Container: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Container: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := 
m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} + } + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerImage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerImage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerImage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Names", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Names = append(m.Names, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeBytes", wireType) + } + m.SizeBytes = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SizeBytes |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPort", wireType) + } + m.HostPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HostPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerPort", wireType) + } + m.ContainerPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ContainerPort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} + } + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Running == nil { + m.Running = &ContainerStateRunning{} + } + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err 
+ } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) + } + m.ExitCode = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExitCode |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Signal |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ready = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) + } + m.RestartCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RestartCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImageID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Started", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Started = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonEndpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + 
return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SizeLimit == nil { + m.SizeLimit = &resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, 
wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Prefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapRef == nil { + m.ConfigMapRef = &ConfigMapEnvSource{} + } + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} + } + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if 
err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EphemeralContainer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EphemeralContainer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerCommon", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EphemeralContainerCommon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EphemeralContainerCommon: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EphemeralContainerCommon: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WorkingDir = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ContainerPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LivenessProbe == nil { + m.LivenessProbe = &Probe{} + } + if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ReadinessProbe == nil { + m.ReadinessProbe = &Probe{} + } + if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lifecycle == nil { + m.Lifecycle = &Lifecycle{} + } + if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &SecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 17: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StdinOnce = bool(v != 0) + case 18: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) + if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartupProbe == nil { + m.StartupProbe = &Probe{} + } + if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx 
+ skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EphemeralContainers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EphemeralContainers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.InvolvedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Component = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, 
err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExecAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Lun = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := 
int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + 
} + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Options == nil { + m.Options = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if 
intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Options[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DatasetUUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} 
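Editor's note (illustrative only, not part of the vendored file): every generated Unmarshal method in this file repeats the same protobuf wire-format pattern — read a varint tag, split it into a field number and wire type, then decode the body either as a varint (wire type 0, e.g. ReadOnly) or as a length-delimited payload (wire type 2, e.g. Driver or FSType), guarding against integer overflow and truncated input. The minimal sketch below isolates that pattern under those assumptions; the names decodeVarint and decodeStringField are the editor's own and do not appear in the vendored code.

// varintdemo.go: minimal sketch of the varint / length-delimited decoding
// loop repeated throughout the generated Unmarshal methods in this file.
package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeVarint reads a base-128 varint starting at data[index] and returns
// the decoded value plus the index of the first byte after it.
func decodeVarint(data []byte, index int) (uint64, int, error) {
	var value uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow") // mirrors ErrIntOverflowGenerated
		}
		if index >= len(data) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[index]
		index++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 { // high bit clear: last byte of the varint
			return value, index, nil
		}
	}
}

// decodeStringField decodes one length-delimited (wire type 2) field:
// a varint length followed by that many payload bytes.
func decodeStringField(data []byte, index int) (string, int, error) {
	length, index, err := decodeVarint(data, index)
	if err != nil {
		return "", 0, err
	}
	end := index + int(length)
	if end < index || end > len(data) {
		return "", 0, errors.New("invalid length") // mirrors ErrInvalidLengthGenerated
	}
	return string(data[index:end]), end, nil
}

func main() {
	// Field 1, wire type 2 (tag byte 0x0A), length 5, payload "hello".
	msg := []byte{0x0A, 0x05, 'h', 'e', 'l', 'l', 'o'}

	tag, next, err := decodeVarint(msg, 0)
	if err != nil {
		panic(err)
	}
	fieldNum, wireType := tag>>3, tag&0x7
	value, _, err := decodeStringField(msg, next)
	if err != nil {
		panic(err)
	}
	fmt.Printf("field=%d wiretype=%d value=%q\n", fieldNum, wireType, value)
}

Running this prints field=1 wiretype=2 value="hello", which is the same tag/length/payload walk the generated code performs for every string field (Name, Driver, Repository, and so on); the generated methods simply repeat it per field inside a switch on fieldNum.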
+func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PDName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Repository = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Revision = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Directory = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
GlusterfsPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointsName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.EndpointsNamespace = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EndpointsName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scheme = URIScheme(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{}) + if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPHeader) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Handler) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Handler: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Exec == nil { + m.Exec = &ExecAction{} + } + if err := 
m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTPGet == nil { + m.HTTPGet = &HTTPGetAction{} + } + if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TCPSocket == nil { + m.TCPSocket = &TCPSocketAction{} + } + if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostAlias) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostAlias: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostAlias: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostnames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostnames = append(m.Hostnames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := HostPathType(dAtA[iNdEx:postIndex]) + m.Type = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetPortal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IQN = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) + } + m.Lun = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Lun |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DiscoveryCHAPAuth = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SessionCHAPAuth = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.InitiatorName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *KeyToPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lifecycle) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PostStart == nil { + m.PostStart = &Handler{} + } + if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PreStop == nil { + m.PreStop = &Handler{} + } + if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = LimitType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Max == nil { + m.Max = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for 
iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Max[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Min == nil { + m.Min = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return 
[Vendored generated code: this hunk continues the machine-generated protobuf (gogo-style) unmarshalling for the k8s core/v1 types pulled in by this patch. It carries the Unmarshal methods for LimitRangeItem (its Min, Default, DefaultRequest, and MaxLimitRequestRatio ResourceList maps), LimitRangeList, LimitRangeSpec, List, LoadBalancerIngress, LoadBalancerStatus, LocalObjectReference, LocalVolumeSource, NFSVolumeSource, Namespace, NamespaceCondition, NamespaceList, NamespaceSpec, NamespaceStatus, Node, NodeAddress, NodeAffinity, NodeCondition, NodeConfigSource, NodeConfigStatus, NodeDaemonEndpoints, NodeList, NodeProxyOptions, and the start of NodeResources. Every method follows the same shape: decode a varint tag, split it into field number and wire type, bounds-check the length-delimited payload against ErrInvalidLengthGenerated / io.ErrUnexpectedEOF, then either unmarshal the embedded message, append to a slice or ResourceList map, or skip unknown fields with skipGenerated.]
= postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelectorTerms", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeSelectorTerms = append(m.NodeSelectorTerms, NodeSelectorTerm{}) + if err := m.NodeSelectorTerms[len(m.NodeSelectorTerms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = NodeSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = 
append(m.MatchExpressions, NodeSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchFields = append(m.MatchFields, NodeSelectorRequirement{}) + if err := m.MatchFields[len(m.MatchFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DoNotUseExternalID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DoNotUseExternalID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProviderID", wireType) + } + var 
stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProviderID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Unschedulable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Unschedulable = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Taints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Taints = append(m.Taints, Taint{}) + if err := m.Taints[len(m.Taints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigSource == nil { + m.ConfigSource = &NodeConfigSource{} + } + if err := m.ConfigSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodCIDRs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodCIDRs = append(m.PodCIDRs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + 
return nil +} +func (m *NodeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Allocatable[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = NodePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, NodeCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Addresses = append(m.Addresses, NodeAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DaemonEndpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DaemonEndpoints.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Images", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Images = append(m.Images, ContainerImage{}) + if err := m.Images[len(m.Images)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumesInUse", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumesInUse = append(m.VolumesInUse, UniqueVolumeName(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumesAttached", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumesAttached = append(m.VolumesAttached, AttachedVolume{}) + if err := m.VolumesAttached[len(m.VolumesAttached)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Config", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Config == nil { + m.Config = &NodeConfigStatus{} + } + if err := m.Config.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeSystemInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeSystemInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeSystemInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MachineID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MachineID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SystemUUID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SystemUUID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BootID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.BootID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KernelVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KernelVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OSImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OSImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerRuntimeVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerRuntimeVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeletVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeletVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field KubeProxyVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.KubeProxyVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OperatingSystem", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OperatingSystem = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Architecture", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Architecture = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectFieldSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) 
+ iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen 
+ if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= 
l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaim) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaim: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaim: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PersistentVolumeClaimConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolumeClaim{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + 
} + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.StorageClassName = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DataSource == nil { + m.DataSource = &TypedLocalObjectReference{} + } + if err := m.DataSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { 
+ return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + 
if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PersistentVolumeClaimCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClaimName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PersistentVolume{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsPersistentVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDPersistentVolumeSource{} + } + if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIPersistentVolumeSource{} + } + if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderPersistentVolumeSource{} + } + if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSPersistentVolumeSource{} + } + if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexPersistentVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.AzureFile == nil { + m.AzureFile = &AzureFilePersistentVolumeSource{} + } + if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quobyte == nil { + m.Quobyte = &QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDisk == nil { + m.AzureDisk = &AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOPersistentVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Local == nil { + m.Local = &LocalVolumeSource{} + } + if err := m.Local.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StorageOS == nil { + m.StorageOS = &StorageOSPersistentVolumeSource{} + } + if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &CSIPersistentVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capacity == nil { + m.Capacity = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Capacity[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PersistentVolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClaimRef == nil { + m.ClaimRef = &ObjectReference{} + } + if err := m.ClaimRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) + m.VolumeMode = &s + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeAffinity == nil { + m.NodeAffinity = &VolumeNodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Phase = PersistentVolumePhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PdID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PdID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Pod) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Pod: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) + if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) + if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodAttachOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 
4 { + return fmt.Errorf("proto: PodAttachOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodAttachOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = PodConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + 
} + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDNSConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Nameservers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Nameservers = append(m.Nameservers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Searches", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Searches = append(m.Searches, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, PodDNSConfigOption{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDNSConfigOption: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDNSConfigOption: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Value = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodExecOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := 
int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodExecOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodExecOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdin = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stdout", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stdout = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Stderr", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Stderr = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TTY = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if 
iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Pod{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodLogOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodLogOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodLogOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Container", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Container = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Follow", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Follow = bool(v != 0) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Previous", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Previous = bool(v != 0) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SinceSeconds = &v + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SinceTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SinceTime == nil { + m.SinceTime = &v1.Time{} + } + if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Timestamps = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TailLines", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TailLines = &v + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field LimitBytes", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.LimitBytes = &v + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.InsecureSkipTLSVerifyBackend = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPortForwardOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPortForwardOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 0 { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.Ports) == 0 { + m.Ports = make([]int32, 0, elementCount) + } + for iNdEx < postIndex { + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Ports = append(m.Ports, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodReadinessGate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodReadinessGate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConditionType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ConditionType = PodConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 4: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.SupplementalGroups) == 0 { + m.SupplementalGroups = make([]int64, 0, elementCount) + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SupplementalGroups = append(m.SupplementalGroups, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.FSGroup = &v + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsGroup = &v + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sysctls", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sysctls = append(m.Sysctls, Sysctl{}) + if err := m.Sysctls[len(m.Sysctls)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroupChangePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PodFSGroupChangePolicy(dAtA[iNdEx:postIndex]) + m.FSGroupChangePolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSignature) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSignature: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSignature: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodController", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodController == nil { + m.PodController = &v1.OwnerReference{} + } + if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, Container{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RestartPolicy = RestartPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TerminationGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TerminationGracePeriodSeconds = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ActiveDeadlineSeconds = &v + 
case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DNSPolicy = DNSPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field ServiceAccountName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceAccountName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedServiceAccount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DeprecatedServiceAccount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecurityContext == nil { + m.SecurityContext = &PodSecurityContext{} + } + if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hostname = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subdomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subdomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Affinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Affinity == nil { + m.Affinity = &Affinity{} + } + if err := m.Affinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SchedulerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SchedulerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainers = append(m.InitContainers, Container{}) + if err := m.InitContainers[len(m.InitContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostAliases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostAliases = append(m.HostAliases, HostAlias{}) + if err := m.HostAliases[len(m.HostAliases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PriorityClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PriorityClassName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 25: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Priority", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Priority = &v + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DNSConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DNSConfig == nil { + m.DNSConfig = &PodDNSConfig{} + } + if err := m.DNSConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ShareProcessNamespace", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.ShareProcessNamespace = &b + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadinessGates", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReadinessGates = append(m.ReadinessGates, PodReadinessGate{}) + if err := m.ReadinessGates[len(m.ReadinessGates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 29: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RuntimeClassName = &s + iNdEx = postIndex + case 30: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field EnableServiceLinks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.EnableServiceLinks = &b + case 31: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex + case 32: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Overhead[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 33: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{}) + if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 34: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) + if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + m.Phase = PodPhase(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, PodCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.PodIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StartTime == nil { + m.StartTime = &v1.Time{} + } + if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerStatuses = append(m.ContainerStatuses, ContainerStatus{}) + if err := m.ContainerStatuses[len(m.ContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field QOSClass", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.QOSClass = PodQOSClass(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InitContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.InitContainerStatuses = append(m.InitContainerStatuses, ContainerStatus{}) + if err := m.InitContainerStatuses[len(m.InitContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NominatedNodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PodIPs = append(m.PodIPs, PodIP{}) + if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{}) + if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodStatusResult) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodStatusResult: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodStatusResult: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*PodTemplateList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodTemplate{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodTemplateSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen 
< 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PortworxVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PortworxVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", 
wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferAvoidPodsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSignature", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSignature.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EvictionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EvictionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Preference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Probe) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Probe: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Handler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType) + } + m.InitialDelaySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.InitialDelaySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) + } + m.TimeoutSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TimeoutSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) + } + m.PeriodSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PeriodSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType) + } + m.SuccessThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SuccessThreshold |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType) + } + m.FailureThreshold = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FailureThreshold |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sources = append(m.Sources, VolumeProjection{}) + if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QuobyteVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QuobyteVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Registry = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volume = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tenant = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RBDPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RBDPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDPool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RadosUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyring = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + 
m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RBDVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDImage = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RBDPool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RadosUser = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keyring = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated 
+ } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RangeAllocation) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Range = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationController) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicationControllerConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicationController{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return 
io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Template == nil { + m.Template = &PodTemplateSpec{} + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = 
%d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicationControllerCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContainerName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuota) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + 
b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ResourceQuota{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hard == nil { + m.Hard = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Hard[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Scopes = append(m.Scopes, ResourceQuotaScope(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScopeSelector == nil { + m.ScopeSelector = &ScopeSelector{} + } + if err := m.ScopeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Hard == nil { + m.Hard = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < 
postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Hard[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Used == nil { + m.Used = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return 
ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Used[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Limits == nil { + m.Limits = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Limits[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Requests == nil { + m.Requests = make(ResourceList) + } + var mapkey ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if 
postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Requests[ResourceName(mapkey)] = *mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Role = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + 
if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Level = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.System = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SSLEnabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProtectionDomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageMode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleIOVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleIOVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Gateway = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.System = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SSLEnabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ProtectionDomain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePool = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageMode = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopeSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopeSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopeSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, ScopedResourceSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScopedResourceSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScopedResourceSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScopeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ScopeName = ResourceQuotaScope(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = ScopeSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Secret) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Secret: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Data == nil { + m.Data = make(map[string][]byte) + } + var mapkey string + mapvalue := []byte{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return 
ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex < 0 { + return ErrInvalidLengthGenerated + } + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Data[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = SecretType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StringData == nil { + m.StringData = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 
{ + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.StringData[mapkey] = mapvalue + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Immutable = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { + l 
:= len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType 
= %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Secret{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*SecretVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, KeyToPath{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DefaultMode = &v + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Optional = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecurityContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Capabilities == nil { + m.Capabilities = &Capabilities{} + } + if err := m.Capabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Privileged = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsUser = &v + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.RunAsNonRoot = &b + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + 
m.ReadOnlyRootFilesystem = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RunAsGroup = &v + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProcMount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := ProcMountType(dAtA[iNdEx:postIndex]) + m.ProcMount = &s + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.WindowsOptions == nil { + m.WindowsOptions = &WindowsSecurityContextOptions{} + } + if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SerializedReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) + } + var 
msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secrets = append(m.Secrets, ObjectReference{}) + if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) + if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) + } + var v int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AutomountServiceAccountToken = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ServiceAccount{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceAccountTokenProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceAccountTokenProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Audience = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ExpirationSeconds = &v + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Service{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServicePort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) + } + m.NodePort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodePort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.AppProtocol = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, ServicePort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated 
+ } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ServiceType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) + } 
+ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) + } + m.HealthCheckNodePort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.HealthCheckNodePort |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 13: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.PublishNotReadyAddresses = bool(v != 0) + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SessionAffinityConfig == nil { + m.SessionAffinityConfig = &SessionAffinityConfig{} + } + if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := IPFamily(dAtA[iNdEx:postIndex]) + m.IPFamily = &s + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SessionAffinityConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SessionAffinityConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientIP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ClientIP == nil { + m.ClientIP = &ClientIPConfig{} + } + if err := m.ClientIP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageOSPersistentVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageOSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated 
+ } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &ObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageOSVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageOSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeNamespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Sysctl) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Sysctl: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex 
+ skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Taint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Taint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TimeAdded == nil { + m.TimeAdded = &v1.Time{} + } + if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Toleration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType 
== 4 { + return fmt.Errorf("proto: Toleration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = TolerationOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TolerationSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySelectorLabelRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySelectorLabelRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySelectorTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field MatchLabelExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchLabelExpressions = append(m.MatchLabelExpressions, TopologySelectorLabelRequirement{}) + if err := m.MatchLabelExpressions[len(m.MatchLabelExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType) + } + m.MaxSkew = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxSkew |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKey = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.LabelSelector == nil { + m.LabelSelector = &v1.LabelSelector{} + } + if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypedLocalObjectReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypedLocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.APIGroup = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Volume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Volume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeDevice) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DevicePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeMount) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountPropagation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := MountPropagationMode(dAtA[iNdEx:postIndex]) + m.MountPropagation = &s + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SubPathExpr = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeAffinity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Required == nil { + m.Required = &NodeSelector{} + } + if err := m.Required.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeProjection) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeProjection: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeProjection: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretProjection{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIProjection{} + } + if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapProjection{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountToken", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ServiceAccountToken == nil { + m.ServiceAccountToken = &ServiceAccountTokenProjection{} + } + if err := m.ServiceAccountToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HostPath == nil { + m.HostPath = &HostPathVolumeSource{} + } + if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.EmptyDir == nil { + m.EmptyDir = &EmptyDirVolumeSource{} + } + if err := m.EmptyDir.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GCEPersistentDisk == nil { + m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} + } + if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AWSElasticBlockStore == nil { + m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} + } + if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GitRepo == nil { + m.GitRepo = &GitRepoVolumeSource{} + } + if err := m.GitRepo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = 
%d for field Secret", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Secret == nil { + m.Secret = &SecretVolumeSource{} + } + if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NFS == nil { + m.NFS = &NFSVolumeSource{} + } + if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ISCSI == nil { + m.ISCSI = &ISCSIVolumeSource{} + } + if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Glusterfs == nil { + m.Glusterfs = &GlusterfsVolumeSource{} + } + if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PersistentVolumeClaim == nil { + m.PersistentVolumeClaim = 
&PersistentVolumeClaimVolumeSource{} + } + if err := m.PersistentVolumeClaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RBD == nil { + m.RBD = &RBDVolumeSource{} + } + if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FlexVolume == nil { + m.FlexVolume = &FlexVolumeSource{} + } + if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Cinder == nil { + m.Cinder = &CinderVolumeSource{} + } + if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CephFS == nil { + m.CephFS = &CephFSVolumeSource{} + } + if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Flocker == nil { + m.Flocker = &FlockerVolumeSource{} + } + if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DownwardAPI == nil { + m.DownwardAPI = &DownwardAPIVolumeSource{} + } + if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FC == nil { + m.FC = &FCVolumeSource{} + } + if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureFile == nil { + m.AzureFile = &AzureFileVolumeSource{} + } + if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMap == nil { + m.ConfigMap = &ConfigMapVolumeSource{} + } + if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.VsphereVolume == nil { + m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} + } + if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Quobyte == nil { + m.Quobyte = &QuobyteVolumeSource{} + } + if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AzureDisk == nil { + m.AzureDisk = &AzureDiskVolumeSource{} + } + if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PhotonPersistentDisk == nil { + m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} + } + if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PortworxVolume == nil { + m.PortworxVolume = &PortworxVolumeSource{} + } + if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 25: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ScaleIO == nil { + m.ScaleIO = &ScaleIOVolumeSource{} + } + if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 26: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Projected", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Projected == nil { + m.Projected = &ProjectedVolumeSource{} + } + if err := m.Projected.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 27: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.StorageOS == nil { + m.StorageOS = &StorageOSVolumeSource{} + } + if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 28: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CSI == nil { + m.CSI = &CSIVolumeSource{} + } + if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumePath", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumePath = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePolicyName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StoragePolicyID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) + } + m.Weight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Weight |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodAffinityTerm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WindowsSecurityContextOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WindowsSecurityContextOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpecName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GMSACredentialSpecName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpec", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.GMSACredentialSpec = &s + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUserName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.RunAsUserName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + 
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto new file mode 100644 index 0000000..d1cd8eb --- /dev/null +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -0,0 +1,5332 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.core.v1; + +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +message AWSElasticBlockStoreVolumeSource { + // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + optional string volumeID = 1; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 2; + + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // +optional + optional int32 partition = 3; + + // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". + // If omitted, the default is "false". + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + optional bool readOnly = 4; +} + +// Affinity is a group of affinity scheduling rules. +message Affinity { + // Describes node affinity scheduling rules for the pod. + // +optional + optional NodeAffinity nodeAffinity = 1; + + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). 
+ // +optional + optional PodAffinity podAffinity = 2; + + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + optional PodAntiAffinity podAntiAffinity = 3; +} + +// AttachedVolume describes a volume attached to a node +message AttachedVolume { + // Name of the attached volume + optional string name = 1; + + // DevicePath represents the device path where the volume should be available + optional string devicePath = 2; +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +message AvoidPods { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + repeated PreferAvoidPodsEntry preferAvoidPods = 1; +} + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +message AzureDiskVolumeSource { + // The Name of the data disk in the blob storage + optional string diskName = 1; + + // The URI the data disk in the blob storage + optional string diskURI = 2; + + // Host Caching mode: None, Read Only, Read Write. + // +optional + optional string cachingMode = 3; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 4; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 5; + + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + optional string kind = 6; +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +message AzureFilePersistentVolumeSource { + // the name of secret that contains Azure Storage Account Name and Key + optional string secretName = 1; + + // Share Name + optional string shareName = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 3; + + // the namespace of the secret that contains Azure Storage Account Name and Key + // default is the same as the Pod + // +optional + optional string secretNamespace = 4; +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +message AzureFileVolumeSource { + // the name of secret that contains Azure Storage Account Name and Key + optional string secretName = 1; + + // Share Name + optional string shareName = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 3; +} + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +message Binding { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The target object that you want to bind to the standard object. 
+ optional ObjectReference target = 2;
+}
+
+// Represents storage that is managed by an external CSI volume driver (Beta feature)
+message CSIPersistentVolumeSource {
+ // Driver is the name of the driver to use for this volume.
+ // Required.
+ optional string driver = 1;
+
+ // VolumeHandle is the unique volume name returned by the CSI volume
+ // plugin’s CreateVolume to refer to the volume on all subsequent calls.
+ // Required.
+ optional string volumeHandle = 2;
+
+ // Optional: The value to pass to ControllerPublishVolumeRequest.
+ // Defaults to false (read/write).
+ // +optional
+ optional bool readOnly = 3;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs".
+ // +optional
+ optional string fsType = 4;
+
+ // Attributes of the volume to publish.
+ // +optional
+ map<string, string> volumeAttributes = 5;
+
+ // ControllerPublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerPublishVolume and ControllerUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ optional SecretReference controllerPublishSecretRef = 6;
+
+ // NodeStageSecretRef is a reference to the secret object containing sensitive
+ // information to pass to the CSI driver to complete the CSI NodeStageVolume
+ // and NodeStageVolume and NodeUnstageVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ optional SecretReference nodeStageSecretRef = 7;
+
+ // NodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ optional SecretReference nodePublishSecretRef = 8;
+
+ // ControllerExpandSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // ControllerExpandVolume call.
+ // This is an alpha field and requires enabling ExpandCSIVolumes feature gate.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secrets are passed.
+ // +optional
+ optional SecretReference controllerExpandSecretRef = 9;
+}
+
+// Represents a source location of a volume to mount, managed by an external CSI driver
+message CSIVolumeSource {
+ // Driver is the name of the CSI driver that handles this volume.
+ // Consult with your admin for the correct name as registered in the cluster.
+ optional string driver = 1;
+
+ // Specifies a read-only configuration for the volume.
+ // Defaults to false (read/write).
+ // +optional
+ optional bool readOnly = 2;
+
+ // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs".
+ // If not provided, the empty value is passed to the associated CSI driver
+ // which will determine the default filesystem to apply.
+ // +optional
+ optional string fsType = 3;
+
+ // VolumeAttributes stores driver-specific properties that are passed to the CSI
+ // driver. Consult your driver's documentation for supported values.
+ // +optional
+ map<string, string> volumeAttributes = 4;
+
+ // NodePublishSecretRef is a reference to the secret object containing
+ // sensitive information to pass to the CSI driver to complete the CSI
+ // NodePublishVolume and NodeUnpublishVolume calls.
+ // This field is optional, and may be empty if no secret is required. If the
+ // secret object contains more than one secret, all secret references are passed.
+ // +optional
+ optional LocalObjectReference nodePublishSecretRef = 5;
+}
+
+// Adds and removes POSIX capabilities from running containers.
+message Capabilities {
+ // Added capabilities
+ // +optional
+ repeated string add = 1;
+
+ // Removed capabilities
+ // +optional
+ repeated string drop = 2;
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSPersistentVolumeSource {
+ // Required: Monitors is a collection of Ceph monitors
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ repeated string monitors = 1;
+
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ // +optional
+ optional string path = 2;
+
+ // Optional: User is the rados user name, default is admin
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ // +optional
+ optional string user = 3;
+
+ // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ // +optional
+ optional string secretFile = 4;
+
+ // Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ // +optional
+ optional SecretReference secretRef = 5;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ // +optional
+ optional bool readOnly = 6;
+}
+
+// Represents a Ceph Filesystem mount that lasts the lifetime of a pod
+// Cephfs volumes do not support ownership management or SELinux relabeling.
+message CephFSVolumeSource {
+ // Required: Monitors is a collection of Ceph monitors
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ repeated string monitors = 1;
+
+ // Optional: Used as the mounted root, rather than the full Ceph tree, default is /
+ // +optional
+ optional string path = 2;
+
+ // Optional: User is the rados user name, default is admin
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ // +optional
+ optional string user = 3;
+
+ // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ // +optional
+ optional string secretFile = 4;
+
+ // Optional: SecretRef is reference to the authentication secret for User, default is empty.
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ // +optional
+ optional LocalObjectReference secretRef = 5;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it
+ // +optional
+ optional bool readOnly = 6;
+}
+
+// Represents a cinder volume resource in Openstack.
+// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +message CinderPersistentVolumeSource { + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + optional string volumeID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + optional string fsType = 2; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + optional bool readOnly = 3; + + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + optional SecretReference secretRef = 4; +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +message CinderVolumeSource { + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + optional string volumeID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + optional string fsType = 2; + + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + optional bool readOnly = 3; + + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + optional LocalObjectReference secretRef = 4; +} + +// ClientIPConfig represents the configurations of Client IP based session affinity. +message ClientIPConfig { + // timeoutSeconds specifies the seconds of ClientIP type session sticky time. + // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + // Default value is 10800(for 3 hours). + // +optional + optional int32 timeoutSeconds = 1; +} + +// Information about the condition of a component. +message ComponentCondition { + // Type of condition for a component. + // Valid value: "Healthy" + optional string type = 1; + + // Status of the condition for a component. + // Valid values for "Healthy": "True", "False", or "Unknown". + optional string status = 2; + + // Message about the condition for a component. + // For example, information about a health check. + // +optional + optional string message = 3; + + // Condition error code for a component. + // For example, a health check error code. + // +optional + optional string error = 4; +} + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +message ComponentStatus { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // List of component conditions observed
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated ComponentCondition conditions = 2;
+}
+
+// Status of all the conditions for the component as a list of ComponentStatus objects.
+message ComponentStatusList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of ComponentStatus objects.
+ repeated ComponentStatus items = 2;
+}
+
+// ConfigMap holds configuration data for pods to consume.
+message ConfigMap {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Immutable, if set to true, ensures that data stored in the ConfigMap cannot
+ // be updated (only object metadata can be modified).
+ // If not set to true, the field can be modified at any time.
+ // Defaulted to nil.
+ // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.
+ // +optional
+ optional bool immutable = 4;
+
+ // Data contains the configuration data.
+ // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+ // Values with non-UTF-8 byte sequences must use the BinaryData field.
+ // The keys stored in Data must not overlap with the keys in
+ // the BinaryData field, this is enforced during validation process.
+ // +optional
+ map<string, string> data = 2;
+
+ // BinaryData contains the binary data.
+ // Each key must consist of alphanumeric characters, '-', '_' or '.'.
+ // BinaryData can contain byte sequences that are not in the UTF-8 range.
+ // The keys stored in BinaryData must not overlap with the ones in
+ // the Data field, this is enforced during validation process.
+ // Using this field will require 1.10+ apiserver and
+ // kubelet.
+ // +optional
+ map<string, bytes> binaryData = 3;
+}
+
+// ConfigMapEnvSource selects a ConfigMap to populate the environment
+// variables with.
+//
+// The contents of the target ConfigMap's Data field will represent the
+// key-value pairs as environment variables.
+message ConfigMapEnvSource {
+ // The ConfigMap to select from.
+ optional LocalObjectReference localObjectReference = 1;
+
+ // Specify whether the ConfigMap must be defined
+ // +optional
+ optional bool optional = 2;
+}
+
+// Selects a key from a ConfigMap.
+message ConfigMapKeySelector {
+ // The ConfigMap to select from.
+ optional LocalObjectReference localObjectReference = 1;
+
+ // The key to select.
+ optional string key = 2;
+
+ // Specify whether the ConfigMap or its key must be defined
+ // +optional
+ optional bool optional = 3;
+}
+
+// ConfigMapList is a resource containing a list of ConfigMap objects.
+message ConfigMapList {
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is the list of ConfigMaps.
+ repeated ConfigMap items = 2;
+}
+
+// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.
+message ConfigMapNodeConfigSource { + // Namespace is the metadata.namespace of the referenced ConfigMap. + // This field is required in all cases. + optional string namespace = 1; + + // Name is the metadata.name of the referenced ConfigMap. + // This field is required in all cases. + optional string name = 2; + + // UID is the metadata.UID of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + optional string uid = 3; + + // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + optional string resourceVersion = 4; + + // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure + // This field is required in all cases. + optional string kubeletConfigKey = 5; +} + +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +message ConfigMapProjection { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Specify whether the ConfigMap or its keys must be defined + // +optional + optional bool optional = 4; +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +message ConfigMapVolumeSource { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
+ // +optional + optional int32 defaultMode = 3; + + // Specify whether the ConfigMap or its keys must be defined + // +optional + optional bool optional = 4; +} + +// A single application container that you want to run within a pod. +message Container { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + optional string name = 1; + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. + // +optional + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + optional string workingDir = 5; + + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. 
+ // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EnvVar env = 7; + + // Compute Resources required by this container. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + optional ResourceRequirements resources = 8; + + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + repeated VolumeMount volumeMounts = 9; + + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + repeated VolumeDevice volumeDevices = 21; + + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe livenessProbe = 10; + + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe readinessProbe = 11; + + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is a beta feature enabled by the StartupProbe feature flag. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional Probe startupProbe = 22; + + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + optional Lifecycle lifecycle = 12; + + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + optional string terminationMessagePath = 13; + + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + optional string terminationMessagePolicy = 20; + + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + optional string imagePullPolicy = 14; + + // Security options the pod should run with. + // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + optional SecurityContext securityContext = 15; + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + optional bool stdin = 16; + + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + optional bool stdinOnce = 17; + + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + optional bool tty = 18; +} + +// Describe a container image +message ContainerImage { + // Names by which this image is known. + // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + repeated string names = 1; + + // The size of the image in bytes. + // +optional + optional int64 sizeBytes = 2; +} + +// ContainerPort represents a network port in a single container. +message ContainerPort { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + optional string name = 1; + + // Number of port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + // +optional + optional int32 hostPort = 2; + + // Number of port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + optional int32 containerPort = 3; + + // Protocol for port. Must be UDP, TCP, or SCTP. + // Defaults to "TCP". + // +optional + optional string protocol = 4; + + // What host IP to bind the external port to. + // +optional + optional string hostIP = 5; +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +message ContainerState { + // Details about a waiting container + // +optional + optional ContainerStateWaiting waiting = 1; + + // Details about a running container + // +optional + optional ContainerStateRunning running = 2; + + // Details about a terminated container + // +optional + optional ContainerStateTerminated terminated = 3; +} + +// ContainerStateRunning is a running state of a container. 
+message ContainerStateRunning {
+ // Time at which the container was last (re-)started
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1;
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+message ContainerStateTerminated {
+ // Exit status from the last termination of the container
+ optional int32 exitCode = 1;
+
+ // Signal from the last termination of the container
+ // +optional
+ optional int32 signal = 2;
+
+ // (brief) reason from the last termination of the container
+ // +optional
+ optional string reason = 3;
+
+ // Message regarding the last termination of the container
+ // +optional
+ optional string message = 4;
+
+ // Time at which previous execution of the container started
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5;
+
+ // Time at which the container last terminated
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6;
+
+ // Container's ID in the format 'docker://<container_id>'
+ // +optional
+ optional string containerID = 7;
+}
+
+// ContainerStateWaiting is a waiting state of a container.
+message ContainerStateWaiting {
+ // (brief) reason the container is not yet running.
+ // +optional
+ optional string reason = 1;
+
+ // Message regarding why the container is not yet running.
+ // +optional
+ optional string message = 2;
+}
+
+// ContainerStatus contains details for the current status of this container.
+message ContainerStatus {
+ // This must be a DNS_LABEL. Each container in a pod must have a unique name.
+ // Cannot be updated.
+ optional string name = 1;
+
+ // Details about the container's current condition.
+ // +optional
+ optional ContainerState state = 2;
+
+ // Details about the container's last termination condition.
+ // +optional
+ optional ContainerState lastState = 3;
+
+ // Specifies whether the container has passed its readiness probe.
+ optional bool ready = 4;
+
+ // The number of times the container has been restarted, currently based on
+ // the number of dead containers that have not yet been removed.
+ // Note that this is calculated from dead containers. But those containers are subject to
+ // garbage collection. This value will get capped at 5 by GC.
+ optional int32 restartCount = 5;
+
+ // The image the container is running.
+ // More info: https://kubernetes.io/docs/concepts/containers/images
+ // TODO(dchen1107): Which image the container is running with?
+ optional string image = 6;
+
+ // ImageID of the container's image.
+ optional string imageID = 7;
+
+ // Container's ID in the format 'docker://<container_id>'.
+ // +optional
+ optional string containerID = 8;
+
+ // Specifies whether the container has passed its startup probe.
+ // Initialized as false, becomes true after startupProbe is considered successful.
+ // Resets to false when the container is restarted, or if kubelet loses state temporarily.
+ // Is always true when no startupProbe is defined.
+ // +optional
+ optional bool started = 9;
+}
+
+// DaemonEndpoint contains information about a single Daemon endpoint.
+message DaemonEndpoint {
+ // Port number of the given endpoint.
+ optional int32 Port = 1;
+}
+
+// Represents downward API info for projecting into a projected volume.
+// Note that this is identical to a downwardAPI volume source without the default
+// mode.
+message DownwardAPIProjection { + // Items is a list of DownwardAPIVolume file + // +optional + repeated DownwardAPIVolumeFile items = 1; +} + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +message DownwardAPIVolumeFile { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + optional string path = 1; + + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + optional ObjectFieldSelector fieldRef = 2; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + optional ResourceFieldSelector resourceFieldRef = 3; + + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 mode = 4; +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +message DownwardAPIVolumeSource { + // Items is a list of downward API volume file + // +optional + repeated DownwardAPIVolumeFile items = 1; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +message EmptyDirVolumeSource { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + optional string medium = 1; + + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; +} + +// EndpointAddress is a tuple that describes single IP address. +message EndpointAddress { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast ((224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + optional string ip = 1; + + // The Hostname of this endpoint + // +optional + optional string hostname = 3; + + // Optional: Node hosting this endpoint. 
+ // This can be used to determine endpoints local to a node.
+ // +optional
+ optional string nodeName = 4;
+
+ // Reference to object providing the endpoint.
+ // +optional
+ optional ObjectReference targetRef = 2;
+}
+
+// EndpointPort is a tuple that describes a single port.
+message EndpointPort {
+ // The name of this port. This must match the 'name' field in the
+ // corresponding ServicePort.
+ // Must be a DNS_LABEL.
+ // Optional only if one port is defined.
+ // +optional
+ optional string name = 1;
+
+ // The port number of the endpoint.
+ optional int32 port = 2;
+
+ // The IP protocol for this port.
+ // Must be UDP, TCP, or SCTP.
+ // Default is TCP.
+ // +optional
+ optional string protocol = 3;
+
+ // The application protocol for this port.
+ // This field follows standard Kubernetes label syntax.
+ // Un-prefixed names are reserved for IANA standard service names (as per
+ // RFC-6335 and http://www.iana.org/assignments/service-names).
+ // Non-standard protocols should use prefixed names such as
+ // mycompany.com/my-custom-protocol.
+ // Field can be enabled with ServiceAppProtocol feature gate.
+ // +optional
+ optional string appProtocol = 4;
+}
+
+// EndpointSubset is a group of addresses with a common set of ports. The
+// expanded set of endpoints is the Cartesian product of Addresses x Ports.
+// For example, given:
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// }
+// The resulting set of endpoints can be viewed as:
+// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],
+// b: [ 10.10.1.1:309, 10.10.2.2:309 ]
+message EndpointSubset {
+ // IP addresses which offer the related ports that are marked as ready. These endpoints
+ // should be considered safe for load balancers and clients to utilize.
+ // +optional
+ repeated EndpointAddress addresses = 1;
+
+ // IP addresses which offer the related ports but are not currently marked as ready
+ // because they have not yet finished starting, have recently failed a readiness check,
+ // or have recently failed a liveness check.
+ // +optional
+ repeated EndpointAddress notReadyAddresses = 2;
+
+ // Port numbers available on the related IP addresses.
+ // +optional
+ repeated EndpointPort ports = 3;
+}
+
+// Endpoints is a collection of endpoints that implement the actual service. Example:
+// Name: "mysvc",
+// Subsets: [
+// {
+// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}],
+// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}]
+// },
+// {
+// Addresses: [{"ip": "10.10.3.3"}],
+// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}]
+// },
+// ]
+message Endpoints {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // The set of all endpoints is the union of all subsets. Addresses are placed into
+ // subsets according to the IPs they share. A single address with multiple ports,
+ // some of which are ready and some of which are not (because they come from
+ // different containers) will result in the address being displayed in different
+ // subsets for the different ports. No address will appear in both Addresses and
+ // NotReadyAddresses in the same subset.
+ // Sets of addresses and ports that comprise a service.
+ // +optional
+ repeated EndpointSubset subsets = 2;
+}
+
+// EndpointsList is a list of endpoints.
+message EndpointsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of endpoints. + repeated Endpoints items = 2; +} + +// EnvFromSource represents the source of a set of ConfigMaps +message EnvFromSource { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + optional string prefix = 1; + + // The ConfigMap to select from + // +optional + optional ConfigMapEnvSource configMapRef = 2; + + // The Secret to select from + // +optional + optional SecretEnvSource secretRef = 3; +} + +// EnvVar represents an environment variable present in a Container. +message EnvVar { + // Name of the environment variable. Must be a C_IDENTIFIER. + optional string name = 1; + + // Variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // Defaults to "". + // +optional + optional string value = 2; + + // Source for the environment variable's value. Cannot be used if value is not empty. + // +optional + optional EnvVarSource valueFrom = 3; +} + +// EnvVarSource represents a source for the value of an EnvVar. +message EnvVarSource { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + // +optional + optional ObjectFieldSelector fieldRef = 1; + + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + // +optional + optional ResourceFieldSelector resourceFieldRef = 2; + + // Selects a key of a ConfigMap. + // +optional + optional ConfigMapKeySelector configMapKeyRef = 3; + + // Selects a key of a secret in the pod's namespace + // +optional + optional SecretKeySelector secretKeyRef = 4; +} + +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +message EphemeralContainer { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. 
+ optional EphemeralContainerCommon ephemeralContainerCommon = 1; + + // If set, the name of the container from PodSpec that this ephemeral container targets. + // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + optional string targetContainerName = 2; +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +message EphemeralContainerCommon { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + optional string name = 1; + + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + optional string image = 2; + + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string command = 3; + + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + repeated string args = 4; + + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + optional string workingDir = 5; + + // Ports are not allowed for ephemeral containers. + repeated ContainerPort ports = 6; + + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + repeated EnvFromSource envFrom = 19; + + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EnvVar env = 7; + + // Resources are not allowed for ephemeral containers. 
+ // Ephemeral containers use spare resources
+ // already allocated to the pod.
+ // +optional
+ optional ResourceRequirements resources = 8;
+
+ // Pod volumes to mount into the container's filesystem.
+ // Cannot be updated.
+ // +optional
+ // +patchMergeKey=mountPath
+ // +patchStrategy=merge
+ repeated VolumeMount volumeMounts = 9;
+
+ // volumeDevices is the list of block devices to be used by the container.
+ // +patchMergeKey=devicePath
+ // +patchStrategy=merge
+ // +optional
+ repeated VolumeDevice volumeDevices = 21;
+
+ // Probes are not allowed for ephemeral containers.
+ // +optional
+ optional Probe livenessProbe = 10;
+
+ // Probes are not allowed for ephemeral containers.
+ // +optional
+ optional Probe readinessProbe = 11;
+
+ // Probes are not allowed for ephemeral containers.
+ // +optional
+ optional Probe startupProbe = 22;
+
+ // Lifecycle is not allowed for ephemeral containers.
+ // +optional
+ optional Lifecycle lifecycle = 12;
+
+ // Optional: Path at which the file to which the container's termination message
+ // will be written is mounted into the container's filesystem.
+ // Message written is intended to be brief final status, such as an assertion failure message.
+ // Will be truncated by the node if greater than 4096 bytes. The total message length across
+ // all containers will be limited to 12kb.
+ // Defaults to /dev/termination-log.
+ // Cannot be updated.
+ // +optional
+ optional string terminationMessagePath = 13;
+
+ // Indicate how the termination message should be populated. File will use the contents of
+ // terminationMessagePath to populate the container status message on both success and failure.
+ // FallbackToLogsOnError will use the last chunk of container log output if the termination
+ // message file is empty and the container exited with an error.
+ // The log output is limited to 2048 bytes or 80 lines, whichever is smaller.
+ // Defaults to File.
+ // Cannot be updated.
+ // +optional
+ optional string terminationMessagePolicy = 20;
+
+ // Image pull policy.
+ // One of Always, Never, IfNotPresent.
+ // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images
+ // +optional
+ optional string imagePullPolicy = 14;
+
+ // SecurityContext is not allowed for ephemeral containers.
+ // +optional
+ optional SecurityContext securityContext = 15;
+
+ // Whether this container should allocate a buffer for stdin in the container runtime. If this
+ // is not set, reads from stdin in the container will always result in EOF.
+ // Default is false.
+ // +optional
+ optional bool stdin = 16;
+
+ // Whether the container runtime should close the stdin channel after it has been opened by
+ // a single attach. When stdin is true the stdin stream will remain open across multiple attach
+ // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the
+ // first client attaches to stdin, and then remains open and accepts data until the client disconnects,
+ // at which time stdin is closed and remains closed until the container is restarted. If this
+ // flag is false, a container processes that reads from stdin will never receive an EOF.
+ // Default is false
+ // +optional
+ optional bool stdinOnce = 17;
+
+ // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true.
+ // Default is false.
+ // +optional + optional bool tty = 18; +} + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +message EphemeralContainers { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 2; +} + +// Event is a report of an event somewhere in the cluster. +message Event { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The object that this event is about. + optional ObjectReference involvedObject = 2; + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. + // +optional + optional string reason = 3; + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + optional string message = 4; + + // The component reporting this event. Should be a short machine understandable string. + // +optional + optional EventSource source = 5; + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; + + // The time at which the most recent occurrence of this event was recorded. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; + + // The number of times this event has occurred. + // +optional + optional int32 count = 8; + + // Type of this event (Normal, Warning), new types could be added in the future + // +optional + optional string type = 9; + + // Time when this Event was first observed. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; + + // Data about the Event series this event represents or nil if it's a singleton Event. + // +optional + optional EventSeries series = 11; + + // What action was taken/failed regarding to the Regarding object. + // +optional + optional string action = 12; + + // Optional secondary object for more complex actions. + // +optional + optional ObjectReference related = 13; + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + optional string reportingComponent = 14; + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + optional string reportingInstance = 15; +} + +// EventList is a list of events. +message EventList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of events + repeated Event items = 2; +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continuously for some time. 
+message EventSeries {
+ // Number of occurrences in this series up to the last heartbeat time
+ optional int32 count = 1;
+
+ // Time of the last occurrence observed
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
+
+ // State of this Series: Ongoing or Finished
+ // Deprecated. Planned removal for 1.18
+ optional string state = 3;
+}
+
+// EventSource contains information for an event.
+message EventSource {
+ // Component from which the event is generated.
+ // +optional
+ optional string component = 1;
+
+ // Node name on which the event is generated.
+ // +optional
+ optional string host = 2;
+}
+
+// ExecAction describes a "run in container" action.
+message ExecAction {
+ // Command is the command line to execute inside the container, the working directory for the
+ // command is root ('/') in the container's filesystem. The command is simply exec'd, it is
+ // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use
+ // a shell, you need to explicitly call out to that shell.
+ // Exit status of 0 is treated as live/healthy and non-zero is unhealthy.
+ // +optional
+ repeated string command = 1;
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once.
+// Fibre Channel volumes support ownership management and SELinux relabeling.
+message FCVolumeSource {
+ // Optional: FC target worldwide names (WWNs)
+ // +optional
+ repeated string targetWWNs = 1;
+
+ // Optional: FC target lun number
+ // +optional
+ optional int32 lun = 2;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ optional string fsType = 3;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ optional bool readOnly = 4;
+
+ // Optional: FC volume world wide identifiers (wwids)
+ // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.
+ // +optional
+ repeated string wwids = 5;
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+message FlexPersistentVolumeSource {
+ // Driver is the name of the driver to use for this volume.
+ optional string driver = 1;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ // +optional
+ optional string fsType = 2;
+
+ // Optional: SecretRef is reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ // +optional
+ optional SecretReference secretRef = 3;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ optional bool readOnly = 4;
+
+ // Optional: Extra command options if any.
+ // +optional
+ map<string, string> options = 5;
+}
+
+// FlexVolume represents a generic volume resource that is
+// provisioned/attached using an exec based plugin.
+message FlexVolumeSource {
+ // Driver is the name of the driver to use for this volume.
+ optional string driver = 1;
+
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ // +optional
+ optional string fsType = 2;
+
+ // Optional: SecretRef is reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. If the secret object
+ // contains more than one secret, all secrets are passed to the plugin
+ // scripts.
+ // +optional
+ optional LocalObjectReference secretRef = 3;
+
+ // Optional: Defaults to false (read/write). ReadOnly here will force
+ // the ReadOnly setting in VolumeMounts.
+ // +optional
+ optional bool readOnly = 4;
+
+ // Optional: Extra command options if any.
+ // +optional
+ map<string, string> options = 5;
+}
+
+// Represents a Flocker volume mounted by the Flocker agent.
+// One and only one of datasetName and datasetUUID should be set.
+// Flocker volumes do not support ownership management or SELinux relabeling.
+message FlockerVolumeSource {
+ // Name of the dataset stored as metadata -> name on the dataset for Flocker
+ // should be considered as deprecated
+ // +optional
+ optional string datasetName = 1;
+
+ // UUID of the dataset. This is unique identifier of a Flocker dataset
+ // +optional
+ optional string datasetUUID = 2;
+}
+
+// Represents a Persistent Disk resource in Google Compute Engine.
+//
+// A GCE PD must exist before mounting to a container. The disk must
+// also be in the same GCE project and zone as the kubelet. A GCE PD
+// can only be mounted as read/write once or read-only many times. GCE
+// PDs support ownership management and SELinux relabeling.
+message GCEPersistentDiskVolumeSource {
+ // Unique name of the PD resource in GCE. Used to identify the disk in GCE.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ optional string pdName = 1;
+
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ optional string fsType = 2;
+
+ // The partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // +optional
+ optional int32 partition = 3;
+
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // +optional
+ optional bool readOnly = 4;
+}
+
+// Represents a volume that is populated with the contents of a git repository.
+// Git repo volumes do not support ownership management.
+// Git repo volumes support SELinux relabeling.
+//
+// DEPRECATED: GitRepo is deprecated.
To provision a container with a git repo, mount an +// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +// into the Pod's container. +message GitRepoVolumeSource { + // Repository URL + optional string repository = 1; + + // Commit hash for the specified revision. + // +optional + optional string revision = 2; + + // Target directory name. + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + optional string directory = 3; +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +message GlusterfsPersistentVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + optional bool readOnly = 3; + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + optional string endpointsNamespace = 4; +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +message GlusterfsVolumeSource { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + optional string endpoints = 1; + + // Path is the Glusterfs volume path. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + optional string path = 2; + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + optional bool readOnly = 3; +} + +// HTTPGetAction describes an action based on HTTP Get requests. +message HTTPGetAction { + // Path to access on the HTTP server. + // +optional + optional string path = 1; + + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; + + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + optional string host = 3; + + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + optional string scheme = 4; + + // Custom headers to set in the request. HTTP allows repeated headers. 
+ // +optional + repeated HTTPHeader httpHeaders = 5; +} + +// HTTPHeader describes a custom header to be used in HTTP probes +message HTTPHeader { + // The header field name + optional string name = 1; + + // The header field value + optional string value = 2; +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +message Handler { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + optional ExecAction exec = 1; + + // HTTPGet specifies the http request to perform. + // +optional + optional HTTPGetAction httpGet = 2; + + // TCPSocket specifies an action involving a TCP port. + // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + optional TCPSocketAction tcpSocket = 3; +} + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +message HostAlias { + // IP address of the host file entry. + optional string ip = 1; + + // Hostnames for the above IP address. + repeated string hostnames = 2; +} + +// Represents a host path mapped into a pod. +// Host path volumes do not support ownership management or SELinux relabeling. +message HostPathVolumeSource { + // Path of the directory on the host. + // If the path is a symlink, it will follow the link to the real path. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + optional string path = 1; + + // Type for HostPath Volume + // Defaults to "" + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + optional string type = 2; +} + +// ISCSIPersistentVolumeSource represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +message ISCSIPersistentVolumeSource { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + optional string targetPortal = 1; + + // Target iSCSI Qualified Name. + optional string iqn = 2; + + // iSCSI Target Lun number. + optional int32 lun = 3; + + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + optional string iscsiInterface = 4; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 5; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + optional bool readOnly = 6; + + // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + // +optional + repeated string portals = 7; + + // whether support iSCSI Discovery CHAP authentication + // +optional + optional bool chapAuthDiscovery = 8; + + // whether support iSCSI Session CHAP authentication + // +optional + optional bool chapAuthSession = 11; + + // CHAP Secret for iSCSI target and initiator authentication + // +optional + optional SecretReference secretRef = 10; + + // Custom iSCSI Initiator Name. 
+ // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ // +optional
+ optional string initiatorName = 12;
+}
+
+// Represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+message ISCSIVolumeSource {
+ // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ optional string targetPortal = 1;
+
+ // Target iSCSI Qualified Name.
+ optional string iqn = 2;
+
+ // iSCSI Target Lun number.
+ optional int32 lun = 3;
+
+ // iSCSI Interface Name that uses an iSCSI transport.
+ // Defaults to 'default' (tcp).
+ // +optional
+ optional string iscsiInterface = 4;
+
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ optional string fsType = 5;
+
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // +optional
+ optional bool readOnly = 6;
+
+ // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ // +optional
+ repeated string portals = 7;
+
+ // whether support iSCSI Discovery CHAP authentication
+ // +optional
+ optional bool chapAuthDiscovery = 8;
+
+ // whether support iSCSI Session CHAP authentication
+ // +optional
+ optional bool chapAuthSession = 11;
+
+ // CHAP Secret for iSCSI target and initiator authentication
+ // +optional
+ optional LocalObjectReference secretRef = 10;
+
+ // Custom iSCSI Initiator Name.
+ // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ // +optional
+ optional string initiatorName = 12;
+}
+
+// Maps a string key to a path within a volume.
+message KeyToPath {
+ // The key to project.
+ optional string key = 1;
+
+ // The relative path of the file to map the key to.
+ // May not be an absolute path.
+ // May not contain the path element '..'.
+ // May not start with the string '..'.
+ optional string path = 2;
+
+ // Optional: mode bits to use on this file, must be a value between 0
+ // and 0777. If not specified, the volume defaultMode will be used.
+ // This might be in conflict with other options that affect the file
+ // mode, like fsGroup, and the result can be other mode bits set.
+ // +optional
+ optional int32 mode = 3;
+}
+
+// Lifecycle describes actions that the management system should take in response to container lifecycle
+// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks
+// until the action is complete, unless the container process fails, in which case the handler is aborted.
+message Lifecycle {
+ // PostStart is called immediately after a container is created. If the handler fails,
+ // the container is terminated and restarted according to its restart policy.
+ // Other management of the container blocks until the hook completes.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ // +optional
+ optional Handler postStart = 1;
+
+ // PreStop is called immediately before a container is terminated due to an
+ // API request or management event such as liveness/startup probe failure,
+ // preemption, resource contention, etc. The handler is not called if the
+ // container crashes or exits. The reason for termination is passed to the
+ // handler. The Pod's termination grace period countdown begins before the
+ // PreStop hooked is executed. Regardless of the outcome of the handler, the
+ // container will eventually terminate within the Pod's termination grace
+ // period. Other management of the container blocks until the hook completes
+ // or until the termination grace period is reached.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ // +optional
+ optional Handler preStop = 2;
+}
+
+// LimitRange sets resource usage limits for each kind of resource in a Namespace.
+message LimitRange {
+ // Standard object's metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Spec defines the limits enforced.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // +optional
+ optional LimitRangeSpec spec = 2;
+}
+
+// LimitRangeItem defines a min/max usage limit for any resource that matches on kind.
+message LimitRangeItem {
+ // Type of resource that this limit applies to.
+ optional string type = 1;
+
+ // Max usage constraints on this kind by resource name.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> max = 2;
+
+ // Min usage constraints on this kind by resource name.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> min = 3;
+
+ // Default resource requirement limit value by resource name if resource limit is omitted.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> default = 4;
+
+ // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> defaultRequest = 5;
+
+ // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> maxLimitRequestRatio = 6;
+}
+
+// LimitRangeList is a list of LimitRange items.
+message LimitRangeList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of LimitRange objects.
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
+ repeated LimitRange items = 2;
+}
+
+// LimitRangeSpec defines a min/max usage limit for resources that match on kind.
+message LimitRangeSpec {
+ // Limits is the list of LimitRangeItem objects that are enforced.
+ repeated LimitRangeItem limits = 1;
+}
+
+// List holds a list of objects, which may not be known by the server.
+message List {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of objects + repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +message LoadBalancerIngress { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + optional string ip = 1; + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + optional string hostname = 2; +} + +// LoadBalancerStatus represents the status of a load-balancer. +message LoadBalancerStatus { + // Ingress is a list containing ingress points for the load-balancer. + // Traffic intended for the service should be sent to these ingress points. + // +optional + repeated LoadBalancerIngress ingress = 1; +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +message LocalObjectReference { + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + optional string name = 1; +} + +// Local represents directly-attached storage with node affinity (Beta feature) +message LocalVolumeSource { + // The full path to the volume on the node. + // It can be either a directory or block device (disk, partition, ...). + optional string path = 1; + + // Filesystem type to mount. + // It applies only when the Path is a block device. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. + // +optional + optional string fsType = 2; +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +message NFSVolumeSource { + // Server is the hostname or IP address of the NFS server. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + optional string server = 1; + + // Path that is exported by the NFS server. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + optional string path = 2; + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + optional bool readOnly = 3; +} + +// Namespace provides a scope for Names. +// Use of multiple namespaces is optional. +message Namespace { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of the Namespace. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional NamespaceSpec spec = 2; + + // Status describes the current status of a Namespace. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional NamespaceStatus status = 3; +} + +// NamespaceCondition contains details about state of namespace. +message NamespaceCondition { + // Type of namespace controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // +optional + optional string reason = 5; + + // +optional + optional string message = 6; +} + +// NamespaceList is a list of Namespaces. +message NamespaceList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Namespace objects in the list. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + repeated Namespace items = 2; +} + +// NamespaceSpec describes the attributes on a Namespace. +message NamespaceSpec { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ + // +optional + repeated string finalizers = 1; +} + +// NamespaceStatus is information about the current status of a Namespace. +message NamespaceStatus { + // Phase is the current lifecycle phase of the namespace. + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ + // +optional + optional string phase = 1; + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated NamespaceCondition conditions = 2; +} + +// Node is a worker node in Kubernetes. +// Each node will have a unique identifier in the cache (i.e. in etcd). +message Node { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of a node. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional NodeSpec spec = 2; + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional NodeStatus status = 3; +} + +// NodeAddress contains information for the node's address. +message NodeAddress { + // Node address type, one of Hostname, ExternalIP or InternalIP. + optional string type = 1; + + // The node address. + optional string address = 2; +} + +// Node affinity is a group of node affinity scheduling rules. +message NodeAffinity { + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. 
+ // +optional + optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; +} + +// NodeCondition contains condition information for a node. +message NodeCondition { + // Type of node condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time we got an update on a given condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; + + // Last time the condition transit from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // (brief) reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. +message NodeConfigSource { + // ConfigMap is a reference to a Node's ConfigMap + optional ConfigMapNodeConfigSource configMap = 2; +} + +// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. +message NodeConfigStatus { + // Assigned reports the checkpointed config the node will try to use. + // When Node.Spec.ConfigSource is updated, the node checkpoints the associated + // config payload to local disk, along with a record indicating intended + // config. The node refers to this record to choose its config checkpoint, and + // reports this record in Assigned. Assigned only updates in the status after + // the record has been checkpointed to disk. When the Kubelet is restarted, + // it tries to make the Assigned config the Active config by loading and + // validating the checkpointed payload identified by Assigned. + // +optional + optional NodeConfigSource assigned = 1; + + // Active reports the checkpointed config the node is actively using. + // Active will represent either the current version of the Assigned config, + // or the current LastKnownGood config, depending on whether attempting to use the + // Assigned config results in an error. + // +optional + optional NodeConfigSource active = 2; + + // LastKnownGood reports the checkpointed config the node will fall back to + // when it encounters an error attempting to use the Assigned config. + // The Assigned config becomes the LastKnownGood config when the node determines + // that the Assigned config is stable and correct. + // This is currently implemented as a 10-minute soak period starting when the local + // record of Assigned config is updated. If the Assigned config is Active at the end + // of this period, it becomes the LastKnownGood. 
Note that if Spec.ConfigSource is
+ // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil,
+ // because the local default config is always assumed good.
+ // You should not make assumptions about the node's method of determining config stability
+ // and correctness, as this may change or become configurable in the future.
+ // +optional
+ optional NodeConfigSource lastKnownGood = 3;
+
+ // Error describes any problems reconciling the Spec.ConfigSource to the Active config.
+ // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned
+ // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting
+ // to load or validate the Assigned config, etc.
+ // Errors may occur at different points while syncing config. Earlier errors (e.g. download or
+ // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across
+ // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in
+ // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error
+ // by fixing the config assigned in Spec.ConfigSource.
+ // You can find additional information for debugging by searching the error message in the Kubelet log.
+ // Error is a human-readable description of the error state; machines can check whether or not Error
+ // is empty, but should not rely on the stability of the Error text across Kubelet versions.
+ // +optional
+ optional string error = 4;
+}
+
+// NodeDaemonEndpoints lists ports opened by daemons running on the Node.
+message NodeDaemonEndpoints {
+ // Endpoint on which Kubelet is listening.
+ // +optional
+ optional DaemonEndpoint kubeletEndpoint = 1;
+}
+
+// NodeList is the whole list of all Nodes which have been registered with master.
+message NodeList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of nodes
+ repeated Node items = 2;
+}
+
+// NodeProxyOptions is the query options to a Node's proxy call.
+message NodeProxyOptions {
+ // Path is the URL path to use for the current proxy request to node.
+ // +optional
+ optional string path = 1;
+}
+
+// NodeResources is an object for conveying resource information about a node.
+// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.
+message NodeResources {
+ // Capacity represents the available resources of a node
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+}
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+message NodeSelector {
+ // Required. A list of node selector terms. The terms are ORed.
+ repeated NodeSelectorTerm nodeSelectorTerms = 1;
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+message NodeSelectorRequirement {
+ // The label key that the selector applies to.
+ optional string key = 1;
+
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
+ optional string operator = 2;
+
+ // An array of string values. If the operator is In or NotIn,
+ // the values array must be non-empty.
If the operator is Exists or DoesNotExist,
+ // the values array must be empty. If the operator is Gt or Lt, the values
+ // array must have a single element, which will be interpreted as an integer.
+ // This array is replaced during a strategic merge patch.
+ // +optional
+ repeated string values = 3;
+}
+
+// A null or empty node selector term matches no objects. The requirements of
+// them are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+message NodeSelectorTerm {
+ // A list of node selector requirements by node's labels.
+ // +optional
+ repeated NodeSelectorRequirement matchExpressions = 1;
+
+ // A list of node selector requirements by node's fields.
+ // +optional
+ repeated NodeSelectorRequirement matchFields = 2;
+}
+
+// NodeSpec describes the attributes that a node is created with.
+message NodeSpec {
+ // PodCIDR represents the pod IP range assigned to the node.
+ // +optional
+ optional string podCIDR = 1;
+
+ // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this
+ // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for
+ // each of IPv4 and IPv6.
+ // +optional
+ // +patchStrategy=merge
+ repeated string podCIDRs = 7;
+
+ // ID of the node assigned by the cloud provider in the format: <ProviderName>://<ProviderSpecificNodeID>
+ // +optional
+ optional string providerID = 3;
+
+ // Unschedulable controls node schedulability of new pods. By default, node is schedulable.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration
+ // +optional
+ optional bool unschedulable = 4;
+
+ // If specified, the node's taints.
+ // +optional
+ repeated Taint taints = 5;
+
+ // If specified, the source to get node configuration from
+ // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field
+ // +optional
+ optional NodeConfigSource configSource = 6;
+
+ // Deprecated. Not all kubelets will set this field. Remove field after 1.13.
+ // see: https://issues.k8s.io/61966
+ // +optional
+ optional string externalID = 2;
+}
+
+// NodeStatus is information about the current status of a node.
+message NodeStatus {
+ // Capacity represents the total resources of a node.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+
+ // Allocatable represents the resources of a node that are available for scheduling.
+ // Defaults to Capacity.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> allocatable = 2;
+
+ // NodePhase is the recently observed lifecycle phase of the node.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase
+ // The field is never populated, and now is deprecated.
+ // +optional
+ optional string phase = 3;
+
+ // Conditions is an array of current observed node conditions.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated NodeCondition conditions = 4;
+
+ // List of addresses reachable to the node.
+ // Queried from cloud provider, if available.
+ // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses
+ // Note: This field is declared as mergeable, but the merge key is not sufficiently
+ // unique, which can cause data corruption when it is merged. Callers should instead
+ // use a full-replacement patch. See http://pr.k8s.io/79391 for an example.
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated NodeAddress addresses = 5; + + // Endpoints of daemons running on the Node. + // +optional + optional NodeDaemonEndpoints daemonEndpoints = 6; + + // Set of ids/uuids to uniquely identify the node. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#info + // +optional + optional NodeSystemInfo nodeInfo = 7; + + // List of container images on this node + // +optional + repeated ContainerImage images = 8; + + // List of attachable volumes in use (mounted) by the node. + // +optional + repeated string volumesInUse = 9; + + // List of volumes that are attached to the node. + // +optional + repeated AttachedVolume volumesAttached = 10; + + // Status of the config assigned to the node via the dynamic Kubelet config feature. + // +optional + optional NodeConfigStatus config = 11; +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +message NodeSystemInfo { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + optional string machineID = 1; + + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + optional string systemUUID = 2; + + // Boot ID reported by the node. + optional string bootID = 3; + + // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). + optional string kernelVersion = 4; + + // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). + optional string osImage = 5; + + // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + optional string containerRuntimeVersion = 6; + + // Kubelet Version reported by the node. + optional string kubeletVersion = 7; + + // KubeProxy Version reported by the node. + optional string kubeProxyVersion = 8; + + // The Operating System reported by the node + optional string operatingSystem = 9; + + // The Architecture reported by the node + optional string architecture = 10; +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +message ObjectFieldSelector { + // Version of the schema the FieldPath is written in terms of, defaults to "v1". + // +optional + optional string apiVersion = 1; + + // Path of the field to select in the specified API version. + optional string fieldPath = 2; +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. +// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 4. 
The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity +// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple +// and the version of the actual struct is irrelevant. +// 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message ObjectReference { + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // Namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + optional string namespace = 2; + + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + optional string name = 3; + + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + // +optional + optional string uid = 4; + + // API version of the referent. + // +optional + optional string apiVersion = 5; + + // Specific resourceVersion to which this reference is made, if any. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + // For example, if the object reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + optional string fieldPath = 7; +} + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes +message PersistentVolume { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + optional PersistentVolumeSpec spec = 2; + + // Status represents the current information/status for the persistent volume. + // Populated by the system. + // Read-only. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + optional PersistentVolumeStatus status = 3; +} + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +message PersistentVolumeClaim { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired characteristics of a volume requested by a pod author. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimSpec spec = 2; + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimStatus status = 3; +} + +// PersistentVolumeClaimCondition contails details about state of pvc +message PersistentVolumeClaimCondition { + optional string type = 1; + + optional string status = 2; + + // Last time we probed the condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // Unique, this should be a short, machine understandable string that gives the reason + // for condition's last transition. If it reports "ResizeStarted" that means the underlying + // persistent volume is being resized. + // +optional + optional string reason = 5; + + // Human-readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +message PersistentVolumeClaimList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of persistent volume claims. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + repeated PersistentVolumeClaim items = 2; +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +message PersistentVolumeClaimSpec { + // AccessModes contains the desired access modes the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + repeated string accessModes = 1; + + // A label query over volumes to consider for binding. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; + + // Resources represents the minimum resources the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // +optional + optional ResourceRequirements resources = 2; + + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + optional string volumeName = 3; + + // Name of the StorageClass required by the claim. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1
+ // +optional
+ optional string storageClassName = 5;
+
+ // volumeMode defines what type of volume is required by the claim.
+ // Value of Filesystem is implied when not included in claim spec.
+ // +optional
+ optional string volumeMode = 6;
+
+ // This field can be used to specify either:
+ // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta)
+ // * An existing PVC (PersistentVolumeClaim)
+ // * An existing custom resource/object that implements data population (Alpha)
+ // In order to use VolumeSnapshot object types, the appropriate feature gate
+ // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource)
+ // If the provisioner or an external controller can support the specified data source,
+ // it will create a new volume based on the contents of the specified data source.
+ // If the specified data source is not supported, the volume will
+ // not be created and the failure will be reported as an event.
+ // In the future, we plan to support more data source types and the behavior
+ // of the provisioner may change.
+ // +optional
+ optional TypedLocalObjectReference dataSource = 7;
+}
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+message PersistentVolumeClaimStatus {
+ // Phase represents the current phase of PersistentVolumeClaim.
+ // +optional
+ optional string phase = 1;
+
+ // AccessModes contains the actual access modes the volume backing the PVC has.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ // +optional
+ repeated string accessModes = 2;
+
+ // Represents the actual resources of the underlying volume.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 3;
+
+ // Current Condition of persistent volume claim. If underlying persistent volume is being
+ // resized then the Condition will be set to 'ResizeStarted'.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ repeated PersistentVolumeClaimCondition conditions = 4;
+}
+
+// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
+// This volume finds the bound PV and mounts that volume for the pod. A
+// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another
+// type of volume that is owned by someone else (the system).
+message PersistentVolumeClaimVolumeSource {
+ // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims
+ optional string claimName = 1;
+
+ // Will force the ReadOnly setting in VolumeMounts.
+ // Default false.
+ // +optional
+ optional bool readOnly = 2;
+}
+
+// PersistentVolumeList is a list of PersistentVolume items.
+message PersistentVolumeList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of persistent volumes.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes
+ repeated PersistentVolume items = 2;
+}
+
+// PersistentVolumeSource is similar to VolumeSource but meant for the
+// administrator who creates PVs. Exactly one of its members must be set.
+message PersistentVolumeSource { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; + + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; + + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + optional HostPathVolumeSource hostPath = 3; + + // Glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. Provisioned by an admin. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // +optional + optional GlusterfsPersistentVolumeSource glusterfs = 4; + + // NFS represents an NFS mount on the host. Provisioned by an admin. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + optional NFSVolumeSource nfs = 5; + + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/rbd/README.md + // +optional + optional RBDPersistentVolumeSource rbd = 6; + + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // +optional + optional ISCSIPersistentVolumeSource iscsi = 7; + + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + optional CinderPersistentVolumeSource cinder = 8; + + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional CephFSPersistentVolumeSource cephfs = 9; + + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional FCVolumeSource fc = 10; + + // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // +optional + optional FlockerVolumeSource flocker = 11; + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + optional FlexPersistentVolumeSource flexVolume = 12; + + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional AzureFilePersistentVolumeSource azureFile = 13; + + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional QuobyteVolumeSource quobyte = 15; + + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
+ // +optional
+ optional AzureDiskVolumeSource azureDisk = 16;
+
+ // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine
+ optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17;
+
+ // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine
+ // +optional
+ optional PortworxVolumeSource portworxVolume = 18;
+
+ // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.
+ // +optional
+ optional ScaleIOPersistentVolumeSource scaleIO = 19;
+
+ // Local represents directly-attached storage with node affinity
+ // +optional
+ optional LocalVolumeSource local = 20;
+
+ // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod
+ // More info: https://examples.k8s.io/volumes/storageos/README.md
+ // +optional
+ optional StorageOSPersistentVolumeSource storageos = 21;
+
+ // CSI represents storage that is handled by an external CSI driver (Beta feature).
+ // +optional
+ optional CSIPersistentVolumeSource csi = 22;
+}
+
+// PersistentVolumeSpec is the specification of a persistent volume.
+message PersistentVolumeSpec {
+ // A description of the persistent volume's resources and capacity.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> capacity = 1;
+
+ // The actual volume backing the persistent volume.
+ optional PersistentVolumeSource persistentVolumeSource = 2;
+
+ // AccessModes contains all ways the volume can be mounted.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes
+ // +optional
+ repeated string accessModes = 3;
+
+ // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim.
+ // Expected to be non-nil when bound.
+ // claim.VolumeName is the authoritative bind between PV and PVC.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding
+ // +optional
+ optional ObjectReference claimRef = 4;
+
+ // What happens to a persistent volume when released from its claim.
+ // Valid options are Retain (default for manually created PersistentVolumes), Delete (default
+ // for dynamically provisioned PersistentVolumes), and Recycle (deprecated).
+ // Recycle must be supported by the volume plugin underlying this PersistentVolume.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming
+ // +optional
+ optional string persistentVolumeReclaimPolicy = 5;
+
+ // Name of StorageClass to which this persistent volume belongs. Empty value
+ // means that this volume does not belong to any StorageClass.
+ // +optional
+ optional string storageClassName = 6;
+
+ // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will
+ // simply fail if one is invalid.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options
+ // +optional
+ repeated string mountOptions = 7;
+
+ // volumeMode defines if a volume is intended to be used with a formatted filesystem
+ // or to remain in raw block state. Value of Filesystem is implied when not included in spec.
+ // +optional
+ optional string volumeMode = 8;
+
+ // NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
+ // This field influences the scheduling of pods that use this volume.
+ // +optional + optional VolumeNodeAffinity nodeAffinity = 9; +} + +// PersistentVolumeStatus is the current status of a persistent volume. +message PersistentVolumeStatus { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase + // +optional + optional string phase = 1; + + // A human-readable message indicating details about why the volume is in this state. + // +optional + optional string message = 2; + + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + optional string reason = 3; +} + +// Represents a Photon Controller persistent disk resource. +message PhotonPersistentDiskVolumeSource { + // ID that identifies Photon Controller persistent disk + optional string pdID = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; +} + +// Pod is a collection of containers that can run on a host. This resource is created +// by clients and scheduled onto hosts. +message Pod { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the pod. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PodSpec spec = 2; + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PodStatus status = 3; +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +message PodAffinity { + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; + + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. 
+ // +optional
+ repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running
+message PodAffinityTerm {
+ // A label query over a set of resources, in this case pods.
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1;
+
+ // namespaces specifies which namespaces the labelSelector applies to (matches against);
+ // null or empty list means "this pod's namespace"
+ // +optional
+ repeated string namespaces = 2;
+
+ // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ // whose value of the label with key topologyKey matches that of any node on which any of the
+ // selected pods is running.
+ // Empty topologyKey is not allowed.
+ optional string topologyKey = 3;
+}
+
+// Pod anti affinity is a group of inter pod anti affinity scheduling rules.
+message PodAntiAffinity {
+ // If the anti-affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the anti-affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to a pod label update), the
+ // system may or may not try to eventually evict the pod from its node.
+ // When there are multiple elements, the lists of nodes corresponding to each
+ // podAffinityTerm are intersected, i.e. all terms must be satisfied.
+ // +optional
+ repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1;
+
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2;
+}
+
+// PodAttachOptions is the query options to a Pod's remote attach call.
+// ---
+// TODO: merge w/ PodExecOptions below for stdin, stdout, etc
+// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY
+message PodAttachOptions {
+ // Stdin if true, redirects the standard input stream of the pod for this call.
+ // Defaults to false.
+ // +optional
+ optional bool stdin = 1;
+
+ // Stdout if true indicates that stdout is to be redirected for the attach call.
+ // Defaults to true.
+ // +optional
+ optional bool stdout = 2;
+
+ // Stderr if true indicates that stderr is to be redirected for the attach call.
+ // Defaults to true.
+ // +optional + optional bool stderr = 3; + + // TTY if true indicates that a tty will be allocated for the attach call. + // This is passed through the container runtime so the tty + // is allocated on the worker node by the container runtime. + // Defaults to false. + // +optional + optional bool tty = 4; + + // The container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + optional string container = 5; +} + +// PodCondition contains details for the current condition of this pod. +message PodCondition { + // Type is the type of the condition. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + optional string type = 1; + + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + optional string status = 2; + + // Last time we probed the condition. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; + + // Unique, one-word, CamelCase reason for the condition's last transition. + // +optional + optional string reason = 5; + + // Human-readable message indicating details about last transition. + // +optional + optional string message = 6; +} + +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +message PodDNSConfig { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + repeated string nameservers = 1; + + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + repeated string searches = 2; + + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + repeated PodDNSConfigOption options = 3; +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +message PodDNSConfigOption { + // Required. + optional string name = 1; + + // +optional + optional string value = 2; +} + +// PodExecOptions is the query options to a Pod's remote exec call. +// --- +// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +message PodExecOptions { + // Redirect the standard input stream of the pod for this call. + // Defaults to false. + // +optional + optional bool stdin = 1; + + // Redirect the standard output stream of the pod for this call. + // Defaults to true. + // +optional + optional bool stdout = 2; + + // Redirect the standard error stream of the pod for this call. + // Defaults to true. + // +optional + optional bool stderr = 3; + + // TTY if true indicates that a tty will be allocated for the exec call. + // Defaults to false. + // +optional + optional bool tty = 4; + + // Container in which to execute the command. 
+ // Defaults to only container if there is only one container in the pod. + // +optional + optional string container = 5; + + // Command is the remote command to execute. argv array. Not executed within a shell. + repeated string command = 6; +} + +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. +message PodIP { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + optional string ip = 1; +} + +// PodList is a list of Pods. +message PodList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pods. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md + repeated Pod items = 2; +} + +// PodLogOptions is the query options for a Pod's logs REST call. +message PodLogOptions { + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + // +optional + optional string container = 1; + + // Follow the log stream of the pod. Defaults to false. + // +optional + optional bool follow = 2; + + // Return previous terminated container logs. Defaults to false. + // +optional + optional bool previous = 3; + + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + optional int64 sinceSeconds = 4; + + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; + + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + // +optional + optional bool timestamps = 6; + + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + // +optional + optional int64 tailLines = 7; + + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + // +optional + optional int64 limitBytes = 8; + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept + // the actual log data coming from the real kubelet). 
+ // +optional + optional bool insecureSkipTLSVerifyBackend = 9; +} + +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +message PodPortForwardOptions { + // List of ports to forward + // Required when using WebSockets + // +optional + repeated int32 ports = 1; +} + +// PodProxyOptions is the query options to a Pod's proxy call. +message PodProxyOptions { + // Path is the URL path to use for the current proxy request to pod. + // +optional + optional string path = 1; +} + +// PodReadinessGate contains the reference to a pod condition +message PodReadinessGate { + // ConditionType refers to a condition in the pod's condition list with matching type. + optional string conditionType = 1; +} + +// PodSecurityContext holds pod-level security attributes and common container settings. +// Some fields are also present in container.securityContext. Field values of +// container.securityContext take precedence over field values of PodSecurityContext. +message PodSecurityContext { + // The SELinux context to be applied to all containers. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in SecurityContext. If set in + // both SecurityContext and PodSecurityContext, the value specified in SecurityContext + // takes precedence for that container. + // +optional + optional SELinuxOptions seLinuxOptions = 1; + + // The Windows specific settings applied to all containers. + // If unspecified, the options within a container's SecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional WindowsSecurityContextOptions windowsOptions = 8; + + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + optional int64 runAsUser = 2; + + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence + // for that container. + // +optional + optional int64 runAsGroup = 6; + + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in SecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional bool runAsNonRoot = 3; + + // A list of groups applied to the first process run in each container, in addition + // to the container's primary GID. If unspecified, no groups will be added to + // any container. + // +optional + repeated int64 supplementalGroups = 4; + + // A special supplemental group that applies to all containers in a pod. 
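PodLogOptions above is the wire form of the options struct a Go client fills in; a hedged sketch, with placeholder container name and tail count, and the clientset call left as a comment since its exact signature depends on the client-go version in use:

package example // illustrative only, not part of the vendored sources

import corev1 "k8s.io/api/core/v1"

func int64Ptr(i int64) *int64 { return &i }

// tailRedisLogs builds options roughly equivalent to
// `kubectl logs --tail=100 --timestamps -c redis`; they would typically be
// passed to clientset.CoreV1().Pods(namespace).GetLogs(podName, opts).
var tailRedisLogs = &corev1.PodLogOptions{
	Container:  "redis",
	TailLines:  int64Ptr(100),
	Timestamps: true,
}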
+ // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + optional int64 fsGroup = 5; + + // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + // sysctls (by the container runtime) might fail to launch. + // +optional + repeated Sysctl sysctls = 7; + + // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + // before being exposed inside Pod. This field will only apply to + // volume types which support fsGroup based ownership(and permissions). + // It will have no effect on ephemeral volume types such as: secret, configmaps + // and emptydir. + // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". + // +optional + optional string fsGroupChangePolicy = 9; +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +message PodSignature { + // Reference to controller whose pods should avoid this node. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; +} + +// PodSpec is a description of a pod. +message PodSpec { + // List of volumes that can be mounted by containers belonging to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes + // +optional + // +patchMergeKey=name + // +patchStrategy=merge,retainKeys + repeated Volume volumes = 1; + + // List of initialization containers belonging to the pod. + // Init containers are executed in order prior to containers being started. If any + // init container fails, the pod is considered to have failed and is handled according + // to its restartPolicy. The name for an init container or normal container must be + // unique among all containers. + // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + // The resourceRequirements of an init container are taken into account during scheduling + // by finding the highest request/limit for each resource type, and then using the max of + // of that value or the sum of the normal containers. Limits are applied to init containers + // in a similar fashion. + // Init containers cannot currently be added or removed. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + // +patchMergeKey=name + // +patchStrategy=merge + repeated Container initContainers = 20; + + // List of containers belonging to the pod. + // Containers cannot currently be added or removed. + // There must be at least one container in a Pod. + // Cannot be updated. + // +patchMergeKey=name + // +patchStrategy=merge + repeated Container containers = 2; + + // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing + // pod to perform user-initiated actions such as debugging. This list cannot be specified when + // creating a pod, and it cannot be modified by updating the pod spec. In order to add an + // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. + // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. 
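A rough illustration of how the PodSecurityContext message above is populated from Go, assuming the corev1 types; all UID/GID values are examples only:

package example // illustrative only, not part of the vendored sources

import corev1 "k8s.io/api/core/v1"

func i64(v int64) *int64 { return &v }
func boolp(v bool) *bool { return &v }

// nonRootPodContext is a pod-level security context: run as a fixed non-root
// UID/GID and let the kubelet chown volumes to fsGroup 2000.
var nonRootPodContext = &corev1.PodSecurityContext{
	RunAsUser:    i64(1000),
	RunAsGroup:   i64(3000),
	RunAsNonRoot: boolp(true),
	FSGroup:      i64(2000),
}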
+ // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated EphemeralContainer ephemeralContainers = 34; + + // Restart policy for all containers within the pod. + // One of Always, OnFailure, Never. + // Default to Always. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy + // +optional + optional string restartPolicy = 3; + + // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. + // Value must be non-negative integer. The value zero indicates delete immediately. + // If this value is nil, the default grace period will be used instead. + // The grace period is the duration in seconds after the processes running in the pod are sent + // a termination signal and the time when the processes are forcibly halted with a kill signal. + // Set this value longer than the expected cleanup time for your process. + // Defaults to 30 seconds. + // +optional + optional int64 terminationGracePeriodSeconds = 4; + + // Optional duration in seconds the pod may be active on the node relative to + // StartTime before the system will actively try to mark it failed and kill associated containers. + // Value must be a positive integer. + // +optional + optional int64 activeDeadlineSeconds = 5; + + // Set DNS policy for the pod. + // Defaults to "ClusterFirst". + // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. + // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. + // To have DNS options set along with hostNetwork, you have to specify DNS policy + // explicitly to 'ClusterFirstWithHostNet'. + // +optional + optional string dnsPolicy = 6; + + // NodeSelector is a selector which must be true for the pod to fit on a node. + // Selector which must match a node's labels for the pod to be scheduled on that node. + // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + // +optional + map nodeSelector = 7; + + // ServiceAccountName is the name of the ServiceAccount to use to run this pod. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + // +optional + optional string serviceAccountName = 8; + + // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. + // Deprecated: Use serviceAccountName instead. + // +k8s:conversion-gen=false + // +optional + optional string serviceAccount = 9; + + // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. + // +optional + optional bool automountServiceAccountToken = 21; + + // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, + // the scheduler simply schedules this pod onto that node, assuming that it fits resource + // requirements. + // +optional + optional string nodeName = 10; + + // Host networking requested for this pod. Use the host's network namespace. + // If this option is set, the ports that will be used must be specified. + // Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostNetwork = 11; + + // Use the host's pid namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostPID = 12; + + // Use the host's ipc namespace. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool hostIPC = 13; + + // Share a single process namespace between all of the containers in a pod. 
+ // When this is set containers will be able to view and signal processes from other containers + // in the same pod, and the first process in each container will not be assigned PID 1. + // HostPID and ShareProcessNamespace cannot both be set. + // Optional: Default to false. + // +k8s:conversion-gen=false + // +optional + optional bool shareProcessNamespace = 27; + + // SecurityContext holds pod-level security attributes and common container settings. + // Optional: Defaults to empty. See type description for default values of each field. + // +optional + optional PodSecurityContext securityContext = 14; + + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated LocalObjectReference imagePullSecrets = 15; + + // Specifies the hostname of the Pod + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + optional string hostname = 16; + + // If specified, the fully qualified Pod hostname will be "...svc.". + // If not specified, the pod will not have a domainname at all. + // +optional + optional string subdomain = 17; + + // If specified, the pod's scheduling constraints + // +optional + optional Affinity affinity = 18; + + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + optional string schedulerName = 19; + + // If specified, the pod's tolerations. + // +optional + repeated Toleration tolerations = 22; + + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + // +patchMergeKey=ip + // +patchStrategy=merge + repeated HostAlias hostAliases = 23; + + // If specified, indicates the pod's priority. "system-node-critical" and + // "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + optional string priorityClassName = 24; + + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + optional int32 priority = 25; + + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + optional PodDNSConfig dnsConfig = 26; + + // If specified, all readiness gates will be evaluated for pod readiness. 
+ // A pod is ready when all its containers are ready AND
+ // all conditions specified in the readiness gates have status equal to "True"
+ // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md
+ // +optional
+ repeated PodReadinessGate readinessGates = 28;
+
+ // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used
+ // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run.
+ // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an
+ // empty definition that uses the default runtime handler.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+ // This is a beta feature as of Kubernetes v1.14.
+ // +optional
+ optional string runtimeClassName = 29;
+
+ // EnableServiceLinks indicates whether information about services should be injected into pod's
+ // environment variables, matching the syntax of Docker links.
+ // Optional: Defaults to true.
+ // +optional
+ optional bool enableServiceLinks = 30;
+
+ // PreemptionPolicy is the Policy for preempting pods with lower priority.
+ // One of Never, PreemptLowerPriority.
+ // Defaults to PreemptLowerPriority if unset.
+ // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.
+ // +optional
+ optional string preemptionPolicy = 31;
+
+ // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass.
+ // This field will be autopopulated at admission time by the RuntimeClass admission controller. If
+ // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests.
+ // The RuntimeClass admission controller will reject Pod create requests which have the overhead already
+ // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value
+ // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero.
+ // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
+ // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> overhead = 32;
+
+ // TopologySpreadConstraints describes how a group of pods ought to spread across topology
+ // domains. Scheduler will schedule pods in a way which abides by the constraints.
+ // This field is only honored by clusters that enable the EvenPodsSpread feature.
+ // All topologySpreadConstraints are ANDed.
+ // +optional
+ // +patchMergeKey=topologyKey
+ // +patchStrategy=merge
+ // +listType=map
+ // +listMapKey=topologyKey
+ // +listMapKey=whenUnsatisfiable
+ repeated TopologySpreadConstraint topologySpreadConstraints = 33;
+}
+
+// PodStatus represents information about the status of a pod. Status may trail the actual
+// state of a system, especially if the node that hosts the pod cannot contact the control
+// plane.
+message PodStatus {
+ // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle.
+ // The conditions array, the reason and message fields, and the individual container status
+ // arrays contain more detail about the pod's status.
+ // There are five possible phase values:
+ //
+ // Pending: The pod has been accepted by the Kubernetes system, but one or more of the
+ // container images has not been created.
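PodSpec is the message most callers actually build; a minimal sketch assuming the corev1 Go types, with placeholder image, port and node selector values:

package example // illustrative only, not part of the vendored sources

import corev1 "k8s.io/api/core/v1"

func i64ptr(v int64) *int64 { return &v }

// redisPodSpec is a minimal PodSpec: one container, ClusterFirst DNS,
// a 30s grace period and a node selector; all values are placeholders.
var redisPodSpec = corev1.PodSpec{
	Containers: []corev1.Container{{
		Name:  "redis",
		Image: "redis:6-alpine",
		Ports: []corev1.ContainerPort{{ContainerPort: 6379}},
	}},
	RestartPolicy:                 corev1.RestartPolicyAlways,
	DNSPolicy:                     corev1.DNSClusterFirst,
	TerminationGracePeriodSeconds: i64ptr(30),
	NodeSelector:                  map[string]string{"kubernetes.io/os": "linux"},
	ServiceAccountName:            "default",
}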
This includes time before being scheduled as + // well as time spent downloading images over the network, which could take a while. + // Running: The pod has been bound to a node, and all of the containers have been created. + // At least one container is still running, or is in the process of starting or restarting. + // Succeeded: All containers in the pod have terminated in success, and will not be restarted. + // Failed: All containers in the pod have terminated, and at least one container has + // terminated in failure. The container either exited with non-zero status or was terminated + // by the system. + // Unknown: For some reason the state of the pod could not be obtained, typically due to an + // error in communicating with the host of the pod. + // + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase + // +optional + optional string phase = 1; + + // Current service state of pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated PodCondition conditions = 2; + + // A human readable message indicating details about why the pod is in this condition. + // +optional + optional string message = 3; + + // A brief CamelCase message indicating details about why the pod is in this state. + // e.g. 'Evicted' + // +optional + optional string reason = 4; + + // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be + // scheduled right away as preemption victims receive their graceful termination periods. + // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide + // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to + // give the resources on this node to a higher priority pod that is created after preemption. + // As a result, this field may be different than PodSpec.nodeName when the pod is + // scheduled. + // +optional + optional string nominatedNodeName = 11; + + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // +optional + optional string hostIP = 5; + + // IP address allocated to the pod. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + optional string podIP = 6; + + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. + // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + repeated PodIP podIPs = 12; + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + repeated ContainerStatus initContainerStatuses = 10; + + // The list has one entry per container in the manifest. Each entry is currently the output + // of `docker inspect`. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // +optional + repeated ContainerStatus containerStatuses = 8; + + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // +optional + optional string qosClass = 9; + + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // +optional + repeated ContainerStatus ephemeralContainerStatuses = 13; +} + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +message PodStatusResult { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PodStatus status = 2; +} + +// PodTemplate describes a template for creating copies of a predefined pod. +message PodTemplate { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Template defines the pods that will be created from this pod template. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PodTemplateSpec template = 2; +} + +// PodTemplateList is a list of PodTemplates. +message PodTemplateList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod templates + repeated PodTemplate items = 2; +} + +// PodTemplateSpec describes the data a pod should have when created from a template +message PodTemplateSpec { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the pod. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional PodSpec spec = 2; +} + +// PortworxVolumeSource represents a Portworx volume resource. +message PortworxVolumeSource { + // VolumeID uniquely identifies a Portworx volume + optional string volumeID = 1; + + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + optional string fsType = 2; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 3; +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. 
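The PodStatus fields above are what a client inspects to decide whether a pod is usable; a small sketch assuming the corev1 types and constants:

package example // illustrative only, not part of the vendored sources

import corev1 "k8s.io/api/core/v1"

// podIsReady reports whether a pod is Running and has the Ready condition
// set to True, using only the PodStatus fields described above.
func podIsReady(pod *corev1.Pod) bool {
	if pod.Status.Phase != corev1.PodRunning {
		return false
	}
	for _, cond := range pod.Status.Conditions {
		if cond.Type == corev1.PodReady {
			return cond.Status == corev1.ConditionTrue
		}
	}
	return false
}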
+// +k8s:openapi-gen=false +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; +} + +// Describes a class of pods that should avoid this node. +message PreferAvoidPodsEntry { + // The class of pods. + optional PodSignature podSignature = 1; + + // Time at which this entry was added to the list. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; + + // (brief) reason why this entry was added to the list. + // +optional + optional string reason = 3; + + // Human readable message indicating why this entry was added to the list. + // +optional + optional string message = 4; +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +message PreferredSchedulingTerm { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + optional int32 weight = 1; + + // A node selector term, associated with the corresponding weight. + optional NodeSelectorTerm preference = 2; +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +message Probe { + // The action taken to determine the health of a container + optional Handler handler = 1; + + // Number of seconds after the container has started before liveness probes are initiated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional int32 initialDelaySeconds = 2; + + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + optional int32 timeoutSeconds = 3; + + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + optional int32 periodSeconds = 4; + + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + // +optional + optional int32 successThreshold = 5; + + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + optional int32 failureThreshold = 6; +} + +// Represents a projected volume source +message ProjectedVolumeSource { + // list of volume projections + repeated VolumeProjection sources = 1; + + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + optional int32 defaultMode = 2; +} + +// Represents a Quobyte mount that lasts the lifetime of a pod. +// Quobyte volumes do not support ownership management or SELinux relabeling. +message QuobyteVolumeSource { + // Registry represents a single or multiple Quobyte Registry services + // specified as a string as host:port pair (multiple entries are separated with commas) + // which acts as the central registry for volumes + optional string registry = 1; + + // Volume is a string that references an already created Quobyte volume by name. 
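The Probe message above corresponds to the corev1.Probe struct; a hedged sketch of an HTTP liveness probe (the action field is the embedded Handler struct at this API vintage; later releases rename it ProbeHandler):

package example // illustrative only, not part of the vendored sources

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// livenessProbe probes GET /healthz on port 8080 every 10s after a 5s delay;
// path and port are placeholders.
var livenessProbe = &corev1.Probe{
	Handler: corev1.Handler{
		HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
	},
	InitialDelaySeconds: 5,
	PeriodSeconds:       10,
	TimeoutSeconds:      1,
	FailureThreshold:    3,
}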
+ optional string volume = 2; + + // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. + // Defaults to false. + // +optional + optional bool readOnly = 3; + + // User to map volume access to + // Defaults to serivceaccount user + // +optional + optional string user = 4; + + // Group to map volume access to + // Default is no group + // +optional + optional string group = 5; + + // Tenant owning the given Quobyte volume in the Backend + // Used with dynamically provisioned Quobyte volumes, value is set by the plugin + // +optional + optional string tenant = 6; +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +message RBDPersistentVolumeSource { + // A collection of Ceph monitors. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + repeated string monitors = 1; + + // The rados image name. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + optional string image = 2; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // The rados pool name. + // Default is rbd. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional string pool = 4; + + // The rados user name. + // Default is admin. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional string user = 5; + + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional string keyring = 6; + + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional SecretReference secretRef = 7; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional bool readOnly = 8; +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +message RBDVolumeSource { + // A collection of Ceph monitors. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + repeated string monitors = 1; + + // The rados image name. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + optional string image = 2; + + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + optional string fsType = 3; + + // The rados pool name. + // Default is rbd. 
+ // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional string pool = 4; + + // The rados user name. + // Default is admin. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional string user = 5; + + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional string keyring = 6; + + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional LocalObjectReference secretRef = 7; + + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + optional bool readOnly = 8; +} + +// RangeAllocation is not a public type. +message RangeAllocation { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Range is string that identifies the range represented by 'data'. + optional string range = 2; + + // Data is a bit array containing all allocated addresses in the previous segment. + optional bytes data = 3; +} + +// ReplicationController represents the configuration of a replication controller. +message ReplicationController { + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the replication controller. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ReplicationControllerSpec spec = 2; + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ReplicationControllerStatus status = 3; +} + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +message ReplicationControllerCondition { + // Type of replication controller condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicationControllerList is a collection of replication controllers. +message ReplicationControllerList { + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // List of replication controllers.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller
+ repeated ReplicationController items = 2;
+}
+
+// ReplicationControllerSpec is the specification of a replication controller.
+message ReplicationControllerSpec {
+ // Replicas is the number of desired replicas.
+ // This is a pointer to distinguish between explicit zero and unspecified.
+ // Defaults to 1.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+ // +optional
+ optional int32 replicas = 1;
+
+ // Minimum number of seconds for which a newly created pod should be ready
+ // without any of its container crashing, for it to be considered available.
+ // Defaults to 0 (pod will be considered available as soon as it is ready)
+ // +optional
+ optional int32 minReadySeconds = 4;
+
+ // Selector is a label query over pods that should match the Replicas count.
+ // If Selector is empty, it is defaulted to the labels present on the Pod template.
+ // Label keys and values that must match in order to be controlled by this replication
+ // controller, if empty defaulted to labels on Pod template.
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ map<string, string> selector = 2;
+
+ // Template is the object that describes the pod that will be created if
+ // insufficient replicas are detected. This takes precedence over a TemplateRef.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template
+ // +optional
+ optional PodTemplateSpec template = 3;
+}
+
+// ReplicationControllerStatus represents the current status of a replication
+// controller.
+message ReplicationControllerStatus {
+ // Replicas is the most recently observed number of replicas.
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller
+ optional int32 replicas = 1;
+
+ // The number of pods that have labels matching the labels of the pod template of the replication controller.
+ // +optional
+ optional int32 fullyLabeledReplicas = 2;
+
+ // The number of ready replicas for this replication controller.
+ // +optional
+ optional int32 readyReplicas = 4;
+
+ // The number of available replicas (ready for at least minReadySeconds) for this replication controller.
+ // +optional
+ optional int32 availableReplicas = 5;
+
+ // ObservedGeneration reflects the generation of the most recently observed replication controller.
+ // +optional
+ optional int64 observedGeneration = 3;
+
+ // Represents the latest available observations of a replication controller's current state.
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicationControllerCondition conditions = 6; +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +message ResourceFieldSelector { + // Container name: required for volumes, optional for env vars + // +optional + optional string containerName = 1; + + // Required: resource to select + optional string resource = 2; + + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; +} + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +message ResourceQuota { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the desired quota. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ResourceQuotaSpec spec = 2; + + // Status defines the actual enforced quota and its current usage. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ResourceQuotaStatus status = 3; +} + +// ResourceQuotaList is a list of ResourceQuota items. +message ResourceQuotaList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ResourceQuota objects. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + repeated ResourceQuota items = 2; +} + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. +message ResourceQuotaSpec { + // hard is the set of desired hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + // +optional + map hard = 1; + + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + repeated string scopes = 2; + + // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota + // but expressed using ScopeSelectorOperator in combination with possible values. + // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + // +optional + optional ScopeSelector scopeSelector = 3; +} + +// ResourceQuotaStatus defines the enforced hard limits and observed use. +message ResourceQuotaStatus { + // Hard is the set of enforced hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + // +optional + map hard = 1; + + // Used is the current observed total usage of the resource in the namespace. + // +optional + map used = 2; +} + +// ResourceRequirements describes the compute resource requirements. +message ResourceRequirements { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + map limits = 1; + + // Requests describes the minimum amount of compute resources required. 
+ // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + map requests = 2; +} + +// SELinuxOptions are the labels to be applied to the container +message SELinuxOptions { + // User is a SELinux user label that applies to the container. + // +optional + optional string user = 1; + + // Role is a SELinux role label that applies to the container. + // +optional + optional string role = 2; + + // Type is a SELinux type label that applies to the container. + // +optional + optional string type = 3; + + // Level is SELinux level label that applies to the container. + // +optional + optional string level = 4; +} + +// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume +message ScaleIOPersistentVolumeSource { + // The host address of the ScaleIO API Gateway. + optional string gateway = 1; + + // The name of the storage system as configured in ScaleIO. + optional string system = 2; + + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + optional SecretReference secretRef = 3; + + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + optional bool sslEnabled = 4; + + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + optional string protectionDomain = 5; + + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + optional string storagePool = 6; + + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // Default is ThinProvisioned. + // +optional + optional string storageMode = 7; + + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + optional string volumeName = 8; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". + // Default is "xfs" + // +optional + optional string fsType = 9; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 10; +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +message ScaleIOVolumeSource { + // The host address of the ScaleIO API Gateway. + optional string gateway = 1; + + // The name of the storage system as configured in ScaleIO. + optional string system = 2; + + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + optional LocalObjectReference secretRef = 3; + + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + optional bool sslEnabled = 4; + + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + optional string protectionDomain = 5; + + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + optional string storagePool = 6; + + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // Default is ThinProvisioned. + // +optional + optional string storageMode = 7; + + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. 
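ResourceRequirements above is expressed in Go as two ResourceList maps keyed by resource name; a sketch with placeholder quantities, assuming corev1 and the apimachinery resource package:

package example // illustrative only, not part of the vendored sources

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// redisResources requests a modest footprint and caps it at twice that;
// the quantities are placeholders.
var redisResources = corev1.ResourceRequirements{
	Requests: corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("250m"),
		corev1.ResourceMemory: resource.MustParse("128Mi"),
	},
	Limits: corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("256Mi"),
	},
}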
+ optional string volumeName = 8; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". + // Default is "xfs". + // +optional + optional string fsType = 9; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 10; +} + +// A scope selector represents the AND of the selectors represented +// by the scoped-resource selector requirements. +message ScopeSelector { + // A list of scope selector requirements by scope of the resources. + // +optional + repeated ScopedResourceSelectorRequirement matchExpressions = 1; +} + +// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// that relates the scope name and values. +message ScopedResourceSelectorRequirement { + // The name of the scope that the selector applies to. + optional string scopeName = 1; + + // Represents a scope's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + optional string operator = 2; + + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. + // This array is replaced during a strategic merge patch. + // +optional + repeated string values = 3; +} + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +message Secret { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Immutable, if set to true, ensures that data stored in the Secret cannot + // be updated (only object metadata can be modified). + // If not set to true, the field can be modified at any time. + // Defaulted to nil. + // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. + // +optional + optional bool immutable = 5; + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + map data = 2; + + // stringData allows specifying non-binary secret data in string form. + // It is provided as a write-only convenience method. + // All keys and values are merged into the data field on write, overwriting any existing values. + // It is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + map stringData = 4; + + // Used to facilitate programmatic handling of secret data. + // +optional + optional string type = 3; +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +message SecretEnvSource { + // The Secret to select from. + optional LocalObjectReference localObjectReference = 1; + + // Specify whether the Secret must be defined + // +optional + optional bool optional = 2; +} + +// SecretKeySelector selects a key of a Secret. +message SecretKeySelector { + // The name of the secret in the pod's namespace to select from. 
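A minimal sketch of building the Secret message above from Go, using the write-only stringData field; the secret name and value are placeholders:

package example // illustrative only, not part of the vendored sources

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// redisAuthSecret uses the write-only StringData convenience field; the API
// server merges it into data as base64 on write.
var redisAuthSecret = corev1.Secret{
	ObjectMeta: metav1.ObjectMeta{Name: "redis-auth"},
	Type:       corev1.SecretTypeOpaque,
	StringData: map[string]string{"password": "change-me"},
}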
+ optional LocalObjectReference localObjectReference = 1; + + // The key of the secret to select from. Must be a valid secret key. + optional string key = 2; + + // Specify whether the Secret or its key must be defined + // +optional + optional bool optional = 3; +} + +// SecretList is a list of Secret. +message SecretList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of secret objects. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + repeated Secret items = 2; +} + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. +message SecretProjection { + optional LocalObjectReference localObjectReference = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Specify whether the Secret or its key must be defined + // +optional + optional bool optional = 4; +} + +// SecretReference represents a Secret Reference. It has enough information to retrieve secret +// in any namespace +message SecretReference { + // Name is unique within a namespace to reference a secret resource. + // +optional + optional string name = 1; + + // Namespace defines the space within which the secret name must be unique. + // +optional + optional string namespace = 2; +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +message SecretVolumeSource { + // Name of the secret in the pod's namespace to use. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + optional string secretName = 1; + + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + repeated KeyToPath items = 2; + + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
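SecretKeySelector above is most often used to feed a single secret key into a container environment variable; a sketch assuming the corev1 types and the hypothetical redis-auth secret from the previous example:

package example // illustrative only, not part of the vendored sources

import corev1 "k8s.io/api/core/v1"

// redisPasswordEnv injects one key of the hypothetical redis-auth Secret
// into a container as REDIS_PASSWORD.
var redisPasswordEnv = corev1.EnvVar{
	Name: "REDIS_PASSWORD",
	ValueFrom: &corev1.EnvVarSource{
		SecretKeyRef: &corev1.SecretKeySelector{
			LocalObjectReference: corev1.LocalObjectReference{Name: "redis-auth"},
			Key:                  "password",
		},
	},
}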
+ // +optional + optional int32 defaultMode = 3; + + // Specify whether the Secret or its keys must be defined + // +optional + optional bool optional = 4; +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +message SecurityContext { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + optional Capabilities capabilities = 1; + + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + optional bool privileged = 2; + + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional SELinuxOptions seLinuxOptions = 3; + + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional WindowsSecurityContextOptions windowsOptions = 10; + + // The UID to run the entrypoint of the container process. + // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional int64 runAsUser = 4; + + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional int64 runAsGroup = 8; + + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional bool runAsNonRoot = 5; + + // Whether this container has a read-only root filesystem. + // Default is false. + // +optional + optional bool readOnlyRootFilesystem = 6; + + // AllowPrivilegeEscalation controls whether a process can gain more + // privileges than its parent process. This bool directly controls if + // the no_new_privs flag will be set on the container process. + // AllowPrivilegeEscalation is true always when the container is: + // 1) run as Privileged + // 2) has CAP_SYS_ADMIN + // +optional + optional bool allowPrivilegeEscalation = 7; + + // procMount denotes the type of proc mount to use for the containers. + // The default is DefaultProcMount which uses the container runtime defaults for + // readonly paths and masked paths. + // This requires the ProcMountType feature flag to be enabled. 
+ // +optional + optional string procMount = 9; +} + +// SerializedReference is a reference to serialized object. +message SerializedReference { + // The reference to an object in the system. + // +optional + optional ObjectReference reference = 1; +} + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +message Service { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the behavior of a service. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceSpec spec = 2; + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ServiceStatus status = 3; +} + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +message ServiceAccount { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated ObjectReference secrets = 2; + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + // +optional + repeated LocalObjectReference imagePullSecrets = 3; + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + optional bool automountServiceAccountToken = 4; +} + +// ServiceAccountList is a list of ServiceAccount objects +message ServiceAccountList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ServiceAccounts. + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + repeated ServiceAccount items = 2; +} + +// ServiceAccountTokenProjection represents a projected service account token +// volume. This projection can be used to insert a service account token into +// the pods runtime filesystem for use against APIs (Kubernetes API Server or +// otherwise). +message ServiceAccountTokenProjection { + // Audience is the intended audience of the token. 
A recipient of a token + // must identify itself with an identifier specified in the audience of the + // token, and otherwise should reject the token. The audience defaults to the + // identifier of the apiserver. + // +optional + optional string audience = 1; + + // ExpirationSeconds is the requested duration of validity of the service + // account token. As the token approaches expiration, the kubelet volume + // plugin will proactively rotate the service account token. The kubelet will + // start trying to rotate the token if the token is older than 80 percent of + // its time to live or if the token is older than 24 hours.Defaults to 1 hour + // and must be at least 10 minutes. + // +optional + optional int64 expirationSeconds = 2; + + // Path is the path relative to the mount point of the file to project the + // token into. + optional string path = 3; +} + +// ServiceList holds a list of services. +message ServiceList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of services + repeated Service items = 2; +} + +// ServicePort contains information on service's port. +message ServicePort { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. + // Optional if only one ServicePort is defined on this service. + // +optional + optional string name = 1; + + // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + // Default is TCP. + // +optional + optional string protocol = 2; + + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names such as + // mycompany.com/my-custom-protocol. + // Field can be enabled with ServiceAppProtocol feature gate. + // +optional + optional string appProtocol = 6; + + // The port that will be exposed by this service. + optional int32 port = 3; + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; + + // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. + // Usually assigned by the system. If specified, it will be allocated to the service + // if unused or else creation of the service will fail. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + // +optional + optional int32 nodePort = 5; +} + +// ServiceProxyOptions is the query options to a Service's proxy call. 
+message ServiceProxyOptions {
+  // Path is the part of URLs that include service endpoints, suffixes,
+  // and parameters to use for the current proxy request to service.
+  // For example, the whole request URL is
+  // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
+  // Path is _search?q=user:kimchy.
+  // +optional
+  optional string path = 1;
+}
+
+// ServiceSpec describes the attributes that a user creates on a service.
+message ServiceSpec {
+  // The list of ports that are exposed by this service.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +patchMergeKey=port
+  // +patchStrategy=merge
+  // +listType=map
+  // +listMapKey=port
+  // +listMapKey=protocol
+  repeated ServicePort ports = 1;
+
+  // Route service traffic to pods with label keys and values matching this
+  // selector. If empty or not present, the service is assumed to have an
+  // external process managing its endpoints, which Kubernetes will not
+  // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
+  // Ignored if type is ExternalName.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/
+  // +optional
+  map<string, string> selector = 2;
+
+  // clusterIP is the IP address of the service and is usually assigned
+  // randomly by the master. If an address is specified manually and is not in
+  // use by others, it will be allocated to the service; otherwise, creation
+  // of the service will fail. This field can not be changed through updates.
+  // Valid values are "None", empty string (""), or a valid IP address. "None"
+  // can be specified for headless services when proxying is not required.
+  // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
+  // type is ExternalName.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
+  // +optional
+  optional string clusterIP = 3;
+
+  // type determines how the Service is exposed. Defaults to ClusterIP. Valid
+  // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
+  // "ExternalName" maps to the specified externalName.
+  // "ClusterIP" allocates a cluster-internal IP address for load-balancing to
+  // endpoints. Endpoints are determined by the selector or if that is not
+  // specified, by manual construction of an Endpoints object. If clusterIP is
+  // "None", no virtual IP is allocated and the endpoints are published as a
+  // set of endpoints rather than a stable IP.
+  // "NodePort" builds on ClusterIP and allocates a port on every node which
+  // routes to the clusterIP.
+  // "LoadBalancer" builds on NodePort and creates an
+  // external load-balancer (if supported in the current cloud) which routes
+  // to the clusterIP.
+  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
+  // +optional
+  optional string type = 4;
+
+  // externalIPs is a list of IP addresses for which nodes in the cluster
+  // will also accept traffic for this service. These IPs are not managed by
+  // Kubernetes. The user is responsible for ensuring that traffic arrives
+  // at a node with this IP. A common example is external load-balancers
+  // that are not part of the Kubernetes system.
+  // +optional
+  repeated string externalIPs = 5;
+
+  // Supports "ClientIP" and "None". Used to maintain session affinity.
+  // Enable client IP based session affinity.
+  // Must be ClientIP or None.
+ // Defaults to None. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + optional string sessionAffinity = 7; + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + // +optional + optional string loadBalancerIP = 8; + + // If specified and supported by the platform, this will restrict traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature." + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + // +optional + repeated string loadBalancerSourceRanges = 9; + + // externalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. + // +optional + optional string externalName = 10; + + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. "Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. + // +optional + optional string externalTrafficPolicy = 11; + + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only effects when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local. + // +optional + optional int32 healthCheckNodePort = 12; + + // publishNotReadyAddresses, when set to true, indicates that DNS implementations + // must publish the notReadyAddresses of subsets for the Endpoints associated with + // the Service. The default value is false. + // The primary use case for setting this field is to use a StatefulSet's Headless Service + // to propagate SRV records for its Pods without respect to their readiness for purpose + // of peer discovery. + // +optional + optional bool publishNotReadyAddresses = 13; + + // sessionAffinityConfig contains the configurations of session affinity. + // +optional + optional SessionAffinityConfig sessionAffinityConfig = 14; + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. 
Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + optional string ipFamily = 15; + + // topologyKeys is a preference-order list of topology keys which + // implementations of services should use to preferentially sort endpoints + // when accessing this Service, it can not be used at the same time as + // externalTrafficPolicy=Local. + // Topology keys must be valid label keys and at most 16 keys may be specified. + // Endpoints are chosen based on the first topology key with available backends. + // If this field is specified and all entries have no backends that match + // the topology of the client, the service has no backends for that client + // and connections should fail. + // The special value "*" may be used to mean "any topology". This catch-all + // value, if used, only makes sense as the last value in the list. + // If this is not specified or empty, no topology constraints will be applied. + // +optional + repeated string topologyKeys = 16; +} + +// ServiceStatus represents the current status of a service. +message ServiceStatus { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + optional LoadBalancerStatus loadBalancer = 1; +} + +// SessionAffinityConfig represents the configurations of session affinity. +message SessionAffinityConfig { + // clientIP contains the configurations of Client IP based session affinity. + // +optional + optional ClientIPConfig clientIP = 1; +} + +// Represents a StorageOS persistent volume resource. +message StorageOSPersistentVolumeSource { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + optional string volumeName = 1; + + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + optional string volumeNamespace = 2; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + optional string fsType = 3; + + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + optional bool readOnly = 4; + + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + optional ObjectReference secretRef = 5; +} + +// Represents a StorageOS persistent volume resource. +message StorageOSVolumeSource { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + optional string volumeName = 1; + + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. 
+  // Set to "default" if you are not using namespaces within StorageOS.
+  // Namespaces that do not pre-exist within StorageOS will be created.
+  // +optional
+  optional string volumeNamespace = 2;
+
+  // Filesystem type to mount.
+  // Must be a filesystem type supported by the host operating system.
+  // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+  // +optional
+  optional string fsType = 3;
+
+  // Defaults to false (read/write). ReadOnly here will force
+  // the ReadOnly setting in VolumeMounts.
+  // +optional
+  optional bool readOnly = 4;
+
+  // SecretRef specifies the secret to use for obtaining the StorageOS API
+  // credentials. If not specified, default values will be attempted.
+  // +optional
+  optional LocalObjectReference secretRef = 5;
+}
+
+// Sysctl defines a kernel parameter to be set
+message Sysctl {
+  // Name of a property to set
+  optional string name = 1;
+
+  // Value of a property to set
+  optional string value = 2;
+}
+
+// TCPSocketAction describes an action based on opening a socket
+message TCPSocketAction {
+  // Number or name of the port to access on the container.
+  // Number must be in the range 1 to 65535.
+  // Name must be an IANA_SVC_NAME.
+  optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1;
+
+  // Optional: Host name to connect to, defaults to the pod IP.
+  // +optional
+  optional string host = 2;
+}
+
+// The node this Taint is attached to has the "effect" on
+// any pod that does not tolerate the Taint.
+message Taint {
+  // Required. The taint key to be applied to a node.
+  optional string key = 1;
+
+  // The taint value corresponding to the taint key.
+  // +optional
+  optional string value = 2;
+
+  // Required. The effect of the taint on pods
+  // that do not tolerate the taint.
+  // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
+  optional string effect = 3;
+
+  // TimeAdded represents the time at which the taint was added.
+  // It is only written for NoExecute taints.
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
+}
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+message Toleration {
+  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+  // +optional
+  optional string key = 1;
+
+  // Operator represents a key's relationship to the value.
+  // Valid operators are Exists and Equal. Defaults to Equal.
+  // Exists is equivalent to wildcard for value, so that a pod can
+  // tolerate all taints of a particular category.
+  // +optional
+  optional string operator = 2;
+
+  // Value is the taint value the toleration matches to.
+  // If the operator is Exists, the value should be empty, otherwise just a regular string.
+  // +optional
+  optional string value = 3;
+
+  // Effect indicates the taint effect to match. Empty means match all taint effects.
+  // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+  // +optional
+  optional string effect = 4;
+
+  // TolerationSeconds represents the period of time the toleration (which must be
+  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+  // it is not set, which means tolerate the taint forever (do not evict). Zero and
+  // negative values will be treated as 0 (evict immediately) by the system.
+  // +optional
+  optional int64 tolerationSeconds = 5;
+}
+
+// A topology selector requirement is a selector that matches given label.
+// This is an alpha feature and may change in the future.
+message TopologySelectorLabelRequirement {
+  // The label key that the selector applies to.
+  optional string key = 1;
+
+  // An array of string values. One value must match the label to be selected.
+  // Each entry in Values is ORed.
+  repeated string values = 2;
+}
+
+// A topology selector term represents the result of label queries.
+// A null or empty topology selector term matches no objects.
+// The requirements of them are ANDed.
+// It provides a subset of functionality as NodeSelectorTerm.
+// This is an alpha feature and may change in the future.
+message TopologySelectorTerm {
+  // A list of topology selector requirements by labels.
+  // +optional
+  repeated TopologySelectorLabelRequirement matchLabelExpressions = 1;
+}
+
+// TopologySpreadConstraint specifies how to spread matching pods among the given topology.
+message TopologySpreadConstraint {
+  // MaxSkew describes the degree to which pods may be unevenly distributed.
+  // It's the maximum permitted difference between the number of matching pods in
+  // any two topology domains of a given topology type.
+  // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+  // labelSelector spread as 1/1/0:
+  // +-------+-------+-------+
+  // | zone1 | zone2 | zone3 |
+  // +-------+-------+-------+
+  // |   P   |   P   |       |
+  // +-------+-------+-------+
+  // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1;
+  // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2)
+  // violate MaxSkew(1).
+  // - if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+  // It's a required field. Default value is 1 and 0 is not allowed.
+  optional int32 maxSkew = 1;
+
+  // TopologyKey is the key of node labels. Nodes that have a label with this key
+  // and identical values are considered to be in the same topology.
+  // We consider each <key, value> as a "bucket", and try to put balanced number
+  // of pods into each bucket.
+  // It's a required field.
+  optional string topologyKey = 2;
+
+  // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy
+  // the spread constraint.
+  // - DoNotSchedule (default) tells the scheduler not to schedule it
+  // - ScheduleAnyway tells the scheduler to still schedule it
+  // It's considered as "Unsatisfiable" if and only if placing incoming pod on any
+  // topology violates "MaxSkew".
+  // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+  // labelSelector spread as 3/1/1:
+  // +-------+-------+-------+
+  // | zone1 | zone2 | zone3 |
+  // +-------+-------+-------+
+  // | P P P |   P   |   P   |
+  // +-------+-------+-------+
+  // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+  // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+  // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+  // won't make it *more* imbalanced.
+  // It's a required field.
+  optional string whenUnsatisfiable = 3;
+
+  // LabelSelector is used to find matching pods.
+  // Pods that match this label selector are counted to determine the number of pods
+  // in their corresponding topology domain.
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; +} + +// TypedLocalObjectReference contains enough information to let you locate the +// typed referenced object inside the same namespace. +message TypedLocalObjectReference { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. + // +optional + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +message Volume { + // Volume's name. + // Must be a DNS_LABEL and unique within the pod. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + optional string name = 1; + + // VolumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + optional VolumeSource volumeSource = 2; +} + +// volumeDevice describes a mapping of a raw block device within a container. +message VolumeDevice { + // name must match the name of a persistentVolumeClaim in the pod + optional string name = 1; + + // devicePath is the path inside of the container that the device will be mapped to. + optional string devicePath = 2; +} + +// VolumeMount describes a mounting of a Volume within a container. +message VolumeMount { + // This must match the Name of a Volume. + optional string name = 1; + + // Mounted read-only if true, read-write otherwise (false or unspecified). + // Defaults to false. + // +optional + optional bool readOnly = 2; + + // Path within the container at which the volume should be mounted. Must + // not contain ':'. + optional string mountPath = 3; + + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + optional string subPath = 4; + + // mountPropagation determines how mounts are propagated from the host + // to container and the other way around. + // When not set, MountPropagationNone is used. + // This field is beta in 1.10. + // +optional + optional string mountPropagation = 5; + + // Expanded path within the volume from which the container's volume should be mounted. + // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + // Defaults to "" (volume's root). + // SubPathExpr and SubPath are mutually exclusive. + // +optional + optional string subPathExpr = 6; +} + +// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. +message VolumeNodeAffinity { + // Required specifies hard node constraints that must be met. 
+ optional NodeSelector required = 1; +} + +// Projection that may be projected along with other supported volume types +message VolumeProjection { + // information about the secret data to project + // +optional + optional SecretProjection secret = 1; + + // information about the downwardAPI data to project + // +optional + optional DownwardAPIProjection downwardAPI = 2; + + // information about the configMap data to project + // +optional + optional ConfigMapProjection configMap = 3; + + // information about the serviceAccountToken data to project + // +optional + optional ServiceAccountTokenProjection serviceAccountToken = 4; +} + +// Represents the source of a volume to mount. +// Only one of its members may be specified. +message VolumeSource { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + optional HostPathVolumeSource hostPath = 1; + + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + optional EmptyDirVolumeSource emptyDir = 2; + + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; + + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; + + // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. + // +optional + optional GitRepoVolumeSource gitRepo = 5; + + // Secret represents a secret that should populate this volume. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + optional SecretVolumeSource secret = 6; + + // NFS represents an NFS mount on the host that shares a pod's lifetime + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + optional NFSVolumeSource nfs = 7; + + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://examples.k8s.io/volumes/iscsi/README.md + // +optional + optional ISCSIVolumeSource iscsi = 8; + + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // +optional + optional GlusterfsVolumeSource glusterfs = 9; + + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; + + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/rbd/README.md + // +optional + optional RBDVolumeSource rbd = 11; + + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + optional FlexVolumeSource flexVolume = 12; + + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + optional CinderVolumeSource cinder = 13; + + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + optional CephFSVolumeSource cephfs = 14; + + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + optional FlockerVolumeSource flocker = 15; + + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + optional DownwardAPIVolumeSource downwardAPI = 16; + + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + optional FCVolumeSource fc = 17; + + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + optional AzureFileVolumeSource azureFile = 18; + + // ConfigMap represents a configMap that should populate this volume + // +optional + optional ConfigMapVolumeSource configMap = 19; + + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; + + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + optional QuobyteVolumeSource quobyte = 21; + + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. + // +optional + optional AzureDiskVolumeSource azureDisk = 22; + + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; + + // Items for all in one resources secrets, configmaps, and downward API + optional ProjectedVolumeSource projected = 26; + + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + optional PortworxVolumeSource portworxVolume = 24; + + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + optional ScaleIOVolumeSource scaleIO = 25; + + // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // +optional + optional StorageOSVolumeSource storageos = 27; + + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + // +optional + optional CSIVolumeSource csi = 28; +} + +// Represents a vSphere volume resource. +message VsphereVirtualDiskVolumeSource { + // Path that identifies vSphere volume vmdk + optional string volumePath = 1; + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
+ // +optional + optional string fsType = 2; + + // Storage Policy Based Management (SPBM) profile name. + // +optional + optional string storagePolicyName = 3; + + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. + // +optional + optional string storagePolicyID = 4; +} + +// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +message WeightedPodAffinityTerm { + // weight associated with matching the corresponding podAffinityTerm, + // in the range 1-100. + optional int32 weight = 1; + + // Required. A pod affinity term, associated with the corresponding weight. + optional PodAffinityTerm podAffinityTerm = 2; +} + +// WindowsSecurityContextOptions contain Windows-specific options and credentials. +message WindowsSecurityContextOptions { + // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // +optional + optional string gmsaCredentialSpecName = 1; + + // GMSACredentialSpec is where the GMSA admission webhook + // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + // GMSA credential spec named by the GMSACredentialSpecName field. + // +optional + optional string gmsaCredentialSpec = 2; + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + optional string runAsUserName = 3; +} + diff --git a/vendor/k8s.io/api/core/v1/objectreference.go b/vendor/k8s.io/api/core/v1/objectreference.go new file mode 100644 index 0000000..ee5335e --- /dev/null +++ b/vendor/k8s.io/api/core/v1/objectreference.go @@ -0,0 +1,33 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that +// intend only to get a reference to that object. This simplifies the event recording interface. +func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go new file mode 100644 index 0000000..8da1dde --- /dev/null +++ b/vendor/k8s.io/api/core/v1/register.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Pod{}, + &PodList{}, + &PodStatusResult{}, + &PodTemplate{}, + &PodTemplateList{}, + &ReplicationController{}, + &ReplicationControllerList{}, + &Service{}, + &ServiceProxyOptions{}, + &ServiceList{}, + &Endpoints{}, + &EndpointsList{}, + &Node{}, + &NodeList{}, + &NodeProxyOptions{}, + &Binding{}, + &Event{}, + &EventList{}, + &List{}, + &LimitRange{}, + &LimitRangeList{}, + &ResourceQuota{}, + &ResourceQuotaList{}, + &Namespace{}, + &NamespaceList{}, + &Secret{}, + &SecretList{}, + &ServiceAccount{}, + &ServiceAccountList{}, + &PersistentVolume{}, + &PersistentVolumeList{}, + &PersistentVolumeClaim{}, + &PersistentVolumeClaimList{}, + &PodAttachOptions{}, + &PodLogOptions{}, + &PodExecOptions{}, + &PodPortForwardOptions{}, + &PodProxyOptions{}, + &ComponentStatus{}, + &ComponentStatusList{}, + &SerializedReference{}, + &RangeAllocation{}, + &ConfigMap{}, + &ConfigMapList{}, + &EphemeralContainers{}, + ) + + // Add common types + scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) + + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go new file mode 100644 index 0000000..5bc9cd5 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/resource.go @@ -0,0 +1,64 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" +) + +// Returns string version of ResourceName. 
+func (self ResourceName) String() string {
+	return string(self)
+}
+
+// Returns the CPU limit if specified.
+func (self *ResourceList) Cpu() *resource.Quantity {
+	if val, ok := (*self)[ResourceCPU]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.DecimalSI}
+}
+
+// Returns the Memory limit if specified.
+func (self *ResourceList) Memory() *resource.Quantity {
+	if val, ok := (*self)[ResourceMemory]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.BinarySI}
+}
+
+// Returns the Storage limit if specified.
+func (self *ResourceList) Storage() *resource.Quantity {
+	if val, ok := (*self)[ResourceStorage]; ok {
+		return &val
+	}
+	return &resource.Quantity{Format: resource.BinarySI}
+}
+
+func (self *ResourceList) Pods() *resource.Quantity {
+	if val, ok := (*self)[ResourcePods]; ok {
+		return &val
+	}
+	return &resource.Quantity{}
+}
+
+func (self *ResourceList) StorageEphemeral() *resource.Quantity {
+	if val, ok := (*self)[ResourceEphemeralStorage]; ok {
+		return &val
+	}
+	return &resource.Quantity{}
+}
diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go
new file mode 100644
index 0000000..db71bd2
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/taint.go
@@ -0,0 +1,39 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import "fmt"
+
+// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect,
+// if the two taints have same key:effect, regard as they match.
+func (t *Taint) MatchTaint(taintToMatch *Taint) bool {
+	return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
+}
+
+// taint.ToString() converts taint struct to string in format '<key>=<value>:<effect>', '<key>=<value>:', '<key>:<effect>', or '<key>'.
+func (t *Taint) ToString() string {
+	if len(t.Effect) == 0 {
+		if len(t.Value) == 0 {
+			return fmt.Sprintf("%v", t.Key)
+		}
+		return fmt.Sprintf("%v=%v:", t.Key, t.Value)
+	}
+	if len(t.Value) == 0 {
+		return fmt.Sprintf("%v:%v", t.Key, t.Effect)
+	}
+	return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
+}
diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go
new file mode 100644
index 0000000..b203d33
--- /dev/null
+++ b/vendor/k8s.io/api/core/v1/toleration.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
+// if the two tolerations have same <key,effect,operator,value> combination, regard as they match.
+// TODO: uniqueness check for tolerations in api validations. +func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool { + return t.Key == tolerationToMatch.Key && + t.Effect == tolerationToMatch.Effect && + t.Operator == tolerationToMatch.Operator && + t.Value == tolerationToMatch.Value +} + +// ToleratesTaint checks if the toleration tolerates the taint. +// The matching follows the rules below: +// (1) Empty toleration.effect means to match all taint effects, +// otherwise taint effect must equal to toleration.effect. +// (2) If toleration.operator is 'Exists', it means to match all taint values. +// (3) Empty toleration.key means to match all taint keys. +// If toleration.key is empty, toleration.operator must be 'Exists'; +// this combination means to match all taint values and all taint keys. +func (t *Toleration) ToleratesTaint(taint *Taint) bool { + if len(t.Effect) > 0 && t.Effect != taint.Effect { + return false + } + + if len(t.Key) > 0 && t.Key != taint.Key { + return false + } + + // TODO: Use proper defaulting when Toleration becomes a field of PodSpec + switch t.Operator { + // empty operator means Equal + case "", TolerationOpEqual: + return t.Value == taint.Value + case TolerationOpExists: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go new file mode 100644 index 0000000..b61a86a --- /dev/null +++ b/vendor/k8s.io/api/core/v1/types.go @@ -0,0 +1,5961 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) + NamespaceNodeLease string = "kube-node-lease" + // TopologyKeyAny is the service topology key that matches any node + TopologyKeyAny string = "*" +) + +// Volume represents a named volume in a pod that may be accessed by any container in the pod. +type Volume struct { + // Volume's name. + // Must be a DNS_LABEL and unique within the pod. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // VolumeSource represents the location and type of the mounted volume. + // If not specified, the Volume is implied to be an EmptyDir. + // This implied behavior is deprecated and will be removed in a future version. + VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` +} + +// Represents the source of a volume to mount. 
+// Only one of its members may be specified. +type VolumeSource struct { + // HostPath represents a pre-existing file or directory on the host + // machine that is directly exposed to the container. This is generally + // used for system agents or other privileged things that are allowed + // to see the host machine. Most containers will NOT need this. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // --- + // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not + // mount host directories as read/write. + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` + // EmptyDir represents a temporary directory that shares a pod's lifetime. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` + // GitRepo represents a git repository at a particular revision. + // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an + // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir + // into the Pod's container. + // +optional + GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` + // Secret represents a secret that should populate this volume. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` + // NFS represents an NFS mount on the host that shares a pod's lifetime + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://examples.k8s.io/volumes/iscsi/README.md + // +optional + ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` + // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // +optional + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` + // PersistentVolumeClaimVolumeSource represents a reference to a + // PersistentVolumeClaim in the same namespace. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/rbd/README.md + // +optional + RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` + // Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` + // DownwardAPI represents downward API about the pod that should populate this volume + // +optional + DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` + // ConfigMap represents a configMap that should populate this volume + // +optional + ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
+ // +optional + AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` + // Items for all in one resources secrets, configmaps, and downward API + Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` + // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. + // +optional + StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` + // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). + // +optional + CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` +} + +// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. +// This volume finds the bound PV and mounts that volume for the pod. A +// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another +// type of volume that is owned by someone else (the system). +type PersistentVolumeClaimVolumeSource struct { + // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` + // Will force the ReadOnly setting in VolumeMounts. + // Default false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// PersistentVolumeSource is similar to VolumeSource but meant for the +// administrator who creates PVs. Exactly one of its members must be set. +type PersistentVolumeSource struct { + // GCEPersistentDisk represents a GCE Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk + // +optional + GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` + // AWSElasticBlockStore represents an AWS Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` + // HostPath represents a directory on the host. + // Provisioned by a developer or tester. + // This is useful for single-node development and testing only! + // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. 
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` + // Glusterfs represents a Glusterfs volume that is attached to a host and + // exposed to the pod. Provisioned by an admin. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // +optional + Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + // NFS represents an NFS mount on the host. Provisioned by an admin. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` + // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. + // More info: https://examples.k8s.io/volumes/rbd/README.md + // +optional + RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` + // ISCSI represents an ISCSI Disk resource that is attached to a + // kubelet's host machine and then exposed to the pod. Provisioned by an admin. + // +optional + ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` + // Cinder represents a cinder volume attached and mounted on kubelets host machine. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` + // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime + // +optional + CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` + // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. + // +optional + FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` + // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running + // +optional + Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` + // FlexVolume represents a generic volume resource that is + // provisioned/attached using an exec based plugin. + // +optional + FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` + // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. + // +optional + AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` + // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine + // +optional + VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` + // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime + // +optional + Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"` + // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
+ // +optional + AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` + // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine + PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` + // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine + // +optional + PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` + // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. + // +optional + ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` + // Local represents directly-attached storage with node affinity + // +optional + Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` + // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod + // More info: https://examples.k8s.io/volumes/storageos/README.md + // +optional + StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` + // CSI represents storage that is handled by an external CSI driver (Beta feature). + // +optional + CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` +} + +const ( + // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. + // It's currently still used and will be held for backwards compatibility + BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" + + // MountOptionAnnotation defines mount option annotation used in PVs + MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolume (PV) is a storage resource provisioned by an administrator. +// It is analogous to a node. +// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes +type PersistentVolume struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines a specification of a persistent volume owned by the cluster. + // Provisioned by an administrator. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status for the persistent volume. + // Populated by the system. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes + // +optional + Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// PersistentVolumeSpec is the specification of a persistent volume. +type PersistentVolumeSpec struct { + // A description of the persistent volume's resources and capacity. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // The actual volume backing the persistent volume. + PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` + // AccessModes contains all ways the volume can be mounted. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. + // Expected to be non-nil when bound. + // claim.VolumeName is the authoritative bind between PV and PVC. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding + // +optional + ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` + // What happens to a persistent volume when released from its claim. + // Valid options are Retain (default for manually created PersistentVolumes), Delete (default + // for dynamically provisioned PersistentVolumes), and Recycle (deprecated). + // Recycle must be supported by the volume plugin underlying this PersistentVolume. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming + // +optional + PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` + // Name of StorageClass to which this persistent volume belongs. Empty value + // means that this volume does not belong to any StorageClass. + // +optional + StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"` + // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will + // simply fail if one is invalid. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options + // +optional + MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` + // volumeMode defines if a volume is intended to be used with a formatted filesystem + // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` + // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. + // This field influences the scheduling of pods that use this volume. + // +optional + NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"` +} + +// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. +type VolumeNodeAffinity struct { + // Required specifies hard node constraints that must be met. + Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"` +} + +// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. 
+type PersistentVolumeReclaimPolicy string + +const ( + // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. + // The volume plugin must support Recycling. + PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" + // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. + // The volume plugin must support Deletion. + PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" + // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. + // The default policy is Retain. + PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" +) + +// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. +type PersistentVolumeMode string + +const ( + // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. + PersistentVolumeBlock PersistentVolumeMode = "Block" + // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. + PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" +) + +// PersistentVolumeStatus is the current status of a persistent volume. +type PersistentVolumeStatus struct { + // Phase indicates if a volume is available, bound to a claim, or released by a claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase + // +optional + Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` + // A human-readable message indicating details about why the volume is in this state. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // Reason is a brief CamelCase string that describes any failure and is meant + // for machine parsing and tidy display in the CLI. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeList is a list of PersistentVolume items. +type PersistentVolumeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // List of persistent volumes. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes + Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeClaim is a user's request for and claim to a persistent volume +type PersistentVolumeClaim struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired characteristics of a volume requested by a pod author. 
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status represents the current information/status of a persistent volume claim. + // Read-only. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + // +optional + Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. +type PersistentVolumeClaimList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // A list of persistent volume claims. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims + Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PersistentVolumeClaimSpec describes the common attributes of storage devices +// and allows a Source for provider-specific attributes +type PersistentVolumeClaimSpec struct { + // AccessModes contains the desired access modes the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 + // +optional + AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` + // A label query over volumes to consider for binding. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` + // Resources represents the minimum resources the volume should have. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` + // VolumeName is the binding reference to the PersistentVolume backing this claim. + // +optional + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` + // Name of the StorageClass required by the claim. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 + // +optional + StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` + // volumeMode defines what type of volume is required by the claim. + // Value of Filesystem is implied when not included in claim spec. + // +optional + VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` + // This field can be used to specify either: + // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) + // * An existing PVC (PersistentVolumeClaim) + // * An existing custom resource/object that implements data population (Alpha) + // In order to use VolumeSnapshot object types, the appropriate feature gate + // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) + // If the provisioner or an external controller can support the specified data source, + // it will create a new volume based on the contents of the specified data source. 
+ // If the specified data source is not supported, the volume will
+ // not be created and the failure will be reported as an event.
+ // In the future, we plan to support more data source types and the behavior
+ // of the provisioner may change.
+ // +optional
+ DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"`
+}
+
+// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type
+type PersistentVolumeClaimConditionType string
+
+const (
+ // PersistentVolumeClaimResizing - a user-triggered resize of the PVC has been started
+ PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing"
+ // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node
+ PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending"
+)
+
+// PersistentVolumeClaimCondition contains details about the state of a PVC
+type PersistentVolumeClaimCondition struct {
+ Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"`
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"`
+ // Last time we probed the condition.
+ // +optional
+ LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"`
+ // Last time the condition transitioned from one status to another.
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // Unique, this should be a short, machine understandable string that gives the reason
+ // for condition's last transition. If it reports "ResizeStarted" that means the underlying
+ // persistent volume is being resized.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human-readable message indicating details about last transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// PersistentVolumeClaimStatus is the current status of a persistent volume claim.
+type PersistentVolumeClaimStatus struct {
+ // Phase represents the current phase of PersistentVolumeClaim.
+ // +optional
+ Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"`
+ // AccessModes contains the actual access modes the volume backing the PVC has.
+ // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1
+ // +optional
+ AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"`
+ // Represents the actual resources of the underlying volume.
+ // +optional
+ Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"`
+ // Current Condition of persistent volume claim. If underlying persistent volume is being
+ // resized then the Condition will be set to 'ResizeStarted'.
+ // +optional
+ // +patchMergeKey=type
+ // +patchStrategy=merge
+ Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"`
+}
+
+type PersistentVolumeAccessMode string
+
+const (
+ // can be mounted in read/write mode to exactly 1 host
+ ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce"
+ // can be mounted in read-only mode to many hosts
+ ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany"
+ // can be mounted in read/write mode to many hosts
+ ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany"
+)
+
+type PersistentVolumePhase string
+
+const (
+ // used for PersistentVolumes that are not available
+ VolumePending PersistentVolumePhase = "Pending"
+ // used for PersistentVolumes that are not yet bound
+ // Available volumes are held by the binder and matched to PersistentVolumeClaims
+ VolumeAvailable PersistentVolumePhase = "Available"
+ // used for PersistentVolumes that are bound
+ VolumeBound PersistentVolumePhase = "Bound"
+ // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted
+ // released volumes must be recycled before becoming available again
+ // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource
+ VolumeReleased PersistentVolumePhase = "Released"
+ // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim
+ VolumeFailed PersistentVolumePhase = "Failed"
+)
+
+type PersistentVolumeClaimPhase string
+
+const (
+ // used for PersistentVolumeClaims that are not yet bound
+ ClaimPending PersistentVolumeClaimPhase = "Pending"
+ // used for PersistentVolumeClaims that are bound
+ ClaimBound PersistentVolumeClaimPhase = "Bound"
+ // used for PersistentVolumeClaims that lost their underlying
+ // PersistentVolume. The claim was bound to a PersistentVolume and this
+ // volume does not exist any longer and all data on it was lost.
+ ClaimLost PersistentVolumeClaimPhase = "Lost"
+)
+
+type HostPathType string
+
+const (
+ // For backwards compatibility, leave it empty if unset
+ HostPathUnset HostPathType = ""
+ // If nothing exists at the given path, an empty directory will be created there
+ // as needed with file mode 0755, having the same group and ownership with Kubelet.
+ HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate"
+ // A directory must exist at the given path
+ HostPathDirectory HostPathType = "Directory"
+ // If nothing exists at the given path, an empty file will be created there
+ // as needed with file mode 0644, having the same group and ownership with Kubelet.
+ HostPathFileOrCreate HostPathType = "FileOrCreate"
+ // A file must exist at the given path
+ HostPathFile HostPathType = "File"
+ // A UNIX socket must exist at the given path
+ HostPathSocket HostPathType = "Socket"
+ // A character device must exist at the given path
+ HostPathCharDev HostPathType = "CharDevice"
+ // A block device must exist at the given path
+ HostPathBlockDev HostPathType = "BlockDevice"
+)
+
+// Represents a host path mapped into a pod.
+// Host path volumes do not support ownership management or SELinux relabeling.
+type HostPathVolumeSource struct {
+ // Path of the directory on the host.
+ // If the path is a symlink, it will follow the link to the real path.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` + // Type for HostPath Volume + // Defaults to "" + // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath + // +optional + Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"` +} + +// Represents an empty directory for a pod. +// Empty directory volumes support ownership management and SELinux relabeling. +type EmptyDirVolumeSource struct { + // What type of storage medium should back this directory. + // The default is "" which means to use the node's default medium. + // Must be an empty string (default) or Memory. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir + // +optional + Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` + // Total amount of local storage required for this EmptyDir volume. + // The size limit is also applicable for memory medium. + // The maximum usage on memory medium EmptyDir would be the minimum value between + // the SizeLimit specified here and the sum of memory limits of all containers in a pod. + // The default is nil which means that the limit is undefined. + // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir + // +optional + SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` + + // Path is the Glusterfs volume path. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents a Glusterfs mount that lasts the lifetime of a pod. +// Glusterfs volumes do not support ownership management or SELinux relabeling. +type GlusterfsPersistentVolumeSource struct { + // EndpointsName is the endpoint name that details Glusterfs topology. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` + + // Path is the Glusterfs volume path. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // EndpointsNamespace is the namespace that contains Glusterfs endpoint. + // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. 
+ // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // +optional + EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDVolumeSource struct { + // A collection of Ceph monitors. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // The rados image name. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // The rados pool name. + // Default is rbd. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` + // The rados user name. + // Default is admin. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` +} + +// Represents a Rados Block Device mount that lasts the lifetime of a pod. +// RBD volumes support ownership management and SELinux relabeling. +type RBDPersistentVolumeSource struct { + // A collection of Ceph monitors. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // The rados image name. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // The rados pool name. 
+ // Default is rbd. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` + // The rados user name. + // Default is admin. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` + // Keyring is the path to key ring for RBDUser. + // Default is /etc/ceph/keyring. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` + // SecretRef is name of the authentication secret for RBDUser. If provided + // overrides keyring. + // Default is nil. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +type CinderVolumeSource struct { + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` +} + +// Represents a cinder volume resource in Openstack. +// A Cinder volume must exist before mounting to a container. +// The volume must also be in the same region as the kubelet. +// Cinder volumes support ownership management and SELinux relabeling. +type CinderPersistentVolumeSource struct { + // volume id used to identify the volume in cinder. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. 
+ // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + // Optional: points to a secret object containing parameters used to connect + // to OpenStack. + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. +type CephFSVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` + // Optional: User is the rados user name, default is admin + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` +} + +// SecretReference represents a Secret Reference. It has enough information to retrieve secret +// in any namespace +type SecretReference struct { + // Name is unique within a namespace to reference a secret resource. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Namespace defines the space within which the secret name must be unique. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` +} + +// Represents a Ceph Filesystem mount that lasts the lifetime of a pod +// Cephfs volumes do not support ownership management or SELinux relabeling. 
+type CephFSPersistentVolumeSource struct { + // Required: Monitors is a collection of Ceph monitors + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` + // Optional: Used as the mounted root, rather than the full Ceph tree, default is / + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` + // Optional: User is the rados user name, default is admin + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +optional + User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` + // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` + // Optional: SecretRef is reference to the authentication secret for User, default is empty. + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` +} + +// Represents a Flocker volume mounted by the Flocker agent. +// One and only one of datasetName and datasetUUID should be set. +// Flocker volumes do not support ownership management or SELinux relabeling. +type FlockerVolumeSource struct { + // Name of the dataset stored as metadata -> name on the dataset for Flocker + // should be considered as deprecated + // +optional + DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"` + // UUID of the dataset. This is unique identifier of a Flocker dataset + // +optional + DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"` +} + +// StorageMedium defines ways that storage can be allocated to a volume. +type StorageMedium string + +const ( + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this + StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux) + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages + StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages- +) + +// Protocol defines network protocols supported for things like container ports. +type Protocol string + +const ( + // ProtocolTCP is the TCP protocol. + ProtocolTCP Protocol = "TCP" + // ProtocolUDP is the UDP protocol. + ProtocolUDP Protocol = "UDP" + // ProtocolSCTP is the SCTP protocol. + ProtocolSCTP Protocol = "SCTP" +) + +// Represents a Persistent Disk resource in Google Compute Engine. +// +// A GCE PD must exist before mounting to a container. The disk must +// also be in the same GCE project and zone as the kubelet. A GCE PD +// can only be mounted as read/write once or read-only many times. GCE +// PDs support ownership management and SELinux relabeling. +type GCEPersistentDiskVolumeSource struct { + // Unique name of the PD resource in GCE. Used to identify the disk in GCE. 
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"`
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+ // The partition in the volume that you want to mount.
+ // If omitted, the default is to mount by volume name.
+ // Examples: For volume /dev/sda1, you specify the partition as "1".
+ // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty).
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // +optional
+ Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"`
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk
+ // +optional
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"`
+}
+
+// Represents a Quobyte mount that lasts the lifetime of a pod.
+// Quobyte volumes do not support ownership management or SELinux relabeling.
+type QuobyteVolumeSource struct {
+ // Registry represents a single or multiple Quobyte Registry services
+ // specified as a string as host:port pair (multiple entries are separated with commas)
+ // which acts as the central registry for volumes
+ Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"`
+
+ // Volume is a string that references an already created Quobyte volume by name.
+ Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"`
+
+ // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions.
+ // Defaults to false.
+ // +optional
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
+
+ // User to map volume access to
+ // Defaults to serviceaccount user
+ // +optional
+ User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"`
+
+ // Group to map volume access to
+ // Default is no group
+ // +optional
+ Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"`
+
+ // Tenant owning the given Quobyte volume in the Backend
+ // Used with dynamically provisioned Quobyte volumes, value is set by the plugin
+ // +optional
+ Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"`
+}
+
+// FlexPersistentVolumeSource represents a generic persistent volume resource that is
+// provisioned/attached using an exec based plugin.
+type FlexPersistentVolumeSource struct {
+ // Driver is the name of the driver to use for this volume.
+ Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"`
+ // Filesystem type to mount.
+ // Must be a filesystem type supported by the host operating system.
+ // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script.
+ // +optional
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
+ // Optional: SecretRef is reference to the secret object containing
+ // sensitive information to pass to the plugin scripts. This may be
+ // empty if no secret object is specified. 
If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: Extra command options if any. + // +optional + Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` +} + +// FlexVolume represents a generic volume resource that is +// provisioned/attached using an exec based plugin. +type FlexVolumeSource struct { + // Driver is the name of the driver to use for this volume. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Optional: SecretRef is reference to the secret object containing + // sensitive information to pass to the plugin scripts. This may be + // empty if no secret object is specified. If the secret object + // contains more than one secret, all secrets are passed to the plugin + // scripts. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: Extra command options if any. + // +optional + Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` +} + +// Represents a Persistent Disk resource in AWS. +// +// An AWS EBS disk must exist before mounting to a container. The disk +// must also be in the same AWS zone as the kubelet. An AWS EBS disk +// can only be mounted as read/write once. AWS EBS volumes support +// ownership management and SELinux relabeling. +type AWSElasticBlockStoreVolumeSource struct { + // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // The partition in the volume that you want to mount. + // If omitted, the default is to mount by volume name. + // Examples: For volume /dev/sda1, you specify the partition as "1". + // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). + // +optional + Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` + // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". + // If omitted, the default is "false". 
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` +} + +// Represents a volume that is populated with the contents of a git repository. +// Git repo volumes do not support ownership management. +// Git repo volumes support SELinux relabeling. +// +// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an +// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir +// into the Pod's container. +type GitRepoVolumeSource struct { + // Repository URL + Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` + // Commit hash for the specified revision. + // +optional + Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` + // Target directory name. + // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the + // git repository. Otherwise, if specified, the volume will contain the git repository in + // the subdirectory with the given name. + // +optional + Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"` +} + +// Adapts a Secret into a volume. +// +// The contents of the target Secret's Data field will be presented in a volume +// as files using the keys in the Data field as the file names. +// Secret volumes support ownership management and SELinux relabeling. +type SecretVolumeSource struct { + // Name of the secret in the pod's namespace to use. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` + // Specify whether the Secret or its keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +const ( + SecretVolumeSourceDefaultMode int32 = 0644 +) + +// Adapts a secret into a projected volume. +// +// The contents of the target Secret's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names. +// Note that this is identical to a secret volume source without the default +// mode. 
+type SecretProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // Secret will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the Secret, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// Represents an NFS mount that lasts the lifetime of a pod. +// NFS volumes do not support ownership management or SELinux relabeling. +type NFSVolumeSource struct { + // Server is the hostname or IP address of the NFS server. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + Server string `json:"server" protobuf:"bytes,1,opt,name=server"` + + // Path that is exported by the NFS server. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + + // ReadOnly here will force + // the NFS export to be mounted with read-only permissions. + // Defaults to false. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// Represents an ISCSI disk. +// ISCSI volumes can only be mounted as read/write once. +// ISCSI volumes support ownership management and SELinux relabeling. +type ISCSIVolumeSource struct { + // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). + TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` + // Target iSCSI Qualified Name. + IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` + // iSCSI Target Lun number. + Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` + // iSCSI Interface Name that uses an iSCSI transport. + // Defaults to 'default' (tcp). + // +optional + ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` + // Filesystem type of the volume that you want to mount. + // Tip: Ensure that the filesystem type is supported by the host operating system. + // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` + // ReadOnly here will force the ReadOnly setting in VolumeMounts. + // Defaults to false. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` + // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port + // is other than default (typically TCP ports 860 and 3260). 
+ // +optional
+ Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
+ // whether support iSCSI Discovery CHAP authentication
+ // +optional
+ DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
+ // whether support iSCSI Session CHAP authentication
+ // +optional
+ SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
+ // CHAP Secret for iSCSI target and initiator authentication
+ // +optional
+ SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
+ // Custom iSCSI Initiator Name.
+ // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ // +optional
+ InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
+}
+
+// ISCSIPersistentVolumeSource represents an ISCSI disk.
+// ISCSI volumes can only be mounted as read/write once.
+// ISCSI volumes support ownership management and SELinux relabeling.
+type ISCSIPersistentVolumeSource struct {
+ // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"`
+ // Target iSCSI Qualified Name.
+ IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"`
+ // iSCSI Target Lun number.
+ Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"`
+ // iSCSI Interface Name that uses an iSCSI transport.
+ // Defaults to 'default' (tcp).
+ // +optional
+ ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"`
+ // Filesystem type of the volume that you want to mount.
+ // Tip: Ensure that the filesystem type is supported by the host operating system.
+ // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi
+ // TODO: how do we prevent errors in the filesystem from compromising the machine
+ // +optional
+ FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"`
+ // ReadOnly here will force the ReadOnly setting in VolumeMounts.
+ // Defaults to false.
+ // +optional
+ ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"`
+ // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port
+ // is other than default (typically TCP ports 860 and 3260).
+ // +optional
+ Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"`
+ // whether support iSCSI Discovery CHAP authentication
+ // +optional
+ DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"`
+ // whether support iSCSI Session CHAP authentication
+ // +optional
+ SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"`
+ // CHAP Secret for iSCSI target and initiator authentication
+ // +optional
+ SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"`
+ // Custom iSCSI Initiator Name.
+ // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface
+ // <target portal>:<volume name> will be created for the connection.
+ // +optional
+ InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"`
+}
+
+// Represents a Fibre Channel volume.
+// Fibre Channel volumes can only be mounted as read/write once. +// Fibre Channel volumes support ownership management and SELinux relabeling. +type FCVolumeSource struct { + // Optional: FC target worldwide names (WWNs) + // +optional + TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"` + // Optional: FC target lun number + // +optional + Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // TODO: how do we prevent errors in the filesystem from compromising the machine + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Optional: Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // Optional: FC volume world wide identifiers (wwids) + // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. + // +optional + WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"` +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFileVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` + // Share Name + ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. +type AzureFilePersistentVolumeSource struct { + // the name of secret that contains Azure Storage Account Name and Key + SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` + // Share Name + ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + // the namespace of the secret that contains Azure Storage Account Name and Key + // default is the same as the Pod + // +optional + SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"` +} + +// Represents a vSphere volume resource. +type VsphereVirtualDiskVolumeSource struct { + // Path that identifies vSphere volume vmdk + VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Storage Policy Based Management (SPBM) profile name. + // +optional + StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"` + // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. 
+ // +optional + StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"` +} + +// Represents a Photon Controller persistent disk resource. +type PhotonPersistentDiskVolumeSource struct { + // ID that identifies Photon Controller persistent disk + PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` +} + +type AzureDataDiskCachingMode string +type AzureDataDiskKind string + +const ( + AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" + AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" + AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" + + AzureSharedBlobDisk AzureDataDiskKind = "Shared" + AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" + AzureManagedDisk AzureDataDiskKind = "Managed" +) + +// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. +type AzureDiskVolumeSource struct { + // The Name of the data disk in the blob storage + DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"` + // The URI the data disk in the blob storage + DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"` + // Host Caching mode: None, Read Only, Read Write. + // +optional + CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` + // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared + Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` +} + +// PortworxVolumeSource represents a Portworx volume resource. +type PortworxVolumeSource struct { + // VolumeID uniquely identifies a Portworx volume + VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` + // FSType represents the filesystem type to mount + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. + FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` +} + +// ScaleIOVolumeSource represents a persistent ScaleIO volume +type ScaleIOVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` + // The name of the storage system as configured in ScaleIO. 
+ System string `json:"system" protobuf:"bytes,2,opt,name=system"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // Default is ThinProvisioned. + // +optional + StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". + // Default is "xfs". + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` +} + +// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume +type ScaleIOPersistentVolumeSource struct { + // The host address of the ScaleIO API Gateway. + Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` + // The name of the storage system as configured in ScaleIO. + System string `json:"system" protobuf:"bytes,2,opt,name=system"` + // SecretRef references to the secret for ScaleIO user and other + // sensitive information. If this is not provided, Login operation will fail. + SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` + // Flag to enable/disable SSL communication with Gateway, default false + // +optional + SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` + // The name of the ScaleIO Protection Domain for the configured storage. + // +optional + ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` + // The ScaleIO Storage Pool associated with the protection domain. + // +optional + StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` + // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. + // Default is ThinProvisioned. + // +optional + StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` + // The name of a volume already created in the ScaleIO system + // that is associated with this volume source. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". 
+ // Default is "xfs" + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` +} + +// Represents a StorageOS persistent volume resource. +type StorageOSVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. + // +optional + SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` +} + +// Represents a StorageOS persistent volume resource. +type StorageOSPersistentVolumeSource struct { + // VolumeName is the human-readable name of the StorageOS volume. Volume + // names are only unique within a namespace. + VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` + // VolumeNamespace specifies the scope of the volume within StorageOS. If no + // namespace is specified then the Pod's namespace will be used. This allows the + // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. + // Set VolumeName to any name to override the default behaviour. + // Set to "default" if you are not using namespaces within StorageOS. + // Namespaces that do not pre-exist within StorageOS will be created. + // +optional + VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + // Defaults to false (read/write). ReadOnly here will force + // the ReadOnly setting in VolumeMounts. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` + // SecretRef specifies the secret to use for obtaining the StorageOS API + // credentials. If not specified, default values will be attempted. 
+ // +optional + SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` +} + +// Adapts a ConfigMap into a volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// volume as files using the keys in the Data field as the file names, unless +// the items element is populated with specific mappings of keys to paths. +// ConfigMap volumes support ownership management and SELinux relabeling. +type ConfigMapVolumeSource struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +const ( + ConfigMapVolumeSourceDefaultMode int32 = 0644 +) + +// Adapts a ConfigMap into a projected volume. +// +// The contents of the target ConfigMap's Data field will be presented in a +// projected volume as files using the keys in the Data field as the file names, +// unless the items element is populated with specific mappings of keys to paths. +// Note that this is identical to a configmap volume source without the default +// mode. +type ConfigMapProjection struct { + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // If unspecified, each key-value pair in the Data field of the referenced + // ConfigMap will be projected into the volume as a file whose name is the + // key and content is the value. If specified, the listed keys will be + // projected into the specified paths, and unlisted keys will not be + // present. If a key is specified which is not present in the ConfigMap, + // the volume setup will error unless it is marked optional. Paths must be + // relative and may not contain the '..' path or start with '..'. + // +optional + Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` + // Specify whether the ConfigMap or its keys must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` +} + +// ServiceAccountTokenProjection represents a projected service account token +// volume. This projection can be used to insert a service account token into +// the pods runtime filesystem for use against APIs (Kubernetes API Server or +// otherwise). +type ServiceAccountTokenProjection struct { + // Audience is the intended audience of the token. 
A recipient of a token + // must identify itself with an identifier specified in the audience of the + // token, and otherwise should reject the token. The audience defaults to the + // identifier of the apiserver. + //+optional + Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"` + // ExpirationSeconds is the requested duration of validity of the service + // account token. As the token approaches expiration, the kubelet volume + // plugin will proactively rotate the service account token. The kubelet will + // start trying to rotate the token if the token is older than 80 percent of + // its time to live or if the token is older than 24 hours.Defaults to 1 hour + // and must be at least 10 minutes. + //+optional + ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` + // Path is the path relative to the mount point of the file to project the + // token into. + Path string `json:"path" protobuf:"bytes,3,opt,name=path"` +} + +// Represents a projected volume source +type ProjectedVolumeSource struct { + // list of volume projections + Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` + // Mode bits to use on created files by default. Must be a value between + // 0 and 0777. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +// Projection that may be projected along with other supported volume types +type VolumeProjection struct { + // all types below are the supported types for projection into the same volume + + // information about the secret data to project + // +optional + Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` + // information about the downwardAPI data to project + // +optional + DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"` + // information about the configMap data to project + // +optional + ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"` + // information about the serviceAccountToken data to project + // +optional + ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"` +} + +const ( + ProjectedVolumeSourceDefaultMode int32 = 0644 +) + +// Maps a string key to a path within a volume. +type KeyToPath struct { + // The key to project. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + + // The relative path of the file to map the key to. + // May not be an absolute path. + // May not contain the path element '..'. + // May not start with the string '..'. + Path string `json:"path" protobuf:"bytes,2,opt,name=path"` + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` +} + +// Local represents directly-attached storage with node affinity (Beta feature) +type LocalVolumeSource struct { + // The full path to the volume on the node. 
+ // It can be either a directory or block device (disk, partition, ...). + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` + + // Filesystem type to mount. + // It applies only when the Path is a block device. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. + // +optional + FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` +} + +// Represents storage that is managed by an external CSI volume driver (Beta feature) +type CSIPersistentVolumeSource struct { + // Driver is the name of the driver to use for this volume. + // Required. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // VolumeHandle is the unique volume name returned by the CSI volume + // plugin’s CreateVolume to refer to the volume on all subsequent calls. + // Required. + VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"` + + // Optional: The value to pass to ControllerPublishVolumeRequest. + // Defaults to false (read/write). + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` + + // Filesystem type to mount. + // Must be a filesystem type supported by the host operating system. + // Ex. "ext4", "xfs", "ntfs". + // +optional + FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` + + // Attributes of the volume to publish. + // +optional + VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"` + + // ControllerPublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerPublishVolume and ControllerUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` + + // NodeStageSecretRef is a reference to the secret object containing sensitive + // information to pass to the CSI driver to complete the CSI NodeStageVolume + // and NodeStageVolume and NodeUnstageVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secrets are passed. + // +optional + NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` + + // ControllerExpandSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // ControllerExpandVolume call. + // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. + // This field is optional, and may be empty if no secret is required. 
If the + // secret object contains more than one secret, all secrets are passed. + // +optional + ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` +} + +// Represents a source location of a volume to mount, managed by an external CSI driver +type CSIVolumeSource struct { + // Driver is the name of the CSI driver that handles this volume. + // Consult with your admin for the correct name as registered in the cluster. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` + + // Specifies a read-only configuration for the volume. + // Defaults to false (read/write). + // +optional + ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + + // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". + // If not provided, the empty value is passed to the associated CSI driver + // which will determine the default filesystem to apply. + // +optional + FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` + + // VolumeAttributes stores driver-specific properties that are passed to the CSI + // driver. Consult your driver's documentation for supported values. + // +optional + VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"` + + // NodePublishSecretRef is a reference to the secret object containing + // sensitive information to pass to the CSI driver to complete the CSI + // NodePublishVolume and NodeUnpublishVolume calls. + // This field is optional, and may be empty if no secret is required. If the + // secret object contains more than one secret, all secret references are passed. + // +optional + NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"` +} + +// ContainerPort represents a network port in a single container. +type ContainerPort struct { + // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each + // named port in a pod must have a unique name. Name for the port that can be + // referred to by services. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // Number of port to expose on the host. + // If specified, this must be a valid port number, 0 < x < 65536. + // If HostNetwork is specified, this must match ContainerPort. + // Most containers do not need this. + // +optional + HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"` + // Number of port to expose on the pod's IP address. + // This must be a valid port number, 0 < x < 65536. + ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"` + // Protocol for port. Must be UDP, TCP, or SCTP. + // Defaults to "TCP". + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` + // What host IP to bind the external port to. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` +} + +// VolumeMount describes a mounting of a Volume within a container. +type VolumeMount struct { + // This must match the Name of a Volume. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Mounted read-only if true, read-write otherwise (false or unspecified). + // Defaults to false. 
+ // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` + // Path within the container at which the volume should be mounted. Must + // not contain ':'. + MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` + // Path within the volume from which the container's volume should be mounted. + // Defaults to "" (volume's root). + // +optional + SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` + // mountPropagation determines how mounts are propagated from the host + // to container and the other way around. + // When not set, MountPropagationNone is used. + // This field is beta in 1.10. + // +optional + MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` + // Expanded path within the volume from which the container's volume should be mounted. + // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. + // Defaults to "" (volume's root). + // SubPathExpr and SubPath are mutually exclusive. + // +optional + SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` +} + +// MountPropagationMode describes mount propagation. +type MountPropagationMode string + +const ( + // MountPropagationNone means that the volume in a container will + // not receive new mounts from the host or other containers, and filesystems + // mounted inside the container won't be propagated to the host or other + // containers. + // Note that this mode corresponds to "private" in Linux terminology. + MountPropagationNone MountPropagationMode = "None" + // MountPropagationHostToContainer means that the volume in a container will + // receive new mounts from the host or other containers, but filesystems + // mounted inside the container won't be propagated to the host or other + // containers. + // Note that this mode is recursively applied to all mounts in the volume + // ("rslave" in Linux terminology). + MountPropagationHostToContainer MountPropagationMode = "HostToContainer" + // MountPropagationBidirectional means that the volume in a container will + // receive new mounts from the host or other containers, and its own mounts + // will be propagated from the container to the host or other containers. + // Note that this mode is recursively applied to all mounts in the volume + // ("rshared" in Linux terminology). + MountPropagationBidirectional MountPropagationMode = "Bidirectional" +) + +// volumeDevice describes a mapping of a raw block device within a container. +type VolumeDevice struct { + // name must match the name of a persistentVolumeClaim in the pod + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // devicePath is the path inside of the container that the device will be mapped to. + DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"` +} + +// EnvVar represents an environment variable present in a Container. +type EnvVar struct { + // Name of the environment variable. Must be a C_IDENTIFIER. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Optional: no more than one of the following may be specified. + + // Variable references $(VAR_NAME) are expanded + // using the previous defined environment variables in the container and + // any service environment variables. If a variable cannot be resolved, + // the reference in the input string will be unchanged. 
The $(VAR_NAME) + // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped + // references will never be expanded, regardless of whether the variable + // exists or not. + // Defaults to "". + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // Source for the environment variable's value. Cannot be used if value is not empty. + // +optional + ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` +} + +// EnvVarSource represents a source for the value of an EnvVar. +type EnvVarSource struct { + // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + // +optional + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"` + // Selects a key of a ConfigMap. + // +optional + ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"` + // Selects a key of a secret in the pod's namespace + // +optional + SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"` +} + +// ObjectFieldSelector selects an APIVersioned field of an object. +type ObjectFieldSelector struct { + // Version of the schema the FieldPath is written in terms of, defaults to "v1". + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // Path of the field to select in the specified API version. + FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"` +} + +// ResourceFieldSelector represents container resources (cpu, memory) and their output format +type ResourceFieldSelector struct { + // Container name: required for volumes, optional for env vars + // +optional + ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` + // Required: resource to select + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` + // Specifies the output format of the exposed resources, defaults to "1" + // +optional + Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"` +} + +// Selects a key from a ConfigMap. +type ConfigMapKeySelector struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // The key to select. + Key string `json:"key" protobuf:"bytes,2,opt,name=key"` + // Specify whether the ConfigMap or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` +} + +// SecretKeySelector selects a key of a Secret. +type SecretKeySelector struct { + // The name of the secret in the pod's namespace to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // The key of the secret to select from. Must be a valid secret key. 
+ Key string `json:"key" protobuf:"bytes,2,opt,name=key"` + // Specify whether the Secret or its key must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` +} + +// EnvFromSource represents the source of a set of ConfigMaps +type EnvFromSource struct { + // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. + // +optional + Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` + // The ConfigMap to select from + // +optional + ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"` + // The Secret to select from + // +optional + SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` +} + +// ConfigMapEnvSource selects a ConfigMap to populate the environment +// variables with. +// +// The contents of the target ConfigMap's Data field will represent the +// key-value pairs as environment variables. +type ConfigMapEnvSource struct { + // The ConfigMap to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the ConfigMap must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// SecretEnvSource selects a Secret to populate the environment +// variables with. +// +// The contents of the target Secret's Data field will represent the +// key-value pairs as environment variables. +type SecretEnvSource struct { + // The Secret to select from. + LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` + // Specify whether the Secret must be defined + // +optional + Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` +} + +// HTTPHeader describes a custom header to be used in HTTP probes +type HTTPHeader struct { + // The header field name + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The header field value + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +// HTTPGetAction describes an action based on HTTP Get requests. +type HTTPGetAction struct { + // Path to access on the HTTP server. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + // Name or number of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"` + // Host name to connect to, defaults to the pod IP. You probably want to set + // "Host" in httpHeaders instead. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"` + // Scheme to use for connecting to the host. + // Defaults to HTTP. + // +optional + Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` + // Custom headers to set in the request. HTTP allows repeated headers. 
+ // +optional + HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` +} + +// URIScheme identifies the scheme used for connection to a host for Get actions +type URIScheme string + +const ( + // URISchemeHTTP means that the scheme used will be http:// + URISchemeHTTP URIScheme = "HTTP" + // URISchemeHTTPS means that the scheme used will be https:// + URISchemeHTTPS URIScheme = "HTTPS" +) + +// TCPSocketAction describes an action based on opening a socket +type TCPSocketAction struct { + // Number or name of the port to access on the container. + // Number must be in the range 1 to 65535. + // Name must be an IANA_SVC_NAME. + Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"` + // Optional: Host name to connect to, defaults to the pod IP. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` +} + +// ExecAction describes a "run in container" action. +type ExecAction struct { + // Command is the command line to execute inside the container, the working directory for the + // command is root ('/') in the container's filesystem. The command is simply exec'd, it is + // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use + // a shell, you need to explicitly call out to that shell. + // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` +} + +// Probe describes a health check to be performed against a container to determine whether it is +// alive or ready to receive traffic. +type Probe struct { + // The action taken to determine the health of a container + Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` + // Number of seconds after the container has started before liveness probes are initiated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` + // Number of seconds after which the probe times out. + // Defaults to 1 second. Minimum value is 1. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` + // How often (in seconds) to perform the probe. + // Default to 10 seconds. Minimum value is 1. + // +optional + PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` + // Minimum consecutive successes for the probe to be considered successful after having failed. + // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + // +optional + SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` + // Minimum consecutive failures for the probe to be considered failed after having succeeded. + // Defaults to 3. Minimum value is 1. + // +optional + FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"` +} + +// PullPolicy describes a policy for if/when to pull a container image +type PullPolicy string + +const ( + // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. + PullAlways PullPolicy = "Always" + // PullNever means that kubelet never pulls an image, but only uses a local image. 
Container will fail if the image isn't present + PullNever PullPolicy = "Never" + // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. + PullIfNotPresent PullPolicy = "IfNotPresent" +) + +// PreemptionPolicy describes a policy for if/when to preempt a pod. +type PreemptionPolicy string + +const ( + // PreemptLowerPriority means that pod can preempt other pods with lower priority. + PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority" + // PreemptNever means that pod never preempts other pods with lower priority. + PreemptNever PreemptionPolicy = "Never" +) + +// TerminationMessagePolicy describes how termination messages are retrieved from a container. +type TerminationMessagePolicy string + +const ( + // TerminationMessageReadFile is the default behavior and will set the container status message to + // the contents of the container's terminationMessagePath when the container exits. + TerminationMessageReadFile TerminationMessagePolicy = "File" + // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs + // for the container status message when the container exits with an error and the + // terminationMessagePath has no contents. + TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" +) + +// Capability represent POSIX capabilities type +type Capability string + +// Adds and removes POSIX capabilities from running containers. +type Capabilities struct { + // Added capabilities + // +optional + Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` + // Removed capabilities + // +optional + Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` +} + +// ResourceRequirements describes the compute resource requirements. +type ResourceRequirements struct { + // Limits describes the maximum amount of compute resources allowed. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` + // Requests describes the minimum amount of compute resources required. + // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, + // otherwise to an implementation-defined value. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` +} + +const ( + // TerminationMessagePathDefault means the default path to capture the application termination message running in a container + TerminationMessagePathDefault string = "/dev/termination-log" +) + +// A single application container that you want to run within a pod. +type Container struct { + // Name of the container specified as a DNS_LABEL. + // Each container in a pod must have a unique name (DNS_LABEL). + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + // This field is optional to allow higher level config management to default or override + // container images in workload controllers like Deployments and StatefulSets. 
+ // +optional + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // List of ports to expose from the container. Exposing a port here gives + // the system additional information about the network connections a + // container uses, but is primarily informational. Not specifying a port here + // DOES NOT prevent that port from being exposed. Any port which is + // listening on the default "0.0.0.0" address inside a container will be + // accessible from the network. + // Cannot be updated. + // +optional + // +patchMergeKey=containerPort + // +patchStrategy=merge + // +listType=map + // +listMapKey=containerPort + // +listMapKey=protocol + Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Compute Resources required by this container. + // Cannot be updated. 
+ // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Periodic probe of container liveness. + // Container will be restarted if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Periodic probe of container service readiness. + // Container will be removed from service endpoints if the probe fails. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // StartupProbe indicates that the Pod has successfully initialized. + // If specified, no other probes are executed until this completes successfully. + // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. + // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, + // when it might take a long time to load data or warm a cache, than during steady-state operation. + // This cannot be updated. + // This is a beta feature enabled by the StartupProbe feature flag. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Actions that the management system should take in response to container lifecycle events. + // Cannot be updated. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. 
+ // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // Security options the pod should run with. + // More info: https://kubernetes.io/docs/concepts/policy/security-context/ + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// Handler defines a specific action that should be taken +// TODO: pass structured data to these actions, and document that data here. +type Handler struct { + // One and only one of the following should be specified. + // Exec specifies the action to take. + // +optional + Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` + // HTTPGet specifies the http request to perform. + // +optional + HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` + // TCPSocket specifies an action involving a TCP port. + // TCP hooks not yet supported + // TODO: implement a realistic TCP lifecycle hook + // +optional + TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` +} + +// Lifecycle describes actions that the management system should take in response to container lifecycle +// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks +// until the action is complete, unless the container process fails, in which case the handler is aborted. +type Lifecycle struct { + // PostStart is called immediately after a container is created. 
If the handler fails,
+ // the container is terminated and restarted according to its restart policy.
+ // Other management of the container blocks until the hook completes.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ // +optional
+ PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"`
+ // PreStop is called immediately before a container is terminated due to an
+ // API request or management event such as liveness/startup probe failure,
+ // preemption, resource contention, etc. The handler is not called if the
+ // container crashes or exits. The reason for termination is passed to the
+ // handler. The Pod's termination grace period countdown begins before the
+ // PreStop hook is executed. Regardless of the outcome of the handler, the
+ // container will eventually terminate within the Pod's termination grace
+ // period. Other management of the container blocks until the hook completes
+ // or until the termination grace period is reached.
+ // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks
+ // +optional
+ PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"`
+}
+
+type ConditionStatus string
+
+// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
+// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
+// can't decide if a resource is in the condition or not. In the future, we could add other
+// intermediate conditions, e.g. ConditionDegraded.
+const (
+ ConditionTrue ConditionStatus = "True"
+ ConditionFalse ConditionStatus = "False"
+ ConditionUnknown ConditionStatus = "Unknown"
+)
+
+// ContainerStateWaiting is a waiting state of a container.
+type ContainerStateWaiting struct {
+ // (brief) reason the container is not yet running.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"`
+ // Message regarding why the container is not yet running.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"`
+}
+
+// ContainerStateRunning is a running state of a container.
+type ContainerStateRunning struct {
+ // Time at which the container was last (re-)started
+ // +optional
+ StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"`
+}
+
+// ContainerStateTerminated is a terminated state of a container.
+type ContainerStateTerminated struct { + // Exit status from the last termination of the container + ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"` + // Signal from the last termination of the container + // +optional + Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"` + // (brief) reason from the last termination of the container + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Message regarding the last termination of the container + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + // Time at which previous execution of the container started + // +optional + StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` + // Time at which the container last terminated + // +optional + FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` + // Container's ID in the format 'docker://' + // +optional + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` +} + +// ContainerState holds a possible state of container. +// Only one of its members may be specified. +// If none of them is specified, the default one is ContainerStateWaiting. +type ContainerState struct { + // Details about a waiting container + // +optional + Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` + // Details about a running container + // +optional + Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"` + // Details about a terminated container + // +optional + Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"` +} + +// ContainerStatus contains details for the current status of this container. +type ContainerStatus struct { + // This must be a DNS_LABEL. Each container in a pod must have a unique name. + // Cannot be updated. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Details about the container's current condition. + // +optional + State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` + // Details about the container's last termination condition. + // +optional + LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` + // Specifies whether the container has passed its readiness probe. + Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` + // The number of times the container has been restarted, currently based on + // the number of dead containers that have not yet been removed. + // Note that this is calculated from dead containers. But those containers are subject to + // garbage collection. This value will get capped at 5 by GC. + RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` + // The image the container is running. + // More info: https://kubernetes.io/docs/concepts/containers/images + // TODO(dchen1107): Which image the container is running with? + Image string `json:"image" protobuf:"bytes,6,opt,name=image"` + // ImageID of the container's image. + ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` + // Container's ID in the format 'docker://'. + // +optional + ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` + // Specifies whether the container has passed its startup probe. 
+ // Initialized as false, becomes true after startupProbe is considered successful. + // Resets to false when the container is restarted, or if kubelet loses state temporarily. + // Is always true when no startupProbe is defined. + // +optional + Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` +} + +// PodPhase is a label for the condition of a pod at the current time. +type PodPhase string + +// These are the valid statuses of pods. +const ( + // PodPending means the pod has been accepted by the system, but one or more of the containers + // has not been started. This includes time before being bound to a node, as well as time spent + // pulling images onto the host. + PodPending PodPhase = "Pending" + // PodRunning means the pod has been bound to a node and all of the containers have been started. + // At least one container is still running or is in the process of being restarted. + PodRunning PodPhase = "Running" + // PodSucceeded means that all containers in the pod have voluntarily terminated + // with a container exit code of 0, and the system is not going to restart any of these containers. + PodSucceeded PodPhase = "Succeeded" + // PodFailed means that all containers in the pod have terminated, and at least one container has + // terminated in a failure (exited with a non-zero exit code or was stopped by the system). + PodFailed PodPhase = "Failed" + // PodUnknown means that for some reason the state of the pod could not be obtained, typically due + // to an error in communicating with the host of the pod. + PodUnknown PodPhase = "Unknown" +) + +// PodConditionType is a valid value for PodCondition.Type +type PodConditionType string + +// These are valid conditions of pod. +const ( + // ContainersReady indicates whether all containers in the pod are ready. + ContainersReady PodConditionType = "ContainersReady" + // PodInitialized means that all init containers in the pod have started successfully. + PodInitialized PodConditionType = "Initialized" + // PodReady means the pod is able to service requests and should be added to the + // load balancing pools of all matching services. + PodReady PodConditionType = "Ready" + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" +) + +// These are reasons for a pod's transition to a condition. +const ( + // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler + // can't schedule the pod right now, for example due to insufficient resources in the cluster. + PodReasonUnschedulable = "Unschedulable" +) + +// PodCondition contains details for the current condition of this pod. +type PodCondition struct { + // Type is the type of the condition. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` + // Status is the status of the condition. + // Can be True, False, Unknown. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we probed the condition. + // +optional + LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` + // Last time the condition transitioned from one status to another. 
+ // +optional
+ LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"`
+ // Unique, one-word, CamelCase reason for the condition's last transition.
+ // +optional
+ Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"`
+ // Human-readable message indicating details about last transition.
+ // +optional
+ Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"`
+}
+
+// RestartPolicy describes how the container should be restarted.
+// Only one of the following restart policies may be specified.
+// If none of the following policies is specified, the default one
+// is RestartPolicyAlways.
+type RestartPolicy string
+
+const (
+ RestartPolicyAlways RestartPolicy = "Always"
+ RestartPolicyOnFailure RestartPolicy = "OnFailure"
+ RestartPolicyNever RestartPolicy = "Never"
+)
+
+// DNSPolicy defines how a pod's DNS will be configured.
+type DNSPolicy string
+
+const (
+ // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS
+ // first, if it is available, then fall back on the default
+ // (as determined by kubelet) DNS settings.
+ DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet"
+
+ // DNSClusterFirst indicates that the pod should use cluster DNS
+ // first unless hostNetwork is true, if it is available, then
+ // fall back on the default (as determined by kubelet) DNS settings.
+ DNSClusterFirst DNSPolicy = "ClusterFirst"
+
+ // DNSDefault indicates that the pod should use the default (as
+ // determined by kubelet) DNS settings.
+ DNSDefault DNSPolicy = "Default"
+
+ // DNSNone indicates that the pod should use empty DNS settings. DNS
+ // parameters such as nameservers and search paths should be defined via
+ // DNSConfig.
+ DNSNone DNSPolicy = "None"
+)
+
+const (
+ // DefaultTerminationGracePeriodSeconds indicates the default duration in
+ // seconds a pod needs to terminate gracefully.
+ DefaultTerminationGracePeriodSeconds = 30
+)
+
+// A node selector represents the union of the results of one or more label queries
+// over a set of nodes; that is, it represents the OR of the selectors represented
+// by the node selector terms.
+type NodeSelector struct {
+ // Required. A list of node selector terms. The terms are ORed.
+ NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"`
+}
+
+// A null or empty node selector term matches no objects. The requirements of
+// them are ANDed.
+// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
+type NodeSelectorTerm struct {
+ // A list of node selector requirements by node's labels.
+ // +optional
+ MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"`
+ // A list of node selector requirements by node's fields.
+ // +optional
+ MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"`
+}
+
+// A node selector requirement is a selector that contains values, a key, and an operator
+// that relates the key and values.
+type NodeSelectorRequirement struct {
+ // The label key that the selector applies to.
+ Key string `json:"key" protobuf:"bytes,1,opt,name=key"`
+ // Represents a key's relationship to a set of values.
+ // Valid operators are In, NotIn, Exists, DoesNotExist, Gt, and Lt.
+ Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"` + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. If the operator is Gt or Lt, the values + // array must have a single element, which will be interpreted as an integer. + // This array is replaced during a strategic merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A node selector operator is the set of operators that can be used in +// a node selector requirement. +type NodeSelectorOperator string + +const ( + NodeSelectorOpIn NodeSelectorOperator = "In" + NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" + NodeSelectorOpExists NodeSelectorOperator = "Exists" + NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" + NodeSelectorOpGt NodeSelectorOperator = "Gt" + NodeSelectorOpLt NodeSelectorOperator = "Lt" +) + +// A topology selector term represents the result of label queries. +// A null or empty topology selector term matches no objects. +// The requirements of them are ANDed. +// It provides a subset of functionality as NodeSelectorTerm. +// This is an alpha feature and may change in the future. +type TopologySelectorTerm struct { + // A list of topology selector requirements by labels. + // +optional + MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"` +} + +// A topology selector requirement is a selector that matches given label. +// This is an alpha feature and may change in the future. +type TopologySelectorLabelRequirement struct { + // The label key that the selector applies to. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // An array of string values. One value must match the label to be selected. + // Each entry in Values is ORed. + Values []string `json:"values" protobuf:"bytes,2,rep,name=values"` +} + +// Affinity is a group of affinity scheduling rules. +type Affinity struct { + // Describes node affinity scheduling rules for the pod. + // +optional + NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"` + // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"` + // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). + // +optional + PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"` +} + +// Pod affinity is a group of inter pod affinity scheduling rules. +type PodAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// Pod anti affinity is a group of inter pod anti affinity scheduling rules. +type PodAntiAffinity struct { + // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system will try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. + // +optional + // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + + // If the anti-affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the anti-affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to a pod label update), the + // system may or may not try to eventually evict the pod from its node. + // When there are multiple elements, the lists of nodes corresponding to each + // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
+ // +optional
+ RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"`
+ // The scheduler will prefer to schedule pods to nodes that satisfy
+ // the anti-affinity expressions specified by this field, but it may choose
+ // a node that violates one or more of the expressions. The node that is
+ // most preferred is the one with the greatest sum of weights, i.e.
+ // for each node that meets all of the scheduling requirements (resource
+ // request, requiredDuringScheduling anti-affinity expressions, etc.),
+ // compute a sum by iterating through the elements of this field and adding
+ // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the
+ // node(s) with the highest sum are the most preferred.
+ // +optional
+ PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"`
+}
+
+// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
+type WeightedPodAffinityTerm struct {
+ // weight associated with matching the corresponding podAffinityTerm,
+ // in the range 1-100.
+ Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"`
+ // Required. A pod affinity term, associated with the corresponding weight.
+ PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"`
+}
+
+// Defines a set of pods (namely those matching the labelSelector
+// relative to the given namespace(s)) that this pod should be
+// co-located (affinity) or not co-located (anti-affinity) with,
+// where co-located is defined as running on a node whose value of
+// the label with key <topologyKey> matches that of any node on which
+// a pod of the set of pods is running
+type PodAffinityTerm struct {
+ // A label query over a set of resources, in this case pods.
+ // +optional
+ LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"`
+ // namespaces specifies which namespaces the labelSelector applies to (matches against);
+ // null or empty list means "this pod's namespace"
+ // +optional
+ Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"`
+ // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching
+ // the labelSelector in the specified namespaces, where co-located is defined as running on a node
+ // whose value of the label with key topologyKey matches that of any node on which any of the
+ // selected pods is running.
+ // Empty topologyKey is not allowed.
+ TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"`
+}
+
+// Node affinity is a group of node affinity scheduling rules.
+type NodeAffinity struct {
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // If the affinity requirements specified by this field are not met at
+ // scheduling time, the pod will not be scheduled onto the node.
+ // If the affinity requirements specified by this field cease to be met
+ // at some point during pod execution (e.g. due to an update), the system
+ // will try to eventually evict the pod from its node.
+ // +optional + // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` + + // If the affinity requirements specified by this field are not met at + // scheduling time, the pod will not be scheduled onto the node. + // If the affinity requirements specified by this field cease to be met + // at some point during pod execution (e.g. due to an update), the system + // may or may not try to eventually evict the pod from its node. + // +optional + RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"` + // The scheduler will prefer to schedule pods to nodes that satisfy + // the affinity expressions specified by this field, but it may choose + // a node that violates one or more of the expressions. The node that is + // most preferred is the one with the greatest sum of weights, i.e. + // for each node that meets all of the scheduling requirements (resource + // request, requiredDuringScheduling affinity expressions, etc.), + // compute a sum by iterating through the elements of this field and adding + // "weight" to the sum if the node matches the corresponding matchExpressions; the + // node(s) with the highest sum are the most preferred. + // +optional + PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` +} + +// An empty preferred scheduling term matches all objects with implicit weight 0 +// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). +type PreferredSchedulingTerm struct { + // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. + Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` + // A node selector term, associated with the corresponding weight. + Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` +} + +// The node this Taint is attached to has the "effect" on +// any pod that does not tolerate the Taint. +type Taint struct { + // Required. The taint key to be applied to a node. + Key string `json:"key" protobuf:"bytes,1,opt,name=key"` + // The taint value corresponding to the taint key. + // +optional + Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` + // Required. The effect of the taint on pods + // that do not tolerate the taint. + // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. + Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` + // TimeAdded represents the time at which the taint was added. + // It is only written for NoExecute taints. + // +optional + TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` +} + +type TaintEffect string + +const ( + // Do not allow new pods to schedule onto the node unless they tolerate the taint, + // but allow all pods submitted to Kubelet without going through the scheduler + // to start, and allow all already-running pods to continue running. + // Enforced by the scheduler. + TaintEffectNoSchedule TaintEffect = "NoSchedule" + // Like TaintEffectNoSchedule, but the scheduler tries not to schedule + // new pods onto the node, rather than prohibiting new pods from scheduling + // onto the node entirely. 
Enforced by the scheduler.
+ TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
+ // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
+ // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to
+ // Kubelet without going through the scheduler to start.
+ // Enforced by Kubelet and the scheduler.
+ // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
+
+ // Evict any already-running pods that do not tolerate the taint.
+ // Currently enforced by NodeController.
+ TaintEffectNoExecute TaintEffect = "NoExecute"
+)
+
+// The pod this Toleration is attached to tolerates any taint that matches
+// the triple <key,value,effect> using the matching operator <operator>.
+type Toleration struct {
+ // Key is the taint key that the toleration applies to. Empty means match all taint keys.
+ // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
+ // +optional
+ Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"`
+ // Operator represents a key's relationship to the value.
+ // Valid operators are Exists and Equal. Defaults to Equal.
+ // Exists is equivalent to wildcard for value, so that a pod can
+ // tolerate all taints of a particular category.
+ // +optional
+ Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
+ // Value is the taint value the toleration matches to.
+ // If the operator is Exists, the value should be empty, otherwise just a regular string.
+ // +optional
+ Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
+ // Effect indicates the taint effect to match. Empty means match all taint effects.
+ // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
+ // +optional
+ Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
+ // TolerationSeconds represents the period of time the toleration (which must be
+ // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
+ // it is not set, which means tolerate the taint forever (do not evict). Zero and
+ // negative values will be treated as 0 (evict immediately) by the system.
+ // +optional
+ TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"`
+}
+
+// A toleration operator is the set of operators that can be used in a toleration.
+type TolerationOperator string
+
+const (
+ TolerationOpExists TolerationOperator = "Exists"
+ TolerationOpEqual TolerationOperator = "Equal"
+)
+
+// PodReadinessGate contains the reference to a pod condition
+type PodReadinessGate struct {
+ // ConditionType refers to a condition in the pod's condition list with matching type.
+ ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"`
+}
+
+// PodSpec is a description of a pod.
+type PodSpec struct {
+ // List of volumes that can be mounted by containers belonging to the pod.
+ // More info: https://kubernetes.io/docs/concepts/storage/volumes
+ // +optional
+ // +patchMergeKey=name
+ // +patchStrategy=merge,retainKeys
+ Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
+ // List of initialization containers belonging to the pod.
+ // Init containers are executed in order prior to containers being started.
If any
+ // init container fails, the pod is considered to have failed and is handled according
+ // to its restartPolicy. The name for an init container or normal container must be
+ // unique among all containers.
+ // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes.
+ // The resourceRequirements of an init container are taken into account during scheduling
+ // by finding the highest request/limit for each resource type, and then using the max of
+ // that value or the sum of the normal containers. Limits are applied to init containers
+ // in a similar fashion.
+ // Init containers cannot currently be added or removed.
+ // Cannot be updated.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"`
+ // List of containers belonging to the pod.
+ // Containers cannot currently be added or removed.
+ // There must be at least one container in a Pod.
+ // Cannot be updated.
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"`
+ // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing
+ // pod to perform user-initiated actions such as debugging. This list cannot be specified when
+ // creating a pod, and it cannot be modified by updating the pod spec. In order to add an
+ // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource.
+ // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.
+ // +optional
+ // +patchMergeKey=name
+ // +patchStrategy=merge
+ EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"`
+ // Restart policy for all containers within the pod.
+ // One of Always, OnFailure, Never.
+ // Default to Always.
+ // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy
+ // +optional
+ RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"`
+ // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
+ // Value must be non-negative integer. The value zero indicates delete immediately.
+ // If this value is nil, the default grace period will be used instead.
+ // The grace period is the duration in seconds after the processes running in the pod are sent
+ // a termination signal and the time when the processes are forcibly halted with a kill signal.
+ // Set this value longer than the expected cleanup time for your process.
+ // Defaults to 30 seconds.
+ // +optional
+ TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"`
+ // Optional duration in seconds the pod may be active on the node relative to
+ // StartTime before the system will actively try to mark it failed and kill associated containers.
+ // Value must be a positive integer.
+ // +optional
+ ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"`
+ // Set DNS policy for the pod.
+ // Defaults to "ClusterFirst".
+ // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'.
+ // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy.
+ // To have DNS options set along with hostNetwork, you have to specify DNS policy
+ // explicitly to 'ClusterFirstWithHostNet'.
+ // +optional
+ DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"`
+ // NodeSelector is a selector which must be true for the pod to fit on a node.
+ // Selector which must match a node's labels for the pod to be scheduled on that node.
+ // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+ // +optional
+ NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"`
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this pod.
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+ // +optional
+ ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"`
+ // DeprecatedServiceAccount is a deprecated alias for ServiceAccountName.
+ // Deprecated: Use serviceAccountName instead.
+ // +k8s:conversion-gen=false
+ // +optional
+ DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"`
+ // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.
+ // +optional
+ AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"`
+
+ // NodeName is a request to schedule this pod onto a specific node. If it is non-empty,
+ // the scheduler simply schedules this pod onto that node, assuming that it fits resource
+ // requirements.
+ // +optional
+ NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"`
+ // Host networking requested for this pod. Use the host's network namespace.
+ // If this option is set, the ports that will be used must be specified.
+ // Default to false.
+ // +k8s:conversion-gen=false
+ // +optional
+ HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"`
+ // Use the host's pid namespace.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ // +optional
+ HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"`
+ // Use the host's ipc namespace.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ // +optional
+ HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"`
+ // Share a single process namespace between all of the containers in a pod.
+ // When this is set containers will be able to view and signal processes from other containers
+ // in the same pod, and the first process in each container will not be assigned PID 1.
+ // HostPID and ShareProcessNamespace cannot both be set.
+ // Optional: Default to false.
+ // +k8s:conversion-gen=false
+ // +optional
+ ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"`
+ // SecurityContext holds pod-level security attributes and common container settings.
+ // Optional: Defaults to empty. See type description for default values of each field.
+ // +optional + SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` + // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. + // If specified, these secrets will be passed to individual puller implementations for them to use. For example, + // in the case of docker, only DockerConfig type secrets are honored. + // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` + // Specifies the hostname of the Pod + // If not specified, the pod's hostname will be set to a system-defined value. + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` + // If specified, the fully qualified Pod hostname will be "...svc.". + // If not specified, the pod will not have a domainname at all. + // +optional + Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` + // If specified, the pod's scheduling constraints + // +optional + Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` + // If specified, the pod will be dispatched by specified scheduler. + // If not specified, the pod will be dispatched by default scheduler. + // +optional + SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` + // If specified, the pod's tolerations. + // +optional + Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` + // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts + // file if specified. This is only valid for non-hostNetwork pods. + // +optional + // +patchMergeKey=ip + // +patchStrategy=merge + HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"` + // If specified, indicates the pod's priority. "system-node-critical" and + // "system-cluster-critical" are two special keywords which indicate the + // highest priorities with the former being the highest priority. Any other + // name must be defined by creating a PriorityClass object with that name. + // If not specified, the pod priority will be default or zero if there is no + // default. + // +optional + PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"` + // The priority value. Various system components use this field to find the + // priority of the pod. When Priority Admission Controller is enabled, it + // prevents users from setting this field. The admission controller populates + // this field from PriorityClassName. + // The higher the value, the higher the priority. + // +optional + Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` + // Specifies the DNS parameters of a pod. + // Parameters specified here will be merged to the generated DNS + // configuration based on DNSPolicy. + // +optional + DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` + // If specified, all readiness gates will be evaluated for pod readiness. 
+ // A pod is ready when all its containers are ready AND + // all conditions specified in the readiness gates have status equal to "True" + // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + // +optional + ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` + // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used + // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. + // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an + // empty definition that uses the default runtime handler. + // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md + // This is a beta feature as of Kubernetes v1.14. + // +optional + RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` + // EnableServiceLinks indicates whether information about services should be injected into pod's + // environment variables, matching the syntax of Docker links. + // Optional: Defaults to true. + // +optional + EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` + // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. + // This field will be autopopulated at admission time by the RuntimeClass admission controller. If + // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. + // The RuntimeClass admission controller will reject Pod create requests which have the overhead already + // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value + // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. + // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` + // TopologySpreadConstraints describes how a group of pods ought to spread across topology + // domains. Scheduler will schedule pods in a way which abides by the constraints. + // This field is only honored by clusters that enable the EvenPodsSpread feature. + // All topologySpreadConstraints are ANDed. + // +optional + // +patchMergeKey=topologyKey + // +patchStrategy=merge + // +listType=map + // +listMapKey=topologyKey + // +listMapKey=whenUnsatisfiable + TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` +} + +type UnsatisfiableConstraintAction string + +const ( + // DoNotSchedule instructs the scheduler not to schedule the pod + // when constraints are not satisfied. 
+ DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" + // ScheduleAnyway instructs the scheduler to schedule the pod + // even if constraints are not satisfied. + ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" +) + +// TopologySpreadConstraint specifies how to spread matching pods among the given topology. +type TopologySpreadConstraint struct { + // MaxSkew describes the degree to which pods may be unevenly distributed. + // It's the maximum permitted difference between the number of matching pods in + // any two topology domains of a given topology type. + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 1/1/0: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P | P | | + // +-------+-------+-------+ + // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; + // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) + // violate MaxSkew(1). + // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. + // It's a required field. Default value is 1 and 0 is not allowed. + MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"` + // TopologyKey is the key of node labels. Nodes that have a label with this key + // and identical values are considered to be in the same topology. + // We consider each as a "bucket", and try to put balanced number + // of pods into each bucket. + // It's a required field. + TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"` + // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy + // the spread constraint. + // - DoNotSchedule (default) tells the scheduler not to schedule it + // - ScheduleAnyway tells the scheduler to still schedule it + // It's considered as "Unsatisfiable" if and only if placing incoming pod on any + // topology violates "MaxSkew". + // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same + // labelSelector spread as 3/1/1: + // +-------+-------+-------+ + // | zone1 | zone2 | zone3 | + // +-------+-------+-------+ + // | P P P | P | P | + // +-------+-------+-------+ + // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled + // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies + // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler + // won't make it *more* imbalanced. + // It's a required field. + WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"` + // LabelSelector is used to find matching pods. + // Pods that match this label selector are counted to determine the number of pods + // in their corresponding topology domain. + // +optional + LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"` +} + +const ( + // The default value for enableServiceLinks attribute. + DefaultEnableServiceLinks = true +) + +// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the +// pod's hosts file. +type HostAlias struct { + // IP address of the host file entry. + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + // Hostnames for the above IP address. 
+ Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"`
+}
+
+// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume
+// when volume is mounted.
+type PodFSGroupChangePolicy string
+
+const (
+ // FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed
+ // only when permission and ownership of root directory does not match with expected
+ // permissions on the volume. This can help shorten the time it takes to change
+ // ownership and permissions of a volume.
+ FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch"
+ // FSGroupChangeAlways indicates that volume's ownership and permissions
+ // should always be changed whenever volume is mounted inside a Pod. This is the default
+ // behavior.
+ FSGroupChangeAlways PodFSGroupChangePolicy = "Always"
+)
+
+// PodSecurityContext holds pod-level security attributes and common container settings.
+// Some fields are also present in container.securityContext. Field values of
+// container.securityContext take precedence over field values of PodSecurityContext.
+type PodSecurityContext struct {
+ // The SELinux context to be applied to all containers.
+ // If unspecified, the container runtime will allocate a random SELinux context for each
+ // container. May also be set in SecurityContext. If set in
+ // both SecurityContext and PodSecurityContext, the value specified in SecurityContext
+ // takes precedence for that container.
+ // +optional
+ SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"`
+ // The Windows specific settings applied to all containers.
+ // If unspecified, the options within a container's SecurityContext will be used.
+ // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // +optional
+ WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"`
+ // The UID to run the entrypoint of the container process.
+ // Defaults to user specified in image metadata if unspecified.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // +optional
+ RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"`
+ // The GID to run the entrypoint of the container process.
+ // Uses runtime default if unset.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence
+ // for that container.
+ // +optional
+ RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"`
+ // Indicates that the container must run as a non-root user.
+ // If true, the Kubelet will validate the image at runtime to ensure that it
+ // does not run as UID 0 (root) and fail to start the container if it does.
+ // If unset or false, no such validation will be performed.
+ // May also be set in SecurityContext. If set in both SecurityContext and
+ // PodSecurityContext, the value specified in SecurityContext takes precedence.
+ // +optional
+ RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"`
+ // A list of groups applied to the first process run in each container, in addition
+ // to the container's primary GID.
If unspecified, no groups will be added to + // any container. + // +optional + SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` + // A special supplemental group that applies to all containers in a pod. + // Some volume types allow the Kubelet to change the ownership of that volume + // to be owned by the pod: + // + // 1. The owning GID will be the FSGroup + // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) + // 3. The permission bits are OR'd with rw-rw---- + // + // If unset, the Kubelet will not modify the ownership and permissions of any volume. + // +optional + FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` + // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported + // sysctls (by the container runtime) might fail to launch. + // +optional + Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` + // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume + // before being exposed inside Pod. This field will only apply to + // volume types which support fsGroup based ownership(and permissions). + // It will have no effect on ephemeral volume types such as: secret, configmaps + // and emptydir. + // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". + // +optional + FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` +} + +// PodQOSClass defines the supported qos classes of Pods. +type PodQOSClass string + +const ( + // PodQOSGuaranteed is the Guaranteed qos class. + PodQOSGuaranteed PodQOSClass = "Guaranteed" + // PodQOSBurstable is the Burstable qos class. + PodQOSBurstable PodQOSClass = "Burstable" + // PodQOSBestEffort is the BestEffort qos class. + PodQOSBestEffort PodQOSClass = "BestEffort" +) + +// PodDNSConfig defines the DNS parameters of a pod in addition to +// those generated from DNSPolicy. +type PodDNSConfig struct { + // A list of DNS name server IP addresses. + // This will be appended to the base nameservers generated from DNSPolicy. + // Duplicated nameservers will be removed. + // +optional + Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"` + // A list of DNS search domains for host-name lookup. + // This will be appended to the base search paths generated from DNSPolicy. + // Duplicated search paths will be removed. + // +optional + Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"` + // A list of DNS resolver options. + // This will be merged with the base options generated from DNSPolicy. + // Duplicated entries will be removed. Resolution options given in Options + // will override those that appear in the base DNSPolicy. + // +optional + Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"` +} + +// PodDNSConfigOption defines DNS resolver options of a pod. +type PodDNSConfigOption struct { + // Required. + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // +optional + Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` +} + +// IP address information for entries in the (plural) PodIPs field. +// Each entry includes: +// IP: An IP address allocated to the pod. Routable at least within the cluster. 
+type PodIP struct { + // ip is an IP address (IPv4 or IPv6) assigned to the pod + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` +} + +// EphemeralContainerCommon is a copy of all fields in Container to be inlined in +// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer +// to Container and allows separate documentation for the fields of EphemeralContainer. +// When a new field is added to Container it must be added here as well. +type EphemeralContainerCommon struct { + // Name of the ephemeral container specified as a DNS_LABEL. + // This name must be unique among all containers, init containers and ephemeral containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Docker image name. + // More info: https://kubernetes.io/docs/concepts/containers/images + Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` + // Entrypoint array. Not executed within a shell. + // The docker image's ENTRYPOINT is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` + // Arguments to the entrypoint. + // The docker image's CMD is used if this is not provided. + // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable + // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax + // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, + // regardless of whether the variable exists or not. + // Cannot be updated. + // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell + // +optional + Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` + // Container's working directory. + // If not specified, the container runtime's default will be used, which + // might be configured in the container image. + // Cannot be updated. + // +optional + WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` + // Ports are not allowed for ephemeral containers. + Ports []ContainerPort `json:"ports,omitempty" protobuf:"bytes,6,rep,name=ports"` + // List of sources to populate environment variables in the container. + // The keys defined within a source must be a C_IDENTIFIER. All invalid keys + // will be reported as an event when the container is starting. When a key exists in multiple + // sources, the value associated with the last source will take precedence. + // Values defined by an Env with a duplicate key will take precedence. + // Cannot be updated. + // +optional + EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` + // List of environment variables to set in the container. + // Cannot be updated. 
+ // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` + // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources + // already allocated to the pod. + // +optional + Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` + // Pod volumes to mount into the container's filesystem. + // Cannot be updated. + // +optional + // +patchMergeKey=mountPath + // +patchStrategy=merge + VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` + // volumeDevices is the list of block devices to be used by the container. + // +patchMergeKey=devicePath + // +patchStrategy=merge + // +optional + VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` + // Probes are not allowed for ephemeral containers. + // +optional + LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` + // Probes are not allowed for ephemeral containers. + // +optional + StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` + // Lifecycle is not allowed for ephemeral containers. + // +optional + Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` + // Optional: Path at which the file to which the container's termination message + // will be written is mounted into the container's filesystem. + // Message written is intended to be brief final status, such as an assertion failure message. + // Will be truncated by the node if greater than 4096 bytes. The total message length across + // all containers will be limited to 12kb. + // Defaults to /dev/termination-log. + // Cannot be updated. + // +optional + TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` + // Indicate how the termination message should be populated. File will use the contents of + // terminationMessagePath to populate the container status message on both success and failure. + // FallbackToLogsOnError will use the last chunk of container log output if the termination + // message file is empty and the container exited with an error. + // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. + // Defaults to File. + // Cannot be updated. + // +optional + TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` + // Image pull policy. + // One of Always, Never, IfNotPresent. + // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. + // Cannot be updated. + // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images + // +optional + ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` + // SecurityContext is not allowed for ephemeral containers. 
+ // +optional + SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` + + // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) + // and shouldn't be used for general purpose containers. + + // Whether this container should allocate a buffer for stdin in the container runtime. If this + // is not set, reads from stdin in the container will always result in EOF. + // Default is false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` + // Whether the container runtime should close the stdin channel after it has been opened by + // a single attach. When stdin is true the stdin stream will remain open across multiple attach + // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the + // first client attaches to stdin, and then remains open and accepts data until the client disconnects, + // at which time stdin is closed and remains closed until the container is restarted. If this + // flag is false, a container processes that reads from stdin will never receive an EOF. + // Default is false + // +optional + StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` + // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. + // Default is false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` +} + +// EphemeralContainerCommon converts to Container. All fields must be kept in sync between +// these two types. +var _ = Container(EphemeralContainerCommon{}) + +// An EphemeralContainer is a container that may be added temporarily to an existing pod for +// user-initiated activities such as debugging. Ephemeral containers have no resource or +// scheduling guarantees, and they will not be restarted when they exit or when a pod is +// removed or restarted. If an ephemeral container causes a pod to exceed its resource +// allocation, the pod may be evicted. +// Ephemeral containers may not be added by directly updating the pod spec. They must be added +// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec +// once added. +// This is an alpha feature enabled by the EphemeralContainers feature flag. +type EphemeralContainer struct { + // Ephemeral containers have all of the fields of Container, plus additional fields + // specific to ephemeral containers. Fields in common with Container are in the + // following inlined struct so than an EphemeralContainer may easily be converted + // to a Container. + EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"` + + // If set, the name of the container from PodSpec that this ephemeral container targets. + // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. + // If not set then the ephemeral container is run in whatever namespaces are shared + // for the pod. Note that the container runtime must support this feature. + // +optional + TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"` +} + +// PodStatus represents information about the status of a pod. Status may trail the actual +// state of a system, especially if the node that hosts the pod cannot contact the control +// plane. +type PodStatus struct { + // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. 
+ // The conditions array, the reason and message fields, and the individual container status + // arrays contain more detail about the pod's status. + // There are five possible phase values: + // + // Pending: The pod has been accepted by the Kubernetes system, but one or more of the + // container images has not been created. This includes time before being scheduled as + // well as time spent downloading images over the network, which could take a while. + // Running: The pod has been bound to a node, and all of the containers have been created. + // At least one container is still running, or is in the process of starting or restarting. + // Succeeded: All containers in the pod have terminated in success, and will not be restarted. + // Failed: All containers in the pod have terminated, and at least one container has + // terminated in failure. The container either exited with non-zero status or was terminated + // by the system. + // Unknown: For some reason the state of the pod could not be obtained, typically due to an + // error in communicating with the host of the pod. + // + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase + // +optional + Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` + // Current service state of pod. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` + // A human readable message indicating details about why the pod is in this condition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A brief CamelCase message indicating details about why the pod is in this state. + // e.g. 'Evicted' + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be + // scheduled right away as preemption victims receive their graceful termination periods. + // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide + // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to + // give the resources on this node to a higher priority pod that is created after preemption. + // As a result, this field may be different than PodSpec.nodeName when the pod is + // scheduled. + // +optional + NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"` + + // IP address of the host to which the pod is assigned. Empty if not yet scheduled. + // +optional + HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` + // IP address allocated to the pod. Routable at least within the cluster. + // Empty if not yet allocated. + // +optional + PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` + + // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must + // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list + // is empty if no IPs have been allocated yet. 
+ // +optional + // +patchStrategy=merge + // +patchMergeKey=ip + PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` + + // RFC 3339 date and time at which the object was acknowledged by the Kubelet. + // This is before the Kubelet pulled the container image(s) for the pod. + // +optional + StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` + + // The list has one entry per init container in the manifest. The most recent successful + // init container will have ready = true, the most recently started container will have + // startTime set. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` + + // The list has one entry per container in the manifest. Each entry is currently the output + // of `docker inspect`. + // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status + // +optional + ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` + // The Quality of Service (QOS) classification assigned to the pod based on resource requirements + // See PodQOSClass type for available QOS classes + // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md + // +optional + QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` + // Status for any ephemeral containers that have run in this pod. + // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. + // +optional + EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded +type PodStatusResult struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` +} + +// +genclient +// +genclient:method=GetEphemeralContainers,verb=get,subresource=ephemeralcontainers,result=EphemeralContainers +// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers,input=EphemeralContainers,result=EphemeralContainers +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Pod is a collection of containers that can run on a host. This resource is created +// by clients and scheduled onto hosts. +type Pod struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the pod. + // This data may not be up to date. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodList is a list of Pods. +type PodList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pods. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md + Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// PodTemplateSpec describes the data a pod should have when created from a template +type PodTemplateSpec struct { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the pod. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodTemplate describes a template for creating copies of a predefined pod. +type PodTemplate struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Template defines the pods that will be created from this pod template. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodTemplateList is a list of PodTemplates. +type PodTemplateList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod templates + Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicationControllerSpec is the specification of a replication controller. +type ReplicationControllerSpec struct { + // Replicas is the number of desired replicas. 
+ // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the Replicas count. + // If Selector is empty, it is defaulted to the labels present on the Pod template. + // Label keys and values that must match in order to be controlled by this replication + // controller, if empty defaulted to labels on Pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // TemplateRef is a reference to an object that describes the pod that will be created if + // insufficient replicas are detected. + // Reference to an object that describes the pod that will be created if insufficient replicas are detected. + // +optional + // TemplateRef *ObjectReference `json:"templateRef,omitempty"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. This takes precedence over a TemplateRef. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicationControllerStatus represents the current status of a replication +// controller. +type ReplicationControllerStatus struct { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replication controller. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replication controller. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replication controller. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed replication controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replication controller's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicationControllerConditionType string + +// These are valid conditions of a replication controller. +const ( + // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods + // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, + // etc. or deleted due to kubelet being down or finalizers are failing. + ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" +) + +// ReplicationControllerCondition describes the state of a replication controller at a certain point. +type ReplicationControllerCondition struct { + // Type of replication controller condition. + Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicationController represents the configuration of a replication controller. +type ReplicationController struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicationController are empty, they are defaulted to + // be the same as the Pod(s) that the replication controller manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the replication controller. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the replication controller. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicationControllerList is a collection of replication controllers. 
+type ReplicationControllerList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of replication controllers. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Session Affinity Type string +type ServiceAffinity string + +const ( + // ServiceAffinityClientIP is the Client IP based. + ServiceAffinityClientIP ServiceAffinity = "ClientIP" + + // ServiceAffinityNone - no session affinity. + ServiceAffinityNone ServiceAffinity = "None" +) + +const DefaultClientIPServiceAffinitySeconds int32 = 10800 + +// SessionAffinityConfig represents the configurations of session affinity. +type SessionAffinityConfig struct { + // clientIP contains the configurations of Client IP based session affinity. + // +optional + ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"` +} + +// ClientIPConfig represents the configurations of Client IP based session affinity. +type ClientIPConfig struct { + // timeoutSeconds specifies the seconds of ClientIP type session sticky time. + // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". + // Default value is 10800(for 3 hours). + // +optional + TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` +} + +// Service Type string describes ingress methods for a service +type ServiceType string + +const ( + // ServiceTypeClusterIP means a service will only be accessible inside the + // cluster, via the cluster IP. + ServiceTypeClusterIP ServiceType = "ClusterIP" + + // ServiceTypeNodePort means a service will be exposed on one port of + // every node, in addition to 'ClusterIP' type. + ServiceTypeNodePort ServiceType = "NodePort" + + // ServiceTypeLoadBalancer means a service will be exposed via an + // external load balancer (if the cloud provider supports it), in addition + // to 'NodePort' type. + ServiceTypeLoadBalancer ServiceType = "LoadBalancer" + + // ServiceTypeExternalName means a service consists of only a reference to + // an external name that kubedns or equivalent will return as a CNAME + // record, with no exposing or proxying of any pods involved. + ServiceTypeExternalName ServiceType = "ExternalName" +) + +// Service External Traffic Policy Type string +type ServiceExternalTrafficPolicyType string + +const ( + // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. + ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" + // ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior. + ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" +) + +// ServiceStatus represents the current status of a service. +type ServiceStatus struct { + // LoadBalancer contains the current status of the load-balancer, + // if one is present. + // +optional + LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// LoadBalancerStatus represents the status of a load-balancer. +type LoadBalancerStatus struct { + // Ingress is a list containing ingress points for the load-balancer. 
+ // Traffic intended for the service should be sent to these ingress points. + // +optional + Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` +} + +// LoadBalancerIngress represents the status of a load-balancer ingress point: +// traffic intended for the service should be sent to an ingress point. +type LoadBalancerIngress struct { + // IP is set for load-balancer ingress points that are IP based + // (typically GCE or OpenStack load-balancers) + // +optional + IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` + + // Hostname is set for load-balancer ingress points that are DNS based + // (typically AWS load-balancers) + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` +} + +// IPFamily represents the IP Family (IPv4 or IPv6). This type is used +// to express the family of an IP expressed by a type (i.e. service.Spec.IPFamily) +type IPFamily string + +const ( + // IPv4Protocol indicates that this IP is IPv4 protocol + IPv4Protocol IPFamily = "IPv4" + // IPv6Protocol indicates that this IP is IPv6 protocol + IPv6Protocol IPFamily = "IPv6" + // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service + MaxServiceTopologyKeys = 16 +) + +// ServiceSpec describes the attributes that a user creates on a service. +type ServiceSpec struct { + // The list of ports that are exposed by this service. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +patchMergeKey=port + // +patchStrategy=merge + // +listType=map + // +listMapKey=port + // +listMapKey=protocol + Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` + + // Route service traffic to pods with label keys and values matching this + // selector. If empty or not present, the service is assumed to have an + // external process managing its endpoints, which Kubernetes will not + // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. + // Ignored if type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/ + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // clusterIP is the IP address of the service and is usually assigned + // randomly by the master. If an address is specified manually and is not in + // use by others, it will be allocated to the service; otherwise, creation + // of the service will fail. This field can not be changed through updates. + // Valid values are "None", empty string (""), or a valid IP address. "None" + // can be specified for headless services when proxying is not required. + // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if + // type is ExternalName. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` + + // type determines how the Service is exposed. Defaults to ClusterIP. Valid + // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. + // "ExternalName" maps to the specified externalName. + // "ClusterIP" allocates a cluster-internal IP address for load-balancing to + // endpoints. Endpoints are determined by the selector or if that is not + // specified, by manual construction of an Endpoints object. 
If clusterIP is + // "None", no virtual IP is allocated and the endpoints are published as a + // set of endpoints rather than a stable IP. + // "NodePort" builds on ClusterIP and allocates a port on every node which + // routes to the clusterIP. + // "LoadBalancer" builds on NodePort and creates an + // external load-balancer (if supported in the current cloud) which routes + // to the clusterIP. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + // +optional + Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` + + // externalIPs is a list of IP addresses for which nodes in the cluster + // will also accept traffic for this service. These IPs are not managed by + // Kubernetes. The user is responsible for ensuring that traffic arrives + // at a node with this IP. A common example is external load-balancers + // that are not part of the Kubernetes system. + // +optional + ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` + + // Supports "ClientIP" and "None". Used to maintain session affinity. + // Enable client IP based session affinity. + // Must be ClientIP or None. + // Defaults to None. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + // +optional + SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` + + // Only applies to Service Type: LoadBalancer + // LoadBalancer will get created with the IP specified in this field. + // This feature depends on whether the underlying cloud-provider supports specifying + // the loadBalancerIP when a load balancer is created. + // This field will be ignored if the cloud-provider does not support the feature. + // +optional + LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` + + // If specified and supported by the platform, this will restrict traffic through the cloud-provider + // load-balancer will be restricted to the specified client IPs. This field will be ignored if the + // cloud-provider does not support the feature." + // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ + // +optional + LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` + + // externalName is the external reference that kubedns or equivalent will + // return as a CNAME record for this service. No proxying will be involved. + // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) + // and requires Type to be ExternalName. + // +optional + ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` + + // externalTrafficPolicy denotes if this Service desires to route external + // traffic to node-local or cluster-wide endpoints. "Local" preserves the + // client source IP and avoids a second hop for LoadBalancer and Nodeport + // type services, but risks potentially imbalanced traffic spreading. + // "Cluster" obscures the client source IP and may cause a second hop to + // another node, but should have good overall load-spreading. 
+ // +optional + ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` + + // healthCheckNodePort specifies the healthcheck nodePort for the service. + // If not specified, HealthCheckNodePort is created by the service api + // backend with the allocated nodePort. Will use user-specified nodePort value + // if specified by the client. Only effects when Type is set to LoadBalancer + // and ExternalTrafficPolicy is set to Local. + // +optional + HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` + + // publishNotReadyAddresses, when set to true, indicates that DNS implementations + // must publish the notReadyAddresses of subsets for the Endpoints associated with + // the Service. The default value is false. + // The primary use case for setting this field is to use a StatefulSet's Headless Service + // to propagate SRV records for its Pods without respect to their readiness for purpose + // of peer discovery. + // +optional + PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"` + + // sessionAffinityConfig contains the configurations of session affinity. + // +optional + SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` + + // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. + // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is + // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. + // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which + // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of + // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the + // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. + // +optional + IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` + + // topologyKeys is a preference-order list of topology keys which + // implementations of services should use to preferentially sort endpoints + // when accessing this Service, it can not be used at the same time as + // externalTrafficPolicy=Local. + // Topology keys must be valid label keys and at most 16 keys may be specified. + // Endpoints are chosen based on the first topology key with available backends. + // If this field is specified and all entries have no backends that match + // the topology of the client, the service has no backends for that client + // and connections should fail. + // The special value "*" may be used to mean "any topology". This catch-all + // value, if used, only makes sense as the last value in the list. + // If this is not specified or empty, no topology constraints will be applied. + // +optional + TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` +} + +// ServicePort contains information on service's port. +type ServicePort struct { + // The name of this port within the service. This must be a DNS_LABEL. + // All ports within a ServiceSpec must have unique names. 
When considering + // the endpoints for a Service, this must match the 'name' field in the + // EndpointPort. + // Optional if only one ServicePort is defined on this service. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". + // Default is TCP. + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` + + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names such as + // mycompany.com/my-custom-protocol. + // Field can be enabled with ServiceAppProtocol feature gate. + // +optional + AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"` + + // The port that will be exposed by this service. + Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` + + // Number or name of the port to access on the pods targeted by the service. + // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. + // If this is a string, it will be looked up as a named port in the + // target Pod's container ports. If this is not specified, the value + // of the 'port' field is used (an identity map). + // This field is ignored for services with clusterIP=None, and should be + // omitted or set equal to the 'port' field. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service + // +optional + TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` + + // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. + // Usually assigned by the system. If specified, it will be allocated to the service + // if unused or else creation of the service will fail. + // Default is to auto-allocate a port if the ServiceType of this Service requires one. + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + // +optional + NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` +} + +// +genclient +// +genclient:skipVerbs=deleteCollection +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Service is a named abstraction of software service (for example, mysql) consisting of local port +// (for example 3306) that the proxy listens on, and the selector that determines which pods +// will answer requests sent through the proxy. +type Service struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of a service. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the service. + // Populated by the system. + // Read-only. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // ClusterIPNone - do not assign a cluster IP + // no proxying required and no environment variables should be created for pods + ClusterIPNone = "None" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceList holds a list of services. +type ServiceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of services + Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAccount binds together: +// * a name, understood by users, and perhaps by peripheral systems, for an identity +// * a principal that can be authenticated and authorized +// * a set of secrets +type ServiceAccount struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` + + // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images + // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets + // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. + // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod + // +optional + ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` + + // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. + // Can be overridden at the pod level. + // +optional + AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceAccountList is a list of ServiceAccount objects +type ServiceAccountList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ServiceAccounts. 
+ // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Endpoints is a collection of endpoints that implement the actual service. Example: +// Name: "mysvc", +// Subsets: [ +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// }, +// { +// Addresses: [{"ip": "10.10.3.3"}], +// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] +// }, +// ] +type Endpoints struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The set of all endpoints is the union of all subsets. Addresses are placed into + // subsets according to the IPs they share. A single address with multiple ports, + // some of which are ready and some of which are not (because they come from + // different containers) will result in the address being displayed in different + // subsets for the different ports. No address will appear in both Addresses and + // NotReadyAddresses in the same subset. + // Sets of addresses and ports that comprise a service. + // +optional + Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"` +} + +// EndpointSubset is a group of addresses with a common set of ports. The +// expanded set of endpoints is the Cartesian product of Addresses x Ports. +// For example, given: +// { +// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], +// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] +// } +// The resulting set of endpoints can be viewed as: +// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], +// b: [ 10.10.1.1:309, 10.10.2.2:309 ] +type EndpointSubset struct { + // IP addresses which offer the related ports that are marked as ready. These endpoints + // should be considered safe for load balancers and clients to utilize. + // +optional + Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` + // IP addresses which offer the related ports but are not currently marked as ready + // because they have not yet finished starting, have recently failed a readiness check, + // or have recently failed a liveness check. + // +optional + NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` + // Port numbers available on the related IP addresses. + // +optional + Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` +} + +// EndpointAddress is a tuple that describes single IP address. +type EndpointAddress struct { + // The IP of this endpoint. + // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), + // or link-local multicast ((224.0.0.0/24). + // IPv6 is also accepted but not fully supported on all platforms. Also, certain + // kubernetes components, like kube-proxy, are not IPv6 ready. + // TODO: This should allow hostname or IP, See #4447. + IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` + // The Hostname of this endpoint + // +optional + Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` + // Optional: Node hosting this endpoint. 
This can be used to determine endpoints local to a node. + // +optional + NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"` + // Reference to object providing the endpoint. + // +optional + TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"` +} + +// EndpointPort is a tuple that describes a single port. +type EndpointPort struct { + // The name of this port. This must match the 'name' field in the + // corresponding ServicePort. + // Must be a DNS_LABEL. + // Optional only if one port is defined. + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // The port number of the endpoint. + Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` + + // The IP protocol for this port. + // Must be UDP, TCP, or SCTP. + // Default is TCP. + // +optional + Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` + + // The application protocol for this port. + // This field follows standard Kubernetes label syntax. + // Un-prefixed names are reserved for IANA standard service names (as per + // RFC-6335 and http://www.iana.org/assignments/service-names). + // Non-standard protocols should use prefixed names such as + // mycompany.com/my-custom-protocol. + // Field can be enabled with ServiceAppProtocol feature gate. + // +optional + AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EndpointsList is a list of endpoints. +type EndpointsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of endpoints. + Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// NodeSpec describes the attributes that a node is created with. +type NodeSpec struct { + // PodCIDR represents the pod IP range assigned to the node. + // +optional + PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` + + // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this + // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for + // each of IPv4 and IPv6. + // +optional + // +patchStrategy=merge + PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` + + // ID of the node assigned by the cloud provider in the format: :// + // +optional + ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` + // Unschedulable controls node schedulability of new pods. By default, node is schedulable. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration + // +optional + Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` + // If specified, the node's taints. + // +optional + Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` + // If specified, the source to get node configuration from + // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field + // +optional + ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"` + + // Deprecated. 
Not all kubelets will set this field. Remove field after 1.13. + // see: https://issues.k8s.io/61966 + // +optional + DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` +} + +// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. +type NodeConfigSource struct { + // For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags: + // 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags) + // 2. configMapRef and proto tag 1 were used by the API to refer to a configmap, + // but used a generic ObjectReference type that didn't really have the fields we needed + // All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags, + // so there was no persisted data for these fields that needed to be migrated/handled. + + // +k8s:deprecated=kind + // +k8s:deprecated=apiVersion + // +k8s:deprecated=configMapRef,protobuf=1 + + // ConfigMap is a reference to a Node's ConfigMap + ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"` +} + +// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. +type ConfigMapNodeConfigSource struct { + // Namespace is the metadata.namespace of the referenced ConfigMap. + // This field is required in all cases. + Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` + + // Name is the metadata.name of the referenced ConfigMap. + // This field is required in all cases. + Name string `json:"name" protobuf:"bytes,2,opt,name=name"` + + // UID is the metadata.UID of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` + + // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. + // This field is forbidden in Node.Spec, and required in Node.Status. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + + // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure + // This field is required in all cases. + KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"` +} + +// DaemonEndpoint contains information about a single Daemon endpoint. +type DaemonEndpoint struct { + /* + The port tag was not properly in quotes in earlier releases, so it must be + uppercased for backwards compat (since it was falling back to var name of + 'Port'). + */ + + // Port number of the given endpoint. + Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"` +} + +// NodeDaemonEndpoints lists ports opened by daemons running on the Node. +type NodeDaemonEndpoints struct { + // Endpoint on which Kubelet is listening. + // +optional + KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` +} + +// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. +type NodeSystemInfo struct { + // MachineID reported by the node. For unique machine identification + // in the cluster this field is preferred. 
Learn more from man(5) + // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html + MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"` + // SystemUUID reported by the node. For unique machine identification + // MachineID is preferred. This field is specific to Red Hat hosts + // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html + SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"` + // Boot ID reported by the node. + BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"` + // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). + KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"` + // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). + OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"` + // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). + ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` + // Kubelet Version reported by the node. + KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` + // KubeProxy Version reported by the node. + KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` + // The Operating System reported by the node + OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` + // The Architecture reported by the node + Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` +} + +// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. +type NodeConfigStatus struct { + // Assigned reports the checkpointed config the node will try to use. + // When Node.Spec.ConfigSource is updated, the node checkpoints the associated + // config payload to local disk, along with a record indicating intended + // config. The node refers to this record to choose its config checkpoint, and + // reports this record in Assigned. Assigned only updates in the status after + // the record has been checkpointed to disk. When the Kubelet is restarted, + // it tries to make the Assigned config the Active config by loading and + // validating the checkpointed payload identified by Assigned. + // +optional + Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"` + // Active reports the checkpointed config the node is actively using. + // Active will represent either the current version of the Assigned config, + // or the current LastKnownGood config, depending on whether attempting to use the + // Assigned config results in an error. + // +optional + Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"` + // LastKnownGood reports the checkpointed config the node will fall back to + // when it encounters an error attempting to use the Assigned config. + // The Assigned config becomes the LastKnownGood config when the node determines + // that the Assigned config is stable and correct. + // This is currently implemented as a 10-minute soak period starting when the local + // record of Assigned config is updated. If the Assigned config is Active at the end + // of this period, it becomes the LastKnownGood. 
Note that if Spec.ConfigSource is + // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, + // because the local default config is always assumed good. + // You should not make assumptions about the node's method of determining config stability + // and correctness, as this may change or become configurable in the future. + // +optional + LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"` + // Error describes any problems reconciling the Spec.ConfigSource to the Active config. + // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned + // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting + // to load or validate the Assigned config, etc. + // Errors may occur at different points while syncing config. Earlier errors (e.g. download or + // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across + // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in + // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error + // by fixing the config assigned in Spec.ConfigSource. + // You can find additional information for debugging by searching the error message in the Kubelet log. + // Error is a human-readable description of the error state; machines can check whether or not Error + // is empty, but should not rely on the stability of the Error text across Kubelet versions. + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` +} + +// NodeStatus is information about the current status of a node. +type NodeStatus struct { + // Capacity represents the total resources of a node. + // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity + // +optional + Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` + // Allocatable represents the resources of a node that are available for scheduling. + // Defaults to Capacity. + // +optional + Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` + // NodePhase is the recently observed lifecycle phase of the node. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase + // The field is never populated, and now is deprecated. + // +optional + Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` + // Conditions is an array of current observed node conditions. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` + // List of addresses reachable to the node. + // Queried from cloud provider, if available. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses + // Note: This field is declared as mergeable, but the merge key is not sufficiently + // unique, which can cause data corruption when it is merged. Callers should instead + // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. 
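The Assigned, Active, LastKnownGood and Error fields above describe the dynamic kubelet config state machine. As a rough sketch of how a caller might summarize that status (assuming the k8s.io/api/core/v1 import path; the helper name is made up for illustration):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1" // assumed import path for the vendored core/v1 types
)

// describeConfig is an illustrative helper that reports which config records
// the node has checkpointed and whether reconciliation hit an error.
func describeConfig(st *corev1.NodeConfigStatus) {
    if st == nil {
        fmt.Println("no dynamic kubelet config assigned; node uses local defaults")
        return
    }
    if st.Error != "" {
        fmt.Println("error reconciling Spec.ConfigSource:", st.Error)
    }
    if st.Assigned != nil && st.Assigned.ConfigMap != nil {
        fmt.Println("assigned (checkpointed) config:", st.Assigned.ConfigMap.Name)
    }
    if st.Active != nil && st.Active.ConfigMap != nil {
        fmt.Println("active config:", st.Active.ConfigMap.Name)
    }
    if st.LastKnownGood != nil && st.LastKnownGood.ConfigMap != nil {
        fmt.Println("last-known-good fallback:", st.LastKnownGood.ConfigMap.Name)
    }
}

func main() {
    describeConfig(nil)
}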
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` + // Endpoints of daemons running on the Node. + // +optional + DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` + // Set of ids/uuids to uniquely identify the node. + // More info: https://kubernetes.io/docs/concepts/nodes/node/#info + // +optional + NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` + // List of container images on this node + // +optional + Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` + // List of attachable volumes in use (mounted) by the node. + // +optional + VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"` + // List of volumes that are attached to the node. + // +optional + VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` + // Status of the config assigned to the node via the dynamic Kubelet config feature. + // +optional + Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"` +} + +type UniqueVolumeName string + +// AttachedVolume describes a volume attached to a node +type AttachedVolume struct { + // Name of the attached volume + Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"` + + // DevicePath represents the device path where the volume should be available + DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"` +} + +// AvoidPods describes pods that should avoid this node. This is the value for a +// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and +// will eventually become a field of NodeStatus. +type AvoidPods struct { + // Bounded-sized list of signatures of pods that should avoid this node, sorted + // in timestamp order from oldest to newest. Size of the slice is unspecified. + // +optional + PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"` +} + +// Describes a class of pods that should avoid this node. +type PreferAvoidPodsEntry struct { + // The class of pods. + PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"` + // Time at which this entry was added to the list. + // +optional + EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"` + // (brief) reason why this entry was added to the list. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + // Human readable message indicating why this entry was added to the list. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` +} + +// Describes the class of pods that should avoid this node. +// Exactly one field should be set. +type PodSignature struct { + // Reference to controller whose pods should avoid this node. + // +optional + PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"` +} + +// Describe a container image +type ContainerImage struct { + // Names by which this image is known. + // e.g. 
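Capacity and Allocatable in NodeStatus are plain ResourceLists keyed by ResourceName, with Allocatable defaulting to Capacity when unset. A small illustrative reader, assuming the k8s.io/api/core/v1 import path for these vendored types:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1" // assumed import path for the vendored core/v1 types
)

// printAllocatable prints the schedulable cpu/memory and the cached images
// reported in a NodeStatus. Missing keys yield zero-valued quantities.
func printAllocatable(status corev1.NodeStatus) {
    alloc := status.Allocatable
    if alloc == nil {
        alloc = status.Capacity // Allocatable defaults to Capacity
    }
    cpu := alloc[corev1.ResourceCPU]
    mem := alloc[corev1.ResourceMemory]
    fmt.Printf("schedulable cpu=%s memory=%s\n", cpu.String(), mem.String())
    for _, img := range status.Images {
        fmt.Printf("cached image %v (%d bytes)\n", img.Names, img.SizeBytes)
    }
}

func main() {
    printAllocatable(corev1.NodeStatus{})
}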
["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] + Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` + // The size of the image in bytes. + // +optional + SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"` +} + +type NodePhase string + +// These are the valid phases of node. +const ( + // NodePending means the node has been created/added by the system, but not configured. + NodePending NodePhase = "Pending" + // NodeRunning means the node has been configured and has Kubernetes components running. + NodeRunning NodePhase = "Running" + // NodeTerminated means the node has been removed from the cluster. + NodeTerminated NodePhase = "Terminated" +) + +type NodeConditionType string + +// These are valid conditions of node. Currently, we don't have enough information to decide +// node condition. In the future, we will add more. The proposed set of conditions are: +// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. +const ( + // NodeReady means kubelet is healthy and ready to accept pods. + NodeReady NodeConditionType = "Ready" + // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. + NodeMemoryPressure NodeConditionType = "MemoryPressure" + // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. + NodeDiskPressure NodeConditionType = "DiskPressure" + // NodePIDPressure means the kubelet is under pressure due to insufficient available PID. + NodePIDPressure NodeConditionType = "PIDPressure" + // NodeNetworkUnavailable means that network for the node is not correctly configured. + NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" +) + +// NodeCondition contains condition information for a node. +type NodeCondition struct { + // Type of node condition. + Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Last time we got an update on a given condition. + // +optional + LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` + // Last time the condition transit from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // (brief) reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +type NodeAddressType string + +// These are valid address type of node. +const ( + NodeHostName NodeAddressType = "Hostname" + NodeExternalIP NodeAddressType = "ExternalIP" + NodeInternalIP NodeAddressType = "InternalIP" + NodeExternalDNS NodeAddressType = "ExternalDNS" + NodeInternalDNS NodeAddressType = "InternalDNS" +) + +// NodeAddress contains information for the node's address. +type NodeAddress struct { + // Node address type, one of Hostname, ExternalIP or InternalIP. + Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"` + // The node address. 
+ Address string `json:"address" protobuf:"bytes,2,opt,name=address"` +} + +// ResourceName is the name identifying various resources in a ResourceList. +type ResourceName string + +// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, +// with the -, _, and . characters allowed anywhere, except the first or last character. +// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than +// camel case, separating compound words. +// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. +const ( + // CPU, in cores. (500m = .5 cores) + ResourceCPU ResourceName = "cpu" + // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceMemory ResourceName = "memory" + // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) + ResourceStorage ResourceName = "storage" + // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. + ResourceEphemeralStorage ResourceName = "ephemeral-storage" +) + +const ( + // Default namespace prefix. + ResourceDefaultNamespacePrefix = "kubernetes.io/" + // Name prefix for huge page resources (alpha). + ResourceHugePagesPrefix = "hugepages-" + // Name prefix for storage resource limits + ResourceAttachableVolumesPrefix = "attachable-volumes-" +) + +// ResourceList is a set of (resource name, quantity) pairs. +type ResourceList map[ResourceName]resource.Quantity + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Node is a worker node in Kubernetes. +// Each node will have a unique identifier in the cache (i.e. in etcd). +type Node struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of a node. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the node. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeList is the whole list of all Nodes which have been registered with master. +type NodeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of nodes + Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// FinalizerName is the name identifying a finalizer during namespace lifecycle. +type FinalizerName string + +// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or +// in metav1. 
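NodeCondition and NodeAddress entries are both keyed by Type, so consumers typically scan the slices for the entry they care about. An illustrative sketch under the same assumed k8s.io/api/core/v1 import path (ConditionTrue is the "True" ConditionStatus constant defined elsewhere in the same package):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1" // assumed import path for the vendored core/v1 types
)

// isReady scans Status.Conditions for the Ready condition.
func isReady(node *corev1.Node) bool {
    for _, c := range node.Status.Conditions {
        if c.Type == corev1.NodeReady {
            return c.Status == corev1.ConditionTrue
        }
    }
    return false
}

// internalIP returns the first InternalIP address, falling back to the hostname.
func internalIP(node *corev1.Node) string {
    for _, a := range node.Status.Addresses {
        if a.Type == corev1.NodeInternalIP {
            return a.Address
        }
    }
    for _, a := range node.Status.Addresses {
        if a.Type == corev1.NodeHostName {
            return a.Address
        }
    }
    return ""
}

func main() {
    n := &corev1.Node{}
    fmt.Println(isReady(n), internalIP(n))
}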
+const ( + FinalizerKubernetes FinalizerName = "kubernetes" +) + +// NamespaceSpec describes the attributes on a Namespace. +type NamespaceSpec struct { + // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ + // +optional + Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` +} + +// NamespaceStatus is information about the current status of a Namespace. +type NamespaceStatus struct { + // Phase is the current lifecycle phase of the namespace. + // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ + // +optional + Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` + + // Represents the latest available observations of a namespace's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +type NamespacePhase string + +// These are the valid phases of a namespace. +const ( + // NamespaceActive means the namespace is available for use in the system + NamespaceActive NamespacePhase = "Active" + // NamespaceTerminating means the namespace is undergoing graceful termination + NamespaceTerminating NamespacePhase = "Terminating" +) + +const ( + // NamespaceTerminatingCause is returned as a defaults.cause item when a change is + // forbidden due to the namespace being terminated. + NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating" +) + +type NamespaceConditionType string + +// These are valid conditions of a namespace. +const ( + // NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery. + NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure" + // NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources. + NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure" + // NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types. + NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure" + // NamespaceContentRemaining contains information about resources remaining in a namespace. + NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining" + // NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace. + NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining" +) + +// NamespaceCondition contains details about state of namespace. +type NamespaceCondition struct { + // Type of namespace controller condition. + Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"` + // Status of the condition, one of True, False, Unknown. 
+ Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` +} + +// +genclient +// +genclient:nonNamespaced +// +genclient:skipVerbs=deleteCollection +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Namespace provides a scope for Names. +// Use of multiple namespaces is optional. +type Namespace struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the behavior of the Namespace. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status describes the current status of a Namespace. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NamespaceList is a list of Namespaces. +type NamespaceList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Namespace objects in the list. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. +// Deprecated in 1.7, please use the bindings subresource of pods instead. +type Binding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The target object that you want to bind to the standard object. + Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. +type EphemeralContainers struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of ephemeral containers associated with this pod. New ephemeral containers + // may be appended to this list, but existing ephemeral containers may not be removed + // or modified. 
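NamespaceSpec.Finalizers must be empty before a namespace can be permanently removed, and NamespaceStatus.Phase moves from Active to Terminating during deletion. A minimal sketch of constructing a Namespace with the kubernetes finalizer, assuming the k8s.io/api/core/v1 and k8s.io/apimachinery/pkg/apis/meta/v1 import paths (the namespace name is hypothetical):

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"                   // assumed import path
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed import path
)

func main() {
    // The "kubernetes" finalizer must be cleared before the object can be
    // permanently removed from storage.
    ns := corev1.Namespace{
        ObjectMeta: metav1.ObjectMeta{Name: "team-a"}, // hypothetical namespace name
        Spec: corev1.NamespaceSpec{
            Finalizers: []corev1.FinalizerName{corev1.FinalizerKubernetes},
        },
    }
    if ns.Status.Phase == "" || ns.Status.Phase == corev1.NamespaceActive {
        fmt.Println(ns.Name, "is active (or not yet phased)")
    }
}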
+ // +patchMergeKey=name + // +patchStrategy=merge + EphemeralContainers []EphemeralContainer `json:"ephemeralContainers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=ephemeralContainers"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +// +k8s:openapi-gen=false +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodLogOptions is the query options for a Pod's logs REST call. +type PodLogOptions struct { + metav1.TypeMeta `json:",inline"` + + // The container for which to stream logs. Defaults to only container if there is one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` + // Follow the log stream of the pod. Defaults to false. + // +optional + Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` + // Return previous terminated container logs. Defaults to false. + // +optional + Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` + // A relative time in seconds before the current time from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` + // An RFC3339 timestamp from which to show logs. If this value + // precedes the time a pod was started, only logs since the pod start will be returned. + // If this value is in the future, no logs will be returned. + // Only one of sinceSeconds or sinceTime may be specified. + // +optional + SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` + // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line + // of log output. Defaults to false. + // +optional + Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` + // If set, the number of lines from the end of the logs to show. If not specified, + // logs are shown from the creation of the container or sinceSeconds or sinceTime + // +optional + TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` + // If set, the number of bytes to read from the server before terminating the + // log output. This may not display a complete final line of logging, and may return + // slightly more or slightly less than the specified limit. + // +optional + LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` + + // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the + // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver + // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real + // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the + // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. 
an attacker could not intercept + // the actual log data coming from the real kubelet). + // +optional + InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodAttachOptions is the query options to a Pod's remote attach call. +// --- +// TODO: merge w/ PodExecOptions below for stdin, stdout, etc +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +type PodAttachOptions struct { + metav1.TypeMeta `json:",inline"` + + // Stdin if true, redirects the standard input stream of the pod for this call. + // Defaults to false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` + + // Stdout if true indicates that stdout is to be redirected for the attach call. + // Defaults to true. + // +optional + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` + + // Stderr if true indicates that stderr is to be redirected for the attach call. + // Defaults to true. + // +optional + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` + + // TTY if true indicates that a tty will be allocated for the attach call. + // This is passed through the container runtime so the tty + // is allocated on the worker node by the container runtime. + // Defaults to false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` + + // The container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodExecOptions is the query options to a Pod's remote exec call. +// --- +// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging +// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY +type PodExecOptions struct { + metav1.TypeMeta `json:",inline"` + + // Redirect the standard input stream of the pod for this call. + // Defaults to false. + // +optional + Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` + + // Redirect the standard output stream of the pod for this call. + // Defaults to true. + // +optional + Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` + + // Redirect the standard error stream of the pod for this call. + // Defaults to true. + // +optional + Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` + + // TTY if true indicates that a tty will be allocated for the exec call. + // Defaults to false. + // +optional + TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` + + // Container in which to execute the command. + // Defaults to only container if there is only one container in the pod. + // +optional + Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` + + // Command is the remote command to execute. argv array. Not executed within a shell. 
+ Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodPortForwardOptions is the query options to a Pod's port forward call +// when using WebSockets. +// The `port` query parameter must specify the port or +// ports (comma separated) to forward over. +// Port forwarding over SPDY does not use these options. It requires the port +// to be passed in the `port` header as part of request. +type PodPortForwardOptions struct { + metav1.TypeMeta `json:",inline"` + + // List of ports to forward + // Required when using WebSockets + // +optional + Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodProxyOptions is the query options to a Pod's proxy call. +type PodProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the URL path to use for the current proxy request to pod. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeProxyOptions is the query options to a Node's proxy call. +type NodeProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the URL path to use for the current proxy request to node. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ServiceProxyOptions is the query options to a Service's proxy call. +type ServiceProxyOptions struct { + metav1.TypeMeta `json:",inline"` + + // Path is the part of URLs that include service endpoints, suffixes, + // and parameters to use for the current proxy request to service. + // For example, the whole request URL is + // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. + // Path is _search?q=user:kimchy. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` +} + +// ObjectReference contains enough information to let you inspect or modify the referred object. +// --- +// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. +// 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. +// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular +// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". +// Those cannot be well described when embedded. +// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. +// 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity +// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple +// and the version of the actual struct is irrelevant. +// 5. We cannot easily change it. 
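PodLogOptions, PodExecOptions and PodPortForwardOptions above are all query-option objects for pod subresources. A combined illustrative sketch, assuming the k8s.io/api/core/v1 import path; container names and ports are hypothetical:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1" // assumed import path for the vendored core/v1 types
)

func main() {
    tail := int64(100)
    since := int64(600)

    // Log options: only one of SinceSeconds or SinceTime may be set;
    // the relative form is used here.
    logs := corev1.PodLogOptions{
        Container:    "app", // hypothetical container name
        Follow:       true,
        Timestamps:   true,
        TailLines:    &tail,
        SinceSeconds: &since,
    }

    // Exec options: Command is an argv array and is not run through a shell,
    // so each argument is its own element.
    exec := corev1.PodExecOptions{
        Container: "app",
        Command:   []string{"cat", "/etc/os-release"},
        Stdout:    true,
        Stderr:    true,
    }

    // WebSocket port-forwards carry the ports in the options; SPDY passes
    // them in the `port` header instead, as noted above.
    pf := corev1.PodPortForwardOptions{Ports: []int32{8080, 9090}}

    fmt.Printf("%+v\n%+v\n%+v\n", logs, exec, pf)
}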
Because this type is embedded in many locations, updates to this type +// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. +// Instead of using this type, create a locally provided and used type that is well-focused on your reference. +// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type ObjectReference struct { + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + // Namespace of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // API version of the referent. + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` + // Specific resourceVersion to which this reference is made, if any. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // If referring to a piece of an object instead of an entire object, this string + // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. + // For example, if the object reference is to a container within a pod, this would take on a value like: + // "spec.containers{name}" (where "name" refers to the name of the container that triggered + // the event) or if no container name is specified "spec.containers[2]" (container with + // index 2 in this pod). This syntax is chosen only to have some well-defined way of + // referencing a part of an object. + // TODO: this design is not final and this field is subject to change in the future. + // +optional + FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"` +} + +// LocalObjectReference contains enough information to let you locate the +// referenced object inside the same namespace. +type LocalObjectReference struct { + // Name of the referent. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names + // TODO: Add other useful fields. apiVersion, kind, uid? + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` +} + +// TypedLocalObjectReference contains enough information to let you locate the +// typed referenced object inside the same namespace. +type TypedLocalObjectReference struct { + // APIGroup is the group for the resource being referenced. + // If APIGroup is not specified, the specified Kind must be in the core API group. + // For any other third-party types, APIGroup is required. 
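The ObjectReference comment above recommends a narrowly scoped, locally defined reference type for new APIs rather than embedding ObjectReference, and TypedLocalObjectReference covers same-namespace typed references. A sketch of both, assuming the k8s.io/api/core/v1 import path; ConfigMapReference and its field set are hypothetical and only illustrate the recommendation:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1" // assumed import path for the vendored core/v1 types
)

// ConfigMapReference is a hypothetical, narrowly scoped reference type of the
// kind the ObjectReference comment recommends: it can only name a ConfigMap,
// so none of the ignored or ambiguous ObjectReference fields come along.
type ConfigMapReference struct {
    Namespace string `json:"namespace"`
    Name      string `json:"name"`
}

func main() {
    ref := ConfigMapReference{Namespace: "default", Name: "app-config"} // hypothetical values

    // TypedLocalObjectReference names a kind and object in the same namespace;
    // a nil APIGroup means the referenced Kind is in the core API group.
    typed := corev1.TypedLocalObjectReference{
        Kind: "ConfigMap",
        Name: "app-config",
    }
    fmt.Printf("%+v %+v\n", ref, typed)
}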
+ // +optional + APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SerializedReference is a reference to serialized object. +type SerializedReference struct { + metav1.TypeMeta `json:",inline"` + // The reference to an object in the system. + // +optional + Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` +} + +// EventSource contains information for an event. +type EventSource struct { + // Component from which the event is generated. + // +optional + Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"` + // Node name on which the event is generated. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` +} + +// Valid values for event types (new types could be added in future) +const ( + // Information only and will not cause any problems + EventTypeNormal string = "Normal" + // These events are to warn that something might go wrong + EventTypeWarning string = "Warning" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Event is a report of an event somewhere in the cluster. +type Event struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` + + // The object that this event is about. + InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"` + + // This should be a short, machine understandable string that gives the reason + // for the transition into the object's current status. + // TODO: provide exact specification for format. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` + + // A human-readable description of the status of this operation. + // TODO: decide on maximum length. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` + + // The component reporting this event. Should be a short machine understandable string. + // +optional + Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"` + + // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) + // +optional + FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` + + // The time at which the most recent occurrence of this event was recorded. + // +optional + LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` + + // The number of times this event has occurred. + // +optional + Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"` + + // Type of this event (Normal, Warning), new types could be added in the future + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` + + // Time when this Event was first observed. + // +optional + EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"` + + // Data about the Event series this event represents or nil if it's a singleton Event. 
+ // +optional + Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"` + + // What action was taken/failed regarding to the Regarding object. + // +optional + Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"` + + // Optional secondary object for more complex actions. + // +optional + Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"` + + // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. + // +optional + ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"` + + // ID of the controller instance, e.g. `kubelet-xyzf`. + // +optional + ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"` +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continuously for some time. +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"` + // Time of the last occurrence observed + LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` + // State of this Series: Ongoing or Finished + // Deprecated. Planned removal for 1.18 + State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of events. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of events + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List metav1.List + +// LimitType is a type of object that is limited +type LimitType string + +const ( + // Limit that applies to all pods in a namespace + LimitTypePod LimitType = "Pod" + // Limit that applies to all containers in a namespace + LimitTypeContainer LimitType = "Container" + // Limit that applies to all persistent volume claims in a namespace + LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" +) + +// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. +type LimitRangeItem struct { + // Type of resource that this limit applies to. + Type LimitType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` + // Max usage constraints on this kind by resource name. + // +optional + Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` + // Min usage constraints on this kind by resource name. + // +optional + Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"` + // Default resource requirement limit value by resource name if resource limit is omitted. 
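Event ties a Reason/Message pair to an InvolvedObject reference plus a reporting Source and timestamps. A minimal sketch of populating one by hand, assuming the k8s.io/api/core/v1 and meta/v1 import paths; in practice controllers usually emit events through an event recorder, and all names below are hypothetical:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"                   // assumed import path
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed import path
)

func main() {
    now := metav1.Now()
    ev := corev1.Event{
        ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "my-pod.demo"}, // hypothetical
        InvolvedObject: corev1.ObjectReference{
            Kind:      "Pod",
            Namespace: "default",
            Name:      "my-pod",
        },
        Reason:         "BackOff",
        Message:        "Back-off restarting failed container",
        Type:           corev1.EventTypeWarning,
        Source:         corev1.EventSource{Component: "kubelet", Host: "node-1"},
        FirstTimestamp: now,
        LastTimestamp:  now,
        Count:          1,
    }
    fmt.Printf("%s/%s: %s %s\n", ev.Namespace, ev.Name, ev.Reason, ev.Message)
}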
+ // +optional + Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"` + // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. + // +optional + DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"` + // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. + // +optional + MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"` +} + +// LimitRangeSpec defines a min/max usage limit for resources that match on kind. +type LimitRangeSpec struct { + // Limits is the list of LimitRangeItem objects that are enforced. + Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRange sets resource usage limits for each kind of resource in a Namespace. +type LimitRange struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the limits enforced. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// LimitRangeList is a list of LimitRange items. +type LimitRangeList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of LimitRange objects. + // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// The following identify resource constants for Kubernetes object types +const ( + // Pods, number + ResourcePods ResourceName = "pods" + // Services, number + ResourceServices ResourceName = "services" + // ReplicationControllers, number + ResourceReplicationControllers ResourceName = "replicationcontrollers" + // ResourceQuotas, number + ResourceQuotas ResourceName = "resourcequotas" + // ResourceSecrets, number + ResourceSecrets ResourceName = "secrets" + // ResourceConfigMaps, number + ResourceConfigMaps ResourceName = "configmaps" + // ResourcePersistentVolumeClaims, number + ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" + // ResourceServicesNodePorts, number + ResourceServicesNodePorts ResourceName = "services.nodeports" + // ResourceServicesLoadBalancers, number + ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" + // CPU request, in cores. (500m = .5 cores) + ResourceRequestsCPU ResourceName = "requests.cpu" + // Memory request, in bytes. 
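LimitRangeItem carries per-kind ceilings (Max/Min) plus the Default and DefaultRequest values injected when a container omits its own limits or requests. An illustrative sketch, assuming the k8s.io/api/core/v1, meta/v1 and apimachinery resource import paths; names and quantities are hypothetical:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"                   // assumed import path
    "k8s.io/apimachinery/pkg/api/resource"        // assumed import path for Quantity parsing
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed import path
)

func main() {
    // Per-container defaults and ceilings for a namespace: Default fills in
    // limits and DefaultRequest fills in requests when a pod omits them.
    lr := corev1.LimitRange{
        ObjectMeta: metav1.ObjectMeta{Name: "container-defaults", Namespace: "team-a"}, // hypothetical
        Spec: corev1.LimitRangeSpec{
            Limits: []corev1.LimitRangeItem{{
                Type: corev1.LimitTypeContainer,
                Max: corev1.ResourceList{
                    corev1.ResourceCPU: resource.MustParse("2"), // 2 cores
                },
                Default: corev1.ResourceList{
                    corev1.ResourceCPU:    resource.MustParse("500m"), // half a core
                    corev1.ResourceMemory: resource.MustParse("256Mi"),
                },
                DefaultRequest: corev1.ResourceList{
                    corev1.ResourceCPU:    resource.MustParse("250m"),
                    corev1.ResourceMemory: resource.MustParse("128Mi"),
                },
            }},
        },
    }
    fmt.Printf("%+v\n", lr.Spec.Limits[0].Default)
}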
(500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsMemory ResourceName = "requests.memory" + // Storage request, in bytes + ResourceRequestsStorage ResourceName = "requests.storage" + // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" + // CPU limit, in cores. (500m = .5 cores) + ResourceLimitsCPU ResourceName = "limits.cpu" + // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsMemory ResourceName = "limits.memory" + // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" +) + +// The following identify resource prefix for Kubernetes object types +const ( + // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) + // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. + ResourceRequestsHugePagesPrefix = "requests.hugepages-" + // Default resource requests prefix + DefaultResourceRequestsPrefix = "requests." +) + +// A ResourceQuotaScope defines a filter that must match each object tracked by a quota +type ResourceQuotaScope string + +const ( + // Match all pod objects where spec.activeDeadlineSeconds + ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" + // Match all pod objects where !spec.activeDeadlineSeconds + ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" + // Match all pod objects that have best effort quality of service + ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" + // Match all pod objects that do not have best effort quality of service + ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" + // Match all pod objects that have priority class mentioned + ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" +) + +// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. +type ResourceQuotaSpec struct { + // hard is the set of desired hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + // +optional + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // A collection of filters that must match each object tracked by a quota. + // If not specified, the quota matches all objects. + // +optional + Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` + // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota + // but expressed using ScopeSelectorOperator in combination with possible values. + // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. + // +optional + ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty" protobuf:"bytes,3,opt,name=scopeSelector"` +} + +// A scope selector represents the AND of the selectors represented +// by the scoped-resource selector requirements. +type ScopeSelector struct { + // A list of scope selector requirements by scope of the resources. 
+ // +optional + MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` +} + +// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator +// that relates the scope name and values. +type ScopedResourceSelectorRequirement struct { + // The name of the scope that the selector applies to. + ScopeName ResourceQuotaScope `json:"scopeName" protobuf:"bytes,1,opt,name=scopeName"` + // Represents a scope's relationship to a set of values. + // Valid operators are In, NotIn, Exists, DoesNotExist. + Operator ScopeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=ScopedResourceSelectorOperator"` + // An array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. + // This array is replaced during a strategic merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A scope selector operator is the set of operators that can be used in +// a scope selector requirement. +type ScopeSelectorOperator string + +const ( + ScopeSelectorOpIn ScopeSelectorOperator = "In" + ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" + ScopeSelectorOpExists ScopeSelectorOperator = "Exists" + ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" +) + +// ResourceQuotaStatus defines the enforced hard limits and observed use. +type ResourceQuotaStatus struct { + // Hard is the set of enforced hard limits for each named resource. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + // +optional + Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` + // Used is the current observed total usage of the resource in the namespace. + // +optional + Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuota sets aggregate quota restrictions enforced per namespace +type ResourceQuota struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the desired quota. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status defines the actual enforced quota and its current usage. + // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ResourceQuotaList is a list of ResourceQuota items. +type ResourceQuotaList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
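ResourceQuotaSpec combines hard limits with optional scopes (and scope selectors) that narrow which pods the quota tracks. A sketch of a quota restricted to non-BestEffort pods, assuming the same import paths as above; the object name, namespace and quantities are hypothetical:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"                   // assumed import path
    "k8s.io/apimachinery/pkg/api/resource"        // assumed import path for Quantity parsing
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed import path
)

func main() {
    // Hard caps on pod count plus aggregate requests/limits, applied only to
    // pods that are not best-effort (i.e. that declare requests or limits).
    rq := corev1.ResourceQuota{
        ObjectMeta: metav1.ObjectMeta{Name: "compute-quota", Namespace: "team-a"}, // hypothetical
        Spec: corev1.ResourceQuotaSpec{
            Hard: corev1.ResourceList{
                corev1.ResourcePods:         resource.MustParse("10"),
                corev1.ResourceRequestsCPU:  resource.MustParse("4"),
                corev1.ResourceLimitsMemory: resource.MustParse("8Gi"),
            },
            Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeNotBestEffort},
        },
    }
    fmt.Printf("%+v\n", rq.Spec.Hard)
}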
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ResourceQuota objects. + // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ + Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Secret holds secret data of a certain type. The total bytes of the values in +// the Data field must be less than MaxSecretSize bytes. +type Secret struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Immutable, if set to true, ensures that data stored in the Secret cannot + // be updated (only object metadata can be modified). + // If not set to true, the field can be modified at any time. + // Defaulted to nil. + // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. + // +optional + Immutable *bool `json:"immutable,omitempty" protobuf:"varint,5,opt,name=immutable"` + + // Data contains the secret data. Each key must consist of alphanumeric + // characters, '-', '_' or '.'. The serialized form of the secret data is a + // base64 encoded string, representing the arbitrary (possibly non-string) + // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 + // +optional + Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` + + // stringData allows specifying non-binary secret data in string form. + // It is provided as a write-only convenience method. + // All keys and values are merged into the data field on write, overwriting any existing values. + // It is never output when reading from the API. + // +k8s:conversion-gen=false + // +optional + StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"` + + // Used to facilitate programmatic handling of secret data. + // +optional + Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` +} + +const MaxSecretSize = 1 * 1024 * 1024 + +type SecretType string + +const ( + // SecretTypeOpaque is the default. 
Arbitrary user-defined data + SecretTypeOpaque SecretType = "Opaque" + + // SecretTypeServiceAccountToken contains a token that identifies a service account to the API + // + // Required fields: + // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies + // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies + // - Secret.Data["token"] - a token that identifies the service account to the API + SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" + + // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountNameKey = "kubernetes.io/service-account.name" + // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets + ServiceAccountUIDKey = "kubernetes.io/service-account.uid" + // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets + ServiceAccountTokenKey = "token" + // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets + ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" + // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets + ServiceAccountRootCAKey = "ca.crt" + // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls + ServiceAccountNamespaceKey = "namespace" + + // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg + // + // Required fields: + // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file + SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" + + // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets + DockerConfigKey = ".dockercfg" + + // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json + // + // Required fields: + // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file + SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" + + // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets + DockerConfigJsonKey = ".dockerconfigjson" + + // SecretTypeBasicAuth contains data needed for basic authentication. + // + // Required at least one of fields: + // - Secret.Data["username"] - username used for authentication + // - Secret.Data["password"] - password or token needed for authentication + SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" + + // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets + BasicAuthUsernameKey = "username" + // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets + BasicAuthPasswordKey = "password" + + // SecretTypeSSHAuth contains data needed for SSH authetication. + // + // Required field: + // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication + SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" + + // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets + SSHAuthPrivateKey = "ssh-privatekey" + // SecretTypeTLS contains information about a TLS client or server secret. It + // is primarily used with TLS termination of the Ingress resource, but may be + // used in other types. 
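Secret.StringData is a write-only convenience: on write its values are merged into Data as raw bytes, and the API never returns StringData back. A minimal sketch of a basic-auth Secret using the typed constants above, assuming the k8s.io/api/core/v1 and meta/v1 import paths; the name, namespace and credential values are hypothetical:

package main

import (
    "fmt"

    corev1 "k8s.io/api/core/v1"                   // assumed import path
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // assumed import path
)

func main() {
    // SecretTypeBasicAuth requires at least one of the username/password keys;
    // StringData avoids hand-encoding the values as base64.
    sec := corev1.Secret{
        ObjectMeta: metav1.ObjectMeta{Name: "db-credentials", Namespace: "team-a"}, // hypothetical
        Type:       corev1.SecretTypeBasicAuth,
        StringData: map[string]string{
            corev1.BasicAuthUsernameKey: "app",
            corev1.BasicAuthPasswordKey: "not-a-real-password", // placeholder value
        },
    }
    fmt.Println(sec.Name, sec.Type)
}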
+ // + // Required fields: + // - Secret.Data["tls.key"] - TLS private key. + // Secret.Data["tls.crt"] - TLS certificate. + // TODO: Consider supporting different formats, specifying CA/destinationCA. + SecretTypeTLS SecretType = "kubernetes.io/tls" + + // TLSCertKey is the key for tls certificates in a TLS secret. + TLSCertKey = "tls.crt" + // TLSPrivateKeyKey is the key for the private key field in a TLS secret. + TLSPrivateKeyKey = "tls.key" + // SecretTypeBootstrapToken is used during the automated bootstrap process (first + // implemented by kubeadm). It stores tokens that are used to sign well known + // ConfigMaps. They are used for authn. + SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// SecretList is a list of Secret. +type SecretList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of secret objects. + // More info: https://kubernetes.io/docs/concepts/configuration/secret + Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMap holds configuration data for pods to consume. +type ConfigMap struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Immutable, if set to true, ensures that data stored in the ConfigMap cannot + // be updated (only object metadata can be modified). + // If not set to true, the field can be modified at any time. + // Defaulted to nil. + // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. + // +optional + Immutable *bool `json:"immutable,omitempty" protobuf:"varint,4,opt,name=immutable"` + + // Data contains the configuration data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // Values with non-UTF-8 byte sequences must use the BinaryData field. + // The keys stored in Data must not overlap with the keys in + // the BinaryData field, this is enforced during validation process. + // +optional + Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` + + // BinaryData contains the binary data. + // Each key must consist of alphanumeric characters, '-', '_' or '.'. + // BinaryData can contain byte sequences that are not in the UTF-8 range. + // The keys stored in BinaryData must not overlap with the ones in + // the Data field, this is enforced during validation process. + // Using this field will require 1.10+ apiserver and + // kubelet. + // +optional + BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ConfigMapList is a resource containing a list of ConfigMap objects.
+type ConfigMapList struct { + metav1.TypeMeta `json:",inline"` + + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of ConfigMaps. + Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Type and constants for component health validation. +type ComponentConditionType string + +// These are the valid conditions for the component. +const ( + ComponentHealthy ComponentConditionType = "Healthy" +) + +// Information about the condition of a component. +type ComponentCondition struct { + // Type of condition for a component. + // Valid value: "Healthy" + Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"` + // Status of the condition for a component. + // Valid values for "Healthy": "True", "False", or "Unknown". + Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` + // Message about the condition for a component. + // For example, information about a health check. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // Condition error code for a component. + // For example, a health check error code. + // +optional + Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ComponentStatus (and ComponentStatusList) holds the cluster validation info. +type ComponentStatus struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of component conditions observed + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Status of all the conditions for the component as a list of ComponentStatus objects. +type ComponentStatusList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ComponentStatus objects. + Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// DownwardAPIVolumeSource represents a volume containing downward API info. +// Downward API volumes support ownership management and SELinux relabeling. +type DownwardAPIVolumeSource struct { + // Items is a list of downward API volume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` + // Optional: mode bits to use on created files by default. Must be a + // value between 0 and 0777. Defaults to 0644. + // Directories within the path are not affected by this setting. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. 
+ // +optional + DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` +} + +const ( + DownwardAPIVolumeSourceDefaultMode int32 = 0644 +) + +// DownwardAPIVolumeFile represents information to create the file containing the pod field +type DownwardAPIVolumeFile struct { + // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' + Path string `json:"path" protobuf:"bytes,1,opt,name=path"` + // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. + // +optional + FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` + // Selects a resource of the container: only resources limits and requests + // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. + // +optional + ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"` + // Optional: mode bits to use on this file, must be a value between 0 + // and 0777. If not specified, the volume defaultMode will be used. + // This might be in conflict with other options that affect the file + // mode, like fsGroup, and the result can be other mode bits set. + // +optional + Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"` +} + +// Represents downward API info for projecting into a projected volume. +// Note that this is identical to a downwardAPI volume source without the default +// mode. +type DownwardAPIProjection struct { + // Items is a list of DownwardAPIVolume file + // +optional + Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` +} + +// SecurityContext holds security configuration that will be applied to a container. +// Some fields are present in both SecurityContext and PodSecurityContext. When both +// are set, the values in SecurityContext take precedence. +type SecurityContext struct { + // The capabilities to add/drop when running containers. + // Defaults to the default set of capabilities granted by the container runtime. + // +optional + Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` + // Run container in privileged mode. + // Processes in privileged containers are essentially equivalent to root on the host. + // Defaults to false. + // +optional + Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` + // The SELinux context to be applied to the container. + // If unspecified, the container runtime will allocate a random SELinux context for each + // container. May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` + // The Windows specific settings applied to all containers. + // If unspecified, the options from the PodSecurityContext will be used. + // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` + // The UID to run the entrypoint of the container process. 
+ // Defaults to user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` + // The GID to run the entrypoint of the container process. + // Uses runtime default if unset. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"` + // Indicates that the container must run as a non-root user. + // If true, the Kubelet will validate the image at runtime to ensure that it + // does not run as UID 0 (root) and fail to start the container if it does. + // If unset or false, no such validation will be performed. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` + // Whether this container has a read-only root filesystem. + // Default is false. + // +optional + ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` + // AllowPrivilegeEscalation controls whether a process can gain more + // privileges than its parent process. This bool directly controls if + // the no_new_privs flag will be set on the container process. + // AllowPrivilegeEscalation is always true when the container is: + // 1) run as Privileged + // 2) has CAP_SYS_ADMIN + // +optional + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` + // procMount denotes the type of proc mount to use for the containers. + // The default is DefaultProcMount which uses the container runtime defaults for + // readonly paths and masked paths. + // This requires the ProcMountType feature flag to be enabled. + // +optional + ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"` +} + +type ProcMountType string + +const ( + // DefaultProcMount uses the container runtime defaults for readonly and masked + // paths for /proc. Most container runtimes mask certain paths in /proc to avoid + // accidental security exposure of special devices or information. + DefaultProcMount ProcMountType = "Default" + + // UnmaskedProcMount bypasses the default masking behavior of the container + // runtime and ensures the newly created /proc for the container stays intact with + // no modifications. + UnmaskedProcMount ProcMountType = "Unmasked" +) + +// SELinuxOptions are the labels to be applied to the container +type SELinuxOptions struct { + // User is a SELinux user label that applies to the container. + // +optional + User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"` + // Role is a SELinux role label that applies to the container. + // +optional + Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"` + // Type is a SELinux type label that applies to the container. + // +optional + Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"` + // Level is SELinux level label that applies to the container.
+ // +optional + Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` +} + +// WindowsSecurityContextOptions contain Windows-specific options and credentials. +type WindowsSecurityContextOptions struct { + // GMSACredentialSpecName is the name of the GMSA credential spec to use. + // +optional + GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"` + + // GMSACredentialSpec is where the GMSA admission webhook + // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the + // GMSA credential spec named by the GMSACredentialSpecName field. + // +optional + GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"` + + // The UserName in Windows to run the entrypoint of the container process. + // Defaults to the user specified in image metadata if unspecified. + // May also be set in PodSecurityContext. If set in both SecurityContext and + // PodSecurityContext, the value specified in SecurityContext takes precedence. + // +optional + RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RangeAllocation is not a public type. +type RangeAllocation struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Range is string that identifies the range represented by 'data'. + Range string `json:"range" protobuf:"bytes,2,opt,name=range"` + // Data is a bit array containing all allocated addresses in the previous segment. + Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"` +} + +const ( + // "default-scheduler" is the name of default scheduler. + DefaultSchedulerName = "default-scheduler" + + // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule + // corresponding to every RequiredDuringScheduling affinity rule. + // When the --hard-pod-affinity-weight scheduler flag is not specified, + // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. + DefaultHardPodAffinitySymmetricWeight int32 = 1 +) + +// Sysctl defines a kernel parameter to be set +type Sysctl struct { + // Name of a property to set + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Value of a property to set + Value string `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +// NodeResources is an object for conveying resource information about a node. +// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. 
+type NodeResources struct { + // Capacity represents the available resources of a node + Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` +} + +const ( + // Enable stdin for remote command execution + ExecStdinParam = "input" + // Enable stdout for remote command execution + ExecStdoutParam = "output" + // Enable stderr for remote command execution + ExecStderrParam = "error" + // Enable TTY for remote command execution + ExecTTYParam = "tty" + // Command to run for remote command execution + ExecCommandParam = "command" + + // Name of header that specifies stream type + StreamType = "streamType" + // Value for streamType header for stdin stream + StreamTypeStdin = "stdin" + // Value for streamType header for stdout stream + StreamTypeStdout = "stdout" + // Value for streamType header for stderr stream + StreamTypeStderr = "stderr" + // Value for streamType header for data stream + StreamTypeData = "data" + // Value for streamType header for error stream + StreamTypeError = "error" + // Value for streamType header for terminal resize stream + StreamTypeResize = "resize" + + // Name of header that specifies the port being forwarded + PortHeader = "port" + // Name of header that specifies a request ID used to associate the error + // and data streams for a single forwarded connection + PortForwardRequestIDHeader = "requestID" +) diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..331451f --- /dev/null +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -0,0 +1,2473 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AWSElasticBlockStoreVolumeSource = map[string]string{ + "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", + "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "fsType": "Filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", + "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", +} + +func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { + return map_AWSElasticBlockStoreVolumeSource +} + +var map_Affinity = map[string]string{ + "": "Affinity is a group of affinity scheduling rules.", + "nodeAffinity": "Describes node affinity scheduling rules for the pod.", + "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", + "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", +} + +func (Affinity) SwaggerDoc() map[string]string { + return map_Affinity +} + +var map_AttachedVolume = map[string]string{ + "": "AttachedVolume describes a volume attached to a node", + "name": "Name of the attached volume", + "devicePath": "DevicePath represents the device path where the volume should be available", +} + +func (AttachedVolume) SwaggerDoc() map[string]string { + return map_AttachedVolume +} + +var map_AvoidPods = map[string]string{ + "": "AvoidPods describes pods that should avoid this node. This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.", + "preferAvoidPods": "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. Size of the slice is unspecified.", +} + +func (AvoidPods) SwaggerDoc() map[string]string { + return map_AvoidPods +} + +var map_AzureDiskVolumeSource = map[string]string{ + "": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "diskName": "The Name of the data disk in the blob storage", + "diskURI": "The URI of the data disk in the blob storage", + "cachingMode": "Host Caching mode: None, Read Only, Read Write.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "kind": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set).
defaults to shared", +} + +func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { + return map_AzureDiskVolumeSource +} + +var map_AzureFilePersistentVolumeSource = map[string]string{ + "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "secretName": "the name of secret that contains Azure Storage Account Name and Key", + "shareName": "Share Name", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretNamespace": "the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod", +} + +func (AzureFilePersistentVolumeSource) SwaggerDoc() map[string]string { + return map_AzureFilePersistentVolumeSource +} + +var map_AzureFileVolumeSource = map[string]string{ + "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "secretName": "the name of secret that contains Azure Storage Account Name and Key", + "shareName": "Share Name", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (AzureFileVolumeSource) SwaggerDoc() map[string]string { + return map_AzureFileVolumeSource +} + +var map_Binding = map[string]string{ + "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "target": "The target object that you want to bind to the standard object.", +} + +func (Binding) SwaggerDoc() map[string]string { + return map_Binding +} + +var map_CSIPersistentVolumeSource = map[string]string{ + "": "Represents storage that is managed by an external CSI volume driver (Beta feature)", + "driver": "Driver is the name of the driver to use for this volume. Required.", + "volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", + "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", + "volumeAttributes": "Attributes of the volume to publish.", + "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required.
If the secret object contains more than one secret, all secrets are passed.", + "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", +} + +func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CSIPersistentVolumeSource +} + +var map_CSIVolumeSource = map[string]string{ + "": "Represents a source location of a volume to mount, managed by an external CSI driver", + "driver": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", + "readOnly": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", + "fsType": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", + "volumeAttributes": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", +} + +func (CSIVolumeSource) SwaggerDoc() map[string]string { + return map_CSIVolumeSource +} + +var map_Capabilities = map[string]string{ + "": "Adds and removes POSIX capabilities from running containers.", + "add": "Added capabilities", + "drop": "Removed capabilities", +} + +func (Capabilities) SwaggerDoc() map[string]string { + return map_Capabilities +} + +var map_CephFSPersistentVolumeSource = map[string]string{ + "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", +} + +func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CephFSPersistentVolumeSource +} + +var map_CephFSVolumeSource = map[string]string{ + "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", + "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", +} + +func (CephFSVolumeSource) SwaggerDoc() map[string]string { + return map_CephFSVolumeSource +} + +var map_CinderPersistentVolumeSource = map[string]string{ + "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", +} + +func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_CinderPersistentVolumeSource +} + +var map_CinderVolumeSource = map[string]string{ + "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", + "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", +} + +func (CinderVolumeSource) SwaggerDoc() map[string]string { + return map_CinderVolumeSource +} + +var map_ClientIPConfig = map[string]string{ + "": "ClientIPConfig represents the configurations of Client IP based session affinity.", + "timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", +} + +func (ClientIPConfig) SwaggerDoc() map[string]string { + return map_ClientIPConfig +} + +var map_ComponentCondition = map[string]string{ + "": "Information about the condition of a component.", + "type": "Type of condition for a component. Valid value: \"Healthy\"", + "status": "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", + "message": "Message about the condition for a component. For example, information about a health check.", + "error": "Condition error code for a component. For example, a health check error code.", +} + +func (ComponentCondition) SwaggerDoc() map[string]string { + return map_ComponentCondition +} + +var map_ComponentStatus = map[string]string{ + "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "conditions": "List of component conditions observed", +} + +func (ComponentStatus) SwaggerDoc() map[string]string { + return map_ComponentStatus +} + +var map_ComponentStatusList = map[string]string{ + "": "Status of all the conditions for the component as a list of ComponentStatus objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ComponentStatus objects.", +} + +func (ComponentStatusList) SwaggerDoc() map[string]string { + return map_ComponentStatusList +} + +var map_ConfigMap = map[string]string{ + "": "ConfigMap holds configuration data for pods to consume.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "immutable": "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.", + "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", + "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. 
Using this field will require 1.10+ apiserver and kubelet.", +} + +func (ConfigMap) SwaggerDoc() map[string]string { + return map_ConfigMap +} + +var map_ConfigMapEnvSource = map[string]string{ + "": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the ConfigMap must be defined", +} + +func (ConfigMapEnvSource) SwaggerDoc() map[string]string { + return map_ConfigMapEnvSource +} + +var map_ConfigMapKeySelector = map[string]string{ + "": "Selects a key from a ConfigMap.", + "key": "The key to select.", + "optional": "Specify whether the ConfigMap or its key must be defined", +} + +func (ConfigMapKeySelector) SwaggerDoc() map[string]string { + return map_ConfigMapKeySelector +} + +var map_ConfigMapList = map[string]string{ + "": "ConfigMapList is a resource containing a list of ConfigMap objects.", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of ConfigMaps.", +} + +func (ConfigMapList) SwaggerDoc() map[string]string { + return map_ConfigMapList +} + +var map_ConfigMapNodeConfigSource = map[string]string{ + "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.", + "namespace": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", + "name": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", + "uid": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "resourceVersion": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", + "kubeletConfigKey": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.", +} + +func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { + return map_ConfigMapNodeConfigSource +} + +var map_ConfigMapProjection = map[string]string{ + "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.", + "optional": "Specify whether the ConfigMap or its keys must be defined", +} + +func (ConfigMapProjection) SwaggerDoc() map[string]string { + return map_ConfigMapProjection +} + +var map_ConfigMapVolumeSource = map[string]string{ + "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", + "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the ConfigMap or its keys must be defined", +} + +func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { + return map_ConfigMapVolumeSource +} + +var map_Container = map[string]string{ + "": "A single application container that you want to run within a pod.", + "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.", + "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "List of ports to expose from the container. 
Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", + "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. 
More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container process that reads from stdin will never receive an EOF. Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (Container) SwaggerDoc() map[string]string { + return map_Container +} + +var map_ContainerImage = map[string]string{ + "": "Describe a container image", + "names": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]", + "sizeBytes": "The size of the image in bytes.", +} + +func (ContainerImage) SwaggerDoc() map[string]string { + return map_ContainerImage +} + +var map_ContainerPort = map[string]string{ + "": "ContainerPort represents a network port in a single container.", + "name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.", + "hostPort": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.", + "containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.", + "protocol": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".", + "hostIP": "What host IP to bind the external port to.", +} + +func (ContainerPort) SwaggerDoc() map[string]string { + return map_ContainerPort +} + +var map_ContainerState = map[string]string{ + "": "ContainerState holds a possible state of container. Only one of its members may be specified.
If none of them is specified, the default one is ContainerStateWaiting.", + "waiting": "Details about a waiting container", + "running": "Details about a running container", + "terminated": "Details about a terminated container", +} + +func (ContainerState) SwaggerDoc() map[string]string { + return map_ContainerState +} + +var map_ContainerStateRunning = map[string]string{ + "": "ContainerStateRunning is a running state of a container.", + "startedAt": "Time at which the container was last (re-)started", +} + +func (ContainerStateRunning) SwaggerDoc() map[string]string { + return map_ContainerStateRunning +} + +var map_ContainerStateTerminated = map[string]string{ + "": "ContainerStateTerminated is a terminated state of a container.", + "exitCode": "Exit status from the last termination of the container", + "signal": "Signal from the last termination of the container", + "reason": "(brief) reason from the last termination of the container", + "message": "Message regarding the last termination of the container", + "startedAt": "Time at which previous execution of the container started", + "finishedAt": "Time at which the container last terminated", + "containerID": "Container's ID in the format 'docker://'", +} + +func (ContainerStateTerminated) SwaggerDoc() map[string]string { + return map_ContainerStateTerminated +} + +var map_ContainerStateWaiting = map[string]string{ + "": "ContainerStateWaiting is a waiting state of a container.", + "reason": "(brief) reason the container is not yet running.", + "message": "Message regarding why the container is not yet running.", +} + +func (ContainerStateWaiting) SwaggerDoc() map[string]string { + return map_ContainerStateWaiting +} + +var map_ContainerStatus = map[string]string{ + "": "ContainerStatus contains details for the current status of this container.", + "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.", + "state": "Details about the container's current condition.", + "lastState": "Details about the container's last termination condition.", + "ready": "Specifies whether the container has passed its readiness probe.", + "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.", + "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", + "imageID": "ImageID of the container's image.", + "containerID": "Container's ID in the format 'docker://'.", + "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", +} + +func (ContainerStatus) SwaggerDoc() map[string]string { + return map_ContainerStatus +} + +var map_DaemonEndpoint = map[string]string{ + "": "DaemonEndpoint contains information about a single Daemon endpoint.", + "Port": "Port number of the given endpoint.", +} + +func (DaemonEndpoint) SwaggerDoc() map[string]string { + return map_DaemonEndpoint +} + +var map_DownwardAPIProjection = map[string]string{ + "": "Represents downward API info for projecting into a projected volume. 
Note that this is identical to a downwardAPI volume source without the default mode.", + "items": "Items is a list of DownwardAPIVolume file", +} + +func (DownwardAPIProjection) SwaggerDoc() map[string]string { + return map_DownwardAPIProjection +} + +var map_DownwardAPIVolumeFile = map[string]string{ + "": "DownwardAPIVolumeFile represents information to create the file containing the pod field", + "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'", + "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.", + "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string { + return map_DownwardAPIVolumeFile +} + +var map_DownwardAPIVolumeSource = map[string]string{ + "": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.", + "items": "Items is a list of downward API volume file", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string { + return map_DownwardAPIVolumeSource +} + +var map_EmptyDirVolumeSource = map[string]string{ + "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.", + "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "sizeLimit": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir", +} + +func (EmptyDirVolumeSource) SwaggerDoc() map[string]string { + return map_EmptyDirVolumeSource +} + +var map_EndpointAddress = map[string]string{ + "": "EndpointAddress is a tuple that describes single IP address.", + "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast (224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.", + "hostname": "The Hostname of this endpoint", + "nodeName": "Optional: Node hosting this endpoint.
This can be used to determine endpoints local to a node.", + "targetRef": "Reference to object providing the endpoint.", +} + +func (EndpointAddress) SwaggerDoc() map[string]string { + return map_EndpointAddress +} + +var map_EndpointPort = map[string]string{ + "": "EndpointPort is a tuple that describes a single port.", + "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", + "port": "The port number of the endpoint.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", + "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.", +} + +func (EndpointPort) SwaggerDoc() map[string]string { + return map_EndpointPort +} + +var map_EndpointSubset = map[string]string{ + "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]", + "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.", + "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.", + "ports": "Port numbers available on the related IP addresses.", +} + +func (EndpointSubset) SwaggerDoc() map[string]string { + return map_EndpointSubset +} + +var map_Endpoints = map[string]string{ + "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", +} + +func (Endpoints) SwaggerDoc() map[string]string { + return map_Endpoints +} + +var map_EndpointsList = map[string]string{ + "": "EndpointsList is a list of endpoints.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of endpoints.", +} + +func (EndpointsList) SwaggerDoc() map[string]string { + return map_EndpointsList +} + +var map_EnvFromSource = map[string]string{ + "": "EnvFromSource represents the source of a set of ConfigMaps", + "prefix": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.", + "configMapRef": "The ConfigMap to select from", + "secretRef": "The Secret to select from", +} + +func (EnvFromSource) SwaggerDoc() map[string]string { + return map_EnvFromSource +} + +var map_EnvVar = map[string]string{ + "": "EnvVar represents an environment variable present in a Container.", + "name": "Name of the environment variable. Must be a C_IDENTIFIER.", + "value": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".", + "valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.", +} + +func (EnvVar) SwaggerDoc() map[string]string { + return map_EnvVar +} + +var map_EnvVarSource = map[string]string{ + "": "EnvVarSource represents a source for the value of an EnvVar.", + "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", + "configMapKeyRef": "Selects a key of a ConfigMap.", + "secretKeyRef": "Selects a key of a secret in the pod's namespace", +} + +func (EnvVarSource) SwaggerDoc() map[string]string { + return map_EnvVarSource +} + +var map_EphemeralContainer = map[string]string{ + "": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. This is an alpha feature enabled by the EphemeralContainers feature flag.", + "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", +} + +func (EphemeralContainer) SwaggerDoc() map[string]string { + return map_EphemeralContainer +} + +var map_EphemeralContainerCommon = map[string]string{ + "": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. 
This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", + "name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", + "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", + "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", + "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", + "ports": "Ports are not allowed for ephemeral containers.", + "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", + "env": "List of environment variables to set in the container. Cannot be updated.", + "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", + "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", + "livenessProbe": "Probes are not allowed for ephemeral containers.", + "readinessProbe": "Probes are not allowed for ephemeral containers.", + "startupProbe": "Probes are not allowed for ephemeral containers.", + "lifecycle": "Lifecycle is not allowed for ephemeral containers.", + "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. 
Cannot be updated.", + "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", + "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", + "securityContext": "SecurityContext is not allowed for ephemeral containers.", + "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", + "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", + "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", +} + +func (EphemeralContainerCommon) SwaggerDoc() map[string]string { + return map_EphemeralContainerCommon +} + +var map_EphemeralContainers = map[string]string{ + "": "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.", + "ephemeralContainers": "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.", +} + +func (EphemeralContainers) SwaggerDoc() map[string]string { + return map_EphemeralContainers +} + +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "involvedObject": "The object that this event is about.", + "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", + "message": "A human-readable description of the status of this operation.", + "source": "The component reporting this event. Should be a short machine understandable string.", + "firstTimestamp": "The time at which the event was first recorded. 
(Time of server receipt is in TypeMeta.)", + "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.", + "count": "The number of times this event has occurred.", + "type": "Type of this event (Normal, Warning), new types could be added in the future", + "eventTime": "Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "action": "What action was taken/failed regarding to the Regarding object.", + "related": "Optional secondary object for more complex actions.", + "reportingComponent": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of events.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of events", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time of the last occurrence observed", + "state": "State of this Series: Ongoing or Finished Deprecated. Planned removal for 1.18", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + +var map_EventSource = map[string]string{ + "": "EventSource contains information for an event.", + "component": "Component from which the event is generated.", + "host": "Node name on which the event is generated.", +} + +func (EventSource) SwaggerDoc() map[string]string { + return map_EventSource +} + +var map_ExecAction = map[string]string{ + "": "ExecAction describes a \"run in container\" action.", + "command": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.", +} + +func (ExecAction) SwaggerDoc() map[string]string { + return map_ExecAction +} + +var map_FCVolumeSource = map[string]string{ + "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.", + "targetWWNs": "Optional: FC target worldwide names (WWNs)", + "lun": "Optional: FC target lun number", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "wwids": "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.", +} + +func (FCVolumeSource) SwaggerDoc() map[string]string { + return map_FCVolumeSource +} + +var map_FlexPersistentVolumeSource = map[string]string{ + "": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.", + "driver": "Driver is the name of the driver to use for this volume.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "Optional: Extra command options if any.", +} + +func (FlexPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_FlexPersistentVolumeSource +} + +var map_FlexVolumeSource = map[string]string{ + "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "driver": "Driver is the name of the driver to use for this volume.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.", + "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "options": "Optional: Extra command options if any.", +} + +func (FlexVolumeSource) SwaggerDoc() map[string]string { + return map_FlexVolumeSource +} + +var map_FlockerVolumeSource = map[string]string{ + "": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.", + "datasetName": "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated", + "datasetUUID": "UUID of the dataset. This is unique identifier of a Flocker dataset", +} + +func (FlockerVolumeSource) SwaggerDoc() map[string]string { + return map_FlockerVolumeSource +} + +var map_GCEPersistentDiskVolumeSource = map[string]string{ + "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.", + "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "fsType": "Filesystem type of the volume that you want to mount. 
Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", +} + +func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string { + return map_GCEPersistentDiskVolumeSource +} + +var map_GitRepoVolumeSource = map[string]string{ + "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "repository": "Repository URL", + "revision": "Commit hash for the specified revision.", + "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.", +} + +func (GitRepoVolumeSource) SwaggerDoc() map[string]string { + return map_GitRepoVolumeSource +} + +var map_GlusterfsPersistentVolumeSource = map[string]string{ + "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", +} + +func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_GlusterfsPersistentVolumeSource +} + +var map_GlusterfsVolumeSource = map[string]string{ + "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. 
More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", +} + +func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { + return map_GlusterfsVolumeSource +} + +var map_HTTPGetAction = map[string]string{ + "": "HTTPGetAction describes an action based on HTTP Get requests.", + "path": "Path to access on the HTTP server.", + "port": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "host": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.", + "scheme": "Scheme to use for connecting to the host. Defaults to HTTP.", + "httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.", +} + +func (HTTPGetAction) SwaggerDoc() map[string]string { + return map_HTTPGetAction +} + +var map_HTTPHeader = map[string]string{ + "": "HTTPHeader describes a custom header to be used in HTTP probes", + "name": "The header field name", + "value": "The header field value", +} + +func (HTTPHeader) SwaggerDoc() map[string]string { + return map_HTTPHeader +} + +var map_Handler = map[string]string{ + "": "Handler defines a specific action that should be taken", + "exec": "One and only one of the following should be specified. Exec specifies the action to take.", + "httpGet": "HTTPGet specifies the http request to perform.", + "tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported", +} + +func (Handler) SwaggerDoc() map[string]string { + return map_Handler +} + +var map_HostAlias = map[string]string{ + "": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.", + "ip": "IP address of the host file entry.", + "hostnames": "Hostnames for the above IP address.", +} + +func (HostAlias) SwaggerDoc() map[string]string { + return map_HostAlias +} + +var map_HostPathVolumeSource = map[string]string{ + "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.", + "path": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "type": "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", +} + +func (HostPathVolumeSource) SwaggerDoc() map[string]string { + return map_HostPathVolumeSource +} + +var map_ISCSIPersistentVolumeSource = map[string]string{ + "": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI Target Portal List. 
The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "whether support iSCSI Session CHAP authentication", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", +} + +func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIPersistentVolumeSource +} + +var map_ISCSIVolumeSource = map[string]string{ + "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.", + "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "iqn": "Target iSCSI Qualified Name.", + "lun": "iSCSI Target Lun number.", + "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.", + "portals": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).", + "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication", + "chapAuthSession": "whether support iSCSI Session CHAP authentication", + "secretRef": "CHAP Secret for iSCSI target and initiator authentication", + "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.", +} + +func (ISCSIVolumeSource) SwaggerDoc() map[string]string { + return map_ISCSIVolumeSource +} + +var map_KeyToPath = map[string]string{ + "": "Maps a string key to a path within a volume.", + "key": "The key to project.", + "path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.", + "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (KeyToPath) SwaggerDoc() map[string]string { + return map_KeyToPath +} + +var map_Lifecycle = map[string]string{ + "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", + "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. 
More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", +} + +func (Lifecycle) SwaggerDoc() map[string]string { + return map_Lifecycle +} + +var map_LimitRange = map[string]string{ + "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (LimitRange) SwaggerDoc() map[string]string { + return map_LimitRange +} + +var map_LimitRangeItem = map[string]string{ + "": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.", + "type": "Type of resource that this limit applies to.", + "max": "Max usage constraints on this kind by resource name.", + "min": "Min usage constraints on this kind by resource name.", + "default": "Default resource requirement limit value by resource name if resource limit is omitted.", + "defaultRequest": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.", + "maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.", +} + +func (LimitRangeItem) SwaggerDoc() map[string]string { + return map_LimitRangeItem +} + +var map_LimitRangeList = map[string]string{ + "": "LimitRangeList is a list of LimitRange items.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "Items is a list of LimitRange objects. 
More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", +} + +func (LimitRangeList) SwaggerDoc() map[string]string { + return map_LimitRangeList +} + +var map_LimitRangeSpec = map[string]string{ + "": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.", + "limits": "Limits is the list of LimitRangeItem objects that are enforced.", +} + +func (LimitRangeSpec) SwaggerDoc() map[string]string { + return map_LimitRangeSpec +} + +var map_LoadBalancerIngress = map[string]string{ + "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.", + "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)", + "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)", +} + +func (LoadBalancerIngress) SwaggerDoc() map[string]string { + return map_LoadBalancerIngress +} + +var map_LoadBalancerStatus = map[string]string{ + "": "LoadBalancerStatus represents the status of a load-balancer.", + "ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.", +} + +func (LoadBalancerStatus) SwaggerDoc() map[string]string { + return map_LoadBalancerStatus +} + +var map_LocalObjectReference = map[string]string{ + "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", +} + +func (LocalObjectReference) SwaggerDoc() map[string]string { + return map_LocalObjectReference +} + +var map_LocalVolumeSource = map[string]string{ + "": "Local represents directly-attached storage with node affinity (Beta feature)", + "path": "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).", + "fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a fileystem if unspecified.", +} + +func (LocalVolumeSource) SwaggerDoc() map[string]string { + return map_LocalVolumeSource +} + +var map_NFSVolumeSource = map[string]string{ + "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.", + "server": "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "path": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", +} + +func (NFSVolumeSource) SwaggerDoc() map[string]string { + return map_NFSVolumeSource +} + +var map_Namespace = map[string]string{ + "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Namespace) SwaggerDoc() map[string]string { + return map_Namespace +} + +var map_NamespaceCondition = map[string]string{ + "": "NamespaceCondition contains details about state of namespace.", + "type": "Type of namespace controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", +} + +func (NamespaceCondition) SwaggerDoc() map[string]string { + return map_NamespaceCondition +} + +var map_NamespaceList = map[string]string{ + "": "NamespaceList is a list of Namespaces.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", +} + +func (NamespaceList) SwaggerDoc() map[string]string { + return map_NamespaceList +} + +var map_NamespaceSpec = map[string]string{ + "": "NamespaceSpec describes the attributes on a Namespace.", + "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", +} + +func (NamespaceSpec) SwaggerDoc() map[string]string { + return map_NamespaceSpec +} + +var map_NamespaceStatus = map[string]string{ + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", + "conditions": "Represents the latest available observations of a namespace's current state.", +} + +func (NamespaceStatus) SwaggerDoc() map[string]string { + return map_NamespaceStatus +} + +var map_Node = map[string]string{ + "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Node) SwaggerDoc() map[string]string { + return map_Node +} + +var map_NodeAddress = map[string]string{ + "": "NodeAddress contains information for the node's address.", + "type": "Node address type, one of Hostname, ExternalIP or InternalIP.", + "address": "The node address.", +} + +func (NodeAddress) SwaggerDoc() map[string]string { + return map_NodeAddress +} + +var map_NodeAffinity = map[string]string{ + "": "Node affinity is a group of node affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. 
due to an update), the system may or may not try to eventually evict the pod from its node.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", +} + +func (NodeAffinity) SwaggerDoc() map[string]string { + return map_NodeAffinity +} + +var map_NodeCondition = map[string]string{ + "": "NodeCondition contains condition information for a node.", + "type": "Type of node condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastHeartbeatTime": "Last time we got an update on a given condition.", + "lastTransitionTime": "Last time the condition transit from one status to another.", + "reason": "(brief) reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (NodeCondition) SwaggerDoc() map[string]string { + return map_NodeCondition +} + +var map_NodeConfigSource = map[string]string{ + "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.", + "configMap": "ConfigMap is a reference to a Node's ConfigMap", +} + +func (NodeConfigSource) SwaggerDoc() map[string]string { + return map_NodeConfigSource +} + +var map_NodeConfigStatus = map[string]string{ + "": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.", + "assigned": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.", + "active": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.", + "lastKnownGood": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. 
You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.", + "error": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.", +} + +func (NodeConfigStatus) SwaggerDoc() map[string]string { + return map_NodeConfigStatus +} + +var map_NodeDaemonEndpoints = map[string]string{ + "": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.", + "kubeletEndpoint": "Endpoint on which Kubelet is listening.", +} + +func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { + return map_NodeDaemonEndpoints +} + +var map_NodeList = map[string]string{ + "": "NodeList is the whole list of all Nodes which have been registered with master.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of nodes", +} + +func (NodeList) SwaggerDoc() map[string]string { + return map_NodeList +} + +var map_NodeProxyOptions = map[string]string{ + "": "NodeProxyOptions is the query options to a Node's proxy call.", + "path": "Path is the URL path to use for the current proxy request to node.", +} + +func (NodeProxyOptions) SwaggerDoc() map[string]string { + return map_NodeProxyOptions +} + +var map_NodeResources = map[string]string{ + "": "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.", + "Capacity": "Capacity represents the available resources of a node", +} + +func (NodeResources) SwaggerDoc() map[string]string { + return map_NodeResources +} + +var map_NodeSelector = map[string]string{ + "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.", + "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.", +} + +func (NodeSelector) SwaggerDoc() map[string]string { + return map_NodeSelector +} + +var map_NodeSelectorRequirement = map[string]string{ + "": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "The label key that the selector applies to.", + "operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.", + "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. 
If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.", +} + +func (NodeSelectorRequirement) SwaggerDoc() map[string]string { + return map_NodeSelectorRequirement +} + +var map_NodeSelectorTerm = map[string]string{ + "": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.", + "matchExpressions": "A list of node selector requirements by node's labels.", + "matchFields": "A list of node selector requirements by node's fields.", +} + +func (NodeSelectorTerm) SwaggerDoc() map[string]string { + return map_NodeSelectorTerm +} + +var map_NodeSpec = map[string]string{ + "": "NodeSpec describes the attributes that a node is created with.", + "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", + "podCIDRs": "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", + "providerID": "ID of the node assigned by the cloud provider in the format: ://", + "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", + "taints": "If specified, the node's taints.", + "configSource": "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field", + "externalID": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966", +} + +func (NodeSpec) SwaggerDoc() map[string]string { + return map_NodeSpec +} + +var map_NodeStatus = map[string]string{ + "": "NodeStatus is information about the current status of a node.", + "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", + "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", + "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.", + "daemonEndpoints": "Endpoints of daemons running on the Node.", + "nodeInfo": "Set of ids/uuids to uniquely identify the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#info", + "images": "List of container images on this node", + "volumesInUse": "List of attachable volumes in use (mounted) by the node.", + "volumesAttached": "List of volumes that are attached to the node.", + "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.", +} + +func (NodeStatus) SwaggerDoc() map[string]string { + return map_NodeStatus +} + +var map_NodeSystemInfo = map[string]string{ + "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.", + "machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html", + "systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html", + "bootID": "Boot ID reported by the node.", + "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).", + "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).", + "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).", + "kubeletVersion": "Kubelet Version reported by the node.", + "kubeProxyVersion": "KubeProxy Version reported by the node.", + "operatingSystem": "The Operating System reported by the node", + "architecture": "The Architecture reported by the node", +} + +func (NodeSystemInfo) SwaggerDoc() map[string]string { + return map_NodeSystemInfo +} + +var map_ObjectFieldSelector = map[string]string{ + "": "ObjectFieldSelector selects an APIVersioned field of an object.", + "apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".", + "fieldPath": "Path of the field to select in the specified API version.", +} + +func (ObjectFieldSelector) SwaggerDoc() map[string]string { + return map_ObjectFieldSelector +} + +var map_ObjectReference = map[string]string{ + "": "ObjectReference contains enough information to let you inspect or modify the referred object.", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", + "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", + "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", + "apiVersion": "API version of the referent.", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. 
For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", +} + +func (ObjectReference) SwaggerDoc() map[string]string { + return map_ObjectReference +} + +var map_PersistentVolume = map[string]string{ + "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", + "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", +} + +func (PersistentVolume) SwaggerDoc() map[string]string { + return map_PersistentVolume +} + +var map_PersistentVolumeClaim = map[string]string{ + "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", +} + +func (PersistentVolumeClaim) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaim +} + +var map_PersistentVolumeClaimCondition = map[string]string{ + "": "PersistentVolumeClaimCondition contails details about state of pvc", + "lastProbeTime": "Last time we probed the condition.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.", + "message": "Human-readable message indicating details about last transition.", +} + +func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimCondition +} + +var map_PersistentVolumeClaimList = map[string]string{ + "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "A list of persistent volume claims. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", +} + +func (PersistentVolumeClaimList) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimList +} + +var map_PersistentVolumeClaimSpec = map[string]string{ + "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes", + "accessModes": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "selector": "A label query over volumes to consider for binding.", + "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", + "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", + "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", + "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", +} + +func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimSpec +} + +var map_PersistentVolumeClaimStatus = map[string]string{ + "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.", + "phase": "Phase represents the current phase of PersistentVolumeClaim.", + "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1", + "capacity": "Represents the actual resources of the underlying volume.", + "conditions": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.", +} + +func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimStatus +} + +var map_PersistentVolumeClaimVolumeSource = map[string]string{ + "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).", + "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "readOnly": "Will force the ReadOnly setting in VolumeMounts. 
Default false.", +} + +func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { + return map_PersistentVolumeClaimVolumeSource +} + +var map_PersistentVolumeList = map[string]string{ + "": "PersistentVolumeList is a list of PersistentVolume items.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", +} + +func (PersistentVolumeList) SwaggerDoc() map[string]string { + return map_PersistentVolumeList +} + +var map_PersistentVolumeSource = map[string]string{ + "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "local": "Local represents directly-attached storage with node affinity", + "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", + "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", +} + +func (PersistentVolumeSource) SwaggerDoc() map[string]string { + return map_PersistentVolumeSource +} + +var map_PersistentVolumeSpec = map[string]string{ + "": "PersistentVolumeSpec is the specification of a persistent volume.", + "capacity": "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity", + "accessModes": "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes", + "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding", + "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", + "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", + "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", + "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. 
This field influences the scheduling of pods that use this volume.", +} + +func (PersistentVolumeSpec) SwaggerDoc() map[string]string { + return map_PersistentVolumeSpec +} + +var map_PersistentVolumeStatus = map[string]string{ + "": "PersistentVolumeStatus is the current status of a persistent volume.", + "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", + "message": "A human-readable message indicating details about why the volume is in this state.", + "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", +} + +func (PersistentVolumeStatus) SwaggerDoc() map[string]string { + return map_PersistentVolumeStatus +} + +var map_PhotonPersistentDiskVolumeSource = map[string]string{ + "": "Represents a Photon Controller persistent disk resource.", + "pdID": "ID that identifies Photon Controller persistent disk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", +} + +func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { + return map_PhotonPersistentDiskVolumeSource +} + +var map_Pod = map[string]string{ + "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Pod) SwaggerDoc() map[string]string { + return map_Pod +} + +var map_PodAffinity = map[string]string{ + "": "Pod affinity is a group of inter pod affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAffinity) SwaggerDoc() map[string]string { + return map_PodAffinity +} + +var map_PodAffinityTerm = map[string]string{ + "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", + "labelSelector": "A label query over a set of resources, in this case pods.", + "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", + "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", +} + +func (PodAffinityTerm) SwaggerDoc() map[string]string { + return map_PodAffinityTerm +} + +var map_PodAntiAffinity = map[string]string{ + "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", + "requiredDuringSchedulingIgnoredDuringExecution": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", +} + +func (PodAntiAffinity) SwaggerDoc() map[string]string { + return map_PodAntiAffinity +} + +var map_PodAttachOptions = map[string]string{ + "": "PodAttachOptions is the query options to a Pod's remote attach call.", + "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", + "stdout": "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.", + "stderr": "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.", + "tty": "TTY if true indicates that a tty will be allocated for the attach call. 
This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.", + "container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.", +} + +func (PodAttachOptions) SwaggerDoc() map[string]string { + return map_PodAttachOptions +} + +var map_PodCondition = map[string]string{ + "": "PodCondition contains details for the current condition of this pod.", + "type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "lastProbeTime": "Last time we probed the condition.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", + "message": "Human-readable message indicating details about last transition.", +} + +func (PodCondition) SwaggerDoc() map[string]string { + return map_PodCondition +} + +var map_PodDNSConfig = map[string]string{ + "": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", + "nameservers": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", + "searches": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", + "options": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.", +} + +func (PodDNSConfig) SwaggerDoc() map[string]string { + return map_PodDNSConfig +} + +var map_PodDNSConfigOption = map[string]string{ + "": "PodDNSConfigOption defines DNS resolver options of a pod.", + "name": "Required.", +} + +func (PodDNSConfigOption) SwaggerDoc() map[string]string { + return map_PodDNSConfigOption +} + +var map_PodExecOptions = map[string]string{ + "": "PodExecOptions is the query options to a Pod's remote exec call.", + "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", + "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.", + "stderr": "Redirect the standard error stream of the pod for this call. Defaults to true.", + "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", + "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", + "command": "Command is the remote command to execute. argv array. Not executed within a shell.", +} + +func (PodExecOptions) SwaggerDoc() map[string]string { + return map_PodExecOptions +} + +var map_PodIP = map[string]string{ + "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. 
Routable at least within the cluster.", + "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", +} + +func (PodIP) SwaggerDoc() map[string]string { + return map_PodIP +} + +var map_PodList = map[string]string{ + "": "PodList is a list of Pods.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", +} + +func (PodList) SwaggerDoc() map[string]string { + return map_PodList +} + +var map_PodLogOptions = map[string]string{ + "": "PodLogOptions is the query options for a Pod's logs REST call.", + "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "Follow the log stream of the pod. Defaults to false.", + "previous": "Return previous terminated container logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", + "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", +} + +func (PodLogOptions) SwaggerDoc() map[string]string { + return map_PodLogOptions +} + +var map_PodPortForwardOptions = map[string]string{ + "": "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. 
It requires the port to be passed in the `port` header as part of request.", + "ports": "List of ports to forward Required when using WebSockets", +} + +func (PodPortForwardOptions) SwaggerDoc() map[string]string { + return map_PodPortForwardOptions +} + +var map_PodProxyOptions = map[string]string{ + "": "PodProxyOptions is the query options to a Pod's proxy call.", + "path": "Path is the URL path to use for the current proxy request to pod.", +} + +func (PodProxyOptions) SwaggerDoc() map[string]string { + return map_PodProxyOptions +} + +var map_PodReadinessGate = map[string]string{ + "": "PodReadinessGate contains the reference to a pod condition", + "conditionType": "ConditionType refers to a condition in the pod's condition list with matching type.", +} + +func (PodReadinessGate) SwaggerDoc() map[string]string { + return map_PodReadinessGate +} + +var map_PodSecurityContext = map[string]string{ + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. 
Pods with unsupported sysctls (by the container runtime) might fail to launch.", + "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".", +} + +func (PodSecurityContext) SwaggerDoc() map[string]string { + return map_PodSecurityContext +} + +var map_PodSignature = map[string]string{ + "": "Describes the class of pods that should avoid this node. Exactly one field should be set.", + "podController": "Reference to controller whose pods should avoid this node.", +} + +func (PodSignature) SwaggerDoc() map[string]string { + return map_PodSignature +} + +var map_PodSpec = map[string]string{ + "": "PodSpec is a description of a pod.", + "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", + "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", + "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", + "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. 
Defaults to 30 seconds.", + "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", + "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", + "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", + "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", + "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", + "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", + "hostPID": "Use the host's pid namespace. Optional: Default to false.", + "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", + "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.", + "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", + "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", + "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", + "subdomain": "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", + "affinity": "If specified, the pod's scheduling constraints", + "schedulerName": "If specified, the pod will be dispatched by specified scheduler. 
If not specified, the pod will be dispatched by default scheduler.", + "tolerations": "If specified, the pod's tolerations.", + "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", + "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", + "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", + "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", + "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", + "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", + "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is only honored by clusters that enable the EvenPodsSpread feature. 
All topologySpreadConstraints are ANDed.", +} + +func (PodSpec) SwaggerDoc() map[string]string { + return map_PodSpec +} + +var map_PodStatus = map[string]string{ + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "containerStatuses": "The list has one entry per container in the manifest. 
Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", + "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", +} + +func (PodStatus) SwaggerDoc() map[string]string { + return map_PodStatus +} + +var map_PodStatusResult = map[string]string{ + "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encoded/decoded", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PodStatusResult) SwaggerDoc() map[string]string { + return map_PodStatusResult +} + +var map_PodTemplate = map[string]string{ + "": "PodTemplate describes a template for creating copies of a predefined pod.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PodTemplate) SwaggerDoc() map[string]string { + return map_PodTemplate +} + +var map_PodTemplateList = map[string]string{ + "": "PodTemplateList is a list of PodTemplates.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of pod templates", +} + +func (PodTemplateList) SwaggerDoc() map[string]string { + return map_PodTemplateList +} + +var map_PodTemplateSpec = map[string]string{ + "": "PodTemplateSpec describes the data a pod should have when created from a template", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (PodTemplateSpec) SwaggerDoc() map[string]string { + return map_PodTemplateSpec +} + +var map_PortworxVolumeSource = map[string]string{ + "": "PortworxVolumeSource represents a Portworx volume resource.", + "volumeID": "VolumeID uniquely identifies a Portworx volume", + "fsType": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (PortworxVolumeSource) SwaggerDoc() map[string]string { + return map_PortworxVolumeSource +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) 
is carried out.", + "uid": "Specifies the target UID.", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_PreferAvoidPodsEntry = map[string]string{ + "": "Describes a class of pods that should avoid this node.", + "podSignature": "The class of pods.", + "evictionTime": "Time at which this entry was added to the list.", + "reason": "(brief) reason why this entry was added to the list.", + "message": "Human readable message indicating why this entry was added to the list.", +} + +func (PreferAvoidPodsEntry) SwaggerDoc() map[string]string { + return map_PreferAvoidPodsEntry +} + +var map_PreferredSchedulingTerm = map[string]string{ + "": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", + "weight": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "preference": "A node selector term, associated with the corresponding weight.", +} + +func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { + return map_PreferredSchedulingTerm +} + +var map_Probe = map[string]string{ + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", + "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", +} + +func (Probe) SwaggerDoc() map[string]string { + return map_Probe +} + +var map_ProjectedVolumeSource = map[string]string{ + "": "Represents a projected volume source", + "sources": "list of volume projections", + "defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", +} + +func (ProjectedVolumeSource) SwaggerDoc() map[string]string { + return map_ProjectedVolumeSource +} + +var map_QuobyteVolumeSource = map[string]string{ + "": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", + "registry": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", + "volume": "Volume is a string that references an already created Quobyte volume by name.", + "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. 
Defaults to false.", + "user": "User to map volume access to Defaults to serviceaccount user", + "group": "Group to map volume access to Default is no group", + "tenant": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", +} + +func (QuobyteVolumeSource) SwaggerDoc() map[string]string { + return map_QuobyteVolumeSource +} + +var map_RBDPersistentVolumeSource = map[string]string{ + "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", +} + +func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_RBDPersistentVolumeSource +} + +var map_RBDVolumeSource = map[string]string{ + "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", + "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", + "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", +} + +func (RBDVolumeSource) SwaggerDoc() map[string]string { + return map_RBDVolumeSource +} + +var map_RangeAllocation = map[string]string{ + "": "RangeAllocation is not a public type.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "range": "Range is string that identifies the range represented by 'data'.", + "data": "Data is a bit array containing all allocated addresses in the previous segment.", +} + +func (RangeAllocation) SwaggerDoc() map[string]string { + return map_RangeAllocation +} + +var map_ReplicationController = map[string]string{ + "": "ReplicationController represents the configuration of a replication controller.", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ReplicationController) SwaggerDoc() map[string]string { + return map_ReplicationController +} + +var map_ReplicationControllerCondition = map[string]string{ + "": "ReplicationControllerCondition describes the state of a replication controller at a certain point.", + "type": "Type of replication controller condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicationControllerCondition) SwaggerDoc() map[string]string { + return map_ReplicationControllerCondition +} + +var map_ReplicationControllerList = map[string]string{ + "": "ReplicationControllerList is a collection of replication controllers.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", +} + +func (ReplicationControllerList) SwaggerDoc() map[string]string { + return map_ReplicationControllerList +} + +var map_ReplicationControllerSpec = map[string]string{ + "": "ReplicationControllerSpec is the specification of a replication controller.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", +} + +func (ReplicationControllerSpec) SwaggerDoc() map[string]string { + return map_ReplicationControllerSpec +} + +var map_ReplicationControllerStatus = map[string]string{ + "": "ReplicationControllerStatus represents the current status of a replication controller.", + "replicas": "Replicas is the most recently observed number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", + "readyReplicas": "The number of ready replicas for this replication controller.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.", + "conditions": "Represents the latest available observations of a replication controller's current state.", +} + +func (ReplicationControllerStatus) SwaggerDoc() map[string]string { + return map_ReplicationControllerStatus +} + +var map_ResourceFieldSelector = map[string]string{ + "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", + "containerName": "Container name: required for volumes, optional for env vars", + "resource": "Required: resource to select", + "divisor": "Specifies the output format of the exposed resources, defaults to \"1\"", +} + +func (ResourceFieldSelector) SwaggerDoc() map[string]string { + return map_ResourceFieldSelector +} + +var map_ResourceQuota = map[string]string{ + "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ResourceQuota) SwaggerDoc() map[string]string { + return map_ResourceQuota +} + +var map_ResourceQuotaList = map[string]string{ + "": "ResourceQuotaList is a list of ResourceQuota items.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "Items is a list of ResourceQuota objects. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", +} + +func (ResourceQuotaList) SwaggerDoc() map[string]string { + return map_ResourceQuotaList +} + +var map_ResourceQuotaSpec = map[string]string{ + "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", + "hard": "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", + "scopeSelector": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.", +} + +func (ResourceQuotaSpec) SwaggerDoc() map[string]string { + return map_ResourceQuotaSpec +} + +var map_ResourceQuotaStatus = map[string]string{ + "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", + "hard": "Hard is the set of enforced hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", + "used": "Used is the current observed total usage of the resource in the namespace.", +} + +func (ResourceQuotaStatus) SwaggerDoc() map[string]string { + return map_ResourceQuotaStatus +} + +var map_ResourceRequirements = map[string]string{ + "": "ResourceRequirements describes the compute resource requirements.", + "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", +} + +func (ResourceRequirements) SwaggerDoc() map[string]string { + return map_ResourceRequirements +} + +var map_SELinuxOptions = map[string]string{ + "": "SELinuxOptions are the labels to be applied to the container", + "user": "User is a SELinux user label that applies to the container.", + "role": "Role is a SELinux role label that applies to the container.", + "type": "Type is a SELinux type label that applies to the container.", + "level": "Level is SELinux level label that applies to the container.", +} + +func (SELinuxOptions) SwaggerDoc() map[string]string { + return map_SELinuxOptions +} + +var map_ScaleIOPersistentVolumeSource = map[string]string{ + "": "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", + "gateway": "The host address of the ScaleIO API Gateway.", + "system": "The name of the storage system as configured in ScaleIO.", + "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", + "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", + "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", + "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. 
Default is ThinProvisioned.", + "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_ScaleIOPersistentVolumeSource +} + +var map_ScaleIOVolumeSource = map[string]string{ + "": "ScaleIOVolumeSource represents a persistent ScaleIO volume", + "gateway": "The host address of the ScaleIO API Gateway.", + "system": "The name of the storage system as configured in ScaleIO.", + "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", + "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", + "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", + "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", + "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", + "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", +} + +func (ScaleIOVolumeSource) SwaggerDoc() map[string]string { + return map_ScaleIOVolumeSource +} + +var map_ScopeSelector = map[string]string{ + "": "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.", + "matchExpressions": "A list of scope selector requirements by scope of the resources.", +} + +func (ScopeSelector) SwaggerDoc() map[string]string { + return map_ScopeSelector +} + +var map_ScopedResourceSelectorRequirement = map[string]string{ + "": "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.", + "scopeName": "The name of the scope that the selector applies to.", + "operator": "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.", + "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", +} + +func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { + return map_ScopedResourceSelectorRequirement +} + +var map_Secret = map[string]string{ + "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "immutable": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. 
This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.", + "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", + "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. It is never output when reading from the API.", + "type": "Used to facilitate programmatic handling of secret data.", +} + +func (Secret) SwaggerDoc() map[string]string { + return map_Secret +} + +var map_SecretEnvSource = map[string]string{ + "": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", + "optional": "Specify whether the Secret must be defined", +} + +func (SecretEnvSource) SwaggerDoc() map[string]string { + return map_SecretEnvSource +} + +var map_SecretKeySelector = map[string]string{ + "": "SecretKeySelector selects a key of a Secret.", + "key": "The key of the secret to select from. Must be a valid secret key.", + "optional": "Specify whether the Secret or its key must be defined", +} + +func (SecretKeySelector) SwaggerDoc() map[string]string { + return map_SecretKeySelector +} + +var map_SecretList = map[string]string{ + "": "SecretList is a list of Secret.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", +} + +func (SecretList) SwaggerDoc() map[string]string { + return map_SecretList +} + +var map_SecretProjection = map[string]string{ + "": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "optional": "Specify whether the Secret or its key must be defined", +} + +func (SecretProjection) SwaggerDoc() map[string]string { + return map_SecretProjection +} + +var map_SecretReference = map[string]string{ + "": "SecretReference represents a Secret Reference. 
It has enough information to retrieve secret in any namespace", + "name": "Name is unique within a namespace to reference a secret resource.", + "namespace": "Namespace defines the space within which the secret name must be unique.", +} + +func (SecretReference) SwaggerDoc() map[string]string { + return map_SecretReference +} + +var map_SecretVolumeSource = map[string]string{ + "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", + "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", + "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", + "optional": "Specify whether the Secret or its keys must be defined", +} + +func (SecretVolumeSource) SwaggerDoc() map[string]string { + return map_SecretVolumeSource +} + +var map_SecurityContext = map[string]string{ + "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", + "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", + "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", + "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.", + "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN", + "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. This requires the ProcMountType feature flag to be enabled.", +} + +func (SecurityContext) SwaggerDoc() map[string]string { + return map_SecurityContext +} + +var map_SerializedReference = map[string]string{ + "": "SerializedReference is a reference to serialized object.", + "reference": "The reference to an object in the system.", +} + +func (SerializedReference) SwaggerDoc() map[string]string { + return map_SerializedReference +} + +var map_Service = map[string]string{ + "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Service) SwaggerDoc() map[string]string { + return map_Service +} + +var map_ServiceAccount = map[string]string{ + "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", + "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", + "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. 
Can be overridden at the pod level.", +} + +func (ServiceAccount) SwaggerDoc() map[string]string { + return map_ServiceAccount +} + +var map_ServiceAccountList = map[string]string{ + "": "ServiceAccountList is a list of ServiceAccount objects", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", +} + +func (ServiceAccountList) SwaggerDoc() map[string]string { + return map_ServiceAccountList +} + +var map_ServiceAccountTokenProjection = map[string]string{ + "": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", + "audience": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver.", + "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", + "path": "Path is the path relative to the mount point of the file to project the token into.", +} + +func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { + return map_ServiceAccountTokenProjection +} + +var map_ServiceList = map[string]string{ + "": "ServiceList holds a list of services.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of services", +} + +func (ServiceList) SwaggerDoc() map[string]string { + return map_ServiceList +} + +var map_ServicePort = map[string]string{ + "": "ServicePort contains information on service's port.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", + "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", + "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.", + "port": "The port that will be exposed by this service.", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). 
This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", +} + +func (ServicePort) SwaggerDoc() map[string]string { + return map_ServicePort +} + +var map_ServiceProxyOptions = map[string]string{ + "": "ServiceProxyOptions is the query options to a Service's proxy call.", + "path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.", +} + +func (ServiceProxyOptions) SwaggerDoc() map[string]string { + return map_ServiceProxyOptions +} + +var map_ServiceSpec = map[string]string{ + "": "ServiceSpec describes the attributes that a user creates on a service.", + "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", + "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. 
More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", + "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", + "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", + "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", + "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", + "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", + "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", + "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", + "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", + "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. 
Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.", + "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.", +} + +func (ServiceSpec) SwaggerDoc() map[string]string { + return map_ServiceSpec +} + +var map_ServiceStatus = map[string]string{ + "": "ServiceStatus represents the current status of a service.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", +} + +func (ServiceStatus) SwaggerDoc() map[string]string { + return map_ServiceStatus +} + +var map_SessionAffinityConfig = map[string]string{ + "": "SessionAffinityConfig represents the configurations of session affinity.", + "clientIP": "clientIP contains the configurations of Client IP based session affinity.", +} + +func (SessionAffinityConfig) SwaggerDoc() map[string]string { + return map_SessionAffinityConfig +} + +var map_StorageOSPersistentVolumeSource = map[string]string{ + "": "Represents a StorageOS persistent volume resource.", + "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", +} + +func (StorageOSPersistentVolumeSource) SwaggerDoc() map[string]string { + return map_StorageOSPersistentVolumeSource +} + +var map_StorageOSVolumeSource = map[string]string{ + "": "Represents a StorageOS persistent volume resource.", + "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", + "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. 
If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", + "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", +} + +func (StorageOSVolumeSource) SwaggerDoc() map[string]string { + return map_StorageOSVolumeSource +} + +var map_Sysctl = map[string]string{ + "": "Sysctl defines a kernel parameter to be set", + "name": "Name of a property to set", + "value": "Value of a property to set", +} + +func (Sysctl) SwaggerDoc() map[string]string { + return map_Sysctl +} + +var map_TCPSocketAction = map[string]string{ + "": "TCPSocketAction describes an action based on opening a socket", + "port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", + "host": "Optional: Host name to connect to, defaults to the pod IP.", +} + +func (TCPSocketAction) SwaggerDoc() map[string]string { + return map_TCPSocketAction +} + +var map_Taint = map[string]string{ + "": "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", + "key": "Required. The taint key to be applied to a node.", + "value": "The taint value corresponding to the taint key.", + "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", + "timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", +} + +func (Taint) SwaggerDoc() map[string]string { + return map_Taint +} + +var map_Toleration = map[string]string{ + "": "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", + "key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). 
Zero and negative values will be treated as 0 (evict immediately) by the system.", +} + +func (Toleration) SwaggerDoc() map[string]string { + return map_Toleration +} + +var map_TopologySelectorLabelRequirement = map[string]string{ + "": "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.", + "key": "The label key that the selector applies to.", + "values": "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.", +} + +func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string { + return map_TopologySelectorLabelRequirement +} + +var map_TopologySelectorTerm = map[string]string{ + "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + "matchLabelExpressions": "A list of topology selector requirements by labels.", +} + +func (TopologySelectorTerm) SwaggerDoc() map[string]string { + return map_TopologySelectorTerm +} + +var map_TopologySpreadConstraint = map[string]string{ + "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", + "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", + "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", + "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. - DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", + "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", +} + +func (TopologySpreadConstraint) SwaggerDoc() map[string]string { + return map_TopologySpreadConstraint +} + +var map_TypedLocalObjectReference = map[string]string{ + "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", + "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (TypedLocalObjectReference) SwaggerDoc() map[string]string { + return map_TypedLocalObjectReference +} + +var map_Volume = map[string]string{ + "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", + "name": "Volume's name. 
Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", +} + +func (Volume) SwaggerDoc() map[string]string { + return map_Volume +} + +var map_VolumeDevice = map[string]string{ + "": "volumeDevice describes a mapping of a raw block device within a container.", + "name": "name must match the name of a persistentVolumeClaim in the pod", + "devicePath": "devicePath is the path inside of the container that the device will be mapped to.", +} + +func (VolumeDevice) SwaggerDoc() map[string]string { + return map_VolumeDevice +} + +var map_VolumeMount = map[string]string{ + "": "VolumeMount describes a mounting of a Volume within a container.", + "name": "This must match the Name of a Volume.", + "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", + "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", + "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", + "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", + "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", +} + +func (VolumeMount) SwaggerDoc() map[string]string { + return map_VolumeMount +} + +var map_VolumeNodeAffinity = map[string]string{ + "": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", + "required": "Required specifies hard node constraints that must be met.", +} + +func (VolumeNodeAffinity) SwaggerDoc() map[string]string { + return map_VolumeNodeAffinity +} + +var map_VolumeProjection = map[string]string{ + "": "Projection that may be projected along with other supported volume types", + "secret": "information about the secret data to project", + "downwardAPI": "information about the downwardAPI data to project", + "configMap": "information about the configMap data to project", + "serviceAccountToken": "information about the serviceAccountToken data to project", +} + +func (VolumeProjection) SwaggerDoc() map[string]string { + return map_VolumeProjection +} + +var map_VolumeSource = map[string]string{ + "": "Represents the source of a volume to mount. Only one of its members may be specified.", + "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", + "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", + "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", + "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", + "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", + "secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", + "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", + "csi": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).", +} + +func (VolumeSource) SwaggerDoc() map[string]string { + return map_VolumeSource +} + +var map_VsphereVirtualDiskVolumeSource = map[string]string{ + "": "Represents a vSphere volume resource.", + "volumePath": "Path that identifies vSphere volume vmdk", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", + "storagePolicyName": "Storage Policy Based Management (SPBM) profile name.", + "storagePolicyID": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", +} + +func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { + return map_VsphereVirtualDiskVolumeSource +} + +var map_WeightedPodAffinityTerm = map[string]string{ + "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "podAffinityTerm": "Required. A pod affinity term, associated with the corresponding weight.", +} + +func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { + return map_WeightedPodAffinityTerm +} + +var map_WindowsSecurityContextOptions = map[string]string{ + "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", + "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", + "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", + "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", +} + +func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { + return map_WindowsSecurityContextOptions +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go new file mode 100644 index 0000000..22aa55b --- /dev/null +++ b/vendor/k8s.io/api/core/v1/well_known_labels.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + LabelHostname = "kubernetes.io/hostname" + + LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" + LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" + LabelZoneFailureDomainStable = "topology.kubernetes.io/zone" + LabelZoneRegionStable = "topology.kubernetes.io/region" + + LabelInstanceType = "beta.kubernetes.io/instance-type" + LabelInstanceTypeStable = "node.kubernetes.io/instance-type" + + LabelOSStable = "kubernetes.io/os" + LabelArchStable = "kubernetes.io/arch" + + // LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0. + // It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763) + LabelWindowsBuild = "node.kubernetes.io/windows-build" + + // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) + LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" + // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) + LabelNamespaceSuffixNode = "node.kubernetes.io" + + // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled + LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" + + // IsHeadlessService is added by Controller to an Endpoint denoting if its parent + // Service is Headless. The existence of this label can be used further by other + // controllers and kube-proxy to check if the Endpoint objects should be replicated when + // using Headless Services + IsHeadlessService = "service.kubernetes.io/headless" +) diff --git a/vendor/k8s.io/api/core/v1/well_known_taints.go b/vendor/k8s.io/api/core/v1/well_known_taints.go new file mode 100644 index 0000000..e1a8f62 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/well_known_taints.go @@ -0,0 +1,48 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +const ( + // TaintNodeNotReady will be added when node is not ready + // and removed when node becomes ready. + TaintNodeNotReady = "node.kubernetes.io/not-ready" + + // TaintNodeUnreachable will be added when node becomes unreachable + // (corresponding to NodeReady status ConditionUnknown) + // and removed when node becomes reachable (NodeReady status ConditionTrue). + TaintNodeUnreachable = "node.kubernetes.io/unreachable" + + // TaintNodeUnschedulable will be added when node becomes unschedulable + // and removed when node becomes scheduable. + TaintNodeUnschedulable = "node.kubernetes.io/unschedulable" + + // TaintNodeMemoryPressure will be added when node has memory pressure + // and removed when node has enough memory. + TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure" + + // TaintNodeDiskPressure will be added when node has disk pressure + // and removed when node has enough disk. + TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure" + + // TaintNodeNetworkUnavailable will be added when node's network is unavailable + // and removed when network becomes ready. + TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable" + + // TaintNodePIDPressure will be added when node has pid pressure + // and removed when node has enough disk. + TaintNodePIDPressure = "node.kubernetes.io/pid-pressure" +) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..23d9644 --- /dev/null +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -0,0 +1,5805 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. +func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { + if in == nil { + return nil + } + out := new(AWSElasticBlockStoreVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Affinity) DeepCopyInto(out *Affinity) { + *out = *in + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(NodeAffinity) + (*in).DeepCopyInto(*out) + } + if in.PodAffinity != nil { + in, out := &in.PodAffinity, &out.PodAffinity + *out = new(PodAffinity) + (*in).DeepCopyInto(*out) + } + if in.PodAntiAffinity != nil { + in, out := &in.PodAntiAffinity, &out.PodAntiAffinity + *out = new(PodAntiAffinity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. +func (in *Affinity) DeepCopy() *Affinity { + if in == nil { + return nil + } + out := new(Affinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. +func (in *AttachedVolume) DeepCopy() *AttachedVolume { + if in == nil { + return nil + } + out := new(AttachedVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { + *out = *in + if in.PreferAvoidPods != nil { + in, out := &in.PreferAvoidPods, &out.PreferAvoidPods + *out = make([]PreferAvoidPodsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. +func (in *AvoidPods) DeepCopy() *AvoidPods { + if in == nil { + return nil + } + out := new(AvoidPods) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { + *out = *in + if in.CachingMode != nil { + in, out := &in.CachingMode, &out.CachingMode + *out = new(AzureDataDiskCachingMode) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.Kind != nil { + in, out := &in.Kind, &out.Kind + *out = new(AzureDataDiskKind) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. +func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { + if in == nil { + return nil + } + out := new(AzureDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { + *out = *in + if in.SecretNamespace != nil { + in, out := &in.SecretNamespace, &out.SecretNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. 
+func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { + if in == nil { + return nil + } + out := new(AzureFilePersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. +func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { + if in == nil { + return nil + } + out := new(AzureFileVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Binding) DeepCopyInto(out *Binding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Target = in.Target + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. +func (in *Binding) DeepCopy() *Binding { + if in == nil { + return nil + } + out := new(Binding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Binding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { + *out = *in + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ControllerPublishSecretRef != nil { + in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef + *out = new(SecretReference) + **out = **in + } + if in.NodeStageSecretRef != nil { + in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef + *out = new(SecretReference) + **out = **in + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(SecretReference) + **out = **in + } + if in.ControllerExpandSecretRef != nil { + in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. +func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) { + *out = *in + if in.ReadOnly != nil { + in, out := &in.ReadOnly, &out.ReadOnly + *out = new(bool) + **out = **in + } + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + if in.VolumeAttributes != nil { + in, out := &in.VolumeAttributes, &out.VolumeAttributes + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.NodePublishSecretRef != nil { + in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource. +func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource { + if in == nil { + return nil + } + out := new(CSIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Capabilities) DeepCopyInto(out *Capabilities) { + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + if in.Drop != nil { + in, out := &in.Drop, &out.Drop + *out = make([]Capability, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. +func (in *Capabilities) DeepCopy() *Capabilities { + if in == nil { + return nil + } + out := new(Capabilities) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. +func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CephFSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { + *out = *in + if in.Monitors != nil { + in, out := &in.Monitors, &out.Monitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. +func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { + if in == nil { + return nil + } + out := new(CephFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource. +func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource { + if in == nil { + return nil + } + out := new(CinderPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. +func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { + if in == nil { + return nil + } + out := new(CinderVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { + *out = *in + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. +func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { + if in == nil { + return nil + } + out := new(ClientIPConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. +func (in *ComponentCondition) DeepCopy() *ComponentCondition { + if in == nil { + return nil + } + out := new(ComponentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ComponentCondition, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. +func (in *ComponentStatus) DeepCopy() *ComponentStatus { + if in == nil { + return nil + } + out := new(ComponentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatus) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ComponentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. +func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { + if in == nil { + return nil + } + out := new(ComponentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ComponentStatusList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Immutable != nil { + in, out := &in.Immutable, &out.Immutable + *out = new(bool) + **out = **in + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.BinaryData != nil { + in, out := &in.BinaryData, &out.BinaryData + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. +func (in *ConfigMap) DeepCopy() *ConfigMap { + if in == nil { + return nil + } + out := new(ConfigMap) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMap) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. +func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { + if in == nil { + return nil + } + out := new(ConfigMapEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. 
+func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { + if in == nil { + return nil + } + out := new(ConfigMapKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ConfigMap, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. +func (in *ConfigMapList) DeepCopy() *ConfigMapList { + if in == nil { + return nil + } + out := new(ConfigMapList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ConfigMapList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource. +func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource { + if in == nil { + return nil + } + out := new(ConfigMapNodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. +func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { + if in == nil { + return nil + } + out := new(ConfigMapProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. +func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { + if in == nil { + return nil + } + out := new(ConfigMapVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Container) DeepCopyInto(out *Container) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Container. +func (in *Container) DeepCopy() *Container { + if in == nil { + return nil + } + out := new(Container) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerImage) DeepCopyInto(out *ContainerImage) { + *out = *in + if in.Names != nil { + in, out := &in.Names, &out.Names + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerImage. +func (in *ContainerImage) DeepCopy() *ContainerImage { + if in == nil { + return nil + } + out := new(ContainerImage) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerPort) DeepCopyInto(out *ContainerPort) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerPort. +func (in *ContainerPort) DeepCopy() *ContainerPort { + if in == nil { + return nil + } + out := new(ContainerPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ContainerState) DeepCopyInto(out *ContainerState) { + *out = *in + if in.Waiting != nil { + in, out := &in.Waiting, &out.Waiting + *out = new(ContainerStateWaiting) + **out = **in + } + if in.Running != nil { + in, out := &in.Running, &out.Running + *out = new(ContainerStateRunning) + (*in).DeepCopyInto(*out) + } + if in.Terminated != nil { + in, out := &in.Terminated, &out.Terminated + *out = new(ContainerStateTerminated) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerState. +func (in *ContainerState) DeepCopy() *ContainerState { + if in == nil { + return nil + } + out := new(ContainerState) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateRunning) DeepCopyInto(out *ContainerStateRunning) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateRunning. +func (in *ContainerStateRunning) DeepCopy() *ContainerStateRunning { + if in == nil { + return nil + } + out := new(ContainerStateRunning) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateTerminated) DeepCopyInto(out *ContainerStateTerminated) { + *out = *in + in.StartedAt.DeepCopyInto(&out.StartedAt) + in.FinishedAt.DeepCopyInto(&out.FinishedAt) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateTerminated. +func (in *ContainerStateTerminated) DeepCopy() *ContainerStateTerminated { + if in == nil { + return nil + } + out := new(ContainerStateTerminated) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStateWaiting) DeepCopyInto(out *ContainerStateWaiting) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStateWaiting. +func (in *ContainerStateWaiting) DeepCopy() *ContainerStateWaiting { + if in == nil { + return nil + } + out := new(ContainerStateWaiting) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { + *out = *in + in.State.DeepCopyInto(&out.State) + in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) + if in.Started != nil { + in, out := &in.Started, &out.Started + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerStatus. +func (in *ContainerStatus) DeepCopy() *ContainerStatus { + if in == nil { + return nil + } + out := new(ContainerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonEndpoint) DeepCopyInto(out *DaemonEndpoint) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonEndpoint. 
+func (in *DaemonEndpoint) DeepCopy() *DaemonEndpoint { + if in == nil { + return nil + } + out := new(DaemonEndpoint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIProjection) DeepCopyInto(out *DownwardAPIProjection) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIProjection. +func (in *DownwardAPIProjection) DeepCopy() *DownwardAPIProjection { + if in == nil { + return nil + } + out := new(DownwardAPIProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeFile) DeepCopyInto(out *DownwardAPIVolumeFile) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeFile. +func (in *DownwardAPIVolumeFile) DeepCopy() *DownwardAPIVolumeFile { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeFile) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DownwardAPIVolumeSource) DeepCopyInto(out *DownwardAPIVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DownwardAPIVolumeFile, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DownwardAPIVolumeSource. +func (in *DownwardAPIVolumeSource) DeepCopy() *DownwardAPIVolumeSource { + if in == nil { + return nil + } + out := new(DownwardAPIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EmptyDirVolumeSource) DeepCopyInto(out *EmptyDirVolumeSource) { + *out = *in + if in.SizeLimit != nil { + in, out := &in.SizeLimit, &out.SizeLimit + x := (*in).DeepCopy() + *out = &x + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EmptyDirVolumeSource. +func (in *EmptyDirVolumeSource) DeepCopy() *EmptyDirVolumeSource { + if in == nil { + return nil + } + out := new(EmptyDirVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointAddress) DeepCopyInto(out *EndpointAddress) { + *out = *in + if in.NodeName != nil { + in, out := &in.NodeName, &out.NodeName + *out = new(string) + **out = **in + } + if in.TargetRef != nil { + in, out := &in.TargetRef, &out.TargetRef + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointAddress. +func (in *EndpointAddress) DeepCopy() *EndpointAddress { + if in == nil { + return nil + } + out := new(EndpointAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { + *out = *in + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointPort. +func (in *EndpointPort) DeepCopy() *EndpointPort { + if in == nil { + return nil + } + out := new(EndpointPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { + *out = *in + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.NotReadyAddresses != nil { + in, out := &in.NotReadyAddresses, &out.NotReadyAddresses + *out = make([]EndpointAddress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]EndpointPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointSubset. +func (in *EndpointSubset) DeepCopy() *EndpointSubset { + if in == nil { + return nil + } + out := new(EndpointSubset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Endpoints) DeepCopyInto(out *Endpoints) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subsets != nil { + in, out := &in.Subsets, &out.Subsets + *out = make([]EndpointSubset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Endpoints. +func (in *Endpoints) DeepCopy() *Endpoints { + if in == nil { + return nil + } + out := new(Endpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Endpoints) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Endpoints, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EndpointsList. +func (in *EndpointsList) DeepCopy() *EndpointsList { + if in == nil { + return nil + } + out := new(EndpointsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EndpointsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvFromSource) DeepCopyInto(out *EnvFromSource) { + *out = *in + if in.ConfigMapRef != nil { + in, out := &in.ConfigMapRef, &out.ConfigMapRef + *out = new(ConfigMapEnvSource) + (*in).DeepCopyInto(*out) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretEnvSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvFromSource. +func (in *EnvFromSource) DeepCopy() *EnvFromSource { + if in == nil { + return nil + } + out := new(EnvFromSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVar) DeepCopyInto(out *EnvVar) { + *out = *in + if in.ValueFrom != nil { + in, out := &in.ValueFrom, &out.ValueFrom + *out = new(EnvVarSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVar. +func (in *EnvVar) DeepCopy() *EnvVar { + if in == nil { + return nil + } + out := new(EnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EnvVarSource) DeepCopyInto(out *EnvVarSource) { + *out = *in + if in.FieldRef != nil { + in, out := &in.FieldRef, &out.FieldRef + *out = new(ObjectFieldSelector) + **out = **in + } + if in.ResourceFieldRef != nil { + in, out := &in.ResourceFieldRef, &out.ResourceFieldRef + *out = new(ResourceFieldSelector) + (*in).DeepCopyInto(*out) + } + if in.ConfigMapKeyRef != nil { + in, out := &in.ConfigMapKeyRef, &out.ConfigMapKeyRef + *out = new(ConfigMapKeySelector) + (*in).DeepCopyInto(*out) + } + if in.SecretKeyRef != nil { + in, out := &in.SecretKeyRef, &out.SecretKeyRef + *out = new(SecretKeySelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EnvVarSource. +func (in *EnvVarSource) DeepCopy() *EnvVarSource { + if in == nil { + return nil + } + out := new(EnvVarSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) { + *out = *in + in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer. 
+func (in *EphemeralContainer) DeepCopy() *EphemeralContainer { + if in == nil { + return nil + } + out := new(EphemeralContainer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ContainerPort, len(*in)) + copy(*out, *in) + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + in.Resources.DeepCopyInto(&out.Resources) + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeDevices != nil { + in, out := &in.VolumeDevices, &out.VolumeDevices + *out = make([]VolumeDevice, len(*in)) + copy(*out, *in) + } + if in.LivenessProbe != nil { + in, out := &in.LivenessProbe, &out.LivenessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.ReadinessProbe != nil { + in, out := &in.ReadinessProbe, &out.ReadinessProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.StartupProbe != nil { + in, out := &in.StartupProbe, &out.StartupProbe + *out = new(Probe) + (*in).DeepCopyInto(*out) + } + if in.Lifecycle != nil { + in, out := &in.Lifecycle, &out.Lifecycle + *out = new(Lifecycle) + (*in).DeepCopyInto(*out) + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(SecurityContext) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon. +func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon { + if in == nil { + return nil + } + out := new(EphemeralContainerCommon) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EphemeralContainers) DeepCopyInto(out *EphemeralContainers) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainers. +func (in *EphemeralContainers) DeepCopy() *EphemeralContainers { + if in == nil { + return nil + } + out := new(EphemeralContainers) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *EphemeralContainers) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.InvolvedObject = in.InvolvedObject + out.Source = in.Source + in.FirstTimestamp.DeepCopyInto(&out.FirstTimestamp) + in.LastTimestamp.DeepCopyInto(&out.LastTimestamp) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + if in.Related != nil { + in, out := &in.Related, &out.Related + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSource) DeepCopyInto(out *EventSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSource. +func (in *EventSource) DeepCopy() *EventSource { + if in == nil { + return nil + } + out := new(EventSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExecAction) DeepCopyInto(out *ExecAction) { + *out = *in + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecAction. +func (in *ExecAction) DeepCopy() *ExecAction { + if in == nil { + return nil + } + out := new(ExecAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FCVolumeSource) DeepCopyInto(out *FCVolumeSource) { + *out = *in + if in.TargetWWNs != nil { + in, out := &in.TargetWWNs, &out.TargetWWNs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Lun != nil { + in, out := &in.Lun, &out.Lun + *out = new(int32) + **out = **in + } + if in.WWIDs != nil { + in, out := &in.WWIDs, &out.WWIDs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FCVolumeSource. +func (in *FCVolumeSource) DeepCopy() *FCVolumeSource { + if in == nil { + return nil + } + out := new(FCVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexPersistentVolumeSource) DeepCopyInto(out *FlexPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexPersistentVolumeSource. +func (in *FlexPersistentVolumeSource) DeepCopy() *FlexPersistentVolumeSource { + if in == nil { + return nil + } + out := new(FlexPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlexVolumeSource) DeepCopyInto(out *FlexVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlexVolumeSource. +func (in *FlexVolumeSource) DeepCopy() *FlexVolumeSource { + if in == nil { + return nil + } + out := new(FlexVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FlockerVolumeSource) DeepCopyInto(out *FlockerVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FlockerVolumeSource. +func (in *FlockerVolumeSource) DeepCopy() *FlockerVolumeSource { + if in == nil { + return nil + } + out := new(FlockerVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *GCEPersistentDiskVolumeSource) DeepCopyInto(out *GCEPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GCEPersistentDiskVolumeSource. +func (in *GCEPersistentDiskVolumeSource) DeepCopy() *GCEPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(GCEPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GitRepoVolumeSource) DeepCopyInto(out *GitRepoVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GitRepoVolumeSource. +func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { + if in == nil { + return nil + } + out := new(GitRepoVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { + *out = *in + if in.EndpointsNamespace != nil { + in, out := &in.EndpointsNamespace, &out.EndpointsNamespace + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. +func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsVolumeSource. +func (in *GlusterfsVolumeSource) DeepCopy() *GlusterfsVolumeSource { + if in == nil { + return nil + } + out := new(GlusterfsVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPGetAction) DeepCopyInto(out *HTTPGetAction) { + *out = *in + out.Port = in.Port + if in.HTTPHeaders != nil { + in, out := &in.HTTPHeaders, &out.HTTPHeaders + *out = make([]HTTPHeader, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPGetAction. +func (in *HTTPGetAction) DeepCopy() *HTTPGetAction { + if in == nil { + return nil + } + out := new(HTTPGetAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPHeader) DeepCopyInto(out *HTTPHeader) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPHeader. +func (in *HTTPHeader) DeepCopy() *HTTPHeader { + if in == nil { + return nil + } + out := new(HTTPHeader) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Handler) DeepCopyInto(out *Handler) { + *out = *in + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecAction) + (*in).DeepCopyInto(*out) + } + if in.HTTPGet != nil { + in, out := &in.HTTPGet, &out.HTTPGet + *out = new(HTTPGetAction) + (*in).DeepCopyInto(*out) + } + if in.TCPSocket != nil { + in, out := &in.TCPSocket, &out.TCPSocket + *out = new(TCPSocketAction) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Handler. +func (in *Handler) DeepCopy() *Handler { + if in == nil { + return nil + } + out := new(Handler) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostAlias) DeepCopyInto(out *HostAlias) { + *out = *in + if in.Hostnames != nil { + in, out := &in.Hostnames, &out.Hostnames + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostAlias. +func (in *HostAlias) DeepCopy() *HostAlias { + if in == nil { + return nil + } + out := new(HostAlias) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPathVolumeSource) DeepCopyInto(out *HostPathVolumeSource) { + *out = *in + if in.Type != nil { + in, out := &in.Type, &out.Type + *out = new(HostPathType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPathVolumeSource. +func (in *HostPathVolumeSource) DeepCopy() *HostPathVolumeSource { + if in == nil { + return nil + } + out := new(HostPathVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIPersistentVolumeSource) DeepCopyInto(out *ISCSIPersistentVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIPersistentVolumeSource. +func (in *ISCSIPersistentVolumeSource) DeepCopy() *ISCSIPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ISCSIVolumeSource) DeepCopyInto(out *ISCSIVolumeSource) { + *out = *in + if in.Portals != nil { + in, out := &in.Portals, &out.Portals + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + if in.InitiatorName != nil { + in, out := &in.InitiatorName, &out.InitiatorName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ISCSIVolumeSource. 
+func (in *ISCSIVolumeSource) DeepCopy() *ISCSIVolumeSource { + if in == nil { + return nil + } + out := new(ISCSIVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *KeyToPath) DeepCopyInto(out *KeyToPath) { + *out = *in + if in.Mode != nil { + in, out := &in.Mode, &out.Mode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new KeyToPath. +func (in *KeyToPath) DeepCopy() *KeyToPath { + if in == nil { + return nil + } + out := new(KeyToPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Lifecycle) DeepCopyInto(out *Lifecycle) { + *out = *in + if in.PostStart != nil { + in, out := &in.PostStart, &out.PostStart + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + if in.PreStop != nil { + in, out := &in.PreStop, &out.PreStop + *out = new(Handler) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lifecycle. +func (in *Lifecycle) DeepCopy() *Lifecycle { + if in == nil { + return nil + } + out := new(Lifecycle) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRange) DeepCopyInto(out *LimitRange) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRange. +func (in *LimitRange) DeepCopy() *LimitRange { + if in == nil { + return nil + } + out := new(LimitRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRange) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeItem) DeepCopyInto(out *LimitRangeItem) { + *out = *in + if in.Max != nil { + in, out := &in.Max, &out.Max + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Min != nil { + in, out := &in.Min, &out.Min + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Default != nil { + in, out := &in.Default, &out.Default + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.DefaultRequest != nil { + in, out := &in.DefaultRequest, &out.DefaultRequest + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.MaxLimitRequestRatio != nil { + in, out := &in.MaxLimitRequestRatio, &out.MaxLimitRequestRatio + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeItem. 
+func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { + if in == nil { + return nil + } + out := new(LimitRangeItem) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]LimitRange, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeList. +func (in *LimitRangeList) DeepCopy() *LimitRangeList { + if in == nil { + return nil + } + out := new(LimitRangeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *LimitRangeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LimitRangeSpec) DeepCopyInto(out *LimitRangeSpec) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make([]LimitRangeItem, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LimitRangeSpec. +func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { + if in == nil { + return nil + } + out := new(LimitRangeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LoadBalancerIngress) DeepCopyInto(out *LoadBalancerIngress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerIngress. +func (in *LoadBalancerIngress) DeepCopy() *LoadBalancerIngress { + if in == nil { + return nil + } + out := new(LoadBalancerIngress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *LoadBalancerStatus) DeepCopyInto(out *LoadBalancerStatus) { + *out = *in + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]LoadBalancerIngress, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LoadBalancerStatus. +func (in *LoadBalancerStatus) DeepCopy() *LoadBalancerStatus { + if in == nil { + return nil + } + out := new(LoadBalancerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalObjectReference) DeepCopyInto(out *LocalObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalObjectReference. +func (in *LocalObjectReference) DeepCopy() *LocalObjectReference { + if in == nil { + return nil + } + out := new(LocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LocalVolumeSource) DeepCopyInto(out *LocalVolumeSource) { + *out = *in + if in.FSType != nil { + in, out := &in.FSType, &out.FSType + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalVolumeSource. +func (in *LocalVolumeSource) DeepCopy() *LocalVolumeSource { + if in == nil { + return nil + } + out := new(LocalVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NFSVolumeSource) DeepCopyInto(out *NFSVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NFSVolumeSource. +func (in *NFSVolumeSource) DeepCopy() *NFSVolumeSource { + if in == nil { + return nil + } + out := new(NFSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Namespace) DeepCopyInto(out *Namespace) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Namespace. +func (in *Namespace) DeepCopy() *Namespace { + if in == nil { + return nil + } + out := new(Namespace) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Namespace) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition. 
+func (in *NamespaceCondition) DeepCopy() *NamespaceCondition { + if in == nil { + return nil + } + out := new(NamespaceCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Namespace, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceList. +func (in *NamespaceList) DeepCopy() *NamespaceList { + if in == nil { + return nil + } + out := new(NamespaceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NamespaceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceSpec) DeepCopyInto(out *NamespaceSpec) { + *out = *in + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]FinalizerName, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceSpec. +func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { + if in == nil { + return nil + } + out := new(NamespaceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NamespaceCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceStatus. +func (in *NamespaceStatus) DeepCopy() *NamespaceStatus { + if in == nil { + return nil + } + out := new(NamespaceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Node) DeepCopyInto(out *Node) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Node. +func (in *Node) DeepCopy() *Node { + if in == nil { + return nil + } + out := new(Node) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Node) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAddress) DeepCopyInto(out *NodeAddress) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAddress. 
+func (in *NodeAddress) DeepCopy() *NodeAddress { + if in == nil { + return nil + } + out := new(NodeAddress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeAffinity) DeepCopyInto(out *NodeAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]PreferredSchedulingTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeAffinity. +func (in *NodeAffinity) DeepCopy() *NodeAffinity { + if in == nil { + return nil + } + out := new(NodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeCondition) DeepCopyInto(out *NodeCondition) { + *out = *in + in.LastHeartbeatTime.DeepCopyInto(&out.LastHeartbeatTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeCondition. +func (in *NodeCondition) DeepCopy() *NodeCondition { + if in == nil { + return nil + } + out := new(NodeCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigSource) DeepCopyInto(out *NodeConfigSource) { + *out = *in + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapNodeConfigSource) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigSource. +func (in *NodeConfigSource) DeepCopy() *NodeConfigSource { + if in == nil { + return nil + } + out := new(NodeConfigSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeConfigStatus) DeepCopyInto(out *NodeConfigStatus) { + *out = *in + if in.Assigned != nil { + in, out := &in.Assigned, &out.Assigned + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + if in.Active != nil { + in, out := &in.Active, &out.Active + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + if in.LastKnownGood != nil { + in, out := &in.LastKnownGood, &out.LastKnownGood + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeConfigStatus. +func (in *NodeConfigStatus) DeepCopy() *NodeConfigStatus { + if in == nil { + return nil + } + out := new(NodeConfigStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeDaemonEndpoints) DeepCopyInto(out *NodeDaemonEndpoints) { + *out = *in + out.KubeletEndpoint = in.KubeletEndpoint + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeDaemonEndpoints. +func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { + if in == nil { + return nil + } + out := new(NodeDaemonEndpoints) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeList) DeepCopyInto(out *NodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Node, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeList. +func (in *NodeList) DeepCopy() *NodeList { + if in == nil { + return nil + } + out := new(NodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeProxyOptions) DeepCopyInto(out *NodeProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeProxyOptions. +func (in *NodeProxyOptions) DeepCopy() *NodeProxyOptions { + if in == nil { + return nil + } + out := new(NodeProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeResources) DeepCopyInto(out *NodeResources) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeResources. +func (in *NodeResources) DeepCopy() *NodeResources { + if in == nil { + return nil + } + out := new(NodeResources) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelector) DeepCopyInto(out *NodeSelector) { + *out = *in + if in.NodeSelectorTerms != nil { + in, out := &in.NodeSelectorTerms, &out.NodeSelectorTerms + *out = make([]NodeSelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelector. +func (in *NodeSelector) DeepCopy() *NodeSelector { + if in == nil { + return nil + } + out := new(NodeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeSelectorRequirement) DeepCopyInto(out *NodeSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorRequirement. +func (in *NodeSelectorRequirement) DeepCopy() *NodeSelectorRequirement { + if in == nil { + return nil + } + out := new(NodeSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSelectorTerm) DeepCopyInto(out *NodeSelectorTerm) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.MatchFields != nil { + in, out := &in.MatchFields, &out.MatchFields + *out = make([]NodeSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSelectorTerm. +func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { + if in == nil { + return nil + } + out := new(NodeSelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { + *out = *in + if in.PodCIDRs != nil { + in, out := &in.PodCIDRs, &out.PodCIDRs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Taints != nil { + in, out := &in.Taints, &out.Taints + *out = make([]Taint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ConfigSource != nil { + in, out := &in.ConfigSource, &out.ConfigSource + *out = new(NodeConfigSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSpec. +func (in *NodeSpec) DeepCopy() *NodeSpec { + if in == nil { + return nil + } + out := new(NodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeStatus) DeepCopyInto(out *NodeStatus) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]NodeCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Addresses != nil { + in, out := &in.Addresses, &out.Addresses + *out = make([]NodeAddress, len(*in)) + copy(*out, *in) + } + out.DaemonEndpoints = in.DaemonEndpoints + out.NodeInfo = in.NodeInfo + if in.Images != nil { + in, out := &in.Images, &out.Images + *out = make([]ContainerImage, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumesInUse != nil { + in, out := &in.VolumesInUse, &out.VolumesInUse + *out = make([]UniqueVolumeName, len(*in)) + copy(*out, *in) + } + if in.VolumesAttached != nil { + in, out := &in.VolumesAttached, &out.VolumesAttached + *out = make([]AttachedVolume, len(*in)) + copy(*out, *in) + } + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(NodeConfigStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeStatus. +func (in *NodeStatus) DeepCopy() *NodeStatus { + if in == nil { + return nil + } + out := new(NodeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeSystemInfo) DeepCopyInto(out *NodeSystemInfo) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeSystemInfo. +func (in *NodeSystemInfo) DeepCopy() *NodeSystemInfo { + if in == nil { + return nil + } + out := new(NodeSystemInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectFieldSelector) DeepCopyInto(out *ObjectFieldSelector) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectFieldSelector. +func (in *ObjectFieldSelector) DeepCopy() *ObjectFieldSelector { + if in == nil { + return nil + } + out := new(ObjectFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectReference) DeepCopyInto(out *ObjectReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectReference. +func (in *ObjectReference) DeepCopy() *ObjectReference { + if in == nil { + return nil + } + out := new(ObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ObjectReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentVolume) DeepCopyInto(out *PersistentVolume) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolume. +func (in *PersistentVolume) DeepCopy() *PersistentVolume { + if in == nil { + return nil + } + out := new(PersistentVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolume) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaim) DeepCopyInto(out *PersistentVolumeClaim) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaim. +func (in *PersistentVolumeClaim) DeepCopy() *PersistentVolumeClaim { + if in == nil { + return nil + } + out := new(PersistentVolumeClaim) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeClaim) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimCondition) DeepCopyInto(out *PersistentVolumeClaimCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimCondition. +func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondition { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolumeClaim, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimList. +func (in *PersistentVolumeClaimList) DeepCopy() *PersistentVolumeClaimList { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeClaimList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PersistentVolumeClaimSpec) DeepCopyInto(out *PersistentVolumeClaimSpec) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Resources.DeepCopyInto(&out.Resources) + if in.StorageClassName != nil { + in, out := &in.StorageClassName, &out.StorageClassName + *out = new(string) + **out = **in + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + *out = new(PersistentVolumeMode) + **out = **in + } + if in.DataSource != nil { + in, out := &in.DataSource, &out.DataSource + *out = new(TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimSpec. +func (in *PersistentVolumeClaimSpec) DeepCopy() *PersistentVolumeClaimSpec { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimStatus) DeepCopyInto(out *PersistentVolumeClaimStatus) { + *out = *in + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PersistentVolumeClaimCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimStatus. +func (in *PersistentVolumeClaimStatus) DeepCopy() *PersistentVolumeClaimStatus { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeClaimVolumeSource) DeepCopyInto(out *PersistentVolumeClaimVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeClaimVolumeSource. +func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVolumeSource { + if in == nil { + return nil + } + out := new(PersistentVolumeClaimVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PersistentVolume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeList. 
+func (in *PersistentVolumeList) DeepCopy() *PersistentVolumeList { + if in == nil { + return nil + } + out := new(PersistentVolumeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PersistentVolumeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { + *out = *in + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFilePersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Local != nil { + in, out := &in.Local, &out.Local + *out = new(LocalVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + 
*out = new(CSIPersistentVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSource. +func (in *PersistentVolumeSource) DeepCopy() *PersistentVolumeSource { + if in == nil { + return nil + } + out := new(PersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeSpec) DeepCopyInto(out *PersistentVolumeSpec) { + *out = *in + if in.Capacity != nil { + in, out := &in.Capacity, &out.Capacity + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + in.PersistentVolumeSource.DeepCopyInto(&out.PersistentVolumeSource) + if in.AccessModes != nil { + in, out := &in.AccessModes, &out.AccessModes + *out = make([]PersistentVolumeAccessMode, len(*in)) + copy(*out, *in) + } + if in.ClaimRef != nil { + in, out := &in.ClaimRef, &out.ClaimRef + *out = new(ObjectReference) + **out = **in + } + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.VolumeMode != nil { + in, out := &in.VolumeMode, &out.VolumeMode + *out = new(PersistentVolumeMode) + **out = **in + } + if in.NodeAffinity != nil { + in, out := &in.NodeAffinity, &out.NodeAffinity + *out = new(VolumeNodeAffinity) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeSpec. +func (in *PersistentVolumeSpec) DeepCopy() *PersistentVolumeSpec { + if in == nil { + return nil + } + out := new(PersistentVolumeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PersistentVolumeStatus) DeepCopyInto(out *PersistentVolumeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PersistentVolumeStatus. +func (in *PersistentVolumeStatus) DeepCopy() *PersistentVolumeStatus { + if in == nil { + return nil + } + out := new(PersistentVolumeStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PhotonPersistentDiskVolumeSource) DeepCopyInto(out *PhotonPersistentDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PhotonPersistentDiskVolumeSource. +func (in *PhotonPersistentDiskVolumeSource) DeepCopy() *PhotonPersistentDiskVolumeSource { + if in == nil { + return nil + } + out := new(PhotonPersistentDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Pod) DeepCopyInto(out *Pod) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Pod. 
+func (in *Pod) DeepCopy() *Pod { + if in == nil { + return nil + } + out := new(Pod) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Pod) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAffinity) DeepCopyInto(out *PodAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinity. +func (in *PodAffinity) DeepCopy() *PodAffinity { + if in == nil { + return nil + } + out := new(PodAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAffinityTerm) DeepCopyInto(out *PodAffinityTerm) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.Namespaces != nil { + in, out := &in.Namespaces, &out.Namespaces + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAffinityTerm. +func (in *PodAffinityTerm) DeepCopy() *PodAffinityTerm { + if in == nil { + return nil + } + out := new(PodAffinityTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodAntiAffinity) DeepCopyInto(out *PodAntiAffinity) { + *out = *in + if in.RequiredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.RequiredDuringSchedulingIgnoredDuringExecution, &out.RequiredDuringSchedulingIgnoredDuringExecution + *out = make([]PodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PreferredDuringSchedulingIgnoredDuringExecution != nil { + in, out := &in.PreferredDuringSchedulingIgnoredDuringExecution, &out.PreferredDuringSchedulingIgnoredDuringExecution + *out = make([]WeightedPodAffinityTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAntiAffinity. +func (in *PodAntiAffinity) DeepCopy() *PodAntiAffinity { + if in == nil { + return nil + } + out := new(PodAntiAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions. +func (in *PodAttachOptions) DeepCopy() *PodAttachOptions { + if in == nil { + return nil + } + out := new(PodAttachOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodAttachOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodCondition) DeepCopyInto(out *PodCondition) { + *out = *in + in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. +func (in *PodCondition) DeepCopy() *PodCondition { + if in == nil { + return nil + } + out := new(PodCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { + *out = *in + if in.Nameservers != nil { + in, out := &in.Nameservers, &out.Nameservers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Searches != nil { + in, out := &in.Searches, &out.Searches + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Options != nil { + in, out := &in.Options, &out.Options + *out = make([]PodDNSConfigOption, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. +func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { + if in == nil { + return nil + } + out := new(PodDNSConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { + *out = *in + if in.Value != nil { + in, out := &in.Value, &out.Value + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. +func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { + if in == nil { + return nil + } + out := new(PodDNSConfigOption) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions. +func (in *PodExecOptions) DeepCopy() *PodExecOptions { + if in == nil { + return nil + } + out := new(PodExecOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodExecOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodIP) DeepCopyInto(out *PodIP) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP. +func (in *PodIP) DeepCopy() *PodIP { + if in == nil { + return nil + } + out := new(PodIP) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodList) DeepCopyInto(out *PodList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Pod, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. +func (in *PodList) DeepCopy() *PodList { + if in == nil { + return nil + } + out := new(PodList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.SinceSeconds != nil { + in, out := &in.SinceSeconds, &out.SinceSeconds + *out = new(int64) + **out = **in + } + if in.SinceTime != nil { + in, out := &in.SinceTime, &out.SinceTime + *out = (*in).DeepCopy() + } + if in.TailLines != nil { + in, out := &in.TailLines, &out.TailLines + *out = new(int64) + **out = **in + } + if in.LimitBytes != nil { + in, out := &in.LimitBytes, &out.LimitBytes + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions. +func (in *PodLogOptions) DeepCopy() *PodLogOptions { + if in == nil { + return nil + } + out := new(PodLogOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodLogOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]int32, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions. +func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions { + if in == nil { + return nil + } + out := new(PodPortForwardOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions. +func (in *PodProxyOptions) DeepCopy() *PodProxyOptions { + if in == nil { + return nil + } + out := new(PodProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate. +func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { + if in == nil { + return nil + } + out := new(PodReadinessGate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.SupplementalGroups != nil { + in, out := &in.SupplementalGroups, &out.SupplementalGroups + *out = make([]int64, len(*in)) + copy(*out, *in) + } + if in.FSGroup != nil { + in, out := &in.FSGroup, &out.FSGroup + *out = new(int64) + **out = **in + } + if in.Sysctls != nil { + in, out := &in.Sysctls, &out.Sysctls + *out = make([]Sysctl, len(*in)) + copy(*out, *in) + } + if in.FSGroupChangePolicy != nil { + in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy + *out = new(PodFSGroupChangePolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext. +func (in *PodSecurityContext) DeepCopy() *PodSecurityContext { + if in == nil { + return nil + } + out := new(PodSecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSignature) DeepCopyInto(out *PodSignature) { + *out = *in + if in.PodController != nil { + in, out := &in.PodController, &out.PodController + *out = new(metav1.OwnerReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature. 
+func (in *PodSignature) DeepCopy() *PodSignature { + if in == nil { + return nil + } + out := new(PodSignature) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSpec) DeepCopyInto(out *PodSpec) { + *out = *in + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.InitContainers != nil { + in, out := &in.InitContainers, &out.InitContainers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]Container, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EphemeralContainers != nil { + in, out := &in.EphemeralContainers, &out.EphemeralContainers + *out = make([]EphemeralContainer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.TerminationGracePeriodSeconds != nil { + in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.ActiveDeadlineSeconds != nil { + in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds + *out = new(int64) + **out = **in + } + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + if in.ShareProcessNamespace != nil { + in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace + *out = new(bool) + **out = **in + } + if in.SecurityContext != nil { + in, out := &in.SecurityContext, &out.SecurityContext + *out = new(PodSecurityContext) + (*in).DeepCopyInto(*out) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.Affinity != nil { + in, out := &in.Affinity, &out.Affinity + *out = new(Affinity) + (*in).DeepCopyInto(*out) + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.HostAliases != nil { + in, out := &in.HostAliases, &out.HostAliases + *out = make([]HostAlias, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Priority != nil { + in, out := &in.Priority, &out.Priority + *out = new(int32) + **out = **in + } + if in.DNSConfig != nil { + in, out := &in.DNSConfig, &out.DNSConfig + *out = new(PodDNSConfig) + (*in).DeepCopyInto(*out) + } + if in.ReadinessGates != nil { + in, out := &in.ReadinessGates, &out.ReadinessGates + *out = make([]PodReadinessGate, len(*in)) + copy(*out, *in) + } + if in.RuntimeClassName != nil { + in, out := &in.RuntimeClassName, &out.RuntimeClassName + *out = new(string) + **out = **in + } + if in.EnableServiceLinks != nil { + in, out := &in.EnableServiceLinks, &out.EnableServiceLinks + *out = new(bool) + **out = **in + } + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(PreemptionPolicy) + **out = **in + } + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = 
make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.TopologySpreadConstraints != nil { + in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints + *out = make([]TopologySpreadConstraint, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. +func (in *PodSpec) DeepCopy() *PodSpec { + if in == nil { + return nil + } + out := new(PodSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodStatus) DeepCopyInto(out *PodStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]PodCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PodIPs != nil { + in, out := &in.PodIPs, &out.PodIPs + *out = make([]PodIP, len(*in)) + copy(*out, *in) + } + if in.StartTime != nil { + in, out := &in.StartTime, &out.StartTime + *out = (*in).DeepCopy() + } + if in.InitContainerStatuses != nil { + in, out := &in.InitContainerStatuses, &out.InitContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.ContainerStatuses != nil { + in, out := &in.ContainerStatuses, &out.ContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EphemeralContainerStatuses != nil { + in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses + *out = make([]ContainerStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. +func (in *PodStatus) DeepCopy() *PodStatus { + if in == nil { + return nil + } + out := new(PodStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult. +func (in *PodStatusResult) DeepCopy() *PodStatusResult { + if in == nil { + return nil + } + out := new(PodStatusResult) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodStatusResult) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. 
+func (in *PodTemplate) DeepCopy() *PodTemplate { + if in == nil { + return nil + } + out := new(PodTemplate) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTemplate) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodTemplate, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList. +func (in *PodTemplateList) DeepCopy() *PodTemplateList { + if in == nil { + return nil + } + out := new(PodTemplateList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodTemplateList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) { + *out = *in + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec. +func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { + if in == nil { + return nil + } + out := new(PodTemplateSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource. +func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource { + if in == nil { + return nil + } + out := new(PortworxVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preconditions) DeepCopyInto(out *Preconditions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. +func (in *Preconditions) DeepCopy() *Preconditions { + if in == nil { + return nil + } + out := new(Preconditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) { + *out = *in + in.PodSignature.DeepCopyInto(&out.PodSignature) + in.EvictionTime.DeepCopyInto(&out.EvictionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry. 
+func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry { + if in == nil { + return nil + } + out := new(PreferAvoidPodsEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) { + *out = *in + in.Preference.DeepCopyInto(&out.Preference) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm. +func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { + if in == nil { + return nil + } + out := new(PreferredSchedulingTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Probe) DeepCopyInto(out *Probe) { + *out = *in + in.Handler.DeepCopyInto(&out.Handler) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. +func (in *Probe) DeepCopy() *Probe { + if in == nil { + return nil + } + out := new(Probe) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { + *out = *in + if in.Sources != nil { + in, out := &in.Sources, &out.Sources + *out = make([]VolumeProjection, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource. +func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource { + if in == nil { + return nil + } + out := new(ProjectedVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource. +func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { + if in == nil { + return nil + } + out := new(QuobyteVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource. +func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource { + if in == nil { + return nil + } + out := new(RBDPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { + *out = *in + if in.CephMonitors != nil { + in, out := &in.CephMonitors, &out.CephMonitors + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource. +func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource { + if in == nil { + return nil + } + out := new(RBDVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. +func (in *RangeAllocation) DeepCopy() *RangeAllocation { + if in == nil { + return nil + } + out := new(RangeAllocation) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RangeAllocation) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. +func (in *ReplicationController) DeepCopy() *ReplicationController { + if in == nil { + return nil + } + out := new(ReplicationController) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationController) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. +func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { + if in == nil { + return nil + } + out := new(ReplicationControllerCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicationController, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. +func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { + if in == nil { + return nil + } + out := new(ReplicationControllerList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Template != nil { + in, out := &in.Template, &out.Template + *out = new(PodTemplateSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. +func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { + if in == nil { + return nil + } + out := new(ReplicationControllerSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicationControllerCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. +func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { + if in == nil { + return nil + } + out := new(ReplicationControllerStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { + *out = *in + out.Divisor = in.Divisor.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. +func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { + if in == nil { + return nil + } + out := new(ResourceFieldSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in ResourceList) DeepCopyInto(out *ResourceList) { + { + in := &in + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. +func (in ResourceList) DeepCopy() ResourceList { + if in == nil { + return nil + } + out := new(ResourceList) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. +func (in *ResourceQuota) DeepCopy() *ResourceQuota { + if in == nil { + return nil + } + out := new(ResourceQuota) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuota) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ResourceQuota, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. +func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { + if in == nil { + return nil + } + out := new(ResourceQuotaList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Scopes != nil { + in, out := &in.Scopes, &out.Scopes + *out = make([]ResourceQuotaScope, len(*in)) + copy(*out, *in) + } + if in.ScopeSelector != nil { + in, out := &in.ScopeSelector, &out.ScopeSelector + *out = new(ScopeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. +func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { + if in == nil { + return nil + } + out := new(ResourceQuotaSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { + *out = *in + if in.Hard != nil { + in, out := &in.Hard, &out.Hard + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Used != nil { + in, out := &in.Used, &out.Used + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. +func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { + if in == nil { + return nil + } + out := new(ResourceQuotaStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { + *out = *in + if in.Limits != nil { + in, out := &in.Limits, &out.Limits + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + if in.Requests != nil { + in, out := &in.Requests, &out.Requests + *out = make(ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. +func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { + if in == nil { + return nil + } + out := new(ResourceRequirements) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions. +func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { + if in == nil { + return nil + } + out := new(SELinuxOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(SecretReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. +func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. +func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { + if in == nil { + return nil + } + out := new(ScaleIOVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) { + *out = *in + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]ScopedResourceSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector. +func (in *ScopeSelector) DeepCopy() *ScopeSelector { + if in == nil { + return nil + } + out := new(ScopeSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement. +func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement { + if in == nil { + return nil + } + out := new(ScopedResourceSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Secret) DeepCopyInto(out *Secret) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Immutable != nil { + in, out := &in.Immutable, &out.Immutable + *out = new(bool) + **out = **in + } + if in.Data != nil { + in, out := &in.Data, &out.Data + *out = make(map[string][]byte, len(*in)) + for key, val := range *in { + var outVal []byte + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]byte, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.StringData != nil { + in, out := &in.StringData, &out.StringData + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. +func (in *Secret) DeepCopy() *Secret { + if in == nil { + return nil + } + out := new(Secret) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Secret) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. +func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { + if in == nil { + return nil + } + out := new(SecretEnvSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. +func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { + if in == nil { + return nil + } + out := new(SecretKeySelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretList) DeepCopyInto(out *SecretList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Secret, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. +func (in *SecretList) DeepCopy() *SecretList { + if in == nil { + return nil + } + out := new(SecretList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SecretList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { + *out = *in + out.LocalObjectReference = in.LocalObjectReference + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. +func (in *SecretProjection) DeepCopy() *SecretProjection { + if in == nil { + return nil + } + out := new(SecretProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretReference) DeepCopyInto(out *SecretReference) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. +func (in *SecretReference) DeepCopy() *SecretReference { + if in == nil { + return nil + } + out := new(SecretReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]KeyToPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.DefaultMode != nil { + in, out := &in.DefaultMode, &out.DefaultMode + *out = new(int32) + **out = **in + } + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. 
+func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { + if in == nil { + return nil + } + out := new(SecretVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { + *out = *in + if in.Capabilities != nil { + in, out := &in.Capabilities, &out.Capabilities + *out = new(Capabilities) + (*in).DeepCopyInto(*out) + } + if in.Privileged != nil { + in, out := &in.Privileged, &out.Privileged + *out = new(bool) + **out = **in + } + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(SELinuxOptions) + **out = **in + } + if in.WindowsOptions != nil { + in, out := &in.WindowsOptions, &out.WindowsOptions + *out = new(WindowsSecurityContextOptions) + (*in).DeepCopyInto(*out) + } + if in.RunAsUser != nil { + in, out := &in.RunAsUser, &out.RunAsUser + *out = new(int64) + **out = **in + } + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(int64) + **out = **in + } + if in.RunAsNonRoot != nil { + in, out := &in.RunAsNonRoot, &out.RunAsNonRoot + *out = new(bool) + **out = **in + } + if in.ReadOnlyRootFilesystem != nil { + in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.ProcMount != nil { + in, out := &in.ProcMount, &out.ProcMount + *out = new(ProcMountType) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. +func (in *SecurityContext) DeepCopy() *SecurityContext { + if in == nil { + return nil + } + out := new(SecurityContext) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Reference = in.Reference + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. +func (in *SerializedReference) DeepCopy() *SerializedReference { + if in == nil { + return nil + } + out := new(SerializedReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *SerializedReference) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Service) DeepCopyInto(out *Service) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. +func (in *Service) DeepCopy() *Service { + if in == nil { + return nil + } + out := new(Service) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *Service) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Secrets != nil { + in, out := &in.Secrets, &out.Secrets + *out = make([]ObjectReference, len(*in)) + copy(*out, *in) + } + if in.ImagePullSecrets != nil { + in, out := &in.ImagePullSecrets, &out.ImagePullSecrets + *out = make([]LocalObjectReference, len(*in)) + copy(*out, *in) + } + if in.AutomountServiceAccountToken != nil { + in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. +func (in *ServiceAccount) DeepCopy() *ServiceAccount { + if in == nil { + return nil + } + out := new(ServiceAccount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccount) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ServiceAccount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. +func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { + if in == nil { + return nil + } + out := new(ServiceAccountList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceAccountList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { + *out = *in + if in.ExpirationSeconds != nil { + in, out := &in.ExpirationSeconds, &out.ExpirationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. +func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { + if in == nil { + return nil + } + out := new(ServiceAccountTokenProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ServiceList) DeepCopyInto(out *ServiceList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Service, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. +func (in *ServiceList) DeepCopy() *ServiceList { + if in == nil { + return nil + } + out := new(ServiceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServicePort) DeepCopyInto(out *ServicePort) { + *out = *in + if in.AppProtocol != nil { + in, out := &in.AppProtocol, &out.AppProtocol + *out = new(string) + **out = **in + } + out.TargetPort = in.TargetPort + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. +func (in *ServicePort) DeepCopy() *ServicePort { + if in == nil { + return nil + } + out := new(ServicePort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. +func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { + if in == nil { + return nil + } + out := new(ServiceProxyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]ServicePort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ExternalIPs != nil { + in, out := &in.ExternalIPs, &out.ExternalIPs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.LoadBalancerSourceRanges != nil { + in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.SessionAffinityConfig != nil { + in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig + *out = new(SessionAffinityConfig) + (*in).DeepCopyInto(*out) + } + if in.IPFamily != nil { + in, out := &in.IPFamily, &out.IPFamily + *out = new(IPFamily) + **out = **in + } + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. 
+func (in *ServiceSpec) DeepCopy() *ServiceSpec { + if in == nil { + return nil + } + out := new(ServiceSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. +func (in *ServiceStatus) DeepCopy() *ServiceStatus { + if in == nil { + return nil + } + out := new(ServiceStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { + *out = *in + if in.ClientIP != nil { + in, out := &in.ClientIP, &out.ClientIP + *out = new(ClientIPConfig) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. +func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { + if in == nil { + return nil + } + out := new(SessionAffinityConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(ObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. +func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSPersistentVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { + *out = *in + if in.SecretRef != nil { + in, out := &in.SecretRef, &out.SecretRef + *out = new(LocalObjectReference) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. +func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { + if in == nil { + return nil + } + out := new(StorageOSVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Sysctl) DeepCopyInto(out *Sysctl) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. +func (in *Sysctl) DeepCopy() *Sysctl { + if in == nil { + return nil + } + out := new(Sysctl) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { + *out = *in + out.Port = in.Port + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. 
+func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { + if in == nil { + return nil + } + out := new(TCPSocketAction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Taint) DeepCopyInto(out *Taint) { + *out = *in + if in.TimeAdded != nil { + in, out := &in.TimeAdded, &out.TimeAdded + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. +func (in *Taint) DeepCopy() *Taint { + if in == nil { + return nil + } + out := new(Taint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Toleration) DeepCopyInto(out *Toleration) { + *out = *in + if in.TolerationSeconds != nil { + in, out := &in.TolerationSeconds, &out.TolerationSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. +func (in *Toleration) DeepCopy() *Toleration { + if in == nil { + return nil + } + out := new(Toleration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement. +func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement { + if in == nil { + return nil + } + out := new(TopologySelectorLabelRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) { + *out = *in + if in.MatchLabelExpressions != nil { + in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions + *out = make([]TopologySelectorLabelRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm. +func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { + if in == nil { + return nil + } + out := new(TopologySelectorTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { + *out = *in + if in.LabelSelector != nil { + in, out := &in.LabelSelector, &out.LabelSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. +func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { + if in == nil { + return nil + } + out := new(TopologySpreadConstraint) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { + *out = *in + if in.APIGroup != nil { + in, out := &in.APIGroup, &out.APIGroup + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference. +func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { + if in == nil { + return nil + } + out := new(TypedLocalObjectReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Volume) DeepCopyInto(out *Volume) { + *out = *in + in.VolumeSource.DeepCopyInto(&out.VolumeSource) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. +func (in *Volume) DeepCopy() *Volume { + if in == nil { + return nil + } + out := new(Volume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. +func (in *VolumeDevice) DeepCopy() *VolumeDevice { + if in == nil { + return nil + } + out := new(VolumeDevice) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { + *out = *in + if in.MountPropagation != nil { + in, out := &in.MountPropagation, &out.MountPropagation + *out = new(MountPropagationMode) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. +func (in *VolumeMount) DeepCopy() *VolumeMount { + if in == nil { + return nil + } + out := new(VolumeMount) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { + *out = *in + if in.Required != nil { + in, out := &in.Required, &out.Required + *out = new(NodeSelector) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. +func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { + if in == nil { + return nil + } + out := new(VolumeNodeAffinity) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { + *out = *in + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretProjection) + (*in).DeepCopyInto(*out) + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIProjection) + (*in).DeepCopyInto(*out) + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + *out = new(ConfigMapProjection) + (*in).DeepCopyInto(*out) + } + if in.ServiceAccountToken != nil { + in, out := &in.ServiceAccountToken, &out.ServiceAccountToken + *out = new(ServiceAccountTokenProjection) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. +func (in *VolumeProjection) DeepCopy() *VolumeProjection { + if in == nil { + return nil + } + out := new(VolumeProjection) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { + *out = *in + if in.HostPath != nil { + in, out := &in.HostPath, &out.HostPath + *out = new(HostPathVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.EmptyDir != nil { + in, out := &in.EmptyDir, &out.EmptyDir + *out = new(EmptyDirVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.GCEPersistentDisk != nil { + in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk + *out = new(GCEPersistentDiskVolumeSource) + **out = **in + } + if in.AWSElasticBlockStore != nil { + in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore + *out = new(AWSElasticBlockStoreVolumeSource) + **out = **in + } + if in.GitRepo != nil { + in, out := &in.GitRepo, &out.GitRepo + *out = new(GitRepoVolumeSource) + **out = **in + } + if in.Secret != nil { + in, out := &in.Secret, &out.Secret + *out = new(SecretVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.NFS != nil { + in, out := &in.NFS, &out.NFS + *out = new(NFSVolumeSource) + **out = **in + } + if in.ISCSI != nil { + in, out := &in.ISCSI, &out.ISCSI + *out = new(ISCSIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Glusterfs != nil { + in, out := &in.Glusterfs, &out.Glusterfs + *out = new(GlusterfsVolumeSource) + **out = **in + } + if in.PersistentVolumeClaim != nil { + in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim + *out = new(PersistentVolumeClaimVolumeSource) + **out = **in + } + if in.RBD != nil { + in, out := &in.RBD, &out.RBD + *out = new(RBDVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.FlexVolume != nil { + in, out := &in.FlexVolume, &out.FlexVolume + *out = new(FlexVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Cinder != nil { + in, out := &in.Cinder, &out.Cinder + *out = new(CinderVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CephFS != nil { + in, out := &in.CephFS, &out.CephFS + *out = new(CephFSVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.Flocker != nil { + in, out := &in.Flocker, &out.Flocker + *out = new(FlockerVolumeSource) + **out = **in + } + if in.DownwardAPI != nil { + in, out := &in.DownwardAPI, &out.DownwardAPI + *out = new(DownwardAPIVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.FC != nil { + in, out := &in.FC, &out.FC + *out = new(FCVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.AzureFile != nil { + in, out := &in.AzureFile, &out.AzureFile + *out = new(AzureFileVolumeSource) + **out = **in + } + if in.ConfigMap != nil { + in, out := &in.ConfigMap, &out.ConfigMap + 
*out = new(ConfigMapVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.VsphereVolume != nil { + in, out := &in.VsphereVolume, &out.VsphereVolume + *out = new(VsphereVirtualDiskVolumeSource) + **out = **in + } + if in.Quobyte != nil { + in, out := &in.Quobyte, &out.Quobyte + *out = new(QuobyteVolumeSource) + **out = **in + } + if in.AzureDisk != nil { + in, out := &in.AzureDisk, &out.AzureDisk + *out = new(AzureDiskVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PhotonPersistentDisk != nil { + in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk + *out = new(PhotonPersistentDiskVolumeSource) + **out = **in + } + if in.Projected != nil { + in, out := &in.Projected, &out.Projected + *out = new(ProjectedVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.PortworxVolume != nil { + in, out := &in.PortworxVolume, &out.PortworxVolume + *out = new(PortworxVolumeSource) + **out = **in + } + if in.ScaleIO != nil { + in, out := &in.ScaleIO, &out.ScaleIO + *out = new(ScaleIOVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.StorageOS != nil { + in, out := &in.StorageOS, &out.StorageOS + *out = new(StorageOSVolumeSource) + (*in).DeepCopyInto(*out) + } + if in.CSI != nil { + in, out := &in.CSI, &out.CSI + *out = new(CSIVolumeSource) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. +func (in *VolumeSource) DeepCopy() *VolumeSource { + if in == nil { + return nil + } + out := new(VolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. +func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { + if in == nil { + return nil + } + out := new(VsphereVirtualDiskVolumeSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { + *out = *in + in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. +func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { + if in == nil { + return nil + } + out := new(WeightedPodAffinityTerm) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) { + *out = *in + if in.GMSACredentialSpecName != nil { + in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName + *out = new(string) + **out = **in + } + if in.GMSACredentialSpec != nil { + in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec + *out = new(string) + **out = **in + } + if in.RunAsUserName != nil { + in, out := &in.RunAsUserName, &out.RunAsUserName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions. 
+func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions { + if in == nil { + return nil + } + out := new(WindowsSecurityContextOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go new file mode 100644 index 0000000..9bec7b3 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=events.k8s.io + +package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go new file mode 100644 index 0000000..923dee5 --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -0,0 +1,1448 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{0} +} +func (m *Event) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Event) XXX_Merge(src proto.Message) { + xxx_messageInfo_Event.Merge(m, src) +} +func (m *Event) XXX_Size() int { + return m.Size() +} +func (m *Event) XXX_DiscardUnknown() { + xxx_messageInfo_Event.DiscardUnknown(m) +} + +var xxx_messageInfo_Event proto.InternalMessageInfo + +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{1} +} +func (m *EventList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventList) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventList.Merge(m, src) +} +func (m *EventList) XXX_Size() int { + return m.Size() +} +func (m *EventList) XXX_DiscardUnknown() { + xxx_messageInfo_EventList.DiscardUnknown(m) +} + +var xxx_messageInfo_EventList proto.InternalMessageInfo + +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { + return fileDescriptor_4f97f691c32a5ac8, []int{2} +} +func (m *EventSeries) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *EventSeries) XXX_Merge(src proto.Message) { + xxx_messageInfo_EventSeries.Merge(m, src) +} +func (m *EventSeries) XXX_Size() int { + return m.Size() +} +func (m *EventSeries) XXX_DiscardUnknown() { + xxx_messageInfo_EventSeries.DiscardUnknown(m) +} + +var xxx_messageInfo_EventSeries proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_4f97f691c32a5ac8) +} + +var fileDescriptor_4f97f691c32a5ac8 = []byte{ + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, + 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, + 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, + 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, + 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 
0xa0, 0xfc, 0x29, 0xf0, + 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, + 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, + 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, + 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, + 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, + 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, + 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, + 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, + 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, + 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, + 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, + 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, + 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, + 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, + 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, + 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, + 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, + 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, + 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, + 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, + 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, + 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, + 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, + 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, + 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, + 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, + 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, + 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, + 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, + 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, + 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, + 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, + 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, + 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, + 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, + 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, + 
0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, + 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, + 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, + 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, + 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, + 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, + 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, + 0x00, +} + +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + i-- + dAtA[i] = 0x78 + { + size, err := m.DeprecatedLastTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + { + size, err := m.DeprecatedFirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + { + size, err := m.DeprecatedSource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0x5a + i -= len(m.Note) + copy(dAtA[i:], m.Note) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i-- + dAtA[i] = 0x52 + if m.Related != nil { + { + size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + { + size, err := m.Regarding.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x3a + i -= len(m.Action) + copy(dAtA[i:], m.Action) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i-- + dAtA[i] = 0x32 + i -= len(m.ReportingInstance) + copy(dAtA[i:], m.ReportingInstance) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i-- + dAtA[i] = 0x2a + i -= len(m.ReportingController) + copy(dAtA[i:], m.ReportingController) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i-- + dAtA[i] = 0x22 + if m.Series != nil { + { + size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + { + size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Event) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EventTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ReportingController) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ReportingInstance) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Action) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Regarding.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Related != nil { + l = m.Related.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.Note) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedFirstTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.DeprecatedLastTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.DeprecatedCount)) + return n +} + +func (m *EventList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n 
+= 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *EventSeries) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Count)) + l = m.LastObservedTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.State) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Event) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Regarding:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Regarding), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Note:` + fmt.Sprintf("%v", this.Note) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `DeprecatedSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedSource), "EventSource", "v11.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedFirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedLastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Event{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *EventSeries) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Event) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Event: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Series == nil { + m.Series = &EventSeries{} + } + if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingController = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ReportingInstance = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Action = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Regarding", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Regarding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Related == nil { + m.Related = &v11.ObjectReference{} + } + if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Note", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Note = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedSource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedFirstTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedFirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedLastTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeprecatedLastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCount", wireType) + } + m.DeprecatedCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DeprecatedCount |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Event{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EventSeries) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") + } + if fieldNum <= 0 
{ + return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = EventSeriesState(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, 
ErrInvalidLengthGenerated
+		}
+		if depth == 0 {
+			return iNdEx, nil
+		}
+	}
+	return 0, io.ErrUnexpectedEOF
+}
+
+var (
+	ErrInvalidLengthGenerated        = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowGenerated          = fmt.Errorf("proto: integer overflow")
+	ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group")
+)
diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto
new file mode 100644
index 0000000..58f5aa4
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/generated.proto
@@ -0,0 +1,122 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.events.v1beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.
+message Event {
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+  // Required. Time when this Event was first observed.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2;
+
+  // Data about the Event series this event represents or nil if it's a singleton Event.
+  // +optional
+  optional EventSeries series = 3;
+
+  // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+  // +optional
+  optional string reportingController = 4;
+
+  // ID of the controller instance, e.g. `kubelet-xyzf`.
+  // +optional
+  optional string reportingInstance = 5;
+
+  // What action was taken/failed regarding to the regarding object.
+  // +optional
+  optional string action = 6;
+
+  // Why the action was taken.
+  optional string reason = 7;
+
+  // The object this Event is about. In most cases it's an Object reporting controller implements.
+  // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because
+  // it acts on some changes in a ReplicaSet object.
+  // +optional
+  optional k8s.io.api.core.v1.ObjectReference regarding = 8;
+
+  // Optional secondary object for more complex actions. E.g. when regarding object triggers
+  // a creation or deletion of related object.
+  // +optional
+  optional k8s.io.api.core.v1.ObjectReference related = 9;
+
+  // Optional. A human-readable description of the status of this operation.
+  // Maximal length of the note is 1kB, but libraries should be prepared to
+  // handle values up to 64kB.
+  // +optional
+  optional string note = 10;
+
+  // Type of this event (Normal, Warning), new types could be added in the
+  // future.
+  // +optional
+  optional string type = 11;
+
+  // Deprecated field assuring backward compatibility with core.v1 Event type
+  // +optional
+  optional k8s.io.api.core.v1.EventSource deprecatedSource = 12;
+
+  // Deprecated field assuring backward compatibility with core.v1 Event type
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13;
+
+  // Deprecated field assuring backward compatibility with core.v1 Event type
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14;
+
+  // Deprecated field assuring backward compatibility with core.v1 Event type
+  // +optional
+  optional int32 deprecatedCount = 15;
+}
+
+// EventList is a list of Event objects.
+message EventList {
+  // Standard list metadata.
+  // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+  // +optional
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+  // Items is a list of schema objects.
+  repeated Event items = 2;
+}
+
+// EventSeries contain information on series of events, i.e. thing that was/is happening
+// continuously for some time.
+message EventSeries {
+  // Number of occurrences in this series up to the last heartbeat time
+  optional int32 count = 1;
+
+  // Time when last Event from the series was seen before last heartbeat.
+  optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2;
+
+  // Information whether this series is ongoing or finished.
+  // Deprecated. Planned removal for 1.18
+  optional string state = 3;
+}
+
diff --git a/vendor/k8s.io/api/events/v1beta1/register.go b/vendor/k8s.io/api/events/v1beta1/register.go
new file mode 100644
index 0000000..4506782
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "events.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+	return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
+	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
+	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
+	localSchemeBuilder = &SchemeBuilder
+	AddToScheme        = localSchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+	scheme.AddKnownTypes(SchemeGroupVersion,
+		&Event{},
+		&EventList{},
+	)
+
+	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+	return nil
+}
diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go
new file mode 100644
index 0000000..0571fbb
--- /dev/null
+++ b/vendor/k8s.io/api/events/v1beta1/types.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.
+type Event struct {
+	metav1.TypeMeta `json:",inline"`
+	// +optional
+	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
+
+	// Required. Time when this Event was first observed.
+	EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"`
+
+	// Data about the Event series this event represents or nil if it's a singleton Event.
+	// +optional
+	Series *EventSeries `json:"series,omitempty" protobuf:"bytes,3,opt,name=series"`
+
+	// Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.
+	// +optional
+	ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"`
+
+	// ID of the controller instance, e.g. `kubelet-xyzf`.
+	// +optional
+	ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"`
+
+	// What action was taken/failed regarding to the regarding object.
+	// +optional
+	Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"`
+
+	// Why the action was taken.
+	Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"`
+
+	// The object this Event is about. In most cases it's an Object reporting controller implements.
+	// E.g. ReplicaSetController implements ReplicaSets and this event is emitted because
+	// it acts on some changes in a ReplicaSet object.
+	// +optional
+	Regarding corev1.ObjectReference `json:"regarding,omitempty" protobuf:"bytes,8,opt,name=regarding"`
+
+	// Optional secondary object for more complex actions. E.g. when regarding object triggers
+	// a creation or deletion of related object.
+	// +optional
+	Related *corev1.ObjectReference `json:"related,omitempty" protobuf:"bytes,9,opt,name=related"`
+
+	// Optional. A human-readable description of the status of this operation.
+	// Maximal length of the note is 1kB, but libraries should be prepared to
+	// handle values up to 64kB.
+	// +optional
+	Note string `json:"note,omitempty" protobuf:"bytes,10,opt,name=note"`
+
+	// Type of this event (Normal, Warning), new types could be added in the
+	// future.
+ // +optional + Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` + + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedSource corev1.EventSource `json:"deprecatedSource,omitempty" protobuf:"bytes,12,opt,name=deprecatedSource"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedFirstTimestamp metav1.Time `json:"deprecatedFirstTimestamp,omitempty" protobuf:"bytes,13,opt,name=deprecatedFirstTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedLastTimestamp metav1.Time `json:"deprecatedLastTimestamp,omitempty" protobuf:"bytes,14,opt,name=deprecatedLastTimestamp"` + // Deprecated field assuring backward compatibility with core.v1 Event type + // +optional + DeprecatedCount int32 `json:"deprecatedCount,omitempty" protobuf:"varint,15,opt,name=deprecatedCount"` +} + +// EventSeries contain information on series of events, i.e. thing that was/is happening +// continuously for some time. +type EventSeries struct { + // Number of occurrences in this series up to the last heartbeat time + Count int32 `json:"count" protobuf:"varint,1,opt,name=count"` + // Time when last Event from the series was seen before last heartbeat. + LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` + // Information whether this series is ongoing or finished. + // Deprecated. Planned removal for 1.18 + State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` +} + +type EventSeriesState string + +const ( + EventSeriesStateOngoing EventSeriesState = "Ongoing" + EventSeriesStateFinished EventSeriesState = "Finished" + EventSeriesStateUnknown EventSeriesState = "Unknown" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// EventList is a list of Event objects. +type EventList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..639daca --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,73 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... 
|| TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Event = map[string]string{ + "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", + "eventTime": "Required. Time when this Event was first observed.", + "series": "Data about the Event series this event represents or nil if it's a singleton Event.", + "reportingController": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", + "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", + "action": "What action was taken/failed regarding to the regarding object.", + "reason": "Why the action was taken.", + "regarding": "The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", + "related": "Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", + "note": "Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", + "type": "Type of this event (Normal, Warning), new types could be added in the future.", + "deprecatedSource": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedFirstTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedLastTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", + "deprecatedCount": "Deprecated field assuring backward compatibility with core.v1 Event type", +} + +func (Event) SwaggerDoc() map[string]string { + return map_Event +} + +var map_EventList = map[string]string{ + "": "EventList is a list of Event objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (EventList) SwaggerDoc() map[string]string { + return map_EventList +} + +var map_EventSeries = map[string]string{ + "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", + "count": "Number of occurrences in this series up to the last heartbeat time", + "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", + "state": "Information whether this series is ongoing or finished. Deprecated. Planned removal for 1.18", +} + +func (EventSeries) SwaggerDoc() map[string]string { + return map_EventSeries +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..779ebaf --- /dev/null +++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,117 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.EventTime.DeepCopyInto(&out.EventTime) + if in.Series != nil { + in, out := &in.Series, &out.Series + *out = new(EventSeries) + (*in).DeepCopyInto(*out) + } + out.Regarding = in.Regarding + if in.Related != nil { + in, out := &in.Related, &out.Related + *out = new(v1.ObjectReference) + **out = **in + } + out.DeprecatedSource = in.DeprecatedSource + in.DeprecatedFirstTimestamp.DeepCopyInto(&out.DeprecatedFirstTimestamp) + in.DeprecatedLastTimestamp.DeepCopyInto(&out.DeprecatedLastTimestamp) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. +func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Event) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventList) DeepCopyInto(out *EventList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Event, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. +func (in *EventList) DeepCopy() *EventList { + if in == nil { + return nil + } + out := new(EventList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *EventList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *EventSeries) DeepCopyInto(out *EventSeries) { + *out = *in + in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. +func (in *EventSeries) DeepCopy() *EventSeries { + if in == nil { + return nil + } + out := new(EventSeries) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go new file mode 100644 index 0000000..fa799f3 --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +package v1beta1 // import "k8s.io/api/extensions/v1beta1" diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go new file mode 100644 index 0000000..bd37f43 --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go @@ -0,0 +1,15315 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo + +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{3} +} +func (m *DaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSet.Merge(m, src) +} +func (m *DaemonSet) XXX_Size() int { + return m.Size() +} +func (m *DaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSet proto.InternalMessageInfo + +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) 
ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{4} +} +func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetCondition.Merge(m, src) +} +func (m *DaemonSetCondition) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo + +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{5} +} +func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetList.Merge(m, src) +} +func (m *DaemonSetList) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetList) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo + +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{6} +} +func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetSpec.Merge(m, src) +} +func (m *DaemonSetSpec) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo + +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{7} +} +func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetStatus.Merge(m, src) +} +func (m *DaemonSetStatus) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo + +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{8} +} +func 
(m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) +} +func (m *DaemonSetUpdateStrategy) XXX_Size() int { + return m.Size() +} +func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo + +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{9} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{10} +} +func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentCondition.Merge(m, src) +} +func (m *DeploymentCondition) XXX_Size() int { + return m.Size() +} +func (m *DeploymentCondition) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo + +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{11} +} +func (m *DeploymentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentList.Merge(m, src) +} +func (m *DeploymentList) XXX_Size() int { + return m.Size() +} +func (m *DeploymentList) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentList.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentList proto.InternalMessageInfo + +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{12} +} +func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentRollback) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentRollback.Merge(m, src) +} +func (m *DeploymentRollback) XXX_Size() int { + return m.Size() +} +func (m *DeploymentRollback) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo + +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{13} +} +func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentSpec.Merge(m, src) +} +func (m *DeploymentSpec) XXX_Size() int { + return m.Size() +} +func (m *DeploymentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo + +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{14} +} +func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStatus.Merge(m, src) +} +func (m *DeploymentStatus) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo + +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{15} +} +func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentStrategy.Merge(m, src) +} +func (m *DeploymentStrategy) XXX_Size() int { + return m.Size() +} +func (m *DeploymentStrategy) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{16} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{17} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{18} +} +func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{19} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{20} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + 
return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} + +var xxx_messageInfo_IDRange proto.InternalMessageInfo + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{21} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{22} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} + +var xxx_messageInfo_Ingress proto.InternalMessageInfo + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{23} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{24} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressList proto.InternalMessageInfo + +func (m *IngressRule) Reset() { *m = IngressRule{} } 
+func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{25} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRule proto.InternalMessageInfo + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{26} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{27} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{28} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{29} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{30} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{31} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{32} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{33} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{34} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{35} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{36} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{37} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{38} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{39} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo + +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{40} +} +func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSet.Merge(m, src) +} +func (m *ReplicaSet) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSet) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSet.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo + +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{41} +} +func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetCondition.Merge(m, src) +} +func (m *ReplicaSetCondition) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetCondition) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo + +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{42} +} +func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetList.Merge(m, src) +} +func (m *ReplicaSetList) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetList) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo + +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{43} +} +func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetSpec.Merge(m, src) +} +func (m *ReplicaSetSpec) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo + +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{44} +} +func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReplicaSetStatus.Merge(m, src) +} +func (m *ReplicaSetStatus) XXX_Size() int { + return m.Size() +} +func (m *ReplicaSetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo + +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{45} +} +func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} 
+func (m *RollbackConfig) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollbackConfig.Merge(m, src) +} +func (m *RollbackConfig) XXX_Size() int { + return m.Size() +} +func (m *RollbackConfig) XXX_DiscardUnknown() { + xxx_messageInfo_RollbackConfig.DiscardUnknown(m) +} + +var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo + +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{46} +} +func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) +} +func (m *RollingUpdateDaemonSet) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{47} +} +func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) +} +func (m *RollingUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo + +func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } +func (*RunAsGroupStrategyOptions) ProtoMessage() {} +func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{48} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{49} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{50} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{51} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo + +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{52} +} +func (m *Scale) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scale) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scale.Merge(m, src) +} +func (m *Scale) XXX_Size() int { + return m.Size() +} +func (m *Scale) XXX_DiscardUnknown() { + xxx_messageInfo_Scale.DiscardUnknown(m) +} + +var xxx_messageInfo_Scale proto.InternalMessageInfo + +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{53} +} +func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m 
*ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleSpec.Merge(m, src) +} +func (m *ScaleSpec) XXX_Size() int { + return m.Size() +} +func (m *ScaleSpec) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{54} +} +func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ScaleStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_ScaleStatus.Merge(m, src) +} +func (m *ScaleStatus) XXX_Size() int { + return m.Size() +} +func (m *ScaleStatus) XXX_DiscardUnknown() { + xxx_messageInfo_ScaleStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cdc93917efc28165, []int{55} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") + proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") + proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") + proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") + proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") + proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") + proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") + proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetUpdateStrategy") + proto.RegisterType((*Deployment)(nil), "k8s.io.api.extensions.v1beta1.Deployment") + proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition") + proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") + 
proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") + proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") + proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus") + proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") + proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") + proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") + proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.api.extensions.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.extensions.v1beta1.IngressTLS") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyEgressRule") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") + proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") + proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") + proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec") + proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus") + proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") + proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") + proto.RegisterType((*RollingUpdateDeployment)(nil), 
"k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_cdc93917efc28165) +} + +var fileDescriptor_cdc93917efc28165 = []byte{ + // 3743 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0x4d, 0x6c, 0x1c, 0xc7, + 0x72, 0xd6, 0xec, 0x2e, 0xb9, 0xcb, 0xe2, 0x7f, 0x93, 0x22, 0xf7, 0x49, 0x4f, 0x5c, 0xbd, 0x31, + 0xa0, 0xc8, 0x8e, 0xb4, 0x6b, 0xc9, 0x92, 0x9e, 0x22, 0x21, 0xef, 0x99, 0x4b, 0x8a, 0x12, 0x5f, + 0xf8, 0xb3, 0xee, 0x25, 0x65, 0xc3, 0x88, 0x1d, 0x0f, 0x77, 0x9b, 0xcb, 0x11, 0x67, 0x67, 0xc6, + 0xd3, 0xb3, 0x34, 0x17, 0xc8, 0x21, 0x87, 0x20, 0x80, 0x81, 0x00, 0xc9, 0xc5, 0x49, 0x8e, 0x31, + 0x02, 0xe4, 0x94, 0x20, 0xc7, 0xe4, 0x60, 0x18, 0x09, 0xe2, 0x00, 0x42, 0xe0, 0x04, 0xbe, 0xc5, + 0x27, 0x22, 0xa6, 0x4f, 0x41, 0x4e, 0xb9, 0x05, 0x3a, 0x05, 0xdd, 0xd3, 0xf3, 0x3f, 0xc3, 0x1d, + 0xd2, 0x12, 0x11, 0x03, 0xef, 0x24, 0x6e, 0x57, 0xd5, 0x57, 0xd5, 0xdd, 0xd5, 0x55, 0xd5, 0x3d, + 0x25, 0x58, 0xd9, 0xbf, 0x4f, 0xab, 0xaa, 0x51, 0xdb, 0xef, 0xed, 0x10, 0x4b, 0x27, 0x36, 0xa1, + 0xb5, 0x03, 0xa2, 0xb7, 0x0d, 0xab, 0x26, 0x08, 0x8a, 0xa9, 0xd6, 0xc8, 0xa1, 0x4d, 0x74, 0xaa, + 0x1a, 0x3a, 0xad, 0x1d, 0xdc, 0xda, 0x21, 0xb6, 0x72, 0xab, 0xd6, 0x21, 0x3a, 0xb1, 0x14, 0x9b, + 0xb4, 0xab, 0xa6, 0x65, 0xd8, 0x06, 0xba, 0xe2, 0xb0, 0x57, 0x15, 0x53, 0xad, 0xfa, 0xec, 0x55, + 0xc1, 0x7e, 0xe9, 0x66, 0x47, 0xb5, 0xf7, 0x7a, 0x3b, 0xd5, 0x96, 0xd1, 0xad, 0x75, 0x8c, 0x8e, + 0x51, 0xe3, 0x52, 0x3b, 0xbd, 0x5d, 0xfe, 0x8b, 0xff, 0xe0, 0x7f, 0x39, 0x68, 0x97, 0xe4, 0x80, + 0xf2, 0x96, 0x61, 0x91, 0xda, 0x41, 0x4c, 0xe3, 0xa5, 0x3b, 0x3e, 0x4f, 0x57, 0x69, 0xed, 0xa9, + 0x3a, 0xb1, 0xfa, 0x35, 0x73, 0xbf, 0xc3, 0x06, 0x68, 0xad, 0x4b, 0x6c, 0x25, 0x49, 0xaa, 0x96, + 0x26, 0x65, 0xf5, 0x74, 0x5b, 0xed, 0x92, 0x98, 0xc0, 0xbd, 0x41, 0x02, 0xb4, 0xb5, 0x47, 0xba, + 0x4a, 0x4c, 0xee, 0xad, 0x34, 0xb9, 0x9e, 0xad, 0x6a, 0x35, 0x55, 0xb7, 0xa9, 0x6d, 0x45, 0x85, + 0xe4, 0x3b, 0x30, 0xb5, 0xa8, 0x69, 0xc6, 0x27, 0xa4, 0xbd, 0xd4, 0x5c, 0x5d, 0xb6, 0xd4, 0x03, + 0x62, 0xa1, 0xab, 0x50, 0xd0, 0x95, 0x2e, 0x29, 0x4b, 0x57, 0xa5, 0xeb, 0x23, 0xf5, 0xb1, 0xe7, + 0x47, 0x95, 0x0b, 0xc7, 0x47, 0x95, 0xc2, 0x86, 0xd2, 0x25, 0x98, 0x53, 0xe4, 0x87, 0x30, 0x2d, + 0xa4, 0x56, 0x34, 0x72, 0xf8, 0xd4, 0xd0, 0x7a, 0x5d, 0x82, 0xae, 0xc1, 0x70, 0x9b, 0x03, 0x08, + 0xc1, 0x09, 0x21, 0x38, 0xec, 0xc0, 0x62, 0x41, 0x95, 0x29, 0x4c, 0x0a, 0xe1, 0x27, 0x06, 0xb5, + 0x1b, 0x8a, 0xbd, 0x87, 0x6e, 0x03, 0x98, 0x8a, 
0xbd, 0xd7, 0xb0, 0xc8, 0xae, 0x7a, 0x28, 0xc4, + 0x91, 0x10, 0x87, 0x86, 0x47, 0xc1, 0x01, 0x2e, 0x74, 0x03, 0x4a, 0x16, 0x51, 0xda, 0x9b, 0xba, + 0xd6, 0x2f, 0xe7, 0xae, 0x4a, 0xd7, 0x4b, 0xf5, 0x29, 0x21, 0x51, 0xc2, 0x62, 0x1c, 0x7b, 0x1c, + 0xf2, 0x67, 0x39, 0x18, 0x59, 0x56, 0x48, 0xd7, 0xd0, 0x9b, 0xc4, 0x46, 0x1f, 0x41, 0x89, 0x6d, + 0x57, 0x5b, 0xb1, 0x15, 0xae, 0x6d, 0xf4, 0xf6, 0x9b, 0x55, 0xdf, 0x9d, 0xbc, 0xd5, 0xab, 0x9a, + 0xfb, 0x1d, 0x36, 0x40, 0xab, 0x8c, 0xbb, 0x7a, 0x70, 0xab, 0xba, 0xb9, 0xf3, 0x8c, 0xb4, 0xec, + 0x75, 0x62, 0x2b, 0xbe, 0x7d, 0xfe, 0x18, 0xf6, 0x50, 0xd1, 0x06, 0x14, 0xa8, 0x49, 0x5a, 0xdc, + 0xb2, 0xd1, 0xdb, 0x37, 0xaa, 0x27, 0x3a, 0x6b, 0xd5, 0xb3, 0xac, 0x69, 0x92, 0x96, 0xbf, 0xe2, + 0xec, 0x17, 0xe6, 0x38, 0xe8, 0x29, 0x0c, 0x53, 0x5b, 0xb1, 0x7b, 0xb4, 0x9c, 0xe7, 0x88, 0xd5, + 0xcc, 0x88, 0x5c, 0xca, 0xdf, 0x0c, 0xe7, 0x37, 0x16, 0x68, 0xf2, 0x7f, 0xe5, 0x00, 0x79, 0xbc, + 0x4b, 0x86, 0xde, 0x56, 0x6d, 0xd5, 0xd0, 0xd1, 0x03, 0x28, 0xd8, 0x7d, 0xd3, 0x75, 0x81, 0x6b, + 0xae, 0x41, 0x5b, 0x7d, 0x93, 0xbc, 0x38, 0xaa, 0xcc, 0xc5, 0x25, 0x18, 0x05, 0x73, 0x19, 0xb4, + 0xe6, 0x99, 0x9a, 0xe3, 0xd2, 0x77, 0xc2, 0xaa, 0x5f, 0x1c, 0x55, 0x12, 0x0e, 0x5b, 0xd5, 0x43, + 0x0a, 0x1b, 0x88, 0x0e, 0x00, 0x69, 0x0a, 0xb5, 0xb7, 0x2c, 0x45, 0xa7, 0x8e, 0x26, 0xb5, 0x4b, + 0xc4, 0x22, 0xbc, 0x91, 0x6d, 0xd3, 0x98, 0x44, 0xfd, 0x92, 0xb0, 0x02, 0xad, 0xc5, 0xd0, 0x70, + 0x82, 0x06, 0xe6, 0xcd, 0x16, 0x51, 0xa8, 0xa1, 0x97, 0x0b, 0x61, 0x6f, 0xc6, 0x7c, 0x14, 0x0b, + 0x2a, 0x7a, 0x1d, 0x8a, 0x5d, 0x42, 0xa9, 0xd2, 0x21, 0xe5, 0x21, 0xce, 0x38, 0x29, 0x18, 0x8b, + 0xeb, 0xce, 0x30, 0x76, 0xe9, 0xf2, 0x17, 0x12, 0x8c, 0x7b, 0x2b, 0xb7, 0xa6, 0x52, 0x1b, 0xfd, + 0x6e, 0xcc, 0x0f, 0xab, 0xd9, 0xa6, 0xc4, 0xa4, 0xb9, 0x17, 0x7a, 0x3e, 0xef, 0x8e, 0x04, 0x7c, + 0x70, 0x1d, 0x86, 0x54, 0x9b, 0x74, 0xd9, 0x3e, 0xe4, 0xaf, 0x8f, 0xde, 0xbe, 0x9e, 0xd5, 0x65, + 0xea, 0xe3, 0x02, 0x74, 0x68, 0x95, 0x89, 0x63, 0x07, 0x45, 0xfe, 0xb3, 0x42, 0xc0, 0x7c, 0xe6, + 0x9a, 0xe8, 0x03, 0x28, 0x51, 0xa2, 0x91, 0x96, 0x6d, 0x58, 0xc2, 0xfc, 0xb7, 0x32, 0x9a, 0xaf, + 0xec, 0x10, 0xad, 0x29, 0x44, 0xeb, 0x63, 0xcc, 0x7e, 0xf7, 0x17, 0xf6, 0x20, 0xd1, 0x3b, 0x50, + 0xb2, 0x49, 0xd7, 0xd4, 0x14, 0x9b, 0x88, 0x73, 0xf4, 0x5a, 0x70, 0x0a, 0xcc, 0x73, 0x18, 0x58, + 0xc3, 0x68, 0x6f, 0x09, 0x36, 0x7e, 0x7c, 0xbc, 0x25, 0x71, 0x47, 0xb1, 0x07, 0x83, 0x0e, 0x60, + 0xa2, 0x67, 0xb6, 0x19, 0xa7, 0xcd, 0xa2, 0x60, 0xa7, 0x2f, 0x3c, 0xe9, 0x5e, 0xd6, 0xb5, 0xd9, + 0x0e, 0x49, 0xd7, 0xe7, 0x84, 0xae, 0x89, 0xf0, 0x38, 0x8e, 0x68, 0x41, 0x8b, 0x30, 0xd9, 0x55, + 0x75, 0x16, 0x97, 0xfa, 0x4d, 0xd2, 0x32, 0xf4, 0x36, 0xe5, 0x6e, 0x35, 0x54, 0x9f, 0x17, 0x00, + 0x93, 0xeb, 0x61, 0x32, 0x8e, 0xf2, 0xa3, 0x5f, 0x01, 0x72, 0xa7, 0xf1, 0xd8, 0x09, 0xe2, 0xaa, + 0xa1, 0x73, 0x9f, 0xcb, 0xfb, 0xce, 0xbd, 0x15, 0xe3, 0xc0, 0x09, 0x52, 0x68, 0x0d, 0x66, 0x2d, + 0x72, 0xa0, 0xb2, 0x39, 0x3e, 0x51, 0xa9, 0x6d, 0x58, 0xfd, 0x35, 0xb5, 0xab, 0xda, 0xe5, 0x61, + 0x6e, 0x53, 0xf9, 0xf8, 0xa8, 0x32, 0x8b, 0x13, 0xe8, 0x38, 0x51, 0x4a, 0xfe, 0xf3, 0x61, 0x98, + 0x8c, 0xc4, 0x1b, 0xf4, 0x14, 0xe6, 0x5a, 0x3d, 0xcb, 0x22, 0xba, 0xbd, 0xd1, 0xeb, 0xee, 0x10, + 0xab, 0xd9, 0xda, 0x23, 0xed, 0x9e, 0x46, 0xda, 0xdc, 0x51, 0x86, 0xea, 0x0b, 0xc2, 0xe2, 0xb9, + 0xa5, 0x44, 0x2e, 0x9c, 0x22, 0xcd, 0x56, 0x41, 0xe7, 0x43, 0xeb, 0x2a, 0xa5, 0x1e, 0x66, 0x8e, + 0x63, 0x7a, 0xab, 0xb0, 0x11, 0xe3, 0xc0, 0x09, 0x52, 0xcc, 0xc6, 0x36, 0xa1, 0xaa, 0x45, 0xda, + 0x51, 0x1b, 0xf3, 0x61, 0x1b, 0x97, 0x13, 0xb9, 0x70, 0x8a, 0x34, 0xba, 
0x0b, 0xa3, 0x8e, 0x36, + 0xbe, 0x7f, 0x62, 0xa3, 0x67, 0x04, 0xd8, 0xe8, 0x86, 0x4f, 0xc2, 0x41, 0x3e, 0x36, 0x35, 0x63, + 0x87, 0x12, 0xeb, 0x80, 0xb4, 0xd3, 0x37, 0x78, 0x33, 0xc6, 0x81, 0x13, 0xa4, 0xd8, 0xd4, 0x1c, + 0x0f, 0x8c, 0x4d, 0x6d, 0x38, 0x3c, 0xb5, 0xed, 0x44, 0x2e, 0x9c, 0x22, 0xcd, 0xfc, 0xd8, 0x31, + 0x79, 0xf1, 0x40, 0x51, 0x35, 0x65, 0x47, 0x23, 0xe5, 0x62, 0xd8, 0x8f, 0x37, 0xc2, 0x64, 0x1c, + 0xe5, 0x47, 0x8f, 0x61, 0xda, 0x19, 0xda, 0xd6, 0x15, 0x0f, 0xa4, 0xc4, 0x41, 0x7e, 0x22, 0x40, + 0xa6, 0x37, 0xa2, 0x0c, 0x38, 0x2e, 0x83, 0x1e, 0xc0, 0x44, 0xcb, 0xd0, 0x34, 0xee, 0x8f, 0x4b, + 0x46, 0x4f, 0xb7, 0xcb, 0x23, 0x1c, 0x05, 0xb1, 0xf3, 0xb8, 0x14, 0xa2, 0xe0, 0x08, 0x27, 0x22, + 0x00, 0x2d, 0x37, 0xe1, 0xd0, 0x32, 0xf0, 0xf8, 0x78, 0x2b, 0x6b, 0x0c, 0xf0, 0x52, 0x95, 0x5f, + 0x03, 0x78, 0x43, 0x14, 0x07, 0x80, 0xe5, 0x7f, 0x95, 0x60, 0x3e, 0x25, 0x74, 0xa0, 0x5f, 0x86, + 0x52, 0xec, 0x6f, 0x46, 0x52, 0xec, 0xe5, 0x14, 0xb1, 0x40, 0x9e, 0xd5, 0x61, 0xdc, 0x62, 0xb3, + 0xd2, 0x3b, 0x0e, 0x8b, 0x88, 0x91, 0x77, 0x07, 0x4c, 0x03, 0x07, 0x65, 0xfc, 0x98, 0x3f, 0x7d, + 0x7c, 0x54, 0x19, 0x0f, 0xd1, 0x70, 0x18, 0x5e, 0xfe, 0x8b, 0x1c, 0xc0, 0x32, 0x31, 0x35, 0xa3, + 0xdf, 0x25, 0xfa, 0x79, 0xd4, 0x50, 0x9b, 0xa1, 0x1a, 0xea, 0xe6, 0xa0, 0xed, 0xf1, 0x4c, 0x4b, + 0x2d, 0xa2, 0xde, 0x8d, 0x14, 0x51, 0xb5, 0xec, 0x90, 0x27, 0x57, 0x51, 0xff, 0x91, 0x87, 0x19, + 0x9f, 0xd9, 0x2f, 0xa3, 0x1e, 0x86, 0xf6, 0xf8, 0x37, 0x22, 0x7b, 0x3c, 0x9f, 0x20, 0xf2, 0xca, + 0xea, 0xa8, 0x67, 0x30, 0xc1, 0xaa, 0x1c, 0x67, 0x2f, 0x79, 0x0d, 0x35, 0x7c, 0xea, 0x1a, 0xca, + 0xcb, 0x76, 0x6b, 0x21, 0x24, 0x1c, 0x41, 0x4e, 0xa9, 0xd9, 0x8a, 0x3f, 0xc6, 0x9a, 0xed, 0x4b, + 0x09, 0x26, 0xfc, 0x6d, 0x3a, 0x87, 0xa2, 0x6d, 0x23, 0x5c, 0xb4, 0xbd, 0x9e, 0xd9, 0x45, 0x53, + 0xaa, 0xb6, 0xff, 0x65, 0x05, 0xbe, 0xc7, 0xc4, 0x0e, 0xf8, 0x8e, 0xd2, 0xda, 0x1f, 0x7c, 0xc7, + 0x43, 0x9f, 0x49, 0x80, 0x44, 0x16, 0x58, 0xd4, 0x75, 0xc3, 0x56, 0x9c, 0x58, 0xe9, 0x98, 0xb5, + 0x9a, 0xd9, 0x2c, 0x57, 0x63, 0x75, 0x3b, 0x86, 0xf5, 0x48, 0xb7, 0xad, 0xbe, 0xbf, 0xc9, 0x71, + 0x06, 0x9c, 0x60, 0x00, 0x52, 0x00, 0x2c, 0x81, 0xb9, 0x65, 0x88, 0x83, 0x7c, 0x33, 0x43, 0xcc, + 0x63, 0x02, 0x4b, 0x86, 0xbe, 0xab, 0x76, 0xfc, 0xb0, 0x83, 0x3d, 0x20, 0x1c, 0x00, 0xbd, 0xf4, + 0x08, 0xe6, 0x53, 0xac, 0x45, 0x53, 0x90, 0xdf, 0x27, 0x7d, 0x67, 0xd9, 0x30, 0xfb, 0x13, 0xcd, + 0xc2, 0xd0, 0x81, 0xa2, 0xf5, 0x9c, 0xf0, 0x3b, 0x82, 0x9d, 0x1f, 0x0f, 0x72, 0xf7, 0x25, 0xf9, + 0x8b, 0xa1, 0xa0, 0xef, 0xf0, 0x8a, 0xf9, 0x3a, 0xbb, 0xb4, 0x9a, 0x9a, 0xda, 0x52, 0xa8, 0x28, + 0x84, 0xc6, 0x9c, 0x0b, 0xab, 0x33, 0x86, 0x3d, 0x6a, 0xa8, 0xb6, 0xce, 0xbd, 0xda, 0xda, 0x3a, + 0xff, 0x72, 0x6a, 0xeb, 0xdf, 0x83, 0x12, 0x75, 0xab, 0xea, 0x02, 0x87, 0xbc, 0x75, 0x8a, 0xf8, + 0x2a, 0x0a, 0x6a, 0x4f, 0x81, 0x57, 0x4a, 0x7b, 0xa0, 0x49, 0x45, 0xf4, 0xd0, 0x29, 0x8b, 0xe8, + 0x97, 0x5a, 0xf8, 0xb2, 0x78, 0x63, 0x2a, 0x3d, 0x4a, 0xda, 0x3c, 0xb6, 0x95, 0xfc, 0x78, 0xd3, + 0xe0, 0xa3, 0x58, 0x50, 0xd1, 0x07, 0x21, 0x97, 0x2d, 0x9d, 0xc5, 0x65, 0x27, 0xd2, 0xdd, 0x15, + 0x6d, 0xc3, 0xbc, 0x69, 0x19, 0x1d, 0x8b, 0x50, 0xba, 0x4c, 0x94, 0xb6, 0xa6, 0xea, 0xc4, 0x5d, + 0x1f, 0xa7, 0x22, 0xba, 0x7c, 0x7c, 0x54, 0x99, 0x6f, 0x24, 0xb3, 0xe0, 0x34, 0x59, 0xf9, 0x79, + 0x01, 0xa6, 0xa2, 0x19, 0x30, 0xa5, 0x48, 0x95, 0xce, 0x54, 0xa4, 0xde, 0x08, 0x1c, 0x06, 0xa7, + 0x82, 0x0f, 0xbc, 0xe0, 0xc4, 0x0e, 0xc4, 0x22, 0x4c, 0x8a, 0x68, 0xe0, 0x12, 0x45, 0x99, 0xee, + 0xed, 0xfe, 0x76, 0x98, 0x8c, 0xa3, 0xfc, 0xe8, 0x21, 0x8c, 0x5b, 0xbc, 0xee, 0x76, 0x01, 0x9c, + 
0xda, 0xf5, 0xa2, 0x00, 0x18, 0xc7, 0x41, 0x22, 0x0e, 0xf3, 0xb2, 0xba, 0xd5, 0x2f, 0x47, 0x5d, + 0x80, 0x42, 0xb8, 0x6e, 0x5d, 0x8c, 0x32, 0xe0, 0xb8, 0x0c, 0x5a, 0x87, 0x99, 0x9e, 0x1e, 0x87, + 0x72, 0x5c, 0xf9, 0xb2, 0x80, 0x9a, 0xd9, 0x8e, 0xb3, 0xe0, 0x24, 0x39, 0xb4, 0x1b, 0x2a, 0x65, + 0x87, 0x79, 0x78, 0xbe, 0x9d, 0xf9, 0xe0, 0x65, 0xae, 0x65, 0x13, 0xca, 0xed, 0x52, 0xd6, 0x72, + 0x5b, 0xfe, 0x27, 0x29, 0x98, 0x84, 0xbc, 0x12, 0x78, 0xd0, 0x2b, 0x53, 0x4c, 0x22, 0x50, 0x1d, + 0x19, 0xc9, 0xd5, 0xef, 0xbd, 0x53, 0x55, 0xbf, 0x7e, 0xf2, 0x1c, 0x5c, 0xfe, 0x7e, 0x2e, 0xc1, + 0xdc, 0x4a, 0xf3, 0xb1, 0x65, 0xf4, 0x4c, 0xd7, 0x9c, 0x4d, 0xd3, 0x59, 0x9a, 0x9f, 0x43, 0xc1, + 0xea, 0x69, 0xee, 0x3c, 0x5e, 0x73, 0xe7, 0x81, 0x7b, 0x1a, 0x9b, 0xc7, 0x4c, 0x44, 0xca, 0x99, + 0x04, 0x13, 0x40, 0x1b, 0x30, 0x6c, 0x29, 0x7a, 0x87, 0xb8, 0x69, 0xf5, 0xda, 0x00, 0xeb, 0x57, + 0x97, 0x31, 0x63, 0x0f, 0x14, 0x36, 0x5c, 0x1a, 0x0b, 0x14, 0xf9, 0x9f, 0x25, 0x98, 0x7c, 0xb2, + 0xb5, 0xd5, 0x58, 0xd5, 0xf9, 0x89, 0xe6, 0x6f, 0xab, 0x57, 0xa1, 0x60, 0x2a, 0xf6, 0x5e, 0x34, + 0xd3, 0x33, 0x1a, 0xe6, 0x14, 0x74, 0x07, 0x4a, 0xec, 0x5f, 0x66, 0x17, 0x3f, 0x52, 0x23, 0x3c, + 0x10, 0x96, 0x1a, 0x62, 0xec, 0x45, 0xe0, 0x6f, 0xec, 0x71, 0xa2, 0xf7, 0xa0, 0xc8, 0xe2, 0x0f, + 0xd1, 0xdb, 0x19, 0x0b, 0x74, 0x61, 0x54, 0xdd, 0x11, 0xf2, 0x6b, 0x2e, 0x31, 0x80, 0x5d, 0x38, + 0x79, 0x1f, 0x66, 0x03, 0x93, 0x60, 0xab, 0xf8, 0x94, 0xe5, 0x54, 0xd4, 0x84, 0x21, 0xa6, 0x9d, + 0x65, 0xce, 0x7c, 0x86, 0x27, 0xd0, 0xc8, 0x42, 0xf8, 0xf5, 0x11, 0xfb, 0x45, 0xb1, 0x83, 0x25, + 0xaf, 0xc3, 0x38, 0x7f, 0x86, 0x36, 0x2c, 0x9b, 0x2f, 0x26, 0xba, 0x02, 0xf9, 0xae, 0xaa, 0x8b, + 0xec, 0x3c, 0x2a, 0x64, 0xf2, 0x2c, 0xb3, 0xb0, 0x71, 0x4e, 0x56, 0x0e, 0x45, 0xbc, 0xf2, 0xc9, + 0xca, 0x21, 0x66, 0xe3, 0xf2, 0x63, 0x28, 0x8a, 0x4d, 0x0a, 0x02, 0xe5, 0x4f, 0x06, 0xca, 0x27, + 0x00, 0x6d, 0x42, 0x71, 0xb5, 0x51, 0xd7, 0x0c, 0xa7, 0x56, 0x6b, 0xa9, 0x6d, 0x2b, 0xba, 0x83, + 0x4b, 0xab, 0xcb, 0x18, 0x73, 0x0a, 0x92, 0x61, 0x98, 0x1c, 0xb6, 0x88, 0x69, 0x73, 0x3f, 0x1a, + 0xa9, 0x03, 0xf3, 0x8d, 0x47, 0x7c, 0x04, 0x0b, 0x8a, 0xfc, 0x27, 0x39, 0x28, 0x8a, 0xe5, 0x38, + 0x87, 0xbb, 0xdb, 0x5a, 0xe8, 0xee, 0xf6, 0x46, 0x36, 0xd7, 0x48, 0xbd, 0xb8, 0x6d, 0x45, 0x2e, + 0x6e, 0x37, 0x32, 0xe2, 0x9d, 0x7c, 0x6b, 0xfb, 0x34, 0x07, 0x13, 0x61, 0xa7, 0x44, 0x77, 0x61, + 0x94, 0xa5, 0x29, 0xb5, 0x45, 0x36, 0xfc, 0xea, 0xd8, 0x7b, 0xba, 0x69, 0xfa, 0x24, 0x1c, 0xe4, + 0x43, 0x1d, 0x4f, 0x8c, 0xf9, 0x91, 0x98, 0x74, 0xfa, 0x92, 0xf6, 0x6c, 0x55, 0xab, 0x3a, 0x1f, + 0x64, 0xaa, 0xab, 0xba, 0xbd, 0x69, 0x35, 0x6d, 0x4b, 0xd5, 0x3b, 0x31, 0x45, 0xdc, 0x29, 0x83, + 0xc8, 0xe8, 0x5d, 0x96, 0x32, 0xa9, 0xd1, 0xb3, 0x5a, 0x24, 0xa9, 0xf4, 0x75, 0xcb, 0x36, 0x76, + 0x40, 0xdb, 0x6b, 0x46, 0x4b, 0xd1, 0x9c, 0xcd, 0xc1, 0x64, 0x97, 0x58, 0x44, 0x6f, 0x11, 0xb7, + 0xdc, 0x74, 0x20, 0xb0, 0x07, 0x26, 0xff, 0x83, 0x04, 0xa3, 0x62, 0x2d, 0xce, 0xe1, 0x92, 0xf3, + 0x3b, 0xe1, 0x4b, 0xce, 0xb5, 0x8c, 0x91, 0x23, 0xf9, 0x86, 0xf3, 0xd7, 0xbe, 0xe9, 0x2c, 0x56, + 0xb0, 0xe3, 0xb2, 0x67, 0x50, 0x3b, 0x7a, 0x5c, 0xd8, 0x29, 0xc7, 0x9c, 0x82, 0x7a, 0x30, 0xa5, + 0x46, 0x82, 0x8b, 0xd8, 0xb3, 0x5a, 0x36, 0x4b, 0x3c, 0xb1, 0x7a, 0x59, 0xc0, 0x4f, 0x45, 0x29, + 0x38, 0xa6, 0x42, 0x26, 0x10, 0xe3, 0x42, 0xef, 0x40, 0x61, 0xcf, 0xb6, 0xcd, 0x84, 0xe7, 0xf3, + 0x01, 0x21, 0xcd, 0x37, 0xa1, 0xc4, 0x67, 0xb7, 0xb5, 0xd5, 0xc0, 0x1c, 0x4a, 0xfe, 0xc7, 0x9c, + 0xb7, 0x1e, 0xfc, 0xce, 0xf1, 0xb6, 0x37, 0xdb, 0x25, 0x4d, 0xa1, 0x94, 0x3b, 0xb6, 0x73, 0x3f, + 0x9e, 0x0d, 0x18, 0xee, 
0xd1, 0x70, 0x8c, 0x1b, 0x6d, 0xf9, 0xa1, 0x5e, 0x3a, 0x4b, 0xa8, 0x1f, + 0x4d, 0x0a, 0xf3, 0xe8, 0x09, 0xe4, 0x6d, 0x2d, 0xeb, 0x3d, 0x57, 0x20, 0x6e, 0xad, 0x35, 0xfd, + 0x58, 0xb9, 0xb5, 0xd6, 0xc4, 0x0c, 0x02, 0x6d, 0xc2, 0x10, 0x4b, 0xa7, 0x2c, 0x3a, 0xe4, 0xb3, + 0x47, 0x1b, 0xb6, 0x82, 0xbe, 0x4b, 0xb1, 0x5f, 0x14, 0x3b, 0x38, 0xf2, 0xc7, 0x30, 0x1e, 0x0a, + 0x21, 0xe8, 0x23, 0x18, 0xd3, 0x0c, 0xa5, 0x5d, 0x57, 0x34, 0x45, 0x6f, 0x11, 0xf7, 0x6b, 0xc7, + 0xb5, 0xa4, 0xb3, 0xb7, 0x16, 0xe0, 0x13, 0x01, 0x68, 0x56, 0x28, 0x19, 0x0b, 0xd2, 0x70, 0x08, + 0x51, 0x56, 0x00, 0xfc, 0x39, 0xa2, 0x0a, 0x0c, 0x31, 0x4f, 0x75, 0x52, 0xdd, 0x48, 0x7d, 0x84, + 0x59, 0xc8, 0x1c, 0x98, 0x62, 0x67, 0x1c, 0xdd, 0x06, 0xa0, 0xa4, 0x65, 0x11, 0x9b, 0x6f, 0x67, + 0x2e, 0xfc, 0xc5, 0xb4, 0xe9, 0x51, 0x70, 0x80, 0x4b, 0xfe, 0x17, 0x09, 0xc6, 0x37, 0x88, 0xfd, + 0x89, 0x61, 0xed, 0x37, 0x0c, 0x4d, 0x6d, 0xf5, 0xcf, 0x21, 0x0f, 0xe0, 0x50, 0x1e, 0x78, 0x73, + 0xc0, 0xce, 0x84, 0xac, 0x4b, 0xcb, 0x06, 0xf2, 0x97, 0x12, 0xcc, 0x87, 0x38, 0x1f, 0xf9, 0x87, + 0x7f, 0x1b, 0x86, 0x4c, 0xc3, 0xb2, 0xdd, 0x1a, 0xe1, 0x54, 0x0a, 0x59, 0x84, 0x0d, 0x54, 0x09, + 0x0c, 0x06, 0x3b, 0x68, 0x68, 0x0d, 0x72, 0xb6, 0x21, 0x5c, 0xf5, 0x74, 0x98, 0x84, 0x58, 0x75, + 0x10, 0x98, 0xb9, 0x2d, 0x03, 0xe7, 0x6c, 0x83, 0x6d, 0x44, 0x39, 0xc4, 0x15, 0x0c, 0x5f, 0xaf, + 0x68, 0x06, 0x18, 0x0a, 0xbb, 0x96, 0xd1, 0x3d, 0xf3, 0x1c, 0xbc, 0x8d, 0x58, 0xb1, 0x8c, 0x2e, + 0xe6, 0x58, 0xf2, 0x57, 0x12, 0x4c, 0x87, 0x38, 0xcf, 0x21, 0x75, 0xbc, 0x13, 0x4e, 0x1d, 0x37, + 0x4e, 0x33, 0x91, 0x94, 0x04, 0xf2, 0x55, 0x2e, 0x32, 0x0d, 0x36, 0x61, 0xb4, 0x0b, 0xa3, 0xa6, + 0xd1, 0x6e, 0xbe, 0x84, 0xef, 0x9b, 0x93, 0x2c, 0xa5, 0x37, 0x7c, 0x2c, 0x1c, 0x04, 0x46, 0x87, + 0x30, 0xad, 0x2b, 0x5d, 0x42, 0x4d, 0xa5, 0x45, 0x9a, 0x2f, 0xe1, 0xc5, 0xe7, 0x22, 0xff, 0x80, + 0x12, 0x45, 0xc4, 0x71, 0x25, 0x68, 0x1d, 0x8a, 0xaa, 0xc9, 0x4b, 0x4c, 0x51, 0x4b, 0x0c, 0xcc, + 0xc3, 0x4e, 0x41, 0xea, 0xc4, 0x73, 0xf1, 0x03, 0xbb, 0x18, 0xf2, 0xdf, 0x44, 0xbd, 0x81, 0x57, + 0x2c, 0x8f, 0xa1, 0xc4, 0x3b, 0x4d, 0x5a, 0x86, 0xe6, 0x7e, 0xea, 0xe0, 0x97, 0x0b, 0x31, 0xf6, + 0xe2, 0xa8, 0x72, 0x39, 0xe1, 0x15, 0xdb, 0x25, 0x63, 0x4f, 0x18, 0x6d, 0x40, 0xc1, 0xfc, 0x21, + 0xc5, 0x15, 0x4f, 0x93, 0xbc, 0xa2, 0xe2, 0x38, 0xf2, 0x1f, 0xe6, 0x23, 0xe6, 0xf2, 0x64, 0xf9, + 0xec, 0xa5, 0xed, 0xba, 0x57, 0xcc, 0xa5, 0xee, 0xfc, 0x0e, 0x14, 0x45, 0xaa, 0x15, 0xce, 0xfc, + 0xf3, 0xd3, 0x38, 0x73, 0x30, 0x8b, 0x79, 0x77, 0x29, 0x77, 0xd0, 0x05, 0x46, 0x1f, 0xc2, 0x30, + 0x71, 0x54, 0x38, 0xb9, 0xf1, 0xde, 0x69, 0x54, 0xf8, 0x71, 0xd5, 0xaf, 0xa1, 0xc5, 0x98, 0x40, + 0x45, 0xbf, 0x64, 0xeb, 0xc5, 0x78, 0x59, 0xc9, 0x49, 0xcb, 0x05, 0x9e, 0xae, 0xae, 0x38, 0xd3, + 0xf6, 0x86, 0x5f, 0x1c, 0x55, 0xc0, 0xff, 0x89, 0x83, 0x12, 0xf2, 0xbf, 0x49, 0x30, 0xcd, 0x57, + 0xa8, 0xd5, 0xb3, 0x54, 0xbb, 0x7f, 0x6e, 0x89, 0xe9, 0x69, 0x28, 0x31, 0xdd, 0x19, 0xb0, 0x2c, + 0x31, 0x0b, 0x53, 0x93, 0xd3, 0xd7, 0x12, 0x5c, 0x8c, 0x71, 0x9f, 0x43, 0x5c, 0xdc, 0x0e, 0xc7, + 0xc5, 0x37, 0x4f, 0x3b, 0xa1, 0x94, 0xd8, 0xf8, 0x3f, 0xd3, 0x09, 0xd3, 0xe1, 0x27, 0xe5, 0x36, + 0x80, 0x69, 0xa9, 0x07, 0xaa, 0x46, 0x3a, 0xe2, 0xab, 0x7e, 0x29, 0xd0, 0xb3, 0xe5, 0x51, 0x70, + 0x80, 0x0b, 0x51, 0x98, 0x6b, 0x93, 0x5d, 0xa5, 0xa7, 0xd9, 0x8b, 0xed, 0xf6, 0x92, 0x62, 0x2a, + 0x3b, 0xaa, 0xa6, 0xda, 0xaa, 0x78, 0xff, 0x18, 0xa9, 0x3f, 0x74, 0xbe, 0xb6, 0x27, 0x71, 0xbc, + 0x38, 0xaa, 0x5c, 0x49, 0xfa, 0xdc, 0xe5, 0xb2, 0xf4, 0x71, 0x0a, 0x34, 0xea, 0x43, 0xd9, 0x22, + 0x1f, 0xf7, 0x54, 0x8b, 0xb4, 0x97, 0x2d, 0xc3, 
0x0c, 0xa9, 0xcd, 0x73, 0xb5, 0xbf, 0x7d, 0x7c, + 0x54, 0x29, 0xe3, 0x14, 0x9e, 0xc1, 0x8a, 0x53, 0xe1, 0xd1, 0x33, 0x98, 0x51, 0x44, 0x77, 0x5d, + 0x50, 0xab, 0x73, 0x4a, 0xee, 0x1f, 0x1f, 0x55, 0x66, 0x16, 0xe3, 0xe4, 0xc1, 0x0a, 0x93, 0x40, + 0x51, 0x0d, 0x8a, 0x07, 0xbc, 0x11, 0x8f, 0x96, 0x87, 0x38, 0x3e, 0x4b, 0x04, 0x45, 0xa7, 0x37, + 0x8f, 0x61, 0x0e, 0xaf, 0x34, 0xf9, 0xe9, 0x73, 0xb9, 0xd8, 0x5d, 0x97, 0xd5, 0x92, 0xe2, 0xc4, + 0xf3, 0x27, 0xf0, 0x92, 0x1f, 0xb5, 0x9e, 0xf8, 0x24, 0x1c, 0xe4, 0x43, 0x1f, 0xc0, 0xc8, 0x9e, + 0x78, 0x30, 0xa1, 0xe5, 0x62, 0xa6, 0x24, 0x1c, 0x7a, 0x60, 0xa9, 0x4f, 0x0b, 0x15, 0x23, 0xee, + 0x30, 0xc5, 0x3e, 0x22, 0x7a, 0x1d, 0x8a, 0xfc, 0xc7, 0xea, 0x32, 0x7f, 0x5f, 0x2c, 0xf9, 0xb1, + 0xed, 0x89, 0x33, 0x8c, 0x5d, 0xba, 0xcb, 0xba, 0xda, 0x58, 0xe2, 0xef, 0xdc, 0x11, 0xd6, 0xd5, + 0xc6, 0x12, 0x76, 0xe9, 0xe8, 0x23, 0x28, 0x52, 0xb2, 0xa6, 0xea, 0xbd, 0xc3, 0x32, 0x64, 0xfa, + 0x4a, 0xde, 0x7c, 0xc4, 0xb9, 0x23, 0x2f, 0x7d, 0xbe, 0x06, 0x41, 0xc7, 0x2e, 0x2c, 0xda, 0x83, + 0x11, 0xab, 0xa7, 0x2f, 0xd2, 0x6d, 0x4a, 0xac, 0xf2, 0x28, 0xd7, 0x31, 0x28, 0x9c, 0x63, 0x97, + 0x3f, 0xaa, 0xc5, 0x5b, 0x21, 0x8f, 0x03, 0xfb, 0xe0, 0x68, 0x0f, 0x80, 0xff, 0xe0, 0x8f, 0x8a, + 0xe5, 0x39, 0xae, 0xea, 0x7e, 0x16, 0x55, 0x49, 0x6f, 0x97, 0xe2, 0xc3, 0x82, 0x47, 0xc6, 0x01, + 0x6c, 0xf4, 0xc7, 0x12, 0x20, 0xda, 0x33, 0x4d, 0x8d, 0x74, 0x89, 0x6e, 0x2b, 0x1a, 0x1f, 0xa5, + 0xe5, 0x31, 0xae, 0xf2, 0xed, 0x41, 0x2b, 0x18, 0x13, 0x8c, 0xaa, 0xf6, 0xbe, 0x17, 0xc4, 0x59, + 0x71, 0x82, 0x5e, 0xb6, 0x89, 0xbb, 0x62, 0xd6, 0xe3, 0x99, 0x36, 0x31, 0xf9, 0xb9, 0xd6, 0xdf, + 0x44, 0x41, 0xc7, 0x2e, 0x2c, 0x7a, 0x0a, 0x73, 0x6e, 0xc7, 0x28, 0x36, 0x0c, 0x7b, 0x45, 0xd5, + 0x08, 0xed, 0x53, 0x9b, 0x74, 0xcb, 0x13, 0xdc, 0xc1, 0xbc, 0xb6, 0x19, 0x9c, 0xc8, 0x85, 0x53, + 0xa4, 0x51, 0x17, 0x2a, 0x6e, 0x70, 0x62, 0x27, 0xd7, 0x8b, 0x8e, 0x8f, 0x68, 0x4b, 0xd1, 0x9c, + 0x4f, 0x28, 0x93, 0x5c, 0xc1, 0x6b, 0xc7, 0x47, 0x95, 0xca, 0xf2, 0xc9, 0xac, 0x78, 0x10, 0x16, + 0x7a, 0x0f, 0xca, 0x4a, 0x9a, 0x9e, 0x29, 0xae, 0xe7, 0xa7, 0x2c, 0xe2, 0xa5, 0x2a, 0x48, 0x95, + 0x46, 0x36, 0x4c, 0x29, 0xe1, 0xde, 0x5d, 0x5a, 0x9e, 0xce, 0xf4, 0x1a, 0x1b, 0x69, 0xf9, 0xf5, + 0x1f, 0x4e, 0x22, 0x04, 0x8a, 0x63, 0x1a, 0xd0, 0xef, 0x03, 0x52, 0xa2, 0xed, 0xc6, 0xb4, 0x8c, + 0x32, 0x25, 0xba, 0x58, 0x9f, 0xb2, 0xef, 0x76, 0x31, 0x12, 0xc5, 0x09, 0x7a, 0x58, 0x81, 0xae, + 0x44, 0x5a, 0xa4, 0x69, 0x79, 0x9e, 0x2b, 0xaf, 0x65, 0x53, 0xee, 0xc9, 0x05, 0xbe, 0x14, 0x45, + 0x11, 0x71, 0x5c, 0x09, 0x5a, 0x83, 0x59, 0x31, 0xb8, 0xad, 0x53, 0x65, 0x97, 0x34, 0xfb, 0xb4, + 0x65, 0x6b, 0xb4, 0x3c, 0xc3, 0xe3, 0x3b, 0xff, 0x5a, 0xb9, 0x98, 0x40, 0xc7, 0x89, 0x52, 0xe8, + 0x6d, 0x98, 0xda, 0x35, 0xac, 0x1d, 0xb5, 0xdd, 0x26, 0xba, 0x8b, 0x34, 0xcb, 0x91, 0xf8, 0x3b, + 0xd0, 0x4a, 0x84, 0x86, 0x63, 0xdc, 0x88, 0xc2, 0x45, 0x81, 0xdc, 0xb0, 0x8c, 0xd6, 0xba, 0xd1, + 0xd3, 0x6d, 0xa7, 0xec, 0xbb, 0xe8, 0xa5, 0xd1, 0x8b, 0x8b, 0x49, 0x0c, 0x2f, 0x8e, 0x2a, 0x57, + 0x93, 0xab, 0x7c, 0x9f, 0x09, 0x27, 0x63, 0x23, 0x13, 0xc6, 0x44, 0xe3, 0x3b, 0x7f, 0x90, 0x2a, + 0x97, 0xf9, 0xd1, 0x7f, 0x30, 0x38, 0xe0, 0x79, 0x22, 0xd1, 0xf3, 0x3f, 0x75, 0x7c, 0x54, 0x19, + 0x0b, 0x32, 0xe0, 0x90, 0x06, 0xde, 0xe8, 0x24, 0x3e, 0xaf, 0x9d, 0x4f, 0xb3, 0xf8, 0xe9, 0x1a, + 0x9d, 0x7c, 0xd3, 0x5e, 0x5a, 0xa3, 0x53, 0x00, 0xf2, 0xe4, 0x27, 0xf3, 0xff, 0xce, 0xc1, 0x8c, + 0xcf, 0x9c, 0xb9, 0xd1, 0x29, 0x41, 0xe4, 0xd7, 0x0d, 0xe3, 0xd9, 0x9a, 0x8f, 0xfc, 0xa5, 0xfb, + 0xff, 0xd7, 0x7c, 0xe4, 0xdb, 0x96, 0x72, 0x7b, 0xf8, 0xbb, 0x5c, 0x70, 
0x02, 0xa7, 0xec, 0x80, + 0x79, 0x09, 0x3d, 0xd3, 0x3f, 0xba, 0x26, 0x1a, 0xf9, 0xeb, 0x3c, 0x4c, 0x45, 0x4f, 0x63, 0xa8, + 0x51, 0x42, 0x1a, 0xd8, 0x28, 0xd1, 0x80, 0xd9, 0xdd, 0x9e, 0xa6, 0xf5, 0xf9, 0x1c, 0x02, 0xdd, + 0x12, 0xce, 0x27, 0xcb, 0x9f, 0x0a, 0xc9, 0xd9, 0x95, 0x04, 0x1e, 0x9c, 0x28, 0x19, 0xef, 0x9b, + 0x28, 0xfc, 0xd0, 0xbe, 0x89, 0xa1, 0x33, 0xf4, 0x4d, 0x24, 0xb7, 0x9e, 0xe4, 0xcf, 0xd4, 0x7a, + 0x72, 0x96, 0xa6, 0x89, 0x84, 0x20, 0x36, 0xb0, 0x01, 0xf8, 0x17, 0x30, 0x11, 0x6e, 0xe4, 0x71, + 0xf6, 0xd2, 0xe9, 0x25, 0x12, 0x9f, 0x86, 0x03, 0x7b, 0xe9, 0x8c, 0x63, 0x8f, 0x43, 0xfe, 0x23, + 0x09, 0xe6, 0x92, 0x1b, 0x76, 0x91, 0x06, 0x13, 0x5d, 0xe5, 0x30, 0xd8, 0x44, 0x2d, 0x9d, 0xf1, + 0x65, 0x8c, 0x77, 0x70, 0xac, 0x87, 0xb0, 0x70, 0x04, 0x5b, 0xfe, 0x5e, 0x82, 0xf9, 0x94, 0xde, + 0x89, 0xf3, 0xb5, 0x04, 0xbd, 0x0f, 0xa5, 0xae, 0x72, 0xd8, 0xec, 0x59, 0x1d, 0x72, 0xe6, 0xb7, + 0x40, 0x7e, 0xa0, 0xd7, 0x05, 0x0a, 0xf6, 0xf0, 0xe4, 0xbf, 0x92, 0xe0, 0x27, 0xa9, 0x57, 0x25, + 0x74, 0x2f, 0xd4, 0xe6, 0x21, 0x47, 0xda, 0x3c, 0x50, 0x5c, 0xf0, 0x15, 0x75, 0x79, 0x7c, 0x2e, + 0x41, 0x39, 0xed, 0xee, 0x88, 0xee, 0x86, 0x8c, 0xfc, 0x59, 0xc4, 0xc8, 0xe9, 0x98, 0xdc, 0x2b, + 0xb2, 0xf1, 0xdf, 0x25, 0xb8, 0x7c, 0x42, 0x0d, 0xe6, 0x5d, 0x51, 0x48, 0x3b, 0xc8, 0xc5, 0x9f, + 0xad, 0xc5, 0x37, 0x2f, 0xff, 0x8a, 0x92, 0xc0, 0x83, 0x53, 0xa5, 0xd1, 0x36, 0xcc, 0x8b, 0xfb, + 0x51, 0x94, 0x26, 0xca, 0x0b, 0xde, 0x0d, 0xb7, 0x9c, 0xcc, 0x82, 0xd3, 0x64, 0xe5, 0xbf, 0x95, + 0x60, 0x2e, 0xf9, 0x51, 0x00, 0xbd, 0x15, 0x5a, 0xf2, 0x4a, 0x64, 0xc9, 0x27, 0x23, 0x52, 0x62, + 0xc1, 0x3f, 0x84, 0x09, 0xf1, 0x74, 0x20, 0x60, 0x84, 0x33, 0xcb, 0x49, 0x19, 0x44, 0x40, 0xb8, + 0x05, 0x2c, 0x3f, 0x26, 0xe1, 0x31, 0x1c, 0x41, 0x93, 0x3f, 0xcd, 0xc1, 0x50, 0xb3, 0xa5, 0x68, + 0xe4, 0x1c, 0xea, 0xd7, 0x5f, 0x85, 0xea, 0xd7, 0x41, 0xff, 0xcf, 0x8c, 0x5b, 0x95, 0x5a, 0xba, + 0xe2, 0x48, 0xe9, 0xfa, 0x46, 0x26, 0xb4, 0x93, 0xab, 0xd6, 0xdf, 0x82, 0x11, 0x4f, 0xe9, 0xe9, + 0x92, 0xa9, 0xfc, 0x97, 0x39, 0x18, 0x0d, 0xa8, 0x38, 0x65, 0x2a, 0xde, 0x0d, 0xd5, 0x1f, 0xf9, + 0x0c, 0x0f, 0x35, 0x01, 0x5d, 0x55, 0xb7, 0xe2, 0x70, 0xfa, 0xa4, 0xfd, 0xce, 0xd8, 0x78, 0x21, + 0xf2, 0x0b, 0x98, 0xb0, 0x15, 0xab, 0x43, 0x6c, 0xef, 0xc3, 0x85, 0xd3, 0xc7, 0xe5, 0x35, 0xec, + 0x6f, 0x85, 0xa8, 0x38, 0xc2, 0x7d, 0xe9, 0x21, 0x8c, 0x87, 0x94, 0x9d, 0xaa, 0xcd, 0xf9, 0xef, + 0x25, 0xf8, 0xd9, 0xc0, 0xc7, 0x1e, 0x54, 0x0f, 0x1d, 0x92, 0x6a, 0xe4, 0x90, 0x2c, 0xa4, 0x03, + 0xbc, 0xba, 0x76, 0xb9, 0xfa, 0xcd, 0xe7, 0xdf, 0x2d, 0x5c, 0xf8, 0xe6, 0xbb, 0x85, 0x0b, 0xdf, + 0x7e, 0xb7, 0x70, 0xe1, 0x0f, 0x8e, 0x17, 0xa4, 0xe7, 0xc7, 0x0b, 0xd2, 0x37, 0xc7, 0x0b, 0xd2, + 0xb7, 0xc7, 0x0b, 0xd2, 0x7f, 0x1e, 0x2f, 0x48, 0x7f, 0xfa, 0xfd, 0xc2, 0x85, 0xf7, 0x8b, 0x02, + 0xee, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x98, 0xf4, 0xad, 0x8a, 0xba, 0x3e, 0x00, 0x00, +} + +func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllowedFlexVolume) Marshal() (dAtA 
[]byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + } + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x48 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + i-- + dAtA[i] = 0x40 + i = encodeVarintGenerated(dAtA, i, 
uint64(m.NumberAvailable)) + i-- + dAtA[i] = 0x38 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + { + size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.UpdatedAnnotations) > 0 { + keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) + for k := range m.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForUpdatedAnnotations[iNdEx]) + copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ProgressDeadlineSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) + i-- + dAtA[i] = 0x48 + } + if m.RollbackTo != nil { + { + size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + i-- + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + if m.RevisionHistoryLimit != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) + i-- + dAtA[i] = 0x30 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x28 + { + size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CollisionCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + i-- + dAtA[i] = 0x40 + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x38 + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RollingUpdate != nil { + { + size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err 
+ } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PathType != nil { + i -= len(*m.PathType) + copy(dAtA[i:], *m.PathType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *HostPortRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 
+ i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *IDRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *IPBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Ingress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= 
len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HTTP != nil { + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IngressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IngressClassName != nil { + i -= len(*m.IngressClassName) + copy(dAtA[i:], *m.IngressClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.TLS) > 0 { + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IngressStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressTLS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) 
+ _ = i + var l int + _ = l + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Egress) > 0 { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { 
+ size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i-- + if 
m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x2a + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + { + size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + i-- + dAtA[i] = 0x20 + { + size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Replicas != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Conditions) > 0 { + for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] 
= 0xa + } + return len(dAtA) - i, nil +} + +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxSurge != nil { + { + size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MaxUnavailable != nil { + { + size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], 
*m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scale) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scale) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.TargetSelector) + copy(dAtA[i:], m.TargetSelector) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i-- + dAtA[i] = 0x1a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.Selector[string(keysForSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForSelector[iNdEx]) + copy(dAtA[i:], keysForSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathPrefix) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *DaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DaemonSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + 
for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.UpdateStrategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + n += 1 + sovGenerated(uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + return n +} + +func (m *DaemonSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) + n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberReady)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) + n += 1 + sovGenerated(uint64(m.NumberAvailable)) + n += 1 + sovGenerated(uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DaemonSetUpdateStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastUpdateTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *DeploymentRollback) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.UpdatedAnnotations) > 0 { + for k, v := range m.UpdatedAnnotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeploymentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Strategy.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) + } + n += 2 + if m.RollbackTo != nil { + l = m.RollbackTo.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ProgressDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) + } + return n +} + +func (m *DeploymentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + n += 1 + sovGenerated(uint64(*m.CollisionCount)) + } + return n +} + +func (m *DeploymentStrategy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if m.RollingUpdate != nil { + l = m.RollingUpdate.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } + var l 
int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.IngressClassName != nil { + l = len(*m.IngressClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func 
(m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.HostPorts) > 0 { + for _, e := range m.HostPorts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + l = m.SELinux.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.DefaultAllowPrivilegeEscalation != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 3 + } + if len(m.AllowedHostPaths) > 0 { + for _, e := range m.AllowedHostPaths { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedProcMountTypes) > 0 { + for _, s := range m.AllowedProcMountTypes { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedCSIDrivers) > 0 { + for _, e := range m.AllowedCSIDrivers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ReplicaSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetCondition) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ReplicaSetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ReplicaSetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n +} + +func (m *ReplicaSetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RollbackConfig) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Revision)) + return n +} + +func (m *RollingUpdateDaemonSet) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RollingUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxSurge != nil { + l = m.MaxSurge.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = 
m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scale) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ScaleSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + return n +} + +func (m *ScaleStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + l = len(m.TargetSelector) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllowedCSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedCSIDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedFlexVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedHostPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedHostPath{`, + `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]DaemonSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," + } + 
repeatedStringForItems += "}" + s := strings.Join([]string{`&DaemonSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetSpec{`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]DaemonSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DaemonSetStatus{`, + `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, + `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, + `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, + `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, + `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, + `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *DaemonSetUpdateStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Deployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Deployment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + 
`LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Deployment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&DeploymentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentRollback) String() string { + if this == nil { + return "nil" + } + keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) + for k := range this.UpdatedAnnotations { + keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) + mapStringForUpdatedAnnotations := "map[string]string{" + for _, k := range keysForUpdatedAnnotations { + mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) + } + mapStringForUpdatedAnnotations += "}" + s := strings.Join([]string{`&DeploymentRollback{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, + `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, + `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, + `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStatus) String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]DeploymentCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&DeploymentStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `UnavailableReplicas:` + 
fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, + `}`, + }, "") + return s +} +func (this *DeploymentStrategy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeploymentStrategy{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s +} +func (this *HostPortRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPortRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *IPBlock) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += 
strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), 
"LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodSecurityPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" + s := strings.Join([]string{`&PodSecurityPolicySpec{`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, + `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + 
strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, + `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, + `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, + `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSet{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetCondition) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ReplicaSet{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ReplicaSetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ReplicaSetSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicaSetStatus) 
String() string { + if this == nil { + return "nil" + } + repeatedStringForConditions := "[]ReplicaSetCondition{" + for _, f := range this.Conditions { + repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," + } + repeatedStringForConditions += "}" + s := strings.Join([]string{`&ReplicaSetStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + repeatedStringForConditions + `,`, + `}`, + }, "") + return s +} +func (this *RollbackConfig) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDaemonSet) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RollingUpdateDeployment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RunAsUserStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scale) String() string { + if 
this == nil { + return "nil" + } + s := strings.Join([]string{`&Scale{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ScaleSpec{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `}`, + }, "") + return s +} +func (this *ScaleStatus) String() string { + if this == nil { + return "nil" + } + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) + } + mapStringForSelector += "}" + s := strings.Join([]string{`&ScaleStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PathPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, DaemonSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field 
TemplateGeneration", wireType) + } + m.TemplateGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.TemplateGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) + } + m.CurrentNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) + } + m.NumberMisscheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberMisscheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) + } + m.DesiredNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) + } + m.NumberReady = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberReady |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) 
+ } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) + } + m.UpdatedNumberScheduled = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedNumberScheduled |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) + } + m.NumberAvailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberAvailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) + } + m.NumberUnavailable = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NumberUnavailable |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DaemonSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDaemonSet{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Deployment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.UpdatedAnnotations == nil { + m.UpdatedAnnotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.UpdatedAnnotations[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RevisionHistoryLimit = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Paused = bool(v != 0) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollbackTo == nil { + m.RollbackTo = &RollbackConfig{} + } + if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ProgressDeadlineSeconds = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) + } + m.UpdatedReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UpdatedReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) + } + m.UnavailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.UnavailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, DeploymentCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.CollisionCount = &v + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RollingUpdate == nil { + m.RollingUpdate = &RollingUpdateDeployment{} + } + if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPortRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IPBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + 
} + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { 
+ return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSecurityPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostPorts = append(m.HostPorts, HostPortRange{}) + if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DefaultAllowPrivilegeEscalation = &b + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) + if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ReplicaSet{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Replicas = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) + } + m.MinReadySeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MinReadySeconds |= int32(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) + } + m.FullyLabeledReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.FullyLabeledReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) + } + m.ReadyReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadyReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) + } + m.AvailableReplicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.AvailableReplicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Conditions = append(m.Conditions, ReplicaSetCondition{}) + if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + } + m.Revision = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Revision |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := 
m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxSurge == nil { + m.MaxSurge = &intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire 
& 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = &v11.SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scale) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scale: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ScaleStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) + } + m.Replicas = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Replicas |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Selector[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TargetSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, 
IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto new file mode 100644 index 0000000..ef8367e --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -0,0 +1,1234 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.extensions.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". 
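The generated Unmarshal and skipGenerated functions above all read protobuf varints with the same loop: each byte carries seven payload bits and a set high bit (0x80) signals that another byte follows. As a standalone sketch of that pattern (illustrative only, not part of the vendored file):

package main

import "fmt"

// decodeUvarint mirrors the 7-bit loop used by the generated Unmarshal and
// skipGenerated functions: each byte contributes its low seven bits, and a
// set high bit (0x80) means another byte follows.
func decodeUvarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, fmt.Errorf("integer overflow")
		}
		if n >= len(data) {
			return 0, 0, fmt.Errorf("unexpected EOF")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
}

func main() {
	// 0xAC 0x02 encodes 300: 0x2C plus 0x02<<7.
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}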
+option go_package = "v1beta1"; + +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +message AllowedCSIDriver { + // Name is the registered name of the CSI driver + optional string name = 1; +} + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +// Deprecated: use AllowedFlexVolume from policy API Group instead. +message AllowedFlexVolume { + // driver is the name of the Flexvolume driver. + optional string driver = 1; +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +// Deprecated: use AllowedHostPath from policy API Group instead. +message AllowedHostPath { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. + // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + optional string pathPrefix = 1; + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. + // +optional + optional bool readOnly = 2; +} + +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for +// more information. +// DaemonSet represents the configuration of a daemon set. +message DaemonSet { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional DaemonSetSpec spec = 2; + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional DaemonSetStatus status = 3; +} + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +message DaemonSetCondition { + // Type of DaemonSet condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // Last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// DaemonSetList is a collection of daemon sets. +message DaemonSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // A list of daemon sets. + repeated DaemonSet items = 2; +} + +// DaemonSetSpec is the specification of a daemon set. +message DaemonSetSpec { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. 
+ // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + optional k8s.io.api.core.v1.PodTemplateSpec template = 2; + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + optional DaemonSetUpdateStrategy updateStrategy = 3; + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + optional int32 minReadySeconds = 4; + + // DEPRECATED. + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + optional int64 templateGeneration = 5; + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + optional int32 revisionHistoryLimit = 6; +} + +// DaemonSetStatus represents the current status of a daemon set. +message DaemonSetStatus { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 currentNumberScheduled = 1; + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 numberMisscheduled = 2; + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + optional int32 desiredNumberScheduled = 3; + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + optional int32 numberReady = 4; + + // The most recent generation observed by the daemon set controller. + // +optional + optional int64 observedGeneration = 5; + + // The total number of nodes that are running updated daemon pod + // +optional + optional int32 updatedNumberScheduled = 6; + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberAvailable = 7; + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + optional int32 numberUnavailable = 8; + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + optional int32 collisionCount = 9; + + // Represents the latest available observations of a DaemonSet's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated DaemonSetCondition conditions = 10; +} + +message DaemonSetUpdateStrategy { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if type = "RollingUpdate". + // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + optional RollingUpdateDaemonSet rollingUpdate = 2; +} + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. +// Deployment enables declarative updates for Pods and ReplicaSets. +message Deployment { + // Standard object metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the Deployment. + // +optional + optional DeploymentSpec spec = 2; + + // Most recently observed status of the Deployment. + // +optional + optional DeploymentStatus status = 3; +} + +// DeploymentCondition describes the state of a deployment at a certain point. +message DeploymentCondition { + // Type of deployment condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time this condition was updated. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; + + // Last time the condition transitioned from one status to another. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; + + // The reason for the condition's last transition. + optional string reason = 4; + + // A human readable message indicating details about the transition. + optional string message = 5; +} + +// DeploymentList is a list of Deployments. +message DeploymentList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Deployments. + repeated Deployment items = 2; +} + +// DEPRECATED. +// DeploymentRollback stores the information required to rollback a deployment. +message DeploymentRollback { + // Required: This must match the Name of a deployment. + optional string name = 1; + + // The annotations to be updated to a deployment + // +optional + map updatedAnnotations = 2; + + // The config of this deployment rollback. + optional RollbackConfig rollbackTo = 3; +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +message DeploymentSpec { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + optional int32 replicas = 1; + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template describes the pods that will be created. + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; + + // The deployment strategy to use to replace existing pods with new ones. 
+ // +optional + // +patchStrategy=retainKeys + optional DeploymentStrategy strategy = 4; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 5; + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // This is set to the max value of int32 (i.e. 2147483647) by default, which + // means "retaining all old RelicaSets". + // +optional + optional int32 revisionHistoryLimit = 6; + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + optional bool paused = 7; + + // DEPRECATED. + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + optional RollbackConfig rollbackTo = 8; + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. This is set to + // the max value of int32 (i.e. 2147483647) by default, which means "no deadline". + // +optional + optional int32 progressDeadlineSeconds = 9; +} + +// DeploymentStatus is the most recently observed status of the Deployment. +message DeploymentStatus { + // The generation observed by the deployment controller. + // +optional + optional int64 observedGeneration = 1; + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + optional int32 replicas = 2; + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + optional int32 updatedReplicas = 3; + + // Total number of ready pods targeted by this deployment. + // +optional + optional int32 readyReplicas = 7; + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + optional int32 availableReplicas = 4; + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + optional int32 unavailableReplicas = 5; + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + repeated DeploymentCondition conditions = 6; + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + optional int32 collisionCount = 8; +} + +// DeploymentStrategy describes how to replace existing pods with new ones. +message DeploymentStrategy { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + optional string type = 1; + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. 
+ // --- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + optional RollingUpdateDeployment rollingUpdate = 2; +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +// Deprecated: use FSGroupStrategyOptions from policy API Group instead. +message FSGroupStrategyOptions { + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + optional string rule = 1; + + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// HTTPIngressPath associates a path with a backend. Incoming urls matching the +// path are forwarded to the backend. +message HTTPIngressPath { + // Path is matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" part of a URL + // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, + // all paths from incoming requests are matched. + // +optional + optional string path = 1; + + // PathType determines the interpretation of the Path matching. PathType can + // be one of the following values: + // * Exact: Matches the URL path exactly. + // * Prefix: Matches based on a URL path prefix split by '/'. Matching is + // done on a path element by element basis. A path element refers is the + // list of labels in the path split by the '/' separator. A request is a + // match for path p if every p is an element-wise prefix of p of the + // request path. Note that if the last element of the path is a substring + // of the last element in request path, it is not a match (e.g. /foo/bar + // matches /foo/bar/baz, but does not match /foo/barbaz). + // * ImplementationSpecific: Interpretation of the Path matching is up to + // the IngressClass. Implementations can treat this as a separate PathType + // or treat it identically to Prefix or Exact path types. + // Implementations are required to support all path types. + // Defaults to ImplementationSpecific. + optional string pathType = 3; + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + optional IngressBackend backend = 2; +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +message HTTPIngressRuleValue { + // A collection of paths that map requests to backends. + repeated HTTPIngressPath paths = 1; +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +// Deprecated: use HostPortRange from policy API Group instead. +message HostPortRange { + // min is the start of the range, inclusive. + optional int32 min = 1; + + // max is the end of the range, inclusive. + optional int32 max = 2; +} + +// IDRange provides a min/max of an allowed range of IDs. +// Deprecated: use IDRange from policy API Group instead. +message IDRange { + // min is the start of the range, inclusive. + optional int64 min = 1; + + // max is the end of the range, inclusive. 
+ optional int64 max = 2; +} + +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs +// that should not be included within this rule. +message IPBlock { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + optional string cidr = 1; + + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Except values will be rejected if they are outside the CIDR range + // +optional + repeated string except = 2; +} + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. +message Ingress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the desired state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IngressSpec spec = 2; + + // Status is the current state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IngressStatus status = 3; +} + +// IngressBackend describes all endpoints for a given service and port. +message IngressBackend { + // Specifies the name of the referenced service. + // +optional + optional string serviceName = 1; + + // Specifies the port of the referenced service. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + + // Resource is an ObjectRef to another Kubernetes resource in the namespace + // of the Ingress object. If resource is specified, serviceName and servicePort + // must not be specified. + // +optional + optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; +} + +// IngressList is a collection of Ingress. +message IngressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Ingress. + repeated Ingress items = 2; +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +message IngressRule { + // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // Note the following deviations from the "host" part of the + // URI as defined in RFC 3986: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to + // the IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. 
+ // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the + // IngressRuleValue. If the host is unspecified, the Ingress routes all + // traffic based on the specified IngressRuleValue. + // + // Host can be "precise" which is a domain name without the terminating dot of + // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name + // prefixed with a single wildcard label (e.g. "*.foo.com"). + // The wildcard character '*' must appear by itself as the first DNS label and + // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). + // Requests will be matched against the Host field in the following way: + // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If Host is a wildcard, then the request matches this rule if the http host header + // is to equal to the suffix (removing the first label) of the wildcard rule. + // +optional + optional string host = 1; + + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + optional IngressRuleValue ingressRuleValue = 2; +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +message IngressRuleValue { + // +optional + optional HTTPIngressRuleValue http = 1; +} + +// IngressSpec describes the Ingress the user wishes to exist. +message IngressSpec { + // IngressClassName is the name of the IngressClass cluster resource. The + // associated IngressClass defines which controller will implement the + // resource. This replaces the deprecated `kubernetes.io/ingress.class` + // annotation. For backwards compatibility, when that annotation is set, it + // must be given precedence over this field. The controller may emit a + // warning if the field and annotation have different values. + // Implementations of this API should ignore Ingresses without a class + // specified. An IngressClass resource may be marked as default, which can + // be used to set a default value for this field. For more information, + // refer to the IngressClass documentation. + // +optional + optional string ingressClassName = 4; + + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + optional IngressBackend backend = 1; + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + repeated IngressTLS tls = 2; + + // A list of host rules used to configure the Ingress. 
If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + repeated IngressRule rules = 3; +} + +// IngressStatus describe the current state of the Ingress. +message IngressStatus { + // LoadBalancer contains the current status of the load-balancer. + // +optional + optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; +} + +// IngressTLS describes the transport layer security associated with an Ingress. +message IngressTLS { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + repeated string hosts = 1; + + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + optional string secretName = 2; +} + +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. +// NetworkPolicy describes what network traffic is allowed for a set of Pods +message NetworkPolicy { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + optional NetworkPolicySpec spec = 2; +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +message NetworkPolicyEgressRule { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + repeated NetworkPolicyPort ports = 1; + + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + repeated NetworkPolicyPeer to = 2; +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +message NetworkPolicyIngressRule { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. 
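Ports and peers in these ingress and egress rules are independent lists, each combined with a logical OR, and traffic must satisfy both lists. A hypothetical example built from the Go types vendored in this patch, allowing role=api pods to reach role=db pods on port 6379 (names are illustrative):

package main

import (
	"fmt"

	extsv1beta1 "k8s.io/api/extensions/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Hypothetical policy: pods labelled role=db accept traffic on port 6379
	// only from pods labelled role=api in the same namespace.
	port := intstr.FromInt(6379)
	policy := extsv1beta1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "allow-api-to-db"},
		Spec: extsv1beta1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{MatchLabels: map[string]string{"role": "db"}},
			Ingress: []extsv1beta1.NetworkPolicyIngressRule{{
				// Ports and From are each ORed internally; matching traffic
				// must hit at least one entry from each list.
				Ports: []extsv1beta1.NetworkPolicyPort{{Port: &port}},
				From: []extsv1beta1.NetworkPolicyPeer{{
					PodSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"role": "api"}},
				}},
			}},
		},
	}
	fmt.Println(policy.Name)
}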
+ // If this field is empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // +optional + repeated NetworkPolicyPort ports = 1; + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). + // If this field is present and contains at least one item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // +optional + repeated NetworkPolicyPeer from = 2; +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. +// Network Policy List is a list of NetworkPolicy objects. +message NetworkPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated NetworkPolicy items = 2; +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. +message NetworkPolicyPeer { + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + // + // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + + // IPBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + optional IPBlock ipBlock = 3; +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. +message NetworkPolicyPort { + // Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + optional string protocol = 1; + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; +} + +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. +message NetworkPolicySpec { + // Selects the pods to which this NetworkPolicy object applies. 
The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. + // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // List of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not allow any traffic + // (and serves solely to ensure that the pods it selects are isolated by default). + // +optional + repeated NetworkPolicyIngressRule ingress = 2; + + // List of egress rules to be applied to the selected pods. Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + repeated NetworkPolicyEgressRule egress = 3; + + // List of rule types that the NetworkPolicy relates to. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + repeated string policyTypes = 4; +} + +// PodSecurityPolicy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +// Deprecated: use PodSecurityPolicy from policy API Group instead. +message PodSecurityPolicy { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the policy enforced. + // +optional + optional PodSecurityPolicySpec spec = 2; +} + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +// Deprecated: use PodSecurityPolicyList from policy API Group instead. +message PodSecurityPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. 
+ repeated PodSecurityPolicy items = 2; +} + +// PodSecurityPolicySpec defines the policy enforced. +// Deprecated: use PodSecurityPolicySpec from policy API Group instead. +message PodSecurityPolicySpec { + // privileged determines if a pod can request to be run as privileged. + // +optional + optional bool privileged = 1; + + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. + // +optional + repeated string defaultAddCapabilities = 2; + + // requiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + repeated string requiredDropCapabilities = 3; + + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. + // +optional + repeated string allowedCapabilities = 4; + + // volumes is a white list of allowed volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. + // +optional + repeated string volumes = 5; + + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + optional bool hostNetwork = 6; + + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + repeated HostPortRange hostPorts = 7; + + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + optional bool hostPID = 8; + + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + optional bool hostIPC = 9; + + // seLinux is the strategy that will dictate the allowable labels that may be set. + optional SELinuxStrategyOptions seLinux = 10; + + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + optional RunAsUserStrategyOptions runAsUser = 11; + + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + optional RunAsGroupStrategyOptions runAsGroup = 22; + + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + optional SupplementalGroupsStrategyOptions supplementalGroups = 12; + + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + optional FSGroupStrategyOptions fsGroup = 13; + + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + optional bool readOnlyRootFilesystem = 14; + + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. 
+ // +optional + optional bool defaultAllowPrivilegeEscalation = 15; + + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + optional bool allowPrivilegeEscalation = 16; + + // allowedHostPaths is a white list of allowed host paths. Empty indicates + // that all host paths may be used. + // +optional + repeated AllowedHostPath allowedHostPaths = 17; + + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + repeated AllowedFlexVolume allowedFlexVolumes = 18; + + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // +optional + repeated AllowedCSIDriver allowedCSIDrivers = 23; + + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + repeated string allowedUnsafeSysctls = 19; + + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + repeated string forbiddenSysctls = 20; + + // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. + // Empty or nil indicates that only the DefaultProcMountType may be used. + // This requires the ProcMountType feature flag to be enabled. + // +optional + repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; +} + +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +message ReplicaSet { + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetSpec spec = 2; + + // Status is the most recently observed status of the ReplicaSet. 
+ // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional ReplicaSetStatus status = 3; +} + +// ReplicaSetCondition describes the state of a replica set at a certain point. +message ReplicaSetCondition { + // Type of replica set condition. + optional string type = 1; + + // Status of the condition, one of True, False, Unknown. + optional string status = 2; + + // The last time the condition transitioned from one status to another. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; + + // The reason for the condition's last transition. + // +optional + optional string reason = 4; + + // A human readable message indicating details about the transition. + // +optional + optional string message = 5; +} + +// ReplicaSetList is a collection of ReplicaSets. +message ReplicaSetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + repeated ReplicaSet items = 2; +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +message ReplicaSetSpec { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + optional int32 replicas = 1; + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + optional int32 minReadySeconds = 4; + + // Selector is a label query over pods that should match the replica count. + // If the selector is empty, it is defaulted to the labels present on the pod template. + // Label keys and values that must match in order to be controlled by this replica set. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + optional k8s.io.api.core.v1.PodTemplateSpec template = 3; +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +message ReplicaSetStatus { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + optional int32 replicas = 1; + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + optional int32 fullyLabeledReplicas = 2; + + // The number of ready replicas for this replica set. + // +optional + optional int32 readyReplicas = 4; + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. 
+ // +optional + optional int32 availableReplicas = 5; + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + optional int64 observedGeneration = 3; + + // Represents the latest available observations of a replica set's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + repeated ReplicaSetCondition conditions = 6; +} + +// DEPRECATED. +message RollbackConfig { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + optional int64 revision = 1; +} + +// Spec to control the desired behavior of daemon set rolling update. +message RollingUpdateDaemonSet { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; +} + +// Spec to control the desired behavior of rolling update. +message RollingUpdateDeployment { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; +} + +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. 
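The RollingUpdateDeployment percentages described above round in opposite directions: maxUnavailable is rounded down from the percentage of desired pods, while maxSurge is rounded up. A small illustrative calculation for a hypothetical Deployment of 10 desired pods at the 30% figures used in those comments:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Hypothetical Deployment of 10 desired pods with both settings at 30%:
	// maxUnavailable rounds down, maxSurge rounds up.
	desired := 10.0
	maxUnavailable := math.Floor(desired * 0.30) // 3 -> at least 7 pods stay available
	maxSurge := math.Ceil(desired * 0.30)        // 3 -> at most 13 pods exist at once

	fmt.Printf("minimum available during update: %v\n", desired-maxUnavailable)
	fmt.Printf("maximum total pods during update: %v\n", desired+maxSurge)
}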
+message RunAsGroupStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. +message RunAsUserStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + optional string defaultRuntimeClassName = 2; +} + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use SELinuxStrategyOptions from policy API Group instead. +message SELinuxStrategyOptions { + // rule is the strategy that will dictate the allowable labels that may be set. + optional string rule = 1; + + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; +} + +// represents a scaling request for a resource. +message Scale { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + optional ScaleSpec spec = 2; + + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + optional ScaleStatus status = 3; +} + +// describes the attributes of a scale subresource +message ScaleSpec { + // desired number of instances for the scaled object. + // +optional + optional int32 replicas = 1; +} + +// represents the current status of a scale subresource. +message ScaleStatus { + // actual number of observed instances of the scaled object. + optional int32 replicas = 1; + + // label query over pods that should match the replicas count. 
More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + map selector = 2; + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + optional string targetSelector = 3; +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. +message SupplementalGroupsStrategyOptions { + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + optional string rule = 1; + + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go new file mode 100644 index 0000000..c69eff0 --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -0,0 +1,65 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "extensions" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
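Consumers typically wire these kinds into a runtime.Scheme through the AddToScheme helper declared above. A minimal sketch, assuming only the vendored k8s.io/api and k8s.io/apimachinery packages added by this patch:

package main

import (
	"fmt"

	extsv1beta1 "k8s.io/api/extensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the extensions/v1beta1 kinds with a fresh scheme via the
	// package's AddToScheme helper.
	scheme := runtime.NewScheme()
	if err := extsv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	gvk := extsv1beta1.SchemeGroupVersion.WithKind("Deployment")
	fmt.Println(scheme.Recognizes(gvk)) // true
}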
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Deployment{}, + &DeploymentList{}, + &DeploymentRollback{}, + &Scale{}, + &DaemonSetList{}, + &DaemonSet{}, + &Ingress{}, + &IngressList{}, + &ReplicaSet{}, + &ReplicaSetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go new file mode 100644 index 0000000..8934c06 --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -0,0 +1,1465 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + appsv1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// describes the attributes of a scale subresource +type ScaleSpec struct { + // desired number of instances for the scaled object. + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` +} + +// represents the current status of a scale subresource. +type ScaleStatus struct { + // actual number of observed instances of the scaled object. + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors + // +optional + Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` + + // label selector for pods that should match the replicas count. This is a serializated + // version of both map-based and more expressive set-based selectors. This is done to + // avoid introspection in the clients. The string will be in the same format as the + // query-param syntax. If the target type only supports map-based selectors, both this + // field and map-based selector field are populated. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// represents a scaling request for a resource. +type Scale struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // +optional + Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // +optional + Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for +// more information. +// Deployment enables declarative updates for Pods and ReplicaSets. +type Deployment struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the Deployment. + // +optional + Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Most recently observed status of the Deployment. + // +optional + Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// DeploymentSpec is the specification of the desired behavior of the Deployment. +type DeploymentSpec struct { + // Number of desired pods. This is a pointer to distinguish between explicit + // zero and not specified. Defaults to 1. + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Label selector for pods. Existing ReplicaSets whose pods are + // selected by this will be the ones affected by this deployment. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template describes the pods that will be created. + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` + + // The deployment strategy to use to replace existing pods with new ones. + // +optional + // +patchStrategy=retainKeys + Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` + + // The number of old ReplicaSets to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // This is set to the max value of int32 (i.e. 2147483647) by default, which + // means "retaining all old RelicaSets". + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` + + // Indicates that the deployment is paused and will not be processed by the + // deployment controller. + // +optional + Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` + + // DEPRECATED. + // The config this deployment is rolling back to. Will be cleared after rollback is done. + // +optional + RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` + + // The maximum time in seconds for a deployment to make progress before it + // is considered to be failed. 
The deployment controller will continue to + // process failed deployments and a condition with a ProgressDeadlineExceeded + // reason will be surfaced in the deployment status. Note that progress will + // not be estimated during the time a deployment is paused. This is set to + // the max value of int32 (i.e. 2147483647) by default, which means "no deadline". + // +optional + ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED. +// DeploymentRollback stores the information required to rollback a deployment. +type DeploymentRollback struct { + metav1.TypeMeta `json:",inline"` + // Required: This must match the Name of a deployment. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The annotations to be updated to a deployment + // +optional + UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` + // The config of this deployment rollback. + RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` +} + +// DEPRECATED. +type RollbackConfig struct { + // The revision to rollback to. If set to 0, rollback to the last revision. + // +optional + Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` +} + +const ( + // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added + // to existing RCs (and label key that is added to its pods) to prevent the existing RCs + // to select new pods (and old pods being select by new RC). + DefaultDeploymentUniqueLabelKey string = "pod-template-hash" +) + +// DeploymentStrategy describes how to replace existing pods with new ones. +type DeploymentStrategy struct { + // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. + // +optional + Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` + + // Rolling update config params. Present only if DeploymentStrategyType = + // RollingUpdate. + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. + // +optional + RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DeploymentStrategyType string + +const ( + // Kill all existing pods before creating new ones. + RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" + + // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. + RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" +) + +// Spec to control the desired behavior of rolling update. +type RollingUpdateDeployment struct { + // The maximum number of pods that can be unavailable during the update. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // Absolute number is calculated from percentage by rounding down. + // This can not be 0 if MaxSurge is 0. + // By default, a fixed value of 1 is used. + // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods + // immediately when the rolling update starts. 
Once new pods are ready, old RC + // can be scaled down further, followed by scaling up the new RC, ensuring + // that the total number of pods available at all times during the update is at + // least 70% of desired pods. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` + + // The maximum number of pods that can be scheduled above the desired number of + // pods. + // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). + // This can not be 0 if MaxUnavailable is 0. + // Absolute number is calculated from percentage by rounding up. + // By default, a value of 1 is used. + // Example: when this is set to 30%, the new RC can be scaled up immediately when + // the rolling update starts, such that the total number of old and new pods do not exceed + // 130% of desired pods. Once old pods have been killed, + // new RC can be scaled up further, ensuring that total number of pods running + // at any time during the update is at most 130% of desired pods. + // +optional + MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` +} + +// DeploymentStatus is the most recently observed status of the Deployment. +type DeploymentStatus struct { + // The generation observed by the deployment controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // Total number of non-terminated pods targeted by this deployment (their labels match the selector). + // +optional + Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` + + // Total number of non-terminated pods targeted by this deployment that have the desired template spec. + // +optional + UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` + + // Total number of ready pods targeted by this deployment. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` + + // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` + + // Total number of unavailable pods targeted by this deployment. This is the total number of + // pods that are still required for the deployment to have 100% available capacity. They may + // either be pods that are running but not yet available or pods that still have not been created. + // +optional + UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` + + // Represents the latest available observations of a deployment's current state. + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` + + // Count of hash collisions for the Deployment. The Deployment controller uses this + // field as a collision avoidance mechanism when it needs to create the name for the + // newest ReplicaSet. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` +} + +type DeploymentConditionType string + +// These are valid conditions of a deployment. +const ( + // Available means the deployment is available, ie. 
at least the minimum available + // replicas required are up and running for at least minReadySeconds. + DeploymentAvailable DeploymentConditionType = "Available" + // Progressing means the deployment is progressing. Progress for a deployment is + // considered when a new replica set is created or adopted, and when new pods scale + // up or old pods scale down. Progress is not estimated for paused deployments or + // when progressDeadlineSeconds is not specified. + DeploymentProgressing DeploymentConditionType = "Progressing" + // ReplicaFailure is added in a deployment when one of its pods fails to be created + // or deleted. + DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" +) + +// DeploymentCondition describes the state of a deployment at a certain point. +type DeploymentCondition struct { + // Type of deployment condition. + Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time this condition was updated. + LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` + // Last time the condition transitioned from one status to another. + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeploymentList is a list of Deployments. +type DeploymentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Deployments. + Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +type DaemonSetUpdateStrategy struct { + // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". + // Default is OnDelete. + // +optional + Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` + + // Rolling update config params. Present only if type = "RollingUpdate". + //--- + // TODO: Update this to follow our convention for oneOf, whatever we decide it + // to be. Same as Deployment `strategy.rollingUpdate`. + // See https://github.com/kubernetes/kubernetes/issues/35345 + // +optional + RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` +} + +type DaemonSetUpdateStrategyType string + +const ( + // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. + RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" + + // Replace the old daemons only when it's killed + OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" +) + +// Spec to control the desired behavior of daemon set rolling update. +type RollingUpdateDaemonSet struct { + // The maximum number of DaemonSet pods that can be unavailable during the + // update. 
Value can be an absolute number (ex: 5) or a percentage of total + // number of DaemonSet pods at the start of the update (ex: 10%). Absolute + // number is calculated from percentage by rounding up. + // This cannot be 0. + // Default value is 1. + // Example: when this is set to 30%, at most 30% of the total number of nodes + // that should be running the daemon pod (i.e. status.desiredNumberScheduled) + // can have their pods stopped for an update at any given + // time. The update starts by stopping at most 30% of those DaemonSet pods + // and then brings up new DaemonSet pods in their place. Once the new pods + // are available, it then proceeds onto other DaemonSet pods, thus ensuring + // that at least 70% of original number of DaemonSet pods are available at + // all times during the update. + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` +} + +// DaemonSetSpec is the specification of a daemon set. +type DaemonSetSpec struct { + // A label query over pods that are managed by the daemon set. + // Must match in order to be controlled. + // If empty, defaulted to labels on Pod template. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + + // An object that describes the pod that will be created. + // The DaemonSet will create exactly one copy of this pod on every node + // that matches the template's node selector (or on every node if no node + // selector is specified). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` + + // An update strategy to replace existing DaemonSet pods with new pods. + // +optional + UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` + + // The minimum number of seconds for which a newly created DaemonSet pod should + // be ready without any of its container crashing, for it to be considered + // available. Defaults to 0 (pod will be considered available as soon as it + // is ready). + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // DEPRECATED. + // A sequence number representing a specific generation of the template. + // Populated by the system. It can be set only during the creation. + // +optional + TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,5,opt,name=templateGeneration"` + + // The number of old history to retain to allow rollback. + // This is a pointer to distinguish between explicit zero and not specified. + // Defaults to 10. + // +optional + RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` +} + +// DaemonSetStatus represents the current status of a daemon set. +type DaemonSetStatus struct { + // The number of nodes that are running at least 1 + // daemon pod and are supposed to run the daemon pod. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` + + // The number of nodes that are running the daemon pod, but are + // not supposed to run the daemon pod. 
+ // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` + + // The total number of nodes that should be running the daemon + // pod (including nodes correctly running the daemon pod). + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ + DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` + + // The number of nodes that should be running the daemon pod and have one + // or more of the daemon pod running and ready. + NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` + + // The most recent generation observed by the daemon set controller. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` + + // The total number of nodes that are running updated daemon pod + // +optional + UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` + + // The number of nodes that should be running the + // daemon pod and have one or more of the daemon pod running and + // available (ready for at least spec.minReadySeconds) + // +optional + NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` + + // The number of nodes that should be running the + // daemon pod and have none of the daemon pod running and available + // (ready for at least spec.minReadySeconds) + // +optional + NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` + + // Count of hash collisions for the DaemonSet. The DaemonSet controller + // uses this field as a collision avoidance mechanism when it needs to + // create the name for the newest ControllerRevision. + // +optional + CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` + + // Represents the latest available observations of a DaemonSet's current state. + // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` +} + +type DaemonSetConditionType string + +// TODO: Add valid condition types of a DaemonSet. + +// DaemonSetCondition describes the state of a DaemonSet at a certain point. +type DaemonSetCondition struct { + // Type of DaemonSet condition. + Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // Last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. 
+ // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for +// more information. +// DaemonSet represents the configuration of a daemon set. +type DaemonSet struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The desired behavior of this daemon set. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // The current status of this daemon set. This data may be + // out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +const ( + // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. + // DaemonSetTemplateGenerationKey is the key of the labels that is added + // to daemon set pods to distinguish between old and new pod templates + // during DaemonSet template update. + DaemonSetTemplateGenerationKey string = "pod-template-generation" + + // DefaultDaemonSetUniqueLabelKey is the default label key that is added + // to existing DaemonSet pods to distinguish between old and new + // DaemonSet pods during DaemonSet template updates. + DefaultDaemonSetUniqueLabelKey = appsv1beta1.ControllerRevisionHashLabelKey +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DaemonSetList is a collection of daemon sets. +type DaemonSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // A list of daemon sets. + Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. +type Ingress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is the desired state of the Ingress. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Ingress. + Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // IngressClassName is the name of the IngressClass cluster resource. The + // associated IngressClass defines which controller will implement the + // resource. This replaces the deprecated `kubernetes.io/ingress.class` + // annotation. For backwards compatibility, when that annotation is set, it + // must be given precedence over this field. The controller may emit a + // warning if the field and annotation have different values. + // Implementations of this API should ignore Ingresses without a class + // specified. An IngressClass resource may be marked as default, which can + // be used to set a default value for this field. For more information, + // refer to the IngressClass documentation. + // +optional + IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` + + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. +type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. 
+ // +optional + Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` + // SecretName is the name of the secret used to terminate SSL traffic on 443. + // Field is left optional to allow SSL routing based on SNI hostname alone. + // If the SNI host in a listener conflicts with the "Host" header field used + // by an IngressRule, the SNI host is used for termination and value of the + // Host header is used for routing. + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // Note the following deviations from the "host" part of the + // URI as defined in RFC 3986: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to + // the IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the + // IngressRuleValue. If the host is unspecified, the Ingress routes all + // traffic based on the specified IngressRuleValue. + // + // Host can be "precise" which is a domain name without the terminating dot of + // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name + // prefixed with a single wildcard label (e.g. "*.foo.com"). + // The wildcard character '*' must appear by itself as the first DNS label and + // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). + // Requests will be matched against the Host field in the following way: + // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If Host is a wildcard, then the request matches this rule if the http host header + // is to equal to the suffix (removing the first label) of the wildcard rule. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. 
Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// PathType represents the type of path referred to by a HTTPIngressPath. +type PathType string + +const ( + // PathTypeExact matches the URL path exactly and with case sensitivity. + PathTypeExact = PathType("Exact") + + // PathTypePrefix matches based on a URL path prefix split by '/'. Matching + // is case sensitive and done on a path element by element basis. A path + // element refers to the list of labels in the path split by the '/' + // separator. A request is a match for path p if every p is an element-wise + // prefix of p of the request path. Note that if the last element of the + // path is a substring of the last element in request path, it is not a + // match (e.g. /foo/bar matches /foo/bar/baz, but does not match + // /foo/barbaz). If multiple matching paths exist in an Ingress spec, the + // longest matching path is given priority. + // Examples: + // - /foo/bar does not match requests to /foo/barbaz + // - /foo/bar matches request to /foo/bar and /foo/bar/baz + // - /foo and /foo/ both match requests to /foo and /foo/. If both paths are + // present in an Ingress spec, the longest matching path (/foo/) is given + // priority. + PathTypePrefix = PathType("Prefix") + + // PathTypeImplementationSpecific matching is up to the IngressClass. + // Implementations can treat this as a separate PathType or treat it + // identically to Prefix or Exact path types. + PathTypeImplementationSpecific = PathType("ImplementationSpecific") +) + +// HTTPIngressPath associates a path with a backend. Incoming urls matching the +// path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" part of a URL + // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, + // all paths from incoming requests are matched. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + + // PathType determines the interpretation of the Path matching. PathType can + // be one of the following values: + // * Exact: Matches the URL path exactly. + // * Prefix: Matches based on a URL path prefix split by '/'. Matching is + // done on a path element by element basis. A path element refers is the + // list of labels in the path split by the '/' separator. A request is a + // match for path p if every p is an element-wise prefix of p of the + // request path. 
Note that if the last element of the path is a substring + // of the last element in request path, it is not a match (e.g. /foo/bar + // matches /foo/bar/baz, but does not match /foo/barbaz). + // * ImplementationSpecific: Interpretation of the Path matching is up to + // the IngressClass. Implementations can treat this as a separate PathType + // or treat it identically to Prefix or Exact path types. + // Implementations are required to support all path types. + // Defaults to ImplementationSpecific. + PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + // +optional + ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` + + // Specifies the port of the referenced service. + // +optional + ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` + + // Resource is an ObjectRef to another Kubernetes resource in the namespace + // of the Ingress object. If resource is specified, serviceName and servicePort + // must not be specified. + // +optional + Resource *v1.TypedLocalObjectReference `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"` +} + +// +genclient +// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for +// more information. +// ReplicaSet ensures that a specified number of pod replicas are running at any given time. +type ReplicaSet struct { + metav1.TypeMeta `json:",inline"` + + // If the Labels of a ReplicaSet are empty, they are defaulted to + // be the same as the Pod(s) that the ReplicaSet manages. + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec defines the specification of the desired behavior of the ReplicaSet. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the most recently observed status of the ReplicaSet. + // This data may be out of date by some window of time. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ReplicaSetList is a collection of ReplicaSets. +type ReplicaSetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of ReplicaSets. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller + Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ReplicaSetSpec is the specification of a ReplicaSet. +type ReplicaSetSpec struct { + // Replicas is the number of desired replicas. + // This is a pointer to distinguish between explicit zero and unspecified. + // Defaults to 1. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + // +optional + Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` + + // Minimum number of seconds for which a newly created pod should be ready + // without any of its container crashing, for it to be considered available. + // Defaults to 0 (pod will be considered available as soon as it is ready) + // +optional + MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` + + // Selector is a label query over pods that should match the replica count. + // If the selector is empty, it is defaulted to the labels present on the pod template. + // Label keys and values that must match in order to be controlled by this replica set. + // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // Template is the object that describes the pod that will be created if + // insufficient replicas are detected. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template + // +optional + Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` +} + +// ReplicaSetStatus represents the current status of a ReplicaSet. +type ReplicaSetStatus struct { + // Replicas is the most recently oberved number of replicas. + // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller + Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` + + // The number of pods that have labels matching the labels of the pod template of the replicaset. + // +optional + FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` + + // The number of ready replicas for this replica set. + // +optional + ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` + + // The number of available replicas (ready for at least minReadySeconds) for this replica set. + // +optional + AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` + + // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` + + // Represents the latest available observations of a replica set's current state. 
+ // +optional + // +patchMergeKey=type + // +patchStrategy=merge + Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` +} + +type ReplicaSetConditionType string + +// These are valid conditions of a replica set. +const ( + // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created + // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted + // due to kubelet being down or finalizers are failing. + ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" +) + +// ReplicaSetCondition describes the state of a replica set at a certain point. +type ReplicaSetCondition struct { + // Type of replica set condition. + Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` + // Status of the condition, one of True, False, Unknown. + Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` + // The last time the condition transitioned from one status to another. + // +optional + LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` + // The reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` + // A human readable message indicating details about the transition. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +// Deprecated: use PodSecurityPolicy from policy API Group instead. +type PodSecurityPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// PodSecurityPolicySpec defines the policy enforced. +// Deprecated: use PodSecurityPolicySpec from policy API Group instead. +type PodSecurityPolicySpec struct { + // privileged determines if a pod can request to be run as privileged. + // +optional + Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. + // +optional + DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // requiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. 
+ // +optional + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. + // +optional + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // volumes is a white list of allowed volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. + // +optional + Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` + // seLinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. 
+ // +optional + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` + // allowedHostPaths is a white list of allowed host paths. Empty indicates + // that all host paths may be used. + // +optional + AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // +optional + AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` + // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. + // Empty or nil indicates that only the DefaultProcMountType may be used. + // This requires the ProcMountType feature flag to be enabled. + // +optional + AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +// Deprecated: use AllowedHostPath from policy API Group instead. 
+type AllowedHostPath struct { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. + // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// FSType gives strong typing to different file systems that are used by volumes. +// Deprecated: use FSType from policy API Group instead. +type FSType string + +const ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + CSI FSType = "csi" + All FSType = "*" +) + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +// Deprecated: use AllowedFlexVolume from policy API Group instead. +type AllowedFlexVolume struct { + // driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +type AllowedCSIDriver struct { + // Name is the registered name of the CSI driver + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +// Deprecated: use HostPortRange from policy API Group instead. +type HostPortRange struct { + // min is the start of the range, inclusive. + Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use SELinuxStrategyOptions from policy API Group instead. +type SELinuxStrategyOptions struct { + // rule is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security Context. +// Deprecated: use SELinuxStrategy from policy API Group instead. +type SELinuxStrategy string + +const ( + // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. 
+ // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. + // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. +type RunAsUserStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. +type RunAsGroupStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// IDRange provides a min/max of an allowed range of IDs. +// Deprecated: use IDRange from policy API Group instead. +type IDRange struct { + // min is the start of the range, inclusive. + Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// Security Context. +// Deprecated: use RunAsUserStrategy from policy API Group instead. +type RunAsUserStrategy string + +const ( + // RunAsUserStrategyMustRunAs means that container must run as a particular uid. + // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. + RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. + // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // RunAsUserStrategyRunAsAny means that container may make requests for any uid. + // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// Security Context. +// Deprecated: use RunAsGroupStrategy from policy API Group instead. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. + // However, when RunAsGroup are specified, they have to fall in the defined range. 
+ RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" + // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. + // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. + RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" + // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. + // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. + RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" +) + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +// Deprecated: use FSGroupStrategyOptions from policy API Group instead. +type FSGroupStrategyOptions struct { + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// FSGroupStrategyType denotes strategy types for generating FSGroup values for a +// SecurityContext +// Deprecated: use FSGroupStrategyType from policy API Group instead. +type FSGroupStrategyType string + +const ( + // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. + // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. + FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" + // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. + // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. + FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" +) + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. +type SupplementalGroupsStrategyOptions struct { + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental +// groups for a SecurityContext. +// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. +type SupplementalGroupsStrategyType string + +const ( + // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. + // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. + SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" + // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. + // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. 
+ SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" +) + +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +type RuntimeClassStrategyOptions struct { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. +const AllowAllRuntimeClassNames = "*" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +// Deprecated: use PodSecurityPolicyList from policy API Group instead. +type PodSecurityPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. + Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. +// NetworkPolicy describes what network traffic is allowed for a set of Pods +type NetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType. +// Policy Type string describes the NetworkPolicy type +// This type is beta-level in 1.8 +type PolicyType string + +const ( + // PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods + PolicyTypeIngress PolicyType = "Ingress" + // PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods + PolicyTypeEgress PolicyType = "Egress" +) + +// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules + // is applied to any pods selected by this field. Multiple network policies can select the + // same set of pods. In this case, the ingress rules for each are combined additively. 
+ // This field is NOT optional and follows standard label selector semantics. + // An empty podSelector matches all pods in this namespace. + PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` + + // List of ingress rules to be applied to the selected pods. + // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod + // OR if the traffic source is the pod's local node, + // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy + // objects whose podSelector matches the pod. + // If this field is empty then this NetworkPolicy does not allow any traffic + // (and serves solely to ensure that the pods it selects are isolated by default). + // +optional + Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` + + // List of egress rules to be applied to the selected pods. Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` + + // List of rule types that the NetworkPolicy relates to. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. +// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this rule. + // Each item in this list is combined using a logical OR. + // If this field is empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows traffic + // only if the traffic matches at least one port in the list. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. + // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). 
+ // If this field is present and contains at least one item, this rule allows traffic only if the + // traffic matches at least one item in the from list. + // +optional + From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +type NetworkPolicyEgressRule struct { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. +type NetworkPolicyPort struct { + // Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. + // If not specified, this field defaults to TCP. + // +optional + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` + + // If specified, the port on the given protocol. This can + // either be a numerical or named port on a pod. If this field is not provided, + // this matches all port names and numbers. + // If present, only traffic on the specified protocol AND port + // will be matched. + // +optional + Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` +} + +// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs +// that should not be included within this rule. +type IPBlock struct { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Except values will be rejected if they are outside the CIDR range + // +optional + Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` +} + +// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. +type NetworkPolicyPeer struct { + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. 
+ // + // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // +optional + PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + + // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + // + // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` + + // IPBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. +// Network Policy List is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..9ccad92 --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,656 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
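For reference, a minimal sketch of how the generated SwaggerDoc() maps below can be consumed once this package is vendored (illustrative only, not part of the generated file; it assumes the import path k8s.io/api/extensions/v1beta1 that this vendor tree uses):

package main

import (
	"fmt"

	extensionsv1beta1 "k8s.io/api/extensions/v1beta1"
)

func main() {
	// SwaggerDoc returns a map keyed by JSON field name; the "" key holds the
	// description of the type itself.
	docs := extensionsv1beta1.NetworkPolicySpec{}.SwaggerDoc()
	fmt.Println(docs[""])            // type-level documentation
	fmt.Println(docs["podSelector"]) // field-level documentation
}

As the header above notes, go-restful can read these methods to build Swagger documentation for the models, and the maps are regenerated with hack/update-generated-swagger-docs.sh rather than edited by hand.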
+var map_AllowedCSIDriver = map[string]string{ + "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", + "name": "Name is the registered name of the CSI driver", +} + +func (AllowedCSIDriver) SwaggerDoc() map[string]string { + return map_AllowedCSIDriver +} + +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", + "driver": "driver is the name of the Flexvolume driver.", +} + +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume +} + +var map_AllowedHostPath = map[string]string{ + "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", + "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", + "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", +} + +func (AllowedHostPath) SwaggerDoc() map[string]string { + return map_AllowedHostPath +} + +var map_DaemonSet = map[string]string{ + "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (DaemonSet) SwaggerDoc() map[string]string { + return map_DaemonSet +} + +var map_DaemonSetCondition = map[string]string{ + "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", + "type": "Type of DaemonSet condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DaemonSetCondition) SwaggerDoc() map[string]string { + return map_DaemonSetCondition +} + +var map_DaemonSetList = map[string]string{ + "": "DaemonSetList is a collection of daemon sets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "A list of daemon sets.", +} + +func (DaemonSetList) SwaggerDoc() map[string]string { + return map_DaemonSetList +} + +var map_DaemonSetSpec = map[string]string{ + "": "DaemonSetSpec is the specification of a daemon set.", + "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", + "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", + "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", + "templateGeneration": "DEPRECATED. A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.", + "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", +} + +func (DaemonSetSpec) SwaggerDoc() map[string]string { + return map_DaemonSetSpec +} + +var map_DaemonSetStatus = map[string]string{ + "": "DaemonSetStatus represents the current status of a daemon set.", + "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", + "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", + "observedGeneration": "The most recent generation observed by the daemon set controller.", + "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", + "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", + "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", + "conditions": "Represents the latest available observations of a DaemonSet's current state.", +} + +func (DaemonSetStatus) SwaggerDoc() map[string]string { + return map_DaemonSetStatus +} + +var map_DaemonSetUpdateStrategy = map[string]string{ + "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.", + "rollingUpdate": "Rolling update config params. 
Present only if type = \"RollingUpdate\".", +} + +func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { + return map_DaemonSetUpdateStrategy +} + +var map_Deployment = map[string]string{ + "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", + "metadata": "Standard object metadata.", + "spec": "Specification of the desired behavior of the Deployment.", + "status": "Most recently observed status of the Deployment.", +} + +func (Deployment) SwaggerDoc() map[string]string { + return map_Deployment +} + +var map_DeploymentCondition = map[string]string{ + "": "DeploymentCondition describes the state of a deployment at a certain point.", + "type": "Type of deployment condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastUpdateTime": "The last time this condition was updated.", + "lastTransitionTime": "Last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (DeploymentCondition) SwaggerDoc() map[string]string { + return map_DeploymentCondition +} + +var map_DeploymentList = map[string]string{ + "": "DeploymentList is a list of Deployments.", + "metadata": "Standard list metadata.", + "items": "Items is the list of Deployments.", +} + +func (DeploymentList) SwaggerDoc() map[string]string { + return map_DeploymentList +} + +var map_DeploymentRollback = map[string]string{ + "": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.", + "name": "Required: This must match the Name of a deployment.", + "updatedAnnotations": "The annotations to be updated to a deployment", + "rollbackTo": "The config of this deployment rollback.", +} + +func (DeploymentRollback) SwaggerDoc() map[string]string { + return map_DeploymentRollback +} + +var map_DeploymentSpec = map[string]string{ + "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", + "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", + "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", + "template": "Template describes the pods that will be created.", + "strategy": "The deployment strategy to use to replace existing pods with new ones.", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old RelicaSets\".", + "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", + "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", + "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. 
The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".", +} + +func (DeploymentSpec) SwaggerDoc() map[string]string { + return map_DeploymentSpec +} + +var map_DeploymentStatus = map[string]string{ + "": "DeploymentStatus is the most recently observed status of the Deployment.", + "observedGeneration": "The generation observed by the deployment controller.", + "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", + "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", + "readyReplicas": "Total number of ready pods targeted by this deployment.", + "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", + "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", + "conditions": "Represents the latest available observations of a deployment's current state.", + "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", +} + +func (DeploymentStatus) SwaggerDoc() map[string]string { + return map_DeploymentStatus +} + +var map_DeploymentStrategy = map[string]string{ + "": "DeploymentStrategy describes how to replace existing pods with new ones.", + "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", + "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", +} + +func (DeploymentStrategy) SwaggerDoc() map[string]string { + return map_DeploymentStrategy +} + +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + +var map_HTTPIngressPath = map[string]string{ + "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", + "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", + "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. 
Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", +} + +func (HTTPIngressPath) SwaggerDoc() map[string]string { + return map_HTTPIngressPath +} + +var map_HTTPIngressRuleValue = map[string]string{ + "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", + "paths": "A collection of paths that map requests to backends.", +} + +func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { + return map_HTTPIngressRuleValue +} + +var map_HostPortRange = map[string]string{ + "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. Deprecated: use HostPortRange from policy API Group instead.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (HostPortRange) SwaggerDoc() map[string]string { + return map_HostPortRange +} + +var map_IDRange = map[string]string{ + "": "IDRange provides a min/max of an allowed range of IDs. Deprecated: use IDRange from policy API Group instead.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (IDRange) SwaggerDoc() map[string]string { + return map_IDRange +} + +var map_IPBlock = map[string]string{ + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", +} + +func (IPBlock) SwaggerDoc() map[string]string { + return map_IPBlock +} + +var map_Ingress = map[string]string{ + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressBackend = map[string]string{ + "": "IngressBackend describes all endpoints for a given service and port.", + "serviceName": "Specifies the name of the referenced service.", + "servicePort": "Specifies the port of the referenced service.", + "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", +} + +func (IngressBackend) SwaggerDoc() map[string]string { + return map_IngressBackend +} + +var map_IngressList = map[string]string{ + "": "IngressList is a collection of Ingress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of Ingress.", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressRule = map[string]string{ + "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", +} + +func (IngressRule) SwaggerDoc() map[string]string { + return map_IngressRule +} + +var map_IngressRuleValue = map[string]string{ + "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. 
Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", +} + +func (IngressRuleValue) SwaggerDoc() map[string]string { + return map_IngressRuleValue +} + +var map_IngressSpec = map[string]string{ + "": "IngressSpec describes the Ingress the user wishes to exist.", + "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "": "IngressStatus describe the current state of the Ingress.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_IngressTLS = map[string]string{ + "": "IngressTLS describes the transport layer security associated with an Ingress.", + "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", +} + +func (IngressTLS) SwaggerDoc() map[string]string { + return map_IngressTLS +} + +var map_NetworkPolicy = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior for this NetworkPolicy.", +} + +func (NetworkPolicy) SwaggerDoc() map[string]string { + return map_NetworkPolicy +} + +var map_NetworkPolicyEgressRule = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", + "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", +} + +func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyEgressRule +} + +var map_NetworkPolicyIngressRule = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", + "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", +} + +func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyIngressRule +} + +var map_NetworkPolicyList = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (NetworkPolicyList) SwaggerDoc() map[string]string { + return map_NetworkPolicyList +} + +var map_NetworkPolicyPeer = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.", + "podSelector": "This is a label selector which selects Pods. 
This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", + "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", + "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", +} + +func (NetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_NetworkPolicyPeer +} + +var map_NetworkPolicyPort = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", + "protocol": "Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", +} + +func (NetworkPolicyPort) SwaggerDoc() map[string]string { + return map_NetworkPolicyPort +} + +var map_NetworkPolicySpec = map[string]string{ + "": "DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.", + "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", + "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". 
If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", +} + +func (NetworkPolicySpec) SwaggerDoc() map[string]string { + return map_NetworkPolicySpec +} + +var map_PodSecurityPolicy = map[string]string{ + "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the policy enforced.", +} + +func (PodSecurityPolicy) SwaggerDoc() map[string]string { + return map_PodSecurityPolicy +} + +var map_PodSecurityPolicyList = map[string]string{ + "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (PodSecurityPolicyList) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyList +} + +var map_PodSecurityPolicySpec = map[string]string{ + "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", + "privileged": "privileged determines if a pod can request to be run as privileged.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", + "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", + "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", + "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", + "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", + "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", + "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", + "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", + "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", + "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", + "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", + "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. 
This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", +} + +func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySpec +} + +var map_ReplicaSet = map[string]string{ + "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (ReplicaSet) SwaggerDoc() map[string]string { + return map_ReplicaSet +} + +var map_ReplicaSetCondition = map[string]string{ + "": "ReplicaSetCondition describes the state of a replica set at a certain point.", + "type": "Type of replica set condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "lastTransitionTime": "The last time the condition transitioned from one status to another.", + "reason": "The reason for the condition's last transition.", + "message": "A human readable message indicating details about the transition.", +} + +func (ReplicaSetCondition) SwaggerDoc() map[string]string { + return map_ReplicaSetCondition +} + +var map_ReplicaSetList = map[string]string{ + "": "ReplicaSetList is a collection of ReplicaSets.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", +} + +func (ReplicaSetList) SwaggerDoc() map[string]string { + return map_ReplicaSetList +} + +var map_ReplicaSetSpec = map[string]string{ + "": "ReplicaSetSpec is the specification of a ReplicaSet.", + "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", + "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. 
More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", + "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", +} + +func (ReplicaSetSpec) SwaggerDoc() map[string]string { + return map_ReplicaSetSpec +} + +var map_ReplicaSetStatus = map[string]string{ + "": "ReplicaSetStatus represents the current status of a ReplicaSet.", + "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", + "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", + "readyReplicas": "The number of ready replicas for this replica set.", + "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", + "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", + "conditions": "Represents the latest available observations of a replica set's current state.", +} + +func (ReplicaSetStatus) SwaggerDoc() map[string]string { + return map_ReplicaSetStatus +} + +var map_RollbackConfig = map[string]string{ + "": "DEPRECATED.", + "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", +} + +func (RollbackConfig) SwaggerDoc() map[string]string { + return map_RollbackConfig +} + +var map_RollingUpdateDaemonSet = map[string]string{ + "": "Spec to control the desired behavior of daemon set rolling update.", + "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", +} + +func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { + return map_RollingUpdateDaemonSet +} + +var map_RollingUpdateDeployment = map[string]string{ + "": "Spec to control the desired behavior of rolling update.", + "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. 
Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", +} + +func (RollingUpdateDeployment) SwaggerDoc() map[string]string { + return map_RollingUpdateDeployment +} + +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + +var map_RunAsUserStrategyOptions = map[string]string{ + "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", + "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsUserStrategyOptions +} + +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + +var map_SELinuxStrategyOptions = map[string]string{ + "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. 
Deprecated: use SELinuxStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate the allowable labels that may be set.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", +} + +func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { + return map_SELinuxStrategyOptions +} + +var map_Scale = map[string]string{ + "": "represents a scaling request for a resource.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", +} + +func (Scale) SwaggerDoc() map[string]string { + return map_Scale +} + +var map_ScaleSpec = map[string]string{ + "": "describes the attributes of a scale subresource", + "replicas": "desired number of instances for the scaled object.", +} + +func (ScaleSpec) SwaggerDoc() map[string]string { + return map_ScaleSpec +} + +var map_ScaleStatus = map[string]string{ + "": "represents the current status of a scale subresource.", + "replicas": "actual number of observed instances of the scaled object.", + "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", + "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", +} + +func (ScaleStatus) SwaggerDoc() map[string]string { + return map_ScaleStatus +} + +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", + "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..913f485 --- /dev/null +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,1489 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. +func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { + if in == nil { + return nil + } + out := new(AllowedCSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { + if in == nil { + return nil + } + out := new(AllowedFlexVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. +func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { + if in == nil { + return nil + } + out := new(AllowedHostPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. +func (in *DaemonSet) DeepCopy() *DaemonSet { + if in == nil { + return nil + } + out := new(DaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. 
+func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { + if in == nil { + return nil + } + out := new(DaemonSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]DaemonSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. +func (in *DaemonSetList) DeepCopy() *DaemonSetList { + if in == nil { + return nil + } + out := new(DaemonSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DaemonSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. +func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { + if in == nil { + return nil + } + out := new(DaemonSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { + *out = *in + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DaemonSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. +func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { + if in == nil { + return nil + } + out := new(DaemonSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDaemonSet) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. +func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { + if in == nil { + return nil + } + out := new(DaemonSetUpdateStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Deployment) DeepCopyInto(out *Deployment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. +func (in *Deployment) DeepCopy() *Deployment { + if in == nil { + return nil + } + out := new(Deployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Deployment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { + *out = *in + in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. +func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { + if in == nil { + return nil + } + out := new(DeploymentCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Deployment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. +func (in *DeploymentList) DeepCopy() *DeploymentList { + if in == nil { + return nil + } + out := new(DeploymentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.UpdatedAnnotations != nil { + in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + out.RollbackTo = in.RollbackTo + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. +func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { + if in == nil { + return nil + } + out := new(DeploymentRollback) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeploymentRollback) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + in.Strategy.DeepCopyInto(&out.Strategy) + if in.RevisionHistoryLimit != nil { + in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit + *out = new(int32) + **out = **in + } + if in.RollbackTo != nil { + in, out := &in.RollbackTo, &out.RollbackTo + *out = new(RollbackConfig) + **out = **in + } + if in.ProgressDeadlineSeconds != nil { + in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. +func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { + if in == nil { + return nil + } + out := new(DeploymentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]DeploymentCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.CollisionCount != nil { + in, out := &in.CollisionCount, &out.CollisionCount + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. +func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { + if in == nil { + return nil + } + out := new(DeploymentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { + *out = *in + if in.RollingUpdate != nil { + in, out := &in.RollingUpdate, &out.RollingUpdate + *out = new(RollingUpdateDeployment) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. +func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { + if in == nil { + return nil + } + out := new(DeploymentStrategy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. +func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { + if in == nil { + return nil + } + out := new(FSGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { + *out = *in + if in.PathType != nil { + in, out := &in.PathType, &out.PathType + *out = new(PathType) + **out = **in + } + in.Backend.DeepCopyInto(&out.Backend) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath. +func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath { + if in == nil { + return nil + } + out := new(HTTPIngressPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue. +func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { + if in == nil { + return nil + } + out := new(HTTPIngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. +func (in *HostPortRange) DeepCopy() *HostPortRange { + if in == nil { + return nil + } + out := new(HostPortRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDRange) DeepCopyInto(out *IDRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. +func (in *IDRange) DeepCopy() *IDRange { + if in == nil { + return nil + } + out := new(IDRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPBlock) DeepCopyInto(out *IPBlock) { + *out = *in + if in.Except != nil { + in, out := &in.Except, &out.Except + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock. +func (in *IPBlock) DeepCopy() *IPBlock { + if in == nil { + return nil + } + out := new(IPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. +func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { + *out = *in + out.ServicePort = in.ServicePort + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(corev1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend. +func (in *IngressBackend) DeepCopy() *IngressBackend { + if in == nil { + return nil + } + out := new(IngressBackend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRule) DeepCopyInto(out *IngressRule) { + *out = *in + in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. +func (in *IngressRule) DeepCopy() *IngressRule { + if in == nil { + return nil + } + out := new(IngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) { + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue. +func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { + if in == nil { + return nil + } + out := new(IngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.IngressClassName != nil { + in, out := &in.IngressClassName, &out.IngressClassName + *out = new(string) + **out = **in + } + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. 
+func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. +func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressTLS) DeepCopyInto(out *IngressTLS) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS. +func (in *IngressTLS) DeepCopy() *IngressTLS { + if in == nil { + return nil + } + out := new(IngressTLS) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy. +func (in *NetworkPolicy) DeepCopy() *NetworkPolicy { + if in == nil { + return nil + } + out := new(NetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.To != nil { + in, out := &in.To, &out.To + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule. +func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyEgressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule. +func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyIngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList. +func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList { + if in == nil { + return nil + } + out := new(NetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.IPBlock != nil { + in, out := &in.IPBlock, &out.IPBlock + *out = new(IPBlock) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer. +func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer { + if in == nil { + return nil + } + out := new(NetworkPolicyPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(corev1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort. +func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort { + if in == nil { + return nil + } + out := new(NetworkPolicyPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { + *out = *in + in.PodSelector.DeepCopyInto(&out.PodSelector) + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]NetworkPolicyEgressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PolicyTypes != nil { + in, out := &in.PolicyTypes, &out.PolicyTypes + *out = make([]PolicyType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec. +func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { + if in == nil { + return nil + } + out := new(NetworkPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. +func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { + if in == nil { + return nil + } + out := new(PodSecurityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. +func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { + if in == nil { + return nil + } + out := new(PodSecurityPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + in.SELinux.DeepCopyInto(&out.SELinux) + in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } + in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) + in.FSGroup.DeepCopyInto(&out.FSGroup) + if in.DefaultAllowPrivilegeEscalation != nil { + in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.AllowedHostPaths != nil { + in, out := &in.AllowedHostPaths, &out.AllowedHostPaths + *out = make([]AllowedHostPath, len(*in)) + copy(*out, *in) + } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } + if in.AllowedCSIDrivers != nil { + in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers + *out = make([]AllowedCSIDriver, len(*in)) + copy(*out, *in) + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ForbiddenSysctls != nil { + in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedProcMountTypes != nil { + in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes + *out = make([]corev1.ProcMountType, len(*in)) + copy(*out, *in) + } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. +func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. 
+func (in *ReplicaSet) DeepCopy() *ReplicaSet { + if in == nil { + return nil + } + out := new(ReplicaSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSet) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { + *out = *in + in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. +func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { + if in == nil { + return nil + } + out := new(ReplicaSetCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ReplicaSet, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. +func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { + if in == nil { + return nil + } + out := new(ReplicaSetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ReplicaSetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { + *out = *in + if in.Replicas != nil { + in, out := &in.Replicas, &out.Replicas + *out = new(int32) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + in.Template.DeepCopyInto(&out.Template) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. +func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { + if in == nil { + return nil + } + out := new(ReplicaSetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { + *out = *in + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]ReplicaSetCondition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. +func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { + if in == nil { + return nil + } + out := new(ReplicaSetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. +func (in *RollbackConfig) DeepCopy() *RollbackConfig { + if in == nil { + return nil + } + out := new(RollbackConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. +func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { + if in == nil { + return nil + } + out := new(RollingUpdateDaemonSet) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { + *out = *in + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.MaxSurge != nil { + in, out := &in.MaxSurge, &out.MaxSurge + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. +func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { + if in == nil { + return nil + } + out := new(RollingUpdateDeployment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. +func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. +func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsUserStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(corev1.SELinuxOptions) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. +func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { + if in == nil { + return nil + } + out := new(SELinuxStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scale) DeepCopyInto(out *Scale) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. +func (in *Scale) DeepCopy() *Scale { + if in == nil { + return nil + } + out := new(Scale) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Scale) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. +func (in *ScaleSpec) DeepCopy() *ScaleSpec { + if in == nil { + return nil + } + out := new(ScaleSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { + *out = *in + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. +func (in *ScaleStatus) DeepCopy() *ScaleStatus { + if in == nil { + return nil + } + out := new(ScaleStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. +func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { + if in == nil { + return nil + } + out := new(SupplementalGroupsStrategyOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go new file mode 100644 index 0000000..d3ffd5e --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=networking.k8s.io + +package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go new file mode 100644 index 0000000..1ff2339 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -0,0 +1,2202 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{0} +} +func (m *IPBlock) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IPBlock) XXX_Merge(src proto.Message) { + xxx_messageInfo_IPBlock.Merge(m, src) +} +func (m *IPBlock) XXX_Size() int { + return m.Size() +} +func (m *IPBlock) XXX_DiscardUnknown() { + xxx_messageInfo_IPBlock.DiscardUnknown(m) +} + +var xxx_messageInfo_IPBlock proto.InternalMessageInfo + +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{1} +} +func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicy.Merge(m, src) +} +func (m *NetworkPolicy) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo + +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{2} +} +func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) +} +func (m *NetworkPolicyEgressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{3} +} +func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) +} +func (m *NetworkPolicyIngressRule) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyIngressRule 
proto.InternalMessageInfo + +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{4} +} +func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyList.Merge(m, src) +} +func (m *NetworkPolicyList) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo + +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{5} +} +func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) +} +func (m *NetworkPolicyPeer) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo + +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{6} +} +func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicyPort.Merge(m, src) +} +func (m *NetworkPolicyPort) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicyPort) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo + +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_1c72867a70a7cc90, []int{7} +} +func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_NetworkPolicySpec.Merge(m, src) +} +func (m *NetworkPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *NetworkPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo + +func init() { + 
proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") + proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.networking.v1.NetworkPolicy") + proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.networking.v1.NetworkPolicyEgressRule") + proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.networking.v1.NetworkPolicyIngressRule") + proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.networking.v1.NetworkPolicyList") + proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer") + proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") + proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptor_1c72867a70a7cc90) +} + +var fileDescriptor_1c72867a70a7cc90 = []byte{ + // 804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, + 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, + 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, + 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, + 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, + 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, + 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, + 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, + 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, + 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, + 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, + 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, + 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, + 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, + 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 0x34, 0x42, 0x3a, + 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, + 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, + 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, + 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, + 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, + 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, + 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, + 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, + 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, + 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, + 0x4f, 0x19, 
0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, + 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, + 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, + 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, + 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, + 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, + 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, + 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, + 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, + 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, + 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, + 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, + 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, + 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, + 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, + 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, + 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, + 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, + 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, + 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, + 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, + 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, + 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, + 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, + 0x67, 0x08, 0x00, 0x00, +} + +func (m *IPBlock) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Except) > 0 { + for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Except[iNdEx]) + copy(dAtA[i:], m.Except[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.CIDR) + copy(dAtA[i:], m.CIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.To) > 0 { + for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.From) > 0 { + for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Ports) > 0 { + for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IPBlock != nil { + { + size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.NamespaceSelector != nil { + { + size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PodSelector != nil { + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Port != nil { + { + size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Protocol != nil { + i -= len(*m.Protocol) + copy(dAtA[i:], *m.Protocol) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PolicyTypes) > 0 { + for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.PolicyTypes[iNdEx]) + copy(dAtA[i:], m.PolicyTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Egress) > 0 { + for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Ingress) > 0 { + for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *IPBlock) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.CIDR) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Except) > 0 { + for _, s := range m.Except { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *NetworkPolicyEgressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.To) > 0 { + for _, e := range m.To { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyIngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.From) > 0 { + for _, e := range m.From { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *NetworkPolicyPeer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PodSelector != nil { + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.IPBlock != nil { + l = m.IPBlock.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicyPort) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Protocol != nil { + l = len(*m.Protocol) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Port != nil { + l = m.Port.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *NetworkPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.PodSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ingress) > 0 { + for _, e := range m.Ingress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Egress) > 0 { + for _, e := range m.Egress { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *IPBlock) String() string { + if this == nil { + return "nil" + } + s 
:= strings.Join([]string{`&IPBlock{`, + `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, + `Except:` + fmt.Sprintf("%v", this.Except) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicy{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyEgressRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForTo := "[]NetworkPolicyPeer{" + for _, f := range this.To { + repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForTo += "}" + s := strings.Join([]string{`&NetworkPolicyEgressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `To:` + repeatedStringForTo + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyIngressRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForPorts := "[]NetworkPolicyPort{" + for _, f := range this.Ports { + repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," + } + repeatedStringForPorts += "}" + repeatedStringForFrom := "[]NetworkPolicyPeer{" + for _, f := range this.From { + repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," + } + repeatedStringForFrom += "}" + s := strings.Join([]string{`&NetworkPolicyIngressRule{`, + `Ports:` + repeatedStringForPorts + `,`, + `From:` + repeatedStringForFrom + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NetworkPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NetworkPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPeer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPeer{`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NetworkPolicyPort) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&NetworkPolicyPort{`, + `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") 
+ return s +} +func (this *NetworkPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForIngress := "[]NetworkPolicyIngressRule{" + for _, f := range this.Ingress { + repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForIngress += "}" + repeatedStringForEgress := "[]NetworkPolicyEgressRule{" + for _, f := range this.Egress { + repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForEgress += "}" + s := strings.Join([]string{`&NetworkPolicySpec{`, + `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + repeatedStringForIngress + `,`, + `Egress:` + repeatedStringForEgress + `,`, + `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *IPBlock) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } 
+ b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.To = append(m.To, NetworkPolicyPeer{}) + if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ports = append(m.Ports, NetworkPolicyPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.From = append(m.From, NetworkPolicyPeer{}) + if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NetworkPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if 
wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodSelector == nil { + m.PodSelector = &v1.LabelSelector{} + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.IPBlock == nil { + m.IPBlock = &IPBlock{} + } + if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", 
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) + m.Protocol = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Port == nil { + m.Port = &intstr.IntOrString{} + } + if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) + if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) + if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType 
%d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto new file mode 100644 index 0000000..f2aa969 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -0,0 +1,195 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.networking.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs +// that should not be included within this rule. +message IPBlock { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + optional string cidr = 1; + + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Except values will be rejected if they are outside the CIDR range + // +optional + repeated string except = 2; +} + +// NetworkPolicy describes what network traffic is allowed for a set of Pods +message NetworkPolicy { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior for this NetworkPolicy. + // +optional + optional NetworkPolicySpec spec = 2; +} + +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +message NetworkPolicyEgressRule { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. 
+ // +optional + repeated NetworkPolicyPort ports = 1; + + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + repeated NetworkPolicyPeer to = 2; +} + +// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. +message NetworkPolicyIngressRule { + // List of ports which should be made accessible on the pods selected for this + // rule. Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + repeated NetworkPolicyPort ports = 1; + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all sources (traffic not restricted by + // source). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the from list. + // +optional + repeated NetworkPolicyPeer from = 2; +} + +// NetworkPolicyList is a list of NetworkPolicy objects. +message NetworkPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated NetworkPolicy items = 2; +} + +// NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of +// fields are allowed +message NetworkPolicyPeer { + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + // + // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; + + // IPBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. 
+ // +optional + optional IPBlock ipBlock = 3; +} + +// NetworkPolicyPort describes a port to allow traffic on +message NetworkPolicyPort { + // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this + // field defaults to TCP. + // +optional + optional string protocol = 1; + + // The port on the given protocol. This can either be a numerical or named port on + // a pod. If this field is not provided, this matches all port names and numbers. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; +} + +// NetworkPolicySpec provides the specification of a NetworkPolicy +message NetworkPolicySpec { + // Selects the pods to which this NetworkPolicy object applies. The array of + // ingress rules is applied to any pods selected by this field. Multiple network + // policies can select the same set of pods. In this case, the ingress rules for + // each are combined additively. This field is NOT optional and follows standard + // label selector semantics. An empty podSelector matches all pods in this + // namespace. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; + + // List of ingress rules to be applied to the selected pods. Traffic is allowed to + // a pod if there are no NetworkPolicies selecting the pod + // (and cluster policy otherwise allows the traffic), OR if the traffic source is + // the pod's local node, OR if the traffic matches at least one ingress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy does not allow any traffic (and serves + // solely to ensure that the pods it selects are isolated by default) + // +optional + repeated NetworkPolicyIngressRule ingress = 2; + + // List of egress rules to be applied to the selected pods. Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + repeated NetworkPolicyEgressRule egress = 3; + + // List of rule types that the NetworkPolicy relates to. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + repeated string policyTypes = 4; +} + diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go new file mode 100644 index 0000000..f47f22e --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NetworkPolicy{}, + &NetworkPolicyList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go new file mode 100644 index 0000000..73580a5 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -0,0 +1,203 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkPolicy describes what network traffic is allowed for a set of Pods +type NetworkPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior for this NetworkPolicy. 
+ // +optional + Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// Policy Type string describes the NetworkPolicy type +// This type is beta-level in 1.8 +type PolicyType string + +const ( + // PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods + PolicyTypeIngress PolicyType = "Ingress" + // PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods + PolicyTypeEgress PolicyType = "Egress" +) + +// NetworkPolicySpec provides the specification of a NetworkPolicy +type NetworkPolicySpec struct { + // Selects the pods to which this NetworkPolicy object applies. The array of + // ingress rules is applied to any pods selected by this field. Multiple network + // policies can select the same set of pods. In this case, the ingress rules for + // each are combined additively. This field is NOT optional and follows standard + // label selector semantics. An empty podSelector matches all pods in this + // namespace. + PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` + + // List of ingress rules to be applied to the selected pods. Traffic is allowed to + // a pod if there are no NetworkPolicies selecting the pod + // (and cluster policy otherwise allows the traffic), OR if the traffic source is + // the pod's local node, OR if the traffic matches at least one ingress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy does not allow any traffic (and serves + // solely to ensure that the pods it selects are isolated by default) + // +optional + Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` + + // List of egress rules to be applied to the selected pods. Outgoing traffic is + // allowed if there are no NetworkPolicies selecting the pod (and cluster policy + // otherwise allows the traffic), OR if the traffic matches at least one egress rule + // across all of the NetworkPolicy objects whose podSelector matches the pod. If + // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves + // solely to ensure that the pods it selects are isolated by default). + // This field is beta-level in 1.8 + // +optional + Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` + + // List of rule types that the NetworkPolicy relates to. + // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // If this field is not specified, it will default based on the existence of Ingress or Egress rules; + // policies that contain an Egress section are assumed to affect Egress, and all policies + // (whether or not they contain an Ingress section) are assumed to affect Ingress. + // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. + // Likewise, if you want to write a policy that specifies that no egress is allowed, + // you must specify a policyTypes value that include "Egress" (since such a policy would not include + // an Egress section and would otherwise default to just [ "Ingress" ]). + // This field is beta-level in 1.8 + // +optional + PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` +} + +// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. 
The traffic must match both ports and from. +type NetworkPolicyIngressRule struct { + // List of ports which should be made accessible on the pods selected for this + // rule. Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of sources which should be able to access the pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all sources (traffic not restricted by + // source). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the from list. + // +optional + From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` +} + +// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods +// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. +// This type is beta-level in 1.8 +type NetworkPolicyEgressRule struct { + // List of destination ports for outgoing traffic. + // Each item in this list is combined using a logical OR. If this field is + // empty or missing, this rule matches all ports (traffic not restricted by port). + // If this field is present and contains at least one item, then this rule allows + // traffic only if the traffic matches at least one port in the list. + // +optional + Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` + + // List of destinations for outgoing traffic of pods selected for this rule. + // Items in this list are combined using a logical OR operation. If this field is + // empty or missing, this rule matches all destinations (traffic not restricted by + // destination). If this field is present and contains at least one item, this rule + // allows traffic only if the traffic matches at least one item in the to list. + // +optional + To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` +} + +// NetworkPolicyPort describes a port to allow traffic on +type NetworkPolicyPort struct { + // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this + // field defaults to TCP. + // +optional + Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` + + // The port on the given protocol. This can either be a numerical or named port on + // a pod. If this field is not provided, this matches all port names and numbers. + // +optional + Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` +} + +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed +// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs +// that should not be included within this rule. 
+type IPBlock struct { + // CIDR is a string representing the IP Block + // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` + // Except is a slice of CIDRs that should not be included within an IP Block + // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Except values will be rejected if they are outside the CIDR range + // +optional + Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` +} + +// NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of +// fields are allowed +type NetworkPolicyPeer struct { + // This is a label selector which selects Pods. This field follows standard label + // selector semantics; if present but empty, it selects all pods. + // + // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. + // +optional + PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` + + // Selects Namespaces using cluster-scoped labels. This field follows standard label + // selector semantics; if present but empty, it selects all namespaces. + // + // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects + // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. + // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. + // +optional + NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` + + // IPBlock defines policy on a particular IPBlock. If this field is set then + // neither of the other fields can be. + // +optional + IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NetworkPolicyList is a list of NetworkPolicy objects. +type NetworkPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..b404e5b --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -0,0 +1,113 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_IPBlock = map[string]string{ + "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", +} + +func (IPBlock) SwaggerDoc() map[string]string { + return map_IPBlock +} + +var map_NetworkPolicy = map[string]string{ + "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired behavior for this NetworkPolicy.", +} + +func (NetworkPolicy) SwaggerDoc() map[string]string { + return map_NetworkPolicy +} + +var map_NetworkPolicyEgressRule = map[string]string{ + "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", + "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", +} + +func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyEgressRule +} + +var map_NetworkPolicyIngressRule = map[string]string{ + "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", + "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. 
If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", +} + +func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { + return map_NetworkPolicyIngressRule +} + +var map_NetworkPolicyList = map[string]string{ + "": "NetworkPolicyList is a list of NetworkPolicy objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (NetworkPolicyList) SwaggerDoc() map[string]string { + return map_NetworkPolicyList +} + +var map_NetworkPolicyPeer = map[string]string{ + "": "NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of fields are allowed", + "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", + "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", + "ipBlock": "IPBlock defines policy on a particular IPBlock. If this field is set then neither of the other fields can be.", +} + +func (NetworkPolicyPeer) SwaggerDoc() map[string]string { + return map_NetworkPolicyPeer +} + +var map_NetworkPolicyPort = map[string]string{ + "": "NetworkPolicyPort describes a port to allow traffic on", + "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", + "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.", +} + +func (NetworkPolicyPort) SwaggerDoc() map[string]string { + return map_NetworkPolicyPort +} + +var map_NetworkPolicySpec = map[string]string{ + "": "NetworkPolicySpec provides the specification of a NetworkPolicy", + "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", + "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. 
If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", + "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", +} + +func (NetworkPolicySpec) SwaggerDoc() map[string]string { + return map_NetworkPolicySpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..1833e97 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -0,0 +1,262 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IPBlock) DeepCopyInto(out *IPBlock) { + *out = *in + if in.Except != nil { + in, out := &in.Except, &out.Except + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock. +func (in *IPBlock) DeepCopy() *IPBlock { + if in == nil { + return nil + } + out := new(IPBlock) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy. +func (in *NetworkPolicy) DeepCopy() *NetworkPolicy { + if in == nil { + return nil + } + out := new(NetworkPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.To != nil { + in, out := &in.To, &out.To + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule. +func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyEgressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) { + *out = *in + if in.Ports != nil { + in, out := &in.Ports, &out.Ports + *out = make([]NetworkPolicyPort, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.From != nil { + in, out := &in.From, &out.From + *out = make([]NetworkPolicyPeer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule. +func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { + if in == nil { + return nil + } + out := new(NetworkPolicyIngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NetworkPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList. +func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList { + if in == nil { + return nil + } + out := new(NetworkPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NetworkPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { + *out = *in + if in.PodSelector != nil { + in, out := &in.PodSelector, &out.PodSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(metav1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.IPBlock != nil { + in, out := &in.IPBlock, &out.IPBlock + *out = new(IPBlock) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer. +func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer { + if in == nil { + return nil + } + out := new(NetworkPolicyPeer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { + *out = *in + if in.Protocol != nil { + in, out := &in.Protocol, &out.Protocol + *out = new(corev1.Protocol) + **out = **in + } + if in.Port != nil { + in, out := &in.Port, &out.Port + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort. +func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort { + if in == nil { + return nil + } + out := new(NetworkPolicyPort) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { + *out = *in + in.PodSelector.DeepCopyInto(&out.PodSelector) + if in.Ingress != nil { + in, out := &in.Ingress, &out.Ingress + *out = make([]NetworkPolicyIngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Egress != nil { + in, out := &in.Egress, &out.Egress + *out = make([]NetworkPolicyEgressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.PolicyTypes != nil { + in, out := &in.PolicyTypes, &out.PolicyTypes + *out = make([]PolicyType, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec. +func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { + if in == nil { + return nil + } + out := new(NetworkPolicySpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/networking/v1beta1/doc.go b/vendor/k8s.io/api/networking/v1beta1/doc.go new file mode 100644 index 0000000..12d3d4f --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true +// +groupName=networking.k8s.io + +package v1beta1 // import "k8s.io/api/networking/v1beta1" diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go new file mode 100644 index 0000000..6f51df8 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go @@ -0,0 +1,3183 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{0} +} +func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressPath.Merge(m, src) +} +func (m *HTTPIngressPath) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressPath) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo + +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{1} +} +func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) +} +func (m *HTTPIngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) +} + +var 
xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo + +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{2} +} +func (m *Ingress) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ingress) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ingress.Merge(m, src) +} +func (m *Ingress) XXX_Size() int { + return m.Size() +} +func (m *Ingress) XXX_DiscardUnknown() { + xxx_messageInfo_Ingress.DiscardUnknown(m) +} + +var xxx_messageInfo_Ingress proto.InternalMessageInfo + +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{3} +} +func (m *IngressBackend) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressBackend) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressBackend.Merge(m, src) +} +func (m *IngressBackend) XXX_Size() int { + return m.Size() +} +func (m *IngressBackend) XXX_DiscardUnknown() { + xxx_messageInfo_IngressBackend.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressBackend proto.InternalMessageInfo + +func (m *IngressClass) Reset() { *m = IngressClass{} } +func (*IngressClass) ProtoMessage() {} +func (*IngressClass) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{4} +} +func (m *IngressClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressClass.Merge(m, src) +} +func (m *IngressClass) XXX_Size() int { + return m.Size() +} +func (m *IngressClass) XXX_DiscardUnknown() { + xxx_messageInfo_IngressClass.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressClass proto.InternalMessageInfo + +func (m *IngressClassList) Reset() { *m = IngressClassList{} } +func (*IngressClassList) ProtoMessage() {} +func (*IngressClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{5} +} +func (m *IngressClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressClassList.Merge(m, src) +} +func (m *IngressClassList) XXX_Size() int { + return m.Size() +} +func (m *IngressClassList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressClassList proto.InternalMessageInfo + +func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } +func (*IngressClassSpec) ProtoMessage() {} +func (*IngressClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, 
[]int{6} +} +func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressClassSpec.Merge(m, src) +} +func (m *IngressClassSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo + +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{7} +} +func (m *IngressList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressList) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressList.Merge(m, src) +} +func (m *IngressList) XXX_Size() int { + return m.Size() +} +func (m *IngressList) XXX_DiscardUnknown() { + xxx_messageInfo_IngressList.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressList proto.InternalMessageInfo + +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{8} +} +func (m *IngressRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRule.Merge(m, src) +} +func (m *IngressRule) XXX_Size() int { + return m.Size() +} +func (m *IngressRule) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRule.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRule proto.InternalMessageInfo + +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{9} +} +func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressRuleValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressRuleValue.Merge(m, src) +} +func (m *IngressRuleValue) XXX_Size() int { + return m.Size() +} +func (m *IngressRuleValue) XXX_DiscardUnknown() { + xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo + +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{10} +} +func (m *IngressSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if 
err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressSpec.Merge(m, src) +} +func (m *IngressSpec) XXX_Size() int { + return m.Size() +} +func (m *IngressSpec) XXX_DiscardUnknown() { + xxx_messageInfo_IngressSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressSpec proto.InternalMessageInfo + +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{11} +} +func (m *IngressStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressStatus.Merge(m, src) +} +func (m *IngressStatus) XXX_Size() int { + return m.Size() +} +func (m *IngressStatus) XXX_DiscardUnknown() { + xxx_messageInfo_IngressStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressStatus proto.InternalMessageInfo + +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { + return fileDescriptor_5bea11de0ceb8f53, []int{12} +} +func (m *IngressTLS) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IngressTLS) XXX_Merge(src proto.Message) { + xxx_messageInfo_IngressTLS.Merge(m, src) +} +func (m *IngressTLS) XXX_Size() int { + return m.Size() +} +func (m *IngressTLS) XXX_DiscardUnknown() { + xxx_messageInfo_IngressTLS.DiscardUnknown(m) +} + +var xxx_messageInfo_IngressTLS proto.InternalMessageInfo + +func init() { + proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") + proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") + proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") + proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") + proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass") + proto.RegisterType((*IngressClassList)(nil), "k8s.io.api.networking.v1beta1.IngressClassList") + proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1beta1.IngressClassSpec") + proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList") + proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule") + proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue") + proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") + proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") + proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_5bea11de0ceb8f53) +} + +var fileDescriptor_5bea11de0ceb8f53 = []byte{ + // 990 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 
0x14, 0xaf, 0x93, 0x66, 0x9b, 0x4e, 0xb2, 0xdd, 0x6a, 0xe8, 0x21, 0xaa, 0x84, 0x5b, 0xf9, 0x80, + 0xca, 0x9f, 0xda, 0x34, 0xbb, 0x20, 0x8e, 0xc8, 0x2b, 0xa1, 0x56, 0x04, 0x1a, 0x26, 0x16, 0x20, + 0x04, 0xd2, 0x4e, 0x9c, 0xb7, 0x8e, 0x89, 0x63, 0x9b, 0x99, 0x71, 0xd0, 0xde, 0xb8, 0x72, 0x82, + 0x2f, 0x01, 0x9f, 0x81, 0x23, 0x82, 0x4b, 0x8f, 0x7b, 0xdc, 0x53, 0x45, 0xc3, 0xb7, 0xe0, 0x84, + 0x66, 0x3c, 0xb5, 0x9d, 0xa4, 0xa5, 0x59, 0x0e, 0x7b, 0x8a, 0x67, 0xde, 0x7b, 0xbf, 0x37, 0xef, + 0xf7, 0x7e, 0x33, 0x2f, 0xe8, 0xa3, 0xc9, 0x07, 0xdc, 0x0e, 0x13, 0x67, 0x92, 0x0d, 0x81, 0xc5, + 0x20, 0x80, 0x3b, 0x33, 0x88, 0x47, 0x09, 0x73, 0xb4, 0x81, 0xa6, 0xa1, 0x13, 0x83, 0xf8, 0x3e, + 0x61, 0x93, 0x30, 0x0e, 0x9c, 0xd9, 0xc9, 0x10, 0x04, 0x3d, 0x71, 0x02, 0x88, 0x81, 0x51, 0x01, + 0x23, 0x3b, 0x65, 0x89, 0x48, 0xf0, 0xeb, 0xb9, 0xbb, 0x4d, 0xd3, 0xd0, 0x2e, 0xdd, 0x6d, 0xed, + 0xbe, 0x7f, 0x1c, 0x84, 0x62, 0x9c, 0x0d, 0x6d, 0x3f, 0x99, 0x3a, 0x41, 0x12, 0x24, 0x8e, 0x8a, + 0x1a, 0x66, 0x4f, 0xd5, 0x4a, 0x2d, 0xd4, 0x57, 0x8e, 0xb6, 0x6f, 0x55, 0x92, 0xfb, 0x09, 0x03, + 0x67, 0xb6, 0x92, 0x71, 0xff, 0x51, 0xe9, 0x33, 0xa5, 0xfe, 0x38, 0x8c, 0x81, 0x3d, 0x73, 0xd2, + 0x49, 0x20, 0x37, 0xb8, 0x33, 0x05, 0x41, 0x6f, 0x8a, 0x72, 0x6e, 0x8b, 0x62, 0x59, 0x2c, 0xc2, + 0x29, 0xac, 0x04, 0xbc, 0x7f, 0x57, 0x00, 0xf7, 0xc7, 0x30, 0xa5, 0x2b, 0x71, 0x0f, 0x6f, 0x8b, + 0xcb, 0x44, 0x18, 0x39, 0x61, 0x2c, 0xb8, 0x60, 0xcb, 0x41, 0xd6, 0x9f, 0x06, 0x7a, 0x70, 0xea, + 0x79, 0xfd, 0xb3, 0x38, 0x60, 0xc0, 0x79, 0x9f, 0x8a, 0x31, 0x3e, 0x44, 0x9b, 0x29, 0x15, 0xe3, + 0x8e, 0x71, 0x68, 0x1c, 0x6d, 0xbb, 0xed, 0x8b, 0xcb, 0x83, 0x8d, 0xf9, 0xe5, 0xc1, 0xa6, 0xb4, + 0x11, 0x65, 0xc1, 0x8f, 0x50, 0x53, 0xfe, 0x7a, 0xcf, 0x52, 0xe8, 0xd4, 0x95, 0x57, 0x67, 0x7e, + 0x79, 0xd0, 0xec, 0xeb, 0xbd, 0x7f, 0x2a, 0xdf, 0xa4, 0xf0, 0xc4, 0x5f, 0xa2, 0xad, 0x21, 0xf5, + 0x27, 0x10, 0x8f, 0x3a, 0xb5, 0x43, 0xe3, 0xa8, 0xd5, 0x3d, 0xb6, 0xff, 0xb3, 0x87, 0xb6, 0x3e, + 0x94, 0x9b, 0x07, 0xb9, 0x0f, 0xf4, 0x49, 0xb6, 0xf4, 0x06, 0xb9, 0x86, 0xb3, 0x26, 0x68, 0xaf, + 0x52, 0x04, 0xc9, 0x22, 0xf8, 0x9c, 0x46, 0x19, 0xe0, 0x01, 0x6a, 0xc8, 0xec, 0xbc, 0x63, 0x1c, + 0xd6, 0x8f, 0x5a, 0x5d, 0xfb, 0x8e, 0x7c, 0x4b, 0x44, 0xb8, 0xf7, 0x75, 0xc2, 0x86, 0x5c, 0x71, + 0x92, 0x63, 0x59, 0x3f, 0xd5, 0xd0, 0x96, 0xf6, 0xc2, 0x4f, 0x50, 0x53, 0xf6, 0x7d, 0x44, 0x05, + 0x55, 0x74, 0xb5, 0xba, 0xef, 0x56, 0x72, 0x14, 0x6d, 0xb0, 0xd3, 0x49, 0x20, 0x37, 0xb8, 0x2d, + 0xbd, 0xed, 0xd9, 0x89, 0x7d, 0x3e, 0xfc, 0x16, 0x7c, 0xf1, 0x09, 0x08, 0xea, 0x62, 0x9d, 0x05, + 0x95, 0x7b, 0xa4, 0x40, 0xc5, 0x3d, 0xb4, 0xc9, 0x53, 0xf0, 0x35, 0x63, 0x6f, 0xad, 0xc7, 0xd8, + 0x20, 0x05, 0xbf, 0x6c, 0x9c, 0x5c, 0x11, 0x85, 0x82, 0x3d, 0x74, 0x8f, 0x0b, 0x2a, 0x32, 0xae, + 0xda, 0xd6, 0xea, 0xbe, 0xb3, 0x26, 0x9e, 0x8a, 0x71, 0x77, 0x34, 0xe2, 0xbd, 0x7c, 0x4d, 0x34, + 0x96, 0xf5, 0x63, 0x0d, 0xed, 0x2c, 0xf6, 0x0a, 0xbf, 0x87, 0x5a, 0x1c, 0xd8, 0x2c, 0xf4, 0xe1, + 0x53, 0x3a, 0x05, 0x2d, 0xa5, 0xd7, 0x74, 0x7c, 0x6b, 0x50, 0x9a, 0x48, 0xd5, 0x0f, 0x07, 0x45, + 0x58, 0x3f, 0x61, 0x42, 0x17, 0x7d, 0x3b, 0xa5, 0x52, 0xd9, 0x76, 0xae, 0x6c, 0xfb, 0x2c, 0x16, + 0xe7, 0x6c, 0x20, 0x58, 0x18, 0x07, 0x2b, 0x89, 0x24, 0x18, 0xa9, 0x22, 0xe3, 0x2f, 0x50, 0x93, + 0x01, 0x4f, 0x32, 0xe6, 0x83, 0xa6, 0x62, 0x41, 0x8c, 0xf2, 0x09, 0x90, 0x6d, 0x92, 0xba, 0x1d, + 0xf5, 0x12, 0x9f, 0x46, 0x79, 0x73, 0x08, 0x3c, 0x05, 0x06, 0xb1, 0x0f, 0x6e, 0x5b, 0x0a, 0x9e, + 0x68, 0x08, 0x52, 0x80, 0xc9, 0x0b, 0xd5, 0xd6, 0x5c, 0x3c, 0x8e, 0xe8, 0x2b, 0x91, 0xc8, 0x67, + 0x0b, 0x12, 0x71, 0xd6, 
0x6b, 0xa9, 0x3a, 0xdc, 0x6d, 0x3a, 0xb1, 0xfe, 0x30, 0xd0, 0x6e, 0xd5, + 0xb1, 0x17, 0x72, 0x81, 0xbf, 0x5e, 0xa9, 0xc4, 0x5e, 0xaf, 0x12, 0x19, 0xad, 0xea, 0xd8, 0xd5, + 0xa9, 0x9a, 0xd7, 0x3b, 0x95, 0x2a, 0xfa, 0xa8, 0x11, 0x0a, 0x98, 0xf2, 0x4e, 0x4d, 0xdd, 0xd5, + 0xb7, 0x5f, 0xa2, 0x8c, 0xf2, 0xa2, 0x9e, 0x49, 0x04, 0x92, 0x03, 0x59, 0xbf, 0x2c, 0x15, 0x21, + 0xeb, 0xc3, 0x5d, 0x84, 0xfc, 0x24, 0x16, 0x2c, 0x89, 0x22, 0x60, 0x5a, 0x97, 0x05, 0xbd, 0x8f, + 0x0b, 0x0b, 0xa9, 0x78, 0xe1, 0x6f, 0x10, 0x4a, 0x29, 0xa3, 0x53, 0x10, 0xc0, 0xf8, 0x4d, 0x6f, + 0xd7, 0xdd, 0x72, 0xd9, 0x91, 0xf0, 0xfd, 0x02, 0x84, 0x54, 0x00, 0xad, 0xdf, 0x0c, 0xd4, 0xd2, + 0xe7, 0x7c, 0x05, 0x3c, 0x7f, 0xbc, 0xc8, 0xf3, 0x1b, 0x6b, 0xbe, 0xc1, 0x37, 0x53, 0xfc, 0x6b, + 0x79, 0x74, 0xf9, 0xea, 0xca, 0xd1, 0x31, 0x4e, 0xb8, 0x58, 0x1e, 0x1d, 0xa7, 0x09, 0x17, 0x44, + 0x59, 0x70, 0x86, 0x76, 0xc3, 0xa5, 0x67, 0xfa, 0xe5, 0x84, 0x5b, 0x84, 0xb9, 0x1d, 0x0d, 0xbf, + 0xbb, 0x6c, 0x21, 0x2b, 0x29, 0x2c, 0x40, 0x2b, 0x5e, 0xf2, 0xde, 0x8c, 0x85, 0x48, 0x35, 0xc7, + 0x0f, 0xd7, 0x1f, 0x0e, 0xe5, 0x11, 0x9a, 0xaa, 0x3a, 0xcf, 0xeb, 0x13, 0x05, 0x65, 0xfd, 0x5e, + 0x2b, 0xf8, 0x50, 0x6a, 0xfb, 0xb0, 0xa8, 0x56, 0x29, 0x50, 0xbd, 0x85, 0x9b, 0x8a, 0x9b, 0xbd, + 0xca, 0xc1, 0x0b, 0x1b, 0x59, 0xf1, 0xc6, 0x5e, 0x39, 0x34, 0x8d, 0xff, 0x33, 0x34, 0x5b, 0x37, + 0x0d, 0x4c, 0x7c, 0x8a, 0xea, 0x22, 0xba, 0x96, 0xc0, 0x9b, 0xeb, 0x21, 0x7a, 0xbd, 0x81, 0xdb, + 0xd2, 0x94, 0xd7, 0xbd, 0xde, 0x80, 0x48, 0x08, 0x7c, 0x8e, 0x1a, 0x2c, 0x8b, 0x40, 0x0e, 0x94, + 0xfa, 0xfa, 0x03, 0x4a, 0x32, 0x58, 0x4a, 0x4a, 0xae, 0x38, 0xc9, 0x71, 0xac, 0xef, 0xd0, 0xfd, + 0x85, 0xa9, 0x83, 0x9f, 0xa0, 0x76, 0x94, 0xd0, 0x91, 0x4b, 0x23, 0x1a, 0xfb, 0xfa, 0xce, 0x2e, + 0xe9, 0xf6, 0xfa, 0xfe, 0xf5, 0x2a, 0x7e, 0x7a, 0x66, 0xed, 0xe9, 0x24, 0xed, 0xaa, 0x8d, 0x2c, + 0x20, 0x5a, 0x14, 0xa1, 0xb2, 0x46, 0x7c, 0x80, 0x1a, 0x52, 0xa9, 0xf9, 0x9f, 0x86, 0x6d, 0x77, + 0x5b, 0x9e, 0x50, 0x0a, 0x98, 0x93, 0x7c, 0x5f, 0x3e, 0x21, 0x1c, 0x7c, 0x06, 0x42, 0xb5, 0xb3, + 0xb6, 0xf8, 0x84, 0x0c, 0x0a, 0x0b, 0xa9, 0x78, 0xb9, 0xc7, 0x17, 0x57, 0xe6, 0xc6, 0xf3, 0x2b, + 0x73, 0xe3, 0xc5, 0x95, 0xb9, 0xf1, 0xc3, 0xdc, 0x34, 0x2e, 0xe6, 0xa6, 0xf1, 0x7c, 0x6e, 0x1a, + 0x2f, 0xe6, 0xa6, 0xf1, 0xd7, 0xdc, 0x34, 0x7e, 0xfe, 0xdb, 0xdc, 0xf8, 0x6a, 0x4b, 0xd3, 0xf4, + 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x54, 0x4d, 0x9d, 0x25, 0x0b, 0x00, 0x00, +} + +func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PathType != nil { + i -= len(*m.PathType) + copy(dAtA[i:], *m.PathType) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Path) + copy(dAtA[i:], m.Path) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Ingress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressBackend) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Resource != nil { + { + size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + { + size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.ServiceName) + copy(dAtA[i:], m.ServiceName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Parameters != nil { + { + size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Controller) + copy(dAtA[i:], m.Controller) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= 
size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.HTTP != nil { + { + size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IngressSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.IngressClassName != nil { + i -= len(*m.IngressClassName) + copy(dAtA[i:], *m.IngressClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName))) + i-- + dAtA[i] = 0x22 + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.TLS) > 0 { + for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if m.Backend != nil { + { + size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *IngressStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *IngressTLS) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.SecretName) + copy(dAtA[i:], m.SecretName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i-- + dAtA[i] = 0x12 + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *HTTPIngressPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Backend.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.PathType != nil { + l = len(*m.PathType) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *HTTPIngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, e := range m.Paths { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Ingress) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressBackend) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ServiceName) + n += 1 + l + sovGenerated(uint64(l)) + l = m.ServicePort.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Resource != nil { + l = m.Resource.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Controller) + n += 1 + l + sovGenerated(uint64(l)) + if m.Parameters != nil { + l = m.Parameters.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *IngressRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + n += 1 + l + sovGenerated(uint64(l)) + l = m.IngressRuleValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressRuleValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.HTTP != nil { + l = m.HTTP.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Backend != nil { + l = m.Backend.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + } + if len(m.TLS) > 0 { + for _, e := range m.TLS { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.IngressClassName != nil { + l = len(*m.IngressClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *IngressStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LoadBalancer.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *IngressTLS) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *HTTPIngressPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HTTPIngressPath{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, + `PathType:` + valueToStringGenerated(this.PathType) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPIngressRuleValue) String() string { + if this == nil { + return "nil" + } + repeatedStringForPaths := "[]HTTPIngressPath{" + for _, f := range this.Paths { + repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," + } + repeatedStringForPaths += "}" + s := strings.Join([]string{`&HTTPIngressRuleValue{`, + `Paths:` + repeatedStringForPaths + `,`, + `}`, + }, "") + return s +} +func (this *Ingress) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Ingress{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressBackend) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressBackend{`, + `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressClassSpec", "IngressClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]IngressClass{" + for _, f := range this.Items { + 
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IngressClass", "IngressClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IngressClassSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressClassSpec{`, + `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, + `Parameters:` + strings.Replace(fmt.Sprintf("%v", this.Parameters), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Ingress{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&IngressList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *IngressRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRule{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressRuleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressRuleValue{`, + `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForTLS := "[]IngressTLS{" + for _, f := range this.TLS { + repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," + } + repeatedStringForTLS += "}" + repeatedStringForRules := "[]IngressRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&IngressSpec{`, + `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + repeatedStringForTLS + `,`, + `Rules:` + repeatedStringForRules + `,`, + `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `}`, + }, "") + return s +} +func (this *IngressStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *IngressTLS) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IngressTLS{`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return 
"nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Path = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := PathType(dAtA[iNdEx:postIndex]) + m.PathType = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, HTTPIngressPath{}) + if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ingress) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ingress: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressBackend) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServiceName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + 
postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Resource == nil { + m.Resource = &v11.TypedLocalObjectReference{} + } + if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) 
+ wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, IngressClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Controller = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = &v11.TypedLocalObjectReference{} + } + if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Ingress{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m 
*IngressRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } 
+ postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.HTTP == nil { + m.HTTP = &HTTPIngressRuleValue{} + } + if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Backend == nil { + m.Backend = &IngressBackend{} + } + if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TLS = append(m.TLS, IngressTLS{}) + if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, IngressRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = 
postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.IngressClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IngressTLS) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + 
case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SecretName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto new file mode 
100644 index 0000000..68bede8 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/generated.proto @@ -0,0 +1,276 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.networking.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// HTTPIngressPath associates a path with a backend. Incoming urls matching the +// path are forwarded to the backend. +message HTTPIngressPath { + // Path is matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" part of a URL + // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, + // all paths from incoming requests are matched. + // +optional + optional string path = 1; + + // PathType determines the interpretation of the Path matching. PathType can + // be one of the following values: + // * Exact: Matches the URL path exactly. + // * Prefix: Matches based on a URL path prefix split by '/'. Matching is + // done on a path element by element basis. A path element refers is the + // list of labels in the path split by the '/' separator. A request is a + // match for path p if every p is an element-wise prefix of p of the + // request path. Note that if the last element of the path is a substring + // of the last element in request path, it is not a match (e.g. /foo/bar + // matches /foo/bar/baz, but does not match /foo/barbaz). + // * ImplementationSpecific: Interpretation of the Path matching is up to + // the IngressClass. Implementations can treat this as a separate PathType + // or treat it identically to Prefix or Exact path types. + // Implementations are required to support all path types. + // Defaults to ImplementationSpecific. + optional string pathType = 3; + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + optional IngressBackend backend = 2; +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +message HTTPIngressRuleValue { + // A collection of paths that map requests to backends. + repeated HTTPIngressPath paths = 1; +} + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. 
An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +message Ingress { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the desired state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IngressSpec spec = 2; + + // Status is the current state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IngressStatus status = 3; +} + +// IngressBackend describes all endpoints for a given service and port. +message IngressBackend { + // Specifies the name of the referenced service. + // +optional + optional string serviceName = 1; + + // Specifies the port of the referenced service. + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; + + // Resource is an ObjectRef to another Kubernetes resource in the namespace + // of the Ingress object. If resource is specified, serviceName and servicePort + // must not be specified. + // +optional + optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; +} + +// IngressClass represents the class of the Ingress, referenced by the Ingress +// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be +// used to indicate that an IngressClass should be considered default. When a +// single IngressClass resource has this annotation set to true, new Ingress +// resources without a class specified will be assigned this default class. +message IngressClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Spec is the desired state of the IngressClass. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional IngressClassSpec spec = 2; +} + +// IngressClassList is a collection of IngressClasses. +message IngressClassList { + // Standard list metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of IngressClasses. + // +listType=set + repeated IngressClass items = 2; +} + +// IngressClassSpec provides information about the class of an Ingress. +message IngressClassSpec { + // Controller refers to the name of the controller that should handle this + // class. This allows for different "flavors" that are controlled by the + // same controller. For example, you may have different Parameters for the + // same implementing controller. This should be specified as a + // domain-prefixed path no more than 250 characters in length, e.g. + // "acme.io/ingress-controller". This field is immutable. + optional string controller = 1; + + // Parameters is a link to a custom resource containing additional + // configuration for the controller. This is optional if the controller does + // not require extra parameters. + // +optional + optional k8s.io.api.core.v1.TypedLocalObjectReference parameters = 2; +} + +// IngressList is a collection of Ingress. 
+message IngressList { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of Ingress. + repeated Ingress items = 2; +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +message IngressRule { + // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // Note the following deviations from the "host" part of the + // URI as defined in RFC 3986: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to + // the IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the + // IngressRuleValue. If the host is unspecified, the Ingress routes all + // traffic based on the specified IngressRuleValue. + // + // Host can be "precise" which is a domain name without the terminating dot of + // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name + // prefixed with a single wildcard label (e.g. "*.foo.com"). + // The wildcard character '*' must appear by itself as the first DNS label and + // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). + // Requests will be matched against the Host field in the following way: + // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If Host is a wildcard, then the request matches this rule if the http host header + // is to equal to the suffix (removing the first label) of the wildcard rule. + // +optional + optional string host = 1; + + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + optional IngressRuleValue ingressRuleValue = 2; +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +message IngressRuleValue { + // +optional + optional HTTPIngressRuleValue http = 1; +} + +// IngressSpec describes the Ingress the user wishes to exist. +message IngressSpec { + // IngressClassName is the name of the IngressClass cluster resource. The + // associated IngressClass defines which controller will implement the + // resource. This replaces the deprecated `kubernetes.io/ingress.class` + // annotation. For backwards compatibility, when that annotation is set, it + // must be given precedence over this field. The controller may emit a + // warning if the field and annotation have different values. + // Implementations of this API should ignore Ingresses without a class + // specified. 
An IngressClass resource may be marked as default, which can + // be used to set a default value for this field. For more information, + // refer to the IngressClass documentation. + // +optional + optional string ingressClassName = 4; + + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + optional IngressBackend backend = 1; + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + repeated IngressTLS tls = 2; + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + repeated IngressRule rules = 3; +} + +// IngressStatus describe the current state of the Ingress. +message IngressStatus { + // LoadBalancer contains the current status of the load-balancer. + // +optional + optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; +} + +// IngressTLS describes the transport layer security associated with an Ingress. +message IngressTLS { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + repeated string hosts = 1; + + // SecretName is the name of the secret used to terminate TLS traffic on + // port 443. Field is left optional to allow TLS routing based on SNI + // hostname alone. If the SNI host in a listener conflicts with the "Host" + // header field used by an IngressRule, the SNI host is used for termination + // and value of the Host header is used for routing. + // +optional + optional string secretName = 2; +} + diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go new file mode 100644 index 0000000..0423495 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "networking.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder holds functions that add things to a scheme + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + + // AddToScheme adds the types of this group into the given scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Ingress{}, + &IngressList{}, + &IngressClass{}, + &IngressClassList{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go new file mode 100644 index 0000000..46f530b --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/types.go @@ -0,0 +1,320 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Ingress is a collection of rules that allow inbound connections to reach the +// endpoints defined by a backend. An Ingress can be configured to give services +// externally-reachable urls, load balance traffic, terminate SSL, offer name +// based virtual hosting etc. +type Ingress struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is the desired state of the Ingress. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + + // Status is the current state of the Ingress. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressList is a collection of Ingress. +type IngressList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of Ingress. + Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// IngressSpec describes the Ingress the user wishes to exist. +type IngressSpec struct { + // IngressClassName is the name of the IngressClass cluster resource. The + // associated IngressClass defines which controller will implement the + // resource. This replaces the deprecated `kubernetes.io/ingress.class` + // annotation. For backwards compatibility, when that annotation is set, it + // must be given precedence over this field. The controller may emit a + // warning if the field and annotation have different values. + // Implementations of this API should ignore Ingresses without a class + // specified. An IngressClass resource may be marked as default, which can + // be used to set a default value for this field. For more information, + // refer to the IngressClass documentation. + // +optional + IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` + + // A default backend capable of servicing requests that don't match any + // rule. At least one of 'backend' or 'rules' must be specified. This field + // is optional to allow the loadbalancer controller or defaulting logic to + // specify a global default. + // +optional + Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` + + // TLS configuration. Currently the Ingress only supports a single TLS + // port, 443. If multiple members of this list specify different hosts, they + // will be multiplexed on the same port according to the hostname specified + // through the SNI TLS extension, if the ingress controller fulfilling the + // ingress supports SNI. + // +optional + TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` + + // A list of host rules used to configure the Ingress. If unspecified, or + // no rule matches, all traffic is sent to the default backend. + // +optional + Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` + // TODO: Add the ability to specify load-balancer IP through claims +} + +// IngressTLS describes the transport layer security associated with an Ingress. +type IngressTLS struct { + // Hosts are a list of hosts included in the TLS certificate. The values in + // this list must match the name/s used in the tlsSecret. Defaults to the + // wildcard host setting for the loadbalancer controller fulfilling this + // Ingress, if left unspecified. + // +optional + Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` + // SecretName is the name of the secret used to terminate TLS traffic on + // port 443. Field is left optional to allow TLS routing based on SNI + // hostname alone. 
If the SNI host in a listener conflicts with the "Host" + // header field used by an IngressRule, the SNI host is used for termination + // and value of the Host header is used for routing. + // +optional + SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` + // TODO: Consider specifying different modes of termination, protocols etc. +} + +// IngressStatus describe the current state of the Ingress. +type IngressStatus struct { + // LoadBalancer contains the current status of the load-balancer. + // +optional + LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` +} + +// IngressRule represents the rules mapping the paths under a specified host to +// the related backend services. Incoming requests are first evaluated for a host +// match, then routed to the backend associated with the matching IngressRuleValue. +type IngressRule struct { + // Host is the fully qualified domain name of a network host, as defined by RFC 3986. + // Note the following deviations from the "host" part of the + // URI as defined in RFC 3986: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to + // the IP in the Spec of the parent Ingress. + // 2. The `:` delimiter is not respected because ports are not allowed. + // Currently the port of an Ingress is implicitly :80 for http and + // :443 for https. + // Both these may change in the future. + // Incoming requests are matched against the host before the + // IngressRuleValue. If the host is unspecified, the Ingress routes all + // traffic based on the specified IngressRuleValue. + // + // Host can be "precise" which is a domain name without the terminating dot of + // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name + // prefixed with a single wildcard label (e.g. "*.foo.com"). + // The wildcard character '*' must appear by itself as the first DNS label and + // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). + // Requests will be matched against the Host field in the following way: + // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. + // 2. If Host is a wildcard, then the request matches this rule if the http host header + // is to equal to the suffix (removing the first label) of the wildcard rule. + // +optional + Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` + // IngressRuleValue represents a rule to route requests for this IngressRule. + // If unspecified, the rule defaults to a http catch-all. Whether that sends + // just traffic matching the host to the default backend or all traffic to the + // default backend, is left to the controller fulfilling the Ingress. Http is + // currently the only supported IngressRuleValue. + // +optional + IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` +} + +// IngressRuleValue represents a rule to apply against incoming requests. If the +// rule is satisfied, the request is routed to the specified backend. Currently +// mixing different types of rules in a single Ingress is disallowed, so exactly +// one of the following must be set. +type IngressRuleValue struct { + //TODO: + // 1. Consider renaming this resource and the associated rules so they + // aren't tied to Ingress. They can be used to route intra-cluster traffic. + // 2. 
Consider adding fields for ingress-type specific global options + // usable by a loadbalancer, like http keep-alive. + + // +optional + HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` +} + +// HTTPIngressRuleValue is a list of http selectors pointing to backends. +// In the example: http:///? -> backend where +// where parts of the url correspond to RFC 3986, this resource will be used +// to match against everything after the last '/' and before the first '?' +// or '#'. +type HTTPIngressRuleValue struct { + // A collection of paths that map requests to backends. + Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` + // TODO: Consider adding fields for ingress-type specific global + // options usable by a loadbalancer, like http keep-alive. +} + +// PathType represents the type of path referred to by a HTTPIngressPath. +type PathType string + +const ( + // PathTypeExact matches the URL path exactly and with case sensitivity. + PathTypeExact = PathType("Exact") + + // PathTypePrefix matches based on a URL path prefix split by '/'. Matching + // is case sensitive and done on a path element by element basis. A path + // element refers to the list of labels in the path split by the '/' + // separator. A request is a match for path p if every p is an element-wise + // prefix of p of the request path. Note that if the last element of the + // path is a substring of the last element in request path, it is not a + // match (e.g. /foo/bar matches /foo/bar/baz, but does not match + // /foo/barbaz). If multiple matching paths exist in an Ingress spec, the + // longest matching path is given priority. + // Examples: + // - /foo/bar does not match requests to /foo/barbaz + // - /foo/bar matches request to /foo/bar and /foo/bar/baz + // - /foo and /foo/ both match requests to /foo and /foo/. If both paths are + // present in an Ingress spec, the longest matching path (/foo/) is given + // priority. + PathTypePrefix = PathType("Prefix") + + // PathTypeImplementationSpecific matching is up to the IngressClass. + // Implementations can treat this as a separate PathType or treat it + // identically to Prefix or Exact path types. + PathTypeImplementationSpecific = PathType("ImplementationSpecific") +) + +// HTTPIngressPath associates a path with a backend. Incoming urls matching the +// path are forwarded to the backend. +type HTTPIngressPath struct { + // Path is matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" part of a URL + // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, + // all paths from incoming requests are matched. + // +optional + Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` + + // PathType determines the interpretation of the Path matching. PathType can + // be one of the following values: + // * Exact: Matches the URL path exactly. + // * Prefix: Matches based on a URL path prefix split by '/'. Matching is + // done on a path element by element basis. A path element refers is the + // list of labels in the path split by the '/' separator. A request is a + // match for path p if every p is an element-wise prefix of p of the + // request path. Note that if the last element of the path is a substring + // of the last element in request path, it is not a match (e.g. /foo/bar + // matches /foo/bar/baz, but does not match /foo/barbaz). 
+ // * ImplementationSpecific: Interpretation of the Path matching is up to + // the IngressClass. Implementations can treat this as a separate PathType + // or treat it identically to Prefix or Exact path types. + // Implementations are required to support all path types. + // Defaults to ImplementationSpecific. + PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` + + // Backend defines the referenced service endpoint to which the traffic + // will be forwarded to. + Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` +} + +// IngressBackend describes all endpoints for a given service and port. +type IngressBackend struct { + // Specifies the name of the referenced service. + // +optional + ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` + + // Specifies the port of the referenced service. + // +optional + ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` + + // Resource is an ObjectRef to another Kubernetes resource in the namespace + // of the Ingress object. If resource is specified, serviceName and servicePort + // must not be specified. + // +optional + Resource *v1.TypedLocalObjectReference `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressClass represents the class of the Ingress, referenced by the Ingress +// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be +// used to indicate that an IngressClass should be considered default. When a +// single IngressClass resource has this annotation set to true, new Ingress +// resources without a class specified will be assigned this default class. +type IngressClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Spec is the desired state of the IngressClass. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// IngressClassSpec provides information about the class of an Ingress. +type IngressClassSpec struct { + // Controller refers to the name of the controller that should handle this + // class. This allows for different "flavors" that are controlled by the + // same controller. For example, you may have different Parameters for the + // same implementing controller. This should be specified as a + // domain-prefixed path no more than 250 characters in length, e.g. + // "acme.io/ingress-controller". This field is immutable. + Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` + + // Parameters is a link to a custom resource containing additional + // configuration for the controller. This is optional if the controller does + // not require extra parameters. + // +optional + Parameters *v1.TypedLocalObjectReference `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// IngressClassList is a collection of IngressClasses. 
+type IngressClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of IngressClasses. + // +listType=set + Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..c774249 --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,160 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_HTTPIngressPath = map[string]string{ + "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", + "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", + "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. 
Defaults to ImplementationSpecific.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", +} + +func (HTTPIngressPath) SwaggerDoc() map[string]string { + return map_HTTPIngressPath +} + +var map_HTTPIngressRuleValue = map[string]string{ + "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", + "paths": "A collection of paths that map requests to backends.", +} + +func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { + return map_HTTPIngressRuleValue +} + +var map_Ingress = map[string]string{ + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (Ingress) SwaggerDoc() map[string]string { + return map_Ingress +} + +var map_IngressBackend = map[string]string{ + "": "IngressBackend describes all endpoints for a given service and port.", + "serviceName": "Specifies the name of the referenced service.", + "servicePort": "Specifies the port of the referenced service.", + "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", +} + +func (IngressBackend) SwaggerDoc() map[string]string { + return map_IngressBackend +} + +var map_IngressClass = map[string]string{ + "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (IngressClass) SwaggerDoc() map[string]string { + return map_IngressClass +} + +var map_IngressClassList = map[string]string{ + "": "IngressClassList is a collection of IngressClasses.", + "metadata": "Standard list metadata.", + "items": "Items is the list of IngressClasses.", +} + +func (IngressClassList) SwaggerDoc() map[string]string { + return map_IngressClassList +} + +var map_IngressClassSpec = map[string]string{ + "": "IngressClassSpec provides information about the class of an Ingress.", + "controller": "Controller refers to the name of the controller that should handle this class. 
This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", + "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", +} + +func (IngressClassSpec) SwaggerDoc() map[string]string { + return map_IngressClassSpec +} + +var map_IngressList = map[string]string{ + "": "IngressList is a collection of Ingress.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of Ingress.", +} + +func (IngressList) SwaggerDoc() map[string]string { + return map_IngressList +} + +var map_IngressRule = map[string]string{ + "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", +} + +func (IngressRule) SwaggerDoc() map[string]string { + return map_IngressRule +} + +var map_IngressRuleValue = map[string]string{ + "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", +} + +func (IngressRuleValue) SwaggerDoc() map[string]string { + return map_IngressRuleValue +} + +var map_IngressSpec = map[string]string{ + "": "IngressSpec describes the Ingress the user wishes to exist.", + "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. 
For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", +} + +func (IngressSpec) SwaggerDoc() map[string]string { + return map_IngressSpec +} + +var map_IngressStatus = map[string]string{ + "": "IngressStatus describe the current state of the Ingress.", + "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", +} + +func (IngressStatus) SwaggerDoc() map[string]string { + return map_IngressStatus +} + +var map_IngressTLS = map[string]string{ + "": "IngressTLS describes the transport layer security associated with an Ingress.", + "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", + "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", +} + +func (IngressTLS) SwaggerDoc() map[string]string { + return map_IngressTLS +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go b/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go new file mode 100644 index 0000000..1629b5d --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go @@ -0,0 +1,32 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +const ( + // AnnotationIsDefaultIngressClass can be used to indicate that an + // IngressClass should be considered default. 
When a single IngressClass + // resource has this annotation set to true, new Ingress resources without a + // class specified will be assigned this default class. + AnnotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class" + + // AnnotationIngressClass indicates the class of an Ingress to be used when + // determining which controller should implement the Ingress. Use of this + // annotation is deprecated. The Ingress class field should be used instead + // of this annotation. + // +deprecated + AnnotationIngressClass = "kubernetes.io/ingress.class" +) diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..d55ccde --- /dev/null +++ b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,351 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { + *out = *in + if in.PathType != nil { + in, out := &in.PathType, &out.PathType + *out = new(PathType) + **out = **in + } + in.Backend.DeepCopyInto(&out.Backend) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath. +func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath { + if in == nil { + return nil + } + out := new(HTTPIngressPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]HTTPIngressPath, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue. +func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { + if in == nil { + return nil + } + out := new(HTTPIngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Ingress) DeepCopyInto(out *Ingress) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. 
+func (in *Ingress) DeepCopy() *Ingress { + if in == nil { + return nil + } + out := new(Ingress) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Ingress) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { + *out = *in + out.ServicePort = in.ServicePort + if in.Resource != nil { + in, out := &in.Resource, &out.Resource + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend. +func (in *IngressBackend) DeepCopy() *IngressBackend { + if in == nil { + return nil + } + out := new(IngressBackend) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressClass) DeepCopyInto(out *IngressClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClass. +func (in *IngressClass) DeepCopy() *IngressClass { + if in == nil { + return nil + } + out := new(IngressClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressClassList) DeepCopyInto(out *IngressClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]IngressClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassList. +func (in *IngressClassList) DeepCopy() *IngressClassList { + if in == nil { + return nil + } + out := new(IngressClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressClassSpec) DeepCopyInto(out *IngressClassSpec) { + *out = *in + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = new(v1.TypedLocalObjectReference) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassSpec. +func (in *IngressClassSpec) DeepCopy() *IngressClassSpec { + if in == nil { + return nil + } + out := new(IngressClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *IngressList) DeepCopyInto(out *IngressList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Ingress, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. +func (in *IngressList) DeepCopy() *IngressList { + if in == nil { + return nil + } + out := new(IngressList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *IngressList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRule) DeepCopyInto(out *IngressRule) { + *out = *in + in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. +func (in *IngressRule) DeepCopy() *IngressRule { + if in == nil { + return nil + } + out := new(IngressRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) { + *out = *in + if in.HTTP != nil { + in, out := &in.HTTP, &out.HTTP + *out = new(HTTPIngressRuleValue) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue. +func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { + if in == nil { + return nil + } + out := new(IngressRuleValue) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { + *out = *in + if in.IngressClassName != nil { + in, out := &in.IngressClassName, &out.IngressClassName + *out = new(string) + **out = **in + } + if in.Backend != nil { + in, out := &in.Backend, &out.Backend + *out = new(IngressBackend) + (*in).DeepCopyInto(*out) + } + if in.TLS != nil { + in, out := &in.TLS, &out.TLS + *out = make([]IngressTLS, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]IngressRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. +func (in *IngressSpec) DeepCopy() *IngressSpec { + if in == nil { + return nil + } + out := new(IngressSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { + *out = *in + in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. 
+func (in *IngressStatus) DeepCopy() *IngressStatus { + if in == nil { + return nil + } + out := new(IngressStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IngressTLS) DeepCopyInto(out *IngressTLS) { + *out = *in + if in.Hosts != nil { + in, out := &in.Hosts, &out.Hosts + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS. +func (in *IngressTLS) DeepCopy() *IngressTLS { + if in == nil { + return nil + } + out := new(IngressTLS) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/node/v1alpha1/doc.go new file mode 100644 index 0000000..dfe9954 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=node.k8s.io + +package v1alpha1 // import "k8s.io/api/node/v1alpha1" diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go new file mode 100644 index 0000000..e6658a9 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go @@ -0,0 +1,1583 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } +func (*RuntimeClassSpec) ProtoMessage() {} +func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_82a78945ab308218, []int{3} +} +func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassSpec.Merge(m, src) +} +func (m *RuntimeClassSpec) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassSpec) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassSpec proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return 
fileDescriptor_82a78945ab308218, []int{4} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1alpha1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1alpha1.Overhead.PodFixedEntry") + proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") + proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1alpha1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1alpha1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_82a78945ab308218) +} + +var fileDescriptor_82a78945ab308218 = []byte{ + // 698 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbf, 0x6f, 0xd3, 0x4e, + 0x14, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x5f, 0x7f, 0x2b, 0x14, 0x65, 0x70, 0x2a, 0x0b, + 0xa1, 0x0a, 0xa9, 0x67, 0x5a, 0xa1, 0xaa, 0x62, 0x00, 0x61, 0x7e, 0x08, 0x44, 0x69, 0xc1, 0x2d, + 0x0b, 0x62, 0xe0, 0x62, 0x3f, 0x1c, 0x13, 0xdb, 0x67, 0xd9, 0xe7, 0x88, 0x6c, 0x88, 0x05, 0x89, + 0x89, 0x89, 0xff, 0x06, 0xe6, 0x8e, 0x9d, 0x50, 0xa7, 0x96, 0x86, 0xff, 0x81, 0x81, 0x09, 0x9d, + 0x7d, 0x4e, 0x9c, 0xa4, 0xa1, 0x61, 0xf3, 0xdd, 0x7d, 0x7e, 0xdc, 0xfb, 0xbc, 0x7b, 0xc6, 0x77, + 0x3b, 0x3b, 0x31, 0x71, 0x99, 0xde, 0x49, 0x5a, 0x10, 0x05, 0xc0, 0x21, 0xd6, 0xbb, 0x10, 0xd8, + 0x2c, 0xd2, 0xe5, 0x01, 0x0d, 0x5d, 0x3d, 0x60, 0x36, 0xe8, 0xdd, 0x4d, 0xea, 0x85, 0x6d, 0xba, + 0xa9, 0x3b, 0x10, 0x40, 0x44, 0x39, 0xd8, 0x24, 0x8c, 0x18, 0x67, 0x4a, 0x3d, 0x43, 0x12, 0x1a, + 0xba, 0x44, 0x20, 0x49, 0x8e, 0x6c, 0x6c, 0x38, 0x2e, 0x6f, 0x27, 0x2d, 0x62, 0x31, 0x5f, 0x77, + 0x98, 0xc3, 0xf4, 0x94, 0xd0, 0x4a, 0xde, 0xa4, 0xab, 0x74, 0x91, 0x7e, 0x65, 0x42, 0x0d, 0xad, + 0x60, 0x69, 0xb1, 0x48, 0x58, 0x8e, 0x9b, 0x35, 0x6e, 0x0e, 0x31, 0x3e, 0xb5, 0xda, 0x6e, 0x00, + 0x51, 0x4f, 0x0f, 0x3b, 0x4e, 0x4a, 0x8a, 0x20, 0x66, 0x49, 0x64, 0xc1, 0x3f, 0xb1, 0x62, 0xdd, + 0x07, 0x4e, 0x2f, 0xf2, 0xd2, 0xa7, 0xb1, 0xa2, 0x24, 0xe0, 0xae, 0x3f, 0x69, 0xb3, 0x7d, 0x19, + 0x21, 0xb6, 0xda, 0xe0, 0xd3, 0x71, 0x9e, 0x76, 0x5c, 0xc6, 0x95, 0xfd, 0x2e, 0x44, 0x6d, 0xa0, + 0xb6, 0xf2, 0x1d, 0xe1, 0x4a, 0xc8, 0xec, 0x87, 0xee, 0x3b, 0xb0, 0xeb, 0x68, 0x6d, 0x6e, 0xbd, + 0xba, 0x75, 0x83, 0x4c, 0x8b, 0x98, 0xe4, 0x34, 0xf2, 0x4c, 0x52, 0x1e, 0x04, 0x3c, 0xea, 0x19, + 0x1f, 0xd1, 0xd1, 0x69, 0xb3, 0xd4, 0x3f, 0x6d, 0x56, 0xf2, 0xfd, 0xdf, 0xa7, 0xcd, 0xe6, 0x64, + 0xbe, 0xc4, 0x94, 0x91, 0xed, 0xba, 0x31, 0xff, 0x70, 0xf6, 0x57, 0xc8, 0x1e, 0xf5, 0xe1, 0xd3, + 0x59, 0x73, 0x63, 0x96, 0x0e, 0x90, 0xe7, 0x09, 0x0d, 0xb8, 0xcb, 0x7b, 0xe6, 0xa0, 0x96, 0x46, + 0x07, 0x2f, 0x8d, 0x5c, 0x52, 0x59, 0xc1, 
0x73, 0x1d, 0xe8, 0xd5, 0xd1, 0x1a, 0x5a, 0x5f, 0x34, + 0xc5, 0xa7, 0x72, 0x1f, 0x2f, 0x74, 0xa9, 0x97, 0x40, 0xbd, 0xbc, 0x86, 0xd6, 0xab, 0x5b, 0xa4, + 0x50, 0xf7, 0xc0, 0x8b, 0x84, 0x1d, 0x27, 0x0d, 0x62, 0xd2, 0x2b, 0x23, 0xdf, 0x2a, 0xef, 0x20, + 0xed, 0x1b, 0xc2, 0x35, 0x33, 0x4b, 0xfd, 0x9e, 0x47, 0xe3, 0x58, 0x79, 0x8d, 0x2b, 0xa2, 0xcf, + 0x36, 0xe5, 0x34, 0x75, 0x1c, 0x4d, 0x75, 0x42, 0x3d, 0x26, 0x02, 0x4d, 0xba, 0x9b, 0x64, 0xbf, + 0xf5, 0x16, 0x2c, 0xfe, 0x14, 0x38, 0x35, 0x14, 0x19, 0x2a, 0x1e, 0xee, 0x99, 0x03, 0x55, 0x65, + 0x17, 0xcf, 0xc7, 0x21, 0x58, 0xf2, 0xee, 0xd7, 0xa7, 0xf7, 0xac, 0x78, 0xaf, 0x83, 0x10, 0x2c, + 0xa3, 0x26, 0x75, 0xe7, 0xc5, 0xca, 0x4c, 0x55, 0xb4, 0xaf, 0x08, 0xaf, 0x14, 0x81, 0xa2, 0x41, + 0xca, 0xab, 0x89, 0x22, 0xc8, 0x6c, 0x45, 0x08, 0x76, 0x5a, 0xc2, 0x4a, 0xfe, 0x2e, 0xf2, 0x9d, + 0x42, 0x01, 0x4f, 0xf0, 0x82, 0xcb, 0xc1, 0x8f, 0xeb, 0xe5, 0xf4, 0xd5, 0x5d, 0x9b, 0xad, 0x02, + 0x63, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x33, 0xd3, 0xd0, 0x7e, 0x8d, 0xdd, 0x5f, 0x94, 0xa6, + 0xdc, 0xc6, 0xcb, 0x72, 0x14, 0x1e, 0xd1, 0xc0, 0xf6, 0x20, 0xca, 0x9a, 0x6f, 0x5c, 0x91, 0x12, + 0xcb, 0xe6, 0xc8, 0xa9, 0x39, 0x86, 0x56, 0x76, 0x71, 0x85, 0xc9, 0x07, 0x2f, 0x63, 0xd6, 0x2e, + 0x1f, 0x0d, 0xa3, 0x26, 0xea, 0xcd, 0x57, 0xe6, 0x40, 0x41, 0x39, 0xc4, 0x58, 0x0c, 0xa4, 0x9d, + 0x78, 0x6e, 0xe0, 0xd4, 0xe7, 0x52, 0xbd, 0xab, 0xd3, 0xf5, 0x0e, 0x06, 0x58, 0x63, 0x59, 0x3c, + 0x82, 0xe1, 0xda, 0x2c, 0xe8, 0x68, 0x5f, 0xca, 0xb8, 0x70, 0xa4, 0x84, 0xb8, 0x26, 0x64, 0x0e, + 0xc0, 0x03, 0x8b, 0xb3, 0x48, 0x4e, 0xf4, 0xf6, 0x2c, 0x36, 0x64, 0xaf, 0x40, 0xcc, 0xe6, 0x7a, + 0x55, 0x06, 0x55, 0x2b, 0x1e, 0x99, 0x23, 0x0e, 0xca, 0x0b, 0x5c, 0xe5, 0xcc, 0x13, 0x3f, 0x18, + 0x97, 0x05, 0x79, 0x33, 0xd5, 0xa2, 0xa1, 0x98, 0x6c, 0xf1, 0x2a, 0x0e, 0x07, 0x30, 0xe3, 0x7f, + 0x29, 0x5c, 0x1d, 0xee, 0xc5, 0x66, 0x51, 0xa7, 0x71, 0x07, 0xff, 0x37, 0x71, 0x9f, 0x0b, 0x46, + 0x78, 0xb5, 0x38, 0xc2, 0x8b, 0x85, 0x91, 0x34, 0xc8, 0xd1, 0xb9, 0x5a, 0x3a, 0x3e, 0x57, 0x4b, + 0x27, 0xe7, 0x6a, 0xe9, 0x7d, 0x5f, 0x45, 0x47, 0x7d, 0x15, 0x1d, 0xf7, 0x55, 0x74, 0xd2, 0x57, + 0xd1, 0x8f, 0xbe, 0x8a, 0x3e, 0xff, 0x54, 0x4b, 0x2f, 0x2b, 0x79, 0x10, 0x7f, 0x02, 0x00, 0x00, + 0xff, 0xff, 0xa8, 0x77, 0xef, 0x80, 0x9b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + 
i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.RuntimeHandler) + copy(dAtA[i:], m.RuntimeHandler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := 
len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuntimeClassSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.RuntimeHandler) + n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + 
mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassSpec{`, + `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + 
return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto new file 
mode 100644 index 0000000..ac05f83 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/generated.proto @@ -0,0 +1,118 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.node.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Overhead structure represents the resource overhead associated with running a pod. +message Overhead { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + map podFixed = 1; +} + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +message RuntimeClass { + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the RuntimeClass + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + optional RuntimeClassSpec spec = 2; +} + +// RuntimeClassList is a list of RuntimeClass objects. +message RuntimeClassList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. + repeated RuntimeClass items = 2; +} + +// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters +// that are required to describe the RuntimeClass to the Container Runtime +// Interface (CRI) implementation, as well as any other components that need to +// understand how the pod will be run. The RuntimeClassSpec is immutable. +message RuntimeClassSpec { + // RuntimeHandler specifies the underlying runtime and configuration that the + // CRI implementation will use to handle pods of this class. The possible + // values are specific to the node & CRI configuration. It is assumed that + // all handlers are available on every node, and handlers of the same name are + // equivalent on every node. 
+ // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements + // and is immutable. + optional string runtimeHandler = 1; + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + optional Overhead overhead = 2; + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + optional Scheduling scheduling = 3; +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +message Scheduling { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + map nodeSelector = 1; + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + repeated k8s.io.api.core.v1.Toleration tolerations = 2; +} + diff --git a/vendor/k8s.io/api/node/v1alpha1/register.go b/vendor/k8s.io/api/node/v1alpha1/register.go new file mode 100644 index 0000000..b608214 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
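The proto field numbers declared above are exactly where the single-byte tags in the generated marshalers come from: a protobuf tag is (fieldNumber << 3) | wireType, and sovGenerated is the varint width of a value. A minimal, self-contained Go sketch of both calculations (illustrative only, not part of the vendored code):

package main

import (
	"fmt"
	"math/bits"
)

// tag computes the one-byte protobuf tag used for small field numbers.
// Wire type 2 means "length-delimited" (strings, messages, map entries).
func tag(fieldNumber, wireType int) byte {
	return byte(fieldNumber<<3 | wireType)
}

// sov mirrors sovGenerated: the number of bytes a varint encoding of x needs.
func sov(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	fmt.Printf("field 1 -> 0x%02x\n", tag(1, 2)) // 0x0a: Overhead.podFixed, RuntimeClass.metadata
	fmt.Printf("field 2 -> 0x%02x\n", tag(2, 2)) // 0x12: RuntimeClass.spec, RuntimeClassSpec.overhead
	fmt.Printf("field 3 -> 0x%02x\n", tag(3, 2)) // 0x1a: RuntimeClassSpec.scheduling
	fmt.Println(sov(0), sov(127), sov(128))      // 1 1 2
}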
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "node.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +// addKnownTypes adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &RuntimeClass{}, + &RuntimeClassList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go new file mode 100644 index 0000000..b597671 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/types.go @@ -0,0 +1,116 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the RuntimeClass + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` +} + +// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters +// that are required to describe the RuntimeClass to the Container Runtime +// Interface (CRI) implementation, as well as any other components that need to +// understand how the pod will be run. 
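The register.go hunk above wires the new types into the usual scheme-builder pattern; a short sketch of consuming it (assuming the vendored k8s.io/api and k8s.io/apimachinery packages added by this patch; illustrative only):

package main

import (
	"fmt"

	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	if err := nodev1alpha1.AddToScheme(scheme); err != nil {
		panic(err)
	}
	// After registration the scheme recognizes node.k8s.io/v1alpha1, Kind=RuntimeClass.
	gvk := nodev1alpha1.SchemeGroupVersion.WithKind("RuntimeClass")
	fmt.Println(scheme.Recognizes(gvk)) // true
}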
The RuntimeClassSpec is immutable. +type RuntimeClassSpec struct { + // RuntimeHandler specifies the underlying runtime and configuration that the + // CRI implementation will use to handle pods of this class. The possible + // values are specific to the node & CRI configuration. It is assumed that + // all handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements + // and is immutable. + RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,3,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. + // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. 
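Taken together, the structs above nest as RuntimeClass -> RuntimeClassSpec -> {Overhead, Scheduling}; a hand-populated sketch (assuming only the vendored packages added in this patch; the handler name, labels, and resource values are illustrative):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	rc := nodev1alpha1.RuntimeClass{
		ObjectMeta: metav1.ObjectMeta{Name: "sandboxed"}, // illustrative name
		Spec: nodev1alpha1.RuntimeClassSpec{
			RuntimeHandler: "runsc", // illustrative CRI handler
			Overhead: &nodev1alpha1.Overhead{
				PodFixed: corev1.ResourceList{
					corev1.ResourceCPU:    resource.MustParse("250m"),
					corev1.ResourceMemory: resource.MustParse("64Mi"),
				},
			},
			Scheduling: &nodev1alpha1.Scheduling{
				NodeSelector: map[string]string{"sandboxed": "true"},
				Tolerations: []corev1.Toleration{{
					Key:      "sandboxed",
					Operator: corev1.TolerationOpExists,
					Effect:   corev1.TaintEffectNoSchedule,
				}},
			},
		},
	}
	fmt.Println(rc.Name, rc.Spec.RuntimeHandler)
}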
+ Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000..3900001 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,80 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_RuntimeClassSpec = map[string]string{ + "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. 
The RuntimeClassSpec is immutable.", + "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", +} + +func (RuntimeClassSpec) SwaggerDoc() map[string]string { + return map_RuntimeClassSpec +} + +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..20f8051 --- /dev/null +++ b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,165 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
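The SwaggerDoc maps generated above are plain map[string]string lookups keyed by JSON field name, with the empty key holding the type's own description; reading them is just (sketch, assuming the vendored package):

package main

import (
	"fmt"

	nodev1alpha1 "k8s.io/api/node/v1alpha1"
)

func main() {
	docs := nodev1alpha1.RuntimeClassSpec{}.SwaggerDoc()
	fmt.Println(docs[""])               // description of RuntimeClassSpec itself
	fmt.Println(docs["runtimeHandler"]) // per-field documentation
}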
+func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { + if in == nil { + return nil + } + out := new(RuntimeClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RuntimeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { + if in == nil { + return nil + } + out := new(RuntimeClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) { + *out = *in + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassSpec. +func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec { + if in == nil { + return nil + } + out := new(RuntimeClassSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
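The point of these generated helpers is that nested maps, pointers, and slices are copied rather than aliased; a short sketch (assuming the vendored packages) showing that mutating a copy leaves the original untouched:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	orig := &nodev1alpha1.Overhead{
		PodFixed: corev1.ResourceList{
			corev1.ResourceCPU: resource.MustParse("100m"),
		},
	}

	cp := orig.DeepCopy()
	cp.PodFixed[corev1.ResourceCPU] = resource.MustParse("1") // mutate only the copy

	q1 := orig.PodFixed[corev1.ResourceCPU]
	q2 := cp.PodFixed[corev1.ResourceCPU]
	fmt.Println(q1.String(), q2.String()) // "100m 1": the map was deep-copied, not shared
}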
+func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. +func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go new file mode 100644 index 0000000..e87583c --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=node.k8s.io + +package v1beta1 // import "k8s.io/api/node/v1beta1" diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go new file mode 100644 index 0000000..b85cbd2 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/generated.pb.go @@ -0,0 +1,1412 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
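The v1alpha1 boilerplate above, and the v1beta1 copy that follows, expose the same generated Marshal/Unmarshal pair, so a wire-format round trip needs no extra machinery; a sketch using the v1alpha1 Overhead (assuming the vendored packages; error handling reduced to a panic):

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	nodev1alpha1 "k8s.io/api/node/v1alpha1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	in := &nodev1alpha1.Overhead{
		PodFixed: corev1.ResourceList{
			corev1.ResourceMemory: resource.MustParse("64Mi"),
		},
	}

	raw, err := in.Marshal() // generated proto encoding
	if err != nil {
		panic(err)
	}

	out := &nodev1alpha1.Overhead{}
	if err := out.Unmarshal(raw); err != nil { // generated proto decoding
		panic(err)
	}

	mem := out.PodFixed[corev1.ResourceMemory]
	fmt.Println(len(raw), "bytes ->", mem.String()) // the quantity survives the round trip as "64Mi"
}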
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Overhead) Reset() { *m = Overhead{} } +func (*Overhead) ProtoMessage() {} +func (*Overhead) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{0} +} +func (m *Overhead) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Overhead) XXX_Merge(src proto.Message) { + xxx_messageInfo_Overhead.Merge(m, src) +} +func (m *Overhead) XXX_Size() int { + return m.Size() +} +func (m *Overhead) XXX_DiscardUnknown() { + xxx_messageInfo_Overhead.DiscardUnknown(m) +} + +var xxx_messageInfo_Overhead proto.InternalMessageInfo + +func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } +func (*RuntimeClass) ProtoMessage() {} +func (*RuntimeClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{1} +} +func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClass.Merge(m, src) +} +func (m *RuntimeClass) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClass) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClass.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo + +func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } +func (*RuntimeClassList) ProtoMessage() {} +func (*RuntimeClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{2} +} +func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassList.Merge(m, src) +} +func (m *RuntimeClassList) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassList) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo + +func (m *Scheduling) Reset() { *m = Scheduling{} } +func (*Scheduling) ProtoMessage() {} +func (*Scheduling) Descriptor() ([]byte, []int) { + return fileDescriptor_f977b0dddc93b4ec, []int{3} +} +func (m *Scheduling) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Scheduling) XXX_Merge(src proto.Message) { + xxx_messageInfo_Scheduling.Merge(m, src) +} +func (m *Scheduling) XXX_Size() int { + return m.Size() +} +func (m *Scheduling) XXX_DiscardUnknown() { + xxx_messageInfo_Scheduling.DiscardUnknown(m) +} + +var xxx_messageInfo_Scheduling proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1beta1.Overhead") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1beta1.Overhead.PodFixedEntry") + 
proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") + proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") + proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1beta1.Scheduling") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1beta1.Scheduling.NodeSelectorEntry") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_f977b0dddc93b4ec) +} + +var fileDescriptor_f977b0dddc93b4ec = []byte{ + // 666 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xbd, 0x6f, 0xd3, 0x40, + 0x14, 0xcf, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x14, 0x53, 0x89, 0x28, 0x83, 0x53, 0x82, 0x90, 0xca, + 0xd0, 0x33, 0xad, 0x00, 0x55, 0x2c, 0x20, 0xf3, 0x21, 0x3e, 0x5b, 0x70, 0x61, 0x41, 0x0c, 0x5c, + 0xec, 0x87, 0x63, 0x12, 0xfb, 0xa2, 0xf3, 0x39, 0x22, 0x1b, 0x62, 0x41, 0x62, 0x62, 0xe1, 0xbf, + 0x81, 0xbd, 0x1b, 0x5d, 0x90, 0x3a, 0xb5, 0x34, 0xfc, 0x17, 0x4c, 0xe8, 0xec, 0x73, 0x72, 0x6d, + 0x9a, 0xb6, 0x6c, 0xbe, 0xf3, 0xef, 0xe3, 0xbd, 0xdf, 0xbb, 0x87, 0xef, 0xb4, 0xd7, 0x62, 0x12, + 0x30, 0xab, 0x9d, 0x34, 0x81, 0x47, 0x20, 0x20, 0xb6, 0x7a, 0x10, 0x79, 0x8c, 0x5b, 0xea, 0x07, + 0xed, 0x06, 0x56, 0xc4, 0x3c, 0xb0, 0x7a, 0x2b, 0x4d, 0x10, 0x74, 0xc5, 0xf2, 0x21, 0x02, 0x4e, + 0x05, 0x78, 0xa4, 0xcb, 0x99, 0x60, 0xc6, 0xc5, 0x0c, 0x48, 0x68, 0x37, 0x20, 0x12, 0x48, 0x14, + 0xb0, 0xb6, 0xec, 0x07, 0xa2, 0x95, 0x34, 0x89, 0xcb, 0x42, 0xcb, 0x67, 0x3e, 0xb3, 0x52, 0x7c, + 0x33, 0x79, 0x97, 0x9e, 0xd2, 0x43, 0xfa, 0x95, 0xe9, 0xd4, 0x1a, 0x9a, 0xa1, 0xcb, 0xb8, 0x34, + 0x3c, 0xec, 0x55, 0xbb, 0x3e, 0xc2, 0x84, 0xd4, 0x6d, 0x05, 0x11, 0xf0, 0xbe, 0xd5, 0x6d, 0xfb, + 0x29, 0x89, 0x43, 0xcc, 0x12, 0xee, 0xc2, 0x7f, 0xb1, 0x62, 0x2b, 0x04, 0x41, 0x8f, 0xf2, 0xb2, + 0x26, 0xb1, 0x78, 0x12, 0x89, 0x20, 0x1c, 0xb7, 0xb9, 0x79, 0x12, 0x21, 0x76, 0x5b, 0x10, 0xd2, + 0xc3, 0xbc, 0xc6, 0xcf, 0x22, 0x2e, 0x6d, 0xf4, 0x80, 0xb7, 0x80, 0x7a, 0xc6, 0x2f, 0x84, 0x4b, + 0x5d, 0xe6, 0x3d, 0x08, 0x3e, 0x80, 0x57, 0x45, 0x8b, 0x53, 0x4b, 0xe5, 0x55, 0x8b, 0x4c, 0x48, + 0x98, 0xe4, 0x2c, 0xf2, 0x5c, 0x31, 0xee, 0x47, 0x82, 0xf7, 0xed, 0xcf, 0x68, 0x6b, 0xb7, 0x5e, + 0x18, 0xec, 0xd6, 0x4b, 0xf9, 0xfd, 0xdf, 0xdd, 0x7a, 0x7d, 0x3c, 0x5e, 0xe2, 0xa8, 0xc4, 0x9e, + 0x06, 0xb1, 0xf8, 0xb4, 0x77, 0x2c, 0x64, 0x9d, 0x86, 0xf0, 0x65, 0xaf, 0xbe, 0x7c, 0x9a, 0x01, + 0x90, 0x17, 0x09, 0x8d, 0x44, 0x20, 0xfa, 0xce, 0xb0, 0x95, 0x5a, 0x1b, 0xcf, 0x1d, 0x28, 0xd2, + 0x98, 0xc7, 0x53, 0x6d, 0xe8, 0x57, 0xd1, 0x22, 0x5a, 0x9a, 0x75, 0xe4, 0xa7, 0x71, 0x0f, 0x4f, + 0xf7, 0x68, 0x27, 0x81, 0x6a, 0x71, 0x11, 0x2d, 0x95, 0x57, 0x89, 0xd6, 0xf6, 0xd0, 0x8b, 0x74, + 0xdb, 0x7e, 0x9a, 0xc3, 0xb8, 0x57, 0x46, 0xbe, 0x55, 0x5c, 0x43, 0x8d, 0x1f, 0x45, 0x5c, 0x71, + 0xb2, 0xd0, 0xef, 0x76, 0x68, 0x1c, 0x1b, 0x6f, 0x71, 0x49, 0x8e, 0xd9, 0xa3, 0x82, 0xa6, 0x8e, + 0xe5, 0xd5, 0x6b, 0xc7, 0xa9, 0xc7, 0x44, 0xa2, 0x49, 0x6f, 0x85, 0x6c, 0x34, 0xdf, 0x83, 0x2b, + 0x9e, 0x81, 0xa0, 0xb6, 0xa1, 0x42, 0xc5, 0xa3, 0x3b, 0x67, 0xa8, 0x6a, 0x5c, 0xc5, 0x33, 0x2d, + 0x1a, 0x79, 0x1d, 0xe0, 0x69, 0xf9, 0xb3, 0xf6, 0x39, 0x05, 0x9f, 0x79, 0x98, 0x5d, 0x3b, 0xf9, + 0x7f, 0xe3, 0x09, 0x2e, 0x31, 0x35, 0xb8, 0xea, 0x54, 0x5a, 0xcc, 0xa5, 0x13, 0x27, 0x6c, 0x57, + 0xe4, 0x38, 0xf3, 0x93, 0x33, 0x14, 0x30, 0x36, 0x31, 0x96, 0xcf, 0xca, 0x4b, 0x3a, 0x41, 0xe4, + 0x57, 0xcf, 0xa4, 0x72, 0x97, 0x27, 0xca, 0x6d, 0x0e, 0xa1, 0xf6, 0x59, 0xd9, 0xca, 0xe8, 0xec, + 0x68, 0x32, 
0x8d, 0xef, 0x08, 0xcf, 0xeb, 0xf9, 0xc9, 0xf7, 0x61, 0xbc, 0x19, 0xcb, 0x90, 0x9c, + 0x2e, 0x43, 0xc9, 0x4e, 0x13, 0x9c, 0xcf, 0x9f, 0x65, 0x7e, 0xa3, 0xe5, 0xf7, 0x18, 0x4f, 0x07, + 0x02, 0xc2, 0xb8, 0x5a, 0x4c, 0xdf, 0xfc, 0x95, 0x89, 0x2d, 0xe8, 0x75, 0xd9, 0x73, 0x4a, 0x71, + 0xfa, 0x91, 0xe4, 0x3a, 0x99, 0x44, 0xe3, 0x5b, 0x11, 0x6b, 0x9d, 0x19, 0x0c, 0x57, 0xa4, 0xc2, + 0x26, 0x74, 0xc0, 0x15, 0x8c, 0xab, 0xad, 0xba, 0x71, 0x8a, 0x90, 0xc8, 0xba, 0xc6, 0xcb, 0x76, + 0x6b, 0x41, 0x39, 0x56, 0xf4, 0x5f, 0xce, 0x01, 0x03, 0xe3, 0x15, 0x2e, 0x0b, 0xd6, 0x91, 0x3b, + 0x1e, 0xb0, 0x28, 0xef, 0xc8, 0xd4, 0xfd, 0xe4, 0x76, 0xc9, 0x68, 0x5e, 0x0e, 0x61, 0xf6, 0x05, + 0x25, 0x5c, 0x1e, 0xdd, 0xc5, 0x8e, 0xae, 0x53, 0xbb, 0x8d, 0xcf, 0x8f, 0xd5, 0x73, 0xc4, 0x1a, + 0x2d, 0xe8, 0x6b, 0x34, 0xab, 0xad, 0x85, 0xbd, 0xbc, 0xb5, 0x6f, 0x16, 0xb6, 0xf7, 0xcd, 0xc2, + 0xce, 0xbe, 0x59, 0xf8, 0x38, 0x30, 0xd1, 0xd6, 0xc0, 0x44, 0xdb, 0x03, 0x13, 0xed, 0x0c, 0x4c, + 0xf4, 0x7b, 0x60, 0xa2, 0xaf, 0x7f, 0xcc, 0xc2, 0xeb, 0x19, 0x95, 0xc3, 0xbf, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x5b, 0xcf, 0x13, 0x0c, 0x1b, 0x06, 0x00, 0x00, +} + +func (m *Overhead) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.PodFixed) > 0 { + keysForPodFixed := make([]string, 0, len(m.PodFixed)) + for k := range m.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { + v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForPodFixed[iNdEx]) + copy(dAtA[i:], keysForPodFixed[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Scheduling != nil { + { + size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.Overhead != nil { + { + size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + i -= len(m.Handler) + copy(dAtA[i:], m.Handler) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) + i-- + dAtA[i] = 0x12 + { + size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Scheduling) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Tolerations) > 0 { + for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { + v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForNodeSelector[iNdEx]) + copy(dAtA[i:], keysForNodeSelector[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Overhead) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.PodFixed) > 0 { + for k, v := range m.PodFixed { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *RuntimeClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = 
len(m.Handler) + n += 1 + l + sovGenerated(uint64(l)) + if m.Overhead != nil { + l = m.Overhead.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Scheduling != nil { + l = m.Scheduling.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RuntimeClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Scheduling) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Overhead) String() string { + if this == nil { + return "nil" + } + keysForPodFixed := make([]string, 0, len(this.PodFixed)) + for k := range this.PodFixed { + keysForPodFixed = append(keysForPodFixed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) + mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForPodFixed { + mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForPodFixed += "}" + s := strings.Join([]string{`&Overhead{`, + `PodFixed:` + mapStringForPodFixed + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, + `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, + `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RuntimeClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RuntimeClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Scheduling) String() string { + if this == nil { + return "nil" + } + repeatedStringForTolerations := "[]Toleration{" + for _, f := range this.Tolerations { + repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," + } + repeatedStringForTolerations += "}" + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + 
for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) + } + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&Scheduling{`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `Tolerations:` + repeatedStringForTolerations + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Overhead) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Overhead: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodFixed == nil { + m.PodFixed = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Handler = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Overhead == nil { + 
m.Overhead = &Overhead{} + } + if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Scheduling == nil { + m.Scheduling = &Scheduling{} + } + if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RuntimeClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Scheduling) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeSelector == nil { + m.NodeSelector = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.NodeSelector[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tolerations = append(m.Tolerations, v11.Toleration{}) + if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto new file mode 100644 index 0000000..4916679 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/generated.proto @@ -0,0 +1,108 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.api.node.v1beta1;
+
+import "k8s.io/api/core/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/api/resource/generated.proto";
+import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/generated.proto";
+import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
+
+// Package-wide variables from generator "generated".
+option go_package = "v1beta1";
+
+// Overhead structure represents the resource overhead associated with running a pod.
+message Overhead {
+ // PodFixed represents the fixed resource overhead associated with running a pod.
+ // +optional
+ map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1;
+}
+
+// RuntimeClass defines a class of container runtime supported in the cluster.
+// The RuntimeClass is used to determine which container runtime is used to run
+// all containers in a pod. RuntimeClasses are (currently) manually defined by a
+// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is
+// responsible for resolving the RuntimeClassName reference before running the
+// pod. For more details, see
+// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md
+message RuntimeClass {
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
+
+ // Handler specifies the underlying runtime and configuration that the CRI
+ // implementation will use to handle pods of this class. The possible values
+ // are specific to the node & CRI configuration. It is assumed that all
+ // handlers are available on every node, and handlers of the same name are
+ // equivalent on every node.
+ // For example, a handler called "runc" might specify that the runc OCI
+ // runtime (using native Linux containers) will be used to run the containers
+ // in a pod.
+ // The Handler must conform to the DNS Label (RFC 1123) requirements, and is
+ // immutable.
+ optional string handler = 2;
+
+ // Overhead represents the resource overhead associated with running a pod for a
+ // given RuntimeClass. For more details, see
+ // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md
+ // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.
+ // +optional
+ optional Overhead overhead = 3;
+
+ // Scheduling holds the scheduling constraints to ensure that pods running
+ // with this RuntimeClass are scheduled to nodes that support it.
+ // If scheduling is nil, this RuntimeClass is assumed to be supported by all
+ // nodes.
+ // +optional
+ optional Scheduling scheduling = 4;
+}
+
+// RuntimeClassList is a list of RuntimeClass objects.
+message RuntimeClassList {
+ // Standard list metadata.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
+
+ // Items is a list of schema objects.
+ repeated RuntimeClass items = 2;
+}
+
+// Scheduling specifies the scheduling constraints for nodes supporting a
+// RuntimeClass.
+message Scheduling {
+ // nodeSelector lists labels that must be present on nodes that support this
+ // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
+ // node matched by this selector. The RuntimeClass nodeSelector is merged
+ // with a pod's existing nodeSelector. Any conflicts will cause the pod to
+ // be rejected in admission.
+ // +optional
+ map<string, string> nodeSelector = 1;
+
+ // tolerations are appended (excluding duplicates) to pods running with this
+ // RuntimeClass during admission, effectively unioning the set of nodes
+ // tolerated by the pod and the RuntimeClass.
+ // +optional
+ // +listType=atomic
+ repeated k8s.io.api.core.v1.Toleration tolerations = 2;
+}
+
diff --git a/vendor/k8s.io/api/node/v1beta1/register.go b/vendor/k8s.io/api/node/v1beta1/register.go
new file mode 100644
index 0000000..3c3b61b
--- /dev/null
+++ b/vendor/k8s.io/api/node/v1beta1/register.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// GroupName is the group name use in this package
+const GroupName = "node.k8s.io"
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ // SchemeBuilder is the scheme builder with scheme init functions to run for this API package
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// addKnownTypes adds the list of known types to api.Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &RuntimeClass{},
+ &RuntimeClassList{},
+ )
+
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go
new file mode 100644
index 0000000..793a48f
--- /dev/null
+++ b/vendor/k8s.io/api/node/v1beta1/types.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2019 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClass defines a class of container runtime supported in the cluster. +// The RuntimeClass is used to determine which container runtime is used to run +// all containers in a pod. RuntimeClasses are (currently) manually defined by a +// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is +// responsible for resolving the RuntimeClassName reference before running the +// pod. For more details, see +// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md +type RuntimeClass struct { + metav1.TypeMeta `json:",inline"` + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Handler specifies the underlying runtime and configuration that the CRI + // implementation will use to handle pods of this class. The possible values + // are specific to the node & CRI configuration. It is assumed that all + // handlers are available on every node, and handlers of the same name are + // equivalent on every node. + // For example, a handler called "runc" might specify that the runc OCI + // runtime (using native Linux containers) will be used to run the containers + // in a pod. + // The Handler must conform to the DNS Label (RFC 1123) requirements, and is + // immutable. + Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` + + // Overhead represents the resource overhead associated with running a pod for a + // given RuntimeClass. For more details, see + // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md + // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. + // +optional + Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` + + // Scheduling holds the scheduling constraints to ensure that pods running + // with this RuntimeClass are scheduled to nodes that support it. + // If scheduling is nil, this RuntimeClass is assumed to be supported by all + // nodes. + // +optional + Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"` +} + +// Overhead structure represents the resource overhead associated with running a pod. +type Overhead struct { + // PodFixed represents the fixed resource overhead associated with running a pod. + // +optional + PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// Scheduling specifies the scheduling constraints for nodes supporting a +// RuntimeClass. +type Scheduling struct { + // nodeSelector lists labels that must be present on nodes that support this + // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a + // node matched by this selector. The RuntimeClass nodeSelector is merged + // with a pod's existing nodeSelector. Any conflicts will cause the pod to + // be rejected in admission. 
+ // +optional + NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` + + // tolerations are appended (excluding duplicates) to pods running with this + // RuntimeClass during admission, effectively unioning the set of nodes + // tolerated by the pod and the RuntimeClass. + // +optional + // +listType=atomic + Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RuntimeClassList is a list of RuntimeClass objects. +type RuntimeClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..681f73f --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Overhead = map[string]string{ + "": "Overhead structure represents the resource overhead associated with running a pod.", + "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", +} + +func (Overhead) SwaggerDoc() map[string]string { + return map_Overhead +} + +var map_RuntimeClass = map[string]string{ + "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", + "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", + "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", +} + +func (RuntimeClass) SwaggerDoc() map[string]string { + return map_RuntimeClass +} + +var map_RuntimeClassList = map[string]string{ + "": "RuntimeClassList is a list of RuntimeClass objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (RuntimeClassList) SwaggerDoc() map[string]string { + return map_RuntimeClassList +} + +var map_Scheduling = map[string]string{ + "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", + "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", + "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", +} + +func (Scheduling) SwaggerDoc() map[string]string { + return map_Scheduling +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..c398952 --- /dev/null +++ b/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,148 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. 
DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Overhead) DeepCopyInto(out *Overhead) { + *out = *in + if in.PodFixed != nil { + in, out := &in.PodFixed, &out.PodFixed + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. +func (in *Overhead) DeepCopy() *Overhead { + if in == nil { + return nil + } + out := new(Overhead) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Overhead != nil { + in, out := &in.Overhead, &out.Overhead + *out = new(Overhead) + (*in).DeepCopyInto(*out) + } + if in.Scheduling != nil { + in, out := &in.Scheduling, &out.Scheduling + *out = new(Scheduling) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. +func (in *RuntimeClass) DeepCopy() *RuntimeClass { + if in == nil { + return nil + } + out := new(RuntimeClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RuntimeClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. +func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { + if in == nil { + return nil + } + out := new(RuntimeClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RuntimeClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Scheduling) DeepCopyInto(out *Scheduling) { + *out = *in + if in.NodeSelector != nil { + in, out := &in.NodeSelector, &out.NodeSelector + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Tolerations != nil { + in, out := &in.Tolerations, &out.Tolerations + *out = make([]v1.Toleration, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. 
+func (in *Scheduling) DeepCopy() *Scheduling { + if in == nil { + return nil + } + out := new(Scheduling) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go new file mode 100644 index 0000000..05d8332 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// Package policy is for any kind of policy object. Suitable examples, even if +// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, +// NetworkPolicy, etc. +package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go new file mode 100644 index 0000000..40ec7ef --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -0,0 +1,5561 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } +func (*AllowedCSIDriver) ProtoMessage() {} +func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{0} +} +func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedCSIDriver.Merge(m, src) +} +func (m *AllowedCSIDriver) XXX_Size() int { + return m.Size() +} +func (m *AllowedCSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo + +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{1} +} +func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedFlexVolume.Merge(m, src) +} +func (m *AllowedFlexVolume) XXX_Size() int { + return m.Size() +} +func (m *AllowedFlexVolume) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo + +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{2} +} +func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AllowedHostPath) XXX_Merge(src proto.Message) { + xxx_messageInfo_AllowedHostPath.Merge(m, src) +} +func (m *AllowedHostPath) XXX_Size() int { + return m.Size() +} +func (m *AllowedHostPath) XXX_DiscardUnknown() { + xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +} + +var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo + +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{3} +} +func (m *Eviction) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Eviction) XXX_Merge(src proto.Message) { + xxx_messageInfo_Eviction.Merge(m, src) +} +func (m *Eviction) XXX_Size() int { + return m.Size() +} +func (m *Eviction) XXX_DiscardUnknown() { + xxx_messageInfo_Eviction.DiscardUnknown(m) +} + +var xxx_messageInfo_Eviction proto.InternalMessageInfo + +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) 
ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{4} +} +func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) +} +func (m *FSGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo + +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{5} +} +func (m *HostPortRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostPortRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostPortRange.Merge(m, src) +} +func (m *HostPortRange) XXX_Size() int { + return m.Size() +} +func (m *HostPortRange) XXX_DiscardUnknown() { + xxx_messageInfo_HostPortRange.DiscardUnknown(m) +} + +var xxx_messageInfo_HostPortRange proto.InternalMessageInfo + +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{6} +} +func (m *IDRange) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IDRange) XXX_Merge(src proto.Message) { + xxx_messageInfo_IDRange.Merge(m, src) +} +func (m *IDRange) XXX_Size() int { + return m.Size() +} +func (m *IDRange) XXX_DiscardUnknown() { + xxx_messageInfo_IDRange.DiscardUnknown(m) +} + +var xxx_messageInfo_IDRange proto.InternalMessageInfo + +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{7} +} +func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudget) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudget.Merge(m, src) +} +func (m *PodDisruptionBudget) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudget) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudget.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo + +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, 
[]int{8} +} +func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetList.Merge(m, src) +} +func (m *PodDisruptionBudgetList) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetList) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo + +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{9} +} +func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetSpec.Merge(m, src) +} +func (m *PodDisruptionBudgetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetSpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo + +func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } +func (*PodDisruptionBudgetStatus) ProtoMessage() {} +func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{10} +} +func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodDisruptionBudgetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodDisruptionBudgetStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodDisruptionBudgetStatus.Merge(m, src) +} +func (m *PodDisruptionBudgetStatus) XXX_Size() int { + return m.Size() +} +func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { + xxx_messageInfo_PodDisruptionBudgetStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo + +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{11} +} +func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicy.Merge(m, src) +} +func (m *PodSecurityPolicy) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicy) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo + +func (m *PodSecurityPolicyList) Reset() { 
*m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{12} +} +func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) +} +func (m *PodSecurityPolicyList) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo + +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{13} +} +func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) +} +func (m *PodSecurityPolicySpec) XXX_Size() int { + return m.Size() +} +func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { + xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo + +func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } +func (*RunAsGroupStrategyOptions) ProtoMessage() {} +func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{14} +} +func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) +} +func (m *RunAsGroupStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{15} +} +func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) +} +func (m *RunAsUserStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m 
*RunAsUserStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo + +func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } +func (*RuntimeClassStrategyOptions) ProtoMessage() {} +func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{16} +} +func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) +} +func (m *RuntimeClassStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo + +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{17} +} +func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) +} +func (m *SELinuxStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo + +func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } +func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} +func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_014060e454a820dc, []int{18} +} +func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) +} +func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { + return m.Size() +} +func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { + xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") + proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") + proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") + proto.RegisterType((*Eviction)(nil), 
"k8s.io.api.policy.v1beta1.Eviction") + proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.FSGroupStrategyOptions") + proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.policy.v1beta1.HostPortRange") + proto.RegisterType((*IDRange)(nil), "k8s.io.api.policy.v1beta1.IDRange") + proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget") + proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") + proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") + proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") + proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") + proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") + proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") + proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") + proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") + proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") + proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") + proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") + proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_014060e454a820dc) +} + +var fileDescriptor_014060e454a820dc = []byte{ + // 1878 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1b, 0xc7, + 0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x0d, 0xd7, 0xd9, 0x00, + 0x85, 0x9b, 0x26, 0xcb, 0x58, 0x76, 0x5c, 0xa3, 0x69, 0x8b, 0x68, 0x45, 0xc9, 0x56, 0x60, 0x59, + 0xec, 0xd0, 0x0e, 0xda, 0xc2, 0x2d, 0x3a, 0xe4, 0x8e, 0xa8, 0x8d, 0x96, 0xbb, 0xdb, 0x99, 0x59, + 0x46, 0xbc, 0xeb, 0x45, 0x2f, 0x7a, 0xd9, 0x17, 0x08, 0xfa, 0x00, 0x45, 0xaf, 0xfa, 0x12, 0x0e, + 0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0xec, 0x5b, 0xf8, 0xaa, 0xd8, 0xe1, 0xec, 0x92, 0xfb, + 0x47, 0x5a, 0x01, 0xec, 0x3b, 0xee, 0x9c, 0xef, 0xfb, 0xce, 0xcc, 0x99, 0x33, 0x67, 0x0e, 0x07, + 0x98, 0x17, 0x0f, 0x98, 0x61, 0x7b, 0xb5, 0x8b, 0xa0, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0xb5, 0x1e, + 0x71, 0x2d, 0x8f, 0xd6, 0xa4, 0x01, 0xfb, 0x76, 0xcd, 0xf7, 0x1c, 0xbb, 0xdd, 0xaf, 0xf5, 0xee, + 0xb4, 0x08, 0xc7, 0x77, 0x6a, 0x1d, 0xe2, 0x12, 0x8a, 0x39, 0xb1, 0x0c, 0x9f, 0x7a, 0xdc, 0x83, + 0x37, 0x47, 0x50, 0x03, 0xfb, 0xb6, 0x31, 0x82, 0x1a, 0x12, 0xba, 0xfb, 0x51, 0xc7, 0xe6, 0xe7, + 0x41, 0xcb, 0x68, 0x7b, 0xdd, 0x5a, 0xc7, 0xeb, 0x78, 0x35, 0xc1, 0x68, 0x05, 0x67, 0xe2, 0x4b, + 0x7c, 0x88, 0x5f, 0x23, 0xa5, 0x5d, 0x7d, 0xc2, 0x69, 0xdb, 0xa3, 0xa4, 0xd6, 0xcb, 0x78, 0xdb, + 0xbd, 0x37, 0xc6, 0x74, 0x71, 0xfb, 0xdc, 0x76, 0x09, 0xed, 0xd7, 0xfc, 0x8b, 0x4e, 0x38, 0xc0, + 0x6a, 0x5d, 0xc2, 0x71, 0x1e, 0xab, 0x56, 0xc4, 0xa2, 0x81, 0xcb, 0xed, 0x2e, 0xc9, 0x10, 0xee, + 0xcf, 0x22, 0xb0, 0xf6, 0x39, 0xe9, 0xe2, 0x0c, 0xef, 0x6e, 0x11, 0x2f, 0xe0, 0xb6, 0x53, 0xb3, + 0x5d, 0xce, 0x38, 
0x4d, 0x93, 0xf4, 0x7b, 0x60, 0x63, 0xdf, 0x71, 0xbc, 0xaf, 0x88, 0x75, 0xd0, + 0x3c, 0xae, 0x53, 0xbb, 0x47, 0x28, 0xbc, 0x05, 0xe6, 0x5d, 0xdc, 0x25, 0xaa, 0x72, 0x4b, 0xb9, + 0xbd, 0x6c, 0xae, 0xbe, 0x18, 0x68, 0x73, 0xc3, 0x81, 0x36, 0xff, 0x04, 0x77, 0x09, 0x12, 0x16, + 0xfd, 0x53, 0x50, 0x91, 0xac, 0x23, 0x87, 0x5c, 0x7e, 0xe1, 0x39, 0x41, 0x97, 0xc0, 0x1f, 0x83, + 0x45, 0x4b, 0x08, 0x48, 0xe2, 0xba, 0x24, 0x2e, 0x8e, 0x64, 0x91, 0xb4, 0xea, 0x0c, 0x5c, 0x97, + 0xe4, 0x47, 0x1e, 0xe3, 0x0d, 0xcc, 0xcf, 0xe1, 0x1e, 0x00, 0x3e, 0xe6, 0xe7, 0x0d, 0x4a, 0xce, + 0xec, 0x4b, 0x49, 0x87, 0x92, 0x0e, 0x1a, 0xb1, 0x05, 0x4d, 0xa0, 0xe0, 0x87, 0xa0, 0x4c, 0x09, + 0xb6, 0x4e, 0x5d, 0xa7, 0xaf, 0x5e, 0xbb, 0xa5, 0xdc, 0x2e, 0x9b, 0x1b, 0x92, 0x51, 0x46, 0x72, + 0x1c, 0xc5, 0x08, 0xfd, 0x3f, 0x0a, 0x28, 0x1f, 0xf6, 0xec, 0x36, 0xb7, 0x3d, 0x17, 0xfe, 0x11, + 0x94, 0xc3, 0xdd, 0xb2, 0x30, 0xc7, 0xc2, 0xd9, 0xca, 0xde, 0xc7, 0xc6, 0x38, 0x93, 0xe2, 0xe0, + 0x19, 0xfe, 0x45, 0x27, 0x1c, 0x60, 0x46, 0x88, 0x36, 0x7a, 0x77, 0x8c, 0xd3, 0xd6, 0x97, 0xa4, + 0xcd, 0x4f, 0x08, 0xc7, 0xe3, 0xe9, 0x8d, 0xc7, 0x50, 0xac, 0x0a, 0x1d, 0xb0, 0x66, 0x11, 0x87, + 0x70, 0x72, 0xea, 0x87, 0x1e, 0x99, 0x98, 0xe1, 0xca, 0xde, 0xdd, 0xd7, 0x73, 0x53, 0x9f, 0xa4, + 0x9a, 0x95, 0xe1, 0x40, 0x5b, 0x4b, 0x0c, 0xa1, 0xa4, 0xb8, 0xfe, 0xb5, 0x02, 0x76, 0x8e, 0x9a, + 0x0f, 0xa9, 0x17, 0xf8, 0x4d, 0x1e, 0xee, 0x6e, 0xa7, 0x2f, 0x4d, 0xf0, 0x67, 0x60, 0x9e, 0x06, + 0x4e, 0xb4, 0x97, 0xef, 0x47, 0x7b, 0x89, 0x02, 0x87, 0xbc, 0x1a, 0x68, 0x9b, 0x29, 0xd6, 0xd3, + 0xbe, 0x4f, 0x90, 0x20, 0xc0, 0xcf, 0xc1, 0x22, 0xc5, 0x6e, 0x87, 0x84, 0x53, 0x2f, 0xdd, 0x5e, + 0xd9, 0xd3, 0x8d, 0xc2, 0xb3, 0x66, 0x1c, 0xd7, 0x51, 0x08, 0x1d, 0xef, 0xb8, 0xf8, 0x64, 0x48, + 0x2a, 0xe8, 0x27, 0x60, 0x4d, 0x6c, 0xb5, 0x47, 0xb9, 0xb0, 0xc0, 0x77, 0x41, 0xa9, 0x6b, 0xbb, + 0x62, 0x52, 0x0b, 0xe6, 0x8a, 0x64, 0x95, 0x4e, 0x6c, 0x17, 0x85, 0xe3, 0xc2, 0x8c, 0x2f, 0x45, + 0xcc, 0x26, 0xcd, 0xf8, 0x12, 0x85, 0xe3, 0xfa, 0x43, 0xb0, 0x24, 0x3d, 0x4e, 0x0a, 0x95, 0xa6, + 0x0b, 0x95, 0x72, 0x84, 0xfe, 0x71, 0x0d, 0x6c, 0x36, 0x3c, 0xab, 0x6e, 0x33, 0x1a, 0x88, 0x78, + 0x99, 0x81, 0xd5, 0x21, 0xfc, 0x2d, 0xe4, 0xc7, 0x53, 0x30, 0xcf, 0x7c, 0xd2, 0x96, 0x69, 0xb1, + 0x37, 0x25, 0xb6, 0x39, 0xf3, 0x6b, 0xfa, 0xa4, 0x3d, 0x3e, 0x96, 0xe1, 0x17, 0x12, 0x6a, 0xf0, + 0x39, 0x58, 0x64, 0x1c, 0xf3, 0x80, 0xa9, 0x25, 0xa1, 0x7b, 0xef, 0x8a, 0xba, 0x82, 0x3b, 0xde, + 0xc5, 0xd1, 0x37, 0x92, 0x9a, 0xfa, 0xbf, 0x15, 0x70, 0x23, 0x87, 0xf5, 0xd8, 0x66, 0x1c, 0x3e, + 0xcf, 0x44, 0xcc, 0x78, 0xbd, 0x88, 0x85, 0x6c, 0x11, 0xaf, 0xf8, 0xf0, 0x46, 0x23, 0x13, 0xd1, + 0x6a, 0x82, 0x05, 0x9b, 0x93, 0x6e, 0x94, 0x8a, 0xc6, 0xd5, 0x96, 0x65, 0xae, 0x49, 0xe9, 0x85, + 0xe3, 0x50, 0x04, 0x8d, 0xb4, 0xf4, 0x6f, 0xaf, 0xe5, 0x2e, 0x27, 0x0c, 0x27, 0x3c, 0x03, 0xab, + 0x5d, 0xdb, 0xdd, 0xef, 0x61, 0xdb, 0xc1, 0x2d, 0x79, 0x7a, 0xa6, 0x25, 0x41, 0x58, 0x61, 0x8d, + 0x51, 0x85, 0x35, 0x8e, 0x5d, 0x7e, 0x4a, 0x9b, 0x9c, 0xda, 0x6e, 0xc7, 0xdc, 0x18, 0x0e, 0xb4, + 0xd5, 0x93, 0x09, 0x25, 0x94, 0xd0, 0x85, 0xbf, 0x07, 0x65, 0x46, 0x1c, 0xd2, 0xe6, 0x1e, 0xbd, + 0x5a, 0x85, 0x78, 0x8c, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, 0xb1, + 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x73, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, 0x1c, + 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xc3, 0x79, 0x70, 0xb3, 0x30, 0xab, 0xe0, + 0xe7, 0x00, 0x7a, 0x2d, 0x46, 0x68, 0x8f, 0x58, 0x0f, 0x47, 0x77, 0x90, 0xed, 0x45, 0x07, 0x77, + 0x57, 0x6e, 0x10, 0x3c, 0xcd, 0x20, 0x50, 
0x0e, 0x0b, 0xfe, 0x45, 0x01, 0x6b, 0xd6, 0xc8, 0x0d, + 0xb1, 0x1a, 0x9e, 0x15, 0x25, 0xc6, 0xc3, 0x1f, 0x92, 0xef, 0x46, 0x7d, 0x52, 0xe9, 0xd0, 0xe5, + 0xb4, 0x6f, 0x6e, 0xcb, 0x09, 0xad, 0x25, 0x6c, 0x28, 0xe9, 0x34, 0x5c, 0x92, 0x15, 0x4b, 0x32, + 0x79, 0xa7, 0x89, 0x10, 0x2f, 0x8c, 0x97, 0x54, 0xcf, 0x20, 0x50, 0x0e, 0x0b, 0xfe, 0x0a, 0xac, + 0xb7, 0x03, 0x4a, 0x89, 0xcb, 0x1f, 0x11, 0xec, 0xf0, 0xf3, 0xbe, 0x3a, 0x2f, 0x74, 0x76, 0xa4, + 0xce, 0xfa, 0x41, 0xc2, 0x8a, 0x52, 0xe8, 0x90, 0x6f, 0x11, 0x66, 0x53, 0x62, 0x45, 0xfc, 0x85, + 0x24, 0xbf, 0x9e, 0xb0, 0xa2, 0x14, 0x1a, 0x3e, 0x00, 0xab, 0xe4, 0xd2, 0x27, 0xed, 0x28, 0xa0, + 0x8b, 0x82, 0xbd, 0x25, 0xd9, 0xab, 0x87, 0x13, 0x36, 0x94, 0x40, 0xee, 0x3a, 0x00, 0x66, 0x23, + 0x08, 0x37, 0x40, 0xe9, 0x82, 0xf4, 0x47, 0xd7, 0x0e, 0x0a, 0x7f, 0xc2, 0xcf, 0xc0, 0x42, 0x0f, + 0x3b, 0x01, 0x91, 0x89, 0xfe, 0xc1, 0xeb, 0x25, 0xfa, 0x53, 0xbb, 0x4b, 0xd0, 0x88, 0xf8, 0xf3, + 0x6b, 0x0f, 0x14, 0xfd, 0x1b, 0x05, 0x54, 0x1a, 0x9e, 0xd5, 0x24, 0xed, 0x80, 0xda, 0xbc, 0xdf, + 0x10, 0x9b, 0xfc, 0x16, 0x0a, 0x36, 0x4a, 0x14, 0xec, 0x8f, 0xa7, 0x27, 0x5a, 0x72, 0x76, 0x45, + 0xe5, 0x5a, 0x7f, 0xa1, 0x80, 0xed, 0x0c, 0xfa, 0x2d, 0x94, 0xd3, 0x5f, 0x27, 0xcb, 0xe9, 0x87, + 0x57, 0x59, 0x4c, 0x41, 0x31, 0xfd, 0xa6, 0x92, 0xb3, 0x14, 0x51, 0x4a, 0xc3, 0xd6, 0x8e, 0xda, + 0x3d, 0xdb, 0x21, 0x1d, 0x62, 0x89, 0xc5, 0x94, 0x27, 0x5a, 0xbb, 0xd8, 0x82, 0x26, 0x50, 0x90, + 0x81, 0x1d, 0x8b, 0x9c, 0xe1, 0xc0, 0xe1, 0xfb, 0x96, 0x75, 0x80, 0x7d, 0xdc, 0xb2, 0x1d, 0x9b, + 0xdb, 0xb2, 0x17, 0x59, 0x36, 0x3f, 0x1d, 0x0e, 0xb4, 0x9d, 0x7a, 0x2e, 0xe2, 0xd5, 0x40, 0x7b, + 0x37, 0xdb, 0xca, 0x1b, 0x31, 0xa4, 0x8f, 0x0a, 0xa4, 0x61, 0x1f, 0xa8, 0x94, 0xfc, 0x29, 0x08, + 0x0f, 0x45, 0x9d, 0x7a, 0x7e, 0xc2, 0x6d, 0x49, 0xb8, 0xfd, 0xe5, 0x70, 0xa0, 0xa9, 0xa8, 0x00, + 0x33, 0xdb, 0x71, 0xa1, 0x3c, 0xfc, 0x12, 0x6c, 0x62, 0xd9, 0x84, 0x4f, 0x7a, 0x9d, 0x17, 0x5e, + 0x1f, 0x0c, 0x07, 0xda, 0xe6, 0x7e, 0xd6, 0x3c, 0xdb, 0x61, 0x9e, 0x28, 0xac, 0x81, 0xa5, 0x9e, + 0xe8, 0xd7, 0x99, 0xba, 0x20, 0xf4, 0xb7, 0x87, 0x03, 0x6d, 0x69, 0xd4, 0xc2, 0x87, 0x9a, 0x8b, + 0x47, 0x4d, 0xd1, 0x05, 0x46, 0x28, 0xf8, 0x09, 0x58, 0x39, 0xf7, 0x18, 0x7f, 0x42, 0xf8, 0x57, + 0x1e, 0xbd, 0x10, 0x85, 0xa1, 0x6c, 0x6e, 0xca, 0x1d, 0x5c, 0x79, 0x34, 0x36, 0xa1, 0x49, 0x1c, + 0xfc, 0x2d, 0x58, 0x3e, 0x97, 0x3d, 0x1f, 0x53, 0x97, 0x44, 0xa2, 0xdd, 0x9e, 0x92, 0x68, 0x89, + 0xfe, 0xd0, 0xac, 0x48, 0xf9, 0xe5, 0x68, 0x98, 0xa1, 0xb1, 0x1a, 0xfc, 0x09, 0x58, 0x12, 0x1f, + 0xc7, 0x75, 0xb5, 0x2c, 0x66, 0x73, 0x5d, 0xc2, 0x97, 0x1e, 0x8d, 0x86, 0x51, 0x64, 0x8f, 0xa0, + 0xc7, 0x8d, 0x03, 0x75, 0x39, 0x0b, 0x3d, 0x6e, 0x1c, 0xa0, 0xc8, 0x0e, 0x9f, 0x83, 0x25, 0x46, + 0x1e, 0xdb, 0x6e, 0x70, 0xa9, 0x02, 0x71, 0xe4, 0xee, 0x4c, 0x99, 0x6e, 0xf3, 0x50, 0x20, 0x53, + 0xdd, 0xf6, 0x58, 0x5d, 0xda, 0x51, 0x24, 0x09, 0x2d, 0xb0, 0x4c, 0x03, 0x77, 0x9f, 0x3d, 0x63, + 0x84, 0xaa, 0x2b, 0x99, 0xab, 0x3e, 0xad, 0x8f, 0x22, 0x6c, 0xda, 0x43, 0x1c, 0x99, 0x18, 0x81, + 0xc6, 0xc2, 0xd0, 0x02, 0x40, 0x7c, 0x88, 0xa6, 0x5e, 0xdd, 0x99, 0xd9, 0x04, 0xa2, 0x18, 0x9c, + 0xf6, 0xb3, 0x1e, 0x1e, 0xcf, 0xb1, 0x19, 0x4d, 0xe8, 0xc2, 0xbf, 0x2a, 0x00, 0xb2, 0xc0, 0xf7, + 0x1d, 0xd2, 0x25, 0x2e, 0xc7, 0x8e, 0x18, 0x65, 0xea, 0xaa, 0x70, 0xf7, 0x8b, 0x69, 0x51, 0xcb, + 0x90, 0xd2, 0x6e, 0xe3, 0x6b, 0x33, 0x0b, 0x45, 0x39, 0x3e, 0xc3, 0x4d, 0x3b, 0x93, 0xab, 0x5d, + 0x9b, 0xb9, 0x69, 0xf9, 0x7f, 0x91, 0xc6, 0x9b, 0x26, 0xed, 0x28, 0x92, 0x84, 0x5f, 0x80, 0x9d, + 0xe8, 0x0f, 0x24, 0xf2, 0x3c, 0x7e, 0x64, 0x3b, 0x84, 0xf5, 0x19, 
0x27, 0x5d, 0x75, 0x5d, 0x24, + 0x53, 0x55, 0x32, 0x77, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, 0x68, 0x51, 0x11, 0x0a, 0x4f, + 0x68, 0x5c, 0x05, 0x0f, 0x59, 0x1b, 0x3b, 0xa3, 0xc6, 0xe8, 0xba, 0x70, 0xf0, 0xfe, 0x70, 0xa0, + 0x69, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x06, 0xa8, 0xb8, 0xc8, 0xcf, 0x86, 0xf0, 0xf3, + 0xa3, 0xb0, 0xb2, 0x15, 0x3a, 0x28, 0x64, 0x43, 0x1f, 0x6c, 0xe0, 0xe4, 0x5f, 0x79, 0xa6, 0x56, + 0xc4, 0x59, 0xff, 0x60, 0xca, 0x3e, 0xa4, 0xfe, 0xfd, 0x9b, 0xaa, 0x0c, 0xe3, 0x46, 0xca, 0xc0, + 0x50, 0x46, 0x1d, 0x5e, 0x02, 0x88, 0xd3, 0x2f, 0x0f, 0x4c, 0x85, 0x33, 0x2f, 0xb2, 0xcc, 0x73, + 0xc5, 0x38, 0xd5, 0x32, 0x26, 0x86, 0x72, 0x7c, 0x40, 0x0e, 0x2a, 0x38, 0xf5, 0x52, 0xc2, 0xd4, + 0x1b, 0xc2, 0xf1, 0x4f, 0x67, 0x3b, 0x8e, 0x39, 0xe6, 0x4d, 0xe9, 0xb7, 0x92, 0xb6, 0x30, 0x94, + 0x75, 0x00, 0x1f, 0x83, 0x2d, 0x39, 0xf8, 0xcc, 0x65, 0xf8, 0x8c, 0x34, 0xfb, 0xac, 0xcd, 0x1d, + 0xa6, 0x6e, 0x8a, 0xda, 0xad, 0x0e, 0x07, 0xda, 0xd6, 0x7e, 0x8e, 0x1d, 0xe5, 0xb2, 0xe0, 0x67, + 0x60, 0xe3, 0xcc, 0xa3, 0x2d, 0xdb, 0xb2, 0x88, 0x1b, 0x29, 0x6d, 0x09, 0xa5, 0xad, 0x30, 0xfe, + 0x47, 0x29, 0x1b, 0xca, 0xa0, 0x21, 0x03, 0xdb, 0x52, 0xb9, 0x41, 0xbd, 0xf6, 0x89, 0x17, 0xb8, + 0x3c, 0xbc, 0x2e, 0x98, 0xba, 0x1d, 0x5f, 0x91, 0xdb, 0xfb, 0x79, 0x80, 0x57, 0x03, 0xed, 0x56, + 0xce, 0x75, 0x95, 0x00, 0xa1, 0x7c, 0x6d, 0xe8, 0x80, 0x55, 0xf9, 0xf6, 0x75, 0xe0, 0x60, 0xc6, + 0x54, 0x55, 0x1c, 0xf5, 0xfb, 0xd3, 0x0b, 0x5b, 0x0c, 0x4f, 0x9f, 0x77, 0xf1, 0xa7, 0x6c, 0x12, + 0x80, 0x12, 0xea, 0xfa, 0xdf, 0x15, 0x70, 0xb3, 0xb0, 0x30, 0xc2, 0xfb, 0x89, 0x07, 0x15, 0x3d, + 0xf5, 0xa0, 0x02, 0xb3, 0xc4, 0x37, 0xf0, 0x9e, 0xf2, 0xb5, 0x02, 0xd4, 0xa2, 0x1b, 0x02, 0x7e, + 0x92, 0x98, 0xe0, 0x7b, 0xa9, 0x09, 0x56, 0x32, 0xbc, 0x37, 0x30, 0xbf, 0x6f, 0x15, 0xf0, 0xce, + 0x94, 0x1d, 0x88, 0x0b, 0x12, 0xb1, 0x26, 0x51, 0x4f, 0x70, 0x78, 0x94, 0x15, 0x91, 0x47, 0xe3, + 0x82, 0x94, 0x83, 0x41, 0x85, 0x6c, 0xf8, 0x0c, 0xdc, 0x90, 0xd5, 0x30, 0x6d, 0x13, 0x9d, 0xfb, + 0xb2, 0xf9, 0xce, 0x70, 0xa0, 0xdd, 0xa8, 0xe7, 0x43, 0x50, 0x11, 0x57, 0xff, 0xa7, 0x02, 0x76, + 0xf2, 0xaf, 0x7c, 0x78, 0x37, 0x11, 0x6e, 0x2d, 0x15, 0xee, 0xeb, 0x29, 0x96, 0x0c, 0xf6, 0x1f, + 0xc0, 0xba, 0x6c, 0x0c, 0x92, 0xef, 0x83, 0x89, 0xa0, 0x87, 0x47, 0x24, 0xec, 0xe9, 0xa5, 0x44, + 0x94, 0xbe, 0xe2, 0xaf, 0x78, 0x72, 0x0c, 0xa5, 0xd4, 0xf4, 0x7f, 0x29, 0xe0, 0xbd, 0x99, 0x97, + 0x2d, 0x34, 0x13, 0x53, 0x37, 0x52, 0x53, 0xaf, 0x16, 0x0b, 0xbc, 0x99, 0x67, 0x42, 0xf3, 0xa3, + 0x17, 0x2f, 0xab, 0x73, 0xdf, 0xbd, 0xac, 0xce, 0x7d, 0xff, 0xb2, 0x3a, 0xf7, 0xe7, 0x61, 0x55, + 0x79, 0x31, 0xac, 0x2a, 0xdf, 0x0d, 0xab, 0xca, 0xf7, 0xc3, 0xaa, 0xf2, 0xdf, 0x61, 0x55, 0xf9, + 0xdb, 0xff, 0xaa, 0x73, 0xbf, 0x5b, 0x92, 0x72, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xe0, + 0x55, 0x1c, 0x41, 0x18, 0x00, 0x00, +} + +func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = 
make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Driver) + copy(dAtA[i:], m.Driver) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.PathPrefix) + copy(dAtA[i:], m.PathPrefix) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Eviction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DeleteOptions != nil { + { + size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *HostPortRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostPortRange) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *IDRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudget) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.MaxUnavailable != nil { + { + 
size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.Selector != nil { + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.MinAvailable != nil { + { + size, err := m.MinAvailable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) + i-- + dAtA[i] = 0x30 + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) + i-- + dAtA[i] = 0x28 + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) + i-- + dAtA[i] = 0x20 + i = encodeVarintGenerated(dAtA, i, uint64(m.DisruptionsAllowed)) + i-- + dAtA[i] = 0x18 + if len(m.DisruptedPods) > 0 { + keysForDisruptedPods := make([]string, 0, len(m.DisruptedPods)) + for k := range m.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + for iNdEx := len(keysForDisruptedPods) - 1; iNdEx >= 0; iNdEx-- { + v := m.DisruptedPods[string(keysForDisruptedPods[iNdEx])] + baseI := i + { + size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForDisruptedPods[iNdEx]) + copy(dAtA[i:], keysForDisruptedPods[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDisruptedPods[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RuntimeClass != nil { + { + size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xc2 + } + if len(m.AllowedCSIDrivers) > 0 { + for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xba + } + } + if m.RunAsGroup != nil { + { + size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xb2 + } + if len(m.AllowedProcMountTypes) > 0 { + for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedProcMountTypes[iNdEx]) + copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xaa + } + } + if len(m.ForbiddenSysctls) > 0 { + for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ForbiddenSysctls[iNdEx]) + copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0xa2 + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedUnsafeSysctls[iNdEx]) + copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } + } + if len(m.AllowedFlexVolumes) > 0 { + for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x92 + } + } + if 
len(m.AllowedHostPaths) > 0 { + for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + if m.AllowPrivilegeEscalation != nil { + i-- + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x80 + } + if m.DefaultAllowPrivilegeEscalation != nil { + i-- + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x78 + } + i-- + if m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x70 + { + size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + { + size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x62 + { + size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x5a + { + size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + i-- + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i-- + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x40 + if len(m.HostPorts) > 0 { + for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + i-- + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Volumes[iNdEx]) + copy(dAtA[i:], m.Volumes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.AllowedCapabilities) > 0 { + for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedCapabilities[iNdEx]) + copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.RequiredDropCapabilities) > 0 { + for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RequiredDropCapabilities[iNdEx]) + copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.DefaultAddCapabilities) > 0 { + for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DefaultAddCapabilities[iNdEx]) + copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DefaultRuntimeClassName != nil { + i -= len(*m.DefaultRuntimeClassName) + copy(dAtA[i:], *m.DefaultRuntimeClassName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) + i-- + dAtA[i] = 0x12 + } + if len(m.AllowedRuntimeClassNames) > 0 { + for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllowedRuntimeClassNames[iNdEx]) + copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SELinuxOptions != nil { + { + size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Rule) + copy(dAtA[i:], m.Rule) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AllowedCSIDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AllowedFlexVolume) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *AllowedHostPath) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.PathPrefix) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *Eviction) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeleteOptions != nil { + l = m.DeleteOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *FSGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *HostPortRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *IDRange) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Min)) + n += 1 + sovGenerated(uint64(m.Max)) + return n +} + +func (m *PodDisruptionBudget) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodDisruptionBudgetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodDisruptionBudgetSpec) Size() (n int) { + 
if m == nil { + return 0 + } + var l int + _ = l + if m.MinAvailable != nil { + l = m.MinAvailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.MaxUnavailable != nil { + l = m.MaxUnavailable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PodDisruptionBudgetStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + if len(m.DisruptedPods) > 0 { + for k, v := range m.DisruptedPods { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.CurrentHealthy)) + n += 1 + sovGenerated(uint64(m.DesiredHealthy)) + n += 1 + sovGenerated(uint64(m.ExpectedPods)) + return n +} + +func (m *PodSecurityPolicy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodSecurityPolicyList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodSecurityPolicySpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + if len(m.HostPorts) > 0 { + for _, e := range m.HostPorts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 2 + n += 2 + l = m.SELinux.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.RunAsUser.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.SupplementalGroups.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.FSGroup.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.DefaultAllowPrivilegeEscalation != nil { + n += 2 + } + if m.AllowPrivilegeEscalation != nil { + n += 3 + } + if len(m.AllowedHostPaths) > 0 { + for _, e := range m.AllowedHostPaths { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedFlexVolumes) > 0 { + for _, e := range m.AllowedFlexVolumes { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.AllowedProcMountTypes) > 0 { + for _, s := range m.AllowedProcMountTypes { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RunAsGroup != nil { + l = m.RunAsGroup.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if 
len(m.AllowedCSIDrivers) > 0 { + for _, e := range m.AllowedCSIDrivers { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClass != nil { + l = m.RuntimeClass.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RunAsGroupStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RunAsUserStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RuntimeClassStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllowedRuntimeClassNames) > 0 { + for _, s := range m.AllowedRuntimeClassNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.DefaultRuntimeClassName != nil { + l = len(*m.DefaultRuntimeClassName) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SELinuxStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *SupplementalGroupsStrategyOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Rule) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AllowedCSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedCSIDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedFlexVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedHostPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedHostPath{`, + `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Eviction) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Eviction{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *FSGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&FSGroupStrategyOptions{`, + `Rule:` + 
fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *HostPortRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&HostPortRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *IDRange) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&IDRange{`, + `Min:` + fmt.Sprintf("%v", this.Min) + `,`, + `Max:` + fmt.Sprintf("%v", this.Max) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudget) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudget{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodDisruptionBudget{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodDisruptionBudgetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodDisruptionBudgetStatus) String() string { + if this == nil { + return "nil" + } + keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) + for k := range this.DisruptedPods { + keysForDisruptedPods = append(keysForDisruptedPods, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) + mapStringForDisruptedPods := "map[string]v1.Time{" + for _, k := range keysForDisruptedPods { + mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) + } + mapStringForDisruptedPods += "}" + s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `DisruptedPods:` + mapStringForDisruptedPods + `,`, + `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, + `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, + `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, + `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicy) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodSecurityPolicy{`, + 
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicyList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodSecurityPolicy{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodSecurityPolicyList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityPolicySpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForHostPorts := "[]HostPortRange{" + for _, f := range this.HostPorts { + repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," + } + repeatedStringForHostPorts += "}" + repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" + for _, f := range this.AllowedHostPaths { + repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedHostPaths += "}" + repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" + for _, f := range this.AllowedFlexVolumes { + repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedFlexVolumes += "}" + repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" + for _, f := range this.AllowedCSIDrivers { + repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForAllowedCSIDrivers += "}" + s := strings.Join([]string{`&PodSecurityPolicySpec{`, + `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, + `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, + `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, + `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, + `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPorts:` + repeatedStringForHostPorts + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, + `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, + `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, + `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, + `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, + 
`DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, + `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, + `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, + `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, + `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, + `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, + `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, + `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *RunAsGroupStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RunAsUserStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&RunAsUserStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func (this *RuntimeClassStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, + `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, + `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, + `}`, + }, "") + return s +} +func (this *SELinuxStrategyOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SELinuxStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, + `}`, + }, "") + return s +} +func (this *SupplementalGroupsStrategyOptions) String() string { + if this == nil { + return "nil" + } + repeatedStringForRanges := "[]IDRange{" + for _, f := range this.Ranges { + repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + } + repeatedStringForRanges += "}" + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + repeatedStringForRanges + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); 
; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { 
+ if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PathPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Eviction) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Eviction: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeleteOptions == nil { + m.DeleteOptions = &v1.DeleteOptions{} + } + if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostPortRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *IDRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IDRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) + } + m.Min = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Min |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) + } + m.Max = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Max |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodDisruptionBudget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MinAvailable == nil { + m.MinAvailable = &intstr.IntOrString{} + } + if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Selector == nil { + m.Selector = &v1.LabelSelector{} + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MaxUnavailable == nil { + m.MaxUnavailable = &intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) + } + m.ObservedGeneration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ObservedGeneration |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if m.DisruptedPods == nil { + m.DisruptedPods = make(map[string]v1.Time) + } + var mapkey string + mapvalue := &v1.Time{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.DisruptedPods[mapkey] = *mapvalue + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) + } + m.DisruptionsAllowed = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DisruptionsAllowed |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) + } + m.CurrentHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CurrentHealthy |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) + } + m.DesiredHealthy = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DesiredHealthy |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) + } + m.ExpectedPods = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExpectedPods |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group 
for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodSecurityPolicy{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Privileged = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostNetwork = bool(v != 0) + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.HostPorts = append(m.HostPorts, HostPortRange{}) + if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostPID = bool(v != 0) + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.HostIPC = bool(v != 0) + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + 
return io.ErrUnexpectedEOF + } + if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnlyRootFilesystem = bool(v != 0) + case 15: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.DefaultAllowPrivilegeEscalation = &b + case 16: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) + if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 18: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 20: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 21: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 22: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RunAsGroup == nil { + m.RunAsGroup = &RunAsGroupStrategyOptions{} + } + if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 23: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) + if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 24: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.RuntimeClass == nil { + m.RuntimeClass = &RuntimeClassStrategyOptions{} + } + if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx 
:= 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.DefaultRuntimeClassName = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SELinuxOptions == nil { + m.SELinuxOptions = 
&v11.SELinuxOptions{} + } + if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for 
shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto new file mode 100644 index 0000000..d044837 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -0,0 +1,400 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.policy.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; +import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +message AllowedCSIDriver { + // Name is the registered name of the CSI driver + optional string name = 1; +} + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +message AllowedFlexVolume { + // driver is the name of the Flexvolume driver. + optional string driver = 1; +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +message AllowedHostPath { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. 
+ // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + optional string pathPrefix = 1; + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. + // +optional + optional bool readOnly = 2; +} + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods//evictions. +message Eviction { + // ObjectMeta describes the pod that is being evicted. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // DeleteOptions may be provided + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; +} + +// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. +message FSGroupStrategyOptions { + // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. + // +optional + optional string rule = 1; + + // ranges are the allowed ranges of fs groups. If you would like to force a single + // fs group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +message HostPortRange { + // min is the start of the range, inclusive. + optional int32 min = 1; + + // max is the end of the range, inclusive. + optional int32 max = 2; +} + +// IDRange provides a min/max of an allowed range of IDs. +message IDRange { + // min is the start of the range, inclusive. + optional int64 min = 1; + + // max is the end of the range, inclusive. + optional int64 max = 2; +} + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +message PodDisruptionBudget { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired behavior of the PodDisruptionBudget. + // +optional + optional PodDisruptionBudgetSpec spec = 2; + + // Most recently observed status of the PodDisruptionBudget. + // +optional + optional PodDisruptionBudgetStatus status = 3; +} + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +message PodDisruptionBudgetList { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + repeated PodDisruptionBudget items = 2; +} + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +message PodDisruptionBudgetSpec { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; + + // Label query over pods whose evictions are managed by the disruption + // budget. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; + + // An eviction is allowed if at most "maxUnavailable" pods selected by + // "selector" are unavailable after the eviction, i.e. even in absence of + // the evicted pod. For example, one can prevent all voluntary evictions + // by specifying 0. 
This is a mutually exclusive setting with "minAvailable". + // +optional + optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +message PodDisruptionBudgetStatus { + // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other + // status information is valid only if observedGeneration equals to PDB's object generation. + // +optional + optional int64 observedGeneration = 1; + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there it will be removed from + // the list automatically by PodDisruptionBudget controller after some time. + // If everything goes smooth this map should be empty for the most of the time. + // Large number of entries in the map may indicate problems with pod deletions. + // +optional + map disruptedPods = 2; + + // Number of pod disruptions that are currently allowed. + optional int32 disruptionsAllowed = 3; + + // current number of healthy pods + optional int32 currentHealthy = 4; + + // minimum desired number of healthy pods + optional int32 desiredHealthy = 5; + + // total number of pods counted by this disruption budget + optional int32 expectedPods = 6; +} + +// PodSecurityPolicy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +message PodSecurityPolicy { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec defines the policy enforced. + // +optional + optional PodSecurityPolicySpec spec = 2; +} + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +message PodSecurityPolicyList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is a list of schema objects. + repeated PodSecurityPolicy items = 2; +} + +// PodSecurityPolicySpec defines the policy enforced. +message PodSecurityPolicySpec { + // privileged determines if a pod can request to be run as privileged. + // +optional + optional bool privileged = 1; + + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. + // +optional + repeated string defaultAddCapabilities = 2; + + // requiredDropCapabilities are the capabilities that will be dropped from the container. 
These + // are required to be dropped and cannot be added. + // +optional + repeated string requiredDropCapabilities = 3; + + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. + // +optional + repeated string allowedCapabilities = 4; + + // volumes is a white list of allowed volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. + // +optional + repeated string volumes = 5; + + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + optional bool hostNetwork = 6; + + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + repeated HostPortRange hostPorts = 7; + + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + optional bool hostPID = 8; + + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + optional bool hostIPC = 9; + + // seLinux is the strategy that will dictate the allowable labels that may be set. + optional SELinuxStrategyOptions seLinux = 10; + + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + optional RunAsUserStrategyOptions runAsUser = 11; + + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + optional RunAsGroupStrategyOptions runAsGroup = 22; + + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + optional SupplementalGroupsStrategyOptions supplementalGroups = 12; + + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + optional FSGroupStrategyOptions fsGroup = 13; + + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + optional bool readOnlyRootFilesystem = 14; + + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + optional bool defaultAllowPrivilegeEscalation = 15; + + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + optional bool allowPrivilegeEscalation = 16; + + // allowedHostPaths is a white list of allowed host paths. Empty indicates + // that all host paths may be used. + // +optional + repeated AllowedHostPath allowedHostPaths = 17; + + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + repeated AllowedFlexVolume allowedFlexVolumes = 18; + + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. 
+ // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. + // +optional + repeated AllowedCSIDriver allowedCSIDrivers = 23; + + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + repeated string allowedUnsafeSysctls = 19; + + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + repeated string forbiddenSysctls = 20; + + // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. + // Empty or nil indicates that only the DefaultProcMountType may be used. + // This requires the ProcMountType feature flag to be enabled. + // +optional + repeated string allowedProcMountTypes = 21; + + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + optional RuntimeClassStrategyOptions runtimeClass = 24; +} + +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +message RunAsGroupStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +message RunAsUserStrategyOptions { + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. + optional string rule = 1; + + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + +// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses +// for a pod. +message RuntimeClassStrategyOptions { + // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. + // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + repeated string allowedRuntimeClassNames = 1; + + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. 
+ // +optional + optional string defaultRuntimeClassName = 2; +} + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +message SELinuxStrategyOptions { + // rule is the strategy that will dictate the allowable labels that may be set. + optional string rule = 1; + + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; +} + +// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. +message SupplementalGroupsStrategyOptions { + // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. + // +optional + optional string rule = 1; + + // ranges are the allowed ranges of supplemental groups. If you would like to force a single + // supplemental group then supply a single range with the same start and end. Required for MustRunAs. + // +optional + repeated IDRange ranges = 2; +} + diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go new file mode 100644 index 0000000..b3efd63 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/register.go @@ -0,0 +1,56 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "policy" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodDisruptionBudget{}, + &PodDisruptionBudgetList{}, + &PodSecurityPolicy{}, + &PodSecurityPolicyList{}, + &Eviction{}, + ) + // Add the watch version that applies + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go new file mode 100644 index 0000000..e6a5976 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -0,0 +1,489 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" +) + +// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. +type PodDisruptionBudgetSpec struct { + // An eviction is allowed if at least "minAvailable" pods selected by + // "selector" will still be available after the eviction, i.e. even in the + // absence of the evicted pod. So for example you can prevent all voluntary + // evictions by specifying "100%". + // +optional + MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` + + // Label query over pods whose evictions are managed by the disruption + // budget. + // +optional + Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` + + // An eviction is allowed if at most "maxUnavailable" pods selected by + // "selector" are unavailable after the eviction, i.e. even in absence of + // the evicted pod. For example, one can prevent all voluntary evictions + // by specifying 0. This is a mutually exclusive setting with "minAvailable". + // +optional + MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` +} + +// PodDisruptionBudgetStatus represents information about the status of a +// PodDisruptionBudget. Status may trail the actual state of a system. +type PodDisruptionBudgetStatus struct { + // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other + // status information is valid only if observedGeneration equals to PDB's object generation. + // +optional + ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` + + // DisruptedPods contains information about pods whose eviction was + // processed by the API server eviction subresource handler but has not + // yet been observed by the PodDisruptionBudget controller. + // A pod will be in this map from the time when the API server processed the + // eviction request to the time when the pod is seen by PDB controller + // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod + // and the value is the time when the API server processed the eviction request. If + // the deletion didn't occur and a pod is still there it will be removed from + // the list automatically by PodDisruptionBudget controller after some time. + // If everything goes smooth this map should be empty for the most of the time. + // Large number of entries in the map may indicate problems with pod deletions. + // +optional + DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty" protobuf:"bytes,2,rep,name=disruptedPods"` + + // Number of pod disruptions that are currently allowed. 
+ DisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` + + // current number of healthy pods + CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` + + // minimum desired number of healthy pods + DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,5,opt,name=desiredHealthy"` + + // total number of pods counted by this disruption budget + ExpectedPods int32 `json:"expectedPods" protobuf:"varint,6,opt,name=expectedPods"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods +type PodDisruptionBudget struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired behavior of the PodDisruptionBudget. + // +optional + Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` + // Most recently observed status of the PodDisruptionBudget. + // +optional + Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. +type PodDisruptionBudgetList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:noVerbs +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Eviction evicts a pod from its node subject to certain policies and safety constraints. +// This is a subresource of Pod. A request to cause such an eviction is +// created by POSTing to .../pods//evictions. +type Eviction struct { + metav1.TypeMeta `json:",inline"` + + // ObjectMeta describes the pod that is being evicted. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // DeleteOptions may be provided + // +optional + DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicy governs the ability to make requests that affect the Security Context +// that will be applied to a pod and container. +type PodSecurityPolicy struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec defines the policy enforced. + // +optional + Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// PodSecurityPolicySpec defines the policy enforced. +type PodSecurityPolicySpec struct { + // privileged determines if a pod can request to be run as privileged. + // +optional + Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` + // defaultAddCapabilities is the default set of capabilities that will be added to the container + // unless the pod spec specifically drops the capability. 
You may not list a capability in both + // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly + // allowed, and need not be included in the allowedCapabilities list. + // +optional + DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // requiredDropCapabilities are the capabilities that will be dropped from the container. These + // are required to be dropped and cannot be added. + // +optional + RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // allowedCapabilities is a list of capabilities that can be requested to add to the container. + // Capabilities in this field may be added at the pod author's discretion. + // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. + // +optional + AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` + // volumes is a white list of allowed volume plugins. Empty indicates that + // no volumes may be used. To allow all volumes you may use '*'. + // +optional + Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` + // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. + // +optional + HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` + // hostPorts determines which host port ranges are allowed to be exposed. + // +optional + HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` + // hostPID determines if the policy allows the use of HostPID in the pod spec. + // +optional + HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` + // hostIPC determines if the policy allows the use of HostIPC in the pod spec. + // +optional + HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` + // seLinux is the strategy that will dictate the allowable labels that may be set. + SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` + // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. + RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` + // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. + // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the + // RunAsGroup feature gate to be enabled. + // +optional + RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` + // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. + SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` + // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. + FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` + // readOnlyRootFilesystem when set to true will force containers to run with a read only root file + // system. 
If the container specifically requests to run with a non-read only root file system + // the PSP should deny the pod. + // If set to false the container may run with a read only root file system if it wishes but it + // will not be forced to. + // +optional + ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` + // defaultAllowPrivilegeEscalation controls the default setting for whether a + // process can gain more privileges than its parent process. + // +optional + DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` + // allowPrivilegeEscalation determines if a pod can request to allow + // privilege escalation. If unspecified, defaults to true. + // +optional + AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` + // allowedHostPaths is a white list of allowed host paths. Empty indicates + // that all host paths may be used. + // +optional + AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` + // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all + // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes + // is allowed in the "volumes" field. + // +optional + AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` + // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. + // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. + // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. + // +optional + AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` + // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. + // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. + // + // Examples: + // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. + // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. + // +optional + AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` + // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. + // Each entry is either a plain sysctl name or ends in "*" in which case it is considered + // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. + // + // Examples: + // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. + // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. + // +optional + ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` + // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. + // Empty or nil indicates that only the DefaultProcMountType may be used. + // This requires the ProcMountType feature flag to be enabled. 
+ // +optional + AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` + // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. + // If this field is omitted, the pod's runtimeClassName field is unrestricted. + // Enforcement of this field depends on the RuntimeClass feature gate being enabled. + // +optional + RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` +} + +// AllowedHostPath defines the host volume conditions that will be enabled by a policy +// for pods to use. It requires the path prefix to be defined. +type AllowedHostPath struct { + // pathPrefix is the path prefix that the host volume must match. + // It does not support `*`. + // Trailing slashes are trimmed when validating the path prefix with a host path. + // + // Examples: + // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` + // `/foo` would not allow `/food` or `/etc/foo` + PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` + + // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. + // +optional + ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` +} + +// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities +// field and means that any capabilities are allowed to be requested. +var AllowAllCapabilities v1.Capability = "*" + +// FSType gives strong typing to different file systems that are used by volumes. +type FSType string + +const ( + AzureFile FSType = "azureFile" + Flocker FSType = "flocker" + FlexVolume FSType = "flexVolume" + HostPath FSType = "hostPath" + EmptyDir FSType = "emptyDir" + GCEPersistentDisk FSType = "gcePersistentDisk" + AWSElasticBlockStore FSType = "awsElasticBlockStore" + GitRepo FSType = "gitRepo" + Secret FSType = "secret" + NFS FSType = "nfs" + ISCSI FSType = "iscsi" + Glusterfs FSType = "glusterfs" + PersistentVolumeClaim FSType = "persistentVolumeClaim" + RBD FSType = "rbd" + Cinder FSType = "cinder" + CephFS FSType = "cephFS" + DownwardAPI FSType = "downwardAPI" + FC FSType = "fc" + ConfigMap FSType = "configMap" + VsphereVolume FSType = "vsphereVolume" + Quobyte FSType = "quobyte" + AzureDisk FSType = "azureDisk" + PhotonPersistentDisk FSType = "photonPersistentDisk" + StorageOS FSType = "storageos" + Projected FSType = "projected" + PortworxVolume FSType = "portworxVolume" + ScaleIO FSType = "scaleIO" + CSI FSType = "csi" + All FSType = "*" +) + +// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. +type AllowedFlexVolume struct { + // driver is the name of the Flexvolume driver. + Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` +} + +// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. +type AllowedCSIDriver struct { + // Name is the registered name of the CSI driver + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` +} + +// HostPortRange defines a range of host ports that will be enabled by a policy +// for pods to use. It requires both the start and end to be defined. +type HostPortRange struct { + // min is the start of the range, inclusive. + Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. 
+ Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. +type SELinuxStrategyOptions struct { + // rule is the strategy that will dictate the allowable labels that may be set. + Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` + // seLinuxOptions required to run as; required for MustRunAs + // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + // +optional + SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` +} + +// SELinuxStrategy denotes strategy types for generating SELinux options for a +// Security Context. +type SELinuxStrategy string + +const ( + // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. + SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" + // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. + SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" +) + +// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsUserStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsUser values that may be set. + Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` + // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. +type RunAsGroupStrategyOptions struct { + // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. + Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` + // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid + // then supply a single range with the same start and end. Required for MustRunAs. + // +optional + Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` +} + +// IDRange provides a min/max of an allowed range of IDs. +type IDRange struct { + // min is the start of the range, inclusive. + Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` + // max is the end of the range, inclusive. + Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` +} + +// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a +// Security Context. +type RunAsUserStrategy string + +const ( + // RunAsUserStrategyMustRunAs means that container must run as a particular uid. + RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" + // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. + RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" + // RunAsUserStrategyRunAsAny means that container may make requests for any uid. + RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" +) + +// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a +// Security Context. +type RunAsGroupStrategy string + +const ( + // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. 
+ // However, when RunAsGroup are specified, they have to fall in the defined range.
+ RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs"
+ // RunAsGroupStrategyMustRunAs means that container must run as a particular gid.
+ RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs"
+ // RunAsGroupStrategyRunAsAny means that container may make requests for any gid.
+ RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny"
+)
+
+// FSGroupStrategyOptions defines the strategy type and options used to create the strategy.
+type FSGroupStrategyOptions struct {
+ // rule is the strategy that will dictate what FSGroup is used in the SecurityContext.
+ // +optional
+ Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"`
+ // ranges are the allowed ranges of fs groups. If you would like to force a single
+ // fs group then supply a single range with the same start and end. Required for MustRunAs.
+ // +optional
+ Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// FSGroupStrategyType denotes strategy types for generating FSGroup values for a
+// SecurityContext
+type FSGroupStrategyType string
+
+const (
+ // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied.
+ // However, when FSGroups are specified, they have to fall in the defined range.
+ FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs"
+ // FSGroupStrategyMustRunAs means that container must have FSGroup of X applied.
+ FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs"
+ // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels.
+ FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny"
+)
+
+// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.
+type SupplementalGroupsStrategyOptions struct {
+ // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.
+ // +optional
+ Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"`
+ // ranges are the allowed ranges of supplemental groups. If you would like to force a single
+ // supplemental group then supply a single range with the same start and end. Required for MustRunAs.
+ // +optional
+ Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"`
+}
+
+// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental
+// groups for a SecurityContext.
+type SupplementalGroupsStrategyType string
+
+const (
+ // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid.
+ // However, when gids are specified, they have to fall in the defined range.
+ SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs"
+ // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid.
+ SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs"
+ // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid.
+ SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny"
+)
+
+// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses
+// for a pod.
+type RuntimeClassStrategyOptions struct {
+ // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. 
+ // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the + // list. An empty list requires the RuntimeClassName field to be unset. + AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` + // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. + // The default MUST be allowed by the allowedRuntimeClassNames list. + // A value of nil does not mutate the Pod. + // +optional + DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` +} + +// AllowAllRuntimeClassNames can be used as a value for the +// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is +// allowed. +const AllowAllRuntimeClassNames = "*" + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodSecurityPolicyList is a list of PodSecurityPolicy objects. +type PodSecurityPolicyList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is a list of schema objects. + Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..70f667c --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,243 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
+var map_AllowedCSIDriver = map[string]string{ + "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", + "name": "Name is the registered name of the CSI driver", +} + +func (AllowedCSIDriver) SwaggerDoc() map[string]string { + return map_AllowedCSIDriver +} + +var map_AllowedFlexVolume = map[string]string{ + "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", + "driver": "driver is the name of the Flexvolume driver.", +} + +func (AllowedFlexVolume) SwaggerDoc() map[string]string { + return map_AllowedFlexVolume +} + +var map_AllowedHostPath = map[string]string{ + "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.", + "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", + "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", +} + +func (AllowedHostPath) SwaggerDoc() map[string]string { + return map_AllowedHostPath +} + +var map_Eviction = map[string]string{ + "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.", + "metadata": "ObjectMeta describes the pod that is being evicted.", + "deleteOptions": "DeleteOptions may be provided", +} + +func (Eviction) SwaggerDoc() map[string]string { + return map_Eviction +} + +var map_FSGroupStrategyOptions = map[string]string{ + "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_FSGroupStrategyOptions +} + +var map_HostPortRange = map[string]string{ + "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. 
It requires both the start and end to be defined.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (HostPortRange) SwaggerDoc() map[string]string { + return map_HostPortRange +} + +var map_IDRange = map[string]string{ + "": "IDRange provides a min/max of an allowed range of IDs.", + "min": "min is the start of the range, inclusive.", + "max": "max is the end of the range, inclusive.", +} + +func (IDRange) SwaggerDoc() map[string]string { + return map_IDRange +} + +var map_PodDisruptionBudget = map[string]string{ + "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", + "spec": "Specification of the desired behavior of the PodDisruptionBudget.", + "status": "Most recently observed status of the PodDisruptionBudget.", +} + +func (PodDisruptionBudget) SwaggerDoc() map[string]string { + return map_PodDisruptionBudget +} + +var map_PodDisruptionBudgetList = map[string]string{ + "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", +} + +func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetList +} + +var map_PodDisruptionBudgetSpec = map[string]string{ + "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", + "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", + "selector": "Label query over pods whose evictions are managed by the disruption budget.", + "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", +} + +func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetSpec +} + +var map_PodDisruptionBudgetStatus = map[string]string{ + "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", + "observedGeneration": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", + "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. 
Large number of entries in the map may indicate problems with pod deletions.", + "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", + "currentHealthy": "current number of healthy pods", + "desiredHealthy": "minimum desired number of healthy pods", + "expectedPods": "total number of pods counted by this disruption budget", +} + +func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { + return map_PodDisruptionBudgetStatus +} + +var map_PodSecurityPolicy = map[string]string{ + "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "spec defines the policy enforced.", +} + +func (PodSecurityPolicy) SwaggerDoc() map[string]string { + return map_PodSecurityPolicy +} + +var map_PodSecurityPolicyList = map[string]string{ + "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is a list of schema objects.", +} + +func (PodSecurityPolicyList) SwaggerDoc() map[string]string { + return map_PodSecurityPolicyList +} + +var map_PodSecurityPolicySpec = map[string]string{ + "": "PodSecurityPolicySpec defines the policy enforced.", + "privileged": "privileged determines if a pod can request to be run as privileged.", + "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", + "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", + "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", + "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.", + "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", + "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", + "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", + "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", + "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", + "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", + "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. 
This field requires the RunAsGroup feature gate to be enabled.", + "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", + "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", + "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", + "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", + "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", + "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", + "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", + "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.", + "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", + "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", + "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", + "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", +} + +func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { + return map_PodSecurityPolicySpec +} + +var map_RunAsGroupStrategyOptions = map[string]string{ + "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", + "ranges": "ranges are the allowed ranges of gids that may be used. 
If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsGroupStrategyOptions +} + +var map_RunAsUserStrategyOptions = map[string]string{ + "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", + "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { + return map_RunAsUserStrategyOptions +} + +var map_RuntimeClassStrategyOptions = map[string]string{ + "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", + "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", + "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", +} + +func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { + return map_RuntimeClassStrategyOptions +} + +var map_SELinuxStrategyOptions = map[string]string{ + "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", + "rule": "rule is the strategy that will dictate the allowable labels that may be set.", + "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", +} + +func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { + return map_SELinuxStrategyOptions +} + +var map_SupplementalGroupsStrategyOptions = map[string]string{ + "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", + "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", + "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", +} + +func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { + return map_SupplementalGroupsStrategyOptions +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..75851e1 --- /dev/null +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,540 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + corev1 "k8s.io/api/core/v1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. +func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { + if in == nil { + return nil + } + out := new(AllowedCSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { + if in == nil { + return nil + } + out := new(AllowedFlexVolume) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. +func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { + if in == nil { + return nil + } + out := new(AllowedHostPath) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Eviction) DeepCopyInto(out *Eviction) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.DeleteOptions != nil { + in, out := &in.DeleteOptions, &out.DeleteOptions + *out = new(v1.DeleteOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction. +func (in *Eviction) DeepCopy() *Eviction { + if in == nil { + return nil + } + out := new(Eviction) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Eviction) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. 
+func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { + if in == nil { + return nil + } + out := new(FSGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. +func (in *HostPortRange) DeepCopy() *HostPortRange { + if in == nil { + return nil + } + out := new(HostPortRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *IDRange) DeepCopyInto(out *IDRange) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. +func (in *IDRange) DeepCopy() *IDRange { + if in == nil { + return nil + } + out := new(IDRange) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudget. +func (in *PodDisruptionBudget) DeepCopy() *PodDisruptionBudget { + if in == nil { + return nil + } + out := new(PodDisruptionBudget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodDisruptionBudget, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetList. +func (in *PodDisruptionBudgetList) DeepCopy() *PodDisruptionBudgetList { + if in == nil { + return nil + } + out := new(PodDisruptionBudgetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodDisruptionBudgetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) { + *out = *in + if in.MinAvailable != nil { + in, out := &in.MinAvailable, &out.MinAvailable + *out = new(intstr.IntOrString) + **out = **in + } + if in.Selector != nil { + in, out := &in.Selector, &out.Selector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.MaxUnavailable != nil { + in, out := &in.MaxUnavailable, &out.MaxUnavailable + *out = new(intstr.IntOrString) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetSpec. +func (in *PodDisruptionBudgetSpec) DeepCopy() *PodDisruptionBudgetSpec { + if in == nil { + return nil + } + out := new(PodDisruptionBudgetSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodDisruptionBudgetStatus) DeepCopyInto(out *PodDisruptionBudgetStatus) { + *out = *in + if in.DisruptedPods != nil { + in, out := &in.DisruptedPods, &out.DisruptedPods + *out = make(map[string]v1.Time, len(*in)) + for key, val := range *in { + (*out)[key] = *val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetStatus. +func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus { + if in == nil { + return nil + } + out := new(PodDisruptionBudgetStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. +func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { + if in == nil { + return nil + } + out := new(PodSecurityPolicy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodSecurityPolicy, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. +func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { + if in == nil { + return nil + } + out := new(PodSecurityPolicyList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { + *out = *in + if in.DefaultAddCapabilities != nil { + in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.RequiredDropCapabilities != nil { + in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.AllowedCapabilities != nil { + in, out := &in.AllowedCapabilities, &out.AllowedCapabilities + *out = make([]corev1.Capability, len(*in)) + copy(*out, *in) + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]FSType, len(*in)) + copy(*out, *in) + } + if in.HostPorts != nil { + in, out := &in.HostPorts, &out.HostPorts + *out = make([]HostPortRange, len(*in)) + copy(*out, *in) + } + in.SELinux.DeepCopyInto(&out.SELinux) + in.RunAsUser.DeepCopyInto(&out.RunAsUser) + if in.RunAsGroup != nil { + in, out := &in.RunAsGroup, &out.RunAsGroup + *out = new(RunAsGroupStrategyOptions) + (*in).DeepCopyInto(*out) + } + in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) + in.FSGroup.DeepCopyInto(&out.FSGroup) + if in.DefaultAllowPrivilegeEscalation != nil { + in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.AllowPrivilegeEscalation != nil { + in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation + *out = new(bool) + **out = **in + } + if in.AllowedHostPaths != nil { + in, out := &in.AllowedHostPaths, &out.AllowedHostPaths + *out = make([]AllowedHostPath, len(*in)) + copy(*out, *in) + } + if in.AllowedFlexVolumes != nil { + in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes + *out = make([]AllowedFlexVolume, len(*in)) + copy(*out, *in) + } + if in.AllowedCSIDrivers != nil { + in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers + *out = make([]AllowedCSIDriver, len(*in)) + copy(*out, *in) + } + if in.AllowedUnsafeSysctls != nil { + in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ForbiddenSysctls != nil { + in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowedProcMountTypes != nil { + in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes + *out = make([]corev1.ProcMountType, len(*in)) + copy(*out, *in) + } + if in.RuntimeClass != nil { + in, out := &in.RuntimeClass, &out.RuntimeClass + *out = new(RuntimeClassStrategyOptions) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. +func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { + if in == nil { + return nil + } + out := new(PodSecurityPolicySpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. 
+func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsGroupStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. +func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { + if in == nil { + return nil + } + out := new(RunAsUserStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { + *out = *in + if in.AllowedRuntimeClassNames != nil { + in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.DefaultRuntimeClassName != nil { + in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. +func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { + if in == nil { + return nil + } + out := new(RuntimeClassStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { + *out = *in + if in.SELinuxOptions != nil { + in, out := &in.SELinuxOptions, &out.SELinuxOptions + *out = new(corev1.SELinuxOptions) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. +func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { + if in == nil { + return nil + } + out := new(SELinuxStrategyOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { + *out = *in + if in.Ranges != nil { + in, out := &in.Ranges, &out.Ranges + *out = make([]IDRange, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. +func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { + if in == nil { + return nil + } + out := new(SupplementalGroupsStrategyOptions) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go new file mode 100644 index 0000000..80f43ce --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=rbac.authorization.k8s.io + +package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go new file mode 100644 index 0000000..ba6872d --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -0,0 +1,3266 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBindingList 
proto.InternalMessageInfo + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_979ffd7b30c07419, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") + proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1.RoleBindingList") + 
proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptor_979ffd7b30c07419) +} + +var fileDescriptor_979ffd7b30c07419 = []byte{ + // 807 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, + 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, + 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, + 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, + 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, + 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, + 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, + 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, + 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, + 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, + 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, + 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, + 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, + 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, + 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, + 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, + 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, + 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, + 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, + 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, + 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, + 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, + 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, + 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, + 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, + 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, + 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, + 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, + 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, + 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, + 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 
0xd8, 0x9d, 0x02, 0xaf, + 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, + 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, + 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, + 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, + 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, + 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, + 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, + 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, + 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, + 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, + 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, + 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, + 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, + 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, + 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, + 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, + 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, + 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, + 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, +} + +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, 
err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 
0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Role) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ 
= l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this 
*ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" 
+ } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) 
<< shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d 
(wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := 
m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, 
PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto new file mode 100644 index 0000000..71fa083 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -0,0 +1,199 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.rbac.v1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +message ClusterRole { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + // +optional + repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +message ClusterRoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + // +optional + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +message ClusterRoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles +message ClusterRoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. 
+ repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + repeated string resourceNames = 4; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + repeated string nonResourceURLs = 5; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +message Role { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + // +optional + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +message RoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + // +optional + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +message RoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +message RoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// RoleRef contains information that points to the role being used +message RoleRef { + // APIGroup is the group for the resource being referenced + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. 
+ optional string kind = 1; + + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + optional string apiGroup = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/api/rbac/v1/register.go b/vendor/k8s.io/api/rbac/v1/register.go new file mode 100644 index 0000000..8f1fd46 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go new file mode 100644 index 0000000..7ba7d05 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -0,0 +1,237 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. 
evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. 
+ // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +type Role struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + // +optional + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingList is a collection of RoleBindings +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleList is a collection of Roles +type RoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + // +optional + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBindingList is a collection of ClusterRoleBindings +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleList is a collection of ClusterRoles +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..83ce310 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -0,0 +1,158 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiGroup": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..095a5e9 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -0,0 +1,389 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]metav1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleRef) DeepCopyInto(out *RoleRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. +func (in *RoleRef) DeepCopy() *RoleRef { + if in == nil { + return nil + } + out := new(RoleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. 
+func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go new file mode 100644 index 0000000..918b8a3 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=rbac.authorization.k8s.io + +package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go new file mode 100644 index 0000000..3b12526 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -0,0 +1,3267 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBindingList 
proto.InternalMessageInfo + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_b59b0bd5e7cb9590, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") + proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1alpha1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1alpha1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), 
"k8s.io.api.rbac.v1alpha1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1alpha1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_b59b0bd5e7cb9590) +} + +var fileDescriptor_b59b0bd5e7cb9590 = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, + 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, + 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, + 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, + 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, + 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, + 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, + 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, + 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, + 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, + 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, + 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, + 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, + 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, + 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, + 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, + 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, + 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, + 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, + 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, + 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, + 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, + 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, + 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, + 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, + 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, + 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, + 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, + 
0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, + 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, + 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, + 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, + 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, + 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, + 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, + 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, + 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, + 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, + 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, + 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, + 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, + 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, + 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, + 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, + 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, + 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, + 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, + 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, + 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, +} + +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := 
m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Role) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ 
= l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingList{`, + 
`ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l 
:= len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if 
wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = 
preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 
{ + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto new file mode 100644 index 0000000..895ab62 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -0,0 +1,208 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.rbac.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. +message ClusterRole { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + // +optional + repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. +message ClusterRoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + // +optional + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20. +message ClusterRoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles. 
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. +message ClusterRoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + repeated string apiGroups = 3; + + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + repeated string resources = 4; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + repeated string resourceNames = 5; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + repeated string nonResourceURLs = 6; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. +message Role { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + // +optional + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. +message RoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + // +optional + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. +message RoleBindingList { + // Standard object's metadata. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. +message RoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// RoleRef contains information that points to the role being used +message RoleRef { + // APIGroup is the group for the resource being referenced + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIVersion holds the API group and version of the referenced subject. + // Defaults to "v1" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. + // +k8s:conversion-gen=false + // +optional + optional string apiVersion = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/api/rbac/v1alpha1/register.go b/vendor/k8s.io/api/rbac/v1alpha1/register.go new file mode 100644 index 0000000..0c69776 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1alpha1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. 
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go new file mode 100644 index 0000000..ba91ab3 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -0,0 +1,246 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. ResourceAll represents all resources. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` + + // NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIVersion holds the API group and version of the referenced subject. + // Defaults to "v1" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. + // +k8s:conversion-gen=false + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. +type Role struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + // +optional + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleList is a collection of Roles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. +type RoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + // +optional + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. 
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20. +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000..eab08c5 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,158 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
+var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. 
ResourceAll represents all resources.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". 
If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiVersion": "APIVersion holds the API group and version of the referenced subject. Defaults to \"v1\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io/v1alpha1\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..0358227 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,389 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. 
+func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleRef) DeepCopyInto(out *RoleRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. +func (in *RoleRef) DeepCopy() *RoleRef { + if in == nil { + return nil + } + out := new(RoleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. +func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go new file mode 100644 index 0000000..fe7aae9 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=rbac.authorization.k8s.io + +package v1beta1 // import "k8s.io/api/rbac/v1beta1" diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go new file mode 100644 index 0000000..53d3632 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -0,0 +1,3266 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{0} +} +func (m *AggregationRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *AggregationRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_AggregationRule.Merge(m, src) +} +func (m *AggregationRule) XXX_Size() int { + return m.Size() +} +func (m *AggregationRule) XXX_DiscardUnknown() { + xxx_messageInfo_AggregationRule.DiscardUnknown(m) +} + +var xxx_messageInfo_AggregationRule proto.InternalMessageInfo + +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{1} +} +func (m *ClusterRole) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRole) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRole.Merge(m, src) +} +func (m *ClusterRole) XXX_Size() int { + return m.Size() +} +func (m *ClusterRole) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRole.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRole proto.InternalMessageInfo + +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{2} +} +func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBinding.Merge(m, src) +} +func (m *ClusterRoleBinding) 
XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo + +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{3} +} +func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) +} +func (m *ClusterRoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo + +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{4} +} +func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ClusterRoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_ClusterRoleList.Merge(m, src) +} +func (m *ClusterRoleList) XXX_Size() int { + return m.Size() +} +func (m *ClusterRoleList) XXX_DiscardUnknown() { + xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo + +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{5} +} +func (m *PolicyRule) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PolicyRule) XXX_Merge(src proto.Message) { + xxx_messageInfo_PolicyRule.Merge(m, src) +} +func (m *PolicyRule) XXX_Size() int { + return m.Size() +} +func (m *PolicyRule) XXX_DiscardUnknown() { + xxx_messageInfo_PolicyRule.DiscardUnknown(m) +} + +var xxx_messageInfo_PolicyRule proto.InternalMessageInfo + +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{6} +} +func (m *Role) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Role) XXX_Merge(src proto.Message) { + xxx_messageInfo_Role.Merge(m, src) +} +func (m *Role) XXX_Size() int { + return m.Size() +} +func (m *Role) XXX_DiscardUnknown() { + xxx_messageInfo_Role.DiscardUnknown(m) +} + +var xxx_messageInfo_Role proto.InternalMessageInfo + +func (m *RoleBinding) Reset() { *m 
= RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{7} +} +func (m *RoleBinding) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBinding) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBinding.Merge(m, src) +} +func (m *RoleBinding) XXX_Size() int { + return m.Size() +} +func (m *RoleBinding) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBinding.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBinding proto.InternalMessageInfo + +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{8} +} +func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleBindingList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleBindingList.Merge(m, src) +} +func (m *RoleBindingList) XXX_Size() int { + return m.Size() +} +func (m *RoleBindingList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleBindingList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo + +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{9} +} +func (m *RoleList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleList) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleList.Merge(m, src) +} +func (m *RoleList) XXX_Size() int { + return m.Size() +} +func (m *RoleList) XXX_DiscardUnknown() { + xxx_messageInfo_RoleList.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleList proto.InternalMessageInfo + +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{10} +} +func (m *RoleRef) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RoleRef) XXX_Merge(src proto.Message) { + xxx_messageInfo_RoleRef.Merge(m, src) +} +func (m *RoleRef) XXX_Size() int { + return m.Size() +} +func (m *RoleRef) XXX_DiscardUnknown() { + xxx_messageInfo_RoleRef.DiscardUnknown(m) +} + +var xxx_messageInfo_RoleRef proto.InternalMessageInfo + +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { + return fileDescriptor_99f6bec96facc83d, []int{11} +} +func (m *Subject) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != 
nil { + return nil, err + } + return b[:n], nil +} +func (m *Subject) XXX_Merge(src proto.Message) { + xxx_messageInfo_Subject.Merge(m, src) +} +func (m *Subject) XXX_Size() int { + return m.Size() +} +func (m *Subject) XXX_DiscardUnknown() { + xxx_messageInfo_Subject.DiscardUnknown(m) +} + +var xxx_messageInfo_Subject proto.InternalMessageInfo + +func init() { + proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") + proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1beta1.ClusterRole") + proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBinding") + proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBindingList") + proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleList") + proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1beta1.PolicyRule") + proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1beta1.Role") + proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1beta1.RoleBinding") + proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.RoleBindingList") + proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1beta1.RoleList") + proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") + proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_99f6bec96facc83d) +} + +var fileDescriptor_99f6bec96facc83d = []byte{ + // 808 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, + 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, + 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, + 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, + 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, + 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, + 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, + 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, + 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, + 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, + 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, + 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, + 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, + 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, + 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, + 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, + 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, + 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, + 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, + 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 
0xb6, + 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, + 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, + 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, + 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, + 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, + 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, + 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, + 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, + 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, + 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, + 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, + 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, + 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, + 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, + 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, + 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, + 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, + 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, + 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, + 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, + 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, + 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, + 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, + 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, + 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, + 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, + 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, + 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, + 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, + 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, +} + +func (m *AggregationRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for iNdEx := 
len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ClusterRole) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.AggregationRule != nil { + { + size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != 
nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PolicyRule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NonResourceURLs) > 0 { + for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NonResourceURLs[iNdEx]) + copy(dAtA[i:], m.NonResourceURLs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.ResourceNames) > 0 { + for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ResourceNames[iNdEx]) + copy(dAtA[i:], m.ResourceNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Resources[iNdEx]) + copy(dAtA[i:], m.Resources[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.APIGroups) > 0 { + for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.APIGroups[iNdEx]) + copy(dAtA[i:], m.APIGroups[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Verbs) > 0 { + for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Verbs[iNdEx]) + copy(dAtA[i:], m.Verbs[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Role) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Role) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Rules) > 0 { + for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBinding) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Subjects) > 0 { + for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *RoleRef) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Subject) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Subject) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.APIGroup) + copy(dAtA[i:], m.APIGroup) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *AggregationRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.ClusterRoleSelectors) > 0 { + for _, e := range m.ClusterRoleSelectors { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRole) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AggregationRule != nil { + l = m.AggregationRule.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ClusterRoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *ClusterRoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ClusterRoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range 
m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PolicyRule) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Role) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleBinding) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Subjects) > 0 { + for _, e := range m.Subjects { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.RoleRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *RoleBindingList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *RoleRef) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Subject) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AggregationRule) String() string { + if this == nil { + return "nil" + } + repeatedStringForClusterRoleSelectors := "[]LabelSelector{" + for _, f := range this.ClusterRoleSelectors { + repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," + } + repeatedStringForClusterRoleSelectors += "}" + s := strings.Join([]string{`&AggregationRule{`, + `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRole) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", 
"PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&ClusterRole{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&ClusterRoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ClusterRoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]ClusterRole{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&ClusterRoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PolicyRule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PolicyRule{`, + `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, + `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, + `}`, + }, "") + return s +} +func (this *Role) String() string { + if this == nil { + return "nil" + } + repeatedStringForRules := "[]PolicyRule{" + for _, f := range this.Rules { + repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," + } + repeatedStringForRules += "}" + s := strings.Join([]string{`&Role{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + repeatedStringForRules + `,`, + `}`, + }, "") + return s +} +func (this *RoleBinding) String() string { + if this == nil { + return "nil" + } + repeatedStringForSubjects := "[]Subject{" + for _, f := range this.Subjects { + 
repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," + } + repeatedStringForSubjects += "}" + s := strings.Join([]string{`&RoleBinding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + repeatedStringForSubjects + `,`, + `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *RoleBindingList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RoleBinding{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleBindingList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]Role{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&RoleList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *RoleRef) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RoleRef{`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s +} +func (this *Subject) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Subject{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AggregationRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) + if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRole) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AggregationRule == nil { + 
m.AggregationRule = &AggregationRule{} + } + if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, ClusterRole{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PolicyRule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + 
return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Role) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Role: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, PolicyRule{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBinding) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { 
+ return io.ErrUnexpectedEOF + } + m.Subjects = append(m.Subjects, Subject{}) + if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleBindingList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, RoleBinding{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + 
skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, Role{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RoleRef) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Subject) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Subject: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroup = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + 
depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto new file mode 100644 index 0000000..87e0dbd --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -0,0 +1,208 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.rbac.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +message AggregationRule { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; +} + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. +message ClusterRole { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this ClusterRole + // +optional + repeated PolicyRule rules = 2; + + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. + // +optional + optional AggregationRule aggregationRule = 3; +} + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. +message ClusterRoleBinding { + // Standard object's metadata. 
+ // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + // +optional + repeated Subject subjects = 2; + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20. +message ClusterRoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoleBindings + repeated ClusterRoleBinding items = 2; +} + +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. +message ClusterRoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of ClusterRoles + repeated ClusterRole items = 2; +} + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +message PolicyRule { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + repeated string verbs = 1; + + // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + repeated string apiGroups = 2; + + // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. + // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + repeated string resources = 3; + + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + repeated string resourceNames = 4; + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + repeated string nonResourceURLs = 5; +} + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. +message Role { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Rules holds all the PolicyRules for this Role + // +optional + repeated PolicyRule rules = 2; +} + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. 
+// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. +message RoleBinding { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Subjects holds references to the objects the role applies to. + // +optional + repeated Subject subjects = 2; + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + optional RoleRef roleRef = 3; +} + +// RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. +message RoleBindingList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of RoleBindings + repeated RoleBinding items = 2; +} + +// RoleList is a collection of Roles +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. +message RoleList { + // Standard object's metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of Roles + repeated Role items = 2; +} + +// RoleRef contains information that points to the role being used +message RoleRef { + // APIGroup is the group for the resource being referenced + optional string apiGroup = 1; + + // Kind is the type of resource being referenced + optional string kind = 2; + + // Name is the name of resource being referenced + optional string name = 3; +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +message Subject { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + optional string kind = 1; + + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + optional string apiGroup = 2; + + // Name of the object being referenced. + optional string name = 3; + + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + optional string namespace = 4; +} + diff --git a/vendor/k8s.io/api/rbac/v1beta1/register.go b/vendor/k8s.io/api/rbac/v1beta1/register.go new file mode 100644 index 0000000..c8526a6 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/register.go @@ -0,0 +1,58 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const GroupName = "rbac.authorization.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Role{}, + &RoleBinding{}, + &RoleBindingList{}, + &RoleList{}, + + &ClusterRole{}, + &ClusterRoleBinding{}, + &ClusterRoleBindingList{}, + &ClusterRoleList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go new file mode 100644 index 0000000..74c7093 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -0,0 +1,245 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +const ( + APIGroupAll = "*" + ResourceAll = "*" + VerbAll = "*" + NonResourceAll = "*" + + GroupKind = "Group" + ServiceAccountKind = "ServiceAccount" + UserKind = "User" + + // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" + AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" +) + +// Authorization is calculated against +// 1. evaluation of ClusterRoleBindings - short circuit on match +// 2. evaluation of RoleBindings in the namespace requested - short circuit on match +// 3. deny by default + +// PolicyRule holds information that describes a policy rule, but does not contain information +// about who the rule applies to or which namespace the rule applies to. +type PolicyRule struct { + // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. + Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` + + // APIGroups is the name of the APIGroup that contains the resources. 
If multiple API groups are specified, any action requested against one of + // the enumerated resources in any API group will be allowed. + // +optional + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` + // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. + // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. + // +optional + Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` + // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. + // +optional + ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` + + // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. + // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. + // +optional + NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` +} + +// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, +// or a value for non-objects such as user and group names. +type Subject struct { + // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". + // If the Authorizer does not recognized the kind value, the Authorizer should report an error. + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // APIGroup holds the API group of the referenced subject. + // Defaults to "" for ServiceAccount subjects. + // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. + // +optional + APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` + // Name of the object being referenced. + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty + // the Authorizer should report an error. + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` +} + +// RoleRef contains information that points to the role being used +type RoleRef struct { + // APIGroup is the group for the resource being referenced + APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` + // Kind is the type of resource being referenced + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` + // Name is the name of resource being referenced + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. +type Role struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this Role + // +optional + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` +} + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. +// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given +// namespace only have effect in that namespace. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. +type RoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleBindingList is a collection of RoleBindings +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. +type RoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of RoleBindings + Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// RoleList is a collection of Roles +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. +type RoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of Roles + Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. +type ClusterRole struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Rules holds all the PolicyRules for this ClusterRole + // +optional + Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` + // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. + // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be + // stomped by the controller. 
+ // +optional + AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` +} + +// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole +type AggregationRule struct { + // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. + // If any of the selectors match, then the ClusterRole's permissions will be added + // +optional + ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, +// and adds who information via Subject. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. +type ClusterRoleBinding struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Subjects holds references to the objects the role applies to. + // +optional + Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` + + // RoleRef can only reference a ClusterRole in the global namespace. + // If the RoleRef cannot be resolved, the Authorizer must return an error. + RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleBindingList is a collection of ClusterRoleBindings. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20. +type ClusterRoleBindingList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoleBindings + Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ClusterRoleList is a collection of ClusterRoles. +// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. +type ClusterRoleList struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of ClusterRoles + Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..8e9d7ac --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,158 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_AggregationRule = map[string]string{ + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", +} + +func (AggregationRule) SwaggerDoc() map[string]string { + return map_AggregationRule +} + +var map_ClusterRole = map[string]string{ + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this ClusterRole", + "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", +} + +func (ClusterRole) SwaggerDoc() map[string]string { + return map_ClusterRole +} + +var map_ClusterRoleBinding = map[string]string{ + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (ClusterRoleBinding) SwaggerDoc() map[string]string { + return map_ClusterRoleBinding +} + +var map_ClusterRoleBindingList = map[string]string{ + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoleBindings", +} + +func (ClusterRoleBindingList) SwaggerDoc() map[string]string { + return map_ClusterRoleBindingList +} + +var map_ClusterRoleList = map[string]string{ + "": "ClusterRoleList is a collection of ClusterRoles. 
Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of ClusterRoles", +} + +func (ClusterRoleList) SwaggerDoc() map[string]string { + return map_ClusterRoleList +} + +var map_PolicyRule = map[string]string{ + "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", + "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", + "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", + "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", + "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", +} + +func (PolicyRule) SwaggerDoc() map[string]string { + return map_PolicyRule +} + +var map_Role = map[string]string{ + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "rules": "Rules holds all the PolicyRules for this Role", +} + +func (Role) SwaggerDoc() map[string]string { + return map_Role +} + +var map_RoleBinding = map[string]string{ + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "subjects": "Subjects holds references to the objects the role applies to.", + "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", +} + +func (RoleBinding) SwaggerDoc() map[string]string { + return map_RoleBinding +} + +var map_RoleBindingList = map[string]string{ + "": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of RoleBindings", +} + +func (RoleBindingList) SwaggerDoc() map[string]string { + return map_RoleBindingList +} + +var map_RoleList = map[string]string{ + "": "RoleList is a collection of Roles Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.", + "metadata": "Standard object's metadata.", + "items": "Items is a list of Roles", +} + +func (RoleList) SwaggerDoc() map[string]string { + return map_RoleList +} + +var map_RoleRef = map[string]string{ + "": "RoleRef contains information that points to the role being used", + "apiGroup": "APIGroup is the group for the resource being referenced", + "kind": "Kind is the type of resource being referenced", + "name": "Name is the name of resource being referenced", +} + +func (RoleRef) SwaggerDoc() map[string]string { + return map_RoleRef +} + +var map_Subject = map[string]string{ + "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", + "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", + "apiGroup": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", + "name": "Name of the object being referenced.", + "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", +} + +func (Subject) SwaggerDoc() map[string]string { + return map_Subject +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..7ffe581 --- /dev/null +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,389 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { + *out = *in + if in.ClusterRoleSelectors != nil { + in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors + *out = make([]v1.LabelSelector, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. +func (in *AggregationRule) DeepCopy() *AggregationRule { + if in == nil { + return nil + } + out := new(AggregationRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.AggregationRule != nil { + in, out := &in.AggregationRule, &out.AggregationRule + *out = new(AggregationRule) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. +func (in *ClusterRole) DeepCopy() *ClusterRole { + if in == nil { + return nil + } + out := new(ClusterRole) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRole) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. +func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { + if in == nil { + return nil + } + out := new(ClusterRoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. +func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { + if in == nil { + return nil + } + out := new(ClusterRoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]ClusterRole, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. +func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { + if in == nil { + return nil + } + out := new(ClusterRoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ClusterRoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ResourceNames != nil { + in, out := &in.ResourceNames, &out.ResourceNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.NonResourceURLs != nil { + in, out := &in.NonResourceURLs, &out.NonResourceURLs + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. +func (in *PolicyRule) DeepCopy() *PolicyRule { + if in == nil { + return nil + } + out := new(PolicyRule) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Role) DeepCopyInto(out *Role) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]PolicyRule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. +func (in *Role) DeepCopy() *Role { + if in == nil { + return nil + } + out := new(Role) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Role) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Subjects != nil { + in, out := &in.Subjects, &out.Subjects + *out = make([]Subject, len(*in)) + copy(*out, *in) + } + out.RoleRef = in.RoleRef + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. +func (in *RoleBinding) DeepCopy() *RoleBinding { + if in == nil { + return nil + } + out := new(RoleBinding) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBinding) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]RoleBinding, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. +func (in *RoleBindingList) DeepCopy() *RoleBindingList { + if in == nil { + return nil + } + out := new(RoleBindingList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleBindingList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleList) DeepCopyInto(out *RoleList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]Role, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. +func (in *RoleList) DeepCopy() *RoleList { + if in == nil { + return nil + } + out := new(RoleList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *RoleList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RoleRef) DeepCopyInto(out *RoleRef) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. +func (in *RoleRef) DeepCopy() *RoleRef { + if in == nil { + return nil + } + out := new(RoleRef) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Subject) DeepCopyInto(out *Subject) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. 
+func (in *Subject) DeepCopy() *Subject { + if in == nil { + return nil + } + out := new(Subject) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go new file mode 100644 index 0000000..76c4da0 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=scheduling.k8s.io + +package v1 // import "k8s.io/api/scheduling/v1" diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go new file mode 100644 index 0000000..efc3102 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/generated.pb.go @@ -0,0 +1,735 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_277b2f43b72fffd5, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass") + proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_277b2f43b72fffd5) +} + +var fileDescriptor_277b2f43b72fffd5 = []byte{ + // 488 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, + 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 0x0c, 0x88, + 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, + 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 0x04, + 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, + 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, + 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, + 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, + 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, + 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, + 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, + 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, + 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 
0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, + 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, + 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, + 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, + 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, + 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, + 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, + 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, + 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, + 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, + 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, + 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, + 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, + 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, + 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, + 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, + 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, + 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, +} + +func (m *PriorityClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a + } + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- + if m.GlobalDefault { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + 
i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 2 + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PriorityClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PriorityClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.GlobalDefault = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated 
= fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto new file mode 100644 index 0000000..82f6e0a --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/generated.proto @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.scheduling.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +message PriorityClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + optional int32 value = 2; + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + optional bool globalDefault = 3; + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; +} + +// PriorityClassList is a collection of priority classes. 
+message PriorityClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of PriorityClasses + repeated PriorityClass items = 2; +} + diff --git a/vendor/k8s.io/api/scheduling/v1/register.go b/vendor/k8s.io/api/scheduling/v1/register.go new file mode 100644 index 0000000..33977fe --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "scheduling.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + + // SchemeBuilder is a collection of functions that add things to a scheme. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PriorityClass{}, + &PriorityClassList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go new file mode 100644 index 0000000..087ee10 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/types.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +type PriorityClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClassList is a collection of priority classes. +type PriorityClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PriorityClasses + Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..4cfb9d3 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PriorityClass = map[string]string{ + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", +} + +func (PriorityClass) SwaggerDoc() map[string]string { + return map_PriorityClass +} + +var map_PriorityClassList = map[string]string{ + "": "PriorityClassList is a collection of priority classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of PriorityClasses", +} + +func (PriorityClassList) SwaggerDoc() map[string]string { + return map_PriorityClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..63bfe64 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go @@ -0,0 +1,90 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(corev1.PreemptionPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. +func (in *PriorityClass) DeepCopy() *PriorityClass { + if in == nil { + return nil + } + out := new(PriorityClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. +func (in *PriorityClassList) DeepCopy() *PriorityClassList { + if in == nil { + return nil + } + out := new(PriorityClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go new file mode 100644 index 0000000..cff47e1 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=scheduling.k8s.io + +package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1" diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go new file mode 100644 index 0000000..8a62104 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -0,0 +1,735 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_f033641dd0b95dce, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass") + proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_f033641dd0b95dce) +} + +var fileDescriptor_f033641dd0b95dce = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 
0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, + 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, + 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, + 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, + 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 0x42, 0xb2, 0x45, + 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, + 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, + 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, + 0xc7, 0x77, 0xcf, 0x3c, 0x61, 0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, + 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, + 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, + 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, + 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, + 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, + 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, + 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, + 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, + 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, + 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, + 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, + 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 0xcb, + 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, + 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, + 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, + 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, + 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, + 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, + 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, + 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, + 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, +} + +func (m *PriorityClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a + } 
+ i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- + if m.GlobalDefault { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 2 + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PriorityClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := 
strings.Join([]string{`&PriorityClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PriorityClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.GlobalDefault = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto new file mode 100644 index 0000000..682fb87 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -0,0 +1,76 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.scheduling.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +message PriorityClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. 
+ optional int32 value = 2; + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + optional bool globalDefault = 3; + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; +} + +// PriorityClassList is a collection of priority classes. +message PriorityClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of PriorityClasses + repeated PriorityClass items = 2; +} + diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/register.go b/vendor/k8s.io/api/scheduling/v1alpha1/register.go new file mode 100644 index 0000000..24689f0 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "scheduling.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. 
+func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PriorityClass{}, + &PriorityClassList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go new file mode 100644 index 0000000..86a2c51 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -0,0 +1,75 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +type PriorityClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClassList is a collection of priority classes. 
+type PriorityClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PriorityClasses + Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000..63a9a35 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PriorityClass = map[string]string{ + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. 
This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", +} + +func (PriorityClass) SwaggerDoc() map[string]string { + return map_PriorityClass +} + +var map_PriorityClassList = map[string]string{ + "": "PriorityClassList is a collection of priority classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of PriorityClasses", +} + +func (PriorityClassList) SwaggerDoc() map[string]string { + return map_PriorityClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..0392823 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,90 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(v1.PreemptionPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. +func (in *PriorityClass) DeepCopy() *PriorityClass { + if in == nil { + return nil + } + out := new(PriorityClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. +func (in *PriorityClassList) DeepCopy() *PriorityClassList { + if in == nil { + return nil + } + out := new(PriorityClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PriorityClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go new file mode 100644 index 0000000..e661968 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=scheduling.k8s.io + +package v1beta1 // import "k8s.io/api/scheduling/v1beta1" diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go new file mode 100644 index 0000000..b89af56 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -0,0 +1,735 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{0} +} +func (m *PriorityClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClass.Merge(m, src) +} +func (m *PriorityClass) XXX_Size() int { + return m.Size() +} +func (m *PriorityClass) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClass.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClass proto.InternalMessageInfo + +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_6cd406dede2d3f42, []int{1} +} +func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PriorityClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PriorityClassList.Merge(m, src) +} +func (m *PriorityClassList) XXX_Size() int { + return m.Size() +} +func (m *PriorityClassList) XXX_DiscardUnknown() { + xxx_messageInfo_PriorityClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass") + proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_6cd406dede2d3f42) +} + +var fileDescriptor_6cd406dede2d3f42 = []byte{ + // 494 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, + 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 0x7a, 0x4b, + 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 0x18, + 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, + 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, + 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 0x82, 0x2e, 0x12, + 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, + 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, + 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, + 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, + 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, + 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, + 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, + 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 
0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, 0x17, 0x92, + 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, + 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, + 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, + 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, + 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, + 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, + 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, + 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, + 0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, + 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, + 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, + 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, + 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, + 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, + 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, + 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, + 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, +} + +func (m *PriorityClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.PreemptionPolicy != nil { + i -= len(*m.PreemptionPolicy) + copy(dAtA[i:], *m.PreemptionPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) + i-- + dAtA[i] = 0x2a + } + i -= len(m.Description) + copy(dAtA[i:], m.Description) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i-- + dAtA[i] = 0x22 + i-- + if m.GlobalDefault { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + i-- + dAtA[i] = 0x10 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PriorityClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Value)) + n += 2 + l = len(m.Description) + n += 1 + l + sovGenerated(uint64(l)) + if m.PreemptionPolicy != nil { + l = len(*m.PreemptionPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *PriorityClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PriorityClass) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PriorityClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, + `Description:` + fmt.Sprintf("%v", this.Description) + `,`, + `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, + `}`, + }, "") + return s +} +func (this *PriorityClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PriorityClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PriorityClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PriorityClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.GlobalDefault = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) + m.PreemptionPolicy = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PriorityClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PriorityClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return 
iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto new file mode 100644 index 0000000..2582891 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -0,0 +1,76 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.scheduling.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. +// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +message PriorityClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + optional int32 value = 2; + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + optional bool globalDefault = 3; + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + optional string description = 4; + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + optional string preemptionPolicy = 5; +} + +// PriorityClassList is a collection of priority classes. 
+message PriorityClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of PriorityClasses + repeated PriorityClass items = 2; +} + diff --git a/vendor/k8s.io/api/scheduling/v1beta1/register.go b/vendor/k8s.io/api/scheduling/v1beta1/register.go new file mode 100644 index 0000000..fb26557 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "scheduling.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PriorityClass{}, + &PriorityClassList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go new file mode 100644 index 0000000..f806ecd --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -0,0 +1,75 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + apiv1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. 
+// PriorityClass defines mapping from a priority class name to the priority +// integer value. The value can be any valid integer. +type PriorityClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The value of this priority class. This is the actual priority that pods + // receive when they have the name of this class in their pod spec. + Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` + + // globalDefault specifies whether this PriorityClass should be considered as + // the default priority for pods that do not have any priority class. + // Only one PriorityClass can be marked as `globalDefault`. However, if more than + // one PriorityClasses exists with their `globalDefault` field set to true, + // the smallest value of such global default PriorityClasses will be used as the default priority. + // +optional + GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` + + // description is an arbitrary string that usually provides guidelines on + // when this priority class should be used. + // +optional + Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` + + // PreemptionPolicy is the Policy for preempting pods with lower priority. + // One of Never, PreemptLowerPriority. + // Defaults to PreemptLowerPriority if unset. + // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. + // +optional + PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PriorityClassList is a collection of priority classes. +type PriorityClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of PriorityClasses + Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..ffded9d --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,53 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PriorityClass = map[string]string{ + "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", + "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", +} + +func (PriorityClass) SwaggerDoc() map[string]string { + return map_PriorityClass +} + +var map_PriorityClassList = map[string]string{ + "": "PriorityClassList is a collection of priority classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of PriorityClasses", +} + +func (PriorityClassList) SwaggerDoc() map[string]string { + return map_PriorityClassList +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..6e20085 --- /dev/null +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,90 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.PreemptionPolicy != nil { + in, out := &in.PreemptionPolicy, &out.PreemptionPolicy + *out = new(v1.PreemptionPolicy) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. +func (in *PriorityClass) DeepCopy() *PriorityClass { + if in == nil { + return nil + } + out := new(PriorityClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PriorityClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. +func (in *PriorityClassList) DeepCopy() *PriorityClassList { + if in == nil { + return nil + } + out := new(PriorityClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PriorityClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go new file mode 100644 index 0000000..60066bb --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -0,0 +1,23 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:openapi-gen=true + +// +groupName=settings.k8s.io + +package v1alpha1 // import "k8s.io/api/settings/v1alpha1" diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go new file mode 100644 index 0000000..7ed066d --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -0,0 +1,1053 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{0} +} +func (m *PodPreset) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPreset) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPreset.Merge(m, src) +} +func (m *PodPreset) XXX_Size() int { + return m.Size() +} +func (m *PodPreset) XXX_DiscardUnknown() { + xxx_messageInfo_PodPreset.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPreset proto.InternalMessageInfo + +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{1} +} +func (m *PodPresetList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetList.Merge(m, src) +} +func (m *PodPresetList) XXX_Size() int { + return m.Size() +} +func (m *PodPresetList) XXX_DiscardUnknown() { + xxx_messageInfo_PodPresetList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPresetList proto.InternalMessageInfo + +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_48fab0a6ea4b79ce, []int{2} +} +func (m *PodPresetSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodPresetSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodPresetSpec.Merge(m, src) +} +func (m *PodPresetSpec) XXX_Size() int { + return m.Size() +} +func (m *PodPresetSpec) XXX_DiscardUnknown() { + 
xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") + proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") + proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) +} + +var fileDescriptor_48fab0a6ea4b79ce = []byte{ + // 542 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, + 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, + 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, + 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, + 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, + 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, + 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, + 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, + 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, + 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, + 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, + 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, + 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, + 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, + 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, + 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, + 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, + 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, + 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, + 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, + 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, + 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, + 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, + 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, + 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, + 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, + 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, + 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, + 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, + 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 
0xa7, 0xa6, 0x59, 0x9d, 0xfd, + 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, + 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, + 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, + 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, +} + +func (m *PodPreset) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodPresetList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VolumeMounts) > 0 { + for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if len(m.Volumes) > 0 { + for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.EnvFrom) > 0 { + for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *PodPreset) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodPresetList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodPresetSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Env) > 0 { + for _, e := range m.Env { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.EnvFrom) > 0 { + for _, e := range m.EnvFrom { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.VolumeMounts) > 0 { + for _, e := range m.VolumeMounts { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PodPreset) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PodPreset{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodPresetList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodPreset{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodPresetList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodPresetSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForEnv := "[]EnvVar{" + for _, f := range this.Env { + repeatedStringForEnv += fmt.Sprintf("%v", f) + "," + } + repeatedStringForEnv += "}" + repeatedStringForEnvFrom := "[]EnvFromSource{" + for _, f := range this.EnvFrom { + repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," + } + 
repeatedStringForEnvFrom += "}" + repeatedStringForVolumes := "[]Volume{" + for _, f := range this.Volumes { + repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumes += "}" + repeatedStringForVolumeMounts := "[]VolumeMount{" + for _, f := range this.VolumeMounts { + repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," + } + repeatedStringForVolumeMounts += "}" + s := strings.Join([]string{`&PodPresetSpec{`, + `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + repeatedStringForEnv + `,`, + `EnvFrom:` + repeatedStringForEnvFrom + `,`, + `Volumes:` + repeatedStringForVolumes + `,`, + `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PodPreset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPreset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPreset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPresetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPresetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPresetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodPreset{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodPresetSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, v11.EnvVar{}) + if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) + if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Volumes = append(m.Volumes, v11.Volume{}) + if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) + if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA 
[]byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto new file mode 100644 index 0000000..db1ec93 --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.proto @@ -0,0 +1,75 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.settings.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. +message PodPreset { + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // +optional + optional PodPresetSpec spec = 2; +} + +// PodPresetList is a list of PodPreset objects. +message PodPresetList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is a list of schema objects. 
+ repeated PodPreset items = 2; +} + +// PodPresetSpec is a description of a pod preset. +message PodPresetSpec { + // Selector is a label query over a set of resources, in this case pods. + // Required. + optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; + + // Env defines the collection of EnvVar to inject into containers. + // +optional + repeated k8s.io.api.core.v1.EnvVar env = 2; + + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + repeated k8s.io.api.core.v1.EnvFromSource envFrom = 3; + + // Volumes defines the collection of Volume to inject into the pod. + // +optional + repeated k8s.io.api.core.v1.Volume volumes = 4; + + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5; +} + diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/settings/v1alpha1/register.go new file mode 100644 index 0000000..eee278d --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/register.go @@ -0,0 +1,52 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "settings.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &PodPreset{}, + &PodPresetList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go new file mode 100644 index 0000000..8cc99d4 --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/types.go @@ -0,0 +1,70 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodPreset is a policy resource that defines additional runtime +// requirements for a Pod. +type PodPreset struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // +optional + Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` +} + +// PodPresetSpec is a description of a pod preset. +type PodPresetSpec struct { + // Selector is a label query over a set of resources, in this case pods. + // Required. + Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` + + // Env defines the collection of EnvVar to inject into containers. + // +optional + Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` + // EnvFrom defines the collection of EnvFromSource to inject into containers. + // +optional + EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"` + // Volumes defines the collection of Volume to inject into the pod. + // +optional + Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` + // VolumeMounts defines the collection of VolumeMount to inject into containers. + // +optional + VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodPresetList is a list of PodPreset objects. +type PodPresetList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is a list of schema objects. + Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000..0501e0a --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,61 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PodPreset = map[string]string{ + "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", +} + +func (PodPreset) SwaggerDoc() map[string]string { + return map_PodPreset +} + +var map_PodPresetList = map[string]string{ + "": "PodPresetList is a list of PodPreset objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is a list of schema objects.", +} + +func (PodPresetList) SwaggerDoc() map[string]string { + return map_PodPresetList +} + +var map_PodPresetSpec = map[string]string{ + "": "PodPresetSpec is a description of a pod preset.", + "selector": "Selector is a label query over a set of resources, in this case pods. Required.", + "env": "Env defines the collection of EnvVar to inject into containers.", + "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", + "volumes": "Volumes defines the collection of Volume to inject into the pod.", + "volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.", +} + +func (PodPresetSpec) SwaggerDoc() map[string]string { + return map_PodPresetSpec +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..ed6c31a --- /dev/null +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,131 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPreset) DeepCopyInto(out *PodPreset) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPreset. +func (in *PodPreset) DeepCopy() *PodPreset { + if in == nil { + return nil + } + out := new(PodPreset) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *PodPreset) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodPreset, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetList. +func (in *PodPresetList) DeepCopy() *PodPresetList { + if in == nil { + return nil + } + out := new(PodPresetList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodPresetList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { + *out = *in + in.Selector.DeepCopyInto(&out.Selector) + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]v1.EnvVar, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.EnvFrom != nil { + in, out := &in.EnvFrom, &out.EnvFrom + *out = make([]v1.EnvFromSource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Volumes != nil { + in, out := &in.Volumes, &out.Volumes + *out = make([]v1.Volume, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.VolumeMounts != nil { + in, out := &in.VolumeMounts, &out.VolumeMounts + *out = make([]v1.VolumeMount, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetSpec. +func (in *PodPresetSpec) DeepCopy() *PodPresetSpec { + if in == nil { + return nil + } + out := new(PodPresetSpec) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go new file mode 100644 index 0000000..75a6489 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +groupName=storage.k8s.io +// +k8s:openapi-gen=true + +package v1 // import "k8s.io/api/storage/v1" diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go new file mode 100644 index 0000000..9e57369 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -0,0 +1,4461 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CSIDriver) Reset() { *m = CSIDriver{} } +func (*CSIDriver) ProtoMessage() {} +func (*CSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{0} +} +func (m *CSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriver.Merge(m, src) +} +func (m *CSIDriver) XXX_Size() int { + return m.Size() +} +func (m *CSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriver proto.InternalMessageInfo + +func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } +func (*CSIDriverList) ProtoMessage() {} +func (*CSIDriverList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{1} +} +func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriverList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverList.Merge(m, src) +} +func (m *CSIDriverList) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverList) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo + +func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } +func (*CSIDriverSpec) ProtoMessage() {} +func (*CSIDriverSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{2} +} +func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} 
+func (m *CSIDriverSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverSpec.Merge(m, src) +} +func (m *CSIDriverSpec) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo + +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{3} +} +func (m *CSINode) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINode) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINode.Merge(m, src) +} +func (m *CSINode) XXX_Size() int { + return m.Size() +} +func (m *CSINode) XXX_DiscardUnknown() { + xxx_messageInfo_CSINode.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINode proto.InternalMessageInfo + +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{4} +} +func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeDriver.Merge(m, src) +} +func (m *CSINodeDriver) XXX_Size() int { + return m.Size() +} +func (m *CSINodeDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo + +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{5} +} +func (m *CSINodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeList.Merge(m, src) +} +func (m *CSINodeList) XXX_Size() int { + return m.Size() +} +func (m *CSINodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeList proto.InternalMessageInfo + +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{6} +} +func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeSpec.Merge(m, src) +} +func (m *CSINodeSpec) XXX_Size() int { + return m.Size() +} +func (m *CSINodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo + +func (m *StorageClass) 
Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{7} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m *StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClass proto.InternalMessageInfo + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{8} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{9} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{10} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) 
ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{11} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{12} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{13} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{14} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo + +func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } 
+func (*VolumeNodeResources) ProtoMessage() {} +func (*VolumeNodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_3b530c1983504d8d, []int{15} +} +func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeResources.Merge(m, src) +} +func (m *VolumeNodeResources) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1.CSIDriver") + proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1.CSIDriverList") + proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1.CSIDriverSpec") + proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1.CSINode") + proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1.CSINodeDriver") + proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1.CSINodeList") + proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1.CSINodeSpec") + proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") + proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1.VolumeNodeResources") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptor_3b530c1983504d8d) +} + +var fileDescriptor_3b530c1983504d8d = []byte{ + // 1336 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, + 0x14, 0xcf, 0xc6, 0xf9, 0x3b, 0x4e, 0x5a, 0x67, 0x1a, 0xc0, 0xe4, 0xe0, 0x8d, 0x96, 0x0a, 0x42, + 0xa1, 0xeb, 0xa6, 0x94, 0xaa, 0xaa, 0x04, 0x52, 0x36, 0x31, 0x22, 0x22, 0x4e, 0xa2, 0x49, 0xa9, + 0x10, 0x02, 0xc4, 0x64, 0xf7, 0xd5, 0xd9, 0xc6, 0xbb, 0xb3, 0xdd, 0x1d, 0x1b, 0x7c, 0xe3, 0xc4, + 0x0d, 0x09, 0xae, 0x7c, 0x0a, 0x90, 0xe0, 0xc2, 0x91, 0x53, 0xb9, 0x55, 0x9c, 0x7a, 0xb2, 0xe8, + 0x72, 0x05, 0x3e, 0x40, 0x4e, 0x68, 0x66, 0xc7, 0xde, 0xb5, 0xbd, 0x4e, 0xd3, 0x8b, 0x6f, 0x9e, + 0xf7, 0xde, 0xef, 0xf7, 0xde, 0x9b, 0xf7, 0x67, 0xd6, 0xe8, 0xfd, 0xd3, 0x3b, 0x91, 0xe9, 0xb2, + 0xea, 0x69, 0xeb, 0x18, 0x42, 0x1f, 0x38, 0x44, 0xd5, 0x36, 0xf8, 0x0e, 0x0b, 0xab, 0x4a, 0x41, + 0x03, 0xb7, 0x1a, 0x71, 0x16, 0xd2, 0x06, 0x54, 0xdb, 0x9b, 0xd5, 0x06, 
0xf8, 0x10, 0x52, 0x0e, + 0x8e, 0x19, 0x84, 0x8c, 0x33, 0xfc, 0x52, 0x62, 0x66, 0xd2, 0xc0, 0x35, 0x95, 0x99, 0xd9, 0xde, + 0x5c, 0xbb, 0xde, 0x70, 0xf9, 0x49, 0xeb, 0xd8, 0xb4, 0x99, 0x57, 0x6d, 0xb0, 0x06, 0xab, 0x4a, + 0xeb, 0xe3, 0xd6, 0x03, 0x79, 0x92, 0x07, 0xf9, 0x2b, 0x61, 0x59, 0x33, 0x32, 0xce, 0x6c, 0x16, + 0xe6, 0x79, 0x5a, 0xbb, 0x95, 0xda, 0x78, 0xd4, 0x3e, 0x71, 0x7d, 0x08, 0x3b, 0xd5, 0xe0, 0xb4, + 0x21, 0x04, 0x51, 0xd5, 0x03, 0x4e, 0xf3, 0x50, 0xd5, 0x71, 0xa8, 0xb0, 0xe5, 0x73, 0xd7, 0x83, + 0x11, 0xc0, 0xed, 0xe7, 0x01, 0x22, 0xfb, 0x04, 0x3c, 0x3a, 0x8c, 0x33, 0x7e, 0xd5, 0xd0, 0xe2, + 0xf6, 0xd1, 0xee, 0x4e, 0xe8, 0xb6, 0x21, 0xc4, 0x5f, 0xa2, 0x05, 0x11, 0x91, 0x43, 0x39, 0x2d, + 0x6b, 0xeb, 0xda, 0x46, 0xf1, 0xe6, 0x0d, 0x33, 0xbd, 0xa9, 0x3e, 0xb1, 0x19, 0x9c, 0x36, 0x84, + 0x20, 0x32, 0x85, 0xb5, 0xd9, 0xde, 0x34, 0x0f, 0x8e, 0x1f, 0x82, 0xcd, 0xeb, 0xc0, 0xa9, 0x85, + 0x1f, 0x77, 0xf5, 0xa9, 0xb8, 0xab, 0xa3, 0x54, 0x46, 0xfa, 0xac, 0xf8, 0x03, 0x34, 0x13, 0x05, + 0x60, 0x97, 0xa7, 0x25, 0xfb, 0x55, 0x33, 0xb7, 0x0e, 0x66, 0x3f, 0xa2, 0xa3, 0x00, 0x6c, 0x6b, + 0x49, 0x31, 0xce, 0x88, 0x13, 0x91, 0x78, 0xe3, 0x17, 0x0d, 0x2d, 0xf7, 0xad, 0xf6, 0xdc, 0x88, + 0xe3, 0xcf, 0x46, 0x62, 0x37, 0x2f, 0x16, 0xbb, 0x40, 0xcb, 0xc8, 0x4b, 0xca, 0xcf, 0x42, 0x4f, + 0x92, 0x89, 0xbb, 0x86, 0x66, 0x5d, 0x0e, 0x5e, 0x54, 0x9e, 0x5e, 0x2f, 0x6c, 0x14, 0x6f, 0xae, + 0x3f, 0x2f, 0x70, 0x6b, 0x59, 0x91, 0xcd, 0xee, 0x0a, 0x18, 0x49, 0xd0, 0xc6, 0x3f, 0xd9, 0xb0, + 0x45, 0x3a, 0xf8, 0x2e, 0xba, 0x44, 0x39, 0xa7, 0xf6, 0x09, 0x81, 0x47, 0x2d, 0x37, 0x04, 0x47, + 0x06, 0xbf, 0x60, 0xe1, 0xb8, 0xab, 0x5f, 0xda, 0x1a, 0xd0, 0x90, 0x21, 0x4b, 0x81, 0x0d, 0x98, + 0xb3, 0xeb, 0x3f, 0x60, 0x07, 0x7e, 0x9d, 0xb5, 0x7c, 0x2e, 0xaf, 0x55, 0x61, 0x0f, 0x07, 0x34, + 0x64, 0xc8, 0x12, 0xdb, 0x68, 0xb5, 0xcd, 0x9a, 0x2d, 0x0f, 0xf6, 0xdc, 0x07, 0x60, 0x77, 0xec, + 0x26, 0xd4, 0x99, 0x03, 0x51, 0xb9, 0xb0, 0x5e, 0xd8, 0x58, 0xb4, 0xaa, 0x71, 0x57, 0x5f, 0xbd, + 0x9f, 0xa3, 0x3f, 0xeb, 0xea, 0x57, 0x72, 0xe4, 0x24, 0x97, 0xcc, 0xf8, 0x59, 0x43, 0xf3, 0xdb, + 0x47, 0xbb, 0xfb, 0xcc, 0x81, 0x09, 0xf4, 0xd6, 0xce, 0x40, 0x6f, 0x19, 0xe3, 0x4b, 0x24, 0xe2, + 0x19, 0xdb, 0x59, 0xff, 0x25, 0x25, 0x12, 0x36, 0x6a, 0x2a, 0xd6, 0xd1, 0x8c, 0x4f, 0x3d, 0x90, + 0x51, 0x2f, 0xa6, 0x98, 0x7d, 0xea, 0x01, 0x91, 0x1a, 0xfc, 0x3a, 0x9a, 0xf3, 0x99, 0x03, 0xbb, + 0x3b, 0xd2, 0xf7, 0xa2, 0x75, 0x49, 0xd9, 0xcc, 0xed, 0x4b, 0x29, 0x51, 0x5a, 0x7c, 0x0b, 0x2d, + 0x71, 0x16, 0xb0, 0x26, 0x6b, 0x74, 0x3e, 0x82, 0x4e, 0xef, 0xb2, 0x4b, 0x71, 0x57, 0x5f, 0xba, + 0x97, 0x91, 0x93, 0x01, 0x2b, 0xfc, 0x39, 0x2a, 0xd2, 0x66, 0x93, 0xd9, 0x94, 0xd3, 0xe3, 0x26, + 0x94, 0x67, 0x64, 0x7a, 0xd7, 0xc6, 0xa4, 0x97, 0x14, 0x47, 0xf8, 0x25, 0x10, 0xb1, 0x56, 0x68, + 0x43, 0x64, 0x5d, 0x8e, 0xbb, 0x7a, 0x71, 0x2b, 0xa5, 0x20, 0x59, 0x3e, 0xe3, 0x27, 0x0d, 0x15, + 0x55, 0xc2, 0x13, 0x18, 0xa4, 0xed, 0xc1, 0x41, 0xaa, 0x9c, 0x5f, 0xa5, 0x31, 0x63, 0xf4, 0x45, + 0x3f, 0x62, 0x39, 0x43, 0x07, 0x68, 0xde, 0x91, 0xa5, 0x8a, 0xca, 0x9a, 0x64, 0xbd, 0x7a, 0x3e, + 0xab, 0x1a, 0xd1, 0xcb, 0x8a, 0x7b, 0x3e, 0x39, 0x47, 0xa4, 0xc7, 0x62, 0x7c, 0x37, 0x87, 0x96, + 0x8e, 0x12, 0xd8, 0x76, 0x93, 0x46, 0xd1, 0x04, 0x9a, 0xf7, 0x5d, 0x54, 0x0c, 0x42, 0xd6, 0x76, + 0x23, 0x97, 0xf9, 0x10, 0xaa, 0x3e, 0xba, 0xa2, 0x20, 0xc5, 0xc3, 0x54, 0x45, 0xb2, 0x76, 0xb8, + 0x81, 0x50, 0x40, 0x43, 0xea, 0x01, 0x17, 0xd9, 0x17, 0x64, 0xf6, 0xef, 0x8c, 0xc9, 0x3e, 0x9b, + 0x91, 0x79, 0xd8, 0x47, 0xd5, 0x7c, 0x1e, 0x76, 0xd2, 0xe8, 0x52, 0x05, 0xc9, 0x50, 0xe3, 0x53, + 
0xb4, 0x1c, 0x82, 0xdd, 0xa4, 0xae, 0x77, 0xc8, 0x9a, 0xae, 0xdd, 0x91, 0x6d, 0xb8, 0x68, 0xd5, + 0xe2, 0xae, 0xbe, 0x4c, 0xb2, 0x8a, 0xb3, 0xae, 0x7e, 0x63, 0xf4, 0x5d, 0x34, 0x0f, 0x21, 0x8c, + 0xdc, 0x88, 0x83, 0xcf, 0x93, 0x0e, 0x1d, 0xc0, 0x90, 0x41, 0x6e, 0x31, 0x27, 0x9e, 0xd8, 0x52, + 0x07, 0x01, 0x77, 0x99, 0x1f, 0x95, 0x67, 0xd3, 0x39, 0xa9, 0x67, 0xe4, 0x64, 0xc0, 0x0a, 0xef, + 0xa1, 0x55, 0xd1, 0xd7, 0x5f, 0x25, 0x0e, 0x6a, 0x5f, 0x07, 0xd4, 0x17, 0xb7, 0x54, 0x9e, 0x93, + 0x4b, 0xb1, 0x2c, 0x56, 0xda, 0x56, 0x8e, 0x9e, 0xe4, 0xa2, 0xf0, 0x27, 0x68, 0x25, 0xd9, 0x69, + 0x96, 0xeb, 0x3b, 0xae, 0xdf, 0x10, 0x1b, 0xad, 0x3c, 0x2f, 0x93, 0xbe, 0x16, 0x77, 0xf5, 0x95, + 0xfb, 0xc3, 0xca, 0xb3, 0x3c, 0x21, 0x19, 0x25, 0xc1, 0x8f, 0xd0, 0x8a, 0xf4, 0x08, 0x8e, 0x1a, + 0x7a, 0x17, 0xa2, 0xf2, 0x82, 0x2c, 0xdd, 0x46, 0xb6, 0x74, 0xe2, 0xea, 0x44, 0xdd, 0x7a, 0xab, + 0xe1, 0x08, 0x9a, 0x60, 0x73, 0x16, 0xde, 0x83, 0xd0, 0xb3, 0x5e, 0x55, 0xf5, 0x5a, 0xd9, 0x1a, + 0xa6, 0x22, 0xa3, 0xec, 0x6b, 0xef, 0xa1, 0xcb, 0x43, 0x05, 0xc7, 0x25, 0x54, 0x38, 0x85, 0x4e, + 0xb2, 0xd4, 0x88, 0xf8, 0x89, 0x57, 0xd1, 0x6c, 0x9b, 0x36, 0x5b, 0x90, 0x34, 0x1f, 0x49, 0x0e, + 0x77, 0xa7, 0xef, 0x68, 0xc6, 0x6f, 0x1a, 0x2a, 0x65, 0xbb, 0x67, 0x02, 0x7b, 0xe2, 0xc3, 0xc1, + 0x3d, 0xf1, 0xda, 0x05, 0x7a, 0x7a, 0xcc, 0xb2, 0xf8, 0x71, 0x1a, 0x95, 0x92, 0xba, 0x24, 0xcf, + 0xa9, 0x07, 0x3e, 0x9f, 0xc0, 0x40, 0xd7, 0x07, 0x5e, 0xa3, 0xb7, 0xce, 0x5d, 0xd7, 0x69, 0x60, + 0xe3, 0x9e, 0x25, 0xfc, 0x31, 0x9a, 0x8b, 0x38, 0xe5, 0x2d, 0x31, 0xe4, 0x82, 0xf0, 0xfa, 0x45, + 0x09, 0x25, 0x28, 0x7d, 0x91, 0x92, 0x33, 0x51, 0x64, 0xc6, 0xef, 0x1a, 0x5a, 0x1d, 0x86, 0x4c, + 0xa0, 0xba, 0x7b, 0x83, 0xd5, 0x7d, 0xe3, 0x82, 0xc9, 0x8c, 0xa9, 0xf0, 0x9f, 0x1a, 0x7a, 0x79, + 0x24, 0x6f, 0xf9, 0xf6, 0x89, 0x9d, 0x10, 0x0c, 0x6d, 0x9e, 0xfd, 0xf4, 0x2d, 0x97, 0x3b, 0xe1, + 0x30, 0x47, 0x4f, 0x72, 0x51, 0xf8, 0x21, 0x2a, 0xb9, 0x7e, 0xd3, 0xf5, 0x21, 0x91, 0x1d, 0xa5, + 0xf5, 0xcd, 0x1d, 0xdc, 0x61, 0x66, 0x59, 0xdc, 0xd5, 0xb8, 0xab, 0x97, 0x76, 0x87, 0x58, 0xc8, + 0x08, 0xaf, 0xf1, 0x47, 0x4e, 0x65, 0xe4, 0x6b, 0xf7, 0x36, 0x5a, 0x48, 0xbe, 0x03, 0x21, 0x54, + 0x69, 0xf4, 0x6f, 0x7a, 0x4b, 0xc9, 0x49, 0xdf, 0x42, 0xf6, 0x8d, 0xbc, 0x0a, 0x15, 0xe8, 0x85, + 0xfb, 0x46, 0x82, 0x32, 0x7d, 0x23, 0xcf, 0x44, 0x91, 0x89, 0x20, 0xc4, 0x37, 0x8d, 0xbc, 0xcb, + 0xc2, 0x60, 0x10, 0xfb, 0x4a, 0x4e, 0xfa, 0x16, 0xc6, 0xbf, 0x85, 0x9c, 0x02, 0xc9, 0x06, 0xcc, + 0x64, 0xd3, 0xfb, 0xf2, 0x1d, 0xce, 0xc6, 0xe9, 0x67, 0xe3, 0xe0, 0x1f, 0x34, 0x84, 0x69, 0x9f, + 0xa2, 0xde, 0x6b, 0xd0, 0xa4, 0x8b, 0x6a, 0x2f, 0x34, 0x12, 0xe6, 0xd6, 0x08, 0x4f, 0xf2, 0x12, + 0xae, 0x29, 0xff, 0x78, 0xd4, 0x80, 0xe4, 0x38, 0xc7, 0x0e, 0x2a, 0x26, 0xd2, 0x5a, 0x18, 0xb2, + 0x50, 0x8d, 0xa7, 0x71, 0x6e, 0x2c, 0xd2, 0xd2, 0xaa, 0xc8, 0xcf, 0xb2, 0x14, 0x7a, 0xd6, 0xd5, + 0x8b, 0x19, 0x3d, 0xc9, 0xd2, 0x0a, 0x2f, 0x0e, 0xa4, 0x5e, 0x66, 0x5e, 0xcc, 0xcb, 0x0e, 0x8c, + 0xf7, 0x92, 0xa1, 0x5d, 0xab, 0xa1, 0x57, 0xc6, 0x5c, 0xcb, 0x0b, 0xbd, 0x17, 0xdf, 0x6a, 0x28, + 0xeb, 0x03, 0xef, 0xa1, 0x19, 0xf1, 0x27, 0x54, 0x2d, 0x92, 0x6b, 0x17, 0x5b, 0x24, 0xf7, 0x5c, + 0x0f, 0xd2, 0x55, 0x28, 0x4e, 0x44, 0xb2, 0xe0, 0x37, 0xd1, 0xbc, 0x07, 0x51, 0x44, 0x1b, 0xca, + 0x73, 0xfa, 0x21, 0x57, 0x4f, 0xc4, 0xa4, 0xa7, 0x37, 0x6e, 0xa3, 0x2b, 0x39, 0x1f, 0xc4, 0x58, + 0x47, 0xb3, 0xb6, 0xfc, 0xbf, 0x24, 0x02, 0x9a, 0xb5, 0x16, 0xc5, 0x46, 0xd9, 0x96, 0x7f, 0x93, + 0x12, 0xb9, 0xb5, 0xf1, 0xf8, 0x59, 0x65, 0xea, 0xc9, 0xb3, 0xca, 0xd4, 0xd3, 0x67, 0x95, 0xa9, + 0x6f, 0xe2, 0x8a, 0xf6, 
0x38, 0xae, 0x68, 0x4f, 0xe2, 0x8a, 0xf6, 0x34, 0xae, 0x68, 0x7f, 0xc5, + 0x15, 0xed, 0xfb, 0xbf, 0x2b, 0x53, 0x9f, 0x4e, 0xb7, 0x37, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, + 0x9b, 0x74, 0xdf, 0x56, 0x8a, 0x10, 0x00, 0x00, +} + +func (m *CSIDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VolumeLifecycleModes) > 0 { + for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumeLifecycleModes[iNdEx]) + copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.PodInfoOnMount != nil { + i-- + if *m.PodInfoOnMount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AttachRequired != nil { + i-- + if *m.AttachRequired { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CSINode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Allocatable != nil { + { + size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.TopologyKeys) > 0 { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Drivers) > 0 { + for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StorageClass) Marshal() (dAtA []byte, err error) { + size 
:= m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a + } + if m.AllowVolumeExpansion != nil { + i-- + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PersistentVolumeName != nil { + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CSIDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSIDriverList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSIDriverSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AttachRequired != nil { + n += 2 + } + if m.PodInfoOnMount != nil { + n += 2 + } + if len(m.VolumeLifecycleModes) > 0 { + for _, s := range m.VolumeLifecycleModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSINodeDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Allocatable != nil { + l = m.Allocatable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CSINodeList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINodeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Drivers) > 0 { + for _, e := range m.Drivers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowVolumeExpansion != nil { + n += 2 + } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l 
+ sovGenerated(uint64(l)) + } + if len(m.AllowedTopologies) > 0 { + for _, e := range m.AllowedTopologies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIDriver{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIDriverList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSIDriver{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"CSIDriver", "CSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSIDriverList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CSIDriverSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIDriverSpec{`, + `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, + `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, + `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, + `}`, + }, "") + return s +} +func (this *CSINode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINode{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINodeDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSINode{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSINodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDrivers := "[]CSINodeDriver{" + for _, f := range this.Drivers { + repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForDrivers += "}" + s := strings.Join([]string{`&CSINodeSpec{`, + `Drivers:` + repeatedStringForDrivers + `,`, + `}`, + }, "") + return s +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", 
this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, + `}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + 
mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + 
if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIDriverList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriverList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriverList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSIDriver{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriverSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriverSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachRequired", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx 
>= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AttachRequired = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodInfoOnMount", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PodInfoOnMount = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 
0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Allocatable == nil { + m.Allocatable = &VolumeNodeResources{} + } + if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSINode{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drivers = append(m.Drivers, CSINodeDriver{}) + if err := m.Drivers[len(m.Drivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) + if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attacher = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AttachmentMetadata[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + 
ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto new file mode 100644 index 0000000..cb3c42c --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -0,0 +1,370 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.storage.v1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// CSIDriver captures information about a Container Storage Interface (CSI) +// volume driver deployed on the cluster. +// Kubernetes attach detach controller uses this object to determine whether attach is required. +// Kubelet uses this object to determine whether pod information needs to be passed on mount. +// CSIDriver objects are non-namespaced. +message CSIDriver { + // Standard object metadata. + // metadata.Name indicates the name of the CSI driver that this object + // refers to; it MUST be the same name returned by the CSI GetPluginName() + // call for that driver. + // The driver name must be 63 characters or less, beginning and ending with + // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the CSI Driver. + optional CSIDriverSpec spec = 2; +} + +// CSIDriverList is a collection of CSIDriver objects. +message CSIDriverList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSIDriver + repeated CSIDriver items = 2; +} + +// CSIDriverSpec is the specification of a CSIDriver. +message CSIDriverSpec { + // attachRequired indicates this CSI volume driver requires an attach + // operation (because it implements the CSI ControllerPublishVolume() + // method), and that the Kubernetes attach detach controller should call + // the attach volume interface which checks the volumeattachment status + // and waits until the volume is attached before proceeding to mounting. + // The CSI external-attacher coordinates with CSI volume driver and updates + // the volumeattachment status when the attach operation is complete. + // If the CSIDriverRegistry feature gate is enabled and the value is + // specified to false, the attach operation will be skipped. + // Otherwise the attach operation will be called. 
+ // +optional + optional bool attachRequired = 1; + + // If set to true, podInfoOnMount indicates this CSI volume driver + // requires additional pod information (like podName, podUID, etc.) during + // mount operations. + // If set to false, pod information will not be passed on mount. + // Default is false. + // The CSI driver specifies podInfoOnMount as part of driver deployment. + // If true, Kubelet will pass pod information as VolumeContext in the CSI + // NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information + // passed in as VolumeContext. + // The following VolumeConext will be passed if podInfoOnMount is set to true. + // This list might grow, but the prefix will be used. + // "csi.storage.k8s.io/pod.name": pod.Name + // "csi.storage.k8s.io/pod.namespace": pod.Namespace + // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. + // +optional + optional bool podInfoOnMount = 2; + + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // This field is beta. + // +optional + // +listType=set + repeated string volumeLifecycleModes = 3; +} + +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +message CSINode { + // metadata.name must be the Kubernetes node name. + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of CSINode + optional CSINodeSpec spec = 2; +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +message CSINodeDriver { + // This is the name of the CSI driver that this object refers to. 
+ // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + optional string name = 1; + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + optional string nodeID = 2; + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + repeated string topologyKeys = 3; + + // allocatable represents the volume resources of a node that are available for scheduling. + // This field is beta. + // +optional + optional VolumeNodeResources allocatable = 4; +} + +// CSINodeList is a collection of CSINode objects. +message CSINodeList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSINode + repeated CSINode items = 2; +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +message CSINodeSpec { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + repeated CSINodeDriver drivers = 1; +} + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + map parameters = 3; + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. + // +optional + optional string reclaimPolicy = 4; + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. 
Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + repeated string mountOptions = 5; + + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is only honored by servers that enable the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; + + // Restrict the node topologies where volumes can be dynamically provisioned. + // Each volume plugin defines its own supported topology specifications. + // An empty TopologySelectorTerm list means there is no topology restriction. + // This field is only honored by servers that enable the VolumeScheduling feature. + // +optional + repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. 
+message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string may be logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +message VolumeNodeResources { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is not specified, then the supported number of volumes on this node is unbounded. + // +optional + optional int32 count = 1; +} + diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go new file mode 100644 index 0000000..1a2f83d --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + + &VolumeAttachment{}, + &VolumeAttachmentList{}, + + &CSINode{}, + &CSINodeList{}, + + &CSIDriver{}, + &CSIDriverList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go new file mode 100644 index 0000000..556427b --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -0,0 +1,438 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. + Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. 
+ // +optional + ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` + + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is only honored by servers that enable the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` + + // Restrict the node topologies where volumes can be dynamically provisioned. + // Each volume plugin defines its own supported topology specifications. + // An empty TopologySelectorTerm list means there is no topology restriction. + // This field is only honored by servers that enable the VolumeScheduling feature. + // +optional + AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. 
+ Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. 
+ // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string may be logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSIDriver captures information about a Container Storage Interface (CSI) +// volume driver deployed on the cluster. +// Kubernetes attach detach controller uses this object to determine whether attach is required. +// Kubelet uses this object to determine whether pod information needs to be passed on mount. +// CSIDriver objects are non-namespaced. +type CSIDriver struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // metadata.Name indicates the name of the CSI driver that this object + // refers to; it MUST be the same name returned by the CSI GetPluginName() + // call for that driver. + // The driver name must be 63 characters or less, beginning and ending with + // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the CSI Driver. + Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSIDriverList is a collection of CSIDriver objects. +type CSIDriverList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSIDriver + Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CSIDriverSpec is the specification of a CSIDriver. +type CSIDriverSpec struct { + // attachRequired indicates this CSI volume driver requires an attach + // operation (because it implements the CSI ControllerPublishVolume() + // method), and that the Kubernetes attach detach controller should call + // the attach volume interface which checks the volumeattachment status + // and waits until the volume is attached before proceeding to mounting. + // The CSI external-attacher coordinates with CSI volume driver and updates + // the volumeattachment status when the attach operation is complete. + // If the CSIDriverRegistry feature gate is enabled and the value is + // specified to false, the attach operation will be skipped. + // Otherwise the attach operation will be called. 
+ // +optional + AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` + + // If set to true, podInfoOnMount indicates this CSI volume driver + // requires additional pod information (like podName, podUID, etc.) during + // mount operations. + // If set to false, pod information will not be passed on mount. + // Default is false. + // The CSI driver specifies podInfoOnMount as part of driver deployment. + // If true, Kubelet will pass pod information as VolumeContext in the CSI + // NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information + // passed in as VolumeContext. + // The following VolumeConext will be passed if podInfoOnMount is set to true. + // This list might grow, but the prefix will be used. + // "csi.storage.k8s.io/pod.name": pod.Name + // "csi.storage.k8s.io/pod.namespace": pod.Namespace + // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. + // +optional + PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` + + // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // This field is beta. + // +optional + // +listType=set + VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` +} + +// VolumeLifecycleMode is an enumeration of possible usage modes for a volume +// provided by a CSI driver. More modes may be added in the future. +type VolumeLifecycleMode string + +const ( + // VolumeLifecyclePersistent explicitly confirms that the driver implements + // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not + // set. Such volumes are managed in Kubernetes via the persistent volume + // claim mechanism and have a lifecycle that is independent of the pods which + // use them. + VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" + + // VolumeLifecycleEphemeral indicates that the driver can be used for + // ephemeral inline volumes. 
Such volumes are specified inside the pod + // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have + // a lifecycle that is tied to the lifecycle of the pod. For example, such + // a volume might contain data that gets created specifically for that pod, + // like secrets. + // But how the volume actually gets created and managed is entirely up to + // the driver. It might also use reference counting to share the same volume + // instance among different pods if the CSIVolumeSource of those pods is + // identical. + VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +type CSINode struct { + metav1.TypeMeta `json:",inline"` + + // metadata.name must be the Kubernetes node name. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of CSINode + Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +type CSINodeSpec struct { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +type CSINodeDriver struct { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. 
+ // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` + + // allocatable represents the volume resources of a node that are available for scheduling. + // This field is beta. + // +optional + Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +type VolumeNodeResources struct { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is not specified, then the supported number of volumes on this node is unbounded. + // +optional + Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINodeList is a collection of CSINode objects. +type CSINodeList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSINode + Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..0e524a2 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -0,0 +1,200 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CSIDriver = map[string]string{ + "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. 
Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", + "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the CSI Driver.", +} + +func (CSIDriver) SwaggerDoc() map[string]string { + return map_CSIDriver +} + +var map_CSIDriverList = map[string]string{ + "": "CSIDriverList is a collection of CSIDriver objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSIDriver", +} + +func (CSIDriverList) SwaggerDoc() map[string]string { + return map_CSIDriverList +} + +var map_CSIDriverSpec = map[string]string{ + "": "CSIDriverSpec is the specification of a CSIDriver.", + "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", + "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", + "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. 
The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.", +} + +func (CSIDriverSpec) SwaggerDoc() map[string]string { + return map_CSIDriverSpec +} + +var map_CSINode = map[string]string{ + "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", + "metadata": "metadata.name must be the Kubernetes node name.", + "spec": "spec is the specification of CSINode", +} + +func (CSINode) SwaggerDoc() map[string]string { + return map_CSINode +} + +var map_CSINodeDriver = map[string]string{ + "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", + "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", + "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", + "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. 
This field is beta.", +} + +func (CSINodeDriver) SwaggerDoc() map[string]string { + return map_CSINodeDriver +} + +var map_CSINodeList = map[string]string{ + "": "CSINodeList is a collection of CSINode objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSINode", +} + +func (CSINodeList) SwaggerDoc() map[string]string { + return map_CSINodeList +} + +var map_CSINodeSpec = map[string]string{ + "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", + "drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", +} + +func (CSINodeSpec) SwaggerDoc() map[string]string { + return map_CSINodeSpec +} + +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", + "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. 
Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +var map_VolumeNodeResources = map[string]string{ + "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. 
If this field is not specified, then the supported number of volumes on this node is unbounded.", +} + +func (VolumeNodeResources) SwaggerDoc() map[string]string { + return map_VolumeNodeResources +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..efaa40a --- /dev/null +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -0,0 +1,494 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + corev1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriver) DeepCopyInto(out *CSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver. +func (in *CSIDriver) DeepCopy() *CSIDriver { + if in == nil { + return nil + } + out := new(CSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList. +func (in *CSIDriverList) DeepCopy() *CSIDriverList { + if in == nil { + return nil + } + out := new(CSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { + *out = *in + if in.AttachRequired != nil { + in, out := &in.AttachRequired, &out.AttachRequired + *out = new(bool) + **out = **in + } + if in.PodInfoOnMount != nil { + in, out := &in.PodInfoOnMount, &out.PodInfoOnMount + *out = new(bool) + **out = **in + } + if in.VolumeLifecycleModes != nil { + in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes + *out = make([]VolumeLifecycleMode, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. +func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { + if in == nil { + return nil + } + out := new(CSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINode) DeepCopyInto(out *CSINode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. +func (in *CSINode) DeepCopy() *CSINode { + if in == nil { + return nil + } + out := new(CSINode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { + *out = *in + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = new(VolumeNodeResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. +func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { + if in == nil { + return nil + } + out := new(CSINodeDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSINode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. +func (in *CSINodeList) DeepCopy() *CSINodeList { + if in == nil { + return nil + } + out := new(CSINodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { + *out = *in + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]CSINodeDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. +func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { + if in == nil { + return nil + } + out := new(CSINodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClass) DeepCopyInto(out *StorageClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ReclaimPolicy != nil { + in, out := &in.ReclaimPolicy, &out.ReclaimPolicy + *out = new(corev1.PersistentVolumeReclaimPolicy) + **out = **in + } + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowVolumeExpansion != nil { + in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion + *out = new(bool) + **out = **in + } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + *out = new(VolumeBindingMode) + **out = **in + } + if in.AllowedTopologies != nil { + in, out := &in.AllowedTopologies, &out.AllowedTopologies + *out = make([]corev1.TopologySelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass. +func (in *StorageClass) DeepCopy() *StorageClass { + if in == nil { + return nil + } + out := new(StorageClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList. +func (in *StorageClassList) DeepCopy() *StorageClassList { + if in == nil { + return nil + } + out := new(StorageClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + *out = new(string) + **out = **in + } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(corev1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. +func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { + if in == nil { + return nil + } + out := new(VolumeNodeResources) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go new file mode 100644 index 0000000..6f7ad7e --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +groupName=storage.k8s.io +// +k8s:openapi-gen=true + +package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go new file mode 100644 index 0000000..1f9db7a --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -0,0 +1,1813 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{0} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{1} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{2} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + 
if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m *VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{3} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{4} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_10f856db1e670dc4, []int{5} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo + +func init() { + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") + 
proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_10f856db1e670dc4) +} + +var fileDescriptor_10f856db1e670dc4 = []byte{ + // 745 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, + 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0x0d, 0x1f, 0xd1, 0x2a, 0x82, 0x28, 0x48, 0x4e, 0x95, 0x53, + 0x40, 0x74, 0x4d, 0x0a, 0x42, 0x15, 0xb7, 0x58, 0xed, 0xa1, 0xa2, 0x2d, 0x68, 0x8b, 0x38, 0x00, + 0x07, 0x36, 0xf6, 0xe2, 0xb8, 0x89, 0x3f, 0xe4, 0x5d, 0x47, 0xea, 0x8d, 0x13, 0x67, 0x6e, 0xbc, + 0x01, 0xcf, 0x92, 0x1b, 0x15, 0xa7, 0x9e, 0x22, 0x6a, 0xde, 0x82, 0x0b, 0x68, 0xd7, 0x9b, 0xc4, + 0x24, 0x29, 0xb4, 0xbd, 0x79, 0x66, 0x67, 0x7e, 0x33, 0xf3, 0xdf, 0xf1, 0x82, 0x9d, 0xfe, 0x36, + 0x43, 0x6e, 0x60, 0xf4, 0xe3, 0x2e, 0x8d, 0x7c, 0xca, 0x29, 0x33, 0x86, 0xd4, 0xb7, 0x83, 0xc8, + 0x50, 0x07, 0x24, 0x74, 0x0d, 0xc6, 0x83, 0x88, 0x38, 0xd4, 0x18, 0xb6, 0xc9, 0x20, 0xec, 0x91, + 0xb6, 0xe1, 0x50, 0x9f, 0x46, 0x84, 0x53, 0x1b, 0x85, 0x51, 0xc0, 0x03, 0x78, 0x2f, 0x0d, 0x46, + 0x24, 0x74, 0x91, 0x0a, 0x46, 0x93, 0xe0, 0xfa, 0xa6, 0xe3, 0xf2, 0x5e, 0xdc, 0x45, 0x56, 0xe0, + 0x19, 0x4e, 0xe0, 0x04, 0x86, 0xcc, 0xe9, 0xc6, 0x1f, 0xa4, 0x25, 0x0d, 0xf9, 0x95, 0xb2, 0xea, + 0xcd, 0x4c, 0x61, 0x2b, 0x88, 0x44, 0xd5, 0xf9, 0x7a, 0xf5, 0x27, 0xb3, 0x18, 0x8f, 0x58, 0x3d, + 0xd7, 0xa7, 0xd1, 0x89, 0x11, 0xf6, 0x1d, 0xe1, 0x60, 0x86, 0x47, 0x39, 0x59, 0x96, 0x65, 0x5c, + 0x94, 0x15, 0xc5, 0x3e, 0x77, 0x3d, 0xba, 0x90, 0xf0, 0xf4, 0x7f, 0x09, 0xcc, 0xea, 0x51, 0x8f, + 0xcc, 0xe7, 0x35, 0xbf, 0xe6, 0x41, 0xe5, 0x75, 0x30, 0x88, 0x3d, 0xda, 0xe1, 0x9c, 0x58, 0x3d, + 0x8f, 0xfa, 0x1c, 0xbe, 0x07, 0x25, 0xd1, 0x98, 0x4d, 0x38, 0xa9, 0x69, 0x1b, 0x5a, 0xab, 0xbc, + 0xf5, 0x08, 0xcd, 0x64, 0x9b, 0xf2, 0x51, 0xd8, 0x77, 0x84, 0x83, 0x21, 0x11, 0x8d, 0x86, 0x6d, + 0xf4, 0xa2, 0x7b, 0x4c, 0x2d, 0x7e, 0x40, 0x39, 0x31, 0xe1, 0x68, 0xdc, 0xc8, 0x25, 0xe3, 0x06, + 0x98, 0xf9, 0xf0, 0x94, 0x0a, 0x8f, 0x40, 0x91, 0x85, 0xd4, 0xaa, 0xe5, 0x25, 0xbd, 0x8d, 0xfe, + 0x71, 0x29, 0x68, 0xbe, 0xbd, 0xa3, 0x90, 0x5a, 0xe6, 0x0d, 0x85, 0x2f, 0x0a, 0x0b, 0x4b, 0x18, + 0x7c, 0x0b, 0x56, 0x19, 0x27, 0x3c, 0x66, 0xb5, 0x82, 0xc4, 0x3e, 0xbe, 0x1a, 0x56, 0xa6, 0x9a, + 0xb7, 0x14, 0x78, 0x35, 0xb5, 0xb1, 0x42, 0x36, 0x47, 0x1a, 0xa8, 0xce, 0xa7, 0xec, 0xbb, 0x8c, + 0xc3, 0x77, 0x0b, 0x62, 0xa1, 0xcb, 0x89, 0x25, 0xb2, 0xa5, 0x54, 0x15, 0x55, 0xb2, 0x34, 0xf1, + 0x64, 0x84, 0xc2, 0x60, 0xc5, 0xe5, 0xd4, 0x63, 0xb5, 0xfc, 0x46, 0xa1, 0x55, 0xde, 0xda, 0xbc, + 0xd2, 0x48, 0xe6, 0x4d, 0x45, 0x5e, 0xd9, 0x13, 0x0c, 0x9c, 0xa2, 0x9a, 0xdf, 0x35, 0x70, 0x67, + 0x61, 0xfa, 0x20, 0x8e, 0x2c, 0x0a, 0xf7, 0x41, 0x35, 0xa4, 0x11, 0x73, 0x19, 0xa7, 0x3e, 0x4f, + 0x63, 0x0e, 0x89, 0x47, 0xe5, 0x60, 0xeb, 0x66, 0x2d, 0x19, 0x37, 0xaa, 0x2f, 0x97, 0x9c, 0xe3, + 0xa5, 0x59, 0xf0, 0x18, 0x54, 0x5c, 0x7f, 0xe0, 0xfa, 0x34, 0xf5, 0x1d, 0xcd, 0x6e, 0xbc, 0x95, + 0x9d, 0x43, 0xfc, 0x3a, 0x42, 0x90, 0x79, 0xb2, 0xbc, 0xe8, 0x6a, 0x32, 0x6e, 0x54, 0xf6, 0xe6, + 0x28, 0x78, 0x81, 0xdb, 0xfc, 0xb6, 0xe4, 0x7e, 0xc4, 0x01, 0x7c, 0x08, 0x4a, 0x44, 0x7a, 0x68, + 0xa4, 0xc6, 0x98, 0xea, 0xdd, 0x51, 0x7e, 0x3c, 
0x8d, 0x90, 0x3b, 0x24, 0xa5, 0x50, 0x8d, 0x5e, + 0x71, 0x87, 0x64, 0x6a, 0x66, 0x87, 0xa4, 0x8d, 0x15, 0x52, 0xb4, 0xe2, 0x07, 0x76, 0xaa, 0x68, + 0xe1, 0xef, 0x56, 0x0e, 0x95, 0x1f, 0x4f, 0x23, 0x9a, 0xbf, 0x0b, 0x4b, 0xae, 0x49, 0x2e, 0x63, + 0x66, 0x26, 0x5b, 0xce, 0x54, 0x5a, 0x98, 0xc9, 0x9e, 0xce, 0x64, 0xc3, 0x2f, 0x1a, 0x80, 0x64, + 0x8a, 0x38, 0x98, 0x2c, 0x6b, 0xba, 0x51, 0xcf, 0xaf, 0xf1, 0x93, 0xa0, 0xce, 0x02, 0x6d, 0xd7, + 0xe7, 0xd1, 0x89, 0x59, 0x57, 0x5d, 0xc0, 0xc5, 0x00, 0xbc, 0xa4, 0x05, 0x78, 0x0c, 0xca, 0xa9, + 0x77, 0x37, 0x8a, 0x82, 0x48, 0xfd, 0xb6, 0xad, 0x4b, 0x74, 0x24, 0xe3, 0x4d, 0x3d, 0x19, 0x37, + 0xca, 0x9d, 0x19, 0xe0, 0xd7, 0xb8, 0x51, 0xce, 0x9c, 0xe3, 0x2c, 0x5c, 0xd4, 0xb2, 0xe9, 0xac, + 0x56, 0xf1, 0x3a, 0xb5, 0x76, 0xe8, 0xc5, 0xb5, 0x32, 0xf0, 0xfa, 0x2e, 0xb8, 0x7b, 0x81, 0x44, + 0xb0, 0x02, 0x0a, 0x7d, 0x7a, 0x92, 0x6e, 0x22, 0x16, 0x9f, 0xb0, 0x0a, 0x56, 0x86, 0x64, 0x10, + 0xa7, 0x1b, 0xb7, 0x8e, 0x53, 0xe3, 0x59, 0x7e, 0x5b, 0x6b, 0x7e, 0xd2, 0x40, 0xb6, 0x06, 0xdc, + 0x07, 0x45, 0xf1, 0x96, 0xab, 0x67, 0xe6, 0xc1, 0xe5, 0x9e, 0x99, 0x57, 0xae, 0x47, 0x67, 0xcf, + 0xa5, 0xb0, 0xb0, 0xa4, 0xc0, 0xfb, 0x60, 0xcd, 0xa3, 0x8c, 0x11, 0x47, 0x55, 0x36, 0x6f, 0xab, + 0xa0, 0xb5, 0x83, 0xd4, 0x8d, 0x27, 0xe7, 0x26, 0x1a, 0x9d, 0xeb, 0xb9, 0xd3, 0x73, 0x3d, 0x77, + 0x76, 0xae, 0xe7, 0x3e, 0x26, 0xba, 0x36, 0x4a, 0x74, 0xed, 0x34, 0xd1, 0xb5, 0xb3, 0x44, 0xd7, + 0x7e, 0x24, 0xba, 0xf6, 0xf9, 0xa7, 0x9e, 0x7b, 0x53, 0x9a, 0x08, 0xf7, 0x27, 0x00, 0x00, 0xff, + 0xff, 0xe8, 0x45, 0xe3, 0xba, 0xab, 0x07, 0x00, 0x00, +} + +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PersistentVolumeName != nil { + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = 
encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + 
sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", 
this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if 
postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attacher = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } 
+ var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + 
postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AttachmentMetadata[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length 
found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto new file mode 100644 index 0000000..7601963 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -0,0 +1,136 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.storage.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. 
+ // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go new file mode 100644 index 0000000..7b81ee4 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &VolumeAttachment{}, + &VolumeAttachmentList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go new file mode 100644 index 0000000..3940885 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -0,0 +1,136 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. 
+type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. 
+type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string maybe logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000..2e82161 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,93 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. 
Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..3debf9d --- /dev/null +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,180 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + *out = new(string) + **out = **in + } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(v1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. +func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go new file mode 100644 index 0000000..e3e3626 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -0,0 +1,22 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +groupName=storage.k8s.io +// +k8s:openapi-gen=true + +package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go new file mode 100644 index 0000000..af4ce59 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -0,0 +1,4461 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + k8s_io_api_core_v1 "k8s.io/api/core/v1" + v11 "k8s.io/api/core/v1" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *CSIDriver) Reset() { *m = CSIDriver{} } +func (*CSIDriver) ProtoMessage() {} +func (*CSIDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{0} +} +func (m *CSIDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriver.Merge(m, src) +} +func (m *CSIDriver) XXX_Size() int { + return m.Size() +} +func (m *CSIDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriver proto.InternalMessageInfo + +func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } +func (*CSIDriverList) ProtoMessage() {} +func (*CSIDriverList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{1} +} +func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriverList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverList.Merge(m, src) +} +func (m *CSIDriverList) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverList) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo + +func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } +func (*CSIDriverSpec) ProtoMessage() {} +func (*CSIDriverSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{2} +} +func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSIDriverSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSIDriverSpec.Merge(m, src) +} +func (m *CSIDriverSpec) XXX_Size() int { + return m.Size() +} +func (m *CSIDriverSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo + +func (m *CSINode) Reset() { *m = CSINode{} } +func (*CSINode) ProtoMessage() {} +func (*CSINode) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{3} +} +func (m *CSINode) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINode) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINode.Merge(m, src) +} +func (m *CSINode) XXX_Size() int { + return m.Size() +} +func (m *CSINode) XXX_DiscardUnknown() { + xxx_messageInfo_CSINode.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINode proto.InternalMessageInfo + +func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } +func (*CSINodeDriver) ProtoMessage() {} +func (*CSINodeDriver) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{4} +} +func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeDriver) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeDriver.Merge(m, src) +} +func (m *CSINodeDriver) XXX_Size() int { + return m.Size() +} +func (m *CSINodeDriver) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo + +func (m *CSINodeList) Reset() { *m = CSINodeList{} } +func (*CSINodeList) ProtoMessage() {} +func (*CSINodeList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{5} +} +func (m *CSINodeList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeList) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeList.Merge(m, src) +} +func (m *CSINodeList) XXX_Size() int { + return m.Size() +} +func (m *CSINodeList) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeList.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeList proto.InternalMessageInfo + +func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } +func (*CSINodeSpec) ProtoMessage() {} +func (*CSINodeSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{6} +} +func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CSINodeSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_CSINodeSpec.Merge(m, src) +} +func (m *CSINodeSpec) XXX_Size() int { + return m.Size() +} +func (m *CSINodeSpec) XXX_DiscardUnknown() { + xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo + +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{7} +} +func (m *StorageClass) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClass) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClass.Merge(m, src) +} +func (m 
*StorageClass) XXX_Size() int { + return m.Size() +} +func (m *StorageClass) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClass.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClass proto.InternalMessageInfo + +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{8} +} +func (m *StorageClassList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StorageClassList) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageClassList.Merge(m, src) +} +func (m *StorageClassList) XXX_Size() int { + return m.Size() +} +func (m *StorageClassList) XXX_DiscardUnknown() { + xxx_messageInfo_StorageClassList.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageClassList proto.InternalMessageInfo + +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{9} +} +func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachment) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachment.Merge(m, src) +} +func (m *VolumeAttachment) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachment) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo + +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{10} +} +func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentList.Merge(m, src) +} +func (m *VolumeAttachmentList) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentList) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo + +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{11} +} +func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) +} +func (m 
*VolumeAttachmentSource) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo + +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{12} +} +func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) +} +func (m *VolumeAttachmentSpec) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo + +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{13} +} +func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) +} +func (m *VolumeAttachmentStatus) XXX_Size() int { + return m.Size() +} +func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo + +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{14} +} +func (m *VolumeError) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeError) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeError.Merge(m, src) +} +func (m *VolumeError) XXX_Size() int { + return m.Size() +} +func (m *VolumeError) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeError.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeError proto.InternalMessageInfo + +func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } +func (*VolumeNodeResources) ProtoMessage() {} +func (*VolumeNodeResources) Descriptor() ([]byte, []int) { + return fileDescriptor_7d2980599fd0de80, []int{15} +} +func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { + xxx_messageInfo_VolumeNodeResources.Merge(m, 
src) +} +func (m *VolumeNodeResources) XXX_Size() int { + return m.Size() +} +func (m *VolumeNodeResources) XXX_DiscardUnknown() { + xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) +} + +var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo + +func init() { + proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver") + proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1beta1.CSIDriverList") + proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1beta1.CSIDriverSpec") + proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1beta1.CSINode") + proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1beta1.CSINodeDriver") + proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList") + proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec") + proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") + proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_7d2980599fd0de80) +} + +var fileDescriptor_7d2980599fd0de80 = []byte{ + // 1344 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0xdb, 0x46, + 0x1b, 0x37, 0x2d, 0x7f, 0x9e, 0xec, 0x44, 0xbe, 0x18, 0xef, 0xab, 0x57, 0x83, 0x64, 0xe8, 0x45, + 0x1b, 0x27, 0x48, 0xc8, 0x24, 0x48, 0x83, 0x20, 0x40, 0x07, 0xd3, 0x31, 0x50, 0x25, 0x96, 0xe3, + 0x9e, 0x8d, 0xa0, 0x08, 0x3a, 0xf4, 0x44, 0x3e, 0x91, 0x19, 0x93, 0x3c, 0x86, 0x3c, 0xa9, 0xd5, + 0xd6, 0xa9, 0x73, 0xd1, 0xa1, 0x7f, 0x41, 0xff, 0x85, 0x16, 0x68, 0x97, 0x8e, 0xcd, 0x54, 0x04, + 0x9d, 0x32, 0x09, 0x0d, 0xbb, 0x76, 0xeb, 0x66, 0x74, 0x28, 0xee, 0x78, 0x12, 0x29, 0x89, 0x8a, + 0xed, 0x0e, 0xde, 0x78, 0xcf, 0xc7, 0xef, 0xf9, 0x7e, 0xee, 0x88, 0xb6, 0x8f, 0xef, 0x47, 0xba, + 0xc3, 0x8c, 0xe3, 0x4e, 0x0b, 0x42, 0x1f, 0x38, 0x44, 0x46, 0x17, 0x7c, 0x9b, 0x85, 0x86, 0x62, + 0xd0, 0xc0, 0x31, 0x22, 0xce, 0x42, 0xda, 0x06, 0xa3, 0x7b, 0xbb, 0x05, 0x9c, 0xde, 0x36, 0xda, + 0xe0, 0x43, 0x48, 0x39, 0xd8, 0x7a, 0x10, 0x32, 0xce, 0x70, 0x25, 0x91, 0xd5, 0x69, 0xe0, 0xe8, + 0x4a, 0x56, 0x57, 0xb2, 0x95, 0x9b, 0x6d, 0x87, 0x1f, 0x75, 0x5a, 0xba, 0xc5, 0x3c, 0xa3, 0xcd, + 0xda, 0xcc, 0x90, 0x2a, 0xad, 0xce, 0x73, 0x79, 0x92, 0x07, 0xf9, 0x95, 0x40, 0x55, 0xea, 0x19, + 0xb3, 0x16, 0x0b, 0x85, 0xcd, 0x71, 0x73, 0x95, 0xbb, 0xa9, 0x8c, 0x47, 0xad, 0x23, 0xc7, 0x87, + 0xb0, 0x67, 0x04, 0xc7, 0x6d, 0x41, 0x88, 0x0c, 0x0f, 
0x38, 0xcd, 0xd3, 0x32, 0xa6, 0x69, 0x85, + 0x1d, 0x9f, 0x3b, 0x1e, 0x4c, 0x28, 0xdc, 0x3b, 0x4d, 0x21, 0xb2, 0x8e, 0xc0, 0xa3, 0xe3, 0x7a, + 0xf5, 0x9f, 0x34, 0xb4, 0xbc, 0x7d, 0xd0, 0x78, 0x18, 0x3a, 0x5d, 0x08, 0xf1, 0x67, 0x68, 0x49, + 0x78, 0x64, 0x53, 0x4e, 0xcb, 0xda, 0x86, 0xb6, 0x59, 0xbc, 0x73, 0x4b, 0x4f, 0xd3, 0x35, 0x04, + 0xd6, 0x83, 0xe3, 0xb6, 0x20, 0x44, 0xba, 0x90, 0xd6, 0xbb, 0xb7, 0xf5, 0x27, 0xad, 0x17, 0x60, + 0xf1, 0x26, 0x70, 0x6a, 0xe2, 0x57, 0xfd, 0xda, 0x4c, 0xdc, 0xaf, 0xa1, 0x94, 0x46, 0x86, 0xa8, + 0xf8, 0x31, 0x9a, 0x8b, 0x02, 0xb0, 0xca, 0xb3, 0x12, 0xfd, 0x9a, 0x3e, 0xbd, 0x18, 0xfa, 0xd0, + 0xad, 0x83, 0x00, 0x2c, 0x73, 0x45, 0xc1, 0xce, 0x89, 0x13, 0x91, 0x20, 0xf5, 0x1f, 0x35, 0xb4, + 0x3a, 0x94, 0xda, 0x75, 0x22, 0x8e, 0x3f, 0x9d, 0x08, 0x40, 0x3f, 0x5b, 0x00, 0x42, 0x5b, 0xba, + 0x5f, 0x52, 0x76, 0x96, 0x06, 0x94, 0x8c, 0xf3, 0x8f, 0xd0, 0xbc, 0xc3, 0xc1, 0x8b, 0xca, 0xb3, + 0x1b, 0x85, 0xcd, 0xe2, 0x9d, 0xf7, 0xce, 0xe4, 0xbd, 0xb9, 0xaa, 0x10, 0xe7, 0x1b, 0x42, 0x97, + 0x24, 0x10, 0xf5, 0x3f, 0xb3, 0xbe, 0x8b, 0x98, 0xf0, 0x03, 0x74, 0x89, 0x72, 0x4e, 0xad, 0x23, + 0x02, 0x2f, 0x3b, 0x4e, 0x08, 0xb6, 0x8c, 0x60, 0xc9, 0xc4, 0x71, 0xbf, 0x76, 0x69, 0x6b, 0x84, + 0x43, 0xc6, 0x24, 0x85, 0x6e, 0xc0, 0xec, 0x86, 0xff, 0x9c, 0x3d, 0xf1, 0x9b, 0xac, 0xe3, 0x73, + 0x99, 0x60, 0xa5, 0xbb, 0x3f, 0xc2, 0x21, 0x63, 0x92, 0xd8, 0x42, 0xeb, 0x5d, 0xe6, 0x76, 0x3c, + 0xd8, 0x75, 0x9e, 0x83, 0xd5, 0xb3, 0x5c, 0x68, 0x32, 0x1b, 0xa2, 0x72, 0x61, 0xa3, 0xb0, 0xb9, + 0x6c, 0x1a, 0x71, 0xbf, 0xb6, 0xfe, 0x34, 0x87, 0x7f, 0xd2, 0xaf, 0x5d, 0xc9, 0xa1, 0x93, 0x5c, + 0xb0, 0xfa, 0x0f, 0x1a, 0x5a, 0xdc, 0x3e, 0x68, 0xec, 0x31, 0x1b, 0x2e, 0xa0, 0xcb, 0x1a, 0x23, + 0x5d, 0x76, 0xf5, 0x94, 0x3a, 0x09, 0xa7, 0xa6, 0xf6, 0xd8, 0x5f, 0x49, 0x9d, 0x84, 0x8c, 0x1a, + 0x92, 0x0d, 0x34, 0xe7, 0x53, 0x0f, 0xa4, 0xeb, 0xcb, 0xa9, 0xce, 0x1e, 0xf5, 0x80, 0x48, 0x0e, + 0x7e, 0x1f, 0x2d, 0xf8, 0xcc, 0x86, 0xc6, 0x43, 0xe9, 0xc0, 0xb2, 0x79, 0x49, 0xc9, 0x2c, 0xec, + 0x49, 0x2a, 0x51, 0x5c, 0x7c, 0x17, 0xad, 0x70, 0x16, 0x30, 0x97, 0xb5, 0x7b, 0x8f, 0xa1, 0x37, + 0xc8, 0x78, 0x29, 0xee, 0xd7, 0x56, 0x0e, 0x33, 0x74, 0x32, 0x22, 0x85, 0x5b, 0xa8, 0x48, 0x5d, + 0x97, 0x59, 0x94, 0xd3, 0x96, 0x0b, 0xe5, 0x39, 0x19, 0xa3, 0xf1, 0xae, 0x18, 0x93, 0x32, 0x09, + 0xe3, 0x04, 0x22, 0xd6, 0x09, 0x2d, 0x88, 0xcc, 0xcb, 0x71, 0xbf, 0x56, 0xdc, 0x4a, 0x71, 0x48, + 0x16, 0xb4, 0xfe, 0xbd, 0x86, 0x8a, 0x2a, 0xea, 0x0b, 0x98, 0xab, 0x8f, 0x46, 0xe7, 0xea, 0xff, + 0x67, 0xa8, 0xd7, 0x94, 0xa9, 0xb2, 0x86, 0x6e, 0xcb, 0x91, 0x3a, 0x44, 0x8b, 0xb6, 0x2c, 0x5a, + 0x54, 0xd6, 0x24, 0xf4, 0xb5, 0x33, 0x40, 0xab, 0xb1, 0xbd, 0xac, 0x0c, 0x2c, 0x26, 0xe7, 0x88, + 0x0c, 0xa0, 0xea, 0xdf, 0x2c, 0xa0, 0x95, 0x83, 0x44, 0x77, 0xdb, 0xa5, 0x51, 0x74, 0x01, 0x0d, + 0xfd, 0x01, 0x2a, 0x06, 0x21, 0xeb, 0x3a, 0x91, 0xc3, 0x7c, 0x08, 0x55, 0x5b, 0x5d, 0x51, 0x2a, + 0xc5, 0xfd, 0x94, 0x45, 0xb2, 0x72, 0xd8, 0x45, 0x28, 0xa0, 0x21, 0xf5, 0x80, 0x8b, 0x14, 0x14, + 0x64, 0x0a, 0xee, 0xbf, 0x2b, 0x05, 0xd9, 0xb0, 0xf4, 0xfd, 0xa1, 0xea, 0x8e, 0xcf, 0xc3, 0x5e, + 0xea, 0x62, 0xca, 0x20, 0x19, 0x7c, 0x7c, 0x8c, 0x56, 0x43, 0xb0, 0x5c, 0xea, 0x78, 0xfb, 0xcc, + 0x75, 0xac, 0x9e, 0x6c, 0xcd, 0x65, 0x73, 0x27, 0xee, 0xd7, 0x56, 0x49, 0x96, 0x71, 0xd2, 0xaf, + 0xdd, 0x9a, 0xbc, 0x3a, 0xf5, 0x7d, 0x08, 0x23, 0x27, 0xe2, 0xe0, 0xf3, 0xa4, 0x61, 0x47, 0x74, + 0xc8, 0x28, 0xb6, 0x98, 0x1d, 0x4f, 0xac, 0xaf, 0x27, 0x01, 0x77, 0x98, 0x1f, 0x95, 0xe7, 0xd3, + 0xd9, 0x69, 0x66, 0xe8, 0x64, 0x44, 0x0a, 0xef, 0xa2, 0x75, 0xd1, 0xe6, 0x9f, 
0x27, 0x06, 0x76, + 0xbe, 0x08, 0xa8, 0x2f, 0x52, 0x55, 0x5e, 0x90, 0xdb, 0xb2, 0x2c, 0x76, 0xdd, 0x56, 0x0e, 0x9f, + 0xe4, 0x6a, 0xe1, 0x4f, 0xd0, 0x5a, 0xb2, 0xec, 0x4c, 0xc7, 0xb7, 0x1d, 0xbf, 0x2d, 0x56, 0x5d, + 0x79, 0x51, 0x06, 0x7d, 0x3d, 0xee, 0xd7, 0xd6, 0x9e, 0x8e, 0x33, 0x4f, 0xf2, 0x88, 0x64, 0x12, + 0x04, 0xbf, 0x44, 0x6b, 0xd2, 0x22, 0xd8, 0x6a, 0x11, 0x38, 0x10, 0x95, 0x97, 0x64, 0xfd, 0x36, + 0xb3, 0xf5, 0x13, 0xa9, 0x13, 0x8d, 0x34, 0x58, 0x17, 0x07, 0xe0, 0x82, 0xc5, 0x59, 0x78, 0x08, + 0xa1, 0x67, 0xfe, 0x4f, 0xd5, 0x6b, 0x6d, 0x6b, 0x1c, 0x8a, 0x4c, 0xa2, 0x57, 0x3e, 0x44, 0x97, + 0xc7, 0x0a, 0x8e, 0x4b, 0xa8, 0x70, 0x0c, 0xbd, 0x64, 0xd1, 0x11, 0xf1, 0x89, 0xd7, 0xd1, 0x7c, + 0x97, 0xba, 0x1d, 0x48, 0x3a, 0x90, 0x24, 0x87, 0x07, 0xb3, 0xf7, 0xb5, 0xfa, 0xcf, 0x1a, 0x2a, + 0x65, 0xbb, 0xe7, 0x02, 0xd6, 0x46, 0x73, 0x74, 0x6d, 0x6c, 0x9e, 0xb5, 0xb1, 0xa7, 0xec, 0x8e, + 0xef, 0x66, 0x51, 0x29, 0x29, 0x4e, 0x72, 0xd9, 0x7a, 0xe0, 0xf3, 0x0b, 0x18, 0x6d, 0x32, 0x72, + 0x57, 0xdd, 0x3a, 0x7d, 0x8f, 0xa7, 0xde, 0x4d, 0xbb, 0xb4, 0xf0, 0x33, 0xb4, 0x10, 0x71, 0xca, + 0x3b, 0x62, 0xe6, 0x05, 0xea, 0x9d, 0x73, 0xa1, 0x4a, 0xcd, 0xf4, 0xd2, 0x4a, 0xce, 0x44, 0x21, + 0xd6, 0x7f, 0xd1, 0xd0, 0xfa, 0xb8, 0xca, 0x05, 0x14, 0xfb, 0xe3, 0xd1, 0x62, 0xdf, 0x38, 0x4f, + 0x44, 0x53, 0x0a, 0xfe, 0x9b, 0x86, 0xfe, 0x33, 0x11, 0xbc, 0xbc, 0x1e, 0xc5, 0x9e, 0x08, 0xc6, + 0xb6, 0xd1, 0x5e, 0x7a, 0xe7, 0xcb, 0x3d, 0xb1, 0x9f, 0xc3, 0x27, 0xb9, 0x5a, 0xf8, 0x05, 0x2a, + 0x39, 0xbe, 0xeb, 0xf8, 0x90, 0xd0, 0x0e, 0xd2, 0x72, 0xe7, 0x0e, 0xf3, 0x38, 0xb2, 0x2c, 0xf3, + 0x7a, 0xdc, 0xaf, 0x95, 0x1a, 0x63, 0x28, 0x64, 0x02, 0xb7, 0xfe, 0x6b, 0x4e, 0x79, 0xe4, 0x5d, + 0x78, 0x03, 0x2d, 0x25, 0x8f, 0x46, 0x08, 0x55, 0x18, 0xc3, 0x74, 0x6f, 0x29, 0x3a, 0x19, 0x4a, + 0xc8, 0x0e, 0x92, 0xa9, 0x50, 0x8e, 0x9e, 0xaf, 0x83, 0xa4, 0x66, 0xa6, 0x83, 0xe4, 0x99, 0x28, + 0x44, 0xe1, 0x89, 0x78, 0x00, 0xc9, 0x84, 0x16, 0x46, 0x3d, 0xd9, 0x53, 0x74, 0x32, 0x94, 0xa8, + 0xff, 0x5d, 0xc8, 0xa9, 0x92, 0x6c, 0xc5, 0x4c, 0x48, 0x83, 0xb7, 0xf2, 0x78, 0x48, 0xf6, 0x30, + 0x24, 0x1b, 0x7f, 0xab, 0x21, 0x4c, 0x87, 0x10, 0xcd, 0x41, 0xab, 0x26, 0xfd, 0xf4, 0xe8, 0xfc, + 0x13, 0xa2, 0x6f, 0x4d, 0x80, 0x25, 0xf7, 0x64, 0x45, 0x39, 0x81, 0x27, 0x05, 0x48, 0x8e, 0x07, + 0xd8, 0x41, 0xc5, 0x84, 0xba, 0x13, 0x86, 0x2c, 0x54, 0x23, 0x7b, 0xf5, 0x74, 0x87, 0xa4, 0xb8, + 0x59, 0x95, 0x0f, 0xb9, 0x54, 0xff, 0xa4, 0x5f, 0x2b, 0x66, 0xf8, 0x24, 0x8b, 0x2d, 0x4c, 0xd9, + 0x90, 0x9a, 0x9a, 0xfb, 0x17, 0xa6, 0x1e, 0xc2, 0x74, 0x53, 0x19, 0xec, 0xca, 0x0e, 0xfa, 0xef, + 0x94, 0x04, 0x9d, 0xeb, 0x5e, 0xf9, 0x4a, 0x43, 0x59, 0x1b, 0x78, 0x17, 0xcd, 0x89, 0xff, 0x59, + 0xb5, 0x61, 0xae, 0x9f, 0x6d, 0xc3, 0x1c, 0x3a, 0x1e, 0xa4, 0x8b, 0x52, 0x9c, 0x88, 0x44, 0xc1, + 0xd7, 0xd0, 0xa2, 0x07, 0x51, 0x44, 0xdb, 0xca, 0x72, 0xfa, 0xea, 0x6b, 0x26, 0x64, 0x32, 0xe0, + 0xd7, 0xef, 0xa1, 0x2b, 0x39, 0xef, 0x68, 0x5c, 0x43, 0xf3, 0x96, 0xfc, 0xe1, 0x12, 0x0e, 0xcd, + 0x9b, 0xcb, 0x62, 0xcb, 0x6c, 0xcb, 0xff, 0xac, 0x84, 0x6e, 0xde, 0x7c, 0xf5, 0xb6, 0x3a, 0xf3, + 0xfa, 0x6d, 0x75, 0xe6, 0xcd, 0xdb, 0xea, 0xcc, 0x97, 0x71, 0x55, 0x7b, 0x15, 0x57, 0xb5, 0xd7, + 0x71, 0x55, 0x7b, 0x13, 0x57, 0xb5, 0xdf, 0xe3, 0xaa, 0xf6, 0xf5, 0x1f, 0xd5, 0x99, 0x67, 0x8b, + 0x2a, 0xdf, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x72, 0xff, 0xde, 0x2e, 0xe4, 0x10, 0x00, 0x00, +} + +func (m *CSIDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + 
return dAtA[:n], nil +} + +func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.VolumeLifecycleModes) > 0 { + for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.VolumeLifecycleModes[iNdEx]) + copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if m.PodInfoOnMount != nil { + i-- + if *m.PodInfoOnMount { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if m.AttachRequired != nil { + i-- + if *m.AttachRequired { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *CSINode) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- 
+ dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Allocatable != nil { + { + size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if len(m.TopologyKeys) > 0 { + for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.TopologyKeys[iNdEx]) + copy(dAtA[i:], m.TopologyKeys[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.NodeID) + copy(dAtA[i:], m.NodeID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Drivers) > 0 { + for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *StorageClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.AllowedTopologies) > 0 { + for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.VolumeBindingMode != nil { + i -= len(*m.VolumeBindingMode) + copy(dAtA[i:], *m.VolumeBindingMode) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i-- + dAtA[i] = 0x3a + } + if m.AllowVolumeExpansion != nil { + i-- + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + if len(m.MountOptions) > 0 { + for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.MountOptions[iNdEx]) + copy(dAtA[i:], m.MountOptions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.ReclaimPolicy != nil { + i -= len(*m.ReclaimPolicy) + copy(dAtA[i:], *m.ReclaimPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i-- + dAtA[i] = 0x22 + } + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { + v := m.Parameters[string(keysForParameters[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForParameters[iNdEx]) + copy(dAtA[i:], keysForParameters[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Provisioner) + copy(dAtA[i:], m.Provisioner) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StorageClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.InlineVolumeSpec != nil { + { + size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.PersistentVolumeName != nil { + i -= len(*m.PersistentVolumeName) + copy(dAtA[i:], *m.PersistentVolumeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.NodeName) + copy(dAtA[i:], m.NodeName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i-- + dAtA[i] = 0x1a + { + size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0x12 + i -= len(m.Attacher) + copy(dAtA[i:], m.Attacher) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DetachError != nil { + { + size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + if m.AttachError != nil { + { + size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { + v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAttachmentMetadata[iNdEx]) + copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i-- + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *VolumeError) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, 
nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CSIDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSIDriverList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSIDriverSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.AttachRequired != nil { + n += 2 + } + if m.PodInfoOnMount != nil { + n += 2 + } + if len(m.VolumeLifecycleModes) > 0 { + for _, s := range m.VolumeLifecycleModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINode) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CSINodeDriver) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeID) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.TopologyKeys) > 0 { + for _, s := range m.TopologyKeys { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Allocatable != nil { + l = m.Allocatable.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *CSINodeList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CSINodeSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Drivers) > 0 { + for _, e := range m.Drivers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClass) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.AllowVolumeExpansion != nil { + n += 2 + } + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.AllowedTopologies) > 0 { + for _, e := range m.AllowedTopologies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *StorageClassList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l 
+ sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *VolumeAttachmentSource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.InlineVolumeSpec != nil { + l = m.InlineVolumeSpec.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeAttachmentSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeAttachmentStatus) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *VolumeError) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *VolumeNodeResources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Count != nil { + n += 1 + sovGenerated(uint64(*m.Count)) + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *CSIDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSIDriver{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSIDriverList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSIDriver{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSIDriverList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CSIDriverSpec) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&CSIDriverSpec{`, + `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, + `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, + `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, + `}`, + }, "") + return s +} +func (this *CSINode) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINode{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeDriver) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CSINodeDriver{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, + `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, + `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]CSINode{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&CSINodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *CSINodeSpec) String() string { + if this == nil { + return "nil" + } + repeatedStringForDrivers := "[]CSINodeDriver{" + for _, f := range this.Drivers { + repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," + } + repeatedStringForDrivers += "}" + s := strings.Join([]string{`&CSINodeSpec{`, + `Drivers:` + repeatedStringForDrivers + `,`, + `}`, + }, "") + return s +} +func (this *StorageClass) String() string { + if this == nil { + return "nil" + } + repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" + for _, f := range this.AllowedTopologies { + repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," + } + repeatedStringForAllowedTopologies += "}" + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) + } + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, + `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, + 
`}`, + }, "") + return s +} +func (this *StorageClassList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]StorageClass{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]VolumeAttachment{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" + } + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + 
strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeNodeResources) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeNodeResources{`, + `Count:` + valueToStringGenerated(this.Count) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CSIDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIDriverList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriverList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriverList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSIDriver{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSIDriverSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSIDriverSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachRequired", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AttachRequired = &b + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PodInfoOnMount", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.PodInfoOnMount = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINode) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINode: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINode: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return 
ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeDriver: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeDriver: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if 
m.Allocatable == nil { + m.Allocatable = &VolumeNodeResources{} + } + if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CSINode{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CSINodeSpec: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CSINodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drivers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drivers = append(m.Drivers, CSINodeDriver{}) + if err := m.Drivers[len(m.Drivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClass) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provisioner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Parameters == nil { + m.Parameters = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Parameters[mapkey] = mapvalue + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) + if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StorageClassList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch 
fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } 
+ if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, VolumeAttachment{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := 
skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.PersistentVolumeName = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.InlineVolumeSpec == nil { + m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} + } + if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attacher = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NodeName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Attached = bool(v != 0) + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachmentMetadata == nil { + m.AttachmentMetadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.AttachmentMetadata[mapkey] = mapvalue + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.AttachError == nil { + m.AttachError = &VolumeError{} + } + if 
err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DetachError == nil { + m.DetachError = &VolumeError{} + } + if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeError) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Count = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto new file mode 100644 index 0000000..373a154 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -0,0 +1,372 @@ +/* +Copyright The Kubernetes Authors. 
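
Every generated Unmarshal above repeats the same hand-rolled protobuf wire-format pattern: varint-decode a tag, split it into field number and wire type, then either varint-decode a scalar or read a length-delimited payload, falling back to skipGenerated for unknown fields. A minimal standalone Go sketch of that pattern follows; it is independent of the generated code and uses a made-up two-field message purely for illustration.

package main

import (
	"errors"
	"fmt"
)

// readUvarint decodes a base-128 varint, the same "shift += 7" loop the
// generated Unmarshal functions inline at every field.
func readUvarint(data []byte, i int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if i >= len(data) {
			return 0, 0, errors.New("unexpected EOF")
		}
		b := data[i]
		i++
		v |= uint64(b&0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, i, nil
}

func main() {
	// Made-up message: field 1 (varint) = 300, field 2 (length-delimited) = "hi".
	// 0x08 is tag (1<<3 | 0); 0xAC 0x02 encodes 300; 0x12 is tag (2<<3 | 2).
	msg := []byte{0x08, 0xAC, 0x02, 0x12, 0x02, 'h', 'i'}

	for i := 0; i < len(msg); {
		wire, next, err := readUvarint(msg, i)
		if err != nil {
			panic(err)
		}
		i = next
		fieldNum, wireType := int32(wire>>3), int(wire&0x7)
		switch wireType {
		case 0: // varint scalar
			v, next, _ := readUvarint(msg, i)
			i = next
			fmt.Printf("field %d (varint) = %d\n", fieldNum, v)
		case 2: // length-delimited: a length varint, then that many payload bytes
			l, next, _ := readUvarint(msg, i)
			i = next
			fmt.Printf("field %d (bytes) = %q\n", fieldNum, msg[i:i+int(l)])
			i += int(l)
		default:
			panic(fmt.Sprintf("unhandled wire type %d", wireType))
		}
	}
}
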
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.storage.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// CSIDriver captures information about a Container Storage Interface (CSI) +// volume driver deployed on the cluster. +// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the +// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically +// creates a CSIDriver object representing the driver. +// Kubernetes attach detach controller uses this object to determine whether attach is required. +// Kubelet uses this object to determine whether pod information needs to be passed on mount. +// CSIDriver objects are non-namespaced. +message CSIDriver { + // Standard object metadata. + // metadata.Name indicates the name of the CSI driver that this object + // refers to; it MUST be the same name returned by the CSI GetPluginName() + // call for that driver. + // The driver name must be 63 characters or less, beginning and ending with + // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the CSI Driver. + optional CSIDriverSpec spec = 2; +} + +// CSIDriverList is a collection of CSIDriver objects. +message CSIDriverList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSIDriver + repeated CSIDriver items = 2; +} + +// CSIDriverSpec is the specification of a CSIDriver. +message CSIDriverSpec { + // attachRequired indicates this CSI volume driver requires an attach + // operation (because it implements the CSI ControllerPublishVolume() + // method), and that the Kubernetes attach detach controller should call + // the attach volume interface which checks the volumeattachment status + // and waits until the volume is attached before proceeding to mounting. + // The CSI external-attacher coordinates with CSI volume driver and updates + // the volumeattachment status when the attach operation is complete. + // If the CSIDriverRegistry feature gate is enabled and the value is + // specified to false, the attach operation will be skipped. + // Otherwise the attach operation will be called. 
+ // +optional + optional bool attachRequired = 1; + + // If set to true, podInfoOnMount indicates this CSI volume driver + // requires additional pod information (like podName, podUID, etc.) during + // mount operations. + // If set to false, pod information will not be passed on mount. + // Default is false. + // The CSI driver specifies podInfoOnMount as part of driver deployment. + // If true, Kubelet will pass pod information as VolumeContext in the CSI + // NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information + // passed in as VolumeContext. + // The following VolumeConext will be passed if podInfoOnMount is set to true. + // This list might grow, but the prefix will be used. + // "csi.storage.k8s.io/pod.name": pod.Name + // "csi.storage.k8s.io/pod.namespace": pod.Namespace + // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. + // +optional + optional bool podInfoOnMount = 2; + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + repeated string volumeLifecycleModes = 3; +} + +// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. +// See the release notes for more information. +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +message CSINode { + // metadata.name must be the Kubernetes node name. 
+ optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // spec is the specification of CSINode + optional CSINodeSpec spec = 2; +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +message CSINodeDriver { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + optional string name = 1; + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + optional string nodeID = 2; + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + repeated string topologyKeys = 3; + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + optional VolumeNodeResources allocatable = 4; +} + +// CSINodeList is a collection of CSINode objects. +message CSINodeList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // items is the list of CSINode + repeated CSINode items = 2; +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +message CSINodeSpec { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + repeated CSINodeDriver drivers = 1; +} + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +message StorageClass { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Provisioner indicates the type of the provisioner. + optional string provisioner = 2; + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. 
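
The CSINodeDriver/CSINodeSpec messages above map a driver name to the storage system's own node ID, its topology keys, and an optional allocatable volume count. As a rough illustration, the sketch below builds a CSINode value with the Go types this patch vendors for these messages; the driver name, node ID, and topology key are made up.

package main

import (
	"encoding/json"
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	maxVols := int32(16)

	// A CSINode is named after the Kubernetes node and lists the CSI drivers
	// registered on it, including the driver's own ID for that node and the
	// topology keys the driver reports.
	node := storagev1beta1.CSINode{
		ObjectMeta: metav1.ObjectMeta{Name: "node1"},
		Spec: storagev1beta1.CSINodeSpec{
			Drivers: []storagev1beta1.CSINodeDriver{{
				Name:         "ebs.csi.example.com", // hypothetical driver name
				NodeID:       "nodeA",               // the storage system's name for node1
				TopologyKeys: []string{"topology.example.com/zone"},
				Allocatable:  &storagev1beta1.VolumeNodeResources{Count: &maxVols},
			}},
		},
	}

	out, _ := json.MarshalIndent(node, "", "  ")
	fmt.Println(string(out))
}
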
+ // +optional + map parameters = 3; + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. + // +optional + optional string reclaimPolicy = 4; + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + repeated string mountOptions = 5; + + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + optional bool allowVolumeExpansion = 6; + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is only honored by servers that enable the VolumeScheduling feature. + // +optional + optional string volumeBindingMode = 7; + + // Restrict the node topologies where volumes can be dynamically provisioned. + // Each volume plugin defines its own supported topology specifications. + // An empty TopologySelectorTerm list means there is no topology restriction. + // This field is only honored by servers that enable the VolumeScheduling feature. + // +optional + repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; +} + +// StorageClassList is a collection of storage classes. +message StorageClassList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of StorageClasses + repeated StorageClass items = 2; +} + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. +message VolumeAttachment { + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + optional VolumeAttachmentSpec spec = 2; + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeAttachmentStatus status = 3; +} + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +message VolumeAttachmentList { + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // Items is the list of VolumeAttachments + repeated VolumeAttachment items = 2; +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +message VolumeAttachmentSource { + // Name of the persistent volume to attach. + // +optional + optional string persistentVolumeName = 1; + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. 
It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +message VolumeAttachmentSpec { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + optional string attacher = 1; + + // Source represents the volume that should be attached. + optional VolumeAttachmentSource source = 2; + + // The node that the volume should be attached to. + optional string nodeName = 3; +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +message VolumeAttachmentStatus { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + optional bool attached = 1; + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + map attachmentMetadata = 2; + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError attachError = 3; + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + optional VolumeError detachError = 4; +} + +// VolumeError captures an error encountered during a volume operation. +message VolumeError { + // Time the error was encountered. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; + + // String detailing the error encountered during Attach or Detach operation. + // This string may be logged, so it should not contain sensitive + // information. + // +optional + optional string message = 2; +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +message VolumeNodeResources { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is nil, then the supported number of volumes on this node is unbounded. + // +optional + optional int32 count = 1; +} + diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go new file mode 100644 index 0000000..c270ace --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -0,0 +1,62 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "storage.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &StorageClass{}, + &StorageClassList{}, + + &VolumeAttachment{}, + &VolumeAttachmentList{}, + + &CSIDriver{}, + &CSIDriverList{}, + + &CSINode{}, + &CSINodeList{}, + ) + + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go new file mode 100644 index 0000000..a8faeb9 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -0,0 +1,440 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageClass describes the parameters for a class of storage for +// which PersistentVolumes can be dynamically provisioned. +// +// StorageClasses are non-namespaced; the name of the storage class +// according to etcd is in ObjectMeta.Name. +type StorageClass struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Provisioner indicates the type of the provisioner. 
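
register.go above exposes these kinds through SchemeBuilder/AddToScheme. The sketch below shows how a caller might register them with an apimachinery Scheme and query it; this is illustrative usage, not code added by this patch.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	// Register the storage.k8s.io/v1beta1 kinds (StorageClass, CSINode,
	// VolumeAttachment, ...) with a fresh Scheme, which is how the
	// SchemeBuilder/AddToScheme pair in register.go is meant to be used.
	scheme := runtime.NewScheme()
	if err := storagev1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// The Scheme can now report the GroupVersionKinds it knows for a value.
	gvks, _, err := scheme.ObjectKinds(&storagev1beta1.StorageClass{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // [storage.k8s.io/v1beta1, Kind=StorageClass]
}
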
+ Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` + + // Parameters holds the parameters for the provisioner that should + // create volumes of this storage class. + // +optional + Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with this reclaimPolicy. Defaults to Delete. + // +optional + ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` + + // Dynamically provisioned PersistentVolumes of this storage class are + // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - + // mount of the PVs will simply fail if one is invalid. + // +optional + MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` + + // AllowVolumeExpansion shows whether the storage class allow volume expand + // +optional + AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` + + // VolumeBindingMode indicates how PersistentVolumeClaims should be + // provisioned and bound. When unset, VolumeBindingImmediate is used. + // This field is only honored by servers that enable the VolumeScheduling feature. + // +optional + VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` + + // Restrict the node topologies where volumes can be dynamically provisioned. + // Each volume plugin defines its own supported topology specifications. + // An empty TopologySelectorTerm list means there is no topology restriction. + // This field is only honored by servers that enable the VolumeScheduling feature. + // +optional + AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// StorageClassList is a collection of storage classes. +type StorageClassList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of StorageClasses + Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. +type VolumeBindingMode string + +const ( + // VolumeBindingImmediate indicates that PersistentVolumeClaims should be + // immediately provisioned and bound. This is the default mode. + VolumeBindingImmediate VolumeBindingMode = "Immediate" + + // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims + // should not be provisioned and bound until the first Pod is created that + // references the PeristentVolumeClaim. The volume provisioning and + // binding will occur during Pod scheduing. + VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachment captures the intent to attach or detach the specified volume +// to/from the specified node. +// +// VolumeAttachment objects are non-namespaced. 
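
With the StorageClass type and VolumeBindingMode constants above, a StorageClass object can be built directly; the optional fields are pointers so that unset and explicitly-set values stay distinguishable. A rough sketch, with a provisioner name and parameters invented for illustration:

package main

import (
	"encoding/json"
	"fmt"

	v1 "k8s.io/api/core/v1"
	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	reclaim := v1.PersistentVolumeReclaimRetain
	expand := true
	mode := storagev1beta1.VolumeBindingWaitForFirstConsumer

	// StorageClass is cluster-scoped; only Provisioner is required.
	sc := storagev1beta1.StorageClass{
		ObjectMeta:           metav1.ObjectMeta{Name: "fast-local"},
		Provisioner:          "csi.example.com", // hypothetical provisioner name
		Parameters:           map[string]string{"type": "ssd"},
		ReclaimPolicy:        &reclaim,
		MountOptions:         []string{"ro"},
		AllowVolumeExpansion: &expand,
		VolumeBindingMode:    &mode,
	}

	out, _ := json.MarshalIndent(sc, "", "  ")
	fmt.Println(string(out))
}
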
+type VolumeAttachment struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the desired attach/detach volume behavior. + // Populated by the Kubernetes system. + Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` + + // Status of the VolumeAttachment request. + // Populated by the entity completing the attach or detach + // operation, i.e. the external-attacher. + // +optional + Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// VolumeAttachmentList is a collection of VolumeAttachment objects. +type VolumeAttachmentList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Items is the list of VolumeAttachments + Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// VolumeAttachmentSpec is the specification of a VolumeAttachment request. +type VolumeAttachmentSpec struct { + // Attacher indicates the name of the volume driver that MUST handle this + // request. This is the name returned by GetPluginName(). + Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` + + // Source represents the volume that should be attached. + Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` + + // The node that the volume should be attached to. + NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` +} + +// VolumeAttachmentSource represents a volume that should be attached. +// Right now only PersistenVolumes can be attached via external attacher, +// in future we may allow also inline volumes in pods. +// Exactly one member can be set. +type VolumeAttachmentSource struct { + // Name of the persistent volume to attach. + // +optional + PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` + + // inlineVolumeSpec contains all the information necessary to attach + // a persistent volume defined by a pod's inline VolumeSource. This field + // is populated only for the CSIMigration feature. It contains + // translated fields from a pod's inline VolumeSource to a + // PersistentVolumeSpec. This field is alpha-level and is only + // honored by servers that enabled the CSIMigration feature. + // +optional + InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` +} + +// VolumeAttachmentStatus is the status of a VolumeAttachment request. +type VolumeAttachmentStatus struct { + // Indicates the volume is successfully attached. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` + + // Upon successful attach, this field is populated with any + // information returned by the attach operation that must be passed + // into subsequent WaitForAttach or Mount calls. 
+ // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` + + // The last error encountered during attach operation, if any. + // This field must only be set by the entity completing the attach + // operation, i.e. the external-attacher. + // +optional + AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` + + // The last error encountered during detach operation, if any. + // This field must only be set by the entity completing the detach + // operation, i.e. the external-attacher. + // +optional + DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` +} + +// VolumeError captures an error encountered during a volume operation. +type VolumeError struct { + // Time the error was encountered. + // +optional + Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` + + // String detailing the error encountered during Attach or Detach operation. + // This string may be logged, so it should not contain sensitive + // information. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` +} + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSIDriver captures information about a Container Storage Interface (CSI) +// volume driver deployed on the cluster. +// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the +// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically +// creates a CSIDriver object representing the driver. +// Kubernetes attach detach controller uses this object to determine whether attach is required. +// Kubelet uses this object to determine whether pod information needs to be passed on mount. +// CSIDriver objects are non-namespaced. +type CSIDriver struct { + metav1.TypeMeta `json:",inline"` + + // Standard object metadata. + // metadata.Name indicates the name of the CSI driver that this object + // refers to; it MUST be the same name returned by the CSI GetPluginName() + // call for that driver. + // The driver name must be 63 characters or less, beginning and ending with + // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and + // alphanumerics between. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Specification of the CSI Driver. + Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSIDriverList is a collection of CSIDriver objects. +type CSIDriverList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSIDriver + Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// CSIDriverSpec is the specification of a CSIDriver. 
+type CSIDriverSpec struct { + // attachRequired indicates this CSI volume driver requires an attach + // operation (because it implements the CSI ControllerPublishVolume() + // method), and that the Kubernetes attach detach controller should call + // the attach volume interface which checks the volumeattachment status + // and waits until the volume is attached before proceeding to mounting. + // The CSI external-attacher coordinates with CSI volume driver and updates + // the volumeattachment status when the attach operation is complete. + // If the CSIDriverRegistry feature gate is enabled and the value is + // specified to false, the attach operation will be skipped. + // Otherwise the attach operation will be called. + // +optional + AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` + + // If set to true, podInfoOnMount indicates this CSI volume driver + // requires additional pod information (like podName, podUID, etc.) during + // mount operations. + // If set to false, pod information will not be passed on mount. + // Default is false. + // The CSI driver specifies podInfoOnMount as part of driver deployment. + // If true, Kubelet will pass pod information as VolumeContext in the CSI + // NodePublishVolume() calls. + // The CSI driver is responsible for parsing and validating the information + // passed in as VolumeContext. + // The following VolumeConext will be passed if podInfoOnMount is set to true. + // This list might grow, but the prefix will be used. + // "csi.storage.k8s.io/pod.name": pod.Name + // "csi.storage.k8s.io/pod.namespace": pod.Namespace + // "csi.storage.k8s.io/pod.uid": string(pod.UID) + // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume + // defined by a CSIVolumeSource, otherwise "false" + // + // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only + // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. + // Other drivers can leave pod info disabled and/or ignore this field. + // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when + // deployed on such a cluster and the deployment determines which mode that is, for example + // via a command line parameter of the driver. + // +optional + PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` + + // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. + // The default if the list is empty is "Persistent", which is the usage + // defined by the CSI specification and implemented in Kubernetes via the usual + // PV/PVC mechanism. + // The other mode is "Ephemeral". In this mode, volumes are defined inline + // inside the pod spec with CSIVolumeSource and their lifecycle is tied to + // the lifecycle of that pod. A driver has to be aware of this + // because it is only going to get a NodePublishVolume call for such a volume. + // For more information about implementing this mode, see + // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html + // A driver can support one or more of these modes and + // more modes may be added in the future. + // +optional + VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` +} + +// VolumeLifecycleMode is an enumeration of possible usage modes for a volume +// provided by a CSI driver. More modes may be added in the future. 
+type VolumeLifecycleMode string + +const ( + // VolumeLifecyclePersistent explicitly confirms that the driver implements + // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not + // set. Such volumes are managed in Kubernetes via the persistent volume + // claim mechanism and have a lifecycle that is independent of the pods which + // use them. + VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" + + // VolumeLifecycleEphemeral indicates that the driver can be used for + // ephemeral inline volumes. Such volumes are specified inside the pod + // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have + // a lifecycle that is tied to the lifecycle of the pod. For example, such + // a volume might contain data that gets created specifically for that pod, + // like secrets. + // But how the volume actually gets created and managed is entirely up to + // the driver. It might also use reference counting to share the same volume + // instance among different pods if the CSIVolumeSource of those pods is + // identical. + VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. +// See the release notes for more information. +// CSINode holds information about all CSI drivers installed on a node. +// CSI drivers do not need to create the CSINode object directly. As long as +// they use the node-driver-registrar sidecar container, the kubelet will +// automatically populate the CSINode object for the CSI driver as part of +// kubelet plugin registration. +// CSINode has the same name as a node. If the object is missing, it means either +// there are no CSI Drivers available on the node, or the Kubelet version is low +// enough that it doesn't create this object. +// CSINode has an OwnerReference that points to the corresponding node object. +type CSINode struct { + metav1.TypeMeta `json:",inline"` + + // metadata.name must be the Kubernetes node name. + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // spec is the specification of CSINode + Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` +} + +// CSINodeSpec holds information about the specification of all CSI drivers installed on a node +type CSINodeSpec struct { + // drivers is a list of information of all CSI Drivers existing on a node. + // If all drivers in the list are uninstalled, this can become empty. + // +patchMergeKey=name + // +patchStrategy=merge + Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` +} + +// CSINodeDriver holds information about the specification of one CSI driver installed on a node +type CSINodeDriver struct { + // This is the name of the CSI driver that this object refers to. + // This MUST be the same name returned by the CSI GetPluginName() call for + // that driver. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // nodeID of the node from the driver point of view. + // This field enables Kubernetes to communicate with storage systems that do + // not share the same nomenclature for nodes. For example, Kubernetes may + // refer to a given node as "node1", but the storage system may refer to + // the same node as "nodeA". 
When Kubernetes issues a command to the storage + // system to attach a volume to a specific node, it can use this field to + // refer to the node name using the ID that the storage system will + // understand, e.g. "nodeA" instead of "node1". This field is required. + NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` + + // topologyKeys is the list of keys supported by the driver. + // When a driver is initialized on a cluster, it provides a set of topology + // keys that it understands (e.g. "company.com/zone", "company.com/region"). + // When a driver is initialized on a node, it provides the same topology keys + // along with values. Kubelet will expose these topology keys as labels + // on its own node object. + // When Kubernetes does topology aware provisioning, it can use this list to + // determine which labels it should retrieve from the node object and pass + // back to the driver. + // It is possible for different nodes to use different topology keys. + // This can be empty if driver does not support topology. + // +optional + TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` + + // allocatable represents the volume resources of a node that are available for scheduling. + // +optional + Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` +} + +// VolumeNodeResources is a set of resource limits for scheduling of volumes. +type VolumeNodeResources struct { + // Maximum number of unique volumes managed by the CSI driver that can be used on a node. + // A volume that is both attached and mounted on a node is considered to be used once, not twice. + // The same rule applies for a unique volume that is shared among multiple pods on the same node. + // If this field is nil, then the supported number of volumes on this node is unbounded. + // +optional + Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CSINodeList is a collection of CSINode objects. +type CSINodeList struct { + metav1.TypeMeta `json:",inline"` + + // Standard list metadata + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items is the list of CSINode + Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..53fa666 --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,200 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. 
Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_CSIDriver = map[string]string{ + "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", + "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the CSI Driver.", +} + +func (CSIDriver) SwaggerDoc() map[string]string { + return map_CSIDriver +} + +var map_CSIDriverList = map[string]string{ + "": "CSIDriverList is a collection of CSIDriver objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSIDriver", +} + +func (CSIDriverList) SwaggerDoc() map[string]string { + return map_CSIDriverList +} + +var map_CSIDriverSpec = map[string]string{ + "": "CSIDriverSpec is the specification of a CSIDriver.", + "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", + "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. 
\"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", + "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", +} + +func (CSIDriverSpec) SwaggerDoc() map[string]string { + return map_CSIDriverSpec +} + +var map_CSINode = map[string]string{ + "": "DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. See the release notes for more information. CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", + "metadata": "metadata.name must be the Kubernetes node name.", + "spec": "spec is the specification of CSINode", +} + +func (CSINode) SwaggerDoc() map[string]string { + return map_CSINode +} + +var map_CSINodeDriver = map[string]string{ + "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", + "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", + "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", + "topologyKeys": "topologyKeys is the list of keys supported by the driver. 
When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", + "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", +} + +func (CSINodeDriver) SwaggerDoc() map[string]string { + return map_CSINodeDriver +} + +var map_CSINodeList = map[string]string{ + "": "CSINodeList is a collection of CSINode objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "items is the list of CSINode", +} + +func (CSINodeList) SwaggerDoc() map[string]string { + return map_CSINodeList +} + +var map_CSINodeSpec = map[string]string{ + "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", + "drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", +} + +func (CSINodeSpec) SwaggerDoc() map[string]string { + return map_CSINodeSpec +} + +var map_StorageClass = map[string]string{ + "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "provisioner": "Provisioner indicates the type of the provisioner.", + "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", + "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", + "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", + "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", + "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", + "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. 
This field is only honored by servers that enable the VolumeScheduling feature.", +} + +func (StorageClass) SwaggerDoc() map[string]string { + return map_StorageClass +} + +var map_StorageClassList = map[string]string{ + "": "StorageClassList is a collection of storage classes.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of StorageClasses", +} + +func (StorageClassList) SwaggerDoc() map[string]string { + return map_StorageClassList +} + +var map_VolumeAttachment = map[string]string{ + "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", + "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachment) SwaggerDoc() map[string]string { + return map_VolumeAttachment +} + +var map_VolumeAttachmentList = map[string]string{ + "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "items": "Items is the list of VolumeAttachments", +} + +func (VolumeAttachmentList) SwaggerDoc() map[string]string { + return map_VolumeAttachmentList +} + +var map_VolumeAttachmentSource = map[string]string{ + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "persistentVolumeName": "Name of the persistent volume to attach.", +} + +func (VolumeAttachmentSource) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSource +} + +var map_VolumeAttachmentSpec = map[string]string{ + "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", + "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. This is the name returned by GetPluginName().", + "source": "Source represents the volume that should be attached.", + "nodeName": "The node that the volume should be attached to.", +} + +func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { + return map_VolumeAttachmentSpec +} + +var map_VolumeAttachmentStatus = map[string]string{ + "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", + "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", + "detachError": "The last error encountered during detach operation, if any. 
This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", +} + +func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { + return map_VolumeAttachmentStatus +} + +var map_VolumeError = map[string]string{ + "": "VolumeError captures an error encountered during a volume operation.", + "time": "Time the error was encountered.", + "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", +} + +func (VolumeError) SwaggerDoc() map[string]string { + return map_VolumeError +} + +var map_VolumeNodeResources = map[string]string{ + "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", + "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", +} + +func (VolumeNodeResources) SwaggerDoc() map[string]string { + return map_VolumeNodeResources +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..52433fc --- /dev/null +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,494 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriver) DeepCopyInto(out *CSIDriver) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver. +func (in *CSIDriver) DeepCopy() *CSIDriver { + if in == nil { + return nil + } + out := new(CSIDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIDriver) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSIDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList. +func (in *CSIDriverList) DeepCopy() *CSIDriverList { + if in == nil { + return nil + } + out := new(CSIDriverList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSIDriverList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { + *out = *in + if in.AttachRequired != nil { + in, out := &in.AttachRequired, &out.AttachRequired + *out = new(bool) + **out = **in + } + if in.PodInfoOnMount != nil { + in, out := &in.PodInfoOnMount, &out.PodInfoOnMount + *out = new(bool) + **out = **in + } + if in.VolumeLifecycleModes != nil { + in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes + *out = make([]VolumeLifecycleMode, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. +func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { + if in == nil { + return nil + } + out := new(CSIDriverSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINode) DeepCopyInto(out *CSINode) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. +func (in *CSINode) DeepCopy() *CSINode { + if in == nil { + return nil + } + out := new(CSINode) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINode) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { + *out = *in + if in.TopologyKeys != nil { + in, out := &in.TopologyKeys, &out.TopologyKeys + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Allocatable != nil { + in, out := &in.Allocatable, &out.Allocatable + *out = new(VolumeNodeResources) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. +func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { + if in == nil { + return nil + } + out := new(CSINodeDriver) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CSINode, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. +func (in *CSINodeList) DeepCopy() *CSINodeList { + if in == nil { + return nil + } + out := new(CSINodeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CSINodeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { + *out = *in + if in.Drivers != nil { + in, out := &in.Drivers, &out.Drivers + *out = make([]CSINodeDriver, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. +func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { + if in == nil { + return nil + } + out := new(CSINodeSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StorageClass) DeepCopyInto(out *StorageClass) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + if in.Parameters != nil { + in, out := &in.Parameters, &out.Parameters + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.ReclaimPolicy != nil { + in, out := &in.ReclaimPolicy, &out.ReclaimPolicy + *out = new(v1.PersistentVolumeReclaimPolicy) + **out = **in + } + if in.MountOptions != nil { + in, out := &in.MountOptions, &out.MountOptions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.AllowVolumeExpansion != nil { + in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion + *out = new(bool) + **out = **in + } + if in.VolumeBindingMode != nil { + in, out := &in.VolumeBindingMode, &out.VolumeBindingMode + *out = new(VolumeBindingMode) + **out = **in + } + if in.AllowedTopologies != nil { + in, out := &in.AllowedTopologies, &out.AllowedTopologies + *out = make([]v1.TopologySelectorTerm, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass. +func (in *StorageClass) DeepCopy() *StorageClass { + if in == nil { + return nil + } + out := new(StorageClass) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClass) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]StorageClass, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList. +func (in *StorageClassList) DeepCopy() *StorageClassList { + if in == nil { + return nil + } + out := new(StorageClassList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *StorageClassList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Spec.DeepCopyInto(&out.Spec) + in.Status.DeepCopyInto(&out.Status) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. +func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { + if in == nil { + return nil + } + out := new(VolumeAttachment) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachment) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]VolumeAttachment, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. +func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { + if in == nil { + return nil + } + out := new(VolumeAttachmentList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { + *out = *in + if in.PersistentVolumeName != nil { + in, out := &in.PersistentVolumeName, &out.PersistentVolumeName + *out = new(string) + **out = **in + } + if in.InlineVolumeSpec != nil { + in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec + *out = new(v1.PersistentVolumeSpec) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. 
+func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { + if in == nil { + return nil + } + out := new(VolumeAttachmentSource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { + *out = *in + in.Source.DeepCopyInto(&out.Source) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. +func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { + if in == nil { + return nil + } + out := new(VolumeAttachmentSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { + *out = *in + if in.AttachmentMetadata != nil { + in, out := &in.AttachmentMetadata, &out.AttachmentMetadata + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.AttachError != nil { + in, out := &in.AttachError, &out.AttachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + if in.DetachError != nil { + in, out := &in.DetachError, &out.DetachError + *out = new(VolumeError) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. +func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { + if in == nil { + return nil + } + out := new(VolumeAttachmentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeError) DeepCopyInto(out *VolumeError) { + *out = *in + in.Time.DeepCopyInto(&out.Time) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. +func (in *VolumeError) DeepCopy() *VolumeError { + if in == nil { + return nil + } + out := new(VolumeError) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { + *out = *in + if in.Count != nil { + in, out := &in.Count, &out.Count + *out = new(int32) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. +func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { + if in == nil { + return nil + } + out := new(VolumeNodeResources) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/LICENSE b/vendor/k8s.io/apimachinery/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/k8s.io/apimachinery/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
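For orientation, here is a minimal sketch of how the vendored storage/v1beta1 types and their generated DeepCopy methods above are typically consumed. It is only an illustrative sketch: the object name "fast-local", the provisioner string, and the import aliases are assumptions made up for the example, while StorageClass, VolumeBindingWaitForFirstConsumer, and DeepCopy are the vendored identifiers shown in the files above.

package main

import (
	"fmt"

	storagev1beta1 "k8s.io/api/storage/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// WaitForFirstConsumer delays volume binding until a Pod using the claim is scheduled.
	mode := storagev1beta1.VolumeBindingWaitForFirstConsumer

	sc := storagev1beta1.StorageClass{
		ObjectMeta:        metav1.ObjectMeta{Name: "fast-local"}, // illustrative name
		Provisioner:       "kubernetes.io/no-provisioner",        // illustrative provisioner
		VolumeBindingMode: &mode,
	}

	// DeepCopy comes from the generated zz_generated.deepcopy.go above; the clone
	// can be mutated without affecting the original.
	clone := sc.DeepCopy()
	fmt.Println(clone.Name, *clone.VolumeBindingMode)
}

The pointer-valued optional fields (ReclaimPolicy, AllowVolumeExpansion, VolumeBindingMode) are what force the generated deepcopy code above to allocate fresh values rather than copy the struct shallowly.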
diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS new file mode 100644 index 0000000..435297a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -0,0 +1,25 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- erictune +- saad-ali +- janetkuo +- tallclair +- eparis +- dims +- hongchaodeng +- krousey +- cjcullen +- david-mcmahon diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go new file mode 100644 index 0000000..167baf6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors provides detailed error types for api field validation. +package errors // import "k8s.io/apimachinery/pkg/api/errors" diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go new file mode 100644 index 0000000..e53c3e6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -0,0 +1,691 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/validation/field" +) + +const ( + // StatusTooManyRequests means the server experienced too many requests within a + // given window and that the client must wait to perform the action again. + // DEPRECATED: please use http.StatusTooManyRequests, this will be removed in + // the future version. + StatusTooManyRequests = http.StatusTooManyRequests +) + +// StatusError is an error intended for consumption by a REST API server; it can also be +// reconstructed by clients from a REST response. Public to allow easy type switches. +type StatusError struct { + ErrStatus metav1.Status +} + +// APIStatus is exposed by errors that can be converted to an api.Status object +// for finer grained details. +type APIStatus interface { + Status() metav1.Status +} + +var _ error = &StatusError{} + +// Error implements the Error interface. 
+func (e *StatusError) Error() string { + return e.ErrStatus.Message +} + +// Status allows access to e's status without having to know the detailed workings +// of StatusError. +func (e *StatusError) Status() metav1.Status { + return e.ErrStatus +} + +// DebugError reports extended info about the error to debug output. +func (e *StatusError) DebugError() (string, []interface{}) { + if out, err := json.MarshalIndent(e.ErrStatus, "", " "); err == nil { + return "server response object: %s", []interface{}{string(out)} + } + return "server response object: %#v", []interface{}{e.ErrStatus} +} + +// HasStatusCause returns true if the provided error has a details cause +// with the provided type name. +func HasStatusCause(err error, name metav1.CauseType) bool { + _, ok := StatusCause(err, name) + return ok +} + +// StatusCause returns the named cause from the provided error if it exists and +// the error is of the type APIStatus. Otherwise it returns false. +func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) { + apierr, ok := err.(APIStatus) + if !ok || apierr == nil || apierr.Status().Details == nil { + return metav1.StatusCause{}, false + } + for _, cause := range apierr.Status().Details.Causes { + if cause.Type == name { + return cause, true + } + } + return metav1.StatusCause{}, false +} + +// UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. +type UnexpectedObjectError struct { + Object runtime.Object +} + +// Error returns an error message describing 'u'. +func (u *UnexpectedObjectError) Error() string { + return fmt.Sprintf("unexpected object: %v", u.Object) +} + +// FromObject generates an StatusError from an metav1.Status, if that is the type of obj; otherwise, +// returns an UnexpecteObjectError. +func FromObject(obj runtime.Object) error { + switch t := obj.(type) { + case *metav1.Status: + return &StatusError{ErrStatus: *t} + case runtime.Unstructured: + var status metav1.Status + obj := t.UnstructuredContent() + if !reflect.DeepEqual(obj["kind"], "Status") { + break + } + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil { + return err + } + if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" { + break + } + return &StatusError{ErrStatus: status} + } + return &UnexpectedObjectError{obj} +} + +// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found. +func NewNotFound(qualifiedResource schema.GroupResource, name string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotFound, + Reason: metav1.StatusReasonNotFound, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q not found", qualifiedResource.String(), name), + }} +} + +// NewAlreadyExists returns an error indicating the item requested exists by that identifier. 
+func NewAlreadyExists(qualifiedResource schema.GroupResource, name string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonAlreadyExists, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("%s %q already exists", qualifiedResource.String(), name), + }} +} + +// NewUnauthorized returns an error indicating the client is not authorized to perform the requested +// action. +func NewUnauthorized(reason string) *StatusError { + message := reason + if len(message) == 0 { + message = "not authorized" + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusUnauthorized, + Reason: metav1.StatusReasonUnauthorized, + Message: message, + }} +} + +// NewForbidden returns an error indicating the requested action was forbidden +func NewForbidden(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + var message string + if qualifiedResource.Empty() { + message = fmt.Sprintf("forbidden: %v", err) + } else if name == "" { + message = fmt.Sprintf("%s is forbidden: %v", qualifiedResource.String(), err) + } else { + message = fmt.Sprintf("%s %q is forbidden: %v", qualifiedResource.String(), name, err) + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusForbidden, + Reason: metav1.StatusReasonForbidden, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: message, + }} +} + +// NewConflict returns an error indicating the item can't be updated as provided. +func NewConflict(qualifiedResource schema.GroupResource, name string, err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonConflict, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + }, + Message: fmt.Sprintf("Operation cannot be fulfilled on %s %q: %v", qualifiedResource.String(), name, err), + }} +} + +// NewApplyConflict returns an error including details on the requests apply conflicts +func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError { + return &StatusError{ErrStatus: metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusConflict, + Reason: metav1.StatusReasonConflict, + Details: &metav1.StatusDetails{ + // TODO: Get obj details here? + Causes: causes, + }, + Message: message, + }} +} + +// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. +// DEPRECATED: Please use NewResourceExpired instead. +func NewGone(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusGone, + Reason: metav1.StatusReasonGone, + Message: message, + }} +} + +// NewResourceExpired creates an error that indicates that the requested resource content has expired from +// the server (usually due to a resourceVersion that is too old). +func NewResourceExpired(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusGone, + Reason: metav1.StatusReasonExpired, + Message: message, + }} +} + +// NewInvalid returns an error indicating the item is invalid and cannot be processed. 
+func NewInvalid(qualifiedKind schema.GroupKind, name string, errs field.ErrorList) *StatusError { + causes := make([]metav1.StatusCause, 0, len(errs)) + for i := range errs { + err := errs[i] + causes = append(causes, metav1.StatusCause{ + Type: metav1.CauseType(err.Type), + Message: err.ErrorBody(), + Field: err.Field, + }) + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusUnprocessableEntity, + Reason: metav1.StatusReasonInvalid, + Details: &metav1.StatusDetails{ + Group: qualifiedKind.Group, + Kind: qualifiedKind.Kind, + Name: name, + Causes: causes, + }, + Message: fmt.Sprintf("%s %q is invalid: %v", qualifiedKind.String(), name, errs.ToAggregate()), + }} +} + +// NewBadRequest creates an error that indicates that the request is invalid and can not be processed. +func NewBadRequest(reason string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusBadRequest, + Reason: metav1.StatusReasonBadRequest, + Message: reason, + }} +} + +// NewTooManyRequests creates an error that indicates that the client must try again later because +// the specified endpoint is not accepting requests. More specific details should be provided +// if client should know why the failure was limited4. +func NewTooManyRequests(message string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusTooManyRequests, + Reason: metav1.StatusReasonTooManyRequests, + Message: message, + Details: &metav1.StatusDetails{ + RetryAfterSeconds: int32(retryAfterSeconds), + }, + }} +} + +// NewServiceUnavailable creates an error that indicates that the requested service is unavailable. +func NewServiceUnavailable(reason string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusServiceUnavailable, + Reason: metav1.StatusReasonServiceUnavailable, + Message: reason, + }} +} + +// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind. +func NewMethodNotSupported(qualifiedResource schema.GroupResource, action string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusMethodNotAllowed, + Reason: metav1.StatusReasonMethodNotAllowed, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + }, + Message: fmt.Sprintf("%s is not supported on resources of kind %q", action, qualifiedResource.String()), + }} +} + +// NewServerTimeout returns an error indicating the requested action could not be completed due to a +// transient error, and the client should try again. +func NewServerTimeout(qualifiedResource schema.GroupResource, operation string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonServerTimeout, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: operation, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: fmt.Sprintf("The %s operation against %s could not be completed at this time, please try again.", operation, qualifiedResource.String()), + }} +} + +// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we +// happened to be looking at when the request failed. 
This delegates to keep code sane, but we should work towards removing this. +func NewServerTimeoutForKind(qualifiedKind schema.GroupKind, operation string, retryAfterSeconds int) *StatusError { + return NewServerTimeout(schema.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds) +} + +// NewInternalError returns an error indicating the item is invalid and cannot be processed. +func NewInternalError(err error) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusInternalServerError, + Reason: metav1.StatusReasonInternalError, + Details: &metav1.StatusDetails{ + Causes: []metav1.StatusCause{{Message: err.Error()}}, + }, + Message: fmt.Sprintf("Internal error occurred: %v", err), + }} +} + +// NewTimeoutError returns an error indicating that a timeout occurred before the request +// could be completed. Clients may retry, but the operation may still complete. +func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusGatewayTimeout, + Reason: metav1.StatusReasonTimeout, + Message: fmt.Sprintf("Timeout: %s", message), + Details: &metav1.StatusDetails{ + RetryAfterSeconds: int32(retryAfterSeconds), + }, + }} +} + +// NewTooManyRequestsError returns an error indicating that the request was rejected because +// the server has received too many requests. Client should wait and retry. But if the request +// is perishable, then the client should not retry the request. +func NewTooManyRequestsError(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusTooManyRequests, + Reason: metav1.StatusReasonTooManyRequests, + Message: fmt.Sprintf("Too many requests: %s", message), + }} +} + +// NewRequestEntityTooLargeError returns an error indicating that the request +// entity was too large. +func NewRequestEntityTooLargeError(message string) *StatusError { + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusRequestEntityTooLarge, + Reason: metav1.StatusReasonRequestEntityTooLarge, + Message: fmt.Sprintf("Request entity too large: %s", message), + }} +} + +// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form. +func NewGenericServerResponse(code int, verb string, qualifiedResource schema.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError { + reason := metav1.StatusReasonUnknown + message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code) + switch code { + case http.StatusConflict: + if verb == "POST" { + reason = metav1.StatusReasonAlreadyExists + } else { + reason = metav1.StatusReasonConflict + } + message = "the server reported a conflict" + case http.StatusNotFound: + reason = metav1.StatusReasonNotFound + message = "the server could not find the requested resource" + case http.StatusBadRequest: + reason = metav1.StatusReasonBadRequest + message = "the server rejected our request for an unknown reason" + case http.StatusUnauthorized: + reason = metav1.StatusReasonUnauthorized + message = "the server has asked for the client to provide credentials" + case http.StatusForbidden: + reason = metav1.StatusReasonForbidden + // the server message has details about who is trying to perform what action. Keep its message. 
+ message = serverMessage + case http.StatusNotAcceptable: + reason = metav1.StatusReasonNotAcceptable + // the server message has details about what types are acceptable + if len(serverMessage) == 0 || serverMessage == "unknown" { + message = "the server was unable to respond with a content type that the client supports" + } else { + message = serverMessage + } + case http.StatusUnsupportedMediaType: + reason = metav1.StatusReasonUnsupportedMediaType + // the server message has details about what types are acceptable + message = serverMessage + case http.StatusMethodNotAllowed: + reason = metav1.StatusReasonMethodNotAllowed + message = "the server does not allow this method on the requested resource" + case http.StatusUnprocessableEntity: + reason = metav1.StatusReasonInvalid + message = "the server rejected our request due to an error in our request" + case http.StatusServiceUnavailable: + reason = metav1.StatusReasonServiceUnavailable + message = "the server is currently unable to handle the request" + case http.StatusGatewayTimeout: + reason = metav1.StatusReasonTimeout + message = "the server was unable to return a response in the time allotted, but may still be processing the request" + case http.StatusTooManyRequests: + reason = metav1.StatusReasonTooManyRequests + message = "the server has received too many requests and has asked us to try again later" + default: + if code >= 500 { + reason = metav1.StatusReasonInternalError + message = fmt.Sprintf("an error on the server (%q) has prevented the request from succeeding", serverMessage) + } + } + switch { + case !qualifiedResource.Empty() && len(name) > 0: + message = fmt.Sprintf("%s (%s %s %s)", message, strings.ToLower(verb), qualifiedResource.String(), name) + case !qualifiedResource.Empty(): + message = fmt.Sprintf("%s (%s %s)", message, strings.ToLower(verb), qualifiedResource.String()) + } + var causes []metav1.StatusCause + if isUnexpectedResponse { + causes = []metav1.StatusCause{ + { + Type: metav1.CauseTypeUnexpectedServerResponse, + Message: serverMessage, + }, + } + } else { + causes = nil + } + return &StatusError{metav1.Status{ + Status: metav1.StatusFailure, + Code: int32(code), + Reason: reason, + Details: &metav1.StatusDetails{ + Group: qualifiedResource.Group, + Kind: qualifiedResource.Resource, + Name: name, + + Causes: causes, + RetryAfterSeconds: int32(retryAfterSeconds), + }, + Message: message, + }} +} + +// IsNotFound returns true if the specified error was created by NewNotFound. +func IsNotFound(err error) bool { + return ReasonForError(err) == metav1.StatusReasonNotFound +} + +// IsAlreadyExists determines if the err is an error which indicates that a specified resource already exists. +func IsAlreadyExists(err error) bool { + return ReasonForError(err) == metav1.StatusReasonAlreadyExists +} + +// IsConflict determines if the err is an error which indicates the provided update conflicts. +func IsConflict(err error) bool { + return ReasonForError(err) == metav1.StatusReasonConflict +} + +// IsInvalid determines if the err is an error which indicates the provided resource is not valid. +func IsInvalid(err error) bool { + return ReasonForError(err) == metav1.StatusReasonInvalid +} + +// IsGone is true if the error indicates the requested resource is no longer available. +func IsGone(err error) bool { + return ReasonForError(err) == metav1.StatusReasonGone +} + +// IsResourceExpired is true if the error indicates the resource has expired and the current action is +// no longer possible. 
+func IsResourceExpired(err error) bool { + return ReasonForError(err) == metav1.StatusReasonExpired +} + +// IsNotAcceptable determines if err is an error which indicates that the request failed due to an invalid Accept header +func IsNotAcceptable(err error) bool { + return ReasonForError(err) == metav1.StatusReasonNotAcceptable +} + +// IsUnsupportedMediaType determines if err is an error which indicates that the request failed due to an invalid Content-Type header +func IsUnsupportedMediaType(err error) bool { + return ReasonForError(err) == metav1.StatusReasonUnsupportedMediaType +} + +// IsMethodNotSupported determines if the err is an error which indicates the provided action could not +// be performed because it is not supported by the server. +func IsMethodNotSupported(err error) bool { + return ReasonForError(err) == metav1.StatusReasonMethodNotAllowed +} + +// IsServiceUnavailable is true if the error indicates the underlying service is no longer available. +func IsServiceUnavailable(err error) bool { + return ReasonForError(err) == metav1.StatusReasonServiceUnavailable +} + +// IsBadRequest determines if err is an error which indicates that the request is invalid. +func IsBadRequest(err error) bool { + return ReasonForError(err) == metav1.StatusReasonBadRequest +} + +// IsUnauthorized determines if err is an error which indicates that the request is unauthorized and +// requires authentication by the user. +func IsUnauthorized(err error) bool { + return ReasonForError(err) == metav1.StatusReasonUnauthorized +} + +// IsForbidden determines if err is an error which indicates that the request is forbidden and cannot +// be completed as requested. +func IsForbidden(err error) bool { + return ReasonForError(err) == metav1.StatusReasonForbidden +} + +// IsTimeout determines if err is an error which indicates that request times out due to long +// processing. +func IsTimeout(err error) bool { + return ReasonForError(err) == metav1.StatusReasonTimeout +} + +// IsServerTimeout determines if err is an error which indicates that the request needs to be retried +// by the client. +func IsServerTimeout(err error) bool { + return ReasonForError(err) == metav1.StatusReasonServerTimeout +} + +// IsInternalError determines if err is an error which indicates an internal server error. +func IsInternalError(err error) bool { + return ReasonForError(err) == metav1.StatusReasonInternalError +} + +// IsTooManyRequests determines if err is an error which indicates that there are too many requests +// that the server cannot handle. +func IsTooManyRequests(err error) bool { + if ReasonForError(err) == metav1.StatusReasonTooManyRequests { + return true + } + switch t := err.(type) { + case APIStatus: + return t.Status().Code == http.StatusTooManyRequests + } + return false +} + +// IsRequestEntityTooLargeError determines if err is an error which indicates +// the request entity is too large. +func IsRequestEntityTooLargeError(err error) bool { + if ReasonForError(err) == metav1.StatusReasonRequestEntityTooLarge { + return true + } + switch t := err.(type) { + case APIStatus: + return t.Status().Code == http.StatusRequestEntityTooLarge + } + return false +} + +// IsUnexpectedServerError returns true if the server response was not in the expected API format, +// and may be the result of another HTTP actor. 
+func IsUnexpectedServerError(err error) bool { + switch t := err.(type) { + case APIStatus: + if d := t.Status().Details; d != nil { + for _, cause := range d.Causes { + if cause.Type == metav1.CauseTypeUnexpectedServerResponse { + return true + } + } + } + } + return false +} + +// IsUnexpectedObjectError determines if err is due to an unexpected object from the master. +func IsUnexpectedObjectError(err error) bool { + _, ok := err.(*UnexpectedObjectError) + return err != nil && ok +} + +// SuggestsClientDelay returns true if this error suggests a client delay as well as the +// suggested seconds to wait, or false if the error does not imply a wait. It does not +// address whether the error *should* be retried, since some errors (like a 3xx) may +// request delay without retry. +func SuggestsClientDelay(err error) (int, bool) { + switch t := err.(type) { + case APIStatus: + if t.Status().Details != nil { + switch t.Status().Reason { + // this StatusReason explicitly requests the caller to delay the action + case metav1.StatusReasonServerTimeout: + return int(t.Status().Details.RetryAfterSeconds), true + } + // If the client requests that we retry after a certain number of seconds + if t.Status().Details.RetryAfterSeconds > 0 { + return int(t.Status().Details.RetryAfterSeconds), true + } + } + } + return 0, false +} + +// ReasonForError returns the HTTP status for a particular error. +func ReasonForError(err error) metav1.StatusReason { + switch t := err.(type) { + case APIStatus: + return t.Status().Reason + } + return metav1.StatusReasonUnknown +} + +// ErrorReporter converts generic errors into runtime.Object errors without +// requiring the caller to take a dependency on meta/v1 (where Status lives). +// This prevents circular dependencies in core watch code. +type ErrorReporter struct { + code int + verb string + reason string +} + +// NewClientErrorReporter will respond with valid v1.Status objects that report +// unexpected server responses. Primarily used by watch to report errors when +// we attempt to decode a response from the server and it is not in the form +// we expect. Because watch is a dependency of the core api, we can't return +// meta/v1.Status in that package and so much inject this interface to convert a +// generic error as appropriate. The reason is passed as a unique status cause +// on the returned status, otherwise the generic "ClientError" is returned. +func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter { + return &ErrorReporter{ + code: code, + verb: verb, + reason: reason, + } +} + +// AsObject returns a valid error runtime.Object (a v1.Status) for the given +// error, using the code and verb of the reporter type. The error is set to +// indicate that this was an unexpected server response. 
+func (r *ErrorReporter) AsObject(err error) runtime.Object { + status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true) + if status.ErrStatus.Details == nil { + status.ErrStatus.Details = &metav1.StatusDetails{} + } + reason := r.reason + if len(reason) == 0 { + reason = "ClientError" + } + status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{ + Type: metav1.CauseType(reason), + Message: err.Error(), + }) + return &status.ErrStatus +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS new file mode 100644 index 0000000..96bccff --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -0,0 +1,23 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- nikhiljindal +- gmarek +- janetkuo +- ncdc +- eparis +- dims +- krousey +- resouer +- david-mcmahon +- mfojtik +- jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go new file mode 100644 index 0000000..b6d42ac --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package meta provides functions for retrieving API metadata from objects +// belonging to the Kubernetes API +package meta // import "k8s.io/apimachinery/pkg/api/meta" diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go new file mode 100644 index 0000000..cbf5d02 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go @@ -0,0 +1,121 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" +) + +// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource +type AmbiguousResourceError struct { + PartialResource schema.GroupVersionResource + + MatchingResources []schema.GroupVersionResource + MatchingKinds []schema.GroupVersionKind +} + +func (e *AmbiguousResourceError) Error() string { + switch { + case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds) + case len(e.MatchingKinds) > 0: + return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds) + case len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources) + } + return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) +} + +// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind +type AmbiguousKindError struct { + PartialKind schema.GroupVersionKind + + MatchingResources []schema.GroupVersionResource + MatchingKinds []schema.GroupVersionKind +} + +func (e *AmbiguousKindError) Error() string { + switch { + case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds) + case len(e.MatchingKinds) > 0: + return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds) + case len(e.MatchingResources) > 0: + return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources) + } + return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) +} + +func IsAmbiguousError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case *AmbiguousResourceError, *AmbiguousKindError: + return true + default: + return false + } +} + +// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource +type NoResourceMatchError struct { + PartialResource schema.GroupVersionResource +} + +func (e *NoResourceMatchError) Error() string { + return fmt.Sprintf("no matches for %v", e.PartialResource) +} + +// NoKindMatchError is returned if the RESTMapper can't find any match for a kind +type NoKindMatchError struct { + // GroupKind is the API group and kind that was searched + GroupKind schema.GroupKind + // SearchedVersions is the optional list of versions the search was restricted to + SearchedVersions []string +} + +func (e *NoKindMatchError) Error() string { + searchedVersions := sets.NewString() + for _, v := range e.SearchedVersions { + searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String()) + } + + switch len(searchedVersions) { + case 0: + return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group) + case 1: + return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0]) + default: + return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List()) + } +} + +func IsNoMatchError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case *NoResourceMatchError, *NoKindMatchError: + return true + default: + return false + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go 
b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go new file mode 100644 index 0000000..fd22100 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go @@ -0,0 +1,97 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the +// first successful result for the singular requests +type FirstHitRESTMapper struct { + MultiRESTMapper +} + +func (m FirstHitRESTMapper) String() string { + return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper) +} + +func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.ResourceFor(resource) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return schema.GroupVersionResource{}, collapseAggregateErrors(errors) +} + +func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.KindFor(resource) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return schema.GroupVersionKind{}, collapseAggregateErrors(errors) +} + +// RESTMapping provides the REST mapping for the resource based on the +// kind and version. This implementation supports multiple REST schemas and +// return the first match. +func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + errors := []error{} + for _, t := range m.MultiRESTMapper { + ret, err := t.RESTMapping(gk, versions...) + if err == nil { + return ret, nil + } + errors = append(errors, err) + } + + return nil, collapseAggregateErrors(errors) +} + +// collapseAggregateErrors returns the minimal errors. it handles empty as nil, handles one item in a list +// by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same) +func collapseAggregateErrors(errors []error) error { + if len(errors) == 0 { + return nil + } + if len(errors) == 1 { + return errors[0] + } + + allNoMatchErrors := true + for _, err := range errors { + allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err) + } + if allNoMatchErrors { + return errors[0] + } + + return utilerrors.NewAggregate(errors) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go new file mode 100644 index 0000000..50468b5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -0,0 +1,264 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "errors" + "fmt" + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + // isListCache maintains a cache of types that are checked for lists + // which is used by IsListType. + // TODO: remove and replace with an interface check + isListCache = struct { + lock sync.RWMutex + byType map[reflect.Type]bool + }{ + byType: make(map[reflect.Type]bool, 1024), + } +) + +// IsListType returns true if the provided Object has a slice called Items. +// TODO: Replace the code in this check with an interface comparison by +// creating and enforcing that lists implement a list accessor. +func IsListType(obj runtime.Object) bool { + switch t := obj.(type) { + case runtime.Unstructured: + return t.IsList() + } + t := reflect.TypeOf(obj) + + isListCache.lock.RLock() + ok, exists := isListCache.byType[t] + isListCache.lock.RUnlock() + + if !exists { + _, err := getItemsPtr(obj) + ok = err == nil + + // cache only the first 1024 types + isListCache.lock.Lock() + if len(isListCache.byType) < 1024 { + isListCache.byType[t] = ok + } + isListCache.lock.Unlock() + } + + return ok +} + +var ( + errExpectFieldItems = errors.New("no Items field in this object") + errExpectSliceItems = errors.New("Items field must be a slice of objects") +) + +// GetItemsPtr returns a pointer to the list object's Items member. +// If 'list' doesn't have an Items member, it's not really a list type +// and an error will be returned. +// This function will either return a pointer to a slice, or an error, but not both. +// TODO: this will be replaced with an interface in the future +func GetItemsPtr(list runtime.Object) (interface{}, error) { + obj, err := getItemsPtr(list) + if err != nil { + return nil, fmt.Errorf("%T is not a list: %v", list, err) + } + return obj, nil +} + +// getItemsPtr returns a pointer to the list object's Items member or an error. +func getItemsPtr(list runtime.Object) (interface{}, error) { + v, err := conversion.EnforcePtr(list) + if err != nil { + return nil, err + } + + items := v.FieldByName("Items") + if !items.IsValid() { + return nil, errExpectFieldItems + } + switch items.Kind() { + case reflect.Interface, reflect.Ptr: + target := reflect.TypeOf(items.Interface()).Elem() + if target.Kind() != reflect.Slice { + return nil, errExpectSliceItems + } + return items.Interface(), nil + case reflect.Slice: + return items.Addr().Interface(), nil + default: + return nil, errExpectSliceItems + } +} + +// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates +// the loop. +func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { + if unstructured, ok := obj.(runtime.Unstructured); ok { + return unstructured.EachListItem(fn) + } + // TODO: Change to an interface call? 
+ itemsPtr, err := GetItemsPtr(obj) + if err != nil { + return err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return err + } + len := items.Len() + if len == 0 { + return nil + } + takeAddr := false + if elemType := items.Type().Elem(); elemType.Kind() != reflect.Ptr && elemType.Kind() != reflect.Interface { + if !items.Index(0).CanAddr() { + return fmt.Errorf("unable to take address of items in %T for EachListItem", obj) + } + takeAddr = true + } + + for i := 0; i < len; i++ { + raw := items.Index(i) + if takeAddr { + raw = raw.Addr() + } + switch item := raw.Interface().(type) { + case *runtime.RawExtension: + if err := fn(item.Object); err != nil { + return err + } + case runtime.Object: + if err := fn(item); err != nil { + return err + } + default: + obj, ok := item.(runtime.Object) + if !ok { + return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } + if err := fn(obj); err != nil { + return err + } + } + } + return nil +} + +// ExtractList returns obj's Items element as an array of runtime.Objects. +// Returns an error if obj is not a List type (does not have an Items member). +func ExtractList(obj runtime.Object) ([]runtime.Object, error) { + itemsPtr, err := GetItemsPtr(obj) + if err != nil { + return nil, err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return nil, err + } + list := make([]runtime.Object, items.Len()) + for i := range list { + raw := items.Index(i) + switch item := raw.Interface().(type) { + case runtime.RawExtension: + switch { + case item.Object != nil: + list[i] = item.Object + case item.Raw != nil: + // TODO: Set ContentEncoding and ContentType correctly. + list[i] = &runtime.Unknown{Raw: item.Raw} + default: + list[i] = nil + } + case runtime.Object: + list[i] = item + default: + var found bool + if list[i], found = raw.Addr().Interface().(runtime.Object); !found { + return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) + } + } + } + return list, nil +} + +// objectSliceType is the type of a slice of Objects +var objectSliceType = reflect.TypeOf([]runtime.Object{}) + +// LenList returns the length of this list or 0 if it is not a list. +func LenList(list runtime.Object) int { + itemsPtr, err := GetItemsPtr(list) + if err != nil { + return 0 + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return 0 + } + return items.Len() +} + +// SetList sets the given list object's Items member have the elements given in +// objects. +// Returns an error if list is not a List type (does not have an Items member), +// or if any of the objects are not of the right type. 
+func SetList(list runtime.Object, objects []runtime.Object) error { + itemsPtr, err := GetItemsPtr(list) + if err != nil { + return err + } + items, err := conversion.EnforcePtr(itemsPtr) + if err != nil { + return err + } + if items.Type() == objectSliceType { + items.Set(reflect.ValueOf(objects)) + return nil + } + slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) + for i := range objects { + dest := slice.Index(i) + if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { + dest = dest.FieldByName("Object") + } + + // check to see if you're directly assignable + if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) { + dest.Set(reflect.ValueOf(objects[i])) + continue + } + + src, err := conversion.EnforcePtr(objects[i]) + if err != nil { + return err + } + if src.Type().AssignableTo(dest.Type()) { + dest.Set(src) + } else if src.Type().ConvertibleTo(dest.Type()) { + dest.Set(src.Convert(dest.Type())) + } else { + return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type()) + } + } + items.Set(slice) + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go new file mode 100644 index 0000000..42eac3a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go @@ -0,0 +1,134 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +type ListMetaAccessor interface { + GetListMeta() List +} + +// List lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +type List metav1.ListInterface + +// Type exposes the type and APIVersion of versioned or internal API objects. +type Type metav1.Type + +// MetadataAccessor lets you work with object and list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. +// +// MetadataAccessor exposes Interface in a way that can be used with multiple objects. 
+type MetadataAccessor interface { + APIVersion(obj runtime.Object) (string, error) + SetAPIVersion(obj runtime.Object, version string) error + + Kind(obj runtime.Object) (string, error) + SetKind(obj runtime.Object, kind string) error + + Namespace(obj runtime.Object) (string, error) + SetNamespace(obj runtime.Object, namespace string) error + + Name(obj runtime.Object) (string, error) + SetName(obj runtime.Object, name string) error + + GenerateName(obj runtime.Object) (string, error) + SetGenerateName(obj runtime.Object, name string) error + + UID(obj runtime.Object) (types.UID, error) + SetUID(obj runtime.Object, uid types.UID) error + + SelfLink(obj runtime.Object) (string, error) + SetSelfLink(obj runtime.Object, selfLink string) error + + Labels(obj runtime.Object) (map[string]string, error) + SetLabels(obj runtime.Object, labels map[string]string) error + + Annotations(obj runtime.Object) (map[string]string, error) + SetAnnotations(obj runtime.Object, annotations map[string]string) error + + Continue(obj runtime.Object) (string, error) + SetContinue(obj runtime.Object, c string) error + + runtime.ResourceVersioner +} + +type RESTScopeName string + +const ( + RESTScopeNameNamespace RESTScopeName = "namespace" + RESTScopeNameRoot RESTScopeName = "root" +) + +// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy +type RESTScope interface { + // Name of the scope + Name() RESTScopeName +} + +// RESTMapping contains the information needed to deal with objects of a specific +// resource and kind in a RESTful manner. +type RESTMapping struct { + // Resource is the GroupVersionResource (location) for this endpoint + Resource schema.GroupVersionResource + + // GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint + GroupVersionKind schema.GroupVersionKind + + // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy + Scope RESTScope +} + +// RESTMapper allows clients to map resources to kind, and map kind and version +// to interfaces for manipulating those objects. It is primarily intended for +// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md. +// +// The Kubernetes API provides versioned resources and object kinds which are scoped +// to API groups. In other words, kinds and resources should not be assumed to be +// unique across groups. +// +// TODO: split into sub-interfaces +type RESTMapper interface { + // KindFor takes a partial resource and returns the single match. Returns an error if there are multiple matches + KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) + + // KindsFor takes a partial resource and returns the list of potential kinds in priority order + KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) + + // ResourceFor takes a partial resource and returns the single match. Returns an error if there are multiple matches + ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) + + // ResourcesFor takes a partial resource and returns the list of potential resource in priority order + ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) + + // RESTMapping identifies a preferred resource mapping for the provided group kind. 
+ RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) + // RESTMappings returns all resource mappings for the provided group kind if no + // version search is provided. Otherwise identifies a preferred resource mapping for + // the provided version(s). + RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) + + ResourceSingularizer(resource string) (singular string, err error) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go new file mode 100644 index 0000000..431a0a6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go @@ -0,0 +1,104 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// lazyObject defers loading the mapper and typer until necessary. +type lazyObject struct { + loader func() (RESTMapper, error) + + lock sync.Mutex + loaded bool + err error + mapper RESTMapper +} + +// NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by +// returning those initialization errors when the interface methods are invoked. This defers the +// initialization and any server calls until a client actually needs to perform the action. +func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper { + obj := &lazyObject{loader: fn} + return obj +} + +// init lazily loads the mapper and typer, returning an error if initialization has failed. +func (o *lazyObject) init() error { + o.lock.Lock() + defer o.lock.Unlock() + if o.loaded { + return o.err + } + o.mapper, o.err = o.loader() + o.loaded = true + return o.err +} + +var _ RESTMapper = &lazyObject{} + +func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return schema.GroupVersionKind{}, err + } + return o.mapper.KindFor(resource) +} + +func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionKind{}, err + } + return o.mapper.KindsFor(resource) +} + +func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return schema.GroupVersionResource{}, err + } + return o.mapper.ResourceFor(input) +} + +func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + if err := o.init(); err != nil { + return []schema.GroupVersionResource{}, err + } + return o.mapper.ResourcesFor(input) +} + +func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMapping(gk, versions...) 
+} + +func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + if err := o.init(); err != nil { + return nil, err + } + return o.mapper.RESTMappings(gk, versions...) +} + +func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) { + if err := o.init(); err != nil { + return "", err + } + return o.mapper.ResourceSingularizer(resource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go new file mode 100644 index 0000000..fa4b767 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -0,0 +1,648 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "reflect" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/klog" +) + +// errNotList is returned when an object implements the Object style interfaces but not the List style +// interfaces. +var errNotList = fmt.Errorf("object does not implement the List interfaces") + +var errNotCommon = fmt.Errorf("object does not implement the common interface for accessing the SelfLink") + +// CommonAccessor returns a Common interface for the provided object or an error if the object does +// not provide List. +func CommonAccessor(obj interface{}) (metav1.Common, error) { + switch t := obj.(type) { + case List: + return t, nil + case metav1.ListInterface: + return t, nil + case ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotCommon + case metav1.ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotCommon + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotCommon + default: + return nil, errNotCommon + } +} + +// ListAccessor returns a List interface for the provided object or an error if the object does +// not provide List. +// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an +// object *is* a List. +func ListAccessor(obj interface{}) (List, error) { + switch t := obj.(type) { + case List: + return t, nil + case metav1.ListInterface: + return t, nil + case ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotList + case metav1.ListMetaAccessor: + if m := t.GetListMeta(); m != nil { + return m, nil + } + return nil, errNotList + default: + return nil, errNotList + } +} + +// errNotObject is returned when an object implements the List style interfaces but not the Object style +// interfaces. +var errNotObject = fmt.Errorf("object does not implement the Object interfaces") + +// Accessor takes an arbitrary object pointer and returns meta.Interface. +// obj must be a pointer to an API type. 
An error is returned if the minimum +// required fields are missing. Fields that are not required return the default +// value and are a no-op if set. +func Accessor(obj interface{}) (metav1.Object, error) { + switch t := obj.(type) { + case metav1.Object: + return t, nil + case metav1.ObjectMetaAccessor: + if m := t.GetObjectMeta(); m != nil { + return m, nil + } + return nil, errNotObject + default: + return nil, errNotObject + } +} + +// AsPartialObjectMetadata takes the metav1 interface and returns a partial object. +// TODO: consider making this solely a conversion action. +func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { + switch t := m.(type) { + case *metav1.ObjectMeta: + return &metav1.PartialObjectMetadata{ObjectMeta: *t} + default: + return &metav1.PartialObjectMetadata{ + ObjectMeta: metav1.ObjectMeta{ + Name: m.GetName(), + GenerateName: m.GetGenerateName(), + Namespace: m.GetNamespace(), + SelfLink: m.GetSelfLink(), + UID: m.GetUID(), + ResourceVersion: m.GetResourceVersion(), + Generation: m.GetGeneration(), + CreationTimestamp: m.GetCreationTimestamp(), + DeletionTimestamp: m.GetDeletionTimestamp(), + DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), + Labels: m.GetLabels(), + Annotations: m.GetAnnotations(), + OwnerReferences: m.GetOwnerReferences(), + Finalizers: m.GetFinalizers(), + ClusterName: m.GetClusterName(), + ManagedFields: m.GetManagedFields(), + }, + } + } +} + +// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion +// and Kind of an in-memory internal object. +// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta +// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube +// api conventions). +func TypeAccessor(obj interface{}) (Type, error) { + if typed, ok := obj.(runtime.Object); ok { + return objectAccessor{typed}, nil + } + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, err + } + t := v.Type() + if v.Kind() != reflect.Struct { + return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) + } + + typeMeta := v.FieldByName("TypeMeta") + if !typeMeta.IsValid() { + return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) + } + a := &genericAccessor{} + if err := extractFromTypeMeta(typeMeta, a); err != nil { + return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) + } + return a, nil +} + +type objectAccessor struct { + runtime.Object +} + +func (obj objectAccessor) GetKind() string { + return obj.GetObjectKind().GroupVersionKind().Kind +} + +func (obj objectAccessor) SetKind(kind string) { + gvk := obj.GetObjectKind().GroupVersionKind() + gvk.Kind = kind + obj.GetObjectKind().SetGroupVersionKind(gvk) +} + +func (obj objectAccessor) GetAPIVersion() string { + return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() +} + +func (obj objectAccessor) SetAPIVersion(version string) { + gvk := obj.GetObjectKind().GroupVersionKind() + gv, err := schema.ParseGroupVersion(version) + if err != nil { + gv = schema.GroupVersion{Version: version} + } + gvk.Group, gvk.Version = gv.Group, gv.Version + obj.GetObjectKind().SetGroupVersionKind(gvk) +} + +// NewAccessor returns a MetadataAccessor that can retrieve +// or manipulate resource version on objects derived from core API +// metadata concepts. +func NewAccessor() MetadataAccessor { + return resourceAccessor{} +} + +// resourceAccessor implements ResourceVersioner and SelfLinker. 
+type resourceAccessor struct{} + +func (resourceAccessor) Kind(obj runtime.Object) (string, error) { + return objectAccessor{obj}.GetKind(), nil +} + +func (resourceAccessor) SetKind(obj runtime.Object, kind string) error { + objectAccessor{obj}.SetKind(kind) + return nil +} + +func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) { + return objectAccessor{obj}.GetAPIVersion(), nil +} + +func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error { + objectAccessor{obj}.SetAPIVersion(version) + return nil +} + +func (resourceAccessor) Namespace(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetNamespace(), nil +} + +func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetNamespace(namespace) + return nil +} + +func (resourceAccessor) Name(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetName(), nil +} + +func (resourceAccessor) SetName(obj runtime.Object, name string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetName(name) + return nil +} + +func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetGenerateName(), nil +} + +func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetGenerateName(name) + return nil +} + +func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) { + accessor, err := Accessor(obj) + if err != nil { + return "", err + } + return accessor.GetUID(), nil +} + +func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetUID(uid) + return nil +} + +func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { + accessor, err := CommonAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetSelfLink(), nil +} + +func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { + accessor, err := CommonAccessor(obj) + if err != nil { + return err + } + accessor.SetSelfLink(selfLink) + return nil +} + +func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) { + accessor, err := Accessor(obj) + if err != nil { + return nil, err + } + return accessor.GetLabels(), nil +} + +func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetLabels(labels) + return nil +} + +func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) { + accessor, err := Accessor(obj) + if err != nil { + return nil, err + } + return accessor.GetAnnotations(), nil +} + +func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error { + accessor, err := Accessor(obj) + if err != nil { + return err + } + accessor.SetAnnotations(annotations) + return nil +} + +func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { + accessor, err := CommonAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetResourceVersion(), nil +} + +func (resourceAccessor) SetResourceVersion(obj 
runtime.Object, version string) error { + accessor, err := CommonAccessor(obj) + if err != nil { + return err + } + accessor.SetResourceVersion(version) + return nil +} + +func (resourceAccessor) Continue(obj runtime.Object) (string, error) { + accessor, err := ListAccessor(obj) + if err != nil { + return "", err + } + return accessor.GetContinue(), nil +} + +func (resourceAccessor) SetContinue(obj runtime.Object, version string) error { + accessor, err := ListAccessor(obj) + if err != nil { + return err + } + accessor.SetContinue(version) + return nil +} + +// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. +func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { + if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { + return err + } + if err := runtime.Field(v, "Kind", &o.Kind); err != nil { + return err + } + if err := runtime.Field(v, "Name", &o.Name); err != nil { + return err + } + if err := runtime.Field(v, "UID", &o.UID); err != nil { + return err + } + var controllerPtr *bool + if err := runtime.Field(v, "Controller", &controllerPtr); err != nil { + return err + } + if controllerPtr != nil { + controller := *controllerPtr + o.Controller = &controller + } + var blockOwnerDeletionPtr *bool + if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil { + return err + } + if blockOwnerDeletionPtr != nil { + block := *blockOwnerDeletionPtr + o.BlockOwnerDeletion = &block + } + return nil +} + +// setOwnerReference sets v to o. v is the OwnerReferences field of an object. +func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { + if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { + return err + } + if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { + return err + } + if err := runtime.SetField(o.Name, v, "Name"); err != nil { + return err + } + if err := runtime.SetField(o.UID, v, "UID"); err != nil { + return err + } + if o.Controller != nil { + controller := *(o.Controller) + if err := runtime.SetField(&controller, v, "Controller"); err != nil { + return err + } + } + if o.BlockOwnerDeletion != nil { + block := *(o.BlockOwnerDeletion) + if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil { + return err + } + } + return nil +} + +// genericAccessor contains pointers to strings that can modify an arbitrary +// struct and implements the Accessor interface. 
+type genericAccessor struct { + namespace *string + name *string + generateName *string + uid *types.UID + apiVersion *string + kind *string + resourceVersion *string + selfLink *string + creationTimestamp *metav1.Time + deletionTimestamp **metav1.Time + labels *map[string]string + annotations *map[string]string + ownerReferences reflect.Value + finalizers *[]string +} + +func (a genericAccessor) GetNamespace() string { + if a.namespace == nil { + return "" + } + return *a.namespace +} + +func (a genericAccessor) SetNamespace(namespace string) { + if a.namespace == nil { + return + } + *a.namespace = namespace +} + +func (a genericAccessor) GetName() string { + if a.name == nil { + return "" + } + return *a.name +} + +func (a genericAccessor) SetName(name string) { + if a.name == nil { + return + } + *a.name = name +} + +func (a genericAccessor) GetGenerateName() string { + if a.generateName == nil { + return "" + } + return *a.generateName +} + +func (a genericAccessor) SetGenerateName(generateName string) { + if a.generateName == nil { + return + } + *a.generateName = generateName +} + +func (a genericAccessor) GetUID() types.UID { + if a.uid == nil { + return "" + } + return *a.uid +} + +func (a genericAccessor) SetUID(uid types.UID) { + if a.uid == nil { + return + } + *a.uid = uid +} + +func (a genericAccessor) GetAPIVersion() string { + return *a.apiVersion +} + +func (a genericAccessor) SetAPIVersion(version string) { + *a.apiVersion = version +} + +func (a genericAccessor) GetKind() string { + return *a.kind +} + +func (a genericAccessor) SetKind(kind string) { + *a.kind = kind +} + +func (a genericAccessor) GetResourceVersion() string { + return *a.resourceVersion +} + +func (a genericAccessor) SetResourceVersion(version string) { + *a.resourceVersion = version +} + +func (a genericAccessor) GetSelfLink() string { + return *a.selfLink +} + +func (a genericAccessor) SetSelfLink(selfLink string) { + *a.selfLink = selfLink +} + +func (a genericAccessor) GetCreationTimestamp() metav1.Time { + return *a.creationTimestamp +} + +func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) { + *a.creationTimestamp = timestamp +} + +func (a genericAccessor) GetDeletionTimestamp() *metav1.Time { + return *a.deletionTimestamp +} + +func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) { + *a.deletionTimestamp = timestamp +} + +func (a genericAccessor) GetLabels() map[string]string { + if a.labels == nil { + return nil + } + return *a.labels +} + +func (a genericAccessor) SetLabels(labels map[string]string) { + *a.labels = labels +} + +func (a genericAccessor) GetAnnotations() map[string]string { + if a.annotations == nil { + return nil + } + return *a.annotations +} + +func (a genericAccessor) SetAnnotations(annotations map[string]string) { + if a.annotations == nil { + emptyAnnotations := make(map[string]string) + a.annotations = &emptyAnnotations + } + *a.annotations = annotations +} + +func (a genericAccessor) GetFinalizers() []string { + if a.finalizers == nil { + return nil + } + return *a.finalizers +} + +func (a genericAccessor) SetFinalizers(finalizers []string) { + *a.finalizers = finalizers +} + +func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { + var ret []metav1.OwnerReference + s := a.ownerReferences + if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + klog.Errorf("expect %v to be a pointer to slice", s) + return ret + } + s = s.Elem() + // Set the capacity to one element greater to avoid copy if the caller later 
append an element. + ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) + for i := 0; i < s.Len(); i++ { + if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { + klog.Errorf("extractFromOwnerReference failed: %v", err) + return ret + } + } + return ret +} + +func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { + s := a.ownerReferences + if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { + klog.Errorf("expect %v to be a pointer to slice", s) + } + s = s.Elem() + newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) + for i := 0; i < len(references); i++ { + if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { + klog.Errorf("setOwnerReference failed: %v", err) + return + } + } + s.Set(newReferences) +} + +// extractFromTypeMeta extracts pointers to version and kind fields from an object +func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error { + if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil { + return err + } + if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go new file mode 100644 index 0000000..6b01bf1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go @@ -0,0 +1,210 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" + utilerrors "k8s.io/apimachinery/pkg/util/errors" +) + +// MultiRESTMapper is a wrapper for multiple RESTMappers. +type MultiRESTMapper []RESTMapper + +func (m MultiRESTMapper) String() string { + nested := []string{} + for _, t := range m { + currString := fmt.Sprintf("%v", t) + splitStrings := strings.Split(currString, "\n") + nested = append(nested, strings.Join(splitStrings, "\n\t")) + } + + return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t")) +} + +// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod) +// This implementation supports multiple REST schemas and return the first match. 
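+//
+// Illustrative sketch (mapperA and mapperB are assumed to be existing RESTMapper
+// implementations):
+//
+//	multi := MultiRESTMapper{mapperA, mapperB}
+//	singular, err := multi.ResourceSingularizer("pods")
+//	// each wrapped mapper is tried in order and the first success wins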
+func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + for _, t := range m { + singular, err = t.ResourceSingularizer(resource) + if err == nil { + return + } + } + return +} + +func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + allGVRs := []schema.GroupVersionResource{} + for _, t := range m { + gvrs, err := t.ResourcesFor(resource) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + return nil, err + } + + // walk the existing values to de-dup + for _, curr := range gvrs { + found := false + for _, existing := range allGVRs { + if curr == existing { + found = true + break + } + } + + if !found { + allGVRs = append(allGVRs, curr) + } + } + } + + if len(allGVRs) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + return allGVRs, nil +} + +func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { + allGVKs := []schema.GroupVersionKind{} + for _, t := range m { + gvks, err := t.KindsFor(resource) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + return nil, err + } + + // walk the existing values to de-dup + for _, curr := range gvks { + found := false + for _, existing := range allGVKs { + if curr == existing { + found = true + break + } + } + + if !found { + allGVKs = append(allGVKs, curr) + } + } + } + + if len(allGVKs) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + return allGVKs, nil +} + +func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + resources, err := m.ResourcesFor(resource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(resources) == 1 { + return resources[0], nil + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} +} + +func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + kinds, err := m.KindsFor(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(kinds) == 1 { + return kinds[0], nil + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} +} + +// RESTMapping provides the REST mapping for the resource based on the +// kind and version. This implementation supports multiple REST schemas and +// return the first match. +func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + allMappings := []*RESTMapping{} + errors := []error{} + + for _, t := range m { + currMapping, err := t.RESTMapping(gk, versions...) 
+ // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + errors = append(errors, err) + continue + } + + allMappings = append(allMappings, currMapping) + } + + // if we got exactly one mapping, then use it even if other requested failed + if len(allMappings) == 1 { + return allMappings[0], nil + } + if len(allMappings) > 1 { + var kinds []schema.GroupVersionKind + for _, m := range allMappings { + kinds = append(kinds, m.GroupVersionKind) + } + return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} + } + if len(errors) > 0 { + return nil, utilerrors.NewAggregate(errors) + } + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} +} + +// RESTMappings returns all possible RESTMappings for the provided group kind, or an error +// if the type is not recognized. +func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + var allMappings []*RESTMapping + var errors []error + + for _, t := range m { + currMappings, err := t.RESTMappings(gk, versions...) + // ignore "no match" errors, but any other error percolates back up + if IsNoMatchError(err) { + continue + } + if err != nil { + errors = append(errors, err) + continue + } + allMappings = append(allMappings, currMappings...) + } + if len(errors) > 0 { + return nil, utilerrors.NewAggregate(errors) + } + if len(allMappings) == 0 { + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} + } + return allMappings, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go new file mode 100644 index 0000000..fa11c58 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go @@ -0,0 +1,222 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package meta + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + AnyGroup = "*" + AnyVersion = "*" + AnyResource = "*" + AnyKind = "*" +) + +// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind +// when multiple matches are possible +type PriorityRESTMapper struct { + // Delegate is the RESTMapper to use to locate all the Kind and Resource matches + Delegate RESTMapper + + // ResourcePriority is a list of priority patterns to apply to matching resources. + // The list of all matching resources is narrowed based on the patterns until only one remains. + // A pattern with no matches is skipped. A pattern with more than one match uses its + // matches as the list to continue matching against. + ResourcePriority []schema.GroupVersionResource + + // KindPriority is a list of priority patterns to apply to matching kinds. + // The list of all matching kinds is narrowed based on the patterns until only one remains. + // A pattern with no matches is skipped. A pattern with more than one match uses its + // matches as the list to continue matching against. 
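+	//
+	// For example, a pattern such as schema.GroupVersionKind{Group: AnyGroup,
+	// Version: "v1", Kind: AnyKind} (illustrative) would prefer "v1" matches over
+	// candidates from other versions.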
+ KindPriority []schema.GroupVersionKind +} + +func (m PriorityRESTMapper) String() string { + return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate) +} + +// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. +func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource) + if originalErr != nil && len(originalGVRs) == 0 { + return schema.GroupVersionResource{}, originalErr + } + if len(originalGVRs) == 1 { + return originalGVRs[0], originalErr + } + + remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...) + for _, pattern := range m.ResourcePriority { + matchedGVRs := []schema.GroupVersionResource{} + for _, gvr := range remainingGVRs { + if resourceMatches(pattern, gvr) { + matchedGVRs = append(matchedGVRs, gvr) + } + } + + switch len(matchedGVRs) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matchedGVRs[0], originalErr + default: + // more than one match, use the matched hits as the list moving to the next pattern. + // this way you can have a series of selection criteria + remainingGVRs = matchedGVRs + } + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs} +} + +// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. +func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource) + if originalErr != nil && len(originalGVKs) == 0 { + return schema.GroupVersionKind{}, originalErr + } + if len(originalGVKs) == 1 { + return originalGVKs[0], originalErr + } + + remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...) + for _, pattern := range m.KindPriority { + matchedGVKs := []schema.GroupVersionKind{} + for _, gvr := range remainingGVKs { + if kindMatches(pattern, gvr) { + matchedGVKs = append(matchedGVKs, gvr) + } + } + + switch len(matchedGVKs) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matchedGVKs[0], originalErr + default: + // more than one match, use the matched hits as the list moving to the next pattern. 
+ // this way you can have a series of selection criteria + remainingGVKs = matchedGVKs + } + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs} +} + +func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool { + if pattern.Group != AnyGroup && pattern.Group != resource.Group { + return false + } + if pattern.Version != AnyVersion && pattern.Version != resource.Version { + return false + } + if pattern.Resource != AnyResource && pattern.Resource != resource.Resource { + return false + } + + return true +} + +func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool { + if pattern.Group != AnyGroup && pattern.Group != kind.Group { + return false + } + if pattern.Version != AnyVersion && pattern.Version != kind.Version { + return false + } + if pattern.Kind != AnyKind && pattern.Kind != kind.Kind { + return false + } + + return true +} + +func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { + mappings, originalErr := m.Delegate.RESTMappings(gk, versions...) + if originalErr != nil && len(mappings) == 0 { + return nil, originalErr + } + + // any versions the user provides take priority + priorities := m.KindPriority + if len(versions) > 0 { + priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions)) + for _, version := range versions { + gv := schema.GroupVersion{ + Version: version, + Group: gk.Group, + } + priorities = append(priorities, gv.WithKind(AnyKind)) + } + priorities = append(priorities, m.KindPriority...) + } + + remaining := append([]*RESTMapping{}, mappings...) + for _, pattern := range priorities { + var matching []*RESTMapping + for _, m := range remaining { + if kindMatches(pattern, m.GroupVersionKind) { + matching = append(matching, m) + } + } + + switch len(matching) { + case 0: + // if you have no matches, then nothing matched this pattern just move to the next + continue + case 1: + // one match, return + return matching[0], originalErr + default: + // more than one match, use the matched hits as the list moving to the next pattern. + // this way you can have a series of selection criteria + remaining = matching + } + } + if len(remaining) == 1 { + return remaining[0], originalErr + } + + var kinds []schema.GroupVersionKind + for _, m := range mappings { + kinds = append(kinds, m.GroupVersionKind) + } + return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} +} + +func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + return m.Delegate.RESTMappings(gk, versions...) 
+} + +func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { + return m.Delegate.ResourceSingularizer(resource) +} + +func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + return m.Delegate.ResourcesFor(partiallySpecifiedResource) +} + +func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { + return m.Delegate.KindsFor(partiallySpecifiedResource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go new file mode 100644 index 0000000..41b60d7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go @@ -0,0 +1,518 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// TODO: move everything in this file to pkg/api/rest +package meta + +import ( + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Implements RESTScope interface +type restScope struct { + name RESTScopeName +} + +func (r *restScope) Name() RESTScopeName { + return r.name +} + +var RESTScopeNamespace = &restScope{ + name: RESTScopeNameNamespace, +} + +var RESTScopeRoot = &restScope{ + name: RESTScopeNameRoot, +} + +// DefaultRESTMapper exposes mappings between the types defined in a +// runtime.Scheme. It assumes that all types defined the provided scheme +// can be mapped with the provided MetadataAccessor and Codec interfaces. +// +// The resource name of a Kind is defined as the lowercase, +// English-plural version of the Kind string. +// When converting from resource to Kind, the singular version of the +// resource name is also accepted for convenience. +// +// TODO: Only accept plural for some operations for increased control? +// (`get pod bar` vs `get pods bar`) +type DefaultRESTMapper struct { + defaultGroupVersions []schema.GroupVersion + + resourceToKind map[schema.GroupVersionResource]schema.GroupVersionKind + kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource + kindToScope map[schema.GroupVersionKind]RESTScope + singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource + pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource +} + +func (m *DefaultRESTMapper) String() string { + return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) +} + +var _ RESTMapper = &DefaultRESTMapper{} + +// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion +// to a resource name and back based on the objects in a runtime.Scheme +// and the Kubernetes API conventions. Takes a group name, a priority list of the versions +// to search when an object has no default version (set empty to return an error), +// and a function that retrieves the correct metadata for a given version. 
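+//
+// A minimal wiring sketch (the apps/v1 Deployment names below are assumptions
+// used only for illustration):
+//
+//	gv := schema.GroupVersion{Group: "apps", Version: "v1"}
+//	mapper := NewDefaultRESTMapper([]schema.GroupVersion{gv})
+//	mapper.Add(gv.WithKind("Deployment"), RESTScopeNamespace)
+//	mapping, err := mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}, "v1")
+//	// mapping.Resource would be the plural apps/v1 "deployments" resource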
+func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper { + resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind) + kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource) + kindToScope := make(map[schema.GroupVersionKind]RESTScope) + singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource) + pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource) + // TODO: verify name mappings work correctly when versions differ + + return &DefaultRESTMapper{ + resourceToKind: resourceToKind, + kindToPluralResource: kindToPluralResource, + kindToScope: kindToScope, + defaultGroupVersions: defaultGroupVersions, + singularToPlural: singularToPlural, + pluralToSingular: pluralToSingular, + } +} + +func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) { + plural, singular := UnsafeGuessKindToResource(kind) + m.AddSpecific(kind, plural, singular, scope) +} + +func (m *DefaultRESTMapper) AddSpecific(kind schema.GroupVersionKind, plural, singular schema.GroupVersionResource, scope RESTScope) { + m.singularToPlural[singular] = plural + m.pluralToSingular[plural] = singular + + m.resourceToKind[singular] = kind + m.resourceToKind[plural] = kind + + m.kindToPluralResource[kind] = plural + m.kindToScope[kind] = scope +} + +// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular +// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should. +// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all +// callers to use the RESTMapper they mean. +var unpluralizedSuffixes = []string{ + "endpoints", +} + +// UnsafeGuessKindToResource converts Kind to a resource name. +// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match +// and they aren't guaranteed to do so. 
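+//
+// For illustration, the guessing rules below would yield:
+//
+//	Pod       -> pods / pod
+//	Ingress   -> ingresses / ingress
+//	Policy    -> policies / policy
+//	Endpoints -> endpoints / endpoints (see unpluralizedSuffixes above)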
+func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) { + kindName := kind.Kind + if len(kindName) == 0 { + return schema.GroupVersionResource{}, schema.GroupVersionResource{} + } + singularName := strings.ToLower(kindName) + singular := kind.GroupVersion().WithResource(singularName) + + for _, skip := range unpluralizedSuffixes { + if strings.HasSuffix(singularName, skip) { + return singular, singular + } + } + + switch string(singularName[len(singularName)-1]) { + case "s": + return kind.GroupVersion().WithResource(singularName + "es"), singular + case "y": + return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular + } + + return kind.GroupVersion().WithResource(singularName + "s"), singular +} + +// ResourceSingularizer implements RESTMapper +// It converts a resource name from plural to singular (e.g., from pods to pod) +func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) { + partialResource := schema.GroupVersionResource{Resource: resourceType} + resources, err := m.ResourcesFor(partialResource) + if err != nil { + return resourceType, err + } + + singular := schema.GroupVersionResource{} + for _, curr := range resources { + currSingular, ok := m.pluralToSingular[curr] + if !ok { + continue + } + if singular.Empty() { + singular = currSingular + continue + } + + if currSingular.Resource != singular.Resource { + return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType) + } + } + + if singular.Empty() { + return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType) + } + + return singular.Resource, nil +} + +// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) +func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource { + resource.Resource = strings.ToLower(resource.Resource) + if resource.Version == runtime.APIVersionInternal { + resource.Version = "" + } + + return resource +} + +func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { + resource := coerceResourceForMatching(input) + + hasResource := len(resource.Resource) > 0 + hasGroup := len(resource.Group) > 0 + hasVersion := len(resource.Version) > 0 + + if !hasResource { + return nil, fmt.Errorf("a resource must be present, got: %v", resource) + } + + ret := []schema.GroupVersionResource{} + switch { + case hasGroup && hasVersion: + // fully qualified. Find the exact match + for plural, singular := range m.pluralToSingular { + if singular == resource { + ret = append(ret, plural) + break + } + if plural == resource { + ret = append(ret, plural) + break + } + } + + case hasGroup: + // given a group, prefer an exact match. If you don't find one, resort to a prefix match on group + foundExactMatch := false + requestedGroupResource := resource.GroupResource() + for plural, singular := range m.pluralToSingular { + if singular.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, plural) + } + if plural.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, plural) + } + } + + // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match + // storageclass.storage.k8s.io + if !foundExactMatch { + for plural, singular := range m.pluralToSingular { + if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) { + continue + } + if singular.Resource == requestedGroupResource.Resource { + ret = append(ret, plural) + } + if plural.Resource == requestedGroupResource.Resource { + ret = append(ret, plural) + } + } + + } + + case hasVersion: + for plural, singular := range m.pluralToSingular { + if singular.Version == resource.Version && singular.Resource == resource.Resource { + ret = append(ret, plural) + } + if plural.Version == resource.Version && plural.Resource == resource.Resource { + ret = append(ret, plural) + } + } + + default: + for plural, singular := range m.pluralToSingular { + if singular.Resource == resource.Resource { + ret = append(ret, plural) + } + if plural.Resource == resource.Resource { + ret = append(ret, plural) + } + } + } + + if len(ret) == 0 { + return nil, &NoResourceMatchError{PartialResource: resource} + } + + sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions}) + return ret, nil +} + +func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { + resources, err := m.ResourcesFor(resource) + if err != nil { + return schema.GroupVersionResource{}, err + } + if len(resources) == 1 { + return resources[0], nil + } + + return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} +} + +func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { + resource := coerceResourceForMatching(input) + + hasResource := len(resource.Resource) > 0 + hasGroup := len(resource.Group) > 0 + hasVersion := len(resource.Version) > 0 + + if !hasResource { + return nil, fmt.Errorf("a resource must be present, got: %v", resource) + } + + ret := []schema.GroupVersionKind{} + switch { + // fully qualified. Find the exact match + case hasGroup && hasVersion: + kind, exists := m.resourceToKind[resource] + if exists { + ret = append(ret, kind) + } + + case hasGroup: + foundExactMatch := false + requestedGroupResource := resource.GroupResource() + for currResource, currKind := range m.resourceToKind { + if currResource.GroupResource() == requestedGroupResource { + foundExactMatch = true + ret = append(ret, currKind) + } + } + + // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match + // storageclass.storage.k8s.io + if !foundExactMatch { + for currResource, currKind := range m.resourceToKind { + if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) { + continue + } + if currResource.Resource == requestedGroupResource.Resource { + ret = append(ret, currKind) + } + } + + } + + case hasVersion: + for currResource, currKind := range m.resourceToKind { + if currResource.Version == resource.Version && currResource.Resource == resource.Resource { + ret = append(ret, currKind) + } + } + + default: + for currResource, currKind := range m.resourceToKind { + if currResource.Resource == resource.Resource { + ret = append(ret, currKind) + } + } + } + + if len(ret) == 0 { + return nil, &NoResourceMatchError{PartialResource: input} + } + + sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions}) + return ret, nil +} + +func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { + kinds, err := m.KindsFor(resource) + if err != nil { + return schema.GroupVersionKind{}, err + } + if len(kinds) == 1 { + return kinds[0], nil + } + + return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} +} + +type kindByPreferredGroupVersion struct { + list []schema.GroupVersionKind + sortOrder []schema.GroupVersion +} + +func (o kindByPreferredGroupVersion) Len() int { return len(o.list) } +func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } +func (o kindByPreferredGroupVersion) Less(i, j int) bool { + lhs := o.list[i] + rhs := o.list[j] + if lhs == rhs { + return false + } + + if lhs.GroupVersion() == rhs.GroupVersion() { + return lhs.Kind < rhs.Kind + } + + // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order + lhsIndex := -1 + rhsIndex := -1 + + for i := range o.sortOrder { + if o.sortOrder[i] == lhs.GroupVersion() { + lhsIndex = i + } + if o.sortOrder[i] == rhs.GroupVersion() { + rhsIndex = i + } + } + + if rhsIndex == -1 { + return true + } + + return lhsIndex < rhsIndex +} + +type resourceByPreferredGroupVersion struct { + list []schema.GroupVersionResource + sortOrder []schema.GroupVersion +} + +func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) } +func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } +func (o resourceByPreferredGroupVersion) Less(i, j int) bool { + lhs := o.list[i] + rhs := o.list[j] + if lhs == rhs { + return false + } + + if lhs.GroupVersion() == rhs.GroupVersion() { + return lhs.Resource < rhs.Resource + } + + // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order + lhsIndex := -1 + rhsIndex := -1 + + for i := range o.sortOrder { + if o.sortOrder[i] == lhs.GroupVersion() { + lhsIndex = i + } + if o.sortOrder[i] == rhs.GroupVersion() { + rhsIndex = i + } + } + + if rhsIndex == -1 { + return true + } + + return lhsIndex < rhsIndex +} + +// RESTMapping returns a struct representing the resource path and conversion interfaces a +// RESTClient should use to operate on the provided group/kind in order of versions. If a version search +// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which +// version should be used to access the named group/kind. 
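+//
+// For illustration (reusing the sketch from NewDefaultRESTMapper above): calling
+// mapper.RESTMapping(schema.GroupKind{Group: "apps", Kind: "Deployment"}) with no
+// explicit versions would search the registered default group versions and resolve
+// to the same apps/v1 "deployments" mapping.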
+func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { + mappings, err := m.RESTMappings(gk, versions...) + if err != nil { + return nil, err + } + if len(mappings) == 0 { + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} + } + // since we rely on RESTMappings method + // take the first match and return to the caller + // as this was the existing behavior. + return mappings[0], nil +} + +// RESTMappings returns the RESTMappings for the provided group kind. If a version search order +// is not provided, the search order provided to DefaultRESTMapper will be used. +func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { + mappings := make([]*RESTMapping, 0) + potentialGVK := make([]schema.GroupVersionKind, 0) + hadVersion := false + + // Pick an appropriate version + for _, version := range versions { + if len(version) == 0 || version == runtime.APIVersionInternal { + continue + } + currGVK := gk.WithVersion(version) + hadVersion = true + if _, ok := m.kindToPluralResource[currGVK]; ok { + potentialGVK = append(potentialGVK, currGVK) + break + } + } + // Use the default preferred versions + if !hadVersion && len(potentialGVK) == 0 { + for _, gv := range m.defaultGroupVersions { + if gv.Group != gk.Group { + continue + } + potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version)) + } + } + + if len(potentialGVK) == 0 { + return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} + } + + for _, gvk := range potentialGVK { + //Ensure we have a REST mapping + res, ok := m.kindToPluralResource[gvk] + if !ok { + continue + } + + // Ensure we have a REST scope + scope, ok := m.kindToScope[gvk] + if !ok { + return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) + } + + mappings = append(mappings, &RESTMapping{ + Resource: res, + GroupVersionKind: gvk, + Scope: scope, + }) + } + + if len(mappings) == 0 { + return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}} + } + return mappings, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS new file mode 100644 index 0000000..dc77401 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -0,0 +1,16 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- derekwaynecarr +- mikedanese +- saad-ali +- janetkuo +- tallclair +- eparis +- xiang90 +- mbohlool +- david-mcmahon diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go new file mode 100644 index 0000000..a8866a4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/amount.go @@ -0,0 +1,299 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package resource + +import ( + "math/big" + "strconv" + + inf "gopkg.in/inf.v0" +) + +// Scale is used for getting and setting the base-10 scaled value. +// Base-2 scales are omitted for mathematical simplicity. +// See Quantity.ScaledValue for more details. +type Scale int32 + +// infScale adapts a Scale value to an inf.Scale value. +func (s Scale) infScale() inf.Scale { + return inf.Scale(-s) // inf.Scale is upside-down +} + +const ( + Nano Scale = -9 + Micro Scale = -6 + Milli Scale = -3 + Kilo Scale = 3 + Mega Scale = 6 + Giga Scale = 9 + Tera Scale = 12 + Peta Scale = 15 + Exa Scale = 18 +) + +var ( + Zero = int64Amount{} + + // Used by quantity strings - treat as read only + zeroBytes = []byte("0") +) + +// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster +// than operations on inf.Dec for values that can be represented as int64. +// +k8s:openapi-gen=true +type int64Amount struct { + value int64 + scale Scale +} + +// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0. +func (a int64Amount) Sign() int { + switch { + case a.value == 0: + return 0 + case a.value > 0: + return 1 + default: + return -1 + } +} + +// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be +// represented in an int64 OR would result in a loss of precision. This method is intended as +// an optimization to avoid calling AsDec. +func (a int64Amount) AsInt64() (int64, bool) { + if a.scale == 0 { + return a.value, true + } + if a.scale < 0 { + // TODO: attempt to reduce factors, although it is assumed that factors are reduced prior + // to the int64Amount being created. + return 0, false + } + return positiveScaleInt64(a.value, a.scale) +} + +// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale, +// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result +// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger +// than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would +// return 1, because 0.000001 is rounded up to 1. +func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) { + if a.scale < scale { + result, _ = negativeScaleInt64(a.value, scale-a.scale) + return result, true + } + return positiveScaleInt64(a.value, a.scale-scale) +} + +// AsDec returns an inf.Dec representation of this value. +func (a int64Amount) AsDec() *inf.Dec { + var base inf.Dec + base.SetUnscaled(a.value) + base.SetScale(inf.Scale(-a.scale)) + return &base +} + +// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b. 
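+// For example (values are illustrative only): int64Amount{value: 15, scale: -1}
+// represents 1.5 and int64Amount{value: 2, scale: 0} represents 2, so comparing
+// the former against the latter would return -1 once the scales are aligned.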
+func (a int64Amount) Cmp(b int64Amount) int { + switch { + case a.scale == b.scale: + // compare only the unscaled portion + case a.scale > b.scale: + result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == a.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return -1 + default: + return 1 + } + } + b.value = result + default: + result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale) + if !exact { + return a.AsDec().Cmp(b.AsDec()) + } + if result == b.value { + switch { + case remainder == 0: + return 0 + case remainder > 0: + return 1 + default: + return -1 + } + } + a.value = result + } + + switch { + case a.value == b.value: + return 0 + case a.value < b.value: + return -1 + default: + return 1 + } +} + +// Add adds two int64Amounts together, matching scales. It will return false and not mutate +// a if overflow or underflow would result. +func (a *int64Amount) Add(b int64Amount) bool { + switch { + case b.value == 0: + return true + case a.value == 0: + a.value = b.value + a.scale = b.scale + return true + case a.scale == b.scale: + c, ok := int64Add(a.value, b.value) + if !ok { + return false + } + a.value = c + case a.scale > b.scale: + c, ok := positiveScaleInt64(a.value, a.scale-b.scale) + if !ok { + return false + } + c, ok = int64Add(c, b.value) + if !ok { + return false + } + a.scale = b.scale + a.value = c + default: + c, ok := positiveScaleInt64(b.value, b.scale-a.scale) + if !ok { + return false + } + c, ok = int64Add(a.value, c) + if !ok { + return false + } + a.value = c + } + return true +} + +// Sub removes the value of b from the current amount, or returns false if underflow would result. +func (a *int64Amount) Sub(b int64Amount) bool { + return a.Add(int64Amount{value: -b.value, scale: b.scale}) +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) { + if a.scale >= scale { + return a, true + } + result, exact := negativeScaleInt64(a.value, scale-a.scale) + return int64Amount{value: result, scale: scale}, exact +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. +func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.value + exponent = int32(a.scale) + + amount, times := removeInt64Factors(mantissa, 10) + exponent += int32(times) + + // make sure exponent is a multiple of 3 + var ok bool + switch exponent % 3 { + case 1, -2: + amount, ok = int64MultiplyScale10(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent = exponent - 1 + case 2, -1: + amount, ok = int64MultiplyScale100(amount) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBytes(out) + } + exponent = exponent - 2 + } + return strconv.AppendInt(out, amount, 10), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. 
+func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + value, ok := a.AsScaledInt64(0) + if !ok { + return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out) + } + amount, exponent := removeInt64Factors(value, 1024) + return strconv.AppendInt(out, amount, 10), exponent +} + +// infDecAmount implements common operations over an inf.Dec that are specific to the quantity +// representation. +type infDecAmount struct { + *inf.Dec +} + +// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision +// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6. +func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, scale.infScale(), inf.RoundUp) + return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0 +} + +// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted +// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3. +func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + mantissa := a.Dec.UnscaledBig() + exponent = int32(-a.Dec.Scale()) + amount := big.NewInt(0).Set(mantissa) + // move all factors of 10 into the exponent for easy reasoning + amount, times := removeBigIntFactors(amount, bigTen) + exponent += times + + // make sure exponent is a multiple of 3 + for exponent%3 != 0 { + amount.Mul(amount, bigTen) + exponent-- + } + + return append(out, amount.String()...), exponent +} + +// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns +// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would +// return []byte("2048"), 1. +func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) { + tmp := &inf.Dec{} + tmp.Round(a.Dec, 0, inf.RoundUp) + amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024) + return append(out, amount.String()...), exponent +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go new file mode 100644 index 0000000..2e09f4f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto + +package resource + +import ( + fmt "fmt" + + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { + return fileDescriptor_612bba87bd70906c, []int{0} +} +func (m *Quantity) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Quantity.Unmarshal(m, b) +} +func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Quantity.Marshal(b, m, deterministic) +} +func (m *Quantity) XXX_Merge(src proto.Message) { + xxx_messageInfo_Quantity.Merge(m, src) +} +func (m *Quantity) XXX_Size() int { + return xxx_messageInfo_Quantity.Size(m) +} +func (m *Quantity) XXX_DiscardUnknown() { + xxx_messageInfo_Quantity.DiscardUnknown(m) +} + +var xxx_messageInfo_Quantity proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) +} + +var fileDescriptor_612bba87bd70906c = []byte{ + // 237 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, + 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, + 0xc5, 0xc8, 0xce, 0x00, 0x23, 0x5b, 0x92, 0x1e, 0xae, 0x15, 0xd5, 0x8e, 0x2e, 0x36, 0x52, 0xb7, + 0x8e, 0x8c, 0x1d, 0x19, 0x9b, 0xbf, 0xe9, 0xd8, 0xb1, 0x03, 0x03, 0x31, 0x3f, 0x82, 0xea, 0x36, + 0x52, 0xb7, 0x7b, 0xef, 0xf4, 0x4e, 0x97, 0xbd, 0xd4, 0xd3, 0x56, 0x1a, 0xa7, 0xea, 0x50, 0x12, + 0x5b, 0xf2, 0xd4, 0xaa, 0x4f, 0xb2, 0x33, 0xc7, 0xea, 0xb4, 0x28, 0x1a, 0xb3, 0x28, 0xaa, 0xb9, + 0xb1, 0xc4, 0x4b, 0xd5, 0xd4, 0xfa, 0x20, 0x14, 0x53, 0xeb, 0x02, 0x57, 0xa4, 0x34, 0x59, 0xe2, + 0xc2, 0xd3, 0x4c, 0x36, 0xec, 0xbc, 0x1b, 0xdf, 0x1f, 0x2b, 0x79, 0x5e, 0xc9, 0xa6, 0xd6, 0x07, + 0x21, 0x87, 0xea, 0xf6, 0x51, 0x1b, 0x3f, 0x0f, 0xa5, 0xac, 0xdc, 0x42, 0x69, 0xa7, 0x9d, 0x4a, + 0x71, 0x19, 0x3e, 0x12, 0x25, 0x48, 0xd3, 0xf1, 0xe8, 0xdd, 0x34, 0x1b, 0xbd, 0x86, 0xc2, 0x7a, + 0xe3, 0x97, 0xe3, 0xeb, 0xec, 0xa2, 0xf5, 0x6c, 0xac, 0xbe, 0x11, 0x13, 0xf1, 0x70, 0xf9, 0x76, + 0xa2, 0xa7, 0xab, 0xef, 0x4d, 0x0e, 0x5f, 0x5d, 0x0e, 0xeb, 0x2e, 0x87, 0x4d, 0x97, 0xc3, 0xea, + 0x67, 0x02, 0xcf, 0x72, 0xdb, 0x23, 0xec, 0x7a, 0x84, 0x7d, 0x8f, 0xb0, 0x8a, 0x28, 0xb6, 0x11, + 0xc5, 0x2e, 0xa2, 0xd8, 0x47, 0x14, 0xbf, 0x11, 0xc5, 0xfa, 0x0f, 0xe1, 0x7d, 0x34, 0x3c, 0xf6, + 0x1f, 0x00, 0x00, 0xff, 0xff, 0x3c, 0x08, 0x88, 0x49, 0x0e, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto new file mode 100644 index 0000000..18a6c7c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -0,0 +1,88 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+
+// This file was autogenerated by go-to-protobuf. Do not edit it manually!
+
+syntax = 'proto2';
+
+package k8s.io.apimachinery.pkg.api.resource;
+
+// Package-wide variables from generator "generated".
+option go_package = "resource";
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and AsInt64() accessors.
+//
+// The serialization format is:
+//
+// <quantity>        ::= <signedNumber><suffix>
+//   (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit>           ::= 0 | 1 | ... | 9
+// <digits>          ::= <digit> | <digit><digits>
+// <number>          ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign>            ::= "+" | "-"
+// <signedNumber>    ::= <number> | <sign><number>
+// <suffix>          ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI>        ::= Ki | Mi | Gi | Ti | Pi | Ei
+//   (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI>       ::= m | "" | k | M | G | T | P | E
+//   (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+//
+// When a Quantity is parsed from a string, it will remember the type of suffix
+// it had, and will use the same type again when it is serialized.
+//
+// Before serializing, Quantity will be put in "canonical form".
+// This means that Exponent/suffix will be adjusted up or down (with a
+// corresponding increase or decrease in Mantissa) such that:
+//   a. No precision is lost
+//   b. No fractional digits will be emitted
+//   c. The exponent (or suffix) is as large as possible.
+// The sign will be omitted unless the number is negative.
+//
+// Examples:
+//   1.5 will be serialized as "1500m"
+//   1.5Gi will be serialized as "1536Mi"
+//
+// Note that the quantity will NEVER be internally represented by a
+// floating point number. That is the whole point of this exercise.
+//
+// Non-canonical values will still parse as long as they are well formed,
+// but will be re-emitted in their canonical form. (So always use canonical
+// form, or don't diff.)
+//
+// This format is intended to make it difficult to use these numbers without
+// writing some sort of special handling code in the hopes that that will
+// cause implementors to also use a fixed point implementation.
+//
+// +protobuf=true
+// +protobuf.embed=string
+// +protobuf.options.marshal=false
+// +protobuf.options.(gogoproto.goproto_stringer)=false
+// +k8s:deepcopy-gen=true
+// +k8s:openapi-gen=true
+message Quantity {
+  optional string string = 1;
+}
+
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
new file mode 100644
index 0000000..8ffcb9f
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go
@@ -0,0 +1,310 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math/big" + + inf "gopkg.in/inf.v0" +) + +const ( + // maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64. + // It is also the maximum decimal digits that can be represented with an int64. + maxInt64Factors = 18 +) + +var ( + // Commonly needed big.Int values-- treat as read only! + bigTen = big.NewInt(10) + bigZero = big.NewInt(0) + bigOne = big.NewInt(1) + bigThousand = big.NewInt(1000) + big1024 = big.NewInt(1024) + + // Commonly needed inf.Dec values-- treat as read only! + decZero = inf.NewDec(0, 0) + decOne = inf.NewDec(1, 0) + + // Largest (in magnitude) number allowed. + maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64 + + // The maximum value we can represent milli-units for. + // Compare with the return value of Quantity.Value() to + // see if it's safe to use Quantity.MilliValue(). + MaxMilliValue = int64(((1 << 63) - 1) / 1000) +) + +const mostNegative = -(mostPositive + 1) +const mostPositive = 1<<63 - 1 + +// int64Add returns a+b, or false if that would overflow int64. +func int64Add(a, b int64) (int64, bool) { + c := a + b + switch { + case a > 0 && b > 0: + if c < 0 { + return 0, false + } + case a < 0 && b < 0: + if c > 0 { + return 0, false + } + if a == mostNegative && b == mostNegative { + return 0, false + } + } + return c, true +} + +// int64Multiply returns a*b, or false if that would overflow or underflow int64. +func int64Multiply(a, b int64) (int64, bool) { + if a == 0 || b == 0 || a == 1 || b == 1 { + return a * b, true + } + if a == mostNegative || b == mostNegative { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64. +// Use when b is known to be greater than one. +func int64MultiplyScale(a int64, b int64) (int64, bool) { + if a == 0 || a == 1 { + return a * b, true + } + if a == mostNegative && b != 1 { + return 0, false + } + c := a * b + return c, c/b == a +} + +// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale10(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 10, true + } + if a == mostNegative { + return 0, false + } + c := a * 10 + return c, c/10 == a +} + +// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication. +func int64MultiplyScale100(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 100, true + } + if a == mostNegative { + return 0, false + } + c := a * 100 + return c, c/100 == a +} + +// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than +// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication. 
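+//
+// As with the other fixed-factor helpers above, overflow is detected by dividing
+// the product back out (c/1000 == a), with the most negative int64 rejected up
+// front as a special case.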
+func int64MultiplyScale1000(a int64) (int64, bool) { + if a == 0 || a == 1 { + return a * 1000, true + } + if a == mostNegative { + return 0, false + } + c := a * 1000 + return c, c/1000 == a +} + +// positiveScaleInt64 multiplies base by 10^scale, returning false if the +// value overflows. Passing a negative scale is undefined. +func positiveScaleInt64(base int64, scale Scale) (int64, bool) { + switch scale { + case 0: + return base, true + case 1: + return int64MultiplyScale10(base) + case 2: + return int64MultiplyScale100(base) + case 3: + return int64MultiplyScale1000(base) + case 6: + return int64MultiplyScale(base, 1000000) + case 9: + return int64MultiplyScale(base, 1000000000) + default: + value := base + var ok bool + for i := Scale(0); i < scale; i++ { + if value, ok = int64MultiplyScale(value, 10); !ok { + return 0, false + } + } + return value, true + } +} + +// negativeScaleInt64 reduces base by the provided scale, rounding up, until the +// value is zero or the scale is reached. Passing a negative scale is undefined. +// The value returned, if not exact, is rounded away from zero. +func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { + if scale == 0 { + return base, true + } + + value := base + var fraction bool + for i := Scale(0); i < scale; i++ { + if !fraction && value%10 != 0 { + fraction = true + } + value = value / 10 + if value == 0 { + if fraction { + if base > 0 { + return 1, false + } + return -1, false + } + return 0, true + } + } + if fraction { + if base > 0 { + value++ + } else { + value-- + } + } + return value, !fraction +} + +func pow10Int64(b int64) int64 { + switch b { + case 0: + return 1 + case 1: + return 10 + case 2: + return 100 + case 3: + return 1000 + case 4: + return 10000 + case 5: + return 100000 + case 6: + return 1000000 + case 7: + return 10000000 + case 8: + return 100000000 + case 9: + return 1000000000 + case 10: + return 10000000000 + case 11: + return 100000000000 + case 12: + return 1000000000000 + case 13: + return 10000000000000 + case 14: + return 100000000000000 + case 15: + return 1000000000000000 + case 16: + return 10000000000000000 + case 17: + return 100000000000000000 + case 18: + return 1000000000000000000 + default: + return 0 + } +} + +// negativeScaleInt64 returns the result of dividing base by scale * 10 and the remainder, or +// false if no such division is possible. Dividing by negative scales is undefined. 
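The overflow guards above avoid math/big on the hot path by multiplying and then checking that the product divides back to its operand. A self-contained sketch of that check, using only the standard library (mulInt64 is an illustrative name, not part of the vendored package):

package main

import (
	"fmt"
	"math"
)

// mulInt64 mirrors the divide-back overflow check used by int64Multiply above:
// compute a*b, then confirm c/b recovers a. math.MinInt64 is excluded up front
// because its products can wrap back to themselves and defeat the check.
func mulInt64(a, b int64) (int64, bool) {
	if a == 0 || b == 0 || a == 1 || b == 1 {
		return a * b, true
	}
	if a == math.MinInt64 || b == math.MinInt64 {
		return 0, false
	}
	c := a * b
	return c, c/b == a
}

func main() {
	fmt.Println(mulInt64(1<<40, 1<<10)) // 1125899906842624 true
	fmt.Println(mulInt64(1<<40, 1<<30)) // 0 false (the product overflowed int64)
}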
+func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) {
+	if scale == 0 {
+		return base, 0, true
+	}
+	// the max scale representable in base 10 in an int64 is 18 decimal places
+	if scale >= 18 {
+		return 0, base, false
+	}
+	divisor := pow10Int64(int64(scale))
+	return base / divisor, base % divisor, true
+}
+
+// removeInt64Factors divides in a loop; the return values have the property that
+// value == result * base ^ scale
+func removeInt64Factors(value int64, base int64) (result int64, times int32) {
+	times = 0
+	result = value
+	negative := result < 0
+	if negative {
+		result = -result
+	}
+	switch base {
+	// allow the compiler to optimize the common cases
+	case 10:
+		for result >= 10 && result%10 == 0 {
+			times++
+			result = result / 10
+		}
+	// allow the compiler to optimize the common cases
+	case 1024:
+		for result >= 1024 && result%1024 == 0 {
+			times++
+			result = result / 1024
+		}
+	default:
+		for result >= base && result%base == 0 {
+			times++
+			result = result / base
+		}
+	}
+	if negative {
+		result = -result
+	}
+	return result, times
+}
+
+// removeBigIntFactors divides in a loop; the return values have the property that
+// d == result * factor ^ times
+// d may be modified in place.
+// If d == 0, then the return values will be (0, 0)
+func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) {
+	q := big.NewInt(0)
+	m := big.NewInt(0)
+	for d.Cmp(bigZero) != 0 {
+		q.DivMod(d, factor, m)
+		if m.Cmp(bigZero) != 0 {
+			break
+		}
+		times++
+		d, q = q, d
+	}
+	return d, times
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
new file mode 100644
index 0000000..d95e03a
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go
@@ -0,0 +1,733 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package resource
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math/big"
+	"strconv"
+	"strings"
+
+	inf "gopkg.in/inf.v0"
+)
+
+// Quantity is a fixed-point representation of a number.
+// It provides convenient marshaling/unmarshaling in JSON and YAML,
+// in addition to String() and AsInt64() accessors.
+//
+// The serialization format is:
+//
+// <quantity> ::= <signedNumber><suffix>
+// (Note that <suffix> may be empty, from the "" case in <decimalSI>.)
+// <digit> ::= 0 | 1 | ... | 9
+// <digits> ::= <digit> | <digit><digits>
+// <number> ::= <digits> | <digits>.<digits> | <digits>. | .<digits>
+// <sign> ::= "+" | "-"
+// <signedNumber> ::= <number> | <sign><number>
+// <suffix> ::= <binarySI> | <decimalExponent> | <decimalSI>
+// <binarySI> ::= Ki | Mi | Gi | Ti | Pi | Ei
+// (International System of units; See: http://physics.nist.gov/cuu/Units/binary.html)
+// <decimalSI> ::= m | "" | k | M | G | T | P | E
+// (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.)
+// <decimalExponent> ::= "e" <signedNumber> | "E" <signedNumber>
+//
+// No matter which of the three exponent forms is used, no quantity may represent
+// a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal
+// places. Numbers larger or more precise will be capped or rounded up.
+// (E.g.: 0.1m will rounded up to 1m.)
+// This may be extended in the future if we require larger or smaller quantities.
+// +// When a Quantity is parsed from a string, it will remember the type of suffix +// it had, and will use the same type again when it is serialized. +// +// Before serializing, Quantity will be put in "canonical form". +// This means that Exponent/suffix will be adjusted up or down (with a +// corresponding increase or decrease in Mantissa) such that: +// a. No precision is lost +// b. No fractional digits will be emitted +// c. The exponent (or suffix) is as large as possible. +// The sign will be omitted unless the number is negative. +// +// Examples: +// 1.5 will be serialized as "1500m" +// 1.5Gi will be serialized as "1536Mi" +// +// Note that the quantity will NEVER be internally represented by a +// floating point number. That is the whole point of this exercise. +// +// Non-canonical values will still parse as long as they are well formed, +// but will be re-emitted in their canonical form. (So always use canonical +// form, or don't diff.) +// +// This format is intended to make it difficult to use these numbers without +// writing some sort of special handling code in the hopes that that will +// cause implementors to also use a fixed point implementation. +// +// +protobuf=true +// +protobuf.embed=string +// +protobuf.options.marshal=false +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen=true +// +k8s:openapi-gen=true +type Quantity struct { + // i is the quantity in int64 scaled form, if d.Dec == nil + i int64Amount + // d is the quantity in inf.Dec form if d.Dec != nil + d infDecAmount + // s is the generated value of this quantity to avoid recalculation + s string + + // Change Format at will. See the comment for Canonicalize for + // more details. + Format +} + +// CanonicalValue allows a quantity amount to be converted to a string. +type CanonicalValue interface { + // AsCanonicalBytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-10. Callers may + // pass a byte slice to the method to avoid allocations. + AsCanonicalBytes(out []byte) ([]byte, int32) + // AsCanonicalBase1024Bytes returns a byte array representing the string representation + // of the value mantissa and an int32 representing its exponent in base-1024. Callers + // may pass a byte slice to the method to avoid allocations. + AsCanonicalBase1024Bytes(out []byte) ([]byte, int32) +} + +// Format lists the three possible formattings of a quantity. +type Format string + +const ( + DecimalExponent = Format("DecimalExponent") // e.g., 12e6 + BinarySI = Format("BinarySI") // e.g., 12Mi (12 * 2^20) + DecimalSI = Format("DecimalSI") // e.g., 12M (12 * 10^6) +) + +// MustParse turns the given string into a quantity or panics; for tests +// or others cases where you know the string is valid. +func MustParse(str string) Quantity { + q, err := ParseQuantity(str) + if err != nil { + panic(fmt.Errorf("cannot parse '%v': %v", str, err)) + } + return q +} + +const ( + // splitREString is used to separate a number from its suffix; as such, + // this is overly permissive, but that's OK-- it will be checked later. + splitREString = "^([+-]?[0-9.]+)([eEinumkKMGTP]*[-+]?[0-9]*)$" +) + +var ( + // Errors that could happen while parsing a string. 
+ ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'") + ErrNumeric = errors.New("unable to parse numeric part of quantity") + ErrSuffix = errors.New("unable to parse quantity's suffix") +) + +// parseQuantityString is a fast scanner for quantity values. +func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) { + positive = true + pos := 0 + end := len(str) + + // handle leading sign + if pos < end { + switch str[0] { + case '-': + positive = false + pos++ + case '+': + pos++ + } + } + + // strip leading zeros +Zeroes: + for i := pos; ; i++ { + if i >= end { + num = "0" + value = num + return + } + switch str[i] { + case '0': + pos++ + default: + break Zeroes + } + } + + // extract the numerator +Num: + for i := pos; ; i++ { + if i >= end { + num = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + num = str[pos:i] + pos = i + break Num + } + } + + // if we stripped all numerator positions, always return 0 + if len(num) == 0 { + num = "0" + } + + // handle a denominator + if pos < end && str[pos] == '.' { + pos++ + Denom: + for i := pos; ; i++ { + if i >= end { + denom = str[pos:end] + value = str[0:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + denom = str[pos:i] + pos = i + break Denom + } + } + // TODO: we currently allow 1.G, but we may not want to in the future. + // if len(denom) == 0 { + // err = ErrFormatWrong + // return + // } + } + value = str[0:pos] + + // grab the elements of the suffix + suffixStart := pos + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") { + pos = i + break + } + } + if pos < end { + switch str[pos] { + case '-', '+': + pos++ + } + } +Suffix: + for i := pos; ; i++ { + if i >= end { + suffix = str[suffixStart:end] + return + } + switch str[i] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + break Suffix + } + } + // we encountered a non decimal in the Suffix loop, but the last character + // was not a valid exponent + err = ErrFormatWrong + return +} + +// ParseQuantity turns str into a Quantity, or returns an error. 
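A minimal usage sketch of the parser defined just below, with outputs that follow the canonicalization rules documented above (the import path matches this vendor directory):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Canonical form trades the decimal point for a milli suffix.
	q, err := resource.ParseQuantity("1.5")
	if err != nil {
		panic(err)
	}
	fmt.Println(q.String()) // 1500m

	// A fractional binary-SI value is re-emitted at a smaller binary suffix.
	fmt.Println(resource.MustParse("1.5Gi").String()) // 1536Mi

	// Accessors stay in fixed point; Value() rounds away from zero.
	m := resource.MustParse("500m")
	fmt.Println(m.MilliValue(), m.Value()) // 500 1
}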
+func ParseQuantity(str string) (Quantity, error) { + if len(str) == 0 { + return Quantity{}, ErrFormatWrong + } + if str == "0" { + return Quantity{Format: DecimalSI, s: str}, nil + } + + positive, value, num, denom, suf, err := parseQuantityString(str) + if err != nil { + return Quantity{}, err + } + + base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf)) + if !ok { + return Quantity{}, ErrSuffix + } + + precision := int32(0) + scale := int32(0) + mantissa := int64(1) + switch format { + case DecimalExponent, DecimalSI: + scale = exponent + precision = maxInt64Factors - int32(len(num)+len(denom)) + case BinarySI: + scale = 0 + switch { + case exponent >= 0 && len(denom) == 0: + // only handle positive binary numbers with the fast path + mantissa = int64(int64(mantissa) << uint64(exponent)) + // 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision + precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1 + default: + precision = -1 + } + } + + if precision >= 0 { + // if we have a denominator, shift the entire value to the left by the number of places in the + // denominator + scale -= int32(len(denom)) + if scale >= int32(Nano) { + shifted := num + denom + + var value int64 + value, err := strconv.ParseInt(shifted, 10, 64) + if err != nil { + return Quantity{}, ErrNumeric + } + if result, ok := int64Multiply(value, int64(mantissa)); ok { + if !positive { + result = -result + } + // if the number is in canonical form, reuse the string + switch format { + case BinarySI: + if exponent%10 == 0 && (value&0x07 != 0) { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + default: + if scale%3 == 0 && !strings.HasSuffix(shifted, "000") && shifted[0] != '0' { + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format, s: str}, nil + } + } + return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil + } + } + } + + amount := new(inf.Dec) + if _, ok := amount.SetString(value); !ok { + return Quantity{}, ErrNumeric + } + + // So that no one but us has to think about suffixes, remove it. + if base == 10 { + amount.SetScale(amount.Scale() + Scale(exponent).infScale()) + } else if base == 2 { + // numericSuffix = 2 ** exponent + numericSuffix := big.NewInt(1).Lsh(bigOne, uint(exponent)) + ub := amount.UnscaledBig() + amount.SetUnscaledBig(ub.Mul(ub, numericSuffix)) + } + + // Cap at min/max bounds. + sign := amount.Sign() + if sign == -1 { + amount.Neg(amount) + } + + // This rounds non-zero values up to the minimum representable value, under the theory that + // if you want some resources, you should get some resources, even if you asked for way too small + // of an amount. Arguably, this should be inf.RoundHalfUp (normal rounding), but that would have + // the side effect of rounding values < .5n to zero. + if v, ok := amount.Unscaled(); v != int64(0) || !ok { + amount.Round(amount, Nano.infScale(), inf.RoundUp) + } + + // The max is just a simple cap. + // TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster + if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 { + amount.Set(maxAllowed.Dec) + } + + if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 { + // This avoids rounding and hopefully confusion, too. 
+ format = DecimalSI + } + if sign == -1 { + amount.Neg(amount) + } + + return Quantity{d: infDecAmount{amount}, Format: format}, nil +} + +// DeepCopy returns a deep-copy of the Quantity value. Note that the method +// receiver is a value, so we can mutate it in-place and return it. +func (q Quantity) DeepCopy() Quantity { + if q.d.Dec != nil { + tmp := &inf.Dec{} + q.d.Dec = tmp.Set(q.d.Dec) + } + return q +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Quantity) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Quantity) OpenAPISchemaFormat() string { return "" } + +// CanonicalizeBytes returns the canonical form of q and its suffix (see comment on Quantity). +// +// Note about BinarySI: +// * If q.Format is set to BinarySI and q.Amount represents a non-zero value between +// -1 and +1, it will be emitted as if q.Format were DecimalSI. +// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be +// rounded up. (1.1i becomes 2i.) +func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) { + if q.IsZero() { + return zeroBytes, nil + } + + var rounded CanonicalValue + format := q.Format + switch format { + case DecimalExponent, DecimalSI: + case BinarySI: + if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 { + // This avoids rounding and hopefully confusion, too. + format = DecimalSI + } else { + var exact bool + if rounded, exact = q.AsScale(0); !exact { + // Don't lose precision-- show as DecimalSI + format = DecimalSI + } + } + default: + format = DecimalExponent + } + + // TODO: If BinarySI formatting is requested but would cause rounding, upgrade to + // one of the other formats. + switch format { + case DecimalExponent, DecimalSI: + number, exponent := q.AsCanonicalBytes(out) + suffix, _ := quantitySuffixer.constructBytes(10, exponent, format) + return number, suffix + default: + // format must be BinarySI + number, exponent := rounded.AsCanonicalBase1024Bytes(out) + suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format) + return number, suffix + } +} + +// AsInt64 returns a representation of the current value as an int64 if a fast conversion +// is possible. If false is returned, callers must use the inf.Dec form of this quantity. +func (q *Quantity) AsInt64() (int64, bool) { + if q.d.Dec != nil { + return 0, false + } + return q.i.AsInt64() +} + +// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself. +func (q *Quantity) ToDec() *Quantity { + if q.d.Dec == nil { + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + } + return q +} + +// AsDec returns the quantity as represented by a scaled inf.Dec. +func (q *Quantity) AsDec() *inf.Dec { + if q.d.Dec != nil { + return q.d.Dec + } + q.d.Dec = q.i.AsDec() + q.i = int64Amount{} + return q.d.Dec +} + +// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa +// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra +// allocation. +func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) { + if q.d.Dec != nil { + return q.d.AsCanonicalBytes(out) + } + return q.i.AsCanonicalBytes(out) +} + +// IsZero returns true if the quantity is equal to zero. 
+func (q *Quantity) IsZero() bool { + if q.d.Dec != nil { + return q.d.Dec.Sign() == 0 + } + return q.i.value == 0 +} + +// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the +// quantity is greater than zero. +func (q *Quantity) Sign() int { + if q.d.Dec != nil { + return q.d.Dec.Sign() + } + return q.i.Sign() +} + +// AsScale returns the current value, rounded up to the provided scale, and returns +// false if the scale resulted in a loss of precision. +func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) { + if q.d.Dec != nil { + return q.d.AsScale(scale) + } + return q.i.AsScale(scale) +} + +// RoundUp updates the quantity to the provided scale, ensuring that the value is at +// least 1. False is returned if the rounding operation resulted in a loss of precision. +// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10). +func (q *Quantity) RoundUp(scale Scale) bool { + if q.d.Dec != nil { + q.s = "" + d, exact := q.d.AsScale(scale) + q.d = d + return exact + } + // avoid clearing the string value if we have already calculated it + if q.i.scale >= scale { + return true + } + q.s = "" + i, exact := q.i.AsScale(scale) + q.i = i + return exact +} + +// Add adds the provide y quantity to the current value. If the current value is zero, +// the format of the quantity will be updated to the format of y. +func (q *Quantity) Add(y Quantity) { + q.s = "" + if q.d.Dec == nil && y.d.Dec == nil { + if q.i.value == 0 { + q.Format = y.Format + } + if q.i.Add(y.i) { + return + } + } else if q.IsZero() { + q.Format = y.Format + } + q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec()) +} + +// Sub subtracts the provided quantity from the current value in place. If the current +// value is zero, the format of the quantity will be updated to the format of y. +func (q *Quantity) Sub(y Quantity) { + q.s = "" + if q.IsZero() { + q.Format = y.Format + } + if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) { + return + } + q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec()) +} + +// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) Cmp(y Quantity) int { + if q.d.Dec == nil && y.d.Dec == nil { + return q.i.Cmp(y.i) + } + return q.AsDec().Cmp(y.AsDec()) +} + +// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the +// quantity is greater than y. +func (q *Quantity) CmpInt64(y int64) int { + if q.d.Dec != nil { + return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0))) + } + return q.i.Cmp(int64Amount{value: y}) +} + +// Neg sets quantity to be the negative value of itself. +func (q *Quantity) Neg() { + q.s = "" + if q.d.Dec == nil { + q.i.value = -q.i.value + return + } + q.d.Dec.Neg(q.d.Dec) +} + +// Equal checks equality of two Quantities. This is useful for testing with +// cmp.Equal. +func (q Quantity) Equal(v Quantity) bool { + return q.Cmp(v) == 0 +} + +// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation +// of most Quantity values. +const int64QuantityExpectedBytes = 18 + +// String formats the Quantity as a string, caching the result if not calculated. +// String is an expensive operation and caching this result significantly reduces the cost of +// normal parse / marshal operations on Quantity. 
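The in-place arithmetic above (Add, Sub, Cmp, Neg) stays in fixed point throughout; a short usage sketch, with results expected from the documented canonical form:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	cpu := resource.MustParse("1")
	cpu.Add(resource.MustParse("500m")) // 1 core + 0.5 core
	fmt.Println(cpu.String())           // 1500m

	fmt.Println(cpu.Cmp(resource.MustParse("1500m"))) // 0 (equal)

	cpu.Sub(resource.MustParse("2")) // drops below zero
	fmt.Println(cpu.Sign())          // -1
}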
+func (q *Quantity) String() string { + if len(q.s) == 0 { + result := make([]byte, 0, int64QuantityExpectedBytes) + number, suffix := q.CanonicalizeBytes(result) + number = append(number, suffix...) + q.s = string(number) + } + return q.s +} + +// MarshalJSON implements the json.Marshaller interface. +func (q Quantity) MarshalJSON() ([]byte, error) { + if len(q.s) > 0 { + out := make([]byte, len(q.s)+2) + out[0], out[len(out)-1] = '"', '"' + copy(out[1:], q.s) + return out, nil + } + result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes) + result[0] = '"' + number, suffix := q.CanonicalizeBytes(result[1:1]) + // if the same slice was returned to us that we passed in, avoid another allocation by copying number into + // the source slice and returning that + if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes { + number = append(number, suffix...) + number = append(number, '"') + return result[:1+len(number)], nil + } + // if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use + // append + result = result[:1] + result = append(result, number...) + result = append(result, suffix...) + result = append(result, '"') + return result, nil +} + +// ToUnstructured implements the value.UnstructuredConverter interface. +func (q Quantity) ToUnstructured() interface{} { + return q.String() +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +// TODO: Remove support for leading/trailing whitespace +func (q *Quantity) UnmarshalJSON(value []byte) error { + l := len(value) + if l == 4 && bytes.Equal(value, []byte("null")) { + q.d.Dec = nil + q.i = int64Amount{} + return nil + } + if l >= 2 && value[0] == '"' && value[l-1] == '"' { + value = value[1 : l-1] + } + + parsed, err := ParseQuantity(strings.TrimSpace(string(value))) + if err != nil { + return err + } + + // This copy is safe because parsed will not be referred to again. + *q = parsed + return nil +} + +// NewQuantity returns a new Quantity representing the given +// value in the given format. +func NewQuantity(value int64, format Format) *Quantity { + return &Quantity{ + i: int64Amount{value: value}, + Format: format, + } +} + +// NewMilliQuantity returns a new Quantity representing the given +// value * 1/1000 in the given format. Note that BinarySI formatting +// will round fractional values, and will be changed to DecimalSI for +// values x where (-1 < x < 1) && (x != 0). +func NewMilliQuantity(value int64, format Format) *Quantity { + return &Quantity{ + i: int64Amount{value: value, scale: -3}, + Format: format, + } +} + +// NewScaledQuantity returns a new Quantity representing the given +// value * 10^scale in DecimalSI format. +func NewScaledQuantity(value int64, scale Scale) *Quantity { + return &Quantity{ + i: int64Amount{value: value, scale: scale}, + Format: DecimalSI, + } +} + +// Value returns the unscaled value of q rounded up to the nearest integer away from 0. +func (q *Quantity) Value() int64 { + return q.ScaledValue(0) +} + +// MilliValue returns the value of ceil(q * 1000); this could overflow an int64; +// if that's a concern, call Value() first to verify the number is small enough. +func (q *Quantity) MilliValue() int64 { + return q.ScaledValue(Milli) +} + +// ScaledValue returns the value of ceil(q / 10^scale). +// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000. +// This could overflow an int64. 
+// To detect overflow, call Value() first and verify the expected magnitude. +func (q *Quantity) ScaledValue(scale Scale) int64 { + if q.d.Dec == nil { + i, _ := q.i.AsScaledInt64(scale) + return i + } + dec := q.d.Dec + return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale())) +} + +// Set sets q's value to be value. +func (q *Quantity) Set(value int64) { + q.SetScaled(value, 0) +} + +// SetMilli sets q's value to be value * 1/1000. +func (q *Quantity) SetMilli(value int64) { + q.SetScaled(value, Milli) +} + +// SetScaled sets q's value to be value * 10^scale +func (q *Quantity) SetScaled(value int64, scale Scale) { + q.s = "" + q.d.Dec = nil + q.i = int64Amount{value: value, scale: scale} +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go new file mode 100644 index 0000000..f89ca16 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -0,0 +1,288 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "fmt" + "io" + "math/bits" + + "github.com/gogo/protobuf/proto" +) + +var _ proto.Sizer = &Quantity{} + +func (m *Quantity) Marshal() (data []byte, err error) { + size := m.Size() + data = make([]byte, size) + n, err := m.MarshalToSizedBuffer(data[:size]) + if err != nil { + return nil, err + } + return data[:n], nil +} + +// MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct +// with a single string field. +func (m *Quantity) MarshalTo(data []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(data[:size]) +} + +// MarshalToSizedBuffer is a customized version of the generated +// Protobuf unmarshaler for a struct with a single string field. +func (m *Quantity) MarshalToSizedBuffer(data []byte) (int, error) { + i := len(data) + _ = i + var l int + _ = l + + // BEGIN CUSTOM MARSHAL + out := m.String() + i -= len(out) + copy(data[i:], out) + i = encodeVarintGenerated(data, i, uint64(len(out))) + // END CUSTOM MARSHAL + i-- + data[i] = 0xa + + return len(data) - i, nil +} + +func encodeVarintGenerated(data []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + data[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + data[offset] = uint8(v) + return base +} + +func (m *Quantity) Size() (n int) { + var l int + _ = l + + // BEGIN CUSTOM SIZE + l = len(m.String()) + // END CUSTOM SIZE + + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} + +// Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct +// with a single string field. 
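Since the protobuf wire form is just the canonical string in field 1, Marshal above and Unmarshal defined just below round-trip through ParseQuantity; a small sketch:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	q := resource.MustParse("250m")

	// Encodes as: tag 0x0a, varint length, then the canonical string "250m".
	data, err := q.Marshal()
	if err != nil {
		panic(err)
	}

	var out resource.Quantity
	if err := out.Unmarshal(data); err != nil { // re-parses via ParseQuantity
		panic(err)
	}
	fmt.Println(out.String()) // 250m
}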
+func (m *Quantity) Unmarshal(data []byte) error { + l := len(data) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Quantity: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Quantity: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(data[iNdEx:postIndex]) + + // BEGIN CUSTOM DECODE + p, err := ParseQuantity(s) + if err != nil { + return err + } + *m = p + // END CUSTOM DECODE + + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(data[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} + +func skipGenerated(data []byte) (n int, err error) { + l := len(data) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if data[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(data[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + 
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go new file mode 100644 index 0000000..55e177b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/scale_int.go @@ -0,0 +1,95 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "math" + "math/big" + "sync" +) + +var ( + // A sync pool to reduce allocation. + intPool sync.Pool + maxInt64 = big.NewInt(math.MaxInt64) +) + +func init() { + intPool.New = func() interface{} { + return &big.Int{} + } +} + +// scaledValue scales given unscaled value from scale to new Scale and returns +// an int64. It ALWAYS rounds up the result when scale down. The final result might +// overflow. +// +// scale, newScale represents the scale of the unscaled decimal. +// The mathematical value of the decimal is unscaled * 10**(-scale). +func scaledValue(unscaled *big.Int, scale, newScale int) int64 { + dif := scale - newScale + if dif == 0 { + return unscaled.Int64() + } + + // Handle scale up + // This is an easy case, we do not need to care about rounding and overflow. + // If any intermediate operation causes overflow, the result will overflow. + if dif < 0 { + return unscaled.Int64() * int64(math.Pow10(-dif)) + } + + // Handle scale down + // We have to be careful about the intermediate operations. + + // fast path when unscaled < max.Int64 and exp(10,dif) < max.Int64 + const log10MaxInt64 = 19 + if unscaled.Cmp(maxInt64) < 0 && dif < log10MaxInt64 { + divide := int64(math.Pow10(dif)) + result := unscaled.Int64() / divide + mod := unscaled.Int64() % divide + if mod != 0 { + return result + 1 + } + return result + } + + // We should only convert back to int64 when getting the result. + divisor := intPool.Get().(*big.Int) + exp := intPool.Get().(*big.Int) + result := intPool.Get().(*big.Int) + defer func() { + intPool.Put(divisor) + intPool.Put(exp) + intPool.Put(result) + }() + + // divisor = 10^(dif) + // TODO: create loop up table if exp costs too much. + divisor.Exp(bigTen, exp.SetInt64(int64(dif)), nil) + // reuse exp + remainder := exp + + // result = unscaled / divisor + // remainder = unscaled % divisor + result.DivMod(unscaled, divisor, remainder) + if remainder.Sign() != 0 { + return result.Int64() + 1 + } + + return result.Int64() +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go new file mode 100644 index 0000000..5ed7abe --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/suffix.go @@ -0,0 +1,198 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package resource + +import ( + "strconv" +) + +type suffix string + +// suffixer can interpret and construct suffixes. +type suffixer interface { + interpret(suffix) (base, exponent int32, fmt Format, ok bool) + construct(base, exponent int32, fmt Format) (s suffix, ok bool) + constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool) +} + +// quantitySuffixer handles suffixes for all three formats that quantity +// can handle. +var quantitySuffixer = newSuffixer() + +type bePair struct { + base, exponent int32 +} + +type listSuffixer struct { + suffixToBE map[suffix]bePair + beToSuffix map[bePair]suffix + beToSuffixBytes map[bePair][]byte +} + +func (ls *listSuffixer) addSuffix(s suffix, pair bePair) { + if ls.suffixToBE == nil { + ls.suffixToBE = map[suffix]bePair{} + } + if ls.beToSuffix == nil { + ls.beToSuffix = map[bePair]suffix{} + } + if ls.beToSuffixBytes == nil { + ls.beToSuffixBytes = map[bePair][]byte{} + } + ls.suffixToBE[s] = pair + ls.beToSuffix[pair] = s + ls.beToSuffixBytes[pair] = []byte(s) +} + +func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) { + pair, ok := ls.suffixToBE[s] + if !ok { + return 0, 0, false + } + return pair.base, pair.exponent, true +} + +func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) { + s, ok = ls.beToSuffix[bePair{base, exponent}] + return +} + +func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) { + s, ok = ls.beToSuffixBytes[bePair{base, exponent}] + return +} + +type suffixHandler struct { + decSuffixes listSuffixer + binSuffixes listSuffixer +} + +type fastLookup struct { + *suffixHandler +} + +func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) { + switch s { + case "": + return 10, 0, DecimalSI, true + case "n": + return 10, -9, DecimalSI, true + case "u": + return 10, -6, DecimalSI, true + case "m": + return 10, -3, DecimalSI, true + case "k": + return 10, 3, DecimalSI, true + case "M": + return 10, 6, DecimalSI, true + case "G": + return 10, 9, DecimalSI, true + } + return l.suffixHandler.interpret(s) +} + +func newSuffixer() suffixer { + sh := &suffixHandler{} + + // IMPORTANT: if you change this section you must change fastLookup + + sh.binSuffixes.addSuffix("Ki", bePair{2, 10}) + sh.binSuffixes.addSuffix("Mi", bePair{2, 20}) + sh.binSuffixes.addSuffix("Gi", bePair{2, 30}) + sh.binSuffixes.addSuffix("Ti", bePair{2, 40}) + sh.binSuffixes.addSuffix("Pi", bePair{2, 50}) + sh.binSuffixes.addSuffix("Ei", bePair{2, 60}) + // Don't emit an error when trying to produce + // a suffix for 2^0. 
+ sh.decSuffixes.addSuffix("", bePair{2, 0}) + + sh.decSuffixes.addSuffix("n", bePair{10, -9}) + sh.decSuffixes.addSuffix("u", bePair{10, -6}) + sh.decSuffixes.addSuffix("m", bePair{10, -3}) + sh.decSuffixes.addSuffix("", bePair{10, 0}) + sh.decSuffixes.addSuffix("k", bePair{10, 3}) + sh.decSuffixes.addSuffix("M", bePair{10, 6}) + sh.decSuffixes.addSuffix("G", bePair{10, 9}) + sh.decSuffixes.addSuffix("T", bePair{10, 12}) + sh.decSuffixes.addSuffix("P", bePair{10, 15}) + sh.decSuffixes.addSuffix("E", bePair{10, 18}) + + return fastLookup{sh} +} + +func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) { + switch fmt { + case DecimalSI: + return sh.decSuffixes.construct(base, exponent) + case BinarySI: + return sh.binSuffixes.construct(base, exponent) + case DecimalExponent: + if base != 10 { + return "", false + } + if exponent == 0 { + return "", true + } + return suffix("e" + strconv.FormatInt(int64(exponent), 10)), true + } + return "", false +} + +func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) { + switch format { + case DecimalSI: + return sh.decSuffixes.constructBytes(base, exponent) + case BinarySI: + return sh.binSuffixes.constructBytes(base, exponent) + case DecimalExponent: + if base != 10 { + return nil, false + } + if exponent == 0 { + return nil, true + } + result := make([]byte, 8, 8) + result[0] = 'e' + number := strconv.AppendInt(result[1:1], int64(exponent), 10) + if &result[1] == &number[0] { + return result[:1+len(number)], true + } + result = append(result[:1], number...) + return result, true + } + return nil, false +} + +func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) { + // Try lookup tables first + if b, e, ok := sh.decSuffixes.lookup(suffix); ok { + return b, e, DecimalSI, true + } + if b, e, ok := sh.binSuffixes.lookup(suffix); ok { + return b, e, BinarySI, true + } + + if len(suffix) > 1 && (suffix[0] == 'E' || suffix[0] == 'e') { + parsed, err := strconv.ParseInt(string(suffix[1:]), 10, 64) + if err != nil { + return 0, 0, DecimalExponent, false + } + return 10, int32(parsed), DecimalExponent, true + } + + return 0, 0, DecimalExponent, false +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go new file mode 100644 index 0000000..ab47407 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/zz_generated.deepcopy.go @@ -0,0 +1,27 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package resource + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
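The suffix tables in suffix.go above are what map a mantissa/exponent pair back to a unit string; a brief sketch of the three Format variants using the constructors from quantity.go (the literal 6 below is a Scale meaning 10^6):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	fmt.Println(resource.NewQuantity(5*1024*1024, resource.BinarySI))        // 5Mi
	fmt.Println(resource.NewQuantity(5*1000*1000, resource.DecimalSI))       // 5M
	fmt.Println(resource.NewQuantity(5*1000*1000, resource.DecimalExponent)) // 5e6
	fmt.Println(resource.NewScaledQuantity(5, 6))                            // 5M (5 * 10^6 in DecimalSI)
}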
+func (in *Quantity) DeepCopyInto(out *Quantity) { + *out = in.DeepCopy() + return +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS new file mode 100644 index 0000000..77cfb0c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -0,0 +1,32 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- smarterclayton +- wojtek-t +- deads2k +- brendandburns +- caesarxuchao +- liggitt +- nikhiljindal +- gmarek +- erictune +- davidopp +- sttts +- quinton-hoole +- luxas +- janetkuo +- justinsb +- ncdc +- soltysh +- dims +- madhusudancs +- hongchaodeng +- krousey +- mml +- mbohlool +- david-mcmahon +- therc +- mqliang +- kevin-wangzefeng +- jianhuiz diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go new file mode 100644 index 0000000..15b45ff --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// IsControlledBy checks if the object has a controllerRef set to the given owner +func IsControlledBy(obj Object, owner Object) bool { + ref := GetControllerOfNoCopy(obj) + if ref == nil { + return false + } + return ref.UID == owner.GetUID() +} + +// GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller +func GetControllerOf(controllee Object) *OwnerReference { + ref := GetControllerOfNoCopy(controllee) + if ref == nil { + return nil + } + cp := *ref + return &cp +} + +// GetControllerOf returns a pointer to the controllerRef if controllee has a controller +func GetControllerOfNoCopy(controllee Object) *OwnerReference { + refs := controllee.GetOwnerReferences() + for i := range refs { + if refs[i].Controller != nil && *refs[i].Controller { + return &refs[i] + } + } + return nil +} + +// NewControllerRef creates an OwnerReference pointing to the given owner. +func NewControllerRef(owner Object, gvk schema.GroupVersionKind) *OwnerReference { + blockOwnerDeletion := true + isController := true + return &OwnerReference{ + APIVersion: gvk.GroupVersion().String(), + Kind: gvk.Kind, + Name: owner.GetName(), + UID: owner.GetUID(), + BlockOwnerDeletion: &blockOwnerDeletion, + Controller: &isController, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go new file mode 100644 index 0000000..b937398 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -0,0 +1,347 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + "net/url" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/api/resource" + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/intstr" +) + +func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = float64(**in) + return nil +} + +func Convert_float64_To_Pointer_float64(in *float64, out **float64, s conversion.Scope) error { + temp := float64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int32_To_int32(in **int32, out *int32, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int32(**in) + return nil +} + +func Convert_int32_To_Pointer_int32(in *int32, out **int32, s conversion.Scope) error { + temp := int32(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int64_To_int64(in **int64, out *int64, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int64(**in) + return nil +} + +func Convert_int64_To_Pointer_int64(in *int64, out **int64, s conversion.Scope) error { + temp := int64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_int64_To_int(in **int64, out *int, s conversion.Scope) error { + if *in == nil { + *out = 0 + return nil + } + *out = int(**in) + return nil +} + +func Convert_int_To_Pointer_int64(in *int, out **int64, s conversion.Scope) error { + temp := int64(*in) + *out = &temp + return nil +} + +func Convert_Pointer_string_To_string(in **string, out *string, s conversion.Scope) error { + if *in == nil { + *out = "" + return nil + } + *out = **in + return nil +} + +func Convert_string_To_Pointer_string(in *string, out **string, s conversion.Scope) error { + if in == nil { + stringVar := "" + *out = &stringVar + return nil + } + *out = in + return nil +} + +func Convert_Pointer_bool_To_bool(in **bool, out *bool, s conversion.Scope) error { + if *in == nil { + *out = false + return nil + } + *out = **in + return nil +} + +func Convert_bool_To_Pointer_bool(in *bool, out **bool, s conversion.Scope) error { + if in == nil { + boolVar := false + *out = &boolVar + return nil + } + *out = in + return nil +} + +// +k8s:conversion-fn=drop +func Convert_v1_TypeMeta_To_v1_TypeMeta(in, out *TypeMeta, s conversion.Scope) error { + // These values are explicitly not copied + //out.APIVersion = in.APIVersion + //out.Kind = in.Kind + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_DeleteOptions_To_v1_DeleteOptions(in, out *DeleteOptions, s conversion.Scope) error { + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { + *out = *in + return nil +} + +func Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(in **intstr.IntOrString, out *intstr.IntOrString, s 
conversion.Scope) error { + if *in == nil { + *out = intstr.IntOrString{} // zero value + return nil + } + *out = **in // copy + return nil +} + +func Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(in *intstr.IntOrString, out **intstr.IntOrString, s conversion.Scope) error { + temp := *in // copy + *out = &temp + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_v1_MicroTime_To_v1_MicroTime(in *MicroTime, out *MicroTime, s conversion.Scope) error { + // Cannot deep copy these, because time.Time has unexported fields. + *out = *in + return nil +} + +func Convert_Pointer_v1_Duration_To_v1_Duration(in **Duration, out *Duration, s conversion.Scope) error { + if *in == nil { + *out = Duration{} // zero duration + return nil + } + *out = **in // copy + return nil +} + +func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s conversion.Scope) error { + temp := *in //copy + *out = &temp + return nil +} + +// Convert_Slice_string_To_v1_Time allows converting a URL query parameter value +func Convert_Slice_string_To_v1_Time(in *[]string, out *Time, s conversion.Scope) error { + str := "" + if len(*in) > 0 { + str = (*in)[0] + } + return out.UnmarshalQueryParameter(str) +} + +func Convert_Slice_string_To_Pointer_v1_Time(in *[]string, out **Time, s conversion.Scope) error { + if in == nil { + return nil + } + str := "" + if len(*in) > 0 { + str = (*in)[0] + } + temp := Time{} + if err := temp.UnmarshalQueryParameter(str); err != nil { + return err + } + *out = &temp + return nil +} + +func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { + selector, err := labels.Parse(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_string_To_fields_Selector(in *string, out *fields.Selector, s conversion.Scope) error { + selector, err := fields.ParseSelector(*in) + if err != nil { + return err + } + *out = selector + return nil +} + +func Convert_labels_Selector_To_string(in *labels.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +func Convert_fields_Selector_To_string(in *fields.Selector, out *string, s conversion.Scope) error { + if *in == nil { + return nil + } + *out = (*in).String() + return nil +} + +// +k8s:conversion-fn=copy-only +func Convert_resource_Quantity_To_resource_Quantity(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error { + *out = *in + return nil +} + +func Convert_Map_string_To_string_To_v1_LabelSelector(in *map[string]string, out *LabelSelector, s conversion.Scope) error { + if in == nil { + return nil + } + for labelKey, labelValue := range *in { + AddLabelToSelector(out, labelKey, labelValue) + } + return nil +} + +func Convert_v1_LabelSelector_To_Map_string_To_string(in *LabelSelector, out *map[string]string, s conversion.Scope) error { + var err error + *out, err = LabelSelectorAsMap(in) + return err +} + +// Convert_Slice_string_To_Slice_int32 converts multiple query parameters or +// a single query parameter with a comma delimited value to multiple int32. +// This is used for port forwarding which needs the ports as int32. 
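The helper documented above and defined just below accepts repeated query values as well as comma-delimited lists; a sketch of calling it directly (the conversion.Scope argument is unused by this particular function, so nil is passed):

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	in := []string{"8080,8443", "9090"}
	var out []int32

	if err := metav1.Convert_Slice_string_To_Slice_int32(&in, &out, nil); err != nil {
		panic(err)
	}
	fmt.Println(out) // [8080 8443 9090]
}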
+func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversion.Scope) error { + for _, s := range *in { + for _, v := range strings.Split(s, ",") { + x, err := strconv.ParseUint(v, 10, 16) + if err != nil { + return fmt.Errorf("cannot convert to []int32: %v", err) + } + *out = append(*out, int32(x)) + } + } + return nil +} + +// Convert_Slice_string_To_Pointer_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy +func Convert_Slice_string_To_Pointer_v1_DeletionPropagation(in *[]string, out **DeletionPropagation, s conversion.Scope) error { + var str string + if len(*in) > 0 { + str = (*in)[0] + } else { + str = "" + } + temp := DeletionPropagation(str) + *out = &temp + return nil +} + +// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*in) > 0 { + *out = IncludeObjectPolicy((*in)[0]) + } + return nil +} + +// Convert_url_Values_To_v1_DeleteOptions allows converting a URL to DeleteOptions. +func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { + if err := autoConvert_url_Values_To_v1_DeleteOptions(in, out, s); err != nil { + return err + } + + uid := types.UID("") + if values, ok := (*in)["uid"]; ok && len(values) > 0 { + uid = types.UID(values[0]) + } + + resourceVersion := "" + if values, ok := (*in)["resourceVersion"]; ok && len(values) > 0 { + resourceVersion = values[0] + } + + if len(uid) > 0 || len(resourceVersion) > 0 { + if out.Preconditions == nil { + out.Preconditions = &Preconditions{} + } + if len(uid) > 0 { + out.Preconditions.UID = &uid + } + if len(resourceVersion) > 0 { + out.Preconditions.ResourceVersion = &resourceVersion + } + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go new file mode 100644 index 0000000..8751d05 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go @@ -0,0 +1,46 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +func (in *TableRow) DeepCopy() *TableRow { + if in == nil { + return nil + } + + out := new(TableRow) + + if in.Cells != nil { + out.Cells = make([]interface{}, len(in.Cells)) + for i := range in.Cells { + out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) + } + } + + if in.Conditions != nil { + out.Conditions = make([]TableRowCondition, len(in.Conditions)) + for i := range in.Conditions { + in.Conditions[i].DeepCopyInto(&out.Conditions[i]) + } + } + + in.Object.DeepCopyInto(&out.Object) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go new file mode 100644 index 0000000..7736753 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:conversion-gen=false +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io + +package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go new file mode 100644 index 0000000..a22b078 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -0,0 +1,65 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" +) + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +type Duration struct { + time.Duration `protobuf:"varint,1,opt,name=duration,casttype=time.Duration"` +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (d *Duration) UnmarshalJSON(b []byte) error { + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pd, err := time.ParseDuration(str) + if err != nil { + return err + } + d.Duration = pd + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.Duration.String()) +} + +// ToUnstructured implements the value.UnstructuredConverter interface. 
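Duration marshals as the time.Duration string form, so it round-trips cleanly through encoding/json; a small usage sketch:

package main

import (
	"encoding/json"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	b, err := json.Marshal(metav1.Duration{Duration: 90 * time.Second})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // "1m30s"

	var d metav1.Duration
	if err := json.Unmarshal([]byte(`"2h45m"`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration) // 2h45m0s
}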
+func (d Duration) ToUnstructured() interface{} { + return d.Duration.String() +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Duration) OpenAPISchemaFormat() string { return "" } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go new file mode 100644 index 0000000..3288c56 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -0,0 +1,11087 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto + +package v1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + runtime "k8s.io/apimachinery/pkg/runtime" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + time "time" + + k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf +var _ = time.Kitchen + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{0} +} +func (m *APIGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroup.Merge(m, src) +} +func (m *APIGroup) XXX_Size() int { + return m.Size() +} +func (m *APIGroup) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroup proto.InternalMessageInfo + +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{1} +} +func (m *APIGroupList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIGroupList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIGroupList.Merge(m, src) +} +func (m *APIGroupList) XXX_Size() int { + return m.Size() +} +func (m *APIGroupList) XXX_DiscardUnknown() { + xxx_messageInfo_APIGroupList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIGroupList proto.InternalMessageInfo + +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{2} +} +func (m *APIResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResource.Merge(m, src) +} +func (m *APIResource) XXX_Size() int { + return m.Size() +} +func (m *APIResource) XXX_DiscardUnknown() { + xxx_messageInfo_APIResource.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResource proto.InternalMessageInfo + +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{3} +} +func (m *APIResourceList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIResourceList) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIResourceList.Merge(m, src) +} +func (m *APIResourceList) XXX_Size() int { + return m.Size() +} +func (m *APIResourceList) XXX_DiscardUnknown() { + xxx_messageInfo_APIResourceList.DiscardUnknown(m) +} + +var xxx_messageInfo_APIResourceList proto.InternalMessageInfo + +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{4} +} +func (m *APIVersions) 
XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *APIVersions) XXX_Merge(src proto.Message) { + xxx_messageInfo_APIVersions.Merge(m, src) +} +func (m *APIVersions) XXX_Size() int { + return m.Size() +} +func (m *APIVersions) XXX_DiscardUnknown() { + xxx_messageInfo_APIVersions.DiscardUnknown(m) +} + +var xxx_messageInfo_APIVersions proto.InternalMessageInfo + +func (m *CreateOptions) Reset() { *m = CreateOptions{} } +func (*CreateOptions) ProtoMessage() {} +func (*CreateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{5} +} +func (m *CreateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *CreateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateOptions.Merge(m, src) +} +func (m *CreateOptions) XXX_Size() int { + return m.Size() +} +func (m *CreateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_CreateOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateOptions proto.InternalMessageInfo + +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{6} +} +func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DeleteOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteOptions.Merge(m, src) +} +func (m *DeleteOptions) XXX_Size() int { + return m.Size() +} +func (m *DeleteOptions) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{7} +} +func (m *Duration) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Duration) XXX_Merge(src proto.Message) { + xxx_messageInfo_Duration.Merge(m, src) +} +func (m *Duration) XXX_Size() int { + return m.Size() +} +func (m *Duration) XXX_DiscardUnknown() { + xxx_messageInfo_Duration.DiscardUnknown(m) +} + +var xxx_messageInfo_Duration proto.InternalMessageInfo + +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{8} +} +func (m *ExportOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ExportOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ExportOptions) XXX_Merge(src proto.Message) 
{ + xxx_messageInfo_ExportOptions.Merge(m, src) +} +func (m *ExportOptions) XXX_Size() int { + return m.Size() +} +func (m *ExportOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ExportOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ExportOptions proto.InternalMessageInfo + +func (m *FieldsV1) Reset() { *m = FieldsV1{} } +func (*FieldsV1) ProtoMessage() {} +func (*FieldsV1) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{9} +} +func (m *FieldsV1) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *FieldsV1) XXX_Merge(src proto.Message) { + xxx_messageInfo_FieldsV1.Merge(m, src) +} +func (m *FieldsV1) XXX_Size() int { + return m.Size() +} +func (m *FieldsV1) XXX_DiscardUnknown() { + xxx_messageInfo_FieldsV1.DiscardUnknown(m) +} + +var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo + +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{10} +} +func (m *GetOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GetOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetOptions.Merge(m, src) +} +func (m *GetOptions) XXX_Size() int { + return m.Size() +} +func (m *GetOptions) XXX_DiscardUnknown() { + xxx_messageInfo_GetOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_GetOptions proto.InternalMessageInfo + +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{11} +} +func (m *GroupKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupKind.Merge(m, src) +} +func (m *GroupKind) XXX_Size() int { + return m.Size() +} +func (m *GroupKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupKind.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupKind proto.InternalMessageInfo + +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{12} +} +func (m *GroupResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupResource.Merge(m, src) +} +func (m *GroupResource) XXX_Size() int { + return m.Size() +} +func (m *GroupResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupResource.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupResource proto.InternalMessageInfo + +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func 
(*GroupVersion) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{13} +} +func (m *GroupVersion) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersion) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersion.Merge(m, src) +} +func (m *GroupVersion) XXX_Size() int { + return m.Size() +} +func (m *GroupVersion) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersion.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersion proto.InternalMessageInfo + +func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } +func (*GroupVersionForDiscovery) ProtoMessage() {} +func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{14} +} +func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src) +} +func (m *GroupVersionForDiscovery) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo + +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{15} +} +func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionKind) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionKind.Merge(m, src) +} +func (m *GroupVersionKind) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionKind) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionKind.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo + +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{16} +} +func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *GroupVersionResource) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupVersionResource.Merge(m, src) +} +func (m *GroupVersionResource) XXX_Size() int { + return m.Size() +} +func (m *GroupVersionResource) XXX_DiscardUnknown() { + xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo + +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} 
+func (*LabelSelector) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{17} +} +func (m *LabelSelector) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelector) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelector.Merge(m, src) +} +func (m *LabelSelector) XXX_Size() int { + return m.Size() +} +func (m *LabelSelector) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelector.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelector proto.InternalMessageInfo + +func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } +func (*LabelSelectorRequirement) ProtoMessage() {} +func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{18} +} +func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) { + xxx_messageInfo_LabelSelectorRequirement.Merge(m, src) +} +func (m *LabelSelectorRequirement) XXX_Size() int { + return m.Size() +} +func (m *LabelSelectorRequirement) XXX_DiscardUnknown() { + xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m) +} + +var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo + +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{19} +} +func (m *List) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *List) XXX_Merge(src proto.Message) { + xxx_messageInfo_List.Merge(m, src) +} +func (m *List) XXX_Size() int { + return m.Size() +} +func (m *List) XXX_DiscardUnknown() { + xxx_messageInfo_List.DiscardUnknown(m) +} + +var xxx_messageInfo_List proto.InternalMessageInfo + +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{20} +} +func (m *ListMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListMeta.Merge(m, src) +} +func (m *ListMeta) XXX_Size() int { + return m.Size() +} +func (m *ListMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ListMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ListMeta proto.InternalMessageInfo + +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func (*ListOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{21} +} +func (m *ListOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ListOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ListOptions.Merge(m, src) +} +func (m *ListOptions) XXX_Size() int { + return m.Size() +} +func (m *ListOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ListOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ListOptions proto.InternalMessageInfo + +func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } +func (*ManagedFieldsEntry) ProtoMessage() {} +func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{22} +} +func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) { + xxx_messageInfo_ManagedFieldsEntry.Merge(m, src) +} +func (m *ManagedFieldsEntry) XXX_Size() int { + return m.Size() +} +func (m *ManagedFieldsEntry) XXX_DiscardUnknown() { + xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m) +} + +var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo + +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{23} +} +func (m *MicroTime) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_MicroTime.Unmarshal(m, b) +} +func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic) +} +func (m *MicroTime) XXX_Merge(src proto.Message) { + xxx_messageInfo_MicroTime.Merge(m, src) +} +func (m *MicroTime) XXX_Size() int { + return xxx_messageInfo_MicroTime.Size(m) +} +func (m *MicroTime) XXX_DiscardUnknown() { + xxx_messageInfo_MicroTime.DiscardUnknown(m) +} + +var xxx_messageInfo_MicroTime proto.InternalMessageInfo + +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{24} +} +func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ObjectMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_ObjectMeta.Merge(m, src) +} +func (m *ObjectMeta) XXX_Size() int { + return m.Size() +} +func (m *ObjectMeta) XXX_DiscardUnknown() { + xxx_messageInfo_ObjectMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo + +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{25} +} +func (m *OwnerReference) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *OwnerReference) XXX_Merge(src proto.Message) { + xxx_messageInfo_OwnerReference.Merge(m, src) +} +func (m *OwnerReference) XXX_Size() int { + 
return m.Size() +} +func (m *OwnerReference) XXX_DiscardUnknown() { + xxx_messageInfo_OwnerReference.DiscardUnknown(m) +} + +var xxx_messageInfo_OwnerReference proto.InternalMessageInfo + +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{26} +} +func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadata.Merge(m, src) +} +func (m *PartialObjectMetadata) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadata) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{27} +} +func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) +} +func (m *PartialObjectMetadataList) XXX_Size() int { + return m.Size() +} +func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { + xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) +} + +var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo + +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{28} +} +func (m *Patch) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Patch) XXX_Merge(src proto.Message) { + xxx_messageInfo_Patch.Merge(m, src) +} +func (m *Patch) XXX_Size() int { + return m.Size() +} +func (m *Patch) XXX_DiscardUnknown() { + xxx_messageInfo_Patch.DiscardUnknown(m) +} + +var xxx_messageInfo_Patch proto.InternalMessageInfo + +func (m *PatchOptions) Reset() { *m = PatchOptions{} } +func (*PatchOptions) ProtoMessage() {} +func (*PatchOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{29} +} +func (m *PatchOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PatchOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_PatchOptions.Merge(m, src) +} +func (m *PatchOptions) XXX_Size() int { + return m.Size() +} +func (m *PatchOptions) XXX_DiscardUnknown() { + 
xxx_messageInfo_PatchOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_PatchOptions proto.InternalMessageInfo + +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{30} +} +func (m *Preconditions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Preconditions) XXX_Merge(src proto.Message) { + xxx_messageInfo_Preconditions.Merge(m, src) +} +func (m *Preconditions) XXX_Size() int { + return m.Size() +} +func (m *Preconditions) XXX_DiscardUnknown() { + xxx_messageInfo_Preconditions.DiscardUnknown(m) +} + +var xxx_messageInfo_Preconditions proto.InternalMessageInfo + +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{31} +} +func (m *RootPaths) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RootPaths) XXX_Merge(src proto.Message) { + xxx_messageInfo_RootPaths.Merge(m, src) +} +func (m *RootPaths) XXX_Size() int { + return m.Size() +} +func (m *RootPaths) XXX_DiscardUnknown() { + xxx_messageInfo_RootPaths.DiscardUnknown(m) +} + +var xxx_messageInfo_RootPaths proto.InternalMessageInfo + +func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } +func (*ServerAddressByClientCIDR) ProtoMessage() {} +func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{32} +} +func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src) +} +func (m *ServerAddressByClientCIDR) XXX_Size() int { + return m.Size() +} +func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() { + xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m) +} + +var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo + +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{33} +} +func (m *Status) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Status) XXX_Merge(src proto.Message) { + xxx_messageInfo_Status.Merge(m, src) +} +func (m *Status) XXX_Size() int { + return m.Size() +} +func (m *Status) XXX_DiscardUnknown() { + xxx_messageInfo_Status.DiscardUnknown(m) +} + +var xxx_messageInfo_Status proto.InternalMessageInfo + +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func 
(*StatusCause) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{34} +} +func (m *StatusCause) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusCause) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusCause.Merge(m, src) +} +func (m *StatusCause) XXX_Size() int { + return m.Size() +} +func (m *StatusCause) XXX_DiscardUnknown() { + xxx_messageInfo_StatusCause.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusCause proto.InternalMessageInfo + +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{35} +} +func (m *StatusDetails) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *StatusDetails) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatusDetails.Merge(m, src) +} +func (m *StatusDetails) XXX_Size() int { + return m.Size() +} +func (m *StatusDetails) XXX_DiscardUnknown() { + xxx_messageInfo_StatusDetails.DiscardUnknown(m) +} + +var xxx_messageInfo_StatusDetails proto.InternalMessageInfo + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{36} +} +func (m *TableOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TableOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_TableOptions.Merge(m, src) +} +func (m *TableOptions) XXX_Size() int { + return m.Size() +} +func (m *TableOptions) XXX_DiscardUnknown() { + xxx_messageInfo_TableOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_TableOptions proto.InternalMessageInfo + +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{37} +} +func (m *Time) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Time.Unmarshal(m, b) +} +func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Time.Marshal(b, m, deterministic) +} +func (m *Time) XXX_Merge(src proto.Message) { + xxx_messageInfo_Time.Merge(m, src) +} +func (m *Time) XXX_Size() int { + return xxx_messageInfo_Time.Size(m) +} +func (m *Time) XXX_DiscardUnknown() { + xxx_messageInfo_Time.DiscardUnknown(m) +} + +var xxx_messageInfo_Time proto.InternalMessageInfo + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{38} +} +func (m *Timestamp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Timestamp) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_Timestamp.Merge(m, src) +} +func (m *Timestamp) XXX_Size() int { + return m.Size() +} +func (m *Timestamp) XXX_DiscardUnknown() { + xxx_messageInfo_Timestamp.DiscardUnknown(m) +} + +var xxx_messageInfo_Timestamp proto.InternalMessageInfo + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{39} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo + +func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } +func (*UpdateOptions) ProtoMessage() {} +func (*UpdateOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{40} +} +func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UpdateOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpdateOptions.Merge(m, src) +} +func (m *UpdateOptions) XXX_Size() int { + return m.Size() +} +func (m *UpdateOptions) XXX_DiscardUnknown() { + xxx_messageInfo_UpdateOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo + +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{41} +} +func (m *Verbs) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Verbs) XXX_Merge(src proto.Message) { + xxx_messageInfo_Verbs.Merge(m, src) +} +func (m *Verbs) XXX_Size() int { + return m.Size() +} +func (m *Verbs) XXX_DiscardUnknown() { + xxx_messageInfo_Verbs.DiscardUnknown(m) +} + +var xxx_messageInfo_Verbs proto.InternalMessageInfo + +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { + return fileDescriptor_cf52fa777ced5367, []int{42} +} +func (m *WatchEvent) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *WatchEvent) XXX_Merge(src proto.Message) { + xxx_messageInfo_WatchEvent.Merge(m, src) +} +func (m *WatchEvent) XXX_Size() int { + return m.Size() +} +func (m *WatchEvent) XXX_DiscardUnknown() { + xxx_messageInfo_WatchEvent.DiscardUnknown(m) +} + +var xxx_messageInfo_WatchEvent proto.InternalMessageInfo + +func init() { + proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") + 
proto.RegisterType((*APIGroupList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroupList") + proto.RegisterType((*APIResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResource") + proto.RegisterType((*APIResourceList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIResourceList") + proto.RegisterType((*APIVersions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIVersions") + proto.RegisterType((*CreateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.CreateOptions") + proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") + proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") + proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") + proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") + proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") + proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") + proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") + proto.RegisterType((*GroupVersion)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersion") + proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") + proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") + proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") + proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry") + proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") + proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") + proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") + proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") + proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") + proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") + proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry") + proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry") + proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata") + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList") + proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") + proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions") + proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") + proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") + proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") + proto.RegisterType((*Status)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.Status") + proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") + proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") + proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions") + proto.RegisterType((*Time)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Time") + proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") + proto.RegisterType((*UpdateOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.UpdateOptions") + proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") + proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) +} + +var fileDescriptor_cf52fa777ced5367 = []byte{ + // 2713 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59, + 0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad, + 0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4, + 0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35, + 0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f, + 0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e, + 0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb, + 0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e, + 0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e, + 0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3, + 0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1, + 0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff, + 0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99, + 0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d, + 0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e, + 0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35, + 0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d, + 0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 0xbf, 0x92, + 0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b, + 0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71, + 0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb, + 0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e, + 0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b, + 0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30, + 0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 
0x87, 0x49, 0x75, + 0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2, + 0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38, + 0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe, + 0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 0x0c, + 0x87, 0x13, 0x94, 0xe8, 0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6, + 0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad, + 0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac, + 0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 0x0e, 0x6d, 0xd7, 0xe2, + 0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f, + 0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f, + 0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15, + 0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8, + 0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78, + 0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed, + 0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60, + 0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, + 0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23, + 0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d, + 0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea, + 0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0, + 0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c, + 0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e, + 0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36, + 0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f, + 0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b, + 0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa, + 0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60, + 0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63, + 0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71, + 0xc7, 0x73, 0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47, + 0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d, + 0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c, + 0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21, + 0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01, + 0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32, + 0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde, + 0x0a, 
0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70, + 0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93, + 0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e, + 0xa1, 0x50, 0xd9, 0x3f, 0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63, + 0x2a, 0x0d, 0x57, 0xa0, 0xa0, 0xcc, 0x94, 0x5a, 0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c, + 0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa, + 0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c, + 0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1, + 0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9, + 0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60, + 0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee, + 0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd, + 0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4, + 0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b, + 0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9, + 0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0, + 0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b, + 0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62, + 0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7, + 0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19, + 0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e, + 0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a, + 0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51, + 0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1, + 0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68, + 0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25, + 0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e, + 0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40, + 0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3, + 0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd, + 0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c, + 0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73, + 0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7, + 0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9, + 0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74, + 0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24, + 0x5a, 0x62, 0x4b, 0xf4, 0x36, 
0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6, + 0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e, + 0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8, + 0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8, + 0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 0x2f, 0xa5, 0x4b, 0x1b, 0xec, 0xac, 0xeb, 0x05, 0x56, + 0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed, + 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13, + 0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49, + 0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c, + 0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02, + 0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f, + 0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe, + 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45, + 0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d, + 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e, + 0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3, + 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea, + 0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1, + 0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e, + 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8, + 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93, + 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47, + 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c, + 0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f, + 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42, + 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c, + 0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1, + 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38, + 0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42, + 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde, + 0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d, + 0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6, + 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95, + 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0, + 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75, + 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00, + 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 
0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43, + 0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4, + 0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72, + 0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0xb5, 0x06, + 0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 0x38, 0x2a, 0x88, 0x3f, 0x13, + 0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44, + 0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77, + 0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d, + 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66, + 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd, + 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff, + 0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62, + 0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd, + 0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4, + 0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b, + 0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c, + 0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac, + 0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37, + 0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6, + 0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0, + 0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea, + 0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4, + 0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74, + 0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b, + 0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86, + 0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79, + 0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e, + 0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6, + 0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b, + 0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 0x44, 0xe5, + 0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11, + 0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef, + 0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5, + 0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d, + 0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d, + 0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00, +} + +func (m *APIGroup) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.PreferredVersion.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIGroupList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIGroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *APIResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.StorageVersionHash) + copy(dAtA[i:], m.StorageVersionHash) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) + i-- + dAtA[i] = 0x52 + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x4a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x42 + if len(m.Categories) > 0 { + for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Categories[iNdEx]) + copy(dAtA[i:], m.Categories[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) + i-- + dAtA[i] = 0x3a + } + } + i -= len(m.SingularName) + copy(dAtA[i:], m.SingularName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i-- + dAtA[i] = 0x32 + if len(m.ShortNames) > 0 { + 
for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.ShortNames[iNdEx]) + copy(dAtA[i:], m.ShortNames[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.Verbs != nil { + { + size, err := m.Verbs.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i-- + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIResourceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIResourceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.APIResources) > 0 { + for iNdEx := len(m.APIResources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.APIResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *APIVersions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ServerAddressByClientCIDRs) > 0 { + for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Versions) > 0 { + for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Versions[iNdEx]) + copy(dAtA[i:], m.Versions[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Versions[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *CreateOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) 
- 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if m.PropagationPolicy != nil { + i -= len(*m.PropagationPolicy) + copy(dAtA[i:], *m.PropagationPolicy) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i-- + dAtA[i] = 0x22 + } + if m.OrphanDependents != nil { + i-- + if *m.OrphanDependents { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if m.Preconditions != nil { + { + size, err := m.Preconditions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.GracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *ExportOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExportOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.Exact { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + i-- + if m.Export { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *FieldsV1) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldsV1) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *FieldsV1) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, 
nil +} + +func (m *GetOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GetOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupVersion) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionForDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.GroupVersion) + copy(dAtA[i:], m.GroupVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Resource) + copy(dAtA[i:], m.Resource) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i-- + dAtA[i] = 0x1a + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x12 + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LabelSelector) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.MatchExpressions) > 0 { + for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.MatchLabels) > 0 { + keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) + for k := range m.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.MatchLabels[string(keysForMatchLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForMatchLabels[iNdEx]) + copy(dAtA[i:], keysForMatchLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = 
encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Values[iNdEx]) + copy(dAtA[i:], m.Values[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + i -= len(m.Operator) + copy(dAtA[i:], m.Operator) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i-- + dAtA[i] = 0x12 + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *List) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *List) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ListMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.RemainingItemCount != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount)) + i-- + dAtA[i] = 0x20 + } + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- + dAtA[i] = 0x1a + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ListOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ListOptions) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i-- + if m.AllowWatchBookmarks { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x48 + i -= len(m.Continue) + copy(dAtA[i:], m.Continue) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + i-- + dAtA[i] = 0x38 + if m.TimeoutSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + i-- + dAtA[i] = 0x28 + } + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x22 + i-- + if m.Watch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + i -= len(m.FieldSelector) + copy(dAtA[i:], m.FieldSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i-- + dAtA[i] = 0x12 + i -= len(m.LabelSelector) + copy(dAtA[i:], m.LabelSelector) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.FieldsV1 != nil { + { + size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + i -= len(m.FieldsType) + copy(dAtA[i:], m.FieldsType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldsType))) + i-- + dAtA[i] = 0x32 + if m.Time != nil { + { + size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x1a + i -= len(m.Operation) + copy(dAtA[i:], m.Operation) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) + i-- + dAtA[i] = 0x12 + i -= len(m.Manager) + copy(dAtA[i:], m.Manager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ManagedFields) > 0 { + for iNdEx := len(m.ManagedFields) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ManagedFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x8a + } + } + i -= len(m.ClusterName) + copy(dAtA[i:], m.ClusterName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i-- + dAtA[i] 
= 0x7a + if len(m.Finalizers) > 0 { + for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Finalizers[iNdEx]) + copy(dAtA[i:], m.Finalizers[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) + i-- + dAtA[i] = 0x72 + } + } + if len(m.OwnerReferences) > 0 { + for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { + v := m.Annotations[string(keysForAnnotations[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForAnnotations[iNdEx]) + copy(dAtA[i:], keysForAnnotations[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x62 + } + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { + v := m.Labels[string(keysForLabels[iNdEx])] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(keysForLabels[iNdEx]) + copy(dAtA[i:], keysForLabels[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x5a + } + } + if m.DeletionGracePeriodSeconds != nil { + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + i-- + dAtA[i] = 0x50 + } + if m.DeletionTimestamp != nil { + { + size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + { + size, err := m.CreationTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + i-- + dAtA[i] = 0x38 + i -= len(m.ResourceVersion) + copy(dAtA[i:], m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i-- + dAtA[i] = 0x32 + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x2a + i -= len(m.SelfLink) + copy(dAtA[i:], m.SelfLink) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i-- + dAtA[i] = 0x22 + i -= len(m.Namespace) + copy(dAtA[i:], m.Namespace) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i-- + dAtA[i] = 0x1a + i -= len(m.GenerateName) + copy(dAtA[i:], m.GenerateName) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) 
- i, nil +} + +func (m *OwnerReference) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.BlockOwnerDeletion != nil { + i-- + if *m.BlockOwnerDeletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x38 + } + if m.Controller != nil { + i-- + if *m.Controller { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x2a + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x22 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x1a + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Patch) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Patch) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *PatchOptions) 
Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x1a + if m.Force != nil { + i-- + if *m.Force { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x10 + } + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *Preconditions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ResourceVersion != nil { + i -= len(*m.ResourceVersion) + copy(dAtA[i:], *m.ResourceVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) + i-- + dAtA[i] = 0x12 + } + if m.UID != nil { + i -= len(*m.UID) + copy(dAtA[i:], *m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RootPaths) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RootPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Paths[iNdEx]) + copy(dAtA[i:], m.Paths[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Paths[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServerAddressByClientCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ServerAddress) + copy(dAtA[i:], m.ServerAddress) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) + i-- + dAtA[i] = 0x12 + i -= len(m.ClientCIDR) + copy(dAtA[i:], m.ClientCIDR) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + i-- + dAtA[i] = 0x30 + if m.Details != nil { + { + size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0x22 + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x1a + i -= len(m.Status) + copy(dAtA[i:], m.Status) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i-- + dAtA[i] = 0x12 + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatusCause) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Field) + copy(dAtA[i:], m.Field) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) + i-- + dAtA[i] = 0x1a + i -= len(m.Message) + copy(dAtA[i:], m.Message) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *StatusDetails) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StatusDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.UID) + copy(dAtA[i:], m.UID) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i-- + dAtA[i] = 0x32 + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + i-- + dAtA[i] = 0x28 + if len(m.Causes) > 0 { + for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0x1a + i -= len(m.Group) + copy(dAtA[i:], m.Group) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i-- + dAtA[i] = 0x12 + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *TableOptions) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.IncludeObject) + copy(dAtA[i:], m.IncludeObject) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func (m *TypeMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0x12 + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.FieldManager) + copy(dAtA[i:], m.FieldManager) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) + i-- + dAtA[i] = 0x12 + if len(m.DryRun) > 0 { + for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.DryRun[iNdEx]) + copy(dAtA[i:], m.DryRun[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m Verbs) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m Verbs) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m Verbs) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m) > 0 { + for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m[iNdEx]) + copy(dAtA[i:], m[iNdEx]) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *WatchEvent) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(m.Type) + copy(dAtA[i:], m.Type) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *APIGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Versions) > 0 { + for _, e := range m.Versions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = m.PreferredVersion.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIGroupList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if m.Verbs != nil { + l = m.Verbs.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.ShortNames) > 0 { + for _, s := range m.ShortNames { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.SingularName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageVersionHash) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *APIResourceList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.APIResources) > 0 { + for _, e := range m.APIResources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *APIVersions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Versions) > 0 { + for _, s := range m.Versions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, e := range m.ServerAddressByClientCIDRs { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *CreateOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + 
l + sovGenerated(uint64(l)) + } + } + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *DeleteOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.GracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.GracePeriodSeconds)) + } + if m.Preconditions != nil { + l = m.Preconditions.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.OrphanDependents != nil { + n += 2 + } + if m.PropagationPolicy != nil { + l = len(*m.PropagationPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Duration) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Duration)) + return n +} + +func (m *ExportOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 2 + n += 2 + return n +} + +func (m *FieldsV1) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *GetOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersion) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionForDiscovery) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.GroupVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionKind) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *GroupVersionResource) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Version) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *LabelSelector) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MatchLabels) > 0 { + for k, v := range m.MatchLabels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *LabelSelectorRequirement) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if 
len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *List) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ListMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Continue) + n += 1 + l + sovGenerated(uint64(l)) + if m.RemainingItemCount != nil { + n += 1 + sovGenerated(uint64(*m.RemainingItemCount)) + } + return n +} + +func (m *ListOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.LabelSelector) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldSelector) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeoutSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + } + n += 1 + sovGenerated(uint64(m.Limit)) + l = len(m.Continue) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n +} + +func (m *ManagedFieldsEntry) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Manager) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operation) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.Time != nil { + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.FieldsType) + n += 1 + l + sovGenerated(uint64(l)) + if m.FieldsV1 != nil { + l = m.FieldsV1.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *ObjectMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.GenerateName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SelfLink) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Generation)) + l = m.CreationTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.DeletionTimestamp != nil { + l = m.DeletionTimestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.DeletionGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + for k, v := range m.Labels { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Annotations) > 0 { + for k, v := range m.Annotations { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.OwnerReferences) > 0 { + for _, e := range m.OwnerReferences { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.ClusterName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ManagedFields) > 0 { + for _, e := range m.ManagedFields { + l = e.Size() + n 
+= 2 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *OwnerReference) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + if m.Controller != nil { + n += 2 + } + if m.BlockOwnerDeletion != nil { + n += 2 + } + return n +} + +func (m *PartialObjectMetadata) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PartialObjectMetadataList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Patch) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *PatchOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Force != nil { + n += 2 + } + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Preconditions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ResourceVersion != nil { + l = len(*m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *RootPaths) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *ServerAddressByClientCIDR) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ClientCIDR) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ServerAddress) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Status) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + if m.Details != nil { + l = m.Details.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.Code)) + return n +} + +func (m *StatusCause) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Field) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *StatusDetails) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Causes) > 0 { + for _, e := range m.Causes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + n += 1 + sovGenerated(uint64(m.RetryAfterSeconds)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *TableOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.IncludeObject) + n += 1 + 
l + sovGenerated(uint64(l)) + return n +} + +func (m *Timestamp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Seconds)) + n += 1 + sovGenerated(uint64(m.Nanos)) + return n +} + +func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *UpdateOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.FieldManager) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m Verbs) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m) > 0 { + for _, s := range m { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *WatchEvent) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Object.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *APIGroup) String() string { + if this == nil { + return "nil" + } + repeatedStringForVersions := "[]GroupVersionForDiscovery{" + for _, f := range this.Versions { + repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + "," + } + repeatedStringForVersions += "}" + repeatedStringForServerAddressByClientCIDRs := "[]ServerAddressByClientCIDR{" + for _, f := range this.ServerAddressByClientCIDRs { + repeatedStringForServerAddressByClientCIDRs += strings.Replace(strings.Replace(f.String(), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + "," + } + repeatedStringForServerAddressByClientCIDRs += "}" + s := strings.Join([]string{`&APIGroup{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Versions:` + repeatedStringForVersions + `,`, + `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, + `ServerAddressByClientCIDRs:` + repeatedStringForServerAddressByClientCIDRs + `,`, + `}`, + }, "") + return s +} +func (this *APIGroupList) String() string { + if this == nil { + return "nil" + } + repeatedStringForGroups := "[]APIGroup{" + for _, f := range this.Groups { + repeatedStringForGroups += strings.Replace(strings.Replace(f.String(), "APIGroup", "APIGroup", 1), `&`, ``, 1) + "," + } + repeatedStringForGroups += "}" + s := strings.Join([]string{`&APIGroupList{`, + `Groups:` + repeatedStringForGroups + `,`, + `}`, + }, "") + return s +} +func (this *APIResource) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&APIResource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespaced:` + fmt.Sprintf("%v", this.Namespaced) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Verbs:` + strings.Replace(fmt.Sprintf("%v", this.Verbs), "Verbs", "Verbs", 1) + `,`, + `ShortNames:` + fmt.Sprintf("%v", this.ShortNames) + `,`, + `SingularName:` + fmt.Sprintf("%v", this.SingularName) + `,`, + `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) 
+ `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `StorageVersionHash:` + fmt.Sprintf("%v", this.StorageVersionHash) + `,`, + `}`, + }, "") + return s +} +func (this *APIResourceList) String() string { + if this == nil { + return "nil" + } + repeatedStringForAPIResources := "[]APIResource{" + for _, f := range this.APIResources { + repeatedStringForAPIResources += strings.Replace(strings.Replace(f.String(), "APIResource", "APIResource", 1), `&`, ``, 1) + "," + } + repeatedStringForAPIResources += "}" + s := strings.Join([]string{`&APIResourceList{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `APIResources:` + repeatedStringForAPIResources + `,`, + `}`, + }, "") + return s +} +func (this *CreateOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CreateOptions{`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `}`, + }, "") + return s +} +func (this *DeleteOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DeleteOptions{`, + `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, + `Preconditions:` + strings.Replace(this.Preconditions.String(), "Preconditions", "Preconditions", 1) + `,`, + `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, + `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `}`, + }, "") + return s +} +func (this *Duration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Duration{`, + `Duration:` + fmt.Sprintf("%v", this.Duration) + `,`, + `}`, + }, "") + return s +} +func (this *ExportOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ExportOptions{`, + `Export:` + fmt.Sprintf("%v", this.Export) + `,`, + `Exact:` + fmt.Sprintf("%v", this.Exact) + `,`, + `}`, + }, "") + return s +} +func (this *FieldsV1) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldsV1{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *GetOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetOptions{`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func (this *GroupVersionForDiscovery) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GroupVersionForDiscovery{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelector) String() string { + if this == nil { + return "nil" + } + repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{" + for _, f := range this.MatchExpressions { + repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + "," + } + repeatedStringForMatchExpressions += "}" + keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) + for k := range this.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + mapStringForMatchLabels := "map[string]string{" + for _, k := range keysForMatchLabels { + mapStringForMatchLabels += fmt.Sprintf("%v: %v,", k, 
this.MatchLabels[k]) + } + mapStringForMatchLabels += "}" + s := strings.Join([]string{`&LabelSelector{`, + `MatchLabels:` + mapStringForMatchLabels + `,`, + `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `}`, + }, "") + return s +} +func (this *LabelSelectorRequirement) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&LabelSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]RawExtension{" + for _, f := range this.Items { + repeatedStringForItems += fmt.Sprintf("%v", f) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *ListMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListMeta{`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`, + `}`, + }, "") + return s +} +func (this *ListOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListOptions{`, + `LabelSelector:` + fmt.Sprintf("%v", this.LabelSelector) + `,`, + `FieldSelector:` + fmt.Sprintf("%v", this.FieldSelector) + `,`, + `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, + `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, + `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, + `}`, + }, "") + return s +} +func (this *ManagedFieldsEntry) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ManagedFieldsEntry{`, + `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, + `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, + `FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`, + `FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ObjectMeta) String() string { + if this == nil { + return "nil" + } + repeatedStringForOwnerReferences := "[]OwnerReference{" + for _, f := range this.OwnerReferences { + repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + "," + } + repeatedStringForOwnerReferences += "}" + repeatedStringForManagedFields := "[]ManagedFieldsEntry{" + for _, f := range this.ManagedFields { + repeatedStringForManagedFields += strings.Replace(strings.Replace(f.String(), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + "," + } + repeatedStringForManagedFields += "}" + keysForLabels := make([]string, 0, len(this.Labels)) + for k := range this.Labels { + keysForLabels = append(keysForLabels, k) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + mapStringForLabels := "map[string]string{" + for _, k := range keysForLabels { + mapStringForLabels += fmt.Sprintf("%v: %v,", k, this.Labels[k]) + } + mapStringForLabels += "}" + keysForAnnotations := make([]string, 0, len(this.Annotations)) + for k := range this.Annotations { + keysForAnnotations = append(keysForAnnotations, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + mapStringForAnnotations := "map[string]string{" + for _, k := range keysForAnnotations { + mapStringForAnnotations += fmt.Sprintf("%v: %v,", k, this.Annotations[k]) + } + mapStringForAnnotations += "}" + s := strings.Join([]string{`&ObjectMeta{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `GenerateName:` + fmt.Sprintf("%v", this.GenerateName) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreationTimestamp), "Time", "Time", 1), `&`, ``, 1) + `,`, + `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, + `DeletionGracePeriodSeconds:` + valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, + `Labels:` + mapStringForLabels + `,`, + `Annotations:` + mapStringForAnnotations + `,`, + `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, + `ManagedFields:` + repeatedStringForManagedFields + `,`, + `}`, + }, "") + return s +} +func (this *OwnerReference) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&OwnerReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Controller:` + valueToStringGenerated(this.Controller) + `,`, + `BlockOwnerDeletion:` + valueToStringGenerated(this.BlockOwnerDeletion) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PartialObjectMetadata{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *Patch) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Patch{`, + `}`, + }, "") + return s +} +func (this *PatchOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PatchOptions{`, + `DryRun:` + 
fmt.Sprintf("%v", this.DryRun) + `,`, + `Force:` + valueToStringGenerated(this.Force) + `,`, + `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `ResourceVersion:` + valueToStringGenerated(this.ResourceVersion) + `,`, + `}`, + }, "") + return s +} +func (this *RootPaths) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RootPaths{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `}`, + }, "") + return s +} +func (this *ServerAddressByClientCIDR) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServerAddressByClientCIDR{`, + `ClientCIDR:` + fmt.Sprintf("%v", this.ClientCIDR) + `,`, + `ServerAddress:` + fmt.Sprintf("%v", this.ServerAddress) + `,`, + `}`, + }, "") + return s +} +func (this *Status) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Status{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Details:` + strings.Replace(this.Details.String(), "StatusDetails", "StatusDetails", 1) + `,`, + `Code:` + fmt.Sprintf("%v", this.Code) + `,`, + `}`, + }, "") + return s +} +func (this *StatusCause) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StatusCause{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Field:` + fmt.Sprintf("%v", this.Field) + `,`, + `}`, + }, "") + return s +} +func (this *StatusDetails) String() string { + if this == nil { + return "nil" + } + repeatedStringForCauses := "[]StatusCause{" + for _, f := range this.Causes { + repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "StatusCause", "StatusCause", 1), `&`, ``, 1) + "," + } + repeatedStringForCauses += "}" + s := strings.Join([]string{`&StatusDetails{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Causes:` + repeatedStringForCauses + `,`, + `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + `}`, + }, "") + return s +} +func (this *Timestamp) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Timestamp{`, + `Seconds:` + fmt.Sprintf("%v", this.Seconds) + `,`, + `Nanos:` + fmt.Sprintf("%v", this.Nanos) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `}`, + }, "") + return s +} +func (this *UpdateOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UpdateOptions{`, + `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, + `FieldManager:` + fmt.Sprintf("%v", 
this.FieldManager) + `,`, + `}`, + }, "") + return s +} +func (this *WatchEvent) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&WatchEvent{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *APIGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, GroupVersionForDiscovery{}) + if err := m.Versions[len(m.Versions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PreferredVersion", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.PreferredVersion.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIGroupList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIGroupList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIGroupList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, APIGroup{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespaced", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Namespaced = bool(v != 0) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Verbs == nil { + m.Verbs = Verbs{} + } + if err := m.Verbs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ShortNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ShortNames = append(m.ShortNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SingularName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SingularName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Categories", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Categories = append(m.Categories, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StorageVersionHash", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StorageVersionHash = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF 
+ } + return nil +} +func (m *APIResourceList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIResourceList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIResourceList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIResources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIResources = append(m.APIResources, APIResource{}) + if err := m.APIResources[len(m.APIResources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *APIVersions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: APIVersions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: APIVersions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Versions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Versions = append(m.Versions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddressByClientCIDRs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddressByClientCIDRs = append(m.ServerAddressByClientCIDRs, ServerAddressByClientCIDR{}) + if err := m.ServerAddressByClientCIDRs[len(m.ServerAddressByClientCIDRs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CreateOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CreateOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CreateOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeleteOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeleteOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeleteOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.GracePeriodSeconds = &v + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Preconditions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Preconditions == nil { + m.Preconditions = &Preconditions{} + } + if err := m.Preconditions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrphanDependents", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.OrphanDependents = &b + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PropagationPolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := DeletionPropagation(dAtA[iNdEx:postIndex]) + 
m.PropagationPolicy = &s + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Duration", wireType) + } + m.Duration = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Duration |= time.Duration(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ExportOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExportOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExportOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Export", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Export = bool(v != 0) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Exact", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Exact = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FieldsV1) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldsV1: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldsV1: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GetOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersion) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GroupVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 
0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resource = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelector) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelector: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelector: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.MatchLabels == nil { + m.MatchLabels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if 
postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.MatchLabels[mapkey] = mapvalue + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MatchExpressions = append(m.MatchExpressions, LabelSelectorRequirement{}) + if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LabelSelectorRequirement: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LabelSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) 
+ if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operator = LabelSelectorOperator(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *List) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: List: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, runtime.RawExtension{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != 
nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Continue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RemainingItemCount = &v + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err 
!= nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.LabelSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldSelector", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldSelector = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Watch", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Watch = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType 
= %d for field TimeoutSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.TimeoutSeconds = &v + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) + } + m.Limit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Limit |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Continue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.AllowWatchBookmarks = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ManagedFieldsEntry: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ManagedFieldsEntry: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Manager = string(dAtA[iNdEx:postIndex]) + 
iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Operation = ManagedFieldsOperationType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Time == nil { + m.Time = &Time{} + } + if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldsType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldsType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldsV1", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldsV1 == nil { + m.FieldsV1 = &FieldsV1{} + } + if err := m.FieldsV1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ObjectMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ObjectMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ObjectMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GenerateName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.GenerateName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Namespace = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SelfLink", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } 
+ } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.SelfLink = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Generation", wireType) + } + m.Generation = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Generation |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CreationTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.CreationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionTimestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.DeletionTimestamp == nil { + m.DeletionTimestamp = &Time{} + } + if err := m.DeletionTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + case 10: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DeletionGracePeriodSeconds", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.DeletionGracePeriodSeconds = &v + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Labels", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Labels == nil { + m.Labels = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Labels[mapkey] = mapvalue + iNdEx = postIndex + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Annotations[mapkey] = mapvalue + iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 17: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{}) + if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OwnerReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } 
+ m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.BlockOwnerDeletion = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 
{ + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + m.Items = append(m.Items, PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PatchOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PatchOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PatchOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Force = &b + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + } + var stringLen uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Preconditions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.UID = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.ResourceVersion = &s + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RootPaths) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + 
preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RootPaths: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RootPaths: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServerAddressByClientCIDR: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientCIDR", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClientCIDR = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ServerAddress", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ServerAddress = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Status = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { 
+ return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = StatusReason(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Details", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Details == nil { + m.Details = &StatusDetails{} + } + if err := m.Details.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusCause) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusCause: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusCause: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = CauseType(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Field", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Field = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StatusDetails) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StatusDetails: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StatusDetails: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Causes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Causes = append(m.Causes, StatusCause{}) + if err := m.Causes[len(m.Causes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetryAfterSeconds", wireType) + } + m.RetryAfterSeconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetryAfterSeconds |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") 
+ } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype 
end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpdateOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpdateOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpdateOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FieldManager = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Verbs) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Verbs: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Verbs: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + *m = append(*m, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WatchEvent) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WatchEvent: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WatchEvent: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto new file mode 100644 index 0000000..ba1194d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -0,0 +1,1024 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.meta.v1; + +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1"; + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +message APIGroup { + // name is the name of the group. + optional string name = 1; + + // versions are the versions supported in this group. + repeated GroupVersionForDiscovery versions = 2; + + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + optional GroupVersionForDiscovery preferredVersion = 3; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +optional + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 4; +} + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +message APIGroupList { + // groups is a list of APIGroup. + repeated APIGroup groups = 1; +} + +// APIResource specifies the name of a resource and whether it is namespaced. +message APIResource { + // name is the plural name of the resource. + optional string name = 1; + + // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. + optional string singularName = 6; + + // namespaced indicates if a resource is namespaced or not. + optional bool namespaced = 2; + + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + optional string group = 8; + + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + optional string version = 9; + + // kind is the kind for the resource (e.g. 
'Foo' is the kind for a resource 'foo') + optional string kind = 3; + + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + optional Verbs verbs = 4; + + // shortNames is a list of suggested short names of the resource. + repeated string shortNames = 5; + + // categories is a list of the grouped resources this resource belongs to (e.g. 'all') + repeated string categories = 7; + + // The hash value of the storage version, the version this resource is + // converted to when written to the data store. Value must be treated + // as opaque by clients. Only equality comparison on the value is valid. + // This is an alpha feature and may change or be removed in the future. + // The field is populated by the apiserver only if the + // StorageVersionHash feature gate is enabled. + // This field will remain optional even if it graduates. + // +optional + optional string storageVersionHash = 10; +} + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +message APIResourceList { + // groupVersion is the group and version this APIResourceList is for. + optional string groupVersion = 1; + + // resources contains the name of the resources and if they are namespaced. + repeated APIResource resources = 2; +} + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message APIVersions { + // versions are the api versions that are available. + repeated string versions = 1; + + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + repeated ServerAddressByClientCIDR serverAddressByClientCIDRs = 2; +} + +// CreateOptions may be provided when creating an API object. +message CreateOptions { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 1; + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + optional string fieldManager = 3; +} + +// DeleteOptions may be provided when deleting an API object. +message DeleteOptions { + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. 
If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + optional int64 gracePeriodSeconds = 1; + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +k8s:conversion-gen=false + // +optional + optional Preconditions preconditions = 2; + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + optional bool orphanDependents = 3; + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + // +optional + optional string propagationPolicy = 4; + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 5; +} + +// Duration is a wrapper around time.Duration which supports correct +// marshaling to YAML and JSON. In particular, it marshals into strings, which +// can be used as map keys in json. +message Duration { + optional int64 duration = 1; +} + +// ExportOptions is the query options to the standard REST get call. +// Deprecated. Planned for removal in 1.18. +message ExportOptions { + // Should this value be exported. Export strips fields that a user can not specify. + // Deprecated. Planned for removal in 1.18. + optional bool export = 1; + + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + // Deprecated. Planned for removal in 1.18. + optional bool exact = 2; +} + +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:', where is the name of a field in a struct, or key in a map +// 'v:', where is the exact json formatted value of a list item +// 'i:', where is position of a item in a list +// 'k:', where is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +message FieldsV1 { + // Raw is the underlying serialization of this object. + optional bytes Raw = 1; +} + +// GetOptions is the standard query options to the standard REST get call. 
+message GetOptions { + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + optional string resourceVersion = 1; +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupKind { + optional string group = 1; + + optional string kind = 2; +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupResource { + optional string group = 1; + + optional string resource = 2; +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersion { + optional string group = 1; + + optional string version = 2; +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +message GroupVersionForDiscovery { + // groupVersion specifies the API group and version in the form "group/version" + optional string groupVersion = 1; + + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + optional string version = 2; +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionKind { + optional string group = 1; + + optional string version = 2; + + optional string kind = 3; +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +message GroupVersionResource { + optional string group = 1; + + optional string version = 2; + + optional string resource = 3; +} + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +message LabelSelector { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + map matchLabels = 1; + + // matchExpressions is a list of label selector requirements. The requirements are ANDed. + // +optional + repeated LabelSelectorRequirement matchExpressions = 2; +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +message LabelSelectorRequirement { + // key is the label key that the selector applies to. 
+ // +patchMergeKey=key + // +patchStrategy=merge + optional string key = 1; + + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists and DoesNotExist. + optional string operator = 2; + + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + repeated string values = 3; +} + +// List holds a list of objects, which may not be known by the server. +message List { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // List of objects + repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +message ListMeta { + // selfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. + // +optional + optional string selfLink = 1; + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 2; + + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // consistent list may not be possible if the server configuration has changed or more than a few + // minutes have passed. The resourceVersion field returned when using this continue value will be + // identical to the value in the first response, unless you have received this token from an error + // message. + optional string continue = 3; + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. + // +optional + optional int64 remainingItemCount = 4; +} + +// ListOptions is the query options to a standard REST list call. +message ListOptions { + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. 
+ // +optional + optional string labelSelector = 1; + + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + optional string fieldSelector = 2; + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + optional bool watch = 3; + + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // +optional + optional bool allowWatchBookmarks = 9; + + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + optional string resourceVersion = 4; + + // Timeout for the list/watch call. + // This limits the duration of the call, regardless of any activity or inactivity. + // +optional + optional int64 timeoutSeconds = 5; + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + optional int64 limit = 7; + + // The continue option should be set when retrieving more results from the server. Since this value is + // server defined, clients may only use the continue value from a previous query result with identical + // query parameters (except for the value of continue) and the server may reject a continue value it + // does not recognize. 
If the specified continue value is no longer valid whether due to expiration + // (generally five to fifteen minutes) or a configuration change on the server, the server will + // respond with a 410 ResourceExpired error together with a continue token. If the client needs a + // consistent list, it must restart their list without the continue field. Otherwise, the client may + // send another list request with the token received with the 410 error, the server will respond with + // a list starting from the next key, but from the latest snapshot, which is inconsistent from the + // previous list results - objects that are created, modified, or deleted after the first list request + // will be included in the response, as long as their keys are after the "next key". + // + // This field is not supported when watch is true. Clients may start a watch from the last + // resourceVersion value returned by the server and not miss any modifications. + optional string continue = 8; +} + +// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource +// that the fieldset applies to. +message ManagedFieldsEntry { + // Manager is an identifier of the workflow managing these fields. + optional string manager = 1; + + // Operation is the type of operation which lead to this ManagedFieldsEntry being created. + // The only valid values for this field are 'Apply' and 'Update'. + optional string operation = 2; + + // APIVersion defines the version of this resource that this field set + // applies to. The format is "group/version" just like the top-level + // APIVersion field. It is necessary to track the version of a field + // set because it cannot be automatically converted. + optional string apiVersion = 3; + + // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' + // +optional + optional Time time = 4; + + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + optional string fieldsType = 6; + + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. + // +optional + optional FieldsV1 fieldsV1 = 7; +} + +// MicroTime is version of Time with microsecond level precision. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message MicroTime { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +message ObjectMeta { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. 
+ // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + optional string name = 1; + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + // +optional + optional string generateName = 2; + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + optional string namespace = 3; + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. + // +optional + optional string selfLink = 4; + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 5; + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // +optional + optional string resourceVersion = 6; + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + optional int64 generation = 7; + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional Time creationTimestamp = 8;
+
+ // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This
+ // field is set by the server when a graceful deletion is requested by the user, and is not
+ // directly settable by a client. The resource is expected to be deleted (no longer visible
+ // from resource lists, and not reachable by name) after the time in this field, once the
+ // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked.
+ // Once the deletionTimestamp is set, this value may not be unset or be set further into the
+ // future, although it may be shortened or the resource may be deleted prior to this time.
+ // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react
+ // by sending a graceful termination signal to the containers in the pod. After that 30 seconds,
+ // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup,
+ // remove the pod from the API. In the presence of network partitions, this object may still
+ // exist after this timestamp, until an administrator or automated process can determine the
+ // resource is fully terminated.
+ // If not set, graceful deletion of the object has not been requested.
+ //
+ // Populated by the system when a graceful deletion is requested.
+ // Read-only.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // +optional
+ optional Time deletionTimestamp = 9;
+
+ // Number of seconds allowed for this object to gracefully terminate before
+ // it will be removed from the system. Only set when deletionTimestamp is also set.
+ // May only be shortened.
+ // Read-only.
+ // +optional
+ optional int64 deletionGracePeriodSeconds = 10;
+
+ // Map of string keys and values that can be used to organize and categorize
+ // (scope and select) objects. May match selectors of replication controllers
+ // and services.
+ // More info: http://kubernetes.io/docs/user-guide/labels
+ // +optional
+ map<string, string> labels = 11;
+
+ // Annotations is an unstructured key value map stored with a resource that may be
+ // set by external tools to store and retrieve arbitrary metadata. They are not
+ // queryable and should be preserved when modifying objects.
+ // More info: http://kubernetes.io/docs/user-guide/annotations
+ // +optional
+ map<string, string> annotations = 12;
+
+ // List of objects depended by this object. If ALL objects in the list have
+ // been deleted, this object will be garbage collected. If this object is managed by a controller,
+ // then an entry in this list will point to this controller, with the controller field set to true.
+ // There cannot be more than one managing controller.
+ // +optional
+ // +patchMergeKey=uid
+ // +patchStrategy=merge
+ repeated OwnerReference ownerReferences = 13;
+
+ // Must be empty before the object is deleted from the registry. Each entry
+ // is an identifier for the responsible component that will remove the entry
+ // from the list. If the deletionTimestamp of the object is non-nil, entries
+ // in this list can only be removed.
+ // Finalizers may be processed and removed in any order. Order is NOT enforced
+ // because it introduces significant risk of stuck finalizers.
+ // finalizers is a shared field, any actor with permission can reorder it. 
+ // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. + // +optional + // +patchStrategy=merge + repeated string finalizers = 14; + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + optional string clusterName = 15; + + // ManagedFields maps workflow-id and version to the set of fields + // that are managed by that workflow. This is mostly for internal + // housekeeping, and users typically shouldn't need to set or + // understand this field. A workflow can be the user's name, a + // controller's name, or the name of a specific apply path like + // "ci-cd". The set of fields is always in the version that the + // workflow used when modifying the object. + // + // +optional + repeated ManagedFieldsEntry managedFields = 17; +} + +// OwnerReference contains enough information to let you identify an owning +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. +message OwnerReference { + // API version of the referent. + optional string apiVersion = 5; + + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional string kind = 1; + + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + optional string name = 3; + + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + optional string uid = 4; + + // If true, this reference points to the managing controller. + // +optional + optional bool controller = 6; + + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + optional bool blockOwnerDeletion = 7; +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadata { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional ObjectMeta metadata = 1; +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // Standard list metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // items contains each of the included items. + repeated PartialObjectMetadata items = 2; +} + +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +message Patch { +} + +// PatchOptions may be provided when patching an API object. +// PatchOptions is meant to be a superset of UpdateOptions. +message PatchOptions { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 1; + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + optional bool force = 2; + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. This + // field is required for apply requests + // (application/apply-patch) but optional for non-apply patch + // types (JsonPatch, MergePatch, StrategicMergePatch). + // +optional + optional string fieldManager = 3; +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +message Preconditions { + // Specifies the target UID. + // +optional + optional string uid = 1; + + // Specifies the target ResourceVersion + // +optional + optional string resourceVersion = 2; +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +message RootPaths { + // paths are the paths available at root. + repeated string paths = 1; +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +message ServerAddressByClientCIDR { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + optional string clientCIDR = 1; + + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + optional string serverAddress = 2; +} + +// Status is a return value for calls that don't return other objects. +message Status { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional ListMeta metadata = 1; + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + optional string status = 2; + + // A human-readable description of the status of this operation. + // +optional + optional string message = 3; + + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + optional string reason = 4; + + // Extended data associated with the reason. Each reason may define its + // own extended details. 
This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + optional StatusDetails details = 5; + + // Suggested HTTP return code for this status, 0 if not set. + // +optional + optional int32 code = 6; +} + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +message StatusCause { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + optional string reason = 1; + + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + optional string message = 2; + + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + optional string field = 3; +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +message StatusDetails { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + optional string name = 1; + + // The group attribute of the resource associated with the status StatusReason. + // +optional + optional string group = 2; + + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional string kind = 3; + + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + optional string uid = 6; + + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. + // +optional + repeated StatusCause causes = 4; + + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. + // +optional + optional int32 retryAfterSeconds = 5; +} + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message TableOptions { + // includeObject decides whether to include each object along with its columnar information. 
+ // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + optional string includeObject = 1; +} + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Time { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +message Timestamp { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + optional int64 seconds = 1; + + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + optional int32 nanos = 2; +} + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false +message TypeMeta { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + optional string kind = 1; + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + // +optional + optional string apiVersion = 2; +} + +// UpdateOptions may be provided when updating an API object. +// All fields in UpdateOptions should also be present in PatchOptions. +message UpdateOptions { + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + repeated string dryRun = 1; + + // fieldManager is a name associated with the actor or entity + // that is making these changes. 
The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + optional string fieldManager = 2; +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +message Verbs { + // items, if empty, will result in an empty slice + + repeated string items = 1; +} + +// Event represents a single event to a watched resource. +// +// +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message WatchEvent { + optional string type = 1; + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. + optional k8s.io.apimachinery.pkg.runtime.RawExtension object = 2; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go new file mode 100644 index 0000000..bd4c6d9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/group_version.go @@ -0,0 +1,148 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupResource struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` +} + +func (gr *GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionResource struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + Resource string `json:"resource" protobuf:"bytes,3,opt,name=resource"` +} + +func (gvr *GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. 
This is useful for identifying +// concepts during lookup stages without having partially valid types +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupKind struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` +} + +func (gk *GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersionKind struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +type GroupVersion struct { + Group string `json:"group" protobuf:"bytes,1,opt,name=group"` + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// MarshalJSON implements the json.Marshaller interface. +func (gv GroupVersion) MarshalJSON() ([]byte, error) { + s := gv.String() + if strings.Count(s, "/") > 1 { + return []byte{}, fmt.Errorf("illegal GroupVersion %v: contains more than one /", s) + } + return json.Marshal(s) +} + +func (gv *GroupVersion) unmarshal(value []byte) error { + var s string + if err := json.Unmarshal(value, &s); err != nil { + return err + } + parsed, err := schema.ParseGroupVersion(s) + if err != nil { + return err + } + gv.Group, gv.Version = parsed.Group, parsed.Version + return nil +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (gv *GroupVersion) UnmarshalJSON(value []byte) error { + return gv.unmarshal(value) +} + +// UnmarshalTEXT implements the Ugorji's encoding.TextUnmarshaler interface. +func (gv *GroupVersion) UnmarshalText(value []byte) error { + return gv.unmarshal(value) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go new file mode 100644 index 0000000..ad989ad --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -0,0 +1,282 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/types" +) + +// LabelSelectorAsSelector converts the LabelSelector api type into a struct that implements +// labels.Selector +// Note: This function should be kept in sync with the selector methods in pkg/labels/selector.go +func LabelSelectorAsSelector(ps *LabelSelector) (labels.Selector, error) { + if ps == nil { + return labels.Nothing(), nil + } + if len(ps.MatchLabels)+len(ps.MatchExpressions) == 0 { + return labels.Everything(), nil + } + selector := labels.NewSelector() + for k, v := range ps.MatchLabels { + r, err := labels.NewRequirement(k, selection.Equals, []string{v}) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + for _, expr := range ps.MatchExpressions { + var op selection.Operator + switch expr.Operator { + case LabelSelectorOpIn: + op = selection.In + case LabelSelectorOpNotIn: + op = selection.NotIn + case LabelSelectorOpExists: + op = selection.Exists + case LabelSelectorOpDoesNotExist: + op = selection.DoesNotExist + default: + return nil, fmt.Errorf("%q is not a valid pod selector operator", expr.Operator) + } + r, err := labels.NewRequirement(expr.Key, op, append([]string(nil), expr.Values...)) + if err != nil { + return nil, err + } + selector = selector.Add(*r) + } + return selector, nil +} + +// LabelSelectorAsMap converts the LabelSelector api type into a map of strings, ie. the +// original structure of a label selector. Operators that cannot be converted into plain +// labels (Exists, DoesNotExist, NotIn, and In with more than one value) will result in +// an error. +func LabelSelectorAsMap(ps *LabelSelector) (map[string]string, error) { + if ps == nil { + return nil, nil + } + selector := map[string]string{} + for k, v := range ps.MatchLabels { + selector[k] = v + } + for _, expr := range ps.MatchExpressions { + switch expr.Operator { + case LabelSelectorOpIn: + if len(expr.Values) != 1 { + return selector, fmt.Errorf("operator %q without a single value cannot be converted into the old label selector format", expr.Operator) + } + // Should we do anything in case this will override a previous key-value pair? + selector[expr.Key] = expr.Values[0] + case LabelSelectorOpNotIn, LabelSelectorOpExists, LabelSelectorOpDoesNotExist: + return selector, fmt.Errorf("operator %q cannot be converted into the old label selector format", expr.Operator) + default: + return selector, fmt.Errorf("%q is not a valid selector operator", expr.Operator) + } + } + return selector, nil +} + +// ParseToLabelSelector parses a string representing a selector into a LabelSelector object. 
+// Note: This function should be kept in sync with the parser in pkg/labels/selector.go +func ParseToLabelSelector(selector string) (*LabelSelector, error) { + reqs, err := labels.ParseToRequirements(selector) + if err != nil { + return nil, fmt.Errorf("couldn't parse the selector string \"%s\": %v", selector, err) + } + + labelSelector := &LabelSelector{ + MatchLabels: map[string]string{}, + MatchExpressions: []LabelSelectorRequirement{}, + } + for _, req := range reqs { + var op LabelSelectorOperator + switch req.Operator() { + case selection.Equals, selection.DoubleEquals: + vals := req.Values() + if vals.Len() != 1 { + return nil, fmt.Errorf("equals operator must have exactly one value") + } + val, ok := vals.PopAny() + if !ok { + return nil, fmt.Errorf("equals operator has exactly one value but it cannot be retrieved") + } + labelSelector.MatchLabels[req.Key()] = val + continue + case selection.In: + op = LabelSelectorOpIn + case selection.NotIn: + op = LabelSelectorOpNotIn + case selection.Exists: + op = LabelSelectorOpExists + case selection.DoesNotExist: + op = LabelSelectorOpDoesNotExist + case selection.GreaterThan, selection.LessThan: + // Adding a separate case for these operators to indicate that this is deliberate + return nil, fmt.Errorf("%q isn't supported in label selectors", req.Operator()) + default: + return nil, fmt.Errorf("%q is not a valid label selector operator", req.Operator()) + } + labelSelector.MatchExpressions = append(labelSelector.MatchExpressions, LabelSelectorRequirement{ + Key: req.Key(), + Operator: op, + Values: req.Values().List(), + }) + } + return labelSelector, nil +} + +// SetAsLabelSelector converts the labels.Set object into a LabelSelector api object. +func SetAsLabelSelector(ls labels.Set) *LabelSelector { + if ls == nil { + return nil + } + + selector := &LabelSelector{ + MatchLabels: make(map[string]string), + } + for label, value := range ls { + selector.MatchLabels[label] = value + } + + return selector +} + +// FormatLabelSelector convert labelSelector into plain string +func FormatLabelSelector(labelSelector *LabelSelector) string { + selector, err := LabelSelectorAsSelector(labelSelector) + if err != nil { + return "" + } + + l := selector.String() + if len(l) == 0 { + l = "" + } + return l +} + +func ExtractGroupVersions(l *APIGroupList) []string { + var groupVersions []string + for _, g := range l.Groups { + for _, gv := range g.Versions { + groupVersions = append(groupVersions, gv.GroupVersion) + } + } + return groupVersions +} + +// HasAnnotation returns a bool if passed in annotation exists +func HasAnnotation(obj ObjectMeta, ann string) bool { + _, found := obj.Annotations[ann] + return found +} + +// SetMetaDataAnnotation sets the annotation and value +func SetMetaDataAnnotation(obj *ObjectMeta, ann string, value string) { + if obj.Annotations == nil { + obj.Annotations = make(map[string]string) + } + obj.Annotations[ann] = value +} + +// SingleObject returns a ListOptions for watching a single object. +func SingleObject(meta ObjectMeta) ListOptions { + return ListOptions{ + FieldSelector: fields.OneTermEqualSelector("metadata.name", meta.Name).String(), + ResourceVersion: meta.ResourceVersion, + } +} + +// NewDeleteOptions returns a DeleteOptions indicating the resource should +// be deleted within the specified grace period. Use zero to indicate +// immediate deletion. If you would prefer to use the default grace period, +// use &metav1.DeleteOptions{} directly. 
+func NewDeleteOptions(grace int64) *DeleteOptions { + return &DeleteOptions{GracePeriodSeconds: &grace} +} + +// NewPreconditionDeleteOptions returns a DeleteOptions with a UID precondition set. +func NewPreconditionDeleteOptions(uid string) *DeleteOptions { + u := types.UID(uid) + p := Preconditions{UID: &u} + return &DeleteOptions{Preconditions: &p} +} + +// NewUIDPreconditions returns a Preconditions with UID set. +func NewUIDPreconditions(uid string) *Preconditions { + u := types.UID(uid) + return &Preconditions{UID: &u} +} + +// NewRVDeletionPrecondition returns a DeleteOptions with a ResourceVersion precondition set. +func NewRVDeletionPrecondition(rv string) *DeleteOptions { + p := Preconditions{ResourceVersion: &rv} + return &DeleteOptions{Preconditions: &p} +} + +// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. +func HasObjectMetaSystemFieldValues(meta Object) bool { + return !meta.GetCreationTimestamp().Time.IsZero() || + len(meta.GetUID()) != 0 +} + +// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields +// for a pre-existing object. This is opt-in for new objects with Status subresource. +func ResetObjectMetaForStatus(meta, existingMeta Object) { + meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp()) + meta.SetGeneration(existingMeta.GetGeneration()) + meta.SetSelfLink(existingMeta.GetSelfLink()) + meta.SetLabels(existingMeta.GetLabels()) + meta.SetAnnotations(existingMeta.GetAnnotations()) + meta.SetFinalizers(existingMeta.GetFinalizers()) + meta.SetOwnerReferences(existingMeta.GetOwnerReferences()) + // managedFields must be preserved since it's been modified to + // track changed fields in the status update. + //meta.SetManagedFields(existingMeta.GetManagedFields()) +} + +// MarshalJSON implements json.Marshaler +// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (f FieldsV1) MarshalJSON() ([]byte, error) { + if f.Raw == nil { + return []byte("null"), nil + } + return f.Raw, nil +} + +// UnmarshalJSON implements json.Unmarshaler +func (f *FieldsV1) UnmarshalJSON(b []byte) error { + if f == nil { + return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") + } + if !bytes.Equal(b, []byte("null")) { + f.Raw = append(f.Raw[0:0], b...) + } + return nil +} + +var _ json.Marshaler = FieldsV1{} +var _ json.Unmarshaler = &FieldsV1{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go new file mode 100644 index 0000000..9b45145 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/labels.go @@ -0,0 +1,55 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// Clones the given selector and returns a new selector with the given key and value added. +// Returns the given selector, if labelKey is empty. 
+func CloneSelectorAndAddLabel(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + + // Clone. + newSelector := selector.DeepCopy() + + if newSelector.MatchLabels == nil { + newSelector.MatchLabels = make(map[string]string) + } + + newSelector.MatchLabels[labelKey] = labelValue + + return newSelector +} + +// AddLabelToSelector returns a selector with the given key and value added to the given selector's MatchLabels. +func AddLabelToSelector(selector *LabelSelector, labelKey, labelValue string) *LabelSelector { + if labelKey == "" { + // Don't need to add a label. + return selector + } + if selector.MatchLabels == nil { + selector.MatchLabels = make(map[string]string) + } + selector.MatchLabels[labelKey] = labelValue + return selector +} + +// SelectorHasLabel checks if the given selector contains the given label key in its MatchLabels +func SelectorHasLabel(selector *LabelSelector, labelKey string) bool { + return len(selector.MatchLabels[labelKey]) > 0 +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go new file mode 100644 index 0000000..912cf20 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -0,0 +1,178 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" +) + +// TODO: move this, Object, List, and Type to a different package +type ObjectMetaAccessor interface { + GetObjectMeta() Object +} + +// Object lets you work with object metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field (Name, UID, Namespace on lists) will be a no-op and return +// a default value. 
+type Object interface { + GetNamespace() string + SetNamespace(namespace string) + GetName() string + SetName(name string) + GetGenerateName() string + SetGenerateName(name string) + GetUID() types.UID + SetUID(uid types.UID) + GetResourceVersion() string + SetResourceVersion(version string) + GetGeneration() int64 + SetGeneration(generation int64) + GetSelfLink() string + SetSelfLink(selfLink string) + GetCreationTimestamp() Time + SetCreationTimestamp(timestamp Time) + GetDeletionTimestamp() *Time + SetDeletionTimestamp(timestamp *Time) + GetDeletionGracePeriodSeconds() *int64 + SetDeletionGracePeriodSeconds(*int64) + GetLabels() map[string]string + SetLabels(labels map[string]string) + GetAnnotations() map[string]string + SetAnnotations(annotations map[string]string) + GetFinalizers() []string + SetFinalizers(finalizers []string) + GetOwnerReferences() []OwnerReference + SetOwnerReferences([]OwnerReference) + GetClusterName() string + SetClusterName(clusterName string) + GetManagedFields() []ManagedFieldsEntry + SetManagedFields(managedFields []ManagedFieldsEntry) +} + +// ListMetaAccessor retrieves the list interface from an object +type ListMetaAccessor interface { + GetListMeta() ListInterface +} + +// Common lets you work with core metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type Common interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) +} + +// ListInterface lets you work with list metadata from any of the versioned or +// internal API objects. Attempting to set or retrieve a field on an object that does +// not support that field will be a no-op and return a default value. +// TODO: move this, and TypeMeta and ListMeta, to a different package +type ListInterface interface { + GetResourceVersion() string + SetResourceVersion(version string) + GetSelfLink() string + SetSelfLink(selfLink string) + GetContinue() string + SetContinue(c string) + GetRemainingItemCount() *int64 + SetRemainingItemCount(c *int64) +} + +// Type exposes the type and APIVersion of versioned or internal API objects. 
+// TODO: move this, and TypeMeta and ListMeta, to a different package +type Type interface { + GetAPIVersion() string + SetAPIVersion(version string) + GetKind() string + SetKind(kind string) +} + +var _ ListInterface = &ListMeta{} + +func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ListMeta) GetContinue() string { return meta.Continue } +func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } +func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount } +func (meta *ListMeta) SetRemainingItemCount(c *int64) { meta.RemainingItemCount = c } + +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *ListMeta) GetListMeta() ListInterface { return obj } + +func (obj *ObjectMeta) GetObjectMeta() Object { return obj } + +// Namespace implements metav1.Object for any object with an ObjectMeta typed field. Allows +// fast, direct access to metadata fields for API objects. +func (meta *ObjectMeta) GetNamespace() string { return meta.Namespace } +func (meta *ObjectMeta) SetNamespace(namespace string) { meta.Namespace = namespace } +func (meta *ObjectMeta) GetName() string { return meta.Name } +func (meta *ObjectMeta) SetName(name string) { meta.Name = name } +func (meta *ObjectMeta) GetGenerateName() string { return meta.GenerateName } +func (meta *ObjectMeta) SetGenerateName(generateName string) { meta.GenerateName = generateName } +func (meta *ObjectMeta) GetUID() types.UID { return meta.UID } +func (meta *ObjectMeta) SetUID(uid types.UID) { meta.UID = uid } +func (meta *ObjectMeta) GetResourceVersion() string { return meta.ResourceVersion } +func (meta *ObjectMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } +func (meta *ObjectMeta) GetGeneration() int64 { return meta.Generation } +func (meta *ObjectMeta) SetGeneration(generation int64) { meta.Generation = generation } +func (meta *ObjectMeta) GetSelfLink() string { return meta.SelfLink } +func (meta *ObjectMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } +func (meta *ObjectMeta) GetCreationTimestamp() Time { return meta.CreationTimestamp } +func (meta *ObjectMeta) SetCreationTimestamp(creationTimestamp Time) { + meta.CreationTimestamp = creationTimestamp +} +func (meta *ObjectMeta) GetDeletionTimestamp() *Time { return meta.DeletionTimestamp } +func (meta *ObjectMeta) SetDeletionTimestamp(deletionTimestamp *Time) { + meta.DeletionTimestamp = deletionTimestamp +} +func (meta *ObjectMeta) GetDeletionGracePeriodSeconds() *int64 { return meta.DeletionGracePeriodSeconds } +func (meta *ObjectMeta) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { + meta.DeletionGracePeriodSeconds = deletionGracePeriodSeconds +} +func (meta *ObjectMeta) GetLabels() map[string]string { return meta.Labels } +func (meta 
*ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } +func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } +func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } +func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } +func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } +func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { + meta.OwnerReferences = references +} +func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } +func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } +func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields } +func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) { + meta.ManagedFields = managedFields +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go new file mode 100644 index 0000000..cdd9a6a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -0,0 +1,196 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "encoding/json" + "time" + + "github.com/google/gofuzz" +) + +const RFC3339Micro = "2006-01-02T15:04:05.000000Z07:00" + +// MicroTime is version of Time with microsecond level precision. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type MicroTime struct { + time.Time `protobuf:"-"` +} + +// DeepCopy returns a deep-copy of the MicroTime value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *MicroTime) DeepCopyInto(out *MicroTime) { + *out = *t +} + +// NewMicroTime returns a wrapped instance of the provided time +func NewMicroTime(time time.Time) MicroTime { + return MicroTime{time} +} + +// DateMicro returns the MicroTime corresponding to the supplied parameters +// by wrapping time.Date. +func DateMicro(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) MicroTime { + return MicroTime{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// NowMicro returns the current local time. +func NowMicro() MicroTime { + return MicroTime{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *MicroTime) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t *MicroTime) Before(u *MicroTime) bool { + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false +} + +// Equal reports whether the time instant t is equal to u. 
+func (t *MicroTime) Equal(u *MicroTime) bool { + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false +} + +// BeforeTime reports whether the time instant t is before second-lever precision u. +func (t *MicroTime) BeforeTime(u *Time) bool { + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false +} + +// EqualTime reports whether the time instant t is equal to second-lever precision u. +func (t *MicroTime) EqualTime(u *Time) bool { + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false +} + +// UnixMicro returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func UnixMicro(sec int64, nsec int64) MicroTime { + return MicroTime{time.Unix(sec, nsec)} +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (t *MicroTime) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pt, err := time.Parse(RFC3339Micro, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *MicroTime) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(RFC3339Micro, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t MicroTime) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + + return json.Marshal(t.UTC().Format(RFC3339Micro)) +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ MicroTime) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ MicroTime) OpenAPISchemaFormat() string { return "date-time" } + +// MarshalQueryParameter converts to a URL query parameter value +func (t MicroTime) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(RFC3339Micro), nil +} + +// Fuzz satisfies fuzz.Interface. +func (t *MicroTime) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Accurate to a tenth of + // micro second. Leave off nanoseconds because JSON doesn't + // represent them so they can't round-trip properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 1000*c.Rand.Int63n(1000000)) +} + +var _ fuzz.Interface = &MicroTime{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go new file mode 100644 index 0000000..6dd6d89 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -0,0 +1,80 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" +) + +// Timestamp is declared in time_proto.go + +// Timestamp returns the Time as a new Timestamp value. +func (m *MicroTime) ProtoMicroTime() *Timestamp { + if m == nil { + return &Timestamp{} + } + return &Timestamp{ + Seconds: m.Time.Unix(), + Nanos: int32(m.Time.Nanosecond()), + } +} + +// Size implements the protobuf marshalling interface. +func (m *MicroTime) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoMicroTime().Size() +} + +// Reset implements the protobuf marshalling interface. +func (m *MicroTime) Unmarshal(data []byte) error { + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} + if err := p.Unmarshal(data); err != nil { + return err + } + m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + return nil +} + +// Marshal implements the protobuf marshalling interface. +func (m *MicroTime) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } + return m.ProtoMicroTime().Marshal() +} + +// MarshalTo implements the protobuf marshalling interface. +func (m *MicroTime) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalTo(data) +} + +// MarshalToSizedBuffer implements the protobuf marshalling interface. +func (m *MicroTime) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoMicroTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go new file mode 100644 index 0000000..c1a0771 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -0,0 +1,108 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +var ( + // localSchemeBuilder is used to make compiler happy for autogenerated + // conversions. However, it's not used. 
+ schemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &schemeBuilder +) + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} + +// Unversioned is group version for unversioned API objects +// TODO: this should be v1 probably +var Unversioned = schema.GroupVersion{Group: "", Version: "v1"} + +// WatchEventKind is name reserved for serializing watch events. +const WatchEventKind = "WatchEvent" + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +var optionsTypes = []runtime.Object{ + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + &CreateOptions{}, + &UpdateOptions{}, + &PatchOptions{}, +} + +// AddToGroupVersion registers common meta types into schemas. +func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { + scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) + scheme.AddKnownTypeWithName( + schema.GroupVersion{Group: groupVersion.Group, Version: runtime.APIVersionInternal}.WithKind(WatchEventKind), + &InternalEvent{}, + ) + // Supports legacy code paths, most callers should use metav1.ParameterCodec for now + scheme.AddKnownTypes(groupVersion, optionsTypes...) + // Register Unversioned types under their own special group + scheme.AddUnversionedTypes(Unversioned, + &Status{}, + &APIVersions{}, + &APIGroupList{}, + &APIGroup{}, + &APIResourceList{}, + ) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + utilruntime.Must(RegisterConversions(scheme)) + utilruntime.Must(RegisterDefaults(scheme)) +} + +// AddMetaToScheme registers base meta types into schemas. +func AddMetaToScheme(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) + + return nil +} + +func init() { + scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...) + + utilruntime.Must(AddMetaToScheme(scheme)) + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + utilruntime.Must(RegisterDefaults(scheme)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go new file mode 100644 index 0000000..4a1d89c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -0,0 +1,197 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1 + +import ( + "encoding/json" + "time" + + fuzz "github.com/google/gofuzz" +) + +// Time is a wrapper around time.Time which supports correct +// marshaling to YAML and JSON. Wrappers are provided for many +// of the factory methods that the time package offers. +// +// +protobuf.options.marshal=false +// +protobuf.as=Timestamp +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Time struct { + time.Time `protobuf:"-"` +} + +// DeepCopyInto creates a deep-copy of the Time value. The underlying time.Time +// type is effectively immutable in the time API, so it is safe to +// copy-by-assign, despite the presence of (unexported) Pointer fields. +func (t *Time) DeepCopyInto(out *Time) { + *out = *t +} + +// NewTime returns a wrapped instance of the provided time +func NewTime(time time.Time) Time { + return Time{time} +} + +// Date returns the Time corresponding to the supplied parameters +// by wrapping time.Date. +func Date(year int, month time.Month, day, hour, min, sec, nsec int, loc *time.Location) Time { + return Time{time.Date(year, month, day, hour, min, sec, nsec, loc)} +} + +// Now returns the current local time. +func Now() Time { + return Time{time.Now()} +} + +// IsZero returns true if the value is nil or time is zero. +func (t *Time) IsZero() bool { + if t == nil { + return true + } + return t.Time.IsZero() +} + +// Before reports whether the time instant t is before u. +func (t *Time) Before(u *Time) bool { + if t != nil && u != nil { + return t.Time.Before(u.Time) + } + return false +} + +// Equal reports whether the time instant t is equal to u. +func (t *Time) Equal(u *Time) bool { + if t == nil && u == nil { + return true + } + if t != nil && u != nil { + return t.Time.Equal(u.Time) + } + return false +} + +// Unix returns the local time corresponding to the given Unix time +// by wrapping time.Unix. +func Unix(sec int64, nsec int64) Time { + return Time{time.Unix(sec, nsec)} +} + +// Rfc3339Copy returns a copy of the Time at second-level precision. +func (t Time) Rfc3339Copy() Time { + copied, _ := time.Parse(time.RFC3339, t.Format(time.RFC3339)) + return Time{copied} +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (t *Time) UnmarshalJSON(b []byte) error { + if len(b) == 4 && string(b) == "null" { + t.Time = time.Time{} + return nil + } + + var str string + err := json.Unmarshal(b, &str) + if err != nil { + return err + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// UnmarshalQueryParameter converts from a URL query parameter value to an object +func (t *Time) UnmarshalQueryParameter(str string) error { + if len(str) == 0 { + t.Time = time.Time{} + return nil + } + // Tolerate requests from older clients that used JSON serialization to build query params + if len(str) == 4 && str == "null" { + t.Time = time.Time{} + return nil + } + + pt, err := time.Parse(time.RFC3339, str) + if err != nil { + return err + } + + t.Time = pt.Local() + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + if t.IsZero() { + // Encode unset/nil objects as JSON's "null". + return []byte("null"), nil + } + buf := make([]byte, 0, len(time.RFC3339)+2) + buf = append(buf, '"') + // time cannot contain non escapable JSON characters + buf = t.UTC().AppendFormat(buf, time.RFC3339) + buf = append(buf, '"') + return buf, nil +} + +// ToUnstructured implements the value.UnstructuredConverter interface. 
+func (t Time) ToUnstructured() interface{} { + if t.IsZero() { + return nil + } + buf := make([]byte, 0, len(time.RFC3339)) + buf = t.UTC().AppendFormat(buf, time.RFC3339) + return string(buf) +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (_ Time) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (_ Time) OpenAPISchemaFormat() string { return "date-time" } + +// MarshalQueryParameter converts to a URL query parameter value +func (t Time) MarshalQueryParameter() (string, error) { + if t.IsZero() { + // Encode unset/nil objects as an empty string + return "", nil + } + + return t.UTC().Format(time.RFC3339), nil +} + +// Fuzz satisfies fuzz.Interface. +func (t *Time) Fuzz(c fuzz.Continue) { + if t == nil { + return + } + // Allow for about 1000 years of randomness. Leave off nanoseconds + // because JSON doesn't represent them so they can't round-trip + // properly. + t.Time = time.Unix(c.Rand.Int63n(1000*365*24*60*60), 0) +} + +var _ fuzz.Interface = &Time{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go new file mode 100644 index 0000000..eac8d96 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -0,0 +1,100 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "time" +) + +// Timestamp is a struct that is equivalent to Time, but intended for +// protobuf marshalling/unmarshalling. It is generated into a serialization +// that matches Time. Do not use in Go structs. +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `json:"seconds" protobuf:"varint,1,opt,name=seconds"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. This field may be limited in precision depending on context. + Nanos int32 `json:"nanos" protobuf:"varint,2,opt,name=nanos"` +} + +// Timestamp returns the Time as a new Timestamp value. +func (m *Time) ProtoTime() *Timestamp { + if m == nil { + return &Timestamp{} + } + return &Timestamp{ + Seconds: m.Time.Unix(), + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. + // Nanos: int32(m.Time.Nanosecond()), + } +} + +// Size implements the protobuf marshalling interface. 
+func (m *Time) Size() (n int) { + if m == nil || m.Time.IsZero() { + return 0 + } + return m.ProtoTime().Size() +} + +// Reset implements the protobuf marshalling interface. +func (m *Time) Unmarshal(data []byte) error { + if len(data) == 0 { + m.Time = time.Time{} + return nil + } + p := Timestamp{} + if err := p.Unmarshal(data); err != nil { + return err + } + // leaving this here for the record. our JSON only handled seconds, so this results in writes by + // protobuf clients storing values that aren't read by json clients, which results in unexpected + // field mutation, which fails various validation and equality code. + // m.Time = time.Unix(p.Seconds, int64(p.Nanos)).Local() + m.Time = time.Unix(p.Seconds, int64(0)).Local() + return nil +} + +// Marshal implements the protobuf marshaling interface. +func (m *Time) Marshal() (data []byte, err error) { + if m == nil || m.Time.IsZero() { + return nil, nil + } + return m.ProtoTime().Marshal() +} + +// MarshalTo implements the protobuf marshaling interface. +func (m *Time) MarshalTo(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalTo(data) +} + +// MarshalToSizedBuffer implements the protobuf reverse marshaling interface. +func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) { + if m == nil || m.Time.IsZero() { + return 0, nil + } + return m.ProtoTime().MarshalToSizedBuffer(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go new file mode 100644 index 0000000..e7aaead --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -0,0 +1,1316 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package v1 contains API types that are common to all versions. +// +// The package contains two categories of types: +// - external (serialized) types that lack their own version (e.g TypeMeta) +// - internal (never-serialized) types that are needed by several different +// api groups, and so live here, to avoid duplication and/or import loops +// (e.g. LabelSelector). +// In the future, we will probably move these categories of objects into +// separate packages. +package v1 + +import ( + "fmt" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" +) + +// TypeMeta describes an individual object in an API response or request +// with strings representing the type of the object and its API schema version. +// Structures that are versioned or persisted should inline TypeMeta. +// +// +k8s:deepcopy-gen=false +type TypeMeta struct { + // Kind is a string value representing the REST resource this object represents. + // Servers may infer this from the endpoint the client submits requests to. + // Cannot be updated. + // In CamelCase. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` + + // APIVersion defines the versioned schema of this representation of an object. + // Servers should convert recognized schemas to the latest internal value, and + // may reject unrecognized values. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + // +optional + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` +} + +// ListMeta describes metadata that synthetic resources must have, including lists and +// various status objects. A resource may have only one of {ObjectMeta, ListMeta}. +type ListMeta struct { + // selfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` + + // String that identifies the server's internal version of this object that + // can be used by clients to determine when objects have changed. + // Value must be treated as opaque by clients and passed unmodified back to the server. + // Populated by the system. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` + + // continue may be set if the user set a limit on the number of items returned, and indicates that + // the server has more data available. The value is opaque and may be used to issue another request + // to the endpoint that served this list to retrieve the next set of available objects. Continuing a + // consistent list may not be possible if the server configuration has changed or more than a few + // minutes have passed. The resourceVersion field returned when using this continue value will be + // identical to the value in the first response, unless you have received this token from an error + // message. + Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"` + + // remainingItemCount is the number of subsequent items in the list which are not included in this + // list response. If the list request contained label or field selectors, then the number of + // remaining items is unknown and the field will be left unset and omitted during serialization. + // If the list is complete (either because it is not chunking or because this is the last chunk), + // then there are no more remaining items and this field will be left unset and omitted during + // serialization. + // Servers older than v1.15 do not set this field. + // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients + // should not rely on the remainingItemCount to be set or to be exact. 
+ // +optional + RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"` +} + +// These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here +const ( + FinalizerOrphanDependents string = "orphan" + FinalizerDeleteDependents string = "foregroundDeletion" +) + +// ObjectMeta is metadata that all persisted resources must have, which includes all objects +// users must create. +type ObjectMeta struct { + // Name must be unique within a namespace. Is required when creating resources, although + // some resources may allow a client to request the generation of an appropriate name + // automatically. Name is primarily intended for creation idempotence and configuration + // definition. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + + // GenerateName is an optional prefix, used by the server, to generate a unique + // name ONLY IF the Name field has not been provided. + // If this field is used, the name returned to the client will be different + // than the name passed. This value will also be combined with a unique suffix. + // The provided value has the same validation rules as the Name field, + // and may be truncated by the length of the suffix required to make the value + // unique on the server. + // + // If this field is specified and the generated name exists, the server will + // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason + // ServerTimeout indicating a unique name could not be found in the time allotted, and the client + // should retry (optionally after the time indicated in the Retry-After header). + // + // Applied only if Name is not specified. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + // +optional + GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` + + // Namespace defines the space within each name must be unique. An empty namespace is + // equivalent to the "default" namespace, but "default" is the canonical representation. + // Not all objects are required to be scoped to a namespace - the value of this field for + // those objects will be empty. + // + // Must be a DNS_LABEL. + // Cannot be updated. + // More info: http://kubernetes.io/docs/user-guide/namespaces + // +optional + Namespace string `json:"namespace,omitempty" protobuf:"bytes,3,opt,name=namespace"` + + // SelfLink is a URL representing this object. + // Populated by the system. + // Read-only. + // + // DEPRECATED + // Kubernetes will stop propagating this field in 1.20 release and the field is planned + // to be removed in 1.21 release. + // +optional + SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` + + // UID is the unique in time and space value for this object. It is typically generated by + // the server on successful creation of a resource and is not allowed to change on PUT + // operations. + // + // Populated by the system. + // Read-only. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,5,opt,name=uid,casttype=k8s.io/kubernetes/pkg/types.UID"` + + // An opaque value that represents the internal version of this object that can + // be used by clients to determine when objects have changed. 
May be used for optimistic + // concurrency, change detection, and the watch operation on a resource or set of resources. + // Clients must treat these values as opaque and passed unmodified back to the server. + // They may only be valid for a particular resource or set of resources. + // + // Populated by the system. + // Read-only. + // Value must be treated as opaque by clients and . + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` + + // A sequence number representing a specific generation of the desired state. + // Populated by the system. Read-only. + // +optional + Generation int64 `json:"generation,omitempty" protobuf:"varint,7,opt,name=generation"` + + // CreationTimestamp is a timestamp representing the server time when this object was + // created. It is not guaranteed to be set in happens-before order across separate operations. + // Clients may not set this value. It is represented in RFC3339 form and is in UTC. + // + // Populated by the system. + // Read-only. + // Null for lists. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` + + // DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This + // field is set by the server when a graceful deletion is requested by the user, and is not + // directly settable by a client. The resource is expected to be deleted (no longer visible + // from resource lists, and not reachable by name) after the time in this field, once the + // finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. + // Once the deletionTimestamp is set, this value may not be unset or be set further into the + // future, although it may be shortened or the resource may be deleted prior to this time. + // For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react + // by sending a graceful termination signal to the containers in the pod. After that 30 seconds, + // the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, + // remove the pod from the API. In the presence of network partitions, this object may still + // exist after this timestamp, until an administrator or automated process can determine the + // resource is fully terminated. + // If not set, graceful deletion of the object has not been requested. + // + // Populated by the system when a graceful deletion is requested. + // Read-only. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` + + // Number of seconds allowed for this object to gracefully terminate before + // it will be removed from the system. Only set when deletionTimestamp is also set. + // May only be shortened. + // Read-only. + // +optional + DeletionGracePeriodSeconds *int64 `json:"deletionGracePeriodSeconds,omitempty" protobuf:"varint,10,opt,name=deletionGracePeriodSeconds"` + + // Map of string keys and values that can be used to organize and categorize + // (scope and select) objects. 
May match selectors of replication controllers + // and services. + // More info: http://kubernetes.io/docs/user-guide/labels + // +optional + Labels map[string]string `json:"labels,omitempty" protobuf:"bytes,11,rep,name=labels"` + + // Annotations is an unstructured key value map stored with a resource that may be + // set by external tools to store and retrieve arbitrary metadata. They are not + // queryable and should be preserved when modifying objects. + // More info: http://kubernetes.io/docs/user-guide/annotations + // +optional + Annotations map[string]string `json:"annotations,omitempty" protobuf:"bytes,12,rep,name=annotations"` + + // List of objects depended by this object. If ALL objects in the list have + // been deleted, this object will be garbage collected. If this object is managed by a controller, + // then an entry in this list will point to this controller, with the controller field set to true. + // There cannot be more than one managing controller. + // +optional + // +patchMergeKey=uid + // +patchStrategy=merge + OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + + // Must be empty before the object is deleted from the registry. Each entry + // is an identifier for the responsible component that will remove the entry + // from the list. If the deletionTimestamp of the object is non-nil, entries + // in this list can only be removed. + // Finalizers may be processed and removed in any order. Order is NOT enforced + // because it introduces significant risk of stuck finalizers. + // finalizers is a shared field, any actor with permission can reorder it. + // If the finalizer list is processed in order, then this can lead to a situation + // in which the component responsible for the first finalizer in the list is + // waiting for a signal (field value, external system, or other) produced by a + // component responsible for a finalizer later in the list, resulting in a deadlock. + // Without enforced ordering finalizers are free to order amongst themselves and + // are not vulnerable to ordering changes in the list. + // +optional + // +patchStrategy=merge + Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` + + // The name of the cluster which the object belongs to. + // This is used to distinguish resources with same name and namespace in different clusters. + // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. + // +optional + ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` + + // ManagedFields maps workflow-id and version to the set of fields + // that are managed by that workflow. This is mostly for internal + // housekeeping, and users typically shouldn't need to set or + // understand this field. A workflow can be the user's name, a + // controller's name, or the name of a specific apply path like + // "ci-cd". The set of fields is always in the version that the + // workflow used when modifying the object. 
+ // + // +optional + ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` +} + +const ( + // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients + NamespaceDefault string = "default" + // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces + NamespaceAll string = "" + // NamespaceNone is the argument for a context when there is no namespace. + NamespaceNone string = "" + // NamespaceSystem is the system namespace where we place system components. + NamespaceSystem string = "kube-system" + // NamespacePublic is the namespace where we place public info (ConfigMaps) + NamespacePublic string = "kube-public" +) + +// OwnerReference contains enough information to let you identify an owning +// object. An owning object must be in the same namespace as the dependent, or +// be cluster-scoped, so there is no namespace field. +type OwnerReference struct { + // API version of the referent. + APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` + // Kind of the referent. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` + // Name of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#names + Name string `json:"name" protobuf:"bytes,3,opt,name=name"` + // UID of the referent. + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + UID types.UID `json:"uid" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // If true, this reference points to the managing controller. + // +optional + Controller *bool `json:"controller,omitempty" protobuf:"varint,6,opt,name=controller"` + // If true, AND if the owner has the "foregroundDeletion" finalizer, then + // the owner cannot be deleted from the key-value store until this + // reference is removed. + // Defaults to false. + // To set this field, a user needs "delete" permission of the owner, + // otherwise 422 (Unprocessable Entity) will be returned. + // +optional + BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ListOptions is the query options to a standard REST list call. +type ListOptions struct { + TypeMeta `json:",inline"` + + // A selector to restrict the list of returned objects by their labels. + // Defaults to everything. + // +optional + LabelSelector string `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` + // A selector to restrict the list of returned objects by their fields. + // Defaults to everything. + // +optional + FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` + + // +k8s:deprecated=includeUninitialized,protobuf=6 + + // Watch for changes to the described resources and return them as a stream of + // add, update, and remove notifications. Specify resourceVersion. + // +optional + Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` + // allowWatchBookmarks requests watch events with type "BOOKMARK". + // Servers that do not implement bookmarks may ignore this flag and + // bookmarks are sent at the server's discretion. 
Clients should not + // assume bookmarks are returned at any specific interval, nor may they + // assume the server will send any BOOKMARK event during a session. + // If this is not a watch, this field is ignored. + // If the feature gate WatchBookmarks is not enabled in apiserver, + // this field is ignored. + // +optional + AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` + + // When specified with a watch call, shows changes that occur after that particular version of a resource. + // Defaults to changes from the beginning of history. + // When specified for list: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + // +optional + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` + // Timeout for the list/watch call. + // This limits the duration of the call, regardless of any activity or inactivity. + // +optional + TimeoutSeconds *int64 `json:"timeoutSeconds,omitempty" protobuf:"varint,5,opt,name=timeoutSeconds"` + + // limit is a maximum number of responses to return for a list call. If more items exist, the + // server will set the `continue` field on the list metadata to a value that can be used with the + // same initial query to retrieve the next set of results. Setting a limit may return fewer than + // the requested amount of items (up to zero items) in the event all requested objects are + // filtered out and clients should only use the presence of the continue field to determine whether + // more results are available. Servers may choose not to support the limit argument and will return + // all of the available results. If limit is specified and the continue field is empty, clients may + // assume that no more results are available. This field is not supported if watch is true. + // + // The server guarantees that the objects returned when using continue will be identical to issuing + // a single list call without a limit - that is, no objects created, modified, or deleted after the + // first request is issued will be included in any subsequent continued requests. This is sometimes + // referred to as a consistent snapshot, and ensures that a client that is using limit to receive + // smaller chunks of a very large result can ensure they see all possible objects. If objects are + // updated during a chunked list the version of the object that was present at the time the first list + // result was calculated is returned. + Limit int64 `json:"limit,omitempty" protobuf:"varint,7,opt,name=limit"` + // The continue option should be set when retrieving more results from the server. Since this value is + // server defined, clients may only use the continue value from a previous query result with identical + // query parameters (except for the value of continue) and the server may reject a continue value it + // does not recognize. If the specified continue value is no longer valid whether due to expiration + // (generally five to fifteen minutes) or a configuration change on the server, the server will + // respond with a 410 ResourceExpired error together with a continue token. If the client needs a + // consistent list, it must restart their list without the continue field. 
Otherwise, the client may + // send another list request with the token received with the 410 error, the server will respond with + // a list starting from the next key, but from the latest snapshot, which is inconsistent from the + // previous list results - objects that are created, modified, or deleted after the first list request + // will be included in the response, as long as their keys are after the "next key". + // + // This field is not supported when watch is true. Clients may start a watch from the last + // resourceVersion value returned by the server and not miss any modifications. + Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExportOptions is the query options to the standard REST get call. +// Deprecated. Planned for removal in 1.18. +type ExportOptions struct { + TypeMeta `json:",inline"` + // Should this value be exported. Export strips fields that a user can not specify. + // Deprecated. Planned for removal in 1.18. + Export bool `json:"export" protobuf:"varint,1,opt,name=export"` + // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. + // Deprecated. Planned for removal in 1.18. + Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// GetOptions is the standard query options to the standard REST get call. +type GetOptions struct { + TypeMeta `json:",inline"` + // When specified: + // - if unset, then the result is returned from remote storage based on quorum-read flag; + // - if it's 0, then we simply return what we currently have in cache, no guarantee; + // - if set to non zero, then the result is at least as fresh as given rv. + ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"` + // +k8s:deprecated=includeUninitialized,protobuf=2 +} + +// DeletionPropagation decides if a deletion will propagate to the dependents of +// the object, and how the garbage collector will handle the propagation. +type DeletionPropagation string + +const ( + // Orphans the dependents. + DeletePropagationOrphan DeletionPropagation = "Orphan" + // Deletes the object from the key-value store, the garbage collector will + // delete the dependents in the background. + DeletePropagationBackground DeletionPropagation = "Background" + // The object exists in the key-value store until the garbage collector + // deletes all the dependents whose ownerReference.blockOwnerDeletion=true + // from the key-value store. API sever will put the "foregroundDeletion" + // finalizer on the object, and sets its deletionTimestamp. This policy is + // cascading, i.e., the dependents will be deleted with Foreground. + DeletePropagationForeground DeletionPropagation = "Foreground" +) + +const ( + // DryRunAll means to complete all processing stages, but don't + // persist changes to storage. + DryRunAll = "All" +) + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// DeleteOptions may be provided when deleting an API object. +type DeleteOptions struct { + TypeMeta `json:",inline"` + + // The duration in seconds before the object should be deleted. Value must be non-negative integer. + // The value zero indicates delete immediately. 
If this value is nil, the default grace period for the + // specified type will be used. + // Defaults to a per object value if not specified. zero means delete immediately. + // +optional + GracePeriodSeconds *int64 `json:"gracePeriodSeconds,omitempty" protobuf:"varint,1,opt,name=gracePeriodSeconds"` + + // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be + // returned. + // +k8s:conversion-gen=false + // +optional + Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` + + // Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. + // Should the dependent objects be orphaned. If true/false, the "orphan" + // finalizer will be added to/removed from the object's finalizers list. + // Either this field or PropagationPolicy may be set, but not both. + // +optional + OrphanDependents *bool `json:"orphanDependents,omitempty" protobuf:"varint,3,opt,name=orphanDependents"` + + // Whether and how garbage collection will be performed. + // Either this field or OrphanDependents may be set, but not both. + // The default policy is decided by the existing finalizer set in the + // metadata.finalizers and the resource-specific default policy. + // Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - + // allow the garbage collector to delete the dependents in the background; + // 'Foreground' - a cascading policy that deletes all dependents in the + // foreground. + // +optional + PropagationPolicy *DeletionPropagation `json:"propagationPolicy,omitempty" protobuf:"varint,4,opt,name=propagationPolicy"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// CreateOptions may be provided when creating an API object. +type CreateOptions struct { + TypeMeta `json:",inline"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + // +k8s:deprecated=includeUninitialized,protobuf=2 + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PatchOptions may be provided when patching an API object. +// PatchOptions is meant to be a superset of UpdateOptions. +type PatchOptions struct { + TypeMeta `json:",inline"` + + // When present, indicates that modifications should not be + // persisted. 
An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + + // Force is going to "force" Apply requests. It means user will + // re-acquire conflicting fields owned by other people. Force + // flag must be unset for non-apply patch requests. + // +optional + Force *bool `json:"force,omitempty" protobuf:"varint,2,opt,name=force"` + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. This + // field is required for apply requests + // (application/apply-patch) but optional for non-apply patch + // types (JsonPatch, MergePatch, StrategicMergePatch). + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` +} + +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// UpdateOptions may be provided when updating an API object. +// All fields in UpdateOptions should also be present in PatchOptions. +type UpdateOptions struct { + TypeMeta `json:",inline"` + + // When present, indicates that modifications should not be + // persisted. An invalid or unrecognized dryRun directive will + // result in an error response and no further processing of the + // request. Valid values are: + // - All: all dry run stages will be processed + // +optional + DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` + + // fieldManager is a name associated with the actor or entity + // that is making these changes. The value must be less than or + // 128 characters long, and only contain printable characters, + // as defined by https://golang.org/pkg/unicode/#IsPrint. + // +optional + FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"` +} + +// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. +type Preconditions struct { + // Specifies the target UID. + // +optional + UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // Specifies the target ResourceVersion + // +optional + ResourceVersion *string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Status is a return value for calls that don't return other objects. +type Status struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Status of the operation. + // One of: "Success" or "Failure". + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // +optional + Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` + // A human-readable description of the status of this operation. 
+ // +optional + Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` + // A machine-readable description of why this operation is in the + // "Failure" status. If this value is empty there + // is no information available. A Reason clarifies an HTTP status + // code but does not override it. + // +optional + Reason StatusReason `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason,casttype=StatusReason"` + // Extended data associated with the reason. Each reason may define its + // own extended details. This field is optional and the data returned + // is not guaranteed to conform to any schema except that defined by + // the reason type. + // +optional + Details *StatusDetails `json:"details,omitempty" protobuf:"bytes,5,opt,name=details"` + // Suggested HTTP return code for this status, 0 if not set. + // +optional + Code int32 `json:"code,omitempty" protobuf:"varint,6,opt,name=code"` +} + +// StatusDetails is a set of additional properties that MAY be set by the +// server to provide additional information about a response. The Reason +// field of a Status object defines what attributes will be set. Clients +// must ignore fields that do not match the defined type of each attribute, +// and should assume that any attribute may be empty, invalid, or under +// defined. +type StatusDetails struct { + // The name attribute of the resource associated with the status StatusReason + // (when there is a single name which can be described). + // +optional + Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` + // The group attribute of the resource associated with the status StatusReason. + // +optional + Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` + // The kind attribute of the resource associated with the status StatusReason. + // On some operations may differ from the requested resource Kind. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` + // UID of the resource. + // (when there is a single resource which can be described). + // More info: http://kubernetes.io/docs/user-guide/identifiers#uids + // +optional + UID types.UID `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` + // The Causes array includes more details associated with the StatusReason + // failure. Not all StatusReasons may provide detailed causes. + // +optional + Causes []StatusCause `json:"causes,omitempty" protobuf:"bytes,4,rep,name=causes"` + // If specified, the time in seconds before the operation should be retried. Some errors may indicate + // the client must take an alternate action - for those errors this field may indicate how long to wait + // before taking the alternate action. + // +optional + RetryAfterSeconds int32 `json:"retryAfterSeconds,omitempty" protobuf:"varint,5,opt,name=retryAfterSeconds"` +} + +// Values of Status.Status +const ( + StatusSuccess = "Success" + StatusFailure = "Failure" +) + +// StatusReason is an enumeration of possible failure causes. Each StatusReason +// must map to a single HTTP status code, but multiple reasons may map +// to the same HTTP status code. +// TODO: move to apiserver +type StatusReason string + +const ( + // StatusReasonUnknown means the server has declined to indicate a specific reason. + // The details field may contain other information about this error. + // Status code 500. 
+ StatusReasonUnknown StatusReason = "" + + // StatusReasonUnauthorized means the server can be reached and understood the request, but requires + // the user to present appropriate authorization credentials (identified by the WWW-Authenticate header) + // in order for the action to be completed. If the user has specified credentials on the request, the + // server considers them insufficient. + // Status code 401 + StatusReasonUnauthorized StatusReason = "Unauthorized" + + // StatusReasonForbidden means the server can be reached and understood the request, but refuses + // to take any further action. It is the result of the server being configured to deny access for some reason + // to the requested resource by the client. + // Details (optional): + // "kind" string - the kind attribute of the forbidden resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the forbidden resource + // Status code 403 + StatusReasonForbidden StatusReason = "Forbidden" + + // StatusReasonNotFound means one or more resources required for this operation + // could not be found. + // Details (optional): + // "kind" string - the kind attribute of the missing resource + // on some operations may differ from the requested + // resource. + // "id" string - the identifier of the missing resource + // Status code 404 + StatusReasonNotFound StatusReason = "NotFound" + + // StatusReasonAlreadyExists means the resource you are creating already exists. + // Details (optional): + // "kind" string - the kind attribute of the conflicting resource + // "id" string - the identifier of the conflicting resource + // Status code 409 + StatusReasonAlreadyExists StatusReason = "AlreadyExists" + + // StatusReasonConflict means the requested operation cannot be completed + // due to a conflict in the operation. The client may need to alter the + // request. Each resource may define custom details that indicate the + // nature of the conflict. + // Status code 409 + StatusReasonConflict StatusReason = "Conflict" + + // StatusReasonGone means the item is no longer available at the server and no + // forwarding address is known. + // Status code 410 + StatusReasonGone StatusReason = "Gone" + + // StatusReasonInvalid means the requested create or update operation cannot be + // completed due to invalid data provided as part of the request. The client may + // need to alter the request. When set, the client may use the StatusDetails + // message field as a summary of the issues encountered. + // Details (optional): + // "kind" string - the kind attribute of the invalid resource + // "id" string - the identifier of the invalid resource + // "causes" - one or more StatusCause entries indicating the data in the + // provided resource that was invalid. The code, message, and + // field attributes will be set. + // Status code 422 + StatusReasonInvalid StatusReason = "Invalid" + + // StatusReasonServerTimeout means the server can be reached and understood the request, + // but cannot complete the action in a reasonable time. The client should retry the request. + // This is may be due to temporary server load or a transient communication issue with + // another server. Status code 500 is used because the HTTP spec provides no suitable + // server-requested client retry and the 5xx class represents actionable errors. + // Details (optional): + // "kind" string - the kind attribute of the resource being acted on. + // "id" string - the operation that is being attempted. 
+ // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 500 + StatusReasonServerTimeout StatusReason = "ServerTimeout" + + // StatusReasonTimeout means that the request could not be completed within the given time. + // Clients can get this response only when they specified a timeout param in the request, + // or if the server cannot complete the operation within a reasonable amount of time. + // The request might succeed with an increased value of timeout param. The client *should* + // wait at least the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 504 + StatusReasonTimeout StatusReason = "Timeout" + + // StatusReasonTooManyRequests means the server experienced too many requests within a + // given window and that the client must wait to perform the action again. A client may + // always retry the request that led to this error, although the client should wait at least + // the number of seconds specified by the retryAfterSeconds field. + // Details (optional): + // "retryAfterSeconds" int32 - the number of seconds before the operation should be retried + // Status code 429 + StatusReasonTooManyRequests StatusReason = "TooManyRequests" + + // StatusReasonBadRequest means that the request itself was invalid, because the request + // doesn't make any sense, for example deleting a read-only object. This is different than + // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the + // data was invalid. API calls that return BadRequest can never succeed. + // Status code 400 + StatusReasonBadRequest StatusReason = "BadRequest" + + // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the + // resource was not supported by the code - for instance, attempting to delete a resource that + // can only be created. API calls that return MethodNotAllowed can never succeed. + // Status code 405 + StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" + + // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable + // to the server - for instance, attempting to receive protobuf for a resource that supports only json and yaml. + // API calls that return NotAcceptable can never succeed. + // Status code 406 + StatusReasonNotAcceptable StatusReason = "NotAcceptable" + + // StatusReasonRequestEntityTooLarge means that the request entity is too large. + // Status code 413 + StatusReasonRequestEntityTooLarge StatusReason = "RequestEntityTooLarge" + + // StatusReasonUnsupportedMediaType means that the content type sent by the client is not acceptable + // to the server - for instance, attempting to send protobuf for a resource that supports only json and yaml. + // API calls that return UnsupportedMediaType can never succeed. + // Status code 415 + StatusReasonUnsupportedMediaType StatusReason = "UnsupportedMediaType" + + // StatusReasonInternalError indicates that an internal error occurred, it is unexpected + // and the outcome of the call is unknown. + // Details (optional): + // "causes" - The original error + // Status code 500 + StatusReasonInternalError StatusReason = "InternalError" + + // StatusReasonExpired indicates that the request is invalid because the content you are requesting + // has expired and is no longer available. 
It is typically associated with watches that can't be + // serviced. + // Status code 410 (gone) + StatusReasonExpired StatusReason = "Expired" + + // StatusReasonServiceUnavailable means that the request itself was valid, + // but the requested service is unavailable at this time. + // Retrying the request after some time might succeed. + // Status code 503 + StatusReasonServiceUnavailable StatusReason = "ServiceUnavailable" +) + +// StatusCause provides more information about an api.Status failure, including +// cases when multiple errors are encountered. +type StatusCause struct { + // A machine-readable description of the cause of the error. If this value is + // empty there is no information available. + // +optional + Type CauseType `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason,casttype=CauseType"` + // A human-readable description of the cause of the error. This field may be + // presented as-is to a reader. + // +optional + Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` + // The field of the resource that has caused this error, as named by its JSON + // serialization. May include dot and postfix notation for nested attributes. + // Arrays are zero-indexed. Fields may appear more than once in an array of + // causes due to fields having multiple errors. + // Optional. + // + // Examples: + // "name" - the field "name" on the current resource + // "items[0].name" - the field "name" on the first array entry in "items" + // +optional + Field string `json:"field,omitempty" protobuf:"bytes,3,opt,name=field"` +} + +// CauseType is a machine readable value providing more detail about what +// occurred in a status response. An operation may have multiple causes for a +// status (whether Failure or Success). +type CauseType string + +const ( + // CauseTypeFieldValueNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). + CauseTypeFieldValueNotFound CauseType = "FieldValueNotFound" + // CauseTypeFieldValueRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). + CauseTypeFieldValueRequired CauseType = "FieldValueRequired" + // CauseTypeFieldValueDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). + CauseTypeFieldValueDuplicate CauseType = "FieldValueDuplicate" + // CauseTypeFieldValueInvalid is used to report malformed values (e.g. failed regex + // match). + CauseTypeFieldValueInvalid CauseType = "FieldValueInvalid" + // CauseTypeFieldValueNotSupported is used to report valid (as per formatting rules) + // values that can not be handled (e.g. an enumerated string). + CauseTypeFieldValueNotSupported CauseType = "FieldValueNotSupported" + // CauseTypeUnexpectedServerResponse is used to report when the server responded to the client + // without the expected return type. The presence of this cause indicates the error may be + // due to an intervening proxy or the server software malfunctioning. + CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" + // FieldManagerConflict is used to report when another client claims to manage this field, + // It should only be returned for a request using server-side apply. + CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict" + // CauseTypeResourceVersionTooLarge is used to report that the requested resource version + // is newer than the data observed by the API server, so the request cannot be served. 
+ CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// List holds a list of objects, which may not be known by the server. +type List struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of objects + Items []runtime.RawExtension `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// APIVersions lists the versions that are available, to allow clients to +// discover the API at /api, which is the root path of the legacy v1 API. +// +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type APIVersions struct { + TypeMeta `json:",inline"` + // versions are the api versions that are available. + Versions []string `json:"versions" protobuf:"bytes,1,rep,name=versions"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. + // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs" protobuf:"bytes,2,rep,name=serverAddressByClientCIDRs"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIGroupList is a list of APIGroup, to allow clients to discover the API at +// /apis. +type APIGroupList struct { + TypeMeta `json:",inline"` + // groups is a list of APIGroup. + Groups []APIGroup `json:"groups" protobuf:"bytes,1,rep,name=groups"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIGroup contains the name, the supported versions, and the preferred version +// of a group. +type APIGroup struct { + TypeMeta `json:",inline"` + // name is the name of the group. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // versions are the versions supported in this group. + Versions []GroupVersionForDiscovery `json:"versions" protobuf:"bytes,2,rep,name=versions"` + // preferredVersion is the version preferred by the API server, which + // probably is the storage version. + // +optional + PreferredVersion GroupVersionForDiscovery `json:"preferredVersion,omitempty" protobuf:"bytes,3,opt,name=preferredVersion"` + // a map of client CIDR to server address that is serving this group. + // This is to help clients reach servers in the most network-efficient way possible. + // Clients can use the appropriate server address as per the CIDR that they match. + // In case of multiple matches, clients should use the longest matching CIDR. + // The server returns only those CIDRs that it thinks that the client can match. + // For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. 
+ // Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP. + // +optional + ServerAddressByClientCIDRs []ServerAddressByClientCIDR `json:"serverAddressByClientCIDRs,omitempty" protobuf:"bytes,4,rep,name=serverAddressByClientCIDRs"` +} + +// ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match. +type ServerAddressByClientCIDR struct { + // The CIDR with which clients can match their IP to figure out the server address that they should use. + ClientCIDR string `json:"clientCIDR" protobuf:"bytes,1,opt,name=clientCIDR"` + // Address of this server, suitable for a client that matches the above CIDR. + // This can be a hostname, hostname:port, IP or IP:port. + ServerAddress string `json:"serverAddress" protobuf:"bytes,2,opt,name=serverAddress"` +} + +// GroupVersion contains the "group/version" and "version" string of a version. +// It is made a struct to keep extensibility. +type GroupVersionForDiscovery struct { + // groupVersion specifies the API group and version in the form "group/version" + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // version specifies the version in the form of "version". This is to save + // the clients the trouble of splitting the GroupVersion. + Version string `json:"version" protobuf:"bytes,2,opt,name=version"` +} + +// APIResource specifies the name of a resource and whether it is namespaced. +type APIResource struct { + // name is the plural name of the resource. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. + // The singularName is more correct for reporting status on a single item and both singular and plural are allowed + // from the kubectl CLI interface. + SingularName string `json:"singularName" protobuf:"bytes,6,opt,name=singularName"` + // namespaced indicates if a resource is namespaced or not. + Namespaced bool `json:"namespaced" protobuf:"varint,2,opt,name=namespaced"` + // group is the preferred group of the resource. Empty implies the group of the containing resource list. + // For subresources, this may have a different value, for example: Scale". + Group string `json:"group,omitempty" protobuf:"bytes,8,opt,name=group"` + // version is the preferred version of the resource. Empty implies the version of the containing resource list + // For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)". + Version string `json:"version,omitempty" protobuf:"bytes,9,opt,name=version"` + // kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo') + Kind string `json:"kind" protobuf:"bytes,3,opt,name=kind"` + // verbs is a list of supported kube verbs (this includes get, list, watch, create, + // update, patch, delete, deletecollection, and proxy) + Verbs Verbs `json:"verbs" protobuf:"bytes,4,opt,name=verbs"` + // shortNames is a list of suggested short names of the resource. + ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` + // categories is a list of the grouped resources this resource belongs to (e.g. 
'all') + Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` + // The hash value of the storage version, the version this resource is + // converted to when written to the data store. Value must be treated + // as opaque by clients. Only equality comparison on the value is valid. + // This is an alpha feature and may change or be removed in the future. + // The field is populated by the apiserver only if the + // StorageVersionHash feature gate is enabled. + // This field will remain optional even if it graduates. + // +optional + StorageVersionHash string `json:"storageVersionHash,omitempty" protobuf:"bytes,10,opt,name=storageVersionHash"` +} + +// Verbs masks the value so protobuf can generate +// +// +protobuf.nullable=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +type Verbs []string + +func (vs Verbs) String() string { + return fmt.Sprintf("%v", []string(vs)) +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// APIResourceList is a list of APIResource, it is used to expose the name of the +// resources supported in a specific group and version, and if the resource +// is namespaced. +type APIResourceList struct { + TypeMeta `json:",inline"` + // groupVersion is the group and version this APIResourceList is for. + GroupVersion string `json:"groupVersion" protobuf:"bytes,1,opt,name=groupVersion"` + // resources contains the name of the resources and if they are namespaced. + APIResources []APIResource `json:"resources" protobuf:"bytes,2,rep,name=resources"` +} + +// RootPaths lists the paths available at root. +// For example: "/healthz", "/apis". +type RootPaths struct { + // paths are the paths available at root. + Paths []string `json:"paths" protobuf:"bytes,1,rep,name=paths"` +} + +// TODO: remove me when watch is refactored +func LabelSelectorQueryParam(version string) string { + return "labelSelector" +} + +// TODO: remove me when watch is refactored +func FieldSelectorQueryParam(version string) string { + return "fieldSelector" +} + +// String returns available api versions as a human-friendly version string. +func (apiVersions APIVersions) String() string { + return strings.Join(apiVersions.Versions, ",") +} + +func (apiVersions APIVersions) GoString() string { + return apiVersions.String() +} + +// Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. +type Patch struct{} + +// Note: +// There are two different styles of label selectors used in versioned types: +// an older style which is represented as just a string in versioned types, and a +// newer style that is structured. LabelSelector is an internal representation for the +// latter style. + +// A label selector is a label query over a set of resources. The result of matchLabels and +// matchExpressions are ANDed. An empty label selector matches all objects. A null +// label selector matches no objects. +type LabelSelector struct { + // matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels + // map is equivalent to an element of matchExpressions, whose key field is "key", the + // operator is "In", and the values array contains only "value". The requirements are ANDed. + // +optional + MatchLabels map[string]string `json:"matchLabels,omitempty" protobuf:"bytes,1,rep,name=matchLabels"` + // matchExpressions is a list of label selector requirements. The requirements are ANDed. 
+ // +optional + MatchExpressions []LabelSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,2,rep,name=matchExpressions"` +} + +// A label selector requirement is a selector that contains values, a key, and an operator that +// relates the key and values. +type LabelSelectorRequirement struct { + // key is the label key that the selector applies to. + // +patchMergeKey=key + // +patchStrategy=merge + Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"` + // operator represents a key's relationship to a set of values. + // Valid operators are In, NotIn, Exists and DoesNotExist. + Operator LabelSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=LabelSelectorOperator"` + // values is an array of string values. If the operator is In or NotIn, + // the values array must be non-empty. If the operator is Exists or DoesNotExist, + // the values array must be empty. This array is replaced during a strategic + // merge patch. + // +optional + Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` +} + +// A label selector operator is the set of operators that can be used in a selector requirement. +type LabelSelectorOperator string + +const ( + LabelSelectorOpIn LabelSelectorOperator = "In" + LabelSelectorOpNotIn LabelSelectorOperator = "NotIn" + LabelSelectorOpExists LabelSelectorOperator = "Exists" + LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" +) + +// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource +// that the fieldset applies to. +type ManagedFieldsEntry struct { + // Manager is an identifier of the workflow managing these fields. + Manager string `json:"manager,omitempty" protobuf:"bytes,1,opt,name=manager"` + // Operation is the type of operation which lead to this ManagedFieldsEntry being created. + // The only valid values for this field are 'Apply' and 'Update'. + Operation ManagedFieldsOperationType `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=ManagedFieldsOperationType"` + // APIVersion defines the version of this resource that this field set + // applies to. The format is "group/version" just like the top-level + // APIVersion field. It is necessary to track the version of a field + // set because it cannot be automatically converted. + APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` + // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' + // +optional + Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` + + // Fields is tombstoned to show why 5 is a reserved protobuf tag. + //Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` + + // FieldsType is the discriminator for the different fields format and version. + // There is currently only one possible value: "FieldsV1" + FieldsType string `json:"fieldsType,omitempty" protobuf:"bytes,6,opt,name=fieldsType"` + // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. + // +optional + FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"` +} + +// ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. 
+type ManagedFieldsOperationType string + +const ( + ManagedFieldsOperationApply ManagedFieldsOperationType = "Apply" + ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" +) + +// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. +// +// Each key is either a '.' representing the field itself, and will always map to an empty set, +// or a string representing a sub-field or item. The string will follow one of these four formats: +// 'f:', where is the name of a field in a struct, or key in a map +// 'v:', where is the exact json formatted value of a list item +// 'i:', where is position of a item in a list +// 'k:', where is a map of a list item's key fields to their unique values +// If a key maps to an empty Fields value, the field that key represents is part of the set. +// +// The exact format is defined in sigs.k8s.io/structured-merge-diff +type FieldsV1 struct { + // Raw is the underlying serialization of this object. + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"` +} + +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. This can be introduced +// in a v1 because clients a) receive an error if they try to access proto today, and b) +// once introduced they would be able to gracefully switch over to using it. + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=false +type Table struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions. + ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` +} + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column, such as number, integer, string, or + // array. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type modifier for this column. A format modifies the type and + // imposes additional rules, like date or time formatting for a string. The 'name' format is applied + // to the primary identifier column which has type 'string' to assist in clients identifying column + // is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. 
+ Priority int32 `json:"priority"` +} + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow struct { + // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or + // int64), booleans, simple maps, lists, or null. See the type field of the column definition for a + // more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user. These conditions + // apply to the row, not to the object, and will be specific to table output. The only defined + // condition type is 'Completed', for a row that indicates a resource that has run to completion and + // can be given less visual priority. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // The media type of the object will always match the enclosing list - if this as a JSON table, these + // will be JSON encoded objects. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition struct { + // Type of row condition. The only defined value is 'Completed' indicating that the + // object this row represents has reached a completed state and may be given less visual + // priority than other rows. Clients are not required to honor any conditions but should + // be consistent where possible about handling the conditions. + Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +type RowConditionType string + +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// included by other resources. +const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. + RowCompleted RowConditionType = "Completed" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. + IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. 
+ IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) + +// TableOptions are used when a Table is requested by the caller. +// +k8s:conversion-gen:explicit-from=net/url.Values +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions struct { + TypeMeta `json:",inline"` + + // NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions + // and may be removed as a field in a future release. + NoHeaders bool `json:"-"` + + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata struct { + TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // +optional + ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // items contains each of the included items. + Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go new file mode 100644 index 0000000..b62e591 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -0,0 +1,442 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! 
For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_APIGroup = map[string]string{ + "": "APIGroup contains the name, the supported versions, and the preferred version of a group.", + "name": "name is the name of the group.", + "versions": "versions are the versions supported in this group.", + "preferredVersion": "preferredVersion is the version preferred by the API server, which probably is the storage version.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIGroup) SwaggerDoc() map[string]string { + return map_APIGroup +} + +var map_APIGroupList = map[string]string{ + "": "APIGroupList is a list of APIGroup, to allow clients to discover the API at /apis.", + "groups": "groups is a list of APIGroup.", +} + +func (APIGroupList) SwaggerDoc() map[string]string { + return map_APIGroupList +} + +var map_APIResource = map[string]string{ + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the plural name of the resource.", + "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", + "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", + "storageVersionHash": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. 
This field will remain optional even if it graduates.", +} + +func (APIResource) SwaggerDoc() map[string]string { + return map_APIResource +} + +var map_APIResourceList = map[string]string{ + "": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.", + "groupVersion": "groupVersion is the group and version this APIResourceList is for.", + "resources": "resources contains the name of the resources and if they are namespaced.", +} + +func (APIResourceList) SwaggerDoc() map[string]string { + return map_APIResourceList +} + +var map_APIVersions = map[string]string{ + "": "APIVersions lists the versions that are available, to allow clients to discover the API at /api, which is the root path of the legacy v1 API.", + "versions": "versions are the api versions that are available.", + "serverAddressByClientCIDRs": "a map of client CIDR to server address that is serving this group. This is to help clients reach servers in the most network-efficient way possible. Clients can use the appropriate server address as per the CIDR that they match. In case of multiple matches, clients should use the longest matching CIDR. The server returns only those CIDRs that it thinks that the client can match. For example: the master will return an internal IP CIDR only, if the client reaches the server using an internal IP. Server looks at X-Forwarded-For header or X-Real-Ip header or request.RemoteAddr (in that order) to get the client IP.", +} + +func (APIVersions) SwaggerDoc() map[string]string { + return map_APIVersions +} + +var map_CreateOptions = map[string]string{ + "": "CreateOptions may be provided when creating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", +} + +func (CreateOptions) SwaggerDoc() map[string]string { + return map_CreateOptions +} + +var map_DeleteOptions = map[string]string{ + "": "DeleteOptions may be provided when deleting an API object.", + "gracePeriodSeconds": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.", + "preconditions": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned.", + "orphanDependents": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.", + "propagationPolicy": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. 
Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", +} + +func (DeleteOptions) SwaggerDoc() map[string]string { + return map_DeleteOptions +} + +var map_ExportOptions = map[string]string{ + "": "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.", + "export": "Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.", + "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.", +} + +func (ExportOptions) SwaggerDoc() map[string]string { + return map_ExportOptions +} + +var map_FieldsV1 = map[string]string{ + "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", +} + +func (FieldsV1) SwaggerDoc() map[string]string { + return map_FieldsV1 +} + +var map_GetOptions = map[string]string{ + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", +} + +func (GetOptions) SwaggerDoc() map[string]string { + return map_GetOptions +} + +var map_GroupVersionForDiscovery = map[string]string{ + "": "GroupVersion contains the \"group/version\" and \"version\" string of a version. It is made a struct to keep extensibility.", + "groupVersion": "groupVersion specifies the API group and version in the form \"group/version\"", + "version": "version specifies the version in the form of \"version\". This is to save the clients the trouble of splitting the GroupVersion.", +} + +func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { + return map_GroupVersionForDiscovery +} + +var map_LabelSelector = map[string]string{ + "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", + "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". 
The requirements are ANDed.", + "matchExpressions": "matchExpressions is a list of label selector requirements. The requirements are ANDed.", +} + +func (LabelSelector) SwaggerDoc() map[string]string { + return map_LabelSelector +} + +var map_LabelSelectorRequirement = map[string]string{ + "": "A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.", + "key": "key is the label key that the selector applies to.", + "operator": "operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.", + "values": "values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", +} + +func (LabelSelectorRequirement) SwaggerDoc() map[string]string { + return map_LabelSelectorRequirement +} + +var map_List = map[string]string{ + "": "List holds a list of objects, which may not be known by the server.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "List of objects", +} + +func (List) SwaggerDoc() map[string]string { + return map_List +} + +var map_ListMeta = map[string]string{ + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", + "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. 
Clients should not rely on the remainingItemCount to be set or to be exact.", +} + +func (ListMeta) SwaggerDoc() map[string]string { + return map_ListMeta +} + +var map_ListOptions = map[string]string{ + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. 
If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", +} + +func (ListOptions) SwaggerDoc() map[string]string { + return map_ListOptions +} + +var map_ManagedFieldsEntry = map[string]string{ + "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", + "manager": "Manager is an identifier of the workflow managing these fields.", + "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", + "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", + "time": "Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply'", + "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", + "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", +} + +func (ManagedFieldsEntry) SwaggerDoc() map[string]string { + return map_ManagedFieldsEntry +} + +var map_ObjectMeta = map[string]string{ + "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", + "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", + "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", + "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", + "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", + "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", + "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", +} + +func (ObjectMeta) SwaggerDoc() map[string]string { + return map_ObjectMeta +} + +var map_OwnerReference = map[string]string{ + "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + "apiVersion": "API version of the referent.", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", + "uid": "UID of the referent. 
More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "controller": "If true, this reference points to the managing controller.", + "blockOwnerDeletion": "If true, AND if the owner has the \"foregroundDeletion\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. Defaults to false. To set this field, a user needs \"delete\" permission of the owner, otherwise 422 (Unprocessable Entity) will be returned.", +} + +func (OwnerReference) SwaggerDoc() map[string]string { + return map_OwnerReference +} + +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + +var map_Patch = map[string]string{ + "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", +} + +func (Patch) SwaggerDoc() map[string]string { + return map_Patch +} + +var map_PatchOptions = map[string]string{ + "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", +} + +func (PatchOptions) SwaggerDoc() map[string]string { + return map_PatchOptions +} + +var map_Preconditions = map[string]string{ + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", + "resourceVersion": "Specifies the target ResourceVersion", +} + +func (Preconditions) SwaggerDoc() map[string]string { + return map_Preconditions +} + +var map_RootPaths = map[string]string{ + "": "RootPaths lists the paths available at root. 
For example: \"/healthz\", \"/apis\".", + "paths": "paths are the paths available at root.", +} + +func (RootPaths) SwaggerDoc() map[string]string { + return map_RootPaths +} + +var map_ServerAddressByClientCIDR = map[string]string{ + "": "ServerAddressByClientCIDR helps the client to determine the server address that they should use, depending on the clientCIDR that they match.", + "clientCIDR": "The CIDR with which clients can match their IP to figure out the server address that they should use.", + "serverAddress": "Address of this server, suitable for a client that matches the above CIDR. This can be a hostname, hostname:port, IP or IP:port.", +} + +func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { + return map_ServerAddressByClientCIDR +} + +var map_Status = map[string]string{ + "": "Status is a return value for calls that don't return other objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "message": "A human-readable description of the status of this operation.", + "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", + "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", + "code": "Suggested HTTP return code for this status, 0 if not set.", +} + +func (Status) SwaggerDoc() map[string]string { + return map_Status +} + +var map_StatusCause = map[string]string{ + "": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.", + "reason": "A machine-readable description of the cause of the error. If this value is empty there is no information available.", + "message": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.", + "field": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\n\nExamples:\n \"name\" - the field \"name\" on the current resource\n \"items[0].name\" - the field \"name\" on the first array entry in \"items\"", +} + +func (StatusCause) SwaggerDoc() map[string]string { + return map_StatusCause +} + +var map_StatusDetails = map[string]string{ + "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. 
Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", + "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", + "group": "The group attribute of the resource associated with the status StatusReason.", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", + "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", + "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", +} + +func (StatusDetails) SwaggerDoc() map[string]string { + return map_StatusDetails +} + +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column, such as number, integer, string, or array. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. 
Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + +var map_TypeMeta = map[string]string{ + "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", +} + +func (TypeMeta) SwaggerDoc() map[string]string { + return map_TypeMeta +} + +var map_UpdateOptions = map[string]string{ + "": "UpdateOptions may be provided when updating an API object. 
All fields in UpdateOptions should also be present in PatchOptions.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", + "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", +} + +func (UpdateOptions) SwaggerDoc() map[string]string { + return map_UpdateOptions +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go new file mode 100644 index 0000000..4244b8a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -0,0 +1,508 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + gojson "encoding/json" + "fmt" + "io" + "strings" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/util/json" + "k8s.io/klog" +) + +// NestedFieldCopy returns a deep copy of the value of a nested field. +// Returns false if the value is missing. +// No error is returned for a nil field. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. +func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + return runtime.DeepCopyJSONValue(val), true, nil +} + +// NestedFieldNoCopy returns a reference to a nested field. +// Returns false if value is not found and an error if unable +// to traverse obj. +// +// Note: fields passed to this function are treated as keys within the passed +// object; no array/slice syntax is supported. +func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { + var val interface{} = obj + + for i, field := range fields { + if val == nil { + return nil, false, nil + } + if m, ok := val.(map[string]interface{}); ok { + val, ok = m[field] + if !ok { + return nil, false, nil + } + } else { + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields[:i+1]), val, val) + } + } + return val, true, nil +} + +// NestedString returns the string value of a nested field. +// Returns false if value is not found and an error if not a string. +func NestedString(obj map[string]interface{}, fields ...string) (string, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) 
+ if !found || err != nil { + return "", found, err + } + s, ok := val.(string) + if !ok { + return "", false, fmt.Errorf("%v accessor error: %v is of the type %T, expected string", jsonPath(fields), val, val) + } + return s, true, nil +} + +// NestedBool returns the bool value of a nested field. +// Returns false if value is not found and an error if not a bool. +func NestedBool(obj map[string]interface{}, fields ...string) (bool, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return false, found, err + } + b, ok := val.(bool) + if !ok { + return false, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected bool", jsonPath(fields), val, val) + } + return b, true, nil +} + +// NestedFloat64 returns the float64 value of a nested field. +// Returns false if value is not found and an error if not a float64. +func NestedFloat64(obj map[string]interface{}, fields ...string) (float64, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found, err + } + f, ok := val.(float64) + if !ok { + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected float64", jsonPath(fields), val, val) + } + return f, true, nil +} + +// NestedInt64 returns the int64 value of a nested field. +// Returns false if value is not found and an error if not an int64. +func NestedInt64(obj map[string]interface{}, fields ...string) (int64, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return 0, found, err + } + i, ok := val.(int64) + if !ok { + return 0, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected int64", jsonPath(fields), val, val) + } + return i, true, nil +} + +// NestedStringSlice returns a copy of []string value of a nested field. +// Returns false if value is not found and an error if not a []interface{} or contains non-string items in the slice. +func NestedStringSlice(obj map[string]interface{}, fields ...string) ([]string, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + m, ok := val.([]interface{}) + if !ok { + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val) + } + strSlice := make([]string, 0, len(m)) + for _, v := range m { + if str, ok := v.(string); ok { + strSlice = append(strSlice, str) + } else { + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the slice: %v is of the type %T, expected string", jsonPath(fields), v, v) + } + } + return strSlice, true, nil +} + +// NestedSlice returns a deep copy of []interface{} value of a nested field. +// Returns false if value is not found and an error if not a []interface{}. +func NestedSlice(obj map[string]interface{}, fields ...string) ([]interface{}, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + _, ok := val.([]interface{}) + if !ok { + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected []interface{}", jsonPath(fields), val, val) + } + return runtime.DeepCopyJSONValue(val).([]interface{}), true, nil +} + +// NestedStringMap returns a copy of map[string]string value of a nested field. +// Returns false if value is not found and an error if not a map[string]interface{} or contains non-string values in the map. 
+func NestedStringMap(obj map[string]interface{}, fields ...string) (map[string]string, bool, error) { + m, found, err := nestedMapNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + strMap := make(map[string]string, len(m)) + for k, v := range m { + if str, ok := v.(string); ok { + strMap[k] = str + } else { + return nil, false, fmt.Errorf("%v accessor error: contains non-string key in the map: %v is of the type %T, expected string", jsonPath(fields), v, v) + } + } + return strMap, true, nil +} + +// NestedMap returns a deep copy of map[string]interface{} value of a nested field. +// Returns false if value is not found and an error if not a map[string]interface{}. +func NestedMap(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { + m, found, err := nestedMapNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + return runtime.DeepCopyJSON(m), true, nil +} + +// nestedMapNoCopy returns a map[string]interface{} value of a nested field. +// Returns false if value is not found and an error if not a map[string]interface{}. +func nestedMapNoCopy(obj map[string]interface{}, fields ...string) (map[string]interface{}, bool, error) { + val, found, err := NestedFieldNoCopy(obj, fields...) + if !found || err != nil { + return nil, found, err + } + m, ok := val.(map[string]interface{}) + if !ok { + return nil, false, fmt.Errorf("%v accessor error: %v is of the type %T, expected map[string]interface{}", jsonPath(fields), val, val) + } + return m, true, nil +} + +// SetNestedField sets the value of a nested field to a deep copy of the value provided. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedField(obj map[string]interface{}, value interface{}, fields ...string) error { + return setNestedFieldNoCopy(obj, runtime.DeepCopyJSONValue(value), fields...) +} + +func setNestedFieldNoCopy(obj map[string]interface{}, value interface{}, fields ...string) error { + m := obj + + for i, field := range fields[:len(fields)-1] { + if val, ok := m[field]; ok { + if valMap, ok := val.(map[string]interface{}); ok { + m = valMap + } else { + return fmt.Errorf("value cannot be set because %v is not a map[string]interface{}", jsonPath(fields[:i+1])) + } + } else { + newVal := make(map[string]interface{}) + m[field] = newVal + m = newVal + } + } + m[fields[len(fields)-1]] = value + return nil +} + +// SetNestedStringSlice sets the string slice value of a nested field. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedStringSlice(obj map[string]interface{}, value []string, fields ...string) error { + m := make([]interface{}, 0, len(value)) // convert []string into []interface{} + for _, v := range value { + m = append(m, v) + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedSlice sets the slice value of a nested field. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedSlice(obj map[string]interface{}, value []interface{}, fields ...string) error { + return SetNestedField(obj, value, fields...) +} + +// SetNestedStringMap sets the map[string]string value of a nested field. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. 
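As a point of reference, a minimal sketch of how these nested-field helpers are typically used from outside the package; the import path is the vendored location above, and the object and field values are purely illustrative:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A JSON-compatible object, e.g. the result of json.Unmarshal into map[string]interface{}.
	obj := map[string]interface{}{
		"metadata": map[string]interface{}{
			"name":   "example",
			"labels": map[string]interface{}{"app": "demo"},
		},
	}

	// found and err distinguish "field missing" from "field has the wrong type".
	name, found, err := unstructured.NestedString(obj, "metadata", "name")
	fmt.Println(name, found, err) // example true <nil>

	// SetNestedField deep-copies the value and creates intermediate maps as needed.
	if err := unstructured.SetNestedField(obj, "v1", "apiVersion"); err != nil {
		fmt.Println(err)
	}
}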
+func SetNestedStringMap(obj map[string]interface{}, value map[string]string, fields ...string) error { + m := make(map[string]interface{}, len(value)) // convert map[string]string into map[string]interface{} + for k, v := range value { + m[k] = v + } + return setNestedFieldNoCopy(obj, m, fields...) +} + +// SetNestedMap sets the map[string]interface{} value of a nested field. +// Returns an error if value cannot be set because one of the nesting levels is not a map[string]interface{}. +func SetNestedMap(obj map[string]interface{}, value map[string]interface{}, fields ...string) error { + return SetNestedField(obj, value, fields...) +} + +// RemoveNestedField removes the nested field from the obj. +func RemoveNestedField(obj map[string]interface{}, fields ...string) { + m := obj + for _, field := range fields[:len(fields)-1] { + if x, ok := m[field].(map[string]interface{}); ok { + m = x + } else { + return + } + } + delete(m, fields[len(fields)-1]) +} + +func getNestedString(obj map[string]interface{}, fields ...string) string { + val, found, err := NestedString(obj, fields...) + if !found || err != nil { + return "" + } + return val +} + +func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { + val, found, err := NestedInt64(obj, fields...) + if !found || err != nil { + return 0 + } + return val +} + +func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { + val, found, err := NestedInt64(obj, fields...) + if !found || err != nil { + return nil + } + return &val +} + +func jsonPath(fields []string) string { + return "." + strings.Join(fields, ".") +} + +func extractOwnerReference(v map[string]interface{}) metav1.OwnerReference { + // though this field is a *bool, but when decoded from JSON, it's + // unmarshalled as bool. + var controllerPtr *bool + if controller, found, err := NestedBool(v, "controller"); err == nil && found { + controllerPtr = &controller + } + var blockOwnerDeletionPtr *bool + if blockOwnerDeletion, found, err := NestedBool(v, "blockOwnerDeletion"); err == nil && found { + blockOwnerDeletionPtr = &blockOwnerDeletion + } + return metav1.OwnerReference{ + Kind: getNestedString(v, "kind"), + Name: getNestedString(v, "name"), + APIVersion: getNestedString(v, "apiVersion"), + UID: types.UID(getNestedString(v, "uid")), + Controller: controllerPtr, + BlockOwnerDeletion: blockOwnerDeletionPtr, + } +} + +// UnstructuredJSONScheme is capable of converting JSON data into the Unstructured +// type, which can be used for generic access to objects without a predefined scheme. +// TODO: move into serializer/json. 
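A minimal sketch of the codec described above, assuming the vendored import path and an illustrative ConfigMap payload; passing a nil object lets Decode choose between Unstructured and UnstructuredList based on whether an items field is present:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	data := []byte(`{"apiVersion":"v1","kind":"ConfigMap","metadata":{"name":"demo"}}`)

	// Decode fails if the payload carries no kind; here it returns *Unstructured.
	obj, gvk, err := unstructured.UnstructuredJSONScheme.Decode(data, nil, nil)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(gvk.Kind, obj.(*unstructured.Unstructured).GetName()) // ConfigMap demo
}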
+var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} + +type unstructuredJSONScheme struct{} + +const unstructuredJSONSchemeIdentifier runtime.Identifier = "unstructuredJSON" + +func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var err error + if obj != nil { + err = s.decodeInto(data, obj) + } else { + obj, err = s.decode(data) + } + + if err != nil { + return nil, nil, err + } + + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, &gvk, runtime.NewMissingKindErr(string(data)) + } + + return obj, &gvk, nil +} + +func (s unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case *Unstructured: + return json.NewEncoder(w).Encode(t.Object) + case *UnstructuredList: + items := make([]interface{}, 0, len(t.Items)) + for _, i := range t.Items { + items = append(items, i.Object) + } + listObj := make(map[string]interface{}, len(t.Object)+1) + for k, v := range t.Object { // Make a shallow copy + listObj[k] = v + } + listObj["items"] = items + return json.NewEncoder(w).Encode(listObj) + case *runtime.Unknown: + // TODO: Unstructured needs to deal with ContentType. + _, err := w.Write(t.Raw) + return err + default: + return json.NewEncoder(w).Encode(t) + } +} + +// Identifier implements runtime.Encoder interface. +func (unstructuredJSONScheme) Identifier() runtime.Identifier { + return unstructuredJSONSchemeIdentifier +} + +func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { + type detector struct { + Items gojson.RawMessage + } + var det detector + if err := json.Unmarshal(data, &det); err != nil { + return nil, err + } + + if det.Items != nil { + list := &UnstructuredList{} + err := s.decodeToList(data, list) + return list, err + } + + // No Items field, so it wasn't a list. + unstruct := &Unstructured{} + err := s.decodeToUnstructured(data, unstruct) + return unstruct, err +} + +func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) error { + switch x := obj.(type) { + case *Unstructured: + return s.decodeToUnstructured(data, x) + case *UnstructuredList: + return s.decodeToList(data, x) + default: + return json.Unmarshal(data, x) + } +} + +func (unstructuredJSONScheme) decodeToUnstructured(data []byte, unstruct *Unstructured) error { + m := make(map[string]interface{}) + if err := json.Unmarshal(data, &m); err != nil { + return err + } + + unstruct.Object = m + + return nil +} + +func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList) error { + type decodeList struct { + Items []gojson.RawMessage + } + + var dList decodeList + if err := json.Unmarshal(data, &dList); err != nil { + return err + } + + if err := json.Unmarshal(data, &list.Object); err != nil { + return err + } + + // For typed lists, e.g., a PodList, API server doesn't set each item's + // APIVersion and Kind. We need to set it. 
+ listAPIVersion := list.GetAPIVersion() + listKind := list.GetKind() + itemKind := strings.TrimSuffix(listKind, "List") + + delete(list.Object, "items") + list.Items = make([]Unstructured, 0, len(dList.Items)) + for _, i := range dList.Items { + unstruct := &Unstructured{} + if err := s.decodeToUnstructured([]byte(i), unstruct); err != nil { + return err + } + // This is hacky. Set the item's Kind and APIVersion to those inferred + // from the List. + if len(unstruct.GetKind()) == 0 && len(unstruct.GetAPIVersion()) == 0 { + unstruct.SetKind(itemKind) + unstruct.SetAPIVersion(listAPIVersion) + } + list.Items = append(list.Items, *unstruct) + } + return nil +} + +type jsonFallbackEncoder struct { + encoder runtime.Encoder + identifier runtime.Identifier +} + +func NewJSONFallbackEncoder(encoder runtime.Encoder) runtime.Encoder { + result := map[string]string{ + "name": "fallback", + "base": string(encoder.Identifier()), + } + identifier, err := gojson.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for jsonFallbackEncoder: %v", err) + } + return &jsonFallbackEncoder{ + encoder: encoder, + identifier: runtime.Identifier(identifier), + } +} + +func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { + // There is no need to handle runtime.CacheableObject, as we only + // fallback to other encoders here. + err := c.encoder.Encode(obj, w) + if runtime.IsNotRegisteredError(err) { + switch obj.(type) { + case *Unstructured, *UnstructuredList: + return UnstructuredJSONScheme.Encode(obj, w) + } + } + return err +} + +// Identifier implements runtime.Encoder interface. +func (c *jsonFallbackEncoder) Identifier() runtime.Identifier { + return c.identifier +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go new file mode 100644 index 0000000..d190339 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -0,0 +1,496 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unstructured + +import ( + "bytes" + "errors" + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// Unstructured allows objects that do not have Golang structs registered to be manipulated +// generically. This can be used to deal with the API objects from a plug-in. Unstructured +// objects still have functioning TypeMeta features-- kind, version, etc. +// +// WARNING: This object has accessors for the v1 standard metadata. You *MUST NOT* use this +// type if you are dealing with objects that are not in the server meta v1 schema. +// +// TODO: make the serialization part of this type distinct from the field accessors. 
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type Unstructured struct { + // Object is a JSON compatible map with string, float, int, bool, []interface{}, or + // map[string]interface{} + // children. + Object map[string]interface{} +} + +var _ metav1.Object = &Unstructured{} +var _ runtime.Unstructured = &Unstructured{} +var _ metav1.ListInterface = &Unstructured{} + +func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } + +func (obj *Unstructured) IsList() bool { + field, ok := obj.Object["items"] + if !ok { + return false + } + _, ok = field.([]interface{}) + return ok +} +func (obj *Unstructured) ToList() (*UnstructuredList, error) { + if !obj.IsList() { + // return an empty list back + return &UnstructuredList{Object: obj.Object}, nil + } + + ret := &UnstructuredList{} + ret.Object = obj.Object + + err := obj.EachListItem(func(item runtime.Object) error { + castItem := item.(*Unstructured) + ret.Items = append(ret.Items, *castItem) + return nil + }) + if err != nil { + return nil, err + } + + return ret, nil +} + +func (obj *Unstructured) EachListItem(fn func(runtime.Object) error) error { + field, ok := obj.Object["items"] + if !ok { + return errors.New("content is not a list") + } + items, ok := field.([]interface{}) + if !ok { + return fmt.Errorf("content is not a list: %T", field) + } + for _, item := range items { + child, ok := item.(map[string]interface{}) + if !ok { + return fmt.Errorf("items member is not an object: %T", child) + } + if err := fn(&Unstructured{Object: child}); err != nil { + return err + } + } + return nil +} + +func (obj *Unstructured) UnstructuredContent() map[string]interface{} { + if obj.Object == nil { + return make(map[string]interface{}) + } + return obj.Object +} + +func (obj *Unstructured) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content +} + +// MarshalJSON ensures that the unstructured object produces proper +// JSON when passed to Go's standard JSON library. +func (u *Unstructured) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured object properly decodes +// JSON when passed to Go's standard JSON library. +func (u *Unstructured) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. +// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. +func (in *Unstructured) NewEmptyInstance() runtime.Unstructured { + out := new(Unstructured) + if in != nil { + out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind()) + } + return out +} + +func (in *Unstructured) DeepCopy() *Unstructured { + if in == nil { + return nil + } + out := new(Unstructured) + *out = *in + out.Object = runtime.DeepCopyJSON(in.Object) + return out +} + +func (u *Unstructured) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedField(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedStringSlice(u.Object, value, fields...) 
+} + +func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedSlice(u.Object, value, fields...) +} + +func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedStringMap(u.Object, value, fields...) +} + +func (u *Unstructured) GetOwnerReferences() []metav1.OwnerReference { + field, found, err := NestedFieldNoCopy(u.Object, "metadata", "ownerReferences") + if !found || err != nil { + return nil + } + original, ok := field.([]interface{}) + if !ok { + return nil + } + ret := make([]metav1.OwnerReference, 0, len(original)) + for _, obj := range original { + o, ok := obj.(map[string]interface{}) + if !ok { + // expected map[string]interface{}, got something else + return nil + } + ret = append(ret, extractOwnerReference(o)) + } + return ret +} + +func (u *Unstructured) SetOwnerReferences(references []metav1.OwnerReference) { + if references == nil { + RemoveNestedField(u.Object, "metadata", "ownerReferences") + return + } + + newReferences := make([]interface{}, 0, len(references)) + for _, reference := range references { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&reference) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to convert Owner Reference: %v", err)) + continue + } + newReferences = append(newReferences, out) + } + u.setNestedField(newReferences, "metadata", "ownerReferences") +} + +func (u *Unstructured) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *Unstructured) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *Unstructured) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *Unstructured) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *Unstructured) GetNamespace() string { + return getNestedString(u.Object, "metadata", "namespace") +} + +func (u *Unstructured) SetNamespace(namespace string) { + if len(namespace) == 0 { + RemoveNestedField(u.Object, "metadata", "namespace") + return + } + u.setNestedField(namespace, "metadata", "namespace") +} + +func (u *Unstructured) GetName() string { + return getNestedString(u.Object, "metadata", "name") +} + +func (u *Unstructured) SetName(name string) { + if len(name) == 0 { + RemoveNestedField(u.Object, "metadata", "name") + return + } + u.setNestedField(name, "metadata", "name") +} + +func (u *Unstructured) GetGenerateName() string { + return getNestedString(u.Object, "metadata", "generateName") +} + +func (u *Unstructured) SetGenerateName(generateName string) { + if len(generateName) == 0 { + RemoveNestedField(u.Object, "metadata", "generateName") + return + } + u.setNestedField(generateName, "metadata", "generateName") +} + +func (u *Unstructured) GetUID() types.UID { + return types.UID(getNestedString(u.Object, "metadata", "uid")) +} + +func (u *Unstructured) SetUID(uid types.UID) { + if len(string(uid)) == 0 { + RemoveNestedField(u.Object, "metadata", "uid") + return + } + u.setNestedField(string(uid), "metadata", "uid") +} + +func (u *Unstructured) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *Unstructured) SetResourceVersion(resourceVersion string) { + if len(resourceVersion) == 0 { + RemoveNestedField(u.Object, "metadata", "resourceVersion") + return + } + u.setNestedField(resourceVersion, "metadata", 
"resourceVersion") +} + +func (u *Unstructured) GetGeneration() int64 { + val, found, err := NestedInt64(u.Object, "metadata", "generation") + if !found || err != nil { + return 0 + } + return val +} + +func (u *Unstructured) SetGeneration(generation int64) { + if generation == 0 { + RemoveNestedField(u.Object, "metadata", "generation") + return + } + u.setNestedField(generation, "metadata", "generation") +} + +func (u *Unstructured) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *Unstructured) SetSelfLink(selfLink string) { + if len(selfLink) == 0 { + RemoveNestedField(u.Object, "metadata", "selfLink") + return + } + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *Unstructured) GetContinue() string { + return getNestedString(u.Object, "metadata", "continue") +} + +func (u *Unstructured) SetContinue(c string) { + if len(c) == 0 { + RemoveNestedField(u.Object, "metadata", "continue") + return + } + u.setNestedField(c, "metadata", "continue") +} + +func (u *Unstructured) GetRemainingItemCount() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") +} + +func (u *Unstructured) SetRemainingItemCount(c *int64) { + if c == nil { + RemoveNestedField(u.Object, "metadata", "remainingItemCount") + } else { + u.setNestedField(*c, "metadata", "remainingItemCount") + } +} + +func (u *Unstructured) GetCreationTimestamp() metav1.Time { + var timestamp metav1.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) + return timestamp +} + +func (u *Unstructured) SetCreationTimestamp(timestamp metav1.Time) { + ts, _ := timestamp.MarshalQueryParameter() + if len(ts) == 0 || timestamp.Time.IsZero() { + RemoveNestedField(u.Object, "metadata", "creationTimestamp") + return + } + u.setNestedField(ts, "metadata", "creationTimestamp") +} + +func (u *Unstructured) GetDeletionTimestamp() *metav1.Time { + var timestamp metav1.Time + timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "deletionTimestamp")) + if timestamp.IsZero() { + return nil + } + return ×tamp +} + +func (u *Unstructured) SetDeletionTimestamp(timestamp *metav1.Time) { + if timestamp == nil { + RemoveNestedField(u.Object, "metadata", "deletionTimestamp") + return + } + ts, _ := timestamp.MarshalQueryParameter() + u.setNestedField(ts, "metadata", "deletionTimestamp") +} + +func (u *Unstructured) GetDeletionGracePeriodSeconds() *int64 { + val, found, err := NestedInt64(u.Object, "metadata", "deletionGracePeriodSeconds") + if !found || err != nil { + return nil + } + return &val +} + +func (u *Unstructured) SetDeletionGracePeriodSeconds(deletionGracePeriodSeconds *int64) { + if deletionGracePeriodSeconds == nil { + RemoveNestedField(u.Object, "metadata", "deletionGracePeriodSeconds") + return + } + u.setNestedField(*deletionGracePeriodSeconds, "metadata", "deletionGracePeriodSeconds") +} + +func (u *Unstructured) GetLabels() map[string]string { + m, _, _ := NestedStringMap(u.Object, "metadata", "labels") + return m +} + +func (u *Unstructured) SetLabels(labels map[string]string) { + if labels == nil { + RemoveNestedField(u.Object, "metadata", "labels") + return + } + u.setNestedMap(labels, "metadata", "labels") +} + +func (u *Unstructured) GetAnnotations() map[string]string { + m, _, _ := NestedStringMap(u.Object, "metadata", "annotations") + return m +} + +func (u *Unstructured) SetAnnotations(annotations map[string]string) { + if annotations == nil { + RemoveNestedField(u.Object, "metadata", 
"annotations") + return + } + u.setNestedMap(annotations, "metadata", "annotations") +} + +func (u *Unstructured) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *Unstructured) GetFinalizers() []string { + val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers") + return val +} + +func (u *Unstructured) SetFinalizers(finalizers []string) { + if finalizers == nil { + RemoveNestedField(u.Object, "metadata", "finalizers") + return + } + u.setNestedStringSlice(finalizers, "metadata", "finalizers") +} + +func (u *Unstructured) GetClusterName() string { + return getNestedString(u.Object, "metadata", "clusterName") +} + +func (u *Unstructured) SetClusterName(clusterName string) { + if len(clusterName) == 0 { + RemoveNestedField(u.Object, "metadata", "clusterName") + return + } + u.setNestedField(clusterName, "metadata", "clusterName") +} + +func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { + items, found, err := NestedSlice(u.Object, "metadata", "managedFields") + if !found || err != nil { + return nil + } + managedFields := []metav1.ManagedFieldsEntry{} + for _, item := range items { + m, ok := item.(map[string]interface{}) + if !ok { + utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item)) + return nil + } + out := metav1.ManagedFieldsEntry{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err)) + return nil + } + managedFields = append(managedFields, out) + } + return managedFields +} + +func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) { + if managedFields == nil { + RemoveNestedField(u.Object, "metadata", "managedFields") + return + } + items := []interface{}{} + for _, managedFieldsEntry := range managedFields { + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err)) + return + } + items = append(items, out) + } + u.setNestedSlice(items, "metadata", "managedFields") +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go new file mode 100644 index 0000000..5028f5f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -0,0 +1,210 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package unstructured + +import ( + "bytes" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var _ runtime.Unstructured = &UnstructuredList{} +var _ metav1.ListInterface = &UnstructuredList{} + +// UnstructuredList allows lists that do not have Golang structs +// registered to be manipulated generically. This can be used to deal +// with the API lists from a plug-in. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type UnstructuredList struct { + Object map[string]interface{} + + // Items is a list of unstructured objects. + Items []Unstructured `json:"items"` +} + +func (u *UnstructuredList) GetObjectKind() schema.ObjectKind { return u } + +func (u *UnstructuredList) IsList() bool { return true } + +func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { + for i := range u.Items { + if err := fn(&u.Items[i]); err != nil { + return err + } + } + return nil +} + +// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. +// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. +func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { + out := new(UnstructuredList) + if u != nil { + out.SetGroupVersionKind(u.GroupVersionKind()) + } + return out +} + +// UnstructuredContent returns a map contain an overlay of the Items field onto +// the Object field. Items always overwrites overlay. +func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { + out := make(map[string]interface{}, len(u.Object)+1) + + // shallow copy every property + for k, v := range u.Object { + out[k] = v + } + + items := make([]interface{}, len(u.Items)) + for i, item := range u.Items { + items[i] = item.UnstructuredContent() + } + out["items"] = items + return out +} + +// SetUnstructuredContent obeys the conventions of List and keeps Items and the items +// array in sync. If items is not an array of objects in the incoming map, then any +// mismatched item will be removed. +func (obj *UnstructuredList) SetUnstructuredContent(content map[string]interface{}) { + obj.Object = content + if content == nil { + obj.Items = nil + return + } + items, ok := obj.Object["items"].([]interface{}) + if !ok || items == nil { + items = []interface{}{} + } + unstructuredItems := make([]Unstructured, 0, len(items)) + newItems := make([]interface{}, 0, len(items)) + for _, item := range items { + o, ok := item.(map[string]interface{}) + if !ok { + continue + } + unstructuredItems = append(unstructuredItems, Unstructured{Object: o}) + newItems = append(newItems, o) + } + obj.Items = unstructuredItems + obj.Object["items"] = newItems +} + +func (u *UnstructuredList) DeepCopy() *UnstructuredList { + if u == nil { + return nil + } + out := new(UnstructuredList) + *out = *u + out.Object = runtime.DeepCopyJSON(u.Object) + out.Items = make([]Unstructured, len(u.Items)) + for i := range u.Items { + u.Items[i].DeepCopyInto(&out.Items[i]) + } + return out +} + +// MarshalJSON ensures that the unstructured list object produces proper +// JSON when passed to Go's standard JSON library. 
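A short sketch of list decoding with these types, assuming the vendored import path and an illustrative ConfigMapList payload; as in decodeToList above, items that carry no kind or apiVersion of their own inherit them from the enclosing list:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	data := []byte(`{"apiVersion":"v1","kind":"ConfigMapList","items":[{"metadata":{"name":"a"}},{"metadata":{"name":"b"}}]}`)

	list := &unstructured.UnstructuredList{}
	if err := list.UnmarshalJSON(data); err != nil {
		fmt.Println(err)
		return
	}
	for _, item := range list.Items {
		// Each item's kind is inferred as "ConfigMap" from the list's "ConfigMapList".
		fmt.Println(item.GetKind(), item.GetName())
	}
}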
+func (u *UnstructuredList) MarshalJSON() ([]byte, error) { + var buf bytes.Buffer + err := UnstructuredJSONScheme.Encode(u, &buf) + return buf.Bytes(), err +} + +// UnmarshalJSON ensures that the unstructured list object properly +// decodes JSON when passed to Go's standard JSON library. +func (u *UnstructuredList) UnmarshalJSON(b []byte) error { + _, _, err := UnstructuredJSONScheme.Decode(b, nil, u) + return err +} + +func (u *UnstructuredList) GetAPIVersion() string { + return getNestedString(u.Object, "apiVersion") +} + +func (u *UnstructuredList) SetAPIVersion(version string) { + u.setNestedField(version, "apiVersion") +} + +func (u *UnstructuredList) GetKind() string { + return getNestedString(u.Object, "kind") +} + +func (u *UnstructuredList) SetKind(kind string) { + u.setNestedField(kind, "kind") +} + +func (u *UnstructuredList) GetResourceVersion() string { + return getNestedString(u.Object, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) SetResourceVersion(version string) { + u.setNestedField(version, "metadata", "resourceVersion") +} + +func (u *UnstructuredList) GetSelfLink() string { + return getNestedString(u.Object, "metadata", "selfLink") +} + +func (u *UnstructuredList) SetSelfLink(selfLink string) { + u.setNestedField(selfLink, "metadata", "selfLink") +} + +func (u *UnstructuredList) GetContinue() string { + return getNestedString(u.Object, "metadata", "continue") +} + +func (u *UnstructuredList) SetContinue(c string) { + u.setNestedField(c, "metadata", "continue") +} + +func (u *UnstructuredList) GetRemainingItemCount() *int64 { + return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") +} + +func (u *UnstructuredList) SetRemainingItemCount(c *int64) { + if c == nil { + RemoveNestedField(u.Object, "metadata", "remainingItemCount") + } else { + u.setNestedField(*c, "metadata", "remainingItemCount") + } +} + +func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { + u.SetAPIVersion(gvk.GroupVersion().String()) + u.SetKind(gvk.Kind) +} + +func (u *UnstructuredList) GroupVersionKind() schema.GroupVersionKind { + gv, err := schema.ParseGroupVersion(u.GetAPIVersion()) + if err != nil { + return schema.GroupVersionKind{} + } + gvk := gv.WithKind(u.GetKind()) + return gvk +} + +func (u *UnstructuredList) setNestedField(value interface{}, fields ...string) { + if u.Object == nil { + u.Object = make(map[string]interface{}) + } + SetNestedField(u.Object, value, fields...) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go new file mode 100644 index 0000000..9a9f25e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/zz_generated.deepcopy.go @@ -0,0 +1,55 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. 
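A minimal sketch of the Unstructured metadata accessors and DeepCopy, using illustrative values and the vendored import paths:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	u := &unstructured.Unstructured{}
	u.SetGroupVersionKind(schema.GroupVersionKind{Version: "v1", Kind: "Pod"})
	u.SetNamespace("default")
	u.SetName("demo")
	u.SetLabels(map[string]string{"app": "demo"})

	// The accessors read and write paths under .metadata in the underlying map.
	fmt.Println(u.GetName(), u.GetLabels()) // demo map[app:demo]

	// DeepCopy clones the underlying JSON map, so the copy can be mutated independently.
	c := u.DeepCopy()
	c.SetName("copy")
	fmt.Println(u.GetName(), c.GetName()) // demo copy
}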
+ +package unstructured + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Unstructured) DeepCopyInto(out *Unstructured) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Unstructured) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UnstructuredList) DeepCopyInto(out *UnstructuredList) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UnstructuredList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go new file mode 100644 index 0000000..58f0773 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/watch.go @@ -0,0 +1,89 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/watch" +) + +// Event represents a single event to a watched resource. +// +// +protobuf=true +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type WatchEvent struct { + Type string `json:"type" protobuf:"bytes,1,opt,name=type"` + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Error: *Status is recommended; other types may make sense + // depending on context. 
+ Object runtime.RawExtension `json:"object" protobuf:"bytes,2,opt,name=object"` +} + +func Convert_watch_Event_To_v1_WatchEvent(in *watch.Event, out *WatchEvent, s conversion.Scope) error { + out.Type = string(in.Type) + switch t := in.Object.(type) { + case *runtime.Unknown: + // TODO: handle other fields on Unknown and detect type + out.Object.Raw = t.Raw + case nil: + default: + out.Object.Object = in.Object + } + return nil +} + +func Convert_v1_InternalEvent_To_v1_WatchEvent(in *InternalEvent, out *WatchEvent, s conversion.Scope) error { + return Convert_watch_Event_To_v1_WatchEvent((*watch.Event)(in), out, s) +} + +func Convert_v1_WatchEvent_To_watch_Event(in *WatchEvent, out *watch.Event, s conversion.Scope) error { + out.Type = watch.EventType(in.Type) + if in.Object.Object != nil { + out.Object = in.Object.Object + } else if in.Object.Raw != nil { + // TODO: handle other fields on Unknown and detect type + out.Object = &runtime.Unknown{ + Raw: in.Object.Raw, + ContentType: runtime.ContentTypeJSON, + } + } + return nil +} + +func Convert_v1_WatchEvent_To_v1_InternalEvent(in *WatchEvent, out *InternalEvent, s conversion.Scope) error { + return Convert_v1_WatchEvent_To_watch_Event(in, (*watch.Event)(out), s) +} + +// InternalEvent makes watch.Event versioned +// +protobuf=false +type InternalEvent watch.Event + +func (e *InternalEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *WatchEvent) GetObjectKind() schema.ObjectKind { return schema.EmptyObjectKind } +func (e *InternalEvent) DeepCopyObject() runtime.Object { + if c := e.DeepCopy(); c != nil { + return c + } else { + return nil + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go new file mode 100644 index 0000000..2ade69d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go @@ -0,0 +1,523 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1 + +import ( + url "net/url" + unsafe "unsafe" + + resource "k8s.io/apimachinery/pkg/api/resource" + conversion "k8s.io/apimachinery/pkg/conversion" + fields "k8s.io/apimachinery/pkg/fields" + labels "k8s.io/apimachinery/pkg/labels" + runtime "k8s.io/apimachinery/pkg/runtime" + intstr "k8s.io/apimachinery/pkg/util/intstr" + watch "k8s.io/apimachinery/pkg/watch" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
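A small sketch of the watch conversion helpers above, assuming the vendored import paths; passing a nil conversion.Scope is an assumption based on the function body shown, which does not use it:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/watch"
)

func main() {
	u := &unstructured.Unstructured{}
	u.SetAPIVersion("v1")
	u.SetKind("ConfigMap")
	u.SetName("demo")

	in := watch.Event{Type: watch.Added, Object: u}

	var out metav1.WatchEvent
	// nil scope: the conversion shown above never touches its Scope argument.
	if err := metav1.Convert_watch_Event_To_v1_WatchEvent(&in, &out, nil); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(out.Type, out.Object.Object != nil) // ADDED true
}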
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*CreateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_CreateOptions(a.(*url.Values), b.(*CreateOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ExportOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ExportOptions(a.(*url.Values), b.(*ExportOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*GetOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_GetOptions(a.(*url.Values), b.(*GetOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_ListOptions(a.(*url.Values), b.(*ListOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*PatchOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_PatchOptions(a.(*url.Values), b.(*PatchOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*TableOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_TableOptions(a.(*url.Values), b.(*TableOptions), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*UpdateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_UpdateOptions(a.(*url.Values), b.(*UpdateOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*map[string]string)(nil), (*LabelSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Map_string_To_string_To_v1_LabelSelector(a.(*map[string]string), b.(*LabelSelector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**bool)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_bool_To_bool(a.(**bool), b.(*bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**float64)(nil), (*float64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_float64_To_float64(a.(**float64), b.(*float64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int32)(nil), (*int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int32_To_int32(a.(**int32), b.(*int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int64)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int64_To_int(a.(**int64), b.(*int), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**int64)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_int64_To_int64(a.(**int64), b.(*int64), scope) + }); err 
!= nil { + return err + } + if err := s.AddConversionFunc((**intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(a.(**intstr.IntOrString), b.(*intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_string_To_string(a.(**string), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((**Duration)(nil), (*Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Pointer_v1_Duration_To_v1_Duration(a.(**Duration), b.(*Duration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (**DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_v1_DeletionPropagation(a.(*[]string), b.(**DeletionPropagation), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (**Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Pointer_v1_Time(a.(*[]string), b.(**Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*[]int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_Slice_int32(a.(*[]string), b.(*[]int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*IncludeObjectPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_v1_IncludeObjectPolicy(a.(*[]string), b.(*IncludeObjectPolicy), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_v1_Time(a.(*[]string), b.(*Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*bool)(nil), (**bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_bool_To_Pointer_bool(a.(*bool), b.(**bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*fields.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_fields_Selector_To_string(a.(*fields.Selector), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*float64)(nil), (**float64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_float64_To_Pointer_float64(a.(*float64), b.(**float64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int32)(nil), (**int32)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int32_To_Pointer_int32(a.(*int32), b.(**int32), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int64)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int64_To_Pointer_int64(a.(*int64), b.(**int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*int)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_int_To_Pointer_int64(a.(*int), b.(**int64), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (**intstr.IntOrString)(nil), func(a, b 
interface{}, scope conversion.Scope) error { + return Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(a.(*intstr.IntOrString), b.(**intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_intstr_IntOrString_To_intstr_IntOrString(a.(*intstr.IntOrString), b.(*intstr.IntOrString), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*labels.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_labels_Selector_To_string(a.(*labels.Selector), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*resource.Quantity)(nil), (*resource.Quantity)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_resource_Quantity_To_resource_Quantity(a.(*resource.Quantity), b.(*resource.Quantity), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_Pointer_string(a.(*string), b.(**string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (*fields.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_fields_Selector(a.(*string), b.(*fields.Selector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*string)(nil), (*labels.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_string_To_labels_Selector(a.(*string), b.(*labels.Selector), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*DeleteOptions)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_DeleteOptions_To_v1_DeleteOptions(a.(*DeleteOptions), b.(*DeleteOptions), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*Duration)(nil), (**Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Duration_To_Pointer_v1_Duration(a.(*Duration), b.(**Duration), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*InternalEvent)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_InternalEvent_To_v1_WatchEvent(a.(*InternalEvent), b.(*WatchEvent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*LabelSelector)(nil), (*map[string]string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_LabelSelector_To_Map_string_To_string(a.(*LabelSelector), b.(*map[string]string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*ListMeta)(nil), (*ListMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_ListMeta_To_v1_ListMeta(a.(*ListMeta), b.(*ListMeta), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*MicroTime)(nil), (*MicroTime)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_MicroTime_To_v1_MicroTime(a.(*MicroTime), b.(*MicroTime), scope) + }); err != nil 
{ + return err + } + if err := s.AddConversionFunc((*Time)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_Time_To_v1_Time(a.(*Time), b.(*Time), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*TypeMeta)(nil), (*TypeMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TypeMeta_To_v1_TypeMeta(a.(*TypeMeta), b.(*TypeMeta), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*WatchEvent)(nil), (*InternalEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WatchEvent_To_v1_InternalEvent(a.(*WatchEvent), b.(*InternalEvent), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*WatchEvent)(nil), (*watch.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_WatchEvent_To_watch_Event(a.(*WatchEvent), b.(*watch.Event), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*watch.Event)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_watch_Event_To_v1_WatchEvent(a.(*watch.Event), b.(*WatchEvent), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_CreateOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_CreateOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["gracePeriodSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.GracePeriodSeconds, s); err != nil { + return err + } + } else { + out.GracePeriodSeconds = nil + } + // INFO: in.Preconditions opted out of conversion generation + if values, ok := map[string][]string(*in)["orphanDependents"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.OrphanDependents, s); err != nil { + return err + } + } else { + out.OrphanDependents = nil + } + if values, ok := map[string][]string(*in)["propagationPolicy"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out.PropagationPolicy, s); err != nil { + return err + } + } else { + out.PropagationPolicy = nil + } + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + return nil +} + +func autoConvert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. 
+ + if values, ok := map[string][]string(*in)["export"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Export, s); err != nil { + return err + } + } else { + out.Export = false + } + if values, ok := map[string][]string(*in)["exact"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Exact, s); err != nil { + return err + } + } else { + out.Exact = false + } + return nil +} + +// Convert_url_Values_To_v1_ExportOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ExportOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { + return err + } + } else { + out.ResourceVersion = "" + } + return nil +} + +// Convert_url_Values_To_v1_GetOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_GetOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["labelSelector"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.LabelSelector, s); err != nil { + return err + } + } else { + out.LabelSelector = "" + } + if values, ok := map[string][]string(*in)["fieldSelector"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldSelector, s); err != nil { + return err + } + } else { + out.FieldSelector = "" + } + if values, ok := map[string][]string(*in)["watch"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.Watch, s); err != nil { + return err + } + } else { + out.Watch = false + } + if values, ok := map[string][]string(*in)["allowWatchBookmarks"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.AllowWatchBookmarks, s); err != nil { + return err + } + } else { + out.AllowWatchBookmarks = false + } + if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { + return err + } + } else { + out.ResourceVersion = "" + } + if values, ok := map[string][]string(*in)["timeoutSeconds"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TimeoutSeconds, s); err != nil { + return err + } + } else { + out.TimeoutSeconds = nil + } + if values, ok := map[string][]string(*in)["limit"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_int64(&values, &out.Limit, s); err != nil { + return err + } + } else { + out.Limit = 0 + } + if values, ok := map[string][]string(*in)["continue"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.Continue, s); err != nil { + return err + } + } else { + out.Continue = "" + } + return nil +} + +// Convert_url_Values_To_v1_ListOptions is an autogenerated 
conversion function. +func Convert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_ListOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["force"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.Force, s); err != nil { + return err + } + } else { + out.Force = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_PatchOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_PatchOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["-"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_bool(&values, &out.NoHeaders, s); err != nil { + return err + } + } else { + out.NoHeaders = false + } + if values, ok := map[string][]string(*in)["includeObject"]; ok && len(values) > 0 { + if err := Convert_Slice_string_To_v1_IncludeObjectPolicy(&values, &out.IncludeObject, s); err != nil { + return err + } + } else { + out.IncludeObject = "" + } + return nil +} + +// Convert_url_Values_To_v1_TableOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_TableOptions(in, out, s) +} + +func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { + // WARNING: Field TypeMeta does not have json tag, skipping. + + if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { + out.DryRun = *(*[]string)(unsafe.Pointer(&values)) + } else { + out.DryRun = nil + } + if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { + if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { + return err + } + } else { + out.FieldManager = "" + } + return nil +} + +// Convert_url_Values_To_v1_UpdateOptions is an autogenerated conversion function. +func Convert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { + return autoConvert_url_Values_To_v1_UpdateOptions(in, out, s) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b82fdf2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -0,0 +1,1173 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroup) DeepCopyInto(out *APIGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]GroupVersionForDiscovery, len(*in)) + copy(*out, *in) + } + out.PreferredVersion = in.PreferredVersion + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroup. +func (in *APIGroup) DeepCopy() *APIGroup { + if in == nil { + return nil + } + out := new(APIGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIGroupList) DeepCopyInto(out *APIGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Groups != nil { + in, out := &in.Groups, &out.Groups + *out = make([]APIGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIGroupList. +func (in *APIGroupList) DeepCopy() *APIGroupList { + if in == nil { + return nil + } + out := new(APIGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResource) DeepCopyInto(out *APIResource) { + *out = *in + if in.Verbs != nil { + in, out := &in.Verbs, &out.Verbs + *out = make(Verbs, len(*in)) + copy(*out, *in) + } + if in.ShortNames != nil { + in, out := &in.ShortNames, &out.ShortNames + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Categories != nil { + in, out := &in.Categories, &out.Categories + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResource. 
+func (in *APIResource) DeepCopy() *APIResource { + if in == nil { + return nil + } + out := new(APIResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIResourceList) DeepCopyInto(out *APIResourceList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.APIResources != nil { + in, out := &in.APIResources, &out.APIResources + *out = make([]APIResource, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIResourceList. +func (in *APIResourceList) DeepCopy() *APIResourceList { + if in == nil { + return nil + } + out := new(APIResourceList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIResourceList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *APIVersions) DeepCopyInto(out *APIVersions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Versions != nil { + in, out := &in.Versions, &out.Versions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ServerAddressByClientCIDRs != nil { + in, out := &in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs + *out = make([]ServerAddressByClientCIDR, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersions. +func (in *APIVersions) DeepCopy() *APIVersions { + if in == nil { + return nil + } + out := new(APIVersions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *APIVersions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CreateOptions) DeepCopyInto(out *CreateOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CreateOptions. +func (in *CreateOptions) DeepCopy() *CreateOptions { + if in == nil { + return nil + } + out := new(CreateOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *CreateOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *DeleteOptions) DeepCopyInto(out *DeleteOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.GracePeriodSeconds != nil { + in, out := &in.GracePeriodSeconds, &out.GracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Preconditions != nil { + in, out := &in.Preconditions, &out.Preconditions + *out = new(Preconditions) + (*in).DeepCopyInto(*out) + } + if in.OrphanDependents != nil { + in, out := &in.OrphanDependents, &out.OrphanDependents + *out = new(bool) + **out = **in + } + if in.PropagationPolicy != nil { + in, out := &in.PropagationPolicy, &out.PropagationPolicy + *out = new(DeletionPropagation) + **out = **in + } + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeleteOptions. +func (in *DeleteOptions) DeepCopy() *DeleteOptions { + if in == nil { + return nil + } + out := new(DeleteOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *DeleteOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Duration) DeepCopyInto(out *Duration) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Duration. +func (in *Duration) DeepCopy() *Duration { + if in == nil { + return nil + } + out := new(Duration) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExportOptions) DeepCopyInto(out *ExportOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExportOptions. +func (in *ExportOptions) DeepCopy() *ExportOptions { + if in == nil { + return nil + } + out := new(ExportOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExportOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsV1. +func (in *FieldsV1) DeepCopy() *FieldsV1 { + if in == nil { + return nil + } + out := new(FieldsV1) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GetOptions) DeepCopyInto(out *GetOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GetOptions. 
+func (in *GetOptions) DeepCopy() *GetOptions { + if in == nil { + return nil + } + out := new(GetOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *GetOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupKind) DeepCopyInto(out *GroupKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupKind. +func (in *GroupKind) DeepCopy() *GroupKind { + if in == nil { + return nil + } + out := new(GroupKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupResource) DeepCopyInto(out *GroupResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupResource. +func (in *GroupResource) DeepCopy() *GroupResource { + if in == nil { + return nil + } + out := new(GroupResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersion) DeepCopyInto(out *GroupVersion) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersion. +func (in *GroupVersion) DeepCopy() *GroupVersion { + if in == nil { + return nil + } + out := new(GroupVersion) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionForDiscovery) DeepCopyInto(out *GroupVersionForDiscovery) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionForDiscovery. +func (in *GroupVersionForDiscovery) DeepCopy() *GroupVersionForDiscovery { + if in == nil { + return nil + } + out := new(GroupVersionForDiscovery) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionKind) DeepCopyInto(out *GroupVersionKind) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionKind. +func (in *GroupVersionKind) DeepCopy() *GroupVersionKind { + if in == nil { + return nil + } + out := new(GroupVersionKind) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *GroupVersionResource) DeepCopyInto(out *GroupVersionResource) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GroupVersionResource. +func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { + if in == nil { + return nil + } + out := new(GroupVersionResource) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { + *out = *in + if in.Object != nil { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InternalEvent. +func (in *InternalEvent) DeepCopy() *InternalEvent { + if in == nil { + return nil + } + out := new(InternalEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelector) DeepCopyInto(out *LabelSelector) { + *out = *in + if in.MatchLabels != nil { + in, out := &in.MatchLabels, &out.MatchLabels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.MatchExpressions != nil { + in, out := &in.MatchExpressions, &out.MatchExpressions + *out = make([]LabelSelectorRequirement, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelector. +func (in *LabelSelector) DeepCopy() *LabelSelector { + if in == nil { + return nil + } + out := new(LabelSelector) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *LabelSelectorRequirement) DeepCopyInto(out *LabelSelectorRequirement) { + *out = *in + if in.Values != nil { + in, out := &in.Values, &out.Values + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LabelSelectorRequirement. +func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { + if in == nil { + return nil + } + out := new(LabelSelectorRequirement) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *List) DeepCopyInto(out *List) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]runtime.RawExtension, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new List. +func (in *List) DeepCopy() *List { + if in == nil { + return nil + } + out := new(List) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *List) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ListMeta) DeepCopyInto(out *ListMeta) { + *out = *in + if in.RemainingItemCount != nil { + in, out := &in.RemainingItemCount, &out.RemainingItemCount + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListMeta. +func (in *ListMeta) DeepCopy() *ListMeta { + if in == nil { + return nil + } + out := new(ListMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ListOptions) DeepCopyInto(out *ListOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.TimeoutSeconds != nil { + in, out := &in.TimeoutSeconds, &out.TimeoutSeconds + *out = new(int64) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ListOptions. +func (in *ListOptions) DeepCopy() *ListOptions { + if in == nil { + return nil + } + out := new(ListOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ListOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { + *out = *in + if in.Time != nil { + in, out := &in.Time, &out.Time + *out = (*in).DeepCopy() + } + if in.FieldsV1 != nil { + in, out := &in.FieldsV1, &out.FieldsV1 + *out = new(FieldsV1) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldsEntry. +func (in *ManagedFieldsEntry) DeepCopy() *ManagedFieldsEntry { + if in == nil { + return nil + } + out := new(ManagedFieldsEntry) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime. +func (in *MicroTime) DeepCopy() *MicroTime { + if in == nil { + return nil + } + out := new(MicroTime) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { + *out = *in + in.CreationTimestamp.DeepCopyInto(&out.CreationTimestamp) + if in.DeletionTimestamp != nil { + in, out := &in.DeletionTimestamp, &out.DeletionTimestamp + *out = (*in).DeepCopy() + } + if in.DeletionGracePeriodSeconds != nil { + in, out := &in.DeletionGracePeriodSeconds, &out.DeletionGracePeriodSeconds + *out = new(int64) + **out = **in + } + if in.Labels != nil { + in, out := &in.Labels, &out.Labels + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.Annotations != nil { + in, out := &in.Annotations, &out.Annotations + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + if in.OwnerReferences != nil { + in, out := &in.OwnerReferences, &out.OwnerReferences + *out = make([]OwnerReference, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Finalizers != nil { + in, out := &in.Finalizers, &out.Finalizers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ManagedFields != nil { + in, out := &in.ManagedFields, &out.ManagedFields + *out = make([]ManagedFieldsEntry, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMeta. +func (in *ObjectMeta) DeepCopy() *ObjectMeta { + if in == nil { + return nil + } + out := new(ObjectMeta) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *OwnerReference) DeepCopyInto(out *OwnerReference) { + *out = *in + if in.Controller != nil { + in, out := &in.Controller, &out.Controller + *out = new(bool) + **out = **in + } + if in.BlockOwnerDeletion != nil { + in, out := &in.BlockOwnerDeletion, &out.BlockOwnerDeletion + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OwnerReference. +func (in *OwnerReference) DeepCopy() *OwnerReference { + if in == nil { + return nil + } + out := new(OwnerReference) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. +func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { + if in == nil { + return nil + } + out := new(PartialObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PartialObjectMetadata, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Patch) DeepCopyInto(out *Patch) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Patch. +func (in *Patch) DeepCopy() *Patch { + if in == nil { + return nil + } + out := new(Patch) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PatchOptions) DeepCopyInto(out *PatchOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Force != nil { + in, out := &in.Force, &out.Force + *out = new(bool) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchOptions. 
+func (in *PatchOptions) DeepCopy() *PatchOptions { + if in == nil { + return nil + } + out := new(PatchOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PatchOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preconditions) DeepCopyInto(out *Preconditions) { + *out = *in + if in.UID != nil { + in, out := &in.UID, &out.UID + *out = new(types.UID) + **out = **in + } + if in.ResourceVersion != nil { + in, out := &in.ResourceVersion, &out.ResourceVersion + *out = new(string) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. +func (in *Preconditions) DeepCopy() *Preconditions { + if in == nil { + return nil + } + out := new(Preconditions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *RootPaths) DeepCopyInto(out *RootPaths) { + *out = *in + if in.Paths != nil { + in, out := &in.Paths, &out.Paths + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RootPaths. +func (in *RootPaths) DeepCopy() *RootPaths { + if in == nil { + return nil + } + out := new(RootPaths) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ServerAddressByClientCIDR) DeepCopyInto(out *ServerAddressByClientCIDR) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerAddressByClientCIDR. +func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { + if in == nil { + return nil + } + out := new(ServerAddressByClientCIDR) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Status) DeepCopyInto(out *Status) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Details != nil { + in, out := &in.Details, &out.Details + *out = new(StatusDetails) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status. +func (in *Status) DeepCopy() *Status { + if in == nil { + return nil + } + out := new(Status) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Status) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusCause) DeepCopyInto(out *StatusCause) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusCause. 
+func (in *StatusCause) DeepCopy() *StatusCause { + if in == nil { + return nil + } + out := new(StatusCause) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *StatusDetails) DeepCopyInto(out *StatusDetails) { + *out = *in + if in.Causes != nil { + in, out := &in.Causes, &out.Causes + *out = make([]StatusCause, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatusDetails. +func (in *StatusDetails) DeepCopy() *StatusDetails { + if in == nil { + return nil + } + out := new(StatusDetails) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. +func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { + if in == nil { + return nil + } + out := new(TableColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableOptions) DeepCopyInto(out *TableOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. +func (in *TableOptions) DeepCopy() *TableOptions { + if in == nil { + return nil + } + out := new(TableOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRow) DeepCopyInto(out *TableRow) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. +func (in *TableRowCondition) DeepCopy() *TableRowCondition { + if in == nil { + return nil + } + out := new(TableRowCondition) + in.DeepCopyInto(out) + return out +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. +func (in *Time) DeepCopy() *Time { + if in == nil { + return nil + } + out := new(Time) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Timestamp) DeepCopyInto(out *Timestamp) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Timestamp. +func (in *Timestamp) DeepCopy() *Timestamp { + if in == nil { + return nil + } + out := new(Timestamp) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *UpdateOptions) DeepCopyInto(out *UpdateOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.DryRun != nil { + in, out := &in.DryRun, &out.DryRun + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UpdateOptions. +func (in *UpdateOptions) DeepCopy() *UpdateOptions { + if in == nil { + return nil + } + out := new(UpdateOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *UpdateOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in Verbs) DeepCopyInto(out *Verbs) { + { + in := &in + *out = make(Verbs, len(*in)) + copy(*out, *in) + return + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Verbs. +func (in Verbs) DeepCopy() Verbs { + if in == nil { + return nil + } + out := new(Verbs) + in.DeepCopyInto(out) + return *out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *WatchEvent) DeepCopyInto(out *WatchEvent) { + *out = *in + in.Object.DeepCopyInto(&out.Object) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WatchEvent. +func (in *WatchEvent) DeepCopy() *WatchEvent { + if in == nil { + return nil + } + out := new(WatchEvent) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *WatchEvent) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go new file mode 100644 index 0000000..cce2e60 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go new file mode 100644 index 0000000..2d7c8bd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -0,0 +1,817 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + "reflect" +) + +type typePair struct { + source reflect.Type + dest reflect.Type +} + +type typeNamePair struct { + fieldType reflect.Type + fieldName string +} + +// DebugLogger allows you to get debugging messages if necessary. +type DebugLogger interface { + Logf(format string, args ...interface{}) +} + +type NameFunc func(t reflect.Type) string + +var DefaultNameFunc = func(t reflect.Type) string { return t.Name() } + +// ConversionFunc converts the object a into the object b, reusing arrays or objects +// or pointers if necessary. It should return an error if the object cannot be converted +// or if some data is invalid. If you do not wish a and b to share fields or nested +// objects, you must copy a before calling this function. +type ConversionFunc func(a, b interface{}, scope Scope) error + +// Converter knows how to convert one type to another. +type Converter struct { + // Map from the conversion pair to a function which can + // do the conversion. + conversionFuncs ConversionFuncs + generatedConversionFuncs ConversionFuncs + + // Set of conversions that should be treated as a no-op + ignoredConversions map[typePair]struct{} + ignoredUntypedConversions map[typePair]struct{} + + // This is a map from a source field type and name, to a list of destination + // field type and name. + structFieldDests map[typeNamePair][]typeNamePair + + // Allows for the opposite lookup of structFieldDests. So that SourceFromDest + // copy flag also works. So this is a map of destination field name, to potential + // source field name and type to look for. 
+ structFieldSources map[typeNamePair][]typeNamePair + + // Map from an input type to a function which can apply a key name mapping + inputFieldMappingFuncs map[reflect.Type]FieldMappingFunc + + // Map from an input type to a set of default conversion flags. + inputDefaultFlags map[reflect.Type]FieldMatchingFlags + + // If non-nil, will be called to print helpful debugging info. Quite verbose. + Debug DebugLogger + + // nameFunc is called to retrieve the name of a type; this name is used for the + // purpose of deciding whether two types match or not (i.e., will we attempt to + // do a conversion). The default returns the go type name. + nameFunc func(t reflect.Type) string +} + +// NewConverter creates a new Converter object. +func NewConverter(nameFn NameFunc) *Converter { + c := &Converter{ + conversionFuncs: NewConversionFuncs(), + generatedConversionFuncs: NewConversionFuncs(), + ignoredConversions: make(map[typePair]struct{}), + ignoredUntypedConversions: make(map[typePair]struct{}), + nameFunc: nameFn, + structFieldDests: make(map[typeNamePair][]typeNamePair), + structFieldSources: make(map[typeNamePair][]typeNamePair), + + inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), + inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), + } + c.RegisterUntypedConversionFunc( + (*[]byte)(nil), (*[]byte)(nil), + func(a, b interface{}, s Scope) error { + return Convert_Slice_byte_To_Slice_byte(a.(*[]byte), b.(*[]byte), s) + }, + ) + return c +} + +// WithConversions returns a Converter that is a copy of c but with the additional +// fns merged on top. +func (c *Converter) WithConversions(fns ConversionFuncs) *Converter { + copied := *c + copied.conversionFuncs = c.conversionFuncs.Merge(fns) + return &copied +} + +// DefaultMeta returns the conversion FieldMappingFunc and meta for a given type. +func (c *Converter) DefaultMeta(t reflect.Type) (FieldMatchingFlags, *Meta) { + return c.inputDefaultFlags[t], &Meta{ + KeyNameMapping: c.inputFieldMappingFuncs[t], + } +} + +// Convert_Slice_byte_To_Slice_byte prevents recursing into every byte +func Convert_Slice_byte_To_Slice_byte(in *[]byte, out *[]byte, s Scope) error { + if *in == nil { + *out = nil + return nil + } + *out = make([]byte, len(*in)) + copy(*out, *in) + return nil +} + +// Scope is passed to conversion funcs to allow them to continue an ongoing conversion. +// If multiple converters exist in the system, Scope will allow you to use the correct one +// from a conversion function--that is, the one your conversion function was called by. +type Scope interface { + // Call Convert to convert sub-objects. Note that if you call it with your own exact + // parameters, you'll run out of stack space before anything useful happens. + Convert(src, dest interface{}, flags FieldMatchingFlags) error + + // SrcTags and DestTags contain the struct tags that src and dest had, respectively. + // If the enclosing object was not a struct, then these will contain no tags, of course. + SrcTag() reflect.StructTag + DestTag() reflect.StructTag + + // Flags returns the flags with which the conversion was started. + Flags() FieldMatchingFlags + + // Meta returns any information originally passed to Convert. + Meta() *Meta +} + +// FieldMappingFunc can convert an input field value into different values, depending on +// the value of the source or destination struct tags. 
+type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (source string, dest string) + +func NewConversionFuncs() ConversionFuncs { + return ConversionFuncs{ + untyped: make(map[typePair]ConversionFunc), + } +} + +type ConversionFuncs struct { + untyped map[typePair]ConversionFunc +} + +// AddUntyped adds the provided conversion function to the lookup table for the types that are +// supplied as a and b. a and b must be pointers or an error is returned. This method overwrites +// previously defined functions. +func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error { + tA, tB := reflect.TypeOf(a), reflect.TypeOf(b) + if tA.Kind() != reflect.Ptr { + return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", a) + } + if tB.Kind() != reflect.Ptr { + return fmt.Errorf("the type %T must be a pointer to register as an untyped conversion", b) + } + c.untyped[typePair{tA, tB}] = fn + return nil +} + +// Merge returns a new ConversionFuncs that contains all conversions from +// both other and c, with other conversions taking precedence. +func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { + merged := NewConversionFuncs() + for k, v := range c.untyped { + merged.untyped[k] = v + } + for k, v := range other.untyped { + merged.untyped[k] = v + } + return merged +} + +// Meta is supplied by Scheme, when it calls Convert. +type Meta struct { + // KeyNameMapping is an optional function which may map the listed key (field name) + // into a source and destination value. + KeyNameMapping FieldMappingFunc + // Context is an optional field that callers may use to pass info to conversion functions. + Context interface{} +} + +// scope contains information about an ongoing conversion. +type scope struct { + converter *Converter + meta *Meta + flags FieldMatchingFlags + + // srcStack & destStack are separate because they may not have a 1:1 + // relationship. + srcStack scopeStack + destStack scopeStack +} + +type scopeStackElem struct { + tag reflect.StructTag + value reflect.Value + key string +} + +type scopeStack []scopeStackElem + +func (s *scopeStack) pop() { + n := len(*s) + *s = (*s)[:n-1] +} + +func (s *scopeStack) push(e scopeStackElem) { + *s = append(*s, e) +} + +func (s *scopeStack) top() *scopeStackElem { + return &(*s)[len(*s)-1] +} + +func (s scopeStack) describe() string { + desc := "" + if len(s) > 1 { + desc = "(" + s[1].value.Type().String() + ")" + } + for i, v := range s { + if i < 2 { + // First layer on stack is not real; second is handled specially above. + continue + } + if v.key == "" { + desc += fmt.Sprintf(".%v", v.value.Type()) + } else { + desc += fmt.Sprintf(".%v", v.key) + } + } + return desc +} + +// Formats src & dest as indices for printing. +func (s *scope) setIndices(src, dest int) { + s.srcStack.top().key = fmt.Sprintf("[%v]", src) + s.destStack.top().key = fmt.Sprintf("[%v]", dest) +} + +// Formats src & dest as map keys for printing. +func (s *scope) setKeys(src, dest interface{}) { + s.srcStack.top().key = fmt.Sprintf(`["%v"]`, src) + s.destStack.top().key = fmt.Sprintf(`["%v"]`, dest) +} + +// Convert continues a conversion. +func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { + return s.converter.Convert(src, dest, flags, s.meta) +} + +// SrcTag returns the tag of the struct containing the current source item, if any. 
+func (s *scope) SrcTag() reflect.StructTag { + return s.srcStack.top().tag +} + +// DestTag returns the tag of the struct containing the current dest item, if any. +func (s *scope) DestTag() reflect.StructTag { + return s.destStack.top().tag +} + +// Flags returns the flags with which the current conversion was started. +func (s *scope) Flags() FieldMatchingFlags { + return s.flags +} + +// Meta returns the meta object that was originally passed to Convert. +func (s *scope) Meta() *Meta { + return s.meta +} + +// describe prints the path to get to the current (source, dest) values. +func (s *scope) describe() (src, dest string) { + return s.srcStack.describe(), s.destStack.describe() +} + +// error makes an error that includes information about where we were in the objects +// we were asked to convert. +func (s *scope) errorf(message string, args ...interface{}) error { + srcPath, destPath := s.describe() + where := fmt.Sprintf("converting %v to %v: ", srcPath, destPath) + return fmt.Errorf(where+message, args...) +} + +// Verifies whether a conversion function has a correct signature. +func verifyConversionFunctionSignature(ft reflect.Type) error { + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + if ft.NumIn() != 3 { + return fmt.Errorf("expected three 'in' params, got: %v", ft) + } + if ft.NumOut() != 1 { + return fmt.Errorf("expected one 'out' param, got: %v", ft) + } + if ft.In(0).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'in' param 0, got: %v", ft) + } + if ft.In(1).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'in' param 1, got: %v", ft) + } + scopeType := Scope(nil) + if e, a := reflect.TypeOf(&scopeType).Elem(), ft.In(2); e != a { + return fmt.Errorf("expected '%v' arg for 'in' param 2, got '%v' (%v)", e, a, ft) + } + var forErrorType error + // This convolution is necessary, otherwise TypeOf picks up on the fact + // that forErrorType is nil. + errorType := reflect.TypeOf(&forErrorType).Elem() + if ft.Out(0) != errorType { + return fmt.Errorf("expected error return, got: %v", ft) + } + return nil +} + +// RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those +// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce +// any other guarantee. +func (c *Converter) RegisterUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error { + return c.conversionFuncs.AddUntyped(a, b, fn) +} + +// RegisterGeneratedUntypedConversionFunc registers a function that converts between a and b by passing objects of those +// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce +// any other guarantee. +func (c *Converter) RegisterGeneratedUntypedConversionFunc(a, b interface{}, fn ConversionFunc) error { + return c.generatedConversionFuncs.AddUntyped(a, b, fn) +} + +// RegisterIgnoredConversion registers a "no-op" for conversion, where any requested +// conversion between from and to is ignored. 
+func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { + typeFrom := reflect.TypeOf(from) + typeTo := reflect.TypeOf(to) + if reflect.TypeOf(from).Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'from' param 0, got: %v", typeFrom) + } + if typeTo.Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) + } + c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{} + c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{} + return nil +} + +// RegisterInputDefaults registers a field name mapping function, used when converting +// from maps to structs. Inputs to the conversion methods are checked for this type and a mapping +// applied automatically if the input matches in. A set of default flags for the input conversion +// may also be provided, which will be used when no explicit flags are requested. +func (c *Converter) RegisterInputDefaults(in interface{}, fn FieldMappingFunc, defaultFlags FieldMatchingFlags) error { + fv := reflect.ValueOf(in) + ft := fv.Type() + if ft.Kind() != reflect.Ptr { + return fmt.Errorf("expected pointer 'in' argument, got: %v", ft) + } + c.inputFieldMappingFuncs[ft] = fn + c.inputDefaultFlags[ft] = defaultFlags + return nil +} + +// FieldMatchingFlags contains a list of ways in which struct fields could be +// copied. These constants may be | combined. +type FieldMatchingFlags int + +const ( + // Loop through destination fields, search for matching source + // field to copy it from. Source fields with no corresponding + // destination field will be ignored. If SourceToDest is + // specified, this flag is ignored. If neither is specified, + // or no flags are passed, this flag is the default. + DestFromSource FieldMatchingFlags = 0 + // Loop through source fields, search for matching dest field + // to copy it into. Destination fields with no corresponding + // source field will be ignored. + SourceToDest FieldMatchingFlags = 1 << iota + // Don't treat it as an error if the corresponding source or + // dest field can't be found. + IgnoreMissingFields + // Don't require type names to match. + AllowDifferentFieldTypeNames +) + +// IsSet returns true if the given flag or combination of flags is set. +func (f FieldMatchingFlags) IsSet(flag FieldMatchingFlags) bool { + if flag == DestFromSource { + // The bit logic doesn't work on the default value. + return f&SourceToDest != SourceToDest + } + return f&flag == flag +} + +// Convert will translate src to dest if it knows how. Both must be pointers. +// If no conversion func is registered and the default copying mechanism +// doesn't work on this type pair, an error will be returned. +// Read the comments on the various FieldMatchingFlags constants to understand +// what the 'flags' parameter does. +// 'meta' is given to allow you to pass information to conversion functions, +// it is not used by Convert() other than storing it in the scope. +// Not safe for objects with cyclic references! 
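+//
+// A minimal usage sketch (the caller-side types Internal and V1 are hypothetical;
+// the *Converter c is assumed to come from this package's constructor defined
+// earlier in this file):
+//
+//	type Internal struct{ Name string }
+//	type V1 struct{ Name string }
+//	var out V1
+//	// No conversion func is registered, so the reflective field-by-field copy runs;
+//	// AllowDifferentFieldTypeNames is needed because the struct type names differ.
+//	err := c.Convert(&Internal{Name: "web"}, &out, conversion.AllowDifferentFieldTypeNames, nil)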
+func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { + return c.doConversion(src, dest, flags, meta, c.convert) +} + +type conversionFunc func(sv, dv reflect.Value, scope *scope) error + +func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error { + pair := typePair{reflect.TypeOf(src), reflect.TypeOf(dest)} + scope := &scope{ + converter: c, + flags: flags, + meta: meta, + } + + // ignore conversions of this type + if _, ok := c.ignoredUntypedConversions[pair]; ok { + return nil + } + if fn, ok := c.conversionFuncs.untyped[pair]; ok { + return fn(src, dest, scope) + } + if fn, ok := c.generatedConversionFuncs.untyped[pair]; ok { + return fn(src, dest, scope) + } + // TODO: consider everything past this point deprecated - we want to support only point to point top level + // conversions + + dv, err := EnforcePtr(dest) + if err != nil { + return err + } + if !dv.CanAddr() && !dv.CanSet() { + return fmt.Errorf("can't write to dest") + } + sv, err := EnforcePtr(src) + if err != nil { + return err + } + // Leave something on the stack, so that calls to struct tag getters never fail. + scope.srcStack.push(scopeStackElem{}) + scope.destStack.push(scopeStackElem{}) + return f(sv, dv, scope) +} + +// callUntyped calls predefined conversion func. +func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error { + if !dv.CanAddr() { + return scope.errorf("cant addr dest") + } + var svPointer reflect.Value + if sv.CanAddr() { + svPointer = sv.Addr() + } else { + svPointer = reflect.New(sv.Type()) + svPointer.Elem().Set(sv) + } + dvPointer := dv.Addr() + return f(svPointer.Interface(), dvPointer.Interface(), scope) +} + +// convert recursively copies sv into dv, calling an appropriate conversion function if +// one is registered. +func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { + dt, st := dv.Type(), sv.Type() + pair := typePair{st, dt} + + // ignore conversions of this type + if _, ok := c.ignoredConversions[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Ignoring conversion of '%v' to '%v'", st, dt) + } + return nil + } + + // Convert sv to dv. + pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())} + if f, ok := c.conversionFuncs.untyped[pair]; ok { + return c.callUntyped(sv, dv, f, scope) + } + if f, ok := c.generatedConversionFuncs.untyped[pair]; ok { + return c.callUntyped(sv, dv, f, scope) + } + + if !dv.CanSet() { + return scope.errorf("Cannot set dest. (Tried to deep copy something with unexported fields?)") + } + + if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.nameFunc(dt) != c.nameFunc(st) { + return scope.errorf( + "type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.", + c.nameFunc(st), c.nameFunc(dt), st, dt) + } + + switch st.Kind() { + case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct: + // Don't copy these via assignment/conversion! + default: + // This should handle all simple types. 
+ if st.AssignableTo(dt) { + dv.Set(sv) + return nil + } + if st.ConvertibleTo(dt) { + dv.Set(sv.Convert(dt)) + return nil + } + } + + if c.Debug != nil { + c.Debug.Logf("Trying to convert '%v' to '%v'", st, dt) + } + + scope.srcStack.push(scopeStackElem{value: sv}) + scope.destStack.push(scopeStackElem{value: dv}) + defer scope.srcStack.pop() + defer scope.destStack.pop() + + switch dv.Kind() { + case reflect.Struct: + return c.convertKV(toKVValue(sv), toKVValue(dv), scope) + case reflect.Slice: + if sv.IsNil() { + // Don't make a zero-length slice. + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + for i := 0; i < sv.Len(); i++ { + scope.setIndices(i, i) + if err := c.convert(sv.Index(i), dv.Index(i), scope); err != nil { + return err + } + } + case reflect.Ptr: + if sv.IsNil() { + // Don't copy a nil ptr! + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.New(dt.Elem())) + switch st.Kind() { + case reflect.Ptr, reflect.Interface: + return c.convert(sv.Elem(), dv.Elem(), scope) + default: + return c.convert(sv, dv.Elem(), scope) + } + case reflect.Map: + if sv.IsNil() { + // Don't copy a nil ptr! + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeMap(dt)) + for _, sk := range sv.MapKeys() { + dk := reflect.New(dt.Key()).Elem() + if err := c.convert(sk, dk, scope); err != nil { + return err + } + dkv := reflect.New(dt.Elem()).Elem() + scope.setKeys(sk.Interface(), dk.Interface()) + // TODO: sv.MapIndex(sk) may return a value with CanAddr() == false, + // because a map[string]struct{} does not allow a pointer reference. + // Calling a custom conversion function defined for the map value + // will panic. Example is PodInfo map[string]ContainerStatus. + if err := c.convert(sv.MapIndex(sk), dkv, scope); err != nil { + return err + } + dv.SetMapIndex(dk, dkv) + } + case reflect.Interface: + if sv.IsNil() { + // Don't copy a nil interface! + dv.Set(reflect.Zero(dt)) + return nil + } + tmpdv := reflect.New(sv.Elem().Type()).Elem() + if err := c.convert(sv.Elem(), tmpdv, scope); err != nil { + return err + } + dv.Set(reflect.ValueOf(tmpdv.Interface())) + return nil + default: + return scope.errorf("couldn't copy '%v' into '%v'; didn't understand types", st, dt) + } + return nil +} + +var stringType = reflect.TypeOf("") + +func toKVValue(v reflect.Value) kvValue { + switch v.Kind() { + case reflect.Struct: + return structAdaptor(v) + case reflect.Map: + if v.Type().Key().AssignableTo(stringType) { + return stringMapAdaptor(v) + } + } + + return nil +} + +// kvValue lets us write the same conversion logic to work with both maps +// and structs. Only maps with string keys make sense for this. +type kvValue interface { + // returns all keys, as a []string. + keys() []string + // Will just return "" for maps. + tagOf(key string) reflect.StructTag + // Will return the zero Value if the key doesn't exist. + value(key string) reflect.Value + // Maps require explicit setting-- will do nothing for structs. + // Returns false on failure. 
+ confirmSet(key string, v reflect.Value) bool +} + +type stringMapAdaptor reflect.Value + +func (a stringMapAdaptor) len() int { + return reflect.Value(a).Len() +} + +func (a stringMapAdaptor) keys() []string { + v := reflect.Value(a) + keys := make([]string, v.Len()) + for i, v := range v.MapKeys() { + if v.IsNil() { + continue + } + switch t := v.Interface().(type) { + case string: + keys[i] = t + } + } + return keys +} + +func (a stringMapAdaptor) tagOf(key string) reflect.StructTag { + return "" +} + +func (a stringMapAdaptor) value(key string) reflect.Value { + return reflect.Value(a).MapIndex(reflect.ValueOf(key)) +} + +func (a stringMapAdaptor) confirmSet(key string, v reflect.Value) bool { + return true +} + +type structAdaptor reflect.Value + +func (a structAdaptor) len() int { + v := reflect.Value(a) + return v.Type().NumField() +} + +func (a structAdaptor) keys() []string { + v := reflect.Value(a) + t := v.Type() + keys := make([]string, t.NumField()) + for i := range keys { + keys[i] = t.Field(i).Name + } + return keys +} + +func (a structAdaptor) tagOf(key string) reflect.StructTag { + v := reflect.Value(a) + field, ok := v.Type().FieldByName(key) + if ok { + return field.Tag + } + return "" +} + +func (a structAdaptor) value(key string) reflect.Value { + v := reflect.Value(a) + return v.FieldByName(key) +} + +func (a structAdaptor) confirmSet(key string, v reflect.Value) bool { + return true +} + +// convertKV can convert things that consist of key/value pairs, like structs +// and some maps. +func (c *Converter) convertKV(skv, dkv kvValue, scope *scope) error { + if skv == nil || dkv == nil { + // TODO: add keys to stack to support really understandable error messages. + return fmt.Errorf("Unable to convert %#v to %#v", skv, dkv) + } + + lister := dkv + if scope.flags.IsSet(SourceToDest) { + lister = skv + } + + var mapping FieldMappingFunc + if scope.meta != nil && scope.meta.KeyNameMapping != nil { + mapping = scope.meta.KeyNameMapping + } + + for _, key := range lister.keys() { + if found, err := c.checkField(key, skv, dkv, scope); found { + if err != nil { + return err + } + continue + } + stag := skv.tagOf(key) + dtag := dkv.tagOf(key) + skey := key + dkey := key + if mapping != nil { + skey, dkey = scope.meta.KeyNameMapping(key, stag, dtag) + } + + df := dkv.value(dkey) + sf := skv.value(skey) + if !df.IsValid() || !sf.IsValid() { + switch { + case scope.flags.IsSet(IgnoreMissingFields): + // No error. + case scope.flags.IsSet(SourceToDest): + return scope.errorf("%v not present in dest", dkey) + default: + return scope.errorf("%v not present in src", skey) + } + continue + } + scope.srcStack.top().key = skey + scope.srcStack.top().tag = stag + scope.destStack.top().key = dkey + scope.destStack.top().tag = dtag + if err := c.convert(sf, df, scope); err != nil { + return err + } + } + return nil +} + +// checkField returns true if the field name matches any of the struct +// field copying rules. The error should be ignored if it returns false. +func (c *Converter) checkField(fieldName string, skv, dkv kvValue, scope *scope) (bool, error) { + replacementMade := false + if scope.flags.IsSet(DestFromSource) { + df := dkv.value(fieldName) + if !df.IsValid() { + return false, nil + } + destKey := typeNamePair{df.Type(), fieldName} + // Check each of the potential source (type, name) pairs to see if they're + // present in sv. 
+ for _, potentialSourceKey := range c.structFieldSources[destKey] { + sf := skv.value(potentialSourceKey.fieldName) + if !sf.IsValid() { + continue + } + if sf.Type() == potentialSourceKey.fieldType { + // Both the source's name and type matched, so copy. + scope.srcStack.top().key = potentialSourceKey.fieldName + scope.destStack.top().key = fieldName + if err := c.convert(sf, df, scope); err != nil { + return true, err + } + dkv.confirmSet(fieldName, df) + replacementMade = true + } + } + return replacementMade, nil + } + + sf := skv.value(fieldName) + if !sf.IsValid() { + return false, nil + } + srcKey := typeNamePair{sf.Type(), fieldName} + // Check each of the potential dest (type, name) pairs to see if they're + // present in dv. + for _, potentialDestKey := range c.structFieldDests[srcKey] { + df := dkv.value(potentialDestKey.fieldName) + if !df.IsValid() { + continue + } + if df.Type() == potentialDestKey.fieldType { + // Both the dest's name and type matched, so copy. + scope.srcStack.top().key = fieldName + scope.destStack.top().key = potentialDestKey.fieldName + if err := c.convert(sf, df, scope); err != nil { + return true, err + } + dkv.confirmSet(potentialDestKey.fieldName, df) + replacementMade = true + } + } + return replacementMade, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go new file mode 100644 index 0000000..f21abe1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/deep_equal.go @@ -0,0 +1,36 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "k8s.io/apimachinery/third_party/forked/golang/reflect" +) + +// The code for this type must be located in third_party, since it forks from +// go std lib. But for convenience, we expose the type here, too. +type Equalities struct { + reflect.Equalities +} + +// For convenience, panics on errors +func EqualitiesOrDie(funcs ...interface{}) Equalities { + e := Equalities{reflect.Equalities{}} + if err := e.AddFuncs(funcs...); err != nil { + panic(err) + } + return e +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go new file mode 100644 index 0000000..7415d81 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package conversion provides go object versioning. 
+// +// Specifically, conversion provides a way for you to define multiple versions +// of the same object. You may write functions which implement conversion logic, +// but for the fields which did not change, copying is automated. This makes it +// easy to modify the structures you use in memory without affecting the format +// you store on disk or respond to in your external API calls. +package conversion // import "k8s.io/apimachinery/pkg/conversion" diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/helper.go b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go new file mode 100644 index 0000000..4ebc1eb --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/helper.go @@ -0,0 +1,39 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package conversion + +import ( + "fmt" + "reflect" +) + +// EnforcePtr ensures that obj is a pointer of some sort. Returns a reflect.Value +// of the dereferenced pointer, ensuring that it is settable/addressable. +// Returns an error if this is not possible. +func EnforcePtr(obj interface{}) (reflect.Value, error) { + v := reflect.ValueOf(obj) + if v.Kind() != reflect.Ptr { + if v.Kind() == reflect.Invalid { + return reflect.Value{}, fmt.Errorf("expected pointer, but got invalid kind") + } + return reflect.Value{}, fmt.Errorf("expected pointer, but got %v type", v.Type()) + } + if v.IsNil() { + return reflect.Value{}, fmt.Errorf("expected pointer, but got nil") + } + return v.Elem(), nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go new file mode 100644 index 0000000..2f0dd00 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -0,0 +1,194 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package queryparams + +import ( + "fmt" + "net/url" + "reflect" + "strings" +) + +// Marshaler converts an object to a query parameter string representation +type Marshaler interface { + MarshalQueryParameter() (string, error) +} + +// Unmarshaler converts a string representation to an object +type Unmarshaler interface { + UnmarshalQueryParameter(string) error +} + +func jsonTag(field reflect.StructField) (string, bool) { + structTag := field.Tag.Get("json") + if len(structTag) == 0 { + return "", false + } + parts := strings.Split(structTag, ",") + tag := parts[0] + if tag == "-" { + tag = "" + } + omitempty := false + parts = parts[1:] + for _, part := range parts { + if part == "omitempty" { + omitempty = true + break + } + } + return tag, omitempty +} + +func isPointerKind(kind reflect.Kind) bool { + return kind == reflect.Ptr +} + +func isStructKind(kind reflect.Kind) bool { + return kind == reflect.Struct +} + +func isValueKind(kind reflect.Kind) bool { + switch kind { + case reflect.String, reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, + reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, + reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Float32, + reflect.Float64, reflect.Complex64, reflect.Complex128: + return true + default: + return false + } +} + +func zeroValue(value reflect.Value) bool { + return reflect.DeepEqual(reflect.Zero(value.Type()).Interface(), value.Interface()) +} + +func customMarshalValue(value reflect.Value) (reflect.Value, bool) { + // Return unless we implement a custom query marshaler + if !value.CanInterface() { + return reflect.Value{}, false + } + + marshaler, ok := value.Interface().(Marshaler) + if !ok { + if !isPointerKind(value.Kind()) && value.CanAddr() { + marshaler, ok = value.Addr().Interface().(Marshaler) + if !ok { + return reflect.Value{}, false + } + } else { + return reflect.Value{}, false + } + } + + // Don't invoke functions on nil pointers + // If the type implements MarshalQueryParameter, AND the tag is not omitempty, AND the value is a nil pointer, "" seems like a reasonable response + if isPointerKind(value.Kind()) && zeroValue(value) { + return reflect.ValueOf(""), true + } + + // Get the custom marshalled value + v, err := marshaler.MarshalQueryParameter() + if err != nil { + return reflect.Value{}, false + } + return reflect.ValueOf(v), true +} + +func addParam(values url.Values, tag string, omitempty bool, value reflect.Value) { + if omitempty && zeroValue(value) { + return + } + val := "" + iValue := fmt.Sprintf("%v", value.Interface()) + + if iValue != "" { + val = iValue + } + values.Add(tag, val) +} + +func addListOfParams(values url.Values, tag string, omitempty bool, list reflect.Value) { + for i := 0; i < list.Len(); i++ { + addParam(values, tag, omitempty, list.Index(i)) + } +} + +// Convert takes an object and converts it to a url.Values object using JSON tags as +// parameter names. Only top-level simple values, arrays, and slices are serialized. +// Embedded structs, maps, etc. will not be serialized. 
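+//
+// A minimal usage sketch (the Options struct below is hypothetical, not an API type):
+//
+//	type Options struct {
+//	    Watch  bool   `json:"watch,omitempty"`
+//	    Limit  int64  `json:"limit,omitempty"`
+//	    Pretty string `json:"pretty,omitempty"`
+//	}
+//	v, _ := queryparams.Convert(&Options{Watch: true, Limit: 500})
+//	// v.Encode() == "limit=500&watch=true"; Pretty is dropped by omitempty.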
+func Convert(obj interface{}) (url.Values, error) { + result := url.Values{} + if obj == nil { + return result, nil + } + var sv reflect.Value + switch reflect.TypeOf(obj).Kind() { + case reflect.Ptr, reflect.Interface: + sv = reflect.ValueOf(obj).Elem() + default: + return nil, fmt.Errorf("expecting a pointer or interface") + } + st := sv.Type() + if !isStructKind(st.Kind()) { + return nil, fmt.Errorf("expecting a pointer to a struct") + } + + // Check all object fields + convertStruct(result, st, sv) + + return result, nil +} + +func convertStruct(result url.Values, st reflect.Type, sv reflect.Value) { + for i := 0; i < st.NumField(); i++ { + field := sv.Field(i) + tag, omitempty := jsonTag(st.Field(i)) + if len(tag) == 0 { + continue + } + ft := field.Type() + + kind := ft.Kind() + if isPointerKind(kind) { + ft = ft.Elem() + kind = ft.Kind() + if !field.IsNil() { + field = reflect.Indirect(field) + // If the field is non-nil, it should be added to params + // and the omitempty should be overwite to false + omitempty = false + } + } + + switch { + case isValueKind(kind): + addParam(result, tag, omitempty, field) + case kind == reflect.Array || kind == reflect.Slice: + if isValueKind(ft.Elem().Kind()) { + addListOfParams(result, tag, omitempty, field) + } + case isStructKind(kind) && !(zeroValue(field) && omitempty): + if marshalValue, ok := customMarshalValue(field); ok { + addParam(result, tag, omitempty, marshalValue) + } else { + convertStruct(result, ft, field) + } + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go new file mode 100644 index 0000000..7b763de --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package queryparams provides conversion from versioned +// runtime objects to URL query values +package queryparams // import "k8s.io/apimachinery/pkg/conversion/queryparams" diff --git a/vendor/k8s.io/apimachinery/pkg/fields/doc.go b/vendor/k8s.io/apimachinery/pkg/fields/doc.go new file mode 100644 index 0000000..c39b803 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package fields implements a simple field system, parsing and matching +// selectors with sets of fields. 
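+//
+// A minimal sketch using the package's own types (the field name is hypothetical):
+//
+//	s := fields.Set{"metadata.name": "web-0"}
+//	s.AsSelector().Matches(s) // true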
+package fields // import "k8s.io/apimachinery/pkg/fields" diff --git a/vendor/k8s.io/apimachinery/pkg/fields/fields.go b/vendor/k8s.io/apimachinery/pkg/fields/fields.go new file mode 100644 index 0000000..623b27e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/fields.go @@ -0,0 +1,62 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "sort" + "strings" +) + +// Fields allows you to present fields independently from their storage. +type Fields interface { + // Has returns whether the provided field exists. + Has(field string) (exists bool) + + // Get returns the value for the provided field. + Get(field string) (value string) +} + +// Set is a map of field:value. It implements Fields. +type Set map[string]string + +// String returns all fields listed as a human readable string. +// Conveniently, exactly the format that ParseSelector takes. +func (ls Set) String() string { + selector := make([]string, 0, len(ls)) + for key, value := range ls { + selector = append(selector, key+"="+value) + } + // Sort for determinism. + sort.StringSlice(selector).Sort() + return strings.Join(selector, ",") +} + +// Has returns whether the provided field exists in the map. +func (ls Set) Has(field string) bool { + _, exists := ls[field] + return exists +} + +// Get returns the value in the map for the provided field. +func (ls Set) Get(field string) string { + return ls[field] +} + +// AsSelector converts fields into a selectors. +func (ls Set) AsSelector() Selector { + return SelectorFromSet(ls) +} diff --git a/vendor/k8s.io/apimachinery/pkg/fields/requirements.go b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go new file mode 100644 index 0000000..70d94de --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/requirements.go @@ -0,0 +1,30 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import "k8s.io/apimachinery/pkg/selection" + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Requirement contains a field, a value, and an operator that relates the field and value. +// This is currently for reading internal selection information of field selector. 
+type Requirement struct { + Operator selection.Operator + Field string + Value string +} diff --git a/vendor/k8s.io/apimachinery/pkg/fields/selector.go b/vendor/k8s.io/apimachinery/pkg/fields/selector.go new file mode 100644 index 0000000..e3e4453 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/fields/selector.go @@ -0,0 +1,476 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fields + +import ( + "bytes" + "fmt" + "sort" + "strings" + + "k8s.io/apimachinery/pkg/selection" +) + +// Selector represents a field selector. +type Selector interface { + // Matches returns true if this selector matches the given set of fields. + Matches(Fields) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific field to be set, and if so returns the value it + // requires. + RequiresExactMatch(field string) (value string, found bool) + + // Transform returns a new copy of the selector after TransformFunc has been + // applied to the entire selector, or an error if fn returns an error. + // If for a given requirement both field and value are transformed to empty + // string, the requirement is skipped. + Transform(fn TransformFunc) (Selector, error) + + // Requirements converts this interface to Requirements to expose + // more detailed selection information. + Requirements() Requirements + + // String returns a human readable string that represents this selector. + String() string + + // Make a deep copy of the selector. + DeepCopySelector() Selector +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Fields) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Requirements() Requirements { return nil } +func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) RequiresExactMatch(field string) (value string, found bool) { return "", false } +func (n nothingSelector) Transform(fn TransformFunc) (Selector, error) { return n, nil } + +// Nothing returns a selector that matches no fields +func Nothing() Selector { + return nothingSelector{} +} + +// Everything returns a selector that matches all fields. 
+func Everything() Selector {
+ return andTerm{}
+}
+
+type hasTerm struct {
+ field, value string
+}
+
+func (t *hasTerm) Matches(ls Fields) bool {
+ return ls.Get(t.field) == t.value
+}
+
+func (t *hasTerm) Empty() bool {
+ return false
+}
+
+func (t *hasTerm) RequiresExactMatch(field string) (value string, found bool) {
+ if t.field == field {
+ return t.value, true
+ }
+ return "", false
+}
+
+func (t *hasTerm) Transform(fn TransformFunc) (Selector, error) {
+ field, value, err := fn(t.field, t.value)
+ if err != nil {
+ return nil, err
+ }
+ if len(field) == 0 && len(value) == 0 {
+ return Everything(), nil
+ }
+ return &hasTerm{field, value}, nil
+}
+
+func (t *hasTerm) Requirements() Requirements {
+ return []Requirement{{
+ Field: t.field,
+ Operator: selection.Equals,
+ Value: t.value,
+ }}
+}
+
+func (t *hasTerm) String() string {
+ return fmt.Sprintf("%v=%v", t.field, EscapeValue(t.value))
+}
+
+func (t *hasTerm) DeepCopySelector() Selector {
+ if t == nil {
+ return nil
+ }
+ out := new(hasTerm)
+ *out = *t
+ return out
+}
+
+type notHasTerm struct {
+ field, value string
+}
+
+func (t *notHasTerm) Matches(ls Fields) bool {
+ return ls.Get(t.field) != t.value
+}
+
+func (t *notHasTerm) Empty() bool {
+ return false
+}
+
+func (t *notHasTerm) RequiresExactMatch(field string) (value string, found bool) {
+ return "", false
+}
+
+func (t *notHasTerm) Transform(fn TransformFunc) (Selector, error) {
+ field, value, err := fn(t.field, t.value)
+ if err != nil {
+ return nil, err
+ }
+ if len(field) == 0 && len(value) == 0 {
+ return Everything(), nil
+ }
+ return &notHasTerm{field, value}, nil
+}
+
+func (t *notHasTerm) Requirements() Requirements {
+ return []Requirement{{
+ Field: t.field,
+ Operator: selection.NotEquals,
+ Value: t.value,
+ }}
+}
+
+func (t *notHasTerm) String() string {
+ return fmt.Sprintf("%v!=%v", t.field, EscapeValue(t.value))
+}
+
+func (t *notHasTerm) DeepCopySelector() Selector {
+ if t == nil {
+ return nil
+ }
+ out := new(notHasTerm)
+ *out = *t
+ return out
+}
+
+type andTerm []Selector
+
+func (t andTerm) Matches(ls Fields) bool {
+ for _, q := range t {
+ if !q.Matches(ls) {
+ return false
+ }
+ }
+ return true
+}
+
+func (t andTerm) Empty() bool {
+ if t == nil {
+ return true
+ }
+ if len([]Selector(t)) == 0 {
+ return true
+ }
+ for i := range t {
+ if !t[i].Empty() {
+ return false
+ }
+ }
+ return true
+}
+
+func (t andTerm) RequiresExactMatch(field string) (string, bool) {
+ if t == nil || len([]Selector(t)) == 0 {
+ return "", false
+ }
+ for i := range t {
+ if value, found := t[i].RequiresExactMatch(field); found {
+ return value, found
+ }
+ }
+ return "", false
+}
+
+func (t andTerm) Transform(fn TransformFunc) (Selector, error) {
+ next := make([]Selector, 0, len([]Selector(t)))
+ for _, s := range []Selector(t) {
+ n, err := s.Transform(fn)
+ if err != nil {
+ return nil, err
+ }
+ if !n.Empty() {
+ next = append(next, n)
+ }
+ }
+ return andTerm(next), nil
+}
+
+func (t andTerm) Requirements() Requirements {
+ reqs := make([]Requirement, 0, len(t))
+ for _, s := range []Selector(t) {
+ rs := s.Requirements()
+ reqs = append(reqs, rs...)
+ } + return reqs +} + +func (t andTerm) String() string { + var terms []string + for _, q := range t { + terms = append(terms, q.String()) + } + return strings.Join(terms, ",") +} + +func (t andTerm) DeepCopySelector() Selector { + if t == nil { + return nil + } + out := make([]Selector, len(t)) + for i := range t { + out[i] = t[i].DeepCopySelector() + } + return andTerm(out) +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil Set is considered equivalent to Everything(). +func SelectorFromSet(ls Set) Selector { + if ls == nil { + return Everything() + } + items := make([]Selector, 0, len(ls)) + for field, value := range ls { + items = append(items, &hasTerm{field: field, value: value}) + } + if len(items) == 1 { + return items[0] + } + return andTerm(items) +} + +// valueEscaper prefixes \,= characters with a backslash +var valueEscaper = strings.NewReplacer( + // escape \ characters + `\`, `\\`, + // then escape , and = characters to allow unambiguous parsing of the value in a fieldSelector + `,`, `\,`, + `=`, `\=`, +) + +// EscapeValue escapes an arbitrary literal string for use as a fieldSelector value +func EscapeValue(s string) string { + return valueEscaper.Replace(s) +} + +// InvalidEscapeSequence indicates an error occurred unescaping a field selector +type InvalidEscapeSequence struct { + sequence string +} + +func (i InvalidEscapeSequence) Error() string { + return fmt.Sprintf("invalid field selector: invalid escape sequence: %s", i.sequence) +} + +// UnescapedRune indicates an error occurred unescaping a field selector +type UnescapedRune struct { + r rune +} + +func (i UnescapedRune) Error() string { + return fmt.Sprintf("invalid field selector: unescaped character in value: %v", i.r) +} + +// UnescapeValue unescapes a fieldSelector value and returns the original literal value. +// May return the original string if it contains no escaped or special characters. +func UnescapeValue(s string) (string, error) { + // if there's no escaping or special characters, just return to avoid allocation + if !strings.ContainsAny(s, `\,=`) { + return s, nil + } + + v := bytes.NewBuffer(make([]byte, 0, len(s))) + inSlash := false + for _, c := range s { + if inSlash { + switch c { + case '\\', ',', '=': + // omit the \ for recognized escape sequences + v.WriteRune(c) + default: + // error on unrecognized escape sequences + return "", InvalidEscapeSequence{sequence: string([]rune{'\\', c})} + } + inSlash = false + continue + } + + switch c { + case '\\': + inSlash = true + case ',', '=': + // unescaped , and = characters are not allowed in field selector values + return "", UnescapedRune{r: c} + default: + v.WriteRune(c) + } + } + + // Ending with a single backslash is an invalid sequence + if inSlash { + return "", InvalidEscapeSequence{sequence: "\\"} + } + + return v.String(), nil +} + +// ParseSelectorOrDie takes a string representing a selector and returns an +// object suitable for matching, or panic when an error occur. +func ParseSelectorOrDie(s string) Selector { + selector, err := ParseSelector(s) + if err != nil { + panic(err) + } + return selector +} + +// ParseSelector takes a string representing a selector and returns an +// object suitable for matching, or an error. 
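+//
+// A minimal usage sketch (the field names are hypothetical):
+//
+//	sel, err := fields.ParseSelector("metadata.name=web-0,status.phase!=Running")
+//	if err != nil {
+//	    // handle the malformed-selector error
+//	}
+//	sel.Matches(fields.Set{"metadata.name": "web-0", "status.phase": "Pending"}) // true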
+func ParseSelector(selector string) (Selector, error) {
+ return parseSelector(selector,
+ func(lhs, rhs string) (newLhs, newRhs string, err error) {
+ return lhs, rhs, nil
+ })
+}
+
+// ParseAndTransformSelector parses the selector and runs them through the given TransformFunc.
+func ParseAndTransformSelector(selector string, fn TransformFunc) (Selector, error) {
+ return parseSelector(selector, fn)
+}
+
+// TransformFunc transforms selectors.
+type TransformFunc func(field, value string) (newField, newValue string, err error)
+
+// splitTerms returns the comma-separated terms contained in the given fieldSelector.
+// Backslash-escaped commas are treated as data instead of delimiters, and are included in the returned terms, with the leading backslash preserved.
+func splitTerms(fieldSelector string) []string {
+ if len(fieldSelector) == 0 {
+ return nil
+ }
+
+ terms := make([]string, 0, 1)
+ startIndex := 0
+ inSlash := false
+ for i, c := range fieldSelector {
+ switch {
+ case inSlash:
+ inSlash = false
+ case c == '\\':
+ inSlash = true
+ case c == ',':
+ terms = append(terms, fieldSelector[startIndex:i])
+ startIndex = i + 1
+ }
+ }
+
+ terms = append(terms, fieldSelector[startIndex:])
+
+ return terms
+}
+
+const (
+ notEqualOperator = "!="
+ doubleEqualOperator = "=="
+ equalOperator = "="
+)
+
+// termOperators holds the recognized operators supported in fieldSelectors.
+// doubleEqualOperator and equal are equivalent, but doubleEqualOperator is checked first
+// to avoid leaving a leading = character on the rhs value.
+var termOperators = []string{notEqualOperator, doubleEqualOperator, equalOperator}
+
+// splitTerm returns the lhs, operator, and rhs parsed from the given term, along with an indicator of whether the parse was successful.
+// no escaping of special characters is supported in the lhs value, so the first occurrence of a recognized operator is used as the split point.
+// the literal rhs is returned, and the caller is responsible for applying any desired unescaping.
+func splitTerm(term string) (lhs, op, rhs string, ok bool) {
+ for i := range term {
+ remaining := term[i:]
+ for _, op := range termOperators {
+ if strings.HasPrefix(remaining, op) {
+ return term[0:i], op, term[i+len(op):], true
+ }
+ }
+ }
+ return "", "", "", false
+}
+
+func parseSelector(selector string, fn TransformFunc) (Selector, error) {
+ parts := splitTerms(selector)
+ sort.StringSlice(parts).Sort()
+ var items []Selector
+ for _, part := range parts {
+ if part == "" {
+ continue
+ }
+ lhs, op, rhs, ok := splitTerm(part)
+ if !ok {
+ return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+ }
+ unescapedRHS, err := UnescapeValue(rhs)
+ if err != nil {
+ return nil, err
+ }
+ switch op {
+ case notEqualOperator:
+ items = append(items, &notHasTerm{field: lhs, value: unescapedRHS})
+ case doubleEqualOperator:
+ items = append(items, &hasTerm{field: lhs, value: unescapedRHS})
+ case equalOperator:
+ items = append(items, &hasTerm{field: lhs, value: unescapedRHS})
+ default:
+ return nil, fmt.Errorf("invalid selector: '%s'; can't understand '%s'", selector, part)
+ }
+ }
+ if len(items) == 1 {
+ return items[0].Transform(fn)
+ }
+ return andTerm(items).Transform(fn)
+}
+
+// OneTermEqualSelector returns an object that matches objects where one field/field equals one value.
+// Cannot return an error.
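+//
+// Sketch: fields.OneTermEqualSelector("metadata.namespace", "default") builds the same
+// selector as ParseSelector("metadata.namespace=default") above, without the parsing step.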
+func OneTermEqualSelector(k, v string) Selector {
+ return &hasTerm{field: k, value: v}
+}
+
+// OneTermNotEqualSelector returns an object that matches objects where one field/field does not equal one value.
+// Cannot return an error.
+func OneTermNotEqualSelector(k, v string) Selector {
+ return &notHasTerm{field: k, value: v}
+}
+
+// AndSelectors creates a selector that is the logical AND of all the given selectors
+func AndSelectors(selectors ...Selector) Selector {
+ return andTerm(selectors)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/doc.go b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
new file mode 100644
index 0000000..82de005
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package labels implements a simple label system, parsing and matching
+// selectors with sets of labels.
+package labels // import "k8s.io/apimachinery/pkg/labels"
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
new file mode 100644
index 0000000..abf3ace
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go
@@ -0,0 +1,181 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package labels
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Labels allows you to present labels independently from their storage.
+type Labels interface {
+ // Has returns whether the provided label exists.
+ Has(label string) (exists bool)
+
+ // Get returns the value for the provided label.
+ Get(label string) (value string)
+}
+
+// Set is a map of label:value. It implements Labels.
+type Set map[string]string
+
+// String returns all labels listed as a human readable string.
+// Conveniently, exactly the format that ParseSelector takes.
+func (ls Set) String() string {
+ selector := make([]string, 0, len(ls))
+ for key, value := range ls {
+ selector = append(selector, key+"="+value)
+ }
+ // Sort for determinism.
+ sort.StringSlice(selector).Sort()
+ return strings.Join(selector, ",")
+}
+
+// Has returns whether the provided label exists in the map.
+func (ls Set) Has(label string) bool {
+ _, exists := ls[label]
+ return exists
+}
+
+// Get returns the value in the map for the provided label.
+func (ls Set) Get(label string) string {
+ return ls[label]
+}
+
+// AsSelector converts labels into a selectors.
+func (ls Set) AsSelector() Selector {
+ return SelectorFromSet(ls)
+}
+
+// AsSelectorPreValidated converts labels into a selector, but
+// assumes that labels are already validated and thus don't
+// preform any validation.
+// According to our measurements this is significantly faster
+// in codepaths that matter at high scale.
+func (ls Set) AsSelectorPreValidated() Selector {
+ return SelectorFromValidatedSet(ls)
+}
+
+// FormatLabels convert label map into plain string
+func FormatLabels(labelMap map[string]string) string {
+ l := Set(labelMap).String()
+ if l == "" {
+ l = "<none>"
+ }
+ return l
+}
+
+// Conflicts takes 2 maps and returns true if there a key match between
+// the maps but the value doesn't match, and returns false in other cases
+func Conflicts(labels1, labels2 Set) bool {
+ small := labels1
+ big := labels2
+ if len(labels2) < len(labels1) {
+ small = labels2
+ big = labels1
+ }
+
+ for k, v := range small {
+ if val, match := big[k]; match {
+ if val != v {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// Merge combines given maps, and does not check for any conflicts
+// between the maps. In case of conflicts, second map (labels2) wins
+func Merge(labels1, labels2 Set) Set {
+ mergedMap := Set{}
+
+ for k, v := range labels1 {
+ mergedMap[k] = v
+ }
+ for k, v := range labels2 {
+ mergedMap[k] = v
+ }
+ return mergedMap
+}
+
+// Equals returns true if the given maps are equal
+func Equals(labels1, labels2 Set) bool {
+ if len(labels1) != len(labels2) {
+ return false
+ }
+
+ for k, v := range labels1 {
+ value, ok := labels2[k]
+ if !ok {
+ return false
+ }
+ if value != v {
+ return false
+ }
+ }
+ return true
+}
+
+// AreLabelsInWhiteList verifies if the provided label list
+// is in the provided whitelist and returns true, otherwise false.
+func AreLabelsInWhiteList(labels, whitelist Set) bool {
+ if len(whitelist) == 0 {
+ return true
+ }
+
+ for k, v := range labels {
+ value, ok := whitelist[k]
+ if !ok {
+ return false
+ }
+ if value != v {
+ return false
+ }
+ }
+ return true
+}
+
+// ConvertSelectorToLabelsMap converts selector string to labels map
+// and validates keys and values
+func ConvertSelectorToLabelsMap(selector string) (Set, error) {
+ labelsMap := Set{}
+
+ if len(selector) == 0 {
+ return labelsMap, nil
+ }
+
+ labels := strings.Split(selector, ",")
+ for _, label := range labels {
+ l := strings.Split(label, "=")
+ if len(l) != 2 {
+ return labelsMap, fmt.Errorf("invalid selector: %s", l)
+ }
+ key := strings.TrimSpace(l[0])
+ if err := validateLabelKey(key); err != nil {
+ return labelsMap, err
+ }
+ value := strings.TrimSpace(l[1])
+ if err := validateLabelValue(key, value); err != nil {
+ return labelsMap, err
+ }
+ labelsMap[key] = value
+ }
+ return labelsMap, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
new file mode 100644
index 0000000..2f8e1e2
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go
@@ -0,0 +1,914 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package labels + +import ( + "bytes" + "fmt" + "sort" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/selection" + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/apimachinery/pkg/util/validation" + "k8s.io/klog" +) + +// Requirements is AND of all requirements. +type Requirements []Requirement + +// Selector represents a label selector. +type Selector interface { + // Matches returns true if this selector matches the given set of labels. + Matches(Labels) bool + + // Empty returns true if this selector does not restrict the selection space. + Empty() bool + + // String returns a human readable string that represents this selector. + String() string + + // Add adds requirements to the Selector + Add(r ...Requirement) Selector + + // Requirements converts this interface into Requirements to expose + // more detailed selection information. + // If there are querying parameters, it will return converted requirements and selectable=true. + // If this selector doesn't want to select anything, it will return selectable=false. + Requirements() (requirements Requirements, selectable bool) + + // Make a deep copy of the selector. + DeepCopySelector() Selector + + // RequiresExactMatch allows a caller to introspect whether a given selector + // requires a single specific label to be set, and if so returns the value it + // requires. + RequiresExactMatch(label string) (value string, found bool) +} + +// Everything returns a selector that matches all labels. +func Everything() Selector { + return internalSelector{} +} + +type nothingSelector struct{} + +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } +func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { return "", false } + +// Nothing returns a selector that matches no labels +func Nothing() Selector { + return nothingSelector{} +} + +// NewSelector returns a nil selector +func NewSelector() Selector { + return internalSelector(nil) +} + +type internalSelector []Requirement + +func (s internalSelector) DeepCopy() internalSelector { + if s == nil { + return nil + } + result := make([]Requirement, len(s)) + for i := range s { + s[i].DeepCopyInto(&result[i]) + } + return result +} + +func (s internalSelector) DeepCopySelector() Selector { + return s.DeepCopy() +} + +// ByKey sorts requirements by key to obtain deterministic parser +type ByKey []Requirement + +func (a ByKey) Len() int { return len(a) } + +func (a ByKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +func (a ByKey) Less(i, j int) bool { return a[i].key < a[j].key } + +// Requirement contains values, a key, and an operator that relates the key and values. +// The zero value of Requirement is invalid. +// Requirement implements both set based match and exact match +// Requirement should be initialized via NewRequirement constructor for creating a valid Requirement. +// +k8s:deepcopy-gen=true +type Requirement struct { + key string + operator selection.Operator + // In huge majority of cases we have at most one value here. 
+ // It is generally faster to operate on a single-element slice + // than on a single-element map, so we have a slice here. + strValues []string +} + +// NewRequirement is the constructor for a Requirement. +// If any of these rules is violated, an error is returned: +// (1) The operator can only be In, NotIn, Equals, DoubleEquals, NotEquals, Exists, or DoesNotExist. +// (2) If the operator is In or NotIn, the values set must be non-empty. +// (3) If the operator is Equals, DoubleEquals, or NotEquals, the values set must contain one value. +// (4) If the operator is Exists or DoesNotExist, the value set must be empty. +// (5) If the operator is Gt or Lt, the values set must contain only one value, which will be interpreted as an integer. +// (6) The key is invalid due to its length, or sequence +// of characters. See validateLabelKey for more details. +// +// The empty string is a valid value in the input values set. +func NewRequirement(key string, op selection.Operator, vals []string) (*Requirement, error) { + if err := validateLabelKey(key); err != nil { + return nil, err + } + switch op { + case selection.In, selection.NotIn: + if len(vals) == 0 { + return nil, fmt.Errorf("for 'in', 'notin' operators, values set can't be empty") + } + case selection.Equals, selection.DoubleEquals, selection.NotEquals: + if len(vals) != 1 { + return nil, fmt.Errorf("exact-match compatibility requires one single value") + } + case selection.Exists, selection.DoesNotExist: + if len(vals) != 0 { + return nil, fmt.Errorf("values set must be empty for exists and does not exist") + } + case selection.GreaterThan, selection.LessThan: + if len(vals) != 1 { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, exactly one value is required") + } + for i := range vals { + if _, err := strconv.ParseInt(vals[i], 10, 64); err != nil { + return nil, fmt.Errorf("for 'Gt', 'Lt' operators, the value must be an integer") + } + } + default: + return nil, fmt.Errorf("operator '%v' is not recognized", op) + } + + for i := range vals { + if err := validateLabelValue(key, vals[i]); err != nil { + return nil, err + } + } + return &Requirement{key: key, operator: op, strValues: vals}, nil +} + +func (r *Requirement) hasValue(value string) bool { + for i := range r.strValues { + if r.strValues[i] == value { + return true + } + } + return false +} + +// Matches returns true if the Requirement matches the input Labels. +// There is a match in the following cases: +// (1) The operator is Exists and Labels has the Requirement's key. +// (2) The operator is In, Labels has the Requirement's key and Labels' +// value for that key is in Requirement's value set. +// (3) The operator is NotIn, Labels has the Requirement's key and +// Labels' value for that key is not in Requirement's value set. +// (4) The operator is DoesNotExist or NotIn and Labels does not have the +// Requirement's key. +// (5) The operator is GreaterThanOperator or LessThanOperator, and Labels has +// the Requirement's key and the corresponding value satisfies mathematical inequality. 
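+//
+// A minimal usage sketch (label keys and values are hypothetical):
+//
+//	r, _ := labels.NewRequirement("tier", selection.In, []string{"web", "api"})
+//	r.Matches(labels.Set{"tier": "web"}) // true  (case 2)
+//	r.Matches(labels.Set{"app": "db"})   // false (key "tier" is absent)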
+func (r *Requirement) Matches(ls Labels) bool { + switch r.operator { + case selection.In, selection.Equals, selection.DoubleEquals: + if !ls.Has(r.key) { + return false + } + return r.hasValue(ls.Get(r.key)) + case selection.NotIn, selection.NotEquals: + if !ls.Has(r.key) { + return true + } + return !r.hasValue(ls.Get(r.key)) + case selection.Exists: + return ls.Has(r.key) + case selection.DoesNotExist: + return !ls.Has(r.key) + case selection.GreaterThan, selection.LessThan: + if !ls.Has(r.key) { + return false + } + lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) + if err != nil { + klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + return false + } + + // There should be only one strValue in r.strValues, and can be converted to a integer. + if len(r.strValues) != 1 { + klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + return false + } + + var rValue int64 + for i := range r.strValues { + rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) + if err != nil { + klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + return false + } + } + return (r.operator == selection.GreaterThan && lsValue > rValue) || (r.operator == selection.LessThan && lsValue < rValue) + default: + return false + } +} + +// Key returns requirement key +func (r *Requirement) Key() string { + return r.key +} + +// Operator returns requirement operator +func (r *Requirement) Operator() selection.Operator { + return r.operator +} + +// Values returns requirement values +func (r *Requirement) Values() sets.String { + ret := sets.String{} + for i := range r.strValues { + ret.Insert(r.strValues[i]) + } + return ret +} + +// Empty returns true if the internalSelector doesn't restrict selection space +func (lsel internalSelector) Empty() bool { + if lsel == nil { + return true + } + return len(lsel) == 0 +} + +// String returns a human-readable string that represents this +// Requirement. If called on an invalid Requirement, an error is +// returned. See NewRequirement for creating a valid Requirement. 
+func (r *Requirement) String() string { + var buffer bytes.Buffer + if r.operator == selection.DoesNotExist { + buffer.WriteString("!") + } + buffer.WriteString(r.key) + + switch r.operator { + case selection.Equals: + buffer.WriteString("=") + case selection.DoubleEquals: + buffer.WriteString("==") + case selection.NotEquals: + buffer.WriteString("!=") + case selection.In: + buffer.WriteString(" in ") + case selection.NotIn: + buffer.WriteString(" notin ") + case selection.GreaterThan: + buffer.WriteString(">") + case selection.LessThan: + buffer.WriteString("<") + case selection.Exists, selection.DoesNotExist: + return buffer.String() + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString("(") + } + if len(r.strValues) == 1 { + buffer.WriteString(r.strValues[0]) + } else { // only > 1 since == 0 prohibited by NewRequirement + // normalizes value order on output, without mutating the in-memory selector representation + // also avoids normalization when it is not required, and ensures we do not mutate shared data + buffer.WriteString(strings.Join(safeSort(r.strValues), ",")) + } + + switch r.operator { + case selection.In, selection.NotIn: + buffer.WriteString(")") + } + return buffer.String() +} + +// safeSort sort input strings without modification +func safeSort(in []string) []string { + if sort.StringsAreSorted(in) { + return in + } + out := make([]string, len(in)) + copy(out, in) + sort.Strings(out) + return out +} + +// Add adds requirements to the selector. It copies the current selector returning a new one +func (lsel internalSelector) Add(reqs ...Requirement) Selector { + var sel internalSelector + for ix := range lsel { + sel = append(sel, lsel[ix]) + } + for _, r := range reqs { + sel = append(sel, r) + } + sort.Sort(ByKey(sel)) + return sel +} + +// Matches for a internalSelector returns true if all +// its Requirements match the input Labels. If any +// Requirement does not match, false is returned. +func (lsel internalSelector) Matches(l Labels) bool { + for ix := range lsel { + if matches := lsel[ix].Matches(l); !matches { + return false + } + } + return true +} + +func (lsel internalSelector) Requirements() (Requirements, bool) { return Requirements(lsel), true } + +// String returns a comma-separated string of all +// the internalSelector Requirements' human-readable strings. +func (lsel internalSelector) String() string { + var reqs []string + for ix := range lsel { + reqs = append(reqs, lsel[ix].String()) + } + return strings.Join(reqs, ",") +} + +// RequiresExactMatch introspect whether a given selector requires a single specific field +// to be set, and if so returns the value it requires. 
+func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { + for ix := range lsel { + if lsel[ix].key == label { + switch lsel[ix].operator { + case selection.Equals, selection.DoubleEquals, selection.In: + if len(lsel[ix].strValues) == 1 { + return lsel[ix].strValues[0], true + } + } + return "", false + } + } + return "", false +} + +// Token represents constant definition for lexer token +type Token int + +const ( + // ErrorToken represents scan error + ErrorToken Token = iota + // EndOfStringToken represents end of string + EndOfStringToken + // ClosedParToken represents close parenthesis + ClosedParToken + // CommaToken represents the comma + CommaToken + // DoesNotExistToken represents logic not + DoesNotExistToken + // DoubleEqualsToken represents double equals + DoubleEqualsToken + // EqualsToken represents equal + EqualsToken + // GreaterThanToken represents greater than + GreaterThanToken + // IdentifierToken represents identifier, e.g. keys and values + IdentifierToken + // InToken represents in + InToken + // LessThanToken represents less than + LessThanToken + // NotEqualsToken represents not equal + NotEqualsToken + // NotInToken represents not in + NotInToken + // OpenParToken represents open parenthesis + OpenParToken +) + +// string2token contains the mapping between lexer Token and token literal +// (except IdentifierToken, EndOfStringToken and ErrorToken since it makes no sense) +var string2token = map[string]Token{ + ")": ClosedParToken, + ",": CommaToken, + "!": DoesNotExistToken, + "==": DoubleEqualsToken, + "=": EqualsToken, + ">": GreaterThanToken, + "in": InToken, + "<": LessThanToken, + "!=": NotEqualsToken, + "notin": NotInToken, + "(": OpenParToken, +} + +// ScannedItem contains the Token and the literal produced by the lexer. +type ScannedItem struct { + tok Token + literal string +} + +// isWhitespace returns true if the rune is a space, tab, or newline. +func isWhitespace(ch byte) bool { + return ch == ' ' || ch == '\t' || ch == '\r' || ch == '\n' +} + +// isSpecialSymbol detect if the character ch can be an operator +func isSpecialSymbol(ch byte) bool { + switch ch { + case '=', '!', '(', ')', ',', '>', '<': + return true + } + return false +} + +// Lexer represents the Lexer struct for label selector. +// It contains necessary informationt to tokenize the input string +type Lexer struct { + // s stores the string to be tokenized + s string + // pos is the position currently tokenized + pos int +} + +// read return the character currently lexed +// increment the position and check the buffer overflow +func (l *Lexer) read() (b byte) { + b = 0 + if l.pos < len(l.s) { + b = l.s[l.pos] + l.pos++ + } + return b +} + +// unread 'undoes' the last read character +func (l *Lexer) unread() { + l.pos-- +} + +// scanIDOrKeyword scans string to recognize literal token (for example 'in') or an identifier. +func (l *Lexer) scanIDOrKeyword() (tok Token, lit string) { + var buffer []byte +IdentifierLoop: + for { + switch ch := l.read(); { + case ch == 0: + break IdentifierLoop + case isSpecialSymbol(ch) || isWhitespace(ch): + l.unread() + break IdentifierLoop + default: + buffer = append(buffer, ch) + } + } + s := string(buffer) + if val, ok := string2token[s]; ok { // is a literal token? + return val, s + } + return IdentifierToken, s // otherwise is an identifier +} + +// scanSpecialSymbol scans string starting with special symbol. +// special symbol identify non literal operators. 
"!=", "==", "=" +func (l *Lexer) scanSpecialSymbol() (Token, string) { + lastScannedItem := ScannedItem{} + var buffer []byte +SpecialSymbolLoop: + for { + switch ch := l.read(); { + case ch == 0: + break SpecialSymbolLoop + case isSpecialSymbol(ch): + buffer = append(buffer, ch) + if token, ok := string2token[string(buffer)]; ok { + lastScannedItem = ScannedItem{tok: token, literal: string(buffer)} + } else if lastScannedItem.tok != 0 { + l.unread() + break SpecialSymbolLoop + } + default: + l.unread() + break SpecialSymbolLoop + } + } + if lastScannedItem.tok == 0 { + return ErrorToken, fmt.Sprintf("error expected: keyword found '%s'", buffer) + } + return lastScannedItem.tok, lastScannedItem.literal +} + +// skipWhiteSpaces consumes all blank characters +// returning the first non blank character +func (l *Lexer) skipWhiteSpaces(ch byte) byte { + for { + if !isWhitespace(ch) { + return ch + } + ch = l.read() + } +} + +// Lex returns a pair of Token and the literal +// literal is meaningfull only for IdentifierToken token +func (l *Lexer) Lex() (tok Token, lit string) { + switch ch := l.skipWhiteSpaces(l.read()); { + case ch == 0: + return EndOfStringToken, "" + case isSpecialSymbol(ch): + l.unread() + return l.scanSpecialSymbol() + default: + l.unread() + return l.scanIDOrKeyword() + } +} + +// Parser data structure contains the label selector parser data structure +type Parser struct { + l *Lexer + scannedItems []ScannedItem + position int +} + +// ParserContext represents context during parsing: +// some literal for example 'in' and 'notin' can be +// recognized as operator for example 'x in (a)' but +// it can be recognized as value for example 'value in (in)' +type ParserContext int + +const ( + // KeyAndOperator represents key and operator + KeyAndOperator ParserContext = iota + // Values represents values + Values +) + +// lookahead func returns the current token and string. No increment of current position +func (p *Parser) lookahead(context ParserContext) (Token, string) { + tok, lit := p.scannedItems[p.position].tok, p.scannedItems[p.position].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// consume returns current token and string. Increments the position +func (p *Parser) consume(context ParserContext) (Token, string) { + p.position++ + tok, lit := p.scannedItems[p.position-1].tok, p.scannedItems[p.position-1].literal + if context == Values { + switch tok { + case InToken, NotInToken: + tok = IdentifierToken + } + } + return tok, lit +} + +// scan runs through the input string and stores the ScannedItem in an array +// Parser can now lookahead and consume the tokens +func (p *Parser) scan() { + for { + token, literal := p.l.Lex() + p.scannedItems = append(p.scannedItems, ScannedItem{token, literal}) + if token == EndOfStringToken { + break + } + } +} + +// parse runs the left recursive descending algorithm +// on input string. It returns a list of Requirement objects. 
+func (p *Parser) parse() (internalSelector, error) { + p.scan() // init scannedItems + + var requirements internalSelector + for { + tok, lit := p.lookahead(Values) + switch tok { + case IdentifierToken, DoesNotExistToken: + r, err := p.parseRequirement() + if err != nil { + return nil, fmt.Errorf("unable to parse requirement: %v", err) + } + requirements = append(requirements, *r) + t, l := p.consume(Values) + switch t { + case EndOfStringToken: + return requirements, nil + case CommaToken: + t2, l2 := p.lookahead(Values) + if t2 != IdentifierToken && t2 != DoesNotExistToken { + return nil, fmt.Errorf("found '%s', expected: identifier after ','", l2) + } + default: + return nil, fmt.Errorf("found '%s', expected: ',' or 'end of string'", l) + } + case EndOfStringToken: + return requirements, nil + default: + return nil, fmt.Errorf("found '%s', expected: !, identifier, or 'end of string'", lit) + } + } +} + +func (p *Parser) parseRequirement() (*Requirement, error) { + key, operator, err := p.parseKeyAndInferOperator() + if err != nil { + return nil, err + } + if operator == selection.Exists || operator == selection.DoesNotExist { // operator found lookahead set checked + return NewRequirement(key, operator, []string{}) + } + operator, err = p.parseOperator() + if err != nil { + return nil, err + } + var values sets.String + switch operator { + case selection.In, selection.NotIn: + values, err = p.parseValues() + case selection.Equals, selection.DoubleEquals, selection.NotEquals, selection.GreaterThan, selection.LessThan: + values, err = p.parseExactValue() + } + if err != nil { + return nil, err + } + return NewRequirement(key, operator, values.List()) + +} + +// parseKeyAndInferOperator parse literals. +// in case of no operator '!, in, notin, ==, =, !=' are found +// the 'exists' operator is inferred +func (p *Parser) parseKeyAndInferOperator() (string, selection.Operator, error) { + var operator selection.Operator + tok, literal := p.consume(Values) + if tok == DoesNotExistToken { + operator = selection.DoesNotExist + tok, literal = p.consume(Values) + } + if tok != IdentifierToken { + err := fmt.Errorf("found '%s', expected: identifier", literal) + return "", "", err + } + if err := validateLabelKey(literal); err != nil { + return "", "", err + } + if t, _ := p.lookahead(Values); t == EndOfStringToken || t == CommaToken { + if operator != selection.DoesNotExist { + operator = selection.Exists + } + } + return literal, operator, nil +} + +// parseOperator return operator and eventually matchType +// matchType can be exact +func (p *Parser) parseOperator() (op selection.Operator, err error) { + tok, lit := p.consume(KeyAndOperator) + switch tok { + // DoesNotExistToken shouldn't be here because it's a unary operator, not a binary operator + case InToken: + op = selection.In + case EqualsToken: + op = selection.Equals + case DoubleEqualsToken: + op = selection.DoubleEquals + case GreaterThanToken: + op = selection.GreaterThan + case LessThanToken: + op = selection.LessThan + case NotInToken: + op = selection.NotIn + case NotEqualsToken: + op = selection.NotEquals + default: + return "", fmt.Errorf("found '%s', expected: '=', '!=', '==', 'in', notin'", lit) + } + return op, nil +} + +// parseValues parses the values for set based matching (x,y,z) +func (p *Parser) parseValues() (sets.String, error) { + tok, lit := p.consume(Values) + if tok != OpenParToken { + return nil, fmt.Errorf("found '%s' expected: '('", lit) + } + tok, lit = p.lookahead(Values) + switch tok { + case 
IdentifierToken, CommaToken:
+ s, err := p.parseIdentifiersList() // handles general cases
+ if err != nil {
+ return s, err
+ }
+ if tok, _ = p.consume(Values); tok != ClosedParToken {
+ return nil, fmt.Errorf("found '%s', expected: ')'", lit)
+ }
+ return s, nil
+ case ClosedParToken: // handles "()"
+ p.consume(Values)
+ return sets.NewString(""), nil
+ default:
+ return nil, fmt.Errorf("found '%s', expected: ',', ')' or identifier", lit)
+ }
+}
+
+// parseIdentifiersList parses a (possibly empty) list of
+// of comma separated (possibly empty) identifiers
+func (p *Parser) parseIdentifiersList() (sets.String, error) {
+ s := sets.NewString()
+ for {
+ tok, lit := p.consume(Values)
+ switch tok {
+ case IdentifierToken:
+ s.Insert(lit)
+ tok2, lit2 := p.lookahead(Values)
+ switch tok2 {
+ case CommaToken:
+ continue
+ case ClosedParToken:
+ return s, nil
+ default:
+ return nil, fmt.Errorf("found '%s', expected: ',' or ')'", lit2)
+ }
+ case CommaToken: // handled here since we can have "(,"
+ if s.Len() == 0 {
+ s.Insert("") // to handle (,
+ }
+ tok2, _ := p.lookahead(Values)
+ if tok2 == ClosedParToken {
+ s.Insert("") // to handle ,) Double "" removed by StringSet
+ return s, nil
+ }
+ if tok2 == CommaToken {
+ p.consume(Values)
+ s.Insert("") // to handle ,, Double "" removed by StringSet
+ }
+ default: // it can be operator
+ return s, fmt.Errorf("found '%s', expected: ',', or identifier", lit)
+ }
+ }
+}
+
+// parseExactValue parses the only value for exact match style
+func (p *Parser) parseExactValue() (sets.String, error) {
+ s := sets.NewString()
+ tok, lit := p.lookahead(Values)
+ if tok == EndOfStringToken || tok == CommaToken {
+ s.Insert("")
+ return s, nil
+ }
+ tok, lit = p.consume(Values)
+ if tok == IdentifierToken {
+ s.Insert(lit)
+ return s, nil
+ }
+ return nil, fmt.Errorf("found '%s', expected: identifier", lit)
+}
+
+// Parse takes a string representing a selector and returns a selector
+// object, or an error. This parsing function differs from ParseSelector
+// as they parse different selectors with different syntaxes.
+// The input will cause an error if it does not follow this form:
+//
+// <selector-syntax> ::= <requirement> | <requirement> "," <selector-syntax>
+// <requirement> ::= [!] KEY [ <set-based-restriction> | <exact-match-restriction> ]
+// <set-based-restriction> ::= "" | <inclusion-exclusion> <value-set>
+// <inclusion-exclusion> ::= <inclusion> | <exclusion>
+// <exclusion> ::= "notin"
+// <inclusion> ::= "in"
+// <value-set> ::= "(" <values> ")"
+// <values> ::= VALUE | VALUE "," <values>
+// <exact-match-restriction> ::= ["="|"=="|"!="] VALUE
+//
+// KEY is a sequence of one or more characters following [ DNS_SUBDOMAIN "/" ] DNS_LABEL. Max length is 63 characters.
+// VALUE is a sequence of zero or more characters "([A-Za-z0-9_-\.])". Max length is 63 characters.
+// Delimiter is white space: (' ', '\t')
+// Example of valid syntax:
+// "x in (foo,,baz),y,z notin ()"
+//
+// Note:
+// (1) Inclusion - " in " - denotes that the KEY exists and is equal to any of the
+// VALUEs in its requirement
+// (2) Exclusion - " notin " - denotes that the KEY is not equal to any
+// of the VALUEs in its requirement or does not exist
+// (3) The empty string is a valid VALUE
+// (4) A requirement with just a KEY - as in "y" above - denotes that
+// the KEY exists and can be any VALUE.
+// (5) A requirement with just !KEY requires that the KEY not exist.
+//
+func Parse(selector string) (Selector, error) {
+ parsedSelector, err := parse(selector)
+ if err == nil {
+ return parsedSelector, nil
+ }
+ return nil, err
+}
+
+// parse parses the string representation of the selector and returns the internalSelector struct.
+// The callers of this method can then decide how to return the internalSelector struct to their
This function has two callers now, one returns a Selector interface and the other +// returns a list of requirements. +func parse(selector string) (internalSelector, error) { + p := &Parser{l: &Lexer{s: selector, pos: 0}} + items, err := p.parse() + if err != nil { + return nil, err + } + sort.Sort(ByKey(items)) // sort to grant determistic parsing + return internalSelector(items), err +} + +func validateLabelKey(k string) error { + if errs := validation.IsQualifiedName(k); len(errs) != 0 { + return fmt.Errorf("invalid label key %q: %s", k, strings.Join(errs, "; ")) + } + return nil +} + +func validateLabelValue(k, v string) error { + if errs := validation.IsValidLabelValue(v); len(errs) != 0 { + return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; ")) + } + return nil +} + +// SelectorFromSet returns a Selector which will match exactly the given Set. A +// nil and empty Sets are considered equivalent to Everything(). +func SelectorFromSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + requirements := make([]Requirement, 0, len(ls)) + for label, value := range ls { + r, err := NewRequirement(label, selection.Equals, []string{value}) + if err == nil { + requirements = append(requirements, *r) + } else { + //TODO: double check errors when input comes from serialization? + return internalSelector{} + } + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return internalSelector(requirements) +} + +// SelectorFromValidatedSet returns a Selector which will match exactly the given Set. +// A nil and empty Sets are considered equivalent to Everything(). +// It assumes that Set is already validated and doesn't do any validation. +func SelectorFromValidatedSet(ls Set) Selector { + if ls == nil || len(ls) == 0 { + return internalSelector{} + } + requirements := make([]Requirement, 0, len(ls)) + for label, value := range ls { + requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) + } + // sort to have deterministic string representation + sort.Sort(ByKey(requirements)) + return internalSelector(requirements) +} + +// ParseToRequirements takes a string representing a selector and returns a list of +// requirements. This function is suitable for those callers that perform additional +// processing on selector requirements. +// See the documentation for Parse() function for more details. +// TODO: Consider exporting the internalSelector type instead. +func ParseToRequirements(selector string) ([]Requirement, error) { + return parse(selector) +} diff --git a/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go new file mode 100644 index 0000000..4d48294 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/labels/zz_generated.deepcopy.go @@ -0,0 +1,42 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package labels + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Requirement) DeepCopyInto(out *Requirement) { + *out = *in + if in.strValues != nil { + in, out := &in.strValues, &out.strValues + *out = make([]string, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Requirement. +func (in *Requirement) DeepCopy() *Requirement { + if in == nil { + return nil + } + out := new(Requirement) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go new file mode 100644 index 0000000..0bccf9d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -0,0 +1,396 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "net/url" + "reflect" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/conversion/queryparams" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" +) + +// codec binds an encoder and decoder. +type codec struct { + Encoder + Decoder +} + +// NewCodec creates a Codec from an Encoder and Decoder. +func NewCodec(e Encoder, d Decoder) Codec { + return codec{e, d} +} + +// Encode is a convenience wrapper for encoding to a []byte from an Encoder +func Encode(e Encoder, obj Object) ([]byte, error) { + // TODO: reuse buffer + buf := &bytes.Buffer{} + if err := e.Encode(obj, buf); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// Decode is a convenience wrapper for decoding data into an Object. +func Decode(d Decoder, data []byte) (Object, error) { + obj, _, err := d.Decode(data, nil, nil) + return obj, err +} + +// DecodeInto performs a Decode into the provided object. +func DecodeInto(d Decoder, data []byte, into Object) error { + out, gvk, err := d.Decode(data, nil, into) + if err != nil { + return err + } + if out != into { + return fmt.Errorf("unable to decode %s into %v", gvk, reflect.TypeOf(into)) + } + return nil +} + +// EncodeOrDie is a version of Encode which will panic instead of returning an error. For tests. +func EncodeOrDie(e Encoder, obj Object) string { + bytes, err := Encode(e, obj) + if err != nil { + panic(err) + } + return string(bytes) +} + +// UseOrCreateObject returns obj if the canonical ObjectKind returned by the provided typer matches gvk, or +// invokes the ObjectCreator to instantiate a new gvk. Returns an error if the typer cannot find the object. 
+func UseOrCreateObject(t ObjectTyper, c ObjectCreater, gvk schema.GroupVersionKind, obj Object) (Object, error) {
+ if obj != nil {
+ kinds, _, err := t.ObjectKinds(obj)
+ if err != nil {
+ return nil, err
+ }
+ for _, kind := range kinds {
+ if gvk == kind {
+ return obj, nil
+ }
+ }
+ }
+ return c.New(gvk)
+}
+
+// NoopEncoder converts an Decoder to a Serializer or Codec for code that expects them but only uses decoding.
+type NoopEncoder struct {
+ Decoder
+}
+
+var _ Serializer = NoopEncoder{}
+
+const noopEncoderIdentifier Identifier = "noop"
+
+func (n NoopEncoder) Encode(obj Object, w io.Writer) error {
+ // There is no need to handle runtime.CacheableObject, as we don't
+ // process the obj at all.
+ return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder))
+}
+
+// Identifier implements runtime.Encoder interface.
+func (n NoopEncoder) Identifier() Identifier {
+ return noopEncoderIdentifier
+}
+
+// NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding.
+type NoopDecoder struct {
+ Encoder
+}
+
+var _ Serializer = NoopDecoder{}
+
+func (n NoopDecoder) Decode(data []byte, gvk *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) {
+ return nil, nil, fmt.Errorf("decoding is not allowed for this codec: %v", reflect.TypeOf(n.Encoder))
+}
+
+// NewParameterCodec creates a ParameterCodec capable of transforming url values into versioned objects and back.
+func NewParameterCodec(scheme *Scheme) ParameterCodec {
+ return &parameterCodec{
+ typer: scheme,
+ convertor: scheme,
+ creator: scheme,
+ defaulter: scheme,
+ }
+}
+
+// parameterCodec implements conversion to and from query parameters and objects.
+type parameterCodec struct {
+ typer ObjectTyper
+ convertor ObjectConvertor
+ creator ObjectCreater
+ defaulter ObjectDefaulter
+}
+
+var _ ParameterCodec = &parameterCodec{}
+
+// DecodeParameters converts the provided url.Values into an object of type From with the kind of into, and then
+// converts that object to into (if necessary). Returns an error if the operation cannot be completed.
+func (c *parameterCodec) DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error {
+ if len(parameters) == 0 {
+ return nil
+ }
+ targetGVKs, _, err := c.typer.ObjectKinds(into)
+ if err != nil {
+ return err
+ }
+ for i := range targetGVKs {
+ if targetGVKs[i].GroupVersion() == from {
+ if err := c.convertor.Convert(&parameters, into, nil); err != nil {
+ return err
+ }
+ // in the case where we going into the same object we're receiving, default on the outbound object
+ if c.defaulter != nil {
+ c.defaulter.Default(into)
+ }
+ return nil
+ }
+ }
+
+ input, err := c.creator.New(from.WithKind(targetGVKs[0].Kind))
+ if err != nil {
+ return err
+ }
+ if err := c.convertor.Convert(&parameters, input, nil); err != nil {
+ return err
+ }
+ // if we have defaulter, default the input before converting to output
+ if c.defaulter != nil {
+ c.defaulter.Default(input)
+ }
+ return c.convertor.Convert(input, into, nil)
+}
+
+// EncodeParameters converts the provided object into the to version, then converts that object to url.Values.
+// Returns an error if conversion is not possible.
+func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) { + gvks, _, err := c.typer.ObjectKinds(obj) + if err != nil { + return nil, err + } + gvk := gvks[0] + if to != gvk.GroupVersion() { + out, err := c.convertor.ConvertToVersion(obj, to) + if err != nil { + return nil, err + } + obj = out + } + return queryparams.Convert(obj) +} + +type base64Serializer struct { + Encoder + Decoder + + identifier Identifier +} + +func NewBase64Serializer(e Encoder, d Decoder) Serializer { + return &base64Serializer{ + Encoder: e, + Decoder: d, + identifier: identifier(e), + } +} + +func identifier(e Encoder) Identifier { + result := map[string]string{ + "name": "base64", + } + if e != nil { + result["encoder"] = string(e.Identifier()) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for base64Serializer: %v", err) + } + return Identifier(identifier) +} + +func (s base64Serializer) Encode(obj Object, stream io.Writer) error { + if co, ok := obj.(CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, stream) + } + return s.doEncode(obj, stream) +} + +func (s base64Serializer) doEncode(obj Object, stream io.Writer) error { + e := base64.NewEncoder(base64.StdEncoding, stream) + err := s.Encoder.Encode(obj, e) + e.Close() + return err +} + +// Identifier implements runtime.Encoder interface. +func (s base64Serializer) Identifier() Identifier { + return s.identifier +} + +func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) + n, err := base64.StdEncoding.Decode(out, data) + if err != nil { + return nil, nil, err + } + return s.Decoder.Decode(out[:n], defaults, into) +} + +// SerializerInfoForMediaType returns the first info in types that has a matching media type (which cannot +// include media-type parameters), or the first info with an empty media type, or false if no type matches. +func SerializerInfoForMediaType(types []SerializerInfo, mediaType string) (SerializerInfo, bool) { + for _, info := range types { + if info.MediaType == mediaType { + return info, true + } + } + for _, info := range types { + if len(info.MediaType) == 0 { + return info, true + } + } + return SerializerInfo{}, false +} + +var ( + // InternalGroupVersioner will always prefer the internal version for a given group version kind. + InternalGroupVersioner GroupVersioner = internalGroupVersioner{} + // DisabledGroupVersioner will reject all kinds passed to it. + DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} +) + +const ( + internalGroupVersionerIdentifier = "internal" + disabledGroupVersionerIdentifier = "disabled" +) + +type internalGroupVersioner struct{} + +// KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. +func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, kind := range kinds { + if kind.Version == APIVersionInternal { + return kind, true + } + } + for _, kind := range kinds { + return schema.GroupVersionKind{Group: kind.Group, Version: APIVersionInternal, Kind: kind.Kind}, true + } + return schema.GroupVersionKind{}, false +} + +// Identifier implements GroupVersioner interface. 
+func (internalGroupVersioner) Identifier() string { + return internalGroupVersionerIdentifier +} + +type disabledGroupVersioner struct{} + +// KindForGroupVersionKinds returns false for any input. +func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + return schema.GroupVersionKind{}, false +} + +// Identifier implements GroupVersioner interface. +func (disabledGroupVersioner) Identifier() string { + return disabledGroupVersionerIdentifier +} + +// Assert that schema.GroupVersion and GroupVersions implement GroupVersioner +var _ GroupVersioner = schema.GroupVersion{} +var _ GroupVersioner = schema.GroupVersions{} +var _ GroupVersioner = multiGroupVersioner{} + +type multiGroupVersioner struct { + target schema.GroupVersion + acceptedGroupKinds []schema.GroupKind + coerce bool +} + +// NewMultiGroupVersioner returns the provided group version for any kind that matches one of the provided group kinds. +// Kind may be empty in the provided group kind, in which case any kind will match. +func NewMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + if len(groupKinds) == 0 || (len(groupKinds) == 1 && groupKinds[0].Group == gv.Group) { + return gv + } + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds} +} + +// NewCoercingMultiGroupVersioner returns the provided group version for any incoming kind. +// Incoming kinds that match the provided groupKinds are preferred. +// Kind may be empty in the provided group kind, in which case any kind will match. +// Examples: +// gv=mygroup/__internal, groupKinds=mygroup/Foo, anothergroup/Bar +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group/kind) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, anothergroup/v1/Bar) -> mygroup/__internal/Bar (matched preferred group) +// +// gv=mygroup/__internal, groupKinds=mygroup, anothergroup +// KindForGroupVersionKinds(yetanother/v1/Baz, yetanother/v1/Bar) -> mygroup/__internal/Baz (no preferred group/kind match, uses first kind in list) +func NewCoercingMultiGroupVersioner(gv schema.GroupVersion, groupKinds ...schema.GroupKind) GroupVersioner { + return multiGroupVersioner{target: gv, acceptedGroupKinds: groupKinds, coerce: true} +} + +// KindForGroupVersionKinds returns the target group version if any kind matches any of the original group kinds. It will +// use the originating kind where possible. +func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, src := range kinds { + for _, kind := range v.acceptedGroupKinds { + if kind.Group != src.Group { + continue + } + if len(kind.Kind) > 0 && kind.Kind != src.Kind { + continue + } + return v.target.WithKind(src.Kind), true + } + } + if v.coerce && len(kinds) > 0 { + return v.target.WithKind(kinds[0].Kind), true + } + return schema.GroupVersionKind{}, false +} + +// Identifier implements GroupVersioner interface. 
+func (v multiGroupVersioner) Identifier() string { + groupKinds := make([]string, 0, len(v.acceptedGroupKinds)) + for _, gk := range v.acceptedGroupKinds { + groupKinds = append(groupKinds, gk.String()) + } + result := map[string]string{ + "name": "multi", + "target": v.target.String(), + "accepted": strings.Join(groupKinds, ","), + "coerce": strconv.FormatBool(v.coerce), + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling Identifier for %#v: %v", v, err) + } + return string(identifier) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go new file mode 100644 index 0000000..510444a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec_check.go @@ -0,0 +1,48 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// CheckCodec makes sure that the codec can encode objects like internalType, +// decode all of the external types listed, and also decode them into the given +// object. (Will modify internalObject.) (Assumes JSON serialization.) +// TODO: verify that the correct external version is chosen on encode... +func CheckCodec(c Codec, internalType Object, externalTypes ...schema.GroupVersionKind) error { + if _, err := Encode(c, internalType); err != nil { + return fmt.Errorf("Internal type not encodable: %v", err) + } + for _, et := range externalTypes { + exBytes := []byte(fmt.Sprintf(`{"kind":"%v","apiVersion":"%v"}`, et.Kind, et.GroupVersion().String())) + obj, err := Decode(c, exBytes) + if err != nil { + return fmt.Errorf("external type %s not interpretable: %v", et, err) + } + if reflect.TypeOf(obj) != reflect.TypeOf(internalType) { + return fmt.Errorf("decode of external type %s produced: %#v", et, obj) + } + if err = DecodeInto(c, exBytes, internalType); err != nil { + return fmt.Errorf("external type %s not convertible to internal type: %v", et, err) + } + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go new file mode 100644 index 0000000..d04d701 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -0,0 +1,196 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package runtime defines conversions between generic types and structs to map query strings +// to struct objects. 
+package runtime + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/conversion" +) + +// DefaultMetaV1FieldSelectorConversion auto-accepts metav1 values for name and namespace. +// A cluster scoped resource specifying namespace empty works fine and specifying a particular +// namespace will return no results, as expected. +func DefaultMetaV1FieldSelectorConversion(label, value string) (string, string, error) { + switch label { + case "metadata.name": + return label, value, nil + case "metadata.namespace": + return label, value, nil + default: + return "", "", fmt.Errorf("%q is not a known field selector: only %q, %q", label, "metadata.name", "metadata.namespace") + } +} + +// JSONKeyMapper uses the struct tags on a conversion to determine the key value for +// the other side. Use when mapping from a map[string]* to a struct or vice versa. +func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, string) { + if s := destTag.Get("json"); len(s) > 0 { + return strings.SplitN(s, ",", 2)[0], key + } + if s := sourceTag.Get("json"); len(s) > 0 { + return key, strings.SplitN(s, ",", 2)[0] + } + return key, key +} + +func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error { + if len(*in) == 0 { + *out = "" + return nil + } + *out = (*in)[0] + return nil +} + +func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) error { + if len(*in) == 0 { + *out = 0 + return nil + } + str := (*in)[0] + i, err := strconv.Atoi(str) + if err != nil { + return err + } + *out = i + return nil +} + +// Convert_Slice_string_To_bool will convert a string parameter to boolean. +// Only the absence of a value (i.e. zero-length slice), a value of "false", or a +// value of "0" resolve to false. +// Any other value (including empty string) resolves to true. +func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) error { + if len(*in) == 0 { + *out = false + return nil + } + switch { + case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): + *out = false + default: + *out = true + } + return nil +} + +// Convert_Slice_string_To_bool will convert a string parameter to boolean. +// Only the absence of a value (i.e. zero-length slice), a value of "false", or a +// value of "0" resolve to false. +// Any other value (including empty string) resolves to true. 
+func Convert_Slice_string_To_Pointer_bool(in *[]string, out **bool, s conversion.Scope) error { + if len(*in) == 0 { + boolVar := false + *out = &boolVar + return nil + } + switch { + case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): + boolVar := false + *out = &boolVar + default: + boolVar := true + *out = &boolVar + } + return nil +} + +func string_to_int64(in string) (int64, error) { + return strconv.ParseInt(in, 10, 64) +} + +func Convert_string_To_int64(in *string, out *int64, s conversion.Scope) error { + if in == nil { + *out = 0 + return nil + } + i, err := string_to_int64(*in) + if err != nil { + return err + } + *out = i + return nil +} + +func Convert_Slice_string_To_int64(in *[]string, out *int64, s conversion.Scope) error { + if len(*in) == 0 { + *out = 0 + return nil + } + i, err := string_to_int64((*in)[0]) + if err != nil { + return err + } + *out = i + return nil +} + +func Convert_string_To_Pointer_int64(in *string, out **int64, s conversion.Scope) error { + if in == nil { + *out = nil + return nil + } + i, err := string_to_int64(*in) + if err != nil { + return err + } + *out = &i + return nil +} + +func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversion.Scope) error { + if len(*in) == 0 { + *out = nil + return nil + } + i, err := string_to_int64((*in)[0]) + if err != nil { + return err + } + *out = &i + return nil +} + +func RegisterStringConversions(s *Scheme) error { + if err := s.AddConversionFunc((*[]string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_string(a.(*[]string), b.(*string), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_int(a.(*[]string), b.(*int), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_bool(a.(*[]string), b.(*bool), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*[]string)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_Slice_string_To_int64(a.(*[]string), b.(*int64), scope) + }); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go new file mode 100644 index 0000000..918d083 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -0,0 +1,707 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + encodingjson "encoding/json" + "fmt" + "math" + "os" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/util/json" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "sigs.k8s.io/structured-merge-diff/v3/value" + + "k8s.io/klog" +) + +// UnstructuredConverter is an interface for converting between interface{} +// and map[string]interface representation. +type UnstructuredConverter interface { + ToUnstructured(obj interface{}) (map[string]interface{}, error) + FromUnstructured(u map[string]interface{}, obj interface{}) error +} + +type structField struct { + structType reflect.Type + field int +} + +type fieldInfo struct { + name string + nameValue reflect.Value + omitempty bool +} + +type fieldsCacheMap map[structField]*fieldInfo + +type fieldsCache struct { + sync.Mutex + value atomic.Value +} + +func newFieldsCache() *fieldsCache { + cache := &fieldsCache{} + cache.value.Store(make(fieldsCacheMap)) + return cache +} + +var ( + mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) + stringType = reflect.TypeOf(string("")) + fieldCache = newFieldsCache() + + // DefaultUnstructuredConverter performs unstructured to Go typed object conversions. + DefaultUnstructuredConverter = &unstructuredConverter{ + mismatchDetection: parseBool(os.Getenv("KUBE_PATCH_CONVERSION_DETECTOR")), + comparison: conversion.EqualitiesOrDie( + func(a, b time.Time) bool { + return a.UTC() == b.UTC() + }, + ), + } +) + +func parseBool(key string) bool { + if len(key) == 0 { + return false + } + value, err := strconv.ParseBool(key) + if err != nil { + utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key)) + } + return value +} + +// unstructuredConverter knows how to convert between interface{} and +// Unstructured in both ways. +type unstructuredConverter struct { + // If true, we will be additionally running conversion via json + // to ensure that the result is true. + // This is supposed to be set only in tests. + mismatchDetection bool + // comparison is the default test logic used to compare + comparison conversion.Equalities +} + +// NewTestUnstructuredConverter creates an UnstructuredConverter that accepts JSON typed maps and translates them +// to Go types via reflection. It performs mismatch detection automatically and is intended for use by external +// test tools. Use DefaultUnstructuredConverter if you do not explicitly need mismatch detection. +func NewTestUnstructuredConverter(comparison conversion.Equalities) UnstructuredConverter { + return &unstructuredConverter{ + mismatchDetection: true, + comparison: comparison, + } +} + +// FromUnstructured converts an object from map[string]interface{} representation into a concrete type. +// It uses encoding/json/Unmarshaler if object implements it or reflection if not. 
+func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj interface{}) error { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return fmt.Errorf("FromUnstructured requires a non-nil pointer to an object, got %v", t) + } + err := fromUnstructured(reflect.ValueOf(u), value.Elem()) + if c.mismatchDetection { + newObj := reflect.New(t.Elem()).Interface() + newErr := fromUnstructuredViaJSON(u, newObj) + if (err != nil) != (newErr != nil) { + klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + } + if err == nil && !c.comparison.DeepEqual(obj, newObj) { + klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + } + } + return err +} + +func fromUnstructuredViaJSON(u map[string]interface{}, obj interface{}) error { + data, err := json.Marshal(u) + if err != nil { + return err + } + return json.Unmarshal(data, obj) +} + +func fromUnstructured(sv, dv reflect.Value) error { + sv = unwrapInterface(sv) + if !sv.IsValid() { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + st, dt := sv.Type(), dv.Type() + + switch dt.Kind() { + case reflect.Map, reflect.Slice, reflect.Ptr, reflect.Struct, reflect.Interface: + // Those require non-trivial conversion. + default: + // This should handle all simple types. + if st.AssignableTo(dt) { + dv.Set(sv) + return nil + } + // We cannot simply use "ConvertibleTo", as JSON doesn't support conversions + // between those four groups: bools, integers, floats and string. We need to + // do the same. + if st.ConvertibleTo(dt) { + switch st.Kind() { + case reflect.String: + switch dt.Kind() { + case reflect.String: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Bool: + switch dt.Kind() { + case reflect.Bool: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + switch dt.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + dv.Set(sv.Convert(dt)) + return nil + } + case reflect.Float32, reflect.Float64: + switch dt.Kind() { + case reflect.Float32, reflect.Float64: + dv.Set(sv.Convert(dt)) + return nil + } + if sv.Float() == math.Trunc(sv.Float()) { + dv.Set(sv.Convert(dt)) + return nil + } + } + return fmt.Errorf("cannot convert %s to %s", st.String(), dt.String()) + } + } + + // Check if the object has a custom JSON marshaller/unmarshaller. + entry := value.TypeReflectEntryOf(dv.Type()) + if entry.CanConvertFromUnstructured() { + return entry.FromUnstructured(sv, dv) + } + + switch dt.Kind() { + case reflect.Map: + return mapFromUnstructured(sv, dv) + case reflect.Slice: + return sliceFromUnstructured(sv, dv) + case reflect.Ptr: + return pointerFromUnstructured(sv, dv) + case reflect.Struct: + return structFromUnstructured(sv, dv) + case reflect.Interface: + return interfaceFromUnstructured(sv, dv) + default: + return fmt.Errorf("unrecognized type: %v", dt.Kind()) + } +} + +func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo { + fieldCacheMap := fieldCache.value.Load().(fieldsCacheMap) + if info, ok := fieldCacheMap[structField{structType, field}]; ok { + return info + } + + // Cache miss - we need to compute the field name. 
+ info := &fieldInfo{} + typeField := structType.Field(field) + jsonTag := typeField.Tag.Get("json") + if len(jsonTag) == 0 { + // Make the first character lowercase. + if typeField.Name == "" { + info.name = typeField.Name + } else { + info.name = strings.ToLower(typeField.Name[:1]) + typeField.Name[1:] + } + } else { + items := strings.Split(jsonTag, ",") + info.name = items[0] + for i := range items { + if items[i] == "omitempty" { + info.omitempty = true + break + } + } + } + info.nameValue = reflect.ValueOf(info.name) + + fieldCache.Lock() + defer fieldCache.Unlock() + fieldCacheMap = fieldCache.value.Load().(fieldsCacheMap) + newFieldCacheMap := make(fieldsCacheMap) + for k, v := range fieldCacheMap { + newFieldCacheMap[k] = v + } + newFieldCacheMap[structField{structType, field}] = info + fieldCache.value.Store(newFieldCacheMap) + return info +} + +func unwrapInterface(v reflect.Value) reflect.Value { + for v.Kind() == reflect.Interface { + v = v.Elem() + } + return v +} + +func mapFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() != reflect.Map { + return fmt.Errorf("cannot restore map from %v", st.Kind()) + } + + if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { + return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key()) + } + + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeMap(dt)) + for _, key := range sv.MapKeys() { + value := reflect.New(dt.Elem()).Elem() + if val := unwrapInterface(sv.MapIndex(key)); val.IsValid() { + if err := fromUnstructured(val, value); err != nil { + return err + } + } else { + value.Set(reflect.Zero(dt.Elem())) + } + if st.Key().AssignableTo(dt.Key()) { + dv.SetMapIndex(key, value) + } else { + dv.SetMapIndex(key.Convert(dt.Key()), value) + } + } + return nil +} + +func sliceFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() == reflect.String && dt.Elem().Kind() == reflect.Uint8 { + // We store original []byte representation as string. + // This conversion is allowed, but we need to be careful about + // marshaling data appropriately. + if len(sv.Interface().(string)) > 0 { + marshalled, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st, err) + } + // TODO: Is this Unmarshal needed? 
+ var data []byte + err = json.Unmarshal(marshalled, &data) + if err != nil { + return fmt.Errorf("error decoding from json: %v", err) + } + dv.SetBytes(data) + } else { + dv.Set(reflect.Zero(dt)) + } + return nil + } + if st.Kind() != reflect.Slice { + return fmt.Errorf("cannot restore slice from %v", st.Kind()) + } + + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.MakeSlice(dt, sv.Len(), sv.Cap())) + for i := 0; i < sv.Len(); i++ { + if err := fromUnstructured(sv.Index(i), dv.Index(i)); err != nil { + return err + } + } + return nil +} + +func pointerFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + + if st.Kind() == reflect.Ptr && sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + dv.Set(reflect.New(dt.Elem())) + switch st.Kind() { + case reflect.Ptr, reflect.Interface: + return fromUnstructured(sv.Elem(), dv.Elem()) + default: + return fromUnstructured(sv, dv.Elem()) + } +} + +func structFromUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if st.Kind() != reflect.Map { + return fmt.Errorf("cannot restore struct from: %v", st.Kind()) + } + + for i := 0; i < dt.NumField(); i++ { + fieldInfo := fieldInfoFromField(dt, i) + fv := dv.Field(i) + + if len(fieldInfo.name) == 0 { + // This field is inlined. + if err := fromUnstructured(sv, fv); err != nil { + return err + } + } else { + value := unwrapInterface(sv.MapIndex(fieldInfo.nameValue)) + if value.IsValid() { + if err := fromUnstructured(value, fv); err != nil { + return err + } + } else { + fv.Set(reflect.Zero(fv.Type())) + } + } + } + return nil +} + +func interfaceFromUnstructured(sv, dv reflect.Value) error { + // TODO: Is this conversion safe? + dv.Set(sv) + return nil +} + +// ToUnstructured converts an object into map[string]interface{} representation. +// It uses encoding/json/Marshaler if object implements it or reflection if not. +func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]interface{}, error) { + var u map[string]interface{} + var err error + if unstr, ok := obj.(Unstructured); ok { + u = unstr.UnstructuredContent() + } else { + t := reflect.TypeOf(obj) + value := reflect.ValueOf(obj) + if t.Kind() != reflect.Ptr || value.IsNil() { + return nil, fmt.Errorf("ToUnstructured requires a non-nil pointer to an object, got %v", t) + } + u = map[string]interface{}{} + err = toUnstructured(value.Elem(), reflect.ValueOf(&u).Elem()) + } + if c.mismatchDetection { + newUnstr := map[string]interface{}{} + newErr := toUnstructuredViaJSON(obj, &newUnstr) + if (err != nil) != (newErr != nil) { + klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + } + if err == nil && !c.comparison.DeepEqual(u, newUnstr) { + klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + } + } + if err != nil { + return nil, err + } + return u, nil +} + +// DeepCopyJSON deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal() and also int64. +// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil +func DeepCopyJSON(x map[string]interface{}) map[string]interface{} { + return DeepCopyJSONValue(x).(map[string]interface{}) +} + +// DeepCopyJSONValue deep copies the passed value, assuming it is a valid JSON representation i.e. only contains +// types produced by json.Unmarshal() and also int64. 
+// bool, int64, float64, string, []interface{}, map[string]interface{}, json.Number and nil +func DeepCopyJSONValue(x interface{}) interface{} { + switch x := x.(type) { + case map[string]interface{}: + if x == nil { + // Typed nil - an interface{} that contains a type map[string]interface{} with a value of nil + return x + } + clone := make(map[string]interface{}, len(x)) + for k, v := range x { + clone[k] = DeepCopyJSONValue(v) + } + return clone + case []interface{}: + if x == nil { + // Typed nil - an interface{} that contains a type []interface{} with a value of nil + return x + } + clone := make([]interface{}, len(x)) + for i, v := range x { + clone[i] = DeepCopyJSONValue(v) + } + return clone + case string, int64, bool, float64, nil, encodingjson.Number: + return x + default: + panic(fmt.Errorf("cannot deep copy %T", x)) + } +} + +func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { + data, err := json.Marshal(obj) + if err != nil { + return err + } + return json.Unmarshal(data, u) +} + +func toUnstructured(sv, dv reflect.Value) error { + // Check if the object has a custom string converter. + entry := value.TypeReflectEntryOf(sv.Type()) + if entry.CanConvertToUnstructured() { + v, err := entry.ToUnstructured(sv) + if err != nil { + return err + } + if v != nil { + dv.Set(reflect.ValueOf(v)) + } + return nil + } + st := sv.Type() + switch st.Kind() { + case reflect.String: + dv.Set(reflect.ValueOf(sv.String())) + return nil + case reflect.Bool: + dv.Set(reflect.ValueOf(sv.Bool())) + return nil + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + dv.Set(reflect.ValueOf(sv.Int())) + return nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + uVal := sv.Uint() + if uVal > math.MaxInt64 { + return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal) + } + dv.Set(reflect.ValueOf(int64(uVal))) + return nil + case reflect.Float32, reflect.Float64: + dv.Set(reflect.ValueOf(sv.Float())) + return nil + case reflect.Map: + return mapToUnstructured(sv, dv) + case reflect.Slice: + return sliceToUnstructured(sv, dv) + case reflect.Ptr: + return pointerToUnstructured(sv, dv) + case reflect.Struct: + return structToUnstructured(sv, dv) + case reflect.Interface: + return interfaceToUnstructured(sv, dv) + default: + return fmt.Errorf("unrecognized type: %v", st.Kind()) + } +} + +func mapToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + if st.Key().Kind() == reflect.String { + switch st.Elem().Kind() { + // TODO It should be possible to reuse the slice for primitive types. + // However, it is panicing in the following form. + // case reflect.String, reflect.Bool, + // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // sv.Set(sv) + // return nil + default: + // We need to do a proper conversion. 
+ } + } + dv.Set(reflect.MakeMap(mapStringInterfaceType)) + dv = dv.Elem() + dt = dv.Type() + } + if dt.Kind() != reflect.Map { + return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + } + + if !st.Key().AssignableTo(dt.Key()) && !st.Key().ConvertibleTo(dt.Key()) { + return fmt.Errorf("cannot copy map with non-assignable keys: %v %v", st.Key(), dt.Key()) + } + + for _, key := range sv.MapKeys() { + value := reflect.New(dt.Elem()).Elem() + if err := toUnstructured(sv.MapIndex(key), value); err != nil { + return err + } + if st.Key().AssignableTo(dt.Key()) { + dv.SetMapIndex(key, value) + } else { + dv.SetMapIndex(key.Convert(dt.Key()), value) + } + } + return nil +} + +func sliceToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if sv.IsNil() { + dv.Set(reflect.Zero(dt)) + return nil + } + if st.Elem().Kind() == reflect.Uint8 { + dv.Set(reflect.New(stringType)) + data, err := json.Marshal(sv.Bytes()) + if err != nil { + return err + } + var result string + if err = json.Unmarshal(data, &result); err != nil { + return err + } + dv.Set(reflect.ValueOf(result)) + return nil + } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + switch st.Elem().Kind() { + // TODO It should be possible to reuse the slice for primitive types. + // However, it is panicing in the following form. + // case reflect.String, reflect.Bool, + // reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + // reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + // sv.Set(sv) + // return nil + default: + // We need to do a proper conversion. + dv.Set(reflect.MakeSlice(reflect.SliceOf(dt), sv.Len(), sv.Cap())) + dv = dv.Elem() + dt = dv.Type() + } + } + if dt.Kind() != reflect.Slice { + return fmt.Errorf("cannot convert slice to: %v", dt.Kind()) + } + for i := 0; i < sv.Len(); i++ { + if err := toUnstructured(sv.Index(i), dv.Index(i)); err != nil { + return err + } + } + return nil +} + +func pointerToUnstructured(sv, dv reflect.Value) error { + if sv.IsNil() { + // We're done - we don't need to store anything. + return nil + } + return toUnstructured(sv.Elem(), dv) +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Map, reflect.Slice: + // TODO: It seems that 0-len maps are ignored in it. + return v.IsNil() || v.Len() == 0 + case reflect.Ptr, reflect.Interface: + return v.IsNil() + } + return false +} + +func structToUnstructured(sv, dv reflect.Value) error { + st, dt := sv.Type(), dv.Type() + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.MakeMapWithSize(mapStringInterfaceType, st.NumField())) + dv = dv.Elem() + dt = dv.Type() + } + if dt.Kind() != reflect.Map { + return fmt.Errorf("cannot convert struct to: %v", dt.Kind()) + } + realMap := dv.Interface().(map[string]interface{}) + + for i := 0; i < st.NumField(); i++ { + fieldInfo := fieldInfoFromField(st, i) + fv := sv.Field(i) + + if fieldInfo.name == "-" { + // This field should be skipped. + continue + } + if fieldInfo.omitempty && isZero(fv) { + // omitempty fields should be ignored. + continue + } + if len(fieldInfo.name) == 0 { + // This field is inlined. 
+ if err := toUnstructured(fv, dv); err != nil { + return err + } + continue + } + switch fv.Type().Kind() { + case reflect.String: + realMap[fieldInfo.name] = fv.String() + case reflect.Bool: + realMap[fieldInfo.name] = fv.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + realMap[fieldInfo.name] = fv.Int() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + realMap[fieldInfo.name] = fv.Uint() + case reflect.Float32, reflect.Float64: + realMap[fieldInfo.name] = fv.Float() + default: + subv := reflect.New(dt.Elem()).Elem() + if err := toUnstructured(fv, subv); err != nil { + return err + } + dv.SetMapIndex(fieldInfo.nameValue, subv) + } + } + return nil +} + +func interfaceToUnstructured(sv, dv reflect.Value) error { + if !sv.IsValid() || sv.IsNil() { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + return toUnstructured(sv.Elem(), dv) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go new file mode 100644 index 0000000..89feb40 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/doc.go @@ -0,0 +1,51 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package runtime includes helper functions for working with API objects +// that follow the kubernetes API object conventions, which are: +// +// 0. Your API objects have a common metadata struct member, TypeMeta. +// +// 1. Your code refers to an internal set of API objects. +// +// 2. In a separate package, you have an external set of API objects. +// +// 3. The external set is considered to be versioned, and no breaking +// changes are ever made to it (fields may be added but not changed +// or removed). +// +// 4. As your api evolves, you'll make an additional versioned package +// with every major change. +// +// 5. Versioned packages have conversion functions which convert to +// and from the internal version. +// +// 6. You'll continue to support older versions according to your +// deprecation policy, and you can easily provide a program/library +// to update old versions into new versions because of 5. +// +// 7. All of your serializations and deserializations are handled in a +// centralized place. +// +// Package runtime provides a conversion helper to make 5 easy, and the +// Encode/Decode/DecodeInto trio to accomplish 7. You can also register +// additional "codecs" which use a version of your choice. It's +// recommended that you register your types with runtime in your +// package's init function. +// +// As a bonus, a few common types useful from all api objects and versions +// are provided in types.go. +package runtime // import "k8s.io/apimachinery/pkg/runtime" diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go new file mode 100644 index 0000000..7251e65 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -0,0 +1,149 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "errors" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type encodable struct { + E Encoder `json:"-"` + obj Object + versions []schema.GroupVersion +} + +func (e encodable) GetObjectKind() schema.ObjectKind { return e.obj.GetObjectKind() } +func (e encodable) DeepCopyObject() Object { + out := e + out.obj = e.obj.DeepCopyObject() + copy(out.versions, e.versions) + return out +} + +// NewEncodable creates an object that will be encoded with the provided codec on demand. +// Provided as a convenience for test cases dealing with internal objects. +func NewEncodable(e Encoder, obj Object, versions ...schema.GroupVersion) Object { + if _, ok := obj.(*Unknown); ok { + return obj + } + return encodable{e, obj, versions} +} + +func (e encodable) UnmarshalJSON(in []byte) error { + return errors.New("runtime.encodable cannot be unmarshalled from JSON") +} + +// Marshal may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (e encodable) MarshalJSON() ([]byte, error) { + return Encode(e.E, e.obj) +} + +// NewEncodableList creates an object that will be encoded with the provided codec on demand. +// Provided as a convenience for test cases dealing with internal objects. +func NewEncodableList(e Encoder, objects []Object, versions ...schema.GroupVersion) []Object { + out := make([]Object, len(objects)) + for i := range objects { + if _, ok := objects[i].(*Unknown); ok { + out[i] = objects[i] + continue + } + out[i] = NewEncodable(e, objects[i], versions...) + } + return out +} + +func (e *Unknown) UnmarshalJSON(in []byte) error { + if e == nil { + return errors.New("runtime.Unknown: UnmarshalJSON on nil pointer") + } + e.TypeMeta = TypeMeta{} + e.Raw = append(e.Raw[0:0], in...) + e.ContentEncoding = "" + e.ContentType = ContentTypeJSON + return nil +} + +// Marshal may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (e Unknown) MarshalJSON() ([]byte, error) { + // If ContentType is unset, we assume this is JSON. 
+ if e.ContentType != "" && e.ContentType != ContentTypeJSON { + return nil, errors.New("runtime.Unknown: MarshalJSON on non-json data") + } + if e.Raw == nil { + return []byte("null"), nil + } + return e.Raw, nil +} + +func Convert_runtime_Object_To_runtime_RawExtension(in *Object, out *RawExtension, s conversion.Scope) error { + if in == nil { + out.Raw = []byte("null") + return nil + } + obj := *in + if unk, ok := obj.(*Unknown); ok { + if unk.Raw != nil { + out.Raw = unk.Raw + return nil + } + obj = out.Object + } + if obj == nil { + out.Raw = nil + return nil + } + out.Object = obj + return nil +} + +func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Object, s conversion.Scope) error { + if in.Object != nil { + *out = in.Object + return nil + } + data := in.Raw + if len(data) == 0 || (len(data) == 4 && string(data) == "null") { + *out = nil + return nil + } + *out = &Unknown{ + Raw: data, + // TODO: Set ContentEncoding and ContentType appropriately. + // Currently we set ContentTypeJSON to make tests passing. + ContentType: ContentTypeJSON, + } + return nil +} + +func RegisterEmbeddedConversions(s *Scheme) error { + if err := s.AddConversionFunc((*Object)(nil), (*RawExtension)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_runtime_Object_To_runtime_RawExtension(a.(*Object), b.(*RawExtension), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*RawExtension)(nil), (*Object)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_runtime_RawExtension_To_runtime_Object(a.(*RawExtension), b.(*Object), scope) + }); err != nil { + return err + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go new file mode 100644 index 0000000..be0c5ed --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -0,0 +1,151 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "fmt" + "reflect" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type notRegisteredErr struct { + schemeName string + gvk schema.GroupVersionKind + target GroupVersioner + t reflect.Type +} + +func NewNotRegisteredErrForKind(schemeName string, gvk schema.GroupVersionKind) error { + return ¬RegisteredErr{schemeName: schemeName, gvk: gvk} +} + +func NewNotRegisteredErrForType(schemeName string, t reflect.Type) error { + return ¬RegisteredErr{schemeName: schemeName, t: t} +} + +func NewNotRegisteredErrForTarget(schemeName string, t reflect.Type, target GroupVersioner) error { + return ¬RegisteredErr{schemeName: schemeName, t: t, target: target} +} + +func NewNotRegisteredGVKErrForTarget(schemeName string, gvk schema.GroupVersionKind, target GroupVersioner) error { + return ¬RegisteredErr{schemeName: schemeName, gvk: gvk, target: target} +} + +func (k *notRegisteredErr) Error() string { + if k.t != nil && k.target != nil { + return fmt.Sprintf("%v is not suitable for converting to %q in scheme %q", k.t, k.target, k.schemeName) + } + nullGVK := schema.GroupVersionKind{} + if k.gvk != nullGVK && k.target != nil { + return fmt.Sprintf("%q is not suitable for converting to %q in scheme %q", k.gvk.GroupVersion(), k.target, k.schemeName) + } + if k.t != nil { + return fmt.Sprintf("no kind is registered for the type %v in scheme %q", k.t, k.schemeName) + } + if len(k.gvk.Kind) == 0 { + return fmt.Sprintf("no version %q has been registered in scheme %q", k.gvk.GroupVersion(), k.schemeName) + } + if k.gvk.Version == APIVersionInternal { + return fmt.Sprintf("no kind %q is registered for the internal version of group %q in scheme %q", k.gvk.Kind, k.gvk.Group, k.schemeName) + } + + return fmt.Sprintf("no kind %q is registered for version %q in scheme %q", k.gvk.Kind, k.gvk.GroupVersion(), k.schemeName) +} + +// IsNotRegisteredError returns true if the error indicates the provided +// object or input data is not registered. +func IsNotRegisteredError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*notRegisteredErr) + return ok +} + +type missingKindErr struct { + data string +} + +func NewMissingKindErr(data string) error { + return &missingKindErr{data} +} + +func (k *missingKindErr) Error() string { + return fmt.Sprintf("Object 'Kind' is missing in '%s'", k.data) +} + +// IsMissingKind returns true if the error indicates that the provided object +// is missing a 'Kind' field. +func IsMissingKind(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingKindErr) + return ok +} + +type missingVersionErr struct { + data string +} + +func NewMissingVersionErr(data string) error { + return &missingVersionErr{data} +} + +func (k *missingVersionErr) Error() string { + return fmt.Sprintf("Object 'apiVersion' is missing in '%s'", k.data) +} + +// IsMissingVersion returns true if the error indicates that the provided object +// is missing a 'Version' field. +func IsMissingVersion(err error) bool { + if err == nil { + return false + } + _, ok := err.(*missingVersionErr) + return ok +} + +// strictDecodingError is a base error type that is returned by a strict Decoder such +// as UniversalStrictDecoder. +type strictDecodingError struct { + message string + data string +} + +// NewStrictDecodingError creates a new strictDecodingError object. 
+func NewStrictDecodingError(message string, data string) error { + return &strictDecodingError{ + message: message, + data: data, + } +} + +func (e *strictDecodingError) Error() string { + return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) +} + +// IsStrictDecodingError returns true if the error indicates that the provided object +// strictness violations. +func IsStrictDecodingError(err error) bool { + if err == nil { + return false + } + _, ok := err.(*strictDecodingError) + return ok +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go new file mode 100644 index 0000000..9056397 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/extension.go @@ -0,0 +1,51 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "encoding/json" + "errors" +) + +func (re *RawExtension) UnmarshalJSON(in []byte) error { + if re == nil { + return errors.New("runtime.RawExtension: UnmarshalJSON on nil pointer") + } + if !bytes.Equal(in, []byte("null")) { + re.Raw = append(re.Raw[0:0], in...) + } + return nil +} + +// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. +// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go +func (re RawExtension) MarshalJSON() ([]byte, error) { + if re.Raw == nil { + // TODO: this is to support legacy behavior of JSONPrinter and YAMLPrinter, which + // expect to call json.Marshal on arbitrary versioned objects (even those not in + // the scheme). pkg/kubectl/resource#AsVersionedObjects and its interaction with + // kubectl get on objects not in the scheme needs to be updated to ensure that the + // objects that are not part of the scheme are correctly put into the right form. + if re.Object != nil { + return json.Marshal(re.Object) + } + return []byte("null"), nil + } + // TODO: Check whether ContentType is actually JSON before returning it. + return re.Raw, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go new file mode 100644 index 0000000..0719718 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -0,0 +1,855 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto + +package runtime + +import ( + fmt "fmt" + + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{0} +} +func (m *RawExtension) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *RawExtension) XXX_Merge(src proto.Message) { + xxx_messageInfo_RawExtension.Merge(m, src) +} +func (m *RawExtension) XXX_Size() int { + return m.Size() +} +func (m *RawExtension) XXX_DiscardUnknown() { + xxx_messageInfo_RawExtension.DiscardUnknown(m) +} + +var xxx_messageInfo_RawExtension proto.InternalMessageInfo + +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{1} +} +func (m *TypeMeta) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TypeMeta) XXX_Merge(src proto.Message) { + xxx_messageInfo_TypeMeta.Merge(m, src) +} +func (m *TypeMeta) XXX_Size() int { + return m.Size() +} +func (m *TypeMeta) XXX_DiscardUnknown() { + xxx_messageInfo_TypeMeta.DiscardUnknown(m) +} + +var xxx_messageInfo_TypeMeta proto.InternalMessageInfo + +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { + return fileDescriptor_9d3c45d7f546725c, []int{2} +} +func (m *Unknown) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Unknown) XXX_Merge(src proto.Message) { + xxx_messageInfo_Unknown.Merge(m, src) +} +func (m *Unknown) XXX_Size() int { + return m.Size() +} +func (m *Unknown) XXX_DiscardUnknown() { + xxx_messageInfo_Unknown.DiscardUnknown(m) +} + +var xxx_messageInfo_Unknown proto.InternalMessageInfo + +func init() { + proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") + proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") + proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) +} + +var fileDescriptor_9d3c45d7f546725c = []byte{ + // 378 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, + 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, + 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, + 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, + 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, + 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, + 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, + 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, + 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, + 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, + 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, + 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, + 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, + 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, + 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, + 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, + 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, + 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, + 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, + 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, + 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, + 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, + 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, +} + +func (m *RawExtension) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *TypeMeta) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.Kind) + copy(dAtA[i:], m.Kind) + i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Kind))) + i-- + dAtA[i] = 0x12 + i -= len(m.APIVersion) + copy(dAtA[i:], m.APIVersion) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *Unknown) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.ContentType) + copy(dAtA[i:], m.ContentType) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i-- + dAtA[i] = 0x22 + i -= len(m.ContentEncoding) + copy(dAtA[i:], m.ContentEncoding) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i-- + dAtA[i] = 0x1a + if m.Raw != nil { + i -= len(m.Raw) + copy(dAtA[i:], m.Raw) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *RawExtension) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + return n +} + +func (m *TypeMeta) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Unknown) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.TypeMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Raw != nil { + l = len(m.Raw) + n += 1 + l + sovGenerated(uint64(l)) + } + l = len(m.ContentEncoding) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContentType) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *RawExtension) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&RawExtension{`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `}`, + }, "") + return s +} +func (this *TypeMeta) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TypeMeta{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Unknown) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Unknown{`, + `TypeMeta:` + strings.Replace(strings.Replace(this.TypeMeta.String(), "TypeMeta", "TypeMeta", 1), `&`, ``, 1) + `,`, + `Raw:` + valueToStringGenerated(this.Raw) + `,`, + `ContentEncoding:` + fmt.Sprintf("%v", this.ContentEncoding) + `,`, + `ContentType:` + fmt.Sprintf("%v", this.ContentType) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := 
reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *RawExtension) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RawExtension: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RawExtension: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) + if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TypeMeta) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TypeMeta: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TypeMeta: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersion = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Unknown) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Unknown: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Unknown: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TypeMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Raw == nil { + m.Raw = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentEncoding", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentEncoding = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ContentType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ContentType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git 
a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto new file mode 100644 index 0000000..0e212ec --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -0,0 +1,127 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime; + +// Package-wide variables from generator "generated". +option go_package = "runtime"; + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. +// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +message RawExtension { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + optional bytes raw = 1; +} + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. 
+// +// +k8s:deepcopy-gen=false +// +protobuf=true +// +k8s:openapi-gen=true +message TypeMeta { + // +optional + optional string apiVersion = 1; + + // +optional + optional string kind = 2; +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=true +// +k8s:openapi-gen=true +message Unknown { + optional TypeMeta typeMeta = 1; + + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + optional bytes raw = 2; + + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + optional string contentEncoding = 3; + + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + optional string contentType = 4; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go new file mode 100644 index 0000000..7bd1a3a --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -0,0 +1,259 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "io" + "reflect" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/errors" +) + +// unsafeObjectConvertor implements ObjectConvertor using the unsafe conversion path. +type unsafeObjectConvertor struct { + *Scheme +} + +var _ ObjectConvertor = unsafeObjectConvertor{} + +// ConvertToVersion converts in to the provided outVersion without copying the input first, which +// is only safe if the output object is not mutated or reused. +func (c unsafeObjectConvertor) ConvertToVersion(in Object, outVersion GroupVersioner) (Object, error) { + return c.Scheme.UnsafeConvertToVersion(in, outVersion) +} + +// UnsafeObjectConvertor performs object conversion without copying the object structure, +// for use when the converted object will not be reused or mutated. Primarily for use within +// versioned codecs, which use the external object for serialization but do not return it. +func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { + return unsafeObjectConvertor{scheme} +} + +// SetField puts the value of src, into fieldName, which must be a member of v. +// The value of src must be assignable to the field. 
+func SetField(src interface{}, v reflect.Value, fieldName string) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + } + srcValue := reflect.ValueOf(src) + if srcValue.Type().AssignableTo(field.Type()) { + field.Set(srcValue) + return nil + } + if srcValue.Type().ConvertibleTo(field.Type()) { + field.Set(srcValue.Convert(field.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", srcValue.Type(), field.Type()) +} + +// Field puts the value of fieldName, which must be a member of v, into dest, +// which must be a variable to which this field's value can be assigned. +func Field(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + } + destValue, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + if field.Type().AssignableTo(destValue.Type()) { + destValue.Set(field) + return nil + } + if field.Type().ConvertibleTo(destValue.Type()) { + destValue.Set(field.Convert(destValue.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), destValue.Type()) +} + +// FieldPtr puts the address of fieldName, which must be a member of v, +// into dest, which must be an address of a variable to which this field's +// address can be assigned. +func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { + field := v.FieldByName(fieldName) + if !field.IsValid() { + return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + } + v, err := conversion.EnforcePtr(dest) + if err != nil { + return err + } + field = field.Addr() + if field.Type().AssignableTo(v.Type()) { + v.Set(field) + return nil + } + if field.Type().ConvertibleTo(v.Type()) { + v.Set(field.Convert(v.Type())) + return nil + } + return fmt.Errorf("couldn't assign/convert %v to %v", field.Type(), v.Type()) +} + +// EncodeList ensures that each object in an array is converted to a Unknown{} in serialized form. +// TODO: accept a content type. +func EncodeList(e Encoder, objects []Object) error { + var errs []error + for i := range objects { + data, err := Encode(e, objects[i]) + if err != nil { + errs = append(errs, err) + continue + } + // TODO: Set ContentEncoding and ContentType. + objects[i] = &Unknown{Raw: data} + } + return errors.NewAggregate(errs) +} + +func decodeListItem(obj *Unknown, decoders []Decoder) (Object, error) { + for _, decoder := range decoders { + // TODO: Decode based on ContentType. + obj, err := Decode(decoder, obj.Raw) + if err != nil { + if IsNotRegisteredError(err) { + continue + } + return nil, err + } + return obj, nil + } + // could not decode, so leave the object as Unknown, but give the decoders the + // chance to set Unknown.TypeMeta if it is available. + for _, decoder := range decoders { + if err := DecodeInto(decoder, obj.Raw, obj); err == nil { + return obj, nil + } + } + return obj, nil +} + +// DecodeList alters the list in place, attempting to decode any objects found in +// the list that have the Unknown type. Any errors that occur are returned +// after the entire list is processed. Decoders are tried in order. 
+func DecodeList(objects []Object, decoders ...Decoder) []error { + errs := []error(nil) + for i, obj := range objects { + switch t := obj.(type) { + case *Unknown: + decoded, err := decodeListItem(t, decoders) + if err != nil { + errs = append(errs, err) + break + } + objects[i] = decoded + } + } + return errs +} + +// MultiObjectTyper returns the types of objects across multiple schemes in order. +type MultiObjectTyper []ObjectTyper + +var _ ObjectTyper = MultiObjectTyper{} + +func (m MultiObjectTyper) ObjectKinds(obj Object) (gvks []schema.GroupVersionKind, unversionedType bool, err error) { + for _, t := range m { + gvks, unversionedType, err = t.ObjectKinds(obj) + if err == nil { + return + } + } + return +} + +func (m MultiObjectTyper) Recognizes(gvk schema.GroupVersionKind) bool { + for _, t := range m { + if t.Recognizes(gvk) { + return true + } + } + return false +} + +// SetZeroValue would set the object of objPtr to zero value of its type. +func SetZeroValue(objPtr Object) error { + v, err := conversion.EnforcePtr(objPtr) + if err != nil { + return err + } + v.Set(reflect.Zero(v.Type())) + return nil +} + +// DefaultFramer is valid for any stream that can read objects serially without +// any separation in the stream. +var DefaultFramer = defaultFramer{} + +type defaultFramer struct{} + +func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } +func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } + +// WithVersionEncoder serializes an object and ensures the GVK is set. +type WithVersionEncoder struct { + Version GroupVersioner + Encoder + ObjectTyper +} + +// Encode does not do conversion. It sets the gvk during serialization. +func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { + gvks, _, err := e.ObjectTyper.ObjectKinds(obj) + if err != nil { + if IsNotRegisteredError(err) { + return e.Encoder.Encode(obj, stream) + } + return err + } + kind := obj.GetObjectKind() + oldGVK := kind.GroupVersionKind() + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) + err = e.Encoder.Encode(obj, stream) + kind.SetGroupVersionKind(oldGVK) + return err +} + +// WithoutVersionDecoder clears the group version kind of a deserialized object. +type WithoutVersionDecoder struct { + Decoder +} + +// Decode does not do conversion. It removes the gvk during deserialization. +func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaults, into) + if obj != nil { + kind := obj.GetObjectKind() + // clearing the gvk is just a convention of a codec + kind.SetGroupVersionKind(schema.GroupVersionKind{}) + } + return obj, gvk, err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go new file mode 100644 index 0000000..f44693c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -0,0 +1,344 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "io" + "net/url" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +const ( + // APIVersionInternal may be used if you are registering a type that should not + // be considered stable or serialized - it is a convention only and has no + // special behavior in this package. + APIVersionInternal = "__internal" +) + +// GroupVersioner refines a set of possible conversion targets into a single option. +type GroupVersioner interface { + // KindForGroupVersionKinds returns a desired target group version kind for the given input, or returns ok false if no + // target is known. In general, if the return target is not in the input list, the caller is expected to invoke + // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. + // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. + KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) + // Identifier returns string representation of the object. + // Identifiers of two different encoders should be equal only if for every input + // kinds they return the same result. + Identifier() string +} + +// Identifier represents an identifier. +// Identitier of two different objects should be equal if and only if for every +// input the output they produce is exactly the same. +type Identifier string + +// Encoder writes objects to a serialized form +type Encoder interface { + // Encode writes an object to a stream. Implementations may return errors if the versions are + // incompatible, or if no conversion is defined. + Encode(obj Object, w io.Writer) error + // Identifier returns an identifier of the encoder. + // Identifiers of two different encoders should be equal if and only if for every input + // object it will be encoded to the same representation by both of them. + // + // Identifier is inteted for use with CacheableObject#CacheEncode method. In order to + // correctly handle CacheableObject, Encode() method should look similar to below, where + // doEncode() is the encoding logic of implemented encoder: + // func (e *MyEncoder) Encode(obj Object, w io.Writer) error { + // if co, ok := obj.(CacheableObject); ok { + // return co.CacheEncode(e.Identifier(), e.doEncode, w) + // } + // return e.doEncode(obj, w) + // } + Identifier() Identifier +} + +// Decoder attempts to load an object from data. +type Decoder interface { + // Decode attempts to deserialize the provided data using either the innate typing of the scheme or the + // default kind, group, and version provided. It returns a decoded object as well as the kind, group, and + // version from the serialized data, or an error. If into is non-nil, it will be used as the target type + // and implementations may choose to use it rather than reallocating an object. However, the object is not + // guaranteed to be populated. The returned object is not guaranteed to match into. If defaults are + // provided, they are applied to the data by default. 
If no defaults or partial defaults are provided, the + // type of the into may be used to guide conversion decisions. + Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) +} + +// Serializer is the core interface for transforming objects into a serialized format and back. +// Implementations may choose to perform conversion of the object, but no assumptions should be made. +type Serializer interface { + Encoder + Decoder +} + +// Codec is a Serializer that deals with the details of versioning objects. It offers the same +// interface as Serializer, so this is a marker to consumers that care about the version of the objects +// they receive. +type Codec Serializer + +// ParameterCodec defines methods for serializing and deserializing API objects to url.Values and +// performing any necessary conversion. Unlike the normal Codec, query parameters are not self describing +// and the desired version must be specified. +type ParameterCodec interface { + // DecodeParameters takes the given url.Values in the specified group version and decodes them + // into the provided object, or returns an error. + DecodeParameters(parameters url.Values, from schema.GroupVersion, into Object) error + // EncodeParameters encodes the provided object as query parameters or returns an error. + EncodeParameters(obj Object, to schema.GroupVersion) (url.Values, error) +} + +// Framer is a factory for creating readers and writers that obey a particular framing pattern. +type Framer interface { + NewFrameReader(r io.ReadCloser) io.ReadCloser + NewFrameWriter(w io.Writer) io.Writer +} + +// SerializerInfo contains information about a specific serialization format +type SerializerInfo struct { + // MediaType is the value that represents this serializer over the wire. + MediaType string + // MediaTypeType is the first part of the MediaType ("application" in "application/json"). + MediaTypeType string + // MediaTypeSubType is the second part of the MediaType ("json" in "application/json"). + MediaTypeSubType string + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. + EncodesAsText bool + // Serializer is the individual object serializer for this media type. + Serializer Serializer + // PrettySerializer, if set, can serialize this object in a form biased towards + // readability. + PrettySerializer Serializer + // StreamSerializer, if set, describes the streaming serialization format + // for this media type. + StreamSerializer *StreamSerializerInfo +} + +// StreamSerializerInfo contains information about a specific stream serialization format +type StreamSerializerInfo struct { + // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. + EncodesAsText bool + // Serializer is the top level object serializer for this type when streaming + Serializer + // Framer is the factory for retrieving streams that separate objects on the wire + Framer +} + +// NegotiatedSerializer is an interface used for obtaining encoders, decoders, and serializers +// for multiple supported media types. This would commonly be accepted by a server component +// that performs HTTP content negotiation to accept multiple formats. +type NegotiatedSerializer interface { + // SupportedMediaTypes is the media types supported for reading and writing single objects. + SupportedMediaTypes() []SerializerInfo + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. 
+ EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// ClientNegotiator handles turning an HTTP content type into the appropriate encoder. +// Use NewClientNegotiator or NewVersionedClientNegotiator to create this interface from +// a NegotiatedSerializer. +type ClientNegotiator interface { + // Encoder returns the appropriate encoder for the provided contentType (e.g. application/json) + // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found + // a NegotiateError will be returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + Encoder(contentType string, params map[string]string) (Encoder, error) + // Decoder returns the appropriate decoder for the provided contentType (e.g. application/json) + // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found + // a NegotiateError will be returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + Decoder(contentType string, params map[string]string) (Decoder, error) + // StreamDecoder returns the appropriate stream decoder for the provided contentType (e.g. + // application/json) and any optional mediaType parameters (e.g. pretty=1), or an error. If no + // serializer is found a NegotiateError will be returned. The Serializer and Framer will always + // be returned if a Decoder is returned. The current client implementations consider params to be + // optional modifiers to the contentType and will ignore unrecognized parameters. + StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) +} + +// StorageSerializer is an interface used for obtaining encoders, decoders, and serializers +// that can read and write data at rest. This would commonly be used by client tools that must +// read files, or server side storage interfaces that persist restful objects. +type StorageSerializer interface { + // SupportedMediaTypes are the media types supported for reading and writing objects. + SupportedMediaTypes() []SerializerInfo + + // UniversalDeserializer returns a Serializer that can read objects in multiple supported formats + // by introspecting the data at rest. + UniversalDeserializer() Decoder + + // EncoderForVersion returns an encoder that ensures objects being written to the provided + // serializer are in the provided group version. + EncoderForVersion(serializer Encoder, gv GroupVersioner) Encoder + // DecoderForVersion returns a decoder that ensures objects being read by the provided + // serializer are in the provided group version by default. + DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder +} + +// NestedObjectEncoder is an optional interface that objects may implement to be given +// an opportunity to encode any nested Objects / RawExtensions during serialization. +type NestedObjectEncoder interface { + EncodeNestedObjects(e Encoder) error +} + +// NestedObjectDecoder is an optional interface that objects may implement to be given +// an opportunity to decode any nested Objects / RawExtensions during serialization. 
+type NestedObjectDecoder interface { + DecodeNestedObjects(d Decoder) error +} + +/////////////////////////////////////////////////////////////////////////////// +// Non-codec interfaces + +type ObjectDefaulter interface { + // Default takes an object (must be a pointer) and applies any default values. + // Defaulters may not error. + Default(in Object) +} + +type ObjectVersioner interface { + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) +} + +// ObjectConvertor converts an object to a different version. +type ObjectConvertor interface { + // Convert attempts to convert one object into another, or returns an error. This + // method does not mutate the in object, but the in and out object might share data structures, + // i.e. the out object cannot be mutated without mutating the in object as well. + // The context argument will be passed to all nested conversions. + Convert(in, out, context interface{}) error + // ConvertToVersion takes the provided object and converts it the provided version. This + // method does not mutate the in object, but the in and out object might share data structures, + // i.e. the out object cannot be mutated without mutating the in object as well. + // This method is similar to Convert() but handles specific details of choosing the correct + // output version. + ConvertToVersion(in Object, gv GroupVersioner) (out Object, err error) + ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) +} + +// ObjectTyper contains methods for extracting the APIVersion and Kind +// of objects. +type ObjectTyper interface { + // ObjectKinds returns the all possible group,version,kind of the provided object, true if + // the object is unversioned, or an error if the object is not recognized + // (IsNotRegisteredError will return true). + ObjectKinds(Object) ([]schema.GroupVersionKind, bool, error) + // Recognizes returns true if the scheme is able to handle the provided version and kind, + // or more precisely that the provided version is a possible conversion or decoding + // target. + Recognizes(gvk schema.GroupVersionKind) bool +} + +// ObjectCreater contains methods for instantiating an object by kind and version. +type ObjectCreater interface { + New(kind schema.GroupVersionKind) (out Object, err error) +} + +// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource +type EquivalentResourceMapper interface { + // EquivalentResourcesFor returns a list of resources that address the same underlying data as resource. + // If subresource is specified, only equivalent resources which also have the same subresource are included. + // The specified resource can be included in the returned list. + EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource + // KindFor returns the kind expected by the specified resource[/subresource]. + // A zero value is returned if the kind is unknown. + KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind +} + +// EquivalentResourceRegistry provides an EquivalentResourceMapper interface, +// and allows registering known resource[/subresource] -> kind +type EquivalentResourceRegistry interface { + EquivalentResourceMapper + // RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind. 
+ RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind)
+}
+
+// ResourceVersioner provides methods for setting and retrieving
+// the resource version from an API object.
+type ResourceVersioner interface {
+ SetResourceVersion(obj Object, version string) error
+ ResourceVersion(obj Object) (string, error)
+}
+
+// SelfLinker provides methods for setting and retrieving the SelfLink field of an API object.
+type SelfLinker interface {
+ SetSelfLink(obj Object, selfLink string) error
+ SelfLink(obj Object) (string, error)
+
+ // Knowing Name is sometimes necessary to use a SelfLinker.
+ Name(obj Object) (string, error)
+ // Knowing Namespace is sometimes necessary to use a SelfLinker
+ Namespace(obj Object) (string, error)
+}
+
+// Object interface must be supported by all API types registered with Scheme. Since objects in a scheme are
+// expected to be serialized to the wire, the interface an Object must provide to the Scheme allows
+// serializers to set the kind, version, and group the object is represented as. An Object may choose
+// to return a no-op ObjectKindAccessor in cases where it is not expected to be serialized.
+type Object interface {
+ GetObjectKind() schema.ObjectKind
+ DeepCopyObject() Object
+}
+
+// CacheableObject allows an object to cache its different serializations
+// to avoid performing the same serialization multiple times.
+type CacheableObject interface {
+ // CacheEncode writes an object to a stream. The <encode> function will
+ // be used in case of cache miss. The <encode> function takes ownership
+ // of the object.
+ // If CacheableObject is a wrapper, then deep-copy of the wrapped object
+ // should be passed to <encode> function.
+ // CacheEncode assumes that for two different calls with the same <id>,
+ // <encode> function will also be the same.
+ CacheEncode(id Identifier, encode func(Object, io.Writer) error, w io.Writer) error
+ // GetObject returns a deep-copy of an object to be encoded - the caller of
+ // GetObject() is the owner of returned object. The reason for making a copy
+ // is to avoid bugs, where caller modifies the object and forgets to copy it,
+ // thus modifying the object for everyone.
+ // The object returned by GetObject should be the same as the one that is supposed
+ // to be passed to <encode> function in CacheEncode method.
+ // If CacheableObject is a wrapper, the copy of wrapped object should be returned.
+ GetObject() Object
+}
+
+// Unstructured objects store values as map[string]interface{}, with only values that can be serialized
+// to JSON allowed.
+type Unstructured interface {
+ Object
+ // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data.
+ // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info.
+ NewEmptyInstance() Unstructured
+ // UnstructuredContent returns a non-nil map with this object's contents. Values may be
+ // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to
+ // and from JSON. SetUnstructuredContent should be used to mutate the contents.
+ UnstructuredContent() map[string]interface{}
+ // SetUnstructuredContent updates the object content to match the provided map.
+ SetUnstructuredContent(map[string]interface{})
+ // IsList returns true if this type is a list or matches the list convention - has an array called "items". 
+ IsList() bool + // EachListItem should pass a single item out of the list as an Object to the provided function. Any + // error should terminate the iteration. If IsList() returns false, this method should return an error + // instead of calling the provided function. + EachListItem(func(Object) error) error +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go new file mode 100644 index 0000000..3ff8461 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type equivalentResourceRegistry struct { + // keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups). + // if null, or if "" is returned, resource.String() is used as the key + keyFunc func(resource schema.GroupResource) string + // resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources). + // main resources are stored with subresource="". + resources map[string]map[string][]schema.GroupVersionResource + // kinds maps resource -> subresource -> kind + kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind + // keys caches the computed key for each GroupResource + keys map[schema.GroupResource]string + + mutex sync.RWMutex +} + +var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil) +var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil) + +// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent. +func NewEquivalentResourceRegistry() EquivalentResourceRegistry { + return &equivalentResourceRegistry{} +} + +// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function. +// If "" is returned by the function, GroupResource#String is used as the identity. +// GroupResources with the same identity string are considered equivalent. 
+func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry { + return &equivalentResourceRegistry{keyFunc: keyFunc} +} + +func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.resources[r.keys[resource.GroupResource()]][subresource] +} +func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind { + r.mutex.RLock() + defer r.mutex.RUnlock() + return r.kinds[resource][subresource] +} +func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) { + r.mutex.Lock() + defer r.mutex.Unlock() + if r.kinds == nil { + r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{} + } + if r.kinds[resource] == nil { + r.kinds[resource] = map[string]schema.GroupVersionKind{} + } + r.kinds[resource][subresource] = kind + + // get the shared key of the parent resource + key := "" + gr := resource.GroupResource() + if r.keyFunc != nil { + key = r.keyFunc(gr) + } + if key == "" { + key = gr.String() + } + + if r.keys == nil { + r.keys = map[schema.GroupResource]string{} + } + r.keys[gr] = key + + if r.resources == nil { + r.resources = map[string]map[string][]schema.GroupVersionResource{} + } + if r.resources[key] == nil { + r.resources[key] = map[string][]schema.GroupVersionResource{} + } + r.resources[key][subresource] = append(r.resources[key][subresource], resource) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go new file mode 100644 index 0000000..159b301 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go @@ -0,0 +1,146 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// NegotiateError is returned when a ClientNegotiator is unable to locate +// a serializer for the requested operation. 
+type NegotiateError struct { + ContentType string + Stream bool +} + +func (e NegotiateError) Error() string { + if e.Stream { + return fmt.Sprintf("no stream serializers registered for %s", e.ContentType) + } + return fmt.Sprintf("no serializers registered for %s", e.ContentType) +} + +type clientNegotiator struct { + serializer NegotiatedSerializer + encode, decode GroupVersioner +} + +func (n *clientNegotiator) Encoder(contentType string, params map[string]string) (Encoder, error) { + // TODO: `pretty=1` is handled in NegotiateOutputMediaType, consider moving it to this method + // if client negotiators truly need to use it + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, NegotiateError{ContentType: contentType} + } + info = mediaTypes[0] + } + return n.serializer.EncoderForVersion(info.Serializer, n.encode), nil +} + +func (n *clientNegotiator) Decoder(contentType string, params map[string]string) (Decoder, error) { + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, NegotiateError{ContentType: contentType} + } + info = mediaTypes[0] + } + return n.serializer.DecoderToVersion(info.Serializer, n.decode), nil +} + +func (n *clientNegotiator) StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) { + mediaTypes := n.serializer.SupportedMediaTypes() + info, ok := SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, nil, nil, NegotiateError{ContentType: contentType, Stream: true} + } + info = mediaTypes[0] + } + if info.StreamSerializer == nil { + return nil, nil, nil, NegotiateError{ContentType: info.MediaType, Stream: true} + } + return n.serializer.DecoderToVersion(info.Serializer, n.decode), info.StreamSerializer.Serializer, info.StreamSerializer.Framer, nil +} + +// NewClientNegotiator will attempt to retrieve the appropriate encoder, decoder, or +// stream decoder for a given content type. Does not perform any conversion, but will +// encode the object to the desired group, version, and kind. Use when creating a client. +func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { + return &clientNegotiator{ + serializer: serializer, + encode: gv, + } +} + +// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver +// where objects are converted to gv prior to sending and decoded to their internal representation prior +// to retrieval. +// +// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release. +func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { + decode := schema.GroupVersions{ + { + Group: gv.Group, + Version: APIVersionInternal, + }, + // always include the legacy group as a decoding target to handle non-error `Status` return types + { + Group: "", + Version: APIVersionInternal, + }, + } + return &clientNegotiator{ + encode: gv, + decode: decode, + serializer: serializer, + } +} + +// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used +// for testing or when the caller is taking responsibility for setting the GVK on encoded objects. 
+func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator { + return &clientNegotiator{ + serializer: &simpleNegotiatedSerializer{info: info}, + encode: gv, + } +} + +type simpleNegotiatedSerializer struct { + info SerializerInfo +} + +func NewSimpleNegotiatedSerializer(info SerializerInfo) NegotiatedSerializer { + return &simpleNegotiatedSerializer{info: info} +} + +func (n *simpleNegotiatedSerializer) SupportedMediaTypes() []SerializerInfo { + return []SerializerInfo{n.info} +} + +func (n *simpleNegotiatedSerializer) EncoderForVersion(e Encoder, _ GroupVersioner) Encoder { + return e +} + +func (n *simpleNegotiatedSerializer) DecoderToVersion(d Decoder, _gv GroupVersioner) Decoder { + return d +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go new file mode 100644 index 0000000..1cd2e4c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -0,0 +1,31 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import "k8s.io/apimachinery/pkg/runtime/schema" + +// SetGroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} + +// GroupVersionKind satisfies the ObjectKind interface for all objects that embed TypeMeta +func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} + +func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go new file mode 100644 index 0000000..29d3ac4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -0,0 +1,59 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto + +package schema + +import ( + fmt "fmt" + + math "math" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) +} + +var fileDescriptor_0462724132518e0d = []byte{ + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, + 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, + 0x5e, 0xe6, 0x24, 0x57, 0xc7, 0xb2, 0xfc, 0x47, 0x8e, 0x5d, 0xa9, 0xac, 0x8f, 0xd0, 0xc7, 0x0a, + 0x0c, 0x0c, 0x6c, 0xdc, 0x17, 0xa9, 0x64, 0x07, 0x94, 0xdd, 0x4f, 0xa7, 0xcf, 0xf7, 0xf3, 0x68, + 0xfe, 0x27, 0xa1, 0x3d, 0x9a, 0xdc, 0x51, 0x74, 0x94, 0x68, 0xc2, 0x0b, 0xb9, 0xc1, 0x47, 0xdc, + 0x1f, 0x32, 0x68, 0x2b, 0xfb, 0x51, 0x3b, 0x8a, 0x57, 0x0c, 0x46, 0x61, 0xcc, 0x2e, 0x69, 0x4b, + 0x38, 0xf5, 0x23, 0x59, 0x89, 0x8a, 0x1c, 0x45, 0x99, 0x68, 0x10, 0x21, 0xfa, 0xe4, 0xbf, 0x7e, + 0x9a, 0x13, 0xef, 0x4e, 0x04, 0xa3, 0xc4, 0xee, 0x44, 0x73, 0xdf, 0x7f, 0x4a, 0xa7, 0x31, 0x77, + 0xa2, 0xf7, 0x16, 0x95, 0x57, 0x1e, 0x2b, 0xef, 0xf2, 0xb9, 0xae, 0x3a, 0xea, 0xd5, 0xb2, 0x87, + 0xdf, 0x79, 0x03, 0xb6, 0x6c, 0xc0, 0xd6, 0x0d, 0xd8, 0xad, 0x00, 0x9f, 0x0b, 0xf0, 0xa5, 0x00, + 0x5f, 0x0b, 0xf0, 0x47, 0x01, 0x7e, 0x7f, 0x02, 0x3b, 0x7d, 0xb4, 0xf8, 0x2b, 0x00, 0x00, 0xff, + 0xff, 0xba, 0x7e, 0x65, 0xf4, 0xd6, 0x00, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto new file mode 100644 index 0000000..5aeeaa1 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto @@ -0,0 +1,26 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.runtime.schema; + +// Package-wide variables from generator "generated". +option go_package = "schema"; + diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go new file mode 100644 index 0000000..6361033 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -0,0 +1,314 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "fmt" + "strings" +) + +// ParseResourceArg takes the common style of string which may be either `resource.group.com` or `resource.version.group.com` +// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended +// but with a knowledge of all GroupVersions, calling code can take a very good guess. If there are only two segments, then +// `*GroupVersionResource` is nil. +// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource` +func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) { + var gvr *GroupVersionResource + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) + gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]} + } + + return gvr, ParseGroupResource(arg) +} + +// ParseKindArg takes the common style of string which may be either `Kind.group.com` or `Kind.version.group.com` +// and parses it out into both possibilities. This code takes no responsibility for knowing which representation was intended +// but with a knowledge of all GroupKinds, calling code can take a very good guess. If there are only two segments, then +// `*GroupVersionResource` is nil. +// `Kind.group.com` -> `group=com, version=group, kind=Kind` and `group=group.com, kind=Kind` +func ParseKindArg(arg string) (*GroupVersionKind, GroupKind) { + var gvk *GroupVersionKind + if strings.Count(arg, ".") >= 2 { + s := strings.SplitN(arg, ".", 3) + gvk = &GroupVersionKind{Group: s[2], Version: s[1], Kind: s[0]} + } + + return gvk, ParseGroupKind(arg) +} + +// GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupResource struct { + Group string + Resource string +} + +func (gr GroupResource) WithVersion(version string) GroupVersionResource { + return GroupVersionResource{Group: gr.Group, Version: version, Resource: gr.Resource} +} + +func (gr GroupResource) Empty() bool { + return len(gr.Group) == 0 && len(gr.Resource) == 0 +} + +func (gr GroupResource) String() string { + if len(gr.Group) == 0 { + return gr.Resource + } + return gr.Resource + "." + gr.Group +} + +func ParseGroupKind(gk string) GroupKind { + i := strings.Index(gk, ".") + if i == -1 { + return GroupKind{Kind: gk} + } + + return GroupKind{Group: gk[i+1:], Kind: gk[:i]} +} + +// ParseGroupResource turns "resource.group" string into a GroupResource struct. Empty strings are allowed +// for each field. +func ParseGroupResource(gr string) GroupResource { + if i := strings.Index(gr, "."); i >= 0 { + return GroupResource{Group: gr[i+1:], Resource: gr[:i]} + } + return GroupResource{Resource: gr} +} + +// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. 
It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionResource struct { + Group string + Version string + Resource string +} + +func (gvr GroupVersionResource) Empty() bool { + return len(gvr.Group) == 0 && len(gvr.Version) == 0 && len(gvr.Resource) == 0 +} + +func (gvr GroupVersionResource) GroupResource() GroupResource { + return GroupResource{Group: gvr.Group, Resource: gvr.Resource} +} + +func (gvr GroupVersionResource) GroupVersion() GroupVersion { + return GroupVersion{Group: gvr.Group, Version: gvr.Version} +} + +func (gvr GroupVersionResource) String() string { + return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") +} + +// GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying +// concepts during lookup stages without having partially valid types +type GroupKind struct { + Group string + Kind string +} + +func (gk GroupKind) Empty() bool { + return len(gk.Group) == 0 && len(gk.Kind) == 0 +} + +func (gk GroupKind) WithVersion(version string) GroupVersionKind { + return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} +} + +func (gk GroupKind) String() string { + if len(gk.Group) == 0 { + return gk.Kind + } + return gk.Kind + "." + gk.Group +} + +// GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion +// to avoid automatic coercion. It doesn't use a GroupVersion to avoid custom marshalling +type GroupVersionKind struct { + Group string + Version string + Kind string +} + +// Empty returns true if group, version, and kind are empty +func (gvk GroupVersionKind) Empty() bool { + return len(gvk.Group) == 0 && len(gvk.Version) == 0 && len(gvk.Kind) == 0 +} + +func (gvk GroupVersionKind) GroupKind() GroupKind { + return GroupKind{Group: gvk.Group, Kind: gvk.Kind} +} + +func (gvk GroupVersionKind) GroupVersion() GroupVersion { + return GroupVersion{Group: gvk.Group, Version: gvk.Version} +} + +func (gvk GroupVersionKind) String() string { + return gvk.Group + "/" + gvk.Version + ", Kind=" + gvk.Kind +} + +// GroupVersion contains the "group" and the "version", which uniquely identifies the API. +type GroupVersion struct { + Group string + Version string +} + +// Empty returns true if group and version are empty +func (gv GroupVersion) Empty() bool { + return len(gv.Group) == 0 && len(gv.Version) == 0 +} + +// String puts "group" and "version" into a single "group/version" string. For the legacy v1 +// it returns "v1". +func (gv GroupVersion) String() string { + // special case the internal apiVersion for the legacy kube types + if gv.Empty() { + return "" + } + + // special case of "v1" for backward compatibility + if len(gv.Group) == 0 && gv.Version == "v1" { + return gv.Version + } + if len(gv.Group) > 0 { + return gv.Group + "/" + gv.Version + } + return gv.Version +} + +// Identifier implements runtime.GroupVersioner interface. +func (gv GroupVersion) Identifier() string { + return gv.String() +} + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. It prefers a match to group and version over just group. +// TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersion and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. 
+func (gv GroupVersion) KindForGroupVersionKinds(kinds []GroupVersionKind) (target GroupVersionKind, ok bool) { + for _, gvk := range kinds { + if gvk.Group == gv.Group && gvk.Version == gv.Version { + return gvk, true + } + } + for _, gvk := range kinds { + if gvk.Group == gv.Group { + return gv.WithKind(gvk.Kind), true + } + } + return GroupVersionKind{}, false +} + +// ParseGroupVersion turns "group/version" string into a GroupVersion struct. It reports error +// if it cannot parse the string. +func ParseGroupVersion(gv string) (GroupVersion, error) { + // this can be the internal version for the legacy kube types + // TODO once we've cleared the last uses as strings, this special case should be removed. + if (len(gv) == 0) || (gv == "/") { + return GroupVersion{}, nil + } + + switch strings.Count(gv, "/") { + case 0: + return GroupVersion{"", gv}, nil + case 1: + i := strings.Index(gv, "/") + return GroupVersion{gv[:i], gv[i+1:]}, nil + default: + return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv) + } +} + +// WithKind creates a GroupVersionKind based on the method receiver's GroupVersion and the passed Kind. +func (gv GroupVersion) WithKind(kind string) GroupVersionKind { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} +} + +// WithResource creates a GroupVersionResource based on the method receiver's GroupVersion and the passed Resource. +func (gv GroupVersion) WithResource(resource string) GroupVersionResource { + return GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: resource} +} + +// GroupVersions can be used to represent a set of desired group versions. +// TODO: Move GroupVersions to a package under pkg/runtime, since it's used by scheme. +// TODO: Introduce an adapter type between GroupVersions and runtime.GroupVersioner, and use LegacyCodec(GroupVersion) +// in fewer places. +type GroupVersions []GroupVersion + +// Identifier implements runtime.GroupVersioner interface. +func (gv GroupVersions) Identifier() string { + groupVersions := make([]string, 0, len(gv)) + for i := range gv { + groupVersions = append(groupVersions, gv[i].String()) + } + return fmt.Sprintf("[%s]", strings.Join(groupVersions, ",")) +} + +// KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false +// if none of the options match the group. +func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { + var targets []GroupVersionKind + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + targets = append(targets, target) + } + if len(targets) == 1 { + return targets[0], true + } + if len(targets) > 1 { + return bestMatch(kinds, targets), true + } + return GroupVersionKind{}, false +} + +// bestMatch tries to pick best matching GroupVersionKind and falls back to the first +// found if no exact match exists. +func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersionKind { + for _, gvk := range targets { + for _, k := range kinds { + if k == gvk { + return k + } + } + } + return targets[0] +} + +// ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that +// do not use TypeMeta. 
+func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk.Empty() { + return "", "" + } + return gvk.GroupVersion().String(), gvk.Kind +} + +// FromAPIVersionAndKind returns a GVK representing the provided fields for types that +// do not use TypeMeta. This method exists to support test types and legacy serializations +// that have a distinct group and kind. +// TODO: further reduce usage of this method. +func FromAPIVersionAndKind(apiVersion, kind string) GroupVersionKind { + if gv, err := ParseGroupVersion(apiVersion); err == nil { + return GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: kind} + } + return GroupVersionKind{Kind: kind} +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go new file mode 100644 index 0000000..b570668 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/interfaces.go @@ -0,0 +1,40 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +// All objects that are serialized from a Scheme encode their type information. This interface is used +// by serialization to set type information from the Scheme onto the serialized version of an object. +// For objects that cannot be serialized or have unique requirements, this interface may be a no-op. +type ObjectKind interface { + // SetGroupVersionKind sets or clears the intended serialized kind of an object. Passing kind nil + // should clear the current setting. + SetGroupVersionKind(kind GroupVersionKind) + // GroupVersionKind returns the stored group, version, and kind of an object, or nil if the object does + // not expose or provide these fields. + GroupVersionKind() GroupVersionKind +} + +// EmptyObjectKind implements the ObjectKind interface as a noop +var EmptyObjectKind = emptyObjectKind{} + +type emptyObjectKind struct{} + +// SetGroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) SetGroupVersionKind(gvk GroupVersionKind) {} + +// GroupVersionKind implements the ObjectKind interface +func (emptyObjectKind) GroupVersionKind() GroupVersionKind { return GroupVersionKind{} } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go new file mode 100644 index 0000000..4b739ec --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -0,0 +1,715 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package runtime + +import ( + "fmt" + "net/url" + "reflect" + "strings" + + "k8s.io/apimachinery/pkg/conversion" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/naming" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Scheme defines methods for serializing and deserializing API objects, a type +// registry for converting group, version, and kind information to and from Go +// schemas, and mappings between Go schemas of different versions. A scheme is the +// foundation for a versioned API and versioned configuration over time. +// +// In a Scheme, a Type is a particular Go struct, a Version is a point-in-time +// identifier for a particular representation of that Type (typically backwards +// compatible), a Kind is the unique name for that Type within the Version, and a +// Group identifies a set of Versions, Kinds, and Types that evolve over time. An +// Unversioned Type is one that is not yet formally bound to a type and is promised +// to be backwards compatible (effectively a "v1" of a Type that does not expect +// to break in the future). +// +// Schemes are not expected to change at runtime and are only threadsafe after +// registration is complete. +type Scheme struct { + // versionMap allows one to figure out the go type of an object with + // the given version and name. + gvkToType map[schema.GroupVersionKind]reflect.Type + + // typeToGroupVersion allows one to find metadata for a given go object. + // The reflect.Type we index by should *not* be a pointer. + typeToGVK map[reflect.Type][]schema.GroupVersionKind + + // unversionedTypes are transformed without conversion in ConvertToVersion. + unversionedTypes map[reflect.Type]schema.GroupVersionKind + + // unversionedKinds are the names of kinds that can be created in the context of any group + // or version + // TODO: resolve the status of unversioned types. + unversionedKinds map[string]reflect.Type + + // Map from version and resource to the corresponding func to convert + // resource field labels in that version to internal version. + fieldLabelConversionFuncs map[schema.GroupVersionKind]FieldLabelConversionFunc + + // defaulterFuncs is an array of interfaces to be called with an object to provide defaulting + // the provided object must be a pointer. + defaulterFuncs map[reflect.Type]func(interface{}) + + // converter stores all registered conversion functions. It also has + // default converting behavior. + converter *conversion.Converter + + // versionPriority is a map of groups to ordered lists of versions for those groups indicating the + // default priorities of these versions as registered in the scheme + versionPriority map[string][]string + + // observedVersions keeps track of the order we've seen versions during type registration + observedVersions []schema.GroupVersion + + // schemeName is the name of this scheme. If you don't specify a name, the stack of the NewScheme caller will be used. + // This is useful for error reporting to indicate the origin of the scheme. + schemeName string +} + +// FieldLabelConversionFunc converts a field selector to internal representation. +type FieldLabelConversionFunc func(label, value string) (internalLabel, internalValue string, err error) + +// NewScheme creates a new Scheme. This scheme is pluggable by default. 
+func NewScheme() *Scheme { + s := &Scheme{ + gvkToType: map[schema.GroupVersionKind]reflect.Type{}, + typeToGVK: map[reflect.Type][]schema.GroupVersionKind{}, + unversionedTypes: map[reflect.Type]schema.GroupVersionKind{}, + unversionedKinds: map[string]reflect.Type{}, + fieldLabelConversionFuncs: map[schema.GroupVersionKind]FieldLabelConversionFunc{}, + defaulterFuncs: map[reflect.Type]func(interface{}){}, + versionPriority: map[string][]string{}, + schemeName: naming.GetNameFromCallsite(internalPackages...), + } + s.converter = conversion.NewConverter(s.nameFunc) + + // Enable couple default conversions by default. + utilruntime.Must(RegisterEmbeddedConversions(s)) + utilruntime.Must(RegisterStringConversions(s)) + + utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) + utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) + return s +} + +// nameFunc returns the name of the type that we wish to use to determine when two types attempt +// a conversion. Defaults to the go name of the type if the type is not registered. +func (s *Scheme) nameFunc(t reflect.Type) string { + // find the preferred names for this type + gvks, ok := s.typeToGVK[t] + if !ok { + return t.Name() + } + + for _, gvk := range gvks { + internalGV := gvk.GroupVersion() + internalGV.Version = APIVersionInternal // this is hacky and maybe should be passed in + internalGVK := internalGV.WithKind(gvk.Kind) + + if internalType, exists := s.gvkToType[internalGVK]; exists { + return s.typeToGVK[internalType][0].Kind + } + } + + return gvks[0].Kind +} + +// fromScope gets the input version, desired output version, and desired Scheme +// from a conversion.Scope. +func (s *Scheme) fromScope(scope conversion.Scope) *Scheme { + return s +} + +// Converter allows access to the converter for the scheme +func (s *Scheme) Converter() *conversion.Converter { + return s.converter +} + +// AddUnversionedTypes registers the provided types as "unversioned", which means that they follow special rules. +// Whenever an object of this type is serialized, it is serialized with the provided group version and is not +// converted. Thus unversioned objects are expected to remain backwards compatible forever, as if they were in an +// API group and version that would never be updated. +// +// TODO: there is discussion about removing unversioned and replacing it with objects that are manifest into +// every version with particular schemas. Resolve this method at that point. +func (s *Scheme) AddUnversionedTypes(version schema.GroupVersion, types ...Object) { + s.addObservedVersion(version) + s.AddKnownTypes(version, types...) + for _, obj := range types { + t := reflect.TypeOf(obj).Elem() + gvk := version.WithKind(t.Name()) + s.unversionedTypes[t] = gvk + if old, ok := s.unversionedKinds[gvk.Kind]; ok && t != old { + panic(fmt.Sprintf("%v.%v has already been registered as unversioned kind %q - kind name must be unique in scheme %q", old.PkgPath(), old.Name(), gvk, s.schemeName)) + } + s.unversionedKinds[gvk.Kind] = t + } +} + +// AddKnownTypes registers all types passed in 'types' as being members of version 'version'. +// All objects passed to types should be pointers to structs. The name that go reports for +// the struct becomes the "kind" field when encoding. 
Version may not be empty - use the +// APIVersionInternal constant if you have a type that does not have a formal version. +func (s *Scheme) AddKnownTypes(gv schema.GroupVersion, types ...Object) { + s.addObservedVersion(gv) + for _, obj := range types { + t := reflect.TypeOf(obj) + if t.Kind() != reflect.Ptr { + panic("All types must be pointers to structs.") + } + t = t.Elem() + s.AddKnownTypeWithName(gv.WithKind(t.Name()), obj) + } +} + +// AddKnownTypeWithName is like AddKnownTypes, but it lets you specify what this type should +// be encoded as. Useful for testing when you don't want to make multiple packages to define +// your structs. Version may not be empty - use the APIVersionInternal constant if you have a +// type that does not have a formal version. +func (s *Scheme) AddKnownTypeWithName(gvk schema.GroupVersionKind, obj Object) { + s.addObservedVersion(gvk.GroupVersion()) + t := reflect.TypeOf(obj) + if len(gvk.Version) == 0 { + panic(fmt.Sprintf("version is required on all types: %s %v", gvk, t)) + } + if t.Kind() != reflect.Ptr { + panic("All types must be pointers to structs.") + } + t = t.Elem() + if t.Kind() != reflect.Struct { + panic("All types must be pointers to structs.") + } + + if oldT, found := s.gvkToType[gvk]; found && oldT != t { + panic(fmt.Sprintf("Double registration of different types for %v: old=%v.%v, new=%v.%v in scheme %q", gvk, oldT.PkgPath(), oldT.Name(), t.PkgPath(), t.Name(), s.schemeName)) + } + + s.gvkToType[gvk] = t + + for _, existingGvk := range s.typeToGVK[t] { + if existingGvk == gvk { + return + } + } + s.typeToGVK[t] = append(s.typeToGVK[t], gvk) +} + +// KnownTypes returns the types known for the given version. +func (s *Scheme) KnownTypes(gv schema.GroupVersion) map[string]reflect.Type { + types := make(map[string]reflect.Type) + for gvk, t := range s.gvkToType { + if gv != gvk.GroupVersion() { + continue + } + + types[gvk.Kind] = t + } + return types +} + +// AllKnownTypes returns the all known types. +func (s *Scheme) AllKnownTypes() map[schema.GroupVersionKind]reflect.Type { + return s.gvkToType +} + +// ObjectKinds returns all possible group,version,kind of the go object, true if the +// object is considered unversioned, or an error if it's not a pointer or is unregistered. +func (s *Scheme) ObjectKinds(obj Object) ([]schema.GroupVersionKind, bool, error) { + // Unstructured objects are always considered to have their declared GVK + if _, ok := obj.(Unstructured); ok { + // we require that the GVK be populated in order to recognize the object + gvk := obj.GetObjectKind().GroupVersionKind() + if len(gvk.Kind) == 0 { + return nil, false, NewMissingKindErr("unstructured object has no kind") + } + if len(gvk.Version) == 0 { + return nil, false, NewMissingVersionErr("unstructured object has no version") + } + return []schema.GroupVersionKind{gvk}, false, nil + } + + v, err := conversion.EnforcePtr(obj) + if err != nil { + return nil, false, err + } + t := v.Type() + + gvks, ok := s.typeToGVK[t] + if !ok { + return nil, false, NewNotRegisteredErrForType(s.schemeName, t) + } + _, unversionedType := s.unversionedTypes[t] + + return gvks, unversionedType, nil +} + +// Recognizes returns true if the scheme is able to handle the provided group,version,kind +// of an object. 
+func (s *Scheme) Recognizes(gvk schema.GroupVersionKind) bool { + _, exists := s.gvkToType[gvk] + return exists +} + +func (s *Scheme) IsUnversioned(obj Object) (bool, bool) { + v, err := conversion.EnforcePtr(obj) + if err != nil { + return false, false + } + t := v.Type() + + if _, ok := s.typeToGVK[t]; !ok { + return false, false + } + _, ok := s.unversionedTypes[t] + return ok, true +} + +// New returns a new API object of the given version and name, or an error if it hasn't +// been registered. The version and kind fields must be specified. +func (s *Scheme) New(kind schema.GroupVersionKind) (Object, error) { + if t, exists := s.gvkToType[kind]; exists { + return reflect.New(t).Interface().(Object), nil + } + + if t, exists := s.unversionedKinds[kind.Kind]; exists { + return reflect.New(t).Interface().(Object), nil + } + return nil, NewNotRegisteredErrForKind(s.schemeName, kind) +} + +// Log sets a logger on the scheme. For test purposes only +func (s *Scheme) Log(l conversion.DebugLogger) { + s.converter.Debug = l +} + +// AddIgnoredConversionType identifies a pair of types that should be skipped by +// conversion (because the data inside them is explicitly dropped during +// conversion). +func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error { + return s.converter.RegisterIgnoredConversion(from, to) +} + +// AddConversionFunc registers a function that converts between a and b by passing objects of those +// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce +// any other guarantee. +func (s *Scheme) AddConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error { + return s.converter.RegisterUntypedConversionFunc(a, b, fn) +} + +// AddGeneratedConversionFunc registers a function that converts between a and b by passing objects of those +// types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce +// any other guarantee. +func (s *Scheme) AddGeneratedConversionFunc(a, b interface{}, fn conversion.ConversionFunc) error { + return s.converter.RegisterGeneratedUntypedConversionFunc(a, b, fn) +} + +// AddFieldLabelConversionFunc adds a conversion function to convert field selectors +// of the given kind from the given version to internal version representation. +func (s *Scheme) AddFieldLabelConversionFunc(gvk schema.GroupVersionKind, conversionFunc FieldLabelConversionFunc) error { + s.fieldLabelConversionFuncs[gvk] = conversionFunc + return nil +} + +// RegisterInputDefaults sets the provided field mapping function and field matching +// as the defaults for the provided input type. The fn may be nil, in which case no +// mapping will happen by default. Use this method to register a mechanism for handling +// a specific input type in conversion, such as a map[string]string to structs. +func (s *Scheme) RegisterInputDefaults(in interface{}, fn conversion.FieldMappingFunc, defaultFlags conversion.FieldMatchingFlags) error { + return s.converter.RegisterInputDefaults(in, fn, defaultFlags) +} + +// AddTypeDefaultingFunc registers a function that is passed a pointer to an +// object and can default fields on the object. These functions will be invoked +// when Default() is called. The function will never be called unless the +// defaulted object matches srcType. If this function is invoked twice with the +// same srcType, the fn passed to the later call will be used instead. 
+func (s *Scheme) AddTypeDefaultingFunc(srcType Object, fn func(interface{})) { + s.defaulterFuncs[reflect.TypeOf(srcType)] = fn +} + +// Default sets defaults on the provided Object. +func (s *Scheme) Default(src Object) { + if fn, ok := s.defaulterFuncs[reflect.TypeOf(src)]; ok { + fn(src) + } +} + +// Convert will attempt to convert in into out. Both must be pointers. For easy +// testing of conversion functions. Returns an error if the conversion isn't +// possible. You can call this with types that haven't been registered (for example, +// a to test conversion of types that are nested within registered types). The +// context interface is passed to the convertor. Convert also supports Unstructured +// types and will convert them intelligently. +func (s *Scheme) Convert(in, out interface{}, context interface{}) error { + unstructuredIn, okIn := in.(Unstructured) + unstructuredOut, okOut := out.(Unstructured) + switch { + case okIn && okOut: + // converting unstructured input to an unstructured output is a straight copy - unstructured + // is a "smart holder" and the contents are passed by reference between the two objects + unstructuredOut.SetUnstructuredContent(unstructuredIn.UnstructuredContent()) + return nil + + case okOut: + // if the output is an unstructured object, use the standard Go type to unstructured + // conversion. The object must not be internal. + obj, ok := in.(Object) + if !ok { + return fmt.Errorf("unable to convert object type %T to Unstructured, must be a runtime.Object", in) + } + gvks, unversioned, err := s.ObjectKinds(obj) + if err != nil { + return err + } + gvk := gvks[0] + + // if no conversion is necessary, convert immediately + if unversioned || gvk.Version != APIVersionInternal { + content, err := DefaultUnstructuredConverter.ToUnstructured(in) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + unstructuredOut.GetObjectKind().SetGroupVersionKind(gvk) + return nil + } + + // attempt to convert the object to an external version first. + target, ok := context.(GroupVersioner) + if !ok { + return fmt.Errorf("unable to convert the internal object type %T to Unstructured without providing a preferred version to convert to", in) + } + // Convert is implicitly unsafe, so we don't need to perform a safe conversion + versioned, err := s.UnsafeConvertToVersion(obj, target) + if err != nil { + return err + } + content, err := DefaultUnstructuredConverter.ToUnstructured(versioned) + if err != nil { + return err + } + unstructuredOut.SetUnstructuredContent(content) + return nil + + case okIn: + // converting an unstructured object to any type is modeled by first converting + // the input to a versioned type, then running standard conversions + typed, err := s.unstructuredToTyped(unstructuredIn) + if err != nil { + return err + } + in = typed + } + + flags, meta := s.generateConvertMeta(in) + meta.Context = context + if flags == 0 { + flags = conversion.AllowDifferentFieldTypeNames + } + return s.converter.Convert(in, out, flags, meta) +} + +// ConvertFieldLabel alters the given field label and value for an kind field selector from +// versioned representation to an unversioned one or returns an error. 
+func (s *Scheme) ConvertFieldLabel(gvk schema.GroupVersionKind, label, value string) (string, string, error) { + conversionFunc, ok := s.fieldLabelConversionFuncs[gvk] + if !ok { + return DefaultMetaV1FieldSelectorConversion(label, value) + } + return conversionFunc(label, value) +} + +// ConvertToVersion attempts to convert an input object to its matching Kind in another +// version within this scheme. Will return an error if the provided version does not +// contain the inKind (or a mapping by name defined with AddKnownTypeWithName). Will also +// return an error if the conversion does not result in a valid Object being +// returned. Passes target down to the conversion methods as the Context on the scope. +func (s *Scheme) ConvertToVersion(in Object, target GroupVersioner) (Object, error) { + return s.convertToVersion(true, in, target) +} + +// UnsafeConvertToVersion will convert in to the provided target if such a conversion is possible, +// but does not guarantee the output object does not share fields with the input object. It attempts to be as +// efficient as possible when doing conversion. +func (s *Scheme) UnsafeConvertToVersion(in Object, target GroupVersioner) (Object, error) { + return s.convertToVersion(false, in, target) +} + +// convertToVersion handles conversion with an optional copy. +func (s *Scheme) convertToVersion(copy bool, in Object, target GroupVersioner) (Object, error) { + var t reflect.Type + + if u, ok := in.(Unstructured); ok { + typed, err := s.unstructuredToTyped(u) + if err != nil { + return nil, err + } + + in = typed + // unstructuredToTyped returns an Object, which must be a pointer to a struct. + t = reflect.TypeOf(in).Elem() + + } else { + // determine the incoming kinds with as few allocations as possible. + t = reflect.TypeOf(in) + if t.Kind() != reflect.Ptr { + return nil, fmt.Errorf("only pointer types may be converted: %v", t) + } + t = t.Elem() + if t.Kind() != reflect.Struct { + return nil, fmt.Errorf("only pointers to struct types may be converted: %v", t) + } + } + + kinds, ok := s.typeToGVK[t] + if !ok || len(kinds) == 0 { + return nil, NewNotRegisteredErrForType(s.schemeName, t) + } + + gvk, ok := target.KindForGroupVersionKinds(kinds) + if !ok { + // try to see if this type is listed as unversioned (for legacy support) + // TODO: when we move to server API versions, we should completely remove the unversioned concept + if unversionedKind, ok := s.unversionedTypes[t]; ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { + return copyAndSetTargetKind(copy, in, gvk) + } + return copyAndSetTargetKind(copy, in, unversionedKind) + } + return nil, NewNotRegisteredErrForTarget(s.schemeName, t, target) + } + + // target wants to use the existing type, set kind and return (no conversion necessary) + for _, kind := range kinds { + if gvk == kind { + return copyAndSetTargetKind(copy, in, gvk) + } + } + + // type is unversioned, no conversion necessary + if unversionedKind, ok := s.unversionedTypes[t]; ok { + if gvk, ok := target.KindForGroupVersionKinds([]schema.GroupVersionKind{unversionedKind}); ok { + return copyAndSetTargetKind(copy, in, gvk) + } + return copyAndSetTargetKind(copy, in, unversionedKind) + } + + out, err := s.New(gvk) + if err != nil { + return nil, err + } + + if copy { + in = in.DeepCopyObject() + } + + flags, meta := s.generateConvertMeta(in) + meta.Context = target + if err := s.converter.Convert(in, out, flags, meta); err != nil { + return nil, err + } + + setTargetKind(out, 
gvk) + return out, nil +} + +// unstructuredToTyped attempts to transform an unstructured object to a typed +// object if possible. It will return an error if conversion is not possible, or the versioned +// Go form of the object. Note that this conversion will lose fields. +func (s *Scheme) unstructuredToTyped(in Unstructured) (Object, error) { + // the type must be something we recognize + gvks, _, err := s.ObjectKinds(in) + if err != nil { + return nil, err + } + typed, err := s.New(gvks[0]) + if err != nil { + return nil, err + } + if err := DefaultUnstructuredConverter.FromUnstructured(in.UnstructuredContent(), typed); err != nil { + return nil, fmt.Errorf("unable to convert unstructured object to %v: %v", gvks[0], err) + } + return typed, nil +} + +// generateConvertMeta constructs the meta value we pass to Convert. +func (s *Scheme) generateConvertMeta(in interface{}) (conversion.FieldMatchingFlags, *conversion.Meta) { + return s.converter.DefaultMeta(reflect.TypeOf(in)) +} + +// copyAndSetTargetKind performs a conditional copy before returning the object, or an error if copy was not successful. +func copyAndSetTargetKind(copy bool, obj Object, kind schema.GroupVersionKind) (Object, error) { + if copy { + obj = obj.DeepCopyObject() + } + setTargetKind(obj, kind) + return obj, nil +} + +// setTargetKind sets the kind on an object, taking into account whether the target kind is the internal version. +func setTargetKind(obj Object, kind schema.GroupVersionKind) { + if kind.Version == APIVersionInternal { + // internal is a special case + // TODO: look at removing the need to special case this + obj.GetObjectKind().SetGroupVersionKind(schema.GroupVersionKind{}) + return + } + obj.GetObjectKind().SetGroupVersionKind(kind) +} + +// SetVersionPriority allows specifying a precise order of priority. All specified versions must be in the same group, +// and the specified order overwrites any previously specified order for this group +func (s *Scheme) SetVersionPriority(versions ...schema.GroupVersion) error { + groups := sets.String{} + order := []string{} + for _, version := range versions { + if len(version.Version) == 0 || version.Version == APIVersionInternal { + return fmt.Errorf("internal versions cannot be prioritized: %v", version) + } + + groups.Insert(version.Group) + order = append(order, version.Version) + } + if len(groups) != 1 { + return fmt.Errorf("must register versions for exactly one group: %v", strings.Join(groups.List(), ", ")) + } + + s.versionPriority[groups.List()[0]] = order + return nil +} + +// PrioritizedVersionsForGroup returns versions for a single group in priority order +func (s *Scheme) PrioritizedVersionsForGroup(group string) []schema.GroupVersion { + ret := []schema.GroupVersion{} + for _, version := range s.versionPriority[group] { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + } + for _, observedVersion := range s.observedVersions { + if observedVersion.Group != group { + continue + } + found := false + for _, existing := range ret { + if existing == observedVersion { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + + return ret +} + +// PrioritizedVersionsAllGroups returns all known versions in their priority order. 
Groups are random, but +// versions for a single group are prioritized +func (s *Scheme) PrioritizedVersionsAllGroups() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for group, versions := range s.versionPriority { + for _, version := range versions { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + } + } + for _, observedVersion := range s.observedVersions { + found := false + for _, existing := range ret { + if existing == observedVersion { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + return ret +} + +// PreferredVersionAllGroups returns the most preferred version for every group. +// group ordering is random. +func (s *Scheme) PreferredVersionAllGroups() []schema.GroupVersion { + ret := []schema.GroupVersion{} + for group, versions := range s.versionPriority { + for _, version := range versions { + ret = append(ret, schema.GroupVersion{Group: group, Version: version}) + break + } + } + for _, observedVersion := range s.observedVersions { + found := false + for _, existing := range ret { + if existing.Group == observedVersion.Group { + found = true + break + } + } + if !found { + ret = append(ret, observedVersion) + } + } + + return ret +} + +// IsGroupRegistered returns true if types for the group have been registered with the scheme +func (s *Scheme) IsGroupRegistered(group string) bool { + for _, observedVersion := range s.observedVersions { + if observedVersion.Group == group { + return true + } + } + return false +} + +// IsVersionRegistered returns true if types for the version have been registered with the scheme +func (s *Scheme) IsVersionRegistered(version schema.GroupVersion) bool { + for _, observedVersion := range s.observedVersions { + if observedVersion == version { + return true + } + } + + return false +} + +func (s *Scheme) addObservedVersion(version schema.GroupVersion) { + if len(version.Version) == 0 || version.Version == APIVersionInternal { + return + } + for _, observedVersion := range s.observedVersions { + if observedVersion == version { + return + } + } + + s.observedVersions = append(s.observedVersions, version) +} + +func (s *Scheme) Name() string { + return s.schemeName +} + +// internalPackages are packages that ignored when creating a default reflector name. These packages are in the common +// call chains to NewReflector, so they'd be low entropy names for reflectors +var internalPackages = []string{"k8s.io/apimachinery/pkg/runtime/scheme.go"} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go new file mode 100644 index 0000000..944db48 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme_builder.go @@ -0,0 +1,48 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// SchemeBuilder collects functions that add things to a scheme. It's to allow +// code to compile without explicitly referencing generated types. 
You should +// declare one in each package that will have generated deep copy or conversion +// functions. +type SchemeBuilder []func(*Scheme) error + +// AddToScheme applies all the stored functions to the scheme. A non-nil error +// indicates that one function failed and the attempt was abandoned. +func (sb *SchemeBuilder) AddToScheme(s *Scheme) error { + for _, f := range *sb { + if err := f(s); err != nil { + return err + } + } + return nil +} + +// Register adds a scheme setup function to the list. +func (sb *SchemeBuilder) Register(funcs ...func(*Scheme) error) { + for _, f := range funcs { + *sb = append(*sb, f) + } +} + +// NewSchemeBuilder calls Register for you. +func NewSchemeBuilder(funcs ...func(*Scheme) error) SchemeBuilder { + var sb SchemeBuilder + sb.Register(funcs...) + return sb +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go new file mode 100644 index 0000000..f21b0ef --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -0,0 +1,324 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "mime" + "strings" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/json" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/runtime/serializer/versioning" +) + +// serializerExtensions are for serializers that are conditionally compiled in +var serializerExtensions = []func(*runtime.Scheme) (serializerType, bool){} + +type serializerType struct { + AcceptContentTypes []string + ContentType string + FileExtensions []string + // EncodesAsText should be true if this content type can be represented safely in UTF-8 + EncodesAsText bool + + Serializer runtime.Serializer + PrettySerializer runtime.Serializer + + AcceptStreamContentTypes []string + StreamContentType string + + Framer runtime.Framer + StreamSerializer runtime.Serializer +} + +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType { + jsonSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, + ) + jsonSerializerType := serializerType{ + AcceptContentTypes: []string{runtime.ContentTypeJSON}, + ContentType: runtime.ContentTypeJSON, + FileExtensions: []string{"json"}, + EncodesAsText: true, + Serializer: jsonSerializer, + + Framer: json.Framer, + StreamSerializer: jsonSerializer, + } + if options.Pretty { + jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict}, + ) + } + + yamlSerializer := json.NewSerializerWithOptions( + mf, scheme, scheme, + json.SerializerOptions{Yaml: true, Pretty: false, Strict: 
options.Strict}, + ) + protoSerializer := protobuf.NewSerializer(scheme, scheme) + protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) + + serializers := []serializerType{ + jsonSerializerType, + { + AcceptContentTypes: []string{runtime.ContentTypeYAML}, + ContentType: runtime.ContentTypeYAML, + FileExtensions: []string{"yaml"}, + EncodesAsText: true, + Serializer: yamlSerializer, + }, + { + AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, + ContentType: runtime.ContentTypeProtobuf, + FileExtensions: []string{"pb"}, + Serializer: protoSerializer, + + Framer: protobuf.LengthDelimitedFramer, + StreamSerializer: protoRawSerializer, + }, + } + + for _, fn := range serializerExtensions { + if serializer, ok := fn(scheme); ok { + serializers = append(serializers, serializer) + } + } + return serializers +} + +// CodecFactory provides methods for retrieving codecs and serializers for specific +// versions and content types. +type CodecFactory struct { + scheme *runtime.Scheme + serializers []serializerType + universal runtime.Decoder + accepts []runtime.SerializerInfo + + legacySerializer runtime.Serializer +} + +// CodecFactoryOptions holds the options for configuring CodecFactory behavior +type CodecFactoryOptions struct { + // Strict configures all serializers in strict mode + Strict bool + // Pretty includes a pretty serializer along with the non-pretty one + Pretty bool +} + +// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it. +// Functions implementing this type can be passed to the NewCodecFactory() constructor. +type CodecFactoryOptionsMutator func(*CodecFactoryOptions) + +// EnablePretty enables including a pretty serializer along with the non-pretty one +func EnablePretty(options *CodecFactoryOptions) { + options.Pretty = true +} + +// DisablePretty disables including a pretty serializer along with the non-pretty one +func DisablePretty(options *CodecFactoryOptions) { + options.Pretty = false +} + +// EnableStrict enables configuring all serializers in strict mode +func EnableStrict(options *CodecFactoryOptions) { + options.Strict = true +} + +// DisableStrict disables configuring all serializers in strict mode +func DisableStrict(options *CodecFactoryOptions) { + options.Strict = false +} + +// NewCodecFactory provides methods for retrieving serializers for the supported wire formats +// and conversion wrappers to define preferred internal and external versions. In the future, +// as the internal version is used less, callers may instead use a defaulting serializer and +// only convert objects which are shared internally (Status, common API machinery). +// +// Mutators can be passed to change the CodecFactoryOptions before construction of the factory. +// It is recommended to explicitly pass mutators instead of relying on defaults. +// By default, Pretty is enabled -- this is conformant with previously supported behavior. +// +// TODO: allow other codecs to be compiled in? +// TODO: accept a scheme interface +func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory { + options := CodecFactoryOptions{Pretty: true} + for _, fn := range mutators { + fn(&options) + } + + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options) + return newCodecFactory(scheme, serializers) +} + +// newCodecFactory is a helper for testing that allows a different metafactory to be specified. 
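Editor's sketch (illustrative only, not part of the vendored file): a typical way to construct a CodecFactory with the option mutators described above and round-trip an object through the legacy JSON codec. Registering metav1.Status under a bare "v1" group/version is an assumption made so the example stays self-contained.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer"
)

func main() {
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Version: "v1"} // hypothetical registration for the example
	scheme.AddKnownTypes(gv, &metav1.Status{})

	// Strict decoding, no pretty serializer.
	codecs := serializer.NewCodecFactory(scheme, serializer.EnableStrict, serializer.DisablePretty)

	codec := codecs.LegacyCodec(gv)
	data, err := runtime.Encode(codec, &metav1.Status{Message: "ok"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data)) // JSON with apiVersion/kind set by the codec

	obj, err := runtime.Decode(codecs.UniversalDeserializer(), data)
	if err != nil {
		panic(err)
	}
	fmt.Println(obj.GetObjectKind().GroupVersionKind())
}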
+func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) CodecFactory { + decoders := make([]runtime.Decoder, 0, len(serializers)) + var accepts []runtime.SerializerInfo + alreadyAccepted := make(map[string]struct{}) + + var legacySerializer runtime.Serializer + for _, d := range serializers { + decoders = append(decoders, d.Serializer) + for _, mediaType := range d.AcceptContentTypes { + if _, ok := alreadyAccepted[mediaType]; ok { + continue + } + alreadyAccepted[mediaType] = struct{}{} + info := runtime.SerializerInfo{ + MediaType: d.ContentType, + EncodesAsText: d.EncodesAsText, + Serializer: d.Serializer, + PrettySerializer: d.PrettySerializer, + } + + mediaType, _, err := mime.ParseMediaType(info.MediaType) + if err != nil { + panic(err) + } + parts := strings.SplitN(mediaType, "/", 2) + info.MediaTypeType = parts[0] + info.MediaTypeSubType = parts[1] + + if d.StreamSerializer != nil { + info.StreamSerializer = &runtime.StreamSerializerInfo{ + Serializer: d.StreamSerializer, + EncodesAsText: d.EncodesAsText, + Framer: d.Framer, + } + } + accepts = append(accepts, info) + if mediaType == runtime.ContentTypeJSON { + legacySerializer = d.Serializer + } + } + } + if legacySerializer == nil { + legacySerializer = serializers[0].Serializer + } + + return CodecFactory{ + scheme: scheme, + serializers: serializers, + universal: recognizer.NewDecoder(decoders...), + + accepts: accepts, + + legacySerializer: legacySerializer, + } +} + +// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the +// caller requests it. +func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer { + return WithoutConversionCodecFactory{f} +} + +// SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. +func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { + return f.accepts +} + +// LegacyCodec encodes output to a given API versions, and decodes output into the internal form from +// any recognized source. The returned codec will always encode output to JSON. If a type is not +// found in the list of versions an error will be returned. +// +// This method is deprecated - clients and servers should negotiate a serializer by mime-type and +// invoke CodecForVersions. Callers that need only to read data should use UniversalDecoder(). +// +// TODO: make this call exist only in pkg/api, and initialize it with the set of default versions. +// All other callers will be forced to request a Codec directly. +func (f CodecFactory) LegacyCodec(version ...schema.GroupVersion) runtime.Codec { + return versioning.NewDefaultingCodecForScheme(f.scheme, f.legacySerializer, f.universal, schema.GroupVersions(version), runtime.InternalGroupVersioner) +} + +// UniversalDeserializer can convert any stored data recognized by this factory into a Go object that satisfies +// runtime.Object. It does not perform conversion. It does not perform defaulting. +func (f CodecFactory) UniversalDeserializer() runtime.Decoder { + return f.universal +} + +// UniversalDecoder returns a runtime.Decoder capable of decoding all known API objects in all known formats. Used +// by clients that do not need to encode objects but want to deserialize API objects stored on disk. Only decodes +// objects in groups registered with the scheme. The GroupVersions passed may be used to select alternate +// versions of objects to return - by default, runtime.APIVersionInternal is used. 
If any versions are specified, +// unrecognized groups will be returned in the version they are encoded as (no conversion). This decoder performs +// defaulting. +// +// TODO: the decoder will eventually be removed in favor of dealing with objects in their versioned form +// TODO: only accept a group versioner +func (f CodecFactory) UniversalDecoder(versions ...schema.GroupVersion) runtime.Decoder { + var versioner runtime.GroupVersioner + if len(versions) == 0 { + versioner = runtime.InternalGroupVersioner + } else { + versioner = schema.GroupVersions(versions) + } + return f.CodecForVersions(nil, f.universal, nil, versioner) +} + +// CodecForVersions creates a codec with the provided serializer. If an object is decoded and its group is not in the list, +// it will default to runtime.APIVersionInternal. If encode is not specified for an object's group, the object is not +// converted. If encode or decode are nil, no conversion is performed. +func (f CodecFactory) CodecForVersions(encoder runtime.Encoder, decoder runtime.Decoder, encode runtime.GroupVersioner, decode runtime.GroupVersioner) runtime.Codec { + // TODO: these are for backcompat, remove them in the future + if encode == nil { + encode = runtime.DisabledGroupVersioner + } + if decode == nil { + decode = runtime.InternalGroupVersioner + } + return versioning.NewDefaultingCodecForScheme(f.scheme, encoder, decoder, encode, decode) +} + +// DecoderToVersion returns a decoder that targets the provided group version. +func (f CodecFactory) DecoderToVersion(decoder runtime.Decoder, gv runtime.GroupVersioner) runtime.Decoder { + return f.CodecForVersions(nil, decoder, nil, gv) +} + +// EncoderForVersion returns an encoder that targets the provided group version. +func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.GroupVersioner) runtime.Encoder { + return f.CodecForVersions(encoder, nil, gv, nil) +} + +// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion. +// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future +// will be unnecessary when we change the signature of NegotiatedSerializer. +type WithoutConversionCodecFactory struct { + CodecFactory +} + +// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object +// when serialized. +func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { + return runtime.WithVersionEncoder{ + Version: version, + Encoder: serializer, + ObjectTyper: f.CodecFactory.scheme, + } +} + +// DecoderToVersion returns an decoder that does not do conversion. +func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return runtime.WithoutVersionDecoder{ + Decoder: serializer, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go new file mode 100644 index 0000000..9d17f09 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -0,0 +1,388 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "io" + "strconv" + "unsafe" + + jsoniter "github.com/json-iterator/go" + "github.com/modern-go/reflect2" + "sigs.k8s.io/yaml" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/util/framer" + utilyaml "k8s.io/apimachinery/pkg/util/yaml" + "k8s.io/klog" +) + +// NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer +// is not nil, the object has the group, version, and kind fields set. +// Deprecated: use NewSerializerWithOptions instead. +func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) +} + +// NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer +// is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that +// matches JSON, and will error if constructs are used that do not serialize to JSON. +// Deprecated: use NewSerializerWithOptions instead. +func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) +} + +// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML +// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer +// and are immutable. +func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer { + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + options: options, + identifier: identifier(options), + } +} + +// identifier computes Identifier of Encoder based on the given options. +func identifier(options SerializerOptions) runtime.Identifier { + result := map[string]string{ + "name": "json", + "yaml": strconv.FormatBool(options.Yaml), + "pretty": strconv.FormatBool(options.Pretty), + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err) + } + return runtime.Identifier(identifier) +} + +// SerializerOptions holds the options which are used to configure a JSON/YAML serializer. +// example: +// (1) To configure a JSON serializer, set `Yaml` to `false`. +// (2) To configure a YAML serializer, set `Yaml` to `true`. +// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`. +type SerializerOptions struct { + // Yaml: configures the Serializer to work with JSON(false) or YAML(true). + // When `Yaml` is enabled, this serializer only supports the subset of YAML that + // matches JSON, and will error if constructs are used that do not serialize to JSON. 
+ Yaml bool + + // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output. + // This option is silently ignored when `Yaml` is `true`. + Pretty bool + + // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. + // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. + Strict bool +} + +type Serializer struct { + meta MetaFactory + options SerializerOptions + creater runtime.ObjectCreater + typer runtime.ObjectTyper + + identifier runtime.Identifier +} + +// Serializer implements Serializer +var _ runtime.Serializer = &Serializer{} +var _ recognizer.RecognizingDecoder = &Serializer{} + +type customNumberExtension struct { + jsoniter.DummyExtension +} + +func (cne *customNumberExtension) CreateDecoder(typ reflect2.Type) jsoniter.ValDecoder { + if typ.String() == "interface {}" { + return customNumberDecoder{} + } + return nil +} + +type customNumberDecoder struct { +} + +func (customNumberDecoder) Decode(ptr unsafe.Pointer, iter *jsoniter.Iterator) { + switch iter.WhatIsNext() { + case jsoniter.NumberValue: + var number jsoniter.Number + iter.ReadVal(&number) + i64, err := strconv.ParseInt(string(number), 10, 64) + if err == nil { + *(*interface{})(ptr) = i64 + return + } + f64, err := strconv.ParseFloat(string(number), 64) + if err == nil { + *(*interface{})(ptr) = f64 + return + } + iter.ReportError("DecodeNumber", err.Error()) + default: + *(*interface{})(ptr) = iter.Read() + } +} + +// CaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive when unmarshalling, and otherwise compatible with +// the encoding/json standard library. +func CaseSensitiveJsonIterator() jsoniter.API { + config := jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + }.Froze() + // Force jsoniter to decode number to interface{} via int64/float64, if possible. + config.RegisterExtension(&customNumberExtension{}) + return config +} + +// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be +// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with +// the encoding/json standard library. +func StrictCaseSensitiveJsonIterator() jsoniter.API { + config := jsoniter.Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, + CaseSensitive: true, + DisallowUnknownFields: true, + }.Froze() + // Force jsoniter to decode number to interface{} via int64/float64, if possible. + config.RegisterExtension(&customNumberExtension{}) + return config +} + +// Private copies of jsoniter to try to shield against possible mutations +// from outside. Still does not protect from package level jsoniter.Register*() functions - someone calling them +// in some other library will mess with every usage of the jsoniter library in the whole program. 
+// See https://github.com/json-iterator/go/issues/265 +var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() +var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() + +// gvkWithDefaults returns group kind and version defaulting from provided default +func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { + if len(actual.Kind) == 0 { + actual.Kind = defaultGVK.Kind + } + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = defaultGVK.Group + actual.Version = defaultGVK.Version + } + if len(actual.Version) == 0 && actual.Group == defaultGVK.Group { + actual.Version = defaultGVK.Version + } + return actual +} + +// Decode attempts to convert the provided data into YAML or JSON, extract the stored schema kind, apply the provided default gvk, and then +// load that data into an object matching the desired schema kind or the provided into. +// If into is *runtime.Unknown, the raw data will be extracted and no decoding will be performed. +// If into is not registered with the typer, then the object will be straight decoded using normal JSON/YAML unmarshalling. +// If into is provided and the original data is not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. +// If into is nil or data's gvk different from into's gvk, it will generate a new Object with ObjectCreater.New(gvk) +// On success or most errors, the method will return the calculated schema kind. +// The gvk calculate priority will be originalData > default gvk > into +func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + data := originalData + if s.options.Yaml { + altered, err := yaml.YAMLToJSON(data) + if err != nil { + return nil, nil, err + } + data = altered + } + + actual, err := s.meta.Interpret(data) + if err != nil { + return nil, nil, err + } + + if gvk != nil { + *actual = gvkWithDefaults(*actual, *gvk) + } + + if unk, ok := into.(*runtime.Unknown); ok && unk != nil { + unk.Raw = originalData + unk.ContentType = runtime.ContentTypeJSON + unk.GetObjectKind().SetGroupVersionKind(*actual) + return unk, actual, nil + } + + if into != nil { + _, isUnstructured := into.(runtime.Unstructured) + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err), isUnstructured: + if err := caseSensitiveJsonIterator.Unmarshal(data, into); err != nil { + return nil, actual, err + } + return into, actual, nil + case err != nil: + return nil, actual, err + default: + *actual = gvkWithDefaults(*actual, types[0]) + } + } + + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr(string(originalData)) + } + if len(actual.Version) == 0 { + return nil, actual, runtime.NewMissingVersionErr(string(originalData)) + } + + // use the target if necessary + obj, err := runtime.UseOrCreateObject(s.typer, s.creater, *actual, into) + if err != nil { + return nil, actual, err + } + + if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { + return nil, actual, err + } + + // If the deserializer is non-strict, return successfully here. + if !s.options.Strict { + return obj, actual, nil + } + + // In strict mode pass the data trough the YAMLToJSONStrict converter. + // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data, + // the output would equal the input, unless there is a parsing error such as duplicate fields. 
+ // As we know this was successful in the non-strict case, the only error that may be returned here + // is because of the newly-added strictness. hence we know we can return the typed strictDecoderError + // the actual error is that the object contains duplicate fields. + altered, err := yaml.YAMLToJSONStrict(originalData) + if err != nil { + return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + } + // As performance is not an issue for now for the strict deserializer (one has regardless to do + // the unmarshal twice), we take the sanitized, altered data that is guaranteed to have no duplicated + // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is + // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, + // the actual error is that the object contains unknown field. + strictObj := obj.DeepCopyObject() + if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { + return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) + } + // Always return the same object as the non-strict serializer to avoid any deviations. + return obj, actual, nil +} + +// Encode serializes the provided object to the given writer. +func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { + if s.options.Yaml { + json, err := caseSensitiveJsonIterator.Marshal(obj) + if err != nil { + return err + } + data, err := yaml.JSONToYAML(json) + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + + if s.options.Pretty { + data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") + if err != nil { + return err + } + _, err = w.Write(data) + return err + } + encoder := json.NewEncoder(w) + return encoder.Encode(obj) +} + +// Identifier implements runtime.Encoder interface. +func (s *Serializer) Identifier() runtime.Identifier { + return s.identifier +} + +// RecognizesData implements the RecognizingDecoder interface. +func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { + if s.options.Yaml { + // we could potentially look for '---' + return false, true, nil + } + _, _, ok = utilyaml.GuessJSONStream(peek, 2048) + return ok, false, nil +} + +// Framer is the default JSON framing behavior, with newlines delimiting individual objects. +var Framer = jsonFramer{} + +type jsonFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (jsonFramer) NewFrameWriter(w io.Writer) io.Writer { + // we can write JSON objects directly to the writer, because they are self-framing + return w +} + +// NewFrameReader implements stream framing for this serializer +func (jsonFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // we need to extract the JSON chunks of data to pass to Decode() + return framer.NewJSONFramedReader(r) +} + +// YAMLFramer is the default JSON framing behavior, with newlines delimiting individual objects. 
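Editor's sketch (illustrative only, not part of the vendored file): the Serializer above can also be used directly, without a CodecFactory, for strict, pretty JSON encoding and decoding. The registration of metav1.Status under a bare "v1" version and the hand-set TypeMeta are assumptions for the sketch.

package main

import (
	"bytes"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	scheme := runtime.NewScheme()
	scheme.AddKnownTypes(schema.GroupVersion{Version: "v1"}, &metav1.Status{}) // hypothetical registration

	s := json.NewSerializerWithOptions(json.DefaultMetaFactory, scheme, scheme,
		json.SerializerOptions{Yaml: false, Pretty: true, Strict: true})

	var buf bytes.Buffer
	st := &metav1.Status{TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Status"}, Message: "ok"}
	if err := s.Encode(st, &buf); err != nil {
		panic(err)
	}

	// Strict decoding rejects duplicate or unknown fields.
	obj, gvk, err := s.Decode(buf.Bytes(), nil, nil)
	fmt.Println(gvk, err, obj != nil)
}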
+var YAMLFramer = yamlFramer{} + +type yamlFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (yamlFramer) NewFrameWriter(w io.Writer) io.Writer { + return yamlFrameWriter{w} +} + +// NewFrameReader implements stream framing for this serializer +func (yamlFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + // extract the YAML document chunks directly + return utilyaml.NewDocumentDecoder(r) +} + +type yamlFrameWriter struct { + w io.Writer +} + +// Write separates each document with the YAML document separator (`---` followed by line +// break). Writers must write well formed YAML documents (include a final line break). +func (w yamlFrameWriter) Write(data []byte) (n int, err error) { + if _, err := w.w.Write([]byte("---\n")); err != nil { + return 0, err + } + return w.w.Write(data) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go new file mode 100644 index 0000000..df3f5f9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/meta.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package json + +import ( + "encoding/json" + "fmt" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// MetaFactory is used to store and retrieve the version and kind +// information for JSON objects in a serializer. +type MetaFactory interface { + // Interpret should return the version and kind of the wire-format of + // the object. + Interpret(data []byte) (*schema.GroupVersionKind, error) +} + +// DefaultMetaFactory is a default factory for versioning objects in JSON. The object +// in memory and in the default JSON serialization will use the "kind" and "apiVersion" +// fields. +var DefaultMetaFactory = SimpleMetaFactory{} + +// SimpleMetaFactory provides default methods for retrieving the type and version of objects +// that are identified with an "apiVersion" and "kind" fields in their JSON +// serialization. It may be parameterized with the names of the fields in memory, or an +// optional list of base structs to search for those fields in memory. +type SimpleMetaFactory struct { +} + +// Interpret will return the APIVersion and Kind of the JSON wire-format +// encoding of an object, or an error. 
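Editor's sketch (illustrative only, not part of the vendored file): what Interpret extracts from a JSON document. The Deployment manifest is an arbitrary input chosen for illustration.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/serializer/json"
)

func main() {
	doc := []byte(`{"apiVersion":"apps/v1","kind":"Deployment","metadata":{"name":"web"}}`) // assumed input
	gvk, err := json.DefaultMetaFactory.Interpret(doc)
	if err != nil {
		panic(err)
	}
	fmt.Println(gvk.Group, gvk.Version, gvk.Kind) // apps v1 Deployment
}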
+func (SimpleMetaFactory) Interpret(data []byte) (*schema.GroupVersionKind, error) { + findKind := struct { + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // +optional + Kind string `json:"kind,omitempty"` + }{} + if err := json.Unmarshal(data, &findKind); err != nil { + return nil, fmt.Errorf("couldn't get version/kind; json parse error: %v", err) + } + gv, err := schema.ParseGroupVersion(findKind.APIVersion) + if err != nil { + return nil, err + } + return &schema.GroupVersionKind{Group: gv.Group, Version: gv.Version, Kind: findKind.Kind}, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go new file mode 100644 index 0000000..a42b4a4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/negotiated_codec.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" +) + +// TODO: We should split negotiated serializers that we can change versions on from those we can change +// serialization formats on +type negotiatedSerializerWrapper struct { + info runtime.SerializerInfo +} + +func NegotiatedSerializerWrapper(info runtime.SerializerInfo) runtime.NegotiatedSerializer { + return &negotiatedSerializerWrapper{info} +} + +func (n *negotiatedSerializerWrapper) SupportedMediaTypes() []runtime.SerializerInfo { + return []runtime.SerializerInfo{n.info} +} + +func (n *negotiatedSerializerWrapper) EncoderForVersion(e runtime.Encoder, _ runtime.GroupVersioner) runtime.Encoder { + return e +} + +func (n *negotiatedSerializerWrapper) DecoderToVersion(d runtime.Decoder, _gv runtime.GroupVersioner) runtime.Decoder { + return d +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go new file mode 100644 index 0000000..72d0ac7 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package protobuf provides a Kubernetes serializer for the protobuf format. 
+package protobuf // import "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go new file mode 100644 index 0000000..f606b7d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -0,0 +1,472 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protobuf + +import ( + "bytes" + "fmt" + "io" + "net/http" + "reflect" + + "github.com/gogo/protobuf/proto" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" + "k8s.io/apimachinery/pkg/util/framer" +) + +var ( + // protoEncodingPrefix serves as a magic number for an encoded protobuf message on this serializer. All + // proto messages serialized by this schema will be preceded by the bytes 0x6b 0x38 0x73, with the fourth + // byte being reserved for the encoding style. The only encoding style defined is 0x00, which means that + // the rest of the byte stream is a message of type k8s.io.kubernetes.pkg.runtime.Unknown (proto2). + // + // See k8s.io/apimachinery/pkg/runtime/generated.proto for details of the runtime.Unknown message. + // + // This encoding scheme is experimental, and is subject to change at any time. + protoEncodingPrefix = []byte{0x6b, 0x38, 0x73, 0x00} +) + +type errNotMarshalable struct { + t reflect.Type +} + +func (e errNotMarshalable) Error() string { + return fmt.Sprintf("object %v does not implement the protobuf marshalling interface and cannot be encoded to a protobuf message", e.t) +} + +func (e errNotMarshalable) Status() metav1.Status { + return metav1.Status{ + Status: metav1.StatusFailure, + Code: http.StatusNotAcceptable, + Reason: metav1.StatusReason("NotAcceptable"), + Message: e.Error(), + } +} + +func IsNotMarshalable(err error) bool { + _, ok := err.(errNotMarshalable) + return err != nil && ok +} + +// NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer +// is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written +// as-is (any type info passed with the object will be used). 
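Editor's sketch (illustrative only, not part of the vendored file): encoding a metav1.Status to make the envelope format above concrete. The GVK on the TypeMeta is set by hand because this serializer writes whatever type information the object already carries; using metav1.Status is an assumption chosen because it has generated protobuf marshalling.

package main

import (
	"bytes"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer/protobuf"
)

func main() {
	scheme := runtime.NewScheme() // creater/typer; not consulted on this encode path
	ps := protobuf.NewSerializer(scheme, scheme)

	st := &metav1.Status{
		TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "Status"},
		Message:  "ok",
	}

	var buf bytes.Buffer
	if err := ps.Encode(st, &buf); err != nil {
		panic(err)
	}
	// The stream starts with the 0x6b 0x38 0x73 0x00 envelope prefix, followed by
	// a runtime.Unknown message wrapping the raw Status bytes.
	fmt.Printf("% x\n", buf.Bytes()[:4]) // 6b 38 73 00
}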
+func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { + return &Serializer{ + prefix: protoEncodingPrefix, + creater: creater, + typer: typer, + } +} + +type Serializer struct { + prefix []byte + creater runtime.ObjectCreater + typer runtime.ObjectTyper +} + +var _ runtime.Serializer = &Serializer{} +var _ recognizer.RecognizingDecoder = &Serializer{} + +const serializerIdentifier runtime.Identifier = "protobuf" + +// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default +// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, +// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will +// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is +// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most +// errors, the method will return the calculated schema kind. +func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + prefixLen := len(s.prefix) + switch { + case len(originalData) == 0: + // TODO: treat like decoding {} from JSON with defaulting + return nil, nil, fmt.Errorf("empty data") + case len(originalData) < prefixLen || !bytes.Equal(s.prefix, originalData[:prefixLen]): + return nil, nil, fmt.Errorf("provided data does not appear to be a protobuf message, expected prefix %v", s.prefix) + case len(originalData) == prefixLen: + // TODO: treat like decoding {} from JSON with defaulting + return nil, nil, fmt.Errorf("empty body") + } + + data := originalData[prefixLen:] + unk := runtime.Unknown{} + if err := unk.Unmarshal(data); err != nil { + return nil, nil, err + } + + actual := unk.GroupVersionKind() + copyKindDefaults(&actual, gvk) + + if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { + *intoUnknown = unk + if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok { + intoUnknown.ContentType = runtime.ContentTypeProtobuf + } + return intoUnknown, &actual, nil + } + + if into != nil { + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err): + pb, ok := into.(proto.Message) + if !ok { + return nil, &actual, errNotMarshalable{reflect.TypeOf(into)} + } + if err := proto.Unmarshal(unk.Raw, pb); err != nil { + return nil, &actual, err + } + return into, &actual, nil + case err != nil: + return nil, &actual, err + default: + copyKindDefaults(&actual, &types[0]) + // if the result of defaulting did not set a version or group, ensure that at least group is set + // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group + // of into is set if there is no better information from the caller or object. + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = types[0].Group + } + } + } + + if len(actual.Kind) == 0 { + return nil, &actual, runtime.NewMissingKindErr(fmt.Sprintf("%#v", unk.TypeMeta)) + } + if len(actual.Version) == 0 { + return nil, &actual, runtime.NewMissingVersionErr(fmt.Sprintf("%#v", unk.TypeMeta)) + } + + return unmarshalToObject(s.typer, s.creater, &actual, into, unk.Raw) +} + +// Encode serializes the provided object to the given writer. 
+func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { + prefixSize := uint64(len(s.prefix)) + + var unk runtime.Unknown + switch t := obj.(type) { + case *runtime.Unknown: + estimatedSize := prefixSize + uint64(t.Size()) + data := make([]byte, estimatedSize) + i, err := t.MarshalTo(data[prefixSize:]) + if err != nil { + return err + } + copy(data, s.prefix) + _, err = w.Write(data[:prefixSize+uint64(i)]) + return err + default: + kind := obj.GetObjectKind().GroupVersionKind() + unk = runtime.Unknown{ + TypeMeta: runtime.TypeMeta{ + Kind: kind.Kind, + APIVersion: kind.GroupVersion().String(), + }, + } + } + + switch t := obj.(type) { + case bufferedMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalToSizedBuffer methods + encodedSize := uint64(t.Size()) + estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize) + data := make([]byte, estimatedSize) + + i, err := unk.NestedMarshalTo(data[prefixSize:], t, encodedSize) + if err != nil { + return err + } + + copy(data, s.prefix) + + _, err = w.Write(data[:prefixSize+uint64(i)]) + return err + + case proto.Marshaler: + // this path performs extra allocations + data, err := t.Marshal() + if err != nil { + return err + } + unk.Raw = data + + estimatedSize := prefixSize + uint64(unk.Size()) + data = make([]byte, estimatedSize) + + i, err := unk.MarshalTo(data[prefixSize:]) + if err != nil { + return err + } + + copy(data, s.prefix) + + _, err = w.Write(data[:prefixSize+uint64(i)]) + return err + + default: + // TODO: marshal with a different content type and serializer (JSON for third party objects) + return errNotMarshalable{reflect.TypeOf(obj)} + } +} + +// Identifier implements runtime.Encoder interface. +func (s *Serializer) Identifier() runtime.Identifier { + return serializerIdentifier +} + +// RecognizesData implements the RecognizingDecoder interface. +func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) { + prefix := make([]byte, 4) + n, err := peek.Read(prefix) + if err != nil { + if err == io.EOF { + return false, false, nil + } + return false, false, err + } + if n != 4 { + return false, false, nil + } + return bytes.Equal(s.prefix, prefix), false, nil +} + +// copyKindDefaults defaults dst to the value in src if dst does not have a value set. +func copyKindDefaults(dst, src *schema.GroupVersionKind) { + if src == nil { + return + } + // apply kind and version defaulting from provided default + if len(dst.Kind) == 0 { + dst.Kind = src.Kind + } + if len(dst.Version) == 0 && len(src.Version) > 0 { + dst.Group = src.Group + dst.Version = src.Version + } +} + +// bufferedMarshaller describes a more efficient marshalling interface that can avoid allocating multiple +// byte buffers by pre-calculating the size of the final buffer needed. +type bufferedMarshaller interface { + proto.Sizer + runtime.ProtobufMarshaller +} + +// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently. 
+type bufferedReverseMarshaller interface { + proto.Sizer + runtime.ProtobufReverseMarshaller +} + +// estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown +// object with a nil RawJSON struct and the expected size of the provided buffer. The +// returned size will not be correct if RawJSOn is set on unk. +func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 { + size := uint64(unk.Size()) + // protobuf uses 1 byte for the tag, a varint for the length of the array (at most 8 bytes - uint64 - here), + // and the size of the array. + size += 1 + 8 + byteSize + return size +} + +// NewRawSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If typer +// is not nil, the object has the group, version, and kind fields set. This serializer does not provide type information for the +// encoded object, and thus is not self describing (callers must know what type is being described in order to decode). +// +// This encoding scheme is experimental, and is subject to change at any time. +func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer { + return &RawSerializer{ + creater: creater, + typer: typer, + } +} + +// RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying +// type). +type RawSerializer struct { + creater runtime.ObjectCreater + typer runtime.ObjectTyper +} + +var _ runtime.Serializer = &RawSerializer{} + +const rawSerializerIdentifier runtime.Identifier = "raw-protobuf" + +// Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default +// gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, +// the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will +// be straight decoded using normal protobuf unmarshalling (the MarshalTo interface). If into is provided and the original data is +// not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most +// errors, the method will return the calculated schema kind. 
+func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + if into == nil { + return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s) + } + + if len(originalData) == 0 { + // TODO: treat like decoding {} from JSON with defaulting + return nil, nil, fmt.Errorf("empty data") + } + data := originalData + + actual := &schema.GroupVersionKind{} + copyKindDefaults(actual, gvk) + + if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { + intoUnknown.Raw = data + intoUnknown.ContentEncoding = "" + intoUnknown.ContentType = runtime.ContentTypeProtobuf + intoUnknown.SetGroupVersionKind(*actual) + return intoUnknown, actual, nil + } + + types, _, err := s.typer.ObjectKinds(into) + switch { + case runtime.IsNotRegisteredError(err): + pb, ok := into.(proto.Message) + if !ok { + return nil, actual, errNotMarshalable{reflect.TypeOf(into)} + } + if err := proto.Unmarshal(data, pb); err != nil { + return nil, actual, err + } + return into, actual, nil + case err != nil: + return nil, actual, err + default: + copyKindDefaults(actual, &types[0]) + // if the result of defaulting did not set a version or group, ensure that at least group is set + // (copyKindDefaults will not assign Group if version is already set). This guarantees that the group + // of into is set if there is no better information from the caller or object. + if len(actual.Version) == 0 && len(actual.Group) == 0 { + actual.Group = types[0].Group + } + } + + if len(actual.Kind) == 0 { + return nil, actual, runtime.NewMissingKindErr("") + } + if len(actual.Version) == 0 { + return nil, actual, runtime.NewMissingVersionErr("") + } + + return unmarshalToObject(s.typer, s.creater, actual, into, data) +} + +// unmarshalToObject is the common code between decode in the raw and normal serializer. +func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, actual *schema.GroupVersionKind, into runtime.Object, data []byte) (runtime.Object, *schema.GroupVersionKind, error) { + // use the target if necessary + obj, err := runtime.UseOrCreateObject(typer, creater, *actual, into) + if err != nil { + return nil, actual, err + } + + pb, ok := obj.(proto.Message) + if !ok { + return nil, actual, errNotMarshalable{reflect.TypeOf(obj)} + } + if err := proto.Unmarshal(data, pb); err != nil { + return nil, actual, err + } + if actual != nil { + obj.GetObjectKind().SetGroupVersionKind(*actual) + } + return obj, actual, nil +} + +// Encode serializes the provided object to the given writer. Overrides is ignored. 
+func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(s.Identifier(), s.doEncode, w) + } + return s.doEncode(obj, w) +} + +func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error { + switch t := obj.(type) { + case bufferedReverseMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalToSizedBuffer methods + encodedSize := uint64(t.Size()) + data := make([]byte, encodedSize) + + n, err := t.MarshalToSizedBuffer(data) + if err != nil { + return err + } + _, err = w.Write(data[:n]) + return err + + case bufferedMarshaller: + // this path performs a single allocation during write but requires the caller to implement + // the more efficient Size and MarshalTo methods + encodedSize := uint64(t.Size()) + data := make([]byte, encodedSize) + + n, err := t.MarshalTo(data) + if err != nil { + return err + } + _, err = w.Write(data[:n]) + return err + + case proto.Marshaler: + // this path performs extra allocations + data, err := t.Marshal() + if err != nil { + return err + } + _, err = w.Write(data) + return err + + default: + return errNotMarshalable{reflect.TypeOf(obj)} + } +} + +// Identifier implements runtime.Encoder interface. +func (s *RawSerializer) Identifier() runtime.Identifier { + return rawSerializerIdentifier +} + +var LengthDelimitedFramer = lengthDelimitedFramer{} + +type lengthDelimitedFramer struct{} + +// NewFrameWriter implements stream framing for this serializer +func (lengthDelimitedFramer) NewFrameWriter(w io.Writer) io.Writer { + return framer.NewLengthDelimitedFrameWriter(w) +} + +// NewFrameReader implements stream framing for this serializer +func (lengthDelimitedFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { + return framer.NewLengthDelimitedFrameReader(r) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go new file mode 100644 index 0000000..38497ab --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/recognizer/recognizer.go @@ -0,0 +1,127 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package recognizer + +import ( + "bufio" + "bytes" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +type RecognizingDecoder interface { + runtime.Decoder + // RecognizesData should return true if the input provided in the provided reader + // belongs to this decoder, or an error if the data could not be read or is ambiguous. + // Unknown is true if the data could not be determined to match the decoder type. + // Decoders should assume that they can read as much of peek as they need (as the caller + // provides) and may return unknown if the data provided is not sufficient to make a + // a determination. 
When peek returns EOF that may mean the end of the input or the + // end of buffered input - recognizers should return the best guess at that time. + RecognizesData(peek io.Reader) (ok, unknown bool, err error) +} + +// NewDecoder creates a decoder that will attempt multiple decoders in an order defined +// by: +// +// 1. The decoder implements RecognizingDecoder and identifies the data +// 2. All other decoders, and any decoder that returned true for unknown. +// +// The order passed to the constructor is preserved within those priorities. +func NewDecoder(decoders ...runtime.Decoder) runtime.Decoder { + return &decoder{ + decoders: decoders, + } +} + +type decoder struct { + decoders []runtime.Decoder +} + +var _ RecognizingDecoder = &decoder{} + +func (d *decoder) RecognizesData(peek io.Reader) (bool, bool, error) { + var ( + lastErr error + anyUnknown bool + ) + data, _ := bufio.NewReaderSize(peek, 1024).Peek(1024) + for _, r := range d.decoders { + switch t := r.(type) { + case RecognizingDecoder: + ok, unknown, err := t.RecognizesData(bytes.NewBuffer(data)) + if err != nil { + lastErr = err + continue + } + anyUnknown = anyUnknown || unknown + if !ok { + continue + } + return true, false, nil + } + } + return false, anyUnknown, lastErr +} + +func (d *decoder) Decode(data []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + var ( + lastErr error + skipped []runtime.Decoder + ) + + // try recognizers, record any decoders we need to give a chance later + for _, r := range d.decoders { + switch t := r.(type) { + case RecognizingDecoder: + buf := bytes.NewBuffer(data) + ok, unknown, err := t.RecognizesData(buf) + if err != nil { + lastErr = err + continue + } + if unknown { + skipped = append(skipped, t) + continue + } + if !ok { + continue + } + return r.Decode(data, gvk, into) + default: + skipped = append(skipped, t) + } + } + + // try recognizers that returned unknown or didn't recognize their data + for _, r := range skipped { + out, actual, err := r.Decode(data, gvk, into) + if err != nil { + lastErr = err + continue + } + return out, actual, nil + } + + if lastErr == nil { + lastErr = fmt.Errorf("no serialization format matched the provided data") + } + return nil, nil, lastErr +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go new file mode 100644 index 0000000..a60a7c0 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/streaming/streaming.go @@ -0,0 +1,137 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package streaming implements encoder and decoder for streams +// of runtime.Objects over io.Writer/Readers. +package streaming + +import ( + "bytes" + "fmt" + "io" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// Encoder is a runtime.Encoder on a stream. 
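
The recognizing decoder vendored just above peeks at a bounded prefix of the payload and dispatches to the first decoder that claims it, giving "unknown" decoders a later chance. A rough standard-library analogue of that sniffing step (an illustration with invented names, not the vendored API):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

// recognizesJSON peeks at the payload and guesses whether it is a JSON document.
// It mirrors the ok/unknown split: empty or all-whitespace input is unknown
// rather than a hard no.
func recognizesJSON(data []byte) (ok, unknown bool) {
	trimmed := bytes.TrimLeft(data, " \t\r\n")
	if len(trimmed) == 0 {
		return false, true
	}
	switch trimmed[0] {
	case '{', '[':
		return true, false
	default:
		return false, false
	}
}

// decode only refuses outright when the data is positively not JSON; unknown
// data still gets a chance, matching the "skipped decoders" behaviour above.
func decode(data []byte) (map[string]interface{}, error) {
	ok, unknown := recognizesJSON(data)
	if !ok && !unknown {
		return nil, fmt.Errorf("no serialization format matched the provided data")
	}
	out := map[string]interface{}{}
	if err := json.Unmarshal(data, &out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	obj, err := decode([]byte(`{"kind":"Pod"}`))
	fmt.Println(obj, err)

	_, err = decode([]byte("\x00binary"))
	fmt.Println(err) // no serialization format matched the provided data
}
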
+type Encoder interface { + // Encode will write the provided object to the stream or return an error. It obeys the same + // contract as runtime.VersionedEncoder. + Encode(obj runtime.Object) error +} + +// Decoder is a runtime.Decoder from a stream. +type Decoder interface { + // Decode will return io.EOF when no more objects are available. + Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) + // Close closes the underlying stream. + Close() error +} + +// Serializer is a factory for creating encoders and decoders that work over streams. +type Serializer interface { + NewEncoder(w io.Writer) Encoder + NewDecoder(r io.ReadCloser) Decoder +} + +type decoder struct { + reader io.ReadCloser + decoder runtime.Decoder + buf []byte + maxBytes int + resetRead bool +} + +// NewDecoder creates a streaming decoder that reads object chunks from r and decodes them with d. +// The reader is expected to return ErrShortRead if the provided buffer is not large enough to read +// an entire object. +func NewDecoder(r io.ReadCloser, d runtime.Decoder) Decoder { + return &decoder{ + reader: r, + decoder: d, + buf: make([]byte, 1024), + maxBytes: 16 * 1024 * 1024, + } +} + +var ErrObjectTooLarge = fmt.Errorf("object to decode was longer than maximum allowed size") + +// Decode reads the next object from the stream and decodes it. +func (d *decoder) Decode(defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + base := 0 + for { + n, err := d.reader.Read(d.buf[base:]) + if err == io.ErrShortBuffer { + if n == 0 { + return nil, nil, fmt.Errorf("got short buffer with n=0, base=%d, cap=%d", base, cap(d.buf)) + } + if d.resetRead { + continue + } + // double the buffer size up to maxBytes + if len(d.buf) < d.maxBytes { + base += n + d.buf = append(d.buf, make([]byte, len(d.buf))...) + continue + } + // must read the rest of the frame (until we stop getting ErrShortBuffer) + d.resetRead = true + base = 0 + return nil, nil, ErrObjectTooLarge + } + if err != nil { + return nil, nil, err + } + if d.resetRead { + // now that we have drained the large read, continue + d.resetRead = false + continue + } + base += n + break + } + return d.decoder.Decode(d.buf[:base], defaults, into) +} + +func (d *decoder) Close() error { + return d.reader.Close() +} + +type encoder struct { + writer io.Writer + encoder runtime.Encoder + buf *bytes.Buffer +} + +// NewEncoder returns a new streaming encoder. +func NewEncoder(w io.Writer, e runtime.Encoder) Encoder { + return &encoder{ + writer: w, + encoder: e, + buf: &bytes.Buffer{}, + } +} + +// Encode writes the provided object to the nested writer. +func (e *encoder) Encode(obj runtime.Object) error { + if err := e.encoder.Encode(obj, e.buf); err != nil { + return err + } + _, err := e.writer.Write(e.buf.Bytes()) + e.buf.Reset() + return err +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go new file mode 100644 index 0000000..ced184c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -0,0 +1,250 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioning + +import ( + "encoding/json" + "io" + "reflect" + "sync" + + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/klog" +) + +// NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. +func NewDefaultingCodecForScheme( + // TODO: I should be a scheme interface? + scheme *runtime.Scheme, + encoder runtime.Encoder, + decoder runtime.Decoder, + encodeVersion runtime.GroupVersioner, + decodeVersion runtime.GroupVersioner, +) runtime.Codec { + return NewCodec(encoder, decoder, runtime.UnsafeObjectConvertor(scheme), scheme, scheme, scheme, encodeVersion, decodeVersion, scheme.Name()) +} + +// NewCodec takes objects in their internal versions and converts them to external versions before +// serializing them. It assumes the serializer provided to it only deals with external versions. +// This class is also a serializer, but is generally used with a specific version. +func NewCodec( + encoder runtime.Encoder, + decoder runtime.Decoder, + convertor runtime.ObjectConvertor, + creater runtime.ObjectCreater, + typer runtime.ObjectTyper, + defaulter runtime.ObjectDefaulter, + encodeVersion runtime.GroupVersioner, + decodeVersion runtime.GroupVersioner, + originalSchemeName string, +) runtime.Codec { + internal := &codec{ + encoder: encoder, + decoder: decoder, + convertor: convertor, + creater: creater, + typer: typer, + defaulter: defaulter, + + encodeVersion: encodeVersion, + decodeVersion: decodeVersion, + + identifier: identifier(encodeVersion, encoder), + + originalSchemeName: originalSchemeName, + } + return internal +} + +type codec struct { + encoder runtime.Encoder + decoder runtime.Decoder + convertor runtime.ObjectConvertor + creater runtime.ObjectCreater + typer runtime.ObjectTyper + defaulter runtime.ObjectDefaulter + + encodeVersion runtime.GroupVersioner + decodeVersion runtime.GroupVersioner + + identifier runtime.Identifier + + // originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates + originalSchemeName string +} + +var identifiersMap sync.Map + +type codecIdentifier struct { + EncodeGV string `json:"encodeGV,omitempty"` + Encoder string `json:"encoder,omitempty"` + Name string `json:"name,omitempty"` +} + +// identifier computes Identifier of Encoder based on codec parameters. +func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier { + result := codecIdentifier{ + Name: "versioning", + } + + if encodeGV != nil { + result.EncodeGV = encodeGV.Identifier() + } + if encoder != nil { + result.Encoder = string(encoder.Identifier()) + } + if id, ok := identifiersMap.Load(result); ok { + return id.(runtime.Identifier) + } + identifier, err := json.Marshal(result) + if err != nil { + klog.Fatalf("Failed marshaling identifier for codec: %v", err) + } + identifiersMap.Store(result, runtime.Identifier(identifier)) + return runtime.Identifier(identifier) +} + +// Decode attempts a decode of the object, then tries to convert it to the internal version. 
If into is provided and the decoding is +// successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an +// into that matches the serialized version. +func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + // If the into object is unstructured and expresses an opinion about its group/version, + // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`) + decodeInto := into + if into != nil { + if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() { + decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object) + } + } + + obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) + if err != nil { + return nil, gvk, err + } + + if d, ok := obj.(runtime.NestedObjectDecoder); ok { + if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { + return nil, gvk, err + } + } + + // if we specify a target, use generic conversion. + if into != nil { + // perform defaulting if requested + if c.defaulter != nil { + c.defaulter.Default(obj) + } + + // Short-circuit conversion if the into object is same object + if into == obj { + return into, gvk, nil + } + + if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { + return nil, gvk, err + } + + return into, gvk, nil + } + + // perform defaulting if requested + if c.defaulter != nil { + c.defaulter.Default(obj) + } + + out, err := c.convertor.ConvertToVersion(obj, c.decodeVersion) + if err != nil { + return nil, gvk, err + } + return out, gvk, nil +} + +// Encode ensures the provided object is output in the appropriate group and version, invoking +// conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. +func (c *codec) Encode(obj runtime.Object, w io.Writer) error { + if co, ok := obj.(runtime.CacheableObject); ok { + return co.CacheEncode(c.Identifier(), c.doEncode, w) + } + return c.doEncode(obj, w) +} + +func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { + switch obj := obj.(type) { + case *runtime.Unknown: + return c.encoder.Encode(obj, w) + case runtime.Unstructured: + // An unstructured list can contain objects of multiple group version kinds. don't short-circuit just + // because the top-level type matches our desired destination type. actually send the object to the converter + // to give it a chance to convert the list items if needed. 
+ if _, ok := obj.(*unstructured.UnstructuredList); !ok { + // avoid conversion roundtrip if GVK is the right one already or is empty (yes, this is a hack, but the old behaviour we rely on in kubectl) + objGVK := obj.GetObjectKind().GroupVersionKind() + if len(objGVK.Version) == 0 { + return c.encoder.Encode(obj, w) + } + targetGVK, ok := c.encodeVersion.KindForGroupVersionKinds([]schema.GroupVersionKind{objGVK}) + if !ok { + return runtime.NewNotRegisteredGVKErrForTarget(c.originalSchemeName, objGVK, c.encodeVersion) + } + if targetGVK == objGVK { + return c.encoder.Encode(obj, w) + } + } + } + + gvks, isUnversioned, err := c.typer.ObjectKinds(obj) + if err != nil { + return err + } + + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() + // restore the old GVK after encoding + defer objectKind.SetGroupVersionKind(old) + + if c.encodeVersion == nil || isUnversioned { + if e, ok := obj.(runtime.NestedObjectEncoder); ok { + if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + return err + } + } + objectKind.SetGroupVersionKind(gvks[0]) + return c.encoder.Encode(obj, w) + } + + // Perform a conversion if necessary + out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion) + if err != nil { + return err + } + + if e, ok := out.(runtime.NestedObjectEncoder); ok { + if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + return err + } + } + + // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object + return c.encoder.Encode(out, w) +} + +// Identifier implements runtime.Encoder interface. +func (c *codec) Identifier() runtime.Identifier { + return c.identifier +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go new file mode 100644 index 0000000..5bc642b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/swagger_doc_generator.go @@ -0,0 +1,262 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "bytes" + "fmt" + "go/ast" + "go/doc" + "go/parser" + "go/token" + "io" + "reflect" + "strings" +) + +// Pair of strings. We keed the name of fields and the doc +type Pair struct { + Name, Doc string +} + +// KubeTypes is an array to represent all available types in a parsed file. 
[0] is for the type itself +type KubeTypes []Pair + +func astFrom(filePath string) *doc.Package { + fset := token.NewFileSet() + m := make(map[string]*ast.File) + + f, err := parser.ParseFile(fset, filePath, nil, parser.ParseComments) + if err != nil { + fmt.Println(err) + return nil + } + + m[filePath] = f + apkg, _ := ast.NewPackage(fset, m, nil, nil) + + return doc.New(apkg, "", 0) +} + +func fmtRawDoc(rawDoc string) string { + var buffer bytes.Buffer + delPrevChar := func() { + if buffer.Len() > 0 { + buffer.Truncate(buffer.Len() - 1) // Delete the last " " or "\n" + } + } + + // Ignore all lines after --- + rawDoc = strings.Split(rawDoc, "---")[0] + + for _, line := range strings.Split(rawDoc, "\n") { + line = strings.TrimRight(line, " ") + leading := strings.TrimLeft(line, " ") + switch { + case len(line) == 0: // Keep paragraphs + delPrevChar() + buffer.WriteString("\n\n") + case strings.HasPrefix(leading, "TODO"): // Ignore one line TODOs + case strings.HasPrefix(leading, "+"): // Ignore instructions to the generators + default: + if strings.HasPrefix(line, " ") || strings.HasPrefix(line, "\t") { + delPrevChar() + line = "\n" + line + "\n" // Replace it with newline. This is useful when we have a line with: "Example:\n\tJSON-someting..." + } else { + line += " " + } + buffer.WriteString(line) + } + } + + postDoc := strings.TrimRight(buffer.String(), "\n") + postDoc = strings.Replace(postDoc, "\\\"", "\"", -1) // replace user's \" to " + postDoc = strings.Replace(postDoc, "\"", "\\\"", -1) // Escape " + postDoc = strings.Replace(postDoc, "\n", "\\n", -1) + postDoc = strings.Replace(postDoc, "\t", "\\t", -1) + + return postDoc +} + +// fieldName returns the name of the field as it should appear in JSON format +// "-" indicates that this field is not part of the JSON representation +func fieldName(field *ast.Field) string { + jsonTag := "" + if field.Tag != nil { + jsonTag = reflect.StructTag(field.Tag.Value[1 : len(field.Tag.Value)-1]).Get("json") // Delete first and last quotation + if strings.Contains(jsonTag, "inline") { + return "-" + } + } + + jsonTag = strings.Split(jsonTag, ",")[0] // This can return "-" + if jsonTag == "" { + if field.Names != nil { + return field.Names[0].Name + } + return field.Type.(*ast.Ident).Name + } + return jsonTag +} + +// A buffer of lines that will be written. 
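
fieldName above reads the json struct tag to decide how a field appears on the wire, falling back to the Go name and reporting "-" for excluded fields. A small reflect-based sketch of that tag handling (the example struct is invented):

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type example struct {
	Name     string `json:"name,omitempty"`
	Internal string `json:"-"`
	Untagged int
}

// jsonName mirrors the tag logic: use the json tag when present, fall back to
// the Go field name, and pass "-" through for fields excluded from JSON.
func jsonName(f reflect.StructField) string {
	tag := strings.Split(f.Tag.Get("json"), ",")[0]
	if tag == "" {
		return f.Name
	}
	return tag
}

func main() {
	t := reflect.TypeOf(example{})
	for i := 0; i < t.NumField(); i++ {
		fmt.Println(jsonName(t.Field(i)))
	}
	// Prints: name, -, Untagged (one per line)
}
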
+type bufferedLine struct { + line string + indentation int +} + +type buffer struct { + lines []bufferedLine +} + +func newBuffer() *buffer { + return &buffer{ + lines: make([]bufferedLine, 0), + } +} + +func (b *buffer) addLine(line string, indent int) { + b.lines = append(b.lines, bufferedLine{line, indent}) +} + +func (b *buffer) flushLines(w io.Writer) error { + for _, line := range b.lines { + indentation := strings.Repeat("\t", line.indentation) + fullLine := fmt.Sprintf("%s%s", indentation, line.line) + if _, err := io.WriteString(w, fullLine); err != nil { + return err + } + } + return nil +} + +func writeFuncHeader(b *buffer, structName string, indent int) { + s := fmt.Sprintf("var map_%s = map[string]string {\n", structName) + b.addLine(s, indent) +} + +func writeFuncFooter(b *buffer, structName string, indent int) { + b.addLine("}\n", indent) // Closes the map definition + + s := fmt.Sprintf("func (%s) SwaggerDoc() map[string]string {\n", structName) + b.addLine(s, indent) + s = fmt.Sprintf("return map_%s\n", structName) + b.addLine(s, indent+1) + b.addLine("}\n", indent) // Closes the function definition +} + +func writeMapBody(b *buffer, kubeType []Pair, indent int) { + format := "\"%s\": \"%s\",\n" + for _, pair := range kubeType { + s := fmt.Sprintf(format, pair.Name, pair.Doc) + b.addLine(s, indent+2) + } +} + +// ParseDocumentationFrom gets all types' documentation and returns them as an +// array. Each type is again represented as an array (we have to use arrays as we +// need to be sure for the order of the fields). This function returns fields and +// struct definitions that have no documentation as {name, ""}. +func ParseDocumentationFrom(src string) []KubeTypes { + var docForTypes []KubeTypes + + pkg := astFrom(src) + + for _, kubType := range pkg.Types { + if structType, ok := kubType.Decl.Specs[0].(*ast.TypeSpec).Type.(*ast.StructType); ok { + var ks KubeTypes + ks = append(ks, Pair{kubType.Name, fmtRawDoc(kubType.Doc)}) + + for _, field := range structType.Fields.List { + if n := fieldName(field); n != "-" { + fieldDoc := fmtRawDoc(field.Doc.Text()) + ks = append(ks, Pair{n, fieldDoc}) + } + } + docForTypes = append(docForTypes, ks) + } + } + + return docForTypes +} + +// WriteSwaggerDocFunc writes a declaration of a function as a string. This function is used in +// Swagger as a documentation source for structs and theirs fields +func WriteSwaggerDocFunc(kubeTypes []KubeTypes, w io.Writer) error { + for _, kubeType := range kubeTypes { + structName := kubeType[0].Name + kubeType[0].Name = "" + + // Ignore empty documentation + docfulTypes := make(KubeTypes, 0, len(kubeType)) + for _, pair := range kubeType { + if pair.Doc != "" { + docfulTypes = append(docfulTypes, pair) + } + } + + if len(docfulTypes) == 0 { + continue // If no fields and the struct have documentation, skip the function definition + } + + indent := 0 + buffer := newBuffer() + + writeFuncHeader(buffer, structName, indent) + writeMapBody(buffer, docfulTypes, indent) + writeFuncFooter(buffer, structName, indent) + buffer.addLine("\n", 0) + + if err := buffer.flushLines(w); err != nil { + return err + } + } + + return nil +} + +// VerifySwaggerDocsExist writes in a io.Writer a list of structs and fields that +// are missing of documentation. 
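
ParseDocumentationFrom above walks a parsed file and pairs each struct and field with its doc comment via go/parser and go/ast. A compact, runnable sketch of that extraction on an in-memory source string (the Widget type exists only for this example):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package demo

// Widget is an example type used only in this sketch.
type Widget struct {
	// Name is the widget's display name.
	Name string
}
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}
	for _, decl := range f.Decls {
		gd, ok := decl.(*ast.GenDecl)
		if !ok || gd.Tok != token.TYPE {
			continue
		}
		for _, spec := range gd.Specs {
			ts := spec.(*ast.TypeSpec)
			// For single-spec declarations the doc comment hangs off the GenDecl.
			fmt.Printf("type %s: %q\n", ts.Name.Name, gd.Doc.Text())
			st, ok := ts.Type.(*ast.StructType)
			if !ok {
				continue
			}
			for _, field := range st.Fields.List {
				if len(field.Names) > 0 {
					fmt.Printf("  %s: %q\n", field.Names[0].Name, field.Doc.Text())
				}
			}
		}
	}
}
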
+func VerifySwaggerDocsExist(kubeTypes []KubeTypes, w io.Writer) (int, error) { + missingDocs := 0 + buffer := newBuffer() + + for _, kubeType := range kubeTypes { + structName := kubeType[0].Name + if kubeType[0].Doc == "" { + format := "Missing documentation for the struct itself: %s\n" + s := fmt.Sprintf(format, structName) + buffer.addLine(s, 0) + missingDocs++ + } + kubeType = kubeType[1:] // Skip struct definition + + for _, pair := range kubeType { // Iterate only the fields + if pair.Doc == "" { + format := "In struct: %s, field documentation is missing: %s\n" + s := fmt.Sprintf(format, structName, pair.Name) + buffer.addLine(s, 0) + missingDocs++ + } + } + } + + if err := buffer.flushLines(w); err != nil { + return -1, err + } + return missingDocs, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go new file mode 100644 index 0000000..31359f3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -0,0 +1,126 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +// Note that the types provided in this file are not versioned and are intended to be +// safe to use from within all versions of every API object. + +// TypeMeta is shared by all top level objects. The proper way to use it is to inline it in your type, +// like this: +// type MyAwesomeAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// ... // other fields +// } +// func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind +// +// TypeMeta is provided here for convenience. You may use it directly from this package or define +// your own with the same fields. +// +// +k8s:deepcopy-gen=false +// +protobuf=true +// +k8s:openapi-gen=true +type TypeMeta struct { + // +optional + APIVersion string `json:"apiVersion,omitempty" yaml:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` + // +optional + Kind string `json:"kind,omitempty" yaml:"kind,omitempty" protobuf:"bytes,2,opt,name=kind"` +} + +const ( + ContentTypeJSON string = "application/json" + ContentTypeYAML string = "application/yaml" + ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" +) + +// RawExtension is used to hold extensions in external versions. +// +// To use this, make a field which has RawExtension as its type in your external, versioned +// struct, and Object in your internal struct. You also need to register your +// various plugin types. 
+// +// // Internal package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.Object `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // External package: +// type MyAPIObject struct { +// runtime.TypeMeta `json:",inline"` +// MyPlugin runtime.RawExtension `json:"myPlugin"` +// } +// type PluginA struct { +// AOption string `json:"aOption"` +// } +// +// // On the wire, the JSON will look something like this: +// { +// "kind":"MyAPIObject", +// "apiVersion":"v1", +// "myPlugin": { +// "kind":"PluginA", +// "aOption":"foo", +// }, +// } +// +// So what happens? Decode first uses json or yaml to unmarshal the serialized data into +// your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. +// The next step is to copy (using pkg/conversion) into the internal struct. The runtime +// package's DefaultScheme has conversion functions installed which will unpack the +// JSON stored in RawExtension, turning it into the correct object type, and storing it +// in the Object. (TODO: In the case where the object is of an unknown type, a +// runtime.Unknown object will be created and stored.) +// +// +k8s:deepcopy-gen=true +// +protobuf=true +// +k8s:openapi-gen=true +type RawExtension struct { + // Raw is the underlying serialization of this object. + // + // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. + Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` + // Object can hold a representation of this extension - useful for working with versioned + // structs. + Object Object `json:"-"` +} + +// Unknown allows api objects with unknown types to be passed-through. This can be used +// to deal with the API objects from a plug-in. Unknown objects still have functioning +// TypeMeta features-- kind, version, etc. +// TODO: Make this object have easy access to field based accessors and settors for +// metadata and field mutatation. +// +// +k8s:deepcopy-gen=true +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +protobuf=true +// +k8s:openapi-gen=true +type Unknown struct { + TypeMeta `json:",inline" protobuf:"bytes,1,opt,name=typeMeta"` + // Raw will hold the complete serialized object which couldn't be matched + // with a registered type. Most likely, nothing should be done with this + // except for passing it through the system. + Raw []byte `protobuf:"bytes,2,opt,name=raw"` + // ContentEncoding is encoding used to encode 'Raw' data. + // Unspecified means no encoding. + ContentEncoding string `protobuf:"bytes,3,opt,name=contentEncoding"` + // ContentType is serialization method used to serialize 'Raw'. + // Unspecified means ContentTypeJSON. + ContentType string `protobuf:"bytes,4,opt,name=contentType"` +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go new file mode 100644 index 0000000..a82227b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -0,0 +1,89 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" +) + +type ProtobufMarshaller interface { + MarshalTo(data []byte) (int, error) +} + +type ProtobufReverseMarshaller interface { + MarshalToSizedBuffer(data []byte) (int, error) +} + +// NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown +// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. +func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { + // Calculate the full size of the message. + msgSize := m.Size() + if b != nil { + msgSize += int(size) + sovGenerated(size) + 1 + } + + // Reverse marshal the fields of m. + i := msgSize + i -= len(m.ContentType) + copy(data[i:], m.ContentType) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i-- + data[i] = 0x22 + i -= len(m.ContentEncoding) + copy(data[i:], m.ContentEncoding) + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i-- + data[i] = 0x1a + if b != nil { + if r, ok := b.(ProtobufReverseMarshaller); ok { + n1, err := r.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= int(size) + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of LashramOt, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } + } else { + i -= int(size) + n1, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n1) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) + } + } + i = encodeVarintGenerated(data, i, size) + i-- + data[i] = 0x12 + } + n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) + if err != nil { + return 0, err + } + i -= n2 + i = encodeVarintGenerated(data, i, uint64(n2)) + i-- + data[i] = 0xa + return msgSize - i, nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go new file mode 100644 index 0000000..b039383 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -0,0 +1,75 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package runtime + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
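
Looking back at the RawExtension documentation vendored above, the pattern is a two-phase decode: unmarshal the envelope first, keep the extension as raw bytes, and unpack it later once its kind is known. A minimal JSON-only sketch of that flow, reusing the MyAPIObject/PluginA names from the comment, with json.RawMessage playing the role of RawExtension:

package main

import (
	"encoding/json"
	"fmt"
)

// MyAPIObject keeps its plugin payload raw on the first decode.
type MyAPIObject struct {
	Kind     string          `json:"kind"`
	MyPlugin json.RawMessage `json:"myPlugin"`
}

// PluginA is one concrete plugin type the raw payload may unpack into.
type PluginA struct {
	Kind    string `json:"kind"`
	AOption string `json:"aOption"`
}

func main() {
	wire := []byte(`{"kind":"MyAPIObject","myPlugin":{"kind":"PluginA","aOption":"foo"}}`)

	// Phase 1: decode the envelope, leaving the extension untouched.
	var obj MyAPIObject
	if err := json.Unmarshal(wire, &obj); err != nil {
		panic(err)
	}

	// Phase 2: unpack the raw extension once its kind is known.
	var plugin PluginA
	if err := json.Unmarshal(obj.MyPlugin, &plugin); err != nil {
		panic(err)
	}
	fmt.Println(obj.Kind, plugin.Kind, plugin.AOption) // MyAPIObject PluginA foo
}
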
+func (in *RawExtension) DeepCopyInto(out *RawExtension) { + *out = *in + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Object != nil { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RawExtension. +func (in *RawExtension) DeepCopy() *RawExtension { + if in == nil { + return nil + } + out := new(RawExtension) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Unknown) DeepCopyInto(out *Unknown) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Raw != nil { + in, out := &in.Raw, &out.Raw + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Unknown. +func (in *Unknown) DeepCopy() *Unknown { + if in == nil { + return nil + } + out := new(Unknown) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. +func (in *Unknown) DeepCopyObject() Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/selection/operator.go b/vendor/k8s.io/apimachinery/pkg/selection/operator.go new file mode 100644 index 0000000..298f798 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/selection/operator.go @@ -0,0 +1,33 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package selection + +// Operator represents a key/field's relationship to value(s). +// See labels.Requirement and fields.Requirement for more details. +type Operator string + +const ( + DoesNotExist Operator = "!" + Equals Operator = "=" + DoubleEquals Operator = "==" + In Operator = "in" + NotEquals Operator = "!=" + NotIn Operator = "notin" + Exists Operator = "exists" + GreaterThan Operator = "gt" + LessThan Operator = "lt" +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/doc.go b/vendor/k8s.io/apimachinery/pkg/types/doc.go new file mode 100644 index 0000000..5667fa9 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package types implements various generic types used throughout kubernetes. 
+package types // import "k8s.io/apimachinery/pkg/types" diff --git a/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go new file mode 100644 index 0000000..88f0de3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/namespacedname.go @@ -0,0 +1,43 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +import ( + "fmt" +) + +// NamespacedName comprises a resource name, with a mandatory namespace, +// rendered as "/". Being a type captures intent and +// helps make sure that UIDs, namespaced names and non-namespaced names +// do not get conflated in code. For most use cases, namespace and name +// will already have been format validated at the API entry point, so we +// don't do that here. Where that's not the case (e.g. in testing), +// consider using NamespacedNameOrDie() in testing.go in this package. + +type NamespacedName struct { + Namespace string + Name string +} + +const ( + Separator = '/' +) + +// String returns the general purpose string representation +func (n NamespacedName) String() string { + return fmt.Sprintf("%s%c%s", n.Namespace, Separator, n.Name) +} diff --git a/vendor/k8s.io/apimachinery/pkg/types/nodename.go b/vendor/k8s.io/apimachinery/pkg/types/nodename.go new file mode 100644 index 0000000..fee348d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/nodename.go @@ -0,0 +1,43 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// NodeName is a type that holds a api.Node's Name identifier. +// Being a type captures intent and helps make sure that the node name +// is not confused with similar concepts (the hostname, the cloud provider id, +// the cloud provider name etc) +// +// To clarify the various types: +// +// * Node.Name is the Name field of the Node in the API. This should be stored in a NodeName. +// Unfortunately, because Name is part of ObjectMeta, we can't store it as a NodeName at the API level. +// +// * Hostname is the hostname of the local machine (from uname -n). +// However, some components allow the user to pass in a --hostname-override flag, +// which will override this in most places. In the absence of anything more meaningful, +// kubelet will use Hostname as the Node.Name when it creates the Node. +// +// * The cloudproviders have the own names: GCE has InstanceName, AWS has InstanceId. +// +// For GCE, InstanceName is the Name of an Instance object in the GCE API. 
On GCE, Instance.Name becomes the +// Hostname, and thus it makes sense also to use it as the Node.Name. But that is GCE specific, and it is up +// to the cloudprovider how to do this mapping. +// +// For AWS, the InstanceID is not yet suitable for use as a Node.Name, so we actually use the +// PrivateDnsName for the Node.Name. And this is _not_ always the same as the hostname: if +// we are using a custom DHCP domain it won't be. +type NodeName string diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go new file mode 100644 index 0000000..fe8ecaa --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -0,0 +1,29 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// Similarly to above, these are constants to support HTTP PATCH utilized by +// both the client and server that didn't make sense for a whole package to be +// dedicated to. +type PatchType string + +const ( + JSONPatchType PatchType = "application/json-patch+json" + MergePatchType PatchType = "application/merge-patch+json" + StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" + ApplyPatchType PatchType = "application/apply-patch+yaml" +) diff --git a/vendor/k8s.io/apimachinery/pkg/types/uid.go b/vendor/k8s.io/apimachinery/pkg/types/uid.go new file mode 100644 index 0000000..8693392 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/types/uid.go @@ -0,0 +1,22 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package types + +// UID is a type that holds unique ID values, including UUIDs. Because we +// don't ONLY use UUIDs, this is an alias to string. Being a type captures +// intent and helps make sure that UIDs and names do not get conflated. +type UID string diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go new file mode 100644 index 0000000..6cf13d8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -0,0 +1,393 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clock + +import ( + "sync" + "time" +) + +// PassiveClock allows for injecting fake or real clocks into code +// that needs to read the current time but does not support scheduling +// activity in the future. +type PassiveClock interface { + Now() time.Time + Since(time.Time) time.Duration +} + +// Clock allows for injecting fake or real clocks into code that +// needs to do arbitrary things based on time. +type Clock interface { + PassiveClock + After(time.Duration) <-chan time.Time + NewTimer(time.Duration) Timer + Sleep(time.Duration) + NewTicker(time.Duration) Ticker +} + +// RealClock really calls time.Now() +type RealClock struct{} + +// Now returns the current time. +func (RealClock) Now() time.Time { + return time.Now() +} + +// Since returns time since the specified timestamp. +func (RealClock) Since(ts time.Time) time.Duration { + return time.Since(ts) +} + +// After is the same as time.After(d). +func (RealClock) After(d time.Duration) <-chan time.Time { + return time.After(d) +} + +// NewTimer returns a new Timer. +func (RealClock) NewTimer(d time.Duration) Timer { + return &realTimer{ + timer: time.NewTimer(d), + } +} + +// NewTicker returns a new Ticker. +func (RealClock) NewTicker(d time.Duration) Ticker { + return &realTicker{ + ticker: time.NewTicker(d), + } +} + +// Sleep pauses the RealClock for duration d. +func (RealClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// FakePassiveClock implements PassiveClock, but returns an arbitrary time. +type FakePassiveClock struct { + lock sync.RWMutex + time time.Time +} + +// FakeClock implements Clock, but returns an arbitrary time. +type FakeClock struct { + FakePassiveClock + + // waiters are waiting for the fake time to pass their specified time + waiters []fakeClockWaiter +} + +type fakeClockWaiter struct { + targetTime time.Time + stepInterval time.Duration + skipIfBlocked bool + destChan chan time.Time +} + +// NewFakePassiveClock returns a new FakePassiveClock. +func NewFakePassiveClock(t time.Time) *FakePassiveClock { + return &FakePassiveClock{ + time: t, + } +} + +// NewFakeClock returns a new FakeClock +func NewFakeClock(t time.Time) *FakeClock { + return &FakeClock{ + FakePassiveClock: *NewFakePassiveClock(t), + } +} + +// Now returns f's time. +func (f *FakePassiveClock) Now() time.Time { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time +} + +// Since returns time since the time in f. +func (f *FakePassiveClock) Since(ts time.Time) time.Duration { + f.lock.RLock() + defer f.lock.RUnlock() + return f.time.Sub(ts) +} + +// SetTime sets the time on the FakePassiveClock. +func (f *FakePassiveClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.time = t +} + +// After is the Fake version of time.After(d). +func (f *FakeClock) After(d time.Duration) <-chan time.Time { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }) + return ch +} + +// NewTimer is the Fake version of time.NewTimer(d). 
+func (f *FakeClock) NewTimer(d time.Duration) Timer { + f.lock.Lock() + defer f.lock.Unlock() + stopTime := f.time.Add(d) + ch := make(chan time.Time, 1) // Don't block! + timer := &fakeTimer{ + fakeClock: f, + waiter: fakeClockWaiter{ + targetTime: stopTime, + destChan: ch, + }, + } + f.waiters = append(f.waiters, timer.waiter) + return timer +} + +// NewTicker returns a new Ticker. +func (f *FakeClock) NewTicker(d time.Duration) Ticker { + f.lock.Lock() + defer f.lock.Unlock() + tickTime := f.time.Add(d) + ch := make(chan time.Time, 1) // hold one tick + f.waiters = append(f.waiters, fakeClockWaiter{ + targetTime: tickTime, + stepInterval: d, + skipIfBlocked: true, + destChan: ch, + }) + + return &fakeTicker{ + c: ch, + } +} + +// Step moves clock by Duration, notifies anyone that's called After, Tick, or NewTimer +func (f *FakeClock) Step(d time.Duration) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(f.time.Add(d)) +} + +// SetTime sets the time on a FakeClock. +func (f *FakeClock) SetTime(t time.Time) { + f.lock.Lock() + defer f.lock.Unlock() + f.setTimeLocked(t) +} + +// Actually changes the time and checks any waiters. f must be write-locked. +func (f *FakeClock) setTimeLocked(t time.Time) { + f.time = t + newWaiters := make([]fakeClockWaiter, 0, len(f.waiters)) + for i := range f.waiters { + w := &f.waiters[i] + if !w.targetTime.After(t) { + + if w.skipIfBlocked { + select { + case w.destChan <- t: + default: + } + } else { + w.destChan <- t + } + + if w.stepInterval > 0 { + for !w.targetTime.After(t) { + w.targetTime = w.targetTime.Add(w.stepInterval) + } + newWaiters = append(newWaiters, *w) + } + + } else { + newWaiters = append(newWaiters, f.waiters[i]) + } + } + f.waiters = newWaiters +} + +// HasWaiters returns true if After has been called on f but not yet satisfied (so you can +// write race-free tests). +func (f *FakeClock) HasWaiters() bool { + f.lock.RLock() + defer f.lock.RUnlock() + return len(f.waiters) > 0 +} + +// Sleep pauses the FakeClock for duration d. +func (f *FakeClock) Sleep(d time.Duration) { + f.Step(d) +} + +// IntervalClock implements Clock, but each invocation of Now steps the clock forward the specified duration +type IntervalClock struct { + Time time.Time + Duration time.Duration +} + +// Now returns i's time. +func (i *IntervalClock) Now() time.Time { + i.Time = i.Time.Add(i.Duration) + return i.Time +} + +// Since returns time since the time in i. +func (i *IntervalClock) Since(ts time.Time) time.Duration { + return i.Time.Sub(ts) +} + +// After is currently unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) After(d time.Duration) <-chan time.Time { + panic("IntervalClock doesn't implement After") +} + +// NewTimer is currently unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTimer(d time.Duration) Timer { + panic("IntervalClock doesn't implement NewTimer") +} + +// NewTicker is currently unimplemented, will panic. +// TODO: make interval clock use FakeClock so this can be implemented. +func (*IntervalClock) NewTicker(d time.Duration) Ticker { + panic("IntervalClock doesn't implement NewTicker") +} + +// Sleep is currently unimplemented; will panic. +func (*IntervalClock) Sleep(d time.Duration) { + panic("IntervalClock doesn't implement Sleep") +} + +// Timer allows for injecting fake or real timers into code that +// needs to do arbitrary things based on time. 
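
This clock package exists so that time can be injected: production code accepts the interface and tests drive a fake forward deterministically. A trimmed-down sketch of that injection pattern with a hand-rolled fake (not the vendored FakeClock, just the same idea, with invented names):

package main

import (
	"fmt"
	"time"
)

// nower is the minimal slice of the clock interface this sketch needs.
type nower interface {
	Now() time.Time
}

type realClock struct{}

func (realClock) Now() time.Time { return time.Now() }

// fakeClock returns a fixed, manually advanced time - handy in tests.
type fakeClock struct{ t time.Time }

func (f *fakeClock) Now() time.Time       { return f.t }
func (f *fakeClock) Step(d time.Duration) { f.t = f.t.Add(d) }

// isExpired is the kind of code that benefits from clock injection.
func isExpired(c nower, deadline time.Time) bool {
	return c.Now().After(deadline)
}

func main() {
	start := time.Date(2020, 7, 27, 0, 0, 0, 0, time.UTC)
	fc := &fakeClock{t: start}
	deadline := start.Add(time.Minute)

	fmt.Println(isExpired(fc, deadline)) // false
	fc.Step(2 * time.Minute)
	fmt.Println(isExpired(fc, deadline)) // true
	_ = realClock{}                      // production code would pass this instead
}
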
+type Timer interface { + C() <-chan time.Time + Stop() bool + Reset(d time.Duration) bool +} + +// realTimer is backed by an actual time.Timer. +type realTimer struct { + timer *time.Timer +} + +// C returns the underlying timer's channel. +func (r *realTimer) C() <-chan time.Time { + return r.timer.C +} + +// Stop calls Stop() on the underlying timer. +func (r *realTimer) Stop() bool { + return r.timer.Stop() +} + +// Reset calls Reset() on the underlying timer. +func (r *realTimer) Reset(d time.Duration) bool { + return r.timer.Reset(d) +} + +// fakeTimer implements Timer based on a FakeClock. +type fakeTimer struct { + fakeClock *FakeClock + waiter fakeClockWaiter +} + +// C returns the channel that notifies when this timer has fired. +func (f *fakeTimer) C() <-chan time.Time { + return f.waiter.destChan +} + +// Stop conditionally stops the timer. If the timer has neither fired +// nor been stopped then this call stops the timer and returns true, +// otherwise this call returns false. This is like time.Timer::Stop. +func (f *fakeTimer) Stop() bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + // The timer has already fired or been stopped, unless it is found + // among the clock's waiters. + stopped := false + oldWaiters := f.fakeClock.waiters + newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) + seekChan := f.waiter.destChan + for i := range oldWaiters { + // Identify the timer's fakeClockWaiter by the identity of the + // destination channel, nothing else is necessarily unique and + // constant since the timer's creation. + if oldWaiters[i].destChan == seekChan { + stopped = true + } else { + newWaiters = append(newWaiters, oldWaiters[i]) + } + } + + f.fakeClock.waiters = newWaiters + + return stopped +} + +// Reset conditionally updates the firing time of the timer. If the +// timer has neither fired nor been stopped then this call resets the +// timer to the fake clock's "now" + d and returns true, otherwise +// this call returns false. This is like time.Timer::Reset. +func (f *fakeTimer) Reset(d time.Duration) bool { + f.fakeClock.lock.Lock() + defer f.fakeClock.lock.Unlock() + waiters := f.fakeClock.waiters + seekChan := f.waiter.destChan + for i := range waiters { + if waiters[i].destChan == seekChan { + waiters[i].targetTime = f.fakeClock.time.Add(d) + return true + } + } + return false +} + +// Ticker defines the Ticker interface +type Ticker interface { + C() <-chan time.Time + Stop() +} + +type realTicker struct { + ticker *time.Ticker +} + +func (t *realTicker) C() <-chan time.Time { + return t.ticker.C +} + +func (t *realTicker) Stop() { + t.ticker.Stop() +} + +type fakeTicker struct { + c <-chan time.Time +} + +func (t *fakeTicker) C() <-chan time.Time { + return t.c +} + +func (t *fakeTicker) Stop() { +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go new file mode 100644 index 0000000..5d4d625 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/doc.go @@ -0,0 +1,18 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package errors implements various utility functions and types around errors. +package errors // import "k8s.io/apimachinery/pkg/util/errors" diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go new file mode 100644 index 0000000..5bafc21 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -0,0 +1,249 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package errors + +import ( + "errors" + "fmt" + + "k8s.io/apimachinery/pkg/util/sets" +) + +// MessageCountMap contains occurrence for each error message. +type MessageCountMap map[string]int + +// Aggregate represents an object that contains multiple errors, but does not +// necessarily have singular semantic meaning. +// The aggregate can be used with `errors.Is()` to check for the occurrence of +// a specific error type. +// Errors.As() is not supported, because the caller presumably cares about a +// specific error of potentially multiple that match the given type. +type Aggregate interface { + error + Errors() []error + Is(error) bool +} + +// NewAggregate converts a slice of errors into an Aggregate interface, which +// is itself an implementation of the error interface. If the slice is empty, +// this returns nil. +// It will check if any of the element of input error list is nil, to avoid +// nil pointer panic when call Error(). +func NewAggregate(errlist []error) Aggregate { + if len(errlist) == 0 { + return nil + } + // In case of input error list contains nil + var errs []error + for _, e := range errlist { + if e != nil { + errs = append(errs, e) + } + } + if len(errs) == 0 { + return nil + } + return aggregate(errs) +} + +// This helper implements the error and Errors interfaces. Keeping it private +// prevents people from making an aggregate of 0 errors, which is not +// an error, but does satisfy the error interface. +type aggregate []error + +// Error is part of the error interface. +func (agg aggregate) Error() string { + if len(agg) == 0 { + // This should never happen, really. 
+ return "" + } + if len(agg) == 1 { + return agg[0].Error() + } + seenerrs := sets.NewString() + result := "" + agg.visit(func(err error) bool { + msg := err.Error() + if seenerrs.Has(msg) { + return false + } + seenerrs.Insert(msg) + if len(seenerrs) > 1 { + result += ", " + } + result += msg + return false + }) + if len(seenerrs) == 1 { + return result + } + return "[" + result + "]" +} + +func (agg aggregate) Is(target error) bool { + return agg.visit(func(err error) bool { + return errors.Is(err, target) + }) +} + +func (agg aggregate) visit(f func(err error) bool) bool { + for _, err := range agg { + switch err := err.(type) { + case aggregate: + if match := err.visit(f); match { + return match + } + case Aggregate: + for _, nestedErr := range err.Errors() { + if match := f(nestedErr); match { + return match + } + } + default: + if match := f(err); match { + return match + } + } + } + + return false +} + +// Errors is part of the Aggregate interface. +func (agg aggregate) Errors() []error { + return []error(agg) +} + +// Matcher is used to match errors. Returns true if the error matches. +type Matcher func(error) bool + +// FilterOut removes all errors that match any of the matchers from the input +// error. If the input is a singular error, only that error is tested. If the +// input implements the Aggregate interface, the list of errors will be +// processed recursively. +// +// This can be used, for example, to remove known-OK errors (such as io.EOF or +// os.PathNotFound) from a list of errors. +func FilterOut(err error, fns ...Matcher) error { + if err == nil { + return nil + } + if agg, ok := err.(Aggregate); ok { + return NewAggregate(filterErrors(agg.Errors(), fns...)) + } + if !matchesError(err, fns...) { + return err + } + return nil +} + +// matchesError returns true if any Matcher returns true +func matchesError(err error, fns ...Matcher) bool { + for _, fn := range fns { + if fn(err) { + return true + } + } + return false +} + +// filterErrors returns any errors (or nested errors, if the list contains +// nested Errors) for which all fns return false. If no errors +// remain a nil list is returned. The resulting silec will have all +// nested slices flattened as a side effect. +func filterErrors(list []error, fns ...Matcher) []error { + result := []error{} + for _, err := range list { + r := FilterOut(err, fns...) + if r != nil { + result = append(result, r) + } + } + return result +} + +// Flatten takes an Aggregate, which may hold other Aggregates in arbitrary +// nesting, and flattens them all into a single Aggregate, recursively. +func Flatten(agg Aggregate) Aggregate { + result := []error{} + if agg == nil { + return nil + } + for _, err := range agg.Errors() { + if a, ok := err.(Aggregate); ok { + r := Flatten(a) + if r != nil { + result = append(result, r.Errors()...) + } + } else { + if err != nil { + result = append(result, err) + } + } + } + return NewAggregate(result) +} + +// CreateAggregateFromMessageCountMap converts MessageCountMap Aggregate +func CreateAggregateFromMessageCountMap(m MessageCountMap) Aggregate { + if m == nil { + return nil + } + result := make([]error, 0, len(m)) + for errStr, count := range m { + var countStr string + if count > 1 { + countStr = fmt.Sprintf(" (repeated %v times)", count) + } + result = append(result, fmt.Errorf("%v%v", errStr, countStr)) + } + return NewAggregate(result) +} + +// Reduce will return err or, if err is an Aggregate and only has one item, +// the first item in the aggregate. 
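
The aggregate type above collapses many errors into a single error value, dropping nils and deduplicating messages. A small standalone sketch of that join-and-dedupe idea (illustrative only, not the vendored implementation):

package main

import (
	"errors"
	"fmt"
	"strings"
)

// joinErrors mimics NewAggregate at a high level: drop nils, return a single
// surviving error unchanged, and otherwise wrap the deduplicated messages.
func joinErrors(errs ...error) error {
	seen := map[string]bool{}
	var kept []error
	for _, err := range errs {
		if err == nil {
			continue
		}
		if msg := err.Error(); !seen[msg] {
			seen[msg] = true
			kept = append(kept, err)
		}
	}
	switch len(kept) {
	case 0:
		return nil
	case 1:
		return kept[0]
	}
	msgs := make([]string, len(kept))
	for i, err := range kept {
		msgs[i] = err.Error()
	}
	return fmt.Errorf("[%s]", strings.Join(msgs, ", "))
}

func main() {
	err := joinErrors(nil, errors.New("dial timeout"), errors.New("dial timeout"), errors.New("bad password"))
	fmt.Println(err) // [dial timeout, bad password]
}
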
+func Reduce(err error) error { + if agg, ok := err.(Aggregate); ok && err != nil { + switch len(agg.Errors()) { + case 1: + return agg.Errors()[0] + case 0: + return nil + } + } + return err +} + +// AggregateGoroutines runs the provided functions in parallel, stuffing all +// non-nil errors into the returned Aggregate. +// Returns nil if all the functions complete successfully. +func AggregateGoroutines(funcs ...func() error) Aggregate { + errChan := make(chan error, len(funcs)) + for _, f := range funcs { + go func(f func() error) { errChan <- f() }(f) + } + errs := make([]error, 0) + for i := 0; i < cap(errChan); i++ { + if err := <-errChan; err != nil { + errs = append(errs, err) + } + } + return NewAggregate(errs) +} + +// ErrPreconditionViolated is returned when the precondition is violated +var ErrPreconditionViolated = errors.New("precondition is violated") diff --git a/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go new file mode 100644 index 0000000..066680f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/framer/framer.go @@ -0,0 +1,167 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package framer implements simple frame decoding techniques for an io.ReadCloser +package framer + +import ( + "encoding/binary" + "encoding/json" + "io" +) + +type lengthDelimitedFrameWriter struct { + w io.Writer + h [4]byte +} + +func NewLengthDelimitedFrameWriter(w io.Writer) io.Writer { + return &lengthDelimitedFrameWriter{w: w} +} + +// Write writes a single frame to the nested writer, prepending it with the length in +// in bytes of data (as a 4 byte, bigendian uint32). +func (w *lengthDelimitedFrameWriter) Write(data []byte) (int, error) { + binary.BigEndian.PutUint32(w.h[:], uint32(len(data))) + n, err := w.w.Write(w.h[:]) + if err != nil { + return 0, err + } + if n != len(w.h) { + return 0, io.ErrShortWrite + } + return w.w.Write(data) +} + +type lengthDelimitedFrameReader struct { + r io.ReadCloser + remaining int +} + +// NewLengthDelimitedFrameReader returns an io.Reader that will decode length-prefixed +// frames off of a stream. +// +// The protocol is: +// +// stream: message ... +// message: prefix body +// prefix: 4 byte uint32 in BigEndian order, denotes length of body +// body: bytes (0..prefix) +// +// If the buffer passed to Read is not long enough to contain an entire frame, io.ErrShortRead +// will be returned along with the number of bytes read. +func NewLengthDelimitedFrameReader(r io.ReadCloser) io.ReadCloser { + return &lengthDelimitedFrameReader{r: r} +} + +// Read attempts to read an entire frame into data. If that is not possible, io.ErrShortBuffer +// is returned and subsequent calls will attempt to read the last frame. A frame is complete when +// err is nil. 
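
A short round-trip sketch of the length-delimited framer described above (illustrative only; ioutil.NopCloser is used just to satisfy the io.ReadCloser parameter):

package main

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"k8s.io/apimachinery/pkg/util/framer"
)

func main() {
	var buf bytes.Buffer
	w := framer.NewLengthDelimitedFrameWriter(&buf)
	w.Write([]byte("hello")) // each frame: 4-byte big-endian length, then the payload
	w.Write([]byte("world!"))

	r := framer.NewLengthDelimitedFrameReader(ioutil.NopCloser(&buf))
	frame := make([]byte, 32)
	for {
		n, err := r.Read(frame)
		if err == io.EOF {
			break
		}
		if err != nil { // io.ErrShortBuffer would mean the frame is larger than our buffer
			panic(err)
		}
		fmt.Printf("%q\n", frame[:n]) // "hello", then "world!"
	}
}
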
+func (r *lengthDelimitedFrameReader) Read(data []byte) (int, error) { + if r.remaining <= 0 { + header := [4]byte{} + n, err := io.ReadAtLeast(r.r, header[:4], 4) + if err != nil { + return 0, err + } + if n != 4 { + return 0, io.ErrUnexpectedEOF + } + frameLength := int(binary.BigEndian.Uint32(header[:])) + r.remaining = frameLength + } + + expect := r.remaining + max := expect + if max > len(data) { + max = len(data) + } + n, err := io.ReadAtLeast(r.r, data[:max], int(max)) + r.remaining -= n + if err == io.ErrShortBuffer || r.remaining > 0 { + return n, io.ErrShortBuffer + } + if err != nil { + return n, err + } + if n != expect { + return n, io.ErrUnexpectedEOF + } + + return n, nil +} + +func (r *lengthDelimitedFrameReader) Close() error { + return r.r.Close() +} + +type jsonFrameReader struct { + r io.ReadCloser + decoder *json.Decoder + remaining []byte +} + +// NewJSONFramedReader returns an io.Reader that will decode individual JSON objects off +// of a wire. +// +// The boundaries between each frame are valid JSON objects. A JSON parsing error will terminate +// the read. +func NewJSONFramedReader(r io.ReadCloser) io.ReadCloser { + return &jsonFrameReader{ + r: r, + decoder: json.NewDecoder(r), + } +} + +// ReadFrame decodes the next JSON object in the stream, or returns an error. The returned +// byte slice will be modified the next time ReadFrame is invoked and should not be altered. +func (r *jsonFrameReader) Read(data []byte) (int, error) { + // Return whatever remaining data exists from an in progress frame + if n := len(r.remaining); n > 0 { + if n <= len(data) { + data = append(data[0:0], r.remaining...) + r.remaining = nil + return n, nil + } + + n = len(data) + data = append(data[0:0], r.remaining[:n]...) + r.remaining = r.remaining[n:] + return n, io.ErrShortBuffer + } + + // RawMessage#Unmarshal appends to data - we reset the slice down to 0 and will either see + // data written to data, or be larger than data and a different array. + n := len(data) + m := json.RawMessage(data[:0]) + if err := r.decoder.Decode(&m); err != nil { + return 0, err + } + + // If capacity of data is less than length of the message, decoder will allocate a new slice + // and set m to it, which means we need to copy the partial result back into data and preserve + // the remaining result for subsequent reads. + if len(m) > n { + data = append(data[0:0], m[:n]...) + r.remaining = m[n:] + return n, io.ErrShortBuffer + } + return len(m), nil +} + +func (r *jsonFrameReader) Close() error { + return r.r.Close() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go new file mode 100644 index 0000000..ec1cb70 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -0,0 +1,372 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto + +package intstr + +import ( + fmt "fmt" + + io "io" + math "math" + math_bits "math/bits" + + proto "github.com/gogo/protobuf/proto" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { + return fileDescriptor_94e046ae3ce6121c, []int{0} +} +func (m *IntOrString) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *IntOrString) XXX_Merge(src proto.Message) { + xxx_messageInfo_IntOrString.Merge(m, src) +} +func (m *IntOrString) XXX_Size() int { + return m.Size() +} +func (m *IntOrString) XXX_DiscardUnknown() { + xxx_messageInfo_IntOrString.DiscardUnknown(m) +} + +var xxx_messageInfo_IntOrString proto.InternalMessageInfo + +func init() { + proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) +} + +var fileDescriptor_94e046ae3ce6121c = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 
0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} + +func (m *IntOrString) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + i -= len(m.StrVal) + copy(dAtA[i:], m.StrVal) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i-- + dAtA[i] = 0x1a + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + i-- + dAtA[i] = 0x10 + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + i-- + dAtA[i] = 0x8 + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *IntOrString) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Type)) + n += 1 + sovGenerated(uint64(m.IntVal)) + l = len(m.StrVal) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *IntOrString) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntOrString: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntOrString: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= Type(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntVal", wireType) + } + m.IntVal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.IntVal |= int32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StrVal", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + 
if postIndex > l { + return io.ErrUnexpectedEOF + } + m.StrVal = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto new file mode 100644 index 0000000..e79fb9e --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -0,0 +1,43 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.util.intstr; + +// Package-wide variables from generator "generated". +option go_package = "intstr"; + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. 
+// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +message IntOrString { + optional int64 type = 1; + + optional int32 intVal = 2; + + optional string strVal = 3; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go new file mode 100644 index 0000000..cb974dc --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -0,0 +1,185 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package intstr + +import ( + "encoding/json" + "errors" + "fmt" + "math" + "runtime/debug" + "strconv" + "strings" + + "github.com/google/gofuzz" + "k8s.io/klog" +) + +// IntOrString is a type that can hold an int32 or a string. When used in +// JSON or YAML marshalling and unmarshalling, it produces or consumes the +// inner type. This allows you to have, for example, a JSON field that can +// accept a name or number. +// TODO: Rename to Int32OrString +// +// +protobuf=true +// +protobuf.options.(gogoproto.goproto_stringer)=false +// +k8s:openapi-gen=true +type IntOrString struct { + Type Type `protobuf:"varint,1,opt,name=type,casttype=Type"` + IntVal int32 `protobuf:"varint,2,opt,name=intVal"` + StrVal string `protobuf:"bytes,3,opt,name=strVal"` +} + +// Type represents the stored type of IntOrString. +type Type int64 + +const ( + Int Type = iota // The IntOrString holds an int. + String // The IntOrString holds a string. +) + +// FromInt creates an IntOrString object with an int32 value. It is +// your responsibility not to call this method with a value greater +// than int32. +// TODO: convert to (val int32) +func FromInt(val int) IntOrString { + if val > math.MaxInt32 || val < math.MinInt32 { + klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + } + return IntOrString{Type: Int, IntVal: int32(val)} +} + +// FromString creates an IntOrString object with a string value. +func FromString(val string) IntOrString { + return IntOrString{Type: String, StrVal: val} +} + +// Parse the given string and try to convert it to an integer before +// setting it as a string value. +func Parse(val string) IntOrString { + i, err := strconv.Atoi(val) + if err != nil { + return FromString(val) + } + return FromInt(i) +} + +// UnmarshalJSON implements the json.Unmarshaller interface. +func (intstr *IntOrString) UnmarshalJSON(value []byte) error { + if value[0] == '"' { + intstr.Type = String + return json.Unmarshal(value, &intstr.StrVal) + } + intstr.Type = Int + return json.Unmarshal(value, &intstr.IntVal) +} + +// String returns the string value, or the Itoa of the int value. +func (intstr *IntOrString) String() string { + if intstr.Type == String { + return intstr.StrVal + } + return strconv.Itoa(intstr.IntValue()) +} + +// IntValue returns the IntVal if type Int, or if +// it is a String, will attempt a conversion to int, +// returning 0 if a parsing error occurs. 
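
A small sketch of how IntOrString consumes either a number or a string during JSON unmarshalling (illustrative only; the probe struct and its values are hypothetical):

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

// probe is a hypothetical struct; any field typed as IntOrString behaves the same way.
type probe struct {
	Port intstr.IntOrString `json:"port"`
}

func main() {
	var a, b probe
	json.Unmarshal([]byte(`{"port": 8080}`), &a)
	json.Unmarshal([]byte(`{"port": "https"}`), &b)

	fmt.Println(a.Port.Type == intstr.Int, a.Port.IntValue())  // true 8080
	fmt.Println(b.Port.Type == intstr.String, b.Port.String()) // true https
}
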
+func (intstr *IntOrString) IntValue() int { + if intstr.Type == String { + i, _ := strconv.Atoi(intstr.StrVal) + return i + } + return int(intstr.IntVal) +} + +// MarshalJSON implements the json.Marshaller interface. +func (intstr IntOrString) MarshalJSON() ([]byte, error) { + switch intstr.Type { + case Int: + return json.Marshal(intstr.IntVal) + case String: + return json.Marshal(intstr.StrVal) + default: + return []byte{}, fmt.Errorf("impossible IntOrString.Type") + } +} + +// OpenAPISchemaType is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +// +// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators +func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } + +// OpenAPISchemaFormat is used by the kube-openapi generator when constructing +// the OpenAPI spec of this type. +func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } + +func (intstr *IntOrString) Fuzz(c fuzz.Continue) { + if intstr == nil { + return + } + if c.RandBool() { + intstr.Type = Int + c.Fuzz(&intstr.IntVal) + intstr.StrVal = "" + } else { + intstr.Type = String + intstr.IntVal = 0 + c.Fuzz(&intstr.StrVal) + } +} + +func ValueOrDefault(intOrPercent *IntOrString, defaultValue IntOrString) *IntOrString { + if intOrPercent == nil { + return &defaultValue + } + return intOrPercent +} + +func GetValueFromIntOrPercent(intOrPercent *IntOrString, total int, roundUp bool) (int, error) { + if intOrPercent == nil { + return 0, errors.New("nil value for IntOrString") + } + value, isPercent, err := getIntOrPercentValue(intOrPercent) + if err != nil { + return 0, fmt.Errorf("invalid value for IntOrString: %v", err) + } + if isPercent { + if roundUp { + value = int(math.Ceil(float64(value) * (float64(total)) / 100)) + } else { + value = int(math.Floor(float64(value) * (float64(total)) / 100)) + } + } + return value, nil +} + +func getIntOrPercentValue(intOrStr *IntOrString) (int, bool, error) { + switch intOrStr.Type { + case Int: + return intOrStr.IntValue(), false, nil + case String: + s := strings.Replace(intOrStr.StrVal, "%", "", -1) + v, err := strconv.Atoi(s) + if err != nil { + return 0, false, fmt.Errorf("invalid value %q: %v", intOrStr.StrVal, err) + } + return int(v), true, nil + } + return 0, false, fmt.Errorf("invalid type: neither int nor percentage") +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go new file mode 100644 index 0000000..2048348 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -0,0 +1,156 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package json + +import ( + "bytes" + "encoding/json" + "fmt" + "io" +) + +// NewEncoder delegates to json.NewEncoder +// It is only here so this package can be a drop-in for common encoding/json uses +func NewEncoder(w io.Writer) *json.Encoder { + return json.NewEncoder(w) +} + +// Marshal delegates to json.Marshal +// It is only here so this package can be a drop-in for common encoding/json uses +func Marshal(v interface{}) ([]byte, error) { + return json.Marshal(v) +} + +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + +// Unmarshal unmarshals the given data +// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +func Unmarshal(data []byte, v interface{}) error { + switch v := v.(type) { + case *map[string]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertMapNumbers(*v, 0) + + case *[]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertSliceNumbers(*v, 0) + + case *interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertInterfaceNumbers(v, 0) + + default: + return json.Unmarshal(data, v) + } +} + +func convertInterfaceNumbers(v *interface{}, depth int) error { + var err error + switch v2 := (*v).(type) { + case json.Number: + *v, err = convertNumber(v2) + case map[string]interface{}: + err = convertMapNumbers(v2, depth+1) + case []interface{}: + err = convertSliceNumbers(v2, depth+1) + } + return err +} + +// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + + var err error + for k, v := range m { + switch v := v.(type) { + case json.Number: + m[k], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v, depth+1) + case []interface{}: + err = convertSliceNumbers(v, depth+1) + } + if err != nil { + return err + } + } + return nil +} + +// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. 
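
A sketch of the behaviour this wrapper adds over plain encoding/json (illustrative only; the keys and values are hypothetical):

package main

import (
	"fmt"

	utiljson "k8s.io/apimachinery/pkg/util/json"
)

func main() {
	var obj map[string]interface{}
	// Plain encoding/json would decode both numbers as float64; the wrapper
	// keeps whole numbers as int64 via UseNumber plus convertMapNumbers.
	if err := utiljson.Unmarshal([]byte(`{"replicas": 3, "ratio": 0.5}`), &obj); err != nil {
		panic(err)
	}
	fmt.Printf("%T %T\n", obj["replicas"], obj["ratio"]) // int64 float64
}
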
+// values which are map[string]interface{} or []interface{} are recursively visited +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + + var err error + for i, v := range s { + switch v := v.(type) { + case json.Number: + s[i], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v, depth+1) + case []interface{}: + err = convertSliceNumbers(v, depth+1) + } + if err != nil { + return err + } + } + return nil +} + +// convertNumber converts a json.Number to an int64 or float64, or returns an error +func convertNumber(n json.Number) (interface{}, error) { + // Attempt to convert to an int64 first + if i, err := n.Int64(); err == nil { + return i, nil + } + // Return a float64 (default json.Decode() behavior) + // An overflow will return an error + return n.Float64() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go new file mode 100644 index 0000000..d69bf32 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go @@ -0,0 +1,93 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package naming + +import ( + "fmt" + "regexp" + goruntime "runtime" + "runtime/debug" + "strconv" + "strings" +) + +// GetNameFromCallsite walks back through the call stack until we find a caller from outside of the ignoredPackages +// it returns back a shortpath/filename:line to aid in identification of this reflector when it starts logging +func GetNameFromCallsite(ignoredPackages ...string) string { + name := "????" + const maxStack = 10 + for i := 1; i < maxStack; i++ { + _, file, line, ok := goruntime.Caller(i) + if !ok { + file, line, ok = extractStackCreator() + if !ok { + break + } + i += maxStack + } + if hasPackage(file, append(ignoredPackages, "/runtime/asm_")) { + continue + } + + file = trimPackagePrefix(file) + name = fmt.Sprintf("%s:%d", file, line) + break + } + return name +} + +// hasPackage returns true if the file is in one of the ignored packages. +func hasPackage(file string, ignoredPackages []string) bool { + for _, ignoredPackage := range ignoredPackages { + if strings.Contains(file, ignoredPackage) { + return true + } + } + return false +} + +// trimPackagePrefix reduces duplicate values off the front of a package name. +func trimPackagePrefix(file string) string { + if l := strings.LastIndex(file, "/vendor/"); l >= 0 { + return file[l+len("/vendor/"):] + } + if l := strings.LastIndex(file, "/src/"); l >= 0 { + return file[l+5:] + } + if l := strings.LastIndex(file, "/pkg/"); l >= 0 { + return file[l+1:] + } + return file +} + +var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[[:xdigit:]]+$`) + +// extractStackCreator retrieves the goroutine file and line that launched this stack. Returns false +// if the creator cannot be located. 
+// TODO: Go does not expose this via runtime https://github.com/golang/go/issues/11440 +func extractStackCreator() (string, int, bool) { + stack := debug.Stack() + matches := stackCreator.FindStringSubmatch(string(stack)) + if len(matches) != 4 { + return "", 0, false + } + line, err := strconv.Atoi(matches[3]) + if err != nil { + return "", 0, false + } + return matches[2], line, true +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go new file mode 100644 index 0000000..7b64e68 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -0,0 +1,484 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "bufio" + "bytes" + "context" + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "path" + "strconv" + "strings" + + "golang.org/x/net/http2" + "k8s.io/klog" +) + +// JoinPreservingTrailingSlash does a path.Join of the specified elements, +// preserving any trailing slash on the last non-empty segment +func JoinPreservingTrailingSlash(elem ...string) string { + // do the basic path join + result := path.Join(elem...) + + // find the last non-empty segment + for i := len(elem) - 1; i >= 0; i-- { + if len(elem[i]) > 0 { + // if the last segment ended in a slash, ensure our result does as well + if strings.HasSuffix(elem[i], "/") && !strings.HasSuffix(result, "/") { + result += "/" + } + break + } + } + + return result +} + +// IsTimeout returns true if the given error is a network timeout error +func IsTimeout(err error) bool { + neterr, ok := err.(net.Error) + return ok && neterr != nil && neterr.Timeout() +} + +// IsProbableEOF returns true if the given error resembles a connection termination +// scenario that would justify assuming that the watch is empty. +// These errors are what the Go http stack returns back to us which are general +// connection closure errors (strongly correlated) and callers that need to +// differentiate probable errors in connection behavior between normal "this is +// disconnected" should use the method. 
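
A quick comparison of JoinPreservingTrailingSlash with the standard path.Join it wraps (illustrative only; utilnet is just an import alias to avoid clashing with the standard net package):

package main

import (
	"fmt"
	"path"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// path.Join cleans away the trailing slash; the helper restores it.
	fmt.Println(path.Join("/api/v1", "namespaces/"))                          // /api/v1/namespaces
	fmt.Println(utilnet.JoinPreservingTrailingSlash("/api/v1", "namespaces/")) // /api/v1/namespaces/
}
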
+func IsProbableEOF(err error) bool { + if err == nil { + return false + } + if uerr, ok := err.(*url.Error); ok { + err = uerr.Err + } + msg := err.Error() + switch { + case err == io.EOF: + return true + case msg == "http: can't write HTTP request on broken connection": + return true + case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"): + return true + case strings.Contains(msg, "connection reset by peer"): + return true + case strings.Contains(strings.ToLower(msg), "use of closed network connection"): + return true + } + return false +} + +var defaultTransport = http.DefaultTransport.(*http.Transport) + +// SetOldTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetOldTransportDefaults(t *http.Transport) *http.Transport { + if t.Proxy == nil || isDefault(t.Proxy) { + // http.ProxyFromEnvironment doesn't respect CIDRs and that makes it impossible to exclude things like pod and service IPs from proxy settings + // ProxierWithNoProxyCIDR allows CIDR rules in NO_PROXY + t.Proxy = NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment) + } + // If no custom dialer is set, use the default context dialer + if t.DialContext == nil && t.Dial == nil { + t.DialContext = defaultTransport.DialContext + } + if t.TLSHandshakeTimeout == 0 { + t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout + } + if t.IdleConnTimeout == 0 { + t.IdleConnTimeout = defaultTransport.IdleConnTimeout + } + return t +} + +// SetTransportDefaults applies the defaults from http.DefaultTransport +// for the Proxy, Dial, and TLSHandshakeTimeout fields if unset +func SetTransportDefaults(t *http.Transport) *http.Transport { + t = SetOldTransportDefaults(t) + // Allow clients to disable http2 if needed. 
+ if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { + klog.Infof("HTTP2 has been explicitly disabled") + } else if allowsHTTP2(t) { + if err := http2.ConfigureTransport(t); err != nil { + klog.Warningf("Transport failed http2 configuration: %v", err) + } + } + return t +} + +func allowsHTTP2(t *http.Transport) bool { + if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { + // the transport expressed no NextProto preference, allow + return true + } + for _, p := range t.TLSClientConfig.NextProtos { + if p == http2.NextProtoTLS { + // the transport explicitly allowed http/2 + return true + } + } + // the transport explicitly set NextProtos and excluded http/2 + return false +} + +type RoundTripperWrapper interface { + http.RoundTripper + WrappedRoundTripper() http.RoundTripper +} + +type DialFunc func(ctx context.Context, net, addr string) (net.Conn, error) + +func DialerFor(transport http.RoundTripper) (DialFunc, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + // transport.DialContext takes precedence over transport.Dial + if transport.DialContext != nil { + return transport.DialContext, nil + } + // adapt transport.Dial to the DialWithContext signature + if transport.Dial != nil { + return func(ctx context.Context, net, addr string) (net.Conn, error) { + return transport.Dial(net, addr) + }, nil + } + // otherwise return nil + return nil, nil + case RoundTripperWrapper: + return DialerFor(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %T", transport) + } +} + +type TLSClientConfigHolder interface { + TLSClientConfig() *tls.Config +} + +func TLSClientConfig(transport http.RoundTripper) (*tls.Config, error) { + if transport == nil { + return nil, nil + } + + switch transport := transport.(type) { + case *http.Transport: + return transport.TLSClientConfig, nil + case TLSClientConfigHolder: + return transport.TLSClientConfig(), nil + case RoundTripperWrapper: + return TLSClientConfig(transport.WrappedRoundTripper()) + default: + return nil, fmt.Errorf("unknown transport type: %T", transport) + } +} + +func FormatURL(scheme string, host string, port int, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: net.JoinHostPort(host, strconv.Itoa(port)), + Path: path, + } +} + +func GetHTTPClient(req *http.Request) string { + if ua := req.UserAgent(); len(ua) != 0 { + return ua + } + return "unknown" +} + +// SourceIPs splits the comma separated X-Forwarded-For header and joins it with +// the X-Real-Ip header and/or req.RemoteAddr, ignoring invalid IPs. +// The X-Real-Ip is omitted if it's already present in the X-Forwarded-For chain. +// The req.RemoteAddr is always the last IP in the returned list. +// It returns nil if all of these are empty or invalid. +func SourceIPs(req *http.Request) []net.IP { + var srcIPs []net.IP + + hdr := req.Header + // First check the X-Forwarded-For header for requests via proxy. + hdrForwardedFor := hdr.Get("X-Forwarded-For") + if hdrForwardedFor != "" { + // X-Forwarded-For can be a csv of IPs in case of multiple proxies. + // Use the first valid one. + parts := strings.Split(hdrForwardedFor, ",") + for _, part := range parts { + ip := net.ParseIP(strings.TrimSpace(part)) + if ip != nil { + srcIPs = append(srcIPs, ip) + } + } + } + + // Try the X-Real-Ip header. 
+ hdrRealIp := hdr.Get("X-Real-Ip") + if hdrRealIp != "" { + ip := net.ParseIP(hdrRealIp) + // Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain. + if ip != nil && !containsIP(srcIPs, ip) { + srcIPs = append(srcIPs, ip) + } + } + + // Always include the request Remote Address as it cannot be easily spoofed. + var remoteIP net.IP + // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. + host, _, err := net.SplitHostPort(req.RemoteAddr) + if err == nil { + remoteIP = net.ParseIP(host) + } + // Fallback if Remote Address was just IP. + if remoteIP == nil { + remoteIP = net.ParseIP(req.RemoteAddr) + } + + // Don't duplicate remote IP if it's already the last address in the chain. + if remoteIP != nil && (len(srcIPs) == 0 || !remoteIP.Equal(srcIPs[len(srcIPs)-1])) { + srcIPs = append(srcIPs, remoteIP) + } + + return srcIPs +} + +// Checks whether the given IP address is contained in the list of IPs. +func containsIP(ips []net.IP, ip net.IP) bool { + for _, v := range ips { + if v.Equal(ip) { + return true + } + } + return false +} + +// Extracts and returns the clients IP from the given request. +// Looks at X-Forwarded-For header, X-Real-Ip header and request.RemoteAddr in that order. +// Returns nil if none of them are set or is set to an invalid value. +func GetClientIP(req *http.Request) net.IP { + ips := SourceIPs(req) + if len(ips) == 0 { + return nil + } + return ips[0] +} + +// Prepares the X-Forwarded-For header for another forwarding hop by appending the previous sender's +// IP address to the X-Forwarded-For chain. +func AppendForwardedForHeader(req *http.Request) { + // Copied from net/http/httputil/reverseproxy.go: + if clientIP, _, err := net.SplitHostPort(req.RemoteAddr); err == nil { + // If we aren't the first proxy retain prior + // X-Forwarded-For information as a comma+space + // separated list and fold multiple headers into one. 
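
A sketch of how SourceIPs and GetClientIP rank the forwarding headers handled above (illustrative only; the addresses are hypothetical):

package main

import (
	"fmt"
	"net/http"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	req, _ := http.NewRequest("GET", "http://example.com/", nil)
	req.Header.Set("X-Forwarded-For", "10.0.0.1, 10.0.0.2")
	req.Header.Set("X-Real-Ip", "10.0.0.3")
	req.RemoteAddr = "192.168.1.5:52718"

	// X-Forwarded-For entries come first, then X-Real-Ip, then RemoteAddr.
	fmt.Println(utilnet.SourceIPs(req))   // [10.0.0.1 10.0.0.2 10.0.0.3 192.168.1.5]
	fmt.Println(utilnet.GetClientIP(req)) // 10.0.0.1
}
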
+ if prior, ok := req.Header["X-Forwarded-For"]; ok { + clientIP = strings.Join(prior, ", ") + ", " + clientIP + } + req.Header.Set("X-Forwarded-For", clientIP) + } +} + +var defaultProxyFuncPointer = fmt.Sprintf("%p", http.ProxyFromEnvironment) + +// isDefault checks to see if the transportProxierFunc is pointing to the default one +func isDefault(transportProxier func(*http.Request) (*url.URL, error)) bool { + transportProxierPointer := fmt.Sprintf("%p", transportProxier) + return transportProxierPointer == defaultProxyFuncPointer +} + +// NewProxierWithNoProxyCIDR constructs a Proxier function that respects CIDRs in NO_PROXY and delegates if +// no matching CIDRs are found +func NewProxierWithNoProxyCIDR(delegate func(req *http.Request) (*url.URL, error)) func(req *http.Request) (*url.URL, error) { + // we wrap the default method, so we only need to perform our check if the NO_PROXY (or no_proxy) envvar has a CIDR in it + noProxyEnv := os.Getenv("NO_PROXY") + if noProxyEnv == "" { + noProxyEnv = os.Getenv("no_proxy") + } + noProxyRules := strings.Split(noProxyEnv, ",") + + cidrs := []*net.IPNet{} + for _, noProxyRule := range noProxyRules { + _, cidr, _ := net.ParseCIDR(noProxyRule) + if cidr != nil { + cidrs = append(cidrs, cidr) + } + } + + if len(cidrs) == 0 { + return delegate + } + + return func(req *http.Request) (*url.URL, error) { + ip := net.ParseIP(req.URL.Hostname()) + if ip == nil { + return delegate(req) + } + + for _, cidr := range cidrs { + if cidr.Contains(ip) { + return nil, nil + } + } + + return delegate(req) + } +} + +// DialerFunc implements Dialer for the provided function. +type DialerFunc func(req *http.Request) (net.Conn, error) + +func (fn DialerFunc) Dial(req *http.Request) (net.Conn, error) { + return fn(req) +} + +// Dialer dials a host and writes a request to it. +type Dialer interface { + // Dial connects to the host specified by req's URL, writes the request to the connection, and + // returns the opened net.Conn. + Dial(req *http.Request) (net.Conn, error) +} + +// ConnectWithRedirects uses dialer to send req, following up to 10 redirects (relative to +// originalLocation). It returns the opened net.Conn and the raw response bytes. +// If requireSameHostRedirects is true, only redirects to the same host are permitted. +func ConnectWithRedirects(originalMethod string, originalLocation *url.URL, header http.Header, originalBody io.Reader, dialer Dialer, requireSameHostRedirects bool) (net.Conn, []byte, error) { + const ( + maxRedirects = 9 // Fail on the 10th redirect + maxResponseSize = 16384 // play it safe to allow the potential for lots of / large headers + ) + + var ( + location = originalLocation + method = originalMethod + intermediateConn net.Conn + rawResponse = bytes.NewBuffer(make([]byte, 0, 256)) + body = originalBody + ) + + defer func() { + if intermediateConn != nil { + intermediateConn.Close() + } + }() + +redirectLoop: + for redirects := 0; ; redirects++ { + if redirects > maxRedirects { + return nil, nil, fmt.Errorf("too many redirects (%d)", redirects) + } + + req, err := http.NewRequest(method, location.String(), body) + if err != nil { + return nil, nil, err + } + + req.Header = header + + intermediateConn, err = dialer.Dial(req) + if err != nil { + return nil, nil, err + } + + // Peek at the backend response. + rawResponse.Reset() + respReader := bufio.NewReader(io.TeeReader( + io.LimitReader(intermediateConn, maxResponseSize), // Don't read more than maxResponseSize bytes. + rawResponse)) // Save the raw response. 
+ resp, err := http.ReadResponse(respReader, nil) + if err != nil { + // Unable to read the backend response; let the client handle it. + klog.Warningf("Error reading backend response: %v", err) + break redirectLoop + } + + switch resp.StatusCode { + case http.StatusFound: + // Redirect, continue. + default: + // Don't redirect. + break redirectLoop + } + + // Redirected requests switch to "GET" according to the HTTP spec: + // https://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3 + method = "GET" + // don't send a body when following redirects + body = nil + + resp.Body.Close() // not used + + // Prepare to follow the redirect. + redirectStr := resp.Header.Get("Location") + if redirectStr == "" { + return nil, nil, fmt.Errorf("%d response missing Location header", resp.StatusCode) + } + // We have to parse relative to the current location, NOT originalLocation. For example, + // if we request http://foo.com/a and get back "http://bar.com/b", the result should be + // http://bar.com/b. If we then make that request and get back a redirect to "/c", the result + // should be http://bar.com/c, not http://foo.com/c. + location, err = location.Parse(redirectStr) + if err != nil { + return nil, nil, fmt.Errorf("malformed Location header: %v", err) + } + + // Only follow redirects to the same host. Otherwise, propagate the redirect response back. + if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { + return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname()) + } + + // Reset the connection. + intermediateConn.Close() + intermediateConn = nil + } + + connToReturn := intermediateConn + intermediateConn = nil // Don't close the connection when we return it. + return connToReturn, rawResponse.Bytes(), nil +} + +// CloneRequest creates a shallow copy of the request along with a deep copy of the Headers. +func CloneRequest(req *http.Request) *http.Request { + r := new(http.Request) + + // shallow clone + *r = *req + + // deep copy headers + r.Header = CloneHeader(req.Header) + + return r +} + +// CloneHeader creates a deep copy of an http.Header. +func CloneHeader(in http.Header) http.Header { + out := make(http.Header, len(in)) + for key, values := range in { + newValues := make([]string, len(values)) + copy(newValues, values) + out[key] = newValues + } + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go new file mode 100644 index 0000000..836494d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -0,0 +1,457 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package net + +import ( + "bufio" + "encoding/hex" + "fmt" + "io" + "net" + "os" + + "strings" + + "k8s.io/klog" +) + +type AddressFamily uint + +const ( + familyIPv4 AddressFamily = 4 + familyIPv6 AddressFamily = 6 +) + +type AddressFamilyPreference []AddressFamily + +var ( + preferIPv4 = AddressFamilyPreference{familyIPv4, familyIPv6} + preferIPv6 = AddressFamilyPreference{familyIPv6, familyIPv4} +) + +const ( + // LoopbackInterfaceName is the default name of the loopback interface + LoopbackInterfaceName = "lo" +) + +const ( + ipv4RouteFile = "/proc/net/route" + ipv6RouteFile = "/proc/net/ipv6_route" +) + +type Route struct { + Interface string + Destination net.IP + Gateway net.IP + Family AddressFamily +} + +type RouteFile struct { + name string + parse func(input io.Reader) ([]Route, error) +} + +// noRoutesError can be returned in case of no routes +type noRoutesError struct { + message string +} + +func (e noRoutesError) Error() string { + return e.message +} + +// IsNoRoutesError checks if an error is of type noRoutesError +func IsNoRoutesError(err error) bool { + if err == nil { + return false + } + switch err.(type) { + case noRoutesError: + return true + default: + return false + } +} + +var ( + v4File = RouteFile{name: ipv4RouteFile, parse: getIPv4DefaultRoutes} + v6File = RouteFile{name: ipv6RouteFile, parse: getIPv6DefaultRoutes} +) + +func (rf RouteFile) extract() ([]Route, error) { + file, err := os.Open(rf.name) + if err != nil { + return nil, err + } + defer file.Close() + return rf.parse(file) +} + +// getIPv4DefaultRoutes obtains the IPv4 routes, and filters out non-default routes. +func getIPv4DefaultRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} + scanner := bufio.NewReader(input) + for { + line, err := scanner.ReadString('\n') + if err == io.EOF { + break + } + //ignore the headers in the route info + if strings.HasPrefix(line, "Iface") { + continue + } + fields := strings.Fields(line) + // Interested in fields: + // 0 - interface name + // 1 - destination address + // 2 - gateway + dest, err := parseIP(fields[1], familyIPv4) + if err != nil { + return nil, err + } + gw, err := parseIP(fields[2], familyIPv4) + if err != nil { + return nil, err + } + if !dest.Equal(net.IPv4zero) { + continue + } + routes = append(routes, Route{ + Interface: fields[0], + Destination: dest, + Gateway: gw, + Family: familyIPv4, + }) + } + return routes, nil +} + +func getIPv6DefaultRoutes(input io.Reader) ([]Route, error) { + routes := []Route{} + scanner := bufio.NewReader(input) + for { + line, err := scanner.ReadString('\n') + if err == io.EOF { + break + } + fields := strings.Fields(line) + // Interested in fields: + // 0 - destination address + // 4 - gateway + // 9 - interface name + dest, err := parseIP(fields[0], familyIPv6) + if err != nil { + return nil, err + } + gw, err := parseIP(fields[4], familyIPv6) + if err != nil { + return nil, err + } + if !dest.Equal(net.IPv6zero) { + continue + } + if gw.Equal(net.IPv6zero) { + continue // loopback + } + routes = append(routes, Route{ + Interface: fields[9], + Destination: dest, + Gateway: gw, + Family: familyIPv6, + }) + } + return routes, nil +} + +// parseIP takes the hex IP address string from route file and converts it +// to a net.IP address. For IPv4, the value must be converted to big endian. 
+func parseIP(str string, family AddressFamily) (net.IP, error) { + if str == "" { + return nil, fmt.Errorf("input is nil") + } + bytes, err := hex.DecodeString(str) + if err != nil { + return nil, err + } + if family == familyIPv4 { + if len(bytes) != net.IPv4len { + return nil, fmt.Errorf("invalid IPv4 address in route") + } + return net.IP([]byte{bytes[3], bytes[2], bytes[1], bytes[0]}), nil + } + // Must be IPv6 + if len(bytes) != net.IPv6len { + return nil, fmt.Errorf("invalid IPv6 address in route") + } + return net.IP(bytes), nil +} + +func isInterfaceUp(intf *net.Interface) bool { + if intf == nil { + return false + } + if intf.Flags&net.FlagUp != 0 { + klog.V(4).Infof("Interface %v is up", intf.Name) + return true + } + return false +} + +func isLoopbackOrPointToPoint(intf *net.Interface) bool { + return intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) != 0 +} + +// getMatchingGlobalIP returns the first valid global unicast address of the given +// 'family' from the list of 'addrs'. +func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { + if len(addrs) > 0 { + for i := range addrs { + klog.V(4).Infof("Checking addr %s.", addrs[i].String()) + ip, _, err := net.ParseCIDR(addrs[i].String()) + if err != nil { + return nil, err + } + if memberOf(ip, family) { + if ip.IsGlobalUnicast() { + klog.V(4).Infof("IP found %v", ip) + return ip, nil + } else { + klog.V(4).Infof("Non-global unicast address found %v", ip) + } + } else { + klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) + } + + } + } + return nil, nil +} + +// getIPFromInterface gets the IPs on an interface and returns a global unicast address, if any. The +// interface must be up, the IP must in the family requested, and the IP must be a global unicast address. +func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInterfacer) (net.IP, error) { + intf, err := nw.InterfaceByName(intfName) + if err != nil { + return nil, err + } + if isInterfaceUp(intf) { + addrs, err := nw.Addrs(intf) + if err != nil { + return nil, err + } + klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + matchingIP, err := getMatchingGlobalIP(addrs, forFamily) + if err != nil { + return nil, err + } + if matchingIP != nil { + klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + return matchingIP, nil + } + } + return nil, nil +} + +// memberOf tells if the IP is of the desired family. Used for checking interface addresses. +func memberOf(ip net.IP, family AddressFamily) bool { + if ip.To4() != nil { + return family == familyIPv4 + } else { + return family == familyIPv6 + } +} + +// chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that +// has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. 
+// addressFamilies determines whether it prefers IPv4 or IPv6 +func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { + intfs, err := nw.Interfaces() + if err != nil { + return nil, err + } + if len(intfs) == 0 { + return nil, fmt.Errorf("no interfaces found on host.") + } + for _, family := range addressFamilies { + klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + for _, intf := range intfs { + if !isInterfaceUp(&intf) { + klog.V(4).Infof("Skipping: down interface %q", intf.Name) + continue + } + if isLoopbackOrPointToPoint(&intf) { + klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + continue + } + addrs, err := nw.Addrs(&intf) + if err != nil { + return nil, err + } + if len(addrs) == 0 { + klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + continue + } + for _, addr := range addrs { + ip, _, err := net.ParseCIDR(addr.String()) + if err != nil { + return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) + } + if !memberOf(ip, family) { + klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + continue + } + // TODO: Decide if should open up to allow IPv6 LLAs in future. + if !ip.IsGlobalUnicast() { + klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + continue + } + klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + return ip, nil + } + } + } + return nil, fmt.Errorf("no acceptable interface with global unicast address found on host") +} + +// ChooseHostInterface is a method used fetch an IP for a daemon. +// If there is no routing info file, it will choose a global IP from the system +// interfaces. Otherwise, it will use IPv4 and IPv6 route information to return the +// IP of the interface with a gateway on it (with priority given to IPv4). For a node +// with no internet connection, it returns error. +func ChooseHostInterface() (net.IP, error) { + return chooseHostInterface(preferIPv4) +} + +func chooseHostInterface(addressFamilies AddressFamilyPreference) (net.IP, error) { + var nw networkInterfacer = networkInterface{} + if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { + return chooseIPFromHostInterfaces(nw, addressFamilies) + } + routes, err := getAllDefaultRoutes() + if err != nil { + return nil, err + } + return chooseHostInterfaceFromRoute(routes, nw, addressFamilies) +} + +// networkInterfacer defines an interface for several net library functions. Production +// code will forward to net library functions, and unit tests will override the methods +// for testing purposes. +type networkInterfacer interface { + InterfaceByName(intfName string) (*net.Interface, error) + Addrs(intf *net.Interface) ([]net.Addr, error) + Interfaces() ([]net.Interface, error) +} + +// networkInterface implements the networkInterfacer interface for production code, just +// wrapping the underlying net library function calls. +type networkInterface struct{} + +func (_ networkInterface) InterfaceByName(intfName string) (*net.Interface, error) { + return net.InterfaceByName(intfName) +} + +func (_ networkInterface) Addrs(intf *net.Interface) ([]net.Addr, error) { + return intf.Addrs() +} + +func (_ networkInterface) Interfaces() ([]net.Interface, error) { + return net.Interfaces() +} + +// getAllDefaultRoutes obtains IPv4 and IPv6 default routes on the node. If unable +// to read the IPv4 routing info file, we return an error. 
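
A minimal call sketch for ChooseHostInterface (illustrative only; on Linux it consults the route files named above before falling back to scanning system interfaces):

package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	ip, err := utilnet.ChooseHostInterface()
	if err != nil {
		fmt.Println("no suitable interface:", err)
		return
	}
	fmt.Println("host IP:", ip)
}
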
If unable to read the IPv6 +// routing info file (which is optional), we'll just use the IPv4 route information. +// Using all the routing info, if no default routes are found, an error is returned. +func getAllDefaultRoutes() ([]Route, error) { + routes, err := v4File.extract() + if err != nil { + return nil, err + } + v6Routes, _ := v6File.extract() + routes = append(routes, v6Routes...) + if len(routes) == 0 { + return nil, noRoutesError{ + message: fmt.Sprintf("no default routes found in %q or %q", v4File.name, v6File.name), + } + } + return routes, nil +} + +// chooseHostInterfaceFromRoute cycles through each default route provided, looking for a +// global IP address from the interface for the route. addressFamilies determines whether it +// prefers IPv4 or IPv6 +func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { + for _, family := range addressFamilies { + klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) + for _, route := range routes { + if route.Family != family { + continue + } + klog.V(4).Infof("Default route transits interface %q", route.Interface) + finalIP, err := getIPFromInterface(route.Interface, family, nw) + if err != nil { + return nil, err + } + if finalIP != nil { + klog.V(4).Infof("Found active IP %v ", finalIP) + return finalIP, nil + } + } + } + klog.V(4).Infof("No active IP found by looking at default routes") + return nil, fmt.Errorf("unable to select an IP from default routes.") +} + +// ResolveBindAddress returns the IP address of a daemon, based on the given bindAddress: +// If bindAddress is unset, it returns the host's default IP, as with ChooseHostInterface(). +// If bindAddress is unspecified or loopback, it returns the default IP of the same +// address family as bindAddress. +// Otherwise, it just returns bindAddress. +func ResolveBindAddress(bindAddress net.IP) (net.IP, error) { + addressFamilies := preferIPv4 + if bindAddress != nil && memberOf(bindAddress, familyIPv6) { + addressFamilies = preferIPv6 + } + + if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { + hostIP, err := chooseHostInterface(addressFamilies) + if err != nil { + return nil, err + } + bindAddress = hostIP + } + return bindAddress, nil +} + +// ChooseBindAddressForInterface choose a global IP for a specific interface, with priority given to IPv4. +// This is required in case of network setups where default routes are present, but network +// interfaces use only link-local addresses (e.g. as described in RFC5549). +// e.g when using BGP to announce a host IP over link-local ip addresses and this ip address is attached to the lo interface. +func ChooseBindAddressForInterface(intfName string) (net.IP, error) { + var nw networkInterfacer = networkInterface{} + for _, family := range preferIPv4 { + ip, err := getIPFromInterface(intfName, family, nw) + if err != nil { + return nil, err + } + if ip != nil { + return ip, nil + } + } + return nil, fmt.Errorf("unable to select an IP from %s network interface", intfName) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go new file mode 100644 index 0000000..7b6eca8 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_range.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "fmt" + "strconv" + "strings" +) + +// PortRange represents a range of TCP/UDP ports. To represent a single port, +// set Size to 1. +type PortRange struct { + Base int + Size int +} + +// Contains tests whether a given port falls within the PortRange. +func (pr *PortRange) Contains(p int) bool { + return (p >= pr.Base) && ((p - pr.Base) < pr.Size) +} + +// String converts the PortRange to a string representation, which can be +// parsed by PortRange.Set or ParsePortRange. +func (pr PortRange) String() string { + if pr.Size == 0 { + return "" + } + return fmt.Sprintf("%d-%d", pr.Base, pr.Base+pr.Size-1) +} + +// Set parses a string of the form "value", "min-max", or "min+offset", inclusive at both ends, and +// sets the PortRange from it. This is part of the flag.Value and pflag.Value +// interfaces. +func (pr *PortRange) Set(value string) error { + const ( + SinglePortNotation = 1 << iota + HyphenNotation + PlusNotation + ) + + value = strings.TrimSpace(value) + hyphenIndex := strings.Index(value, "-") + plusIndex := strings.Index(value, "+") + + if value == "" { + pr.Base = 0 + pr.Size = 0 + return nil + } + + var err error + var low, high int + var notation int + + if plusIndex == -1 && hyphenIndex == -1 { + notation |= SinglePortNotation + } + if hyphenIndex != -1 { + notation |= HyphenNotation + } + if plusIndex != -1 { + notation |= PlusNotation + } + + switch notation { + case SinglePortNotation: + var port int + port, err = strconv.Atoi(value) + if err != nil { + return err + } + low = port + high = port + case HyphenNotation: + low, err = strconv.Atoi(value[:hyphenIndex]) + if err != nil { + return err + } + high, err = strconv.Atoi(value[hyphenIndex+1:]) + if err != nil { + return err + } + case PlusNotation: + var offset int + low, err = strconv.Atoi(value[:plusIndex]) + if err != nil { + return err + } + offset, err = strconv.Atoi(value[plusIndex+1:]) + if err != nil { + return err + } + high = low + offset + default: + return fmt.Errorf("unable to parse port range: %s", value) + } + + if low > 65535 || high > 65535 { + return fmt.Errorf("the port range cannot be greater than 65535: %s", value) + } + + if high < low { + return fmt.Errorf("end port cannot be less than start port: %s", value) + } + + pr.Base = low + pr.Size = 1 + high - low + return nil +} + +// Type returns a descriptive string about this type. This is part of the +// pflag.Value interface. +func (*PortRange) Type() string { + return "portRange" +} + +// ParsePortRange parses a string of the form "min-max", inclusive at both +// ends, and initializs a new PortRange from it. 
+func ParsePortRange(value string) (*PortRange, error) { + pr := &PortRange{} + err := pr.Set(value) + if err != nil { + return nil, err + } + return pr, nil +} + +func ParsePortRangeOrDie(value string) *PortRange { + pr, err := ParsePortRange(value) + if err != nil { + panic(fmt.Sprintf("couldn't parse port range %q: %v", value, err)) + } + return pr +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go new file mode 100644 index 0000000..c0fd4e2 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/port_split.go @@ -0,0 +1,77 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "strings" + + "k8s.io/apimachinery/pkg/util/sets" +) + +var validSchemes = sets.NewString("http", "https", "") + +// SplitSchemeNamePort takes a string of the following forms: +// * "", returns "", "","", true +// * ":", returns "", "","",true +// * "::", returns "","","",true +// +// Name must be non-empty or valid will be returned false. +// Scheme must be "http" or "https" if specified +// Port is returned as a string, and it is not required to be numeric (could be +// used for a named port, for example). +func SplitSchemeNamePort(id string) (scheme, name, port string, valid bool) { + parts := strings.Split(id, ":") + switch len(parts) { + case 1: + name = parts[0] + case 2: + name = parts[0] + port = parts[1] + case 3: + scheme = parts[0] + name = parts[1] + port = parts[2] + default: + return "", "", "", false + } + + if len(name) > 0 && validSchemes.Has(scheme) { + return scheme, name, port, true + } else { + return "", "", "", false + } +} + +// JoinSchemeNamePort returns a string that specifies the scheme, name, and port: +// * "" +// * ":" +// * "::" +// None of the parameters may contain a ':' character +// Name is required +// Scheme must be "", "http", or "https" +func JoinSchemeNamePort(scheme, name, port string) string { + if len(scheme) > 0 { + // Must include three segments to specify scheme + return scheme + ":" + name + ":" + port + } + if len(port) > 0 { + // Must include two segments to specify port + return name + ":" + port + } + // Return name alone + return name +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go new file mode 100644 index 0000000..2e7cb94 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
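
A hedged usage sketch for the PortRange and scheme/name/port helpers vendored above; the Redis-flavoured values are hypothetical and only illustrate the accepted notations:

```go
package main

import (
	"fmt"

	utilnet "k8s.io/apimachinery/pkg/util/net"
)

func main() {
	// ParsePortRange accepts "value", "min-max", or "min+offset" notation.
	pr, err := utilnet.ParsePortRange("30000-32767")
	if err != nil {
		panic(err)
	}
	fmt.Println(pr.Contains(31000), pr.String()) // true 30000-32767

	// SplitSchemeNamePort decomposes an id of the form [scheme:]name[:port].
	scheme, name, port, valid := utilnet.SplitSchemeNamePort("https:redis:6379")
	fmt.Println(scheme, name, port, valid) // https redis 6379 true
}
```
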
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package net + +import ( + "net" + "net/url" + "os" + "reflect" + "syscall" +) + +// IPNetEqual checks if the two input IPNets are representing the same subnet. +// For example, +// 10.0.0.1/24 and 10.0.0.0/24 are the same subnet. +// 10.0.0.1/24 and 10.0.0.0/25 are not the same subnet. +func IPNetEqual(ipnet1, ipnet2 *net.IPNet) bool { + if ipnet1 == nil || ipnet2 == nil { + return false + } + if reflect.DeepEqual(ipnet1.Mask, ipnet2.Mask) && ipnet1.Contains(ipnet2.IP) && ipnet2.Contains(ipnet1.IP) { + return true + } + return false +} + +// Returns if the given err is "connection reset by peer" error. +func IsConnectionReset(err error) bool { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNRESET { + return true + } + return false +} + +// Returns if the given err is "connection refused" error +func IsConnectionRefused(err error) bool { + if urlErr, ok := err.(*url.Error); ok { + err = urlErr.Err + } + if opErr, ok := err.(*net.OpError); ok { + err = opErr.Err + } + if osErr, ok := err.(*os.SyscallError); ok { + err = osErr.Err + } + if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { + return true + } + return false +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go new file mode 100644 index 0000000..1428443 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -0,0 +1,173 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package runtime + +import ( + "fmt" + "net/http" + "runtime" + "sync" + "time" + + "k8s.io/klog" +) + +var ( + // ReallyCrash controls the behavior of HandleCrash and now defaults + // true. It's still exposed so components can optionally set to false + // to restore prior behavior. + ReallyCrash = true +) + +// PanicHandlers is a list of functions which will be invoked when a panic happens. +var PanicHandlers = []func(interface{}){logPanic} + +// HandleCrash simply catches a crash and logs an error. Meant to be called via +// defer. Additional context-specific handlers can be provided, and will be +// called in case of panic. HandleCrash actually crashes, after calling the +// handlers and logging the panic message. +// +// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +func HandleCrash(additionalHandlers ...func(interface{})) { + if r := recover(); r != nil { + for _, fn := range PanicHandlers { + fn(r) + } + for _, fn := range additionalHandlers { + fn(r) + } + if ReallyCrash { + // Actually proceed to panic. 
+ panic(r) + } + } +} + +// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). +func logPanic(r interface{}) { + if r == http.ErrAbortHandler { + // honor the http.ErrAbortHandler sentinel panic value: + // ErrAbortHandler is a sentinel panic value to abort a handler. + // While any panic from ServeHTTP aborts the response to the client, + // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. + return + } + + // Same as stdlib http server code. Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + if _, ok := r.(string); ok { + klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + } else { + klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + } +} + +// ErrorHandlers is a list of functions which will be invoked when an unreturnable +// error occurs. +// TODO(lavalamp): for testability, this and the below HandleError function +// should be packaged up into a testable and reusable object. +var ErrorHandlers = []func(error){ + logError, + (&rudimentaryErrorBackoff{ + lastErrorTime: time.Now(), + // 1ms was the number folks were able to stomach as a global rate limit. + // If you need to log errors more than 1000 times a second you + // should probably consider fixing your code instead. :) + minPeriod: time.Millisecond, + }).OnError, +} + +// HandlerError is a method to invoke when a non-user facing piece of code cannot +// return an error and needs to indicate it has been ignored. Invoking this method +// is preferable to logging the error - the default behavior is to log but the +// errors may be sent to a remote server for analysis. +func HandleError(err error) { + // this is sometimes called with a nil error. We probably shouldn't fail and should do nothing instead + if err == nil { + return + } + + for _, fn := range ErrorHandlers { + fn(err) + } +} + +// logError prints an error with the call stack of the location it was reported +func logError(err error) { + klog.ErrorDepth(2, err) +} + +type rudimentaryErrorBackoff struct { + minPeriod time.Duration // immutable + // TODO(lavalamp): use the clock for testability. Need to move that + // package for that to be accessible here. + lastErrorTimeLock sync.Mutex + lastErrorTime time.Time +} + +// OnError will block if it is called more often than the embedded period time. +// This will prevent overly tight hot error loops. +func (r *rudimentaryErrorBackoff) OnError(error) { + r.lastErrorTimeLock.Lock() + defer r.lastErrorTimeLock.Unlock() + d := time.Since(r.lastErrorTime) + if d < r.minPeriod { + // If the time moves backwards for any reason, do nothing + time.Sleep(r.minPeriod - d) + } + r.lastErrorTime = time.Now() +} + +// GetCaller returns the caller of the function that calls it. +func GetCaller() string { + var pc [1]uintptr + runtime.Callers(3, pc[:]) + f := runtime.FuncForPC(pc[0]) + if f == nil { + return fmt.Sprintf("Unable to find caller") + } + return f.Name() +} + +// RecoverFromPanic replaces the specified error with an error containing the +// original error, and the call tree when a panic occurs. This enables error +// handlers to handle errors and panics the same way. +func RecoverFromPanic(err *error) { + if r := recover(); r != nil { + // Same as stdlib http server code. 
Manually allocate stack trace buffer size + // to prevent excessively large logs + const size = 64 << 10 + stacktrace := make([]byte, size) + stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + + *err = fmt.Errorf( + "recovered from panic %q. (err=%v) Call stack:\n%s", + r, + *err, + stacktrace) + } +} + +// Must panics on non-nil errors. Useful to handling programmer level errors. +func Must(err error) { + if err != nil { + panic(err) + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go new file mode 100644 index 0000000..9bfa85d --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Byte is a set of bytes, implemented via map[byte]struct{} for minimal memory consumption. +type Byte map[byte]Empty + +// NewByte creates a Byte from a list of values. +func NewByte(items ...byte) Byte { + ss := Byte{} + ss.Insert(items...) + return ss +} + +// ByteKeySet creates a Byte from a keys of a map[byte](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func ByteKeySet(theMap interface{}) Byte { + v := reflect.ValueOf(theMap) + ret := Byte{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(byte)) + } + return ret +} + +// Insert adds items to the set. +func (s Byte) Insert(items ...byte) Byte { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Byte) Delete(items ...byte) Byte { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Byte) Has(item byte) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Byte) HasAll(items ...byte) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Byte) HasAny(items ...byte) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Byte) Difference(s2 Byte) Byte { + result := NewByte() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. 
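
A small sketch of the crash/error helpers from the vendored util/runtime package; doRefresh is a hypothetical worker added only for illustration:

```go
package main

import (
	"errors"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)

// doRefresh is a hypothetical worker used only for this sketch.
func doRefresh() error {
	return errors.New("refresh not implemented")
}

func refreshCache() {
	// HandleCrash runs the registered PanicHandlers (logging the panic) and then
	// re-panics, since ReallyCrash defaults to true.
	defer utilruntime.HandleCrash()

	if err := doRefresh(); err != nil {
		// HandleError is for errors that cannot be returned to a caller; it logs
		// them and applies a rudimentary rate limit.
		utilruntime.HandleError(err)
	}
}

func main() {
	refreshCache()
}
```
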
+// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Byte) Union(s2 Byte) Byte { + result := NewByte() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Byte) Intersection(s2 Byte) Byte { + var walk, other Byte + result := NewByte() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Byte) IsSuperset(s2 Byte) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Byte) Equal(s2 Byte) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfByte []byte + +func (s sortableSliceOfByte) Len() int { return len(s) } +func (s sortableSliceOfByte) Less(i, j int) bool { return lessByte(s[i], s[j]) } +func (s sortableSliceOfByte) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted byte slice. +func (s Byte) List() []byte { + res := make(sortableSliceOfByte, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []byte(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Byte) UnsortedList() []byte { + res := make([]byte, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Byte) PopAny() (byte, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue byte + return zeroValue, false +} + +// Len returns the size of the set. +func (s Byte) Len() int { + return len(s) +} + +func lessByte(lhs, rhs byte) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go new file mode 100644 index 0000000..b152a0b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +// Package sets has auto-generated set types. +package sets diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go new file mode 100644 index 0000000..e11e622 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/empty.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +// Empty is public since it is used by some internal API objects for conversions between external +// string arrays and internal sets, and conversion logic requires public types today. +type Empty struct{} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go new file mode 100644 index 0000000..88bd709 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int is a set of ints, implemented via map[int]struct{} for minimal memory consumption. +type Int map[int]Empty + +// NewInt creates a Int from a list of values. +func NewInt(items ...int) Int { + ss := Int{} + ss.Insert(items...) + return ss +} + +// IntKeySet creates a Int from a keys of a map[int](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func IntKeySet(theMap interface{}) Int { + v := reflect.ValueOf(theMap) + ret := Int{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int)) + } + return ret +} + +// Insert adds items to the set. +func (s Int) Insert(items ...int) Int { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int) Delete(items ...int) Int { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int) Has(item int) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int) HasAll(items ...int) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. 
+func (s Int) HasAny(items ...int) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int) Difference(s2 Int) Int { + result := NewInt() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int) Union(s2 Int) Int { + result := NewInt() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int) Intersection(s2 Int) Int { + var walk, other Int + result := NewInt() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int) IsSuperset(s2 Int) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int) Equal(s2 Int) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt []int + +func (s sortableSliceOfInt) Len() int { return len(s) } +func (s sortableSliceOfInt) Less(i, j int) bool { return lessInt(s[i], s[j]) } +func (s sortableSliceOfInt) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int slice. +func (s Int) List() []int { + res := make(sortableSliceOfInt, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int) UnsortedList() []int { + res := make([]int, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int) PopAny() (int, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int) Len() int { + return len(s) +} + +func lessInt(lhs, rhs int) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go new file mode 100644 index 0000000..96a4855 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. +type Int32 map[int32]Empty + +// NewInt32 creates a Int32 from a list of values. +func NewInt32(items ...int32) Int32 { + ss := Int32{} + ss.Insert(items...) + return ss +} + +// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int32KeySet(theMap interface{}) Int32 { + v := reflect.ValueOf(theMap) + ret := Int32{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int32)) + } + return ret +} + +// Insert adds items to the set. +func (s Int32) Insert(items ...int32) Int32 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int32) Delete(items ...int32) Int32 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int32) Has(item int32) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int32) HasAll(items ...int32) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s Int32) HasAny(items ...int32) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int32) Difference(s2 Int32) Int32 { + result := NewInt32() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int32) Union(s2 Int32) Int32 { + result := NewInt32() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int32) Intersection(s2 Int32) Int32 { + var walk, other Int32 + result := NewInt32() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int32) IsSuperset(s2 Int32) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. 
+// (In practice, this means same elements, order doesn't matter) +func (s1 Int32) Equal(s2 Int32) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt32 []int32 + +func (s sortableSliceOfInt32) Len() int { return len(s) } +func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } +func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int32 slice. +func (s Int32) List() []int32 { + res := make(sortableSliceOfInt32, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int32(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int32) UnsortedList() []int32 { + res := make([]int32, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int32) PopAny() (int32, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int32 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int32) Len() int { + return len(s) +} + +func lessInt32(lhs, rhs int32) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go new file mode 100644 index 0000000..b375a1b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.Int64 is a set of int64s, implemented via map[int64]struct{} for minimal memory consumption. +type Int64 map[int64]Empty + +// NewInt64 creates a Int64 from a list of values. +func NewInt64(items ...int64) Int64 { + ss := Int64{} + ss.Insert(items...) + return ss +} + +// Int64KeySet creates a Int64 from a keys of a map[int64](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func Int64KeySet(theMap interface{}) Int64 { + v := reflect.ValueOf(theMap) + ret := Int64{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(int64)) + } + return ret +} + +// Insert adds items to the set. +func (s Int64) Insert(items ...int64) Int64 { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s Int64) Delete(items ...int64) Int64 { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s Int64) Has(item int64) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s Int64) HasAll(items ...int64) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. 
+func (s Int64) HasAny(items ...int64) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s Int64) Difference(s2 Int64) Int64 { + result := NewInt64() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 Int64) Union(s2 Int64) Int64 { + result := NewInt64() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 Int64) Intersection(s2 Int64) Int64 { + var walk, other Int64 + result := NewInt64() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 Int64) IsSuperset(s2 Int64) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. +// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 Int64) Equal(s2 Int64) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfInt64 []int64 + +func (s sortableSliceOfInt64) Len() int { return len(s) } +func (s sortableSliceOfInt64) Less(i, j int) bool { return lessInt64(s[i], s[j]) } +func (s sortableSliceOfInt64) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted int64 slice. +func (s Int64) List() []int64 { + res := make(sortableSliceOfInt64, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []int64(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s Int64) UnsortedList() []int64 { + res := make([]int64, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s Int64) PopAny() (int64, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue int64 + return zeroValue, false +} + +// Len returns the size of the set. +func (s Int64) Len() int { + return len(s) +} + +func lessInt64(lhs, rhs int64) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go new file mode 100644 index 0000000..e6f37db --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -0,0 +1,205 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by set-gen. DO NOT EDIT. + +package sets + +import ( + "reflect" + "sort" +) + +// sets.String is a set of strings, implemented via map[string]struct{} for minimal memory consumption. +type String map[string]Empty + +// NewString creates a String from a list of values. +func NewString(items ...string) String { + ss := String{} + ss.Insert(items...) + return ss +} + +// StringKeySet creates a String from a keys of a map[string](? extends interface{}). +// If the value passed in is not actually a map, this will panic. +func StringKeySet(theMap interface{}) String { + v := reflect.ValueOf(theMap) + ret := String{} + + for _, keyValue := range v.MapKeys() { + ret.Insert(keyValue.Interface().(string)) + } + return ret +} + +// Insert adds items to the set. +func (s String) Insert(items ...string) String { + for _, item := range items { + s[item] = Empty{} + } + return s +} + +// Delete removes all items from the set. +func (s String) Delete(items ...string) String { + for _, item := range items { + delete(s, item) + } + return s +} + +// Has returns true if and only if item is contained in the set. +func (s String) Has(item string) bool { + _, contained := s[item] + return contained +} + +// HasAll returns true if and only if all items are contained in the set. +func (s String) HasAll(items ...string) bool { + for _, item := range items { + if !s.Has(item) { + return false + } + } + return true +} + +// HasAny returns true if any items are contained in the set. +func (s String) HasAny(items ...string) bool { + for _, item := range items { + if s.Has(item) { + return true + } + } + return false +} + +// Difference returns a set of objects that are not in s2 +// For example: +// s1 = {a1, a2, a3} +// s2 = {a1, a2, a4, a5} +// s1.Difference(s2) = {a3} +// s2.Difference(s1) = {a4, a5} +func (s String) Difference(s2 String) String { + result := NewString() + for key := range s { + if !s2.Has(key) { + result.Insert(key) + } + } + return result +} + +// Union returns a new set which includes items in either s1 or s2. +// For example: +// s1 = {a1, a2} +// s2 = {a3, a4} +// s1.Union(s2) = {a1, a2, a3, a4} +// s2.Union(s1) = {a1, a2, a3, a4} +func (s1 String) Union(s2 String) String { + result := NewString() + for key := range s1 { + result.Insert(key) + } + for key := range s2 { + result.Insert(key) + } + return result +} + +// Intersection returns a new set which includes the item in BOTH s1 and s2 +// For example: +// s1 = {a1, a2} +// s2 = {a2, a3} +// s1.Intersection(s2) = {a2} +func (s1 String) Intersection(s2 String) String { + var walk, other String + result := NewString() + if s1.Len() < s2.Len() { + walk = s1 + other = s2 + } else { + walk = s2 + other = s1 + } + for key := range walk { + if other.Has(key) { + result.Insert(key) + } + } + return result +} + +// IsSuperset returns true if and only if s1 is a superset of s2. +func (s1 String) IsSuperset(s2 String) bool { + for item := range s2 { + if !s1.Has(item) { + return false + } + } + return true +} + +// Equal returns true if and only if s1 is equal (as a set) to s2. 
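
The generated set types all share the same surface; a brief sketch using sets.String (the hostnames are made up for illustration):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
)

func main() {
	masters := sets.NewString("redis-0", "redis-1")
	replicas := sets.NewString("redis-1", "redis-2")

	fmt.Println(masters.Has("redis-0"))                // true
	fmt.Println(masters.Intersection(replicas).List()) // [redis-1]
	fmt.Println(masters.Union(replicas).Len())         // 3
	fmt.Println(masters.Difference(replicas).List())   // [redis-0]
}
```
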
+// Two sets are equal if their membership is identical. +// (In practice, this means same elements, order doesn't matter) +func (s1 String) Equal(s2 String) bool { + return len(s1) == len(s2) && s1.IsSuperset(s2) +} + +type sortableSliceOfString []string + +func (s sortableSliceOfString) Len() int { return len(s) } +func (s sortableSliceOfString) Less(i, j int) bool { return lessString(s[i], s[j]) } +func (s sortableSliceOfString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// List returns the contents as a sorted string slice. +func (s String) List() []string { + res := make(sortableSliceOfString, 0, len(s)) + for key := range s { + res = append(res, key) + } + sort.Sort(res) + return []string(res) +} + +// UnsortedList returns the slice with contents in random order. +func (s String) UnsortedList() []string { + res := make([]string, 0, len(s)) + for key := range s { + res = append(res, key) + } + return res +} + +// Returns a single element from the set. +func (s String) PopAny() (string, bool) { + for key := range s { + s.Delete(key) + return key, true + } + var zeroValue string + return zeroValue, false +} + +// Len returns the size of the set. +func (s String) Len() int { + return len(s) +} + +func lessString(lhs, rhs string) bool { + return lhs < rhs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go new file mode 100644 index 0000000..0cd5d65 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -0,0 +1,272 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + utilerrors "k8s.io/apimachinery/pkg/util/errors" + "k8s.io/apimachinery/pkg/util/sets" +) + +// Error is an implementation of the 'error' interface, which represents a +// field-level validation error. +type Error struct { + Type ErrorType + Field string + BadValue interface{} + Detail string +} + +var _ error = &Error{} + +// Error implements the error interface. +func (v *Error) Error() string { + return fmt.Sprintf("%s: %s", v.Field, v.ErrorBody()) +} + +// ErrorBody returns the error message without the field name. This is useful +// for building nice-looking higher-level error reporting. 
+func (v *Error) ErrorBody() string { + var s string + switch v.Type { + case ErrorTypeRequired, ErrorTypeForbidden, ErrorTypeTooLong, ErrorTypeInternal: + s = v.Type.String() + default: + value := v.BadValue + valueType := reflect.TypeOf(value) + if value == nil || valueType == nil { + value = "null" + } else if valueType.Kind() == reflect.Ptr { + if reflectValue := reflect.ValueOf(value); reflectValue.IsNil() { + value = "null" + } else { + value = reflectValue.Elem().Interface() + } + } + switch t := value.(type) { + case int64, int32, float64, float32, bool: + // use simple printer for simple types + s = fmt.Sprintf("%s: %v", v.Type, value) + case string: + s = fmt.Sprintf("%s: %q", v.Type, t) + case fmt.Stringer: + // anything that defines String() is better than raw struct + s = fmt.Sprintf("%s: %s", v.Type, t.String()) + default: + // fallback to raw struct + // TODO: internal types have panic guards against json.Marshalling to prevent + // accidental use of internal types in external serialized form. For now, use + // %#v, although it would be better to show a more expressive output in the future + s = fmt.Sprintf("%s: %#v", v.Type, value) + } + } + if len(v.Detail) != 0 { + s += fmt.Sprintf(": %s", v.Detail) + } + return s +} + +// ErrorType is a machine readable value providing more detail about why +// a field is invalid. These values are expected to match 1-1 with +// CauseType in api/types.go. +type ErrorType string + +// TODO: These values are duplicated in api/types.go, but there's a circular dep. Fix it. +const ( + // ErrorTypeNotFound is used to report failure to find a requested value + // (e.g. looking up an ID). See NotFound(). + ErrorTypeNotFound ErrorType = "FieldValueNotFound" + // ErrorTypeRequired is used to report required values that are not + // provided (e.g. empty strings, null values, or empty arrays). See + // Required(). + ErrorTypeRequired ErrorType = "FieldValueRequired" + // ErrorTypeDuplicate is used to report collisions of values that must be + // unique (e.g. unique IDs). See Duplicate(). + ErrorTypeDuplicate ErrorType = "FieldValueDuplicate" + // ErrorTypeInvalid is used to report malformed values (e.g. failed regex + // match, too long, out of bounds). See Invalid(). + ErrorTypeInvalid ErrorType = "FieldValueInvalid" + // ErrorTypeNotSupported is used to report unknown values for enumerated + // fields (e.g. a list of valid values). See NotSupported(). + ErrorTypeNotSupported ErrorType = "FieldValueNotSupported" + // ErrorTypeForbidden is used to report valid (as per formatting rules) + // values which would be accepted under some conditions, but which are not + // permitted by the current conditions (such as security policy). See + // Forbidden(). + ErrorTypeForbidden ErrorType = "FieldValueForbidden" + // ErrorTypeTooLong is used to report that the given value is too long. + // This is similar to ErrorTypeInvalid, but the error will not include the + // too-long value. See TooLong(). + ErrorTypeTooLong ErrorType = "FieldValueTooLong" + // ErrorTypeTooMany is used to report "too many". This is used to + // report that a given list has too many items. This is similar to FieldValueTooLong, + // but the error indicates quantity instead of length. + ErrorTypeTooMany ErrorType = "FieldValueTooMany" + // ErrorTypeInternal is used to report other errors that are not related + // to user input. See InternalError(). + ErrorTypeInternal ErrorType = "InternalError" +) + +// String converts a ErrorType into its corresponding canonical error message. 
+func (t ErrorType) String() string { + switch t { + case ErrorTypeNotFound: + return "Not found" + case ErrorTypeRequired: + return "Required value" + case ErrorTypeDuplicate: + return "Duplicate value" + case ErrorTypeInvalid: + return "Invalid value" + case ErrorTypeNotSupported: + return "Unsupported value" + case ErrorTypeForbidden: + return "Forbidden" + case ErrorTypeTooLong: + return "Too long" + case ErrorTypeTooMany: + return "Too many" + case ErrorTypeInternal: + return "Internal error" + default: + panic(fmt.Sprintf("unrecognized validation error: %q", string(t))) + } +} + +// NotFound returns a *Error indicating "value not found". This is +// used to report failure to find a requested value (e.g. looking up an ID). +func NotFound(field *Path, value interface{}) *Error { + return &Error{ErrorTypeNotFound, field.String(), value, ""} +} + +// Required returns a *Error indicating "value required". This is used +// to report required values that are not provided (e.g. empty strings, null +// values, or empty arrays). +func Required(field *Path, detail string) *Error { + return &Error{ErrorTypeRequired, field.String(), "", detail} +} + +// Duplicate returns a *Error indicating "duplicate value". This is +// used to report collisions of values that must be unique (e.g. names or IDs). +func Duplicate(field *Path, value interface{}) *Error { + return &Error{ErrorTypeDuplicate, field.String(), value, ""} +} + +// Invalid returns a *Error indicating "invalid value". This is used +// to report malformed values (e.g. failed regex match, too long, out of bounds). +func Invalid(field *Path, value interface{}, detail string) *Error { + return &Error{ErrorTypeInvalid, field.String(), value, detail} +} + +// NotSupported returns a *Error indicating "unsupported value". +// This is used to report unknown values for enumerated fields (e.g. a list of +// valid values). +func NotSupported(field *Path, value interface{}, validValues []string) *Error { + detail := "" + if validValues != nil && len(validValues) > 0 { + quotedValues := make([]string, len(validValues)) + for i, v := range validValues { + quotedValues[i] = strconv.Quote(v) + } + detail = "supported values: " + strings.Join(quotedValues, ", ") + } + return &Error{ErrorTypeNotSupported, field.String(), value, detail} +} + +// Forbidden returns a *Error indicating "forbidden". This is used to +// report valid (as per formatting rules) values which would be accepted under +// some conditions, but which are not permitted by current conditions (e.g. +// security policy). +func Forbidden(field *Path, detail string) *Error { + return &Error{ErrorTypeForbidden, field.String(), "", detail} +} + +// TooLong returns a *Error indicating "too long". This is used to +// report that the given value is too long. This is similar to +// Invalid, but the returned error will not include the too-long +// value. +func TooLong(field *Path, value interface{}, maxLength int) *Error { + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} +} + +// TooMany returns a *Error indicating "too many". This is used to +// report that a given list has too many items. This is similar to TooLong, +// but the returned error indicates quantity instead of length. +func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { + return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} +} + +// InternalError returns a *Error indicating "internal error". 
This is used +// to signal that an error was found that was not directly related to user +// input. The err argument must be non-nil. +func InternalError(field *Path, err error) *Error { + return &Error{ErrorTypeInternal, field.String(), nil, err.Error()} +} + +// ErrorList holds a set of Errors. It is plausible that we might one day have +// non-field errors in this same umbrella package, but for now we don't, so +// we can keep it simple and leave ErrorList here. +type ErrorList []*Error + +// NewErrorTypeMatcher returns an errors.Matcher that returns true +// if the provided error is a Error and has the provided ErrorType. +func NewErrorTypeMatcher(t ErrorType) utilerrors.Matcher { + return func(err error) bool { + if e, ok := err.(*Error); ok { + return e.Type == t + } + return false + } +} + +// ToAggregate converts the ErrorList into an errors.Aggregate. +func (list ErrorList) ToAggregate() utilerrors.Aggregate { + errs := make([]error, 0, len(list)) + errorMsgs := sets.NewString() + for _, err := range list { + msg := fmt.Sprintf("%v", err) + if errorMsgs.Has(msg) { + continue + } + errorMsgs.Insert(msg) + errs = append(errs, err) + } + return utilerrors.NewAggregate(errs) +} + +func fromAggregate(agg utilerrors.Aggregate) ErrorList { + errs := agg.Errors() + list := make(ErrorList, len(errs)) + for i := range errs { + list[i] = errs[i].(*Error) + } + return list +} + +// Filter removes items from the ErrorList that match the provided fns. +func (list ErrorList) Filter(fns ...utilerrors.Matcher) ErrorList { + err := utilerrors.FilterOut(list.ToAggregate(), fns...) + if err == nil { + return nil + } + // FilterOut takes an Aggregate and returns an Aggregate + return fromAggregate(err.(utilerrors.Aggregate)) +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go new file mode 100644 index 0000000..2efc8ee --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/path.go @@ -0,0 +1,91 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package field + +import ( + "bytes" + "fmt" + "strconv" +) + +// Path represents the path from some root to a particular field. +type Path struct { + name string // the name of this field or "" if this is an index + index string // if name == "", this is a subscript (index or map key) of the previous element + parent *Path // nil if this is the root element +} + +// NewPath creates a root Path object. +func NewPath(name string, moreNames ...string) *Path { + r := &Path{name: name, parent: nil} + for _, anotherName := range moreNames { + r = &Path{name: anotherName, parent: r} + } + return r +} + +// Root returns the root element of this Path. +func (p *Path) Root() *Path { + for ; p.parent != nil; p = p.parent { + // Do nothing. + } + return p +} + +// Child creates a new Path that is a child of the method receiver. +func (p *Path) Child(name string, moreNames ...string) *Path { + r := NewPath(name, moreNames...) 
+ r.Root().parent = p + return r +} + +// Index indicates that the previous Path is to be subscripted by an int. +// This sets the same underlying value as Key. +func (p *Path) Index(index int) *Path { + return &Path{index: strconv.Itoa(index), parent: p} +} + +// Key indicates that the previous Path is to be subscripted by a string. +// This sets the same underlying value as Index. +func (p *Path) Key(key string) *Path { + return &Path{index: key, parent: p} +} + +// String produces a string representation of the Path. +func (p *Path) String() string { + // make a slice to iterate + elems := []*Path{} + for ; p != nil; p = p.parent { + elems = append(elems, p) + } + + // iterate, but it has to be backwards + buf := bytes.NewBuffer(nil) + for i := range elems { + p := elems[len(elems)-1-i] + if p.parent != nil && len(p.name) > 0 { + // This is either the root or it is a subscript. + buf.WriteString(".") + } + if len(p.name) > 0 { + buf.WriteString(p.name) + } else { + fmt.Fprintf(buf, "[%s]", p.index) + } + } + return buf.String() +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go new file mode 100644 index 0000000..915231f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -0,0 +1,498 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package validation + +import ( + "fmt" + "math" + "net" + "regexp" + "strconv" + "strings" + + "k8s.io/apimachinery/pkg/util/validation/field" +) + +const qnameCharFmt string = "[A-Za-z0-9]" +const qnameExtCharFmt string = "[-A-Za-z0-9_.]" +const qualifiedNameFmt string = "(" + qnameCharFmt + qnameExtCharFmt + "*)?" + qnameCharFmt +const qualifiedNameErrMsg string = "must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" +const qualifiedNameMaxLength int = 63 + +var qualifiedNameRegexp = regexp.MustCompile("^" + qualifiedNameFmt + "$") + +// IsQualifiedName tests whether the value passed is what Kubernetes calls a +// "qualified name". This is a format used in various places throughout the +// system. If the value is not valid, a list of error strings is returned. +// Otherwise an empty list (or nil) is returned. +func IsQualifiedName(value string) []string { + var errs []string + parts := strings.Split(value, "/") + var name string + switch len(parts) { + case 1: + name = parts[0] + case 2: + var prefix string + prefix, name = parts[0], parts[1] + if len(prefix) == 0 { + errs = append(errs, "prefix part "+EmptyError()) + } else if msgs := IsDNS1123Subdomain(prefix); len(msgs) != 0 { + errs = append(errs, prefixEach(msgs, "prefix part ")...) + } + default: + return append(errs, "a qualified name "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")+ + " with an optional DNS subdomain prefix and '/' (e.g. 
'example.com/MyName')") + } + + if len(name) == 0 { + errs = append(errs, "name part "+EmptyError()) + } else if len(name) > qualifiedNameMaxLength { + errs = append(errs, "name part "+MaxLenError(qualifiedNameMaxLength)) + } + if !qualifiedNameRegexp.MatchString(name) { + errs = append(errs, "name part "+RegexError(qualifiedNameErrMsg, qualifiedNameFmt, "MyName", "my.name", "123-abc")) + } + return errs +} + +// IsFullyQualifiedName checks if the name is fully qualified. This is similar +// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of +// 2 and does not accept a trailing . as valid. +// TODO: This function is deprecated and preserved until all callers migrate to +// IsFullyQualifiedDomainName; please don't add new callers. +func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 3 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least three segments separated by dots")) + } + return allErrors +} + +// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This +// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments +// instead of 3 and accepts a trailing . as valid. +func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { + var allErrors field.ErrorList + if len(name) == 0 { + return append(allErrors, field.Required(fldPath, "")) + } + if strings.HasSuffix(name, ".") { + name = name[:len(name)-1] + } + if errs := IsDNS1123Subdomain(name); len(errs) > 0 { + return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) + } + if len(strings.Split(name, ".")) < 2 { + return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) + } + return allErrors +} + +// Allowed characters in an HTTP Path as defined by RFC 3986. A HTTP path may +// contain: +// * unreserved characters (alphanumeric, '-', '.', '_', '~') +// * percent-encoded octets +// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=") +// * a colon character (":") +const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+` + +var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$") + +// IsDomainPrefixedPath checks if the given string is a domain-prefixed path +// (e.g. acme.io/foo). All characters before the first "/" must be a valid +// subdomain as defined by RFC 1123. All characters trailing the first "/" must +// be valid HTTP Path characters as defined by RFC 3986. 
+func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList { + var allErrs field.ErrorList + if len(dpPath) == 0 { + return append(allErrs, field.Required(fldPath, "")) + } + + segments := strings.SplitN(dpPath, "/", 2) + if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 { + return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")")) + } + + host := segments[0] + for _, err := range IsDNS1123Subdomain(host) { + allErrs = append(allErrs, field.Invalid(fldPath, host, err)) + } + + path := segments[1] + if !httpPathRegexp.MatchString(path) { + return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt))) + } + + return allErrs +} + +const labelValueFmt string = "(" + qualifiedNameFmt + ")?" +const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" + +// LabelValueMaxLength is a label's max length +const LabelValueMaxLength int = 63 + +var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") + +// IsValidLabelValue tests whether the value passed is a valid label value. If +// the value is not valid, a list of error strings is returned. Otherwise an +// empty list (or nil) is returned. +func IsValidLabelValue(value string) []string { + var errs []string + if len(value) > LabelValueMaxLength { + errs = append(errs, MaxLenError(LabelValueMaxLength)) + } + if !labelValueRegexp.MatchString(value) { + errs = append(errs, RegexError(labelValueErrMsg, labelValueFmt, "MyValue", "my_value", "12345")) + } + return errs +} + +const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" +const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" + +// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) +const DNS1123LabelMaxLength int = 63 + +var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") + +// IsDNS1123Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1123). +func IsDNS1123Label(value string) []string { + var errs []string + if len(value) > DNS1123LabelMaxLength { + errs = append(errs, MaxLenError(DNS1123LabelMaxLength)) + } + if !dns1123LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123LabelErrMsg, dns1123LabelFmt, "my-name", "123-abc")) + } + return errs +} + +const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" +const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" + +// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) +const DNS1123SubdomainMaxLength int = 253 + +var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") + +// IsDNS1123Subdomain tests for a string that conforms to the definition of a +// subdomain in DNS (RFC 1123). +func IsDNS1123Subdomain(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !dns1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1123SubdomainErrorMsg, dns1123SubdomainFmt, "example.com")) + } + return errs +} + +const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" 
+const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" + +// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) +const DNS1035LabelMaxLength int = 63 + +var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") + +// IsDNS1035Label tests for a string that conforms to the definition of a label in +// DNS (RFC 1035). +func IsDNS1035Label(value string) []string { + var errs []string + if len(value) > DNS1035LabelMaxLength { + errs = append(errs, MaxLenError(DNS1035LabelMaxLength)) + } + if !dns1035LabelRegexp.MatchString(value) { + errs = append(errs, RegexError(dns1035LabelErrMsg, dns1035LabelFmt, "my-name", "abc-123")) + } + return errs +} + +// wildcard definition - RFC 1034 section 4.3.3. +// examples: +// - valid: *.bar.com, *.foo.bar.com +// - invalid: *.*.bar.com, *.foo.*.com, *bar.com, f*.bar.com, * +const wildcardDNS1123SubdomainFmt = "\\*\\." + dns1123SubdomainFmt +const wildcardDNS1123SubdomainErrMsg = "a wildcard DNS-1123 subdomain must start with '*.', followed by a valid DNS subdomain, which must consist of lower case alphanumeric characters, '-' or '.' and end with an alphanumeric character" + +// IsWildcardDNS1123Subdomain tests for a string that conforms to the definition of a +// wildcard subdomain in DNS (RFC 1034 section 4.3.3). +func IsWildcardDNS1123Subdomain(value string) []string { + wildcardDNS1123SubdomainRegexp := regexp.MustCompile("^" + wildcardDNS1123SubdomainFmt + "$") + + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !wildcardDNS1123SubdomainRegexp.MatchString(value) { + errs = append(errs, RegexError(wildcardDNS1123SubdomainErrMsg, wildcardDNS1123SubdomainFmt, "*.example.com")) + } + return errs +} + +const cIdentifierFmt string = "[A-Za-z_][A-Za-z0-9_]*" +const identifierErrMsg string = "a valid C identifier must start with alphabetic character or '_', followed by a string of alphanumeric characters or '_'" + +var cIdentifierRegexp = regexp.MustCompile("^" + cIdentifierFmt + "$") + +// IsCIdentifier tests for a string that conforms the definition of an identifier +// in C. This checks the format, but not the length. +func IsCIdentifier(value string) []string { + if !cIdentifierRegexp.MatchString(value) { + return []string{RegexError(identifierErrMsg, cIdentifierFmt, "my_name", "MY_NAME", "MyName")} + } + return nil +} + +// IsValidPortNum tests that the argument is a valid, non-zero port number. +func IsValidPortNum(port int) []string { + if 1 <= port && port <= 65535 { + return nil + } + return []string{InclusiveRangeError(1, 65535)} +} + +// IsInRange tests that the argument is in an inclusive range. +func IsInRange(value int, min int, max int) []string { + if value >= min && value <= max { + return nil + } + return []string{InclusiveRangeError(min, max)} +} + +// Now in libcontainer UID/GID limits is 0 ~ 1<<31 - 1 +// TODO: once we have a type for UID/GID we should make these that type. +const ( + minUserID = 0 + maxUserID = math.MaxInt32 + minGroupID = 0 + maxGroupID = math.MaxInt32 +) + +// IsValidGroupID tests that the argument is a valid Unix GID. +func IsValidGroupID(gid int64) []string { + if minGroupID <= gid && gid <= maxGroupID { + return nil + } + return []string{InclusiveRangeError(minGroupID, maxGroupID)} +} + +// IsValidUserID tests that the argument is a valid Unix UID. 
+func IsValidUserID(uid int64) []string { + if minUserID <= uid && uid <= maxUserID { + return nil + } + return []string{InclusiveRangeError(minUserID, maxUserID)} +} + +var portNameCharsetRegex = regexp.MustCompile("^[-a-z0-9]+$") +var portNameOneLetterRegexp = regexp.MustCompile("[a-z]") + +// IsValidPortName check that the argument is valid syntax. It must be +// non-empty and no more than 15 characters long. It may contain only [-a-z0-9] +// and must contain at least one letter [a-z]. It must not start or end with a +// hyphen, nor contain adjacent hyphens. +// +// Note: We only allow lower-case characters, even though RFC 6335 is case +// insensitive. +func IsValidPortName(port string) []string { + var errs []string + if len(port) > 15 { + errs = append(errs, MaxLenError(15)) + } + if !portNameCharsetRegex.MatchString(port) { + errs = append(errs, "must contain only alpha-numeric characters (a-z, 0-9), and hyphens (-)") + } + if !portNameOneLetterRegexp.MatchString(port) { + errs = append(errs, "must contain at least one letter or number (a-z, 0-9)") + } + if strings.Contains(port, "--") { + errs = append(errs, "must not contain consecutive hyphens") + } + if len(port) > 0 && (port[0] == '-' || port[len(port)-1] == '-') { + errs = append(errs, "must not begin or end with a hyphen") + } + return errs +} + +// IsValidIP tests that the argument is a valid IP address. +func IsValidIP(value string) []string { + if net.ParseIP(value) == nil { + return []string{"must be a valid IP address, (e.g. 10.9.8.7)"} + } + return nil +} + +// IsValidIPv4Address tests that the argument is a valid IPv4 address. +func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() == nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) + } + return allErrors +} + +// IsValidIPv6Address tests that the argument is a valid IPv6 address. +func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { + var allErrors field.ErrorList + ip := net.ParseIP(value) + if ip == nil || ip.To4() != nil { + allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) + } + return allErrors +} + +const percentFmt string = "[0-9]+%" +const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" + +var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") + +// IsValidPercent checks that string is in the form of a percentage +func IsValidPercent(percent string) []string { + if !percentRegexp.MatchString(percent) { + return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} + } + return nil +} + +const httpHeaderNameFmt string = "[-A-Za-z0-9]+" +const httpHeaderNameErrMsg string = "a valid HTTP header must consist of alphanumeric characters or '-'" + +var httpHeaderNameRegexp = regexp.MustCompile("^" + httpHeaderNameFmt + "$") + +// IsHTTPHeaderName checks that a string conforms to the Go HTTP library's +// definition of a valid header field name (a stricter subset than RFC7230). 
+func IsHTTPHeaderName(value string) []string { + if !httpHeaderNameRegexp.MatchString(value) { + return []string{RegexError(httpHeaderNameErrMsg, httpHeaderNameFmt, "X-Header-Name")} + } + return nil +} + +const envVarNameFmt = "[-._a-zA-Z][-._a-zA-Z0-9]*" +const envVarNameFmtErrMsg string = "a valid environment variable name must consist of alphabetic characters, digits, '_', '-', or '.', and must not start with a digit" + +var envVarNameRegexp = regexp.MustCompile("^" + envVarNameFmt + "$") + +// IsEnvVarName tests if a string is a valid environment variable name. +func IsEnvVarName(value string) []string { + var errs []string + if !envVarNameRegexp.MatchString(value) { + errs = append(errs, RegexError(envVarNameFmtErrMsg, envVarNameFmt, "my.env-name", "MY_ENV.NAME", "MyEnvName1")) + } + + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + +const configMapKeyFmt = `[-._a-zA-Z0-9]+` +const configMapKeyErrMsg string = "a valid config key must consist of alphanumeric characters, '-', '_' or '.'" + +var configMapKeyRegexp = regexp.MustCompile("^" + configMapKeyFmt + "$") + +// IsConfigMapKey tests for a string that is a valid key for a ConfigMap or Secret +func IsConfigMapKey(value string) []string { + var errs []string + if len(value) > DNS1123SubdomainMaxLength { + errs = append(errs, MaxLenError(DNS1123SubdomainMaxLength)) + } + if !configMapKeyRegexp.MatchString(value) { + errs = append(errs, RegexError(configMapKeyErrMsg, configMapKeyFmt, "key.name", "KEY_NAME", "key-name")) + } + errs = append(errs, hasChDirPrefix(value)...) + return errs +} + +// MaxLenError returns a string explanation of a "string too long" validation +// failure. +func MaxLenError(length int) string { + return fmt.Sprintf("must be no more than %d characters", length) +} + +// RegexError returns a string explanation of a regex validation failure. +func RegexError(msg string, fmt string, examples ...string) string { + if len(examples) == 0 { + return msg + " (regex used for validation is '" + fmt + "')" + } + msg += " (e.g. " + for i := range examples { + if i > 0 { + msg += " or " + } + msg += "'" + examples[i] + "', " + } + msg += "regex used for validation is '" + fmt + "')" + return msg +} + +// EmptyError returns a string explanation of a "must not be empty" validation +// failure. +func EmptyError() string { + return "must be non-empty" +} + +func prefixEach(msgs []string, prefix string) []string { + for i := range msgs { + msgs[i] = prefix + msgs[i] + } + return msgs +} + +// InclusiveRangeError returns a string explanation of a numeric "must be +// between" validation failure. +func InclusiveRangeError(lo, hi int) string { + return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi) +} + +func hasChDirPrefix(value string) []string { + var errs []string + switch { + case value == ".": + errs = append(errs, `must not be '.'`) + case value == "..": + errs = append(errs, `must not be '..'`) + case strings.HasPrefix(value, ".."): + errs = append(errs, `must not start with '..'`) + } + return errs +} + +// IsValidSocketAddr checks that string represents a valid socket address +// as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) +func IsValidSocketAddr(value string) []string { + var errs []string + ip, port, err := net.SplitHostPort(value) + if err != nil { + errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") + return errs + } + portInt, _ := strconv.Atoi(port) + errs = append(errs, IsValidPortNum(portInt)...) 
+ errs = append(errs, IsValidIP(ip)...) + return errs +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go new file mode 100644 index 0000000..a9a3853 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -0,0 +1,344 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package yaml + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strings" + "unicode" + + "k8s.io/klog" + "sigs.k8s.io/yaml" +) + +// ToJSON converts a single YAML document into a JSON document +// or returns an error. If the document appears to be JSON the +// YAML decoding path is not used (so that error messages are +// JSON specific). +func ToJSON(data []byte) ([]byte, error) { + if hasJSONPrefix(data) { + return data, nil + } + return yaml.YAMLToJSON(data) +} + +// YAMLToJSONDecoder decodes YAML documents from an io.Reader by +// separating individual documents. It first converts the YAML +// body to JSON, then unmarshals the JSON. +type YAMLToJSONDecoder struct { + reader Reader +} + +// NewYAMLToJSONDecoder decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk, converting it to JSON via +// yaml.YAMLToJSON, and then passing it to json.Decoder. +func NewYAMLToJSONDecoder(r io.Reader) *YAMLToJSONDecoder { + reader := bufio.NewReader(r) + return &YAMLToJSONDecoder{ + reader: NewYAMLReader(reader), + } +} + +// Decode reads a YAML document as JSON from the stream or returns +// an error. The decoding rules match json.Unmarshal, not +// yaml.Unmarshal. +func (d *YAMLToJSONDecoder) Decode(into interface{}) error { + bytes, err := d.reader.Read() + if err != nil && err != io.EOF { + return err + } + + if len(bytes) != 0 { + err := yaml.Unmarshal(bytes, into) + if err != nil { + return YAMLSyntaxError{err} + } + } + return err +} + +// YAMLDecoder reads chunks of objects and returns ErrShortBuffer if +// the data is not sufficient. +type YAMLDecoder struct { + r io.ReadCloser + scanner *bufio.Scanner + remaining []byte +} + +// NewDocumentDecoder decodes YAML documents from the provided +// stream in chunks by converting each document (as defined by +// the YAML spec) into its own chunk. io.ErrShortBuffer will be +// returned if the entire buffer could not be read to assist +// the caller in framing the chunk. +func NewDocumentDecoder(r io.ReadCloser) io.ReadCloser { + scanner := bufio.NewScanner(r) + scanner.Split(splitYAMLDocument) + return &YAMLDecoder{ + r: r, + scanner: scanner, + } +} + +// Read reads the previous slice into the buffer, or attempts to read +// the next chunk. +// TODO: switch to readline approach. 
+func (d *YAMLDecoder) Read(data []byte) (n int, err error) { + left := len(d.remaining) + if left == 0 { + // return the next chunk from the stream + if !d.scanner.Scan() { + err := d.scanner.Err() + if err == nil { + err = io.EOF + } + return 0, err + } + out := d.scanner.Bytes() + d.remaining = out + left = len(out) + } + + // fits within data + if left <= len(data) { + copy(data, d.remaining) + d.remaining = nil + return left, nil + } + + // caller will need to reread + copy(data, d.remaining[:len(data)]) + d.remaining = d.remaining[len(data):] + return len(data), io.ErrShortBuffer +} + +func (d *YAMLDecoder) Close() error { + return d.r.Close() +} + +const yamlSeparator = "\n---" +const separator = "---" + +// splitYAMLDocument is a bufio.SplitFunc for splitting YAML streams into individual documents. +func splitYAMLDocument(data []byte, atEOF bool) (advance int, token []byte, err error) { + if atEOF && len(data) == 0 { + return 0, nil, nil + } + sep := len([]byte(yamlSeparator)) + if i := bytes.Index(data, []byte(yamlSeparator)); i >= 0 { + // We have a potential document terminator + i += sep + after := data[i:] + if len(after) == 0 { + // we can't read any more characters + if atEOF { + return len(data), data[:len(data)-sep], nil + } + return 0, nil, nil + } + if j := bytes.IndexByte(after, '\n'); j >= 0 { + return i + j + 1, data[0 : i-sep], nil + } + return 0, nil, nil + } + // If we're at EOF, we have a final, non-terminated line. Return it. + if atEOF { + return len(data), data, nil + } + // Request more data. + return 0, nil, nil +} + +// decoder is a convenience interface for Decode. +type decoder interface { + Decode(into interface{}) error +} + +// YAMLOrJSONDecoder attempts to decode a stream of JSON documents or +// YAML documents by sniffing for a leading { character. +type YAMLOrJSONDecoder struct { + r io.Reader + bufferSize int + + decoder decoder + rawData []byte +} + +type JSONSyntaxError struct { + Line int + Err error +} + +func (e JSONSyntaxError) Error() string { + return fmt.Sprintf("json: line %d: %s", e.Line, e.Err.Error()) +} + +type YAMLSyntaxError struct { + err error +} + +func (e YAMLSyntaxError) Error() string { + return e.err.Error() +} + +// NewYAMLOrJSONDecoder returns a decoder that will process YAML documents +// or JSON documents from the given reader as a stream. bufferSize determines +// how far into the stream the decoder will look to figure out whether this +// is a JSON stream (has whitespace followed by an open brace). +func NewYAMLOrJSONDecoder(r io.Reader, bufferSize int) *YAMLOrJSONDecoder { + return &YAMLOrJSONDecoder{ + r: r, + bufferSize: bufferSize, + } +} + +// Decode unmarshals the next object from the underlying stream into the +// provide object, or returns an error. 
+func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { + if d.decoder == nil { + buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) + if isJSON { + d.decoder = json.NewDecoder(buffer) + d.rawData = origData + } else { + d.decoder = NewYAMLToJSONDecoder(buffer) + } + } + err := d.decoder.Decode(into) + if jsonDecoder, ok := d.decoder.(*json.Decoder); ok { + if syntax, ok := err.(*json.SyntaxError); ok { + data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) + if readErr != nil { + klog.V(4).Infof("reading stream failed: %v", readErr) + } + js := string(data) + + // if contents from io.Reader are not complete, + // use the original raw data to prevent panic + if int64(len(js)) <= syntax.Offset { + js = string(d.rawData) + } + + start := strings.LastIndex(js[:syntax.Offset], "\n") + 1 + line := strings.Count(js[:start], "\n") + return JSONSyntaxError{ + Line: line, + Err: fmt.Errorf(syntax.Error()), + } + } + } + return err +} + +type Reader interface { + Read() ([]byte, error) +} + +type YAMLReader struct { + reader Reader +} + +func NewYAMLReader(r *bufio.Reader) *YAMLReader { + return &YAMLReader{ + reader: &LineReader{reader: r}, + } +} + +// Read returns a full YAML document. +func (r *YAMLReader) Read() ([]byte, error) { + var buffer bytes.Buffer + for { + line, err := r.reader.Read() + if err != nil && err != io.EOF { + return nil, err + } + + sep := len([]byte(separator)) + if i := bytes.Index(line, []byte(separator)); i == 0 { + // We have a potential document terminator + i += sep + after := line[i:] + if len(strings.TrimRightFunc(string(after), unicode.IsSpace)) == 0 { + if buffer.Len() != 0 { + return buffer.Bytes(), nil + } + if err == io.EOF { + return nil, err + } + } + } + if err == io.EOF { + if buffer.Len() != 0 { + // If we're at EOF, we have a final, non-terminated line. Return it. + return buffer.Bytes(), nil + } + return nil, err + } + buffer.Write(line) + } +} + +type LineReader struct { + reader *bufio.Reader +} + +// Read returns a single line (with '\n' ended) from the underlying reader. +// An error is returned iff there is an error with the underlying reader. +func (r *LineReader) Read() ([]byte, error) { + var ( + isPrefix bool = true + err error = nil + line []byte + buffer bytes.Buffer + ) + + for isPrefix && err == nil { + line, isPrefix, err = r.reader.ReadLine() + buffer.Write(line) + } + buffer.WriteByte('\n') + return buffer.Bytes(), err +} + +// GuessJSONStream scans the provided reader up to size, looking +// for an open brace indicating this is JSON. It will return the +// bufio.Reader it creates for the consumer. +func GuessJSONStream(r io.Reader, size int) (io.Reader, []byte, bool) { + buffer := bufio.NewReaderSize(r, size) + b, _ := buffer.Peek(size) + return buffer, b, hasJSONPrefix(b) +} + +var jsonPrefix = []byte("{") + +// hasJSONPrefix returns true if the provided buffer appears to start with +// a JSON open brace. +func hasJSONPrefix(buf []byte) bool { + return hasPrefix(buf, jsonPrefix) +} + +// Return true if the first non-whitespace bytes in buf is +// prefix. +func hasPrefix(buf []byte, prefix []byte) bool { + trim := bytes.TrimLeftFunc(buf, unicode.IsSpace) + return bytes.HasPrefix(trim, prefix) +} diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go new file mode 100644 index 0000000..29574fd --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +// Package version supplies the type for version information collected at build time. +package version // import "k8s.io/apimachinery/pkg/version" diff --git a/vendor/k8s.io/apimachinery/pkg/version/helpers.go b/vendor/k8s.io/apimachinery/pkg/version/helpers.go new file mode 100644 index 0000000..5e041d6 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/helpers.go @@ -0,0 +1,88 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "regexp" + "strconv" + "strings" +) + +type versionType int + +const ( + // Bigger the version type number, higher priority it is + versionTypeAlpha versionType = iota + versionTypeBeta + versionTypeGA +) + +var kubeVersionRegex = regexp.MustCompile("^v([\\d]+)(?:(alpha|beta)([\\d]+))?$") + +func parseKubeVersion(v string) (majorVersion int, vType versionType, minorVersion int, ok bool) { + var err error + submatches := kubeVersionRegex.FindStringSubmatch(v) + if len(submatches) != 4 { + return 0, 0, 0, false + } + switch submatches[2] { + case "alpha": + vType = versionTypeAlpha + case "beta": + vType = versionTypeBeta + case "": + vType = versionTypeGA + default: + return 0, 0, 0, false + } + if majorVersion, err = strconv.Atoi(submatches[1]); err != nil { + return 0, 0, 0, false + } + if vType != versionTypeGA { + if minorVersion, err = strconv.Atoi(submatches[3]); err != nil { + return 0, 0, 0, false + } + } + return majorVersion, vType, minorVersion, true +} + +// CompareKubeAwareVersionStrings compares two kube-like version strings. +// Kube-like version strings are starting with a v, followed by a major version, optional "alpha" or "beta" strings +// followed by a minor version (e.g. v1, v2beta1). Versions will be sorted based on GA/alpha/beta first and then major +// and minor versions. e.g. v2, v1, v1beta2, v1beta1, v1alpha1. 
+func CompareKubeAwareVersionStrings(v1, v2 string) int { + if v1 == v2 { + return 0 + } + v1major, v1type, v1minor, ok1 := parseKubeVersion(v1) + v2major, v2type, v2minor, ok2 := parseKubeVersion(v2) + switch { + case !ok1 && !ok2: + return strings.Compare(v2, v1) + case !ok1 && ok2: + return -1 + case ok1 && !ok2: + return 1 + } + if v1type != v2type { + return int(v1type) - int(v2type) + } + if v1major != v2major { + return v1major - v2major + } + return v1minor - v2minor +} diff --git a/vendor/k8s.io/apimachinery/pkg/version/types.go b/vendor/k8s.io/apimachinery/pkg/version/types.go new file mode 100644 index 0000000..72727b5 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/version/types.go @@ -0,0 +1,37 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Info contains versioning information. +// TODO: Add []string of api versions supported? It's still unclear +// how we'll want to distribute that information. +type Info struct { + Major string `json:"major"` + Minor string `json:"minor"` + GitVersion string `json:"gitVersion"` + GitCommit string `json:"gitCommit"` + GitTreeState string `json:"gitTreeState"` + BuildDate string `json:"buildDate"` + GoVersion string `json:"goVersion"` + Compiler string `json:"compiler"` + Platform string `json:"platform"` +} + +// String returns info as a human-friendly version string. +func (info Info) String() string { + return info.GitVersion +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/doc.go b/vendor/k8s.io/apimachinery/pkg/watch/doc.go new file mode 100644 index 0000000..7e6bf3f --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package watch contains a generic watchable interface, and a fake for +// testing code that uses the watch interface. +package watch // import "k8s.io/apimachinery/pkg/watch" diff --git a/vendor/k8s.io/apimachinery/pkg/watch/filter.go b/vendor/k8s.io/apimachinery/pkg/watch/filter.go new file mode 100644 index 0000000..22c9449 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/filter.go @@ -0,0 +1,105 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "sync" +) + +// FilterFunc should take an event, possibly modify it in some way, and return +// the modified event. If the event should be ignored, then return keep=false. +type FilterFunc func(in Event) (out Event, keep bool) + +// Filter passes all events through f before allowing them to pass on. +// Putting a filter on a watch, as an unavoidable side-effect due to the way +// go channels work, effectively causes the watch's event channel to have its +// queue length increased by one. +// +// WARNING: filter has a fatal flaw, in that it can't properly update the +// Type field (Add/Modified/Deleted) to reflect items beginning to pass the +// filter when they previously didn't. +// +func Filter(w Interface, f FilterFunc) Interface { + fw := &filteredWatch{ + incoming: w, + result: make(chan Event), + f: f, + } + go fw.loop() + return fw +} + +type filteredWatch struct { + incoming Interface + result chan Event + f FilterFunc +} + +// ResultChan returns a channel which will receive filtered events. +func (fw *filteredWatch) ResultChan() <-chan Event { + return fw.result +} + +// Stop stops the upstream watch, which will eventually stop this watch. +func (fw *filteredWatch) Stop() { + fw.incoming.Stop() +} + +// loop waits for new values, filters them, and resends them. +func (fw *filteredWatch) loop() { + defer close(fw.result) + for event := range fw.incoming.ResultChan() { + filtered, keep := fw.f(event) + if keep { + fw.result <- filtered + } + } +} + +// Recorder records all events that are sent from the watch until it is closed. +type Recorder struct { + Interface + + lock sync.Mutex + events []Event +} + +var _ Interface = &Recorder{} + +// NewRecorder wraps an Interface and records any changes sent across it. +func NewRecorder(w Interface) *Recorder { + r := &Recorder{} + r.Interface = Filter(w, r.record) + return r +} + +// record is a FilterFunc and tracks each received event. +func (r *Recorder) record(in Event) (Event, bool) { + r.lock.Lock() + defer r.lock.Unlock() + r.events = append(r.events, in) + return in, true +} + +// Events returns a copy of the events sent across this recorder. +func (r *Recorder) Events() []Event { + r.lock.Lock() + defer r.lock.Unlock() + copied := make([]Event, len(r.events)) + copy(copied, r.events) + return copied +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/mux.go b/vendor/k8s.io/apimachinery/pkg/watch/mux.go new file mode 100644 index 0000000..0ac8dc4 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/mux.go @@ -0,0 +1,260 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package watch + +import ( + "sync" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch +// channel is full. +type FullChannelBehavior int + +const ( + WaitIfChannelFull FullChannelBehavior = iota + DropIfChannelFull +) + +// Buffer the incoming queue a little bit even though it should rarely ever accumulate +// anything, just in case a few events are received in such a short window that +// Broadcaster can't move them onto the watchers' queues fast enough. +const incomingQueueLength = 25 + +// Broadcaster distributes event notifications among any number of watchers. Every event +// is delivered to every watcher. +type Broadcaster struct { + // TODO: see if this lock is needed now that new watchers go through + // the incoming channel. + lock sync.Mutex + + watchers map[int64]*broadcasterWatcher + nextWatcher int64 + distributing sync.WaitGroup + + incoming chan Event + + // How large to make watcher's channel. + watchQueueLength int + // If one of the watch channels is full, don't wait for it to become empty. + // Instead just deliver it to the watchers that do have space in their + // channels and move on to the next event. + // It's more fair to do this on a per-watcher basis than to do it on the + // "incoming" channel, which would allow one slow watcher to prevent all + // other watchers from getting new events. + fullChannelBehavior FullChannelBehavior +} + +// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher. +// It is guaranteed that events will be distributed in the order in which they occur, +// but the order in which a single event is distributed among all of the watchers is unspecified. +func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster { + m := &Broadcaster{ + watchers: map[int64]*broadcasterWatcher{}, + incoming: make(chan Event, incomingQueueLength), + watchQueueLength: queueLength, + fullChannelBehavior: fullChannelBehavior, + } + m.distributing.Add(1) + go m.loop() + return m +} + +const internalRunFunctionMarker = "internal-do-function" + +// a function type we can shoehorn into the queue. +type functionFakeRuntimeObject func() + +func (obj functionFakeRuntimeObject) GetObjectKind() schema.ObjectKind { + return schema.EmptyObjectKind +} +func (obj functionFakeRuntimeObject) DeepCopyObject() runtime.Object { + if obj == nil { + return nil + } + // funcs are immutable. Hence, just return the original func. + return obj +} + +// Execute f, blocking the incoming queue (and waiting for it to drain first). +// The purpose of this terrible hack is so that watchers added after an event +// won't ever see that event, and will always see any event after they are +// added. +func (b *Broadcaster) blockQueue(f func()) { + var wg sync.WaitGroup + wg.Add(1) + b.incoming <- Event{ + Type: internalRunFunctionMarker, + Object: functionFakeRuntimeObject(func() { + defer wg.Done() + f() + }), + } + wg.Wait() +} + +// Watch adds a new watcher to the list and returns an Interface for it. +// Note: new watchers will only receive new events. They won't get an entire history +// of previous events. 
+func (m *Broadcaster) Watch() Interface { + var w *broadcasterWatcher + m.blockQueue(func() { + m.lock.Lock() + defer m.lock.Unlock() + id := m.nextWatcher + m.nextWatcher++ + w = &broadcasterWatcher{ + result: make(chan Event, m.watchQueueLength), + stopped: make(chan struct{}), + id: id, + m: m, + } + m.watchers[id] = w + }) + return w +} + +// WatchWithPrefix adds a new watcher to the list and returns an Interface for it. It sends +// queuedEvents down the new watch before beginning to send ordinary events from Broadcaster. +// The returned watch will have a queue length that is at least large enough to accommodate +// all of the items in queuedEvents. +func (m *Broadcaster) WatchWithPrefix(queuedEvents []Event) Interface { + var w *broadcasterWatcher + m.blockQueue(func() { + m.lock.Lock() + defer m.lock.Unlock() + id := m.nextWatcher + m.nextWatcher++ + length := m.watchQueueLength + if n := len(queuedEvents) + 1; n > length { + length = n + } + w = &broadcasterWatcher{ + result: make(chan Event, length), + stopped: make(chan struct{}), + id: id, + m: m, + } + m.watchers[id] = w + for _, e := range queuedEvents { + w.result <- e + } + }) + return w +} + +// stopWatching stops the given watcher and removes it from the list. +func (m *Broadcaster) stopWatching(id int64) { + m.lock.Lock() + defer m.lock.Unlock() + w, ok := m.watchers[id] + if !ok { + // No need to do anything, it's already been removed from the list. + return + } + delete(m.watchers, id) + close(w.result) +} + +// closeAll disconnects all watchers (presumably in response to a Shutdown call). +func (m *Broadcaster) closeAll() { + m.lock.Lock() + defer m.lock.Unlock() + for _, w := range m.watchers { + close(w.result) + } + // Delete everything from the map, since presence/absence in the map is used + // by stopWatching to avoid double-closing the channel. + m.watchers = map[int64]*broadcasterWatcher{} +} + +// Action distributes the given event among all watchers. +func (m *Broadcaster) Action(action EventType, obj runtime.Object) { + m.incoming <- Event{action, obj} +} + +// Shutdown disconnects all watchers (but any queued events will still be distributed). +// You must not call Action or Watch* after calling Shutdown. This call blocks +// until all events have been distributed through the outbound channels. Note +// that since they can be buffered, this means that the watchers might not +// have received the data yet as it can remain sitting in the buffered +// channel. +func (m *Broadcaster) Shutdown() { + close(m.incoming) + m.distributing.Wait() +} + +// loop receives from m.incoming and distributes to all watchers. +func (m *Broadcaster) loop() { + // Deliberately not catching crashes here. Yes, bring down the process if there's a + // bug in watch.Broadcaster. + for event := range m.incoming { + if event.Type == internalRunFunctionMarker { + event.Object.(functionFakeRuntimeObject)() + continue + } + m.distribute(event) + } + m.closeAll() + m.distributing.Done() +} + +// distribute sends event to all watchers. Blocking. +func (m *Broadcaster) distribute(event Event) { + m.lock.Lock() + defer m.lock.Unlock() + if m.fullChannelBehavior == DropIfChannelFull { + for _, w := range m.watchers { + select { + case w.result <- event: + case <-w.stopped: + default: // Don't block if the event can't be queued. 
+ } + } + } else { + for _, w := range m.watchers { + select { + case w.result <- event: + case <-w.stopped: + } + } + } +} + +// broadcasterWatcher handles a single watcher of a broadcaster +type broadcasterWatcher struct { + result chan Event + stopped chan struct{} + stop sync.Once + id int64 + m *Broadcaster +} + +// ResultChan returns a channel to use for waiting on events. +func (mw *broadcasterWatcher) ResultChan() <-chan Event { + return mw.result +} + +// Stop stops watching and removes mw from its list. +func (mw *broadcasterWatcher) Stop() { + mw.stop.Do(func() { + close(mw.stopped) + mw.m.stopWatching(mw.id) + }) +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go new file mode 100644 index 0000000..4269a83 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -0,0 +1,132 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "fmt" + "io" + "sync" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/util/net" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +// Decoder allows StreamWatcher to watch any stream for which a Decoder can be written. +type Decoder interface { + // Decode should return the type of event, the decoded object, or an error. + // An error will cause StreamWatcher to call Close(). Decode should block until + // it has data or an error occurs. + Decode() (action EventType, object runtime.Object, err error) + + // Close should close the underlying io.Reader, signalling to the source of + // the stream that it is no longer being watched. Close() must cause any + // outstanding call to Decode() to return with an error of some sort. + Close() +} + +// Reporter hides the details of how an error is turned into a runtime.Object for +// reporting on a watch stream since this package may not import a higher level report. +type Reporter interface { + // AsObject must convert err into a valid runtime.Object for the watch stream. + AsObject(err error) runtime.Object +} + +// StreamWatcher turns any stream for which you can write a Decoder interface +// into a watch.Interface. +type StreamWatcher struct { + sync.Mutex + source Decoder + reporter Reporter + result chan Event + stopped bool +} + +// NewStreamWatcher creates a StreamWatcher from the given decoder. +func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { + sw := &StreamWatcher{ + source: d, + reporter: r, + // It's easy for a consumer to add buffering via an extra + // goroutine/channel, but impossible for them to remove it, + // so nonbuffered is better. + result: make(chan Event), + } + go sw.receive() + return sw +} + +// ResultChan implements Interface. +func (sw *StreamWatcher) ResultChan() <-chan Event { + return sw.result +} + +// Stop implements Interface. +func (sw *StreamWatcher) Stop() { + // Call Close() exactly once by locking and setting a flag. 
+ sw.Lock() + defer sw.Unlock() + if !sw.stopped { + sw.stopped = true + sw.source.Close() + } +} + +// stopping returns true if Stop() was called previously. +func (sw *StreamWatcher) stopping() bool { + sw.Lock() + defer sw.Unlock() + return sw.stopped +} + +// receive reads result from the decoder in a loop and sends down the result channel. +func (sw *StreamWatcher) receive() { + defer close(sw.result) + defer sw.Stop() + defer utilruntime.HandleCrash() + for { + action, obj, err := sw.source.Decode() + if err != nil { + // Ignore expected error. + if sw.stopping() { + return + } + switch err { + case io.EOF: + // watch closed normally + case io.ErrUnexpectedEOF: + klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + default: + if net.IsProbableEOF(err) || net.IsTimeout(err) { + klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) + } else { + sw.result <- Event{ + Type: Error, + Object: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)), + } + } + } + return + } + sw.result <- Event{ + Type: action, + Object: obj, + } + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go new file mode 100644 index 0000000..988aba3 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -0,0 +1,322 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package watch + +import ( + "fmt" + "sync" + + "k8s.io/klog" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Interface can be implemented by anything that knows how to watch and report changes. +type Interface interface { + // Stops watching. Will close the channel returned by ResultChan(). Releases + // any resources used by the watch. + Stop() + + // Returns a chan which will receive all the events. If an error occurs + // or Stop() is called, this channel will be closed, in which case the + // watch should be completely cleaned up. + ResultChan() <-chan Event +} + +// EventType defines the possible types of events. +type EventType string + +const ( + Added EventType = "ADDED" + Modified EventType = "MODIFIED" + Deleted EventType = "DELETED" + Bookmark EventType = "BOOKMARK" + Error EventType = "ERROR" + + DefaultChanSize int32 = 100 +) + +// Event represents a single event to a watched resource. +// +k8s:deepcopy-gen=true +type Event struct { + Type EventType + + // Object is: + // * If Type is Added or Modified: the new state of the object. + // * If Type is Deleted: the state of the object immediately before deletion. + // * If Type is Bookmark: the object (instance of a type being watched) where + // only ResourceVersion field is set. On successful restart of watch from a + // bookmark resourceVersion, client is guaranteed to not get repeat event + // nor miss any events. + // * If Type is Error: *api.Status is recommended; other types may make sense + // depending on context. 
+ Object runtime.Object +} + +type emptyWatch chan Event + +// NewEmptyWatch returns a watch interface that returns no results and is closed. +// May be used in certain error conditions where no information is available but +// an error is not warranted. +func NewEmptyWatch() Interface { + ch := make(chan Event) + close(ch) + return emptyWatch(ch) +} + +// Stop implements Interface +func (w emptyWatch) Stop() { +} + +// ResultChan implements Interface +func (w emptyWatch) ResultChan() <-chan Event { + return chan Event(w) +} + +// FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type FakeWatcher struct { + result chan Event + stopped bool + sync.Mutex +} + +func NewFake() *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event), + } +} + +func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { + return &FakeWatcher{ + result: make(chan Event, size), + } +} + +// Stop implements Interface.Stop(). +func (f *FakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.stopped { + klog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.stopped = true + } +} + +func (f *FakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.stopped +} + +// Reset prepares the watcher to be reused. +func (f *FakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.stopped = false + f.result = make(chan Event) +} + +func (f *FakeWatcher) ResultChan() <-chan Event { + return f.result +} + +// Add sends an add event. +func (f *FakeWatcher) Add(obj runtime.Object) { + f.result <- Event{Added, obj} +} + +// Modify sends a modify event. +func (f *FakeWatcher) Modify(obj runtime.Object) { + f.result <- Event{Modified, obj} +} + +// Delete sends a delete event. +func (f *FakeWatcher) Delete(lastValue runtime.Object) { + f.result <- Event{Deleted, lastValue} +} + +// Error sends an Error event. +func (f *FakeWatcher) Error(errValue runtime.Object) { + f.result <- Event{Error, errValue} +} + +// Action sends an event of the requested type, for table-based testing. +func (f *FakeWatcher) Action(action EventType, obj runtime.Object) { + f.result <- Event{action, obj} +} + +// RaceFreeFakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. +type RaceFreeFakeWatcher struct { + result chan Event + Stopped bool + sync.Mutex +} + +func NewRaceFreeFake() *RaceFreeFakeWatcher { + return &RaceFreeFakeWatcher{ + result: make(chan Event, DefaultChanSize), + } +} + +// Stop implements Interface.Stop(). +func (f *RaceFreeFakeWatcher) Stop() { + f.Lock() + defer f.Unlock() + if !f.Stopped { + klog.V(4).Infof("Stopping fake watcher.") + close(f.result) + f.Stopped = true + } +} + +func (f *RaceFreeFakeWatcher) IsStopped() bool { + f.Lock() + defer f.Unlock() + return f.Stopped +} + +// Reset prepares the watcher to be reused. +func (f *RaceFreeFakeWatcher) Reset() { + f.Lock() + defer f.Unlock() + f.Stopped = false + f.result = make(chan Event, DefaultChanSize) +} + +func (f *RaceFreeFakeWatcher) ResultChan() <-chan Event { + f.Lock() + defer f.Unlock() + return f.result +} + +// Add sends an add event. +func (f *RaceFreeFakeWatcher) Add(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Added, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Modify sends a modify event. 
+func (f *RaceFreeFakeWatcher) Modify(obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Modified, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Delete sends a delete event. +func (f *RaceFreeFakeWatcher) Delete(lastValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Deleted, lastValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Error sends an Error event. +func (f *RaceFreeFakeWatcher) Error(errValue runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{Error, errValue}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// Action sends an event of the requested type, for table-based testing. +func (f *RaceFreeFakeWatcher) Action(action EventType, obj runtime.Object) { + f.Lock() + defer f.Unlock() + if !f.Stopped { + select { + case f.result <- Event{action, obj}: + return + default: + panic(fmt.Errorf("channel full")) + } + } +} + +// ProxyWatcher lets you wrap your channel in watch Interface. Threadsafe. +type ProxyWatcher struct { + result chan Event + stopCh chan struct{} + + mutex sync.Mutex + stopped bool +} + +var _ Interface = &ProxyWatcher{} + +// NewProxyWatcher creates new ProxyWatcher by wrapping a channel +func NewProxyWatcher(ch chan Event) *ProxyWatcher { + return &ProxyWatcher{ + result: ch, + stopCh: make(chan struct{}), + stopped: false, + } +} + +// Stop implements Interface +func (pw *ProxyWatcher) Stop() { + pw.mutex.Lock() + defer pw.mutex.Unlock() + if !pw.stopped { + pw.stopped = true + close(pw.stopCh) + } +} + +// Stopping returns true if Stop() has been called +func (pw *ProxyWatcher) Stopping() bool { + pw.mutex.Lock() + defer pw.mutex.Unlock() + return pw.stopped +} + +// ResultChan implements Interface +func (pw *ProxyWatcher) ResultChan() <-chan Event { + return pw.result +} + +// StopChan returns stop channel +func (pw *ProxyWatcher) StopChan() <-chan struct{} { + return pw.stopCh +} diff --git a/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go new file mode 100644 index 0000000..71ef4da --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/watch/zz_generated.deepcopy.go @@ -0,0 +1,40 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package watch + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Event) DeepCopyInto(out *Event) { + *out = *in + if in.Object != nil { + out.Object = in.Object.DeepCopyObject() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. 
+func (in *Event) DeepCopy() *Event { + if in == nil { + return nil + } + out := new(Event) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go new file mode 100644 index 0000000..7ed1d1c --- /dev/null +++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/reflect/deep_equal.go @@ -0,0 +1,388 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package reflect is a fork of go's standard library reflection package, which +// allows for deep equal with equality functions defined. +package reflect + +import ( + "fmt" + "reflect" + "strings" +) + +// Equalities is a map from type to a function comparing two values of +// that type. +type Equalities map[reflect.Type]reflect.Value + +// For convenience, panics on errrors +func EqualitiesOrDie(funcs ...interface{}) Equalities { + e := Equalities{} + if err := e.AddFuncs(funcs...); err != nil { + panic(err) + } + return e +} + +// AddFuncs is a shortcut for multiple calls to AddFunc. +func (e Equalities) AddFuncs(funcs ...interface{}) error { + for _, f := range funcs { + if err := e.AddFunc(f); err != nil { + return err + } + } + return nil +} + +// AddFunc uses func as an equality function: it must take +// two parameters of the same type, and return a boolean. +func (e Equalities) AddFunc(eqFunc interface{}) error { + fv := reflect.ValueOf(eqFunc) + ft := fv.Type() + if ft.Kind() != reflect.Func { + return fmt.Errorf("expected func, got: %v", ft) + } + if ft.NumIn() != 2 { + return fmt.Errorf("expected two 'in' params, got: %v", ft) + } + if ft.NumOut() != 1 { + return fmt.Errorf("expected one 'out' param, got: %v", ft) + } + if ft.In(0) != ft.In(1) { + return fmt.Errorf("expected arg 1 and 2 to have same type, but got %v", ft) + } + var forReturnType bool + boolType := reflect.TypeOf(forReturnType) + if ft.Out(0) != boolType { + return fmt.Errorf("expected bool return, got: %v", ft) + } + e[ft.In(0)] = fv + return nil +} + +// Below here is forked from go's reflect/deepequal.go + +// During deepValueEqual, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited comparisons are stored in a map indexed by visit. +type visit struct { + a1 uintptr + a2 uintptr + typ reflect.Type +} + +// unexportedTypePanic is thrown when you use this DeepEqual on something that has an +// unexported type. It indicates a programmer error, so should not occur at runtime, +// which is why it's not public and thus impossible to catch. +type unexportedTypePanic []reflect.Type + +func (u unexportedTypePanic) Error() string { return u.String() } +func (u unexportedTypePanic) String() string { + strs := make([]string, len(u)) + for i, t := range u { + strs[i] = fmt.Sprintf("%v", t) + } + return "an unexported field was encountered, nested like this: " + strings.Join(strs, " -> ") +} + +func makeUsefulPanic(v reflect.Value) { + if x := recover(); x != nil { + if u, ok := x.(unexportedTypePanic); ok { + u = append(unexportedTypePanic{v.Type()}, u...) + x = u + } + panic(x) + } +} + +// Tests for deep equality using reflected types. The map argument tracks +// comparisons that have already been seen, which allows short circuiting on +// recursive types. 
+func (e Equalities) deepValueEqual(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { + defer makeUsefulPanic(v1) + + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + if fv, ok := e[v1.Type()]; ok { + return fv.Call([]reflect.Value{v1, v2})[0].Bool() + } + + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { + addr1 := v1.UnsafeAddr() + addr2 := v2.UnsafeAddr() + if addr1 > addr2 { + // Canonicalize order to reduce number of entries in visited. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are identical ... + if addr1 == addr2 { + return true + } + + // ... or already seen + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case reflect.Array: + // We don't need to check length here because length is part of + // an array's type, which has already been filtered for. + for i := 0; i < v1.Len(); i++ { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Slice: + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for i := 0; i < v1.Len(); i++ { + if !e.deepValueEqual(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Interface: + if v1.IsNil() || v2.IsNil() { + return v1.IsNil() == v2.IsNil() + } + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Ptr: + return e.deepValueEqual(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !e.deepValueEqual(v1.Field(i), v2.Field(i), visited, depth+1) { + return false + } + } + return true + case reflect.Map: + if (v1.IsNil() || v1.Len() == 0) != (v2.IsNil() || v2.Len() == 0) { + return false + } + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() != v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for _, k := range v1.MapKeys() { + if !e.deepValueEqual(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + return false + } + } + return true + case reflect.Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + default: + // Normal equality suffices + if !v1.CanInterface() || !v2.CanInterface() { + panic(unexportedTypePanic{}) + } + return v1.Interface() == v2.Interface() + } +} + +// DeepEqual is like reflect.DeepEqual, but focused on semantic equality +// instead of memory equality. +// +// It will use e's equality functions if it finds types that match. +// +// An empty slice *is* equal to a nil slice for our purposes; same for maps. +// +// Unexported field members cannot be compared and will cause an imformative panic; you must add an Equality +// function for these types. 
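+//
+// A minimal usage sketch (illustrative only; "MyType" and its ID-based
+// comparison are assumptions, not part of this package):
+//
+//    eq := EqualitiesOrDie(func(a, b MyType) bool { return a.ID == b.ID })
+//    eq.DeepEqual([]MyType{}, []MyType(nil)) // true: an empty slice equals a nil slice
+//    // Elements of type MyType are compared with the registered func above.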
+func (e Equalities) DeepEqual(a1, a2 interface{}) bool { + if a1 == nil || a2 == nil { + return a1 == a2 + } + v1 := reflect.ValueOf(a1) + v2 := reflect.ValueOf(a2) + if v1.Type() != v2.Type() { + return false + } + return e.deepValueEqual(v1, v2, make(map[visit]bool), 0) +} + +func (e Equalities) deepValueDerive(v1, v2 reflect.Value, visited map[visit]bool, depth int) bool { + defer makeUsefulPanic(v1) + + if !v1.IsValid() || !v2.IsValid() { + return v1.IsValid() == v2.IsValid() + } + if v1.Type() != v2.Type() { + return false + } + if fv, ok := e[v1.Type()]; ok { + return fv.Call([]reflect.Value{v1, v2})[0].Bool() + } + + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if v1.CanAddr() && v2.CanAddr() && hard(v1.Kind()) { + addr1 := v1.UnsafeAddr() + addr2 := v2.UnsafeAddr() + if addr1 > addr2 { + // Canonicalize order to reduce number of entries in visited. + addr1, addr2 = addr2, addr1 + } + + // Short circuit if references are identical ... + if addr1 == addr2 { + return true + } + + // ... or already seen + typ := v1.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + return true + } + + // Remember for later. + visited[v] = true + } + + switch v1.Kind() { + case reflect.Array: + // We don't need to check length here because length is part of + // an array's type, which has already been filtered for. + for i := 0; i < v1.Len(); i++ { + if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.Slice: + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for i := 0; i < v1.Len(); i++ { + if !e.deepValueDerive(v1.Index(i), v2.Index(i), visited, depth+1) { + return false + } + } + return true + case reflect.String: + if v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + return v1.String() == v2.String() + case reflect.Interface: + if v1.IsNil() { + return true + } + return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Ptr: + if v1.IsNil() { + return true + } + return e.deepValueDerive(v1.Elem(), v2.Elem(), visited, depth+1) + case reflect.Struct: + for i, n := 0, v1.NumField(); i < n; i++ { + if !e.deepValueDerive(v1.Field(i), v2.Field(i), visited, depth+1) { + return false + } + } + return true + case reflect.Map: + if v1.IsNil() || v1.Len() == 0 { + return true + } + if v1.Len() > v2.Len() { + return false + } + if v1.Pointer() == v2.Pointer() { + return true + } + for _, k := range v1.MapKeys() { + if !e.deepValueDerive(v1.MapIndex(k), v2.MapIndex(k), visited, depth+1) { + return false + } + } + return true + case reflect.Func: + if v1.IsNil() && v2.IsNil() { + return true + } + // Can't do better than this: + return false + default: + // Normal equality suffices + if !v1.CanInterface() || !v2.CanInterface() { + panic(unexportedTypePanic{}) + } + return v1.Interface() == v2.Interface() + } +} + +// DeepDerivative is similar to DeepEqual except that unset fields in a1 are +// ignored (not compared). This allows us to focus on the fields that matter to +// the semantic comparison. +// +// The unset fields include a nil pointer and an empty string. 
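+//
+// Sketch of the asymmetry (hypothetical Spec struct with a single Name string
+// field and no registered equality funcs):
+//
+//    e := Equalities{}
+//    e.DeepDerivative(Spec{}, Spec{Name: "x"})          // true: Name is unset in a1
+//    e.DeepDerivative(Spec{Name: "y"}, Spec{Name: "x"}) // false: both set and different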
+func (e Equalities) DeepDerivative(a1, a2 interface{}) bool { + if a1 == nil { + return true + } + v1 := reflect.ValueOf(a1) + v2 := reflect.ValueOf(a2) + if v1.Type() != v2.Type() { + return false + } + return e.deepValueDerive(v1, v2, make(map[visit]bool), 0) +} diff --git a/vendor/k8s.io/client-go/LICENSE b/vendor/k8s.io/client-go/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/k8s.io/client-go/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go new file mode 100644 index 0000000..61b9c44 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -0,0 +1,508 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "encoding/json" + "fmt" + "net/url" + "sort" + "strings" + "sync" + "time" + + "github.com/golang/protobuf/proto" + openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/kubernetes/scheme" + restclient "k8s.io/client-go/rest" +) + +const ( + // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources). + defaultRetries = 2 + // protobuf mime type + mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf" + // defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient. + // Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist. + defaultTimeout = 32 * time.Second +) + +// DiscoveryInterface holds the methods that discover server-supported API groups, +// versions and resources. +type DiscoveryInterface interface { + RESTClient() restclient.Interface + ServerGroupsInterface + ServerResourcesInterface + ServerVersionInterface + OpenAPISchemaInterface +} + +// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. +// Note that If the ServerResourcesForGroupVersion method returns a cache miss +// error, the user needs to explicitly call Invalidate to clear the cache, +// otherwise the same cache miss error will be returned next time. 
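+//
+// A sketched cache-miss retry, assuming cd is some CachedDiscoveryInterface
+// implementation (names and error handling are illustrative):
+//
+//    rl, err := cd.ServerResourcesForGroupVersion("apps/v1")
+//    if err != nil && !cd.Fresh() {
+//        cd.Invalidate()
+//        rl, err = cd.ServerResourcesForGroupVersion("apps/v1")
+//    }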
+type CachedDiscoveryInterface interface { + DiscoveryInterface + // Fresh is supposed to tell the caller whether or not to retry if the cache + // fails to find something (false = retry, true = no need to retry). + // + // TODO: this needs to be revisited, this interface can't be locked properly + // and doesn't make a lot of sense. + Fresh() bool + // Invalidate enforces that no cached data that is older than the current time + // is used. + Invalidate() +} + +// ServerGroupsInterface has methods for obtaining supported groups on the API server +type ServerGroupsInterface interface { + // ServerGroups returns the supported groups, with information like supported versions and the + // preferred version. + ServerGroups() (*metav1.APIGroupList, error) +} + +// ServerResourcesInterface has methods for obtaining supported resources on the API server +type ServerResourcesInterface interface { + // ServerResourcesForGroupVersion returns the supported resources for a group and version. + ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) + // ServerResources returns the supported resources for all groups and versions. + // + // The returned resource list might be non-nil with partial results even in the case of + // non-nil error. + // + // Deprecated: use ServerGroupsAndResources instead. + ServerResources() ([]*metav1.APIResourceList, error) + // ServerResources returns the supported groups and resources for all groups and versions. + // + // The returned group and resource lists might be non-nil with partial results even in the + // case of non-nil error. + ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) + // ServerPreferredResources returns the supported resources with the version preferred by the + // server. + // + // The returned group and resource lists might be non-nil with partial results even in the + // case of non-nil error. + ServerPreferredResources() ([]*metav1.APIResourceList, error) + // ServerPreferredNamespacedResources returns the supported namespaced resources with the + // version preferred by the server. + // + // The returned resource list might be non-nil with partial results even in the case of + // non-nil error. + ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) +} + +// ServerVersionInterface has a method for retrieving the server's version. +type ServerVersionInterface interface { + // ServerVersion retrieves and parses the server's version (git version). + ServerVersion() (*version.Info, error) +} + +// OpenAPISchemaInterface has a method to retrieve the open API schema. +type OpenAPISchemaInterface interface { + // OpenAPISchema retrieves and parses the swagger API schema the server supports. + OpenAPISchema() (*openapi_v2.Document, error) +} + +// DiscoveryClient implements the functions that discover server-supported API groups, +// versions and resources. +type DiscoveryClient struct { + restClient restclient.Interface + + LegacyPrefix string +} + +// Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so +// group would be "". 
+func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) { + groupVersions := []metav1.GroupVersionForDiscovery{} + for _, version := range apiVersions.Versions { + groupVersion := metav1.GroupVersionForDiscovery{ + GroupVersion: version, + Version: version, + } + groupVersions = append(groupVersions, groupVersion) + } + apiGroup.Versions = groupVersions + // There should be only one groupVersion returned at /api + apiGroup.PreferredVersion = groupVersions[0] + return +} + +// ServerGroups returns the supported groups, with information like supported versions and the +// preferred version. +func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) { + // Get the groupVersions exposed at /api + v := &metav1.APIVersions{} + err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v) + apiGroup := metav1.APIGroup{} + if err == nil && len(v.Versions) != 0 { + apiGroup = apiVersionsToAPIGroup(v) + } + if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { + return nil, err + } + + // Get the groupVersions exposed at /apis + apiGroupList = &metav1.APIGroupList{} + err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList) + if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { + return nil, err + } + // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api + if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + apiGroupList = &metav1.APIGroupList{} + } + + // prepend the group retrieved from /api to the list if not empty + if len(v.Versions) != 0 { + apiGroupList.Groups = append([]metav1.APIGroup{apiGroup}, apiGroupList.Groups...) + } + return apiGroupList, nil +} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) { + url := url.URL{} + if len(groupVersion) == 0 { + return nil, fmt.Errorf("groupVersion shouldn't be empty") + } + if len(d.LegacyPrefix) > 0 && groupVersion == "v1" { + url.Path = d.LegacyPrefix + "/" + groupVersion + } else { + url.Path = "/apis/" + groupVersion + } + resources = &metav1.APIResourceList{ + GroupVersion: groupVersion, + } + err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources) + if err != nil { + // ignore 403 or 404 error to be compatible with an v1.0 server. + if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { + return resources, nil + } + return nil, err + } + return resources, nil +} + +// ServerResources returns the supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. +func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { + _, rs, err := d.ServerGroupsAndResources() + return rs, err +} + +// ServerGroupsAndResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + return ServerGroupsAndResources(d) + }) +} + +// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. 
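+//
+// Because discovery may partially succeed, a caller can keep the partial
+// results; a sketch (d is assumed to be a *DiscoveryClient):
+//
+//    groups, resources, err := d.ServerGroupsAndResources()
+//    if err != nil && !IsGroupDiscoveryFailedError(err) {
+//        return err // hard failure; otherwise groups/resources hold partial results
+//    }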
+type ErrGroupDiscoveryFailed struct { + // Groups is a list of the groups that failed to load and the error cause + Groups map[schema.GroupVersion]error +} + +// Error implements the error interface +func (e *ErrGroupDiscoveryFailed) Error() string { + var groups []string + for k, v := range e.Groups { + groups = append(groups, fmt.Sprintf("%s: %v", k, v)) + } + sort.Strings(groups) + return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) +} + +// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover +// a complete list of APIs for the client to use. +func IsGroupDiscoveryFailedError(err error) bool { + _, ok := err.(*ErrGroupDiscoveryFailed) + return err != nil && ok +} + +// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. +// Deprecated: use ServerGroupsAndResources instead. +func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { + _, rs, err := ServerGroupsAndResources(d) + return rs, err +} + +func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + sgs, err := d.ServerGroups() + if sgs == nil { + return nil, nil, err + } + resultGroups := []*metav1.APIGroup{} + for i := range sgs.Groups { + resultGroups = append(resultGroups, &sgs.Groups[i]) + } + + groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) + + // order results by group/version discovery order + result := []*metav1.APIResourceList{} + for _, apiGroup := range sgs.Groups { + for _, version := range apiGroup.Versions { + gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + if resources, ok := groupVersionResources[gv]; ok { + result = append(result, resources) + } + } + } + + if len(failedGroups) == 0 { + return resultGroups, result, nil + } + + return resultGroups, result, &ErrGroupDiscoveryFailed{Groups: failedGroups} +} + +// ServerPreferredResources uses the provided discovery interface to look up preferred resources +func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { + serverGroupList, err := d.ServerGroups() + if err != nil { + return nil, err + } + + groupVersionResources, failedGroups := fetchGroupVersionResources(d, serverGroupList) + + result := []*metav1.APIResourceList{} + grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource + grAPIResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource + gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping + + for _, apiGroup := range serverGroupList.Groups { + for _, version := range apiGroup.Versions { + groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + + apiResourceList, ok := groupVersionResources[groupVersion] + if !ok { + continue + } + + // create empty list which is filled later in another loop + emptyAPIResourceList := metav1.APIResourceList{ + GroupVersion: version.GroupVersion, + } + gvAPIResourceLists[groupVersion] = &emptyAPIResourceList + result = append(result, &emptyAPIResourceList) + + for i := range apiResourceList.APIResources { + apiResource := &apiResourceList.APIResources[i] + if strings.Contains(apiResource.Name, "/") { + continue + } + gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} + if _, ok := 
grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { + // only override with preferred version + continue + } + grVersions[gv] = version.Version + grAPIResources[gv] = apiResource + } + } + } + + // group selected APIResources according to GroupVersion into APIResourceLists + for groupResource, apiResource := range grAPIResources { + version := grVersions[groupResource] + groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} + apiResourceList := gvAPIResourceLists[groupVersion] + apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) + } + + if len(failedGroups) == 0 { + return result, nil + } + + return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} +} + +// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel. +func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) { + groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) + failedGroups := make(map[schema.GroupVersion]error) + + wg := &sync.WaitGroup{} + resultLock := &sync.Mutex{} + for _, apiGroup := range apiGroups.Groups { + for _, version := range apiGroup.Versions { + groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} + wg.Add(1) + go func() { + defer wg.Done() + defer utilruntime.HandleCrash() + + apiResourceList, err := d.ServerResourcesForGroupVersion(groupVersion.String()) + + // lock to record results + resultLock.Lock() + defer resultLock.Unlock() + + if err != nil { + // TODO: maybe restrict this to NotFound errors + failedGroups[groupVersion] = err + } + if apiResourceList != nil { + // even in case of error, some fallback might have been returned + groupVersionResources[groupVersion] = apiResourceList + } + }() + } + } + wg.Wait() + + return groupVersionResources, failedGroups +} + +// ServerPreferredResources returns the supported resources with the version preferred by the +// server. +func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + _, rs, err := withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + rs, err := ServerPreferredResources(d) + return nil, rs, err + }) + return rs, err +} + +// ServerPreferredNamespacedResources returns the supported namespaced resources with the +// version preferred by the server. +func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredNamespacedResources(d) +} + +// ServerPreferredNamespacedResources uses the provided discovery interface to look up preferred namespaced resources +func ServerPreferredNamespacedResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { + all, err := ServerPreferredResources(d) + return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool { + return r.Namespaced + }), all), err +} + +// ServerVersion retrieves and parses the server's version (git version). 
+func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { + body, err := d.restClient.Get().AbsPath("/version").Do().Raw() + if err != nil { + return nil, err + } + var info version.Info + err = json.Unmarshal(body, &info) + if err != nil { + return nil, fmt.Errorf("unable to parse the server version: %v", err) + } + return &info, nil +} + +// OpenAPISchema fetches the open api schema using a rest client and parses the proto. +func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { + data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do().Raw() + if err != nil { + if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { + // single endpoint not found/registered in old server, try to fetch old endpoint + // TODO: remove this when kubectl/client-go don't work with 1.9 server + data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() + if err != nil { + return nil, err + } + } else { + return nil, err + } + } + document := &openapi_v2.Document{} + err = proto.Unmarshal(data, document) + if err != nil { + return nil, err + } + return document, nil +} + +// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. +func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { + var result []*metav1.APIResourceList + var resultGroups []*metav1.APIGroup + var err error + for i := 0; i < maxRetries; i++ { + resultGroups, result, err = f() + if err == nil { + return resultGroups, result, nil + } + if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { + return nil, nil, err + } + } + return resultGroups, result, err +} + +func setDiscoveryDefaults(config *restclient.Config) error { + config.APIPath = "" + config.GroupVersion = nil + if config.Timeout == 0 { + config.Timeout = defaultTimeout + } + codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()} + config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) + if len(config.UserAgent) == 0 { + config.UserAgent = restclient.DefaultKubernetesUserAgent() + } + return nil +} + +// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client +// can be used to discover supported resources in the API server. +func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { + config := *c + if err := setDiscoveryDefaults(&config); err != nil { + return nil, err + } + client, err := restclient.UnversionedRESTClientFor(&config) + return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err +} + +// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If +// there is an error, it panics. +func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { + client, err := NewDiscoveryClientForConfig(c) + if err != nil { + panic(err) + } + return client + +} + +// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. +func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { + return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (d *DiscoveryClient) RESTClient() restclient.Interface { + if d == nil { + return nil + } + return d.restClient +} diff --git a/vendor/k8s.io/client-go/discovery/doc.go b/vendor/k8s.io/client-go/discovery/doc.go new file mode 100644 index 0000000..7649558 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package discovery provides ways to discover server-supported +// API groups, versions and resources. +package discovery diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go new file mode 100644 index 0000000..3bfe514 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -0,0 +1,125 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/util/sets" + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// MatchesServerVersion queries the server to compares the build version +// (git hash) of the client with the server's build version. It returns an error +// if it failed to contact the server or if the versions are not an exact match. +func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { + sVer, err := client.ServerVersion() + if err != nil { + return fmt.Errorf("couldn't read version from server: %v", err) + } + // GitVersion includes GitCommit and GitTreeState, but best to be safe? + if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { + return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion) + } + + return nil +} + +// ServerSupportsVersion returns an error if the server doesn't have the required version +func ServerSupportsVersion(client DiscoveryInterface, requiredGV schema.GroupVersion) error { + groups, err := client.ServerGroups() + if err != nil { + // This is almost always a connection error, and higher level code should treat this as a generic error, + // not a negotiation specific error. 
+ return err + } + versions := metav1.ExtractGroupVersions(groups) + serverVersions := sets.String{} + for _, v := range versions { + serverVersions.Insert(v) + } + + if serverVersions.Has(requiredGV.String()) { + return nil + } + + // If the server supports no versions, then we should pretend it has the version because of old servers. + // This can happen because discovery fails due to 403 Forbidden errors + if len(serverVersions) == 0 { + return nil + } + + return fmt.Errorf("server does not support API version %q", requiredGV) +} + +// GroupVersionResources converts APIResourceLists to the GroupVersionResources. +func GroupVersionResources(rls []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) { + gvrs := map[schema.GroupVersionResource]struct{}{} + for _, rl := range rls { + gv, err := schema.ParseGroupVersion(rl.GroupVersion) + if err != nil { + return nil, err + } + for i := range rl.APIResources { + gvrs[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{} + } + } + return gvrs, nil +} + +// FilteredBy filters by the given predicate. Empty APIResourceLists are dropped. +func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1.APIResourceList { + result := []*metav1.APIResourceList{} + for _, rl := range rls { + filtered := *rl + filtered.APIResources = nil + for i := range rl.APIResources { + if pred.Match(rl.GroupVersion, &rl.APIResources[i]) { + filtered.APIResources = append(filtered.APIResources, rl.APIResources[i]) + } + } + if filtered.APIResources != nil { + result = append(result, &filtered) + } + } + return result +} + +// ResourcePredicate has a method to check if a resource matches a given condition. +type ResourcePredicate interface { + Match(groupVersion string, r *metav1.APIResource) bool +} + +// ResourcePredicateFunc returns true if it matches a resource based on a custom condition. +type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool + +// Match is a wrapper around ResourcePredicateFunc. +func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { + return fn(groupVersion, r) +} + +// SupportsAllVerbs is a predicate matching a resource iff all given verbs are supported. +type SupportsAllVerbs struct { + Verbs []string +} + +// Match checks if a resource contains all the given verbs. +func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { + return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) +} diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go new file mode 100644 index 0000000..fb889e6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -0,0 +1,580 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package kubernetes + +import ( + discovery "k8s.io/client-go/discovery" + admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" + appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" + appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" + appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" + authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" + authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" + authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" + authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" + autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" + autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" + autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" + batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" + batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" + batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" + certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" + coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" + coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" + corev1 "k8s.io/client-go/kubernetes/typed/core/v1" + eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" + extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" + networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" + networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" + nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" + nodev1beta1 "k8s.io/client-go/kubernetes/typed/node/v1beta1" + policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" + rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" + rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" + rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" + schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" + schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" + schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" + settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" + storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" + storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" + storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface + AppsV1() appsv1.AppsV1Interface + AppsV1beta1() appsv1beta1.AppsV1beta1Interface + AppsV1beta2() appsv1beta2.AppsV1beta2Interface + AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface + AuthenticationV1() authenticationv1.AuthenticationV1Interface + AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface + AuthorizationV1() authorizationv1.AuthorizationV1Interface + AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface + AutoscalingV1() autoscalingv1.AutoscalingV1Interface + AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface + AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface + BatchV1() 
batchv1.BatchV1Interface + BatchV1beta1() batchv1beta1.BatchV1beta1Interface + BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface + CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface + CoordinationV1() coordinationv1.CoordinationV1Interface + CoreV1() corev1.CoreV1Interface + EventsV1beta1() eventsv1beta1.EventsV1beta1Interface + ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface + NetworkingV1() networkingv1.NetworkingV1Interface + NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface + NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface + NodeV1beta1() nodev1beta1.NodeV1beta1Interface + PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface + RbacV1() rbacv1.RbacV1Interface + RbacV1beta1() rbacv1beta1.RbacV1beta1Interface + RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface + SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface + SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface + SchedulingV1() schedulingv1.SchedulingV1Interface + SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface + StorageV1beta1() storagev1beta1.StorageV1beta1Interface + StorageV1() storagev1.StorageV1Interface + StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface +} + +// Clientset contains the clients for groups. Each group has exactly one +// version included in a Clientset. +type Clientset struct { + *discovery.DiscoveryClient + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + appsV1 *appsv1.AppsV1Client + appsV1beta1 *appsv1beta1.AppsV1beta1Client + appsV1beta2 *appsv1beta2.AppsV1beta2Client + auditregistrationV1alpha1 *auditregistrationv1alpha1.AuditregistrationV1alpha1Client + authenticationV1 *authenticationv1.AuthenticationV1Client + authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client + authorizationV1 *authorizationv1.AuthorizationV1Client + authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client + autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client + autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client + batchV1 *batchv1.BatchV1Client + batchV1beta1 *batchv1beta1.BatchV1beta1Client + batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client + certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client + coordinationV1 *coordinationv1.CoordinationV1Client + coreV1 *corev1.CoreV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client + extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client + networkingV1 *networkingv1.NetworkingV1Client + networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client + nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client + nodeV1beta1 *nodev1beta1.NodeV1beta1Client + policyV1beta1 *policyv1beta1.PolicyV1beta1Client + rbacV1 *rbacv1.RbacV1Client + rbacV1beta1 *rbacv1beta1.RbacV1beta1Client + rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client + schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client + schedulingV1 *schedulingv1.SchedulingV1Client + settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client + storageV1beta1 *storagev1beta1.StorageV1beta1Client + storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client +} + +// AdmissionregistrationV1beta1 retrieves the 
AdmissionregistrationV1beta1Client +func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 +} + +// AppsV1 retrieves the AppsV1Client +func (c *Clientset) AppsV1() appsv1.AppsV1Interface { + return c.appsV1 +} + +// AppsV1beta1 retrieves the AppsV1beta1Client +func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { + return c.appsV1beta1 +} + +// AppsV1beta2 retrieves the AppsV1beta2Client +func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { + return c.appsV1beta2 +} + +// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client +func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { + return c.auditregistrationV1alpha1 +} + +// AuthenticationV1 retrieves the AuthenticationV1Client +func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { + return c.authenticationV1 +} + +// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client +func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { + return c.authenticationV1beta1 +} + +// AuthorizationV1 retrieves the AuthorizationV1Client +func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { + return c.authorizationV1 +} + +// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client +func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { + return c.authorizationV1beta1 +} + +// AutoscalingV1 retrieves the AutoscalingV1Client +func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { + return c.autoscalingV1 +} + +// AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client +func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { + return c.autoscalingV2beta1 +} + +// AutoscalingV2beta2 retrieves the AutoscalingV2beta2Client +func (c *Clientset) AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface { + return c.autoscalingV2beta2 +} + +// BatchV1 retrieves the BatchV1Client +func (c *Clientset) BatchV1() batchv1.BatchV1Interface { + return c.batchV1 +} + +// BatchV1beta1 retrieves the BatchV1beta1Client +func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { + return c.batchV1beta1 +} + +// BatchV2alpha1 retrieves the BatchV2alpha1Client +func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { + return c.batchV2alpha1 +} + +// CertificatesV1beta1 retrieves the CertificatesV1beta1Client +func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { + return c.certificatesV1beta1 +} + +// CoordinationV1beta1 retrieves the CoordinationV1beta1Client +func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { + return c.coordinationV1beta1 +} + +// CoordinationV1 retrieves the CoordinationV1Client +func (c *Clientset) CoordinationV1() coordinationv1.CoordinationV1Interface { + return c.coordinationV1 +} + +// CoreV1 retrieves the CoreV1Client +func (c *Clientset) CoreV1() corev1.CoreV1Interface { + return c.coreV1 +} + +// EventsV1beta1 retrieves the EventsV1beta1Client +func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + +// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client +func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { + return 
c.extensionsV1beta1 +} + +// NetworkingV1 retrieves the NetworkingV1Client +func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { + return c.networkingV1 +} + +// NetworkingV1beta1 retrieves the NetworkingV1beta1Client +func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { + return c.networkingV1beta1 +} + +// NodeV1alpha1 retrieves the NodeV1alpha1Client +func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { + return c.nodeV1alpha1 +} + +// NodeV1beta1 retrieves the NodeV1beta1Client +func (c *Clientset) NodeV1beta1() nodev1beta1.NodeV1beta1Interface { + return c.nodeV1beta1 +} + +// PolicyV1beta1 retrieves the PolicyV1beta1Client +func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { + return c.policyV1beta1 +} + +// RbacV1 retrieves the RbacV1Client +func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { + return c.rbacV1 +} + +// RbacV1beta1 retrieves the RbacV1beta1Client +func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { + return c.rbacV1beta1 +} + +// RbacV1alpha1 retrieves the RbacV1alpha1Client +func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { + return c.rbacV1alpha1 +} + +// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client +func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { + return c.schedulingV1alpha1 +} + +// SchedulingV1beta1 retrieves the SchedulingV1beta1Client +func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface { + return c.schedulingV1beta1 +} + +// SchedulingV1 retrieves the SchedulingV1Client +func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { + return c.schedulingV1 +} + +// SettingsV1alpha1 retrieves the SettingsV1alpha1Client +func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { + return c.settingsV1alpha1 +} + +// StorageV1beta1 retrieves the StorageV1beta1Client +func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { + return c.storageV1beta1 +} + +// StorageV1 retrieves the StorageV1Client +func (c *Clientset) StorageV1() storagev1.StorageV1Interface { + return c.storageV1 +} + +// StorageV1alpha1 retrieves the StorageV1alpha1Client +func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { + return c.storageV1alpha1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. 
+func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + var cs Clientset + var err error + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.appsV1beta2, err = appsv1beta2.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.auditregistrationV1alpha1, err = auditregistrationv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.authorizationV1, err = authorizationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.authorizationV1beta1, err = authorizationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.autoscalingV1, err = autoscalingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.autoscalingV2beta1, err = autoscalingv2beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.autoscalingV2beta2, err = autoscalingv2beta2.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.batchV1, err = batchv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.batchV1beta1, err = batchv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.batchV2alpha1, err = batchv2alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.certificatesV1beta1, err = certificatesv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.coordinationV1, err = coordinationv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.networkingV1beta1, err = networkingv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.nodeV1beta1, err = nodev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.rbacV1, err = rbacv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.rbacV1beta1, err = rbacv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err 
+ } + cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.schedulingV1beta1, err = schedulingv1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.schedulingV1, err = schedulingv1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.storageV1, err = storagev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + var cs Clientset + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) + cs.appsV1 = appsv1.NewForConfigOrDie(c) + cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) + cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) + cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.NewForConfigOrDie(c) + cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) + cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) + cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) + cs.authorizationV1beta1 = authorizationv1beta1.NewForConfigOrDie(c) + cs.autoscalingV1 = autoscalingv1.NewForConfigOrDie(c) + cs.autoscalingV2beta1 = autoscalingv2beta1.NewForConfigOrDie(c) + cs.autoscalingV2beta2 = autoscalingv2beta2.NewForConfigOrDie(c) + cs.batchV1 = batchv1.NewForConfigOrDie(c) + cs.batchV1beta1 = batchv1beta1.NewForConfigOrDie(c) + cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) + cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) + cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) + cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) + cs.coreV1 = corev1.NewForConfigOrDie(c) + cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) + cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) + cs.networkingV1 = networkingv1.NewForConfigOrDie(c) + cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) + cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) + cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) + cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) + cs.rbacV1 = rbacv1.NewForConfigOrDie(c) + cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) + cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) + cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) + cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) + cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) + cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) + cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) + cs.storageV1 = storagev1.NewForConfigOrDie(c) + cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) + return &cs +} + +// New creates a new Clientset for the given RESTClient. 
+func New(c rest.Interface) *Clientset { + var cs Clientset + cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) + cs.appsV1 = appsv1.New(c) + cs.appsV1beta1 = appsv1beta1.New(c) + cs.appsV1beta2 = appsv1beta2.New(c) + cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.New(c) + cs.authenticationV1 = authenticationv1.New(c) + cs.authenticationV1beta1 = authenticationv1beta1.New(c) + cs.authorizationV1 = authorizationv1.New(c) + cs.authorizationV1beta1 = authorizationv1beta1.New(c) + cs.autoscalingV1 = autoscalingv1.New(c) + cs.autoscalingV2beta1 = autoscalingv2beta1.New(c) + cs.autoscalingV2beta2 = autoscalingv2beta2.New(c) + cs.batchV1 = batchv1.New(c) + cs.batchV1beta1 = batchv1beta1.New(c) + cs.batchV2alpha1 = batchv2alpha1.New(c) + cs.certificatesV1beta1 = certificatesv1beta1.New(c) + cs.coordinationV1beta1 = coordinationv1beta1.New(c) + cs.coordinationV1 = coordinationv1.New(c) + cs.coreV1 = corev1.New(c) + cs.eventsV1beta1 = eventsv1beta1.New(c) + cs.extensionsV1beta1 = extensionsv1beta1.New(c) + cs.networkingV1 = networkingv1.New(c) + cs.networkingV1beta1 = networkingv1beta1.New(c) + cs.nodeV1alpha1 = nodev1alpha1.New(c) + cs.nodeV1beta1 = nodev1beta1.New(c) + cs.policyV1beta1 = policyv1beta1.New(c) + cs.rbacV1 = rbacv1.New(c) + cs.rbacV1beta1 = rbacv1beta1.New(c) + cs.rbacV1alpha1 = rbacv1alpha1.New(c) + cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) + cs.schedulingV1beta1 = schedulingv1beta1.New(c) + cs.schedulingV1 = schedulingv1.New(c) + cs.settingsV1alpha1 = settingsv1alpha1.New(c) + cs.storageV1beta1 = storagev1beta1.New(c) + cs.storageV1 = storagev1.New(c) + cs.storageV1alpha1 = storagev1alpha1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go new file mode 100644 index 0000000..b272334 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated clientset. +package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/import.go b/vendor/k8s.io/client-go/kubernetes/import.go new file mode 100644 index 0000000..c4f9a91 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/import.go @@ -0,0 +1,19 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// This file exists to enforce this clientset's vanity import path. + +package kubernetes // import "k8s.io/client-go/kubernetes" diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go new file mode 100644 index 0000000..7dc3756 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go new file mode 100644 index 0000000..8346d26 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -0,0 +1,126 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package scheme + +import ( + admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" + appsv1 "k8s.io/api/apps/v1" + appsv1beta1 "k8s.io/api/apps/v1beta1" + appsv1beta2 "k8s.io/api/apps/v1beta2" + auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" + authenticationv1 "k8s.io/api/authentication/v1" + authenticationv1beta1 "k8s.io/api/authentication/v1beta1" + authorizationv1 "k8s.io/api/authorization/v1" + authorizationv1beta1 "k8s.io/api/authorization/v1beta1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" + autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" + batchv1 "k8s.io/api/batch/v1" + batchv1beta1 "k8s.io/api/batch/v1beta1" + batchv2alpha1 "k8s.io/api/batch/v2alpha1" + certificatesv1beta1 "k8s.io/api/certificates/v1beta1" + coordinationv1 "k8s.io/api/coordination/v1" + coordinationv1beta1 "k8s.io/api/coordination/v1beta1" + corev1 "k8s.io/api/core/v1" + eventsv1beta1 "k8s.io/api/events/v1beta1" + extensionsv1beta1 "k8s.io/api/extensions/v1beta1" + networkingv1 "k8s.io/api/networking/v1" + networkingv1beta1 "k8s.io/api/networking/v1beta1" + nodev1alpha1 "k8s.io/api/node/v1alpha1" + nodev1beta1 "k8s.io/api/node/v1beta1" + policyv1beta1 "k8s.io/api/policy/v1beta1" + rbacv1 "k8s.io/api/rbac/v1" + rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" + rbacv1beta1 "k8s.io/api/rbac/v1beta1" + schedulingv1 "k8s.io/api/scheduling/v1" + schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" + schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" + settingsv1alpha1 "k8s.io/api/settings/v1alpha1" + storagev1 "k8s.io/api/storage/v1" + storagev1alpha1 "k8s.io/api/storage/v1alpha1" + storagev1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + admissionregistrationv1beta1.AddToScheme, + appsv1.AddToScheme, + appsv1beta1.AddToScheme, + appsv1beta2.AddToScheme, + auditregistrationv1alpha1.AddToScheme, + authenticationv1.AddToScheme, + authenticationv1beta1.AddToScheme, + authorizationv1.AddToScheme, + authorizationv1beta1.AddToScheme, + autoscalingv1.AddToScheme, + autoscalingv2beta1.AddToScheme, + autoscalingv2beta2.AddToScheme, + batchv1.AddToScheme, + batchv1beta1.AddToScheme, + batchv2alpha1.AddToScheme, + certificatesv1beta1.AddToScheme, + coordinationv1beta1.AddToScheme, + coordinationv1.AddToScheme, + corev1.AddToScheme, + eventsv1beta1.AddToScheme, + extensionsv1beta1.AddToScheme, + networkingv1.AddToScheme, + networkingv1beta1.AddToScheme, + nodev1alpha1.AddToScheme, + nodev1beta1.AddToScheme, + policyv1beta1.AddToScheme, + rbacv1.AddToScheme, + rbacv1beta1.AddToScheme, + rbacv1alpha1.AddToScheme, + schedulingv1alpha1.AddToScheme, + schedulingv1beta1.AddToScheme, + schedulingv1.AddToScheme, + settingsv1alpha1.AddToScheme, + storagev1beta1.AddToScheme, + storagev1.AddToScheme, + storagev1alpha1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. 
This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go new file mode 100644 index 0000000..b13ea79 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AdmissionregistrationV1beta1Interface interface { + RESTClient() rest.Interface + MutatingWebhookConfigurationsGetter + ValidatingWebhookConfigurationsGetter +} + +// AdmissionregistrationV1beta1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { + return newMutatingWebhookConfigurations(c) +} + +func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { + return newValidatingWebhookConfigurations(c) +} + +// NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AdmissionregistrationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AdmissionregistrationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AdmissionregistrationV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *AdmissionregistrationV1beta1Client { + return &AdmissionregistrationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AdmissionregistrationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go new file mode 100644 index 0000000..2aeb9c9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type MutatingWebhookConfigurationExpansion interface{} + +type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go new file mode 100644 index 0000000..4524896 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type MutatingWebhookConfigurationsGetter interface { + MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface +} + +// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. +type MutatingWebhookConfigurationInterface interface { + Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) + MutatingWebhookConfigurationExpansion +} + +// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface +type mutatingWebhookConfigurations struct { + client rest.Interface +} + +// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations +func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { + return &mutatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. +func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. +func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.MutatingWebhookConfigurationList{} + err = c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. +func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Post(). + Resource("mutatingwebhookconfigurations"). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. +func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Put(). + Resource("mutatingwebhookconfigurations"). + Name(mutatingWebhookConfiguration.Name). + Body(mutatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("mutatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched mutatingWebhookConfiguration. +func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { + result = &v1beta1.MutatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("mutatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go new file mode 100644 index 0000000..7e711b3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/admissionregistration/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. +// A group's client should implement this interface. +type ValidatingWebhookConfigurationsGetter interface { + ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface +} + +// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. +type ValidatingWebhookConfigurationInterface interface { + Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) + List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) + ValidatingWebhookConfigurationExpansion +} + +// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface +type validatingWebhookConfigurations struct { + client rest.Interface +} + +// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations +func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { + return &validatingWebhookConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. +func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Get(). + Resource("validatingwebhookconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. +func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ValidatingWebhookConfigurationList{} + err = c.client.Get(). 
+ Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. +func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Post(). + Resource("validatingwebhookconfigurations"). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. +func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Put(). + Resource("validatingwebhookconfigurations"). + Name(validatingWebhookConfiguration.Name). + Body(validatingWebhookConfiguration). + Do(). + Into(result) + return +} + +// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. +func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("validatingwebhookconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched validatingWebhookConfiguration. +func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { + result = &v1beta1.ValidatingWebhookConfiguration{} + err = c.client.Patch(pt). + Resource("validatingwebhookconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go new file mode 100644 index 0000000..da19c75 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go @@ -0,0 +1,110 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/apps/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AppsV1Interface interface { + RESTClient() rest.Interface + ControllerRevisionsGetter + DaemonSetsGetter + DeploymentsGetter + ReplicaSetsGetter + StatefulSetsGetter +} + +// AppsV1Client is used to interact with features provided by the apps group. +type AppsV1Client struct { + restClient rest.Interface +} + +func (c *AppsV1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + +func (c *AppsV1Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *AppsV1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1Client for the given RESTClient. +func New(c rest.Interface) *AppsV1Client { + return &AppsV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go new file mode 100644 index 0000000..e28e4d2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. +type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. +type ControllerRevisionInterface interface { + Create(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Update(*v1.ControllerRevision) (*v1.ControllerRevision, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ControllerRevision, error) + List(opts metav1.ListOptions) (*v1.ControllerRevisionList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). 
+ Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) { + result = &v1.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go new file mode 100644 index 0000000..a535cda --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "time" + + v1 "k8s.io/api/apps/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. +type DaemonSetInterface interface { + Create(*v1.DaemonSet) (*v1.DaemonSet, error) + Update(*v1.DaemonSet) (*v1.DaemonSet, error) + UpdateStatus(*v1.DaemonSet) (*v1.DaemonSet, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.DaemonSet, error) + List(opts metav1.ListOptions) (*v1.DaemonSetList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. 
+func (c *daemonSets) Update(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *daemonSets) UpdateStatus(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) { + result = &v1.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go new file mode 100644 index 0000000..f9799a4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -0,0 +1,223 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. 
+type DeploymentInterface interface { + Create(*v1.Deployment) (*v1.Deployment, error) + Update(*v1.Deployment) (*v1.Deployment, error) + UpdateStatus(*v1.Deployment) (*v1.Deployment, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Deployment, error) + List(opts metav1.ListOptions) (*v1.DeploymentList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) + GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) { + result = &v1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
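+//
+// A minimal usage sketch via the parent clientset, assuming an in-cluster
+// rest.Config, the "default" namespace, and elided error handling (these
+// names are illustrative assumptions, not defined by this package):
+//
+//	cfg, err := rest.InClusterConfig()      // k8s.io/client-go/rest
+//	if err != nil { /* handle error */ }
+//	cs, err := kubernetes.NewForConfig(cfg) // k8s.io/client-go/kubernetes
+//	if err != nil { /* handle error */ }
+//	// List signature matches this vendored client-go version (no context arg).
+//	deps, err := cs.AppsV1().Deployments("default").List(metav1.ListOptions{})
+//	if err != nil { /* handle error */ }
+//	_ = deps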
+package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go new file mode 100644 index 0000000..88cfe4e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type ControllerRevisionExpansion interface{} + +type DaemonSetExpansion interface{} + +type DeploymentExpansion interface{} + +type ReplicaSetExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go new file mode 100644 index 0000000..ff3504e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -0,0 +1,223 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. 
+type ReplicaSetInterface interface { + Create(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Update(*v1.ReplicaSet) (*v1.ReplicaSet, error) + UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ReplicaSet, error) + List(opts metav1.ListOptions) (*v1.ReplicaSetList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) + GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) { + result = &v1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go new file mode 100644 index 0000000..c12c470 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -0,0 +1,223 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "time" + + v1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. +type StatefulSetInterface interface { + Create(*v1.StatefulSet) (*v1.StatefulSet, error) + Update(*v1.StatefulSet) (*v1.StatefulSet, error) + UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.StatefulSet, error) + List(opts metav1.ListOptions) (*v1.StatefulSetList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) + GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). 
+ Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) { + result = &v1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + Body(scale). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go new file mode 100644 index 0000000..2c9db88 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -0,0 +1,100 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/apps/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AppsV1beta1Interface interface { + RESTClient() rest.Interface + ControllerRevisionsGetter + DeploymentsGetter + StatefulSetsGetter +} + +// AppsV1beta1Client is used to interact with features provided by the apps group. +type AppsV1beta1Client struct { + restClient rest.Interface +} + +func (c *AppsV1beta1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + +func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AppsV1beta1Client { + return &AppsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *AppsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go new file mode 100644 index 0000000..45ddb91 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. +type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. +type ControllerRevisionInterface interface { + Create(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) + Update(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ControllerRevision, error) + List(opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { + result = &v1beta1.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. 
+func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { + result = &v1beta1.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { + result = &v1beta1.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { + result = &v1beta1.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go new file mode 100644 index 0000000..05fdcb7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) + List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go new file mode 100644 index 0000000..113455d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -0,0 +1,25 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type ControllerRevisionExpansion interface{} + +type DeploymentExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go new file mode 100644 index 0000000..c4b35b4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/apps/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. 
+type StatefulSetInterface interface { + Create(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + Update(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + UpdateStatus(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.StatefulSet, error) + List(opts v1.ListOptions) (*v1beta1.StatefulSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. +func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { + result = &v1beta1.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go new file mode 100644 index 0000000..99d677f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -0,0 +1,110 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + v1beta2 "k8s.io/api/apps/v1beta2" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AppsV1beta2Interface interface { + RESTClient() rest.Interface + ControllerRevisionsGetter + DaemonSetsGetter + DeploymentsGetter + ReplicaSetsGetter + StatefulSetsGetter +} + +// AppsV1beta2Client is used to interact with features provided by the apps group. 
+type AppsV1beta2Client struct { + restClient rest.Interface +} + +func (c *AppsV1beta2Client) ControllerRevisions(namespace string) ControllerRevisionInterface { + return newControllerRevisions(c, namespace) +} + +func (c *AppsV1beta2Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *AppsV1beta2Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface { + return newStatefulSets(c, namespace) +} + +// NewForConfig creates a new AppsV1beta2Client for the given config. +func NewForConfig(c *rest.Config) (*AppsV1beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AppsV1beta2Client{client}, nil +} + +// NewForConfigOrDie creates a new AppsV1beta2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AppsV1beta2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AppsV1beta2Client for the given RESTClient. +func New(c rest.Interface) *AppsV1beta2Client { + return &AppsV1beta2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AppsV1beta2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go new file mode 100644 index 0000000..e1d6025 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "time" + + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. +// A group's client should implement this interface. 
+type ControllerRevisionsGetter interface { + ControllerRevisions(namespace string) ControllerRevisionInterface +} + +// ControllerRevisionInterface has methods to work with ControllerRevision resources. +type ControllerRevisionInterface interface { + Create(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) + Update(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.ControllerRevision, error) + List(opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) + ControllerRevisionExpansion +} + +// controllerRevisions implements ControllerRevisionInterface +type controllerRevisions struct { + client rest.Interface + ns string +} + +// newControllerRevisions returns a ControllerRevisions +func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions { + return &controllerRevisions{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. +func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { + result = &v1beta2.ControllerRevision{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. +func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.ControllerRevisionList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested controllerRevisions. +func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. +func (c *controllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { + result = &v1beta2.ControllerRevision{} + err = c.client.Post(). + Namespace(c.ns). + Resource("controllerrevisions"). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. 
+func (c *controllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { + result = &v1beta2.ControllerRevision{} + err = c.client.Put(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(controllerRevision.Name). + Body(controllerRevision). + Do(). + Into(result) + return +} + +// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. +func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("controllerrevisions"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched controllerRevision. +func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { + result = &v1beta2.ControllerRevision{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("controllerrevisions"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go new file mode 100644 index 0000000..f8b7ac2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "time" + + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. 
+type DaemonSetInterface interface { + Create(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) + Update(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) + UpdateStatus(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.DaemonSet, error) + List(opts v1.ListOptions) (*v1beta2.DaemonSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *daemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { + result = &v1beta2.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go new file mode 100644 index 0000000..510250b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "time" + + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. 
+type DeploymentInterface interface { + Create(*v1beta2.Deployment) (*v1beta2.Deployment, error) + Update(*v1beta2.Deployment) (*v1beta2.Deployment, error) + UpdateStatus(*v1beta2.Deployment) (*v1beta2.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.Deployment, error) + List(opts v1.ListOptions) (*v1beta2.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). + Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *deployments) UpdateStatus(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { + result = &v1beta2.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go new file mode 100644 index 0000000..56518ef --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go new file mode 100644 index 0000000..6a21749 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta2 + +type ControllerRevisionExpansion interface{} + +type DaemonSetExpansion interface{} + +type DeploymentExpansion interface{} + +type ReplicaSetExpansion interface{} + +type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go new file mode 100644 index 0000000..7b73877 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "time" + + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. +type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) + Update(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) + UpdateStatus(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.ReplicaSet, error) + List(opts v1.ListOptions) (*v1beta2.ReplicaSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). 
+ Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { + result = &v1beta2.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go new file mode 100644 index 0000000..de7c3db --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -0,0 +1,222 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + "time" + + v1beta2 "k8s.io/api/apps/v1beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StatefulSetsGetter has a method to return a StatefulSetInterface. +// A group's client should implement this interface. +type StatefulSetsGetter interface { + StatefulSets(namespace string) StatefulSetInterface +} + +// StatefulSetInterface has methods to work with StatefulSet resources. +type StatefulSetInterface interface { + Create(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) + Update(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) + UpdateStatus(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta2.StatefulSet, error) + List(opts v1.ListOptions) (*v1beta2.StatefulSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) + GetScale(statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) + UpdateScale(statefulSetName string, scale *v1beta2.Scale) (*v1beta2.Scale, error) + + StatefulSetExpansion +} + +// statefulSets implements StatefulSetInterface +type statefulSets struct { + client rest.Interface + ns string +} + +// newStatefulSets returns a StatefulSets +func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { + return &statefulSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. +func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. +func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta2.StatefulSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested statefulSets. 
+func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("statefulsets"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. +func (c *statefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + Body(statefulSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *statefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSet.Name). + SubResource("status"). + Body(statefulSet). + Do(). + Into(result) + return +} + +// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. +func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("statefulsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched statefulSet. +func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { + result = &v1beta2.StatefulSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("statefulsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. +func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { + result = &v1beta2.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. 
Returns the server's representation of the scale, and an error, if there is any. +func (c *statefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) { + result = &v1beta2.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("statefulsets"). + Name(statefulSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go new file mode 100644 index 0000000..f007b05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AuditregistrationV1alpha1Interface interface { + RESTClient() rest.Interface + AuditSinksGetter +} + +// AuditregistrationV1alpha1Client is used to interact with features provided by the auditregistration.k8s.io group. +type AuditregistrationV1alpha1Client struct { + restClient rest.Interface +} + +func (c *AuditregistrationV1alpha1Client) AuditSinks() AuditSinkInterface { + return newAuditSinks(c) +} + +// NewForConfig creates a new AuditregistrationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuditregistrationV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuditregistrationV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuditregistrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AuditregistrationV1alpha1Client { + return &AuditregistrationV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *AuditregistrationV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go new file mode 100644 index 0000000..414d480 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// AuditSinksGetter has a method to return a AuditSinkInterface. +// A group's client should implement this interface. +type AuditSinksGetter interface { + AuditSinks() AuditSinkInterface +} + +// AuditSinkInterface has methods to work with AuditSink resources. +type AuditSinkInterface interface { + Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) + Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error) + List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) + AuditSinkExpansion +} + +// auditSinks implements AuditSinkInterface +type auditSinks struct { + client rest.Interface +} + +// newAuditSinks returns a AuditSinks +func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks { + return &auditSinks{ + client: c.RESTClient(), + } +} + +// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. +func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Get(). + Resource("auditsinks"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. +func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.AuditSinkList{} + err = c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested auditSinks. 
+func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("auditsinks"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Post(). + Resource("auditsinks"). + Body(auditSink). + Do(). + Into(result) + return +} + +// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. +func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Put(). + Resource("auditsinks"). + Name(auditSink.Name). + Body(auditSink). + Do(). + Into(result) + return +} + +// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. +func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("auditsinks"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("auditsinks"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched auditSink. +func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { + result = &v1alpha1.AuditSink{} + err = c.client.Patch(pt). + Resource("auditsinks"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go new file mode 100644 index 0000000..df51baa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go new file mode 100644 index 0000000..f0f5117 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type AuditSinkExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go new file mode 100644 index 0000000..3bdcee5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/authentication/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AuthenticationV1Interface interface { + RESTClient() rest.Interface + TokenReviewsGetter +} + +// AuthenticationV1Client is used to interact with features provided by the authentication.k8s.io group. +type AuthenticationV1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1Client for the given RESTClient. 
+func New(c rest.Interface) *AuthenticationV1Client { + return &AuthenticationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go new file mode 100644 index 0000000..177209e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go new file mode 100644 index 0000000..25a8d6a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. +type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client rest.Interface +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { + return &tokenReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go new file mode 100644 index 0000000..ea21f1b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authenticationapi "k8s.io/api/authentication/v1" +) + +type TokenReviewExpansion interface { + Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +} + +func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + result = &authenticationapi.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + Body(tokenReview). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go new file mode 100644 index 0000000..7f3334a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/authentication/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AuthenticationV1beta1Interface interface { + RESTClient() rest.Interface + TokenReviewsGetter +} + +// AuthenticationV1beta1Client is used to interact with features provided by the authentication.k8s.io group. +type AuthenticationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { + return newTokenReviews(c) +} + +// NewForConfig creates a new AuthenticationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthenticationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthenticationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthenticationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthenticationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthenticationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AuthenticationV1beta1Client { + return &AuthenticationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthenticationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go new file mode 100644 index 0000000..f6df769 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go new file mode 100644 index 0000000..0ac3561 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// TokenReviewsGetter has a method to return a TokenReviewInterface. +// A group's client should implement this interface. +type TokenReviewsGetter interface { + TokenReviews() TokenReviewInterface +} + +// TokenReviewInterface has methods to work with TokenReview resources. +type TokenReviewInterface interface { + TokenReviewExpansion +} + +// tokenReviews implements TokenReviewInterface +type tokenReviews struct { + client rest.Interface +} + +// newTokenReviews returns a TokenReviews +func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { + return &tokenReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go new file mode 100644 index 0000000..8f186fa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + authenticationapi "k8s.io/api/authentication/v1beta1" +) + +type TokenReviewExpansion interface { + Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) +} + +func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { + result = &authenticationapi.TokenReview{} + err = c.client.Post(). + Resource("tokenreviews"). + Body(tokenReview). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go new file mode 100644 index 0000000..e84b900 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/authorization/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AuthorizationV1Interface interface { + RESTClient() rest.Interface + LocalSubjectAccessReviewsGetter + SelfSubjectAccessReviewsGetter + SelfSubjectRulesReviewsGetter + SubjectAccessReviewsGetter +} + +// AuthorizationV1Client is used to interact with features provided by the authorization.k8s.io group. +type AuthorizationV1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { + return newSelfSubjectAccessReviews(c) +} + +func (c *AuthorizationV1Client) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface { + return newSelfSubjectRulesReviews(c) +} + +func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +// NewForConfig creates a new AuthorizationV1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthorizationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthorizationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1Client for the given RESTClient. 
+func New(c rest.Interface) *AuthorizationV1Client { + return &AuthorizationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthorizationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go new file mode 100644 index 0000000..177209e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go new file mode 100644 index 0000000..0292c78 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. +type LocalSubjectAccessReviewInterface interface { + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + client rest.Interface + ns string +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go new file mode 100644 index 0000000..0c123b0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/api/authorization/v1" +) + +type LocalSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) +} + +func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + result = &authorizationapi.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go new file mode 100644 index 0000000..1e3a458 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. +// A group's client should implement this interface. +type SelfSubjectAccessReviewsGetter interface { + SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface +} + +// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. +type SelfSubjectAccessReviewInterface interface { + SelfSubjectAccessReviewExpansion +} + +// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type selfSubjectAccessReviews struct { + client rest.Interface +} + +// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews +func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { + return &selfSubjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go new file mode 100644 index 0000000..5b70a27 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/api/authorization/v1" +) + +type SelfSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) +} + +func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + result = &authorizationapi.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go new file mode 100644 index 0000000..50a0233 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. +// A group's client should implement this interface. +type SelfSubjectRulesReviewsGetter interface { + SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface +} + +// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. +type SelfSubjectRulesReviewInterface interface { + SelfSubjectRulesReviewExpansion +} + +// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type selfSubjectRulesReviews struct { + client rest.Interface +} + +// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews +func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews { + return &selfSubjectRulesReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go new file mode 100644 index 0000000..e2cad88 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/api/authorization/v1" +) + +type SelfSubjectRulesReviewExpansion interface { + Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) +} + +func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + result = &authorizationapi.SelfSubjectRulesReview{} + err = c.client.Post(). + Resource("selfsubjectrulesreviews"). + Body(srr). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go new file mode 100644 index 0000000..9c09008 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. 
+// A group's client should implement this interface. +type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. +type SubjectAccessReviewInterface interface { + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + client rest.Interface +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go new file mode 100644 index 0000000..b5ed87d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authorizationapi "k8s.io/api/authorization/v1" +) + +// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. +type SubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) +} + +func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + result = &authorizationapi.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go new file mode 100644 index 0000000..7f236f6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/authorization/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AuthorizationV1beta1Interface interface { + RESTClient() rest.Interface + LocalSubjectAccessReviewsGetter + SelfSubjectAccessReviewsGetter + SelfSubjectRulesReviewsGetter + SubjectAccessReviewsGetter +} + +// AuthorizationV1beta1Client is used to interact with features provided by the authorization.k8s.io group. +type AuthorizationV1beta1Client struct { + restClient rest.Interface +} + +func (c *AuthorizationV1beta1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { + return newLocalSubjectAccessReviews(c, namespace) +} + +func (c *AuthorizationV1beta1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { + return newSelfSubjectAccessReviews(c) +} + +func (c *AuthorizationV1beta1Client) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface { + return newSelfSubjectRulesReviews(c) +} + +func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewInterface { + return newSubjectAccessReviews(c) +} + +// NewForConfig creates a new AuthorizationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AuthorizationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AuthorizationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AuthorizationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AuthorizationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AuthorizationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *AuthorizationV1beta1Client { + return &AuthorizationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AuthorizationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go new file mode 100644 index 0000000..f6df769 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go new file mode 100644 index 0000000..f5e86a7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. +// A group's client should implement this interface. +type LocalSubjectAccessReviewsGetter interface { + LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface +} + +// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. +type LocalSubjectAccessReviewInterface interface { + LocalSubjectAccessReviewExpansion +} + +// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface +type localSubjectAccessReviews struct { + client rest.Interface + ns string +} + +// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews +func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { + return &localSubjectAccessReviews{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go new file mode 100644 index 0000000..bf1b8a5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/api/authorization/v1beta1" +) + +type LocalSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) +} + +func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { + result = &authorizationapi.LocalSubjectAccessReview{} + err = c.client.Post(). + Namespace(c.ns). + Resource("localsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go new file mode 100644 index 0000000..906712c --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. +// A group's client should implement this interface. +type SelfSubjectAccessReviewsGetter interface { + SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface +} + +// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. +type SelfSubjectAccessReviewInterface interface { + SelfSubjectAccessReviewExpansion +} + +// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface +type selfSubjectAccessReviews struct { + client rest.Interface +} + +// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews +func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { + return &selfSubjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go new file mode 100644 index 0000000..58fecfd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/api/authorization/v1beta1" +) + +type SelfSubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) +} + +func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { + result = &authorizationapi.SelfSubjectAccessReview{} + err = c.client.Post(). + Resource("selfsubjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go new file mode 100644 index 0000000..56c0f99 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. +// A group's client should implement this interface. +type SelfSubjectRulesReviewsGetter interface { + SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface +} + +// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. +type SelfSubjectRulesReviewInterface interface { + SelfSubjectRulesReviewExpansion +} + +// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface +type selfSubjectRulesReviews struct { + client rest.Interface +} + +// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews +func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews { + return &selfSubjectRulesReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go new file mode 100644 index 0000000..5f1f37e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go @@ -0,0 +1,35 @@ +/* +Copyright 2017 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/api/authorization/v1beta1" +) + +type SelfSubjectRulesReviewExpansion interface { + Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) +} + +func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { + result = &authorizationapi.SelfSubjectRulesReview{} + err = c.client.Post(). + Resource("selfsubjectrulesreviews"). + Body(srr). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go new file mode 100644 index 0000000..79f1ec5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. +// A group's client should implement this interface. +type SubjectAccessReviewsGetter interface { + SubjectAccessReviews() SubjectAccessReviewInterface +} + +// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. +type SubjectAccessReviewInterface interface { + SubjectAccessReviewExpansion +} + +// subjectAccessReviews implements SubjectAccessReviewInterface +type subjectAccessReviews struct { + client rest.Interface +} + +// newSubjectAccessReviews returns a SubjectAccessReviews +func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { + return &subjectAccessReviews{ + client: c.RESTClient(), + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go new file mode 100644 index 0000000..4f93689 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go @@ -0,0 +1,36 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + authorizationapi "k8s.io/api/authorization/v1beta1" +) + +// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. +type SubjectAccessReviewExpansion interface { + Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) +} + +func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { + result = &authorizationapi.SubjectAccessReview{} + err = c.client.Post(). + Resource("subjectaccessreviews"). + Body(sar). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go new file mode 100644 index 0000000..2bd49e2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/autoscaling/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV1Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV1Client is used to interact with features provided by the autoscaling group. +type AutoscalingV1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV1Client for the given config. +func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV1Client for the given RESTClient. 
+func New(c rest.Interface) *AutoscalingV1Client { + return &AutoscalingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go new file mode 100644 index 0000000..c60028b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go new file mode 100644 index 0000000..0e0839f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/autoscaling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. +type HorizontalPodAutoscalerInterface interface { + Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error) + List(opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { + result = &v1.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go new file mode 100644 index 0000000..3a49b26 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2beta1 + +import ( + v2beta1 "k8s.io/api/autoscaling/v2beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV2beta1Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV2beta1Client is used to interact with features provided by the autoscaling group. +type AutoscalingV2beta1Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV2beta1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV2beta1Client for the given config. +func NewForConfig(c *rest.Config) (*AutoscalingV2beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV2beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV2beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV2beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV2beta1Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV2beta1Client { + return &AutoscalingV2beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV2beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go new file mode 100644 index 0000000..06fd344 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v2beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go new file mode 100644 index 0000000..6f1704f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2beta1 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go new file mode 100644 index 0000000..02d5cfb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2beta1 + +import ( + "time" + + v2beta1 "k8s.io/api/autoscaling/v2beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
+type HorizontalPodAutoscalerInterface interface { + Create(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) + Update(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) + UpdateStatus(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) + List(opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2beta1.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. 
Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { + result = &v2beta1.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go new file mode 100644 index 0000000..03fe25e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v2beta2 + +import ( + v2beta2 "k8s.io/api/autoscaling/v2beta2" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type AutoscalingV2beta2Interface interface { + RESTClient() rest.Interface + HorizontalPodAutoscalersGetter +} + +// AutoscalingV2beta2Client is used to interact with features provided by the autoscaling group. +type AutoscalingV2beta2Client struct { + restClient rest.Interface +} + +func (c *AutoscalingV2beta2Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { + return newHorizontalPodAutoscalers(c, namespace) +} + +// NewForConfig creates a new AutoscalingV2beta2Client for the given config. +func NewForConfig(c *rest.Config) (*AutoscalingV2beta2Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &AutoscalingV2beta2Client{client}, nil +} + +// NewForConfigOrDie creates a new AutoscalingV2beta2Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *AutoscalingV2beta2Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new AutoscalingV2beta2Client for the given RESTClient. +func New(c rest.Interface) *AutoscalingV2beta2Client { + return &AutoscalingV2beta2Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2beta2.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *AutoscalingV2beta2Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go new file mode 100644 index 0000000..c600965 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v2beta2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go new file mode 100644 index 0000000..822e062 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2beta2 + +type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go new file mode 100644 index 0000000..91a0fa6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2beta2 + +import ( + "time" + + v2beta2 "k8s.io/api/autoscaling/v2beta2" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. +// A group's client should implement this interface. +type HorizontalPodAutoscalersGetter interface { + HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface +} + +// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. 
+type HorizontalPodAutoscalerInterface interface { + Create(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) + Update(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) + UpdateStatus(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) + List(opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) + HorizontalPodAutoscalerExpansion +} + +// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface +type horizontalPodAutoscalers struct { + client rest.Interface + ns string +} + +// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers +func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers { + return &horizontalPodAutoscalers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. +func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { + result = &v2beta2.HorizontalPodAutoscaler{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. +func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2beta2.HorizontalPodAutoscalerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. +func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { + result = &v2beta2.HorizontalPodAutoscaler{} + err = c.client.Post(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Update takes the representation of a horizontalPodAutoscaler and updates it. 
Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. +func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { + result = &v2beta2.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { + result = &v2beta2.HorizontalPodAutoscaler{} + err = c.client.Put(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(horizontalPodAutoscaler.Name). + SubResource("status"). + Body(horizontalPodAutoscaler). + Do(). + Into(result) + return +} + +// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. +func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched horizontalPodAutoscaler. +func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { + result = &v2beta2.HorizontalPodAutoscaler{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("horizontalpodautoscalers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go new file mode 100644 index 0000000..d5e35e6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + v1 "k8s.io/api/batch/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type BatchV1Interface interface { + RESTClient() rest.Interface + JobsGetter +} + +// BatchV1Client is used to interact with features provided by the batch group. +type BatchV1Client struct { + restClient rest.Interface +} + +func (c *BatchV1Client) Jobs(namespace string) JobInterface { + return newJobs(c, namespace) +} + +// NewForConfig creates a new BatchV1Client for the given config. +func NewForConfig(c *rest.Config) (*BatchV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BatchV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV1Client for the given RESTClient. +func New(c rest.Interface) *BatchV1Client { + return &BatchV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go new file mode 100644 index 0000000..dc41429 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type JobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go new file mode 100644 index 0000000..b55c602 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/batch/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// JobsGetter has a method to return a JobInterface. +// A group's client should implement this interface. +type JobsGetter interface { + Jobs(namespace string) JobInterface +} + +// JobInterface has methods to work with Job resources. +type JobInterface interface { + Create(*v1.Job) (*v1.Job, error) + Update(*v1.Job) (*v1.Job, error) + UpdateStatus(*v1.Job) (*v1.Job, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Job, error) + List(opts metav1.ListOptions) (*v1.JobList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) + JobExpansion +} + +// jobs implements JobInterface +type jobs struct { + client rest.Interface + ns string +} + +// newJobs returns a Jobs +func newJobs(c *BatchV1Client, namespace string) *jobs { + return &jobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the job, and returns the corresponding job object, and an error if there is any. +func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Jobs that match those selectors. +func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.JobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("jobs"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested jobs. +func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Post(). + Namespace(c.ns). + Resource("jobs"). + Body(job). + Do(). + Into(result) + return +} + +// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. +func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + Body(job). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Put(). + Namespace(c.ns). + Resource("jobs"). + Name(job.Name). + SubResource("status"). + Body(job). + Do(). + Into(result) + return +} + +// Delete takes name of the job and deletes it. Returns an error if one occurs. +func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("jobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched job. +func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { + result = &v1.Job{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("jobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go new file mode 100644 index 0000000..aa71ca8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/batch/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type BatchV1beta1Interface interface { + RESTClient() rest.Interface + CronJobsGetter +} + +// BatchV1beta1Client is used to interact with features provided by the batch group. +type BatchV1beta1Client struct { + restClient rest.Interface +} + +func (c *BatchV1beta1Client) CronJobs(namespace string) CronJobInterface { + return newCronJobs(c, namespace) +} + +// NewForConfig creates a new BatchV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*BatchV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BatchV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV1beta1Client for the given RESTClient. +func New(c rest.Interface) *BatchV1beta1Client { + return &BatchV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *BatchV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go new file mode 100644 index 0000000..d89d2fa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/batch/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CronJobsGetter has a method to return a CronJobInterface. +// A group's client should implement this interface. +type CronJobsGetter interface { + CronJobs(namespace string) CronJobInterface +} + +// CronJobInterface has methods to work with CronJob resources. +type CronJobInterface interface { + Create(*v1beta1.CronJob) (*v1beta1.CronJob, error) + Update(*v1beta1.CronJob) (*v1beta1.CronJob, error) + UpdateStatus(*v1beta1.CronJob) (*v1beta1.CronJob, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CronJob, error) + List(opts v1.ListOptions) (*v1beta1.CronJobList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) + CronJobExpansion +} + +// cronJobs implements CronJobInterface +type cronJobs struct { + client rest.Interface + ns string +} + +// newCronJobs returns a CronJobs +func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { + return &cronJobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. +func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. +func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CronJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cronJobs. +func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cronjobs"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. 
+func (c *cronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + Body(cronJob). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *cronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + SubResource("status"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. +func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cronJob. +func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { + result = &v1beta1.CronJob{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cronjobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go new file mode 100644 index 0000000..145e14a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type CronJobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go new file mode 100644 index 0000000..e6c6306 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + v2alpha1 "k8s.io/api/batch/v2alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type BatchV2alpha1Interface interface { + RESTClient() rest.Interface + CronJobsGetter +} + +// BatchV2alpha1Client is used to interact with features provided by the batch group. +type BatchV2alpha1Client struct { + restClient rest.Interface +} + +func (c *BatchV2alpha1Client) CronJobs(namespace string) CronJobInterface { + return newCronJobs(c, namespace) +} + +// NewForConfig creates a new BatchV2alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*BatchV2alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &BatchV2alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new BatchV2alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *BatchV2alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new BatchV2alpha1Client for the given RESTClient. +func New(c rest.Interface) *BatchV2alpha1Client { + return &BatchV2alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v2alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *BatchV2alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go new file mode 100644 index 0000000..19123b6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +import ( + "time" + + v2alpha1 "k8s.io/api/batch/v2alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CronJobsGetter has a method to return a CronJobInterface. +// A group's client should implement this interface. +type CronJobsGetter interface { + CronJobs(namespace string) CronJobInterface +} + +// CronJobInterface has methods to work with CronJob resources. +type CronJobInterface interface { + Create(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + Update(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + UpdateStatus(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v2alpha1.CronJob, error) + List(opts v1.ListOptions) (*v2alpha1.CronJobList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) + CronJobExpansion +} + +// cronJobs implements CronJobInterface +type cronJobs struct { + client rest.Interface + ns string +} + +// newCronJobs returns a CronJobs +func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs { + return &cronJobs{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. +func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CronJobs that match those selectors. +func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v2alpha1.CronJobList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cronJobs. +func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Post(). + Namespace(c.ns). + Resource("cronjobs"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. +func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + Body(cronJob). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Put(). + Namespace(c.ns). + Resource("cronjobs"). + Name(cronJob.Name). + SubResource("status"). + Body(cronJob). + Do(). + Into(result) + return +} + +// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. +func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("cronjobs"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cronJob. +func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { + result = &v2alpha1.CronJob{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("cronjobs"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go new file mode 100644 index 0000000..3efe0d2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v2alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go new file mode 100644 index 0000000..34dafc4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v2alpha1 + +type CronJobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go new file mode 100644 index 0000000..baac42e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/certificates/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CertificatesV1beta1Interface interface { + RESTClient() rest.Interface + CertificateSigningRequestsGetter +} + +// CertificatesV1beta1Client is used to interact with features provided by the certificates.k8s.io group. +type CertificatesV1beta1Client struct { + restClient rest.Interface +} + +func (c *CertificatesV1beta1Client) CertificateSigningRequests() CertificateSigningRequestInterface { + return newCertificateSigningRequests(c) +} + +// NewForConfig creates a new CertificatesV1beta1Client for the given config. 
+func NewForConfig(c *rest.Config) (*CertificatesV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CertificatesV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new CertificatesV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CertificatesV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CertificatesV1beta1Client for the given RESTClient. +func New(c rest.Interface) *CertificatesV1beta1Client { + return &CertificatesV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CertificatesV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go new file mode 100644 index 0000000..712d3a0 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/certificates/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. +// A group's client should implement this interface. +type CertificateSigningRequestsGetter interface { + CertificateSigningRequests() CertificateSigningRequestInterface +} + +// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. 
+type CertificateSigningRequestInterface interface { + Create(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + Update(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + UpdateStatus(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) + List(opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) + CertificateSigningRequestExpansion +} + +// certificateSigningRequests implements CertificateSigningRequestInterface +type certificateSigningRequests struct { + client rest.Interface +} + +// newCertificateSigningRequests returns a CertificateSigningRequests +func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { + return &certificateSigningRequests{ + client: c.RESTClient(), + } +} + +// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. +func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. +func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CertificateSigningRequestList{} + err = c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested certificateSigningRequests. +func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("certificatesigningrequests"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. +func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Post(). + Resource("certificatesigningrequests"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. 
+func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + SubResource("status"). + Body(certificateSigningRequest). + Do(). + Into(result) + return +} + +// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. +func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("certificatesigningrequests"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("certificatesigningrequests"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched certificateSigningRequest. +func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { + result = &v1beta1.CertificateSigningRequest{} + err = c.client.Patch(pt). + Resource("certificatesigningrequests"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go new file mode 100644 index 0000000..c63b806 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go @@ -0,0 +1,37 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + certificates "k8s.io/api/certificates/v1beta1" +) + +type CertificateSigningRequestExpansion interface { + UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) +} + +func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { + result = &certificates.CertificateSigningRequest{} + err = c.client.Put(). + Resource("certificatesigningrequests"). + Name(certificateSigningRequest.Name). + Body(certificateSigningRequest). + SubResource("approval"). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go new file mode 100644 index 0000000..f6df769 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go @@ -0,0 +1,19 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go new file mode 100644 index 0000000..9b566f3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/coordination/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoordinationV1Interface interface { + RESTClient() rest.Interface + LeasesGetter +} + +// CoordinationV1Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1Client struct { + restClient rest.Interface +} + +func (c *CoordinationV1Client) Leases(namespace string) LeaseInterface { + return newLeases(c, namespace) +} + +// NewForConfig creates a new CoordinationV1Client for the given config. +func NewForConfig(c *rest.Config) (*CoordinationV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoordinationV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoordinationV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoordinationV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoordinationV1Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1Client { + return &CoordinationV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoordinationV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go new file mode 100644 index 0000000..ab24f37 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type LeaseExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go new file mode 100644 index 0000000..b6cf1b6 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/coordination/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// LeasesGetter has a method to return a LeaseInterface. +// A group's client should implement this interface. +type LeasesGetter interface { + Leases(namespace string) LeaseInterface +} + +// LeaseInterface has methods to work with Lease resources. +type LeaseInterface interface { + Create(*v1.Lease) (*v1.Lease, error) + Update(*v1.Lease) (*v1.Lease, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Lease, error) + List(opts metav1.ListOptions) (*v1.LeaseList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) + LeaseExpansion +} + +// leases implements LeaseInterface +type leases struct { + client rest.Interface + ns string +} + +// newLeases returns a Leases +func newLeases(c *CoordinationV1Client, namespace string) *leases { + return &leases{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. +func (c *leases) Get(name string, options metav1.GetOptions) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Get(). + Namespace(c.ns). + Resource("leases"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Leases that match those selectors. 
+func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.LeaseList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested leases. +func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. +func (c *leases) Create(lease *v1.Lease) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Post(). + Namespace(c.ns). + Resource("leases"). + Body(lease). + Do(). + Into(result) + return +} + +// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. +func (c *leases) Update(lease *v1.Lease) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Put(). + Namespace(c.ns). + Resource("leases"). + Name(lease.Name). + Body(lease). + Do(). + Into(result) + return +} + +// Delete takes name of the lease and deletes it. Returns an error if one occurs. +func (c *leases) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("leases"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *leases) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched lease. +func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) { + result = &v1.Lease{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("leases"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go new file mode 100644 index 0000000..91a7648 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/coordination/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoordinationV1beta1Interface interface { + RESTClient() rest.Interface + LeasesGetter +} + +// CoordinationV1beta1Client is used to interact with features provided by the coordination.k8s.io group. +type CoordinationV1beta1Client struct { + restClient rest.Interface +} + +func (c *CoordinationV1beta1Client) Leases(namespace string) LeaseInterface { + return newLeases(c, namespace) +} + +// NewForConfig creates a new CoordinationV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*CoordinationV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoordinationV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoordinationV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoordinationV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoordinationV1beta1Client for the given RESTClient. +func New(c rest.Interface) *CoordinationV1beta1Client { + return &CoordinationV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoordinationV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go new file mode 100644 index 0000000..dfd180d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type LeaseExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go new file mode 100644 index 0000000..490d815 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/coordination/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// LeasesGetter has a method to return a LeaseInterface. +// A group's client should implement this interface. +type LeasesGetter interface { + Leases(namespace string) LeaseInterface +} + +// LeaseInterface has methods to work with Lease resources. +type LeaseInterface interface { + Create(*v1beta1.Lease) (*v1beta1.Lease, error) + Update(*v1beta1.Lease) (*v1beta1.Lease, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Lease, error) + List(opts v1.ListOptions) (*v1beta1.LeaseList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) + LeaseExpansion +} + +// leases implements LeaseInterface +type leases struct { + client rest.Interface + ns string +} + +// newLeases returns a Leases +func newLeases(c *CoordinationV1beta1Client, namespace string) *leases { + return &leases{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. 
+func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { + result = &v1beta1.Lease{} + err = c.client.Get(). + Namespace(c.ns). + Resource("leases"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Leases that match those selectors. +func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.LeaseList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested leases. +func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. +func (c *leases) Create(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { + result = &v1beta1.Lease{} + err = c.client.Post(). + Namespace(c.ns). + Resource("leases"). + Body(lease). + Do(). + Into(result) + return +} + +// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. +func (c *leases) Update(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { + result = &v1beta1.Lease{} + err = c.client.Put(). + Namespace(c.ns). + Resource("leases"). + Name(lease.Name). + Body(lease). + Do(). + Into(result) + return +} + +// Delete takes name of the lease and deletes it. Returns an error if one occurs. +func (c *leases) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("leases"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("leases"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched lease. +func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) { + result = &v1beta1.Lease{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("leases"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go new file mode 100644 index 0000000..302b2fd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ComponentStatusesGetter has a method to return a ComponentStatusInterface. +// A group's client should implement this interface. +type ComponentStatusesGetter interface { + ComponentStatuses() ComponentStatusInterface +} + +// ComponentStatusInterface has methods to work with ComponentStatus resources. +type ComponentStatusInterface interface { + Create(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Update(*v1.ComponentStatus) (*v1.ComponentStatus, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ComponentStatus, error) + List(opts metav1.ListOptions) (*v1.ComponentStatusList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) + ComponentStatusExpansion +} + +// componentStatuses implements ComponentStatusInterface +type componentStatuses struct { + client rest.Interface +} + +// newComponentStatuses returns a ComponentStatuses +func newComponentStatuses(c *CoreV1Client) *componentStatuses { + return &componentStatuses{ + client: c.RESTClient(), + } +} + +// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. +func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Get(). + Resource("componentstatuses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. +func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ComponentStatusList{} + err = c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested componentStatuses. +func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("componentstatuses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch() +} + +// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Post(). + Resource("componentstatuses"). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. +func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Put(). + Resource("componentstatuses"). + Name(componentStatus.Name). + Body(componentStatus). + Do(). + Into(result) + return +} + +// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. +func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("componentstatuses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("componentstatuses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched componentStatus. +func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { + result = &v1.ComponentStatus{} + err = c.client.Patch(pt). + Resource("componentstatuses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go new file mode 100644 index 0000000..18ce954 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ConfigMapsGetter has a method to return a ConfigMapInterface. +// A group's client should implement this interface. 
+type ConfigMapsGetter interface { + ConfigMaps(namespace string) ConfigMapInterface +} + +// ConfigMapInterface has methods to work with ConfigMap resources. +type ConfigMapInterface interface { + Create(*v1.ConfigMap) (*v1.ConfigMap, error) + Update(*v1.ConfigMap) (*v1.ConfigMap, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error) + List(opts metav1.ListOptions) (*v1.ConfigMapList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) + ConfigMapExpansion +} + +// configMaps implements ConfigMapInterface +type configMaps struct { + client rest.Interface + ns string +} + +// newConfigMaps returns a ConfigMaps +func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { + return &configMaps{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. +func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. +func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ConfigMapList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested configMaps. +func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Post(). + Namespace(c.ns). + Resource("configmaps"). + Body(configMap). + Do(). + Into(result) + return +} + +// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. +func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Put(). + Namespace(c.ns). + Resource("configmaps"). + Name(configMap.Name). + Body(configMap). + Do(). + Into(result) + return +} + +// Delete takes name of the configMap and deletes it. Returns an error if one occurs. +func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + Name(name). + Body(options). + Do(). 
+ Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("configmaps"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched configMap. +func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { + result = &v1.ConfigMap{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("configmaps"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go new file mode 100644 index 0000000..044a28e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go @@ -0,0 +1,165 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/core/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type CoreV1Interface interface { + RESTClient() rest.Interface + ComponentStatusesGetter + ConfigMapsGetter + EndpointsGetter + EventsGetter + LimitRangesGetter + NamespacesGetter + NodesGetter + PersistentVolumesGetter + PersistentVolumeClaimsGetter + PodsGetter + PodTemplatesGetter + ReplicationControllersGetter + ResourceQuotasGetter + SecretsGetter + ServicesGetter + ServiceAccountsGetter +} + +// CoreV1Client is used to interact with features provided by the group. 
+type CoreV1Client struct { + restClient rest.Interface +} + +func (c *CoreV1Client) ComponentStatuses() ComponentStatusInterface { + return newComponentStatuses(c) +} + +func (c *CoreV1Client) ConfigMaps(namespace string) ConfigMapInterface { + return newConfigMaps(c, namespace) +} + +func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface { + return newEndpoints(c, namespace) +} + +func (c *CoreV1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +func (c *CoreV1Client) LimitRanges(namespace string) LimitRangeInterface { + return newLimitRanges(c, namespace) +} + +func (c *CoreV1Client) Namespaces() NamespaceInterface { + return newNamespaces(c) +} + +func (c *CoreV1Client) Nodes() NodeInterface { + return newNodes(c) +} + +func (c *CoreV1Client) PersistentVolumes() PersistentVolumeInterface { + return newPersistentVolumes(c) +} + +func (c *CoreV1Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { + return newPersistentVolumeClaims(c, namespace) +} + +func (c *CoreV1Client) Pods(namespace string) PodInterface { + return newPods(c, namespace) +} + +func (c *CoreV1Client) PodTemplates(namespace string) PodTemplateInterface { + return newPodTemplates(c, namespace) +} + +func (c *CoreV1Client) ReplicationControllers(namespace string) ReplicationControllerInterface { + return newReplicationControllers(c, namespace) +} + +func (c *CoreV1Client) ResourceQuotas(namespace string) ResourceQuotaInterface { + return newResourceQuotas(c, namespace) +} + +func (c *CoreV1Client) Secrets(namespace string) SecretInterface { + return newSecrets(c, namespace) +} + +func (c *CoreV1Client) Services(namespace string) ServiceInterface { + return newServices(c, namespace) +} + +func (c *CoreV1Client) ServiceAccounts(namespace string) ServiceAccountInterface { + return newServiceAccounts(c, namespace) +} + +// NewForConfig creates a new CoreV1Client for the given config. +func NewForConfig(c *rest.Config) (*CoreV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &CoreV1Client{client}, nil +} + +// NewForConfigOrDie creates a new CoreV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *CoreV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new CoreV1Client for the given RESTClient. +func New(c rest.Interface) *CoreV1Client { + return &CoreV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/api" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *CoreV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go new file mode 100644 index 0000000..978a2a1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EndpointsGetter has a method to return a EndpointsInterface. +// A group's client should implement this interface. +type EndpointsGetter interface { + Endpoints(namespace string) EndpointsInterface +} + +// EndpointsInterface has methods to work with Endpoints resources. +type EndpointsInterface interface { + Create(*v1.Endpoints) (*v1.Endpoints, error) + Update(*v1.Endpoints) (*v1.Endpoints, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Endpoints, error) + List(opts metav1.ListOptions) (*v1.EndpointsList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) + EndpointsExpansion +} + +// endpoints implements EndpointsInterface +type endpoints struct { + client rest.Interface + ns string +} + +// newEndpoints returns a Endpoints +func newEndpoints(c *CoreV1Client, namespace string) *endpoints { + return &endpoints{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. +func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Endpoints that match those selectors. 
+func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.EndpointsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested endpoints. +func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Post(). + Namespace(c.ns). + Resource("endpoints"). + Body(endpoints). + Do(). + Into(result) + return +} + +// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. +func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Put(). + Namespace(c.ns). + Resource("endpoints"). + Name(endpoints.Name). + Body(endpoints). + Do(). + Into(result) + return +} + +// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. +func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("endpoints"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched endpoints. +func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { + result = &v1.Endpoints{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("endpoints"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go new file mode 100644 index 0000000..55cfa09 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*v1.Event) (*v1.Event, error) + Update(*v1.Event) (*v1.Event, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Event, error) + List(opts metav1.ListOptions) (*v1.EventList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *CoreV1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. +func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { + result = &v1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go new file mode 100644 index 0000000..6929ade --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go @@ -0,0 +1,164 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "fmt" + + "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/fields" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + ref "k8s.io/client-go/tools/reference" +) + +// The EventExpansion interface allows manually adding extra methods to the EventInterface. +type EventExpansion interface { + // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. + CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) + // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. + UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) + PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) + // Search finds events about the specified object + Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) + // Returns the appropriate field selector based on the API version being used to communicate with the server. 
+ // The returned field selector can be used with List and Watch to filter desired events. + GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector +} + +// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, +// or an error. The namespace to create the event within is deduced from the +// event; it must either match this event client's namespace, or this event +// client must have been created with the "" namespace. +func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + if e.ns != "" && event.Namespace != e.ns { + return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Post(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Body(event). + Do(). + Into(result) + return result, err +} + +// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, +// or an error. The namespace and key to update the event within is deduced from the event. The +// namespace must either match this event client's namespace, or this event client must have been +// created with the "" namespace. Update also requires the ResourceVersion to be set in the event +// object. +func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { + result := &v1.Event{} + err := e.client.Put(). + NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return result, err +} + +// PatchWithEventNamespace modifies an existing event. It returns the copy of +// the event that the server returns, or an error. The namespace and name of the +// target event is deduced from the incompleteEvent. The namespace must either +// match this event client's namespace, or this event client must have been +// created with the "" namespace. +func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { + if e.ns != "" && incompleteEvent.Namespace != e.ns { + return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) + } + result := &v1.Event{} + err := e.client.Patch(types.StrategicMergePatchType). + NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). + Resource("events"). + Name(incompleteEvent.Name). + Body(data). + Do(). + Into(result) + return result, err +} + +// Search finds events about the specified object. The namespace of the +// object must match this event's client namespace unless the event client +// was made with the "" namespace. 
+func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { + ref, err := ref.GetReference(scheme, objOrRef) + if err != nil { + return nil, err + } + if e.ns != "" && ref.Namespace != e.ns { + return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) + } + stringRefKind := string(ref.Kind) + var refKind *string + if stringRefKind != "" { + refKind = &stringRefKind + } + stringRefUID := string(ref.UID) + var refUID *string + if stringRefUID != "" { + refUID = &stringRefUID + } + fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) + return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) +} + +// Returns the appropriate field selector based on the API version being used to communicate with the server. +// The returned field selector can be used with List and Watch to filter desired events. +func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { + apiVersion := e.client.APIVersion().String() + field := fields.Set{} + if involvedObjectName != nil { + field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName + } + if involvedObjectNamespace != nil { + field["involvedObject.namespace"] = *involvedObjectNamespace + } + if involvedObjectKind != nil { + field["involvedObject.kind"] = *involvedObjectKind + } + if involvedObjectUID != nil { + field["involvedObject.uid"] = *involvedObjectUID + } + return field.AsSelector() +} + +// Returns the appropriate field label to use for name of the involved object as per the given API version. +func GetInvolvedObjectNameFieldLabel(version string) string { + return "involvedObject.name" +} + +// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. +type EventSinkImpl struct { + Interface EventInterface +} + +func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { + return e.Interface.CreateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { + return e.Interface.UpdateWithEventNamespace(event) +} + +func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { + return e.Interface.PatchWithEventNamespace(event, data) +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go new file mode 100644 index 0000000..6e8591b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go @@ -0,0 +1,39 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +type ComponentStatusExpansion interface{} + +type ConfigMapExpansion interface{} + +type EndpointsExpansion interface{} + +type LimitRangeExpansion interface{} + +type PersistentVolumeExpansion interface{} + +type PersistentVolumeClaimExpansion interface{} + +type PodTemplateExpansion interface{} + +type ReplicationControllerExpansion interface{} + +type ResourceQuotaExpansion interface{} + +type SecretExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go new file mode 100644 index 0000000..2eeae11 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// LimitRangesGetter has a method to return a LimitRangeInterface. +// A group's client should implement this interface. +type LimitRangesGetter interface { + LimitRanges(namespace string) LimitRangeInterface +} + +// LimitRangeInterface has methods to work with LimitRange resources. +type LimitRangeInterface interface { + Create(*v1.LimitRange) (*v1.LimitRange, error) + Update(*v1.LimitRange) (*v1.LimitRange, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.LimitRange, error) + List(opts metav1.ListOptions) (*v1.LimitRangeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) + LimitRangeExpansion +} + +// limitRanges implements LimitRangeInterface +type limitRanges struct { + client rest.Interface + ns string +} + +// newLimitRanges returns a LimitRanges +func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { + return &limitRanges{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. +func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. 
+func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.LimitRangeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested limitRanges. +func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Post(). + Namespace(c.ns). + Resource("limitranges"). + Body(limitRange). + Do(). + Into(result) + return +} + +// Update takes the representation of a limitRange and updates it. Returns the server's representation of the limitRange, and an error, if there is any. +func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Put(). + Namespace(c.ns). + Resource("limitranges"). + Name(limitRange.Name). + Body(limitRange). + Do(). + Into(result) + return +} + +// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. +func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("limitranges"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched limitRange. +func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { + result = &v1.LimitRange{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("limitranges"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go new file mode 100644 index 0000000..8a81fe8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// NamespacesGetter has a method to return a NamespaceInterface. +// A group's client should implement this interface. +type NamespacesGetter interface { + Namespaces() NamespaceInterface +} + +// NamespaceInterface has methods to work with Namespace resources. +type NamespaceInterface interface { + Create(*v1.Namespace) (*v1.Namespace, error) + Update(*v1.Namespace) (*v1.Namespace, error) + UpdateStatus(*v1.Namespace) (*v1.Namespace, error) + Delete(name string, options *metav1.DeleteOptions) error + Get(name string, options metav1.GetOptions) (*v1.Namespace, error) + List(opts metav1.ListOptions) (*v1.NamespaceList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) + NamespaceExpansion +} + +// namespaces implements NamespaceInterface +type namespaces struct { + client rest.Interface +} + +// newNamespaces returns a Namespaces +func newNamespaces(c *CoreV1Client) *namespaces { + return &namespaces{ + client: c.RESTClient(), + } +} + +// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. +func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Get(). + Resource("namespaces"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Namespaces that match those selectors. +func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.NamespaceList{} + err = c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested namespaces. +func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("namespaces"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Post(). + Resource("namespaces"). + Body(namespace). + Do(). 
+ Into(result) + return +} + +// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. +func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + Body(namespace). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put(). + Resource("namespaces"). + Name(namespace.Name). + SubResource("status"). + Body(namespace). + Do(). + Into(result) + return +} + +// Delete takes name of the namespace and deletes it. Returns an error if one occurs. +func (c *namespaces) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("namespaces"). + Name(name). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched namespace. +func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Patch(pt). + Resource("namespaces"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go new file mode 100644 index 0000000..17effe2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go @@ -0,0 +1,31 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import "k8s.io/api/core/v1" + +// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. +type NamespaceExpansion interface { + Finalize(item *v1.Namespace) (*v1.Namespace, error) +} + +// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. +func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { + result = &v1.Namespace{} + err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go new file mode 100644 index 0000000..d19fab8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// NodesGetter has a method to return a NodeInterface. +// A group's client should implement this interface. +type NodesGetter interface { + Nodes() NodeInterface +} + +// NodeInterface has methods to work with Node resources. +type NodeInterface interface { + Create(*v1.Node) (*v1.Node, error) + Update(*v1.Node) (*v1.Node, error) + UpdateStatus(*v1.Node) (*v1.Node, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Node, error) + List(opts metav1.ListOptions) (*v1.NodeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) + NodeExpansion +} + +// nodes implements NodeInterface +type nodes struct { + client rest.Interface +} + +// newNodes returns a Nodes +func newNodes(c *CoreV1Client) *nodes { + return &nodes{ + client: c.RESTClient(), + } +} + +// Get takes name of the node, and returns the corresponding node object, and an error if there is any. +func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Nodes that match those selectors. +func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.NodeList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodes. +func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. +func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Post(). + Resource("nodes"). + Body(node). + Do(). + Into(result) + return +} + +// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. 
+func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + Body(node). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Put(). + Resource("nodes"). + Name(node.Name). + SubResource("status"). + Body(node). + Do(). + Into(result) + return +} + +// Delete takes name of the node and deletes it. Returns an error if one occurs. +func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("nodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("nodes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched node. +func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { + result = &v1.Node{} + err = c.client.Patch(pt). + Resource("nodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go new file mode 100644 index 0000000..5db29c3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go @@ -0,0 +1,43 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/types" +) + +// The NodeExpansion interface allows manually adding extra methods to the NodeInterface. +type NodeExpansion interface { + // PatchStatus modifies the status of an existing node. It returns the copy + // of the node that the server returns, or an error. + PatchStatus(nodeName string, data []byte) (*v1.Node, error) +} + +// PatchStatus modifies the status of an existing node. It returns the copy of +// the node that the server returns, or an error. +func (c *nodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { + result := &v1.Node{} + err := c.client.Patch(types.StrategicMergePatchType). + Resource("nodes"). + Name(nodeName). + SubResource("status"). + Body(data). + Do(). 
+ Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go new file mode 100644 index 0000000..7451482 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. +// A group's client should implement this interface. +type PersistentVolumesGetter interface { + PersistentVolumes() PersistentVolumeInterface +} + +// PersistentVolumeInterface has methods to work with PersistentVolume resources. +type PersistentVolumeInterface interface { + Create(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Update(*v1.PersistentVolume) (*v1.PersistentVolume, error) + UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.PersistentVolume, error) + List(opts metav1.ListOptions) (*v1.PersistentVolumeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) + PersistentVolumeExpansion +} + +// persistentVolumes implements PersistentVolumeInterface +type persistentVolumes struct { + client rest.Interface +} + +// newPersistentVolumes returns a PersistentVolumes +func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { + return &persistentVolumes{ + client: c.RESTClient(), + } +} + +// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. +func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Get(). + Resource("persistentvolumes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. +func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PersistentVolumeList{} + err = c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumes. +func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("persistentvolumes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Post(). + Resource("persistentvolumes"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. +func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Put(). + Resource("persistentvolumes"). + Name(persistentVolume.Name). + SubResource("status"). + Body(persistentVolume). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. +func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("persistentvolumes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("persistentvolumes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched persistentVolume. +func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { + result = &v1.PersistentVolume{} + err = c.client.Patch(pt). + Resource("persistentvolumes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go new file mode 100644 index 0000000..410ab37 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. +// A group's client should implement this interface. +type PersistentVolumeClaimsGetter interface { + PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface +} + +// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. +type PersistentVolumeClaimInterface interface { + Create(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + Update(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + UpdateStatus(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.PersistentVolumeClaim, error) + List(opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) + PersistentVolumeClaimExpansion +} + +// persistentVolumeClaims implements PersistentVolumeClaimInterface +type persistentVolumeClaims struct { + client rest.Interface + ns string +} + +// newPersistentVolumeClaims returns a PersistentVolumeClaims +func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { + return &persistentVolumeClaims{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. +func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. +func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PersistentVolumeClaimList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. +func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Post(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. +func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Put(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(persistentVolumeClaim.Name). + SubResource("status"). + Body(persistentVolumeClaim). + Do(). + Into(result) + return +} + +// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. +func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched persistentVolumeClaim. +func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { + result = &v1.PersistentVolumeClaim{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("persistentvolumeclaims"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go new file mode 100644 index 0000000..8d6b6e8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodsGetter has a method to return a PodInterface. +// A group's client should implement this interface. +type PodsGetter interface { + Pods(namespace string) PodInterface +} + +// PodInterface has methods to work with Pod resources. +type PodInterface interface { + Create(*v1.Pod) (*v1.Pod, error) + Update(*v1.Pod) (*v1.Pod, error) + UpdateStatus(*v1.Pod) (*v1.Pod, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Pod, error) + List(opts metav1.ListOptions) (*v1.PodList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) + PodExpansion +} + +// pods implements PodInterface +type pods struct { + client rest.Interface + ns string +} + +// newPods returns a Pods +func newPods(c *CoreV1Client, namespace string) *pods { + return &pods{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. +func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Pods that match those selectors. +func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PodList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested pods. +func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). + Watch() +} + +// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Post(). + Namespace(c.ns). + Resource("pods"). + Body(pod). + Do(). + Into(result) + return +} + +// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. +func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + Body(pod). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Put(). + Namespace(c.ns). + Resource("pods"). + Name(pod.Name). + SubResource("status"). + Body(pod). + Do(). + Into(result) + return +} + +// Delete takes name of the pod and deletes it. Returns an error if one occurs. +func (c *pods) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched pod. +func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { + result = &v1.Pod{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("pods"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go new file mode 100644 index 0000000..ed876be --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go @@ -0,0 +1,45 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/api/core/v1" + policy "k8s.io/api/policy/v1beta1" + "k8s.io/client-go/kubernetes/scheme" + restclient "k8s.io/client-go/rest" +) + +// The PodExpansion interface allows manually adding extra methods to the PodInterface. 
+type PodExpansion interface { + Bind(binding *v1.Binding) error + Evict(eviction *policy.Eviction) error + GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request +} + +// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). +func (c *pods) Bind(binding *v1.Binding) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() +} + +func (c *pods) Evict(eviction *policy.Eviction) error { + return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error() +} + +// Get constructs a request for getting the logs for a pod +func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { + return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go new file mode 100644 index 0000000..84d7c98 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodTemplatesGetter has a method to return a PodTemplateInterface. +// A group's client should implement this interface. +type PodTemplatesGetter interface { + PodTemplates(namespace string) PodTemplateInterface +} + +// PodTemplateInterface has methods to work with PodTemplate resources. +type PodTemplateInterface interface { + Create(*v1.PodTemplate) (*v1.PodTemplate, error) + Update(*v1.PodTemplate) (*v1.PodTemplate, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.PodTemplate, error) + List(opts metav1.ListOptions) (*v1.PodTemplateList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) + PodTemplateExpansion +} + +// podTemplates implements PodTemplateInterface +type podTemplates struct { + client rest.Interface + ns string +} + +// newPodTemplates returns a PodTemplates +func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { + return &podTemplates{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. 
+func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. +func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PodTemplateList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podTemplates. +func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podtemplates"). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. +func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podtemplates"). + Name(podTemplate.Name). + Body(podTemplate). + Do(). + Into(result) + return +} + +// Delete takes name of the podTemplate and deletes it. Returns an error if one occurs. +func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podtemplates"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podTemplate. +func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { + result = &v1.PodTemplate{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podtemplates"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go new file mode 100644 index 0000000..dd3182d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -0,0 +1,223 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + autoscalingv1 "k8s.io/api/autoscaling/v1" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. +// A group's client should implement this interface. +type ReplicationControllersGetter interface { + ReplicationControllers(namespace string) ReplicationControllerInterface +} + +// ReplicationControllerInterface has methods to work with ReplicationController resources. +type ReplicationControllerInterface interface { + Create(*v1.ReplicationController) (*v1.ReplicationController, error) + Update(*v1.ReplicationController) (*v1.ReplicationController, error) + UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ReplicationController, error) + List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) + GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + + ReplicationControllerExpansion +} + +// replicationControllers implements ReplicationControllerInterface +type replicationControllers struct { + client rest.Interface + ns string +} + +// newReplicationControllers returns a ReplicationControllers +func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { + return &replicationControllers{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. +func (c *replicationControllers) Get(name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. +func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ReplicationControllerList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicationControllers. +func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. +func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + Body(replicationController). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationController.Name). + SubResource("status"). + Body(replicationController). + Do(). + Into(result) + return +} + +// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. +func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("replicationcontrollers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). 
+ Error() +} + +// Patch applies the patch and returns the patched replicationController. +func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { + result = &v1.ReplicationController{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicationcontrollers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. +func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationControllerName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { + result = &autoscalingv1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicationcontrollers"). + Name(replicationControllerName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go new file mode 100644 index 0000000..5a17899 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. +// A group's client should implement this interface. +type ResourceQuotasGetter interface { + ResourceQuotas(namespace string) ResourceQuotaInterface +} + +// ResourceQuotaInterface has methods to work with ResourceQuota resources. 
+type ResourceQuotaInterface interface { + Create(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Update(*v1.ResourceQuota) (*v1.ResourceQuota, error) + UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ResourceQuota, error) + List(opts metav1.ListOptions) (*v1.ResourceQuotaList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) + ResourceQuotaExpansion +} + +// resourceQuotas implements ResourceQuotaInterface +type resourceQuotas struct { + client rest.Interface + ns string +} + +// newResourceQuotas returns a ResourceQuotas +func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { + return &resourceQuotas{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. +func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. +func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ResourceQuotaList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested resourceQuotas. +func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Post(). + Namespace(c.ns). + Resource("resourcequotas"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. +func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Put(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(resourceQuota.Name). + SubResource("status"). + Body(resourceQuota). + Do(). + Into(result) + return +} + +// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. +func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("resourcequotas"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched resourceQuota. +func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { + result = &v1.ResourceQuota{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("resourcequotas"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go new file mode 100644 index 0000000..85c143b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// SecretsGetter has a method to return a SecretInterface. +// A group's client should implement this interface. +type SecretsGetter interface { + Secrets(namespace string) SecretInterface +} + +// SecretInterface has methods to work with Secret resources. 
+type SecretInterface interface { + Create(*v1.Secret) (*v1.Secret, error) + Update(*v1.Secret) (*v1.Secret, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Secret, error) + List(opts metav1.ListOptions) (*v1.SecretList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) + SecretExpansion +} + +// secrets implements SecretInterface +type secrets struct { + client rest.Interface + ns string +} + +// newSecrets returns a Secrets +func newSecrets(c *CoreV1Client, namespace string) *secrets { + return &secrets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. +func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Secrets that match those selectors. +func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.SecretList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested secrets. +func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Post(). + Namespace(c.ns). + Resource("secrets"). + Body(secret). + Do(). + Into(result) + return +} + +// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. +func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Put(). + Namespace(c.ns). + Resource("secrets"). + Name(secret.Name). + Body(secret). + Do(). + Into(result) + return +} + +// Delete takes name of the secret and deletes it. Returns an error if one occurs. +func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("secrets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched secret. +func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { + result = &v1.Secret{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("secrets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go new file mode 100644 index 0000000..b0e0941 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ServicesGetter has a method to return a ServiceInterface. +// A group's client should implement this interface. +type ServicesGetter interface { + Services(namespace string) ServiceInterface +} + +// ServiceInterface has methods to work with Service resources. +type ServiceInterface interface { + Create(*v1.Service) (*v1.Service, error) + Update(*v1.Service) (*v1.Service, error) + UpdateStatus(*v1.Service) (*v1.Service, error) + Delete(name string, options *metav1.DeleteOptions) error + Get(name string, options metav1.GetOptions) (*v1.Service, error) + List(opts metav1.ListOptions) (*v1.ServiceList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) + ServiceExpansion +} + +// services implements ServiceInterface +type services struct { + client rest.Interface + ns string +} + +// newServices returns a Services +func newServices(c *CoreV1Client, namespace string) *services { + return &services{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the service, and returns the corresponding service object, and an error if there is any. +func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of Services that match those selectors. +func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServiceList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested services. +func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("services"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a service and creates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Post(). + Namespace(c.ns). + Resource("services"). + Body(service). + Do(). + Into(result) + return +} + +// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. +func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + Body(service). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Put(). + Namespace(c.ns). + Resource("services"). + Name(service.Name). + SubResource("status"). + Body(service). + Do(). + Into(result) + return +} + +// Delete takes name of the service and deletes it. Returns an error if one occurs. +func (c *services) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("services"). + Name(name). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched service. +func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { + result = &v1.Service{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("services"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go new file mode 100644 index 0000000..4937fd1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + "k8s.io/apimachinery/pkg/util/net" + restclient "k8s.io/client-go/rest" +) + +// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. +type ServiceExpansion interface { + ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper +} + +// ProxyGet returns a response of the service by calling it through the proxy. +func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { + request := c.client.Get(). + Namespace(c.ns). + Resource("services"). + SubResource("proxy"). + Name(net.JoinSchemeNamePort(scheme, name, port)). + Suffix(path) + for k, v := range params { + request = request.Param(k, v) + } + return request +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go new file mode 100644 index 0000000..50af6a2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ServiceAccountsGetter has a method to return a ServiceAccountInterface. +// A group's client should implement this interface. +type ServiceAccountsGetter interface { + ServiceAccounts(namespace string) ServiceAccountInterface +} + +// ServiceAccountInterface has methods to work with ServiceAccount resources. 
+type ServiceAccountInterface interface { + Create(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Update(*v1.ServiceAccount) (*v1.ServiceAccount, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ServiceAccount, error) + List(opts metav1.ListOptions) (*v1.ServiceAccountList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) + ServiceAccountExpansion +} + +// serviceAccounts implements ServiceAccountInterface +type serviceAccounts struct { + client rest.Interface + ns string +} + +// newServiceAccounts returns a ServiceAccounts +func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { + return &serviceAccounts{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. +func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. +func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ServiceAccountList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested serviceAccounts. +func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. +func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Put(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(serviceAccount.Name). + Body(serviceAccount). + Do(). + Into(result) + return +} + +// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. 
+func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("serviceaccounts"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched serviceAccount. +func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { + result = &v1.ServiceAccount{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("serviceaccounts"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go new file mode 100644 index 0000000..eaf643f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go @@ -0,0 +1,41 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + authenticationv1 "k8s.io/api/authentication/v1" +) + +// The ServiceAccountExpansion interface allows manually adding extra methods +// to the ServiceAccountInterface. +type ServiceAccountExpansion interface { + CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) +} + +// CreateToken creates a new token for a serviceaccount. +func (c *serviceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { + result := &authenticationv1.TokenRequest{} + err := c.client.Post(). + Namespace(c.ns). + Resource("serviceaccounts"). + SubResource("token"). + Name(name). + Body(tr). + Do(). + Into(result) + return result, err +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go new file mode 100644 index 0000000..143281b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/events/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// EventsGetter has a method to return a EventInterface. +// A group's client should implement this interface. +type EventsGetter interface { + Events(namespace string) EventInterface +} + +// EventInterface has methods to work with Event resources. +type EventInterface interface { + Create(*v1beta1.Event) (*v1beta1.Event, error) + Update(*v1beta1.Event) (*v1beta1.Event, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Event, error) + List(opts v1.ListOptions) (*v1beta1.EventList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) + EventExpansion +} + +// events implements EventInterface +type events struct { + client rest.Interface + ns string +} + +// newEvents returns a Events +func newEvents(c *EventsV1beta1Client, namespace string) *events { + return &events{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the event, and returns the corresponding event object, and an error if there is any. +func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Events that match those selectors. 
+func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.EventList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested events. +func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Post(). + Namespace(c.ns). + Resource("events"). + Body(event). + Do(). + Into(result) + return +} + +// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. +func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Put(). + Namespace(c.ns). + Resource("events"). + Name(event.Name). + Body(event). + Do(). + Into(result) + return +} + +// Delete takes name of the event and deletes it. Returns an error if one occurs. +func (c *events) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("events"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched event. +func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { + result = &v1beta1.Event{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("events"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go new file mode 100644 index 0000000..fb59635 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/events/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type EventsV1beta1Interface interface { + RESTClient() rest.Interface + EventsGetter +} + +// EventsV1beta1Client is used to interact with features provided by the events.k8s.io group. +type EventsV1beta1Client struct { + restClient rest.Interface +} + +func (c *EventsV1beta1Client) Events(namespace string) EventInterface { + return newEvents(c, namespace) +} + +// NewForConfig creates a new EventsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &EventsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new EventsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *EventsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new EventsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *EventsV1beta1Client { + return &EventsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *EventsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go new file mode 100644 index 0000000..e27f693 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go new file mode 100644 index 0000000..93b1ae9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DaemonSetsGetter has a method to return a DaemonSetInterface. +// A group's client should implement this interface. +type DaemonSetsGetter interface { + DaemonSets(namespace string) DaemonSetInterface +} + +// DaemonSetInterface has methods to work with DaemonSet resources. +type DaemonSetInterface interface { + Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error) + List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) + DaemonSetExpansion +} + +// daemonSets implements DaemonSetInterface +type daemonSets struct { + client rest.Interface + ns string +} + +// newDaemonSets returns a DaemonSets +func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { + return &daemonSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. +func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. +func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.DaemonSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested daemonSets. +func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch() +} + +// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("daemonsets"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. +func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + Body(daemonSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("daemonsets"). + Name(daemonSet.Name). + SubResource("status"). + Body(daemonSet). + Do(). + Into(result) + return +} + +// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. +func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("daemonsets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched daemonSet. +func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { + result = &v1beta1.DaemonSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("daemonsets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go new file mode 100644 index 0000000..5557b9f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -0,0 +1,222 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// DeploymentsGetter has a method to return a DeploymentInterface. +// A group's client should implement this interface. +type DeploymentsGetter interface { + Deployments(namespace string) DeploymentInterface +} + +// DeploymentInterface has methods to work with Deployment resources. +type DeploymentInterface interface { + Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) + UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) + List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) + GetScale(deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(deploymentName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + + DeploymentExpansion +} + +// deployments implements DeploymentInterface +type deployments struct { + client rest.Interface + ns string +} + +// newDeployments returns a Deployments +func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { + return &deployments{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. +func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Deployments that match those selectors. +func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.DeploymentList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested deployments. +func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Post(). + Namespace(c.ns). + Resource("deployments"). + Body(deployment). 
+ Do(). + Into(result) + return +} + +// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. +func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + Body(deployment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deployment.Name). + SubResource("status"). + Body(deployment). + Do(). + Into(result) + return +} + +// Delete takes name of the deployment and deletes it. Returns an error if one occurs. +func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("deployments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched deployment. +func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { + result = &v1beta1.Deployment{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("deployments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. +func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *deployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("deployments"). + Name(deploymentName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go new file mode 100644 index 0000000..24734be --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go @@ -0,0 +1,29 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/api/extensions/v1beta1" + +// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. +type DeploymentExpansion interface { + Rollback(*v1beta1.DeploymentRollback) error +} + +// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. +func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { + return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go new file mode 100644 index 0000000..0e9edf5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -0,0 +1,110 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + v1beta1 "k8s.io/api/extensions/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type ExtensionsV1beta1Interface interface { + RESTClient() rest.Interface + DaemonSetsGetter + DeploymentsGetter + IngressesGetter + PodSecurityPoliciesGetter + ReplicaSetsGetter +} + +// ExtensionsV1beta1Client is used to interact with features provided by the extensions group. +type ExtensionsV1beta1Client struct { + restClient rest.Interface +} + +func (c *ExtensionsV1beta1Client) DaemonSets(namespace string) DaemonSetInterface { + return newDaemonSets(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Deployments(namespace string) DeploymentInterface { + return newDeployments(c, namespace) +} + +func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + +func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { + return newReplicaSets(c, namespace) +} + +// NewForConfig creates a new ExtensionsV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ExtensionsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new ExtensionsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ExtensionsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ExtensionsV1beta1Client for the given RESTClient. +func New(c rest.Interface) *ExtensionsV1beta1Client { + return &ExtensionsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ExtensionsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go new file mode 100644 index 0000000..cfaeebd --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type DaemonSetExpansion interface{} + +type IngressExpansion interface{} + +type PodSecurityPolicyExpansion interface{} + +type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go new file mode 100644 index 0000000..4da51c3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// IngressesGetter has a method to return a IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. +type IngressInterface interface { + Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) + UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) + List(opts v1.ListOptions) (*v1beta1.IngressList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + client rest.Interface + ns string +} + +// newIngresses returns a Ingresses +func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { + return &ingresses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresses"). + Body(ingress). + Do(). + Into(result) + return +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + SubResource("status"). + Body(ingress). + Do(). + Into(result) + return +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched ingress. +func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ingresses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go new file mode 100644 index 0000000..a947a54 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
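The generated ingress client above exposes the usual verb set (Get, List, Watch, Create, Update, Delete, Patch), and ListOptions.TimeoutSeconds is what List and Watch translate into the Timeout(...) call on the request builder. A short usage sketch follows, illustrative only; it assumes an in-cluster config and uses "default" purely as an example namespace.

package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	client, err := extv1beta1.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// "default" is only an example namespace.
	ing := client.Ingresses("default")

	// TimeoutSeconds becomes the server-side timeout on the List request.
	timeout := int64(10)
	list, err := ing.List(metav1.ListOptions{TimeoutSeconds: &timeout})
	if err != nil {
		log.Fatalf("listing ingresses: %v", err)
	}
	for _, item := range list.Items {
		fmt.Println(item.Namespace + "/" + item.Name)
	}
}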
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. +type PodSecurityPolicyInterface interface { + Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client rest.Interface +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c.RESTClient(), + } +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch() +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Patch(pt). + Resource("podsecuritypolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go new file mode 100644 index 0000000..4440290 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -0,0 +1,222 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/extensions/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ReplicaSetsGetter has a method to return a ReplicaSetInterface. +// A group's client should implement this interface. 
+type ReplicaSetsGetter interface { + ReplicaSets(namespace string) ReplicaSetInterface +} + +// ReplicaSetInterface has methods to work with ReplicaSet resources. +type ReplicaSetInterface interface { + Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error) + List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) + GetScale(replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(replicaSetName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) + + ReplicaSetExpansion +} + +// replicaSets implements ReplicaSetInterface +type replicaSets struct { + client rest.Interface + ns string +} + +// newReplicaSets returns a ReplicaSets +func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { + return &replicaSets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. +func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. +func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ReplicaSetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested replicaSets. +func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Post(). + Namespace(c.ns). + Resource("replicasets"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. +func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). 
+ Name(replicaSet.Name). + Body(replicaSet). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSet.Name). + SubResource("status"). + Body(replicaSet). + Do(). + Into(result) + return +} + +// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. +func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("replicasets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched replicaSet. +func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { + result = &v1beta1.ReplicaSet{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("replicasets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} + +// GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. +func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Get(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. +func (c *replicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + err = c.client.Put(). + Namespace(c.ns). + Resource("replicasets"). + Name(replicaSetName). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
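GetScale and UpdateScale above talk to the scale subresource of a ReplicaSet instead of round-tripping the whole object. Below is a hedged sketch of a helper built on that pair; the package name, function name and the ReplicaSet name passed by a caller are invented for illustration and are not defined by the vendored code.

package scaleutil

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	extv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1"
)

// ScaleReplicaSet resizes a ReplicaSet through its scale subresource rather
// than updating the full ReplicaSet object.
func ScaleReplicaSet(client extv1beta1.ExtensionsV1beta1Interface, ns, name string, replicas int32) error {
	rs := client.ReplicaSets(ns)

	// GetScale issues a GET against .../replicasets/<name>/scale, as in the
	// generated method above.
	scale, err := rs.GetScale(name, metav1.GetOptions{})
	if err != nil {
		return err
	}

	scale.Spec.Replicas = replicas

	// UpdateScale PUTs the modified Scale back to the same subresource.
	if _, err := rs.UpdateScale(name, scale); err != nil {
		return err
	}
	fmt.Printf("scaled %s/%s to %d replicas\n", ns, name, replicas)
	return nil
}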
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go new file mode 100644 index 0000000..7d77495 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type NetworkPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go new file mode 100644 index 0000000..8684db4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/networking/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1Interface interface { + RESTClient() rest.Interface + NetworkPoliciesGetter +} + +// NetworkingV1Client is used to interact with features provided by the networking.k8s.io group. +type NetworkingV1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInterface { + return newNetworkPolicies(c, namespace) +} + +// NewForConfig creates a new NetworkingV1Client for the given config. +func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NetworkingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1Client for the given RESTClient. 
+func New(c rest.Interface) *NetworkingV1Client { + return &NetworkingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go new file mode 100644 index 0000000..3f39be9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/networking/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. +// A group's client should implement this interface. +type NetworkPoliciesGetter interface { + NetworkPolicies(namespace string) NetworkPolicyInterface +} + +// NetworkPolicyInterface has methods to work with NetworkPolicy resources. +type NetworkPolicyInterface interface { + Create(*v1.NetworkPolicy) (*v1.NetworkPolicy, error) + Update(*v1.NetworkPolicy) (*v1.NetworkPolicy, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.NetworkPolicy, error) + List(opts metav1.ListOptions) (*v1.NetworkPolicyList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) + NetworkPolicyExpansion +} + +// networkPolicies implements NetworkPolicyInterface +type networkPolicies struct { + client rest.Interface + ns string +} + +// newNetworkPolicies returns a NetworkPolicies +func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { + return &networkPolicies{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. +func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). 
+ VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. +func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.NetworkPolicyList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested networkPolicies. +func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Create(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Post(). + Namespace(c.ns). + Resource("networkpolicies"). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. +func (c *networkPolicies) Update(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Put(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(networkPolicy.Name). + Body(networkPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. +func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("networkpolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched networkPolicy. +func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) { + result = &v1.NetworkPolicy{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("networkpolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go new file mode 100644 index 0000000..1442649 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type IngressExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go new file mode 100644 index 0000000..8d76678 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/networking/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// IngressesGetter has a method to return a IngressInterface. +// A group's client should implement this interface. +type IngressesGetter interface { + Ingresses(namespace string) IngressInterface +} + +// IngressInterface has methods to work with Ingress resources. 
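The networking/v1 NetworkPolicy client completed just above is namespaced like the ingress clients, but callers normally build the object from the k8s.io/api types before handing it to Create. The sketch below is illustrative only; the policy name and namespace are examples, not anything defined by this patch.

package main

import (
	"log"

	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	netv1client "k8s.io/client-go/kubernetes/typed/networking/v1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	client, err := netv1client.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// A deny-all-ingress policy: the empty PodSelector matches every pod in
	// the namespace. Name and namespace are examples only.
	policy := &networkingv1.NetworkPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "default-deny-ingress"},
		Spec: networkingv1.NetworkPolicySpec{
			PodSelector: metav1.LabelSelector{},
			PolicyTypes: []networkingv1.PolicyType{networkingv1.PolicyTypeIngress},
		},
	}

	// Create POSTs to .../namespaces/default/networkpolicies, mirroring the
	// generated Create method above.
	if _, err := client.NetworkPolicies("default").Create(policy); err != nil {
		log.Fatalf("creating network policy: %v", err)
	}
}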
+type IngressInterface interface { + Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) + UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) + List(opts v1.ListOptions) (*v1beta1.IngressList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) + IngressExpansion +} + +// ingresses implements IngressInterface +type ingresses struct { + client rest.Interface + ns string +} + +// newIngresses returns a Ingresses +func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { + return &ingresses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. +func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Ingresses that match those selectors. +func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.IngressList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested ingresses. +func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Post(). + Namespace(c.ns). + Resource("ingresses"). + Body(ingress). + Do(). + Into(result) + return +} + +// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. +func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). + Body(ingress). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Put(). + Namespace(c.ns). + Resource("ingresses"). + Name(ingress.Name). 
+ SubResource("status"). + Body(ingress). + Do(). + Into(result) + return +} + +// Delete takes name of the ingress and deletes it. Returns an error if one occurs. +func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("ingresses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched ingress. +func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { + result = &v1beta1.Ingress{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("ingresses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go new file mode 100644 index 0000000..541bf6a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/networking/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NetworkingV1beta1Interface interface { + RESTClient() rest.Interface + IngressesGetter +} + +// NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. +type NetworkingV1beta1Client struct { + restClient rest.Interface +} + +func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { + return newIngresses(c, namespace) +} + +// NewForConfig creates a new NetworkingV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NetworkingV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new NetworkingV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NetworkingV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NetworkingV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *NetworkingV1beta1Client { + return &NetworkingV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NetworkingV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go new file mode 100644 index 0000000..df51baa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go new file mode 100644 index 0000000..fcef31d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go new file mode 100644 index 0000000..863f2d4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/node/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1alpha1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1alpha1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1alpha1Client struct { + restClient rest.Interface +} + +func (c *NodeV1alpha1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1alpha1Client { + return &NodeV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go new file mode 100644 index 0000000..044460e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/node/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. 
+type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. +type RuntimeClassInterface interface { + Create(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) + Update(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error) + List(opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. 
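RuntimeClasses are cluster-scoped, which is why the generated methods above never call Namespace(...) and the getter takes no namespace argument. A small, illustrative sketch of watching them follows; event handling is reduced to printing the event type, and an in-cluster config is assumed.

package main

import (
	"fmt"
	"log"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatal(err)
	}
	client, err := nodev1alpha1.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Cluster-scoped resource: no namespace on the getter or the request path.
	w, err := client.RuntimeClasses().Watch(metav1.ListOptions{})
	if err != nil {
		log.Fatalf("watching runtimeclasses: %v", err)
	}
	defer w.Stop()

	// Each event carries a type (ADDED, MODIFIED, DELETED) and the object.
	for ev := range w.ResultChan() {
		fmt.Println(ev.Type)
	}
}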
+func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { + result = &v1alpha1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go new file mode 100644 index 0000000..669dd02 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type RuntimeClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go new file mode 100644 index 0000000..52683e2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/node/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type NodeV1beta1Interface interface { + RESTClient() rest.Interface + RuntimeClassesGetter +} + +// NodeV1beta1Client is used to interact with features provided by the node.k8s.io group. +type NodeV1beta1Client struct { + restClient rest.Interface +} + +func (c *NodeV1beta1Client) RuntimeClasses() RuntimeClassInterface { + return newRuntimeClasses(c) +} + +// NewForConfig creates a new NodeV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &NodeV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new NodeV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *NodeV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new NodeV1beta1Client for the given RESTClient. +func New(c rest.Interface) *NodeV1beta1Client { + return &NodeV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *NodeV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go new file mode 100644 index 0000000..b3f7c49 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/node/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RuntimeClassesGetter has a method to return a RuntimeClassInterface. +// A group's client should implement this interface. +type RuntimeClassesGetter interface { + RuntimeClasses() RuntimeClassInterface +} + +// RuntimeClassInterface has methods to work with RuntimeClass resources. +type RuntimeClassInterface interface { + Create(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) + Update(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error) + List(opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) + RuntimeClassExpansion +} + +// runtimeClasses implements RuntimeClassInterface +type runtimeClasses struct { + client rest.Interface +} + +// newRuntimeClasses returns a RuntimeClasses +func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { + return &runtimeClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. +func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Get(). + Resource("runtimeclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. +func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.RuntimeClassList{} + err = c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested runtimeClasses. +func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("runtimeclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. +func (c *runtimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Post(). + Resource("runtimeclasses"). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
+func (c *runtimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Put(). + Resource("runtimeclasses"). + Name(runtimeClass.Name). + Body(runtimeClass). + Do(). + Into(result) + return +} + +// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. +func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("runtimeclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("runtimeclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched runtimeClass. +func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { + result = &v1beta1.RuntimeClass{} + err = c.client.Patch(pt). + Resource("runtimeclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go new file mode 100644 index 0000000..12e8e76 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + rest "k8s.io/client-go/rest" +) + +// EvictionsGetter has a method to return a EvictionInterface. +// A group's client should implement this interface. 
+type EvictionsGetter interface { + Evictions(namespace string) EvictionInterface +} + +// EvictionInterface has methods to work with Eviction resources. +type EvictionInterface interface { + EvictionExpansion +} + +// evictions implements EvictionInterface +type evictions struct { + client rest.Interface + ns string +} + +// newEvictions returns a Evictions +func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { + return &evictions{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go new file mode 100644 index 0000000..40bad26 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go @@ -0,0 +1,38 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + policy "k8s.io/api/policy/v1beta1" +) + +// The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface. +type EvictionExpansion interface { + Evict(eviction *policy.Eviction) error +} + +func (c *evictions) Evict(eviction *policy.Eviction) error { + return c.client.Post(). + AbsPath("/api/v1"). + Namespace(eviction.Namespace). + Resource("pods"). + Name(eviction.Name). + SubResource("eviction"). + Body(eviction). + Do(). + Error() +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go new file mode 100644 index 0000000..078c16d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type PodDisruptionBudgetExpansion interface{} + +type PodSecurityPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go new file mode 100644 index 0000000..864af9a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. +// A group's client should implement this interface. +type PodDisruptionBudgetsGetter interface { + PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface +} + +// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. +type PodDisruptionBudgetInterface interface { + Create(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + Update(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + UpdateStatus(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) + List(opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) + PodDisruptionBudgetExpansion +} + +// podDisruptionBudgets implements PodDisruptionBudgetInterface +type podDisruptionBudgets struct { + client rest.Interface + ns string +} + +// newPodDisruptionBudgets returns a PodDisruptionBudgets +func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { + return &podDisruptionBudgets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. +func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. +func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PodDisruptionBudgetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. 
+func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Post(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. +func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Put(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(podDisruptionBudget.Name). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Put(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(podDisruptionBudget.Name). + SubResource("status"). + Body(podDisruptionBudget). + Do(). + Into(result) + return +} + +// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. +func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podDisruptionBudget. +func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { + result = &v1beta1.PodDisruptionBudget{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("poddisruptionbudgets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go new file mode 100644 index 0000000..d02096d --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/policy/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. +// A group's client should implement this interface. +type PodSecurityPoliciesGetter interface { + PodSecurityPolicies() PodSecurityPolicyInterface +} + +// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. +type PodSecurityPolicyInterface interface { + Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) + List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) + PodSecurityPolicyExpansion +} + +// podSecurityPolicies implements PodSecurityPolicyInterface +type podSecurityPolicies struct { + client rest.Interface +} + +// newPodSecurityPolicies returns a PodSecurityPolicies +func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies { + return &podSecurityPolicies{ + client: c.RESTClient(), + } +} + +// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. +func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Get(). + Resource("podsecuritypolicies"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. +func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PodSecurityPolicyList{} + err = c.client.Get(). + Resource("podsecuritypolicies"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podSecurityPolicies. +func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("podsecuritypolicies"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Post(). + Resource("podsecuritypolicies"). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. +func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Put(). + Resource("podsecuritypolicies"). + Name(podSecurityPolicy.Name). + Body(podSecurityPolicy). + Do(). + Into(result) + return +} + +// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. +func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("podsecuritypolicies"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("podsecuritypolicies"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podSecurityPolicy. +func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { + result = &v1beta1.PodSecurityPolicy{} + err = c.client.Patch(pt). + Resource("podsecuritypolicies"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go new file mode 100644 index 0000000..020e185 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go @@ -0,0 +1,100 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/policy/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type PolicyV1beta1Interface interface { + RESTClient() rest.Interface + EvictionsGetter + PodDisruptionBudgetsGetter + PodSecurityPoliciesGetter +} + +// PolicyV1beta1Client is used to interact with features provided by the policy group. +type PolicyV1beta1Client struct { + restClient rest.Interface +} + +func (c *PolicyV1beta1Client) Evictions(namespace string) EvictionInterface { + return newEvictions(c, namespace) +} + +func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { + return newPodDisruptionBudgets(c, namespace) +} + +func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { + return newPodSecurityPolicies(c) +} + +// NewForConfig creates a new PolicyV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &PolicyV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new PolicyV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *PolicyV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new PolicyV1beta1Client for the given RESTClient. +func New(c rest.Interface) *PolicyV1beta1Client { + return &PolicyV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *PolicyV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go new file mode 100644 index 0000000..0a47c44 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "time" + + v1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*v1.ClusterRole) (*v1.ClusterRole, error) + Update(*v1.ClusterRole) (*v1.ClusterRole, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ClusterRole, error) + List(opts metav1.ListOptions) (*v1.ClusterRoleList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { + result = &v1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { + result = &v1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { + result = &v1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). 
+ Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) { + result = &v1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go new file mode 100644 index 0000000..c16ebc3 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + Create(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) + Update(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.ClusterRoleBinding, error) + List(opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { + result = &v1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { + result = &v1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { + result = &v1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. 
+func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) { + result = &v1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go new file mode 100644 index 0000000..e3f1b02 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go new file mode 100644 index 0000000..e3855bb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/rbac/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type RbacV1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1Client struct { + restClient rest.Interface +} + +func (c *RbacV1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1Client for the given config. +func NewForConfig(c *rest.Config) (*RbacV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1Client { + return &RbacV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go new file mode 100644 index 0000000..a17d791 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/rbac/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*v1.Role) (*v1.Role, error) + Update(*v1.Role) (*v1.Role, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.Role, error) + List(opts metav1.ListOptions) (*v1.RoleList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, err error) { + result = &v1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1.Role) (result *v1.Role, err error) { + result = &v1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. 
+// Returns the server's representation of the role, and an error, if there is any.
+func (c *roles) Update(role *v1.Role) (result *v1.Role, err error) {
+	result = &v1.Role{}
+	err = c.client.Put().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(role.Name).
+		Body(role).
+		Do().
+		Into(result)
+	return
+}
+
+// Delete takes name of the role and deletes it. Returns an error if one occurs.
+func (c *roles) Delete(name string, options *metav1.DeleteOptions) error {
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("roles").
+		Name(name).
+		Body(options).
+		Do().
+		Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+	var timeout time.Duration
+	if listOptions.TimeoutSeconds != nil {
+		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+	}
+	return c.client.Delete().
+		Namespace(c.ns).
+		Resource("roles").
+		VersionedParams(&listOptions, scheme.ParameterCodec).
+		Timeout(timeout).
+		Body(options).
+		Do().
+		Error()
+}
+
+// Patch applies the patch and returns the patched role.
+func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) {
+	result = &v1.Role{}
+	err = c.client.Patch(pt).
+		Namespace(c.ns).
+		Resource("roles").
+		SubResource(subresources...).
+		Name(name).
+		Body(data).
+		Do().
+		Into(result)
+	return
+}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
new file mode 100644
index 0000000..c87e457
--- /dev/null
+++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go
@@ -0,0 +1,174 @@
+/*
+Copyright The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	"time"
+
+	v1 "k8s.io/api/rbac/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	types "k8s.io/apimachinery/pkg/types"
+	watch "k8s.io/apimachinery/pkg/watch"
+	scheme "k8s.io/client-go/kubernetes/scheme"
+	rest "k8s.io/client-go/rest"
+)
+
+// RoleBindingsGetter has a method to return a RoleBindingInterface.
+// A group's client should implement this interface.
+type RoleBindingsGetter interface {
+	RoleBindings(namespace string) RoleBindingInterface
+}
+
+// RoleBindingInterface has methods to work with RoleBinding resources.
+type RoleBindingInterface interface { + Create(*v1.RoleBinding) (*v1.RoleBinding, error) + Update(*v1.RoleBinding) (*v1.RoleBinding, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.RoleBinding, error) + List(opts metav1.ListOptions) (*v1.RoleBindingList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { + result = &v1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) { + result = &v1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) { + result = &v1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). 
+ Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) { + result = &v1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go new file mode 100644 index 0000000..77e6687 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. 
+func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { + result = &v1alpha1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go new file mode 100644 index 0000000..0d1b9d2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. +type ClusterRoleBindingInterface interface { + Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) + List(opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). 
+ VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { + result = &v1alpha1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go new file mode 100644 index 0000000..df51baa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go new file mode 100644 index 0000000..b8b5c78 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go new file mode 100644 index 0000000..de83531 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/rbac/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type RbacV1alpha1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1alpha1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1alpha1Client struct { + restClient rest.Interface +} + +func (c *RbacV1alpha1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1alpha1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1alpha1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1alpha1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1alpha1Client for the given config. 
+func NewForConfig(c *rest.Config) (*RbacV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1alpha1Client { + return &RbacV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *RbacV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go new file mode 100644 index 0000000..4a4b672 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. 
+type RoleInterface interface { + Create(*v1alpha1.Role) (*v1alpha1.Role, error) + Update(*v1alpha1.Role) (*v1alpha1.Role, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.Role, error) + List(opts v1.ListOptions) (*v1alpha1.RoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1alpha1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. +func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). 
+ Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { + result = &v1alpha1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go new file mode 100644 index 0000000..bf4e5a1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/rbac/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. +// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error) + List(opts v1.ListOptions) (*v1alpha1.RoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. 
+func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) { + result = &v1alpha1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go new file mode 100644 index 0000000..21d3cab --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterRolesGetter has a method to return a ClusterRoleInterface. +// A group's client should implement this interface. +type ClusterRolesGetter interface { + ClusterRoles() ClusterRoleInterface +} + +// ClusterRoleInterface has methods to work with ClusterRole resources. +type ClusterRoleInterface interface { + Create(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) + Update(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterRole, error) + List(opts v1.ListOptions) (*v1beta1.ClusterRoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) + ClusterRoleExpansion +} + +// clusterRoles implements ClusterRoleInterface +type clusterRoles struct { + client rest.Interface +} + +// newClusterRoles returns a ClusterRoles +func newClusterRoles(c *RbacV1beta1Client) *clusterRoles { + return &clusterRoles{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. +func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Get(). + Resource("clusterroles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. +func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ClusterRoleList{} + err = c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoles. +func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterroles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. 
+func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Post(). + Resource("clusterroles"). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. +func (c *clusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Put(). + Resource("clusterroles"). + Name(clusterRole.Name). + Body(clusterRole). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. +func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterroles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterroles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterRole. +func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) { + result = &v1beta1.ClusterRole{} + err = c.client.Patch(pt). + Resource("clusterroles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go new file mode 100644 index 0000000..47eb9e4 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. +// A group's client should implement this interface. +type ClusterRoleBindingsGetter interface { + ClusterRoleBindings() ClusterRoleBindingInterface +} + +// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. 
+type ClusterRoleBindingInterface interface { + Create(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) + Update(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error) + List(opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) + ClusterRoleBindingExpansion +} + +// clusterRoleBindings implements ClusterRoleBindingInterface +type clusterRoleBindings struct { + client rest.Interface +} + +// newClusterRoleBindings returns a ClusterRoleBindings +func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings { + return &clusterRoleBindings{ + client: c.RESTClient(), + } +} + +// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. +func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Get(). + Resource("clusterrolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. +func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.ClusterRoleBindingList{} + err = c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested clusterRoleBindings. +func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("clusterrolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Post(). + Resource("clusterrolebindings"). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. +func (c *clusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Put(). + Resource("clusterrolebindings"). + Name(clusterRoleBinding.Name). + Body(clusterRoleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the clusterRoleBinding and deletes it. 
Returns an error if one occurs. +func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("clusterrolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("clusterrolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched clusterRoleBinding. +func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) { + result = &v1beta1.ClusterRoleBinding{} + err = c.client.Patch(pt). + Resource("clusterrolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go new file mode 100644 index 0000000..e7be79f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type ClusterRoleExpansion interface{} + +type ClusterRoleBindingExpansion interface{} + +type RoleExpansion interface{} + +type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go new file mode 100644 index 0000000..46718d7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/rbac/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type RbacV1beta1Interface interface { + RESTClient() rest.Interface + ClusterRolesGetter + ClusterRoleBindingsGetter + RolesGetter + RoleBindingsGetter +} + +// RbacV1beta1Client is used to interact with features provided by the rbac.authorization.k8s.io group. +type RbacV1beta1Client struct { + restClient rest.Interface +} + +func (c *RbacV1beta1Client) ClusterRoles() ClusterRoleInterface { + return newClusterRoles(c) +} + +func (c *RbacV1beta1Client) ClusterRoleBindings() ClusterRoleBindingInterface { + return newClusterRoleBindings(c) +} + +func (c *RbacV1beta1Client) Roles(namespace string) RoleInterface { + return newRoles(c, namespace) +} + +func (c *RbacV1beta1Client) RoleBindings(namespace string) RoleBindingInterface { + return newRoleBindings(c, namespace) +} + +// NewForConfig creates a new RbacV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*RbacV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &RbacV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new RbacV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *RbacV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new RbacV1beta1Client for the given RESTClient. +func New(c rest.Interface) *RbacV1beta1Client { + return &RbacV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *RbacV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go new file mode 100644 index 0000000..2b61aad --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RolesGetter has a method to return a RoleInterface. +// A group's client should implement this interface. +type RolesGetter interface { + Roles(namespace string) RoleInterface +} + +// RoleInterface has methods to work with Role resources. +type RoleInterface interface { + Create(*v1beta1.Role) (*v1beta1.Role, error) + Update(*v1beta1.Role) (*v1beta1.Role, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.Role, error) + List(opts v1.ListOptions) (*v1beta1.RoleList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) + RoleExpansion +} + +// roles implements RoleInterface +type roles struct { + client rest.Interface + ns string +} + +// newRoles returns a Roles +func newRoles(c *RbacV1beta1Client, namespace string) *roles { + return &roles{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the role, and returns the corresponding role object, and an error if there is any. +func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of Roles that match those selectors. +func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.RoleList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roles. 
+func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Post(). + Namespace(c.ns). + Resource("roles"). + Body(role). + Do(). + Into(result) + return +} + +// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. +func (c *roles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Put(). + Namespace(c.ns). + Resource("roles"). + Name(role.Name). + Body(role). + Do(). + Into(result) + return +} + +// Delete takes name of the role and deletes it. Returns an error if one occurs. +func (c *roles) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("roles"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched role. +func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) { + result = &v1beta1.Role{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("roles"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go new file mode 100644 index 0000000..0bd118f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/rbac/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// RoleBindingsGetter has a method to return a RoleBindingInterface. 
+// A group's client should implement this interface. +type RoleBindingsGetter interface { + RoleBindings(namespace string) RoleBindingInterface +} + +// RoleBindingInterface has methods to work with RoleBinding resources. +type RoleBindingInterface interface { + Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) + Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.RoleBinding, error) + List(opts v1.ListOptions) (*v1beta1.RoleBindingList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) + RoleBindingExpansion +} + +// roleBindings implements RoleBindingInterface +type roleBindings struct { + client rest.Interface + ns string +} + +// newRoleBindings returns a RoleBindings +func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings { + return &roleBindings{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. +func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. +func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.RoleBindingList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested roleBindings. +func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Post(). + Namespace(c.ns). + Resource("rolebindings"). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. +func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Put(). + Namespace(c.ns). + Resource("rolebindings"). + Name(roleBinding.Name). + Body(roleBinding). + Do(). + Into(result) + return +} + +// Delete takes name of the roleBinding and deletes it. 
Returns an error if one occurs. +func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("rolebindings"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched roleBinding. +func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) { + result = &v1beta1.RoleBinding{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("rolebindings"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go new file mode 100644 index 0000000..cc32132 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go new file mode 100644 index 0000000..3abbb7b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/scheduling/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityClassesGetter has a method to return a PriorityClassInterface. +// A group's client should implement this interface. +type PriorityClassesGetter interface { + PriorityClasses() PriorityClassInterface +} + +// PriorityClassInterface has methods to work with PriorityClass resources. +type PriorityClassInterface interface { + Create(*v1.PriorityClass) (*v1.PriorityClass, error) + Update(*v1.PriorityClass) (*v1.PriorityClass, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.PriorityClass, error) + List(opts metav1.ListOptions) (*v1.PriorityClassList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) + PriorityClassExpansion +} + +// priorityClasses implements PriorityClassInterface +type priorityClasses struct { + client rest.Interface +} + +// newPriorityClasses returns a PriorityClasses +func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { + return &priorityClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *priorityClasses) Get(name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Get(). + Resource("priorityclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. +func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.PriorityClassList{} + err = c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. +func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). 
+ Watch() +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Create(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Post(). + Resource("priorityclasses"). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Update(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Put(). + Resource("priorityclasses"). + Name(priorityClass.Name). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *priorityClasses) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("priorityclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) { + result = &v1.PriorityClass{} + err = c.client.Patch(pt). + Resource("priorityclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go new file mode 100644 index 0000000..bd7e1e5 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/scheduling/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1Interface interface { + RESTClient() rest.Interface + PriorityClassesGetter +} + +// SchedulingV1Client is used to interact with features provided by the scheduling.k8s.io group. 
+type SchedulingV1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1Client) PriorityClasses() PriorityClassInterface { + return newPriorityClasses(c) +} + +// NewForConfig creates a new SchedulingV1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1Client for the given RESTClient. +func New(c rest.Interface) *SchedulingV1Client { + return &SchedulingV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SchedulingV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go new file mode 100644 index 0000000..df51baa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go new file mode 100644 index 0000000..52f81d8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go new file mode 100644 index 0000000..29d646f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/scheduling/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityClassesGetter has a method to return a PriorityClassInterface. +// A group's client should implement this interface. +type PriorityClassesGetter interface { + PriorityClasses() PriorityClassInterface +} + +// PriorityClassInterface has methods to work with PriorityClass resources. +type PriorityClassInterface interface { + Create(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error) + Update(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PriorityClass, error) + List(opts v1.ListOptions) (*v1alpha1.PriorityClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) + PriorityClassExpansion +} + +// priorityClasses implements PriorityClassInterface +type priorityClasses struct { + client rest.Interface +} + +// newPriorityClasses returns a PriorityClasses +func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses { + return &priorityClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) { + result = &v1alpha1.PriorityClass{} + err = c.client.Get(). + Resource("priorityclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. +func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PriorityClassList{} + err = c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. +func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { + result = &v1alpha1.PriorityClass{} + err = c.client.Post(). + Resource("priorityclasses"). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) { + result = &v1alpha1.PriorityClass{} + err = c.client.Put(). + Resource("priorityclasses"). + Name(priorityClass.Name). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("priorityclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) { + result = &v1alpha1.PriorityClass{} + err = c.client.Patch(pt). + Resource("priorityclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go new file mode 100644 index 0000000..375f41b --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/scheduling/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1alpha1Interface interface { + RESTClient() rest.Interface + PriorityClassesGetter +} + +// SchedulingV1alpha1Client is used to interact with features provided by the scheduling.k8s.io group. +type SchedulingV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1alpha1Client) PriorityClasses() PriorityClassInterface { + return newPriorityClasses(c) +} + +// NewForConfig creates a new SchedulingV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *SchedulingV1alpha1Client { + return &SchedulingV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go new file mode 100644 index 0000000..3bab873 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go new file mode 100644 index 0000000..5e402f8 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/scheduling/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PriorityClassesGetter has a method to return a PriorityClassInterface. +// A group's client should implement this interface. +type PriorityClassesGetter interface { + PriorityClasses() PriorityClassInterface +} + +// PriorityClassInterface has methods to work with PriorityClass resources. +type PriorityClassInterface interface { + Create(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error) + Update(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.PriorityClass, error) + List(opts v1.ListOptions) (*v1beta1.PriorityClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) + PriorityClassExpansion +} + +// priorityClasses implements PriorityClassInterface +type priorityClasses struct { + client rest.Interface +} + +// newPriorityClasses returns a PriorityClasses +func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses { + return &priorityClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. +func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) { + result = &v1beta1.PriorityClass{} + err = c.client.Get(). + Resource("priorityclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. 
+func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PriorityClassList{} + err = c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested priorityClasses. +func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("priorityclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { + result = &v1beta1.PriorityClass{} + err = c.client.Post(). + Resource("priorityclasses"). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. +func (c *priorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { + result = &v1beta1.PriorityClass{} + err = c.client.Put(). + Resource("priorityclasses"). + Name(priorityClass.Name). + Body(priorityClass). + Do(). + Into(result) + return +} + +// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. +func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("priorityclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("priorityclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched priorityClass. +func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) { + result = &v1beta1.PriorityClass{} + err = c.client.Patch(pt). + Resource("priorityclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go new file mode 100644 index 0000000..6feec4a --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/scheduling/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type SchedulingV1beta1Interface interface { + RESTClient() rest.Interface + PriorityClassesGetter +} + +// SchedulingV1beta1Client is used to interact with features provided by the scheduling.k8s.io group. +type SchedulingV1beta1Client struct { + restClient rest.Interface +} + +func (c *SchedulingV1beta1Client) PriorityClasses() PriorityClassInterface { + return newPriorityClasses(c) +} + +// NewForConfig creates a new SchedulingV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*SchedulingV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SchedulingV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new SchedulingV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SchedulingV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SchedulingV1beta1Client for the given RESTClient. +func New(c rest.Interface) *SchedulingV1beta1Client { + return &SchedulingV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SchedulingV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go new file mode 100644 index 0000000..df51baa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. 
+package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go new file mode 100644 index 0000000..23d9f94 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type PodPresetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go new file mode 100644 index 0000000..8fd6adc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -0,0 +1,174 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/settings/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// PodPresetsGetter has a method to return a PodPresetInterface. +// A group's client should implement this interface. +type PodPresetsGetter interface { + PodPresets(namespace string) PodPresetInterface +} + +// PodPresetInterface has methods to work with PodPreset resources. 
+type PodPresetInterface interface { + Create(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) + Update(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.PodPreset, error) + List(opts v1.ListOptions) (*v1alpha1.PodPresetList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) + PodPresetExpansion +} + +// podPresets implements PodPresetInterface +type podPresets struct { + client rest.Interface + ns string +} + +// newPodPresets returns a PodPresets +func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { + return &podPresets{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. +func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodPresets that match those selectors. +func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PodPresetList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podPresets. +func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Post(). + Namespace(c.ns). + Resource("podpresets"). + Body(podPreset). + Do(). + Into(result) + return +} + +// Update takes the representation of a podPreset and updates it. Returns the server's representation of the podPreset, and an error, if there is any. +func (c *podPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Put(). + Namespace(c.ns). + Resource("podpresets"). + Name(podPreset.Name). + Body(podPreset). + Do(). + Into(result) + return +} + +// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. +func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("podpresets"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. 
+func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("podpresets"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched podPreset. +func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { + result = &v1alpha1.PodPreset{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("podpresets"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go new file mode 100644 index 0000000..c2a03b9 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/settings/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type SettingsV1alpha1Interface interface { + RESTClient() rest.Interface + PodPresetsGetter +} + +// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group. +type SettingsV1alpha1Client struct { + restClient rest.Interface +} + +func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface { + return newPodPresets(c, namespace) +} + +// NewForConfig creates a new SettingsV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &SettingsV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new SettingsV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *SettingsV1alpha1Client { + return &SettingsV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *SettingsV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go new file mode 100644 index 0000000..3af5d05 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go new file mode 100644 index 0000000..ccac161 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type StorageClassExpansion interface{} + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go new file mode 100644 index 0000000..92378cf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + v1 "k8s.io/api/storage/v1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StorageV1Interface interface { + RESTClient() rest.Interface + StorageClassesGetter + VolumeAttachmentsGetter +} + +// StorageV1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1Client struct { + restClient rest.Interface +} + +func (c *StorageV1Client) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + +// NewForConfig creates a new StorageV1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1Client { + return &StorageV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go new file mode 100644 index 0000000..3f4c48f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. 
+type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. +type StorageClassInterface interface { + Create(*v1.StorageClass) (*v1.StorageClass, error) + Update(*v1.StorageClass) (*v1.StorageClass, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.StorageClass, error) + List(opts metav1.ListOptions) (*v1.StorageClassList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client rest.Interface +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *StorageV1Client) *storageClasses { + return &storageClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). 
+ Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched storageClass. +func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { + result = &v1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go new file mode 100644 index 0000000..0f45097 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +import ( + "time" + + v1 "k8s.io/api/storage/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. 
+type VolumeAttachmentInterface interface { + Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error) + List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) { + result = &v1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go new file mode 100644 index 0000000..df51baa --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go new file mode 100644 index 0000000..cdb7ab2 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1alpha1 + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go new file mode 100644 index 0000000..c52f630 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go @@ -0,0 +1,90 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/storage/v1alpha1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StorageV1alpha1Interface interface { + RESTClient() rest.Interface + VolumeAttachmentsGetter +} + +// StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1alpha1Client struct { + restClient rest.Interface +} + +func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + +// NewForConfig creates a new StorageV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1alpha1Client { + return &StorageV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go new file mode 100644 index 0000000..7fef94e --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "time" + + v1alpha1 "k8s.io/api/storage/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. +type VolumeAttachmentInterface interface { + Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error) + List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). 
+ Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { + result = &v1alpha1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go new file mode 100644 index 0000000..86cf9bf --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CSIDriversGetter has a method to return a CSIDriverInterface. +// A group's client should implement this interface. +type CSIDriversGetter interface { + CSIDrivers() CSIDriverInterface +} + +// CSIDriverInterface has methods to work with CSIDriver resources. +type CSIDriverInterface interface { + Create(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) + Update(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CSIDriver, error) + List(opts v1.ListOptions) (*v1beta1.CSIDriverList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) + CSIDriverExpansion +} + +// cSIDrivers implements CSIDriverInterface +type cSIDrivers struct { + client rest.Interface +} + +// newCSIDrivers returns a CSIDrivers +func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { + return &cSIDrivers{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. +func (c *cSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Get(). + Resource("csidrivers"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. +func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CSIDriverList{} + err = c.client.Get(). + Resource("csidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSIDrivers. +func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csidrivers"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. +func (c *cSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Post(). + Resource("csidrivers"). + Body(cSIDriver). + Do(). + Into(result) + return +} + +// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. 
+func (c *cSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Put(). + Resource("csidrivers"). + Name(cSIDriver.Name). + Body(cSIDriver). + Do(). + Into(result) + return +} + +// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. +func (c *cSIDrivers) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("csidrivers"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csidrivers"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cSIDriver. +func (c *cSIDrivers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { + result = &v1beta1.CSIDriver{} + err = c.client.Patch(pt). + Resource("csidrivers"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go new file mode 100644 index 0000000..e5540c1 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// CSINodesGetter has a method to return a CSINodeInterface. +// A group's client should implement this interface. +type CSINodesGetter interface { + CSINodes() CSINodeInterface +} + +// CSINodeInterface has methods to work with CSINode resources. 
+type CSINodeInterface interface { + Create(*v1beta1.CSINode) (*v1beta1.CSINode, error) + Update(*v1beta1.CSINode) (*v1beta1.CSINode, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.CSINode, error) + List(opts v1.ListOptions) (*v1beta1.CSINodeList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) + CSINodeExpansion +} + +// cSINodes implements CSINodeInterface +type cSINodes struct { + client rest.Interface +} + +// newCSINodes returns a CSINodes +func newCSINodes(c *StorageV1beta1Client) *cSINodes { + return &cSINodes{ + client: c.RESTClient(), + } +} + +// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. +func (c *cSINodes) Get(name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Get(). + Resource("csinodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of CSINodes that match those selectors. +func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.CSINodeList{} + err = c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested cSINodes. +func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("csinodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Post(). + Resource("csinodes"). + Body(cSINode). + Do(). + Into(result) + return +} + +// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. +func (c *cSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Put(). + Resource("csinodes"). + Name(cSINode.Name). + Body(cSINode). + Do(). + Into(result) + return +} + +// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. +func (c *cSINodes) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("csinodes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *cSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("csinodes"). 
+ VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched cSINode. +func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { + result = &v1beta1.CSINode{} + err = c.client.Patch(pt). + Resource("csinodes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go new file mode 100644 index 0000000..7711019 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go new file mode 100644 index 0000000..7ba9314 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +type CSIDriverExpansion interface{} + +type CSINodeExpansion interface{} + +type StorageClassExpansion interface{} + +type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go new file mode 100644 index 0000000..e9916bc --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -0,0 +1,105 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1beta1 "k8s.io/api/storage/v1beta1" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +type StorageV1beta1Interface interface { + RESTClient() rest.Interface + CSIDriversGetter + CSINodesGetter + StorageClassesGetter + VolumeAttachmentsGetter +} + +// StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. +type StorageV1beta1Client struct { + restClient rest.Interface +} + +func (c *StorageV1beta1Client) CSIDrivers() CSIDriverInterface { + return newCSIDrivers(c) +} + +func (c *StorageV1beta1Client) CSINodes() CSINodeInterface { + return newCSINodes(c) +} + +func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { + return newStorageClasses(c) +} + +func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { + return newVolumeAttachments(c) +} + +// NewForConfig creates a new StorageV1beta1Client for the given config. +func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &StorageV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new StorageV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *StorageV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new StorageV1beta1Client for the given RESTClient. +func New(c rest.Interface) *StorageV1beta1Client { + return &StorageV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *StorageV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go new file mode 100644 index 0000000..8a8f389 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -0,0 +1,164 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// StorageClassesGetter has a method to return a StorageClassInterface. +// A group's client should implement this interface. +type StorageClassesGetter interface { + StorageClasses() StorageClassInterface +} + +// StorageClassInterface has methods to work with StorageClass resources. +type StorageClassInterface interface { + Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.StorageClass, error) + List(opts v1.ListOptions) (*v1beta1.StorageClassList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) + StorageClassExpansion +} + +// storageClasses implements StorageClassInterface +type storageClasses struct { + client rest.Interface +} + +// newStorageClasses returns a StorageClasses +func newStorageClasses(c *StorageV1beta1Client) *storageClasses { + return &storageClasses{ + client: c.RESTClient(), + } +} + +// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. +func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Get(). + Resource("storageclasses"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. +func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.StorageClassList{} + err = c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested storageClasses. +func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("storageclasses"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. +func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Post(). + Resource("storageclasses"). + Body(storageClass). + Do(). + Into(result) + return +} + +// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. 
+func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Put(). + Resource("storageclasses"). + Name(storageClass.Name). + Body(storageClass). + Do(). + Into(result) + return +} + +// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. +func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("storageclasses"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("storageclasses"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched storageClass. +func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { + result = &v1beta1.StorageClass{} + err = c.client.Patch(pt). + Resource("storageclasses"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go new file mode 100644 index 0000000..d319407 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -0,0 +1,180 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "time" + + v1beta1 "k8s.io/api/storage/v1beta1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. +// A group's client should implement this interface. +type VolumeAttachmentsGetter interface { + VolumeAttachments() VolumeAttachmentInterface +} + +// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. 
+type VolumeAttachmentInterface interface { + Create(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) + Update(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) + UpdateStatus(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1beta1.VolumeAttachment, error) + List(opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) + VolumeAttachmentExpansion +} + +// volumeAttachments implements VolumeAttachmentInterface +type volumeAttachments struct { + client rest.Interface +} + +// newVolumeAttachments returns a VolumeAttachments +func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { + return &volumeAttachments{ + client: c.RESTClient(), + } +} + +// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. +func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Get(). + Resource("volumeattachments"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. +func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.VolumeAttachmentList{} + err = c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested volumeAttachments. +func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("volumeattachments"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Post(). + Resource("volumeattachments"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. +func (c *volumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. 
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Put(). + Resource("volumeattachments"). + Name(volumeAttachment.Name). + SubResource("status"). + Body(volumeAttachment). + Do(). + Into(result) + return +} + +// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. +func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("volumeattachments"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Resource("volumeattachments"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched volumeAttachment. +func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { + result = &v1beta1.VolumeAttachment{} + err = c.client.Patch(pt). + Resource("volumeattachments"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS new file mode 100644 index 0000000..e0ec62d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +# approval on api packages bubbles to api-approvers +reviewers: +- sig-auth-authenticators-approvers +- sig-auth-authenticators-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go new file mode 100644 index 0000000..b994597 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -0,0 +1,20 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=client.authentication.k8s.io + +package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go new file mode 100644 index 0000000..e4fbc3e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/register.go @@ -0,0 +1,50 @@ +/* +Copyright 2018 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientauthentication + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "client.authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ExecCredential{}, + ) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go new file mode 100644 index 0000000..6fb53ce --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/types.go @@ -0,0 +1,77 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package clientauthentication + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExecCredentials is used by exec-based plugins to communicate credentials to +// HTTP transports. +type ExecCredential struct { + metav1.TypeMeta + + // Spec holds information passed to the plugin by the transport. This contains + // request and runtime specific information, such as if the session is interactive. + Spec ExecCredentialSpec + + // Status is filled in by the plugin and holds the credentials that the transport + // should use to contact the API. + // +optional + Status *ExecCredentialStatus +} + +// ExecCredenitalSpec holds request and runtime specific information provided by +// the transport. +type ExecCredentialSpec struct { + // Response is populated when the transport encounters HTTP status codes, such as 401, + // suggesting previous credentials were invalid. 
+ // +optional + Response *Response + + // Interactive is true when the transport detects the command is being called from an + // interactive prompt. + // +optional + Interactive bool +} + +// ExecCredentialStatus holds credentials for the transport to use. +type ExecCredentialStatus struct { + // ExpirationTimestamp indicates a time when the provided credentials expire. + // +optional + ExpirationTimestamp *metav1.Time + // Token is a bearer token used by the client for request authentication. + // +optional + Token string + // PEM-encoded client TLS certificate. + // +optional + ClientCertificateData string + // PEM-encoded client TLS private key. + // +optional + ClientKeyData string +} + +// Response defines metadata about a failed request, including HTTP status code and +// response headers. +type Response struct { + // Headers holds HTTP headers returned by the server. + Header map[string][]string + // Code is the HTTP status code returned by the server. + Code int32 +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go new file mode 100644 index 0000000..19ab776 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=client.authentication.k8s.io + +package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go new file mode 100644 index 0000000..2acd13d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "client.authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ExecCredential{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go new file mode 100644 index 0000000..c714e24 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -0,0 +1,78 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExecCredential is used by exec-based plugins to communicate credentials to +// HTTP transports. +type ExecCredential struct { + metav1.TypeMeta `json:",inline"` + + // Spec holds information passed to the plugin by the transport. This contains + // request and runtime specific information, such as if the session is interactive. + Spec ExecCredentialSpec `json:"spec,omitempty"` + + // Status is filled in by the plugin and holds the credentials that the transport + // should use to contact the API. + // +optional + Status *ExecCredentialStatus `json:"status,omitempty"` +} + +// ExecCredenitalSpec holds request and runtime specific information provided by +// the transport. +type ExecCredentialSpec struct { + // Response is populated when the transport encounters HTTP status codes, such as 401, + // suggesting previous credentials were invalid. + // +optional + Response *Response `json:"response,omitempty"` + + // Interactive is true when the transport detects the command is being called from an + // interactive prompt. + // +optional + Interactive bool `json:"interactive,omitempty"` +} + +// ExecCredentialStatus holds credentials for the transport to use. +// +// Token and ClientKeyData are sensitive fields. 
This data should only be +// transmitted in-memory between client and exec plugin process. Exec plugin +// itself should at least be protected via file permissions. +type ExecCredentialStatus struct { + // ExpirationTimestamp indicates a time when the provided credentials expire. + // +optional + ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` + // Token is a bearer token used by the client for request authentication. + Token string `json:"token,omitempty"` + // PEM-encoded client TLS certificates (including intermediates, if any). + ClientCertificateData string `json:"clientCertificateData,omitempty"` + // PEM-encoded private key for the above certificate. + ClientKeyData string `json:"clientKeyData,omitempty"` +} + +// Response defines metadata about a failed request, including HTTP status code and +// response headers. +type Response struct { + // Header holds HTTP headers returned by the server. + Header map[string][]string `json:"header,omitempty"` + // Code is the HTTP status code returned by the server. + Code int32 `json:"code,omitempty"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go new file mode 100644 index 0000000..461c20b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,176 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*Response)(nil), (*clientauthentication.Response)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_Response_To_clientauthentication_Response(a.(*Response), b.(*clientauthentication.Response), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.Response)(nil), (*Response)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_Response_To_v1alpha1_Response(a.(*clientauthentication.Response), b.(*Response), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + if err := Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function. 
+func Convert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + if err := Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredential_To_v1alpha1_ExecCredential(in, out, s) +} + +func autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + out.Response = (*clientauthentication.Response)(unsafe.Pointer(in.Response)) + out.Interactive = in.Interactive + return nil +} + +// Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function. +func Convert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + out.Response = (*Response)(unsafe.Pointer(in.Response)) + out.Interactive = in.Interactive + return nil +} + +// Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialSpec_To_v1alpha1_ExecCredentialSpec(in, out, s) +} + +func autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function. 
+func Convert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_v1alpha1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialStatus_To_v1alpha1_ExecCredentialStatus(in, out, s) +} + +func autoConvert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error { + out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header)) + out.Code = in.Code + return nil +} + +// Convert_v1alpha1_Response_To_clientauthentication_Response is an autogenerated conversion function. +func Convert_v1alpha1_Response_To_clientauthentication_Response(in *Response, out *clientauthentication.Response, s conversion.Scope) error { + return autoConvert_v1alpha1_Response_To_clientauthentication_Response(in, out, s) +} + +func autoConvert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error { + out.Header = *(*map[string][]string)(unsafe.Pointer(&in.Header)) + out.Code = in.Code + return nil +} + +// Convert_clientauthentication_Response_To_v1alpha1_Response is an autogenerated conversion function. +func Convert_clientauthentication_Response_To_v1alpha1_Response(in *clientauthentication.Response, out *Response, s conversion.Scope) error { + return autoConvert_clientauthentication_Response_To_v1alpha1_Response(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 0000000..a73d31b --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,128 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(Response) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. +func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Response) DeepCopyInto(out *Response) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response. +func (in *Response) DeepCopy() *Response { + if in == nil { + return nil + } + out := new(Response) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go new file mode 100644 index 0000000..dd621a3 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go new file mode 100644 index 0000000..f543806 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/conversion.go @@ -0,0 +1,26 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + conversion "k8s.io/apimachinery/pkg/conversion" + clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go new file mode 100644 index 0000000..22d1c58 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:deepcopy-gen=package +// +k8s:conversion-gen=k8s.io/client-go/pkg/apis/clientauthentication +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=client.authentication.k8s.io + +package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go new file mode 100644 index 0000000..0bb92f1 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "client.authentication.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &ExecCredential{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go new file mode 100644 index 0000000..d6e2674 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/types.go @@ -0,0 +1,59 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// ExecCredentials is used by exec-based plugins to communicate credentials to +// HTTP transports. +type ExecCredential struct { + metav1.TypeMeta `json:",inline"` + + // Spec holds information passed to the plugin by the transport. This contains + // request and runtime specific information, such as if the session is interactive. + Spec ExecCredentialSpec `json:"spec,omitempty"` + + // Status is filled in by the plugin and holds the credentials that the transport + // should use to contact the API. + // +optional + Status *ExecCredentialStatus `json:"status,omitempty"` +} + +// ExecCredenitalSpec holds request and runtime specific information provided by +// the transport. +type ExecCredentialSpec struct{} + +// ExecCredentialStatus holds credentials for the transport to use. +// +// Token and ClientKeyData are sensitive fields. This data should only be +// transmitted in-memory between client and exec plugin process. Exec plugin +// itself should at least be protected via file permissions. +type ExecCredentialStatus struct { + // ExpirationTimestamp indicates a time when the provided credentials expire. + // +optional + ExpirationTimestamp *metav1.Time `json:"expirationTimestamp,omitempty"` + // Token is a bearer token used by the client for request authentication. + Token string `json:"token,omitempty"` + // PEM-encoded client TLS certificates (including intermediates, if any). + ClientCertificateData string `json:"clientCertificateData,omitempty"` + // PEM-encoded private key for the above certificate. + ClientKeyData string `json:"clientKeyData,omitempty"` +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go new file mode 100644 index 0000000..94ef4b7 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.conversion.go @@ -0,0 +1,142 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + clientauthentication "k8s.io/client-go/pkg/apis/clientauthentication" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
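The v1beta1 types above define the stdout contract between client-go and an exec credential plugin: the plugin prints a single ExecCredential whose status carries either a bearer token or a client certificate/key pair. As a rough illustration only (not part of this patch; the token and expiry are placeholders, and it assumes the metav1.NewTime helper), a minimal plugin speaking this API version could look like:

    package main

    import (
        "encoding/json"
        "os"
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1"
    )

    func main() {
        // Give the credential a short lifetime so client-go re-runs the plugin.
        exp := metav1.NewTime(time.Now().Add(15 * time.Minute))
        cred := v1beta1.ExecCredential{
            TypeMeta: metav1.TypeMeta{
                APIVersion: "client.authentication.k8s.io/v1beta1",
                Kind:       "ExecCredential",
            },
            Status: &v1beta1.ExecCredentialStatus{
                Token:               "placeholder-token", // illustrative value only
                ExpirationTimestamp: &exp,
            },
        }
        // client-go decodes whatever the plugin writes to stdout.
        if err := json.NewEncoder(os.Stdout).Encode(&cred); err != nil {
            os.Exit(1)
        }
    }

client-go then checks that exactly one of the token or the cert/key pair is present, as enforced by refreshCredsLocked further down in exec.go.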
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ExecCredential)(nil), (*clientauthentication.ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(a.(*ExecCredential), b.(*clientauthentication.ExecCredential), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredential)(nil), (*ExecCredential)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(a.(*clientauthentication.ExecCredential), b.(*ExecCredential), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredentialSpec)(nil), (*clientauthentication.ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(a.(*ExecCredentialSpec), b.(*clientauthentication.ExecCredentialSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*ExecCredentialStatus)(nil), (*clientauthentication.ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(a.(*ExecCredentialStatus), b.(*clientauthentication.ExecCredentialStatus), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*clientauthentication.ExecCredentialStatus)(nil), (*ExecCredentialStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(a.(*clientauthentication.ExecCredentialStatus), b.(*ExecCredentialStatus), scope) + }); err != nil { + return err + } + if err := s.AddConversionFunc((*clientauthentication.ExecCredentialSpec)(nil), (*ExecCredentialSpec)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(a.(*clientauthentication.ExecCredentialSpec), b.(*ExecCredentialSpec), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + if err := Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*clientauthentication.ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential is an autogenerated conversion function. 
+func Convert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in *ExecCredential, out *clientauthentication.ExecCredential, s conversion.Scope) error { + return autoConvert_v1beta1_ExecCredential_To_clientauthentication_ExecCredential(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + if err := Convert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(&in.Spec, &out.Spec, s); err != nil { + return err + } + out.Status = (*ExecCredentialStatus)(unsafe.Pointer(in.Status)) + return nil +} + +// Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential is an autogenerated conversion function. +func Convert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in *clientauthentication.ExecCredential, out *ExecCredential, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredential_To_v1beta1_ExecCredential(in, out, s) +} + +func autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + return nil +} + +// Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec is an autogenerated conversion function. +func Convert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in *ExecCredentialSpec, out *clientauthentication.ExecCredentialSpec, s conversion.Scope) error { + return autoConvert_v1beta1_ExecCredentialSpec_To_clientauthentication_ExecCredentialSpec(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialSpec_To_v1beta1_ExecCredentialSpec(in *clientauthentication.ExecCredentialSpec, out *ExecCredentialSpec, s conversion.Scope) error { + // WARNING: in.Response requires manual conversion: does not exist in peer-type + // WARNING: in.Interactive requires manual conversion: does not exist in peer-type + return nil +} + +func autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus is an autogenerated conversion function. +func Convert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in *ExecCredentialStatus, out *clientauthentication.ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_v1beta1_ExecCredentialStatus_To_clientauthentication_ExecCredentialStatus(in, out, s) +} + +func autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + out.ExpirationTimestamp = (*v1.Time)(unsafe.Pointer(in.ExpirationTimestamp)) + out.Token = in.Token + out.ClientCertificateData = in.ClientCertificateData + out.ClientKeyData = in.ClientKeyData + return nil +} + +// Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus is an autogenerated conversion function. 
+func Convert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in *clientauthentication.ExecCredentialStatus, out *ExecCredentialStatus, s conversion.Scope) error { + return autoConvert_clientauthentication_ExecCredentialStatus_To_v1beta1_ExecCredentialStatus(in, out, s) +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..736b8cf --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,92 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + out.Spec = in.Spec + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. 
+func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000..73e63fc --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go new file mode 100644 index 0000000..c568a6f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/zz_generated.deepcopy.go @@ -0,0 +1,128 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package clientauthentication + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredential) DeepCopyInto(out *ExecCredential) { + *out = *in + out.TypeMeta = in.TypeMeta + in.Spec.DeepCopyInto(&out.Spec) + if in.Status != nil { + in, out := &in.Status, &out.Status + *out = new(ExecCredentialStatus) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredential. +func (in *ExecCredential) DeepCopy() *ExecCredential { + if in == nil { + return nil + } + out := new(ExecCredential) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ExecCredential) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialSpec) DeepCopyInto(out *ExecCredentialSpec) { + *out = *in + if in.Response != nil { + in, out := &in.Response, &out.Response + *out = new(Response) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialSpec. +func (in *ExecCredentialSpec) DeepCopy() *ExecCredentialSpec { + if in == nil { + return nil + } + out := new(ExecCredentialSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecCredentialStatus) DeepCopyInto(out *ExecCredentialStatus) { + *out = *in + if in.ExpirationTimestamp != nil { + in, out := &in.ExpirationTimestamp, &out.ExpirationTimestamp + *out = (*in).DeepCopy() + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecCredentialStatus. +func (in *ExecCredentialStatus) DeepCopy() *ExecCredentialStatus { + if in == nil { + return nil + } + out := new(ExecCredentialStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Response) DeepCopyInto(out *Response) { + *out = *in + if in.Header != nil { + in, out := &in.Header, &out.Header + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Response. +func (in *Response) DeepCopy() *Response { + if in == nil { + return nil + } + out := new(Response) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/pkg/version/.gitattributes b/vendor/k8s.io/client-go/pkg/version/.gitattributes new file mode 100644 index 0000000..7e349ef --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/.gitattributes @@ -0,0 +1 @@ +base.go export-subst diff --git a/vendor/k8s.io/client-go/pkg/version/base.go b/vendor/k8s.io/client-go/pkg/version/base.go new file mode 100644 index 0000000..9b4c79f --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/base.go @@ -0,0 +1,63 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +// Base version information. +// +// This is the fallback data used when version information from git is not +// provided via go ldflags. It provides an approximation of the Kubernetes +// version for ad-hoc builds (e.g. `go build`) that cannot get the version +// information from git. 
+// +// If you are looking at these fields in the git tree, they look +// strange. They are modified on the fly by the build process. The +// in-tree values are dummy values used for "git archive", which also +// works for GitHub tar downloads. +// +// When releasing a new Kubernetes version, this file is updated by +// build/mark_new_version.sh to reflect the new version, and then a +// git annotated tag (using format vX.Y where X == Major version and Y +// == Minor version) is created to point to the commit that updates +// pkg/version/base.go +var ( + // TODO: Deprecate gitMajor and gitMinor, use only gitVersion + // instead. First step in deprecation, keep the fields but make + // them irrelevant. (Next we'll take it out, which may muck with + // scripts consuming the kubectl version output - but most of + // these should be looking at gitVersion already anyways.) + gitMajor string = "" // major version, always numeric + gitMinor string = "" // minor version, numeric possibly followed by "+" + + // semantic version, derived by build scripts (see + // https://git.k8s.io/community/contributors/design-proposals/release/versioning.md + // for a detailed discussion of this field) + // + // TODO: This field is still called "gitVersion" for legacy + // reasons. For prerelease versions, the build metadata on the + // semantic version is a git hash, but the version itself is no + // longer the direct output of "git describe", but a slight + // translation to be semver compliant. + + // NOTE: The $Format strings are replaced during 'git archive' thanks to the + // companion .gitattributes file containing 'export-subst' in this same + // directory. See also https://git-scm.com/docs/gitattributes + gitVersion string = "v0.0.0-master+$Format:%h$" + gitCommit string = "$Format:%H$" // sha1 from git, output of $(git rev-parse HEAD) + gitTreeState string = "" // state of git tree, either "clean" or "dirty" + + buildDate string = "1970-01-01T00:00:00Z" // build date in ISO8601 format, output of $(date -u +'%Y-%m-%dT%H:%M:%SZ') +) diff --git a/vendor/k8s.io/client-go/pkg/version/def.bzl b/vendor/k8s.io/client-go/pkg/version/def.bzl new file mode 100644 index 0000000..9c018a4 --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/def.bzl @@ -0,0 +1,38 @@ +# Copyright 2017 The Kubernetes Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Implements hack/lib/version.sh's kube::version::ldflags() for Bazel. +def version_x_defs(): + # This should match the list of packages in kube::version::ldflag + stamp_pkgs = [ + "k8s.io/kubernetes/pkg/version", + # In hack/lib/version.sh, this has a vendor/ prefix. That isn't needed here? + "k8s.io/client-go/pkg/version", + ] + # This should match the list of vars in kube::version::ldflags + # It should also match the list of vars set in hack/print-workspace-status.sh. + stamp_vars = [ + "buildDate", + "gitCommit", + "gitMajor", + "gitMinor", + "gitTreeState", + "gitVersion", + ] + # Generate the cross-product. 
+ x_defs = {} + for pkg in stamp_pkgs: + for var in stamp_vars: + x_defs["%s.%s" % (pkg, var)] = "{%s}" % var + return x_defs diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go new file mode 100644 index 0000000..05e997e --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:openapi-gen=true + +// Package version supplies version information collected at build time to +// kubernetes components. +package version // import "k8s.io/client-go/pkg/version" diff --git a/vendor/k8s.io/client-go/pkg/version/version.go b/vendor/k8s.io/client-go/pkg/version/version.go new file mode 100644 index 0000000..8c8350d --- /dev/null +++ b/vendor/k8s.io/client-go/pkg/version/version.go @@ -0,0 +1,42 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package version + +import ( + "fmt" + "runtime" + + apimachineryversion "k8s.io/apimachinery/pkg/version" +) + +// Get returns the overall codebase version. It's for detecting +// what code a binary was built from. +func Get() apimachineryversion.Info { + // These variables typically come from -ldflags settings and in + // their absence fallback to the settings in pkg/version/base.go + return apimachineryversion.Info{ + Major: gitMajor, + Minor: gitMinor, + GitVersion: gitVersion, + GitCommit: gitCommit, + GitTreeState: gitTreeState, + BuildDate: buildDate, + GoVersion: runtime.Version(), + Compiler: runtime.Compiler, + Platform: fmt.Sprintf("%s/%s", runtime.GOOS, runtime.GOARCH), + } +} diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go new file mode 100644 index 0000000..b88902c --- /dev/null +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -0,0 +1,360 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
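Get above assembles an apimachinery version.Info from the ldflags-stamped variables in base.go. A small consumer might surface it like this (a sketch; logClientVersion is a hypothetical helper, not something in this patch):

    import (
        "log"

        "k8s.io/client-go/pkg/version"
    )

    func logClientVersion() {
        info := version.Get()
        log.Printf("client-go %s (commit %s, built %s)", info.GitVersion, info.GitCommit, info.BuildDate)
    }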
+*/ + +package exec + +import ( + "bytes" + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "net" + "net/http" + "os" + "os/exec" + "reflect" + "sync" + "time" + + "github.com/davecgh/go-spew/spew" + "golang.org/x/crypto/ssh/terminal" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/pkg/apis/clientauthentication" + "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" + "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" + "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/transport" + "k8s.io/client-go/util/connrotation" + "k8s.io/klog" +) + +const execInfoEnv = "KUBERNETES_EXEC_INFO" + +var scheme = runtime.NewScheme() +var codecs = serializer.NewCodecFactory(scheme) + +func init() { + v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(v1alpha1.AddToScheme(scheme)) + utilruntime.Must(v1beta1.AddToScheme(scheme)) + utilruntime.Must(clientauthentication.AddToScheme(scheme)) +} + +var ( + // Since transports can be constantly re-initialized by programs like kubectl, + // keep a cache of initialized authenticators keyed by a hash of their config. + globalCache = newCache() + // The list of API versions we accept. + apiVersions = map[string]schema.GroupVersion{ + v1alpha1.SchemeGroupVersion.String(): v1alpha1.SchemeGroupVersion, + v1beta1.SchemeGroupVersion.String(): v1beta1.SchemeGroupVersion, + } +) + +func newCache() *cache { + return &cache{m: make(map[string]*Authenticator)} +} + +var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} + +func cacheKey(c *api.ExecConfig) string { + return spewConfig.Sprint(c) +} + +type cache struct { + mu sync.Mutex + m map[string]*Authenticator +} + +func (c *cache) get(s string) (*Authenticator, bool) { + c.mu.Lock() + defer c.mu.Unlock() + a, ok := c.m[s] + return a, ok +} + +// put inserts an authenticator into the cache. If an authenticator is already +// associated with the key, the first one is returned instead. +func (c *cache) put(s string, a *Authenticator) *Authenticator { + c.mu.Lock() + defer c.mu.Unlock() + existing, ok := c.m[s] + if ok { + return existing + } + c.m[s] = a + return a +} + +// GetAuthenticator returns an exec-based plugin for providing client credentials. +func GetAuthenticator(config *api.ExecConfig) (*Authenticator, error) { + return newAuthenticator(globalCache, config) +} + +func newAuthenticator(c *cache, config *api.ExecConfig) (*Authenticator, error) { + key := cacheKey(config) + if a, ok := c.get(key); ok { + return a, nil + } + + gv, ok := apiVersions[config.APIVersion] + if !ok { + return nil, fmt.Errorf("exec plugin: invalid apiVersion %q", config.APIVersion) + } + + a := &Authenticator{ + cmd: config.Command, + args: config.Args, + group: gv, + + stdin: os.Stdin, + stderr: os.Stderr, + interactive: terminal.IsTerminal(int(os.Stdout.Fd())), + now: time.Now, + environ: os.Environ, + } + + for _, env := range config.Env { + a.env = append(a.env, env.Name+"="+env.Value) + } + + return c.put(key, a), nil +} + +// Authenticator is a client credential provider that rotates credentials by executing a plugin. +// The plugin input and output are defined by the API group client.authentication.k8s.io. 
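GetAuthenticator above is normally fed the ExecConfig that clientcmd parses out of a kubeconfig exec stanza; because the cache is keyed by a spew dump of that config, repeated lookups with identical configs reuse one Authenticator. Constructing one by hand would look roughly like this (a sketch; the command, args and env values are placeholders, and newExecAuthenticator is a hypothetical helper):

    import (
        "k8s.io/client-go/plugin/pkg/client/auth/exec"
        "k8s.io/client-go/tools/clientcmd/api"
    )

    func newExecAuthenticator() (*exec.Authenticator, error) {
        cfg := &api.ExecConfig{
            Command:    "example-credential-plugin", // placeholder binary name
            Args:       []string{"get-token"},
            Env:        []api.ExecEnvVar{{Name: "EXAMPLE_CLUSTER", Value: "dev"}},
            APIVersion: "client.authentication.k8s.io/v1beta1",
        }
        // Identical configs produce the same cache key, so this may return a shared instance.
        return exec.GetAuthenticator(cfg)
    }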
+type Authenticator struct { + // Set by the config + cmd string + args []string + group schema.GroupVersion + env []string + + // Stubbable for testing + stdin io.Reader + stderr io.Writer + interactive bool + now func() time.Time + environ func() []string + + // Cached results. + // + // The mutex also guards calling the plugin. Since the plugin could be + // interactive we want to make sure it's only called once. + mu sync.Mutex + cachedCreds *credentials + exp time.Time + + onRotate func() +} + +type credentials struct { + token string + cert *tls.Certificate +} + +// UpdateTransportConfig updates the transport.Config to use credentials +// returned by the plugin. +func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { + c.Wrap(func(rt http.RoundTripper) http.RoundTripper { + return &roundTripper{a, rt} + }) + + if c.TLS.GetCert != nil { + return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") + } + c.TLS.GetCert = a.cert + + var dial func(ctx context.Context, network, addr string) (net.Conn, error) + if c.Dial != nil { + dial = c.Dial + } else { + dial = (&net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}).DialContext + } + d := connrotation.NewDialer(dial) + a.onRotate = d.CloseAll + c.Dial = d.DialContext + + return nil +} + +type roundTripper struct { + a *Authenticator + base http.RoundTripper +} + +func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // If a user has already set credentials, use that. This makes commands like + // "kubectl get --token (token) pods" work. + if req.Header.Get("Authorization") != "" { + return r.base.RoundTrip(req) + } + + creds, err := r.a.getCreds() + if err != nil { + return nil, fmt.Errorf("getting credentials: %v", err) + } + if creds.token != "" { + req.Header.Set("Authorization", "Bearer "+creds.token) + } + + res, err := r.base.RoundTrip(req) + if err != nil { + return nil, err + } + if res.StatusCode == http.StatusUnauthorized { + resp := &clientauthentication.Response{ + Header: res.Header, + Code: int32(res.StatusCode), + } + if err := r.a.maybeRefreshCreds(creds, resp); err != nil { + klog.Errorf("refreshing credentials: %v", err) + } + } + return res, nil +} + +func (a *Authenticator) credsExpired() bool { + if a.exp.IsZero() { + return false + } + return a.now().After(a.exp) +} + +func (a *Authenticator) cert() (*tls.Certificate, error) { + creds, err := a.getCreds() + if err != nil { + return nil, err + } + return creds.cert, nil +} + +func (a *Authenticator) getCreds() (*credentials, error) { + a.mu.Lock() + defer a.mu.Unlock() + if a.cachedCreds != nil && !a.credsExpired() { + return a.cachedCreds, nil + } + + if err := a.refreshCredsLocked(nil); err != nil { + return nil, err + } + return a.cachedCreds, nil +} + +// maybeRefreshCreds executes the plugin to force a rotation of the +// credentials, unless they were rotated already. +func (a *Authenticator) maybeRefreshCreds(creds *credentials, r *clientauthentication.Response) error { + a.mu.Lock() + defer a.mu.Unlock() + + // Since we're not making a new pointer to a.cachedCreds in getCreds, no + // need to do deep comparison. + if creds != a.cachedCreds { + // Credentials already rotated. + return nil + } + + return a.refreshCredsLocked(r) +} + +// refreshCredsLocked executes the plugin and reads the credentials from +// stdout. It must be called while holding the Authenticator's mutex. 
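UpdateTransportConfig above is the hook that makes the rest of this machinery take effect: it wraps the RoundTripper so bearer tokens get injected, registers the TLS GetCert callback, and swaps in a connrotation dialer so connections can be closed when certificates rotate. Wiring it into a transport by hand might look like this (a sketch; it assumes the transport.New helper from the vendored k8s.io/client-go/transport package, and execRoundTripper is a hypothetical helper):

    import (
        "net/http"

        "k8s.io/client-go/plugin/pkg/client/auth/exec"
        "k8s.io/client-go/transport"
    )

    func execRoundTripper(a *exec.Authenticator) (http.RoundTripper, error) {
        tc := &transport.Config{}
        if err := a.UpdateTransportConfig(tc); err != nil {
            return nil, err
        }
        // The resulting RoundTripper adds "Authorization: Bearer <token>" and
        // re-runs the plugin on 401 responses, as implemented above.
        return transport.New(tc)
    }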
+func (a *Authenticator) refreshCredsLocked(r *clientauthentication.Response) error { + cred := &clientauthentication.ExecCredential{ + Spec: clientauthentication.ExecCredentialSpec{ + Response: r, + Interactive: a.interactive, + }, + } + + env := append(a.environ(), a.env...) + if a.group == v1alpha1.SchemeGroupVersion { + // Input spec disabled for beta due to lack of use. Possibly re-enable this later if + // someone wants it back. + // + // See: https://github.com/kubernetes/kubernetes/issues/61796 + data, err := runtime.Encode(codecs.LegacyCodec(a.group), cred) + if err != nil { + return fmt.Errorf("encode ExecCredentials: %v", err) + } + env = append(env, fmt.Sprintf("%s=%s", execInfoEnv, data)) + } + + stdout := &bytes.Buffer{} + cmd := exec.Command(a.cmd, a.args...) + cmd.Env = env + cmd.Stderr = a.stderr + cmd.Stdout = stdout + if a.interactive { + cmd.Stdin = a.stdin + } + + if err := cmd.Run(); err != nil { + return fmt.Errorf("exec: %v", err) + } + + _, gvk, err := codecs.UniversalDecoder(a.group).Decode(stdout.Bytes(), nil, cred) + if err != nil { + return fmt.Errorf("decoding stdout: %v", err) + } + if gvk.Group != a.group.Group || gvk.Version != a.group.Version { + return fmt.Errorf("exec plugin is configured to use API version %s, plugin returned version %s", + a.group, schema.GroupVersion{Group: gvk.Group, Version: gvk.Version}) + } + + if cred.Status == nil { + return fmt.Errorf("exec plugin didn't return a status field") + } + if cred.Status.Token == "" && cred.Status.ClientCertificateData == "" && cred.Status.ClientKeyData == "" { + return fmt.Errorf("exec plugin didn't return a token or cert/key pair") + } + if (cred.Status.ClientCertificateData == "") != (cred.Status.ClientKeyData == "") { + return fmt.Errorf("exec plugin returned only certificate or key, not both") + } + + if cred.Status.ExpirationTimestamp != nil { + a.exp = cred.Status.ExpirationTimestamp.Time + } else { + a.exp = time.Time{} + } + + newCreds := &credentials{ + token: cred.Status.Token, + } + if cred.Status.ClientKeyData != "" && cred.Status.ClientCertificateData != "" { + cert, err := tls.X509KeyPair([]byte(cred.Status.ClientCertificateData), []byte(cred.Status.ClientKeyData)) + if err != nil { + return fmt.Errorf("failed parsing client key/certificate: %v", err) + } + newCreds.cert = &cert + } + + oldCreds := a.cachedCreds + a.cachedCreds = newCreds + // Only close all connections when TLS cert rotates. Token rotation doesn't + // need the extra noise. + if a.onRotate != nil && oldCreds != nil && !reflect.DeepEqual(oldCreds.cert, a.cachedCreds.cert) { + a.onRotate() + } + return nil +} diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS new file mode 100644 index 0000000..49dabc6 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -0,0 +1,26 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- thockin +- smarterclayton +- caesarxuchao +- wojtek-t +- deads2k +- brendandburns +- liggitt +- nikhiljindal +- gmarek +- erictune +- sttts +- luxas +- dims +- errordeveloper +- hongchaodeng +- krousey +- resouer +- cjcullen +- rmmh +- lixiaobing10051267 +- asalkeld +- juanvallejo +- lojies diff --git a/vendor/k8s.io/client-go/rest/client.go b/vendor/k8s.io/client-go/rest/client.go new file mode 100644 index 0000000..927403c --- /dev/null +++ b/vendor/k8s.io/client-go/rest/client.go @@ -0,0 +1,258 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "mime" + "net/http" + "net/url" + "os" + "strconv" + "strings" + "time" + + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/util/flowcontrol" +) + +const ( + // Environment variables: Note that the duration should be long enough that the backoff + // persists for some reasonable time (i.e. 120 seconds). The typical base might be "1". + envBackoffBase = "KUBE_CLIENT_BACKOFF_BASE" + envBackoffDuration = "KUBE_CLIENT_BACKOFF_DURATION" +) + +// Interface captures the set of operations for generically interacting with Kubernetes REST apis. +type Interface interface { + GetRateLimiter() flowcontrol.RateLimiter + Verb(verb string) *Request + Post() *Request + Put() *Request + Patch(pt types.PatchType) *Request + Get() *Request + Delete() *Request + APIVersion() schema.GroupVersion +} + +// RESTClient imposes common Kubernetes API conventions on a set of resource paths. +// The baseURL is expected to point to an HTTP or HTTPS path that is the parent +// of one or more resources. The server should return a decodable API resource +// object, or an api.Status object which contains information about the reason for +// any failure. +// +// Most consumers should use client.New() to get a Kubernetes API client. +type RESTClient struct { + // base is the root URL for all invocations of the client + base *url.URL + // versionedAPIPath is a path segment connecting the base URL to the resource root + versionedAPIPath string + + // contentConfig is the information used to communicate with the server. + contentConfig ContentConfig + + // serializers contain all serializers for underlying content type. + serializers Serializers + + // creates BackoffManager that is passed to requests. + createBackoffMgr func() BackoffManager + + // TODO extract this into a wrapper interface via the RESTClient interface in kubectl. + Throttle flowcontrol.RateLimiter + + // Set specific behavior of the client. If not set http.DefaultClient will be used. + Client *http.Client +} + +type Serializers struct { + Encoder runtime.Encoder + Decoder runtime.Decoder + StreamingSerializer runtime.Serializer + Framer runtime.Framer + RenegotiatedDecoder func(contentType string, params map[string]string) (runtime.Decoder, error) +} + +// NewRESTClient creates a new RESTClient. This client performs generic REST functions +// such as Get, Put, Post, and Delete on specified paths. Codec controls encoding and +// decoding of responses from the server. 
+func NewRESTClient(baseURL *url.URL, versionedAPIPath string, config ContentConfig, maxQPS float32, maxBurst int, rateLimiter flowcontrol.RateLimiter, client *http.Client) (*RESTClient, error) { + base := *baseURL + if !strings.HasSuffix(base.Path, "/") { + base.Path += "/" + } + base.RawQuery = "" + base.Fragment = "" + + if config.GroupVersion == nil { + config.GroupVersion = &schema.GroupVersion{} + } + if len(config.ContentType) == 0 { + config.ContentType = "application/json" + } + serializers, err := createSerializers(config) + if err != nil { + return nil, err + } + + var throttle flowcontrol.RateLimiter + if maxQPS > 0 && rateLimiter == nil { + throttle = flowcontrol.NewTokenBucketRateLimiter(maxQPS, maxBurst) + } else if rateLimiter != nil { + throttle = rateLimiter + } + return &RESTClient{ + base: &base, + versionedAPIPath: versionedAPIPath, + contentConfig: config, + serializers: *serializers, + createBackoffMgr: readExpBackoffConfig, + Throttle: throttle, + Client: client, + }, nil +} + +// GetRateLimiter returns rate limier for a given client, or nil if it's called on a nil client +func (c *RESTClient) GetRateLimiter() flowcontrol.RateLimiter { + if c == nil { + return nil + } + return c.Throttle +} + +// readExpBackoffConfig handles the internal logic of determining what the +// backoff policy is. By default if no information is available, NoBackoff. +// TODO Generalize this see #17727 . +func readExpBackoffConfig() BackoffManager { + backoffBase := os.Getenv(envBackoffBase) + backoffDuration := os.Getenv(envBackoffDuration) + + backoffBaseInt, errBase := strconv.ParseInt(backoffBase, 10, 64) + backoffDurationInt, errDuration := strconv.ParseInt(backoffDuration, 10, 64) + if errBase != nil || errDuration != nil { + return &NoBackoff{} + } + return &URLBackoff{ + Backoff: flowcontrol.NewBackOff( + time.Duration(backoffBaseInt)*time.Second, + time.Duration(backoffDurationInt)*time.Second)} +} + +// createSerializers creates all necessary serializers for given contentType. +// TODO: the negotiated serializer passed to this method should probably return +// serializers that control decoding and versioning without this package +// being aware of the types. Depends on whether RESTClient must deal with +// generic infrastructure. 
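readExpBackoffConfig above only takes effect when both KUBE_CLIENT_BACKOFF_BASE and KUBE_CLIENT_BACKOFF_DURATION parse as integers; otherwise every request gets NoBackoff. Opting a process into URL backoff therefore reduces to exporting the two variables before any RESTClient is built, for example (a sketch; enableClientBackoff is a hypothetical helper and the values follow the guidance in the comment on the envBackoff* constants):

    import "os"

    func enableClientBackoff() {
        // 1s base with a 120s window, matching the suggestion in the constants' comment.
        os.Setenv("KUBE_CLIENT_BACKOFF_BASE", "1")
        os.Setenv("KUBE_CLIENT_BACKOFF_DURATION", "120")
    }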
+func createSerializers(config ContentConfig) (*Serializers, error) { + mediaTypes := config.NegotiatedSerializer.SupportedMediaTypes() + contentType := config.ContentType + mediaType, _, err := mime.ParseMediaType(contentType) + if err != nil { + return nil, fmt.Errorf("the content type specified in the client configuration is not recognized: %v", err) + } + info, ok := runtime.SerializerInfoForMediaType(mediaTypes, mediaType) + if !ok { + if len(contentType) != 0 || len(mediaTypes) == 0 { + return nil, fmt.Errorf("no serializers registered for %s", contentType) + } + info = mediaTypes[0] + } + + internalGV := schema.GroupVersions{ + { + Group: config.GroupVersion.Group, + Version: runtime.APIVersionInternal, + }, + // always include the legacy group as a decoding target to handle non-error `Status` return types + { + Group: "", + Version: runtime.APIVersionInternal, + }, + } + + s := &Serializers{ + Encoder: config.NegotiatedSerializer.EncoderForVersion(info.Serializer, *config.GroupVersion), + Decoder: config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), + + RenegotiatedDecoder: func(contentType string, params map[string]string) (runtime.Decoder, error) { + info, ok := runtime.SerializerInfoForMediaType(mediaTypes, contentType) + if !ok { + return nil, fmt.Errorf("serializer for %s not registered", contentType) + } + return config.NegotiatedSerializer.DecoderToVersion(info.Serializer, internalGV), nil + }, + } + if info.StreamSerializer != nil { + s.StreamingSerializer = info.StreamSerializer.Serializer + s.Framer = info.StreamSerializer.Framer + } + + return s, nil +} + +// Verb begins a request with a verb (GET, POST, PUT, DELETE). +// +// Example usage of RESTClient's request building interface: +// c, err := NewRESTClient(...) +// if err != nil { ... } +// resp, err := c.Verb("GET"). +// Path("pods"). +// SelectorParam("labels", "area=staging"). +// Timeout(10*time.Second). +// Do() +// if err != nil { ... } +// list, ok := resp.(*api.PodList) +// +func (c *RESTClient) Verb(verb string) *Request { + backoff := c.createBackoffMgr() + + if c.Client == nil { + return NewRequest(nil, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, 0) + } + return NewRequest(c.Client, verb, c.base, c.versionedAPIPath, c.contentConfig, c.serializers, backoff, c.Throttle, c.Client.Timeout) +} + +// Post begins a POST request. Short for c.Verb("POST"). +func (c *RESTClient) Post() *Request { + return c.Verb("POST") +} + +// Put begins a PUT request. Short for c.Verb("PUT"). +func (c *RESTClient) Put() *Request { + return c.Verb("PUT") +} + +// Patch begins a PATCH request. Short for c.Verb("Patch"). +func (c *RESTClient) Patch(pt types.PatchType) *Request { + return c.Verb("PATCH").SetHeader("Content-Type", string(pt)) +} + +// Get begins a GET request. Short for c.Verb("GET"). +func (c *RESTClient) Get() *Request { + return c.Verb("GET") +} + +// Delete begins a DELETE request. Short for c.Verb("DELETE"). +func (c *RESTClient) Delete() *Request { + return c.Verb("DELETE") +} + +// APIVersion returns the APIVersion this RESTClient is expected to use. +func (c *RESTClient) APIVersion() schema.GroupVersion { + return *c.contentConfig.GroupVersion +} diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go new file mode 100644 index 0000000..3f6b9bc --- /dev/null +++ b/vendor/k8s.io/client-go/rest/config.go @@ -0,0 +1,551 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + gruntime "runtime" + "strings" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/client-go/pkg/version" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + "k8s.io/client-go/transport" + certutil "k8s.io/client-go/util/cert" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" +) + +const ( + DefaultQPS float32 = 5.0 + DefaultBurst int = 10 +) + +var ErrNotInCluster = errors.New("unable to load in-cluster configuration, KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT must be defined") + +// Config holds the common attributes that can be passed to a Kubernetes client on +// initialization. +type Config struct { + // Host must be a host string, a host:port pair, or a URL to the base of the apiserver. + // If a URL is given then the (optional) Path of that URL represents a prefix that must + // be appended to all request URIs used to access the apiserver. This allows a frontend + // proxy to easily relocate all of the apiserver endpoints. + Host string + // APIPath is a sub-path that points to an API root. + APIPath string + + // ContentConfig contains settings that affect how objects are transformed when + // sent to the server. + ContentConfig + + // Server requires Basic authentication + Username string + Password string + + // Server requires Bearer authentication. This client will not attempt to use + // refresh tokens for an OAuth2 flow. + // TODO: demonstrate an OAuth2 compatible client. + BearerToken string + + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + + // Impersonate is the configuration that RESTClient will use for impersonation. + Impersonate ImpersonationConfig + + // Server requires plugin-specified authentication. + AuthProvider *clientcmdapi.AuthProviderConfig + + // Callback to persist config for AuthProvider. + AuthConfigPersister AuthProviderConfigPersister + + // Exec-based authentication provider. + ExecProvider *clientcmdapi.ExecConfig + + // TLSClientConfig contains settings to enable transport layer security + TLSClientConfig + + // UserAgent is an optional field that specifies the caller of this request. + UserAgent string + + // Transport may be used for custom HTTP behavior. This attribute may not + // be specified with the TLS client certificate options. Use WrapTransport + // to provide additional per-server middleware behavior. + Transport http.RoundTripper + // WrapTransport will be invoked for custom HTTP behavior after the underlying + // transport is initialized (either the transport created from TLSClientConfig, + // Transport, or http.DefaultTransport). The config may layer other RoundTrippers + // on top of the returned RoundTripper. 
+ // + // A future release will change this field to an array. Use config.Wrap() + // instead of setting this value directly. + WrapTransport transport.WrapperFunc + + // QPS indicates the maximum QPS to the master from this client. + // If it's zero, the created RESTClient will use DefaultQPS: 5 + QPS float32 + + // Maximum burst for throttle. + // If it's zero, the created RESTClient will use DefaultBurst: 10. + Burst int + + // Rate limiter for limiting connections to the master from this client. If present overwrites QPS/Burst + RateLimiter flowcontrol.RateLimiter + + // The maximum length of time to wait before giving up on a server request. A value of zero means no timeout. + Timeout time.Duration + + // Dial specifies the dial function for creating unencrypted TCP connections. + Dial func(ctx context.Context, network, address string) (net.Conn, error) + + // Version forces a specific version to be used (if registered) + // Do we need this? + // Version string +} + +var _ fmt.Stringer = new(Config) +var _ fmt.GoStringer = new(Config) + +type sanitizedConfig *Config + +type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister } + +func (sanitizedAuthConfigPersister) GoString() string { + return "rest.AuthProviderConfigPersister(--- REDACTED ---)" +} +func (sanitizedAuthConfigPersister) String() string { + return "rest.AuthProviderConfigPersister(--- REDACTED ---)" +} + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config +// to prevent accidental leaking via logs. +func (c *Config) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of Config to +// prevent accidental leaking via logs. +func (c *Config) String() string { + if c == nil { + return "" + } + cc := sanitizedConfig(CopyConfig(c)) + // Explicitly mark non-empty credential fields as redacted. + if cc.Password != "" { + cc.Password = "--- REDACTED ---" + } + if cc.BearerToken != "" { + cc.BearerToken = "--- REDACTED ---" + } + if cc.AuthConfigPersister != nil { + cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister} + } + + return fmt.Sprintf("%#v", cc) +} + +// ImpersonationConfig has all the available impersonation options +type ImpersonationConfig struct { + // UserName is the username to impersonate on each request. + UserName string + // Groups are the groups to impersonate on each request. + Groups []string + // Extra is a free-form field which can be used to link some authentication information + // to authorization information. This field allows you to impersonate it. + Extra map[string][]string +} + +// +k8s:deepcopy-gen=true +// TLSClientConfig contains settings to enable transport layer security +type TLSClientConfig struct { + // Server should be accessed without verifying the TLS certificate. For testing only. + Insecure bool + // ServerName is passed to the server for SNI and is used in the client to check server + // ceritificates against. If ServerName is empty, the hostname used to contact the + // server is used. + ServerName string + + // Server requires TLS client certificate authentication + CertFile string + // Server requires TLS client certificate authentication + KeyFile string + // Trusted root certificates for server + CAFile string + + // CertData holds PEM-encoded bytes (typically read from a client certificate file). + // CertData takes precedence over CertFile + CertData []byte + // KeyData holds PEM-encoded bytes (typically read from a client certificate key file). 
+ // KeyData takes precedence over KeyFile + KeyData []byte + // CAData holds PEM-encoded bytes (typically read from a root certificates bundle). + // CAData takes precedence over CAFile + CAData []byte +} + +var _ fmt.Stringer = TLSClientConfig{} +var _ fmt.GoStringer = TLSClientConfig{} + +type sanitizedTLSClientConfig TLSClientConfig + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// TLSClientConfig to prevent accidental leaking via logs. +func (c TLSClientConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of +// TLSClientConfig to prevent accidental leaking via logs. +func (c TLSClientConfig) String() string { + cc := sanitizedTLSClientConfig{ + Insecure: c.Insecure, + ServerName: c.ServerName, + CertFile: c.CertFile, + KeyFile: c.KeyFile, + CAFile: c.CAFile, + CertData: c.CertData, + KeyData: c.KeyData, + CAData: c.CAData, + } + // Explicitly mark non-empty credential fields as redacted. + if len(cc.CertData) != 0 { + cc.CertData = []byte("--- TRUNCATED ---") + } + if len(cc.KeyData) != 0 { + cc.KeyData = []byte("--- REDACTED ---") + } + return fmt.Sprintf("%#v", cc) +} + +type ContentConfig struct { + // AcceptContentTypes specifies the types the client will accept and is optional. + // If not set, ContentType will be used to define the Accept header + AcceptContentTypes string + // ContentType specifies the wire format used to communicate with the server. + // This value will be set as the Accept header on requests made to the server, and + // as the default content type on any object sent to the server. If not set, + // "application/json" is used. + ContentType string + // GroupVersion is the API version to talk to. Must be provided when initializing + // a RESTClient directly. When initializing a Client, will be set with the default + // code version. + GroupVersion *schema.GroupVersion + // NegotiatedSerializer is used for obtaining encoders and decoders for multiple + // supported media types. + NegotiatedSerializer runtime.NegotiatedSerializer +} + +// RESTClientFor returns a RESTClient that satisfies the requested attributes on a client Config +// object. Note that a RESTClient may require fields that are optional when initializing a Client. +// A RESTClient created by this method is generic - it expects to operate on an API that follows +// the Kubernetes conventions, but may not be the Kubernetes API. +func RESTClientFor(config *Config) (*RESTClient, error) { + if config.GroupVersion == nil { + return nil, fmt.Errorf("GroupVersion is required when initializing a RESTClient") + } + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + qps := config.QPS + if config.QPS == 0.0 { + qps = DefaultQPS + } + burst := config.Burst + if config.Burst == 0 { + burst = DefaultBurst + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + if config.Timeout > 0 { + httpClient.Timeout = config.Timeout + } + } + + return NewRESTClient(baseURL, versionedAPIPath, config.ContentConfig, qps, burst, config.RateLimiter, httpClient) +} + +// UnversionedRESTClientFor is the same as RESTClientFor, except that it allows +// the config.Version to be empty. 
+func UnversionedRESTClientFor(config *Config) (*RESTClient, error) { + if config.NegotiatedSerializer == nil { + return nil, fmt.Errorf("NegotiatedSerializer is required when initializing a RESTClient") + } + + baseURL, versionedAPIPath, err := defaultServerUrlFor(config) + if err != nil { + return nil, err + } + + transport, err := TransportFor(config) + if err != nil { + return nil, err + } + + var httpClient *http.Client + if transport != http.DefaultTransport { + httpClient = &http.Client{Transport: transport} + if config.Timeout > 0 { + httpClient.Timeout = config.Timeout + } + } + + versionConfig := config.ContentConfig + if versionConfig.GroupVersion == nil { + v := metav1.SchemeGroupVersion + versionConfig.GroupVersion = &v + } + + return NewRESTClient(baseURL, versionedAPIPath, versionConfig, config.QPS, config.Burst, config.RateLimiter, httpClient) +} + +// SetKubernetesDefaults sets default values on the provided client config for accessing the +// Kubernetes API or returns an error if any of the defaults are impossible or invalid. +func SetKubernetesDefaults(config *Config) error { + if len(config.UserAgent) == 0 { + config.UserAgent = DefaultKubernetesUserAgent() + } + return nil +} + +// adjustCommit returns sufficient significant figures of the commit's git hash. +func adjustCommit(c string) string { + if len(c) == 0 { + return "unknown" + } + if len(c) > 7 { + return c[:7] + } + return c +} + +// adjustVersion strips "alpha", "beta", etc. from version in form +// major.minor.patch-[alpha|beta|etc]. +func adjustVersion(v string) string { + if len(v) == 0 { + return "unknown" + } + seg := strings.SplitN(v, "-", 2) + return seg[0] +} + +// adjustCommand returns the last component of the +// OS-specific command path for use in User-Agent. +func adjustCommand(p string) string { + // Unlikely, but better than returning "". + if len(p) == 0 { + return "unknown" + } + return filepath.Base(p) +} + +// buildUserAgent builds a User-Agent string from given args. +func buildUserAgent(command, version, os, arch, commit string) string { + return fmt.Sprintf( + "%s/%s (%s/%s) kubernetes/%s", command, version, os, arch, commit) +} + +// DefaultKubernetesUserAgent returns a User-Agent string built from static global vars. +func DefaultKubernetesUserAgent() string { + return buildUserAgent( + adjustCommand(os.Args[0]), + adjustVersion(version.Get().GitVersion), + gruntime.GOOS, + gruntime.GOARCH, + adjustCommit(version.Get().GitCommit)) +} + +// InClusterConfig returns a config object which uses the service account +// kubernetes gives to pods. It's intended for clients that expect to be +// running inside a pod running on kubernetes. It will return ErrNotInCluster +// if called from a process not running in a kubernetes environment. +func InClusterConfig() (*Config, error) { + const ( + tokenFile = "/var/run/secrets/kubernetes.io/serviceaccount/token" + rootCAFile = "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt" + ) + host, port := os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT") + if len(host) == 0 || len(port) == 0 { + return nil, ErrNotInCluster + } + + token, err := ioutil.ReadFile(tokenFile) + if err != nil { + return nil, err + } + + tlsClientConfig := TLSClientConfig{} + + if _, err := certutil.NewPool(rootCAFile); err != nil { + klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + } else { + tlsClientConfig.CAFile = rootCAFile + } + + return &Config{ + // TODO: switch to using cluster DNS. 
+ Host: "https://" + net.JoinHostPort(host, port), + TLSClientConfig: tlsClientConfig, + BearerToken: string(token), + BearerTokenFile: tokenFile, + }, nil +} + +// IsConfigTransportTLS returns true if and only if the provided +// config will result in a protected connection to the server when it +// is passed to restclient.RESTClientFor(). Use to determine when to +// send credentials over the wire. +// +// Note: the Insecure flag is ignored when testing for this value, so MITM attacks are +// still possible. +func IsConfigTransportTLS(config Config) bool { + baseURL, _, err := defaultServerUrlFor(&config) + if err != nil { + return false + } + return baseURL.Scheme == "https" +} + +// LoadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func LoadTLSFiles(c *Config) error { + var err error + c.CAData, err = dataFromSliceOrFile(c.CAData, c.CAFile) + if err != nil { + return err + } + + c.CertData, err = dataFromSliceOrFile(c.CertData, c.CertFile) + if err != nil { + return err + } + + c.KeyData, err = dataFromSliceOrFile(c.KeyData, c.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +func AddUserAgent(config *Config, userAgent string) *Config { + fullUserAgent := DefaultKubernetesUserAgent() + "/" + userAgent + config.UserAgent = fullUserAgent + return config +} + +// AnonymousClientConfig returns a copy of the given config with all user credentials (cert/key, bearer token, and username/password) removed +func AnonymousClientConfig(config *Config) *Config { + // copy only known safe fields + return &Config{ + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + TLSClientConfig: TLSClientConfig{ + Insecure: config.Insecure, + ServerName: config.ServerName, + CAFile: config.TLSClientConfig.CAFile, + CAData: config.TLSClientConfig.CAData, + }, + RateLimiter: config.RateLimiter, + UserAgent: config.UserAgent, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + Timeout: config.Timeout, + Dial: config.Dial, + } +} + +// CopyConfig returns a copy of the given config +func CopyConfig(config *Config) *Config { + return &Config{ + Host: config.Host, + APIPath: config.APIPath, + ContentConfig: config.ContentConfig, + Username: config.Username, + Password: config.Password, + BearerToken: config.BearerToken, + BearerTokenFile: config.BearerTokenFile, + Impersonate: ImpersonationConfig{ + Groups: config.Impersonate.Groups, + Extra: config.Impersonate.Extra, + UserName: config.Impersonate.UserName, + }, + AuthProvider: config.AuthProvider, + AuthConfigPersister: config.AuthConfigPersister, + ExecProvider: config.ExecProvider, + TLSClientConfig: TLSClientConfig{ + Insecure: config.TLSClientConfig.Insecure, + ServerName: config.TLSClientConfig.ServerName, + CertFile: config.TLSClientConfig.CertFile, + KeyFile: config.TLSClientConfig.KeyFile, + CAFile: config.TLSClientConfig.CAFile, + CertData: 
config.TLSClientConfig.CertData, + KeyData: config.TLSClientConfig.KeyData, + CAData: config.TLSClientConfig.CAData, + }, + UserAgent: config.UserAgent, + Transport: config.Transport, + WrapTransport: config.WrapTransport, + QPS: config.QPS, + Burst: config.Burst, + RateLimiter: config.RateLimiter, + Timeout: config.Timeout, + Dial: config.Dial, + } +} diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go new file mode 100644 index 0000000..83ef5ae --- /dev/null +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "net/http" + "sync" + + "k8s.io/klog" + + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" +) + +type AuthProvider interface { + // WrapTransport allows the plugin to create a modified RoundTripper that + // attaches authorization headers (or other info) to requests. + WrapTransport(http.RoundTripper) http.RoundTripper + // Login allows the plugin to initialize its configuration. It must not + // require direct user interaction. + Login() error +} + +// Factory generates an AuthProvider plugin. +// clusterAddress is the address of the current cluster. +// config is the initial configuration for this plugin. +// persister allows the plugin to save updated configuration. +type Factory func(clusterAddress string, config map[string]string, persister AuthProviderConfigPersister) (AuthProvider, error) + +// AuthProviderConfigPersister allows a plugin to persist configuration info +// for just itself. +type AuthProviderConfigPersister interface { + Persist(map[string]string) error +} + +// All registered auth provider plugins. +var pluginsLock sync.Mutex +var plugins = make(map[string]Factory) + +func RegisterAuthProviderPlugin(name string, plugin Factory) error { + pluginsLock.Lock() + defer pluginsLock.Unlock() + if _, found := plugins[name]; found { + return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) + } + klog.V(4).Infof("Registered Auth Provider Plugin %q", name) + plugins[name] = plugin + return nil +} + +func GetAuthProvider(clusterAddress string, apc *clientcmdapi.AuthProviderConfig, persister AuthProviderConfigPersister) (AuthProvider, error) { + pluginsLock.Lock() + defer pluginsLock.Unlock() + p, ok := plugins[apc.Name] + if !ok { + return nil, fmt.Errorf("No Auth Provider found for name %q", apc.Name) + } + return p(clusterAddress, apc.Config, persister) +} diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go new file mode 100644 index 0000000..dd06303 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/request.go @@ -0,0 +1,1201 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "bytes" + "context" + "encoding/hex" + "fmt" + "io" + "io/ioutil" + "mime" + "net/http" + "net/url" + "path" + "reflect" + "strconv" + "strings" + "time" + + "golang.org/x/net/http2" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/util/net" + "k8s.io/apimachinery/pkg/watch" + restclientwatch "k8s.io/client-go/rest/watch" + "k8s.io/client-go/tools/metrics" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" +) + +var ( + // longThrottleLatency defines threshold for logging requests. All requests being + // throttle for more than longThrottleLatency will be logged. + longThrottleLatency = 50 * time.Millisecond +) + +// HTTPClient is an interface for testing a request object. +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +// ResponseWrapper is an interface for getting a response. +// The response may be either accessed as a raw data (the whole output is put into memory) or as a stream. +type ResponseWrapper interface { + DoRaw() ([]byte, error) + Stream() (io.ReadCloser, error) +} + +// RequestConstructionError is returned when there's an error assembling a request. +type RequestConstructionError struct { + Err error +} + +// Error returns a textual description of 'r'. +func (r *RequestConstructionError) Error() string { + return fmt.Sprintf("request construction error: '%v'", r.Err) +} + +// Request allows for building up a request to a server in a chained fashion. +// Any errors are stored until the end of your call, so you only have to +// check once. +type Request struct { + // required + client HTTPClient + verb string + + baseURL *url.URL + content ContentConfig + serializers Serializers + + // generic components accessible via method setters + pathPrefix string + subpath string + params url.Values + headers http.Header + + // structural elements of the request that are part of the Kubernetes API conventions + namespace string + namespaceSet bool + resource string + resourceName string + subresource string + timeout time.Duration + + // output + err error + body io.Reader + + // This is only used for per-request timeouts, deadlines, and cancellations. + ctx context.Context + + backoffMgr BackoffManager + throttle flowcontrol.RateLimiter +} + +// NewRequest creates a new request helper object for accessing runtime.Objects on a server. 
+func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request {
+ if backoff == nil {
+ klog.V(2).Infof("Not implementing request backoff strategy.")
+ backoff = &NoBackoff{}
+ }
+
+ pathPrefix := "/"
+ if baseURL != nil {
+ pathPrefix = path.Join(pathPrefix, baseURL.Path)
+ }
+ r := &Request{
+ client: client,
+ verb: verb,
+ baseURL: baseURL,
+ pathPrefix: path.Join(pathPrefix, versionedAPIPath),
+ content: content,
+ serializers: serializers,
+ backoffMgr: backoff,
+ throttle: throttle,
+ timeout: timeout,
+ }
+ switch {
+ case len(content.AcceptContentTypes) > 0:
+ r.SetHeader("Accept", content.AcceptContentTypes)
+ case len(content.ContentType) > 0:
+ r.SetHeader("Accept", content.ContentType+", */*")
+ }
+ return r
+}
+
+// Prefix adds segments to the relative beginning to the request path. These
+// items will be placed before the optional Namespace, Resource, or Name sections.
+// Setting AbsPath will clear any previously set Prefix segments
+func (r *Request) Prefix(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.pathPrefix = path.Join(r.pathPrefix, path.Join(segments...))
+ return r
+}
+
+// Suffix appends segments to the end of the path. These items will be placed after the prefix and optional
+// Namespace, Resource, or Name sections.
+func (r *Request) Suffix(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.subpath = path.Join(r.subpath, path.Join(segments...))
+ return r
+}
+
+// Resource sets the resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Resource(resource string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if len(r.resource) != 0 {
+ r.err = fmt.Errorf("resource already set to %q, cannot change to %q", r.resource, resource)
+ return r
+ }
+ if msgs := IsValidPathSegmentName(resource); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid resource %q: %v", resource, msgs)
+ return r
+ }
+ r.resource = resource
+ return r
+}
+
+// BackOff sets the request's backoff manager to the one specified,
+// or defaults to the stub implementation if nil is provided
+func (r *Request) BackOff(manager BackoffManager) *Request {
+ if manager == nil {
+ r.backoffMgr = &NoBackoff{}
+ return r
+ }
+
+ r.backoffMgr = manager
+ return r
+}
+
+// Throttle receives a rate-limiter and sets or replaces an existing request limiter
+func (r *Request) Throttle(limiter flowcontrol.RateLimiter) *Request {
+ r.throttle = limiter
+ return r
+}
+
+// SubResource sets a sub-resource path which can be multiple segments after the resource
+// name but before the suffix.
+func (r *Request) SubResource(subresources ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ subresource := path.Join(subresources...)
+ if len(r.subresource) != 0 {
+ r.err = fmt.Errorf("subresource already set to %q, cannot change to %q", r.resource, subresource)
+ return r
+ }
+ for _, s := range subresources {
+ if msgs := IsValidPathSegmentName(s); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid subresource %q: %v", s, msgs)
+ return r
+ }
+ }
+ r.subresource = subresource
+ return r
+}
+
+// Name sets the name of a resource to access (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Name(resourceName string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if len(resourceName) == 0 {
+ r.err = fmt.Errorf("resource name may not be empty")
+ return r
+ }
+ if len(r.resourceName) != 0 {
+ r.err = fmt.Errorf("resource name already set to %q, cannot change to %q", r.resourceName, resourceName)
+ return r
+ }
+ if msgs := IsValidPathSegmentName(resourceName); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid resource name %q: %v", resourceName, msgs)
+ return r
+ }
+ r.resourceName = resourceName
+ return r
+}
+
+// Namespace applies the namespace scope to a request (<resource>/[ns/<namespace>/]<name>)
+func (r *Request) Namespace(namespace string) *Request {
+ if r.err != nil {
+ return r
+ }
+ if r.namespaceSet {
+ r.err = fmt.Errorf("namespace already set to %q, cannot change to %q", r.namespace, namespace)
+ return r
+ }
+ if msgs := IsValidPathSegmentName(namespace); len(msgs) != 0 {
+ r.err = fmt.Errorf("invalid namespace %q: %v", namespace, msgs)
+ return r
+ }
+ r.namespaceSet = true
+ r.namespace = namespace
+ return r
+}
+
+// NamespaceIfScoped is a convenience function to set a namespace if scoped is true
+func (r *Request) NamespaceIfScoped(namespace string, scoped bool) *Request {
+ if scoped {
+ return r.Namespace(namespace)
+ }
+ return r
+}
+
+// AbsPath overwrites an existing path with the segments provided. Trailing slashes are preserved
+// when a single segment is passed.
+func (r *Request) AbsPath(segments ...string) *Request {
+ if r.err != nil {
+ return r
+ }
+ r.pathPrefix = path.Join(r.baseURL.Path, path.Join(segments...))
+ if len(segments) == 1 && (len(r.baseURL.Path) > 1 || len(segments[0]) > 1) && strings.HasSuffix(segments[0], "/") {
+ // preserve any trailing slashes for legacy behavior
+ r.pathPrefix += "/"
+ }
+ return r
+}
+
+// RequestURI overwrites existing path and parameters with the value of the provided server relative
+// URI.
+func (r *Request) RequestURI(uri string) *Request {
+ if r.err != nil {
+ return r
+ }
+ locator, err := url.Parse(uri)
+ if err != nil {
+ r.err = err
+ return r
+ }
+ r.pathPrefix = locator.Path
+ if len(locator.Query()) > 0 {
+ if r.params == nil {
+ r.params = make(url.Values)
+ }
+ for k, v := range locator.Query() {
+ r.params[k] = v
+ }
+ }
+ return r
+}
+
+// Param creates a query parameter with the given string value.
+func (r *Request) Param(paramName, s string) *Request {
+ if r.err != nil {
+ return r
+ }
+ return r.setParam(paramName, s)
+}
+
+// VersionedParams will take the provided object, serialize it to a map[string][]string using the
+// implicit RESTClient API version and the default parameter codec, and then add those as parameters
+// to the request. Use this to provide versioned query parameters from client libraries.
+// VersionedParams will not write query parameters that have omitempty set and are empty. If a
+// parameter has already been set it is appended to (Params and VersionedParams are additive).
+func (r *Request) VersionedParams(obj runtime.Object, codec runtime.ParameterCodec) *Request { + return r.SpecificallyVersionedParams(obj, codec, *r.content.GroupVersion) +} + +func (r *Request) SpecificallyVersionedParams(obj runtime.Object, codec runtime.ParameterCodec, version schema.GroupVersion) *Request { + if r.err != nil { + return r + } + params, err := codec.EncodeParameters(obj, version) + if err != nil { + r.err = err + return r + } + for k, v := range params { + if r.params == nil { + r.params = make(url.Values) + } + r.params[k] = append(r.params[k], v...) + } + return r +} + +func (r *Request) setParam(paramName, value string) *Request { + if r.params == nil { + r.params = make(url.Values) + } + r.params[paramName] = append(r.params[paramName], value) + return r +} + +func (r *Request) SetHeader(key string, values ...string) *Request { + if r.headers == nil { + r.headers = http.Header{} + } + r.headers.Del(key) + for _, value := range values { + r.headers.Add(key, value) + } + return r +} + +// Timeout makes the request use the given duration as an overall timeout for the +// request. Additionally, if set passes the value as "timeout" parameter in URL. +func (r *Request) Timeout(d time.Duration) *Request { + if r.err != nil { + return r + } + r.timeout = d + return r +} + +// Body makes the request use obj as the body. Optional. +// If obj is a string, try to read a file of that name. +// If obj is a []byte, send it directly. +// If obj is an io.Reader, use it directly. +// If obj is a runtime.Object, marshal it correctly, and set Content-Type header. +// If obj is a runtime.Object and nil, do nothing. +// Otherwise, set an error. +func (r *Request) Body(obj interface{}) *Request { + if r.err != nil { + return r + } + switch t := obj.(type) { + case string: + data, err := ioutil.ReadFile(t) + if err != nil { + r.err = err + return r + } + glogBody("Request Body", data) + r.body = bytes.NewReader(data) + case []byte: + glogBody("Request Body", t) + r.body = bytes.NewReader(t) + case io.Reader: + r.body = t + case runtime.Object: + // callers may pass typed interface pointers, therefore we must check nil with reflection + if reflect.ValueOf(t).IsNil() { + return r + } + data, err := runtime.Encode(r.serializers.Encoder, t) + if err != nil { + r.err = err + return r + } + glogBody("Request Body", data) + r.body = bytes.NewReader(data) + r.SetHeader("Content-Type", r.content.ContentType) + default: + r.err = fmt.Errorf("unknown type used for body: %+v", obj) + } + return r +} + +// Context adds a context to the request. Contexts are only used for +// timeouts, deadlines, and cancellations. +func (r *Request) Context(ctx context.Context) *Request { + r.ctx = ctx + return r +} + +// URL returns the current working URL. 
+func (r *Request) URL() *url.URL { + p := r.pathPrefix + if r.namespaceSet && len(r.namespace) > 0 { + p = path.Join(p, "namespaces", r.namespace) + } + if len(r.resource) != 0 { + p = path.Join(p, strings.ToLower(r.resource)) + } + // Join trims trailing slashes, so preserve r.pathPrefix's trailing slash for backwards compatibility if nothing was changed + if len(r.resourceName) != 0 || len(r.subpath) != 0 || len(r.subresource) != 0 { + p = path.Join(p, r.resourceName, r.subresource, r.subpath) + } + + finalURL := &url.URL{} + if r.baseURL != nil { + *finalURL = *r.baseURL + } + finalURL.Path = p + + query := url.Values{} + for key, values := range r.params { + for _, value := range values { + query.Add(key, value) + } + } + + // timeout is handled specially here. + if r.timeout != 0 { + query.Set("timeout", r.timeout.String()) + } + finalURL.RawQuery = query.Encode() + return finalURL +} + +// finalURLTemplate is similar to URL(), but will make all specific parameter values equal +// - instead of name or namespace, "{name}" and "{namespace}" will be used, and all query +// parameters will be reset. This creates a copy of the url so as not to change the +// underlying object. +func (r Request) finalURLTemplate() url.URL { + newParams := url.Values{} + v := []string{"{value}"} + for k := range r.params { + newParams[k] = v + } + r.params = newParams + url := r.URL() + segments := strings.Split(r.URL().Path, "/") + groupIndex := 0 + index := 0 + if r.URL() != nil && r.baseURL != nil && strings.Contains(r.URL().Path, r.baseURL.Path) { + groupIndex += len(strings.Split(r.baseURL.Path, "/")) + } + if groupIndex >= len(segments) { + return *url + } + + const CoreGroupPrefix = "api" + const NamedGroupPrefix = "apis" + isCoreGroup := segments[groupIndex] == CoreGroupPrefix + isNamedGroup := segments[groupIndex] == NamedGroupPrefix + if isCoreGroup { + // checking the case of core group with /api/v1/... format + index = groupIndex + 2 + } else if isNamedGroup { + // checking the case of named group with /apis/apps/v1/... format + index = groupIndex + 3 + } else { + // this should not happen that the only two possibilities are /api... and /apis..., just want to put an + // outlet here in case more API groups are added in future if ever possible: + // https://kubernetes.io/docs/concepts/overview/kubernetes-api/#api-groups + // if a wrong API groups name is encountered, return the {prefix} for url.Path + url.Path = "/{prefix}" + url.RawQuery = "" + return *url + } + //switch segLength := len(segments) - index; segLength { + switch { + // case len(segments) - index == 1: + // resource (with no name) do nothing + case len(segments)-index == 2: + // /$RESOURCE/$NAME: replace $NAME with {name} + segments[index+1] = "{name}" + case len(segments)-index == 3: + if segments[index+2] == "finalize" || segments[index+2] == "status" { + // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name} + segments[index+1] = "{name}" + } else { + // /namespace/$NAMESPACE/$RESOURCE: replace $NAMESPACE with {namespace} + segments[index+1] = "{namespace}" + } + case len(segments)-index >= 4: + segments[index+1] = "{namespace}" + // /namespace/$NAMESPACE/$RESOURCE/$NAME: replace $NAMESPACE with {namespace}, $NAME with {name} + if segments[index+3] != "finalize" && segments[index+3] != "status" { + // /$RESOURCE/$NAME/$SUBRESOURCE: replace $NAME with {name} + segments[index+3] = "{name}" + } + } + url.Path = path.Join(segments...) 
+ return *url +} + +func (r *Request) tryThrottle() { + now := time.Now() + if r.throttle != nil { + r.throttle.Accept() + } + if latency := time.Since(now); latency > longThrottleLatency { + klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + } +} + +// Watch attempts to begin watching the requested location. +// Returns a watch.Interface, or an error. +func (r *Request) Watch() (watch.Interface, error) { + return r.WatchWithSpecificDecoders( + func(body io.ReadCloser) streaming.Decoder { + framer := r.serializers.Framer.NewFrameReader(body) + return streaming.NewDecoder(framer, r.serializers.StreamingSerializer) + }, + r.serializers.Decoder, + ) +} + +// WatchWithSpecificDecoders attempts to begin watching the requested location with a *different* decoder. +// Turns out that you want one "standard" decoder for the watch event and one "personal" decoder for the content +// Returns a watch.Interface, or an error. +func (r *Request) WatchWithSpecificDecoders(wrapperDecoderFn func(io.ReadCloser) streaming.Decoder, embeddedDecoder runtime.Decoder) (watch.Interface, error) { + // We specifically don't want to rate limit watches, so we + // don't use r.throttle here. + if r.err != nil { + return nil, r.err + } + if r.serializers.Framer == nil { + return nil, fmt.Errorf("watching resources is not possible with this client (content-type: %s)", r.content.ContentType) + } + + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, r.body) + if err != nil { + return nil, err + } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } + req.Header = r.headers + client := r.client + if client == nil { + client = http.DefaultClient + } + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if r.baseURL != nil { + if err != nil { + r.backoffMgr.UpdateBackoff(r.baseURL, err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.baseURL, err, resp.StatusCode) + } + } + if err != nil { + // The watch stream mechanism handles many common partial data errors, so closed + // connections can be retried in many cases. + if net.IsProbableEOF(err) { + return watch.NewEmptyWatch(), nil + } + return nil, err + } + if resp.StatusCode != http.StatusOK { + defer resp.Body.Close() + if result := r.transformResponse(resp, req); result.err != nil { + return nil, result.err + } + return nil, fmt.Errorf("for request '%+v', got status: %v", url, resp.StatusCode) + } + wrapperDecoder := wrapperDecoderFn(resp.Body) + return watch.NewStreamWatcher(restclientwatch.NewDecoder(wrapperDecoder, embeddedDecoder)), nil +} + +// updateURLMetrics is a convenience function for pushing metrics. +// It also handles corner cases for incomplete/invalid request data. +func updateURLMetrics(req *Request, resp *http.Response, err error) { + url := "none" + if req.baseURL != nil { + url = req.baseURL.Host + } + + // Errors can be arbitrary strings. Unbound label cardinality is not suitable for a metric + // system so we just report them as ``. + if err != nil { + metrics.RequestResult.Increment("", req.verb, url) + } else { + //Metrics for failure codes + metrics.RequestResult.Increment(strconv.Itoa(resp.StatusCode), req.verb, url) + } +} + +// Stream formats and executes the request, and offers streaming of the response. +// Returns io.ReadCloser which could be used for streaming of the response, or an error +// Any non-2xx http status code causes an error. 
If we get a non-2xx code, we try to convert the body into an APIStatus object. +// If we can, we return that as an error. Otherwise, we create an error that lists the http status and the content of the response. +func (r *Request) Stream() (io.ReadCloser, error) { + if r.err != nil { + return nil, r.err + } + + r.tryThrottle() + + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, nil) + if err != nil { + return nil, err + } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } + req.Header = r.headers + client := r.client + if client == nil { + client = http.DefaultClient + } + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if r.baseURL != nil { + if err != nil { + r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + } + if err != nil { + return nil, err + } + + switch { + case (resp.StatusCode >= 200) && (resp.StatusCode < 300): + return resp.Body, nil + + default: + // ensure we close the body before returning the error + defer resp.Body.Close() + + result := r.transformResponse(resp, req) + err := result.Error() + if err == nil { + err = fmt.Errorf("%d while accessing %v: %s", result.statusCode, url, string(result.body)) + } + return nil, err + } +} + +// request connects to the server and invokes the provided function when a server response is +// received. It handles retry behavior and up front validation of requests. It will invoke +// fn at most once. It will return an error if a problem occurred prior to connecting to the +// server - the provided function is responsible for handling server errors. +func (r *Request) request(fn func(*http.Request, *http.Response)) error { + //Metrics for total request latency + start := time.Now() + defer func() { + metrics.RequestLatency.Observe(r.verb, r.finalURLTemplate(), time.Since(start)) + }() + + if r.err != nil { + klog.V(4).Infof("Error in request: %v", r.err) + return r.err + } + + // TODO: added to catch programmer errors (invoking operations with an object with an empty namespace) + if (r.verb == "GET" || r.verb == "PUT" || r.verb == "DELETE") && r.namespaceSet && len(r.resourceName) > 0 && len(r.namespace) == 0 { + return fmt.Errorf("an empty namespace may not be set when a resource name is provided") + } + if (r.verb == "POST") && r.namespaceSet && len(r.namespace) == 0 { + return fmt.Errorf("an empty namespace may not be set during creation") + } + + client := r.client + if client == nil { + client = http.DefaultClient + } + + // Right now we make about ten retry attempts if we get a Retry-After response. + maxRetries := 10 + retries := 0 + for { + url := r.URL().String() + req, err := http.NewRequest(r.verb, url, r.body) + if err != nil { + return err + } + if r.timeout > 0 { + if r.ctx == nil { + r.ctx = context.Background() + } + var cancelFn context.CancelFunc + r.ctx, cancelFn = context.WithTimeout(r.ctx, r.timeout) + defer cancelFn() + } + if r.ctx != nil { + req = req.WithContext(r.ctx) + } + req.Header = r.headers + + r.backoffMgr.Sleep(r.backoffMgr.CalculateBackoff(r.URL())) + if retries > 0 { + // We are retrying the request that we already send to apiserver + // at least once before. + // This request should also be throttled with the client-internal throttler. 
+ r.tryThrottle() + } + resp, err := client.Do(req) + updateURLMetrics(r, resp, err) + if err != nil { + r.backoffMgr.UpdateBackoff(r.URL(), err, 0) + } else { + r.backoffMgr.UpdateBackoff(r.URL(), err, resp.StatusCode) + } + if err != nil { + // "Connection reset by peer" is usually a transient error. + // Thus in case of "GET" operations, we simply retry it. + // We are not automatically retrying "write" operations, as + // they are not idempotent. + if !net.IsConnectionReset(err) || r.verb != "GET" { + return err + } + // For the purpose of retry, we set the artificial "retry-after" response. + // TODO: Should we clean the original response if it exists? + resp = &http.Response{ + StatusCode: http.StatusInternalServerError, + Header: http.Header{"Retry-After": []string{"1"}}, + Body: ioutil.NopCloser(bytes.NewReader([]byte{})), + } + } + + done := func() bool { + // Ensure the response body is fully read and closed + // before we reconnect, so that we reuse the same TCP + // connection. + defer func() { + const maxBodySlurpSize = 2 << 10 + if resp.ContentLength <= maxBodySlurpSize { + io.Copy(ioutil.Discard, &io.LimitedReader{R: resp.Body, N: maxBodySlurpSize}) + } + resp.Body.Close() + }() + + retries++ + if seconds, wait := checkWait(resp); wait && retries < maxRetries { + if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { + _, err := seeker.Seek(0, 0) + if err != nil { + klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + fn(req, resp) + return true + } + } + + klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) + r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) + return false + } + fn(req, resp) + return true + }() + if done { + return nil + } + } +} + +// Do formats and executes the request. Returns a Result object for easy response +// processing. +// +// Error type: +// * If the request can't be constructed, or an error happened earlier while building its +// arguments: *RequestConstructionError +// * If the server responds with a status: *errors.StatusError or *errors.UnexpectedObjectError +// * http.Client.Do errors are returned directly. +func (r *Request) Do() Result { + r.tryThrottle() + + var result Result + err := r.request(func(req *http.Request, resp *http.Response) { + result = r.transformResponse(resp, req) + }) + if err != nil { + return Result{err: err} + } + return result +} + +// DoRaw executes the request but does not process the response body. +func (r *Request) DoRaw() ([]byte, error) { + r.tryThrottle() + + var result Result + err := r.request(func(req *http.Request, resp *http.Response) { + result.body, result.err = ioutil.ReadAll(resp.Body) + glogBody("Response Body", result.body) + if resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent { + result.err = r.transformUnstructuredResponseError(resp, req, result.body) + } + }) + if err != nil { + return nil, err + } + return result.body, result.err +} + +// transformResponse converts an API response into a structured API object +func (r *Request) transformResponse(resp *http.Response, req *http.Request) Result { + var body []byte + if resp.Body != nil { + data, err := ioutil.ReadAll(resp.Body) + switch err.(type) { + case nil: + body = data + case http2.StreamError: + // This is trying to catch the scenario that the server may close the connection when sending the + // response body. This can be caused by server timeout due to a slow network connection. + // TODO: Add test for this. 
Steps may be: + // 1. client-go (or kubectl) sends a GET request. + // 2. Apiserver sends back the headers and then part of the body + // 3. Apiserver closes connection. + // 4. client-go should catch this and return an error. + klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) + return Result{ + err: streamErr, + } + default: + klog.Errorf("Unexpected error when reading response body: %#v", err) + unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err) + return Result{ + err: unexpectedErr, + } + } + } + + glogBody("Response Body", body) + + // verify the content type is accurate + contentType := resp.Header.Get("Content-Type") + decoder := r.serializers.Decoder + if len(contentType) > 0 && (decoder == nil || (len(r.content.ContentType) > 0 && contentType != r.content.ContentType)) { + mediaType, params, err := mime.ParseMediaType(contentType) + if err != nil { + return Result{err: errors.NewInternalError(err)} + } + decoder, err = r.serializers.RenegotiatedDecoder(mediaType, params) + if err != nil { + // if we fail to negotiate a decoder, treat this as an unstructured error + switch { + case resp.StatusCode == http.StatusSwitchingProtocols: + // no-op, we've been upgraded + case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: + return Result{err: r.transformUnstructuredResponseError(resp, req, body)} + } + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + } + } + } + + switch { + case resp.StatusCode == http.StatusSwitchingProtocols: + // no-op, we've been upgraded + case resp.StatusCode < http.StatusOK || resp.StatusCode > http.StatusPartialContent: + // calculate an unstructured error from the response which the Result object may use if the caller + // did not return a structured error. + retryAfter, _ := retryAfterSeconds(resp) + err := r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter) + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + decoder: decoder, + err: err, + } + } + + return Result{ + body: body, + contentType: contentType, + statusCode: resp.StatusCode, + decoder: decoder, + } +} + +// truncateBody decides if the body should be truncated, based on the glog Verbosity. +func truncateBody(body string) string { + max := 0 + switch { + case bool(klog.V(10)): + return body + case bool(klog.V(9)): + max = 10240 + case bool(klog.V(8)): + max = 1024 + } + + if len(body) <= max { + return body + } + + return body[:max] + fmt.Sprintf(" [truncated %d chars]", len(body)-max) +} + +// glogBody logs a body output that could be either JSON or protobuf. It explicitly guards against +// allocating a new string for the body output unless necessary. Uses a simple heuristic to determine +// whether the body is printable. +func glogBody(prefix string, body []byte) { + if klog.V(8) { + if bytes.IndexFunc(body, func(r rune) bool { + return r < 0x0a + }) != -1 { + klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + } else { + klog.Infof("%s: %s", prefix, truncateBody(string(body))) + } + } +} + +// maxUnstructuredResponseTextBytes is an upper bound on how much output to include in the unstructured error. 
+const maxUnstructuredResponseTextBytes = 2048 + +// transformUnstructuredResponseError handles an error from the server that is not in a structured form. +// It is expected to transform any response that is not recognizable as a clear server sent error from the +// K8S API using the information provided with the request. In practice, HTTP proxies and client libraries +// introduce a level of uncertainty to the responses returned by servers that in common use result in +// unexpected responses. The rough structure is: +// +// 1. Assume the server sends you something sane - JSON + well defined error objects + proper codes +// - this is the happy path +// - when you get this output, trust what the server sends +// 2. Guard against empty fields / bodies in received JSON and attempt to cull sufficient info from them to +// generate a reasonable facsimile of the original failure. +// - Be sure to use a distinct error type or flag that allows a client to distinguish between this and error 1 above +// 3. Handle true disconnect failures / completely malformed data by moving up to a more generic client error +// 4. Distinguish between various connection failures like SSL certificates, timeouts, proxy errors, unexpected +// initial contact, the presence of mismatched body contents from posted content types +// - Give these a separate distinct error type and capture as much as possible of the original message +// +// TODO: introduce transformation of generic http.Client.Do() errors that separates 4. +func (r *Request) transformUnstructuredResponseError(resp *http.Response, req *http.Request, body []byte) error { + if body == nil && resp.Body != nil { + if data, err := ioutil.ReadAll(&io.LimitedReader{R: resp.Body, N: maxUnstructuredResponseTextBytes}); err == nil { + body = data + } + } + retryAfter, _ := retryAfterSeconds(resp) + return r.newUnstructuredResponseError(body, isTextResponse(resp), resp.StatusCode, req.Method, retryAfter) +} + +// newUnstructuredResponseError instantiates the appropriate generic error for the provided input. It also logs the body. +func (r *Request) newUnstructuredResponseError(body []byte, isTextResponse bool, statusCode int, method string, retryAfter int) error { + // cap the amount of output we create + if len(body) > maxUnstructuredResponseTextBytes { + body = body[:maxUnstructuredResponseTextBytes] + } + + message := "unknown" + if isTextResponse { + message = strings.TrimSpace(string(body)) + } + var groupResource schema.GroupResource + if len(r.resource) > 0 { + groupResource.Group = r.content.GroupVersion.Group + groupResource.Resource = r.resource + } + return errors.NewGenericServerResponse( + statusCode, + method, + groupResource, + r.resourceName, + message, + retryAfter, + true, + ) +} + +// isTextResponse returns true if the response appears to be a textual media type. +func isTextResponse(resp *http.Response) bool { + contentType := resp.Header.Get("Content-Type") + if len(contentType) == 0 { + return true + } + media, _, err := mime.ParseMediaType(contentType) + if err != nil { + return false + } + return strings.HasPrefix(media, "text/") +} + +// checkWait returns true along with a number of seconds if the server instructed us to wait +// before retrying. 
+func checkWait(resp *http.Response) (int, bool) { + switch r := resp.StatusCode; { + // any 500 error code and 429 can trigger a wait + case r == http.StatusTooManyRequests, r >= 500: + default: + return 0, false + } + i, ok := retryAfterSeconds(resp) + return i, ok +} + +// retryAfterSeconds returns the value of the Retry-After header and true, or 0 and false if +// the header was missing or not a valid number. +func retryAfterSeconds(resp *http.Response) (int, bool) { + if h := resp.Header.Get("Retry-After"); len(h) > 0 { + if i, err := strconv.Atoi(h); err == nil { + return i, true + } + } + return 0, false +} + +// Result contains the result of calling Request.Do(). +type Result struct { + body []byte + contentType string + err error + statusCode int + + decoder runtime.Decoder +} + +// Raw returns the raw result. +func (r Result) Raw() ([]byte, error) { + return r.body, r.err +} + +// Get returns the result as an object, which means it passes through the decoder. +// If the returned object is of type Status and has .Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. +func (r Result) Get() (runtime.Object, error) { + if r.err != nil { + // Check whether the result has a Status object in the body and prefer that. + return nil, r.Error() + } + if r.decoder == nil { + return nil, fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } + + // decode, but if the result is Status return that as an error instead. + out, _, err := r.decoder.Decode(r.body, nil, nil) + if err != nil { + return nil, err + } + switch t := out.(type) { + case *metav1.Status: + // any status besides StatusSuccess is considered an error. + if t.Status != metav1.StatusSuccess { + return nil, errors.FromObject(t) + } + } + return out, nil +} + +// StatusCode returns the HTTP status code of the request. (Only valid if no +// error was returned.) +func (r Result) StatusCode(statusCode *int) Result { + *statusCode = r.statusCode + return r +} + +// Into stores the result into obj, if possible. If obj is nil it is ignored. +// If the returned object is of type Status and has .Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. +func (r Result) Into(obj runtime.Object) error { + if r.err != nil { + // Check whether the result has a Status object in the body and prefer that. + return r.Error() + } + if r.decoder == nil { + return fmt.Errorf("serializer for %s doesn't exist", r.contentType) + } + if len(r.body) == 0 { + return fmt.Errorf("0-length response with status code: %d and content type: %s", + r.statusCode, r.contentType) + } + + out, _, err := r.decoder.Decode(r.body, nil, obj) + if err != nil || out == obj { + return err + } + // if a different object is returned, see if it is Status and avoid double decoding + // the object. + switch t := out.(type) { + case *metav1.Status: + // any status besides StatusSuccess is considered an error. + if t.Status != metav1.StatusSuccess { + return errors.FromObject(t) + } + } + return nil +} + +// WasCreated updates the provided bool pointer to whether the server returned +// 201 created or a different response. +func (r Result) WasCreated(wasCreated *bool) Result { + *wasCreated = r.statusCode == http.StatusCreated + return r +} + +// Error returns the error executing the request, nil if no error occurred. +// If the returned object is of type Status and has Status != StatusSuccess, the +// additional information in Status will be used to enrich the error. 
+// See the Request.Do() comment for what errors you might get. +func (r Result) Error() error { + // if we have received an unexpected server error, and we have a body and decoder, we can try to extract + // a Status object. + if r.err == nil || !errors.IsUnexpectedServerError(r.err) || len(r.body) == 0 || r.decoder == nil { + return r.err + } + + // attempt to convert the body into a Status object + // to be backwards compatible with old servers that do not return a version, default to "v1" + out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) + if err != nil { + klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + return r.err + } + switch t := out.(type) { + case *metav1.Status: + // because we default the kind, we *must* check for StatusFailure + if t.Status == metav1.StatusFailure { + return errors.FromObject(t) + } + } + return r.err +} + +// NameMayNotBe specifies strings that cannot be used as names specified as path segments (like the REST API or etcd store) +var NameMayNotBe = []string{".", ".."} + +// NameMayNotContain specifies substrings that cannot be used in names specified as path segments (like the REST API or etcd store) +var NameMayNotContain = []string{"/", "%"} + +// IsValidPathSegmentName validates the name can be safely encoded as a path segment +func IsValidPathSegmentName(name string) []string { + for _, illegalName := range NameMayNotBe { + if name == illegalName { + return []string{fmt.Sprintf(`may not be '%s'`, illegalName)} + } + } + + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment +// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid +func IsValidPathSegmentPrefix(name string) []string { + var errors []string + for _, illegalContent := range NameMayNotContain { + if strings.Contains(name, illegalContent) { + errors = append(errors, fmt.Sprintf(`may not contain '%s'`, illegalContent)) + } + } + + return errors +} + +// ValidatePathSegmentName validates the name can be safely encoded as a path segment +func ValidatePathSegmentName(name string, prefix bool) []string { + if prefix { + return IsValidPathSegmentPrefix(name) + } + return IsValidPathSegmentName(name) +} diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go new file mode 100644 index 0000000..bd5749d --- /dev/null +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -0,0 +1,117 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package rest + +import ( + "crypto/tls" + "errors" + "net/http" + + "k8s.io/client-go/plugin/pkg/client/auth/exec" + "k8s.io/client-go/transport" +) + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. +func TLSConfigFor(config *Config) (*tls.Config, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.TLSConfigFor(cfg) +} + +// TransportFor returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. Will return the +// default http.DefaultTransport if no special case behavior is needed. +func TransportFor(config *Config) (http.RoundTripper, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.New(cfg) +} + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered behavior from the +// config. Exposed to allow more clients that need HTTP-like behavior but then must hijack +// the underlying connection (like WebSocket or HTTP2 clients). Pure HTTP clients should use +// the higher level TransportFor or RESTClientFor methods. +func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + cfg, err := config.TransportConfig() + if err != nil { + return nil, err + } + return transport.HTTPWrappersForConfig(cfg, rt) +} + +// TransportConfig converts a client config to an appropriate transport config. +func (c *Config) TransportConfig() (*transport.Config, error) { + conf := &transport.Config{ + UserAgent: c.UserAgent, + Transport: c.Transport, + WrapTransport: c.WrapTransport, + TLS: transport.TLSConfig{ + Insecure: c.Insecure, + ServerName: c.ServerName, + CAFile: c.CAFile, + CAData: c.CAData, + CertFile: c.CertFile, + CertData: c.CertData, + KeyFile: c.KeyFile, + KeyData: c.KeyData, + }, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + Impersonate: transport.ImpersonationConfig{ + UserName: c.Impersonate.UserName, + Groups: c.Impersonate.Groups, + Extra: c.Impersonate.Extra, + }, + Dial: c.Dial, + } + + if c.ExecProvider != nil && c.AuthProvider != nil { + return nil, errors.New("execProvider and authProvider cannot be used in combination") + } + + if c.ExecProvider != nil { + provider, err := exec.GetAuthenticator(c.ExecProvider) + if err != nil { + return nil, err + } + if err := provider.UpdateTransportConfig(conf); err != nil { + return nil, err + } + } + if c.AuthProvider != nil { + provider, err := GetAuthProvider(c.Host, c.AuthProvider, c.AuthConfigPersister) + if err != nil { + return nil, err + } + conf.Wrap(provider.WrapTransport) + } + return conf, nil +} + +// Wrap adds a transport middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper prior to the +// first API call being made. The provided function is invoked after any +// existing transport wrappers are invoked. +func (c *Config) Wrap(fn transport.WrapperFunc) { + c.WrapTransport = transport.Wrappers(c.WrapTransport, fn) +} diff --git a/vendor/k8s.io/client-go/rest/url_utils.go b/vendor/k8s.io/client-go/rest/url_utils.go new file mode 100644 index 0000000..a56d183 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/url_utils.go @@ -0,0 +1,97 @@ +/* +Copyright 2016 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "fmt" + "net/url" + "path" + + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// DefaultServerURL converts a host, host:port, or URL string to the default base server API path +// to use with a Client at a given API version following the standard conventions for a +// Kubernetes API. +func DefaultServerURL(host, apiPath string, groupVersion schema.GroupVersion, defaultTLS bool) (*url.URL, string, error) { + if host == "" { + return nil, "", fmt.Errorf("host must be a URL or a host:port pair") + } + base := host + hostURL, err := url.Parse(base) + if err != nil || hostURL.Scheme == "" || hostURL.Host == "" { + scheme := "http://" + if defaultTLS { + scheme = "https://" + } + hostURL, err = url.Parse(scheme + base) + if err != nil { + return nil, "", err + } + if hostURL.Path != "" && hostURL.Path != "/" { + return nil, "", fmt.Errorf("host must be a URL or a host:port pair: %q", base) + } + } + + // hostURL.Path is optional; a non-empty Path is treated as a prefix that is to be applied to + // all URIs used to access the host. this is useful when there's a proxy in front of the + // apiserver that has relocated the apiserver endpoints, forwarding all requests from, for + // example, /a/b/c to the apiserver. in this case the Path should be /a/b/c. + // + // if running without a frontend proxy (that changes the location of the apiserver), then + // hostURL.Path should be blank. + // + // versionedAPIPath, a path relative to baseURL.Path, points to a versioned API base + versionedAPIPath := DefaultVersionedAPIPath(apiPath, groupVersion) + + return hostURL, versionedAPIPath, nil +} + +// DefaultVersionedAPIPathFor constructs the default path for the given group version, assuming the given +// API path, following the standard conventions of the Kubernetes API. +func DefaultVersionedAPIPath(apiPath string, groupVersion schema.GroupVersion) string { + versionedAPIPath := path.Join("/", apiPath) + + // Add the version to the end of the path + if len(groupVersion.Group) > 0 { + versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Group, groupVersion.Version) + + } else { + versionedAPIPath = path.Join(versionedAPIPath, groupVersion.Version) + } + + return versionedAPIPath +} + +// defaultServerUrlFor is shared between IsConfigTransportTLS and RESTClientFor. It +// requires Host and Version to be set prior to being called. +func defaultServerUrlFor(config *Config) (*url.URL, string, error) { + // TODO: move the default to secure when the apiserver supports TLS by default + // config.Insecure is taken to mean "I want HTTPS but don't bother checking the certs against a CA." 
+ hasCA := len(config.CAFile) != 0 || len(config.CAData) != 0 + hasCert := len(config.CertFile) != 0 || len(config.CertData) != 0 + defaultTLS := hasCA || hasCert || config.Insecure + host := config.Host + if host == "" { + host = "localhost" + } + + if config.GroupVersion != nil { + return DefaultServerURL(host, config.APIPath, *config.GroupVersion, defaultTLS) + } + return DefaultServerURL(host, config.APIPath, schema.GroupVersion{}, defaultTLS) +} diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go new file mode 100644 index 0000000..d00e42f --- /dev/null +++ b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -0,0 +1,107 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package rest + +import ( + "net/url" + "time" + + "k8s.io/apimachinery/pkg/util/sets" + "k8s.io/client-go/util/flowcontrol" + "k8s.io/klog" +) + +// Set of resp. Codes that we backoff for. +// In general these should be errors that indicate a server is overloaded. +// These shouldn't be configured by any user, we set them based on conventions +// described in +var serverIsOverloadedSet = sets.NewInt(429) +var maxResponseCode = 499 + +type BackoffManager interface { + UpdateBackoff(actualUrl *url.URL, err error, responseCode int) + CalculateBackoff(actualUrl *url.URL) time.Duration + Sleep(d time.Duration) +} + +// URLBackoff struct implements the semantics on top of Backoff which +// we need for URL specific exponential backoff. +type URLBackoff struct { + // Uses backoff as underlying implementation. + Backoff *flowcontrol.Backoff +} + +// NoBackoff is a stub implementation, can be used for mocking or else as a default. +type NoBackoff struct { +} + +func (n *NoBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) { + // do nothing. +} + +func (n *NoBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration { + return 0 * time.Second +} + +func (n *NoBackoff) Sleep(d time.Duration) { + time.Sleep(d) +} + +// Disable makes the backoff trivial, i.e., sets it to zero. This might be used +// by tests which want to run 1000s of mock requests without slowing down. +func (b *URLBackoff) Disable() { + klog.V(4).Infof("Disabling backoff strategy") + b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) +} + +// baseUrlKey returns the key which urls will be mapped to. +// For example, 127.0.0.1:8080/api/v2/abcde -> 127.0.0.1:8080. +func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { + // Simple implementation for now, just the host. + // We may backoff specific paths (i.e. "pods") differentially + // in the future. 
+ host, err := url.Parse(rawurl.String()) + if err != nil { + klog.V(4).Infof("Error extracting url: %v", rawurl) + panic("bad url!") + } + return host.Host +} + +// UpdateBackoff updates backoff metadata +func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode int) { + // range for retry counts that we store is [0,13] + if responseCode > maxResponseCode || serverIsOverloadedSet.Has(responseCode) { + b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) + return + } else if responseCode >= 300 || err != nil { + klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + } + + //If we got this far, there is no backoff required for this URL anymore. + b.Backoff.Reset(b.baseUrlKey(actualUrl)) +} + +// CalculateBackoff takes a url and back's off exponentially, +// based on its knowledge of existing failures. +func (b *URLBackoff) CalculateBackoff(actualUrl *url.URL) time.Duration { + return b.Backoff.Get(b.baseUrlKey(actualUrl)) +} + +func (b *URLBackoff) Sleep(d time.Duration) { + b.Backoff.Clock.Sleep(d) +} diff --git a/vendor/k8s.io/client-go/rest/watch/decoder.go b/vendor/k8s.io/client-go/rest/watch/decoder.go new file mode 100644 index 0000000..73bb63a --- /dev/null +++ b/vendor/k8s.io/client-go/rest/watch/decoder.go @@ -0,0 +1,72 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioned + +import ( + "fmt" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" +) + +// Decoder implements the watch.Decoder interface for io.ReadClosers that +// have contents which consist of a series of watchEvent objects encoded +// with the given streaming decoder. The internal objects will be then +// decoded by the embedded decoder. +type Decoder struct { + decoder streaming.Decoder + embeddedDecoder runtime.Decoder +} + +// NewDecoder creates an Decoder for the given writer and codec. +func NewDecoder(decoder streaming.Decoder, embeddedDecoder runtime.Decoder) *Decoder { + return &Decoder{ + decoder: decoder, + embeddedDecoder: embeddedDecoder, + } +} + +// Decode blocks until it can return the next object in the reader. Returns an error +// if the reader is closed or an object can't be decoded. 
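// Editorial sketch (not part of the vendored file): a minimal consumption
// loop for the Decoder defined in this file. The streaming decoder sd, the
// embedded decoder ed and the handle callback are assumptions; in client-go
// they normally come from the serializer negotiated for the watch stream.
func exampleWatchLoop(sd streaming.Decoder, ed runtime.Decoder, handle func(watch.EventType, runtime.Object)) error {
	d := NewDecoder(sd, ed)
	defer d.Close()
	for {
		eventType, obj, err := d.Decode()
		if err != nil {
			return err // typically io.EOF once the server closes the stream
		}
		handle(eventType, obj) // eventType is Added, Modified, Deleted or Error
	}
}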
+func (d *Decoder) Decode() (watch.EventType, runtime.Object, error) { + var got metav1.WatchEvent + res, _, err := d.decoder.Decode(nil, &got) + if err != nil { + return "", nil, err + } + if res != &got { + return "", nil, fmt.Errorf("unable to decode to metav1.Event") + } + switch got.Type { + case string(watch.Added), string(watch.Modified), string(watch.Deleted), string(watch.Error): + default: + return "", nil, fmt.Errorf("got invalid watch event type: %v", got.Type) + } + + obj, err := runtime.Decode(d.embeddedDecoder, got.Object.Raw) + if err != nil { + return "", nil, fmt.Errorf("unable to decode watch event: %v", err) + } + return watch.EventType(got.Type), obj, nil +} + +// Close closes the underlying r. +func (d *Decoder) Close() { + d.decoder.Close() +} diff --git a/vendor/k8s.io/client-go/rest/watch/encoder.go b/vendor/k8s.io/client-go/rest/watch/encoder.go new file mode 100644 index 0000000..e55aa12 --- /dev/null +++ b/vendor/k8s.io/client-go/rest/watch/encoder.go @@ -0,0 +1,56 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package versioned + +import ( + "encoding/json" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/streaming" + "k8s.io/apimachinery/pkg/watch" +) + +// Encoder serializes watch.Events into io.Writer. The internal objects +// are encoded using embedded encoder, and the outer Event is serialized +// using encoder. +// TODO: this type is only used by tests +type Encoder struct { + encoder streaming.Encoder + embeddedEncoder runtime.Encoder +} + +func NewEncoder(encoder streaming.Encoder, embeddedEncoder runtime.Encoder) *Encoder { + return &Encoder{ + encoder: encoder, + embeddedEncoder: embeddedEncoder, + } +} + +// Encode writes an event to the writer. Returns an error +// if the writer is closed or an object can't be encoded. +func (e *Encoder) Encode(event *watch.Event) error { + data, err := runtime.Encode(e.embeddedEncoder, event.Object) + if err != nil { + return err + } + // FIXME: get rid of json.RawMessage. + return e.encoder.Encode(&metav1.WatchEvent{ + Type: string(event.Type), + Object: runtime.RawExtension{Raw: json.RawMessage(data)}, + }) +} diff --git a/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go new file mode 100644 index 0000000..c1ab45f --- /dev/null +++ b/vendor/k8s.io/client-go/rest/zz_generated.deepcopy.go @@ -0,0 +1,52 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package rest + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TLSClientConfig) DeepCopyInto(out *TLSClientConfig) { + *out = *in + if in.CertData != nil { + in, out := &in.CertData, &out.CertData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.KeyData != nil { + in, out := &in.KeyData, &out.KeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.CAData != nil { + in, out := &in.CAData, &out.CAData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TLSClientConfig. +func (in *TLSClientConfig) DeepCopy() *TLSClientConfig { + if in == nil { + return nil + } + out := new(TLSClientConfig) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go new file mode 100644 index 0000000..5871575 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -0,0 +1,19 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package + +package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go new file mode 100644 index 0000000..65a3693 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/helpers.go @@ -0,0 +1,188 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "encoding/base64" + "errors" + "fmt" + "io/ioutil" + "os" + "path" + "path/filepath" +) + +func init() { + sDec, _ := base64.StdEncoding.DecodeString("REDACTED+") + redactedBytes = []byte(string(sDec)) + sDec, _ = base64.StdEncoding.DecodeString("DATA+OMITTED") + dataOmittedBytes = []byte(string(sDec)) +} + +// IsConfigEmpty returns true if the config is empty. 
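// Editorial sketch (not part of the vendored file): combining the helpers
// defined further down in this file. MinifyConfig keeps only the pieces
// reachable from the current context, and FlattenConfig inlines any
// certificate, key and CA files so the result is self-contained, which is
// the usual preparation before storing a kubeconfig in a Secret. The cfg
// value is an assumption.
func exampleSelfContainedKubeconfig(cfg *Config) error {
	if err := MinifyConfig(cfg); err != nil {
		return err
	}
	if err := FlattenConfig(cfg); err != nil {
		return err
	}
	// For display rather than storage, ShortenConfig(cfg) would instead
	// redact the raw certificate and key bytes.
	return nil
}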
+func IsConfigEmpty(config *Config) bool { + return len(config.AuthInfos) == 0 && len(config.Clusters) == 0 && len(config.Contexts) == 0 && + len(config.CurrentContext) == 0 && + len(config.Preferences.Extensions) == 0 && !config.Preferences.Colors && + len(config.Extensions) == 0 +} + +// MinifyConfig read the current context and uses that to keep only the relevant pieces of config +// This is useful for making secrets based on kubeconfig files +func MinifyConfig(config *Config) error { + if len(config.CurrentContext) == 0 { + return errors.New("current-context must exist in order to minify") + } + + currContext, exists := config.Contexts[config.CurrentContext] + if !exists { + return fmt.Errorf("cannot locate context %v", config.CurrentContext) + } + + newContexts := map[string]*Context{} + newContexts[config.CurrentContext] = currContext + + newClusters := map[string]*Cluster{} + if len(currContext.Cluster) > 0 { + if _, exists := config.Clusters[currContext.Cluster]; !exists { + return fmt.Errorf("cannot locate cluster %v", currContext.Cluster) + } + + newClusters[currContext.Cluster] = config.Clusters[currContext.Cluster] + } + + newAuthInfos := map[string]*AuthInfo{} + if len(currContext.AuthInfo) > 0 { + if _, exists := config.AuthInfos[currContext.AuthInfo]; !exists { + return fmt.Errorf("cannot locate user %v", currContext.AuthInfo) + } + + newAuthInfos[currContext.AuthInfo] = config.AuthInfos[currContext.AuthInfo] + } + + config.AuthInfos = newAuthInfos + config.Clusters = newClusters + config.Contexts = newContexts + + return nil +} + +var ( + redactedBytes []byte + dataOmittedBytes []byte +) + +// Flatten redacts raw data entries from the config object for a human-readable view. +func ShortenConfig(config *Config) { + // trick json encoder into printing a human readable string in the raw data + // by base64 decoding what we want to print. 
Relies on implementation of + // http://golang.org/pkg/encoding/json/#Marshal using base64 to encode []byte + for key, authInfo := range config.AuthInfos { + if len(authInfo.ClientKeyData) > 0 { + authInfo.ClientKeyData = redactedBytes + } + if len(authInfo.ClientCertificateData) > 0 { + authInfo.ClientCertificateData = redactedBytes + } + config.AuthInfos[key] = authInfo + } + for key, cluster := range config.Clusters { + if len(cluster.CertificateAuthorityData) > 0 { + cluster.CertificateAuthorityData = dataOmittedBytes + } + config.Clusters[key] = cluster + } +} + +// Flatten changes the config object into a self contained config (useful for making secrets) +func FlattenConfig(config *Config) error { + for key, authInfo := range config.AuthInfos { + baseDir, err := MakeAbs(path.Dir(authInfo.LocationOfOrigin), "") + if err != nil { + return err + } + + if err := FlattenContent(&authInfo.ClientCertificate, &authInfo.ClientCertificateData, baseDir); err != nil { + return err + } + if err := FlattenContent(&authInfo.ClientKey, &authInfo.ClientKeyData, baseDir); err != nil { + return err + } + + config.AuthInfos[key] = authInfo + } + for key, cluster := range config.Clusters { + baseDir, err := MakeAbs(path.Dir(cluster.LocationOfOrigin), "") + if err != nil { + return err + } + + if err := FlattenContent(&cluster.CertificateAuthority, &cluster.CertificateAuthorityData, baseDir); err != nil { + return err + } + + config.Clusters[key] = cluster + } + + return nil +} + +func FlattenContent(path *string, contents *[]byte, baseDir string) error { + if len(*path) != 0 { + if len(*contents) > 0 { + return errors.New("cannot have values for both path and contents") + } + + var err error + absPath := ResolvePath(*path, baseDir) + *contents, err = ioutil.ReadFile(absPath) + if err != nil { + return err + } + + *path = "" + } + + return nil +} + +// ResolvePath returns the path as an absolute paths, relative to the given base directory +func ResolvePath(path string, base string) string { + // Don't resolve empty paths + if len(path) > 0 { + // Don't resolve absolute paths + if !filepath.IsAbs(path) { + return filepath.Join(base, path) + } + } + + return path +} + +func MakeAbs(path, base string) (string, error) { + if filepath.IsAbs(path) { + return path, nil + } + if len(base) == 0 { + cwd, err := os.Getwd() + if err != nil { + return "", err + } + base = cwd + } + return filepath.Join(base, path), nil +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/register.go b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go new file mode 100644 index 0000000..2eec388 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/register.go @@ -0,0 +1,46 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package api + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// SchemeGroupVersion is group version used to register these objects +// TODO this should be in the "kubeconfig" group +var SchemeGroupVersion = schema.GroupVersion{Group: "", Version: runtime.APIVersionInternal} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &Config{}, + ) + return nil +} + +func (obj *Config) GetObjectKind() schema.ObjectKind { return obj } +func (obj *Config) SetGroupVersionKind(gvk schema.GroupVersionKind) { + obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() +} +func (obj *Config) GroupVersionKind() schema.GroupVersionKind { + return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go new file mode 100644 index 0000000..990a440 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -0,0 +1,262 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package api + +import ( + "fmt" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Where possible, json tags match the cli argument names. +// Top level config objects and all values required for proper functioning are not "omitempty". Any truly optional piece of config is allowed to be omitted. + +// Config holds the information needed to build connect to remote kubernetes clusters as a given user +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type Config struct { + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + Kind string `json:"kind,omitempty"` + // Legacy field from pkg/api/types.go TypeMeta. + // TODO(jlowdermilk): remove this after eliminating downstream dependencies. + // +optional + APIVersion string `json:"apiVersion,omitempty"` + // Preferences holds general information to be use for cli interactions + Preferences Preferences `json:"preferences"` + // Clusters is a map of referencable names to cluster configs + Clusters map[string]*Cluster `json:"clusters"` + // AuthInfos is a map of referencable names to user configs + AuthInfos map[string]*AuthInfo `json:"users"` + // Contexts is a map of referencable names to context configs + Contexts map[string]*Context `json:"contexts"` + // CurrentContext is the name of the context that you would like to use by default + CurrentContext string `json:"current-context"` + // Extensions holds additional information. 
This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// IMPORTANT if you add fields to this struct, please update IsConfigEmpty() +type Preferences struct { + // +optional + Colors bool `json:"colors,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Cluster contains information about how to communicate with a kubernetes cluster +type Cluster struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Server is the address of the kubernetes cluster (https://hostname:port). + Server string `json:"server"` + // InsecureSkipTLSVerify skips the validity check for the server's certificate. This will make your HTTPS connections insecure. + // +optional + InsecureSkipTLSVerify bool `json:"insecure-skip-tls-verify,omitempty"` + // CertificateAuthority is the path to a cert file for the certificate authority. + // +optional + CertificateAuthority string `json:"certificate-authority,omitempty"` + // CertificateAuthorityData contains PEM-encoded certificate authority certificates. Overrides CertificateAuthority + // +optional + CertificateAuthorityData []byte `json:"certificate-authority-data,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthInfo contains information that describes identity information. This is use to tell the kubernetes cluster who you are. +type AuthInfo struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // ClientCertificate is the path to a client cert file for TLS. + // +optional + ClientCertificate string `json:"client-certificate,omitempty"` + // ClientCertificateData contains PEM-encoded data from a client cert file for TLS. Overrides ClientCertificate + // +optional + ClientCertificateData []byte `json:"client-certificate-data,omitempty"` + // ClientKey is the path to a client key file for TLS. + // +optional + ClientKey string `json:"client-key,omitempty"` + // ClientKeyData contains PEM-encoded data from a client key file for TLS. Overrides ClientKey + // +optional + ClientKeyData []byte `json:"client-key-data,omitempty"` + // Token is the bearer token for authentication to the kubernetes cluster. + // +optional + Token string `json:"token,omitempty"` + // TokenFile is a pointer to a file that contains a bearer token (as described above). If both Token and TokenFile are present, Token takes precedence. + // +optional + TokenFile string `json:"tokenFile,omitempty"` + // Impersonate is the username to act-as. + // +optional + Impersonate string `json:"act-as,omitempty"` + // ImpersonateGroups is the groups to imperonate. + // +optional + ImpersonateGroups []string `json:"act-as-groups,omitempty"` + // ImpersonateUserExtra contains additional information for impersonated user. + // +optional + ImpersonateUserExtra map[string][]string `json:"act-as-user-extra,omitempty"` + // Username is the username for basic authentication to the kubernetes cluster. 
+ // +optional + Username string `json:"username,omitempty"` + // Password is the password for basic authentication to the kubernetes cluster. + // +optional + Password string `json:"password,omitempty"` + // AuthProvider specifies a custom authentication plugin for the kubernetes cluster. + // +optional + AuthProvider *AuthProviderConfig `json:"auth-provider,omitempty"` + // Exec specifies a custom exec-based authentication plugin for the kubernetes cluster. + // +optional + Exec *ExecConfig `json:"exec,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// Context is a tuple of references to a cluster (how do I communicate with a kubernetes cluster), a user (how do I identify myself), and a namespace (what subset of resources do I want to work with) +type Context struct { + // LocationOfOrigin indicates where this object came from. It is used for round tripping config post-merge, but never serialized. + LocationOfOrigin string + // Cluster is the name of the cluster for this context + Cluster string `json:"cluster"` + // AuthInfo is the name of the authInfo for this context + AuthInfo string `json:"user"` + // Namespace is the default namespace to use on unspecified requests + // +optional + Namespace string `json:"namespace,omitempty"` + // Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields + // +optional + Extensions map[string]runtime.Object `json:"extensions,omitempty"` +} + +// AuthProviderConfig holds the configuration for a specified auth provider. +type AuthProviderConfig struct { + Name string `json:"name"` + // +optional + Config map[string]string `json:"config,omitempty"` +} + +var _ fmt.Stringer = new(AuthProviderConfig) +var _ fmt.GoStringer = new(AuthProviderConfig) + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// AuthProviderConfig to prevent accidental leaking via logs. +func (c AuthProviderConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of +// AuthProviderConfig to prevent accidental leaking via logs. +func (c AuthProviderConfig) String() string { + cfg := "" + if c.Config != nil { + cfg = "--- REDACTED ---" + } + return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg) +} + +// ExecConfig specifies a command to provide client credentials. The command is exec'd +// and outputs structured stdout holding credentials. +// +// See the client.authentiction.k8s.io API group for specifications of the exact input +// and output format +type ExecConfig struct { + // Command to execute. + Command string `json:"command"` + // Arguments to pass to the command when executing it. + // +optional + Args []string `json:"args"` + // Env defines additional environment variables to expose to the process. These + // are unioned with the host's environment, as well as variables client-go uses + // to pass argument to the plugin. + // +optional + Env []ExecEnvVar `json:"env"` + + // Preferred input version of the ExecInfo. The returned ExecCredentials MUST use + // the same encoding version as the input. 
+ APIVersion string `json:"apiVersion,omitempty"` +} + +var _ fmt.Stringer = new(ExecConfig) +var _ fmt.GoStringer = new(ExecConfig) + +// GoString implements fmt.GoStringer and sanitizes sensitive fields of +// ExecConfig to prevent accidental leaking via logs. +func (c ExecConfig) GoString() string { + return c.String() +} + +// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig +// to prevent accidental leaking via logs. +func (c ExecConfig) String() string { + var args []string + if len(c.Args) > 0 { + args = []string{"--- REDACTED ---"} + } + env := "[]ExecEnvVar(nil)" + if len(c.Env) > 0 { + env = "[]ExecEnvVar{--- REDACTED ---}" + } + return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion) +} + +// ExecEnvVar is used for setting environment variables when executing an exec-based +// credential plugin. +type ExecEnvVar struct { + Name string `json:"name"` + Value string `json:"value"` +} + +// NewConfig is a convenience function that returns a new Config object with non-nil maps +func NewConfig() *Config { + return &Config{ + Preferences: *NewPreferences(), + Clusters: make(map[string]*Cluster), + AuthInfos: make(map[string]*AuthInfo), + Contexts: make(map[string]*Context), + Extensions: make(map[string]runtime.Object), + } +} + +// NewContext is a convenience function that returns a new Context +// object with non-nil maps +func NewContext() *Context { + return &Context{Extensions: make(map[string]runtime.Object)} +} + +// NewCluster is a convenience function that returns a new Cluster +// object with non-nil maps +func NewCluster() *Cluster { + return &Cluster{Extensions: make(map[string]runtime.Object)} +} + +// NewAuthInfo is a convenience function that returns a new AuthInfo +// object with non-nil maps +func NewAuthInfo() *AuthInfo { + return &AuthInfo{ + Extensions: make(map[string]runtime.Object), + ImpersonateUserExtra: make(map[string][]string), + } +} + +// NewPreferences is a convenience function that returns a new +// Preferences object with non-nil maps +func NewPreferences() *Preferences { + return &Preferences{Extensions: make(map[string]runtime.Object)} +} diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go new file mode 100644 index 0000000..3240a7a --- /dev/null +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/zz_generated.deepcopy.go @@ -0,0 +1,324 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package api + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
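// Editorial sketch (not part of the vendored file): assembling a minimal
// in-memory kubeconfig with the types and constructors from types.go above.
// Every name and the server URL are illustrative assumptions.
func exampleMinimalKubeconfig() *Config {
	cfg := NewConfig()
	cfg.Clusters["example"] = &Cluster{
		Server:                "https://example.invalid:6443",
		InsecureSkipTLSVerify: true, // for illustration only; prefer a CA bundle
	}
	cfg.AuthInfos["example-user"] = &AuthInfo{Token: "REDACTED"}
	cfg.Contexts["example"] = &Context{
		Cluster:  "example",
		AuthInfo: "example-user",
	}
	cfg.CurrentContext = "example"
	return cfg
}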
+func (in *AuthInfo) DeepCopyInto(out *AuthInfo) { + *out = *in + if in.ClientCertificateData != nil { + in, out := &in.ClientCertificateData, &out.ClientCertificateData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ClientKeyData != nil { + in, out := &in.ClientKeyData, &out.ClientKeyData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.ImpersonateGroups != nil { + in, out := &in.ImpersonateGroups, &out.ImpersonateGroups + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.ImpersonateUserExtra != nil { + in, out := &in.ImpersonateUserExtra, &out.ImpersonateUserExtra + *out = make(map[string][]string, len(*in)) + for key, val := range *in { + var outVal []string + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = make([]string, len(*in)) + copy(*out, *in) + } + (*out)[key] = outVal + } + } + if in.AuthProvider != nil { + in, out := &in.AuthProvider, &out.AuthProvider + *out = new(AuthProviderConfig) + (*in).DeepCopyInto(*out) + } + if in.Exec != nil { + in, out := &in.Exec, &out.Exec + *out = new(ExecConfig) + (*in).DeepCopyInto(*out) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthInfo. +func (in *AuthInfo) DeepCopy() *AuthInfo { + if in == nil { + return nil + } + out := new(AuthInfo) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *AuthProviderConfig) DeepCopyInto(out *AuthProviderConfig) { + *out = *in + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = make(map[string]string, len(*in)) + for key, val := range *in { + (*out)[key] = val + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuthProviderConfig. +func (in *AuthProviderConfig) DeepCopy() *AuthProviderConfig { + if in == nil { + return nil + } + out := new(AuthProviderConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Cluster) DeepCopyInto(out *Cluster) { + *out = *in + if in.CertificateAuthorityData != nil { + in, out := &in.CertificateAuthorityData, &out.CertificateAuthorityData + *out = make([]byte, len(*in)) + copy(*out, *in) + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Cluster. +func (in *Cluster) DeepCopy() *Cluster { + if in == nil { + return nil + } + out := new(Cluster) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Config) DeepCopyInto(out *Config) { + *out = *in + in.Preferences.DeepCopyInto(&out.Preferences) + if in.Clusters != nil { + in, out := &in.Clusters, &out.Clusters + *out = make(map[string]*Cluster, len(*in)) + for key, val := range *in { + var outVal *Cluster + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(Cluster) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.AuthInfos != nil { + in, out := &in.AuthInfos, &out.AuthInfos + *out = make(map[string]*AuthInfo, len(*in)) + for key, val := range *in { + var outVal *AuthInfo + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(AuthInfo) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.Contexts != nil { + in, out := &in.Contexts, &out.Contexts + *out = make(map[string]*Context, len(*in)) + for key, val := range *in { + var outVal *Context + if val == nil { + (*out)[key] = nil + } else { + in, out := &val, &outVal + *out = new(Context) + (*in).DeepCopyInto(*out) + } + (*out)[key] = outVal + } + } + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Config. +func (in *Config) DeepCopy() *Config { + if in == nil { + return nil + } + out := new(Config) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Config) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Context) DeepCopyInto(out *Context) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Context. +func (in *Context) DeepCopy() *Context { + if in == nil { + return nil + } + out := new(Context) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecConfig) DeepCopyInto(out *ExecConfig) { + *out = *in + if in.Args != nil { + in, out := &in.Args, &out.Args + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Env != nil { + in, out := &in.Env, &out.Env + *out = make([]ExecEnvVar, len(*in)) + copy(*out, *in) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecConfig. +func (in *ExecConfig) DeepCopy() *ExecConfig { + if in == nil { + return nil + } + out := new(ExecConfig) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ExecEnvVar) DeepCopyInto(out *ExecEnvVar) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExecEnvVar. 
+func (in *ExecEnvVar) DeepCopy() *ExecEnvVar { + if in == nil { + return nil + } + out := new(ExecEnvVar) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Preferences) DeepCopyInto(out *Preferences) { + *out = *in + if in.Extensions != nil { + in, out := &in.Extensions, &out.Extensions + *out = make(map[string]runtime.Object, len(*in)) + for key, val := range *in { + if val == nil { + (*out)[key] = nil + } else { + (*out)[key] = val.DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preferences. +func (in *Preferences) DeepCopy() *Preferences { + if in == nil { + return nil + } + out := new(Preferences) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS new file mode 100644 index 0000000..f150be5 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- wojtek-t +- eparis +- krousey +- jayunit100 +- fgrzadkowski +- tmrts diff --git a/vendor/k8s.io/client-go/tools/metrics/metrics.go b/vendor/k8s.io/client-go/tools/metrics/metrics.go new file mode 100644 index 0000000..a01306c --- /dev/null +++ b/vendor/k8s.io/client-go/tools/metrics/metrics.go @@ -0,0 +1,61 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package metrics provides abstractions for registering which metrics +// to record. +package metrics + +import ( + "net/url" + "sync" + "time" +) + +var registerMetrics sync.Once + +// LatencyMetric observes client latency partitioned by verb and url. +type LatencyMetric interface { + Observe(verb string, u url.URL, latency time.Duration) +} + +// ResultMetric counts response codes partitioned by method and host. +type ResultMetric interface { + Increment(code string, method string, host string) +} + +var ( + // RequestLatency is the latency metric that rest clients will update. + RequestLatency LatencyMetric = noopLatency{} + // RequestResult is the result metric that rest clients will update. + RequestResult ResultMetric = noopResult{} +) + +// Register registers metrics for the rest client to use. This can +// only be called once. +func Register(lm LatencyMetric, rm ResultMetric) { + registerMetrics.Do(func() { + RequestLatency = lm + RequestResult = rm + }) +} + +type noopLatency struct{} + +func (noopLatency) Observe(string, url.URL, time.Duration) {} + +type noopResult struct{} + +func (noopResult) Increment(string, string, string) {} diff --git a/vendor/k8s.io/client-go/tools/reference/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go new file mode 100644 index 0000000..573d948 --- /dev/null +++ b/vendor/k8s.io/client-go/tools/reference/ref.go @@ -0,0 +1,126 @@ +/* +Copyright 2014 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package reference + +import ( + "errors" + "fmt" + "net/url" + "strings" + + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +var ( + // Errors that could be returned by GetReference. + ErrNilObject = errors.New("can't reference a nil object") + ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") +) + +// GetReference returns an ObjectReference which refers to the given +// object, or an error if the object doesn't follow the conventions +// that would allow this. +// TODO: should take a meta.Interface see http://issue.k8s.io/7127 +func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) { + if obj == nil { + return nil, ErrNilObject + } + if ref, ok := obj.(*v1.ObjectReference); ok { + // Don't make a reference to a reference. + return ref, nil + } + + gvk := obj.GetObjectKind().GroupVersionKind() + + // if the object referenced is actually persisted, we can just get kind from meta + // if we are building an object reference to something not yet persisted, we should fallback to scheme + kind := gvk.Kind + if len(kind) == 0 { + // TODO: this is wrong + gvks, _, err := scheme.ObjectKinds(obj) + if err != nil { + return nil, err + } + kind = gvks[0].Kind + } + + // An object that implements only List has enough metadata to build a reference + var listMeta metav1.Common + objectMeta, err := meta.Accessor(obj) + if err != nil { + listMeta, err = meta.CommonAccessor(obj) + if err != nil { + return nil, err + } + } else { + listMeta = objectMeta + } + + // if the object referenced is actually persisted, we can also get version from meta + version := gvk.GroupVersion().String() + if len(version) == 0 { + selfLink := listMeta.GetSelfLink() + if len(selfLink) == 0 { + return nil, ErrNoSelfLink + } + selfLinkUrl, err := url.Parse(selfLink) + if err != nil { + return nil, err + } + // example paths: ///* + parts := strings.Split(selfLinkUrl.Path, "/") + if len(parts) < 4 { + return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) + } + if parts[1] == "api" { + version = parts[2] + } else { + version = parts[2] + "/" + parts[3] + } + } + + // only has list metadata + if objectMeta == nil { + return &v1.ObjectReference{ + Kind: kind, + APIVersion: version, + ResourceVersion: listMeta.GetResourceVersion(), + }, nil + } + + return &v1.ObjectReference{ + Kind: kind, + APIVersion: version, + Name: objectMeta.GetName(), + Namespace: objectMeta.GetNamespace(), + UID: objectMeta.GetUID(), + ResourceVersion: objectMeta.GetResourceVersion(), + }, nil +} + +// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. 
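// Editorial sketch (not part of the vendored file): using GetReference above
// to build an ObjectReference for a typed object. The scheme and pod values
// are assumptions; the scheme must have the object's kind registered (for
// core types that is normally the generated client-go scheme).
func exampleGetReference(scheme *runtime.Scheme, pod *v1.Pod) (*v1.ObjectReference, error) {
	ref, err := GetReference(scheme, pod)
	if err != nil {
		return nil, err
	}
	// ref now carries Kind, APIVersion, Name, Namespace, UID and ResourceVersion.
	return ref, nil
}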
+func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) { + ref, err := GetReference(scheme, obj) + if err != nil { + return nil, err + } + ref.FieldPath = fieldPath + return ref, nil +} diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS new file mode 100644 index 0000000..a521769 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +reviewers: +- smarterclayton +- wojtek-t +- deads2k +- liggitt +- krousey +- caesarxuchao diff --git a/vendor/k8s.io/client-go/transport/cache.go b/vendor/k8s.io/client-go/transport/cache.go new file mode 100644 index 0000000..7cffe2a --- /dev/null +++ b/vendor/k8s.io/client-go/transport/cache.go @@ -0,0 +1,117 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "net" + "net/http" + "sync" + "time" + + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +// TlsTransportCache caches TLS http.RoundTrippers different configurations. The +// same RoundTripper will be returned for configs with identical TLS options If +// the config has no custom TLS options, http.DefaultTransport is returned. 
+type tlsTransportCache struct {
+ mu sync.Mutex
+ transports map[tlsCacheKey]*http.Transport
+}
+
+const idleConnsPerHost = 25
+
+var tlsCache = &tlsTransportCache{transports: make(map[tlsCacheKey]*http.Transport)}
+
+type tlsCacheKey struct {
+ insecure bool
+ caData string
+ certData string
+ keyData string
+ getCert string
+ serverName string
+ dial string
+}
+
+func (t tlsCacheKey) String() string {
+ keyText := "<none>"
+ if len(t.keyData) > 0 {
+ keyText = "<redacted>"
+ }
+ return fmt.Sprintf("insecure:%v, caData:%#v, certData:%#v, keyData:%s, getCert: %s, serverName:%s, dial:%s", t.insecure, t.caData, t.certData, keyText, t.getCert, t.serverName, t.dial)
+}
+
+func (c *tlsTransportCache) get(config *Config) (http.RoundTripper, error) {
+ key, err := tlsConfigKey(config)
+ if err != nil {
+ return nil, err
+ }
+
+ // Ensure we only create a single transport for the given TLS options
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ // See if we already have a custom transport for this config
+ if t, ok := c.transports[key]; ok {
+ return t, nil
+ }
+
+ // Get the TLS options for this client config
+ tlsConfig, err := TLSConfigFor(config)
+ if err != nil {
+ return nil, err
+ }
+ // The options didn't require a custom TLS config
+ if tlsConfig == nil && config.Dial == nil {
+ return http.DefaultTransport, nil
+ }
+
+ dial := config.Dial
+ if dial == nil {
+ dial = (&net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ }).DialContext
+ }
+ // Cache a single transport for these options
+ c.transports[key] = utilnet.SetTransportDefaults(&http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ TLSHandshakeTimeout: 10 * time.Second,
+ TLSClientConfig: tlsConfig,
+ MaxIdleConnsPerHost: idleConnsPerHost,
+ DialContext: dial,
+ })
+ return c.transports[key], nil
+}
+
+// tlsConfigKey returns a unique key for tls.Config objects returned from TLSConfigFor
+func tlsConfigKey(c *Config) (tlsCacheKey, error) {
+ // Make sure ca/key/cert content is loaded
+ if err := loadTLSFiles(c); err != nil {
+ return tlsCacheKey{}, err
+ }
+ return tlsCacheKey{
+ insecure: c.TLS.Insecure,
+ caData: string(c.TLS.CAData),
+ certData: string(c.TLS.CertData),
+ keyData: string(c.TLS.KeyData),
+ getCert: fmt.Sprintf("%p", c.TLS.GetCert),
+ serverName: c.TLS.ServerName,
+ dial: fmt.Sprintf("%p", c.Dial),
+ }, nil
+}
diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go
new file mode 100644
index 0000000..5de0a2c
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/config.go
@@ -0,0 +1,126 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package transport
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http"
+)
+
+// Config holds various options for establishing a transport.
+type Config struct {
+ // UserAgent is an optional field that specifies the caller of this
+ // request.
+ UserAgent string
+
+ // The base TLS configuration for this transport.
+ TLS TLSConfig + + // Username and password for basic authentication + Username string + Password string + + // Bearer token for authentication + BearerToken string + + // Path to a file containing a BearerToken. + // If set, the contents are periodically read. + // The last successfully read value takes precedence over BearerToken. + BearerTokenFile string + + // Impersonate is the config that this Config will impersonate using + Impersonate ImpersonationConfig + + // Transport may be used for custom HTTP behavior. This attribute may + // not be specified with the TLS client certificate options. Use + // WrapTransport for most client level operations. + Transport http.RoundTripper + + // WrapTransport will be invoked for custom HTTP behavior after the + // underlying transport is initialized (either the transport created + // from TLSClientConfig, Transport, or http.DefaultTransport). The + // config may layer other RoundTrippers on top of the returned + // RoundTripper. + // + // A future release will change this field to an array. Use config.Wrap() + // instead of setting this value directly. + WrapTransport WrapperFunc + + // Dial specifies the dial function for creating unencrypted TCP connections. + Dial func(ctx context.Context, network, address string) (net.Conn, error) +} + +// ImpersonationConfig has all the available impersonation options +type ImpersonationConfig struct { + // UserName matches user.Info.GetName() + UserName string + // Groups matches user.Info.GetGroups() + Groups []string + // Extra matches user.Info.GetExtra() + Extra map[string][]string +} + +// HasCA returns whether the configuration has a certificate authority or not. +func (c *Config) HasCA() bool { + return len(c.TLS.CAData) > 0 || len(c.TLS.CAFile) > 0 +} + +// HasBasicAuth returns whether the configuration has basic authentication or not. +func (c *Config) HasBasicAuth() bool { + return len(c.Username) != 0 +} + +// HasTokenAuth returns whether the configuration has token authentication or not. +func (c *Config) HasTokenAuth() bool { + return len(c.BearerToken) != 0 || len(c.BearerTokenFile) != 0 +} + +// HasCertAuth returns whether the configuration has certificate authentication or not. +func (c *Config) HasCertAuth() bool { + return (len(c.TLS.CertData) != 0 || len(c.TLS.CertFile) != 0) && (len(c.TLS.KeyData) != 0 || len(c.TLS.KeyFile) != 0) +} + +// HasCertCallbacks returns whether the configuration has certificate callback or not. +func (c *Config) HasCertCallback() bool { + return c.TLS.GetCert != nil +} + +// Wrap adds a transport middleware function that will give the caller +// an opportunity to wrap the underlying http.RoundTripper prior to the +// first API call being made. The provided function is invoked after any +// existing transport wrappers are invoked. +func (c *Config) Wrap(fn WrapperFunc) { + c.WrapTransport = Wrappers(c.WrapTransport, fn) +} + +// TLSConfig holds the information needed to set up a TLS transport. +type TLSConfig struct { + CAFile string // Path of the PEM-encoded server trusted root certificates. + CertFile string // Path of the PEM-encoded client certificate. + KeyFile string // Path of the PEM-encoded client key. + + Insecure bool // Server should be accessed without verifying the certificate. For testing only. + ServerName string // Override for the server name passed to the server for SNI and used to verify certificates. + + CAData []byte // Bytes of the PEM-encoded server trusted root certificates. Supercedes CAFile. 
+ CertData []byte // Bytes of the PEM-encoded client certificate. Supercedes CertFile. + KeyData []byte // Bytes of the PEM-encoded client key. Supercedes KeyFile. + + GetCert func() (*tls.Certificate, error) // Callback that returns a TLS client certificate. CertData, CertFile, KeyData and KeyFile supercede this field. +} diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go new file mode 100644 index 0000000..117a9c8 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -0,0 +1,564 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "net/http" + "strings" + "time" + + "golang.org/x/oauth2" + "k8s.io/klog" + + utilnet "k8s.io/apimachinery/pkg/util/net" +) + +// HTTPWrappersForConfig wraps a round tripper with any relevant layered +// behavior from the config. Exposed to allow more clients that need HTTP-like +// behavior but then must hijack the underlying connection (like WebSocket or +// HTTP2 clients). Pure HTTP clients should use the RoundTripper returned from +// New. +func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTripper, error) { + if config.WrapTransport != nil { + rt = config.WrapTransport(rt) + } + + rt = DebugWrappers(rt) + + // Set authentication wrappers + switch { + case config.HasBasicAuth() && config.HasTokenAuth(): + return nil, fmt.Errorf("username/password or bearer token may be set, but not both") + case config.HasTokenAuth(): + var err error + rt, err = NewBearerAuthWithRefreshRoundTripper(config.BearerToken, config.BearerTokenFile, rt) + if err != nil { + return nil, err + } + case config.HasBasicAuth(): + rt = NewBasicAuthRoundTripper(config.Username, config.Password, rt) + } + if len(config.UserAgent) > 0 { + rt = NewUserAgentRoundTripper(config.UserAgent, rt) + } + if len(config.Impersonate.UserName) > 0 || + len(config.Impersonate.Groups) > 0 || + len(config.Impersonate.Extra) > 0 { + rt = NewImpersonatingRoundTripper(config.Impersonate, rt) + } + return rt, nil +} + +// DebugWrappers wraps a round tripper and logs based on the current log level. 
+func DebugWrappers(rt http.RoundTripper) http.RoundTripper { + switch { + case bool(klog.V(9)): + rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) + case bool(klog.V(8)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) + case bool(klog.V(7)): + rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) + case bool(klog.V(6)): + rt = newDebuggingRoundTripper(rt, debugURLTiming) + } + + return rt +} + +type requestCanceler interface { + CancelRequest(*http.Request) +} + +type authProxyRoundTripper struct { + username string + groups []string + extra map[string][]string + + rt http.RoundTripper +} + +// NewAuthProxyRoundTripper provides a roundtripper which will add auth proxy fields to requests for +// authentication terminating proxy cases +// assuming you pull the user from the context: +// username is the user.Info.GetName() of the user +// groups is the user.Info.GetGroups() of the user +// extra is the user.Info.GetExtra() of the user +// extra can contain any additional information that the authenticator +// thought was interesting, for example authorization scopes. +// In order to faithfully round-trip through an impersonation flow, these keys +// MUST be lowercase. +func NewAuthProxyRoundTripper(username string, groups []string, extra map[string][]string, rt http.RoundTripper) http.RoundTripper { + return &authProxyRoundTripper{ + username: username, + groups: groups, + extra: extra, + rt: rt, + } +} + +func (rt *authProxyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + req = utilnet.CloneRequest(req) + SetAuthProxyHeaders(req, rt.username, rt.groups, rt.extra) + + return rt.rt.RoundTrip(req) +} + +// SetAuthProxyHeaders stomps the auth proxy header fields. It mutates its argument. 
+func SetAuthProxyHeaders(req *http.Request, username string, groups []string, extra map[string][]string) { + req.Header.Del("X-Remote-User") + req.Header.Del("X-Remote-Group") + for key := range req.Header { + if strings.HasPrefix(strings.ToLower(key), strings.ToLower("X-Remote-Extra-")) { + req.Header.Del(key) + } + } + + req.Header.Set("X-Remote-User", username) + for _, group := range groups { + req.Header.Add("X-Remote-Group", group) + } + for key, values := range extra { + for _, value := range values { + req.Header.Add("X-Remote-Extra-"+headerKeyEscape(key), value) + } + } +} + +func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + klog.Errorf("CancelRequest not implemented by %T", rt.rt) + } +} + +func (rt *authProxyRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type userAgentRoundTripper struct { + agent string + rt http.RoundTripper +} + +func NewUserAgentRoundTripper(agent string, rt http.RoundTripper) http.RoundTripper { + return &userAgentRoundTripper{agent, rt} +} + +func (rt *userAgentRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("User-Agent")) != 0 { + return rt.rt.RoundTrip(req) + } + req = utilnet.CloneRequest(req) + req.Header.Set("User-Agent", rt.agent) + return rt.rt.RoundTrip(req) +} + +func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + klog.Errorf("CancelRequest not implemented by %T", rt.rt) + } +} + +func (rt *userAgentRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +type basicAuthRoundTripper struct { + username string + password string + rt http.RoundTripper +} + +// NewBasicAuthRoundTripper will apply a BASIC auth authorization header to a +// request unless it has already been set. +func NewBasicAuthRoundTripper(username, password string, rt http.RoundTripper) http.RoundTripper { + return &basicAuthRoundTripper{username, password, rt} +} + +func (rt *basicAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + req = utilnet.CloneRequest(req) + req.SetBasicAuth(rt.username, rt.password) + return rt.rt.RoundTrip(req) +} + +func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + klog.Errorf("CancelRequest not implemented by %T", rt.rt) + } +} + +func (rt *basicAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// These correspond to the headers used in pkg/apis/authentication. We don't want the package dependency, +// but you must not change the values. +const ( + // ImpersonateUserHeader is used to impersonate a particular user during an API server request + ImpersonateUserHeader = "Impersonate-User" + + // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. + // It can be repeated multiplied times for multiple groups. + ImpersonateGroupHeader = "Impersonate-Group" + + // ImpersonateUserExtraHeaderPrefix is a prefix for a header used to impersonate an entry in the + // extra map[string][]string for user.Info. The key for the `extra` map is suffix. + // The same key can be repeated multiple times to have multiple elements in the slice under a single key. 
+ // For instance: + // Impersonate-Extra-Foo: one + // Impersonate-Extra-Foo: two + // results in extra["Foo"] = []string{"one", "two"} + ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" +) + +type impersonatingRoundTripper struct { + impersonate ImpersonationConfig + delegate http.RoundTripper +} + +// NewImpersonatingRoundTripper will add an Act-As header to a request unless it has already been set. +func NewImpersonatingRoundTripper(impersonate ImpersonationConfig, delegate http.RoundTripper) http.RoundTripper { + return &impersonatingRoundTripper{impersonate, delegate} +} + +func (rt *impersonatingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + // use the user header as marker for the rest. + if len(req.Header.Get(ImpersonateUserHeader)) != 0 { + return rt.delegate.RoundTrip(req) + } + req = utilnet.CloneRequest(req) + req.Header.Set(ImpersonateUserHeader, rt.impersonate.UserName) + + for _, group := range rt.impersonate.Groups { + req.Header.Add(ImpersonateGroupHeader, group) + } + for k, vv := range rt.impersonate.Extra { + for _, v := range vv { + req.Header.Add(ImpersonateUserExtraHeaderPrefix+headerKeyEscape(k), v) + } + } + + return rt.delegate.RoundTrip(req) +} + +func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegate.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + klog.Errorf("CancelRequest not implemented by %T", rt.delegate) + } +} + +func (rt *impersonatingRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.delegate } + +type bearerAuthRoundTripper struct { + bearer string + source oauth2.TokenSource + rt http.RoundTripper +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +func NewBearerAuthRoundTripper(bearer string, rt http.RoundTripper) http.RoundTripper { + return &bearerAuthRoundTripper{bearer, nil, rt} +} + +// NewBearerAuthRoundTripper adds the provided bearer token to a request +// unless the authorization header has already been set. +// If tokenFile is non-empty, it is periodically read, +// and the last successfully read content is used as the bearer token. +// If tokenFile is non-empty and bearer is empty, the tokenFile is read +// immediately to populate the initial bearer token. 
+func NewBearerAuthWithRefreshRoundTripper(bearer string, tokenFile string, rt http.RoundTripper) (http.RoundTripper, error) { + if len(tokenFile) == 0 { + return &bearerAuthRoundTripper{bearer, nil, rt}, nil + } + source := NewCachedFileTokenSource(tokenFile) + if len(bearer) == 0 { + token, err := source.Token() + if err != nil { + return nil, err + } + bearer = token.AccessToken + } + return &bearerAuthRoundTripper{bearer, source, rt}, nil +} + +func (rt *bearerAuthRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + if len(req.Header.Get("Authorization")) != 0 { + return rt.rt.RoundTrip(req) + } + + req = utilnet.CloneRequest(req) + token := rt.bearer + if rt.source != nil { + if refreshedToken, err := rt.source.Token(); err == nil { + token = refreshedToken.AccessToken + } + } + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + return rt.rt.RoundTrip(req) +} + +func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.rt.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + klog.Errorf("CancelRequest not implemented by %T", rt.rt) + } +} + +func (rt *bearerAuthRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt } + +// requestInfo keeps track of information about a request/response combination +type requestInfo struct { + RequestHeaders http.Header + RequestVerb string + RequestURL string + + ResponseStatus string + ResponseHeaders http.Header + ResponseErr error + + Duration time.Duration +} + +// newRequestInfo creates a new RequestInfo based on an http request +func newRequestInfo(req *http.Request) *requestInfo { + return &requestInfo{ + RequestURL: req.URL.String(), + RequestVerb: req.Method, + RequestHeaders: req.Header, + } +} + +// complete adds information about the response to the requestInfo +func (r *requestInfo) complete(response *http.Response, err error) { + if err != nil { + r.ResponseErr = err + return + } + r.ResponseStatus = response.Status + r.ResponseHeaders = response.Header +} + +// toCurl returns a string that can be run as a command in a terminal (minus the body) +func (r *requestInfo) toCurl() string { + headers := "" + for key, values := range r.RequestHeaders { + for _, value := range values { + headers += fmt.Sprintf(` -H %q`, fmt.Sprintf("%s: %s", key, value)) + } + } + + return fmt.Sprintf("curl -k -v -X%s %s '%s'", r.RequestVerb, headers, r.RequestURL) +} + +// debuggingRoundTripper will display information about the requests passing +// through it based on what is configured +type debuggingRoundTripper struct { + delegatedRoundTripper http.RoundTripper + + levels map[debugLevel]bool +} + +type debugLevel int + +const ( + debugJustURL debugLevel = iota + debugURLTiming + debugCurlCommand + debugRequestHeaders + debugResponseStatus + debugResponseHeaders +) + +func newDebuggingRoundTripper(rt http.RoundTripper, levels ...debugLevel) *debuggingRoundTripper { + drt := &debuggingRoundTripper{ + delegatedRoundTripper: rt, + levels: make(map[debugLevel]bool, len(levels)), + } + for _, v := range levels { + drt.levels[v] = true + } + return drt +} + +func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { + if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { + canceler.CancelRequest(req) + } else { + klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + } +} + +func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + reqInfo := newRequestInfo(req) + + if rt.levels[debugJustURL] 
{ + klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + } + if rt.levels[debugCurlCommand] { + klog.Infof("%s", reqInfo.toCurl()) + + } + if rt.levels[debugRequestHeaders] { + klog.Infof("Request Headers:") + for key, values := range reqInfo.RequestHeaders { + for _, value := range values { + klog.Infof(" %s: %s", key, value) + } + } + } + + startTime := time.Now() + response, err := rt.delegatedRoundTripper.RoundTrip(req) + reqInfo.Duration = time.Since(startTime) + + reqInfo.complete(response, err) + + if rt.levels[debugURLTiming] { + klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseStatus] { + klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + } + if rt.levels[debugResponseHeaders] { + klog.Infof("Response Headers:") + for key, values := range reqInfo.ResponseHeaders { + for _, value := range values { + klog.Infof(" %s: %s", key, value) + } + } + } + + return response, err +} + +func (rt *debuggingRoundTripper) WrappedRoundTripper() http.RoundTripper { + return rt.delegatedRoundTripper +} + +func legalHeaderByte(b byte) bool { + return int(b) < len(legalHeaderKeyBytes) && legalHeaderKeyBytes[b] +} + +func shouldEscape(b byte) bool { + // url.PathUnescape() returns an error if any '%' is not followed by two + // hexadecimal digits, so we'll intentionally encode it. + return !legalHeaderByte(b) || b == '%' +} + +func headerKeyEscape(key string) string { + buf := strings.Builder{} + for i := 0; i < len(key); i++ { + b := key[i] + if shouldEscape(b) { + // %-encode bytes that should be escaped: + // https://tools.ietf.org/html/rfc3986#section-2.1 + fmt.Fprintf(&buf, "%%%02X", b) + continue + } + buf.WriteByte(b) + } + return buf.String() +} + +// legalHeaderKeyBytes was copied from net/http/lex.go's isTokenTable. +// See https://httpwg.github.io/specs/rfc7230.html#rule.token.separators +var legalHeaderKeyBytes = [127]bool{ + '%': true, + '!': true, + '#': true, + '$': true, + '&': true, + '\'': true, + '*': true, + '+': true, + '-': true, + '.': true, + '0': true, + '1': true, + '2': true, + '3': true, + '4': true, + '5': true, + '6': true, + '7': true, + '8': true, + '9': true, + 'A': true, + 'B': true, + 'C': true, + 'D': true, + 'E': true, + 'F': true, + 'G': true, + 'H': true, + 'I': true, + 'J': true, + 'K': true, + 'L': true, + 'M': true, + 'N': true, + 'O': true, + 'P': true, + 'Q': true, + 'R': true, + 'S': true, + 'T': true, + 'U': true, + 'W': true, + 'V': true, + 'X': true, + 'Y': true, + 'Z': true, + '^': true, + '_': true, + '`': true, + 'a': true, + 'b': true, + 'c': true, + 'd': true, + 'e': true, + 'f': true, + 'g': true, + 'h': true, + 'i': true, + 'j': true, + 'k': true, + 'l': true, + 'm': true, + 'n': true, + 'o': true, + 'p': true, + 'q': true, + 'r': true, + 's': true, + 't': true, + 'u': true, + 'v': true, + 'w': true, + 'x': true, + 'y': true, + 'z': true, + '|': true, + '~': true, +} diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go new file mode 100644 index 0000000..8595df2 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -0,0 +1,140 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "fmt" + "io/ioutil" + "net/http" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" + "k8s.io/klog" +) + +// TokenSourceWrapTransport returns a WrapTransport that injects bearer tokens +// authentication from an oauth2.TokenSource. +func TokenSourceWrapTransport(ts oauth2.TokenSource) func(http.RoundTripper) http.RoundTripper { + return func(rt http.RoundTripper) http.RoundTripper { + return &tokenSourceTransport{ + base: rt, + ort: &oauth2.Transport{ + Source: ts, + Base: rt, + }, + } + } +} + +// NewCachedFileTokenSource returns a oauth2.TokenSource reads a token from a +// file at a specified path and periodically reloads it. +func NewCachedFileTokenSource(path string) oauth2.TokenSource { + return &cachingTokenSource{ + now: time.Now, + leeway: 10 * time.Second, + base: &fileTokenSource{ + path: path, + // This period was picked because it is half of the duration between when the kubelet + // refreshes a projected service account token and when the original token expires. + // Default token lifetime is 10 minutes, and the kubelet starts refreshing at 80% of lifetime. + // This should induce re-reading at a frequency that works with the token volume source. + period: time.Minute, + }, + } +} + +type tokenSourceTransport struct { + base http.RoundTripper + ort http.RoundTripper +} + +func (tst *tokenSourceTransport) RoundTrip(req *http.Request) (*http.Response, error) { + // This is to allow --token to override other bearer token providers. 
+ if req.Header.Get("Authorization") != "" { + return tst.base.RoundTrip(req) + } + return tst.ort.RoundTrip(req) +} + +type fileTokenSource struct { + path string + period time.Duration +} + +var _ = oauth2.TokenSource(&fileTokenSource{}) + +func (ts *fileTokenSource) Token() (*oauth2.Token, error) { + tokb, err := ioutil.ReadFile(ts.path) + if err != nil { + return nil, fmt.Errorf("failed to read token file %q: %v", ts.path, err) + } + tok := strings.TrimSpace(string(tokb)) + if len(tok) == 0 { + return nil, fmt.Errorf("read empty token from file %q", ts.path) + } + + return &oauth2.Token{ + AccessToken: tok, + Expiry: time.Now().Add(ts.period), + }, nil +} + +type cachingTokenSource struct { + base oauth2.TokenSource + leeway time.Duration + + sync.RWMutex + tok *oauth2.Token + + // for testing + now func() time.Time +} + +var _ = oauth2.TokenSource(&cachingTokenSource{}) + +func (ts *cachingTokenSource) Token() (*oauth2.Token, error) { + now := ts.now() + // fast path + ts.RLock() + tok := ts.tok + ts.RUnlock() + + if tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) { + return tok, nil + } + + // slow path + ts.Lock() + defer ts.Unlock() + if tok := ts.tok; tok != nil && tok.Expiry.Add(-1*ts.leeway).After(now) { + return tok, nil + } + + tok, err := ts.base.Token() + if err != nil { + if ts.tok == nil { + return nil, err + } + klog.Errorf("Unable to rotate token: %v", err) + return ts.tok, nil + } + + ts.tok = tok + return tok, nil +} diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go new file mode 100644 index 0000000..2a145c9 --- /dev/null +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -0,0 +1,227 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package transport + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io/ioutil" + "net/http" +) + +// New returns an http.RoundTripper that will provide the authentication +// or transport level security defined by the provided Config. +func New(config *Config) (http.RoundTripper, error) { + // Set transport level security + if config.Transport != nil && (config.HasCA() || config.HasCertAuth() || config.HasCertCallback() || config.TLS.Insecure) { + return nil, fmt.Errorf("using a custom transport with TLS certificate options or the insecure flag is not allowed") + } + + var ( + rt http.RoundTripper + err error + ) + + if config.Transport != nil { + rt = config.Transport + } else { + rt, err = tlsCache.get(config) + if err != nil { + return nil, err + } + } + + return HTTPWrappersForConfig(config, rt) +} + +// TLSConfigFor returns a tls.Config that will provide the transport level security defined +// by the provided Config. Will return nil if no transport level security is requested. 
+func TLSConfigFor(c *Config) (*tls.Config, error) { + if !(c.HasCA() || c.HasCertAuth() || c.HasCertCallback() || c.TLS.Insecure || len(c.TLS.ServerName) > 0) { + return nil, nil + } + if c.HasCA() && c.TLS.Insecure { + return nil, fmt.Errorf("specifying a root certificates file with the insecure flag is not allowed") + } + if err := loadTLSFiles(c); err != nil { + return nil, err + } + + tlsConfig := &tls.Config{ + // Can't use SSLv3 because of POODLE and BEAST + // Can't use TLSv1.0 because of POODLE and BEAST using CBC cipher + // Can't use TLSv1.1 because of RC4 cipher usage + MinVersion: tls.VersionTLS12, + InsecureSkipVerify: c.TLS.Insecure, + ServerName: c.TLS.ServerName, + } + + if c.HasCA() { + tlsConfig.RootCAs = rootCertPool(c.TLS.CAData) + } + + var staticCert *tls.Certificate + if c.HasCertAuth() { + // If key/cert were provided, verify them before setting up + // tlsConfig.GetClientCertificate. + cert, err := tls.X509KeyPair(c.TLS.CertData, c.TLS.KeyData) + if err != nil { + return nil, err + } + staticCert = &cert + } + + if c.HasCertAuth() || c.HasCertCallback() { + tlsConfig.GetClientCertificate = func(*tls.CertificateRequestInfo) (*tls.Certificate, error) { + // Note: static key/cert data always take precedence over cert + // callback. + if staticCert != nil { + return staticCert, nil + } + if c.HasCertCallback() { + cert, err := c.TLS.GetCert() + if err != nil { + return nil, err + } + // GetCert may return empty value, meaning no cert. + if cert != nil { + return cert, nil + } + } + + // Both c.TLS.CertData/KeyData were unset and GetCert didn't return + // anything. Return an empty tls.Certificate, no client cert will + // be sent to the server. + return &tls.Certificate{}, nil + } + } + + return tlsConfig, nil +} + +// loadTLSFiles copies the data from the CertFile, KeyFile, and CAFile fields into the CertData, +// KeyData, and CAFile fields, or returns an error. If no error is returned, all three fields are +// either populated or were empty to start. +func loadTLSFiles(c *Config) error { + var err error + c.TLS.CAData, err = dataFromSliceOrFile(c.TLS.CAData, c.TLS.CAFile) + if err != nil { + return err + } + + c.TLS.CertData, err = dataFromSliceOrFile(c.TLS.CertData, c.TLS.CertFile) + if err != nil { + return err + } + + c.TLS.KeyData, err = dataFromSliceOrFile(c.TLS.KeyData, c.TLS.KeyFile) + if err != nil { + return err + } + return nil +} + +// dataFromSliceOrFile returns data from the slice (if non-empty), or from the file, +// or an error if an error occurred reading the file +func dataFromSliceOrFile(data []byte, file string) ([]byte, error) { + if len(data) > 0 { + return data, nil + } + if len(file) > 0 { + fileData, err := ioutil.ReadFile(file) + if err != nil { + return []byte{}, err + } + return fileData, nil + } + return nil, nil +} + +// rootCertPool returns nil if caData is empty. When passed along, this will mean "use system CAs". +// When caData is not empty, it will be the ONLY information used in the CertPool. +func rootCertPool(caData []byte) *x509.CertPool { + // What we really want is a copy of x509.systemRootsPool, but that isn't exposed. 
It's difficult to build (see the go + // code for a look at the platform specific insanity), so we'll use the fact that RootCAs == nil gives us the system values + // It doesn't allow trusting either/or, but hopefully that won't be an issue + if len(caData) == 0 { + return nil + } + + // if we have caData, use it + certPool := x509.NewCertPool() + certPool.AppendCertsFromPEM(caData) + return certPool +} + +// WrapperFunc wraps an http.RoundTripper when a new transport +// is created for a client, allowing per connection behavior +// to be injected. +type WrapperFunc func(rt http.RoundTripper) http.RoundTripper + +// Wrappers accepts any number of wrappers and returns a wrapper +// function that is the equivalent of calling each of them in order. Nil +// values are ignored, which makes this function convenient for incrementally +// wrapping a function. +func Wrappers(fns ...WrapperFunc) WrapperFunc { + if len(fns) == 0 { + return nil + } + // optimize the common case of wrapping a possibly nil transport wrapper + // with an additional wrapper + if len(fns) == 2 && fns[0] == nil { + return fns[1] + } + return func(rt http.RoundTripper) http.RoundTripper { + base := rt + for _, fn := range fns { + if fn != nil { + base = fn(base) + } + } + return base + } +} + +// ContextCanceller prevents new requests after the provided context is finished. +// err is returned when the context is closed, allowing the caller to provide a context +// appropriate error. +func ContextCanceller(ctx context.Context, err error) WrapperFunc { + return func(rt http.RoundTripper) http.RoundTripper { + return &contextCanceller{ + ctx: ctx, + rt: rt, + err: err, + } + } +} + +type contextCanceller struct { + ctx context.Context + rt http.RoundTripper + err error +} + +func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) { + select { + case <-b.ctx.Done(): + return nil, b.err + default: + return b.rt.RoundTrip(req) + } +} diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS new file mode 100644 index 0000000..3cf0364 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/OWNERS @@ -0,0 +1,9 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go new file mode 100644 index 0000000..9fd097a --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -0,0 +1,206 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package cert
+
+import (
+ "bytes"
+ "crypto"
+ cryptorand "crypto/rand"
+ "crypto/rsa"
+ "crypto/x509"
+ "crypto/x509/pkix"
+ "encoding/pem"
+ "fmt"
+ "io/ioutil"
+ "math/big"
+ "net"
+ "path"
+ "strings"
+ "time"
+
+ "k8s.io/client-go/util/keyutil"
+)
+
+const duration365d = time.Hour * 24 * 365
+
+// Config contains the basic fields required for creating a certificate
+type Config struct {
+ CommonName string
+ Organization []string
+ AltNames AltNames
+ Usages []x509.ExtKeyUsage
+}
+
+// AltNames contains the domain names and IP addresses that will be added
+// to the API Server's x509 certificate SubAltNames field. The values will
+// be passed directly to the x509.Certificate object.
+type AltNames struct {
+ DNSNames []string
+ IPs []net.IP
+}
+
+// NewSelfSignedCACert creates a CA certificate
+func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) {
+ now := time.Now()
+ tmpl := x509.Certificate{
+ SerialNumber: new(big.Int).SetInt64(0),
+ Subject: pkix.Name{
+ CommonName: cfg.CommonName,
+ Organization: cfg.Organization,
+ },
+ NotBefore: now.UTC(),
+ NotAfter: now.Add(duration365d * 10).UTC(),
+ KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
+ BasicConstraintsValid: true,
+ IsCA: true,
+ }
+
+ certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key)
+ if err != nil {
+ return nil, err
+ }
+ return x509.ParseCertificate(certDERBytes)
+}
+
+// GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name
+// You may also specify additional subject alt names (either ip or dns names) for the certificate.
+func GenerateSelfSignedCertKey(host string, alternateIPs []net.IP, alternateDNS []string) ([]byte, []byte, error) {
+ return GenerateSelfSignedCertKeyWithFixtures(host, alternateIPs, alternateDNS, "")
+}
+
+// GenerateSelfSignedCertKeyWithFixtures creates a self-signed certificate and key for the given host.
+// Host may be an IP or a DNS name. You may also specify additional subject alt names (either ip or dns names)
+// for the certificate.
+//
+// If fixtureDirectory is non-empty, it is a directory path which can contain pre-generated certs. The format is:
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.crt
+// <host>_<ip>-<ip>_<alternateDNS>-<alternateDNS>.key
+// Certs/keys not existing in that directory are created.
+func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, alternateDNS []string, fixtureDirectory string) ([]byte, []byte, error) { + validFrom := time.Now().Add(-time.Hour) // valid an hour earlier to avoid flakes due to clock skew + maxAge := time.Hour * 24 * 365 // one year self-signed certs + + baseName := fmt.Sprintf("%s_%s_%s", host, strings.Join(ipsToStrings(alternateIPs), "-"), strings.Join(alternateDNS, "-")) + certFixturePath := path.Join(fixtureDirectory, baseName+".crt") + keyFixturePath := path.Join(fixtureDirectory, baseName+".key") + if len(fixtureDirectory) > 0 { + cert, err := ioutil.ReadFile(certFixturePath) + if err == nil { + key, err := ioutil.ReadFile(keyFixturePath) + if err == nil { + return cert, key, nil + } + return nil, nil, fmt.Errorf("cert %s can be read, but key %s cannot: %v", certFixturePath, keyFixturePath, err) + } + maxAge = 100 * time.Hour * 24 * 365 // 100 years fixtures + } + + caKey, err := rsa.GenerateKey(cryptorand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + caTemplate := x509.Certificate{ + SerialNumber: big.NewInt(1), + Subject: pkix.Name{ + CommonName: fmt.Sprintf("%s-ca@%d", host, time.Now().Unix()), + }, + NotBefore: validFrom, + NotAfter: validFrom.Add(maxAge), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, + BasicConstraintsValid: true, + IsCA: true, + } + + caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey) + if err != nil { + return nil, nil, err + } + + caCertificate, err := x509.ParseCertificate(caDERBytes) + if err != nil { + return nil, nil, err + } + + priv, err := rsa.GenerateKey(cryptorand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + template := x509.Certificate{ + SerialNumber: big.NewInt(2), + Subject: pkix.Name{ + CommonName: fmt.Sprintf("%s@%d", host, time.Now().Unix()), + }, + NotBefore: validFrom, + NotAfter: validFrom.Add(maxAge), + + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth}, + BasicConstraintsValid: true, + } + + if ip := net.ParseIP(host); ip != nil { + template.IPAddresses = append(template.IPAddresses, ip) + } else { + template.DNSNames = append(template.DNSNames, host) + } + + template.IPAddresses = append(template.IPAddresses, alternateIPs...) + template.DNSNames = append(template.DNSNames, alternateDNS...) 
+ + derBytes, err := x509.CreateCertificate(cryptorand.Reader, &template, caCertificate, &priv.PublicKey, caKey) + if err != nil { + return nil, nil, err + } + + // Generate cert, followed by ca + certBuffer := bytes.Buffer{} + if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: derBytes}); err != nil { + return nil, nil, err + } + if err := pem.Encode(&certBuffer, &pem.Block{Type: CertificateBlockType, Bytes: caDERBytes}); err != nil { + return nil, nil, err + } + + // Generate key + keyBuffer := bytes.Buffer{} + if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + return nil, nil, err + } + + if len(fixtureDirectory) > 0 { + if err := ioutil.WriteFile(certFixturePath, certBuffer.Bytes(), 0644); err != nil { + return nil, nil, fmt.Errorf("failed to write cert fixture to %s: %v", certFixturePath, err) + } + if err := ioutil.WriteFile(keyFixturePath, keyBuffer.Bytes(), 0644); err != nil { + return nil, nil, fmt.Errorf("failed to write key fixture to %s: %v", certFixturePath, err) + } + } + + return certBuffer.Bytes(), keyBuffer.Bytes(), nil +} + +func ipsToStrings(ips []net.IP) []string { + ss := make([]string, 0, len(ips)) + for _, ip := range ips { + ss = append(ss, ip.String()) + } + return ss +} diff --git a/vendor/k8s.io/client-go/util/cert/csr.go b/vendor/k8s.io/client-go/util/cert/csr.go new file mode 100644 index 0000000..39a6751 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/csr.go @@ -0,0 +1,75 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "net" +) + +// MakeCSR generates a PEM-encoded CSR using the supplied private key, subject, and SANs. +// All key types that are implemented via crypto.Signer are supported (This includes *rsa.PrivateKey and *ecdsa.PrivateKey.) +func MakeCSR(privateKey interface{}, subject *pkix.Name, dnsSANs []string, ipSANs []net.IP) (csr []byte, err error) { + template := &x509.CertificateRequest{ + Subject: *subject, + DNSNames: dnsSANs, + IPAddresses: ipSANs, + } + + return MakeCSRFromTemplate(privateKey, template) +} + +// MakeCSRFromTemplate generates a PEM-encoded CSR using the supplied private +// key and certificate request as a template. All key types that are +// implemented via crypto.Signer are supported (This includes *rsa.PrivateKey +// and *ecdsa.PrivateKey.) 
+func MakeCSRFromTemplate(privateKey interface{}, template *x509.CertificateRequest) ([]byte, error) { + t := *template + t.SignatureAlgorithm = sigType(privateKey) + + csrDER, err := x509.CreateCertificateRequest(cryptorand.Reader, &t, privateKey) + if err != nil { + return nil, err + } + + csrPemBlock := &pem.Block{ + Type: CertificateRequestBlockType, + Bytes: csrDER, + } + + return pem.EncodeToMemory(csrPemBlock), nil +} + +func sigType(privateKey interface{}) x509.SignatureAlgorithm { + // Customize the signature for RSA keys, depending on the key size + if privateKey, ok := privateKey.(*rsa.PrivateKey); ok { + keySize := privateKey.N.BitLen() + switch { + case keySize >= 4096: + return x509.SHA512WithRSA + case keySize >= 3072: + return x509.SHA384WithRSA + default: + return x509.SHA256WithRSA + } + } + return x509.UnknownSignatureAlgorithm +} diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go new file mode 100644 index 0000000..5efb248 --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -0,0 +1,98 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/x509" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// CanReadCertAndKey returns true if the certificate and key files already exists, +// otherwise returns false. If lost one of cert and key, returns error. +func CanReadCertAndKey(certPath, keyPath string) (bool, error) { + certReadable := canReadFile(certPath) + keyReadable := canReadFile(keyPath) + + if certReadable == false && keyReadable == false { + return false, nil + } + + if certReadable == false { + return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", certPath) + } + + if keyReadable == false { + return false, fmt.Errorf("error reading %s, certificate and key must be supplied as a pair", keyPath) + } + + return true, nil +} + +// If the file represented by path exists and +// readable, returns true otherwise returns false. +func canReadFile(path string) bool { + f, err := os.Open(path) + if err != nil { + return false + } + + defer f.Close() + + return true +} + +// WriteCert writes the pem-encoded certificate data to certPath. +// The certificate file will be created with file mode 0644. +// If the certificate file already exists, it will be overwritten. +// The parent directory of the certPath will be created as needed with file mode 0755. +func WriteCert(certPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(certPath), os.FileMode(0755)); err != nil { + return err + } + return ioutil.WriteFile(certPath, data, os.FileMode(0644)) +} + +// NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. 
+// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func NewPool(filename string) (*x509.CertPool, error) { + certs, err := CertsFromFile(filename) + if err != nil { + return nil, err + } + pool := x509.NewCertPool() + for _, cert := range certs { + pool.AddCert(cert) + } + return pool, nil +} + +// CertsFromFile returns the x509.Certificates contained in the given PEM-encoded file. +// Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates +func CertsFromFile(file string) ([]*x509.Certificate, error) { + pemBlock, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + certs, err := ParseCertsPEM(pemBlock) + if err != nil { + return nil, fmt.Errorf("error reading %s: %s", file, err) + } + return certs, nil +} diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go new file mode 100644 index 0000000..9185e2e --- /dev/null +++ b/vendor/k8s.io/client-go/util/cert/pem.go @@ -0,0 +1,61 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package cert + +import ( + "crypto/x509" + "encoding/pem" + "errors" +) + +const ( + // CertificateBlockType is a possible value for pem.Block.Type. + CertificateBlockType = "CERTIFICATE" + // CertificateRequestBlockType is a possible value for pem.Block.Type. + CertificateRequestBlockType = "CERTIFICATE REQUEST" +) + +// ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array +// Returns an error if a certificate could not be parsed, or if the data does not contain any certificates +func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { + ok := false + certs := []*x509.Certificate{} + for len(pemCerts) > 0 { + var block *pem.Block + block, pemCerts = pem.Decode(pemCerts) + if block == nil { + break + } + // Only use PEM "CERTIFICATE" blocks without extra headers + if block.Type != CertificateBlockType || len(block.Headers) != 0 { + continue + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return certs, err + } + + certs = append(certs, cert) + ok = true + } + + if !ok { + return certs, errors.New("data does not contain any valid RSA or ECDSA certificates") + } + return certs, nil +} diff --git a/vendor/k8s.io/client-go/util/connrotation/connrotation.go b/vendor/k8s.io/client-go/util/connrotation/connrotation.go new file mode 100644 index 0000000..235a9e0 --- /dev/null +++ b/vendor/k8s.io/client-go/util/connrotation/connrotation.go @@ -0,0 +1,105 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package connrotation implements a connection dialer that tracks and can close +// all created connections. +// +// This is used for credential rotation of long-lived connections, when there's +// no way to re-authenticate on a live connection. +package connrotation + +import ( + "context" + "net" + "sync" +) + +// DialFunc is a shorthand for signature of net.DialContext. +type DialFunc func(ctx context.Context, network, address string) (net.Conn, error) + +// Dialer opens connections through Dial and tracks them. +type Dialer struct { + dial DialFunc + + mu sync.Mutex + conns map[*closableConn]struct{} +} + +// NewDialer creates a new Dialer instance. +// +// If dial is not nil, it will be used to create new underlying connections. +// Otherwise net.DialContext is used. +func NewDialer(dial DialFunc) *Dialer { + return &Dialer{ + dial: dial, + conns: make(map[*closableConn]struct{}), + } +} + +// CloseAll forcibly closes all tracked connections. +// +// Note: new connections may get created before CloseAll returns. +func (d *Dialer) CloseAll() { + d.mu.Lock() + conns := d.conns + d.conns = make(map[*closableConn]struct{}) + d.mu.Unlock() + + for conn := range conns { + conn.Close() + } +} + +// Dial creates a new tracked connection. +func (d *Dialer) Dial(network, address string) (net.Conn, error) { + return d.DialContext(context.Background(), network, address) +} + +// DialContext creates a new tracked connection. +func (d *Dialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + conn, err := d.dial(ctx, network, address) + if err != nil { + return nil, err + } + + closable := &closableConn{Conn: conn} + + // Start tracking the connection + d.mu.Lock() + d.conns[closable] = struct{}{} + d.mu.Unlock() + + // When the connection is closed, remove it from the map. This will + // be no-op if the connection isn't in the map, e.g. if CloseAll() + // is called. + closable.onClose = func() { + d.mu.Lock() + delete(d.conns, closable) + d.mu.Unlock() + } + + return closable, nil +} + +type closableConn struct { + onClose func() + net.Conn +} + +func (c *closableConn) Close() error { + go c.onClose() + return c.Conn.Close() +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go new file mode 100644 index 0000000..b7cb70e --- /dev/null +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -0,0 +1,149 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package flowcontrol + +import ( + "sync" + "time" + + "k8s.io/apimachinery/pkg/util/clock" + "k8s.io/utils/integer" +) + +type backoffEntry struct { + backoff time.Duration + lastUpdate time.Time +} + +type Backoff struct { + sync.Mutex + Clock clock.Clock + defaultDuration time.Duration + maxDuration time.Duration + perItemBackoff map[string]*backoffEntry +} + +func NewFakeBackOff(initial, max time.Duration, tc *clock.FakeClock) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: tc, + defaultDuration: initial, + maxDuration: max, + } +} + +func NewBackOff(initial, max time.Duration) *Backoff { + return &Backoff{ + perItemBackoff: map[string]*backoffEntry{}, + Clock: clock.RealClock{}, + defaultDuration: initial, + maxDuration: max, + } +} + +// Get the current backoff Duration +func (p *Backoff) Get(id string) time.Duration { + p.Lock() + defer p.Unlock() + var delay time.Duration + entry, ok := p.perItemBackoff[id] + if ok { + delay = entry.backoff + } + return delay +} + +// move backoff to the next mark, capping at maxDuration +func (p *Backoff) Next(id string, eventTime time.Time) { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok || hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + entry = p.initEntryUnsafe(id) + } else { + delay := entry.backoff * 2 // exponential + entry.backoff = time.Duration(integer.Int64Min(int64(delay), int64(p.maxDuration))) + } + entry.lastUpdate = p.Clock.Now() +} + +// Reset forces clearing of all backoff data for a given key. +func (p *Backoff) Reset(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Returns True if the elapsed time since eventTime is smaller than the current backoff window +func (p *Backoff) IsInBackOffSince(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return p.Clock.Now().Sub(eventTime) < entry.backoff +} + +// Returns True if time since lastupdate is less than the current backoff window. +func (p *Backoff) IsInBackOffSinceUpdate(id string, eventTime time.Time) bool { + p.Lock() + defer p.Unlock() + entry, ok := p.perItemBackoff[id] + if !ok { + return false + } + if hasExpired(eventTime, entry.lastUpdate, p.maxDuration) { + return false + } + return eventTime.Sub(entry.lastUpdate) < entry.backoff +} + +// Garbage collect records that have aged past maxDuration. Backoff users are expected +// to invoke this periodically. 
+func (p *Backoff) GC() { + p.Lock() + defer p.Unlock() + now := p.Clock.Now() + for id, entry := range p.perItemBackoff { + if now.Sub(entry.lastUpdate) > p.maxDuration*2 { + // GC when entry has not been updated for 2*maxDuration + delete(p.perItemBackoff, id) + } + } +} + +func (p *Backoff) DeleteEntry(id string) { + p.Lock() + defer p.Unlock() + delete(p.perItemBackoff, id) +} + +// Take a lock on *Backoff, before calling initEntryUnsafe +func (p *Backoff) initEntryUnsafe(id string) *backoffEntry { + entry := &backoffEntry{backoff: p.defaultDuration} + p.perItemBackoff[id] = entry + return entry +} + +// After 2*maxDuration we restart the backoff factor to the beginning +func hasExpired(eventTime time.Time, lastUpdate time.Time, maxDuration time.Duration) bool { + return eventTime.Sub(lastUpdate) > maxDuration*2 // consider stable if it's ok for twice the maxDuration +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/throttle.go b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go new file mode 100644 index 0000000..e671c04 --- /dev/null +++ b/vendor/k8s.io/client-go/util/flowcontrol/throttle.go @@ -0,0 +1,143 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flowcontrol + +import ( + "sync" + "time" + + "golang.org/x/time/rate" +) + +type RateLimiter interface { + // TryAccept returns true if a token is taken immediately. Otherwise, + // it returns false. + TryAccept() bool + // Accept returns once a token becomes available. + Accept() + // Stop stops the rate limiter, subsequent calls to CanAccept will return false + Stop() + // QPS returns QPS of this rate limiter + QPS() float32 +} + +type tokenBucketRateLimiter struct { + limiter *rate.Limiter + clock Clock + qps float32 +} + +// NewTokenBucketRateLimiter creates a rate limiter which implements a token bucket approach. +// The rate limiter allows bursts of up to 'burst' to exceed the QPS, while still maintaining a +// smoothed qps rate of 'qps'. +// The bucket is initially filled with 'burst' tokens, and refills at a rate of 'qps'. +// The maximum number of tokens in the bucket is capped at 'burst'. +func NewTokenBucketRateLimiter(qps float32, burst int) RateLimiter { + limiter := rate.NewLimiter(rate.Limit(qps), burst) + return newTokenBucketRateLimiter(limiter, realClock{}, qps) +} + +// An injectable, mockable clock interface. +type Clock interface { + Now() time.Time + Sleep(time.Duration) +} + +type realClock struct{} + +func (realClock) Now() time.Time { + return time.Now() +} +func (realClock) Sleep(d time.Duration) { + time.Sleep(d) +} + +// NewTokenBucketRateLimiterWithClock is identical to NewTokenBucketRateLimiter +// but allows an injectable clock, for testing. 
+func NewTokenBucketRateLimiterWithClock(qps float32, burst int, c Clock) RateLimiter { + limiter := rate.NewLimiter(rate.Limit(qps), burst) + return newTokenBucketRateLimiter(limiter, c, qps) +} + +func newTokenBucketRateLimiter(limiter *rate.Limiter, c Clock, qps float32) RateLimiter { + return &tokenBucketRateLimiter{ + limiter: limiter, + clock: c, + qps: qps, + } +} + +func (t *tokenBucketRateLimiter) TryAccept() bool { + return t.limiter.AllowN(t.clock.Now(), 1) +} + +// Accept will block until a token becomes available +func (t *tokenBucketRateLimiter) Accept() { + now := t.clock.Now() + t.clock.Sleep(t.limiter.ReserveN(now, 1).DelayFrom(now)) +} + +func (t *tokenBucketRateLimiter) Stop() { +} + +func (t *tokenBucketRateLimiter) QPS() float32 { + return t.qps +} + +type fakeAlwaysRateLimiter struct{} + +func NewFakeAlwaysRateLimiter() RateLimiter { + return &fakeAlwaysRateLimiter{} +} + +func (t *fakeAlwaysRateLimiter) TryAccept() bool { + return true +} + +func (t *fakeAlwaysRateLimiter) Stop() {} + +func (t *fakeAlwaysRateLimiter) Accept() {} + +func (t *fakeAlwaysRateLimiter) QPS() float32 { + return 1 +} + +type fakeNeverRateLimiter struct { + wg sync.WaitGroup +} + +func NewFakeNeverRateLimiter() RateLimiter { + rl := fakeNeverRateLimiter{} + rl.wg.Add(1) + return &rl +} + +func (t *fakeNeverRateLimiter) TryAccept() bool { + return false +} + +func (t *fakeNeverRateLimiter) Stop() { + t.wg.Done() +} + +func (t *fakeNeverRateLimiter) Accept() { + t.wg.Wait() +} + +func (t *fakeNeverRateLimiter) QPS() float32 { + return 1 +} diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS new file mode 100644 index 0000000..470b7a1 --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/OWNERS @@ -0,0 +1,7 @@ +approvers: +- sig-auth-certificates-approvers +reviewers: +- sig-auth-certificates-reviewers +labels: +- sig/auth + diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go new file mode 100644 index 0000000..83c2c62 --- /dev/null +++ b/vendor/k8s.io/client-go/util/keyutil/key.go @@ -0,0 +1,323 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package keyutil contains utilities for managing public/private key pairs. +package keyutil + +import ( + "crypto" + "crypto/ecdsa" + "crypto/elliptic" + cryptorand "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +const ( + // ECPrivateKeyBlockType is a possible value for pem.Block.Type. + ECPrivateKeyBlockType = "EC PRIVATE KEY" + // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. + RSAPrivateKeyBlockType = "RSA PRIVATE KEY" + // PrivateKeyBlockType is a possible value for pem.Block.Type. + PrivateKeyBlockType = "PRIVATE KEY" + // PublicKeyBlockType is a possible value for pem.Block.Type. 
+ PublicKeyBlockType = "PUBLIC KEY" +) + +// MakeEllipticPrivateKeyPEM creates an ECDSA private key +func MakeEllipticPrivateKeyPEM() ([]byte, error) { + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + return nil, err + } + + derBytes, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, err + } + + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil +} + +// WriteKey writes the pem-encoded key data to keyPath. +// The key file will be created with file mode 0600. +// If the key file already exists, it will be overwritten. +// The parent directory of the keyPath will be created as needed with file mode 0755. +func WriteKey(keyPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) +} + +// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it +// can't find one, it will generate a new key and store it there. +func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { + loadedData, err := ioutil.ReadFile(keyPath) + // Call verifyKeyData to ensure the file wasn't empty/corrupt. + if err == nil && verifyKeyData(loadedData) { + return loadedData, false, err + } + if !os.IsNotExist(err) { + return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) + } + + generatedData, err := MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, false, fmt.Errorf("error generating key: %v", err) + } + if err := WriteKey(keyPath, generatedData); err != nil { + return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + } + return generatedData, true, nil +} + +// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to +// a PEM encoded block or returns an error. +func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { + switch t := privateKey.(type) { + case *ecdsa.PrivateKey: + derBytes, err := x509.MarshalECPrivateKey(t) + if err != nil { + return nil, err + } + block := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(block), nil + case *rsa.PrivateKey: + block := &pem.Block{ + Type: RSAPrivateKeyBlockType, + Bytes: x509.MarshalPKCS1PrivateKey(t), + } + return pem.EncodeToMemory(block), nil + default: + return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) + } +} + +// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. +// Returns an error if the file could not be read or if the private key could not be parsed. +func PrivateKeyFromFile(file string) (interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + key, err := ParsePrivateKeyPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading private key file %s: %v", file, err) + } + return key, nil +} + +// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. +// Reads public keys from both public and private key files. 
+func PublicKeysFromFile(file string) ([]interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + keys, err := ParsePublicKeysPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading public key file %s: %v", file, err) + } + return keys, nil +} + +// verifyKeyData returns true if the provided data appears to be a valid private key. +func verifyKeyData(data []byte) bool { + if len(data) == 0 { + return false + } + _, err := ParsePrivateKeyPEM(data) + return err == nil +} + +// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. +// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" +func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { + var privateKeyPemBlock *pem.Block + for { + privateKeyPemBlock, keyData = pem.Decode(keyData) + if privateKeyPemBlock == nil { + break + } + + switch privateKeyPemBlock.Type { + case ECPrivateKeyBlockType: + // ECDSA Private Key in ASN.1 format + if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case RSAPrivateKeyBlockType: + // RSA Private Key in PKCS#1 format + if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case PrivateKeyBlockType: + // RSA or ECDSA Private Key in unencrypted PKCS#8 format + if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + } + + // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks + // originally, only the first PEM block was parsed and expected to be a key block + } + + // we read all the PEM blocks and didn't recognize one + return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") +} + +// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. +// Reads public keys from both public and private key files. 
+func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { + var block *pem.Block + keys := []interface{}{} + for { + // read the next block + block, keyData = pem.Decode(keyData) + if block == nil { + break + } + + // test block against parsing functions + if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseECPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + + // tolerate non-key PEM blocks for backwards compatibility + // originally, only the first PEM block was parsed and expected to be a key block + } + + if len(keys) == 0 { + return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") + } + return keys, nil +} + +// parseRSAPublicKey parses a single RSA public key from the provided data +func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an RSA Public Key + var pubKey *rsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") + } + + return pubKey, nil +} + +// parseRSAPrivateKey parses a single RSA private key from the provided data +func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { + return nil, err + } + } + + // Test if parsed key is an RSA Private Key + var privKey *rsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") + } + + return privKey, nil +} + +// parseECPublicKey parses a single ECDSA public key from the provided data +func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an ECDSA Public Key + var pubKey *ecdsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") + } + + return pubKey, nil +} + +// parseECPrivateKey parses a single ECDSA private key from the provided data +func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(data); err != nil { + return nil, err + } + + // Test if parsed key is an ECDSA Private Key + var privKey *ecdsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") + } + + return privKey, nil +} diff --git a/vendor/k8s.io/klog/.travis.yml 
b/vendor/k8s.io/klog/.travis.yml new file mode 100644 index 0000000..5677664 --- /dev/null +++ b/vendor/k8s.io/klog/.travis.yml @@ -0,0 +1,16 @@ +language: go +go_import_path: k8s.io/klog +dist: xenial +go: + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d .) + - diff -u <(echo -n) <(golint $(go list -e ./...)) + - go tool vet . || go vet . + - go test -v -race ./... +install: + - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md new file mode 100644 index 0000000..574a56a --- /dev/null +++ b/vendor/k8s.io/klog/CONTRIBUTING.md @@ -0,0 +1,22 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! + +## Contact Information + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) diff --git a/vendor/k8s.io/klog/LICENSE b/vendor/k8s.io/klog/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/vendor/k8s.io/klog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. 
+ +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. 
Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS new file mode 100644 index 0000000..380e514 --- /dev/null +++ b/vendor/k8s.io/klog/OWNERS @@ -0,0 +1,19 @@ +# See the OWNERS docs at https://go.k8s.io/owners +reviewers: + - jayunit100 + - hoegaarden + - andyxning + - neolit123 + - pohly + - yagonobre + - vincepri + - detiber +approvers: + - dims + - thockin + - justinsb + - tallclair + - piosz + - brancz + - DirectXMan12 + - lavalamp diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md new file mode 100644 index 0000000..841468b --- /dev/null +++ b/vendor/k8s.io/klog/README.md @@ -0,0 +1,97 @@ +klog +==== + +klog is a permanent fork of https://github.com/golang/glog. + +## Why was klog created? + +The decision to create klog was one that wasn't made lightly, but it was necessary due to some +drawbacks that are present in [glog](https://github.com/golang/glog). 
Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: + +> The code in this repo [...] is not itself under development + +This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: + + * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. + * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it + * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. + +Historical context is available here: + + * https://github.com/kubernetes/kubernetes/issues/61006 + * https://github.com/kubernetes/kubernetes/issues/70264 + * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ + * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ + +---- + +How to use klog +=============== +- Replace imports for `github.com/golang/glog` with `k8s.io/klog` +- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags +- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) +- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) + +### Coexisting with glog +This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. + +## Community, discussion, contribution, and support + +Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). + +You can reach the maintainers of this project at: + +- [Slack](https://kubernetes.slack.com/messages/sig-architecture) +- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) + +### Code of conduct + +Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). + +---- + +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. 
+ + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md new file mode 100644 index 0000000..b53eb96 --- /dev/null +++ b/vendor/k8s.io/klog/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `klog` is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS new file mode 100644 index 0000000..6128a58 --- /dev/null +++ b/vendor/k8s.io/klog/SECURITY_CONTACTS @@ -0,0 +1,20 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Committee to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +dims +thockin +justinsb +tallclair +piosz +brancz +DirectXMan12 +lavalamp diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md new file mode 100644 index 0000000..0d15c00 --- /dev/null +++ b/vendor/k8s.io/klog/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod new file mode 100644 index 0000000..3877d85 --- /dev/null +++ b/vendor/k8s.io/klog/go.mod @@ -0,0 +1,5 @@ +module k8s.io/klog + +go 1.12 + +require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum new file mode 100644 index 0000000..fb64d27 --- /dev/null +++ b/vendor/k8s.io/klog/go.sum @@ -0,0 +1,2 @@ +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go new file mode 100644 index 0000000..2712ce0 --- /dev/null +++ b/vendor/k8s.io/klog/klog.go @@ -0,0 +1,1308 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// klog.Info("Prepare to repel boarders") +// +// klog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if klog.V(2) { +// klog.Info("Starting transaction...") +// } +// +// klog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to standard error. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=true +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package klog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "math" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. 
+func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. 
It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.ParseInt(patLev[1], 10, 32) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. 
+ t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +// init sets up the defaults and runs flushDaemon. +func init() { + logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. + logging.setVState(0, nil, false) + logging.logDir = "" + logging.logFile = "" + logging.logFileMaxSizeMB = 1800 + logging.toStderr = true + logging.alsoToStderr = false + logging.skipHeaders = false + logging.addDirHeader = false + logging.skipLogHeaders = false + go logging.flushDaemon() +} + +// InitFlags is for explicitly initializing the flags. +func InitFlags(flagset *flag.FlagSet) { + if flagset == nil { + flagset = flag.CommandLine + } + + flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") + flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") + flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, + "Defines the maximum size a log file can grow to. Unit is megabytes. "+ + "If the value is 0, the maximum file size is unlimited.") + flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") + flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") + flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") + flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") + flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") + flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") + flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. 
It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ + + // If non-empty, overrides the choice of directory in which to write logs. + // See createLogDirs for the full list of possible destinations. + logDir string + + // If non-empty, specifies the path of the file to write logs. mutually exclusive + // with the log-dir option. + logFile string + + // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the + // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. + logFileMaxSizeMB uint64 + + // If true, do not add the prefix headers, useful when used with SetOutput + skipHeaders bool + + // If true, do not add the headers to log files + skipLogHeaders bool + + // If true, add the file directory to the header + addDirHeader bool +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. 
+ +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + if l.skipHeaders { + return buf + } + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) 
+ l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// redirectBuffer is used to set an alternate destination for the logs +type redirectBuffer struct { + w io.Writer +} + +func (rb *redirectBuffer) Sync() error { + return nil +} + +func (rb *redirectBuffer) Flush() error { + return nil +} + +func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { + return rb.w.Write(bytes) +} + +// SetOutput sets the output destination for all severities +func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() + for s := fatalLog; s >= infoLog; s-- { + rb := &redirectBuffer{ + w: w, + } + logging.file[s] = rb + } +} + +// SetOutputBySeverity sets the output destination for specific severity +func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) + } + rb := &redirectBuffer{ + w: w, + } + logging.file[sev] = rb +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. 
+ if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when klog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file + maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. 
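+// Illustrative examples of the rules implemented below: with logFile set and
+// logFileMaxSizeMB == 100 the limit is 100 * 1024 * 1024 bytes; with
+// logFileMaxSizeMB == 0 the limit is effectively unbounded (math.MaxUint64);
+// and when logFile is unset the package-level MaxSize default applies instead.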
+func CalculateMaxSize() uint64 { + if logging.logFile != "" { + if logging.logFileMaxSizeMB == 0 { + // If logFileMaxSizeMB is zero, we don't have limitations on the log size. + return math.MaxUint64 + } + // Flag logFileMaxSizeMB is in MB for user convenience. + return logging.logFileMaxSizeMB * 1024 * 1024 + } + // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. + return MaxSize +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= sb.maxbytes { + if err := sb.rotateFile(time.Now(), false); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now, startup) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + if sb.logger.skipLogHeaders { + return nil + } + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + maxbytes: CalculateMaxSize(), + } + if err := sb.rotateFile(now, true); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 5 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. 
Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if klog.V(2) { klog.Info("log this") } +// or +// klog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. 
+ if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). 
+func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is always appended. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go new file mode 100644 index 0000000..e4010ad --- /dev/null +++ b/vendor/k8s.io/klog/klog_file.go @@ -0,0 +1,139 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package klog + +import ( + "errors" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +func createLogDirs() { + if logging.logDir != "" { + logDirs = append(logDirs, logging.logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. +func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { + if logging.logFile != "" { + f, err := openOrCreate(logging.logFile, startup) + if err == nil { + return f, logging.logFile, nil + } + return nil, "", fmt.Errorf("log: unable to create log: %v", err) + } + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := openOrCreate(fname, startup) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} + +// The startup argument indicates whether this is the initial startup of klog. +// If startup is true, existing files are opened for appending instead of truncated. 
+func openOrCreate(name string, startup bool) (*os.File, error) { + if startup { + f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) + return f, err + } + f, err := os.Create(name) + return f, err +} diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/k8s.io/utils/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/vendor/k8s.io/utils/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/utils/integer/integer.go new file mode 100644 index 0000000..e4e740c --- /dev/null +++ b/vendor/k8s.io/utils/integer/integer.go @@ -0,0 +1,73 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integer + +// IntMax returns the maximum of the params +func IntMax(a, b int) int { + if b > a { + return b + } + return a +} + +// IntMin returns the minimum of the params +func IntMin(a, b int) int { + if b < a { + return b + } + return a +} + +// Int32Max returns the maximum of the params +func Int32Max(a, b int32) int32 { + if b > a { + return b + } + return a +} + +// Int32Min returns the minimum of the params +func Int32Min(a, b int32) int32 { + if b < a { + return b + } + return a +} + +// Int64Max returns the maximum of the params +func Int64Max(a, b int64) int64 { + if b > a { + return b + } + return a +} + +// Int64Min returns the minimum of the params +func Int64Min(a, b int64) int64 { + if b < a { + return b + } + return a +} + +// RoundToInt32 rounds floats into integer numbers. 
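+// For example, RoundToInt32(2.5) == 3, RoundToInt32(2.4) == 2 and
+// RoundToInt32(-2.5) == -3 (ties round away from zero).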
+func RoundToInt32(a float64) int32 { + if a < 0 { + return int32(a - 0.5) + } + return int32(a + 0.5) +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6ca435e..89f8e31 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -15,13 +15,8 @@ github.com/alicebob/miniredis/v2/server github.com/beorn7/perks/quantile # github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 github.com/bradfitz/gomemcache/memcache -# github.com/ericchiang/k8s v1.2.0 -github.com/ericchiang/k8s -github.com/ericchiang/k8s/apis/meta/v1 -github.com/ericchiang/k8s/runtime -github.com/ericchiang/k8s/runtime/schema -github.com/ericchiang/k8s/util/intstr -github.com/ericchiang/k8s/watch/versioned +# github.com/davecgh/go-spew v1.1.1 +github.com/davecgh/go-spew/spew # github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 github.com/gin-contrib/sse # github.com/gin-gonic/gin v1.4.0 @@ -29,17 +24,31 @@ github.com/gin-gonic/gin github.com/gin-gonic/gin/binding github.com/gin-gonic/gin/internal/json github.com/gin-gonic/gin/render -# github.com/golang/protobuf v1.3.2 +# github.com/gogo/protobuf v1.3.1 +github.com/gogo/protobuf/proto +github.com/gogo/protobuf/sortkeys +# github.com/golang/protobuf v1.4.2 github.com/golang/protobuf/proto +github.com/golang/protobuf/ptypes +github.com/golang/protobuf/ptypes/any +github.com/golang/protobuf/ptypes/duration +github.com/golang/protobuf/ptypes/timestamp # github.com/gomodule/redigo v2.0.0+incompatible github.com/gomodule/redigo/internal github.com/gomodule/redigo/redis +# github.com/google/gofuzz v1.1.0 +github.com/google/gofuzz # github.com/google/uuid v1.1.1 github.com/google/uuid +# github.com/googleapis/gnostic v0.5.1 +github.com/googleapis/gnostic/OpenAPIv2 +github.com/googleapis/gnostic/compiler +github.com/googleapis/gnostic/extensions +github.com/googleapis/gnostic/jsonschema # github.com/hashicorp/golang-lru v0.5.3 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru -# github.com/json-iterator/go v1.1.7 +# github.com/json-iterator/go v1.1.8 github.com/json-iterator/go # github.com/konsorten/go-windows-terminal-sequences v1.0.2 github.com/konsorten/go-windows-terminal-sequences @@ -83,20 +92,207 @@ github.com/yuin/gopher-lua/parse github.com/yuin/gopher-lua/pm # github.com/zsais/go-gin-prometheus v0.1.0 github.com/zsais/go-gin-prometheus -# golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297 +# golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 +golang.org/x/crypto/ssh/terminal +# golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 +golang.org/x/net/context +golang.org/x/net/context/ctxhttp golang.org/x/net/http/httpguts golang.org/x/net/http2 golang.org/x/net/http2/hpack golang.org/x/net/idna -# golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 +# golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d +golang.org/x/oauth2 +golang.org/x/oauth2/internal +# golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 golang.org/x/sys/unix golang.org/x/sys/windows -# golang.org/x/text v0.3.0 +# golang.org/x/text v0.3.2 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm +# golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e +golang.org/x/time/rate +# google.golang.org/appengine v1.4.0 +google.golang.org/appengine/internal +google.golang.org/appengine/internal/base +google.golang.org/appengine/internal/datastore +google.golang.org/appengine/internal/log +google.golang.org/appengine/internal/remote_api +google.golang.org/appengine/internal/urlfetch 
+google.golang.org/appengine/urlfetch +# google.golang.org/protobuf v1.23.0 +google.golang.org/protobuf/encoding/prototext +google.golang.org/protobuf/encoding/protowire +google.golang.org/protobuf/internal/descfmt +google.golang.org/protobuf/internal/descopts +google.golang.org/protobuf/internal/detrand +google.golang.org/protobuf/internal/encoding/defval +google.golang.org/protobuf/internal/encoding/messageset +google.golang.org/protobuf/internal/encoding/tag +google.golang.org/protobuf/internal/encoding/text +google.golang.org/protobuf/internal/errors +google.golang.org/protobuf/internal/fieldnum +google.golang.org/protobuf/internal/fieldsort +google.golang.org/protobuf/internal/filedesc +google.golang.org/protobuf/internal/filetype +google.golang.org/protobuf/internal/flags +google.golang.org/protobuf/internal/genname +google.golang.org/protobuf/internal/impl +google.golang.org/protobuf/internal/mapsort +google.golang.org/protobuf/internal/pragma +google.golang.org/protobuf/internal/set +google.golang.org/protobuf/internal/strs +google.golang.org/protobuf/internal/version +google.golang.org/protobuf/proto +google.golang.org/protobuf/reflect/protoreflect +google.golang.org/protobuf/reflect/protoregistry +google.golang.org/protobuf/runtime/protoiface +google.golang.org/protobuf/runtime/protoimpl +google.golang.org/protobuf/types/known/anypb +google.golang.org/protobuf/types/known/durationpb +google.golang.org/protobuf/types/known/timestamppb # gopkg.in/go-playground/validator.v8 v8.18.2 gopkg.in/go-playground/validator.v8 -# gopkg.in/yaml.v2 v2.2.2 +# gopkg.in/inf.v0 v0.9.1 +gopkg.in/inf.v0 +# gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 +# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 +gopkg.in/yaml.v3 +# k8s.io/api v0.18.6 +k8s.io/api/admissionregistration/v1beta1 +k8s.io/api/apps/v1 +k8s.io/api/apps/v1beta1 +k8s.io/api/apps/v1beta2 +k8s.io/api/auditregistration/v1alpha1 +k8s.io/api/authentication/v1 +k8s.io/api/authentication/v1beta1 +k8s.io/api/authorization/v1 +k8s.io/api/authorization/v1beta1 +k8s.io/api/autoscaling/v1 +k8s.io/api/autoscaling/v2beta1 +k8s.io/api/autoscaling/v2beta2 +k8s.io/api/batch/v1 +k8s.io/api/batch/v1beta1 +k8s.io/api/batch/v2alpha1 +k8s.io/api/certificates/v1beta1 +k8s.io/api/coordination/v1 +k8s.io/api/coordination/v1beta1 +k8s.io/api/core/v1 +k8s.io/api/events/v1beta1 +k8s.io/api/extensions/v1beta1 +k8s.io/api/networking/v1 +k8s.io/api/networking/v1beta1 +k8s.io/api/node/v1alpha1 +k8s.io/api/node/v1beta1 +k8s.io/api/policy/v1beta1 +k8s.io/api/rbac/v1 +k8s.io/api/rbac/v1alpha1 +k8s.io/api/rbac/v1beta1 +k8s.io/api/scheduling/v1 +k8s.io/api/scheduling/v1alpha1 +k8s.io/api/scheduling/v1beta1 +k8s.io/api/settings/v1alpha1 +k8s.io/api/storage/v1 +k8s.io/api/storage/v1alpha1 +k8s.io/api/storage/v1beta1 +# k8s.io/apimachinery v0.18.6 +k8s.io/apimachinery/pkg/api/errors +k8s.io/apimachinery/pkg/api/meta +k8s.io/apimachinery/pkg/api/resource +k8s.io/apimachinery/pkg/apis/meta/v1 +k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/conversion +k8s.io/apimachinery/pkg/conversion/queryparams +k8s.io/apimachinery/pkg/fields +k8s.io/apimachinery/pkg/labels +k8s.io/apimachinery/pkg/runtime +k8s.io/apimachinery/pkg/runtime/schema +k8s.io/apimachinery/pkg/runtime/serializer +k8s.io/apimachinery/pkg/runtime/serializer/json +k8s.io/apimachinery/pkg/runtime/serializer/protobuf +k8s.io/apimachinery/pkg/runtime/serializer/recognizer +k8s.io/apimachinery/pkg/runtime/serializer/streaming +k8s.io/apimachinery/pkg/runtime/serializer/versioning 
+k8s.io/apimachinery/pkg/selection +k8s.io/apimachinery/pkg/types +k8s.io/apimachinery/pkg/util/clock +k8s.io/apimachinery/pkg/util/errors +k8s.io/apimachinery/pkg/util/framer +k8s.io/apimachinery/pkg/util/intstr +k8s.io/apimachinery/pkg/util/json +k8s.io/apimachinery/pkg/util/naming +k8s.io/apimachinery/pkg/util/net +k8s.io/apimachinery/pkg/util/runtime +k8s.io/apimachinery/pkg/util/sets +k8s.io/apimachinery/pkg/util/validation +k8s.io/apimachinery/pkg/util/validation/field +k8s.io/apimachinery/pkg/util/yaml +k8s.io/apimachinery/pkg/version +k8s.io/apimachinery/pkg/watch +k8s.io/apimachinery/third_party/forked/golang/reflect +# k8s.io/client-go v11.0.0+incompatible +k8s.io/client-go/discovery +k8s.io/client-go/kubernetes +k8s.io/client-go/kubernetes/scheme +k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 +k8s.io/client-go/kubernetes/typed/apps/v1 +k8s.io/client-go/kubernetes/typed/apps/v1beta1 +k8s.io/client-go/kubernetes/typed/apps/v1beta2 +k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1 +k8s.io/client-go/kubernetes/typed/authentication/v1 +k8s.io/client-go/kubernetes/typed/authentication/v1beta1 +k8s.io/client-go/kubernetes/typed/authorization/v1 +k8s.io/client-go/kubernetes/typed/authorization/v1beta1 +k8s.io/client-go/kubernetes/typed/autoscaling/v1 +k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 +k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 +k8s.io/client-go/kubernetes/typed/batch/v1 +k8s.io/client-go/kubernetes/typed/batch/v1beta1 +k8s.io/client-go/kubernetes/typed/batch/v2alpha1 +k8s.io/client-go/kubernetes/typed/certificates/v1beta1 +k8s.io/client-go/kubernetes/typed/coordination/v1 +k8s.io/client-go/kubernetes/typed/coordination/v1beta1 +k8s.io/client-go/kubernetes/typed/core/v1 +k8s.io/client-go/kubernetes/typed/events/v1beta1 +k8s.io/client-go/kubernetes/typed/extensions/v1beta1 +k8s.io/client-go/kubernetes/typed/networking/v1 +k8s.io/client-go/kubernetes/typed/networking/v1beta1 +k8s.io/client-go/kubernetes/typed/node/v1alpha1 +k8s.io/client-go/kubernetes/typed/node/v1beta1 +k8s.io/client-go/kubernetes/typed/policy/v1beta1 +k8s.io/client-go/kubernetes/typed/rbac/v1 +k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 +k8s.io/client-go/kubernetes/typed/rbac/v1beta1 +k8s.io/client-go/kubernetes/typed/scheduling/v1 +k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 +k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 +k8s.io/client-go/kubernetes/typed/settings/v1alpha1 +k8s.io/client-go/kubernetes/typed/storage/v1 +k8s.io/client-go/kubernetes/typed/storage/v1alpha1 +k8s.io/client-go/kubernetes/typed/storage/v1beta1 +k8s.io/client-go/pkg/apis/clientauthentication +k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 +k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 +k8s.io/client-go/pkg/version +k8s.io/client-go/plugin/pkg/client/auth/exec +k8s.io/client-go/rest +k8s.io/client-go/rest/watch +k8s.io/client-go/tools/clientcmd/api +k8s.io/client-go/tools/metrics +k8s.io/client-go/tools/reference +k8s.io/client-go/transport +k8s.io/client-go/util/cert +k8s.io/client-go/util/connrotation +k8s.io/client-go/util/flowcontrol +k8s.io/client-go/util/keyutil +# k8s.io/klog v1.0.0 +k8s.io/klog +# k8s.io/utils v0.0.0-20200724153422-f32512634ab7 +k8s.io/utils/integer +# sigs.k8s.io/structured-merge-diff/v3 v3.0.0 +sigs.k8s.io/structured-merge-diff/v3/value +# sigs.k8s.io/yaml v1.2.0 +sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE new file mode 100644 
index 0000000..8dada3e --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go new file mode 100644 index 0000000..f70cd41 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go @@ -0,0 +1,203 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// Allocator provides a value object allocation strategy. +// Value objects can be allocated by passing an allocator to the "Using" +// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...). +// Value objects returned from "Using" functions should be given back to the allocator +// once longer needed by calling Allocator.Free(Value). +type Allocator interface { + // Free gives the allocator back any value objects returned by the "Using" + // receiver functions on the value interfaces. + // interface{} may be any of: Value, Map, List or Range. + Free(interface{}) + + // The unexported functions are for "Using" receiver functions of the value types + // to request what they need from the allocator. + allocValueUnstructured() *valueUnstructured + allocListUnstructuredRange() *listUnstructuredRange + allocValueReflect() *valueReflect + allocMapReflect() *mapReflect + allocStructReflect() *structReflect + allocListReflect() *listReflect + allocListReflectRange() *listReflectRange +} + +// HeapAllocator simply allocates objects to the heap. It is the default +// allocator used receiver functions on the value interfaces that do not accept +// an allocator and should be used whenever allocating objects that will not +// be given back to an allocator by calling Allocator.Free(Value). +var HeapAllocator = &heapAllocator{} + +type heapAllocator struct{} + +func (p *heapAllocator) allocValueUnstructured() *valueUnstructured { + return &valueUnstructured{} +} + +func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange { + return &listUnstructuredRange{vv: &valueUnstructured{}} +} + +func (p *heapAllocator) allocValueReflect() *valueReflect { + return &valueReflect{} +} + +func (p *heapAllocator) allocStructReflect() *structReflect { + return &structReflect{} +} + +func (p *heapAllocator) allocMapReflect() *mapReflect { + return &mapReflect{} +} + +func (p *heapAllocator) allocListReflect() *listReflect { + return &listReflect{} +} + +func (p *heapAllocator) allocListReflectRange() *listReflectRange { + return &listReflectRange{vr: &valueReflect{}} +} + +func (p *heapAllocator) Free(_ interface{}) {} + +// NewFreelistAllocator creates freelist based allocator. 
+// This allocator provides fast allocation and freeing of short lived value objects. +// +// The freelists are bounded in size by freelistMaxSize. If more than this amount of value objects is +// allocated at once, the excess will be returned to the heap for garbage collection when freed. +// +// This allocator is unsafe and must not be accessed concurrently by goroutines. +// +// This allocator works well for traversal of value data trees. Typical usage is to acquire +// a freelist at the beginning of the traversal and use it through out +// for all temporary value access. +func NewFreelistAllocator() Allocator { + return &freelistAllocator{ + valueUnstructured: &freelist{new: func() interface{} { + return &valueUnstructured{} + }}, + listUnstructuredRange: &freelist{new: func() interface{} { + return &listUnstructuredRange{vv: &valueUnstructured{}} + }}, + valueReflect: &freelist{new: func() interface{} { + return &valueReflect{} + }}, + mapReflect: &freelist{new: func() interface{} { + return &mapReflect{} + }}, + structReflect: &freelist{new: func() interface{} { + return &structReflect{} + }}, + listReflect: &freelist{new: func() interface{} { + return &listReflect{} + }}, + listReflectRange: &freelist{new: func() interface{} { + return &listReflectRange{vr: &valueReflect{}} + }}, + } +} + +// Bound memory usage of freelists. This prevents the processing of very large lists from leaking memory. +// This limit is large enough for endpoints objects containing 1000 IP address entries. Freed objects +// that don't fit into the freelist are orphaned on the heap to be garbage collected. +const freelistMaxSize = 1000 + +type freelistAllocator struct { + valueUnstructured *freelist + listUnstructuredRange *freelist + valueReflect *freelist + mapReflect *freelist + structReflect *freelist + listReflect *freelist + listReflectRange *freelist +} + +type freelist struct { + list []interface{} + new func() interface{} +} + +func (f *freelist) allocate() interface{} { + var w2 interface{} + if n := len(f.list); n > 0 { + w2, f.list = f.list[n-1], f.list[:n-1] + } else { + w2 = f.new() + } + return w2 +} + +func (f *freelist) free(v interface{}) { + if len(f.list) < freelistMaxSize { + f.list = append(f.list, v) + } +} + +func (w *freelistAllocator) Free(value interface{}) { + switch v := value.(type) { + case *valueUnstructured: + v.Value = nil // don't hold references to unstructured objects + w.valueUnstructured.free(v) + case *listUnstructuredRange: + v.vv.Value = nil // don't hold references to unstructured objects + w.listUnstructuredRange.free(v) + case *valueReflect: + v.ParentMapKey = nil + v.ParentMap = nil + w.valueReflect.free(v) + case *mapReflect: + w.mapReflect.free(v) + case *structReflect: + w.structReflect.free(v) + case *listReflect: + w.listReflect.free(v) + case *listReflectRange: + v.vr.ParentMapKey = nil + v.vr.ParentMap = nil + w.listReflectRange.free(v) + } +} + +func (w *freelistAllocator) allocValueUnstructured() *valueUnstructured { + return w.valueUnstructured.allocate().(*valueUnstructured) +} + +func (w *freelistAllocator) allocListUnstructuredRange() *listUnstructuredRange { + return w.listUnstructuredRange.allocate().(*listUnstructuredRange) +} + +func (w *freelistAllocator) allocValueReflect() *valueReflect { + return w.valueReflect.allocate().(*valueReflect) +} + +func (w *freelistAllocator) allocStructReflect() *structReflect { + return w.structReflect.allocate().(*structReflect) +} + +func (w *freelistAllocator) allocMapReflect() *mapReflect { + return 
w.mapReflect.allocate().(*mapReflect) +} + +func (w *freelistAllocator) allocListReflect() *listReflect { + return w.listReflect.allocate().(*listReflect) +} + +func (w *freelistAllocator) allocListReflectRange() *listReflectRange { + return w.listReflectRange.allocate().(*listReflectRange) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go new file mode 100644 index 0000000..84d7f0f --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package value defines types for an in-memory representation of yaml or json +// objects, organized for convenient comparison with a schema (as defined by +// the sibling schema package). Functions for reading and writing the objects +// are also provided. +package value diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go new file mode 100644 index 0000000..be3c672 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go @@ -0,0 +1,97 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "sort" + "strings" +) + +// Field is an individual key-value pair. +type Field struct { + Name string + Value Value +} + +// FieldList is a list of key-value pairs. Each field is expected to +// have a different name. +type FieldList []Field + +// Sort sorts the field list by Name. +func (f FieldList) Sort() { + if len(f) < 2 { + return + } + if len(f) == 2 { + if f[1].Name < f[0].Name { + f[0], f[1] = f[1], f[0] + } + return + } + sort.SliceStable(f, func(i, j int) bool { + return f[i].Name < f[j].Name + }) +} + +// Less compares two lists lexically. +func (f FieldList) Less(rhs FieldList) bool { + return f.Compare(rhs) == -1 +} + +// Compare compares two lists lexically. The result will be 0 if f==rhs, -1 +// if f < rhs, and +1 if f > rhs. +func (f FieldList) Compare(rhs FieldList) int { + i := 0 + for { + if i >= len(f) && i >= len(rhs) { + // Maps are the same length and all items are equal. + return 0 + } + if i >= len(f) { + // F is shorter. + return -1 + } + if i >= len(rhs) { + // RHS is shorter. + return 1 + } + if c := strings.Compare(f[i].Name, rhs[i].Name); c != 0 { + return c + } + if c := Compare(f[i].Value, rhs[i].Value); c != 0 { + return c + } + // The items are equal; continue. 
+ i++ + } +} + +// Equals returns true if the two fieldslist are equals, false otherwise. +func (f FieldList) Equals(rhs FieldList) bool { + if len(f) != len(rhs) { + return false + } + for i := range f { + if f[i].Name != rhs[i].Name { + return false + } + if !Equals(f[i].Value, rhs[i].Value) { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go new file mode 100644 index 0000000..d4adb8f --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go @@ -0,0 +1,91 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "fmt" + "reflect" + "strings" +) + +// TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236 +// but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go + +func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool) { + tag := f.Tag.Get("json") + if tag == "-" { + return "", true, false, false + } + name, opts := parseTag(tag) + if name == "" { + name = f.Name + } + return name, false, opts.Contains("inline"), opts.Contains("omitempty") +} + +func isZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Chan, reflect.Func: + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + } + return false +} + +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go new file mode 100644 index 0000000..0748f18 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go @@ -0,0 +1,139 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// List represents a list object. +type List interface { + // Length returns how many items can be found in the map. + Length() int + // At returns the item at the given position in the map. It will + // panic if the index is out of range. + At(int) Value + // AtUsing uses the provided allocator and returns the item at the given + // position in the map. It will panic if the index is out of range. + // The returned Value should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + AtUsing(Allocator, int) Value + // Range returns a ListRange for iterating over the items in the list. + Range() ListRange + // RangeUsing uses the provided allocator and returns a ListRange for + // iterating over the items in the list. + // The returned Range should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + RangeUsing(Allocator) ListRange + // Equals compares the two lists, and return true if they are the same, false otherwise. + // Implementations can use ListEquals as a general implementation for this methods. + Equals(List) bool + // EqualsUsing uses the provided allocator and compares the two lists, and return true if + // they are the same, false otherwise. Implementations can use ListEqualsUsing as a general + // implementation for this methods. + EqualsUsing(Allocator, List) bool +} + +// ListRange represents a single iteration across the items of a list. +type ListRange interface { + // Next increments to the next item in the range, if there is one, and returns true, or returns false if there are no more items. + Next() bool + // Item returns the index and value of the current item in the range. or panics if there is no current item. + // For efficiency, Item may reuse the values returned by previous Item calls. Callers should be careful avoid holding + // pointers to the value returned by Item() that escape the iteration loop since they become invalid once either + // Item() or Allocator.Free() is called. + Item() (index int, value Value) +} + +var EmptyRange = &emptyRange{} + +type emptyRange struct{} + +func (_ *emptyRange) Next() bool { + return false +} + +func (_ *emptyRange) Item() (index int, value Value) { + panic("Item called on empty ListRange") +} + +// ListEquals compares two lists lexically. 
+// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. +func ListEquals(lhs, rhs List) bool { + return ListEqualsUsing(HeapAllocator, lhs, rhs) +} + +// ListEqualsUsing uses the provided allocator and compares two lists lexically. +// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. +func ListEqualsUsing(a Allocator, lhs, rhs List) bool { + if lhs.Length() != rhs.Length() { + return false + } + + lhsRange := lhs.RangeUsing(a) + defer a.Free(lhsRange) + rhsRange := rhs.RangeUsing(a) + defer a.Free(rhsRange) + + for lhsRange.Next() && rhsRange.Next() { + _, lv := lhsRange.Item() + _, rv := rhsRange.Item() + if !EqualsUsing(a, lv, rv) { + return false + } + } + return true +} + +// ListLess compares two lists lexically. +func ListLess(lhs, rhs List) bool { + return ListCompare(lhs, rhs) == -1 +} + +// ListCompare compares two lists lexically. The result will be 0 if l==rhs, -1 +// if l < rhs, and +1 if l > rhs. +func ListCompare(lhs, rhs List) int { + return ListCompareUsing(HeapAllocator, lhs, rhs) +} + +// ListCompareUsing uses the provided allocator and compares two lists lexically. The result will be 0 if l==rhs, -1 +// if l < rhs, and +1 if l > rhs. +func ListCompareUsing(a Allocator, lhs, rhs List) int { + lhsRange := lhs.RangeUsing(a) + defer a.Free(lhsRange) + rhsRange := rhs.RangeUsing(a) + defer a.Free(rhsRange) + + for { + lhsOk := lhsRange.Next() + rhsOk := rhsRange.Next() + if !lhsOk && !rhsOk { + // Lists are the same length and all items are equal. + return 0 + } + if !lhsOk { + // LHS is shorter. + return -1 + } + if !rhsOk { + // RHS is shorter. + return 1 + } + _, lv := lhsRange.Item() + _, rv := rhsRange.Item() + if c := CompareUsing(a, lv, rv); c != 0 { + return c + } + // The items are equal; continue. + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go new file mode 100644 index 0000000..197d4c9 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go @@ -0,0 +1,98 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +import ( + "reflect" +) + +type listReflect struct { + Value reflect.Value +} + +func (r listReflect) Length() int { + val := r.Value + return val.Len() +} + +func (r listReflect) At(i int) Value { + val := r.Value + return mustWrapValueReflect(val.Index(i), nil, nil) +} + +func (r listReflect) AtUsing(a Allocator, i int) Value { + val := r.Value + return a.allocValueReflect().mustReuse(val.Index(i), nil, nil, nil) +} + +func (r listReflect) Unstructured() interface{} { + l := r.Length() + result := make([]interface{}, l) + for i := 0; i < l; i++ { + result[i] = r.At(i).Unstructured() + } + return result +} + +func (r listReflect) Range() ListRange { + return r.RangeUsing(HeapAllocator) +} + +func (r listReflect) RangeUsing(a Allocator) ListRange { + length := r.Value.Len() + if length == 0 { + return EmptyRange + } + rr := a.allocListReflectRange() + rr.list = r.Value + rr.i = -1 + rr.entry = TypeReflectEntryOf(r.Value.Type().Elem()) + return rr +} + +func (r listReflect) Equals(other List) bool { + return r.EqualsUsing(HeapAllocator, other) +} +func (r listReflect) EqualsUsing(a Allocator, other List) bool { + if otherReflectList, ok := other.(*listReflect); ok { + return reflect.DeepEqual(r.Value.Interface(), otherReflectList.Value.Interface()) + } + return ListEqualsUsing(a, &r, other) +} + +type listReflectRange struct { + list reflect.Value + vr *valueReflect + i int + entry *TypeReflectCacheEntry +} + +func (r *listReflectRange) Next() bool { + r.i += 1 + return r.i < r.list.Len() +} + +func (r *listReflectRange) Item() (index int, value Value) { + if r.i < 0 { + panic("Item() called before first calling Next()") + } + if r.i >= r.list.Len() { + panic("Item() called on ListRange with no more items") + } + v := r.list.Index(r.i) + return r.i, r.vr.mustReuse(v, r.entry, nil, nil) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go new file mode 100644 index 0000000..64cd8e7 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go @@ -0,0 +1,74 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +type listUnstructured []interface{} + +func (l listUnstructured) Length() int { + return len(l) +} + +func (l listUnstructured) At(i int) Value { + return NewValueInterface(l[i]) +} + +func (l listUnstructured) AtUsing(a Allocator, i int) Value { + return a.allocValueUnstructured().reuse(l[i]) +} + +func (l listUnstructured) Equals(other List) bool { + return l.EqualsUsing(HeapAllocator, other) +} + +func (l listUnstructured) EqualsUsing(a Allocator, other List) bool { + return ListEqualsUsing(a, &l, other) +} + +func (l listUnstructured) Range() ListRange { + return l.RangeUsing(HeapAllocator) +} + +func (l listUnstructured) RangeUsing(a Allocator) ListRange { + if len(l) == 0 { + return EmptyRange + } + r := a.allocListUnstructuredRange() + r.list = l + r.i = -1 + return r +} + +type listUnstructuredRange struct { + list listUnstructured + vv *valueUnstructured + i int +} + +func (r *listUnstructuredRange) Next() bool { + r.i += 1 + return r.i < len(r.list) +} + +func (r *listUnstructuredRange) Item() (index int, value Value) { + if r.i < 0 { + panic("Item() called before first calling Next()") + } + if r.i >= len(r.list) { + panic("Item() called on ListRange with no more items") + } + return r.i, r.vv.reuse(r.list[r.i]) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go new file mode 100644 index 0000000..168b9fa --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go @@ -0,0 +1,270 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "sort" +) + +// Map represents a Map or go structure. +type Map interface { + // Set changes or set the value of the given key. + Set(key string, val Value) + // Get returns the value for the given key, if present, or (nil, false) otherwise. + Get(key string) (Value, bool) + // GetUsing uses the provided allocator and returns the value for the given key, + // if present, or (nil, false) otherwise. + // The returned Value should be given back to the Allocator when no longer needed + // by calling Allocator.Free(Value). + GetUsing(a Allocator, key string) (Value, bool) + // Has returns true if the key is present, or false otherwise. + Has(key string) bool + // Delete removes the key from the map. + Delete(key string) + // Equals compares the two maps, and return true if they are the same, false otherwise. + // Implementations can use MapEquals as a general implementation for this methods. + Equals(other Map) bool + // EqualsUsing uses the provided allocator and compares the two maps, and return true if + // they are the same, false otherwise. Implementations can use MapEqualsUsing as a general + // implementation for this methods. + EqualsUsing(a Allocator, other Map) bool + // Iterate runs the given function for each key/value in the + // map. Returning false in the closure prematurely stops the + // iteration. 
+ Iterate(func(key string, value Value) bool) bool + // IterateUsing uses the provided allocator and runs the given function for each key/value + // in the map. Returning false in the closure prematurely stops the iteration. + IterateUsing(Allocator, func(key string, value Value) bool) bool + // Length returns the number of items in the map. + Length() int + // Empty returns true if the map is empty. + Empty() bool + // Zip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called + // with the values from both maps, otherwise it is called with the value of the map that contains the key and nil + // for the map that does not contain the key. Returning false in the closure prematurely stops the iteration. + Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool + // ZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps + // contain a value for a given key, fn is called with the values from both maps, otherwise it is called with + // the value of the map that contains the key and nil for the map that does not contain the key. Returning + // false in the closure prematurely stops the iteration. + ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool +} + +// MapTraverseOrder defines the map traversal ordering available. +type MapTraverseOrder int + +const ( + // Unordered indicates that the map traversal has no ordering requirement. + Unordered = iota + // LexicalKeyOrder indicates that the map traversal is ordered by key, lexically. + LexicalKeyOrder +) + +// MapZip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called +// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil +// for the other map. Returning false in the closure prematurely stops the iteration. +func MapZip(lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return MapZipUsing(HeapAllocator, lhs, rhs, order, fn) +} + +// MapZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps +// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with +// the value of the map that contains the key and nil for the other map. Returning false in the closure +// prematurely stops the iteration. +func MapZipUsing(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if lhs != nil { + return lhs.ZipUsing(a, rhs, order, fn) + } + if rhs != nil { + return rhs.ZipUsing(a, lhs, order, func(key string, rhs, lhs Value) bool { // arg positions of lhs and rhs deliberately swapped + return fn(key, lhs, rhs) + }) + } + return true +} + +// defaultMapZip provides a default implementation of Zip for implementations that do not need to provide +// their own optimized implementation. 
+func defaultMapZip(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + switch order { + case Unordered: + return unorderedMapZip(a, lhs, rhs, fn) + case LexicalKeyOrder: + return lexicalKeyOrderedMapZip(a, lhs, rhs, fn) + default: + panic("Unsupported map order") + } +} + +func unorderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { + if (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) { + return true + } + + if lhs != nil { + ok := lhs.IterateUsing(a, func(key string, lhsValue Value) bool { + var rhsValue Value + if rhs != nil { + if item, ok := rhs.GetUsing(a, key); ok { + rhsValue = item + defer a.Free(rhsValue) + } + } + return fn(key, lhsValue, rhsValue) + }) + if !ok { + return false + } + } + if rhs != nil { + return rhs.IterateUsing(a, func(key string, rhsValue Value) bool { + if lhs == nil || !lhs.Has(key) { + return fn(key, nil, rhsValue) + } + return true + }) + } + return true +} + +func lexicalKeyOrderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { + var lhsLength, rhsLength int + var orderedLength int // rough estimate of length of union of map keys + if lhs != nil { + lhsLength = lhs.Length() + orderedLength = lhsLength + } + if rhs != nil { + rhsLength = rhs.Length() + if rhsLength > orderedLength { + orderedLength = rhsLength + } + } + if lhsLength == 0 && rhsLength == 0 { + return true + } + + ordered := make([]string, 0, orderedLength) + if lhs != nil { + lhs.IterateUsing(a, func(key string, _ Value) bool { + ordered = append(ordered, key) + return true + }) + } + if rhs != nil { + rhs.IterateUsing(a, func(key string, _ Value) bool { + if lhs == nil || !lhs.Has(key) { + ordered = append(ordered, key) + } + return true + }) + } + sort.Strings(ordered) + for _, key := range ordered { + var litem, ritem Value + if lhs != nil { + litem, _ = lhs.GetUsing(a, key) + } + if rhs != nil { + ritem, _ = rhs.GetUsing(a, key) + } + ok := fn(key, litem, ritem) + if litem != nil { + a.Free(litem) + } + if ritem != nil { + a.Free(ritem) + } + if !ok { + return false + } + } + return true +} + +// MapLess compares two maps lexically. +func MapLess(lhs, rhs Map) bool { + return MapCompare(lhs, rhs) == -1 +} + +// MapCompare compares two maps lexically. +func MapCompare(lhs, rhs Map) int { + return MapCompareUsing(HeapAllocator, lhs, rhs) +} + +// MapCompareUsing uses the provided allocator and compares two maps lexically. +func MapCompareUsing(a Allocator, lhs, rhs Map) int { + c := 0 + var llength, rlength int + if lhs != nil { + llength = lhs.Length() + } + if rhs != nil { + rlength = rhs.Length() + } + if llength == 0 && rlength == 0 { + return 0 + } + i := 0 + MapZipUsing(a, lhs, rhs, LexicalKeyOrder, func(key string, lhs, rhs Value) bool { + switch { + case i == llength: + c = -1 + case i == rlength: + c = 1 + case lhs == nil: + c = 1 + case rhs == nil: + c = -1 + default: + c = CompareUsing(a, lhs, rhs) + } + i++ + return c == 0 + }) + return c +} + +// MapEquals returns true if lhs == rhs, false otherwise. This function +// acts on generic types and should not be used by callers, but can help +// implement Map.Equals. +// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. +func MapEquals(lhs, rhs Map) bool { + return MapEqualsUsing(HeapAllocator, lhs, rhs) +} + +// MapEqualsUsing uses the provided allocator and returns true if lhs == rhs, +// false otherwise. 
This function acts on generic types and should not be used +// by callers, but can help implement Map.Equals. +// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. +func MapEqualsUsing(a Allocator, lhs, rhs Map) bool { + if lhs == nil && rhs == nil { + return true + } + if lhs == nil || rhs == nil { + return false + } + if lhs.Length() != rhs.Length() { + return false + } + return MapZipUsing(a, lhs, rhs, Unordered, func(key string, lhs, rhs Value) bool { + if lhs == nil || rhs == nil { + return false + } + return EqualsUsing(a, lhs, rhs) + }) +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go new file mode 100644 index 0000000..dc8b8c7 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go @@ -0,0 +1,209 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "reflect" +) + +type mapReflect struct { + valueReflect +} + +func (r mapReflect) Length() int { + val := r.Value + return val.Len() +} + +func (r mapReflect) Empty() bool { + val := r.Value + return val.Len() == 0 +} + +func (r mapReflect) Get(key string) (Value, bool) { + return r.GetUsing(HeapAllocator, key) +} + +func (r mapReflect) GetUsing(a Allocator, key string) (Value, bool) { + k, v, ok := r.get(key) + if !ok { + return nil, false + } + return a.allocValueReflect().mustReuse(v, nil, &r.Value, &k), true +} + +func (r mapReflect) get(k string) (key, value reflect.Value, ok bool) { + mapKey := r.toMapKey(k) + val := r.Value.MapIndex(mapKey) + return mapKey, val, val.IsValid() && val != reflect.Value{} +} + +func (r mapReflect) Has(key string) bool { + var val reflect.Value + val = r.Value.MapIndex(r.toMapKey(key)) + if !val.IsValid() { + return false + } + return val != reflect.Value{} +} + +func (r mapReflect) Set(key string, val Value) { + r.Value.SetMapIndex(r.toMapKey(key), reflect.ValueOf(val.Unstructured())) +} + +func (r mapReflect) Delete(key string) { + val := r.Value + val.SetMapIndex(r.toMapKey(key), reflect.Value{}) +} + +// TODO: Do we need to support types that implement json.Marshaler and are used as string keys? 
+func (r mapReflect) toMapKey(key string) reflect.Value { + val := r.Value + return reflect.ValueOf(key).Convert(val.Type().Key()) +} + +func (r mapReflect) Iterate(fn func(string, Value) bool) bool { + return r.IterateUsing(HeapAllocator, fn) +} + +func (r mapReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { + if r.Value.Len() == 0 { + return true + } + v := a.allocValueReflect() + defer a.Free(v) + return eachMapEntry(r.Value, func(e *TypeReflectCacheEntry, key reflect.Value, value reflect.Value) bool { + return fn(key.String(), v.mustReuse(value, e, &r.Value, &key)) + }) +} + +func eachMapEntry(val reflect.Value, fn func(*TypeReflectCacheEntry, reflect.Value, reflect.Value) bool) bool { + iter := val.MapRange() + entry := TypeReflectEntryOf(val.Type().Elem()) + for iter.Next() { + next := iter.Value() + if !next.IsValid() { + continue + } + if !fn(entry, iter.Key(), next) { + return false + } + } + return true +} + +func (r mapReflect) Unstructured() interface{} { + result := make(map[string]interface{}, r.Length()) + r.Iterate(func(s string, value Value) bool { + result[s] = value.Unstructured() + return true + }) + return result +} + +func (r mapReflect) Equals(m Map) bool { + return r.EqualsUsing(HeapAllocator, m) +} + +func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { + lhsLength := r.Length() + rhsLength := m.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vr := a.allocValueReflect() + defer a.Free(vr) + entry := TypeReflectEntryOf(r.Value.Type().Elem()) + return m.Iterate(func(key string, value Value) bool { + _, lhsVal, ok := r.get(key) + if !ok { + return false + } + return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) + }) +} + +func (r mapReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return r.ZipUsing(HeapAllocator, other, order, fn) +} + +func (r mapReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if otherMapReflect, ok := other.(*mapReflect); ok && order == Unordered { + return r.unorderedReflectZip(a, otherMapReflect, fn) + } + return defaultMapZip(a, &r, other, order, fn) +} + +// unorderedReflectZip provides an optimized unordered zip for mapReflect types. 
+func (r mapReflect) unorderedReflectZip(a Allocator, other *mapReflect, fn func(key string, lhs, rhs Value) bool) bool { + if r.Empty() && (other == nil || other.Empty()) { + return true + } + + lhs := r.Value + lhsEntry := TypeReflectEntryOf(lhs.Type().Elem()) + + // map lookup via reflection is expensive enough that it is better to keep track of visited keys + visited := map[string]struct{}{} + + vlhs, vrhs := a.allocValueReflect(), a.allocValueReflect() + defer a.Free(vlhs) + defer a.Free(vrhs) + + if other != nil { + rhs := other.Value + rhsEntry := TypeReflectEntryOf(rhs.Type().Elem()) + iter := rhs.MapRange() + + for iter.Next() { + key := iter.Key() + keyString := key.String() + next := iter.Value() + if !next.IsValid() { + continue + } + rhsVal := vrhs.mustReuse(next, rhsEntry, &rhs, &key) + visited[keyString] = struct{}{} + var lhsVal Value + if _, v, ok := r.get(keyString); ok { + lhsVal = vlhs.mustReuse(v, lhsEntry, &lhs, &key) + } + if !fn(keyString, lhsVal, rhsVal) { + return false + } + } + } + + iter := lhs.MapRange() + for iter.Next() { + key := iter.Key() + if _, ok := visited[key.String()]; ok { + continue + } + next := iter.Value() + if !next.IsValid() { + continue + } + if !fn(key.String(), vlhs.mustReuse(next, lhsEntry, &lhs, &key), nil) { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go new file mode 100644 index 0000000..d8e2086 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go @@ -0,0 +1,190 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +type mapUnstructuredInterface map[interface{}]interface{} + +func (m mapUnstructuredInterface) Set(key string, val Value) { + m[key] = val.Unstructured() +} + +func (m mapUnstructuredInterface) Get(key string) (Value, bool) { + return m.GetUsing(HeapAllocator, key) +} + +func (m mapUnstructuredInterface) GetUsing(a Allocator, key string) (Value, bool) { + if v, ok := m[key]; !ok { + return nil, false + } else { + return a.allocValueUnstructured().reuse(v), true + } +} + +func (m mapUnstructuredInterface) Has(key string) bool { + _, ok := m[key] + return ok +} + +func (m mapUnstructuredInterface) Delete(key string) { + delete(m, key) +} + +func (m mapUnstructuredInterface) Iterate(fn func(key string, value Value) bool) bool { + return m.IterateUsing(HeapAllocator, fn) +} + +func (m mapUnstructuredInterface) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { + if len(m) == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + for k, v := range m { + if ks, ok := k.(string); !ok { + continue + } else { + if !fn(ks, vv.reuse(v)) { + return false + } + } + } + return true +} + +func (m mapUnstructuredInterface) Length() int { + return len(m) +} + +func (m mapUnstructuredInterface) Empty() bool { + return len(m) == 0 +} + +func (m mapUnstructuredInterface) Equals(other Map) bool { + return m.EqualsUsing(HeapAllocator, other) +} + +func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { + lhsLength := m.Length() + rhsLength := other.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + return other.Iterate(func(key string, value Value) bool { + lhsVal, ok := m[key] + if !ok { + return false + } + return Equals(vv.reuse(lhsVal), value) + }) +} + +func (m mapUnstructuredInterface) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return m.ZipUsing(HeapAllocator, other, order, fn) +} + +func (m mapUnstructuredInterface) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return defaultMapZip(a, m, other, order, fn) +} + +type mapUnstructuredString map[string]interface{} + +func (m mapUnstructuredString) Set(key string, val Value) { + m[key] = val.Unstructured() +} + +func (m mapUnstructuredString) Get(key string) (Value, bool) { + return m.GetUsing(HeapAllocator, key) +} +func (m mapUnstructuredString) GetUsing(a Allocator, key string) (Value, bool) { + if v, ok := m[key]; !ok { + return nil, false + } else { + return a.allocValueUnstructured().reuse(v), true + } +} + +func (m mapUnstructuredString) Has(key string) bool { + _, ok := m[key] + return ok +} + +func (m mapUnstructuredString) Delete(key string) { + delete(m, key) +} + +func (m mapUnstructuredString) Iterate(fn func(key string, value Value) bool) bool { + return m.IterateUsing(HeapAllocator, fn) +} + +func (m mapUnstructuredString) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { + if len(m) == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + for k, v := range m { + if !fn(k, vv.reuse(v)) { + return false + } + } + return true +} + +func (m mapUnstructuredString) Length() int { + return len(m) +} + +func (m mapUnstructuredString) Equals(other Map) bool { + return m.EqualsUsing(HeapAllocator, other) +} + +func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { + lhsLength := m.Length() + 
rhsLength := other.Length() + if lhsLength != rhsLength { + return false + } + if lhsLength == 0 { + return true + } + vv := a.allocValueUnstructured() + defer a.Free(vv) + return other.Iterate(func(key string, value Value) bool { + lhsVal, ok := m[key] + if !ok { + return false + } + return Equals(vv.reuse(lhsVal), value) + }) +} + +func (m mapUnstructuredString) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return m.ZipUsing(HeapAllocator, other, order, fn) +} + +func (m mapUnstructuredString) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return defaultMapZip(a, m, other, order, fn) +} + +func (m mapUnstructuredString) Empty() bool { + return len(m) == 0 +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go new file mode 100644 index 0000000..49e6dd1 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go @@ -0,0 +1,463 @@ +/* +Copyright 2020 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "bytes" + "encoding/json" + "fmt" + "reflect" + "sort" + "sync" + "sync/atomic" +) + +// UnstructuredConverter defines how a type can be converted directly to unstructured. +// Types that implement json.Marshaler may also optionally implement this interface to provide a more +// direct and more efficient conversion. All types that choose to implement this interface must still +// implement this same conversion via json.Marshaler. +type UnstructuredConverter interface { + json.Marshaler // require that json.Marshaler is implemented + + // ToUnstructured returns the unstructured representation. + ToUnstructured() interface{} +} + +// TypeReflectCacheEntry keeps data gathered using reflection about how a type is converted to/from unstructured. +type TypeReflectCacheEntry struct { + isJsonMarshaler bool + ptrIsJsonMarshaler bool + isJsonUnmarshaler bool + ptrIsJsonUnmarshaler bool + isStringConvertable bool + ptrIsStringConvertable bool + + structFields map[string]*FieldCacheEntry + orderedStructFields []*FieldCacheEntry +} + +// FieldCacheEntry keeps data gathered using reflection about how the field of a struct is converted to/from +// unstructured. +type FieldCacheEntry struct { + // JsonName returns the name of the field according to the json tags on the struct field. + JsonName string + // isOmitEmpty is true if the field has the json 'omitempty' tag. + isOmitEmpty bool + // fieldPath is a list of field indices (see FieldByIndex) to lookup the value of + // a field in a reflect.Value struct. The field indices in the list form a path used + // to traverse through intermediary 'inline' fields. 
+ fieldPath [][]int + + fieldType reflect.Type + TypeEntry *TypeReflectCacheEntry +} + +func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { + return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) +} + +// GetUsing returns the field identified by this FieldCacheEntry from the provided struct. +func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value { + // field might be nested within 'inline' structs + for _, elem := range f.fieldPath { + structVal = structVal.FieldByIndex(elem) + } + return structVal +} + +var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() +var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() +var unstructuredConvertableType = reflect.TypeOf(new(UnstructuredConverter)).Elem() +var defaultReflectCache = newReflectCache() + +// TypeReflectEntryOf returns the TypeReflectCacheEntry of the provided reflect.Type. +func TypeReflectEntryOf(t reflect.Type) *TypeReflectCacheEntry { + cm := defaultReflectCache.get() + if record, ok := cm[t]; ok { + return record + } + updates := reflectCacheMap{} + result := typeReflectEntryOf(cm, t, updates) + if len(updates) > 0 { + defaultReflectCache.update(updates) + } + return result +} + +// TypeReflectEntryOf returns all updates needed to add provided reflect.Type, and the types its fields transitively +// depend on, to the cache. +func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCacheMap) *TypeReflectCacheEntry { + if record, ok := cm[t]; ok { + return record + } + if record, ok := updates[t]; ok { + return record + } + typeEntry := &TypeReflectCacheEntry{ + isJsonMarshaler: t.Implements(marshalerType), + ptrIsJsonMarshaler: reflect.PtrTo(t).Implements(marshalerType), + isJsonUnmarshaler: reflect.PtrTo(t).Implements(unmarshalerType), + isStringConvertable: t.Implements(unstructuredConvertableType), + ptrIsStringConvertable: reflect.PtrTo(t).Implements(unstructuredConvertableType), + } + if t.Kind() == reflect.Struct { + fieldEntries := map[string]*FieldCacheEntry{} + buildStructCacheEntry(t, fieldEntries, nil) + typeEntry.structFields = fieldEntries + sortedByJsonName := make([]*FieldCacheEntry, len(fieldEntries)) + i := 0 + for _, entry := range fieldEntries { + sortedByJsonName[i] = entry + i++ + } + sort.Slice(sortedByJsonName, func(i, j int) bool { + return sortedByJsonName[i].JsonName < sortedByJsonName[j].JsonName + }) + typeEntry.orderedStructFields = sortedByJsonName + } + + // cyclic type references are allowed, so we must add the typeEntry to the updates map before resolving + // the field.typeEntry references, or creating them if they are not already in the cache + updates[t] = typeEntry + + for _, field := range typeEntry.structFields { + if field.TypeEntry == nil { + field.TypeEntry = typeReflectEntryOf(cm, field.fieldType, updates) + } + } + return typeEntry +} + +func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) { + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + jsonName, omit, isInline, isOmitempty := lookupJsonTags(field) + if omit { + continue + } + if isInline { + buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index)) + continue + } + info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} + infos[jsonName] = info + } +} + +// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs. 
+func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry { + return e.structFields +} + +// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs. +func (e TypeReflectCacheEntry) OrderedFields() []*FieldCacheEntry { + return e.orderedStructFields +} + +// CanConvertToUnstructured returns true if this TypeReflectCacheEntry can convert values of its type to unstructured. +func (e TypeReflectCacheEntry) CanConvertToUnstructured() bool { + return e.isJsonMarshaler || e.ptrIsJsonMarshaler || e.isStringConvertable || e.ptrIsStringConvertable +} + +// ToUnstructured converts the provided value to unstructured and returns it. +func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, error) { + // This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505 + // and is intended to replace it. + + // Check if the object has a custom string converter and use it if available, since it is much more efficient + // than round tripping through json. + if converter, ok := e.getUnstructuredConverter(sv); ok { + return converter.ToUnstructured(), nil + } + // Check if the object has a custom JSON marshaller/unmarshaller. + if marshaler, ok := e.getJsonMarshaler(sv); ok { + if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil, nil + } + + data, err := marshaler.MarshalJSON() + if err != nil { + return nil, err + } + switch { + case len(data) == 0: + return nil, fmt.Errorf("error decoding from json: empty value") + + case bytes.Equal(data, nullBytes): + // We're done - we don't need to store anything. + return nil, nil + + case bytes.Equal(data, trueBytes): + return true, nil + + case bytes.Equal(data, falseBytes): + return false, nil + + case data[0] == '"': + var result string + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding string from json: %v", err) + } + return result, nil + + case data[0] == '{': + result := make(map[string]interface{}) + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding object from json: %v", err) + } + return result, nil + + case data[0] == '[': + result := make([]interface{}, 0) + err := unmarshal(data, &result) + if err != nil { + return nil, fmt.Errorf("error decoding array from json: %v", err) + } + return result, nil + + default: + var ( + resultInt int64 + resultFloat float64 + err error + ) + if err = unmarshal(data, &resultInt); err == nil { + return resultInt, nil + } else if err = unmarshal(data, &resultFloat); err == nil { + return resultFloat, nil + } else { + return nil, fmt.Errorf("error decoding number from json: %v", err) + } + } + } + + return nil, fmt.Errorf("provided type cannot be converted: %v", sv.Type()) +} + +// CanConvertFromUnstructured returns true if this TypeReflectCacheEntry can convert objects of the type from unstructured. +func (e TypeReflectCacheEntry) CanConvertFromUnstructured() bool { + return e.isJsonUnmarshaler +} + +// FromUnstructured converts the provided source value from unstructured into the provided destination value. +func (e TypeReflectCacheEntry) FromUnstructured(sv, dv reflect.Value) error { + // TODO: this could be made much more efficient using direct conversions like + // UnstructuredConverter.ToUnstructured provides. 
+ st := dv.Type() + data, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st.String(), err) + } + if unmarshaler, ok := e.getJsonUnmarshaler(dv); ok { + return unmarshaler.UnmarshalJSON(data) + } + return fmt.Errorf("unable to unmarshal %v into %v", sv.Type(), dv.Type()) +} + +var ( + nullBytes = []byte("null") + trueBytes = []byte("true") + falseBytes = []byte("false") +) + +func (e TypeReflectCacheEntry) getJsonMarshaler(v reflect.Value) (json.Marshaler, bool) { + if e.isJsonMarshaler { + return v.Interface().(json.Marshaler), true + } + if e.ptrIsJsonMarshaler { + // Check pointer receivers if v is not a pointer + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + return v.Interface().(json.Marshaler), true + } + } + return nil, false +} + +func (e TypeReflectCacheEntry) getJsonUnmarshaler(v reflect.Value) (json.Unmarshaler, bool) { + if !e.isJsonUnmarshaler { + return nil, false + } + return v.Addr().Interface().(json.Unmarshaler), true +} + +func (e TypeReflectCacheEntry) getUnstructuredConverter(v reflect.Value) (UnstructuredConverter, bool) { + if e.isStringConvertable { + return v.Interface().(UnstructuredConverter), true + } + if e.ptrIsStringConvertable { + // Check pointer receivers if v is not a pointer + if v.CanAddr() { + v = v.Addr() + return v.Interface().(UnstructuredConverter), true + } + } + return nil, false +} + +type typeReflectCache struct { + // use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any + // go program using this cache + value atomic.Value + // mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a + // read-lock since the atomic value is always read-only + mu sync.Mutex +} + +func newReflectCache() *typeReflectCache { + cache := &typeReflectCache{} + cache.value.Store(make(reflectCacheMap)) + return cache +} + +type reflectCacheMap map[reflect.Type]*TypeReflectCacheEntry + +// get returns the reflectCacheMap. +func (c *typeReflectCache) get() reflectCacheMap { + return c.value.Load().(reflectCacheMap) +} + +// update merges the provided updates into the cache. +func (c *typeReflectCache) update(updates reflectCacheMap) { + c.mu.Lock() + defer c.mu.Unlock() + + currentCacheMap := c.value.Load().(reflectCacheMap) + + hasNewEntries := false + for t := range updates { + if _, ok := currentCacheMap[t]; !ok { + hasNewEntries = true + break + } + } + if !hasNewEntries { + // Bail if the updates have been set while waiting for lock acquisition. + // This is safe since setting entries is idempotent. 
+ return + } + + newCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates)) + for k, v := range currentCacheMap { + newCacheMap[k] = v + } + for t, update := range updates { + newCacheMap[t] = update + } + c.value.Store(newCacheMap) +} + +// Below json Unmarshal is fromk8s.io/apimachinery/pkg/util/json +// to handle number conversions as expected by Kubernetes + +// limit recursive depth to prevent stack overflow errors +const maxDepth = 10000 + +// unmarshal unmarshals the given data +// If v is a *map[string]interface{}, numbers are converted to int64 or float64 +func unmarshal(data []byte, v interface{}) error { + switch v := v.(type) { + case *map[string]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertMapNumbers(*v, 0) + + case *[]interface{}: + // Build a decoder from the given data + decoder := json.NewDecoder(bytes.NewBuffer(data)) + // Preserve numbers, rather than casting to float64 automatically + decoder.UseNumber() + // Run the decode + if err := decoder.Decode(v); err != nil { + return err + } + // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 + return convertSliceNumbers(*v, 0) + + default: + return json.Unmarshal(data, v) + } +} + +// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertMapNumbers(m map[string]interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + + var err error + for k, v := range m { + switch v := v.(type) { + case json.Number: + m[k], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v, depth+1) + case []interface{}: + err = convertSliceNumbers(v, depth+1) + } + if err != nil { + return err + } + } + return nil +} + +// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. +// values which are map[string]interface{} or []interface{} are recursively visited +func convertSliceNumbers(s []interface{}, depth int) error { + if depth > maxDepth { + return fmt.Errorf("exceeded max depth of %d", maxDepth) + } + + var err error + for i, v := range s { + switch v := v.(type) { + case json.Number: + s[i], err = convertNumber(v) + case map[string]interface{}: + err = convertMapNumbers(v, depth+1) + case []interface{}: + err = convertSliceNumbers(v, depth+1) + } + if err != nil { + return err + } + } + return nil +} + +// convertNumber converts a json.Number to an int64 or float64, or returns an error +func convertNumber(n json.Number) (interface{}, error) { + // Attempt to convert to an int64 first + if i, err := n.Int64(); err == nil { + return i, nil + } + // Return a float64 (default json.Decode() behavior) + // An overflow will return an error + return n.Float64() +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go new file mode 100644 index 0000000..c78a4c1 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go @@ -0,0 +1,50 @@ +/* +Copyright 2019 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +// Compare compares floats. The result will be 0 if lhs==rhs, -1 if f < +// rhs, and +1 if f > rhs. +func FloatCompare(lhs, rhs float64) int { + if lhs > rhs { + return 1 + } else if lhs < rhs { + return -1 + } + return 0 +} + +// IntCompare compares integers. The result will be 0 if i==rhs, -1 if i < +// rhs, and +1 if i > rhs. +func IntCompare(lhs, rhs int64) int { + if lhs > rhs { + return 1 + } else if lhs < rhs { + return -1 + } + return 0 +} + +// Compare compares booleans. The result will be 0 if b==rhs, -1 if b < +// rhs, and +1 if b > rhs. +func BoolCompare(lhs, rhs bool) int { + if lhs == rhs { + return 0 + } else if lhs == false { + return -1 + } + return 1 +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go new file mode 100644 index 0000000..4a7bb5c --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go @@ -0,0 +1,208 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package value + +import ( + "fmt" + "reflect" +) + +type structReflect struct { + valueReflect +} + +func (r structReflect) Length() int { + i := 0 + eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { + i++ + return true + }) + return i +} + +func (r structReflect) Empty() bool { + return eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { + return false // exit early if the struct is non-empty + }) +} + +func (r structReflect) Get(key string) (Value, bool) { + return r.GetUsing(HeapAllocator, key) +} + +func (r structReflect) GetUsing(a Allocator, key string) (Value, bool) { + if val, ok := r.findJsonNameField(key); ok { + return a.allocValueReflect().mustReuse(val, nil, nil, nil), true + } + return nil, false +} + +func (r structReflect) Has(key string) bool { + _, ok := r.findJsonNameField(key) + return ok +} + +func (r structReflect) Set(key string, val Value) { + fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] + if !ok { + panic(fmt.Sprintf("key %s may not be set on struct %T: field does not exist", key, r.Value.Interface())) + } + oldVal := fieldEntry.GetFrom(r.Value) + newVal := reflect.ValueOf(val.Unstructured()) + r.update(fieldEntry, key, oldVal, newVal) +} + +func (r structReflect) Delete(key string) { + fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] + if !ok { + panic(fmt.Sprintf("key %s may not be deleted on struct %T: field does not exist", key, r.Value.Interface())) + } + oldVal := fieldEntry.GetFrom(r.Value) + if oldVal.Kind() != reflect.Ptr && !fieldEntry.isOmitEmpty { + panic(fmt.Sprintf("key %s may not be deleted on struct: %T: value is neither a pointer nor an omitempty field", key, r.Value.Interface())) + } + r.update(fieldEntry, key, oldVal, reflect.Zero(oldVal.Type())) +} + +func (r structReflect) update(fieldEntry *FieldCacheEntry, key string, oldVal, newVal reflect.Value) { + if oldVal.CanSet() { + oldVal.Set(newVal) + return + } + + // map items are not addressable, so if a struct is contained in a map, the only way to modify it is + // to write a replacement fieldEntry into the map. + if r.ParentMap != nil { + if r.ParentMapKey == nil { + panic("ParentMapKey must not be nil if ParentMap is not nil") + } + replacement := reflect.New(r.Value.Type()).Elem() + fieldEntry.GetFrom(replacement).Set(newVal) + r.ParentMap.SetMapIndex(*r.ParentMapKey, replacement) + return + } + + // This should never happen since NewValueReflect ensures that the root object reflected on is a pointer and map + // item replacement is handled above. 
+ panic(fmt.Sprintf("key %s may not be modified on struct: %T: struct is not settable", key, r.Value.Interface())) +} + +func (r structReflect) Iterate(fn func(string, Value) bool) bool { + return r.IterateUsing(HeapAllocator, fn) +} + +func (r structReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { + vr := a.allocValueReflect() + defer a.Free(vr) + return eachStructField(r.Value, func(e *TypeReflectCacheEntry, s string, value reflect.Value) bool { + return fn(s, vr.mustReuse(value, e, nil, nil)) + }) +} + +func eachStructField(structVal reflect.Value, fn func(*TypeReflectCacheEntry, string, reflect.Value) bool) bool { + for _, fieldCacheEntry := range TypeReflectEntryOf(structVal.Type()).OrderedFields() { + fieldVal := fieldCacheEntry.GetFrom(structVal) + if fieldCacheEntry.CanOmit(fieldVal) { + // omit it + continue + } + ok := fn(fieldCacheEntry.TypeEntry, fieldCacheEntry.JsonName, fieldVal) + if !ok { + return false + } + } + return true +} + +func (r structReflect) Unstructured() interface{} { + // Use number of struct fields as a cheap way to rough estimate map size + result := make(map[string]interface{}, r.Value.NumField()) + r.Iterate(func(s string, value Value) bool { + result[s] = value.Unstructured() + return true + }) + return result +} + +func (r structReflect) Equals(m Map) bool { + return r.EqualsUsing(HeapAllocator, m) +} + +func (r structReflect) EqualsUsing(a Allocator, m Map) bool { + // MapEquals uses zip and is fairly efficient for structReflect + return MapEqualsUsing(a, &r, m) +} + +func (r structReflect) findJsonNameFieldAndNotEmpty(jsonName string) (reflect.Value, bool) { + structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] + if !ok { + return reflect.Value{}, false + } + fieldVal := structCacheEntry.GetFrom(r.Value) + return fieldVal, !structCacheEntry.CanOmit(fieldVal) +} + +func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool) { + structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] + if !ok { + return reflect.Value{}, false + } + fieldVal := structCacheEntry.GetFrom(r.Value) + return fieldVal, !structCacheEntry.CanOmit(fieldVal) +} + +func (r structReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + return r.ZipUsing(HeapAllocator, other, order, fn) +} + +func (r structReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { + if otherStruct, ok := other.(*structReflect); ok && r.Value.Type() == otherStruct.Value.Type() { + lhsvr, rhsvr := a.allocValueReflect(), a.allocValueReflect() + defer a.Free(lhsvr) + defer a.Free(rhsvr) + return r.structZip(otherStruct, lhsvr, rhsvr, fn) + } + return defaultMapZip(a, &r, other, order, fn) +} + +// structZip provides an optimized zip for structReflect types. The zip is always lexical key ordered since there is +// no additional cost to ordering the zip for structured types. 
+func (r structReflect) structZip(other *structReflect, lhsvr, rhsvr *valueReflect, fn func(key string, lhs, rhs Value) bool) bool { + lhsVal := r.Value + rhsVal := other.Value + + for _, fieldCacheEntry := range TypeReflectEntryOf(lhsVal.Type()).OrderedFields() { + lhsFieldVal := fieldCacheEntry.GetFrom(lhsVal) + rhsFieldVal := fieldCacheEntry.GetFrom(rhsVal) + lhsOmit := fieldCacheEntry.CanOmit(lhsFieldVal) + rhsOmit := fieldCacheEntry.CanOmit(rhsFieldVal) + if lhsOmit && rhsOmit { + continue + } + var lhsVal, rhsVal Value + if !lhsOmit { + lhsVal = lhsvr.mustReuse(lhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil) + } + if !rhsOmit { + rhsVal = rhsvr.mustReuse(rhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil) + } + if !fn(fieldCacheEntry.JsonName, lhsVal, rhsVal) { + return false + } + } + return true +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go new file mode 100644 index 0000000..ea79e3a --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go @@ -0,0 +1,347 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "bytes" + "fmt" + "io" + "strings" + + jsoniter "github.com/json-iterator/go" + "gopkg.in/yaml.v2" +) + +var ( + readPool = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool() + writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool() +) + +// A Value corresponds to an 'atom' in the schema. It should return true +// for at least one of the IsXXX methods below, or the value is +// considered "invalid" +type Value interface { + // IsMap returns true if the Value is a Map, false otherwise. + IsMap() bool + // IsList returns true if the Value is a List, false otherwise. + IsList() bool + // IsBool returns true if the Value is a bool, false otherwise. + IsBool() bool + // IsInt returns true if the Value is a int64, false otherwise. + IsInt() bool + // IsFloat returns true if the Value is a float64, false + // otherwise. + IsFloat() bool + // IsString returns true if the Value is a string, false + // otherwise. + IsString() bool + // IsMap returns true if the Value is null, false otherwise. + IsNull() bool + + // AsMap converts the Value into a Map (or panic if the type + // doesn't allow it). + AsMap() Map + // AsMapUsing uses the provided allocator and converts the Value + // into a Map (or panic if the type doesn't allow it). + AsMapUsing(Allocator) Map + // AsList converts the Value into a List (or panic if the type + // doesn't allow it). + AsList() List + // AsListUsing uses the provided allocator and converts the Value + // into a List (or panic if the type doesn't allow it). + AsListUsing(Allocator) List + // AsBool converts the Value into a bool (or panic if the type + // doesn't allow it). + AsBool() bool + // AsInt converts the Value into an int64 (or panic if the type + // doesn't allow it). 
+ AsInt() int64 + // AsFloat converts the Value into a float64 (or panic if the type + // doesn't allow it). + AsFloat() float64 + // AsString converts the Value into a string (or panic if the type + // doesn't allow it). + AsString() string + + // Unstructured converts the Value into an Unstructured interface{}. + Unstructured() interface{} +} + +// FromJSON is a helper function for reading a JSON document. +func FromJSON(input []byte) (Value, error) { + return FromJSONFast(input) +} + +// FromJSONFast is a helper function for reading a JSON document. +func FromJSONFast(input []byte) (Value, error) { + iter := readPool.BorrowIterator(input) + defer readPool.ReturnIterator(iter) + return ReadJSONIter(iter) +} + +// ToJSON is a helper function for producing a JSon document. +func ToJSON(v Value) ([]byte, error) { + buf := bytes.Buffer{} + stream := writePool.BorrowStream(&buf) + defer writePool.ReturnStream(stream) + WriteJSONStream(v, stream) + b := stream.Buffer() + err := stream.Flush() + // Help jsoniter manage its buffers--without this, the next + // use of the stream is likely to require an allocation. Look + // at the jsoniter stream code to understand why. They were probably + // optimizing for folks using the buffer directly. + stream.SetBuffer(b[:0]) + return buf.Bytes(), err +} + +// ReadJSONIter reads a Value from a JSON iterator. +func ReadJSONIter(iter *jsoniter.Iterator) (Value, error) { + v := iter.Read() + if iter.Error != nil && iter.Error != io.EOF { + return nil, iter.Error + } + return NewValueInterface(v), nil +} + +// WriteJSONStream writes a value into a JSON stream. +func WriteJSONStream(v Value, stream *jsoniter.Stream) { + stream.WriteVal(v.Unstructured()) +} + +// ToYAML marshals a value as YAML. +func ToYAML(v Value) ([]byte, error) { + return yaml.Marshal(v.Unstructured()) +} + +// Equals returns true iff the two values are equal. +func Equals(lhs, rhs Value) bool { + return EqualsUsing(HeapAllocator, lhs, rhs) +} + +// EqualsUsing uses the provided allocator and returns true iff the two values are equal. 
+func EqualsUsing(a Allocator, lhs, rhs Value) bool { + if lhs.IsFloat() || rhs.IsFloat() { + var lf float64 + if lhs.IsFloat() { + lf = lhs.AsFloat() + } else if lhs.IsInt() { + lf = float64(lhs.AsInt()) + } else { + return false + } + var rf float64 + if rhs.IsFloat() { + rf = rhs.AsFloat() + } else if rhs.IsInt() { + rf = float64(rhs.AsInt()) + } else { + return false + } + return lf == rf + } + if lhs.IsInt() { + if rhs.IsInt() { + return lhs.AsInt() == rhs.AsInt() + } + return false + } else if rhs.IsInt() { + return false + } + if lhs.IsString() { + if rhs.IsString() { + return lhs.AsString() == rhs.AsString() + } + return false + } else if rhs.IsString() { + return false + } + if lhs.IsBool() { + if rhs.IsBool() { + return lhs.AsBool() == rhs.AsBool() + } + return false + } else if rhs.IsBool() { + return false + } + if lhs.IsList() { + if rhs.IsList() { + lhsList := lhs.AsListUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsListUsing(a) + defer a.Free(rhsList) + return lhsList.EqualsUsing(a, rhsList) + } + return false + } else if rhs.IsList() { + return false + } + if lhs.IsMap() { + if rhs.IsMap() { + lhsList := lhs.AsMapUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsMapUsing(a) + defer a.Free(rhsList) + return lhsList.EqualsUsing(a, rhsList) + } + return false + } else if rhs.IsMap() { + return false + } + if lhs.IsNull() { + if rhs.IsNull() { + return true + } + return false + } else if rhs.IsNull() { + return false + } + // No field is set, on either objects. + return true +} + +// ToString returns a human-readable representation of the value. +func ToString(v Value) string { + if v.IsNull() { + return "null" + } + switch { + case v.IsFloat(): + return fmt.Sprintf("%v", v.AsFloat()) + case v.IsInt(): + return fmt.Sprintf("%v", v.AsInt()) + case v.IsString(): + return fmt.Sprintf("%q", v.AsString()) + case v.IsBool(): + return fmt.Sprintf("%v", v.AsBool()) + case v.IsList(): + strs := []string{} + list := v.AsList() + for i := 0; i < list.Length(); i++ { + strs = append(strs, ToString(list.At(i))) + } + return "[" + strings.Join(strs, ",") + "]" + case v.IsMap(): + strs := []string{} + v.AsMap().Iterate(func(k string, v Value) bool { + strs = append(strs, fmt.Sprintf("%v=%v", k, ToString(v))) + return true + }) + return strings.Join(strs, "") + } + // No field is set, on either objects. + return "{{undefined}}" +} + +// Less provides a total ordering for Value (so that they can be sorted, even +// if they are of different types). +func Less(lhs, rhs Value) bool { + return Compare(lhs, rhs) == -1 +} + +// Compare provides a total ordering for Value (so that they can be +// sorted, even if they are of different types). The result will be 0 if +// v==rhs, -1 if v < rhs, and +1 if v > rhs. +func Compare(lhs, rhs Value) int { + return CompareUsing(HeapAllocator, lhs, rhs) +} + +// CompareUsing uses the provided allocator and provides a total +// ordering for Value (so that they can be sorted, even if they +// are of different types). The result will be 0 if v==rhs, -1 +// if v < rhs, and +1 if v > rhs. +func CompareUsing(a Allocator, lhs, rhs Value) int { + if lhs.IsFloat() { + if !rhs.IsFloat() { + // Extra: compare floats and ints numerically. + if rhs.IsInt() { + return FloatCompare(lhs.AsFloat(), float64(rhs.AsInt())) + } + return -1 + } + return FloatCompare(lhs.AsFloat(), rhs.AsFloat()) + } else if rhs.IsFloat() { + // Extra: compare floats and ints numerically. 
+ if lhs.IsInt() { + return FloatCompare(float64(lhs.AsInt()), rhs.AsFloat()) + } + return 1 + } + + if lhs.IsInt() { + if !rhs.IsInt() { + return -1 + } + return IntCompare(lhs.AsInt(), rhs.AsInt()) + } else if rhs.IsInt() { + return 1 + } + + if lhs.IsString() { + if !rhs.IsString() { + return -1 + } + return strings.Compare(lhs.AsString(), rhs.AsString()) + } else if rhs.IsString() { + return 1 + } + + if lhs.IsBool() { + if !rhs.IsBool() { + return -1 + } + return BoolCompare(lhs.AsBool(), rhs.AsBool()) + } else if rhs.IsBool() { + return 1 + } + + if lhs.IsList() { + if !rhs.IsList() { + return -1 + } + lhsList := lhs.AsListUsing(a) + defer a.Free(lhsList) + rhsList := rhs.AsListUsing(a) + defer a.Free(rhsList) + return ListCompareUsing(a, lhsList, rhsList) + } else if rhs.IsList() { + return 1 + } + if lhs.IsMap() { + if !rhs.IsMap() { + return -1 + } + lhsMap := lhs.AsMapUsing(a) + defer a.Free(lhsMap) + rhsMap := rhs.AsMapUsing(a) + defer a.Free(rhsMap) + return MapCompareUsing(a, lhsMap, rhsMap) + } else if rhs.IsMap() { + return 1 + } + if lhs.IsNull() { + if !rhs.IsNull() { + return -1 + } + return 0 + } else if rhs.IsNull() { + return 1 + } + + // Invalid Value-- nothing is set. + return 0 +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go new file mode 100644 index 0000000..05e70de --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go @@ -0,0 +1,294 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "encoding/base64" + "fmt" + "reflect" +) + +// NewValueReflect creates a Value backed by an "interface{}" type, +// typically an structured object in Kubernetes world that is uses reflection to expose. +// The provided "interface{}" value must be a pointer so that the value can be modified via reflection. +// The provided "interface{}" may contain structs and types that are converted to Values +// by the jsonMarshaler interface. +func NewValueReflect(value interface{}) (Value, error) { + if value == nil { + return NewValueInterface(nil), nil + } + v := reflect.ValueOf(value) + if v.Kind() != reflect.Ptr { + // The root value to reflect on must be a pointer so that map.Set() and map.Delete() operations are possible. + return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer") + } + return wrapValueReflect(v, nil, nil) +} + +// wrapValueReflect wraps the provide reflect.Value as a value. If parent in the data tree is a map, parentMap +// and parentMapKey must be provided so that the returned value may be set and deleted. +func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) { + val := HeapAllocator.allocValueReflect() + return val.reuse(value, nil, parentMap, parentMapKey) +} + +// wrapValueReflect wraps the provide reflect.Value as a value, and panics if there is an error. 
If parent in the data +// tree is a map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. +func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value { + v, err := wrapValueReflect(value, parentMap, parentMapKey) + if err != nil { + panic(err) + } + return v +} + +// the value interface doesn't care about the type for value.IsNull, so we can use a constant +var nilType = reflect.TypeOf(&struct{}{}) + +// reuse replaces the value of the valueReflect. If parent in the data tree is a map, parentMap and parentMapKey +// must be provided so that the returned value may be set and deleted. +func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) { + if cacheEntry == nil { + cacheEntry = TypeReflectEntryOf(value.Type()) + } + if cacheEntry.CanConvertToUnstructured() { + u, err := cacheEntry.ToUnstructured(value) + if err != nil { + return nil, err + } + if u == nil { + value = reflect.Zero(nilType) + } else { + value = reflect.ValueOf(u) + } + } + r.Value = dereference(value) + r.ParentMap = parentMap + r.ParentMapKey = parentMapKey + r.kind = kind(r.Value) + return r, nil +} + +// mustReuse replaces the value of the valueReflect and panics if there is an error. If parent in the data tree is a +// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. +func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value { + v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey) + if err != nil { + panic(err) + } + return v +} + +func dereference(val reflect.Value) reflect.Value { + kind := val.Kind() + if (kind == reflect.Interface || kind == reflect.Ptr) && !safeIsNil(val) { + return val.Elem() + } + return val +} + +type valueReflect struct { + ParentMap *reflect.Value + ParentMapKey *reflect.Value + Value reflect.Value + kind reflectType +} + +func (r valueReflect) IsMap() bool { + return r.kind == mapType || r.kind == structMapType +} + +func (r valueReflect) IsList() bool { + return r.kind == listType +} + +func (r valueReflect) IsBool() bool { + return r.kind == boolType +} + +func (r valueReflect) IsInt() bool { + return r.kind == intType || r.kind == uintType +} + +func (r valueReflect) IsFloat() bool { + return r.kind == floatType +} + +func (r valueReflect) IsString() bool { + return r.kind == stringType || r.kind == byteStringType +} + +func (r valueReflect) IsNull() bool { + return r.kind == nullType +} + +type reflectType = int + +const ( + mapType = iota + structMapType + listType + intType + uintType + floatType + stringType + byteStringType + boolType + nullType +) + +func kind(v reflect.Value) reflectType { + typ := v.Type() + rk := typ.Kind() + switch rk { + case reflect.Map: + if v.IsNil() { + return nullType + } + return mapType + case reflect.Struct: + return structMapType + case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: + return intType + case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8: + // Uint64 deliberately excluded, see valueUnstructured.Int. 
+ return uintType + case reflect.Float64, reflect.Float32: + return floatType + case reflect.String: + return stringType + case reflect.Bool: + return boolType + case reflect.Slice: + if v.IsNil() { + return nullType + } + elemKind := typ.Elem().Kind() + if elemKind == reflect.Uint8 { + return byteStringType + } + return listType + case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface: + if v.IsNil() { + return nullType + } + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + default: + panic(fmt.Sprintf("unsupported type: %v", v.Type())) + } +} + +// TODO find a cleaner way to avoid panics from reflect.IsNil() +func safeIsNil(v reflect.Value) bool { + k := v.Kind() + switch k { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: + return v.IsNil() + } + return false +} + +func (r valueReflect) AsMap() Map { + return r.AsMapUsing(HeapAllocator) +} + +func (r valueReflect) AsMapUsing(a Allocator) Map { + switch r.kind { + case structMapType: + v := a.allocStructReflect() + v.valueReflect = r + return v + case mapType: + v := a.allocMapReflect() + v.valueReflect = r + return v + default: + panic("value is not a map or struct") + } +} + +func (r valueReflect) AsList() List { + return r.AsListUsing(HeapAllocator) +} + +func (r valueReflect) AsListUsing(a Allocator) List { + if r.IsList() { + v := a.allocListReflect() + v.Value = r.Value + return v + } + panic("value is not a list") +} + +func (r valueReflect) AsBool() bool { + if r.IsBool() { + return r.Value.Bool() + } + panic("value is not a bool") +} + +func (r valueReflect) AsInt() int64 { + if r.kind == intType { + return r.Value.Int() + } + if r.kind == uintType { + return int64(r.Value.Uint()) + } + + panic("value is not an int") +} + +func (r valueReflect) AsFloat() float64 { + if r.IsFloat() { + return r.Value.Float() + } + panic("value is not a float") +} + +func (r valueReflect) AsString() string { + switch r.kind { + case stringType: + return r.Value.String() + case byteStringType: + return base64.StdEncoding.EncodeToString(r.Value.Bytes()) + } + panic("value is not a string") +} + +func (r valueReflect) Unstructured() interface{} { + val := r.Value + switch { + case r.IsNull(): + return nil + case val.Kind() == reflect.Struct: + return structReflect{r}.Unstructured() + case val.Kind() == reflect.Map: + return mapReflect{valueReflect: r}.Unstructured() + case r.IsList(): + return listReflect{r.Value}.Unstructured() + case r.IsString(): + return r.AsString() + case r.IsInt(): + return r.AsInt() + case r.IsBool(): + return r.AsBool() + case r.IsFloat(): + return r.AsFloat() + default: + panic(fmt.Sprintf("value of type %s is not a supported by value reflector", val.Type())) + } +} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go new file mode 100644 index 0000000..ac5a926 --- /dev/null +++ b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go @@ -0,0 +1,178 @@ +/* +Copyright 2018 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package value + +import ( + "fmt" +) + +// NewValueInterface creates a Value backed by an "interface{}" type, +// typically an unstructured object in Kubernetes world. +// interface{} must be one of: map[string]interface{}, map[interface{}]interface{}, []interface{}, int types, float types, +// string or boolean. Nested interface{} must also be one of these types. +func NewValueInterface(v interface{}) Value { + return Value(HeapAllocator.allocValueUnstructured().reuse(v)) +} + +type valueUnstructured struct { + Value interface{} +} + +// reuse replaces the value of the valueUnstructured. +func (vi *valueUnstructured) reuse(value interface{}) Value { + vi.Value = value + return vi +} + +func (v valueUnstructured) IsMap() bool { + if _, ok := v.Value.(map[string]interface{}); ok { + return true + } + if _, ok := v.Value.(map[interface{}]interface{}); ok { + return true + } + return false +} + +func (v valueUnstructured) AsMap() Map { + return v.AsMapUsing(HeapAllocator) +} + +func (v valueUnstructured) AsMapUsing(_ Allocator) Map { + if v.Value == nil { + panic("invalid nil") + } + switch t := v.Value.(type) { + case map[string]interface{}: + return mapUnstructuredString(t) + case map[interface{}]interface{}: + return mapUnstructuredInterface(t) + } + panic(fmt.Errorf("not a map: %#v", v)) +} + +func (v valueUnstructured) IsList() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.([]interface{}) + return ok +} + +func (v valueUnstructured) AsList() List { + return v.AsListUsing(HeapAllocator) +} + +func (v valueUnstructured) AsListUsing(_ Allocator) List { + return listUnstructured(v.Value.([]interface{})) +} + +func (v valueUnstructured) IsFloat() bool { + if v.Value == nil { + return false + } else if _, ok := v.Value.(float64); ok { + return true + } else if _, ok := v.Value.(float32); ok { + return true + } + return false +} + +func (v valueUnstructured) AsFloat() float64 { + if f, ok := v.Value.(float32); ok { + return float64(f) + } + return v.Value.(float64) +} + +func (v valueUnstructured) IsInt() bool { + if v.Value == nil { + return false + } else if _, ok := v.Value.(int); ok { + return true + } else if _, ok := v.Value.(int8); ok { + return true + } else if _, ok := v.Value.(int16); ok { + return true + } else if _, ok := v.Value.(int32); ok { + return true + } else if _, ok := v.Value.(int64); ok { + return true + } else if _, ok := v.Value.(uint); ok { + return true + } else if _, ok := v.Value.(uint8); ok { + return true + } else if _, ok := v.Value.(uint16); ok { + return true + } else if _, ok := v.Value.(uint32); ok { + return true + } + return false +} + +func (v valueUnstructured) AsInt() int64 { + if i, ok := v.Value.(int); ok { + return int64(i) + } else if i, ok := v.Value.(int8); ok { + return int64(i) + } else if i, ok := v.Value.(int16); ok { + return int64(i) + } else if i, ok := v.Value.(int32); ok { + return int64(i) + } else if i, ok := v.Value.(uint); ok { + return int64(i) + } else if i, ok := v.Value.(uint8); ok { + return int64(i) + } else if i, ok := v.Value.(uint16); ok { + return int64(i) + } else if i, ok := v.Value.(uint32); ok { + 
return int64(i) + } + return v.Value.(int64) +} + +func (v valueUnstructured) IsString() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.(string) + return ok +} + +func (v valueUnstructured) AsString() string { + return v.Value.(string) +} + +func (v valueUnstructured) IsBool() bool { + if v.Value == nil { + return false + } + _, ok := v.Value.(bool) + return ok +} + +func (v valueUnstructured) AsBool() bool { + return v.Value.(bool) +} + +func (v valueUnstructured) IsNull() bool { + return v.Value == nil +} + +func (v valueUnstructured) Unstructured() interface{} { + return v.Value +} diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/sigs.k8s.io/yaml/.gitignore new file mode 100644 index 0000000..e256a31 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/.gitignore @@ -0,0 +1,20 @@ +# OSX leaves these everywhere on SMB shares +._* + +# Eclipse files +.classpath +.project +.settings/** + +# Emacs save files +*~ + +# Vim-related files +[._]*.s[a-w][a-z] +[._]s[a-w][a-z] +*.un~ +Session.vim +.netrwhist + +# Go test binaries +*.test diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml new file mode 100644 index 0000000..d20e23e --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/.travis.yml @@ -0,0 +1,13 @@ +language: go +dist: xenial +go: + - 1.12.x + - 1.13.x +script: + - diff -u <(echo -n) <(gofmt -d *.go) + - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) + - GO111MODULE=on go vet . + - GO111MODULE=on go test -v -race ./... + - git diff --exit-code +install: + - GO111MODULE=off go get golang.org/x/lint/golint diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md new file mode 100644 index 0000000..de47115 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md @@ -0,0 +1,31 @@ +# Contributing Guidelines + +Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: + +_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ + +## Getting Started + +We have full documentation on how to get started contributing here: + + + +- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests +- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) +- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers + +## Mentorship + +- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
+ + diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/sigs.k8s.io/yaml/LICENSE new file mode 100644 index 0000000..7805d36 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS new file mode 100644 index 0000000..325b40b --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/OWNERS @@ -0,0 +1,27 @@ +# See the OWNERS docs at https://go.k8s.io/owners + +approvers: +- dims +- lavalamp +- smarterclayton +- deads2k +- sttts +- liggitt +- caesarxuchao +reviewers: +- dims +- thockin +- lavalamp +- smarterclayton +- wojtek-t +- deads2k +- derekwaynecarr +- caesarxuchao +- mikedanese +- liggitt +- gmarek +- sttts +- ncdc +- tallclair +labels: +- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/sigs.k8s.io/yaml/README.md new file mode 100644 index 0000000..5a651d9 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/README.md @@ -0,0 +1,123 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml) + +kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml). + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. + +## Installation and usage + +To install, run: + +``` +$ go get sigs.k8s.io/yaml +``` + +And import using: + +``` +import "sigs.k8s.io/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "sigs.k8s.io/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. 
+ var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "sigs.k8s.io/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md new file mode 100644 index 0000000..6b64246 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/RELEASE.md @@ -0,0 +1,9 @@ +# Release Process + +The `yaml` Project is released on an as-needed basis. The process is as follows: + +1. An issue is proposing a new release with a changelog since the last release +1. All [OWNERS](OWNERS) must LGTM this release +1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` +1. The release issue is closed +1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS new file mode 100644 index 0000000..0648a8e --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS @@ -0,0 +1,17 @@ +# Defined below are the security contacts for this repo. +# +# They are the contact point for the Product Security Team to reach out +# to for triaging and handling of incoming issues. +# +# The below names agree to abide by the +# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) +# and will be removed and replaced if they violate that agreement. +# +# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE +# INSTRUCTIONS AT https://kubernetes.io/security/ + +cjcullen +jessfraz +liggitt +philips +tallclair diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md new file mode 100644 index 0000000..0d15c00 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/code-of-conduct.md @@ -0,0 +1,3 @@ +# Kubernetes Community Code of Conduct + +Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/sigs.k8s.io/yaml/fields.go new file mode 100644 index 0000000..235b7f2 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/fields.go @@ -0,0 +1,502 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. 
+func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. 
+ for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. 
+ if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. 
+ tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod new file mode 100644 index 0000000..7224f34 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/go.mod @@ -0,0 +1,8 @@ +module sigs.k8s.io/yaml + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + gopkg.in/yaml.v2 v2.2.8 +) diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum new file mode 100644 index 0000000..76e4948 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/go.sum @@ -0,0 +1,9 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/sigs.k8s.io/yaml/yaml.go new file mode 100644 index 0000000..efbc535 --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/yaml.go @@ -0,0 +1,380 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshal marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// JSONOpt is a decoding option for decoding from JSON format. +type JSONOpt func(*json.Decoder) *json.Decoder + +// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, +// optionally configuring the behavior of the JSON unmarshal. +func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { + return yamlUnmarshal(y, o, false, opts...) +} + +// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal +// into an object, optionally configuring the behavior of the JSON unmarshal. +func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { + return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) +} + +// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, +// optionally performing the unmarshalling strictly +func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { + vo := reflect.ValueOf(o) + unmarshalFn := yaml.Unmarshal + if strict { + unmarshalFn = yaml.UnmarshalStrict + } + j, err := yamlToJSON(y, &vo, unmarshalFn) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = jsonUnmarshal(bytes.NewReader(j), o, opts...) 
+ if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the +// object, optionally applying decoder options prior to decoding. We are not +// using json.Unmarshal directly as we want the chance to pass in non-default +// options. +func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(r) + for _, opt := range opts { + d = opt(d) + } + if err := d.Decode(&o); err != nil { + return fmt.Errorf("while decoding JSON: %v", err) + } + return nil +} + +// JSONToYAML Converts JSON to YAML. +func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, +// passing JSON through this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// +// For strict decoding of YAML, use YAMLToJSONStrict. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.Unmarshal) +} + +// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, +// returning an error on any duplicate field names. +func YAMLToJSONStrict(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.UnmarshalStrict) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yamlUnmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. + return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. 
+ if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. + // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. 
If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. + arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } +} + +// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice, +// without going through a byte representation. A nil or empty map[string]interface{} input is +// converted to an empty map, i.e. yaml.MapSlice(nil). +// +// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice. +// +// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml: +// - float64s are down-casted as far as possible without data-loss to int, int64, uint64. +// - int64s are down-casted to int if possible without data-loss. +// +// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case. +// +// string, bool and any other types are unchanged. +func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice { + if len(j) == 0 { + return nil + } + ret := make(yaml.MapSlice, 0, len(j)) + for k, v := range j { + ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)}) + } + return ret +} + +func jsonToYAMLValue(j interface{}) interface{} { + switch j := j.(type) { + case map[string]interface{}: + if j == nil { + return interface{}(nil) + } + return JSONObjectToYAMLObject(j) + case []interface{}: + if j == nil { + return interface{}(nil) + } + ret := make([]interface{}, len(j)) + for i := range j { + ret[i] = jsonToYAMLValue(j[i]) + } + return ret + case float64: + // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151 + if i64 := int64(j); j == float64(i64) { + if i := int(i64); i64 == int64(i) { + return i + } + return i64 + } + if ui64 := uint64(j); j == float64(ui64) { + return ui64 + } + return j + case int64: + if i := int(j); j == int64(i) { + return i + } + return j + } + return j +} diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go new file mode 100644 index 0000000..ab3e06a --- /dev/null +++ b/vendor/sigs.k8s.io/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. 
+ + // +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} From b88478ea534c127fec1a12075db604f2ce13ff31 Mon Sep 17 00:00:00 2001 From: Ryan Rose Date: Mon, 27 Jul 2020 08:47:50 -0400 Subject: [PATCH 3/6] fix versioning for k8s api and client --- go.mod | 10 +- go.sum | 21 +- vendor/github.com/davecgh/go-spew/LICENSE | 15 - .../github.com/davecgh/go-spew/spew/bypass.go | 145 - .../davecgh/go-spew/spew/bypasssafe.go | 38 - .../github.com/davecgh/go-spew/spew/common.go | 341 - .../github.com/davecgh/go-spew/spew/config.go | 306 - vendor/github.com/davecgh/go-spew/spew/doc.go | 211 - .../github.com/davecgh/go-spew/spew/dump.go | 509 - .../github.com/davecgh/go-spew/spew/format.go | 419 - .../github.com/davecgh/go-spew/spew/spew.go | 148 - vendor/github.com/golang/glog/LICENSE | 191 + vendor/github.com/golang/glog/README | 44 + vendor/github.com/golang/glog/glog.go | 1180 + vendor/github.com/golang/glog/glog_file.go | 124 + vendor/github.com/google/btree/.travis.yml | 1 + .../utils => github.com/google/btree}/LICENSE | 0 vendor/github.com/google/btree/README.md | 12 + vendor/github.com/google/btree/btree.go | 890 + .../gregjones/httpcache/.travis.yml | 18 + .../gregjones/httpcache/LICENSE.txt | 7 + .../github.com/gregjones/httpcache/README.md | 29 + .../httpcache/diskcache/diskcache.go | 61 + .../gregjones/httpcache/httpcache.go | 551 + vendor/github.com/peterbourgon/diskv/LICENSE | 19 + .../github.com/peterbourgon/diskv/README.md | 141 + .../peterbourgon/diskv/compression.go | 64 + vendor/github.com/peterbourgon/diskv/diskv.go | 624 + vendor/github.com/peterbourgon/diskv/index.go | 115 + .../v1alpha1/doc.go | 12 +- .../v1alpha1/generated.pb.go | 1027 + .../v1alpha1/generated.proto | 107 + .../v1alpha1}/register.go | 16 +- .../admissionregistration/v1alpha1/types.go | 106 + .../v1alpha1/types_swagger_doc_generated.go | 71 + .../v1alpha1}/zz_generated.deepcopy.go | 93 +- .../api/admissionregistration/v1beta1/doc.go | 5 +- .../v1beta1/generated.pb.go | 2788 +- .../v1beta1/generated.proto | 318 +- .../admissionregistration/v1beta1/types.go | 287 +- .../v1beta1/types_swagger_doc_generated.go | 71 +- .../v1beta1/zz_generated.deepcopy.go | 180 +- vendor/k8s.io/api/apps/v1/doc.go | 1 - vendor/k8s.io/api/apps/v1/generated.pb.go | 3705 +- vendor/k8s.io/api/apps/v1/generated.proto | 21 +- vendor/k8s.io/api/apps/v1/types.go | 35 +- .../apps/v1/types_swagger_doc_generated.go | 22 +- .../api/apps/v1/zz_generated.deepcopy.go | 10 +- vendor/k8s.io/api/apps/v1beta1/doc.go | 1 - .../k8s.io/api/apps/v1beta1/generated.pb.go | 3072 +- .../k8s.io/api/apps/v1beta1/generated.proto | 13 +- vendor/k8s.io/api/apps/v1beta1/types.go | 25 +- .../v1beta1/types_swagger_doc_generated.go | 12 +- .../api/apps/v1beta1/zz_generated.deepcopy.go | 6 +- vendor/k8s.io/api/apps/v1beta2/doc.go | 1 - .../k8s.io/api/apps/v1beta2/generated.pb.go | 4124 +- .../k8s.io/api/apps/v1beta2/generated.proto | 29 +- vendor/k8s.io/api/apps/v1beta2/types.go | 41 +- .../v1beta2/types_swagger_doc_generated.go | 30 +- .../api/apps/v1beta2/zz_generated.deepcopy.go | 10 +- .../api/auditregistration/v1alpha1/doc.go | 23 - .../v1alpha1/generated.pb.go | 2030 - .../v1alpha1/generated.proto | 162 - .../auditregistration/v1alpha1/register.go | 56 - .../api/auditregistration/v1alpha1/types.go | 198 - 
.../v1alpha1/types_swagger_doc_generated.go | 111 - .../v1alpha1/zz_generated.deepcopy.go | 229 - vendor/k8s.io/api/authentication/v1/doc.go | 2 - .../api/authentication/v1/generated.pb.go | 1404 +- .../api/authentication/v1/generated.proto | 25 +- vendor/k8s.io/api/authentication/v1/types.go | 25 +- .../v1/types_swagger_doc_generated.go | 8 +- .../v1/zz_generated.deepcopy.go | 12 +- .../k8s.io/api/authentication/v1beta1/doc.go | 2 - .../authentication/v1beta1/generated.pb.go | 955 +- .../authentication/v1beta1/generated.proto | 20 - .../api/authentication/v1beta1/types.go | 20 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../v1beta1/zz_generated.deepcopy.go | 12 +- vendor/k8s.io/api/authorization/v1/doc.go | 2 - .../api/authorization/v1/generated.pb.go | 1999 +- vendor/k8s.io/api/authorization/v1/types.go | 8 +- .../k8s.io/api/authorization/v1beta1/doc.go | 2 - .../api/authorization/v1beta1/generated.pb.go | 2003 +- .../k8s.io/api/authorization/v1beta1/types.go | 8 +- vendor/k8s.io/api/autoscaling/v1/doc.go | 1 - .../k8s.io/api/autoscaling/v1/generated.pb.go | 2542 +- .../k8s.io/api/autoscaling/v1/generated.proto | 18 +- vendor/k8s.io/api/autoscaling/v1/types.go | 24 +- .../v1/types_swagger_doc_generated.go | 22 +- .../autoscaling/v1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 1 - .../api/autoscaling/v2beta1/generated.pb.go | 2339 +- .../api/autoscaling/v2beta1/generated.proto | 13 +- .../k8s.io/api/autoscaling/v2beta1/types.go | 19 +- .../v2beta1/types_swagger_doc_generated.go | 16 +- .../v2beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 1 - .../api/autoscaling/v2beta2/generated.pb.go | 3478 +- .../api/autoscaling/v2beta2/generated.proto | 79 +- .../k8s.io/api/autoscaling/v2beta2/types.go | 105 +- .../v2beta2/types_swagger_doc_generated.go | 41 +- .../v2beta2/zz_generated.deepcopy.go | 80 +- vendor/k8s.io/api/batch/v1/doc.go | 1 - vendor/k8s.io/api/batch/v1/generated.pb.go | 932 +- vendor/k8s.io/api/batch/v1/generated.proto | 8 +- vendor/k8s.io/api/batch/v1/types.go | 8 +- .../batch/v1/types_swagger_doc_generated.go | 8 +- .../api/batch/v1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v1beta1/doc.go | 1 - .../k8s.io/api/batch/v1beta1/generated.pb.go | 886 +- .../k8s.io/api/batch/v1beta1/generated.proto | 16 +- vendor/k8s.io/api/batch/v1beta1/types.go | 16 +- .../v1beta1/types_swagger_doc_generated.go | 16 +- .../batch/v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/batch/v2alpha1/doc.go | 1 - .../k8s.io/api/batch/v2alpha1/generated.pb.go | 886 +- .../k8s.io/api/batch/v2alpha1/generated.proto | 16 +- vendor/k8s.io/api/batch/v2alpha1/types.go | 16 +- .../v2alpha1/types_swagger_doc_generated.go | 16 +- .../batch/v2alpha1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/certificates/v1beta1/doc.go | 2 - .../api/certificates/v1beta1/generated.pb.go | 1093 +- .../api/certificates/v1beta1/generated.proto | 13 - .../k8s.io/api/certificates/v1beta1/types.go | 81 +- .../v1beta1/types_swagger_doc_generated.go | 15 +- .../v1beta1/zz_generated.deepcopy.go | 7 +- vendor/k8s.io/api/coordination/v1/doc.go | 23 - .../api/coordination/v1/generated.pb.go | 976 - .../api/coordination/v1/generated.proto | 80 - vendor/k8s.io/api/coordination/v1/types.go | 74 - .../v1/types_swagger_doc_generated.go | 63 - .../coordination/v1/zz_generated.deepcopy.go | 124 - vendor/k8s.io/api/coordination/v1beta1/doc.go | 2 - .../api/coordination/v1beta1/generated.pb.go | 537 +- .../api/coordination/v1beta1/generated.proto | 6 
+- .../k8s.io/api/coordination/v1beta1/types.go | 6 +- .../v1beta1/types_swagger_doc_generated.go | 6 +- .../v1beta1/zz_generated.deepcopy.go | 2 +- .../api/core/v1/annotation_key_constants.go | 25 - vendor/k8s.io/api/core/v1/doc.go | 1 - vendor/k8s.io/api/core/v1/generated.pb.go | 52270 ++++++---------- vendor/k8s.io/api/core/v1/generated.proto | 875 +- vendor/k8s.io/api/core/v1/register.go | 1 - vendor/k8s.io/api/core/v1/resource.go | 8 - vendor/k8s.io/api/core/v1/taint.go | 8 +- vendor/k8s.io/api/core/v1/types.go | 951 +- .../core/v1/types_swagger_doc_generated.go | 496 +- .../k8s.io/api/core/v1/well_known_labels.go | 50 - .../k8s.io/api/core/v1/well_known_taints.go | 48 - .../api/core/v1/zz_generated.deepcopy.go | 445 +- vendor/k8s.io/api/events/v1beta1/doc.go | 2 - .../k8s.io/api/events/v1beta1/generated.pb.go | 756 +- .../k8s.io/api/events/v1beta1/generated.proto | 3 +- vendor/k8s.io/api/events/v1beta1/types.go | 7 +- .../v1beta1/types_swagger_doc_generated.go | 4 +- .../events/v1beta1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/extensions/v1beta1/doc.go | 1 - .../api/extensions/v1beta1/generated.pb.go | 8391 +-- .../api/extensions/v1beta1/generated.proto | 226 +- .../k8s.io/api/extensions/v1beta1/register.go | 1 + vendor/k8s.io/api/extensions/v1beta1/types.go | 276 +- .../v1beta1/types_swagger_doc_generated.go | 134 +- .../v1beta1/zz_generated.deepcopy.go | 210 +- vendor/k8s.io/api/networking/v1/doc.go | 2 - .../k8s.io/api/networking/v1/generated.pb.go | 1124 +- .../k8s.io/api/networking/v1/generated.proto | 22 +- vendor/k8s.io/api/networking/v1/types.go | 18 +- .../v1/types_swagger_doc_generated.go | 14 +- .../networking/v1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/networking/v1beta1/doc.go | 22 - .../api/networking/v1beta1/generated.pb.go | 3183 - .../api/networking/v1beta1/generated.proto | 276 - .../k8s.io/api/networking/v1beta1/register.go | 58 - vendor/k8s.io/api/networking/v1beta1/types.go | 320 - .../v1beta1/types_swagger_doc_generated.go | 160 - .../v1beta1/well_known_annotations.go | 32 - .../v1beta1/zz_generated.deepcopy.go | 351 - .../k8s.io/api/node/v1alpha1/generated.pb.go | 1583 - .../k8s.io/api/node/v1alpha1/generated.proto | 118 - vendor/k8s.io/api/node/v1alpha1/register.go | 52 - vendor/k8s.io/api/node/v1alpha1/types.go | 116 - .../v1alpha1/types_swagger_doc_generated.go | 80 - .../node/v1alpha1/zz_generated.deepcopy.go | 165 - vendor/k8s.io/api/node/v1beta1/doc.go | 23 - .../k8s.io/api/node/v1beta1/generated.pb.go | 1412 - .../k8s.io/api/node/v1beta1/generated.proto | 108 - vendor/k8s.io/api/node/v1beta1/register.go | 52 - vendor/k8s.io/api/node/v1beta1/types.go | 106 - .../v1beta1/types_swagger_doc_generated.go | 71 - vendor/k8s.io/api/policy/v1beta1/doc.go | 3 +- .../k8s.io/api/policy/v1beta1/generated.pb.go | 3315 +- .../k8s.io/api/policy/v1beta1/generated.proto | 73 +- vendor/k8s.io/api/policy/v1beta1/types.go | 106 +- .../v1beta1/types_swagger_doc_generated.go | 38 +- .../policy/v1beta1/zz_generated.deepcopy.go | 82 +- vendor/k8s.io/api/rbac/v1/doc.go | 2 - vendor/k8s.io/api/rbac/v1/generated.pb.go | 1578 +- vendor/k8s.io/api/rbac/v1/generated.proto | 2 - vendor/k8s.io/api/rbac/v1/types.go | 2 - .../rbac/v1/types_swagger_doc_generated.go | 2 +- .../api/rbac/v1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/rbac/v1alpha1/doc.go | 2 - .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 1582 +- .../k8s.io/api/rbac/v1alpha1/generated.proto | 17 +- vendor/k8s.io/api/rbac/v1alpha1/types.go | 17 +- .../v1alpha1/types_swagger_doc_generated.go | 20 +- 
.../rbac/v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/rbac/v1beta1/doc.go | 2 - .../k8s.io/api/rbac/v1beta1/generated.pb.go | 1578 +- .../k8s.io/api/rbac/v1beta1/generated.proto | 14 +- vendor/k8s.io/api/rbac/v1beta1/types.go | 14 +- .../v1beta1/types_swagger_doc_generated.go | 18 +- .../api/rbac/v1beta1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/scheduling/v1/doc.go | 23 - .../k8s.io/api/scheduling/v1/generated.pb.go | 735 - .../k8s.io/api/scheduling/v1/generated.proto | 75 - vendor/k8s.io/api/scheduling/v1/register.go | 55 - vendor/k8s.io/api/scheduling/v1/types.go | 74 - .../v1/types_swagger_doc_generated.go | 53 - .../scheduling/v1/zz_generated.deepcopy.go | 90 - vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 2 - .../api/scheduling/v1alpha1/generated.pb.go | 443 +- .../api/scheduling/v1alpha1/generated.proto | 13 +- .../k8s.io/api/scheduling/v1alpha1/types.go | 13 +- .../v1alpha1/types_swagger_doc_generated.go | 13 +- .../v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/scheduling/v1beta1/doc.go | 2 - .../api/scheduling/v1beta1/generated.pb.go | 443 +- .../api/scheduling/v1beta1/generated.proto | 13 +- vendor/k8s.io/api/scheduling/v1beta1/types.go | 13 +- .../v1beta1/types_swagger_doc_generated.go | 13 +- .../v1beta1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/settings/v1alpha1/doc.go | 2 - .../api/settings/v1alpha1/generated.pb.go | 606 +- .../api/settings/v1alpha1/generated.proto | 2 +- vendor/k8s.io/api/settings/v1alpha1/types.go | 2 +- .../v1alpha1/types_swagger_doc_generated.go | 2 +- .../v1alpha1/zz_generated.deepcopy.go | 2 +- vendor/k8s.io/api/storage/v1/doc.go | 4 +- vendor/k8s.io/api/storage/v1/generated.pb.go | 4477 +- vendor/k8s.io/api/storage/v1/generated.proto | 286 +- vendor/k8s.io/api/storage/v1/register.go | 9 - vendor/k8s.io/api/storage/v1/types.go | 340 +- .../storage/v1/types_swagger_doc_generated.go | 148 +- .../api/storage/v1/zz_generated.deepcopy.go | 377 +- vendor/k8s.io/api/storage/v1alpha1/doc.go | 4 +- .../api/storage/v1alpha1/generated.pb.go | 1037 +- .../api/storage/v1alpha1/generated.proto | 16 +- vendor/k8s.io/api/storage/v1alpha1/types.go | 18 +- .../v1alpha1/types_swagger_doc_generated.go | 6 +- .../storage/v1alpha1/zz_generated.deepcopy.go | 8 +- vendor/k8s.io/api/storage/v1beta1/doc.go | 2 - .../api/storage/v1beta1/generated.pb.go | 3882 +- .../api/storage/v1beta1/generated.proto | 200 +- vendor/k8s.io/api/storage/v1beta1/register.go | 6 - vendor/k8s.io/api/storage/v1beta1/types.go | 243 +- .../v1beta1/types_swagger_doc_generated.go | 93 +- .../storage/v1beta1/zz_generated.deepcopy.go | 230 +- .../client-go/discovery/cached_discovery.go | 282 + .../client-go/discovery/discovery_client.go | 120 +- vendor/k8s.io/client-go/discovery/helper.go | 8 +- .../client-go/discovery/round_tripper.go | 65 + .../k8s.io/client-go/kubernetes/clientset.go | 288 +- .../client-go/kubernetes/scheme/register.go | 16 +- .../v1alpha1/admissionregistration_client.go} | 32 +- .../v1alpha1/doc.go | 0 .../v1alpha1/generated_expansion.go | 2 +- .../v1alpha1/initializerconfiguration.go | 147 + .../v1beta1/mutatingwebhookconfiguration.go | 17 - .../v1beta1/validatingwebhookconfiguration.go | 17 - .../typed/apps/v1/controllerrevision.go | 17 - .../kubernetes/typed/apps/v1/daemonset.go | 17 - .../kubernetes/typed/apps/v1/deployment.go | 49 - .../kubernetes/typed/apps/v1/replicaset.go | 49 - .../kubernetes/typed/apps/v1/statefulset.go | 49 - .../typed/apps/v1beta1/apps_client.go | 5 + .../typed/apps/v1beta1/controllerrevision.go | 17 - 
.../typed/apps/v1beta1/deployment.go | 17 - .../typed/apps/v1beta1/generated_expansion.go | 2 + .../v1beta1/scale.go} | 29 +- .../typed/apps/v1beta1/statefulset.go | 17 - .../typed/apps/v1beta2/apps_client.go | 5 + .../typed/apps/v1beta2/controllerrevision.go | 17 - .../typed/apps/v1beta2/daemonset.go | 17 - .../typed/apps/v1beta2/deployment.go | 17 - .../typed/apps/v1beta2/generated_expansion.go | 2 + .../typed/apps/v1beta2/replicaset.go | 17 - .../kubernetes/typed/apps/v1beta2/scale.go | 48 + .../typed/apps/v1beta2/statefulset.go | 17 - .../auditregistration/v1alpha1/auditsink.go | 164 - .../v1alpha1/generated_expansion.go | 21 - .../autoscaling/v1/horizontalpodautoscaler.go | 17 - .../v2beta1/horizontalpodautoscaler.go | 17 - .../v2beta2/horizontalpodautoscaler.go | 17 - .../kubernetes/typed/batch/v1/job.go | 17 - .../kubernetes/typed/batch/v1beta1/cronjob.go | 17 - .../typed/batch/v2alpha1/cronjob.go | 17 - .../v1beta1/certificatesigningrequest.go | 17 - .../coordination/v1/coordination_client.go | 90 - .../kubernetes/typed/coordination/v1/doc.go | 20 - .../coordination/v1/generated_expansion.go | 21 - .../kubernetes/typed/coordination/v1/lease.go | 174 - .../typed/coordination/v1beta1/lease.go | 17 - .../typed/core/v1/componentstatus.go | 17 - .../kubernetes/typed/core/v1/configmap.go | 17 - .../kubernetes/typed/core/v1/endpoints.go | 17 - .../kubernetes/typed/core/v1/event.go | 17 - .../kubernetes/typed/core/v1/limitrange.go | 17 - .../kubernetes/typed/core/v1/namespace.go | 12 - .../kubernetes/typed/core/v1/node.go | 17 - .../typed/core/v1/persistentvolume.go | 17 - .../typed/core/v1/persistentvolumeclaim.go | 17 - .../client-go/kubernetes/typed/core/v1/pod.go | 17 - .../kubernetes/typed/core/v1/podtemplate.go | 17 - .../typed/core/v1/replicationcontroller.go | 33 +- .../kubernetes/typed/core/v1/resourcequota.go | 17 - .../kubernetes/typed/core/v1/secret.go | 17 - .../kubernetes/typed/core/v1/service.go | 12 - .../typed/core/v1/serviceaccount.go | 17 - .../kubernetes/typed/events/v1beta1/event.go | 17 - .../typed/extensions/v1beta1/daemonset.go | 17 - .../typed/extensions/v1beta1/deployment.go | 17 - .../extensions/v1beta1/extensions_client.go | 5 + .../typed/extensions/v1beta1/ingress.go | 17 - .../extensions/v1beta1/podsecuritypolicy.go | 17 - .../typed/extensions/v1beta1/replicaset.go | 17 - .../v1beta1/scale.go} | 29 +- .../extensions/v1beta1/scale_expansion.go | 65 + .../typed/networking/v1/networkpolicy.go | 17 - .../typed/networking/v1beta1/doc.go | 20 - .../typed/networking/v1beta1/ingress.go | 191 - .../networking/v1beta1/networking_client.go | 90 - .../kubernetes/typed/node/v1alpha1/doc.go | 20 - .../typed/node/v1alpha1/node_client.go | 90 - .../typed/node/v1alpha1/runtimeclass.go | 164 - .../kubernetes/typed/node/v1beta1/doc.go | 20 - .../typed/node/v1beta1/node_client.go | 90 - .../typed/node/v1beta1/runtimeclass.go | 164 - .../policy/v1beta1/poddisruptionbudget.go | 17 - .../typed/policy/v1beta1/podsecuritypolicy.go | 17 - .../kubernetes/typed/rbac/v1/clusterrole.go | 17 - .../typed/rbac/v1/clusterrolebinding.go | 17 - .../kubernetes/typed/rbac/v1/role.go | 17 - .../kubernetes/typed/rbac/v1/rolebinding.go | 17 - .../typed/rbac/v1alpha1/clusterrole.go | 17 - .../typed/rbac/v1alpha1/clusterrolebinding.go | 17 - .../kubernetes/typed/rbac/v1alpha1/role.go | 17 - .../typed/rbac/v1alpha1/rolebinding.go | 17 - .../typed/rbac/v1beta1/clusterrole.go | 17 - .../typed/rbac/v1beta1/clusterrolebinding.go | 17 - .../kubernetes/typed/rbac/v1beta1/role.go | 17 - 
.../typed/rbac/v1beta1/rolebinding.go | 17 - .../kubernetes/typed/scheduling/v1/doc.go | 20 - .../scheduling/v1/generated_expansion.go | 21 - .../typed/scheduling/v1/priorityclass.go | 164 - .../typed/scheduling/v1/scheduling_client.go | 90 - .../scheduling/v1alpha1/priorityclass.go | 17 - .../typed/scheduling/v1beta1/priorityclass.go | 17 - .../typed/settings/v1alpha1/podpreset.go | 17 - .../typed/storage/v1/generated_expansion.go | 2 - .../typed/storage/v1/storage_client.go | 5 - .../typed/storage/v1/storageclass.go | 17 - .../typed/storage/v1/volumeattachment.go | 180 - .../storage/v1alpha1/volumeattachment.go | 17 - .../typed/storage/v1beta1/csidriver.go | 164 - .../typed/storage/v1beta1/csinode.go | 164 - .../storage/v1beta1/generated_expansion.go | 4 - .../typed/storage/v1beta1/storage_client.go | 10 - .../typed/storage/v1beta1/storageclass.go | 17 - .../typed/storage/v1beta1/volumeattachment.go | 17 - .../pkg/apis/clientauthentication/OWNERS | 9 - .../pkg/apis/clientauthentication/doc.go | 1 - .../apis/clientauthentication/v1alpha1/doc.go | 1 - .../clientauthentication/v1alpha1/types.go | 2 +- .../apis/clientauthentication/v1beta1/doc.go | 1 - vendor/k8s.io/client-go/pkg/version/doc.go | 3 +- .../plugin/pkg/client/auth/exec/exec.go | 19 +- vendor/k8s.io/client-go/rest/OWNERS | 2 - vendor/k8s.io/client-go/rest/config.go | 87 +- vendor/k8s.io/client-go/rest/plugin.go | 4 +- vendor/k8s.io/client-go/rest/request.go | 36 +- vendor/k8s.io/client-go/rest/transport.go | 24 +- vendor/k8s.io/client-go/rest/urlbackoff.go | 8 +- .../client-go/tools/clientcmd/api/doc.go | 1 - .../client-go/tools/clientcmd/api/types.go | 44 - vendor/k8s.io/client-go/tools/metrics/OWNERS | 2 - vendor/k8s.io/client-go/transport/OWNERS | 2 - vendor/k8s.io/client-go/transport/config.go | 13 +- .../client-go/transport/round_trippers.go | 38 +- .../client-go/transport/token_source.go | 4 +- .../k8s.io/client-go/transport/transport.go | 58 - vendor/k8s.io/client-go/util/cert/OWNERS | 9 - vendor/k8s.io/client-go/util/cert/cert.go | 106 +- vendor/k8s.io/client-go/util/cert/io.go | 95 + vendor/k8s.io/client-go/util/cert/pem.go | 208 + .../client-go/util/flowcontrol/backoff.go | 2 +- .../util}/integer/integer.go | 6 - vendor/k8s.io/client-go/util/keyutil/OWNERS | 7 - vendor/k8s.io/client-go/util/keyutil/key.go | 323 - vendor/modules.txt | 33 +- 401 files changed, 46525 insertions(+), 102941 deletions(-) delete mode 100644 vendor/github.com/davecgh/go-spew/LICENSE delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go delete mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/golang/glog/LICENSE create mode 100644 vendor/github.com/golang/glog/README create mode 100644 vendor/github.com/golang/glog/glog.go create mode 100644 vendor/github.com/golang/glog/glog_file.go create mode 100644 vendor/github.com/google/btree/.travis.yml rename vendor/{k8s.io/utils => github.com/google/btree}/LICENSE (100%) create mode 100644 vendor/github.com/google/btree/README.md create mode 100644 vendor/github.com/google/btree/btree.go create mode 100644 
vendor/github.com/gregjones/httpcache/.travis.yml create mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt create mode 100644 vendor/github.com/gregjones/httpcache/README.md create mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go create mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go create mode 100644 vendor/github.com/peterbourgon/diskv/LICENSE create mode 100644 vendor/github.com/peterbourgon/diskv/README.md create mode 100644 vendor/github.com/peterbourgon/diskv/compression.go create mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go create mode 100644 vendor/github.com/peterbourgon/diskv/index.go rename vendor/k8s.io/api/{node => admissionregistration}/v1alpha1/doc.go (57%) create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto rename vendor/k8s.io/api/{coordination/v1 => admissionregistration/v1alpha1}/register.go (86%) create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go create mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go rename vendor/k8s.io/api/{node/v1beta1 => admissionregistration/v1alpha1}/zz_generated.deepcopy.go (59%) delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/coordination/v1/doc.go delete mode 100644 vendor/k8s.io/api/coordination/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/coordination/v1/generated.proto delete mode 100644 vendor/k8s.io/api/coordination/v1/types.go delete mode 100644 vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/core/v1/well_known_labels.go delete mode 100644 vendor/k8s.io/api/core/v1/well_known_taints.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/networking/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go delete mode 100644 vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/node/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/generated.pb.go delete mode 100644 
vendor/k8s.io/api/node/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/node/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/doc.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/generated.proto delete mode 100644 vendor/k8s.io/api/scheduling/v1/register.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/types.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/client-go/discovery/cached_discovery.go create mode 100644 vendor/k8s.io/client-go/discovery/round_tripper.go rename vendor/k8s.io/client-go/kubernetes/typed/{auditregistration/v1alpha1/auditregistration_client.go => admissionregistration/v1alpha1/admissionregistration_client.go} (59%) rename vendor/k8s.io/client-go/kubernetes/typed/{auditregistration => admissionregistration}/v1alpha1/doc.go (100%) rename vendor/k8s.io/client-go/kubernetes/typed/{node => admissionregistration}/v1alpha1/generated_expansion.go (92%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go rename vendor/k8s.io/client-go/kubernetes/typed/{networking/v1beta1/generated_expansion.go => apps/v1beta1/scale.go} (51%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go rename vendor/k8s.io/client-go/kubernetes/typed/{node/v1beta1/generated_expansion.go => extensions/v1beta1/scale.go} (51%) create mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go 
delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go delete mode 100644 vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS delete mode 100644 vendor/k8s.io/client-go/util/cert/OWNERS rename vendor/k8s.io/{utils => client-go/util}/integer/integer.go (81%) delete mode 100644 vendor/k8s.io/client-go/util/keyutil/OWNERS delete mode 100644 vendor/k8s.io/client-go/util/keyutil/key.go diff --git a/go.mod b/go.mod index 16f55d4..5eea825 100644 --- a/go.mod +++ b/go.mod @@ -8,19 +8,23 @@ require ( github.com/Jim-Lambert-Bose/cache v1.0.3 github.com/alicebob/miniredis/v2 v2.11.0 github.com/gin-gonic/gin v1.4.0 + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect github.com/gomodule/redigo v2.0.0+incompatible + github.com/google/btree v1.0.0 // indirect github.com/google/uuid v1.1.1 github.com/googleapis/gnostic v0.5.1 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/golang-lru v0.5.3 github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect github.com/opentracing/opentracing-go v1.1.0 + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/prometheus/client_golang v1.1.0 github.com/sirupsen/logrus v1.4.2 github.com/ugorji/go v1.1.7 // indirect github.com/zsais/go-gin-prometheus v0.1.0 golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect - k8s.io/api v0.18.6 // indirect - k8s.io/client-go v11.0.0+incompatible - k8s.io/utils v0.0.0-20200724153422-f32512634ab7 // indirect + k8s.io/api v0.0.0-20190708094356-59223ed9f6ce // indirect + k8s.io/apimachinery v0.18.6 // indirect + k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df ) diff --git a/go.sum b/go.sum index f791c1d..200e56d 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,7 @@ github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkg github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3/go.mod h1:VJ0WA2NBN22VlZ2dKZQPAPnyWw5XTlK1KymzLKsr59s= @@ -50,6 +51,8 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -66,6 +69,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/gomodule/redigo v2.0.0+incompatible h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= @@ -79,6 +84,8 @@ github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsC github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= @@ -127,6 +134,8 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -149,7 +158,6 @@ github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62/go.mod h1:65XQgovT github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= @@ -243,20 
+251,17 @@ gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.18.6 h1:osqrAXbOQjkKIWDTjrqxWQ3w0GkKb1KA1XkUGHHYpeE= -k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= +k8s.io/api v0.0.0-20190708094356-59223ed9f6ce h1:5dyrqZX6sr+cG/e26KF2p6GuTIABBACjjGSOSdjS9sI= +k8s.io/api v0.0.0-20190708094356-59223ed9f6ce/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o= -k8s.io/client-go v11.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= +k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df h1:d28f1QoXWvL+FVVpHzCXdx5020GRqd3X3LZjKwJ3xss= +k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20200724153422-f32512634ab7 h1:bYyloM4UeWug24euLZfEH7muFQoqvFj9h/pxAnOZLt4= -k8s.io/utils v0.0.0-20200724153422-f32512634ab7/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= diff --git a/vendor/github.com/davecgh/go-spew/LICENSE b/vendor/github.com/davecgh/go-spew/LICENSE deleted file mode 100644 index bc52e96..0000000 --- a/vendor/github.com/davecgh/go-spew/LICENSE +++ /dev/null @@ -1,15 +0,0 @@ -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go deleted file mode 100644 index 7929947..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypass.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is not running on Google App Engine, compiled by GopherJS, and -// "-tags safe" is not added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// Go versions prior to 1.4 are disabled because they use a different layout -// for interfaces which make the implementation of unsafeReflectValue more complex. -// +build !js,!appengine,!safe,!disableunsafe,go1.4 - -package spew - -import ( - "reflect" - "unsafe" -) - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = false - - // ptrSize is the size of a pointer on the current arch. - ptrSize = unsafe.Sizeof((*byte)(nil)) -) - -type flag uintptr - -var ( - // flagRO indicates whether the value field of a reflect.Value - // is read-only. - flagRO flag - - // flagAddr indicates whether the address of the reflect.Value's - // value may be taken. - flagAddr flag -) - -// flagKindMask holds the bits that make up the kind -// part of the flags field. In all the supported versions, -// it is in the lower 5 bits. -const flagKindMask = flag(0x1f) - -// Different versions of Go have used different -// bit layouts for the flags type. This table -// records the known combinations. -var okFlags = []struct { - ro, addr flag -}{{ - // From Go 1.4 to 1.5 - ro: 1 << 5, - addr: 1 << 7, -}, { - // Up to Go tip. - ro: 1<<5 | 1<<6, - addr: 1 << 8, -}} - -var flagValOffset = func() uintptr { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - return field.Offset -}() - -// flagField returns a pointer to the flag field of a reflect.Value. -func flagField(v *reflect.Value) *flag { - return (*flag)(unsafe.Pointer(uintptr(unsafe.Pointer(v)) + flagValOffset)) -} - -// unsafeReflectValue converts the passed reflect.Value into a one that bypasses -// the typical safety restrictions preventing access to unaddressable and -// unexported data. It works by digging the raw pointer to the underlying -// value out of the protected value and generating a new unprotected (unsafe) -// reflect.Value to it. -// -// This allows us to check for implementations of the Stringer and error -// interfaces to be used for pretty printing ordinarily unaddressable and -// inaccessible values such as unexported struct fields. 
-func unsafeReflectValue(v reflect.Value) reflect.Value { - if !v.IsValid() || (v.CanInterface() && v.CanAddr()) { - return v - } - flagFieldPtr := flagField(&v) - *flagFieldPtr &^= flagRO - *flagFieldPtr |= flagAddr - return v -} - -// Sanity checks against future reflect package changes -// to the type or semantics of the Value.flag field. -func init() { - field, ok := reflect.TypeOf(reflect.Value{}).FieldByName("flag") - if !ok { - panic("reflect.Value has no flag field") - } - if field.Type.Kind() != reflect.TypeOf(flag(0)).Kind() { - panic("reflect.Value flag field has changed kind") - } - type t0 int - var t struct { - A t0 - // t0 will have flagEmbedRO set. - t0 - // a will have flagStickyRO set - a t0 - } - vA := reflect.ValueOf(t).FieldByName("A") - va := reflect.ValueOf(t).FieldByName("a") - vt0 := reflect.ValueOf(t).FieldByName("t0") - - // Infer flagRO from the difference between the flags - // for the (otherwise identical) fields in t. - flagPublic := *flagField(&vA) - flagWithRO := *flagField(&va) | *flagField(&vt0) - flagRO = flagPublic ^ flagWithRO - - // Infer flagAddr from the difference between a value - // taken from a pointer and not. - vPtrA := reflect.ValueOf(&t).Elem().FieldByName("A") - flagNoPtr := *flagField(&vA) - flagPtr := *flagField(&vPtrA) - flagAddr = flagNoPtr ^ flagPtr - - // Check that the inferred flags tally with one of the known versions. - for _, f := range okFlags { - if flagRO == f.ro && flagAddr == f.addr { - return - } - } - panic("reflect.Value read-only flag has changed semantics") -} diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go deleted file mode 100644 index 205c28d..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2015-2016 Dave Collins -// -// Permission to use, copy, modify, and distribute this software for any -// purpose with or without fee is hereby granted, provided that the above -// copyright notice and this permission notice appear in all copies. -// -// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - -// NOTE: Due to the following build constraints, this file will only be compiled -// when the code is running on Google App Engine, compiled by GopherJS, or -// "-tags safe" is added to the go build command line. The "disableunsafe" -// tag is deprecated and thus should not be used. -// +build js appengine safe disableunsafe !go1.4 - -package spew - -import "reflect" - -const ( - // UnsafeDisabled is a build-time constant which specifies whether or - // not access to the unsafe package is available. - UnsafeDisabled = true -) - -// unsafeReflectValue typically converts the passed reflect.Value into a one -// that bypasses the typical safety restrictions preventing access to -// unaddressable and unexported data. However, doing this relies on access to -// the unsafe package. This is a stub version which simply returns the passed -// reflect.Value when the unsafe package is not available. 
-func unsafeReflectValue(v reflect.Value) reflect.Value { - return v -} diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go deleted file mode 100644 index 1be8ce9..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/common.go +++ /dev/null @@ -1,341 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "reflect" - "sort" - "strconv" -) - -// Some constants in the form of bytes to avoid string overhead. This mirrors -// the technique used in the fmt package. -var ( - panicBytes = []byte("(PANIC=") - plusBytes = []byte("+") - iBytes = []byte("i") - trueBytes = []byte("true") - falseBytes = []byte("false") - interfaceBytes = []byte("(interface {})") - commaNewlineBytes = []byte(",\n") - newlineBytes = []byte("\n") - openBraceBytes = []byte("{") - openBraceNewlineBytes = []byte("{\n") - closeBraceBytes = []byte("}") - asteriskBytes = []byte("*") - colonBytes = []byte(":") - colonSpaceBytes = []byte(": ") - openParenBytes = []byte("(") - closeParenBytes = []byte(")") - spaceBytes = []byte(" ") - pointerChainBytes = []byte("->") - nilAngleBytes = []byte("") - maxNewlineBytes = []byte("\n") - maxShortBytes = []byte("") - circularBytes = []byte("") - circularShortBytes = []byte("") - invalidAngleBytes = []byte("") - openBracketBytes = []byte("[") - closeBracketBytes = []byte("]") - percentBytes = []byte("%") - precisionBytes = []byte(".") - openAngleBytes = []byte("<") - closeAngleBytes = []byte(">") - openMapBytes = []byte("map[") - closeMapBytes = []byte("]") - lenEqualsBytes = []byte("len=") - capEqualsBytes = []byte("cap=") -) - -// hexDigits is used to map a decimal value to a hex digit. -var hexDigits = "0123456789abcdef" - -// catchPanic handles any panics that might occur during the handleMethods -// calls. -func catchPanic(w io.Writer, v reflect.Value) { - if err := recover(); err != nil { - w.Write(panicBytes) - fmt.Fprintf(w, "%v", err) - w.Write(closeParenBytes) - } -} - -// handleMethods attempts to call the Error and String methods on the underlying -// type the passed reflect.Value represents and outputes the result to Writer w. -// -// It handles panics in any called methods by catching and displaying the error -// as the formatted value. -func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) { - // We need an interface to check if the type implements the error or - // Stringer interface. However, the reflect package won't give us an - // interface on certain things like unexported struct fields in order - // to enforce visibility rules. We use unsafe, when it's available, - // to bypass these restrictions since this package does not mutate the - // values. 
- if !v.CanInterface() { - if UnsafeDisabled { - return false - } - - v = unsafeReflectValue(v) - } - - // Choose whether or not to do error and Stringer interface lookups against - // the base type or a pointer to the base type depending on settings. - // Technically calling one of these methods with a pointer receiver can - // mutate the value, however, types which choose to satisify an error or - // Stringer interface with a pointer receiver should not be mutating their - // state inside these interface methods. - if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() { - v = unsafeReflectValue(v) - } - if v.CanAddr() { - v = v.Addr() - } - - // Is it an error or Stringer? - switch iface := v.Interface().(type) { - case error: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.Error())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - - w.Write([]byte(iface.Error())) - return true - - case fmt.Stringer: - defer catchPanic(w, v) - if cs.ContinueOnMethod { - w.Write(openParenBytes) - w.Write([]byte(iface.String())) - w.Write(closeParenBytes) - w.Write(spaceBytes) - return false - } - w.Write([]byte(iface.String())) - return true - } - return false -} - -// printBool outputs a boolean value as true or false to Writer w. -func printBool(w io.Writer, val bool) { - if val { - w.Write(trueBytes) - } else { - w.Write(falseBytes) - } -} - -// printInt outputs a signed integer value to Writer w. -func printInt(w io.Writer, val int64, base int) { - w.Write([]byte(strconv.FormatInt(val, base))) -} - -// printUint outputs an unsigned integer value to Writer w. -func printUint(w io.Writer, val uint64, base int) { - w.Write([]byte(strconv.FormatUint(val, base))) -} - -// printFloat outputs a floating point value using the specified precision, -// which is expected to be 32 or 64bit, to Writer w. -func printFloat(w io.Writer, val float64, precision int) { - w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) -} - -// printComplex outputs a complex value using the specified float precision -// for the real and imaginary parts to Writer w. -func printComplex(w io.Writer, c complex128, floatPrecision int) { - r := real(c) - w.Write(openParenBytes) - w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) - i := imag(c) - if i >= 0 { - w.Write(plusBytes) - } - w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) - w.Write(iBytes) - w.Write(closeParenBytes) -} - -// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' -// prefix to Writer w. -func printHexPtr(w io.Writer, p uintptr) { - // Null pointer. - num := uint64(p) - if num == 0 { - w.Write(nilAngleBytes) - return - } - - // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix - buf := make([]byte, 18) - - // It's simpler to construct the hex string right to left. - base := uint64(16) - i := len(buf) - 1 - for num >= base { - buf[i] = hexDigits[num%base] - num /= base - i-- - } - buf[i] = hexDigits[num] - - // Add '0x' prefix. - i-- - buf[i] = 'x' - i-- - buf[i] = '0' - - // Strip unused leading bytes. - buf = buf[i:] - w.Write(buf) -} - -// valuesSorter implements sort.Interface to allow a slice of reflect.Value -// elements to be sorted. -type valuesSorter struct { - values []reflect.Value - strings []string // either nil or same len and values - cs *ConfigState -} - -// newValuesSorter initializes a valuesSorter instance, which holds a set of -// surrogate keys on which the data should be sorted. 
It uses flags in -// ConfigState to decide if and how to populate those surrogate keys. -func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { - vs := &valuesSorter{values: values, cs: cs} - if canSortSimply(vs.values[0].Kind()) { - return vs - } - if !cs.DisableMethods { - vs.strings = make([]string, len(values)) - for i := range vs.values { - b := bytes.Buffer{} - if !handleMethods(cs, &b, vs.values[i]) { - vs.strings = nil - break - } - vs.strings[i] = b.String() - } - } - if vs.strings == nil && cs.SpewKeys { - vs.strings = make([]string, len(values)) - for i := range vs.values { - vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) - } - } - return vs -} - -// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted -// directly, or whether it should be considered for sorting by surrogate keys -// (if the ConfigState allows it). -func canSortSimply(kind reflect.Kind) bool { - // This switch parallels valueSortLess, except for the default case. - switch kind { - case reflect.Bool: - return true - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return true - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return true - case reflect.Float32, reflect.Float64: - return true - case reflect.String: - return true - case reflect.Uintptr: - return true - case reflect.Array: - return true - } - return false -} - -// Len returns the number of values in the slice. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Len() int { - return len(s.values) -} - -// Swap swaps the values at the passed indices. It is part of the -// sort.Interface implementation. -func (s *valuesSorter) Swap(i, j int) { - s.values[i], s.values[j] = s.values[j], s.values[i] - if s.strings != nil { - s.strings[i], s.strings[j] = s.strings[j], s.strings[i] - } -} - -// valueSortLess returns whether the first value should sort before the second -// value. It is used by valueSorter.Less as part of the sort.Interface -// implementation. -func valueSortLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Bool: - return !a.Bool() && b.Bool() - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - return a.Int() < b.Int() - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - return a.Uint() < b.Uint() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.String: - return a.String() < b.String() - case reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Array: - // Compare the contents of both arrays. - l := a.Len() - for i := 0; i < l; i++ { - av := a.Index(i) - bv := b.Index(i) - if av.Interface() == bv.Interface() { - continue - } - return valueSortLess(av, bv) - } - } - return a.String() < b.String() -} - -// Less returns whether the value at index i should sort before the -// value at index j. It is part of the sort.Interface implementation. -func (s *valuesSorter) Less(i, j int) bool { - if s.strings == nil { - return valueSortLess(s.values[i], s.values[j]) - } - return s.strings[i] < s.strings[j] -} - -// sortValues is a sort function that handles both native types and any type that -// can be converted to error or Stringer. Other inputs are sorted according to -// their Value.String() value to ensure display stability. 
-func sortValues(values []reflect.Value, cs *ConfigState) { - if len(values) == 0 { - return - } - sort.Sort(newValuesSorter(values, cs)) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go deleted file mode 100644 index 2e3d22f..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/config.go +++ /dev/null @@ -1,306 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "io" - "os" -) - -// ConfigState houses the configuration options used by spew to format and -// display values. There is a global instance, Config, that is used to control -// all top-level Formatter and Dump functionality. Each ConfigState instance -// provides methods equivalent to the top-level functions. -// -// The zero value for ConfigState provides no indentation. You would typically -// want to set it to a space or a tab. -// -// Alternatively, you can use NewDefaultConfig to get a ConfigState instance -// with default settings. See the documentation of NewDefaultConfig for default -// values. -type ConfigState struct { - // Indent specifies the string to use for each indentation level. The - // global config instance that all top-level functions use set this to a - // single space by default. If you would like more indentation, you might - // set this to a tab with "\t" or perhaps two spaces with " ". - Indent string - - // MaxDepth controls the maximum number of levels to descend into nested - // data structures. The default, 0, means there is no limit. - // - // NOTE: Circular data structures are properly detected, so it is not - // necessary to set this value unless you specifically want to limit deeply - // nested data structures. - MaxDepth int - - // DisableMethods specifies whether or not error and Stringer interfaces are - // invoked for types that implement them. - DisableMethods bool - - // DisablePointerMethods specifies whether or not to check for and invoke - // error and Stringer interfaces on types which only accept a pointer - // receiver when the current type is not a pointer. - // - // NOTE: This might be an unsafe action since calling one of these methods - // with a pointer receiver could technically mutate the value, however, - // in practice, types which choose to satisify an error or Stringer - // interface with a pointer receiver should not be mutating their state - // inside these interface methods. As a result, this option relies on - // access to the unsafe package, so it will not have any effect when - // running in environments without access to the unsafe package such as - // Google App Engine or with the "safe" build tag specified. 
- DisablePointerMethods bool - - // DisablePointerAddresses specifies whether to disable the printing of - // pointer addresses. This is useful when diffing data structures in tests. - DisablePointerAddresses bool - - // DisableCapacities specifies whether to disable the printing of capacities - // for arrays, slices, maps and channels. This is useful when diffing - // data structures in tests. - DisableCapacities bool - - // ContinueOnMethod specifies whether or not recursion should continue once - // a custom error or Stringer interface is invoked. The default, false, - // means it will print the results of invoking the custom error or Stringer - // interface and return immediately instead of continuing to recurse into - // the internals of the data type. - // - // NOTE: This flag does not have any effect if method invocation is disabled - // via the DisableMethods or DisablePointerMethods options. - ContinueOnMethod bool - - // SortKeys specifies map keys should be sorted before being printed. Use - // this to have a more deterministic, diffable output. Note that only - // native types (bool, int, uint, floats, uintptr and string) and types - // that support the error or Stringer interfaces (if methods are - // enabled) are supported, with other types sorted according to the - // reflect.Value.String() output which guarantees display stability. - SortKeys bool - - // SpewKeys specifies that, as a last resort attempt, map keys should - // be spewed to strings and sorted by those strings. This is only - // considered if SortKeys is true. - SpewKeys bool -} - -// Config is the active configuration of the top-level functions. -// The configuration can be changed by modifying the contents of spew.Config. -var Config = ConfigState{Indent: " "} - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the formatted string as a value that satisfies error. See NewFormatter -// for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, c.convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, c.convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, c.convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a Formatter interface returned by c.NewFormatter. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, c.convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Print(a ...interface{}) (n int, err error) { - return fmt.Print(c.convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, c.convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Println(a ...interface{}) (n int, err error) { - return fmt.Println(c.convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprint(a ...interface{}) string { - return fmt.Sprint(c.convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a Formatter interface returned by c.NewFormatter. It returns -// the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, c.convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a Formatter interface returned by c.NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b)) -func (c *ConfigState) Sprintln(a ...interface{}) string { - return fmt.Sprintln(c.convertArgs(a)...) -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. 
- -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -c.Printf, c.Println, or c.Printf. -*/ -func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(c, v) -} - -// Fdump formats and displays the passed arguments to io.Writer w. It formats -// exactly the same as Dump. -func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) { - fdump(c, w, a...) -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by modifying the public members -of c. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func (c *ConfigState) Dump(a ...interface{}) { - fdump(c, os.Stdout, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func (c *ConfigState) Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(c, &buf, a...) - return buf.String() -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a spew Formatter interface using -// the ConfigState associated with s. -func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = newFormatter(c, arg) - } - return formatters -} - -// NewDefaultConfig returns a ConfigState with the following default settings. -// -// Indent: " " -// MaxDepth: 0 -// DisableMethods: false -// DisablePointerMethods: false -// ContinueOnMethod: false -// SortKeys: false -func NewDefaultConfig() *ConfigState { - return &ConfigState{Indent: " "} -} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go deleted file mode 100644 index aacaac6..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/doc.go +++ /dev/null @@ -1,211 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. 
- * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -/* -Package spew implements a deep pretty printer for Go data structures to aid in -debugging. - -A quick overview of the additional features spew provides over the built-in -printing facilities for Go data types are as follows: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output (only when using - Dump style) - -There are two different approaches spew allows for dumping Go data structures: - - * Dump style which prints with newlines, customizable indentation, - and additional debug information such as types and all pointer addresses - used to indirect to the final value - * A custom Formatter interface that integrates cleanly with the standard fmt - package and replaces %v, %+v, %#v, and %#+v to provide inline printing - similar to the default %v while providing the additional functionality - outlined above and passing unsupported format verbs such as %x and %q - along to fmt - -Quick Start - -This section demonstrates how to quickly get started with spew. See the -sections below for further details on formatting and configuration options. - -To dump a variable with full newlines, indentation, type, and pointer -information use Dump, Fdump, or Sdump: - spew.Dump(myVar1, myVar2, ...) - spew.Fdump(someWriter, myVar1, myVar2, ...) - str := spew.Sdump(myVar1, myVar2, ...) - -Alternatively, if you would prefer to use format strings with a compacted inline -printing style, use the convenience wrappers Printf, Fprintf, etc with -%v (most compact), %+v (adds pointer addresses), %#v (adds types), or -%#+v (adds types and pointer addresses): - spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2) - spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4) - -Configuration Options - -Configuration of spew is handled by fields in the ConfigState type. For -convenience, all of the top-level functions use a global state available -via the spew.Config global. - -It is also possible to create a ConfigState instance that provides methods -equivalent to the top-level functions. This allows concurrent configuration -options. See the ConfigState documentation for more details. - -The following configuration options are available: - * Indent - String to use for each indentation level for Dump functions. - It is a single space by default. A popular alternative is "\t". - - * MaxDepth - Maximum number of levels to descend into nested data structures. - There is no limit by default. 
- - * DisableMethods - Disables invocation of error and Stringer interface methods. - Method invocation is enabled by default. - - * DisablePointerMethods - Disables invocation of error and Stringer interface methods on types - which only accept pointer receivers from non-pointer variables. - Pointer method invocation is enabled by default. - - * DisablePointerAddresses - DisablePointerAddresses specifies whether to disable the printing of - pointer addresses. This is useful when diffing data structures in tests. - - * DisableCapacities - DisableCapacities specifies whether to disable the printing of - capacities for arrays, slices, maps and channels. This is useful when - diffing data structures in tests. - - * ContinueOnMethod - Enables recursion into types after invoking error and Stringer interface - methods. Recursion after method invocation is disabled by default. - - * SortKeys - Specifies map keys should be sorted before being printed. Use - this to have a more deterministic, diffable output. Note that - only native types (bool, int, uint, floats, uintptr and string) - and types which implement error or Stringer interfaces are - supported with other types sorted according to the - reflect.Value.String() output which guarantees display - stability. Natural map order is used by default. - - * SpewKeys - Specifies that, as a last resort attempt, map keys should be - spewed to strings and sorted by those strings. This is only - considered if SortKeys is true. - -Dump Usage - -Simply call spew.Dump with a list of variables you want to dump: - - spew.Dump(myVar1, myVar2, ...) - -You may also call spew.Fdump if you would prefer to output to an arbitrary -io.Writer. For example, to dump to standard error: - - spew.Fdump(os.Stderr, myVar1, myVar2, ...) - -A third option is to call spew.Sdump to get the formatted output as a string: - - str := spew.Sdump(myVar1, myVar2, ...) - -Sample Dump Output - -See the Dump example for details on the setup of the types and variables being -shown here. - - (main.Foo) { - unexportedField: (*main.Bar)(0xf84002e210)({ - flag: (main.Flag) flagTwo, - data: (uintptr) - }), - ExportedField: (map[interface {}]interface {}) (len=1) { - (string) (len=3) "one": (bool) true - } - } - -Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C -command as shown. - ([]uint8) (len=32 cap=32) { - 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... | - 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0| - 00000020 31 32 |12| - } - -Custom Formatter - -Spew provides a custom formatter that implements the fmt.Formatter interface -so that it integrates cleanly with standard fmt package printing functions. The -formatter is useful for inline printing of smaller data types similar to the -standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Custom Formatter Usage - -The simplest way to make use of the spew custom formatter is to call one of the -convenience functions such as spew.Printf, spew.Println, or spew.Printf. 
The
-functions have syntax you are most likely already familiar with:
-
- spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
- spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
- spew.Println(myVar, myVar2)
- spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
- spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
-
-See the Index for the full list convenience functions.
-
-Sample Formatter Output
-
-Double pointer to a uint8:
- %v: <**>5
- %+v: <**>(0xf8400420d0->0xf8400420c8)5
- %#v: (**uint8)5
- %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
-
-Pointer to circular struct with a uint8 field and a pointer to itself:
- %v: <*>{1 <*><shown>}
- %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
- %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
- %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
-
-See the Printf example for details on the setup of variables being shown
-here.
-
-Errors
-
-Since it is possible for custom Stringer/error interfaces to panic, spew
-detects them and handles them internally by printing the panic information
-inline with the output. Since spew is intended to provide deep pretty printing
-capabilities on structures, it intentionally does not return any errors.
-*/
-package spew
diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go
deleted file mode 100644
index f78d89f..0000000
--- a/vendor/github.com/davecgh/go-spew/spew/dump.go
+++ /dev/null
@@ -1,509 +0,0 @@
-/*
- * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- */
-
-package spew
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io"
- "os"
- "reflect"
- "regexp"
- "strconv"
- "strings"
-)
-
-var (
- // uint8Type is a reflect.Type representing a uint8. It is used to
- // convert cgo types to uint8 slices for hexdumping.
- uint8Type = reflect.TypeOf(uint8(0))
-
- // cCharRE is a regular expression that matches a cgo char.
- // It is used to detect character arrays to hexdump them.
- cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`)
-
- // cUnsignedCharRE is a regular expression that matches a cgo unsigned
- // char. It is used to detect unsigned character arrays to hexdump
- // them.
- cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`)
-
- // cUint8tCharRE is a regular expression that matches a cgo uint8_t.
- // It is used to detect uint8_t arrays to hexdump them.
- cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`)
-)
-
-// dumpState contains information about the state of a dump operation.
-type dumpState struct {
- w io.Writer
- depth int
- pointers map[uintptr]int
- ignoreNextType bool
- ignoreNextIndent bool
- cs *ConfigState
-}
-
-// indent performs indentation according to the depth level and cs.Indent
-// option.
-func (d *dumpState) indent() { - if d.ignoreNextIndent { - d.ignoreNextIndent = false - return - } - d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) -} - -// unpackValue returns values inside of non-nil interfaces when possible. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface && !v.IsNil() { - v = v.Elem() - } - return v -} - -// dumpPtr handles formatting of pointers by indirecting them as necessary. -func (d *dumpState) dumpPtr(v reflect.Value) { - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range d.pointers { - if depth >= d.depth { - delete(d.pointers, k) - } - } - - // Keep list of all dereferenced pointers to show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by dereferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := d.pointers[addr]; ok && pd < d.depth { - cycleFound = true - indirects-- - break - } - d.pointers[addr] = d.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type information. - d.w.Write(openParenBytes) - d.w.Write(bytes.Repeat(asteriskBytes, indirects)) - d.w.Write([]byte(ve.Type().String())) - d.w.Write(closeParenBytes) - - // Display pointer information. - if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { - d.w.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - d.w.Write(pointerChainBytes) - } - printHexPtr(d.w, addr) - } - d.w.Write(closeParenBytes) - } - - // Display dereferenced value. - d.w.Write(openParenBytes) - switch { - case nilFound: - d.w.Write(nilAngleBytes) - - case cycleFound: - d.w.Write(circularBytes) - - default: - d.ignoreNextType = true - d.dump(ve) - } - d.w.Write(closeParenBytes) -} - -// dumpSlice handles formatting of arrays and slices. Byte (uint8 under -// reflection) arrays and slices are dumped in hexdump -C fashion. -func (d *dumpState) dumpSlice(v reflect.Value) { - // Determine whether this type should be hex dumped or not. Also, - // for types which should be hexdumped, try to use the underlying data - // first, then fall back to trying to convert them to a uint8 slice. - var buf []uint8 - doConvert := false - doHexDump := false - numEntries := v.Len() - if numEntries > 0 { - vt := v.Index(0).Type() - vts := vt.String() - switch { - // C types that need to be converted. - case cCharRE.MatchString(vts): - fallthrough - case cUnsignedCharRE.MatchString(vts): - fallthrough - case cUint8tCharRE.MatchString(vts): - doConvert = true - - // Try to use existing uint8 slices and fall back to converting - // and copying if that fails. - case vt.Kind() == reflect.Uint8: - // We need an addressable interface to convert the type - // to a byte slice. However, the reflect package won't - // give us an interface on certain things like - // unexported struct fields in order to enforce - // visibility rules. 
We use unsafe, when available, to - // bypass these restrictions since this package does not - // mutate the values. - vs := v - if !vs.CanInterface() || !vs.CanAddr() { - vs = unsafeReflectValue(vs) - } - if !UnsafeDisabled { - vs = vs.Slice(0, numEntries) - - // Use the existing uint8 slice if it can be - // type asserted. - iface := vs.Interface() - if slice, ok := iface.([]uint8); ok { - buf = slice - doHexDump = true - break - } - } - - // The underlying data needs to be converted if it can't - // be type asserted to a uint8 slice. - doConvert = true - } - - // Copy and convert the underlying type if needed. - if doConvert && vt.ConvertibleTo(uint8Type) { - // Convert and copy each element into a uint8 byte - // slice. - buf = make([]uint8, numEntries) - for i := 0; i < numEntries; i++ { - vv := v.Index(i) - buf[i] = uint8(vv.Convert(uint8Type).Uint()) - } - doHexDump = true - } - } - - // Hexdump the entire slice as needed. - if doHexDump { - indent := strings.Repeat(d.cs.Indent, d.depth) - str := indent + hex.Dump(buf) - str = strings.Replace(str, "\n", "\n"+indent, -1) - str = strings.TrimRight(str, d.cs.Indent) - d.w.Write([]byte(str)) - return - } - - // Recursively call dump for each item. - for i := 0; i < numEntries; i++ { - d.dump(d.unpackValue(v.Index(i))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } -} - -// dump is the main workhorse for dumping a value. It uses the passed reflect -// value to figure out what kind of object we are dealing with and formats it -// appropriately. It is a recursive function, however circular data structures -// are detected and handled properly. -func (d *dumpState) dump(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - d.w.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - d.indent() - d.dumpPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !d.ignoreNextType { - d.indent() - d.w.Write(openParenBytes) - d.w.Write([]byte(v.Type().String())) - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - d.ignoreNextType = false - - // Display length and capacity if the built-in len and cap functions - // work with the value's kind and the len/cap itself is non-zero. - valueLen, valueCap := 0, 0 - switch v.Kind() { - case reflect.Array, reflect.Slice, reflect.Chan: - valueLen, valueCap = v.Len(), v.Cap() - case reflect.Map, reflect.String: - valueLen = v.Len() - } - if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { - d.w.Write(openParenBytes) - if valueLen != 0 { - d.w.Write(lenEqualsBytes) - printInt(d.w, int64(valueLen), 10) - } - if !d.cs.DisableCapacities && valueCap != 0 { - if valueLen != 0 { - d.w.Write(spaceBytes) - } - d.w.Write(capEqualsBytes) - printInt(d.w, int64(valueCap), 10) - } - d.w.Write(closeParenBytes) - d.w.Write(spaceBytes) - } - - // Call Stringer/error interfaces if they exist and the handle methods flag - // is enabled - if !d.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(d.cs, d.w, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. 
- - case reflect.Bool: - printBool(d.w, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(d.w, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(d.w, v.Uint(), 10) - - case reflect.Float32: - printFloat(d.w, v.Float(), 32) - - case reflect.Float64: - printFloat(d.w, v.Float(), 64) - - case reflect.Complex64: - printComplex(d.w, v.Complex(), 32) - - case reflect.Complex128: - printComplex(d.w, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - d.dumpSlice(v) - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.String: - d.w.Write([]byte(strconv.Quote(v.String()))) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - d.w.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. - - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - d.w.Write(nilAngleBytes) - break - } - - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - numEntries := v.Len() - keys := v.MapKeys() - if d.cs.SortKeys { - sortValues(keys, d.cs) - } - for i, key := range keys { - d.dump(d.unpackValue(key)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.MapIndex(key))) - if i < (numEntries - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Struct: - d.w.Write(openBraceNewlineBytes) - d.depth++ - if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { - d.indent() - d.w.Write(maxNewlineBytes) - } else { - vt := v.Type() - numFields := v.NumField() - for i := 0; i < numFields; i++ { - d.indent() - vtf := vt.Field(i) - d.w.Write([]byte(vtf.Name)) - d.w.Write(colonSpaceBytes) - d.ignoreNextIndent = true - d.dump(d.unpackValue(v.Field(i))) - if i < (numFields - 1) { - d.w.Write(commaNewlineBytes) - } else { - d.w.Write(newlineBytes) - } - } - } - d.depth-- - d.indent() - d.w.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(d.w, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(d.w, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it in case any new - // types are added. - default: - if v.CanInterface() { - fmt.Fprintf(d.w, "%v", v.Interface()) - } else { - fmt.Fprintf(d.w, "%v", v.String()) - } - } -} - -// fdump is a helper function to consolidate the logic from the various public -// methods which take varying writers and config states. -func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { - for _, arg := range a { - if arg == nil { - w.Write(interfaceBytes) - w.Write(spaceBytes) - w.Write(nilAngleBytes) - w.Write(newlineBytes) - continue - } - - d := dumpState{w: w, cs: cs} - d.pointers = make(map[uintptr]int) - d.dump(reflect.ValueOf(arg)) - d.w.Write(newlineBytes) - } -} - -// Fdump formats and displays the passed arguments to io.Writer w. 
It formats -// exactly the same as Dump. -func Fdump(w io.Writer, a ...interface{}) { - fdump(&Config, w, a...) -} - -// Sdump returns a string with the passed arguments formatted exactly the same -// as Dump. -func Sdump(a ...interface{}) string { - var buf bytes.Buffer - fdump(&Config, &buf, a...) - return buf.String() -} - -/* -Dump displays the passed parameters to standard out with newlines, customizable -indentation, and additional debug information such as complete types and all -pointer addresses used to indirect to the final value. It provides the -following features over the built-in printing facilities provided by the fmt -package: - - * Pointers are dereferenced and followed - * Circular data structures are detected and handled properly - * Custom Stringer/error interfaces are optionally invoked, including - on unexported types - * Custom types which only implement the Stringer/error interfaces via - a pointer receiver are optionally invoked when passing non-pointer - variables - * Byte arrays and slices are dumped like the hexdump -C command which - includes offsets, byte values in hex, and ASCII output - -The configuration options are controlled by an exported package global, -spew.Config. See ConfigState for options documentation. - -See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to -get the formatted result as a string. -*/ -func Dump(a ...interface{}) { - fdump(&Config, os.Stdout, a...) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go deleted file mode 100644 index b04edb7..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/format.go +++ /dev/null @@ -1,419 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "bytes" - "fmt" - "reflect" - "strconv" - "strings" -) - -// supportedFlags is a list of all the character flags supported by fmt package. -const supportedFlags = "0-+# " - -// formatState implements the fmt.Formatter interface and contains information -// about the state of a formatting operation. The NewFormatter function can -// be used to get a new Formatter which can be used directly as arguments -// in standard fmt package printing calls. -type formatState struct { - value interface{} - fs fmt.State - depth int - pointers map[uintptr]int - ignoreNextType bool - cs *ConfigState -} - -// buildDefaultFormat recreates the original format string without precision -// and width information to pass in to fmt.Sprintf in the case of an -// unrecognized type. Unless new types are added to the language, this -// function won't ever be called. 
-func (f *formatState) buildDefaultFormat() (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - buf.WriteRune('v') - - format = buf.String() - return format -} - -// constructOrigFormat recreates the original format string including precision -// and width information to pass along to the standard fmt package. This allows -// automatic deferral of all format strings this package doesn't support. -func (f *formatState) constructOrigFormat(verb rune) (format string) { - buf := bytes.NewBuffer(percentBytes) - - for _, flag := range supportedFlags { - if f.fs.Flag(int(flag)) { - buf.WriteRune(flag) - } - } - - if width, ok := f.fs.Width(); ok { - buf.WriteString(strconv.Itoa(width)) - } - - if precision, ok := f.fs.Precision(); ok { - buf.Write(precisionBytes) - buf.WriteString(strconv.Itoa(precision)) - } - - buf.WriteRune(verb) - - format = buf.String() - return format -} - -// unpackValue returns values inside of non-nil interfaces when possible and -// ensures that types for values which have been unpacked from an interface -// are displayed when the show types flag is also set. -// This is useful for data types like structs, arrays, slices, and maps which -// can contain varying types packed inside an interface. -func (f *formatState) unpackValue(v reflect.Value) reflect.Value { - if v.Kind() == reflect.Interface { - f.ignoreNextType = false - if !v.IsNil() { - v = v.Elem() - } - } - return v -} - -// formatPtr handles formatting of pointers by indirecting them as necessary. -func (f *formatState) formatPtr(v reflect.Value) { - // Display nil if top level pointer is nil. - showTypes := f.fs.Flag('#') - if v.IsNil() && (!showTypes || f.ignoreNextType) { - f.fs.Write(nilAngleBytes) - return - } - - // Remove pointers at or below the current depth from map used to detect - // circular refs. - for k, depth := range f.pointers { - if depth >= f.depth { - delete(f.pointers, k) - } - } - - // Keep list of all dereferenced pointers to possibly show later. - pointerChain := make([]uintptr, 0) - - // Figure out how many levels of indirection there are by derferencing - // pointers and unpacking interfaces down the chain while detecting circular - // references. - nilFound := false - cycleFound := false - indirects := 0 - ve := v - for ve.Kind() == reflect.Ptr { - if ve.IsNil() { - nilFound = true - break - } - indirects++ - addr := ve.Pointer() - pointerChain = append(pointerChain, addr) - if pd, ok := f.pointers[addr]; ok && pd < f.depth { - cycleFound = true - indirects-- - break - } - f.pointers[addr] = f.depth - - ve = ve.Elem() - if ve.Kind() == reflect.Interface { - if ve.IsNil() { - nilFound = true - break - } - ve = ve.Elem() - } - } - - // Display type or indirection level depending on flags. - if showTypes && !f.ignoreNextType { - f.fs.Write(openParenBytes) - f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) - f.fs.Write([]byte(ve.Type().String())) - f.fs.Write(closeParenBytes) - } else { - if nilFound || cycleFound { - indirects += strings.Count(ve.Type().String(), "*") - } - f.fs.Write(openAngleBytes) - f.fs.Write([]byte(strings.Repeat("*", indirects))) - f.fs.Write(closeAngleBytes) - } - - // Display pointer information depending on flags. 
- if f.fs.Flag('+') && (len(pointerChain) > 0) { - f.fs.Write(openParenBytes) - for i, addr := range pointerChain { - if i > 0 { - f.fs.Write(pointerChainBytes) - } - printHexPtr(f.fs, addr) - } - f.fs.Write(closeParenBytes) - } - - // Display dereferenced value. - switch { - case nilFound: - f.fs.Write(nilAngleBytes) - - case cycleFound: - f.fs.Write(circularShortBytes) - - default: - f.ignoreNextType = true - f.format(ve) - } -} - -// format is the main workhorse for providing the Formatter interface. It -// uses the passed reflect value to figure out what kind of object we are -// dealing with and formats it appropriately. It is a recursive function, -// however circular data structures are detected and handled properly. -func (f *formatState) format(v reflect.Value) { - // Handle invalid reflect values immediately. - kind := v.Kind() - if kind == reflect.Invalid { - f.fs.Write(invalidAngleBytes) - return - } - - // Handle pointers specially. - if kind == reflect.Ptr { - f.formatPtr(v) - return - } - - // Print type information unless already handled elsewhere. - if !f.ignoreNextType && f.fs.Flag('#') { - f.fs.Write(openParenBytes) - f.fs.Write([]byte(v.Type().String())) - f.fs.Write(closeParenBytes) - } - f.ignoreNextType = false - - // Call Stringer/error interfaces if they exist and the handle methods - // flag is enabled. - if !f.cs.DisableMethods { - if (kind != reflect.Invalid) && (kind != reflect.Interface) { - if handled := handleMethods(f.cs, f.fs, v); handled { - return - } - } - } - - switch kind { - case reflect.Invalid: - // Do nothing. We should never get here since invalid has already - // been handled above. - - case reflect.Bool: - printBool(f.fs, v.Bool()) - - case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: - printInt(f.fs, v.Int(), 10) - - case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: - printUint(f.fs, v.Uint(), 10) - - case reflect.Float32: - printFloat(f.fs, v.Float(), 32) - - case reflect.Float64: - printFloat(f.fs, v.Float(), 64) - - case reflect.Complex64: - printComplex(f.fs, v.Complex(), 32) - - case reflect.Complex128: - printComplex(f.fs, v.Complex(), 64) - - case reflect.Slice: - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - fallthrough - - case reflect.Array: - f.fs.Write(openBracketBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - numEntries := v.Len() - for i := 0; i < numEntries; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(v.Index(i))) - } - } - f.depth-- - f.fs.Write(closeBracketBytes) - - case reflect.String: - f.fs.Write([]byte(v.String())) - - case reflect.Interface: - // The only time we should get here is for nil interfaces due to - // unpackValue calls. - if v.IsNil() { - f.fs.Write(nilAngleBytes) - } - - case reflect.Ptr: - // Do nothing. We should never get here since pointers have already - // been handled above. 
- - case reflect.Map: - // nil maps should be indicated as different than empty maps - if v.IsNil() { - f.fs.Write(nilAngleBytes) - break - } - - f.fs.Write(openMapBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - keys := v.MapKeys() - if f.cs.SortKeys { - sortValues(keys, f.cs) - } - for i, key := range keys { - if i > 0 { - f.fs.Write(spaceBytes) - } - f.ignoreNextType = true - f.format(f.unpackValue(key)) - f.fs.Write(colonBytes) - f.ignoreNextType = true - f.format(f.unpackValue(v.MapIndex(key))) - } - } - f.depth-- - f.fs.Write(closeMapBytes) - - case reflect.Struct: - numFields := v.NumField() - f.fs.Write(openBraceBytes) - f.depth++ - if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { - f.fs.Write(maxShortBytes) - } else { - vt := v.Type() - for i := 0; i < numFields; i++ { - if i > 0 { - f.fs.Write(spaceBytes) - } - vtf := vt.Field(i) - if f.fs.Flag('+') || f.fs.Flag('#') { - f.fs.Write([]byte(vtf.Name)) - f.fs.Write(colonBytes) - } - f.format(f.unpackValue(v.Field(i))) - } - } - f.depth-- - f.fs.Write(closeBraceBytes) - - case reflect.Uintptr: - printHexPtr(f.fs, uintptr(v.Uint())) - - case reflect.UnsafePointer, reflect.Chan, reflect.Func: - printHexPtr(f.fs, v.Pointer()) - - // There were not any other types at the time this code was written, but - // fall back to letting the default fmt package handle it if any get added. - default: - format := f.buildDefaultFormat() - if v.CanInterface() { - fmt.Fprintf(f.fs, format, v.Interface()) - } else { - fmt.Fprintf(f.fs, format, v.String()) - } - } -} - -// Format satisfies the fmt.Formatter interface. See NewFormatter for usage -// details. -func (f *formatState) Format(fs fmt.State, verb rune) { - f.fs = fs - - // Use standard formatting for verbs that are not v. - if verb != 'v' { - format := f.constructOrigFormat(verb) - fmt.Fprintf(fs, format, f.value) - return - } - - if f.value == nil { - if fs.Flag('#') { - fs.Write(interfaceBytes) - } - fs.Write(nilAngleBytes) - return - } - - f.format(reflect.ValueOf(f.value)) -} - -// newFormatter is a helper function to consolidate the logic from the various -// public methods which take varying config states. -func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter { - fs := &formatState{value: v, cs: cs} - fs.pointers = make(map[uintptr]int) - return fs -} - -/* -NewFormatter returns a custom formatter that satisfies the fmt.Formatter -interface. As a result, it integrates cleanly with standard fmt package -printing functions. The formatter is useful for inline printing of smaller data -types similar to the standard %v format specifier. - -The custom formatter only responds to the %v (most compact), %+v (adds pointer -addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb -combinations. Any other verbs such as %x and %q will be sent to the the -standard fmt package for formatting. In addition, the custom formatter ignores -the width and precision arguments (however they will still work on the format -specifiers not handled by the custom formatter). - -Typically this function shouldn't be called directly. It is much easier to make -use of the custom formatter by calling one of the convenience functions such as -Printf, Println, or Fprintf. 
-*/ -func NewFormatter(v interface{}) fmt.Formatter { - return newFormatter(&Config, v) -} diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go deleted file mode 100644 index 32c0e33..0000000 --- a/vendor/github.com/davecgh/go-spew/spew/spew.go +++ /dev/null @@ -1,148 +0,0 @@ -/* - * Copyright (c) 2013-2016 Dave Collins - * - * Permission to use, copy, modify, and distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -package spew - -import ( - "fmt" - "io" -) - -// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the formatted string as a value that satisfies error. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Errorf(format string, a ...interface{}) (err error) { - return fmt.Errorf(format, convertArgs(a)...) -} - -// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprint(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprint(w, convertArgs(a)...) -} - -// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { - return fmt.Fprintf(w, format, convertArgs(a)...) -} - -// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it -// passed with a default Formatter interface returned by NewFormatter. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b)) -func Fprintln(w io.Writer, a ...interface{}) (n int, err error) { - return fmt.Fprintln(w, convertArgs(a)...) -} - -// Print is a wrapper for fmt.Print that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. 
-// -// This function is shorthand for the following syntax: -// -// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b)) -func Print(a ...interface{}) (n int, err error) { - return fmt.Print(convertArgs(a)...) -} - -// Printf is a wrapper for fmt.Printf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Printf(format string, a ...interface{}) (n int, err error) { - return fmt.Printf(format, convertArgs(a)...) -} - -// Println is a wrapper for fmt.Println that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the number of bytes written and any write error encountered. See -// NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b)) -func Println(a ...interface{}) (n int, err error) { - return fmt.Println(convertArgs(a)...) -} - -// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprint(a ...interface{}) string { - return fmt.Sprint(convertArgs(a)...) -} - -// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were -// passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintf(format string, a ...interface{}) string { - return fmt.Sprintf(format, convertArgs(a)...) -} - -// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it -// were passed with a default Formatter interface returned by NewFormatter. It -// returns the resulting string. See NewFormatter for formatting details. -// -// This function is shorthand for the following syntax: -// -// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) -func Sprintln(a ...interface{}) string { - return fmt.Sprintln(convertArgs(a)...) -} - -// convertArgs accepts a slice of arguments and returns a slice of the same -// length with each argument converted to a default spew Formatter interface. -func convertArgs(args []interface{}) (formatters []interface{}) { - formatters = make([]interface{}, len(args)) - for index, arg := range args { - formatters[index] = NewFormatter(arg) - } - return formatters -} diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE new file mode 100644 index 0000000..37ec93a --- /dev/null +++ b/vendor/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. 
+ +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. + +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. 
+ +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. + +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. 
+ +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. + +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README new file mode 100644 index 0000000..387b4eb --- /dev/null +++ b/vendor/github.com/golang/glog/README @@ -0,0 +1,44 @@ +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. + + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go new file mode 100644 index 0000000..54bd7af --- /dev/null +++ b/vendor/github.com/golang/glog/glog.go @@ -0,0 +1,1180 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. 
+// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. +// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. 
+func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. *Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. 
+ continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. +// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. + t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. 
+type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. + mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. 
+ // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" + line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. 
+func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. 
+ for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. +var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. 
+ var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. + for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". 
+ if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. 
+func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). 
+func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) +} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go new file mode 100644 index 0000000..65075d2 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. 
+var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. +func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml new file mode 100644 index 0000000..4f2ee4d --- /dev/null +++ b/vendor/github.com/google/btree/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/k8s.io/utils/LICENSE b/vendor/github.com/google/btree/LICENSE similarity index 100% rename from vendor/k8s.io/utils/LICENSE rename to vendor/github.com/google/btree/LICENSE diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 0000000..6062a4d --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. 
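
The glog README and package documentation and the btree README vendored above describe the two packages' APIs only in prose; the following is a minimal sketch of how a consumer might exercise both, assuming the btree.Int convenience type is present in this vendored copy (as it is upstream) and using a hypothetical standalone main package:

    package main

    import (
        "flag"

        "github.com/golang/glog"
        "github.com/google/btree"
    )

    func main() {
        // glog is configured through flags, so parse them before logging,
        // and flush buffered log lines on the way out.
        flag.Parse()
        defer glog.Flush()

        glog.Info("building an in-memory index")
        if glog.V(2) {
            glog.Info("verbose detail, emitted only with -v=2 or higher")
        }

        // New(2) creates a 2-3-4 tree; btree.Int is the package's
        // ready-made Item implementation for ints.
        tree := btree.New(2)
        for i := 0; i < 10; i++ {
            tree.ReplaceOrInsert(btree.Int(i))
        }
        glog.Infof("tree holds %d items; contains 3: %v", tree.Len(), tree.Has(btree.Int(3)))
    }

In this repository the two packages appear to arrive only as transitive dependencies of the Kubernetes client used for the in-cluster check, so application code would not normally import them directly.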
diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 0000000..6ff062f --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,890 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. 
+type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. +func NewWithFreeList(degree int, f *FreeList) *BTree { + if degree <= 1 { + panic("bad degree") + } + return &BTree{ + degree: degree, + cow: ©OnWriteContext{freelist: f}, + } +} + +// items stores items in a node. +type items []Item + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *items) insertAt(index int, item Item) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = item +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. +func (s *items) removeAt(index int) Item { + item := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return item +} + +// pop removes and returns the last element in the list. +func (s *items) pop() (out Item) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index items. index must be less than or equal to length. +func (s *items) truncate(index int) { + var toClear items + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilItems):] + } +} + +// find returns the index where the given item should be inserted into this +// list. 'found' is true if the item already exists in the list at the given +// index. +func (s items) find(item Item) (index int, found bool) { + i := sort.Search(len(s), func(i int) bool { + return item.Less(s[i]) + }) + if i > 0 && !s[i-1].Less(item) { + return i - 1, true + } + return i, false +} + +// children stores child nodes in a node. +type children []*node + +// insertAt inserts a value into the given index, pushing all subsequent values +// forward. +func (s *children) insertAt(index int, n *node) { + *s = append(*s, nil) + if index < len(*s) { + copy((*s)[index+1:], (*s)[index:]) + } + (*s)[index] = n +} + +// removeAt removes a value at a given index, pulling all subsequent values +// back. 
+func (s *children) removeAt(index int) *node { + n := (*s)[index] + copy((*s)[index:], (*s)[index+1:]) + (*s)[len(*s)-1] = nil + *s = (*s)[:len(*s)-1] + return n +} + +// pop removes and returns the last element in the list. +func (s *children) pop() (out *node) { + index := len(*s) - 1 + out = (*s)[index] + (*s)[index] = nil + *s = (*s)[:index] + return +} + +// truncate truncates this instance at index so that it contains only the +// first index children. index must be less than or equal to length. +func (s *children) truncate(index int) { + var toClear children + *s, toClear = (*s)[:index], (*s)[index:] + for len(toClear) > 0 { + toClear = toClear[copy(toClear, nilChildren):] + } +} + +// node is an internal node in a tree. +// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. 
+func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. +type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. +func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start != nil { + index, _ = n.items.find(start) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start != nil { + index, found = n.items.find(start) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start != nil && !n.items[i].Less(start) { + if !includeStart || hit || start.Less(n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. 
+func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). 
+func (c *copyOnWriteContext) freeNode(n *node) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. 
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range (pivot, last], until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. 
+func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml new file mode 100644 index 0000000..597bc99 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/.travis.yml @@ -0,0 +1,18 @@ +sudo: false +language: go +matrix: + allow_failures: + - go: master + fast_finish: true + include: + - go: 1.10.x + - go: 1.11.x + env: GOFMT=1 + - go: master +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 0000000..81316be --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md new file mode 100644 index 0000000..51e7d23 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -0,0 +1,29 @@ +httpcache +========= + +[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) + +Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. + +It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). + +This project isn't actively maintained; it works for what I, and seemingly others, want to do with it, and I consider it "done". That said, if you find any issues, please open a Pull Request and I will try to review it. Any changes now that change the public API won't be considered. 
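Editor's note: a minimal usage sketch of the vendored httpcache API, illustrative only and not part of the upstream README. It wraps the built-in in-memory backend in an `*http.Client`; whether a repeated GET is actually served from the cache depends on the server's caching headers, and a cache hit is marked with the `X-From-Cache` header.

```go
package main

import (
	"fmt"

	"github.com/gregjones/httpcache"
)

func main() {
	// http.Client whose RoundTripper caches responses in an in-memory map.
	client := httpcache.NewMemoryCacheTransport().Client()

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			panic(err)
		}
		resp.Body.Close()
		// The transport sets X-From-Cache on responses it returned from cache.
		fmt.Println("from cache:", resp.Header.Get(httpcache.XFromCache) == "1")
	}
}
```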
+ +Cache Backends +-------------- + +- The built-in 'memory' cache stores responses in an in-memory map. +- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. +- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. +- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. +- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). +- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. +- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. +- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). + +If you implement any other backend and wish it to be linked here, please send a PR editing this file. + +License +------- + +- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 0000000..42e3129 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. 
+func NewWithDiskv(d *diskv.Diskv) *Cache { + return &Cache{d} +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go new file mode 100644 index 0000000..b41a63d --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -0,0 +1,551 @@ +// Package httpcache provides a http.RoundTripper implementation that works as a +// mostly RFC-compliant cache for http responses. +// +// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client +// and not for a shared proxy). +// +package httpcache + +import ( + "bufio" + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "strings" + "sync" + "time" +) + +const ( + stale = iota + fresh + transparent + // XFromCache is the header added to responses that are returned from the cache + XFromCache = "X-From-Cache" +) + +// A Cache interface is used by the Transport to store and retrieve responses. +type Cache interface { + // Get returns the []byte representation of a cached response and a bool + // set to true if the value isn't empty + Get(key string) (responseBytes []byte, ok bool) + // Set stores the []byte representation of a response against a key + Set(key string, responseBytes []byte) + // Delete removes the value associated with the key + Delete(key string) +} + +// cacheKey returns the cache key for req. +func cacheKey(req *http.Request) string { + if req.Method == http.MethodGet { + return req.URL.String() + } else { + return req.Method + " " + req.URL.String() + } +} + +// CachedResponse returns the cached http.Response for req if present, and nil +// otherwise. +func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { + cachedVal, ok := c.Get(cacheKey(req)) + if !ok { + return + } + + b := bytes.NewBuffer(cachedVal) + return http.ReadResponse(bufio.NewReader(b), req) +} + +// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. 
+type MemoryCache struct { + mu sync.RWMutex + items map[string][]byte +} + +// Get returns the []byte representation of the response and true if present, false if not +func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { + c.mu.RLock() + resp, ok = c.items[key] + c.mu.RUnlock() + return resp, ok +} + +// Set saves response resp to the cache with key +func (c *MemoryCache) Set(key string, resp []byte) { + c.mu.Lock() + c.items[key] = resp + c.mu.Unlock() +} + +// Delete removes key from the cache +func (c *MemoryCache) Delete(key string) { + c.mu.Lock() + delete(c.items, key) + c.mu.Unlock() +} + +// NewMemoryCache returns a new Cache that will store items in an in-memory map +func NewMemoryCache() *MemoryCache { + c := &MemoryCache{items: map[string][]byte{}} + return c +} + +// Transport is an implementation of http.RoundTripper that will return values from a cache +// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) +// to repeated requests allowing servers to return 304 / Not Modified +type Transport struct { + // The RoundTripper interface actually used to make requests + // If nil, http.DefaultTransport is used + Transport http.RoundTripper + Cache Cache + // If true, responses returned from the cache will be given an extra header, X-From-Cache + MarkCachedResponses bool +} + +// NewTransport returns a new Transport with the +// provided Cache implementation and MarkCachedResponses set to true +func NewTransport(c Cache) *Transport { + return &Transport{Cache: c, MarkCachedResponses: true} +} + +// Client returns an *http.Client that caches responses. +func (t *Transport) Client() *http.Client { + return &http.Client{Transport: t} +} + +// varyMatches will return false unless all of the cached values for the headers listed in Vary +// match the new request +func varyMatches(cachedResp *http.Response, req *http.Request) bool { + for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { + header = http.CanonicalHeaderKey(header) + if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { + return false + } + } + return true +} + +// RoundTrip takes a Request and returns a Response +// +// If there is a fresh Response already in cache, then it will be returned without connecting to +// the server. +// +// If there is a stale Response, then any validators it contains will be set on the new request +// to give the server a chance to respond with NotModified. If this happens, then the cached Response +// will be returned. 
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. + resp.Body = &cachingReadCloser{ + R: resp.Body, + OnEOF: func(r io.Reader) { + resp := *resp + resp.Body = ioutil.NopCloser(r) + respBytes, err := httputil.DumpResponse(&resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + }, + } + default: + respBytes, err := httputil.DumpResponse(resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + } + } else { + t.Cache.Delete(cacheKey) + } + return resp, nil +} + +// ErrNoDateHeader indicates that the HTTP headers contained no Date header. +var ErrNoDateHeader = errors.New("no Date header") + +// Date parses and returns the value of the Date header. 
+func Date(respHeaders http.Header) (date time.Time, err error) { + dateHeader := respHeaders.Get("date") + if dateHeader == "" { + err = ErrNoDateHeader + return + } + + return time.Parse(time.RFC1123, dateHeader) +} + +type realClock struct{} + +func (c *realClock) since(d time.Time) time.Duration { + return time.Since(d) +} + +type timer interface { + since(d time.Time) time.Duration +} + +var clock timer = &realClock{} + +// getFreshness will return one of fresh/stale/transparent based on the cache-control +// values of the request and the response +// +// fresh indicates the response can be returned +// stale indicates that the response needs validating before it is returned +// transparent indicates the response should not be used to fulfil the request +// +// Because this is only a private cache, 'public' and 'private' in cache-control aren't +// signficant. Similarly, smax-age isn't used. +func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + if _, ok := reqCacheControl["no-cache"]; ok { + return transparent + } + if _, ok := respCacheControl["no-cache"]; ok { + return stale + } + if _, ok := reqCacheControl["only-if-cached"]; ok { + return fresh + } + + date, err := Date(respHeaders) + if err != nil { + return stale + } + currentAge := clock.since(date) + + var lifetime time.Duration + var zeroDuration time.Duration + + // If a response includes both an Expires header and a max-age directive, + // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. + if maxAge, ok := respCacheControl["max-age"]; ok { + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } else { + expiresHeader := respHeaders.Get("Expires") + if expiresHeader != "" { + expires, err := time.Parse(time.RFC1123, expiresHeader) + if err != nil { + lifetime = zeroDuration + } else { + lifetime = expires.Sub(date) + } + } + } + + if maxAge, ok := reqCacheControl["max-age"]; ok { + // the client is willing to accept a response whose age is no greater than the specified time in seconds + lifetime, err = time.ParseDuration(maxAge + "s") + if err != nil { + lifetime = zeroDuration + } + } + if minfresh, ok := reqCacheControl["min-fresh"]; ok { + // the client wants a response that will still be fresh for at least the specified number of seconds. + minfreshDuration, err := time.ParseDuration(minfresh + "s") + if err == nil { + currentAge = time.Duration(currentAge + minfreshDuration) + } + } + + if maxstale, ok := reqCacheControl["max-stale"]; ok { + // Indicates that the client is willing to accept a response that has exceeded its expiration time. + // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded + // its expiration time by no more than the specified number of seconds. + // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. + // + // Responses served only because of a max-stale value are supposed to have a Warning header added to them, + // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different + // return-value available here. 
+ if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": {}, + "Keep-Alive": {}, + "Proxy-Authenticate": {}, + "Proxy-Authorization": {}, + "Te": {}, + "Trailers": {}, + "Transfer-Encoding": {}, + "Upgrade": {}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. +func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE new file mode 100644 index 0000000..41ce7f1 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2011-2012 Peter Bourgon + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md new file mode 100644 index 0000000..3474739 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/README.md @@ -0,0 +1,141 @@ +# What is diskv? + +Diskv (disk-vee) is a simple, persistent key-value store written in the Go +language. It starts with an incredibly simple API for storing arbitrary data on +a filesystem by key, and builds several layers of performance-enhancing +abstraction on top. The end result is a conceptually simple, but highly +performant, disk-backed storage system. + +[![Build Status][1]][2] + +[1]: https://drone.io/github.com/peterbourgon/diskv/status.png +[2]: https://drone.io/github.com/peterbourgon/diskv/latest + + +# Installing + +Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. +Then, + +```bash +$ go get github.com/peterbourgon/diskv +``` + +[3]: http://golang.org +[4]: http://golang.org/doc/install/source +[5]: http://golang.org/doc/install + + +# Usage + +```go +package main + +import ( + "fmt" + "github.com/peterbourgon/diskv" +) + +func main() { + // Simplest transform function: put all the data files into the base dir. + flatTransform := func(s string) []string { return []string{} } + + // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. + d := diskv.New(diskv.Options{ + BasePath: "my-data-dir", + Transform: flatTransform, + CacheSizeMax: 1024 * 1024, + }) + + // Write three bytes to the key "alpha". + key := "alpha" + d.Write(key, []byte{'1', '2', '3'}) + + // Read the value back out of the store. + value, _ := d.Read(key) + fmt.Printf("%v\n", value) + + // Erase the key+value from the store (and the disk). + d.Erase(key) +} +``` + +More complex examples can be found in the "examples" subdirectory. + + +# Theory + +## Basic idea + +At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). +The data is written to a single file on disk, with the same name as the key. +The key determines where that file will be stored, via a user-provided +`TransformFunc`, which takes a key and returns a slice (`[]string`) +corresponding to a path list where the key file will be stored. The simplest +TransformFunc, + +```go +func SimpleTransform (key string) []string { + return []string{} +} +``` + +will place all keys in the same, base directory. The design is inspired by +[Redis diskstore][6]; a TransformFunc which emulates the default diskstore +behavior is available in the content-addressable-storage example. + +[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 + +**Note** that your TransformFunc should ensure that one valid key doesn't +transform to a subset of another valid key. That is, it shouldn't be possible +to construct valid keys that resolve to directory names. 
As a concrete example, +if your TransformFunc splits on every 3 characters, then + +```go +d.Write("abcabc", val) // OK: written to /abc/abc/abcabc +d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory +``` + +This will be addressed in an upcoming version of diskv. + +Probably the most important design principle behind diskv is that your data is +always flatly available on the disk. diskv will never do anything that would +prevent you from accessing, copying, backing up, or otherwise interacting with +your data via common UNIX commandline tools. + +## Adding a cache + +An in-memory caching layer is provided by combining the BasicStore +functionality with a simple map structure, and keeping it up-to-date as +appropriate. Since the map structure in Go is not threadsafe, it's combined +with a RWMutex to provide safe concurrent access. + +## Adding order + +diskv is a key-value store and therefore inherently unordered. An ordering +system can be injected into the store by passing something which satisfies the +diskv.Index interface. (A default implementation, using Google's +[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a +user-provided Less function) index of the keys, which can be queried. + +[7]: https://github.com/google/btree + +## Adding compression + +Something which implements the diskv.Compression interface may be passed +during store creation, so that all Writes and Reads are filtered through +a compression/decompression pipeline. Several default implementations, +using stdlib compression algorithms, are provided. Note that data is cached +compressed; the cost of decompression is borne with each Read. + +## Streaming + +diskv also now provides ReadStream and WriteStream methods, to allow very large +data to be handled efficiently. + + +# Future plans + + * Needs plenty of robust testing: huge datasets, etc... + * More thorough benchmarking + * Your suggestions for use-cases I haven't thought of diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go new file mode 100644 index 0000000..5192b02 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/compression.go @@ -0,0 +1,64 @@ +package diskv + +import ( + "compress/flate" + "compress/gzip" + "compress/zlib" + "io" +) + +// Compression is an interface that Diskv uses to implement compression of +// data. Writer takes a destination io.Writer and returns a WriteCloser that +// compresses all data written through it. Reader takes a source io.Reader and +// returns a ReadCloser that decompresses all data read through it. You may +// define these methods on your own type, or use one of the NewCompression +// helpers. +type Compression interface { + Writer(dst io.Writer) (io.WriteCloser, error) + Reader(src io.Reader) (io.ReadCloser, error) +} + +// NewGzipCompression returns a Gzip-based Compression. +func NewGzipCompression() Compression { + return NewGzipCompressionLevel(flate.DefaultCompression) +} + +// NewGzipCompressionLevel returns a Gzip-based Compression with the given level. +func NewGzipCompressionLevel(level int) Compression { + return &genericCompression{ + wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) }, + rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }, + } +} + +// NewZlibCompression returns a Zlib-based Compression. 
+func NewZlibCompression() Compression { + return NewZlibCompressionLevel(flate.DefaultCompression) +} + +// NewZlibCompressionLevel returns a Zlib-based Compression with the given level. +func NewZlibCompressionLevel(level int) Compression { + return NewZlibCompressionLevelDict(level, nil) +} + +// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given +// level, based on the given dictionary. +func NewZlibCompressionLevelDict(level int, dict []byte) Compression { + return &genericCompression{ + func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, + func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, + } +} + +type genericCompression struct { + wf func(w io.Writer) (io.WriteCloser, error) + rf func(r io.Reader) (io.ReadCloser, error) +} + +func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) { + return g.wf(dst) +} + +func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) { + return g.rf(src) +} diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go new file mode 100644 index 0000000..524dc0a --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/diskv.go @@ -0,0 +1,624 @@ +// Diskv (disk-vee) is a simple, persistent, key-value store. +// It stores all data flatly on the filesystem. + +package diskv + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "sync" + "syscall" +) + +const ( + defaultBasePath = "diskv" + defaultFilePerm os.FileMode = 0666 + defaultPathPerm os.FileMode = 0777 +) + +var ( + defaultTransform = func(s string) []string { return []string{} } + errCanceled = errors.New("canceled") + errEmptyKey = errors.New("empty key") + errBadKey = errors.New("bad key") + errImportDirectory = errors.New("can't import a directory") +) + +// TransformFunction transforms a key into a slice of strings, with each +// element in the slice representing a directory in the file path where the +// key's entry will eventually be stored. +// +// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"], +// the final location of the data file will be /ab/cde/f/abcdef +type TransformFunction func(s string) []string + +// Options define a set of properties that dictate Diskv behavior. +// All values are optional. +type Options struct { + BasePath string + Transform TransformFunction + CacheSizeMax uint64 // bytes + PathPerm os.FileMode + FilePerm os.FileMode + // If TempDir is set, it will enable filesystem atomic writes by + // writing temporary files to that location before being moved + // to BasePath. + // Note that TempDir MUST be on the same device/partition as + // BasePath. + TempDir string + + Index Index + IndexLess LessFunction + + Compression Compression +} + +// Diskv implements the Diskv interface. You shouldn't construct Diskv +// structures directly; instead, use the New constructor. +type Diskv struct { + Options + mu sync.RWMutex + cache map[string][]byte + cacheSize uint64 +} + +// New returns an initialized Diskv structure, ready to use. +// If the path identified by baseDir already contains data, +// it will be accessible, but not yet cached. 
+func New(o Options) *Diskv { + if o.BasePath == "" { + o.BasePath = defaultBasePath + } + if o.Transform == nil { + o.Transform = defaultTransform + } + if o.PathPerm == 0 { + o.PathPerm = defaultPathPerm + } + if o.FilePerm == 0 { + o.FilePerm = defaultFilePerm + } + + d := &Diskv{ + Options: o, + cache: map[string][]byte{}, + cacheSize: 0, + } + + if d.Index != nil && d.IndexLess != nil { + d.Index.Initialize(d.IndexLess, d.Keys(nil)) + } + + return d +} + +// Write synchronously writes the key-value pair to disk, making it immediately +// available for reads. Write relies on the filesystem to perform an eventual +// sync to physical media. If you need stronger guarantees, see WriteStream. +func (d *Diskv) Write(key string, val []byte) error { + return d.WriteStream(key, bytes.NewBuffer(val), false) +} + +// WriteStream writes the data represented by the io.Reader to the disk, under +// the provided key. If sync is true, WriteStream performs an explicit sync on +// the file as soon as it's written. +// +// bytes.Buffer provides io.Reader semantics for basic data types. +func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { + if len(key) <= 0 { + return errEmptyKey + } + + d.mu.Lock() + defer d.mu.Unlock() + + return d.writeStreamWithLock(key, r, sync) +} + +// createKeyFileWithLock either creates the key file directly, or +// creates a temporary file in TempDir if it is set. +func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) { + if d.TempDir != "" { + if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil { + return nil, fmt.Errorf("temp mkdir: %s", err) + } + f, err := ioutil.TempFile(d.TempDir, "") + if err != nil { + return nil, fmt.Errorf("temp file: %s", err) + } + + if err := f.Chmod(d.FilePerm); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return nil, fmt.Errorf("chmod: %s", err) + } + return f, nil + } + + mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists + f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm) + if err != nil { + return nil, fmt.Errorf("open file: %s", err) + } + return f, nil +} + +// writeStream does no input validation checking. 
+func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error { + if err := d.ensurePathWithLock(key); err != nil { + return fmt.Errorf("ensure path: %s", err) + } + + f, err := d.createKeyFileWithLock(key) + if err != nil { + return fmt.Errorf("create key file: %s", err) + } + + wc := io.WriteCloser(&nopWriteCloser{f}) + if d.Compression != nil { + wc, err = d.Compression.Writer(f) + if err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("compression writer: %s", err) + } + } + + if _, err := io.Copy(wc, r); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("i/o copy: %s", err) + } + + if err := wc.Close(); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("compression close: %s", err) + } + + if sync { + if err := f.Sync(); err != nil { + f.Close() // error deliberately ignored + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("file sync: %s", err) + } + } + + if err := f.Close(); err != nil { + return fmt.Errorf("file close: %s", err) + } + + if f.Name() != d.completeFilename(key) { + if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil { + os.Remove(f.Name()) // error deliberately ignored + return fmt.Errorf("rename: %s", err) + } + } + + if d.Index != nil { + d.Index.Insert(key) + } + + d.bustCacheWithLock(key) // cache only on read + + return nil +} + +// Import imports the source file into diskv under the destination key. If the +// destination key already exists, it's overwritten. If move is true, the +// source file is removed after a successful import. +func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) { + if dstKey == "" { + return errEmptyKey + } + + if fi, err := os.Stat(srcFilename); err != nil { + return err + } else if fi.IsDir() { + return errImportDirectory + } + + d.mu.Lock() + defer d.mu.Unlock() + + if err := d.ensurePathWithLock(dstKey); err != nil { + return fmt.Errorf("ensure path: %s", err) + } + + if move { + if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil { + d.bustCacheWithLock(dstKey) + return nil + } else if err != syscall.EXDEV { + // If it failed due to being on a different device, fall back to copying + return err + } + } + + f, err := os.Open(srcFilename) + if err != nil { + return err + } + defer f.Close() + err = d.writeStreamWithLock(dstKey, f, false) + if err == nil && move { + err = os.Remove(srcFilename) + } + return err +} + +// Read reads the key and returns the value. +// If the key is available in the cache, Read won't touch the disk. +// If the key is not in the cache, Read will have the side-effect of +// lazily caching the value. +func (d *Diskv) Read(key string) ([]byte, error) { + rc, err := d.ReadStream(key, false) + if err != nil { + return []byte{}, err + } + defer rc.Close() + return ioutil.ReadAll(rc) +} + +// ReadStream reads the key and returns the value (data) as an io.ReadCloser. +// If the value is cached from a previous read, and direct is false, +// ReadStream will use the cached value. Otherwise, it will return a handle to +// the file on disk, and cache the data on read. +// +// If direct is true, ReadStream will lazily delete any cached value for the +// key, and return a direct handle to the file on disk. 
+// +// If compression is enabled, ReadStream taps into the io.Reader stream prior +// to decompression, and caches the compressed data. +func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { + d.mu.RLock() + defer d.mu.RUnlock() + + if val, ok := d.cache[key]; ok { + if !direct { + buf := bytes.NewBuffer(val) + if d.Compression != nil { + return d.Compression.Reader(buf) + } + return ioutil.NopCloser(buf), nil + } + + go func() { + d.mu.Lock() + defer d.mu.Unlock() + d.uncacheWithLock(key, uint64(len(val))) + }() + } + + return d.readWithRLock(key) +} + +// read ignores the cache, and returns an io.ReadCloser representing the +// decompressed data for the given key, streamed from the disk. Clients should +// acquire a read lock on the Diskv and check the cache themselves before +// calling read. +func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) { + filename := d.completeFilename(key) + + fi, err := os.Stat(filename) + if err != nil { + return nil, err + } + if fi.IsDir() { + return nil, os.ErrNotExist + } + + f, err := os.Open(filename) + if err != nil { + return nil, err + } + + var r io.Reader + if d.CacheSizeMax > 0 { + r = newSiphon(f, d, key) + } else { + r = &closingReader{f} + } + + var rc = io.ReadCloser(ioutil.NopCloser(r)) + if d.Compression != nil { + rc, err = d.Compression.Reader(r) + if err != nil { + return nil, err + } + } + + return rc, nil +} + +// closingReader provides a Reader that automatically closes the +// embedded ReadCloser when it reaches EOF +type closingReader struct { + rc io.ReadCloser +} + +func (cr closingReader) Read(p []byte) (int, error) { + n, err := cr.rc.Read(p) + if err == io.EOF { + if closeErr := cr.rc.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + } + return n, err +} + +// siphon is like a TeeReader: it copies all data read through it to an +// internal buffer, and moves that buffer to the cache at EOF. +type siphon struct { + f *os.File + d *Diskv + key string + buf *bytes.Buffer +} + +// newSiphon constructs a siphoning reader that represents the passed file. +// When a successful series of reads ends in an EOF, the siphon will write +// the buffered data to Diskv's cache under the given key. +func newSiphon(f *os.File, d *Diskv, key string) io.Reader { + return &siphon{ + f: f, + d: d, + key: key, + buf: &bytes.Buffer{}, + } +} + +// Read implements the io.Reader interface for siphon. +func (s *siphon) Read(p []byte) (int, error) { + n, err := s.f.Read(p) + + if err == nil { + return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed + } + + if err == io.EOF { + s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail + if closeErr := s.f.Close(); closeErr != nil { + return n, closeErr // close must succeed for Read to succeed + } + return n, err + } + + return n, err +} + +// Erase synchronously erases the given key from the disk and the cache. +func (d *Diskv) Erase(key string) error { + d.mu.Lock() + defer d.mu.Unlock() + + d.bustCacheWithLock(key) + + // erase from index + if d.Index != nil { + d.Index.Delete(key) + } + + // erase from disk + filename := d.completeFilename(key) + if s, err := os.Stat(filename); err == nil { + if s.IsDir() { + return errBadKey + } + if err = os.Remove(filename); err != nil { + return err + } + } else { + // Return err as-is so caller can do os.IsNotExist(err). 
+ return err + } + + // clean up and return + d.pruneDirsWithLock(key) + return nil +} + +// EraseAll will delete all of the data from the store, both in the cache and on +// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- +// diskv-related data. Care should be taken to always specify a diskv base +// directory that is exclusively for diskv data. +func (d *Diskv) EraseAll() error { + d.mu.Lock() + defer d.mu.Unlock() + d.cache = make(map[string][]byte) + d.cacheSize = 0 + if d.TempDir != "" { + os.RemoveAll(d.TempDir) // errors ignored + } + return os.RemoveAll(d.BasePath) +} + +// Has returns true if the given key exists. +func (d *Diskv) Has(key string) bool { + d.mu.Lock() + defer d.mu.Unlock() + + if _, ok := d.cache[key]; ok { + return true + } + + filename := d.completeFilename(key) + s, err := os.Stat(filename) + if err != nil { + return false + } + if s.IsDir() { + return false + } + + return true +} + +// Keys returns a channel that will yield every key accessible by the store, +// in undefined order. If a cancel channel is provided, closing it will +// terminate and close the keys channel. +func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { + return d.KeysPrefix("", cancel) +} + +// KeysPrefix returns a channel that will yield every key accessible by the +// store with the given prefix, in undefined order. If a cancel channel is +// provided, closing it will terminate and close the keys channel. If the +// provided prefix is the empty string, all keys will be yielded. +func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { + var prepath string + if prefix == "" { + prepath = d.BasePath + } else { + prepath = d.pathFor(prefix) + } + c := make(chan string) + go func() { + filepath.Walk(prepath, walker(c, prefix, cancel)) + close(c) + }() + return c +} + +// walker returns a function which satisfies the filepath.WalkFunc interface. +// It sends every non-directory file entry down the channel c. +func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { + return func(path string, info os.FileInfo, err error) error { + if err != nil { + return err + } + + if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) { + return nil // "pass" + } + + select { + case c <- info.Name(): + case <-cancel: + return errCanceled + } + + return nil + } +} + +// pathFor returns the absolute path for location on the filesystem where the +// data for the given key will be stored. +func (d *Diskv) pathFor(key string) string { + return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...)) +} + +// ensurePathWithLock is a helper function that generates all necessary +// directories on the filesystem for the given key. +func (d *Diskv) ensurePathWithLock(key string) error { + return os.MkdirAll(d.pathFor(key), d.PathPerm) +} + +// completeFilename returns the absolute path to the file for the given key. +func (d *Diskv) completeFilename(key string) string { + return filepath.Join(d.pathFor(key), key) +} + +// cacheWithLock attempts to cache the given key-value pair in the store's +// cache. It can fail if the value is larger than the cache's maximum size. 
+func (d *Diskv) cacheWithLock(key string, val []byte) error { + valueSize := uint64(len(val)) + if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { + return fmt.Errorf("%s; not caching", err) + } + + // be very strict about memory guarantees + if (d.cacheSize + valueSize) > d.CacheSizeMax { + panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) + } + + d.cache[key] = val + d.cacheSize += valueSize + return nil +} + +// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. +func (d *Diskv) cacheWithoutLock(key string, val []byte) error { + d.mu.Lock() + defer d.mu.Unlock() + return d.cacheWithLock(key, val) +} + +func (d *Diskv) bustCacheWithLock(key string) { + if val, ok := d.cache[key]; ok { + d.uncacheWithLock(key, uint64(len(val))) + } +} + +func (d *Diskv) uncacheWithLock(key string, sz uint64) { + d.cacheSize -= sz + delete(d.cache, key) +} + +// pruneDirsWithLock deletes empty directories in the path walk leading to the +// key k. Typically this function is called after an Erase is made. +func (d *Diskv) pruneDirsWithLock(key string) error { + pathlist := d.Transform(key) + for i := range pathlist { + dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) + + // thanks to Steven Blenkinsop for this snippet + switch fi, err := os.Stat(dir); true { + case err != nil: + return err + case !fi.IsDir(): + panic(fmt.Sprintf("corrupt dirstate at %s", dir)) + } + + nlinks, err := filepath.Glob(filepath.Join(dir, "*")) + if err != nil { + return err + } else if len(nlinks) > 0 { + return nil // has subdirs -- do not prune + } + if err = os.Remove(dir); err != nil { + return err + } + } + + return nil +} + +// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order +// until the cache has at least valueSize bytes available. +func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { + if valueSize > d.CacheSizeMax { + return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) + } + + safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } + + for key, val := range d.cache { + if safe() { + break + } + + d.uncacheWithLock(key, uint64(len(val))) + } + + if !safe() { + panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) + } + + return nil +} + +// nopWriteCloser wraps an io.Writer and provides a no-op Close method to +// satisfy the io.WriteCloser interface. +type nopWriteCloser struct { + io.Writer +} + +func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } +func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go new file mode 100644 index 0000000..96fee51 --- /dev/null +++ b/vendor/github.com/peterbourgon/diskv/index.go @@ -0,0 +1,115 @@ +package diskv + +import ( + "sync" + + "github.com/google/btree" +) + +// Index is a generic interface for things that can +// provide an ordered list of keys. +type Index interface { + Initialize(less LessFunction, keys <-chan string) + Insert(key string) + Delete(key string) + Keys(from string, n int) []string +} + +// LessFunction is used to initialize an Index of keys in a specific order. +type LessFunction func(string, string) bool + +// btreeString is a custom data type that satisfies the BTree Less interface, +// making the strings it wraps sortable by the BTree package. 
+type btreeString struct { + s string + l LessFunction +} + +// Less satisfies the BTree.Less interface using the btreeString's LessFunction. +func (s btreeString) Less(i btree.Item) bool { + return s.l(s.s, i.(btreeString).s) +} + +// BTreeIndex is an implementation of the Index interface using google/btree. +type BTreeIndex struct { + sync.RWMutex + LessFunction + *btree.BTree +} + +// Initialize populates the BTree tree with data from the keys channel, +// according to the passed less function. It's destructive to the BTreeIndex. +func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { + i.Lock() + defer i.Unlock() + i.LessFunction = less + i.BTree = rebuild(less, keys) +} + +// Insert inserts the given key (only) into the BTree tree. +func (i *BTreeIndex) Insert(key string) { + i.Lock() + defer i.Unlock() + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) +} + +// Delete removes the given key (only) from the BTree tree. +func (i *BTreeIndex) Delete(key string) { + i.Lock() + defer i.Unlock() + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) +} + +// Keys yields a maximum of n keys in order. If the passed 'from' key is empty, +// Keys will return the first n keys. If the passed 'from' key is non-empty, the +// first key in the returned slice will be the key that immediately follows the +// passed key, in key order. +func (i *BTreeIndex) Keys(from string, n int) []string { + i.RLock() + defer i.RUnlock() + + if i.BTree == nil || i.LessFunction == nil { + panic("uninitialized index") + } + + if i.BTree.Len() <= 0 { + return []string{} + } + + btreeFrom := btreeString{s: from, l: i.LessFunction} + skipFirst := true + if len(from) <= 0 || !i.BTree.Has(btreeFrom) { + // no such key, so fabricate an always-smallest item + btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} + skipFirst = false + } + + keys := []string{} + iterator := func(i btree.Item) bool { + keys = append(keys, i.(btreeString).s) + return len(keys) < n + } + i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) + + if skipFirst && len(keys) > 0 { + keys = keys[1:] + } + + return keys +} + +// rebuildIndex does the work of regenerating the index +// with the given keys. +func rebuild(less LessFunction, keys <-chan string) *btree.BTree { + tree := btree.New(2) + for key := range keys { + tree.ReplaceOrInsert(btreeString{s: key, l: less}) + } + return tree +} diff --git a/vendor/k8s.io/api/node/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go similarity index 57% rename from vendor/k8s.io/api/node/v1alpha1/doc.go rename to vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go index dfe9954..8a5d1fb 100644 --- a/vendor/k8s.io/api/node/v1alpha1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,9 +15,11 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true -// +groupName=node.k8s.io - -package v1alpha1 // import "k8s.io/api/node/v1alpha1" +// Package v1alpha1 is the v1alpha1 version of the API. 
+// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration +// InitializerConfiguration and validatingWebhookConfiguration is for the +// new dynamic admission controller configuration. +// +groupName=admissionregistration.k8s.io +package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go new file mode 100644 index 0000000..b87f74e --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go @@ -0,0 +1,1027 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +// DO NOT EDIT! + +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto + + It has these top-level messages: + Initializer + InitializerConfiguration + InitializerConfigurationList + Rule +*/ +package v1alpha1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *Initializer) Reset() { *m = Initializer{} } +func (*Initializer) ProtoMessage() {} +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } +func (*InitializerConfiguration) ProtoMessage() {} +func (*InitializerConfiguration) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } +func (*InitializerConfigurationList) ProtoMessage() {} +func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} +} + +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } + +func init() { + proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") + proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") + proto.RegisterType((*InitializerConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") + proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") +} +func (m *Initializer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *InitializerConfiguration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.Initializers) > 0 { + for _, msg := range m.Initializers { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *InitializerConfigurationList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Rule) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Rule) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Initializer) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *InitializerConfiguration) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Initializers) > 0 { + for _, e := range m.Initializers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *InitializerConfigurationList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *Rule) Size() (n int) { + var l int + _ = l + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.APIVersions) > 0 { + for _, s := range m.APIVersions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Resources) > 0 { + for _, s := range m.Resources { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Initializer) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Initializer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Rules:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "Rule", "Rule", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InitializerConfiguration) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InitializerConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Initializers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *InitializerConfigurationList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&InitializerConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "InitializerConfiguration", "InitializerConfiguration", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Rule) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Rule{`, + `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, + `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, + `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Initializer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, Rule{}) + if err := 
m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitializerConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitializerConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Initializers = append(m.Initializers, Initializer{}) + if err := m.Initializers[len(m.Initializers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: InitializerConfigurationList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: InitializerConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, InitializerConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Rule) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Rule: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.APIVersions = 
append(m.APIVersions, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 531 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 0x13, 0x31, + 0x18, 0x6e, 0x6c, 0x0b, 0x6d, 0xda, 0x45, 0x19, 0x3c, 0x94, 0x22, 0xd3, 0xd2, 0x53, 0x45, 0x4c, + 0xec, 0x22, 0x8b, 0xd7, 0x9d, 0x3d, 0x48, 0xc1, 0x8f, 0x25, 0x88, 0x07, 0xf1, 0x60, 0xda, 0xbe, + 0x3b, 
0x8d, 0xed, 0x4c, 0x86, 0x24, 0x53, 0xd0, 0x93, 0x17, 0xef, 0x82, 0x7f, 0xaa, 0xc7, 0x3d, + 0xee, 0xa9, 0xd8, 0x11, 0x3c, 0xfa, 0x1b, 0x24, 0x33, 0x9d, 0x9d, 0x59, 0xeb, 0xe2, 0xea, 0x2d, + 0xef, 0xf3, 0xe6, 0xf9, 0x4a, 0x30, 0x5b, 0x3c, 0xd1, 0x44, 0x48, 0xba, 0x88, 0x27, 0xa0, 0x42, + 0x30, 0xa0, 0xe9, 0x0a, 0xc2, 0x99, 0x54, 0x74, 0xb7, 0xe0, 0x91, 0xa0, 0x7c, 0x16, 0x08, 0xad, + 0x85, 0x0c, 0x15, 0xf8, 0x42, 0x1b, 0xc5, 0x8d, 0x90, 0x21, 0x5d, 0x8d, 0xf8, 0x32, 0x9a, 0xf3, + 0x11, 0xf5, 0x21, 0x04, 0xc5, 0x0d, 0xcc, 0x48, 0xa4, 0xa4, 0x91, 0xce, 0xfd, 0x8c, 0x4a, 0x78, + 0x24, 0xc8, 0x1f, 0xa9, 0x24, 0xa7, 0x76, 0x1f, 0xfa, 0xc2, 0xcc, 0xe3, 0x09, 0x99, 0xca, 0x80, + 0xfa, 0xd2, 0x97, 0x34, 0x55, 0x98, 0xc4, 0x67, 0xe9, 0x94, 0x0e, 0xe9, 0x29, 0x53, 0xee, 0x3e, + 0x2e, 0x42, 0x05, 0x7c, 0x3a, 0x17, 0x21, 0xa8, 0x0f, 0x34, 0x5a, 0xf8, 0x16, 0xd0, 0x34, 0x00, + 0xc3, 0xe9, 0x6a, 0x2f, 0x4f, 0x97, 0x5e, 0xc7, 0x52, 0x71, 0x68, 0x44, 0x00, 0x7b, 0x84, 0xa3, + 0xbf, 0x11, 0xf4, 0x74, 0x0e, 0x01, 0xff, 0x9d, 0x37, 0xf8, 0x8c, 0x70, 0x6b, 0x1c, 0x0a, 0x23, + 0xf8, 0x52, 0x7c, 0x04, 0xe5, 0xf4, 0x71, 0x2d, 0xe4, 0x01, 0x74, 0x50, 0x1f, 0x0d, 0x9b, 0x5e, + 0x7b, 0xbd, 0xe9, 0x55, 0x92, 0x4d, 0xaf, 0xf6, 0x82, 0x07, 0xc0, 0xd2, 0x8d, 0xf3, 0x0a, 0xd7, + 0x55, 0xbc, 0x04, 0xdd, 0xb9, 0xd5, 0xaf, 0x0e, 0x5b, 0x87, 0x94, 0xdc, 0xf8, 0xe9, 0x08, 0x8b, + 0x97, 0xe0, 0x1d, 0xec, 0x34, 0xeb, 0x76, 0xd2, 0x2c, 0x13, 0x1b, 0xfc, 0x44, 0xb8, 0x53, 0xca, + 0x71, 0x22, 0xc3, 0x33, 0xe1, 0xc7, 0x99, 0x80, 0xf3, 0x0e, 0x37, 0xec, 0x43, 0xcd, 0xb8, 0xe1, + 0x69, 0xb0, 0xd6, 0xe1, 0xa3, 0x92, 0xeb, 0x65, 0x5f, 0x12, 0x2d, 0x7c, 0x0b, 0x68, 0x62, 0x6f, + 0x93, 0xd5, 0x88, 0xbc, 0x9c, 0xbc, 0x87, 0xa9, 0x79, 0x0e, 0x86, 0x7b, 0xce, 0xce, 0x16, 0x17, + 0x18, 0xbb, 0x54, 0x75, 0x22, 0xdc, 0x16, 0x85, 0x7b, 0xde, 0xed, 0xe8, 0x1f, 0xba, 0x95, 0xc2, + 0x7b, 0x77, 0x77, 0x5e, 0xed, 0x12, 0xa8, 0xd9, 0x15, 0x87, 0xc1, 0x0f, 0x84, 0xef, 0x5d, 0x57, + 0xf8, 0x99, 0xd0, 0xc6, 0x79, 0xbb, 0x57, 0x9a, 0xdc, 0xac, 0xb4, 0x65, 0xa7, 0x95, 0xef, 0xec, + 0x62, 0x34, 0x72, 0xa4, 0x54, 0x78, 0x8e, 0xeb, 0xc2, 0x40, 0x90, 0x37, 0x3d, 0xf9, 0xbf, 0xa6, + 0x57, 0x52, 0x17, 0x3f, 0x3b, 0xb6, 0xca, 0x2c, 0x33, 0x18, 0x7c, 0x45, 0xb8, 0x66, 0xbf, 0xda, + 0x79, 0x80, 0x9b, 0x3c, 0x12, 0x4f, 0x95, 0x8c, 0x23, 0xdd, 0x41, 0xfd, 0xea, 0xb0, 0xe9, 0x1d, + 0x24, 0x9b, 0x5e, 0xf3, 0xf8, 0x74, 0x9c, 0x81, 0xac, 0xd8, 0x3b, 0x23, 0xdc, 0xe2, 0x91, 0x78, + 0x0d, 0xca, 0xe6, 0xc8, 0x52, 0x36, 0xbd, 0xdb, 0xc9, 0xa6, 0xd7, 0x3a, 0x3e, 0x1d, 0xe7, 0x30, + 0x2b, 0xdf, 0xb1, 0xfa, 0x0a, 0xb4, 0x8c, 0xd5, 0x14, 0x74, 0xa7, 0x5a, 0xe8, 0xb3, 0x1c, 0x64, + 0xc5, 0xde, 0x23, 0xeb, 0xad, 0x5b, 0x39, 0xdf, 0xba, 0x95, 0x8b, 0xad, 0x5b, 0xf9, 0x94, 0xb8, + 0x68, 0x9d, 0xb8, 0xe8, 0x3c, 0x71, 0xd1, 0x45, 0xe2, 0xa2, 0x6f, 0x89, 0x8b, 0xbe, 0x7c, 0x77, + 0x2b, 0x6f, 0x1a, 0x79, 0xe9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x06, 0xa3, 0xcb, 0x75, + 0x04, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto new file mode 100644 index 0000000..e17b559 --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto @@ -0,0 +1,107 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.api.admissionregistration.v1alpha1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1alpha1"; + +// Initializer describes the name and the failure policy of an initializer, and +// what resources it applies to. +message Initializer { + // Name is the identifier of the initializer. It will be added to the + // object that needs to be initialized. + // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where + // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required + optional string name = 1; + + // Rules describes what resources/subresources the initializer cares about. + // The initializer cares about an operation if it matches _any_ Rule. + // Rule.Resources must not include subresources. + repeated Rule rules = 2; +} + +// InitializerConfiguration describes the configuration of initializers. +message InitializerConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Initializers is a list of resources and their default initializers + // Order-sensitive. + // When merging multiple InitializerConfigurations, we sort the initializers + // from different InitializerConfigurations by the name of the + // InitializerConfigurations; the order of the initializers from the same + // InitializerConfiguration is preserved. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + repeated Initializer initializers = 2; +} + +// InitializerConfigurationList is a list of InitializerConfiguration. +message InitializerConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of InitializerConfiguration. + repeated InitializerConfiguration items = 2; +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +message Rule { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiGroups = 1; + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + repeated string apiVersions = 2; + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. 
+ // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. + repeated string resources = 3; +} + diff --git a/vendor/k8s.io/api/coordination/v1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go similarity index 86% rename from vendor/k8s.io/api/coordination/v1/register.go rename to vendor/k8s.io/api/admissionregistration/v1alpha1/register.go index 95b987b..e9a4164 100644 --- a/vendor/k8s.io/api/coordination/v1/register.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -22,11 +22,10 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" ) -// GroupName is the group name use in this package -const GroupName = "coordination.k8s.io" +const GroupName = "admissionregistration.k8s.io" // SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} // Resource takes an unqualified resource and returns a Group qualified GroupResource func Resource(resource string) schema.GroupResource { @@ -41,13 +40,12 @@ var ( AddToScheme = localSchemeBuilder.AddToScheme ) -// Adds the list of known types to api.Scheme. +// Adds the list of known types to scheme. func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, - &Lease{}, - &LeaseList{}, + &InitializerConfiguration{}, + &InitializerConfigurationList{}, ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) return nil } diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go new file mode 100644 index 0000000..a245f1e --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go @@ -0,0 +1,106 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitializerConfiguration describes the configuration of initializers. 
+type InitializerConfiguration struct { + metav1.TypeMeta `json:",inline"` + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // Initializers is a list of resources and their default initializers + // Order-sensitive. + // When merging multiple InitializerConfigurations, we sort the initializers + // from different InitializerConfigurations by the name of the + // InitializerConfigurations; the order of the initializers from the same + // InitializerConfiguration is preserved. + // +patchMergeKey=name + // +patchStrategy=merge + // +optional + Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// InitializerConfigurationList is a list of InitializerConfiguration. +type InitializerConfigurationList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of InitializerConfiguration. + Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// Initializer describes the name and the failure policy of an initializer, and +// what resources it applies to. +type Initializer struct { + // Name is the identifier of the initializer. It will be added to the + // object that needs to be initialized. + // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where + // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name + // of the organization. + // Required + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + + // Rules describes what resources/subresources the initializer cares about. + // The initializer cares about an operation if it matches _any_ Rule. + // Rule.Resources must not include subresources. + Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"` +} + +// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended +// to make sure that all the tuple expansions are valid. +type Rule struct { + // APIGroups is the API groups the resources belong to. '*' is all groups. + // If '*' is present, the length of the slice must be one. + // Required. + APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` + + // APIVersions is the API versions the resources belong to. '*' is all versions. + // If '*' is present, the length of the slice must be one. + // Required. + APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` + + // Resources is a list of resources this rule applies to. + // + // For example: + // 'pods' means pods. + // 'pods/log' means the log subresource of pods. + // '*' means all resources, but not subresources. + // 'pods/*' means all subresources of pods. + // '*/scale' means all scale subresources. + // '*/*' means all resources and their subresources. + // + // If wildcard is present, the validation rule will ensure resources do not + // overlap with each other. + // + // Depending on the enclosing object, subresources might not be allowed. + // Required. 
+ Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` +} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go new file mode 100644 index 0000000..69e4b7c --- /dev/null +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go @@ -0,0 +1,71 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_Initializer = map[string]string{ + "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", + "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", + "rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.", +} + +func (Initializer) SwaggerDoc() map[string]string { + return map_Initializer +} + +var map_InitializerConfiguration = map[string]string{ + "": "InitializerConfiguration describes the configuration of initializers.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "initializers": "Initializers is a list of resources and their default initializers Order-sensitive. When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.", +} + +func (InitializerConfiguration) SwaggerDoc() map[string]string { + return map_InitializerConfiguration +} + +var map_InitializerConfigurationList = map[string]string{ + "": "InitializerConfigurationList is a list of InitializerConfiguration.", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of InitializerConfiguration.", +} + +func (InitializerConfigurationList) SwaggerDoc() map[string]string { + return map_InitializerConfigurationList +} + +var map_Rule = map[string]string{ + "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", + "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", + "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", + "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", +} + +func (Rule) SwaggerDoc() map[string]string { + return map_Rule +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go similarity index 59% rename from vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go rename to vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go index c398952..9f636b4 100644 --- a/vendor/k8s.io/api/node/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go @@ -18,66 +18,62 @@ limitations under the License. // Code generated by deepcopy-gen. DO NOT EDIT. -package v1beta1 +package v1alpha1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Overhead) DeepCopyInto(out *Overhead) { +func (in *Initializer) DeepCopyInto(out *Initializer) { *out = *in - if in.PodFixed != nil { - in, out := &in.PodFixed, &out.PodFixed - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]Rule, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) } } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. -func (in *Overhead) DeepCopy() *Overhead { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. +func (in *Initializer) DeepCopy() *Initializer { if in == nil { return nil } - out := new(Overhead) + out := new(Initializer) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { +func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Overhead != nil { - in, out := &in.Overhead, &out.Overhead - *out = new(Overhead) - (*in).DeepCopyInto(*out) - } - if in.Scheduling != nil { - in, out := &in.Scheduling, &out.Scheduling - *out = new(Scheduling) - (*in).DeepCopyInto(*out) + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + *out = make([]Initializer, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. -func (in *RuntimeClass) DeepCopy() *RuntimeClass { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration. +func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration { if in == nil { return nil } - out := new(RuntimeClass) + out := new(InitializerConfiguration) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RuntimeClass) DeepCopyObject() runtime.Object { +func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -85,13 +81,13 @@ func (in *RuntimeClass) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { +func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items - *out = make([]RuntimeClass, len(*in)) + *out = make([]InitializerConfiguration, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -99,18 +95,18 @@ func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. -func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList. +func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList { if in == nil { return nil } - out := new(RuntimeClassList) + out := new(InitializerConfigurationList) in.DeepCopyInto(out) return out } // DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RuntimeClassList) DeepCopyObject() runtime.Object { +func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { if c := in.DeepCopy(); c != nil { return c } @@ -118,31 +114,32 @@ func (in *RuntimeClassList) DeepCopyObject() runtime.Object { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Scheduling) DeepCopyInto(out *Scheduling) { +func (in *Rule) DeepCopyInto(out *Rule) { *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } + if in.APIGroups != nil { + in, out := &in.APIGroups, &out.APIGroups + *out = make([]string, len(*in)) + copy(*out, *in) } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + if in.APIVersions != nil { + in, out := &in.APIVersions, &out.APIVersions + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Resources != nil { + in, out := &in.Resources, &out.Resources + *out = make([]string, len(*in)) + copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. -func (in *Scheduling) DeepCopy() *Scheduling { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. +func (in *Rule) DeepCopy() *Rule { if in == nil { return nil } - out := new(Scheduling) + out := new(Rule) in.DeepCopyInto(out) return out } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go index 0a40726..afbb3d6 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go @@ -15,12 +15,11 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true -// +groupName=admissionregistration.k8s.io // Package v1beta1 is the v1beta1 version of the API. // AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the +// InitializerConfiguration and validatingWebhookConfiguration is for the // new dynamic admission controller configuration. +// +groupName=admissionregistration.k8s.io package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go index c98aa74..d6c9d95 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go @@ -14,24 +14,39 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto + + It has these top-level messages: + MutatingWebhookConfiguration + MutatingWebhookConfigurationList + Rule + RuleWithOperations + ServiceReference + ValidatingWebhookConfiguration + ValidatingWebhookConfigurationList + Webhook + WebhookClientConfig +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,548 +57,105 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *MutatingWebhook) Reset() { *m = MutatingWebhook{} } -func (*MutatingWebhook) ProtoMessage() {} -func (*MutatingWebhook) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{0} -} -func (m *MutatingWebhook) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MutatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MutatingWebhook) XXX_Merge(src proto.Message) { - xxx_messageInfo_MutatingWebhook.Merge(m, src) -} -func (m *MutatingWebhook) XXX_Size() int { - return m.Size() -} -func (m *MutatingWebhook) XXX_DiscardUnknown() { - xxx_messageInfo_MutatingWebhook.DiscardUnknown(m) -} - -var xxx_messageInfo_MutatingWebhook proto.InternalMessageInfo +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } func (*MutatingWebhookConfiguration) ProtoMessage() {} func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{1} -} -func (m *MutatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MutatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MutatingWebhookConfiguration) XXX_Merge(src proto.Message) { - xxx_messageInfo_MutatingWebhookConfiguration.Merge(m, src) -} -func (m *MutatingWebhookConfiguration) XXX_Size() int { - return m.Size() -} -func (m *MutatingWebhookConfiguration) XXX_DiscardUnknown() { - xxx_messageInfo_MutatingWebhookConfiguration.DiscardUnknown(m) + return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_MutatingWebhookConfiguration proto.InternalMessageInfo - func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } func (*MutatingWebhookConfigurationList) ProtoMessage() {} func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{2} -} -func (m *MutatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { - 
return m.Unmarshal(b) -} -func (m *MutatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MutatingWebhookConfigurationList) XXX_Merge(src proto.Message) { - xxx_messageInfo_MutatingWebhookConfigurationList.Merge(m, src) -} -func (m *MutatingWebhookConfigurationList) XXX_Size() int { - return m.Size() -} -func (m *MutatingWebhookConfigurationList) XXX_DiscardUnknown() { - xxx_messageInfo_MutatingWebhookConfigurationList.DiscardUnknown(m) -} - -var xxx_messageInfo_MutatingWebhookConfigurationList proto.InternalMessageInfo - -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{3} -} -func (m *Rule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Rule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Rule) XXX_Merge(src proto.Message) { - xxx_messageInfo_Rule.Merge(m, src) -} -func (m *Rule) XXX_Size() int { - return m.Size() -} -func (m *Rule) XXX_DiscardUnknown() { - xxx_messageInfo_Rule.DiscardUnknown(m) -} - -var xxx_messageInfo_Rule proto.InternalMessageInfo - -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{4} -} -func (m *RuleWithOperations) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuleWithOperations) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuleWithOperations) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuleWithOperations.Merge(m, src) -} -func (m *RuleWithOperations) XXX_Size() int { - return m.Size() -} -func (m *RuleWithOperations) XXX_DiscardUnknown() { - xxx_messageInfo_RuleWithOperations.DiscardUnknown(m) -} - -var xxx_messageInfo_RuleWithOperations proto.InternalMessageInfo - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{5} -} -func (m *ServiceReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceReference.Merge(m, src) -} -func (m *ServiceReference) XXX_Size() int { - return m.Size() -} -func (m *ServiceReference) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceReference.DiscardUnknown(m) + return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_ServiceReference proto.InternalMessageInfo +func (m *Rule) Reset() { *m = Rule{} } +func (*Rule) ProtoMessage() {} +func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *ValidatingWebhook) Reset() { *m = ValidatingWebhook{} } -func (*ValidatingWebhook) ProtoMessage() {} -func (*ValidatingWebhook) Descriptor() ([]byte, []int) { - return 
fileDescriptor_abeea74cbc46f55a, []int{6} -} -func (m *ValidatingWebhook) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValidatingWebhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ValidatingWebhook) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatingWebhook.Merge(m, src) -} -func (m *ValidatingWebhook) XXX_Size() int { - return m.Size() -} -func (m *ValidatingWebhook) XXX_DiscardUnknown() { - xxx_messageInfo_ValidatingWebhook.DiscardUnknown(m) -} +func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } +func (*RuleWithOperations) ProtoMessage() {} +func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_ValidatingWebhook proto.InternalMessageInfo +func (m *ServiceReference) Reset() { *m = ServiceReference{} } +func (*ServiceReference) ProtoMessage() {} +func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } func (*ValidatingWebhookConfiguration) ProtoMessage() {} func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{7} -} -func (m *ValidatingWebhookConfiguration) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValidatingWebhookConfiguration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ValidatingWebhookConfiguration) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatingWebhookConfiguration.Merge(m, src) -} -func (m *ValidatingWebhookConfiguration) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{5} } -func (m *ValidatingWebhookConfiguration) XXX_DiscardUnknown() { - xxx_messageInfo_ValidatingWebhookConfiguration.DiscardUnknown(m) -} - -var xxx_messageInfo_ValidatingWebhookConfiguration proto.InternalMessageInfo func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } func (*ValidatingWebhookConfigurationList) ProtoMessage() {} func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{8} -} -func (m *ValidatingWebhookConfigurationList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ValidatingWebhookConfigurationList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ValidatingWebhookConfigurationList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ValidatingWebhookConfigurationList.Merge(m, src) -} -func (m *ValidatingWebhookConfigurationList) XXX_Size() int { - return m.Size() -} -func (m *ValidatingWebhookConfigurationList) XXX_DiscardUnknown() { - xxx_messageInfo_ValidatingWebhookConfigurationList.DiscardUnknown(m) + return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_ValidatingWebhookConfigurationList proto.InternalMessageInfo - -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_abeea74cbc46f55a, []int{9} -} -func (m 
*WebhookClientConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_WebhookClientConfig.Merge(m, src) -} -func (m *WebhookClientConfig) XXX_Size() int { - return m.Size() -} -func (m *WebhookClientConfig) XXX_DiscardUnknown() { - xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) -} +func (m *Webhook) Reset() { *m = Webhook{} } +func (*Webhook) ProtoMessage() {} +func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo +func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } +func (*WebhookClientConfig) ProtoMessage() {} +func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func init() { - proto.RegisterType((*MutatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhook") proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") - proto.RegisterType((*ValidatingWebhook)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhook") proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") + proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptor_abeea74cbc46f55a) -} - -var fileDescriptor_abeea74cbc46f55a = []byte{ - // 1114 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4d, 0x6f, 0x23, 0x45, - 0x13, 0xce, 0xc4, 0xf6, 0xda, 0x6e, 0x27, 0xbb, 0x9b, 0x7e, 0x5f, 0x76, 0x4d, 0x58, 0x79, 0x2c, - 0x1f, 0x90, 0x25, 0xd8, 0x99, 0x4d, 0x40, 0x08, 0x16, 0x10, 0x8a, 0x03, 0x0b, 0x91, 0x92, 0xdd, - 0xd0, 0xd9, 0x0f, 0x89, 0x0f, 0x69, 0xdb, 0xe3, 0xb2, 0xdd, 0xd8, 0x9e, 0x1e, 0x4d, 0xf7, 0x38, - 0xe4, 0xc6, 0x4f, 0xe0, 0x2f, 0x70, 0xe2, 0x57, 0x70, 0xe0, 0x16, 0x6e, 0x7b, 0xdc, 0x0b, 0x23, - 0x32, 0x9c, 0x38, 0x70, 0xe0, 0x9a, 0x13, 0x9a, 0x9e, 0xf6, 0xf8, 0x2b, 0x59, 0x4c, 0x90, 0xf6, - 0x94, 0xdb, 0xf4, 0x53, 0x5d, 0x4f, 0x75, 0xd5, 0x54, 0xd5, 0x83, 0x3e, 0xef, 0xbd, 0x2b, 0x2c, - 0xc6, 0xed, 0x5e, 0xd0, 0x04, 0xdf, 0x05, 0x09, 0xc2, 0x1e, 0x82, 0xdb, 0xe2, 0xbe, 0xad, 0x0d, - 0xd4, 0x63, 0x36, 0x6d, 0x0d, 0x98, 0x10, 0x8c, 0xbb, 0x3e, 0x74, 0x98, 0x90, 0x3e, 0x95, 0x8c, - 0xbb, 0xf6, 0x70, 0xa3, 0x09, 0x92, 0x6e, 0xd8, 0x1d, 0x70, 0xc1, 0xa7, 0x12, 0x5a, 0x96, 
0xe7, - 0x73, 0xc9, 0x71, 0x3d, 0xf1, 0xb4, 0xa8, 0xc7, 0xac, 0x33, 0x3d, 0x2d, 0xed, 0xb9, 0x7e, 0xbb, - 0xc3, 0x64, 0x37, 0x68, 0x5a, 0x0e, 0x1f, 0xd8, 0x1d, 0xde, 0xe1, 0xb6, 0x22, 0x68, 0x06, 0x6d, - 0x75, 0x52, 0x07, 0xf5, 0x95, 0x10, 0xaf, 0xbf, 0x3d, 0x7e, 0xd2, 0x80, 0x3a, 0x5d, 0xe6, 0x82, - 0x7f, 0x64, 0x7b, 0xbd, 0x4e, 0x0c, 0x08, 0x7b, 0x00, 0x92, 0xda, 0xc3, 0xb9, 0xe7, 0xac, 0xdb, - 0xe7, 0x79, 0xf9, 0x81, 0x2b, 0xd9, 0x00, 0xe6, 0x1c, 0xde, 0xf9, 0x27, 0x07, 0xe1, 0x74, 0x61, - 0x40, 0x67, 0xfd, 0x6a, 0xbf, 0xe4, 0xd1, 0xb5, 0xbd, 0x40, 0x52, 0xc9, 0xdc, 0xce, 0x13, 0x68, - 0x76, 0x39, 0xef, 0xe1, 0x2a, 0xca, 0xba, 0x74, 0x00, 0x65, 0xa3, 0x6a, 0xd4, 0x8b, 0x8d, 0x95, - 0xe3, 0xd0, 0x5c, 0x8a, 0x42, 0x33, 0x7b, 0x9f, 0x0e, 0x80, 0x28, 0x0b, 0x3e, 0x44, 0x2b, 0x4e, - 0x9f, 0x81, 0x2b, 0xb7, 0xb9, 0xdb, 0x66, 0x9d, 0xf2, 0x72, 0xd5, 0xa8, 0x97, 0x36, 0x3f, 0xb4, - 0x16, 0x2d, 0xa2, 0xa5, 0x43, 0x6d, 0x4f, 0x90, 0x34, 0xfe, 0xaf, 0x03, 0xad, 0x4c, 0xa2, 0x64, - 0x2a, 0x10, 0xa6, 0x28, 0xe7, 0x07, 0x7d, 0x10, 0xe5, 0x4c, 0x35, 0x53, 0x2f, 0x6d, 0x7e, 0xb0, - 0x78, 0x44, 0x12, 0xf4, 0xe1, 0x09, 0x93, 0xdd, 0x07, 0x1e, 0x24, 0x16, 0xd1, 0x58, 0xd5, 0x01, - 0x73, 0xb1, 0x4d, 0x90, 0x84, 0x19, 0xef, 0xa2, 0xd5, 0x36, 0x65, 0xfd, 0xc0, 0x87, 0x7d, 0xde, - 0x67, 0xce, 0x51, 0x39, 0xab, 0xca, 0xf0, 0x7a, 0x14, 0x9a, 0xab, 0xf7, 0x26, 0x0d, 0xa7, 0xa1, - 0xb9, 0x36, 0x05, 0x3c, 0x3c, 0xf2, 0x80, 0x4c, 0x3b, 0xe3, 0x8f, 0x51, 0x69, 0x40, 0xa5, 0xd3, - 0xd5, 0x5c, 0x45, 0xc5, 0x55, 0x8b, 0x42, 0xb3, 0xb4, 0x37, 0x86, 0x4f, 0x43, 0xf3, 0xda, 0xc4, - 0x51, 0xf1, 0x4c, 0xba, 0xe1, 0x6f, 0xd1, 0x5a, 0x5c, 0x77, 0xe1, 0x51, 0x07, 0x0e, 0xa0, 0x0f, - 0x8e, 0xe4, 0x7e, 0x39, 0xa7, 0x8a, 0xfe, 0xd6, 0x44, 0x09, 0xd2, 0x3f, 0x6f, 0x79, 0xbd, 0x4e, - 0x0c, 0x08, 0x2b, 0x6e, 0x30, 0x6b, 0xb8, 0x61, 0xed, 0xd2, 0x26, 0xf4, 0x47, 0xae, 0x8d, 0x57, - 0xa2, 0xd0, 0x5c, 0xbb, 0x3f, 0xcb, 0x48, 0xe6, 0x83, 0x60, 0x8e, 0xae, 0xf2, 0xe6, 0x37, 0xe0, - 0xc8, 0x34, 0x6c, 0xe9, 0xe2, 0x61, 0x71, 0x14, 0x9a, 0x57, 0x1f, 0x4c, 0xd1, 0x91, 0x19, 0xfa, - 0xb8, 0x60, 0x82, 0xb5, 0xe0, 0x93, 0x76, 0x1b, 0x1c, 0x29, 0xca, 0x57, 0xc6, 0x05, 0x3b, 0x18, - 0xc3, 0x71, 0xc1, 0xc6, 0xc7, 0xed, 0x3e, 0x15, 0x82, 0x4c, 0xba, 0xe1, 0xbb, 0xe8, 0x6a, 0xdc, - 0xf5, 0x3c, 0x90, 0x07, 0xe0, 0x70, 0xb7, 0x25, 0xca, 0xf9, 0xaa, 0x51, 0xcf, 0x25, 0x2f, 0x78, - 0x38, 0x65, 0x21, 0x33, 0x37, 0xf1, 0x23, 0x74, 0x33, 0x6d, 0x25, 0x02, 0x43, 0x06, 0x87, 0x8f, - 0xc1, 0x8f, 0x0f, 0xa2, 0x5c, 0xa8, 0x66, 0xea, 0xc5, 0xc6, 0x6b, 0x51, 0x68, 0xde, 0xdc, 0x3a, - 0xfb, 0x0a, 0x39, 0xcf, 0x17, 0x3f, 0x45, 0xd8, 0x07, 0xe6, 0x0e, 0xb9, 0xa3, 0xda, 0x4f, 0x37, - 0x04, 0x52, 0xf9, 0xdd, 0x89, 0x42, 0x13, 0x93, 0x39, 0xeb, 0x69, 0x68, 0xde, 0x98, 0x47, 0x55, - 0x7b, 0x9c, 0xc1, 0x55, 0xfb, 0xd5, 0x40, 0xb7, 0x66, 0x66, 0x39, 0x19, 0x9b, 0x20, 0xe9, 0x78, - 0xfc, 0x14, 0x15, 0xe2, 0x1f, 0xd3, 0xa2, 0x92, 0xaa, 0xe1, 0x2e, 0x6d, 0xde, 0x59, 0xec, 0x37, - 0x26, 0xff, 0x6c, 0x0f, 0x24, 0x6d, 0x60, 0x3d, 0x34, 0x68, 0x8c, 0x91, 0x94, 0x15, 0x7f, 0x89, - 0x0a, 0x3a, 0xb2, 0x28, 0x2f, 0xab, 0x11, 0x7d, 0x6f, 0xf1, 0x11, 0x9d, 0x79, 0x7b, 0x23, 0x1b, - 0x87, 0x22, 0x85, 0x43, 0x4d, 0x58, 0xfb, 0xd3, 0x40, 0xd5, 0x17, 0xe5, 0xb7, 0xcb, 0x84, 0xc4, - 0x5f, 0xcd, 0xe5, 0x68, 0x2d, 0xd8, 0xaa, 0x4c, 0x24, 0x19, 0x5e, 0xd7, 0x19, 0x16, 0x46, 0xc8, - 0x44, 0x7e, 0x3d, 0x94, 0x63, 0x12, 0x06, 0xa3, 0xe4, 0xee, 0x5d, 0x38, 0xb9, 0xa9, 0x87, 0x8f, - 0x37, 0xd1, 0x4e, 0x4c, 0x4e, 0x92, 0x18, 0xb5, 0x9f, 0x0d, 0x94, 0x8d, 0x57, 0x13, 0x7e, 0x03, - 0x15, 0xa9, 0xc7, 
0x3e, 0xf5, 0x79, 0xe0, 0x89, 0xb2, 0xa1, 0x7a, 0x70, 0x35, 0x0a, 0xcd, 0xe2, - 0xd6, 0xfe, 0x4e, 0x02, 0x92, 0xb1, 0x1d, 0x6f, 0xa0, 0x12, 0xf5, 0x58, 0xda, 0xb2, 0xcb, 0xea, - 0xfa, 0xb5, 0x78, 0x80, 0xb6, 0xf6, 0x77, 0xd2, 0x36, 0x9d, 0xbc, 0x13, 0xf3, 0xfb, 0x20, 0x78, - 0xe0, 0x3b, 0x7a, 0xb3, 0x6a, 0x7e, 0x32, 0x02, 0xc9, 0xd8, 0x8e, 0xdf, 0x44, 0x39, 0xe1, 0x70, - 0x0f, 0xf4, 0x5e, 0xbc, 0x11, 0x3f, 0xfb, 0x20, 0x06, 0x4e, 0x43, 0xb3, 0xa8, 0x3e, 0x54, 0x83, - 0x26, 0x97, 0x6a, 0x3f, 0x1a, 0x08, 0xcf, 0xaf, 0x5e, 0xfc, 0x11, 0x42, 0x3c, 0x3d, 0xe9, 0x94, - 0x4c, 0xd5, 0x55, 0x29, 0x7a, 0x1a, 0x9a, 0xab, 0xe9, 0x49, 0x51, 0x4e, 0xb8, 0xe0, 0x7d, 0x94, - 0x8d, 0xd7, 0xb5, 0x56, 0x1e, 0xeb, 0xdf, 0xe9, 0xc0, 0x58, 0xd3, 0xe2, 0x13, 0x51, 0x4c, 0xb5, - 0x1f, 0x0c, 0x74, 0xfd, 0x00, 0xfc, 0x21, 0x73, 0x80, 0x40, 0x1b, 0x7c, 0x70, 0x1d, 0xc0, 0x36, - 0x2a, 0xa6, 0x3b, 0x51, 0xeb, 0xe1, 0x9a, 0xf6, 0x2d, 0xa6, 0xfb, 0x93, 0x8c, 0xef, 0xa4, 0xda, - 0xb9, 0x7c, 0xae, 0x76, 0xde, 0x42, 0x59, 0x8f, 0xca, 0x6e, 0x39, 0xa3, 0x6e, 0x14, 0x62, 0xeb, - 0x3e, 0x95, 0x5d, 0xa2, 0x50, 0x65, 0xe5, 0xbe, 0x54, 0xc5, 0xcd, 0x69, 0x2b, 0xf7, 0x25, 0x51, - 0x68, 0xed, 0x8f, 0x2b, 0x68, 0xed, 0x31, 0xed, 0xb3, 0xd6, 0xa5, 0x5e, 0x5f, 0xea, 0xf5, 0x82, - 0x7a, 0x8d, 0x2e, 0xf5, 0xfa, 0x22, 0x7a, 0x5d, 0x3b, 0x31, 0x50, 0x65, 0x6e, 0xd6, 0x5e, 0xb6, - 0x9e, 0x7e, 0x3d, 0xa7, 0xa7, 0xef, 0x2f, 0x3e, 0x42, 0x73, 0xaf, 0x9f, 0x53, 0xd4, 0xbf, 0x0c, - 0x54, 0x7b, 0x71, 0x8e, 0x2f, 0x41, 0x53, 0x07, 0xd3, 0x9a, 0xfa, 0xd9, 0x7f, 0x48, 0x70, 0x11, - 0x55, 0xfd, 0xc9, 0x40, 0xff, 0x3b, 0x63, 0x9d, 0xe1, 0x57, 0x51, 0x26, 0xf0, 0xfb, 0x7a, 0x2d, - 0xe7, 0xa3, 0xd0, 0xcc, 0x3c, 0x22, 0xbb, 0x24, 0xc6, 0x30, 0x45, 0x79, 0x91, 0x28, 0x83, 0x4e, - 0xff, 0xee, 0xe2, 0x6f, 0x9c, 0x95, 0x94, 0x46, 0x29, 0x0a, 0xcd, 0xfc, 0x08, 0x1d, 0xf1, 0xe2, - 0x3a, 0x2a, 0x38, 0xb4, 0x11, 0xb8, 0x2d, 0xad, 0x69, 0x2b, 0x8d, 0x95, 0xb8, 0x5c, 0xdb, 0x5b, - 0x09, 0x46, 0x52, 0x6b, 0xe3, 0xf6, 0xf1, 0x49, 0x65, 0xe9, 0xd9, 0x49, 0x65, 0xe9, 0xf9, 0x49, - 0x65, 0xe9, 0xbb, 0xa8, 0x62, 0x1c, 0x47, 0x15, 0xe3, 0x59, 0x54, 0x31, 0x9e, 0x47, 0x15, 0xe3, - 0xb7, 0xa8, 0x62, 0x7c, 0xff, 0x7b, 0x65, 0xe9, 0x8b, 0xbc, 0x8e, 0xff, 0x77, 0x00, 0x00, 0x00, - 0xff, 0xff, 0xae, 0x27, 0x2b, 0xcb, 0x2c, 0x0f, 0x00, 0x00, -} - -func (m *MutatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MutatingWebhook) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MutatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if m.ReinvocationPolicy != nil { - i -= len(*m.ReinvocationPolicy) - copy(dAtA[i:], *m.ReinvocationPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReinvocationPolicy))) - i-- - dAtA[i] = 0x52 - } - if m.MatchPolicy != nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - 
i-- - dAtA[i] = 0x4a - } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x42 - } - } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i-- - dAtA[i] = 0x32 - } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return dAtA[:n], nil -} - -func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MutatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l + i += n1 if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Webhooks { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -591,46 +163,37 @@ func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { } func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - 
-func (m *MutatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *Rule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -638,56 +201,62 @@ func (m *Rule) Marshal() (dAtA []byte, err error) { } func (m *Rule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Rule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Scope != nil { - i -= len(*m.Scope) - copy(dAtA[i:], *m.Scope) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope))) - i-- - dAtA[i] = 0x22 - } - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) - i-- - dAtA[i] = 0x1a + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } if len(m.APIVersions) > 0 { - for iNdEx := len(m.APIVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIVersions[iNdEx]) - copy(dAtA[i:], m.APIVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersions[iNdEx]))) - i-- + for _, s := range m.APIVersions { dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -695,41 +264,40 @@ func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { } func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuleWithOperations) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Rule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 if len(m.Operations) > 0 { - for iNdEx := len(m.Operations) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Operations[iNdEx]) - copy(dAtA[i:], m.Operations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operations[iNdEx]))) - i-- + for _, s := range m.Operations { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) + n3, err := m.Rule.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *ServiceReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -737,249 +305,171 @@ func (m *ServiceReference) Marshal() (dAtA []byte, err error) { } func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Port != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) - i-- - dAtA[i] = 0x20 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if m.Path != nil { - i -= len(*m.Path) - copy(dAtA[i:], *m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) + i += copy(dAtA[i:], *m.Path) } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ValidatingWebhook) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhook) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingWebhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.ObjectSelector != nil { - { - size, err := m.ObjectSelector.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + if len(m.Webhooks) > 0 { + for _, msg := range m.Webhooks { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.MatchPolicy != nil { - i -= len(*m.MatchPolicy) - copy(dAtA[i:], *m.MatchPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MatchPolicy))) - i-- - dAtA[i] = 0x4a - } - if len(m.AdmissionReviewVersions) > 0 { - for iNdEx := len(m.AdmissionReviewVersions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AdmissionReviewVersions[iNdEx]) - copy(dAtA[i:], m.AdmissionReviewVersions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AdmissionReviewVersions[iNdEx]))) - i-- - dAtA[i] = 0x42 + i += n } } - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x38 - } - if m.SideEffects != nil { - i -= len(*m.SideEffects) - copy(dAtA[i:], *m.SideEffects) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i-- - dAtA[i] = 0x32 - } - if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.FailurePolicy != nil { - i -= len(*m.FailurePolicy) - copy(dAtA[i:], *m.FailurePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { +func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingWebhookConfiguration) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Webhooks) > 0 { - for iNdEx := len(m.Webhooks) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Webhooks[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += 
n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { +func (m *Webhook) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ValidatingWebhookConfigurationList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) + n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.FailurePolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) + i += copy(dAtA[i:], *m.FailurePolicy) + } + if m.NamespaceSelector != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n7 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.SideEffects != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) + i += copy(dAtA[i:], *m.SideEffects) + } + return i, nil } func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -987,111 +477,63 @@ func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { } func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0x1a + if m.Service != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Service.Size())) + n8, err := m.Service.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 } if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) + i += copy(dAtA[i:], m.CABundle) } - if m.Service != nil { 
- { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.URL != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) + i += copy(dAtA[i:], *m.URL) } - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base -} -func (m *MutatingWebhook) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReinvocationPolicy != nil { - l = len(*m.ReinvocationPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n + return offset + 1 } - func (m *MutatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1106,9 +548,6 @@ func (m *MutatingWebhookConfiguration) Size() (n int) { } func (m *MutatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1123,9 +562,6 @@ func (m *MutatingWebhookConfigurationList) Size() (n int) { } func (m *Rule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.APIGroups) > 0 { @@ -1146,17 +582,10 @@ func (m *Rule) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.Scope != nil { - l = len(*m.Scope) - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *RuleWithOperations) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Operations) > 0 { @@ -1171,9 +600,6 @@ func (m *RuleWithOperations) Size() (n int) { } func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Namespace) @@ -1184,70 +610,30 @@ func (m *ServiceReference) Size() (n int) { l = len(*m.Path) n += 1 + l + sovGenerated(uint64(l)) } - if m.Port != nil { - 
n += 1 + sovGenerated(uint64(*m.Port)) - } return n } -func (m *ValidatingWebhook) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingWebhookConfiguration) Size() (n int) { var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { + if len(m.Webhooks) > 0 { + for _, e := range m.Webhooks { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) - } - if len(m.AdmissionReviewVersions) > 0 { - for _, s := range m.AdmissionReviewVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.MatchPolicy != nil { - l = len(*m.MatchPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ObjectSelector != nil { - l = m.ObjectSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *ValidatingWebhookConfiguration) Size() (n int) { - if m == nil { - return 0 - } +func (m *ValidatingWebhookConfigurationList) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { + if len(m.Items) > 0 { + for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -1255,27 +641,35 @@ func (m *ValidatingWebhookConfiguration) Size() (n int) { return n } -func (m *ValidatingWebhookConfigurationList) Size() (n int) { - if m == nil { - return 0 - } +func (m *Webhook) Size() (n int) { var l int _ = l - l = m.ListMeta.Size() + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { + l = m.ClientConfig.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Rules) > 0 { + for _, e := range m.Rules { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } } + if m.FailurePolicy != nil { + l = len(*m.FailurePolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NamespaceSelector != nil { + l = m.NamespaceSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.SideEffects != nil { + l = len(*m.SideEffects) + n += 1 + l + sovGenerated(uint64(l)) + } return n } func (m *WebhookClientConfig) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Service != nil { @@ -1294,48 +688,25 @@ func (m *WebhookClientConfig) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *MutatingWebhook) String() string { - if this == nil { - return "nil" - } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&MutatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", 
"WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ReinvocationPolicy:` + valueToStringGenerated(this.ReinvocationPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} func (this *MutatingWebhookConfiguration) String() string { if this == nil { return "nil" } - repeatedStringForWebhooks := "[]MutatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "MutatingWebhook", "MutatingWebhook", 1), `&`, ``, 1) + "," - } - repeatedStringForWebhooks += "}" s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1344,14 +715,9 @@ func (this *MutatingWebhookConfigurationList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]MutatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1364,7 +730,6 @@ func (this *Rule) String() string { `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `Scope:` + valueToStringGenerated(this.Scope) + `,`, `}`, }, "") return s @@ -1388,63 +753,43 @@ func (this *ServiceReference) String() string { `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhook) String() string { +func (this *ValidatingWebhookConfiguration) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]RuleWithOperations{" - for _, f := range 
this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&ValidatingWebhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`, - `MatchPolicy:` + valueToStringGenerated(this.MatchPolicy) + `,`, - `ObjectSelector:` + strings.Replace(fmt.Sprintf("%v", this.ObjectSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfiguration) String() string { +func (this *ValidatingWebhookConfigurationList) String() string { if this == nil { return "nil" } - repeatedStringForWebhooks := "[]ValidatingWebhook{" - for _, f := range this.Webhooks { - repeatedStringForWebhooks += strings.Replace(strings.Replace(f.String(), "ValidatingWebhook", "ValidatingWebhook", 1), `&`, ``, 1) + "," - } - repeatedStringForWebhooks += "}" - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + repeatedStringForWebhooks + `,`, + s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *ValidatingWebhookConfigurationList) String() string { +func (this *Webhook) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ValidatingWebhookConfiguration{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + s := strings.Join([]string{`&Webhook{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, + `Rules:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, + `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, `}`, }, "") return s @@ -1454,7 +799,7 @@ func (this *WebhookClientConfig) String() string { return "nil" } s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, + `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, `URL:` + valueToStringGenerated(this.URL) + `,`, `}`, @@ -1469,7 +814,7 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { +func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -1484,7 +829,7 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1492,576 +837,15 @@ func (m *MutatingWebhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } 
- } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReinvocationPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := ReinvocationPolicyType(dAtA[iNdEx:postIndex]) - m.ReinvocationPolicy = &s - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Webhooks = append(m.Webhooks, MutatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: 
wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2073,180 +857,27 @@ func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Rule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if 
b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2256,24 +887,22 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := ScopeType(dAtA[iNdEx:postIndex]) - m.Scope = &s + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -2284,9 +913,6 @@ func (m *Rule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2299,7 +925,7 @@ func (m *Rule) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { +func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2314,7 +940,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2322,17 +948,17 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") + return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2342,27 +968,25 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + 
postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2374,7 +998,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2383,13 +1007,11 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, MutatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2402,9 +1024,6 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2417,7 +1036,7 @@ func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { } return nil } -func (m *ServiceReference) Unmarshal(dAtA []byte) error { +func (m *Rule) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2432,7 +1051,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2440,15 +1059,15 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + return fmt.Errorf("proto: Rule: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2460,7 +1079,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2470,17 +1089,14 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2492,7 +1108,7 @@ func (m *ServiceReference) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2502,17 +1118,14 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2524,7 +1137,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2534,35 +1147,11 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s + m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Port = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2572,9 +1161,6 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2587,7 +1173,7 @@ func (m *ServiceReference) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { +func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2602,7 +1188,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2610,15 +1196,15 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhook: wiretype end group for non-group") + return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhook: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2630,7 +1216,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 
0x80 { break } @@ -2640,17 +1226,14 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2662,7 +1245,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2671,53 +1254,66 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - if msglen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceReference) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 4: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2729,7 +1325,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2739,20 +1335,16 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s + m.Namespace = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2762,31 +1354,24 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -2798,7 +1383,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2808,72 +1393,67 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s + s := string(dAtA[iNdEx:postIndex]) + m.Path = &s iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - intStringLen := int(stringLen) - if intStringLen < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF } - if postIndex > l { + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 
0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { return io.ErrUnexpectedEOF } - m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 9: + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2883,28 +1463,25 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := MatchPolicyType(dAtA[iNdEx:postIndex]) - m.MatchPolicy = &s + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 10: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2916,7 +1493,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2925,16 +1502,11 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ObjectSelector == nil { - m.ObjectSelector = &v1.LabelSelector{} - } - if err := m.ObjectSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Webhooks = append(m.Webhooks, Webhook{}) + if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -2947,9 +1519,6 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2962,7 +1531,7 @@ func (m *ValidatingWebhook) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { +func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2977,7 +1546,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2985,15 +1554,15 @@ 
func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3005,7 +1574,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3014,19 +1583,16 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3038,7 +1604,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3047,14 +1613,11 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Webhooks = append(m.Webhooks, ValidatingWebhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, ValidatingWebhookConfiguration{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3067,9 +1630,6 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3082,7 +1642,7 @@ func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { } return nil } -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { +func (m *Webhook) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3097,7 +1657,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3105,15 +1665,44 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end 
group for non-group") + return fmt.Errorf("proto: Webhook: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3125,7 +1714,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3134,19 +1723,16 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3158,7 +1744,7 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3167,17 +1753,107 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Rules = append(m.Rules, RuleWithOperations{}) + if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + s := FailurePolicyType(dAtA[iNdEx:postIndex]) + m.FailurePolicy = &s + iNdEx = postIndex + case 5: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NamespaceSelector == nil { + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} + } + if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := SideEffectClass(dAtA[iNdEx:postIndex]) + m.SideEffects = &s + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3187,9 +1863,6 @@ func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3217,7 +1890,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3245,7 +1918,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3254,9 +1927,6 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3281,7 +1951,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3290,9 +1960,6 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3315,7 +1982,7 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3325,9 +1992,6 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3343,9 +2007,6 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + 
skippy) > l { return io.ErrUnexpectedEOF } @@ -3361,7 +2022,6 @@ func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3393,8 +2053,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3411,34 +2073,118 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 906 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0x8e, 0x37, 0x29, 0x49, 0x26, 0x89, 0x76, 0x3b, 0x80, 0x14, 0xaa, 0x95, 0x1d, 0xe5, 0x80, + 0x22, 0xa1, 0xb5, 0x49, 0x41, 0x08, 0x21, 0x10, 0xaa, 0x0b, 0x0b, 0x95, 0xba, 0xbb, 0x61, 0x0a, + 0xbb, 0x12, 0xe2, 0xc0, 0xc4, 0x79, 0x49, 0x86, 0xf8, 0x97, 0x66, 0xc6, 0x59, 0x7a, 0x43, 0xe2, + 0x1f, 0x40, 0x42, 0xfc, 0x0d, 0xfc, 0x15, 0xdc, 0x7b, 0xdc, 0x0b, 0x62, 0x4f, 0x16, 0x35, 0x67, + 0x0e, 0x5c, 0x7b, 0x42, 0x63, 0x3b, 0x71, 0xd2, 0x6c, 0xbb, 0xe9, 0x85, 0x03, 0x37, 0xcf, 0xf7, + 0xe6, 0xfb, 0xde, 0xfb, 0x9e, 0xdf, 0x1b, 0xf4, 0xc5, 0xec, 0x7d, 0x61, 0xb2, 0xc0, 0x9a, 0x45, + 0x43, 0xe0, 0x3e, 0x48, 0x10, 0xd6, 0x1c, 0xfc, 0x51, 0xc0, 0xad, 0x3c, 0x40, 0x43, 0x66, 0xd1, + 0x91, 0xc7, 0x84, 0x60, 0x81, 0xcf, 0x61, 0xc2, 0x84, 0xe4, 0x54, 0xb2, 0xc0, 0xb7, 0xe6, 0xfd, + 0x21, 0x48, 0xda, 0xb7, 0x26, 0xe0, 0x03, 0xa7, 0x12, 0x46, 0x66, 0xc8, 0x03, 0x19, 0xe0, 0x5e, + 0xc6, 0x34, 0x69, 0xc8, 0xcc, 0x17, 0x32, 0xcd, 0x9c, 0xb9, 0x77, 0x6f, 0xc2, 0xe4, 0x34, 0x1a, + 0x9a, 0x4e, 0xe0, 0x59, 0x93, 0x60, 0x12, 0x58, 0xa9, 0xc0, 0x30, 0x1a, 0xa7, 0xa7, 0xf4, 0x90, + 0x7e, 0x65, 0xc2, 0x7b, 0xef, 0x16, 0x25, 0x79, 0xd4, 0x99, 0x32, 0x1f, 0xf8, 0xa9, 0x15, 0xce, + 0x26, 0x0a, 0x10, 0x96, 
0x07, 0x92, 0x5a, 0xf3, 0x8d, 0x72, 0xf6, 0xac, 0xab, 0x58, 0x3c, 0xf2, + 0x25, 0xf3, 0x60, 0x83, 0xf0, 0xde, 0xcb, 0x08, 0xc2, 0x99, 0x82, 0x47, 0x2f, 0xf3, 0xba, 0xbf, + 0x6b, 0xe8, 0xee, 0x83, 0x48, 0x52, 0xc9, 0xfc, 0xc9, 0x13, 0x18, 0x4e, 0x83, 0x60, 0x76, 0x18, + 0xf8, 0x63, 0x36, 0x89, 0x32, 0xdb, 0xf8, 0x5b, 0x54, 0x53, 0x45, 0x8e, 0xa8, 0xa4, 0x6d, 0xad, + 0xa3, 0xf5, 0x1a, 0xfb, 0x6f, 0x9b, 0x45, 0xaf, 0x96, 0xb9, 0xcc, 0x70, 0x36, 0x51, 0x80, 0x30, + 0xd5, 0x6d, 0x73, 0xde, 0x37, 0x1f, 0x0d, 0xbf, 0x03, 0x47, 0x3e, 0x00, 0x49, 0x6d, 0x7c, 0x16, + 0x1b, 0xa5, 0x24, 0x36, 0x50, 0x81, 0x91, 0xa5, 0x2a, 0x3e, 0x41, 0xb5, 0x3c, 0xb3, 0x68, 0xdf, + 0xea, 0x94, 0x7b, 0x8d, 0xfd, 0xbe, 0xb9, 0xed, 0xdf, 0x30, 0x73, 0xa6, 0x5d, 0x51, 0x29, 0x48, + 0xed, 0x69, 0x2e, 0xd4, 0xfd, 0x5b, 0x43, 0x9d, 0xeb, 0x7c, 0x1d, 0x33, 0x21, 0xf1, 0x37, 0x1b, + 0xde, 0xcc, 0xed, 0xbc, 0x29, 0x76, 0xea, 0xec, 0x4e, 0xee, 0xac, 0xb6, 0x40, 0x56, 0x7c, 0xcd, + 0xd0, 0x0e, 0x93, 0xe0, 0x2d, 0x4c, 0xdd, 0xdf, 0xde, 0xd4, 0x75, 0x85, 0xdb, 0xad, 0x3c, 0xe5, + 0xce, 0x91, 0x12, 0x27, 0x59, 0x8e, 0xee, 0xcf, 0x1a, 0xaa, 0x90, 0xc8, 0x05, 0xfc, 0x16, 0xaa, + 0xd3, 0x90, 0x7d, 0xc6, 0x83, 0x28, 0x14, 0x6d, 0xad, 0x53, 0xee, 0xd5, 0xed, 0x56, 0x12, 0x1b, + 0xf5, 0x83, 0xc1, 0x51, 0x06, 0x92, 0x22, 0x8e, 0xfb, 0xa8, 0x41, 0x43, 0xf6, 0x18, 0xb8, 0x2a, + 0x25, 0x2b, 0xb4, 0x6e, 0xdf, 0x4e, 0x62, 0xa3, 0x71, 0x30, 0x38, 0x5a, 0xc0, 0x64, 0xf5, 0x8e, + 0xd2, 0xe7, 0x20, 0x82, 0x88, 0x3b, 0x20, 0xda, 0xe5, 0x42, 0x9f, 0x2c, 0x40, 0x52, 0xc4, 0xbb, + 0xbf, 0x6a, 0x08, 0xab, 0xaa, 0x9e, 0x30, 0x39, 0x7d, 0x14, 0x42, 0xe6, 0x40, 0xe0, 0x8f, 0x11, + 0x0a, 0x96, 0xa7, 0xbc, 0x48, 0x23, 0x9d, 0x8f, 0x25, 0x7a, 0x11, 0x1b, 0xad, 0xe5, 0xe9, 0xcb, + 0xd3, 0x10, 0xc8, 0x0a, 0x05, 0x0f, 0x50, 0x85, 0x47, 0x2e, 0xb4, 0x6f, 0x6d, 0xfc, 0xb4, 0x97, + 0x74, 0x56, 0x15, 0x63, 0x37, 0xf3, 0x0e, 0xa6, 0x0d, 0x23, 0xa9, 0x52, 0xf7, 0x47, 0x0d, 0xdd, + 0x39, 0x01, 0x3e, 0x67, 0x0e, 0x10, 0x18, 0x03, 0x07, 0xdf, 0x01, 0x6c, 0xa1, 0xba, 0x4f, 0x3d, + 0x10, 0x21, 0x75, 0x20, 0x1d, 0x90, 0xba, 0xbd, 0x9b, 0x73, 0xeb, 0x0f, 0x17, 0x01, 0x52, 0xdc, + 0xc1, 0x1d, 0x54, 0x51, 0x87, 0xb4, 0xae, 0x7a, 0x91, 0x47, 0xdd, 0x25, 0x69, 0x04, 0xdf, 0x45, + 0x95, 0x90, 0xca, 0x69, 0xbb, 0x9c, 0xde, 0xa8, 0xa9, 0xe8, 0x80, 0xca, 0x29, 0x49, 0xd1, 0xee, + 0x1f, 0x1a, 0xd2, 0x1f, 0x53, 0x97, 0x8d, 0xfe, 0x77, 0xfb, 0xf8, 0x8f, 0x86, 0xba, 0xd7, 0x3b, + 0xfb, 0x0f, 0x36, 0xd2, 0x5b, 0xdf, 0xc8, 0xcf, 0xb7, 0xb7, 0x75, 0x7d, 0xe9, 0x57, 0xec, 0xe4, + 0x2f, 0x15, 0x54, 0xcd, 0xaf, 0x2f, 0x27, 0x43, 0xbb, 0x72, 0x32, 0x9e, 0xa2, 0xa6, 0xe3, 0x32, + 0xf0, 0x65, 0x26, 0x9d, 0xcf, 0xf6, 0x47, 0x37, 0x6e, 0xfd, 0xe1, 0x8a, 0x88, 0xfd, 0x5a, 0x9e, + 0xa8, 0xb9, 0x8a, 0x92, 0xb5, 0x44, 0x98, 0xa2, 0x1d, 0xb5, 0x02, 0xd9, 0x36, 0x37, 0xf6, 0x3f, + 0xbc, 0xd9, 0x36, 0xad, 0xaf, 0x76, 0xd1, 0x09, 0x15, 0x13, 0x24, 0x53, 0xc6, 0xc7, 0xa8, 0x35, + 0xa6, 0xcc, 0x8d, 0x38, 0x0c, 0x02, 0x97, 0x39, 0xa7, 0xed, 0x4a, 0xda, 0x86, 0x37, 0x93, 0xd8, + 0x68, 0xdd, 0x5f, 0x0d, 0x5c, 0xc4, 0xc6, 0xee, 0x1a, 0x90, 0xae, 0xfe, 0x3a, 0x19, 0x7f, 0x8f, + 0x76, 0x97, 0x2b, 0x77, 0x02, 0x2e, 0x38, 0x32, 0xe0, 0xed, 0x9d, 0xb4, 0x5d, 0xef, 0x6c, 0x39, + 0x2d, 0x74, 0x08, 0xee, 0x82, 0x6a, 0xbf, 0x9e, 0xc4, 0xc6, 0xee, 0xc3, 0xcb, 0x8a, 0x64, 0x33, + 0x09, 0xfe, 0x04, 0x35, 0x04, 0x1b, 0xc1, 0xa7, 0xe3, 0x31, 0x38, 0x52, 0xb4, 0x5f, 0x49, 0x5d, + 0x74, 0xd5, 0x7b, 0x79, 0x52, 0xc0, 0x17, 0xb1, 0x71, 0xbb, 0x38, 0x1e, 0xba, 0x54, 0x08, 0xb2, + 0x4a, 0xeb, 0xfe, 0xa6, 0xa1, 0x57, 0x5f, 0xf0, 
0xb3, 0x30, 0x45, 0x55, 0x91, 0x3d, 0x41, 0xf9, + 0xec, 0x7f, 0xb0, 0xfd, 0xaf, 0xb8, 0xfc, 0x76, 0xd9, 0x8d, 0x24, 0x36, 0xaa, 0x0b, 0x74, 0xa1, + 0x8b, 0x7b, 0xa8, 0xe6, 0x50, 0x3b, 0xf2, 0x47, 0xf9, 0xe3, 0xd9, 0xb4, 0x9b, 0x6a, 0x57, 0x0e, + 0x0f, 0x32, 0x8c, 0x2c, 0xa3, 0xf8, 0x0d, 0x54, 0x8e, 0xb8, 0x9b, 0xbf, 0x53, 0xd5, 0x24, 0x36, + 0xca, 0x5f, 0x91, 0x63, 0xa2, 0x30, 0xfb, 0xde, 0xd9, 0xb9, 0x5e, 0x7a, 0x76, 0xae, 0x97, 0x9e, + 0x9f, 0xeb, 0xa5, 0x1f, 0x12, 0x5d, 0x3b, 0x4b, 0x74, 0xed, 0x59, 0xa2, 0x6b, 0xcf, 0x13, 0x5d, + 0xfb, 0x33, 0xd1, 0xb5, 0x9f, 0xfe, 0xd2, 0x4b, 0x5f, 0x57, 0xf3, 0xd2, 0xfe, 0x0d, 0x00, 0x00, + 0xff, 0xff, 0x85, 0x06, 0x8c, 0x7f, 0xae, 0x09, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto index 086cbcc..4d55ca8 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto @@ -28,160 +28,9 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// MutatingWebhook describes an admission webhook and the resources and operations it applies to. -message MutatingWebhook { - // The name of the admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - optional string name = 1; - - // ClientConfig defines how to communicate with the hook. - // Required - optional WebhookClientConfig clientConfig = 2; - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks - // from putting the cluster in a state which cannot be recovered from without completely - // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called - // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - repeated RuleWithOperations rules = 3; - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - optional string failurePolicy = 4; - - // matchPolicy defines how the "rules" list is used to match incoming requests. - // Allowed values are "Exact" or "Equivalent". - // - // - Exact: match a request only if it exactly matches a specified rule. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - // - // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. 
- // - // Defaults to "Exact" - // +optional - optional string matchPolicy = 9; - - // NamespaceSelector decides whether to run the webhook on an object based - // on whether the namespace for that object matches the selector. If the - // object itself is a namespace, the matching is performed on - // object.metadata.labels. If the object is another cluster scoped resource, - // it never skips the webhook. - // - // For example, to run the webhook on any objects whose namespace is not - // associated with "runlevel" of "0" or "1"; you will set the selector as - // follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "runlevel", - // "operator": "NotIn", - // "values": [ - // "0", - // "1" - // ] - // } - // ] - // } - // - // If instead you want to only run the webhook on any objects whose - // namespace is associated with the "environment" of "prod" or "staging"; - // you will set the selector as follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "environment", - // "operator": "In", - // "values": [ - // "prod", - // "staging" - // ] - // } - // ] - // } - // - // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - // for more examples of label selectors. - // - // Default to the empty LabelSelector, which matches everything. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; - - // ObjectSelector decides whether to run the webhook based on if the - // object has matching labels. objectSelector is evaluated against both - // the oldObject and newObject that would be sent to the webhook, and - // is considered to match if either object matches the selector. A null - // object (oldObject in the case of create, or newObject in the case of - // delete) or an object that cannot have labels (like a - // DeploymentRollback or a PodProxyOptions object) is not considered to - // match. - // Use the object selector only if the webhook is opt-in, because end - // users may skip the admission webhook by setting the labels. - // Default to the empty LabelSelector, which matches everything. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 11; - - // SideEffects states whether this webhook has side effects. - // Acceptable values are: Unknown, None, Some, NoneOnDryRun - // Webhooks with side effects MUST implement a reconciliation system, since a request may be - // rejected by a future step in the admission change and the side effects therefore need to be undone. - // Requests with the dryRun attribute will be auto-rejected if they match a webhook with - // sideEffects == Unknown or Some. Defaults to Unknown. - // +optional - optional string sideEffects = 6; - - // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, - // the webhook call will be ignored or the API call will fail based on the - // failure policy. - // The timeout value must be between 1 and 30 seconds. - // Default to 30 seconds. - // +optional - optional int32 timeoutSeconds = 7; - - // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` - // versions the Webhook expects. API server will try to use first version in - // the list which it supports. If none of the versions specified in this list - // supported by API server, validation will fail for this object. 
- // If a persisted webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail - // and be subject to the failure policy. - // Default to `['v1beta1']`. - // +optional - repeated string admissionReviewVersions = 8; - - // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. - // Allowed values are "Never" and "IfNeeded". - // - // Never: the webhook will not be called more than once in a single admission evaluation. - // - // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation - // if the object being admitted is modified by other admission plugins after the initial webhook call. - // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. - // Note: - // * the number of additional invocations is not guaranteed to be exactly one. - // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. - // * webhooks that use this option may be reordered to minimize the number of additional invocations. - // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. - // - // Defaults to "Never". - // +optional - optional string reinvocationPolicy = 10; -} - // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. -// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. message MutatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -189,13 +38,13 @@ message MutatingWebhookConfiguration { // +optional // +patchMergeKey=name // +patchStrategy=merge - repeated MutatingWebhook Webhooks = 2; + repeated Webhook Webhooks = 2; } // MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. message MutatingWebhookConfigurationList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -217,7 +66,7 @@ message Rule { repeated string apiVersions = 2; // Resources is a list of resources this rule applies to. - // + // // For example: // 'pods' means pods. // 'pods/log' means the log subresource of pods. @@ -225,25 +74,13 @@ message Rule { // 'pods/*' means all subresources of pods. // '*/scale' means all scale subresources. // '*/*' means all resources and their subresources. - // + // // If wildcard is present, the validation rule will ensure resources do not // overlap with each other. - // + // // Depending on the enclosing object, subresources might not be allowed. // Required. repeated string resources = 3; - - // scope specifies the scope of this rule. - // Valid values are "Cluster", "Namespaced", and "*" - // "Cluster" means that only cluster-scoped resources will match this rule. 
- // Namespace API objects are cluster-scoped. - // "Namespaced" means that only namespaced resources will match this rule. - // "*" means that there are no scope restrictions. - // Subresources match the scope of their parent resource. - // Default is "*". - // - // +optional - optional string scope = 4; } // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make @@ -274,16 +111,34 @@ message ServiceReference { // this service. // +optional optional string path = 3; +} - // If specified, the port on the service that hosting webhook. - // Default to 443 for backward compatibility. - // `port` should be a valid port number (1-65535, inclusive). +// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. +message ValidatingWebhookConfiguration { + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional - optional int32 port = 4; + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // Webhooks is a list of webhooks and the affected resources and operations. + // +optional + // +patchMergeKey=name + // +patchStrategy=merge + repeated Webhook Webhooks = 2; +} + +// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. +message ValidatingWebhookConfigurationList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of ValidatingWebhookConfiguration. + repeated ValidatingWebhookConfiguration items = 2; } -// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. -message ValidatingWebhook { +// Webhook describes an admission webhook and the resources and operations it applies to. +message Webhook { // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -308,29 +163,12 @@ message ValidatingWebhook { // +optional optional string failurePolicy = 4; - // matchPolicy defines how the "rules" list is used to match incoming requests. - // Allowed values are "Exact" or "Equivalent". - // - // - Exact: match a request only if it exactly matches a specified rule. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - // - // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. - // - // Defaults to "Exact" - // +optional - optional string matchPolicy = 9; - // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. If the // object itself is a namespace, the matching is performed on // object.metadata.labels. 
If the object is another cluster scoped resource, // it never skips the webhook. - // + // // For example, to run the webhook on any objects whose namespace is not // associated with "runlevel" of "0" or "1"; you will set the selector as // follows: @@ -346,7 +184,7 @@ message ValidatingWebhook { // } // ] // } - // + // // If instead you want to only run the webhook on any objects whose // namespace is associated with the "environment" of "prod" or "staging"; // you will set the selector as follows: @@ -362,30 +200,16 @@ message ValidatingWebhook { // } // ] // } - // + // // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels + // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ // for more examples of label selectors. - // + // // Default to the empty LabelSelector, which matches everything. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; - // ObjectSelector decides whether to run the webhook based on if the - // object has matching labels. objectSelector is evaluated against both - // the oldObject and newObject that would be sent to the webhook, and - // is considered to match if either object matches the selector. A null - // object (oldObject in the case of create, or newObject in the case of - // delete) or an object that cannot have labels (like a - // DeploymentRollback or a PodProxyOptions object) is not considered to - // match. - // Use the object selector only if the webhook is opt-in, because end - // users may skip the admission webhook by setting the labels. - // Default to the empty LabelSelector, which matches everything. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector objectSelector = 10; - - // SideEffects states whether this webhook has side effects. + // SideEffects states whether this webhookk has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. @@ -393,95 +217,53 @@ message ValidatingWebhook { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional optional string sideEffects = 6; - - // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, - // the webhook call will be ignored or the API call will fail based on the - // failure policy. - // The timeout value must be between 1 and 30 seconds. - // Default to 30 seconds. - // +optional - optional int32 timeoutSeconds = 7; - - // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` - // versions the Webhook expects. API server will try to use first version in - // the list which it supports. If none of the versions specified in this list - // supported by API server, validation will fail for this object. - // If a persisted webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail - // and be subject to the failure policy. - // Default to `['v1beta1']`. - // +optional - repeated string admissionReviewVersions = 8; -} - -// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. -// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. 
-message ValidatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Webhooks is a list of webhooks and the affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated ValidatingWebhook Webhooks = 2; -} - -// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. -message ValidatingWebhookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ValidatingWebhookConfiguration. - repeated ValidatingWebhookConfiguration items = 2; } // WebhookClientConfig contains the information to make a TLS // connection with the webhook message WebhookClientConfig { // `url` gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` // must be specified. - // + // // The `host` should not refer to a service running in the cluster; use // the `service` field instead. The host might be resolved via external // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve // in-cluster DNS as that would be a layering violation). `host` may // also be an IP address. - // + // // Please note that using `localhost` or `127.0.0.1` as a `host` is // risky unless you take great care to run this webhook on all hosts // which run an apiserver which might need to make calls to this // webhook. Such installs are likely to be non-portable, i.e., not easy // to turn up in a new cluster. - // + // // The scheme must be "https"; the URL must begin with "https://". - // + // // A path is optional, and if present may be any string permissible in // a URL. You may use the path to pass an arbitrary string to the // webhook, for example, a cluster identifier. - // + // // Attempting to use a user or basic auth e.g. "user:password@" is not // allowed. Fragments ("#...") and query parameters ("?...") are not // allowed, either. - // + // // +optional optional string url = 3; // `service` is a reference to the service for this webhook. Either // `service` or `url` must be specified. - // + // // If the webhook is running within the cluster, then you should use `service`. - // + // + // Port 443 will be used if it is open, otherwise it is an error. + // // +optional optional ServiceReference service = 1; - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. optional bytes caBundle = 2; } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go index 37a993e..0b948ba 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go @@ -49,32 +49,8 @@ type Rule struct { // Depending on the enclosing object, subresources might not be allowed. // Required. 
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - - // scope specifies the scope of this rule. - // Valid values are "Cluster", "Namespaced", and "*" - // "Cluster" means that only cluster-scoped resources will match this rule. - // Namespace API objects are cluster-scoped. - // "Namespaced" means that only namespaced resources will match this rule. - // "*" means that there are no scope restrictions. - // Subresources match the scope of their parent resource. - // Default is "*". - // - // +optional - Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"` } -type ScopeType string - -const ( - // ClusterScope means that scope is limited to cluster-scoped objects. - // Namespace objects are cluster-scoped. - ClusterScope ScopeType = "Cluster" - // NamespacedScope means that scope is limited to namespaced objects. - NamespacedScope ScopeType = "Namespaced" - // AllScopes means that all scopes are included. - AllScopes ScopeType = "*" -) - type FailurePolicyType string const ( @@ -84,16 +60,6 @@ const ( Fail FailurePolicyType = "Fail" ) -// MatchPolicyType specifies the type of match policy -type MatchPolicyType string - -const ( - // Exact means requests should only be sent to the webhook if they exactly match a given rule - Exact MatchPolicyType = "Exact" - // Equivalent means requests should be sent to the webhook if they modify a resource listed in rules via another API group or version. - Equivalent MatchPolicyType = "Equivalent" -) - type SideEffectClass string const ( @@ -115,17 +81,16 @@ const ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. -// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead. type ValidatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []ValidatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -134,7 +99,7 @@ type ValidatingWebhookConfiguration struct { type ValidatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of ValidatingWebhookConfiguration. 
@@ -146,17 +111,16 @@ type ValidatingWebhookConfigurationList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. -// Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead. type MutatingWebhookConfiguration struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Webhooks is a list of webhooks and the affected resources and operations. // +optional // +patchMergeKey=name // +patchStrategy=merge - Webhooks []MutatingWebhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` + Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -165,15 +129,15 @@ type MutatingWebhookConfiguration struct { type MutatingWebhookConfigurationList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of MutatingWebhookConfiguration. Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` } -// ValidatingWebhook describes an admission webhook and the resources and operations it applies to. -type ValidatingWebhook struct { +// Webhook describes an admission webhook and the resources and operations it applies to. +type Webhook struct { // The name of the admission webhook. // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where // "imagepolicy" is the name of the webhook, and kubernetes.io is the name @@ -198,155 +162,6 @@ type ValidatingWebhook struct { // +optional FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` - // matchPolicy defines how the "rules" list is used to match incoming requests. - // Allowed values are "Exact" or "Equivalent". - // - // - Exact: match a request only if it exactly matches a specified rule. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - // - // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. 
- // - // Defaults to "Exact" - // +optional - MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` - - // NamespaceSelector decides whether to run the webhook on an object based - // on whether the namespace for that object matches the selector. If the - // object itself is a namespace, the matching is performed on - // object.metadata.labels. If the object is another cluster scoped resource, - // it never skips the webhook. - // - // For example, to run the webhook on any objects whose namespace is not - // associated with "runlevel" of "0" or "1"; you will set the selector as - // follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "runlevel", - // "operator": "NotIn", - // "values": [ - // "0", - // "1" - // ] - // } - // ] - // } - // - // If instead you want to only run the webhook on any objects whose - // namespace is associated with the "environment" of "prod" or "staging"; - // you will set the selector as follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "environment", - // "operator": "In", - // "values": [ - // "prod", - // "staging" - // ] - // } - // ] - // } - // - // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels - // for more examples of label selectors. - // - // Default to the empty LabelSelector, which matches everything. - // +optional - NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` - - // ObjectSelector decides whether to run the webhook based on if the - // object has matching labels. objectSelector is evaluated against both - // the oldObject and newObject that would be sent to the webhook, and - // is considered to match if either object matches the selector. A null - // object (oldObject in the case of create, or newObject in the case of - // delete) or an object that cannot have labels (like a - // DeploymentRollback or a PodProxyOptions object) is not considered to - // match. - // Use the object selector only if the webhook is opt-in, because end - // users may skip the admission webhook by setting the labels. - // Default to the empty LabelSelector, which matches everything. - // +optional - ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,10,opt,name=objectSelector"` - - // SideEffects states whether this webhook has side effects. - // Acceptable values are: Unknown, None, Some, NoneOnDryRun - // Webhooks with side effects MUST implement a reconciliation system, since a request may be - // rejected by a future step in the admission change and the side effects therefore need to be undone. - // Requests with the dryRun attribute will be auto-rejected if they match a webhook with - // sideEffects == Unknown or Some. Defaults to Unknown. - // +optional - SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` - - // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, - // the webhook call will be ignored or the API call will fail based on the - // failure policy. - // The timeout value must be between 1 and 30 seconds. - // Default to 30 seconds. - // +optional - TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` - - // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` - // versions the Webhook expects. 
API server will try to use first version in - // the list which it supports. If none of the versions specified in this list - // supported by API server, validation will fail for this object. - // If a persisted webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail - // and be subject to the failure policy. - // Default to `['v1beta1']`. - // +optional - AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` -} - -// MutatingWebhook describes an admission webhook and the resources and operations it applies to. -type MutatingWebhook struct { - // The name of the admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // ClientConfig defines how to communicate with the hook. - // Required - ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks - // from putting the cluster in a state which cannot be recovered from without completely - // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called - // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` - - // matchPolicy defines how the "rules" list is used to match incoming requests. - // Allowed values are "Exact" or "Equivalent". - // - // - Exact: match a request only if it exactly matches a specified rule. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // but "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook. - // - // - Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. - // For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, - // and "rules" only included `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]`, - // a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook. - // - // Defaults to "Exact" - // +optional - MatchPolicy *MatchPolicyType `json:"matchPolicy,omitempty" protobuf:"bytes,9,opt,name=matchPolicy,casttype=MatchPolicyType"` - // NamespaceSelector decides whether to run the webhook on an object based // on whether the namespace for that object matches the selector. 
If the // object itself is a namespace, the matching is performed on @@ -393,21 +208,7 @@ type MutatingWebhook struct { // +optional NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` - // ObjectSelector decides whether to run the webhook based on if the - // object has matching labels. objectSelector is evaluated against both - // the oldObject and newObject that would be sent to the webhook, and - // is considered to match if either object matches the selector. A null - // object (oldObject in the case of create, or newObject in the case of - // delete) or an object that cannot have labels (like a - // DeploymentRollback or a PodProxyOptions object) is not considered to - // match. - // Use the object selector only if the webhook is opt-in, because end - // users may skip the admission webhook by setting the labels. - // Default to the empty LabelSelector, which matches everything. - // +optional - ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"` - - // SideEffects states whether this webhook has side effects. + // SideEffects states whether this webhookk has side effects. // Acceptable values are: Unknown, None, Some, NoneOnDryRun // Webhooks with side effects MUST implement a reconciliation system, since a request may be // rejected by a future step in the admission change and the side effects therefore need to be undone. @@ -415,58 +216,8 @@ type MutatingWebhook struct { // sideEffects == Unknown or Some. Defaults to Unknown. // +optional SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` - - // TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, - // the webhook call will be ignored or the API call will fail based on the - // failure policy. - // The timeout value must be between 1 and 30 seconds. - // Default to 30 seconds. - // +optional - TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"` - - // AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` - // versions the Webhook expects. API server will try to use first version in - // the list which it supports. If none of the versions specified in this list - // supported by API server, validation will fail for this object. - // If a persisted webhook configuration specifies allowed versions and does not - // include any versions known to the API Server, calls to the webhook will fail - // and be subject to the failure policy. - // Default to `['v1beta1']`. - // +optional - AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"` - - // reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. - // Allowed values are "Never" and "IfNeeded". - // - // Never: the webhook will not be called more than once in a single admission evaluation. - // - // IfNeeded: the webhook will be called at least one additional time as part of the admission evaluation - // if the object being admitted is modified by other admission plugins after the initial webhook call. - // Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. - // Note: - // * the number of additional invocations is not guaranteed to be exactly one. 
- // * if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. - // * webhooks that use this option may be reordered to minimize the number of additional invocations. - // * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead. - // - // Defaults to "Never". - // +optional - ReinvocationPolicy *ReinvocationPolicyType `json:"reinvocationPolicy,omitempty" protobuf:"bytes,10,opt,name=reinvocationPolicy,casttype=ReinvocationPolicyType"` } -// ReinvocationPolicyType specifies what type of policy the admission hook uses. -type ReinvocationPolicyType string - -const ( - // NeverReinvocationPolicy indicates that the webhook must not be called more than once in a - // single admission evaluation. - NeverReinvocationPolicy ReinvocationPolicyType = "Never" - // IfNeededReinvocationPolicy indicates that the webhook may be called at least one - // additional time as part of the admission evaluation if the object being admitted is - // modified by other admission plugins after the initial webhook call. - IfNeededReinvocationPolicy ReinvocationPolicyType = "IfNeeded" -) - // RuleWithOperations is a tuple of Operations and Resources. It is recommended to make // sure that all the tuple expansions are valid. type RuleWithOperations struct { @@ -495,7 +246,7 @@ const ( // connection with the webhook type WebhookClientConfig struct { // `url` gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` + // (`[scheme://]host:port/path`). Exactly one of `url` or `service` // must be specified. // // The `host` should not refer to a service running in the cluster; use @@ -528,13 +279,15 @@ type WebhookClientConfig struct { // // If the webhook is running within the cluster, then you should use `service`. // + // Port 443 will be used if it is open, otherwise it is an error. + // // +optional - Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"` + Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"` + // `caBundle` is a PEM encoded CA bundle which will be used to validate + // the webhook's server certificate. + // Required. + CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` } // ServiceReference holds a reference to Service.legacy.k8s.io @@ -550,10 +303,4 @@ type ServiceReference struct { // this service. // +optional Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - - // If specified, the port on the service that hosting webhook. - // Default to 443 for backward compatibility. - // `port` should be a valid port number (1-65535, inclusive). 
- // +optional - Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` } diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go index d9fb5af..aab917a 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go @@ -27,28 +27,9 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_MutatingWebhook = map[string]string{ - "": "MutatingWebhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", - "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", - "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", - "reinvocationPolicy": "reinvocationPolicy indicates whether this webhook should be called multiple times as part of a single admission evaluation. Allowed values are \"Never\" and \"IfNeeded\".\n\nNever: the webhook will not be called more than once in a single admission evaluation.\n\nIfNeeded: the webhook will be called at least one additional time as part of the admission evaluation if the object being admitted is modified by other admission plugins after the initial webhook call. Webhooks that specify this option *must* be idempotent, able to process objects they previously admitted. Note: * the number of additional invocations is not guaranteed to be exactly one. 
* if additional invocations result in further modifications to the object, webhooks are not guaranteed to be invoked again. * webhooks that use this option may be reordered to minimize the number of additional invocations. * to validate an object after all mutations are guaranteed complete, use a validating admission webhook instead.\n\nDefaults to \"Never\".", -} - -func (MutatingWebhook) SwaggerDoc() map[string]string { - return map_MutatingWebhook -} - var map_MutatingWebhookConfiguration = map[string]string{ - "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. Deprecated in v1.16, planned for removal in v1.19. Use admissionregistration.k8s.io/v1 MutatingWebhookConfiguration instead.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -58,7 +39,7 @@ func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_MutatingWebhookConfigurationList = map[string]string{ "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of MutatingWebhookConfiguration.", } @@ -71,7 +52,6 @@ var map_Rule = map[string]string{ "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", - "scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".", } func (Rule) SwaggerDoc() map[string]string { @@ -92,34 +72,15 @@ var map_ServiceReference = map[string]string{ "namespace": "`namespace` is the namespace of the service. Required", "name": "`name` is the name of the service. 
Required", "path": "`path` is an optional URL path which will be sent in any request to this service.", - "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", } func (ServiceReference) SwaggerDoc() map[string]string { return map_ServiceReference } -var map_ValidatingWebhook = map[string]string{ - "": "ValidatingWebhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "matchPolicy": "matchPolicy defines how the \"rules\" list is used to match incoming requests. Allowed values are \"Exact\" or \"Equivalent\".\n\n- Exact: match a request only if it exactly matches a specified rule. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, but \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would not be sent to the webhook.\n\n- Equivalent: match a request if modifies a resource listed in rules, even via another API group or version. For example, if deployments can be modified via apps/v1, apps/v1beta1, and extensions/v1beta1, and \"rules\" only included `apiGroups:[\"apps\"], apiVersions:[\"v1\"], resources: [\"deployments\"]`, a request to apps/v1beta1 or extensions/v1beta1 would be converted to apps/v1 and sent to the webhook.\n\nDefaults to \"Exact\"", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "objectSelector": "ObjectSelector decides whether to run the webhook based on if the object has matching labels. objectSelector is evaluated against both the oldObject and newObject that would be sent to the webhook, and is considered to match if either object matches the selector. A null object (oldObject in the case of create, or newObject in the case of delete) or an object that cannot have labels (like a DeploymentRollback or a PodProxyOptions object) is not considered to match. Use the object selector only if the webhook is opt-in, because end users may skip the admission webhook by setting the labels. Default to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", - "timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Default to 30 seconds.", - "admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. API server will try to use first version in the list which it supports. If none of the versions specified in this list supported by API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Default to `['v1beta1']`.", -} - -func (ValidatingWebhook) SwaggerDoc() map[string]string { - return map_ValidatingWebhook -} - var map_ValidatingWebhookConfiguration = map[string]string{ - "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. Deprecated in v1.16, planned for removal in v1.19. 
Use admissionregistration.k8s.io/v1 ValidatingWebhookConfiguration instead.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", + "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", } @@ -129,7 +90,7 @@ func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { var map_ValidatingWebhookConfigurationList = map[string]string{ "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ValidatingWebhookConfiguration.", } @@ -137,11 +98,25 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { return map_ValidatingWebhookConfigurationList } +var map_Webhook = map[string]string{ + "": "Webhook describes an admission webhook and the resources and operations it applies to.", + "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", + "clientConfig": "ClientConfig defines how to communicate with the hook. Required", + "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", + "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", + "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", + "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", +} + +func (Webhook) SwaggerDoc() map[string]string { + return map_Webhook +} + var map_WebhookClientConfig = map[string]string{ "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", - "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", - "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", + "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. 
The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", + "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", + "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.", } func (WebhookClientConfig) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go index c4570d0..c6867be 100644 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go @@ -25,70 +25,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MutatingWebhook) DeepCopyInto(out *MutatingWebhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.MatchPolicy != nil { - in, out := &in.MatchPolicy, &out.MatchPolicy - *out = new(MatchPolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ObjectSelector != nil { - in, out := &in.ObjectSelector, &out.ObjectSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - if in.AdmissionReviewVersions != nil { - in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ReinvocationPolicy != nil { - in, out := &in.ReinvocationPolicy, &out.ReinvocationPolicy - *out = new(ReinvocationPolicyType) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhook. 
-func (in *MutatingWebhook) DeepCopy() *MutatingWebhook { - if in == nil { - return nil - } - out := new(MutatingWebhook) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { *out = *in @@ -96,7 +32,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfigu in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]MutatingWebhook, len(*in)) + *out = make([]Webhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -126,7 +62,7 @@ func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]MutatingWebhookConfiguration, len(*in)) @@ -173,11 +109,6 @@ func (in *Rule) DeepCopyInto(out *Rule) { *out = make([]string, len(*in)) copy(*out, *in) } - if in.Scope != nil { - in, out := &in.Scope, &out.Scope - *out = new(ScopeType) - **out = **in - } return } @@ -221,11 +152,6 @@ func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { *out = new(string) **out = **in } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int32) - **out = **in - } return } @@ -239,65 +165,6 @@ func (in *ServiceReference) DeepCopy() *ServiceReference { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValidatingWebhook) DeepCopyInto(out *ValidatingWebhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.MatchPolicy != nil { - in, out := &in.MatchPolicy, &out.MatchPolicy - *out = new(MatchPolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ObjectSelector != nil { - in, out := &in.ObjectSelector, &out.ObjectSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - if in.AdmissionReviewVersions != nil { - in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhook. -func (in *ValidatingWebhook) DeepCopy() *ValidatingWebhook { - if in == nil { - return nil - } - out := new(ValidatingWebhook) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { *out = *in @@ -305,7 +172,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookCon in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) if in.Webhooks != nil { in, out := &in.Webhooks, &out.Webhooks - *out = make([]ValidatingWebhook, len(*in)) + *out = make([]Webhook, len(*in)) for i := range *in { (*in)[i].DeepCopyInto(&(*out)[i]) } @@ -335,7 +202,7 @@ func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ValidatingWebhookConfiguration, len(*in)) @@ -364,6 +231,45 @@ func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { return nil } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Webhook) DeepCopyInto(out *Webhook) { + *out = *in + in.ClientConfig.DeepCopyInto(&out.ClientConfig) + if in.Rules != nil { + in, out := &in.Rules, &out.Rules + *out = make([]RuleWithOperations, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.FailurePolicy != nil { + in, out := &in.FailurePolicy, &out.FailurePolicy + *out = new(FailurePolicyType) + **out = **in + } + if in.NamespaceSelector != nil { + in, out := &in.NamespaceSelector, &out.NamespaceSelector + *out = new(v1.LabelSelector) + (*in).DeepCopyInto(*out) + } + if in.SideEffects != nil { + in, out := &in.SideEffects, &out.SideEffects + *out = new(SideEffectClass) + **out = **in + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. +func (in *Webhook) DeepCopy() *Webhook { + if in == nil { + return nil + } + out := new(Webhook) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { *out = *in diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go index 61dc97b..1d66c22 100644 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ b/vendor/k8s.io/api/apps/v1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go index 6ef25f5..eac6ef2 100644 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1/generated.pb.go @@ -14,28 +14,61 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto + + It has these top-level messages: + ControllerRevision + ControllerRevisionList + DaemonSet + DaemonSetCondition + DaemonSetList + DaemonSetSpec + DaemonSetStatus + DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentSpec + DeploymentStatus + DeploymentStrategy + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus + RollingUpdateDaemonSet + RollingUpdateDeployment + RollingUpdateStatefulSetStrategy + StatefulSet + StatefulSetCondition + StatefulSetList + StatefulSetSpec + StatefulSetStatus + StatefulSetUpdateStrategy +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import strings "strings" +import reflect "reflect" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -46,791 +79,125 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{0} -} -func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ControllerRevision) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerRevision.Merge(m, src) -} -func (m *ControllerRevision) XXX_Size() int { - return m.Size() -} -func (m *ControllerRevision) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerRevision.DiscardUnknown(m) -} +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{1} -} -func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerRevisionList.Merge(m, src) -} -func (m *ControllerRevisionList) XXX_Size() int { - return m.Size() -} -func (m *ControllerRevisionList) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) -} +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{2} -} -func (m *DaemonSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSet.Merge(m, src) -} -func (m *DaemonSet) XXX_Size() int { - return m.Size() -} -func (m *DaemonSet) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSet.DiscardUnknown(m) -} +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_DaemonSet proto.InternalMessageInfo +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{3} -} -func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetCondition.Merge(m, src) -} -func (m *DaemonSetCondition) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) -} +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func 
(*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{4} -} -func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetList.Merge(m, src) -} -func (m *DaemonSetList) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetList) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetList.DiscardUnknown(m) -} +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{5} -} -func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetSpec.Merge(m, src) -} -func (m *DaemonSetSpec) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) -} +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{6} -} -func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetStatus.Merge(m, src) -} -func (m *DaemonSetStatus) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) -} +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) 
ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{7} -} -func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) -} -func (m *DaemonSetUpdateStrategy) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo - -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{8} -} -func (m *Deployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Deployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Deployment.Merge(m, src) -} -func (m *Deployment) XXX_Size() int { - return m.Size() -} -func (m *Deployment) XXX_DiscardUnknown() { - xxx_messageInfo_Deployment.DiscardUnknown(m) -} +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -var xxx_messageInfo_Deployment proto.InternalMessageInfo +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{9} -} -func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentCondition.Merge(m, src) -} -func (m *DeploymentCondition) XXX_Size() int { - return m.Size() -} -func (m *DeploymentCondition) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) -} +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{17} } -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{10} -} -func (m *DeploymentList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentList) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentList.Merge(m, src) -} -func (m *DeploymentList) XXX_Size() int { - return m.Size() -} -func (m *DeploymentList) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentList.DiscardUnknown(m) -} +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } -var xxx_messageInfo_DeploymentList proto.InternalMessageInfo - -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{11} -} -func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentSpec.Merge(m, src) -} -func (m *DeploymentSpec) XXX_Size() int { - return m.Size() -} -func (m *DeploymentSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo - -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{12} -} -func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentStatus.Merge(m, src) -} -func (m *DeploymentStatus) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{13} -} -func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentStrategy.Merge(m, src) -} -func (m *DeploymentStrategy) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStrategy) XXX_DiscardUnknown() { - 
xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{14} -} -func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSet.Merge(m, src) -} -func (m *ReplicaSet) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSet) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSet.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo - -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{15} -} -func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetCondition.Merge(m, src) -} -func (m *ReplicaSetCondition) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo - -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{16} -} -func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetList.Merge(m, src) -} -func (m *ReplicaSetList) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetList) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo - -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{17} -} -func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetSpec.Merge(m, src) -} -func (m *ReplicaSetSpec) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo - -func (m *ReplicaSetStatus) Reset() { *m = 
ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{18} -} -func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetStatus.Merge(m, src) -} -func (m *ReplicaSetStatus) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo - -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{19} -} -func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) -} -func (m *RollingUpdateDaemonSet) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) -} - -var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{20} -} -func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return fileDescriptorGenerated, []int{20} } -func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) -} -func (m *RollingUpdateDeployment) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{21} -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, 
err - } - return b[:n], nil -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo - -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{22} + return fileDescriptorGenerated, []int{21} } -func (m *StatefulSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSet.Merge(m, src) -} -func (m *StatefulSet) XXX_Size() int { - return m.Size() -} -func (m *StatefulSet) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSet.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSet proto.InternalMessageInfo -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{23} -} -func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetCondition.Merge(m, src) -} -func (m *StatefulSetCondition) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) -} +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } -var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{24} -} -func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetList.Merge(m, src) -} -func (m *StatefulSetList) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetList) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetList.DiscardUnknown(m) -} +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func 
(*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } -var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo - -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{25} -} -func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetSpec.Merge(m, src) -} -func (m *StatefulSetSpec) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo - -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{26} -} -func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetStatus.Merge(m, src) -} -func (m *StatefulSetStatus) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo - -func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } -func (*StatefulSetUpdateStrategy) ProtoMessage() {} -func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_e1014cab6f31e43b, []int{27} -} -func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) -} -func (m *StatefulSetUpdateStrategy) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) -} +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } -var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } + +func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } +func (*StatefulSetUpdateStrategy) ProtoMessage() {} +func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{27} +} func init() { 
proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") @@ -862,146 +229,10 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptor_e1014cab6f31e43b) -} - -var fileDescriptor_e1014cab6f31e43b = []byte{ - // 2031 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, - 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, - 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, - 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, - 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, - 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, - 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x38, 0x24, 0xca, 0xcd, 0x53, 0xf5, 0x7e, 0xaf, 0x7f, 0x55, - 0xf5, 0xab, 0x7a, 0xaf, 0xab, 0x0d, 0xee, 0x1d, 0x7f, 0xdb, 0x53, 0x75, 0xbb, 0x7d, 0xec, 0x1f, - 0x12, 0xd7, 0x22, 0x94, 0x78, 0xed, 0x21, 0xb1, 0x34, 0xdb, 0x6d, 0x8b, 0x0e, 0xec, 0xe8, 0x6d, - 0xec, 0x38, 0x5e, 0x7b, 0x78, 0xbb, 0x3d, 0x20, 0x16, 0x71, 0x31, 0x25, 0x9a, 0xea, 0xb8, 0x36, - 0xb5, 0x21, 0x0c, 0x30, 0x2a, 0x76, 0x74, 0x95, 0x61, 0xd4, 0xe1, 0xed, 0xab, 0xb7, 0x06, 0x3a, - 0x3d, 0xf2, 0x0f, 0xd5, 0xbe, 0x6d, 0xb6, 0x07, 0xf6, 0xc0, 0x6e, 0x73, 0xe8, 0xa1, 0xff, 0x88, - 0xff, 0xe2, 0x3f, 0xf8, 0x5f, 0x01, 0xc5, 0xd5, 0x56, 0xe2, 0x31, 0x7d, 0xdb, 0x25, 0x92, 0xc7, - 0x5c, 0xbd, 0x1b, 0x63, 0x4c, 0xdc, 0x3f, 0xd2, 0x2d, 0xe2, 0x8e, 0xda, 0xce, 0xf1, 0x80, 0x35, - 0x78, 0x6d, 0x93, 0x50, 0x2c, 0x8b, 0x6a, 0x17, 0x45, 0xb9, 0xbe, 0x45, 0x75, 0x93, 0xe4, 0x02, - 0x5e, 0x9b, 0x14, 0xe0, 0xf5, 0x8f, 0x88, 0x89, 0x73, 0x71, 0xaf, 0x14, 0xc5, 0xf9, 0x54, 0x37, - 0xda, 0xba, 0x45, 0x3d, 0xea, 0x66, 0x83, 0x5a, 0xff, 0x51, 0x00, 0xec, 0xda, 0x16, 0x75, 0x6d, - 0xc3, 0x20, 0x2e, 0x22, 0x43, 0xdd, 0xd3, 0x6d, 0x0b, 0xfe, 0x14, 0xd4, 0xd8, 0x78, 0x34, 0x4c, - 0x71, 0x5d, 0xb9, 0xae, 0x6c, 0x2c, 0xdc, 0xf9, 0x96, 0x1a, 0x4f, 0x72, 0x44, 0xaf, 0x3a, 0xc7, - 0x03, 0xd6, 0xe0, 0xa9, 0x0c, 0xad, 0x0e, 0x6f, 0xab, 0xbb, 0x87, 0xef, 0x92, 0x3e, 0xed, 0x11, - 0x8a, 0x3b, 0xf0, 0xc9, 0x49, 0x73, 0x66, 0x7c, 0xd2, 0x04, 0x71, 0x1b, 0x8a, 0x58, 0xe1, 0x2e, - 0xa8, 0x70, 0xf6, 0x12, 0x67, 0xbf, 0x55, 0xc8, 0x2e, 0x06, 0xad, 0x22, 0xfc, 0xb3, 0x37, 0x1e, - 0x53, 0x62, 0xb1, 0xf4, 0x3a, 0x97, 0x04, 0x75, 0x65, 0x0b, 0x53, 0x8c, 0x38, 0x11, 0x7c, 0x19, - 0xd4, 0x5c, 0x91, 0x7e, 0xbd, 0x7c, 0x5d, 0xd9, 0x28, 0x77, 0x2e, 0x0b, 0x54, 0x2d, 0x1c, 0x16, - 0x8a, 0x10, 0xad, 0xbf, 0x2a, 0x60, 0x2d, 0x3f, 0xee, 0x6d, 0xdd, 0xa3, 0xf0, 0x9d, 0xdc, 0xd8, - 0xd5, 0xe9, 0xc6, 0xce, 0xa2, 0xf9, 0xc8, 0xa3, 0x07, 0x87, 0x2d, 0x89, 0x71, 0xbf, 0x0d, 0xaa, - 0x3a, 0x25, 0xa6, 0x57, 0x2f, 0x5d, 0x2f, 0x6f, 0x2c, 0xdc, 0xb9, 0xa1, 0xe6, 0x6b, 0x57, 0xcd, - 0x27, 0xd6, 0x59, 0x14, 0x94, 0xd5, 0xb7, 0x58, 0x30, 0x0a, 0x38, 0x5a, 0xff, 0x55, 0xc0, 0xfc, - 0x16, 0x26, 0xa6, 0x6d, 0xed, 0x13, 0x7a, 0x01, 0x8b, 0xd6, 0x05, 0x15, 0xcf, 0x21, 0x7d, 0xb1, - 0x68, 0x5f, 0x93, 0xe5, 0x1e, 0xa5, 0xb3, 0xef, 0x90, 0x7e, 0xbc, 0x50, 0xec, 0x17, 0xe2, 0xc1, - 0xf0, 0x6d, 0x30, 0xeb, 0x51, 0x4c, 0x7d, 0x8f, 0x2f, 0xd3, 
0xc2, 0x9d, 0xaf, 0x9f, 0x4e, 0xc3, - 0xa1, 0x9d, 0x25, 0x41, 0x34, 0x1b, 0xfc, 0x46, 0x82, 0xa2, 0xf5, 0xaf, 0x12, 0x80, 0x11, 0xb6, - 0x6b, 0x5b, 0x9a, 0x4e, 0x59, 0xfd, 0xbe, 0x0e, 0x2a, 0x74, 0xe4, 0x10, 0x3e, 0x0d, 0xf3, 0x9d, - 0x1b, 0x61, 0x16, 0xf7, 0x47, 0x0e, 0xf9, 0xf8, 0xa4, 0xb9, 0x96, 0x8f, 0x60, 0x3d, 0x88, 0xc7, - 0xc0, 0xed, 0x28, 0xbf, 0x12, 0x8f, 0xbe, 0x9b, 0x7e, 0xf4, 0xc7, 0x27, 0x4d, 0xc9, 0x61, 0xa1, - 0x46, 0x4c, 0xe9, 0x04, 0xe1, 0x10, 0x40, 0x03, 0x7b, 0xf4, 0xbe, 0x8b, 0x2d, 0x2f, 0x78, 0x92, - 0x6e, 0x12, 0x31, 0xf2, 0x97, 0xa6, 0x5b, 0x1e, 0x16, 0xd1, 0xb9, 0x2a, 0xb2, 0x80, 0xdb, 0x39, - 0x36, 0x24, 0x79, 0x02, 0xbc, 0x01, 0x66, 0x5d, 0x82, 0x3d, 0xdb, 0xaa, 0x57, 0xf8, 0x28, 0xa2, - 0x09, 0x44, 0xbc, 0x15, 0x89, 0x5e, 0xf8, 0x22, 0x98, 0x33, 0x89, 0xe7, 0xe1, 0x01, 0xa9, 0x57, - 0x39, 0x70, 0x59, 0x00, 0xe7, 0x7a, 0x41, 0x33, 0x0a, 0xfb, 0x5b, 0xbf, 0x57, 0xc0, 0x62, 0x34, - 0x73, 0x17, 0xb0, 0x55, 0x3a, 0xe9, 0xad, 0xf2, 0xfc, 0xa9, 0x75, 0x52, 0xb0, 0x43, 0xde, 0x2b, - 0x27, 0x72, 0x66, 0x45, 0x08, 0x7f, 0x02, 0x6a, 0x1e, 0x31, 0x48, 0x9f, 0xda, 0xae, 0xc8, 0xf9, - 0x95, 0x29, 0x73, 0xc6, 0x87, 0xc4, 0xd8, 0x17, 0xa1, 0x9d, 0x4b, 0x2c, 0xe9, 0xf0, 0x17, 0x8a, - 0x28, 0xe1, 0x8f, 0x40, 0x8d, 0x12, 0xd3, 0x31, 0x30, 0x25, 0x62, 0x9b, 0xa4, 0xea, 0x9b, 0x95, - 0x0b, 0x23, 0xdb, 0xb3, 0xb5, 0xfb, 0x02, 0xc6, 0x37, 0x4a, 0x34, 0x0f, 0x61, 0x2b, 0x8a, 0x68, - 0xe0, 0x31, 0x58, 0xf2, 0x1d, 0x8d, 0x21, 0x29, 0x3b, 0xba, 0x07, 0x23, 0x51, 0x3e, 0x37, 0x4f, - 0x9d, 0x90, 0x83, 0x54, 0x48, 0x67, 0x4d, 0x3c, 0x60, 0x29, 0xdd, 0x8e, 0x32, 0xd4, 0x70, 0x13, - 0x2c, 0x9b, 0xba, 0x85, 0x08, 0xd6, 0x46, 0xfb, 0xa4, 0x6f, 0x5b, 0x9a, 0xc7, 0x0b, 0xa8, 0xda, - 0x59, 0x17, 0x04, 0xcb, 0xbd, 0x74, 0x37, 0xca, 0xe2, 0xe1, 0x36, 0x58, 0x0d, 0xcf, 0xd9, 0x1f, - 0xe8, 0x1e, 0xb5, 0xdd, 0xd1, 0xb6, 0x6e, 0xea, 0xb4, 0x3e, 0xcb, 0x79, 0xea, 0xe3, 0x93, 0xe6, - 0x2a, 0x92, 0xf4, 0x23, 0x69, 0x54, 0xeb, 0x37, 0xb3, 0x60, 0x39, 0x73, 0x1a, 0xc0, 0x07, 0x60, - 0xad, 0xef, 0xbb, 0x2e, 0xb1, 0xe8, 0x8e, 0x6f, 0x1e, 0x12, 0x77, 0xbf, 0x7f, 0x44, 0x34, 0xdf, - 0x20, 0x1a, 0x5f, 0xd1, 0x6a, 0xa7, 0x21, 0x72, 0x5d, 0xeb, 0x4a, 0x51, 0xa8, 0x20, 0x1a, 0xfe, - 0x10, 0x40, 0x8b, 0x37, 0xf5, 0x74, 0xcf, 0x8b, 0x38, 0x4b, 0x9c, 0x33, 0xda, 0x80, 0x3b, 0x39, - 0x04, 0x92, 0x44, 0xb1, 0x1c, 0x35, 0xe2, 0xe9, 0x2e, 0xd1, 0xb2, 0x39, 0x96, 0xd3, 0x39, 0x6e, - 0x49, 0x51, 0xa8, 0x20, 0x1a, 0xbe, 0x0a, 0x16, 0x82, 0xa7, 0xf1, 0x39, 0x17, 0x8b, 0xb3, 0x22, - 0xc8, 0x16, 0x76, 0xe2, 0x2e, 0x94, 0xc4, 0xb1, 0xa1, 0xd9, 0x87, 0x1e, 0x71, 0x87, 0x44, 0x7b, - 0x33, 0xf0, 0x00, 0x4c, 0x28, 0xab, 0x5c, 0x28, 0xa3, 0xa1, 0xed, 0xe6, 0x10, 0x48, 0x12, 0xc5, - 0x86, 0x16, 0x54, 0x4d, 0x6e, 0x68, 0xb3, 0xe9, 0xa1, 0x1d, 0x48, 0x51, 0xa8, 0x20, 0x9a, 0xd5, - 0x5e, 0x90, 0xf2, 0xe6, 0x10, 0xeb, 0x06, 0x3e, 0x34, 0x48, 0x7d, 0x2e, 0x5d, 0x7b, 0x3b, 0xe9, - 0x6e, 0x94, 0xc5, 0xc3, 0x37, 0xc1, 0x95, 0xa0, 0xe9, 0xc0, 0xc2, 0x11, 0x49, 0x8d, 0x93, 0x3c, - 0x27, 0x48, 0xae, 0xec, 0x64, 0x01, 0x28, 0x1f, 0x03, 0x5f, 0x07, 0x4b, 0x7d, 0xdb, 0x30, 0x78, - 0x3d, 0x76, 0x6d, 0xdf, 0xa2, 0xf5, 0x79, 0xce, 0x02, 0xd9, 0x1e, 0xea, 0xa6, 0x7a, 0x50, 0x06, - 0x09, 0x1f, 0x02, 0xd0, 0x0f, 0xe5, 0xc0, 0xab, 0x83, 0x62, 0xa1, 0xcf, 0xeb, 0x50, 0x2c, 0xc0, - 0x51, 0x93, 0x87, 0x12, 0x6c, 0xad, 0xf7, 0x14, 0xb0, 0x5e, 0xb0, 0xc7, 0xe1, 0xf7, 0x52, 0xaa, - 0x77, 0x33, 0xa3, 0x7a, 0xd7, 0x0a, 0xc2, 0x12, 0xd2, 0xd7, 0x07, 0x8b, 0xcc, 0x77, 0xe8, 0xd6, - 0x20, 0x80, 0x88, 0x13, 0xec, 0x25, 0x59, 0xee, 0x28, 0x09, 0x8c, 0x8f, 0xe1, 0x2b, 
0xe3, 0x93, - 0xe6, 0x62, 0xaa, 0x0f, 0xa5, 0x39, 0x5b, 0xbf, 0x2c, 0x01, 0xb0, 0x45, 0x1c, 0xc3, 0x1e, 0x99, - 0xc4, 0xba, 0x08, 0xd7, 0xb2, 0x95, 0x72, 0x2d, 0x2d, 0xe9, 0x42, 0x44, 0xf9, 0x14, 0xda, 0x96, - 0xed, 0x8c, 0x6d, 0xf9, 0xc6, 0x04, 0x9e, 0xd3, 0x7d, 0xcb, 0x3f, 0xca, 0x60, 0x25, 0x06, 0xc7, - 0xc6, 0xe5, 0x5e, 0x6a, 0x09, 0x5f, 0xc8, 0x2c, 0xe1, 0xba, 0x24, 0xe4, 0x53, 0x73, 0x2e, 0xef, - 0x82, 0x25, 0xe6, 0x2b, 0x82, 0x55, 0xe3, 0xae, 0x65, 0xf6, 0xcc, 0xae, 0x25, 0x52, 0x9d, 0xed, - 0x14, 0x13, 0xca, 0x30, 0x17, 0xb8, 0xa4, 0xb9, 0xcf, 0xa3, 0x4b, 0xfa, 0x83, 0x02, 0x96, 0xe2, - 0x65, 0xba, 0x00, 0x9b, 0xd4, 0x4d, 0xdb, 0xa4, 0xc6, 0xe9, 0x75, 0x59, 0xe0, 0x93, 0xfe, 0x5e, - 0x49, 0x66, 0xcd, 0x8d, 0xd2, 0x06, 0x7b, 0xa1, 0x72, 0x0c, 0xbd, 0x8f, 0x3d, 0x21, 0xab, 0x97, - 0x82, 0x97, 0xa9, 0xa0, 0x0d, 0x45, 0xbd, 0x29, 0x4b, 0x55, 0xfa, 0x74, 0x2d, 0x55, 0xf9, 0x93, - 0xb1, 0x54, 0xf7, 0x41, 0xcd, 0x0b, 0xcd, 0x54, 0x85, 0x53, 0xde, 0x98, 0xb4, 0x9d, 0x85, 0x8f, - 0x8a, 0x58, 0x23, 0x07, 0x15, 0x31, 0xc9, 0xbc, 0x53, 0xf5, 0xb3, 0xf4, 0x4e, 0xac, 0xbc, 0x1d, - 0xec, 0x7b, 0x44, 0xe3, 0x5b, 0xa9, 0x16, 0x97, 0xf7, 0x1e, 0x6f, 0x45, 0xa2, 0x17, 0x1e, 0x80, - 0x75, 0xc7, 0xb5, 0x07, 0x2e, 0xf1, 0xbc, 0x2d, 0x82, 0x35, 0x43, 0xb7, 0x48, 0x38, 0x80, 0x40, - 0xf5, 0xae, 0x8d, 0x4f, 0x9a, 0xeb, 0x7b, 0x72, 0x08, 0x2a, 0x8a, 0x6d, 0xfd, 0xb9, 0x02, 0x2e, - 0x67, 0x4f, 0xc4, 0x02, 0x23, 0xa2, 0x9c, 0xcb, 0x88, 0xbc, 0x9c, 0x28, 0xd1, 0xc0, 0xa5, 0x25, - 0xde, 0xf9, 0x73, 0x65, 0xba, 0x09, 0x96, 0x85, 0xf1, 0x08, 0x3b, 0x85, 0x15, 0x8b, 0x96, 0xe7, - 0x20, 0xdd, 0x8d, 0xb2, 0x78, 0x78, 0x0f, 0x2c, 0xba, 0xdc, 0x5b, 0x85, 0x04, 0x81, 0x3f, 0xf9, - 0x8a, 0x20, 0x58, 0x44, 0xc9, 0x4e, 0x94, 0xc6, 0x32, 0x6f, 0x12, 0x5b, 0x8e, 0x90, 0xa0, 0x92, - 0xf6, 0x26, 0x9b, 0x59, 0x00, 0xca, 0xc7, 0xc0, 0x1e, 0x58, 0xf1, 0xad, 0x3c, 0x55, 0x50, 0x6b, - 0xd7, 0x04, 0xd5, 0xca, 0x41, 0x1e, 0x82, 0x64, 0x71, 0xf0, 0xc7, 0x29, 0xbb, 0x32, 0xcb, 0x4f, - 0x91, 0x17, 0x4e, 0xdf, 0x0e, 0x53, 0xfb, 0x15, 0x89, 0x8f, 0xaa, 0x4d, 0xeb, 0xa3, 0x5a, 0x7f, - 0x52, 0x00, 0xcc, 0x6f, 0xc1, 0x89, 0x2f, 0xf7, 0xb9, 0x88, 0x84, 0x44, 0x6a, 0x72, 0x87, 0x73, - 0x73, 0xb2, 0xc3, 0x89, 0x4f, 0xd0, 0xe9, 0x2c, 0x8e, 0x98, 0xde, 0x8b, 0xb9, 0x98, 0x99, 0xc2, - 0xe2, 0xc4, 0xf9, 0x3c, 0x9b, 0xc5, 0x49, 0xf0, 0x9c, 0x6e, 0x71, 0xfe, 0x5d, 0x02, 0x2b, 0x31, - 0x78, 0x6a, 0x8b, 0x23, 0x09, 0xf9, 0xf2, 0x72, 0x66, 0x3a, 0xdb, 0x11, 0x4f, 0xdd, 0xff, 0x89, - 0xed, 0x88, 0x13, 0x2a, 0xb0, 0x1d, 0xbf, 0x2b, 0x25, 0xb3, 0x3e, 0xa3, 0xed, 0xf8, 0x04, 0xae, - 0x2a, 0x3e, 0x77, 0xce, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, 0x29, 0x1d, 0x54, 0x26, - 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x8f, 0x21, 0x21, 0x86, 0x81, 0x82, 0x7e, - 0x55, 0x44, 0xae, 0x7e, 0x5f, 0x82, 0x41, 0xd2, 0xc8, 0xbc, 0x2c, 0x56, 0x9e, 0x55, 0x16, 0xab, - 0xe7, 0x90, 0x45, 0xb9, 0xb3, 0x28, 0x9f, 0xcb, 0x59, 0x4c, 0xad, 0x89, 0x92, 0xe3, 0x6a, 0xe2, - 0x3b, 0xfc, 0xaf, 0x15, 0xb0, 0x26, 0x7f, 0x7d, 0x86, 0x06, 0x58, 0x32, 0xf1, 0xe3, 0xe4, 0xe5, - 0xc5, 0x24, 0xc1, 0xf0, 0xa9, 0x6e, 0xa8, 0xc1, 0xd7, 0x1d, 0xf5, 0x2d, 0x8b, 0xee, 0xba, 0xfb, - 0xd4, 0xd5, 0xad, 0x41, 0x20, 0xb0, 0xbd, 0x14, 0x17, 0xca, 0x70, 0xb7, 0x3e, 0x54, 0xc0, 0x7a, - 0x81, 0xca, 0x5d, 0x6c, 0x26, 0xf0, 0x21, 0xa8, 0x99, 0xf8, 0xf1, 0xbe, 0xef, 0x0e, 0x42, 0x49, - 0x3e, 0xfb, 0x73, 0xf8, 0x2e, 0xec, 0x09, 0x16, 0x14, 0xf1, 0xb5, 0x76, 0xc1, 0xf5, 0xd4, 0x20, - 0xd9, 0xa6, 0x21, 0x8f, 0x7c, 0x83, 0xef, 0x1f, 0xe1, 0x29, 0x6e, 0x82, 0x79, 0x07, 0xbb, 0x54, - 0x8f, 0xcc, 
0x68, 0xb5, 0xb3, 0x38, 0x3e, 0x69, 0xce, 0xef, 0x85, 0x8d, 0x28, 0xee, 0x6f, 0xfd, - 0xaa, 0x04, 0x16, 0x12, 0x24, 0x17, 0xa0, 0xef, 0x6f, 0xa4, 0xf4, 0x5d, 0xfa, 0xc5, 0x24, 0x39, - 0xaa, 0x22, 0x81, 0xef, 0x65, 0x04, 0xfe, 0x9b, 0x93, 0x88, 0x4e, 0x57, 0xf8, 0x8f, 0x4a, 0x60, - 0x35, 0x81, 0x8e, 0x25, 0xfe, 0x3b, 0x29, 0x89, 0xdf, 0xc8, 0x48, 0x7c, 0x5d, 0x16, 0xf3, 0xa5, - 0xc6, 0x4f, 0xd6, 0xf8, 0x3f, 0x2a, 0x60, 0x39, 0x31, 0x77, 0x17, 0x20, 0xf2, 0x5b, 0x69, 0x91, - 0x6f, 0x4e, 0xa8, 0x97, 0x02, 0x95, 0x7f, 0x52, 0x4d, 0xe5, 0xfd, 0x85, 0xbf, 0x5d, 0xf8, 0x39, - 0x58, 0x1d, 0xda, 0x86, 0x6f, 0x92, 0xae, 0x81, 0x75, 0x33, 0x04, 0x30, 0x55, 0x64, 0x93, 0xf8, - 0xa2, 0x94, 0x9e, 0xb8, 0x9e, 0xee, 0x51, 0x62, 0xd1, 0x07, 0x71, 0x64, 0xac, 0xc5, 0x0f, 0x24, - 0x74, 0x48, 0xfa, 0x10, 0xf8, 0x2a, 0x58, 0x60, 0x6a, 0xa6, 0xf7, 0xc9, 0x0e, 0x36, 0xc3, 0x9a, - 0x8a, 0xbe, 0x0f, 0xec, 0xc7, 0x5d, 0x28, 0x89, 0x83, 0x47, 0x60, 0xc5, 0xb1, 0xb5, 0x1e, 0xb6, - 0xf0, 0x80, 0xb0, 0xf3, 0x7f, 0xcf, 0x36, 0xf4, 0xfe, 0x88, 0xdf, 0x3b, 0xcc, 0x77, 0x5e, 0x0b, - 0xdf, 0x29, 0xf7, 0xf2, 0x10, 0xe6, 0xd9, 0x25, 0xcd, 0x7c, 0x3f, 0xcb, 0x28, 0xa1, 0x99, 0xfb, - 0x9c, 0x35, 0x97, 0xfb, 0x1f, 0x00, 0x59, 0x71, 0x9d, 0xf3, 0x83, 0x56, 0xd1, 0x8d, 0x4a, 0xed, - 0x5c, 0x5f, 0xa3, 0x3e, 0xaa, 0x80, 0x2b, 0xb9, 0x03, 0xf2, 0x33, 0xbc, 0xd3, 0xc8, 0x39, 0xaf, - 0xf2, 0x19, 0x9c, 0xd7, 0x26, 0x58, 0x16, 0x1f, 0xc2, 0x32, 0xc6, 0x2d, 0x32, 0xd0, 0xdd, 0x74, - 0x37, 0xca, 0xe2, 0x65, 0x77, 0x2a, 0xd5, 0x33, 0xde, 0xa9, 0x24, 0xb3, 0x10, 0xff, 0xbf, 0x11, - 0x54, 0x5d, 0x3e, 0x0b, 0xf1, 0x6f, 0x1c, 0x59, 0x3c, 0xfc, 0x6e, 0x58, 0x52, 0x11, 0xc3, 0x1c, - 0x67, 0xc8, 0xd4, 0x48, 0x44, 0x90, 0x41, 0x3f, 0xd3, 0xc7, 0x9e, 0x77, 0x24, 0x1f, 0x7b, 0x36, - 0x26, 0x94, 0xf2, 0xf4, 0x56, 0xf1, 0x6f, 0x0a, 0x78, 0xae, 0x70, 0x0f, 0xc0, 0xcd, 0x94, 0xce, - 0xde, 0xca, 0xe8, 0xec, 0xf3, 0x85, 0x81, 0x09, 0xb1, 0x35, 0xe5, 0x17, 0x22, 0x77, 0x27, 0x5e, - 0x88, 0x48, 0x5c, 0xd4, 0xe4, 0x9b, 0x91, 0xce, 0xc6, 0x93, 0xa7, 0x8d, 0x99, 0xf7, 0x9f, 0x36, - 0x66, 0x3e, 0x78, 0xda, 0x98, 0xf9, 0xc5, 0xb8, 0xa1, 0x3c, 0x19, 0x37, 0x94, 0xf7, 0xc7, 0x0d, - 0xe5, 0x83, 0x71, 0x43, 0xf9, 0xe7, 0xb8, 0xa1, 0xfc, 0xf6, 0xc3, 0xc6, 0xcc, 0xc3, 0xd2, 0xf0, - 0xf6, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x59, 0xb3, 0x11, 0xc0, 0x12, 0x26, 0x00, 0x00, -} - func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1009,45 +240,36 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x18 - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1055,46 +277,37 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1102,52 +315,41 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n4 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, 
err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1155,52 +357,41 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1208,46 +399,37 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1255,62 +437,51 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m 
*DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x30 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n9, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n9 + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n10, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n10 dAtA[i] = 0x1a - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + i += n11 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1318,65 +489,58 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { 
dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x48 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - i-- - dAtA[i] = 0x40 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - i-- - dAtA[i] = 0x38 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - i-- - dAtA[i] = 0x30 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1384,39 +548,31 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1424,52 +580,41 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n13 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1477,62 +622,49 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - { - size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + return i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1540,46 +672,37 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i 
+= n18 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1587,80 +710,69 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ProgressDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) - i-- - dAtA[i] = 0x48 - } - i-- - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x30 + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x28 - { - size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n19 + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n20 dAtA[i] = 0x22 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) } - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { 
size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1668,59 +780,52 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x40 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x38 + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1728,39 +833,31 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return 
i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1768,52 +865,41 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n23 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + return i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1821,52 +907,41 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1874,46 +949,37 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1921,52 +987,43 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n28 } - i-- dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n29, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n29 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1974,51 +1031,44 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err 
error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x32 - } - } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2026,34 +1076,27 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 } - return len(dAtA) - i, nil + return i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2061,46 +1104,37 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i 
= encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n31 } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n32 } - return len(dAtA) - i, nil + return i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2108,27 +1142,22 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.Partition != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) } - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2136,52 +1165,41 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n33 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2189,52 +1207,41 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, 
error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2242,46 +1249,37 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n37, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2289,88 +1287,73 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x40 + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } - { - size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n38, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n38 } - i-- - dAtA[i] = 0x3a - i -= len(m.PodManagementPolicy) - copy(dAtA[i:], m.PodManagementPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i-- - dAtA[i] = 0x32 - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i-- - dAtA[i] = 0x2a + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n39, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 if len(m.VolumeClaimTemplates) > 0 { - for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 - } - } - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x12 } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2378,66 +1361,57 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x48 - } - i -= len(m.UpdateRevision) - copy(dAtA[i:], m.UpdateRevision) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i-- - dAtA[i] = 0x3a - i -= len(m.CurrentRevision) - copy(dAtA[i:], m.CurrentRevision) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i-- - dAtA[i] = 0x32 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2445,50 +1419,55 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + 
dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *ControllerRevision) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2500,9 +1479,6 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2517,9 +1493,6 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2532,9 +1505,6 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2551,9 +1521,6 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2568,9 +1535,6 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Selector != nil { @@ -2589,9 +1553,6 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -2615,9 +1576,6 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2630,9 +1588,6 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2645,9 +1600,6 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2666,9 +1618,6 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2683,9 +1632,6 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -2711,9 +1657,6 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -2735,9 +1678,6 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2750,9 +1690,6 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2765,9 +1702,6 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2784,9 +1718,6 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 - 
} var l int _ = l l = m.ListMeta.Size() @@ -2801,9 +1732,6 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -2820,9 +1748,6 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -2840,9 +1765,6 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxUnavailable != nil { @@ -2853,9 +1775,6 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxUnavailable != nil { @@ -2870,9 +1789,6 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Partition != nil { @@ -2882,9 +1798,6 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *StatefulSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2897,9 +1810,6 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2916,9 +1826,6 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2933,9 +1840,6 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -2966,9 +1870,6 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -2993,9 +1894,6 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -3008,7 +1906,14 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -3018,8 +1923,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -3029,14 +1934,9 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ControllerRevision{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3046,7 +1946,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3060,7 +1960,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3071,14 +1971,9 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]DaemonSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3088,8 +1983,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + 
strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -3101,11 +1996,6 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DaemonSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -3116,7 +2006,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3127,7 +2017,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -3137,7 +2027,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3153,8 +2043,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3163,14 +2053,9 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Deployment{" - for _, f := range 
this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3181,8 +2066,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -3196,18 +2081,13 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -3220,7 +2100,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -3230,7 +2110,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), 
`&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3244,7 +2124,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3255,14 +2135,9 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ReplicaSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3273,8 +2148,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -3284,18 +2159,13 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]ReplicaSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + 
repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3305,7 +2175,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -3315,8 +2185,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -3336,7 +2206,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3350,7 +2220,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3361,14 +2231,9 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]StatefulSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, `}`, }, "") 
return s @@ -3377,16 +2242,11 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } - repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" - for _, f := range this.VolumeClaimTemplates { - repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -3399,11 +2259,6 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]StatefulSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -3413,7 +2268,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3424,7 +2279,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -3452,7 +2307,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3480,7 +2335,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift 
if b < 0x80 { break } @@ -3489,9 +2344,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3513,7 +2365,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3522,9 +2374,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3546,7 +2395,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3560,9 +2409,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3590,7 +2436,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3618,7 +2464,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3627,9 +2473,6 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3651,7 +2494,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3660,9 +2503,6 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3680,9 +2520,6 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3710,7 +2547,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3738,7 +2575,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3747,9 +2584,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3771,7 +2605,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3780,9 +2614,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3804,7 +2635,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3813,9 +2644,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3832,9 +2660,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3862,7 +2687,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3890,7 +2715,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3900,9 +2725,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3922,7 +2744,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3932,9 +2754,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3954,7 +2773,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3963,9 +2782,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3987,7 +2803,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3997,9 +2813,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4019,7 +2832,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4029,9 +2842,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4046,9 +2856,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l 
{ return io.ErrUnexpectedEOF } @@ -4076,7 +2883,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4104,7 +2911,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4113,9 +2920,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4137,7 +2941,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4146,9 +2950,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4166,9 +2967,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4196,7 +2994,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4224,7 +3022,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4233,14 +3031,11 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4260,7 +3055,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4269,9 +3064,6 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4293,7 +3085,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4302,9 +3094,6 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4326,7 +3115,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4345,7 +3134,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4360,9 +3149,6 @@ func (m 
*DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4390,7 +3176,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4418,7 +3204,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= int32(b&0x7F) << shift + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4437,7 +3223,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= int32(b&0x7F) << shift + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4456,7 +3242,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= int32(b&0x7F) << shift + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4475,7 +3261,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= int32(b&0x7F) << shift + m.NumberReady |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4494,7 +3280,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4513,7 +3299,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= int32(b&0x7F) << shift + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4532,7 +3318,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= int32(b&0x7F) << shift + m.NumberAvailable |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4551,7 +3337,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= int32(b&0x7F) << shift + m.NumberUnavailable |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4570,7 +3356,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4590,7 +3376,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4599,9 +3385,6 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4619,9 +3402,6 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4649,7 +3429,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4677,7 +3457,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << 
shift if b < 0x80 { break } @@ -4687,9 +3467,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4709,7 +3486,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4718,9 +3495,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4740,9 +3514,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4770,7 +3541,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4798,7 +3569,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4807,9 +3578,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4831,7 +3599,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4840,9 +3608,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4864,7 +3629,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4873,9 +3638,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4892,9 +3654,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4922,7 +3681,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4950,7 +3709,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4960,9 +3719,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4982,7 +3738,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4992,9 +3748,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5014,7 +3767,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5024,9 +3777,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5046,7 +3796,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5056,9 +3806,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5078,7 +3825,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5087,9 +3834,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5111,7 +3855,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5120,9 +3864,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5139,9 +3880,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5169,7 +3907,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5197,7 +3935,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5206,9 +3944,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5230,7 +3965,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5239,9 +3974,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -5259,9 +3991,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5289,7 +4018,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5317,7 +4046,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5337,7 +4066,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5346,14 +4075,11 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5373,7 +4099,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5382,9 +4108,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5406,7 +4129,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5415,9 +4138,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5439,7 +4159,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5458,7 +4178,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5478,7 +4198,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5498,7 +4218,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5513,9 +4233,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5543,7 +4260,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5571,7 +4288,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5590,7 +4307,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5609,7 +4326,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5628,7 +4345,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5647,7 +4364,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5666,7 +4383,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5675,9 +4392,6 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5700,7 +4414,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5719,7 +4433,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5734,9 +4448,6 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5764,7 +4475,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5792,7 +4503,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5802,9 +4513,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5824,7 +4532,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5833,9 +4541,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5855,9 +4560,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5885,7 
+4587,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5913,7 +4615,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5922,9 +4624,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5946,7 +4645,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5955,9 +4654,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5979,7 +4675,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5988,9 +4684,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6007,9 +4700,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6037,7 +4727,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6065,7 +4755,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6075,9 +4765,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6097,7 +4784,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6107,9 +4794,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6129,7 +4813,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6138,9 +4822,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6162,7 +4843,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 
0x80 { break } @@ -6172,9 +4853,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6194,7 +4872,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6204,9 +4882,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6221,9 +4896,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6251,7 +4923,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6279,7 +4951,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6288,9 +4960,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6312,7 +4981,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6321,9 +4990,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6341,9 +5007,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6371,7 +5034,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6399,7 +5062,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6419,7 +5082,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6428,14 +5091,11 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6455,7 +5115,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 
0x7F) << shift if b < 0x80 { break } @@ -6464,9 +5124,6 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6488,7 +5145,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6502,9 +5159,6 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6532,7 +5186,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6560,7 +5214,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6579,7 +5233,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6598,7 +5252,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6617,7 +5271,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6636,7 +5290,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6655,7 +5309,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6664,9 +5318,6 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6684,9 +5335,6 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6714,7 +5362,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6742,7 +5390,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6751,14 +5399,11 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + m.MaxUnavailable = 
&k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6773,9 +5418,6 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6803,7 +5445,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6831,7 +5473,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6840,14 +5482,11 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6867,7 +5506,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6876,14 +5515,11 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6898,9 +5534,6 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6928,7 +5561,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6956,7 +5589,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6971,9 +5604,6 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7001,7 +5631,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7029,7 +5659,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7038,9 +5668,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -7062,7 +5689,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7071,9 +5698,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7095,7 +5719,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7104,9 +5728,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7123,9 +5744,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7153,7 +5771,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7181,7 +5799,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7191,9 +5809,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7213,7 +5828,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7223,9 +5838,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7245,7 +5857,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7254,9 +5866,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7278,7 +5887,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7288,9 +5897,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7310,7 +5916,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7320,9 +5926,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7337,9 +5940,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7367,7 +5967,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7395,7 +5995,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7404,9 +6004,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7428,7 +6025,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7437,9 +6034,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7457,9 +6051,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7487,7 +6078,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7515,7 +6106,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7535,7 +6126,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7544,14 +6135,11 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7571,7 +6159,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7580,9 +6168,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7604,7 +6189,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7613,13 +6198,10 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -7638,7 +6220,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7648,9 +6230,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7670,7 +6249,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7680,9 +6259,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7702,7 +6278,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7711,9 +6287,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7735,7 +6308,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7750,9 +6323,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7780,7 +6350,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7808,7 +6378,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7827,7 +6397,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7846,7 +6416,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7865,7 +6435,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= int32(b&0x7F) << shift + m.CurrentReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7884,7 +6454,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << 
shift + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7903,7 +6473,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7913,9 +6483,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7935,7 +6502,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7945,9 +6512,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7967,7 +6531,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7987,7 +6551,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7996,9 +6560,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8016,9 +6577,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8046,7 +6604,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8074,7 +6632,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8084,9 +6642,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8106,7 +6661,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8115,9 +6670,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8137,9 +6689,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8155,7 +6704,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire 
uint64 for shift := uint(0); ; shift += 7 { @@ -8187,8 +6735,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -8205,34 +6755,189 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2037 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, + 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, + 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, + 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, + 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, + 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, + 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x75, 0x50, 0x94, 0x9b, 0xa7, 0xea, 0xfd, 0x5e, 0xff, 0xaa, + 0xea, 0x57, 0xf5, 0x5e, 0x57, 0x1b, 0xdc, 0x3b, 0xfe, 0xb6, 0xa7, 0xea, 0x76, 0xfb, 0xd8, 0x3f, + 0x24, 0xae, 0x45, 0x28, 0xf1, 0xda, 0x43, 0x62, 0x69, 0xb6, 0xdb, 0x16, 0x1d, 0xd8, 0xd1, 0xdb, + 0xd8, 0x71, 0xbc, 0xf6, 0xf0, 0x76, 0x7b, 0x40, 0x2c, 0xe2, 0x62, 0x4a, 0x34, 0xd5, 0x71, 0x6d, + 0x6a, 0x43, 0x18, 0x60, 0x54, 0xec, 0xe8, 0x2a, 0xc3, 0xa8, 0xc3, 0xdb, 0x57, 0x6f, 0x0d, 0x74, + 0x7a, 0xe4, 0x1f, 0xaa, 0x7d, 0xdb, 0x6c, 0x0f, 0xec, 0x81, 0xdd, 0xe6, 0xd0, 0x43, 0xff, 0x11, + 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x02, 0x8a, 0xab, 0xad, 0xc4, 0x63, 0xfa, 0xb6, 0x4b, 0x24, 0x8f, + 0xb9, 0x7a, 0x37, 0xc6, 0x98, 0xb8, 0x7f, 0xa4, 0x5b, 0xc4, 0x1d, 0xb5, 0x9d, 0xe3, 0x01, 0x6b, + 0xf0, 0xda, 0x26, 0xa1, 0x58, 0x16, 0xd5, 0x2e, 0x8a, 0x72, 0x7d, 0x8b, 0xea, 0x26, 0xc9, 0x05, + 0xbc, 0x31, 0x29, 0xc0, 0xeb, 0x1f, 0x11, 0x13, 0xe7, 0xe2, 0x5e, 0x2b, 0x8a, 0xf3, 0xa9, 
0x6e, + 0xb4, 0x75, 0x8b, 0x7a, 0xd4, 0xcd, 0x06, 0xb5, 0xfe, 0xa3, 0x00, 0xd8, 0xb5, 0x2d, 0xea, 0xda, + 0x86, 0x41, 0x5c, 0x44, 0x86, 0xba, 0xa7, 0xdb, 0x16, 0xfc, 0x29, 0xa8, 0xb1, 0xf1, 0x68, 0x98, + 0xe2, 0xba, 0x72, 0x5d, 0xd9, 0x58, 0xb8, 0xf3, 0x2d, 0x35, 0x9e, 0xe4, 0x88, 0x5e, 0x75, 0x8e, + 0x07, 0xac, 0xc1, 0x53, 0x19, 0x5a, 0x1d, 0xde, 0x56, 0x77, 0x0f, 0xdf, 0x27, 0x7d, 0xda, 0x23, + 0x14, 0x77, 0xe0, 0x93, 0x93, 0xe6, 0xcc, 0xf8, 0xa4, 0x09, 0xe2, 0x36, 0x14, 0xb1, 0xc2, 0x5d, + 0x50, 0xe1, 0xec, 0x25, 0xce, 0x7e, 0xab, 0x90, 0x5d, 0x0c, 0x5a, 0x45, 0xf8, 0x67, 0x6f, 0x3d, + 0xa6, 0xc4, 0x62, 0xe9, 0x75, 0x2e, 0x09, 0xea, 0xca, 0x16, 0xa6, 0x18, 0x71, 0x22, 0xf8, 0x2a, + 0xa8, 0xb9, 0x22, 0xfd, 0x7a, 0xf9, 0xba, 0xb2, 0x51, 0xee, 0x5c, 0x16, 0xa8, 0x5a, 0x38, 0x2c, + 0x14, 0x21, 0x5a, 0x7f, 0x55, 0xc0, 0x5a, 0x7e, 0xdc, 0xdb, 0xba, 0x47, 0xe1, 0x7b, 0xb9, 0xb1, + 0xab, 0xd3, 0x8d, 0x9d, 0x45, 0xf3, 0x91, 0x47, 0x0f, 0x0e, 0x5b, 0x12, 0xe3, 0x7e, 0x17, 0x54, + 0x75, 0x4a, 0x4c, 0xaf, 0x5e, 0xba, 0x5e, 0xde, 0x58, 0xb8, 0x73, 0x43, 0xcd, 0xd7, 0xae, 0x9a, + 0x4f, 0xac, 0xb3, 0x28, 0x28, 0xab, 0xef, 0xb0, 0x60, 0x14, 0x70, 0xb4, 0xfe, 0xab, 0x80, 0xf9, + 0x2d, 0x4c, 0x4c, 0xdb, 0xda, 0x27, 0xf4, 0x02, 0x16, 0xad, 0x0b, 0x2a, 0x9e, 0x43, 0xfa, 0x62, + 0xd1, 0xbe, 0x26, 0xcb, 0x3d, 0x4a, 0x67, 0xdf, 0x21, 0xfd, 0x78, 0xa1, 0xd8, 0x2f, 0xc4, 0x83, + 0xe1, 0xbb, 0x60, 0xd6, 0xa3, 0x98, 0xfa, 0x1e, 0x5f, 0xa6, 0x85, 0x3b, 0x5f, 0x3f, 0x9d, 0x86, + 0x43, 0x3b, 0x4b, 0x82, 0x68, 0x36, 0xf8, 0x8d, 0x04, 0x45, 0xeb, 0x5f, 0x25, 0x00, 0x23, 0x6c, + 0xd7, 0xb6, 0x34, 0x9d, 0xb2, 0xfa, 0x7d, 0x13, 0x54, 0xe8, 0xc8, 0x21, 0x7c, 0x1a, 0xe6, 0x3b, + 0x37, 0xc2, 0x2c, 0xee, 0x8f, 0x1c, 0xf2, 0xe9, 0x49, 0x73, 0x2d, 0x1f, 0xc1, 0x7a, 0x10, 0x8f, + 0x81, 0xdb, 0x51, 0x7e, 0x25, 0x1e, 0x7d, 0x37, 0xfd, 0xe8, 0x4f, 0x4f, 0x9a, 0x92, 0xc3, 0x42, + 0x8d, 0x98, 0xd2, 0x09, 0xc2, 0x21, 0x80, 0x06, 0xf6, 0xe8, 0x7d, 0x17, 0x5b, 0x5e, 0xf0, 0x24, + 0xdd, 0x24, 0x62, 0xe4, 0xaf, 0x4c, 0xb7, 0x3c, 0x2c, 0xa2, 0x73, 0x55, 0x64, 0x01, 0xb7, 0x73, + 0x6c, 0x48, 0xf2, 0x04, 0x78, 0x03, 0xcc, 0xba, 0x04, 0x7b, 0xb6, 0x55, 0xaf, 0xf0, 0x51, 0x44, + 0x13, 0x88, 0x78, 0x2b, 0x12, 0xbd, 0xf0, 0x65, 0x30, 0x67, 0x12, 0xcf, 0xc3, 0x03, 0x52, 0xaf, + 0x72, 0xe0, 0xb2, 0x00, 0xce, 0xf5, 0x82, 0x66, 0x14, 0xf6, 0xb7, 0x7e, 0xaf, 0x80, 0xc5, 0x68, + 0xe6, 0x2e, 0x60, 0xab, 0x74, 0xd2, 0x5b, 0xe5, 0xc5, 0x53, 0xeb, 0xa4, 0x60, 0x87, 0x7c, 0x50, + 0x4e, 0xe4, 0xcc, 0x8a, 0x10, 0xfe, 0x04, 0xd4, 0x3c, 0x62, 0x90, 0x3e, 0xb5, 0x5d, 0x91, 0xf3, + 0x6b, 0x53, 0xe6, 0x8c, 0x0f, 0x89, 0xb1, 0x2f, 0x42, 0x3b, 0x97, 0x58, 0xd2, 0xe1, 0x2f, 0x14, + 0x51, 0xc2, 0x1f, 0x81, 0x1a, 0x25, 0xa6, 0x63, 0x60, 0x4a, 0xc4, 0x36, 0x49, 0xd5, 0x37, 0x2b, + 0x17, 0x46, 0xb6, 0x67, 0x6b, 0xf7, 0x05, 0x8c, 0x6f, 0x94, 0x68, 0x1e, 0xc2, 0x56, 0x14, 0xd1, + 0xc0, 0x63, 0xb0, 0xe4, 0x3b, 0x1a, 0x43, 0x52, 0x76, 0x74, 0x0f, 0x46, 0xa2, 0x7c, 0x6e, 0x9e, + 0x3a, 0x21, 0x07, 0xa9, 0x90, 0xce, 0x9a, 0x78, 0xc0, 0x52, 0xba, 0x1d, 0x65, 0xa8, 0xe1, 0x26, + 0x58, 0x36, 0x75, 0x0b, 0x11, 0xac, 0x8d, 0xf6, 0x49, 0xdf, 0xb6, 0x34, 0x8f, 0x17, 0x50, 0xb5, + 0xb3, 0x2e, 0x08, 0x96, 0x7b, 0xe9, 0x6e, 0x94, 0xc5, 0xc3, 0x6d, 0xb0, 0x1a, 0x9e, 0xb3, 0x3f, + 0xd0, 0x3d, 0x6a, 0xbb, 0xa3, 0x6d, 0xdd, 0xd4, 0x69, 0x7d, 0x96, 0xf3, 0xd4, 0xc7, 0x27, 0xcd, + 0x55, 0x24, 0xe9, 0x47, 0xd2, 0xa8, 0xd6, 0x6f, 0x66, 0xc1, 0x72, 0xe6, 0x34, 0x80, 0x0f, 0xc0, + 0x5a, 0xdf, 0x77, 0x5d, 0x62, 0xd1, 0x1d, 0xdf, 0x3c, 0x24, 0xee, 0x7e, 0xff, 0x88, 0x68, 0xbe, + 0x41, 0x34, 0xbe, 
0xa2, 0xd5, 0x4e, 0x43, 0xe4, 0xba, 0xd6, 0x95, 0xa2, 0x50, 0x41, 0x34, 0xfc, + 0x21, 0x80, 0x16, 0x6f, 0xea, 0xe9, 0x9e, 0x17, 0x71, 0x96, 0x38, 0x67, 0xb4, 0x01, 0x77, 0x72, + 0x08, 0x24, 0x89, 0x62, 0x39, 0x6a, 0xc4, 0xd3, 0x5d, 0xa2, 0x65, 0x73, 0x2c, 0xa7, 0x73, 0xdc, + 0x92, 0xa2, 0x50, 0x41, 0x34, 0x7c, 0x1d, 0x2c, 0x04, 0x4f, 0xe3, 0x73, 0x2e, 0x16, 0x67, 0x45, + 0x90, 0x2d, 0xec, 0xc4, 0x5d, 0x28, 0x89, 0x63, 0x43, 0xb3, 0x0f, 0x3d, 0xe2, 0x0e, 0x89, 0xf6, + 0x76, 0xe0, 0x01, 0x98, 0x50, 0x56, 0xb9, 0x50, 0x46, 0x43, 0xdb, 0xcd, 0x21, 0x90, 0x24, 0x8a, + 0x0d, 0x2d, 0xa8, 0x9a, 0xdc, 0xd0, 0x66, 0xd3, 0x43, 0x3b, 0x90, 0xa2, 0x50, 0x41, 0x34, 0xab, + 0xbd, 0x20, 0xe5, 0xcd, 0x21, 0xd6, 0x0d, 0x7c, 0x68, 0x90, 0xfa, 0x5c, 0xba, 0xf6, 0x76, 0xd2, + 0xdd, 0x28, 0x8b, 0x87, 0x6f, 0x83, 0x2b, 0x41, 0xd3, 0x81, 0x85, 0x23, 0x92, 0x1a, 0x27, 0x79, + 0x41, 0x90, 0x5c, 0xd9, 0xc9, 0x02, 0x50, 0x3e, 0x06, 0xbe, 0x09, 0x96, 0xfa, 0xb6, 0x61, 0xf0, + 0x7a, 0xec, 0xda, 0xbe, 0x45, 0xeb, 0xf3, 0x9c, 0x05, 0xb2, 0x3d, 0xd4, 0x4d, 0xf5, 0xa0, 0x0c, + 0x12, 0x3e, 0x04, 0xa0, 0x1f, 0xca, 0x81, 0x57, 0x07, 0xc5, 0x42, 0x9f, 0xd7, 0xa1, 0x58, 0x80, + 0xa3, 0x26, 0x0f, 0x25, 0xd8, 0x5a, 0x1f, 0x28, 0x60, 0xbd, 0x60, 0x8f, 0xc3, 0xef, 0xa5, 0x54, + 0xef, 0x66, 0x46, 0xf5, 0xae, 0x15, 0x84, 0x25, 0xa4, 0xaf, 0x0f, 0x16, 0x99, 0xef, 0xd0, 0xad, + 0x41, 0x00, 0x11, 0x27, 0xd8, 0x2b, 0xb2, 0xdc, 0x51, 0x12, 0x18, 0x1f, 0xc3, 0x57, 0xc6, 0x27, + 0xcd, 0xc5, 0x54, 0x1f, 0x4a, 0x73, 0xb6, 0x7e, 0x59, 0x02, 0x60, 0x8b, 0x38, 0x86, 0x3d, 0x32, + 0x89, 0x75, 0x11, 0xae, 0x65, 0x2b, 0xe5, 0x5a, 0x5a, 0xd2, 0x85, 0x88, 0xf2, 0x29, 0xb4, 0x2d, + 0xdb, 0x19, 0xdb, 0xf2, 0x8d, 0x09, 0x3c, 0xa7, 0xfb, 0x96, 0x7f, 0x94, 0xc1, 0x4a, 0x0c, 0x8e, + 0x8d, 0xcb, 0xbd, 0xd4, 0x12, 0xbe, 0x94, 0x59, 0xc2, 0x75, 0x49, 0xc8, 0x73, 0x73, 0x2e, 0x9f, + 0xbd, 0x83, 0x80, 0xef, 0x83, 0x25, 0x66, 0x55, 0x82, 0x42, 0xe0, 0x46, 0x68, 0xf6, 0xcc, 0x46, + 0x28, 0x12, 0xb2, 0xed, 0x14, 0x13, 0xca, 0x30, 0x17, 0x18, 0xaf, 0xb9, 0xe7, 0x6d, 0xbc, 0x5a, + 0x7f, 0x50, 0xc0, 0x52, 0xbc, 0x4c, 0x17, 0x60, 0x93, 0xba, 0x69, 0x9b, 0xd4, 0x38, 0xbd, 0x2e, + 0x0b, 0x7c, 0xd2, 0xdf, 0x2b, 0xc9, 0xac, 0xb9, 0x51, 0xda, 0x60, 0x2f, 0x54, 0x8e, 0xa1, 0xf7, + 0xb1, 0x27, 0x64, 0xf5, 0x52, 0xf0, 0x32, 0x15, 0xb4, 0xa1, 0xa8, 0x37, 0x65, 0xa9, 0x4a, 0xcf, + 0xd7, 0x52, 0x95, 0x3f, 0x1b, 0x4b, 0x75, 0x1f, 0xd4, 0xbc, 0xd0, 0x4c, 0x55, 0x38, 0xe5, 0x8d, + 0x49, 0xdb, 0x59, 0xf8, 0xa8, 0x88, 0x35, 0x72, 0x50, 0x11, 0x93, 0xcc, 0x3b, 0x55, 0x3f, 0x4f, + 0xef, 0xc4, 0xb6, 0xb0, 0x83, 0x7d, 0x8f, 0x68, 0xbc, 0xee, 0x6b, 0xf1, 0x16, 0xde, 0xe3, 0xad, + 0x48, 0xf4, 0xc2, 0x03, 0xb0, 0xee, 0xb8, 0xf6, 0xc0, 0x25, 0x9e, 0xb7, 0x45, 0xb0, 0x66, 0xe8, + 0x16, 0x09, 0x07, 0x10, 0xa8, 0xde, 0xb5, 0xf1, 0x49, 0x73, 0x7d, 0x4f, 0x0e, 0x41, 0x45, 0xb1, + 0xad, 0x3f, 0x57, 0xc0, 0xe5, 0xec, 0x89, 0x58, 0x60, 0x44, 0x94, 0x73, 0x19, 0x91, 0x57, 0x13, + 0x25, 0x1a, 0xb8, 0xb4, 0xc4, 0x3b, 0x7f, 0xae, 0x4c, 0x37, 0xc1, 0xb2, 0x30, 0x1e, 0x61, 0xa7, + 0xb0, 0x62, 0xd1, 0xf2, 0x1c, 0xa4, 0xbb, 0x51, 0x16, 0xcf, 0xec, 0x45, 0xec, 0x1a, 0x42, 0x92, + 0x4a, 0xda, 0x5e, 0x6c, 0x66, 0x01, 0x28, 0x1f, 0x03, 0x7b, 0x60, 0xc5, 0xb7, 0xf2, 0x54, 0x41, + 0xb9, 0x5c, 0x13, 0x54, 0x2b, 0x07, 0x79, 0x08, 0x92, 0xc5, 0xc1, 0x1f, 0xa7, 0x1c, 0xc7, 0x2c, + 0x3f, 0x08, 0x5e, 0x3a, 0xbd, 0xa2, 0xa7, 0xb6, 0x1c, 0xf0, 0x1e, 0x58, 0x74, 0xb9, 0xa1, 0x0c, + 0xb3, 0x0c, 0x4c, 0xd9, 0x57, 0x44, 0xd8, 0x22, 0x4a, 0x76, 0xa2, 0x34, 0x56, 0xe2, 0xa3, 0x6a, + 0xd3, 0xfa, 0xa8, 0xd6, 0x9f, 0x14, 0x00, 
0xf3, 0x5b, 0x70, 0xe2, 0xcb, 0x7d, 0x2e, 0x22, 0x21, + 0x91, 0x9a, 0xdc, 0xe1, 0xdc, 0x9c, 0xec, 0x70, 0xe2, 0x13, 0x74, 0x3a, 0x8b, 0x23, 0x66, 0xe0, + 0x62, 0x2e, 0x66, 0xa6, 0xb0, 0x38, 0x71, 0x3e, 0xcf, 0x66, 0x71, 0x12, 0x3c, 0xa7, 0x5b, 0x9c, + 0x7f, 0x97, 0xc0, 0x4a, 0x0c, 0x9e, 0xda, 0xe2, 0x48, 0x42, 0xbe, 0xbc, 0x9c, 0x99, 0x7c, 0x39, + 0xc3, 0x6c, 0x47, 0x3c, 0x75, 0xff, 0x27, 0xb6, 0x23, 0x4e, 0xa8, 0xc0, 0x76, 0xfc, 0xae, 0x94, + 0xcc, 0xfa, 0x0b, 0x6f, 0x3b, 0x9e, 0xfd, 0x72, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, + 0x29, 0x1d, 0x54, 0x26, 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x4f, 0x43, 0x42, + 0x0c, 0x03, 0x05, 0xfd, 0xaa, 0x88, 0x5c, 0xfd, 0xbe, 0x04, 0x83, 0xa4, 0x91, 0x05, 0x9a, 0x5e, + 0x3e, 0x97, 0xa6, 0xe7, 0xd4, 0xa6, 0x72, 0x06, 0xb5, 0x91, 0xea, 0x73, 0xf5, 0x1c, 0xfa, 0x3c, + 0xb5, 0xa0, 0x4a, 0x8e, 0xab, 0x89, 0xef, 0xf0, 0xbf, 0x56, 0xc0, 0x9a, 0xfc, 0xf5, 0x19, 0x1a, + 0x60, 0xc9, 0xc4, 0x8f, 0x93, 0x97, 0x17, 0x93, 0x04, 0xc3, 0xa7, 0xba, 0xa1, 0x06, 0x5f, 0x77, + 0xd4, 0x77, 0x2c, 0xba, 0xeb, 0xee, 0x53, 0x57, 0xb7, 0x06, 0x81, 0xc0, 0xf6, 0x52, 0x5c, 0x28, + 0xc3, 0xdd, 0xfa, 0x58, 0x01, 0xeb, 0x05, 0x2a, 0x77, 0xb1, 0x99, 0xc0, 0x87, 0xa0, 0x66, 0xe2, + 0xc7, 0xfb, 0xbe, 0x3b, 0x08, 0x25, 0xf9, 0xec, 0xcf, 0xe1, 0x1b, 0xb9, 0x27, 0x58, 0x50, 0xc4, + 0xd7, 0xda, 0x05, 0xd7, 0x53, 0x83, 0x64, 0x9b, 0x86, 0x3c, 0xf2, 0x0d, 0xbe, 0x7f, 0x84, 0xa7, + 0xb8, 0x09, 0xe6, 0x1d, 0xec, 0x52, 0x3d, 0x32, 0xa3, 0xd5, 0xce, 0xe2, 0xf8, 0xa4, 0x39, 0xbf, + 0x17, 0x36, 0xa2, 0xb8, 0xbf, 0xf5, 0xab, 0x12, 0x58, 0x48, 0x90, 0x5c, 0x80, 0xbe, 0xbf, 0x95, + 0xd2, 0x77, 0xe9, 0x17, 0x93, 0xe4, 0xa8, 0x8a, 0x04, 0xbe, 0x97, 0x11, 0xf8, 0x6f, 0x4e, 0x22, + 0x3a, 0x5d, 0xe1, 0x3f, 0x29, 0x81, 0xd5, 0x04, 0x3a, 0x96, 0xf8, 0xef, 0xa4, 0x24, 0x7e, 0x23, + 0x23, 0xf1, 0x75, 0x59, 0xcc, 0x97, 0x1a, 0x3f, 0x59, 0xe3, 0xff, 0xa8, 0x80, 0xe5, 0xc4, 0xdc, + 0x5d, 0x80, 0xc8, 0x6f, 0xa5, 0x45, 0xbe, 0x39, 0xa1, 0x5e, 0x0a, 0x54, 0xfe, 0x49, 0x35, 0x95, + 0xf7, 0x17, 0x5e, 0xe6, 0x7f, 0x0e, 0x56, 0x87, 0xb6, 0xe1, 0x9b, 0xa4, 0x6b, 0x60, 0xdd, 0x0c, + 0x01, 0x4c, 0xc9, 0xd8, 0x24, 0xbe, 0x2c, 0xa5, 0x27, 0xae, 0xa7, 0x7b, 0x94, 0x58, 0xf4, 0x41, + 0x1c, 0x19, 0x6b, 0xf1, 0x03, 0x09, 0x1d, 0x92, 0x3e, 0x04, 0xbe, 0x0e, 0x16, 0x98, 0xa6, 0xea, + 0x7d, 0xb2, 0x83, 0xcd, 0xb0, 0xa6, 0xa2, 0xef, 0x03, 0xfb, 0x71, 0x17, 0x4a, 0xe2, 0xe0, 0x11, + 0x58, 0x71, 0x6c, 0xad, 0x87, 0x2d, 0x3c, 0x20, 0xec, 0xfc, 0xdf, 0xb3, 0x0d, 0xbd, 0x3f, 0xe2, + 0xf7, 0x0e, 0xf3, 0x9d, 0x37, 0xc2, 0x17, 0xd2, 0xbd, 0x3c, 0x84, 0x79, 0x76, 0x49, 0x33, 0xdf, + 0xcf, 0x32, 0x4a, 0x68, 0xe6, 0x3e, 0x67, 0xcd, 0xe5, 0xfe, 0x07, 0x40, 0x56, 0x5c, 0xe7, 0xfc, + 0xa0, 0x55, 0x74, 0xa3, 0x52, 0x3b, 0xd7, 0xd7, 0xa8, 0x4f, 0x2a, 0xe0, 0x4a, 0xee, 0x80, 0xfc, + 0x1c, 0xef, 0x34, 0x72, 0x6e, 0xa9, 0x7c, 0x06, 0xb7, 0xb4, 0x09, 0x96, 0xc5, 0x87, 0xb0, 0x8c, + 0xd9, 0x8a, 0xec, 0x68, 0x37, 0xdd, 0x8d, 0xb2, 0x78, 0xd9, 0x9d, 0x4a, 0xf5, 0x8c, 0x77, 0x2a, + 0xc9, 0x2c, 0xc4, 0xff, 0x6f, 0x04, 0x55, 0x97, 0xcf, 0x42, 0xfc, 0x1b, 0x47, 0x16, 0x0f, 0xbf, + 0x1b, 0x96, 0x54, 0xc4, 0x30, 0xc7, 0x19, 0x32, 0x35, 0x12, 0x11, 0x64, 0xd0, 0xcf, 0xf4, 0xb1, + 0xe7, 0x3d, 0xc9, 0xc7, 0x9e, 0x8d, 0x09, 0xa5, 0x3c, 0xbd, 0x55, 0xfc, 0x9b, 0x02, 0x5e, 0x28, + 0xdc, 0x03, 0x70, 0x33, 0xa5, 0xb3, 0xb7, 0x32, 0x3a, 0xfb, 0x62, 0x61, 0x60, 0x42, 0x6c, 0x4d, + 0xf9, 0x85, 0xc8, 0xdd, 0x89, 0x17, 0x22, 0x12, 0x17, 0x35, 0xf9, 0x66, 0xa4, 0xb3, 0xf1, 0xe4, + 0x69, 0x63, 0xe6, 0xc3, 0xa7, 0x8d, 0x99, 0x8f, 0x9e, 0x36, 0x66, 
0x7e, 0x31, 0x6e, 0x28, 0x4f, + 0xc6, 0x0d, 0xe5, 0xc3, 0x71, 0x43, 0xf9, 0x68, 0xdc, 0x50, 0xfe, 0x39, 0x6e, 0x28, 0xbf, 0xfd, + 0xb8, 0x31, 0xf3, 0xb0, 0x34, 0xbc, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x6b, 0x01, + 0x7b, 0x12, 0x26, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto index 6c55279..c8a957a 100644 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ b/vendor/k8s.io/api/apps/v1/generated.proto @@ -41,7 +41,7 @@ option go_package = "v1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -54,7 +54,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -65,12 +65,12 @@ message ControllerRevisionList { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -78,7 +78,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -107,7 +107,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -280,7 +280,6 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional - // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready @@ -366,12 +365,12 @@ message DeploymentStrategy { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -379,7 +378,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -408,7 +407,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go index e003a0c..4431ca2 100644 --- a/vendor/k8s.io/api/apps/v1/types.go +++ b/vendor/k8s.io/api/apps/v1/types.go @@ -32,8 +32,6 @@ const ( ) // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // StatefulSet represents a set of pods with consistent identities. @@ -69,7 +67,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement PodManagementPolicyType = "Parallel" + ParallelPodManagement = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -95,13 +93,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. 
@@ -246,8 +244,6 @@ type StatefulSetList struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Deployment enables declarative updates for Pods and ReplicaSets. @@ -283,8 +279,7 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - // +patchStrategy=retainKeys - Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -618,12 +613,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -631,7 +626,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -649,7 +644,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -658,8 +653,6 @@ type DaemonSetList struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicaSet ensures that a specified number of pod replicas are running at any given time. @@ -668,12 +661,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -681,7 +674,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -692,7 +685,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -800,7 +793,7 @@ type ReplicaSetCondition struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -817,7 +810,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go index 3f0299d..85fb159 100644 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ReplicaSets. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go index 7b7ff38..885203f 100644 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -653,7 +653,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go index 9072bab..6047ed5 100644 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ b/vendor/k8s.io/api/apps/v1beta1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go index f81b559..ef9aa8e 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go @@ -14,29 +14,56 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto + + It has these top-level messages: + ControllerRevision + ControllerRevisionList + Deployment + DeploymentCondition + DeploymentList + DeploymentRollback + DeploymentSpec + DeploymentStatus + DeploymentStrategy + RollbackConfig + RollingUpdateDeployment + RollingUpdateStatefulSetStrategy + Scale + ScaleSpec + ScaleStatus + StatefulSet + StatefulSetCondition + StatefulSetList + StatefulSetSpec + StatefulSetStatus + StatefulSetUpdateStrategy +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) +import strings "strings" +import reflect "reflect" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -47,596 +74,98 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{0} -} -func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ControllerRevision) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerRevision.Merge(m, src) -} -func (m *ControllerRevision) XXX_Size() int { - return m.Size() -} -func (m *ControllerRevision) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerRevision.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo - -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{1} -} -func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerRevisionList.Merge(m, src) -} -func (m *ControllerRevisionList) XXX_Size() int { - return m.Size() -} -func (m *ControllerRevisionList) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) -} - -var 
xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo - -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{2} -} -func (m *Deployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Deployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Deployment.Merge(m, src) -} -func (m *Deployment) XXX_Size() int { - return m.Size() -} -func (m *Deployment) XXX_DiscardUnknown() { - xxx_messageInfo_Deployment.DiscardUnknown(m) -} - -var xxx_messageInfo_Deployment proto.InternalMessageInfo - -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{3} -} -func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentCondition.Merge(m, src) -} -func (m *DeploymentCondition) XXX_Size() int { - return m.Size() -} -func (m *DeploymentCondition) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{4} -} -func (m *DeploymentList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentList) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentList.Merge(m, src) -} -func (m *DeploymentList) XXX_Size() int { - return m.Size() -} -func (m *DeploymentList) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentList.DiscardUnknown(m) -} +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_DeploymentList proto.InternalMessageInfo +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{5} -} -func (m 
*DeploymentRollback) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentRollback) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentRollback.Merge(m, src) -} -func (m *DeploymentRollback) XXX_Size() int { - return m.Size() -} -func (m *DeploymentRollback) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{6} -} -func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentSpec.Merge(m, src) -} -func (m *DeploymentSpec) XXX_Size() int { - return m.Size() -} -func (m *DeploymentSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo - -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{7} -} -func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentStatus.Merge(m, src) -} -func (m *DeploymentStatus) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) -} +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{8} -} -func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStrategy) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_DeploymentStrategy.Merge(m, src) -} -func (m *DeploymentStrategy) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) -} +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{9} -} -func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollbackConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollbackConfig.Merge(m, src) -} -func (m *RollbackConfig) XXX_Size() int { - return m.Size() -} -func (m *RollbackConfig) XXX_DiscardUnknown() { - xxx_messageInfo_RollbackConfig.DiscardUnknown(m) -} +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{10} -} -func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return fileDescriptorGenerated, []int{10} } -func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) -} -func (m *RollingUpdateDeployment) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{11} -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return 
b[:n], nil -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) + return fileDescriptorGenerated, []int{11} } -var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{12} -} -func (m *Scale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scale) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scale.Merge(m, src) -} -func (m *Scale) XXX_Size() int { - return m.Size() -} -func (m *Scale) XXX_DiscardUnknown() { - xxx_messageInfo_Scale.DiscardUnknown(m) -} +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -var xxx_messageInfo_Scale proto.InternalMessageInfo +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{13} -} -func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleSpec.Merge(m, src) -} -func (m *ScaleSpec) XXX_Size() int { - return m.Size() -} -func (m *ScaleSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleSpec.DiscardUnknown(m) -} +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{14} -} -func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleStatus.Merge(m, src) -} -func (m 
*ScaleStatus) XXX_Size() int { - return m.Size() -} -func (m *ScaleStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleStatus.DiscardUnknown(m) -} +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{15} -} -func (m *StatefulSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSet.Merge(m, src) -} -func (m *StatefulSet) XXX_Size() int { - return m.Size() -} -func (m *StatefulSet) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSet.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSet proto.InternalMessageInfo - -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{16} -} -func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetCondition.Merge(m, src) -} -func (m *StatefulSetCondition) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo - -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{17} -} -func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetList.Merge(m, src) -} -func (m *StatefulSetList) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetList) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetList.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo - -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{18} -} -func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic 
bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetSpec.Merge(m, src) -} -func (m *StatefulSetSpec) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo - -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{19} -} -func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetStatus.Merge(m, src) -} -func (m *StatefulSetStatus) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_2a07313e8f66e805, []int{20} -} -func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) -} -func (m *StatefulSetUpdateStrategy) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) + return fileDescriptorGenerated, []int{20} } -var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo - func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta1.ControllerRevisionList") @@ -644,7 +173,6 @@ func init() { proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry") proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") @@ -654,7 +182,6 @@ func init() { 
proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") @@ -662,135 +189,10 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptor_2a07313e8f66e805) -} - -var fileDescriptor_2a07313e8f66e805 = []byte{ - // 1855 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0x35, - 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, - 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0xab, - 0x87, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, - 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, - 0x77, 0xdb, 0x6d, 0xa4, 0xb5, 0x04, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, - 0xc0, 0x0f, 0xce, 0xde, 0xf3, 0x54, 0xdd, 0xee, 0x9d, 0xf9, 0x03, 0xea, 0x5a, 0x94, 0x51, 0xaf, - 0x37, 0xa6, 0xd6, 0xd0, 0x76, 0x7b, 0x52, 0x40, 0x1c, 0xbd, 0x47, 0x1c, 0xc7, 0xeb, 0x8d, 0x6f, - 0x0f, 0x28, 0x23, 0xb7, 0x7b, 0x23, 0x6a, 0x51, 0x97, 0x30, 0x3a, 0x54, 0x1d, 0xd7, 0x66, 0x36, - 0x5a, 0x0b, 0x14, 0x55, 0xe2, 0xe8, 0x2a, 0x57, 0x54, 0xa5, 0xe2, 0xfa, 0xf6, 0x48, 0x67, 0x8f, - 0xfc, 0x81, 0xaa, 0xd9, 0x66, 0x6f, 0x64, 0x8f, 0xec, 0x9e, 0xd0, 0x1f, 0xf8, 0x0f, 0xc5, 0x97, - 0xf8, 0x10, 0xbf, 0x02, 0x9c, 0xf5, 0x6e, 0xc2, 0xa1, 0x66, 0xbb, 0xb4, 0x37, 0xce, 0xf9, 0x5a, - 0x7f, 0x27, 0xd6, 0x31, 0x89, 0xf6, 0x48, 0xb7, 0xa8, 0x3b, 0xe9, 0x39, 0x67, 0x23, 0xbe, 0xe0, - 0xf5, 0x4c, 0xca, 0x48, 0x91, 0x55, 0xaf, 0xcc, 0xca, 0xf5, 0x2d, 0xa6, 0x9b, 0x34, 0x67, 0xf0, - 0xee, 0x65, 0x06, 0x9e, 0xf6, 0x88, 0x9a, 0x24, 0x67, 0xf7, 0x76, 0x99, 0x9d, 0xcf, 0x74, 0xa3, - 0xa7, 0x5b, 0xcc, 0x63, 0x6e, 0xd6, 0xa8, 0xfb, 0x2f, 0x05, 0xd0, 0x9e, 0x6d, 0x31, 0xd7, 0x36, - 0x0c, 0xea, 0x62, 0x3a, 0xd6, 0x3d, 0xdd, 0xb6, 0xd0, 0xe7, 0xd0, 0xe4, 0xe7, 0x19, 0x12, 0x46, - 0xda, 0xca, 0xa6, 0xb2, 0xb5, 0xb8, 0xf3, 0x96, 0x1a, 0xdf, 0x74, 0x04, 0xaf, 0x3a, 0x67, 0x23, - 0xbe, 0xe0, 0xa9, 0x5c, 0x5b, 0x1d, 0xdf, 0x56, 0x0f, 0x07, 0x5f, 0x50, 0x8d, 0x1d, 0x50, 0x46, - 0xfa, 0xe8, 0xc9, 0xf9, 0xc6, 0xcc, 0xf4, 0x7c, 0x03, 0xe2, 0x35, 0x1c, 0xa1, 0xa2, 0x43, 0xa8, - 0x0b, 0xf4, 0x9a, 0x40, 0xdf, 0x2e, 0x45, 0x97, 0x87, 0x56, 0x31, 0xf9, 0xc5, 0xfd, 0xc7, 0x8c, - 0x5a, 0x7c, 0x7b, 0xfd, 0x17, 0x24, 0x74, 0xfd, 0x1e, 0x61, 0x04, 0x0b, 0x20, 0xf4, 0x26, 0x34, - 0x5d, 0xb9, 0xfd, 0xf6, 0xec, 0xa6, 0xb2, 0x35, 0xdb, 0xbf, 0x21, 0xb5, 0x9a, 0xe1, 0xb1, 0x70, - 0xa4, 0xd1, 0x7d, 0xa2, 0xc0, 0xad, 0xfc, 0xb9, 0xf7, 0x75, 0x8f, 0xa1, 0x9f, 0xe5, 0xce, 0xae, - 0x56, 0x3b, 0x3b, 0xb7, 0x16, 0x27, 
0x8f, 0x1c, 0x87, 0x2b, 0x89, 0x73, 0x1f, 0x41, 0x43, 0x67, - 0xd4, 0xf4, 0xda, 0xb5, 0xcd, 0xd9, 0xad, 0xc5, 0x9d, 0x37, 0xd4, 0x92, 0x00, 0x56, 0xf3, 0xbb, - 0xeb, 0x2f, 0x49, 0xdc, 0xc6, 0x03, 0x8e, 0x80, 0x03, 0xa0, 0xee, 0xaf, 0x6b, 0x00, 0xf7, 0xa8, - 0x63, 0xd8, 0x13, 0x93, 0x5a, 0xec, 0x1a, 0x9e, 0xee, 0x01, 0xd4, 0x3d, 0x87, 0x6a, 0xf2, 0xe9, - 0x5e, 0x2d, 0x3d, 0x41, 0xbc, 0xa9, 0x63, 0x87, 0x6a, 0xf1, 0xa3, 0xf1, 0x2f, 0x2c, 0x20, 0xd0, - 0x27, 0x30, 0xe7, 0x31, 0xc2, 0x7c, 0x4f, 0x3c, 0xd9, 0xe2, 0xce, 0x6b, 0x55, 0xc0, 0x84, 0x41, - 0xbf, 0x25, 0xe1, 0xe6, 0x82, 0x6f, 0x2c, 0x81, 0xba, 0x7f, 0x9d, 0x85, 0x95, 0x58, 0x79, 0xcf, - 0xb6, 0x86, 0x3a, 0xe3, 0x21, 0x7d, 0x17, 0xea, 0x6c, 0xe2, 0x50, 0x71, 0x27, 0x0b, 0xfd, 0x57, - 0xc3, 0xcd, 0x9c, 0x4c, 0x1c, 0xfa, 0xec, 0x7c, 0x63, 0xad, 0xc0, 0x84, 0x8b, 0xb0, 0x30, 0x42, - 0xfb, 0xd1, 0x3e, 0x6b, 0xc2, 0xfc, 0x9d, 0xb4, 0xf3, 0x67, 0xe7, 0x1b, 0x05, 0x05, 0x44, 0x8d, - 0x90, 0xd2, 0x5b, 0x44, 0x5f, 0x40, 0xcb, 0x20, 0x1e, 0x3b, 0x75, 0x86, 0x84, 0xd1, 0x13, 0xdd, - 0xa4, 0xed, 0x39, 0x71, 0xfa, 0xd7, 0xab, 0x3d, 0x14, 0xb7, 0xe8, 0xdf, 0x92, 0x3b, 0x68, 0xed, - 0xa7, 0x90, 0x70, 0x06, 0x19, 0x8d, 0x01, 0xf1, 0x95, 0x13, 0x97, 0x58, 0x5e, 0x70, 0x2a, 0xee, - 0x6f, 0xfe, 0xca, 0xfe, 0xd6, 0xa5, 0x3f, 0xb4, 0x9f, 0x43, 0xc3, 0x05, 0x1e, 0xd0, 0x2b, 0x30, - 0xe7, 0x52, 0xe2, 0xd9, 0x56, 0xbb, 0x2e, 0x6e, 0x2c, 0x7a, 0x2e, 0x2c, 0x56, 0xb1, 0x94, 0xa2, - 0xd7, 0x60, 0xde, 0xa4, 0x9e, 0x47, 0x46, 0xb4, 0xdd, 0x10, 0x8a, 0xcb, 0x52, 0x71, 0xfe, 0x20, - 0x58, 0xc6, 0xa1, 0xbc, 0xfb, 0x7b, 0x05, 0x5a, 0xf1, 0x33, 0x5d, 0x43, 0xae, 0x7e, 0x94, 0xce, - 0xd5, 0xef, 0x56, 0x08, 0xce, 0x92, 0x1c, 0xfd, 0x7b, 0x0d, 0x50, 0xac, 0x84, 0x6d, 0xc3, 0x18, - 0x10, 0xed, 0x0c, 0x6d, 0x42, 0xdd, 0x22, 0x66, 0x18, 0x93, 0x51, 0x82, 0xfc, 0x88, 0x98, 0x14, - 0x0b, 0x09, 0xfa, 0x52, 0x01, 0xe4, 0x8b, 0xd7, 0x1c, 0xee, 0x5a, 0x96, 0xcd, 0x08, 0xbf, 0xe0, - 0x70, 0x43, 0x7b, 0x15, 0x36, 0x14, 0xfa, 0x52, 0x4f, 0x73, 0x28, 0xf7, 0x2d, 0xe6, 0x4e, 0xe2, - 0x87, 0xcd, 0x2b, 0xe0, 0x02, 0xd7, 0xe8, 0xa7, 0x00, 0xae, 0xc4, 0x3c, 0xb1, 0x65, 0xda, 0x96, - 0xd7, 0x80, 0xd0, 0xfd, 0x9e, 0x6d, 0x3d, 0xd4, 0x47, 0x71, 0x61, 0xc1, 0x11, 0x04, 0x4e, 0xc0, - 0xad, 0xdf, 0x87, 0xb5, 0x92, 0x7d, 0xa2, 0x1b, 0x30, 0x7b, 0x46, 0x27, 0xc1, 0x55, 0x61, 0xfe, - 0x13, 0xad, 0x42, 0x63, 0x4c, 0x0c, 0x9f, 0x06, 0x39, 0x89, 0x83, 0x8f, 0x3b, 0xb5, 0xf7, 0x94, - 0xee, 0xef, 0x1a, 0xc9, 0x48, 0xe1, 0xf5, 0x06, 0x6d, 0xf1, 0xf6, 0xe0, 0x18, 0xba, 0x46, 0x3c, - 0x81, 0xd1, 0xe8, 0xbf, 0x10, 0xb4, 0x86, 0x60, 0x0d, 0x47, 0x52, 0xf4, 0x73, 0x68, 0x7a, 0xd4, - 0xa0, 0x1a, 0xb3, 0x5d, 0x59, 0xe2, 0xde, 0xae, 0x18, 0x53, 0x64, 0x40, 0x8d, 0x63, 0x69, 0x1a, - 0xc0, 0x87, 0x5f, 0x38, 0x82, 0x44, 0x9f, 0x40, 0x93, 0x51, 0xd3, 0x31, 0x08, 0xa3, 0xf2, 0xf6, - 0x52, 0x71, 0xc5, 0x6b, 0x07, 0x07, 0x3b, 0xb2, 0x87, 0x27, 0x52, 0x4d, 0x54, 0xcf, 0x28, 0x4e, - 0xc3, 0x55, 0x1c, 0xc1, 0xa0, 0x9f, 0x40, 0xd3, 0x63, 0xbc, 0xab, 0x8f, 0x26, 0x22, 0xdb, 0x2e, - 0x6a, 0x2b, 0xc9, 0x3a, 0x1a, 0x98, 0xc4, 0xd0, 0xe1, 0x0a, 0x8e, 0xe0, 0xd0, 0x2e, 0x2c, 0x9b, - 0xba, 0x85, 0x29, 0x19, 0x4e, 0x8e, 0xa9, 0x66, 0x5b, 0x43, 0x4f, 0xa4, 0x69, 0xa3, 0xbf, 0x26, - 0x8d, 0x96, 0x0f, 0xd2, 0x62, 0x9c, 0xd5, 0x47, 0xfb, 0xb0, 0x1a, 0xb6, 0xdd, 0x8f, 0x74, 0x8f, - 0xd9, 0xee, 0x64, 0x5f, 0x37, 0x75, 0x26, 0x6a, 0x5e, 0xa3, 0xdf, 0x9e, 0x9e, 0x6f, 0xac, 0xe2, - 0x02, 0x39, 0x2e, 0xb4, 0xe2, 0x75, 0xc5, 0x21, 0xbe, 0x47, 0x87, 0xa2, 0x86, 0x35, 0xe3, 0xba, - 0x72, 0x24, 0x56, 0xb1, 0x94, 0xa2, 0x1f, 0xa7, 0xc2, 0xb4, 
0x79, 0xb5, 0x30, 0x6d, 0x95, 0x87, - 0x28, 0x3a, 0x85, 0x35, 0xc7, 0xb5, 0x47, 0x2e, 0xf5, 0xbc, 0x7b, 0x94, 0x0c, 0x0d, 0xdd, 0xa2, - 0xe1, 0xcd, 0x2c, 0x88, 0x13, 0xbd, 0x34, 0x3d, 0xdf, 0x58, 0x3b, 0x2a, 0x56, 0xc1, 0x65, 0xb6, - 0xdd, 0x3f, 0xd5, 0xe1, 0x46, 0xb6, 0xc7, 0xa1, 0x8f, 0x01, 0xd9, 0x03, 0x8f, 0xba, 0x63, 0x3a, - 0xfc, 0x30, 0x18, 0xdc, 0xf8, 0x74, 0xa3, 0x88, 0xe9, 0x26, 0xca, 0xdb, 0xc3, 0x9c, 0x06, 0x2e, - 0xb0, 0x0a, 0xe6, 0x23, 0x99, 0x00, 0x35, 0xb1, 0xd1, 0xc4, 0x7c, 0x94, 0x4b, 0x82, 0x5d, 0x58, - 0x96, 0xb9, 0x1f, 0x0a, 0x45, 0xb0, 0x26, 0xde, 0xfd, 0x34, 0x2d, 0xc6, 0x59, 0x7d, 0x74, 0x17, - 0x96, 0x5c, 0x1e, 0x07, 0x11, 0xc0, 0xbc, 0x00, 0xf8, 0x96, 0x04, 0x58, 0xc2, 0x49, 0x21, 0x4e, - 0xeb, 0xa2, 0x0f, 0xe1, 0x26, 0x19, 0x13, 0xdd, 0x20, 0x03, 0x83, 0x46, 0x00, 0x75, 0x01, 0xf0, - 0xa2, 0x04, 0xb8, 0xb9, 0x9b, 0x55, 0xc0, 0x79, 0x1b, 0x74, 0x00, 0x2b, 0xbe, 0x95, 0x87, 0x0a, - 0x82, 0xf8, 0x25, 0x09, 0xb5, 0x72, 0x9a, 0x57, 0xc1, 0x45, 0x76, 0xe8, 0x73, 0x00, 0x2d, 0xec, - 0xea, 0x5e, 0x7b, 0x4e, 0x94, 0xe1, 0x37, 0x2b, 0x24, 0x5b, 0x34, 0x0a, 0xc4, 0x25, 0x30, 0x5a, - 0xf2, 0x70, 0x02, 0x13, 0xdd, 0x81, 0x96, 0x66, 0x1b, 0x86, 0x88, 0xfc, 0x3d, 0xdb, 0xb7, 0x98, - 0x08, 0xde, 0x46, 0x1f, 0xf1, 0x66, 0xbf, 0x97, 0x92, 0xe0, 0x8c, 0x66, 0xf7, 0x8f, 0x4a, 0xb2, - 0xcd, 0x84, 0xe9, 0x8c, 0xee, 0xa4, 0x46, 0x9f, 0x57, 0x32, 0xa3, 0xcf, 0xad, 0xbc, 0x45, 0x62, - 0xf2, 0xd1, 0x61, 0x89, 0x07, 0xbf, 0x6e, 0x8d, 0x82, 0x07, 0x97, 0x25, 0xf1, 0xad, 0x0b, 0x53, - 0x29, 0xd2, 0x4e, 0x34, 0xc6, 0x9b, 0xe2, 0xcd, 0x93, 0x42, 0x9c, 0x46, 0xee, 0x7e, 0x00, 0xad, - 0x74, 0x1e, 0xa6, 0x66, 0x7a, 0xe5, 0xd2, 0x99, 0xfe, 0x1b, 0x05, 0xd6, 0x4a, 0xbc, 0x23, 0x03, - 0x5a, 0x26, 0x79, 0x9c, 0x78, 0xe6, 0x4b, 0x67, 0x63, 0xce, 0x9a, 0xd4, 0x80, 0x35, 0xa9, 0x0f, - 0x2c, 0x76, 0xe8, 0x1e, 0x33, 0x57, 0xb7, 0x46, 0xc1, 0x3b, 0x1c, 0xa4, 0xb0, 0x70, 0x06, 0x1b, - 0x7d, 0x06, 0x4d, 0x93, 0x3c, 0x3e, 0xf6, 0xdd, 0x51, 0xd1, 0x7d, 0x55, 0xf3, 0x23, 0xfa, 0xc7, - 0x81, 0x44, 0xc1, 0x11, 0x5e, 0xf7, 0x10, 0x36, 0x53, 0x87, 0xe4, 0xa5, 0x82, 0x3e, 0xf4, 0x8d, - 0x63, 0x1a, 0x3f, 0xf8, 0x1b, 0xb0, 0xe0, 0x10, 0x97, 0xe9, 0x51, 0xb9, 0x68, 0xf4, 0x97, 0xa6, - 0xe7, 0x1b, 0x0b, 0x47, 0xe1, 0x22, 0x8e, 0xe5, 0xdd, 0x7f, 0x2b, 0xd0, 0x38, 0xd6, 0x88, 0x41, - 0xaf, 0x81, 0x3a, 0xdc, 0x4b, 0x51, 0x87, 0x6e, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, - 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, - 0xe5, 0xb2, 0x2a, 0xd9, 0xfd, 0x6d, 0x0d, 0x16, 0x13, 0x2e, 0xae, 0x66, 0xcd, 0xaf, 0x3b, 0x31, - 0x68, 0xf0, 0x4a, 0xb2, 0x53, 0xe5, 0x20, 0x6a, 0x38, 0x54, 0x04, 0xf3, 0x5b, 0xdc, 0xbd, 0xf3, - 0xb3, 0xc6, 0x07, 0xd0, 0x62, 0xc4, 0x1d, 0x51, 0x16, 0xca, 0xc4, 0x85, 0x2d, 0xc4, 0xe4, 0xe1, - 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, - 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, - 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x47, 0x0d, 0x56, 0x13, - 0xda, 0x31, 0x37, 0xfd, 0x7e, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, - 0xd3, 0x62, 0xc2, 0x38, 0xfb, 0xbf, 0x48, 0x18, 0xff, 0xa0, 0xc0, 0x72, 0xe2, 0xee, 0xae, 0x81, - 0x31, 0x3e, 0x48, 0x33, 0xc6, 0x97, 0xab, 0x04, 0x4d, 0x09, 0x65, 0xfc, 0x73, 0x23, 0xb5, 0xf9, - 0xff, 0x7b, 0x12, 0xf3, 0x4b, 0x58, 0x1d, 0xdb, 0x86, 0x6f, 0xd2, 0x3d, 0x83, 0xe8, 0x66, 0xa8, - 0xc0, 0x87, 0xbe, 0xd9, 0xec, 0x1f, 0x43, 0x11, 0x3c, 0x75, 0x3d, 0xdd, 0x63, 0xd4, 
0x62, 0x9f, - 0xc6, 0x96, 0xfd, 0x6f, 0x4b, 0x27, 0xab, 0x9f, 0x16, 0xc0, 0xe1, 0x42, 0x27, 0xe8, 0x7b, 0xb0, - 0xc8, 0x07, 0x66, 0x5d, 0xa3, 0x9c, 0x7b, 0xcb, 0xc0, 0x5a, 0x91, 0x40, 0x8b, 0xc7, 0xb1, 0x08, - 0x27, 0xf5, 0xd0, 0x23, 0x58, 0x71, 0xec, 0xe1, 0x01, 0xb1, 0xc8, 0x88, 0xf2, 0x31, 0xe3, 0xc8, - 0x36, 0x74, 0x6d, 0x22, 0x98, 0xcd, 0x42, 0xff, 0xdd, 0x70, 0xb8, 0x3c, 0xca, 0xab, 0x3c, 0xe3, - 0x14, 0x21, 0xbf, 0x2c, 0x92, 0xba, 0x08, 0x12, 0xb9, 0xd0, 0xf2, 0x65, 0xbb, 0x97, 0x44, 0x2f, - 0xf8, 0x0b, 0x67, 0xa7, 0x4a, 0x84, 0x9d, 0xa6, 0x2c, 0xe3, 0xea, 0x9f, 0x5e, 0xc7, 0x19, 0x0f, - 0xa5, 0xc4, 0xad, 0xf9, 0xdf, 0x10, 0xb7, 0xee, 0x3f, 0xeb, 0x70, 0x33, 0x57, 0x2a, 0xd1, 0x0f, - 0x2f, 0x60, 0x38, 0xb7, 0x9e, 0x1b, 0xbb, 0xc9, 0x51, 0x93, 0xd9, 0x2b, 0x50, 0x93, 0x5d, 0x58, - 0xd6, 0x7c, 0xd7, 0xa5, 0x16, 0xcb, 0x10, 0x93, 0x88, 0x1a, 0xed, 0xa5, 0xc5, 0x38, 0xab, 0x5f, - 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, 0xbb, 0xfc, 0x2e, 0xe4, - 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, 0x83, 0xd3, 0x94, 0x14, - 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x47, 0x81, 0xc8, 0xf1, 0xed, - 0x2a, 0xb1, 0x5c, 0x99, 0x48, 0x75, 0xff, 0xa2, 0xc0, 0x8b, 0xa5, 0x49, 0x80, 0x76, 0x53, 0x2d, - 0x77, 0x3b, 0xd3, 0x72, 0xbf, 0x53, 0x6a, 0x98, 0xe8, 0xbb, 0x6e, 0x31, 0x35, 0x7a, 0xbf, 0x1a, - 0x35, 0x2a, 0x98, 0xdb, 0x2f, 0xe7, 0x48, 0xfd, 0xed, 0x27, 0x4f, 0x3b, 0x33, 0x5f, 0x3d, 0xed, - 0xcc, 0x7c, 0xfd, 0xb4, 0x33, 0xf3, 0xab, 0x69, 0x47, 0x79, 0x32, 0xed, 0x28, 0x5f, 0x4d, 0x3b, - 0xca, 0xd7, 0xd3, 0x8e, 0xf2, 0xb7, 0x69, 0x47, 0xf9, 0xcd, 0x37, 0x9d, 0x99, 0xcf, 0xe6, 0xa5, - 0xc7, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x99, 0x8d, 0x1e, 0xaf, 0x61, 0x1b, 0x00, 0x00, -} - func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -798,45 +200,36 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x18 - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -844,46 +237,37 @@ func (m *ControllerRevisionList) Marshal() (dAtA 
[]byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -891,52 +275,41 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n4 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -944,62 +317,49 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n7, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n7 dAtA[i] = 0x3a - { - size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x32 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n8 + return i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1007,46 +367,37 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1054,61 +405,51 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- + for _, k := range keysForUpdatedAnnotations { dAtA[i] = 0x12 - i -= len(keysForUpdatedAnnotations[iNdEx]) - copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) - i-- + i++ + v := m.UpdatedAnnotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) + n10, err := m.RollbackTo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + return i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1116,92 +457,79 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ProgressDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) - i-- - dAtA[i] = 0x48 + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } - if m.RollbackTo != nil { - { - size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n11, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x42 + i += n11 } - i-- - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n12, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x38 + i += n12 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n13, 
err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x28 - { - size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i-- - dAtA[i] = 0x22 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + i++ + if m.RollbackTo != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) + n14, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + i += n14 } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) } - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1209,59 +537,52 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x40 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x38 + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - i-- - dAtA[i] = 0x18 - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1269,39 +590,31 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n15, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1309,25 +622,20 @@ func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { } func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1335,46 +643,37 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n16, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n16 } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n17, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n17 } - return len(dAtA) - i, nil + return i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1382,27 +681,22 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.Partition != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) } - return len(dAtA) - i, nil + return i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1410,52 +704,41 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n18, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n18 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n19, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n19 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n20, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 + return i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1463,25 +746,20 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.Replicas)) + return i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1489,54 +767,46 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- + for _, k := range keysForSelector { dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i += copy(dAtA[i:], m.TargetSelector) + return i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1544,52 +814,41 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n21, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n21 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n22, err := m.Spec.MarshalTo(dAtA[i:]) + if err 
!= nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n22 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n23, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 + return i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1597,52 +856,41 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1650,46 +898,37 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n25, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1697,88 +936,73 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x40 + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } - { - size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n26, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n26 } - i-- - dAtA[i] = 0x3a - i -= len(m.PodManagementPolicy) - copy(dAtA[i:], m.PodManagementPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i-- - dAtA[i] = 0x32 - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i-- - dAtA[i] = 0x2a + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n27, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 if len(m.VolumeClaimTemplates) > 0 { - for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 - } - } - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x12 } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1786,68 +1010,59 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } + if m.ObservedGeneration != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) } + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) } - i -= len(m.UpdateRevision) - copy(dAtA[i:], m.UpdateRevision) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i-- - dAtA[i] = 0x3a - i -= len(m.CurrentRevision) - copy(dAtA[i:], m.CurrentRevision) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i-- - dAtA[i] = 0x32 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x10 - if m.ObservedGeneration != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1855,50 +1070,55 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *ControllerRevision) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1910,9 +1130,6 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1927,9 +1144,6 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *Deployment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1942,9 +1156,6 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -1963,9 +1174,6 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1980,9 +1188,6 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -2001,9 +1206,6 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -2033,9 +1235,6 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -2057,9 +1256,6 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2072,9 +1268,6 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *RollbackConfig) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -2082,9 +1275,6 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxUnavailable != nil { @@ -2099,9 +1289,6 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Partition != nil { @@ -2111,9 +1298,6 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m 
*Scale) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2126,9 +1310,6 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -2136,9 +1317,6 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -2156,9 +1334,6 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2171,9 +1346,6 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2190,9 +1362,6 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2207,9 +1376,6 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -2240,9 +1406,6 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ObservedGeneration != nil { @@ -2269,9 +1432,6 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2284,7 +1444,14 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2294,8 +1461,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -2305,14 +1472,9 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ControllerRevision{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 
1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2322,7 +1484,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2338,8 +1500,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2348,14 +1510,9 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Deployment{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2388,13 +1545,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + 
strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -2404,18 +1561,13 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -2428,7 +1580,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -2448,8 +1600,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2469,7 +1621,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2513,7 +1665,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2527,7 +1679,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2538,14 +1690,9 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]StatefulSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2554,16 +1701,11 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } - repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" - for _, f := range this.VolumeClaimTemplates { - repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -2576,11 +1718,6 @@ func (this *StatefulSetStatus) 
String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]StatefulSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -2590,7 +1727,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2601,7 +1738,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -2629,7 +1766,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2657,7 +1794,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2666,9 +1803,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2690,7 +1824,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2699,9 +1833,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2723,7 +1854,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2737,9 +1868,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2767,7 +1895,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2795,7 +1923,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2804,9 +1932,6 @@ func 
(m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2828,7 +1953,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2837,9 +1962,6 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2857,9 +1979,6 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2887,7 +2006,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2915,7 +2034,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2924,9 +2043,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2948,7 +2064,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2957,9 +2073,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2981,7 +2094,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2990,9 +2103,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3009,9 +2119,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3039,7 +2146,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3067,7 +2174,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3077,9 +2184,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3099,7 +2203,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= 
(uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3109,9 +2213,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3131,7 +2232,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3141,9 +2242,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3163,7 +2261,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3173,9 +2271,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3195,7 +2290,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3204,9 +2299,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3228,7 +2320,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3237,9 +2329,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3256,9 +2345,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3286,7 +2372,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3314,7 +2400,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3323,9 +2409,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3347,7 +2430,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3356,9 +2439,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3376,9 +2456,6 @@ func (m 
*DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3406,7 +2483,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3424,7 +2501,58 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var stringLen uint64 + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3434,29 +2562,12 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3466,29 +2577,26 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3498,86 +2606,41 
@@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.UpdatedAnnotations[mapkey] = mapvalue + } else { + var mapvalue string + m.UpdatedAnnotations[mapkey] = mapvalue } - m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -3593,7 +2656,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3602,9 +2665,6 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3621,9 +2681,6 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3651,7 +2708,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 
0x7F) << shift if b < 0x80 { break } @@ -3679,7 +2736,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3699,7 +2756,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3708,14 +2765,11 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3735,7 +2789,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3744,9 +2798,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3768,7 +2819,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3777,9 +2828,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3801,7 +2849,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3820,7 +2868,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3840,7 +2888,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3860,7 +2908,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3869,9 +2917,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3896,7 +2941,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3911,9 +2956,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3941,7 +2983,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3969,7 +3011,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3988,7 +3030,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4007,7 +3049,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4026,7 +3068,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4045,7 +3087,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4064,7 +3106,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4073,9 +3115,6 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4098,7 +3137,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4117,7 +3156,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4132,9 +3171,6 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4162,7 +3198,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4190,7 +3226,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4200,9 +3236,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4222,7 +3255,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4231,9 +3264,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4253,9 +3283,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4283,7 
+3310,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4311,7 +3338,7 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4325,9 +3352,6 @@ func (m *RollbackConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4355,7 +3379,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4383,7 +3407,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4392,14 +3416,11 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4419,7 +3440,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4428,14 +3449,11 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4450,9 +3468,6 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4480,7 +3495,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4508,7 +3523,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4523,9 +3538,6 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4553,7 +3565,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4581,7 +3593,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { 
break } @@ -4590,9 +3602,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4614,7 +3623,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4623,9 +3632,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4647,7 +3653,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4656,9 +3662,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4675,9 +3678,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4705,7 +3705,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4733,7 +3733,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4747,9 +3747,6 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4777,7 +3774,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4805,7 +3802,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4824,7 +3821,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4833,20 +3830,54 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if 
postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4856,86 +3887,41 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -4951,7 +3937,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4961,9 +3947,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -4978,9 +3961,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5008,7 +3988,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5036,7 +4016,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5045,9 +4025,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5069,7 +4046,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5078,9 +4055,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5102,7 +4076,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5111,9 +4085,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5130,9 +4101,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5160,7 +4128,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5188,7 +4156,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5198,9 +4166,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5220,7 +4185,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5230,9 +4195,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5252,7 +4214,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5261,9 +4223,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5285,7 +4244,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5295,9 +4254,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5317,7 +4273,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5327,9 +4283,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5344,9 +4297,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5374,7 +4324,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5402,7 +4352,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5411,9 +4361,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5435,7 +4382,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5444,9 +4391,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5464,9 +4408,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5494,7 +4435,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5522,7 +4463,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5542,7 +4483,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5551,14 +4492,11 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = 
&v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5578,7 +4516,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5587,9 +4525,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5611,7 +4546,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5620,13 +4555,10 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -5645,7 +4577,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5655,9 +4587,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5677,7 +4606,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5687,9 +4616,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5709,7 +4635,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5718,9 +4644,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5742,7 +4665,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5757,9 +4680,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5787,7 +4707,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5815,7 +4735,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= 
(int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5835,7 +4755,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5854,7 +4774,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5873,7 +4793,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= int32(b&0x7F) << shift + m.CurrentReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5892,7 +4812,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5911,7 +4831,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5921,9 +4841,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5943,7 +4860,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5953,9 +4870,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5975,7 +4889,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5995,7 +4909,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6004,9 +4918,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6024,9 +4935,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6054,7 +4962,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6082,7 +4990,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6092,9 +5000,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6114,7 +5019,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6123,9 +5028,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6145,9 +5047,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6163,7 +5062,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -6195,8 +5093,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -6213,34 +5113,178 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1859 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, + 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0xd5, + 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, + 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0x6b, + 0x86, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, + 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, + 0x77, 0xdb, 0x6d, 0xa4, 0xf5, 0x21, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, + 0xc0, 0x8f, 
0xce, 0xde, 0xf3, 0x35, 0xc3, 0xe9, 0x9e, 0x8d, 0xfa, 0xd4, 0xb3, 0x29, 0xa3, 0x7e, + 0x77, 0x4c, 0xed, 0x81, 0xe3, 0x75, 0xa5, 0x80, 0xb8, 0x46, 0x97, 0xb8, 0xae, 0xdf, 0x1d, 0xdf, + 0xee, 0x53, 0x46, 0x6e, 0x77, 0x87, 0xd4, 0xa6, 0x1e, 0x61, 0x74, 0xa0, 0xb9, 0x9e, 0xc3, 0x1c, + 0xb4, 0x16, 0x28, 0x6a, 0xc4, 0x35, 0x34, 0xae, 0xa8, 0x49, 0xc5, 0xf5, 0xed, 0xa1, 0xc1, 0x1e, + 0x8d, 0xfa, 0x9a, 0xee, 0x58, 0xdd, 0xa1, 0x33, 0x74, 0xba, 0x42, 0xbf, 0x3f, 0x7a, 0x28, 0xbe, + 0xc4, 0x87, 0xf8, 0x15, 0xe0, 0xac, 0xab, 0x09, 0x87, 0xba, 0xe3, 0xd1, 0xee, 0x38, 0xe7, 0x6b, + 0xfd, 0x9d, 0x58, 0xc7, 0x22, 0xfa, 0x23, 0xc3, 0xa6, 0xde, 0xa4, 0xeb, 0x9e, 0x0d, 0xf9, 0x82, + 0xdf, 0xb5, 0x28, 0x23, 0x45, 0x56, 0xdd, 0x32, 0x2b, 0x6f, 0x64, 0x33, 0xc3, 0xa2, 0x39, 0x83, + 0x77, 0x2f, 0x33, 0xf0, 0xf5, 0x47, 0xd4, 0x22, 0x39, 0xbb, 0xb7, 0xcb, 0xec, 0x46, 0xcc, 0x30, + 0xbb, 0x86, 0xcd, 0x7c, 0xe6, 0x65, 0x8d, 0xd4, 0xff, 0x28, 0x80, 0xf6, 0x1c, 0x9b, 0x79, 0x8e, + 0x69, 0x52, 0x0f, 0xd3, 0xb1, 0xe1, 0x1b, 0x8e, 0x8d, 0x3e, 0x87, 0x26, 0x3f, 0xcf, 0x80, 0x30, + 0xd2, 0x56, 0x36, 0x95, 0xad, 0xc5, 0x9d, 0xb7, 0xb4, 0xf8, 0xa6, 0x23, 0x78, 0xcd, 0x3d, 0x1b, + 0xf2, 0x05, 0x5f, 0xe3, 0xda, 0xda, 0xf8, 0xb6, 0x76, 0xd8, 0xff, 0x82, 0xea, 0xec, 0x80, 0x32, + 0xd2, 0x43, 0x4f, 0xce, 0x37, 0x66, 0xa6, 0xe7, 0x1b, 0x10, 0xaf, 0xe1, 0x08, 0x15, 0x1d, 0x42, + 0x5d, 0xa0, 0xd7, 0x04, 0xfa, 0x76, 0x29, 0xba, 0x3c, 0xb4, 0x86, 0xc9, 0xaf, 0xee, 0x3f, 0x66, + 0xd4, 0xe6, 0xdb, 0xeb, 0xbd, 0x20, 0xa1, 0xeb, 0xf7, 0x08, 0x23, 0x58, 0x00, 0xa1, 0x37, 0xa1, + 0xe9, 0xc9, 0xed, 0xb7, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xde, 0x0d, 0xa9, 0xd5, 0x0c, 0x8f, 0x85, + 0x23, 0x0d, 0xf5, 0x89, 0x02, 0xb7, 0xf2, 0xe7, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0x3b, 0xbb, + 0x56, 0xed, 0xec, 0xdc, 0x5a, 0x9c, 0x3c, 0x72, 0x1c, 0xae, 0x24, 0xce, 0x7d, 0x04, 0x0d, 0x83, + 0x51, 0xcb, 0x6f, 0xd7, 0x36, 0x67, 0xb7, 0x16, 0x77, 0xde, 0xd0, 0x4a, 0x02, 0x58, 0xcb, 0xef, + 0xae, 0xb7, 0x24, 0x71, 0x1b, 0x0f, 0x38, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0xd6, 0x00, 0xee, 0x51, + 0xd7, 0x74, 0x26, 0x16, 0xb5, 0xd9, 0x35, 0x3c, 0xdd, 0x03, 0xa8, 0xfb, 0x2e, 0xd5, 0xe5, 0xd3, + 0xbd, 0x5a, 0x7a, 0x82, 0x78, 0x53, 0xc7, 0x2e, 0xd5, 0xe3, 0x47, 0xe3, 0x5f, 0x58, 0x40, 0xa0, + 0x4f, 0x60, 0xce, 0x67, 0x84, 0x8d, 0x7c, 0xf1, 0x64, 0x8b, 0x3b, 0xaf, 0x55, 0x01, 0x13, 0x06, + 0xbd, 0x96, 0x84, 0x9b, 0x0b, 0xbe, 0xb1, 0x04, 0x52, 0xff, 0x3e, 0x0b, 0x2b, 0xb1, 0xf2, 0x9e, + 0x63, 0x0f, 0x0c, 0xc6, 0x43, 0xfa, 0x2e, 0xd4, 0xd9, 0xc4, 0xa5, 0xe2, 0x4e, 0x16, 0x7a, 0xaf, + 0x86, 0x9b, 0x39, 0x99, 0xb8, 0xf4, 0xd9, 0xf9, 0xc6, 0x5a, 0x81, 0x09, 0x17, 0x61, 0x61, 0x84, + 0xf6, 0xa3, 0x7d, 0xd6, 0x84, 0xf9, 0x3b, 0x69, 0xe7, 0xcf, 0xce, 0x37, 0x0a, 0x0a, 0x88, 0x16, + 0x21, 0xa5, 0xb7, 0x88, 0x5e, 0x81, 0x39, 0x8f, 0x12, 0xdf, 0xb1, 0xdb, 0x75, 0x81, 0x16, 0x1d, + 0x05, 0x8b, 0x55, 0x2c, 0xa5, 0xe8, 0x35, 0x98, 0xb7, 0xa8, 0xef, 0x93, 0x21, 0x6d, 0x37, 0x84, + 0xe2, 0xb2, 0x54, 0x9c, 0x3f, 0x08, 0x96, 0x71, 0x28, 0x47, 0x5f, 0x40, 0xcb, 0x24, 0x3e, 0x3b, + 0x75, 0x07, 0x84, 0xd1, 0x13, 0xc3, 0xa2, 0xed, 0x39, 0x71, 0xa1, 0xaf, 0x57, 0x7b, 0x7b, 0x6e, + 0xd1, 0xbb, 0x25, 0xd1, 0x5b, 0xfb, 0x29, 0x24, 0x9c, 0x41, 0x46, 0x63, 0x40, 0x7c, 0xe5, 0xc4, + 0x23, 0xb6, 0x1f, 0x5c, 0x14, 0xf7, 0x37, 0x7f, 0x65, 0x7f, 0xeb, 0xd2, 0x1f, 0xda, 0xcf, 0xa1, + 0xe1, 0x02, 0x0f, 0xea, 0x1f, 0x15, 0x68, 0xc5, 0xcf, 0x74, 0x0d, 0xb9, 0xfa, 0x51, 0x3a, 0x57, + 0xbf, 0x5f, 0x21, 0x38, 0x4b, 0x72, 0xf4, 0x9f, 0x35, 0x40, 0xb1, 0x12, 0x76, 0x4c, 0xb3, 0x4f, + 0xf4, 0x33, 0xb4, 0x09, 0x75, 0x9b, 
0x58, 0x61, 0x4c, 0x46, 0x09, 0xf2, 0x13, 0x62, 0x51, 0x2c, + 0x24, 0xe8, 0x4b, 0x05, 0xd0, 0x48, 0x5c, 0xfd, 0x60, 0xd7, 0xb6, 0x1d, 0x46, 0xf8, 0x6d, 0x84, + 0x1b, 0xda, 0xab, 0xb0, 0xa1, 0xd0, 0x97, 0x76, 0x9a, 0x43, 0xb9, 0x6f, 0x33, 0x6f, 0x12, 0xbf, + 0x42, 0x5e, 0x01, 0x17, 0xb8, 0x46, 0x3f, 0x07, 0xf0, 0x24, 0xe6, 0x89, 0x23, 0xd3, 0xb6, 0xbc, + 0x06, 0x84, 0xee, 0xf7, 0x1c, 0xfb, 0xa1, 0x31, 0x8c, 0x0b, 0x0b, 0x8e, 0x20, 0x70, 0x02, 0x6e, + 0xfd, 0x3e, 0xac, 0x95, 0xec, 0x13, 0xdd, 0x80, 0xd9, 0x33, 0x3a, 0x09, 0xae, 0x0a, 0xf3, 0x9f, + 0x68, 0x15, 0x1a, 0x63, 0x62, 0x8e, 0x68, 0x90, 0x93, 0x38, 0xf8, 0xb8, 0x53, 0x7b, 0x4f, 0x51, + 0xff, 0xd0, 0x48, 0x46, 0x0a, 0xaf, 0x37, 0x68, 0x8b, 0xb7, 0x07, 0xd7, 0x34, 0x74, 0xe2, 0x0b, + 0x8c, 0x46, 0xef, 0x85, 0xa0, 0x35, 0x04, 0x6b, 0x38, 0x92, 0xa2, 0x5f, 0x42, 0xd3, 0xa7, 0x26, + 0xd5, 0x99, 0xe3, 0xc9, 0x12, 0xf7, 0x76, 0xc5, 0x98, 0x22, 0x7d, 0x6a, 0x1e, 0x4b, 0xd3, 0x00, + 0x3e, 0xfc, 0xc2, 0x11, 0x24, 0xfa, 0x04, 0x9a, 0x8c, 0x5a, 0xae, 0x49, 0x18, 0x95, 0xb7, 0x97, + 0x8a, 0x2b, 0x5e, 0x3b, 0x38, 0xd8, 0x91, 0x33, 0x38, 0x91, 0x6a, 0xa2, 0x7a, 0x46, 0x71, 0x1a, + 0xae, 0xe2, 0x08, 0x06, 0xfd, 0x0c, 0x9a, 0x3e, 0xe3, 0x5d, 0x7d, 0x38, 0x11, 0x15, 0xe5, 0xa2, + 0xb6, 0x92, 0xac, 0xa3, 0x81, 0x49, 0x0c, 0x1d, 0xae, 0xe0, 0x08, 0x0e, 0xed, 0xc2, 0xb2, 0x65, + 0xd8, 0x98, 0x92, 0xc1, 0xe4, 0x98, 0xea, 0x8e, 0x3d, 0xf0, 0x45, 0x29, 0x6a, 0xf4, 0xd6, 0xa4, + 0xd1, 0xf2, 0x41, 0x5a, 0x8c, 0xb3, 0xfa, 0x68, 0x1f, 0x56, 0xc3, 0xb6, 0xfb, 0x91, 0xe1, 0x33, + 0xc7, 0x9b, 0xec, 0x1b, 0x96, 0xc1, 0x44, 0x81, 0x6a, 0xf4, 0xda, 0xd3, 0xf3, 0x8d, 0x55, 0x5c, + 0x20, 0xc7, 0x85, 0x56, 0xbc, 0x76, 0xba, 0x64, 0xe4, 0xd3, 0x81, 0x28, 0x38, 0xcd, 0xb8, 0x76, + 0x1e, 0x89, 0x55, 0x2c, 0xa5, 0xe8, 0xa7, 0xa9, 0x30, 0x6d, 0x5e, 0x2d, 0x4c, 0x5b, 0xe5, 0x21, + 0x8a, 0x4e, 0x61, 0xcd, 0xf5, 0x9c, 0xa1, 0x47, 0x7d, 0xff, 0x1e, 0x25, 0x03, 0xd3, 0xb0, 0x69, + 0x78, 0x33, 0x0b, 0xe2, 0x44, 0x2f, 0x4d, 0xcf, 0x37, 0xd6, 0x8e, 0x8a, 0x55, 0x70, 0x99, 0xad, + 0xfa, 0x97, 0x3a, 0xdc, 0xc8, 0xf6, 0x38, 0xf4, 0x31, 0x20, 0xa7, 0xef, 0x53, 0x6f, 0x4c, 0x07, + 0x1f, 0x06, 0x83, 0x1b, 0x9f, 0x6e, 0x14, 0x31, 0xdd, 0x44, 0x79, 0x7b, 0x98, 0xd3, 0xc0, 0x05, + 0x56, 0xc1, 0x7c, 0x24, 0x13, 0xa0, 0x26, 0x36, 0x9a, 0x98, 0x8f, 0x72, 0x49, 0xb0, 0x0b, 0xcb, + 0x32, 0xf7, 0x43, 0xa1, 0x08, 0xd6, 0xc4, 0xbb, 0x9f, 0xa6, 0xc5, 0x38, 0xab, 0x8f, 0x3e, 0x84, + 0x9b, 0x64, 0x4c, 0x0c, 0x93, 0xf4, 0x4d, 0x1a, 0x81, 0xd4, 0x05, 0xc8, 0x8b, 0x12, 0xe4, 0xe6, + 0x6e, 0x56, 0x01, 0xe7, 0x6d, 0xd0, 0x01, 0xac, 0x8c, 0xec, 0x3c, 0x54, 0x10, 0x87, 0x2f, 0x49, + 0xa8, 0x95, 0xd3, 0xbc, 0x0a, 0x2e, 0xb2, 0x43, 0x9f, 0x03, 0xe8, 0x61, 0x63, 0xf6, 0xdb, 0x73, + 0xa2, 0x92, 0xbe, 0x59, 0x21, 0x5f, 0xa2, 0x6e, 0x1e, 0x57, 0xb1, 0x68, 0xc9, 0xc7, 0x09, 0x4c, + 0x74, 0x17, 0x96, 0x3c, 0x9e, 0x01, 0xd1, 0x56, 0xe7, 0xc5, 0x56, 0xbf, 0x23, 0xcd, 0x96, 0x70, + 0x52, 0x88, 0xd3, 0xba, 0xe8, 0x0e, 0xb4, 0x74, 0xc7, 0x34, 0x45, 0xe4, 0xef, 0x39, 0x23, 0x9b, + 0x89, 0xe0, 0x6d, 0xf4, 0x10, 0xef, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, + 0x6d, 0x26, 0x4c, 0x67, 0x74, 0x27, 0x35, 0xfa, 0xbc, 0x92, 0x19, 0x7d, 0x6e, 0xe5, 0x2d, 0x12, + 0x93, 0x8f, 0x01, 0x4b, 0x3c, 0xf8, 0x0d, 0x7b, 0x18, 0x3c, 0xb8, 0x2c, 0x89, 0x6f, 0x5d, 0x98, + 0x4a, 0x91, 0x76, 0xa2, 0x31, 0xde, 0x14, 0x27, 0x4f, 0x0a, 0x71, 0x1a, 0x59, 0xfd, 0x00, 0x5a, + 0xe9, 0x3c, 0x4c, 0xcd, 0xf4, 0xca, 0xa5, 0x33, 0xfd, 0x37, 0x0a, 0xac, 0x95, 0x78, 0x47, 0x26, + 0xb4, 0x2c, 0xf2, 0x38, 0x11, 0x23, 0x97, 0xce, 0xc6, 0x9c, 
0x35, 0x69, 0x01, 0x6b, 0xd2, 0x1e, + 0xd8, 0xec, 0xd0, 0x3b, 0x66, 0x9e, 0x61, 0x0f, 0x83, 0x77, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, + 0xfa, 0x0c, 0x9a, 0x16, 0x79, 0x7c, 0x3c, 0xf2, 0x86, 0x45, 0xf7, 0x55, 0xcd, 0x8f, 0xe8, 0x1f, + 0x07, 0x12, 0x05, 0x47, 0x78, 0xea, 0x21, 0x6c, 0xa6, 0x0e, 0xc9, 0x4b, 0x05, 0x7d, 0x38, 0x32, + 0x8f, 0x69, 0xfc, 0xe0, 0x6f, 0xc0, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xe5, 0xa2, 0xd1, 0x5b, 0x9a, + 0x9e, 0x6f, 0x2c, 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0x71, 0xac, 0x13, 0x93, + 0x5e, 0x03, 0x75, 0xb8, 0x97, 0xa2, 0x0e, 0x6a, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 0x86, 0xfd, + 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, + 0xe5, 0xb2, 0x2a, 0xa9, 0xfe, 0xbe, 0x06, 0x8b, 0x09, 0x17, 0x57, 0xb3, 0xe6, 0xd7, 0x9d, 0x18, + 0x34, 0x78, 0x19, 0xda, 0xa9, 0x72, 0x10, 0x2d, 0x1c, 0x2a, 0x82, 0xf9, 0x2d, 0xee, 0xde, 0xf9, + 0x59, 0xe3, 0x03, 0x68, 0x31, 0xe2, 0x0d, 0x29, 0x0b, 0x65, 0xe2, 0xc2, 0x16, 0xe2, 0x49, 0xff, + 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, + 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, + 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x57, 0x0d, 0x56, 0x13, + 0xda, 0x31, 0x37, 0xfd, 0x61, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, + 0xd3, 0x62, 0x76, 0x37, 0xfb, 0xbc, 0xd9, 0xdd, 0x73, 0x20, 0xc5, 0xea, 0x9f, 0x14, 0x58, 0x4e, + 0xdc, 0xdd, 0x35, 0x30, 0xc6, 0x07, 0x69, 0xc6, 0xf8, 0x72, 0x95, 0xa0, 0x29, 0xa1, 0x8c, 0x7f, + 0x6d, 0xa4, 0x36, 0xff, 0xad, 0x27, 0x31, 0xbf, 0x86, 0xd5, 0xb1, 0x63, 0x8e, 0x2c, 0xba, 0x67, + 0x12, 0xc3, 0x0a, 0x15, 0xf8, 0xc4, 0x38, 0x9b, 0xfd, 0x63, 0x28, 0x82, 0xa7, 0x9e, 0x6f, 0xf8, + 0x8c, 0xda, 0xec, 0xd3, 0xd8, 0xb2, 0xf7, 0x5d, 0xe9, 0x64, 0xf5, 0xd3, 0x02, 0x38, 0x5c, 0xe8, + 0x04, 0xfd, 0x00, 0x16, 0xf9, 0xc0, 0x6c, 0xe8, 0x94, 0x73, 0x6f, 0x19, 0x58, 0x2b, 0x12, 0x68, + 0xf1, 0x38, 0x16, 0xe1, 0xa4, 0x1e, 0x7a, 0x04, 0x2b, 0xae, 0x33, 0x38, 0x20, 0x36, 0x19, 0x52, + 0x3e, 0x66, 0x1c, 0x39, 0xa6, 0xa1, 0x4f, 0x04, 0xb3, 0x59, 0xe8, 0xbd, 0x1b, 0x4e, 0xa6, 0x47, + 0x79, 0x95, 0x67, 0x9c, 0x22, 0xe4, 0x97, 0x45, 0x52, 0x17, 0x41, 0x22, 0x0f, 0x5a, 0x23, 0xd9, + 0xee, 0x25, 0xd1, 0x0b, 0xfe, 0x6f, 0xd9, 0xa9, 0x12, 0x61, 0xa7, 0x29, 0xcb, 0xb8, 0xfa, 0xa7, + 0xd7, 0x71, 0xc6, 0x43, 0x29, 0x71, 0x6b, 0xfe, 0x3f, 0xc4, 0x4d, 0xfd, 0x77, 0x1d, 0x6e, 0xe6, + 0x4a, 0x25, 0xfa, 0xf1, 0x05, 0x0c, 0xe7, 0xd6, 0x73, 0x63, 0x37, 0xb9, 0x01, 0x7d, 0xf6, 0x0a, + 0x03, 0xfa, 0x2e, 0x2c, 0xeb, 0x23, 0xcf, 0xa3, 0x36, 0xcb, 0xb0, 0x9a, 0x88, 0x1a, 0xed, 0xa5, + 0xc5, 0x38, 0xab, 0x5f, 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, + 0xbb, 0xfc, 0x2e, 0xe4, 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, + 0x83, 0xd3, 0x94, 0x14, 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x84, + 0x81, 0xc8, 0xf1, 0xed, 0x2a, 0xb1, 0x5c, 0x99, 0x85, 0xa9, 0x7f, 0x53, 0xe0, 0xc5, 0xd2, 0x24, + 0x40, 0xbb, 0xa9, 0x96, 0xbb, 0x9d, 0x69, 0xb9, 0xdf, 0x2b, 0x35, 0x4c, 0xf4, 0x5d, 0xaf, 0x98, + 0x1a, 0xbd, 0x5f, 0x8d, 0x1a, 0x15, 0xcc, 0xed, 0x97, 0x73, 0xa4, 0xde, 0xf6, 0x93, 0xa7, 0x9d, + 0x99, 0xaf, 0x9e, 0x76, 0x66, 0xbe, 0x7e, 0xda, 0x99, 0xf9, 0xcd, 0xb4, 0xa3, 0x3c, 0x99, 0x76, + 0x94, 0xaf, 0xa6, 0x1d, 0xe5, 0xeb, 0x69, 0x47, 0xf9, 0xc7, 0xb4, 0xa3, 0xfc, 0xee, 0x9b, 0xce, + 0xcc, 0x67, 0xf3, 0xd2, 0xe3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x89, 0x29, 
0x5c, 0x61, + 0x1b, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto index 694f657..6f41f06 100644 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta1/generated.proto @@ -43,7 +43,7 @@ option go_package = "v1beta1"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -56,7 +56,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -143,7 +143,6 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional - // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready @@ -263,7 +262,7 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is atmost 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -277,15 +276,15 @@ message RollingUpdateStatefulSetStrategy { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go index b77fcf7..d462604 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ b/vendor/k8s.io/api/apps/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. 
package v1beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -55,20 +55,22 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } +// +genclient +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -111,7 +113,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement PodManagementPolicyType = "Parallel" + ParallelPodManagement = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -134,13 +136,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -321,8 +323,7 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - // +patchStrategy=retainKeys - Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. 
@@ -433,7 +434,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is atmost 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -541,7 +542,7 @@ type DeploymentList struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -558,7 +559,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go index 504b858..68ebef3 100644 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -149,7 +149,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { @@ -167,9 +167,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go index fb27612..93892bf 100644 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -137,7 +137,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -470,7 +470,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go index 9f49986..e93e164 100644 --- a/vendor/k8s.io/api/apps/v1beta2/doc.go +++ b/vendor/k8s.io/api/apps/v1beta2/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1beta2 // import "k8s.io/api/apps/v1beta2" diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go index 8a9f200..72d832c 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go +++ b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go @@ -14,29 +14,66 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto +// DO NOT EDIT! +/* + Package v1beta2 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto + + It has these top-level messages: + ControllerRevision + ControllerRevisionList + DaemonSet + DaemonSetCondition + DaemonSetList + DaemonSetSpec + DaemonSetStatus + DaemonSetUpdateStrategy + Deployment + DeploymentCondition + DeploymentList + DeploymentSpec + DeploymentStatus + DeploymentStrategy + ReplicaSet + ReplicaSetCondition + ReplicaSetList + ReplicaSetSpec + ReplicaSetStatus + RollingUpdateDaemonSet + RollingUpdateDeployment + RollingUpdateStatefulSetStrategy + Scale + ScaleSpec + ScaleStatus + StatefulSet + StatefulSetCondition + StatefulSetList + StatefulSetSpec + StatefulSetStatus + StatefulSetUpdateStrategy +*/ package v1beta2 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) +import strings "strings" +import reflect "reflect" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -47,876 +84,138 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{0} -} -func (m *ControllerRevision) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ControllerRevision) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ControllerRevision) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerRevision.Merge(m, src) -} -func (m *ControllerRevision) XXX_Size() int { - return m.Size() -} -func (m *ControllerRevision) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerRevision.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerRevision proto.InternalMessageInfo - -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{1} -} -func (m *ControllerRevisionList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ControllerRevisionList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ControllerRevisionList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ControllerRevisionList.Merge(m, src) -} -func (m *ControllerRevisionList) XXX_Size() int { - return m.Size() -} -func (m *ControllerRevisionList) XXX_DiscardUnknown() { - xxx_messageInfo_ControllerRevisionList.DiscardUnknown(m) -} - -var xxx_messageInfo_ControllerRevisionList proto.InternalMessageInfo - -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{2} -} -func (m *DaemonSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSet.Merge(m, src) -} -func (m *DaemonSet) XXX_Size() int { - return m.Size() -} -func (m *DaemonSet) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSet.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSet proto.InternalMessageInfo - -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{3} -} -func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetCondition.Merge(m, src) -} -func (m *DaemonSetCondition) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetCondition 
proto.InternalMessageInfo - -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{4} -} -func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetList.Merge(m, src) -} -func (m *DaemonSetList) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetList) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetList.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo - -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{5} -} -func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetSpec.Merge(m, src) -} -func (m *DaemonSetSpec) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo - -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{6} -} -func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetStatus.Merge(m, src) -} -func (m *DaemonSetStatus) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo - -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{7} -} -func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) -} -func (m *DaemonSetUpdateStrategy) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo - -func (m *Deployment) Reset() { *m = Deployment{} } -func 
(*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{8} -} -func (m *Deployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Deployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Deployment.Merge(m, src) -} -func (m *Deployment) XXX_Size() int { - return m.Size() -} -func (m *Deployment) XXX_DiscardUnknown() { - xxx_messageInfo_Deployment.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_Deployment proto.InternalMessageInfo +func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } +func (*ControllerRevision) ProtoMessage() {} +func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{9} -} -func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentCondition.Merge(m, src) -} -func (m *DeploymentCondition) XXX_Size() int { - return m.Size() -} -func (m *DeploymentCondition) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) -} +func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } +func (*ControllerRevisionList) ProtoMessage() {} +func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{10} -} -func (m *DeploymentList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentList) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentList.Merge(m, src) -} -func (m *DeploymentList) XXX_Size() int { - return m.Size() -} -func (m *DeploymentList) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentList.DiscardUnknown(m) -} +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_DeploymentList proto.InternalMessageInfo +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{4} } -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{11} -} -func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentSpec.Merge(m, src) -} -func (m *DeploymentSpec) XXX_Size() int { - return m.Size() -} -func (m *DeploymentSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) -} +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{12} -} -func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentStatus.Merge(m, src) -} -func (m *DeploymentStatus) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) -} +func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } +func (*DaemonSetUpdateStrategy) ProtoMessage() {} +func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{13} -} -func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentStrategy.Merge(m, src) -} -func (m *DeploymentStrategy) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{14} -} -func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSet.Merge(m, src) -} -func (m *ReplicaSet) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSet) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSet.DiscardUnknown(m) -} +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{15} -} -func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetCondition.Merge(m, src) -} -func (m *ReplicaSetCondition) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) -} +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} +func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{16} -} -func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetList.Merge(m, src) -} -func (m *ReplicaSetList) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetList) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) -} +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) 
ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{17} -} -func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetSpec.Merge(m, src) -} -func (m *ReplicaSetSpec) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) -} +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{18} -} -func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetStatus.Merge(m, src) -} -func (m *ReplicaSetStatus) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) -} +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{19} -} -func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) -} -func (m *RollingUpdateDaemonSet) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) -} +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } -var xxx_messageInfo_RollingUpdateDaemonSet 
proto.InternalMessageInfo +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } func (*RollingUpdateDeployment) ProtoMessage() {} func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{20} + return fileDescriptorGenerated, []int{20} } -func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) -} -func (m *RollingUpdateDeployment) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) -} - -var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{21} -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateStatefulSetStrategy.Merge(m, src) -} -func (m *RollingUpdateStatefulSetStrategy) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateStatefulSetStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateStatefulSetStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_RollingUpdateStatefulSetStrategy proto.InternalMessageInfo - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{22} -} -func (m *Scale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scale) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scale.Merge(m, src) -} -func (m *Scale) XXX_Size() int { - return m.Size() -} -func (m *Scale) XXX_DiscardUnknown() { - xxx_messageInfo_Scale.DiscardUnknown(m) -} - -var xxx_messageInfo_Scale proto.InternalMessageInfo - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{23} -} -func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleSpec) XXX_Merge(src proto.Message) 
{ - xxx_messageInfo_ScaleSpec.Merge(m, src) -} -func (m *ScaleSpec) XXX_Size() int { - return m.Size() -} -func (m *ScaleSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{24} -} -func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleStatus.Merge(m, src) -} -func (m *ScaleStatus) XXX_Size() int { - return m.Size() -} -func (m *ScaleStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo - -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{25} -} -func (m *StatefulSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSet.Merge(m, src) -} -func (m *StatefulSet) XXX_Size() int { - return m.Size() -} -func (m *StatefulSet) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSet.DiscardUnknown(m) + return fileDescriptorGenerated, []int{21} } -var xxx_messageInfo_StatefulSet proto.InternalMessageInfo +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{26} -} -func (m *StatefulSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetCondition.Merge(m, src) -} -func (m *StatefulSetCondition) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetCondition.DiscardUnknown(m) -} +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } -var xxx_messageInfo_StatefulSetCondition proto.InternalMessageInfo +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { - return 
fileDescriptor_42fe616264472f7e, []int{27} -} -func (m *StatefulSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetList.Merge(m, src) -} -func (m *StatefulSetList) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetList) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetList.DiscardUnknown(m) -} +func (m *StatefulSet) Reset() { *m = StatefulSet{} } +func (*StatefulSet) ProtoMessage() {} +func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } -var xxx_messageInfo_StatefulSetList proto.InternalMessageInfo +func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } +func (*StatefulSetCondition) ProtoMessage() {} +func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{28} -} -func (m *StatefulSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetSpec.Merge(m, src) -} -func (m *StatefulSetSpec) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetSpec.DiscardUnknown(m) -} +func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } +func (*StatefulSetList) ProtoMessage() {} +func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } -var xxx_messageInfo_StatefulSetSpec proto.InternalMessageInfo +func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } +func (*StatefulSetSpec) ProtoMessage() {} +func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{29} -} -func (m *StatefulSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetStatus.Merge(m, src) -} -func (m *StatefulSetStatus) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_StatefulSetStatus proto.InternalMessageInfo +func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } +func (*StatefulSetStatus) ProtoMessage() {} +func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } func (*StatefulSetUpdateStrategy) 
ProtoMessage() {} func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_42fe616264472f7e, []int{30} -} -func (m *StatefulSetUpdateStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatefulSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatefulSetUpdateStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatefulSetUpdateStrategy.Merge(m, src) -} -func (m *StatefulSetUpdateStrategy) XXX_Size() int { - return m.Size() -} -func (m *StatefulSetUpdateStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_StatefulSetUpdateStrategy.DiscardUnknown(m) + return fileDescriptorGenerated, []int{30} } -var xxx_messageInfo_StatefulSetUpdateStrategy proto.InternalMessageInfo - func init() { proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") @@ -943,7 +242,6 @@ func init() { proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale") proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus.SelectorEntry") proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") @@ -951,155 +249,10 @@ func init() { proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptor_42fe616264472f7e) -} - -var fileDescriptor_42fe616264472f7e = []byte{ - // 2171 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x1c, 0xb7, - 0xf9, 0xd6, 0xec, 0x87, 0xb4, 0xa2, 0x2c, 0xc9, 0xa6, 0xf4, 0x93, 0x36, 0xf2, 0xaf, 0x2b, 0x63, - 0x13, 0x38, 0x4a, 0x6c, 0xcd, 0xda, 0xca, 0x07, 0x12, 0xbb, 0x68, 0xab, 0x95, 0x52, 0xdb, 0x81, - 0xbe, 0x42, 0x59, 0x06, 0x1a, 0xb4, 0xa8, 0xa9, 0x5d, 0x7a, 0x35, 0xd1, 0x7c, 0x61, 0x86, 0xb3, - 0xf5, 0xa2, 0x97, 0x9e, 0x0a, 0x14, 0x28, 0xd0, 0xf6, 0xda, 0x7f, 0xa2, 0xb7, 0xa2, 0x68, 0x6f, - 0x45, 0x50, 0xf8, 0x52, 0x20, 0xe8, 0x25, 0x39, 0x09, 0xf5, 0xe6, 0x54, 0x14, 0xbd, 0x14, 0xe8, - 0x25, 0x40, 0x81, 0x82, 0x1c, 0xce, 0x07, 0xe7, 0xc3, 0x3b, 0x52, 0x1c, 0xa5, 0x29, 0x72, 0xd3, - 0x92, 0xcf, 0xfb, 0xf0, 0x7d, 0xc9, 0x97, 0x7c, 0x1f, 0x72, 0x04, 0xbe, 0x73, 0xfc, 0x96, 0xab, - 0x6a, 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, - 0x25, 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0xcd, 0x43, 0x42, 0xf1, 0x5a, 0xab, - 0x47, 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, - 0xa6, 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, - 0x3d, 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc4, 0x7f, 0xf1, 0x1f, 0xfc, 0x2f, 0x9f, 0x67, - 0xa9, 0x19, 0x1b, 0xb0, 0x63, 0x39, 0xa4, 0xd5, 0xbf, 0x99, 0x1c, 0x6b, 
0xe9, 0xf5, 0x08, 0x63, - 0xe0, 0xce, 0x91, 0x66, 0x12, 0x67, 0xd0, 0xb2, 0x8f, 0x7b, 0xac, 0xc1, 0x6d, 0x19, 0x84, 0xe2, - 0x2c, 0xab, 0x56, 0x9e, 0x95, 0xe3, 0x99, 0x54, 0x33, 0x48, 0xca, 0xe0, 0xcd, 0x51, 0x06, 0x6e, - 0xe7, 0x88, 0x18, 0x38, 0x65, 0xf7, 0x5a, 0x9e, 0x9d, 0x47, 0x35, 0xbd, 0xa5, 0x99, 0xd4, 0xa5, - 0x4e, 0xd2, 0xa8, 0xf9, 0x2f, 0x05, 0xc0, 0x0d, 0xcb, 0xa4, 0x8e, 0xa5, 0xeb, 0xc4, 0x41, 0xa4, - 0xaf, 0xb9, 0x9a, 0x65, 0xc2, 0x87, 0xa0, 0xc6, 0xe2, 0xe9, 0x62, 0x8a, 0xeb, 0xca, 0x15, 0x65, - 0x65, 0x6a, 0xed, 0x86, 0x1a, 0xcd, 0x74, 0x48, 0xaf, 0xda, 0xc7, 0x3d, 0xd6, 0xe0, 0xaa, 0x0c, - 0xad, 0xf6, 0x6f, 0xaa, 0xbb, 0x87, 0x1f, 0x90, 0x0e, 0xdd, 0x26, 0x14, 0xb7, 0xe1, 0x93, 0x93, - 0xe5, 0xb1, 0xe1, 0xc9, 0x32, 0x88, 0xda, 0x50, 0xc8, 0x0a, 0x77, 0x41, 0x85, 0xb3, 0x97, 0x38, - 0xfb, 0x6a, 0x2e, 0xbb, 0x08, 0x5a, 0x45, 0xf8, 0x47, 0xef, 0x3c, 0xa6, 0xc4, 0x64, 0xee, 0xb5, - 0x2f, 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x1d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, - 0x7c, 0x45, 0x59, 0x29, 0xb7, 0x2f, 0x0a, 0x54, 0x2d, 0x08, 0x0b, 0x85, 0x88, 0xe6, 0x13, 0x05, - 0x2c, 0xa4, 0xe3, 0xde, 0xd2, 0x5c, 0x0a, 0xbf, 0x9f, 0x8a, 0x5d, 0x2d, 0x16, 0x3b, 0xb3, 0xe6, - 0x91, 0x87, 0x03, 0x07, 0x2d, 0xb1, 0xb8, 0xf7, 0x40, 0x55, 0xa3, 0xc4, 0x70, 0xeb, 0xa5, 0x2b, - 0xe5, 0x95, 0xa9, 0xb5, 0x6b, 0x6a, 0x4e, 0x02, 0xab, 0x69, 0xef, 0xda, 0xd3, 0x82, 0xb7, 0x7a, - 0x8f, 0x31, 0x20, 0x9f, 0xa8, 0xf9, 0xb3, 0x12, 0x98, 0xdc, 0xc4, 0xc4, 0xb0, 0xcc, 0x7d, 0x42, - 0xcf, 0x61, 0xe5, 0xee, 0x82, 0x8a, 0x6b, 0x93, 0x8e, 0x58, 0xb9, 0xab, 0xb9, 0x01, 0x84, 0x3e, - 0xed, 0xdb, 0xa4, 0x13, 0x2d, 0x19, 0xfb, 0x85, 0x38, 0x03, 0xdc, 0x03, 0xe3, 0x2e, 0xc5, 0xd4, - 0x73, 0xf9, 0x82, 0x4d, 0xad, 0xad, 0x14, 0xe0, 0xe2, 0xf8, 0xf6, 0x8c, 0x60, 0x1b, 0xf7, 0x7f, - 0x23, 0xc1, 0xd3, 0xfc, 0x5b, 0x09, 0xc0, 0x10, 0xbb, 0x61, 0x99, 0x5d, 0x8d, 0xb2, 0x74, 0xbe, - 0x05, 0x2a, 0x74, 0x60, 0x13, 0x3e, 0x21, 0x93, 0xed, 0xab, 0x81, 0x2b, 0xf7, 0x07, 0x36, 0xf9, - 0xec, 0x64, 0x79, 0x21, 0x6d, 0xc1, 0x7a, 0x10, 0xb7, 0x81, 0x5b, 0xa1, 0x93, 0x25, 0x6e, 0xfd, - 0xba, 0x3c, 0xf4, 0x67, 0x27, 0xcb, 0x19, 0x67, 0x87, 0x1a, 0x32, 0xc9, 0x0e, 0xc2, 0x3e, 0x80, - 0x3a, 0x76, 0xe9, 0x7d, 0x07, 0x9b, 0xae, 0x3f, 0x92, 0x66, 0x10, 0x11, 0xfe, 0xab, 0xc5, 0x16, - 0x8a, 0x59, 0xb4, 0x97, 0x84, 0x17, 0x70, 0x2b, 0xc5, 0x86, 0x32, 0x46, 0x80, 0x57, 0xc1, 0xb8, - 0x43, 0xb0, 0x6b, 0x99, 0xf5, 0x0a, 0x8f, 0x22, 0x9c, 0x40, 0xc4, 0x5b, 0x91, 0xe8, 0x85, 0xaf, - 0x80, 0x09, 0x83, 0xb8, 0x2e, 0xee, 0x91, 0x7a, 0x95, 0x03, 0x67, 0x05, 0x70, 0x62, 0xdb, 0x6f, - 0x46, 0x41, 0x7f, 0xf3, 0xb7, 0x0a, 0x98, 0x0e, 0x67, 0xee, 0x1c, 0x76, 0xce, 0x1d, 0x79, 0xe7, - 0x34, 0x47, 0x27, 0x4b, 0xce, 0x86, 0xf9, 0xb0, 0x1c, 0x73, 0x9c, 0xa5, 0x23, 0xfc, 0x01, 0xa8, - 0xb9, 0x44, 0x27, 0x1d, 0x6a, 0x39, 0xc2, 0xf1, 0xd7, 0x0a, 0x3a, 0x8e, 0x0f, 0x89, 0xbe, 0x2f, - 0x4c, 0xdb, 0x17, 0x98, 0xe7, 0xc1, 0x2f, 0x14, 0x52, 0xc2, 0xf7, 0x40, 0x8d, 0x12, 0xc3, 0xd6, - 0x31, 0x25, 0x62, 0xd7, 0xbc, 0x18, 0x77, 0x9e, 0xe5, 0x0c, 0x23, 0xdb, 0xb3, 0xba, 0xf7, 0x05, - 0x8c, 0x6f, 0x99, 0x70, 0x32, 0x82, 0x56, 0x14, 0xd2, 0x40, 0x1b, 0xcc, 0x78, 0x76, 0x97, 0x21, - 0x29, 0x3b, 0xce, 0x7b, 0x03, 0x91, 0x43, 0x37, 0x46, 0xcf, 0xca, 0x81, 0x64, 0xd7, 0x5e, 0x10, - 0xa3, 0xcc, 0xc8, 0xed, 0x28, 0xc1, 0x0f, 0xd7, 0xc1, 0xac, 0xa1, 0x99, 0x88, 0xe0, 0xee, 0x60, - 0x9f, 0x74, 0x2c, 0xb3, 0xeb, 0xf2, 0x54, 0xaa, 0xb6, 0x17, 0x05, 0xc1, 0xec, 0xb6, 0xdc, 0x8d, - 0x92, 0x78, 0xb8, 0x05, 0xe6, 0x83, 0x03, 0xf8, 0xae, 0xe6, 0x52, 0xcb, 0x19, 0x6c, 0x69, 0x86, - 
0x46, 0xeb, 0xe3, 0x9c, 0xa7, 0x3e, 0x3c, 0x59, 0x9e, 0x47, 0x19, 0xfd, 0x28, 0xd3, 0xaa, 0xf9, - 0xab, 0x71, 0x30, 0x9b, 0x38, 0x17, 0xe0, 0x03, 0xb0, 0xd0, 0xf1, 0x1c, 0x87, 0x98, 0x74, 0xc7, - 0x33, 0x0e, 0x89, 0xb3, 0xdf, 0x39, 0x22, 0x5d, 0x4f, 0x27, 0x5d, 0xbe, 0xac, 0xd5, 0x76, 0x43, - 0xf8, 0xba, 0xb0, 0x91, 0x89, 0x42, 0x39, 0xd6, 0xf0, 0x5d, 0x00, 0x4d, 0xde, 0xb4, 0xad, 0xb9, - 0x6e, 0xc8, 0x59, 0xe2, 0x9c, 0xe1, 0x56, 0xdc, 0x49, 0x21, 0x50, 0x86, 0x15, 0xf3, 0xb1, 0x4b, - 0x5c, 0xcd, 0x21, 0xdd, 0xa4, 0x8f, 0x65, 0xd9, 0xc7, 0xcd, 0x4c, 0x14, 0xca, 0xb1, 0x86, 0x6f, - 0x80, 0x29, 0x7f, 0x34, 0x3e, 0xe7, 0x62, 0x71, 0xe6, 0x04, 0xd9, 0xd4, 0x4e, 0xd4, 0x85, 0xe2, - 0x38, 0x16, 0x9a, 0x75, 0xe8, 0x12, 0xa7, 0x4f, 0xba, 0x77, 0x7c, 0x71, 0xc0, 0x2a, 0x68, 0x95, - 0x57, 0xd0, 0x30, 0xb4, 0xdd, 0x14, 0x02, 0x65, 0x58, 0xb1, 0xd0, 0xfc, 0xac, 0x49, 0x85, 0x36, - 0x2e, 0x87, 0x76, 0x90, 0x89, 0x42, 0x39, 0xd6, 0x2c, 0xf7, 0x7c, 0x97, 0xd7, 0xfb, 0x58, 0xd3, - 0xf1, 0xa1, 0x4e, 0xea, 0x13, 0x72, 0xee, 0xed, 0xc8, 0xdd, 0x28, 0x89, 0x87, 0x77, 0xc0, 0x25, - 0xbf, 0xe9, 0xc0, 0xc4, 0x21, 0x49, 0x8d, 0x93, 0xbc, 0x20, 0x48, 0x2e, 0xed, 0x24, 0x01, 0x28, - 0x6d, 0x03, 0x6f, 0x81, 0x99, 0x8e, 0xa5, 0xeb, 0x3c, 0x1f, 0x37, 0x2c, 0xcf, 0xa4, 0xf5, 0x49, - 0xce, 0x02, 0xd9, 0x1e, 0xda, 0x90, 0x7a, 0x50, 0x02, 0x09, 0x7f, 0x08, 0x40, 0x27, 0x28, 0x0c, - 0x6e, 0x1d, 0x8c, 0x50, 0x00, 0xe9, 0xb2, 0x14, 0x55, 0xe6, 0xb0, 0xc9, 0x45, 0x31, 0xca, 0xe6, - 0x87, 0x0a, 0x58, 0xcc, 0xd9, 0xe8, 0xf0, 0xdb, 0x52, 0x11, 0xbc, 0x96, 0x28, 0x82, 0x97, 0x73, - 0xcc, 0x62, 0x95, 0xf0, 0x08, 0x4c, 0x33, 0x41, 0xa2, 0x99, 0x3d, 0x1f, 0x22, 0xce, 0xb2, 0x56, - 0x6e, 0x00, 0x28, 0x8e, 0x8e, 0x4e, 0xe5, 0x4b, 0xc3, 0x93, 0xe5, 0x69, 0xa9, 0x0f, 0xc9, 0xc4, - 0xcd, 0x9f, 0x97, 0x00, 0xd8, 0x24, 0xb6, 0x6e, 0x0d, 0x0c, 0x62, 0x9e, 0x87, 0xa6, 0xb9, 0x27, - 0x69, 0x9a, 0x97, 0xf3, 0x97, 0x24, 0x74, 0x2a, 0x57, 0xd4, 0xbc, 0x97, 0x10, 0x35, 0xaf, 0x14, - 0x21, 0x7b, 0xb6, 0xaa, 0xf9, 0xb8, 0x0c, 0xe6, 0x22, 0x70, 0x24, 0x6b, 0x6e, 0x4b, 0x2b, 0xfa, - 0x72, 0x62, 0x45, 0x17, 0x33, 0x4c, 0xbe, 0x30, 0x5d, 0xf3, 0x01, 0x98, 0x61, 0xaa, 0xc3, 0x5f, - 0x3f, 0xae, 0x69, 0xc6, 0x4f, 0xad, 0x69, 0xc2, 0x4a, 0xb4, 0x25, 0x31, 0xa1, 0x04, 0x73, 0x8e, - 0x86, 0x9a, 0xf8, 0x2a, 0x6a, 0xa8, 0xdf, 0x29, 0x60, 0x26, 0x5a, 0xa6, 0x73, 0x10, 0x51, 0x77, - 0x65, 0x11, 0xf5, 0x62, 0x81, 0xe4, 0xcc, 0x51, 0x51, 0x1f, 0x57, 0xe2, 0xae, 0x73, 0x19, 0xb5, - 0xc2, 0xae, 0x60, 0xb6, 0xae, 0x75, 0xb0, 0x2b, 0xea, 0xed, 0x05, 0xff, 0xfa, 0xe5, 0xb7, 0xa1, - 0xb0, 0x57, 0x12, 0x5c, 0xa5, 0x2f, 0x56, 0x70, 0x95, 0x9f, 0x8f, 0xe0, 0xfa, 0x1e, 0xa8, 0xb9, - 0x81, 0xd4, 0xaa, 0x70, 0xca, 0x6b, 0x85, 0x36, 0xb6, 0x50, 0x59, 0x21, 0x75, 0xa8, 0xaf, 0x42, - 0xba, 0x2c, 0x65, 0x55, 0xfd, 0x32, 0x95, 0x15, 0x4b, 0x74, 0x1b, 0x7b, 0x2e, 0xe9, 0xf2, 0x4d, - 0x55, 0x8b, 0x12, 0x7d, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, 0xed, 0x58, 0x3d, 0x87, - 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x10, 0x80, 0x5f, 0x13, 0x2f, 0x0f, 0x4f, 0x96, - 0x17, 0xf7, 0xb2, 0x21, 0x28, 0xcf, 0xb6, 0xf9, 0xc7, 0x0a, 0xb8, 0x98, 0x3c, 0x1b, 0x73, 0x64, - 0x8a, 0x72, 0x26, 0x99, 0x72, 0x3d, 0x96, 0xa7, 0xbe, 0x86, 0x8b, 0x3d, 0x15, 0xa4, 0x72, 0x75, - 0x1d, 0xcc, 0x0a, 0x59, 0x12, 0x74, 0x0a, 0xa1, 0x16, 0x2e, 0xcf, 0x81, 0xdc, 0x8d, 0x92, 0x78, - 0x78, 0x1b, 0x4c, 0x3b, 0x5c, 0x79, 0x05, 0x04, 0xbe, 0x7a, 0xf9, 0x3f, 0x41, 0x30, 0x8d, 0xe2, - 0x9d, 0x48, 0xc6, 0x32, 0xe5, 0x12, 0x09, 0x92, 0x80, 0xa0, 0x22, 0x2b, 0x97, 0xf5, 0x24, 0x00, - 0xa5, 0x6d, 0xe0, 0x36, 
0x98, 0xf3, 0xcc, 0x34, 0x95, 0x9f, 0x6b, 0x97, 0x05, 0xd5, 0xdc, 0x41, - 0x1a, 0x82, 0xb2, 0xec, 0xe0, 0x43, 0x49, 0xcc, 0x8c, 0xf3, 0xf3, 0xe4, 0x7a, 0x81, 0x3d, 0x51, - 0x58, 0xcd, 0x64, 0x48, 0xad, 0x5a, 0x51, 0xa9, 0xd5, 0xfc, 0x83, 0x02, 0x60, 0x7a, 0x1f, 0x8e, - 0x7c, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0x6e, 0x14, 0xd4, 0x3f, 0xd1, 0x81, - 0x5a, 0x4c, 0x00, 0x89, 0x89, 0x3e, 0x9f, 0x47, 0x9d, 0xa2, 0x02, 0x28, 0x72, 0xea, 0x39, 0x08, - 0xa0, 0x18, 0xd9, 0xb3, 0x05, 0xd0, 0xdf, 0x4b, 0x60, 0x2e, 0x02, 0x17, 0x16, 0x40, 0x19, 0x26, - 0x5f, 0x3f, 0xec, 0x14, 0x13, 0x25, 0xd1, 0xd4, 0xfd, 0x37, 0x89, 0x92, 0xc8, 0xab, 0x1c, 0x51, - 0xf2, 0x9b, 0x52, 0xdc, 0xf5, 0x53, 0x8a, 0x92, 0xe7, 0xf0, 0xc2, 0xf1, 0x95, 0xd3, 0x35, 0xcd, - 0x3f, 0x95, 0xc1, 0xc5, 0xe4, 0x3e, 0x94, 0x0a, 0xa4, 0x32, 0xb2, 0x40, 0xee, 0x81, 0xf9, 0x47, - 0x9e, 0xae, 0x0f, 0x78, 0x0c, 0xb1, 0x2a, 0xe9, 0x97, 0xd6, 0xff, 0x17, 0x96, 0xf3, 0xdf, 0xcd, - 0xc0, 0xa0, 0x4c, 0xcb, 0x74, 0xbd, 0xac, 0x7c, 0xde, 0x7a, 0x59, 0x3d, 0x43, 0xbd, 0xcc, 0x96, - 0x1c, 0xe5, 0x33, 0x49, 0x8e, 0xd3, 0x15, 0xcb, 0x8c, 0x83, 0x6b, 0xe4, 0xd5, 0xff, 0xa7, 0x0a, - 0x58, 0xc8, 0xbe, 0x70, 0x43, 0x1d, 0xcc, 0x18, 0xf8, 0x71, 0xfc, 0xe1, 0x63, 0x54, 0x11, 0xf1, - 0xa8, 0xa6, 0xab, 0xfe, 0x27, 0x23, 0xf5, 0x9e, 0x49, 0x77, 0x9d, 0x7d, 0xea, 0x68, 0x66, 0xcf, - 0xaf, 0xbc, 0xdb, 0x12, 0x17, 0x4a, 0x70, 0x37, 0x3f, 0x55, 0xc0, 0x62, 0x4e, 0xe5, 0x3b, 0x5f, - 0x4f, 0xe0, 0xfb, 0xa0, 0x66, 0xe0, 0xc7, 0xfb, 0x9e, 0xd3, 0xcb, 0xaa, 0xd5, 0xc5, 0xc6, 0xe1, - 0x5b, 0x71, 0x5b, 0xb0, 0xa0, 0x90, 0xaf, 0xb9, 0x0b, 0xae, 0x48, 0x41, 0xb2, 0x9d, 0x43, 0x1e, - 0x79, 0x3a, 0xdf, 0x44, 0x42, 0x6c, 0x5c, 0x03, 0x93, 0x36, 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, - 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, 0xbf, 0xf9, 0x6f, 0x05, 0x54, 0xf7, - 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, 0xff, 0x92, 0xce, 0xfd, 0xc9, 0x2d, - 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xd2, 0x08, 0x9e, 0x67, 0xd7, 0xf8, 0xb7, 0xc1, 0x64, 0x38, 0xdc, - 0xe9, 0x0e, 0xa0, 0xe6, 0xaf, 0x4b, 0x60, 0x2a, 0x36, 0xc4, 0x29, 0x8f, 0xaf, 0x87, 0xd2, 0x99, - 0xcd, 0x36, 0xe6, 0x5a, 0x91, 0x40, 0xd4, 0xe0, 0x7c, 0x7e, 0xc7, 0xa4, 0x4e, 0xfc, 0x82, 0x97, - 0x3e, 0xb6, 0xbf, 0x05, 0x66, 0x28, 0x76, 0x7a, 0x84, 0x06, 0x7d, 0x7c, 0xc2, 0x26, 0xa3, 0x07, - 0x8f, 0xfb, 0x52, 0x2f, 0x4a, 0xa0, 0x97, 0x6e, 0x83, 0x69, 0x69, 0x30, 0x78, 0x11, 0x94, 0x8f, - 0xc9, 0xc0, 0x97, 0x3d, 0x88, 0xfd, 0x09, 0xe7, 0x41, 0xb5, 0x8f, 0x75, 0xcf, 0xcf, 0xf3, 0x49, - 0xe4, 0xff, 0xb8, 0x55, 0x7a, 0x4b, 0x69, 0xfe, 0x82, 0x4d, 0x4e, 0x94, 0x9c, 0xe7, 0x90, 0x5d, - 0xef, 0x4a, 0xd9, 0x95, 0xff, 0x51, 0x2f, 0xbe, 0x65, 0xf2, 0x72, 0x0c, 0x25, 0x72, 0xec, 0xd5, - 0x42, 0x6c, 0xcf, 0xce, 0xb4, 0x7f, 0x94, 0xc0, 0x7c, 0x0c, 0x1d, 0xc9, 0xc9, 0x6f, 0x4a, 0x72, - 0x72, 0x25, 0x21, 0x27, 0xeb, 0x59, 0x36, 0x5f, 0xeb, 0xc9, 0xd1, 0x7a, 0xf2, 0xf7, 0x0a, 0x98, - 0x8d, 0xcd, 0xdd, 0x39, 0x08, 0xca, 0x7b, 0xb2, 0xa0, 0x7c, 0xa9, 0x48, 0xd2, 0xe4, 0x28, 0xca, - 0x3f, 0x57, 0x25, 0xe7, 0xff, 0xe7, 0xdf, 0xb9, 0x7e, 0x0c, 0xe6, 0xfb, 0x96, 0xee, 0x19, 0x64, - 0x43, 0xc7, 0x9a, 0x11, 0x00, 0x98, 0x02, 0x2b, 0x27, 0xef, 0x72, 0x21, 0x3d, 0x71, 0x5c, 0xcd, - 0xa5, 0xc4, 0xa4, 0x0f, 0x22, 0xcb, 0x48, 0xf7, 0x3d, 0xc8, 0xa0, 0x43, 0x99, 0x83, 0xc0, 0x37, - 0xc0, 0x14, 0x53, 0x4e, 0x5a, 0x87, 0xec, 0x60, 0x23, 0x48, 0xac, 0xf0, 0x13, 0xd6, 0x7e, 0xd4, - 0x85, 0xe2, 0x38, 0x78, 0x04, 0xe6, 0x6c, 0xab, 0xbb, 0x8d, 0x4d, 0xdc, 0x23, 0x4c, 0x66, 0xec, - 0x59, 0xba, 0xd6, 0x19, 0xf0, 0xc7, 0xaf, 0xc9, 
0xf6, 0x9b, 0xc1, 0xc3, 0xc6, 0x5e, 0x1a, 0xc2, - 0x2e, 0x89, 0x19, 0xcd, 0x7c, 0x53, 0x67, 0x51, 0x42, 0x27, 0xf5, 0xd9, 0xd5, 0x7f, 0x76, 0x5e, - 0x2b, 0x92, 0x61, 0x67, 0xfc, 0xf0, 0x9a, 0xf7, 0xb6, 0x57, 0x3b, 0xd3, 0x57, 0xd3, 0x7f, 0x56, - 0xc0, 0xa5, 0xd4, 0x51, 0xf9, 0x25, 0xbe, 0xae, 0xa5, 0xa4, 0x7e, 0xf9, 0x14, 0x52, 0x7f, 0x1d, - 0xcc, 0x8a, 0x0f, 0xb6, 0x89, 0x9b, 0x42, 0x78, 0x63, 0xdb, 0x90, 0xbb, 0x51, 0x12, 0x9f, 0xf5, - 0xba, 0x57, 0x3d, 0xe5, 0xeb, 0x5e, 0xdc, 0x0b, 0xf1, 0x0f, 0x48, 0x7e, 0xea, 0xa5, 0xbd, 0x10, - 0xff, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, 0xb2, 0x42, 0x38, 0x90, 0x7a, - 0x51, 0x02, 0xfd, 0xb9, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, 0x45, 0xf2, 0xb9, 0xf8, 0xdd, - 0xe4, 0x2f, 0x0a, 0x78, 0x21, 0x77, 0x23, 0xc0, 0x75, 0xa9, 0xec, 0xae, 0x26, 0xca, 0xee, 0x37, - 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0x7e, 0x9a, 0x7b, 0xbb, 0xd8, 0xd3, 0x5c, 0x86, 0x76, 0x1f, - 0xfd, 0x46, 0xd7, 0x5e, 0x7d, 0xf2, 0xb4, 0x31, 0xf6, 0xd1, 0xd3, 0xc6, 0xd8, 0x27, 0x4f, 0x1b, - 0x63, 0x3f, 0x19, 0x36, 0x94, 0x27, 0xc3, 0x86, 0xf2, 0xd1, 0xb0, 0xa1, 0x7c, 0x32, 0x6c, 0x28, - 0x7f, 0x1d, 0x36, 0x94, 0x5f, 0x7e, 0xda, 0x18, 0x7b, 0x7f, 0x42, 0x8c, 0xf8, 0x9f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x70, 0x2d, 0x26, 0x9d, 0xec, 0x28, 0x00, 0x00, -} - func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1107,45 +260,36 @@ func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ControllerRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x18 - { - size, err := m.Data.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) + n2, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil } func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1153,46 +297,37 @@ func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { } func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ControllerRevisionList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 if 
len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1200,52 +335,41 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n4 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1253,52 +377,41 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1306,46 +419,37 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n8, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n8 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1353,62 +457,51 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x30 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n9, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n9 + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n10, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n10 dAtA[i] = 0x1a - { - size, err 
:= m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + i += n11 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1416,65 +509,58 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x48 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - i-- - dAtA[i] = 0x40 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - i-- - dAtA[i] = 0x38 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - i-- - dAtA[i] = 0x30 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - i-- - dAtA[i] = 0x8 - return 
len(dAtA) - i, nil + return i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1482,39 +568,31 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n12 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1522,52 +600,41 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n13 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n14, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n14 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n15, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + return i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1575,62 +642,49 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { 
- size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n16 dAtA[i] = 0x3a - { - size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x32 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n17 + return i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1638,46 +692,37 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n18, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1685,80 +730,69 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) 
(int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ProgressDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) - i-- - dAtA[i] = 0x48 - } - i-- - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x30 + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x28 - { - size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n19 } - i-- + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 dAtA[i] = 0x22 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) } - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1766,59 +800,52 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x40 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x38 + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1826,39 +853,31 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1866,52 +885,41 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n23, err := 
m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n23 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n24, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n24 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n25, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n25 + return i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1919,52 +927,41 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1972,46 +969,37 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n27, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2019,52 +1007,43 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n28 } - i-- dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n29, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n29 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2072,51 +1051,44 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions 
{ dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2124,34 +1096,27 @@ func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 } - return len(dAtA) - i, nil + return i, nil } func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2159,46 +1124,37 @@ func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { } func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n31 } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n32 } - return len(dAtA) - i, nil + return i, nil } func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2206,27 +1162,22 @@ func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { } func (m 
*RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateStatefulSetStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.Partition != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) - i-- dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) } - return len(dAtA) - i, nil + return i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2234,52 +1185,41 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n33 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n34, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n34 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n35, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2287,25 +1227,20 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + return i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2313,54 +1248,46 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - 
i-- - dAtA[i] = 0x1a + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- + for _, k := range keysForSelector { dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i += copy(dAtA[i:], m.TargetSelector) + return i, nil } func (m *StatefulSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2368,52 +1295,41 @@ func (m *StatefulSet) Marshal() (dAtA []byte, err error) { } func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n36 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n37 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n38, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + return i, nil } func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2421,52 +1337,41 @@ func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
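
The ScaleStatus hunk above hand-rolls the protobuf map encoding for Selector: each map entry is emitted as a nested message whose key is field 1 and whose value is field 2, wrapped in the outer field tag 0x12 and the entry's byte size. A minimal standalone sketch of that inner loop follows; sov, putVarint and appendMapEntry are illustrative names of my own, not part of the generated file.

```go
// Toy reconstruction of one Selector map-entry write from the ScaleStatus hunk.
package main

import "fmt"

// sov returns the number of bytes a varint needs (loop form, as in this file).
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// putVarint writes v at b[i:] and returns the next free index.
func putVarint(b []byte, i int, v uint64) int {
	for v >= 1<<7 {
		b[i] = uint8(v&0x7f | 0x80)
		v >>= 7
		i++
	}
	b[i] = uint8(v)
	return i + 1
}

// appendMapEntry mirrors the body of the keysForSelector loop.
func appendMapEntry(b []byte, i int, k, v string) int {
	b[i] = 0x12 // outer map field (field 2, wire type 2)
	i++
	mapSize := 1 + len(k) + sov(uint64(len(k))) + 1 + len(v) + sov(uint64(len(v)))
	i = putVarint(b, i, uint64(mapSize))
	b[i] = 0xa // entry key: field 1, wire type 2
	i++
	i = putVarint(b, i, uint64(len(k)))
	i += copy(b[i:], k)
	b[i] = 0x12 // entry value: field 2, wire type 2
	i++
	i = putVarint(b, i, uint64(len(v)))
	i += copy(b[i:], v)
	return i
}

func main() {
	buf := make([]byte, 64)
	n := appendMapEntry(buf, 0, "app", "web")
	fmt.Printf("%x\n", buf[:n]) // 120a0a036170701203776562
}
```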
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n39 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2474,46 +1379,37 @@ func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n40, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2521,88 +1417,73 @@ func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int 
_ = i var l int _ = l - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x40 + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } - { - size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n41, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n41 } - i-- - dAtA[i] = 0x3a - i -= len(m.PodManagementPolicy) - copy(dAtA[i:], m.PodManagementPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i-- - dAtA[i] = 0x32 - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i-- - dAtA[i] = 0x2a + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n42, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 if len(m.VolumeClaimTemplates) > 0 { - for iNdEx := len(m.VolumeClaimTemplates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeClaimTemplates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.VolumeClaimTemplates { dAtA[i] = 0x22 - } - } - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x12 } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) + i += copy(dAtA[i:], m.PodManagementPolicy) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2610,66 +1491,57 @@ func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) 
+ dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) + i += copy(dAtA[i:], m.CurrentRevision) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) + i += copy(dAtA[i:], m.UpdateRevision) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x48 - } - i -= len(m.UpdateRevision) - copy(dAtA[i:], m.UpdateRevision) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i-- - dAtA[i] = 0x3a - i -= len(m.CurrentRevision) - copy(dAtA[i:], m.CurrentRevision) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i-- - dAtA[i] = 0x32 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2677,50 +1549,55 @@ func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatefulSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) 
+ return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *ControllerRevision) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2732,9 +1609,6 @@ func (m *ControllerRevision) Size() (n int) { } func (m *ControllerRevisionList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2749,9 +1623,6 @@ func (m *ControllerRevisionList) Size() (n int) { } func (m *DaemonSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2764,9 +1635,6 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2783,9 +1651,6 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2800,9 +1665,6 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Selector != nil { @@ -2821,9 +1683,6 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -2847,9 +1706,6 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2862,9 +1718,6 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2877,9 +1730,6 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2898,9 +1748,6 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2915,9 +1762,6 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -2943,9 +1787,6 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -2967,9 +1808,6 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2982,9 +1820,6 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2997,9 +1832,6 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -3016,9 +1848,6 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -3033,9 +1862,6 @@ func 
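
The encodeVarintGenerated hunk above is the whole patch in miniature: the removed (newer-generator) version reserves space by walking the buffer backwards and returns the start index, while the added (vendored, older-generator) version writes forward and returns the next free index. A minimal standalone sketch contrasting the two, with the toy names encodeForward and encodeBackward standing in for the generated function:

```go
// Toy comparison of the two encodeVarintGenerated styles in this hunk.
package main

import "fmt"

// sov returns the varint byte length (loop form, as in this file).
func sov(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// encodeForward matches the added (vendored) code: write at offset, grow forward.
func encodeForward(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

// encodeBackward matches the removed code: offset marks the end of the free
// region, so the varint is placed to finish just before it.
func encodeBackward(dAtA []byte, offset int, v uint64) int {
	offset -= sov(v)
	base := offset
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return base
}

func main() {
	fwd := make([]byte, 8)
	bwd := make([]byte, 8)
	n := encodeForward(fwd, 0, 300)      // bytes land at fwd[0:n]
	start := encodeBackward(bwd, 8, 300) // bytes land at bwd[start:8]
	fmt.Println(fwd[:n], bwd[start:])    // both print [172 2]
}
```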
(m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -3052,9 +1878,6 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3072,9 +1895,6 @@ func (m *ReplicaSetStatus) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxUnavailable != nil { @@ -3085,9 +1905,6 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxUnavailable != nil { @@ -3102,9 +1919,6 @@ func (m *RollingUpdateDeployment) Size() (n int) { } func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Partition != nil { @@ -3114,9 +1928,6 @@ func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { } func (m *Scale) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -3129,9 +1940,6 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3139,9 +1947,6 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -3159,9 +1964,6 @@ func (m *ScaleStatus) Size() (n int) { } func (m *StatefulSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -3174,9 +1976,6 @@ func (m *StatefulSet) Size() (n int) { } func (m *StatefulSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -3193,9 +1992,6 @@ func (m *StatefulSetCondition) Size() (n int) { } func (m *StatefulSetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -3210,9 +2006,6 @@ func (m *StatefulSetList) Size() (n int) { } func (m *StatefulSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -3243,9 +2036,6 @@ func (m *StatefulSetSpec) Size() (n int) { } func (m *StatefulSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -3270,9 +2060,6 @@ func (m *StatefulSetStatus) Size() (n int) { } func (m *StatefulSetUpdateStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -3285,7 +2072,14 @@ func (m *StatefulSetUpdateStrategy) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -3295,8 +2089,8 @@ func (this *ControllerRevision) String() string { return "nil" } s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Data), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + 
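
The sovGenerated hunk above swaps a closed-form varint size, (math_bits.Len64(x|1)+6)/7, for a shift loop; both count how many 7-bit groups the value needs, so the generated Size() methods behave the same. A quick standalone check, with the hypothetical names sovLoop and sovClosed for the two variants:

```go
// Sanity check that the loop form (added, vendored) and the closed form
// (removed, upstream) of sovGenerated agree on varint sizes.
package main

import (
	"fmt"
	"math/bits"
)

// sovLoop mirrors the vendored generated code being added in this hunk.
func sovLoop(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			break
		}
	}
	return n
}

// sovClosed mirrors the newer generator output being removed in this hunk.
func sovClosed(x uint64) int {
	return (bits.Len64(x|1) + 6) / 7
}

func main() {
	for _, x := range []uint64{0, 1, 127, 128, 16383, 16384, 1<<63 - 1} {
		fmt.Println(x, sovLoop(x), sovClosed(x)) // the last two columns always match
	}
}
```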
strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") @@ -3306,14 +2100,9 @@ func (this *ControllerRevisionList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ControllerRevision{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3323,7 +2112,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3337,7 +2126,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3348,14 +2137,9 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]DaemonSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3365,8 +2149,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -3378,11 +2162,6 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DaemonSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -3393,7 +2172,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3404,7 +2183,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -3414,7 +2193,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3430,8 +2209,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + 
strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3440,14 +2219,9 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Deployment{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3458,8 +2232,8 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, @@ -3473,18 +2247,13 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -3497,7 +2266,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + 
strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -3507,7 +2276,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3521,7 +2290,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3532,14 +2301,9 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ReplicaSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3550,8 +2314,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -3561,18 +2325,13 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]ReplicaSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), 
"ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3582,7 +2341,7 @@ func (this *RollingUpdateDaemonSet) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -3592,8 +2351,8 @@ func (this *RollingUpdateDeployment) String() string { return "nil" } s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -3613,7 +2372,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3657,7 +2416,7 @@ func (this *StatefulSet) String() string { return "nil" } s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -3671,7 +2430,7 @@ func (this *StatefulSetCondition) String() string { s := strings.Join([]string{`&StatefulSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", 
"v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -3682,14 +2441,9 @@ func (this *StatefulSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]StatefulSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3698,16 +2452,11 @@ func (this *StatefulSetSpec) String() string { if this == nil { return "nil" } - repeatedStringForVolumeClaimTemplates := "[]PersistentVolumeClaim{" - for _, f := range this.VolumeClaimTemplates { - repeatedStringForVolumeClaimTemplates += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeClaimTemplates += "}" s := strings.Join([]string{`&StatefulSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + repeatedStringForVolumeClaimTemplates + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, @@ -3720,11 +2469,6 @@ func (this *StatefulSetStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]StatefulSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&StatefulSetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, @@ -3734,7 +2478,7 @@ func (this *StatefulSetStatus) String() string { `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, `CollisionCount:` + 
valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -3745,7 +2489,7 @@ func (this *StatefulSetUpdateStrategy) String() string { } s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, `}`, }, "") return s @@ -3773,7 +2517,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3801,7 +2545,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3810,9 +2554,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3834,7 +2575,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3843,9 +2584,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3867,7 +2605,7 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Revision |= int64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3881,9 +2619,6 @@ func (m *ControllerRevision) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3911,7 +2646,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3939,7 +2674,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3948,9 +2683,6 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3972,7 +2704,7 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3981,9 +2713,6 @@ func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4001,9 +2730,6 @@ func (m 
*ControllerRevisionList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4031,7 +2757,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4059,7 +2785,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4068,9 +2794,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4092,7 +2815,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4101,9 +2824,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4125,7 +2845,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4134,9 +2854,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4153,9 +2870,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4183,7 +2897,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4211,7 +2925,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4221,9 +2935,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4243,7 +2954,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4253,9 +2964,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4275,7 +2983,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4284,9 +2992,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -4308,7 +3013,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4318,9 +3023,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4340,7 +3042,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4350,9 +3052,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4367,9 +3066,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4397,7 +3093,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4425,7 +3121,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4434,9 +3130,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4458,7 +3151,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4467,9 +3160,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4487,9 +3177,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4517,7 +3204,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4545,7 +3232,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4554,14 +3241,11 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4581,7 +3265,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + 
msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4590,9 +3274,6 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4614,7 +3295,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4623,9 +3304,6 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4647,7 +3325,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4666,7 +3344,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4681,9 +3359,6 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4711,7 +3386,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4739,7 +3414,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= int32(b&0x7F) << shift + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4758,7 +3433,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= int32(b&0x7F) << shift + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4777,7 +3452,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= int32(b&0x7F) << shift + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4796,7 +3471,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= int32(b&0x7F) << shift + m.NumberReady |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4815,7 +3490,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4834,7 +3509,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= int32(b&0x7F) << shift + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4853,7 +3528,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= int32(b&0x7F) << shift + m.NumberAvailable |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4872,7 +3547,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= int32(b&0x7F) << shift + m.NumberUnavailable |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4891,7 +3566,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4911,7 +3586,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4920,9 +3595,6 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4940,9 +3612,6 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4970,7 +3639,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4998,7 +3667,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5008,9 +3677,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5030,7 +3696,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5039,9 +3705,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5061,9 +3724,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5091,7 +3751,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5119,7 +3779,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5128,9 +3788,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5152,7 +3809,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5161,9 +3818,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5185,7 +3839,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5194,9 +3848,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5213,9 +3864,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5243,7 +3891,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5271,7 +3919,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5281,9 +3929,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5303,7 +3948,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5313,9 +3958,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5335,7 +3977,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5345,9 +3987,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5367,7 +4006,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5377,9 +4016,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5399,7 +4035,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5408,9 +4044,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5432,7 +4065,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5441,9 +4074,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5460,9 +4090,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated 
} - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5490,7 +4117,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5518,7 +4145,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5527,9 +4154,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5551,7 +4175,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5560,9 +4184,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5580,9 +4201,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5610,7 +4228,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5638,7 +4256,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5658,7 +4276,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5667,14 +4285,11 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5694,7 +4309,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5703,9 +4318,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5727,7 +4339,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5736,9 +4348,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5760,7 +4369,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift 
+ m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5779,7 +4388,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5799,7 +4408,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5819,7 +4428,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5834,9 +4443,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5864,7 +4470,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5892,7 +4498,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5911,7 +4517,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5930,7 +4536,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5949,7 +4555,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5968,7 +4574,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5987,7 +4593,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5996,9 +4602,6 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6021,7 +4624,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6040,7 +4643,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6055,9 +4658,6 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6085,7 +4685,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6113,7 
+4713,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6123,9 +4723,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6145,7 +4742,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6154,9 +4751,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6176,9 +4770,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6206,7 +4797,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6234,7 +4825,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6243,9 +4834,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6267,7 +4855,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6276,9 +4864,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6300,7 +4885,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6309,9 +4894,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6328,9 +4910,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6358,7 +4937,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6386,7 +4965,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6396,9 +4975,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - 
} if postIndex > l { return io.ErrUnexpectedEOF } @@ -6418,7 +4994,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6428,9 +5004,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6450,7 +5023,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6459,9 +5032,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6483,7 +5053,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6493,9 +5063,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6515,7 +5082,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6525,9 +5092,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6542,9 +5106,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6572,7 +5133,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6600,7 +5161,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6609,9 +5170,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6633,7 +5191,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6642,9 +5200,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6662,9 +5217,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6692,7 +5244,7 @@ func 
(m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6720,7 +5272,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6740,7 +5292,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6749,14 +5301,11 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -6776,7 +5325,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6785,9 +5334,6 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6809,7 +5355,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6823,9 +5369,6 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6853,7 +5396,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6881,7 +5424,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6900,7 +5443,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6919,7 +5462,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6938,7 +5481,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6957,7 +5500,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -6976,7 +5519,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6985,9 +5528,6 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7005,9 +5545,6 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7035,7 +5572,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7063,7 +5600,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7072,14 +5609,11 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7094,9 +5628,6 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7124,7 +5655,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7152,7 +5683,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7161,14 +5692,11 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7188,7 +5716,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7197,14 +5725,11 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7219,9 +5744,6 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7249,7 +5771,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7277,7 +5799,7 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7292,9 +5814,6 @@ func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7322,7 +5841,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7350,7 +5869,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7359,9 +5878,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7383,7 +5899,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7392,9 +5908,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7416,7 +5929,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7425,9 +5938,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7444,9 +5954,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7474,7 +5981,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7502,7 +6009,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7516,9 +6023,6 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7546,7 +6050,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7574,7 +6078,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7593,7 +6097,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7602,20 +6106,54 @@ func (m 
*ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -7625,86 +6163,41 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := 
int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -7720,7 +6213,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7730,9 +6223,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7747,9 +6237,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7777,7 +6264,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7805,7 +6292,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7814,9 +6301,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7838,7 +6322,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7847,9 +6331,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7871,7 +6352,7 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7880,9 +6361,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7899,9 +6377,6 @@ func (m *StatefulSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7929,7 +6404,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7957,7 +6432,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7967,9 +6442,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7989,7 +6461,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7999,9 +6471,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8021,7 +6490,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8030,9 +6499,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8054,7 +6520,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8064,9 +6530,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8086,7 +6549,7 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8096,9 +6559,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8113,9 +6573,6 @@ func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8143,7 +6600,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8171,7 +6628,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8180,9 +6637,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8204,7 +6658,7 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8213,9 +6667,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8233,9 +6684,6 @@ func (m *StatefulSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -8263,7 +6711,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8291,7 +6739,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8311,7 +6759,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8320,14 +6768,11 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8347,7 +6792,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8356,9 +6801,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8380,7 +6822,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8389,13 +6831,10 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, v11.PersistentVolumeClaim{}) + m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -8414,7 +6853,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8424,9 +6863,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8446,7 +6882,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8456,9 +6892,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8478,7 +6911,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8487,9 +6920,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen 
- if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8511,7 +6941,7 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8526,9 +6956,6 @@ func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8556,7 +6983,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8584,7 +7011,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8603,7 +7030,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8622,7 +7049,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8641,7 +7068,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= int32(b&0x7F) << shift + m.CurrentReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8660,7 +7087,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8679,7 +7106,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8689,9 +7116,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8711,7 +7135,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8721,9 +7145,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8743,7 +7164,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8763,7 +7184,7 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8772,9 +7193,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8792,9 +7210,6 @@ func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8822,7 +7237,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8850,7 +7265,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8860,9 +7275,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8882,7 +7294,7 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8891,9 +7303,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8913,9 +7322,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8931,7 +7337,6 @@ func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -8963,8 +7368,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -8981,34 +7388,197 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer 
overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, + 0x19, 0xd7, 0xec, 0x43, 0x5a, 0x51, 0x91, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, + 0x1c, 0x25, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, + 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, + 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, + 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, + 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, + 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xfc, 0xb6, 0xab, 0x6a, + 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, + 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0x8d, 0x43, 0x42, 0xf1, 0x5a, 0xab, 0x47, + 0x4c, 0xe2, 0x60, 0x4a, 0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, + 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, + 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc8, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x35, + 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x92, 0x63, 0x2d, 0xbd, 0x11, 0x61, 0x0c, 0xdc, + 0x39, 0xd2, 0x4c, 0xe2, 0x0c, 0x5a, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2d, 0x83, 0x50, 0x9c, 0x65, + 0xd5, 0xca, 0xb3, 0x72, 0x3c, 0x93, 0x6a, 0x06, 0x49, 0x19, 0xbc, 0x35, 0xca, 0xc0, 0xed, 0x1c, + 0x11, 0x03, 0xa7, 0xec, 0x5e, 0xcf, 0xb3, 0xf3, 0xa8, 0xa6, 0xb7, 0x34, 0x93, 0xba, 0xd4, 0x49, + 0x1a, 0x35, 0xff, 0xa3, 0x00, 0xb8, 0x61, 0x99, 0xd4, 0xb1, 0x74, 0x9d, 0x38, 0x88, 0xf4, 0x35, + 0x57, 0xb3, 0x4c, 0xf8, 0x00, 0xd4, 0xd8, 0x7c, 0xba, 0x98, 0xe2, 0xba, 0x72, 0x59, 0x59, 0x99, + 0x5a, 0xbb, 0xae, 0x46, 0x2b, 0x1d, 0xd2, 0xab, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2a, 0x43, 0xab, + 0xfd, 0x1b, 0xea, 0xee, 0xe1, 0x87, 0xa4, 0x43, 0xb7, 0x09, 0xc5, 0x6d, 0xf8, 0xf8, 0x64, 0x79, + 0x6c, 0x78, 0xb2, 0x0c, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x5d, 0x50, 0xe1, 0xec, 0x25, 0xce, 0xbe, + 0x9a, 0xcb, 0x2e, 0x26, 0xad, 0x22, 0xfc, 0x93, 0x77, 0x1f, 0x51, 0x62, 0x32, 0xf7, 0xda, 0x2f, + 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x0d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, 0x7c, + 0x59, 0x59, 0x29, 0xb7, 0x2f, 0x08, 0x54, 0x2d, 0x98, 0x16, 0x0a, 0x11, 0xcd, 0xc7, 0x0a, 0x58, + 0x48, 0xcf, 0x7b, 0x4b, 0x73, 0x29, 0xfc, 0x61, 0x6a, 0xee, 0x6a, 0xb1, 0xb9, 0x33, 0x6b, 0x3e, + 0xf3, 0x70, 0xe0, 0xa0, 0x25, 0x36, 0xef, 0x3d, 0x50, 0xd5, 0x28, 0x31, 0xdc, 0x7a, 0xe9, 0x72, + 0x79, 0x65, 0x6a, 0xed, 0xaa, 0x9a, 0x13, 0xc0, 0x6a, 0xda, 0xbb, 0xf6, 0xb4, 0xe0, 0xad, 0xde, + 0x65, 0x0c, 0xc8, 0x27, 0x6a, 0xfe, 0xa2, 0x04, 0x26, 0x37, 0x31, 0x31, 0x2c, 0x73, 0x9f, 0xd0, + 0x73, 0xd8, 0xb9, 0x3b, 0xa0, 0xe2, 0xda, 0xa4, 0x23, 0x76, 0xee, 0x4a, 0xee, 0x04, 0x42, 0x9f, + 0xf6, 0x6d, 0xd2, 0x89, 0xb6, 0x8c, 0x3d, 0x21, 0xce, 0x00, 0xf7, 0xc0, 0xb8, 0x4b, 0x31, 0xf5, + 0x5c, 0xbe, 0x61, 0x53, 0x6b, 0x2b, 0x05, 0xb8, 0x38, 0xbe, 0x3d, 0x23, 0xd8, 0xc6, 0xfd, 0x67, + 0x24, 0x78, 0x9a, 0xff, 0x28, 0x01, 0x18, 0x62, 0x37, 0x2c, 0xb3, 0xab, 0x51, 0x16, 0xce, 0x37, + 0x41, 0x85, 0x0e, 0x6c, 0xc2, 0x17, 0x64, 0xb2, 0x7d, 0x25, 0x70, 0xe5, 0xde, 
0xc0, 0x26, 0x9f, + 0x9f, 0x2c, 0x2f, 0xa4, 0x2d, 0x58, 0x0f, 0xe2, 0x36, 0x70, 0x2b, 0x74, 0xb2, 0xc4, 0xad, 0xdf, + 0x90, 0x87, 0xfe, 0xfc, 0x64, 0x39, 0xe3, 0xec, 0x50, 0x43, 0x26, 0xd9, 0x41, 0xd8, 0x07, 0x50, + 0xc7, 0x2e, 0xbd, 0xe7, 0x60, 0xd3, 0xf5, 0x47, 0xd2, 0x0c, 0x22, 0xa6, 0xff, 0x5a, 0xb1, 0x8d, + 0x62, 0x16, 0xed, 0x25, 0xe1, 0x05, 0xdc, 0x4a, 0xb1, 0xa1, 0x8c, 0x11, 0xe0, 0x15, 0x30, 0xee, + 0x10, 0xec, 0x5a, 0x66, 0xbd, 0xc2, 0x67, 0x11, 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0x57, + 0xc1, 0x84, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xbd, 0xca, 0x81, 0xb3, 0x02, 0x38, 0xb1, 0xed, 0x37, + 0xa3, 0xa0, 0xbf, 0xf9, 0x7b, 0x05, 0x4c, 0x87, 0x2b, 0x77, 0x0e, 0x99, 0x73, 0x5b, 0xce, 0x9c, + 0xe6, 0xe8, 0x60, 0xc9, 0x49, 0x98, 0x8f, 0xca, 0x31, 0xc7, 0x59, 0x38, 0xc2, 0x1f, 0x81, 0x9a, + 0x4b, 0x74, 0xd2, 0xa1, 0x96, 0x23, 0x1c, 0x7f, 0xbd, 0xa0, 0xe3, 0xf8, 0x90, 0xe8, 0xfb, 0xc2, + 0xb4, 0xfd, 0x02, 0xf3, 0x3c, 0x78, 0x42, 0x21, 0x25, 0x7c, 0x1f, 0xd4, 0x28, 0x31, 0x6c, 0x1d, + 0x53, 0x22, 0xb2, 0xe6, 0xa5, 0xb8, 0xf3, 0x2c, 0x66, 0x18, 0xd9, 0x9e, 0xd5, 0xbd, 0x27, 0x60, + 0x3c, 0x65, 0xc2, 0xc5, 0x08, 0x5a, 0x51, 0x48, 0x03, 0x6d, 0x30, 0xe3, 0xd9, 0x5d, 0x86, 0xa4, + 0xec, 0x38, 0xef, 0x0d, 0x44, 0x0c, 0x5d, 0x1f, 0xbd, 0x2a, 0x07, 0x92, 0x5d, 0x7b, 0x41, 0x8c, + 0x32, 0x23, 0xb7, 0xa3, 0x04, 0x3f, 0x5c, 0x07, 0xb3, 0x86, 0x66, 0x22, 0x82, 0xbb, 0x83, 0x7d, + 0xd2, 0xb1, 0xcc, 0xae, 0xcb, 0x43, 0xa9, 0xda, 0x5e, 0x14, 0x04, 0xb3, 0xdb, 0x72, 0x37, 0x4a, + 0xe2, 0xe1, 0x16, 0x98, 0x0f, 0x0e, 0xe0, 0x3b, 0x9a, 0x4b, 0x2d, 0x67, 0xb0, 0xa5, 0x19, 0x1a, + 0xad, 0x8f, 0x73, 0x9e, 0xfa, 0xf0, 0x64, 0x79, 0x1e, 0x65, 0xf4, 0xa3, 0x4c, 0xab, 0xe6, 0x6f, + 0xc6, 0xc1, 0x6c, 0xe2, 0x5c, 0x80, 0xf7, 0xc1, 0x42, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, + 0x38, 0x24, 0xce, 0x7e, 0xe7, 0x88, 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0xe1, + 0xeb, 0xc2, 0x46, 0x26, 0x0a, 0xe5, 0x58, 0xc3, 0xf7, 0x00, 0x34, 0x79, 0xd3, 0xb6, 0xe6, 0xba, + 0x21, 0x67, 0x89, 0x73, 0x86, 0xa9, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2b, 0xe6, 0x63, 0x97, 0xb8, + 0x9a, 0x43, 0xba, 0x49, 0x1f, 0xcb, 0xb2, 0x8f, 0x9b, 0x99, 0x28, 0x94, 0x63, 0x0d, 0xdf, 0x04, + 0x53, 0xfe, 0x68, 0x7c, 0xcd, 0xc5, 0xe6, 0xcc, 0x09, 0xb2, 0xa9, 0x9d, 0xa8, 0x0b, 0xc5, 0x71, + 0x6c, 0x6a, 0xd6, 0xa1, 0x4b, 0x9c, 0x3e, 0xe9, 0xde, 0xf6, 0xc5, 0x01, 0xab, 0xa0, 0x55, 0x5e, + 0x41, 0xc3, 0xa9, 0xed, 0xa6, 0x10, 0x28, 0xc3, 0x8a, 0x4d, 0xcd, 0x8f, 0x9a, 0xd4, 0xd4, 0xc6, + 0xe5, 0xa9, 0x1d, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xf5, 0x3e, 0xd6, 0x74, + 0x7c, 0xa8, 0x93, 0xfa, 0x84, 0x1c, 0x7b, 0x3b, 0x72, 0x37, 0x4a, 0xe2, 0xe1, 0x6d, 0x70, 0xd1, + 0x6f, 0x3a, 0x30, 0x71, 0x48, 0x52, 0xe3, 0x24, 0x2f, 0x0a, 0x92, 0x8b, 0x3b, 0x49, 0x00, 0x4a, + 0xdb, 0xc0, 0x9b, 0x60, 0xa6, 0x63, 0xe9, 0x3a, 0x8f, 0xc7, 0x0d, 0xcb, 0x33, 0x69, 0x7d, 0x92, + 0xb3, 0x40, 0x96, 0x43, 0x1b, 0x52, 0x0f, 0x4a, 0x20, 0xe1, 0x8f, 0x01, 0xe8, 0x04, 0x85, 0xc1, + 0xad, 0x83, 0x11, 0x0a, 0x20, 0x5d, 0x96, 0xa2, 0xca, 0x1c, 0x36, 0xb9, 0x28, 0x46, 0xd9, 0xfc, + 0x48, 0x01, 0x8b, 0x39, 0x89, 0x0e, 0xbf, 0x2b, 0x15, 0xc1, 0xab, 0x89, 0x22, 0x78, 0x29, 0xc7, + 0x2c, 0x56, 0x09, 0x8f, 0xc0, 0x34, 0x13, 0x24, 0x9a, 0xd9, 0xf3, 0x21, 0xe2, 0x2c, 0x6b, 0xe5, + 0x4e, 0x00, 0xc5, 0xd1, 0xd1, 0xa9, 0x7c, 0x71, 0x78, 0xb2, 0x3c, 0x2d, 0xf5, 0x21, 0x99, 0xb8, + 0xf9, 0xcb, 0x12, 0x00, 0x9b, 0xc4, 0xd6, 0xad, 0x81, 0x41, 0xcc, 0xf3, 0xd0, 0x34, 0x77, 0x25, + 0x4d, 0xf3, 0x4a, 0xfe, 0x96, 0x84, 0x4e, 0xe5, 0x8a, 0x9a, 0xf7, 0x13, 0xa2, 0xe6, 0xd5, 0x22, + 0x64, 
0x4f, 0x57, 0x35, 0x9f, 0x94, 0xc1, 0x5c, 0x04, 0x8e, 0x64, 0xcd, 0x2d, 0x69, 0x47, 0x5f, + 0x49, 0xec, 0xe8, 0x62, 0x86, 0xc9, 0x73, 0xd3, 0x35, 0xcf, 0x5e, 0x5f, 0xc0, 0x0f, 0xc1, 0x0c, + 0x13, 0x32, 0x7e, 0x48, 0x70, 0x99, 0x34, 0x7e, 0x6a, 0x99, 0x14, 0x16, 0xb7, 0x2d, 0x89, 0x09, + 0x25, 0x98, 0x73, 0x64, 0xd9, 0xc4, 0xf3, 0x96, 0x65, 0xcd, 0x3f, 0x28, 0x60, 0x26, 0xda, 0xa6, + 0x73, 0x10, 0x51, 0x77, 0x64, 0x11, 0xf5, 0x52, 0x81, 0xe0, 0xcc, 0x51, 0x51, 0x9f, 0x54, 0xe2, + 0xae, 0x73, 0x19, 0xb5, 0xc2, 0x5e, 0xc1, 0x6c, 0x5d, 0xeb, 0x60, 0x57, 0xd4, 0xdb, 0x17, 0xfc, + 0xd7, 0x2f, 0xbf, 0x0d, 0x85, 0xbd, 0x92, 0xe0, 0x2a, 0x3d, 0x5f, 0xc1, 0x55, 0x7e, 0x36, 0x82, + 0xeb, 0x07, 0xa0, 0xe6, 0x06, 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x16, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, + 0xea, 0x50, 0x5f, 0x85, 0x74, 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, + 0x5c, 0xd2, 0xe5, 0x19, 0x50, 0x8b, 0x92, 0x79, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, + 0xed, 0x58, 0x3d, 0x87, 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, + 0x5e, 0x1a, 0x9e, 0x2c, 0x2f, 0xee, 0x65, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, + 0x79, 0x36, 0xe6, 0xc8, 0x14, 0xe5, 0x4c, 0x32, 0xe5, 0x5a, 0x2c, 0x4e, 0x7d, 0x0d, 0x17, 0xbb, + 0x2a, 0x48, 0xc5, 0xea, 0x3a, 0x98, 0x15, 0xb2, 0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x03, + 0xb9, 0x1b, 0x25, 0xf1, 0x4c, 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x3d, 0x09, + 0x40, 0x69, 0x1b, 0xb8, 0x0d, 0xe6, 0x3c, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0x77, + 0x90, 0x86, 0xa0, 0x2c, 0x3b, 0xf8, 0x40, 0xd2, 0x23, 0xe3, 0xfc, 0x48, 0xb8, 0x56, 0x20, 0xac, + 0x0b, 0x0b, 0x12, 0x78, 0x0b, 0x4c, 0x3b, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, + 0x6c, 0x1a, 0xc5, 0x3b, 0x91, 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, + 0xc0, 0x74, 0x1e, 0x8e, 0xbc, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0xae, 0x17, + 0xd4, 0x3f, 0xd1, 0x81, 0x5a, 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9f, 0x4b, 0x9d, 0xa2, 0x02, 0x28, + 0x72, 0xea, 0x19, 0x08, 0xa0, 0x18, 0xd9, 0xd3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x2e, 0x02, 0x17, + 0x16, 0x40, 0x19, 0x26, 0x5f, 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, + 0x27, 0x51, 0x12, 0x79, 0x95, 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, + 0xfc, 0x4e, 0xa6, 0xf9, 0x97, 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, + 0x1e, 0x98, 0x7f, 0xe8, 0xe9, 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, + 0x72, 0xfe, 0xfb, 0x19, 0x18, 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0x67, 0x2a, 0xf6, 0xa9, 0x0a, + 0x54, 0x39, 0x45, 0x05, 0xca, 0x2c, 0xdc, 0xd5, 0x33, 0x14, 0xee, 0xd3, 0x55, 0xda, 0x8c, 0x83, + 0x6b, 0xe4, 0xab, 0xff, 0xcf, 0x15, 0xb0, 0x90, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x63, 0xe0, 0x47, + 0xf1, 0x8b, 0x8f, 0x51, 0x45, 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0xbb, 0x26, 0xdd, + 0x75, 0xf6, 0xa9, 0xa3, 0x99, 0x3d, 0xbf, 0xf2, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, + 0x01, 0x8b, 0x39, 0x95, 0xef, 0x7c, 0x3d, 0x81, 0x1f, 0x80, 0x9a, 0x81, 0x1f, 0xed, 0x7b, 0x4e, + 0x2f, 0xab, 0x56, 0x17, 0x1b, 0x87, 0x67, 0xf3, 0xb6, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x17, 0x5c, + 0x96, 0x26, 0xc9, 0x32, 0x87, 0x3c, 0xf4, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x05, 0x93, 0x36, + 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, + 0xbf, 0xf9, 0x5f, 0x05, 0x54, 0xf7, 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, + 0x7f, 0x93, 0xce, 0xfd, 0xc9, 
0x2d, 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xf2, 0x08, 0x9e, 0xa7, 0xd7, + 0xf8, 0x77, 0xc0, 0x64, 0x38, 0xdc, 0xe9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x2a, 0x36, 0xc4, + 0x29, 0x8f, 0xaf, 0x07, 0xd2, 0xb1, 0xcf, 0x12, 0x73, 0xad, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, + 0xd7, 0xa4, 0x4e, 0xfc, 0x05, 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0xcc, 0x50, 0xec, 0xf4, 0x08, 0x0d, + 0xfa, 0xf8, 0x82, 0x4d, 0x46, 0xb7, 0x13, 0xf7, 0xa4, 0x5e, 0x94, 0x40, 0x2f, 0xdd, 0x02, 0xd3, + 0xd2, 0x60, 0xf0, 0x02, 0x28, 0x1f, 0x93, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x41, 0xb5, + 0x8f, 0x75, 0xcf, 0x8f, 0xf3, 0x49, 0xe4, 0x3f, 0xdc, 0x2c, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, + 0x27, 0x0a, 0xce, 0x73, 0x88, 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, + 0x18, 0x43, 0x89, 0x18, 0x7b, 0xad, 0x10, 0xdb, 0xd3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x1f, 0x43, + 0x47, 0x72, 0xf2, 0xdb, 0x92, 0x9c, 0x5c, 0x49, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, + 0xb4, 0x9e, 0xfc, 0xa3, 0x02, 0x66, 0x63, 0x6b, 0x77, 0x0e, 0x82, 0xf2, 0xae, 0x2c, 0x28, 0x5f, + 0x2e, 0x12, 0x34, 0x39, 0x8a, 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, + 0xef, 0x5b, 0xba, 0x67, 0x90, 0x0d, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, + 0x0b, 0xe9, 0x89, 0xe3, 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x1f, 0x59, 0x46, 0xba, 0xef, 0x7e, 0x06, + 0x1d, 0xca, 0x1c, 0x04, 0xbe, 0x09, 0xa6, 0x98, 0x7e, 0xd3, 0x3a, 0x64, 0x07, 0x1b, 0x41, 0x60, + 0x85, 0x9f, 0xb0, 0xf6, 0xa3, 0x2e, 0x14, 0xc7, 0xc1, 0x23, 0x30, 0x67, 0x5b, 0xdd, 0x6d, 0x6c, + 0xe2, 0x1e, 0x61, 0x32, 0x63, 0xcf, 0xd2, 0xb5, 0xce, 0x80, 0x5f, 0x7e, 0x4d, 0xb6, 0xdf, 0x0a, + 0x6e, 0x45, 0xf6, 0xd2, 0x10, 0xf6, 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, + 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x5a, 0x91, 0x08, 0x3b, 0xe3, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, + 0x99, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, + 0xcb, 0xa7, 0x90, 0xe7, 0xeb, 0x60, 0x56, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x43, + 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, 0x5e, 0xf5, 0x94, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, + 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, + 0xb2, 0x42, 0x38, 0x90, 0x7a, 0x51, 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, + 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, 0xdf, 0x14, 0xf0, 0x62, 0x6e, 0x22, 0xc0, 0x75, 0xa9, 0xec, + 0xae, 0x26, 0xca, 0xee, 0xb7, 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, + 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, 0x47, 0xd7, 0x5e, 0x7d, 0xfc, 0xa4, 0x31, 0xf6, 0xf1, 0x93, + 0xc6, 0xd8, 0xa7, 0x4f, 0x1a, 0x63, 0x3f, 0x1b, 0x36, 0x94, 0xc7, 0xc3, 0x86, 0xf2, 0xf1, 0xb0, + 0xa1, 0x7c, 0x3a, 0x6c, 0x28, 0x7f, 0x1f, 0x36, 0x94, 0x5f, 0x7f, 0xd6, 0x18, 0xfb, 0x60, 0x42, + 0x8c, 0xf8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x85, 0x43, 0x0a, 0xec, 0x28, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto index 17e4397..cc3656d 100644 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ b/vendor/k8s.io/api/apps/v1beta2/generated.proto @@ -43,7 +43,7 @@ option go_package = "v1beta2"; // depend on its stability. It is primarily for internal use by controllers. message ControllerRevision { // Standard object's metadata. 
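The recurring "|= (uint64(b) & 0x7F) << shift" edits in the generated.pb.go hunk above are just the older gogo/protobuf codegen's spelling of base-128 varint decoding, and the depth-counter logic removed from skipGenerated is replaced by per-wire-type early returns. A minimal, standalone Go sketch of that varint loop follows (illustrative only; decodeVarint is a hypothetical helper, not part of the vendored code):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads one protobuf base-128 varint from dAtA starting at iNdEx,
// mirroring the loop the generated Unmarshal methods inline for every field.
// It returns the decoded value and the index just past the varint.
func decodeVarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if iNdEx >= len(dAtA) {
			return 0, 0, errors.New("proto: unexpected EOF")
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= (uint64(b) & 0x7F) << shift // low 7 bits of each byte carry payload
		if b < 0x80 {                    // a clear high bit marks the final byte
			break
		}
	}
	return v, iNdEx, nil
}

func main() {
	v, next, err := decodeVarint([]byte{0xAC, 0x02}, 0) // 300 encoded as 0xAC 0x02
	fmt.Println(v, next, err)                           // 300 2 <nil>
}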
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -56,7 +56,7 @@ message ControllerRevision { // ControllerRevisionList is a resource containing a list of ControllerRevision objects. message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -69,12 +69,12 @@ message ControllerRevisionList { // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -82,7 +82,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -111,7 +111,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -286,7 +286,6 @@ message DeploymentSpec { // The deployment strategy to use to replace existing pods with new ones. // +optional - // +patchStrategy=retainKeys optional DeploymentStrategy strategy = 4; // Minimum number of seconds for which a newly created pod should be ready @@ -374,12 +373,12 @@ message DeploymentStrategy { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -387,7 +386,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -416,7 +415,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -527,7 +526,7 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is atmost 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } @@ -543,15 +542,15 @@ message RollingUpdateStatefulSetStrategy { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go index d358455..e552522 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ b/vendor/k8s.io/api/apps/v1beta2/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta2 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" runtime "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" @@ -57,20 +57,22 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } +// +genclient +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Scale represents a scaling request for a resource. 
type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -115,7 +117,7 @@ const ( // ParallelPodManagement will create and delete pods as soon as the stateful set // replica count is changed, and will not wait for pods to be ready or complete // termination. - ParallelPodManagement PodManagementPolicyType = "Parallel" + ParallelPodManagement = "Parallel" ) // StatefulSetUpdateStrategy indicates the strategy that the StatefulSet @@ -141,13 +143,13 @@ const ( // ordering constraints. When a scale operation is performed with this // strategy, new Pods will be created from the specification version indicated // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType StatefulSetUpdateStrategyType = "RollingUpdate" + RollingUpdateStatefulSetStrategyType = "RollingUpdate" // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version // tracking and ordered rolling restarts are disabled. Pods are recreated // from the StatefulSetSpec when they are manually deleted. When a scale // operation is performed with this strategy,specification version indicated // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType StatefulSetUpdateStrategyType = "OnDelete" + OnDeleteStatefulSetStrategyType = "OnDelete" ) // RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. @@ -329,8 +331,7 @@ type DeploymentSpec struct { // The deployment strategy to use to replace existing pods with new ones. // +optional - // +patchStrategy=retainKeys - Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` + Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` // Minimum number of seconds for which a newly created pod should be ready // without any of its container crashing, for it to be considered available. @@ -413,7 +414,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is atmost 130% of desired pods. 
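The constant hunks above also drop the PodManagementPolicyType and StatefulSetUpdateStrategyType annotations that newer k8s.io/api revisions attach, leaving untyped string constants that only take on the named type at the point of use. A short standalone sketch of that difference (hypothetical names, not from the vendored package):

package main

import "fmt"

// PolicyType stands in for a named string type such as PodManagementPolicyType.
type PolicyType string

const (
	TypedParallel   PolicyType = "Parallel" // newer codegen: constant carries the named type
	UntypedParallel            = "Parallel" // vendored (older) form: untyped string constant
)

func describe(p PolicyType) string { return "policy=" + string(p) }

func main() {
	fmt.Println(describe(TypedParallel))   // already a PolicyType
	fmt.Println(describe(UntypedParallel)) // untyped constant converts implicitly
	var s string = UntypedParallel         // also assignable to a plain string
	// var t string = TypedParallel        // would not compile without string(TypedParallel)
	fmt.Println(s)
}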
// +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -666,12 +667,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -679,7 +680,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -697,7 +698,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -716,12 +717,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -729,7 +730,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -740,7 +741,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -850,7 +851,7 @@ type ReplicaSetCondition struct { type ControllerRevision struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -867,7 +868,7 @@ type ControllerRevision struct { type ControllerRevisionList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go index 822158a..627df3a 100644 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_ControllerRevision = map[string]string{ "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "data": "Data is the serialized representation of the state.", "revision": "Revision indicates the revision of the state represented by Data.", } @@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string { var map_ControllerRevisionList = map[string]string{ "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of ControllerRevisions", } @@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string { var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -272,7 +272,7 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). 
This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { @@ -290,9 +290,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. 
Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go index 127bf09..8a0bad2 100644 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go @@ -58,7 +58,7 @@ func (in *ControllerRevision) DeepCopyObject() runtime.Object { func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ControllerRevision, len(*in)) @@ -136,7 +136,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -292,7 +292,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -457,7 +457,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -720,7 +720,7 @@ func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StatefulSet, len(*in)) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go deleted file mode 100644 index ae8f767..0000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true - -// +groupName=auditregistration.k8s.io - -package v1alpha1 // import "k8s.io/api/auditregistration/v1alpha1" diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go deleted file mode 100644 index f8eec3d..0000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.pb.go +++ /dev/null @@ -1,2030 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AuditSink) Reset() { *m = AuditSink{} } -func (*AuditSink) ProtoMessage() {} -func (*AuditSink) Descriptor() ([]byte, []int) { - return fileDescriptor_642d3597c6afa8ba, []int{0} -} -func (m *AuditSink) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuditSink) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AuditSink) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuditSink.Merge(m, src) -} -func (m *AuditSink) XXX_Size() int { - return m.Size() -} -func (m *AuditSink) XXX_DiscardUnknown() { - xxx_messageInfo_AuditSink.DiscardUnknown(m) -} - -var xxx_messageInfo_AuditSink proto.InternalMessageInfo - -func (m *AuditSinkList) Reset() { *m = AuditSinkList{} } -func (*AuditSinkList) ProtoMessage() {} -func (*AuditSinkList) Descriptor() ([]byte, []int) { - return fileDescriptor_642d3597c6afa8ba, []int{1} -} -func (m *AuditSinkList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuditSinkList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AuditSinkList) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuditSinkList.Merge(m, src) -} -func (m *AuditSinkList) XXX_Size() int { - return m.Size() -} -func (m *AuditSinkList) XXX_DiscardUnknown() { - xxx_messageInfo_AuditSinkList.DiscardUnknown(m) -} - -var xxx_messageInfo_AuditSinkList proto.InternalMessageInfo - -func (m *AuditSinkSpec) Reset() { *m = AuditSinkSpec{} } -func (*AuditSinkSpec) ProtoMessage() {} -func (*AuditSinkSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_642d3597c6afa8ba, []int{2} -} -func (m *AuditSinkSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AuditSinkSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AuditSinkSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_AuditSinkSpec.Merge(m, src) -} -func (m 
*AuditSinkSpec) XXX_Size() int { - return m.Size() -} -func (m *AuditSinkSpec) XXX_DiscardUnknown() { - xxx_messageInfo_AuditSinkSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_AuditSinkSpec proto.InternalMessageInfo - -func (m *Policy) Reset() { *m = Policy{} } -func (*Policy) ProtoMessage() {} -func (*Policy) Descriptor() ([]byte, []int) { - return fileDescriptor_642d3597c6afa8ba, []int{3} -} -func (m *Policy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Policy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Policy) XXX_Merge(src proto.Message) { - xxx_messageInfo_Policy.Merge(m, src) -} -func (m *Policy) XXX_Size() int { - return m.Size() -} -func (m *Policy) XXX_DiscardUnknown() { - xxx_messageInfo_Policy.DiscardUnknown(m) -} - -var xxx_messageInfo_Policy proto.InternalMessageInfo - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { - return fileDescriptor_642d3597c6afa8ba, []int{4} -} -func (m *ServiceReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServiceReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceReference.Merge(m, src) -} -func (m *ServiceReference) XXX_Size() int { - return m.Size() -} -func (m *ServiceReference) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ServiceReference proto.InternalMessageInfo - -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { - return fileDescriptor_642d3597c6afa8ba, []int{5} -} -func (m *Webhook) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Webhook) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Webhook) XXX_Merge(src proto.Message) { - xxx_messageInfo_Webhook.Merge(m, src) -} -func (m *Webhook) XXX_Size() int { - return m.Size() -} -func (m *Webhook) XXX_DiscardUnknown() { - xxx_messageInfo_Webhook.DiscardUnknown(m) -} - -var xxx_messageInfo_Webhook proto.InternalMessageInfo - -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_642d3597c6afa8ba, []int{6} -} -func (m *WebhookClientConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WebhookClientConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WebhookClientConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_WebhookClientConfig.Merge(m, src) -} -func (m *WebhookClientConfig) XXX_Size() int { - return m.Size() -} -func (m *WebhookClientConfig) XXX_DiscardUnknown() { - xxx_messageInfo_WebhookClientConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_WebhookClientConfig proto.InternalMessageInfo - -func (m *WebhookThrottleConfig) Reset() { *m = 
WebhookThrottleConfig{} } -func (*WebhookThrottleConfig) ProtoMessage() {} -func (*WebhookThrottleConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_642d3597c6afa8ba, []int{7} -} -func (m *WebhookThrottleConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WebhookThrottleConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WebhookThrottleConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_WebhookThrottleConfig.Merge(m, src) -} -func (m *WebhookThrottleConfig) XXX_Size() int { - return m.Size() -} -func (m *WebhookThrottleConfig) XXX_DiscardUnknown() { - xxx_messageInfo_WebhookThrottleConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_WebhookThrottleConfig proto.InternalMessageInfo - -func init() { - proto.RegisterType((*AuditSink)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSink") - proto.RegisterType((*AuditSinkList)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkList") - proto.RegisterType((*AuditSinkSpec)(nil), "k8s.io.api.auditregistration.v1alpha1.AuditSinkSpec") - proto.RegisterType((*Policy)(nil), "k8s.io.api.auditregistration.v1alpha1.Policy") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.auditregistration.v1alpha1.ServiceReference") - proto.RegisterType((*Webhook)(nil), "k8s.io.api.auditregistration.v1alpha1.Webhook") - proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookClientConfig") - proto.RegisterType((*WebhookThrottleConfig)(nil), "k8s.io.api.auditregistration.v1alpha1.WebhookThrottleConfig") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto", fileDescriptor_642d3597c6afa8ba) -} - -var fileDescriptor_642d3597c6afa8ba = []byte{ - // 765 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0x41, 0x6f, 0x13, 0x47, - 0x14, 0xf6, 0xc6, 0x76, 0x6c, 0x4f, 0x9c, 0x36, 0x9d, 0xb4, 0x95, 0x1b, 0x55, 0x6b, 0x6b, 0xa5, - 0x4a, 0x91, 0xda, 0xcc, 0x36, 0x55, 0xd4, 0x56, 0x88, 0x4b, 0x36, 0x27, 0xa4, 0x10, 0xc2, 0x98, - 0x80, 0x40, 0x08, 0x31, 0x5e, 0x3f, 0xef, 0x0e, 0xb6, 0x77, 0x97, 0xdd, 0x59, 0xa3, 0xdc, 0xf8, - 0x09, 0xfc, 0x05, 0xfe, 0x06, 0x37, 0x24, 0x90, 0x72, 0xcc, 0x31, 0xa7, 0x88, 0x98, 0x03, 0xff, - 0x81, 0x13, 0x9a, 0xd9, 0x59, 0xdb, 0xc4, 0x41, 0x38, 0xb7, 0x79, 0xdf, 0x7b, 0xdf, 0xf7, 0xbe, - 0xf7, 0xde, 0xa0, 0x83, 0xfe, 0xff, 0x09, 0xe1, 0xa1, 0xdd, 0x4f, 0x3b, 0x10, 0x07, 0x20, 0x20, - 0xb1, 0x47, 0x10, 0x74, 0xc3, 0xd8, 0xd6, 0x09, 0x16, 0x71, 0x9b, 0xa5, 0x5d, 0x2e, 0x62, 0xf0, - 0x78, 0x22, 0x62, 0x26, 0x78, 0x18, 0xd8, 0xa3, 0x6d, 0x36, 0x88, 0x7c, 0xb6, 0x6d, 0x7b, 0x10, - 0x40, 0xcc, 0x04, 0x74, 0x49, 0x14, 0x87, 0x22, 0xc4, 0x7f, 0x64, 0x34, 0xc2, 0x22, 0x4e, 0xe6, - 0x68, 0x24, 0xa7, 0x6d, 0x6c, 0x79, 0x5c, 0xf8, 0x69, 0x87, 0xb8, 0xe1, 0xd0, 0xf6, 0x42, 0x2f, - 0xb4, 0x15, 0xbb, 0x93, 0xf6, 0x54, 0xa4, 0x02, 0xf5, 0xca, 0x54, 0x37, 0x76, 0xa6, 0x66, 0x86, - 0xcc, 0xf5, 0x79, 0x00, 0xf1, 0xb1, 0x1d, 0xf5, 0x3d, 0x09, 0x24, 0xf6, 0x10, 0x04, 0xb3, 0x47, - 0x73, 0x5e, 0x36, 0xec, 0x6f, 0xb1, 0xe2, 0x34, 0x10, 0x7c, 0x08, 0x73, 0x84, 0x7f, 0xbf, 0x47, - 0x48, 0x5c, 0x1f, 0x86, 0xec, 0x32, 0xcf, 0x7a, 0x6f, 0xa0, 0xda, 0xae, 0x1c, 0xb6, 0xcd, 0x83, - 0x3e, 0x7e, 0x8a, 0xaa, 0xd2, 0x51, 0x97, 0x09, 0xd6, 0x30, 0x5a, 0xc6, 0xe6, 0xca, 0x3f, 0x7f, - 0x93, 0xe9, 0x56, 0x26, 0xc2, 0x24, 0xea, 
0x7b, 0x12, 0x48, 0x88, 0xac, 0x26, 0xa3, 0x6d, 0x72, - 0xa7, 0xf3, 0x0c, 0x5c, 0x71, 0x1b, 0x04, 0x73, 0xf0, 0xc9, 0x79, 0xb3, 0x30, 0x3e, 0x6f, 0xa2, - 0x29, 0x46, 0x27, 0xaa, 0xf8, 0x3e, 0x2a, 0x25, 0x11, 0xb8, 0x8d, 0x25, 0xa5, 0xbe, 0x43, 0x16, - 0xda, 0x39, 0x99, 0x38, 0x6c, 0x47, 0xe0, 0x3a, 0x75, 0xdd, 0xa1, 0x24, 0x23, 0xaa, 0xf4, 0xac, - 0x77, 0x06, 0x5a, 0x9d, 0x54, 0xed, 0xf3, 0x44, 0xe0, 0xc7, 0x73, 0xb3, 0x90, 0xc5, 0x66, 0x91, - 0x6c, 0x35, 0xc9, 0x9a, 0xee, 0x53, 0xcd, 0x91, 0x99, 0x39, 0x8e, 0x50, 0x99, 0x0b, 0x18, 0x26, - 0x8d, 0xa5, 0x56, 0xf1, 0xd2, 0x9a, 0x16, 0x1a, 0xc4, 0x59, 0xd5, 0xe2, 0xe5, 0x5b, 0x52, 0x86, - 0x66, 0x6a, 0xd6, 0xdb, 0xd9, 0x31, 0xe4, 0x78, 0xf8, 0x08, 0x2d, 0x47, 0xe1, 0x80, 0xbb, 0xc7, - 0x7a, 0x88, 0xad, 0x05, 0x3b, 0x1d, 0x2a, 0x92, 0xf3, 0x83, 0x6e, 0xb3, 0x9c, 0xc5, 0x54, 0x8b, - 0xe1, 0x87, 0xa8, 0xf2, 0x02, 0x3a, 0x7e, 0x18, 0xf6, 0xf5, 0x29, 0xc8, 0x82, 0xba, 0x0f, 0x32, - 0x96, 0xf3, 0xa3, 0x16, 0xae, 0x68, 0x80, 0xe6, 0x7a, 0x96, 0x8b, 0x74, 0x33, 0xfc, 0x17, 0x2a, - 0x0f, 0x60, 0x04, 0x03, 0x65, 0xbd, 0xe6, 0xfc, 0x9a, 0x8f, 0xbc, 0x2f, 0xc1, 0xcf, 0xf9, 0x83, - 0x66, 0x45, 0xf8, 0x4f, 0xb4, 0x9c, 0x08, 0xe6, 0x41, 0xb6, 0xd3, 0x9a, 0xb3, 0x2e, 0x6d, 0xb7, - 0x15, 0x22, 0x6b, 0xd5, 0x8b, 0xea, 0x12, 0xeb, 0xb5, 0x81, 0xd6, 0xda, 0x10, 0x8f, 0xb8, 0x0b, - 0x14, 0x7a, 0x10, 0x43, 0xe0, 0x02, 0xb6, 0x51, 0x2d, 0x60, 0x43, 0x48, 0x22, 0xe6, 0x82, 0xee, - 0xf9, 0x93, 0xee, 0x59, 0x3b, 0xc8, 0x13, 0x74, 0x5a, 0x83, 0x5b, 0xa8, 0x24, 0x03, 0xb5, 0x82, - 0xda, 0xf4, 0x5f, 0xc9, 0x5a, 0xaa, 0x32, 0xf8, 0x77, 0x54, 0x8a, 0x98, 0xf0, 0x1b, 0x45, 0x55, - 0x51, 0x95, 0xd9, 0x43, 0x26, 0x7c, 0xaa, 0x50, 0x95, 0x0d, 0x63, 0xd1, 0x28, 0xb5, 0x8c, 0xcd, - 0xb2, 0xce, 0x86, 0xb1, 0xa0, 0x0a, 0xb5, 0x3e, 0x19, 0x28, 0xdf, 0x0e, 0xee, 0xa1, 0xaa, 0xf0, - 0xe3, 0x50, 0x88, 0x01, 0xe8, 0x43, 0xde, 0xbc, 0xde, 0xc2, 0xef, 0x69, 0xf6, 0x5e, 0x18, 0xf4, - 0xb8, 0xe7, 0xd4, 0xe5, 0xbf, 0xcc, 0x31, 0x3a, 0xd1, 0xc6, 0x02, 0xd5, 0xdd, 0x01, 0x87, 0x40, - 0x64, 0x75, 0xfa, 0xb8, 0x37, 0xae, 0xd7, 0x6b, 0x6f, 0x46, 0xc1, 0xf9, 0x59, 0x6f, 0xa5, 0x3e, - 0x8b, 0xd2, 0xaf, 0xba, 0x58, 0x6f, 0x0c, 0xb4, 0x7e, 0x05, 0x17, 0xff, 0x86, 0x8a, 0x69, 0x9c, - 0x9f, 0xbf, 0x32, 0x3e, 0x6f, 0x16, 0x8f, 0xe8, 0x3e, 0x95, 0x18, 0x7e, 0x82, 0x2a, 0x49, 0x76, - 0x3f, 0xed, 0xf1, 0xbf, 0x05, 0x3d, 0x5e, 0xbe, 0xba, 0xb3, 0x22, 0x7f, 0x61, 0x8e, 0xe6, 0xa2, - 0x78, 0x13, 0x55, 0x5d, 0xe6, 0xa4, 0x41, 0x77, 0x00, 0xea, 0x78, 0xf5, 0x6c, 0x65, 0x7b, 0xbb, - 0x19, 0x46, 0x27, 0x59, 0xab, 0x8d, 0x7e, 0xb9, 0x72, 0xc7, 0xd2, 0xfd, 0xf3, 0x28, 0x51, 0xee, - 0x8b, 0x99, 0xfb, 0xbb, 0x87, 0x6d, 0x2a, 0x31, 0xdc, 0x44, 0xe5, 0x4e, 0x1a, 0x27, 0x42, 0x79, - 0x2f, 0x3a, 0x35, 0xf9, 0xab, 0x1d, 0x09, 0xd0, 0x0c, 0x77, 0xc8, 0xc9, 0x85, 0x59, 0x38, 0xbd, - 0x30, 0x0b, 0x67, 0x17, 0x66, 0xe1, 0xe5, 0xd8, 0x34, 0x4e, 0xc6, 0xa6, 0x71, 0x3a, 0x36, 0x8d, - 0xb3, 0xb1, 0x69, 0x7c, 0x18, 0x9b, 0xc6, 0xab, 0x8f, 0x66, 0xe1, 0x51, 0x35, 0x9f, 0xea, 0x4b, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x6c, 0xff, 0x86, 0xcd, 0x06, 0x00, 0x00, -} - -func (m *AuditSink) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditSink) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuditSink) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, 
err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AuditSinkList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditSinkList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuditSinkList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *AuditSinkSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AuditSinkSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AuditSinkSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Webhook.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.Policy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Policy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Policy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Policy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Stages) > 0 { - for iNdEx := len(m.Stages) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Stages[iNdEx]) - copy(dAtA[i:], m.Stages[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Stages[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Level) - copy(dAtA[i:], m.Level) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServiceReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - 
i := len(dAtA) - _ = i - var l int - _ = l - if m.Port != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Port)) - i-- - dAtA[i] = 0x20 - } - if m.Path != nil { - i -= len(*m.Path) - copy(dAtA[i:], *m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Webhook) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Webhook) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.ClientConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if m.Throttle != nil { - { - size, err := m.Throttle.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WebhookClientConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.CABundle != nil { - i -= len(m.CABundle) - copy(dAtA[i:], m.CABundle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i-- - dAtA[i] = 0x1a - } - if m.Service != nil { - { - size, err := m.Service.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.URL != nil { - i -= len(*m.URL) - copy(dAtA[i:], *m.URL) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *WebhookThrottleConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WebhookThrottleConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WebhookThrottleConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Burst != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Burst)) - i-- - dAtA[i] = 0x10 - } - if m.QPS != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.QPS)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AuditSink) Size() 
(n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AuditSinkList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AuditSinkSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Policy.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Webhook.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Policy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Level) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Stages) > 0 { - for _, s := range m.Stages { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - n += 1 + sovGenerated(uint64(*m.Port)) - } - return n -} - -func (m *Webhook) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Throttle != nil { - l = m.Throttle.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WebhookClientConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.URL != nil { - l = len(*m.URL) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *WebhookThrottleConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.QPS != nil { - n += 1 + sovGenerated(uint64(*m.QPS)) - } - if m.Burst != nil { - n += 1 + sovGenerated(uint64(*m.Burst)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AuditSink) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AuditSink{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "AuditSinkSpec", "AuditSinkSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AuditSinkList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]AuditSink{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "AuditSink", "AuditSink", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&AuditSinkList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *AuditSinkSpec) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&AuditSinkSpec{`, - `Policy:` + strings.Replace(strings.Replace(this.Policy.String(), "Policy", "Policy", 1), `&`, ``, 1) + `,`, - `Webhook:` + strings.Replace(strings.Replace(this.Webhook.String(), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Policy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Policy{`, - `Level:` + fmt.Sprintf("%v", this.Level) + `,`, - `Stages:` + fmt.Sprintf("%v", this.Stages) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `Port:` + valueToStringGenerated(this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *Webhook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Webhook{`, - `Throttle:` + strings.Replace(this.Throttle.String(), "WebhookThrottleConfig", "WebhookThrottleConfig", 1) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WebhookClientConfig{`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `Service:` + strings.Replace(this.Service.String(), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookThrottleConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WebhookThrottleConfig{`, - `QPS:` + valueToStringGenerated(this.QPS) + `,`, - `Burst:` + valueToStringGenerated(this.Burst) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AuditSink) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditSink: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditSink: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AuditSinkList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditSinkList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditSinkList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, AuditSink{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} 
-func (m *AuditSinkSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AuditSinkSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AuditSinkSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Policy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Policy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhook", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Webhook.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Policy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Policy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Policy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Level = Level(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Stages", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Stages = append(m.Stages, Stage(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for 
shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Port = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Webhook) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Webhook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Throttle", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Throttle == nil { - m.Throttle = &WebhookThrottleConfig{} - } - if err := m.Throttle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - 
return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.URL = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Service == nil { - m.Service = &ServiceReference{} - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) 
- if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WebhookThrottleConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WebhookThrottleConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WebhookThrottleConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field QPS", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.QPS = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Burst", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Burst = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx 
< 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto deleted file mode 100644 index 674debe..0000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/generated.proto +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.auditregistration.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// AuditSink represents a cluster level audit sink -message AuditSink { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the audit configuration spec - optional AuditSinkSpec spec = 2; -} - -// AuditSinkList is a list of AuditSink items. -message AuditSinkList { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of audit configurations. - repeated AuditSink items = 2; -} - -// AuditSinkSpec holds the spec for the audit sink -message AuditSinkSpec { - // Policy defines the policy for selecting which events should be sent to the webhook - // required - optional Policy policy = 1; - - // Webhook to send events - // required - optional Webhook webhook = 2; -} - -// Policy defines the configuration of how audit events are logged -message Policy { - // The Level that all requests are recorded at. - // available options: None, Metadata, Request, RequestResponse - // required - optional string level = 1; - - // Stages is a list of stages for which events are created. - // +optional - repeated string stages = 2; -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -message ServiceReference { - // `namespace` is the namespace of the service. - // Required - optional string namespace = 1; - - // `name` is the name of the service. - // Required - optional string name = 2; - - // `path` is an optional URL path which will be sent in any request to - // this service. - // +optional - optional string path = 3; - - // If specified, the port on the service that hosting webhook. - // Default to 443 for backward compatibility. - // `port` should be a valid port number (1-65535, inclusive). 
- // +optional - optional int32 port = 4; -} - -// Webhook holds the configuration of the webhook -message Webhook { - // Throttle holds the options for throttling the webhook - // +optional - optional WebhookThrottleConfig throttle = 1; - - // ClientConfig holds the connection parameters for the webhook - // required - optional WebhookClientConfig clientConfig = 2; -} - -// WebhookClientConfig contains the information to make a connection with the webhook -message WebhookClientConfig { - // `url` gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - optional string url = 1; - - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // +optional - optional ServiceReference service = 2; - - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - optional bytes caBundle = 3; -} - -// WebhookThrottleConfig holds the configuration for throttling events -message WebhookThrottleConfig { - // ThrottleQPS maximum number of batches per second - // default 10 QPS - // +optional - optional int64 qps = 1; - - // ThrottleBurst is the maximum number of events sent at the same moment - // default 15 QPS - // +optional - optional int64 burst = 2; -} - diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/register.go b/vendor/k8s.io/api/auditregistration/v1alpha1/register.go deleted file mode 100644 index d627160..0000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "auditregistration.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -func init() { - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - localSchemeBuilder.Register(addKnownTypes) -} - -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &AuditSink{}, - &AuditSinkList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types.go deleted file mode 100644 index a0fb48c..0000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/types.go +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:openapi-gen=true - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Level defines the amount of information logged during auditing -type Level string - -// Valid audit levels -const ( - // LevelNone disables auditing - LevelNone Level = "None" - // LevelMetadata provides the basic level of auditing. - LevelMetadata Level = "Metadata" - // LevelRequest provides Metadata level of auditing, and additionally - // logs the request object (does not apply for non-resource requests). - LevelRequest Level = "Request" - // LevelRequestResponse provides Request level of auditing, and additionally - // logs the response object (does not apply for non-resource requests and watches). - LevelRequestResponse Level = "RequestResponse" -) - -// Stage defines the stages in request handling during which audit events may be generated. -type Stage string - -// Valid audit stages. -const ( - // The stage for events generated after the audit handler receives the request, but before it - // is delegated down the handler chain. - StageRequestReceived = "RequestReceived" - // The stage for events generated after the response headers are sent, but before the response body - // is sent. This stage is only generated for long-running requests (e.g. watch). 
- StageResponseStarted = "ResponseStarted" - // The stage for events generated after the response body has been completed, and no more bytes - // will be sent. - StageResponseComplete = "ResponseComplete" - // The stage for events generated when a panic occurred. - StagePanic = "Panic" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AuditSink represents a cluster level audit sink -type AuditSink struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the audit configuration spec - Spec AuditSinkSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// AuditSinkSpec holds the spec for the audit sink -type AuditSinkSpec struct { - // Policy defines the policy for selecting which events should be sent to the webhook - // required - Policy Policy `json:"policy" protobuf:"bytes,1,opt,name=policy"` - - // Webhook to send events - // required - Webhook Webhook `json:"webhook" protobuf:"bytes,2,opt,name=webhook"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// AuditSinkList is a list of AuditSink items. -type AuditSinkList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of audit configurations. - Items []AuditSink `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Policy defines the configuration of how audit events are logged -type Policy struct { - // The Level that all requests are recorded at. - // available options: None, Metadata, Request, RequestResponse - // required - Level Level `json:"level" protobuf:"bytes,1,opt,name=level"` - - // Stages is a list of stages for which events are created. - // +optional - Stages []Stage `json:"stages" protobuf:"bytes,2,opt,name=stages"` -} - -// Webhook holds the configuration of the webhook -type Webhook struct { - // Throttle holds the options for throttling the webhook - // +optional - Throttle *WebhookThrottleConfig `json:"throttle,omitempty" protobuf:"bytes,1,opt,name=throttle"` - - // ClientConfig holds the connection parameters for the webhook - // required - ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` -} - -// WebhookThrottleConfig holds the configuration for throttling events -type WebhookThrottleConfig struct { - // ThrottleQPS maximum number of batches per second - // default 10 QPS - // +optional - QPS *int64 `json:"qps,omitempty" protobuf:"bytes,1,opt,name=qps"` - - // ThrottleBurst is the maximum number of events sent at the same moment - // default 15 QPS - // +optional - Burst *int64 `json:"burst,omitempty" protobuf:"bytes,2,opt,name=burst"` -} - -// WebhookClientConfig contains the information to make a connection with the webhook -type WebhookClientConfig struct { - // `url` gives the location of the webhook, in standard URL form - // (`scheme://host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. 
- // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - URL *string `json:"url,omitempty" protobuf:"bytes,1,opt,name=url"` - - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // +optional - Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,2,opt,name=service"` - - // `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. - // If unspecified, system trust roots on the apiserver are used. - // +optional - CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,3,opt,name=caBundle"` -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // `namespace` is the namespace of the service. - // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - - // `name` is the name of the service. - // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - - // `path` is an optional URL path which will be sent in any request to - // this service. - // +optional - Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` - - // If specified, the port on the service that hosting webhook. - // Default to 443 for backward compatibility. - // `port` should be a valid port number (1-65535, inclusive). - // +optional - Port *int32 `json:"port,omitempty" protobuf:"varint,4,opt,name=port"` -} diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 1a86f4d..0000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,111 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! 
For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AuditSink = map[string]string{ - "": "AuditSink represents a cluster level audit sink", - "spec": "Spec defines the audit configuration spec", -} - -func (AuditSink) SwaggerDoc() map[string]string { - return map_AuditSink -} - -var map_AuditSinkList = map[string]string{ - "": "AuditSinkList is a list of AuditSink items.", - "items": "List of audit configurations.", -} - -func (AuditSinkList) SwaggerDoc() map[string]string { - return map_AuditSinkList -} - -var map_AuditSinkSpec = map[string]string{ - "": "AuditSinkSpec holds the spec for the audit sink", - "policy": "Policy defines the policy for selecting which events should be sent to the webhook required", - "webhook": "Webhook to send events required", -} - -func (AuditSinkSpec) SwaggerDoc() map[string]string { - return map_AuditSinkSpec -} - -var map_Policy = map[string]string{ - "": "Policy defines the configuration of how audit events are logged", - "level": "The Level that all requests are recorded at. available options: None, Metadata, Request, RequestResponse required", - "stages": "Stages is a list of stages for which events are created.", -} - -func (Policy) SwaggerDoc() map[string]string { - return map_Policy -} - -var map_ServiceReference = map[string]string{ - "": "ServiceReference holds a reference to Service.legacy.k8s.io", - "namespace": "`namespace` is the namespace of the service. Required", - "name": "`name` is the name of the service. Required", - "path": "`path` is an optional URL path which will be sent in any request to this service.", - "port": "If specified, the port on the service that hosting webhook. Default to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).", -} - -func (ServiceReference) SwaggerDoc() map[string]string { - return map_ServiceReference -} - -var map_Webhook = map[string]string{ - "": "Webhook holds the configuration of the webhook", - "throttle": "Throttle holds the options for throttling the webhook", - "clientConfig": "ClientConfig holds the connection parameters for the webhook required", -} - -func (Webhook) SwaggerDoc() map[string]string { - return map_Webhook -} - -var map_WebhookClientConfig = map[string]string{ - "": "WebhookClientConfig contains the information to make a connection with the webhook", - "url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. 
You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.", - "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.", -} - -func (WebhookClientConfig) SwaggerDoc() map[string]string { - return map_WebhookClientConfig -} - -var map_WebhookThrottleConfig = map[string]string{ - "": "WebhookThrottleConfig holds the configuration for throttling events", - "qps": "ThrottleQPS maximum number of batches per second default 10 QPS", - "burst": "ThrottleBurst is the maximum number of events sent at the same moment default 15 QPS", -} - -func (WebhookThrottleConfig) SwaggerDoc() map[string]string { - return map_WebhookThrottleConfig -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 621a19e..0000000 --- a/vendor/k8s.io/api/auditregistration/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,229 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuditSink) DeepCopyInto(out *AuditSink) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSink. -func (in *AuditSink) DeepCopy() *AuditSink { - if in == nil { - return nil - } - out := new(AuditSink) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuditSink) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AuditSinkList) DeepCopyInto(out *AuditSinkList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]AuditSink, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkList. -func (in *AuditSinkList) DeepCopy() *AuditSinkList { - if in == nil { - return nil - } - out := new(AuditSinkList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *AuditSinkList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AuditSinkSpec) DeepCopyInto(out *AuditSinkSpec) { - *out = *in - in.Policy.DeepCopyInto(&out.Policy) - in.Webhook.DeepCopyInto(&out.Webhook) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AuditSinkSpec. -func (in *AuditSinkSpec) DeepCopy() *AuditSinkSpec { - if in == nil { - return nil - } - out := new(AuditSinkSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Policy) DeepCopyInto(out *Policy) { - *out = *in - if in.Stages != nil { - in, out := &in.Stages, &out.Stages - *out = make([]Stage, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy. -func (in *Policy) DeepCopy() *Policy { - if in == nil { - return nil - } - out := new(Policy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Webhook) DeepCopyInto(out *Webhook) { - *out = *in - if in.Throttle != nil { - in, out := &in.Throttle, &out.Throttle - *out = new(WebhookThrottleConfig) - (*in).DeepCopyInto(*out) - } - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. -func (in *Webhook) DeepCopy() *Webhook { - if in == nil { - return nil - } - out := new(Webhook) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - (*in).DeepCopyInto(*out) - } - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. -func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { - if in == nil { - return nil - } - out := new(WebhookClientConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookThrottleConfig) DeepCopyInto(out *WebhookThrottleConfig) { - *out = *in - if in.QPS != nil { - in, out := &in.QPS, &out.QPS - *out = new(int64) - **out = **in - } - if in.Burst != nil { - in, out := &in.Burst, &out.Burst - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookThrottleConfig. -func (in *WebhookThrottleConfig) DeepCopy() *WebhookThrottleConfig { - if in == nil { - return nil - } - out := new(WebhookThrottleConfig) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go index 1614265..2d2ed2e 100644 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ b/vendor/k8s.io/api/authentication/v1/doc.go @@ -15,8 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true - package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go index 6524f8c..2ce2e2d 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1/generated.pb.go @@ -14,26 +14,41 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto + + It has these top-level messages: + BoundObjectReference + ExtraValue + TokenRequest + TokenRequestSpec + TokenRequestStatus + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import strings "strings" +import reflect "reflect" - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -44,259 +59,43 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } -func (*BoundObjectReference) ProtoMessage() {} -func (*BoundObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{0} -} -func (m *BoundObjectReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *BoundObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *BoundObjectReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_BoundObjectReference.Merge(m, src) -} -func (m *BoundObjectReference) XXX_Size() int { - return m.Size() -} -func (m *BoundObjectReference) XXX_DiscardUnknown() { - xxx_messageInfo_BoundObjectReference.DiscardUnknown(m) -} - -var xxx_messageInfo_BoundObjectReference proto.InternalMessageInfo - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{1} -} -func (m *ExtraValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExtraValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtraValue.Merge(m, src) -} -func (m *ExtraValue) XXX_Size() int { - return m.Size() -} -func (m *ExtraValue) XXX_DiscardUnknown() { - xxx_messageInfo_ExtraValue.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } +func (*BoundObjectReference) ProtoMessage() {} +func (*BoundObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *TokenRequest) Reset() { *m = TokenRequest{} } -func (*TokenRequest) ProtoMessage() {} -func (*TokenRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{2} -} -func (m *TokenRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenRequest.Merge(m, src) -} -func (m *TokenRequest) XXX_Size() int { - return m.Size() -} -func (m *TokenRequest) XXX_DiscardUnknown() { - xxx_messageInfo_TokenRequest.DiscardUnknown(m) -} +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_TokenRequest proto.InternalMessageInfo +func (m *TokenRequest) Reset() { *m = TokenRequest{} } +func (*TokenRequest) ProtoMessage() {} +func (*TokenRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *TokenRequestSpec) Reset() { *m = 
TokenRequestSpec{} } -func (*TokenRequestSpec) ProtoMessage() {} -func (*TokenRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{3} -} -func (m *TokenRequestSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenRequestSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenRequestSpec.Merge(m, src) -} -func (m *TokenRequestSpec) XXX_Size() int { - return m.Size() -} -func (m *TokenRequestSpec) XXX_DiscardUnknown() { - xxx_messageInfo_TokenRequestSpec.DiscardUnknown(m) -} +func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } +func (*TokenRequestSpec) ProtoMessage() {} +func (*TokenRequestSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_TokenRequestSpec proto.InternalMessageInfo +func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } +func (*TokenRequestStatus) ProtoMessage() {} +func (*TokenRequestStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } -func (*TokenRequestStatus) ProtoMessage() {} -func (*TokenRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{4} -} -func (m *TokenRequestStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenRequestStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenRequestStatus.Merge(m, src) -} -func (m *TokenRequestStatus) XXX_Size() int { - return m.Size() -} -func (m *TokenRequestStatus) XXX_DiscardUnknown() { - xxx_messageInfo_TokenRequestStatus.DiscardUnknown(m) -} +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_TokenRequestStatus proto.InternalMessageInfo +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{5} -} -func (m *TokenReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenReview.Merge(m, src) -} -func (m *TokenReview) XXX_Size() int { - return m.Size() -} -func (m *TokenReview) XXX_DiscardUnknown() { - xxx_messageInfo_TokenReview.DiscardUnknown(m) -} +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_TokenReview proto.InternalMessageInfo - -func (m 
*TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{6} -} -func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenReviewSpec.Merge(m, src) -} -func (m *TokenReviewSpec) XXX_Size() int { - return m.Size() -} -func (m *TokenReviewSpec) XXX_DiscardUnknown() { - xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo - -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{7} -} -func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenReviewStatus.Merge(m, src) -} -func (m *TokenReviewStatus) XXX_Size() int { - return m.Size() -} -func (m *TokenReviewStatus) XXX_DiscardUnknown() { - xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo - -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_2953ea822e7ffe1e, []int{8} -} -func (m *UserInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *UserInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserInfo.Merge(m, src) -} -func (m *UserInfo) XXX_Size() int { - return m.Size() -} -func (m *UserInfo) XXX_DiscardUnknown() { - xxx_messageInfo_UserInfo.DiscardUnknown(m) -} - -var xxx_messageInfo_UserInfo proto.InternalMessageInfo +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func init() { proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") @@ -308,78 +107,11 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") - proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1.UserInfo.ExtraEntry") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptor_2953ea822e7ffe1e) } - -var fileDescriptor_2953ea822e7ffe1e = []byte{ - // 903 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0xf3, 0xa3, 
0x4a, 0x26, 0xdb, 0xd2, 0xce, 0xb2, 0x52, 0x54, 0x20, 0x2e, 0x5e, 0x09, - 0x55, 0xc0, 0xda, 0x9b, 0x08, 0xc1, 0x6a, 0x91, 0x90, 0x6a, 0x1a, 0x41, 0x84, 0x60, 0x57, 0xb3, - 0xdb, 0x82, 0x38, 0x31, 0xb1, 0x5f, 0x53, 0x13, 0x3c, 0x36, 0xf6, 0x38, 0x6c, 0x6e, 0xfb, 0x27, - 0x70, 0x04, 0x89, 0x03, 0x7f, 0x04, 0x12, 0xff, 0x42, 0x8f, 0x2b, 0x4e, 0x3d, 0xa0, 0x88, 0x9a, - 0x2b, 0x47, 0x4e, 0x9c, 0xd0, 0x8c, 0xa7, 0x71, 0x9c, 0xb4, 0x69, 0x4e, 0x7b, 0x8b, 0xdf, 0xfb, - 0xde, 0xf7, 0xde, 0xfb, 0xe6, 0xcb, 0x0c, 0xea, 0x8d, 0x1e, 0xc4, 0xa6, 0x17, 0x58, 0xa3, 0x64, - 0x00, 0x11, 0x03, 0x0e, 0xb1, 0x35, 0x06, 0xe6, 0x06, 0x91, 0xa5, 0x12, 0x34, 0xf4, 0x2c, 0x9a, - 0xf0, 0x53, 0x60, 0xdc, 0x73, 0x28, 0xf7, 0x02, 0x66, 0x8d, 0x3b, 0xd6, 0x10, 0x18, 0x44, 0x94, - 0x83, 0x6b, 0x86, 0x51, 0xc0, 0x03, 0xfc, 0x7a, 0x86, 0x36, 0x69, 0xe8, 0x99, 0x45, 0xb4, 0x39, - 0xee, 0xec, 0xde, 0x1b, 0x7a, 0xfc, 0x34, 0x19, 0x98, 0x4e, 0xe0, 0x5b, 0xc3, 0x60, 0x18, 0x58, - 0xb2, 0x68, 0x90, 0x9c, 0xc8, 0x2f, 0xf9, 0x21, 0x7f, 0x65, 0x64, 0xbb, 0xef, 0xe5, 0xad, 0x7d, - 0xea, 0x9c, 0x7a, 0x0c, 0xa2, 0x89, 0x15, 0x8e, 0x86, 0x22, 0x10, 0x5b, 0x3e, 0x70, 0x7a, 0xc5, - 0x08, 0xbb, 0xd6, 0x75, 0x55, 0x51, 0xc2, 0xb8, 0xe7, 0xc3, 0x52, 0xc1, 0xfb, 0x37, 0x15, 0xc4, - 0xce, 0x29, 0xf8, 0x74, 0xb1, 0xce, 0xf8, 0x43, 0x43, 0xaf, 0xda, 0x41, 0xc2, 0xdc, 0x47, 0x83, - 0x6f, 0xc1, 0xe1, 0x04, 0x4e, 0x20, 0x02, 0xe6, 0x00, 0xde, 0x43, 0xd5, 0x91, 0xc7, 0xdc, 0x96, - 0xb6, 0xa7, 0xed, 0x37, 0xec, 0x5b, 0x67, 0x53, 0xbd, 0x94, 0x4e, 0xf5, 0xea, 0x67, 0x1e, 0x73, - 0x89, 0xcc, 0xe0, 0x2e, 0x42, 0xf4, 0x71, 0xff, 0x18, 0xa2, 0xd8, 0x0b, 0x58, 0xab, 0x2c, 0x71, - 0x58, 0xe1, 0xd0, 0xc1, 0x2c, 0x43, 0xe6, 0x50, 0x82, 0x95, 0x51, 0x1f, 0x5a, 0x95, 0x22, 0xeb, - 0x17, 0xd4, 0x07, 0x22, 0x33, 0xd8, 0x46, 0x95, 0xa4, 0x7f, 0xd8, 0xaa, 0x4a, 0xc0, 0x7d, 0x05, - 0xa8, 0x1c, 0xf5, 0x0f, 0xff, 0x9b, 0xea, 0x6f, 0x5e, 0xb7, 0x24, 0x9f, 0x84, 0x10, 0x9b, 0x47, - 0xfd, 0x43, 0x22, 0x8a, 0x8d, 0x0f, 0x10, 0xea, 0x3d, 0xe3, 0x11, 0x3d, 0xa6, 0xdf, 0x25, 0x80, - 0x75, 0x54, 0xf3, 0x38, 0xf8, 0x71, 0x4b, 0xdb, 0xab, 0xec, 0x37, 0xec, 0x46, 0x3a, 0xd5, 0x6b, - 0x7d, 0x11, 0x20, 0x59, 0xfc, 0x61, 0xfd, 0xa7, 0x5f, 0xf5, 0xd2, 0xf3, 0x3f, 0xf7, 0x4a, 0xc6, - 0x2f, 0x65, 0x74, 0xeb, 0x69, 0x30, 0x02, 0x46, 0xe0, 0xfb, 0x04, 0x62, 0x8e, 0xbf, 0x41, 0x75, - 0x71, 0x44, 0x2e, 0xe5, 0x54, 0x2a, 0xd1, 0xec, 0xde, 0x37, 0x73, 0x77, 0xcc, 0x86, 0x30, 0xc3, - 0xd1, 0x50, 0x04, 0x62, 0x53, 0xa0, 0xcd, 0x71, 0xc7, 0xcc, 0xe4, 0xfc, 0x1c, 0x38, 0xcd, 0x35, - 0xc9, 0x63, 0x64, 0xc6, 0x8a, 0x1f, 0xa3, 0x6a, 0x1c, 0x82, 0x23, 0xf5, 0x6b, 0x76, 0x4d, 0x73, - 0x95, 0xf7, 0xcc, 0xf9, 0xd9, 0x9e, 0x84, 0xe0, 0xe4, 0x0a, 0x8a, 0x2f, 0x22, 0x99, 0xf0, 0x57, - 0x68, 0x23, 0xe6, 0x94, 0x27, 0xb1, 0x54, 0xb9, 0x38, 0xf1, 0x4d, 0x9c, 0xb2, 0xce, 0xde, 0x52, - 0xac, 0x1b, 0xd9, 0x37, 0x51, 0x7c, 0xc6, 0xbf, 0x1a, 0xda, 0x5e, 0x1c, 0x01, 0xbf, 0x83, 0x1a, - 0x34, 0x71, 0x3d, 0x61, 0x9a, 0x4b, 0x89, 0x37, 0xd3, 0xa9, 0xde, 0x38, 0xb8, 0x0c, 0x92, 0x3c, - 0x8f, 0x3f, 0x46, 0x3b, 0xf0, 0x2c, 0xf4, 0x22, 0xd9, 0xfd, 0x09, 0x38, 0x01, 0x73, 0x63, 0x79, - 0xd6, 0x15, 0xfb, 0x4e, 0x3a, 0xd5, 0x77, 0x7a, 0x8b, 0x49, 0xb2, 0x8c, 0xc7, 0x0c, 0x6d, 0x0d, - 0x0a, 0x96, 0x55, 0x8b, 0x76, 0x57, 0x2f, 0x7a, 0x95, 0xcd, 0x6d, 0x9c, 0x4e, 0xf5, 0xad, 0x62, - 0x86, 0x2c, 0xb0, 0x1b, 0xbf, 0x69, 0x08, 0x2f, 0xab, 0x84, 0xef, 0xa2, 0x1a, 0x17, 0x51, 0xf5, - 0x17, 0xd9, 0x54, 0xa2, 0xd5, 0x32, 0x68, 0x96, 0xc3, 0x13, 0x74, 0x3b, 0x5f, 0xe0, 0xa9, 0xe7, - 0x43, 0xcc, 0xa9, 0x1f, 0xaa, 0xd3, 0x7e, 0x7b, 
0x3d, 0x2f, 0x89, 0x32, 0xfb, 0x35, 0x45, 0x7f, - 0xbb, 0xb7, 0x4c, 0x47, 0xae, 0xea, 0x61, 0xfc, 0x5c, 0x46, 0x4d, 0x35, 0xf6, 0xd8, 0x83, 0x1f, - 0x5e, 0x82, 0x97, 0x1f, 0x15, 0xbc, 0x7c, 0x6f, 0x2d, 0xdf, 0x89, 0xd1, 0xae, 0xb5, 0xf2, 0x97, - 0x0b, 0x56, 0xb6, 0xd6, 0xa7, 0x5c, 0xed, 0x64, 0x07, 0xbd, 0xb2, 0xd0, 0x7f, 0xbd, 0xe3, 0x2c, - 0x98, 0xbd, 0xbc, 0xda, 0xec, 0xc6, 0x3f, 0x1a, 0xda, 0x59, 0x1a, 0x09, 0x7f, 0x88, 0x36, 0xe7, - 0x26, 0x87, 0xec, 0x86, 0xad, 0xdb, 0x77, 0x54, 0xbf, 0xcd, 0x83, 0xf9, 0x24, 0x29, 0x62, 0xf1, - 0xa7, 0xa8, 0x9a, 0xc4, 0x10, 0x29, 0x85, 0xdf, 0x5a, 0x2d, 0xc7, 0x51, 0x0c, 0x51, 0x9f, 0x9d, - 0x04, 0xb9, 0xb4, 0x22, 0x42, 0x24, 0x43, 0x71, 0x93, 0xea, 0x0d, 0x7f, 0xdb, 0xbb, 0xa8, 0x06, - 0x51, 0x14, 0x44, 0xea, 0xde, 0x9e, 0x69, 0xd3, 0x13, 0x41, 0x92, 0xe5, 0x8c, 0xdf, 0xcb, 0xa8, - 0x7e, 0xd9, 0x12, 0xbf, 0x8b, 0xea, 0xa2, 0x8d, 0xbc, 0xec, 0x33, 0x41, 0xb7, 0x55, 0x91, 0xc4, - 0x88, 0x38, 0x99, 0x21, 0xf0, 0x1b, 0xa8, 0x92, 0x78, 0xae, 0x7a, 0x43, 0x9a, 0x73, 0x97, 0x3e, - 0x11, 0x71, 0x6c, 0xa0, 0x8d, 0x61, 0x14, 0x24, 0xa1, 0xb0, 0x81, 0x18, 0x14, 0x89, 0x13, 0xfd, - 0x44, 0x46, 0x88, 0xca, 0xe0, 0x63, 0x54, 0x03, 0x71, 0xe7, 0xcb, 0x5d, 0x9a, 0xdd, 0xce, 0x7a, - 0xd2, 0x98, 0xf2, 0x9d, 0xe8, 0x31, 0x1e, 0x4d, 0xe6, 0xb6, 0x12, 0x31, 0x92, 0xd1, 0xed, 0x0e, - 0xd4, 0x5b, 0x22, 0x31, 0x78, 0x1b, 0x55, 0x46, 0x30, 0xc9, 0x36, 0x22, 0xe2, 0x27, 0xfe, 0x08, - 0xd5, 0xc6, 0xe2, 0x99, 0x51, 0x47, 0xb2, 0xbf, 0xba, 0x6f, 0xfe, 0x2c, 0x91, 0xac, 0xec, 0x61, - 0xf9, 0x81, 0x66, 0xef, 0x9f, 0x5d, 0xb4, 0x4b, 0x2f, 0x2e, 0xda, 0xa5, 0xf3, 0x8b, 0x76, 0xe9, - 0x79, 0xda, 0xd6, 0xce, 0xd2, 0xb6, 0xf6, 0x22, 0x6d, 0x6b, 0xe7, 0x69, 0x5b, 0xfb, 0x2b, 0x6d, - 0x6b, 0x3f, 0xfe, 0xdd, 0x2e, 0x7d, 0x5d, 0x1e, 0x77, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x8c, - 0x44, 0x87, 0xd0, 0xe2, 0x08, 0x00, 0x00, -} - func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -387,42 +119,33 @@ func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { } func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *BoundObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + return i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -430,31 +153,32 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m) > 0 { - for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m[iNdEx]) - copy(dAtA[i:], m[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) - i-- + for _, s := range m { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *TokenRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -462,52 +186,41 @@ func (m *TokenRequest) Marshal() (dAtA []byte, err error) { } func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TokenRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -515,48 +228,47 @@ func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TokenRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ExpirationSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) - i-- - dAtA[i] = 0x20 + if len(m.Audiences) > 0 { + for _, s := range m.Audiences { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } if m.BoundObjectRef != nil { - { - size, err := m.BoundObjectRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 
0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a - } - if len(m.Audiences) > 0 { - for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Audiences[iNdEx]) - copy(dAtA[i:], m.Audiences[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) - i-- - dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.BoundObjectRef.Size())) + n4, err := m.BoundObjectRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n4 + } + if m.ExpirationSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) } - return len(dAtA) - i, nil + return i, nil } func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -564,37 +276,29 @@ func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TokenRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.ExpirationTimestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpirationTimestamp.Size())) + n5, err := m.ExpirationTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -602,52 +306,41 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n6 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n7, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n7 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n8, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n8 + return i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -655,36 +348,21 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Audiences) > 0 { - for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Audiences[iNdEx]) - copy(dAtA[i:], m.Audiences[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + return i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -692,54 +370,37 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Audiences) > 0 { - for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Audiences[iNdEx]) - copy(dAtA[i:], m.Audiences[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x1a - { - size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i-- + dAtA[i] = 0x8 + i++ if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) + n9, err := m.User.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + return i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -747,81 +408,95 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x1a 
+ i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { - v := m.Extra[string(keysForExtra[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForExtra[iNdEx]) - copy(dAtA[i:], keysForExtra[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + for _, k := range keysForExtra { dAtA[i] = 0x22 + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n10, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 } } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Groups[iNdEx]) - copy(dAtA[i:], m.Groups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x12 - i -= len(m.Username) - copy(dAtA[i:], m.Username) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *BoundObjectReference) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -836,9 +511,6 @@ func (m *BoundObjectReference) Size() (n int) { } func (m ExtraValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m) > 0 { @@ -851,9 +523,6 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -866,9 +535,6 @@ func (m *TokenRequest) Size() (n int) { } func (m *TokenRequestSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Audiences) > 0 { @@ -888,9 +554,6 @@ func (m *TokenRequestSpec) Size() (n int) { } func (m *TokenRequestStatus) 
Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Token) @@ -901,9 +564,6 @@ func (m *TokenRequestStatus) Size() (n int) { } func (m *TokenReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -916,26 +576,14 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } func (m *TokenReviewStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -943,19 +591,10 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } func (m *UserInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Username) @@ -981,7 +620,14 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1004,7 +650,7 @@ func (this *TokenRequest) String() string { return "nil" } s := strings.Join([]string{`&TokenRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1017,7 +663,7 @@ func (this *TokenRequestSpec) String() string { } s := strings.Join([]string{`&TokenRequestSpec{`, `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, - `BoundObjectRef:` + strings.Replace(this.BoundObjectRef.String(), "BoundObjectReference", "BoundObjectReference", 1) + `,`, + `BoundObjectRef:` + strings.Replace(fmt.Sprintf("%v", this.BoundObjectRef), "BoundObjectReference", "BoundObjectReference", 1) + `,`, `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, `}`, }, "") @@ -1029,7 +675,7 @@ func (this *TokenRequestStatus) String() string { } s := strings.Join([]string{`&TokenRequestStatus{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `ExpirationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ExpirationTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `ExpirationTimestamp:` + strings.Replace(strings.Replace(this.ExpirationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1039,7 +685,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, 
`Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1052,7 +698,6 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -1065,7 +710,6 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -1116,7 +760,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1144,7 +788,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1154,9 +798,6 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1176,7 +817,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1186,9 +827,6 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1208,7 +846,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1218,9 +856,6 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1240,7 +875,7 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1250,9 +885,6 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1267,9 +899,6 @@ func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1297,7 +926,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1325,7 +954,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) 
<< shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1335,9 +964,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1352,9 +978,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1382,7 +1005,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1410,7 +1033,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1419,9 +1042,6 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1443,7 +1063,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1452,9 +1072,6 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1476,7 +1093,7 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1485,9 +1102,6 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1504,9 +1118,6 @@ func (m *TokenRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1534,7 +1145,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1562,7 +1173,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1572,9 +1183,6 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1594,7 +1202,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1603,9 +1211,6 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1630,7 +1235,7 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1645,9 +1250,6 @@ func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1675,7 +1277,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1703,7 +1305,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1713,9 +1315,6 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1735,7 +1334,7 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1744,9 +1343,6 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1763,9 +1359,6 @@ func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1793,7 +1386,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1821,7 +1414,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1830,9 +1423,6 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1854,7 +1444,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1863,9 +1453,6 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1887,7 +1474,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1896,9 +1483,6 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1915,9 +1499,6 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1945,7 +1526,7 @@ func (m *TokenReviewSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1973,7 +1554,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1983,46 +1564,11 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2032,9 +1578,6 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2062,7 +1605,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2090,7 +1633,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2110,7 +1653,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2119,9 +1662,6 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2143,7 +1683,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2153,46 +1693,11 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -2202,9 +1707,6 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2232,7 +1734,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2260,7 +1762,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2270,9 +1772,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2292,7 +1791,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2302,9 +1801,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2324,7 +1820,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2334,9 +1830,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2356,7 +1849,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2365,20 +1858,54 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < 
postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2388,88 +1915,46 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -2480,9 +1965,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2498,7 +1980,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -2530,8 +2011,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -2548,34 +2031,117 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length 
< 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 892 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0xf3, 0x63, 0xb5, 0x99, 0x74, 0x97, 0xdd, 0x29, 0x95, 0xa2, 0x05, 0xec, 0x60, 0x24, + 0x14, 0x01, 0xb5, 0x9b, 0x08, 0x95, 0xaa, 0x48, 0x48, 0x6b, 0x36, 0x82, 0x08, 0x41, 0xab, 0x69, + 0x77, 0x41, 0x9c, 0x98, 0xd8, 0x6f, 0xb3, 0x26, 0x78, 0x6c, 0xec, 0x71, 0x68, 0x6e, 0xfd, 0x13, + 0x38, 0x82, 0xc4, 0x81, 0x3f, 0x02, 0x89, 0x23, 0xd7, 0x3d, 0x56, 0x9c, 0x7a, 0x40, 0x11, 0x6b, + 0xfe, 0x05, 0x4e, 0x9c, 0xd0, 0x8c, 0x67, 0xe3, 0xfc, 0xd8, 0x4d, 0x73, 0xea, 0x2d, 0xf3, 0xde, + 0xf7, 0xbe, 0x79, 0xef, 0x9b, 0x2f, 0xcf, 0xa8, 0x37, 0xba, 0x97, 0x58, 0x7e, 0x68, 0x8f, 0xd2, + 0x01, 0xc4, 0x0c, 0x38, 0x24, 0xf6, 0x18, 0x98, 0x17, 0xc6, 0xb6, 0x4a, 0xd0, 0xc8, 0xb7, 0x69, + 0xca, 0xcf, 0x80, 0x71, 0xdf, 0xa5, 0xdc, 0x0f, 0x99, 0x3d, 0xee, 0xd8, 0x43, 0x60, 0x10, 0x53, + 0x0e, 0x9e, 0x15, 0xc5, 0x21, 0x0f, 0xf1, 0xeb, 0x39, 0xda, 0xa2, 0x91, 0x6f, 0x2d, 0xa2, 0xad, + 0x71, 0xe7, 0xe0, 0xf6, 0xd0, 0xe7, 0x67, 0xe9, 0xc0, 0x72, 0xc3, 0xc0, 0x1e, 0x86, 0xc3, 0xd0, + 0x96, 0x45, 0x83, 0xf4, 0x54, 0x9e, 0xe4, 0x41, 0xfe, 0xca, 0xc9, 0x0e, 0xde, 0x2f, 0xae, 0x0e, + 0xa8, 0x7b, 0xe6, 0x33, 0x88, 0x27, 0x76, 0x34, 0x1a, 0x8a, 0x40, 0x62, 0x07, 0xc0, 0xe9, 0x15, + 0x2d, 0x1c, 0xd8, 0xd7, 0x55, 0xc5, 0x29, 0xe3, 0x7e, 0x00, 0x2b, 0x05, 0x77, 0x5f, 0x54, 0x90, + 0xb8, 0x67, 0x10, 0xd0, 0xe5, 0x3a, 0xf3, 0x4f, 0x0d, 0xbd, 0xea, 0x84, 0x29, 0xf3, 0x1e, 0x0c, + 0xbe, 0x05, 0x97, 0x13, 0x38, 0x85, 0x18, 0x98, 0x0b, 0xb8, 0x85, 0xaa, 0x23, 0x9f, 0x79, 0x4d, + 0xad, 0xa5, 0xb5, 0xeb, 0xce, 0x8d, 0xf3, 0xa9, 0x51, 0xca, 0xa6, 0x46, 0xf5, 0x33, 0x9f, 0x79, + 0x44, 0x66, 0x70, 0x17, 0x21, 0xfa, 0xb0, 0x7f, 0x02, 0x71, 0xe2, 0x87, 0xac, 0x59, 0x96, 0x38, + 0xac, 0x70, 0xe8, 0x70, 0x96, 0x21, 0x73, 0x28, 0xc1, 0xca, 0x68, 0x00, 0xcd, 0xca, 0x22, 0xeb, + 0x17, 0x34, 0x00, 0x22, 0x33, 0xd8, 0x41, 0x95, 0xb4, 
0x7f, 0xd4, 0xac, 0x4a, 0xc0, 0x1d, 0x05, + 0xa8, 0x1c, 0xf7, 0x8f, 0xfe, 0x9b, 0x1a, 0x6f, 0x5e, 0x37, 0x24, 0x9f, 0x44, 0x90, 0x58, 0xc7, + 0xfd, 0x23, 0x22, 0x8a, 0xcd, 0x0f, 0x10, 0xea, 0x3d, 0xe1, 0x31, 0x3d, 0xa1, 0xdf, 0xa5, 0x80, + 0x0d, 0x54, 0xf3, 0x39, 0x04, 0x49, 0x53, 0x6b, 0x55, 0xda, 0x75, 0xa7, 0x9e, 0x4d, 0x8d, 0x5a, + 0x5f, 0x04, 0x48, 0x1e, 0xbf, 0xbf, 0xfd, 0xd3, 0xaf, 0x46, 0xe9, 0xe9, 0x5f, 0xad, 0x92, 0xf9, + 0x4b, 0x19, 0xdd, 0x78, 0x1c, 0x8e, 0x80, 0x11, 0xf8, 0x3e, 0x85, 0x84, 0xe3, 0x6f, 0xd0, 0xb6, + 0x78, 0x22, 0x8f, 0x72, 0x2a, 0x95, 0x68, 0x74, 0xef, 0x58, 0x85, 0x3b, 0x66, 0x4d, 0x58, 0xd1, + 0x68, 0x28, 0x02, 0x89, 0x25, 0xd0, 0xd6, 0xb8, 0x63, 0xe5, 0x72, 0x7e, 0x0e, 0x9c, 0x16, 0x9a, + 0x14, 0x31, 0x32, 0x63, 0xc5, 0x0f, 0x51, 0x35, 0x89, 0xc0, 0x95, 0xfa, 0x35, 0xba, 0x96, 0xb5, + 0xce, 0x7b, 0xd6, 0x7c, 0x6f, 0x8f, 0x22, 0x70, 0x0b, 0x05, 0xc5, 0x89, 0x48, 0x26, 0xfc, 0x15, + 0xda, 0x4a, 0x38, 0xe5, 0x69, 0x22, 0x55, 0x5e, 0xec, 0xf8, 0x45, 0x9c, 0xb2, 0xce, 0xd9, 0x55, + 0xac, 0x5b, 0xf9, 0x99, 0x28, 0x3e, 0xf3, 0x5f, 0x0d, 0xed, 0x2d, 0xb7, 0x80, 0xdf, 0x45, 0x75, + 0x9a, 0x7a, 0xbe, 0x30, 0xcd, 0xa5, 0xc4, 0x3b, 0xd9, 0xd4, 0xa8, 0x1f, 0x5e, 0x06, 0x49, 0x91, + 0xc7, 0x0c, 0xed, 0x0e, 0x16, 0xdc, 0xa6, 0x7a, 0xec, 0xae, 0xef, 0xf1, 0x2a, 0x87, 0x3a, 0x38, + 0x9b, 0x1a, 0xbb, 0x8b, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x46, 0xfb, 0xf0, 0x24, 0xf2, 0x63, 0xc9, + 0xf4, 0x08, 0xdc, 0x90, 0x79, 0x89, 0xf4, 0x56, 0xc5, 0xb9, 0x95, 0x4d, 0x8d, 0xfd, 0xde, 0x72, + 0x92, 0xac, 0xe2, 0xcd, 0xdf, 0x34, 0x84, 0x57, 0x55, 0xc2, 0x6f, 0xa1, 0x1a, 0x17, 0x51, 0xf5, + 0x17, 0xd9, 0x51, 0xa2, 0xd5, 0x72, 0x68, 0x9e, 0xc3, 0x13, 0x74, 0xb3, 0x20, 0x7c, 0xec, 0x07, + 0x90, 0x70, 0x1a, 0x44, 0xea, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xa2, 0xcc, 0x79, 0x4d, 0xd1, 0xdf, + 0xec, 0xad, 0xd2, 0x91, 0xab, 0xee, 0x30, 0x7f, 0x2e, 0xa3, 0x86, 0x6a, 0x7b, 0xec, 0xc3, 0x0f, + 0x2f, 0xc1, 0xcb, 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0x89, 0xd6, 0xae, 0xb5, 0xf2, 0x97, + 0x4b, 0x56, 0xb6, 0x37, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0x4e, + 0xf3, 0x0f, 0x0d, 0xed, 0xaf, 0xdc, 0x82, 0x3f, 0x44, 0x3b, 0x73, 0xcd, 0x40, 0xbe, 0x34, 0xb7, + 0x9d, 0x5b, 0x8a, 0x62, 0xe7, 0x70, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x53, 0x54, 0x4d, 0x13, 0x88, + 0x95, 0x68, 0x6f, 0xaf, 0x9f, 0xf0, 0x38, 0x81, 0xb8, 0xcf, 0x4e, 0xc3, 0x42, 0x2d, 0x11, 0x21, + 0x92, 0x41, 0x4c, 0x00, 0x71, 0x1c, 0xc6, 0x6a, 0xbb, 0xce, 0x26, 0xe8, 0x89, 0x20, 0xc9, 0x73, + 0xe6, 0xef, 0x65, 0xb4, 0x7d, 0xc9, 0x82, 0xdf, 0x43, 0xdb, 0xa2, 0x52, 0xae, 0xe4, 0x7c, 0xec, + 0x3d, 0x55, 0x24, 0x31, 0x22, 0x4e, 0x66, 0x08, 0xfc, 0x06, 0xaa, 0xa4, 0xbe, 0xa7, 0x36, 0x7d, + 0x63, 0x6e, 0x35, 0x13, 0x11, 0xc7, 0x26, 0xda, 0x1a, 0xc6, 0x61, 0x1a, 0x89, 0xc7, 0x12, 0x5b, + 0x00, 0x09, 0xdd, 0x3f, 0x91, 0x11, 0xa2, 0x32, 0xf8, 0x04, 0xd5, 0x40, 0x6c, 0xe6, 0x66, 0xb5, + 0x55, 0x69, 0x37, 0xba, 0x9d, 0xcd, 0xa6, 0xb5, 0xe4, 0x36, 0xef, 0x31, 0x1e, 0x4f, 0xe6, 0xa6, + 0x12, 0x31, 0x92, 0xd3, 0x1d, 0x0c, 0xd4, 0xc6, 0x97, 0x18, 0xbc, 0x87, 0x2a, 0x23, 0x98, 0xe4, + 0x13, 0x11, 0xf1, 0x13, 0x7f, 0x84, 0x6a, 0x63, 0xf1, 0x31, 0x50, 0x2a, 0xb7, 0xd7, 0xdf, 0x5b, + 0x7c, 0x3c, 0x48, 0x5e, 0x76, 0xbf, 0x7c, 0x4f, 0x73, 0xda, 0xe7, 0x17, 0x7a, 0xe9, 0xd9, 0x85, + 0x5e, 0x7a, 0x7e, 0xa1, 0x97, 0x9e, 0x66, 0xba, 0x76, 0x9e, 0xe9, 0xda, 0xb3, 0x4c, 0xd7, 0x9e, + 0x67, 0xba, 0xf6, 0x77, 0xa6, 0x6b, 0x3f, 0xfe, 0xa3, 0x97, 0xbe, 0x2e, 0x8f, 0x3b, 0xff, 0x07, + 0x00, 0x00, 0xff, 0xff, 0x5e, 0x8d, 0x94, 0x78, 0x88, 0x08, 0x00, 0x00, +} diff 
--git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto index db7be17..10c7921 100644 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1/generated.proto @@ -84,10 +84,7 @@ message TokenRequestSpec { optional int64 expirationSeconds = 4; // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound object exists. - // NOTE: The API server's TokenReview endpoint will validate the - // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds - // small if you want prompt revocation. + // The token will only be valid for as long as the bound objet exists. // +optional optional BoundObjectReference boundObjectRef = 3; } @@ -121,14 +118,6 @@ message TokenReviewSpec { // Token is the opaque bearer token. // +optional optional string token = 1; - - // Audiences is a list of the identifiers that the resource server presented - // with the token identifies as. Audience-aware token authenticators will - // verify that the token was intended for at least one of the audiences in - // this list. If no audiences are provided, the audience will default to the - // audience of the Kubernetes apiserver. - // +optional - repeated string audiences = 2; } // TokenReviewStatus is the result of the token authentication request. @@ -141,18 +130,6 @@ message TokenReviewStatus { // +optional optional UserInfo user = 2; - // Audiences are audience identifiers chosen by the authenticator that are - // compatible with both the TokenReview and token. An identifier is any - // identifier in the intersection of the TokenReviewSpec audiences and the - // token's audiences. A client of the TokenReview API that sets the - // spec.audiences field should validate that a compatible audience identifier - // is returned in the status.audiences field to ensure that the TokenReview - // server is audience aware. If a TokenReview returns an empty - // status.audience field where status.authenticated is "true", the token is - // valid against the audience of the Kubernetes API server. - // +optional - repeated string audiences = 4; - // Error indicates that the token couldn't be checked // +optional optional string error = 3; diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go index 668b720..723457a 100644 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ b/vendor/k8s.io/api/authentication/v1/types.go @@ -40,7 +40,7 @@ const ( // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TokenReview attempts to authenticate a token to a known user. @@ -64,13 +64,6 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` - // Audiences is a list of the identifiers that the resource server presented - // with the token identifies as. Audience-aware token authenticators will - // verify that the token was intended for at least one of the audiences in - // this list. If no audiences are provided, the audience will default to the - // audience of the Kubernetes apiserver. - // +optional - Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. 
@@ -81,17 +74,6 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` - // Audiences are audience identifiers chosen by the authenticator that are - // compatible with both the TokenReview and token. An identifier is any - // identifier in the intersection of the TokenReviewSpec audiences and the - // token's audiences. A client of the TokenReview API that sets the - // spec.audiences field should validate that a compatible audience identifier - // is returned in the status.audiences field to ensure that the TokenReview - // server is audience aware. If a TokenReview returns an empty - // status.audience field where status.authenticated is "true", the token is - // valid against the audience of the Kubernetes API server. - // +optional - Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` @@ -155,10 +137,7 @@ type TokenRequestSpec struct { ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"` // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound object exists. - // NOTE: The API server's TokenReview endpoint will validate the - // BoundObjectRef, but other audiences may not. Keep ExpirationSeconds - // small if you want prompt revocation. + // The token will only be valid for as long as the bound objet exists. // +optional BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"` } diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go index 09f6b92..6632a5d 100644 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go @@ -51,7 +51,7 @@ var map_TokenRequestSpec = map[string]string{ "": "TokenRequestSpec contains client provided parameters of a token request.", "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", - "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound object exists. NOTE: The API server's TokenReview endpoint will validate the BoundObjectRef, but other audiences may not. Keep ExpirationSeconds small if you want prompt revocation.", + "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. 
The token will only be valid for as long as the bound objet exists.", } func (TokenRequestSpec) SwaggerDoc() map[string]string { @@ -79,9 +79,8 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", - "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -92,7 +91,6 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", - "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go index aca99c4..f36c253 100644 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go @@ -141,7 +141,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) return } @@ -167,11 +167,6 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } return } @@ -189,11 +184,6 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } return } diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go index 185a224..e0de315 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ b/vendor/k8s.io/api/authentication/v1beta1/doc.go @@ -15,8 +15,6 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +groupName=authentication.k8s.io // +k8s:openapi-gen=true - package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go index 6c391db..8503d21 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go @@ -14,24 +14,35 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto + + It has these top-level messages: + ExtraValue + TokenReview + TokenReviewSpec + TokenReviewStatus + UserInfo +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,147 +53,27 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{0} -} -func (m *ExtraValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExtraValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtraValue.Merge(m, src) -} -func (m *ExtraValue) XXX_Size() int { - return m.Size() -} -func (m *ExtraValue) XXX_DiscardUnknown() { - xxx_messageInfo_ExtraValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtraValue proto.InternalMessageInfo - -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{1} -} -func (m *TokenReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenReview.Merge(m, src) -} -func (m *TokenReview) XXX_Size() int { - return m.Size() -} -func (m *TokenReview) XXX_DiscardUnknown() { - xxx_messageInfo_TokenReview.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_TokenReview proto.InternalMessageInfo +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{2} -} -func (m *TokenReviewSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenReviewSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenReviewSpec.Merge(m, src) -} -func (m *TokenReviewSpec) XXX_Size() int { - return m.Size() -} -func (m *TokenReviewSpec) XXX_DiscardUnknown() { - xxx_messageInfo_TokenReviewSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_TokenReviewSpec proto.InternalMessageInfo - -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{3} -} -func (m *TokenReviewStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TokenReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TokenReviewStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_TokenReviewStatus.Merge(m, src) -} -func (m *TokenReviewStatus) XXX_Size() int { - return m.Size() -} -func (m *TokenReviewStatus) XXX_DiscardUnknown() { - 
xxx_messageInfo_TokenReviewStatus.DiscardUnknown(m) -} +func (m *TokenReview) Reset() { *m = TokenReview{} } +func (*TokenReview) ProtoMessage() {} +func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_TokenReviewStatus proto.InternalMessageInfo +func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } +func (*TokenReviewSpec) ProtoMessage() {} +func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_77c9b20d3ad27844, []int{4} -} -func (m *UserInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UserInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *UserInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_UserInfo.Merge(m, src) -} -func (m *UserInfo) XXX_Size() int { - return m.Size() -} -func (m *UserInfo) XXX_DiscardUnknown() { - xxx_messageInfo_UserInfo.DiscardUnknown(m) -} +func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } +func (*TokenReviewStatus) ProtoMessage() {} +func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_UserInfo proto.InternalMessageInfo +func (m *UserInfo) Reset() { *m = UserInfo{} } +func (*UserInfo) ProtoMessage() {} +func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") @@ -190,63 +81,11 @@ func init() { proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") - proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.UserInfo.ExtraEntry") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptor_77c9b20d3ad27844) } - -var fileDescriptor_77c9b20d3ad27844 = []byte{ - // 663 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xb6, 0xf3, 0x53, 0x92, 0x0d, 0x81, 0xb2, 0x12, 0x52, 0x14, 0x09, 0xa7, 0x84, 0x4b, 0xa5, - 0xd2, 0x35, 0xad, 0xaa, 0x52, 0x95, 0x53, 0x0d, 0x15, 0x2a, 0x52, 0x85, 0xb4, 0xb4, 0x1c, 0x80, - 0x03, 0x1b, 0x67, 0xea, 0x98, 0xe0, 0x1f, 0xad, 0xd7, 0x81, 0xde, 0xfa, 0x08, 0x1c, 0x39, 0x22, - 0xf1, 0x24, 0xdc, 0x7a, 0xec, 0xb1, 0x07, 0x14, 0x51, 0xf3, 0x04, 0xbc, 0x01, 0xda, 0xf5, 0xb6, - 0x4e, 0x1b, 0x41, 0xdb, 0x9b, 0xf7, 0x9b, 0xf9, 0xbe, 0x99, 0xf9, 0xc6, 0x83, 0x5e, 0x0c, 0xd7, - 0x12, 0xe2, 0x47, 0xf6, 0x30, 0xed, 0x01, 0x0f, 0x41, 0x40, 0x62, 0x8f, 0x20, 0xec, 0x47, 0xdc, - 0xd6, 0x01, 0x16, 0xfb, 0x36, 0x4b, 0xc5, 0x00, 0x42, 0xe1, 0xbb, 0x4c, 0xf8, 0x51, 0x68, 0x8f, - 0x96, 0x7a, 0x20, 0xd8, 0x92, 0xed, 0x41, 0x08, 0x9c, 0x09, 0xe8, 0x93, 0x98, 0x47, 0x22, 0xc2, - 0xf7, 0x73, 0x0a, 0x61, 0xb1, 0x4f, 0xce, 0x53, 0x88, 0xa6, 0xb4, 0x17, 0x3d, 0x5f, 0x0c, 0xd2, - 0x1e, 0x71, 0xa3, 0xc0, 0xf6, 0x22, 0x2f, 0xb2, 0x15, 0xb3, 0x97, 0xee, 0xa9, 0x97, 0x7a, 0xa8, - 0xaf, 0x5c, 0xb1, 0xbd, 0x52, 
0x34, 0x11, 0x30, 0x77, 0xe0, 0x87, 0xc0, 0xf7, 0xed, 0x78, 0xe8, - 0x49, 0x20, 0xb1, 0x03, 0x10, 0xcc, 0x1e, 0x4d, 0xf5, 0xd1, 0xb6, 0xff, 0xc5, 0xe2, 0x69, 0x28, - 0xfc, 0x00, 0xa6, 0x08, 0xab, 0x97, 0x11, 0x12, 0x77, 0x00, 0x01, 0xbb, 0xc8, 0xeb, 0x3e, 0x46, - 0x68, 0xf3, 0xb3, 0xe0, 0xec, 0x35, 0xfb, 0x98, 0x02, 0xee, 0xa0, 0xaa, 0x2f, 0x20, 0x48, 0x5a, - 0xe6, 0x5c, 0x79, 0xbe, 0xee, 0xd4, 0xb3, 0x71, 0xa7, 0xba, 0x25, 0x01, 0x9a, 0xe3, 0xeb, 0xb5, - 0xaf, 0xdf, 0x3a, 0xc6, 0xc1, 0xcf, 0x39, 0xa3, 0xfb, 0xbd, 0x84, 0x1a, 0x3b, 0xd1, 0x10, 0x42, - 0x0a, 0x23, 0x1f, 0x3e, 0xe1, 0xf7, 0xa8, 0x26, 0x87, 0xe9, 0x33, 0xc1, 0x5a, 0xe6, 0x9c, 0x39, - 0xdf, 0x58, 0x7e, 0x44, 0x0a, 0x33, 0xcf, 0x7a, 0x22, 0xf1, 0xd0, 0x93, 0x40, 0x42, 0x64, 0x36, - 0x19, 0x2d, 0x91, 0x97, 0xbd, 0x0f, 0xe0, 0x8a, 0x6d, 0x10, 0xcc, 0xc1, 0x87, 0xe3, 0x8e, 0x91, - 0x8d, 0x3b, 0xa8, 0xc0, 0xe8, 0x99, 0x2a, 0xde, 0x41, 0x95, 0x24, 0x06, 0xb7, 0x55, 0x52, 0xea, - 0xcb, 0xe4, 0xd2, 0x55, 0x91, 0x89, 0xfe, 0x5e, 0xc5, 0xe0, 0x3a, 0x37, 0xb5, 0x7e, 0x45, 0xbe, - 0xa8, 0x52, 0xc3, 0xef, 0xd0, 0x4c, 0x22, 0x98, 0x48, 0x93, 0x56, 0x59, 0xe9, 0xae, 0x5c, 0x53, - 0x57, 0x71, 0x9d, 0x5b, 0x5a, 0x79, 0x26, 0x7f, 0x53, 0xad, 0xd9, 0x75, 0xd1, 0xed, 0x0b, 0x4d, - 0xe0, 0x07, 0xa8, 0x2a, 0x24, 0xa4, 0x5c, 0xaa, 0x3b, 0x4d, 0xcd, 0xac, 0xe6, 0x79, 0x79, 0x0c, - 0x2f, 0xa0, 0x3a, 0x4b, 0xfb, 0x3e, 0x84, 0x2e, 0x24, 0xad, 0x92, 0x5a, 0x46, 0x33, 0x1b, 0x77, - 0xea, 0x1b, 0xa7, 0x20, 0x2d, 0xe2, 0xdd, 0x3f, 0x26, 0xba, 0x33, 0xd5, 0x12, 0x7e, 0x82, 0x9a, - 0x13, 0xed, 0x43, 0x5f, 0xd5, 0xab, 0x39, 0x77, 0x75, 0xbd, 0xe6, 0xc6, 0x64, 0x90, 0x9e, 0xcf, - 0xc5, 0xdb, 0xa8, 0x92, 0x26, 0xc0, 0xb5, 0xd7, 0x0b, 0x57, 0xf0, 0x64, 0x37, 0x01, 0xbe, 0x15, - 0xee, 0x45, 0x85, 0xc9, 0x12, 0xa1, 0x4a, 0xe6, 0xfc, 0x38, 0x95, 0xff, 0x8f, 0x23, 0x0d, 0x02, - 0xce, 0x23, 0xae, 0x16, 0x32, 0x61, 0xd0, 0xa6, 0x04, 0x69, 0x1e, 0xeb, 0xfe, 0x28, 0xa1, 0xda, - 0x69, 0x49, 0xfc, 0x10, 0xd5, 0x64, 0x99, 0x90, 0x05, 0xa0, 0x5d, 0x9d, 0xd5, 0x24, 0x95, 0x23, - 0x71, 0x7a, 0x96, 0x81, 0xef, 0xa1, 0x72, 0xea, 0xf7, 0xd5, 0x68, 0x75, 0xa7, 0xa1, 0x13, 0xcb, - 0xbb, 0x5b, 0xcf, 0xa8, 0xc4, 0x71, 0x17, 0xcd, 0x78, 0x3c, 0x4a, 0x63, 0xf9, 0x43, 0xc8, 0x46, - 0x91, 0x5c, 0xeb, 0x73, 0x85, 0x50, 0x1d, 0xc1, 0x6f, 0x51, 0x15, 0xe4, 0xd5, 0xa8, 0x59, 0x1a, - 0xcb, 0xab, 0xd7, 0xf0, 0x87, 0xa8, 0x73, 0xdb, 0x0c, 0x05, 0xdf, 0x9f, 0x18, 0x4d, 0x62, 0x34, - 0xd7, 0x6c, 0x7b, 0xfa, 0x24, 0x55, 0x0e, 0x9e, 0x45, 0xe5, 0x21, 0xec, 0xe7, 0x63, 0x51, 0xf9, - 0x89, 0x9f, 0xa2, 0xea, 0x48, 0x5e, 0xab, 0x5e, 0xce, 0xe2, 0x15, 0x8a, 0x17, 0x27, 0x4e, 0x73, - 0xee, 0x7a, 0x69, 0xcd, 0x74, 0x16, 0x0f, 0x4f, 0x2c, 0xe3, 0xe8, 0xc4, 0x32, 0x8e, 0x4f, 0x2c, - 0xe3, 0x20, 0xb3, 0xcc, 0xc3, 0xcc, 0x32, 0x8f, 0x32, 0xcb, 0x3c, 0xce, 0x2c, 0xf3, 0x57, 0x66, - 0x99, 0x5f, 0x7e, 0x5b, 0xc6, 0x9b, 0x1b, 0x5a, 0xe4, 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x66, - 0xbb, 0x89, 0x53, 0x68, 0x05, 0x00, 0x00, -} - func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -254,31 +93,32 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m) > 0 { - for iNdEx := len(m) - 1; iNdEx >= 0; 
iNdEx-- { - i -= len(m[iNdEx]) - copy(dAtA[i:], m[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) - i-- + for _, s := range m { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *TokenReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -286,52 +126,41 @@ func (m *TokenReview) Marshal() (dAtA []byte, err error) { } func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TokenReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -339,36 +168,21 @@ func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TokenReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Audiences) > 0 { - for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Audiences[iNdEx]) - copy(dAtA[i:], m.Audiences[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Token) - copy(dAtA[i:], m.Token) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) + i += copy(dAtA[i:], m.Token) + return i, nil } func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -376,54 +190,37 @@ func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func 
(m *TokenReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Audiences) > 0 { - for iNdEx := len(m.Audiences) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Audiences[iNdEx]) - copy(dAtA[i:], m.Audiences[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audiences[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x1a - { - size, err := m.User.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i-- + dAtA[i] = 0x8 + i++ if m.Authenticated { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) + n4, err := m.User.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + return i, nil } func (m *UserInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -431,81 +228,95 @@ func (m *UserInfo) Marshal() (dAtA []byte, err error) { } func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UserInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) for k := range m.Extra { keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { - v := m.Extra[string(keysForExtra[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForExtra[iNdEx]) - copy(dAtA[i:], keysForExtra[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + for _, k := range keysForExtra { dAtA[i] = 0x22 + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n5, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 } } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Groups[iNdEx]) - 
copy(dAtA[i:], m.Groups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x12 - i -= len(m.Username) - copy(dAtA[i:], m.Username) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m ExtraValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m) > 0 { @@ -518,9 +329,6 @@ func (m ExtraValue) Size() (n int) { } func (m *TokenReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -533,26 +341,14 @@ func (m *TokenReview) Size() (n int) { } func (m *TokenReviewSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Token) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } func (m *TokenReviewStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -560,19 +356,10 @@ func (m *TokenReviewStatus) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Error) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } func (m *UserInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Username) @@ -598,7 +385,14 @@ func (m *UserInfo) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -608,7 +402,7 @@ func (this *TokenReview) String() string { return "nil" } s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -621,7 +415,6 @@ func (this *TokenReviewSpec) String() string { } s := strings.Join([]string{`&TokenReviewSpec{`, `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `Audiences:` + fmt.Sprintf("%v", 
this.Audiences) + `,`, `}`, }, "") return s @@ -634,7 +427,6 @@ func (this *TokenReviewStatus) String() string { `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, `}`, }, "") return s @@ -685,7 +477,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -713,7 +505,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -723,9 +515,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -740,9 +529,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -770,7 +556,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -798,7 +584,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -807,9 +593,6 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -831,7 +614,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -840,9 +623,6 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -864,7 +644,7 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -873,9 +653,6 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -892,9 +669,6 @@ func (m *TokenReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -922,7 +696,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -950,7 +724,7 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -960,46 +734,11 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Token = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1009,9 +748,6 @@ func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1039,7 +775,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1067,7 +803,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1087,7 +823,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1096,9 +832,6 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1120,7 +853,7 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1130,46 +863,11 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1179,9 +877,6 @@ func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if 
(iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1209,7 +904,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1237,7 +932,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1247,9 +942,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1269,7 +961,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1279,9 +971,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1301,7 +990,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1311,9 +1000,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1333,7 +1019,7 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1342,20 +1028,54 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1365,88 +1085,46 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -1457,9 +1135,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1475,7 +1150,6 @@ func (m *UserInfo) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1507,8 +1181,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1525,34 +1201,101 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } 
+ innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 635 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcf, 0x4f, 0xd4, 0x40, + 0x14, 0x6e, 0xf7, 0x07, 0xee, 0xce, 0x8a, 0xe2, 0x24, 0x26, 0x9b, 0x4d, 0xec, 0xae, 0xeb, 0x85, + 0x44, 0x99, 0x0a, 0x21, 0x48, 0xf0, 0x64, 0x95, 0x18, 0x4c, 0x88, 0xc9, 0x08, 0x1e, 0xd4, 0x83, + 0xb3, 0xdd, 0x47, 0xb7, 0xae, 0xed, 0x34, 0xd3, 0x69, 0x95, 0x1b, 0x7f, 0x82, 0x47, 0x8f, 0x26, + 0xfe, 0x25, 0x26, 0x1e, 0x38, 0x72, 0xe4, 0x60, 0x88, 0xd4, 0x7f, 0xc4, 0xcc, 0x74, 0x64, 0x17, + 0x88, 0x01, 0x6e, 0xf3, 0xbe, 0xf7, 0xbe, 0x6f, 0xde, 0xf7, 0x66, 0x1e, 0x7a, 0x31, 0x5e, 0x4d, + 0x49, 0xc8, 0xdd, 0x71, 0x36, 0x00, 0x11, 0x83, 0x84, 0xd4, 0xcd, 0x21, 0x1e, 0x72, 0xe1, 0x9a, + 0x04, 0x4b, 0x42, 0x97, 0x65, 0x72, 0x04, 0xb1, 0x0c, 0x7d, 0x26, 0x43, 0x1e, 0xbb, 0xf9, 0xe2, + 0x00, 0x24, 0x5b, 0x74, 0x03, 0x88, 0x41, 0x30, 0x09, 0x43, 0x92, 0x08, 0x2e, 0x39, 0xbe, 0x5b, + 0x52, 0x08, 0x4b, 0x42, 0x72, 0x9a, 0x42, 0x0c, 0xa5, 0xb3, 0x10, 0x84, 0x72, 0x94, 0x0d, 0x88, + 0xcf, 0x23, 0x37, 0xe0, 0x01, 0x77, 0x35, 0x73, 0x90, 0xed, 0xe8, 0x48, 0x07, 0xfa, 0x54, 0x2a, + 0x76, 0x96, 0x27, 0x4d, 0x44, 0xcc, 0x1f, 0x85, 0x31, 0x88, 0x5d, 0x37, 0x19, 0x07, 0x0a, 0x48, + 0xdd, 0x08, 0x24, 0x73, 0xf3, 0x73, 0x7d, 0x74, 0xdc, 0xff, 0xb1, 0x44, 0x16, 0xcb, 0x30, 0x82, + 0x73, 0x84, 0x95, 0x8b, 0x08, 0xa9, 0x3f, 0x82, 0x88, 0x9d, 0xe5, 0xf5, 0x1f, 0x21, 0xb4, 0xfe, + 0x59, 0x0a, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x51, 0x3d, 0x94, 0x10, 0xa5, 0x6d, 0xbb, 0x57, + 0x9d, 0x6f, 0x7a, 0xcd, 0xe2, 0xa8, 0x5b, 0xdf, 0x50, 0x00, 0x2d, 0xf1, 0xb5, 0xc6, 0xd7, 0x6f, + 0x5d, 0x6b, 0xef, 0x57, 0xcf, 0xea, 0x7f, 0xaf, 0xa0, 0xd6, 0x16, 0x1f, 0x43, 0x4c, 0x21, 0x0f, + 0xe1, 0x13, 0x7e, 0x8f, 0x1a, 0xca, 0xcc, 0x90, 0x49, 0xd6, 0xb6, 0x7b, 0xf6, 0x7c, 0x6b, 0xe9, + 0x21, 0x99, 0x0c, 0xf3, 0xa4, 0x27, 0x92, 0x8c, 0x03, 0x05, 0xa4, 0x44, 0x55, 0x93, 0x7c, 0x91, + 0xbc, 0x1c, 0x7c, 0x00, 0x5f, 0x6e, 0x82, 0x64, 0x1e, 0xde, 0x3f, 0xea, 0x5a, 0xc5, 0x51, 0x17, + 0x4d, 0x30, 0x7a, 0xa2, 0x8a, 0xb7, 0x50, 0x2d, 0x4d, 0xc0, 0x6f, 0x57, 0xb4, 0xfa, 0x12, 0xb9, + 0xf0, 0xa9, 0xc8, 0x54, 0x7f, 0xaf, 0x12, 0xf0, 0xbd, 0xeb, 0x46, 0xbf, 0xa6, 0x22, 0xaa, 0xd5, + 0xf0, 0x3b, 0x34, 0x93, 0x4a, 0x26, 0xb3, 0xb4, 0x5d, 0xd5, 0xba, 0xcb, 0x57, 0xd4, 0xd5, 0x5c, + 0xef, 0x86, 0x51, 0x9e, 0x29, 0x63, 0x6a, 0x34, 0xfb, 0x2b, 0xe8, 0xe6, 0x99, 0x26, 0xf0, 0x3d, + 0x54, 0x97, 0x0a, 0xd2, 0x53, 0x6a, 0x7a, 
0xb3, 0x86, 0x59, 0x2f, 0xeb, 0xca, 0x5c, 0xff, 0xa7, + 0x8d, 0x6e, 0x9d, 0xbb, 0x05, 0x3f, 0x46, 0xb3, 0x53, 0x1d, 0xc1, 0x50, 0x4b, 0x34, 0xbc, 0xdb, + 0x46, 0x62, 0xf6, 0xc9, 0x74, 0x92, 0x9e, 0xae, 0xc5, 0x9b, 0xa8, 0x96, 0xa5, 0x20, 0xcc, 0xf8, + 0xee, 0x5f, 0xc2, 0xe6, 0x76, 0x0a, 0x62, 0x23, 0xde, 0xe1, 0x93, 0xb9, 0x29, 0x84, 0x6a, 0x19, + 0x65, 0x03, 0x84, 0xe0, 0x42, 0x8f, 0x6d, 0xca, 0xc6, 0xba, 0x02, 0x69, 0x99, 0xeb, 0xff, 0xa8, + 0xa0, 0xc6, 0x3f, 0x15, 0xfc, 0x00, 0x35, 0x14, 0x33, 0x66, 0x11, 0x18, 0xef, 0x73, 0x86, 0xa4, + 0x6b, 0x14, 0x4e, 0x4f, 0x2a, 0xf0, 0x1d, 0x54, 0xcd, 0xc2, 0xa1, 0xee, 0xb6, 0xe9, 0xb5, 0x4c, + 0x61, 0x75, 0x7b, 0xe3, 0x19, 0x55, 0x38, 0xee, 0xa3, 0x99, 0x40, 0xf0, 0x2c, 0x51, 0xcf, 0xa6, + 0xbe, 0x2a, 0x52, 0xc3, 0x7f, 0xae, 0x11, 0x6a, 0x32, 0xf8, 0x2d, 0xaa, 0x83, 0xfa, 0xdb, 0xed, + 0x5a, 0xaf, 0x3a, 0xdf, 0x5a, 0x5a, 0xb9, 0x82, 0x65, 0xa2, 0x97, 0x62, 0x3d, 0x96, 0x62, 0x77, + 0xca, 0x9a, 0xc2, 0x68, 0xa9, 0xd9, 0x09, 0xcc, 0xe2, 0xe8, 0x1a, 0x3c, 0x87, 0xaa, 0x63, 0xd8, + 0x2d, 0x6d, 0x51, 0x75, 0xc4, 0x4f, 0x51, 0x3d, 0x57, 0x3b, 0x65, 0xe6, 0xbd, 0x70, 0x89, 0xcb, + 0x27, 0x8b, 0x48, 0x4b, 0xee, 0x5a, 0x65, 0xd5, 0xf6, 0x16, 0xf6, 0x8f, 0x1d, 0xeb, 0xe0, 0xd8, + 0xb1, 0x0e, 0x8f, 0x1d, 0x6b, 0xaf, 0x70, 0xec, 0xfd, 0xc2, 0xb1, 0x0f, 0x0a, 0xc7, 0x3e, 0x2c, + 0x1c, 0xfb, 0x77, 0xe1, 0xd8, 0x5f, 0xfe, 0x38, 0xd6, 0x9b, 0x6b, 0x46, 0xe4, 0x6f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x39, 0x00, 0xe7, 0xfa, 0x0e, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto index caf2a6a..a057bc5 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ b/vendor/k8s.io/api/authentication/v1beta1/generated.proto @@ -57,14 +57,6 @@ message TokenReviewSpec { // Token is the opaque bearer token. // +optional optional string token = 1; - - // Audiences is a list of the identifiers that the resource server presented - // with the token identifies as. Audience-aware token authenticators will - // verify that the token was intended for at least one of the audiences in - // this list. If no audiences are provided, the audience will default to the - // audience of the Kubernetes apiserver. - // +optional - repeated string audiences = 2; } // TokenReviewStatus is the result of the token authentication request. @@ -77,18 +69,6 @@ message TokenReviewStatus { // +optional optional UserInfo user = 2; - // Audiences are audience identifiers chosen by the authenticator that are - // compatible with both the TokenReview and token. An identifier is any - // identifier in the intersection of the TokenReviewSpec audiences and the - // token's audiences. A client of the TokenReview API that sets the - // spec.audiences field should validate that a compatible audience identifier - // is returned in the status.audiences field to ensure that the TokenReview - // server is audience aware. If a TokenReview returns an empty - // status.audience field where status.authenticated is "true", the token is - // valid against the audience of the Kubernetes API server. 
- // +optional - repeated string audiences = 4; - // Error indicates that the token couldn't be checked // +optional optional string error = 3; diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go index 0083fb0..a90949d 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // TokenReview attempts to authenticate a token to a known user. @@ -48,13 +48,6 @@ type TokenReviewSpec struct { // Token is the opaque bearer token. // +optional Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` - // Audiences is a list of the identifiers that the resource server presented - // with the token identifies as. Audience-aware token authenticators will - // verify that the token was intended for at least one of the audiences in - // this list. If no audiences are provided, the audience will default to the - // audience of the Kubernetes apiserver. - // +optional - Audiences []string `json:"audiences,omitempty" protobuf:"bytes,2,rep,name=audiences"` } // TokenReviewStatus is the result of the token authentication request. @@ -65,17 +58,6 @@ type TokenReviewStatus struct { // User is the UserInfo associated with the provided token. // +optional User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` - // Audiences are audience identifiers chosen by the authenticator that are - // compatible with both the TokenReview and token. An identifier is any - // identifier in the intersection of the TokenReviewSpec audiences and the - // token's audiences. A client of the TokenReview API that sets the - // spec.audiences field should validate that a compatible audience identifier - // is returned in the status.audiences field to ensure that the TokenReview - // server is audience aware. If a TokenReview returns an empty - // status.audience field where status.authenticated is "true", the token is - // valid against the audience of the Kubernetes API server. - // +optional - Audiences []string `json:"audiences,omitempty" protobuf:"bytes,4,rep,name=audiences"` // Error indicates that the token couldn't be checked // +optional Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go index 8c9acfb..968999d 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go @@ -38,9 +38,8 @@ func (TokenReview) SwaggerDoc() map[string]string { } var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", - "audiences": "Audiences is a list of the identifiers that the resource server presented with the token identifies as. Audience-aware token authenticators will verify that the token was intended for at least one of the audiences in this list. 
If no audiences are provided, the audience will default to the audience of the Kubernetes apiserver.", + "": "TokenReviewSpec is a description of the token authentication request.", + "token": "Token is the opaque bearer token.", } func (TokenReviewSpec) SwaggerDoc() map[string]string { @@ -51,7 +50,6 @@ var map_TokenReviewStatus = map[string]string{ "": "TokenReviewStatus is the result of the token authentication request.", "authenticated": "Authenticated indicates that the token was associated with a known user.", "user": "User is the UserInfo associated with the provided token.", - "audiences": "Audiences are audience identifiers chosen by the authenticator that are compatible with both the TokenReview and token. An identifier is any identifier in the intersection of the TokenReviewSpec audiences and the token's audiences. A client of the TokenReview API that sets the spec.audiences field should validate that a compatible audience identifier is returned in the status.audiences field to ensure that the TokenReview server is audience aware. If a TokenReview returns an empty status.audience field where status.authenticated is \"true\", the token is valid against the audience of the Kubernetes API server.", "error": "Error indicates that the token couldn't be checked", } diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go index a5d82a8..3a5f6d5 100644 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go @@ -49,7 +49,7 @@ func (in *TokenReview) DeepCopyInto(out *TokenReview) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) + out.Spec = in.Spec in.Status.DeepCopyInto(&out.Status) return } @@ -75,11 +75,6 @@ func (in *TokenReview) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { *out = *in - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } return } @@ -97,11 +92,6 @@ func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { *out = *in in.User.DeepCopyInto(&out.User) - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } return } diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go index cf100e6..c06b798 100644 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ b/vendor/k8s.io/api/authorization/v1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io - package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go index dbc0bdc..e9145af 100644 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1/generated.pb.go @@ -14,24 +14,44 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + NonResourceRule + ResourceAttributes + ResourceRule + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SelfSubjectRulesReview + SelfSubjectRulesReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus + SubjectRulesReviewStatus +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,399 +62,75 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{0} -} -func (m *ExtraValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExtraValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtraValue.Merge(m, src) -} -func (m *ExtraValue) XXX_Size() int { - return m.Size() -} -func (m *ExtraValue) XXX_DiscardUnknown() { - xxx_messageInfo_ExtraValue.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{1} -} -func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) -} -func (m *LocalSubjectAccessReview) XXX_Size() int { - return m.Size() -} -func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { - xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) + return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo +func (m 
*NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{2} -} -func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonResourceAttributes.Merge(m, src) -} -func (m *NonResourceAttributes) XXX_Size() int { - return m.Size() -} -func (m *NonResourceAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo - -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{3} -} -func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NonResourceRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonResourceRule.Merge(m, src) -} -func (m *NonResourceRule) XXX_Size() int { - return m.Size() -} -func (m *NonResourceRule) XXX_DiscardUnknown() { - xxx_messageInfo_NonResourceRule.DiscardUnknown(m) -} - -var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo - -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{4} -} -func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceAttributes.Merge(m, src) -} -func (m *ResourceAttributes) XXX_Size() int { - return m.Size() -} -func (m *ResourceAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo - -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{5} -} -func (m *ResourceRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceRule.Merge(m, src) -} -func (m *ResourceRule) XXX_Size() int { - return m.Size() -} -func (m 
*ResourceRule) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceRule.DiscardUnknown(m) -} +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_ResourceRule proto.InternalMessageInfo +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{6} -} -func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SelfSubjectAccessReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) -} -func (m *SelfSubjectAccessReview) XXX_Size() int { - return m.Size() -} -func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { - xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) -} +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{7} -} -func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) -} -func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { - return m.Size() -} -func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { - xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) + return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo - -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{8} -} -func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m 
*SelfSubjectRulesReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) -} -func (m *SelfSubjectRulesReview) XXX_Size() int { - return m.Size() -} -func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { - xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) -} - -var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{9} -} -func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) -} -func (m *SelfSubjectRulesReviewSpec) XXX_Size() int { - return m.Size() -} -func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { - xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo - -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{10} -} -func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubjectAccessReview.Merge(m, src) -} -func (m *SubjectAccessReview) XXX_Size() int { - return m.Size() -} -func (m *SubjectAccessReview) XXX_DiscardUnknown() { - xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) + return fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{11} + return fileDescriptorGenerated, []int{11} } -func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) -} -func (m *SubjectAccessReviewSpec) XXX_Size() int { - return m.Size() -} -func (m 
*SubjectAccessReviewSpec) XXX_DiscardUnknown() { - xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{12} -} -func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + return fileDescriptorGenerated, []int{12} } -func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) -} -func (m *SubjectAccessReviewStatus) XXX_Size() int { - return m.Size() -} -func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_e50da13573e369bd, []int{13} -} -func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) -} -func (m *SubjectRulesReviewStatus) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{13} } -func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") @@ -449,95 +145,13 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec") - proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptor_e50da13573e369bd) -} - -var fileDescriptor_e50da13573e369bd = []byte{ - // 1140 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcf, 0x6f, 0x1b, 0xc5, - 0x17, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xdf, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xda, 0xd1, - 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, - 0x54, 0x13, 0x25, 0x12, 
0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, - 0xc2, 0xa9, 0x12, 0xff, 0x00, 0x47, 0x0e, 0x1c, 0xf8, 0x0f, 0xb8, 0x20, 0x71, 0xe3, 0xc0, 0x01, - 0xe5, 0xd8, 0x63, 0x91, 0x90, 0x45, 0x96, 0x33, 0xff, 0x03, 0x9a, 0xd9, 0xb1, 0x77, 0x9d, 0xac, - 0xdd, 0x84, 0x03, 0xbd, 0xf4, 0xb6, 0xfb, 0x3e, 0x9f, 0xf7, 0xe6, 0xcd, 0xfb, 0x35, 0x0f, 0x6c, - 0x1f, 0x6d, 0x32, 0xcb, 0xf5, 0xed, 0xa3, 0xb0, 0x49, 0xa8, 0x47, 0x38, 0x61, 0x76, 0x9f, 0x78, - 0x2d, 0x9f, 0xda, 0x0a, 0xc0, 0x81, 0x6b, 0xe3, 0x90, 0x77, 0x7c, 0xea, 0x7e, 0x8d, 0xb9, 0xeb, - 0x7b, 0x76, 0x7f, 0xdd, 0x6e, 0x13, 0x8f, 0x50, 0xcc, 0x49, 0xcb, 0x0a, 0xa8, 0xcf, 0x7d, 0x78, - 0x2b, 0x26, 0x5b, 0x38, 0x70, 0xad, 0x31, 0xb2, 0xd5, 0x5f, 0x5f, 0x79, 0xaf, 0xed, 0xf2, 0x4e, - 0xd8, 0xb4, 0x1c, 0xbf, 0x67, 0xb7, 0xfd, 0xb6, 0x6f, 0x4b, 0x9d, 0x66, 0x78, 0x28, 0xff, 0xe4, - 0x8f, 0xfc, 0x8a, 0x6d, 0xad, 0xdc, 0x4d, 0x0e, 0xee, 0x61, 0xa7, 0xe3, 0x7a, 0x84, 0x9e, 0xd8, - 0xc1, 0x51, 0x5b, 0x08, 0x98, 0xdd, 0x23, 0x1c, 0x67, 0x78, 0xb0, 0x62, 0x4f, 0xd2, 0xa2, 0xa1, - 0xc7, 0xdd, 0x1e, 0xb9, 0xa0, 0x70, 0xff, 0x55, 0x0a, 0xcc, 0xe9, 0x90, 0x1e, 0x3e, 0xaf, 0x67, - 0x6e, 0x00, 0xb0, 0xf3, 0x15, 0xa7, 0xf8, 0x00, 0x77, 0x43, 0x02, 0x6b, 0xa0, 0xe8, 0x72, 0xd2, - 0x63, 0x86, 0xb6, 0x9a, 0x5f, 0x2b, 0x37, 0xca, 0xd1, 0xa0, 0x56, 0xdc, 0x15, 0x02, 0x14, 0xcb, - 0x1f, 0x94, 0xbe, 0xfb, 0xa1, 0x96, 0x7b, 0xfe, 0xc7, 0x6a, 0xce, 0xfc, 0x49, 0x07, 0xc6, 0x23, - 0xdf, 0xc1, 0xdd, 0xbd, 0xb0, 0xf9, 0x05, 0x71, 0xf8, 0x96, 0xe3, 0x10, 0xc6, 0x10, 0xe9, 0xbb, - 0xe4, 0x18, 0x7e, 0x0e, 0x4a, 0xe2, 0x66, 0x2d, 0xcc, 0xb1, 0xa1, 0xad, 0x6a, 0x6b, 0x95, 0xfa, - 0xfb, 0x56, 0x12, 0xd3, 0x91, 0x83, 0x56, 0x70, 0xd4, 0x16, 0x02, 0x66, 0x09, 0xb6, 0xd5, 0x5f, - 0xb7, 0x3e, 0x92, 0xb6, 0x1e, 0x13, 0x8e, 0x1b, 0xf0, 0x74, 0x50, 0xcb, 0x45, 0x83, 0x1a, 0x48, - 0x64, 0x68, 0x64, 0x15, 0x1e, 0x80, 0x02, 0x0b, 0x88, 0x63, 0xe8, 0xd2, 0xfa, 0x5d, 0x6b, 0x4a, - 0xc6, 0xac, 0x0c, 0x0f, 0xf7, 0x02, 0xe2, 0x34, 0xae, 0xa9, 0x13, 0x0a, 0xe2, 0x0f, 0x49, 0x7b, - 0xf0, 0x33, 0x30, 0xc3, 0x38, 0xe6, 0x21, 0x33, 0xf2, 0xd2, 0xf2, 0xfd, 0x2b, 0x5b, 0x96, 0xda, - 0x8d, 0xff, 0x29, 0xdb, 0x33, 0xf1, 0x3f, 0x52, 0x56, 0xcd, 0x4f, 0xc0, 0xd2, 0x13, 0xdf, 0x43, - 0x84, 0xf9, 0x21, 0x75, 0xc8, 0x16, 0xe7, 0xd4, 0x6d, 0x86, 0x9c, 0x30, 0xb8, 0x0a, 0x0a, 0x01, - 0xe6, 0x1d, 0x19, 0xae, 0x72, 0xe2, 0xda, 0x53, 0xcc, 0x3b, 0x48, 0x22, 0x82, 0xd1, 0x27, 0xb4, - 0x29, 0xaf, 0x9c, 0x62, 0x1c, 0x10, 0xda, 0x44, 0x12, 0x31, 0xbf, 0x04, 0xf3, 0x29, 0xe3, 0x28, - 0xec, 0xca, 0x8c, 0x0a, 0x68, 0x2c, 0xa3, 0x42, 0x83, 0xa1, 0x58, 0x0e, 0x1f, 0x82, 0x79, 0x2f, - 0xd1, 0xd9, 0x47, 0x8f, 0x98, 0xa1, 0x4b, 0xea, 0x62, 0x34, 0xa8, 0xa5, 0xcd, 0x09, 0x08, 0x9d, - 0xe7, 0x9a, 0xbf, 0xe8, 0x00, 0x66, 0xdc, 0xc6, 0x06, 0x65, 0x0f, 0xf7, 0x08, 0x0b, 0xb0, 0x43, - 0xd4, 0x95, 0xae, 0x2b, 0x87, 0xcb, 0x4f, 0x86, 0x00, 0x4a, 0x38, 0xaf, 0xbe, 0x1c, 0x7c, 0x0b, - 0x14, 0xdb, 0xd4, 0x0f, 0x03, 0x99, 0x98, 0x72, 0x63, 0x4e, 0x51, 0x8a, 0x1f, 0x0a, 0x21, 0x8a, - 0x31, 0x78, 0x1b, 0xcc, 0xf6, 0x09, 0x65, 0xae, 0xef, 0x19, 0x05, 0x49, 0x9b, 0x57, 0xb4, 0xd9, - 0x83, 0x58, 0x8c, 0x86, 0x38, 0xbc, 0x03, 0x4a, 0x54, 0x39, 0x6e, 0x14, 0x25, 0x77, 0x41, 0x71, - 0x4b, 0xa3, 0x08, 0x8e, 0x18, 0xf0, 0x1e, 0xa8, 0xb0, 0xb0, 0x39, 0x52, 0x98, 0x91, 0x0a, 0x8b, - 0x4a, 0xa1, 0xb2, 0x97, 0x40, 0x28, 0xcd, 0x13, 0xd7, 0x12, 0x77, 0x34, 0x66, 0xc7, 0xaf, 0x25, - 0x42, 0x80, 0x24, 0x62, 0xfe, 0xaa, 0x81, 0x6b, 0x57, 0xcb, 0xd8, 0xbb, 0xa0, 0x8c, 0x03, 0x57, - 0x5e, 0x7b, 0x98, 0xab, 0x39, 0x11, 0xd7, 0xad, 
0xa7, 0xbb, 0xb1, 0x10, 0x25, 0xb8, 0x20, 0x0f, - 0x9d, 0x11, 0x25, 0x3d, 0x22, 0x0f, 0x8f, 0x64, 0x28, 0xc1, 0xe1, 0x06, 0x98, 0x1b, 0xfe, 0xc8, - 0x24, 0x19, 0x05, 0xa9, 0x70, 0x3d, 0x1a, 0xd4, 0xe6, 0x50, 0x1a, 0x40, 0xe3, 0x3c, 0xf3, 0x67, - 0x1d, 0x2c, 0xef, 0x91, 0xee, 0xe1, 0xeb, 0x99, 0x05, 0xcf, 0xc6, 0x66, 0xc1, 0xe6, 0xf4, 0x8e, - 0xcd, 0xf6, 0xf2, 0xb5, 0xcd, 0x83, 0xef, 0x75, 0x70, 0x6b, 0x8a, 0x4f, 0xf0, 0x18, 0x40, 0x7a, - 0xa1, 0xbd, 0x54, 0x1c, 0xed, 0xa9, 0xbe, 0x5c, 0xec, 0xca, 0xc6, 0x8d, 0x68, 0x50, 0xcb, 0xe8, - 0x56, 0x94, 0x71, 0x04, 0xfc, 0x46, 0x03, 0x4b, 0x5e, 0xd6, 0xa4, 0x52, 0x61, 0xae, 0x4f, 0x3d, - 0x3c, 0x73, 0xc6, 0x35, 0x6e, 0x46, 0x83, 0x5a, 0xf6, 0xf8, 0x43, 0xd9, 0x67, 0x89, 0x57, 0xe6, - 0x46, 0x2a, 0x3c, 0xa2, 0x41, 0xfe, 0xbb, 0xba, 0xfa, 0x78, 0xac, 0xae, 0x36, 0x2e, 0x5b, 0x57, - 0x29, 0x27, 0x27, 0x96, 0xd5, 0xa7, 0xe7, 0xca, 0xea, 0xde, 0x65, 0xca, 0x2a, 0x6d, 0x78, 0x7a, - 0x55, 0x3d, 0x06, 0x2b, 0x93, 0x1d, 0xba, 0xf2, 0x70, 0x36, 0x7f, 0xd4, 0xc1, 0xe2, 0x9b, 0x67, - 0xfe, 0x2a, 0x6d, 0xfd, 0x5b, 0x01, 0x2c, 0xbf, 0x69, 0xe9, 0x49, 0x8b, 0x4e, 0xc8, 0x08, 0x55, - 0xcf, 0xf8, 0x28, 0x39, 0xfb, 0x8c, 0x50, 0x24, 0x11, 0x68, 0x82, 0x99, 0x76, 0xfc, 0xba, 0xc5, - 0xef, 0x0f, 0x10, 0x01, 0x56, 0x4f, 0x9b, 0x42, 0x60, 0x0b, 0x14, 0x89, 0xd8, 0x5b, 0x8d, 0xe2, - 0x6a, 0x7e, 0xad, 0x52, 0xff, 0xe0, 0xdf, 0x54, 0x86, 0x25, 0x37, 0xdf, 0x1d, 0x8f, 0xd3, 0x93, - 0x64, 0x9d, 0x90, 0x32, 0x14, 0x1b, 0x87, 0xff, 0x07, 0xf9, 0xd0, 0x6d, 0xa9, 0xd7, 0xbe, 0xa2, - 0x28, 0xf9, 0xfd, 0xdd, 0x6d, 0x24, 0xe4, 0x2b, 0x58, 0x2d, 0xcf, 0xd2, 0x04, 0x5c, 0x00, 0xf9, - 0x23, 0x72, 0x12, 0x37, 0x14, 0x12, 0x9f, 0xf0, 0x21, 0x28, 0xf6, 0xc5, 0x5e, 0xad, 0xe2, 0xfb, - 0xce, 0x54, 0x27, 0x93, 0x35, 0x1c, 0xc5, 0x5a, 0x0f, 0xf4, 0x4d, 0xcd, 0xfc, 0x5d, 0x03, 0x37, - 0x27, 0x96, 0x9f, 0x58, 0x77, 0x70, 0xb7, 0xeb, 0x1f, 0x93, 0x96, 0x3c, 0xb6, 0x94, 0xac, 0x3b, - 0x5b, 0xb1, 0x18, 0x0d, 0x71, 0xf8, 0x36, 0x98, 0x69, 0x11, 0xcf, 0x25, 0x2d, 0xb9, 0x18, 0x95, - 0x92, 0xca, 0xdd, 0x96, 0x52, 0xa4, 0x50, 0xc1, 0xa3, 0x04, 0x33, 0xdf, 0x53, 0xab, 0xd8, 0x88, - 0x87, 0xa4, 0x14, 0x29, 0x14, 0x6e, 0x81, 0x79, 0x22, 0xdc, 0x94, 0xfe, 0xef, 0x50, 0xea, 0x0f, - 0x33, 0xba, 0xac, 0x14, 0xe6, 0x77, 0xc6, 0x61, 0x74, 0x9e, 0x6f, 0xfe, 0xad, 0x03, 0x63, 0xd2, - 0x68, 0x83, 0x87, 0xc9, 0x2e, 0x22, 0x41, 0xb9, 0x0e, 0x55, 0xea, 0xb7, 0x2f, 0xd5, 0x20, 0x42, - 0xa3, 0xb1, 0xa4, 0x1c, 0x99, 0x4b, 0x4b, 0x53, 0xab, 0x8b, 0xfc, 0x85, 0x14, 0x2c, 0x78, 0xe3, - 0x3b, 0x73, 0xbc, 0x54, 0x55, 0xea, 0x77, 0x2e, 0xdb, 0x0e, 0xf2, 0x34, 0x43, 0x9d, 0xb6, 0x70, - 0x0e, 0x60, 0xe8, 0x82, 0x7d, 0x58, 0x07, 0xc0, 0xf5, 0x1c, 0xbf, 0x17, 0x74, 0x09, 0x27, 0x32, - 0x6c, 0xa5, 0x64, 0x0e, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xc5, 0xbb, 0x70, 0xb5, 0x78, 0x37, - 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, - 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, - 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x99, 0x87, 0xb8, 0x24, - 0x47, 0x0f, 0x00, 0x00, -} - func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -545,31 +159,32 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m) > 0 { - for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m[iNdEx]) - copy(dAtA[i:], m[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) - i-- + for _, s := range m { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -577,52 +192,41 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -630,32 +234,25 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Verb) - copy(dAtA[i:], m.Verb) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i-- - dAtA[i] = 0x12 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i += copy(dAtA[i:], m.Verb) + return i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -663,40 
+260,47 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.NonResourceURLs) > 0 { - for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.NonResourceURLs[iNdEx]) - copy(dAtA[i:], m.NonResourceURLs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } if len(m.Verbs) > 0 { - for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Verbs[iNdEx]) - copy(dAtA[i:], m.Verbs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) - i-- + for _, s := range m.Verbs { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -704,57 +308,45 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x3a - i -= len(m.Subresource) - copy(dAtA[i:], m.Subresource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i-- - dAtA[i] = 0x32 - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x2a - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x22 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0x1a - i -= len(m.Verb) - copy(dAtA[i:], m.Verb) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i += copy(dAtA[i:], m.Verb) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i += copy(dAtA[i:], m.Subresource) + 
dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -762,58 +354,77 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) - i-- - dAtA[i] = 0x1a + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } if len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) - i-- + for _, s := range m.APIGroups { dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Verbs) > 0 { - for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Verbs[iNdEx]) - copy(dAtA[i:], m.Verbs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -821,52 +432,41 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + 
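// A standalone sketch (not part of the vendored file) of how the regenerated
// marshalers in these hunks write an embedded message field: a key byte, a
// varint of the nested message's Size(), then the nested MarshalTo output
// written directly into the buffer. The sizedMarshaler interface and all names
// here are illustrative assumptions, not the vendored API.
package main

import "fmt"

type sizedMarshaler interface {
	Size() int
	MarshalTo(dAtA []byte) (int, error)
}

// putVarint writes v at dAtA[offset:] and returns the next free offset.
func putVarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

// putMessageField writes key, length, and body of an embedded message at dAtA[i:].
func putMessageField(dAtA []byte, i int, fieldNum int, m sizedMarshaler) (int, error) {
	dAtA[i] = byte(fieldNum<<3 | 2) // length-delimited wire type
	i++
	i = putVarint(dAtA, i, uint64(m.Size()))
	n, err := m.MarshalTo(dAtA[i:])
	if err != nil {
		return 0, err
	}
	return i + n, nil
}

// stringMsg stands in for a nested message with a single string field 1.
// Its Size and MarshalTo assume the string is shorter than 128 bytes.
type stringMsg string

func (s stringMsg) Size() int { return 1 + 1 + len(s) } // key + 1-byte length + bytes

func (s stringMsg) MarshalTo(dAtA []byte) (int, error) {
	dAtA[0] = 0x0a
	dAtA[1] = byte(len(s))
	copy(dAtA[2:], s)
	return 2 + len(s), nil
}

func main() {
	buf := make([]byte, 64)
	n, err := putMessageField(buf, 0, 1, stringMsg("kube-system"))
	fmt.Println(n, err, buf[:n])
}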
dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n4 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -874,46 +474,37 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.NonResourceAttributes != nil { - { - size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n7 } - if m.ResourceAttributes != nil { - { - size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n8 } - return len(dAtA) - i, nil + return i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -921,52 +512,41 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n9 dAtA[i] = 0x12 - { - size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil } func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -974,27 +554,21 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1002,52 +576,41 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n12 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n13 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil } func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1055,94 +618,91 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x32 - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { - v := m.Extra[string(keysForExtra[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForExtra[iNdEx]) - copy(dAtA[i:], keysForExtra[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n15 } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Groups[iNdEx]) - copy(dAtA[i:], m.Groups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) - i-- - dAtA[i] = 0x22 + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n16 } - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i-- dAtA[i] = 0x1a - if m.NonResourceAttributes != nil { - { - size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x12 } - if m.ResourceAttributes != nil { - { - size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for _, k := range keysForExtra { + dAtA[i] = 0x2a + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n17, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n17 } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], 
m.UID) + return i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1150,48 +710,41 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i-- - if m.Denied { + dAtA[i] = 0x8 + i++ + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 - i -= len(m.EvaluationError) - copy(dAtA[i:], m.EvaluationError) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i-- - dAtA[i] = 0x1a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- + i++ dAtA[i] = 0x12 - i-- - if m.Allowed { + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + return i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1199,74 +752,77 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.EvaluationError) - copy(dAtA[i:], m.EvaluationError) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i-- - dAtA[i] = 0x22 - i-- - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.ResourceRules) > 0 { + for _, msg := range m.ResourceRules { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - i-- - dAtA[i] = 0x18 if len(m.NonResourceRules) > 0 { - for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + for _, msg := range m.NonResourceRules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - if len(m.ResourceRules) > 0 { - for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + dAtA[i] = 0x18 + i++ + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i 
+= copy(dAtA[i:], m.EvaluationError) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m ExtraValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m) > 0 { @@ -1279,9 +835,6 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1294,9 +847,6 @@ func (m *LocalSubjectAccessReview) Size() (n int) { } func (m *NonResourceAttributes) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Path) @@ -1307,9 +857,6 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Verbs) > 0 { @@ -1328,9 +875,6 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Namespace) @@ -1351,9 +895,6 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Verbs) > 0 { @@ -1384,9 +925,6 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1399,9 +937,6 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m *SelfSubjectAccessReviewSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResourceAttributes != nil { @@ -1416,9 +951,6 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1431,9 +963,6 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Namespace) @@ -1442,9 +971,6 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1457,9 +983,6 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResourceAttributes != nil { @@ -1493,9 +1016,6 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -1508,9 +1028,6 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1532,7 +1049,14 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func 
sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1542,7 +1066,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1605,7 +1129,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1617,8 +1141,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1628,7 +1152,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1650,7 +1174,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1672,8 +1196,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Extra:` + mapStringForExtra + `,`, @@ -1699,19 +1223,9 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } - repeatedStringForResourceRules := "[]ResourceRule{" - for _, f := range this.ResourceRules { - repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceRules += "}" - repeatedStringForNonResourceRules := "[]NonResourceRule{" - for _, f := range this.NonResourceRules { - repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," - } - repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + repeatedStringForResourceRules + `,`, - `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, + `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1741,7 +1255,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1769,7 +1283,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1779,9 +1293,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1796,9 +1307,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1826,7 +1334,7 @@ func (m *LocalSubjectAccessReview) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1854,7 +1362,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1863,9 +1371,6 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1887,7 +1392,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1896,9 +1401,6 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1920,7 +1422,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1929,9 +1431,6 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1948,9 +1447,6 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1978,7 +1474,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2006,7 +1502,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2016,9 +1512,6 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2038,7 +1531,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2048,9 +1541,6 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2065,9 +1555,6 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2095,7 +1582,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2123,7 +1610,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2133,9 +1620,6 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2155,7 +1639,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2165,9 +1649,6 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2182,9 +1663,6 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2212,7 +1690,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2240,7 +1718,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2250,9 +1728,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2272,7 +1747,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2282,9 +1757,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2304,7 +1776,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2314,9 +1786,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2336,7 +1805,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2346,9 +1815,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2368,7 +1834,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2378,9 +1844,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2400,7 +1863,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2410,9 +1873,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2432,7 +1892,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2442,9 +1902,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2459,9 +1916,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2489,7 +1943,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2517,7 +1971,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2527,9 +1981,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2549,7 +2000,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2559,9 +2010,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2581,7 +2029,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2591,9 +2039,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2613,7 +2058,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2623,9 +2068,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2640,9 +2082,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -2670,7 +2109,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2698,7 +2137,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2707,9 +2146,6 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2731,7 +2167,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2740,9 +2176,6 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2764,7 +2197,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2773,9 +2206,6 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2792,9 +2222,6 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2822,7 +2249,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2850,7 +2277,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2859,9 +2286,6 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2886,7 +2310,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2895,9 +2319,6 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2917,9 +2338,6 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2947,7 +2365,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2975,7 +2393,7 @@ func (m 
*SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2984,9 +2402,6 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3008,7 +2423,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3017,9 +2432,6 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3041,7 +2453,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3050,9 +2462,6 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3069,9 +2478,6 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3099,7 +2505,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3127,7 +2533,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3137,9 +2543,6 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3154,9 +2557,6 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3184,7 +2584,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3212,7 +2612,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3221,9 +2621,6 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3245,7 +2642,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3254,9 +2651,6 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated 
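// A standalone sketch (not part of the vendored file) of the base-128 varint
// decode loop these Unmarshal hunks repeat inline: accumulate the low 7 bits of
// each byte, shifting by 7 per byte, until a byte with the high bit clear ends
// the value. The error values and function name here are illustrative.
package main

import (
	"errors"
	"fmt"
	"io"
)

var errIntOverflow = errors.New("proto: integer overflow")

// readVarint decodes one varint from dAtA starting at index i and returns the
// value along with the index of the first byte after it.
func readVarint(dAtA []byte, i int) (uint64, int, error) {
	var x uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errIntOverflow // more than ten continuation bytes
		}
		if i >= len(dAtA) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := dAtA[i]
		i++
		x |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return x, i, nil
		}
	}
}

func main() {
	// 0x96 0x01 is the canonical varint encoding of 150.
	v, next, err := readVarint([]byte{0x96, 0x01, 0xff}, 0)
	fmt.Println(v, next, err) // 150 2 <nil>
}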
} postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3278,7 +2672,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3287,9 +2681,6 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3306,9 +2697,6 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3336,7 +2724,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3364,7 +2752,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3373,9 +2761,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3400,7 +2785,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3409,9 +2794,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3436,7 +2818,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3446,9 +2828,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3468,7 +2847,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3478,9 +2857,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3500,7 +2876,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3509,20 +2885,54 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3532,88 +2942,46 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = 
*mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -3629,7 +2997,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3639,9 +3007,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3656,9 +3021,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3686,7 +3048,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3714,7 +3076,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3734,7 +3096,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3744,9 +3106,6 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3766,7 +3125,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3776,9 +3135,6 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3798,7 +3154,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3813,9 +3169,6 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3843,7 +3196,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3871,7 +3224,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3880,9 +3233,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3905,7 +3255,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3914,9 +3264,6 @@ func (m 
*SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3939,7 +3286,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3959,7 +3306,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3969,9 +3316,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3986,9 +3330,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4004,7 +3345,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -4036,8 +3376,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -4054,34 +3396,133 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1140 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, + 0x1b, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xff, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 
0xfa, 0xdb, 0xd1, + 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, + 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, + 0xc2, 0xa9, 0x12, 0x5f, 0x80, 0x23, 0x07, 0x0e, 0x7c, 0x03, 0x2e, 0x48, 0xdc, 0x38, 0x70, 0x40, + 0x39, 0xf6, 0x58, 0x24, 0x64, 0x91, 0xe5, 0xcc, 0x77, 0x40, 0x33, 0x3b, 0xf6, 0xae, 0x93, 0xb5, + 0x9b, 0x70, 0xa0, 0x97, 0xde, 0x76, 0x9f, 0xdf, 0xef, 0x79, 0x99, 0xe7, 0x65, 0xe6, 0x01, 0xdb, + 0x47, 0x9b, 0xcc, 0x72, 0x7d, 0xfb, 0x28, 0x6c, 0x12, 0xea, 0x11, 0x4e, 0x98, 0xdd, 0x27, 0x5e, + 0xcb, 0xa7, 0xb6, 0x02, 0x70, 0xe0, 0xda, 0x38, 0xe4, 0x1d, 0x9f, 0xba, 0x5f, 0x63, 0xee, 0xfa, + 0x9e, 0xdd, 0x5f, 0xb7, 0xdb, 0xc4, 0x23, 0x14, 0x73, 0xd2, 0xb2, 0x02, 0xea, 0x73, 0x1f, 0xde, + 0x8a, 0xc9, 0x16, 0x0e, 0x5c, 0x6b, 0x8c, 0x6c, 0xf5, 0xd7, 0x57, 0xde, 0x6b, 0xbb, 0xbc, 0x13, + 0x36, 0x2d, 0xc7, 0xef, 0xd9, 0x6d, 0xbf, 0xed, 0xdb, 0x52, 0xa7, 0x19, 0x1e, 0xca, 0x3f, 0xf9, + 0x23, 0xbf, 0x62, 0x5b, 0x2b, 0x77, 0x13, 0xc7, 0x3d, 0xec, 0x74, 0x5c, 0x8f, 0xd0, 0x13, 0x3b, + 0x38, 0x6a, 0x0b, 0x01, 0xb3, 0x7b, 0x84, 0xe3, 0x8c, 0x08, 0x56, 0xec, 0x49, 0x5a, 0x34, 0xf4, + 0xb8, 0xdb, 0x23, 0x17, 0x14, 0xee, 0xbf, 0x4a, 0x81, 0x39, 0x1d, 0xd2, 0xc3, 0xe7, 0xf5, 0xcc, + 0x0d, 0x00, 0x76, 0xbe, 0xe2, 0x14, 0x1f, 0xe0, 0x6e, 0x48, 0x60, 0x0d, 0x14, 0x5d, 0x4e, 0x7a, + 0xcc, 0xd0, 0x56, 0xf3, 0x6b, 0xe5, 0x46, 0x39, 0x1a, 0xd4, 0x8a, 0xbb, 0x42, 0x80, 0x62, 0xf9, + 0x83, 0xd2, 0x77, 0x3f, 0xd4, 0x72, 0xcf, 0xff, 0x58, 0xcd, 0x99, 0x3f, 0xe9, 0xc0, 0x78, 0xe4, + 0x3b, 0xb8, 0xbb, 0x17, 0x36, 0xbf, 0x20, 0x0e, 0xdf, 0x72, 0x1c, 0xc2, 0x18, 0x22, 0x7d, 0x97, + 0x1c, 0xc3, 0xcf, 0x41, 0x49, 0x9c, 0xac, 0x85, 0x39, 0x36, 0xb4, 0x55, 0x6d, 0xad, 0x52, 0x7f, + 0xdf, 0x4a, 0x72, 0x3a, 0x0a, 0xd0, 0x0a, 0x8e, 0xda, 0x42, 0xc0, 0x2c, 0xc1, 0xb6, 0xfa, 0xeb, + 0xd6, 0x47, 0xd2, 0xd6, 0x63, 0xc2, 0x71, 0x03, 0x9e, 0x0e, 0x6a, 0xb9, 0x68, 0x50, 0x03, 0x89, + 0x0c, 0x8d, 0xac, 0xc2, 0x03, 0x50, 0x60, 0x01, 0x71, 0x0c, 0x5d, 0x5a, 0xbf, 0x6b, 0x4d, 0xa9, + 0x98, 0x95, 0x11, 0xe1, 0x5e, 0x40, 0x9c, 0xc6, 0x35, 0xe5, 0xa1, 0x20, 0xfe, 0x90, 0xb4, 0x07, + 0x3f, 0x03, 0x33, 0x8c, 0x63, 0x1e, 0x32, 0x23, 0x2f, 0x2d, 0xdf, 0xbf, 0xb2, 0x65, 0xa9, 0xdd, + 0xf8, 0x9f, 0xb2, 0x3d, 0x13, 0xff, 0x23, 0x65, 0xd5, 0xfc, 0x04, 0x2c, 0x3d, 0xf1, 0x3d, 0x44, + 0x98, 0x1f, 0x52, 0x87, 0x6c, 0x71, 0x4e, 0xdd, 0x66, 0xc8, 0x09, 0x83, 0xab, 0xa0, 0x10, 0x60, + 0xde, 0x91, 0xe9, 0x2a, 0x27, 0xa1, 0x3d, 0xc5, 0xbc, 0x83, 0x24, 0x22, 0x18, 0x7d, 0x42, 0x9b, + 0xf2, 0xc8, 0x29, 0xc6, 0x01, 0xa1, 0x4d, 0x24, 0x11, 0xf3, 0x4b, 0x30, 0x9f, 0x32, 0x8e, 0xc2, + 0xae, 0xac, 0xa8, 0x80, 0xc6, 0x2a, 0x2a, 0x34, 0x18, 0x8a, 0xe5, 0xf0, 0x21, 0x98, 0xf7, 0x12, + 0x9d, 0x7d, 0xf4, 0x88, 0x19, 0xba, 0xa4, 0x2e, 0x46, 0x83, 0x5a, 0xda, 0x9c, 0x80, 0xd0, 0x79, + 0xae, 0xf9, 0x8b, 0x0e, 0x60, 0xc6, 0x69, 0x6c, 0x50, 0xf6, 0x70, 0x8f, 0xb0, 0x00, 0x3b, 0x44, + 0x1d, 0xe9, 0xba, 0x0a, 0xb8, 0xfc, 0x64, 0x08, 0xa0, 0x84, 0xf3, 0xea, 0xc3, 0xc1, 0xb7, 0x40, + 0xb1, 0x4d, 0xfd, 0x30, 0x90, 0x85, 0x29, 0x37, 0xe6, 0x14, 0xa5, 0xf8, 0xa1, 0x10, 0xa2, 0x18, + 0x83, 0xb7, 0xc1, 0x6c, 0x9f, 0x50, 0xe6, 0xfa, 0x9e, 0x51, 0x90, 0xb4, 0x79, 0x45, 0x9b, 0x3d, + 0x88, 0xc5, 0x68, 0x88, 0xc3, 0x3b, 0xa0, 0x44, 0x55, 0xe0, 0x46, 0x51, 0x72, 0x17, 0x14, 0xb7, + 0x34, 0xca, 0xe0, 0x88, 0x01, 0xef, 0x81, 0x0a, 0x0b, 0x9b, 0x23, 0x85, 0x19, 0xa9, 0xb0, 0xa8, + 0x14, 0x2a, 0x7b, 0x09, 0x84, 0xd2, 0x3c, 0x71, 0x2c, 0x71, 0x46, 0x63, 0x76, 0xfc, 0x58, 0x22, + 0x05, 
0x48, 0x22, 0xe6, 0xaf, 0x1a, 0xb8, 0x76, 0xb5, 0x8a, 0xbd, 0x0b, 0xca, 0x38, 0x70, 0xe5, + 0xb1, 0x87, 0xb5, 0x9a, 0x13, 0x79, 0xdd, 0x7a, 0xba, 0x1b, 0x0b, 0x51, 0x82, 0x0b, 0xf2, 0x30, + 0x18, 0xd1, 0xd2, 0x23, 0xf2, 0xd0, 0x25, 0x43, 0x09, 0x0e, 0x37, 0xc0, 0xdc, 0xf0, 0x47, 0x16, + 0xc9, 0x28, 0x48, 0x85, 0xeb, 0xd1, 0xa0, 0x36, 0x87, 0xd2, 0x00, 0x1a, 0xe7, 0x99, 0x3f, 0xeb, + 0x60, 0x79, 0x8f, 0x74, 0x0f, 0x5f, 0xcf, 0x5d, 0xf0, 0x6c, 0xec, 0x2e, 0xd8, 0x9c, 0x3e, 0xb1, + 0xd9, 0x51, 0xbe, 0xb6, 0xfb, 0xe0, 0x7b, 0x1d, 0xdc, 0x9a, 0x12, 0x13, 0x3c, 0x06, 0x90, 0x5e, + 0x18, 0x2f, 0x95, 0x47, 0x7b, 0x6a, 0x2c, 0x17, 0xa7, 0xb2, 0x71, 0x23, 0x1a, 0xd4, 0x32, 0xa6, + 0x15, 0x65, 0xb8, 0x80, 0xdf, 0x68, 0x60, 0xc9, 0xcb, 0xba, 0xa9, 0x54, 0x9a, 0xeb, 0x53, 0x9d, + 0x67, 0xde, 0x71, 0x8d, 0x9b, 0xd1, 0xa0, 0x96, 0x7d, 0xfd, 0xa1, 0x6c, 0x5f, 0xe2, 0x95, 0xb9, + 0x91, 0x4a, 0x8f, 0x18, 0x90, 0xff, 0xae, 0xaf, 0x3e, 0x1e, 0xeb, 0xab, 0x8d, 0xcb, 0xf6, 0x55, + 0x2a, 0xc8, 0x89, 0x6d, 0xf5, 0xe9, 0xb9, 0xb6, 0xba, 0x77, 0x99, 0xb6, 0x4a, 0x1b, 0x9e, 0xde, + 0x55, 0x8f, 0xc1, 0xca, 0xe4, 0x80, 0xae, 0x7c, 0x39, 0x9b, 0x3f, 0xea, 0x60, 0xf1, 0xcd, 0x33, + 0x7f, 0x95, 0xb1, 0xfe, 0xad, 0x00, 0x96, 0xdf, 0x8c, 0xf4, 0xa4, 0x45, 0x27, 0x64, 0x84, 0xaa, + 0x67, 0x7c, 0x54, 0x9c, 0x7d, 0x46, 0x28, 0x92, 0x08, 0x34, 0xc1, 0x4c, 0x3b, 0x7e, 0xdd, 0xe2, + 0xf7, 0x07, 0x88, 0x04, 0xab, 0xa7, 0x4d, 0x21, 0xb0, 0x05, 0x8a, 0x44, 0xec, 0xad, 0x46, 0x71, + 0x35, 0xbf, 0x56, 0xa9, 0x7f, 0xf0, 0x6f, 0x3a, 0xc3, 0x92, 0x9b, 0xef, 0x8e, 0xc7, 0xe9, 0x49, + 0xb2, 0x4e, 0x48, 0x19, 0x8a, 0x8d, 0xc3, 0xff, 0x83, 0x7c, 0xe8, 0xb6, 0xd4, 0x6b, 0x5f, 0x51, + 0x94, 0xfc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x15, 0xac, 0x96, 0x67, 0x69, 0x02, 0x2e, 0x80, 0xfc, + 0x11, 0x39, 0x89, 0x07, 0x0a, 0x89, 0x4f, 0xf8, 0x10, 0x14, 0xfb, 0x62, 0xaf, 0x56, 0xf9, 0x7d, + 0x67, 0x6a, 0x90, 0xc9, 0x1a, 0x8e, 0x62, 0xad, 0x07, 0xfa, 0xa6, 0x66, 0xfe, 0xae, 0x81, 0x9b, + 0x13, 0xdb, 0x4f, 0xac, 0x3b, 0xb8, 0xdb, 0xf5, 0x8f, 0x49, 0x4b, 0xba, 0x2d, 0x25, 0xeb, 0xce, + 0x56, 0x2c, 0x46, 0x43, 0x1c, 0xbe, 0x0d, 0x66, 0x28, 0xc1, 0xcc, 0xf7, 0xd4, 0x8a, 0x35, 0xea, + 0x5c, 0x24, 0xa5, 0x48, 0xa1, 0x70, 0x0b, 0xcc, 0x13, 0xe1, 0x5e, 0xc6, 0xb5, 0x43, 0xa9, 0x3f, + 0xac, 0xd4, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0xbe, 0x70, 0xd5, 0x22, 0x9e, 0x4b, + 0x5a, 0x72, 0x07, 0x2b, 0x25, 0xae, 0xb6, 0xa5, 0x14, 0x29, 0xd4, 0xfc, 0x5b, 0x07, 0xc6, 0xa4, + 0xab, 0x0d, 0x1e, 0x26, 0xbb, 0x88, 0x04, 0xe5, 0x3a, 0x54, 0xa9, 0xdf, 0xbe, 0xd4, 0x80, 0x08, + 0x8d, 0xc6, 0x92, 0x72, 0x3b, 0x97, 0x96, 0xa6, 0x56, 0x17, 0xf9, 0x0b, 0x29, 0x58, 0xf0, 0xc6, + 0x77, 0xe6, 0x78, 0xa9, 0xaa, 0xd4, 0xef, 0x5c, 0x76, 0x1c, 0xa4, 0x37, 0x43, 0x79, 0x5b, 0x38, + 0x07, 0x30, 0x74, 0xc1, 0x3e, 0xac, 0x03, 0xe0, 0x7a, 0x8e, 0xdf, 0x0b, 0xba, 0x84, 0x13, 0x99, + 0xde, 0x52, 0x72, 0x0f, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xd5, 0xa5, 0x70, 0xb5, 0xba, 0x34, + 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, + 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, + 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x85, 0x45, 0x74, + 0x47, 0x0f, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go index be8913e..86b05c5 100644 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ b/vendor/k8s.io/api/authorization/v1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // 
+genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SubjectAccessReview checks whether or not a user or group can perform an action. @@ -43,7 +43,7 @@ type SubjectAccessReview struct { // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a @@ -63,7 +63,7 @@ type SelfSubjectAccessReview struct { } // +genclient -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. @@ -189,7 +189,7 @@ type SubjectAccessReviewStatus struct { // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go index 7046f11..ea4f802 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ b/vendor/k8s.io/api/authorization/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=authorization.k8s.io - package v1beta1 // import "k8s.io/api/authorization/v1beta1" diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go index 647c0c5..75ee6cf 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go @@ -14,24 +14,44 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto + + It has these top-level messages: + ExtraValue + LocalSubjectAccessReview + NonResourceAttributes + NonResourceRule + ResourceAttributes + ResourceRule + SelfSubjectAccessReview + SelfSubjectAccessReviewSpec + SelfSubjectRulesReview + SelfSubjectRulesReviewSpec + SubjectAccessReview + SubjectAccessReviewSpec + SubjectAccessReviewStatus + SubjectRulesReviewStatus +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,400 +62,76 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. 
// A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{0} -} -func (m *ExtraValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExtraValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtraValue.Merge(m, src) -} -func (m *ExtraValue) XXX_Size() int { - return m.Size() -} -func (m *ExtraValue) XXX_DiscardUnknown() { - xxx_messageInfo_ExtraValue.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } func (*LocalSubjectAccessReview) ProtoMessage() {} func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{1} -} -func (m *LocalSubjectAccessReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LocalSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LocalSubjectAccessReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_LocalSubjectAccessReview.Merge(m, src) -} -func (m *LocalSubjectAccessReview) XXX_Size() int { - return m.Size() -} -func (m *LocalSubjectAccessReview) XXX_DiscardUnknown() { - xxx_messageInfo_LocalSubjectAccessReview.DiscardUnknown(m) -} - -var xxx_messageInfo_LocalSubjectAccessReview proto.InternalMessageInfo - -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{2} -} -func (m *NonResourceAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NonResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NonResourceAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonResourceAttributes.Merge(m, src) -} -func (m *NonResourceAttributes) XXX_Size() int { - return m.Size() -} -func (m *NonResourceAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_NonResourceAttributes.DiscardUnknown(m) + return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_NonResourceAttributes proto.InternalMessageInfo +func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } +func (*NonResourceAttributes) ProtoMessage() {} +func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { - return 
fileDescriptor_43130d8376f09103, []int{3} -} -func (m *NonResourceRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NonResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NonResourceRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_NonResourceRule.Merge(m, src) -} -func (m *NonResourceRule) XXX_Size() int { - return m.Size() -} -func (m *NonResourceRule) XXX_DiscardUnknown() { - xxx_messageInfo_NonResourceRule.DiscardUnknown(m) -} +func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } +func (*NonResourceRule) ProtoMessage() {} +func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_NonResourceRule proto.InternalMessageInfo +func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } +func (*ResourceAttributes) ProtoMessage() {} +func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{4} -} -func (m *ResourceAttributes) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceAttributes) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceAttributes.Merge(m, src) -} -func (m *ResourceAttributes) XXX_Size() int { - return m.Size() -} -func (m *ResourceAttributes) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceAttributes.DiscardUnknown(m) -} +func (m *ResourceRule) Reset() { *m = ResourceRule{} } +func (*ResourceRule) ProtoMessage() {} +func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_ResourceAttributes proto.InternalMessageInfo - -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{5} -} -func (m *ResourceRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceRule.Merge(m, src) -} -func (m *ResourceRule) XXX_Size() int { - return m.Size() -} -func (m *ResourceRule) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceRule.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceRule proto.InternalMessageInfo - -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{6} -} -func (m *SelfSubjectAccessReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SelfSubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m 
*SelfSubjectAccessReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_SelfSubjectAccessReview.Merge(m, src) -} -func (m *SelfSubjectAccessReview) XXX_Size() int { - return m.Size() -} -func (m *SelfSubjectAccessReview) XXX_DiscardUnknown() { - xxx_messageInfo_SelfSubjectAccessReview.DiscardUnknown(m) -} - -var xxx_messageInfo_SelfSubjectAccessReview proto.InternalMessageInfo +func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } +func (*SelfSubjectAccessReview) ProtoMessage() {} +func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{7} -} -func (m *SelfSubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SelfSubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SelfSubjectAccessReviewSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_SelfSubjectAccessReviewSpec.Merge(m, src) -} -func (m *SelfSubjectAccessReviewSpec) XXX_Size() int { - return m.Size() -} -func (m *SelfSubjectAccessReviewSpec) XXX_DiscardUnknown() { - xxx_messageInfo_SelfSubjectAccessReviewSpec.DiscardUnknown(m) + return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_SelfSubjectAccessReviewSpec proto.InternalMessageInfo - -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{8} -} -func (m *SelfSubjectRulesReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SelfSubjectRulesReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SelfSubjectRulesReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_SelfSubjectRulesReview.Merge(m, src) -} -func (m *SelfSubjectRulesReview) XXX_Size() int { - return m.Size() -} -func (m *SelfSubjectRulesReview) XXX_DiscardUnknown() { - xxx_messageInfo_SelfSubjectRulesReview.DiscardUnknown(m) -} - -var xxx_messageInfo_SelfSubjectRulesReview proto.InternalMessageInfo +func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } +func (*SelfSubjectRulesReview) ProtoMessage() {} +func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{9} -} -func (m *SelfSubjectRulesReviewSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SelfSubjectRulesReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SelfSubjectRulesReviewSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_SelfSubjectRulesReviewSpec.Merge(m, src) -} -func (m 
*SelfSubjectRulesReviewSpec) XXX_Size() int { - return m.Size() -} -func (m *SelfSubjectRulesReviewSpec) XXX_DiscardUnknown() { - xxx_messageInfo_SelfSubjectRulesReviewSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_SelfSubjectRulesReviewSpec proto.InternalMessageInfo - -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{10} -} -func (m *SubjectAccessReview) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubjectAccessReview) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SubjectAccessReview) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubjectAccessReview.Merge(m, src) -} -func (m *SubjectAccessReview) XXX_Size() int { - return m.Size() -} -func (m *SubjectAccessReview) XXX_DiscardUnknown() { - xxx_messageInfo_SubjectAccessReview.DiscardUnknown(m) + return fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_SubjectAccessReview proto.InternalMessageInfo +func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } +func (*SubjectAccessReview) ProtoMessage() {} +func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } func (*SubjectAccessReviewSpec) ProtoMessage() {} func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{11} -} -func (m *SubjectAccessReviewSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubjectAccessReviewSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SubjectAccessReviewSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubjectAccessReviewSpec.Merge(m, src) -} -func (m *SubjectAccessReviewSpec) XXX_Size() int { - return m.Size() -} -func (m *SubjectAccessReviewSpec) XXX_DiscardUnknown() { - xxx_messageInfo_SubjectAccessReviewSpec.DiscardUnknown(m) + return fileDescriptorGenerated, []int{11} } -var xxx_messageInfo_SubjectAccessReviewSpec proto.InternalMessageInfo - func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } func (*SubjectAccessReviewStatus) ProtoMessage() {} func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{12} -} -func (m *SubjectAccessReviewStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return fileDescriptorGenerated, []int{12} } -func (m *SubjectAccessReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SubjectAccessReviewStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubjectAccessReviewStatus.Merge(m, src) -} -func (m *SubjectAccessReviewStatus) XXX_Size() int { - return m.Size() -} -func (m *SubjectAccessReviewStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SubjectAccessReviewStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_SubjectAccessReviewStatus proto.InternalMessageInfo func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } func (*SubjectRulesReviewStatus) 
ProtoMessage() {} func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_43130d8376f09103, []int{13} -} -func (m *SubjectRulesReviewStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SubjectRulesReviewStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SubjectRulesReviewStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_SubjectRulesReviewStatus.Merge(m, src) -} -func (m *SubjectRulesReviewStatus) XXX_Size() int { - return m.Size() -} -func (m *SubjectRulesReviewStatus) XXX_DiscardUnknown() { - xxx_messageInfo_SubjectRulesReviewStatus.DiscardUnknown(m) + return fileDescriptorGenerated, []int{13} } -var xxx_messageInfo_SubjectRulesReviewStatus proto.InternalMessageInfo - func init() { proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") @@ -449,95 +145,13 @@ func init() { proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") - proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec.ExtraEntry") proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptor_43130d8376f09103) -} - -var fileDescriptor_43130d8376f09103 = []byte{ - // 1141 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xf7, 0xfa, 0x4f, 0x62, 0x8f, 0x1b, 0x92, 0x4e, 0x94, 0x66, 0x1b, 0x84, 0x6d, 0x19, 0x09, - 0x05, 0xd1, 0xee, 0x92, 0xa8, 0x90, 0x12, 0xe8, 0x21, 0x56, 0x22, 0x14, 0xa9, 0x2d, 0xd5, 0x44, - 0xc9, 0x81, 0x4a, 0xc0, 0xec, 0x7a, 0x62, 0x2f, 0xb6, 0x77, 0x97, 0x99, 0x59, 0x87, 0x20, 0x0e, - 0x3d, 0x72, 0xe4, 0xc8, 0x91, 0x13, 0xdf, 0x81, 0x0b, 0x12, 0x9c, 0x72, 0xec, 0x31, 0x48, 0xc8, - 0x22, 0xcb, 0x87, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, - 0xdb, 0x79, 0xbf, 0xdf, 0x7b, 0xf3, 0xde, 0x9b, 0xf7, 0xde, 0x3e, 0xb0, 0xdb, 0x79, 0xc8, 0x0c, - 0xc7, 0x33, 0x3b, 0x81, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0x66, 0x9f, 0xb8, 0x4d, 0x8f, 0x9a, 0x0a, - 0xc0, 0xbe, 0x63, 0xe2, 0x80, 0xb7, 0x3d, 0xea, 0x7c, 0x87, 0xb9, 0xe3, 0xb9, 0x66, 0x7f, 0xcd, - 0x22, 0x1c, 0xaf, 0x99, 0x2d, 0xe2, 0x12, 0x8a, 0x39, 0x69, 0x1a, 0x3e, 0xf5, 0xb8, 0x07, 0x6b, - 0x91, 0x86, 0x81, 0x7d, 0xc7, 0x18, 0xd3, 0x30, 0x94, 0xc6, 0xca, 0xfd, 0x96, 0xc3, 0xdb, 0x81, - 0x65, 0xd8, 0x5e, 0xcf, 0x6c, 0x79, 0x2d, 0xcf, 0x94, 0x8a, 0x56, 0x70, 0x28, 0x4f, 0xf2, 0x20, - 0xbf, 0x22, 0x83, 0x2b, 0x0f, 0x62, 0x17, 0x7a, 0xd8, 0x6e, 0x3b, 0x2e, 0xa1, 0xc7, 0xa6, 0xdf, - 0x69, 0x09, 0x01, 0x33, 0x7b, 0x84, 0x63, 0xb3, 0x7f, 0xc1, 0x8d, 0x15, 0xf3, 0x32, 0x2d, 0x1a, - 0xb8, 0xdc, 0xe9, 0x91, 0x0b, 0x0a, 0x1f, 0x4e, 0x52, 0x60, 0x76, 0x9b, 0xf4, 0xf0, 0x79, 0xbd, - 0xfa, 
0x06, 0x00, 0x3b, 0xdf, 0x72, 0x8a, 0x0f, 0x70, 0x37, 0x20, 0xb0, 0x0a, 0x0a, 0x0e, 0x27, - 0x3d, 0xa6, 0x6b, 0xb5, 0xdc, 0x6a, 0xa9, 0x51, 0x0a, 0x07, 0xd5, 0xc2, 0xae, 0x10, 0xa0, 0x48, - 0xbe, 0x59, 0xfc, 0xe9, 0xe7, 0x6a, 0xe6, 0xc5, 0x5f, 0xb5, 0x4c, 0xfd, 0xb7, 0x2c, 0xd0, 0x1f, - 0x7b, 0x36, 0xee, 0xee, 0x05, 0xd6, 0xd7, 0xc4, 0xe6, 0x5b, 0xb6, 0x4d, 0x18, 0x43, 0xa4, 0xef, - 0x90, 0x23, 0xf8, 0x15, 0x28, 0x8a, 0xc8, 0x9a, 0x98, 0x63, 0x5d, 0xab, 0x69, 0xab, 0xe5, 0xf5, - 0xf7, 0x8d, 0x38, 0xb1, 0x23, 0x07, 0x0d, 0xbf, 0xd3, 0x12, 0x02, 0x66, 0x08, 0xb6, 0xd1, 0x5f, - 0x33, 0x3e, 0x93, 0xb6, 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, - 0x65, 0x68, 0x64, 0x15, 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xeb, 0x1f, 0x19, 0x93, - 0x9e, 0xcd, 0x48, 0x71, 0x73, 0xcf, 0x27, 0x76, 0xe3, 0x96, 0xba, 0x26, 0x2f, 0x4e, 0x48, 0x1a, - 0x85, 0x36, 0x98, 0x61, 0x1c, 0xf3, 0x80, 0xe9, 0x39, 0x69, 0xfe, 0xe3, 0x9b, 0x99, 0x97, 0x26, - 0x1a, 0x6f, 0xa8, 0x0b, 0x66, 0xa2, 0x33, 0x52, 0xa6, 0xeb, 0xcf, 0xc1, 0xd2, 0x53, 0xcf, 0x45, - 0x84, 0x79, 0x01, 0xb5, 0xc9, 0x16, 0xe7, 0xd4, 0xb1, 0x02, 0x4e, 0x18, 0xac, 0x81, 0xbc, 0x8f, - 0x79, 0x5b, 0x26, 0xae, 0x14, 0xfb, 0xf7, 0x0c, 0xf3, 0x36, 0x92, 0x88, 0x60, 0xf4, 0x09, 0xb5, - 0x64, 0xf0, 0x09, 0xc6, 0x01, 0xa1, 0x16, 0x92, 0x48, 0xfd, 0x1b, 0x30, 0x9f, 0x30, 0x8e, 0x82, - 0xae, 0x7c, 0x5b, 0x01, 0x8d, 0xbd, 0xad, 0xd0, 0x60, 0x28, 0x92, 0xc3, 0x47, 0x60, 0xde, 0x8d, - 0x75, 0xf6, 0xd1, 0x63, 0xa6, 0x67, 0x25, 0x75, 0x31, 0x1c, 0x54, 0x93, 0xe6, 0x04, 0x84, 0xce, - 0x73, 0x45, 0x41, 0xc0, 0x94, 0x68, 0x4c, 0x50, 0x72, 0x71, 0x8f, 0x30, 0x1f, 0xdb, 0x44, 0x85, - 0x74, 0x5b, 0x39, 0x5c, 0x7a, 0x3a, 0x04, 0x50, 0xcc, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, - 0xd4, 0x0b, 0x7c, 0xf9, 0x3a, 0xa5, 0xc6, 0x9c, 0xa2, 0x14, 0x3e, 0x15, 0x42, 0x14, 0x61, 0xf0, - 0x5d, 0x30, 0xdb, 0x27, 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0x6d, 0x5e, 0xd1, 0x66, 0x0f, 0x22, - 0x31, 0x1a, 0xe2, 0xf0, 0x1e, 0x28, 0x52, 0xe5, 0xb8, 0x5e, 0x90, 0xdc, 0x05, 0xc5, 0x2d, 0x8e, - 0x32, 0x38, 0x62, 0xc0, 0x0f, 0x40, 0x99, 0x05, 0xd6, 0x48, 0x61, 0x46, 0x2a, 0x2c, 0x2a, 0x85, - 0xf2, 0x5e, 0x0c, 0xa1, 0x24, 0x4f, 0x84, 0x25, 0x62, 0xd4, 0x67, 0xc7, 0xc3, 0x12, 0x29, 0x40, - 0x12, 0xa9, 0xff, 0xa1, 0x81, 0x5b, 0xd3, 0xbd, 0xd8, 0x7b, 0xa0, 0x84, 0x7d, 0x47, 0x86, 0x3d, - 0x7c, 0xab, 0x39, 0x91, 0xd7, 0xad, 0x67, 0xbb, 0x91, 0x10, 0xc5, 0xb8, 0x20, 0x0f, 0x9d, 0x11, - 0x75, 0x3d, 0x22, 0x0f, 0xaf, 0x64, 0x28, 0xc6, 0xe1, 0x06, 0x98, 0x1b, 0x1e, 0xe4, 0x23, 0xe9, - 0x79, 0xa9, 0x70, 0x3b, 0x1c, 0x54, 0xe7, 0x50, 0x12, 0x40, 0xe3, 0xbc, 0xfa, 0xef, 0x59, 0xb0, - 0xbc, 0x47, 0xba, 0x87, 0xaf, 0x66, 0x2a, 0x7c, 0x39, 0x36, 0x15, 0x1e, 0x5d, 0xa3, 0x6d, 0xd3, - 0x5d, 0x7d, 0xb5, 0x93, 0xe1, 0x97, 0x2c, 0x78, 0xf3, 0x0a, 0xc7, 0xe0, 0xf7, 0x00, 0xd2, 0x0b, - 0x8d, 0xa6, 0x32, 0xfa, 0x60, 0xb2, 0x43, 0x17, 0x9b, 0xb4, 0x71, 0x27, 0x1c, 0x54, 0x53, 0x9a, - 0x17, 0xa5, 0xdc, 0x03, 0x7f, 0xd0, 0xc0, 0x92, 0x9b, 0x36, 0xb8, 0x54, 0xd6, 0x37, 0x26, 0x7b, - 0x90, 0x3a, 0xf7, 0x1a, 0x77, 0xc3, 0x41, 0x35, 0x7d, 0x24, 0xa2, 0xf4, 0x0b, 0xc5, 0xc8, 0xb9, - 0x93, 0x48, 0x94, 0x68, 0x9a, 0xff, 0xaf, 0xd6, 0xbe, 0x18, 0xab, 0xb5, 0x4f, 0xa6, 0xaa, 0xb5, - 0x84, 0xa7, 0x97, 0x96, 0x9a, 0x75, 0xae, 0xd4, 0x36, 0xaf, 0x5d, 0x6a, 0x49, 0xeb, 0x57, 0x57, - 0xda, 0x13, 0xb0, 0x72, 0xb9, 0x57, 0x53, 0x8f, 0xee, 0xfa, 0xaf, 0x59, 0xb0, 0xf8, 0x7a, 0x1d, - 0xb8, 0x59, 0xd3, 0x9f, 0xe6, 0xc1, 0xf2, 0xeb, 0x86, 0xbf, 0xba, 0xe1, 0xc5, 0x4f, 0x34, 0x60, - 0x84, 0xaa, 0x1f, 0xff, 0xe8, 
0xad, 0xf6, 0x19, 0xa1, 0x48, 0x22, 0xb0, 0x36, 0xdc, 0x0d, 0xa2, - 0x1f, 0x16, 0x10, 0x99, 0x56, 0xff, 0x42, 0xb5, 0x18, 0x38, 0xa0, 0x40, 0xc4, 0xc6, 0xab, 0x17, - 0x6a, 0xb9, 0xd5, 0xf2, 0xfa, 0xf6, 0x8d, 0x6b, 0xc5, 0x90, 0x8b, 0xf3, 0x8e, 0xcb, 0xe9, 0x71, - 0xbc, 0x83, 0x48, 0x19, 0x8a, 0x6e, 0x80, 0x6f, 0x81, 0x5c, 0xe0, 0x34, 0xd5, 0x8a, 0x50, 0x56, - 0x94, 0xdc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x95, 0x43, 0xb5, 0x7b, 0x4b, 0x13, 0x70, 0x01, 0xe4, - 0x3a, 0xe4, 0x38, 0xea, 0x33, 0x24, 0x3e, 0x61, 0x03, 0x14, 0xfa, 0x62, 0x2d, 0x57, 0x79, 0xbe, - 0x37, 0xd9, 0xd3, 0x78, 0x95, 0x47, 0x91, 0xea, 0x66, 0xf6, 0xa1, 0x56, 0xff, 0x53, 0x03, 0x77, - 0x2f, 0x2d, 0x48, 0xb1, 0x28, 0xe1, 0x6e, 0xd7, 0x3b, 0x22, 0x4d, 0x79, 0x77, 0x31, 0x5e, 0x94, - 0xb6, 0x22, 0x31, 0x1a, 0xe2, 0xf0, 0x1d, 0x30, 0xd3, 0x24, 0xae, 0x43, 0x9a, 0x72, 0xa5, 0x2a, - 0xc6, 0xb5, 0xbc, 0x2d, 0xa5, 0x48, 0xa1, 0x82, 0x47, 0x09, 0x66, 0x9e, 0xab, 0x96, 0xb8, 0x11, - 0x0f, 0x49, 0x29, 0x52, 0x28, 0xdc, 0x02, 0xf3, 0x44, 0xb8, 0x29, 0x83, 0xd8, 0xa1, 0xd4, 0x1b, - 0xbe, 0xec, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0x7e, 0xfd, 0xdf, 0x2c, 0xd0, 0x2f, - 0x1b, 0x7b, 0xb0, 0x13, 0x6f, 0x31, 0x12, 0x94, 0x8b, 0x54, 0x79, 0xdd, 0xb8, 0x7e, 0xcb, 0x08, - 0xb5, 0xc6, 0x92, 0xf2, 0x66, 0x2e, 0x29, 0x4d, 0x6c, 0x3e, 0xf2, 0x08, 0x8f, 0xc0, 0x82, 0x3b, - 0xbe, 0x72, 0x47, 0x3b, 0x59, 0x79, 0x7d, 0x6d, 0xaa, 0x06, 0x91, 0x57, 0xea, 0xea, 0xca, 0x85, - 0x73, 0x00, 0x43, 0x17, 0x2e, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, - 0x26, 0xb0, 0x18, 0x4f, 0xcb, 0xdd, 0x11, 0x82, 0x12, 0xac, 0xb4, 0xcc, 0xe7, 0xa7, 0xcb, 0x7c, - 0xe3, 0xfe, 0xc9, 0x59, 0x25, 0xf3, 0xf2, 0xac, 0x92, 0x39, 0x3d, 0xab, 0x64, 0x5e, 0x84, 0x15, - 0xed, 0x24, 0xac, 0x68, 0x2f, 0xc3, 0x8a, 0x76, 0x1a, 0x56, 0xb4, 0xbf, 0xc3, 0x8a, 0xf6, 0xe3, - 0x3f, 0x95, 0xcc, 0xe7, 0xb3, 0x2a, 0xc2, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x7b, 0xc9, 0xa5, - 0x34, 0xa4, 0x0f, 0x00, 0x00, -} - func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -545,31 +159,32 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m) > 0 { - for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m[iNdEx]) - copy(dAtA[i:], m[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) - i-- + for _, s := range m { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -577,52 +192,41 @@ func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LocalSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := 
m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -630,32 +234,25 @@ func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NonResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Verb) - copy(dAtA[i:], m.Verb) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i-- - dAtA[i] = 0x12 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i += copy(dAtA[i:], m.Verb) + return i, nil } func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -663,40 +260,47 @@ func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { } func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NonResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.NonResourceURLs) > 0 { - for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.NonResourceURLs[iNdEx]) - copy(dAtA[i:], m.NonResourceURLs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } if len(m.Verbs) > 0 { - for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Verbs[iNdEx]) - copy(dAtA[i:], m.Verbs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) - i-- + for _, s := range m.Verbs { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x12 + 
i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil } func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -704,57 +308,45 @@ func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { } func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x3a - i -= len(m.Subresource) - copy(dAtA[i:], m.Subresource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i-- - dAtA[i] = 0x32 - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x2a - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x22 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0x1a - i -= len(m.Verb) - copy(dAtA[i:], m.Verb) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) + i += copy(dAtA[i:], m.Verb) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) + i += copy(dAtA[i:], m.Subresource) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil } func (m *ResourceRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -762,58 +354,77 @@ func (m *ResourceRule) Marshal() (dAtA []byte, err error) { } func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Resources[iNdEx]))) - i-- - dAtA[i] = 0x1a + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } if len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) - i-- + for _, s := range m.APIGroups { dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Verbs) > 0 { - for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Verbs[iNdEx]) - copy(dAtA[i:], m.Verbs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.Resources) > 0 { + for _, s := range m.Resources { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -821,52 +432,41 @@ func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SelfSubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n4 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n5, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n6, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + return i, nil } func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -874,46 +474,37 @@ func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) 
(int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SelfSubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.NonResourceAttributes != nil { - { - size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) + n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n7 } - if m.ResourceAttributes != nil { - { - size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n8 } - return len(dAtA) - i, nil + return i, nil } func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -921,52 +512,41 @@ func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SelfSubjectRulesReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n9 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n11, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil } func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -974,27 +554,21 @@ func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SelfSubjectRulesReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Namespace) - 
copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil } func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1002,52 +576,41 @@ func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubjectAccessReview) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n12 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n13 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil } func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1055,94 +618,91 @@ func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubjectAccessReviewSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x32 - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { - v := m.Extra[string(keysForExtra[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForExtra[iNdEx]) - copy(dAtA[i:], keysForExtra[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a + if m.ResourceAttributes != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.ResourceAttributes.Size())) + n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n15 } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Groups[iNdEx]) - copy(dAtA[i:], m.Groups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) - i-- - dAtA[i] = 0x22 + if m.NonResourceAttributes != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) + n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n16 } - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i-- dAtA[i] = 0x1a - if m.NonResourceAttributes != nil { - { - size, err := m.NonResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x12 } - if m.ResourceAttributes != nil { - { - size, err := m.ResourceAttributes.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Extra) > 0 { + keysForExtra := make([]string, 0, len(m.Extra)) + for k := range m.Extra { + keysForExtra = append(keysForExtra, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) + for _, k := range keysForExtra { + dAtA[i] = 0x2a + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n17, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n17 } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + return i, nil } func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1150,48 +710,41 @@ func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubjectAccessReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i-- - if m.Denied { + dAtA[i] = 0x8 + i++ + if m.Allowed { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 - i -= len(m.EvaluationError) - copy(dAtA[i:], m.EvaluationError) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i-- - dAtA[i] = 0x1a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- + i++ dAtA[i] = 0x12 - i-- - if m.Allowed { + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += 
copy(dAtA[i:], m.Reason) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i += copy(dAtA[i:], m.EvaluationError) + dAtA[i] = 0x20 + i++ + if m.Denied { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + return i, nil } func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1199,74 +752,77 @@ func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { } func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SubjectRulesReviewStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i - var l int - _ = l - i -= len(m.EvaluationError) - copy(dAtA[i:], m.EvaluationError) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i-- - dAtA[i] = 0x22 - i-- - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - if len(m.NonResourceRules) > 0 { - for iNdEx := len(m.NonResourceRules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.NonResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + var l int + _ = l + if len(m.ResourceRules) > 0 { + for _, msg := range m.ResourceRules { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - if len(m.ResourceRules) > 0 { - for iNdEx := len(m.ResourceRules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ResourceRules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.NonResourceRules) > 0 { + for _, msg := range m.NonResourceRules { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n } } - return len(dAtA) - i, nil + dAtA[i] = 0x18 + i++ + if m.Incomplete { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) + i += copy(dAtA[i:], m.EvaluationError) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m ExtraValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m) > 0 { @@ -1279,9 +835,6 @@ func (m ExtraValue) Size() (n int) { } func (m *LocalSubjectAccessReview) Size() (n int) 
{ - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1294,9 +847,6 @@ func (m *LocalSubjectAccessReview) Size() (n int) { } func (m *NonResourceAttributes) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Path) @@ -1307,9 +857,6 @@ func (m *NonResourceAttributes) Size() (n int) { } func (m *NonResourceRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Verbs) > 0 { @@ -1328,9 +875,6 @@ func (m *NonResourceRule) Size() (n int) { } func (m *ResourceAttributes) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Namespace) @@ -1351,9 +895,6 @@ func (m *ResourceAttributes) Size() (n int) { } func (m *ResourceRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Verbs) > 0 { @@ -1384,9 +925,6 @@ func (m *ResourceRule) Size() (n int) { } func (m *SelfSubjectAccessReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1399,9 +937,6 @@ func (m *SelfSubjectAccessReview) Size() (n int) { } func (m *SelfSubjectAccessReviewSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResourceAttributes != nil { @@ -1416,9 +951,6 @@ func (m *SelfSubjectAccessReviewSpec) Size() (n int) { } func (m *SelfSubjectRulesReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1431,9 +963,6 @@ func (m *SelfSubjectRulesReview) Size() (n int) { } func (m *SelfSubjectRulesReviewSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Namespace) @@ -1442,9 +971,6 @@ func (m *SelfSubjectRulesReviewSpec) Size() (n int) { } func (m *SubjectAccessReview) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1457,9 +983,6 @@ func (m *SubjectAccessReview) Size() (n int) { } func (m *SubjectAccessReviewSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ResourceAttributes != nil { @@ -1493,9 +1016,6 @@ func (m *SubjectAccessReviewSpec) Size() (n int) { } func (m *SubjectAccessReviewStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -1508,9 +1028,6 @@ func (m *SubjectAccessReviewStatus) Size() (n int) { } func (m *SubjectRulesReviewStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.ResourceRules) > 0 { @@ -1532,7 +1049,14 @@ func (m *SubjectRulesReviewStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1542,7 +1066,7 @@ func (this *LocalSubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1605,7 +1129,7 @@ func (this *SelfSubjectAccessReview) String() string { return "nil" } s := 
strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1617,8 +1141,8 @@ func (this *SelfSubjectAccessReviewSpec) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `}`, }, "") return s @@ -1628,7 +1152,7 @@ func (this *SelfSubjectRulesReview) String() string { return "nil" } s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1650,7 +1174,7 @@ func (this *SubjectAccessReview) String() string { return "nil" } s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1672,8 +1196,8 @@ func (this *SubjectAccessReviewSpec) String() string { } mapStringForExtra += "}" s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(this.ResourceAttributes.String(), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(this.NonResourceAttributes.String(), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, + `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, + `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), 
"NonResourceAttributes", "NonResourceAttributes", 1) + `,`, `User:` + fmt.Sprintf("%v", this.User) + `,`, `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Extra:` + mapStringForExtra + `,`, @@ -1699,19 +1223,9 @@ func (this *SubjectRulesReviewStatus) String() string { if this == nil { return "nil" } - repeatedStringForResourceRules := "[]ResourceRule{" - for _, f := range this.ResourceRules { - repeatedStringForResourceRules += strings.Replace(strings.Replace(f.String(), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + "," - } - repeatedStringForResourceRules += "}" - repeatedStringForNonResourceRules := "[]NonResourceRule{" - for _, f := range this.NonResourceRules { - repeatedStringForNonResourceRules += strings.Replace(strings.Replace(f.String(), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + "," - } - repeatedStringForNonResourceRules += "}" s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + repeatedStringForResourceRules + `,`, - `NonResourceRules:` + repeatedStringForNonResourceRules + `,`, + `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, + `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, `}`, @@ -1741,7 +1255,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1769,7 +1283,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1779,9 +1293,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1796,9 +1307,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1826,7 +1334,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1854,7 +1362,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1863,9 +1371,6 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1887,7 +1392,7 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1896,9 +1401,6 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1920,7 +1422,7 @@ func 
(m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1929,9 +1431,6 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1948,9 +1447,6 @@ func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1978,7 +1474,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2006,7 +1502,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2016,9 +1512,6 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2038,7 +1531,7 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2048,9 +1541,6 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2065,9 +1555,6 @@ func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2095,7 +1582,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2123,7 +1610,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2133,9 +1620,6 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2155,7 +1639,7 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2165,9 +1649,6 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2182,9 +1663,6 @@ func (m *NonResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2212,7 +1690,7 @@ func (m *ResourceAttributes) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2240,7 +1718,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2250,9 +1728,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2272,7 +1747,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2282,9 +1757,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2304,7 +1776,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2314,9 +1786,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2336,7 +1805,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2346,9 +1815,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2368,7 +1834,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2378,9 +1844,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2400,7 +1863,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2410,9 +1873,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2432,7 +1892,7 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2442,9 +1902,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2459,9 +1916,6 @@ func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2489,7 +1943,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2517,7 +1971,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2527,9 +1981,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2549,7 +2000,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2559,9 +2010,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2581,7 +2029,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2591,9 +2039,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2613,7 +2058,7 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2623,9 +2068,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2640,9 +2082,6 @@ func (m *ResourceRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2670,7 +2109,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2698,7 +2137,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2707,9 +2146,6 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2731,7 +2167,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2740,9 +2176,6 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -2764,7 +2197,7 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2773,9 +2206,6 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2792,9 +2222,6 @@ func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2822,7 +2249,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2850,7 +2277,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2859,9 +2286,6 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2886,7 +2310,7 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2895,9 +2319,6 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2917,9 +2338,6 @@ func (m *SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2947,7 +2365,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2975,7 +2393,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2984,9 +2402,6 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3008,7 +2423,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3017,9 +2432,6 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3041,7 +2453,7 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3050,9 +2462,6 @@ func (m 
*SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3069,9 +2478,6 @@ func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3099,7 +2505,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3127,7 +2533,7 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3137,9 +2543,6 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3154,9 +2557,6 @@ func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3184,7 +2584,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3212,7 +2612,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3221,9 +2621,6 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3245,7 +2642,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3254,9 +2651,6 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3278,7 +2672,7 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3287,9 +2681,6 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3306,9 +2697,6 @@ func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3336,7 +2724,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3364,7 +2752,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3373,9 +2761,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3400,7 +2785,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3409,9 +2794,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3436,7 +2818,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3446,9 +2828,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3468,7 +2847,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3478,9 +2857,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3500,7 +2876,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3509,20 +2885,54 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3532,88 +2942,46 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - m.Extra[mapkey] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -3629,7 +2997,7 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3639,9 +3007,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3656,9 +3021,6 @@ func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3686,7 +3048,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3714,7 +3076,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3734,7 +3096,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3744,9 +3106,6 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3766,7 +3125,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3776,9 +3135,6 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3798,7 +3154,7 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3813,9 +3169,6 @@ func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3843,7 +3196,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3871,7 +3224,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3880,9 +3233,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3905,7 +3255,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3914,9 +3264,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3939,7 +3286,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3959,7 +3306,7 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3969,9 +3316,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3986,9 +3330,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if 
(iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4004,7 +3345,6 @@ func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -4036,8 +3376,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -4054,34 +3396,133 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1137 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcb, 0x6f, 0x1b, 0x45, + 0x18, 0xf7, 0xfa, 0x91, 0xd8, 0xe3, 0x86, 0xa4, 0x13, 0xa5, 0xd9, 0x06, 0x61, 0x5b, 0x46, 0x42, + 0x41, 0xb4, 0xbb, 0x24, 0x2a, 0xa4, 0x04, 0x7a, 0x88, 0x95, 0x08, 0x45, 0x6a, 0x4b, 0x35, 0x51, + 0x72, 0xa0, 0x12, 0x30, 0xbb, 0x9e, 0xd8, 0x8b, 0xed, 0xdd, 0x65, 0x66, 0xd6, 0x21, 0x88, 0x43, + 0x8f, 0x1c, 0x39, 0x72, 0xe4, 0xc4, 0xff, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, 0x64, + 0x91, 0xe5, 0x8f, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, + 0xdb, 0xf9, 0x7e, 0xdf, 0xfb, 0xb5, 0x1f, 0xd8, 0xed, 0x3c, 0x64, 0x86, 0xe3, 0x99, 0x9d, 0xc0, + 0x22, 0xd4, 0x25, 0x9c, 0x30, 0xb3, 0x4f, 0xdc, 0xa6, 0x47, 0x4d, 0x05, 0x60, 0xdf, 0x31, 0x71, + 0xc0, 0xdb, 0x1e, 0x75, 0xbe, 0xc3, 0xdc, 0xf1, 0x5c, 0xb3, 0xbf, 0x66, 0x11, 0x8e, 0xd7, 0xcc, + 0x16, 0x71, 0x09, 0xc5, 0x9c, 0x34, 0x0d, 0x9f, 0x7a, 0xdc, 0x83, 0xb5, 0x48, 0xc2, 0xc0, 0xbe, + 0x63, 0x8c, 0x49, 0x18, 0x4a, 0x62, 0xe5, 0x7e, 0xcb, 0xe1, 0xed, 0xc0, 0x32, 0x6c, 0xaf, 0x67, + 0xb6, 0xbc, 0x96, 0x67, 0x4a, 0x41, 0x2b, 0x38, 0x94, 0x2f, 0xf9, 0x90, 0x5f, 0x91, 0xc2, 0x95, + 0x07, 0xb1, 0x0b, 0x3d, 0x6c, 0xb7, 0x1d, 0x97, 0xd0, 0x63, 0xd3, 0xef, 0xb4, 0x04, 0x81, 0x99, + 0x3d, 0xc2, 
0xb1, 0xd9, 0xbf, 0xe0, 0xc6, 0x8a, 0x79, 0x99, 0x14, 0x0d, 0x5c, 0xee, 0xf4, 0xc8, + 0x05, 0x81, 0x0f, 0x27, 0x09, 0x30, 0xbb, 0x4d, 0x7a, 0xf8, 0xbc, 0x5c, 0x7d, 0x03, 0x80, 0x9d, + 0x6f, 0x39, 0xc5, 0x07, 0xb8, 0x1b, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x1e, 0xd3, 0xb5, 0x5a, + 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0x83, 0x6a, 0x61, 0x57, 0x10, 0x50, 0x44, 0xdf, 0x2c, 0xfe, 0xf4, + 0x73, 0x35, 0xf3, 0xe2, 0xaf, 0x5a, 0xa6, 0xfe, 0x5b, 0x16, 0xe8, 0x8f, 0x3d, 0x1b, 0x77, 0xf7, + 0x02, 0xeb, 0x6b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x77, 0xc8, 0x11, 0xfc, 0x0a, + 0x14, 0x45, 0x64, 0x4d, 0xcc, 0xb1, 0xae, 0xd5, 0xb4, 0xd5, 0xf2, 0xfa, 0xfb, 0x46, 0x9c, 0xd8, + 0x91, 0x83, 0x86, 0xdf, 0x69, 0x09, 0x02, 0x33, 0x04, 0xb7, 0xd1, 0x5f, 0x33, 0x3e, 0x93, 0xba, + 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, 0x69, 0x68, 0xa4, 0x15, + 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xed, 0x1f, 0x19, 0x93, 0xca, 0x66, 0xa4, 0xb8, + 0xb9, 0xe7, 0x13, 0xbb, 0x71, 0x4b, 0x99, 0xc9, 0x8b, 0x17, 0x92, 0x4a, 0xa1, 0x0d, 0x66, 0x18, + 0xc7, 0x3c, 0x60, 0x7a, 0x4e, 0xaa, 0xff, 0xf8, 0x66, 0xea, 0xa5, 0x8a, 0xc6, 0x1b, 0xca, 0xc0, + 0x4c, 0xf4, 0x46, 0x4a, 0x75, 0xfd, 0x39, 0x58, 0x7a, 0xea, 0xb9, 0x88, 0x30, 0x2f, 0xa0, 0x36, + 0xd9, 0xe2, 0x9c, 0x3a, 0x56, 0xc0, 0x09, 0x83, 0x35, 0x90, 0xf7, 0x31, 0x6f, 0xcb, 0xc4, 0x95, + 0x62, 0xff, 0x9e, 0x61, 0xde, 0x46, 0x12, 0x11, 0x1c, 0x7d, 0x42, 0x2d, 0x19, 0x7c, 0x82, 0xe3, + 0x80, 0x50, 0x0b, 0x49, 0xa4, 0xfe, 0x0d, 0x98, 0x4f, 0x28, 0x47, 0x41, 0x57, 0xd6, 0x56, 0x40, + 0x63, 0xb5, 0x15, 0x12, 0x0c, 0x45, 0x74, 0xf8, 0x08, 0xcc, 0xbb, 0xb1, 0xcc, 0x3e, 0x7a, 0xcc, + 0xf4, 0xac, 0x64, 0x5d, 0x0c, 0x07, 0xd5, 0xa4, 0x3a, 0x01, 0xa1, 0xf3, 0xbc, 0xa2, 0x21, 0x60, + 0x4a, 0x34, 0x26, 0x28, 0xb9, 0xb8, 0x47, 0x98, 0x8f, 0x6d, 0xa2, 0x42, 0xba, 0xad, 0x1c, 0x2e, + 0x3d, 0x1d, 0x02, 0x28, 0xe6, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, 0xd4, 0x0b, 0x7c, 0x59, + 0x9d, 0x52, 0x63, 0x4e, 0xb1, 0x14, 0x3e, 0x15, 0x44, 0x14, 0x61, 0xf0, 0x5d, 0x30, 0xdb, 0x27, + 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0xdb, 0xbc, 0x62, 0x9b, 0x3d, 0x88, 0xc8, 0x68, 0x88, 0xc3, + 0x7b, 0xa0, 0x48, 0x95, 0xe3, 0x7a, 0x41, 0xf2, 0x2e, 0x28, 0xde, 0xe2, 0x28, 0x83, 0x23, 0x0e, + 0xf8, 0x01, 0x28, 0xb3, 0xc0, 0x1a, 0x09, 0xcc, 0x48, 0x81, 0x45, 0x25, 0x50, 0xde, 0x8b, 0x21, + 0x94, 0xe4, 0x13, 0x61, 0x89, 0x18, 0xf5, 0xd9, 0xf1, 0xb0, 0x44, 0x0a, 0x90, 0x44, 0xea, 0x7f, + 0x68, 0xe0, 0xd6, 0x74, 0x15, 0x7b, 0x0f, 0x94, 0xb0, 0xef, 0xc8, 0xb0, 0x87, 0xb5, 0x9a, 0x13, + 0x79, 0xdd, 0x7a, 0xb6, 0x1b, 0x11, 0x51, 0x8c, 0x0b, 0xe6, 0xa1, 0x33, 0xa2, 0xaf, 0x47, 0xcc, + 0x43, 0x93, 0x0c, 0xc5, 0x38, 0xdc, 0x00, 0x73, 0xc3, 0x87, 0x2c, 0x92, 0x9e, 0x97, 0x02, 0xb7, + 0xc3, 0x41, 0x75, 0x0e, 0x25, 0x01, 0x34, 0xce, 0x57, 0xff, 0x3d, 0x0b, 0x96, 0xf7, 0x48, 0xf7, + 0xf0, 0xd5, 0x6c, 0x85, 0x2f, 0xc7, 0xb6, 0xc2, 0xa3, 0x6b, 0x8c, 0x6d, 0xba, 0xab, 0xaf, 0x76, + 0x33, 0xfc, 0x92, 0x05, 0x6f, 0x5e, 0xe1, 0x18, 0xfc, 0x1e, 0x40, 0x7a, 0x61, 0xd0, 0x54, 0x46, + 0x1f, 0x4c, 0x76, 0xe8, 0xe2, 0x90, 0x36, 0xee, 0x84, 0x83, 0x6a, 0xca, 0xf0, 0xa2, 0x14, 0x3b, + 0xf0, 0x07, 0x0d, 0x2c, 0xb9, 0x69, 0x8b, 0x4b, 0x65, 0x7d, 0x63, 0xb2, 0x07, 0xa9, 0x7b, 0xaf, + 0x71, 0x37, 0x1c, 0x54, 0xd3, 0x57, 0x22, 0x4a, 0x37, 0x28, 0x56, 0xce, 0x9d, 0x44, 0xa2, 0xc4, + 0xd0, 0xfc, 0x7f, 0xbd, 0xf6, 0xc5, 0x58, 0xaf, 0x7d, 0x32, 0x55, 0xaf, 0x25, 0x3c, 0xbd, 0xb4, + 0xd5, 0xac, 0x73, 0xad, 0xb6, 0x79, 0xed, 0x56, 0x4b, 0x6a, 0xbf, 0xba, 0xd3, 0x9e, 0x80, 0x95, + 0xcb, 0xbd, 0x9a, 0x7a, 0x75, 0xd7, 
0x7f, 0xcd, 0x82, 0xc5, 0xd7, 0xe7, 0xc0, 0xcd, 0x86, 0xfe, + 0x34, 0x0f, 0x96, 0x5f, 0x0f, 0xfc, 0xd5, 0x03, 0x2f, 0x7e, 0xa2, 0x01, 0x23, 0x54, 0xfd, 0xf8, + 0x47, 0xb5, 0xda, 0x67, 0x84, 0x22, 0x89, 0xc0, 0xda, 0xf0, 0x36, 0x88, 0x7e, 0x58, 0x40, 0x64, + 0x5a, 0xfd, 0x0b, 0xd5, 0x61, 0xe0, 0x80, 0x02, 0x11, 0x17, 0xaf, 0x5e, 0xa8, 0xe5, 0x56, 0xcb, + 0xeb, 0xdb, 0x37, 0xee, 0x15, 0x43, 0x1e, 0xce, 0x3b, 0x2e, 0xa7, 0xc7, 0xf1, 0x0d, 0x22, 0x69, + 0x28, 0xb2, 0x00, 0xdf, 0x02, 0xb9, 0xc0, 0x69, 0xaa, 0x13, 0xa1, 0xac, 0x58, 0x72, 0xfb, 0xbb, + 0xdb, 0x48, 0xd0, 0x57, 0x0e, 0xd5, 0xed, 0x2d, 0x55, 0xc0, 0x05, 0x90, 0xeb, 0x90, 0xe3, 0x68, + 0xce, 0x90, 0xf8, 0x84, 0x0d, 0x50, 0xe8, 0x8b, 0xb3, 0x5c, 0xe5, 0xf9, 0xde, 0x64, 0x4f, 0xe3, + 0x53, 0x1e, 0x45, 0xa2, 0x9b, 0xd9, 0x87, 0x5a, 0xfd, 0x4f, 0x0d, 0xdc, 0xbd, 0xb4, 0x21, 0xc5, + 0xa1, 0x84, 0xbb, 0x5d, 0xef, 0x88, 0x34, 0xa5, 0xed, 0x62, 0x7c, 0x28, 0x6d, 0x45, 0x64, 0x34, + 0xc4, 0xe1, 0x3b, 0x60, 0x86, 0x12, 0xcc, 0x3c, 0x57, 0x1d, 0x67, 0xa3, 0x5e, 0x46, 0x92, 0x8a, + 0x14, 0x0a, 0xb7, 0xc0, 0x3c, 0x11, 0xe6, 0xa5, 0x73, 0x3b, 0x94, 0x7a, 0xc3, 0x8a, 0x2d, 0x2b, + 0x81, 0xf9, 0x9d, 0x71, 0x18, 0x9d, 0xe7, 0x17, 0xa6, 0x9a, 0xc4, 0x75, 0x48, 0x53, 0x5e, 0x6f, + 0xc5, 0xd8, 0xd4, 0xb6, 0xa4, 0x22, 0x85, 0xd6, 0xff, 0xcd, 0x02, 0xfd, 0xb2, 0xb5, 0x07, 0x3b, + 0xf1, 0x15, 0x23, 0x41, 0x79, 0x48, 0x95, 0xd7, 0x8d, 0xeb, 0x8f, 0x8c, 0x10, 0x6b, 0x2c, 0x29, + 0xdb, 0x73, 0x49, 0x6a, 0xe2, 0xf2, 0x91, 0x4f, 0x78, 0x04, 0x16, 0xdc, 0xf1, 0x93, 0x3b, 0xba, + 0xc9, 0xca, 0xeb, 0x6b, 0x53, 0x0d, 0x88, 0x34, 0xa9, 0x2b, 0x93, 0x0b, 0xe7, 0x00, 0x86, 0x2e, + 0x18, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, 0x26, 0xba, 0x18, 0x6f, + 0xcb, 0xdd, 0x11, 0x82, 0x12, 0x5c, 0x69, 0x15, 0xca, 0x4f, 0x57, 0xa1, 0xc6, 0xfd, 0x93, 0xb3, + 0x4a, 0xe6, 0xe5, 0x59, 0x25, 0x73, 0x7a, 0x56, 0xc9, 0xbc, 0x08, 0x2b, 0xda, 0x49, 0x58, 0xd1, + 0x5e, 0x86, 0x15, 0xed, 0x34, 0xac, 0x68, 0x7f, 0x87, 0x15, 0xed, 0xc7, 0x7f, 0x2a, 0x99, 0xcf, + 0x67, 0x55, 0x84, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xba, 0xf8, 0x96, 0xa4, 0x0f, 0x00, + 0x00, +} diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go index cf117d2..618ff8c 100644 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ b/vendor/k8s.io/api/authorization/v1beta1/types.go @@ -24,7 +24,7 @@ import ( // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SubjectAccessReview checks whether or not a user or group can perform an action. @@ -43,7 +43,7 @@ type SubjectAccessReview struct { // +genclient // +genclient:nonNamespaced -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a @@ -63,7 +63,7 @@ type SelfSubjectAccessReview struct { } // +genclient -// +genclient:onlyVerbs=create +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. 
@@ -189,7 +189,7 @@ type SubjectAccessReviewStatus struct {
 
 // +genclient
 // +genclient:nonNamespaced
-// +genclient:onlyVerbs=create
+// +genclient:noVerbs
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace.
diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go
index 8c9c09b..9c3be84 100644
--- a/vendor/k8s.io/api/autoscaling/v1/doc.go
+++ b/vendor/k8s.io/api/autoscaling/v1/doc.go
@@ -15,7 +15,6 @@ limitations under the License.
 */
 
 // +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
 // +k8s:openapi-gen=true
 
 package v1 // import "k8s.io/api/autoscaling/v1"
diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
index 1e3d890..47a46a5 100644
--- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
+++ b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go
@@ -14,27 +14,52 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
 
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-gogo.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto
+// DO NOT EDIT!
 
+/*
+	Package v1 is a generated protocol buffer package.
+
+	It is generated from these files:
+		k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto
+
+	It has these top-level messages:
+		CrossVersionObjectReference
+		ExternalMetricSource
+		ExternalMetricStatus
+		HorizontalPodAutoscaler
+		HorizontalPodAutoscalerCondition
+		HorizontalPodAutoscalerList
+		HorizontalPodAutoscalerSpec
+		HorizontalPodAutoscalerStatus
+		MetricSpec
+		MetricStatus
+		ObjectMetricSource
+		ObjectMetricStatus
+		PodsMetricSource
+		PodsMetricStatus
+		ResourceMetricSource
+		ResourceMetricStatus
+		Scale
+		ScaleSpec
+		ScaleStatus
+*/
 package v1
 
-import (
-	fmt "fmt"
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
 
-	io "io"
+import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource"
+import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	proto "github.com/gogo/protobuf/proto"
+import k8s_io_api_core_v1 "k8s.io/api/core/v1"
 
-	k8s_io_api_core_v1 "k8s.io/api/core/v1"
-	resource "k8s.io/apimachinery/pkg/api/resource"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+import strings "strings"
+import reflect "reflect"
 
-	math "math"
-	math_bits "math/bits"
-	reflect "reflect"
-	strings "strings"
-)
+import io "io"
 
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -45,669 +70,119 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{0} -} -func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) -} -func (m *CrossVersionObjectReference) XXX_Size() int { - return m.Size() -} -func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { - xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) + return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{1} -} -func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalMetricSource.Merge(m, src) -} -func (m *ExternalMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ExternalMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) -} +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo - -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{2} -} -func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalMetricStatus.Merge(m, src) -} -func (m *ExternalMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ExternalMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func 
(*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{3} -} -func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) -} -func (m *HorizontalPodAutoscaler) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{4} -} -func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) -} -func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{4} } -func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{5} -} -func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) -} -func (m *HorizontalPodAutoscalerList) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{5} } -func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{6} -} -func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { 
- return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) -} -func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) + return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo - func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{7} -} -func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) -} -func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) + return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{8} -} -func (m *MetricSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricSpec.Merge(m, src) -} -func (m *MetricSpec) XXX_Size() int { - return m.Size() -} -func (m *MetricSpec) XXX_DiscardUnknown() { - xxx_messageInfo_MetricSpec.DiscardUnknown(m) -} +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_MetricSpec proto.InternalMessageInfo +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{9} -} -func (m *MetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricStatus.Merge(m, src) -} -func (m *MetricStatus) XXX_Size() int { - return m.Size() -} -func (m *MetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_MetricStatus.DiscardUnknown(m) -} +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -var xxx_messageInfo_MetricStatus proto.InternalMessageInfo +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{10} -} -func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectMetricSource.Merge(m, src) -} -func (m *ObjectMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ObjectMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) -} +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{11} -} -func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectMetricStatus.Merge(m, src) -} -func (m *ObjectMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ObjectMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) -} +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func 
(*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{12} -} -func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodsMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodsMetricSource.Merge(m, src) -} -func (m *PodsMetricSource) XXX_Size() int { - return m.Size() -} -func (m *PodsMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) -} - -var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{13} -} -func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodsMetricStatus.Merge(m, src) -} -func (m *PodsMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *PodsMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{14} -} -func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetricSource.Merge(m, src) -} -func (m *ResourceMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ResourceMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) -} - -var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{15} -} -func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetricStatus.Merge(m, src) -} -func (m *ResourceMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) -} - 
-var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{16} -} -func (m *Scale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scale) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scale.Merge(m, src) -} -func (m *Scale) XXX_Size() int { - return m.Size() -} -func (m *Scale) XXX_DiscardUnknown() { - xxx_messageInfo_Scale.DiscardUnknown(m) -} - -var xxx_messageInfo_Scale proto.InternalMessageInfo - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{17} -} -func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleSpec.Merge(m, src) -} -func (m *ScaleSpec) XXX_Size() int { - return m.Size() -} -func (m *ScaleSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_2bb1f2101a7f10e2, []int{18} -} -func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleStatus.Merge(m, src) -} -func (m *ScaleStatus) XXX_Size() int { - return m.Size() -} -func (m *ScaleStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") - proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), 
"k8s.io.api.autoscaling.v1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") - proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptor_2bb1f2101a7f10e2) -} - -var fileDescriptor_2bb1f2101a7f10e2 = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 
0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 
0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, + proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") + proto.RegisterType((*ExternalMetricStatus)(nil), 
"k8s.io.api.autoscaling.v1.ExternalMetricStatus") + proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") + proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") + proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") + proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") + proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") + proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") + proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") + proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") + proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") + proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") + proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") + proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") + proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") + proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") + proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") + proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") } - func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -715,37 +190,29 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x1a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + return i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -753,63 +220,51 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i 
int _ = i var l int _ = l - if m.TargetAverageValue != nil { - { - size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + if m.MetricSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) + n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n1 } if m.TargetValue != nil { - { - size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - if m.MetricSelector != nil { - { - size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.TargetAverageValue != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n3 } - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -817,61 +272,49 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.CurrentAverageValue != nil { - { - size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - { - size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + if m.MetricSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) + n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n4 } - i-- dAtA[i] = 0x1a - if m.MetricSelector != nil { - { - size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.CurrentAverageValue != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] 
= 0x12 + i += n6 } - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -879,52 +322,41 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n7 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n8 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n9, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -932,52 +364,41 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, 
i, uint64(m.LastTransitionTime.Size())) + n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -985,46 +406,37 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1032,45 +444,38 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.TargetCPUUtilizationPercentage != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) - i-- - dAtA[i] = 0x20 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) + n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - i-- - dAtA[i] = 0x18 + i += n12 if m.MinReplicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) } - { - size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + if m.TargetCPUUtilizationPercentage != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil 
} func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1078,50 +483,43 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.CurrentCPUUtilizationPercentage != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) - i-- - dAtA[i] = 0x28 + if m.ObservedGeneration != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) } - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - i-- - dAtA[i] = 0x18 if m.LastScaleTime != nil { - { - size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) + n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 } - if m.ObservedGeneration != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + if m.CurrentCPUUtilizationPercentage != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) } - return len(dAtA) - i, nil + return i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1129,75 +527,61 @@ func (m *MetricSpec) Marshal() (dAtA []byte, err error) { } func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.External != nil { - { - size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n14, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n14 } - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Pods != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n15, err := m.Pods.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n15 } - if m.Pods != nil { - { - size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err 
- } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Resource != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n16, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n16 } - if m.Object != nil { - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n17, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n17 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1205,75 +589,61 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.External != nil { - { - size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n18, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n18 } - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Pods != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n19, err := m.Pods.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n19 } - if m.Pods != nil { - { - size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Resource != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n20, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n20 } - if m.Object != nil { - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n21, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n21 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { 
return nil, err } @@ -1281,71 +651,57 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.AverageValue != nil { - { - size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n22, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n23, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n23 if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 - } - { - size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n24, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n24 } - i-- - dAtA[i] = 0x1a - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if m.AverageValue != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) + n25, err := m.AverageValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n25 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1353,71 +709,57 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.AverageValue != nil { - { - size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n26, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n27 if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, 
err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 - } - { - size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n28 } - i-- - dAtA[i] = 0x1a - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if m.AverageValue != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) + n29, err := m.AverageValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n29 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1425,49 +767,39 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a - } - { - size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n31, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n31 } - i-- - dAtA[i] = 0x12 - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1475,49 +807,39 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a - } - { - size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n33, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n33 } - i-- - dAtA[i] = 0x12 - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1525,44 +847,36 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.TargetAverageValue != nil { - { - size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if m.TargetAverageUtilization != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.TargetAverageValue != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + return i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1570,42 +884,34 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if m.CurrentAverageUtilization != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.CurrentAverageValue.Size())) + n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1613,52 +919,41 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n36 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n37, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n37 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n38, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 + return i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1666,25 +961,20 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + return i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1692,41 +982,48 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Selector) - copy(dAtA[i:], m.Selector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i-- - dAtA[i] = 0x12 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) + i += copy(dAtA[i:], m.Selector) + return i, nil } +func encodeFixed64Generated(dAtA 
[]byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *CrossVersionObjectReference) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -1739,9 +1036,6 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.MetricName) @@ -1762,9 +1056,6 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.MetricName) @@ -1783,9 +1074,6 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1798,9 +1086,6 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -1817,9 +1102,6 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1834,9 +1116,6 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1852,9 +1131,6 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ObservedGeneration != nil { @@ -1873,9 +1149,6 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -1900,9 +1173,6 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -1927,9 +1197,6 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Target.Size() @@ -1950,9 +1217,6 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Target.Size() @@ -1973,9 +1237,6 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.MetricName) @@ -1990,9 +1251,6 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.MetricName) @@ -2007,9 +1265,6 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = 
len(m.Name) @@ -2025,9 +1280,6 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -2041,9 +1293,6 @@ func (m *ResourceMetricStatus) Size() (n int) { } func (m *Scale) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2056,9 +1305,6 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -2066,9 +1312,6 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -2078,7 +1321,14 @@ func (m *ScaleStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2101,9 +1351,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -2114,9 +1364,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -2126,7 +1376,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), 
"ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2140,7 +1390,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2151,14 +1401,9 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]HorizontalPodAutoscaler{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2182,7 +1427,7 @@ func (this *HorizontalPodAutoscalerStatus) String() string { } s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, @@ -2196,10 +1441,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + 
strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -2210,10 +1455,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2225,9 +1470,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -2239,9 +1484,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + 
`AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -2252,8 +1497,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -2264,8 +1509,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -2277,7 +1522,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -2289,7 +1534,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2299,7 +1544,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), 
"ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2350,7 +1595,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2378,7 +1623,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2388,9 +1633,6 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2410,7 +1652,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2420,9 +1662,6 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2442,7 +1681,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2452,9 +1691,6 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2469,9 +1705,6 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2499,7 +1732,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2527,7 +1760,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2537,9 +1770,6 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2559,7 +1789,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2568,14 +1798,11 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &v1.LabelSelector{} + m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2595,7 +1822,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << 
shift if b < 0x80 { break } @@ -2604,14 +1831,11 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &resource.Quantity{} + m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2631,7 +1855,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2640,14 +1864,11 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &resource.Quantity{} + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2662,9 +1883,6 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2692,7 +1910,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2720,7 +1938,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2730,9 +1948,6 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2752,7 +1967,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2761,14 +1976,11 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &v1.LabelSelector{} + m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2788,7 +2000,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2797,9 +2009,6 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2821,7 +2030,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2830,14 +2039,11 @@ func (m *ExternalMetricStatus) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &resource.Quantity{} + m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2852,9 +2058,6 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2882,7 +2085,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2910,7 +2113,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2919,9 +2122,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2943,7 +2143,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2952,9 +2152,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2976,7 +2173,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2985,9 +2182,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3004,9 +2198,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3034,7 +2225,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3062,7 +2253,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3072,9 +2263,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3094,7 +2282,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3104,9 +2292,6 @@ func (m 
*HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3126,7 +2311,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3135,9 +2320,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3159,7 +2341,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3169,9 +2351,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3191,7 +2370,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3201,9 +2380,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3218,9 +2394,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3248,7 +2421,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3276,7 +2449,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3285,9 +2458,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3309,7 +2479,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3318,9 +2488,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3338,9 +2505,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3368,7 +2532,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3396,7 +2560,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3405,9 +2569,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3429,7 +2590,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3449,7 +2610,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= int32(b&0x7F) << shift + m.MaxReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3468,7 +2629,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3483,9 +2644,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3513,7 +2671,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3541,7 +2699,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3561,7 +2719,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3570,14 +2728,11 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &v1.Time{} + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3597,7 +2752,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= int32(b&0x7F) << shift + m.CurrentReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3616,7 +2771,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= int32(b&0x7F) << shift + m.DesiredReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3635,7 +2790,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3650,9 +2805,6 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3680,7 +2832,7 @@ func (m *MetricSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3708,7 +2860,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3718,9 +2870,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3740,7 +2889,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3749,9 +2898,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3776,7 +2922,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3785,9 +2931,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3812,7 +2955,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3821,9 +2964,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3848,7 +2988,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3857,9 +2997,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3879,9 +3016,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3909,7 +3043,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3937,7 +3071,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3947,9 +3081,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3969,7 +3100,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3978,9 +3109,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4005,7 +3133,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4014,9 +3142,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4041,7 +3166,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4050,9 +3175,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4077,7 +3199,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4086,9 +3208,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4108,9 +3227,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4138,7 +3254,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4166,7 +3282,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4175,9 +3291,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4199,7 +3312,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4209,9 +3322,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4231,7 +3341,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4240,9 +3350,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4264,7 +3371,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4273,14 +3380,11 @@ func (m 
*ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4300,7 +3404,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4309,14 +3413,11 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &resource.Quantity{} + m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4331,9 +3432,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4361,7 +3459,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4389,7 +3487,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4398,9 +3496,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4422,7 +3517,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4432,9 +3527,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4454,7 +3546,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4463,9 +3555,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4487,7 +3576,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4496,14 +3585,11 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := 
m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4523,7 +3609,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4532,14 +3618,11 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &resource.Quantity{} + m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4554,9 +3637,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4584,7 +3664,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4612,7 +3692,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4622,9 +3702,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4644,7 +3721,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4653,9 +3730,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4677,7 +3751,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4686,14 +3760,11 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4708,9 +3779,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4738,7 +3806,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4766,7 +3834,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4776,9 +3844,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4798,7 +3863,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4807,9 +3872,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4831,7 +3893,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4840,14 +3902,11 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4862,9 +3921,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4892,7 +3948,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4920,7 +3976,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4930,9 +3986,6 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4952,7 +4005,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4972,7 +4025,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4981,14 +4034,11 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &resource.Quantity{} + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5003,9 +4053,6 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5033,7 +4080,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift 
if b < 0x80 { break } @@ -5061,7 +4108,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5071,9 +4118,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5093,7 +4137,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5113,7 +4157,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5122,9 +4166,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5141,9 +4182,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5171,7 +4209,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5199,7 +4237,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5208,9 +4246,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5232,7 +4267,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5241,9 +4276,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5265,7 +4297,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5274,9 +4306,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5293,9 +4322,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5323,7 +4349,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5351,7 +4377,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5365,9 +4391,6 @@ func (m 
*ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5395,7 +4418,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5423,7 +4446,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5442,7 +4465,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5452,9 +4475,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5469,9 +4489,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5487,7 +4504,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -5519,8 +4535,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -5537,34 +4555,156 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1516 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, + 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, + 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, + 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, + 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, + 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, + 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, + 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, + 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, + 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, + 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, + 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, + 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, + 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, + 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, + 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, + 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, + 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, + 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, + 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, + 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, + 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, + 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, + 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, + 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, + 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, + 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, + 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, + 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, + 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 0x01, 0x37, 0xfa, 0xd0, 0xd0, + 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, + 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, + 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, + 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, + 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, + 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, + 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 
0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, + 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, + 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, + 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, + 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, + 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, + 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, + 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, + 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, + 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, + 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, + 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, + 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, + 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, + 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, + 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, + 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, + 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, + 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, + 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, + 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, + 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, + 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, + 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, + 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, + 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, + 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, + 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, + 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, + 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 0x21, + 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, + 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, + 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, + 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, + 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, + 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, + 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 
0xef, 0xc6, + 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, + 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, + 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, + 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, + 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, + 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, + 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, + 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, + 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, + 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, + 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, + 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, + 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, + 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, + 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, + 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, + 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, + 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, + 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, + 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, + 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, + 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto index f50ed9d..5b56b2a 100644 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -88,11 +88,11 @@ message ExternalMetricStatus { // configuration of a horizontal pod autoscaler. message HorizontalPodAutoscaler { - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // behaviour of autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -141,11 +141,7 @@ message HorizontalPodAutoscalerSpec { // and will set the desired number of pods by using its Scale subresource. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler - // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the - // alpha feature gate HPAScaleToZero is enabled and at least one Object or External - // metric is configured. Scaling is active as long as at least one metric value is - // available. + // lower limit for the number of pods that can be set by the autoscaler, default 1. // +optional optional int32 minReplicas = 2; @@ -384,15 +380,15 @@ message ResourceMetricStatus { // Scale represents a scaling request for a resource. message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go index 55b2a0d..c03af13 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ b/vendor/k8s.io/api/autoscaling/v1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,11 +38,7 @@ type HorizontalPodAutoscalerSpec struct { // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption // and will set the desired number of pods by using its Scale subresource. 
ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler - // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the - // alpha feature gate HPAScaleToZero is enabled and at least one Object or External - // metric is configured. Scaling is active as long as at least one metric value is - // available. + // lower limit for the number of pods that can be set by the autoscaler, default 1. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. @@ -82,11 +78,11 @@ type HorizontalPodAutoscalerStatus struct { // configuration of a horizontal pod autoscaler. type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -113,15 +109,15 @@ type HorizontalPodAutoscalerList struct { // Scale represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -151,7 +147,7 @@ type ScaleStatus struct { // MetricSourceType indicates the type of metric. type MetricSourceType string -const ( +var ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -322,7 +318,7 @@ type MetricStatus struct { // a HorizontalPodAutoscaler. 
type HorizontalPodAutoscalerConditionType string -const ( +var ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go index 129ce2b..a6e874f 100644 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", "status": "current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "specification of a horizontal pod autoscaler.", "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", } @@ -196,8 +196,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. 
CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -207,8 +207,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", } @@ -219,9 +219,9 @@ func (ResourceMetricStatus) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go index ddb6011..3fda47d 100644 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go index 2cc9f11..da9789e 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go index e129e41..bee9412 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go @@ -14,27 +14,49 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +// DO NOT EDIT! +/* + Package v2beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + ExternalMetricSource + ExternalMetricStatus + HorizontalPodAutoscaler + HorizontalPodAutoscalerCondition + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + MetricSpec + MetricStatus + ObjectMetricSource + ObjectMetricStatus + PodsMetricSource + PodsMetricStatus + ResourceMetricSource + ResourceMetricStatus +*/ package v2beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -45,455 +67,81 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{0} -} -func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) -} -func (m *CrossVersionObjectReference) XXX_Size() int { - return m.Size() -} -func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { - xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) -} - -var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo - -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{1} -} -func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalMetricSource.Merge(m, src) -} -func (m *ExternalMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ExternalMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) -} - -var xxx_messageInfo_ExternalMetricSource 
proto.InternalMessageInfo - -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{2} -} -func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalMetricStatus.Merge(m, src) -} -func (m *ExternalMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ExternalMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) + return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{3} -} -func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) -} -func (m *HorizontalPodAutoscaler) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) -} +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{4} -} -func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return fileDescriptorGenerated, []int{4} } -func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) -} -func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { - 
xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{5} -} -func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) -} -func (m *HorizontalPodAutoscalerList) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{5} } -func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{6} -} -func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) -} -func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) + return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo - func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{7} -} -func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) -} -func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo - -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{8} -} -func (m *MetricSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricSpec) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricSpec.Merge(m, src) -} -func (m *MetricSpec) XXX_Size() int { - return m.Size() -} -func (m *MetricSpec) XXX_DiscardUnknown() { - xxx_messageInfo_MetricSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricSpec proto.InternalMessageInfo - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{9} -} -func (m *MetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricStatus.Merge(m, src) -} -func (m *MetricStatus) XXX_Size() int { - return m.Size() -} -func (m *MetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_MetricStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricStatus proto.InternalMessageInfo - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{10} -} -func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectMetricSource.Merge(m, src) -} -func (m *ObjectMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ObjectMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) -} - -var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{11} -} -func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectMetricStatus.Merge(m, src) -} -func (m *ObjectMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ObjectMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) + return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo +func (m *MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{12} -} -func (m *PodsMetricSource) XXX_Unmarshal(b 
[]byte) error { - return m.Unmarshal(b) -} -func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodsMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodsMetricSource.Merge(m, src) -} -func (m *PodsMetricSource) XXX_Size() int { - return m.Size() -} -func (m *PodsMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) -} +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{13} -} -func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodsMetricStatus.Merge(m, src) -} -func (m *PodsMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *PodsMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{14} -} -func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetricSource.Merge(m, src) -} -func (m *ResourceMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ResourceMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) -} +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func 
(*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_26c1bfc7a52d0478, []int{15} -} -func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetricStatus.Merge(m, src) -} -func (m *ResourceMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) -} +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") @@ -513,112 +161,10 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptor_26c1bfc7a52d0478) -} - -var fileDescriptor_26c1bfc7a52d0478 = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, - 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, - 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, - 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, - 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, - 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, - 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, - 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, - 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, - 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, - 0xf5, 0x4f, 0x88, 0xc5, 
0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, - 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, - 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, - 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, - 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, - 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, - 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, - 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, - 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, - 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, - 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, - 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, - 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, - 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, - 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, - 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, - 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, - 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, - 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, - 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, - 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, - 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, - 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, - 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, - 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, - 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, - 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, - 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, - 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, - 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, - 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, - 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, - 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, - 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, - 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, - 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, - 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 
0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, - 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, - 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, - 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, - 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, - 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, - 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, - 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, - 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, - 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, - 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, - 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, - 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, - 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, - 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, - 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, - 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, - 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, - 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, - 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, - 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, - 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, - 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, - 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, - 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, - 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, - 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, - 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 
0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, -} - func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -626,37 +172,29 @@ func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x1a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + return i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -664,63 +202,51 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.TargetAverageValue != nil { - { - size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + if m.MetricSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) + n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n1 } if m.TargetValue != nil { - { - size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - if m.MetricSelector != nil { - { - size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.TargetAverageValue != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + 
if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n3 } - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -728,61 +254,49 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.CurrentAverageValue != nil { - { - size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - { - size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + if m.MetricSelector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) + n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n4 } - i-- dAtA[i] = 0x1a - if m.MetricSelector != nil { - { - size, err := m.MetricSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + if m.CurrentAverageValue != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n6 } - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -790,52 +304,41 @@ func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n7 dAtA[i] = 0x12 - { - size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n8, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n8 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n9, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + return i, nil } func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -843,52 +346,41 @@ func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -896,46 +388,37 @@ func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i 
-= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -943,54 +426,45 @@ func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Metrics) > 0 { - for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) + n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - i-- - dAtA[i] = 0x18 + i += n12 if m.MinReplicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) } - { - size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, msg := range m.Metrics { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -998,149 +472,124 @@ func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { } func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + if m.ObservedGeneration != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) + n13, 
err := m.LastScaleTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n13 } + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) if len(m.CurrentMetrics) > 0 { - for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.CurrentMetrics { dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - i-- - dAtA[i] = 0x18 - if m.LastScaleTime != nil { - { - size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x12 - } - if m.ObservedGeneration != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 } - return len(dAtA) - i, nil + return i, nil } func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.External != nil { - { - size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n14, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n14 } - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Pods != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n15, err := m.Pods.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n15 } - if m.Pods != nil { - { - size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Resource != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n16, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n16 } - if m.Object != nil { - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, 
i, uint64(size)) + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n17, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n17 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1148,75 +597,61 @@ func (m *MetricStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.External != nil { - { - size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n18, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n18 } - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Pods != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n19, err := m.Pods.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n19 } - if m.Pods != nil { - { - size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Resource != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n20, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n20 } - if m.Object != nil { - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n21, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n21 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1224,71 +659,57 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.AverageValue != nil { - { - size, err := 
m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n22, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n22 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n23, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n23 if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 - } - { - size, err := m.TargetValue.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n24, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n24 } - i-- - dAtA[i] = 0x1a - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if m.AverageValue != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) + n25, err := m.AverageValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n25 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1296,71 +717,57 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.AverageValue != nil { - { - size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n26, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n26 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n27 if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x22 - } - { - size, err := m.CurrentValue.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n28, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n28 } - i-- - dAtA[i] = 0x1a - i -= 
len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if m.AverageValue != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) + n29, err := m.AverageValue.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n29 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1368,49 +775,39 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a - } - { - size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n31, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n31 } - i-- - dAtA[i] = 0x12 - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1418,49 +815,39 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) + i += copy(dAtA[i:], m.MetricName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n32 if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a - } - { - size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n33, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n33 
} - i-- - dAtA[i] = 0x12 - i -= len(m.MetricName) - copy(dAtA[i:], m.MetricName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1468,44 +855,36 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.TargetAverageValue != nil { - { - size, err := m.TargetAverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if m.TargetAverageUtilization != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.TargetAverageValue != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) + n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 + } + return i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1513,53 +892,58 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.CurrentAverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if m.CurrentAverageUtilization != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) - i-- dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) + n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v 
>> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *CrossVersionObjectReference) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -1572,9 +956,6 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.MetricName) @@ -1595,9 +976,6 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.MetricName) @@ -1616,9 +994,6 @@ func (m *ExternalMetricStatus) Size() (n int) { } func (m *HorizontalPodAutoscaler) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1631,9 +1006,6 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { } func (m *HorizontalPodAutoscalerCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -1650,9 +1022,6 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1667,9 +1036,6 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ScaleTargetRef.Size() @@ -1688,9 +1054,6 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ObservedGeneration != nil { @@ -1718,9 +1081,6 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -1745,9 +1105,6 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -1772,9 +1129,6 @@ func (m *MetricStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Target.Size() @@ -1795,9 +1149,6 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Target.Size() @@ -1818,9 +1169,6 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.MetricName) @@ -1835,9 +1183,6 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.MetricName) @@ -1852,9 +1197,6 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -1870,9 +1212,6 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -1886,7 +1225,14 @@ func (m *ResourceMetricStatus) Size() (n int) 
{ } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1909,9 +1255,9 @@ func (this *ExternalMetricSource) String() string { } s := strings.Join([]string{`&ExternalMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1922,9 +1268,9 @@ func (this *ExternalMetricStatus) String() string { } s := strings.Join([]string{`&ExternalMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -1934,7 +1280,7 @@ func (this *HorizontalPodAutoscaler) String() string { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -1948,7 +1294,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` 
+ strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -1959,14 +1305,9 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]HorizontalPodAutoscaler{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1975,16 +1316,11 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } - repeatedStringForMetrics := "[]MetricSpec{" - for _, f := range this.Metrics { - repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," - } - repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + repeatedStringForMetrics + `,`, + `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1993,23 +1329,13 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } - repeatedStringForCurrentMetrics := "[]MetricStatus{" - for _, f := range this.CurrentMetrics { - repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForCurrentMetrics += "}" - repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2020,10 +1346,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -2034,10 +1360,10 @@ func (this *MetricStatus) String() string { } s := strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2049,9 +1375,9 @@ func (this *ObjectMetricSource) String() string { s := strings.Join([]string{`&ObjectMetricSource{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", 
"k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -2063,9 +1389,9 @@ func (this *ObjectMetricStatus) String() string { s := strings.Join([]string{`&ObjectMetricStatus{`, `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -2076,8 +1402,8 @@ func (this *PodsMetricSource) String() string { } s := strings.Join([]string{`&PodsMetricSource{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -2088,8 +1414,8 @@ func (this *PodsMetricStatus) String() string { } s := strings.Join([]string{`&PodsMetricStatus{`, `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -2101,7 +1427,7 @@ func (this *ResourceMetricSource) String() string { s := strings.Join([]string{`&ResourceMetricSource{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `}`, }, "") return s @@ -2113,7 +1439,7 @@ func (this *ResourceMetricStatus) String() string { s := strings.Join([]string{`&ResourceMetricStatus{`, 
`Name:` + fmt.Sprintf("%v", this.Name) + `,`, `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, + `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2141,7 +1467,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2169,7 +1495,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2179,9 +1505,6 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2201,7 +1524,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2211,9 +1534,6 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2233,7 +1553,7 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2243,9 +1563,6 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2260,9 +1577,6 @@ func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2290,7 +1604,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2318,7 +1632,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2328,9 +1642,6 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2350,7 +1661,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2359,14 +1670,11 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l 
{ return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &v1.LabelSelector{} + m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2386,7 +1694,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2395,14 +1703,11 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetValue == nil { - m.TargetValue = &resource.Quantity{} + m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2422,7 +1727,7 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2431,14 +1736,11 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &resource.Quantity{} + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2453,9 +1755,6 @@ func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2483,7 +1782,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2511,7 +1810,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2521,9 +1820,6 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2543,7 +1839,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2552,14 +1848,11 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MetricSelector == nil { - m.MetricSelector = &v1.LabelSelector{} + m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2579,7 +1872,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2588,9 +1881,6 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) 
error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2612,7 +1902,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2621,14 +1911,11 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &resource.Quantity{} + m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2643,9 +1930,6 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2673,7 +1957,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2701,7 +1985,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2710,9 +1994,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2734,7 +2015,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2743,9 +2024,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2767,7 +2045,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2776,9 +2054,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2795,9 +2070,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2825,7 +2097,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2853,7 +2125,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2863,9 +2135,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2885,7 +2154,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2895,9 +2164,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2917,7 +2183,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2926,9 +2192,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2950,7 +2213,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2960,9 +2223,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2982,7 +2242,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2992,9 +2252,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3009,9 +2266,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3039,7 +2293,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3067,7 +2321,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3076,9 +2330,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3100,7 +2351,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3109,9 +2360,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -3129,9 +2377,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3159,7 +2404,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3187,7 +2432,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3196,9 +2441,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3220,7 +2462,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3240,7 +2482,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= int32(b&0x7F) << shift + m.MaxReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3259,7 +2501,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3268,9 +2510,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3288,9 +2527,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3318,7 +2554,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3346,7 +2582,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3366,7 +2602,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3375,14 +2611,11 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &v1.Time{} + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3402,7 +2635,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= int32(b&0x7F) << shift + m.CurrentReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3421,7 +2654,7 @@ func (m *HorizontalPodAutoscalerStatus) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= int32(b&0x7F) << shift + m.DesiredReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3440,7 +2673,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3449,9 +2682,6 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3474,7 +2704,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3483,9 +2713,6 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3503,9 +2730,6 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3533,7 +2757,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3561,7 +2785,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3571,9 +2795,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3593,7 +2814,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3602,9 +2823,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3629,7 +2847,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3638,9 +2856,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3665,7 +2880,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3674,9 +2889,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3701,7 +2913,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ 
-3710,9 +2922,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3732,9 +2941,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3762,7 +2968,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3790,7 +2996,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3800,9 +3006,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3822,7 +3025,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3831,9 +3034,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3858,7 +3058,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3867,9 +3067,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3894,7 +3091,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3903,9 +3100,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3930,7 +3124,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3939,9 +3133,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3961,9 +3152,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3991,7 +3179,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4019,7 +3207,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 
0x80 { break } @@ -4028,9 +3216,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4052,7 +3237,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4062,9 +3247,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4084,7 +3266,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4093,9 +3275,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4117,7 +3296,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4126,14 +3305,11 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4153,7 +3329,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4162,14 +3338,11 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &resource.Quantity{} + m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4184,9 +3357,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4214,7 +3384,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4242,7 +3412,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4251,9 +3421,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4275,7 +3442,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4285,9 +3452,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4307,7 +3471,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4316,9 +3480,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4340,7 +3501,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4349,14 +3510,11 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4376,7 +3534,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4385,14 +3543,11 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &resource.Quantity{} + m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4407,9 +3562,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4437,7 +3589,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4465,7 +3617,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4475,9 +3627,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4497,7 +3646,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4506,9 +3655,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4530,7 +3676,7 @@ 
func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4539,14 +3685,11 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4561,9 +3704,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4591,7 +3731,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4619,7 +3759,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4629,9 +3769,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4651,7 +3788,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4660,9 +3797,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4684,7 +3818,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4693,14 +3827,11 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4715,9 +3846,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4745,7 +3873,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4773,7 +3901,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4783,9 +3911,6 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - 
} if postIndex > l { return io.ErrUnexpectedEOF } @@ -4805,7 +3930,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4825,7 +3950,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4834,14 +3959,11 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.TargetAverageValue == nil { - m.TargetAverageValue = &resource.Quantity{} + m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4856,9 +3978,6 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4886,7 +4005,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4914,7 +4033,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4924,9 +4043,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4946,7 +4062,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4966,7 +4082,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4975,9 +4091,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4994,9 +4107,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5012,7 +4122,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -5044,8 +4153,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -5062,34 +4173,154 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var 
innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1475 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, + 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, + 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, + 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, + 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, + 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, + 0x38, 0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, + 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, + 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, + 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, + 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, + 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, + 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, + 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, + 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, + 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, + 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, + 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, + 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, + 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, + 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, + 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 
0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, + 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, + 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, + 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, + 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, + 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, + 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, + 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, + 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, + 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, + 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, + 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, + 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, + 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, + 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, + 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, + 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, + 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, + 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, + 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, + 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, + 0x74, 0x02, 0x4b, 0x2f, 0xeb, 0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, + 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, + 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, + 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, + 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, + 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, + 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, + 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, + 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, + 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, + 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, + 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, + 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, + 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, + 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, + 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 
0xf1, 0x3b, 0xc4, 0xbe, 0xa8, + 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, + 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, + 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, + 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, + 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, + 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, + 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, + 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, + 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, + 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, + 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, + 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, + 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, + 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, + 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, + 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, + 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, + 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, + 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, + 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, + 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, + 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, + 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, + 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, + 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, + 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, + 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, + 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, + 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, + 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, + 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, + 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, + 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, + 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, + 0x18, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto index f90f93f..04bc0ed 100644 --- 
a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta1"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -92,12 +92,12 @@ message ExternalMetricStatus { // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -146,11 +146,8 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler - // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the - // alpha feature gate HPAScaleToZero is enabled and at least one Object or External - // metric is configured. Scaling is active as long as at least one metric value is - // available. + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. // +optional optional int32 minReplicas = 2; diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go index 53a53a3..6a30e67 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types.go @@ -17,14 +17,14 @@ limitations under the License. package v2beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -38,11 +38,8 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. 
ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler - // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the - // alpha feature gate HPAScaleToZero is enabled and at least one Object or External - // metric is configured. Scaling is active as long as at least one metric value is - // available. + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. // +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -62,7 +59,7 @@ type HorizontalPodAutoscalerSpec struct { // MetricSourceType indicates the type of metric. type MetricSourceType string -const ( +var ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -231,7 +228,7 @@ type HorizontalPodAutoscalerStatus struct { // a HorizontalPodAutoscaler. type HorizontalPodAutoscalerConditionType string -const ( +var ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" @@ -380,12 +377,12 @@ type ExternalMetricStatus struct { type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go index a0d5f53..411b817 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -64,8 +64,8 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -99,7 +99,7 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond.", } @@ -197,8 +197,8 @@ func (PodsMetricStatus) SwaggerDoc() map[string]string { } var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. 
CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", + "name": "name is the name of the resource in question.", "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", } @@ -208,8 +208,8 @@ func (ResourceMetricSource) SwaggerDoc() map[string]string { } var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", + "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", + "name": "name is the name of the resource in question.", "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. 
It will always be set, regardless of the corresponding metric specification.", } diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go index c51e05b..2ec7e61 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go @@ -148,7 +148,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go index 6d275f6..7c7d2b6 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2beta2 // import "k8s.io/api/autoscaling/v2beta2" diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go index c69d6cb..be752a1 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go @@ -14,27 +14,52 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +// DO NOT EDIT! +/* + Package v2beta2 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto + + It has these top-level messages: + CrossVersionObjectReference + ExternalMetricSource + ExternalMetricStatus + HorizontalPodAutoscaler + HorizontalPodAutoscalerCondition + HorizontalPodAutoscalerList + HorizontalPodAutoscalerSpec + HorizontalPodAutoscalerStatus + MetricIdentifier + MetricSpec + MetricStatus + MetricTarget + MetricValueStatus + ObjectMetricSource + ObjectMetricStatus + PodsMetricSource + PodsMetricStatus + ResourceMetricSource + ResourceMetricStatus +*/ package v2beta2 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -45,632 +70,99 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } func (*CrossVersionObjectReference) ProtoMessage() {} func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{0} -} -func (m *CrossVersionObjectReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CrossVersionObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CrossVersionObjectReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_CrossVersionObjectReference.Merge(m, src) -} -func (m *CrossVersionObjectReference) XXX_Size() int { - return m.Size() -} -func (m *CrossVersionObjectReference) XXX_DiscardUnknown() { - xxx_messageInfo_CrossVersionObjectReference.DiscardUnknown(m) -} - -var xxx_messageInfo_CrossVersionObjectReference proto.InternalMessageInfo - -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{1} -} -func (m *ExternalMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExternalMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExternalMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalMetricSource.Merge(m, src) -} -func (m *ExternalMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ExternalMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalMetricSource.DiscardUnknown(m) -} - -var xxx_messageInfo_ExternalMetricSource proto.InternalMessageInfo - -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{2} -} -func (m *ExternalMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExternalMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExternalMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExternalMetricStatus.Merge(m, src) -} -func (m *ExternalMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ExternalMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ExternalMetricStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ExternalMetricStatus proto.InternalMessageInfo - -func (m *HPAScalingPolicy) Reset() { *m = HPAScalingPolicy{} } -func (*HPAScalingPolicy) ProtoMessage() {} -func (*HPAScalingPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{3} -} -func (m *HPAScalingPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HPAScalingPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HPAScalingPolicy) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_HPAScalingPolicy.Merge(m, src) -} -func (m *HPAScalingPolicy) XXX_Size() int { - return m.Size() -} -func (m *HPAScalingPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_HPAScalingPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_HPAScalingPolicy proto.InternalMessageInfo - -func (m *HPAScalingRules) Reset() { *m = HPAScalingRules{} } -func (*HPAScalingRules) ProtoMessage() {} -func (*HPAScalingRules) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{4} -} -func (m *HPAScalingRules) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HPAScalingRules) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HPAScalingRules) XXX_Merge(src proto.Message) { - xxx_messageInfo_HPAScalingRules.Merge(m, src) -} -func (m *HPAScalingRules) XXX_Size() int { - return m.Size() -} -func (m *HPAScalingRules) XXX_DiscardUnknown() { - xxx_messageInfo_HPAScalingRules.DiscardUnknown(m) -} - -var xxx_messageInfo_HPAScalingRules proto.InternalMessageInfo - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{5} -} -func (m *HorizontalPodAutoscaler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscaler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscaler) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscaler.Merge(m, src) -} -func (m *HorizontalPodAutoscaler) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscaler) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscaler.DiscardUnknown(m) + return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_HorizontalPodAutoscaler proto.InternalMessageInfo +func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } +func (*ExternalMetricSource) ProtoMessage() {} +func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *HorizontalPodAutoscalerBehavior) Reset() { *m = HorizontalPodAutoscalerBehavior{} } -func (*HorizontalPodAutoscalerBehavior) ProtoMessage() {} -func (*HorizontalPodAutoscalerBehavior) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{6} -} -func (m *HorizontalPodAutoscalerBehavior) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerBehavior) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerBehavior) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerBehavior.Merge(m, src) -} -func (m *HorizontalPodAutoscalerBehavior) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerBehavior) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerBehavior.DiscardUnknown(m) -} +func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } +func (*ExternalMetricStatus) ProtoMessage() {} +func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var 
xxx_messageInfo_HorizontalPodAutoscalerBehavior proto.InternalMessageInfo +func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } +func (*HorizontalPodAutoscaler) ProtoMessage() {} +func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{7} -} -func (m *HorizontalPodAutoscalerCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + return fileDescriptorGenerated, []int{4} } -func (m *HorizontalPodAutoscalerCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerCondition.Merge(m, src) -} -func (m *HorizontalPodAutoscalerCondition) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerCondition) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerCondition proto.InternalMessageInfo func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } func (*HorizontalPodAutoscalerList) ProtoMessage() {} func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{8} -} -func (m *HorizontalPodAutoscalerList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerList) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerList.Merge(m, src) -} -func (m *HorizontalPodAutoscalerList) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerList) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerList.DiscardUnknown(m) + return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_HorizontalPodAutoscalerList proto.InternalMessageInfo - func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{9} -} -func (m *HorizontalPodAutoscalerSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerSpec.Merge(m, src) -} -func (m *HorizontalPodAutoscalerSpec) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{6} } -func (m *HorizontalPodAutoscalerSpec) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerSpec proto.InternalMessageInfo func (m *HorizontalPodAutoscalerStatus) Reset() { *m = 
HorizontalPodAutoscalerStatus{} } func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{10} -} -func (m *HorizontalPodAutoscalerStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HorizontalPodAutoscalerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HorizontalPodAutoscalerStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_HorizontalPodAutoscalerStatus.Merge(m, src) -} -func (m *HorizontalPodAutoscalerStatus) XXX_Size() int { - return m.Size() -} -func (m *HorizontalPodAutoscalerStatus) XXX_DiscardUnknown() { - xxx_messageInfo_HorizontalPodAutoscalerStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_HorizontalPodAutoscalerStatus proto.InternalMessageInfo - -func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } -func (*MetricIdentifier) ProtoMessage() {} -func (*MetricIdentifier) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{11} -} -func (m *MetricIdentifier) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricIdentifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricIdentifier) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricIdentifier.Merge(m, src) -} -func (m *MetricIdentifier) XXX_Size() int { - return m.Size() -} -func (m *MetricIdentifier) XXX_DiscardUnknown() { - xxx_messageInfo_MetricIdentifier.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricIdentifier proto.InternalMessageInfo - -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{12} -} -func (m *MetricSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricSpec.Merge(m, src) -} -func (m *MetricSpec) XXX_Size() int { - return m.Size() -} -func (m *MetricSpec) XXX_DiscardUnknown() { - xxx_messageInfo_MetricSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricSpec proto.InternalMessageInfo - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{13} -} -func (m *MetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricStatus.Merge(m, src) -} -func (m *MetricStatus) XXX_Size() int { - return m.Size() -} -func (m *MetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_MetricStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricStatus proto.InternalMessageInfo - -func (m *MetricTarget) Reset() { *m = MetricTarget{} } -func (*MetricTarget) ProtoMessage() {} -func 
(*MetricTarget) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{14} -} -func (m *MetricTarget) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricTarget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricTarget) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricTarget.Merge(m, src) -} -func (m *MetricTarget) XXX_Size() int { - return m.Size() -} -func (m *MetricTarget) XXX_DiscardUnknown() { - xxx_messageInfo_MetricTarget.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricTarget proto.InternalMessageInfo - -func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } -func (*MetricValueStatus) ProtoMessage() {} -func (*MetricValueStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{15} -} -func (m *MetricValueStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *MetricValueStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *MetricValueStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_MetricValueStatus.Merge(m, src) -} -func (m *MetricValueStatus) XXX_Size() int { - return m.Size() -} -func (m *MetricValueStatus) XXX_DiscardUnknown() { - xxx_messageInfo_MetricValueStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_MetricValueStatus proto.InternalMessageInfo - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{16} -} -func (m *ObjectMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ObjectMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectMetricSource.Merge(m, src) -} -func (m *ObjectMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ObjectMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectMetricSource.DiscardUnknown(m) + return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_ObjectMetricSource proto.InternalMessageInfo +func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } +func (*MetricIdentifier) ProtoMessage() {} +func (*MetricIdentifier) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{17} -} -func (m *ObjectMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ObjectMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectMetricStatus.Merge(m, src) -} -func (m *ObjectMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ObjectMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectMetricStatus.DiscardUnknown(m) -} +func (m 
*MetricSpec) Reset() { *m = MetricSpec{} } +func (*MetricSpec) ProtoMessage() {} +func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_ObjectMetricStatus proto.InternalMessageInfo +func (m *MetricStatus) Reset() { *m = MetricStatus{} } +func (*MetricStatus) ProtoMessage() {} +func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{18} -} -func (m *PodsMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodsMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodsMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodsMetricSource.Merge(m, src) -} -func (m *PodsMetricSource) XXX_Size() int { - return m.Size() -} -func (m *PodsMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_PodsMetricSource.DiscardUnknown(m) -} +func (m *MetricTarget) Reset() { *m = MetricTarget{} } +func (*MetricTarget) ProtoMessage() {} +func (*MetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -var xxx_messageInfo_PodsMetricSource proto.InternalMessageInfo +func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } +func (*MetricValueStatus) ProtoMessage() {} +func (*MetricValueStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{19} -} -func (m *PodsMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodsMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodsMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodsMetricStatus.Merge(m, src) -} -func (m *PodsMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *PodsMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodsMetricStatus.DiscardUnknown(m) -} +func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } +func (*ObjectMetricSource) ProtoMessage() {} +func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -var xxx_messageInfo_PodsMetricStatus proto.InternalMessageInfo +func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } +func (*ObjectMetricStatus) ProtoMessage() {} +func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{20} -} -func (m *ResourceMetricSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceMetricSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m 
*ResourceMetricSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetricSource.Merge(m, src) -} -func (m *ResourceMetricSource) XXX_Size() int { - return m.Size() -} -func (m *ResourceMetricSource) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetricSource.DiscardUnknown(m) -} +func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } +func (*PodsMetricSource) ProtoMessage() {} +func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -var xxx_messageInfo_ResourceMetricSource proto.InternalMessageInfo +func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } +func (*PodsMetricStatus) ProtoMessage() {} +func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_592ad94d7d6be24f, []int{21} -} -func (m *ResourceMetricStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ResourceMetricStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ResourceMetricStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ResourceMetricStatus.Merge(m, src) -} -func (m *ResourceMetricStatus) XXX_Size() int { - return m.Size() -} -func (m *ResourceMetricStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ResourceMetricStatus.DiscardUnknown(m) -} +func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } +func (*ResourceMetricSource) ProtoMessage() {} +func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -var xxx_messageInfo_ResourceMetricStatus proto.InternalMessageInfo +func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } +func (*ResourceMetricStatus) ProtoMessage() {} +func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func init() { proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource") proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus") - proto.RegisterType((*HPAScalingPolicy)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingPolicy") - proto.RegisterType((*HPAScalingRules)(nil), "k8s.io.api.autoscaling.v2beta2.HPAScalingRules") proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerBehavior)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerBehavior") proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition") proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerList") proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec") @@ -687,123 +179,10 @@ func init() { proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricSource") proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricStatus") } - -func init() { - 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptor_592ad94d7d6be24f) -} - -var fileDescriptor_592ad94d7d6be24f = []byte{ - // 1657 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdb, 0x6f, 0x1b, 0x45, - 0x17, 0xcf, 0xda, 0xce, 0x6d, 0x9c, 0x5b, 0xa7, 0xfd, 0x5a, 0x2b, 0xd5, 0x67, 0x47, 0xfb, 0x55, - 0x1f, 0x50, 0xd1, 0x35, 0x31, 0x01, 0x2a, 0x55, 0x08, 0xe2, 0x14, 0xda, 0xaa, 0x49, 0x1b, 0xc6, - 0x69, 0x40, 0x28, 0xad, 0x18, 0xef, 0x4e, 0x9c, 0x21, 0xf6, 0xae, 0xb5, 0xb3, 0x76, 0x9b, 0x22, - 0x21, 0x5e, 0x78, 0x47, 0x20, 0x5e, 0xf9, 0x03, 0x10, 0x42, 0xe2, 0x05, 0x89, 0x47, 0x2e, 0xaa, - 0x2a, 0x84, 0x50, 0xdf, 0x28, 0x2f, 0x16, 0x35, 0xff, 0x45, 0x9e, 0xd0, 0x5c, 0x76, 0xbd, 0xbb, - 0x76, 0x62, 0x27, 0x4a, 0x8a, 0xfa, 0xb6, 0x33, 0xe7, 0x9c, 0xdf, 0x99, 0x39, 0xf7, 0x59, 0x70, - 0x65, 0xfb, 0x22, 0x33, 0xa8, 0x93, 0xdf, 0x6e, 0x94, 0x89, 0x6b, 0x13, 0x8f, 0xb0, 0x7c, 0x93, - 0xd8, 0x96, 0xe3, 0xe6, 0x15, 0x01, 0xd7, 0x69, 0x1e, 0x37, 0x3c, 0x87, 0x99, 0xb8, 0x4a, 0xed, - 0x4a, 0xbe, 0x59, 0x28, 0x13, 0x0f, 0x17, 0xf2, 0x15, 0x62, 0x13, 0x17, 0x7b, 0xc4, 0x32, 0xea, - 0xae, 0xe3, 0x39, 0x30, 0x2b, 0xf9, 0x0d, 0x5c, 0xa7, 0x46, 0x88, 0xdf, 0x50, 0xfc, 0xb3, 0x17, - 0x2a, 0xd4, 0xdb, 0x6a, 0x94, 0x0d, 0xd3, 0xa9, 0xe5, 0x2b, 0x4e, 0xc5, 0xc9, 0x0b, 0xb1, 0x72, - 0x63, 0x53, 0xac, 0xc4, 0x42, 0x7c, 0x49, 0xb8, 0x59, 0x3d, 0xa4, 0xde, 0x74, 0x5c, 0x92, 0x6f, - 0xce, 0xc7, 0x55, 0xce, 0x2e, 0x74, 0x78, 0x6a, 0xd8, 0xdc, 0xa2, 0x36, 0x71, 0x77, 0xf2, 0xf5, - 0xed, 0x8a, 0x10, 0x72, 0x09, 0x73, 0x1a, 0xae, 0x49, 0x0e, 0x24, 0xc5, 0xf2, 0x35, 0xe2, 0xe1, - 0x5e, 0xba, 0xf2, 0x7b, 0x49, 0xb9, 0x0d, 0xdb, 0xa3, 0xb5, 0x6e, 0x35, 0xaf, 0xf6, 0x13, 0x60, - 0xe6, 0x16, 0xa9, 0xe1, 0xb8, 0x9c, 0xfe, 0xa5, 0x06, 0xce, 0x2e, 0xb9, 0x0e, 0x63, 0xeb, 0xc4, - 0x65, 0xd4, 0xb1, 0x6f, 0x96, 0x3f, 0x24, 0xa6, 0x87, 0xc8, 0x26, 0x71, 0x89, 0x6d, 0x12, 0x38, - 0x07, 0x52, 0xdb, 0xd4, 0xb6, 0x32, 0xda, 0x9c, 0xf6, 0xfc, 0x78, 0x71, 0xe2, 0x61, 0x2b, 0x37, - 0xd4, 0x6e, 0xe5, 0x52, 0xd7, 0xa9, 0x6d, 0x21, 0x41, 0xe1, 0x1c, 0x36, 0xae, 0x91, 0x4c, 0x22, - 0xca, 0x71, 0x03, 0xd7, 0x08, 0x12, 0x14, 0x58, 0x00, 0x00, 0xd7, 0xa9, 0x52, 0x90, 0x49, 0x0a, - 0x3e, 0xa8, 0xf8, 0xc0, 0xe2, 0xea, 0x35, 0x45, 0x41, 0x21, 0x2e, 0xfd, 0x81, 0x06, 0x4e, 0xbd, - 0x75, 0xcf, 0x23, 0xae, 0x8d, 0xab, 0x2b, 0xc4, 0x73, 0xa9, 0x59, 0x12, 0xf6, 0x85, 0xef, 0x81, - 0x91, 0x9a, 0x58, 0x8b, 0x23, 0xa5, 0x0b, 0x2f, 0x19, 0xfb, 0x47, 0x82, 0x21, 0xa5, 0xaf, 0x59, - 0xc4, 0xf6, 0xe8, 0x26, 0x25, 0x6e, 0x71, 0x4a, 0xa9, 0x1e, 0x91, 0x14, 0xa4, 0xf0, 0xe0, 0x1a, - 0x18, 0xf1, 0xb0, 0x5b, 0x21, 0x9e, 0xb8, 0x4a, 0xba, 0xf0, 0xe2, 0x60, 0xc8, 0x6b, 0x42, 0xa6, - 0x83, 0x2a, 0xd7, 0x48, 0x61, 0xe9, 0xbf, 0x77, 0x5f, 0xc4, 0xc3, 0x5e, 0x83, 0x1d, 0xe3, 0x45, - 0x36, 0xc0, 0xa8, 0xd9, 0x70, 0x5d, 0x62, 0xfb, 0x37, 0x99, 0x1f, 0x0c, 0x7a, 0x1d, 0x57, 0x1b, - 0x44, 0x9e, 0xae, 0x38, 0xad, 0xb0, 0x47, 0x97, 0x24, 0x12, 0xf2, 0x21, 0xf5, 0x6f, 0x35, 0x30, - 0x73, 0x75, 0x75, 0xb1, 0x24, 0x21, 0x56, 0x9d, 0x2a, 0x35, 0x77, 0xe0, 0x45, 0x90, 0xf2, 0x76, - 0xea, 0x44, 0x85, 0xc9, 0x39, 0x3f, 0x08, 0xd6, 0x76, 0xea, 0x64, 0xb7, 0x95, 0x3b, 0x15, 0xe7, - 0xe7, 0xfb, 0x48, 0x48, 0xc0, 0xff, 0x81, 0xe1, 0x26, 0xd7, 0x2b, 0x8e, 0x3a, 0x5c, 0x9c, 0x54, - 0xa2, 0xc3, 0xe2, 0x30, 0x48, 0xd2, 0xe0, 0x25, 0x30, 0x59, 0x27, 0x2e, 0x75, 0xac, 0x12, 0x31, - 0x1d, 0xdb, 0x62, 0x22, 0x88, 0x86, 0x8b, 0xff, 0x51, 0xcc, 0x93, 0xab, 0x61, 0x22, 0x8a, 
0xf2, - 0xea, 0x5f, 0x25, 0xc0, 0x74, 0xe7, 0x00, 0xa8, 0x51, 0x25, 0x0c, 0xde, 0x01, 0xb3, 0xcc, 0xc3, - 0x65, 0x5a, 0xa5, 0xf7, 0xb1, 0x47, 0x1d, 0xfb, 0x5d, 0x6a, 0x5b, 0xce, 0xdd, 0x28, 0x7a, 0xb6, - 0xdd, 0xca, 0xcd, 0x96, 0xf6, 0xe4, 0x42, 0xfb, 0x20, 0xc0, 0xeb, 0x60, 0x82, 0x91, 0x2a, 0x31, - 0x3d, 0x79, 0x5f, 0x65, 0x97, 0xe7, 0xda, 0xad, 0xdc, 0x44, 0x29, 0xb4, 0xbf, 0xdb, 0xca, 0x9d, - 0x8c, 0x18, 0x46, 0x12, 0x51, 0x44, 0x18, 0xde, 0x01, 0x63, 0x75, 0xfe, 0x45, 0x09, 0xcb, 0x24, - 0xe6, 0x92, 0x83, 0xc4, 0x4a, 0xdc, 0xe0, 0xc5, 0x19, 0x65, 0xaa, 0xb1, 0x55, 0x85, 0x84, 0x02, - 0x4c, 0xfd, 0xc7, 0x04, 0x38, 0x73, 0xd5, 0x71, 0xe9, 0x7d, 0xc7, 0xf6, 0x70, 0x75, 0xd5, 0xb1, - 0x16, 0x15, 0x22, 0x71, 0xe1, 0x07, 0x60, 0x8c, 0xd7, 0x28, 0x0b, 0x7b, 0xb8, 0x47, 0x9c, 0x06, - 0xa5, 0xc6, 0xa8, 0x6f, 0x57, 0xf8, 0x06, 0x33, 0x38, 0xb7, 0xd1, 0x9c, 0x37, 0x64, 0x21, 0x59, - 0x21, 0x1e, 0xee, 0xe4, 0x7a, 0x67, 0x0f, 0x05, 0xa8, 0xf0, 0x36, 0x48, 0xb1, 0x3a, 0x31, 0x55, - 0xa8, 0x5e, 0xea, 0x7b, 0xb3, 0xde, 0x07, 0x2d, 0xd5, 0x89, 0xd9, 0x29, 0x3e, 0x7c, 0x85, 0x04, - 0x2c, 0x24, 0x60, 0x84, 0x89, 0x90, 0x16, 0x5e, 0x4d, 0x17, 0x5e, 0x3f, 0xac, 0x02, 0x99, 0x17, - 0x41, 0xce, 0xc9, 0x35, 0x52, 0xe0, 0xfa, 0x1f, 0x1a, 0xc8, 0xed, 0x21, 0x59, 0x24, 0x5b, 0xb8, - 0x49, 0x1d, 0x17, 0xae, 0x83, 0x51, 0xb1, 0x73, 0xab, 0xae, 0x4c, 0x99, 0x1f, 0xdc, 0x8d, 0x22, - 0x6c, 0x8b, 0x69, 0x9e, 0x91, 0x25, 0x89, 0x81, 0x7c, 0x30, 0xb8, 0x01, 0xc6, 0xc5, 0xe7, 0x65, - 0xe7, 0xae, 0xad, 0xcc, 0x78, 0x60, 0xe4, 0xc9, 0x76, 0x2b, 0x37, 0x5e, 0xf2, 0x51, 0x50, 0x07, - 0x50, 0xff, 0x34, 0x09, 0xe6, 0xf6, 0xb8, 0xd9, 0x92, 0x63, 0x5b, 0x94, 0x07, 0x3f, 0xbc, 0x1a, - 0xc9, 0xff, 0x85, 0x58, 0xfe, 0x9f, 0xeb, 0x27, 0x1f, 0xaa, 0x07, 0xcb, 0x81, 0xbf, 0x12, 0x11, - 0x2c, 0x65, 0xf0, 0xdd, 0x56, 0xae, 0x47, 0xaf, 0x36, 0x02, 0xa4, 0xa8, 0x5b, 0x60, 0x13, 0xc0, - 0x2a, 0x66, 0xde, 0x9a, 0x8b, 0x6d, 0x26, 0x35, 0xd1, 0x1a, 0x51, 0x91, 0x70, 0x7e, 0xb0, 0x40, - 0xe6, 0x12, 0xc5, 0x59, 0x75, 0x0a, 0xb8, 0xdc, 0x85, 0x86, 0x7a, 0x68, 0x80, 0xff, 0x07, 0x23, - 0x2e, 0xc1, 0xcc, 0xb1, 0x33, 0x29, 0x71, 0x8b, 0x20, 0x6c, 0x90, 0xd8, 0x45, 0x8a, 0x0a, 0x5f, - 0x00, 0xa3, 0x35, 0xc2, 0x18, 0xae, 0x90, 0xcc, 0xb0, 0x60, 0x0c, 0xea, 0xee, 0x8a, 0xdc, 0x46, - 0x3e, 0x5d, 0xff, 0x53, 0x03, 0x67, 0xf7, 0xb0, 0xe3, 0x32, 0x65, 0x1e, 0xdc, 0xe8, 0xca, 0x54, - 0x63, 0xb0, 0x0b, 0x72, 0x69, 0x91, 0xa7, 0x41, 0x8d, 0xf0, 0x77, 0x42, 0x59, 0xba, 0x01, 0x86, - 0xa9, 0x47, 0x6a, 0x7e, 0x01, 0x7a, 0xed, 0x90, 0x59, 0xd4, 0xa9, 0xef, 0xd7, 0x38, 0x1a, 0x92, - 0xa0, 0xfa, 0x83, 0xe4, 0x9e, 0x77, 0xe3, 0xa9, 0x0c, 0x3f, 0x02, 0x53, 0x62, 0xa5, 0x7a, 0x2b, - 0xd9, 0x54, 0x37, 0xec, 0x5b, 0x2d, 0xf6, 0x19, 0x6d, 0x8a, 0xa7, 0xd5, 0x51, 0xa6, 0x4a, 0x11, - 0x68, 0x14, 0x53, 0x05, 0xe7, 0x41, 0xba, 0x46, 0x6d, 0x44, 0xea, 0x55, 0x6a, 0x62, 0xa6, 0xfa, - 0xd4, 0x74, 0xbb, 0x95, 0x4b, 0xaf, 0x74, 0xb6, 0x51, 0x98, 0x07, 0xbe, 0x02, 0xd2, 0x35, 0x7c, - 0x2f, 0x10, 0x91, 0xfd, 0xe4, 0xa4, 0xd2, 0x97, 0x5e, 0xe9, 0x90, 0x50, 0x98, 0x0f, 0xde, 0xe2, - 0xd1, 0xc0, 0x3b, 0x31, 0xcb, 0xa4, 0x84, 0x99, 0xcf, 0x0f, 0xd6, 0xb8, 0x45, 0xf1, 0x0b, 0x45, - 0x8e, 0x80, 0x40, 0x3e, 0x16, 0xa4, 0x60, 0xac, 0xac, 0x6a, 0x90, 0x88, 0xb2, 0x74, 0xe1, 0x8d, - 0xc3, 0xba, 0x4f, 0xc1, 0x14, 0x27, 0x78, 0x98, 0xf8, 0x2b, 0x14, 0xc0, 0xeb, 0xdf, 0xa7, 0xc0, - 0x7f, 0xf7, 0x2d, 0xa0, 0xf0, 0x6d, 0x00, 0x9d, 0x32, 0x23, 0x6e, 0x93, 0x58, 0x57, 0xe4, 0x2c, - 0xca, 0x87, 0x42, 0xee, 0xce, 0x64, 0xf1, 0x34, 0xcf, 0xb0, 0x9b, 0x5d, 0x54, 0xd4, 0x43, 0x02, - 0x9a, 0x60, 0x92, 
0xe7, 0x9d, 0xf4, 0x1d, 0x55, 0xf3, 0xe7, 0xc1, 0x92, 0xfa, 0x04, 0x1f, 0x1d, - 0x96, 0xc3, 0x20, 0x28, 0x8a, 0x09, 0x17, 0xc1, 0xb4, 0x1a, 0x7b, 0x62, 0xbe, 0x3c, 0xa3, 0x8c, - 0x3d, 0xbd, 0x14, 0x25, 0xa3, 0x38, 0x3f, 0x87, 0xb0, 0x08, 0xa3, 0x2e, 0xb1, 0x02, 0x88, 0x54, - 0x14, 0xe2, 0x72, 0x94, 0x8c, 0xe2, 0xfc, 0xb0, 0x0a, 0xa6, 0x14, 0xaa, 0x72, 0x6d, 0x66, 0x58, - 0x44, 0xc7, 0x80, 0x03, 0xaa, 0xea, 0x5c, 0x41, 0xb8, 0x2f, 0x45, 0xb0, 0x50, 0x0c, 0x1b, 0x7a, - 0x00, 0x98, 0x7e, 0x35, 0x65, 0x99, 0x11, 0xa1, 0xe9, 0xcd, 0x43, 0xc6, 0x4b, 0x50, 0x96, 0x3b, - 0x33, 0x40, 0xb0, 0xc5, 0x50, 0x48, 0x8f, 0xfe, 0x85, 0x06, 0x66, 0xe2, 0x03, 0x6e, 0xf0, 0xb4, - 0xd0, 0xf6, 0x7c, 0x5a, 0xdc, 0x06, 0x63, 0x72, 0x54, 0x72, 0x5c, 0x15, 0x00, 0x2f, 0x0f, 0x58, - 0xf4, 0x70, 0x99, 0x54, 0x4b, 0x4a, 0x54, 0x86, 0xb3, 0xbf, 0x42, 0x01, 0xa4, 0xfe, 0x75, 0x12, - 0x80, 0x4e, 0x8a, 0xc1, 0x85, 0x48, 0x97, 0x9b, 0x8b, 0x75, 0xb9, 0x99, 0xf0, 0x3b, 0x25, 0xd4, - 0xd1, 0xd6, 0xc1, 0x88, 0x23, 0x4a, 0x8f, 0x3a, 0x61, 0xa1, 0x9f, 0x31, 0x83, 0x31, 0x29, 0x40, - 0x2b, 0x02, 0xde, 0x3b, 0x54, 0x01, 0x53, 0x68, 0xf0, 0x06, 0x48, 0xd5, 0x1d, 0xcb, 0x9f, 0x6b, - 0xfa, 0x8e, 0x84, 0xab, 0x8e, 0xc5, 0x22, 0x98, 0x63, 0xfc, 0xec, 0x7c, 0x17, 0x09, 0x1c, 0x3e, - 0x66, 0xfa, 0xaf, 0x58, 0x11, 0xa2, 0xe9, 0xc2, 0x42, 0x3f, 0x4c, 0xa4, 0xf8, 0x23, 0xb8, 0xc2, - 0x98, 0x3e, 0x05, 0x05, 0x98, 0x1c, 0x9f, 0xa8, 0x87, 0x90, 0x2a, 0x43, 0x7d, 0xf1, 0x7b, 0xbd, - 0x00, 0x25, 0xbe, 0x4f, 0x41, 0x01, 0xa6, 0xfe, 0x4d, 0x12, 0x4c, 0x44, 0x5e, 0x58, 0xff, 0x86, - 0xbb, 0x64, 0xae, 0x1d, 0xad, 0xbb, 0x24, 0xe6, 0xd1, 0xbb, 0x4b, 0xe2, 0x1e, 0x9f, 0xbb, 0x42, - 0xf8, 0x3d, 0xdc, 0xf5, 0x73, 0xc2, 0x77, 0x97, 0x6c, 0xb5, 0x83, 0xb9, 0x4b, 0xf2, 0x86, 0xdc, - 0x75, 0x33, 0xfc, 0x7e, 0xec, 0x33, 0xf3, 0x18, 0xfe, 0xe5, 0x8c, 0x77, 0x1a, 0xd8, 0xf6, 0xa8, - 0xb7, 0x53, 0x1c, 0xef, 0x7a, 0x6b, 0x5a, 0x60, 0x02, 0x37, 0x89, 0x8b, 0x2b, 0x44, 0x6c, 0x2b, - 0x7f, 0x1d, 0x14, 0x77, 0x86, 0x3f, 0xf5, 0x16, 0x43, 0x38, 0x28, 0x82, 0xca, 0xdb, 0xa0, 0x5a, - 0xdf, 0xf2, 0x82, 0x37, 0xa4, 0xea, 0x0c, 0xa2, 0x0d, 0x2e, 0x76, 0x51, 0x51, 0x0f, 0x09, 0xfd, - 0xf3, 0x04, 0x38, 0xd1, 0xf5, 0x7a, 0xef, 0x18, 0x45, 0x3b, 0x26, 0xa3, 0x24, 0x9e, 0xa2, 0x51, - 0x92, 0x07, 0x36, 0xca, 0x2f, 0x09, 0x00, 0xbb, 0x8b, 0x28, 0xfc, 0x58, 0xb4, 0x62, 0xd3, 0xa5, - 0x65, 0x62, 0x49, 0xf2, 0x51, 0x8c, 0x91, 0xe1, 0x3e, 0x1e, 0xc6, 0x46, 0x71, 0x65, 0xc7, 0xf3, - 0x83, 0x29, 0xf4, 0x1f, 0x29, 0x79, 0xb4, 0xff, 0x91, 0xf4, 0xdf, 0xe2, 0x66, 0x7c, 0xa6, 0x7f, - 0x5c, 0xf5, 0x72, 0x7f, 0xf2, 0x29, 0xba, 0x5f, 0xff, 0x49, 0x03, 0x33, 0xf1, 0x26, 0xfc, 0xcc, - 0xfd, 0xce, 0xfc, 0x35, 0x7a, 0x89, 0x67, 0xfb, 0x57, 0xe6, 0x77, 0x1a, 0x38, 0xd5, 0x6b, 0x84, - 0x81, 0x4b, 0x91, 0xc1, 0x33, 0x1f, 0x1e, 0x3c, 0x77, 0x5b, 0xb9, 0x5c, 0x8f, 0x1f, 0x10, 0x3e, - 0x4c, 0x68, 0x36, 0x3d, 0x1e, 0x07, 0xfc, 0xd0, 0x7d, 0x66, 0xe9, 0x84, 0x23, 0x39, 0xf3, 0xb1, - 0xda, 0xbb, 0x78, 0xe1, 0xe1, 0x93, 0xec, 0xd0, 0xa3, 0x27, 0xd9, 0xa1, 0xc7, 0x4f, 0xb2, 0x43, - 0x9f, 0xb4, 0xb3, 0xda, 0xc3, 0x76, 0x56, 0x7b, 0xd4, 0xce, 0x6a, 0x8f, 0xdb, 0x59, 0xed, 0xaf, - 0x76, 0x56, 0xfb, 0xec, 0xef, 0xec, 0xd0, 0xfb, 0xa3, 0x0a, 0xfa, 0x9f, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x79, 0xae, 0x08, 0x04, 0x2d, 0x1a, 0x00, 0x00, -} - func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -811,37 +190,29 @@ func (m 
*CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { } func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CrossVersionObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x1a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + return i, nil } func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -849,42 +220,33 @@ func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExternalMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) + n1, err := m.Metric.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n2, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + return i, nil } func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -892,715 +254,467 @@ func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExternalMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) + n3, err := m.Metric.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n3 dAtA[i] = 0x12 - { - size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) + n4, err := m.Current.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n4 + return i, nil } -func (m *HPAScalingPolicy) Marshal() (dAtA []byte, err error) { +func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HPAScalingPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HPAScalingPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x10 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n6, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n7, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + return i, nil } -func (m *HPAScalingRules) Marshal() (dAtA []byte, err error) { +func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HPAScalingRules) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HPAScalingRules) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.StabilizationWindowSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.StabilizationWindowSeconds)) - i-- - dAtA[i] = 0x18 - } - if len(m.Policies) > 0 { - for iNdEx := len(m.Policies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Policies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.SelectPolicy != nil { - i -= len(*m.SelectPolicy) - copy(dAtA[i:], *m.SelectPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SelectPolicy))) - i-- - dAtA[i] = 0xa + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n8 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + 
dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } -func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { +func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscaler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n9 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *HorizontalPodAutoscalerBehavior) Marshal() (dAtA []byte, err error) { +func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HorizontalPodAutoscalerBehavior) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerBehavior) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.ScaleDown != nil { - { - size, err := m.ScaleDown.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) + n10, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.ScaleUp != nil { - { - size, err := m.ScaleUp.MarshalToSizedBuffer(dAtA[:i]) + i += n10 + if m.MinReplicas != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) + } + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) + if len(m.Metrics) > 0 { + for _, msg := range m.Metrics { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } 
-func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { +func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) + if m.ObservedGeneration != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) + } + if m.LastScaleTime != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) + n11, err := m.LastScaleTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n11 } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) + if len(m.CurrentMetrics) > 0 { + for _, msg := range m.CurrentMetrics { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { +func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], 
m.Name) + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n12, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n12 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { +func (m *MetricSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Behavior != nil { - { - size, err := m.Behavior.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n13, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n13 } - if len(m.Metrics) > 0 { - for iNdEx := len(m.Metrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Metrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + if m.Pods != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n14, err := m.Pods.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n14 } - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - i-- - dAtA[i] = 0x18 - if m.MinReplicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) - i-- - dAtA[i] = 0x10 + if m.Resource != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n15, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 } - { - size, err := m.ScaleTargetRef.MarshalToSizedBuffer(dAtA[:i]) + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n16, err := m.External.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n16 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { +func (m *MetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HorizontalPodAutoscalerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { 
- size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if m.Object != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n17, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n17 } - if len(m.CurrentMetrics) > 0 { - for iNdEx := len(m.CurrentMetrics) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.CurrentMetrics[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + if m.Pods != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) + n18, err := m.Pods.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n18 } - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - i-- - dAtA[i] = 0x18 - if m.LastScaleTime != nil { - { - size, err := m.LastScaleTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Resource != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) + n19, err := m.Resource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n19 } - if m.ObservedGeneration != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 + if m.External != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) + n20, err := m.External.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n20 } - return len(dAtA) - i, nil + return i, nil } -func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { +func (m *MetricTarget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricIdentifier) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MetricSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.External != nil { - { - size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - 
return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Pods != nil { - { - size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Object != nil { - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Type) - copy(dAtA[i:], m.Type) + i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.External != nil { - { - size, err := m.External.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += copy(dAtA[i:], m.Type) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) + n21, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n21 } - if m.Pods != nil { - { - size, err := m.Pods.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + if m.AverageValue != nil { dAtA[i] = 0x1a - } - if m.Object != nil { - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) + n22, err := m.AverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *MetricTarget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + i += n22 } - return dAtA[:n], nil -} - -func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricTarget) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l if m.AverageUtilization != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) - i-- dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) } - if m.AverageValue != nil { - { - size, err := 
m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Value != nil { - { - size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1608,51 +722,42 @@ func (m *MetricValueStatus) Marshal() (dAtA []byte, err error) { } func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *MetricValueStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.AverageUtilization != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) - i-- - dAtA[i] = 0x18 + if m.Value != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) + n23, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 } if m.AverageValue != nil { - { - size, err := m.AverageValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 - } - if m.Value != nil { - { - size, err := m.Value.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) + n24, err := m.AverageValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n24 + } + if m.AverageUtilization != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) } - return len(dAtA) - i, nil + return i, nil } func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1660,52 +765,41 @@ func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) + n25, err := m.DescribedObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n25 dAtA[i] = 0x12 - { - size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n26, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n26 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) + n27, err := m.Metric.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + return i, nil } func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1713,52 +807,41 @@ func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.DescribedObject.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) + n28, err := m.Metric.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n28 dAtA[i] = 0x12 - { - size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) + n29, err := m.Current.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n29 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) + n30, err := m.DescribedObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + return i, nil } func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1766,42 +849,33 @@ func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodsMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) + n31, err := m.Metric.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n31 dAtA[i] = 0x12 - { - size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n32, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n32 + return i, nil } func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1809,42 +883,33 @@ func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodsMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) + n33, err := m.Metric.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n33 dAtA[i] = 0x12 - { - size, err := m.Metric.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) + n34, err := m.Current.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n34 + return i, nil } func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1852,37 +917,29 @@ func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceMetricSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) + n35, err := m.Target.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + return i, nil } func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1890,48 +947,53 @@ func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { } func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ResourceMetricStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Current.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) + n36, err := m.Current.MarshalTo(dAtA[i:]) 
+ if err != nil { + return 0, err + } + i += n36 + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *CrossVersionObjectReference) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -1944,9 +1006,6 @@ func (m *CrossVersionObjectReference) Size() (n int) { } func (m *ExternalMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Metric.Size() @@ -1957,9 +1016,6 @@ func (m *ExternalMetricSource) Size() (n int) { } func (m *ExternalMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Metric.Size() @@ -1969,45 +1025,7 @@ func (m *ExternalMetricStatus) Size() (n int) { return n } -func (m *HPAScalingPolicy) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Value)) - n += 1 + sovGenerated(uint64(m.PeriodSeconds)) - return n -} - -func (m *HPAScalingRules) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.SelectPolicy != nil { - l = len(*m.SelectPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Policies) > 0 { - for _, e := range m.Policies { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.StabilizationWindowSeconds != nil { - n += 1 + sovGenerated(uint64(*m.StabilizationWindowSeconds)) - } - return n -} - func (m *HorizontalPodAutoscaler) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -2019,27 +1037,7 @@ func (m *HorizontalPodAutoscaler) Size() (n int) { return n } -func (m *HorizontalPodAutoscalerBehavior) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ScaleUp != nil { - l = m.ScaleUp.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ScaleDown != nil { - l = m.ScaleDown.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *HorizontalPodAutoscalerCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2056,9 +1054,6 @@ func (m *HorizontalPodAutoscalerCondition) Size() (n int) { } func (m *HorizontalPodAutoscalerList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -2073,9 +1068,6 @@ func (m *HorizontalPodAutoscalerList) Size() (n int) { } func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ScaleTargetRef.Size() @@ -2090,17 +1082,10 @@ func (m *HorizontalPodAutoscalerSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.Behavior != nil { - l = m.Behavior.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l 
if m.ObservedGeneration != nil { @@ -2128,9 +1113,6 @@ func (m *HorizontalPodAutoscalerStatus) Size() (n int) { } func (m *MetricIdentifier) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -2143,9 +1125,6 @@ func (m *MetricIdentifier) Size() (n int) { } func (m *MetricSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2170,9 +1149,6 @@ func (m *MetricSpec) Size() (n int) { } func (m *MetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2197,9 +1173,6 @@ func (m *MetricStatus) Size() (n int) { } func (m *MetricTarget) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -2219,9 +1192,6 @@ func (m *MetricTarget) Size() (n int) { } func (m *MetricValueStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Value != nil { @@ -2239,9 +1209,6 @@ func (m *MetricValueStatus) Size() (n int) { } func (m *ObjectMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.DescribedObject.Size() @@ -2254,9 +1221,6 @@ func (m *ObjectMetricSource) Size() (n int) { } func (m *ObjectMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Metric.Size() @@ -2269,9 +1233,6 @@ func (m *ObjectMetricStatus) Size() (n int) { } func (m *PodsMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Metric.Size() @@ -2282,9 +1243,6 @@ func (m *PodsMetricSource) Size() (n int) { } func (m *PodsMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Metric.Size() @@ -2295,9 +1253,6 @@ func (m *PodsMetricStatus) Size() (n int) { } func (m *ResourceMetricSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -2308,9 +1263,6 @@ func (m *ResourceMetricSource) Size() (n int) { } func (m *ResourceMetricStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -2321,7 +1273,14 @@ func (m *ResourceMetricStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -2360,58 +1319,18 @@ func (this *ExternalMetricStatus) String() string { }, "") return s } -func (this *HPAScalingPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HPAScalingPolicy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *HPAScalingRules) String() string { - if this == nil { - return "nil" - } - repeatedStringForPolicies := "[]HPAScalingPolicy{" - for _, f := range this.Policies { - repeatedStringForPolicies += strings.Replace(strings.Replace(f.String(), "HPAScalingPolicy", "HPAScalingPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForPolicies += "}" - s := strings.Join([]string{`&HPAScalingRules{`, - `SelectPolicy:` + valueToStringGenerated(this.SelectPolicy) + `,`, - `Policies:` + repeatedStringForPolicies + `,`, - `StabilizationWindowSeconds:` + valueToStringGenerated(this.StabilizationWindowSeconds) + `,`, - `}`, - }, "") - return s -} func (this *HorizontalPodAutoscaler) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *HorizontalPodAutoscalerBehavior) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerBehavior{`, - `ScaleUp:` + strings.Replace(this.ScaleUp.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, - `ScaleDown:` + strings.Replace(this.ScaleDown.String(), "HPAScalingRules", "HPAScalingRules", 1) + `,`, - `}`, - }, "") - return s -} func (this *HorizontalPodAutoscalerCondition) String() string { if this == nil { return "nil" @@ -2419,7 +1338,7 @@ func (this *HorizontalPodAutoscalerCondition) String() string { s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -2430,14 +1349,9 @@ func (this *HorizontalPodAutoscalerList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]HorizontalPodAutoscaler{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2446,17 +1360,11 @@ func (this *HorizontalPodAutoscalerSpec) String() string { if this == nil { return "nil" } - repeatedStringForMetrics := "[]MetricSpec{" - for _, f := range this.Metrics { - repeatedStringForMetrics += strings.Replace(strings.Replace(f.String(), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + "," - } - repeatedStringForMetrics += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + repeatedStringForMetrics + `,`, - `Behavior:` + strings.Replace(this.Behavior.String(), 
"HorizontalPodAutoscalerBehavior", "HorizontalPodAutoscalerBehavior", 1) + `,`, + `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2465,23 +1373,13 @@ func (this *HorizontalPodAutoscalerStatus) String() string { if this == nil { return "nil" } - repeatedStringForCurrentMetrics := "[]MetricStatus{" - for _, f := range this.CurrentMetrics { - repeatedStringForCurrentMetrics += strings.Replace(strings.Replace(f.String(), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForCurrentMetrics += "}" - repeatedStringForConditions := "[]HorizontalPodAutoscalerCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "v1.Time", 1) + `,`, + `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + repeatedStringForCurrentMetrics + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2492,7 +1390,7 @@ func (this *MetricIdentifier) String() string { } s := strings.Join([]string{`&MetricIdentifier{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `}`, }, "") return s @@ -2503,10 +1401,10 @@ func (this *MetricSpec) String() string { } s := strings.Join([]string{`&MetricSpec{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(this.Object.String(), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(this.External.String(), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, `}`, }, "") return s @@ -2517,10 +1415,10 @@ func (this *MetricStatus) String() string { } s := 
strings.Join([]string{`&MetricStatus{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(this.Object.String(), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(this.Pods.String(), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(this.Resource.String(), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(this.External.String(), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, + `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, + `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, + `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, + `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, `}`, }, "") return s @@ -2531,8 +1429,8 @@ func (this *MetricTarget) String() string { } s := strings.Join([]string{`&MetricTarget{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -2543,8 +1441,8 @@ func (this *MetricValueStatus) String() string { return "nil" } s := strings.Join([]string{`&MetricValueStatus{`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "resource.Quantity", 1) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, `}`, }, "") @@ -2583,318 +1481,50 @@ func (this *PodsMetricSource) String() string { `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, `}`, }, "") - return s -} -func (this *PodsMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodsMetricStatus{`, - `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, - `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricSource{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricStatus) String() string { - if this == nil { - 
return "nil" - } - s := strings.Join([]string{`&ResourceMetricStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + return s +} +func (this *PodsMetricStatus) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&PodsMetricStatus{`, + `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceMetricSource) String() string { + if this == nil { + return "nil" } - return nil + s := strings.Join([]string{`&ResourceMetricSource{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } -func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { +func (this *ResourceMetricStatus) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ResourceMetricStatus{`, + 
`Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2909,7 +1539,7 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2917,17 +1547,17 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2937,30 +1567,26 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2970,81 +1596,24 @@ func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HPAScalingPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HPAScalingPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3056,7 +1625,7 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3066,52 +1635,11 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = HPAScalingPolicyType(dAtA[iNdEx:postIndex]) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) - } - m.PeriodSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PeriodSeconds |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3121,9 +1649,6 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3136,7 +1661,7 @@ func (m *HPAScalingPolicy) Unmarshal(dAtA []byte) error { } return nil } -func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { +func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3151,7 +1676,7 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3159,17 +1684,17 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HPAScalingRules: wiretype end group for non-group") + return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HPAScalingRules: illegal tag %d (wire type %d)", fieldNum, wire) + 
return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SelectPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3179,28 +1704,25 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := ScalingPolicySelect(dAtA[iNdEx:postIndex]) - m.SelectPolicy = &s + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Policies", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3212,7 +1734,7 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3221,37 +1743,13 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Policies = append(m.Policies, HPAScalingPolicy{}) - if err := m.Policies[len(m.Policies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StabilizationWindowSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StabilizationWindowSeconds = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3261,9 +1759,6 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3276,7 +1771,7 @@ func (m *HPAScalingRules) Unmarshal(dAtA []byte) error { } return nil } -func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { +func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3290,57 +1785,24 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch 
fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx = postIndex - case 2: + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3352,7 +1814,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3361,19 +1823,16 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3385,7 +1844,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3394,13 +1853,10 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3413,9 +1869,6 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3428,7 +1881,7 @@ func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { } return nil } -func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { +func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3443,7 +1896,7 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= 
(uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3451,15 +1904,15 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: wiretype end group for non-group") + return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerBehavior: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleUp", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3471,7 +1924,7 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3480,22 +1933,16 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ScaleUp == nil { - m.ScaleUp = &HPAScalingRules{} - } - if err := m.ScaleUp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleDown", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -3507,7 +1954,7 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3516,16 +1963,40 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if m.ScaleDown == nil { - m.ScaleDown = &HPAScalingRules{} - } - if err := m.ScaleDown.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -3538,9 +2009,6 @@ func (m *HorizontalPodAutoscalerBehavior) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3568,7 +2036,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3596,7 +2064,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3606,9 +2074,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3628,7 +2093,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3638,9 +2103,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3660,7 +2122,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3669,9 +2131,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3693,7 +2152,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3703,9 +2162,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3725,7 +2181,7 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3735,9 +2191,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3752,9 +2205,6 @@ func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3782,7 +2232,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3810,7 +2260,7 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3819,9 +2269,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3843,7 +2290,7 @@ func (m 
*HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3852,9 +2299,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3872,9 +2316,6 @@ func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3902,7 +2343,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3930,7 +2371,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3939,9 +2380,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3963,7 +2401,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3983,7 +2421,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MaxReplicas |= int32(b&0x7F) << shift + m.MaxReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4002,7 +2440,7 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4011,9 +2449,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4022,42 +2457,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Behavior", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Behavior == nil { - m.Behavior = &HorizontalPodAutoscalerBehavior{} - } - if err := m.Behavior.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4067,9 +2466,6 @@ func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4097,7 +2493,7 @@ func (m 
*HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4125,7 +2521,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4145,7 +2541,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4154,14 +2550,11 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScaleTime == nil { - m.LastScaleTime = &v1.Time{} + m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4181,7 +2574,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentReplicas |= int32(b&0x7F) << shift + m.CurrentReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4200,7 +2593,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredReplicas |= int32(b&0x7F) << shift + m.DesiredReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -4219,7 +2612,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4228,9 +2621,6 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4253,7 +2643,7 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4262,9 +2652,6 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4282,9 +2669,6 @@ func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4312,7 +2696,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4340,7 +2724,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4350,9 +2734,6 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4372,7 +2753,7 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4381,14 +2762,11 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4403,9 +2781,6 @@ func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4433,7 +2808,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4461,7 +2836,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4471,9 +2846,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4493,7 +2865,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4502,9 +2874,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4529,7 +2898,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4538,9 +2907,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4565,7 +2931,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4574,9 +2940,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4601,7 +2964,7 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4610,9 +2973,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4632,9 +2992,6 @@ func (m *MetricSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4662,7 +3019,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4690,7 +3047,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4700,9 +3057,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4722,7 +3076,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4731,9 +3085,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4758,7 +3109,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4767,9 +3118,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4794,7 +3142,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4803,9 +3151,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4830,7 +3175,7 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4839,9 +3184,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4861,9 +3203,6 @@ func (m *MetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4891,7 +3230,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4919,7 +3258,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4929,9 +3268,6 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4951,7 +3287,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4960,14 +3296,11 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex 
:= iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &resource.Quantity{} + m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -4987,7 +3320,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4996,14 +3329,11 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &resource.Quantity{} + m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5023,7 +3353,7 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5038,9 +3368,6 @@ func (m *MetricTarget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5068,7 +3395,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5096,7 +3423,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5105,14 +3432,11 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Value == nil { - m.Value = &resource.Quantity{} + m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5132,7 +3456,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5141,14 +3465,11 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.AverageValue == nil { - m.AverageValue = &resource.Quantity{} + m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} } if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5168,7 +3489,7 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -5183,9 +3504,6 @@ func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5213,7 +3531,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) 
& 0x7F) << shift if b < 0x80 { break } @@ -5241,7 +3559,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5250,9 +3568,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5274,7 +3589,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5283,9 +3598,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5307,7 +3619,7 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5316,9 +3628,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5335,9 +3644,6 @@ func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5365,7 +3671,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5393,7 +3699,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5402,9 +3708,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5426,7 +3729,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5435,9 +3738,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5459,7 +3759,7 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5468,9 +3768,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5487,9 +3784,6 @@ func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5517,7 +3811,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error 
{ } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5545,7 +3839,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5554,9 +3848,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5578,7 +3869,7 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5587,9 +3878,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5606,9 +3894,6 @@ func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5636,7 +3921,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5664,7 +3949,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5673,9 +3958,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5697,7 +3979,7 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5706,9 +3988,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5725,9 +4004,6 @@ func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5755,7 +4031,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5783,7 +4059,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5793,9 +4069,6 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5815,7 +4088,7 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5824,9 +4097,6 @@ func (m 
*ResourceMetricSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5843,9 +4113,6 @@ func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5873,7 +4140,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5901,7 +4168,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5911,9 +4178,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5933,7 +4197,7 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5942,9 +4206,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5961,9 +4222,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5979,7 +4237,6 @@ func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -6011,8 +4268,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -6029,34 +4288,151 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - 
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1425 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5, + 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, + 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, + 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, + 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, + 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, + 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, + 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, + 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, + 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, + 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, + 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, + 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, + 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, + 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, + 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, + 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, + 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, + 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, + 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, + 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, + 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, + 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, + 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, + 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, + 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, + 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, + 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, + 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, + 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, + 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 
0x2b, 0xe4, 0xb6, 0x3a, + 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, + 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, + 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, + 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, + 0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, + 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, + 0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, + 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, + 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, + 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, + 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, + 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, + 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, + 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, + 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, + 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, + 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, + 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, + 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, + 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, + 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, + 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, + 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, + 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, + 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, + 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, + 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, + 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, + 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, + 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, + 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, + 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, + 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, + 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, + 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, + 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, + 
0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, + 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, + 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, + 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, + 0x6d, 0xba, 0x14, 0xe6, 0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, + 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, + 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, + 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, + 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, + 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, + 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, + 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, + 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, + 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, + 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, + 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, + 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, + 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, + 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, + 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, + 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, + 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, + 0x00, +} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto index 24dc588..b4e4c95 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto @@ -32,7 +32,7 @@ option go_package = "v2beta2"; // CrossVersionObjectReference contains enough information to let you identify the referred resource. message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" optional string kind = 1; // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -64,58 +64,17 @@ message ExternalMetricStatus { optional MetricValueStatus current = 2; } -// HPAScalingPolicy is a single policy which must hold true for a specified past interval. -message HPAScalingPolicy { - // Type is used to specify the scaling policy. - optional string type = 1; - - // Value contains the amount of change which is permitted by the policy. - // It must be greater than zero - optional int32 value = 2; - - // PeriodSeconds specifies the window of time for which the policy should hold true. 
- // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). - optional int32 periodSeconds = 3; -} - -// HPAScalingRules configures the scaling behavior for one direction. -// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. -// They can limit the scaling velocity by specifying scaling policies. -// They can prevent flapping by specifying the stabilization window, so that the -// number of replicas is not set instantly, instead, the safest value from the stabilization -// window is chosen. -message HPAScalingRules { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be - // considered while scaling up or scaling down. - // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). - // If not set, use the default values: - // - For scale up: 0 (i.e. no stabilization is done). - // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). - // +optional - optional int32 stabilizationWindowSeconds = 3; - - // selectPolicy is used to specify which policy should be used. - // If not set, the default value MaxPolicySelect is used. - // +optional - optional string selectPolicy = 1; - - // policies is a list of potential scaling polices which can be used during scaling. - // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid - // +optional - repeated HPAScalingPolicy policies = 2; -} - // HorizontalPodAutoscaler is the configuration for a horizontal pod // autoscaler, which automatically manages the replica count of any resource // implementing the scale subresource based on the metrics specified. message HorizontalPodAutoscaler { // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional HorizontalPodAutoscalerSpec spec = 2; @@ -124,25 +83,6 @@ message HorizontalPodAutoscaler { optional HorizontalPodAutoscalerStatus status = 3; } -// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target -// in both Up and Down directions (scaleUp and scaleDown fields respectively). -message HorizontalPodAutoscalerBehavior { - // scaleUp is scaling policy for scaling Up. - // If not set, the default value is the higher of: - // * increase no more than 4 pods per 60 seconds - // * double the number of pods per 60 seconds - // No stabilization is used. - // +optional - optional HPAScalingRules scaleUp = 1; - - // scaleDown is scaling policy for scaling Down. - // If not set, the default value is to allow to scale down to minReplicas pods, with a - // 300 second stabilization window (i.e., the highest recommendation for - // the last 300sec is used). - // +optional - optional HPAScalingRules scaleDown = 2; -} - // HorizontalPodAutoscalerCondition describes the state of // a HorizontalPodAutoscaler at a certain point. 
message HorizontalPodAutoscalerCondition { @@ -183,11 +123,8 @@ message HorizontalPodAutoscalerSpec { // should be collected, as well as to actually change the replica count. optional CrossVersionObjectReference scaleTargetRef = 1; - // minReplicas is the lower limit for the number of replicas to which the autoscaler - // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the - // alpha feature gate HPAScaleToZero is enabled and at least one Object or External - // metric is configured. Scaling is active as long as at least one metric value is - // available. + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. // +optional optional int32 minReplicas = 2; @@ -205,12 +142,6 @@ message HorizontalPodAutoscalerSpec { // If not set, the default metric will be set to 80% average CPU utilization. // +optional repeated MetricSpec metrics = 4; - - // behavior configures the scaling behavior of the target - // in both Up and Down directions (scaleUp and scaleDown fields respectively). - // If not set, the default HPAScalingRules for scale up and scale down are used. - // +optional - optional HorizontalPodAutoscalerBehavior behavior = 5; } // HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go index 614caeb..2d33795 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types.go @@ -19,7 +19,7 @@ limitations under the License. package v2beta2 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,12 +33,12 @@ import ( type HorizontalPodAutoscaler struct { metav1.TypeMeta `json:",inline"` // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -52,11 +52,8 @@ type HorizontalPodAutoscalerSpec struct { // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics // should be collected, as well as to actually change the replica count. ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler - // can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the - // alpha feature gate HPAScaleToZero is enabled and at least one Object or External - // metric is configured. Scaling is active as long as at least one metric value is - // available. + // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. + // It defaults to 1 pod. 
// +optional MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. @@ -72,17 +69,11 @@ type HorizontalPodAutoscalerSpec struct { // If not set, the default metric will be set to 80% average CPU utilization. // +optional Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` - - // behavior configures the scaling behavior of the target - // in both Up and Down directions (scaleUp and scaleDown fields respectively). - // If not set, the default HPAScalingRules for scale up and scale down are used. - // +optional - Behavior *HorizontalPodAutoscalerBehavior `json:"behavior,omitempty" protobuf:"bytes,5,opt,name=behavior"` } // CrossVersionObjectReference contains enough information to let you identify the referred resource. type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds" + // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names Name string `json:"name" protobuf:"bytes,2,opt,name=name"` @@ -123,88 +114,10 @@ type MetricSpec struct { External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` } -// HorizontalPodAutoscalerBehavior configures the scaling behavior of the target -// in both Up and Down directions (scaleUp and scaleDown fields respectively). -type HorizontalPodAutoscalerBehavior struct { - // scaleUp is scaling policy for scaling Up. - // If not set, the default value is the higher of: - // * increase no more than 4 pods per 60 seconds - // * double the number of pods per 60 seconds - // No stabilization is used. - // +optional - ScaleUp *HPAScalingRules `json:"scaleUp,omitempty" protobuf:"bytes,1,opt,name=scaleUp"` - // scaleDown is scaling policy for scaling Down. - // If not set, the default value is to allow to scale down to minReplicas pods, with a - // 300 second stabilization window (i.e., the highest recommendation for - // the last 300sec is used). - // +optional - ScaleDown *HPAScalingRules `json:"scaleDown,omitempty" protobuf:"bytes,2,opt,name=scaleDown"` -} - -// ScalingPolicySelect is used to specify which policy should be used while scaling in a certain direction -type ScalingPolicySelect string - -const ( - // MaxPolicySelect selects the policy with the highest possible change. - MaxPolicySelect ScalingPolicySelect = "Max" - // MinPolicySelect selects the policy with the lowest possible change. - MinPolicySelect ScalingPolicySelect = "Min" - // DisabledPolicySelect disables the scaling in this direction. - DisabledPolicySelect ScalingPolicySelect = "Disabled" -) - -// HPAScalingRules configures the scaling behavior for one direction. -// These Rules are applied after calculating DesiredReplicas from metrics for the HPA. -// They can limit the scaling velocity by specifying scaling policies. -// They can prevent flapping by specifying the stabilization window, so that the -// number of replicas is not set instantly, instead, the safest value from the stabilization -// window is chosen. 
-type HPAScalingRules struct { - // StabilizationWindowSeconds is the number of seconds for which past recommendations should be - // considered while scaling up or scaling down. - // StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). - // If not set, use the default values: - // - For scale up: 0 (i.e. no stabilization is done). - // - For scale down: 300 (i.e. the stabilization window is 300 seconds long). - // +optional - StabilizationWindowSeconds *int32 `json:"stabilizationWindowSeconds" protobuf:"varint,3,opt,name=stabilizationWindowSeconds"` - // selectPolicy is used to specify which policy should be used. - // If not set, the default value MaxPolicySelect is used. - // +optional - SelectPolicy *ScalingPolicySelect `json:"selectPolicy,omitempty" protobuf:"bytes,1,opt,name=selectPolicy"` - // policies is a list of potential scaling polices which can be used during scaling. - // At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid - // +optional - Policies []HPAScalingPolicy `json:"policies,omitempty" protobuf:"bytes,2,rep,name=policies"` -} - -// HPAScalingPolicyType is the type of the policy which could be used while making scaling decisions. -type HPAScalingPolicyType string - -const ( - // PodsScalingPolicy is a policy used to specify a change in absolute number of pods. - PodsScalingPolicy HPAScalingPolicyType = "Pods" - // PercentScalingPolicy is a policy used to specify a relative amount of change with respect to - // the current number of pods. - PercentScalingPolicy HPAScalingPolicyType = "Percent" -) - -// HPAScalingPolicy is a single policy which must hold true for a specified past interval. -type HPAScalingPolicy struct { - // Type is used to specify the scaling policy. - Type HPAScalingPolicyType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=HPAScalingPolicyType"` - // Value contains the amount of change which is permitted by the policy. - // It must be greater than zero - Value int32 `json:"value" protobuf:"varint,2,opt,name=value"` - // PeriodSeconds specifies the window of time for which the policy should hold true. - // PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min). - PeriodSeconds int32 `json:"periodSeconds" protobuf:"varint,3,opt,name=periodSeconds"` -} - // MetricSourceType indicates the type of metric. type MetricSourceType string -const ( +var ( // ObjectMetricSourceType is a metric describing a kubernetes object // (for example, hits-per-second on an Ingress object). ObjectMetricSourceType MetricSourceType = "Object" @@ -305,7 +218,7 @@ type MetricTarget struct { // "Value", "AverageValue", or "Utilization" type MetricTargetType string -const ( +var ( // UtilizationMetricType declares a MetricTarget is an AverageUtilization value UtilizationMetricType MetricTargetType = "Utilization" // ValueMetricType declares a MetricTarget is a raw value @@ -346,7 +259,7 @@ type HorizontalPodAutoscalerStatus struct { // a HorizontalPodAutoscaler. type HorizontalPodAutoscalerConditionType string -const ( +var ( // ScalingActive indicates that the HPA controller is able to scale if necessary: // it's correctly configured, can fetch the desired metrics, and isn't disabled. 
ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go index 3f38880..996dc18 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v2beta2 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CrossVersionObjectReference = map[string]string{ "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds\"", + "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", "apiVersion": "API version of the referent", } @@ -58,32 +58,10 @@ func (ExternalMetricStatus) SwaggerDoc() map[string]string { return map_ExternalMetricStatus } -var map_HPAScalingPolicy = map[string]string{ - "": "HPAScalingPolicy is a single policy which must hold true for a specified past interval.", - "type": "Type is used to specify the scaling policy.", - "value": "Value contains the amount of change which is permitted by the policy. It must be greater than zero", - "periodSeconds": "PeriodSeconds specifies the window of time for which the policy should hold true. PeriodSeconds must be greater than zero and less than or equal to 1800 (30 min).", -} - -func (HPAScalingPolicy) SwaggerDoc() map[string]string { - return map_HPAScalingPolicy -} - -var map_HPAScalingRules = map[string]string{ - "": "HPAScalingRules configures the scaling behavior for one direction. These Rules are applied after calculating DesiredReplicas from metrics for the HPA. They can limit the scaling velocity by specifying scaling policies. They can prevent flapping by specifying the stabilization window, so that the number of replicas is not set instantly, instead, the safest value from the stabilization window is chosen.", - "stabilizationWindowSeconds": "StabilizationWindowSeconds is the number of seconds for which past recommendations should be considered while scaling up or scaling down. StabilizationWindowSeconds must be greater than or equal to zero and less than or equal to 3600 (one hour). If not set, use the default values: - For scale up: 0 (i.e. no stabilization is done). - For scale down: 300 (i.e. the stabilization window is 300 seconds long).", - "selectPolicy": "selectPolicy is used to specify which policy should be used. If not set, the default value MaxPolicySelect is used.", - "policies": "policies is a list of potential scaling polices which can be used during scaling. At least one policy must be specified, otherwise the HPAScalingRules will be discarded as invalid", -} - -func (HPAScalingRules) SwaggerDoc() map[string]string { - return map_HPAScalingRules -} - var map_HorizontalPodAutoscaler = map[string]string{ "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", + "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "spec is the specification for the behaviour of the autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", "status": "status is the current information about the autoscaler.", } @@ -91,16 +69,6 @@ func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { return map_HorizontalPodAutoscaler } -var map_HorizontalPodAutoscalerBehavior = map[string]string{ - "": "HorizontalPodAutoscalerBehavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively).", - "scaleUp": "scaleUp is scaling policy for scaling Up. If not set, the default value is the higher of:\n * increase no more than 4 pods per 60 seconds\n * double the number of pods per 60 seconds\nNo stabilization is used.", - "scaleDown": "scaleDown is scaling policy for scaling Down. If not set, the default value is to allow to scale down to minReplicas pods, with a 300 second stabilization window (i.e., the highest recommendation for the last 300sec is used).", -} - -func (HorizontalPodAutoscalerBehavior) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerBehavior -} - var map_HorizontalPodAutoscalerCondition = map[string]string{ "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", "type": "type describes the current condition", @@ -127,10 +95,9 @@ func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { var map_HorizontalPodAutoscalerSpec = map[string]string{ "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod. minReplicas is allowed to be 0 if the alpha feature gate HPAScaleToZero is enabled and at least one Object or External metric is configured. Scaling is active as long as at least one metric value is available.", + "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. 
If not set, the default metric will be set to 80% average CPU utilization.", - "behavior": "behavior configures the scaling behavior of the target in both Up and Down directions (scaleUp and scaleDown fields respectively). If not set, the default HPAScalingRules for scale up and scale down are used.", } func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go index ca26fe9..a6a9565 100644 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go @@ -77,53 +77,6 @@ func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HPAScalingPolicy) DeepCopyInto(out *HPAScalingPolicy) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingPolicy. -func (in *HPAScalingPolicy) DeepCopy() *HPAScalingPolicy { - if in == nil { - return nil - } - out := new(HPAScalingPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HPAScalingRules) DeepCopyInto(out *HPAScalingRules) { - *out = *in - if in.StabilizationWindowSeconds != nil { - in, out := &in.StabilizationWindowSeconds, &out.StabilizationWindowSeconds - *out = new(int32) - **out = **in - } - if in.SelectPolicy != nil { - in, out := &in.SelectPolicy, &out.SelectPolicy - *out = new(ScalingPolicySelect) - **out = **in - } - if in.Policies != nil { - in, out := &in.Policies, &out.Policies - *out = make([]HPAScalingPolicy, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HPAScalingRules. -func (in *HPAScalingRules) DeepCopy() *HPAScalingRules { - if in == nil { - return nil - } - out := new(HPAScalingRules) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { *out = *in @@ -152,32 +105,6 @@ func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerBehavior) DeepCopyInto(out *HorizontalPodAutoscalerBehavior) { - *out = *in - if in.ScaleUp != nil { - in, out := &in.ScaleUp, &out.ScaleUp - *out = new(HPAScalingRules) - (*in).DeepCopyInto(*out) - } - if in.ScaleDown != nil { - in, out := &in.ScaleDown, &out.ScaleDown - *out = new(HPAScalingRules) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerBehavior. -func (in *HorizontalPodAutoscalerBehavior) DeepCopy() *HorizontalPodAutoscalerBehavior { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerBehavior) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { *out = *in @@ -199,7 +126,7 @@ func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerC func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]HorizontalPodAutoscaler, len(*in)) @@ -244,11 +171,6 @@ func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscaler (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.Behavior != nil { - in, out := &in.Behavior, &out.Behavior - *out = new(HorizontalPodAutoscalerBehavior) - (*in).DeepCopyInto(*out) - } return } diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go index c4a8db6..0449180 100644 --- a/vendor/k8s.io/api/batch/v1/doc.go +++ b/vendor/k8s.io/api/batch/v1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v1 // import "k8s.io/api/batch/v1" diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go index 35944e7..097a6ff 100644 --- a/vendor/k8s.io/api/batch/v1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v1/generated.pb.go @@ -14,25 +14,36 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto + + It has these top-level messages: + Job + JobCondition + JobList + JobSpec + JobStatus +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -43,147 +54,27 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *Job) Reset() { *m = Job{} } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{0} -} -func (m *Job) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Job) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Job) XXX_Merge(src proto.Message) { - xxx_messageInfo_Job.Merge(m, src) -} -func (m *Job) XXX_Size() int { - return m.Size() -} -func (m *Job) XXX_DiscardUnknown() { - xxx_messageInfo_Job.DiscardUnknown(m) -} +func (m *Job) Reset() { *m = Job{} } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_Job proto.InternalMessageInfo - -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (*JobCondition) ProtoMessage() {} -func (*JobCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{1} -} -func (m *JobCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobCondition.Merge(m, src) -} -func (m *JobCondition) XXX_Size() int { - return m.Size() -} -func (m *JobCondition) XXX_DiscardUnknown() { - xxx_messageInfo_JobCondition.DiscardUnknown(m) -} +func (m *JobCondition) Reset() { *m = JobCondition{} } +func (*JobCondition) ProtoMessage() {} +func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_JobCondition proto.InternalMessageInfo +func (m *JobList) Reset() { *m = JobList{} } +func (*JobList) ProtoMessage() {} +func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *JobList) Reset() { *m = JobList{} } -func (*JobList) ProtoMessage() {} -func (*JobList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{2} -} -func (m *JobList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobList) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobList.Merge(m, src) -} -func (m *JobList) XXX_Size() int { - return m.Size() -} -func (m *JobList) XXX_DiscardUnknown() { - xxx_messageInfo_JobList.DiscardUnknown(m) -} +func (m *JobSpec) Reset() { *m = JobSpec{} } +func (*JobSpec) ProtoMessage() {} +func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_JobList proto.InternalMessageInfo - -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (*JobSpec) ProtoMessage() {} -func (*JobSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{3} -} -func (m *JobSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err 
- } - return b[:n], nil -} -func (m *JobSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobSpec.Merge(m, src) -} -func (m *JobSpec) XXX_Size() int { - return m.Size() -} -func (m *JobSpec) XXX_DiscardUnknown() { - xxx_messageInfo_JobSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_JobSpec proto.InternalMessageInfo - -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (*JobStatus) ProtoMessage() {} -func (*JobStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b52da57c93de713, []int{4} -} -func (m *JobStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobStatus.Merge(m, src) -} -func (m *JobStatus) XXX_Size() int { - return m.Size() -} -func (m *JobStatus) XXX_DiscardUnknown() { - xxx_messageInfo_JobStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_JobStatus proto.InternalMessageInfo +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } func init() { proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") @@ -192,78 +83,10 @@ func init() { proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptor_3b52da57c93de713) -} - -var fileDescriptor_3b52da57c93de713 = []byte{ - // 929 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, - 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, - 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, - 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, - 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, - 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xcf, - 0x3d, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, - 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, - 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, - 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, - 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, - 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, - 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, - 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, - 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, - 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x0b, 0x95, 0x96, 0x66, 0x3c, 0x19, 0x49, - 0x80, 0x9b, 0x92, 0x6d, 0xce, 0x8e, 0xcd, 0x67, 0xce, 0xcf, 0xe0, 0x8a, 0x4b, 0x10, 0xd4, 0xc6, - 0x37, 0xa9, 0xb1, 0x96, 0xa5, 0x06, 0xaa, 0x30, 0x52, 0xba, 0xe2, 0xaf, 0xd0, 0x06, 0x8f, 0xc1, - 0xed, 0xac, 0x2b, 
0xf7, 0x77, 0xcc, 0x25, 0xef, 0x6f, 0x5e, 0x30, 0xa7, 0x1f, 0x83, 0x6b, 0xef, - 0x14, 0x4e, 0x1b, 0xf2, 0x44, 0x94, 0x0e, 0x9f, 0xa3, 0x4d, 0x2e, 0xa8, 0x98, 0xf2, 0x4e, 0x43, - 0x39, 0xe8, 0x2b, 0x1d, 0x14, 0xcb, 0xde, 0x2b, 0x3c, 0x36, 0xf3, 0x33, 0x29, 0xd4, 0xdd, 0x3f, - 0x1b, 0x68, 0xe7, 0x82, 0x39, 0x67, 0x2c, 0xf2, 0x7c, 0xe1, 0xb3, 0x08, 0x9f, 0xa2, 0x0d, 0x71, - 0x1d, 0x83, 0xfa, 0xec, 0xb6, 0xfd, 0x64, 0x5e, 0x7a, 0x70, 0x1d, 0xc3, 0xab, 0xd4, 0xd8, 0xaf, - 0x73, 0x25, 0x46, 0x14, 0x1b, 0xf7, 0xca, 0x76, 0xd6, 0x95, 0xee, 0x74, 0xb1, 0xdc, 0xab, 0xd4, - 0x58, 0x92, 0x0e, 0xb3, 0x74, 0x5a, 0x6c, 0x0a, 0x8f, 0xd0, 0x6e, 0x40, 0xb9, 0xb8, 0x4a, 0x98, - 0x03, 0x03, 0x3f, 0x84, 0xe2, 0x1b, 0x3f, 0x7c, 0xd8, 0x0c, 0xa4, 0xc2, 0x3e, 0x28, 0x1a, 0xd8, - 0xed, 0xd5, 0x8d, 0xc8, 0xa2, 0x2f, 0x9e, 0x21, 0x2c, 0x81, 0x41, 0x42, 0x23, 0x9e, 0x7f, 0x92, - 0xac, 0xb6, 0xf1, 0xda, 0xd5, 0x0e, 0x8b, 0x6a, 0xb8, 0x77, 0xcf, 0x8d, 0x2c, 0xa9, 0x80, 0xdf, - 0x47, 0x9b, 0x09, 0x50, 0xce, 0xa2, 0x4e, 0x53, 0x3d, 0x57, 0x39, 0x1d, 0xa2, 0x50, 0x52, 0xdc, - 0xe2, 0x0f, 0xd0, 0x56, 0x08, 0x9c, 0xd3, 0x11, 0x74, 0x36, 0x15, 0xf1, 0x51, 0x41, 0xdc, 0xba, - 0xcc, 0x61, 0x32, 0xbf, 0xef, 0xfe, 0xae, 0xa1, 0xad, 0x0b, 0xe6, 0xf4, 0x7c, 0x2e, 0xf0, 0x0f, - 0xf7, 0xe2, 0x6b, 0x3e, 0xec, 0x63, 0xa4, 0x5a, 0x85, 0x77, 0xbf, 0xa8, 0xd3, 0x9a, 0x23, 0xb5, - 0xe8, 0x7e, 0x89, 0x9a, 0xbe, 0x80, 0x50, 0x8e, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, - 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, - 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, - 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x1f, 0x33, - 0x8f, 0x5b, 0x21, 0x39, 0xab, 0x60, 0x52, 0xe7, 0xe0, 0x67, 0xe8, 0x80, 0xba, 0xc2, 0x9f, 0xc1, - 0xd7, 0x40, 0xbd, 0xc0, 0x8f, 0xa0, 0x0f, 0x2e, 0x8b, 0xbc, 0xfc, 0xaf, 0xd3, 0xb0, 0xdf, 0xca, - 0x52, 0xe3, 0xe0, 0xe9, 0x32, 0x02, 0x59, 0xae, 0xc3, 0xa7, 0x68, 0xc7, 0xa1, 0xee, 0x84, 0x0d, - 0x87, 0x3d, 0x3f, 0xf4, 0x45, 0x67, 0x4b, 0x35, 0xb1, 0x9f, 0xa5, 0xc6, 0x8e, 0x5d, 0xc3, 0xc9, - 0x02, 0x0b, 0xff, 0x88, 0x5a, 0x1c, 0x02, 0x70, 0x05, 0x4b, 0x8a, 0x88, 0x7d, 0xf2, 0xc0, 0xa9, - 0x50, 0x07, 0x82, 0x7e, 0x21, 0xb5, 0x77, 0xe4, 0x58, 0xe6, 0x27, 0x52, 0x5a, 0xe2, 0xcf, 0xd1, - 0x5e, 0x48, 0xa3, 0x29, 0x2d, 0x99, 0x2a, 0x5b, 0x2d, 0x1b, 0x67, 0xa9, 0xb1, 0x77, 0xb9, 0x70, - 0x43, 0xee, 0x30, 0xf1, 0xb7, 0xa8, 0x25, 0x20, 0x8c, 0x03, 0x2a, 0xf2, 0xa0, 0x6d, 0x9f, 0xbc, - 0x57, 0x9f, 0xaa, 0xfc, 0xbf, 0xca, 0x46, 0xae, 0x98, 0x37, 0x28, 0x68, 0x6a, 0x31, 0x95, 0x29, - 0x99, 0xa3, 0xa4, 0xb4, 0xc1, 0xcf, 0xd1, 0x63, 0x21, 0x82, 0xe2, 0xc5, 0x9e, 0x0e, 0x05, 0x24, - 0xe7, 0x7e, 0xe4, 0xf3, 0x31, 0x78, 0x9d, 0x96, 0x7a, 0xae, 0xb7, 0xb3, 0xd4, 0x78, 0x3c, 0x18, - 0xf4, 0x96, 0x51, 0xc8, 0x2a, 0x6d, 0xf7, 0xb7, 0x06, 0x6a, 0x97, 0x5b, 0x0d, 0x3f, 0x47, 0xc8, - 0x9d, 0xef, 0x10, 0xde, 0xd1, 0x54, 0x1e, 0xdf, 0x5d, 0x95, 0xc7, 0x72, 0xdb, 0x54, 0xab, 0xb9, - 0x84, 0x38, 0xa9, 0x19, 0xe1, 0xef, 0x50, 0x9b, 0x0b, 0x9a, 0x08, 0xb5, 0x0d, 0xd6, 0x5f, 0x7b, - 0x1b, 0xec, 0x66, 0xa9, 0xd1, 0xee, 0xcf, 0x0d, 0x48, 0xe5, 0x85, 0x87, 0x68, 0xaf, 0x0a, 0xe6, - 0xff, 0xdc, 0x6c, 0x6a, 0x9e, 0x67, 0x0b, 0x2e, 0xe4, 0x8e, 0xab, 0xdc, 0x2f, 0x79, 0x72, 0x55, - 0xd0, 0x9a, 0xd5, 0x7e, 0xc9, 0x63, 0x4e, 0x8a, 0x5b, 0x6c, 0xa1, 0x36, 0x9f, 0xba, 0x2e, 0x80, - 0x07, 0x9e, 0x8a, 0x4b, 0xd3, 0x7e, 0xa3, 0xa0, 0xb6, 0xfb, 0xf3, 0x0b, 0x52, 0x71, 0xa4, 0xf1, - 0x90, 0xfa, 0x01, 0x78, 0x2a, 0x26, 0x35, 
0xe3, 0x73, 0x85, 0x92, 0xe2, 0xd6, 0x3e, 0xba, 0xb9, - 0xd5, 0xd7, 0x5e, 0xdc, 0xea, 0x6b, 0x2f, 0x6f, 0xf5, 0xb5, 0x5f, 0x32, 0x5d, 0xbb, 0xc9, 0x74, - 0xed, 0x45, 0xa6, 0x6b, 0x2f, 0x33, 0x5d, 0xfb, 0x2b, 0xd3, 0xb5, 0x5f, 0xff, 0xd6, 0xd7, 0xbe, - 0x5f, 0x9f, 0x1d, 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x73, 0xe7, 0x7a, 0xb8, 0x08, 0x00, - 0x00, -} - func (m *Job) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -271,52 +94,41 @@ func (m *Job) Marshal() (dAtA []byte, err error) { } func (m *Job) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Job) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *JobCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -324,62 +136,49 @@ func (m *JobCondition) Marshal() (dAtA []byte, err error) { } func (m *JobCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x32 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x2a - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n4 dAtA[i] = 0x22 - { - size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, 
i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n5 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *JobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -387,46 +186,37 @@ func (m *JobList) Marshal() (dAtA []byte, err error) { } func (m *JobList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *JobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -434,79 +224,70 @@ func (m *JobSpec) Marshal() (dAtA []byte, err error) { } func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.TTLSecondsAfterFinished != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) - i-- - dAtA[i] = 0x40 + if m.Parallelism != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) } - if m.BackoffLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) - i-- - dAtA[i] = 0x38 + if m.Completions != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) + } + if m.ActiveDeadlineSeconds != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) } - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if m.Selector != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n7, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size 
- i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n7 } - i-- - dAtA[i] = 0x32 if m.ManualSelector != nil { - i-- + dAtA[i] = 0x28 + i++ if *m.ManualSelector { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x28 + i++ } - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.ActiveDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - i-- - dAtA[i] = 0x18 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.Completions != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) - i-- - dAtA[i] = 0x10 + i += n8 + if m.BackoffLimit != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) } - if m.Parallelism != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) - i-- - dAtA[i] = 0x8 + if m.TTLSecondsAfterFinished != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) } - return len(dAtA) - i, nil + return i, nil } func (m *JobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -514,80 +295,82 @@ func (m *JobStatus) Marshal() (dAtA []byte, err error) { } func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) - i-- - dAtA[i] = 0x30 - i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) - i-- - dAtA[i] = 0x20 - if m.CompletionTime != nil { - { - size, err := m.CompletionTime.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x1a } if m.StartTime != nil { - { - size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) + n9, err := m.StartTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 } - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.CompletionTime != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CompletionTime.Size())) + n10, err := m.CompletionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n10 } - return len(dAtA) - i, nil + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) + 
dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Job) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -600,9 +383,6 @@ func (m *Job) Size() (n int) { } func (m *JobCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -621,9 +401,6 @@ func (m *JobCondition) Size() (n int) { } func (m *JobList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -638,9 +415,6 @@ func (m *JobList) Size() (n int) { } func (m *JobSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Parallelism != nil { @@ -671,9 +445,6 @@ func (m *JobSpec) Size() (n int) { } func (m *JobStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Conditions) > 0 { @@ -697,7 +468,14 @@ func (m *JobStatus) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -707,7 +485,7 @@ func (this *Job) String() string { return "nil" } s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -721,8 +499,8 @@ func (this *JobCondition) String() string { s := strings.Join([]string{`&JobCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastProbeTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -733,14 +511,9 @@ 
func (this *JobList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Job{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Job", "Job", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -753,9 +526,9 @@ func (this *JobSpec) String() string { `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, `Completions:` + valueToStringGenerated(this.Completions) + `,`, `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, `}`, @@ -766,15 +539,10 @@ func (this *JobStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]JobCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "JobCondition", "JobCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&JobStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "v1.Time", 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `Active:` + fmt.Sprintf("%v", this.Active) + `,`, `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, @@ -805,7 +573,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -833,7 +601,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -842,9 +610,6 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -866,7 +631,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -875,9 +640,6 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -899,7 +661,7 @@ func (m *Job) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -908,9 +670,6 @@ func (m *Job) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -927,9 +686,6 @@ func (m *Job) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -957,7 +713,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -985,7 +741,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -995,9 +751,6 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1017,7 +770,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1027,9 +780,6 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1049,7 +799,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1058,9 +808,6 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1082,7 +829,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1091,9 +838,6 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1115,7 +859,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1125,9 +869,6 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - 
if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1147,7 +888,7 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1157,9 +898,6 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1174,9 +912,6 @@ func (m *JobCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1204,7 +939,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1232,7 +967,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1241,9 +976,6 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1265,7 +997,7 @@ func (m *JobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1274,9 +1006,6 @@ func (m *JobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1294,9 +1023,6 @@ func (m *JobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1324,7 +1050,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1352,7 +1078,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1372,7 +1098,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1392,7 +1118,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1412,7 +1138,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1421,14 +1147,11 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1448,7 +1171,7 @@ func (m *JobSpec) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1469,7 +1192,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1478,9 +1201,6 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1502,7 +1222,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1522,7 +1242,7 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1537,9 +1257,6 @@ func (m *JobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1567,7 +1284,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1595,7 +1312,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1604,9 +1321,6 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1629,7 +1343,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1638,14 +1352,11 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &v1.Time{} + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1665,7 +1376,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1674,14 +1385,11 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.CompletionTime == nil { - m.CompletionTime = &v1.Time{} + m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1701,7 +1409,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Active |= int32(b&0x7F) << shift + m.Active |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1720,7 +1428,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Succeeded |= int32(b&0x7F) << shift + m.Succeeded |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1739,7 +1447,7 @@ func (m *JobStatus) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - m.Failed |= int32(b&0x7F) << shift + m.Failed |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1753,9 +1461,6 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1771,7 +1476,6 @@ func (m *JobStatus) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1803,8 +1507,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1821,34 +1527,120 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 929 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, + 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, + 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, + 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, + 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, + 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xdf, + 0x39, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, + 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, + 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, + 0x1a, 0xfb, 0xa6, 0x22, 0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, + 0xa1, 
0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, + 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, + 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, + 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, + 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, + 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x84, 0x4a, 0x4b, 0x33, 0x9e, 0x8c, 0x24, + 0xc0, 0x4d, 0xc9, 0x36, 0x67, 0xc7, 0xe6, 0x33, 0xe7, 0x67, 0x70, 0xc5, 0x25, 0x08, 0x6a, 0xe3, + 0x9b, 0xd4, 0x58, 0xcb, 0x52, 0x03, 0x55, 0x18, 0x29, 0x5d, 0xf1, 0x57, 0x68, 0x83, 0xc7, 0xe0, + 0x76, 0xd6, 0x95, 0xfb, 0x3b, 0xe6, 0x92, 0xf9, 0x9b, 0x17, 0xcc, 0xe9, 0xc7, 0xe0, 0xda, 0x3b, + 0x85, 0xd3, 0x86, 0x3c, 0x11, 0xa5, 0xc3, 0xe7, 0x68, 0x93, 0x0b, 0x2a, 0xa6, 0xbc, 0xd3, 0x50, + 0x0e, 0xfa, 0x4a, 0x07, 0xc5, 0xb2, 0xf7, 0x0a, 0x8f, 0xcd, 0xfc, 0x4c, 0x0a, 0x75, 0xf7, 0xcf, + 0x06, 0xda, 0xb9, 0x60, 0xce, 0x19, 0x8b, 0x3c, 0x5f, 0xf8, 0x2c, 0xc2, 0xa7, 0x68, 0x43, 0x5c, + 0xc7, 0xa0, 0x3e, 0xbb, 0x6d, 0x3f, 0x99, 0x97, 0x1e, 0x5c, 0xc7, 0xf0, 0x2a, 0x35, 0xf6, 0xeb, + 0x5c, 0x89, 0x11, 0xc5, 0xc6, 0xbd, 0xb2, 0x9d, 0x75, 0xa5, 0x3b, 0x5d, 0x2c, 0xf7, 0x2a, 0x35, + 0x96, 0xa4, 0xc3, 0x2c, 0x9d, 0x16, 0x9b, 0xc2, 0x23, 0xb4, 0x1b, 0x50, 0x2e, 0xae, 0x12, 0xe6, + 0xc0, 0xc0, 0x0f, 0xa1, 0xf8, 0xc6, 0x0f, 0x1f, 0xf6, 0x06, 0x52, 0x61, 0x1f, 0x14, 0x0d, 0xec, + 0xf6, 0xea, 0x46, 0x64, 0xd1, 0x17, 0xcf, 0x10, 0x96, 0xc0, 0x20, 0xa1, 0x11, 0xcf, 0x3f, 0x49, + 0x56, 0xdb, 0x78, 0xed, 0x6a, 0x87, 0x45, 0x35, 0xdc, 0xbb, 0xe7, 0x46, 0x96, 0x54, 0xc0, 0xef, + 0xa3, 0xcd, 0x04, 0x28, 0x67, 0x51, 0xa7, 0xa9, 0xc6, 0x55, 0xbe, 0x0e, 0x51, 0x28, 0x29, 0x6e, + 0xf1, 0x07, 0x68, 0x2b, 0x04, 0xce, 0xe9, 0x08, 0x3a, 0x9b, 0x8a, 0xf8, 0xa8, 0x20, 0x6e, 0x5d, + 0xe6, 0x30, 0x99, 0xdf, 0x77, 0x7f, 0xd7, 0xd0, 0xd6, 0x05, 0x73, 0x7a, 0x3e, 0x17, 0xf8, 0x87, + 0x7b, 0xf1, 0x35, 0x1f, 0xf6, 0x31, 0x52, 0xad, 0xc2, 0xbb, 0x5f, 0xd4, 0x69, 0xcd, 0x91, 0x5a, + 0x74, 0xbf, 0x44, 0x4d, 0x5f, 0x40, 0x28, 0x9f, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, + 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, + 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, + 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x87, 0x99, + 0xc7, 0xad, 0x90, 0x9c, 0x55, 0x30, 0xa9, 0x73, 0xf0, 0x33, 0x74, 0x40, 0x5d, 0xe1, 0xcf, 0xe0, + 0x6b, 0xa0, 0x5e, 0xe0, 0x47, 0xd0, 0x07, 0x97, 0x45, 0x5e, 0xfe, 0xd7, 0x69, 0xd8, 0x6f, 0x65, + 0xa9, 0x71, 0xf0, 0x74, 0x19, 0x81, 0x2c, 0xd7, 0xe1, 0x1f, 0x51, 0x8b, 0x43, 0x00, 0xae, 0x60, + 0x49, 0x11, 0x96, 0x4f, 0x1e, 0x38, 0x5f, 0xea, 0x40, 0xd0, 0x2f, 0xa4, 0xf6, 0x8e, 0x1c, 0xf0, + 0xfc, 0x44, 0x4a, 0x4b, 0xfc, 0x39, 0xda, 0x0b, 0x69, 0x34, 0xa5, 0x25, 0x53, 0xa5, 0xa4, 0x65, + 0xe3, 0x2c, 0x35, 0xf6, 0x2e, 0x17, 0x6e, 0xc8, 0x1d, 0x26, 0xfe, 0x16, 0xb5, 0x04, 0x84, 0x71, + 0x40, 0x45, 0x1e, 0x99, 0xed, 0x93, 0xf7, 0xea, 0xef, 0x23, 0xff, 0x79, 0xb2, 0x91, 0x2b, 0xe6, + 0x0d, 0x0a, 0x9a, 0x5a, 0x31, 0xe5, 0x7b, 0xcf, 0x51, 0x52, 0xda, 0xe0, 0x53, 0xb4, 0xe3, 0x50, + 0x77, 0xc2, 0x86, 0xc3, 0x9e, 0x1f, 0xfa, 0xa2, 0xb3, 0xa5, 0x46, 0xbe, 0x9f, 0xa5, 0xc6, 0x8e, + 0x5d, 0xc3, 0xc9, 0x02, 0x0b, 0x3f, 0x47, 0x8f, 0x85, 0x08, 0x8a, 0x89, 0x3d, 0x1d, 0x0a, 0x48, + 0xce, 0xfd, 0xc8, 0xe7, 0x63, 
0xf0, 0x3a, 0x2d, 0x65, 0xf0, 0x76, 0x96, 0x1a, 0x8f, 0x07, 0x83, + 0xde, 0x32, 0x0a, 0x59, 0xa5, 0xed, 0xfe, 0xd6, 0x40, 0xed, 0x72, 0xab, 0xe1, 0xe7, 0x08, 0xb9, + 0xf3, 0x1d, 0xc2, 0x3b, 0x9a, 0xca, 0xe3, 0xbb, 0xab, 0xf2, 0x58, 0x6e, 0x9b, 0x6a, 0x35, 0x97, + 0x10, 0x27, 0x35, 0x23, 0xfc, 0x1d, 0x6a, 0x73, 0x41, 0x13, 0xa1, 0xb6, 0xc1, 0xfa, 0x6b, 0x6f, + 0x83, 0xdd, 0x2c, 0x35, 0xda, 0xfd, 0xb9, 0x01, 0xa9, 0xbc, 0xf0, 0x10, 0xed, 0x55, 0xc1, 0xfc, + 0x9f, 0x9b, 0x4d, 0xa5, 0xe0, 0x6c, 0xc1, 0x85, 0xdc, 0x71, 0x95, 0xfb, 0x25, 0x4f, 0xae, 0x8a, + 0x67, 0xb3, 0xda, 0x2f, 0x79, 0xcc, 0x49, 0x71, 0x8b, 0x2d, 0xd4, 0xe6, 0x53, 0xd7, 0x05, 0xf0, + 0xc0, 0x53, 0x21, 0x6b, 0xda, 0x6f, 0x14, 0xd4, 0x76, 0x7f, 0x7e, 0x41, 0x2a, 0x8e, 0x34, 0x1e, + 0x52, 0x3f, 0x00, 0x4f, 0x85, 0xab, 0x66, 0x7c, 0xae, 0x50, 0x52, 0xdc, 0xda, 0x47, 0x37, 0xb7, + 0xfa, 0xda, 0x8b, 0x5b, 0x7d, 0xed, 0xe5, 0xad, 0xbe, 0xf6, 0x4b, 0xa6, 0x6b, 0x37, 0x99, 0xae, + 0xbd, 0xc8, 0x74, 0xed, 0x65, 0xa6, 0x6b, 0x7f, 0x65, 0xba, 0xf6, 0xeb, 0xdf, 0xfa, 0xda, 0xf7, + 0xeb, 0xb3, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x13, 0xdb, 0x98, 0xf9, 0xb8, 0x08, 0x00, + 0x00, +} diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto index 75de45f..039149d 100644 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ b/vendor/k8s.io/api/batch/v1/generated.proto @@ -32,17 +32,17 @@ option go_package = "v1"; // Job represents the configuration of a single job. message Job { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional JobSpec spec = 2; // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional JobStatus status = 3; } @@ -75,7 +75,7 @@ message JobCondition { // JobList is a collection of jobs. message JobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go index 646a3cd..8dad904 100644 --- a/vendor/k8s.io/api/batch/v1/types.go +++ b/vendor/k8s.io/api/batch/v1/types.go @@ -28,17 +28,17 @@ import ( type Job struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a job. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
 // +optional
 Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
 
 // Current status of a job.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
 // +optional
 Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
 }
@@ -49,7 +49,7 @@ type Job struct {
 type JobList struct {
 metav1.TypeMeta `json:",inline"`
 // Standard list metadata.
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
 // +optional
 metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
index 0120e07..d8e2bdd 100644
--- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go
@@ -29,9 +29,9 @@ package v1
 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
 var map_Job = map[string]string{
 "": "Job represents the configuration of a single job.",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
- "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
- "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
+ "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
+ "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
 }
 func (Job) SwaggerDoc() map[string]string {
@@ -54,7 +54,7 @@ func (JobCondition) SwaggerDoc() map[string]string {
 var map_JobList = map[string]string{
 "": "JobList is a collection of jobs.",
- "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
 "items": "items is the list of Jobs.",
 }
diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
index beba55a..88cb016 100644
--- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go
@@ -75,7 +75,7 @@ func (in *JobCondition) DeepCopy() *JobCondition {
 func (in *JobList) DeepCopyInto(out *JobList) {
 *out = *in
 out.TypeMeta = in.TypeMeta
- in.ListMeta.DeepCopyInto(&out.ListMeta)
+ out.ListMeta = in.ListMeta
 if in.Items != nil {
 in, out := &in.Items, &out.Items
 *out = make([]Job, len(*in))
diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go
index 258ff02..43020ed 100644
--- a/vendor/k8s.io/api/batch/v1beta1/doc.go
+++ b/vendor/k8s.io/api/batch/v1beta1/doc.go
@@ -15,7 +15,6 @@ limitations under the License.
 */
 // +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
 // +k8s:openapi-gen=true
 package v1beta1 // import "k8s.io/api/batch/v1beta1"
diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
index 69c4054..ece2204 100644
--- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go
@@ -14,25 +14,37 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-gogo.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto
+// DO NOT EDIT!
+/*
+ Package v1beta1 is a generated protocol buffer package.
+
+ It is generated from these files:
+ k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto
+
+ It has these top-level messages:
+ CronJob
+ CronJobList
+ CronJobSpec
+ CronJobStatus
+ JobTemplate
+ JobTemplateSpec
+*/
 package v1beta1
-import (
- fmt "fmt"
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
- io "io"
+import k8s_io_api_core_v1 "k8s.io/api/core/v1"
+import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- proto "github.com/gogo/protobuf/proto"
- v11 "k8s.io/api/core/v1"
- v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+import strings "strings"
+import reflect "reflect"
- math "math"
- math_bits "math/bits"
- reflect "reflect"
- strings "strings"
-)
+import io "io"
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -43,175 +55,31 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
-
-func (m *CronJob) Reset() { *m = CronJob{} }
-func (*CronJob) ProtoMessage() {}
-func (*CronJob) Descriptor() ([]byte, []int) {
- return fileDescriptor_e57b277b05179ae7, []int{0}
-}
-func (m *CronJob) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJob) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJob.Merge(m, src)
-}
-func (m *CronJob) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJob) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJob.DiscardUnknown(m)
-}
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
-var xxx_messageInfo_CronJob proto.InternalMessageInfo
-
-func (m *CronJobList) Reset() { *m = CronJobList{} }
-func (*CronJobList) ProtoMessage() {}
-func (*CronJobList) Descriptor() ([]byte, []int) {
- return fileDescriptor_e57b277b05179ae7, []int{1}
-}
-func (m *CronJobList) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJobList) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJobList.Merge(m, src)
-}
-func (m *CronJobList) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJobList) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJobList.DiscardUnknown(m)
-}
+func (m *CronJob) Reset() { *m = CronJob{} }
+func (*CronJob) ProtoMessage() {}
+func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} }
-var xxx_messageInfo_CronJobList proto.InternalMessageInfo
+func (m *CronJobList) Reset() { *m = CronJobList{} }
+func (*CronJobList) ProtoMessage() {}
+func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} }
-func (m *CronJobSpec) Reset() { *m = CronJobSpec{} }
-func (*CronJobSpec) ProtoMessage() {}
-func (*CronJobSpec) Descriptor() ([]byte, []int) {
- return fileDescriptor_e57b277b05179ae7, []int{2}
-}
-func (m *CronJobSpec) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
- b = b[:cap(b)]
- n, err := m.MarshalToSizedBuffer(b)
- if err != nil {
- return nil, err
- }
- return b[:n], nil
-}
-func (m *CronJobSpec) XXX_Merge(src proto.Message) {
- xxx_messageInfo_CronJobSpec.Merge(m, src)
-}
-func (m *CronJobSpec) XXX_Size() int {
- return m.Size()
-}
-func (m *CronJobSpec) XXX_DiscardUnknown() {
- xxx_messageInfo_CronJobSpec.DiscardUnknown(m)
-}
+func (m *CronJobSpec) Reset() { *m = CronJobSpec{} }
+func (*CronJobSpec) ProtoMessage() {}
+func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} }
-var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo
+func (m *CronJobStatus) Reset() { *m = CronJobStatus{} }
+func (*CronJobStatus) ProtoMessage() {}
+func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} }
-func (m *CronJobStatus) Reset() { *m = CronJobStatus{} }
-func (*CronJobStatus) ProtoMessage() {}
-func (*CronJobStatus) Descriptor() ([]byte, []int) {
- return fileDescriptor_e57b277b05179ae7, []int{3}
-}
-func (m *CronJobStatus) XXX_Unmarshal(b []byte) error {
- return m.Unmarshal(b)
-}
-func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronJobStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronJobStatus.Merge(m, src) -} -func (m *CronJobStatus) XXX_Size() int { - return m.Size() -} -func (m *CronJobStatus) XXX_DiscardUnknown() { - xxx_messageInfo_CronJobStatus.DiscardUnknown(m) -} +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo - -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{4} -} -func (m *JobTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplate.Merge(m, src) -} -func (m *JobTemplate) XXX_Size() int { - return m.Size() -} -func (m *JobTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplate proto.InternalMessageInfo - -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_e57b277b05179ae7, []int{5} -} -func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplateSpec.Merge(m, src) -} -func (m *JobTemplateSpec) XXX_Size() int { - return m.Size() -} -func (m *JobTemplateSpec) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob") @@ -221,68 +89,10 @@ func init() { proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptor_e57b277b05179ae7) -} - -var fileDescriptor_e57b277b05179ae7 = []byte{ - // 771 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, - 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, - 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, - 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 
0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, - 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, - 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, - 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, - 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, - 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, - 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, - 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, - 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, - 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, - 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, - 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, - 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, - 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, - 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, - 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, - 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, - 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, - 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, - 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, - 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, - 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, - 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, - 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, - 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, - 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, - 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, - 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, - 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, - 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, - 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, - 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, - 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, - 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, - 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, - 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, - 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 
0x00, 0x54, 0x93, 0xea, - 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, - 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, - 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, - 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, - 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, - 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, - 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, - 0x07, 0x00, 0x00, -} - func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -290,52 +100,41 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -343,46 +142,37 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { 
- size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -390,67 +180,58 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.FailedJobsHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) - i-- - dAtA[i] = 0x38 - } - if m.SuccessfulJobsHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) - i-- - dAtA[i] = 0x30 - } - { - size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i += copy(dAtA[i:], m.Schedule) + if m.StartingDeadlineSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) } - i-- - dAtA[i] = 0x2a + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i += copy(dAtA[i:], m.ConcurrencyPolicy) if m.Suspend != nil { - i-- + dAtA[i] = 0x20 + i++ if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 + i++ } - i -= len(m.ConcurrencyPolicy) - copy(dAtA[i:], m.ConcurrencyPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i-- - dAtA[i] = 0x1a - if m.StartingDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) - i-- - dAtA[i] = 0x10 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) + n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i -= len(m.Schedule) - copy(dAtA[i:], m.Schedule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n5 + if m.SuccessfulJobsHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + } + return i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -458,48 +239,39 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.LastScheduleTime != nil { - { - size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Active) > 0 { + for _, msg := range m.Active { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x22 } - if len(m.Active) > 0 { - for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.LastScheduleTime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) + n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n6 } - return len(dAtA) - i, nil + return i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -507,42 +279,33 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n7 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n8 + return i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -550,53 +313,57 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n9 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n10 + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + 
dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *CronJob) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -609,9 +376,6 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -626,9 +390,6 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Schedule) @@ -653,9 +414,6 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Active) > 0 { @@ -672,9 +430,6 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -685,9 +440,6 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -698,7 +450,14 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -708,7 +467,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -719,14 +478,9 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]CronJob{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -751,14 +505,9 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } - repeatedStringForActive := "[]ObjectReference{" - for _, f := range this.Active { - 
repeatedStringForActive += fmt.Sprintf("%v", f) + "," - } - repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + repeatedStringForActive + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, + `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `}`, }, "") return s @@ -768,7 +517,7 @@ func (this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -779,8 +528,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -808,7 +557,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -836,7 +585,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -845,9 +594,6 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -869,7 +615,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -878,9 +624,6 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -902,7 +645,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -911,9 +654,6 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -930,9 +670,6 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } 
if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -960,7 +697,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -988,7 +725,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -997,9 +734,6 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1021,7 +755,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1030,9 +764,6 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1050,9 +781,6 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1080,7 +808,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1108,7 +836,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1118,9 +846,6 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1140,7 +865,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1160,7 +885,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1170,9 +895,6 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1192,7 +914,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1213,7 +935,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1222,9 +944,6 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1246,7 +965,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1266,7 +985,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1281,9 +1000,6 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1311,7 +1027,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1339,7 +1055,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1348,13 +1064,10 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, v11.ObjectReference{}) + m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1373,7 +1086,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1382,14 +1095,11 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &v1.Time{} + m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1404,9 +1114,6 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1434,7 +1141,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1462,7 +1169,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1471,9 +1178,6 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1495,7 +1199,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1504,9 +1208,6 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1523,9 +1224,6 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1553,7 +1251,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1581,7 +1279,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1590,9 +1288,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1614,7 +1309,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1623,9 +1318,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1642,9 +1334,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1660,7 +1349,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1692,8 +1380,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1710,34 +1400,110 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 771 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xc7, 
0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, + 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, + 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, + 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, + 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, + 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, + 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, + 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, + 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, + 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, + 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, + 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, + 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, + 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, + 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, + 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, + 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, + 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, + 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, + 0x22, 0x24, 0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, + 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, + 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, + 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, + 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, + 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, + 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, + 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, + 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, + 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, + 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, + 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, + 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, + 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, + 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, + 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, + 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, + 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 
0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, + 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, + 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, + 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, + 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, + 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, + 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, + 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, + 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, + 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, + 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, + 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto index 995b4f3..043b355 100644 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ b/vendor/k8s.io/api/batch/v1beta1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v1beta1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -112,12 +112,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. 
- // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -125,12 +125,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go index 2978747..cb5c9ba 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ b/vendor/k8s.io/api/batch/v1beta1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go index ecc9144..abbdfec 100644 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go index 7c9dcb7..1c8bc44 100644 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go index 3044b0c..f4ed01a 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ b/vendor/k8s.io/api/batch/v2alpha1/doc.go @@ -15,7 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go index 3e58dbb..6ab41eb 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go @@ -14,25 +14,37 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto +// DO NOT EDIT! +/* + Package v2alpha1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto + + It has these top-level messages: + CronJob + CronJobList + CronJobSpec + CronJobStatus + JobTemplate + JobTemplateSpec +*/ package v2alpha1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -43,175 +55,31 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { - return fileDescriptor_5495a0550fe29c46, []int{0} -} -func (m *CronJob) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CronJob) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronJob) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronJob.Merge(m, src) -} -func (m *CronJob) XXX_Size() int { - return m.Size() -} -func (m *CronJob) XXX_DiscardUnknown() { - xxx_messageInfo_CronJob.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_CronJob proto.InternalMessageInfo - -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { - return fileDescriptor_5495a0550fe29c46, []int{1} -} -func (m *CronJobList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CronJobList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronJobList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronJobList.Merge(m, src) -} -func (m *CronJobList) XXX_Size() int { - return m.Size() -} -func (m *CronJobList) XXX_DiscardUnknown() { - xxx_messageInfo_CronJobList.DiscardUnknown(m) -} +func (m *CronJob) Reset() { *m = CronJob{} } +func (*CronJob) ProtoMessage() {} +func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_CronJobList proto.InternalMessageInfo +func (m *CronJobList) Reset() { *m = CronJobList{} } +func (*CronJobList) ProtoMessage() {} +func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5495a0550fe29c46, []int{2} -} -func (m *CronJobSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CronJobSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronJobSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronJobSpec.Merge(m, src) -} -func (m *CronJobSpec) XXX_Size() int { - return m.Size() -} -func (m *CronJobSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CronJobSpec.DiscardUnknown(m) -} +func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } +func (*CronJobSpec) ProtoMessage() {} +func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_CronJobSpec proto.InternalMessageInfo +func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } +func (*CronJobStatus) ProtoMessage() {} +func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_5495a0550fe29c46, []int{3} -} -func (m *CronJobStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CronJobStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CronJobStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_CronJobStatus.Merge(m, src) -} -func (m *CronJobStatus) XXX_Size() int { - return m.Size() -} -func (m *CronJobStatus) XXX_DiscardUnknown() { - xxx_messageInfo_CronJobStatus.DiscardUnknown(m) -} +func (m *JobTemplate) Reset() { *m = JobTemplate{} } +func (*JobTemplate) ProtoMessage() {} +func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_CronJobStatus proto.InternalMessageInfo - -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { - return fileDescriptor_5495a0550fe29c46, []int{4} -} -func (m *JobTemplate) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplate) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplate.Merge(m, src) -} -func (m *JobTemplate) XXX_Size() int { - return m.Size() -} -func (m *JobTemplate) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplate.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplate proto.InternalMessageInfo - -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5495a0550fe29c46, []int{5} -} -func (m *JobTemplateSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *JobTemplateSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *JobTemplateSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_JobTemplateSpec.Merge(m, src) -} -func (m *JobTemplateSpec) XXX_Size() int { - return m.Size() -} -func (m *JobTemplateSpec) XXX_DiscardUnknown() { - xxx_messageInfo_JobTemplateSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_JobTemplateSpec proto.InternalMessageInfo +func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } +func (*JobTemplateSpec) 
ProtoMessage() {} +func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func init() { proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") @@ -221,68 +89,10 @@ func init() { proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptor_5495a0550fe29c46) -} - -var fileDescriptor_5495a0550fe29c46 = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, - 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, - 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, - 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, - 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, - 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, - 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, - 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, - 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, - 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, - 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, - 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, - 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, - 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, - 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, - 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, - 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, - 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, - 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, - 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, - 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, - 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, - 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, - 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, - 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, - 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, - 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, - 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, - 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, - 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 
0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, - 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, - 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, - 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, - 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, - 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, - 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, - 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, - 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, - 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, - 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, - 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, - 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, - 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, - 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, - 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, - 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, - 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, - 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, - 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, -} - func (m *CronJob) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -290,52 +100,41 @@ func (m *CronJob) Marshal() (dAtA []byte, err error) { } func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronJob) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *CronJobList) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -343,46 +142,37 @@ func (m *CronJobList) Marshal() (dAtA []byte, err error) { } func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronJobList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -390,67 +180,58 @@ func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { } func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronJobSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.FailedJobsHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) - i-- - dAtA[i] = 0x38 - } - if m.SuccessfulJobsHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) - i-- - dAtA[i] = 0x30 - } - { - size, err := m.JobTemplate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) + i += copy(dAtA[i:], m.Schedule) + if m.StartingDeadlineSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) } - i-- - dAtA[i] = 0x2a + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) + i += copy(dAtA[i:], m.ConcurrencyPolicy) if m.Suspend != nil { - i-- + dAtA[i] = 0x20 + i++ if *m.Suspend { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 + i++ } - i -= len(m.ConcurrencyPolicy) - copy(dAtA[i:], m.ConcurrencyPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i-- - dAtA[i] = 0x1a - if m.StartingDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) - i-- - dAtA[i] = 0x10 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) + n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i -= len(m.Schedule) - copy(dAtA[i:], m.Schedule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n5 + if 
m.SuccessfulJobsHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) + } + if m.FailedJobsHistoryLimit != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) + } + return i, nil } func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -458,48 +239,39 @@ func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { } func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CronJobStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.LastScheduleTime != nil { - { - size, err := m.LastScheduleTime.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Active) > 0 { + for _, msg := range m.Active { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x22 } - if len(m.Active) > 0 { - for iNdEx := len(m.Active) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Active[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.LastScheduleTime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) + n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n6 } - return len(dAtA) - i, nil + return i, nil } func (m *JobTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -507,42 +279,33 @@ func (m *JobTemplate) Marshal() (dAtA []byte, err error) { } func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *JobTemplate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n7 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n8, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n8 + return i, nil } func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -550,53 +313,57 @@ func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { } func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - 
-func (m *JobTemplateSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n9 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n10, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n10 + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *CronJob) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -609,9 +376,6 @@ func (m *CronJob) Size() (n int) { } func (m *CronJobList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -626,9 +390,6 @@ func (m *CronJobList) Size() (n int) { } func (m *CronJobSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Schedule) @@ -653,9 +414,6 @@ func (m *CronJobSpec) Size() (n int) { } func (m *CronJobStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Active) > 0 { @@ -672,9 +430,6 @@ func (m *CronJobStatus) Size() (n int) { } func (m *JobTemplate) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -685,9 +440,6 @@ func (m *JobTemplate) Size() (n int) { } func (m *JobTemplateSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -698,7 +450,14 @@ func (m *JobTemplateSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -708,7 +467,7 @@ func (this *CronJob) String() string { return "nil" } s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -719,14 +478,9 @@ func (this *CronJobList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]CronJob{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CronJob", "CronJob", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -751,14 +505,9 @@ func (this *CronJobStatus) String() string { if this == nil { return "nil" } - repeatedStringForActive := "[]ObjectReference{" - for _, f := range this.Active { - repeatedStringForActive += fmt.Sprintf("%v", f) + "," - } - repeatedStringForActive += "}" s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + repeatedStringForActive + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "v1.Time", 1) + `,`, + `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, `}`, }, "") return s @@ -768,7 +517,7 @@ func (this *JobTemplate) String() string { return "nil" } s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -779,8 +528,8 @@ func (this *JobTemplateSpec) String() string { return "nil" } s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Spec), "JobSpec", "v12.JobSpec", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -808,7 +557,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -836,7 +585,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -845,9 +594,6 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -869,7 +615,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -878,9 +624,6 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -902,7 +645,7 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -911,9 +654,6 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -930,9 +670,6 @@ func (m *CronJob) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -960,7 +697,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -988,7 +725,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -997,9 +734,6 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1021,7 +755,7 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1030,9 +764,6 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1050,9 +781,6 @@ func (m *CronJobList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1080,7 +808,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1108,7 +836,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1118,9 +846,6 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1140,7 +865,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1160,7 +885,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { 
break } @@ -1170,9 +895,6 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1192,7 +914,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1213,7 +935,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1222,9 +944,6 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1246,7 +965,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1266,7 +985,7 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1281,9 +1000,6 @@ func (m *CronJobSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1311,7 +1027,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1339,7 +1055,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1348,13 +1064,10 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Active = append(m.Active, v11.ObjectReference{}) + m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1373,7 +1086,7 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1382,14 +1095,11 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.LastScheduleTime == nil { - m.LastScheduleTime = &v1.Time{} + m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1404,9 +1114,6 @@ func (m *CronJobStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1434,7 +1141,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1462,7 +1169,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } 
b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1471,9 +1178,6 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1495,7 +1199,7 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1504,9 +1208,6 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1523,9 +1224,6 @@ func (m *JobTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1553,7 +1251,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1581,7 +1279,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1590,9 +1288,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1614,7 +1309,7 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1623,9 +1318,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1642,9 +1334,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1660,7 +1349,6 @@ func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1692,8 +1380,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1710,34 +1400,110 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := 
skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 774 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, + 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, + 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, + 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, + 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, + 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, + 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, + 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, + 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, + 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, + 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, + 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, + 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, + 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, + 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, + 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, + 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, + 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, + 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, + 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, + 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, + 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, + 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, + 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, + 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, + 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 
0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, + 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, + 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, + 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, + 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, + 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, + 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, + 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, + 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, + 0x9a, 0xa9, 0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, + 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, + 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, + 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, + 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, + 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, + 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, + 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, + 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, + 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, + 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, + 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, + 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, + 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, + 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto index 0bba13b..4321c33 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ b/vendor/k8s.io/api/batch/v2alpha1/generated.proto @@ -33,17 +33,17 @@ option go_package = "v2alpha1"; // CronJob represents the configuration of a single cron job. message CronJob { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional CronJobSpec spec = 2; // Current status of a cron job. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional CronJobStatus status = 3; } @@ -51,7 +51,7 @@ message CronJob { // CronJobList is a collection of cron jobs. message CronJobList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -110,12 +110,12 @@ message CronJobStatus { // JobTemplate describes a template for creating copies of a predefined pod. message JobTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional JobTemplateSpec template = 2; } @@ -123,12 +123,12 @@ message JobTemplate { // JobTemplateSpec describes the data a Job should have when created from a template message JobTemplateSpec { // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional k8s.io.api.batch.v1.JobSpec spec = 2; } diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go index 465e614..cccff94 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types.go @@ -28,12 +28,12 @@ import ( type JobTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -41,12 +41,12 @@ type JobTemplate struct { // JobTemplateSpec describes the data a Job should have when created from a template type JobTemplateSpec struct { // Standard object's metadata of the jobs created from this template. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -58,17 +58,17 @@ type JobTemplateSpec struct { type CronJob struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -80,7 +80,7 @@ type CronJobList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go index bc80eca..f448a92 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go @@ -29,9 +29,9 @@ package v2alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_CronJob = map[string]string{ "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of a cron job, including the schedule. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (CronJob) SwaggerDoc() map[string]string { @@ -40,7 +40,7 @@ func (CronJob) SwaggerDoc() map[string]string { var map_CronJobList = map[string]string{ "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is the list of CronJobs.", } @@ -75,8 +75,8 @@ func (CronJobStatus) SwaggerDoc() map[string]string { var map_JobTemplate = map[string]string{ "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (JobTemplate) SwaggerDoc() map[string]string { @@ -85,8 +85,8 @@ func (JobTemplate) SwaggerDoc() map[string]string { var map_JobTemplateSpec = map[string]string{ "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (JobTemplateSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go index 1b03f67..20d87e7 100644 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go @@ -57,7 +57,7 @@ func (in *CronJob) DeepCopyObject() runtime.Object { func (in *CronJobList) DeepCopyInto(out *CronJobList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CronJob, len(*in)) diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go index 9055248..fb23aad 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ b/vendor/k8s.io/api/certificates/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=certificates.k8s.io - package v1beta1 // import "k8s.io/api/certificates/v1beta1" diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go index 24fa4bf..eda1599 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go @@ -14,24 +14,36 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto + + It has these top-level messages: + CertificateSigningRequest + CertificateSigningRequestCondition + CertificateSigningRequestList + CertificateSigningRequestSpec + CertificateSigningRequestStatus + ExtraValue +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,250 +54,54 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } func (*CertificateSigningRequest) ProtoMessage() {} func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{0} -} -func (m *CertificateSigningRequest) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateSigningRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CertificateSigningRequest) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateSigningRequest.Merge(m, src) -} -func (m *CertificateSigningRequest) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{0} } -func (m *CertificateSigningRequest) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateSigningRequest.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateSigningRequest proto.InternalMessageInfo func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } func (*CertificateSigningRequestCondition) ProtoMessage() {} func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{1} -} -func (m *CertificateSigningRequestCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) + return fileDescriptorGenerated, []int{1} } -func (m *CertificateSigningRequestCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CertificateSigningRequestCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateSigningRequestCondition.Merge(m, src) -} -func (m *CertificateSigningRequestCondition) XXX_Size() int { - return m.Size() -} -func (m *CertificateSigningRequestCondition) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateSigningRequestCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateSigningRequestCondition proto.InternalMessageInfo func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } func (*CertificateSigningRequestList) ProtoMessage() {} func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{2} + return fileDescriptorGenerated, []int{2} } -func (m *CertificateSigningRequestList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateSigningRequestList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CertificateSigningRequestList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateSigningRequestList.Merge(m, src) -} -func (m *CertificateSigningRequestList) XXX_Size() int { - return m.Size() -} -func (m *CertificateSigningRequestList) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateSigningRequestList.DiscardUnknown(m) -} - -var xxx_messageInfo_CertificateSigningRequestList proto.InternalMessageInfo func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } func (*CertificateSigningRequestSpec) ProtoMessage() {} func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_09d156762b8218ef, []int{3} -} -func (m *CertificateSigningRequestSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateSigningRequestSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CertificateSigningRequestSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateSigningRequestSpec.Merge(m, src) -} -func (m *CertificateSigningRequestSpec) XXX_Size() int { - return m.Size() -} -func (m *CertificateSigningRequestSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateSigningRequestSpec.DiscardUnknown(m) + return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_CertificateSigningRequestSpec proto.InternalMessageInfo - func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } func (*CertificateSigningRequestStatus) ProtoMessage() {} func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{4} -} -func (m *CertificateSigningRequestStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CertificateSigningRequestStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CertificateSigningRequestStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_CertificateSigningRequestStatus.Merge(m, src) -} -func (m *CertificateSigningRequestStatus) XXX_Size() int { - return m.Size() -} -func (m *CertificateSigningRequestStatus) XXX_DiscardUnknown() { - xxx_messageInfo_CertificateSigningRequestStatus.DiscardUnknown(m) + return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_CertificateSigningRequestStatus proto.InternalMessageInfo - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { - return fileDescriptor_09d156762b8218ef, []int{5} -} -func (m *ExtraValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExtraValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExtraValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExtraValue.Merge(m, src) -} -func (m *ExtraValue) XXX_Size() int { - return m.Size() -} -func (m *ExtraValue) XXX_DiscardUnknown() { - xxx_messageInfo_ExtraValue.DiscardUnknown(m) -} - -var xxx_messageInfo_ExtraValue proto.InternalMessageInfo +func (m *ExtraValue) Reset() { *m = ExtraValue{} } +func (*ExtraValue) ProtoMessage() {} +func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func init() { proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") - proto.RegisterMapType((map[string]ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec.ExtraEntry") 
proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptor_09d156762b8218ef) -} - -var fileDescriptor_09d156762b8218ef = []byte{ - // 824 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xf6, 0xfa, 0xdb, 0xe3, 0x90, 0x56, 0x23, 0x54, 0x2d, 0x91, 0xba, 0x1b, 0xad, 0x00, 0x85, - 0x8f, 0xce, 0x92, 0x0a, 0x41, 0x94, 0x03, 0x82, 0x0d, 0x15, 0x44, 0xb4, 0x20, 0x4d, 0x1a, 0x0e, - 0x08, 0x89, 0x8e, 0xd7, 0x6f, 0x37, 0x53, 0x77, 0x3f, 0xd8, 0x99, 0x35, 0xf8, 0xd6, 0x9f, 0xc0, - 0x91, 0x0b, 0x12, 0x3f, 0x27, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x59, 0xc4, 0xdc, 0xf9, 0x01, 0x3d, - 0xa1, 0x99, 0x1d, 0x7b, 0x8d, 0x23, 0xd7, 0x55, 0x73, 0xdb, 0xf7, 0x79, 0xdf, 0xe7, 0x79, 0x3f, - 0x67, 0xd1, 0x97, 0xa3, 0x03, 0x41, 0x78, 0xea, 0x8f, 0x8a, 0x01, 0xe4, 0x09, 0x48, 0x10, 0xfe, - 0x18, 0x92, 0x61, 0x9a, 0xfb, 0xc6, 0xc1, 0x32, 0xee, 0x87, 0x90, 0x4b, 0xfe, 0x90, 0x87, 0x4c, - 0xbb, 0xf7, 0x07, 0x20, 0xd9, 0xbe, 0x1f, 0x41, 0x02, 0x39, 0x93, 0x30, 0x24, 0x59, 0x9e, 0xca, - 0x14, 0xbb, 0x25, 0x81, 0xb0, 0x8c, 0x93, 0x65, 0x02, 0x31, 0x84, 0x9d, 0x5b, 0x11, 0x97, 0x67, - 0xc5, 0x80, 0x84, 0x69, 0xec, 0x47, 0x69, 0x94, 0xfa, 0x9a, 0x37, 0x28, 0x1e, 0x6a, 0x4b, 0x1b, - 0xfa, 0xab, 0xd4, 0xdb, 0xf9, 0xb0, 0x2a, 0x20, 0x66, 0xe1, 0x19, 0x4f, 0x20, 0x9f, 0xf8, 0xd9, - 0x28, 0x52, 0x80, 0xf0, 0x63, 0x90, 0xcc, 0x1f, 0x5f, 0xaa, 0x62, 0xc7, 0x5f, 0xc7, 0xca, 0x8b, - 0x44, 0xf2, 0x18, 0x2e, 0x11, 0x3e, 0xda, 0x44, 0x10, 0xe1, 0x19, 0xc4, 0x6c, 0x95, 0xe7, 0xfd, - 0x51, 0x47, 0x6f, 0x1c, 0x55, 0x6d, 0x9e, 0xf0, 0x28, 0xe1, 0x49, 0x44, 0xe1, 0xc7, 0x02, 0x84, - 0xc4, 0x0f, 0x50, 0x57, 0x55, 0x38, 0x64, 0x92, 0xd9, 0xd6, 0xae, 0xb5, 0xd7, 0xbf, 0xfd, 0x01, - 0xa9, 0xe6, 0xb3, 0x48, 0x44, 0xb2, 0x51, 0xa4, 0x00, 0x41, 0x54, 0x34, 0x19, 0xef, 0x93, 0x6f, - 0x06, 0x8f, 0x20, 0x94, 0xf7, 0x40, 0xb2, 0x00, 0x9f, 0x4f, 0xdd, 0xda, 0x6c, 0xea, 0xa2, 0x0a, - 0xa3, 0x0b, 0x55, 0xfc, 0x00, 0x35, 0x45, 0x06, 0xa1, 0x5d, 0xd7, 0xea, 0x9f, 0x90, 0x0d, 0xd3, - 0x27, 0x6b, 0x6b, 0x3d, 0xc9, 0x20, 0x0c, 0xb6, 0x4c, 0xae, 0xa6, 0xb2, 0xa8, 0x56, 0xc6, 0x67, - 0xa8, 0x2d, 0x24, 0x93, 0x85, 0xb0, 0x1b, 0x3a, 0xc7, 0xa7, 0x57, 0xc8, 0xa1, 0x75, 0x82, 0x6d, - 0x93, 0xa5, 0x5d, 0xda, 0xd4, 0xe8, 0x7b, 0xbf, 0xd5, 0x91, 0xb7, 0x96, 0x7b, 0x94, 0x26, 0x43, - 0x2e, 0x79, 0x9a, 0xe0, 0x03, 0xd4, 0x94, 0x93, 0x0c, 0xf4, 0x40, 0x7b, 0xc1, 0x9b, 0xf3, 0x92, - 0xef, 0x4f, 0x32, 0x78, 0x3e, 0x75, 0x5f, 0x5f, 0x8d, 0x57, 0x38, 0xd5, 0x0c, 0xfc, 0x36, 0x6a, - 0xe7, 0xc0, 0x44, 0x9a, 0xe8, 0x71, 0xf5, 0xaa, 0x42, 0xa8, 0x46, 0xa9, 0xf1, 0xe2, 0x77, 0x50, - 0x27, 0x06, 0x21, 0x58, 0x04, 0xba, 0xe7, 0x5e, 0x70, 0xcd, 0x04, 0x76, 0xee, 0x95, 0x30, 0x9d, - 0xfb, 0xf1, 0x23, 0xb4, 0xfd, 0x98, 0x09, 0x79, 0x9a, 0x0d, 0x99, 0x84, 0xfb, 0x3c, 0x06, 0xbb, - 0xa9, 0xa7, 0xf4, 0xee, 0xcb, 0xed, 0x59, 0x31, 0x82, 0x1b, 0x46, 0x7d, 0xfb, 0xee, 0xff, 0x94, - 0xe8, 0x8a, 0xb2, 0x37, 0xb5, 0xd0, 0xcd, 0xb5, 0xf3, 0xb9, 0xcb, 0x85, 0xc4, 0xdf, 0x5f, 0xba, - 0x37, 0xf2, 0x72, 0x75, 0x28, 0xb6, 0xbe, 0xb6, 0xeb, 0xa6, 0x96, 0xee, 0x1c, 0x59, 0xba, 0xb5, - 0x1f, 0x50, 0x8b, 0x4b, 0x88, 0x85, 0x5d, 0xdf, 0x6d, 0xec, 0xf5, 0x6f, 0x1f, 0xbe, 0xfa, 0x21, - 0x04, 0xaf, 0x99, 0x34, 0xad, 0x63, 0x25, 0x48, 0x4b, 
0x5d, 0xef, 0xdf, 0xc6, 0x0b, 0x1a, 0x54, - 0x27, 0x89, 0xdf, 0x42, 0x9d, 0xbc, 0x34, 0x75, 0x7f, 0x5b, 0x41, 0x5f, 0x6d, 0xc5, 0x44, 0xd0, - 0xb9, 0x0f, 0x13, 0x84, 0x04, 0x8f, 0x12, 0xc8, 0xbf, 0x66, 0x31, 0xd8, 0x9d, 0x72, 0xd9, 0xea, - 0x0d, 0x9d, 0x2c, 0x50, 0xba, 0x14, 0x81, 0x09, 0x6a, 0x17, 0x6a, 0x9d, 0xc2, 0x6e, 0xed, 0x36, - 0xf6, 0x7a, 0xc1, 0x0d, 0x75, 0x14, 0xa7, 0x1a, 0x79, 0x3e, 0x75, 0xbb, 0x5f, 0xc1, 0x44, 0x1b, - 0xd4, 0x44, 0xe1, 0xf7, 0x51, 0xb7, 0x10, 0x90, 0x27, 0x4a, 0xbd, 0x3c, 0xa5, 0xc5, 0xdc, 0x4e, - 0x0d, 0x4e, 0x17, 0x11, 0xf8, 0x26, 0x6a, 0x14, 0x7c, 0x68, 0x4e, 0xa9, 0x6f, 0x02, 0x1b, 0xa7, - 0xc7, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x75, 0x72, 0xa4, - 0x92, 0x7f, 0xa1, 0x11, 0x6a, 0x3c, 0x38, 0x41, 0x2d, 0xf8, 0x59, 0xe6, 0xcc, 0x6e, 0xeb, 0xd1, - 0x1f, 0x5f, 0xed, 0x9d, 0x93, 0x3b, 0x4a, 0xeb, 0x4e, 0x22, 0xf3, 0x49, 0xb5, 0x09, 0x8d, 0xd1, - 0x32, 0xcd, 0x0e, 0x20, 0x54, 0xc5, 0xe0, 0xeb, 0xa8, 0x31, 0x82, 0x49, 0xf9, 0xe0, 0xa8, 0xfa, - 0xc4, 0x9f, 0xa1, 0xd6, 0x98, 0x3d, 0x2e, 0xc0, 0xfc, 0x77, 0xde, 0xdb, 0x58, 0x8f, 0x56, 0xfb, - 0x56, 0x51, 0x68, 0xc9, 0x3c, 0xac, 0x1f, 0x58, 0xde, 0x9f, 0x16, 0x72, 0x37, 0xfc, 0x2d, 0xf0, - 0x4f, 0x08, 0x85, 0xf3, 0xb7, 0x2c, 0x6c, 0x4b, 0xf7, 0x7f, 0xf4, 0xea, 0xfd, 0x2f, 0xfe, 0x0b, - 0xd5, 0x8f, 0x75, 0x01, 0x09, 0xba, 0x94, 0x0a, 0xef, 0xa3, 0xfe, 0x92, 0xb4, 0xee, 0x74, 0x2b, - 0xb8, 0x36, 0x9b, 0xba, 0xfd, 0x25, 0x71, 0xba, 0x1c, 0xe3, 0x7d, 0x6c, 0xc6, 0xa6, 0x1b, 0xc5, - 0xee, 0xfc, 0xbd, 0x58, 0x7a, 0xaf, 0xbd, 0xd5, 0x7b, 0x3f, 0xec, 0xfe, 0xfa, 0xbb, 0x5b, 0x7b, - 0xf2, 0xd7, 0x6e, 0x2d, 0xb8, 0x75, 0x7e, 0xe1, 0xd4, 0x9e, 0x5e, 0x38, 0xb5, 0x67, 0x17, 0x4e, - 0xed, 0xc9, 0xcc, 0xb1, 0xce, 0x67, 0x8e, 0xf5, 0x74, 0xe6, 0x58, 0xcf, 0x66, 0x8e, 0xf5, 0xf7, - 0xcc, 0xb1, 0x7e, 0xf9, 0xc7, 0xa9, 0x7d, 0xd7, 0x31, 0xdd, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, - 0x69, 0x8d, 0xc8, 0xd3, 0xaf, 0x07, 0x00, 0x00, -} - func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -293,52 +109,41 @@ func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateSigningRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -346,47 +151,37 @@ func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) } func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateSigningRequestCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x1a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + return i, nil } func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -394,46 +189,37 @@ func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateSigningRequestList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -441,21 +227,53 @@ func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.SignerName != nil { - i -= len(*m.SignerName) - copy(dAtA[i:], *m.SignerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SignerName))) - i-- - dAtA[i] = 0x3a + if m.Request != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) + i += copy(dAtA[i:], m.Request) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) + i += copy(dAtA[i:], m.Username) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + if len(m.Groups) > 0 { + for _, s := range m.Groups { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Usages) > 0 { + for _, s := range m.Usages { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } if len(m.Extra) > 0 { keysForExtra := make([]string, 0, len(m.Extra)) @@ -463,71 +281,38 @@ func (m *CertificateSigningRequestSpec) MarshalToSizedBuffer(dAtA []byte) (int, keysForExtra = append(keysForExtra, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for iNdEx := len(keysForExtra) - 1; iNdEx >= 0; iNdEx-- { - v := m.Extra[string(keysForExtra[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForExtra[iNdEx]) - copy(dAtA[i:], keysForExtra[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForExtra[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + for _, k := range keysForExtra { dAtA[i] = 0x32 + i++ + v := m.Extra[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n6, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 } } - if len(m.Usages) > 0 { - for iNdEx := len(m.Usages) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Usages[iNdEx]) - copy(dAtA[i:], m.Usages[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Usages[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Groups[iNdEx]) - copy(dAtA[i:], m.Groups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Groups[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x1a - i -= len(m.Username) - copy(dAtA[i:], m.Username) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i-- - dAtA[i] = 0x12 - if m.Request != nil { - i -= len(m.Request) - copy(dAtA[i:], m.Request) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -535,43 +320,35 @@ func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { } func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CertificateSigningRequestStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Certificate != nil { - i -= len(m.Certificate) - copy(dAtA[i:], m.Certificate) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) - i-- - dAtA[i] = 0x12 - } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + if m.Certificate != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) + i += copy(dAtA[i:], m.Certificate) + } + return i, nil } func (m ExtraValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -579,42 +356,56 @@ func (m ExtraValue) Marshal() (dAtA []byte, err error) { } func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m ExtraValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m) > 0 { - for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m[iNdEx]) - copy(dAtA[i:], m[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) - i-- + for _, s := range m { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m 
*CertificateSigningRequest) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -627,9 +418,6 @@ func (m *CertificateSigningRequest) Size() (n int) { } func (m *CertificateSigningRequestCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -644,9 +432,6 @@ func (m *CertificateSigningRequestCondition) Size() (n int) { } func (m *CertificateSigningRequestList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -661,9 +446,6 @@ func (m *CertificateSigningRequestList) Size() (n int) { } func (m *CertificateSigningRequestSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Request != nil { @@ -695,17 +477,10 @@ func (m *CertificateSigningRequestSpec) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if m.SignerName != nil { - l = len(*m.SignerName) - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *CertificateSigningRequestStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Conditions) > 0 { @@ -722,9 +497,6 @@ func (m *CertificateSigningRequestStatus) Size() (n int) { } func (m ExtraValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m) > 0 { @@ -737,7 +509,14 @@ func (m ExtraValue) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -747,7 +526,7 @@ func (this *CertificateSigningRequest) String() string { return "nil" } s := strings.Join([]string{`&CertificateSigningRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -762,7 +541,7 @@ func (this *CertificateSigningRequestCondition) String() string { `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -771,14 +550,9 @@ func (this *CertificateSigningRequestList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]CertificateSigningRequest{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&CertificateSigningRequestList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + 
`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -804,7 +578,6 @@ func (this *CertificateSigningRequestSpec) String() string { `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, `Extra:` + mapStringForExtra + `,`, - `SignerName:` + valueToStringGenerated(this.SignerName) + `,`, `}`, }, "") return s @@ -813,13 +586,8 @@ func (this *CertificateSigningRequestStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]CertificateSigningRequestCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&CertificateSigningRequestStatus{`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", "CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, `}`, }, "") @@ -848,7 +616,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -876,7 +644,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -885,9 +653,6 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -909,7 +674,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -918,9 +683,6 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -942,7 +704,7 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -951,9 +713,6 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -970,9 +729,6 @@ func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1000,7 +756,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1028,7 +784,7 
@@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1038,9 +794,6 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1060,7 +813,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1070,9 +823,6 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1092,7 +842,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1102,9 +852,6 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1124,7 +871,7 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1133,9 +880,6 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1152,9 +896,6 @@ func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1182,7 +923,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1210,7 +951,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1219,9 +960,6 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1243,7 +981,7 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1252,9 +990,6 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1272,9 +1007,6 @@ func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if 
(iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1302,7 +1034,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1330,7 +1062,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1339,9 +1071,6 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1364,7 +1093,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1374,9 +1103,6 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1396,7 +1122,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1406,9 +1132,6 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1428,7 +1151,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1438,9 +1161,6 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1460,7 +1180,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1470,9 +1190,6 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1492,7 +1209,7 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1501,20 +1218,54 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Extra == nil { m.Extra = make(map[string]ExtraValue) } - var mapkey string - mapvalue := &ExtraValue{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1524,121 +1275,46 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } - } - m.Extra[mapkey] = *mapvalue - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SignerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + if mapmsglen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + mapvalue := 
&ExtraValue{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err } + iNdEx = postmsgIndex + m.Extra[mapkey] = *mapvalue + } else { + var mapvalue ExtraValue + m.Extra[mapkey] = mapvalue } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.SignerName = &s iNdEx = postIndex default: iNdEx = preIndex @@ -1649,9 +1325,6 @@ func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1679,7 +1352,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1707,7 +1380,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1716,9 +1389,6 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1741,7 +1411,7 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1750,9 +1420,6 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1770,9 +1437,6 @@ func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1800,7 +1464,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1828,7 +1492,7 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1838,9 +1502,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1855,9 +1516,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1873,7 +1531,6 @@ func (m *ExtraValue) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1905,8 +1562,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, 
nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1923,34 +1582,112 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x8f, 0xdb, 0x44, + 0x18, 0x8e, 0xf3, 0xb5, 0xc9, 0x64, 0xd9, 0x56, 0x23, 0x54, 0x99, 0x95, 0x6a, 0xaf, 0x2c, 0x40, + 0xcb, 0x47, 0xc7, 0x6c, 0x85, 0x60, 0xb5, 0x07, 0x04, 0x5e, 0x2a, 0x58, 0xd1, 0x0a, 0x69, 0xda, + 0x70, 0x40, 0x48, 0x74, 0xe2, 0xbc, 0x75, 0xa6, 0xa9, 0x3f, 0xf0, 0x8c, 0x03, 0xb9, 0xf5, 0x27, + 0x70, 0xe4, 0x82, 0xc4, 0x2f, 0xe1, 0xbc, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x45, 0x6c, 0xf8, 0x17, + 0x3d, 0xa1, 0x19, 0x4f, 0xe2, 0x90, 0x55, 0x48, 0xd5, 0xbd, 0x79, 0x9e, 0xf7, 0x79, 0x9e, 0xf7, + 0x63, 0xde, 0x31, 0xfa, 0x72, 0x7c, 0x2c, 0x08, 0x4f, 0xfd, 0x71, 0x31, 0x80, 0x3c, 0x01, 0x09, + 0xc2, 0x9f, 0x40, 0x32, 0x4c, 0x73, 0xdf, 0x04, 0x58, 0xc6, 0xfd, 0x10, 0x72, 0xc9, 0x1f, 0xf1, + 0x90, 0xe9, 0xf0, 0xd1, 0x00, 0x24, 0x3b, 0xf2, 0x23, 0x48, 0x20, 0x67, 0x12, 0x86, 0x24, 0xcb, + 0x53, 0x99, 0x62, 0xb7, 0x14, 0x10, 0x96, 0x71, 0xb2, 0x2a, 0x20, 0x46, 0xb0, 0x7f, 0x2b, 0xe2, + 0x72, 0x54, 0x0c, 0x48, 0x98, 0xc6, 0x7e, 0x94, 0x46, 0xa9, 0xaf, 0x75, 0x83, 0xe2, 0x91, 0x3e, + 0xe9, 0x83, 0xfe, 0x2a, 0xfd, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, + 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x54, + 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc1, 0x47, 0xdb, 0x04, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x75, + 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, + 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, + 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x62, 0x93, 0xc9, 0x11, + 0xf9, 0x7a, 0xf0, 0x18, 0x42, 0x79, 0x0f, 0x24, 0x0b, 0xf0, 0xf9, 
0xcc, 0xad, 0xcd, 0x67, 0x2e, + 0xaa, 0x30, 0xba, 0x74, 0xc5, 0x0f, 0x51, 0x53, 0x64, 0x10, 0xda, 0x75, 0xed, 0xfe, 0x09, 0xd9, + 0x32, 0x7d, 0xb2, 0xb1, 0xd6, 0xfb, 0x19, 0x84, 0xc1, 0xae, 0xc9, 0xd5, 0x54, 0x27, 0xaa, 0x9d, + 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4f, + 0xb0, 0x67, 0xb2, 0xb4, 0xcb, 0x33, 0x35, 0xfe, 0xde, 0xaf, 0x75, 0xe4, 0x6d, 0xd4, 0x9e, 0xa6, + 0xc9, 0x90, 0x4b, 0x9e, 0x26, 0xf8, 0x18, 0x35, 0xe5, 0x34, 0x03, 0x3d, 0xd0, 0x6e, 0xf0, 0xe6, + 0xa2, 0xe4, 0x07, 0xd3, 0x0c, 0x5e, 0xcc, 0xdc, 0xd7, 0xd7, 0xf9, 0x0a, 0xa7, 0x5a, 0x81, 0xdf, + 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x51, 0xfc, + 0x0e, 0xda, 0x89, 0x41, 0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0xc4, 0x9d, 0x7b, 0x25, + 0x4c, 0x17, 0x71, 0xfc, 0x18, 0xed, 0x3d, 0x61, 0x42, 0xf6, 0xb3, 0x21, 0x93, 0xf0, 0x80, 0xc7, + 0x60, 0x37, 0xf5, 0x94, 0xde, 0x7d, 0xb9, 0x7b, 0x56, 0x8a, 0xe0, 0x86, 0x71, 0xdf, 0xbb, 0xfb, + 0x1f, 0x27, 0xba, 0xe6, 0xec, 0xcd, 0x2c, 0x74, 0x73, 0xe3, 0x7c, 0xee, 0x72, 0x21, 0xf1, 0x77, + 0x97, 0xf6, 0x8d, 0xbc, 0x5c, 0x1d, 0x4a, 0xad, 0xb7, 0xed, 0xba, 0xa9, 0xa5, 0xb3, 0x40, 0x56, + 0x76, 0xed, 0x7b, 0xd4, 0xe2, 0x12, 0x62, 0x61, 0xd7, 0x0f, 0x1a, 0x87, 0xbd, 0xdb, 0x27, 0xaf, + 0xbe, 0x08, 0xc1, 0x6b, 0x26, 0x4d, 0xeb, 0x4c, 0x19, 0xd2, 0xd2, 0xd7, 0xfb, 0xbd, 0xf1, 0x3f, + 0x0d, 0xaa, 0x95, 0xc4, 0x6f, 0xa1, 0x9d, 0xbc, 0x3c, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0xba, 0x15, + 0xc3, 0xa0, 0x8b, 0x18, 0x7e, 0x1f, 0x75, 0x0a, 0x01, 0x79, 0xc2, 0x62, 0x30, 0x57, 0xbd, 0xec, + 0xab, 0x6f, 0x70, 0xba, 0x64, 0xe0, 0x9b, 0xa8, 0x51, 0xf0, 0xa1, 0xb9, 0xea, 0x9e, 0x21, 0x36, + 0xfa, 0x67, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x0f, 0x1a, + 0x87, 0xdd, 0x00, 0xa9, 0x8d, 0xf9, 0x42, 0x23, 0xd4, 0x44, 0x30, 0x41, 0xed, 0x42, 0xed, 0x83, + 0xb0, 0x5b, 0x9a, 0x73, 0x43, 0x71, 0xfa, 0x1a, 0x79, 0x31, 0x73, 0x3b, 0x5f, 0xc1, 0x54, 0x1f, + 0xa8, 0x61, 0xe1, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, 0x77, + 0x4b, 0xee, 0x28, 0xaf, 0x3b, 0x89, 0xcc, 0xa7, 0xd5, 0x64, 0x35, 0x46, 0xcb, 0x34, 0xfb, 0x80, + 0x50, 0xc5, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x01, 0x51, 0xf5, 0x89, 0x3f, 0x43, 0xad, + 0x09, 0x7b, 0x52, 0x80, 0xf9, 0x8f, 0xbc, 0xb7, 0xb5, 0x1e, 0xed, 0xf6, 0x8d, 0x92, 0xd0, 0x52, + 0x79, 0x52, 0x3f, 0xb6, 0xbc, 0x3f, 0x2d, 0xe4, 0x6e, 0x79, 0xfd, 0xf8, 0x47, 0x84, 0xc2, 0xc5, + 0xdb, 0x14, 0xb6, 0xa5, 0xfb, 0x3f, 0x7d, 0xf5, 0xfe, 0x97, 0xef, 0xbc, 0xfa, 0x51, 0x2e, 0x21, + 0x41, 0x57, 0x52, 0xe1, 0x23, 0xd4, 0x5b, 0xb1, 0xd6, 0x9d, 0xee, 0x06, 0xd7, 0xe6, 0x33, 0xb7, + 0xb7, 0x62, 0x4e, 0x57, 0x39, 0xde, 0xc7, 0x66, 0x6c, 0xba, 0x51, 0xec, 0x2e, 0xf6, 0xdf, 0xd2, + 0x77, 0xdc, 0x5d, 0xdf, 0xdf, 0x93, 0xce, 0x2f, 0xbf, 0xb9, 0xb5, 0xa7, 0x7f, 0x1d, 0xd4, 0x82, + 0x5b, 0xe7, 0x17, 0x4e, 0xed, 0xd9, 0x85, 0x53, 0x7b, 0x7e, 0xe1, 0xd4, 0x9e, 0xce, 0x1d, 0xeb, + 0x7c, 0xee, 0x58, 0xcf, 0xe6, 0x8e, 0xf5, 0x7c, 0xee, 0x58, 0x7f, 0xcf, 0x1d, 0xeb, 0xe7, 0x7f, + 0x9c, 0xda, 0xb7, 0x3b, 0xa6, 0xbb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x6b, 0x5b, 0xf9, + 0x7f, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto index 78d2dbc..5200224 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ b/vendor/k8s.io/api/certificates/v1beta1/generated.proto @@ -73,19 +73,6 @@ message CertificateSigningRequestSpec { // Base64-encoded 
PKCS#10 CSR data optional bytes request = 1; - // Requested signer for the request. It is a qualified name in the form: - // `scope-hostname.io/name`. - // If empty, it will be defaulted: - // 1. If it's a kubelet client certificate, it is assigned - // "kubernetes.io/kube-apiserver-client-kubelet". - // 2. If it's a kubelet serving certificate, it is assigned - // "kubernetes.io/kubelet-serving". - // 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown". - // Distribution of trust for signers happens out of band. - // You can select on this field using `spec.signerName`. - // +optional - optional string signerName = 7; - // allowedUsages specifies a set of usage contexts the key will be // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go index 5a46e63..bb9e82d 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types.go @@ -48,19 +48,6 @@ type CertificateSigningRequestSpec struct { // Base64-encoded PKCS#10 CSR data Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"` - // Requested signer for the request. It is a qualified name in the form: - // `scope-hostname.io/name`. - // If empty, it will be defaulted: - // 1. If it's a kubelet client certificate, it is assigned - // "kubernetes.io/kube-apiserver-client-kubelet". - // 2. If it's a kubelet serving certificate, it is assigned - // "kubernetes.io/kubelet-serving". - // 3. Otherwise, it is assigned "kubernetes.io/legacy-unknown". - // Distribution of trust for signers happens out of band. - // You can select on this field using `spec.signerName`. - // +optional - SignerName *string `json:"signerName,omitempty" protobuf:"bytes,7,opt,name=signerName"` - // allowedUsages specifies a set of usage contexts the key will be // valid for. // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 @@ -85,28 +72,6 @@ type CertificateSigningRequestSpec struct { Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"` } -// Built in signerName values that are honoured by kube-controller-manager. -// None of these usages are related to ServiceAccount token secrets -// `.data[ca.crt]` in any way. -const ( - // Signs certificates that will be honored as client-certs by the - // kube-apiserver. Never auto-approved by kube-controller-manager. - KubeAPIServerClientSignerName = "kubernetes.io/kube-apiserver-client" - - // Signs client certificates that will be honored as client-certs by the - // kube-apiserver for a kubelet. - // May be auto-approved by kube-controller-manager. - KubeAPIServerClientKubeletSignerName = "kubernetes.io/kube-apiserver-client-kubelet" - - // Signs serving certificates that are honored as a valid kubelet serving - // certificate by the kube-apiserver, but has no other guarantees. - KubeletServingSignerName = "kubernetes.io/kubelet-serving" - - // Has no guarantees for trust at all. Some distributions may honor these - // as client certs, but that behavior is not standard kubernetes behavior. 
- LegacyUnknownSignerName = "kubernetes.io/legacy-unknown" -) - // ExtraValue masks the value so protobuf can generate // +protobuf.nullable=true // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -164,27 +129,27 @@ type CertificateSigningRequestList struct { type KeyUsage string const ( - UsageSigning KeyUsage = "signing" - UsageDigitalSignature KeyUsage = "digital signature" - UsageContentCommitment KeyUsage = "content commitment" - UsageKeyEncipherment KeyUsage = "key encipherment" - UsageKeyAgreement KeyUsage = "key agreement" - UsageDataEncipherment KeyUsage = "data encipherment" - UsageCertSign KeyUsage = "cert sign" - UsageCRLSign KeyUsage = "crl sign" - UsageEncipherOnly KeyUsage = "encipher only" - UsageDecipherOnly KeyUsage = "decipher only" - UsageAny KeyUsage = "any" - UsageServerAuth KeyUsage = "server auth" - UsageClientAuth KeyUsage = "client auth" - UsageCodeSigning KeyUsage = "code signing" - UsageEmailProtection KeyUsage = "email protection" - UsageSMIME KeyUsage = "s/mime" - UsageIPsecEndSystem KeyUsage = "ipsec end system" - UsageIPsecTunnel KeyUsage = "ipsec tunnel" - UsageIPsecUser KeyUsage = "ipsec user" - UsageTimestamping KeyUsage = "timestamping" - UsageOCSPSigning KeyUsage = "ocsp signing" - UsageMicrosoftSGC KeyUsage = "microsoft sgc" - UsageNetscapeSGC KeyUsage = "netscape sgc" + UsageSigning KeyUsage = "signing" + UsageDigitalSignature KeyUsage = "digital signature" + UsageContentCommittment KeyUsage = "content commitment" + UsageKeyEncipherment KeyUsage = "key encipherment" + UsageKeyAgreement KeyUsage = "key agreement" + UsageDataEncipherment KeyUsage = "data encipherment" + UsageCertSign KeyUsage = "cert sign" + UsageCRLSign KeyUsage = "crl sign" + UsageEncipherOnly KeyUsage = "encipher only" + UsageDecipherOnly KeyUsage = "decipher only" + UsageAny KeyUsage = "any" + UsageServerAuth KeyUsage = "server auth" + UsageClientAuth KeyUsage = "client auth" + UsageCodeSigning KeyUsage = "code signing" + UsageEmailProtection KeyUsage = "email protection" + UsageSMIME KeyUsage = "s/mime" + UsageIPsecEndSystem KeyUsage = "ipsec end system" + UsageIPsecTunnel KeyUsage = "ipsec tunnel" + UsageIPsecUser KeyUsage = "ipsec user" + UsageTimestamping KeyUsage = "timestamping" + UsageOCSPSigning KeyUsage = "ocsp signing" + UsageMicrosoftSGC KeyUsage = "microsoft sgc" + UsageNetscapSGC KeyUsage = "netscape sgc" ) diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go index a2edb45..f6a7e16 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go @@ -49,14 +49,13 @@ func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { } var map_CertificateSigningRequestSpec = map[string]string{ - "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", - "request": "Base64-encoded PKCS#10 CSR data", - "signerName": "Requested signer for the request. It is a qualified name in the form: `scope-hostname.io/name`. If empty, it will be defaulted:\n 1. If it's a kubelet client certificate, it is assigned\n \"kubernetes.io/kube-apiserver-client-kubelet\".\n 2. If it's a kubelet serving certificate, it is assigned\n \"kubernetes.io/kubelet-serving\".\n 3. 
Otherwise, it is assigned \"kubernetes.io/legacy-unknown\".\nDistribution of trust for signers happens out of band. You can select on this field using `spec.signerName`.", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", - "username": "Information about the requesting user. See user.Info interface for details.", - "uid": "UID information about the requesting user. See user.Info interface for details.", - "groups": "Group information about the requesting user. See user.Info interface for details.", - "extra": "Extra information about the requesting user. See user.Info interface for details.", + "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", + "request": "Base64-encoded PKCS#10 CSR data", + "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", + "username": "Information about the requesting user. See user.Info interface for details.", + "uid": "UID information about the requesting user. See user.Info interface for details.", + "groups": "Group information about the requesting user. See user.Info interface for details.", + "extra": "Extra information about the requesting user. See user.Info interface for details.", } func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go index 11d0f77..1b103f1 100644 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go @@ -73,7 +73,7 @@ func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequ func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CertificateSigningRequest, len(*in)) @@ -110,11 +110,6 @@ func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningReq *out = make([]byte, len(*in)) copy(*out, *in) } - if in.SignerName != nil { - in, out := &in.SignerName, &out.SignerName - *out = new(string) - **out = **in - } if in.Usages != nil { in, out := &in.Usages, &out.Usages *out = make([]KeyUsage, len(*in)) diff --git a/vendor/k8s.io/api/coordination/v1/doc.go b/vendor/k8s.io/api/coordination/v1/doc.go deleted file mode 100644 index fc2f4f2..0000000 --- a/vendor/k8s.io/api/coordination/v1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true - -// +groupName=coordination.k8s.io - -package v1 // import "k8s.io/api/coordination/v1" diff --git a/vendor/k8s.io/api/coordination/v1/generated.pb.go b/vendor/k8s.io/api/coordination/v1/generated.pb.go deleted file mode 100644 index 22c3d62..0000000 --- a/vendor/k8s.io/api/coordination/v1/generated.pb.go +++ /dev/null @@ -1,976 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto - -package v1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{0} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) -} - -var xxx_messageInfo_Lease proto.InternalMessageInfo - -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{1} -} -func (m *LeaseList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LeaseList) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseList.Merge(m, src) -} -func (m *LeaseList) XXX_Size() int { - return m.Size() -} -func (m *LeaseList) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseList.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseList proto.InternalMessageInfo - -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_929e1148ad9baca3, []int{2} -} -func (m 
*LeaseSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LeaseSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseSpec.Merge(m, src) -} -func (m *LeaseSpec) XXX_Size() int { - return m.Size() -} -func (m *LeaseSpec) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1.Lease") - proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1.LeaseList") - proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1.LeaseSpec") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1/generated.proto", fileDescriptor_929e1148ad9baca3) -} - -var fileDescriptor_929e1148ad9baca3 = []byte{ - // 535 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x90, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0x36, 0x91, 0x9a, 0x0d, 0x2d, 0x91, 0x95, 0x83, 0x95, 0x83, 0x5d, 0x22, 0x21, - 0xe5, 0xc2, 0x2e, 0xa9, 0x10, 0x42, 0x9c, 0xc0, 0x20, 0xa0, 0x52, 0x2a, 0x24, 0xb7, 0x27, 0xd4, - 0x03, 0x1b, 0x7b, 0x70, 0x96, 0xd4, 0x5e, 0xb3, 0xbb, 0x0e, 0xea, 0x8d, 0x47, 0xe0, 0xca, 0x63, - 0xc0, 0x53, 0xe4, 0xd8, 0x63, 0x4f, 0x16, 0x31, 0x2f, 0x82, 0x76, 0x93, 0x36, 0x21, 0x49, 0xd5, - 0x8a, 0xdb, 0xee, 0xcc, 0xfc, 0xdf, 0xfc, 0xf3, 0xa3, 0x57, 0xa3, 0x67, 0x12, 0x33, 0x4e, 0x46, - 0xf9, 0x00, 0x44, 0x0a, 0x0a, 0x24, 0x19, 0x43, 0x1a, 0x71, 0x41, 0xe6, 0x0d, 0x9a, 0x31, 0x12, - 0x72, 0x2e, 0x22, 0x96, 0x52, 0xc5, 0x78, 0x4a, 0xc6, 0x3d, 0x12, 0x43, 0x0a, 0x82, 0x2a, 0x88, - 0x70, 0x26, 0xb8, 0xe2, 0x76, 0x7b, 0x36, 0x8b, 0x69, 0xc6, 0xf0, 0xf2, 0x2c, 0x1e, 0xf7, 0xda, - 0x8f, 0x62, 0xa6, 0x86, 0xf9, 0x00, 0x87, 0x3c, 0x21, 0x31, 0x8f, 0x39, 0x31, 0x92, 0x41, 0xfe, - 0xc9, 0xfc, 0xcc, 0xc7, 0xbc, 0x66, 0xa8, 0xf6, 0x93, 0xc5, 0xda, 0x84, 0x86, 0x43, 0x96, 0x82, - 0x38, 0x27, 0xd9, 0x28, 0xd6, 0x05, 0x49, 0x12, 0x50, 0x74, 0x83, 0x81, 0x36, 0xb9, 0x49, 0x25, - 0xf2, 0x54, 0xb1, 0x04, 0xd6, 0x04, 0x4f, 0x6f, 0x13, 0xc8, 0x70, 0x08, 0x09, 0x5d, 0xd5, 0x75, - 0x7e, 0x59, 0xa8, 0xd6, 0x07, 0x2a, 0xc1, 0xfe, 0x88, 0x76, 0xb4, 0x9b, 0x88, 0x2a, 0xea, 0x58, - 0xfb, 0x56, 0xb7, 0x71, 0xf0, 0x18, 0x2f, 0x62, 0xb8, 0x86, 0xe2, 0x6c, 0x14, 0xeb, 0x82, 0xc4, - 0x7a, 0x1a, 0x8f, 0x7b, 0xf8, 0xfd, 0xe0, 0x33, 0x84, 0xea, 0x08, 0x14, 0xf5, 0xed, 0x49, 0xe1, - 0x55, 0xca, 0xc2, 0x43, 0x8b, 0x5a, 0x70, 0x4d, 0xb5, 0xdf, 0xa2, 0xaa, 0xcc, 0x20, 0x74, 0xb6, - 0x0c, 0xfd, 0x21, 0xbe, 0x39, 0x64, 0x6c, 0x2c, 0x1d, 0x67, 0x10, 0xfa, 0xf7, 0xe6, 0xc8, 0xaa, - 0xfe, 0x05, 0x06, 0xd0, 0xf9, 0x69, 0xa1, 0xba, 0x99, 0xe8, 0x33, 0xa9, 0xec, 0xd3, 0x35, 0xe3, - 0xf8, 0x6e, 0xc6, 0xb5, 0xda, 0xd8, 0x6e, 0xce, 0x77, 0xec, 0x5c, 0x55, 0x96, 0x4c, 0xbf, 0x41, - 0x35, 0xa6, 0x20, 0x91, 0xce, 0xd6, 0xfe, 0x76, 0xb7, 0x71, 0xf0, 0xe0, 0x56, 0xd7, 0xfe, 0xee, - 0x9c, 0x56, 0x3b, 0xd4, 0xba, 0x60, 0x26, 0xef, 0xfc, 0xd8, 0x9e, 0x7b, 0xd6, 0x77, 0xd8, 0xcf, - 0xd1, 0xde, 0x90, 0x9f, 0x45, 0x20, 0x0e, 0x23, 0x48, 0x15, 0x53, 0xe7, 0xc6, 0x79, 0xdd, 0xb7, - 0xcb, 0xc2, 0xdb, 0x7b, 0xf7, 0x4f, 0x27, 0x58, 0x99, 0xb4, 0xfb, 0xa8, 0x75, 0xa6, 0x41, 0xaf, - 0x73, 0x61, 0x36, 0x1f, 0x43, 0xc8, 0xd3, 0x48, 0x9a, 0x58, 0x6b, 0xbe, 0x53, 0x16, 0x5e, 0xab, - 0xbf, 0xa1, 0x1f, 0x6c, 0x54, 
0xd9, 0x03, 0xd4, 0xa0, 0xe1, 0x97, 0x9c, 0x09, 0x38, 0x61, 0x09, - 0x38, 0xdb, 0x26, 0x40, 0x72, 0xb7, 0x00, 0x8f, 0x58, 0x28, 0xb8, 0x96, 0xf9, 0xf7, 0xcb, 0xc2, - 0x6b, 0xbc, 0x5c, 0x70, 0x82, 0x65, 0xa8, 0x7d, 0x8a, 0xea, 0x02, 0x52, 0xf8, 0x6a, 0x36, 0x54, - 0xff, 0x6f, 0xc3, 0x6e, 0x59, 0x78, 0xf5, 0xe0, 0x8a, 0x12, 0x2c, 0x80, 0xf6, 0x0b, 0xd4, 0x34, - 0x97, 0x9d, 0x08, 0x9a, 0x4a, 0xa6, 0x6f, 0x93, 0x4e, 0xcd, 0x64, 0xd1, 0x2a, 0x0b, 0xaf, 0xd9, - 0x5f, 0xe9, 0x05, 0x6b, 0xd3, 0x7e, 0x77, 0x32, 0x75, 0x2b, 0x17, 0x53, 0xb7, 0x72, 0x39, 0x75, - 0x2b, 0xdf, 0x4a, 0xd7, 0x9a, 0x94, 0xae, 0x75, 0x51, 0xba, 0xd6, 0x65, 0xe9, 0x5a, 0xbf, 0x4b, - 0xd7, 0xfa, 0xfe, 0xc7, 0xad, 0x7c, 0xd8, 0x1a, 0xf7, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x41, - 0x5e, 0x94, 0x96, 0x5e, 0x04, 0x00, 0x00, -} - -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LeaseList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.LeaseTransitions != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) - i-- - dAtA[i] = 0x28 - } - if m.RenewTime != nil { - { - size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.AcquireTime != nil { - { - size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 
0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.LeaseDurationSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) - i-- - dAtA[i] = 0x10 - } - if m.HolderIdentity != nil { - i -= len(*m.HolderIdentity) - copy(dAtA[i:], *m.HolderIdentity) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *LeaseList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LeaseSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.HolderIdentity != nil { - l = len(*m.HolderIdentity) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.LeaseDurationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.LeaseDurationSeconds)) - } - if m.AcquireTime != nil { - l = m.AcquireTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RenewTime != nil { - l = m.RenewTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.LeaseTransitions != nil { - n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Lease) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LeaseList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Lease{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *LeaseSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LeaseSpec{`, - `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, - `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, - `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, - `}`, - }, "") - return s -} -func 
valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Lease{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HolderIdentity", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.HolderIdentity = &s - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseDurationSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LeaseDurationSeconds = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AcquireTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AcquireTime == nil { - m.AcquireTime = &v1.MicroTime{} - } - if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RenewTime == nil { - m.RenewTime = &v1.MicroTime{} - } - if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseTransitions", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.LeaseTransitions = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/coordination/v1/generated.proto b/vendor/k8s.io/api/coordination/v1/generated.proto deleted file mode 100644 index 4206746..0000000 --- a/vendor/k8s.io/api/coordination/v1/generated.proto +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.coordination.v1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// Lease defines a lease concept. -message Lease { - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional LeaseSpec spec = 2; -} - -// LeaseList is a list of Lease objects. -message LeaseList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated Lease items = 2; -} - -// LeaseSpec is a specification of a Lease. -message LeaseSpec { - // holderIdentity contains the identity of the holder of a current lease. - // +optional - optional string holderIdentity = 1; - - // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last - // observed RenewTime. - // +optional - optional int32 leaseDurationSeconds = 2; - - // acquireTime is a time when the current lease was acquired. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; - - // renewTime is a time when the current holder of a lease has last - // updated the lease. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; - - // leaseTransitions is the number of transitions of a lease between - // holders. - // +optional - optional int32 leaseTransitions = 5; -} - diff --git a/vendor/k8s.io/api/coordination/v1/types.go b/vendor/k8s.io/api/coordination/v1/types.go deleted file mode 100644 index 7a5605a..0000000 --- a/vendor/k8s.io/api/coordination/v1/types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Lease defines a lease concept. -type Lease struct { - metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// LeaseSpec is a specification of a Lease. -type LeaseSpec struct { - // holderIdentity contains the identity of the holder of a current lease. - // +optional - HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` - // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last - // observed RenewTime. - // +optional - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` - // acquireTime is a time when the current lease was acquired. - // +optional - AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty" protobuf:"bytes,3,opt,name=acquireTime"` - // renewTime is a time when the current holder of a lease has last - // updated the lease. - // +optional - RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,4,opt,name=renewTime"` - // leaseTransitions is the number of transitions of a lease between - // holders. - // +optional - LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LeaseList is a list of Lease objects. -type LeaseList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go deleted file mode 100644 index 0f14404..0000000 --- a/vendor/k8s.io/api/coordination/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Lease = map[string]string{ - "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (Lease) SwaggerDoc() map[string]string { - return map_Lease -} - -var map_LeaseList = map[string]string{ - "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (LeaseList) SwaggerDoc() map[string]string { - return map_LeaseList -} - -var map_LeaseSpec = map[string]string{ - "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", - "acquireTime": "acquireTime is a time when the current lease was acquired.", - "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", - "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", -} - -func (LeaseSpec) SwaggerDoc() map[string]string { - return map_LeaseSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go deleted file mode 100644 index 2dd7edd..0000000 --- a/vendor/k8s.io/api/coordination/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. 
- -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Lease) DeepCopyInto(out *Lease) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease. -func (in *Lease) DeepCopy() *Lease { - if in == nil { - return nil - } - out := new(Lease) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Lease) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaseList) DeepCopyInto(out *LeaseList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Lease, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList. -func (in *LeaseList) DeepCopy() *LeaseList { - if in == nil { - return nil - } - out := new(LeaseList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LeaseList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { - *out = *in - if in.HolderIdentity != nil { - in, out := &in.HolderIdentity, &out.HolderIdentity - *out = new(string) - **out = **in - } - if in.LeaseDurationSeconds != nil { - in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds - *out = new(int32) - **out = **in - } - if in.AcquireTime != nil { - in, out := &in.AcquireTime, &out.AcquireTime - *out = (*in).DeepCopy() - } - if in.RenewTime != nil { - in, out := &in.RenewTime, &out.RenewTime - *out = (*in).DeepCopy() - } - if in.LeaseTransitions != nil { - in, out := &in.LeaseTransitions, &out.LeaseTransitions - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec. -func (in *LeaseSpec) DeepCopy() *LeaseSpec { - if in == nil { - return nil - } - out := new(LeaseSpec) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go index 304732d..fecb513 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ b/vendor/k8s.io/api/coordination/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. 
*/ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=coordination.k8s.io - package v1beta1 // import "k8s.io/api/coordination/v1beta1" diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go index 57a314c..6c2dbd9 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go @@ -14,24 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto + + It has these top-level messages: + Lease + LeaseList + LeaseSpec +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,144 +51,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{0} -} -func (m *Lease) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Lease) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lease.Merge(m, src) -} -func (m *Lease) XXX_Size() int { - return m.Size() -} -func (m *Lease) XXX_DiscardUnknown() { - xxx_messageInfo_Lease.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_Lease proto.InternalMessageInfo +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{1} -} -func (m *LeaseList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LeaseList) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseList.Merge(m, src) -} -func (m *LeaseList) XXX_Size() int { - return m.Size() -} -func (m *LeaseList) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseList.DiscardUnknown(m) -} - -var xxx_messageInfo_LeaseList proto.InternalMessageInfo - -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_daca6bcd2ff63a80, []int{2} -} -func (m *LeaseSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LeaseSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LeaseSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_LeaseSpec.Merge(m, src) -} -func (m *LeaseSpec) XXX_Size() int { - return m.Size() -} -func (m *LeaseSpec) XXX_DiscardUnknown() { - xxx_messageInfo_LeaseSpec.DiscardUnknown(m) -} +func (m *LeaseList) Reset() { *m = LeaseList{} } +func (*LeaseList) ProtoMessage() {} +func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_LeaseSpec proto.InternalMessageInfo +func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } +func (*LeaseSpec) ProtoMessage() {} +func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func init() { proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease") proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList") proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptor_daca6bcd2ff63a80) -} - -var fileDescriptor_daca6bcd2ff63a80 = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 
0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, - 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, - 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, - 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, - 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, - 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, - 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, - 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, - 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, - 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, - 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, - 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, - 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, - 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, - 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, - 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, - 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, - 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, - 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, - 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, - 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, - 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, - 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, - 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, - 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, - 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, - 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, - 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, - 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, - 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, - 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, - 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, - 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, -} - func (m *Lease) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -187,42 +81,33 @@ func (m *Lease) Marshal() (dAtA []byte, err error) { } func (m *Lease) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + return i, nil } func (m *LeaseList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -230,46 +115,37 @@ func (m *LeaseList) Marshal() (dAtA []byte, err error) { } func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -277,74 +153,77 @@ func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { } func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LeaseSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.LeaseTransitions != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) - i-- - dAtA[i] = 0x28 + if m.HolderIdentity != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) + i += copy(dAtA[i:], *m.HolderIdentity) } - if m.RenewTime != nil { - { - size, err := m.RenewTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + if m.LeaseDurationSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) } if m.AcquireTime != nil 
{ - { - size, err := m.AcquireTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) + n4, err := m.AcquireTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 } - if m.LeaseDurationSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) - i-- - dAtA[i] = 0x10 + if m.RenewTime != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) + n5, err := m.RenewTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 } - if m.HolderIdentity != nil { - i -= len(*m.HolderIdentity) - copy(dAtA[i:], *m.HolderIdentity) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i-- - dAtA[i] = 0xa + if m.LeaseTransitions != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) } - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Lease) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -355,9 +234,6 @@ func (m *Lease) Size() (n int) { } func (m *LeaseList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -372,9 +248,6 @@ func (m *LeaseList) Size() (n int) { } func (m *LeaseSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.HolderIdentity != nil { @@ -399,7 +272,14 @@ func (m *LeaseSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -409,7 +289,7 @@ func (this *Lease) String() string { return "nil" } s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -419,14 +299,9 @@ func (this *LeaseList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Lease{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Lease", "Lease", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -438,8 +313,8 @@ func (this *LeaseSpec) String() string { s := strings.Join([]string{`&LeaseSpec{`, `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "v1.MicroTime", 1) + `,`, + `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, + `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, `}`, }, "") @@ -468,7 +343,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -496,7 +371,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -505,9 +380,6 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -529,7 +401,7 @@ func (m *Lease) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -538,9 +410,6 @@ func (m *Lease) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -557,9 +426,6 @@ func (m *Lease) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -587,7 +453,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -615,7 +481,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -624,9 +490,6 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -648,7 +511,7 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -657,9 +520,6 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -677,9 +537,6 @@ func (m *LeaseList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -707,7 +564,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -735,7 +592,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -745,9 +602,6 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -768,7 +622,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -788,7 +642,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -797,14 +651,11 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.AcquireTime == nil { - m.AcquireTime = &v1.MicroTime{} + m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} } if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -824,7 +675,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -833,14 +684,11 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.RenewTime == nil { - m.RenewTime = &v1.MicroTime{} + m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} } if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -860,7 +708,7 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -875,9 +723,6 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -893,7 +738,6 @@ func (m *LeaseSpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -925,8 +769,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -943,34 +789,95 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, 
ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, + 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, + 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, + 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, + 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, + 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, + 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, + 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, + 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, + 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, + 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, + 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, + 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, + 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, + 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, + 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, + 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, + 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, + 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, + 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, + 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 
0xc7, + 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, + 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, + 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, + 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, + 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, + 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, + 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, + 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, + 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, + 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, + 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, + 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto index cfc2711..918e0de 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ b/vendor/k8s.io/api/coordination/v1beta1/generated.proto @@ -30,12 +30,12 @@ option go_package = "v1beta1"; // Lease defines a lease concept. message Lease { - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional LeaseSpec spec = 2; } @@ -43,7 +43,7 @@ message Lease { // LeaseList is a list of Lease objects. message LeaseList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go index da88f67..846f728 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types.go @@ -26,12 +26,12 @@ import ( // Lease defines a lease concept. type Lease struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the Lease. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -65,7 +65,7 @@ type LeaseSpec struct { type LeaseList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go index f557d26..4532d32 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go @@ -29,8 +29,8 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_Lease = map[string]string{ "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Lease) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (Lease) SwaggerDoc() map[string]string { var map_LeaseList = map[string]string{ "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go index de69621..a628ac1 100644 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go @@ -55,7 +55,7 @@ func (in *Lease) DeepCopyObject() runtime.Object { func (in *LeaseList) DeepCopyInto(out *LeaseList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Lease, len(*in)) diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go index edc9b4d..16a0cfc 100644 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ b/vendor/k8s.io/api/core/v1/annotation_key_constants.go @@ -78,29 +78,4 @@ const ( // // Not all cloud providers support this annotation, though AWS & GCE do. AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" - - // EndpointsLastChangeTriggerTime is the annotation key, set for endpoints objects, that - // represents the timestamp (stored as RFC 3339 date-time string, e.g. 
'2018-10-22T19:32:52.1Z') - // of the last change, of some Pod or Service object, that triggered the endpoints object change. - // In other words, if a Pod / Service changed at time T0, that change was observed by endpoints - // controller at T1, and the Endpoints object was changed at T2, the - // EndpointsLastChangeTriggerTime would be set to T0. - // - // The "endpoints change trigger" here means any Pod or Service change that resulted in the - // Endpoints object change. - // - // Given the definition of the "endpoints change trigger", please note that this annotation will - // be set ONLY for endpoints object changes triggered by either Pod or Service change. If the - // Endpoints object changes due to other reasons, this annotation won't be set (or updated if it's - // already set). - // - // This annotation will be used to compute the in-cluster network programming latency SLI, see - // https://github.com/kubernetes/community/blob/master/sig-scalability/slos/network_programming_latency.md - EndpointsLastChangeTriggerTime = "endpoints.kubernetes.io/last-change-trigger-time" - - // MigratedPluginsAnnotationKey is the annotation key, set for CSINode objects, that is a comma-separated - // list of in-tree plugins that will be serviced by the CSI backend on the Node represented by CSINode. - // This annotation is used by the Attach Detach Controller to determine whether to use the in-tree or - // CSI Backend for a volume plugin on a specific node. - MigratedPluginsAnnotationKey = "storage.alpha.kubernetes.io/migrated-plugins" ) diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go index 1bdf0b2..96994c6 100644 --- a/vendor/k8s.io/api/core/v1/doc.go +++ b/vendor/k8s.io/api/core/v1/doc.go @@ -16,7 +16,6 @@ limitations under the License. // +k8s:openapi-gen=true // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // Package v1 is the v1 version of the core API. package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go index 8e58752..b569ea8 100644 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ b/vendor/k8s.io/api/core/v1/generated.pb.go @@ -14,29 +14,231 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto + + It has these top-level messages: + AWSElasticBlockStoreVolumeSource + Affinity + AttachedVolume + AvoidPods + AzureDiskVolumeSource + AzureFilePersistentVolumeSource + AzureFileVolumeSource + Binding + CSIPersistentVolumeSource + Capabilities + CephFSPersistentVolumeSource + CephFSVolumeSource + CinderPersistentVolumeSource + CinderVolumeSource + ClientIPConfig + ComponentCondition + ComponentStatus + ComponentStatusList + ConfigMap + ConfigMapEnvSource + ConfigMapKeySelector + ConfigMapList + ConfigMapNodeConfigSource + ConfigMapProjection + ConfigMapVolumeSource + Container + ContainerImage + ContainerPort + ContainerState + ContainerStateRunning + ContainerStateTerminated + ContainerStateWaiting + ContainerStatus + DaemonEndpoint + DownwardAPIProjection + DownwardAPIVolumeFile + DownwardAPIVolumeSource + EmptyDirVolumeSource + EndpointAddress + EndpointPort + EndpointSubset + Endpoints + EndpointsList + EnvFromSource + EnvVar + EnvVarSource + Event + EventList + EventSeries + EventSource + ExecAction + FCVolumeSource + FlexPersistentVolumeSource + FlexVolumeSource + FlockerVolumeSource + GCEPersistentDiskVolumeSource + GitRepoVolumeSource + GlusterfsVolumeSource + HTTPGetAction + HTTPHeader + Handler + HostAlias + HostPathVolumeSource + ISCSIPersistentVolumeSource + ISCSIVolumeSource + KeyToPath + Lifecycle + LimitRange + LimitRangeItem + LimitRangeList + LimitRangeSpec + List + LoadBalancerIngress + LoadBalancerStatus + LocalObjectReference + LocalVolumeSource + NFSVolumeSource + Namespace + NamespaceList + NamespaceSpec + NamespaceStatus + Node + NodeAddress + NodeAffinity + NodeCondition + NodeConfigSource + NodeConfigStatus + NodeDaemonEndpoints + NodeList + NodeProxyOptions + NodeResources + NodeSelector + NodeSelectorRequirement + NodeSelectorTerm + NodeSpec + NodeStatus + NodeSystemInfo + ObjectFieldSelector + ObjectReference + PersistentVolume + PersistentVolumeClaim + PersistentVolumeClaimCondition + PersistentVolumeClaimList + PersistentVolumeClaimSpec + PersistentVolumeClaimStatus + PersistentVolumeClaimVolumeSource + PersistentVolumeList + PersistentVolumeSource + PersistentVolumeSpec + PersistentVolumeStatus + PhotonPersistentDiskVolumeSource + Pod + PodAffinity + PodAffinityTerm + PodAntiAffinity + PodAttachOptions + PodCondition + PodDNSConfig + PodDNSConfigOption + PodExecOptions + PodList + PodLogOptions + PodPortForwardOptions + PodProxyOptions + PodReadinessGate + PodSecurityContext + PodSignature + PodSpec + PodStatus + PodStatusResult + PodTemplate + PodTemplateList + PodTemplateSpec + PortworxVolumeSource + Preconditions + PreferAvoidPodsEntry + PreferredSchedulingTerm + Probe + ProjectedVolumeSource + QuobyteVolumeSource + RBDPersistentVolumeSource + RBDVolumeSource + RangeAllocation + ReplicationController + ReplicationControllerCondition + ReplicationControllerList + ReplicationControllerSpec + ReplicationControllerStatus + ResourceFieldSelector + ResourceQuota + ResourceQuotaList + ResourceQuotaSpec + ResourceQuotaStatus + ResourceRequirements + SELinuxOptions + ScaleIOPersistentVolumeSource + ScaleIOVolumeSource + ScopeSelector + ScopedResourceSelectorRequirement + Secret + SecretEnvSource + SecretKeySelector + SecretList + SecretProjection + SecretReference + SecretVolumeSource + SecurityContext + SerializedReference + Service + ServiceAccount + ServiceAccountList + ServiceAccountTokenProjection + ServiceList + ServicePort + ServiceProxyOptions + 
ServiceSpec + ServiceStatus + SessionAffinityConfig + StorageOSPersistentVolumeSource + StorageOSVolumeSource + Sysctl + TCPSocketAction + Taint + Toleration + TopologySelectorLabelRequirement + TopologySelectorTerm + TypedLocalObjectReference + Volume + VolumeDevice + VolumeMount + VolumeNodeAffinity + VolumeProjection + VolumeSource + VsphereVirtualDiskVolumeSource + WeightedPodAffinityTerm +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - resource "k8s.io/apimachinery/pkg/api/resource" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) +import strings "strings" +import reflect "reflect" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -47,19114 +249,13803 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{0} -} -func (m *AWSElasticBlockStoreVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AWSElasticBlockStoreVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AWSElasticBlockStoreVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_AWSElasticBlockStoreVolumeSource.Merge(m, src) -} -func (m *AWSElasticBlockStoreVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *AWSElasticBlockStoreVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_AWSElasticBlockStoreVolumeSource.DiscardUnknown(m) + return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_AWSElasticBlockStoreVolumeSource proto.InternalMessageInfo +func (m *Affinity) Reset() { *m = Affinity{} } +func (*Affinity) ProtoMessage() {} +func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *Affinity) Reset() { *m = Affinity{} } -func (*Affinity) ProtoMessage() {} -func (*Affinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{1} -} -func (m *Affinity) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Affinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return 
nil, err - } - return b[:n], nil -} -func (m *Affinity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Affinity.Merge(m, src) -} -func (m *Affinity) XXX_Size() int { - return m.Size() -} -func (m *Affinity) XXX_DiscardUnknown() { - xxx_messageInfo_Affinity.DiscardUnknown(m) -} +func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } +func (*AttachedVolume) ProtoMessage() {} +func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_Affinity proto.InternalMessageInfo +func (m *AvoidPods) Reset() { *m = AvoidPods{} } +func (*AvoidPods) ProtoMessage() {} +func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } -func (*AttachedVolume) ProtoMessage() {} -func (*AttachedVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{2} -} -func (m *AttachedVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AttachedVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AttachedVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AttachedVolume.Merge(m, src) -} -func (m *AttachedVolume) XXX_Size() int { - return m.Size() -} -func (m *AttachedVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AttachedVolume.DiscardUnknown(m) +func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } +func (*AzureDiskVolumeSource) ProtoMessage() {} +func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } + +func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } +func (*AzureFilePersistentVolumeSource) ProtoMessage() {} +func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_AttachedVolume proto.InternalMessageInfo +func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } +func (*AzureFileVolumeSource) ProtoMessage() {} +func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *AvoidPods) Reset() { *m = AvoidPods{} } -func (*AvoidPods) ProtoMessage() {} -func (*AvoidPods) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{3} -} -func (m *AvoidPods) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AvoidPods) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AvoidPods) XXX_Merge(src proto.Message) { - xxx_messageInfo_AvoidPods.Merge(m, src) -} -func (m *AvoidPods) XXX_Size() int { - return m.Size() -} -func (m *AvoidPods) XXX_DiscardUnknown() { - xxx_messageInfo_AvoidPods.DiscardUnknown(m) +func (m *Binding) Reset() { *m = Binding{} } +func (*Binding) ProtoMessage() {} +func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } + +func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } +func (*CSIPersistentVolumeSource) ProtoMessage() {} +func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{8} } -var xxx_messageInfo_AvoidPods proto.InternalMessageInfo +func (m *Capabilities) Reset() { *m = Capabilities{} } +func (*Capabilities) 
ProtoMessage() {} +func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } -func (*AzureDiskVolumeSource) ProtoMessage() {} -func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{4} -} -func (m *AzureDiskVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AzureDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AzureDiskVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_AzureDiskVolumeSource.Merge(m, src) -} -func (m *AzureDiskVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *AzureDiskVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_AzureDiskVolumeSource.DiscardUnknown(m) +func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } +func (*CephFSPersistentVolumeSource) ProtoMessage() {} +func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{10} } -var xxx_messageInfo_AzureDiskVolumeSource proto.InternalMessageInfo +func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } +func (*CephFSVolumeSource) ProtoMessage() {} +func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } -func (*AzureFilePersistentVolumeSource) ProtoMessage() {} -func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{5} -} -func (m *AzureFilePersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AzureFilePersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AzureFilePersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_AzureFilePersistentVolumeSource.Merge(m, src) -} -func (m *AzureFilePersistentVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *AzureFilePersistentVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_AzureFilePersistentVolumeSource.DiscardUnknown(m) +func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } +func (*CinderPersistentVolumeSource) ProtoMessage() {} +func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{12} } -var xxx_messageInfo_AzureFilePersistentVolumeSource proto.InternalMessageInfo +func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } +func (*CinderVolumeSource) ProtoMessage() {} +func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } -func (*AzureFileVolumeSource) ProtoMessage() {} -func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{6} -} -func (m *AzureFileVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AzureFileVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], 
nil -} -func (m *AzureFileVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_AzureFileVolumeSource.Merge(m, src) -} -func (m *AzureFileVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *AzureFileVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_AzureFileVolumeSource.DiscardUnknown(m) -} +func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } +func (*ClientIPConfig) ProtoMessage() {} +func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -var xxx_messageInfo_AzureFileVolumeSource proto.InternalMessageInfo +func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } +func (*ComponentCondition) ProtoMessage() {} +func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -func (m *Binding) Reset() { *m = Binding{} } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{7} -} -func (m *Binding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Binding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Binding) XXX_Merge(src proto.Message) { - xxx_messageInfo_Binding.Merge(m, src) -} -func (m *Binding) XXX_Size() int { - return m.Size() -} -func (m *Binding) XXX_DiscardUnknown() { - xxx_messageInfo_Binding.DiscardUnknown(m) -} +func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } +func (*ComponentStatus) ProtoMessage() {} +func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -var xxx_messageInfo_Binding proto.InternalMessageInfo +func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } +func (*ComponentStatusList) ProtoMessage() {} +func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } -func (*CSIPersistentVolumeSource) ProtoMessage() {} -func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{8} -} -func (m *CSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIPersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIPersistentVolumeSource.Merge(m, src) -} -func (m *CSIPersistentVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *CSIPersistentVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_CSIPersistentVolumeSource.DiscardUnknown(m) -} +func (m *ConfigMap) Reset() { *m = ConfigMap{} } +func (*ConfigMap) ProtoMessage() {} +func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } -var xxx_messageInfo_CSIPersistentVolumeSource proto.InternalMessageInfo +func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } +func (*ConfigMapEnvSource) ProtoMessage() {} +func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } -func (m *CSIVolumeSource) Reset() { *m = CSIVolumeSource{} } -func (*CSIVolumeSource) ProtoMessage() {} -func (*CSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{9} -} -func (m *CSIVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIVolumeSource.Merge(m, src) -} -func (m *CSIVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *CSIVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_CSIVolumeSource.DiscardUnknown(m) -} +func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } +func (*ConfigMapKeySelector) ProtoMessage() {} +func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } -var xxx_messageInfo_CSIVolumeSource proto.InternalMessageInfo +func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } +func (*ConfigMapList) ProtoMessage() {} +func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } -func (m *Capabilities) Reset() { *m = Capabilities{} } -func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{10} -} -func (m *Capabilities) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Capabilities) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Capabilities) XXX_Merge(src proto.Message) { - xxx_messageInfo_Capabilities.Merge(m, src) -} -func (m *Capabilities) XXX_Size() int { - return m.Size() -} -func (m *Capabilities) XXX_DiscardUnknown() { - xxx_messageInfo_Capabilities.DiscardUnknown(m) +func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } +func (*ConfigMapNodeConfigSource) ProtoMessage() {} +func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{22} } -var xxx_messageInfo_Capabilities proto.InternalMessageInfo +func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } +func (*ConfigMapProjection) ProtoMessage() {} +func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } -func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } -func (*CephFSPersistentVolumeSource) ProtoMessage() {} -func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{11} -} -func (m *CephFSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CephFSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CephFSPersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_CephFSPersistentVolumeSource.Merge(m, src) -} -func (m *CephFSPersistentVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *CephFSPersistentVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_CephFSPersistentVolumeSource.DiscardUnknown(m) -} +func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } +func (*ConfigMapVolumeSource) ProtoMessage() {} +func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } -var xxx_messageInfo_CephFSPersistentVolumeSource 
proto.InternalMessageInfo +func (m *Container) Reset() { *m = Container{} } +func (*Container) ProtoMessage() {} +func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } -func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } -func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{12} -} -func (m *CephFSVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CephFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CephFSVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_CephFSVolumeSource.Merge(m, src) -} -func (m *CephFSVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *CephFSVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_CephFSVolumeSource.DiscardUnknown(m) -} +func (m *ContainerImage) Reset() { *m = ContainerImage{} } +func (*ContainerImage) ProtoMessage() {} +func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } -var xxx_messageInfo_CephFSVolumeSource proto.InternalMessageInfo +func (m *ContainerPort) Reset() { *m = ContainerPort{} } +func (*ContainerPort) ProtoMessage() {} +func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } -func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } -func (*CinderPersistentVolumeSource) ProtoMessage() {} -func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{13} -} -func (m *CinderPersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CinderPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CinderPersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_CinderPersistentVolumeSource.Merge(m, src) -} -func (m *CinderPersistentVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *CinderPersistentVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_CinderPersistentVolumeSource.DiscardUnknown(m) -} +func (m *ContainerState) Reset() { *m = ContainerState{} } +func (*ContainerState) ProtoMessage() {} +func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } -var xxx_messageInfo_CinderPersistentVolumeSource proto.InternalMessageInfo +func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } +func (*ContainerStateRunning) ProtoMessage() {} +func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } -func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } -func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{14} -} -func (m *CinderVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CinderVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CinderVolumeSource) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_CinderVolumeSource.Merge(m, src) -} -func (m *CinderVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *CinderVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_CinderVolumeSource.DiscardUnknown(m) +func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } +func (*ContainerStateTerminated) ProtoMessage() {} +func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{30} } -var xxx_messageInfo_CinderVolumeSource proto.InternalMessageInfo +func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } +func (*ContainerStateWaiting) ProtoMessage() {} +func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } -func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } -func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{15} -} -func (m *ClientIPConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClientIPConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClientIPConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClientIPConfig.Merge(m, src) -} -func (m *ClientIPConfig) XXX_Size() int { - return m.Size() -} -func (m *ClientIPConfig) XXX_DiscardUnknown() { - xxx_messageInfo_ClientIPConfig.DiscardUnknown(m) -} +func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } +func (*ContainerStatus) ProtoMessage() {} +func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } -var xxx_messageInfo_ClientIPConfig proto.InternalMessageInfo +func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } +func (*DaemonEndpoint) ProtoMessage() {} +func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } -func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } -func (*ComponentCondition) ProtoMessage() {} -func (*ComponentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{16} -} -func (m *ComponentCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ComponentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ComponentCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_ComponentCondition.Merge(m, src) -} -func (m *ComponentCondition) XXX_Size() int { - return m.Size() -} -func (m *ComponentCondition) XXX_DiscardUnknown() { - xxx_messageInfo_ComponentCondition.DiscardUnknown(m) -} +func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } +func (*DownwardAPIProjection) ProtoMessage() {} +func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } -var xxx_messageInfo_ComponentCondition proto.InternalMessageInfo +func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } +func (*DownwardAPIVolumeFile) ProtoMessage() {} +func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } -func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } -func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, 
[]int{17} -} -func (m *ComponentStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ComponentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ComponentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ComponentStatus.Merge(m, src) -} -func (m *ComponentStatus) XXX_Size() int { - return m.Size() -} -func (m *ComponentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ComponentStatus.DiscardUnknown(m) +func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } +func (*DownwardAPIVolumeSource) ProtoMessage() {} +func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{36} } -var xxx_messageInfo_ComponentStatus proto.InternalMessageInfo +func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } +func (*EmptyDirVolumeSource) ProtoMessage() {} +func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } -func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } -func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{18} -} -func (m *ComponentStatusList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ComponentStatusList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ComponentStatusList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ComponentStatusList.Merge(m, src) -} -func (m *ComponentStatusList) XXX_Size() int { - return m.Size() -} -func (m *ComponentStatusList) XXX_DiscardUnknown() { - xxx_messageInfo_ComponentStatusList.DiscardUnknown(m) -} +func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } +func (*EndpointAddress) ProtoMessage() {} +func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } -var xxx_messageInfo_ComponentStatusList proto.InternalMessageInfo +func (m *EndpointPort) Reset() { *m = EndpointPort{} } +func (*EndpointPort) ProtoMessage() {} +func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } -func (m *ConfigMap) Reset() { *m = ConfigMap{} } -func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{19} -} -func (m *ConfigMap) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigMap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConfigMap) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigMap.Merge(m, src) -} -func (m *ConfigMap) XXX_Size() int { - return m.Size() -} -func (m *ConfigMap) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigMap.DiscardUnknown(m) -} +func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } +func (*EndpointSubset) ProtoMessage() {} +func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } -var xxx_messageInfo_ConfigMap proto.InternalMessageInfo +func (m *Endpoints) Reset() { *m = Endpoints{} } +func (*Endpoints) ProtoMessage() {} +func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{41} } -func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } -func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{20} -} -func (m *ConfigMapEnvSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigMapEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConfigMapEnvSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigMapEnvSource.Merge(m, src) -} -func (m *ConfigMapEnvSource) XXX_Size() int { - return m.Size() -} -func (m *ConfigMapEnvSource) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigMapEnvSource.DiscardUnknown(m) -} +func (m *EndpointsList) Reset() { *m = EndpointsList{} } +func (*EndpointsList) ProtoMessage() {} +func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } -var xxx_messageInfo_ConfigMapEnvSource proto.InternalMessageInfo +func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } +func (*EnvFromSource) ProtoMessage() {} +func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } -func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } -func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{21} -} -func (m *ConfigMapKeySelector) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigMapKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConfigMapKeySelector) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigMapKeySelector.Merge(m, src) -} -func (m *ConfigMapKeySelector) XXX_Size() int { - return m.Size() -} -func (m *ConfigMapKeySelector) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigMapKeySelector.DiscardUnknown(m) -} +func (m *EnvVar) Reset() { *m = EnvVar{} } +func (*EnvVar) ProtoMessage() {} +func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } -var xxx_messageInfo_ConfigMapKeySelector proto.InternalMessageInfo +func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } +func (*EnvVarSource) ProtoMessage() {} +func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } -func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } -func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{22} -} -func (m *ConfigMapList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigMapList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConfigMapList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigMapList.Merge(m, src) -} -func (m *ConfigMapList) XXX_Size() int { - return m.Size() -} -func (m *ConfigMapList) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigMapList.DiscardUnknown(m) -} +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } -var xxx_messageInfo_ConfigMapList proto.InternalMessageInfo 
+func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } -func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } -func (*ConfigMapNodeConfigSource) ProtoMessage() {} -func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{23} -} -func (m *ConfigMapNodeConfigSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigMapNodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConfigMapNodeConfigSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigMapNodeConfigSource.Merge(m, src) -} -func (m *ConfigMapNodeConfigSource) XXX_Size() int { - return m.Size() -} -func (m *ConfigMapNodeConfigSource) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigMapNodeConfigSource.DiscardUnknown(m) -} +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } -var xxx_messageInfo_ConfigMapNodeConfigSource proto.InternalMessageInfo +func (m *EventSource) Reset() { *m = EventSource{} } +func (*EventSource) ProtoMessage() {} +func (*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } -func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } -func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{24} -} -func (m *ConfigMapProjection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigMapProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConfigMapProjection) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigMapProjection.Merge(m, src) -} -func (m *ConfigMapProjection) XXX_Size() int { - return m.Size() -} -func (m *ConfigMapProjection) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigMapProjection.DiscardUnknown(m) -} +func (m *ExecAction) Reset() { *m = ExecAction{} } +func (*ExecAction) ProtoMessage() {} +func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } -var xxx_messageInfo_ConfigMapProjection proto.InternalMessageInfo +func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } +func (*FCVolumeSource) ProtoMessage() {} +func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } -func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } -func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{25} -} -func (m *ConfigMapVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ConfigMapVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ConfigMapVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ConfigMapVolumeSource.Merge(m, src) -} -func (m *ConfigMapVolumeSource) XXX_Size() int { - return m.Size() -} -func (m 
*ConfigMapVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_ConfigMapVolumeSource.DiscardUnknown(m) +func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } +func (*FlexPersistentVolumeSource) ProtoMessage() {} +func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{52} } -var xxx_messageInfo_ConfigMapVolumeSource proto.InternalMessageInfo +func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } +func (*FlexVolumeSource) ProtoMessage() {} +func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } -func (m *Container) Reset() { *m = Container{} } -func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{26} -} -func (m *Container) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Container) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Container) XXX_Merge(src proto.Message) { - xxx_messageInfo_Container.Merge(m, src) -} -func (m *Container) XXX_Size() int { - return m.Size() -} -func (m *Container) XXX_DiscardUnknown() { - xxx_messageInfo_Container.DiscardUnknown(m) +func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } +func (*FlockerVolumeSource) ProtoMessage() {} +func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } + +func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } +func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} +func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{55} } -var xxx_messageInfo_Container proto.InternalMessageInfo +func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } +func (*GitRepoVolumeSource) ProtoMessage() {} +func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } -func (m *ContainerImage) Reset() { *m = ContainerImage{} } -func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{27} -} -func (m *ContainerImage) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerImage) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerImage.Merge(m, src) -} -func (m *ContainerImage) XXX_Size() int { - return m.Size() -} -func (m *ContainerImage) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerImage.DiscardUnknown(m) -} +func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } +func (*GlusterfsVolumeSource) ProtoMessage() {} +func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } -var xxx_messageInfo_ContainerImage proto.InternalMessageInfo +func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } +func (*HTTPGetAction) ProtoMessage() {} +func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } -func (m *ContainerPort) Reset() { *m = ContainerPort{} } -func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { - return 
fileDescriptor_83c10c24ec417dc9, []int{28} -} -func (m *ContainerPort) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerPort.Merge(m, src) -} -func (m *ContainerPort) XXX_Size() int { - return m.Size() -} -func (m *ContainerPort) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerPort.DiscardUnknown(m) -} +func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } +func (*HTTPHeader) ProtoMessage() {} +func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } -var xxx_messageInfo_ContainerPort proto.InternalMessageInfo +func (m *Handler) Reset() { *m = Handler{} } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } -func (m *ContainerState) Reset() { *m = ContainerState{} } -func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{29} -} -func (m *ContainerState) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerState) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerState.Merge(m, src) -} -func (m *ContainerState) XXX_Size() int { - return m.Size() -} -func (m *ContainerState) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerState.DiscardUnknown(m) -} +func (m *HostAlias) Reset() { *m = HostAlias{} } +func (*HostAlias) ProtoMessage() {} +func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } -var xxx_messageInfo_ContainerState proto.InternalMessageInfo +func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } +func (*HostPathVolumeSource) ProtoMessage() {} +func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } -func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } -func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{30} -} -func (m *ContainerStateRunning) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerStateRunning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerStateRunning) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerStateRunning.Merge(m, src) -} -func (m *ContainerStateRunning) XXX_Size() int { - return m.Size() -} -func (m *ContainerStateRunning) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerStateRunning.DiscardUnknown(m) +func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } +func (*ISCSIPersistentVolumeSource) ProtoMessage() {} +func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{63} } -var xxx_messageInfo_ContainerStateRunning proto.InternalMessageInfo +func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } +func (*ISCSIVolumeSource) ProtoMessage() {} +func 
(*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } -func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } -func (*ContainerStateTerminated) ProtoMessage() {} -func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{31} -} -func (m *ContainerStateTerminated) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerStateTerminated) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerStateTerminated) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerStateTerminated.Merge(m, src) -} -func (m *ContainerStateTerminated) XXX_Size() int { - return m.Size() -} -func (m *ContainerStateTerminated) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerStateTerminated.DiscardUnknown(m) -} +func (m *KeyToPath) Reset() { *m = KeyToPath{} } +func (*KeyToPath) ProtoMessage() {} +func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } -var xxx_messageInfo_ContainerStateTerminated proto.InternalMessageInfo +func (m *Lifecycle) Reset() { *m = Lifecycle{} } +func (*Lifecycle) ProtoMessage() {} +func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } -func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } -func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{32} -} -func (m *ContainerStateWaiting) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerStateWaiting) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerStateWaiting) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerStateWaiting.Merge(m, src) -} -func (m *ContainerStateWaiting) XXX_Size() int { - return m.Size() -} -func (m *ContainerStateWaiting) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerStateWaiting.DiscardUnknown(m) -} +func (m *LimitRange) Reset() { *m = LimitRange{} } +func (*LimitRange) ProtoMessage() {} +func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } -var xxx_messageInfo_ContainerStateWaiting proto.InternalMessageInfo +func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } +func (*LimitRangeItem) ProtoMessage() {} +func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } -func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } -func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{33} -} -func (m *ContainerStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ContainerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ContainerStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ContainerStatus.Merge(m, src) -} -func (m *ContainerStatus) XXX_Size() int { - return m.Size() -} -func (m *ContainerStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ContainerStatus.DiscardUnknown(m) -} +func (m *LimitRangeList) Reset() { *m = 
LimitRangeList{} } +func (*LimitRangeList) ProtoMessage() {} +func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } -var xxx_messageInfo_ContainerStatus proto.InternalMessageInfo +func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } +func (*LimitRangeSpec) ProtoMessage() {} +func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } -func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } -func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{34} -} -func (m *DaemonEndpoint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonEndpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonEndpoint) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonEndpoint.Merge(m, src) -} -func (m *DaemonEndpoint) XXX_Size() int { - return m.Size() -} -func (m *DaemonEndpoint) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonEndpoint.DiscardUnknown(m) -} +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } -var xxx_messageInfo_DaemonEndpoint proto.InternalMessageInfo +func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } +func (*LoadBalancerIngress) ProtoMessage() {} +func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } -func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } -func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{35} -} -func (m *DownwardAPIProjection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DownwardAPIProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DownwardAPIProjection) XXX_Merge(src proto.Message) { - xxx_messageInfo_DownwardAPIProjection.Merge(m, src) -} -func (m *DownwardAPIProjection) XXX_Size() int { - return m.Size() -} -func (m *DownwardAPIProjection) XXX_DiscardUnknown() { - xxx_messageInfo_DownwardAPIProjection.DiscardUnknown(m) -} +func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } +func (*LoadBalancerStatus) ProtoMessage() {} +func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } -var xxx_messageInfo_DownwardAPIProjection proto.InternalMessageInfo +func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } +func (*LocalObjectReference) ProtoMessage() {} +func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } -func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } -func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{36} -} -func (m *DownwardAPIVolumeFile) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DownwardAPIVolumeFile) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func 
(m *DownwardAPIVolumeFile) XXX_Merge(src proto.Message) { - xxx_messageInfo_DownwardAPIVolumeFile.Merge(m, src) -} -func (m *DownwardAPIVolumeFile) XXX_Size() int { - return m.Size() -} -func (m *DownwardAPIVolumeFile) XXX_DiscardUnknown() { - xxx_messageInfo_DownwardAPIVolumeFile.DiscardUnknown(m) -} +func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } +func (*LocalVolumeSource) ProtoMessage() {} +func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } -var xxx_messageInfo_DownwardAPIVolumeFile proto.InternalMessageInfo +func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } +func (*NFSVolumeSource) ProtoMessage() {} +func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } -func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } -func (*DownwardAPIVolumeSource) ProtoMessage() {} -func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{37} -} -func (m *DownwardAPIVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DownwardAPIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DownwardAPIVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_DownwardAPIVolumeSource.Merge(m, src) -} -func (m *DownwardAPIVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *DownwardAPIVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_DownwardAPIVolumeSource.DiscardUnknown(m) -} +func (m *Namespace) Reset() { *m = Namespace{} } +func (*Namespace) ProtoMessage() {} +func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } -var xxx_messageInfo_DownwardAPIVolumeSource proto.InternalMessageInfo +func (m *NamespaceList) Reset() { *m = NamespaceList{} } +func (*NamespaceList) ProtoMessage() {} +func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } -func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } -func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{38} -} -func (m *EmptyDirVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EmptyDirVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EmptyDirVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_EmptyDirVolumeSource.Merge(m, src) -} -func (m *EmptyDirVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *EmptyDirVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_EmptyDirVolumeSource.DiscardUnknown(m) -} +func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } +func (*NamespaceSpec) ProtoMessage() {} +func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } -var xxx_messageInfo_EmptyDirVolumeSource proto.InternalMessageInfo +func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } +func (*NamespaceStatus) ProtoMessage() {} +func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } -func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } -func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) 
Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{39} -} -func (m *EndpointAddress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EndpointAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EndpointAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_EndpointAddress.Merge(m, src) -} -func (m *EndpointAddress) XXX_Size() int { - return m.Size() -} -func (m *EndpointAddress) XXX_DiscardUnknown() { - xxx_messageInfo_EndpointAddress.DiscardUnknown(m) -} +func (m *Node) Reset() { *m = Node{} } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } -var xxx_messageInfo_EndpointAddress proto.InternalMessageInfo +func (m *NodeAddress) Reset() { *m = NodeAddress{} } +func (*NodeAddress) ProtoMessage() {} +func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } -func (m *EndpointPort) Reset() { *m = EndpointPort{} } -func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{40} -} -func (m *EndpointPort) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EndpointPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EndpointPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_EndpointPort.Merge(m, src) -} -func (m *EndpointPort) XXX_Size() int { - return m.Size() -} -func (m *EndpointPort) XXX_DiscardUnknown() { - xxx_messageInfo_EndpointPort.DiscardUnknown(m) -} +func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } +func (*NodeAffinity) ProtoMessage() {} +func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } -var xxx_messageInfo_EndpointPort proto.InternalMessageInfo +func (m *NodeCondition) Reset() { *m = NodeCondition{} } +func (*NodeCondition) ProtoMessage() {} +func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } -func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } -func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{41} -} -func (m *EndpointSubset) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EndpointSubset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EndpointSubset) XXX_Merge(src proto.Message) { - xxx_messageInfo_EndpointSubset.Merge(m, src) -} -func (m *EndpointSubset) XXX_Size() int { - return m.Size() -} -func (m *EndpointSubset) XXX_DiscardUnknown() { - xxx_messageInfo_EndpointSubset.DiscardUnknown(m) -} +func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } +func (*NodeConfigSource) ProtoMessage() {} +func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } -var xxx_messageInfo_EndpointSubset proto.InternalMessageInfo +func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } +func (*NodeConfigStatus) ProtoMessage() {} +func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } -func (m *Endpoints) 
Reset() { *m = Endpoints{} } -func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{42} -} -func (m *Endpoints) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Endpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Endpoints) XXX_Merge(src proto.Message) { - xxx_messageInfo_Endpoints.Merge(m, src) -} -func (m *Endpoints) XXX_Size() int { - return m.Size() -} -func (m *Endpoints) XXX_DiscardUnknown() { - xxx_messageInfo_Endpoints.DiscardUnknown(m) -} +func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } +func (*NodeDaemonEndpoints) ProtoMessage() {} +func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } -var xxx_messageInfo_Endpoints proto.InternalMessageInfo +func (m *NodeList) Reset() { *m = NodeList{} } +func (*NodeList) ProtoMessage() {} +func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } -func (m *EndpointsList) Reset() { *m = EndpointsList{} } -func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{43} -} -func (m *EndpointsList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EndpointsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EndpointsList) XXX_Merge(src proto.Message) { - xxx_messageInfo_EndpointsList.Merge(m, src) -} -func (m *EndpointsList) XXX_Size() int { - return m.Size() -} -func (m *EndpointsList) XXX_DiscardUnknown() { - xxx_messageInfo_EndpointsList.DiscardUnknown(m) -} +func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } +func (*NodeProxyOptions) ProtoMessage() {} +func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } -var xxx_messageInfo_EndpointsList proto.InternalMessageInfo +func (m *NodeResources) Reset() { *m = NodeResources{} } +func (*NodeResources) ProtoMessage() {} +func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } -func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } -func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{44} -} -func (m *EnvFromSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EnvFromSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EnvFromSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnvFromSource.Merge(m, src) -} -func (m *EnvFromSource) XXX_Size() int { - return m.Size() -} -func (m *EnvFromSource) XXX_DiscardUnknown() { - xxx_messageInfo_EnvFromSource.DiscardUnknown(m) +func (m *NodeSelector) Reset() { *m = NodeSelector{} } +func (*NodeSelector) ProtoMessage() {} +func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } + +func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } +func (*NodeSelectorRequirement) ProtoMessage() {} +func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { + return 
fileDescriptorGenerated, []int{92} } -var xxx_messageInfo_EnvFromSource proto.InternalMessageInfo +func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } +func (*NodeSelectorTerm) ProtoMessage() {} +func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } -func (m *EnvVar) Reset() { *m = EnvVar{} } -func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{45} -} -func (m *EnvVar) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EnvVar) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EnvVar) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnvVar.Merge(m, src) -} -func (m *EnvVar) XXX_Size() int { - return m.Size() -} -func (m *EnvVar) XXX_DiscardUnknown() { - xxx_messageInfo_EnvVar.DiscardUnknown(m) -} +func (m *NodeSpec) Reset() { *m = NodeSpec{} } +func (*NodeSpec) ProtoMessage() {} +func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } -var xxx_messageInfo_EnvVar proto.InternalMessageInfo +func (m *NodeStatus) Reset() { *m = NodeStatus{} } +func (*NodeStatus) ProtoMessage() {} +func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } -func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } -func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{46} -} -func (m *EnvVarSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EnvVarSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EnvVarSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_EnvVarSource.Merge(m, src) -} -func (m *EnvVarSource) XXX_Size() int { - return m.Size() -} -func (m *EnvVarSource) XXX_DiscardUnknown() { - xxx_messageInfo_EnvVarSource.DiscardUnknown(m) -} +func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } +func (*NodeSystemInfo) ProtoMessage() {} +func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } -var xxx_messageInfo_EnvVarSource proto.InternalMessageInfo +func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } +func (*ObjectFieldSelector) ProtoMessage() {} +func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } -func (m *EphemeralContainer) Reset() { *m = EphemeralContainer{} } -func (*EphemeralContainer) ProtoMessage() {} -func (*EphemeralContainer) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{47} -} -func (m *EphemeralContainer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EphemeralContainer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EphemeralContainer) XXX_Merge(src proto.Message) { - xxx_messageInfo_EphemeralContainer.Merge(m, src) -} -func (m *EphemeralContainer) XXX_Size() int { - return m.Size() -} -func (m *EphemeralContainer) XXX_DiscardUnknown() { - xxx_messageInfo_EphemeralContainer.DiscardUnknown(m) -} +func (m *ObjectReference) Reset() { *m = ObjectReference{} } +func (*ObjectReference) 
ProtoMessage() {} +func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } -var xxx_messageInfo_EphemeralContainer proto.InternalMessageInfo +func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } +func (*PersistentVolume) ProtoMessage() {} +func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } -func (m *EphemeralContainerCommon) Reset() { *m = EphemeralContainerCommon{} } -func (*EphemeralContainerCommon) ProtoMessage() {} -func (*EphemeralContainerCommon) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{48} -} -func (m *EphemeralContainerCommon) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } +func (*PersistentVolumeClaim) ProtoMessage() {} +func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } + +func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } +func (*PersistentVolumeClaimCondition) ProtoMessage() {} +func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{101} } -func (m *EphemeralContainerCommon) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil + +func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } +func (*PersistentVolumeClaimList) ProtoMessage() {} +func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{102} } -func (m *EphemeralContainerCommon) XXX_Merge(src proto.Message) { - xxx_messageInfo_EphemeralContainerCommon.Merge(m, src) + +func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } +func (*PersistentVolumeClaimSpec) ProtoMessage() {} +func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{103} } -func (m *EphemeralContainerCommon) XXX_Size() int { - return m.Size() + +func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } +func (*PersistentVolumeClaimStatus) ProtoMessage() {} +func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{104} } -func (m *EphemeralContainerCommon) XXX_DiscardUnknown() { - xxx_messageInfo_EphemeralContainerCommon.DiscardUnknown(m) + +func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = PersistentVolumeClaimVolumeSource{} } +func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} +func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{105} } -var xxx_messageInfo_EphemeralContainerCommon proto.InternalMessageInfo +func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } +func (*PersistentVolumeList) ProtoMessage() {} +func (*PersistentVolumeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{106} } -func (m *EphemeralContainers) Reset() { *m = EphemeralContainers{} } -func (*EphemeralContainers) ProtoMessage() {} -func (*EphemeralContainers) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{49} -} -func (m *EphemeralContainers) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EphemeralContainers) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := 
m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EphemeralContainers) XXX_Merge(src proto.Message) { - xxx_messageInfo_EphemeralContainers.Merge(m, src) -} -func (m *EphemeralContainers) XXX_Size() int { - return m.Size() -} -func (m *EphemeralContainers) XXX_DiscardUnknown() { - xxx_messageInfo_EphemeralContainers.DiscardUnknown(m) -} - -var xxx_messageInfo_EphemeralContainers proto.InternalMessageInfo - -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{50} -} -func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return m.Size() -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) +func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } +func (*PersistentVolumeSource) ProtoMessage() {} +func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{107} } -var xxx_messageInfo_Event proto.InternalMessageInfo +func (m *PersistentVolumeSpec) Reset() { *m = PersistentVolumeSpec{} } +func (*PersistentVolumeSpec) ProtoMessage() {} +func (*PersistentVolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{108} } -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{51} -} -func (m *EventList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EventList) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventList.Merge(m, src) -} -func (m *EventList) XXX_Size() int { - return m.Size() -} -func (m *EventList) XXX_DiscardUnknown() { - xxx_messageInfo_EventList.DiscardUnknown(m) -} - -var xxx_messageInfo_EventList proto.InternalMessageInfo - -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{52} -} -func (m *EventSeries) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EventSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventSeries.Merge(m, src) -} -func (m *EventSeries) XXX_Size() int { - return m.Size() -} -func (m *EventSeries) XXX_DiscardUnknown() { - xxx_messageInfo_EventSeries.DiscardUnknown(m) +func (m *PersistentVolumeStatus) Reset() { *m = PersistentVolumeStatus{} } +func (*PersistentVolumeStatus) ProtoMessage() {} +func (*PersistentVolumeStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{109} } -var xxx_messageInfo_EventSeries proto.InternalMessageInfo - -func (m *EventSource) Reset() { *m = EventSource{} } -func (*EventSource) ProtoMessage() 
{} -func (*EventSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{53} -} -func (m *EventSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EventSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EventSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventSource.Merge(m, src) -} -func (m *EventSource) XXX_Size() int { - return m.Size() -} -func (m *EventSource) XXX_DiscardUnknown() { - xxx_messageInfo_EventSource.DiscardUnknown(m) +func (m *PhotonPersistentDiskVolumeSource) Reset() { *m = PhotonPersistentDiskVolumeSource{} } +func (*PhotonPersistentDiskVolumeSource) ProtoMessage() {} +func (*PhotonPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{110} } -var xxx_messageInfo_EventSource proto.InternalMessageInfo +func (m *Pod) Reset() { *m = Pod{} } +func (*Pod) ProtoMessage() {} +func (*Pod) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{111} } -func (m *ExecAction) Reset() { *m = ExecAction{} } -func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{54} -} -func (m *ExecAction) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExecAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExecAction) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExecAction.Merge(m, src) -} -func (m *ExecAction) XXX_Size() int { - return m.Size() -} -func (m *ExecAction) XXX_DiscardUnknown() { - xxx_messageInfo_ExecAction.DiscardUnknown(m) -} +func (m *PodAffinity) Reset() { *m = PodAffinity{} } +func (*PodAffinity) ProtoMessage() {} +func (*PodAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{112} } -var xxx_messageInfo_ExecAction proto.InternalMessageInfo +func (m *PodAffinityTerm) Reset() { *m = PodAffinityTerm{} } +func (*PodAffinityTerm) ProtoMessage() {} +func (*PodAffinityTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{113} } -func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } -func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{55} -} -func (m *FCVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FCVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FCVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_FCVolumeSource.Merge(m, src) -} -func (m *FCVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *FCVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_FCVolumeSource.DiscardUnknown(m) -} +func (m *PodAntiAffinity) Reset() { *m = PodAntiAffinity{} } +func (*PodAntiAffinity) ProtoMessage() {} +func (*PodAntiAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{114} } -var xxx_messageInfo_FCVolumeSource proto.InternalMessageInfo +func (m *PodAttachOptions) Reset() { *m = PodAttachOptions{} } +func (*PodAttachOptions) ProtoMessage() {} +func (*PodAttachOptions) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{115} } -func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } -func (*FlexPersistentVolumeSource) ProtoMessage() {} -func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{56} -} -func (m *FlexPersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FlexPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FlexPersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_FlexPersistentVolumeSource.Merge(m, src) -} -func (m *FlexPersistentVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *FlexPersistentVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_FlexPersistentVolumeSource.DiscardUnknown(m) -} +func (m *PodCondition) Reset() { *m = PodCondition{} } +func (*PodCondition) ProtoMessage() {} +func (*PodCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{116} } -var xxx_messageInfo_FlexPersistentVolumeSource proto.InternalMessageInfo +func (m *PodDNSConfig) Reset() { *m = PodDNSConfig{} } +func (*PodDNSConfig) ProtoMessage() {} +func (*PodDNSConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{117} } -func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } -func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{57} -} -func (m *FlexVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FlexVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FlexVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_FlexVolumeSource.Merge(m, src) -} -func (m *FlexVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *FlexVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_FlexVolumeSource.DiscardUnknown(m) -} +func (m *PodDNSConfigOption) Reset() { *m = PodDNSConfigOption{} } +func (*PodDNSConfigOption) ProtoMessage() {} +func (*PodDNSConfigOption) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{118} } -var xxx_messageInfo_FlexVolumeSource proto.InternalMessageInfo +func (m *PodExecOptions) Reset() { *m = PodExecOptions{} } +func (*PodExecOptions) ProtoMessage() {} +func (*PodExecOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{119} } -func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } -func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{58} -} -func (m *FlockerVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FlockerVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FlockerVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_FlockerVolumeSource.Merge(m, src) -} -func (m *FlockerVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *FlockerVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_FlockerVolumeSource.DiscardUnknown(m) -} +func (m *PodList) Reset() { *m = 
PodList{} } +func (*PodList) ProtoMessage() {} +func (*PodList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{120} } -var xxx_messageInfo_FlockerVolumeSource proto.InternalMessageInfo +func (m *PodLogOptions) Reset() { *m = PodLogOptions{} } +func (*PodLogOptions) ProtoMessage() {} +func (*PodLogOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{121} } -func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } -func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} -func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{59} -} -func (m *GCEPersistentDiskVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GCEPersistentDiskVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GCEPersistentDiskVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_GCEPersistentDiskVolumeSource.Merge(m, src) -} -func (m *GCEPersistentDiskVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *GCEPersistentDiskVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_GCEPersistentDiskVolumeSource.DiscardUnknown(m) -} +func (m *PodPortForwardOptions) Reset() { *m = PodPortForwardOptions{} } +func (*PodPortForwardOptions) ProtoMessage() {} +func (*PodPortForwardOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{122} } -var xxx_messageInfo_GCEPersistentDiskVolumeSource proto.InternalMessageInfo +func (m *PodProxyOptions) Reset() { *m = PodProxyOptions{} } +func (*PodProxyOptions) ProtoMessage() {} +func (*PodProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{123} } -func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } -func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{60} -} -func (m *GitRepoVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GitRepoVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GitRepoVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_GitRepoVolumeSource.Merge(m, src) -} -func (m *GitRepoVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *GitRepoVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_GitRepoVolumeSource.DiscardUnknown(m) -} +func (m *PodReadinessGate) Reset() { *m = PodReadinessGate{} } +func (*PodReadinessGate) ProtoMessage() {} +func (*PodReadinessGate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{124} } -var xxx_messageInfo_GitRepoVolumeSource proto.InternalMessageInfo +func (m *PodSecurityContext) Reset() { *m = PodSecurityContext{} } +func (*PodSecurityContext) ProtoMessage() {} +func (*PodSecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{125} } -func (m *GlusterfsPersistentVolumeSource) Reset() { *m = GlusterfsPersistentVolumeSource{} } -func (*GlusterfsPersistentVolumeSource) ProtoMessage() {} -func (*GlusterfsPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{61} -} -func (m *GlusterfsPersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *GlusterfsPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GlusterfsPersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_GlusterfsPersistentVolumeSource.Merge(m, src) -} -func (m *GlusterfsPersistentVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *GlusterfsPersistentVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_GlusterfsPersistentVolumeSource.DiscardUnknown(m) -} +func (m *PodSignature) Reset() { *m = PodSignature{} } +func (*PodSignature) ProtoMessage() {} +func (*PodSignature) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{126} } -var xxx_messageInfo_GlusterfsPersistentVolumeSource proto.InternalMessageInfo +func (m *PodSpec) Reset() { *m = PodSpec{} } +func (*PodSpec) ProtoMessage() {} +func (*PodSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{127} } -func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } -func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{62} -} -func (m *GlusterfsVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GlusterfsVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GlusterfsVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_GlusterfsVolumeSource.Merge(m, src) -} -func (m *GlusterfsVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *GlusterfsVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_GlusterfsVolumeSource.DiscardUnknown(m) -} +func (m *PodStatus) Reset() { *m = PodStatus{} } +func (*PodStatus) ProtoMessage() {} +func (*PodStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{128} } -var xxx_messageInfo_GlusterfsVolumeSource proto.InternalMessageInfo +func (m *PodStatusResult) Reset() { *m = PodStatusResult{} } +func (*PodStatusResult) ProtoMessage() {} +func (*PodStatusResult) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{129} } -func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } -func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{63} -} -func (m *HTTPGetAction) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPGetAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPGetAction) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPGetAction.Merge(m, src) -} -func (m *HTTPGetAction) XXX_Size() int { - return m.Size() -} -func (m *HTTPGetAction) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPGetAction.DiscardUnknown(m) -} +func (m *PodTemplate) Reset() { *m = PodTemplate{} } +func (*PodTemplate) ProtoMessage() {} +func (*PodTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{130} } -var xxx_messageInfo_HTTPGetAction proto.InternalMessageInfo +func (m *PodTemplateList) Reset() { *m = PodTemplateList{} } +func (*PodTemplateList) ProtoMessage() {} +func (*PodTemplateList) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{131} } -func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } -func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{64} -} -func (m *HTTPHeader) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPHeader) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPHeader.Merge(m, src) -} -func (m *HTTPHeader) XXX_Size() int { - return m.Size() -} -func (m *HTTPHeader) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPHeader.DiscardUnknown(m) -} +func (m *PodTemplateSpec) Reset() { *m = PodTemplateSpec{} } +func (*PodTemplateSpec) ProtoMessage() {} +func (*PodTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{132} } -var xxx_messageInfo_HTTPHeader proto.InternalMessageInfo +func (m *PortworxVolumeSource) Reset() { *m = PortworxVolumeSource{} } +func (*PortworxVolumeSource) ProtoMessage() {} +func (*PortworxVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{133} } -func (m *Handler) Reset() { *m = Handler{} } -func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{65} -} -func (m *Handler) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Handler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Handler) XXX_Merge(src proto.Message) { - xxx_messageInfo_Handler.Merge(m, src) -} -func (m *Handler) XXX_Size() int { - return m.Size() -} -func (m *Handler) XXX_DiscardUnknown() { - xxx_messageInfo_Handler.DiscardUnknown(m) -} +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{134} } -var xxx_messageInfo_Handler proto.InternalMessageInfo +func (m *PreferAvoidPodsEntry) Reset() { *m = PreferAvoidPodsEntry{} } +func (*PreferAvoidPodsEntry) ProtoMessage() {} +func (*PreferAvoidPodsEntry) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{135} } -func (m *HostAlias) Reset() { *m = HostAlias{} } -func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{66} -} -func (m *HostAlias) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostAlias) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostAlias) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostAlias.Merge(m, src) -} -func (m *HostAlias) XXX_Size() int { - return m.Size() -} -func (m *HostAlias) XXX_DiscardUnknown() { - xxx_messageInfo_HostAlias.DiscardUnknown(m) +func (m *PreferredSchedulingTerm) Reset() { *m = PreferredSchedulingTerm{} } +func (*PreferredSchedulingTerm) ProtoMessage() {} +func (*PreferredSchedulingTerm) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{136} } -var xxx_messageInfo_HostAlias proto.InternalMessageInfo +func (m *Probe) Reset() { *m = Probe{} } +func (*Probe) ProtoMessage() {} +func (*Probe) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{137} } -func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } -func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{67} -} -func (m *HostPathVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPathVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPathVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPathVolumeSource.Merge(m, src) -} -func (m *HostPathVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *HostPathVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_HostPathVolumeSource.DiscardUnknown(m) -} +func (m *ProjectedVolumeSource) Reset() { *m = ProjectedVolumeSource{} } +func (*ProjectedVolumeSource) ProtoMessage() {} +func (*ProjectedVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{138} } -var xxx_messageInfo_HostPathVolumeSource proto.InternalMessageInfo +func (m *QuobyteVolumeSource) Reset() { *m = QuobyteVolumeSource{} } +func (*QuobyteVolumeSource) ProtoMessage() {} +func (*QuobyteVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{139} } -func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } -func (*ISCSIPersistentVolumeSource) ProtoMessage() {} -func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{68} -} -func (m *ISCSIPersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ISCSIPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ISCSIPersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ISCSIPersistentVolumeSource.Merge(m, src) -} -func (m *ISCSIPersistentVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *ISCSIPersistentVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_ISCSIPersistentVolumeSource.DiscardUnknown(m) +func (m *RBDPersistentVolumeSource) Reset() { *m = RBDPersistentVolumeSource{} } +func (*RBDPersistentVolumeSource) ProtoMessage() {} +func (*RBDPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{140} } -var xxx_messageInfo_ISCSIPersistentVolumeSource proto.InternalMessageInfo +func (m *RBDVolumeSource) Reset() { *m = RBDVolumeSource{} } +func (*RBDVolumeSource) ProtoMessage() {} +func (*RBDVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{141} } -func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } -func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{69} -} -func (m *ISCSIVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ISCSIVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ISCSIVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_ISCSIVolumeSource.Merge(m, src) -} -func (m *ISCSIVolumeSource) XXX_Size() int { - return 
m.Size() -} -func (m *ISCSIVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_ISCSIVolumeSource.DiscardUnknown(m) -} +func (m *RangeAllocation) Reset() { *m = RangeAllocation{} } +func (*RangeAllocation) ProtoMessage() {} +func (*RangeAllocation) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{142} } -var xxx_messageInfo_ISCSIVolumeSource proto.InternalMessageInfo +func (m *ReplicationController) Reset() { *m = ReplicationController{} } +func (*ReplicationController) ProtoMessage() {} +func (*ReplicationController) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{143} } -func (m *KeyToPath) Reset() { *m = KeyToPath{} } -func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{70} -} -func (m *KeyToPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *KeyToPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *KeyToPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_KeyToPath.Merge(m, src) -} -func (m *KeyToPath) XXX_Size() int { - return m.Size() -} -func (m *KeyToPath) XXX_DiscardUnknown() { - xxx_messageInfo_KeyToPath.DiscardUnknown(m) +func (m *ReplicationControllerCondition) Reset() { *m = ReplicationControllerCondition{} } +func (*ReplicationControllerCondition) ProtoMessage() {} +func (*ReplicationControllerCondition) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{144} } -var xxx_messageInfo_KeyToPath proto.InternalMessageInfo - -func (m *Lifecycle) Reset() { *m = Lifecycle{} } -func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{71} -} -func (m *Lifecycle) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Lifecycle) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Lifecycle) XXX_Merge(src proto.Message) { - xxx_messageInfo_Lifecycle.Merge(m, src) -} -func (m *Lifecycle) XXX_Size() int { - return m.Size() -} -func (m *Lifecycle) XXX_DiscardUnknown() { - xxx_messageInfo_Lifecycle.DiscardUnknown(m) +func (m *ReplicationControllerList) Reset() { *m = ReplicationControllerList{} } +func (*ReplicationControllerList) ProtoMessage() {} +func (*ReplicationControllerList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{145} } -var xxx_messageInfo_Lifecycle proto.InternalMessageInfo - -func (m *LimitRange) Reset() { *m = LimitRange{} } -func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{72} -} -func (m *LimitRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LimitRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LimitRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_LimitRange.Merge(m, src) -} -func (m *LimitRange) XXX_Size() int { - return m.Size() -} -func (m *LimitRange) XXX_DiscardUnknown() { - xxx_messageInfo_LimitRange.DiscardUnknown(m) +func (m *ReplicationControllerSpec) Reset() { *m = ReplicationControllerSpec{} } +func (*ReplicationControllerSpec) ProtoMessage() {} 
+func (*ReplicationControllerSpec) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{146} } -var xxx_messageInfo_LimitRange proto.InternalMessageInfo - -func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } -func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{73} -} -func (m *LimitRangeItem) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LimitRangeItem) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LimitRangeItem) XXX_Merge(src proto.Message) { - xxx_messageInfo_LimitRangeItem.Merge(m, src) -} -func (m *LimitRangeItem) XXX_Size() int { - return m.Size() -} -func (m *LimitRangeItem) XXX_DiscardUnknown() { - xxx_messageInfo_LimitRangeItem.DiscardUnknown(m) +func (m *ReplicationControllerStatus) Reset() { *m = ReplicationControllerStatus{} } +func (*ReplicationControllerStatus) ProtoMessage() {} +func (*ReplicationControllerStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{147} } -var xxx_messageInfo_LimitRangeItem proto.InternalMessageInfo +func (m *ResourceFieldSelector) Reset() { *m = ResourceFieldSelector{} } +func (*ResourceFieldSelector) ProtoMessage() {} +func (*ResourceFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{148} } -func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } -func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{74} -} -func (m *LimitRangeList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LimitRangeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LimitRangeList) XXX_Merge(src proto.Message) { - xxx_messageInfo_LimitRangeList.Merge(m, src) -} -func (m *LimitRangeList) XXX_Size() int { - return m.Size() -} -func (m *LimitRangeList) XXX_DiscardUnknown() { - xxx_messageInfo_LimitRangeList.DiscardUnknown(m) -} +func (m *ResourceQuota) Reset() { *m = ResourceQuota{} } +func (*ResourceQuota) ProtoMessage() {} +func (*ResourceQuota) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{149} } -var xxx_messageInfo_LimitRangeList proto.InternalMessageInfo +func (m *ResourceQuotaList) Reset() { *m = ResourceQuotaList{} } +func (*ResourceQuotaList) ProtoMessage() {} +func (*ResourceQuotaList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{150} } -func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } -func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{75} -} -func (m *LimitRangeSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LimitRangeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LimitRangeSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_LimitRangeSpec.Merge(m, src) -} -func (m *LimitRangeSpec) XXX_Size() int { - return m.Size() -} -func (m *LimitRangeSpec) XXX_DiscardUnknown() { - xxx_messageInfo_LimitRangeSpec.DiscardUnknown(m) -} +func (m *ResourceQuotaSpec) Reset() 
{ *m = ResourceQuotaSpec{} } +func (*ResourceQuotaSpec) ProtoMessage() {} +func (*ResourceQuotaSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{151} } -var xxx_messageInfo_LimitRangeSpec proto.InternalMessageInfo +func (m *ResourceQuotaStatus) Reset() { *m = ResourceQuotaStatus{} } +func (*ResourceQuotaStatus) ProtoMessage() {} +func (*ResourceQuotaStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{152} } -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{76} -} -func (m *List) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *List) XXX_Merge(src proto.Message) { - xxx_messageInfo_List.Merge(m, src) -} -func (m *List) XXX_Size() int { - return m.Size() -} -func (m *List) XXX_DiscardUnknown() { - xxx_messageInfo_List.DiscardUnknown(m) -} +func (m *ResourceRequirements) Reset() { *m = ResourceRequirements{} } +func (*ResourceRequirements) ProtoMessage() {} +func (*ResourceRequirements) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{153} } -var xxx_messageInfo_List proto.InternalMessageInfo +func (m *SELinuxOptions) Reset() { *m = SELinuxOptions{} } +func (*SELinuxOptions) ProtoMessage() {} +func (*SELinuxOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{154} } -func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } -func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{77} -} -func (m *LoadBalancerIngress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LoadBalancerIngress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LoadBalancerIngress) XXX_Merge(src proto.Message) { - xxx_messageInfo_LoadBalancerIngress.Merge(m, src) -} -func (m *LoadBalancerIngress) XXX_Size() int { - return m.Size() -} -func (m *LoadBalancerIngress) XXX_DiscardUnknown() { - xxx_messageInfo_LoadBalancerIngress.DiscardUnknown(m) +func (m *ScaleIOPersistentVolumeSource) Reset() { *m = ScaleIOPersistentVolumeSource{} } +func (*ScaleIOPersistentVolumeSource) ProtoMessage() {} +func (*ScaleIOPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{155} } -var xxx_messageInfo_LoadBalancerIngress proto.InternalMessageInfo - -func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } -func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{78} -} -func (m *LoadBalancerStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LoadBalancerStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LoadBalancerStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_LoadBalancerStatus.Merge(m, src) -} -func (m *LoadBalancerStatus) XXX_Size() int { - return m.Size() -} -func (m *LoadBalancerStatus) XXX_DiscardUnknown() { - 
xxx_messageInfo_LoadBalancerStatus.DiscardUnknown(m) -} +func (m *ScaleIOVolumeSource) Reset() { *m = ScaleIOVolumeSource{} } +func (*ScaleIOVolumeSource) ProtoMessage() {} +func (*ScaleIOVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{156} } -var xxx_messageInfo_LoadBalancerStatus proto.InternalMessageInfo +func (m *ScopeSelector) Reset() { *m = ScopeSelector{} } +func (*ScopeSelector) ProtoMessage() {} +func (*ScopeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{157} } -func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } -func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{79} -} -func (m *LocalObjectReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LocalObjectReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_LocalObjectReference.Merge(m, src) -} -func (m *LocalObjectReference) XXX_Size() int { - return m.Size() -} -func (m *LocalObjectReference) XXX_DiscardUnknown() { - xxx_messageInfo_LocalObjectReference.DiscardUnknown(m) +func (m *ScopedResourceSelectorRequirement) Reset() { *m = ScopedResourceSelectorRequirement{} } +func (*ScopedResourceSelectorRequirement) ProtoMessage() {} +func (*ScopedResourceSelectorRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{158} } -var xxx_messageInfo_LocalObjectReference proto.InternalMessageInfo - -func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } -func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{80} -} -func (m *LocalVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LocalVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LocalVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_LocalVolumeSource.Merge(m, src) -} -func (m *LocalVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *LocalVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_LocalVolumeSource.DiscardUnknown(m) -} +func (m *Secret) Reset() { *m = Secret{} } +func (*Secret) ProtoMessage() {} +func (*Secret) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{159} } -var xxx_messageInfo_LocalVolumeSource proto.InternalMessageInfo +func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } +func (*SecretEnvSource) ProtoMessage() {} +func (*SecretEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{160} } -func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } -func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{81} -} -func (m *NFSVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NFSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NFSVolumeSource) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_NFSVolumeSource.Merge(m, src) -} -func (m *NFSVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *NFSVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_NFSVolumeSource.DiscardUnknown(m) -} +func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } +func (*SecretKeySelector) ProtoMessage() {} +func (*SecretKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{161} } -var xxx_messageInfo_NFSVolumeSource proto.InternalMessageInfo +func (m *SecretList) Reset() { *m = SecretList{} } +func (*SecretList) ProtoMessage() {} +func (*SecretList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{162} } -func (m *Namespace) Reset() { *m = Namespace{} } -func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{82} -} -func (m *Namespace) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Namespace) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Namespace) XXX_Merge(src proto.Message) { - xxx_messageInfo_Namespace.Merge(m, src) -} -func (m *Namespace) XXX_Size() int { - return m.Size() -} -func (m *Namespace) XXX_DiscardUnknown() { - xxx_messageInfo_Namespace.DiscardUnknown(m) -} +func (m *SecretProjection) Reset() { *m = SecretProjection{} } +func (*SecretProjection) ProtoMessage() {} +func (*SecretProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{163} } -var xxx_messageInfo_Namespace proto.InternalMessageInfo +func (m *SecretReference) Reset() { *m = SecretReference{} } +func (*SecretReference) ProtoMessage() {} +func (*SecretReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{164} } -func (m *NamespaceCondition) Reset() { *m = NamespaceCondition{} } -func (*NamespaceCondition) ProtoMessage() {} -func (*NamespaceCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{83} -} -func (m *NamespaceCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamespaceCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceCondition.Merge(m, src) -} -func (m *NamespaceCondition) XXX_Size() int { - return m.Size() -} -func (m *NamespaceCondition) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceCondition.DiscardUnknown(m) -} +func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } +func (*SecretVolumeSource) ProtoMessage() {} +func (*SecretVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{165} } -var xxx_messageInfo_NamespaceCondition proto.InternalMessageInfo +func (m *SecurityContext) Reset() { *m = SecurityContext{} } +func (*SecurityContext) ProtoMessage() {} +func (*SecurityContext) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{166} } -func (m *NamespaceList) Reset() { *m = NamespaceList{} } -func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{84} -} -func (m *NamespaceList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, 
error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamespaceList) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceList.Merge(m, src) -} -func (m *NamespaceList) XXX_Size() int { - return m.Size() -} -func (m *NamespaceList) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceList.DiscardUnknown(m) -} +func (m *SerializedReference) Reset() { *m = SerializedReference{} } +func (*SerializedReference) ProtoMessage() {} +func (*SerializedReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{167} } -var xxx_messageInfo_NamespaceList proto.InternalMessageInfo +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{168} } -func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } -func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{85} -} -func (m *NamespaceSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamespaceSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceSpec.Merge(m, src) -} -func (m *NamespaceSpec) XXX_Size() int { - return m.Size() -} -func (m *NamespaceSpec) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceSpec.DiscardUnknown(m) -} +func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } +func (*ServiceAccount) ProtoMessage() {} +func (*ServiceAccount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{169} } -var xxx_messageInfo_NamespaceSpec proto.InternalMessageInfo +func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } +func (*ServiceAccountList) ProtoMessage() {} +func (*ServiceAccountList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{170} } -func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } -func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{86} -} -func (m *NamespaceStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NamespaceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NamespaceStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NamespaceStatus.Merge(m, src) -} -func (m *NamespaceStatus) XXX_Size() int { - return m.Size() -} -func (m *NamespaceStatus) XXX_DiscardUnknown() { - xxx_messageInfo_NamespaceStatus.DiscardUnknown(m) +func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } +func (*ServiceAccountTokenProjection) ProtoMessage() {} +func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{171} } -var xxx_messageInfo_NamespaceStatus proto.InternalMessageInfo - -func (m *Node) Reset() { *m = Node{} } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{87} -} -func (m *Node) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Node) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, 
err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Node) XXX_Merge(src proto.Message) { - xxx_messageInfo_Node.Merge(m, src) -} -func (m *Node) XXX_Size() int { - return m.Size() -} -func (m *Node) XXX_DiscardUnknown() { - xxx_messageInfo_Node.DiscardUnknown(m) -} +func (m *ServiceList) Reset() { *m = ServiceList{} } +func (*ServiceList) ProtoMessage() {} +func (*ServiceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{172} } -var xxx_messageInfo_Node proto.InternalMessageInfo +func (m *ServicePort) Reset() { *m = ServicePort{} } +func (*ServicePort) ProtoMessage() {} +func (*ServicePort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{173} } -func (m *NodeAddress) Reset() { *m = NodeAddress{} } -func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{88} -} -func (m *NodeAddress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeAddress) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeAddress.Merge(m, src) -} -func (m *NodeAddress) XXX_Size() int { - return m.Size() -} -func (m *NodeAddress) XXX_DiscardUnknown() { - xxx_messageInfo_NodeAddress.DiscardUnknown(m) -} +func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } +func (*ServiceProxyOptions) ProtoMessage() {} +func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{174} } -var xxx_messageInfo_NodeAddress proto.InternalMessageInfo +func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } +func (*ServiceSpec) ProtoMessage() {} +func (*ServiceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{175} } -func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } -func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{89} -} -func (m *NodeAffinity) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeAffinity) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeAffinity.Merge(m, src) -} -func (m *NodeAffinity) XXX_Size() int { - return m.Size() -} -func (m *NodeAffinity) XXX_DiscardUnknown() { - xxx_messageInfo_NodeAffinity.DiscardUnknown(m) -} +func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } +func (*ServiceStatus) ProtoMessage() {} +func (*ServiceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{176} } -var xxx_messageInfo_NodeAffinity proto.InternalMessageInfo +func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } +func (*SessionAffinityConfig) ProtoMessage() {} +func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{177} } -func (m *NodeCondition) Reset() { *m = NodeCondition{} } -func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{90} -} -func (m *NodeCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeCondition) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeCondition.Merge(m, src) -} -func (m *NodeCondition) XXX_Size() int { - return m.Size() -} -func (m *NodeCondition) XXX_DiscardUnknown() { - xxx_messageInfo_NodeCondition.DiscardUnknown(m) +func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } +func (*StorageOSPersistentVolumeSource) ProtoMessage() {} +func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{178} } -var xxx_messageInfo_NodeCondition proto.InternalMessageInfo +func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } +func (*StorageOSVolumeSource) ProtoMessage() {} +func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{179} } -func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } -func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{91} -} -func (m *NodeConfigSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeConfigSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeConfigSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeConfigSource.Merge(m, src) -} -func (m *NodeConfigSource) XXX_Size() int { - return m.Size() -} -func (m *NodeConfigSource) XXX_DiscardUnknown() { - xxx_messageInfo_NodeConfigSource.DiscardUnknown(m) -} +func (m *Sysctl) Reset() { *m = Sysctl{} } +func (*Sysctl) ProtoMessage() {} +func (*Sysctl) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{180} } -var xxx_messageInfo_NodeConfigSource proto.InternalMessageInfo +func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } +func (*TCPSocketAction) ProtoMessage() {} +func (*TCPSocketAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{181} } -func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } -func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{92} -} -func (m *NodeConfigStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeConfigStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeConfigStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeConfigStatus.Merge(m, src) -} -func (m *NodeConfigStatus) XXX_Size() int { - return m.Size() -} -func (m *NodeConfigStatus) XXX_DiscardUnknown() { - xxx_messageInfo_NodeConfigStatus.DiscardUnknown(m) -} +func (m *Taint) Reset() { *m = Taint{} } +func (*Taint) ProtoMessage() {} +func (*Taint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{182} } -var xxx_messageInfo_NodeConfigStatus proto.InternalMessageInfo +func (m *Toleration) Reset() { *m = Toleration{} } +func (*Toleration) ProtoMessage() {} +func (*Toleration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{183} } -func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } -func (*NodeDaemonEndpoints) ProtoMessage() {} 
-func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{93} -} -func (m *NodeDaemonEndpoints) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeDaemonEndpoints) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeDaemonEndpoints) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeDaemonEndpoints.Merge(m, src) -} -func (m *NodeDaemonEndpoints) XXX_Size() int { - return m.Size() -} -func (m *NodeDaemonEndpoints) XXX_DiscardUnknown() { - xxx_messageInfo_NodeDaemonEndpoints.DiscardUnknown(m) +func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } +func (*TopologySelectorLabelRequirement) ProtoMessage() {} +func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{184} } -var xxx_messageInfo_NodeDaemonEndpoints proto.InternalMessageInfo +func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } +func (*TopologySelectorTerm) ProtoMessage() {} +func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{185} } -func (m *NodeList) Reset() { *m = NodeList{} } -func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{94} -} -func (m *NodeList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeList) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeList.Merge(m, src) -} -func (m *NodeList) XXX_Size() int { - return m.Size() -} -func (m *NodeList) XXX_DiscardUnknown() { - xxx_messageInfo_NodeList.DiscardUnknown(m) +func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } +func (*TypedLocalObjectReference) ProtoMessage() {} +func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{186} } -var xxx_messageInfo_NodeList proto.InternalMessageInfo - -func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } -func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{95} -} -func (m *NodeProxyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeProxyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeProxyOptions.Merge(m, src) -} -func (m *NodeProxyOptions) XXX_Size() int { - return m.Size() -} -func (m *NodeProxyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_NodeProxyOptions.DiscardUnknown(m) -} +func (m *Volume) Reset() { *m = Volume{} } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{187} } -var xxx_messageInfo_NodeProxyOptions proto.InternalMessageInfo +func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } +func (*VolumeDevice) ProtoMessage() {} +func (*VolumeDevice) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{188} } -func (m *NodeResources) Reset() { *m 
= NodeResources{} } -func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{96} -} -func (m *NodeResources) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeResources) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeResources.Merge(m, src) -} -func (m *NodeResources) XXX_Size() int { - return m.Size() -} -func (m *NodeResources) XXX_DiscardUnknown() { - xxx_messageInfo_NodeResources.DiscardUnknown(m) -} +func (m *VolumeMount) Reset() { *m = VolumeMount{} } +func (*VolumeMount) ProtoMessage() {} +func (*VolumeMount) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{189} } -var xxx_messageInfo_NodeResources proto.InternalMessageInfo +func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } +func (*VolumeNodeAffinity) ProtoMessage() {} +func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{190} } -func (m *NodeSelector) Reset() { *m = NodeSelector{} } -func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{97} -} -func (m *NodeSelector) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeSelector) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeSelector.Merge(m, src) -} -func (m *NodeSelector) XXX_Size() int { - return m.Size() -} -func (m *NodeSelector) XXX_DiscardUnknown() { - xxx_messageInfo_NodeSelector.DiscardUnknown(m) -} +func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } +func (*VolumeProjection) ProtoMessage() {} +func (*VolumeProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{191} } -var xxx_messageInfo_NodeSelector proto.InternalMessageInfo +func (m *VolumeSource) Reset() { *m = VolumeSource{} } +func (*VolumeSource) ProtoMessage() {} +func (*VolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{192} } -func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } -func (*NodeSelectorRequirement) ProtoMessage() {} -func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{98} -} -func (m *NodeSelectorRequirement) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeSelectorRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeSelectorRequirement.Merge(m, src) -} -func (m *NodeSelectorRequirement) XXX_Size() int { - return m.Size() -} -func (m *NodeSelectorRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_NodeSelectorRequirement.DiscardUnknown(m) +func (m *VsphereVirtualDiskVolumeSource) Reset() { *m = VsphereVirtualDiskVolumeSource{} } +func (*VsphereVirtualDiskVolumeSource) ProtoMessage() {} +func (*VsphereVirtualDiskVolumeSource) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{193} } 
-var xxx_messageInfo_NodeSelectorRequirement proto.InternalMessageInfo - -func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } -func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{99} -} -func (m *NodeSelectorTerm) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeSelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NodeSelectorTerm) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeSelectorTerm.Merge(m, src) -} -func (m *NodeSelectorTerm) XXX_Size() int { - return m.Size() -} -func (m *NodeSelectorTerm) XXX_DiscardUnknown() { - xxx_messageInfo_NodeSelectorTerm.DiscardUnknown(m) +func (m *WeightedPodAffinityTerm) Reset() { *m = WeightedPodAffinityTerm{} } +func (*WeightedPodAffinityTerm) ProtoMessage() {} +func (*WeightedPodAffinityTerm) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{194} } -var xxx_messageInfo_NodeSelectorTerm proto.InternalMessageInfo - -func (m *NodeSpec) Reset() { *m = NodeSpec{} } -func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{100} -} -func (m *NodeSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func init() { + proto.RegisterType((*AWSElasticBlockStoreVolumeSource)(nil), "k8s.io.api.core.v1.AWSElasticBlockStoreVolumeSource") + proto.RegisterType((*Affinity)(nil), "k8s.io.api.core.v1.Affinity") + proto.RegisterType((*AttachedVolume)(nil), "k8s.io.api.core.v1.AttachedVolume") + proto.RegisterType((*AvoidPods)(nil), "k8s.io.api.core.v1.AvoidPods") + proto.RegisterType((*AzureDiskVolumeSource)(nil), "k8s.io.api.core.v1.AzureDiskVolumeSource") + proto.RegisterType((*AzureFilePersistentVolumeSource)(nil), "k8s.io.api.core.v1.AzureFilePersistentVolumeSource") + proto.RegisterType((*AzureFileVolumeSource)(nil), "k8s.io.api.core.v1.AzureFileVolumeSource") + proto.RegisterType((*Binding)(nil), "k8s.io.api.core.v1.Binding") + proto.RegisterType((*CSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CSIPersistentVolumeSource") + proto.RegisterType((*Capabilities)(nil), "k8s.io.api.core.v1.Capabilities") + proto.RegisterType((*CephFSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CephFSPersistentVolumeSource") + proto.RegisterType((*CephFSVolumeSource)(nil), "k8s.io.api.core.v1.CephFSVolumeSource") + proto.RegisterType((*CinderPersistentVolumeSource)(nil), "k8s.io.api.core.v1.CinderPersistentVolumeSource") + proto.RegisterType((*CinderVolumeSource)(nil), "k8s.io.api.core.v1.CinderVolumeSource") + proto.RegisterType((*ClientIPConfig)(nil), "k8s.io.api.core.v1.ClientIPConfig") + proto.RegisterType((*ComponentCondition)(nil), "k8s.io.api.core.v1.ComponentCondition") + proto.RegisterType((*ComponentStatus)(nil), "k8s.io.api.core.v1.ComponentStatus") + proto.RegisterType((*ComponentStatusList)(nil), "k8s.io.api.core.v1.ComponentStatusList") + proto.RegisterType((*ConfigMap)(nil), "k8s.io.api.core.v1.ConfigMap") + proto.RegisterType((*ConfigMapEnvSource)(nil), "k8s.io.api.core.v1.ConfigMapEnvSource") + proto.RegisterType((*ConfigMapKeySelector)(nil), "k8s.io.api.core.v1.ConfigMapKeySelector") + proto.RegisterType((*ConfigMapList)(nil), "k8s.io.api.core.v1.ConfigMapList") + proto.RegisterType((*ConfigMapNodeConfigSource)(nil), "k8s.io.api.core.v1.ConfigMapNodeConfigSource") + 
proto.RegisterType((*ConfigMapProjection)(nil), "k8s.io.api.core.v1.ConfigMapProjection") + proto.RegisterType((*ConfigMapVolumeSource)(nil), "k8s.io.api.core.v1.ConfigMapVolumeSource") + proto.RegisterType((*Container)(nil), "k8s.io.api.core.v1.Container") + proto.RegisterType((*ContainerImage)(nil), "k8s.io.api.core.v1.ContainerImage") + proto.RegisterType((*ContainerPort)(nil), "k8s.io.api.core.v1.ContainerPort") + proto.RegisterType((*ContainerState)(nil), "k8s.io.api.core.v1.ContainerState") + proto.RegisterType((*ContainerStateRunning)(nil), "k8s.io.api.core.v1.ContainerStateRunning") + proto.RegisterType((*ContainerStateTerminated)(nil), "k8s.io.api.core.v1.ContainerStateTerminated") + proto.RegisterType((*ContainerStateWaiting)(nil), "k8s.io.api.core.v1.ContainerStateWaiting") + proto.RegisterType((*ContainerStatus)(nil), "k8s.io.api.core.v1.ContainerStatus") + proto.RegisterType((*DaemonEndpoint)(nil), "k8s.io.api.core.v1.DaemonEndpoint") + proto.RegisterType((*DownwardAPIProjection)(nil), "k8s.io.api.core.v1.DownwardAPIProjection") + proto.RegisterType((*DownwardAPIVolumeFile)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeFile") + proto.RegisterType((*DownwardAPIVolumeSource)(nil), "k8s.io.api.core.v1.DownwardAPIVolumeSource") + proto.RegisterType((*EmptyDirVolumeSource)(nil), "k8s.io.api.core.v1.EmptyDirVolumeSource") + proto.RegisterType((*EndpointAddress)(nil), "k8s.io.api.core.v1.EndpointAddress") + proto.RegisterType((*EndpointPort)(nil), "k8s.io.api.core.v1.EndpointPort") + proto.RegisterType((*EndpointSubset)(nil), "k8s.io.api.core.v1.EndpointSubset") + proto.RegisterType((*Endpoints)(nil), "k8s.io.api.core.v1.Endpoints") + proto.RegisterType((*EndpointsList)(nil), "k8s.io.api.core.v1.EndpointsList") + proto.RegisterType((*EnvFromSource)(nil), "k8s.io.api.core.v1.EnvFromSource") + proto.RegisterType((*EnvVar)(nil), "k8s.io.api.core.v1.EnvVar") + proto.RegisterType((*EnvVarSource)(nil), "k8s.io.api.core.v1.EnvVarSource") + proto.RegisterType((*Event)(nil), "k8s.io.api.core.v1.Event") + proto.RegisterType((*EventList)(nil), "k8s.io.api.core.v1.EventList") + proto.RegisterType((*EventSeries)(nil), "k8s.io.api.core.v1.EventSeries") + proto.RegisterType((*EventSource)(nil), "k8s.io.api.core.v1.EventSource") + proto.RegisterType((*ExecAction)(nil), "k8s.io.api.core.v1.ExecAction") + proto.RegisterType((*FCVolumeSource)(nil), "k8s.io.api.core.v1.FCVolumeSource") + proto.RegisterType((*FlexPersistentVolumeSource)(nil), "k8s.io.api.core.v1.FlexPersistentVolumeSource") + proto.RegisterType((*FlexVolumeSource)(nil), "k8s.io.api.core.v1.FlexVolumeSource") + proto.RegisterType((*FlockerVolumeSource)(nil), "k8s.io.api.core.v1.FlockerVolumeSource") + proto.RegisterType((*GCEPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.GCEPersistentDiskVolumeSource") + proto.RegisterType((*GitRepoVolumeSource)(nil), "k8s.io.api.core.v1.GitRepoVolumeSource") + proto.RegisterType((*GlusterfsVolumeSource)(nil), "k8s.io.api.core.v1.GlusterfsVolumeSource") + proto.RegisterType((*HTTPGetAction)(nil), "k8s.io.api.core.v1.HTTPGetAction") + proto.RegisterType((*HTTPHeader)(nil), "k8s.io.api.core.v1.HTTPHeader") + proto.RegisterType((*Handler)(nil), "k8s.io.api.core.v1.Handler") + proto.RegisterType((*HostAlias)(nil), "k8s.io.api.core.v1.HostAlias") + proto.RegisterType((*HostPathVolumeSource)(nil), "k8s.io.api.core.v1.HostPathVolumeSource") + proto.RegisterType((*ISCSIPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ISCSIPersistentVolumeSource") + proto.RegisterType((*ISCSIVolumeSource)(nil), 
"k8s.io.api.core.v1.ISCSIVolumeSource") + proto.RegisterType((*KeyToPath)(nil), "k8s.io.api.core.v1.KeyToPath") + proto.RegisterType((*Lifecycle)(nil), "k8s.io.api.core.v1.Lifecycle") + proto.RegisterType((*LimitRange)(nil), "k8s.io.api.core.v1.LimitRange") + proto.RegisterType((*LimitRangeItem)(nil), "k8s.io.api.core.v1.LimitRangeItem") + proto.RegisterType((*LimitRangeList)(nil), "k8s.io.api.core.v1.LimitRangeList") + proto.RegisterType((*LimitRangeSpec)(nil), "k8s.io.api.core.v1.LimitRangeSpec") + proto.RegisterType((*List)(nil), "k8s.io.api.core.v1.List") + proto.RegisterType((*LoadBalancerIngress)(nil), "k8s.io.api.core.v1.LoadBalancerIngress") + proto.RegisterType((*LoadBalancerStatus)(nil), "k8s.io.api.core.v1.LoadBalancerStatus") + proto.RegisterType((*LocalObjectReference)(nil), "k8s.io.api.core.v1.LocalObjectReference") + proto.RegisterType((*LocalVolumeSource)(nil), "k8s.io.api.core.v1.LocalVolumeSource") + proto.RegisterType((*NFSVolumeSource)(nil), "k8s.io.api.core.v1.NFSVolumeSource") + proto.RegisterType((*Namespace)(nil), "k8s.io.api.core.v1.Namespace") + proto.RegisterType((*NamespaceList)(nil), "k8s.io.api.core.v1.NamespaceList") + proto.RegisterType((*NamespaceSpec)(nil), "k8s.io.api.core.v1.NamespaceSpec") + proto.RegisterType((*NamespaceStatus)(nil), "k8s.io.api.core.v1.NamespaceStatus") + proto.RegisterType((*Node)(nil), "k8s.io.api.core.v1.Node") + proto.RegisterType((*NodeAddress)(nil), "k8s.io.api.core.v1.NodeAddress") + proto.RegisterType((*NodeAffinity)(nil), "k8s.io.api.core.v1.NodeAffinity") + proto.RegisterType((*NodeCondition)(nil), "k8s.io.api.core.v1.NodeCondition") + proto.RegisterType((*NodeConfigSource)(nil), "k8s.io.api.core.v1.NodeConfigSource") + proto.RegisterType((*NodeConfigStatus)(nil), "k8s.io.api.core.v1.NodeConfigStatus") + proto.RegisterType((*NodeDaemonEndpoints)(nil), "k8s.io.api.core.v1.NodeDaemonEndpoints") + proto.RegisterType((*NodeList)(nil), "k8s.io.api.core.v1.NodeList") + proto.RegisterType((*NodeProxyOptions)(nil), "k8s.io.api.core.v1.NodeProxyOptions") + proto.RegisterType((*NodeResources)(nil), "k8s.io.api.core.v1.NodeResources") + proto.RegisterType((*NodeSelector)(nil), "k8s.io.api.core.v1.NodeSelector") + proto.RegisterType((*NodeSelectorRequirement)(nil), "k8s.io.api.core.v1.NodeSelectorRequirement") + proto.RegisterType((*NodeSelectorTerm)(nil), "k8s.io.api.core.v1.NodeSelectorTerm") + proto.RegisterType((*NodeSpec)(nil), "k8s.io.api.core.v1.NodeSpec") + proto.RegisterType((*NodeStatus)(nil), "k8s.io.api.core.v1.NodeStatus") + proto.RegisterType((*NodeSystemInfo)(nil), "k8s.io.api.core.v1.NodeSystemInfo") + proto.RegisterType((*ObjectFieldSelector)(nil), "k8s.io.api.core.v1.ObjectFieldSelector") + proto.RegisterType((*ObjectReference)(nil), "k8s.io.api.core.v1.ObjectReference") + proto.RegisterType((*PersistentVolume)(nil), "k8s.io.api.core.v1.PersistentVolume") + proto.RegisterType((*PersistentVolumeClaim)(nil), "k8s.io.api.core.v1.PersistentVolumeClaim") + proto.RegisterType((*PersistentVolumeClaimCondition)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimCondition") + proto.RegisterType((*PersistentVolumeClaimList)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimList") + proto.RegisterType((*PersistentVolumeClaimSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimSpec") + proto.RegisterType((*PersistentVolumeClaimStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimStatus") + proto.RegisterType((*PersistentVolumeClaimVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeClaimVolumeSource") + 
proto.RegisterType((*PersistentVolumeList)(nil), "k8s.io.api.core.v1.PersistentVolumeList") + proto.RegisterType((*PersistentVolumeSource)(nil), "k8s.io.api.core.v1.PersistentVolumeSource") + proto.RegisterType((*PersistentVolumeSpec)(nil), "k8s.io.api.core.v1.PersistentVolumeSpec") + proto.RegisterType((*PersistentVolumeStatus)(nil), "k8s.io.api.core.v1.PersistentVolumeStatus") + proto.RegisterType((*PhotonPersistentDiskVolumeSource)(nil), "k8s.io.api.core.v1.PhotonPersistentDiskVolumeSource") + proto.RegisterType((*Pod)(nil), "k8s.io.api.core.v1.Pod") + proto.RegisterType((*PodAffinity)(nil), "k8s.io.api.core.v1.PodAffinity") + proto.RegisterType((*PodAffinityTerm)(nil), "k8s.io.api.core.v1.PodAffinityTerm") + proto.RegisterType((*PodAntiAffinity)(nil), "k8s.io.api.core.v1.PodAntiAffinity") + proto.RegisterType((*PodAttachOptions)(nil), "k8s.io.api.core.v1.PodAttachOptions") + proto.RegisterType((*PodCondition)(nil), "k8s.io.api.core.v1.PodCondition") + proto.RegisterType((*PodDNSConfig)(nil), "k8s.io.api.core.v1.PodDNSConfig") + proto.RegisterType((*PodDNSConfigOption)(nil), "k8s.io.api.core.v1.PodDNSConfigOption") + proto.RegisterType((*PodExecOptions)(nil), "k8s.io.api.core.v1.PodExecOptions") + proto.RegisterType((*PodList)(nil), "k8s.io.api.core.v1.PodList") + proto.RegisterType((*PodLogOptions)(nil), "k8s.io.api.core.v1.PodLogOptions") + proto.RegisterType((*PodPortForwardOptions)(nil), "k8s.io.api.core.v1.PodPortForwardOptions") + proto.RegisterType((*PodProxyOptions)(nil), "k8s.io.api.core.v1.PodProxyOptions") + proto.RegisterType((*PodReadinessGate)(nil), "k8s.io.api.core.v1.PodReadinessGate") + proto.RegisterType((*PodSecurityContext)(nil), "k8s.io.api.core.v1.PodSecurityContext") + proto.RegisterType((*PodSignature)(nil), "k8s.io.api.core.v1.PodSignature") + proto.RegisterType((*PodSpec)(nil), "k8s.io.api.core.v1.PodSpec") + proto.RegisterType((*PodStatus)(nil), "k8s.io.api.core.v1.PodStatus") + proto.RegisterType((*PodStatusResult)(nil), "k8s.io.api.core.v1.PodStatusResult") + proto.RegisterType((*PodTemplate)(nil), "k8s.io.api.core.v1.PodTemplate") + proto.RegisterType((*PodTemplateList)(nil), "k8s.io.api.core.v1.PodTemplateList") + proto.RegisterType((*PodTemplateSpec)(nil), "k8s.io.api.core.v1.PodTemplateSpec") + proto.RegisterType((*PortworxVolumeSource)(nil), "k8s.io.api.core.v1.PortworxVolumeSource") + proto.RegisterType((*Preconditions)(nil), "k8s.io.api.core.v1.Preconditions") + proto.RegisterType((*PreferAvoidPodsEntry)(nil), "k8s.io.api.core.v1.PreferAvoidPodsEntry") + proto.RegisterType((*PreferredSchedulingTerm)(nil), "k8s.io.api.core.v1.PreferredSchedulingTerm") + proto.RegisterType((*Probe)(nil), "k8s.io.api.core.v1.Probe") + proto.RegisterType((*ProjectedVolumeSource)(nil), "k8s.io.api.core.v1.ProjectedVolumeSource") + proto.RegisterType((*QuobyteVolumeSource)(nil), "k8s.io.api.core.v1.QuobyteVolumeSource") + proto.RegisterType((*RBDPersistentVolumeSource)(nil), "k8s.io.api.core.v1.RBDPersistentVolumeSource") + proto.RegisterType((*RBDVolumeSource)(nil), "k8s.io.api.core.v1.RBDVolumeSource") + proto.RegisterType((*RangeAllocation)(nil), "k8s.io.api.core.v1.RangeAllocation") + proto.RegisterType((*ReplicationController)(nil), "k8s.io.api.core.v1.ReplicationController") + proto.RegisterType((*ReplicationControllerCondition)(nil), "k8s.io.api.core.v1.ReplicationControllerCondition") + proto.RegisterType((*ReplicationControllerList)(nil), "k8s.io.api.core.v1.ReplicationControllerList") + proto.RegisterType((*ReplicationControllerSpec)(nil), 
"k8s.io.api.core.v1.ReplicationControllerSpec") + proto.RegisterType((*ReplicationControllerStatus)(nil), "k8s.io.api.core.v1.ReplicationControllerStatus") + proto.RegisterType((*ResourceFieldSelector)(nil), "k8s.io.api.core.v1.ResourceFieldSelector") + proto.RegisterType((*ResourceQuota)(nil), "k8s.io.api.core.v1.ResourceQuota") + proto.RegisterType((*ResourceQuotaList)(nil), "k8s.io.api.core.v1.ResourceQuotaList") + proto.RegisterType((*ResourceQuotaSpec)(nil), "k8s.io.api.core.v1.ResourceQuotaSpec") + proto.RegisterType((*ResourceQuotaStatus)(nil), "k8s.io.api.core.v1.ResourceQuotaStatus") + proto.RegisterType((*ResourceRequirements)(nil), "k8s.io.api.core.v1.ResourceRequirements") + proto.RegisterType((*SELinuxOptions)(nil), "k8s.io.api.core.v1.SELinuxOptions") + proto.RegisterType((*ScaleIOPersistentVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOPersistentVolumeSource") + proto.RegisterType((*ScaleIOVolumeSource)(nil), "k8s.io.api.core.v1.ScaleIOVolumeSource") + proto.RegisterType((*ScopeSelector)(nil), "k8s.io.api.core.v1.ScopeSelector") + proto.RegisterType((*ScopedResourceSelectorRequirement)(nil), "k8s.io.api.core.v1.ScopedResourceSelectorRequirement") + proto.RegisterType((*Secret)(nil), "k8s.io.api.core.v1.Secret") + proto.RegisterType((*SecretEnvSource)(nil), "k8s.io.api.core.v1.SecretEnvSource") + proto.RegisterType((*SecretKeySelector)(nil), "k8s.io.api.core.v1.SecretKeySelector") + proto.RegisterType((*SecretList)(nil), "k8s.io.api.core.v1.SecretList") + proto.RegisterType((*SecretProjection)(nil), "k8s.io.api.core.v1.SecretProjection") + proto.RegisterType((*SecretReference)(nil), "k8s.io.api.core.v1.SecretReference") + proto.RegisterType((*SecretVolumeSource)(nil), "k8s.io.api.core.v1.SecretVolumeSource") + proto.RegisterType((*SecurityContext)(nil), "k8s.io.api.core.v1.SecurityContext") + proto.RegisterType((*SerializedReference)(nil), "k8s.io.api.core.v1.SerializedReference") + proto.RegisterType((*Service)(nil), "k8s.io.api.core.v1.Service") + proto.RegisterType((*ServiceAccount)(nil), "k8s.io.api.core.v1.ServiceAccount") + proto.RegisterType((*ServiceAccountList)(nil), "k8s.io.api.core.v1.ServiceAccountList") + proto.RegisterType((*ServiceAccountTokenProjection)(nil), "k8s.io.api.core.v1.ServiceAccountTokenProjection") + proto.RegisterType((*ServiceList)(nil), "k8s.io.api.core.v1.ServiceList") + proto.RegisterType((*ServicePort)(nil), "k8s.io.api.core.v1.ServicePort") + proto.RegisterType((*ServiceProxyOptions)(nil), "k8s.io.api.core.v1.ServiceProxyOptions") + proto.RegisterType((*ServiceSpec)(nil), "k8s.io.api.core.v1.ServiceSpec") + proto.RegisterType((*ServiceStatus)(nil), "k8s.io.api.core.v1.ServiceStatus") + proto.RegisterType((*SessionAffinityConfig)(nil), "k8s.io.api.core.v1.SessionAffinityConfig") + proto.RegisterType((*StorageOSPersistentVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSPersistentVolumeSource") + proto.RegisterType((*StorageOSVolumeSource)(nil), "k8s.io.api.core.v1.StorageOSVolumeSource") + proto.RegisterType((*Sysctl)(nil), "k8s.io.api.core.v1.Sysctl") + proto.RegisterType((*TCPSocketAction)(nil), "k8s.io.api.core.v1.TCPSocketAction") + proto.RegisterType((*Taint)(nil), "k8s.io.api.core.v1.Taint") + proto.RegisterType((*Toleration)(nil), "k8s.io.api.core.v1.Toleration") + proto.RegisterType((*TopologySelectorLabelRequirement)(nil), "k8s.io.api.core.v1.TopologySelectorLabelRequirement") + proto.RegisterType((*TopologySelectorTerm)(nil), "k8s.io.api.core.v1.TopologySelectorTerm") + proto.RegisterType((*TypedLocalObjectReference)(nil), 
"k8s.io.api.core.v1.TypedLocalObjectReference") + proto.RegisterType((*Volume)(nil), "k8s.io.api.core.v1.Volume") + proto.RegisterType((*VolumeDevice)(nil), "k8s.io.api.core.v1.VolumeDevice") + proto.RegisterType((*VolumeMount)(nil), "k8s.io.api.core.v1.VolumeMount") + proto.RegisterType((*VolumeNodeAffinity)(nil), "k8s.io.api.core.v1.VolumeNodeAffinity") + proto.RegisterType((*VolumeProjection)(nil), "k8s.io.api.core.v1.VolumeProjection") + proto.RegisterType((*VolumeSource)(nil), "k8s.io.api.core.v1.VolumeSource") + proto.RegisterType((*VsphereVirtualDiskVolumeSource)(nil), "k8s.io.api.core.v1.VsphereVirtualDiskVolumeSource") + proto.RegisterType((*WeightedPodAffinityTerm)(nil), "k8s.io.api.core.v1.WeightedPodAffinityTerm") } -func (m *NodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *NodeSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeSpec.Merge(m, src) -} -func (m *NodeSpec) XXX_Size() int { - return m.Size() -} -func (m *NodeSpec) XXX_DiscardUnknown() { - xxx_messageInfo_NodeSpec.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_NodeSpec proto.InternalMessageInfo - -func (m *NodeStatus) Reset() { *m = NodeStatus{} } -func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{101} -} -func (m *NodeStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return b[:n], nil -} -func (m *NodeStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeStatus.Merge(m, src) -} -func (m *NodeStatus) XXX_Size() int { - return m.Size() -} -func (m *NodeStatus) XXX_DiscardUnknown() { - xxx_messageInfo_NodeStatus.DiscardUnknown(m) + i++ + return i, nil } -var xxx_messageInfo_NodeStatus proto.InternalMessageInfo - -func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } -func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{102} -} -func (m *NodeSystemInfo) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NodeSystemInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *Affinity) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *NodeSystemInfo) XXX_Merge(src proto.Message) { - xxx_messageInfo_NodeSystemInfo.Merge(m, src) -} -func (m *NodeSystemInfo) XXX_Size() int { - return m.Size() -} -func (m 
*NodeSystemInfo) XXX_DiscardUnknown() { - xxx_messageInfo_NodeSystemInfo.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_NodeSystemInfo proto.InternalMessageInfo - -func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } -func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{103} -} -func (m *ObjectFieldSelector) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectFieldSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeAffinity != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) + n1, err := m.NodeAffinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 } - return b[:n], nil -} -func (m *ObjectFieldSelector) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectFieldSelector.Merge(m, src) -} -func (m *ObjectFieldSelector) XXX_Size() int { - return m.Size() -} -func (m *ObjectFieldSelector) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectFieldSelector.DiscardUnknown(m) -} - -var xxx_messageInfo_ObjectFieldSelector proto.InternalMessageInfo - -func (m *ObjectReference) Reset() { *m = ObjectReference{} } -func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{104} -} -func (m *ObjectReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + if m.PodAffinity != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinity.Size())) + n2, err := m.PodAffinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 } - return b[:n], nil -} -func (m *ObjectReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectReference.Merge(m, src) -} -func (m *ObjectReference) XXX_Size() int { - return m.Size() -} -func (m *ObjectReference) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectReference.DiscardUnknown(m) -} - -var xxx_messageInfo_ObjectReference proto.InternalMessageInfo - -func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } -func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{105} -} -func (m *PersistentVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PersistentVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + if m.PodAntiAffinity != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAntiAffinity.Size())) + n3, err := m.PodAntiAffinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 } - return b[:n], nil -} -func (m *PersistentVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolume.Merge(m, src) -} -func (m *PersistentVolume) XXX_Size() int { - return m.Size() + return i, nil } -func (m *PersistentVolume) XXX_DiscardUnknown() { - xxx_messageInfo_PersistentVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_PersistentVolume 
proto.InternalMessageInfo -func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } -func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{106} -} -func (m *PersistentVolumeClaim) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PersistentVolumeClaim) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *PersistentVolumeClaim) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolumeClaim.Merge(m, src) -} -func (m *PersistentVolumeClaim) XXX_Size() int { - return m.Size() -} -func (m *PersistentVolumeClaim) XXX_DiscardUnknown() { - xxx_messageInfo_PersistentVolumeClaim.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_PersistentVolumeClaim proto.InternalMessageInfo - -func (m *PersistentVolumeClaimCondition) Reset() { *m = PersistentVolumeClaimCondition{} } -func (*PersistentVolumeClaimCondition) ProtoMessage() {} -func (*PersistentVolumeClaimCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{107} -} -func (m *PersistentVolumeClaimCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i += copy(dAtA[i:], m.DevicePath) + return i, nil } -func (m *PersistentVolumeClaimCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + +func (m *AvoidPods) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *PersistentVolumeClaimCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolumeClaimCondition.Merge(m, src) -} -func (m *PersistentVolumeClaimCondition) XXX_Size() int { - return m.Size() -} -func (m *PersistentVolumeClaimCondition) XXX_DiscardUnknown() { - xxx_messageInfo_PersistentVolumeClaimCondition.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_PersistentVolumeClaimCondition proto.InternalMessageInfo - -func (m *PersistentVolumeClaimList) Reset() { *m = PersistentVolumeClaimList{} } -func (*PersistentVolumeClaimList) ProtoMessage() {} -func (*PersistentVolumeClaimList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{108} -} -func (m *PersistentVolumeClaimList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PersistentVolumeClaimList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.PreferAvoidPods) > 0 { + for _, msg := range m.PreferAvoidPods { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return b[:n], nil -} 
-func (m *PersistentVolumeClaimList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolumeClaimList.Merge(m, src) -} -func (m *PersistentVolumeClaimList) XXX_Size() int { - return m.Size() + return i, nil } -func (m *PersistentVolumeClaimList) XXX_DiscardUnknown() { - xxx_messageInfo_PersistentVolumeClaimList.DiscardUnknown(m) -} - -var xxx_messageInfo_PersistentVolumeClaimList proto.InternalMessageInfo -func (m *PersistentVolumeClaimSpec) Reset() { *m = PersistentVolumeClaimSpec{} } -func (*PersistentVolumeClaimSpec) ProtoMessage() {} -func (*PersistentVolumeClaimSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{109} -} -func (m *PersistentVolumeClaimSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PersistentVolumeClaimSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *PersistentVolumeClaimSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolumeClaimSpec.Merge(m, src) -} -func (m *PersistentVolumeClaimSpec) XXX_Size() int { - return m.Size() -} -func (m *PersistentVolumeClaimSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PersistentVolumeClaimSpec.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_PersistentVolumeClaimSpec proto.InternalMessageInfo - -func (m *PersistentVolumeClaimStatus) Reset() { *m = PersistentVolumeClaimStatus{} } -func (*PersistentVolumeClaimStatus) ProtoMessage() {} -func (*PersistentVolumeClaimStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{110} -} -func (m *PersistentVolumeClaimStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PersistentVolumeClaimStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) + i += copy(dAtA[i:], m.DiskName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) + i += copy(dAtA[i:], m.DataDiskURI) + if m.CachingMode != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) + i += copy(dAtA[i:], *m.CachingMode) } - return b[:n], nil -} -func (m *PersistentVolumeClaimStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolumeClaimStatus.Merge(m, src) -} -func (m *PersistentVolumeClaimStatus) XXX_Size() int { - return m.Size() -} -func (m *PersistentVolumeClaimStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PersistentVolumeClaimStatus.DiscardUnknown(m) + if m.FSType != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i += copy(dAtA[i:], *m.FSType) + } + if m.ReadOnly != nil { + dAtA[i] = 0x28 + i++ + if *m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.Kind != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) + i += copy(dAtA[i:], *m.Kind) + } + return i, nil } -var xxx_messageInfo_PersistentVolumeClaimStatus proto.InternalMessageInfo - -func (m *PersistentVolumeClaimVolumeSource) Reset() { *m = 
PersistentVolumeClaimVolumeSource{} } -func (*PersistentVolumeClaimVolumeSource) ProtoMessage() {} -func (*PersistentVolumeClaimVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{111} -} -func (m *PersistentVolumeClaimVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PersistentVolumeClaimVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *PersistentVolumeClaimVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolumeClaimVolumeSource.Merge(m, src) -} -func (m *PersistentVolumeClaimVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *PersistentVolumeClaimVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_PersistentVolumeClaimVolumeSource.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_PersistentVolumeClaimVolumeSource proto.InternalMessageInfo - -func (m *PersistentVolumeList) Reset() { *m = PersistentVolumeList{} } -func (*PersistentVolumeList) ProtoMessage() {} -func (*PersistentVolumeList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{112} -} -func (m *PersistentVolumeList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PersistentVolumeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) + i += copy(dAtA[i:], m.ShareName) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return b[:n], nil -} -func (m *PersistentVolumeList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolumeList.Merge(m, src) -} -func (m *PersistentVolumeList) XXX_Size() int { - return m.Size() -} -func (m *PersistentVolumeList) XXX_DiscardUnknown() { - xxx_messageInfo_PersistentVolumeList.DiscardUnknown(m) + i++ + if m.SecretNamespace != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) + i += copy(dAtA[i:], *m.SecretNamespace) + } + return i, nil } -var xxx_messageInfo_PersistentVolumeList proto.InternalMessageInfo - -func (m *PersistentVolumeSource) Reset() { *m = PersistentVolumeSource{} } -func (*PersistentVolumeSource) ProtoMessage() {} -func (*PersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{113} -} -func (m *PersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *PersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_PersistentVolumeSource.Merge(m, src) -} -func (m 
[Generated-code hunk (vendored gogo-protobuf generated.pb.go, Kubernetes core v1 API types). Removed: the reflection-style helpers for each message from PersistentVolumeSource, PersistentVolumeSpec, and PersistentVolumeStatus through the Pod* types, Probe, ProjectedVolumeSource, the Quobyte/RBD/ScaleIO volume sources, RangeAllocation, ReplicationController*, ResourceQuota*, ResourceRequirements, SELinuxOptions, ScopeSelector, and ScopedResourceSelectorRequirement, i.e. their Reset, ProtoMessage, Descriptor, XXX_Unmarshal, XXX_Marshal, XXX_Merge, XXX_Size, and XXX_DiscardUnknown methods and the xxx_messageInfo_* variables. Added: explicit Marshal/MarshalTo implementations that hand-encode the protobuf wire format (single-byte field tags, varint lengths via encodeVarintGenerated, raw byte copies for strings, 0/1 varints for bools, numbered nN temporaries for embedded messages, and map keys sorted with github_com_gogo_protobuf_sortkeys.Strings for deterministic output) for AzureFileVolumeSource, Binding, CSIPersistentVolumeSource, Capabilities, CephFSPersistentVolumeSource, CephFSVolumeSource, CinderPersistentVolumeSource, CinderVolumeSource, ClientIPConfig, ComponentCondition, ComponentStatus, ComponentStatusList, ConfigMap, ConfigMapEnvSource, ConfigMapKeySelector, ConfigMapList, ConfigMapNodeConfigSource, ConfigMapProjection, ConfigMapVolumeSource, Container, ContainerImage, ContainerPort, ContainerState, ContainerStateRunning, ContainerStateTerminated, ContainerStateWaiting, and ContainerStatus, whose MarshalTo continues past this excerpt.]
var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.State.Size())) + n32, err := m.State.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *ScopedResourceSelectorRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScopedResourceSelectorRequirement.Merge(m, src) -} -func (m *ScopedResourceSelectorRequirement) XXX_Size() int { - return m.Size() -} -func (m *ScopedResourceSelectorRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_ScopedResourceSelectorRequirement.DiscardUnknown(m) + i += n32 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTerminationState.Size())) + n33, err := m.LastTerminationState.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + dAtA[i] = 0x20 + i++ + if m.Ready { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) + i += copy(dAtA[i:], m.Image) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) + i += copy(dAtA[i:], m.ImageID) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) + i += copy(dAtA[i:], m.ContainerID) + return i, nil } -var xxx_messageInfo_ScopedResourceSelectorRequirement proto.InternalMessageInfo - -func (m *Secret) Reset() { *m = Secret{} } -func (*Secret) ProtoMessage() {} -func (*Secret) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{166} -} -func (m *Secret) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Secret) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *Secret) XXX_Merge(src proto.Message) { - xxx_messageInfo_Secret.Merge(m, src) -} -func (m *Secret) XXX_Size() int { - return m.Size() -} -func (m *Secret) XXX_DiscardUnknown() { - xxx_messageInfo_Secret.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_Secret proto.InternalMessageInfo - -func (m *SecretEnvSource) Reset() { *m = SecretEnvSource{} } -func (*SecretEnvSource) ProtoMessage() {} -func (*SecretEnvSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{167} -} -func (m *SecretEnvSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + return i, nil } -func (m *SecretEnvSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + +func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *SecretEnvSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecretEnvSource.Merge(m, src) -} -func (m *SecretEnvSource) XXX_Size() int { - return m.Size() -} -func (m *SecretEnvSource) XXX_DiscardUnknown() { - 
xxx_messageInfo_SecretEnvSource.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_SecretEnvSource proto.InternalMessageInfo - -func (m *SecretKeySelector) Reset() { *m = SecretKeySelector{} } -func (*SecretKeySelector) ProtoMessage() {} -func (*SecretKeySelector) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{168} -} -func (m *SecretKeySelector) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -func (m *SecretKeySelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + +func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *SecretKeySelector) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecretKeySelector.Merge(m, src) -} -func (m *SecretKeySelector) XXX_Size() int { - return m.Size() + return dAtA[:n], nil } -func (m *SecretKeySelector) XXX_DiscardUnknown() { - xxx_messageInfo_SecretKeySelector.DiscardUnknown(m) -} - -var xxx_messageInfo_SecretKeySelector proto.InternalMessageInfo -func (m *SecretList) Reset() { *m = SecretList{} } -func (*SecretList) ProtoMessage() {} -func (*SecretList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{169} -} -func (m *SecretList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SecretList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.FieldRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) + n34, err := m.FieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n34 } - return b[:n], nil -} -func (m *SecretList) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecretList.Merge(m, src) -} -func (m *SecretList) XXX_Size() int { - return m.Size() -} -func (m *SecretList) XXX_DiscardUnknown() { - xxx_messageInfo_SecretList.DiscardUnknown(m) + if m.ResourceFieldRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) + n35, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n35 + } + if m.Mode != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + } + return i, nil } -var xxx_messageInfo_SecretList proto.InternalMessageInfo - -func (m *SecretProjection) Reset() { *m = SecretProjection{} } -func (*SecretProjection) ProtoMessage() {} -func (*SecretProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{170} -} -func (m *SecretProjection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SecretProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) 
+func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *SecretProjection) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecretProjection.Merge(m, src) -} -func (m *SecretProjection) XXX_Size() int { - return m.Size() -} -func (m *SecretProjection) XXX_DiscardUnknown() { - xxx_messageInfo_SecretProjection.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_SecretProjection proto.InternalMessageInfo - -func (m *SecretReference) Reset() { *m = SecretReference{} } -func (*SecretReference) ProtoMessage() {} -func (*SecretReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{171} -} -func (m *SecretReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SecretReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return b[:n], nil -} -func (m *SecretReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecretReference.Merge(m, src) -} -func (m *SecretReference) XXX_Size() int { - return m.Size() -} -func (m *SecretReference) XXX_DiscardUnknown() { - xxx_messageInfo_SecretReference.DiscardUnknown(m) + if m.DefaultMode != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + } + return i, nil } -var xxx_messageInfo_SecretReference proto.InternalMessageInfo - -func (m *SecretVolumeSource) Reset() { *m = SecretVolumeSource{} } -func (*SecretVolumeSource) ProtoMessage() {} -func (*SecretVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{172} -} -func (m *SecretVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SecretVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *SecretVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecretVolumeSource.Merge(m, src) -} -func (m *SecretVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *SecretVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_SecretVolumeSource.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_SecretVolumeSource proto.InternalMessageInfo - -func (m *SecurityContext) Reset() { *m = SecurityContext{} } -func (*SecurityContext) ProtoMessage() {} -func (*SecurityContext) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{173} -} -func (m *SecurityContext) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SecurityContext) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + 
i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) + i += copy(dAtA[i:], m.Medium) + if m.SizeLimit != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) + n36, err := m.SizeLimit.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 } - return b[:n], nil + return i, nil } -func (m *SecurityContext) XXX_Merge(src proto.Message) { - xxx_messageInfo_SecurityContext.Merge(m, src) -} -func (m *SecurityContext) XXX_Size() int { - return m.Size() -} -func (m *SecurityContext) XXX_DiscardUnknown() { - xxx_messageInfo_SecurityContext.DiscardUnknown(m) -} - -var xxx_messageInfo_SecurityContext proto.InternalMessageInfo -func (m *SerializedReference) Reset() { *m = SerializedReference{} } -func (*SerializedReference) ProtoMessage() {} -func (*SerializedReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{174} -} -func (m *SerializedReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SerializedReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *SerializedReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_SerializedReference.Merge(m, src) -} -func (m *SerializedReference) XXX_Size() int { - return m.Size() -} -func (m *SerializedReference) XXX_DiscardUnknown() { - xxx_messageInfo_SerializedReference.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_SerializedReference proto.InternalMessageInfo - -func (m *Service) Reset() { *m = Service{} } -func (*Service) ProtoMessage() {} -func (*Service) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{175} -} -func (m *Service) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + if m.TargetRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) + n37, err := m.TargetRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n37 } - return b[:n], nil -} -func (m *Service) XXX_Merge(src proto.Message) { - xxx_messageInfo_Service.Merge(m, src) -} -func (m *Service) XXX_Size() int { - return m.Size() -} -func (m *Service) XXX_DiscardUnknown() { - xxx_messageInfo_Service.DiscardUnknown(m) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + if m.NodeName != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) + i += copy(dAtA[i:], *m.NodeName) + } + return i, nil } -var xxx_messageInfo_Service proto.InternalMessageInfo - -func (m *ServiceAccount) Reset() { *m = ServiceAccount{} } -func (*ServiceAccount) ProtoMessage() {} -func (*ServiceAccount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{176} -} -func (m *ServiceAccount) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceAccount) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EndpointPort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *ServiceAccount) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceAccount.Merge(m, src) -} -func (m *ServiceAccount) XXX_Size() int { - return m.Size() -} -func (m *ServiceAccount) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceAccount.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_ServiceAccount proto.InternalMessageInfo - -func (m *ServiceAccountList) Reset() { *m = ServiceAccountList{} } -func (*ServiceAccountList) ProtoMessage() {} -func (*ServiceAccountList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{177} -} -func (m *ServiceAccountList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) + return i, nil } -func (m *ServiceAccountList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + +func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *ServiceAccountList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceAccountList.Merge(m, src) -} -func (m *ServiceAccountList) XXX_Size() int { - return m.Size() -} -func (m *ServiceAccountList) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceAccountList.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_ServiceAccountList proto.InternalMessageInfo - -func (m *ServiceAccountTokenProjection) Reset() { *m = ServiceAccountTokenProjection{} } -func (*ServiceAccountTokenProjection) ProtoMessage() {} -func (*ServiceAccountTokenProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{178} -} -func (m *ServiceAccountTokenProjection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceAccountTokenProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return b[:n], nil -} -func (m *ServiceAccountTokenProjection) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceAccountTokenProjection.Merge(m, src) -} -func (m *ServiceAccountTokenProjection) XXX_Size() int { - return m.Size() -} -func (m *ServiceAccountTokenProjection) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceAccountTokenProjection.DiscardUnknown(m) + if len(m.NotReadyAddresses) > 0 { + for _, msg := range m.NotReadyAddresses { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err 
:= msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -var xxx_messageInfo_ServiceAccountTokenProjection proto.InternalMessageInfo - -func (m *ServiceList) Reset() { *m = ServiceList{} } -func (*ServiceList) ProtoMessage() {} -func (*ServiceList) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{179} -} -func (m *ServiceList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *Endpoints) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *ServiceList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceList.Merge(m, src) -} -func (m *ServiceList) XXX_Size() int { - return m.Size() -} -func (m *ServiceList) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceList.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_ServiceList proto.InternalMessageInfo - -func (m *ServicePort) Reset() { *m = ServicePort{} } -func (*ServicePort) ProtoMessage() {} -func (*ServicePort) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{180} -} -func (m *ServicePort) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServicePort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n38, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *ServicePort) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServicePort.Merge(m, src) -} -func (m *ServicePort) XXX_Size() int { - return m.Size() -} -func (m *ServicePort) XXX_DiscardUnknown() { - xxx_messageInfo_ServicePort.DiscardUnknown(m) + i += n38 + if len(m.Subsets) > 0 { + for _, msg := range m.Subsets { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -var xxx_messageInfo_ServicePort proto.InternalMessageInfo - -func (m *ServiceProxyOptions) Reset() { *m = ServiceProxyOptions{} } -func (*ServiceProxyOptions) ProtoMessage() {} -func (*ServiceProxyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{181} -} -func (m *ServiceProxyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceProxyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EndpointsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *ServiceProxyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceProxyOptions.Merge(m, src) -} -func (m *ServiceProxyOptions) XXX_Size() int { - return m.Size() -} -func (m *ServiceProxyOptions) 
XXX_DiscardUnknown() { - xxx_messageInfo_ServiceProxyOptions.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_ServiceProxyOptions proto.InternalMessageInfo - -func (m *ServiceSpec) Reset() { *m = ServiceSpec{} } -func (*ServiceSpec) ProtoMessage() {} -func (*ServiceSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{182} -} -func (m *ServiceSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n39, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *ServiceSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceSpec.Merge(m, src) -} -func (m *ServiceSpec) XXX_Size() int { - return m.Size() -} -func (m *ServiceSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceSpec.DiscardUnknown(m) + i += n39 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -var xxx_messageInfo_ServiceSpec proto.InternalMessageInfo - -func (m *ServiceStatus) Reset() { *m = ServiceStatus{} } -func (*ServiceStatus) ProtoMessage() {} -func (*ServiceStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{183} -} -func (m *ServiceStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServiceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *ServiceStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServiceStatus.Merge(m, src) -} -func (m *ServiceStatus) XXX_Size() int { - return m.Size() -} -func (m *ServiceStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ServiceStatus.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_ServiceStatus proto.InternalMessageInfo - -func (m *SessionAffinityConfig) Reset() { *m = SessionAffinityConfig{} } -func (*SessionAffinityConfig) ProtoMessage() {} -func (*SessionAffinityConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{184} -} -func (m *SessionAffinityConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SessionAffinityConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) + i += copy(dAtA[i:], m.Prefix) + if m.ConfigMapRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) + n40, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 } - return b[:n], nil -} -func (m *SessionAffinityConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_SessionAffinityConfig.Merge(m, src) 
-} -func (m *SessionAffinityConfig) XXX_Size() int { - return m.Size() -} -func (m *SessionAffinityConfig) XXX_DiscardUnknown() { - xxx_messageInfo_SessionAffinityConfig.DiscardUnknown(m) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n41, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n41 + } + return i, nil } -var xxx_messageInfo_SessionAffinityConfig proto.InternalMessageInfo - -func (m *StorageOSPersistentVolumeSource) Reset() { *m = StorageOSPersistentVolumeSource{} } -func (*StorageOSPersistentVolumeSource) ProtoMessage() {} -func (*StorageOSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{185} -} -func (m *StorageOSPersistentVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StorageOSPersistentVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EnvVar) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *StorageOSPersistentVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_StorageOSPersistentVolumeSource.Merge(m, src) -} -func (m *StorageOSPersistentVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *StorageOSPersistentVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_StorageOSPersistentVolumeSource.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_StorageOSPersistentVolumeSource proto.InternalMessageInfo - -func (m *StorageOSVolumeSource) Reset() { *m = StorageOSVolumeSource{} } -func (*StorageOSVolumeSource) ProtoMessage() {} -func (*StorageOSVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{186} -} -func (m *StorageOSVolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StorageOSVolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + if m.ValueFrom != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) + n42, err := m.ValueFrom.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n42 } - return b[:n], nil -} -func (m *StorageOSVolumeSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_StorageOSVolumeSource.Merge(m, src) -} -func (m *StorageOSVolumeSource) XXX_Size() int { - return m.Size() -} -func (m *StorageOSVolumeSource) XXX_DiscardUnknown() { - xxx_messageInfo_StorageOSVolumeSource.DiscardUnknown(m) + return i, nil } -var xxx_messageInfo_StorageOSVolumeSource proto.InternalMessageInfo - -func (m *Sysctl) Reset() { *m = Sysctl{} } -func (*Sysctl) ProtoMessage() {} -func (*Sysctl) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{187} -} -func (m *Sysctl) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Sysctl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m 
*EnvVarSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *Sysctl) XXX_Merge(src proto.Message) { - xxx_messageInfo_Sysctl.Merge(m, src) -} -func (m *Sysctl) XXX_Size() int { - return m.Size() -} -func (m *Sysctl) XXX_DiscardUnknown() { - xxx_messageInfo_Sysctl.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_Sysctl proto.InternalMessageInfo - -func (m *TCPSocketAction) Reset() { *m = TCPSocketAction{} } -func (*TCPSocketAction) ProtoMessage() {} -func (*TCPSocketAction) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{188} -} -func (m *TCPSocketAction) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TCPSocketAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.FieldRef != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) + n43, err := m.FieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n43 } - return b[:n], nil -} -func (m *TCPSocketAction) XXX_Merge(src proto.Message) { - xxx_messageInfo_TCPSocketAction.Merge(m, src) -} -func (m *TCPSocketAction) XXX_Size() int { - return m.Size() -} -func (m *TCPSocketAction) XXX_DiscardUnknown() { - xxx_messageInfo_TCPSocketAction.DiscardUnknown(m) + if m.ResourceFieldRef != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) + n44, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 + } + if m.ConfigMapKeyRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) + n45, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n45 + } + if m.SecretKeyRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) + n46, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + } + return i, nil } -var xxx_messageInfo_TCPSocketAction proto.InternalMessageInfo - -func (m *Taint) Reset() { *m = Taint{} } -func (*Taint) ProtoMessage() {} -func (*Taint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{189} -} -func (m *Taint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Taint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *Event) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *Taint) XXX_Merge(src proto.Message) { - xxx_messageInfo_Taint.Merge(m, src) -} -func (m *Taint) XXX_Size() int { - return m.Size() -} -func (m *Taint) XXX_DiscardUnknown() { - xxx_messageInfo_Taint.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_Taint proto.InternalMessageInfo - -func (m *Toleration) Reset() { *m = Toleration{} } -func (*Toleration) ProtoMessage() {} -func (*Toleration) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{190} -} -func (m *Toleration) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Toleration) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *Event) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n47, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *Toleration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Toleration.Merge(m, src) -} -func (m *Toleration) XXX_Size() int { - return m.Size() -} -func (m *Toleration) XXX_DiscardUnknown() { - xxx_messageInfo_Toleration.DiscardUnknown(m) + i += n47 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) + n48, err := m.InvolvedObject.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n49, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n49 + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) + n50, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n50 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) + n51, err := m.LastTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n52, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + if m.Series != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n53, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 + } + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + if m.Related != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n54, err := m.Related.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n54 + } + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) + return i, nil } -var xxx_messageInfo_Toleration proto.InternalMessageInfo - -func (m *TopologySelectorLabelRequirement) Reset() { *m = TopologySelectorLabelRequirement{} } -func (*TopologySelectorLabelRequirement) ProtoMessage() {} -func (*TopologySelectorLabelRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{191} -} -func (m *TopologySelectorLabelRequirement) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TopologySelectorLabelRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EventList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, 
size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *TopologySelectorLabelRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_TopologySelectorLabelRequirement.Merge(m, src) -} -func (m *TopologySelectorLabelRequirement) XXX_Size() int { - return m.Size() -} -func (m *TopologySelectorLabelRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_TopologySelectorLabelRequirement.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_TopologySelectorLabelRequirement proto.InternalMessageInfo - -func (m *TopologySelectorTerm) Reset() { *m = TopologySelectorTerm{} } -func (*TopologySelectorTerm) ProtoMessage() {} -func (*TopologySelectorTerm) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{192} -} -func (m *TopologySelectorTerm) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TopologySelectorTerm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EventList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n55, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *TopologySelectorTerm) XXX_Merge(src proto.Message) { - xxx_messageInfo_TopologySelectorTerm.Merge(m, src) -} -func (m *TopologySelectorTerm) XXX_Size() int { - return m.Size() -} -func (m *TopologySelectorTerm) XXX_DiscardUnknown() { - xxx_messageInfo_TopologySelectorTerm.DiscardUnknown(m) + i += n55 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -var xxx_messageInfo_TopologySelectorTerm proto.InternalMessageInfo - -func (m *TopologySpreadConstraint) Reset() { *m = TopologySpreadConstraint{} } -func (*TopologySpreadConstraint) ProtoMessage() {} -func (*TopologySpreadConstraint) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{193} -} -func (m *TopologySpreadConstraint) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TopologySpreadConstraint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EventSeries) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *TopologySpreadConstraint) XXX_Merge(src proto.Message) { - xxx_messageInfo_TopologySpreadConstraint.Merge(m, src) -} -func (m *TopologySpreadConstraint) XXX_Size() int { - return m.Size() -} -func (m *TopologySpreadConstraint) XXX_DiscardUnknown() { - xxx_messageInfo_TopologySpreadConstraint.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_TopologySpreadConstraint proto.InternalMessageInfo - -func (m *TypedLocalObjectReference) Reset() { *m = TypedLocalObjectReference{} } -func (*TypedLocalObjectReference) ProtoMessage() {} -func (*TypedLocalObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{194} -} -func (m *TypedLocalObjectReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TypedLocalObjectReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, 
err := m.MarshalToSizedBuffer(b) +func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n56, err := m.LastObservedTime.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *TypedLocalObjectReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_TypedLocalObjectReference.Merge(m, src) -} -func (m *TypedLocalObjectReference) XXX_Size() int { - return m.Size() -} -func (m *TypedLocalObjectReference) XXX_DiscardUnknown() { - xxx_messageInfo_TypedLocalObjectReference.DiscardUnknown(m) + i += n56 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil } -var xxx_messageInfo_TypedLocalObjectReference proto.InternalMessageInfo - -func (m *Volume) Reset() { *m = Volume{} } -func (*Volume) ProtoMessage() {} -func (*Volume) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{195} -} -func (m *Volume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Volume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *EventSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *Volume) XXX_Merge(src proto.Message) { - xxx_messageInfo_Volume.Merge(m, src) -} -func (m *Volume) XXX_Size() int { - return m.Size() -} -func (m *Volume) XXX_DiscardUnknown() { - xxx_messageInfo_Volume.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_Volume proto.InternalMessageInfo - -func (m *VolumeDevice) Reset() { *m = VolumeDevice{} } -func (*VolumeDevice) ProtoMessage() {} -func (*VolumeDevice) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{196} -} -func (m *VolumeDevice) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) + i += copy(dAtA[i:], m.Component) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + return i, nil } -func (m *VolumeDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + +func (m *ExecAction) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *VolumeDevice) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeDevice.Merge(m, src) -} -func (m *VolumeDevice) XXX_Size() int { - return m.Size() -} -func (m *VolumeDevice) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeDevice.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeDevice proto.InternalMessageInfo - -func (m *VolumeMount) Reset() { *m = VolumeMount{} } -func (*VolumeMount) ProtoMessage() {} -func (*VolumeMount) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{197} -} -func (m *VolumeMount) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeMount) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - 
n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeMount) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeMount.Merge(m, src) -} -func (m *VolumeMount) XXX_Size() int { - return m.Size() -} -func (m *VolumeMount) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeMount.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_VolumeMount proto.InternalMessageInfo - -func (m *VolumeNodeAffinity) Reset() { *m = VolumeNodeAffinity{} } -func (*VolumeNodeAffinity) ProtoMessage() {} -func (*VolumeNodeAffinity) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{198} -} -func (m *VolumeNodeAffinity) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeNodeAffinity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - return b[:n], nil -} -func (m *VolumeNodeAffinity) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeNodeAffinity.Merge(m, src) -} -func (m *VolumeNodeAffinity) XXX_Size() int { - return m.Size() -} -func (m *VolumeNodeAffinity) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeNodeAffinity.DiscardUnknown(m) + return i, nil } -var xxx_messageInfo_VolumeNodeAffinity proto.InternalMessageInfo - -func (m *VolumeProjection) Reset() { *m = VolumeProjection{} } -func (*VolumeProjection) ProtoMessage() {} -func (*VolumeProjection) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{199} -} -func (m *VolumeProjection) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeProjection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *VolumeProjection) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeProjection.Merge(m, src) -} -func (m *VolumeProjection) XXX_Size() int { - return m.Size() -} -func (m *VolumeProjection) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeProjection.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_VolumeProjection proto.InternalMessageInfo - -func (m *VolumeSource) Reset() { *m = VolumeSource{} } -func (*VolumeSource) ProtoMessage() {} -func (*VolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptor_83c10c24ec417dc9, []int{200} -} -func (m *VolumeSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err +func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TargetWWNs) > 0 { + for _, s := range m.TargetWWNs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - return b[:n], nil -} -func (m *VolumeSource) XXX_Merge(src 
[generated protobuf boilerplate for the k8s.io.api.core.v1 types, flattened from this hunk: the XXX_Unmarshal/XXX_Marshal/XXX_Merge/XXX_Size/XXX_DiscardUnknown methods and xxx_messageInfo vars for VolumeSource, VsphereVirtualDiskVolumeSource, WeightedPodAffinityTerm, and WindowsSecurityContextOptions on the removed side, with hand-rolled varint marshaling (Lun, FSType, ReadOnly, WWIDs fields) on the added side; the init() proto.RegisterType/RegisterMapType block covering the core/v1 API types; the proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptor_83c10c24ec417dc9) call; and the fileDescriptor_83c10c24ec417dc9 declaration ("13727 bytes of a gzipped FileDescriptorProto")]
0x65, 0xca, 0xbb, 0xdc, 0xa0, - 0x13, 0x0b, 0x0d, 0x9a, 0xc6, 0x9c, 0x68, 0x29, 0x16, 0x50, 0x81, 0x47, 0xc2, 0x50, 0xb8, 0xb8, - 0xe9, 0x78, 0x24, 0x0c, 0xb1, 0x80, 0xca, 0x90, 0xc2, 0x43, 0xd9, 0x21, 0x85, 0x79, 0x1c, 0x46, - 0x61, 0x58, 0x20, 0xc4, 0x1e, 0x2d, 0x0e, 0xa3, 0xb4, 0x38, 0x48, 0x70, 0xec, 0x9f, 0x29, 0xc2, - 0x78, 0x2d, 0x70, 0x93, 0xb7, 0xb2, 0x97, 0x8c, 0xb7, 0xb2, 0x4b, 0xa9, 0xb7, 0xb2, 0x69, 0x1d, - 0xf7, 0xfd, 0x97, 0xb1, 0xaf, 0xd5, 0xcb, 0xd8, 0xbf, 0xb0, 0xd8, 0xac, 0x55, 0xd6, 0xeb, 0xdc, - 0xfa, 0x08, 0x3d, 0x0f, 0x63, 0x8c, 0x21, 0x31, 0x9f, 0x4a, 0xf9, 0x80, 0xc4, 0x52, 0xe5, 0xac, - 0x27, 0xc5, 0x58, 0xc7, 0x41, 0x57, 0xa0, 0x14, 0x11, 0x27, 0x6c, 0xec, 0x28, 0x1e, 0x27, 0x5e, - 0x7b, 0x78, 0x19, 0x56, 0x50, 0xf4, 0x66, 0x12, 0x02, 0xb0, 0x98, 0xef, 0xa3, 0xa5, 0xf7, 0x87, - 0x6f, 0x91, 0xfc, 0xb8, 0x7f, 0xf6, 0x5d, 0x40, 0xdd, 0xf8, 0x03, 0xc4, 0xbe, 0x9a, 0x37, 0x63, - 0x5f, 0x95, 0xbb, 0xe2, 0x5e, 0xfd, 0x99, 0x05, 0x93, 0xb5, 0xc0, 0xa5, 0x5b, 0xf7, 0xeb, 0x69, - 0x9f, 0xea, 0xf1, 0x4f, 0x47, 0x7a, 0xc4, 0x3f, 0x7d, 0x12, 0x86, 0x6b, 0x81, 0x5b, 0xad, 0xf5, - 0xf2, 0x6d, 0xb6, 0xff, 0x9e, 0x05, 0xa3, 0xb5, 0xc0, 0x3d, 0x01, 0xe5, 0xfc, 0xc7, 0x4d, 0xe5, - 0xfc, 0x63, 0x39, 0xeb, 0x26, 0x47, 0x1f, 0xff, 0x77, 0x86, 0x60, 0x82, 0xf6, 0x33, 0xd8, 0x96, - 0x53, 0x69, 0x0c, 0x9b, 0x35, 0xc0, 0xb0, 0x51, 0x59, 0x38, 0x68, 0x36, 0x83, 0x7b, 0xe9, 0x69, - 0x5d, 0x65, 0xa5, 0x58, 0x40, 0xd1, 0xb3, 0x50, 0x6a, 0x87, 0x64, 0xcf, 0x0b, 0x84, 0x90, 0xa9, - 0x3d, 0x75, 0xd4, 0x44, 0x39, 0x56, 0x18, 0xf4, 0x72, 0x16, 0x79, 0x7e, 0x83, 0xd4, 0x49, 0x23, - 0xf0, 0x5d, 0xae, 0xbf, 0x2e, 0x8a, 0xb4, 0x01, 0x5a, 0x39, 0x36, 0xb0, 0xd0, 0x5d, 0x28, 0xb3, - 0xff, 0x8c, 0xed, 0x1c, 0x3d, 0x01, 0xa5, 0x48, 0x48, 0x26, 0x08, 0xe0, 0x84, 0x16, 0x7a, 0x01, - 0x20, 0x96, 0x11, 0xb2, 0x23, 0x11, 0xe7, 0x48, 0x09, 0xe4, 0x2a, 0x76, 0x76, 0x84, 0x35, 0x2c, - 0xf4, 0x0c, 0x94, 0x63, 0xc7, 0x6b, 0xde, 0xf4, 0x7c, 0x12, 0x31, 0xbd, 0x74, 0x51, 0xe6, 0x05, - 0x13, 0x85, 0x38, 0x81, 0x53, 0x81, 0x88, 0x05, 0x01, 0xe0, 0xe9, 0x6b, 0x4b, 0x0c, 0x9b, 0x09, - 0x44, 0x37, 0x55, 0x29, 0xd6, 0x30, 0xd0, 0x0e, 0x9c, 0xf7, 0x7c, 0x16, 0x62, 0x9f, 0xd4, 0x77, - 0xbd, 0xf6, 0xc6, 0xcd, 0xfa, 0x1d, 0x12, 0x7a, 0x5b, 0xfb, 0x4b, 0x4e, 0x63, 0x97, 0xf8, 0x32, - 0xb5, 0xe0, 0x07, 0x45, 0x17, 0xcf, 0x57, 0x7b, 0xe0, 0xe2, 0x9e, 0x94, 0xec, 0x57, 0xe1, 0x4c, - 0x2d, 0x70, 0x6b, 0x41, 0x18, 0xaf, 0x06, 0xe1, 0x3d, 0x27, 0x74, 0xe5, 0x4a, 0x99, 0x97, 0x59, - 0x48, 0x28, 0x2b, 0x1c, 0xe6, 0x8c, 0xc2, 0xc8, 0x85, 0xf5, 0x22, 0x13, 0xbe, 0x8e, 0xe8, 0x8c, - 0xd2, 0x60, 0x62, 0x80, 0xca, 0x37, 0x71, 0xcd, 0x89, 0x09, 0xba, 0xc5, 0xf2, 0xe8, 0x26, 0x27, - 0xa2, 0xa8, 0xfe, 0xb4, 0x96, 0x47, 0x37, 0x01, 0x66, 0x1e, 0xa1, 0x66, 0x7d, 0xfb, 0xaf, 0x0d, - 0x33, 0xe6, 0x98, 0xca, 0x59, 0x80, 0x3e, 0x0b, 0x93, 0x11, 0xb9, 0xe9, 0xf9, 0x9d, 0xfb, 0x52, - 0x27, 0xd0, 0xc3, 0x9d, 0xa8, 0xbe, 0xa2, 0x63, 0x72, 0xcd, 0xa2, 0x59, 0x86, 0x53, 0xd4, 0x50, - 0x0b, 0x26, 0xef, 0x79, 0xbe, 0x1b, 0xdc, 0x8b, 0x24, 0xfd, 0x52, 0xbe, 0x82, 0xf1, 0x2e, 0xc7, - 0x4c, 0xf5, 0xd1, 0x68, 0xee, 0xae, 0x41, 0x0c, 0xa7, 0x88, 0xd3, 0x05, 0x18, 0x76, 0xfc, 0xc5, - 0xe8, 0x76, 0x44, 0x42, 0x91, 0x11, 0x99, 0x2d, 0x40, 0x2c, 0x0b, 0x71, 0x02, 0xa7, 0x0b, 0x90, - 0xfd, 0xb9, 0x16, 0x06, 0x1d, 0x1e, 0xc7, 0x5e, 0x2c, 0x40, 0xac, 0x4a, 0xb1, 0x86, 0x41, 0x37, - 0x28, 0xfb, 0xb7, 0x1e, 0xf8, 0x38, 0x08, 0x62, 0xb9, 0xa5, 0x59, 0x0e, 0x4e, 0xad, 0x1c, 0x1b, - 0x58, 0x68, 0x15, 0x50, 0xd4, 0x69, 0xb7, 0x9b, 0xcc, 0x4e, 0xc1, 0x69, 0x32, 0x52, 0xfc, 
0x8d, - 0xb8, 0xc8, 0xa3, 0x74, 0xd6, 0xbb, 0xa0, 0x38, 0xa3, 0x06, 0xe5, 0xd5, 0x5b, 0xa2, 0xab, 0xc3, - 0xac, 0xab, 0xfc, 0x31, 0xa2, 0xce, 0xfb, 0x29, 0x61, 0x68, 0x05, 0x46, 0xa3, 0xfd, 0xa8, 0x11, - 0x8b, 0x70, 0x63, 0x39, 0x69, 0x69, 0xea, 0x0c, 0x45, 0xcb, 0x8a, 0xc6, 0xab, 0x60, 0x59, 0x17, - 0x35, 0xe0, 0x94, 0xa0, 0xb8, 0xbc, 0xe3, 0xf8, 0x2a, 0xc9, 0x07, 0x37, 0xd7, 0x7c, 0xfe, 0xc1, - 0xc1, 0xfc, 0x29, 0xd1, 0xb2, 0x0e, 0x3e, 0x3c, 0x98, 0x3f, 0x5b, 0x0b, 0xdc, 0x0c, 0x08, 0xce, - 0xa2, 0x66, 0x7f, 0x2b, 0x93, 0x37, 0x58, 0x92, 0xde, 0xb8, 0x13, 0x12, 0xd4, 0x82, 0x89, 0x36, - 0x5b, 0xc6, 0x22, 0xfa, 0xbb, 0x58, 0x8b, 0x2f, 0x0d, 0xa8, 0x38, 0xb8, 0x47, 0xd9, 0xb4, 0x52, - 0xec, 0xb1, 0x1b, 0x59, 0x4d, 0x27, 0x87, 0x4d, 0xea, 0xf6, 0x57, 0xce, 0xb2, 0x13, 0xab, 0xce, - 0xb5, 0x01, 0xa3, 0xc2, 0x7a, 0x5b, 0x5c, 0x7d, 0xe6, 0xf2, 0xd5, 0x52, 0xc9, 0xb0, 0x09, 0x0b, - 0x70, 0x2c, 0xeb, 0xa2, 0xcf, 0xc0, 0x24, 0xbd, 0x49, 0x68, 0xd9, 0x2f, 0x4e, 0xe7, 0x7b, 0xd9, - 0x27, 0x49, 0x2f, 0xb4, 0xcc, 0x10, 0x7a, 0x65, 0x9c, 0x22, 0x86, 0xde, 0x64, 0x76, 0x06, 0x66, - 0x62, 0x8d, 0x3e, 0xa4, 0x75, 0x93, 0x02, 0x49, 0x56, 0x23, 0x92, 0x97, 0xb4, 0xc3, 0x7e, 0xb4, - 0x49, 0x3b, 0xd0, 0x4d, 0x98, 0x10, 0x99, 0x6a, 0xc5, 0xca, 0x2a, 0x1a, 0xda, 0xb2, 0x09, 0xac, - 0x03, 0x0f, 0xd3, 0x05, 0xd8, 0xac, 0x8c, 0xb6, 0xe1, 0x82, 0x96, 0x39, 0xe6, 0x5a, 0xe8, 0xb0, - 0x27, 0x6f, 0x8f, 0xb1, 0x3b, 0xed, 0x2c, 0x7d, 0xe2, 0xc1, 0xc1, 0xfc, 0x85, 0x8d, 0x5e, 0x88, - 0xb8, 0x37, 0x1d, 0x74, 0x0b, 0xce, 0x70, 0x1f, 0xd1, 0x0a, 0x71, 0xdc, 0xa6, 0xe7, 0xab, 0xc3, - 0x9a, 0x6f, 0xc9, 0x73, 0x0f, 0x0e, 0xe6, 0xcf, 0x2c, 0x66, 0x21, 0xe0, 0xec, 0x7a, 0xe8, 0xe3, - 0x50, 0x76, 0xfd, 0x48, 0x8c, 0xc1, 0x88, 0x91, 0x9c, 0xa7, 0x5c, 0x59, 0xaf, 0xab, 0xef, 0x4f, - 0xfe, 0xe0, 0xa4, 0x02, 0xda, 0xe6, 0x1a, 0x55, 0xa5, 0xc0, 0x18, 0xed, 0x8a, 0x6e, 0x93, 0x56, - 0x85, 0x19, 0x5e, 0x62, 0xfc, 0x29, 0x41, 0x19, 0x4f, 0x1b, 0x0e, 0x64, 0x06, 0x61, 0xf4, 0x06, - 0x20, 0x2a, 0xe1, 0x7b, 0x0d, 0xb2, 0xd8, 0x60, 0xa9, 0x05, 0x98, 0x02, 0xba, 0x64, 0xfa, 0x2d, - 0xd5, 0xbb, 0x30, 0x70, 0x46, 0x2d, 0x74, 0x9d, 0x1e, 0x39, 0x7a, 0xa9, 0xe0, 0x2a, 0x2a, 0x95, - 0x5a, 0x85, 0xb4, 0x43, 0xd2, 0x70, 0x62, 0xe2, 0x9a, 0x14, 0x71, 0xaa, 0x1e, 0x72, 0xe1, 0xbc, - 0xd3, 0x89, 0x03, 0xa6, 0xac, 0x36, 0x51, 0x37, 0x82, 0x5d, 0xe2, 0xb3, 0x77, 0xa2, 0xd2, 0xd2, - 0x25, 0x2a, 0x0d, 0x2c, 0xf6, 0xc0, 0xc3, 0x3d, 0xa9, 0x50, 0x29, 0x4e, 0xe5, 0x4e, 0x05, 0x33, - 0x68, 0x4f, 0x46, 0xfe, 0xd4, 0x97, 0x61, 0x6c, 0x27, 0x88, 0xe2, 0x75, 0x12, 0xdf, 0x0b, 0xc2, - 0x5d, 0x11, 0x7a, 0x31, 0x09, 0xd7, 0x9b, 0x80, 0xb0, 0x8e, 0x47, 0xaf, 0x69, 0xcc, 0x8a, 0xa1, - 0x5a, 0x61, 0x0f, 0xc8, 0xa5, 0x84, 0xc7, 0x5c, 0xe7, 0xc5, 0x58, 0xc2, 0x25, 0x6a, 0xb5, 0xb6, - 0xcc, 0x1e, 0x83, 0x53, 0xa8, 0xd5, 0xda, 0x32, 0x96, 0x70, 0xba, 0x5c, 0xa3, 0x1d, 0x27, 0x24, - 0xb5, 0x30, 0x68, 0x90, 0x48, 0x0b, 0x12, 0xfd, 0x38, 0x0f, 0x2c, 0x49, 0x97, 0x6b, 0x3d, 0x0b, - 0x01, 0x67, 0xd7, 0x43, 0xa4, 0x3b, 0x6b, 0xd2, 0x64, 0xbe, 0x16, 0xbf, 0x5b, 0xde, 0x18, 0x30, - 0x71, 0x92, 0x0f, 0xd3, 0x2a, 0x5f, 0x13, 0x0f, 0x25, 0x19, 0xcd, 0x4e, 0xb1, 0xb5, 0x3d, 0x78, - 0x1c, 0x4a, 0xf5, 0x2e, 0x52, 0x4d, 0x51, 0xc2, 0x5d, 0xb4, 0x8d, 0xb8, 0x4c, 0xd3, 0x7d, 0x93, - 0xe9, 0x5e, 0x85, 0x72, 0xd4, 0xd9, 0x74, 0x83, 0x96, 0xe3, 0xf9, 0xec, 0x31, 0x58, 0xbb, 0x2f, - 0xd4, 0x25, 0x00, 0x27, 0x38, 0x68, 0x15, 0x4a, 0x8e, 0x7c, 0xf4, 0x40, 0xf9, 0xe1, 0x3c, 0xd4, - 0x53, 0x07, 0xf7, 0x70, 0x97, 0xcf, 0x1c, 0xaa, 0x2e, 0x7a, 0x0d, 0x26, 0x84, 0x8f, 0xa3, 0x48, - 0x15, 0x78, 0xca, 
0x74, 0x44, 0xa9, 0xeb, 0x40, 0x6c, 0xe2, 0xa2, 0xdb, 0x30, 0x16, 0x07, 0x4d, - 0xe6, 0x4d, 0x41, 0xc5, 0xb0, 0xb3, 0xf9, 0x21, 0xc1, 0x36, 0x14, 0x9a, 0xae, 0x6f, 0x54, 0x55, - 0xb1, 0x4e, 0x07, 0x6d, 0xf0, 0xf5, 0xce, 0x82, 0x25, 0x93, 0x68, 0xf6, 0xb1, 0xfc, 0x33, 0x49, - 0xc5, 0x54, 0x36, 0xb7, 0x83, 0xa8, 0x89, 0x75, 0x32, 0xe8, 0x1a, 0xcc, 0xb4, 0x43, 0x2f, 0x60, - 0x6b, 0x42, 0xbd, 0x77, 0xcd, 0x9a, 0x29, 0x5e, 0x6a, 0x69, 0x04, 0xdc, 0x5d, 0x87, 0xb9, 0xa8, - 0x8a, 0xc2, 0xd9, 0x73, 0x3c, 0x9b, 0x30, 0xbf, 0x7e, 0xf1, 0x32, 0xac, 0xa0, 0x68, 0x8d, 0x71, - 0x62, 0xae, 0x39, 0x98, 0x9d, 0xcb, 0x8f, 0x20, 0xa2, 0x6b, 0x18, 0xb8, 0x70, 0xa9, 0xfe, 0xe2, - 0x84, 0x02, 0x72, 0xb5, 0xb4, 0x73, 0x54, 0xa2, 0x8f, 0x66, 0xcf, 0xf7, 0x30, 0x25, 0x4b, 0x89, - 0xff, 0x89, 0x40, 0x60, 0x14, 0x47, 0x38, 0x45, 0x13, 0x7d, 0x13, 0x4c, 0x8b, 0x88, 0x65, 0xc9, - 0x30, 0x5d, 0x48, 0x6c, 0x54, 0x71, 0x0a, 0x86, 0xbb, 0xb0, 0x79, 0x10, 0x79, 0x67, 0xb3, 0x49, - 0x04, 0xeb, 0xbb, 0xe9, 0xf9, 0xbb, 0xd1, 0xec, 0x45, 0xc6, 0x1f, 0x44, 0x10, 0xf9, 0x34, 0x14, - 0x67, 0xd4, 0x40, 0x1b, 0x30, 0xdd, 0x0e, 0x09, 0x69, 0x31, 0x41, 0x5c, 0x9c, 0x67, 0xf3, 0xdc, - 0x43, 0x9b, 0xf6, 0xa4, 0x96, 0x82, 0x1d, 0x66, 0x94, 0xe1, 0x2e, 0x0a, 0xe8, 0x1e, 0x94, 0x82, - 0x3d, 0x12, 0xee, 0x10, 0xc7, 0x9d, 0xbd, 0xd4, 0xc3, 0x66, 0x5a, 0x1c, 0x6e, 0xb7, 0x04, 0x6e, - 0xea, 0x8d, 0x5c, 0x16, 0xf7, 0x7f, 0x23, 0x97, 0x8d, 0xa1, 0xef, 0xb7, 0xe0, 0x9c, 0x54, 0xab, - 0xd7, 0xdb, 0x74, 0xd4, 0x97, 0x03, 0x3f, 0x8a, 0x43, 0xee, 0x53, 0xfc, 0x44, 0xbe, 0x9f, 0xed, - 0x46, 0x4e, 0x25, 0xa5, 0xbc, 0x3c, 0x97, 0x87, 0x11, 0xe1, 0xfc, 0x16, 0xe7, 0xbe, 0x11, 0x66, - 0xba, 0x4e, 0xee, 0xa3, 0xe4, 0xb5, 0x98, 0xdb, 0x85, 0x09, 0x63, 0x74, 0x1e, 0xe9, 0xf3, 0xe8, - 0xbf, 0x19, 0x85, 0xb2, 0x7a, 0x3a, 0x43, 0x57, 0xcd, 0x17, 0xd1, 0x73, 0xe9, 0x17, 0xd1, 0x12, - 0xbd, 0x32, 0xeb, 0x8f, 0xa0, 0x1b, 0x19, 0x11, 0x9c, 0xf2, 0xf6, 0xe2, 0xe0, 0xae, 0xb9, 0x9a, - 0x26, 0xb4, 0x38, 0xf0, 0xd3, 0xea, 0x50, 0x4f, 0xe5, 0xea, 0x35, 0x98, 0xf1, 0x03, 0x26, 0x2e, - 0x12, 0x57, 0xca, 0x02, 0xec, 0xc8, 0x2f, 0xeb, 0x21, 0x11, 0x52, 0x08, 0xb8, 0xbb, 0x0e, 0x6d, - 0x90, 0x9f, 0xd9, 0x69, 0x6d, 0x2e, 0x3f, 0xd2, 0xb1, 0x80, 0xa2, 0x27, 0x61, 0xb8, 0x1d, 0xb8, - 0xd5, 0x9a, 0x10, 0x15, 0xb5, 0x1c, 0xa7, 0x6e, 0xb5, 0x86, 0x39, 0x0c, 0x2d, 0xc2, 0x08, 0xfb, - 0x11, 0xcd, 0x8e, 0xe7, 0xfb, 0xbe, 0xb3, 0x1a, 0x5a, 0xd6, 0x10, 0x56, 0x01, 0x8b, 0x8a, 0x4c, - 0xab, 0x44, 0xe5, 0x6b, 0xa6, 0x55, 0x1a, 0x7d, 0x48, 0xad, 0x92, 0x24, 0x80, 0x13, 0x5a, 0xe8, - 0x3e, 0x9c, 0x31, 0xee, 0x34, 0x7c, 0x89, 0x90, 0x48, 0xf8, 0xdf, 0x3e, 0xd9, 0xf3, 0x32, 0x23, - 0x9e, 0x62, 0x2f, 0x88, 0x4e, 0x9f, 0xa9, 0x66, 0x51, 0xc2, 0xd9, 0x0d, 0xa0, 0x26, 0xcc, 0x34, - 0xba, 0x5a, 0x2d, 0x0d, 0xde, 0xaa, 0x9a, 0xd0, 0xee, 0x16, 0xbb, 0x09, 0xa3, 0xd7, 0xa0, 0xf4, - 0x76, 0x10, 0x31, 0x36, 0x2b, 0xc4, 0x5b, 0xe9, 0xbc, 0x59, 0x7a, 0xf3, 0x56, 0x9d, 0x95, 0x1f, - 0x1e, 0xcc, 0x8f, 0xd5, 0x02, 0x57, 0xfe, 0xc5, 0xaa, 0x02, 0xfa, 0x6e, 0x0b, 0xe6, 0xba, 0x2f, - 0x4d, 0xaa, 0xd3, 0x13, 0x83, 0x77, 0xda, 0x16, 0x8d, 0xce, 0xad, 0xe4, 0x92, 0xc3, 0x3d, 0x9a, - 0xb2, 0x7f, 0x91, 0x3f, 0x9b, 0x8a, 0xc7, 0x15, 0x12, 0x75, 0x9a, 0x27, 0x91, 0x65, 0x71, 0xc5, - 0x78, 0xf7, 0x79, 0xe8, 0xa7, 0xf9, 0x5f, 0xb5, 0xd8, 0xd3, 0xfc, 0x06, 0x69, 0xb5, 0x9b, 0x4e, - 0x7c, 0x12, 0xbe, 0x7f, 0x6f, 0x42, 0x29, 0x16, 0xad, 0xf5, 0x4a, 0x0c, 0xa9, 0x75, 0x8a, 0x99, - 0x27, 0x28, 0x61, 0x53, 0x96, 0x62, 0x45, 0xc6, 0xfe, 0xa7, 0x7c, 0x06, 0x24, 0xe4, 0x04, 0xd4, - 0xeb, 0x15, 0x53, 0xbd, 0x3e, 0xdf, 0xe7, 
0x0b, 0x72, 0xd4, 0xec, 0xff, 0xc4, 0xec, 0x37, 0x53, - 0xb2, 0xbc, 0xd7, 0x6d, 0x42, 0xec, 0x1f, 0xb4, 0xe0, 0x74, 0x96, 0x11, 0x25, 0xbd, 0x20, 0x70, - 0x15, 0x8f, 0xb2, 0x91, 0x51, 0x23, 0x78, 0x47, 0x94, 0x63, 0x85, 0x31, 0x70, 0xce, 0xa5, 0xa3, - 0xc5, 0x20, 0xbd, 0x05, 0x13, 0xb5, 0x90, 0x68, 0x07, 0xda, 0xeb, 0xdc, 0x99, 0x97, 0xf7, 0xe7, - 0xd9, 0x23, 0x3b, 0xf2, 0xda, 0x3f, 0x55, 0x80, 0xd3, 0xfc, 0x91, 0x7b, 0x71, 0x2f, 0xf0, 0xdc, - 0x5a, 0xe0, 0x8a, 0x7c, 0x59, 0x9f, 0x86, 0xf1, 0xb6, 0xa6, 0x97, 0xeb, 0x15, 0x4f, 0x4f, 0xd7, - 0xdf, 0x25, 0x9a, 0x04, 0xbd, 0x14, 0x1b, 0xb4, 0x90, 0x0b, 0xe3, 0x64, 0xcf, 0x6b, 0xa8, 0x97, - 0xd2, 0xc2, 0x91, 0x0f, 0x17, 0xd5, 0xca, 0x8a, 0x46, 0x07, 0x1b, 0x54, 0x1f, 0x41, 0x0a, 0x55, - 0xfb, 0x87, 0x2c, 0x78, 0x2c, 0x27, 0xfa, 0x1e, 0x6d, 0xee, 0x1e, 0x33, 0x27, 0x10, 0xd9, 0x18, - 0x55, 0x73, 0xdc, 0xc8, 0x00, 0x0b, 0x28, 0xfa, 0x24, 0x00, 0x37, 0x12, 0xa0, 0x37, 0xd4, 0x7e, - 0x61, 0xca, 0x8c, 0x08, 0x4b, 0x5a, 0xb0, 0x1c, 0x59, 0x1f, 0x6b, 0xb4, 0xec, 0x1f, 0x2f, 0xc2, - 0x30, 0xcf, 0x23, 0xbd, 0x0a, 0xa3, 0x3b, 0x3c, 0x8b, 0xc0, 0x20, 0x09, 0x0b, 0x12, 0xdd, 0x01, - 0x2f, 0xc0, 0xb2, 0x32, 0x5a, 0x83, 0x53, 0x3c, 0x0b, 0x43, 0xb3, 0x42, 0x9a, 0xce, 0xbe, 0x54, - 0x74, 0xf1, 0x0c, 0x86, 0x4a, 0xe1, 0x57, 0xed, 0x46, 0xc1, 0x59, 0xf5, 0xd0, 0xeb, 0x30, 0x49, - 0x2f, 0x1e, 0x41, 0x27, 0x96, 0x94, 0x78, 0xfe, 0x05, 0x75, 0xd3, 0xd9, 0x30, 0xa0, 0x38, 0x85, - 0x4d, 0xef, 0xbe, 0xed, 0x2e, 0x95, 0xde, 0x70, 0x72, 0xf7, 0x35, 0xd5, 0x78, 0x26, 0x2e, 0xb3, - 0x9e, 0xec, 0x30, 0x5b, 0xd1, 0x8d, 0x9d, 0x90, 0x44, 0x3b, 0x41, 0xd3, 0x65, 0x82, 0xd6, 0xb0, - 0x66, 0x3d, 0x99, 0x82, 0xe3, 0xae, 0x1a, 0x94, 0xca, 0x96, 0xe3, 0x35, 0x3b, 0x21, 0x49, 0xa8, - 0x8c, 0x98, 0x54, 0x56, 0x53, 0x70, 0xdc, 0x55, 0x83, 0xae, 0xa3, 0x33, 0xb5, 0x30, 0xa0, 0xcc, - 0x4b, 0x86, 0x14, 0x51, 0x26, 0xb1, 0xa3, 0xd2, 0xfb, 0xb1, 0x47, 0xf0, 0x2d, 0x61, 0x34, 0xc8, - 0x29, 0x18, 0xef, 0xe1, 0x75, 0xe1, 0xf7, 0x28, 0xa9, 0xa0, 0xe7, 0x61, 0x4c, 0xc4, 0xd6, 0x67, - 0x96, 0x9b, 0x7c, 0xea, 0xd8, 0xfb, 0x7d, 0x25, 0x29, 0xc6, 0x3a, 0x8e, 0xfd, 0x3d, 0x05, 0x38, - 0x95, 0x61, 0x7a, 0xcf, 0x59, 0xd5, 0xb6, 0x17, 0xc5, 0x2a, 0x4b, 0x9b, 0xc6, 0xaa, 0x78, 0x39, - 0x56, 0x18, 0x74, 0x3f, 0x70, 0x66, 0x98, 0x66, 0x80, 0xc2, 0xb4, 0x55, 0x40, 0x8f, 0x98, 0xef, - 0xec, 0x12, 0x0c, 0x75, 0x22, 0x22, 0xc3, 0xe6, 0x29, 0xfe, 0xcd, 0x9e, 0x75, 0x18, 0x84, 0x8a, - 0xc7, 0xdb, 0xea, 0x85, 0x44, 0x13, 0x8f, 0xf9, 0x1b, 0x09, 0x87, 0xd1, 0xce, 0xc5, 0xc4, 0x77, - 0xfc, 0x58, 0x08, 0xd1, 0x49, 0xfc, 0x27, 0x56, 0x8a, 0x05, 0xd4, 0xfe, 0x52, 0x11, 0xce, 0xe5, - 0x3a, 0xe3, 0xd0, 0xae, 0xb7, 0x02, 0xdf, 0x8b, 0x03, 0x65, 0x18, 0xc1, 0x63, 0x3e, 0x91, 0xf6, - 0xce, 0x9a, 0x28, 0xc7, 0x0a, 0x03, 0x5d, 0x86, 0x61, 0xa6, 0x74, 0xea, 0xca, 0x57, 0xb7, 0x54, - 0xe1, 0x41, 0x40, 0x38, 0x78, 0xe0, 0x5c, 0xa0, 0x4f, 0xc2, 0x50, 0x3b, 0x08, 0x9a, 0x69, 0xa6, - 0x45, 0xbb, 0x1b, 0x04, 0x4d, 0xcc, 0x80, 0xe8, 0x43, 0x62, 0xbc, 0x52, 0x96, 0x00, 0xd8, 0x71, - 0x83, 0x48, 0x1b, 0xb4, 0xa7, 0x61, 0x74, 0x97, 0xec, 0x87, 0x9e, 0xbf, 0x9d, 0xb6, 0x10, 0xb9, - 0xc1, 0x8b, 0xb1, 0x84, 0x9b, 0xa9, 0x87, 0x46, 0x8f, 0x3b, 0x89, 0x67, 0xa9, 0xef, 0x11, 0xf8, - 0xbd, 0x45, 0x98, 0xc2, 0x4b, 0x95, 0xf7, 0x27, 0xe2, 0x76, 0xf7, 0x44, 0x1c, 0x77, 0x12, 0xcf, - 0xfe, 0xb3, 0xf1, 0x73, 0x16, 0x4c, 0xb1, 0x08, 0xff, 0x22, 0x5a, 0x90, 0x17, 0xf8, 0x27, 0x20, - 0xe2, 0x3d, 0x09, 0xc3, 0x21, 0x6d, 0x34, 0x9d, 0xa8, 0x8e, 0xf5, 0x04, 0x73, 0x18, 0x3a, 0x0f, - 0x43, 0xac, 0x0b, 0x74, 0xf2, 0xc6, 0x79, 0x8e, 0x9f, 0x8a, 0x13, 
0x3b, 0x98, 0x95, 0xb2, 0x10, - 0x18, 0x98, 0xb4, 0x9b, 0x1e, 0xef, 0x74, 0xf2, 0x24, 0xf8, 0xde, 0x08, 0x81, 0x91, 0xd9, 0xb5, - 0x77, 0x17, 0x02, 0x23, 0x9b, 0x64, 0xef, 0xeb, 0xd3, 0x1f, 0x16, 0xe0, 0x62, 0x66, 0xbd, 0x81, - 0x43, 0x60, 0xf4, 0xae, 0xfd, 0x28, 0x23, 0xc1, 0x17, 0x4f, 0xd0, 0xfe, 0x6e, 0x68, 0x50, 0x09, - 0x73, 0x78, 0x80, 0xc8, 0x14, 0x99, 0x43, 0xf6, 0x1e, 0x89, 0x4c, 0x91, 0xd9, 0xb7, 0x9c, 0xeb, - 0xdf, 0x9f, 0x17, 0x72, 0xbe, 0x85, 0x5d, 0x04, 0xaf, 0x50, 0x3e, 0xc3, 0x80, 0x91, 0x90, 0x98, - 0xc7, 0x39, 0x8f, 0xe1, 0x65, 0x58, 0x41, 0xd1, 0x22, 0x4c, 0xb5, 0x3c, 0x9f, 0x32, 0x9f, 0x7d, - 0x53, 0xf0, 0x53, 0x81, 0x83, 0xd6, 0x4c, 0x30, 0x4e, 0xe3, 0x23, 0x4f, 0x8b, 0x5a, 0x51, 0xc8, - 0x4f, 0xfd, 0x9c, 0xdb, 0xdb, 0x05, 0xf3, 0xb9, 0x54, 0x8d, 0x62, 0x46, 0x04, 0x8b, 0x35, 0xed, - 0xfe, 0x5f, 0x1c, 0xfc, 0xfe, 0x3f, 0x9e, 0x7d, 0xf7, 0x9f, 0x7b, 0x0d, 0x26, 0x1e, 0x5a, 0xe1, - 0x6b, 0x7f, 0xa5, 0x08, 0x8f, 0xf7, 0xd8, 0xf6, 0x9c, 0xd7, 0x1b, 0x73, 0xa0, 0xf1, 0xfa, 0xae, - 0x79, 0xa8, 0xc1, 0xe9, 0xad, 0x4e, 0xb3, 0xb9, 0xcf, 0x4c, 0xdc, 0x89, 0x2b, 0x31, 0x84, 0x4c, - 0x79, 0x5e, 0x66, 0x55, 0x5a, 0xcd, 0xc0, 0xc1, 0x99, 0x35, 0xa9, 0x40, 0x4f, 0x4f, 0x92, 0x7d, - 0x45, 0x2a, 0x25, 0xd0, 0x63, 0x1d, 0x88, 0x4d, 0x5c, 0x74, 0x0d, 0x66, 0x9c, 0x3d, 0xc7, 0xe3, - 0xa1, 0x3f, 0x25, 0x01, 0x2e, 0xd1, 0x2b, 0x3d, 0xdd, 0x62, 0x1a, 0x01, 0x77, 0xd7, 0x41, 0x6f, - 0x00, 0x0a, 0x44, 0xea, 0xfa, 0x6b, 0xc4, 0x17, 0xaf, 0x5a, 0x6c, 0xee, 0x8a, 0x09, 0x4b, 0xb8, - 0xd5, 0x85, 0x81, 0x33, 0x6a, 0xa5, 0xa2, 0x40, 0x8c, 0xe4, 0x47, 0x81, 0xe8, 0xcd, 0x17, 0xfb, - 0x26, 0x21, 0xf8, 0x2f, 0x16, 0x3d, 0xbe, 0xb8, 0x90, 0x6f, 0x06, 0x33, 0x7b, 0x8d, 0x59, 0x8d, - 0x71, 0x1d, 0x9e, 0x16, 0x90, 0xe1, 0x8c, 0x66, 0x35, 0x96, 0x00, 0xb1, 0x89, 0xcb, 0x17, 0x44, - 0x94, 0xf8, 0x01, 0x1a, 0x22, 0xbe, 0x88, 0xb8, 0xa2, 0x30, 0xd0, 0xa7, 0x60, 0xd4, 0xf5, 0xf6, - 0xbc, 0x28, 0x08, 0xc5, 0x4a, 0x3f, 0xe2, 0x73, 0x41, 0xc2, 0x07, 0x2b, 0x9c, 0x0c, 0x96, 0xf4, - 0xec, 0xef, 0x2d, 0xc0, 0x84, 0x6c, 0xf1, 0xcd, 0x4e, 0x10, 0x3b, 0x27, 0x70, 0x2c, 0x5f, 0x33, - 0x8e, 0xe5, 0x0f, 0xf5, 0x0a, 0x3b, 0xc3, 0xba, 0x94, 0x7b, 0x1c, 0xdf, 0x4a, 0x1d, 0xc7, 0x4f, - 0xf5, 0x27, 0xd5, 0xfb, 0x18, 0xfe, 0x67, 0x16, 0xcc, 0x18, 0xf8, 0x27, 0x70, 0x1a, 0xac, 0x9a, - 0xa7, 0xc1, 0x13, 0x7d, 0xbf, 0x21, 0xe7, 0x14, 0xf8, 0xce, 0x62, 0xaa, 0xef, 0x8c, 0xfb, 0xbf, - 0x0d, 0x43, 0x3b, 0x4e, 0xe8, 0xf6, 0x0a, 0xb3, 0xdd, 0x55, 0x69, 0xe1, 0xba, 0x13, 0x8a, 0x67, - 0xbd, 0x67, 0x55, 0xe6, 0x65, 0x27, 0xec, 0xff, 0xa4, 0xc7, 0x9a, 0x42, 0xaf, 0xc2, 0x48, 0xd4, - 0x08, 0xda, 0xca, 0x28, 0xfd, 0x12, 0xcf, 0xca, 0x4c, 0x4b, 0x0e, 0x0f, 0xe6, 0x91, 0xd9, 0x1c, - 0x2d, 0xc6, 0x02, 0x1f, 0x7d, 0x1a, 0x26, 0xd8, 0x2f, 0x65, 0x63, 0x53, 0xcc, 0x4f, 0xc9, 0x53, - 0xd7, 0x11, 0xb9, 0x01, 0x9a, 0x51, 0x84, 0x4d, 0x52, 0x73, 0xdb, 0x50, 0x56, 0x9f, 0xf5, 0x48, - 0xdf, 0xe3, 0xfe, 0x43, 0x11, 0x4e, 0x65, 0xac, 0x39, 0x14, 0x19, 0x33, 0xf1, 0xfc, 0x80, 0x4b, - 0xf5, 0x5d, 0xce, 0x45, 0xc4, 0x6e, 0x43, 0xae, 0x58, 0x5b, 0x03, 0x37, 0x7a, 0x3b, 0x22, 0xe9, - 0x46, 0x69, 0x51, 0xff, 0x46, 0x69, 0x63, 0x27, 0x36, 0xd4, 0xb4, 0x21, 0xd5, 0xd3, 0x47, 0x3a, - 0xa7, 0x7f, 0x52, 0x84, 0xd3, 0x59, 0x91, 0xb0, 0xd0, 0xb7, 0xa4, 0xd2, 0xb3, 0xbd, 0x34, 0x68, - 0x0c, 0x2d, 0x9e, 0xb3, 0x8d, 0xeb, 0x80, 0x97, 0x16, 0xcc, 0x84, 0x6d, 0x7d, 0x87, 0x59, 0xb4, - 0xc9, 0x7c, 0xdc, 0x43, 0x9e, 0x56, 0x4f, 0xb2, 0x8f, 0x8f, 0x0e, 0xdc, 0x01, 0x91, 0x8f, 0x2f, - 0x4a, 0xbd, 0xdf, 0xcb, 0xe2, 0xfe, 0xef, 0xf7, 0xb2, 0xe5, 0x39, 0x0f, 0xc6, 0xb4, 0xaf, 
0x79, - 0xa4, 0x33, 0xbe, 0x4b, 0x4f, 0x2b, 0xad, 0xdf, 0x8f, 0x74, 0xd6, 0x7f, 0xc8, 0x82, 0x94, 0xc9, - 0xb5, 0x52, 0x8b, 0x59, 0xb9, 0x6a, 0xb1, 0x4b, 0x30, 0x14, 0x06, 0x4d, 0x92, 0xce, 0x86, 0x86, - 0x83, 0x26, 0xc1, 0x0c, 0x42, 0x31, 0xe2, 0x44, 0xd9, 0x31, 0xae, 0x5f, 0xe4, 0xc4, 0x15, 0xed, - 0x49, 0x18, 0x6e, 0x92, 0x3d, 0xd2, 0x4c, 0x27, 0xad, 0xb8, 0x49, 0x0b, 0x31, 0x87, 0xd9, 0x3f, - 0x37, 0x04, 0x17, 0x7a, 0x46, 0x89, 0xa0, 0xd7, 0xa1, 0x6d, 0x27, 0x26, 0xf7, 0x9c, 0xfd, 0x74, - 0x74, 0xf9, 0x6b, 0xbc, 0x18, 0x4b, 0x38, 0x73, 0x8a, 0xe1, 0x41, 0x62, 0x53, 0x4a, 0x44, 0x11, - 0x1b, 0x56, 0x40, 0x4d, 0xa5, 0x54, 0xf1, 0x38, 0x94, 0x52, 0x2f, 0x00, 0x44, 0x51, 0x93, 0x1b, - 0xbe, 0xb8, 0xc2, 0xdb, 0x26, 0x09, 0x26, 0x5c, 0xbf, 0x29, 0x20, 0x58, 0xc3, 0x42, 0x15, 0x98, - 0x6e, 0x87, 0x41, 0xcc, 0x75, 0xb2, 0x15, 0x6e, 0x1b, 0x36, 0x6c, 0x3a, 0xe8, 0xd7, 0x52, 0x70, - 0xdc, 0x55, 0x03, 0xbd, 0x0c, 0x63, 0xc2, 0x69, 0xbf, 0x16, 0x04, 0x4d, 0xa1, 0x06, 0x52, 0xe6, - 0x52, 0xf5, 0x04, 0x84, 0x75, 0x3c, 0xad, 0x1a, 0x53, 0xf4, 0x8e, 0x66, 0x56, 0xe3, 0xca, 0x5e, - 0x0d, 0x2f, 0x15, 0x15, 0xaf, 0x34, 0x50, 0x54, 0xbc, 0x44, 0x31, 0x56, 0x1e, 0xf8, 0x6d, 0x0b, - 0xfa, 0xaa, 0x92, 0x7e, 0x7a, 0x08, 0x4e, 0x89, 0x85, 0xf3, 0xa8, 0x97, 0xcb, 0xed, 0xee, 0xe5, - 0x72, 0x1c, 0xaa, 0xb3, 0xf7, 0xd7, 0xcc, 0x49, 0xaf, 0x99, 0xef, 0xb3, 0xc0, 0x14, 0xaf, 0xd0, - 0xff, 0x9f, 0x9b, 0x9e, 0xe3, 0xe5, 0x5c, 0x71, 0xcd, 0x95, 0x07, 0xc8, 0xbb, 0x4c, 0xd4, 0x61, - 0xff, 0x27, 0x0b, 0x9e, 0xe8, 0x4b, 0x11, 0xad, 0x40, 0x99, 0xc9, 0x80, 0xda, 0xed, 0xec, 0x29, - 0x65, 0x3b, 0x2a, 0x01, 0x39, 0x22, 0x69, 0x52, 0x13, 0xad, 0x74, 0xe5, 0x41, 0x79, 0x3a, 0x23, - 0x0f, 0xca, 0x19, 0x63, 0x78, 0x1e, 0x32, 0x11, 0xca, 0x1f, 0x14, 0x61, 0x84, 0xaf, 0xf8, 0x13, - 0xb8, 0x86, 0x3d, 0x03, 0x65, 0xaf, 0xd5, 0xea, 0xf0, 0x6c, 0x12, 0xc3, 0xdc, 0xb3, 0x92, 0x0e, - 0x4d, 0x55, 0x16, 0xe2, 0x04, 0x8e, 0x56, 0x85, 0x92, 0xb7, 0x47, 0x8c, 0x3e, 0xde, 0xf1, 0x85, - 0x8a, 0x13, 0x3b, 0x5c, 0xa6, 0x50, 0x47, 0x5b, 0xa2, 0x0e, 0x46, 0x9f, 0x05, 0x88, 0xe2, 0xd0, - 0xf3, 0xb7, 0x69, 0x99, 0x88, 0xde, 0xf8, 0xe1, 0x1e, 0xd4, 0xea, 0x0a, 0x99, 0xd3, 0x4c, 0xb6, - 0xb9, 0x02, 0x60, 0x8d, 0x22, 0x5a, 0x30, 0x0e, 0xd7, 0xb9, 0x94, 0x96, 0x14, 0x38, 0xd5, 0xe4, - 0xa8, 0x9d, 0x7b, 0x05, 0xca, 0x8a, 0x78, 0x3f, 0x95, 0xcf, 0xb8, 0x2e, 0x89, 0x7c, 0x02, 0xa6, - 0x52, 0x7d, 0x3b, 0x92, 0xc6, 0xe8, 0xe7, 0x2d, 0x98, 0xe2, 0x9d, 0x59, 0xf1, 0xf7, 0x04, 0x03, - 0x7e, 0x07, 0x4e, 0x37, 0x33, 0x18, 0xa1, 0x98, 0xfe, 0xc1, 0x19, 0xa7, 0xd2, 0x10, 0x65, 0x41, - 0x71, 0x66, 0x1b, 0xe8, 0x0a, 0x5d, 0xe4, 0x94, 0xd1, 0x39, 0x4d, 0xe1, 0x68, 0x39, 0xce, 0x17, - 0x38, 0x2f, 0xc3, 0x0a, 0x6a, 0xff, 0xb6, 0x05, 0x33, 0xbc, 0xe7, 0x37, 0xc8, 0xbe, 0x62, 0x07, - 0x5f, 0xcb, 0xbe, 0x8b, 0x3c, 0x46, 0x85, 0x9c, 0x3c, 0x46, 0xfa, 0xa7, 0x15, 0x7b, 0x7e, 0xda, - 0x4f, 0x59, 0x20, 0x56, 0xc8, 0x09, 0xdc, 0xfb, 0xbf, 0xd1, 0xbc, 0xf7, 0xcf, 0xe5, 0x6f, 0x82, - 0x9c, 0x0b, 0xff, 0x9f, 0x59, 0x30, 0xcd, 0x11, 0x92, 0x07, 0xea, 0xaf, 0xe9, 0x3c, 0x0c, 0x92, - 0xed, 0xf4, 0x06, 0xd9, 0xdf, 0x08, 0x6a, 0x4e, 0xbc, 0x93, 0xfd, 0x51, 0xc6, 0x64, 0x0d, 0xf5, - 0x9c, 0x2c, 0x57, 0x6e, 0xa0, 0x23, 0xa4, 0x50, 0x3e, 0x72, 0x98, 0x7f, 0xfb, 0xab, 0x16, 0x20, - 0xde, 0x8c, 0x21, 0x2b, 0x51, 0x09, 0x84, 0x95, 0x6a, 0x67, 0x4b, 0xc2, 0x9a, 0x14, 0x04, 0x6b, - 0x58, 0xc7, 0x32, 0x3c, 0x29, 0x2b, 0x83, 0x62, 0x7f, 0x2b, 0x83, 0x23, 0x8c, 0xe8, 0x1f, 0x0c, - 0x43, 0xda, 0x5d, 0x04, 0xdd, 0x81, 0xf1, 0x86, 0xd3, 0x76, 0x36, 0xbd, 0xa6, 0x17, 0x7b, 0x24, - 0xea, 0x65, 0x9e, 
0xb4, 0xac, 0xe1, 0x89, 0x77, 0x61, 0xad, 0x04, 0x1b, 0x74, 0xd0, 0x02, 0x40, - 0x3b, 0xf4, 0xf6, 0xbc, 0x26, 0xd9, 0x66, 0xea, 0x09, 0xe6, 0xda, 0xcd, 0x6d, 0x6e, 0x64, 0x29, - 0xd6, 0x30, 0x32, 0x7c, 0x67, 0x8b, 0x8f, 0xd8, 0x77, 0x16, 0x4e, 0xcc, 0x77, 0x76, 0xe8, 0x48, - 0xbe, 0xb3, 0xa5, 0x23, 0xfb, 0xce, 0x0e, 0x0f, 0xe4, 0x3b, 0x8b, 0xe1, 0xac, 0x14, 0xf7, 0xe8, - 0xff, 0x55, 0xaf, 0x49, 0x84, 0x8c, 0xcf, 0xfd, 0xd1, 0xe7, 0x1e, 0x1c, 0xcc, 0x9f, 0xc5, 0x99, - 0x18, 0x38, 0xa7, 0x26, 0xfa, 0x24, 0xcc, 0x3a, 0xcd, 0x66, 0x70, 0x4f, 0x4d, 0xea, 0x4a, 0xd4, - 0x70, 0x9a, 0x5c, 0xef, 0x3f, 0xca, 0xa8, 0x9e, 0x7f, 0x70, 0x30, 0x3f, 0xbb, 0x98, 0x83, 0x83, - 0x73, 0x6b, 0xa3, 0x8f, 0x43, 0xb9, 0x1d, 0x06, 0x8d, 0x35, 0xcd, 0xa7, 0xed, 0x22, 0x1d, 0xc0, - 0x9a, 0x2c, 0x3c, 0x3c, 0x98, 0x9f, 0x50, 0x7f, 0xd8, 0x81, 0x9f, 0x54, 0xb0, 0x77, 0xe1, 0x54, - 0x9d, 0x84, 0x1e, 0x4b, 0x88, 0xec, 0x26, 0xfc, 0x63, 0x03, 0xca, 0x61, 0x8a, 0x63, 0x0e, 0x14, - 0xd7, 0x4e, 0x8b, 0x87, 0x2e, 0x39, 0x64, 0x42, 0xc8, 0xfe, 0xdf, 0x16, 0x8c, 0x0a, 0xf7, 0x8d, - 0x13, 0x90, 0xea, 0x16, 0x0d, 0xe5, 0xfa, 0x7c, 0xf6, 0xa9, 0xc2, 0x3a, 0x93, 0xab, 0x56, 0xaf, - 0xa6, 0xd4, 0xea, 0x4f, 0xf4, 0x22, 0xd2, 0x5b, 0xa1, 0xfe, 0xb7, 0x8a, 0x30, 0x69, 0xfa, 0xf9, - 0x9d, 0xc0, 0x10, 0xac, 0xc3, 0x68, 0x24, 0x1c, 0xd9, 0x0a, 0xf9, 0xe6, 0xdb, 0xe9, 0x49, 0x4c, - 0x4c, 0xbb, 0x84, 0xeb, 0x9a, 0x24, 0x92, 0xe9, 0x21, 0x57, 0x7c, 0x84, 0x1e, 0x72, 0xfd, 0x5c, - 0x2d, 0x87, 0x8e, 0xc3, 0xd5, 0xd2, 0xfe, 0x32, 0x3b, 0xd9, 0xf4, 0xf2, 0x13, 0x10, 0x7a, 0xae, - 0x99, 0x67, 0xa0, 0xdd, 0x63, 0x65, 0x89, 0x4e, 0xe5, 0x08, 0x3f, 0x3f, 0x6b, 0xc1, 0x85, 0x8c, - 0xaf, 0xd2, 0x24, 0xa1, 0x67, 0xa1, 0xe4, 0x74, 0x5c, 0x4f, 0xed, 0x65, 0xed, 0x89, 0x6d, 0x51, - 0x94, 0x63, 0x85, 0x81, 0x96, 0x61, 0x86, 0xdc, 0x6f, 0x7b, 0xfc, 0x75, 0x51, 0xb7, 0xbf, 0x2c, - 0xf2, 0x58, 0xdf, 0x2b, 0x69, 0x20, 0xee, 0xc6, 0x57, 0xe1, 0x27, 0x8a, 0xb9, 0xe1, 0x27, 0xfe, - 0xa1, 0x05, 0x63, 0xca, 0x95, 0xeb, 0x91, 0x8f, 0xf6, 0x37, 0x99, 0xa3, 0xfd, 0x78, 0x8f, 0xd1, - 0xce, 0x19, 0xe6, 0xdf, 0x2a, 0xa8, 0xfe, 0xd6, 0x82, 0x30, 0x1e, 0x40, 0xc2, 0x7a, 0x15, 0x4a, - 0xed, 0x30, 0x88, 0x83, 0x46, 0xd0, 0x14, 0x02, 0xd6, 0xf9, 0x24, 0x0e, 0x0b, 0x2f, 0x3f, 0xd4, - 0x7e, 0x63, 0x85, 0x4d, 0x65, 0x1b, 0xa7, 0xdd, 0x96, 0x00, 0x69, 0x96, 0xc5, 0xa2, 0x94, 0x26, - 0xc5, 0x58, 0xc7, 0x61, 0x03, 0x1e, 0x84, 0xb1, 0x90, 0x83, 0x92, 0x01, 0x0f, 0xc2, 0x18, 0x33, - 0x08, 0x72, 0x01, 0x62, 0x27, 0xdc, 0x26, 0x31, 0x2d, 0x13, 0xa1, 0xa2, 0xf2, 0xf9, 0x4d, 0x27, - 0xf6, 0x9a, 0x0b, 0x9e, 0x1f, 0x47, 0x71, 0xb8, 0x50, 0xf5, 0xe3, 0x5b, 0x21, 0xbf, 0xe2, 0x69, - 0xb1, 0x58, 0x14, 0x2d, 0xac, 0xd1, 0x95, 0x6e, 0xcb, 0xac, 0x8d, 0x61, 0xf3, 0x7d, 0x7f, 0x5d, - 0x94, 0x63, 0x85, 0x61, 0xbf, 0xc2, 0x4e, 0x1f, 0x36, 0xa6, 0x47, 0x0b, 0x5e, 0xf2, 0x8b, 0x65, - 0x35, 0x1b, 0xec, 0x71, 0xaf, 0xa2, 0x87, 0x48, 0xe9, 0xcd, 0xec, 0x69, 0xc3, 0xba, 0x0b, 0x53, - 0x12, 0x47, 0x05, 0x7d, 0x73, 0x97, 0xcd, 0xc6, 0x73, 0x7d, 0x4e, 0x8d, 0x23, 0x58, 0x69, 0xb0, - 0x94, 0x05, 0x2c, 0xa0, 0x7b, 0xb5, 0x26, 0xf6, 0x85, 0x96, 0xb2, 0x40, 0x00, 0x70, 0x82, 0x83, - 0xae, 0x8a, 0x0b, 0x3c, 0x57, 0x7d, 0x3f, 0x9e, 0xba, 0xc0, 0xcb, 0xcf, 0xd7, 0x94, 0xe5, 0xcf, - 0xc3, 0x98, 0x4a, 0xd8, 0x59, 0xe3, 0x79, 0x20, 0xc5, 0xb2, 0x59, 0x49, 0x8a, 0xb1, 0x8e, 0x83, - 0x36, 0x60, 0x2a, 0xe2, 0xaa, 0x24, 0x15, 0x1f, 0x95, 0xab, 0xe4, 0x3e, 0x2c, 0x0d, 0x5d, 0xea, - 0x26, 0xf8, 0x90, 0x15, 0x71, 0x6e, 0x23, 0x5d, 0x85, 0xd3, 0x24, 0xd0, 0xeb, 0x30, 0xd9, 0x0c, - 0x1c, 0x77, 0xc9, 0x69, 0x3a, 0x7e, 0x83, 
0x7d, 0x6f, 0xc9, 0xcc, 0xfb, 0x76, 0xd3, 0x80, 0xe2, - 0x14, 0x36, 0x15, 0x96, 0xf4, 0x12, 0x11, 0xd3, 0xd7, 0xf1, 0xb7, 0x49, 0x24, 0xd2, 0x2f, 0x32, - 0x61, 0xe9, 0x66, 0x0e, 0x0e, 0xce, 0xad, 0x8d, 0x5e, 0x85, 0x71, 0xf9, 0xf9, 0x9a, 0x67, 0x7d, - 0x62, 0xdb, 0xaf, 0xc1, 0xb0, 0x81, 0x89, 0xee, 0xc1, 0x19, 0xf9, 0x7f, 0x23, 0x74, 0xb6, 0xb6, - 0xbc, 0x86, 0x70, 0x37, 0xe5, 0x8e, 0x77, 0x8b, 0xd2, 0x3b, 0x6c, 0x25, 0x0b, 0xe9, 0xf0, 0x60, - 0xfe, 0x92, 0x18, 0xb5, 0x4c, 0x38, 0x9b, 0xc4, 0x6c, 0xfa, 0x68, 0x0d, 0x4e, 0xed, 0x10, 0xa7, - 0x19, 0xef, 0x2c, 0xef, 0x90, 0xc6, 0xae, 0xdc, 0x44, 0xcc, 0x5f, 0x5f, 0xb3, 0x88, 0xbf, 0xde, - 0x8d, 0x82, 0xb3, 0xea, 0xa1, 0xb7, 0x60, 0xb6, 0xdd, 0xd9, 0x6c, 0x7a, 0xd1, 0xce, 0x7a, 0x10, - 0x33, 0x6b, 0x17, 0x95, 0xff, 0x53, 0x38, 0xf6, 0xab, 0x88, 0x08, 0xb5, 0x1c, 0x3c, 0x9c, 0x4b, - 0x01, 0xbd, 0x03, 0x67, 0x52, 0x8b, 0x41, 0xb8, 0x36, 0x4f, 0xe6, 0x47, 0x48, 0xaf, 0x67, 0x55, - 0x10, 0x51, 0x02, 0xb2, 0x40, 0x38, 0xbb, 0x09, 0xf4, 0x12, 0x94, 0xbc, 0xf6, 0xaa, 0xd3, 0xf2, - 0x9a, 0xfb, 0x2c, 0xc4, 0x7b, 0x99, 0x85, 0x3d, 0x2f, 0x55, 0x6b, 0xbc, 0xec, 0x50, 0xfb, 0x8d, - 0x15, 0x26, 0xbd, 0x22, 0x68, 0x81, 0x2c, 0xa3, 0xd9, 0xe9, 0xc4, 0x98, 0x57, 0x8b, 0x76, 0x19, - 0x61, 0x03, 0xeb, 0xdd, 0xd9, 0x48, 0xbd, 0x4d, 0x2b, 0x6b, 0x32, 0x23, 0xfa, 0x1c, 0x8c, 0xeb, - 0x2b, 0x56, 0x9c, 0x7f, 0x97, 0xb3, 0x45, 0x2a, 0x6d, 0x65, 0x73, 0x89, 0x53, 0xad, 0x5e, 0x1d, - 0x86, 0x0d, 0x8a, 0x36, 0x81, 0xec, 0xb1, 0x44, 0x37, 0xa1, 0xd4, 0x68, 0x7a, 0xc4, 0x8f, 0xab, - 0xb5, 0x5e, 0x31, 0x98, 0x96, 0x05, 0x8e, 0x98, 0x1c, 0x11, 0xbe, 0x9a, 0x97, 0x61, 0x45, 0xc1, - 0xfe, 0x95, 0x02, 0xcc, 0xf7, 0x89, 0x85, 0x9e, 0x52, 0xe5, 0x5b, 0x03, 0xa9, 0xf2, 0x17, 0x65, - 0xe6, 0xd4, 0xf5, 0x94, 0xca, 0x22, 0x95, 0x15, 0x35, 0x51, 0x5c, 0xa4, 0xf1, 0x07, 0x36, 0xad, - 0xd6, 0x5f, 0x03, 0x86, 0xfa, 0x3a, 0x07, 0x18, 0xaf, 0x80, 0xc3, 0x83, 0xdf, 0x93, 0x72, 0x5f, - 0x74, 0xec, 0x2f, 0x17, 0xe0, 0x8c, 0x1a, 0xc2, 0xaf, 0xdf, 0x81, 0xbb, 0xdd, 0x3d, 0x70, 0xc7, - 0xf0, 0x1e, 0x66, 0xdf, 0x82, 0x11, 0x1e, 0x54, 0x6a, 0x00, 0xf9, 0xec, 0x49, 0x33, 0xfe, 0xa2, - 0x12, 0x09, 0x8c, 0x18, 0x8c, 0xdf, 0x6d, 0xc1, 0xd4, 0xc6, 0x72, 0xad, 0x1e, 0x34, 0x76, 0x49, - 0xbc, 0xc8, 0xe5, 0x69, 0x2c, 0x64, 0x2d, 0xeb, 0x21, 0x65, 0xa8, 0x2c, 0xe9, 0xec, 0x12, 0x0c, - 0xed, 0x04, 0x51, 0x9c, 0x7e, 0x2c, 0xbf, 0x1e, 0x44, 0x31, 0x66, 0x10, 0xfb, 0x77, 0x2c, 0x18, - 0x66, 0xb9, 0xc2, 0xfb, 0x65, 0xab, 0x1f, 0xe4, 0xbb, 0xd0, 0xcb, 0x30, 0x42, 0xb6, 0xb6, 0x48, - 0x23, 0x16, 0xb3, 0x2a, 0xbd, 0x9b, 0x47, 0x56, 0x58, 0x29, 0x15, 0x30, 0x58, 0x63, 0xfc, 0x2f, - 0x16, 0xc8, 0xe8, 0x2e, 0x94, 0x63, 0xaf, 0x45, 0x16, 0x5d, 0x57, 0x3c, 0x37, 0x3e, 0x84, 0x87, - 0xf6, 0x86, 0x24, 0x80, 0x13, 0x5a, 0xf6, 0x97, 0x0a, 0x00, 0x49, 0xb4, 0x8f, 0x7e, 0x9f, 0xb8, - 0xd4, 0xf5, 0x10, 0x75, 0x39, 0xe3, 0x21, 0x0a, 0x25, 0x04, 0x33, 0x5e, 0xa1, 0xd4, 0x30, 0x15, - 0x07, 0x1a, 0xa6, 0xa1, 0xa3, 0x0c, 0xd3, 0x32, 0xcc, 0x24, 0xd1, 0x4a, 0xcc, 0x60, 0x4d, 0xec, - 0x0e, 0xb5, 0x91, 0x06, 0xe2, 0x6e, 0x7c, 0x9b, 0xc0, 0x25, 0x15, 0xb4, 0x41, 0x9c, 0x35, 0xcc, - 0x9a, 0x55, 0x7f, 0xd8, 0xeb, 0x33, 0x4e, 0xc9, 0x4b, 0x5b, 0x21, 0xf7, 0xa5, 0xed, 0x47, 0x2d, - 0x38, 0x9d, 0x6e, 0x87, 0xb9, 0x17, 0x7e, 0xd1, 0x82, 0x33, 0xec, 0xbd, 0x91, 0xb5, 0xda, 0xfd, - 0xba, 0xf9, 0x52, 0xcf, 0x40, 0x14, 0x39, 0x3d, 0x4e, 0xdc, 0xe8, 0xd7, 0xb2, 0x48, 0xe3, 0xec, - 0x16, 0xed, 0xff, 0x58, 0x80, 0xd9, 0xbc, 0x08, 0x16, 0xcc, 0xd8, 0xdd, 0xb9, 0x5f, 0xdf, 0x25, - 0xf7, 0x84, 0x49, 0x71, 0x62, 0xec, 0xce, 0x8b, 0xb1, 0x84, 0xa7, 
0xc3, 0x5b, 0x17, 0x06, 0x0b, - 0x6f, 0x8d, 0x76, 0x60, 0xe6, 0xde, 0x0e, 0xf1, 0x6f, 0xfb, 0x91, 0x13, 0x7b, 0xd1, 0x96, 0xc7, - 0x1e, 0x0a, 0xf9, 0xba, 0xf9, 0x98, 0x34, 0xfc, 0xbd, 0x9b, 0x46, 0x38, 0x3c, 0x98, 0xbf, 0x60, - 0x14, 0x24, 0x5d, 0xe6, 0x8c, 0x04, 0x77, 0x13, 0xed, 0x8e, 0x0e, 0x3e, 0xf4, 0x08, 0xa3, 0x83, - 0xdb, 0x5f, 0xb4, 0xe0, 0x5c, 0x6e, 0xf6, 0x3e, 0x74, 0x05, 0x4a, 0x4e, 0xdb, 0xe3, 0xba, 0x56, - 0xc1, 0x46, 0x99, 0xce, 0xa0, 0x56, 0xe5, 0x9a, 0x56, 0x05, 0x55, 0x59, 0x85, 0x0b, 0xb9, 0x59, - 0x85, 0xfb, 0x26, 0x09, 0xb6, 0xbf, 0xcb, 0x02, 0xe1, 0xa8, 0x37, 0x00, 0xef, 0xfe, 0xb4, 0x4c, - 0xca, 0x6e, 0x64, 0x10, 0xb9, 0x94, 0xef, 0xb9, 0x28, 0xf2, 0x86, 0x28, 0x59, 0xc9, 0xc8, 0x16, - 0x62, 0xd0, 0xb2, 0x5d, 0x10, 0xd0, 0x0a, 0x61, 0x9a, 0xca, 0xfe, 0xbd, 0x79, 0x01, 0xc0, 0x65, - 0xb8, 0x5a, 0x6a, 0x66, 0x75, 0x32, 0x57, 0x14, 0x04, 0x6b, 0x58, 0xf6, 0xbf, 0x2b, 0xc0, 0x98, - 0xcc, 0x58, 0xd1, 0xf1, 0x07, 0xd1, 0x27, 0x1c, 0x29, 0x85, 0x1d, 0xcb, 0x65, 0x4e, 0x09, 0xd7, - 0x12, 0x35, 0x4c, 0x92, 0xcb, 0x5c, 0x02, 0x70, 0x82, 0x43, 0x77, 0x51, 0xd4, 0xd9, 0x64, 0xe8, - 0x29, 0xb7, 0xb2, 0x3a, 0x2f, 0xc6, 0x12, 0x8e, 0x3e, 0x09, 0xd3, 0xbc, 0x5e, 0x18, 0xb4, 0x9d, - 0x6d, 0xae, 0xc4, 0x1e, 0x56, 0xfe, 0xe0, 0xd3, 0x6b, 0x29, 0xd8, 0xe1, 0xc1, 0xfc, 0xe9, 0x74, - 0x19, 0x7b, 0x9d, 0xe9, 0xa2, 0xc2, 0xcc, 0x43, 0x78, 0x23, 0x74, 0xf7, 0x77, 0x59, 0x95, 0x24, - 0x20, 0xac, 0xe3, 0xd9, 0x9f, 0x03, 0xd4, 0x9d, 0xbb, 0x03, 0xbd, 0xc1, 0x6d, 0x02, 0xbd, 0x90, - 0xb8, 0xbd, 0x5e, 0x6b, 0x74, 0xaf, 0x67, 0xe9, 0x11, 0xc2, 0x6b, 0x61, 0x55, 0xdf, 0xfe, 0xab, - 0x45, 0x98, 0x4e, 0xfb, 0xc0, 0xa2, 0xeb, 0x30, 0xc2, 0x45, 0x0f, 0x41, 0xbe, 0x87, 0x31, 0x80, - 0xe6, 0x39, 0xcb, 0x98, 0xb0, 0x90, 0x5e, 0x44, 0x7d, 0xf4, 0x16, 0x8c, 0xb9, 0xc1, 0x3d, 0xff, - 0x9e, 0x13, 0xba, 0x8b, 0xb5, 0xaa, 0x58, 0xce, 0x99, 0xb7, 0xa5, 0x4a, 0x82, 0xa6, 0x7b, 0xe3, - 0xb2, 0x87, 0xaf, 0x04, 0x84, 0x75, 0x72, 0x68, 0x83, 0x85, 0x1a, 0xde, 0xf2, 0xb6, 0xd7, 0x9c, - 0x76, 0x2f, 0x03, 0xf1, 0x65, 0x89, 0xa4, 0x51, 0x9e, 0x10, 0xf1, 0x88, 0x39, 0x00, 0x27, 0x84, - 0xd0, 0xb7, 0xc0, 0xa9, 0x28, 0x47, 0x27, 0x9b, 0x97, 0xca, 0xa9, 0x97, 0x9a, 0x72, 0xe9, 0x31, - 0x7a, 0x8f, 0xcd, 0xd2, 0xde, 0x66, 0x35, 0x63, 0xff, 0xea, 0x29, 0x30, 0x36, 0xb1, 0x91, 0xd9, - 0xcf, 0x3a, 0xa6, 0xcc, 0x7e, 0x18, 0x4a, 0xa4, 0xd5, 0x8e, 0xf7, 0x2b, 0x5e, 0xd8, 0x2b, 0x35, - 0xec, 0x8a, 0xc0, 0xe9, 0xa6, 0x29, 0x21, 0x58, 0xd1, 0xc9, 0x4e, 0xbf, 0x58, 0xfc, 0x1a, 0xa6, - 0x5f, 0x1c, 0x3a, 0xc1, 0xf4, 0x8b, 0xeb, 0x30, 0xba, 0xed, 0xc5, 0x98, 0xb4, 0x03, 0x21, 0xf4, - 0x67, 0xae, 0xc3, 0x6b, 0x1c, 0xa5, 0x3b, 0xd1, 0x97, 0x00, 0x60, 0x49, 0x04, 0xbd, 0xa1, 0x76, - 0xe0, 0x48, 0xfe, 0x9d, 0xb9, 0xfb, 0xd5, 0x3a, 0x73, 0x0f, 0x8a, 0x24, 0x8b, 0xa3, 0x0f, 0x9b, - 0x64, 0x71, 0x55, 0xa6, 0x46, 0x2c, 0xe5, 0x7b, 0x73, 0xb0, 0xcc, 0x87, 0x7d, 0x12, 0x22, 0xde, - 0xd1, 0xd3, 0x49, 0x96, 0xf3, 0x39, 0x81, 0xca, 0x14, 0x39, 0x60, 0x12, 0xc9, 0xef, 0xb2, 0xe0, - 0x4c, 0x3b, 0x2b, 0xb3, 0xaa, 0x78, 0xe0, 0x7d, 0x79, 0xe0, 0xd4, 0xb1, 0x46, 0x83, 0x4c, 0x51, - 0x93, 0x89, 0x86, 0xb3, 0x9b, 0xa3, 0x03, 0x1d, 0x6e, 0xba, 0x22, 0x0b, 0xe2, 0x93, 0x39, 0xd9, - 0x28, 0x7b, 0xe4, 0xa0, 0xdc, 0xc8, 0xc8, 0x7c, 0xf8, 0xc1, 0xbc, 0xcc, 0x87, 0x03, 0xe7, 0x3b, - 0x7c, 0x43, 0xe5, 0xa1, 0x9c, 0xc8, 0x5f, 0x4a, 0x3c, 0xcb, 0x64, 0xdf, 0xec, 0x93, 0x6f, 0xa8, - 0xec, 0x93, 0x3d, 0xe2, 0x54, 0xf2, 0xdc, 0x92, 0x7d, 0x73, 0x4e, 0x6a, 0x79, 0x23, 0xa7, 0x8e, - 0x27, 0x6f, 0xa4, 0x71, 0xd4, 0xf0, 0xd4, 0x85, 0xcf, 0xf4, 0x39, 0x6a, 0x0c, 0xba, 0xbd, 
0x0f, - 0x1b, 0x9e, 0x23, 0x73, 0xe6, 0xa1, 0x72, 0x64, 0xde, 0xd1, 0x73, 0x4e, 0xa2, 0x3e, 0x49, 0x15, - 0x29, 0xd2, 0x80, 0x99, 0x26, 0xef, 0xe8, 0x07, 0xe0, 0xa9, 0x7c, 0xba, 0xea, 0x9c, 0xeb, 0xa6, - 0x9b, 0x79, 0x04, 0x76, 0x65, 0xb0, 0x3c, 0x7d, 0x32, 0x19, 0x2c, 0xcf, 0x1c, 0x7b, 0x06, 0xcb, - 0xb3, 0x27, 0x90, 0xc1, 0xf2, 0xb1, 0x13, 0xcc, 0x60, 0x79, 0x87, 0x59, 0x45, 0xf0, 0x70, 0x27, - 0x22, 0xae, 0x66, 0x76, 0x0c, 0xc7, 0xac, 0x98, 0x28, 0xfc, 0xe3, 0x14, 0x08, 0x27, 0xa4, 0x32, - 0x32, 0x63, 0xce, 0x3e, 0x82, 0xcc, 0x98, 0xeb, 0x49, 0x66, 0xcc, 0x73, 0xf9, 0x53, 0x9d, 0x61, - 0xba, 0x9e, 0x93, 0x0f, 0xf3, 0x8e, 0x9e, 0xc7, 0xf2, 0xf1, 0x1e, 0xaa, 0xf8, 0x2c, 0xc5, 0x63, - 0x8f, 0xec, 0x95, 0xaf, 0xf3, 0xec, 0x95, 0xe7, 0xf3, 0x39, 0x79, 0xfa, 0xb8, 0x33, 0x73, 0x56, - 0x7e, 0x4f, 0x01, 0x2e, 0xf6, 0xde, 0x17, 0x89, 0xd6, 0xb3, 0x96, 0xbc, 0x08, 0xa6, 0xb4, 0x9e, - 0xfc, 0x6e, 0x95, 0x60, 0x0d, 0x1c, 0x09, 0xeb, 0x1a, 0xcc, 0x28, 0xdb, 0xf4, 0xa6, 0xd7, 0xd8, - 0xd7, 0xd2, 0xf4, 0x2b, 0x7f, 0xde, 0x7a, 0x1a, 0x01, 0x77, 0xd7, 0x41, 0x8b, 0x30, 0x65, 0x14, - 0x56, 0x2b, 0xe2, 0x0e, 0xa5, 0xd4, 0xac, 0x75, 0x13, 0x8c, 0xd3, 0xf8, 0xf6, 0x4f, 0x5a, 0xf0, - 0x58, 0x4e, 0x72, 0xa8, 0x81, 0x03, 0x3d, 0x6d, 0xc1, 0x54, 0xdb, 0xac, 0xda, 0x27, 0x1e, 0x9c, - 0x91, 0x82, 0x4a, 0xf5, 0x35, 0x05, 0xc0, 0x69, 0xa2, 0xf6, 0x9f, 0x5a, 0x70, 0xa1, 0xa7, 0xe5, - 0x17, 0xc2, 0x70, 0x76, 0xbb, 0x15, 0x39, 0xcb, 0x21, 0x71, 0x89, 0x1f, 0x7b, 0x4e, 0xb3, 0xde, - 0x26, 0x0d, 0x4d, 0x6f, 0xcd, 0x4c, 0xa8, 0xae, 0xad, 0xd5, 0x17, 0xbb, 0x31, 0x70, 0x4e, 0x4d, - 0xb4, 0x0a, 0xa8, 0x1b, 0x22, 0x66, 0x98, 0xc5, 0x8c, 0xed, 0xa6, 0x87, 0x33, 0x6a, 0xa0, 0x57, - 0x60, 0x42, 0x59, 0x94, 0x69, 0x33, 0xce, 0x18, 0x30, 0xd6, 0x01, 0xd8, 0xc4, 0x5b, 0xba, 0xf2, - 0xeb, 0xbf, 0x77, 0xf1, 0x03, 0xbf, 0xf9, 0x7b, 0x17, 0x3f, 0xf0, 0xdb, 0xbf, 0x77, 0xf1, 0x03, - 0xdf, 0xf6, 0xe0, 0xa2, 0xf5, 0xeb, 0x0f, 0x2e, 0x5a, 0xbf, 0xf9, 0xe0, 0xa2, 0xf5, 0xdb, 0x0f, - 0x2e, 0x5a, 0xbf, 0xfb, 0xe0, 0xa2, 0xf5, 0xa5, 0xdf, 0xbf, 0xf8, 0x81, 0x4f, 0x17, 0xf6, 0x9e, - 0xff, 0x7f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x5c, 0x2b, 0xc8, 0x61, 0xd8, 0xfd, 0x00, 0x00, + return i, nil } -func (m *AWSElasticBlockStoreVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AWSElasticBlockStoreVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AWSElasticBlockStoreVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n57, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n57 + } + dAtA[i] = 0x20 + i++ if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - i-- - dAtA[i] = 0x18 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - 
i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x12 - i -= len(m.VolumeID) - copy(dAtA[i:], m.VolumeID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for _, k := range keysForOptions { + dAtA[i] = 0x2a + i++ + v := m.Options[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil } -func (m *Affinity) Marshal() (dAtA []byte, err error) { +func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Affinity) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Affinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.PodAntiAffinity != nil { - { - size, err := m.PodAntiAffinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + if m.SecretRef != nil { dAtA[i] = 0x1a - } - if m.PodAffinity != nil { - { - size, err := m.PodAffinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n58, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n58 } - if m.NodeAffinity != nil { - { - size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Options) > 0 { + keysForOptions := make([]string, 0, len(m.Options)) + for k := range m.Options { + keysForOptions = append(keysForOptions, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + for _, k := range keysForOptions { + dAtA[i] = 0x2a + i++ + v := m.Options[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } -func (m *AttachedVolume) Marshal() (dAtA []byte, err error) { +func (m *FlockerVolumeSource) Marshal() 
(dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AttachedVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AttachedVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.DevicePath) - copy(dAtA[i:], m.DevicePath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) + i += copy(dAtA[i:], m.DatasetName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) + i += copy(dAtA[i:], m.DatasetUUID) + return i, nil } -func (m *AvoidPods) Marshal() (dAtA []byte, err error) { +func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AvoidPods) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AvoidPods) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.PreferAvoidPods) > 0 { - for iNdEx := len(m.PreferAvoidPods) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PreferAvoidPods[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) + i += copy(dAtA[i:], m.PDName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return len(dAtA) - i, nil + i++ + return i, nil } -func (m *AzureDiskVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AzureDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AzureDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Kind != nil { - i -= len(*m.Kind) - copy(dAtA[i:], *m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Kind))) - i-- - dAtA[i] = 0x32 - } - if m.ReadOnly != nil { - i-- - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 - } - if m.FSType != nil { - i -= len(*m.FSType) - copy(dAtA[i:], *m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i-- - dAtA[i] = 
0x22 - } - if m.CachingMode != nil { - i -= len(*m.CachingMode) - copy(dAtA[i:], *m.CachingMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.CachingMode))) - i-- - dAtA[i] = 0x1a - } - i -= len(m.DataDiskURI) - copy(dAtA[i:], m.DataDiskURI) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DataDiskURI))) - i-- - dAtA[i] = 0x12 - i -= len(m.DiskName) - copy(dAtA[i:], m.DiskName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DiskName))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) + i += copy(dAtA[i:], m.Repository) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) + i += copy(dAtA[i:], m.Revision) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) + i += copy(dAtA[i:], m.Directory) + return i, nil } -func (m *AzureFilePersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AzureFilePersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AzureFilePersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.SecretNamespace != nil { - i -= len(*m.SecretNamespace) - copy(dAtA[i:], *m.SecretNamespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SecretNamespace))) - i-- - dAtA[i] = 0x22 - } - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) + i += copy(dAtA[i:], m.EndpointsName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i -= len(m.ShareName) - copy(dAtA[i:], m.ShareName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i-- - dAtA[i] = 0x12 - i -= len(m.SecretName) - copy(dAtA[i:], m.SecretName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + return i, nil } -func (m *AzureFileVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AzureFileVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AzureFileVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i -= len(m.ShareName) - copy(dAtA[i:], m.ShareName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShareName))) - i-- - dAtA[i] = 0x12 - i -= len(m.SecretName) - copy(dAtA[i:], m.SecretName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Binding) Marshal() (dAtA []byte, err error) { - size := m.Size() - 
dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n59, err := m.Port.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) + i += copy(dAtA[i:], m.Scheme) + if len(m.HTTPHeaders) > 0 { + for _, msg := range m.HTTPHeaders { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Binding) MarshalTo(dAtA []byte) (int, error) { +func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *Handler) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Binding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Handler) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Target.MarshalToSizedBuffer(dAtA[:i]) + if m.Exec != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) + n60, err := m.Exec.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n60 } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.HTTPGet != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) + n61, err := m.HTTPGet.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n61 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.TCPSocket != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) + n62, err := m.TCPSocket.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 + } + return i, nil } -func (m *CSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *HostAlias) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { +func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + if len(m.Hostnames) > 0 { + for _, s := range m.Hostnames { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] 
= uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *CSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.ControllerExpandSecretRef != nil { - { - size, err := m.ControllerExpandSecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.Type != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) + i += copy(dAtA[i:], *m.Type) } - if m.NodePublishSecretRef != nil { - { - size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 + return i, nil +} + +func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.NodeStageSecretRef != nil { - { - size, err := m.NodeStageSecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a + return dAtA[:n], nil +} + +func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i += copy(dAtA[i:], m.TargetPortal) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i += copy(dAtA[i:], m.IQN) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i += copy(dAtA[i:], m.ISCSIInterface) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x30 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.ControllerPublishSecretRef != nil { - { - size, err := m.ControllerPublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x32 } - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { - v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= 
len(keysForVolumeAttributes[iNdEx]) - copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a + dAtA[i] = 0x40 + i++ + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n63, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n63 } - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x22 - i-- - if m.ReadOnly { + dAtA[i] = 0x58 + i++ + if m.SessionCHAPAuth { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i -= len(m.VolumeHandle) - copy(dAtA[i:], m.VolumeHandle) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeHandle))) - i-- - dAtA[i] = 0x12 - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) + } + return i, nil } -func (m *CSIVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.NodePublishSecretRef != nil { - { - size, err := m.NodePublishSecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) + i += copy(dAtA[i:], m.TargetPortal) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) + i += copy(dAtA[i:], m.IQN) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) + i += copy(dAtA[i:], m.ISCSIInterface) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x30 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.Portals) > 0 { + for _, s := range m.Portals { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x2a } - if len(m.VolumeAttributes) > 0 { - keysForVolumeAttributes := make([]string, 0, len(m.VolumeAttributes)) - for k := range m.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - for iNdEx := len(keysForVolumeAttributes) - 1; iNdEx >= 0; iNdEx-- { - v := m.VolumeAttributes[string(keysForVolumeAttributes[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForVolumeAttributes[iNdEx]) - copy(dAtA[i:], keysForVolumeAttributes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForVolumeAttributes[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 + dAtA[i] = 0x40 + i++ + if m.DiscoveryCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecretRef != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n64, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n64 } - if m.FSType != nil { - i -= len(*m.FSType) - copy(dAtA[i:], *m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x58 + i++ + if m.SessionCHAPAuth { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if m.ReadOnly != nil { - i-- - if *m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 + i++ + if m.InitiatorName != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) + i += copy(dAtA[i:], *m.InitiatorName) } - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *Capabilities) Marshal() (dAtA []byte, err error) { +func (m *KeyToPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Capabilities) MarshalTo(dAtA []byte) (int, error) { +func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.Mode != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) + } + return i, nil +} + +func (m *Lifecycle) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Capabilities) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Drop) > 0 { - for iNdEx := len(m.Drop) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Drop[iNdEx]) - copy(dAtA[i:], m.Drop[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Drop[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.PostStart != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) + n65, err := m.PostStart.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n65 } - if len(m.Add) > 0 { - for iNdEx := len(m.Add) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Add[iNdEx]) - copy(dAtA[i:], m.Add[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Add[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.PreStop != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) + n66, err := m.PreStop.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n66 } - return len(dAtA) - i, nil + return i, nil } -func (m *CephFSPersistentVolumeSource) Marshal() (dAtA []byte, err 
error) { +func (m *LimitRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CephFSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CephFSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n67, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i -= len(m.SecretFile) - copy(dAtA[i:], m.SecretFile) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i-- - dAtA[i] = 0x22 - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x1a - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- + i += n67 dAtA[i] = 0x12 - if len(m.Monitors) > 0 { - for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Monitors[iNdEx]) - copy(dAtA[i:], m.Monitors[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) - i-- - dAtA[i] = 0xa - } + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n68, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n68 + return i, nil } -func (m *CephFSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CephFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CephFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if len(m.Max) > 0 { + keysForMax := make([]string, 0, len(m.Max)) + for k := range m.Max { + keysForMax = append(keysForMax, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + for _, k := range keysForMax { + dAtA[i] = 0x12 + i++ + v := m.Max[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n69, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n69 + } } - i-- - dAtA[i] = 0x30 - if 
m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Min) > 0 { + keysForMin := make([]string, 0, len(m.Min)) + for k := range m.Min { + keysForMin = append(keysForMin, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + for _, k := range keysForMin { + dAtA[i] = 0x1a + i++ + v := m.Min[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n70, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n70 } - i-- - dAtA[i] = 0x2a } - i -= len(m.SecretFile) - copy(dAtA[i:], m.SecretFile) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretFile))) - i-- - dAtA[i] = 0x22 - i -= len(m.User) - copy(dAtA[i:], m.User) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i-- - dAtA[i] = 0x1a - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - if len(m.Monitors) > 0 { - for iNdEx := len(m.Monitors) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Monitors[iNdEx]) - copy(dAtA[i:], m.Monitors[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Monitors[iNdEx]))) - i-- + if len(m.Default) > 0 { + keysForDefault := make([]string, 0, len(m.Default)) + for k := range m.Default { + keysForDefault = append(keysForDefault, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + for _, k := range keysForDefault { + dAtA[i] = 0x22 + i++ + v := m.Default[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n71, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n71 + } + } + if len(m.DefaultRequest) > 0 { + keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) + for k := range m.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + for _, k := range keysForDefaultRequest { + dAtA[i] = 0x2a + i++ + v := m.DefaultRequest[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n72, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n72 + } + } + if len(m.MaxLimitRequestRatio) > 0 { + keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) + for k := range m.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) + } + 
github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + for _, k := range keysForMaxLimitRequestRatio { + dAtA[i] = 0x32 + i++ + v := m.MaxLimitRequestRatio[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n73, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n73 } } - return len(dAtA) - i, nil + return i, nil } -func (m *CinderPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CinderPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CinderPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n74, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n74 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x22 - } - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x12 - i -= len(m.VolumeID) - copy(dAtA[i:], m.VolumeID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *CinderVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CinderVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CinderVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Limits) > 0 { + for _, msg := range m.Limits { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x22 - } - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x12 - i -= len(m.VolumeID) - copy(dAtA[i:], m.VolumeID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ClientIPConfig) Marshal() (dAtA []byte, err error) { +func (m *List) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ClientIPConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClientIPConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *List) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x8 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n75, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n75 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - return len(dAtA) - i, nil + return i, nil } -func (m *ComponentCondition) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ComponentCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ComponentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x22 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) + i += copy(dAtA[i:], m.IP) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + return i, nil } -func (m *ComponentStatus) Marshal() (dAtA []byte, err error) { +func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ComponentStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ComponentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 
1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ComponentStatusList) Marshal() (dAtA []byte, err error) { +func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ComponentStatusList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ComponentStatusList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil } -func (m *ConfigMap) Marshal() (dAtA []byte, err error) { +func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ConfigMap) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigMap) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Immutable != nil { - i-- - if *m.Immutable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if len(m.BinaryData) > 0 { - keysForBinaryData := make([]string, 0, len(m.BinaryData)) - for k := range m.BinaryData { - keysForBinaryData = append(keysForBinaryData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - for iNdEx := len(keysForBinaryData) - 1; iNdEx >= 0; iNdEx-- { - v := m.BinaryData[string(keysForBinaryData[iNdEx])] - baseI := i - if v != nil { - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - } - i -= len(keysForBinaryData[iNdEx]) - copy(dAtA[i:], keysForBinaryData[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForBinaryData[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - 
keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for iNdEx := len(keysForData) - 1; iNdEx >= 0; iNdEx-- { - v := m.Data[string(keysForData[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForData[iNdEx]) - copy(dAtA[i:], keysForData[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForData[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + if m.FSType != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) + i += copy(dAtA[i:], *m.FSType) + } + return i, nil } -func (m *ConfigMapEnvSource) Marshal() (dAtA []byte, err error) { +func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ConfigMapEnvSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigMapEnvSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Optional != nil { - i-- - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - { - size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) + i += copy(dAtA[i:], m.Server) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } -func (m *ConfigMapKeySelector) Marshal() (dAtA []byte, err error) { +func (m *Namespace) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ConfigMapKeySelector) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigMapKeySelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Optional != nil { - i-- - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n76, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- + i += n76 dAtA[i] = 0x12 - { - size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n77, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n77 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n78, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n78 + return i, nil } -func (m *ConfigMapList) Marshal() (dAtA []byte, err error) { +func (m *NamespaceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ConfigMapList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigMapList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n79, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n79 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ConfigMapNodeConfigSource) Marshal() (dAtA []byte, err error) { +func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ConfigMapNodeConfigSource) MarshalTo(dAtA []byte) (int, error) { +func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *ConfigMapNodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.KubeletConfigKey) - copy(dAtA[i:], m.KubeletConfigKey) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletConfigKey))) - i-- - dAtA[i] = 0x2a - i -= len(m.ResourceVersion) - copy(dAtA[i:], m.ResourceVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i-- - dAtA[i] = 0x22 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - 
i-- - dAtA[i] = 0x1a - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + return i, nil } -func (m *ConfigMapProjection) Marshal() (dAtA []byte, err error) { +func (m *Node) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ConfigMapProjection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigMapProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Optional != nil { - i-- - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n80, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + i += n80 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n81, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - { - size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n81 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n82, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n82 + return i, nil } -func (m *ConfigMapVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeAddress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ConfigMapVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ConfigMapVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Optional != nil { - i-- - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.DefaultMode != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - i-- - dAtA[i] = 0x18 - } - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- 
dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) + i += copy(dAtA[i:], m.Address) + return i, nil } -func (m *Container) Marshal() (dAtA []byte, err error) { +func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Container) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Container) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.StartupProbe != nil { - { - size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.VolumeDevices) > 0 { - for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - i -= len(m.TerminationMessagePolicy) - copy(dAtA[i:], m.TerminationMessagePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - if len(m.EnvFrom) > 0 { - for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - } - i-- - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x90 - i-- - if m.StdinOnce { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - i-- - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - if m.SecurityContext != nil { - { - size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - } - i -= len(m.ImagePullPolicy) - copy(dAtA[i:], m.ImagePullPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) - i-- - dAtA[i] = 0x72 - i -= len(m.TerminationMessagePath) - copy(dAtA[i:], m.TerminationMessagePath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) - i-- - dAtA[i] = 0x6a - if m.Lifecycle != nil { - { - size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - if m.ReadinessProbe != nil { - { - size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if m.LivenessProbe != nil { - { - size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if 
len(m.VolumeMounts) > 0 { - for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - } - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) + n83, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + i += n83 } - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x32 - } - } - i -= len(m.WorkingDir) - copy(dAtA[i:], m.WorkingDir) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) - i-- - dAtA[i] = 0x2a - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0x22 - } - } - if len(m.Command) > 0 { - for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Command[iNdEx]) - copy(dAtA[i:], m.Command[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) - i-- - dAtA[i] = 0x1a + i += n } } - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ContainerImage) Marshal() (dAtA []byte, err error) { +func (m *NodeCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ContainerImage) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerImage) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeBytes)) - i-- - dAtA[i] = 0x10 - if len(m.Names) > 0 { - for iNdEx := len(m.Names) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Names[iNdEx]) - copy(dAtA[i:], m.Names[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Names[iNdEx]))) - i-- - dAtA[i] = 0xa - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += 
copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) + n84, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n84 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n85, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n85 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } -func (m *ContainerPort) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ContainerPort) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.HostIP) - copy(dAtA[i:], m.HostIP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) - i-- - dAtA[i] = 0x2a - i -= len(m.Protocol) - copy(dAtA[i:], m.Protocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i-- - dAtA[i] = 0x22 - i = encodeVarintGenerated(dAtA, i, uint64(m.ContainerPort)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPort)) - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.ConfigMap != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n86, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n86 + } + return i, nil } -func (m *ContainerState) Marshal() (dAtA []byte, err error) { +func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ContainerState) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerState) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Terminated != nil { - { - size, err := m.Terminated.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Assigned != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size())) + n87, err := m.Assigned.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n87 } - if m.Running != nil { - { - size, err := m.Running.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + if m.Active != nil { dAtA[i] = 0x12 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) + n88, err := m.Active.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n88 } - if m.Waiting != nil { - { - size, err := m.Waiting.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.LastKnownGood != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) + n89, err := m.LastKnownGood.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n89 } - return len(dAtA) - i, nil + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) + i += copy(dAtA[i:], m.Error) + return i, nil } -func (m *ContainerStateRunning) Marshal() (dAtA []byte, err error) { +func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ContainerStateRunning) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerStateRunning) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.KubeletEndpoint.Size())) + n90, err := m.KubeletEndpoint.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n90 + return i, nil } -func (m *ContainerStateTerminated) Marshal() (dAtA []byte, err error) { +func (m *NodeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ContainerStateTerminated) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerStateTerminated) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0x3a - { - size, err := m.FinishedAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n91, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x32 - { - size, err := m.StartedAt.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n91 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x2a - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x22 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x1a - i = encodeVarintGenerated(dAtA, i, uint64(m.Signal)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.ExitCode)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } -func (m *ContainerStateWaiting) Marshal() (dAtA []byte, err error) { +func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ContainerStateWaiting) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerStateWaiting) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil } -func (m *ContainerStatus) Marshal() (dAtA []byte, err error) { +func (m *NodeResources) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ContainerStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ContainerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Started != nil { - i-- - if *m.Started { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n92, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n92 } - i-- - dAtA[i] = 0x48 } - i -= len(m.ContainerID) - copy(dAtA[i:], m.ContainerID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i-- - dAtA[i] = 0x42 - i -= len(m.ImageID) - copy(dAtA[i:], m.ImageID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) - i-- - dAtA[i] = 0x3a - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x32 - i = encodeVarintGenerated(dAtA, i, uint64(m.RestartCount)) - i-- - dAtA[i] = 0x28 - i-- - if m.Ready { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - { - size, err := m.LastTerminationState.MarshalToSizedBuffer(dAtA[:i]) - 
if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.State.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { +func (m *NodeSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonEndpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + if len(m.NodeSelectorTerms) > 0 { + for _, msg := range m.NodeSelectorTerms { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { +func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DownwardAPIProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0xa + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } -func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { +func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DownwardAPIVolumeFile) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ 
= l - if m.Mode != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - i-- - dAtA[i] = 0x20 - } - if m.ResourceFieldRef != nil { - { - size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x1a } - if m.FieldRef != nil { - { - size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) + if len(m.MatchFields) > 0 { + for _, msg := range m.MatchFields { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x12 } - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DownwardAPIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.DefaultMode != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - i-- - dAtA[i] = 0x10 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) + i += copy(dAtA[i:], m.PodCIDR) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUse_ExternalID))) + i += copy(dAtA[i:], m.DoNotUse_ExternalID) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) + i += copy(dAtA[i:], m.ProviderID) + dAtA[i] = 0x20 + i++ + if m.Unschedulable { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + if len(m.Taints) > 0 { + for _, msg := range m.Taints { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n + } + } + if m.ConfigSource != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigSource.Size())) + n93, err := m.ConfigSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n93 } - return len(dAtA) - i, nil + return i, nil } -func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *NodeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*EmptyDirVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.SizeLimit != nil { - { - size, err := m.SizeLimit.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n94, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n94 } - i-- - dAtA[i] = 0x12 } - i -= len(m.Medium) - copy(dAtA[i:], m.Medium) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if len(m.Allocatable) > 0 { + keysForAllocatable := make([]string, 0, len(m.Allocatable)) + for k := range m.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + for _, k := range keysForAllocatable { + dAtA[i] = 0x12 + i++ + v := m.Allocatable[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n95, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n95 + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Addresses) > 0 { + for _, msg := range m.Addresses { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DaemonEndpoints.Size())) + n96, err := m.DaemonEndpoints.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n96 + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeInfo.Size())) + n97, err := m.NodeInfo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n97 + if len(m.Images) > 0 { + for _, msg := range m.Images { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + dAtA[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + 
l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.VolumesAttached) > 0 { + for _, msg := range m.VolumesAttached { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Config != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Config.Size())) + n98, err := m.Config.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n98 + } + return i, nil } -func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { +func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EndpointAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.NodeName != nil { - i -= len(*m.NodeName) - copy(dAtA[i:], *m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) - i-- - dAtA[i] = 0x22 - } - i -= len(m.Hostname) - copy(dAtA[i:], m.Hostname) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i-- - dAtA[i] = 0x1a - if m.TargetRef != nil { - { - size, err := m.TargetRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) + i += copy(dAtA[i:], m.MachineID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) + i += copy(dAtA[i:], m.SystemUUID) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) + i += copy(dAtA[i:], m.BootID) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) + i += copy(dAtA[i:], m.KernelVersion) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) + i += copy(dAtA[i:], m.OSImage) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) + i += copy(dAtA[i:], m.ContainerRuntimeVersion) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) + i += copy(dAtA[i:], m.KubeletVersion) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) + i += copy(dAtA[i:], m.KubeProxyVersion) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) + i += copy(dAtA[i:], m.OperatingSystem) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) + i += copy(dAtA[i:], m.Architecture) + return i, nil } -func (m *EndpointPort) Marshal() (dAtA []byte, err error) { +func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { +func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) 
(int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i += copy(dAtA[i:], m.FieldPath) + return i, nil +} + +func (m *ObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *EndpointPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.AppProtocol != nil { - i -= len(*m.AppProtocol) - copy(dAtA[i:], *m.AppProtocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) - i-- - dAtA[i] = 0x22 - } - i -= len(m.Protocol) - copy(dAtA[i:], m.Protocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) dAtA[i] = 0x1a - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) + i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) + i += copy(dAtA[i:], m.FieldPath) + return i, nil } -func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EndpointSubset) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n99, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.NotReadyAddresses) > 0 { - for iNdEx := len(m.NotReadyAddresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.NotReadyAddresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + i += n99 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) 
+ n100, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + i += n100 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n101, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n101 + return i, nil } -func (m *Endpoints) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Endpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Subsets) > 0 { - for iNdEx := len(m.Subsets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Subsets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n102, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n102 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n103, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n103 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n104, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n104 + return i, nil } -func (m *EndpointsList) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EndpointsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n105, err := 
m.LastProbeTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n105 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n106, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n106 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } -func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EnvFromSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n107, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.ConfigMapRef != nil { - { - size, err := m.ConfigMapRef.MarshalToSizedBuffer(dAtA[:i]) + i += n107 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x12 } - i -= len(m.Prefix) - copy(dAtA[i:], m.Prefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *EnvVar) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EnvVar) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.ValueFrom != nil { - { - size, err := m.ValueFrom.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x1a } - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- dAtA[i] = 0x12 
- i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Resources.Size())) + n108, err := m.Resources.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n108 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + if m.Selector != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n109, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n109 + } + if m.StorageClassName != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) + i += copy(dAtA[i:], *m.StorageClassName) + } + if m.VolumeMode != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) + } + if m.DataSource != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DataSource.Size())) + n110, err := m.DataSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n110 + } + return i, nil } -func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EnvVarSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.SecretKeyRef != nil { - { - size, err := m.SecretKeyRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x22 } - if m.ConfigMapKeyRef != nil { - { - size, err := m.ConfigMapKeyRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) } - i-- - dAtA[i] = 0x1a - } - if m.ResourceFieldRef != nil { - { - size, err := m.ResourceFieldRef.MarshalToSizedBuffer(dAtA[:i]) + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0x1a + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n111, err := (&v).MarshalTo(dAtA[i:]) if err != nil { 
return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n111 } - i-- - dAtA[i] = 0x12 } - if m.FieldRef != nil { - { - size, err := m.FieldRef.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } -func (m *EphemeralContainer) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EphemeralContainer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EphemeralContainer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.TargetContainerName) - copy(dAtA[i:], m.TargetContainerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetContainerName))) - i-- - dAtA[i] = 0x12 - { - size, err := m.EphemeralContainerCommon.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) + i += copy(dAtA[i:], m.ClaimName) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } -func (m *EphemeralContainerCommon) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EphemeralContainerCommon) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EphemeralContainerCommon) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.StartupProbe != nil { - { - size, err := m.StartupProbe.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n112, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n112 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 } - if len(m.VolumeDevices) > 0 { - for iNdEx := len(m.VolumeDevices) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeDevices[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } + return i, nil +} + +func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { + size := m.Size() 
+ dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - i -= len(m.TerminationMessagePolicy) - copy(dAtA[i:], m.TerminationMessagePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePolicy))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - if len(m.EnvFrom) > 0 { - for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a + return dAtA[:n], nil +} + +func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.GCEPersistentDisk != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n113, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n113 } - i-- - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n114, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n114 } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x90 - i-- - if m.StdinOnce { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.HostPath != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n115, err := m.HostPath.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n115 } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x88 - i-- - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Glusterfs != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n116, err := m.Glusterfs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n116 } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - if m.SecurityContext != nil { - { - size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.NFS != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n117, err := m.NFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x7a + i += n117 } - i -= len(m.ImagePullPolicy) - copy(dAtA[i:], m.ImagePullPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImagePullPolicy))) - i-- - dAtA[i] = 0x72 - i -= len(m.TerminationMessagePath) - copy(dAtA[i:], m.TerminationMessagePath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TerminationMessagePath))) - i-- - dAtA[i] = 0x6a - if m.Lifecycle != nil { - { - size, err := m.Lifecycle.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.RBD != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n118, err := m.RBD.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x62 + i += n118 } - if m.ReadinessProbe != nil { - { - size, err := m.ReadinessProbe.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.ISCSI != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n119, err := m.ISCSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x5a + i += n119 } - if 
m.LivenessProbe != nil { - { - size, err := m.LivenessProbe.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Cinder != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n120, err := m.Cinder.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n120 + } + if m.CephFS != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n121, err := m.CephFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n121 + } + if m.FC != nil { dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n122, err := m.FC.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n122 } - if len(m.VolumeMounts) > 0 { - for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a + if m.Flocker != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n123, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n123 } - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n124, err := m.FlexVolume.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n124 } - i-- - dAtA[i] = 0x42 - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a + if m.AzureFile != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n125, err := m.AzureFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n125 } - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + if m.VsphereVolume != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n126, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n126 } - i -= len(m.WorkingDir) - copy(dAtA[i:], m.WorkingDir) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WorkingDir))) - i-- - dAtA[i] = 0x2a - if len(m.Args) > 0 { - for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Args[iNdEx]) - copy(dAtA[i:], m.Args[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Args[iNdEx]))) - i-- - dAtA[i] = 0x22 + if m.Quobyte != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n127, err := m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n127 } - if len(m.Command) > 0 { - for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Command[iNdEx]) - copy(dAtA[i:], m.Command[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) - i-- - dAtA[i] = 0x1a + if m.AzureDisk != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.AzureDisk.Size())) + n128, err := m.AzureDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n128 } - i -= len(m.Image) - copy(dAtA[i:], m.Image) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Image))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *EphemeralContainers) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.PhotonPersistentDisk != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) + n129, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n129 } - return dAtA[:n], nil -} - -func (m *EphemeralContainers) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EphemeralContainers) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.EphemeralContainers) > 0 { - for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if m.PortworxVolume != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) + n130, err := m.PortworxVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n130 } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.ScaleIO != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) + n131, err := m.ScaleIO.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n131 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.Local != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Local.Size())) + n132, err := m.Local.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n132 + } + if m.StorageOS != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) + n133, err := m.StorageOS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n133 + } + if m.CSI != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CSI.Size())) + n134, err := m.CSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n134 + } + return i, nil } -func (m *Event) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.ReportingInstance) - copy(dAtA[i:], m.ReportingInstance) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.ReportingInstance))) - i-- - dAtA[i] = 0x7a - i -= len(m.ReportingController) - copy(dAtA[i:], m.ReportingController) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i-- - dAtA[i] = 0x72 - if m.Related != nil { - { - size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Capacity) > 0 { + keysForCapacity := make([]string, 0, len(m.Capacity)) + for k := range m.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) } - i-- - dAtA[i] = 0x6a - } - i -= len(m.Action) - copy(dAtA[i:], m.Action) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i-- - dAtA[i] = 0x62 - if m.Series != nil { - { - size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + for _, k := range keysForCapacity { + dAtA[i] = 0xa + i++ + v := m.Capacity[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n135, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n135 } - i-- - dAtA[i] = 0x5a } - { - size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeSource.Size())) + n136, err := m.PersistentVolumeSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x52 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x4a - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x40 - { - size, err := m.LastTimestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n136 + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x3a - { - size, err := m.FirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) + if m.ClaimRef != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClaimRef.Size())) + n137, err := m.ClaimRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n137 } - i-- + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) + i += copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) dAtA[i] = 0x32 - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) + i += copy(dAtA[i:], m.StorageClassName) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i -= 
size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x2a - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x22 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x1a - { - size, err := m.InvolvedObject.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.VolumeMode != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) + i += copy(dAtA[i:], *m.VolumeMode) } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.NodeAffinity != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodeAffinity.Size())) + n138, err := m.NodeAffinity.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n138 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *EventList) Marshal() (dAtA []byte, err error) { +func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + return i, nil } -func (m *EventSeries) Marshal() (dAtA []byte, err error) { +func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x1a - { - size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + dAtA[i] = 0xa + i++ + 
i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) + i += copy(dAtA[i:], m.PdID) dAtA[i] = 0x12 - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + return i, nil } -func (m *EventSource) Marshal() (dAtA []byte, err error) { +func (m *Pod) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Pod) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0x12 - i -= len(m.Component) - copy(dAtA[i:], m.Component) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n139, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n139 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n140, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n140 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n141, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n141 + return i, nil } -func (m *ExecAction) Marshal() (dAtA []byte, err error) { +func (m *PodAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExecAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Command) > 0 { - for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Command[iNdEx]) - copy(dAtA[i:], m.Command[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) - i-- + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } -func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m 
*FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FCVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.WWIDs) > 0 { - for iNdEx := len(m.WWIDs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.WWIDs[iNdEx]) - copy(dAtA[i:], m.WWIDs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WWIDs[iNdEx]))) - i-- - dAtA[i] = 0x2a + if m.LabelSelector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LabelSelector.Size())) + n142, err := m.LabelSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n142 } - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x1a - if m.Lun != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) - i-- - dAtA[i] = 0x10 - } - if len(m.TargetWWNs) > 0 { - for iNdEx := len(m.TargetWWNs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.TargetWWNs[iNdEx]) - copy(dAtA[i:], m.TargetWWNs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetWWNs[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) + i += copy(dAtA[i:], m.TopologyKey) + return i, nil } -func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FlexPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { - v := m.Options[string(keysForOptions[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForOptions[iNdEx]) - copy(dAtA[i:], keysForOptions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) - i-- + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.RequiredDuringSchedulingIgnoredDuringExecution { dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - if m.SecretRef != nil { - { - size, err := 
m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x1a } - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x12 - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FlexVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for iNdEx := len(keysForOptions) - 1; iNdEx >= 0; iNdEx-- { - v := m.Options[string(keysForOptions[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForOptions[iNdEx]) - copy(dAtA[i:], keysForOptions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOptions[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a - } + dAtA[i] = 0x8 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i-- - if m.ReadOnly { + i++ + dAtA[i] = 0x10 + i++ + if m.Stdout { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- + i++ + dAtA[i] = 0x18 + i++ + if m.Stderr { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ dAtA[i] = 0x20 - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + i++ + if m.TTY { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x12 - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + return i, nil } -func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*FlockerVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.DatasetUUID) - copy(dAtA[i:], m.DatasetUUID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) - i-- - dAtA[i] = 0x12 - i -= len(m.DatasetName) - copy(dAtA[i:], m.DatasetName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) + n143, err := m.LastProbeTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n143 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n144, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n144 + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } -func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GCEPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - i-- - dAtA[i] = 0x18 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x12 - i -= len(m.PDName) - copy(dAtA[i:], m.PDName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if len(m.Searches) > 0 { + for _, s := range m.Searches { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GitRepoVolumeSource) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GitRepoVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Directory) - copy(dAtA[i:], m.Directory) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) - i-- - dAtA[i] = 0x1a - i -= len(m.Revision) - copy(dAtA[i:], m.Revision) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) - i-- - dAtA[i] = 0x12 - i -= len(m.Repository) - copy(dAtA[i:], m.Repository) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) + i += copy(dAtA[i:], *m.Value) + } + return i, nil } -func (m *GlusterfsPersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GlusterfsPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GlusterfsPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.EndpointsNamespace != nil { - i -= len(*m.EndpointsNamespace) - copy(dAtA[i:], *m.EndpointsNamespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.EndpointsNamespace))) - i-- - dAtA[i] = 0x22 + dAtA[i] = 0x8 + i++ + if m.Stdin { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i-- - if m.ReadOnly { + i++ + dAtA[i] = 0x10 + i++ + if m.Stdout { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- + i++ dAtA[i] = 0x18 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - i -= len(m.EndpointsName) - copy(dAtA[i:], m.EndpointsName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GlusterfsVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i-- - if m.ReadOnly { + i++ + if m.Stderr { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - i -= len(m.EndpointsName) - copy(dAtA[i:], m.EndpointsName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + i++ + dAtA[i] = 0x20 + i++ + if m.TTY { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 } - return dAtA[:n], nil -} - -func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPGetAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.HTTPHeaders) > 0 { - for iNdEx := len(m.HTTPHeaders) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HTTPHeaders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + if len(m.Command) > 0 { + for _, s := range m.Command { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x2a - } - } - i -= len(m.Scheme) - copy(dAtA[i:], m.Scheme) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) - i-- - dAtA[i] = 0x22 - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0x1a - { - size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { +func (m *PodList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Handler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n145, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Handler) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Handler) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.TCPSocket != nil { - { - size, err := m.TCPSocket.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.HTTPGet != nil { - { - size, err := m.HTTPGet.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + return 0, err } - if m.Exec != nil { - { - size, err := m.Exec.MarshalToSizedBuffer(dAtA[:i]) + 
i += n145 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *HostAlias) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostAlias) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Hostnames) > 0 { - for iNdEx := len(m.Hostnames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Hostnames[iNdEx]) - copy(dAtA[i:], m.Hostnames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostnames[iNdEx]))) - i-- - dAtA[i] = 0x12 + i += n } } - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostPathVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Type != nil { - i -= len(*m.Type) - copy(dAtA[i:], *m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i-- - dAtA[i] = 0x12 - } - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ISCSIPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.InitiatorName != nil { - i -= len(*m.InitiatorName) - copy(dAtA[i:], *m.InitiatorName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i-- - dAtA[i] = 0x62 - } - i-- - if m.SessionCHAPAuth { + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) + i += copy(dAtA[i:], m.Container) + dAtA[i] = 0x10 + i++ + if m.Follow { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x58 - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - i-- - if m.DiscoveryCHAPAuth { + i++ + dAtA[i] = 0x18 + i++ + if m.Previous { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x40 - if len(m.Portals) > 0 { - for iNdEx := len(m.Portals) - 1; iNdEx >= 0; 
iNdEx-- { - i -= len(m.Portals[iNdEx]) - copy(dAtA[i:], m.Portals[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) - i-- - dAtA[i] = 0x3a + i++ + if m.SinceSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SinceTime.Size())) + n146, err := m.SinceTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n146 } - i-- - if m.ReadOnly { + dAtA[i] = 0x30 + i++ + if m.Timestamps { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x30 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x2a - i -= len(m.ISCSIInterface) - copy(dAtA[i:], m.ISCSIInterface) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i-- - dAtA[i] = 0x22 - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - i-- - dAtA[i] = 0x18 - i -= len(m.IQN) - copy(dAtA[i:], m.IQN) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i-- - dAtA[i] = 0x12 - i -= len(m.TargetPortal) - copy(dAtA[i:], m.TargetPortal) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + if m.TailLines != nil { + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) + } + return i, nil } -func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ISCSIVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.InitiatorName != nil { - i -= len(*m.InitiatorName) - copy(dAtA[i:], *m.InitiatorName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i-- - dAtA[i] = 0x62 - } - i-- - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - i-- - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x40 - if len(m.Portals) > 0 { - for iNdEx := len(m.Portals) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Portals[iNdEx]) - copy(dAtA[i:], m.Portals[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Portals[iNdEx]))) - i-- - dAtA[i] = 0x3a + if len(m.Ports) > 0 { + for _, num := range m.Ports { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(num)) } } - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x2a - i -= len(m.ISCSIInterface) - copy(dAtA[i:], m.ISCSIInterface) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i-- - dAtA[i] = 0x22 - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - i-- - dAtA[i] = 0x18 - i -= len(m.IQN) - copy(dAtA[i:], m.IQN) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i-- - dAtA[i] = 0x12 - i -= len(m.TargetPortal) - copy(dAtA[i:], m.TargetPortal) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *KeyToPath) Marshal() (dAtA []byte, err error) { +func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *KeyToPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Mode != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - i-- - dAtA[i] = 0x18 - } - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil } -func (m *Lifecycle) Marshal() (dAtA []byte, err error) { +func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Lifecycle) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.PreStop != nil { - { - size, err := m.PreStop.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.PostStart != nil { - { - size, err := m.PostStart.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) + i += copy(dAtA[i:], m.ConditionType) + return i, nil } -func (m *LimitRange) Marshal() (dAtA []byte, err error) { +func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LimitRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.SELinuxOptions != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n147, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n147 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.RunAsUser != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) } - return dAtA[:n], nil -} - -func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LimitRangeItem) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.MaxLimitRequestRatio) > 0 { - keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) - for k := range m.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - for iNdEx := len(keysForMaxLimitRequestRatio) - 1; iNdEx >= 0; iNdEx-- { - v := m.MaxLimitRequestRatio[ResourceName(keysForMaxLimitRequestRatio[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForMaxLimitRequestRatio[iNdEx]) - copy(dAtA[i:], keysForMaxLimitRequestRatio[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMaxLimitRequestRatio[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x32 + if m.RunAsNonRoot != nil { + dAtA[i] = 0x18 + i++ + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - if len(m.DefaultRequest) > 0 { - keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) - for k := range m.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - for iNdEx := len(keysForDefaultRequest) - 1; iNdEx >= 0; iNdEx-- { - v := m.DefaultRequest[ResourceName(keysForDefaultRequest[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForDefaultRequest[iNdEx]) - copy(dAtA[i:], keysForDefaultRequest[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefaultRequest[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2a + if len(m.SupplementalGroups) > 0 { + for _, num := range m.SupplementalGroups { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(num)) } } - if len(m.Default) > 0 { - keysForDefault := make([]string, 0, len(m.Default)) - for k := range m.Default { - keysForDefault = append(keysForDefault, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - for iNdEx := len(keysForDefault) - 1; iNdEx >= 0; iNdEx-- { - v := m.Default[ResourceName(keysForDefault[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForDefault[iNdEx]) - copy(dAtA[i:], keysForDefault[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDefault[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x22 - } + if m.FSGroup != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) } - if len(m.Min) > 0 { - keysForMin := make([]string, 0, len(m.Min)) - for k := range m.Min { - keysForMin = append(keysForMin, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - for iNdEx := len(keysForMin) - 1; iNdEx >= 0; iNdEx-- { - v := m.Min[ResourceName(keysForMin[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForMin[iNdEx]) - copy(dAtA[i:], keysForMin[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMin[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } + if m.RunAsGroup != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) } - if len(m.Max) > 0 { - keysForMax := make([]string, 0, len(m.Max)) - for k := range m.Max { - keysForMax = append(keysForMax, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - for iNdEx := len(keysForMax) - 1; iNdEx >= 0; iNdEx-- { - v := m.Max[ResourceName(keysForMax[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Sysctls) > 0 { + for _, msg := range m.Sysctls { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForMax[iNdEx]) - copy(dAtA[i:], keysForMax[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMax[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 + i += n } } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { +func (m *PodSignature) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LimitRangeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.PodController != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodController.Size())) + n148, err := m.PodController.MarshalTo(dAtA[i:]) if err != nil { 
return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n148 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { +func (m *PodSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LimitRangeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Limits) > 0 { - for iNdEx := len(m.Limits) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Limits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Containers) > 0 { + for _, msg := range m.Containers { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n + } + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) + i += copy(dAtA[i:], m.RestartPolicy) + if m.TerminationGracePeriodSeconds != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) + } + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) + i += copy(dAtA[i:], m.DNSPolicy) + if len(m.NodeSelector) > 0 { + keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) + for k := range m.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + for _, k := range keysForNodeSelector { + dAtA[i] = 0x3a + i++ + v := m.NodeSelector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) + i += copy(dAtA[i:], m.ServiceAccountName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) + i += copy(dAtA[i:], m.DeprecatedServiceAccount) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + dAtA[i] = 0x58 + i++ + if m.HostNetwork { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x60 + i++ + if m.HostPID { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x68 + i++ + if m.HostIPC { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SecurityContext != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.SecurityContext.Size())) + n149, err := m.SecurityContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n149 + } + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) + i += copy(dAtA[i:], m.Hostname) + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) + i += copy(dAtA[i:], m.Subdomain) + if m.Affinity != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Affinity.Size())) + n150, err := m.Affinity.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n150 + } + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) + i += copy(dAtA[i:], m.SchedulerName) + if len(m.InitContainers) > 0 { + for _, msg := range m.InitContainers { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.AutomountServiceAccountToken != nil { + dAtA[i] = 0xa8 + i++ + dAtA[i] = 0x1 + i++ + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Tolerations) > 0 { + for _, msg := range m.Tolerations { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.HostAliases) > 0 { + for _, msg := range m.HostAliases { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) + i += copy(dAtA[i:], m.PriorityClassName) + if m.Priority != nil { + dAtA[i] = 0xc8 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) + } + if m.DNSConfig != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DNSConfig.Size())) + n151, err := m.DNSConfig.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n151 + } + if m.ShareProcessNamespace != nil { + dAtA[i] = 0xd8 + i++ + dAtA[i] = 0x1 + i++ + if *m.ShareProcessNamespace { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.ReadinessGates) > 0 { + for _, msg := range m.ReadinessGates { + dAtA[i] = 0xe2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + if m.RuntimeClassName != nil { + dAtA[i] = 0xea + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) + i += copy(dAtA[i:], *m.RuntimeClassName) + } + return i, nil } -func (m *List) Marshal() (dAtA []byte, err error) { +func (m *PodStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *List) MarshalTo(dAtA 
[]byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) + i += copy(dAtA[i:], m.Phase) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.HostIP))) + i += copy(dAtA[i:], m.HostIP) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) + i += copy(dAtA[i:], m.PodIP) + if m.StartTime != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) + n152, err := m.StartTime.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n152 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if len(m.ContainerStatuses) > 0 { + for _, msg := range m.ContainerStatuses { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) + i += copy(dAtA[i:], m.QOSClass) + if len(m.InitContainerStatuses) > 0 { + for _, msg := range m.InitContainerStatuses { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) + i += copy(dAtA[i:], m.NominatedNodeName) + return i, nil } -func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { +func (m *PodStatusResult) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LoadBalancerIngress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodStatusResult) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Hostname) - copy(dAtA[i:], m.Hostname) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i-- - dAtA[i] = 0x12 - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n153, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + 
return 0, err + } + i += n153 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n154, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n154 + return i, nil } -func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { +func (m *PodTemplate) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LoadBalancerStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodTemplate) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Ingress) > 0 { - for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n155, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n155 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n156, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n156 + return i, nil } -func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { +func (m *PodTemplateList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodTemplateList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n157, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n157 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PodTemplateSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LocalVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PodTemplateSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.FSType != nil { - i -= len(*m.FSType) - copy(dAtA[i:], *m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i-- - dAtA[i] = 
0x12 - } - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n158, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n158 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n159, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n159 + return i, nil } -func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *PortworxVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NFSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PortworxVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeID))) + i += copy(dAtA[i:], m.VolumeID) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x18 + i++ if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x12 - i -= len(m.Server) - copy(dAtA[i:], m.Server) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + return i, nil } -func (m *Namespace) Marshal() (dAtA []byte, err error) { +func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { +func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.UID != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i += copy(dAtA[i:], *m.UID) + } + return i, nil +} + +func (m *PreferAvoidPodsEntry) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *Namespace) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PreferAvoidPodsEntry) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSignature.Size())) + n160, err := m.PodSignature.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n160 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EvictionTime.Size())) + n161, err := m.EvictionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n161 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } -func (m *NamespaceCondition) Marshal() (dAtA []byte, err error) { +func (m *PreferredSchedulingTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamespaceCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *PreferredSchedulingTerm) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x32 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x2a - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Preference.Size())) + n162, err := m.Preference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n162 + return i, nil } -func (m *NamespaceList) Marshal() (dAtA []byte, err error) { +func (m *Probe) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamespaceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Probe) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Handler.Size())) + n163, err := m.Handler.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n163 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.InitialDelaySeconds)) + dAtA[i] = 
0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeoutSeconds)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PeriodSeconds)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SuccessThreshold)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FailureThreshold)) + return i, nil } -func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { +func (m *ProjectedVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamespaceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ProjectedVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Finalizers) > 0 { - for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Finalizers[iNdEx]) - copy(dAtA[i:], m.Finalizers[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) - i-- + if len(m.Sources) > 0 { + for _, msg := range m.Sources { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + if m.DefaultMode != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) + } + return i, nil } -func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { +func (m *QuobyteVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NamespaceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *QuobyteVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Registry))) + i += copy(dAtA[i:], m.Registry) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volume))) + i += copy(dAtA[i:], m.Volume) + dAtA[i] = 0x18 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + return i, nil } -func (m *Node) Marshal() (dAtA []byte, err error) { +func (m *RBDPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } 
-func (m *Node) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Node) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *RBDPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i += copy(dAtA[i:], m.RBDImage) dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i += copy(dAtA[i:], m.RBDPool) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i += copy(dAtA[i:], m.RadosUser) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i += copy(dAtA[i:], m.Keyring) + if m.SecretRef != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n164, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n164 } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x40 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + return i, nil } -func (m *NodeAddress) Marshal() (dAtA []byte, err error) { +func (m *RBDVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeAddress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *RBDVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Address) - copy(dAtA[i:], m.Address) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) - i-- + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDImage))) + i += copy(dAtA[i:], m.RBDImage) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RBDPool))) + i += copy(dAtA[i:], m.RBDPool) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.RadosUser))) + i += 
copy(dAtA[i:], m.RadosUser) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Keyring))) + i += copy(dAtA[i:], m.Keyring) + if m.SecretRef != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n165, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n165 + } + dAtA[i] = 0x40 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } -func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { +func (m *RangeAllocation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *RangeAllocation) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n166, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - { - size, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + i += n166 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Range))) + i += copy(dAtA[i:], m.Range) + if m.Data != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) } - return len(dAtA) - i, nil + return i, nil } -func (m *NodeCondition) Marshal() (dAtA []byte, err error) { +func (m *ReplicationController) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ReplicationController) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x32 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x2a - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n167, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 - { - size, err := 
m.LastHeartbeatTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n167 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n168, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n168 dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n169, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n169 + return i, nil } -func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { +func (m *ReplicationControllerCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeConfigSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ReplicationControllerCondition) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.ConfigMap != nil { - { - size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n170, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n170 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } -func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { +func (m *ReplicationControllerList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeConfigStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ReplicationControllerList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Error) - copy(dAtA[i:], m.Error) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i-- - dAtA[i] = 0x22 - if m.LastKnownGood != nil { - { - size, err := m.LastKnownGood.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Active != nil { - { - size, err := m.Active.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n171, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.Assigned != nil { - { - size, err := m.Assigned.MarshalToSizedBuffer(dAtA[:i]) + i += n171 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } -func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { +func (m *ReplicationControllerSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeDaemonEndpoints) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeDaemonEndpoints) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ReplicationControllerSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.KubeletEndpoint.MarshalToSizedBuffer(dAtA[:i]) + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.Template != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n172, err := m.Template.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n172 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil } -func (m *NodeList) Marshal() (dAtA []byte, err error) { +func (m *ReplicationControllerStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ReplicationControllerStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x8 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *NodeProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *ResourceFieldSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeProxyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ResourceFieldSelector) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerName))) + i += copy(dAtA[i:], m.ContainerName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Divisor.Size())) + n173, err := m.Divisor.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n173 + return i, nil } -func (m *NodeResources) Marshal() (dAtA []byte, err error) { +func (m *ResourceQuota) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeResources) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ResourceQuota) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { - v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForCapacity[iNdEx]) - copy(dAtA[i:], keysForCapacity[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + dAtA[i] 
= 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n174, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil -} - -func (m *NodeSelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + i += n174 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n175, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return dAtA[:n], nil -} - -func (m *NodeSelector) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.NodeSelectorTerms) > 0 { - for iNdEx := len(m.NodeSelectorTerms) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.NodeSelectorTerms[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } + i += n175 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n176, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n176 + return i, nil } -func (m *NodeSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *ResourceQuotaList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ResourceQuotaList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n177, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n177 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i -= len(m.Operator) - copy(dAtA[i:], m.Operator) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i-- - dAtA[i] = 0x12 - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *NodeSelectorTerm) Marshal() (dAtA []byte, err error) { +func (m *ResourceQuotaSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSelectorTerm) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeSelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ResourceQuotaSpec) MarshalTo(dAtA []byte) 
(int, error) { + var i int _ = i var l int _ = l - if len(m.MatchFields) > 0 { - for iNdEx := len(m.MatchFields) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for _, k := range keysForHard { + dAtA[i] = 0xa + i++ + v := m.Hard[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n178, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n178 } } - if len(m.MatchExpressions) > 0 { - for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0xa + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.ScopeSelector != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScopeSelector.Size())) + n179, err := m.ScopeSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n179 } - return len(dAtA) - i, nil + return i, nil } -func (m *NodeSpec) Marshal() (dAtA []byte, err error) { +func (m *ResourceQuotaStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ResourceQuotaStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.PodCIDRs) > 0 { - for iNdEx := len(m.PodCIDRs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PodCIDRs[iNdEx]) - copy(dAtA[i:], m.PodCIDRs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDRs[iNdEx]))) - i-- - dAtA[i] = 0x3a + if len(m.Hard) > 0 { + keysForHard := make([]string, 0, len(m.Hard)) + for k := range m.Hard { + keysForHard = append(keysForHard, string(k)) } - } - if m.ConfigSource != nil { - { - size, err := m.ConfigSource.MarshalToSizedBuffer(dAtA[:i]) + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + for _, k := range keysForHard { + dAtA[i] = 0xa + i++ + v := m.Hard[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64((&v).Size())) + n180, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n180 } - i-- - dAtA[i] = 0x32 } - if len(m.Taints) > 0 { - for iNdEx := len(m.Taints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Taints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Used) > 0 { + keysForUsed := make([]string, 0, len(m.Used)) + for k := range m.Used { + keysForUsed = append(keysForUsed, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + for _, k := range keysForUsed { + dAtA[i] = 0x12 + i++ + v := m.Used[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n181, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n181 } } - i-- - if m.Unschedulable { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i -= len(m.ProviderID) - copy(dAtA[i:], m.ProviderID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProviderID))) - i-- - dAtA[i] = 0x1a - i -= len(m.DoNotUseExternalID) - copy(dAtA[i:], m.DoNotUseExternalID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DoNotUseExternalID))) - i-- - dAtA[i] = 0x12 - i -= len(m.PodCIDR) - copy(dAtA[i:], m.PodCIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodCIDR))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *NodeStatus) Marshal() (dAtA []byte, err error) { +func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Config != nil { - { - size, err := m.Config.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Limits) > 0 { + keysForLimits := make([]string, 0, len(m.Limits)) + for k := range m.Limits { + keysForLimits = append(keysForLimits, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + for _, k := range keysForLimits { + dAtA[i] = 0xa + i++ + v := m.Limits[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n182, err := (&v).MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if len(m.VolumesAttached) > 0 { - for iNdEx := len(m.VolumesAttached) - 1; iNdEx >= 0; iNdEx-- { - { - size, err 
:= m.VolumesAttached[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - } - if len(m.VolumesInUse) > 0 { - for iNdEx := len(m.VolumesInUse) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.VolumesInUse[iNdEx]) - copy(dAtA[i:], m.VolumesInUse[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumesInUse[iNdEx]))) - i-- - dAtA[i] = 0x4a - } - } - if len(m.Images) > 0 { - for iNdEx := len(m.Images) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Images[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - { - size, err := m.NodeInfo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - { - size, err := m.DaemonEndpoints.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 - if len(m.Addresses) > 0 { - for iNdEx := len(m.Addresses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Addresses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - } - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + i += n182 } } - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- - dAtA[i] = 0x1a - if len(m.Allocatable) > 0 { - keysForAllocatable := make([]string, 0, len(m.Allocatable)) - for k := range m.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) + if len(m.Requests) > 0 { + keysForRequests := make([]string, 0, len(m.Requests)) + for k := range m.Requests { + keysForRequests = append(keysForRequests, string(k)) } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - for iNdEx := len(keysForAllocatable) - 1; iNdEx >= 0; iNdEx-- { - v := m.Allocatable[ResourceName(keysForAllocatable[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + for _, k := range keysForRequests { dAtA[i] = 0x12 - i -= len(keysForAllocatable[iNdEx]) - copy(dAtA[i:], keysForAllocatable[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAllocatable[iNdEx]))) - i-- + i++ + v := m.Requests[ResourceName(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 - } - } - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for 
iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { - v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n183, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - i -= len(keysForCapacity[iNdEx]) - copy(dAtA[i:], keysForCapacity[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa + i += n183 } } - return len(dAtA) - i, nil + return i, nil } -func (m *NodeSystemInfo) Marshal() (dAtA []byte, err error) { +func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *NodeSystemInfo) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NodeSystemInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Architecture) - copy(dAtA[i:], m.Architecture) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Architecture))) - i-- - dAtA[i] = 0x52 - i -= len(m.OperatingSystem) - copy(dAtA[i:], m.OperatingSystem) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OperatingSystem))) - i-- - dAtA[i] = 0x4a - i -= len(m.KubeProxyVersion) - copy(dAtA[i:], m.KubeProxyVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeProxyVersion))) - i-- - dAtA[i] = 0x42 - i -= len(m.KubeletVersion) - copy(dAtA[i:], m.KubeletVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KubeletVersion))) - i-- - dAtA[i] = 0x3a - i -= len(m.ContainerRuntimeVersion) - copy(dAtA[i:], m.ContainerRuntimeVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerRuntimeVersion))) - i-- - dAtA[i] = 0x32 - i -= len(m.OSImage) - copy(dAtA[i:], m.OSImage) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.OSImage))) - i-- - dAtA[i] = 0x2a - i -= len(m.KernelVersion) - copy(dAtA[i:], m.KernelVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.KernelVersion))) - i-- - dAtA[i] = 0x22 - i -= len(m.BootID) - copy(dAtA[i:], m.BootID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.BootID))) - i-- - dAtA[i] = 0x1a - i -= len(m.SystemUUID) - copy(dAtA[i:], m.SystemUUID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SystemUUID))) - i-- - dAtA[i] = 0x12 - i -= len(m.MachineID) - copy(dAtA[i:], m.MachineID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MachineID))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) + i += copy(dAtA[i:], m.User) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) + i += copy(dAtA[i:], m.Role) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) + i += copy(dAtA[i:], m.Level) + return i, nil } -func (m *ObjectFieldSelector) Marshal() (dAtA []byte, err error) { +func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, 
err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectFieldSelector) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectFieldSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.FieldPath) - copy(dAtA[i:], m.FieldPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i += copy(dAtA[i:], m.System) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n184, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n184 + } + dAtA[i] = 0x20 + i++ + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i += copy(dAtA[i:], m.ProtectionDomain) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i += copy(dAtA[i:], m.StoragePool) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i += copy(dAtA[i:], m.StorageMode) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x50 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } -func (m *ObjectReference) Marshal() (dAtA []byte, err error) { +func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.FieldPath) - copy(dAtA[i:], m.FieldPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldPath))) - i-- - dAtA[i] = 0x3a - i -= len(m.ResourceVersion) - copy(dAtA[i:], m.ResourceVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i-- - dAtA[i] = 0x32 - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x2a - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i 
= encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) + i += copy(dAtA[i:], m.Gateway) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) + i += copy(dAtA[i:], m.System) + if m.SecretRef != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n185, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n185 + } + dAtA[i] = 0x20 + i++ + if m.SSLEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) + i += copy(dAtA[i:], m.ProtectionDomain) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) + i += copy(dAtA[i:], m.StoragePool) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) + i += copy(dAtA[i:], m.StorageMode) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x50 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } -func (m *PersistentVolume) Marshal() (dAtA []byte, err error) { +func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PersistentVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return i, nil +} + +func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - i-- + return dAtA[:n], nil +} + +func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) + i += copy(dAtA[i:], m.ScopeName) dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *PersistentVolumeClaim) Marshal() (dAtA []byte, err error) { +func (m *Secret) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaim) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PersistentVolumeClaim) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Secret) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n186, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n186 + if len(m.Data) > 0 { + keysForData := make([]string, 0, len(m.Data)) + for k := range m.Data { + keysForData = append(keysForData, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + for _, k := range keysForData { + dAtA[i] = 0x12 + i++ + v := m.Data[string(k)] + byteSize := 0 + if v != nil { + byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if len(m.StringData) > 0 { + keysForStringData := make([]string, 0, len(m.StringData)) + for k := range m.StringData { + keysForStringData = append(keysForStringData, string(k)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + for _, k := range keysForStringData { + dAtA[i] = 0x22 + i++ + v := m.StringData[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *PersistentVolumeClaimCondition) Marshal() (dAtA []byte, err error) { +func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimCondition) MarshalTo(dAtA []byte) (int, error) { +func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n187, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n187 + if m.Optional != nil { + dAtA[i] = 0x10 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolumeClaimCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x32 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x2a - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n188, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 - { - size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n188 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + if m.Optional != nil { + dAtA[i] = 0x18 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *PersistentVolumeClaimList) Marshal() (dAtA []byte, err error) { +func (m *SecretList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimList) MarshalTo(dAtA []byte) (int, error) { +func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n189, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n189 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *SecretProjection) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolumeClaimList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) + n190, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n190 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *PersistentVolumeClaimSpec) Marshal() (dAtA []byte, err error) { +func (m *SecretReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil +} + +func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolumeClaimSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.DataSource != nil { - { - size, err := m.DataSource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.VolumeMode != nil { - i -= len(*m.VolumeMode) - copy(dAtA[i:], *m.VolumeMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i-- - dAtA[i] = 0x32 - } - if m.StorageClassName != nil { - i -= len(*m.StorageClassName) - copy(dAtA[i:], *m.StorageClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.StorageClassName))) - i-- - dAtA[i] = 0x2a - } - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x22 } - i -= len(m.VolumeName) - copy(dAtA[i:], m.VolumeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i-- - dAtA[i] = 0x1a - { - size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + if m.DefaultMode != nil { + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) } - i-- - dAtA[i] = 0x12 - if len(m.AccessModes) > 0 { - for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AccessModes[iNdEx]) - copy(dAtA[i:], m.AccessModes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) - i-- - dAtA[i] = 0xa + if m.Optional != nil { + dAtA[i] = 0x20 + i++ + if *m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - return len(dAtA) - i, nil + return i, nil } -func (m *PersistentVolumeClaimStatus) Marshal() (dAtA []byte, err error) { +func (m *SecurityContext) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PersistentVolumeClaimStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + if m.Capabilities != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) + n191, err := m.Capabilities.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n191 } - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) + if m.Privileged != nil { + dAtA[i] = 0x10 + i++ + if *m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { - v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForCapacity[iNdEx]) - copy(dAtA[i:], keysForCapacity[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a + i++ + } + if m.SELinuxOptions != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n192, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n192 } - if len(m.AccessModes) > 0 { - for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AccessModes[iNdEx]) - copy(dAtA[i:], m.AccessModes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) - i-- - dAtA[i] = 0x12 + if m.RunAsUser != nil { + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) + } + if m.RunAsNonRoot != nil { + dAtA[i] = 0x28 + i++ + if *m.RunAsNonRoot { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- - dAtA[i] = 0xa - return 
len(dAtA) - i, nil + if m.ReadOnlyRootFilesystem != nil { + dAtA[i] = 0x30 + i++ + if *m.ReadOnlyRootFilesystem { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x38 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.RunAsGroup != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) + } + if m.ProcMount != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) + i += copy(dAtA[i:], *m.ProcMount) + } + return i, nil } -func (m *PersistentVolumeClaimVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *SerializedReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeClaimVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PersistentVolumeClaimVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i -= len(m.ClaimName) - copy(dAtA[i:], m.ClaimName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClaimName))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) + n193, err := m.Reference.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n193 + return i, nil } -func (m *PersistentVolumeList) Marshal() (dAtA []byte, err error) { +func (m *Service) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PersistentVolumeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n194, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n194 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n195, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n195 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n196, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n196 + return i, nil } -func (m *PersistentVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { size := 
m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.CSI != nil { - { - size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n197, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.StorageOS != nil { - { - size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) + i += n197 + if len(m.Secrets) > 0 { + for _, msg := range m.Secrets { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa } - if m.Local != nil { - { - size, err := m.Local.MarshalToSizedBuffer(dAtA[:i]) + if len(m.ImagePullSecrets) > 0 { + for _, msg := range m.ImagePullSecrets { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 } - if m.ScaleIO != nil { - { - size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.AutomountServiceAccountToken != nil { + dAtA[i] = 0x20 + i++ + if *m.AutomountServiceAccountToken { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a + i++ } - if m.PortworxVolume != nil { - { - size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 + return i, nil +} + +func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.PhotonPersistentDisk != nil { - { - size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a + return dAtA[:n], nil +} + +func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n198, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if m.AzureDisk != nil { - { - size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) + i += n198 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - 
dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 } - if m.Quobyte != nil { - { - size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) + return i, nil +} + +func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) + i += copy(dAtA[i:], m.Audience) + if m.ExpirationSeconds != nil { + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) + } + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *ServiceList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n199, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n199 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x7a - } - if m.VsphereVolume != nil { - { - size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if m.AzureFile != nil { - { - size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - if m.FlexVolume != nil { - { - size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - if m.Flocker != nil { - { - size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if m.FC != nil { - { - size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.CephFS != nil { - { - size, err := m.CephFS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.Cinder != nil { - { - size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.ISCSI != nil { - { - size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.RBD != nil { - { - size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x32 } - if m.NFS != nil { - { - size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - if m.Glusterfs != nil { - { - size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.HostPath != nil { - { - size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.AWSElasticBlockStore != nil { - { - size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + return i, nil +} + +func (m *ServicePort) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if m.GCEPersistentDisk != nil { - { - size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + return dAtA[:n], nil +} + +func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) + i += copy(dAtA[i:], m.Protocol) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) + n200, err := m.TargetPort.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n200 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) + return i, nil } -func (m *PersistentVolumeSpec) Marshal() (dAtA []byte, err error) { +func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeSpec) MarshalTo(dAtA []byte) (int, error) { +func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + return i, nil +} + +func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PersistentVolumeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.NodeAffinity != nil { - { - size, err := m.NodeAffinity.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 
0x4a - } - if m.VolumeMode != nil { - i -= len(*m.VolumeMode) - copy(dAtA[i:], *m.VolumeMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeMode))) - i-- - dAtA[i] = 0x42 } - if len(m.MountOptions) > 0 { - for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.MountOptions[iNdEx]) - copy(dAtA[i:], m.MountOptions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) - i-- - dAtA[i] = 0x3a + if len(m.Selector) > 0 { + keysForSelector := make([]string, 0, len(m.Selector)) + for k := range m.Selector { + keysForSelector = append(keysForSelector, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + for _, k := range keysForSelector { + dAtA[i] = 0x12 + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i -= len(m.StorageClassName) - copy(dAtA[i:], m.StorageClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageClassName))) - i-- - dAtA[i] = 0x32 - i -= len(m.PersistentVolumeReclaimPolicy) - copy(dAtA[i:], m.PersistentVolumeReclaimPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PersistentVolumeReclaimPolicy))) - i-- - dAtA[i] = 0x2a - if m.ClaimRef != nil { - { - size, err := m.ClaimRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) + i += copy(dAtA[i:], m.ClusterIP) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x22 } - if len(m.AccessModes) > 0 { - for iNdEx := len(m.AccessModes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AccessModes[iNdEx]) - copy(dAtA[i:], m.AccessModes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AccessModes[iNdEx]))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) + i += copy(dAtA[i:], m.SessionAffinity) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) + i += copy(dAtA[i:], m.LoadBalancerIP) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + dAtA[i] = 0x4a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - { - size, err := m.PersistentVolumeSource.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) + i += copy(dAtA[i:], m.ExternalName) + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) + i += copy(dAtA[i:], m.ExternalTrafficPolicy) + dAtA[i] = 0x60 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) + dAtA[i] = 0x68 + i++ + if m.PublishNotReadyAddresses { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if m.SessionAffinityConfig != nil { + dAtA[i] = 0x72 + i++ + 
i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) + n201, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - if len(m.Capacity) > 0 { - keysForCapacity := make([]string, 0, len(m.Capacity)) - for k := range m.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - for iNdEx := len(keysForCapacity) - 1; iNdEx >= 0; iNdEx-- { - v := m.Capacity[ResourceName(keysForCapacity[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForCapacity[iNdEx]) - copy(dAtA[i:], keysForCapacity[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForCapacity[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } + i += n201 } - return len(dAtA) - i, nil + return i, nil } -func (m *PersistentVolumeStatus) Marshal() (dAtA []byte, err error) { +func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PersistentVolumeStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PersistentVolumeStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x1a - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - i -= len(m.Phase) - copy(dAtA[i:], m.Phase) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) + n202, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n202 + return i, nil } -func (m *PhotonPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { +func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PhotonPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PhotonPersistentDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x12 - i -= len(m.PdID) - copy(dAtA[i:], m.PdID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PdID))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.ClientIP != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) + n203, err := m.ClientIP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += 
n203 + } + return i, nil } -func (m *Pod) Marshal() (dAtA []byte, err error) { +func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Pod) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Pod) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i += copy(dAtA[i:], m.VolumeNamespace) dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + i++ + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n204, err := m.SecretRef.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n204 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *PodAffinity) Marshal() (dAtA []byte, err error) { +func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinity) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) + i += copy(dAtA[i:], m.VolumeName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) + i += copy(dAtA[i:], m.VolumeNamespace) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x20 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := 
m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + i++ + if m.SecretRef != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) + n205, err := m.SecretRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n205 } - return len(dAtA) - i, nil + return i, nil } -func (m *PodAffinityTerm) Marshal() (dAtA []byte, err error) { +func (m *Sysctl) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { +func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + return i, nil +} + +func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.TopologyKey) - copy(dAtA[i:], m.TopologyKey) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) - i-- - dAtA[i] = 0x1a - if len(m.Namespaces) > 0 { - for iNdEx := len(m.Namespaces) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Namespaces[iNdEx]) - copy(dAtA[i:], m.Namespaces[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespaces[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.LabelSelector != nil { - { - size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n206, err := m.Port.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n206 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + return i, nil } -func (m *PodAntiAffinity) Marshal() (dAtA []byte, err error) { +func (m *Taint) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAntiAffinity) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodAntiAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Taint) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for iNdEx := len(m.PreferredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PreferredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for iNdEx := len(m.RequiredDuringSchedulingIgnoredDuringExecution) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.RequiredDuringSchedulingIgnoredDuringExecution[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i += copy(dAtA[i:], m.Effect) + if m.TimeAdded != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) + n207, err := m.TimeAdded.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n207 } - return len(dAtA) - i, nil + return i, nil } -func (m *PodAttachOptions) Marshal() (dAtA []byte, err error) { +func (m *Toleration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodAttachOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodAttachOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Container) - copy(dAtA[i:], m.Container) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i-- - dAtA[i] = 0x2a - i-- - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i-- - if m.Stderr { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i-- - if m.Stdout { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i-- - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) + i += copy(dAtA[i:], m.Effect) + if m.TolerationSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } -func (m *PodCondition) Marshal() (dAtA []byte, err error) { +func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] 
= 0x32 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x2a - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - { - size, err := m.LastProbeTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *PodDNSConfig) Marshal() (dAtA []byte, err error) { +func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodDNSConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Options) > 0 { - for iNdEx := len(m.Options) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Options[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.Searches) > 0 { - for iNdEx := len(m.Searches) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Searches[iNdEx]) - copy(dAtA[i:], m.Searches[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Searches[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Nameservers) > 0 { - for iNdEx := len(m.Nameservers) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Nameservers[iNdEx]) - copy(dAtA[i:], m.Nameservers[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Nameservers[iNdEx]))) - i-- + if len(m.MatchLabelExpressions) > 0 { + for _, msg := range m.MatchLabelExpressions { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } -func (m *PodDNSConfigOption) Marshal() (dAtA []byte, err error) { +func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodDNSConfigOption) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodDNSConfigOption) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.Value != nil { - i 
-= len(*m.Value) - copy(dAtA[i:], *m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Value))) - i-- - dAtA[i] = 0x12 + if m.APIGroup != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) + i += copy(dAtA[i:], *m.APIGroup) } - i -= len(m.Name) - copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += copy(dAtA[i:], m.Name) + return i, nil } -func (m *PodExecOptions) Marshal() (dAtA []byte, err error) { +func (m *Volume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodExecOptions) MarshalTo(dAtA []byte) (int, error) { +func (m *Volume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) + n208, err := m.VolumeSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n208 + return i, nil +} + +func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *PodExecOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Command) > 0 { - for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Command[iNdEx]) - copy(dAtA[i:], m.Command[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Command[iNdEx]))) - i-- - dAtA[i] = 0x32 - } - } - i -= len(m.Container) - copy(dAtA[i:], m.Container) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i-- - dAtA[i] = 0x2a - i-- - if m.TTY { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - i-- - if m.Stderr { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i-- - if m.Stdout { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - i-- - if m.Stdin { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) + i += copy(dAtA[i:], m.DevicePath) + return i, nil } -func (m *PodIP) Marshal() (dAtA []byte, err error) { +func (m *VolumeMount) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodIP) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i-- dAtA[i] = 0xa - return 
len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) + i += copy(dAtA[i:], m.MountPath) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) + i += copy(dAtA[i:], m.SubPath) + if m.MountPropagation != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) + i += copy(dAtA[i:], *m.MountPropagation) + } + return i, nil } -func (m *PodList) Marshal() (dAtA []byte, err error) { +func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.Required != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size())) + n209, err := m.Required.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n209 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *PodLogOptions) Marshal() (dAtA []byte, err error) { +func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodLogOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodLogOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- - if m.InsecureSkipTLSVerifyBackend { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - if m.LimitBytes != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.LimitBytes)) - i-- - dAtA[i] = 0x40 - } - if m.TailLines != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TailLines)) - i-- - dAtA[i] = 0x38 - } - i-- - if m.Timestamps { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - if m.SinceTime != nil { - { - size, err := m.SinceTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Secret != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n210, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x2a + i += n210 } - if m.SinceSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.SinceSeconds)) - i-- - dAtA[i] = 0x20 + if m.DownwardAPI != nil { + dAtA[i] = 0x12 + 
i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) + n211, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n211 } - i-- - if m.Previous { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.ConfigMap != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n212, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n212 } - i-- - dAtA[i] = 0x18 - i-- - if m.Follow { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.ServiceAccountToken != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) + n213, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n213 } - i-- - dAtA[i] = 0x10 - i -= len(m.Container) - copy(dAtA[i:], m.Container) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Container))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *PodPortForwardOptions) Marshal() (dAtA []byte, err error) { +func (m *VolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodPortForwardOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPortForwardOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - i = encodeVarintGenerated(dAtA, i, uint64(m.Ports[iNdEx])) - i-- - dAtA[i] = 0x8 + if m.HostPath != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) + n214, err := m.HostPath.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n214 + } + if m.EmptyDir != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) + n215, err := m.EmptyDir.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n215 + } + if m.GCEPersistentDisk != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) + n216, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n216 + } + if m.AWSElasticBlockStore != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) + n217, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n217 + } + if m.GitRepo != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) + n218, err := m.GitRepo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n218 + } + if m.Secret != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) + n219, err := m.Secret.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n219 + } + if m.NFS != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) + n220, err := m.NFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n220 + } + if m.ISCSI != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) + n221, err := m.ISCSI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n221 + } + if m.Glusterfs != nil { + dAtA[i] = 
0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) + n222, err := m.Glusterfs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n222 + } + if m.PersistentVolumeClaim != nil { + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) + n223, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n223 + } + if m.RBD != nil { + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) + n224, err := m.RBD.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n224 + } + if m.FlexVolume != nil { + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) + n225, err := m.FlexVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n225 + } + if m.Cinder != nil { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) + n226, err := m.Cinder.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n226 + } + if m.CephFS != nil { + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) + n227, err := m.CephFS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n227 + } + if m.Flocker != nil { + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) + n228, err := m.Flocker.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n228 + } + if m.DownwardAPI != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) + n229, err := m.DownwardAPI.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n229 + } + if m.FC != nil { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) + n230, err := m.FC.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n230 + } + if m.AzureFile != nil { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) + n231, err := m.AzureFile.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n231 + } + if m.ConfigMap != nil { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) + n232, err := m.ConfigMap.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n232 + } + if m.VsphereVolume != nil { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) + n233, err := m.VsphereVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n233 + } + if m.Quobyte != nil { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) + n234, err := m.Quobyte.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n234 + } + if m.AzureDisk != nil { + dAtA[i] = 0xb2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) + n235, err := m.AzureDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n235 + } + if m.PhotonPersistentDisk != nil { + dAtA[i] = 0xba + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) + n236, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n236 + } + if m.PortworxVolume != nil { + dAtA[i] = 0xc2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) + n237, err := 
m.PortworxVolume.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n237 + } + if m.ScaleIO != nil { + dAtA[i] = 0xca + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) + n238, err := m.ScaleIO.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n238 } - return len(dAtA) - i, nil + if m.Projected != nil { + dAtA[i] = 0xd2 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) + n239, err := m.Projected.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n239 + } + if m.StorageOS != nil { + dAtA[i] = 0xda + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) + n240, err := m.StorageOS.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n240 + } + return i, nil } -func (m *PodProxyOptions) Marshal() (dAtA []byte, err error) { +func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodProxyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) + i += copy(dAtA[i:], m.VolumePath) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) + i += copy(dAtA[i:], m.FSType) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) + i += copy(dAtA[i:], m.StoragePolicyName) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) + i += copy(dAtA[i:], m.StoragePolicyID) + return i, nil } -func (m *PodReadinessGate) Marshal() (dAtA []byte, err error) { +func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PodReadinessGate) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodReadinessGate) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.ConditionType) - copy(dAtA[i:], m.ConditionType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConditionType))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PodSecurityContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) + n241, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return dAtA[:n], nil + i += n241 + return i, nil } -func (m *PodSecurityContext) MarshalTo(dAtA []byte) (int, error) { - 
size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.Partition)) + n += 2 + return n } -func (m *PodSecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *Affinity) Size() (n int) { var l int _ = l - if m.FSGroupChangePolicy != nil { - i -= len(*m.FSGroupChangePolicy) - copy(dAtA[i:], *m.FSGroupChangePolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSGroupChangePolicy))) - i-- - dAtA[i] = 0x4a - } - if m.WindowsOptions != nil { - { - size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if len(m.Sysctls) > 0 { - for iNdEx := len(m.Sysctls) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Sysctls[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - } - if m.RunAsGroup != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - i-- - dAtA[i] = 0x30 - } - if m.FSGroup != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.FSGroup)) - i-- - dAtA[i] = 0x28 - } - if len(m.SupplementalGroups) > 0 { - for iNdEx := len(m.SupplementalGroups) - 1; iNdEx >= 0; iNdEx-- { - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups[iNdEx])) - i-- - dAtA[i] = 0x20 - } - } - if m.RunAsNonRoot != nil { - i-- - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - } - if m.RunAsUser != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) - i-- - dAtA[i] = 0x10 + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.PodAffinity != nil { + l = m.PodAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return len(dAtA) - i, nil -} - -func (m *PodSignature) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.PodAntiAffinity != nil { + l = m.PodAntiAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + return n } -func (m *PodSignature) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) +func (m *AttachedVolume) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *PodSignature) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *AvoidPods) Size() (n int) { var l int _ = l - if m.PodController != nil { - { - size, err := m.PodController.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.PreferAvoidPods) > 0 { + for _, e := range m.PreferAvoidPods { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return n } -func (m *PodSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *AzureDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.DiskName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DataDiskURI) + n += 1 + l + sovGenerated(uint64(l)) + if m.CachingMode != nil { + l = len(*m.CachingMode) + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + if m.FSType != nil { + l = len(*m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ReadOnly != nil { + n += 2 + } + if m.Kind != nil { + l = len(*m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *PodSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *AzureFilePersistentVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + if m.SecretNamespace != nil { + l = len(*m.SecretNamespace) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *PodSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *AzureFileVolumeSource) Size() (n int) { var l int _ = l - if len(m.EphemeralContainers) > 0 { - for iNdEx := len(m.EphemeralContainers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EphemeralContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x92 - } - } - if len(m.TopologySpreadConstraints) > 0 { - for iNdEx := len(m.TopologySpreadConstraints) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.TopologySpreadConstraints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x8a - } - } - if len(m.Overhead) > 0 { - keysForOverhead := make([]string, 0, len(m.Overhead)) - for k := range m.Overhead { - keysForOverhead = append(keysForOverhead, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) - for iNdEx := len(keysForOverhead) - 1; iNdEx >= 0; iNdEx-- { - v := m.Overhead[ResourceName(keysForOverhead[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForOverhead[iNdEx]) - copy(dAtA[i:], keysForOverhead[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForOverhead[iNdEx]))) - i-- - dAtA[i] = 0xa - i = 
encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x2 - i-- - dAtA[i] = 0x82 - } - } - if m.PreemptionPolicy != nil { - i -= len(*m.PreemptionPolicy) - copy(dAtA[i:], *m.PreemptionPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xfa - } - if m.EnableServiceLinks != nil { - i-- - if *m.EnableServiceLinks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xf0 - } - if m.RuntimeClassName != nil { - i -= len(*m.RuntimeClassName) - copy(dAtA[i:], *m.RuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RuntimeClassName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xea - } - if len(m.ReadinessGates) > 0 { - for iNdEx := len(m.ReadinessGates) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ReadinessGates[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe2 - } - } - if m.ShareProcessNamespace != nil { - i-- - if *m.ShareProcessNamespace { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd8 - } - if m.DNSConfig != nil { - { - size, err := m.DNSConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 - } - if m.Priority != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Priority)) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc8 - } - i -= len(m.PriorityClassName) - copy(dAtA[i:], m.PriorityClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PriorityClassName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 - if len(m.HostAliases) > 0 { - for iNdEx := len(m.HostAliases) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostAliases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - } - if len(m.Tolerations) > 0 { - for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - } - if m.AutomountServiceAccountToken != nil { - i-- - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa8 - } - if len(m.InitContainers) > 0 { - for iNdEx := len(m.InitContainers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.InitContainers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - i -= len(m.SchedulerName) - copy(dAtA[i:], m.SchedulerName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SchedulerName))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - if m.Affinity != nil { - { - size, err := m.Affinity.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - i -= len(m.Subdomain) - copy(dAtA[i:], m.Subdomain) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subdomain))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - i -= len(m.Hostname) - copy(dAtA[i:], m.Hostname) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - if len(m.ImagePullSecrets) > 0 { - for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - } - } - if m.SecurityContext != nil { - { - size, err := m.SecurityContext.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - i-- - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x68 - i-- - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x60 - i-- - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x58 - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x52 - i -= len(m.DeprecatedServiceAccount) - copy(dAtA[i:], m.DeprecatedServiceAccount) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DeprecatedServiceAccount))) - i-- - dAtA[i] = 0x4a - i -= len(m.ServiceAccountName) - copy(dAtA[i:], m.ServiceAccountName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceAccountName))) - i-- - dAtA[i] = 0x42 - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForNodeSelector[iNdEx]) - copy(dAtA[i:], keysForNodeSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x3a - } - } - i -= len(m.DNSPolicy) - copy(dAtA[i:], m.DNSPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DNSPolicy))) - i-- - dAtA[i] = 0x32 - if m.ActiveDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - i-- - dAtA[i] = 0x28 - } - if m.TerminationGracePeriodSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TerminationGracePeriodSeconds)) - i-- - dAtA[i] = 0x20 - } - i -= len(m.RestartPolicy) - copy(dAtA[i:], m.RestartPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RestartPolicy))) - i-- - dAtA[i] = 0x1a - if len(m.Containers) > 0 { - for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *PodStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ShareName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (m *PodStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Binding) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Target.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *PodStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *CSIPersistentVolumeSource) Size() (n int) { var l int _ = l - if len(m.EphemeralContainerStatuses) > 0 { - for iNdEx := len(m.EphemeralContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EphemeralContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a + l = len(m.Driver) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeHandle) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.VolumeAttributes) > 0 { + for k, v := range m.VolumeAttributes { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if len(m.PodIPs) > 0 { - for iNdEx := len(m.PodIPs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.PodIPs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } + if m.ControllerPublishSecretRef != nil { + l = m.ControllerPublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= len(m.NominatedNodeName) - copy(dAtA[i:], m.NominatedNodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NominatedNodeName))) - i-- - dAtA[i] = 0x5a - if len(m.InitContainerStatuses) > 0 { - for iNdEx := len(m.InitContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.InitContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } + if m.NodeStageSecretRef != nil { + l = m.NodeStageSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= len(m.QOSClass) - copy(dAtA[i:], m.QOSClass) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.QOSClass))) - i-- - dAtA[i] = 0x4a - if len(m.ContainerStatuses) > 0 { - for iNdEx := len(m.ContainerStatuses) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ContainerStatuses[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } + if m.NodePublishSecretRef != nil { + l = m.NodePublishSecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.StartTime != nil { - { - size, err := m.StartTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func (m *Capabilities) Size() (n int) { + var l int + _ = l + if len(m.Add) > 0 { + for _, s := range m.Add { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x3a } - i -= len(m.PodIP) - copy(dAtA[i:], m.PodIP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodIP))) - i-- - dAtA[i] = 0x32 - i 
[Vendored generated-code hunk, flattened during extraction. This portion of the patch updates a vendored Kubernetes core/v1 generated.pb.go: the gogo/protobuf-generated Marshal, MarshalTo, and MarshalToSizedBuffer methods are removed and the corresponding Size() methods are added for the core v1 types covered here (PodStatusResult, PodTemplate, PodTemplateList, PodTemplateSpec, PortworxVolumeSource, Preconditions, PreferAvoidPodsEntry, PreferredSchedulingTerm, Probe, ProjectedVolumeSource, QuobyteVolumeSource, RBDPersistentVolumeSource, RBDVolumeSource, RangeAllocation, ReplicationController and its Condition/List/Spec/Status, ResourceFieldSelector, ResourceQuota and its List/Spec/Status, ResourceRequirements, SELinuxOptions, ScaleIOPersistentVolumeSource, ScaleIOVolumeSource, ScopeSelector, ScopedResourceSelectorRequirement, Secret, SecretEnvSource, SecretKeySelector, SecretList, plus the added Size() implementations for the CephFS, Cinder, ClientIPConfig, Component, ConfigMap, Container, DaemonEndpoint, DownwardAPI, EmptyDir, Endpoint, Env, Event, Exec, FC, Flex, Flocker, GCE, GitRepo, Glusterfs, HTTP, Handler, HostAlias, HostPath, ISCSI, KeyToPath, Lifecycle, LimitRange, List, LoadBalancer, LocalObjectReference, LocalVolumeSource, NFS, Namespace, and Node families). The generated method bodies are unchanged in substance and are omitted here.]
len(dAtA) - i, nil -} - -func (m *SecretProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SecretProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Optional != nil { - i-- - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Addresses) > 0 { + for _, e := range m.Addresses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x20 } - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = m.DaemonEndpoints.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.NodeInfo.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Images) > 0 { + for _, e := range m.Images { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - { - size, err := m.LocalObjectReference.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.VolumesInUse) > 0 { + for _, s := range m.VolumesInUse { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SecretReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.VolumesAttached) > 0 { + for _, e := range m.VolumesAttached { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil + if m.Config != nil { + l = m.Config.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *SecretReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *NodeSystemInfo) Size() (n int) { + var l int + _ = l + l = len(m.MachineID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SystemUUID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.BootID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KernelVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OSImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ContainerRuntimeVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeletVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.KubeProxyVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.OperatingSystem) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Architecture) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *SecretReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *ObjectFieldSelector) Size() (n int) { var l int _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, 
nil + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *ObjectReference) Size() (n int) { + var l int + _ = l + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.UID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.APIVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ResourceVersion) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FieldPath) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PersistentVolume) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *SecretVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PersistentVolumeClaim) Size() (n int) { var l int _ = l - if m.Optional != nil { - i-- - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x20 - } - if m.DefaultMode != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - i-- - dAtA[i] = 0x18 - } - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - i -= len(m.SecretName) - copy(dAtA[i:], m.SecretName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *SecurityContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *PersistentVolumeClaimCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PersistentVolumeClaimList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (m *SecurityContext) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PersistentVolumeClaimSpec) Size() (n 
int) { var l int _ = l - if m.WindowsOptions != nil { - { - size, err := m.WindowsOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x52 - } - if m.ProcMount != nil { - i -= len(*m.ProcMount) - copy(dAtA[i:], *m.ProcMount) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) - i-- - dAtA[i] = 0x4a - } - if m.RunAsGroup != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - i-- - dAtA[i] = 0x40 } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 + l = m.Resources.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + if m.Selector != nil { + l = m.Selector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.ReadOnlyRootFilesystem != nil { - i-- - if *m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 + if m.StorageClassName != nil { + l = len(*m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) } - if m.RunAsNonRoot != nil { - i-- - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x28 + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) } - if m.RunAsUser != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) - i-- - dAtA[i] = 0x20 + if m.DataSource != nil { + l = m.DataSource.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func (m *PersistentVolumeClaimStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x1a } - if m.Privileged != nil { - i-- - if *m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0x10 } - if m.Capabilities != nil { - { - size, err := m.Capabilities.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *SerializedReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err } - return dAtA[:n], nil -} - -func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *SerializedReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { var l int _ = l - { - size, err := m.Reference.MarshalToSizedBuffer(dAtA[:i]) - if err != 
nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = len(m.ClaimName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (m *Service) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PersistentVolumeList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *Service) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PersistentVolumeSource) Size() (n int) { var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.Glusterfs != nil { + l = m.Glusterfs.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + if m.NFS != nil { + l = m.NFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ISCSI != nil { + l = m.ISCSI.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FC != nil { + l = m.FC.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.PortworxVolume != nil { + l = 
m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.Local != nil { + l = m.Local.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.CSI != nil { + l = m.CSI.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + return n } -func (m *ServiceAccount) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PersistentVolumeSpec) Size() (n int) { var l int _ = l - if m.AutomountServiceAccountToken != nil { - i-- - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Capacity) > 0 { + for k, v := range m.Capacity { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0x20 } - if len(m.ImagePullSecrets) > 0 { - for iNdEx := len(m.ImagePullSecrets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ImagePullSecrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + l = m.PersistentVolumeSource.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.AccessModes) > 0 { + for _, s := range m.AccessModes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - if len(m.Secrets) > 0 { - for iNdEx := len(m.Secrets) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Secrets[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + if m.ClaimRef != nil { + l = m.ClaimRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = len(m.PersistentVolumeReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageClassName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.VolumeMode != nil { + l = len(*m.VolumeMode) + n += 1 + l + sovGenerated(uint64(l)) + } + if m.NodeAffinity != nil { + l = m.NodeAffinity.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *PersistentVolumeStatus) Size() (n int) { + var l int + _ = l + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.PdID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ServiceAccountList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *Pod) Size() (n int) { var l int _ = l - if 
len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PodAffinity) Size() (n int) { + var l int + _ = l + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodAffinityTerm) Size() (n int) { + var l int + _ = l + if m.LabelSelector != nil { + l = m.LabelSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + if len(m.Namespaces) > 0 { + for _, s := range m.Namespaces { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.TopologyKey) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ServiceAccountTokenProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodAntiAffinity) Size() (n int) { var l int _ = l - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0x1a - if m.ExpirationSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) - i-- - dAtA[i] = 0x10 + if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - i -= len(m.Audience) - copy(dAtA[i:], m.Audience) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { + for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n } -func (m *ServiceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *PodAttachOptions) Size() (n int) { + var l int + _ = l + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PodCondition) Size() (n int) { + var l int + _ = l + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.LastProbeTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ServiceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodDNSConfig) Size() (n int) { var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if len(m.Nameservers) > 0 { + for _, s := range m.Nameservers { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Searches) > 0 { + for _, s := range m.Searches { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *ServicePort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil + return n } -func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PodDNSConfigOption) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if m.Value != nil { + l = len(*m.Value) + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *ServicePort) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodExecOptions) Size() (n int) { var l int _ = l - if m.AppProtocol != nil { - i -= len(*m.AppProtocol) - copy(dAtA[i:], *m.AppProtocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.AppProtocol))) - i-- - dAtA[i] = 0x32 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) - i-- - dAtA[i] = 0x28 - { - size, err := m.TargetPort.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + n += 2 + n += 2 + n += 2 + n += 2 + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x22 - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - i-- - dAtA[i] = 0x18 - i -= len(m.Protocol) - copy(dAtA[i:], m.Protocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil + return n } -func (m *ServiceProxyOptions) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PodLogOptions) Size() (n int) { + var l int + _ = l + l = len(m.Container) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + if m.SinceSeconds != nil { + n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + } + if m.SinceTime != nil { + l = m.SinceTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + if m.TailLines != nil { + n += 1 + sovGenerated(uint64(*m.TailLines)) + } + if m.LimitBytes != nil { + n += 1 + sovGenerated(uint64(*m.LimitBytes)) + } + return n } -func (m *ServiceProxyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodPortForwardOptions) Size() (n int) { var l int _ = l - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if len(m.Ports) > 0 { + for _, e := range m.Ports { + n += 1 + sovGenerated(uint64(e)) + } + } + return n } -func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *PodProxyOptions) Size() (n int) { + var l int + _ = l + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PodReadinessGate) Size() (n int) { + var l int + _ = l + l = len(m.ConditionType) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *ServiceSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodSecurityContext) Size() (n int) { var l int _ = l - if len(m.TopologyKeys) > 0 { - for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.TopologyKeys[iNdEx]) - copy(dAtA[i:], m.TopologyKeys[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.IPFamily != nil { - i -= len(*m.IPFamily) - copy(dAtA[i:], *m.IPFamily) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IPFamily))) - i-- - dAtA[i] = 0x7a + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) } - if m.SessionAffinityConfig != nil { - { - size, err := m.SessionAffinityConfig.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.RunAsNonRoot != nil { + n += 2 + } + if len(m.SupplementalGroups) > 0 { + for _, e := range m.SupplementalGroups { + n += 1 + sovGenerated(uint64(e)) } - i-- - dAtA[i] = 0x72 } - i-- - if m.PublishNotReadyAddresses { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.FSGroup != nil { + n += 1 + sovGenerated(uint64(*m.FSGroup)) } - i-- - dAtA[i] = 0x68 - i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) - i-- - dAtA[i] = 0x60 - i -= len(m.ExternalTrafficPolicy) - copy(dAtA[i:], m.ExternalTrafficPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) - i-- - dAtA[i] = 0x5a - i -= len(m.ExternalName) - copy(dAtA[i:], m.ExternalName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) - i-- - dAtA[i] = 0x52 - if len(m.LoadBalancerSourceRanges) > 0 { - for iNdEx := len(m.LoadBalancerSourceRanges) - 1; iNdEx >= 0; iNdEx-- { - i -= 
len(m.LoadBalancerSourceRanges[iNdEx]) - copy(dAtA[i:], m.LoadBalancerSourceRanges[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerSourceRanges[iNdEx]))) - i-- - dAtA[i] = 0x4a - } + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) } - i -= len(m.LoadBalancerIP) - copy(dAtA[i:], m.LoadBalancerIP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) - i-- - dAtA[i] = 0x42 - i -= len(m.SessionAffinity) - copy(dAtA[i:], m.SessionAffinity) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) - i-- - dAtA[i] = 0x3a - if len(m.ExternalIPs) > 0 { - for iNdEx := len(m.ExternalIPs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ExternalIPs[iNdEx]) - copy(dAtA[i:], m.ExternalIPs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalIPs[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x22 - i -= len(m.ClusterIP) - copy(dAtA[i:], m.ClusterIP) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) - i-- - dAtA[i] = 0x1a - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if len(m.Sysctls) > 0 { + for _, e := range m.Sysctls { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - return len(dAtA) - i, nil + return n } -func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *PodSignature) Size() (n int) { + var l int + _ = l + if m.PodController != nil { + l = m.PodController.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *ServiceStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodSpec) Size() (n int) { var l int _ = l - { - size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.Volumes) > 0 { + for _, e := range m.Volumes { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = 
e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SessionAffinityConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.ClientIP != nil { - { - size, err := m.ClientIP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.RestartPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if m.TerminationGracePeriodSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) + } + if m.ActiveDeadlineSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) + } + l = len(m.DNSPolicy) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.NodeSelector) > 0 { + for k, v := range m.NodeSelector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil -} - -func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.ServiceAccountName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DeprecatedServiceAccount) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + n += 2 + n += 2 + if m.SecurityContext != nil { + l = m.SecurityContext.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StorageOSPersistentVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x2a } - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + l = len(m.Hostname) + n += 2 + l + sovGenerated(uint64(l)) + l = len(m.Subdomain) + n += 2 + l + sovGenerated(uint64(l)) + if m.Affinity != nil { + l = m.Affinity.Size() + n += 2 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x20 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x1a - i -= len(m.VolumeNamespace) - copy(dAtA[i:], m.VolumeNamespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.VolumeName) - copy(dAtA[i:], m.VolumeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StorageOSVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.SchedulerName) + n += 2 + l + sovGenerated(uint64(l)) + if len(m.InitContainers) > 0 { + for _, e := range m.InitContainers { + l = e.Size() + n += 2 + l + 
sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + if m.AutomountServiceAccountToken != nil { + n += 3 + } + if len(m.Tolerations) > 0 { + for _, e := range m.Tolerations { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if len(m.HostAliases) > 0 { + for _, e := range m.HostAliases { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + l = len(m.PriorityClassName) + n += 2 + l + sovGenerated(uint64(l)) + if m.Priority != nil { + n += 2 + sovGenerated(uint64(*m.Priority)) + } + if m.DNSConfig != nil { + l = m.DNSConfig.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.ShareProcessNamespace != nil { + n += 3 + } + if len(m.ReadinessGates) > 0 { + for _, e := range m.ReadinessGates { + l = e.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + } + if m.RuntimeClassName != nil { + l = len(*m.RuntimeClassName) + n += 2 + l + sovGenerated(uint64(l)) + } + return n } -func (m *StorageOSVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodStatus) Size() (n int) { var l int _ = l - if m.SecretRef != nil { - { - size, err := m.SecretRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Phase) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x2a } - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.HostIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.PodIP) + n += 1 + l + sovGenerated(uint64(l)) + if m.StartTime != nil { + l = m.StartTime.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x20 - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x1a - i -= len(m.VolumeNamespace) - copy(dAtA[i:], m.VolumeNamespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i-- - dAtA[i] = 0x12 - i -= len(m.VolumeName) - copy(dAtA[i:], m.VolumeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Sysctl) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.ContainerStatuses) > 0 { + for _, e := range m.ContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + l = len(m.QOSClass) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.InitContainerStatuses) > 0 { + for _, e := range m.InitContainerStatuses { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + l = len(m.NominatedNodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Sysctl) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodStatusResult) Size() (n int) { var l int _ = l - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - 
copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PodTemplate) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *TCPSocketAction) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PodTemplateList) Size() (n int) { var l int _ = l - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Taint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err } - return dAtA[:n], nil + return n } -func (m *Taint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *PodTemplateSpec) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Taint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *PortworxVolumeSource) Size() (n int) { var l int _ = l - if m.TimeAdded != nil { - { - size, err := m.TimeAdded.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - i -= len(m.Effect) - copy(dAtA[i:], m.Effect) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i-- - dAtA[i] = 0x1a - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x12 - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = len(m.VolumeID) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (m *Toleration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *Preconditions) Size() (n int) { + var l int + _ = l + if m.UID != nil { + l = len(*m.UID) + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *Toleration) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - 
_ = i +func (m *PreferAvoidPodsEntry) Size() (n int) { var l int _ = l - if m.TolerationSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) - i-- - dAtA[i] = 0x28 - } - i -= len(m.Effect) - copy(dAtA[i:], m.Effect) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i-- - dAtA[i] = 0x22 - i -= len(m.Value) - copy(dAtA[i:], m.Value) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i-- - dAtA[i] = 0x1a - i -= len(m.Operator) - copy(dAtA[i:], m.Operator) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i-- - dAtA[i] = 0x12 - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = m.PodSignature.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.EvictionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil +func (m *PreferredSchedulingTerm) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.Preference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *Probe) Size() (n int) { + var l int + _ = l + l = m.Handler.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) + n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) + n += 1 + sovGenerated(uint64(m.PeriodSeconds)) + n += 1 + sovGenerated(uint64(m.SuccessThreshold)) + n += 1 + sovGenerated(uint64(m.FailureThreshold)) + return n } -func (m *TopologySelectorLabelRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *ProjectedVolumeSource) Size() (n int) { var l int _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- - dAtA[i] = 0x12 + if len(m.Sources) > 0 { + for _, e := range m.Sources { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } - return dAtA[:n], nil + return n } -func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *QuobyteVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Registry) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Volume) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Group) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *TopologySelectorTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m 
*RBDPersistentVolumeSource) Size() (n int) { var l int _ = l - if len(m.MatchLabelExpressions) > 0 { - for iNdEx := len(m.MatchLabelExpressions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchLabelExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } } - return len(dAtA) - i, nil -} - -func (m *TopologySpreadConstraint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *TopologySpreadConstraint) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + n += 2 + return n } -func (m *TopologySpreadConstraint) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *RBDVolumeSource) Size() (n int) { var l int _ = l - if m.LabelSelector != nil { - { - size, err := m.LabelSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.CephMonitors) > 0 { + for _, s := range m.CephMonitors { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x22 } - i -= len(m.WhenUnsatisfiable) - copy(dAtA[i:], m.WhenUnsatisfiable) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.WhenUnsatisfiable))) - i-- - dAtA[i] = 0x1a - i -= len(m.TopologyKey) - copy(dAtA[i:], m.TopologyKey) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKey))) - i-- - dAtA[i] = 0x12 - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSkew)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + l = len(m.RBDImage) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RBDPool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.RadosUser) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Keyring) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 2 + return n } -func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *RangeAllocation) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Range) + n += 1 + l + sovGenerated(uint64(l)) + if m.Data != nil { + l = len(m.Data) + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + return n } -func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ReplicationController) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + return n } -func (m *TypedLocalObjectReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *ReplicationControllerCondition) Size() (n int) { var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - if m.APIGroup != nil { - i -= len(*m.APIGroup) - copy(dAtA[i:], *m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Status) + n += 1 + l + sovGenerated(uint64(l)) + l = m.LastTransitionTime.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Reason) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *Volume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ReplicationControllerList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil -} - -func (m *Volume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *Volume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *ReplicationControllerSpec) Size() (n int) { var l int _ = l - { - size, err := m.VolumeSource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.Replicas != nil { + n += 1 + sovGenerated(uint64(*m.Replicas)) + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.Template != nil { + l = m.Template.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + n += 1 + sovGenerated(uint64(m.MinReadySeconds)) + return n } -func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ReplicationControllerStatus) Size() (n int) { + var l int + _ = l + n += 1 + sovGenerated(uint64(m.Replicas)) + n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) + n += 1 + sovGenerated(uint64(m.ObservedGeneration)) + n += 1 + sovGenerated(uint64(m.ReadyReplicas)) + n += 1 + sovGenerated(uint64(m.AvailableReplicas)) + if len(m.Conditions) > 0 { + for _, e := range m.Conditions { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil + return n } -func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ResourceFieldSelector) Size() (n int) { + var l int + _ = l + l = len(m.ContainerName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Resource) + n += 1 + l + 
sovGenerated(uint64(l)) + l = m.Divisor.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *VolumeDevice) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *ResourceQuota) Size() (n int) { var l int _ = l - i -= len(m.DevicePath) - copy(dAtA[i:], m.DevicePath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *VolumeMount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ResourceQuotaList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil + return n } -func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *ResourceQuotaSpec) Size() (n int) { + var l int + _ = l + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + if len(m.Scopes) > 0 { + for _, s := range m.Scopes { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.ScopeSelector != nil { + l = m.ScopeSelector.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *VolumeMount) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *ResourceQuotaStatus) Size() (n int) { var l int _ = l - i -= len(m.SubPathExpr) - copy(dAtA[i:], m.SubPathExpr) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPathExpr))) - i-- - dAtA[i] = 0x32 - if m.MountPropagation != nil { - i -= len(*m.MountPropagation) - copy(dAtA[i:], *m.MountPropagation) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) - i-- - dAtA[i] = 0x2a + if len(m.Hard) > 0 { + for k, v := range m.Hard { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - i -= len(m.SubPath) - copy(dAtA[i:], m.SubPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) - i-- - dAtA[i] = 0x22 - i -= len(m.MountPath) - copy(dAtA[i:], m.MountPath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) - i-- - dAtA[i] = 0x1a - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Used) > 0 { + for k, v := range m.Used { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ResourceRequirements) Size() (n int) { + var l int + _ = l + if len(m.Limits) > 0 { + for k, v := range m.Limits { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } } - return dAtA[:n], nil + if len(m.Requests) > 0 { + for k, v := range m.Requests { + _ = k + _ = v + l = v.Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n } -func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *SELinuxOptions) Size() (n int) { + var l int + _ = l + l = len(m.User) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Role) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Level) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *VolumeNodeAffinity) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *ScaleIOPersistentVolumeSource) Size() (n int) { var l int _ = l - if m.Required != nil { - { - size, err := m.Required.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return len(dAtA) - i, nil + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *ScaleIOVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.Gateway) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.System) + n += 1 + l + sovGenerated(uint64(l)) + if m.SecretRef != nil { + l = m.SecretRef.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + n += 2 + l = len(m.ProtectionDomain) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StoragePool) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.StorageMode) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.VolumeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.FSType) + n += 1 + l + sovGenerated(uint64(l)) + n += 2 + return n } -func (m *VolumeProjection) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *ScopeSelector) Size() (n int) { var l int _ = l - if m.ServiceAccountToken != nil { - { - size, err := m.ServiceAccountToken.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.MatchExpressions) > 0 { + for _, e := range m.MatchExpressions { + l = e.Size() + n += 1 + l + 
sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x22 } - if m.ConfigMap != nil { - { - size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func (m *ScopedResourceSelectorRequirement) Size() (n int) { + var l int + _ = l + l = len(m.ScopeName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Operator) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x1a } - if m.DownwardAPI != nil { - { - size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + return n +} + +func (m *Secret) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Data) > 0 { + for k, v := range m.Data { + _ = k + _ = v + l = 0 + if v != nil { + l = 1 + len(v) + sovGenerated(uint64(len(v))) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0x12 } - if m.Secret != nil { - { - size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.StringData) > 0 { + for k, v := range m.StringData { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return n } -func (m *VolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *SecretEnvSource) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 } - return dAtA[:n], nil + return n } -func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *SecretKeySelector) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + if m.Optional != nil { + n += 2 + } + return n } -func (m *VolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *SecretList) Size() (n int) { var l int _ = l - if m.CSI != nil { - { - size, err := m.CSI.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xe2 } - if m.StorageOS != nil { - { - size, err := m.StorageOS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func (m *SecretProjection) Size() (n int) { + var l int + _ = l + l = m.LocalObjectReference.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- 
- dAtA[i] = 0x1 - i-- - dAtA[i] = 0xda } - if m.Projected != nil { - { - size, err := m.Projected.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xd2 + if m.Optional != nil { + n += 2 } - if m.ScaleIO != nil { - { - size, err := m.ScaleIO.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + return n +} + +func (m *SecretReference) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Namespace) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *SecretVolumeSource) Size() (n int) { + var l int + _ = l + l = len(m.SecretName) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xca } - if m.PortworxVolume != nil { - { - size, err := m.PortworxVolume.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 + if m.DefaultMode != nil { + n += 1 + sovGenerated(uint64(*m.DefaultMode)) } - if m.PhotonPersistentDisk != nil { - { - size, err := m.PhotonPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba - } - if m.AzureDisk != nil { - { - size, err := m.AzureDisk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if m.Quobyte != nil { - { - size, err := m.Quobyte.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - if m.VsphereVolume != nil { - { - size, err := m.VsphereVolume.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - if m.ConfigMap != nil { - { - size, err := m.ConfigMap.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - if m.AzureFile != nil { - { - size, err := m.AzureFile.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 - } - if m.FC != nil { - { - size, err := m.FC.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - if m.DownwardAPI != nil { - { - size, err := m.DownwardAPI.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x82 - } - if m.Flocker != nil { - { - size, err := m.Flocker.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x7a - } - if m.CephFS != nil { - { - size, err := 
m.CephFS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x72 - } - if m.Cinder != nil { - { - size, err := m.Cinder.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } - if m.FlexVolume != nil { - { - size, err := m.FlexVolume.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - } - if m.RBD != nil { - { - size, err := m.RBD.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - } - if m.PersistentVolumeClaim != nil { - { - size, err := m.PersistentVolumeClaim.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x52 - } - if m.Glusterfs != nil { - { - size, err := m.Glusterfs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - if m.ISCSI != nil { - { - size, err := m.ISCSI.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - if m.NFS != nil { - { - size, err := m.NFS.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } - if m.Secret != nil { - { - size, err := m.Secret.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x32 + if m.Optional != nil { + n += 2 } - if m.GitRepo != nil { - { - size, err := m.GitRepo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a + return n +} + +func (m *SecurityContext) Size() (n int) { + var l int + _ = l + if m.Capabilities != nil { + l = m.Capabilities.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.AWSElasticBlockStore != nil { - { - size, err := m.AWSElasticBlockStore.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + if m.Privileged != nil { + n += 2 } - if m.GCEPersistentDisk != nil { - { - size, err := m.GCEPersistentDisk.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.SELinuxOptions != nil { + l = m.SELinuxOptions.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.EmptyDir != nil { - { - size, err := m.EmptyDir.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if m.RunAsUser != nil { + n += 1 + sovGenerated(uint64(*m.RunAsUser)) } - if m.HostPath != nil { - { - size, err := m.HostPath.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + if m.RunAsNonRoot != nil { + n += 2 } - return len(dAtA) - i, nil -} - -func (m 
*VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.ReadOnlyRootFilesystem != nil { + n += 2 } - return dAtA[:n], nil -} - -func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VsphereVirtualDiskVolumeSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.StoragePolicyID) - copy(dAtA[i:], m.StoragePolicyID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) - i-- - dAtA[i] = 0x22 - i -= len(m.StoragePolicyName) - copy(dAtA[i:], m.StoragePolicyName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) - i-- - dAtA[i] = 0x1a - i -= len(m.FSType) - copy(dAtA[i:], m.FSType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i-- - dAtA[i] = 0x12 - i -= len(m.VolumePath) - copy(dAtA[i:], m.VolumePath) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.AllowPrivilegeEscalation != nil { + n += 2 } - return dAtA[:n], nil -} - -func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WeightedPodAffinityTerm) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.PodAffinityTerm.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.RunAsGroup != nil { + n += 1 + sovGenerated(uint64(*m.RunAsGroup)) } - i-- - dAtA[i] = 0x12 - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *WindowsSecurityContextOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.ProcMount != nil { + l = len(*m.ProcMount) + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *WindowsSecurityContextOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return n } -func (m *WindowsSecurityContextOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *SerializedReference) Size() (n int) { var l int _ = l - if m.RunAsUserName != nil { - i -= len(*m.RunAsUserName) - copy(dAtA[i:], *m.RunAsUserName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.RunAsUserName))) - i-- - dAtA[i] = 0x1a - } - if m.GMSACredentialSpec != nil { - i -= len(*m.GMSACredentialSpec) - copy(dAtA[i:], *m.GMSACredentialSpec) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpec))) - i-- - dAtA[i] = 0x12 - } - if m.GMSACredentialSpecName != nil { - i -= len(*m.GMSACredentialSpecName) - copy(dAtA[i:], *m.GMSACredentialSpecName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.GMSACredentialSpecName))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + l = m.Reference.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func encodeVarintGenerated(dAtA []byte, 
offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *Service) Size() (n int) { var l int _ = l - l = len(m.VolumeID) + l = m.ObjectMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 return n } -func (m *Affinity) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceAccount) Size() (n int) { var l int _ = l - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Secrets) > 0 { + for _, e := range m.Secrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.PodAffinity != nil { - l = m.PodAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if len(m.ImagePullSecrets) > 0 { + for _, e := range m.ImagePullSecrets { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.PodAntiAffinity != nil { - l = m.PodAntiAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AutomountServiceAccountToken != nil { + n += 2 } return n } -func (m *AttachedVolume) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceAccountList) Size() (n int) { var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AvoidPods) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, e := range m.PreferAvoidPods { + if len(m.Items) > 0 { + for _, e := range m.Items { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -19162,314 +14053,229 @@ func (m *AvoidPods) Size() (n int) { return n } -func (m *AzureDiskVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceAccountTokenProjection) Size() (n int) { var l int _ = l - l = len(m.DiskName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DataDiskURI) + l = len(m.Audience) n += 1 + l + sovGenerated(uint64(l)) - if m.CachingMode != nil { - l = len(*m.CachingMode) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReadOnly != nil { - n += 2 - } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) + if m.ExpirationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) } + l = len(m.Path) + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *AzureFilePersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceList) Size() (n int) { var l int _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) + l = m.ListMeta.Size() n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretNamespace != nil { - l = len(*m.SecretNamespace) - n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } return n } -func (m *AzureFileVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServicePort) Size() (n int) { var l int _ = l - l = len(m.SecretName) + l = len(m.Name) n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.ShareName) + l = len(m.Protocol) n += 1 + l + sovGenerated(uint64(l)) - n += 2 + n += 1 + sovGenerated(uint64(m.Port)) + l = m.TargetPort.Size() + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.NodePort)) return n } -func (m *Binding) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceProxyOptions) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() + l = len(m.Path) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *CSIPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceSpec) Size() (n int) { var l int _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeHandle) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { + if len(m.Ports) > 0 { + for _, e := range m.Ports { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if len(m.Selector) > 0 { + for k, v := range m.Selector { _ = k _ = v mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - if m.ControllerPublishSecretRef != nil { - l = m.ControllerPublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NodeStageSecretRef != nil { - l = m.NodeStageSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ClusterIP) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Type) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.ExternalIPs) > 0 { + for _, s := range m.ExternalIPs { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + l = len(m.SessionAffinity) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.LoadBalancerIP) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.LoadBalancerSourceRanges) > 0 { + for _, s := range m.LoadBalancerSourceRanges { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) + } } - if m.ControllerExpandSecretRef != nil { - l = m.ControllerExpandSecretRef.Size() + l = len(m.ExternalName) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.ExternalTrafficPolicy) + n += 1 + l + sovGenerated(uint64(l)) + n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) + n += 2 + if m.SessionAffinityConfig != nil { + l = m.SessionAffinityConfig.Size() n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *CSIVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *ServiceStatus) Size() (n int) { var l int _ = l - l = len(m.Driver) + l = m.LoadBalancer.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.ReadOnly != nil { - n += 2 - } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.NodePublishSecretRef != nil { - l = m.NodePublishSecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *Capabilities) Size() (n int) { - if m == nil { - return 0 - } +func (m *SessionAffinityConfig) Size() (n int) { var l int _ = l - if len(m.Add) > 0 { - for _, s := range m.Add { - l = len(s) - n += 1 + 
l + sovGenerated(uint64(l)) - } - } - if len(m.Drop) > 0 { - for _, s := range m.Drop { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.ClientIP != nil { + l = m.ClientIP.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *CephFSPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *StorageOSPersistentVolumeSource) Size() (n int) { var l int _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Path) + l = len(m.VolumeName) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) + l = len(m.VolumeNamespace) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) + l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) + n += 2 if m.SecretRef != nil { l = m.SecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 2 return n } -func (m *CephFSVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *StorageOSVolumeSource) Size() (n int) { var l int _ = l - if len(m.Monitors) > 0 { - for _, s := range m.Monitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Path) + l = len(m.VolumeName) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.User) + l = len(m.VolumeNamespace) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SecretFile) + l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) + n += 2 if m.SecretRef != nil { l = m.SecretRef.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 2 return n } -func (m *CinderPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *Sysctl) Size() (n int) { var l int _ = l - l = len(m.VolumeID) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) + l = len(m.Value) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *CinderVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *TCPSocketAction) Size() (n int) { var l int _ = l - l = len(m.VolumeID) + l = m.Port.Size() n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) + l = len(m.Host) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } -func (m *ClientIPConfig) Size() (n int) { - if m == nil { - return 0 - } +func (m *Taint) Size() (n int) { var l int _ = l - if m.TimeoutSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) + l = len(m.Key) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Value) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Effect) + n += 1 + l + sovGenerated(uint64(l)) + if m.TimeAdded != nil { + l = m.TimeAdded.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *ComponentCondition) Size() (n int) { - if m == nil { - return 0 - } +func (m *Toleration) Size() (n int) { var l int _ = l - l = len(m.Type) + l = len(m.Key) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) + l = len(m.Operator) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) + l = len(m.Value) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) + l = len(m.Effect) n += 1 + l + sovGenerated(uint64(l)) + if m.TolerationSeconds != nil { + n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) + } return n } -func (m *ComponentStatus) Size() (n int) { - if m == nil { - return 0 - } +func (m *TopologySelectorLabelRequirement) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() + l = len(m.Key) n += 1 + l + 
sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) n += 1 + l + sovGenerated(uint64(l)) } } return n } -func (m *ComponentStatusList) Size() (n int) { - if m == nil { - return 0 - } +func (m *TopologySelectorTerm) Size() (n int) { var l int _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { + if len(m.MatchLabelExpressions) > 0 { + for _, e := range m.MatchLabelExpressions { l = e.Size() n += 1 + l + sovGenerated(uint64(l)) } @@ -19477,9074 +14283,3019 @@ func (m *ComponentStatusList) Size() (n int) { return n } -func (m *ConfigMap) Size() (n int) { - if m == nil { - return 0 - } +func (m *TypedLocalObjectReference) Size() (n int) { var l int _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.BinaryData) > 0 { - for k, v := range m.BinaryData { - _ = k - _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Immutable != nil { - n += 2 - } - return n -} - -func (m *ConfigMapEnvSource) Size() (n int) { - if m == nil { - return 0 + if m.APIGroup != nil { + l = len(*m.APIGroup) + n += 1 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = m.LocalObjectReference.Size() + l = len(m.Kind) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } return n } -func (m *ConfigMapKeySelector) Size() (n int) { - if m == nil { - return 0 - } +func (m *Volume) Size() (n int) { var l int _ = l - l = m.LocalObjectReference.Size() + l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) + l = m.VolumeSource.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } return n } -func (m *ConfigMapList) Size() (n int) { - if m == nil { - return 0 - } +func (m *VolumeDevice) Size() (n int) { var l int _ = l - l = m.ListMeta.Size() + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.DevicePath) n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } -func (m *ConfigMapNodeConfigSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *VolumeMount) Size() (n int) { var l int _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) l = len(m.Name) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) + n += 2 + l = len(m.MountPath) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletConfigKey) + l = len(m.SubPath) n += 1 + l + sovGenerated(uint64(l)) + if m.MountPropagation != nil { + l = len(*m.MountPropagation) + n += 1 + l + sovGenerated(uint64(l)) + } return n } -func (m *ConfigMapProjection) Size() (n int) { - if m == nil { - return 0 - } +func (m *VolumeNodeAffinity) Size() (n int) { var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - if m.Optional != nil { - n += 2 + if m.Required != nil { + l = m.Required.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *ConfigMapVolumeSource) Size() (n int) { - if m == nil { - return 0 - } +func (m *VolumeProjection) Size() (n int) { var l int _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.Secret != nil { + l = m.Secret.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.Optional != nil { - n += 2 + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.ServiceAccountToken != nil { + l = m.ServiceAccountToken.Size() + n += 1 + l + sovGenerated(uint64(l)) } return n } -func (m *Container) Size() (n int) { - if m == nil { - return 0 - } +func (m *VolumeSource) Size() (n int) { var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.HostPath != nil { + l = m.HostPath.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.EmptyDir != nil { + l = m.EmptyDir.Size() + n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.WorkingDir) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.GCEPersistentDisk != nil { + l = m.GCEPersistentDisk.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.AWSElasticBlockStore != nil { + l = m.AWSElasticBlockStore.Size() + n += 1 + l + sovGenerated(uint64(l)) } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + if m.GitRepo != nil { + l = m.GitRepo.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.LivenessProbe != nil { - l = m.LivenessProbe.Size() + if m.Secret != nil { + l = m.Secret.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.ReadinessProbe != nil { - l = m.ReadinessProbe.Size() + if m.NFS != nil { + l = m.NFS.Size() n += 1 + l + sovGenerated(uint64(l)) } - if m.Lifecycle != nil { - l = m.Lifecycle.Size() + if m.ISCSI != nil { + l = m.ISCSI.Size() n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.TerminationMessagePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() + if m.Glusterfs != nil { + l = m.Glusterfs.Size() n += 1 + l + sovGenerated(uint64(l)) } - n += 3 - n += 3 - n += 3 - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + if m.PersistentVolumeClaim != nil { + l = m.PersistentVolumeClaim.Size() + n += 1 + l + sovGenerated(uint64(l)) } - l = len(m.TerminationMessagePolicy) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.VolumeDevices) > 0 { - for _, e := range m.VolumeDevices { - l = 
e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + if m.RBD != nil { + l = m.RBD.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.FlexVolume != nil { + l = m.FlexVolume.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Cinder != nil { + l = m.Cinder.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.CephFS != nil { + l = m.CephFS.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + if m.Flocker != nil { + l = m.Flocker.Size() + n += 1 + l + sovGenerated(uint64(l)) } - if m.StartupProbe != nil { - l = m.StartupProbe.Size() + if m.DownwardAPI != nil { + l = m.DownwardAPI.Size() n += 2 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *ContainerImage) Size() (n int) { - if m == nil { - return 0 + if m.FC != nil { + l = m.FC.Size() + n += 2 + l + sovGenerated(uint64(l)) } - var l int - _ = l - if len(m.Names) > 0 { - for _, s := range m.Names { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + if m.AzureFile != nil { + l = m.AzureFile.Size() + n += 2 + l + sovGenerated(uint64(l)) } - n += 1 + sovGenerated(uint64(m.SizeBytes)) - return n -} - -func (m *ContainerPort) Size() (n int) { - if m == nil { - return 0 + if m.ConfigMap != nil { + l = m.ConfigMap.Size() + n += 2 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HostPort)) - n += 1 + sovGenerated(uint64(m.ContainerPort)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerState) Size() (n int) { - if m == nil { - return 0 + if m.VsphereVolume != nil { + l = m.VsphereVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) } - var l int - _ = l - if m.Waiting != nil { - l = m.Waiting.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.Quobyte != nil { + l = m.Quobyte.Size() + n += 2 + l + sovGenerated(uint64(l)) } - if m.Running != nil { - l = m.Running.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.AzureDisk != nil { + l = m.AzureDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) } - if m.Terminated != nil { - l = m.Terminated.Size() - n += 1 + l + sovGenerated(uint64(l)) + if m.PhotonPersistentDisk != nil { + l = m.PhotonPersistentDisk.Size() + n += 2 + l + sovGenerated(uint64(l)) } - return n -} - -func (m *ContainerStateRunning) Size() (n int) { - if m == nil { - return 0 + if m.PortworxVolume != nil { + l = m.PortworxVolume.Size() + n += 2 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerStateTerminated) Size() (n int) { - if m == nil { - return 0 + if m.ScaleIO != nil { + l = m.ScaleIO.Size() + n += 2 + l + sovGenerated(uint64(l)) } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ExitCode)) - n += 1 + sovGenerated(uint64(m.Signal)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.StartedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FinishedAt.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ContainerStateWaiting) Size() (n int) { - if m == nil { - return 0 + if m.Projected != nil { + l = m.Projected.Size() + n += 2 + l + sovGenerated(uint64(l)) + } + if m.StorageOS != nil { + l = m.StorageOS.Size() + n += 2 + l + sovGenerated(uint64(l)) } - var l int - _ = l - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = 
len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *ContainerStatus) Size() (n int) { - if m == nil { - return 0 - } +func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { var l int _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.State.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTerminationState.Size() + l = len(m.VolumePath) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 1 + sovGenerated(uint64(m.RestartCount)) - l = len(m.Image) + l = len(m.FSType) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImageID) + l = len(m.StoragePolicyName) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerID) + l = len(m.StoragePolicyID) n += 1 + l + sovGenerated(uint64(l)) - if m.Started != nil { - n += 2 - } return n } -func (m *DaemonEndpoint) Size() (n int) { - if m == nil { - return 0 - } +func (m *WeightedPodAffinityTerm) Size() (n int) { var l int _ = l - n += 1 + sovGenerated(uint64(m.Port)) + n += 1 + sovGenerated(uint64(m.Weight)) + l = m.PodAffinityTerm.Size() + n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *DownwardAPIProjection) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break } } return n } - -func (m *DownwardAPIVolumeFile) Size() (n int) { - if m == nil { - return 0 +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *AWSElasticBlockStoreVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Affinity) String() string { + if this == nil { + return "nil" } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&Affinity{`, + `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "NodeAffinity", "NodeAffinity", 1) + `,`, + `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, + `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *AttachedVolume) String() string { + if this == nil { + return "nil" } - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) + s := strings.Join([]string{`&AttachedVolume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s +} +func (this *AvoidPods) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&AvoidPods{`, + `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *DownwardAPIVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this 
*AzureDiskVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) + s := strings.Join([]string{`&AzureDiskVolumeSource{`, + `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, + `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, + `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, + `Kind:` + valueToStringGenerated(this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *AzureFilePersistentVolumeSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, + `}`, + }, "") + return s } - -func (m *EmptyDirVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *AzureFileVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Medium) - n += 1 + l + sovGenerated(uint64(l)) - if m.SizeLimit != nil { - l = m.SizeLimit.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&AzureFileVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Binding) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&Binding{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *EndpointAddress) Size() (n int) { - if m == nil { - return 0 +func (this *CSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - if m.TargetRef != nil { - l = m.TargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) + for k := range this.VolumeAttributes { + keysForVolumeAttributes = append(keysForVolumeAttributes, k) } - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - if m.NodeName != nil { - l = len(*m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) + mapStringForVolumeAttributes := "map[string]string{" + for _, k := range keysForVolumeAttributes { + mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) } - return n + mapStringForVolumeAttributes += "}" + s := strings.Join([]string{`&CSIPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, + 
`ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, + `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, + `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s } - -func (m *EndpointPort) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - if m.AppProtocol != nil { - l = len(*m.AppProtocol) - n += 1 + l + sovGenerated(uint64(l)) +func (this *Capabilities) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&Capabilities{`, + `Add:` + fmt.Sprintf("%v", this.Add) + `,`, + `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, + `}`, + }, "") + return s } - -func (m *EndpointSubset) Size() (n int) { - if m == nil { - return 0 +func (this *CephFSPersistentVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CephFSVolumeSource) String() string { + if this == nil { + return "nil" } - if len(m.NotReadyAddresses) > 0 { - for _, e := range m.NotReadyAddresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&CephFSVolumeSource{`, + `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CinderPersistentVolumeSource) String() string { + if this == nil { + return "nil" } - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&CinderPersistentVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *CinderVolumeSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&CinderVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 
1) + `,`, + `}`, + }, "") + return s } - -func (m *Endpoints) Size() (n int) { - if m == nil { - return 0 +func (this *ClientIPConfig) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subsets) > 0 { - for _, e := range m.Subsets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ClientIPConfig{`, + `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ComponentCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s } - -func (m *EndpointsList) Size() (n int) { - if m == nil { - return 0 +func (this *ComponentStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ComponentStatus{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ComponentStatusList) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ComponentStatusList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *EnvFromSource) Size() (n int) { - if m == nil { - return 0 +func (this *ConfigMap) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Prefix) - n += 1 + l + sovGenerated(uint64(l)) - if m.ConfigMapRef != nil { - l = m.ConfigMapRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) } - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string]string{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) } - return n -} - -func (m *EnvVar) Size() (n int) { - if m == nil { - return 0 + mapStringForData += "}" + keysForBinaryData := make([]string, 0, len(this.BinaryData)) + for k := range this.BinaryData { + keysForBinaryData = append(keysForBinaryData, k) } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - if m.ValueFrom != nil { - l = m.ValueFrom.Size() - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) + mapStringForBinaryData := "map[string][]byte{" + for _, k := range keysForBinaryData { + mapStringForBinaryData += 
fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) } - return n + mapStringForBinaryData += "}" + s := strings.Join([]string{`&ConfigMap{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `BinaryData:` + mapStringForBinaryData + `,`, + `}`, + }, "") + return s } - -func (m *EnvVarSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.FieldRef != nil { - l = m.FieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *ConfigMapEnvSource) String() string { + if this == nil { + return "nil" } - if m.ResourceFieldRef != nil { - l = m.ResourceFieldRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ConfigMapEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapKeySelector) String() string { + if this == nil { + return "nil" } - if m.ConfigMapKeyRef != nil { - l = m.ConfigMapKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ConfigMapKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapList) String() string { + if this == nil { + return "nil" } - if m.SecretKeyRef != nil { - l = m.SecretKeyRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ConfigMapList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ConfigMapNodeConfigSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, + `}`, + }, "") + return s } - -func (m *EphemeralContainer) Size() (n int) { - if m == nil { - return 0 +func (this *ConfigMapProjection) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.EphemeralContainerCommon.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.TargetContainerName) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&ConfigMapProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s } - -func (m *EphemeralContainerCommon) Size() (n int) { - if m == nil { - return 0 +func (this 
*ConfigMapVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Image) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ConfigMapVolumeSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *Container) String() string { + if this == nil { + return "nil" } - if len(m.Args) > 0 { - for _, s := range m.Args { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&Container{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, + `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, + `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, + `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "Lifecycle", 1) + `,`, + `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, + `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, + `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, + `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", "VolumeDevice", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerImage) String() string { + if this == nil { + return "nil" } - l = len(m.WorkingDir) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ContainerImage{`, + `Names:` + fmt.Sprintf("%v", this.Names) + `,`, + `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerPort) String() string { + if this == nil { + return "nil" } - if len(m.Env) 
> 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ContainerPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, + `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerState) String() string { + if this == nil { + return "nil" } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ContainerState{`, + `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, + `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, + `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateRunning) String() string { + if this == nil { + return "nil" } - if m.LivenessProbe != nil { - l = m.LivenessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ContainerStateRunning{`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateTerminated) String() string { + if this == nil { + return "nil" } - if m.ReadinessProbe != nil { - l = m.ReadinessProbe.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ContainerStateTerminated{`, + `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, + `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStateWaiting) String() string { + if this == nil { + return "nil" } - if m.Lifecycle != nil { - l = m.Lifecycle.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ContainerStateWaiting{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ContainerStatus) String() string { + if this == nil { + return "nil" } - l = len(m.TerminationMessagePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ImagePullPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ContainerStatus{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, + 
`Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, + `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, + `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, + `}`, + }, "") + return s +} +func (this *DaemonEndpoint) String() string { + if this == nil { + return "nil" } - n += 3 - n += 3 - n += 3 - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DaemonEndpoint{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIProjection) String() string { + if this == nil { + return "nil" } - l = len(m.TerminationMessagePolicy) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.VolumeDevices) > 0 { - for _, e := range m.VolumeDevices { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&DownwardAPIProjection{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeFile) String() string { + if this == nil { + return "nil" } - if m.StartupProbe != nil { - l = m.StartupProbe.Size() - n += 2 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&DownwardAPIVolumeFile{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s +} +func (this *DownwardAPIVolumeSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&DownwardAPIVolumeSource{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s } - -func (m *EphemeralContainers) Size() (n int) { - if m == nil { - return 0 +func (this *EmptyDirVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.EphemeralContainers) > 0 { - for _, e := range m.EphemeralContainers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&EmptyDirVolumeSource{`, + `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, + `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointAddress) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&EndpointAddress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, + `}`, + }, "") + return s } - -func (m *Event) Size() (n int) { - if m == nil { - return 0 +func (this *EndpointPort) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l 
+ sovGenerated(uint64(l)) - l = m.InvolvedObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FirstTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Count)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = m.EventTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Series != nil { - l = m.Series.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&EndpointPort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `}`, + }, "") + return s +} +func (this *EndpointSubset) String() string { + if this == nil { + return "nil" } - l = len(m.Action) - n += 1 + l + sovGenerated(uint64(l)) - if m.Related != nil { - l = m.Related.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&EndpointSubset{`, + `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, + `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Endpoints) String() string { + if this == nil { + return "nil" } - l = len(m.ReportingController) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ReportingInstance) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&Endpoints{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *EventList) Size() (n int) { - if m == nil { - return 0 +func (this *EndpointsList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&EndpointsList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *EnvFromSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&EnvFromSource{`, + `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, + `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, + `}`, + }, "") + return s } - -func (m *EventSeries) Size() (n int) { - if m == nil { - return 0 +func (this *EnvVar) String() string { + if this == nil { + return "nil" } - 
var l int - _ = l - n += 1 + sovGenerated(uint64(m.Count)) - l = m.LastObservedTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.State) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&EnvVar{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, + `}`, + }, "") + return s } - -func (m *EventSource) Size() (n int) { - if m == nil { - return 0 +func (this *EnvVarSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Component) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&EnvVarSource{`, + `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, + `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, + `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, + `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, + `}`, + }, "") + return s } - -func (m *ExecAction) Size() (n int) { - if m == nil { - return 0 +func (this *Event) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&Event{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, + `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, + `Action:` + fmt.Sprintf("%v", this.Action) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, + `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, + `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, + `}`, + }, "") + return s +} +func (this *EventList) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&EventList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + 
`Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *FCVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *EventSeries) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&EventSeries{`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `State:` + fmt.Sprintf("%v", this.State) + `,`, + `}`, + }, "") + return s +} +func (this *EventSource) String() string { + if this == nil { + return "nil" } - if m.Lun != nil { - n += 1 + sovGenerated(uint64(*m.Lun)) + s := strings.Join([]string{`&EventSource{`, + `Component:` + fmt.Sprintf("%v", this.Component) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s +} +func (this *ExecAction) String() string { + if this == nil { + return "nil" } - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ExecAction{`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s +} +func (this *FCVolumeSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&FCVolumeSource{`, + `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, + `Lun:` + valueToStringGenerated(this.Lun) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, + `}`, + }, "") + return s } - -func (m *FlexPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *FlexPersistentVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) } - return n + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexPersistentVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s } - -func (m *FlexVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *FlexVolumeSource) String() string { + if this == nil { + return "nil" } - 
var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + keysForOptions := make([]string, 0, len(this.Options)) + for k := range this.Options { + keysForOptions = append(keysForOptions, k) } - n += 2 - if len(m.Options) > 0 { - for k, v := range m.Options { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) + mapStringForOptions := "map[string]string{" + for _, k := range keysForOptions { + mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) } - return n + mapStringForOptions += "}" + s := strings.Join([]string{`&FlexVolumeSource{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Options:` + mapStringForOptions + `,`, + `}`, + }, "") + return s } - -func (m *FlockerVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *FlockerVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.DatasetName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DatasetUUID) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&FlockerVolumeSource{`, + `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, + `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, + `}`, + }, "") + return s } - -func (m *GCEPersistentDiskVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *GCEPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.PDName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n + s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, + `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s } - -func (m *GitRepoVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *GitRepoVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Repository) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Revision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Directory) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&GitRepoVolumeSource{`, + `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, + `}`, + }, "") + return s } - -func (m *GlusterfsPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *GlusterfsVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.EndpointsNamespace != nil { - l = len(*m.EndpointsNamespace) - n += 1 + 
l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&GlusterfsVolumeSource{`, + `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *HTTPGetAction) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&HTTPGetAction{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, + `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *GlusterfsVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *HTTPHeader) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.EndpointsName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + s := strings.Join([]string{`&HTTPHeader{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s } - -func (m *HTTPGetAction) Size() (n int) { - if m == nil { - return 0 +func (this *Handler) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Scheme) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.HTTPHeaders) > 0 { - for _, e := range m.HTTPHeaders { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n + s := strings.Join([]string{`&Handler{`, + `Exec:` + strings.Replace(fmt.Sprintf("%v", this.Exec), "ExecAction", "ExecAction", 1) + `,`, + `HTTPGet:` + strings.Replace(fmt.Sprintf("%v", this.HTTPGet), "HTTPGetAction", "HTTPGetAction", 1) + `,`, + `TCPSocket:` + strings.Replace(fmt.Sprintf("%v", this.TCPSocket), "TCPSocketAction", "TCPSocketAction", 1) + `,`, + `}`, + }, "") + return s } - -func (m *HTTPHeader) Size() (n int) { - if m == nil { - return 0 +func (this *HostAlias) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&HostAlias{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, + `}`, + }, "") + return s } - -func (m *Handler) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Exec != nil { - l = m.Exec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.HTTPGet != nil { - l = m.HTTPGet.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TCPSocket != nil { - l = m.TCPSocket.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *HostPathVolumeSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&HostPathVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Type:` + valueToStringGenerated(this.Type) + `,`, + `}`, + }, "") + return s } - -func (m *HostAlias) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.IP) - n += 1 + l + 
sovGenerated(uint64(l)) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *ISCSIPersistentVolumeSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s } - -func (m *HostPathVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Type != nil { - l = len(*m.Type) - n += 1 + l + sovGenerated(uint64(l)) +func (this *ISCSIVolumeSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ISCSIVolumeSource{`, + `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, + `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, + `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, + `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, + `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, + `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, + `}`, + }, "") + return s } - -func (m *ISCSIPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Portals) > 0 { - for _, s := range m.Portals { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.InitiatorName != nil { - l = len(*m.InitiatorName) - n += 1 + l + sovGenerated(uint64(l)) +func (this *KeyToPath) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&KeyToPath{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `Mode:` + valueToStringGenerated(this.Mode) + `,`, + `}`, + }, "") + return s } - -func (m *ISCSIVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.TargetPortal) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.IQN) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Lun)) - l = len(m.ISCSIInterface) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Portals) > 0 { - for _, s := range m.Portals { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - if m.InitiatorName != nil { - l = len(*m.InitiatorName) - n += 1 + l + sovGenerated(uint64(l)) +func (this *Lifecycle) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&Lifecycle{`, + `PostStart:` + strings.Replace(fmt.Sprintf("%v", this.PostStart), "Handler", "Handler", 1) + `,`, + `PreStop:` + strings.Replace(fmt.Sprintf("%v", this.PreStop), "Handler", "Handler", 1) + `,`, + `}`, + }, "") + return s } - -func (m *KeyToPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.Mode != nil { - n += 1 + sovGenerated(uint64(*m.Mode)) +func (this *LimitRange) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&LimitRange{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *Lifecycle) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PostStart != nil { - l = m.PostStart.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *LimitRangeItem) String() string { + if this == nil { + return "nil" } - if m.PreStop != nil { - l = m.PreStop.Size() - n += 1 + l + sovGenerated(uint64(l)) + keysForMax := make([]string, 0, len(this.Max)) + for k := range this.Max { + keysForMax = append(keysForMax, string(k)) } - return n -} - -func (m *LimitRange) Size() (n int) { - if m == nil { - return 0 + github_com_gogo_protobuf_sortkeys.Strings(keysForMax) + mapStringForMax := "ResourceList{" + for _, k := range keysForMax { + mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *LimitRangeItem) Size() (n int) { - if m == nil { - return 0 + mapStringForMax += "}" + keysForMin := make([]string, 0, len(this.Min)) + for k := range this.Min { + keysForMin = append(keysForMin, string(k)) } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Max) > 0 { - for k, v := range m.Max { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForMin) + mapStringForMin := "ResourceList{" + for _, k := range keysForMin { + mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) } - if len(m.Min) > 0 { - for k, v := range m.Min { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + mapStringForMin += "}" + keysForDefault := make([]string, 0, len(this.Default)) + for k := range this.Default { + keysForDefault = append(keysForDefault, 
string(k)) } - if len(m.Default) > 0 { - for k, v := range m.Default { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) + mapStringForDefault := "ResourceList{" + for _, k := range keysForDefault { + mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) } - if len(m.DefaultRequest) > 0 { - for k, v := range m.DefaultRequest { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + mapStringForDefault += "}" + keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) + for k := range this.DefaultRequest { + keysForDefaultRequest = append(keysForDefaultRequest, string(k)) } - if len(m.MaxLimitRequestRatio) > 0 { - for k, v := range m.MaxLimitRequestRatio { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) + mapStringForDefaultRequest := "ResourceList{" + for _, k := range keysForDefaultRequest { + mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) } - return n -} - -func (m *LimitRangeList) Size() (n int) { - if m == nil { - return 0 + mapStringForDefaultRequest += "}" + keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) + for k := range this.MaxLimitRequestRatio { + keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) + mapStringForMaxLimitRequestRatio := "ResourceList{" + for _, k := range keysForMaxLimitRequestRatio { + mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) } - return n + mapStringForMaxLimitRequestRatio += "}" + s := strings.Join([]string{`&LimitRangeItem{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Max:` + mapStringForMax + `,`, + `Min:` + mapStringForMin + `,`, + `Default:` + mapStringForDefault + `,`, + `DefaultRequest:` + mapStringForDefaultRequest + `,`, + `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, + `}`, + }, "") + return s } - -func (m *LimitRangeSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Limits) > 0 { - for _, e := range m.Limits { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *LimitRangeList) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&LimitRangeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *List) Size() (n int) { - if m == nil { - return 0 +func (this *LimitRangeSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = 
m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&LimitRangeSpec{`, + `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *List) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&List{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *LoadBalancerIngress) Size() (n int) { - if m == nil { - return 0 +func (this *LoadBalancerIngress) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Hostname) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&LoadBalancerIngress{`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `}`, + }, "") + return s } - -func (m *LoadBalancerStatus) Size() (n int) { - if m == nil { - return 0 +func (this *LoadBalancerStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&LoadBalancerStatus{`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *LocalObjectReference) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&LocalObjectReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s } - -func (m *LocalObjectReference) Size() (n int) { - if m == nil { - return 0 +func (this *LocalVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&LocalVolumeSource{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `FSType:` + valueToStringGenerated(this.FSType) + `,`, + `}`, + }, "") + return s } - -func (m *LocalVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *NFSVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&NFSVolumeSource{`, + `Server:` + fmt.Sprintf("%v", this.Server) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Namespace) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&Namespace{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NFSVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *NamespaceList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Server) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + s := strings.Join([]string{`&NamespaceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *Namespace) Size() (n int) { - if m == nil { - return 0 +func (this *NamespaceSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&NamespaceSpec{`, + `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, + `}`, + }, "") + return s } - -func (m *NamespaceCondition) Size() (n int) { - if m == nil { - return 0 +func (this *NamespaceStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&NamespaceStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `}`, + }, "") + return s } - -func (m *NamespaceList) Size() (n int) { - if m == nil { - return 0 +func (this *Node) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&Node{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeAddress) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NodeAddress{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Address:` + fmt.Sprintf("%v", this.Address) + `,`, + `}`, + }, "") + return s } - -func (m *NamespaceSpec) Size() (n int) { - if m == nil { - return 0 +func (this *NodeAffinity) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&NodeAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "NodeSelector", "NodeSelector", 1) + `,`, + 
`PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NodeCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *NamespaceStatus) Size() (n int) { - if m == nil { - return 0 +func (this *NodeConfigSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&NodeConfigSource{`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeConfigStatus) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NodeConfigStatus{`, + `Assigned:` + strings.Replace(fmt.Sprintf("%v", this.Assigned), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Active:` + strings.Replace(fmt.Sprintf("%v", this.Active), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `LastKnownGood:` + strings.Replace(fmt.Sprintf("%v", this.LastKnownGood), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `Error:` + fmt.Sprintf("%v", this.Error) + `,`, + `}`, + }, "") + return s } - -func (m *Node) Size() (n int) { - if m == nil { - return 0 +func (this *NodeDaemonEndpoints) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&NodeDaemonEndpoints{`, + `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NodeAddress) Size() (n int) { - if m == nil { - return 0 +func (this *NodeList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Address) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&NodeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NodeAffinity) Size() (n int) { - if m == nil { - return 0 +func (this *NodeProxyOptions) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if 
m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - l = m.RequiredDuringSchedulingIgnoredDuringExecution.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&NodeProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *NodeResources) String() string { + if this == nil { + return "nil" } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) } - return n + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) + } + mapStringForCapacity += "}" + s := strings.Join([]string{`&NodeResources{`, + `Capacity:` + mapStringForCapacity + `,`, + `}`, + }, "") + return s } - -func (m *NodeCondition) Size() (n int) { - if m == nil { - return 0 +func (this *NodeSelector) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastHeartbeatTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&NodeSelector{`, + `NodeSelectorTerms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NodeSelectorTerms), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NodeConfigSource) Size() (n int) { - if m == nil { - return 0 +func (this *NodeSelectorRequirement) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&NodeSelectorRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSelectorTerm) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NodeSelectorTerm{`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, + `MatchFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchFields), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NodeConfigStatus) Size() (n int) { - if m == nil { - return 0 +func (this *NodeSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.Assigned != nil { - l = m.Assigned.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&NodeSpec{`, + `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, + `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, + `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, + `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, + `Taints:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, + `ConfigSource:` + strings.Replace(fmt.Sprintf("%v", this.ConfigSource), "NodeConfigSource", "NodeConfigSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeStatus) String() string { + if this == nil { + return "nil" } - if m.Active != nil { - l = m.Active.Size() - n += 1 + l + sovGenerated(uint64(l)) + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) } - if m.LastKnownGood != nil { - l = m.LastKnownGood.Size() - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) } - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NodeDaemonEndpoints) Size() (n int) { - if m == nil { - return 0 + mapStringForCapacity += "}" + keysForAllocatable := make([]string, 0, len(this.Allocatable)) + for k := range this.Allocatable { + keysForAllocatable = append(keysForAllocatable, string(k)) } - var l int - _ = l - l = m.KubeletEndpoint.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NodeList) Size() (n int) { - if m == nil { - return 0 + github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) + mapStringForAllocatable := "ResourceList{" + for _, k := range keysForAllocatable { + mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + mapStringForAllocatable += "}" + s := strings.Join([]string{`&NodeStatus{`, + `Capacity:` + mapStringForCapacity + `,`, + `Allocatable:` + mapStringForAllocatable + `,`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + `,`, + `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + `,`, + `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, + `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, + `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, + `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, + `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), `&`, ``, 1) + `,`, + `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, + `}`, + }, "") + return s +} +func (this *NodeSystemInfo) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&NodeSystemInfo{`, + `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, + `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, + `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, + `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, + `OSImage:` + fmt.Sprintf("%v", 
this.OSImage) + `,`, + `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, + `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, + `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, + `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, + `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, + `}`, + }, "") + return s } - -func (m *NodeProxyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ObjectFieldSelector) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&ObjectFieldSelector{`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s } - -func (m *NodeResources) Size() (n int) { - if m == nil { - return 0 +func (this *ObjectReference) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + s := strings.Join([]string{`&ObjectReference{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `UID:` + fmt.Sprintf("%v", this.UID) + `,`, + `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolume) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&PersistentVolume{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NodeSelector) Size() (n int) { - if m == nil { - return 0 +func (this *PersistentVolumeClaim) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.NodeSelectorTerms) > 0 { - for _, e := range m.NodeSelectorTerms { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&PersistentVolumeClaim{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimCondition) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + 
strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *NodeSelectorRequirement) Size() (n int) { - if m == nil { - return 0 +func (this *PersistentVolumeClaimList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&PersistentVolumeClaimList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeClaimSpec) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `DataSource:` + strings.Replace(fmt.Sprintf("%v", this.DataSource), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, + `}`, + }, "") + return s } - -func (m *NodeSelectorTerm) Size() (n int) { - if m == nil { - return 0 +func (this *PersistentVolumeClaimStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) } - if len(m.MatchFields) > 0 { - for _, e := range m.MatchFields { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) } - return n + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `Capacity:` + mapStringForCapacity + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NodeSpec) Size() (n int) { - if m == nil { - return 0 +func (this *PersistentVolumeClaimVolumeSource) 
String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.PodCIDR) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DoNotUseExternalID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ProviderID) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if len(m.Taints) > 0 { - for _, e := range m.Taints { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.ConfigSource != nil { - l = m.ConfigSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.PodCIDRs) > 0 { - for _, s := range m.PodCIDRs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n + s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, + `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s } - -func (m *NodeStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } +func (this *PersistentVolumeList) String() string { + if this == nil { + return "nil" } - if len(m.Allocatable) > 0 { - for k, v := range m.Allocatable { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + s := strings.Join([]string{`&PersistentVolumeList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSource) String() string { + if this == nil { + return "nil" } - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&PersistentVolumeSource{`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + 
`Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, + `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, + `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PersistentVolumeSpec) String() string { + if this == nil { + return "nil" } - if len(m.Addresses) > 0 { - for _, e := range m.Addresses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + keysForCapacity := make([]string, 0, len(this.Capacity)) + for k := range this.Capacity { + keysForCapacity = append(keysForCapacity, string(k)) } - l = m.DaemonEndpoints.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.NodeInfo.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Images) > 0 { - for _, e := range m.Images { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) + mapStringForCapacity := "ResourceList{" + for _, k := range keysForCapacity { + mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) } - if len(m.VolumesInUse) > 0 { - for _, s := range m.VolumesInUse { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + mapStringForCapacity += "}" + s := strings.Join([]string{`&PersistentVolumeSpec{`, + `Capacity:` + mapStringForCapacity + `,`, + `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, + `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, + `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, + `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, + `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, + `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, + `}`, + }, "") + 
return s +} +func (this *PersistentVolumeStatus) String() string { + if this == nil { + return "nil" } - if len(m.VolumesAttached) > 0 { - for _, e := range m.VolumesAttached { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&PersistentVolumeStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `}`, + }, "") + return s +} +func (this *PhotonPersistentDiskVolumeSource) String() string { + if this == nil { + return "nil" } - if m.Config != nil { - l = m.Config.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, + `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `}`, + }, "") + return s +} +func (this *Pod) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&Pod{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *NodeSystemInfo) Size() (n int) { - if m == nil { - return 0 +func (this *PodAffinity) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.MachineID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SystemUUID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.BootID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KernelVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OSImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ContainerRuntimeVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeletVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.KubeProxyVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.OperatingSystem) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Architecture) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&PodAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *ObjectFieldSelector) Size() (n int) { - if m == nil { - return 0 +func (this *PodAffinityTerm) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&PodAffinityTerm{`, + `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, + `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, + `}`, + }, "") + return s } - -func (m *ObjectReference) Size() (n int) { - if m == nil { - return 0 +func (this *PodAntiAffinity) 
String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FieldPath) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&PodAntiAffinity{`, + `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolume) Size() (n int) { - if m == nil { - return 0 +func (this *PodAttachOptions) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&PodAttachOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeClaim) Size() (n int) { - if m == nil { - return 0 +func (this *PodCondition) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&PodCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeClaimCondition) Size() (n int) { - if m == nil { - return 0 +func (this *PodDNSConfig) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&PodDNSConfig{`, + `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, + `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, + `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeClaimList) Size() (n int) { - if m == nil { - return 0 +func (this *PodDNSConfigOption) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&PodDNSConfigOption{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + valueToStringGenerated(this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *PodExecOptions) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&PodExecOptions{`, + `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, + `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, + `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, + `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeClaimSpec) Size() (n int) { - if m == nil { - return 0 +func (this *PodList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&PodList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodLogOptions) String() string { + if this == nil { + return "nil" } - l = m.Resources.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PodLogOptions{`, + `Container:` + fmt.Sprintf("%v", this.Container) + `,`, + `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, + `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, + `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, + `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, + `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, + `LimitBytes:` + valueToStringGenerated(this.LimitBytes) + `,`, + `}`, + }, "") + return s +} +func (this *PodPortForwardOptions) String() string { + if this == nil { + return "nil" } - if m.StorageClassName != nil { - l = len(*m.StorageClassName) - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PodPortForwardOptions{`, + `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, + `}`, + }, "") + return s +} +func (this *PodProxyOptions) String() string { + if this == nil { + return "nil" } - if m.VolumeMode != nil { - l = len(*m.VolumeMode) - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PodProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *PodReadinessGate) String() string { + if this == nil { + return "nil" } - if m.DataSource != nil { - l = m.DataSource.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := 
strings.Join([]string{`&PodReadinessGate{`, + `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, + `}`, + }, "") + return s +} +func (this *PodSecurityContext) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&PodSecurityContext{`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, + `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `Sysctls:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sysctls), "Sysctl", "Sysctl", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeClaimStatus) Size() (n int) { - if m == nil { - return 0 +func (this *PodSignature) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&PodSignature{`, + `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodSpec) String() string { + if this == nil { + return "nil" } - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) + for k := range this.NodeSelector { + keysForNodeSelector = append(keysForNodeSelector, k) } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) + mapStringForNodeSelector := "map[string]string{" + for _, k := range keysForNodeSelector { + mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) } - return n + mapStringForNodeSelector += "}" + s := strings.Join([]string{`&PodSpec{`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, + `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, + `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, + `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, + `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, + `NodeSelector:` + mapStringForNodeSelector + `,`, + `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, + `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, + `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, + `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, + `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", 
"PodSecurityContext", 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, + `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, + `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, + `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, + `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, + `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, + `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, + `Priority:` + valueToStringGenerated(this.Priority) + `,`, + `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, + `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, + `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, + `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeClaimVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *PodStatus) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.ClaimName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n + s := strings.Join([]string{`&PodStatus{`, + `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, + `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, + `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, + `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, + `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeList) Size() (n int) { - if m == nil { - return 0 +func (this *PodStatusResult) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n + s := strings.Join([]string{`&PodStatusResult{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Status:` + 
strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *PodTemplate) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PodTemplate{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateList) String() string { + if this == nil { + return "nil" } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PodTemplateList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PodTemplateSpec) String() string { + if this == nil { + return "nil" } - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PodTemplateSpec{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PortworxVolumeSource) String() string { + if this == nil { + return "nil" } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PortworxVolumeSource{`, + `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *Preconditions) String() string { + if this == nil { + return "nil" } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&Preconditions{`, + `UID:` + valueToStringGenerated(this.UID) + `,`, + `}`, + }, "") + return s +} +func (this *PreferAvoidPodsEntry) String() string { + if this == nil { + return "nil" } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PreferAvoidPodsEntry{`, + `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, + `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *PreferredSchedulingTerm) String() string { + if this == nil { + return "nil" } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&PreferredSchedulingTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `Preference:` + 
strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Probe) String() string { + if this == nil { + return "nil" } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&Probe{`, + `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, + `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, + `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, + `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, + `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, + `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, + `}`, + }, "") + return s +} +func (this *ProjectedVolumeSource) String() string { + if this == nil { + return "nil" } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ProjectedVolumeSource{`, + `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `}`, + }, "") + return s +} +func (this *QuobyteVolumeSource) String() string { + if this == nil { + return "nil" } - if m.FC != nil { - l = m.FC.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&QuobyteVolumeSource{`, + `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, + `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Group:` + fmt.Sprintf("%v", this.Group) + `,`, + `}`, + }, "") + return s +} +func (this *RBDPersistentVolumeSource) String() string { + if this == nil { + return "nil" } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&RBDPersistentVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RBDVolumeSource) String() string { + if this == nil { + return "nil" } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&RBDVolumeSource{`, + `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, + `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, + `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, + `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *RangeAllocation) String() string { + if this == nil { + return "nil" } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 1 + l + 
sovGenerated(uint64(l)) + s := strings.Join([]string{`&RangeAllocation{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Range:` + fmt.Sprintf("%v", this.Range) + `,`, + `Data:` + valueToStringGenerated(this.Data) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationController) String() string { + if this == nil { + return "nil" } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ReplicationController{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerCondition) String() string { + if this == nil { + return "nil" } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ReplicationControllerCondition{`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Status:` + fmt.Sprintf("%v", this.Status) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerList) String() string { + if this == nil { + return "nil" } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ReplicationControllerList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerSpec) String() string { + if this == nil { + return "nil" } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) + mapStringForSelector += "}" + s := strings.Join([]string{`&ReplicationControllerSpec{`, + `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, + `Selector:` + mapStringForSelector + `,`, + `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, + `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, + `}`, + }, "") + return s +} +func (this *ReplicationControllerStatus) String() 
string { + if this == nil { + return "nil" } - if m.Local != nil { - l = m.Local.Size() - n += 2 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ReplicationControllerStatus{`, + `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, + `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, + `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, + `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, + `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceFieldSelector) String() string { + if this == nil { + return "nil" } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ResourceFieldSelector{`, + `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, + `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, + `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuota) String() string { + if this == nil { + return "nil" } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ResourceQuota{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaList) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ResourceQuotaList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PersistentVolumeSpec) Size() (n int) { - if m == nil { - return 0 +func (this *ResourceQuotaSpec) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Capacity) > 0 { - for k, v := range m.Capacity { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) } - l = m.PersistentVolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.AccessModes) > 0 { - for _, s := range m.AccessModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) } - if m.ClaimRef != nil { - l = m.ClaimRef.Size() - n += 1 + l + sovGenerated(uint64(l)) + mapStringForHard += "}" + s := 
strings.Join([]string{`&ResourceQuotaSpec{`, + `Hard:` + mapStringForHard + `,`, + `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, + `ScopeSelector:` + strings.Replace(fmt.Sprintf("%v", this.ScopeSelector), "ScopeSelector", "ScopeSelector", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ResourceQuotaStatus) String() string { + if this == nil { + return "nil" } - l = len(m.PersistentVolumeReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageClassName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + keysForHard := make([]string, 0, len(this.Hard)) + for k := range this.Hard { + keysForHard = append(keysForHard, string(k)) } - if m.VolumeMode != nil { - l = len(*m.VolumeMode) - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForHard) + mapStringForHard := "ResourceList{" + for _, k := range keysForHard { + mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) } - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) + mapStringForHard += "}" + keysForUsed := make([]string, 0, len(this.Used)) + for k := range this.Used { + keysForUsed = append(keysForUsed, string(k)) } - return n -} - -func (m *PersistentVolumeStatus) Size() (n int) { - if m == nil { - return 0 + github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) + mapStringForUsed := "ResourceList{" + for _, k := range keysForUsed { + mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) } - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - return n + mapStringForUsed += "}" + s := strings.Join([]string{`&ResourceQuotaStatus{`, + `Hard:` + mapStringForHard + `,`, + `Used:` + mapStringForUsed + `,`, + `}`, + }, "") + return s } - -func (m *PhotonPersistentDiskVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *ResourceRequirements) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.PdID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Pod) Size() (n int) { - if m == nil { - return 0 + keysForLimits := make([]string, 0, len(this.Limits)) + for k := range this.Limits { + keysForLimits = append(keysForLimits, string(k)) } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodAffinity) Size() (n int) { - if m == nil { - return 0 + github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) + mapStringForLimits := "ResourceList{" + for _, k := range keysForLimits { + mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) } - var l int - _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + mapStringForLimits += "}" + keysForRequests := make([]string, 0, len(this.Requests)) + for k := range this.Requests { + keysForRequests = append(keysForRequests, string(k)) } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range 
m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) + mapStringForRequests := "ResourceList{" + for _, k := range keysForRequests { + mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) } - return n + mapStringForRequests += "}" + s := strings.Join([]string{`&ResourceRequirements{`, + `Limits:` + mapStringForLimits + `,`, + `Requests:` + mapStringForRequests + `,`, + `}`, + }, "") + return s } - -func (m *PodAffinityTerm) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.LabelSelector != nil { - l = m.LabelSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.Namespaces) > 0 { - for _, s := range m.Namespaces { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *SELinuxOptions) String() string { + if this == nil { + return "nil" } - l = len(m.TopologyKey) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&SELinuxOptions{`, + `User:` + fmt.Sprintf("%v", this.User) + `,`, + `Role:` + fmt.Sprintf("%v", this.Role) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `Level:` + fmt.Sprintf("%v", this.Level) + `,`, + `}`, + }, "") + return s } - -func (m *PodAntiAffinity) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.RequiredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.RequiredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *ScaleIOPersistentVolumeSource) String() string { + if this == nil { + return "nil" } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, e := range m.PreferredDuringSchedulingIgnoredDuringExecution { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n + s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s } - -func (m *PodAttachOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ScaleIOVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&ScaleIOVolumeSource{`, + `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, + `System:` + fmt.Sprintf("%v", this.System) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, + `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, + `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, + `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `FSType:` + 
fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s } - -func (m *PodCondition) Size() (n int) { - if m == nil { - return 0 +func (this *ScopeSelector) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&ScopeSelector{`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodDNSConfig) Size() (n int) { - if m == nil { - return 0 +func (this *ScopedResourceSelectorRequirement) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Nameservers) > 0 { - for _, s := range m.Nameservers { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, + `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s +} +func (this *Secret) String() string { + if this == nil { + return "nil" } - if len(m.Searches) > 0 { - for _, s := range m.Searches { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + keysForData := make([]string, 0, len(this.Data)) + for k := range this.Data { + keysForData = append(keysForData, k) } - if len(m.Options) > 0 { - for _, e := range m.Options { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + github_com_gogo_protobuf_sortkeys.Strings(keysForData) + mapStringForData := "map[string][]byte{" + for _, k := range keysForData { + mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) } - return n -} - -func (m *PodDNSConfigOption) Size() (n int) { - if m == nil { - return 0 + mapStringForData += "}" + keysForStringData := make([]string, 0, len(this.StringData)) + for k := range this.StringData { + keysForStringData = append(keysForStringData, k) } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Value != nil { - l = len(*m.Value) - n += 1 + l + sovGenerated(uint64(l)) + github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) + mapStringForStringData := "map[string]string{" + for _, k := range keysForStringData { + mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) } - return n + mapStringForStringData += "}" + s := strings.Join([]string{`&Secret{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Data:` + mapStringForData + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StringData:` + mapStringForStringData + `,`, + `}`, + }, "") + return s } - -func (m *PodExecOptions) Size() (n int) { - if m == nil { - return 0 +func (this *SecretEnvSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - n += 2 - n += 2 - n += 2 - n += 2 - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Command) > 0 { - for _, s := range m.Command { - 
l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&SecretEnvSource{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretKeySelector) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&SecretKeySelector{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s } - -func (m *PodIP) Size() (n int) { - if m == nil { - return 0 +func (this *SecretList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.IP) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&SecretList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodList) Size() (n int) { - if m == nil { - return 0 +func (this *SecretProjection) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&SecretProjection{`, + `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecretReference) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&SecretReference{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, + `}`, + }, "") + return s } - -func (m *PodLogOptions) Size() (n int) { - if m == nil { - return 0 +func (this *SecretVolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Container) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - if m.SinceSeconds != nil { - n += 1 + sovGenerated(uint64(*m.SinceSeconds)) + s := strings.Join([]string{`&SecretVolumeSource{`, + `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, + `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, + `Optional:` + valueToStringGenerated(this.Optional) + `,`, + `}`, + }, "") + return s +} +func (this *SecurityContext) String() string { + if this == nil { + return "nil" } - if m.SinceTime != nil { - l = m.SinceTime.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&SecurityContext{`, + `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), "Capabilities", "Capabilities", 1) + `,`, + `Privileged:` + 
valueToStringGenerated(this.Privileged) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, + `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, + `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, + `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, + `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, + `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, + `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, + `}`, + }, "") + return s +} +func (this *SerializedReference) String() string { + if this == nil { + return "nil" } - n += 2 - if m.TailLines != nil { - n += 1 + sovGenerated(uint64(*m.TailLines)) + s := strings.Join([]string{`&SerializedReference{`, + `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" } - if m.LimitBytes != nil { - n += 1 + sovGenerated(uint64(*m.LimitBytes)) + s := strings.Join([]string{`&Service{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccount) String() string { + if this == nil { + return "nil" } - n += 2 - return n + s := strings.Join([]string{`&ServiceAccount{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, + `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, + `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, + `}`, + }, "") + return s } - -func (m *PodPortForwardOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ServiceAccountList) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - n += 1 + sovGenerated(uint64(e)) - } + s := strings.Join([]string{`&ServiceAccountList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceAccountTokenProjection) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&ServiceAccountTokenProjection{`, + `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, + `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s } - -func (m *PodProxyOptions) Size() (n int) { - if m == nil { - return 0 +func (this *ServiceList) 
String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&ServiceList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *PodReadinessGate) Size() (n int) { - if m == nil { - return 0 +func (this *ServicePort) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.ConditionType) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&ServicePort{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, + `}`, + }, "") + return s } - -func (m *PodSecurityContext) Size() (n int) { - if m == nil { - return 0 +func (this *ServiceProxyOptions) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&ServiceProxyOptions{`, + `Path:` + fmt.Sprintf("%v", this.Path) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceSpec) String() string { + if this == nil { + return "nil" } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) + keysForSelector := make([]string, 0, len(this.Selector)) + for k := range this.Selector { + keysForSelector = append(keysForSelector, k) } - if m.RunAsNonRoot != nil { - n += 2 + github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) + mapStringForSelector := "map[string]string{" + for _, k := range keysForSelector { + mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) } - if len(m.SupplementalGroups) > 0 { - for _, e := range m.SupplementalGroups { - n += 1 + sovGenerated(uint64(e)) - } + mapStringForSelector += "}" + s := strings.Join([]string{`&ServiceSpec{`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, + `Selector:` + mapStringForSelector + `,`, + `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, + `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, + `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, + `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, + `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, + `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, + `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, + `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, + `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, + `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceStatus) String() string { + if this == nil { + return "nil" } - if m.FSGroup != nil { - n += 1 + 
sovGenerated(uint64(*m.FSGroup)) + s := strings.Join([]string{`&ServiceStatus{`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *SessionAffinityConfig) String() string { + if this == nil { + return "nil" } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) + s := strings.Join([]string{`&SessionAffinityConfig{`, + `ClientIP:` + strings.Replace(fmt.Sprintf("%v", this.ClientIP), "ClientIPConfig", "ClientIPConfig", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSPersistentVolumeSource) String() string { + if this == nil { + return "nil" } - if len(m.Sysctls) > 0 { - for _, e := range m.Sysctls { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } + s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "ObjectReference", "ObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *StorageOSVolumeSource) String() string { + if this == nil { + return "nil" } - if m.WindowsOptions != nil { - l = m.WindowsOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&StorageOSVolumeSource{`, + `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, + `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Sysctl) String() string { + if this == nil { + return "nil" } - if m.FSGroupChangePolicy != nil { - l = len(*m.FSGroupChangePolicy) - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&Sysctl{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *TCPSocketAction) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&TCPSocketAction{`, + `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `}`, + }, "") + return s } - -func (m *PodSignature) Size() (n int) { - if m == nil { - return 0 +func (this *Taint) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if m.PodController != nil { - l = m.PodController.Size() - n += 1 + l + sovGenerated(uint64(l)) + s := strings.Join([]string{`&Taint{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Toleration) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&Toleration{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + 
`Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, + `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, + `}`, + }, "") + return s } - -func (m *PodSpec) Size() (n int) { - if m == nil { - return 0 +func (this *TopologySelectorLabelRequirement) String() string { + if this == nil { + return "nil" } - var l int - _ = l - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Containers) > 0 { - for _, e := range m.Containers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RestartPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.TerminationGracePeriodSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TerminationGracePeriodSeconds)) - } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - l = len(m.DNSPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ServiceAccountName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DeprecatedServiceAccount) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - n += 2 - n += 2 - if m.SecurityContext != nil { - l = m.SecurityContext.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Hostname) - n += 2 + l + sovGenerated(uint64(l)) - l = len(m.Subdomain) - n += 2 + l + sovGenerated(uint64(l)) - if m.Affinity != nil { - l = m.Affinity.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - l = len(m.SchedulerName) - n += 2 + l + sovGenerated(uint64(l)) - if len(m.InitContainers) > 0 { - for _, e := range m.InitContainers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.AutomountServiceAccountToken != nil { - n += 3 - } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.HostAliases) > 0 { - for _, e := range m.HostAliases { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - l = len(m.PriorityClassName) - n += 2 + l + sovGenerated(uint64(l)) - if m.Priority != nil { - n += 2 + sovGenerated(uint64(*m.Priority)) - } - if m.DNSConfig != nil { - l = m.DNSConfig.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ShareProcessNamespace != nil { - n += 3 - } - if len(m.ReadinessGates) > 0 { - for _, e := range m.ReadinessGates { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClassName != nil { - l = len(*m.RuntimeClassName) - n += 2 + l + sovGenerated(uint64(l)) - } - if m.EnableServiceLinks != nil { - n += 3 - } - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.Overhead) > 0 { - for k, v := range m.Overhead { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 2 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.TopologySpreadConstraints) > 0 { - for _, e := range m.TopologySpreadConstraints { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.EphemeralContainers) > 0 { - for _, e := range 
m.EphemeralContainers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Phase) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.HostIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodIP) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.ContainerStatuses) > 0 { - for _, e := range m.ContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.QOSClass) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.InitContainerStatuses) > 0 { - for _, e := range m.InitContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.NominatedNodeName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.PodIPs) > 0 { - for _, e := range m.PodIPs { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.EphemeralContainerStatuses) > 0 { - for _, e := range m.EphemeralContainerStatuses { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodStatusResult) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodTemplate) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodTemplateList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodTemplateSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PortworxVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *Preconditions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.UID != nil { - l = len(*m.UID) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PreferAvoidPodsEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.PodSignature.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.EvictionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PreferredSchedulingTerm) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.Preference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, + `Key:` + fmt.Sprintf("%v", this.Key) + `,`, + 
`Values:` + fmt.Sprintf("%v", this.Values) + `,`, + `}`, + }, "") + return s } - -func (m *Probe) Size() (n int) { - if m == nil { - return 0 +func (this *TopologySelectorTerm) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.Handler.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.InitialDelaySeconds)) - n += 1 + sovGenerated(uint64(m.TimeoutSeconds)) - n += 1 + sovGenerated(uint64(m.PeriodSeconds)) - n += 1 + sovGenerated(uint64(m.SuccessThreshold)) - n += 1 + sovGenerated(uint64(m.FailureThreshold)) - return n + s := strings.Join([]string{`&TopologySelectorTerm{`, + `MatchLabelExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchLabelExpressions), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *ProjectedVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Sources) > 0 { - for _, e := range m.Sources { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) +func (this *TypedLocalObjectReference) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&TypedLocalObjectReference{`, + `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `}`, + }, "") + return s } - -func (m *QuobyteVolumeSource) Size() (n int) { - if m == nil { - return 0 +func (this *Volume) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Registry) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Volume) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Tenant) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&Volume{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *RBDPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *VolumeDevice) String() string { + if this == nil { + return "nil" } - n += 2 - return n + s := strings.Join([]string{`&VolumeDevice{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, + `}`, + }, "") + return s } - -func (m *RBDVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.CephMonitors) > 0 { - for _, s := range m.CephMonitors { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.RBDImage) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.RBDPool) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.RadosUser) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Keyring) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) +func (this *VolumeMount) String() string { + if this == nil { + return "nil" } - n += 2 - return n + s := strings.Join([]string{`&VolumeMount{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, + `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, + `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, + `}`, + }, "") + return s } - -func (m *RangeAllocation) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Range) - n += 1 + l + sovGenerated(uint64(l)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovGenerated(uint64(l)) +func (this *VolumeNodeAffinity) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&VolumeNodeAffinity{`, + `Required:` + strings.Replace(fmt.Sprintf("%v", this.Required), "NodeSelector", "NodeSelector", 1) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicationController) Size() (n int) { - if m == nil { - return 0 +func (this *VolumeProjection) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&VolumeProjection{`, + `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, + `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicationControllerCondition) Size() (n int) { - if m == nil { - return 0 +func (this *VolumeSource) String() string { + if this == nil { + return "nil" } - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n + s := strings.Join([]string{`&VolumeSource{`, + `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, + `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, + `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, + `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, + `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, + `Secret:` + 
strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, + `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, + `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, + `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, + `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, + `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, + `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, + `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, + `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, + `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, + `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, + `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, + `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, + `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, + `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, + `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, + `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, + `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, + `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, + `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, + `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, + `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, + `}`, + }, "") + return s } - -func (m *ReplicationControllerList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } +func (this *VsphereVirtualDiskVolumeSource) String() string { + if this == nil { + return "nil" } - return n + s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, + `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, + `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, + `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, + `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, + `}`, + }, "") + return s } - -func 
(m *ReplicationControllerSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } +func (this *WeightedPodAffinityTerm) String() string { + if this == nil { + return "nil" } - if m.Template != nil { - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n -} - -func (m *ReplicationControllerStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceFieldSelector) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ContainerName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Divisor.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceQuota) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceQuotaList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceQuotaSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Scopes) > 0 { - for _, s := range m.Scopes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.ScopeSelector != nil { - l = m.ScopeSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceQuotaStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hard) > 0 { - for k, v := range m.Hard { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Used) > 0 { - for k, v := range m.Used { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *ResourceRequirements) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Limits) > 0 { - for k, v := range m.Limits { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 
+ sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Requests) > 0 { - for k, v := range m.Requests { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *SELinuxOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Role) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Level) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleIOPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Gateway) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *ScaleIOVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Gateway) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.System) - n += 1 + l + sovGenerated(uint64(l)) - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 2 - l = len(m.ProtectionDomain) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePool) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageMode) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *ScopeSelector) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.MatchExpressions) > 0 { - for _, e := range m.MatchExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ScopedResourceSelectorRequirement) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ScopeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Secret) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Data) > 0 { - for k, v := range m.Data { - _ = k - _ = v - l = 0 - if v != nil { - l = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + l - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.StringData) > 0 { - for k, v := range m.StringData { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.Immutable != nil { - n += 2 - } - return n -} - -func (m *SecretEnvSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } - return n -} - -func (m *SecretKeySelector) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if m.Optional != nil { - n += 2 - } - return n -} - -func (m *SecretList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *SecretProjection) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LocalObjectReference.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Optional != nil { - n += 2 - } - return n -} - -func (m *SecretReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SecretVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultMode != nil { - n += 1 + sovGenerated(uint64(*m.DefaultMode)) - } - if m.Optional != nil { - n += 2 - } - return n -} - -func (m *SecurityContext) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Capabilities != nil { - l = m.Capabilities.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Privileged != nil { - n += 2 - } - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUser != nil { - n += 1 + sovGenerated(uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - n += 2 - } - if m.ReadOnlyRootFilesystem != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 2 - } - if m.RunAsGroup != nil { - n += 1 + sovGenerated(uint64(*m.RunAsGroup)) - } - if m.ProcMount != nil { - l = len(*m.ProcMount) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.WindowsOptions != nil { - l = m.WindowsOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SerializedReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Reference.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Service) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceAccount) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Secrets) > 0 { - for _, e := range m.Secrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ImagePullSecrets) > 0 { - for _, e := range m.ImagePullSecrets { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AutomountServiceAccountToken != nil { - n += 2 - } - return n -} - -func (m *ServiceAccountList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = 
m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceAccountTokenProjection) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Audience) - n += 1 + l + sovGenerated(uint64(l)) - if m.ExpirationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) - } - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServicePort) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Port)) - l = m.TargetPort.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.NodePort)) - if m.AppProtocol != nil { - l = len(*m.AppProtocol) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ServiceProxyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.ClusterIP) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.SessionAffinity) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.LoadBalancerIP) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ExternalName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ExternalTrafficPolicy) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.HealthCheckNodePort)) - n += 2 - if m.SessionAffinityConfig != nil { - l = m.SessionAffinityConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.IPFamily != nil { - l = len(*m.IPFamily) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.TopologyKeys) > 0 { - for _, s := range m.TopologyKeys { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ServiceStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SessionAffinityConfig) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.ClientIP != nil { - l = m.ClientIP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *StorageOSPersistentVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.VolumeName) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *StorageOSVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.VolumeName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeNamespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretRef != nil { - l = m.SecretRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Sysctl) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TCPSocketAction) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Taint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TimeAdded != nil { - l = m.TimeAdded.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Toleration) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operator) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Value) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Effect) - n += 1 + l + sovGenerated(uint64(l)) - if m.TolerationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.TolerationSeconds)) - } - return n -} - -func (m *TopologySelectorLabelRequirement) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Key) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Values) > 0 { - for _, s := range m.Values { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *TopologySelectorTerm) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, e := range m.MatchLabelExpressions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *TopologySpreadConstraint) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.MaxSkew)) - l = len(m.TopologyKey) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.WhenUnsatisfiable) - n += 1 + l + sovGenerated(uint64(l)) - if m.LabelSelector != nil { - l = m.LabelSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *TypedLocalObjectReference) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.APIGroup != nil { - l = len(*m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Volume) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.VolumeSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeDevice) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n 
+= 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeMount) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.MountPath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.SubPath) - n += 1 + l + sovGenerated(uint64(l)) - if m.MountPropagation != nil { - l = len(*m.MountPropagation) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.SubPathExpr) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeNodeAffinity) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Required != nil { - l = m.Required.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeProjection) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ServiceAccountToken != nil { - l = m.ServiceAccountToken.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.HostPath != nil { - l = m.HostPath.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.EmptyDir != nil { - l = m.EmptyDir.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GCEPersistentDisk != nil { - l = m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GitRepo != nil { - l = m.GitRepo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PersistentVolumeClaim != nil { - l = m.PersistentVolumeClaim.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.FC != nil { - l = m.FC.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + 
sovGenerated(uint64(l)) - } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Projected != nil { - l = m.Projected.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.CSI != nil { - l = m.CSI.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.VolumePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WeightedPodAffinityTerm) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.PodAffinityTerm.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WindowsSecurityContextOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.GMSACredentialSpecName != nil { - l = len(*m.GMSACredentialSpecName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GMSACredentialSpec != nil { - l = len(*m.GMSACredentialSpec) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RunAsUserName != nil { - l = len(*m.RunAsUserName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AWSElasticBlockStoreVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Affinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Affinity{`, - `NodeAffinity:` + strings.Replace(this.NodeAffinity.String(), "NodeAffinity", "NodeAffinity", 1) + `,`, - `PodAffinity:` + strings.Replace(this.PodAffinity.String(), "PodAffinity", "PodAffinity", 1) + `,`, - `PodAntiAffinity:` + strings.Replace(this.PodAntiAffinity.String(), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AttachedVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AttachedVolume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s -} -func (this *AvoidPods) String() string { - if this == nil { - return "nil" - } - repeatedStringForPreferAvoidPods := "[]PreferAvoidPodsEntry{" - for _, f := range this.PreferAvoidPods { - repeatedStringForPreferAvoidPods += strings.Replace(strings.Replace(f.String(), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + "," - } - repeatedStringForPreferAvoidPods += "}" - s := strings.Join([]string{`&AvoidPods{`, - `PreferAvoidPods:` + repeatedStringForPreferAvoidPods + `,`, - `}`, - }, "") - return s -} -func (this *AzureDiskVolumeSource) String() string { - if this == nil { - return "nil" 
- } - s := strings.Join([]string{`&AzureDiskVolumeSource{`, - `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, - `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, - `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `Kind:` + valueToStringGenerated(this.Kind) + `,`, - `}`, - }, "") - return s -} -func (this *AzureFilePersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, - `}`, - }, "") - return s -} -func (this *AzureFileVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AzureFileVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Binding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Binding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) - } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `ControllerPublishSecretRef:` + strings.Replace(this.ControllerPublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, - `NodeStageSecretRef:` + strings.Replace(this.NodeStageSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, - `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, - `ControllerExpandSecretRef:` + strings.Replace(this.ControllerExpandSecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIVolumeSource) String() string { - if this == nil { - return "nil" - } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := 
range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) - } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `NodePublishSecretRef:` + strings.Replace(this.NodePublishSecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Capabilities) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Capabilities{`, - `Add:` + fmt.Sprintf("%v", this.Add) + `,`, - `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CephFSVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CinderPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CinderPersistentVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CinderVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CinderVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClientIPConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClientIPConfig{`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ComponentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatus) String() 
[vendored Kubernetes core/v1 generated.pb.go, deletion hunk continued: gogo/protobuf-generated String() methods for the core API types, running from ComponentStatus through PodSignature (ComponentStatusList, ConfigMap, Container, ContainerState/Status, Endpoints, EnvVar, EphemeralContainer, Event, volume sources, LimitRange, Namespace, Node, PersistentVolume/Claim, Pod and related option types). Each generated method nil-checks the receiver, pretty-prints its scalar, map, and repeated fields, and joins them into a single debug string; the block is machine-generated and contains no hand-written changes.]
strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "v1.OwnerReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += strings.Replace(strings.Replace(f.String(), "Volume", "Volume", 1), `&`, ``, 1) + "," - } - repeatedStringForVolumes += "}" - repeatedStringForContainers := "[]Container{" - for _, f := range this.Containers { - repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," - } - repeatedStringForContainers += "}" - repeatedStringForImagePullSecrets := "[]LocalObjectReference{" - for _, f := range this.ImagePullSecrets { - repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," - } - repeatedStringForImagePullSecrets += "}" - repeatedStringForInitContainers := "[]Container{" - for _, f := range this.InitContainers { - repeatedStringForInitContainers += strings.Replace(strings.Replace(f.String(), "Container", "Container", 1), `&`, ``, 1) + "," - } - repeatedStringForInitContainers += "}" - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += strings.Replace(strings.Replace(f.String(), "Toleration", "Toleration", 1), `&`, ``, 1) + "," - } - repeatedStringForTolerations += "}" - repeatedStringForHostAliases := "[]HostAlias{" - for _, f := range this.HostAliases { - repeatedStringForHostAliases += strings.Replace(strings.Replace(f.String(), "HostAlias", "HostAlias", 1), `&`, ``, 1) + "," - } - repeatedStringForHostAliases += "}" - repeatedStringForReadinessGates := "[]PodReadinessGate{" - for _, f := range this.ReadinessGates { - repeatedStringForReadinessGates += strings.Replace(strings.Replace(f.String(), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + "," - } - repeatedStringForReadinessGates += "}" - repeatedStringForTopologySpreadConstraints := "[]TopologySpreadConstraint{" - for _, f := range this.TopologySpreadConstraints { - repeatedStringForTopologySpreadConstraints += strings.Replace(strings.Replace(f.String(), "TopologySpreadConstraint", "TopologySpreadConstraint", 1), `&`, ``, 1) + "," - } - repeatedStringForTopologySpreadConstraints += "}" - repeatedStringForEphemeralContainers := "[]EphemeralContainer{" - for _, f := range this.EphemeralContainers { - repeatedStringForEphemeralContainers += strings.Replace(strings.Replace(f.String(), "EphemeralContainer", "EphemeralContainer", 1), `&`, ``, 1) + "," - } - repeatedStringForEphemeralContainers += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) - } - mapStringForNodeSelector += "}" - keysForOverhead := make([]string, 0, len(this.Overhead)) - for k := range this.Overhead { - keysForOverhead = append(keysForOverhead, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOverhead) - mapStringForOverhead := "ResourceList{" - for _, k := range keysForOverhead { - mapStringForOverhead += fmt.Sprintf("%v: %v,", k, 
this.Overhead[ResourceName(k)]) - } - mapStringForOverhead += "}" - s := strings.Join([]string{`&PodSpec{`, - `Volumes:` + repeatedStringForVolumes + `,`, - `Containers:` + repeatedStringForContainers + `,`, - `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, - `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SecurityContext:` + strings.Replace(this.SecurityContext.String(), "PodSecurityContext", "PodSecurityContext", 1) + `,`, - `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, - `Affinity:` + strings.Replace(this.Affinity.String(), "Affinity", "Affinity", 1) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `InitContainers:` + repeatedStringForInitContainers + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `HostAliases:` + repeatedStringForHostAliases + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `DNSConfig:` + strings.Replace(this.DNSConfig.String(), "PodDNSConfig", "PodDNSConfig", 1) + `,`, - `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, - `ReadinessGates:` + repeatedStringForReadinessGates + `,`, - `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, - `EnableServiceLinks:` + valueToStringGenerated(this.EnableServiceLinks) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, - `Overhead:` + mapStringForOverhead + `,`, - `TopologySpreadConstraints:` + repeatedStringForTopologySpreadConstraints + `,`, - `EphemeralContainers:` + repeatedStringForEphemeralContainers + `,`, - `}`, - }, "") - return s -} -func (this *PodStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]PodCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "PodCondition", "PodCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - repeatedStringForContainerStatuses := "[]ContainerStatus{" - for _, f := range this.ContainerStatuses { - repeatedStringForContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForContainerStatuses += "}" - repeatedStringForInitContainerStatuses := "[]ContainerStatus{" - for _, f := range this.InitContainerStatuses { - repeatedStringForInitContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForInitContainerStatuses += "}" - repeatedStringForPodIPs := "[]PodIP{" - for _, f := range 
this.PodIPs { - repeatedStringForPodIPs += strings.Replace(strings.Replace(f.String(), "PodIP", "PodIP", 1), `&`, ``, 1) + "," - } - repeatedStringForPodIPs += "}" - repeatedStringForEphemeralContainerStatuses := "[]ContainerStatus{" - for _, f := range this.EphemeralContainerStatuses { - repeatedStringForEphemeralContainerStatuses += strings.Replace(strings.Replace(f.String(), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + "," - } - repeatedStringForEphemeralContainerStatuses += "}" - s := strings.Join([]string{`&PodStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "v1.Time", 1) + `,`, - `ContainerStatuses:` + repeatedStringForContainerStatuses + `,`, - `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, - `InitContainerStatuses:` + repeatedStringForInitContainerStatuses + `,`, - `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, - `PodIPs:` + repeatedStringForPodIPs + `,`, - `EphemeralContainerStatuses:` + repeatedStringForEphemeralContainerStatuses + `,`, - `}`, - }, "") - return s -} -func (this *PodStatusResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodStatusResult{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplateList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PodTemplate{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PodTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplateSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PortworxVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PortworxVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, 
"") - return s -} -func (this *Preconditions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Preconditions{`, - `UID:` + valueToStringGenerated(this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *PreferAvoidPodsEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PreferAvoidPodsEntry{`, - `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, - `EvictionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EvictionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *PreferredSchedulingTerm) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PreferredSchedulingTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Probe) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Probe{`, - `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, - `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, - `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, - `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, - `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, - `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, - `}`, - }, "") - return s -} -func (this *ProjectedVolumeSource) String() string { - if this == nil { - return "nil" - } - repeatedStringForSources := "[]VolumeProjection{" - for _, f := range this.Sources { - repeatedStringForSources += strings.Replace(strings.Replace(f.String(), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + "," - } - repeatedStringForSources += "}" - s := strings.Join([]string{`&ProjectedVolumeSource{`, - `Sources:` + repeatedStringForSources + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s -} -func (this *QuobyteVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&QuobyteVolumeSource{`, - `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, - `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Tenant:` + fmt.Sprintf("%v", this.Tenant) + `,`, - `}`, - }, "") - return s -} -func (this *RBDPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RBDPersistentVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *RBDVolumeSource) String() string { - if this 
== nil { - return "nil" - } - s := strings.Join([]string{`&RBDVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *RangeAllocation) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RangeAllocation{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Range:` + fmt.Sprintf("%v", this.Range) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationController) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicationController{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicationControllerCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ReplicationController{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ReplicationControllerList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerSpec) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ReplicationControllerSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `Template:` + strings.Replace(this.Template.String(), "PodTemplateSpec", 
"PodTemplateSpec", 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerStatus) String() string { - if this == nil { - return "nil" - } - repeatedStringForConditions := "[]ReplicationControllerCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" - s := strings.Join([]string{`&ReplicationControllerStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFieldSelector) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceFieldSelector{`, - `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Divisor:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Divisor), "Quantity", "resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuota) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceQuota{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuotaList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ResourceQuota{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ResourceQuotaList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuotaSpec) String() string { - if this == nil { - return "nil" - } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) - } - mapStringForHard += "}" - s := strings.Join([]string{`&ResourceQuotaSpec{`, - `Hard:` + mapStringForHard + `,`, - `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, - `ScopeSelector:` + strings.Replace(this.ScopeSelector.String(), "ScopeSelector", "ScopeSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuotaStatus) String() string { - if this == nil { - return "nil" - } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = 
append(keysForHard, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) - } - mapStringForHard += "}" - keysForUsed := make([]string, 0, len(this.Used)) - for k := range this.Used { - keysForUsed = append(keysForUsed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - mapStringForUsed := "ResourceList{" - for _, k := range keysForUsed { - mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) - } - mapStringForUsed += "}" - s := strings.Join([]string{`&ResourceQuotaStatus{`, - `Hard:` + mapStringForHard + `,`, - `Used:` + mapStringForUsed + `,`, - `}`, - }, "") - return s -} -func (this *ResourceRequirements) String() string { - if this == nil { - return "nil" - } - keysForLimits := make([]string, 0, len(this.Limits)) - for k := range this.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - mapStringForLimits := "ResourceList{" - for _, k := range keysForLimits { - mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) - } - mapStringForLimits += "}" - keysForRequests := make([]string, 0, len(this.Requests)) - for k := range this.Requests { - keysForRequests = append(keysForRequests, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - mapStringForRequests := "ResourceList{" - for _, k := range keysForRequests { - mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) - } - mapStringForRequests += "}" - s := strings.Join([]string{`&ResourceRequirements{`, - `Limits:` + mapStringForLimits + `,`, - `Requests:` + mapStringForRequests + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxOptions{`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Role:` + fmt.Sprintf("%v", this.Role) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Level:` + fmt.Sprintf("%v", this.Level) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleIOPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "SecretReference", "SecretReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleIOVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleIOVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + 
fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScopeSelector) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchExpressions := "[]ScopedResourceSelectorRequirement{" - for _, f := range this.MatchExpressions { - repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + "," - } - repeatedStringForMatchExpressions += "}" - s := strings.Join([]string{`&ScopeSelector{`, - `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, - `}`, - }, "") - return s -} -func (this *ScopedResourceSelectorRequirement) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, - `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *Secret) String() string { - if this == nil { - return "nil" - } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string][]byte{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) - } - mapStringForData += "}" - keysForStringData := make([]string, 0, len(this.StringData)) - for k := range this.StringData { - keysForStringData = append(keysForStringData, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - mapStringForStringData := "map[string]string{" - for _, k := range keysForStringData { - mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) - } - mapStringForStringData += "}" - s := strings.Join([]string{`&Secret{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `StringData:` + mapStringForStringData + `,`, - `Immutable:` + valueToStringGenerated(this.Immutable) + `,`, - `}`, - }, "") - return s -} -func (this *SecretEnvSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretKeySelector) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Secret{" - for _, f := range this.Items { - repeatedStringForItems += 
strings.Replace(strings.Replace(f.String(), "Secret", "Secret", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&SecretList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *SecretProjection) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]KeyToPath{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&SecretProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *SecretVolumeSource) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]KeyToPath{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&SecretVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `Items:` + repeatedStringForItems + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecurityContext) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecurityContext{`, - `Capabilities:` + strings.Replace(this.Capabilities.String(), "Capabilities", "Capabilities", 1) + `,`, - `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, - `SELinuxOptions:` + strings.Replace(this.SELinuxOptions.String(), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, - `WindowsOptions:` + strings.Replace(this.WindowsOptions.String(), "WindowsSecurityContextOptions", "WindowsSecurityContextOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SerializedReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SerializedReference{`, - `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Service) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Service{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + 
`,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccount) String() string { - if this == nil { - return "nil" - } - repeatedStringForSecrets := "[]ObjectReference{" - for _, f := range this.Secrets { - repeatedStringForSecrets += strings.Replace(strings.Replace(f.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + "," - } - repeatedStringForSecrets += "}" - repeatedStringForImagePullSecrets := "[]LocalObjectReference{" - for _, f := range this.ImagePullSecrets { - repeatedStringForImagePullSecrets += strings.Replace(strings.Replace(f.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + "," - } - repeatedStringForImagePullSecrets += "}" - s := strings.Join([]string{`&ServiceAccount{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Secrets:` + repeatedStringForSecrets + `,`, - `ImagePullSecrets:` + repeatedStringForImagePullSecrets + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccountList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]ServiceAccount{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ServiceAccountList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccountTokenProjection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceAccountTokenProjection{`, - `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, - `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Service{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Service", "Service", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&ServiceList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *ServicePort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServicePort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `TargetPort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TargetPort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, - `AppProtocol:` + valueToStringGenerated(this.AppProtocol) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceProxyOptions) String() string { - if this == nil { - return 
"nil" - } - s := strings.Join([]string{`&ServiceProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForPorts := "[]ServicePort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "ServicePort", "ServicePort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ServiceSpec{`, - `Ports:` + repeatedStringForPorts + `,`, - `Selector:` + mapStringForSelector + `,`, - `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, - `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, - `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, - `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, - `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, - `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, - `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, - `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, - `SessionAffinityConfig:` + strings.Replace(this.SessionAffinityConfig.String(), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `IPFamily:` + valueToStringGenerated(this.IPFamily) + `,`, - `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SessionAffinityConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SessionAffinityConfig{`, - `ClientIP:` + strings.Replace(this.ClientIP.String(), "ClientIPConfig", "ClientIPConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(this.SecretRef.String(), "ObjectReference", "ObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageOSVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + 
strings.Replace(this.SecretRef.String(), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Sysctl) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Sysctl{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *TCPSocketAction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TCPSocketAction{`, - `Port:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *Taint) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Taint{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "v1.Time", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Toleration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Toleration{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *TopologySelectorLabelRequirement) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *TopologySelectorTerm) String() string { - if this == nil { - return "nil" - } - repeatedStringForMatchLabelExpressions := "[]TopologySelectorLabelRequirement{" - for _, f := range this.MatchLabelExpressions { - repeatedStringForMatchLabelExpressions += strings.Replace(strings.Replace(f.String(), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + "," - } - repeatedStringForMatchLabelExpressions += "}" - s := strings.Join([]string{`&TopologySelectorTerm{`, - `MatchLabelExpressions:` + repeatedStringForMatchLabelExpressions + `,`, - `}`, - }, "") - return s -} -func (this *TopologySpreadConstraint) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TopologySpreadConstraint{`, - `MaxSkew:` + fmt.Sprintf("%v", this.MaxSkew) + `,`, - `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, - `WhenUnsatisfiable:` + fmt.Sprintf("%v", this.WhenUnsatisfiable) + `,`, - `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TypedLocalObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TypedLocalObjectReference{`, - `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Volume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Volume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), 
"VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeDevice) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeDevice{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeMount) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeMount{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, - `SubPathExpr:` + fmt.Sprintf("%v", this.SubPathExpr) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeNodeAffinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeNodeAffinity{`, - `Required:` + strings.Replace(this.Required.String(), "NodeSelector", "NodeSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeProjection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeProjection{`, - `Secret:` + strings.Replace(this.Secret.String(), "SecretProjection", "SecretProjection", 1) + `,`, - `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, - `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapProjection", "ConfigMapProjection", 1) + `,`, - `ServiceAccountToken:` + strings.Replace(this.ServiceAccountToken.String(), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeSource{`, - `HostPath:` + strings.Replace(this.HostPath.String(), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `EmptyDir:` + strings.Replace(this.EmptyDir.String(), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, - `GCEPersistentDisk:` + strings.Replace(this.GCEPersistentDisk.String(), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(this.AWSElasticBlockStore.String(), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `GitRepo:` + strings.Replace(this.GitRepo.String(), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, - `Secret:` + strings.Replace(this.Secret.String(), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(this.NFS.String(), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(this.ISCSI.String(), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(this.Glusterfs.String(), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, - `PersistentVolumeClaim:` + strings.Replace(this.PersistentVolumeClaim.String(), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(this.RBD.String(), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(this.FlexVolume.String(), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(this.Cinder.String(), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(this.CephFS.String(), "CephFSVolumeSource", 
"CephFSVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(this.Flocker.String(), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `DownwardAPI:` + strings.Replace(this.DownwardAPI.String(), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, - `FC:` + strings.Replace(this.FC.String(), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(this.AzureFile.String(), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, - `ConfigMap:` + strings.Replace(this.ConfigMap.String(), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(this.VsphereVolume.String(), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(this.Quobyte.String(), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(this.AzureDisk.String(), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(this.PhotonPersistentDisk.String(), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(this.PortworxVolume.String(), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(this.ScaleIO.String(), "ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, - `Projected:` + strings.Replace(this.Projected.String(), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(this.StorageOS.String(), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(this.CSI.String(), "CSIVolumeSource", "CSIVolumeSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VsphereVirtualDiskVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, - `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, - `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, - `}`, - }, "") - return s -} -func (this *WeightedPodAffinityTerm) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WeightedPodAffinityTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *WindowsSecurityContextOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WindowsSecurityContextOptions{`, - `GMSACredentialSpecName:` + valueToStringGenerated(this.GMSACredentialSpecName) + `,`, - `GMSACredentialSpec:` + valueToStringGenerated(this.GMSACredentialSpec) + `,`, - `RunAsUserName:` + valueToStringGenerated(this.RunAsUserName) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift 
- if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Partition |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Affinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Affinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeAffinity == nil { - m.NodeAffinity = &NodeAffinity{} - } - if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodAffinity == nil { - m.PodAffinity = &PodAffinity{} - } - if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodAntiAffinity == nil { - m.PodAntiAffinity = &PodAntiAffinity{} - } - if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttachedVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DevicePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AvoidPods) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) - if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 
- for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DiskName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataDiskURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) - m.CachingMode = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var 
v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShareName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.SecretNamespace = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShareName = string(dAtA[iNdEx:postIndex]) - iNdEx = 
postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Binding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Binding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) 
<< shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeHandle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx 
< postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.VolumeAttributes[mapkey] = mapvalue - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ControllerPublishSecretRef == nil { - m.ControllerPublishSecretRef = &SecretReference{} - } - if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeStageSecretRef == nil { - m.NodeStageSecretRef = &SecretReference{} - } - 
if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &SecretReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerExpandSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ControllerExpandSecretRef == nil { - m.ControllerExpandSecretRef = &SecretReference{} - } - if err := m.ControllerExpandSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&WeightedPodAffinityTerm{`, + `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, + `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" } - return nil + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } -func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28559,7 +17310,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -28567,15 +17318,15 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CSIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 
2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -28587,7 +17338,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -28597,36 +17348,12 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b - case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } @@ -28640,7 +17367,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -28650,20 +17377,16 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) } - var msglen int + m.Partition = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28673,124 +17396,16 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Partition |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - 
postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.VolumeAttributes[mapkey] = mapvalue - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28800,28 +17415,12 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &LocalObjectReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -28831,9 +17430,6 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28846,7 +17442,7 @@ func (m *CSIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *Capabilities) Unmarshal(dAtA []byte) error { +func (m *Affinity) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28861,7 +17457,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -28869,17 +17465,17 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") + return fmt.Errorf("proto: Affinity: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Affinity: illegal tag %d (wire 
type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28889,29 +17485,30 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) + if m.NodeAffinity == nil { + m.NodeAffinity = &NodeAffinity{} + } + if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -28921,23 +17518,57 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.PodAffinity == nil { + m.PodAffinity = &PodAffinity{} + } + if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + if m.PodAntiAffinity == nil { + m.PodAntiAffinity = &PodAntiAffinity{} + } + if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -28948,9 +17579,6 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -28963,7 +17591,7 @@ func (m *Capabilities) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *AttachedVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -28978,7 +17606,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ 
-28986,15 +17614,15 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29006,7 +17634,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29016,81 +17644,14 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) + m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29102,7 +17663,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29112,70 +17673,11 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.SecretFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.DevicePath = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -29185,9 +17687,6 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29200,7 +17699,7 @@ func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { +func (m *AvoidPods) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29215,7 +17714,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29223,143 +17722,15 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - 
if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -29371,48 +17742,23 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + if msglen < 0 { + return ErrInvalidLengthGenerated } - m.ReadOnly = bool(v != 0) + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) + if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -29422,9 +17768,6 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA 
[]byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29437,7 +17780,7 @@ func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29452,7 +17795,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29460,15 +17803,15 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29480,7 +17823,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29490,17 +17833,43 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DiskName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + m.DataDiskURI = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29512,7 +17881,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29522,15 +17891,43 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) + m.CachingMode = &s + iNdEx = postIndex + case 
4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) + m.FSType = &s iNdEx = postIndex - case 3: + case 5: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } @@ -29544,17 +17941,18 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) - case 4: + b := bool(v != 0) + m.ReadOnly = &b + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29564,27 +17962,21 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) + m.Kind = &s iNdEx = postIndex default: iNdEx = preIndex @@ -29595,9 +17987,6 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29610,7 +17999,7 @@ func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { +func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29625,7 +18014,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29633,15 +18022,15 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field VolumeID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29653,7 +18042,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29663,17 +18052,14 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeID = string(dAtA[iNdEx:postIndex]) + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29685,7 +18071,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29695,13 +18081,10 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.FSType = string(dAtA[iNdEx:postIndex]) + m.ShareName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { @@ -29717,7 +18100,7 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -29725,9 +18108,9 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { m.ReadOnly = bool(v != 0) case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29737,27 +18120,21 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := string(dAtA[iNdEx:postIndex]) + m.SecretNamespace = &s iNdEx = postIndex default: iNdEx = preIndex @@ -29768,82 +18145,6 @@ func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -29856,7 +18157,7 @@ func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { +func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -29871,7 +18172,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29879,15 +18180,15 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") + return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29899,7 +18200,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29909,17 +18210,14 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) + m.SecretName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -29931,7 +18229,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -29941,51 +18239,16 @@ func 
(m *ComponentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) + m.ShareName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -29995,24 +18258,12 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30022,9 +18273,6 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30037,7 +18285,7 @@ func (m *ComponentCondition) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { +func (m *Binding) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30052,7 +18300,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -30060,10 +18308,10 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") + return fmt.Errorf("proto: Binding: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatus: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -30080,7 +18328,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -30089,9 +18337,6 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -30101,7 +18346,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30113,7 +18358,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -30122,14 +18367,10 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Conditions = append(m.Conditions, ComponentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -30142,9 +18383,6 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30157,7 +18395,7 @@ func (m *ComponentStatus) Unmarshal(dAtA []byte) error { } return nil } -func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { +func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30172,7 +18410,7 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -30180,17 +18418,17 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ComponentStatusList: wiretype end group for non-group") + return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30200,30 +18438,26 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Driver = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return 
fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30233,82 +18467,73 @@ func (m *ComponentStatusList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ComponentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.VolumeHandle = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMap) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMap: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMap: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.FSType = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30320,7 +18545,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -30329,21 +18554,10 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - 
return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30353,122 +18567,89 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - if m.Data == nil { - m.Data = make(map[string]string) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.VolumeAttributes == nil { + m.VolumeAttributes = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + b := 
dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - if skippy < 0 { - return ErrInvalidLengthGenerated + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if (iNdEx + skippy) > postIndex { + if iNdEx >= l { return io.ErrUnexpectedEOF } - iNdEx += skippy + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.VolumeAttributes[mapkey] = mapvalue + } else { + var mapvalue string + m.VolumeAttributes[mapkey] = mapvalue } - m.Data[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BinaryData", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30480,7 +18661,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -30489,116 +18670,54 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.BinaryData == nil { - m.BinaryData = make(map[string][]byte) + if m.ControllerPublishSecretRef == nil { + m.ControllerPublishSecretRef = &SecretReference{} } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } + if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= uint64(b&0x7F) << 
shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex < 0 { - return ErrInvalidLengthGenerated - } - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } } - m.BinaryData[mapkey] = mapvalue + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodeStageSecretRef == nil { + m.NodeStageSecretRef = &SecretReference{} + } + if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType) + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30608,13 +18727,25 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Immutable = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.NodePublishSecretRef == nil { + m.NodePublishSecretRef = &SecretReference{} + } + if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30624,9 +18755,6 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30639,7 +18767,7 @@ func (m *ConfigMap) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { +func (m *Capabilities) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30654,7 +18782,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -30662,17 +18790,17 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapEnvSource: wiretype end group for non-group") + return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + 
return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30682,30 +18810,26 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30715,13 +18839,21 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30731,9 +18863,6 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -30746,7 +18875,7 @@ func (m *ConfigMapEnvSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { +func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -30761,7 +18890,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -30769,17 +18898,17 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapKeySelector: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30789,28 +18918,24 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen 
|= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -30822,7 +18947,7 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -30832,19 +18957,16 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Key = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } - var v int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30854,71 +18976,26 @@ func (m *ConfigMapKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfigMapList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfigMapList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.User = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -30928,28 +19005,24 @@ func (m 
*ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.SecretFile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -30961,7 +19034,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -30970,17 +19043,36 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, ConfigMap{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -30990,9 +19082,6 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31005,7 +19094,7 @@ func (m *ConfigMapList) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { +func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31020,7 +19109,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -31028,15 +19117,15 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: wiretype end group for non-group") + return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapNodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Monitors", 
wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31048,7 +19137,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -31058,17 +19147,14 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Namespace = string(dAtA[iNdEx:postIndex]) + m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31080,7 +19166,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -31090,17 +19176,14 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31112,7 +19195,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -31122,17 +19205,14 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + m.User = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -31144,7 +19224,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -31154,19 +19234,16 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.SecretFile = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletConfigKey", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 
ErrIntOverflowGenerated @@ -31176,24 +19253,45 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.KubeletConfigKey = string(dAtA[iNdEx:postIndex]) + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31203,9 +19301,6 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31218,7 +19313,7 @@ func (m *ConfigMapNodeConfigSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { +func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31233,7 +19328,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -31241,17 +19336,17 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapProjection: wiretype end group for non-group") + return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapProjection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31261,30 +19356,26 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31294,29 +19385,24 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: + case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -31328,13 +19414,45 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + m.ReadOnly = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &SecretReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31344,9 +19462,6 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -31359,7 +19474,7 @@ func (m *ConfigMapProjection) Unmarshal(dAtA []byte) error { } return nil } -func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { +func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -31374,7 +19489,7 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -31382,17 +19497,17 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: ConfigMapVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: ConfigMapVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } 
switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31402,30 +19517,26 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.VolumeID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31435,31 +19546,26 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) } - var v int32 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31469,17 +19575,17 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.DefaultMode = &v + m.ReadOnly = bool(v != 0) case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -31489,13 +19595,25 @@ func (m *ConfigMapVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Optional = &b + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SecretRef == nil { + m.SecretRef = &LocalObjectReference{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -31505,9 +19623,6 @@ func (m 
[vendored generated-code diff: unified-diff hunks over the machine-generated protobuf Unmarshal methods in the vendored Kubernetes core v1 generated.pb.go, covering the ClientIPConfig, ComponentCondition, ComponentStatus, ComponentStatusList, ConfigMap, ConfigMapEnvSource, ConfigMapKeySelector, ConfigMapList, ConfigMapNodeConfigSource, ConfigMapProjection, ConfigMapVolumeSource, Container, ContainerImage, ContainerPort, ContainerState, ContainerStateRunning, ContainerStateTerminated, ContainerStateWaiting, ContainerStatus, DaemonEndpoint, DownwardAPIProjection, and DownwardAPIVolumeFile types; these hunks swap one generated version of the varint/length-prefix decoding code for another and touch no hand-written code.]
Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Waiting", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33721,7 +22202,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -33730,22 +22211,19 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} + if m.Waiting == nil { + m.Waiting = &ContainerStateWaiting{} } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Waiting.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Running", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33757,7 +22235,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -33766,24 +22244,21 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} + if m.Running == nil { + m.Running = &ContainerStateRunning{} } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.Running.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Terminated", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33793,12 +22268,25 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Mode = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Terminated == nil { + m.Terminated = &ContainerStateTerminated{} + } + if err := m.Terminated.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = 
preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33808,9 +22296,6 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33823,7 +22308,7 @@ func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { } return nil } -func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ContainerStateRunning) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33838,7 +22323,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -33846,15 +22331,15 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateRunning: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateRunning: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -33866,7 +22351,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -33875,37 +22360,13 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -33915,9 +22376,6 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -33930,7 +22388,7 @@ func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { +func (m *ContainerStateTerminated) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -33945,7 +22403,7 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -33953,17 +22411,17 @@ 
func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateTerminated: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateTerminated: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExitCode", wireType) } - var stringLen uint64 + m.ExitCode = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -33973,29 +22431,35 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.ExitCode |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) } - if postIndex > l { - return io.ErrUnexpectedEOF + m.Signal = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Signal |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34005,84 +22469,24 @@ func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.SizeLimit == nil { - m.SizeLimit = &resource.Quantity{} - } - if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointAddress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34094,7 +22498,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -34104,17 +22508,14 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.IP = string(dAtA[iNdEx:postIndex]) + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field StartedAt", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34126,7 +22527,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -34135,24 +22536,18 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} - } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.StartedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 3: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FinishedAt", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34162,27 +22557,25 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Hostname = string(dAtA[iNdEx:postIndex]) + if err := m.FinishedAt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 4: + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34194,7 +22587,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b 
< 0x80 { break } @@ -34204,14 +22597,10 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -34222,9 +22611,6 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34237,7 +22623,7 @@ func (m *EndpointAddress) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointPort) Unmarshal(dAtA []byte) error { +func (m *ContainerStateWaiting) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34252,7 +22638,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -34260,15 +22646,15 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStateWaiting: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStateWaiting: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34280,7 +22666,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -34290,68 +22676,14 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Reason = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - 
case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -34363,7 +22695,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -34373,14 +22705,10 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.AppProtocol = &s + m.Message = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -34391,9 +22719,6 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34406,7 +22731,7 @@ func (m *EndpointPort) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { +func (m *ContainerStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34421,7 +22746,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -34429,17 +22754,17 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + return fmt.Errorf("proto: ContainerStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: ContainerStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34449,29 +22774,24 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34483,7 +22803,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b 
< 0x80 { break } @@ -34492,20 +22812,16 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.State.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field LastTerminationState", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34517,7 +22833,7 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -34526,75 +22842,86 @@ func (m *EndpointSubset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, EndpointPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.LastTerminationState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Ready", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated + m.Ready = bool(v != 0) + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RestartCount", wireType) } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + m.RestartCount = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RestartCount |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Endpoints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { 
- return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ImageID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34604,30 +22931,26 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ImageID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ContainerID", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34637,25 +22960,20 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.ContainerID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -34666,9 +22984,6 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34681,7 +22996,7 @@ func (m *Endpoints) Unmarshal(dAtA []byte) error { } return nil } -func (m *EndpointsList) Unmarshal(dAtA []byte) error { +func (m *DaemonEndpoint) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34696,7 +23011,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -34704,50 +23019,17 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + return fmt.Errorf("proto: DaemonEndpoint: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DaemonEndpoint: 
illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var msglen int + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -34757,26 +23039,11 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Port |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -34786,9 +23053,6 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34801,7 +23065,7 @@ func (m *EndpointsList) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIProjection) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34816,7 +23080,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -34824,83 +23088,15 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIProjection: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIProjection: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.Prefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} - } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -34912,7 +23108,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -34921,16 +23117,11 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecretRef == nil { - m.SecretRef = &SecretEnvSource{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -34943,9 +23134,6 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -34958,7 +23146,7 @@ func (m *EnvFromSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVar) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeFile) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -34973,7 +23161,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -34981,15 +23169,15 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeFile: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeFile: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35001,7 +23189,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35011,19 +23199,16 @@ func (m 
*EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.Path = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35033,27 +23218,28 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Value = string(dAtA[iNdEx:postIndex]) + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35065,7 +23251,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35074,19 +23260,36 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) + } + var v int32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Mode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -35096,9 +23299,6 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35111,7 +23311,7 @@ func (m *EnvVar) Unmarshal(dAtA []byte) error { } return nil } -func (m *EnvVarSource) Unmarshal(dAtA []byte) error { +func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35126,7 +23326,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35134,15 +23334,15 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { 
fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") + return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35154,7 +23354,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35163,96 +23363,19 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Items = append(m.Items, DownwardAPIVolumeFile{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) } - var msglen int + var v int32 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35262,28 +23385,12 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if 
msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} - } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + m.DefaultMode = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -35293,9 +23400,6 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35308,7 +23412,7 @@ func (m *EnvVarSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { +func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35323,7 +23427,7 @@ func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35331,17 +23435,17 @@ func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EphemeralContainer: wiretype end group for non-group") + return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EphemeralContainer: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerCommon", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35351,30 +23455,26 @@ func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.EphemeralContainerCommon.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetContainerName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35384,23 +23484,24 @@ func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } - m.TargetContainerName = string(dAtA[iNdEx:postIndex]) + if m.SizeLimit == nil { + m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} + } + if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -35411,9 +23512,6 @@ func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -35426,7 +23524,7 @@ func (m *EphemeralContainer) Unmarshal(dAtA []byte) error { } return nil } -func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { +func (m *EndpointAddress) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -35441,7 +23539,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35449,15 +23547,15 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EphemeralContainerCommon: wiretype end group for non-group") + return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EphemeralContainerCommon: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35469,7 +23567,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35479,19 +23577,16 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + m.IP = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35501,27 +23596,28 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Image = string(dAtA[iNdEx:postIndex]) + if m.TargetRef == nil { + m.TargetRef = &ObjectReference{} + } + if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Command", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35533,7 +23629,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35543,17 +23639,14 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35565,7 +23658,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35575,17 +23668,65 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + s := string(dAtA[iNdEx:postIndex]) + m.NodeName = &s iNdEx = postIndex - case 5: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointPort) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WorkingDir", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35597,7 +23738,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35607,19 +23748,16 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.WorkingDir = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Ports", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) } - var msglen int + m.Port = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35629,29 +23767,93 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + m.Port |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) } - postIndex := iNdEx + msglen - if postIndex < 0 { + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Ports = append(m.Ports, ContainerPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Protocol = Protocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { return err } - iNdEx = postIndex - case 7: + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointSubset) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35663,7 +23865,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35672,20 +23874,17 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Addresses = append(m.Addresses, EndpointAddress{}) + if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 8: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + return fmt.Errorf("proto: wrong wireType = %d for 
field NotReadyAddresses", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35697,7 +23896,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35706,19 +23905,17 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) + if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 9: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35730,7 +23927,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35739,20 +23936,67 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Ports = append(m.Ports, EndpointPort{}) + if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 10: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Endpoints) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LivenessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35764,7 +24008,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35773,22 +24017,16 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated 
- } if postIndex > l { return io.ErrUnexpectedEOF } - if m.LivenessProbe == nil { - m.LivenessProbe = &Probe{} - } - if err := m.LivenessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 11: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadinessProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35800,7 +24038,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35809,22 +24047,67 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.ReadinessProbe == nil { - m.ReadinessProbe = &Probe{} - } - if err := m.ReadinessProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.Subsets = append(m.Subsets, EndpointSubset{}) + if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 12: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EndpointsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Lifecycle", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35836,7 +24119,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35845,24 +24128,18 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Lifecycle == nil { - m.Lifecycle = &Lifecycle{} - } - if err := m.Lifecycle.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 13: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePath", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -35872,27 +24149,76 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePath = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, Endpoints{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 14: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvFromSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullPolicy", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -35904,7 +24230,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -35914,17 +24240,14 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.ImagePullPolicy = PullPolicy(dAtA[iNdEx:postIndex]) + m.Prefix = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 15: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecurityContext", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -35936,7 +24259,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -35945,82 +24268,19 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.SecurityContext == nil { - m.SecurityContext = &SecurityContext{} + if m.ConfigMapRef == nil { + m.ConfigMapRef = 
&ConfigMapEnvSource{} } - if err := m.SecurityContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Stdin", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Stdin = bool(v != 0) - case 17: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StdinOnce", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.StdinOnce = bool(v != 0) - case 18: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTY", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.TTY = bool(v != 0) - case 19: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36032,7 +24292,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36041,20 +24301,69 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretRef == nil { + m.SecretRef = &SecretEnvSource{} + } + if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 20: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnvVar) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TerminationMessagePolicy", wireType) + return fmt.Errorf("proto: wrong wireType 
= %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -36066,7 +24375,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36076,19 +24385,16 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.TerminationMessagePolicy = TerminationMessagePolicy(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 21: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeDevices", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -36098,29 +24404,24 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeDevices = append(m.VolumeDevices, VolumeDevice{}) - if err := m.VolumeDevices[len(m.VolumeDevices)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Value = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 22: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartupProbe", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36132,7 +24433,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36141,16 +24442,13 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.StartupProbe == nil { - m.StartupProbe = &Probe{} + if m.ValueFrom == nil { + m.ValueFrom = &EnvVarSource{} } - if err := m.StartupProbe.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36163,9 +24461,6 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36178,7 +24473,7 @@ func (m *EphemeralContainerCommon) Unmarshal(dAtA []byte) error { } return nil } -func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { +func (m *EnvVarSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -36193,7 +24488,7 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36201,15 
+24496,15 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: EphemeralContainers: wiretype end group for non-group") + return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: EphemeralContainers: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36221,7 +24516,7 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36230,19 +24525,52 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.FieldRef == nil { + m.FieldRef = &ObjectFieldSelector{} + } + if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.ResourceFieldRef == nil { + m.ResourceFieldRef = &ResourceFieldSelector{} + } + if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -36254,7 +24582,7 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36263,14 +24591,46 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ConfigMapKeyRef == nil { + m.ConfigMapKeyRef = &ConfigMapKeySelector{} + } + if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return 
io.ErrUnexpectedEOF } - m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) - if err := m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.SecretKeyRef == nil { + m.SecretKeyRef = &SecretKeySelector{} + } + if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -36283,9 +24643,6 @@ func (m *EphemeralContainers) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36313,7 +24670,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36341,7 +24698,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36350,9 +24707,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36374,7 +24728,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36383,9 +24737,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36407,7 +24758,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36417,9 +24768,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36439,7 +24787,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36449,9 +24797,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36471,7 +24816,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36480,9 +24825,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36504,7 +24846,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36513,9 +24855,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36537,7 +24876,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36546,9 +24885,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36570,7 +24906,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= int32(b&0x7F) << shift + m.Count |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -36589,7 +24925,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36599,9 +24935,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36621,7 +24954,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36630,9 +24963,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36654,7 +24984,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36663,9 +24993,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36690,7 +25017,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36700,9 +25027,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36722,7 +25046,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36731,9 +25055,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36758,7 +25079,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36768,9 +25089,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36790,7 +25108,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36800,9 +25118,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 
0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36817,9 +25132,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36847,7 +25159,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36875,7 +25187,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36884,9 +25196,6 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36908,7 +25217,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -36917,9 +25226,6 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -36937,9 +25243,6 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -36967,7 +25270,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -36995,7 +25298,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= int32(b&0x7F) << shift + m.Count |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -37014,7 +25317,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -37023,9 +25326,6 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37047,7 +25347,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37057,9 +25357,6 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37074,9 +25371,6 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37104,7 +25398,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37132,7 +25426,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << 
shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37142,9 +25436,6 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37164,7 +25455,7 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37174,9 +25465,6 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37191,9 +25479,6 @@ func (m *EventSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37221,7 +25506,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37249,7 +25534,7 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37259,9 +25544,6 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37276,9 +25558,6 @@ func (m *ExecAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37306,7 +25585,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37334,7 +25613,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37344,9 +25623,6 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37366,7 +25642,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -37386,7 +25662,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37396,9 +25672,6 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37418,7 +25691,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -37438,7 +25711,7 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37448,9 +25721,6 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37465,9 +25735,6 @@ func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37495,7 +25762,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37523,7 +25790,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37533,9 +25800,6 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37555,7 +25819,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37565,9 +25829,6 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37587,7 +25848,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -37596,9 +25857,6 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37623,7 +25881,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -37643,7 +25901,7 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -37652,20 +25910,54 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if 
intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Options == nil { m.Options = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -37675,86 +25967,41 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue } - m.Options[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -37765,9 +26012,6 @@ func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -37795,7 +26039,7 @@ 
func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37823,7 +26067,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37833,9 +26077,6 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37855,7 +26096,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -37865,9 +26106,6 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37887,7 +26125,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -37896,9 +26134,6 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -37923,7 +26158,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -37943,7 +26178,7 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -37952,168 +26187,10 @@ func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.Options == nil { - m.Options = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Options[mapkey] = mapvalue - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) - } - var stringLen uint64 + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38123,29 +26200,12 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DatasetName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) - } - var stringLen uint64 + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -38155,180 +26215,71 @@ func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DatasetUUID = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Options == nil { + m.Options = make(map[string]string) } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PDName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx 
= postStringIndexmapvalue + m.Options[mapkey] = mapvalue + } else { + var mapvalue string + m.Options[mapkey] = mapvalue } - m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Partition |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -38338,9 +26289,6 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38353,7 +26301,7 @@ func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { +func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -38368,7 +26316,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38376,15 +26324,15 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38396,7 +26344,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38406,17 +26354,14 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Repository = string(dAtA[iNdEx:postIndex]) + m.DatasetName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38428,7 +26373,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38438,17 +26383,64 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Revision = string(dAtA[iNdEx:postIndex]) + m.DatasetUUID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 3: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38460,7 +26452,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38470,14 +26462,79 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PDName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Directory = string(dAtA[iNdEx:postIndex]) + m.FSType = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) + } + m.Partition = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Partition |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -38487,9 +26544,6 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38502,7 +26556,7 @@ func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { +func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -38517,7 +26571,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38525,15 +26579,15 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: wiretype end group for non-group") + return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GlusterfsPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38545,7 +26599,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38555,17 +26609,14 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.EndpointsName = string(dAtA[iNdEx:postIndex]) + m.Repository = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38577,7 +26628,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38587,37 +26638,14 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Path = string(dAtA[iNdEx:postIndex]) + m.Revision = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointsNamespace", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -38629,7 +26657,7 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38639,14 +26667,10 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.EndpointsNamespace = &s + m.Directory = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -38657,9 +26681,6 @@ func (m *GlusterfsPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38687,7 +26708,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38715,7 +26736,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38725,9 +26746,6 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38747,7 +26765,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38757,9 +26775,6 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38779,7 +26794,7 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -38794,9 +26809,6 @@ func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -38824,7 +26836,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38852,7 +26864,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38862,9 +26874,6 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx 
+ intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38884,7 +26893,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -38893,9 +26902,6 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38917,7 +26923,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38927,9 +26933,6 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38949,7 +26952,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -38959,9 +26962,6 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -38981,7 +26981,7 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -38990,9 +26990,6 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39010,9 +27007,6 @@ func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39040,7 +27034,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39068,7 +27062,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39078,9 +27072,6 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39100,7 +27091,7 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39110,9 +27101,6 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39127,9 +27115,6 @@ func (m *HTTPHeader) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -39157,7 +27142,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39185,7 +27170,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -39194,9 +27179,6 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39221,7 +27203,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -39230,9 +27212,6 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39257,7 +27236,7 @@ func (m *Handler) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -39266,9 +27245,6 @@ func (m *Handler) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39288,9 +27264,6 @@ func (m *Handler) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39318,7 +27291,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39346,7 +27319,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39356,9 +27329,6 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39378,7 +27348,7 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39388,9 +27358,6 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39405,9 +27372,6 @@ func (m *HostAlias) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39435,7 +27399,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39463,7 +27427,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } 
@@ -39473,9 +27437,6 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39495,7 +27456,7 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39505,9 +27466,6 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39523,9 +27481,6 @@ func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39553,7 +27508,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39581,7 +27536,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39591,9 +27546,6 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39613,7 +27565,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39623,9 +27575,6 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39645,7 +27594,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= int32(b&0x7F) << shift + m.Lun |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -39664,7 +27613,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39674,9 +27623,6 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39696,7 +27642,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39706,9 +27652,6 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39728,7 +27671,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift 
+ v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -39748,7 +27691,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39758,9 +27701,6 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39780,7 +27720,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -39800,7 +27740,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -39809,9 +27749,6 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39836,7 +27773,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -39856,7 +27793,7 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39866,9 +27803,6 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39884,9 +27818,6 @@ func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -39914,7 +27845,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39942,7 +27873,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39952,9 +27883,6 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -39974,7 +27902,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -39984,9 +27912,6 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40006,7 +27931,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Lun |= int32(b&0x7F) << shift + m.Lun |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ 
-40025,7 +27950,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40035,9 +27960,6 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40057,7 +27979,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40067,9 +27989,6 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40089,7 +28008,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40109,7 +28028,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40119,9 +28038,6 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40141,7 +28057,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40161,7 +28077,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40170,9 +28086,6 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40197,7 +28110,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40217,7 +28130,7 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40227,9 +28140,6 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40245,9 +28155,6 @@ func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40275,7 +28182,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40303,7 +28210,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << 
shift if b < 0x80 { break } @@ -40313,9 +28220,6 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40335,7 +28239,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40345,9 +28249,6 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40367,7 +28268,7 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -40382,9 +28283,6 @@ func (m *KeyToPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40412,7 +28310,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40440,7 +28338,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40449,9 +28347,6 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40476,7 +28371,7 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40485,9 +28380,6 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40507,9 +28399,6 @@ func (m *Lifecycle) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40537,7 +28426,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40565,7 +28454,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40574,9 +28463,6 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40598,7 +28484,7 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40607,9 +28493,6 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -40626,9 +28509,6 @@ func (m *LimitRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -40656,7 +28536,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40684,7 +28564,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -40694,9 +28574,6 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -40716,7 +28593,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40725,20 +28602,54 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Max == nil { m.Max = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40748,88 +28659,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Max[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Max[ResourceName(mapkey)] = mapvalue } - m.Max[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -40845,7 +28714,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40854,20 +28723,54 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Min == nil { m.Min = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -40877,88 +28780,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := 
int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Min[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Min[ResourceName(mapkey)] = mapvalue } - m.Min[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -40974,7 +28835,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -40983,20 +28844,54 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } 
- if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Default == nil { m.Default = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41006,88 +28901,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Default[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Default[ResourceName(mapkey)] = mapvalue } - m.Default[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 5: if wireType != 2 { @@ -41103,7 +28956,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -41112,20 +28965,54 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.DefaultRequest == nil { m.DefaultRequest = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41135,88 +29022,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return 
ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.DefaultRequest[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.DefaultRequest[ResourceName(mapkey)] = mapvalue } - m.DefaultRequest[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 6: if wireType != 2 { @@ -41232,7 +29077,7 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -41241,20 +29086,54 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.MaxLimitRequestRatio == nil { m.MaxLimitRequestRatio = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -41264,88 +29143,46 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return 
ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue } - m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -41356,9 +29193,6 @@ func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41386,7 +29220,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41414,7 +29248,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -41423,9 +29257,6 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41447,7 +29278,7 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -41456,9 +29287,6 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41476,9 +29304,6 @@ func (m *LimitRangeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41506,7 +29331,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41534,7 +29359,7 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -41543,9 +29368,6 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 
0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41563,9 +29385,6 @@ func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41593,7 +29412,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41621,7 +29440,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -41630,9 +29449,6 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41654,7 +29470,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -41663,13 +29479,10 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, runtime.RawExtension{}) + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -41683,9 +29496,6 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41713,7 +29523,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41741,7 +29551,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41751,9 +29561,6 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41773,7 +29580,7 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41783,9 +29590,6 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41800,9 +29604,6 @@ func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41830,7 +29631,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41858,7 +29659,7 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -41867,9 +29668,6 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41887,9 +29685,6 @@ func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -41917,7 +29712,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41945,7 +29740,7 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -41955,9 +29750,6 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -41972,9 +29764,6 @@ func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42002,7 +29791,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42030,7 +29819,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42040,9 +29829,6 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42062,7 +29848,7 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42072,9 +29858,6 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42090,9 +29873,6 @@ func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42120,7 +29900,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42148,7 +29928,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42158,9 +29938,6 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42180,7 +29957,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42190,9 +29967,6 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42212,7 +29986,7 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -42227,9 +30001,6 @@ func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42257,7 +30028,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42285,7 +30056,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -42294,9 +30065,6 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42318,7 +30086,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -42327,9 +30095,6 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42351,7 +30116,7 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -42360,9 +30125,6 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42379,223 +30141,6 @@ func (m *Namespace) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceCondition: illegal tag %d (wire type 
%d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = NamespaceConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42623,7 +30168,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42651,7 +30196,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -42660,9 +30205,6 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42684,7 +30226,7 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -42693,9 +30235,6 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42713,9 +30252,6 @@ func (m *NamespaceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42743,7 +30279,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42771,7 +30307,7 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42781,9 +30317,6 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -42798,9 +30331,6 @@ func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42828,7 +30358,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42856,7 +30386,7 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42866,48 +30396,11 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Phase = NamespacePhase(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, NamespaceCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -42917,9 +30410,6 @@ func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -42947,7 +30437,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -42975,7 +30465,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -42984,9 +30474,6 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43008,7 +30495,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43017,9 +30504,6 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43041,7 +30525,7 @@ func (m *Node) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43050,9 +30534,6 @@ func (m *Node) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43069,9 +30550,6 @@ func (m *Node) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43099,7 +30577,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43127,7 +30605,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43137,9 +30615,6 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43159,7 +30634,7 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43169,9 +30644,6 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43186,9 +30658,6 @@ func (m *NodeAddress) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43216,7 +30685,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43244,7 +30713,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43253,9 +30722,6 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43280,7 +30746,7 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43289,9 +30755,6 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43309,9 +30772,6 @@ func (m *NodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43339,7 +30799,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43367,7 +30827,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43377,9 +30837,6 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43399,7 +30856,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43409,9 +30866,6 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43431,7 +30885,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43440,9 +30894,6 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43464,7 +30915,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43473,9 +30924,6 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43497,7 +30945,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43507,9 +30955,6 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43529,7 +30974,7 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43539,9 +30984,6 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43556,9 +30998,6 @@ func (m *NodeCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43586,7 +31025,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43614,7 +31053,7 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43623,9 +31062,6 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43645,9 +31081,6 @@ func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43675,7 +31108,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43703,7 +31136,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43712,9 +31145,6 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43739,7 +31169,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43748,9 +31178,6 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43775,7 +31202,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << 
shift if b < 0x80 { break } @@ -43784,9 +31211,6 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43811,7 +31235,7 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43821,9 +31245,6 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43838,9 +31259,6 @@ func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43868,7 +31286,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43896,7 +31314,7 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43905,9 +31323,6 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -43924,9 +31339,6 @@ func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -43954,7 +31366,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -43982,7 +31394,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -43991,9 +31403,6 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44015,7 +31424,7 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -44024,9 +31433,6 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44044,9 +31450,6 @@ func (m *NodeList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44074,7 +31477,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44102,7 +31505,7 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44112,9 +31515,6 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44129,9 +31529,6 @@ func (m *NodeProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44159,7 +31556,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44187,7 +31584,7 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -44196,20 +31593,54 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -44219,88 +31650,46 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue } - m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -44311,9 +31700,6 @@ func (m *NodeResources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44341,7 +31727,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44369,7 +31755,7 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -44378,9 +31764,6 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44398,9 +31781,6 @@ func (m *NodeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44428,7 +31808,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44456,7 +31836,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44466,9 +31846,6 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44488,7 +31865,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44498,9 +31875,6 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44520,7 +31894,7 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44530,9 +31904,6 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44547,9 +31918,6 @@ func (m *NodeSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44577,7 +31945,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44605,7 +31973,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -44614,9 +31982,6 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44639,7 +32004,7 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -44648,9 +32013,6 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44668,9 +32030,6 @@ func (m *NodeSelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44698,7 +32057,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44726,7 +32085,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44736,9 +32095,6 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44746,7 +32102,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DoNotUseExternalID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DoNotUse_ExternalID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -44758,7 +32114,7 @@ func (m 
*NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44768,13 +32124,10 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.DoNotUseExternalID = string(dAtA[iNdEx:postIndex]) + m.DoNotUse_ExternalID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 3: if wireType != 2 { @@ -44790,7 +32143,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44800,9 +32153,6 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44822,7 +32172,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -44842,7 +32192,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -44851,9 +32201,6 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44876,7 +32223,7 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -44885,9 +32232,6 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -44898,38 +32242,6 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodCIDRs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodCIDRs = append(m.PodCIDRs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -44939,9 +32251,6 @@ func (m *NodeSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -44969,7 +32278,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -44997,7 +32306,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= 
int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -45006,20 +32315,54 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Capacity == nil { m.Capacity = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45029,88 +32372,46 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - 
iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Capacity[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Capacity[ResourceName(mapkey)] = mapvalue } - m.Capacity[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -45126,7 +32427,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -45135,20 +32436,54 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Allocatable == nil { m.Allocatable = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -45158,88 +32493,46 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return 
ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Allocatable[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Allocatable[ResourceName(mapkey)] = mapvalue } - m.Allocatable[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -45255,7 +32548,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -45265,9 +32558,6 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45287,7 +32577,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -45296,9 +32586,6 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45321,7 +32608,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -45330,9 +32617,6 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45355,7 +32639,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -45364,9 +32648,6 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -45388,7 +32669,7 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -45397,9 +32678,6 @@ func (m *NodeStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
[diff hunks flattened during extraction; summarized here. These hunks continue the same vendored protobuf-generated Kubernetes core/v1 Go file (the exact vendor path appears with the file header earlier in the patch) and rewrite the generated Unmarshal methods for NodeStatus, NodeSystemInfo, ObjectFieldSelector, ObjectReference, the PersistentVolume* / PersistentVolumeClaim* family, PhotonPersistentDiskVolumeSource, Pod, PodAffinity, PodAffinityTerm, PodAntiAffinity, PodAttachOptions, PodCondition, PodDNSConfig, and PodDNSConfigOption (old-side hunk headers roughly @@ -45421 through @@ -50542, new side roughly 32699 through 37417). The mechanical change is the same throughout: varint masks move from int(b&0x7F) / uint64(b&0x7F) to the older (int(b) & 0x7F) / (uint64(b) & 0x7F) form; the newer "if postIndex < 0" and "if (iNdEx + skippy) < 0" overflow guards are dropped; ResourceList map decoding reverts to the older keykey/valuekey single-entry loop; and type references change from v1.LabelSelector, resource.Quantity, and GlusterfsPersistentVolumeSource to k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector, k8s_io_apimachinery_pkg_api_resource.Quantity, and GlusterfsVolumeSource.]
(uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -50552,9 +37427,6 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50570,9 +37442,6 @@ func (m *PodDNSConfigOption) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50600,7 +37469,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -50628,7 +37497,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -50648,7 +37517,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -50668,7 +37537,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -50688,7 +37557,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -50708,7 +37577,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -50718,9 +37587,6 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50740,7 +37606,7 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -50750,9 +37616,6 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50767,94 +37630,6 @@ func (m *PodExecOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodIP) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodIP: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodIP: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -50882,7 +37657,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -50910,7 +37685,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -50919,9 +37694,6 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50943,7 +37715,7 @@ func (m *PodList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -50952,9 +37724,6 @@ func (m *PodList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -50972,9 +37741,6 @@ func (m *PodList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51002,7 +37768,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51030,7 +37796,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51040,9 +37806,6 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51062,7 +37825,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51082,7 +37845,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51102,7 +37865,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51122,7 +37885,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen 
|= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51131,14 +37894,11 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.SinceTime == nil { - m.SinceTime = &v1.Time{} + m.SinceTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.SinceTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -51158,7 +37918,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51178,7 +37938,7 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51198,32 +37958,12 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } m.LimitBytes = &v - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InsecureSkipTLSVerifyBackend", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.InsecureSkipTLSVerifyBackend = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51233,9 +37973,6 @@ func (m *PodLogOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51263,7 +38000,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51289,7 +38026,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -51306,7 +38043,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= int(b&0x7F) << shift + packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51315,23 +38052,9 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.Ports) == 0 { - m.Ports = make([]int32, 0, elementCount) - } for iNdEx < postIndex { var v int32 for shift := uint(0); ; shift += 7 { @@ -51343,7 +38066,7 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -51362,9 +38085,6 @@ func (m *PodPortForwardOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51392,7 +38112,7 
@@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51420,7 +38140,7 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51430,9 +38150,6 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51447,9 +38164,6 @@ func (m *PodProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51477,7 +38191,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51505,7 +38219,7 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51515,9 +38229,6 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51532,9 +38243,6 @@ func (m *PodReadinessGate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51562,7 +38270,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51590,7 +38298,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51599,9 +38307,6 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51626,7 +38331,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51646,7 +38351,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51665,7 +38370,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51682,7 +38387,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - packedLen |= int(b&0x7F) << shift + packedLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51691,23 +38396,9 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + packedLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - 
var elementCount int - var count int - for _, integer := range dAtA[iNdEx:postIndex] { - if integer < 128 { - count++ - } - } - elementCount = count - if elementCount != 0 && len(m.SupplementalGroups) == 0 { - m.SupplementalGroups = make([]int64, 0, elementCount) - } for iNdEx < postIndex { var v int64 for shift := uint(0); ; shift += 7 { @@ -51719,7 +38410,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51743,7 +38434,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51763,7 +38454,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -51783,7 +38474,7 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51792,9 +38483,6 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -51803,75 +38491,6 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WindowsOptions == nil { - m.WindowsOptions = &WindowsSecurityContextOptions{} - } - if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroupChangePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PodFSGroupChangePolicy(dAtA[iNdEx:postIndex]) - m.FSGroupChangePolicy = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -51881,9 +38500,6 @@ func (m *PodSecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -51911,7 +38527,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } 
@@ -51939,7 +38555,7 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -51948,14 +38564,11 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodController == nil { - m.PodController = &v1.OwnerReference{} + m.PodController = &k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference{} } if err := m.PodController.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -51970,9 +38583,6 @@ func (m *PodSignature) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -52000,7 +38610,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52028,7 +38638,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52037,9 +38647,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52062,7 +38669,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52071,9 +38678,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52096,7 +38700,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52106,9 +38710,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52128,7 +38729,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52148,7 +38749,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52168,7 +38769,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52178,9 +38779,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52200,7 +38798,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52209,20 +38807,54 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.NodeSelector == nil { m.NodeSelector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -52232,86 +38864,41 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 
{ + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.NodeSelector[mapkey] = mapvalue + } else { + var mapvalue string + m.NodeSelector[mapkey] = mapvalue } - m.NodeSelector[mapkey] = mapvalue iNdEx = postIndex case 8: if wireType != 2 { @@ -52327,7 +38914,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52337,9 +38924,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52359,7 +38943,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52369,9 +38953,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52391,7 +38972,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52401,9 +38982,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52423,7 +39001,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52443,7 +39021,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52463,7 +39041,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52483,7 +39061,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52492,9 +39070,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52519,7 +39094,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52528,9 +39103,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52553,7 +39125,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52563,9 +39135,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := 
iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52585,7 +39154,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52595,9 +39164,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52617,7 +39183,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52626,9 +39192,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52653,7 +39216,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52663,9 +39226,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52685,7 +39245,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52694,9 +39254,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52719,7 +39276,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52740,7 +39297,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52749,9 +39306,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52774,7 +39328,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52783,9 +39337,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52808,7 +39359,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52818,9 +39369,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52840,7 +39388,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= 
(int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -52860,7 +39408,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52869,9 +39417,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52896,7 +39441,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52917,7 +39462,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -52926,9 +39471,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -52951,7 +39493,7 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -52961,266 +39503,12 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.RuntimeClassName = &s iNdEx = postIndex - case 30: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field EnableServiceLinks", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.EnableServiceLinks = &b - case 31: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PreemptionPolicy(dAtA[iNdEx:postIndex]) - m.PreemptionPolicy = &s - iNdEx = postIndex - case 32: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Overhead == nil { - m.Overhead = make(ResourceList) - } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Overhead[ResourceName(mapkey)] = *mapvalue - iNdEx = postIndex - case 33: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologySpreadConstraints", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologySpreadConstraints = append(m.TopologySpreadConstraints, TopologySpreadConstraint{}) - if err := m.TopologySpreadConstraints[len(m.TopologySpreadConstraints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 34: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EphemeralContainers = append(m.EphemeralContainers, EphemeralContainer{}) - if err := 
m.EphemeralContainers[len(m.EphemeralContainers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -53230,9 +39518,6 @@ func (m *PodSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -53260,7 +39545,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53288,7 +39573,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53298,9 +39583,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53320,7 +39602,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -53329,9 +39611,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53354,7 +39633,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53364,9 +39643,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53386,7 +39662,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53396,9 +39672,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53418,7 +39691,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53428,9 +39701,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53450,7 +39720,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53460,9 +39730,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53482,7 +39749,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b 
< 0x80 { break } @@ -53491,14 +39758,11 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.StartTime == nil { - m.StartTime = &v1.Time{} + m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -53518,7 +39782,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -53527,9 +39791,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53552,7 +39813,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53562,9 +39823,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53584,7 +39842,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -53593,9 +39851,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53618,7 +39873,7 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53628,82 +39883,11 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.NominatedNodeName = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodIPs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodIPs = append(m.PodIPs, PodIP{}) - if err := m.PodIPs[len(m.PodIPs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EphemeralContainerStatuses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 
{ - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EphemeralContainerStatuses = append(m.EphemeralContainerStatuses, ContainerStatus{}) - if err := m.EphemeralContainerStatuses[len(m.EphemeralContainerStatuses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -53713,9 +39897,6 @@ func (m *PodStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -53743,7 +39924,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53771,7 +39952,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -53780,9 +39961,6 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53804,7 +39982,7 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -53813,9 +39991,6 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53832,9 +40007,6 @@ func (m *PodStatusResult) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -53862,7 +40034,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -53890,7 +40062,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -53899,9 +40071,6 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53923,7 +40092,7 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -53932,9 +40101,6 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -53951,9 +40117,6 @@ func (m *PodTemplate) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -53981,7 +40144,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { 
break } @@ -54009,7 +40172,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54018,9 +40181,6 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54042,7 +40202,7 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54051,9 +40211,6 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54071,9 +40228,6 @@ func (m *PodTemplateList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -54101,7 +40255,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54129,7 +40283,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54138,9 +40292,6 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54162,7 +40313,7 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54171,9 +40322,6 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54190,9 +40338,6 @@ func (m *PodTemplateSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -54220,7 +40365,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54248,7 +40393,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54258,9 +40403,6 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54280,7 +40422,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54290,9 +40432,6 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54312,7 +40451,7 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54327,9 +40466,6 @@ func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -54357,7 +40493,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54385,7 +40521,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54395,9 +40531,6 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54413,9 +40546,6 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -54443,7 +40573,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54471,7 +40601,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54480,9 +40610,6 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54504,7 +40631,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54513,9 +40640,6 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54537,7 +40661,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54547,9 +40671,6 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54569,7 +40690,7 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54579,9 +40700,6 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - 
} if postIndex > l { return io.ErrUnexpectedEOF } @@ -54596,9 +40714,6 @@ func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -54626,7 +40741,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54654,7 +40769,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= int32(b&0x7F) << shift + m.Weight |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -54673,7 +40788,7 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54682,9 +40797,6 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54701,9 +40813,6 @@ func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -54731,7 +40840,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54759,7 +40868,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54768,9 +40877,6 @@ func (m *Probe) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54792,7 +40898,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.InitialDelaySeconds |= int32(b&0x7F) << shift + m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -54811,7 +40917,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TimeoutSeconds |= int32(b&0x7F) << shift + m.TimeoutSeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -54830,7 +40936,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.PeriodSeconds |= int32(b&0x7F) << shift + m.PeriodSeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -54849,7 +40955,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.SuccessThreshold |= int32(b&0x7F) << shift + m.SuccessThreshold |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -54868,7 +40974,7 @@ func (m *Probe) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FailureThreshold |= int32(b&0x7F) << shift + m.FailureThreshold |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -54882,9 +40988,6 @@ func (m *Probe) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -54912,7 +41015,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -54940,7 +41043,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -54949,9 +41052,6 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -54974,7 +41074,7 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -54989,9 +41089,6 @@ func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -55019,7 +41116,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55047,7 +41144,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55057,9 +41154,6 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55079,7 +41173,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55089,9 +41183,6 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55111,7 +41202,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -55131,7 +41222,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55141,9 +41232,6 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55163,7 +41251,7 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55173,46 +41261,11 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tenant", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tenant = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -55222,9 +41275,6 @@ func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -55252,7 +41302,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55280,7 +41330,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55290,9 +41340,6 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55312,7 +41359,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55322,9 +41369,6 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55344,7 +41388,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55354,9 +41398,6 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55376,7 +41417,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55386,9 +41427,6 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55408,7 +41446,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55418,9 +41456,6 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55440,7 +41475,7 @@ func (m 
*RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55450,9 +41485,6 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55472,7 +41504,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -55481,9 +41513,6 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55508,7 +41537,7 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -55523,9 +41552,6 @@ func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -55553,7 +41579,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55581,7 +41607,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55591,9 +41617,6 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55613,7 +41636,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55623,9 +41646,6 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55645,7 +41665,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55655,9 +41675,6 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55677,7 +41694,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55687,9 +41704,6 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55709,7 +41723,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error 
{ } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55719,9 +41733,6 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55741,7 +41752,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55751,9 +41762,6 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55773,7 +41781,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -55782,9 +41790,6 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55809,7 +41814,7 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -55824,9 +41829,6 @@ func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -55854,7 +41856,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55882,7 +41884,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -55891,9 +41893,6 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55915,7 +41914,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -55925,9 +41924,6 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55947,7 +41943,7 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -55956,9 +41952,6 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -55976,9 +41969,6 @@ func (m *RangeAllocation) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) 
> l { return io.ErrUnexpectedEOF } @@ -56006,7 +41996,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56034,7 +42024,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56043,9 +42033,6 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56067,7 +42054,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56076,9 +42063,6 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56100,7 +42084,7 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56109,9 +42093,6 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56128,9 +42109,6 @@ func (m *ReplicationController) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -56158,7 +42136,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56186,7 +42164,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56196,9 +42174,6 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56218,7 +42193,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56228,9 +42203,6 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56250,7 +42222,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56259,9 +42231,6 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if 
postIndex > l { return io.ErrUnexpectedEOF } @@ -56283,7 +42252,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56293,9 +42262,6 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56315,7 +42281,7 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56325,9 +42291,6 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56342,9 +42305,6 @@ func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -56372,7 +42332,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56400,7 +42360,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56409,9 +42369,6 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56433,7 +42390,7 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56442,9 +42399,6 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56462,9 +42416,6 @@ func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -56492,7 +42443,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56520,7 +42471,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -56540,7 +42491,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56549,20 +42500,54 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -56572,86 +42557,41 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + 
intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -56667,7 +42607,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56676,9 +42616,6 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56703,7 +42640,7 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -56717,9 +42654,6 @@ func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -56747,7 +42681,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56775,7 +42709,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -56794,7 +42728,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -56813,7 +42747,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56832,7 +42766,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -56851,7 +42785,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -56870,7 +42804,7 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -56879,9 +42813,6 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56899,9 +42830,6 @@ func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -56929,7 +42857,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56957,7 +42885,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56967,9 +42895,6 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -56989,7 +42914,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -56999,9 +42924,6 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57021,7 +42943,7 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57030,9 +42952,6 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57049,9 +42968,6 @@ func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -57079,7 +42995,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -57107,7 +43023,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57116,9 +43032,6 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57140,7 +43053,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57149,9 +43062,6 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57173,7 +43083,7 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57182,9 +43092,6 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57201,9 +43108,6 @@ func (m *ResourceQuota) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return 
ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -57231,7 +43135,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -57259,7 +43163,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57268,9 +43172,6 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57292,7 +43193,7 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57301,9 +43202,6 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57321,9 +43219,6 @@ func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -57351,7 +43246,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -57379,7 +43274,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57388,20 +43283,54 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -57411,88 +43340,46 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - 
var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Hard[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Hard[ResourceName(mapkey)] = mapvalue } - m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -57508,7 +43395,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -57518,9 +43405,6 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57540,7 +43424,7 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57549,9 +43433,6 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -57571,9 +43452,6 @@ func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return 
ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -57601,7 +43479,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -57629,7 +43507,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57638,20 +43516,54 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Hard == nil { m.Hard = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -57661,88 +43573,46 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Hard[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Hard[ResourceName(mapkey)] = mapvalue } - m.Hard[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -57758,7 +43628,7 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57767,20 +43637,54 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Used == nil { m.Used = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -57790,88 +43694,46 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return 
io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Used[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Used[ResourceName(mapkey)] = mapvalue } - m.Used[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -57882,9 +43744,6 @@ func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -57912,7 +43771,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -57940,7 +43799,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -57949,20 +43808,54 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return 
io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Limits == nil { m.Limits = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -57972,88 +43865,46 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Limits[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Limits[ResourceName(mapkey)] = mapvalue } - m.Limits[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -58069,7 +43920,7 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -58078,20 +43929,54 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Requests == nil { m.Requests = make(ResourceList) } - var mapkey ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -58101,88 +43986,46 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postmsgIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := 
&k8s_io_apimachinery_pkg_api_resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.Requests[ResourceName(mapkey)] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity + m.Requests[ResourceName(mapkey)] = mapvalue } - m.Requests[ResourceName(mapkey)] = *mapvalue iNdEx = postIndex default: iNdEx = preIndex @@ -58193,9 +44036,6 @@ func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -58223,7 +44063,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58251,7 +44091,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58261,9 +44101,6 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58283,7 +44120,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58293,9 +44130,6 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58315,7 +44149,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58325,9 +44159,6 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58347,7 +44178,7 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58357,9 +44188,6 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58374,9 +44202,6 @@ func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -58404,7 +44229,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58432,7 +44257,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58442,9 +44267,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58464,7 +44286,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58474,9 +44296,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58496,7 +44315,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -58505,9 +44324,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58532,7 +44348,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -58552,7 +44368,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58562,9 +44378,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58584,7 +44397,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58594,9 +44407,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58616,7 +44426,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58626,9 +44436,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58648,7 +44455,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58658,9 +44465,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58680,7 +44484,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 
0x80 { break } @@ -58690,9 +44494,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58712,7 +44513,7 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -58727,9 +44528,6 @@ func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -58757,7 +44555,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58785,7 +44583,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58795,9 +44593,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58817,7 +44612,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58827,9 +44622,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58849,7 +44641,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -58858,9 +44650,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58885,7 +44674,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -58905,7 +44694,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58915,9 +44704,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58937,7 +44723,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58947,9 +44733,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -58969,7 +44752,7 @@ 
func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -58979,9 +44762,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59001,7 +44781,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59011,9 +44791,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59033,7 +44810,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59043,9 +44820,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59065,7 +44839,7 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59080,9 +44854,6 @@ func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -59110,7 +44881,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59138,7 +44909,7 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59147,9 +44918,6 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59167,9 +44935,6 @@ func (m *ScopeSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -59197,7 +44962,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59225,7 +44990,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59235,9 +45000,6 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59257,7 +45019,7 @@ func (m 
*ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59267,9 +45029,6 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59289,7 +45048,7 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59299,9 +45058,6 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59316,9 +45072,6 @@ func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -59346,7 +45099,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59374,7 +45127,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59383,9 +45136,6 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59407,7 +45157,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59416,20 +45166,54 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Data == nil { m.Data = make(map[string][]byte) } - var mapkey string - mapvalue := []byte{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -59439,87 +45223,42 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex < 0 { - return ErrInvalidLengthGenerated + var mapbyteLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postbytesIndex > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + mapbyteLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intMapbyteLen := int(mapbyteLen) + if intMapbyteLen < 0 { + return ErrInvalidLengthGenerated + } + postbytesIndex := iNdEx + intMapbyteLen + if postbytesIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := make([]byte, mapbyteLen) + copy(mapvalue, dAtA[iNdEx:postbytesIndex]) + iNdEx = postbytesIndex + m.Data[mapkey] = mapvalue + } else { + var mapvalue []byte + m.Data[mapkey] = mapvalue } - m.Data[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -59535,7 +45274,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59545,9 +45284,6 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59567,7 +45303,7 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59576,20 +45312,54 @@ func (m *Secret) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) 
<< shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.StringData == nil { m.StringData = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -59599,108 +45369,42 @@ func (m *Secret) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } - } - m.StringData[mapkey] = mapvalue - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Immutable", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated } - if iNdEx >= l { + postStringIndexmapvalue 
:= iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.StringData[mapkey] = mapvalue + } else { + var mapvalue string + m.StringData[mapkey] = mapvalue } - b := bool(v != 0) - m.Immutable = &b + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -59710,9 +45414,6 @@ func (m *Secret) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -59740,7 +45441,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59768,7 +45469,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59777,9 +45478,6 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59801,7 +45499,7 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59817,9 +45515,6 @@ func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -59847,7 +45542,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59875,7 +45570,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59884,9 +45579,6 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59908,7 +45600,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -59918,9 +45610,6 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -59940,7 +45629,7 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -59956,9 +45645,6 @@ func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -59986,7 +45672,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60014,7 +45700,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60023,9 +45709,6 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60047,7 +45730,7 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60056,9 +45739,6 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60076,9 +45756,6 @@ func (m *SecretList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -60106,7 +45783,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60134,7 +45811,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60143,9 +45820,6 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60167,7 +45841,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60176,9 +45850,6 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60201,7 +45872,7 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60217,9 +45888,6 @@ func (m *SecretProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -60247,7 +45915,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60275,7 +45943,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60285,9 +45953,6 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60307,7 +45972,7 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60317,9 +45982,6 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60334,9 +45996,6 @@ func (m *SecretReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -60364,7 +46023,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60392,7 +46051,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60402,9 +46061,6 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60424,7 +46080,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60433,9 +46089,6 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60458,7 +46111,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -60478,7 +46131,7 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60494,9 +46147,6 @@ func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -60524,7 +46174,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60552,7 +46202,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60561,9 +46211,6 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60588,7 +46235,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60609,7 +46256,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60618,9 +46265,6 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60645,7 +46289,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60665,7 +46309,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60686,7 +46330,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60707,7 +46351,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60728,7 +46372,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60748,7 +46392,7 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60758,51 +46402,12 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } s := ProcMountType(dAtA[iNdEx:postIndex]) m.ProcMount = &s iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WindowsOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.WindowsOptions == nil { - m.WindowsOptions = &WindowsSecurityContextOptions{} - } - if err := m.WindowsOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -60812,9 +46417,6 @@ func (m *SecurityContext) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -60842,7 +46444,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60870,7 +46472,7 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60879,9 +46481,6 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60898,9 +46497,6 @@ func (m *SerializedReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -60928,7 +46524,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -60956,7 +46552,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60965,9 +46561,6 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -60989,7 +46582,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -60998,9 +46591,6 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61022,7 +46612,7 @@ func (m *Service) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61031,9 +46621,6 @@ func (m *Service) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61050,9 +46637,6 @@ func (m *Service) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -61080,7 +46664,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61108,7 +46692,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61117,9 +46701,6 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61141,7 +46722,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61150,9 +46731,6 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61175,7 +46753,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61184,9 +46762,6 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61209,7 +46784,7 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61225,9 +46800,6 @@ func (m *ServiceAccount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -61255,7 +46827,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61283,7 +46855,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61292,9 +46864,6 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61316,7 +46885,7 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61325,9 +46894,6 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61345,9 +46911,6 @@ func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -61375,7 +46938,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61403,7 +46966,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61413,9 +46976,6 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61435,7 +46995,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61455,7 +47015,7 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61465,9 +47025,6 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61482,9 +47039,6 @@ func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -61512,7 +47066,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire 
|= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61540,7 +47094,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61549,9 +47103,6 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61573,7 +47124,7 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61582,9 +47133,6 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61602,9 +47150,6 @@ func (m *ServiceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -61632,7 +47177,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61660,7 +47205,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61670,9 +47215,6 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61692,7 +47234,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61702,9 +47244,6 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61724,7 +47263,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Port |= int32(b&0x7F) << shift + m.Port |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -61743,7 +47282,7 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61752,9 +47291,6 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61776,44 +47312,11 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NodePort |= int32(b&0x7F) << shift + m.NodePort |= (int32(b) & 0x7F) << shift if b < 0x80 { break } } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AppProtocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } 
- intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.AppProtocol = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -61823,9 +47326,6 @@ func (m *ServicePort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -61853,7 +47353,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61881,7 +47381,7 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61891,9 +47391,6 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -61908,9 +47405,6 @@ func (m *ServiceProxyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -61938,7 +47432,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -61966,7 +47460,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -61975,9 +47469,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62000,7 +47491,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -62009,20 +47500,54 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { 
m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -62032,86 +47557,41 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -62127,7 +47607,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62137,9 +47617,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62159,7 +47636,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + 
stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62169,9 +47646,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62191,7 +47665,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62201,9 +47675,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62223,7 +47694,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62233,9 +47704,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62255,7 +47723,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62265,9 +47733,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62287,7 +47752,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62297,9 +47762,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62319,7 +47781,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62329,9 +47791,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62351,7 +47810,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62361,9 +47820,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62383,7 +47839,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.HealthCheckNodePort |= int32(b&0x7F) << shift + m.HealthCheckNodePort |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -62402,7 +47858,7 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -62422,7 +47878,7 @@ func (m *ServiceSpec) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -62431,9 +47887,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62444,71 +47897,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPFamily", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := IPFamily(dAtA[iNdEx:postIndex]) - m.IPFamily = &s - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -62518,9 +47906,6 @@ func (m *ServiceSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -62548,7 +47933,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62576,7 +47961,7 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -62585,9 +47970,6 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62604,9 +47986,6 @@ func (m *ServiceStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -62634,7 +48013,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62662,7 +48041,7 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if 
b < 0x80 { break } @@ -62671,9 +48050,6 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62693,9 +48069,6 @@ func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -62723,7 +48096,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62751,7 +48124,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62761,9 +48134,6 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62783,7 +48153,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62793,9 +48163,6 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62815,7 +48182,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62825,9 +48192,6 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62847,7 +48211,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -62867,7 +48231,7 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -62876,9 +48240,6 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62898,9 +48259,6 @@ func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -62928,7 +48286,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62956,7 +48314,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen 
|= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62966,9 +48324,6 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -62988,7 +48343,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -62998,9 +48353,6 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63020,7 +48372,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63030,9 +48382,6 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63052,7 +48401,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -63072,7 +48421,7 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -63081,9 +48430,6 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63103,9 +48449,6 @@ func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -63133,7 +48476,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63161,7 +48504,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63171,9 +48514,6 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63193,7 +48533,7 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63203,9 +48543,6 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63220,9 +48557,6 @@ func (m *Sysctl) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { 
return io.ErrUnexpectedEOF } @@ -63250,7 +48584,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63278,7 +48612,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -63287,9 +48621,6 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63311,7 +48642,7 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63321,9 +48652,6 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63338,9 +48666,6 @@ func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -63368,7 +48693,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63396,7 +48721,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63406,9 +48731,6 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63428,7 +48750,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63438,9 +48760,6 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63460,7 +48779,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63470,9 +48789,6 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63492,7 +48808,7 @@ func (m *Taint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -63501,14 +48817,11 @@ func (m *Taint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.TimeAdded == nil { - m.TimeAdded = &v1.Time{} + m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} } if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { return err @@ -63523,9 +48836,6 @@ func (m *Taint) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -63553,7 +48863,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63581,7 +48891,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63591,9 +48901,6 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63613,7 +48920,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63623,9 +48930,6 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63645,7 +48949,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63655,9 +48959,6 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63677,7 +48978,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63687,9 +48988,6 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -63709,7 +49007,7 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63724,9 +49022,6 @@ func (m *Toleration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -63754,7 +49049,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63782,39 +49077,7 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63824,72 +49087,16 @@ func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + m.Key = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TopologySelectorTerm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TopologySelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelExpressions", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -63899,25 +49106,20 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.MatchLabelExpressions = append(m.MatchLabelExpressions, TopologySelectorLabelRequirement{}) - if err := m.MatchLabelExpressions[len(m.MatchLabelExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -63928,9 +49130,6 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -63943,7 +49142,7 @@ func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { } return nil } -func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { +func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -63958,7 +49157,7 @@ func (m 
*TopologySpreadConstraint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -63966,98 +49165,15 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: TopologySpreadConstraint: wiretype end group for non-group") + return fmt.Errorf("proto: TopologySelectorTerm: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: TopologySpreadConstraint: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: TopologySelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSkew", wireType) - } - m.MaxSkew = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxSkew |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WhenUnsatisfiable", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WhenUnsatisfiable = UnsatisfiableConstraintAction(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelExpressions", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -64069,7 +49185,7 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -64078,16 +49194,11 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.LabelSelector == nil { - m.LabelSelector = &v1.LabelSelector{} - } - if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.MatchLabelExpressions = append(m.MatchLabelExpressions, TopologySelectorLabelRequirement{}) + if err 
:= m.MatchLabelExpressions[len(m.MatchLabelExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -64100,9 +49211,6 @@ func (m *TopologySpreadConstraint) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -64130,7 +49238,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64158,7 +49266,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64168,9 +49276,6 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64191,7 +49296,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64201,9 +49306,6 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64223,7 +49325,7 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64233,9 +49335,6 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64250,9 +49349,6 @@ func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -64280,7 +49376,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64308,7 +49404,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64318,9 +49414,6 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64340,7 +49433,7 @@ func (m *Volume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -64349,9 +49442,6 @@ func (m *Volume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64368,9 +49458,6 @@ func (m *Volume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if 
(iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -64398,7 +49485,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64426,7 +49513,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64436,9 +49523,6 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64458,7 +49542,7 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64468,9 +49552,6 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64485,9 +49566,6 @@ func (m *VolumeDevice) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -64515,7 +49593,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64543,7 +49621,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64553,9 +49631,6 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64575,7 +49650,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -64595,7 +49670,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64605,9 +49680,6 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64627,7 +49699,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64637,9 +49709,6 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64659,7 +49728,7 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64669,47 +49738,12 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } s := MountPropagationMode(dAtA[iNdEx:postIndex]) m.MountPropagation = &s iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubPathExpr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubPathExpr = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -64719,9 +49753,6 @@ func (m *VolumeMount) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -64749,7 +49780,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64777,7 +49808,7 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -64786,9 +49817,6 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64808,9 +49836,6 @@ func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -64838,7 +49863,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -64866,7 +49891,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -64875,9 +49900,6 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64902,7 +49924,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -64911,9 +49933,6 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64938,7 +49957,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -64947,9 +49966,6 @@ func (m 
*VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -64974,7 +49990,7 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -64983,9 +49999,6 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65005,9 +50018,6 @@ func (m *VolumeProjection) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -65035,7 +50045,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -65063,7 +50073,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65072,9 +50082,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65099,7 +50106,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65108,9 +50115,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65135,7 +50139,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65144,9 +50148,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65171,7 +50172,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65180,9 +50181,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65207,7 +50205,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65216,9 +50214,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65243,7 +50238,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break 
} @@ -65252,9 +50247,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65279,7 +50271,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65288,9 +50280,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65315,7 +50304,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65324,9 +50313,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65351,7 +50337,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65360,9 +50346,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65387,7 +50370,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65396,9 +50379,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65423,7 +50403,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65432,9 +50412,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65459,7 +50436,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65468,9 +50445,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65495,7 +50469,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65504,9 +50478,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65531,7 +50502,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 
0x80 { break } @@ -65540,9 +50511,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65567,7 +50535,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65576,9 +50544,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65603,7 +50568,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65612,9 +50577,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65639,7 +50601,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65648,9 +50610,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65675,7 +50634,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65684,9 +50643,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65711,7 +50667,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65720,9 +50676,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65747,7 +50700,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65756,9 +50709,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65783,7 +50733,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65792,9 +50742,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65819,7 +50766,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << 
shift if b < 0x80 { break } @@ -65828,9 +50775,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65855,7 +50799,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65864,9 +50808,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65891,7 +50832,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65900,9 +50841,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65927,7 +50865,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65936,9 +50874,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65963,7 +50898,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -65972,9 +50907,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -65999,7 +50931,7 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -66008,9 +50940,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -66021,42 +50950,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 28: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CSI == nil { - m.CSI = &CSIVolumeSource{} - } - if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -66066,9 +50959,6 @@ func (m *VolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + 
skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -66096,7 +50986,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -66124,7 +51014,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -66134,9 +51024,6 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -66156,7 +51043,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -66166,9 +51053,6 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -66188,7 +51072,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -66198,9 +51082,6 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -66220,7 +51101,7 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -66230,9 +51111,6 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -66247,9 +51125,6 @@ func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -66277,7 +51152,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -66305,7 +51180,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Weight |= int32(b&0x7F) << shift + m.Weight |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -66324,7 +51199,7 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -66333,9 +51208,6 @@ func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -66352,161 +51224,6 @@ func (m *WeightedPodAffinityTerm) 
Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WindowsSecurityContextOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WindowsSecurityContextOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpecName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.GMSACredentialSpecName = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GMSACredentialSpec", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.GMSACredentialSpec = &s - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUserName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.RunAsUserName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -66522,7 +51239,6 @@ func (m 
*WindowsSecurityContextOptions) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -66554,8 +51270,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -66572,34 +51290,860 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 12780 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x7d, 0x6b, 0x6c, 0x24, 0x47, + 0x7a, 0xd8, 0xf5, 0xcc, 0x90, 0x9c, 0xf9, 0xf8, 0xae, 0x7d, 0x88, 0x4b, 0x69, 0x77, 0x56, 0xad, + 0xbb, 0xd5, 0xea, 0x24, 0x91, 0xa7, 0x95, 0x74, 0x92, 0x4f, 0x3a, 0xd9, 0x24, 0x87, 0xdc, 0x1d, + 0xed, 0x92, 0x3b, 0xaa, 0xe1, 0xee, 0xde, 0xc9, 0xba, 0xf3, 0x35, 0x67, 0x8a, 0x64, 0x8b, 0xc3, + 0xee, 0x51, 0x77, 0x0f, 0x77, 0xa9, 0xd8, 0x40, 0x72, 0x8e, 0x9d, 0x5c, 0x6c, 0x04, 0x87, 0xd8, + 0xc8, 0xc3, 0x36, 0x1c, 0xc0, 0x71, 0x60, 0x3b, 0x4e, 0x82, 0x38, 0x76, 0x6c, 0xc7, 0x67, 0x27, + 0x8e, 0x9d, 0x1f, 0x0e, 0x10, 0x5c, 0x9c, 0x00, 0xc1, 0x19, 0x30, 0xc2, 0xd8, 0x74, 0x1e, 0xf0, + 0x8f, 0x3c, 0x10, 0xe7, 0x47, 0xcc, 0x18, 0x71, 0x50, 0xcf, 0xae, 0xea, 0xe9, 0x9e, 0x19, 0xae, + 0xb8, 0x94, 0x7c, 0xb8, 0x7f, 0x33, 0xf5, 0x7d, 0xf5, 0x55, 0x75, 0x3d, 0xbf, 0xef, 0xab, 0xef, + 0x01, 0xaf, 0xed, 0xbc, 0x1a, 0xce, 0xb9, 0xfe, 0xfc, 0x4e, 0x67, 0x83, 0x04, 0x1e, 0x89, 0x48, + 0x38, 0xbf, 0x47, 0xbc, 0xa6, 0x1f, 0xcc, 0x0b, 0x80, 0xd3, 0x76, 0xe7, 0x1b, 0x7e, 0x40, 0xe6, + 0xf7, 0x5e, 0x98, 0xdf, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0x34, 0xe7, 0xda, 0x81, 0x1f, 0xf9, 0x08, + 0x71, 0x9c, 0x39, 0xa7, 0xed, 0xce, 0x51, 0x9c, 0xb9, 0xbd, 0x17, 0x66, 0x9f, 0xdf, 0x72, 0xa3, + 0xed, 0xce, 0xc6, 0x5c, 0xc3, 0xdf, 0x9d, 0xdf, 0xf2, 0xb7, 0xfc, 0x79, 0x86, 0xba, 0xd1, 0xd9, + 
0x77, 0xe3, 0x2b, 0x77, + 0xec, 0x7c, 0xa6, 0x3b, 0xf6, 0x3f, 0xb0, 0x60, 0x54, 0x74, 0xfb, 0x14, 0x46, 0xfb, 0x3b, 0xcc, + 0xd1, 0x7e, 0xbc, 0xc7, 0x68, 0x67, 0x0c, 0xf3, 0xdf, 0xce, 0xa9, 0xfe, 0xd6, 0xfc, 0x20, 0x1a, + 0x80, 0xe5, 0x79, 0x15, 0x8a, 0xed, 0xc0, 0x8f, 0xfc, 0x86, 0xdf, 0x12, 0x1c, 0xcf, 0x13, 0x71, + 0xb4, 0x00, 0x5e, 0x7e, 0xa4, 0xfd, 0xc6, 0x0a, 0x9b, 0x8d, 0x9e, 0x1f, 0x44, 0x82, 0xcb, 0x88, + 0x47, 0xcf, 0x0f, 0x22, 0xcc, 0x20, 0xa8, 0x09, 0x10, 0x39, 0xc1, 0x16, 0x89, 0x68, 0x99, 0x08, + 0x3c, 0x92, 0x7d, 0x78, 0x74, 0x22, 0xb7, 0x35, 0xe7, 0x7a, 0x51, 0x18, 0x05, 0x73, 0x55, 0x2f, + 0xba, 0x1d, 0x70, 0x01, 0x4a, 0x73, 0xff, 0x57, 0xb4, 0xb0, 0x46, 0x57, 0xfa, 0x68, 0xb2, 0x36, + 0x86, 0xcc, 0x07, 0xf1, 0x35, 0x51, 0x8e, 0x15, 0x86, 0xfd, 0x0a, 0xbb, 0x4a, 0xd8, 0x00, 0x1d, + 0xcf, 0x33, 0xff, 0xeb, 0x45, 0x35, 0xb4, 0xec, 0x35, 0xac, 0xa2, 0xfb, 0xff, 0xf7, 0x3e, 0xb9, + 0x69, 0xc3, 0xba, 0x1f, 0x4d, 0x1c, 0x24, 0x00, 0x7d, 0x67, 0x97, 0x9d, 0xc4, 0xf3, 0x7d, 0xae, + 0x80, 0x63, 0x58, 0x46, 0xb0, 0xb0, 0xd7, 0x2c, 0x3c, 0x70, 0xb5, 0x26, 0x16, 0xb9, 0x16, 0xf6, + 0x5a, 0x00, 0x70, 0x8c, 0x83, 0xe6, 0x85, 0xf8, 0x5d, 0x30, 0x92, 0xdf, 0x49, 0xf1, 0x5b, 0x7e, + 0xbe, 0x26, 0x7f, 0xbf, 0x00, 0xa3, 0x2a, 0x09, 0x5e, 0x8d, 0xe7, 0x12, 0x13, 0x61, 0x58, 0x96, + 0xe3, 0x62, 0xac, 0xe3, 0xa0, 0x75, 0x98, 0x0c, 0xb9, 0xee, 0x45, 0x45, 0xdb, 0xe3, 0x3a, 0xac, + 0x4f, 0x4a, 0xfb, 0x8a, 0xba, 0x09, 0x3e, 0x62, 0x45, 0xfc, 0xe8, 0x90, 0x8e, 0x96, 0x49, 0x12, + 0xe8, 0x0d, 0x98, 0x68, 0xe9, 0xe9, 0xe6, 0x6b, 0x42, 0xc5, 0xa5, 0xcc, 0x8f, 0x8d, 0x64, 0xf4, + 0x35, 0x9c, 0xc0, 0xa6, 0x9c, 0x92, 0x5e, 0x22, 0x22, 0x44, 0x3a, 0xde, 0x16, 0x09, 0x45, 0x0a, + 0x2f, 0xc6, 0x29, 0xdd, 0xca, 0xc0, 0xc1, 0x99, 0xb5, 0xd1, 0xab, 0x30, 0x26, 0x3f, 0x5f, 0x73, + 0x23, 0x8e, 0x8d, 0xdc, 0x35, 0x18, 0x36, 0x30, 0xd1, 0x7d, 0x38, 0x27, 0xff, 0xaf, 0x07, 0xce, + 0xe6, 0xa6, 0xdb, 0x10, 0x5e, 0xdc, 0xdc, 0xd3, 0x67, 0x41, 0xba, 0x0e, 0x2d, 0xa7, 0x21, 0x1d, + 0x1d, 0x94, 0x2f, 0x8b, 0x51, 0x4b, 0x85, 0xb3, 0x49, 0x4c, 0xa7, 0x8f, 0x56, 0xe1, 0xcc, 0x36, + 0x71, 0x5a, 0xd1, 0xf6, 0xd2, 0x36, 0x69, 0xec, 0xc8, 0x4d, 0xc4, 0x9c, 0x93, 0x35, 0xd3, 0xf0, + 0x1b, 0xdd, 0x28, 0x38, 0xad, 0x1e, 0x7a, 0x07, 0x66, 0xda, 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xaf, + 0xf9, 0x11, 0x33, 0xe9, 0x50, 0x39, 0xe4, 0x84, 0x17, 0xb3, 0x72, 0xcc, 0xae, 0x65, 0xe0, 0xe1, + 0x4c, 0x0a, 0xe8, 0x7d, 0x38, 0x97, 0x58, 0x0c, 0xc2, 0xa7, 0x72, 0x22, 0x3b, 0xde, 0x6e, 0x3d, + 0xad, 0x82, 0xf0, 0x91, 0x4c, 0x03, 0xe1, 0xf4, 0x26, 0x3e, 0x98, 0xa1, 0xcf, 0x7b, 0xb4, 0xb2, + 0xc6, 0x94, 0xa1, 0x2f, 0xc1, 0x98, 0xbe, 0x8a, 0xc4, 0x05, 0x73, 0x25, 0x9d, 0x67, 0xd1, 0x56, + 0x1b, 0x67, 0xe9, 0xd4, 0x8a, 0xd2, 0x61, 0xd8, 0xa0, 0x68, 0x13, 0x48, 0xff, 0x3e, 0x74, 0x0b, + 0x8a, 0x8d, 0x96, 0x4b, 0xbc, 0xa8, 0x5a, 0xeb, 0x15, 0xf4, 0x63, 0x49, 0xe0, 0x88, 0x01, 0x13, + 0x01, 0x4a, 0x79, 0x19, 0x56, 0x14, 0xec, 0x5f, 0xcf, 0x41, 0xb9, 0x4f, 0xb4, 0xdb, 0x84, 0x3e, + 0xda, 0x1a, 0x48, 0x1f, 0xbd, 0x20, 0x33, 0xe2, 0xad, 0x25, 0x84, 0xf4, 0x44, 0xb6, 0xbb, 0x58, + 0x54, 0x4f, 0xe2, 0x0f, 0x6c, 0x1f, 0xac, 0xab, 0xb4, 0x0b, 0x7d, 0x2d, 0xd7, 0x8d, 0xa7, 0xac, + 0xa1, 0xc1, 0x05, 0x91, 0xcc, 0x67, 0x09, 0xfb, 0x6b, 0x39, 0x38, 0xa7, 0x86, 0xf0, 0x9b, 0x77, + 0xe0, 0xee, 0x74, 0x0f, 0xdc, 0x09, 0x3c, 0xea, 0xd8, 0xb7, 0x61, 0x98, 0x07, 0x4d, 0x19, 0x80, + 0x01, 0x7a, 0xca, 0x8c, 0xb0, 0xa5, 0xae, 0x69, 0x23, 0xca, 0xd6, 0x5f, 0xb2, 0x60, 0x72, 0x7d, + 0xa9, 0x56, 0xf7, 0x1b, 0x3b, 0x24, 0x5a, 0xe0, 0x0c, 0x2b, 0x16, 0xfc, 0x8f, 0xf5, 0x90, 0x7c, + 
0x4d, 0x1a, 0xc7, 0x74, 0x19, 0x0a, 0xdb, 0x7e, 0x18, 0x25, 0x5f, 0x7c, 0x6f, 0xf8, 0x61, 0x84, + 0x19, 0xc4, 0xfe, 0x5d, 0x0b, 0x86, 0x58, 0x1e, 0xd7, 0x7e, 0xc9, 0x85, 0x07, 0xf9, 0x2e, 0xf4, + 0x32, 0x0c, 0x93, 0xcd, 0x4d, 0xd2, 0x88, 0xc4, 0xac, 0x4a, 0x77, 0xd4, 0xe1, 0x65, 0x56, 0x4a, + 0x2f, 0x7d, 0xd6, 0x18, 0xff, 0x8b, 0x05, 0x32, 0xba, 0x07, 0xa5, 0xc8, 0xdd, 0x25, 0x0b, 0xcd, + 0xa6, 0x78, 0x33, 0x7b, 0x08, 0xef, 0xdf, 0x75, 0x49, 0x00, 0xc7, 0xb4, 0xec, 0xaf, 0xe6, 0x00, + 0x62, 0xd7, 0xff, 0x7e, 0x9f, 0xb8, 0xd8, 0xf5, 0x9a, 0x72, 0x25, 0xe5, 0x35, 0x05, 0xc5, 0x04, + 0x53, 0x9e, 0x52, 0xd4, 0x30, 0xe5, 0x07, 0x1a, 0xa6, 0xc2, 0x71, 0x86, 0x69, 0x09, 0xa6, 0xe3, + 0xd0, 0x05, 0x66, 0x1c, 0x17, 0x26, 0xa4, 0xac, 0x27, 0x81, 0xb8, 0x1b, 0xdf, 0x26, 0x70, 0x59, + 0x46, 0xd4, 0x94, 0x77, 0x0d, 0x33, 0xc9, 0x3c, 0x46, 0x9e, 0xe9, 0xf8, 0xb9, 0x28, 0x97, 0xf9, + 0x5c, 0xf4, 0x13, 0x16, 0x9c, 0x4d, 0xb6, 0xc3, 0x7c, 0xdf, 0xbe, 0x62, 0xc1, 0x39, 0xf6, 0x68, + 0xc6, 0x5a, 0xed, 0x7e, 0xa2, 0x7b, 0x29, 0x3d, 0xa4, 0x43, 0xef, 0x1e, 0xc7, 0x7e, 0xcf, 0xab, + 0x69, 0xa4, 0x71, 0x7a, 0x8b, 0xf6, 0x57, 0x2c, 0xb8, 0x90, 0x99, 0x3e, 0x08, 0x5d, 0x85, 0xa2, + 0xd3, 0x76, 0xb9, 0x46, 0x4a, 0xec, 0x77, 0x26, 0x3d, 0xd6, 0xaa, 0x5c, 0x1f, 0xa5, 0xa0, 0x2a, + 0xad, 0x61, 0x2e, 0x33, 0xad, 0x61, 0xdf, 0x2c, 0x85, 0xf6, 0xf7, 0x5b, 0x20, 0xdc, 0x9d, 0x06, + 0x38, 0x64, 0xde, 0x96, 0x59, 0x61, 0x8d, 0x60, 0xe6, 0x97, 0xb3, 0xfd, 0xbf, 0x44, 0x08, 0x73, + 0x75, 0xa9, 0x1b, 0x81, 0xcb, 0x0d, 0x5a, 0x76, 0x13, 0x04, 0xb4, 0x42, 0x98, 0xce, 0xaa, 0x7f, + 0x6f, 0xae, 0x01, 0x34, 0x19, 0xae, 0x96, 0x1b, 0x52, 0x5d, 0x21, 0x15, 0x05, 0xc1, 0x1a, 0x96, + 0xfd, 0x43, 0x39, 0x18, 0x95, 0xc1, 0xb3, 0x3b, 0xde, 0x20, 0x92, 0xe5, 0xb1, 0x72, 0xe8, 0xb0, + 0x64, 0xaa, 0x94, 0x70, 0x2d, 0x16, 0xc8, 0xe3, 0x64, 0xaa, 0x12, 0x80, 0x63, 0x1c, 0xf4, 0x0c, + 0x8c, 0x84, 0x9d, 0x0d, 0x86, 0x9e, 0x70, 0xe2, 0xa9, 0xf3, 0x62, 0x2c, 0xe1, 0xe8, 0x73, 0x30, + 0xc5, 0xeb, 0x05, 0x7e, 0xdb, 0xd9, 0xe2, 0xea, 0xcf, 0x21, 0xe5, 0x55, 0x3b, 0xb5, 0x9a, 0x80, + 0x1d, 0x1d, 0x94, 0xcf, 0x26, 0xcb, 0x98, 0xe2, 0xbc, 0x8b, 0x8a, 0xfd, 0x25, 0x40, 0xdd, 0xf1, + 0xc0, 0xd1, 0x9b, 0xdc, 0x94, 0xca, 0x0d, 0x48, 0xb3, 0x97, 0x46, 0x5c, 0x77, 0x02, 0x95, 0x86, + 0xf4, 0xbc, 0x16, 0x56, 0xf5, 0xed, 0xbf, 0x9a, 0x87, 0xa9, 0xa4, 0x4b, 0x20, 0xba, 0x01, 0xc3, + 0xfc, 0xb2, 0x13, 0xe4, 0x7b, 0x3c, 0xb8, 0x6a, 0x8e, 0x84, 0x6c, 0xdb, 0x8b, 0xfb, 0x52, 0xd4, + 0x47, 0xef, 0xc0, 0x68, 0xd3, 0xbf, 0xef, 0xdd, 0x77, 0x82, 0xe6, 0x42, 0xad, 0x2a, 0xd6, 0x65, + 0x2a, 0xcf, 0x5c, 0x89, 0xd1, 0x74, 0xe7, 0x44, 0xf6, 0xb8, 0x10, 0x83, 0xb0, 0x4e, 0x0e, 0xad, + 0xb3, 0x18, 0x87, 0x9b, 0xee, 0xd6, 0xaa, 0xd3, 0xee, 0x65, 0x57, 0xbb, 0x24, 0x91, 0x34, 0xca, + 0xe3, 0x22, 0x10, 0x22, 0x07, 0xe0, 0x98, 0x10, 0xfa, 0x6e, 0x38, 0x13, 0x66, 0xa8, 0xd9, 0xb2, + 0xd2, 0x43, 0xf4, 0xd2, 0x3c, 0x2d, 0x3e, 0x46, 0xa5, 0x99, 0x34, 0x85, 0x5c, 0x5a, 0x33, 0xf6, + 0x97, 0xcf, 0x80, 0xb1, 0x1b, 0x8d, 0x1c, 0x41, 0xd6, 0x09, 0xe5, 0x08, 0xc2, 0x50, 0x24, 0xbb, + 0xed, 0x68, 0xbf, 0xe2, 0x06, 0xbd, 0x72, 0xd8, 0x2d, 0x0b, 0x9c, 0x6e, 0x9a, 0x12, 0x82, 0x15, + 0x9d, 0xf4, 0x44, 0x4e, 0xf9, 0x0f, 0x31, 0x91, 0x53, 0xe1, 0x14, 0x13, 0x39, 0xad, 0xc1, 0xc8, + 0x96, 0x1b, 0x61, 0xd2, 0xf6, 0x05, 0x9b, 0x99, 0xba, 0x0e, 0xaf, 0x73, 0x94, 0xee, 0xe4, 0x21, + 0x02, 0x80, 0x25, 0x11, 0xf4, 0xa6, 0xda, 0x81, 0xc3, 0xd9, 0x52, 0x5a, 0xf7, 0xcb, 0x60, 0xea, + 0x1e, 0x14, 0x89, 0x9b, 0x46, 0x1e, 0x36, 0x71, 0xd3, 0x8a, 0x4c, 0xb7, 0x54, 0xcc, 0x36, 0x82, + 0x67, 0xd9, 0x94, 0xfa, 
0x24, 0x59, 0x32, 0x12, 0x53, 0x95, 0x4e, 0x2e, 0x31, 0xd5, 0xf7, 0x5b, + 0x70, 0xae, 0x9d, 0x96, 0xa3, 0x4d, 0x24, 0x49, 0x7a, 0x79, 0xe0, 0x24, 0x74, 0x46, 0x83, 0x4c, + 0x5c, 0x4f, 0x45, 0xc3, 0xe9, 0xcd, 0xd1, 0x81, 0x0e, 0x36, 0x9a, 0x22, 0xb3, 0xd2, 0x53, 0x19, + 0x19, 0xae, 0x7a, 0xe4, 0xb5, 0x5a, 0x4f, 0xc9, 0xa6, 0xf4, 0xf1, 0xac, 0x6c, 0x4a, 0x03, 0xe7, + 0x50, 0x7a, 0x53, 0xe5, 0xb6, 0x1a, 0xcf, 0x5e, 0x4a, 0x3c, 0x73, 0x55, 0xdf, 0x8c, 0x56, 0x6f, + 0xaa, 0x8c, 0x56, 0x3d, 0x62, 0xbd, 0xf1, 0x7c, 0x55, 0x7d, 0xf3, 0x58, 0x69, 0xb9, 0xa8, 0x26, + 0x4f, 0x26, 0x17, 0x95, 0x71, 0xd5, 0xf0, 0x74, 0x48, 0xcf, 0xf6, 0xb9, 0x6a, 0x0c, 0xba, 0xbd, + 0x2f, 0x1b, 0x9e, 0x77, 0x6b, 0xfa, 0xa1, 0xf2, 0x6e, 0xdd, 0xd5, 0xf3, 0x58, 0xa1, 0x3e, 0x89, + 0x9a, 0x28, 0xd2, 0x80, 0xd9, 0xab, 0xee, 0xea, 0x17, 0xe0, 0x99, 0x6c, 0xba, 0xea, 0x9e, 0xeb, + 0xa6, 0x9b, 0x7a, 0x05, 0x76, 0x65, 0xc5, 0x3a, 0x7b, 0x3a, 0x59, 0xb1, 0xce, 0x9d, 0x78, 0x56, + 0xac, 0xf3, 0xa7, 0x90, 0x15, 0xeb, 0xb1, 0x0f, 0x35, 0x2b, 0xd6, 0xcc, 0x23, 0xc8, 0x8a, 0xb5, + 0x16, 0x67, 0xc5, 0xba, 0x90, 0x3d, 0x25, 0x29, 0x96, 0xb9, 0x19, 0xb9, 0xb0, 0xee, 0xb2, 0xe7, + 0x79, 0x1e, 0xb3, 0x42, 0x04, 0xa3, 0x4b, 0xcf, 0xfb, 0x9b, 0x16, 0xd8, 0x82, 0x4f, 0x89, 0x02, + 0xe1, 0x98, 0x14, 0xa5, 0x1b, 0xe7, 0xc6, 0x7a, 0xbc, 0x87, 0x42, 0x36, 0x4d, 0xd5, 0x95, 0x9d, + 0x11, 0xcb, 0xfe, 0xcb, 0x39, 0xb8, 0xd4, 0x7b, 0x5d, 0xc7, 0x7a, 0xb2, 0x5a, 0xfc, 0xae, 0x93, + 0xd0, 0x93, 0x71, 0x21, 0x27, 0xc6, 0x1a, 0x38, 0xb0, 0xcf, 0x75, 0x98, 0x56, 0x26, 0xb9, 0x2d, + 0xb7, 0xb1, 0xaf, 0xe5, 0x03, 0x56, 0xae, 0x87, 0xf5, 0x24, 0x02, 0xee, 0xae, 0x83, 0x16, 0x60, + 0xd2, 0x28, 0xac, 0x56, 0x84, 0x30, 0xa3, 0x14, 0x73, 0x75, 0x13, 0x8c, 0x93, 0xf8, 0xf6, 0xcf, + 0x58, 0xf0, 0x58, 0x46, 0xc2, 0x88, 0x81, 0xe3, 0xd6, 0x6c, 0xc2, 0x64, 0xdb, 0xac, 0xda, 0x27, + 0xbc, 0x95, 0x91, 0x96, 0x42, 0xf5, 0x35, 0x01, 0xc0, 0x49, 0xa2, 0x8b, 0x57, 0x7f, 0xeb, 0xf7, + 0x2f, 0x7d, 0xec, 0xb7, 0x7f, 0xff, 0xd2, 0xc7, 0x7e, 0xe7, 0xf7, 0x2f, 0x7d, 0xec, 0xcf, 0x1f, + 0x5e, 0xb2, 0x7e, 0xeb, 0xf0, 0x92, 0xf5, 0xdb, 0x87, 0x97, 0xac, 0xdf, 0x39, 0xbc, 0x64, 0xfd, + 0xde, 0xe1, 0x25, 0xeb, 0xab, 0x7f, 0x70, 0xe9, 0x63, 0x6f, 0xe7, 0xf6, 0x5e, 0xf8, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0x20, 0x56, 0xf9, 0x8e, 0x0d, 0xe7, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto index d1cd8eb..f76251d 100644 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ b/vendor/k8s.io/api/core/v1/generated.proto @@ -31,7 +31,7 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; option go_package = "v1"; // Represents a Persistent Disk resource in AWS. -// +// // An AWS EBS disk must exist before mounting to a container. The disk // must also be in the same AWS zone as the kubelet. An AWS EBS disk // can only be mounted as read/write once. AWS EBS volumes support @@ -161,7 +161,7 @@ message AzureFileVolumeSource { // Deprecated in 1.7, please use the bindings subresource of pods instead. message Binding { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -198,7 +198,7 @@ message CSIPersistentVolumeSource { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference controllerPublishSecretRef = 6; @@ -206,7 +206,7 @@ message CSIPersistentVolumeSource { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodeStageSecretRef = 7; @@ -214,50 +214,10 @@ message CSIPersistentVolumeSource { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional optional SecretReference nodePublishSecretRef = 8; - - // ControllerExpandSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // ControllerExpandVolume call. - // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - optional SecretReference controllerExpandSecretRef = 9; -} - -// Represents a source location of a volume to mount, managed by an external CSI driver -message CSIVolumeSource { - // Driver is the name of the CSI driver that handles this volume. - // Consult with your admin for the correct name as registered in the cluster. - optional string driver = 1; - - // Specifies a read-only configuration for the volume. - // Defaults to false (read/write). - // +optional - optional bool readOnly = 2; - - // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". - // If not provided, the empty value is passed to the associated CSI driver - // which will determine the default filesystem to apply. - // +optional - optional string fsType = 3; - - // VolumeAttributes stores driver-specific properties that are passed to the CSI - // driver. Consult your driver's documentation for supported values. - // +optional - map volumeAttributes = 4; - - // NodePublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodePublishVolume and NodeUnpublishVolume calls. 
- // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secret references are passed. - // +optional - optional LocalObjectReference nodePublishSecretRef = 5; } // Adds and removes POSIX capabilities from running containers. @@ -275,7 +235,7 @@ message Capabilities { // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSPersistentVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -283,23 +243,23 @@ message CephFSPersistentVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional SecretReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -308,7 +268,7 @@ message CephFSPersistentVolumeSource { // Cephfs volumes do not support ownership management or SELinux relabeling. message CephFSVolumeSource { // Required: Monitors is a collection of Ceph monitors - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it repeated string monitors = 1; // Optional: Used as the mounted root, rather than the full Ceph tree, default is / @@ -316,23 +276,23 @@ message CephFSVolumeSource { optional string path = 2; // Optional: User is the rados user name, default is admin - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional string user = 3; // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional string secretFile = 4; // Optional: SecretRef is reference to the authentication secret for User, default is empty. 
- // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 5; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional optional bool readOnly = 6; } @@ -342,20 +302,20 @@ message CephFSVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderPersistentVolumeSource { - // volume id used to identify the volume in cinder. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -370,20 +330,20 @@ message CinderPersistentVolumeSource { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. message CinderVolumeSource { - // volume id used to identify the volume in cinder. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md optional string volumeID = 1; // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional string fsType = 2; // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional bool readOnly = 3; @@ -426,7 +386,7 @@ message ComponentCondition { // ComponentStatus (and ComponentStatusList) holds the cluster validation info. message ComponentStatus { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -440,7 +400,7 @@ message ComponentStatus { // Status of all the conditions for the component as a list of ComponentStatus objects. 
message ComponentStatusList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -451,18 +411,10 @@ message ComponentStatusList { // ConfigMap holds configuration data for pods to consume. message ConfigMap { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Immutable, if set to true, ensures that data stored in the ConfigMap cannot - // be updated (only object metadata can be modified). - // If not set to true, the field can be modified at any time. - // Defaulted to nil. - // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. - // +optional - optional bool immutable = 4; - // Data contains the configuration data. // Each key must consist of alphanumeric characters, '-', '_' or '.'. // Values with non-UTF-8 byte sequences must use the BinaryData field. @@ -484,7 +436,7 @@ message ConfigMap { // ConfigMapEnvSource selects a ConfigMap to populate the environment // variables with. -// +// // The contents of the target ConfigMap's Data field will represent the // key-value pairs as environment variables. message ConfigMapEnvSource { @@ -504,14 +456,14 @@ message ConfigMapKeySelector { // The key to select. optional string key = 2; - // Specify whether the ConfigMap or its key must be defined + // Specify whether the ConfigMap or it's key must be defined // +optional optional bool optional = 3; } // ConfigMapList is a resource containing a list of ConfigMap objects. message ConfigMapList { - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -545,7 +497,7 @@ message ConfigMapNodeConfigSource { } // Adapts a ConfigMap into a projected volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names, // unless the items element is populated with specific mappings of keys to paths. @@ -564,13 +516,13 @@ message ConfigMapProjection { // +optional repeated KeyToPath items = 2; - // Specify whether the ConfigMap or its keys must be defined + // Specify whether the ConfigMap or it's keys must be defined // +optional optional bool optional = 4; } // Adapts a ConfigMap into a volume. -// +// // The contents of the target ConfigMap's Data field will be presented in a // volume as files using the keys in the Data field as the file names, unless // the items element is populated with specific mappings of keys to paths. 
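The ConfigMap messages in the hunks above describe the two consumption paths for configuration data: keys projected into a container's environment (ConfigMapEnvSource / ConfigMapKeySelector) and keys presented as files in a volume (ConfigMapProjection / ConfigMapVolumeSource). As a rough sketch only, assuming the Go structs generated from this proto (k8s.io/api/core/v1 as vendored by this patch); the ConfigMap name "app-config", the key "config.yaml", and the variable names are illustrative, not part of the change:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	optional := true

	// Project every key of the ConfigMap "app-config" into the container's
	// environment via ConfigMapEnvSource.
	envFrom := corev1.EnvFromSource{
		ConfigMapRef: &corev1.ConfigMapEnvSource{
			LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"},
			Optional:             &optional,
		},
	}

	// Present a single key of the same ConfigMap as a file inside a volume,
	// using an explicit key-to-path mapping (ConfigMapVolumeSource + KeyToPath).
	vol := corev1.Volume{
		Name: "config",
		VolumeSource: corev1.VolumeSource{
			ConfigMap: &corev1.ConfigMapVolumeSource{
				LocalObjectReference: corev1.LocalObjectReference{Name: "app-config"},
				Items: []corev1.KeyToPath{
					{Key: "config.yaml", Path: "config.yaml"},
				},
				Optional: &optional,
			},
		},
	}

	fmt.Printf("%+v\n%+v\n", envFrom, vol)
}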
@@ -596,7 +548,7 @@ message ConfigMapVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the ConfigMap or its keys must be defined + // Specify whether the ConfigMap or it's keys must be defined // +optional optional bool optional = 4; } @@ -654,9 +606,6 @@ message Container { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge - // +listType=map - // +listMapKey=containerPort - // +listMapKey=protocol repeated ContainerPort ports = 6; // List of sources to populate environment variables in the container. @@ -689,6 +638,7 @@ message Container { repeated VolumeMount volumeMounts = 9; // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -708,17 +658,6 @@ message Container { // +optional optional Probe readinessProbe = 11; - // StartupProbe indicates that the Pod has successfully initialized. - // If specified, no other probes are executed until this completes successfully. - // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - // when it might take a long time to load data or warm a cache, than during steady-state operation. - // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - optional Probe startupProbe = 22; - // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -919,13 +858,6 @@ message ContainerStatus { // Container's ID in the format 'docker://'. // +optional optional string containerID = 8; - - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. - // +optional - optional bool started = 9; } // DaemonEndpoint contains information about a single Daemon endpoint. @@ -1026,8 +958,7 @@ message EndpointAddress { // EndpointPort is a tuple that describes a single port. message EndpointPort { - // The name of this port. This must match the 'name' field in the - // corresponding ServicePort. + // The name of this port (corresponds to ServicePort.Name). // Must be a DNS_LABEL. // Optional only if one port is defined. // +optional @@ -1041,16 +972,6 @@ message EndpointPort { // Default is TCP. // +optional optional string protocol = 3; - - // The application protocol for this port. - // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as - // mycompany.com/my-custom-protocol. - // Field can be enabled with ServiceAppProtocol feature gate. - // +optional - optional string appProtocol = 4; } // EndpointSubset is a group of addresses with a common set of ports. The @@ -1094,7 +1015,7 @@ message EndpointSubset { // ] message Endpoints { // Standard object's metadata. 
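The Container hunks above touch the probe fields; note that the vendored version in this patch predates startupProbe (the hunk removes it), so only livenessProbe and readinessProbe apply, and Probe still embeds Handler. A minimal, assumption-laden sketch of a container carrying both probes; the image, port, and paths are placeholders:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Illustrative container with the two probes available in this vendored
	// API version. "example/web:latest" and "/healthz" are made up.
	c := corev1.Container{
		Name:  "web",
		Image: "example/web:latest",
		Ports: []corev1.ContainerPort{
			{Name: "http", ContainerPort: 8080, Protocol: corev1.ProtocolTCP},
		},
		LivenessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt(8080)},
			},
			InitialDelaySeconds: 10,
			PeriodSeconds:       10,
		},
		ReadinessProbe: &corev1.Probe{
			Handler: corev1.Handler{
				TCPSocket: &corev1.TCPSocketAction{Port: intstr.FromInt(8080)},
			},
			PeriodSeconds: 5,
		},
	}
	fmt.Printf("%+v\n", c)
}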
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -1112,7 +1033,7 @@ message Endpoints { // EndpointsList is a list of endpoints. message EndpointsList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1159,7 +1080,7 @@ message EnvVar { // EnvVarSource represents a source for the value of an EnvVar. message EnvVarSource { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. // +optional optional ObjectFieldSelector fieldRef = 1; @@ -1177,196 +1098,10 @@ message EnvVarSource { optional SecretKeySelector secretKeyRef = 4; } -// An EphemeralContainer is a container that may be added temporarily to an existing pod for -// user-initiated activities such as debugging. Ephemeral containers have no resource or -// scheduling guarantees, and they will not be restarted when they exit or when a pod is -// removed or restarted. If an ephemeral container causes a pod to exceed its resource -// allocation, the pod may be evicted. -// Ephemeral containers may not be added by directly updating the pod spec. They must be added -// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec -// once added. -// This is an alpha feature enabled by the EphemeralContainers feature flag. -message EphemeralContainer { - // Ephemeral containers have all of the fields of Container, plus additional fields - // specific to ephemeral containers. Fields in common with Container are in the - // following inlined struct so than an EphemeralContainer may easily be converted - // to a Container. - optional EphemeralContainerCommon ephemeralContainerCommon = 1; - - // If set, the name of the container from PodSpec that this ephemeral container targets. - // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - // If not set then the ephemeral container is run in whatever namespaces are shared - // for the pod. Note that the container runtime must support this feature. - // +optional - optional string targetContainerName = 2; -} - -// EphemeralContainerCommon is a copy of all fields in Container to be inlined in -// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer -// to Container and allows separate documentation for the fields of EphemeralContainer. -// When a new field is added to Container it must be added here as well. -message EphemeralContainerCommon { - // Name of the ephemeral container specified as a DNS_LABEL. - // This name must be unique among all containers, init containers and ephemeral containers. - optional string name = 1; - - // Docker image name. - // More info: https://kubernetes.io/docs/concepts/containers/images - optional string image = 2; - - // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. 
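The EnvVarSource hunk above narrows the documented fieldRef paths (in this vendored version: metadata.*, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP). A short sketch of downward-API style environment variables using those field paths, assuming the same generated Go package; the variable names are hypothetical:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Environment variables resolved from pod fields via EnvVarSource.fieldRef.
	// Only the field paths listed in the comment above are supported here.
	env := []corev1.EnvVar{
		{
			Name: "POD_NAMESPACE",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{FieldPath: "metadata.namespace"},
			},
		},
		{
			Name: "POD_IP",
			ValueFrom: &corev1.EnvVarSource{
				FieldRef: &corev1.ObjectFieldSelector{FieldPath: "status.podIP"},
			},
		},
	}
	fmt.Printf("%+v\n", env)
}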
- // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - repeated string command = 3; - - // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - repeated string args = 4; - - // Container's working directory. - // If not specified, the container runtime's default will be used, which - // might be configured in the container image. - // Cannot be updated. - // +optional - optional string workingDir = 5; - - // Ports are not allowed for ephemeral containers. - repeated ContainerPort ports = 6; - - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - repeated EnvFromSource envFrom = 19; - - // List of environment variables to set in the container. - // Cannot be updated. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated EnvVar env = 7; - - // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources - // already allocated to the pod. - // +optional - optional ResourceRequirements resources = 8; - - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - // +optional - // +patchMergeKey=mountPath - // +patchStrategy=merge - repeated VolumeMount volumeMounts = 9; - - // volumeDevices is the list of block devices to be used by the container. - // +patchMergeKey=devicePath - // +patchStrategy=merge - // +optional - repeated VolumeDevice volumeDevices = 21; - - // Probes are not allowed for ephemeral containers. - // +optional - optional Probe livenessProbe = 10; - - // Probes are not allowed for ephemeral containers. - // +optional - optional Probe readinessProbe = 11; - - // Probes are not allowed for ephemeral containers. - // +optional - optional Probe startupProbe = 22; - - // Lifecycle is not allowed for ephemeral containers. - // +optional - optional Lifecycle lifecycle = 12; - - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. - // Message written is intended to be brief final status, such as an assertion failure message. - // Will be truncated by the node if greater than 4096 bytes. 
The total message length across - // all containers will be limited to 12kb. - // Defaults to /dev/termination-log. - // Cannot be updated. - // +optional - optional string terminationMessagePath = 13; - - // Indicate how the termination message should be populated. File will use the contents of - // terminationMessagePath to populate the container status message on both success and failure. - // FallbackToLogsOnError will use the last chunk of container log output if the termination - // message file is empty and the container exited with an error. - // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - // Defaults to File. - // Cannot be updated. - // +optional - optional string terminationMessagePolicy = 20; - - // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - // +optional - optional string imagePullPolicy = 14; - - // SecurityContext is not allowed for ephemeral containers. - // +optional - optional SecurityContext securityContext = 15; - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - // +optional - optional bool stdin = 16; - - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - // +optional - optional bool stdinOnce = 17; - - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - // +optional - optional bool tty = 18; -} - -// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. -message EphemeralContainers { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // A list of ephemeral containers associated with this pod. New ephemeral containers - // may be appended to this list, but existing ephemeral containers may not be removed - // or modified. - // +patchMergeKey=name - // +patchStrategy=merge - repeated EphemeralContainer ephemeralContainers = 2; -} - // Event is a report of an event somewhere in the cluster. message Event { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The object that this event is about. @@ -1431,7 +1166,7 @@ message Event { // EventList is a list of events. message EventList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1449,7 +1184,6 @@ message EventSeries { optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; // State of this Series: Ongoing or Finished - // Deprecated. Planned removal for 1.18 optional string state = 3; } @@ -1580,7 +1314,7 @@ message FlockerVolumeSource { } // Represents a Persistent Disk resource in Google Compute Engine. -// +// // A GCE PD must exist before mounting to a container. The disk must // also be in the same GCE project and zone as the kubelet. A GCE PD // can only be mounted as read/write once or read-only many times. GCE @@ -1616,7 +1350,7 @@ message GCEPersistentDiskVolumeSource { // Represents a volume that is populated with the contents of a git repository. // Git repo volumes do not support ownership management. // Git repo volumes support SELinux relabeling. -// +// // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir // into the Pod's container. @@ -1636,44 +1370,20 @@ message GitRepoVolumeSource { optional string directory = 3; } -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -message GlusterfsPersistentVolumeSource { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - optional string endpoints = 1; - - // Path is the Glusterfs volume path. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - optional string path = 2; - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - // +optional - optional bool readOnly = 3; - - // EndpointsNamespace is the namespace that contains Glusterfs endpoint. - // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - // +optional - optional string endpointsNamespace = 4; -} - // Represents a Glusterfs mount that lasts the lifetime of a pod. // Glusterfs volumes do not support ownership management or SELinux relabeling. message GlusterfsVolumeSource { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod optional string endpoints = 1; // Path is the Glusterfs volume path. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod optional string path = 2; // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. 
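The GitRepoVolumeSource comment above recommends replacing git-repo volumes with an EmptyDir that an init container clones into. A sketch of that pattern with the generated Go types; the "alpine/git" image, the repository URL, and the mount path are assumptions for illustration only, not anything prescribed by this patch:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// Shared EmptyDir volume: the init container clones into it, the main
	// container mounts it read-only.
	repoVol := corev1.Volume{
		Name:         "repo",
		VolumeSource: corev1.VolumeSource{EmptyDir: &corev1.EmptyDirVolumeSource{}},
	}
	spec := corev1.PodSpec{
		Volumes: []corev1.Volume{repoVol},
		InitContainers: []corev1.Container{{
			Name:         "clone",
			Image:        "alpine/git", // assumed clone image
			Args:         []string{"clone", "--single-branch", "--", "https://example.com/org/repo.git", "/repo"},
			VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/repo"}},
		}},
		Containers: []corev1.Container{{
			Name:         "app",
			Image:        "example/app:latest", // placeholder
			VolumeMounts: []corev1.VolumeMount{{Name: "repo", MountPath: "/repo", ReadOnly: true}},
		}},
	}
	fmt.Printf("%+v\n", spec)
}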
- // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod // +optional optional bool readOnly = 3; } @@ -1899,15 +1609,11 @@ message Lifecycle { // +optional optional Handler postStart = 1; - // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness/startup probe failure, - // preemption, resource contention, etc. The handler is not called if the - // container crashes or exits. The reason for termination is passed to the - // handler. The Pod's termination grace period countdown begins before the - // PreStop hooked is executed. Regardless of the outcome of the handler, the - // container will eventually terminate within the Pod's termination grace - // period. Other management of the container blocks until the hook completes - // or until the termination grace period is reached. + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. + // Other management of the container blocks until the hook completes. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional optional Handler preStop = 2; @@ -1916,12 +1622,12 @@ message Lifecycle { // LimitRange sets resource usage limits for each kind of resource in a Namespace. message LimitRange { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional LimitRangeSpec spec = 2; } @@ -1929,6 +1635,7 @@ message LimitRange { // LimitRangeItem defines a min/max usage limit for any resource that matches on kind. message LimitRangeItem { // Type of resource that this limit applies to. + // +optional optional string type = 1; // Max usage constraints on this kind by resource name. @@ -1955,7 +1662,7 @@ message LimitRangeItem { // LimitRangeList is a list of LimitRange items. message LimitRangeList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1973,7 +1680,7 @@ message LimitRangeSpec { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2050,43 +1757,25 @@ message NFSVolumeSource { // Use of multiple namespaces is optional. 
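The Lifecycle hunk above only rewords the preStop documentation; in both versions the hooks are postStart and preStop, and in the vendored API they are typed as Handler. A minimal sketch of both hooks with exec actions; the commands are illustrative:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// postStart runs right after the container starts; preStop runs right
	// before termination, as described in the comment above.
	lc := corev1.Lifecycle{
		PostStart: &corev1.Handler{
			Exec: &corev1.ExecAction{Command: []string{"/bin/sh", "-c", "echo started"}},
		},
		PreStop: &corev1.Handler{
			Exec: &corev1.ExecAction{Command: []string{"/bin/sh", "-c", "sleep 5"}},
		},
	}
	fmt.Printf("%+v\n", lc)
}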
message Namespace { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional NamespaceSpec spec = 2; // Status describes the current status of a Namespace. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional NamespaceStatus status = 3; } -// NamespaceCondition contains details about state of namespace. -message NamespaceCondition { - // Type of namespace controller condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; - - // +optional - optional string reason = 5; - - // +optional - optional string message = 6; -} - // NamespaceList is a list of Namespaces. message NamespaceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2109,31 +1798,25 @@ message NamespaceStatus { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional optional string phase = 1; - - // Represents the latest available observations of a namespace's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated NamespaceCondition conditions = 2; } // Node is a worker node in Kubernetes. // Each node will have a unique identifier in the cache (i.e. in etcd). message Node { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional NodeSpec spec = 2; // Most recently observed status of the node. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional NodeStatus status = 3; } @@ -2261,7 +1944,7 @@ message NodeDaemonEndpoints { // NodeList is the whole list of all Nodes which have been registered with master. message NodeList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2329,13 +2012,6 @@ message NodeSpec { // +optional optional string podCIDR = 1; - // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this - // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for - // each of IPv4 and IPv6. - // +optional - // +patchStrategy=merge - repeated string podCIDRs = 7; - // ID of the node assigned by the cloud provider in the format: :// // +optional optional string providerID = 3; @@ -2388,9 +2064,6 @@ message NodeStatus { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses - // Note: This field is declared as mergeable, but the merge key is not sufficiently - // unique, which can cause data corruption when it is merged. Callers should instead - // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -2470,24 +2143,10 @@ message ObjectFieldSelector { } // ObjectReference contains enough information to let you inspect or modify the referred object. -// --- -// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. -// 1. Ignored fields. It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. -// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular -// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". -// Those cannot be well described when embedded. -// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. -// 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity -// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple -// and the version of the actual struct is irrelevant. -// 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type -// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. -// Instead of using this type, create a locally provided and used type that is well-focused on your reference. -// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message ObjectReference { // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional string kind = 1; @@ -2511,7 +2170,7 @@ message ObjectReference { optional string apiVersion = 5; // Specific resourceVersion to which this reference is made, if any. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -2532,7 +2191,7 @@ message ObjectReference { // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes message PersistentVolume { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2553,7 +2212,7 @@ message PersistentVolume { // PersistentVolumeClaim is a user's request for and claim to a persistent volume message PersistentVolumeClaim { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -2597,7 +2256,7 @@ message PersistentVolumeClaimCondition { // PersistentVolumeClaimList is a list of PersistentVolumeClaim items. message PersistentVolumeClaimList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2634,18 +2293,15 @@ message PersistentVolumeClaimSpec { // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. // +optional optional string volumeMode = 6; - // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) - // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) - // If the provisioner or an external controller can support the specified data source, - // it will create a new volume based on the contents of the specified data source. - // If the specified data source is not supported, the volume will + // This field requires the VolumeSnapshotDataSource alpha feature gate to be + // enabled and currently VolumeSnapshot is the only supported data source. + // If the provisioner can support VolumeSnapshot data source, it will create + // a new volume and data will be restored to the volume at the same time. + // If the provisioner does not support VolumeSnapshot data source, volume will // not be created and the failure will be reported as an event. // In the future, we plan to support more data source types and the behavior // of the provisioner may change. @@ -2694,7 +2350,7 @@ message PersistentVolumeClaimVolumeSource { // PersistentVolumeList is a list of PersistentVolume items. message PersistentVolumeList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -2728,9 +2384,9 @@ message PersistentVolumeSource { // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional - optional GlusterfsPersistentVolumeSource glusterfs = 4; + optional GlusterfsVolumeSource glusterfs = 4; // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs @@ -2738,7 +2394,7 @@ message PersistentVolumeSource { optional NFSVolumeSource nfs = 5; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://examples.k8s.io/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional optional RBDPersistentVolumeSource rbd = 6; @@ -2747,8 +2403,8 @@ message PersistentVolumeSource { // +optional optional ISCSIPersistentVolumeSource iscsi = 7; - // Cinder represents a cinder volume attached and mounted on kubelets host machine. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional CinderPersistentVolumeSource cinder = 8; @@ -2801,11 +2457,11 @@ message PersistentVolumeSource { optional LocalVolumeSource local = 20; // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://examples.k8s.io/volumes/storageos/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional optional StorageOSPersistentVolumeSource storageos = 21; - // CSI represents storage that is handled by an external CSI driver (Beta feature). + // CSI represents storage that handled by an external CSI driver (Beta feature). // +optional optional CSIPersistentVolumeSource csi = 22; } @@ -2853,6 +2509,7 @@ message PersistentVolumeSpec { // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. + // This is an alpha feature and may change in the future. // +optional optional string volumeMode = 8; @@ -2894,12 +2551,12 @@ message PhotonPersistentDiskVolumeSource { // by clients and scheduled onto hosts. message Pod { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; @@ -2907,7 +2564,7 @@ message Pod { // This data may not be up to date. 
// Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodStatus status = 3; } @@ -3113,23 +2770,15 @@ message PodExecOptions { repeated string command = 6; } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// IP: An IP address allocated to the pod. Routable at least within the cluster. -message PodIP { - // ip is an IP address (IPv4 or IPv6) assigned to the pod - optional string ip = 1; -} - // PodList is a list of Pods. message PodList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md repeated Pod items = 2; } @@ -3176,15 +2825,6 @@ message PodLogOptions { // slightly more or slightly less than the specified limit. // +optional optional int64 limitBytes = 8; - - // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the - // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver - // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real - // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the - // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept - // the actual log data coming from the real kubelet). - // +optional - optional bool insecureSkipTLSVerifyBackend = 9; } // PodPortForwardOptions is the query options to a Pod's port forward call @@ -3225,12 +2865,6 @@ message PodSecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 1; - // The Windows specific settings applied to all containers. - // If unspecified, the options within a container's SecurityContext will be used. - // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - optional WindowsSecurityContextOptions windowsOptions = 8; - // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -3265,11 +2899,11 @@ message PodSecurityContext { // A special supplemental group that applies to all containers in a pod. // Some volume types allow the Kubelet to change the ownership of that volume // to be owned by the pod: - // + // // 1. The owning GID will be the FSGroup // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) // 3. The permission bits are OR'd with rw-rw---- - // + // // If unset, the Kubelet will not modify the ownership and permissions of any volume. // +optional optional int64 fsGroup = 5; @@ -3278,15 +2912,6 @@ message PodSecurityContext { // sysctls (by the container runtime) might fail to launch. 
// +optional repeated Sysctl sysctls = 7; - - // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - // before being exposed inside Pod. This field will only apply to - // volume types which support fsGroup based ownership(and permissions). - // It will have no effect on ephemeral volume types such as: secret, configmaps - // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". - // +optional - optional string fsGroupChangePolicy = 9; } // Describes the class of pods that should avoid this node. @@ -3311,7 +2936,7 @@ message PodSpec { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -3331,16 +2956,6 @@ message PodSpec { // +patchStrategy=merge repeated Container containers = 2; - // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing - // pod to perform user-initiated actions such as debugging. This list cannot be specified when - // creating a pod, and it cannot be modified by updating the pod spec. In order to add an - // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated EphemeralContainer ephemeralContainers = 34; - // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -3424,6 +3039,7 @@ message PodSpec { // in the same pod, and the first process in each container will not be assigned PID 1. // HostPID and ShareProcessNamespace cannot both be set. // Optional: Default to false. + // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. // +k8s:conversion-gen=false // +optional optional bool shareProcessNamespace = 27; @@ -3498,7 +3114,7 @@ message PodSpec { // If specified, all readiness gates will be evaluated for pod readiness. // A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md // +optional repeated PodReadinessGate readinessGates = 28; @@ -3506,46 +3122,10 @@ message PodSpec { // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md - // This is a beta feature as of Kubernetes v1.14. 
+ // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md + // This is an alpha feature and may change in the future. // +optional optional string runtimeClassName = 29; - - // EnableServiceLinks indicates whether information about services should be injected into pod's - // environment variables, matching the syntax of Docker links. - // Optional: Defaults to true. - // +optional - optional bool enableServiceLinks = 30; - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. - // +optional - optional string preemptionPolicy = 31; - - // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - // This field will be autopopulated at admission time by the RuntimeClass admission controller. If - // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - // The RuntimeClass admission controller will reject Pod create requests which have the overhead already - // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. - // +optional - map overhead = 32; - - // TopologySpreadConstraints describes how a group of pods ought to spread across topology - // domains. Scheduler will schedule pods in a way which abides by the constraints. - // This field is only honored by clusters that enable the EvenPodsSpread feature. - // All topologySpreadConstraints are ANDed. - // +optional - // +patchMergeKey=topologyKey - // +patchStrategy=merge - // +listType=map - // +listMapKey=topologyKey - // +listMapKey=whenUnsatisfiable - repeated TopologySpreadConstraint topologySpreadConstraints = 33; } // PodStatus represents information about the status of a pod. Status may trail the actual @@ -3556,7 +3136,7 @@ message PodStatus { // The conditions array, the reason and message fields, and the individual container status // arrays contain more detail about the pod's status. // There are five possible phase values: - // + // // Pending: The pod has been accepted by the Kubernetes system, but one or more of the // container images has not been created. This includes time before being scheduled as // well as time spent downloading images over the network, which could take a while. @@ -3568,7 +3148,7 @@ message PodStatus { // by the system. // Unknown: For some reason the state of the pod could not be obtained, typically due to an // error in communicating with the host of the pod. - // + // // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase // +optional optional string phase = 1; @@ -3608,14 +3188,6 @@ message PodStatus { // +optional optional string podIP = 6; - // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must - // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list - // is empty if no IPs have been allocated yet. 
- // +optional - // +patchStrategy=merge - // +patchMergeKey=ip - repeated PodIP podIPs = 12; - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional @@ -3638,17 +3210,12 @@ message PodStatus { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional optional string qosClass = 9; - - // Status for any ephemeral containers that have run in this pod. - // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. - // +optional - repeated ContainerStatus ephemeralContainerStatuses = 13; } // PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded message PodStatusResult { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3656,7 +3223,7 @@ message PodStatusResult { // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodStatus status = 2; } @@ -3664,12 +3231,12 @@ message PodStatusResult { // PodTemplate describes a template for creating copies of a predefined pod. message PodTemplate { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodTemplateSpec template = 2; } @@ -3677,7 +3244,7 @@ message PodTemplate { // PodTemplateList is a list of PodTemplates. message PodTemplateList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -3688,12 +3255,12 @@ message PodTemplateList { // PodTemplateSpec describes the data a pod should have when created from a template message PodTemplateSpec { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the pod. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional PodSpec spec = 2; } @@ -3773,7 +3340,7 @@ message Probe { optional int32 periodSeconds = 4; // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. // +optional optional int32 successThreshold = 5; @@ -3822,22 +3389,17 @@ message QuobyteVolumeSource { // Default is no group // +optional optional string group = 5; - - // Tenant owning the given Quobyte volume in the Backend - // Used with dynamically provisioned Quobyte volumes, value is set by the plugin - // +optional - optional string tenant = 6; } // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. message RBDPersistentVolumeSource { // A collection of Ceph monitors. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3850,32 +3412,32 @@ message RBDPersistentVolumeSource { // The rados pool name. // Default is rbd. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional SecretReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3884,11 +3446,11 @@ message RBDPersistentVolumeSource { // RBD volumes support ownership management and SELinux relabeling. message RBDVolumeSource { // A collection of Ceph monitors. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it repeated string monitors = 1; // The rados image name. 
- // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it optional string image = 2; // Filesystem type of the volume that you want to mount. @@ -3901,32 +3463,32 @@ message RBDVolumeSource { // The rados pool name. // Default is rbd. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string pool = 4; // The rados user name. // Default is admin. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string user = 5; // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional string keyring = 6; // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional LocalObjectReference secretRef = 7; // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional optional bool readOnly = 8; } @@ -3934,7 +3496,7 @@ message RBDVolumeSource { // RangeAllocation is not a public type. message RangeAllocation { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -3949,12 +3511,12 @@ message RangeAllocation { message ReplicationController { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicationControllerSpec spec = 2; @@ -3962,7 +3524,7 @@ message ReplicationController { // This data may be out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicationControllerStatus status = 3; } @@ -3991,7 +3553,7 @@ message ReplicationControllerCondition { // ReplicationControllerList is a collection of replication controllers. message ReplicationControllerList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4077,17 +3639,17 @@ message ResourceFieldSelector { // ResourceQuota sets aggregate quota restrictions enforced per namespace message ResourceQuota { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ResourceQuotaSpec spec = 2; // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ResourceQuotaStatus status = 3; } @@ -4095,7 +3657,7 @@ message ResourceQuota { // ResourceQuotaList is a list of ResourceQuota items. message ResourceQuotaList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4291,18 +3853,10 @@ message ScopedResourceSelectorRequirement { // the Data field must be less than MaxSecretSize bytes. message Secret { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // Immutable, if set to true, ensures that data stored in the Secret cannot - // be updated (only object metadata can be modified). - // If not set to true, the field can be modified at any time. - // Defaulted to nil. - // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. - // +optional - optional bool immutable = 5; - // Data contains the secret data. Each key must consist of alphanumeric // characters, '-', '_' or '.'. The serialized form of the secret data is a // base64 encoded string, representing the arbitrary (possibly non-string) @@ -4325,7 +3879,7 @@ message Secret { // SecretEnvSource selects a Secret to populate the environment // variables with. -// +// // The contents of the target Secret's Data field will represent the // key-value pairs as environment variables. 
message SecretEnvSource { @@ -4345,7 +3899,7 @@ message SecretKeySelector { // The key of the secret to select from. Must be a valid secret key. optional string key = 2; - // Specify whether the Secret or its key must be defined + // Specify whether the Secret or it's key must be defined // +optional optional bool optional = 3; } @@ -4353,7 +3907,7 @@ message SecretKeySelector { // SecretList is a list of Secret. message SecretList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4363,7 +3917,7 @@ message SecretList { } // Adapts a secret into a projected volume. -// +// // The contents of the target Secret's Data field will be presented in a // projected volume as files using the keys in the Data field as the file names. // Note that this is identical to a secret volume source without the default @@ -4399,7 +3953,7 @@ message SecretReference { } // Adapts a Secret into a volume. -// +// // The contents of the target Secret's Data field will be presented in a volume // as files using the keys in the Data field as the file names. // Secret volumes support ownership management and SELinux relabeling. @@ -4427,7 +3981,7 @@ message SecretVolumeSource { // +optional optional int32 defaultMode = 3; - // Specify whether the Secret or its keys must be defined + // Specify whether the Secret or it's keys must be defined // +optional optional bool optional = 4; } @@ -4454,12 +4008,6 @@ message SecurityContext { // +optional optional SELinuxOptions seLinuxOptions = 3; - // The Windows specific settings applied to all containers. - // If unspecified, the options from the PodSecurityContext will be used. - // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - optional WindowsSecurityContextOptions windowsOptions = 10; - // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -4517,19 +4065,19 @@ message SerializedReference { // will answer requests sent through the proxy. message Service { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ServiceSpec spec = 2; // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ServiceStatus status = 3; } @@ -4540,7 +4088,7 @@ message Service { // * a set of secrets message ServiceAccount { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -4567,7 +4115,7 @@ message ServiceAccount { // ServiceAccountList is a list of ServiceAccount objects message ServiceAccountList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4605,7 +4153,7 @@ message ServiceAccountTokenProjection { // ServiceList holds a list of services. message ServiceList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -4616,9 +4164,8 @@ message ServiceList { // ServicePort contains information on service's port. message ServicePort { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. When considering - // the endpoints for a Service, this must match the 'name' field in the - // EndpointPort. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. // Optional if only one ServicePort is defined on this service. // +optional optional string name = 1; @@ -4628,16 +4175,6 @@ message ServicePort { // +optional optional string protocol = 2; - // The application protocol for this port. - // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as - // mycompany.com/my-custom-protocol. - // Field can be enabled with ServiceAppProtocol feature gate. - // +optional - optional string appProtocol = 6; - // The port that will be exposed by this service. optional int32 port = 3; @@ -4678,9 +4215,6 @@ message ServiceSpec { // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +patchMergeKey=port // +patchStrategy=merge - // +listType=map - // +listMapKey=port - // +listMapKey=protocol repeated ServicePort ports = 1; // Route service traffic to pods with label keys and values matching this @@ -4717,7 +4251,7 @@ message ServiceSpec { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types // +optional optional string type = 4; @@ -4788,31 +4322,6 @@ message ServiceSpec { // sessionAffinityConfig contains the configurations of session affinity. // +optional optional SessionAffinityConfig sessionAffinityConfig = 14; - - // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. - // IPv6). 
If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is - // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. - // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which - // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of - // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the - // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. - // +optional - optional string ipFamily = 15; - - // topologyKeys is a preference-order list of topology keys which - // implementations of services should use to preferentially sort endpoints - // when accessing this Service, it can not be used at the same time as - // externalTrafficPolicy=Local. - // Topology keys must be valid label keys and at most 16 keys may be specified. - // Endpoints are chosen based on the first topology key with available backends. - // If this field is specified and all entries have no backends that match - // the topology of the client, the service has no backends for that client - // and connections should fail. - // The special value "*" may be used to mean "any topology". This catch-all - // value, if used, only makes sense as the last value in the list. - // If this is not specified or empty, no topology constraints will be applied. - // +optional - repeated string topologyKeys = 16; } // ServiceStatus represents the current status of a service. @@ -4921,7 +4430,7 @@ message Taint { // Required. The taint key to be applied to a node. optional string key = 1; - // The taint value corresponding to the taint key. + // Required. The taint value corresponding to the taint key. // +optional optional string value = 2; @@ -4991,59 +4500,6 @@ message TopologySelectorTerm { repeated TopologySelectorLabelRequirement matchLabelExpressions = 1; } -// TopologySpreadConstraint specifies how to spread matching pods among the given topology. -message TopologySpreadConstraint { - // MaxSkew describes the degree to which pods may be unevenly distributed. - // It's the maximum permitted difference between the number of matching pods in - // any two topology domains of a given topology type. - // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 1/1/0: - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P | P | | - // +-------+-------+-------+ - // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; - // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) - // violate MaxSkew(1). - // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - // It's a required field. Default value is 1 and 0 is not allowed. - optional int32 maxSkew = 1; - - // TopologyKey is the key of node labels. Nodes that have a label with this key - // and identical values are considered to be in the same topology. - // We consider each as a "bucket", and try to put balanced number - // of pods into each bucket. - // It's a required field. - optional string topologyKey = 2; - - // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - // the spread constraint. 
- // - DoNotSchedule (default) tells the scheduler not to schedule it - // - ScheduleAnyway tells the scheduler to still schedule it - // It's considered as "Unsatisfiable" if and only if placing incoming pod on any - // topology violates "MaxSkew". - // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 3/1/1: - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P P P | P | P | - // +-------+-------+-------+ - // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - // won't make it *more* imbalanced. - // It's a required field. - optional string whenUnsatisfiable = 3; - - // LabelSelector is used to find matching pods. - // Pods that match this label selector are counted to determine the number of pods - // in their corresponding topology domain. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 4; -} - // TypedLocalObjectReference contains enough information to let you locate the // typed referenced object inside the same namespace. message TypedLocalObjectReference { @@ -5107,13 +4563,6 @@ message VolumeMount { // This field is beta in 1.10. // +optional optional string mountPropagation = 5; - - // Expanded path within the volume from which the container's volume should be mounted. - // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - // Defaults to "" (volume's root). - // SubPathExpr and SubPath are mutually exclusive. - // +optional - optional string subPathExpr = 6; } // VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -5191,12 +4640,12 @@ message VolumeSource { // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://examples.k8s.io/volumes/iscsi/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md // +optional optional ISCSIVolumeSource iscsi = 8; // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional optional GlusterfsVolumeSource glusterfs = 9; @@ -5207,7 +4656,7 @@ message VolumeSource { optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://examples.k8s.io/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional optional RBDVolumeSource rbd = 11; @@ -5216,8 +4665,8 @@ message VolumeSource { // +optional optional FlexVolumeSource flexVolume = 12; - // Cinder represents a cinder volume attached and mounted on kubelets host machine. 
- // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional optional CinderVolumeSource cinder = 13; @@ -5274,10 +4723,6 @@ message VolumeSource { // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional optional StorageOSVolumeSource storageos = 27; - - // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). - // +optional - optional CSIVolumeSource csi = 28; } // Represents a vSphere volume resource. @@ -5310,23 +4755,3 @@ message WeightedPodAffinityTerm { optional PodAffinityTerm podAffinityTerm = 2; } -// WindowsSecurityContextOptions contain Windows-specific options and credentials. -message WindowsSecurityContextOptions { - // GMSACredentialSpecName is the name of the GMSA credential spec to use. - // +optional - optional string gmsaCredentialSpecName = 1; - - // GMSACredentialSpec is where the GMSA admission webhook - // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - // GMSA credential spec named by the GMSACredentialSpecName field. - // +optional - optional string gmsaCredentialSpec = 2; - - // The UserName in Windows to run the entrypoint of the container process. - // Defaults to the user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - optional string runAsUserName = 3; -} - diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go index 8da1dde..1aac0cb 100644 --- a/vendor/k8s.io/api/core/v1/register.go +++ b/vendor/k8s.io/api/core/v1/register.go @@ -88,7 +88,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &RangeAllocation{}, &ConfigMap{}, &ConfigMapList{}, - &EphemeralContainers{}, ) // Add common types diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go index 5bc9cd5..bb80412 100644 --- a/vendor/k8s.io/api/core/v1/resource.go +++ b/vendor/k8s.io/api/core/v1/resource.go @@ -41,14 +41,6 @@ func (self *ResourceList) Memory() *resource.Quantity { return &resource.Quantity{Format: resource.BinarySI} } -// Returns the Storage limit if specified. -func (self *ResourceList) Storage() *resource.Quantity { - if val, ok := (*self)[ResourceStorage]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - func (self *ResourceList) Pods() *resource.Quantity { if val, ok := (*self)[ResourcePods]; ok { return &val diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go index db71bd2..7b606a3 100644 --- a/vendor/k8s.io/api/core/v1/taint.go +++ b/vendor/k8s.io/api/core/v1/taint.go @@ -24,14 +24,8 @@ func (t *Taint) MatchTaint(taintToMatch *Taint) bool { return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect } -// taint.ToString() converts taint struct to string in format '=:', '=:', ':', or ''. +// taint.ToString() converts taint struct to string in format key=value:effect or key:effect. 
func (t *Taint) ToString() string { - if len(t.Effect) == 0 { - if len(t.Value) == 0 { - return fmt.Sprintf("%v", t.Key) - } - return fmt.Sprintf("%v=%v:", t.Key, t.Value) - } if len(t.Value) == 0 { return fmt.Sprintf("%v:%v", t.Key, t.Effect) } diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go index b61a86a..d9a57bd 100644 --- a/vendor/k8s.io/api/core/v1/types.go +++ b/vendor/k8s.io/api/core/v1/types.go @@ -30,8 +30,6 @@ const ( NamespaceAll string = "" // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) NamespaceNodeLease string = "kube-node-lease" - // TopologyKeyAny is the service topology key that matches any node - TopologyKeyAny string = "*" ) // Volume represents a named volume in a pod that may be accessed by any container in the pod. @@ -89,11 +87,11 @@ type VolumeSource struct { NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. - // More info: https://examples.k8s.io/volumes/iscsi/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md // +optional ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` // PersistentVolumeClaimVolumeSource represents a reference to a @@ -102,15 +100,15 @@ type VolumeSource struct { // +optional PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://examples.k8s.io/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` // FlexVolume represents a generic volume resource that is // provisioned/attached using an exec based plugin. // +optional FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -153,9 +151,6 @@ type VolumeSource struct { // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. // +optional StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` - // CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature). - // +optional - CSI *CSIVolumeSource `json:"csi,omitempty" protobuf:"bytes,28,opt,name=csi"` } // PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. 
@@ -194,23 +189,23 @@ type PersistentVolumeSource struct { HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` // Glusterfs represents a Glusterfs volume that is attached to a host and // exposed to the pod. Provisioned by an admin. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md // +optional - Glusterfs *GlusterfsPersistentVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` + Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` // NFS represents an NFS mount on the host. Provisioned by an admin. // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs // +optional NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://examples.k8s.io/volumes/rbd/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md // +optional RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` // ISCSI represents an ISCSI Disk resource that is attached to a // kubelet's host machine and then exposed to the pod. Provisioned by an admin. // +optional ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // Cinder represents a cinder volume attached and mounted on kubelets host machine + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime @@ -250,10 +245,10 @@ type PersistentVolumeSource struct { // +optional Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://examples.k8s.io/volumes/storageos/README.md + // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md // +optional StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` - // CSI represents storage that is handled by an external CSI driver (Beta feature). + // CSI represents storage that handled by an external CSI driver (Beta feature). // +optional CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` } @@ -277,7 +272,7 @@ const ( type PersistentVolume struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -331,6 +326,7 @@ type PersistentVolumeSpec struct { MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` // volumeMode defines if a volume is intended to be used with a formatted filesystem // or to remain in raw block state. Value of Filesystem is implied when not included in spec. 
+ // This is an alpha feature and may change in the future. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. @@ -391,7 +387,7 @@ type PersistentVolumeStatus struct { type PersistentVolumeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of persistent volumes. @@ -406,7 +402,7 @@ type PersistentVolumeList struct { type PersistentVolumeClaim struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -428,7 +424,7 @@ type PersistentVolumeClaim struct { type PersistentVolumeClaimList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // A list of persistent volume claims. @@ -459,22 +455,19 @@ type PersistentVolumeClaimSpec struct { StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` // volumeMode defines what type of volume is required by the claim. // Value of Filesystem is implied when not included in claim spec. + // This is an alpha feature and may change in the future. // +optional VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` - // This field can be used to specify either: - // * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) - // * An existing PVC (PersistentVolumeClaim) - // * An existing custom resource/object that implements data population (Alpha) - // In order to use VolumeSnapshot object types, the appropriate feature gate - // must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) - // If the provisioner or an external controller can support the specified data source, - // it will create a new volume based on the contents of the specified data source. - // If the specified data source is not supported, the volume will + // This field requires the VolumeSnapshotDataSource alpha feature gate to be + // enabled and currently VolumeSnapshot is the only supported data source. + // If the provisioner can support VolumeSnapshot data source, it will create + // a new volume and data will be restored to the volume at the same time. + // If the provisioner does not support VolumeSnapshot data source, volume will // not be created and the failure will be reported as an event. // In the future, we plan to support more data source types and the behavior // of the provisioner may change. 
// +optional - DataSource *TypedLocalObjectReference `json:"dataSource,omitempty" protobuf:"bytes,7,opt,name=dataSource"` + DataSource *TypedLocalObjectReference `json:"dataSource" protobuf:"bytes,7,opt,name=dataSource"` } // PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type @@ -530,7 +523,7 @@ type PersistentVolumeClaimStatus struct { type PersistentVolumeAccessMode string const ( - // can be mounted in read/write mode to exactly 1 host + // can be mounted read/write mode to exactly 1 host ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" // can be mounted in read-only mode to many hosts ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" @@ -629,52 +622,28 @@ type EmptyDirVolumeSource struct { // Glusterfs volumes do not support ownership management or SELinux relabeling. type GlusterfsVolumeSource struct { // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` - - // Path is the Glusterfs volume path. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsPersistentVolumeSource struct { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` // Path is the Glusterfs volume path. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod Path string `json:"path" protobuf:"bytes,2,opt,name=path"` // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. // Defaults to false. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod + // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - - // EndpointsNamespace is the namespace that contains Glusterfs endpoint. - // If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. - // More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod - // +optional - EndpointsNamespace *string `json:"endpointsNamespace,omitempty" protobuf:"bytes,4,opt,name=endpointsNamespace"` } // Represents a Rados Block Device mount that lasts the lifetime of a pod. // RBD volumes support ownership management and SELinux relabeling. type RBDVolumeSource struct { // A collection of Ceph monitors. 
- // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -685,28 +654,28 @@ type RBDVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -715,10 +684,10 @@ type RBDVolumeSource struct { // RBD volumes support ownership management and SELinux relabeling. type RBDPersistentVolumeSource struct { // A collection of Ceph monitors. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // The rados image name. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` // Filesystem type of the volume that you want to mount. // Tip: Ensure that the filesystem type is supported by the host operating system. @@ -729,28 +698,28 @@ type RBDPersistentVolumeSource struct { FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` // The rados pool name. // Default is rbd. 
- // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` // The rados user name. // Default is admin. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` // Keyring is the path to key ring for RBDUser. // Default is /etc/ceph/keyring. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` // SecretRef is name of the authentication secret for RBDUser. If provided // overrides keyring. // Default is nil. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` // ReadOnly here will force the ReadOnly setting in VolumeMounts. // Defaults to false. - // More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` } @@ -760,18 +729,18 @@ type RBDPersistentVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderVolumeSource struct { - // volume id used to identify the volume in cinder. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -785,18 +754,18 @@ type CinderVolumeSource struct { // The volume must also be in the same region as the kubelet. // Cinder volumes support ownership management and SELinux relabeling. type CinderPersistentVolumeSource struct { - // volume id used to identify the volume in cinder. 
- // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // volume id used to identify the volume in cinder + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` // Filesystem type to mount. // Must be a filesystem type supported by the host operating system. // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://examples.k8s.io/mysql-cinder-pd/README.md + // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` // Optional: points to a secret object containing parameters used to connect @@ -809,26 +778,26 @@ type CinderPersistentVolumeSource struct { // Cephfs volumes do not support ownership management or SELinux relabeling. type CephFSVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -848,26 +817,26 @@ type SecretReference struct { // Cephfs volumes do not support ownership management or SELinux relabeling. 
type CephFSPersistentVolumeSource struct { // Required: Monitors is a collection of Ceph monitors - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` // Optional: Used as the mounted root, rather than the full Ceph tree, default is / // +optional Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` // Optional: User is the rados user name, default is admin - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` // Optional: Defaults to false (read/write). ReadOnly here will force // the ReadOnly setting in VolumeMounts. - // More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it + // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it // +optional ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` } @@ -889,10 +858,9 @@ type FlockerVolumeSource struct { type StorageMedium string const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this - StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux) - StorageMediumHugePages StorageMedium = "HugePages" // use hugepages - StorageMediumHugePagesPrefix StorageMedium = "HugePages-" // prefix for full medium notation HugePages- + StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this + StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux) + StorageMediumHugePages StorageMedium = "HugePages" // use hugepages ) // Protocol defines network protocols supported for things like container ports. @@ -963,11 +931,6 @@ type QuobyteVolumeSource struct { // Default is no group // +optional Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` - - // Tenant owning the given Quobyte volume in the Backend - // Used with dynamically provisioned Quobyte volumes, value is set by the plugin - // +optional - Tenant string `json:"tenant,omitempty" protobuf:"bytes,6,opt,name=tenant"` } // FlexPersistentVolumeSource represents a generic persistent volume resource that is @@ -1099,7 +1062,7 @@ type SecretVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. 
// +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` - // Specify whether the Secret or its keys must be defined + // Specify whether the Secret or it's keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1525,7 +1488,7 @@ type ConfigMapVolumeSource struct { // mode, like fsGroup, and the result can be other mode bits set. // +optional DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` - // Specify whether the ConfigMap or its keys must be defined + // Specify whether the ConfigMap or it's keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1552,7 +1515,7 @@ type ConfigMapProjection struct { // relative and may not contain the '..' path or start with '..'. // +optional Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the ConfigMap or its keys must be defined + // Specify whether the ConfigMap or it's keys must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` } @@ -1677,7 +1640,7 @@ type CSIPersistentVolumeSource struct { // ControllerPublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` @@ -1685,7 +1648,7 @@ type CSIPersistentVolumeSource struct { // NodeStageSecretRef is a reference to the secret object containing sensitive // information to pass to the CSI driver to complete the CSI NodeStageVolume // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` @@ -1693,50 +1656,10 @@ type CSIPersistentVolumeSource struct { // NodePublishSecretRef is a reference to the secret object containing // sensitive information to pass to the CSI driver to complete the CSI // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the + // This field is optional, and may be empty if no secret is required. If the // secret object contains more than one secret, all secrets are passed. // +optional NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` - - // ControllerExpandSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // ControllerExpandVolume call. - // This is an alpha field and requires enabling ExpandCSIVolumes feature gate. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. 
- // +optional - ControllerExpandSecretRef *SecretReference `json:"controllerExpandSecretRef,omitempty" protobuf:"bytes,9,opt,name=controllerExpandSecretRef"` -} - -// Represents a source location of a volume to mount, managed by an external CSI driver -type CSIVolumeSource struct { - // Driver is the name of the CSI driver that handles this volume. - // Consult with your admin for the correct name as registered in the cluster. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - - // Specifies a read-only configuration for the volume. - // Defaults to false (read/write). - // +optional - ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` - - // Filesystem type to mount. Ex. "ext4", "xfs", "ntfs". - // If not provided, the empty value is passed to the associated CSI driver - // which will determine the default filesystem to apply. - // +optional - FSType *string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - - // VolumeAttributes stores driver-specific properties that are passed to the CSI - // driver. Consult your driver's documentation for supported values. - // +optional - VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,4,rep,name=volumeAttributes"` - - // NodePublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secret references are passed. - // +optional - NodePublishSecretRef *LocalObjectReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,5,opt,name=nodePublishSecretRef"` } // ContainerPort represents a network port in a single container. @@ -1785,12 +1708,6 @@ type VolumeMount struct { // This field is beta in 1.10. // +optional MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` - // Expanded path within the volume from which the container's volume should be mounted. - // Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. - // Defaults to "" (volume's root). - // SubPathExpr and SubPath are mutually exclusive. - // +optional - SubPathExpr string `json:"subPathExpr,omitempty" protobuf:"bytes,6,opt,name=subPathExpr"` } // MountPropagationMode describes mount propagation. @@ -1851,7 +1768,7 @@ type EnvVar struct { // EnvVarSource represents a source for the value of an EnvVar. type EnvVarSource struct { // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs. + // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. // +optional FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` // Selects a resource of the container: only resources limits and requests @@ -1893,7 +1810,7 @@ type ConfigMapKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key to select. 
Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the ConfigMap or its key must be defined + // Specify whether the ConfigMap or it's key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -1904,7 +1821,7 @@ type SecretKeySelector struct { LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` // The key of the secret to select from. Must be a valid secret key. Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the Secret or its key must be defined + // Specify whether the Secret or it's key must be defined // +optional Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` } @@ -2029,7 +1946,7 @@ type Probe struct { // +optional PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1. + // Defaults to 1. Must be 1 for liveness. Minimum value is 1. // +optional SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` // Minimum consecutive failures for the probe to be considered failed after having succeeded. @@ -2050,16 +1967,6 @@ const ( PullIfNotPresent PullPolicy = "IfNotPresent" ) -// PreemptionPolicy describes a policy for if/when to preempt a pod. -type PreemptionPolicy string - -const ( - // PreemptLowerPriority means that pod can preempt other pods with lower priority. - PreemptLowerPriority PreemptionPolicy = "PreemptLowerPriority" - // PreemptNever means that pod never preempts other pods with lower priority. - PreemptNever PreemptionPolicy = "Never" -) - // TerminationMessagePolicy describes how termination messages are retrieved from a container. type TerminationMessagePolicy string @@ -2153,9 +2060,6 @@ type Container struct { // +optional // +patchMergeKey=containerPort // +patchStrategy=merge - // +listType=map - // +listMapKey=containerPort - // +listMapKey=protocol Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` // List of sources to populate environment variables in the container. // The keys defined within a source must be a C_IDENTIFIER. All invalid keys @@ -2183,6 +2087,7 @@ type Container struct { // +patchStrategy=merge VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` // volumeDevices is the list of block devices to be used by the container. + // This is an alpha feature and may change in the future. // +patchMergeKey=devicePath // +patchStrategy=merge // +optional @@ -2199,16 +2104,6 @@ type Container struct { // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes // +optional ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` - // StartupProbe indicates that the Pod has successfully initialized. - // If specified, no other probes are executed until this completes successfully. - // If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. - // This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, - // when it might take a long time to load data or warm a cache, than during steady-state operation. 
- // This cannot be updated. - // This is a beta feature enabled by the StartupProbe feature flag. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` // Actions that the management system should take in response to container lifecycle events. // Cannot be updated. // +optional @@ -2294,15 +2189,11 @@ type Lifecycle struct { // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` - // PreStop is called immediately before a container is terminated due to an - // API request or management event such as liveness/startup probe failure, - // preemption, resource contention, etc. The handler is not called if the - // container crashes or exits. The reason for termination is passed to the - // handler. The Pod's termination grace period countdown begins before the - // PreStop hooked is executed. Regardless of the outcome of the handler, the - // container will eventually terminate within the Pod's termination grace - // period. Other management of the container blocks until the hook completes - // or until the termination grace period is reached. + // PreStop is called immediately before a container is terminated. + // The container is terminated after the handler completes. + // The reason for termination is passed to the handler. + // Regardless of the outcome of the handler, the container is eventually terminated. + // Other management of the container blocks until the hook completes. // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks // +optional PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` @@ -2403,12 +2294,6 @@ type ContainerStatus struct { // Container's ID in the format 'docker://'. // +optional ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` - // Specifies whether the container has passed its startup probe. - // Initialized as false, becomes true after startupProbe is considered successful. - // Resets to false when the container is restarted, or if kubelet loses state temporarily. - // Is always true when no startupProbe is defined. - // +optional - Started *bool `json:"started,omitempty" protobuf:"varint,9,opt,name=started"` } // PodPhase is a label for the condition of a pod at the current time. @@ -2439,22 +2324,18 @@ type PodConditionType string // These are valid conditions of pod. const ( - // ContainersReady indicates whether all containers in the pod are ready. - ContainersReady PodConditionType = "ContainersReady" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" + // PodScheduled represents status of the scheduling process for this pod. + PodScheduled PodConditionType = "PodScheduled" // PodReady means the pod is able to service requests and should be added to the // load balancing pools of all matching services. PodReady PodConditionType = "Ready" - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" -) - -// These are reasons for a pod's transition to a condition. -const ( + // PodInitialized means that all init containers in the pod have started successfully. 
+ PodInitialized PodConditionType = "Initialized" // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler // can't schedule the pod right now, for example due to insufficient resources in the cluster. PodReasonUnschedulable = "Unschedulable" + // ContainersReady indicates whether all containers in the pod are ready. + ContainersReady PodConditionType = "ContainersReady" ) // PodCondition contains details for the current condition of this pod. @@ -2752,7 +2633,7 @@ type PreferredSchedulingTerm struct { type Taint struct { // Required. The taint key to be applied to a node. Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - // The taint value corresponding to the taint key. + // Required. The taint value corresponding to the taint key. // +optional Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` // Required. The effect of the taint on pods @@ -2844,7 +2725,7 @@ type PodSpec struct { // init container fails, the pod is considered to have failed and is handled according // to its restartPolicy. The name for an init container or normal container must be // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. + // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. // The resourceRequirements of an init container are taken into account during scheduling // by finding the highest request/limit for each resource type, and then using the max of // of that value or the sum of the normal containers. Limits are applied to init containers @@ -2862,15 +2743,6 @@ type PodSpec struct { // +patchMergeKey=name // +patchStrategy=merge Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` - // List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing - // pod to perform user-initiated actions such as debugging. This list cannot be specified when - // creating a pod, and it cannot be modified by updating the pod spec. In order to add an - // ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. - // This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - EphemeralContainers []EphemeralContainer `json:"ephemeralContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,34,rep,name=ephemeralContainers"` // Restart policy for all containers within the pod. // One of Always, OnFailure, Never. // Default to Always. @@ -2944,6 +2816,7 @@ type PodSpec struct { // in the same pod, and the first process in each container will not be assigned PID 1. // HostPID and ShareProcessNamespace cannot both be set. // Optional: Default to false. + // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. // +k8s:conversion-gen=false // +optional ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"` @@ -3003,120 +2876,23 @@ type PodSpec struct { // configuration based on DNSPolicy. // +optional DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` + // If specified, all readiness gates will be evaluated for pod readiness. 
// A pod is ready when all its containers are ready AND // all conditions specified in the readiness gates have status equal to "True" - // More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md // +optional ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an // empty definition that uses the default runtime handler. - // More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md - // This is a beta feature as of Kubernetes v1.14. + // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md + // This is an alpha feature and may change in the future. // +optional RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` - // EnableServiceLinks indicates whether information about services should be injected into pod's - // environment variables, matching the syntax of Docker links. - // Optional: Defaults to true. - // +optional - EnableServiceLinks *bool `json:"enableServiceLinks,omitempty" protobuf:"varint,30,opt,name=enableServiceLinks"` - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. - // +optional - PreemptionPolicy *PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,31,opt,name=preemptionPolicy"` - // Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. - // This field will be autopopulated at admission time by the RuntimeClass admission controller. If - // the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. - // The RuntimeClass admission controller will reject Pod create requests which have the overhead already - // set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value - // defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. - // More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature. - // +optional - Overhead ResourceList `json:"overhead,omitempty" protobuf:"bytes,32,opt,name=overhead"` - // TopologySpreadConstraints describes how a group of pods ought to spread across topology - // domains. Scheduler will schedule pods in a way which abides by the constraints. - // This field is only honored by clusters that enable the EvenPodsSpread feature. - // All topologySpreadConstraints are ANDed. 
- // +optional - // +patchMergeKey=topologyKey - // +patchStrategy=merge - // +listType=map - // +listMapKey=topologyKey - // +listMapKey=whenUnsatisfiable - TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty" patchStrategy:"merge" patchMergeKey:"topologyKey" protobuf:"bytes,33,opt,name=topologySpreadConstraints"` } -type UnsatisfiableConstraintAction string - -const ( - // DoNotSchedule instructs the scheduler not to schedule the pod - // when constraints are not satisfied. - DoNotSchedule UnsatisfiableConstraintAction = "DoNotSchedule" - // ScheduleAnyway instructs the scheduler to schedule the pod - // even if constraints are not satisfied. - ScheduleAnyway UnsatisfiableConstraintAction = "ScheduleAnyway" -) - -// TopologySpreadConstraint specifies how to spread matching pods among the given topology. -type TopologySpreadConstraint struct { - // MaxSkew describes the degree to which pods may be unevenly distributed. - // It's the maximum permitted difference between the number of matching pods in - // any two topology domains of a given topology type. - // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 1/1/0: - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P | P | | - // +-------+-------+-------+ - // - if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 1/1/1; - // scheduling it onto zone1(zone2) would make the ActualSkew(2-0) on zone1(zone2) - // violate MaxSkew(1). - // - if MaxSkew is 2, incoming pod can be scheduled onto any zone. - // It's a required field. Default value is 1 and 0 is not allowed. - MaxSkew int32 `json:"maxSkew" protobuf:"varint,1,opt,name=maxSkew"` - // TopologyKey is the key of node labels. Nodes that have a label with this key - // and identical values are considered to be in the same topology. - // We consider each as a "bucket", and try to put balanced number - // of pods into each bucket. - // It's a required field. - TopologyKey string `json:"topologyKey" protobuf:"bytes,2,opt,name=topologyKey"` - // WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy - // the spread constraint. - // - DoNotSchedule (default) tells the scheduler not to schedule it - // - ScheduleAnyway tells the scheduler to still schedule it - // It's considered as "Unsatisfiable" if and only if placing incoming pod on any - // topology violates "MaxSkew". - // For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same - // labelSelector spread as 3/1/1: - // +-------+-------+-------+ - // | zone1 | zone2 | zone3 | - // +-------+-------+-------+ - // | P P P | P | P | - // +-------+-------+-------+ - // If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled - // to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies - // MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler - // won't make it *more* imbalanced. - // It's a required field. - WhenUnsatisfiable UnsatisfiableConstraintAction `json:"whenUnsatisfiable" protobuf:"bytes,3,opt,name=whenUnsatisfiable,casttype=UnsatisfiableConstraintAction"` - // LabelSelector is used to find matching pods. - // Pods that match this label selector are counted to determine the number of pods - // in their corresponding topology domain. 
- // +optional - LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,4,opt,name=labelSelector"` -} - -const ( - // The default value for enableServiceLinks attribute. - DefaultEnableServiceLinks = true -) - // HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the // pod's hosts file. type HostAlias struct { @@ -3126,22 +2902,6 @@ type HostAlias struct { Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"` } -// PodFSGroupChangePolicy holds policies that will be used for applying fsGroup to a volume -// when volume is mounted. -type PodFSGroupChangePolicy string - -const ( - // FSGroupChangeOnRootMismatch indicates that volume's ownership and permissions will be changed - // only when permission and ownership of root directory does not match with expected - // permissions on the volume. This can help shorten the time it takes to change - // ownership and permissions of a volume. - FSGroupChangeOnRootMismatch PodFSGroupChangePolicy = "OnRootMismatch" - // FSGroupChangeAlways indicates that volume's ownership and permissions - // should always be changed whenever volume is mounted inside a Pod. This the default - // behavior. - FSGroupChangeAlways PodFSGroupChangePolicy = "Always" -) - // PodSecurityContext holds pod-level security attributes and common container settings. // Some fields are also present in container.securityContext. Field values of // container.securityContext take precedence over field values of PodSecurityContext. @@ -3153,11 +2913,6 @@ type PodSecurityContext struct { // takes precedence for that container. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` - // The Windows specific settings applied to all containers. - // If unspecified, the options within a container's SecurityContext will be used. - // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,8,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in SecurityContext. If set in both SecurityContext and @@ -3200,14 +2955,6 @@ type PodSecurityContext struct { // sysctls (by the container runtime) might fail to launch. // +optional Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` - // fsGroupChangePolicy defines behavior of changing ownership and permission of the volume - // before being exposed inside Pod. This field will only apply to - // volume types which support fsGroup based ownership(and permissions). - // It will have no effect on ephemeral volume types such as: secret, configmaps - // and emptydir. - // Valid values are "OnRootMismatch" and "Always". If not specified defaults to "Always". - // +optional - FSGroupChangePolicy *PodFSGroupChangePolicy `json:"fsGroupChangePolicy,omitempty" protobuf:"bytes,9,opt,name=fsGroupChangePolicy"` } // PodQOSClass defines the supported qos classes of Pods. @@ -3251,174 +2998,6 @@ type PodDNSConfigOption struct { Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` } -// IP address information for entries in the (plural) PodIPs field. -// Each entry includes: -// IP: An IP address allocated to the pod. Routable at least within the cluster. 
-type PodIP struct { - // ip is an IP address (IPv4 or IPv6) assigned to the pod - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` -} - -// EphemeralContainerCommon is a copy of all fields in Container to be inlined in -// EphemeralContainer. This separate type allows easy conversion from EphemeralContainer -// to Container and allows separate documentation for the fields of EphemeralContainer. -// When a new field is added to Container it must be added here as well. -type EphemeralContainerCommon struct { - // Name of the ephemeral container specified as a DNS_LABEL. - // This name must be unique among all containers, init containers and ephemeral containers. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Docker image name. - // More info: https://kubernetes.io/docs/concepts/containers/images - Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` - // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` - // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` - // Container's working directory. - // If not specified, the container runtime's default will be used, which - // might be configured in the container image. - // Cannot be updated. - // +optional - WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` - // Ports are not allowed for ephemeral containers. - Ports []ContainerPort `json:"ports,omitempty" protobuf:"bytes,6,rep,name=ports"` - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` - // List of environment variables to set in the container. - // Cannot be updated. 
- // +optional - // +patchMergeKey=name - // +patchStrategy=merge - Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` - // Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources - // already allocated to the pod. - // +optional - Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - // +optional - // +patchMergeKey=mountPath - // +patchStrategy=merge - VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` - // volumeDevices is the list of block devices to be used by the container. - // +patchMergeKey=devicePath - // +patchStrategy=merge - // +optional - VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` - // Probes are not allowed for ephemeral containers. - // +optional - LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` - // Probes are not allowed for ephemeral containers. - // +optional - ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` - // Probes are not allowed for ephemeral containers. - // +optional - StartupProbe *Probe `json:"startupProbe,omitempty" protobuf:"bytes,22,opt,name=startupProbe"` - // Lifecycle is not allowed for ephemeral containers. - // +optional - Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. - // Message written is intended to be brief final status, such as an assertion failure message. - // Will be truncated by the node if greater than 4096 bytes. The total message length across - // all containers will be limited to 12kb. - // Defaults to /dev/termination-log. - // Cannot be updated. - // +optional - TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` - // Indicate how the termination message should be populated. File will use the contents of - // terminationMessagePath to populate the container status message on both success and failure. - // FallbackToLogsOnError will use the last chunk of container log output if the termination - // message file is empty and the container exited with an error. - // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - // Defaults to File. - // Cannot be updated. - // +optional - TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` - // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - // +optional - ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` - // SecurityContext is not allowed for ephemeral containers. 
- // +optional - SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - // +optional - Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - // +optional - StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - // +optional - TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` -} - -// EphemeralContainerCommon converts to Container. All fields must be kept in sync between -// these two types. -var _ = Container(EphemeralContainerCommon{}) - -// An EphemeralContainer is a container that may be added temporarily to an existing pod for -// user-initiated activities such as debugging. Ephemeral containers have no resource or -// scheduling guarantees, and they will not be restarted when they exit or when a pod is -// removed or restarted. If an ephemeral container causes a pod to exceed its resource -// allocation, the pod may be evicted. -// Ephemeral containers may not be added by directly updating the pod spec. They must be added -// via the pod's ephemeralcontainers subresource, and they will appear in the pod spec -// once added. -// This is an alpha feature enabled by the EphemeralContainers feature flag. -type EphemeralContainer struct { - // Ephemeral containers have all of the fields of Container, plus additional fields - // specific to ephemeral containers. Fields in common with Container are in the - // following inlined struct so than an EphemeralContainer may easily be converted - // to a Container. - EphemeralContainerCommon `json:",inline" protobuf:"bytes,1,req"` - - // If set, the name of the container from PodSpec that this ephemeral container targets. - // The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. - // If not set then the ephemeral container is run in whatever namespaces are shared - // for the pod. Note that the container runtime must support this feature. - // +optional - TargetContainerName string `json:"targetContainerName,omitempty" protobuf:"bytes,2,opt,name=targetContainerName"` -} - // PodStatus represents information about the status of a pod. Status may trail the actual // state of a system, especially if the node that hosts the pod cannot contact the control // plane. 
@@ -3474,14 +3053,6 @@ type PodStatus struct { // +optional PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` - // podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must - // match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list - // is empty if no IPs have been allocated yet. - // +optional - // +patchStrategy=merge - // +patchMergeKey=ip - PodIPs []PodIP `json:"podIPs,omitempty" protobuf:"bytes,12,rep,name=podIPs" patchStrategy:"merge" patchMergeKey:"ip"` - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. // This is before the Kubelet pulled the container image(s) for the pod. // +optional @@ -3503,10 +3074,6 @@ type PodStatus struct { // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md // +optional QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` - // Status for any ephemeral containers that have run in this pod. - // This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature. - // +optional - EphemeralContainerStatuses []ContainerStatus `json:"ephemeralContainerStatuses,omitempty" protobuf:"bytes,13,rep,name=ephemeralContainerStatuses"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -3515,21 +3082,19 @@ type PodStatus struct { type PodStatusResult struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Most recently observed status of the pod. // This data may not be up to date. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` } // +genclient -// +genclient:method=GetEphemeralContainers,verb=get,subresource=ephemeralcontainers,result=EphemeralContainers -// +genclient:method=UpdateEphemeralContainers,verb=update,subresource=ephemeralcontainers,input=EphemeralContainers,result=EphemeralContainers // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Pod is a collection of containers that can run on a host. This resource is created @@ -3537,12 +3102,12 @@ type PodStatusResult struct { type Pod struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3550,7 +3115,7 @@ type Pod struct { // This data may not be up to date. 
// Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3561,24 +3126,24 @@ type Pod struct { type PodList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` } // PodTemplateSpec describes the data a pod should have when created from a template type PodTemplateSpec struct { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -3590,12 +3155,12 @@ type PodTemplateSpec struct { type PodTemplate struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` } @@ -3606,7 +3171,7 @@ type PodTemplate struct { type PodTemplateList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3708,8 +3273,8 @@ type ReplicationControllerCondition struct { } // +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale +// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale +// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ReplicationController represents the configuration of a replication controller. @@ -3718,12 +3283,12 @@ type ReplicationController struct { // If the Labels of a ReplicationController are empty, they are defaulted to // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -3731,7 +3296,7 @@ type ReplicationController struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -3742,7 +3307,7 @@ type ReplicationController struct { type ReplicationControllerList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -3843,28 +3408,12 @@ type LoadBalancerIngress struct { Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` } -// IPFamily represents the IP Family (IPv4 or IPv6). This type is used -// to express the family of an IP expressed by a type (i.e. 
service.Spec.IPFamily) -type IPFamily string - -const ( - // IPv4Protocol indicates that this IP is IPv4 protocol - IPv4Protocol IPFamily = "IPv4" - // IPv6Protocol indicates that this IP is IPv6 protocol - IPv6Protocol IPFamily = "IPv6" - // MaxServiceTopologyKeys is the largest number of topology keys allowed on a service - MaxServiceTopologyKeys = 16 -) - // ServiceSpec describes the attributes that a user creates on a service. type ServiceSpec struct { // The list of ports that are exposed by this service. // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies // +patchMergeKey=port // +patchStrategy=merge - // +listType=map - // +listMapKey=port - // +listMapKey=protocol Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` // Route service traffic to pods with label keys and values matching this @@ -3901,7 +3450,7 @@ type ServiceSpec struct { // "LoadBalancer" builds on NodePort and creates an // external load-balancer (if supported in the current cloud) which routes // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types // +optional Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` @@ -3968,43 +3517,16 @@ type ServiceSpec struct { // of peer discovery. // +optional PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"` - // sessionAffinityConfig contains the configurations of session affinity. // +optional SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` - - // ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. - // IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is - // available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. - // Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which - // allocate external load-balancers should use the same IP family. Endpoints for this Service will be of - // this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the - // cluster (e.g. IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment. - // +optional - IPFamily *IPFamily `json:"ipFamily,omitempty" protobuf:"bytes,15,opt,name=ipFamily,Configcasttype=IPFamily"` - - // topologyKeys is a preference-order list of topology keys which - // implementations of services should use to preferentially sort endpoints - // when accessing this Service, it can not be used at the same time as - // externalTrafficPolicy=Local. - // Topology keys must be valid label keys and at most 16 keys may be specified. - // Endpoints are chosen based on the first topology key with available backends. - // If this field is specified and all entries have no backends that match - // the topology of the client, the service has no backends for that client - // and connections should fail. - // The special value "*" may be used to mean "any topology". This catch-all - // value, if used, only makes sense as the last value in the list. 
- // If this is not specified or empty, no topology constraints will be applied. - // +optional - TopologyKeys []string `json:"topologyKeys,omitempty" protobuf:"bytes,16,opt,name=topologyKeys"` } // ServicePort contains information on service's port. type ServicePort struct { // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. When considering - // the endpoints for a Service, this must match the 'name' field in the - // EndpointPort. + // All ports within a ServiceSpec must have unique names. This maps to + // the 'Name' field in EndpointPort objects. // Optional if only one ServicePort is defined on this service. // +optional Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` @@ -4014,16 +3536,6 @@ type ServicePort struct { // +optional Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - // The application protocol for this port. - // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as - // mycompany.com/my-custom-protocol. - // Field can be enabled with ServiceAppProtocol feature gate. - // +optional - AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,6,opt,name=appProtocol"` - // The port that will be exposed by this service. Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` @@ -4057,19 +3569,19 @@ type ServicePort struct { type Service struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the service. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4086,7 +3598,7 @@ const ( type ServiceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4095,7 +3607,6 @@ type ServiceList struct { } // +genclient -// +genclient:method=CreateToken,verb=create,subresource=token,input=k8s.io/api/authentication/v1.TokenRequest,result=k8s.io/api/authentication/v1.TokenRequest // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ServiceAccount binds together: @@ -4105,7 +3616,7 @@ type ServiceList struct { type ServiceAccount struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4135,7 +3646,7 @@ type ServiceAccount struct { type ServiceAccountList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4162,7 +3673,7 @@ type ServiceAccountList struct { type Endpoints struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4224,8 +3735,7 @@ type EndpointAddress struct { // EndpointPort is a tuple that describes a single port. type EndpointPort struct { - // The name of this port. This must match the 'name' field in the - // corresponding ServicePort. + // The name of this port (corresponds to ServicePort.Name). // Must be a DNS_LABEL. // Optional only if one port is defined. // +optional @@ -4239,16 +3749,6 @@ type EndpointPort struct { // Default is TCP. // +optional Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` - - // The application protocol for this port. - // This field follows standard Kubernetes label syntax. - // Un-prefixed names are reserved for IANA standard service names (as per - // RFC-6335 and http://www.iana.org/assignments/service-names). - // Non-standard protocols should use prefixed names such as - // mycompany.com/my-custom-protocol. - // Field can be enabled with ServiceAppProtocol feature gate. - // +optional - AppProtocol *string `json:"appProtocol,omitempty" protobuf:"bytes,4,opt,name=appProtocol"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -4257,7 +3757,7 @@ type EndpointPort struct { type EndpointsList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4270,14 +3770,6 @@ type NodeSpec struct { // PodCIDR represents the pod IP range assigned to the node. // +optional PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` - - // podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this - // field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for - // each of IPv4 and IPv6. - // +optional - // +patchStrategy=merge - PodCIDRs []string `json:"podCIDRs,omitempty" protobuf:"bytes,7,opt,name=podCIDRs" patchStrategy:"merge"` - // ID of the node assigned by the cloud provider in the format: :// // +optional ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` @@ -4296,7 +3788,7 @@ type NodeSpec struct { // Deprecated. Not all kubelets will set this field. Remove field after 1.13. // see: https://issues.k8s.io/61966 // +optional - DoNotUseExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` + DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` } // NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. @@ -4459,9 +3951,6 @@ type NodeStatus struct { // List of addresses reachable to the node. // Queried from cloud provider, if available. // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses - // Note: This field is declared as mergeable, but the merge key is not sufficiently - // unique, which can cause data corruption when it is merged. Callers should instead - // use a full-replacement patch. See http://pr.k8s.io/79391 for an example. // +optional // +patchMergeKey=type // +patchStrategy=merge @@ -4561,6 +4050,9 @@ type NodeConditionType string const ( // NodeReady means kubelet is healthy and ready to accept pods. NodeReady NodeConditionType = "Ready" + // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk + // space on the node. + NodeOutOfDisk NodeConditionType = "OutOfDisk" // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. NodeMemoryPressure NodeConditionType = "MemoryPressure" // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. @@ -4651,19 +4143,19 @@ type ResourceList map[ResourceName]resource.Quantity type Node struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the node. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4674,7 +4166,7 @@ type Node struct { type NodeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4705,12 +4197,6 @@ type NamespaceStatus struct { // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ // +optional Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` - - // Represents the latest available observations of a namespace's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []NamespaceCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` } type NamespacePhase string @@ -4723,42 +4209,6 @@ const ( NamespaceTerminating NamespacePhase = "Terminating" ) -const ( - // NamespaceTerminatingCause is returned as a defaults.cause item when a change is - // forbidden due to the namespace being terminated. - NamespaceTerminatingCause metav1.CauseType = "NamespaceTerminating" -) - -type NamespaceConditionType string - -// These are valid conditions of a namespace. -const ( - // NamespaceDeletionDiscoveryFailure contains information about namespace deleter errors during resource discovery. - NamespaceDeletionDiscoveryFailure NamespaceConditionType = "NamespaceDeletionDiscoveryFailure" - // NamespaceDeletionContentFailure contains information about namespace deleter errors during deletion of resources. - NamespaceDeletionContentFailure NamespaceConditionType = "NamespaceDeletionContentFailure" - // NamespaceDeletionGVParsingFailure contains information about namespace deleter errors parsing GV for legacy types. - NamespaceDeletionGVParsingFailure NamespaceConditionType = "NamespaceDeletionGroupVersionParsingFailure" - // NamespaceContentRemaining contains information about resources remaining in a namespace. - NamespaceContentRemaining NamespaceConditionType = "NamespaceContentRemaining" - // NamespaceFinalizersRemaining contains information about which finalizers are on resources remaining in a namespace. - NamespaceFinalizersRemaining NamespaceConditionType = "NamespaceFinalizersRemaining" -) - -// NamespaceCondition contains details about state of namespace. -type NamespaceCondition struct { - // Type of namespace controller condition. - Type NamespaceConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NamespaceConditionType"` - // Status of the condition, one of True, False, Unknown. 
- Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - // +genclient // +genclient:nonNamespaced // +genclient:skipVerbs=deleteCollection @@ -4769,17 +4219,17 @@ type NamespaceCondition struct { type Namespace struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status describes the current status of a Namespace. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -4790,7 +4240,7 @@ type Namespace struct { type NamespaceList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4806,7 +4256,7 @@ type NamespaceList struct { type Binding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -4814,22 +4264,6 @@ type Binding struct { Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` } -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// A list of ephemeral containers used with the Pod ephemeralcontainers subresource. -type EphemeralContainers struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // A list of ephemeral containers associated with this pod. New ephemeral containers - // may be appended to this list, but existing ephemeral containers may not be removed - // or modified. - // +patchMergeKey=name - // +patchStrategy=merge - EphemeralContainers []EphemeralContainer `json:"ephemeralContainers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=ephemeralContainers"` -} - // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. 
// +k8s:openapi-gen=false type Preconditions struct { @@ -4838,7 +4272,6 @@ type Preconditions struct { UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodLogOptions is the query options for a Pod's logs REST call. @@ -4879,18 +4312,8 @@ type PodLogOptions struct { // slightly more or slightly less than the specified limit. // +optional LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` - - // insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the - // serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver - // and the backend insecure. This means the apiserver cannot verify the log data it is receiving came from the real - // kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the - // connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept - // the actual log data coming from the real kubelet). - // +optional - InsecureSkipTLSVerifyBackend bool `json:"insecureSkipTLSVerifyBackend,omitempty" protobuf:"varint,9,opt,name=insecureSkipTLSVerifyBackend"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodAttachOptions is the query options to a Pod's remote attach call. @@ -4928,7 +4351,6 @@ type PodAttachOptions struct { Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodExecOptions is the query options to a Pod's remote exec call. @@ -4967,7 +4389,6 @@ type PodExecOptions struct { Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodPortForwardOptions is the query options to a Pod's port forward call @@ -4985,7 +4406,6 @@ type PodPortForwardOptions struct { Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodProxyOptions is the query options to a Pod's proxy call. @@ -4997,7 +4417,6 @@ type PodProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // NodeProxyOptions is the query options to a Node's proxy call. @@ -5009,7 +4428,6 @@ type NodeProxyOptions struct { Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ServiceProxyOptions is the query options to a Service's proxy call. @@ -5026,24 +4444,10 @@ type ServiceProxyOptions struct { } // ObjectReference contains enough information to let you inspect or modify the referred object. -// --- -// New uses of this type are discouraged because of difficulty describing its usage when embedded in APIs. -// 1. Ignored fields. 
It includes many fields which are not generally honored. For instance, ResourceVersion and FieldPath are both very rarely valid in actual usage. -// 2. Invalid usage help. It is impossible to add specific help for individual usage. In most embedded usages, there are particular -// restrictions like, "must refer only to types A and B" or "UID not honored" or "name must be restricted". -// Those cannot be well described when embedded. -// 3. Inconsistent validation. Because the usages are different, the validation rules are different by usage, which makes it hard for users to predict what will happen. -// 4. The fields are both imprecise and overly precise. Kind is not a precise mapping to a URL. This can produce ambiguity -// during interpretation and require a REST mapping. In most cases, the dependency is on the group,resource tuple -// and the version of the actual struct is irrelevant. -// 5. We cannot easily change it. Because this type is embedded in many locations, updates to this type -// will affect numerous schemas. Don't make new APIs embed an underspecified API type they do not control. -// Instead of using this type, create a locally provided and used type that is well-focused on your reference. -// For example, ServiceReferences for admission registration: https://github.com/kubernetes/api/blob/release-1.17/admissionregistration/v1/types.go#L533 . // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object type ObjectReference struct { // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // Namespace of the referent. @@ -5062,7 +4466,7 @@ type ObjectReference struct { // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` // Specific resourceVersion to which this reference is made, if any. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -5137,7 +4541,7 @@ const ( type Event struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` // The object that this event is about. @@ -5207,7 +4611,6 @@ type EventSeries struct { // Time of the last occurrence observed LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` // State of this Series: Ongoing or Finished - // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` } @@ -5225,7 +4628,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5253,7 +4656,8 @@ const ( // LimitRangeItem defines a min/max usage limit for any resource that matches on kind. type LimitRangeItem struct { // Type of resource that this limit applies to. - Type LimitType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` + // +optional + Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` // Max usage constraints on this kind by resource name. // +optional Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` @@ -5284,12 +4688,12 @@ type LimitRangeSpec struct { type LimitRange struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` } @@ -5300,7 +4704,7 @@ type LimitRange struct { type LimitRangeList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5440,17 +4844,17 @@ type ResourceQuotaStatus struct { type ResourceQuota struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -5461,7 +4865,7 @@ type ResourceQuota struct { type ResourceQuotaList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5478,18 +4882,10 @@ type ResourceQuotaList struct { type Secret struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Immutable, if set to true, ensures that data stored in the Secret cannot - // be updated (only object metadata can be modified). - // If not set to true, the field can be modified at any time. - // Defaulted to nil. - // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. - // +optional - Immutable *bool `json:"immutable,omitempty" protobuf:"varint,5,opt,name=immutable"` - // Data contains the secret data. Each key must consist of alphanumeric // characters, '-', '_' or '.'. The serialized form of the secret data is a // base64 encoded string, representing the arbitrary (possibly non-string) @@ -5591,10 +4987,6 @@ const ( TLSCertKey = "tls.crt" // TLSPrivateKeyKey is the key for the private key field in a TLS secret. TLSPrivateKeyKey = "tls.key" - // SecretTypeBootstrapToken is used during the automated bootstrap process (first - // implemented by kubeadm). It stores tokens that are used to sign well known - // ConfigMaps. They are used for authn. - SecretTypeBootstrapToken SecretType = "bootstrap.kubernetes.io/token" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -5603,7 +4995,7 @@ const ( type SecretList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5619,18 +5011,10 @@ type SecretList struct { type ConfigMap struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Immutable, if set to true, ensures that data stored in the ConfigMap cannot - // be updated (only object metadata can be modified). - // If not set to true, the field can be modified at any time. - // Defaulted to nil. - // This is an alpha field enabled by ImmutableEphemeralVolumes feature gate. - // +optional - Immutable *bool `json:"immutable,omitempty" protobuf:"varint,4,opt,name=immutable"` - // Data contains the configuration data. // Each key must consist of alphanumeric characters, '-', '_' or '.'. // Values with non-UTF-8 byte sequences must use the BinaryData field. 
@@ -5656,7 +5040,7 @@ type ConfigMap struct { type ConfigMapList struct { metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5698,7 +5082,7 @@ type ComponentCondition struct { type ComponentStatus struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5715,7 +5099,7 @@ type ComponentStatus struct { type ComponentStatusList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -5789,11 +5173,6 @@ type SecurityContext struct { // PodSecurityContext, the value specified in SecurityContext takes precedence. // +optional SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` - // The Windows specific settings applied to all containers. - // If unspecified, the options from the PodSecurityContext will be used. - // If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - WindowsOptions *WindowsSecurityContextOptions `json:"windowsOptions,omitempty" protobuf:"bytes,10,opt,name=windowsOptions"` // The UID to run the entrypoint of the container process. // Defaults to user specified in image metadata if unspecified. // May also be set in PodSecurityContext. If set in both SecurityContext and @@ -5864,33 +5243,13 @@ type SELinuxOptions struct { Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` } -// WindowsSecurityContextOptions contain Windows-specific options and credentials. -type WindowsSecurityContextOptions struct { - // GMSACredentialSpecName is the name of the GMSA credential spec to use. - // +optional - GMSACredentialSpecName *string `json:"gmsaCredentialSpecName,omitempty" protobuf:"bytes,1,opt,name=gmsaCredentialSpecName"` - - // GMSACredentialSpec is where the GMSA admission webhook - // (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the - // GMSA credential spec named by the GMSACredentialSpecName field. - // +optional - GMSACredentialSpec *string `json:"gmsaCredentialSpec,omitempty" protobuf:"bytes,2,opt,name=gmsaCredentialSpec"` - - // The UserName in Windows to run the entrypoint of the container process. - // Defaults to the user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsUserName *string `json:"runAsUserName,omitempty" protobuf:"bytes,3,opt,name=runAsUserName"` -} - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RangeAllocation is not a public type. 
type RangeAllocation struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go index 331451f..c781e54 100644 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go @@ -108,7 +108,7 @@ func (AzureFileVolumeSource) SwaggerDoc() map[string]string { var map_Binding = map[string]string{ "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "target": "The target object that you want to bind to the standard object.", } @@ -123,29 +123,15 @@ var map_CSIPersistentVolumeSource = map[string]string{ "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", "volumeAttributes": "Attributes of the volume to publish.", - "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "controllerExpandSecretRef": "ControllerExpandSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerExpandVolume call. This is an alpha field and requires enabling ExpandCSIVolumes feature gate. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. 
If the secret object contains more than one secret, all secrets are passed.", + "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", + "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", } func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { return map_CSIPersistentVolumeSource } -var map_CSIVolumeSource = map[string]string{ - "": "Represents a source location of a volume to mount, managed by an external CSI driver", - "driver": "Driver is the name of the CSI driver that handles this volume. Consult with your admin for the correct name as registered in the cluster.", - "readOnly": "Specifies a read-only configuration for the volume. Defaults to false (read/write).", - "fsType": "Filesystem type to mount. Ex. \"ext4\", \"xfs\", \"ntfs\". If not provided, the empty value is passed to the associated CSI driver which will determine the default filesystem to apply.", - "volumeAttributes": "VolumeAttributes stores driver-specific properties that are passed to the CSI driver. Consult your driver's documentation for supported values.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secret references are passed.", -} - -func (CSIVolumeSource) SwaggerDoc() map[string]string { - return map_CSIVolumeSource -} - var map_Capabilities = map[string]string{ "": "Adds and removes POSIX capabilities from running containers.", "add": "Added capabilities", @@ -158,12 +144,12 @@ func (Capabilities) SwaggerDoc() map[string]string { var map_CephFSPersistentVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). 
ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", } func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -172,12 +158,12 @@ func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CephFSVolumeSource = map[string]string{ "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/volumes/cephfs/README.md#how-to-use-it", + "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", } func (CephFSVolumeSource) SwaggerDoc() map[string]string { @@ -186,9 +172,9 @@ func (CephFSVolumeSource) SwaggerDoc() map[string]string { var map_CinderPersistentVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. 
Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -198,9 +184,9 @@ func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { var map_CinderVolumeSource = map[string]string{ "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", } @@ -231,7 +217,7 @@ func (ComponentCondition) SwaggerDoc() map[string]string { var map_ComponentStatus = map[string]string{ "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "conditions": "List of component conditions observed", } @@ -241,7 +227,7 @@ func (ComponentStatus) SwaggerDoc() map[string]string { var map_ComponentStatusList = map[string]string{ "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ComponentStatus objects.", } @@ -251,8 +237,7 @@ func (ComponentStatusList) SwaggerDoc() map[string]string { var map_ConfigMap = map[string]string{ "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "immutable": "Immutable, if set to true, ensures that data stored in the ConfigMap cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. Using this field will require 1.10+ apiserver and kubelet.", } @@ -273,7 +258,7 @@ func (ConfigMapEnvSource) SwaggerDoc() map[string]string { var map_ConfigMapKeySelector = map[string]string{ "": "Selects a key from a ConfigMap.", "key": "The key to select.", - "optional": "Specify whether the ConfigMap or its key must be defined", + "optional": "Specify whether the ConfigMap or it's key must be defined", } func (ConfigMapKeySelector) SwaggerDoc() map[string]string { @@ -282,7 +267,7 @@ func (ConfigMapKeySelector) SwaggerDoc() map[string]string { var map_ConfigMapList = map[string]string{ "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of ConfigMaps.", } @@ -306,7 +291,7 @@ func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { var map_ConfigMapProjection = map[string]string{ "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. 
Note that this is identical to a configmap volume source without the default mode.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the ConfigMap or its keys must be defined", + "optional": "Specify whether the ConfigMap or it's keys must be defined", } func (ConfigMapProjection) SwaggerDoc() map[string]string { @@ -317,7 +302,7 @@ var map_ConfigMapVolumeSource = map[string]string{ "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.", "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the ConfigMap or its keys must be defined", + "optional": "Specify whether the ConfigMap or it's keys must be defined", } func (ConfigMapVolumeSource) SwaggerDoc() map[string]string { @@ -336,10 +321,9 @@ var map_Container = map[string]string{ "env": "List of environment variables to set in the container. Cannot be updated.", "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.", - "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", + "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.", "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "startupProbe": "StartupProbe indicates that the Pod has successfully initialized. If specified, no other probes are executed until this completes successfully. 
If this probe fails, the Pod will be restarted, just as if the livenessProbe failed. This can be used to provide different probe parameters at the beginning of a Pod's lifecycle, when it might take a long time to load data or warm a cache, than during steady-state operation. This cannot be updated. This is a beta feature enabled by the StartupProbe feature flag. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.", "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", @@ -432,7 +416,6 @@ var map_ContainerStatus = map[string]string{ "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images", "imageID": "ImageID of the container's image.", "containerID": "Container's ID in the format 'docker://'.", - "started": "Specifies whether the container has passed its startup probe. Initialized as false, becomes true after startupProbe is considered successful. Resets to false when the container is restarted, or if kubelet loses state temporarily. Is always true when no startupProbe is defined.", } func (ContainerStatus) SwaggerDoc() map[string]string { @@ -502,11 +485,10 @@ func (EndpointAddress) SwaggerDoc() map[string]string { } var map_EndpointPort = map[string]string{ - "": "EndpointPort is a tuple that describes a single port.", - "name": "The name of this port. This must match the 'name' field in the corresponding ServicePort. Must be a DNS_LABEL. Optional only if one port is defined.", - "port": "The port number of the endpoint.", - "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.", + "": "EndpointPort is a tuple that describes a single port.", + "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.", + "port": "The port number of the endpoint.", + "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. 
Default is TCP.", } func (EndpointPort) SwaggerDoc() map[string]string { @@ -526,7 +508,7 @@ func (EndpointSubset) SwaggerDoc() map[string]string { var map_Endpoints = map[string]string{ "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.", } @@ -536,7 +518,7 @@ func (Endpoints) SwaggerDoc() map[string]string { var map_EndpointsList = map[string]string{ "": "EndpointsList is a list of endpoints.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of endpoints.", } @@ -568,7 +550,7 @@ func (EnvVar) SwaggerDoc() map[string]string { var map_EnvVarSource = map[string]string{ "": "EnvVarSource represents a source for the value of an EnvVar.", - "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP, status.podIPs.", + "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.", "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.", "configMapKeyRef": "Selects a key of a ConfigMap.", "secretKeyRef": "Selects a key of a secret in the pod's namespace", @@ -578,57 +560,9 @@ func (EnvVarSource) SwaggerDoc() map[string]string { return map_EnvVarSource } -var map_EphemeralContainer = map[string]string{ - "": "An EphemeralContainer is a container that may be added temporarily to an existing pod for user-initiated activities such as debugging. Ephemeral containers have no resource or scheduling guarantees, and they will not be restarted when they exit or when a pod is removed or restarted. If an ephemeral container causes a pod to exceed its resource allocation, the pod may be evicted. Ephemeral containers may not be added by directly updating the pod spec. They must be added via the pod's ephemeralcontainers subresource, and they will appear in the pod spec once added. 
This is an alpha feature enabled by the EphemeralContainers feature flag.", - "targetContainerName": "If set, the name of the container from PodSpec that this ephemeral container targets. The ephemeral container will be run in the namespaces (IPC, PID, etc) of this container. If not set then the ephemeral container is run in whatever namespaces are shared for the pod. Note that the container runtime must support this feature.", -} - -func (EphemeralContainer) SwaggerDoc() map[string]string { - return map_EphemeralContainer -} - -var map_EphemeralContainerCommon = map[string]string{ - "": "EphemeralContainerCommon is a copy of all fields in Container to be inlined in EphemeralContainer. This separate type allows easy conversion from EphemeralContainer to Container and allows separate documentation for the fields of EphemeralContainer. When a new field is added to Container it must be added here as well.", - "name": "Name of the ephemeral container specified as a DNS_LABEL. This name must be unique among all containers, init containers and ephemeral containers.", - "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images", - "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell", - "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.", - "ports": "Ports are not allowed for ephemeral containers.", - "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.", - "env": "List of environment variables to set in the container. Cannot be updated.", - "resources": "Resources are not allowed for ephemeral containers. Ephemeral containers use spare resources already allocated to the pod.", - "volumeMounts": "Pod volumes to mount into the container's filesystem. 
Cannot be updated.", - "volumeDevices": "volumeDevices is the list of block devices to be used by the container.", - "livenessProbe": "Probes are not allowed for ephemeral containers.", - "readinessProbe": "Probes are not allowed for ephemeral containers.", - "startupProbe": "Probes are not allowed for ephemeral containers.", - "lifecycle": "Lifecycle is not allowed for ephemeral containers.", - "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.", - "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.", - "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images", - "securityContext": "SecurityContext is not allowed for ephemeral containers.", - "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.", - "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false", - "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.", -} - -func (EphemeralContainerCommon) SwaggerDoc() map[string]string { - return map_EphemeralContainerCommon -} - -var map_EphemeralContainers = map[string]string{ - "": "A list of ephemeral containers used with the Pod ephemeralcontainers subresource.", - "ephemeralContainers": "A list of ephemeral containers associated with this pod. New ephemeral containers may be appended to this list, but existing ephemeral containers may not be removed or modified.", -} - -func (EphemeralContainers) SwaggerDoc() map[string]string { - return map_EphemeralContainers -} - var map_Event = map[string]string{ "": "Event is a report of an event somewhere in the cluster.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "involvedObject": "The object that this event is about.", "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.", "message": "A human-readable description of the status of this operation.", @@ -651,7 +585,7 @@ func (Event) SwaggerDoc() map[string]string { var map_EventList = map[string]string{ "": "EventList is a list of events.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of events", } @@ -663,7 +597,7 @@ var map_EventSeries = map[string]string{ "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.", "count": "Number of occurrences in this series up to the last heartbeat time", "lastObservedTime": "Time of the last occurrence observed", - "state": "State of this Series: Ongoing or Finished Deprecated. Planned removal for 1.18", + "state": "State of this Series: Ongoing or Finished", } func (EventSeries) SwaggerDoc() map[string]string { @@ -761,23 +695,11 @@ func (GitRepoVolumeSource) SwaggerDoc() map[string]string { return map_GitRepoVolumeSource } -var map_GlusterfsPersistentVolumeSource = map[string]string{ - "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "endpointsNamespace": "EndpointsNamespace is the namespace that contains Glusterfs endpoint. If this field is empty, the EndpointNamespace defaults to the same namespace as the bound PVC. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", -} - -func (GlusterfsPersistentVolumeSource) SwaggerDoc() map[string]string { - return map_GlusterfsPersistentVolumeSource -} - var map_GlusterfsVolumeSource = map[string]string{ "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.", - "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "path": "Path is the Glusterfs volume path. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", - "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://examples.k8s.io/volumes/glusterfs/README.md#create-a-pod", + "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "path": "Path is the Glusterfs volume path. 
More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", + "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod", } func (GlusterfsVolumeSource) SwaggerDoc() map[string]string { @@ -890,7 +812,7 @@ func (KeyToPath) SwaggerDoc() map[string]string { var map_Lifecycle = map[string]string{ "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.", "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", - "preStop": "PreStop is called immediately before a container is terminated due to an API request or management event such as liveness/startup probe failure, preemption, resource contention, etc. The handler is not called if the container crashes or exits. The reason for termination is passed to the handler. The Pod's termination grace period countdown begins before the PreStop hooked is executed. Regardless of the outcome of the handler, the container will eventually terminate within the Pod's termination grace period. Other management of the container blocks until the hook completes or until the termination grace period is reached. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", + "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks", } func (Lifecycle) SwaggerDoc() map[string]string { @@ -899,8 +821,8 @@ func (Lifecycle) SwaggerDoc() map[string]string { var map_LimitRange = map[string]string{ "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (LimitRange) SwaggerDoc() map[string]string { @@ -923,7 +845,7 @@ func (LimitRangeItem) SwaggerDoc() map[string]string { var map_LimitRangeList = map[string]string{ "": "LimitRangeList is a list of LimitRange items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", } @@ -991,28 +913,18 @@ func (NFSVolumeSource) SwaggerDoc() map[string]string { var map_Namespace = map[string]string{ "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Namespace) SwaggerDoc() map[string]string { return map_Namespace } -var map_NamespaceCondition = map[string]string{ - "": "NamespaceCondition contains details about state of namespace.", - "type": "Type of namespace controller condition.", - "status": "Status of the condition, one of True, False, Unknown.", -} - -func (NamespaceCondition) SwaggerDoc() map[string]string { - return map_NamespaceCondition -} - var map_NamespaceList = map[string]string{ "": "NamespaceList is a list of Namespaces.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", } @@ -1030,9 +942,8 @@ func (NamespaceSpec) SwaggerDoc() map[string]string { } var map_NamespaceStatus = map[string]string{ - "": "NamespaceStatus is information about the current status of a Namespace.", - "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", - "conditions": "Represents the latest available observations of a namespace's current state.", + "": "NamespaceStatus is information about the current status of a Namespace.", + "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/", } func (NamespaceStatus) SwaggerDoc() map[string]string { @@ -1041,9 +952,9 @@ func (NamespaceStatus) SwaggerDoc() map[string]string { var map_Node = map[string]string{ "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a node. 
https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Node) SwaggerDoc() map[string]string { @@ -1116,7 +1027,7 @@ func (NodeDaemonEndpoints) SwaggerDoc() map[string]string { var map_NodeList = map[string]string{ "": "NodeList is the whole list of all Nodes which have been registered with master.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of nodes", } @@ -1175,7 +1086,6 @@ func (NodeSelectorTerm) SwaggerDoc() map[string]string { var map_NodeSpec = map[string]string{ "": "NodeSpec describes the attributes that a node is created with.", "podCIDR": "PodCIDR represents the pod IP range assigned to the node.", - "podCIDRs": "podCIDRs represents the IP ranges assigned to the node for usage by Pods on that node. If this field is specified, the 0th entry must match the podCIDR field. It may contain at most 1 value for each of IPv4 and IPv6.", "providerID": "ID of the node assigned by the cloud provider in the format: ://", "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration", "taints": "If specified, the node's taints.", @@ -1193,7 +1103,7 @@ var map_NodeStatus = map[string]string{ "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.", "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.", "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition", - "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses Note: This field is declared as mergeable, but the merge key is not sufficiently unique, which can cause data corruption when it is merged. Callers should instead use a full-replacement patch. See http://pr.k8s.io/79391 for an example.", + "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses", "daemonEndpoints": "Endpoints of daemons running on the Node.", "nodeInfo": "Set of ids/uuids to uniquely identify the node. 
More info: https://kubernetes.io/docs/concepts/nodes/node/#info", "images": "List of container images on this node", @@ -1236,12 +1146,12 @@ func (ObjectFieldSelector) SwaggerDoc() map[string]string { var map_ObjectReference = map[string]string{ "": "ObjectReference contains enough information to let you inspect or modify the referred object.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/", "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids", "apiVersion": "API version of the referent.", - "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.", } @@ -1251,7 +1161,7 @@ func (ObjectReference) SwaggerDoc() map[string]string { var map_PersistentVolume = map[string]string{ "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes", } @@ -1262,7 +1172,7 @@ func (PersistentVolume) SwaggerDoc() map[string]string { var map_PersistentVolumeClaim = map[string]string{ "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1285,7 +1195,7 @@ func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string { var map_PersistentVolumeClaimList = map[string]string{ "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", } @@ -1300,8 +1210,8 @@ var map_PersistentVolumeClaimSpec = map[string]string{ "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources", "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.", "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1", - "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec.", - "dataSource": "This field can be used to specify either: * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot - Beta) * An existing PVC (PersistentVolumeClaim) * An existing custom resource/object that implements data population (Alpha) In order to use VolumeSnapshot object types, the appropriate feature gate must be enabled (VolumeSnapshotDataSource or AnyVolumeDataSource) If the provisioner or an external controller can support the specified data source, it will create a new volume based on the contents of the specified data source. If the specified data source is not supported, the volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.", + "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.", + "dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. 
In the future, we plan to support more data source types and the behavior of the provisioner may change.", } func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string { @@ -1332,7 +1242,7 @@ func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string { var map_PersistentVolumeList = map[string]string{ "": "PersistentVolumeList is a list of PersistentVolume items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes", } @@ -1345,11 +1255,11 @@ var map_PersistentVolumeSource = map[string]string{ "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running", @@ -1362,8 +1272,8 @@ var map_PersistentVolumeSource = map[string]string{ "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", "local": "Local represents directly-attached storage with node affinity", - "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://examples.k8s.io/volumes/storageos/README.md", - "csi": "CSI represents storage that is handled by an external CSI driver (Beta feature).", + "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md", + "csi": "CSI represents storage that handled by an external CSI driver (Beta feature).", } func (PersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1378,7 +1288,7 @@ var map_PersistentVolumeSpec = map[string]string{ "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming", "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.", "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options", - "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec.", + "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.", "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from. This field influences the scheduling of pods that use this volume.", } @@ -1409,9 +1319,9 @@ func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { var map_Pod = map[string]string{ "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Pod) SwaggerDoc() map[string]string { @@ -1510,19 +1420,10 @@ func (PodExecOptions) SwaggerDoc() map[string]string { return map_PodExecOptions } -var map_PodIP = map[string]string{ - "": "IP address information for entries in the (plural) PodIPs field. Each entry includes:\n IP: An IP address allocated to the pod. Routable at least within the cluster.", - "ip": "ip is an IP address (IPv4 or IPv6) assigned to the pod", -} - -func (PodIP) SwaggerDoc() map[string]string { - return map_PodIP -} - var map_PodList = map[string]string{ "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "items": "List of pods. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md", } func (PodList) SwaggerDoc() map[string]string { @@ -1530,16 +1431,15 @@ func (PodList) SwaggerDoc() map[string]string { } var map_PodLogOptions = map[string]string{ - "": "PodLogOptions is the query options for a Pod's logs REST call.", - "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", - "follow": "Follow the log stream of the pod. Defaults to false.", - "previous": "Return previous terminated container logs. Defaults to false.", - "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", - "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", - "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", - "insecureSkipTLSVerifyBackend": "insecureSkipTLSVerifyBackend indicates that the apiserver should not confirm the validity of the serving certificate of the backend it is connecting to. This will make the HTTPS connection between the apiserver and the backend insecure. 
This means the apiserver cannot verify the log data it is receiving came from the real kubelet. If the kubelet is configured to verify the apiserver's TLS credentials, it does not mean the connection to the real kubelet is vulnerable to a man in the middle attack (e.g. an attacker could not intercept the actual log data coming from the real kubelet).", + "": "PodLogOptions is the query options for a Pod's logs REST call.", + "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", + "follow": "Follow the log stream of the pod. Defaults to false.", + "previous": "Return previous terminated container logs. Defaults to false.", + "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", + "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", + "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", + "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", } func (PodLogOptions) SwaggerDoc() map[string]string { @@ -1574,16 +1474,14 @@ func (PodReadinessGate) SwaggerDoc() map[string]string { } var map_PodSecurityContext = map[string]string{ - "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options within a container's SecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. 
If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", - "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", - "fsGroupChangePolicy": "fsGroupChangePolicy defines behavior of changing ownership and permission of the volume before being exposed inside Pod. This field will only apply to volume types which support fsGroup based ownership(and permissions). It will have no effect on ephemeral volume types such as: secret, configmaps and emptydir. Valid values are \"OnRootMismatch\" and \"Always\". If not specified defaults to \"Always\".", + "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", + "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", + "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", + "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", + "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. 
The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", + "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", } func (PodSecurityContext) SwaggerDoc() map[string]string { @@ -1602,9 +1500,8 @@ func (PodSignature) SwaggerDoc() map[string]string { var map_PodSpec = map[string]string{ "": "PodSpec is a description of a pod.", "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", - "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, Liveness probes, or Startup probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", + "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. Cannot be updated.", - "ephemeralContainers": "List of ephemeral containers run in this pod. Ephemeral containers may be run in an existing pod to perform user-initiated actions such as debugging. This list cannot be specified when creating a pod, and it cannot be modified by updating the pod spec. In order to add an ephemeral container to an existing pod, use the pod's ephemeralcontainers subresource. This field is alpha-level and is only honored by servers that enable the EphemeralContainers feature.", "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. 
If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", @@ -1617,7 +1514,7 @@ var map_PodSpec = map[string]string{ "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", "hostPID": "Use the host's pid namespace. Optional: Default to false.", "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", - "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false.", + "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.", "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", @@ -1629,12 +1526,8 @@ var map_PodSpec = map[string]string{ "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. 
A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://git.k8s.io/enhancements/keps/sig-network/0007-pod-ready%2B%2B.md", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md This is a beta feature as of Kubernetes v1.14.", - "enableServiceLinks": "EnableServiceLinks indicates whether information about services should be injected into pod's environment variables, matching the syntax of Docker links. Optional: Defaults to true.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. This field will be autopopulated at admission time by the RuntimeClass admission controller. If the RuntimeClass admission controller is enabled, overhead must not be set in Pod create requests. The RuntimeClass admission controller will reject Pod create requests which have the overhead already set. If RuntimeClass is configured and selected in the PodSpec, Overhead will be set to the value defined in the corresponding RuntimeClass, otherwise it will remain unset and treated as zero. More info: https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.", - "topologySpreadConstraints": "TopologySpreadConstraints describes how a group of pods ought to spread across topology domains. Scheduler will schedule pods in a way which abides by the constraints. This field is only honored by clusters that enable the EvenPodsSpread feature. All topologySpreadConstraints are ANDed.", + "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", + "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", } func (PodSpec) SwaggerDoc() map[string]string { @@ -1642,20 +1535,18 @@ func (PodSpec) SwaggerDoc() map[string]string { } var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. 
Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", - "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "podIPs": "podIPs holds the IP addresses allocated to the pod. If this field is specified, the 0th entry must match the podIP field. Pods may be allocated at most 1 value for each of IPv4 and IPv6. This list is empty if no IPs have been allocated yet.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", - "ephemeralContainerStatuses": "Status for any ephemeral containers that have run in this pod. This field is alpha-level and is only populated by servers that enable the EphemeralContainers feature.", + "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", + "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", + "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", + "message": "A human readable message indicating details about why the pod is in this condition.", + "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", + "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", + "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", + "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", + "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", + "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. 
More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", + "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", } func (PodStatus) SwaggerDoc() map[string]string { @@ -1664,8 +1555,8 @@ func (PodStatus) SwaggerDoc() map[string]string { var map_PodStatusResult = map[string]string{ "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (PodStatusResult) SwaggerDoc() map[string]string { @@ -1674,8 +1565,8 @@ func (PodStatusResult) SwaggerDoc() map[string]string { var map_PodTemplate = map[string]string{ "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (PodTemplate) SwaggerDoc() map[string]string { @@ -1684,7 +1575,7 @@ func (PodTemplate) SwaggerDoc() map[string]string { var map_PodTemplateList = map[string]string{ "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of pod templates", } @@ -1694,8 +1585,8 @@ func (PodTemplateList) SwaggerDoc() map[string]string { var map_PodTemplateSpec = map[string]string{ "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (PodTemplateSpec) SwaggerDoc() map[string]string { @@ -1745,11 +1636,11 @@ func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { } var map_Probe = map[string]string{ - "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", + "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness and startup. Minimum value is 1.", + "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", } @@ -1774,7 +1665,6 @@ var map_QuobyteVolumeSource = map[string]string{ "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", "user": "User to map volume access to Defaults to serivceaccount user", "group": "Group to map volume access to Default is no group", - "tenant": "Tenant owning the given Quobyte volume in the Backend Used with dynamically provisioned Quobyte volumes, value is set by the plugin", } func (QuobyteVolumeSource) SwaggerDoc() map[string]string { @@ -1783,14 +1673,14 @@ func (QuobyteVolumeSource) SwaggerDoc() map[string]string { var map_RBDPersistentVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. 
More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", } func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { @@ -1799,14 +1689,14 @@ func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { var map_RBDVolumeSource = map[string]string{ "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://examples.k8s.io/volumes/rbd/README.md#how-to-use-it", + "pool": "The rados pool name. Default is rbd. 
More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", + "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", } func (RBDVolumeSource) SwaggerDoc() map[string]string { @@ -1815,7 +1705,7 @@ func (RBDVolumeSource) SwaggerDoc() map[string]string { var map_RangeAllocation = map[string]string{ "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "range": "Range is string that identifies the range represented by 'data'.", "data": "Data is a bit array containing all allocated addresses in the previous segment.", } @@ -1826,9 +1716,9 @@ func (RangeAllocation) SwaggerDoc() map[string]string { var map_ReplicationController = map[string]string{ "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (ReplicationController) SwaggerDoc() map[string]string { @@ -1850,7 +1740,7 @@ func (ReplicationControllerCondition) SwaggerDoc() map[string]string { var map_ReplicationControllerList = map[string]string{ "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -1897,9 +1787,9 @@ func (ResourceFieldSelector) SwaggerDoc() map[string]string { var map_ResourceQuota = map[string]string{ "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (ResourceQuota) SwaggerDoc() map[string]string { @@ -1908,7 +1798,7 @@ func (ResourceQuota) SwaggerDoc() map[string]string { var map_ResourceQuotaList = map[string]string{ "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", } @@ -2017,8 +1907,7 @@ func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { var map_Secret = map[string]string{ "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "immutable": "Immutable, if set to true, ensures that data stored in the Secret cannot be updated (only object metadata can be modified). If not set to true, the field can be modified at any time. Defaulted to nil. This is an alpha field enabled by ImmutableEphemeralVolumes feature gate.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. 
It is never output when reading from the API.", "type": "Used to facilitate programmatic handling of secret data.", @@ -2040,7 +1929,7 @@ func (SecretEnvSource) SwaggerDoc() map[string]string { var map_SecretKeySelector = map[string]string{ "": "SecretKeySelector selects a key of a Secret.", "key": "The key of the secret to select from. Must be a valid secret key.", - "optional": "Specify whether the Secret or its key must be defined", + "optional": "Specify whether the Secret or it's key must be defined", } func (SecretKeySelector) SwaggerDoc() map[string]string { @@ -2049,7 +1938,7 @@ func (SecretKeySelector) SwaggerDoc() map[string]string { var map_SecretList = map[string]string{ "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", } @@ -2082,7 +1971,7 @@ var map_SecretVolumeSource = map[string]string{ "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the Secret or its keys must be defined", + "optional": "Specify whether the Secret or it's keys must be defined", } func (SecretVolumeSource) SwaggerDoc() map[string]string { @@ -2094,7 +1983,6 @@ var map_SecurityContext = map[string]string{ "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "windowsOptions": "The Windows specific settings applied to all containers. If unspecified, the options from the PodSecurityContext will be used. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", @@ -2118,9 +2006,9 @@ func (SerializedReference) SwaggerDoc() map[string]string { var map_Service = map[string]string{ "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Service) SwaggerDoc() map[string]string { @@ -2129,7 +2017,7 @@ func (Service) SwaggerDoc() map[string]string { var map_ServiceAccount = map[string]string{ "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. 
Can be overridden at the pod level.", @@ -2141,7 +2029,7 @@ func (ServiceAccount) SwaggerDoc() map[string]string { var map_ServiceAccountList = map[string]string{ "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", } @@ -2162,7 +2050,7 @@ func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { var map_ServiceList = map[string]string{ "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of services", } @@ -2171,13 +2059,12 @@ func (ServiceList) SwaggerDoc() map[string]string { } var map_ServicePort = map[string]string{ - "": "ServicePort contains information on service's port.", - "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. When considering the endpoints for a Service, this must match the 'name' field in the EndpointPort. Optional if only one ServicePort is defined on this service.", - "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "appProtocol": "The application protocol for this port. This field follows standard Kubernetes label syntax. Un-prefixed names are reserved for IANA standard service names (as per RFC-6335 and http://www.iana.org/assignments/service-names). Non-standard protocols should use prefixed names such as mycompany.com/my-custom-protocol. Field can be enabled with ServiceAppProtocol feature gate.", - "port": "The port that will be exposed by this service.", - "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", + "": "ServicePort contains information on service's port.", + "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", + "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". 
Default is TCP.", + "port": "The port that will be exposed by this service.", + "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", + "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", } func (ServicePort) SwaggerDoc() map[string]string { @@ -2198,7 +2085,7 @@ var map_ServiceSpec = map[string]string{ "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "selector": "Route service traffic to pods with label keys and values matching this selector. If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types", + "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. 
\"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ", "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", @@ -2208,8 +2095,6 @@ var map_ServiceSpec = map[string]string{ "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", - "ipFamily": "ipFamily specifies whether this Service has a preference for a particular IP family (e.g. IPv4 vs. IPv6). If a specific IP family is requested, the clusterIP field will be allocated from that family, if it is available in the cluster. If no IP family is requested, the cluster's primary IP family will be used. Other IP fields (loadBalancerIP, loadBalancerSourceRanges, externalIPs) and controllers which allocate external load-balancers should use the same IP family. Endpoints for this Service will be of this family. This field is immutable after creation. Assigning a ServiceIPFamily not available in the cluster (e.g. 
IPv6 in IPv4 only cluster) is an error condition and will fail during clusterIP assignment.", - "topologyKeys": "topologyKeys is a preference-order list of topology keys which implementations of services should use to preferentially sort endpoints when accessing this Service, it can not be used at the same time as externalTrafficPolicy=Local. Topology keys must be valid label keys and at most 16 keys may be specified. Endpoints are chosen based on the first topology key with available backends. If this field is specified and all entries have no backends that match the topology of the client, the service has no backends for that client and connections should fail. The special value \"*\" may be used to mean \"any topology\". This catch-all value, if used, only makes sense as the last value in the list. If this is not specified or empty, no topology constraints will be applied.", } func (ServiceSpec) SwaggerDoc() map[string]string { @@ -2283,7 +2168,7 @@ func (TCPSocketAction) SwaggerDoc() map[string]string { var map_Taint = map[string]string{ "": "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", "key": "Required. The taint key to be applied to a node.", - "value": "The taint value corresponding to the taint key.", + "value": "Required. The taint value corresponding to the taint key.", "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", "timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", } @@ -2316,7 +2201,7 @@ func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string { } var map_TopologySelectorTerm = map[string]string{ - "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", + "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", "matchLabelExpressions": "A list of topology selector requirements by labels.", } @@ -2324,18 +2209,6 @@ func (TopologySelectorTerm) SwaggerDoc() map[string]string { return map_TopologySelectorTerm } -var map_TopologySpreadConstraint = map[string]string{ - "": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.", - "maxSkew": "MaxSkew describes the degree to which pods may be unevenly distributed. It's the maximum permitted difference between the number of matching pods in any two topology domains of a given topology type. For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 1/1/0: ", - "topologyKey": "TopologyKey is the key of node labels. Nodes that have a label with this key and identical values are considered to be in the same topology. We consider each as a \"bucket\", and try to put balanced number of pods into each bucket. It's a required field.", - "whenUnsatisfiable": "WhenUnsatisfiable indicates how to deal with a pod if it doesn't satisfy the spread constraint. 
- DoNotSchedule (default) tells the scheduler not to schedule it - ScheduleAnyway tells the scheduler to still schedule it It's considered as \"Unsatisfiable\" if and only if placing incoming pod on any topology violates \"MaxSkew\". For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same labelSelector spread as 3/1/1: ", - "labelSelector": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.", -} - -func (TopologySpreadConstraint) SwaggerDoc() map[string]string { - return map_TopologySpreadConstraint -} - var map_TypedLocalObjectReference = map[string]string{ "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", @@ -2373,7 +2246,6 @@ var map_VolumeMount = map[string]string{ "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", "subPath": "Path within the volume from which the container's volume should be mounted. Defaults to \"\" (volume's root).", "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", - "subPathExpr": "Expanded path within the volume from which the container's volume should be mounted. Behaves similarly to SubPath but environment variable references $(VAR_NAME) are expanded using the container's environment. Defaults to \"\" (volume's root). SubPathExpr and SubPath are mutually exclusive.", } func (VolumeMount) SwaggerDoc() map[string]string { @@ -2410,27 +2282,26 @@ var map_VolumeSource = map[string]string{ "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", "secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://examples.k8s.io/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/glusterfs/README.md", + "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", + "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://examples.k8s.io/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine. More info: https://examples.k8s.io/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "projected": "Items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", - "csi": "CSI (Container Storage Interface) represents storage that is handled by an external CSI driver (Alpha feature).", + "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", + "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", + "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", + "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", + "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running", + "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", + "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", + "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", + "configMap": "ConfigMap represents a configMap that should populate this volume", + "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", + "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", + "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", + "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", + "projected": "Items for all in one resources secrets, configmaps, and downward API", + "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", + "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", + "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", } func (VolumeSource) SwaggerDoc() map[string]string { @@ -2459,15 +2330,4 @@ func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { return map_WeightedPodAffinityTerm } -var map_WindowsSecurityContextOptions = map[string]string{ - "": "WindowsSecurityContextOptions contain Windows-specific options and credentials.", - "gmsaCredentialSpecName": "GMSACredentialSpecName is the name of the GMSA credential spec to use.", - "gmsaCredentialSpec": "GMSACredentialSpec is where the GMSA admission webhook (https://github.com/kubernetes-sigs/windows-gmsa) inlines the contents of the GMSA credential spec named by the GMSACredentialSpecName field.", - "runAsUserName": "The UserName in Windows to run the entrypoint of the container process. Defaults to the user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", -} - -func (WindowsSecurityContextOptions) SwaggerDoc() map[string]string { - return map_WindowsSecurityContextOptions -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/core/v1/well_known_labels.go b/vendor/k8s.io/api/core/v1/well_known_labels.go deleted file mode 100644 index 22aa55b..0000000 --- a/vendor/k8s.io/api/core/v1/well_known_labels.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -const ( - LabelHostname = "kubernetes.io/hostname" - - LabelZoneFailureDomain = "failure-domain.beta.kubernetes.io/zone" - LabelZoneRegion = "failure-domain.beta.kubernetes.io/region" - LabelZoneFailureDomainStable = "topology.kubernetes.io/zone" - LabelZoneRegionStable = "topology.kubernetes.io/region" - - LabelInstanceType = "beta.kubernetes.io/instance-type" - LabelInstanceTypeStable = "node.kubernetes.io/instance-type" - - LabelOSStable = "kubernetes.io/os" - LabelArchStable = "kubernetes.io/arch" - - // LabelWindowsBuild is used on Windows nodes to specify the Windows build number starting with v1.17.0. - // It's in the format MajorVersion.MinorVersion.BuildNumber (for ex: 10.0.17763) - LabelWindowsBuild = "node.kubernetes.io/windows-build" - - // LabelNamespaceSuffixKubelet is an allowed label namespace suffix kubelets can self-set ([*.]kubelet.kubernetes.io/*) - LabelNamespaceSuffixKubelet = "kubelet.kubernetes.io" - // LabelNamespaceSuffixNode is an allowed label namespace suffix kubelets can self-set ([*.]node.kubernetes.io/*) - LabelNamespaceSuffixNode = "node.kubernetes.io" - - // LabelNamespaceNodeRestriction is a forbidden label namespace that kubelets may not self-set when the NodeRestriction admission plugin is enabled - LabelNamespaceNodeRestriction = "node-restriction.kubernetes.io" - - // IsHeadlessService is added by Controller to an Endpoint denoting if its parent - // Service is Headless. The existence of this label can be used further by other - // controllers and kube-proxy to check if the Endpoint objects should be replicated when - // using Headless Services - IsHeadlessService = "service.kubernetes.io/headless" -) diff --git a/vendor/k8s.io/api/core/v1/well_known_taints.go b/vendor/k8s.io/api/core/v1/well_known_taints.go deleted file mode 100644 index e1a8f62..0000000 --- a/vendor/k8s.io/api/core/v1/well_known_taints.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -const ( - // TaintNodeNotReady will be added when node is not ready - // and removed when node becomes ready. - TaintNodeNotReady = "node.kubernetes.io/not-ready" - - // TaintNodeUnreachable will be added when node becomes unreachable - // (corresponding to NodeReady status ConditionUnknown) - // and removed when node becomes reachable (NodeReady status ConditionTrue). - TaintNodeUnreachable = "node.kubernetes.io/unreachable" - - // TaintNodeUnschedulable will be added when node becomes unschedulable - // and removed when node becomes scheduable. - TaintNodeUnschedulable = "node.kubernetes.io/unschedulable" - - // TaintNodeMemoryPressure will be added when node has memory pressure - // and removed when node has enough memory. - TaintNodeMemoryPressure = "node.kubernetes.io/memory-pressure" - - // TaintNodeDiskPressure will be added when node has disk pressure - // and removed when node has enough disk. 
- TaintNodeDiskPressure = "node.kubernetes.io/disk-pressure" - - // TaintNodeNetworkUnavailable will be added when node's network is unavailable - // and removed when network becomes ready. - TaintNodeNetworkUnavailable = "node.kubernetes.io/network-unavailable" - - // TaintNodePIDPressure will be added when node has pid pressure - // and removed when node has enough disk. - TaintNodePIDPressure = "node.kubernetes.io/pid-pressure" -) diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go index 23d9644..f8f3471 100644 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go @@ -237,11 +237,6 @@ func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource *out = new(SecretReference) **out = **in } - if in.ControllerExpandSecretRef != nil { - in, out := &in.ControllerExpandSecretRef, &out.ControllerExpandSecretRef - *out = new(SecretReference) - **out = **in - } return } @@ -255,44 +250,6 @@ func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIVolumeSource) DeepCopyInto(out *CSIVolumeSource) { - *out = *in - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - if in.VolumeAttributes != nil { - in, out := &in.VolumeAttributes, &out.VolumeAttributes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.NodePublishSecretRef != nil { - in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIVolumeSource. -func (in *CSIVolumeSource) DeepCopy() *CSIVolumeSource { - if in == nil { - return nil - } - out := new(CSIVolumeSource) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Capabilities) DeepCopyInto(out *Capabilities) { *out = *in @@ -485,7 +442,7 @@ func (in *ComponentStatus) DeepCopyObject() runtime.Object { func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ComponentStatus, len(*in)) @@ -519,11 +476,6 @@ func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Immutable != nil { - in, out := &in.Immutable, &out.Immutable - *out = new(bool) - **out = **in - } if in.Data != nil { in, out := &in.Data, &out.Data *out = make(map[string]string, len(*in)) @@ -615,7 +567,7 @@ func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ConfigMap, len(*in)) @@ -778,11 +730,6 @@ func (in *Container) DeepCopyInto(out *Container) { *out = new(Probe) (*in).DeepCopyInto(*out) } - if in.StartupProbe != nil { - in, out := &in.StartupProbe, &out.StartupProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } if in.Lifecycle != nil { in, out := &in.Lifecycle, &out.Lifecycle *out = new(Lifecycle) @@ -930,11 +877,6 @@ func (in *ContainerStatus) DeepCopyInto(out *ContainerStatus) { *out = *in in.State.DeepCopyInto(&out.State) in.LastTerminationState.DeepCopyInto(&out.LastTerminationState) - if in.Started != nil { - in, out := &in.Started, &out.Started - *out = new(bool) - **out = **in - } return } @@ -1096,11 +1038,6 @@ func (in *EndpointAddress) DeepCopy() *EndpointAddress { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *EndpointPort) DeepCopyInto(out *EndpointPort) { *out = *in - if in.AppProtocol != nil { - in, out := &in.AppProtocol, &out.AppProtocol - *out = new(string) - **out = **in - } return } @@ -1134,9 +1071,7 @@ func (in *EndpointSubset) DeepCopyInto(out *EndpointSubset) { if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]EndpointPort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } return } @@ -1188,7 +1123,7 @@ func (in *Endpoints) DeepCopyObject() runtime.Object { func (in *EndpointsList) DeepCopyInto(out *EndpointsList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Endpoints, len(*in)) @@ -1300,139 +1235,6 @@ func (in *EnvVarSource) DeepCopy() *EnvVarSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EphemeralContainer) DeepCopyInto(out *EphemeralContainer) { - *out = *in - in.EphemeralContainerCommon.DeepCopyInto(&out.EphemeralContainerCommon) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainer. -func (in *EphemeralContainer) DeepCopy() *EphemeralContainer { - if in == nil { - return nil - } - out := new(EphemeralContainer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *EphemeralContainerCommon) DeepCopyInto(out *EphemeralContainerCommon) { - *out = *in - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Args != nil { - in, out := &in.Args, &out.Args - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ContainerPort, len(*in)) - copy(*out, *in) - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.Resources.DeepCopyInto(&out.Resources) - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeDevices != nil { - in, out := &in.VolumeDevices, &out.VolumeDevices - *out = make([]VolumeDevice, len(*in)) - copy(*out, *in) - } - if in.LivenessProbe != nil { - in, out := &in.LivenessProbe, &out.LivenessProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - if in.ReadinessProbe != nil { - in, out := &in.ReadinessProbe, &out.ReadinessProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - if in.StartupProbe != nil { - in, out := &in.StartupProbe, &out.StartupProbe - *out = new(Probe) - (*in).DeepCopyInto(*out) - } - if in.Lifecycle != nil { - in, out := &in.Lifecycle, &out.Lifecycle - *out = new(Lifecycle) - (*in).DeepCopyInto(*out) - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(SecurityContext) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainerCommon. -func (in *EphemeralContainerCommon) DeepCopy() *EphemeralContainerCommon { - if in == nil { - return nil - } - out := new(EphemeralContainerCommon) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EphemeralContainers) DeepCopyInto(out *EphemeralContainers) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.EphemeralContainers != nil { - in, out := &in.EphemeralContainers, &out.EphemeralContainers - *out = make([]EphemeralContainer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EphemeralContainers. -func (in *EphemeralContainers) DeepCopy() *EphemeralContainers { - if in == nil { - return nil - } - out := new(EphemeralContainers) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *EphemeralContainers) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Event) DeepCopyInto(out *Event) { *out = *in @@ -1478,7 +1280,7 @@ func (in *Event) DeepCopyObject() runtime.Object { func (in *EventList) DeepCopyInto(out *EventList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Event, len(*in)) @@ -1696,27 +1498,6 @@ func (in *GitRepoVolumeSource) DeepCopy() *GitRepoVolumeSource { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *GlusterfsPersistentVolumeSource) DeepCopyInto(out *GlusterfsPersistentVolumeSource) { - *out = *in - if in.EndpointsNamespace != nil { - in, out := &in.EndpointsNamespace, &out.EndpointsNamespace - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GlusterfsPersistentVolumeSource. -func (in *GlusterfsPersistentVolumeSource) DeepCopy() *GlusterfsPersistentVolumeSource { - if in == nil { - return nil - } - out := new(GlusterfsPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GlusterfsVolumeSource) DeepCopyInto(out *GlusterfsVolumeSource) { *out = *in @@ -2035,7 +1816,7 @@ func (in *LimitRangeItem) DeepCopy() *LimitRangeItem { func (in *LimitRangeList) DeepCopyInto(out *LimitRangeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]LimitRange, len(*in)) @@ -2091,7 +1872,7 @@ func (in *LimitRangeSpec) DeepCopy() *LimitRangeSpec { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -2216,7 +1997,7 @@ func (in *Namespace) DeepCopyInto(out *Namespace) { out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) + out.Status = in.Status return } @@ -2238,28 +2019,11 @@ func (in *Namespace) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NamespaceCondition) DeepCopyInto(out *NamespaceCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespaceCondition. -func (in *NamespaceCondition) DeepCopy() *NamespaceCondition { - if in == nil { - return nil - } - out := new(NamespaceCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NamespaceList) DeepCopyInto(out *NamespaceList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Namespace, len(*in)) @@ -2312,13 +2076,6 @@ func (in *NamespaceSpec) DeepCopy() *NamespaceSpec { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *NamespaceStatus) DeepCopyInto(out *NamespaceStatus) { *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]NamespaceCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } @@ -2495,7 +2252,7 @@ func (in *NodeDaemonEndpoints) DeepCopy() *NodeDaemonEndpoints { func (in *NodeList) DeepCopyInto(out *NodeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Node, len(*in)) @@ -2649,11 +2406,6 @@ func (in *NodeSelectorTerm) DeepCopy() *NodeSelectorTerm { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *NodeSpec) DeepCopyInto(out *NodeSpec) { *out = *in - if in.PodCIDRs != nil { - in, out := &in.PodCIDRs, &out.PodCIDRs - *out = make([]string, len(*in)) - copy(*out, *in) - } if in.Taints != nil { in, out := &in.Taints, &out.Taints *out = make([]Taint, len(*in)) @@ -2879,7 +2631,7 @@ func (in *PersistentVolumeClaimCondition) DeepCopy() *PersistentVolumeClaimCondi func (in *PersistentVolumeClaimList) DeepCopyInto(out *PersistentVolumeClaimList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolumeClaim, len(*in)) @@ -3005,7 +2757,7 @@ func (in *PersistentVolumeClaimVolumeSource) DeepCopy() *PersistentVolumeClaimVo func (in *PersistentVolumeList) DeepCopyInto(out *PersistentVolumeList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PersistentVolume, len(*in)) @@ -3054,8 +2806,8 @@ func (in *PersistentVolumeSource) DeepCopyInto(out *PersistentVolumeSource) { } if in.Glusterfs != nil { in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsPersistentVolumeSource) - (*in).DeepCopyInto(*out) + *out = new(GlusterfsVolumeSource) + **out = **in } if in.NFS != nil { in, out := &in.NFS, &out.NFS @@ -3482,27 +3234,11 @@ func (in *PodExecOptions) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodIP) DeepCopyInto(out *PodIP) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodIP. -func (in *PodIP) DeepCopy() *PodIP { - if in == nil { - return nil - } - out := new(PodIP) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *PodList) DeepCopyInto(out *PodList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Pod, len(*in)) @@ -3654,11 +3390,6 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = new(SELinuxOptions) **out = **in } - if in.WindowsOptions != nil { - in, out := &in.WindowsOptions, &out.WindowsOptions - *out = new(WindowsSecurityContextOptions) - (*in).DeepCopyInto(*out) - } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -3689,11 +3420,6 @@ func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { *out = make([]Sysctl, len(*in)) copy(*out, *in) } - if in.FSGroupChangePolicy != nil { - in, out := &in.FSGroupChangePolicy, &out.FSGroupChangePolicy - *out = new(PodFSGroupChangePolicy) - **out = **in - } return } @@ -3752,13 +3478,6 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.EphemeralContainers != nil { - in, out := &in.EphemeralContainers, &out.EphemeralContainers - *out = make([]EphemeralContainer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } if in.TerminationGracePeriodSeconds != nil { in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds *out = new(int64) @@ -3835,30 +3554,6 @@ func (in *PodSpec) DeepCopyInto(out *PodSpec) { *out = new(string) **out = **in } - if in.EnableServiceLinks != nil { - in, out := &in.EnableServiceLinks, &out.EnableServiceLinks - *out = new(bool) - **out = **in - } - if in.PreemptionPolicy != nil { - in, out := &in.PreemptionPolicy, &out.PreemptionPolicy - *out = new(PreemptionPolicy) - **out = **in - } - if in.Overhead != nil { - in, out := &in.Overhead, &out.Overhead - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.TopologySpreadConstraints != nil { - in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints - *out = make([]TopologySpreadConstraint, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } @@ -3882,11 +3577,6 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.PodIPs != nil { - in, out := &in.PodIPs, &out.PodIPs - *out = make([]PodIP, len(*in)) - copy(*out, *in) - } if in.StartTime != nil { in, out := &in.StartTime, &out.StartTime *out = (*in).DeepCopy() @@ -3905,13 +3595,6 @@ func (in *PodStatus) DeepCopyInto(out *PodStatus) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - if in.EphemeralContainerStatuses != nil { - in, out := &in.EphemeralContainerStatuses, &out.EphemeralContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } @@ -3983,7 +3666,7 @@ func (in *PodTemplate) DeepCopyObject() runtime.Object { func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodTemplate, len(*in)) @@ -4295,7 +3978,7 @@ func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondi func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, 
&out.Items *out = make([]ReplicationController, len(*in)) @@ -4451,7 +4134,7 @@ func (in *ResourceQuota) DeepCopyObject() runtime.Object { func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ResourceQuota, len(*in)) @@ -4680,11 +4363,6 @@ func (in *Secret) DeepCopyInto(out *Secret) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Immutable != nil { - in, out := &in.Immutable, &out.Immutable - *out = new(bool) - **out = **in - } if in.Data != nil { in, out := &in.Data, &out.Data *out = make(map[string][]byte, len(*in)) @@ -4776,7 +4454,7 @@ func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { func (in *SecretList) DeepCopyInto(out *SecretList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Secret, len(*in)) @@ -4901,11 +4579,6 @@ func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { *out = new(SELinuxOptions) **out = **in } - if in.WindowsOptions != nil { - in, out := &in.WindowsOptions, &out.WindowsOptions - *out = new(WindowsSecurityContextOptions) - (*in).DeepCopyInto(*out) - } if in.RunAsUser != nil { in, out := &in.RunAsUser, &out.RunAsUser *out = new(int64) @@ -5048,7 +4721,7 @@ func (in *ServiceAccount) DeepCopyObject() runtime.Object { func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ServiceAccount, len(*in)) @@ -5102,7 +4775,7 @@ func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjecti func (in *ServiceList) DeepCopyInto(out *ServiceList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Service, len(*in)) @@ -5134,11 +4807,6 @@ func (in *ServiceList) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ServicePort) DeepCopyInto(out *ServicePort) { *out = *in - if in.AppProtocol != nil { - in, out := &in.AppProtocol, &out.AppProtocol - *out = new(string) - **out = **in - } out.TargetPort = in.TargetPort return } @@ -5184,9 +4852,7 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { if in.Ports != nil { in, out := &in.Ports, &out.Ports *out = make([]ServicePort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } if in.Selector != nil { in, out := &in.Selector, &out.Selector @@ -5210,16 +4876,6 @@ func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { *out = new(SessionAffinityConfig) (*in).DeepCopyInto(*out) } - if in.IPFamily != nil { - in, out := &in.IPFamily, &out.IPFamily - *out = new(IPFamily) - **out = **in - } - if in.TopologyKeys != nil { - in, out := &in.TopologyKeys, &out.TopologyKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } return } @@ -5431,27 +5087,6 @@ func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) { - *out = *in - if in.LabelSelector != nil { - in, out := &in.LabelSelector, &out.LabelSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint. -func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint { - if in == nil { - return nil - } - out := new(TopologySpreadConstraint) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { *out = *in @@ -5722,11 +5357,6 @@ func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { *out = new(StorageOSVolumeSource) (*in).DeepCopyInto(*out) } - if in.CSI != nil { - in, out := &in.CSI, &out.CSI - *out = new(CSIVolumeSource) - (*in).DeepCopyInto(*out) - } return } @@ -5772,34 +5402,3 @@ func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WindowsSecurityContextOptions) DeepCopyInto(out *WindowsSecurityContextOptions) { - *out = *in - if in.GMSACredentialSpecName != nil { - in, out := &in.GMSACredentialSpecName, &out.GMSACredentialSpecName - *out = new(string) - **out = **in - } - if in.GMSACredentialSpec != nil { - in, out := &in.GMSACredentialSpec, &out.GMSACredentialSpec - *out = new(string) - **out = **in - } - if in.RunAsUserName != nil { - in, out := &in.RunAsUserName, &out.RunAsUserName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WindowsSecurityContextOptions. -func (in *WindowsSecurityContextOptions) DeepCopy() *WindowsSecurityContextOptions { - if in == nil { - return nil - } - out := new(WindowsSecurityContextOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go index 9bec7b3..8b1a3e3 100644 --- a/vendor/k8s.io/api/events/v1beta1/doc.go +++ b/vendor/k8s.io/api/events/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=events.k8s.io - package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go index 923dee5..e24a82a 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/events/v1beta1/generated.pb.go @@ -14,24 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto + + It has these top-level messages: + Event + EventList + EventSeries +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" - proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,161 +51,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{0} -} -func (m *Event) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Event) XXX_Merge(src proto.Message) { - xxx_messageInfo_Event.Merge(m, src) -} -func (m *Event) XXX_Size() int { - return m.Size() -} -func (m *Event) XXX_DiscardUnknown() { - xxx_messageInfo_Event.DiscardUnknown(m) -} - -var xxx_messageInfo_Event proto.InternalMessageInfo - -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{1} -} -func (m *EventList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EventList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EventList) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventList.Merge(m, src) -} -func (m *EventList) XXX_Size() int { - return m.Size() -} -func (m *EventList) XXX_DiscardUnknown() { - xxx_messageInfo_EventList.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_EventList proto.InternalMessageInfo +func (m *Event) Reset() { *m = Event{} } +func (*Event) ProtoMessage() {} +func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { - return fileDescriptor_4f97f691c32a5ac8, []int{2} -} -func (m *EventSeries) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *EventSeries) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *EventSeries) XXX_Merge(src proto.Message) { - xxx_messageInfo_EventSeries.Merge(m, src) -} -func (m *EventSeries) XXX_Size() int { - return m.Size() -} -func (m *EventSeries) XXX_DiscardUnknown() { - xxx_messageInfo_EventSeries.DiscardUnknown(m) -} +func (m *EventList) Reset() { *m = EventList{} } +func (*EventList) ProtoMessage() {} +func 
(*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_EventSeries proto.InternalMessageInfo +func (m *EventSeries) Reset() { *m = EventSeries{} } +func (*EventSeries) ProtoMessage() {} +func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func init() { proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptor_4f97f691c32a5ac8) -} - -var fileDescriptor_4f97f691c32a5ac8 = []byte{ - // 801 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, - 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, - 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, - 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, - 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, - 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, - 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, - 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, - 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, - 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, - 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, - 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, - 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, - 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, - 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, - 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, - 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, - 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, - 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, - 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, - 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, - 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, - 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, - 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, - 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, - 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, - 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 
0xec, 0xf2, - 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, - 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, - 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, - 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, - 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, - 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, - 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, - 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, - 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, - 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, - 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, - 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, - 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, - 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, - 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, - 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, - 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, - 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, - 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, - 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, - 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, - 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, - 0x00, -} - func (m *Event) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -204,139 +81,112 @@ func (m *Event) Marshal() (dAtA []byte, err error) { } func (m *Event) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) - i-- - dAtA[i] = 0x78 - { - size, err := m.DeprecatedLastTimestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x72 - { - size, err := m.DeprecatedFirstTimestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) + n2, err := m.EventTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x6a - { - 
size, err := m.DeprecatedSource.MarshalToSizedBuffer(dAtA[:i]) + i += n2 + if m.Series != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) + n3, err := m.Series.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n3 } - i-- - dAtA[i] = 0x62 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0x5a - i -= len(m.Note) - copy(dAtA[i:], m.Note) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) - i-- - dAtA[i] = 0x52 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) + i += copy(dAtA[i:], m.ReportingController) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) + i += copy(dAtA[i:], m.ReportingInstance) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) + i += copy(dAtA[i:], m.Action) + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) + n4, err := m.Regarding.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 if m.Related != nil { - { - size, err := m.Related.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x4a - } - { - size, err := m.Regarding.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) + n5, err := m.Related.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n5 } - i-- - dAtA[i] = 0x42 - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x3a - i -= len(m.Action) - copy(dAtA[i:], m.Action) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i-- - dAtA[i] = 0x32 - i -= len(m.ReportingInstance) - copy(dAtA[i:], m.ReportingInstance) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i-- - dAtA[i] = 0x2a - i -= len(m.ReportingController) - copy(dAtA[i:], m.ReportingController) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i-- - dAtA[i] = 0x22 - if m.Series != nil { - { - size, err := m.Series.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) + i += copy(dAtA[i:], m.Note) + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) + n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - { - size, err := m.EventTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n6 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) + n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + i += n7 + dAtA[i] = 0x72 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) + n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n8 + dAtA[i] = 0x78 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) + return i, nil } func (m *EventList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -344,46 +194,37 @@ func (m *EventList) Marshal() (dAtA []byte, err error) { } func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *EventSeries) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -391,51 +232,56 @@ func (m *EventSeries) Marshal() (dAtA []byte, err error) { } func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *EventSeries) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.State) - copy(dAtA[i:], m.State) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i-- - dAtA[i] = 0x1a - { - size, err := m.LastObservedTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) + n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) + i += copy(dAtA[i:], m.State) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func 
encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *Event) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -475,9 +321,6 @@ func (m *Event) Size() (n int) { } func (m *EventList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -492,9 +335,6 @@ func (m *EventList) Size() (n int) { } func (m *EventSeries) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Count)) @@ -506,7 +346,14 @@ func (m *EventSeries) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -516,20 +363,20 @@ func (this *Event) String() string { return "nil" } s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `EventTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EventTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(this.Series.String(), "EventSeries", "EventSeries", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, + `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, `Action:` + fmt.Sprintf("%v", this.Action) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Regarding:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Regarding), "ObjectReference", "v11.ObjectReference", 1), `&`, ``, 1) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "v11.ObjectReference", 1) + `,`, + `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, + `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, `Note:` + fmt.Sprintf("%v", this.Note) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `DeprecatedSource:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedSource), "EventSource", "v11.EventSource", 1), `&`, ``, 1) + `,`, - `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedFirstTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.DeprecatedLastTimestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedSource:` + 
strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, + `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, `}`, }, "") @@ -539,14 +386,9 @@ func (this *EventList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Event{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Event", "Event", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -557,7 +399,7 @@ func (this *EventSeries) String() string { } s := strings.Join([]string{`&EventSeries{`, `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastObservedTime), "MicroTime", "v1.MicroTime", 1), `&`, ``, 1) + `,`, + `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, `}`, }, "") @@ -586,7 +428,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -614,7 +456,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -623,9 +465,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -647,7 +486,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -656,9 +495,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -680,7 +516,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -689,9 +525,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -716,7 +549,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= 
(uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -726,9 +559,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -748,7 +578,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -758,9 +588,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -780,7 +607,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -790,9 +617,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -812,7 +636,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -822,9 +646,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -844,7 +665,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -853,9 +674,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -877,7 +695,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -886,14 +704,11 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Related == nil { - m.Related = &v11.ObjectReference{} + m.Related = &k8s_io_api_core_v1.ObjectReference{} } if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -913,7 +728,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -923,9 +738,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -945,7 +757,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -955,9 +767,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -977,7 
+786,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -986,9 +795,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1010,7 +816,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1019,9 +825,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1043,7 +846,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1052,9 +855,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1076,7 +876,7 @@ func (m *Event) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DeprecatedCount |= int32(b&0x7F) << shift + m.DeprecatedCount |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1090,9 +890,6 @@ func (m *Event) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1120,7 +917,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1148,7 +945,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1157,9 +954,6 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1181,7 +975,7 @@ func (m *EventList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1190,9 +984,6 @@ func (m *EventList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1210,9 +1001,6 @@ func (m *EventList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1240,7 +1028,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1268,7 +1056,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Count |= int32(b&0x7F) << shift + m.Count |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -1287,7 +1075,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << 
shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1296,9 +1084,6 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1320,7 +1105,7 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1330,9 +1115,6 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1347,9 +1129,6 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1365,7 +1144,6 @@ func (m *EventSeries) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1397,8 +1175,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1415,34 +1195,112 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 801 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, + 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, + 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, + 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 
0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, + 0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, + 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, + 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, + 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, + 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, + 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, + 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, + 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, + 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, + 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, + 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, + 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, + 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, + 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, + 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, + 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, + 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, + 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, + 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, + 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, + 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, + 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, + 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, + 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, + 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, + 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, + 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, + 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, + 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, + 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, + 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, + 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, + 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, + 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, + 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, + 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 
0x6b, 0x87, 0xca, 0xdb, + 0xad, 0x91, 0x9c, 0x6d, 0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, + 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, + 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, + 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, + 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, + 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, + 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, + 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, + 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, + 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, + 0x00, +} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto index 58f5aa4..b3e565e 100644 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ b/vendor/k8s.io/api/events/v1beta1/generated.proto @@ -98,7 +98,7 @@ message Event { // EventList is a list of Event objects. message EventList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -116,7 +116,6 @@ message EventSeries { optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; // Information whether this series is ongoing or finished. - // Deprecated. Planned removal for 1.18 optional string state = 3; } diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go index 0571fbb..dc48ddb 100644 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ b/vendor/k8s.io/api/events/v1beta1/types.go @@ -43,11 +43,11 @@ type Event struct { // ID of the controller instance, e.g. `kubelet-xyzf`. // +optional - ReportingInstance string `json:"reportingInstance,omitempty" protobuf:"bytes,5,opt,name=reportingInstance"` + ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` // What action was taken/failed regarding to the regarding object. // +optional - Action string `json:"action,omitempty" protobuf:"bytes,6,name=action"` + Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` // Why the action was taken. Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` @@ -96,7 +96,6 @@ type EventSeries struct { // Time when last Event from the series was seen before last heartbeat. LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` // Information whether this series is ongoing or finished. - // Deprecated. Planned removal for 1.18 State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` } @@ -114,7 +113,7 @@ const ( type EventList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
-	// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
 	// +optional
 	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go
index 639daca..a15672c 100644
--- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go
+++ b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go
@@ -51,7 +51,7 @@ func (Event) SwaggerDoc() map[string]string {
 var map_EventList = map[string]string{
 	"": "EventList is a list of Event objects.",
-	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
 	"items": "Items is a list of schema objects.",
 }
@@ -63,7 +63,7 @@ var map_EventSeries = map[string]string{
 	"": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.",
 	"count": "Number of occurrences in this series up to the last heartbeat time",
 	"lastObservedTime": "Time when last Event from the series was seen before last heartbeat.",
-	"state": "Information whether this series is ongoing or finished. Deprecated. Planned removal for 1.18",
+	"state": "Information whether this series is ongoing or finished.",
 }
 func (EventSeries) SwaggerDoc() map[string]string {
diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go
index 779ebaf..e52e142 100644
--- a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go
+++ b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go
@@ -70,7 +70,7 @@ func (in *Event) DeepCopyObject() runtime.Object {
 func (in *EventList) DeepCopyInto(out *EventList) {
 	*out = *in
 	out.TypeMeta = in.TypeMeta
-	in.ListMeta.DeepCopyInto(&out.ListMeta)
+	out.ListMeta = in.ListMeta
 	if in.Items != nil {
 		in, out := &in.Items, &out.Items
 		*out = make([]Event, len(*in))
diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go
index fa799f3..8ce1830 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/doc.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/doc.go
@@ -15,7 +15,6 @@ limitations under the License.
 */
 // +k8s:deepcopy-gen=package
-// +k8s:protobuf-gen=package
 // +k8s:openapi-gen=true
 package v1beta1 // import "k8s.io/api/extensions/v1beta1"
diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
index bd37f43..72d64db 100644
--- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
+++ b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go
@@ -14,29 +14,94 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */
-// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// Code generated by protoc-gen-gogo.
 // source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+// DO NOT EDIT!
+/*
+	Package v1beta1 is a generated protocol buffer package.
+
+	It is generated from these files:
+		k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto
+
+	It has these top-level messages:
+		AllowedFlexVolume
+		AllowedHostPath
+		CustomMetricCurrentStatus
+		CustomMetricCurrentStatusList
+		CustomMetricTarget
+		CustomMetricTargetList
+		DaemonSet
+		DaemonSetCondition
+		DaemonSetList
+		DaemonSetSpec
+		DaemonSetStatus
+		DaemonSetUpdateStrategy
+		Deployment
+		DeploymentCondition
+		DeploymentList
+		DeploymentRollback
+		DeploymentSpec
+		DeploymentStatus
+		DeploymentStrategy
+		FSGroupStrategyOptions
+		HTTPIngressPath
+		HTTPIngressRuleValue
+		HostPortRange
+		IDRange
+		IPBlock
+		Ingress
+		IngressBackend
+		IngressList
+		IngressRule
+		IngressRuleValue
+		IngressSpec
+		IngressStatus
+		IngressTLS
+		NetworkPolicy
+		NetworkPolicyEgressRule
+		NetworkPolicyIngressRule
+		NetworkPolicyList
+		NetworkPolicyPeer
+		NetworkPolicyPort
+		NetworkPolicySpec
+		PodSecurityPolicy
+		PodSecurityPolicyList
+		PodSecurityPolicySpec
+		ReplicaSet
+		ReplicaSetCondition
+		ReplicaSetList
+		ReplicaSetSpec
+		ReplicaSetStatus
+		ReplicationControllerDummy
+		RollbackConfig
+		RollingUpdateDaemonSet
+		RollingUpdateDeployment
+		RunAsUserStrategyOptions
+		SELinuxStrategyOptions
+		Scale
+		ScaleSpec
+		ScaleStatus
+		SupplementalGroupsStrategyOptions
+*/
 package v1beta1
-import (
-	fmt "fmt"
+import proto "github.com/gogo/protobuf/proto"
+import fmt "fmt"
+import math "math"
-	io "io"
+import k8s_io_api_core_v1 "k8s.io/api/core/v1"
-	proto "github.com/gogo/protobuf/proto"
-	github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-	k8s_io_api_core_v1 "k8s.io/api/core/v1"
-	v11 "k8s.io/api/core/v1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	math "math"
-	math_bits "math/bits"
-	reflect "reflect"
-	strings "strings"
+import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"
-	intstr "k8s.io/apimachinery/pkg/util/intstr"
-)
+import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
+
+import strings "strings"
+import reflect "reflect"
+
+import io "io"
 // Reference imports to suppress errors if they are not otherwise used.
 var _ = proto.Marshal
@@ -47,1580 +112,265 @@ var _ = math.Inf
 // is compatible with the proto package it is being compiled against.
 // A compilation error at this line likely means your copy of the
 // proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) +func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } +func (*CustomMetricCurrentStatus) ProtoMessage() {} +func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m *AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) +func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } +func (*CustomMetricCurrentStatusList) ProtoMessage() {} +func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { + 
return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo +func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } +func (*CustomMetricTarget) ProtoMessage() {} +func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{3} -} -func (m *DaemonSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSet.Merge(m, src) -} -func (m *DaemonSet) XXX_Size() int { - return m.Size() -} -func (m *DaemonSet) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSet.DiscardUnknown(m) -} +func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } +func (*CustomMetricTargetList) ProtoMessage() {} +func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_DaemonSet proto.InternalMessageInfo +func (m *DaemonSet) Reset() { *m = DaemonSet{} } +func (*DaemonSet) ProtoMessage() {} +func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{4} -} -func (m *DaemonSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetCondition.Merge(m, src) -} -func (m *DaemonSetCondition) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetCondition.DiscardUnknown(m) -} +func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } +func (*DaemonSetCondition) ProtoMessage() {} +func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_DaemonSetCondition proto.InternalMessageInfo +func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } +func (*DaemonSetList) ProtoMessage() {} +func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{5} -} -func (m *DaemonSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetList.Merge(m, src) -} -func (m *DaemonSetList) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetList) XXX_DiscardUnknown() { - 
xxx_messageInfo_DaemonSetList.DiscardUnknown(m) -} +func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } +func (*DaemonSetSpec) ProtoMessage() {} +func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_DaemonSetList proto.InternalMessageInfo - -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{6} -} -func (m *DaemonSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetSpec.Merge(m, src) -} -func (m *DaemonSetSpec) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetSpec proto.InternalMessageInfo - -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{7} -} -func (m *DaemonSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetStatus.Merge(m, src) -} -func (m *DaemonSetStatus) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetStatus proto.InternalMessageInfo +func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } +func (*DaemonSetStatus) ProtoMessage() {} +func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } func (*DaemonSetUpdateStrategy) ProtoMessage() {} func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{8} -} -func (m *DaemonSetUpdateStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DaemonSetUpdateStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DaemonSetUpdateStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_DaemonSetUpdateStrategy.Merge(m, src) -} -func (m *DaemonSetUpdateStrategy) XXX_Size() int { - return m.Size() -} -func (m *DaemonSetUpdateStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_DaemonSetUpdateStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_DaemonSetUpdateStrategy proto.InternalMessageInfo - -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{9} -} -func (m *Deployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - 
if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Deployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_Deployment.Merge(m, src) -} -func (m *Deployment) XXX_Size() int { - return m.Size() -} -func (m *Deployment) XXX_DiscardUnknown() { - xxx_messageInfo_Deployment.DiscardUnknown(m) -} - -var xxx_messageInfo_Deployment proto.InternalMessageInfo - -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{10} -} -func (m *DeploymentCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentCondition) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentCondition.Merge(m, src) -} -func (m *DeploymentCondition) XXX_Size() int { - return m.Size() -} -func (m *DeploymentCondition) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentCondition proto.InternalMessageInfo - -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{11} -} -func (m *DeploymentList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentList) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentList.Merge(m, src) -} -func (m *DeploymentList) XXX_Size() int { - return m.Size() -} -func (m *DeploymentList) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentList.DiscardUnknown(m) + return fileDescriptorGenerated, []int{11} } -var xxx_messageInfo_DeploymentList proto.InternalMessageInfo +func (m *Deployment) Reset() { *m = Deployment{} } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{12} -} -func (m *DeploymentRollback) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentRollback) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentRollback) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentRollback.Merge(m, src) -} -func (m *DeploymentRollback) XXX_Size() int { - return m.Size() -} -func (m *DeploymentRollback) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentRollback.DiscardUnknown(m) -} +func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } +func (*DeploymentCondition) ProtoMessage() {} +func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } -var xxx_messageInfo_DeploymentRollback proto.InternalMessageInfo +func (m *DeploymentList) Reset() { *m = DeploymentList{} } +func (*DeploymentList) ProtoMessage() {} 
+func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{13} -} -func (m *DeploymentSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentSpec.Merge(m, src) -} -func (m *DeploymentSpec) XXX_Size() int { - return m.Size() -} -func (m *DeploymentSpec) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentSpec.DiscardUnknown(m) -} +func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } +func (*DeploymentRollback) ProtoMessage() {} +func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -var xxx_messageInfo_DeploymentSpec proto.InternalMessageInfo - -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{14} -} -func (m *DeploymentStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentStatus.Merge(m, src) -} -func (m *DeploymentStatus) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentStatus proto.InternalMessageInfo - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{15} -} -func (m *DeploymentStrategy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeploymentStrategy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeploymentStrategy) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeploymentStrategy.Merge(m, src) -} -func (m *DeploymentStrategy) XXX_Size() int { - return m.Size() -} -func (m *DeploymentStrategy) XXX_DiscardUnknown() { - xxx_messageInfo_DeploymentStrategy.DiscardUnknown(m) -} - -var xxx_messageInfo_DeploymentStrategy proto.InternalMessageInfo - -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{16} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{17} -} -func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPIngressPath.Merge(m, src) -} -func (m *HTTPIngressPath) XXX_Size() int { - return m.Size() -} -func (m *HTTPIngressPath) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) -} +func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } +func (*DeploymentSpec) ProtoMessage() {} +func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo +func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } +func (*DeploymentStatus) ProtoMessage() {} +func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{18} -} -func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) -} -func (m *HTTPIngressRuleValue) XXX_Size() int { - return m.Size() -} -func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) -} - -var xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo - -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{19} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} - -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{20} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error 
{ - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} - -var xxx_messageInfo_IDRange proto.InternalMessageInfo - -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{21} -} -func (m *IPBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IPBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPBlock.Merge(m, src) -} -func (m *IPBlock) XXX_Size() int { - return m.Size() -} -func (m *IPBlock) XXX_DiscardUnknown() { - xxx_messageInfo_IPBlock.DiscardUnknown(m) -} - -var xxx_messageInfo_IPBlock proto.InternalMessageInfo - -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{22} -} -func (m *Ingress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Ingress) XXX_Merge(src proto.Message) { - xxx_messageInfo_Ingress.Merge(m, src) -} -func (m *Ingress) XXX_Size() int { - return m.Size() -} -func (m *Ingress) XXX_DiscardUnknown() { - xxx_messageInfo_Ingress.DiscardUnknown(m) -} - -var xxx_messageInfo_Ingress proto.InternalMessageInfo - -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{23} -} -func (m *IngressBackend) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressBackend) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressBackend.Merge(m, src) -} -func (m *IngressBackend) XXX_Size() int { - return m.Size() -} -func (m *IngressBackend) XXX_DiscardUnknown() { - xxx_messageInfo_IngressBackend.DiscardUnknown(m) -} +func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } +func (*DeploymentStrategy) ProtoMessage() {} +func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } -var xxx_messageInfo_IngressBackend proto.InternalMessageInfo +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{24} -} -func (m *IngressList) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressList.Merge(m, src) -} -func (m *IngressList) XXX_Size() int { - return m.Size() -} -func (m *IngressList) XXX_DiscardUnknown() { - xxx_messageInfo_IngressList.DiscardUnknown(m) -} +func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } +func (*HTTPIngressPath) ProtoMessage() {} +func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } -var xxx_messageInfo_IngressList proto.InternalMessageInfo +func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } +func (*HTTPIngressRuleValue) ProtoMessage() {} +func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{25} -} -func (m *IngressRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressRule.Merge(m, src) -} -func (m *IngressRule) XXX_Size() int { - return m.Size() -} -func (m *IngressRule) XXX_DiscardUnknown() { - xxx_messageInfo_IngressRule.DiscardUnknown(m) -} +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } -var xxx_messageInfo_IngressRule proto.InternalMessageInfo +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{26} -} -func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressRuleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressRuleValue.Merge(m, src) -} -func (m *IngressRuleValue) XXX_Size() int { - return m.Size() -} -func (m *IngressRuleValue) XXX_DiscardUnknown() { - xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) -} +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } -var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo +func (m *Ingress) Reset() { *m = Ingress{} } +func (*Ingress) ProtoMessage() {} +func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { - return 
fileDescriptor_cdc93917efc28165, []int{27} -} -func (m *IngressSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressSpec.Merge(m, src) -} -func (m *IngressSpec) XXX_Size() int { - return m.Size() -} -func (m *IngressSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IngressSpec.DiscardUnknown(m) -} +func (m *IngressBackend) Reset() { *m = IngressBackend{} } +func (*IngressBackend) ProtoMessage() {} +func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } -var xxx_messageInfo_IngressSpec proto.InternalMessageInfo +func (m *IngressList) Reset() { *m = IngressList{} } +func (*IngressList) ProtoMessage() {} +func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{28} -} -func (m *IngressStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressStatus.Merge(m, src) -} -func (m *IngressStatus) XXX_Size() int { - return m.Size() -} -func (m *IngressStatus) XXX_DiscardUnknown() { - xxx_messageInfo_IngressStatus.DiscardUnknown(m) -} +func (m *IngressRule) Reset() { *m = IngressRule{} } +func (*IngressRule) ProtoMessage() {} +func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } -var xxx_messageInfo_IngressStatus proto.InternalMessageInfo +func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } +func (*IngressRuleValue) ProtoMessage() {} +func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{29} -} -func (m *IngressTLS) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressTLS) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressTLS.Merge(m, src) -} -func (m *IngressTLS) XXX_Size() int { - return m.Size() -} -func (m *IngressTLS) XXX_DiscardUnknown() { - xxx_messageInfo_IngressTLS.DiscardUnknown(m) -} +func (m *IngressSpec) Reset() { *m = IngressSpec{} } +func (*IngressSpec) ProtoMessage() {} +func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } -var xxx_messageInfo_IngressTLS proto.InternalMessageInfo +func (m *IngressStatus) Reset() { *m = IngressStatus{} } +func (*IngressStatus) ProtoMessage() {} +func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func 
(*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{30} -} -func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicy.Merge(m, src) -} -func (m *NetworkPolicy) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) -} +func (m *IngressTLS) Reset() { *m = IngressTLS{} } +func (*IngressTLS) ProtoMessage() {} +func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } -var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } func (*NetworkPolicyEgressRule) ProtoMessage() {} func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{31} -} -func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) -} -func (m *NetworkPolicyEgressRule) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo - -func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } -func (*NetworkPolicyIngressRule) ProtoMessage() {} -func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{32} -} -func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) -} -func (m *NetworkPolicyIngressRule) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo - -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{33} -} -func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m 
*NetworkPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyList.Merge(m, src) -} -func (m *NetworkPolicyList) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo - -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{34} -} -func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) -} -func (m *NetworkPolicyPeer) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo - -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{35} -} -func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyPort.Merge(m, src) -} -func (m *NetworkPolicyPort) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyPort) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo - -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{36} -} -func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicySpec.Merge(m, src) -} -func (m *NetworkPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo - -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{37} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src 
proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{38} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{39} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{40} -} -func (m *ReplicaSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSet.Merge(m, src) -} -func (m *ReplicaSet) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSet) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSet.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSet proto.InternalMessageInfo - -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{41} -} -func (m *ReplicaSetCondition) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetCondition) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetCondition) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_ReplicaSetCondition.Merge(m, src) -} -func (m *ReplicaSetCondition) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetCondition) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetCondition.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetCondition proto.InternalMessageInfo - -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{42} -} -func (m *ReplicaSetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetList.Merge(m, src) -} -func (m *ReplicaSetList) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetList) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetList.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetList proto.InternalMessageInfo - -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{43} -} -func (m *ReplicaSetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetSpec.Merge(m, src) -} -func (m *ReplicaSetSpec) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetSpec proto.InternalMessageInfo - -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{44} -} -func (m *ReplicaSetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ReplicaSetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ReplicaSetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ReplicaSetStatus.Merge(m, src) -} -func (m *ReplicaSetStatus) XXX_Size() int { - return m.Size() -} -func (m *ReplicaSetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ReplicaSetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_ReplicaSetStatus proto.InternalMessageInfo - -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{45} -} -func (m *RollbackConfig) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollbackConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollbackConfig) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollbackConfig.Merge(m, src) -} -func (m *RollbackConfig) XXX_Size() int { - return m.Size() -} -func (m *RollbackConfig) 
XXX_DiscardUnknown() { - xxx_messageInfo_RollbackConfig.DiscardUnknown(m) -} - -var xxx_messageInfo_RollbackConfig proto.InternalMessageInfo - -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{46} -} -func (m *RollingUpdateDaemonSet) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingUpdateDaemonSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingUpdateDaemonSet) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateDaemonSet.Merge(m, src) -} -func (m *RollingUpdateDaemonSet) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{34} } -func (m *RollingUpdateDaemonSet) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateDaemonSet.DiscardUnknown(m) + +func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } +func (*NetworkPolicyIngressRule) ProtoMessage() {} +func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{35} } -var xxx_messageInfo_RollingUpdateDaemonSet proto.InternalMessageInfo +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (*RollingUpdateDeployment) ProtoMessage() {} -func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{47} -} -func (m *RollingUpdateDeployment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RollingUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RollingUpdateDeployment) XXX_Merge(src proto.Message) { - xxx_messageInfo_RollingUpdateDeployment.Merge(m, src) -} -func (m *RollingUpdateDeployment) XXX_Size() int { - return m.Size() -} -func (m *RollingUpdateDeployment) XXX_DiscardUnknown() { - xxx_messageInfo_RollingUpdateDeployment.DiscardUnknown(m) -} +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } -var xxx_messageInfo_RollingUpdateDeployment proto.InternalMessageInfo +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{48} -} -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsGroupStrategyOptions) 
XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) -} -func (m *RunAsGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) -} +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{49} -} -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) -} -func (m *RunAsUserStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{50} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m *RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} +func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } +func (*ReplicaSet) ProtoMessage() {} +func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo +func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } +func (*ReplicaSetCondition) ProtoMessage() {} +func (*ReplicaSetCondition) Descriptor() ([]byte, 
[]int) { return fileDescriptorGenerated, []int{44} } -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{51} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) -} +func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } +func (*ReplicaSetList) ProtoMessage() {} +func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo +func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } +func (*ReplicaSetSpec) ProtoMessage() {} +func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{52} -} -func (m *Scale) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scale) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scale) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scale.Merge(m, src) -} -func (m *Scale) XXX_Size() int { - return m.Size() -} -func (m *Scale) XXX_DiscardUnknown() { - xxx_messageInfo_Scale.DiscardUnknown(m) +func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } +func (*ReplicaSetStatus) ProtoMessage() {} +func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } + +func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } +func (*ReplicationControllerDummy) ProtoMessage() {} +func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{48} } -var xxx_messageInfo_Scale proto.InternalMessageInfo +func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } +func (*RollbackConfig) ProtoMessage() {} +func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{53} -} -func (m *ScaleSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleSpec.Merge(m, src) -} -func (m *ScaleSpec) XXX_Size() int { - return m.Size() +func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } +func (*RollingUpdateDaemonSet) ProtoMessage() {} +func 
(*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } + +func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } +func (*RollingUpdateDeployment) ProtoMessage() {} +func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{51} } -func (m *ScaleSpec) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleSpec.DiscardUnknown(m) + +func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } +func (*RunAsUserStrategyOptions) ProtoMessage() {} +func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{52} } -var xxx_messageInfo_ScaleSpec proto.InternalMessageInfo +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{54} -} -func (m *ScaleStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ScaleStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ScaleStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_ScaleStatus.Merge(m, src) -} -func (m *ScaleStatus) XXX_Size() int { - return m.Size() -} -func (m *ScaleStatus) XXX_DiscardUnknown() { - xxx_messageInfo_ScaleStatus.DiscardUnknown(m) -} +func (m *Scale) Reset() { *m = Scale{} } +func (*Scale) ProtoMessage() {} +func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } -var xxx_messageInfo_ScaleStatus proto.InternalMessageInfo +func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } +func (*ScaleSpec) ProtoMessage() {} +func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } + +func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } +func (*ScaleStatus) ProtoMessage() {} +func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cdc93917efc28165, []int{55} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{57} } -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.extensions.v1beta1.AllowedCSIDriver") 
 	proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume")
 	proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath")
+	proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus")
+	proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList")
+	proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget")
+	proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList")
 	proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet")
 	proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition")
 	proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList")
@@ -1631,7 +381,6 @@ func init() {
 	proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition")
 	proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList")
 	proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback")
-	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback.UpdatedAnnotationsEntry")
 	proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec")
 	proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStatus")
 	proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy")
@@ -1664,358 +413,193 @@ func init() {
 	proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList")
 	proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec")
 	proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus")
+	proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.api.extensions.v1beta1.ReplicationControllerDummy")
 	proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig")
 	proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet")
 	proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment")
-	proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsGroupStrategyOptions")
 	proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions")
-	proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RuntimeClassStrategyOptions")
 	proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions")
 	proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale")
 	proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec")
 	proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus")
-	proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus.SelectorEntry")
 	proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions")
 }
-
-func init() {
-	proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptor_cdc93917efc28165)
-}
-
-var fileDescriptor_cdc93917efc28165 = []byte{
-	// 3743 bytes of a gzipped FileDescriptorProto
-	[... gzipped FileDescriptorProto byte values elided; the closing bytes of the removed array follow below ...]
0xb8, 0x7c, 0x42, 0x0d, 0xe6, 0x5d, 0x51, 0x48, 0x3b, 0xc8, 0xc5, 0x9f, - 0xad, 0xc5, 0x37, 0x2f, 0xff, 0x8a, 0x92, 0xc0, 0x83, 0x53, 0xa5, 0xd1, 0x36, 0xcc, 0x8b, 0xfb, - 0x51, 0x94, 0x26, 0xca, 0x0b, 0xde, 0x0d, 0xb7, 0x9c, 0xcc, 0x82, 0xd3, 0x64, 0xe5, 0xbf, 0x95, - 0x60, 0x2e, 0xf9, 0x51, 0x00, 0xbd, 0x15, 0x5a, 0xf2, 0x4a, 0x64, 0xc9, 0x27, 0x23, 0x52, 0x62, - 0xc1, 0x3f, 0x84, 0x09, 0xf1, 0x74, 0x20, 0x60, 0x84, 0x33, 0xcb, 0x49, 0x19, 0x44, 0x40, 0xb8, - 0x05, 0x2c, 0x3f, 0x26, 0xe1, 0x31, 0x1c, 0x41, 0x93, 0x3f, 0xcd, 0xc1, 0x50, 0xb3, 0xa5, 0x68, - 0xe4, 0x1c, 0xea, 0xd7, 0x5f, 0x85, 0xea, 0xd7, 0x41, 0xff, 0xcf, 0x8c, 0x5b, 0x95, 0x5a, 0xba, - 0xe2, 0x48, 0xe9, 0xfa, 0x46, 0x26, 0xb4, 0x93, 0xab, 0xd6, 0xdf, 0x82, 0x11, 0x4f, 0xe9, 0xe9, - 0x92, 0xa9, 0xfc, 0x97, 0x39, 0x18, 0x0d, 0xa8, 0x38, 0x65, 0x2a, 0xde, 0x0d, 0xd5, 0x1f, 0xf9, - 0x0c, 0x0f, 0x35, 0x01, 0x5d, 0x55, 0xb7, 0xe2, 0x70, 0xfa, 0xa4, 0xfd, 0xce, 0xd8, 0x78, 0x21, - 0xf2, 0x0b, 0x98, 0xb0, 0x15, 0xab, 0x43, 0x6c, 0xef, 0xc3, 0x85, 0xd3, 0xc7, 0xe5, 0x35, 0xec, - 0x6f, 0x85, 0xa8, 0x38, 0xc2, 0x7d, 0xe9, 0x21, 0x8c, 0x87, 0x94, 0x9d, 0xaa, 0xcd, 0xf9, 0xef, - 0x25, 0xf8, 0xd9, 0xc0, 0xc7, 0x1e, 0x54, 0x0f, 0x1d, 0x92, 0x6a, 0xe4, 0x90, 0x2c, 0xa4, 0x03, - 0xbc, 0xba, 0x76, 0xb9, 0xfa, 0xcd, 0xe7, 0xdf, 0x2d, 0x5c, 0xf8, 0xe6, 0xbb, 0x85, 0x0b, 0xdf, - 0x7e, 0xb7, 0x70, 0xe1, 0x0f, 0x8e, 0x17, 0xa4, 0xe7, 0xc7, 0x0b, 0xd2, 0x37, 0xc7, 0x0b, 0xd2, - 0xb7, 0xc7, 0x0b, 0xd2, 0x7f, 0x1e, 0x2f, 0x48, 0x7f, 0xfa, 0xfd, 0xc2, 0x85, 0xf7, 0x8b, 0x02, - 0xee, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0x98, 0xf4, 0xad, 0x8a, 0xba, 0x3e, 0x00, 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { +func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { +func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + return i, nil +} + +func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i += copy(dAtA[i:], m.PathPrefix) + dAtA[i] = 0x10 + i++ + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { +func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { +func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int 
+ _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) + n1, err := m.CurrentValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *CustomMetricCurrentStatusList) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *CustomMetricCurrentStatusList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { +func (m *CustomMetricTarget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { +func (m *CustomMetricTarget) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) + n2, err := m.TargetValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + return i, nil +} + +func (m *CustomMetricTargetList) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *CustomMetricTargetList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2023,52 +607,41 @@ func (m *DaemonSet) Marshal() (dAtA []byte, err error) { } func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - 
if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n3 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil } func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2076,52 +649,41 @@ func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2129,46 +691,37 @@ func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var 
i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n7, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2176,65 +729,54 @@ func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int - _ = l - if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- - dAtA[i] = 0x30 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.UpdateStrategy.MarshalToSizedBuffer(dAtA[:i]) + _ = l + if m.Selector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n8, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n8 + } + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n9, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n9 dAtA[i] = 0x1a - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) + n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + i += n10 + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) + if m.RevisionHistoryLimit != nil { + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2242,65 +784,58 @@ func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { } func (m 
*DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) + if m.CollisionCount != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x48 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - i-- - dAtA[i] = 0x40 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - i-- - dAtA[i] = 0x38 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - i-- - dAtA[i] = 0x30 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + return i, nil } func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2308,39 +843,31 @@ func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { } func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DaemonSetUpdateStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 
} - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *Deployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2348,52 +875,41 @@ func (m *Deployment) Marshal() (dAtA []byte, err error) { } func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n12 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n13, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n13 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n14, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n14 + return i, nil } func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2401,62 +917,49 @@ func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { } func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) + n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n15 dAtA[i] = 0x3a - { - size, err := m.LastUpdateTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) 
+ if err != nil { + return 0, err } - i-- - dAtA[i] = 0x32 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n16 + return i, nil } func (m *DeploymentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2464,46 +967,37 @@ func (m *DeploymentList) Marshal() (dAtA []byte, err error) { } func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n17, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2511,61 +1005,51 @@ func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { } func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentRollback) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) if len(m.UpdatedAnnotations) > 0 { keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) for k := range m.UpdatedAnnotations { keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for iNdEx := len(keysForUpdatedAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.UpdatedAnnotations[string(keysForUpdatedAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- + for _, k := range keysForUpdatedAnnotations { dAtA[i] = 0x12 - i -= 
len(keysForUpdatedAnnotations[iNdEx]) - copy(dAtA[i:], keysForUpdatedAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUpdatedAnnotations[iNdEx]))) - i-- + i++ + v := m.UpdatedAnnotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) + n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 + return i, nil } func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2573,92 +1057,79 @@ func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { } func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ProgressDeadlineSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) - i-- - dAtA[i] = 0x48 + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) } - if m.RollbackTo != nil { - { - size, err := m.RollbackTo.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n19, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x42 + i += n19 } - i-- - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n20, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x38 + i += n20 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) + n21, err := m.Strategy.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n21 + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) if m.RevisionHistoryLimit != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - i-- dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) } - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x28 - { - size, err := m.Strategy.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x38 + i++ + if m.Paused { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i-- - dAtA[i] = 0x22 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + i++ + if m.RollbackTo != nil { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) + n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) if err != nil { 
return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + i += n22 } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + if m.ProgressDeadlineSeconds != nil { + dAtA[i] = 0x48 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) } - return len(dAtA) - i, nil + return i, nil } func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2666,59 +1137,52 @@ func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.CollisionCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - i-- - dAtA[i] = 0x40 - } - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x38 - if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) + if len(m.Conditions) > 0 { + for _, msg := range m.Conditions { dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + if m.CollisionCount != nil { + dAtA[i] = 0x40 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) + } + return i, nil } func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2726,39 +1190,31 @@ func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { } func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeploymentStrategy) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
- i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) if m.RollingUpdate != nil { - { - size, err := m.RollingUpdate.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) + n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n23 } - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2766,41 +1222,33 @@ func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Ranges { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2808,44 +1256,29 @@ func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.PathType != nil { - i -= len(*m.PathType) - copy(dAtA[i:], *m.PathType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) + i += copy(dAtA[i:], m.Path) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) + n24, err := m.Backend.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n24 + return i, nil } func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2853,36 +1286,29 @@ func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Paths) > 0 { - for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Paths { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2890,28 +1316,23 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { } func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + return i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2919,28 +1340,23 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + return i, nil } func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2948,36 +1364,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Except[iNdEx]) - copy(dAtA[i:], m.Except[iNdEx]) - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Except[iNdEx]))) - i-- + for _, s := range m.Except { dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - i -= len(m.CIDR) - copy(dAtA[i:], m.CIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *Ingress) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2985,52 +1401,41 @@ func (m *Ingress) Marshal() (dAtA []byte, err error) { } func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n25, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n25 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n26, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n26 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n27, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n27 + return i, nil } func (m *IngressBackend) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3038,49 +1443,29 @@ func (m *IngressBackend) Marshal() (dAtA []byte, err error) { } func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - { - size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) + i += copy(dAtA[i:], m.ServiceName) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ServicePort.Size())) + n28, err := m.ServicePort.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n28 + return i, nil } func (m *IngressList) Marshal() (dAtA []byte, 
err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3088,46 +1473,37 @@ func (m *IngressList) Marshal() (dAtA []byte, err error) { } func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n29, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n29 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *IngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3135,37 +1511,29 @@ func (m *IngressRule) Marshal() (dAtA []byte, err error) { } func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) + i += copy(dAtA[i:], m.Host) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IngressRuleValue.Size())) + n30, err := m.IngressRuleValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n30 + return i, nil } func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3173,34 +1541,27 @@ func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { } func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.HTTP != nil { - { - size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.HTTP.Size())) + n31, err := m.HTTP.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 } - return len(dAtA) - i, nil + return i, nil } 
func (m *IngressSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3208,69 +1569,51 @@ func (m *IngressSpec) Marshal() (dAtA []byte, err error) { } func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.IngressClassName != nil { - i -= len(*m.IngressClassName) - copy(dAtA[i:], *m.IngressClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + if m.Backend != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Backend.Size())) + n32, err := m.Backend.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n32 } if len(m.TLS) > 0 { - for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.TLS { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if m.Backend != nil { - { - size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) + if len(m.Rules) > 0 { + for _, msg := range m.Rules { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } func (m *IngressStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3278,32 +1621,25 @@ func (m *IngressStatus) Marshal() (dAtA []byte, err error) { } func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) + n33, err := m.LoadBalancer.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n33 + return i, nil } func (m *IngressTLS) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3311,36 +1647,36 @@ func (m *IngressTLS) Marshal() (dAtA []byte, err error) { } func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.SecretName) - copy(dAtA[i:], m.SecretName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i-- - dAtA[i] = 0x12 if len(m.Hosts) > 0 { - for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Hosts[iNdEx]) - copy(dAtA[i:], m.Hosts[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) - i-- + for _, s := range m.Hosts { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) + i += copy(dAtA[i:], m.SecretName) + return i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3348,42 +1684,33 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n34, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n34 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n35, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n35 + return i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3391,50 +1718,41 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.To) > 0 { - for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } - } - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + if len(m.To) > 0 { + for _, msg := range m.To { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err 
:= msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3442,50 +1760,41 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.From) > 0 { - for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.From) > 0 { + for _, msg := range m.From { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3493,46 +1802,37 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n36, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n36 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3540,58 +1840,47 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.IPBlock != nil { - { - size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.PodSelector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) + n37, err := m.PodSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n37 } if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n38, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n38 } - if m.PodSelector != nil { - { - size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.IPBlock != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) + n39, err := m.IPBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n39 } - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3599,41 +1888,33 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Port != nil { - { - size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } if m.Protocol != nil { - i -= len(*m.Protocol) - copy(dAtA[i:], *m.Protocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i += copy(dAtA[i:], *m.Protocol) + } + if m.Port != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n40, err := m.Port.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n40 } - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3641,69 +1922,64 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.PolicyTypes) > 0 { - for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PolicyTypes[iNdEx]) - copy(dAtA[i:], 
m.PolicyTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) + n41, err := m.PodSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.Egress) > 0 { - for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n41 + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n } } - if len(m.Ingress) > 0 { - for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Egress) > 0 { + for _, msg := range m.Egress { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - { - size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3711,42 +1987,33 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n42, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n42 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n43, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n43 + return i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3754,46 +2021,37 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m 
*PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n44, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n44 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3801,283 +2059,262 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 + dAtA[i] = 0x8 + i++ + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if m.RunAsGroup != nil { - { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 - } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - 
dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x78 } - i-- - if m.ReadOnlyRootFilesystem { + dAtA[i] = 0x30 + i++ + if m.HostNetwork { dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + } else { + dAtA[i] = 0 } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + if len(m.HostPorts) > 0 { + for _, msg := range m.HostPorts { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x52 - i-- - if m.HostIPC { + dAtA[i] = 0x40 + i++ + if m.HostPID { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- + i++ dAtA[i] = 0x48 - i-- - if m.HostPID { + i++ + if m.HostIPC { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- 
- dAtA[i] = 0x3a - } + i++ + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) + n45, err := m.SELinux.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - if m.HostNetwork { + i += n45 + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) + n46, err := m.RunAsUser.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n46 + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) + n47, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n47 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) + n48, err := m.FSGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n48 + dAtA[i] = 0x70 + i++ + if m.ReadOnlyRootFilesystem { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a + i++ + if m.DefaultAllowPrivilegeEscalation != nil { + dAtA[i] = 0x78 + i++ + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a + if len(m.AllowedHostPaths) > 0 { + for _, msg := range m.AllowedHostPaths { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 + if len(m.AllowedFlexVolumes) > 0 { + for _, msg := range m.AllowedFlexVolumes { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + 
i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.AllowedProcMountTypes) > 0 { + for _, s := range m.AllowedProcMountTypes { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil } func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4085,52 +2322,41 @@ func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n49, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n49 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n50, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n50 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n51, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n51 + return i, nil } func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4138,52 +2364,41 @@ func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetCondition) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x2a - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - { - size, err := m.LastTransitionTime.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], 
m.Status) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) + n52, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n52 + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4191,46 +2406,37 @@ func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n53, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n53 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4238,52 +2444,43 @@ func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - i-- - dAtA[i] = 0x20 - { - size, err := m.Template.MarshalToSizedBuffer(dAtA[:i]) + if m.Replicas != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) + } + if m.Selector != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n54, err := m.Selector.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n54 } - i-- dAtA[i] = 0x1a - if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.Replicas != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - i-- - dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) + n55, err := m.Template.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - return len(dAtA) - i, nil + i += n55 + dAtA[i] = 0x20 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) + return i, nil } func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4291,282 +2488,183 @@ func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { } func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ReplicaSetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) if len(m.Conditions) > 0 { - for iNdEx := len(m.Conditions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Conditions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Conditions { dAtA[i] = 0x32 - } - } - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollbackConfig) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDaemonSet) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0xa } - return len(dAtA) - i, nil + return i, nil } -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { +func (m *ReplicationControllerDummy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RollingUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ReplicationControllerDummy) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.MaxSurge != nil { - { - size, err := m.MaxSurge.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + return i, nil } -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { +func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { +func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) + return i, nil +} + +func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil } -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n56, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n56 } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { +func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - 
} - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + if m.MaxUnavailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n57, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n57 } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.MaxSurge != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) + n58, err := m.MaxSurge.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n58 + } + return i, nil } -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { +func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) + if len(m.Ranges) > 0 { + for _, msg := range m.Ranges { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4574,39 +2672,31 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n59, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n59 } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return 
len(dAtA) - i, nil + return i, nil } func (m *Scale) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4614,52 +2704,41 @@ func (m *Scale) Marshal() (dAtA []byte, err error) { } func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scale) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n60, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n60 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n61, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n61 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n62, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n62 + return i, nil } func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4667,25 +2746,20 @@ func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { } func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) + return i, nil } func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4693,54 +2767,46 @@ func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { } func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ScaleStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.TargetSelector) - copy(dAtA[i:], m.TargetSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) if len(m.Selector) > 0 { keysForSelector := make([]string, 0, len(m.Selector)) for k := range m.Selector { keysForSelector = append(keysForSelector, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for iNdEx := len(keysForSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.Selector[string(keysForSelector[iNdEx])] - baseI := i - i -= len(v) - 
copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- + for _, k := range keysForSelector { dAtA[i] = 0x12 - i -= len(keysForSelector[iNdEx]) - copy(dAtA[i:], keysForSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForSelector[iNdEx]))) - i-- + i++ + v := m.Selector[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) + i += copy(dAtA[i:], m.TargetSelector) + return i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -4748,86 +2814,118 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Ranges { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } -func (m *AllowedCSIDriver) Size() (n int) { - if m == nil { - return 0 - } +func (m *AllowedFlexVolume) Size() (n int) { var l int _ = l - l = len(m.Name) + l = len(m.Driver) n += 1 + l + sovGenerated(uint64(l)) return n } -func (m *AllowedFlexVolume) Size() (n int) { - if m == nil { - return 0 - } +func (m *AllowedHostPath) Size() (n int) { var l int _ = l - l = 
len(m.Driver) + l = len(m.PathPrefix) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } -func (m *AllowedHostPath) Size() (n int) { - if m == nil { - return 0 +func (m *CustomMetricCurrentStatus) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.CurrentValue.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *CustomMetricCurrentStatusList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } + return n +} + +func (m *CustomMetricTarget) Size() (n int) { var l int _ = l - l = len(m.PathPrefix) + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + l = m.TargetValue.Size() n += 1 + l + sovGenerated(uint64(l)) - n += 2 return n } -func (m *DaemonSet) Size() (n int) { - if m == nil { - return 0 +func (m *CustomMetricTargetList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } + return n +} + +func (m *DaemonSet) Size() (n int) { var l int _ = l l = m.ObjectMeta.Size() @@ -4840,9 +2938,6 @@ func (m *DaemonSet) Size() (n int) { } func (m *DaemonSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -4859,9 +2954,6 @@ func (m *DaemonSetCondition) Size() (n int) { } func (m *DaemonSetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -4876,9 +2968,6 @@ func (m *DaemonSetList) Size() (n int) { } func (m *DaemonSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Selector != nil { @@ -4898,9 +2987,6 @@ func (m *DaemonSetSpec) Size() (n int) { } func (m *DaemonSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) @@ -4924,9 +3010,6 @@ func (m *DaemonSetStatus) Size() (n int) { } func (m *DaemonSetUpdateStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -4939,9 +3022,6 @@ func (m *DaemonSetUpdateStrategy) Size() (n int) { } func (m *Deployment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -4954,9 +3034,6 @@ func (m *Deployment) Size() (n int) { } func (m *DeploymentCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -4975,9 +3052,6 @@ func (m *DeploymentCondition) Size() (n int) { } func (m *DeploymentList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -4992,9 +3066,6 @@ func (m *DeploymentList) Size() (n int) { } func (m *DeploymentRollback) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -5013,9 +3084,6 @@ func (m *DeploymentRollback) Size() (n int) { } func (m *DeploymentSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -5045,9 +3113,6 @@ func (m *DeploymentSpec) Size() (n int) { } func (m *DeploymentStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -5069,9 +3134,6 @@ func (m *DeploymentStatus) Size() (n int) { } func (m *DeploymentStrategy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -5084,9 +3146,6 @@ func (m *DeploymentStrategy) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Rule) @@ -5101,26 
+3160,16 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Path) n += 1 + l + sovGenerated(uint64(l)) l = m.Backend.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *HTTPIngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Paths) > 0 { @@ -5133,9 +3182,6 @@ func (m *HTTPIngressRuleValue) Size() (n int) { } func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -5144,9 +3190,6 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -5155,9 +3198,6 @@ func (m *IDRange) Size() (n int) { } func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.CIDR) @@ -5172,9 +3212,6 @@ func (m *IPBlock) Size() (n int) { } func (m *Ingress) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -5187,26 +3224,16 @@ func (m *Ingress) Size() (n int) { } func (m *IngressBackend) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.ServiceName) n += 1 + l + sovGenerated(uint64(l)) l = m.ServicePort.Size() n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *IngressList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -5221,9 +3248,6 @@ func (m *IngressList) Size() (n int) { } func (m *IngressRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Host) @@ -5234,9 +3258,6 @@ func (m *IngressRule) Size() (n int) { } func (m *IngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.HTTP != nil { @@ -5247,9 +3268,6 @@ func (m *IngressRuleValue) Size() (n int) { } func (m *IngressSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Backend != nil { @@ -5268,17 +3286,10 @@ func (m *IngressSpec) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *IngressStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.LoadBalancer.Size() @@ -5287,9 +3298,6 @@ func (m *IngressStatus) Size() (n int) { } func (m *IngressTLS) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Hosts) > 0 { @@ -5304,9 +3312,6 @@ func (m *IngressTLS) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -5317,9 +3322,6 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Ports) > 0 { @@ -5338,9 +3340,6 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Ports) > 0 { @@ -5359,9 +3358,6 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -5376,9 +3372,6 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { - 
if m == nil { - return 0 - } var l int _ = l if m.PodSelector != nil { @@ -5397,9 +3390,6 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Protocol != nil { @@ -5414,9 +3404,6 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m *NetworkPolicySpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.PodSelector.Size() @@ -5443,9 +3430,6 @@ func (m *NetworkPolicySpec) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -5456,9 +3440,6 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -5473,9 +3454,6 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -5551,33 +3529,16 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) + if len(m.AllowedProcMountTypes) > 0 { + for _, s := range m.AllowedProcMountTypes { + l = len(s) + n += 2 + l + sovGenerated(uint64(l)) + } } return n } func (m *ReplicaSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -5590,9 +3551,6 @@ func (m *ReplicaSet) Size() (n int) { } func (m *ReplicaSetCondition) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -5609,9 +3567,6 @@ func (m *ReplicaSetCondition) Size() (n int) { } func (m *ReplicaSetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -5626,9 +3581,6 @@ func (m *ReplicaSetList) Size() (n int) { } func (m *ReplicaSetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Replicas != nil { @@ -5645,9 +3597,6 @@ func (m *ReplicaSetSpec) Size() (n int) { } func (m *ReplicaSetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -5664,10 +3613,13 @@ func (m *ReplicaSetStatus) Size() (n int) { return n } +func (m *ReplicationControllerDummy) Size() (n int) { + var l int + _ = l + return n +} + func (m *RollbackConfig) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Revision)) @@ -5675,9 +3627,6 @@ func (m *RollbackConfig) Size() (n int) { } func (m *RollingUpdateDaemonSet) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxUnavailable != nil { @@ -5688,9 +3637,6 @@ func (m *RollingUpdateDaemonSet) Size() (n int) { } func (m *RollingUpdateDeployment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxUnavailable != nil { @@ -5704,27 +3650,7 @@ func (m *RollingUpdateDeployment) Size() (n int) { return n } -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e 
:= range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Rule) @@ -5738,29 +3664,7 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Rule) @@ -5773,9 +3677,6 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *Scale) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -5788,9 +3689,6 @@ func (m *Scale) Size() (n int) { } func (m *ScaleSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -5798,9 +3696,6 @@ func (m *ScaleSpec) Size() (n int) { } func (m *ScaleStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Replicas)) @@ -5818,9 +3713,6 @@ func (m *ScaleStatus) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Rule) @@ -5835,38 +3727,77 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *AllowedCSIDriver) String() string { +func (this *AllowedFlexVolume) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedFlexVolume{`, + `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + `}`, + }, "") + return s +} +func (this *AllowedHostPath) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&AllowedHostPath{`, + `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricCurrentStatus) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&AllowedCSIDriver{`, + s := strings.Join([]string{`&CustomMetricCurrentStatus{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *AllowedFlexVolume) String() string { +func (this *CustomMetricCurrentStatusList) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, + s := strings.Join([]string{`&CustomMetricCurrentStatusList{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricCurrentStatus", "CustomMetricCurrentStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *AllowedHostPath) String() string { +func (this *CustomMetricTarget) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&AllowedHostPath{`, - 
`PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + s := strings.Join([]string{`&CustomMetricTarget{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *CustomMetricTargetList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&CustomMetricTargetList{`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CustomMetricTarget", "CustomMetricTarget", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -5876,7 +3807,7 @@ func (this *DaemonSet) String() string { return "nil" } s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -5890,7 +3821,7 @@ func (this *DaemonSetCondition) String() string { s := strings.Join([]string{`&DaemonSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -5901,14 +3832,9 @@ func (this *DaemonSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]DaemonSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -5918,8 +3844,8 @@ func (this *DaemonSetSpec) String() string { return "nil" } s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 
1), `&`, ``, 1) + `,`, `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `TemplateGeneration:` + fmt.Sprintf("%v", this.TemplateGeneration) + `,`, @@ -5932,11 +3858,6 @@ func (this *DaemonSetStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DaemonSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&DaemonSetStatus{`, `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, @@ -5947,7 +3868,7 @@ func (this *DaemonSetStatus) String() string { `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -5958,7 +3879,7 @@ func (this *DaemonSetUpdateStrategy) String() string { } s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, `}`, }, "") return s @@ -5968,7 +3889,7 @@ func (this *Deployment) String() string { return "nil" } s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -5984,8 +3905,8 @@ func (this *DeploymentCondition) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastUpdateTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -5994,14 +3915,9 @@ func (this *DeploymentList) String() string { if this == nil { return "nil" } - repeatedStringForItems := 
"[]Deployment{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Deployment", "Deployment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6034,13 +3950,13 @@ func (this *DeploymentSpec) String() string { } s := strings.Join([]string{`&DeploymentSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1) + `,`, + `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, `}`, }, "") @@ -6050,18 +3966,13 @@ func (this *DeploymentStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]DeploymentCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&DeploymentStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, `}`, @@ -6074,7 +3985,7 @@ func (this *DeploymentStrategy) String() string { } s := strings.Join([]string{`&DeploymentStrategy{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(this.RollingUpdate.String(), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + 
`,`, + `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, `}`, }, "") return s @@ -6083,14 +3994,9 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6102,7 +4008,6 @@ func (this *HTTPIngressPath) String() string { s := strings.Join([]string{`&HTTPIngressPath{`, `Path:` + fmt.Sprintf("%v", this.Path) + `,`, `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, `}`, }, "") return s @@ -6111,13 +4016,8 @@ func (this *HTTPIngressRuleValue) String() string { if this == nil { return "nil" } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," - } - repeatedStringForPaths += "}" s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + repeatedStringForPaths + `,`, + `Paths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Paths), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6160,7 +4060,7 @@ func (this *Ingress) String() string { return "nil" } s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -6173,8 +4073,7 @@ func (this *IngressBackend) String() string { } s := strings.Join([]string{`&IngressBackend{`, `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, + `ServicePort:` + strings.Replace(strings.Replace(this.ServicePort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6183,14 +4082,9 @@ func (this *IngressList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Ingress{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", 
"v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Ingress", "Ingress", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6211,7 +4105,7 @@ func (this *IngressRuleValue) String() string { return "nil" } s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, + `HTTP:` + strings.Replace(fmt.Sprintf("%v", this.HTTP), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, `}`, }, "") return s @@ -6220,21 +4114,10 @@ func (this *IngressSpec) String() string { if this == nil { return "nil" } - repeatedStringForTLS := "[]IngressTLS{" - for _, f := range this.TLS { - repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," - } - repeatedStringForTLS += "}" - repeatedStringForRules := "[]IngressRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + repeatedStringForTLS + `,`, - `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, + `Backend:` + strings.Replace(fmt.Sprintf("%v", this.Backend), "IngressBackend", "IngressBackend", 1) + `,`, + `TLS:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.TLS), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "IngressRule", "IngressRule", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6244,7 +4127,7 @@ func (this *IngressStatus) String() string { return "nil" } s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, + `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "k8s_io_api_core_v1.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6265,7 +4148,7 @@ func (this *NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -6275,19 +4158,9 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForTo := "[]NetworkPolicyPeer{" - for _, f := range this.To { - repeatedStringForTo += 
strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForTo += "}" s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `To:` + repeatedStringForTo + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6296,19 +4169,9 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForFrom := "[]NetworkPolicyPeer{" - for _, f := range this.From { - repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `From:` + repeatedStringForFrom + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6317,14 +4180,9 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]NetworkPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6334,9 +4192,9 @@ func (this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -6347,7 +4205,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, 
`Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + `,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -6356,20 +4214,10 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } - repeatedStringForIngress := "[]NetworkPolicyIngressRule{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - repeatedStringForEgress := "[]NetworkPolicyEgressRule{" - for _, f := range this.Egress { - repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + repeatedStringForIngress + `,`, - `Egress:` + repeatedStringForEgress + `,`, + `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, + `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -6380,7 +4228,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -6390,14 +4238,9 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6406,26 +4249,6 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - 
repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -6433,7 +4256,7 @@ func (this *PodSecurityPolicySpec) String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, + `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -6443,14 +4266,11 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, + `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, + `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, `}`, }, "") return s @@ -6460,7 +4280,7 @@ func (this *ReplicaSet) String() string { return "nil" } s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -6474,7 +4294,7 @@ func (this *ReplicaSetCondition) String() string { s := strings.Join([]string{`&ReplicaSetCondition{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LastTransitionTime), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, @@ -6485,14 +4305,9 @@ func (this *ReplicaSetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ReplicaSet{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6503,8 +4318,8 @@ func (this *ReplicaSetSpec) String() string { } s := strings.Join([]string{`&ReplicaSetSpec{`, `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "v11.PodTemplateSpec", 1), `&`, ``, 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, `}`, }, "") @@ -6514,65 +4329,53 @@ func (this *ReplicaSetStatus) String() string { if this == nil { return "nil" } - repeatedStringForConditions := "[]ReplicaSetCondition{" - for _, f := range this.Conditions { - repeatedStringForConditions += strings.Replace(strings.Replace(f.String(), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + "," - } - repeatedStringForConditions += "}" s := strings.Join([]string{`&ReplicaSetStatus{`, `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, `AvailableReplicas:` + fmt.Sprintf("%v", 
this.AvailableReplicas) + `,`, - `Conditions:` + repeatedStringForConditions + `,`, + `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, `}`, }, "") return s } -func (this *RollbackConfig) String() string { +func (this *ReplicationControllerDummy) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, + s := strings.Join([]string{`&ReplicationControllerDummy{`, `}`, }, "") return s } -func (this *RollingUpdateDaemonSet) String() string { +func (this *RollbackConfig) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + s := strings.Join([]string{`&RollbackConfig{`, + `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, `}`, }, "") return s } -func (this *RollingUpdateDeployment) String() string { +func (this *RollingUpdateDaemonSet) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "intstr.IntOrString", 1) + `,`, + s := strings.Join([]string{`&RollingUpdateDaemonSet{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s } -func (this *RunAsGroupStrategyOptions) String() string { +func (this *RollingUpdateDeployment) String() string { if this == nil { return "nil" } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, + s := strings.Join([]string{`&RollingUpdateDeployment{`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -6581,25 +4384,9 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), 
"IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -6610,7 +4397,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -6620,7 +4407,7 @@ func (this *Scale) String() string { return "nil" } s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -6663,27 +4450,200 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," + s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, + `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Driver = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + 
+ if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.PathPrefix = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" + + if iNdEx > l { + return io.ErrUnexpectedEOF } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) + return nil } -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { +func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6698,7 +4658,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6706,10 +4666,10 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") + return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -6726,7 +4686,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6736,13 +4696,40 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6753,9 +4740,6 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6768,7 +4752,7 @@ func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { } return nil } -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { +func (m *CustomMetricCurrentStatusList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6783,7 +4767,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6791,17 +4775,17 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") + return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6811,23 +4795,22 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Driver = string(dAtA[iNdEx:postIndex]) + m.Items = append(m.Items, CustomMetricCurrentStatus{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6838,9 +4821,6 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6853,7 +4833,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } return nil } -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { +func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6868,7 +4848,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6876,15 +4856,15 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") + return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6896,7 +4876,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6906,19 +4886,16 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6928,12 +4905,22 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.ReadOnly = bool(v != 0) + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6943,7 +4930,85 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + 
} + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, CustomMetricTarget{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { return ErrInvalidLengthGenerated } if (iNdEx + skippy) > l { @@ -6973,7 +5038,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7001,7 +5066,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7010,9 +5075,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7034,7 +5096,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7043,9 +5105,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7067,7 +5126,7 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7076,9 +5135,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7095,9 +5151,6 @@ func (m *DaemonSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7125,7 +5178,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7153,7 +5206,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7163,9 +5216,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7185,7 +5235,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7195,9 +5245,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7217,7 +5264,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7226,9 +5273,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7250,7 +5294,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7260,9 +5304,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7282,7 +5323,7 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7292,9 +5333,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7309,9 +5347,6 @@ func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7339,7 +5374,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7367,7 +5402,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7376,9 +5411,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7400,7 +5432,7 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7409,9 +5441,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7429,9 +5458,6 @@ func (m *DaemonSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7459,7 
+5485,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7487,7 +5513,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7496,14 +5522,11 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -7523,7 +5546,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7532,9 +5555,6 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7556,7 +5576,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7565,9 +5585,6 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7589,7 +5606,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7608,7 +5625,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.TemplateGeneration |= int64(b&0x7F) << shift + m.TemplateGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7627,7 +5644,7 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7642,9 +5659,6 @@ func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7672,7 +5686,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7700,7 +5714,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentNumberScheduled |= int32(b&0x7F) << shift + m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7719,7 +5733,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberMisscheduled |= int32(b&0x7F) << shift + m.NumberMisscheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7738,7 +5752,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredNumberScheduled |= int32(b&0x7F) << shift + m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7757,7 +5771,7 @@ func (m 
*DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberReady |= int32(b&0x7F) << shift + m.NumberReady |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7776,7 +5790,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7795,7 +5809,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedNumberScheduled |= int32(b&0x7F) << shift + m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7814,7 +5828,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberAvailable |= int32(b&0x7F) << shift + m.NumberAvailable |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7833,7 +5847,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.NumberUnavailable |= int32(b&0x7F) << shift + m.NumberUnavailable |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7852,7 +5866,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -7872,7 +5886,7 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7881,9 +5895,6 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7901,9 +5912,6 @@ func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7931,7 +5939,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7959,7 +5967,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7969,9 +5977,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7991,7 +5996,7 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8000,9 +6005,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8022,9 +6024,6 @@ func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8052,7 +6051,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire 
|= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8080,7 +6079,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8089,9 +6088,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8113,7 +6109,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8122,9 +6118,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8146,7 +6139,7 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8155,9 +6148,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8174,9 +6164,6 @@ func (m *Deployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8204,7 +6191,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8232,7 +6219,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8242,9 +6229,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8264,7 +6248,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8274,9 +6258,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8296,7 +6277,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8306,9 +6287,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8328,7 +6306,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8338,9 +6316,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8360,7 +6335,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8369,9 +6344,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8393,7 +6365,7 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8402,9 +6374,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8421,9 +6390,6 @@ func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8451,7 +6417,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8479,7 +6445,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8488,9 +6454,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8512,7 +6475,7 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8521,9 +6484,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8541,9 +6501,6 @@ func (m *DeploymentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8571,7 +6528,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8599,7 +6556,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8609,9 +6566,6 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8631,7 +6585,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break 
} @@ -8640,20 +6594,54 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.UpdatedAnnotations == nil { m.UpdatedAnnotations = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8663,86 +6651,41 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 
0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.UpdatedAnnotations[mapkey] = mapvalue + } else { + var mapvalue string + m.UpdatedAnnotations[mapkey] = mapvalue } - m.UpdatedAnnotations[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -8758,7 +6701,7 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8767,9 +6710,6 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8786,9 +6726,6 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8816,7 +6753,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8844,7 +6781,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8864,7 +6801,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8873,14 +6810,11 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -8900,7 +6834,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8909,9 +6843,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8933,7 +6864,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8942,9 +6873,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8966,7 +6894,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -8985,7 +6913,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] 
iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9005,7 +6933,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9025,7 +6953,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9034,9 +6962,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9061,7 +6986,7 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9076,9 +7001,6 @@ func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9106,7 +7028,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9134,7 +7056,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9153,7 +7075,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9172,7 +7094,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UpdatedReplicas |= int32(b&0x7F) << shift + m.UpdatedReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9191,7 +7113,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9210,7 +7132,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.UnavailableReplicas |= int32(b&0x7F) << shift + m.UnavailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9229,7 +7151,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9238,9 +7160,6 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9263,7 +7182,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9282,7 +7201,7 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9297,9 +7216,6 @@ func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return 
ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9327,7 +7243,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9355,7 +7271,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9365,9 +7281,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9387,7 +7300,7 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9396,9 +7309,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9418,9 +7328,6 @@ func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9448,7 +7355,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9476,7 +7383,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9486,9 +7393,6 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9508,7 +7412,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9517,9 +7421,6 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9537,9 +7438,6 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9567,7 +7465,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9595,7 +7493,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9605,9 +7503,6 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9627,7 
+7522,7 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9636,9 +7531,6 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9646,39 +7538,6 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9688,9 +7547,6 @@ func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9718,7 +7574,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9746,7 +7602,7 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9755,9 +7611,6 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9775,9 +7628,6 @@ func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9805,7 +7655,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9833,7 +7683,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int32(b&0x7F) << shift + m.Min |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9852,7 +7702,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int32(b&0x7F) << shift + m.Max |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9866,9 +7716,6 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9896,7 +7743,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9924,7 +7771,7 @@ 
func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int64(b&0x7F) << shift + m.Min |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9943,7 +7790,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int64(b&0x7F) << shift + m.Max |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9957,9 +7804,6 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9987,7 +7831,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10015,7 +7859,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10025,9 +7869,6 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10047,7 +7888,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10057,9 +7898,6 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10074,9 +7912,6 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10104,7 +7939,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10132,7 +7967,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10141,9 +7976,6 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10165,7 +7997,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10174,9 +8006,6 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10198,7 +8027,7 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10207,9 +8036,6 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10226,9 +8052,6 @@ func (m *Ingress) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10256,7 +8079,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10284,7 +8107,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10294,9 +8117,6 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10316,7 +8136,7 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10325,9 +8145,6 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10335,42 +8152,6 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10380,9 +8161,6 @@ func (m *IngressBackend) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10410,7 +8188,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10438,7 +8216,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10447,9 +8225,6 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10471,7 +8246,7 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10480,9 +8255,6 @@ func (m *IngressList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10500,9 +8272,6 
@@ func (m *IngressList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10530,7 +8299,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10558,7 +8327,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10568,9 +8337,6 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10590,7 +8356,7 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10599,9 +8365,6 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10618,9 +8381,6 @@ func (m *IngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10648,7 +8408,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10676,7 +8436,7 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10685,9 +8445,6 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10707,9 +8464,6 @@ func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10737,7 +8491,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10765,7 +8519,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10774,9 +8528,6 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10801,7 +8552,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10810,9 +8561,6 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -10835,7 +8583,7 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10844,9 +8592,6 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10855,39 +8600,6 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10897,9 +8609,6 @@ func (m *IngressSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10927,7 +8636,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10955,7 +8664,7 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10964,9 +8673,6 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10983,9 +8689,6 @@ func (m *IngressStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11013,7 +8716,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11041,7 +8744,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11051,9 +8754,6 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11073,7 +8773,7 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11083,9 +8783,6 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11100,9 +8797,6 @@ func (m *IngressTLS) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11130,7 +8824,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11158,7 +8852,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11167,9 +8861,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11191,7 +8882,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11200,9 +8891,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11219,9 +8907,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11249,7 +8934,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11277,7 +8962,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11286,9 +8971,6 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11311,7 +8993,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11320,9 +9002,6 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11340,9 +9019,6 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11370,7 +9046,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11398,7 +9074,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11407,9 +9083,6 @@ func (m *NetworkPolicyIngressRule) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11432,7 +9105,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11441,9 +9114,6 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11461,9 +9131,6 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11491,7 +9158,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11519,7 +9186,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11528,9 +9195,6 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11552,7 +9216,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11561,9 +9225,6 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11581,9 +9242,6 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11611,7 +9269,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11639,7 +9297,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11648,14 +9306,11 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} + m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11675,7 +9330,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11684,14 +9339,11 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11711,7 +9363,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11720,9 +9372,6 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11742,9 +9391,6 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11772,7 +9418,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11800,7 +9446,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11810,9 +9456,6 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11833,7 +9476,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11842,14 +9485,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &intstr.IntOrString{} + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -11864,9 +9504,6 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11894,7 +9531,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -11922,7 +9559,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11931,9 +9568,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11955,7 +9589,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11964,9 +9598,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -11989,7 +9620,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -11998,9 +9629,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12023,7 +9651,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12033,9 +9661,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12050,9 +9675,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12080,7 +9702,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12108,7 +9730,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12117,9 +9739,6 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12141,7 +9760,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12150,9 +9769,6 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12169,9 +9785,6 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12199,7 +9812,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12227,7 +9840,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12236,9 +9849,6 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12260,7 +9870,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) 
<< shift if b < 0x80 { break } @@ -12269,9 +9879,6 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12289,9 +9896,6 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -12319,7 +9923,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12347,7 +9951,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12367,7 +9971,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12377,9 +9981,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12399,7 +10000,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12409,9 +10010,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12431,7 +10029,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12441,9 +10039,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12463,7 +10058,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12473,9 +10068,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12495,7 +10087,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12515,7 +10107,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12524,9 +10116,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12549,7 
+10138,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12569,7 +10158,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12589,7 +10178,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12598,9 +10187,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12622,7 +10208,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12631,9 +10217,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12655,7 +10238,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12664,9 +10247,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12688,7 +10268,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12697,9 +10277,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12721,7 +10298,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12741,7 +10318,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12762,7 +10339,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12783,7 +10360,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12792,9 +10369,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12817,7 +10391,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -12826,9 +10400,6 @@ func (m *PodSecurityPolicySpec) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12851,7 +10422,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12861,9 +10432,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12883,7 +10451,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12893,9 +10461,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -12915,7 +10480,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -12925,120 +10490,11 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -13048,9 +10504,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13078,7 +10531,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13106,7 +10559,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13115,9 +10568,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13139,7 +10589,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13148,9 +10598,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13172,7 +10619,7 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13181,9 +10628,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13200,9 +10644,6 @@ func (m *ReplicaSet) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13230,7 +10671,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13258,7 +10699,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13268,9 +10709,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13290,7 +10728,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 
0x80 { break } @@ -13300,9 +10738,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13322,7 +10757,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13330,10 +10765,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if msglen < 0 { return ErrInvalidLengthGenerated } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } @@ -13355,7 +10787,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13365,9 +10797,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13387,7 +10816,7 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13397,9 +10826,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13414,9 +10840,6 @@ func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13444,7 +10867,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13472,7 +10895,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13481,9 +10904,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13505,7 +10925,7 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13514,9 +10934,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13534,9 +10951,6 @@ func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13564,7 +10978,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } 
@@ -13592,7 +11006,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + v |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -13612,7 +11026,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13621,14 +11035,11 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -13648,7 +11059,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13657,9 +11068,6 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -13681,7 +11089,7 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.MinReadySeconds |= int32(b&0x7F) << shift + m.MinReadySeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -13695,9 +11103,6 @@ func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -13725,7 +11130,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13753,7 +11158,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -13772,7 +11177,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.FullyLabeledReplicas |= int32(b&0x7F) << shift + m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -13791,7 +11196,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -13810,7 +11215,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ReadyReplicas |= int32(b&0x7F) << shift + m.ReadyReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -13829,7 +11234,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.AvailableReplicas |= int32(b&0x7F) << shift + m.AvailableReplicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -13848,7 +11253,7 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -13857,9 +11262,6 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -13877,170 +11279,6 @@ func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx 
+ skippy) > l { return io.ErrUnexpectedEOF } @@ -14053,7 +11291,7 @@ func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { } return nil } -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { +func (m *ReplicationControllerDummy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14068,92 +11306,20 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxSurge == nil { - m.MaxSurge = &intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReplicationControllerDummy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReplicationControllerDummy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -14163,9 +11329,6 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14178,7 +11341,7 @@ func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollbackConfig) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14193,7 +11356,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14201,17 +11364,17 @@ func (m 
*RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) } - var stringLen uint64 + m.Revision = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14221,27 +11384,64 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Revision |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14253,7 +11453,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14262,14 +11462,13 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14282,9 +11481,6 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14297,7 +11493,7 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14312,7 +11508,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14320,17 +11516,17 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14340,27 +11536,28 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + if m.MaxUnavailable == nil { + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -14372,7 +11569,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14381,14 +11578,13 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.MaxSurge == nil { + m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} + } + if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -14401,9 +11597,6 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - 
} if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14416,7 +11609,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -14431,7 +11624,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14439,15 +11632,15 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -14459,7 +11652,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14469,19 +11662,16 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14491,24 +11681,22 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -14519,9 +11707,6 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14549,7 +11734,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14577,7 +11762,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14587,9 +11772,6 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14609,7 +11791,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14618,14 +11800,11 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -14640,9 +11819,6 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14670,7 +11846,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14698,7 +11874,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14707,9 +11883,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14731,7 +11904,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14740,9 +11913,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14764,7 +11934,7 @@ func (m *Scale) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14773,9 +11943,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -14792,9 +11959,6 @@ func (m *Scale) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14822,7 +11986,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14850,7 +12014,7 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - 
m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -14864,9 +12028,6 @@ func (m *ScaleSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -14894,7 +12055,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -14922,7 +12083,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Replicas |= int32(b&0x7F) << shift + m.Replicas |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -14941,7 +12102,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -14950,20 +12111,54 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Selector == nil { m.Selector = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -14973,86 +12168,41 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Selector[mapkey] = mapvalue + } else { + var mapvalue string + m.Selector[mapkey] = mapvalue } - m.Selector[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -15068,7 +12218,7 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15078,9 +12228,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15095,9 +12242,6 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15125,7 +12269,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15153,7 +12297,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -15163,9 +12307,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15185,7 +12326,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -15194,9 +12335,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -15214,9 +12352,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } 
- if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -15232,7 +12367,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -15264,8 +12398,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -15282,34 +12418,291 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 3665 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5b, 0xcd, 0x6f, 0x24, 0x49, + 0x56, 0xef, 0xac, 0x2a, 0xbb, 0xca, 0xcf, 0xed, 0xaf, 0xb0, 0xdb, 0x5d, 0xdb, 0x33, 0xed, 0xea, + 0xcd, 0x91, 0x9a, 0x9e, 0xa1, 0xa7, 0x6a, 0xba, 0xe7, 0x63, 0x87, 0x69, 0xb1, 0xbb, 0x2e, 0xbb, + 0xdd, 0xed, 0xc5, 0x1f, 0x35, 0x51, 0x76, 0xb3, 0x8c, 0x98, 0x65, 0xd2, 0x55, 0xe1, 0x72, 0x8e, + 0xb3, 0x32, 0x73, 0x33, 0x22, 0xbd, 0x2e, 0x09, 0x21, 0x0e, 0x08, 0x09, 0x09, 0x04, 0x1c, 0x96, + 0x0f, 0x71, 0x61, 0x2f, 0x9c, 0x40, 0x70, 0x83, 0xc3, 0x6a, 0x24, 0xa4, 0x45, 0x1a, 0xa1, 0x45, + 0xda, 0x1b, 0x7b, 0xb2, 0x18, 0xcf, 0x09, 0xf1, 0x0f, 0xa0, 0x3e, 0x20, 0x14, 0x91, 0x91, 0xdf, + 0x99, 0xae, 0x2a, 0x4f, 0xb7, 0x85, 0xd0, 0xde, 0x2a, 0xe3, 0xbd, 0xf7, 0x7b, 0x2f, 0x22, 0x5e, + 0xbc, 0xf7, 0xe2, 0xa3, 0x60, 0xe3, 0xf8, 0x7d, 0x5a, 0xd7, 0xad, 0xc6, 0xb1, 0x7b, 0x40, 0x1c, + 0x93, 0x30, 0x42, 0x1b, 0x27, 0xc4, 0xec, 0x5a, 0x4e, 0x43, 0x12, 0x34, 0x5b, 0x6f, 0x90, 0x53, + 0x46, 0x4c, 0xaa, 0x5b, 0x26, 0x6d, 0x9c, 0x3c, 0x38, 0x20, 0x4c, 0x7b, 0xd0, 0xe8, 0x11, 0x93, + 0x38, 0x1a, 0x23, 0xdd, 0xba, 0xed, 0x58, 0xcc, 0x42, 0xb7, 0x3d, 0xf6, 0xba, 0x66, 0xeb, 0xf5, + 0x90, 0xbd, 0x2e, 0xd9, 0x6f, 
0xbd, 0xd9, 0xd3, 0xd9, 0x91, 0x7b, 0x50, 0xef, 0x58, 0xfd, 0x46, + 0xcf, 0xea, 0x59, 0x0d, 0x21, 0x75, 0xe0, 0x1e, 0x8a, 0x2f, 0xf1, 0x21, 0x7e, 0x79, 0x68, 0xb7, + 0xd4, 0x88, 0xf2, 0x8e, 0xe5, 0x90, 0xc6, 0x49, 0x4a, 0xe3, 0xad, 0x77, 0x42, 0x9e, 0xbe, 0xd6, + 0x39, 0xd2, 0x4d, 0xe2, 0x0c, 0x1a, 0xf6, 0x71, 0x4f, 0x08, 0x39, 0x84, 0x5a, 0xae, 0xd3, 0x21, + 0x63, 0x49, 0xd1, 0x46, 0x9f, 0x30, 0x2d, 0x4b, 0x57, 0x23, 0x4f, 0xca, 0x71, 0x4d, 0xa6, 0xf7, + 0xd3, 0x6a, 0xde, 0x1b, 0x26, 0x40, 0x3b, 0x47, 0xa4, 0xaf, 0xa5, 0xe4, 0xde, 0xce, 0x93, 0x73, + 0x99, 0x6e, 0x34, 0x74, 0x93, 0x51, 0xe6, 0x24, 0x85, 0xd4, 0x47, 0xb0, 0xb0, 0x6a, 0x18, 0xd6, + 0x0f, 0x48, 0x77, 0xc3, 0x20, 0xa7, 0xcf, 0x2c, 0xc3, 0xed, 0x13, 0x74, 0x17, 0x26, 0xbb, 0x8e, + 0x7e, 0x42, 0x9c, 0xaa, 0x72, 0x47, 0xb9, 0x37, 0xd5, 0x9c, 0xfd, 0xfc, 0xac, 0x76, 0xed, 0xfc, + 0xac, 0x36, 0xb9, 0x2e, 0x5a, 0xb1, 0xa4, 0xaa, 0x14, 0xe6, 0xa4, 0xf0, 0x53, 0x8b, 0xb2, 0x96, + 0xc6, 0x8e, 0xd0, 0x43, 0x00, 0x5b, 0x63, 0x47, 0x2d, 0x87, 0x1c, 0xea, 0xa7, 0x52, 0x1c, 0x49, + 0x71, 0x68, 0x05, 0x14, 0x1c, 0xe1, 0x42, 0xf7, 0xa1, 0xe2, 0x10, 0xad, 0xbb, 0x6b, 0x1a, 0x83, + 0x6a, 0xe1, 0x8e, 0x72, 0xaf, 0xd2, 0x9c, 0x97, 0x12, 0x15, 0x2c, 0xdb, 0x71, 0xc0, 0xa1, 0xfe, + 0xa5, 0x02, 0x5f, 0x5b, 0x73, 0x29, 0xb3, 0xfa, 0xdb, 0x84, 0x39, 0x7a, 0x67, 0xcd, 0x75, 0x1c, + 0x62, 0xb2, 0x36, 0xd3, 0x98, 0x4b, 0xd1, 0x1d, 0x28, 0x99, 0x5a, 0x9f, 0x48, 0xcd, 0xd7, 0x25, + 0x4e, 0x69, 0x47, 0xeb, 0x13, 0x2c, 0x28, 0xe8, 0x23, 0x98, 0x38, 0xd1, 0x0c, 0x97, 0x08, 0x55, + 0xd3, 0x0f, 0xeb, 0xf5, 0xd0, 0xfb, 0x82, 0x61, 0xab, 0xdb, 0xc7, 0x3d, 0xe1, 0x8e, 0xbe, 0x2f, + 0xd4, 0x3f, 0x74, 0x35, 0x93, 0xe9, 0x6c, 0xd0, 0x5c, 0x92, 0x90, 0xd7, 0xa5, 0xde, 0x67, 0x1c, + 0x0b, 0x7b, 0x90, 0xea, 0xef, 0xc0, 0xed, 0x5c, 0xd3, 0xb6, 0x74, 0xca, 0xd0, 0xc7, 0x30, 0xa1, + 0x33, 0xd2, 0xa7, 0x55, 0xe5, 0x4e, 0xf1, 0xde, 0xf4, 0xc3, 0xf7, 0xeb, 0x17, 0xba, 0x7e, 0x3d, + 0x17, 0xac, 0x39, 0x23, 0xcd, 0x98, 0xd8, 0xe4, 0x70, 0xd8, 0x43, 0x55, 0xff, 0x54, 0x01, 0x14, + 0x95, 0xd9, 0xd3, 0x9c, 0x1e, 0x61, 0x23, 0x0c, 0xca, 0x6f, 0x7c, 0xb5, 0x41, 0x59, 0x94, 0x90, + 0xd3, 0x9e, 0xc2, 0xd8, 0x98, 0xd8, 0xb0, 0x9c, 0x36, 0x49, 0x0c, 0xc6, 0xb3, 0xf8, 0x60, 0x3c, + 0x18, 0x63, 0x30, 0x3c, 0x94, 0x9c, 0x51, 0xf8, 0x61, 0x01, 0xa6, 0xd6, 0x35, 0xd2, 0xb7, 0xcc, + 0x36, 0x61, 0xe8, 0x13, 0xa8, 0xf0, 0xa5, 0xd9, 0xd5, 0x98, 0x26, 0x06, 0x60, 0xfa, 0xe1, 0x5b, + 0x17, 0xf5, 0x8e, 0xd6, 0x39, 0x77, 0xfd, 0xe4, 0x41, 0x7d, 0xf7, 0xe0, 0x53, 0xd2, 0x61, 0xdb, + 0x84, 0x69, 0xa1, 0x07, 0x87, 0x6d, 0x38, 0x40, 0x45, 0x3b, 0x50, 0xa2, 0x36, 0xe9, 0xc8, 0xb1, + 0xbb, 0x3f, 0xa4, 0x1b, 0x81, 0x65, 0x6d, 0x9b, 0x74, 0xc2, 0xc9, 0xe0, 0x5f, 0x58, 0xe0, 0xa0, + 0x67, 0x30, 0x49, 0xc5, 0x2c, 0x57, 0x8b, 0xa9, 0xd9, 0xb8, 0x18, 0xd1, 0xf3, 0x8d, 0x60, 0xb9, + 0x7a, 0xdf, 0x58, 0xa2, 0xa9, 0xff, 0x59, 0x00, 0x14, 0xf0, 0xae, 0x59, 0x66, 0x57, 0x67, 0xba, + 0x65, 0xa2, 0x0f, 0xa0, 0xc4, 0x06, 0xb6, 0xef, 0x1d, 0x77, 0x7d, 0x83, 0xf6, 0x06, 0x36, 0x79, + 0x7e, 0x56, 0x5b, 0x4e, 0x4b, 0x70, 0x0a, 0x16, 0x32, 0x68, 0x2b, 0x30, 0xb5, 0x20, 0xa4, 0xdf, + 0x89, 0xab, 0x7e, 0x7e, 0x56, 0xcb, 0x08, 0xc7, 0xf5, 0x00, 0x29, 0x6e, 0x20, 0x3a, 0x01, 0x64, + 0x68, 0x94, 0xed, 0x39, 0x9a, 0x49, 0x3d, 0x4d, 0x7a, 0x9f, 0xc8, 0x41, 0x78, 0x63, 0xb4, 0x49, + 0xe3, 0x12, 0xcd, 0x5b, 0xd2, 0x0a, 0xb4, 0x95, 0x42, 0xc3, 0x19, 0x1a, 0x78, 0xbc, 0x73, 0x88, + 0x46, 0x2d, 0xb3, 0x5a, 0x8a, 0xc7, 0x3b, 0x2c, 0x5a, 0xb1, 0xa4, 0xa2, 0xd7, 0xa1, 0xdc, 0x27, + 0x94, 0x6a, 0x3d, 0x52, 0x9d, 0x10, 0x8c, 0x73, 0x92, 
0xb1, 0xbc, 0xed, 0x35, 0x63, 0x9f, 0xae, + 0xfe, 0x58, 0x81, 0x99, 0x60, 0xe4, 0x84, 0xb7, 0xff, 0x66, 0xca, 0x0f, 0xeb, 0xa3, 0x75, 0x89, + 0x4b, 0x0b, 0x2f, 0x0c, 0xa2, 0xa2, 0xdf, 0x12, 0xf1, 0xc1, 0x6d, 0x7f, 0x2d, 0x15, 0xc4, 0x5a, + 0xba, 0x37, 0xaa, 0xcb, 0xe4, 0x2c, 0xa1, 0x3f, 0x2b, 0x45, 0xcc, 0xe7, 0xae, 0x89, 0x3e, 0x86, + 0x0a, 0x25, 0x06, 0xe9, 0x30, 0xcb, 0x91, 0xe6, 0xbf, 0x3d, 0xa2, 0xf9, 0xda, 0x01, 0x31, 0xda, + 0x52, 0xb4, 0x79, 0x9d, 0xdb, 0xef, 0x7f, 0xe1, 0x00, 0x12, 0x7d, 0x08, 0x15, 0x46, 0xfa, 0xb6, + 0xa1, 0x31, 0x3f, 0x06, 0xbd, 0x16, 0xed, 0x02, 0xf7, 0x1c, 0x0e, 0xd6, 0xb2, 0xba, 0x7b, 0x92, + 0x4d, 0x2c, 0x9f, 0x60, 0x48, 0xfc, 0x56, 0x1c, 0xc0, 0xa0, 0x13, 0x98, 0x75, 0xed, 0x2e, 0xe7, + 0x64, 0x3c, 0xe3, 0xf5, 0x06, 0xd2, 0x93, 0xde, 0x1b, 0x75, 0x6c, 0xf6, 0x63, 0xd2, 0xcd, 0x65, + 0xa9, 0x6b, 0x36, 0xde, 0x8e, 0x13, 0x5a, 0xd0, 0x2a, 0xcc, 0xf5, 0x75, 0x93, 0x67, 0xae, 0x41, + 0x9b, 0x74, 0x2c, 0xb3, 0x4b, 0x85, 0x5b, 0x4d, 0x34, 0x6f, 0x4a, 0x80, 0xb9, 0xed, 0x38, 0x19, + 0x27, 0xf9, 0xd1, 0x77, 0x00, 0xf9, 0xdd, 0x78, 0xe2, 0x25, 0x6c, 0xdd, 0x32, 0x85, 0xcf, 0x15, + 0x43, 0xe7, 0xde, 0x4b, 0x71, 0xe0, 0x0c, 0x29, 0xb4, 0x05, 0x4b, 0x0e, 0x39, 0xd1, 0x79, 0x1f, + 0x9f, 0xea, 0x94, 0x59, 0xce, 0x60, 0x4b, 0xef, 0xeb, 0xac, 0x3a, 0x29, 0x6c, 0xaa, 0x9e, 0x9f, + 0xd5, 0x96, 0x70, 0x06, 0x1d, 0x67, 0x4a, 0xa9, 0x7f, 0x3e, 0x09, 0x73, 0x89, 0x78, 0x83, 0x9e, + 0xc1, 0x72, 0xc7, 0x4b, 0x4e, 0x3b, 0x6e, 0xff, 0x80, 0x38, 0xed, 0xce, 0x11, 0xe9, 0xba, 0x06, + 0xe9, 0x0a, 0x47, 0x99, 0x68, 0xae, 0x48, 0x8b, 0x97, 0xd7, 0x32, 0xb9, 0x70, 0x8e, 0x34, 0x1f, + 0x05, 0x53, 0x34, 0x6d, 0xeb, 0x94, 0x06, 0x98, 0x05, 0x81, 0x19, 0x8c, 0xc2, 0x4e, 0x8a, 0x03, + 0x67, 0x48, 0x71, 0x1b, 0xbb, 0x84, 0xea, 0x0e, 0xe9, 0x26, 0x6d, 0x2c, 0xc6, 0x6d, 0x5c, 0xcf, + 0xe4, 0xc2, 0x39, 0xd2, 0xe8, 0x5d, 0x98, 0xf6, 0xb4, 0x89, 0xf9, 0x93, 0x13, 0x1d, 0xa4, 0xc3, + 0x9d, 0x90, 0x84, 0xa3, 0x7c, 0xbc, 0x6b, 0xd6, 0x01, 0x25, 0xce, 0x09, 0xe9, 0xe6, 0x4f, 0xf0, + 0x6e, 0x8a, 0x03, 0x67, 0x48, 0xf1, 0xae, 0x79, 0x1e, 0x98, 0xea, 0xda, 0x64, 0xbc, 0x6b, 0xfb, + 0x99, 0x5c, 0x38, 0x47, 0x9a, 0xfb, 0xb1, 0x67, 0xf2, 0xea, 0x89, 0xa6, 0x1b, 0xda, 0x81, 0x41, + 0xaa, 0xe5, 0xb8, 0x1f, 0xef, 0xc4, 0xc9, 0x38, 0xc9, 0x8f, 0x9e, 0xc0, 0x82, 0xd7, 0xb4, 0x6f, + 0x6a, 0x01, 0x48, 0x45, 0x80, 0x7c, 0x4d, 0x82, 0x2c, 0xec, 0x24, 0x19, 0x70, 0x5a, 0x06, 0x7d, + 0x00, 0xb3, 0x1d, 0xcb, 0x30, 0x84, 0x3f, 0xae, 0x59, 0xae, 0xc9, 0xaa, 0x53, 0x02, 0x05, 0xf1, + 0xf5, 0xb8, 0x16, 0xa3, 0xe0, 0x04, 0x27, 0x22, 0x00, 0x1d, 0x3f, 0xe1, 0xd0, 0x2a, 0x8c, 0x54, + 0x6b, 0xa4, 0x93, 0x5e, 0x58, 0x03, 0x04, 0x4d, 0x14, 0x47, 0x80, 0xd5, 0x7f, 0x55, 0xe0, 0x66, + 0x4e, 0xe8, 0x40, 0xdf, 0x8a, 0xa5, 0xd8, 0x5f, 0x4e, 0xa4, 0xd8, 0x57, 0x72, 0xc4, 0x22, 0x79, + 0xd6, 0x84, 0x19, 0x87, 0xf7, 0xca, 0xec, 0x79, 0x2c, 0x32, 0x46, 0xbe, 0x3b, 0xa4, 0x1b, 0x38, + 0x2a, 0x13, 0xc6, 0xfc, 0x85, 0xf3, 0xb3, 0xda, 0x4c, 0x8c, 0x86, 0xe3, 0xf0, 0xea, 0x5f, 0x14, + 0x00, 0xd6, 0x89, 0x6d, 0x58, 0x83, 0x3e, 0x31, 0xaf, 0xa2, 0x86, 0xda, 0x8d, 0xd5, 0x50, 0x6f, + 0x0e, 0x9b, 0x9e, 0xc0, 0xb4, 0xdc, 0x22, 0xea, 0xd7, 0x13, 0x45, 0x54, 0x63, 0x74, 0xc8, 0x8b, + 0xab, 0xa8, 0x7f, 0x2f, 0xc2, 0x62, 0xc8, 0x1c, 0x96, 0x51, 0x8f, 0x62, 0x73, 0xfc, 0x4b, 0x89, + 0x39, 0xbe, 0x99, 0x21, 0xf2, 0xd2, 0xea, 0xa8, 0x17, 0x5f, 0xcf, 0xa0, 0x4f, 0x61, 0x96, 0x17, + 0x4e, 0x9e, 0x7b, 0x88, 0xb2, 0x6c, 0x72, 0xec, 0xb2, 0x2c, 0x48, 0xa0, 0x5b, 0x31, 0x24, 0x9c, + 0x40, 0xce, 0x29, 0x03, 0xcb, 0x2f, 0xbb, 0x0c, 0x54, 0x3f, 0x53, 0x60, 0x36, 
0x9c, 0xa6, 0x2b, + 0x28, 0xda, 0x76, 0xe2, 0x45, 0xdb, 0xeb, 0x23, 0xbb, 0x68, 0x4e, 0xd5, 0xf6, 0xdf, 0xbc, 0xc0, + 0x0f, 0x98, 0xf8, 0x02, 0x3f, 0xd0, 0x3a, 0xc7, 0x23, 0x6c, 0xff, 0x7e, 0xa8, 0x00, 0x92, 0x59, + 0x60, 0xd5, 0x34, 0x2d, 0xa6, 0x79, 0xb1, 0xd2, 0x33, 0x6b, 0x73, 0x64, 0xb3, 0x7c, 0x8d, 0xf5, + 0xfd, 0x14, 0xd6, 0x63, 0x93, 0x39, 0x83, 0x70, 0x46, 0xd2, 0x0c, 0x38, 0xc3, 0x00, 0xa4, 0x01, + 0x38, 0x12, 0x73, 0xcf, 0x92, 0x0b, 0xf9, 0xcd, 0x11, 0x62, 0x1e, 0x17, 0x58, 0xb3, 0xcc, 0x43, + 0xbd, 0x17, 0x86, 0x1d, 0x1c, 0x00, 0xe1, 0x08, 0xe8, 0xad, 0xc7, 0x70, 0x33, 0xc7, 0x5a, 0x34, + 0x0f, 0xc5, 0x63, 0x32, 0xf0, 0x86, 0x0d, 0xf3, 0x9f, 0x68, 0x29, 0xba, 0x4d, 0x9e, 0x92, 0x3b, + 0xdc, 0x0f, 0x0a, 0xef, 0x2b, 0xea, 0x8f, 0x27, 0xa2, 0xbe, 0x23, 0x2a, 0xe6, 0x7b, 0x50, 0x71, + 0x88, 0x6d, 0xe8, 0x1d, 0x8d, 0xca, 0x42, 0xe8, 0xba, 0x77, 0xa4, 0xe1, 0xb5, 0xe1, 0x80, 0x1a, + 0xab, 0xad, 0x0b, 0x2f, 0xb7, 0xb6, 0x2e, 0xbe, 0x98, 0xda, 0xfa, 0xb7, 0xa0, 0x42, 0xfd, 0xaa, + 0xba, 0x24, 0x20, 0x1f, 0x8c, 0x11, 0x5f, 0x65, 0x41, 0x1d, 0x28, 0x08, 0x4a, 0xe9, 0x00, 0x34, + 0xab, 0x88, 0x9e, 0x18, 0xb3, 0x88, 0x7e, 0xa1, 0x85, 0x2f, 0x8f, 0xa9, 0xb6, 0xe6, 0x52, 0xd2, + 0x15, 0x81, 0xa8, 0x12, 0xc6, 0xd4, 0x96, 0x68, 0xc5, 0x92, 0x8a, 0x3e, 0x8e, 0xb9, 0x6c, 0xe5, + 0x32, 0x2e, 0x3b, 0x9b, 0xef, 0xae, 0x68, 0x1f, 0x6e, 0xda, 0x8e, 0xd5, 0x73, 0x08, 0xa5, 0xeb, + 0x44, 0xeb, 0x1a, 0xba, 0x49, 0xfc, 0xf1, 0xf1, 0x2a, 0xa2, 0x57, 0xce, 0xcf, 0x6a, 0x37, 0x5b, + 0xd9, 0x2c, 0x38, 0x4f, 0x56, 0xfd, 0xbc, 0x04, 0xf3, 0xc9, 0x0c, 0x98, 0x53, 0xa4, 0x2a, 0x97, + 0x2a, 0x52, 0xef, 0x47, 0x16, 0x83, 0x57, 0xc1, 0x47, 0xce, 0xf8, 0x52, 0x0b, 0x62, 0x15, 0xe6, + 0x64, 0x34, 0xf0, 0x89, 0xb2, 0x4c, 0x0f, 0x66, 0x7f, 0x3f, 0x4e, 0xc6, 0x49, 0x7e, 0x5e, 0x7a, + 0x86, 0x15, 0xa5, 0x0f, 0x52, 0x8a, 0x97, 0x9e, 0xab, 0x49, 0x06, 0x9c, 0x96, 0x41, 0xdb, 0xb0, + 0xe8, 0x9a, 0x69, 0x28, 0xcf, 0x1b, 0x5f, 0x91, 0x50, 0x8b, 0xfb, 0x69, 0x16, 0x9c, 0x25, 0x87, + 0x0e, 0x63, 0xd5, 0xe8, 0xa4, 0x88, 0xb0, 0x0f, 0x47, 0x5e, 0x3b, 0x23, 0x97, 0xa3, 0xe8, 0x11, + 0xcc, 0x38, 0x62, 0xdf, 0xe1, 0x1b, 0xec, 0xd5, 0xee, 0x37, 0xa4, 0xd8, 0x0c, 0x8e, 0x12, 0x71, + 0x9c, 0x37, 0xa3, 0xdc, 0xae, 0x8c, 0x5a, 0x6e, 0xab, 0xff, 0xac, 0x44, 0x93, 0x50, 0x50, 0x02, + 0x0f, 0x3b, 0x65, 0x4a, 0x49, 0x44, 0xaa, 0x23, 0x2b, 0xbb, 0xfa, 0x7d, 0x6f, 0xac, 0xea, 0x37, + 0x4c, 0x9e, 0xc3, 0xcb, 0xdf, 0x1f, 0x29, 0xb0, 0xbc, 0xd1, 0x7e, 0xe2, 0x58, 0xae, 0xed, 0x9b, + 0xb3, 0x6b, 0x7b, 0xe3, 0xfa, 0x0d, 0x28, 0x39, 0xae, 0xe1, 0xf7, 0xe3, 0x35, 0xbf, 0x1f, 0xd8, + 0x35, 0x78, 0x3f, 0x16, 0x13, 0x52, 0x5e, 0x27, 0xb8, 0x00, 0xda, 0x81, 0x49, 0x47, 0x33, 0x7b, + 0xc4, 0x4f, 0xab, 0x77, 0x87, 0x58, 0xbf, 0xb9, 0x8e, 0x39, 0x7b, 0xa4, 0x78, 0x13, 0xd2, 0x58, + 0xa2, 0xa8, 0x7f, 0xa4, 0xc0, 0xdc, 0xd3, 0xbd, 0xbd, 0xd6, 0xa6, 0x29, 0x56, 0xb4, 0x38, 0x7d, + 0xbf, 0x03, 0x25, 0x5b, 0x63, 0x47, 0xc9, 0x4c, 0xcf, 0x69, 0x58, 0x50, 0xd0, 0x77, 0xa1, 0xcc, + 0x23, 0x09, 0x31, 0xbb, 0x23, 0x96, 0xda, 0x12, 0xbe, 0xe9, 0x09, 0x85, 0x15, 0xa2, 0x6c, 0xc0, + 0x3e, 0x9c, 0x7a, 0x0c, 0x4b, 0x11, 0x73, 0xf8, 0x78, 0x88, 0x63, 0x60, 0xd4, 0x86, 0x09, 0xae, + 0xd9, 0x3f, 0xe5, 0x1d, 0x76, 0x98, 0x99, 0xe8, 0x52, 0x58, 0xe9, 0xf0, 0x2f, 0x8a, 0x3d, 0x2c, + 0x75, 0x1b, 0x66, 0xc4, 0x95, 0x83, 0xe5, 0x30, 0x31, 0x2c, 0xe8, 0x36, 0x14, 0xfb, 0xba, 0x29, + 0xf3, 0xec, 0xb4, 0x94, 0x29, 0xf2, 0x1c, 0xc1, 0xdb, 0x05, 0x59, 0x3b, 0x95, 0x91, 0x27, 0x24, + 0x6b, 0xa7, 0x98, 0xb7, 0xab, 0x4f, 0xa0, 0x2c, 0x87, 0x3b, 0x0a, 0x54, 0xbc, 0x18, 0xa8, 0x98, + 0x01, 
0xb4, 0x0b, 0xe5, 0xcd, 0x56, 0xd3, 0xb0, 0xbc, 0xaa, 0xab, 0xa3, 0x77, 0x9d, 0xe4, 0x5c, + 0xac, 0x6d, 0xae, 0x63, 0x2c, 0x28, 0x48, 0x85, 0x49, 0x72, 0xda, 0x21, 0x36, 0x13, 0x1e, 0x31, + 0xd5, 0x04, 0x3e, 0xcb, 0x8f, 0x45, 0x0b, 0x96, 0x14, 0xf5, 0x8f, 0x0b, 0x50, 0x96, 0xc3, 0x71, + 0x05, 0xbb, 0xb0, 0xad, 0xd8, 0x2e, 0xec, 0x8d, 0xd1, 0x5c, 0x23, 0x77, 0x0b, 0xb6, 0x97, 0xd8, + 0x82, 0xdd, 0x1f, 0x11, 0xef, 0xe2, 0xfd, 0xd7, 0x3f, 0x28, 0x30, 0x1b, 0x77, 0x4a, 0xf4, 0x2e, + 0x4c, 0xf3, 0x84, 0xa3, 0x77, 0xc8, 0x4e, 0x58, 0xe7, 0x06, 0x87, 0x30, 0xed, 0x90, 0x84, 0xa3, + 0x7c, 0xa8, 0x17, 0x88, 0x71, 0x3f, 0x92, 0x9d, 0xce, 0x1f, 0x52, 0x97, 0xe9, 0x46, 0xdd, 0xbb, + 0x46, 0xab, 0x6f, 0x9a, 0x6c, 0xd7, 0x69, 0x33, 0x47, 0x37, 0x7b, 0x29, 0x45, 0xc2, 0x29, 0xa3, + 0xc8, 0xea, 0x3f, 0x29, 0x30, 0x2d, 0x4d, 0xbe, 0x82, 0x5d, 0xc5, 0xaf, 0xc5, 0x77, 0x15, 0x77, + 0x47, 0x5c, 0xe0, 0xd9, 0x5b, 0x8a, 0xbf, 0x09, 0x4d, 0xe7, 0x4b, 0x9a, 0x7b, 0xf5, 0x91, 0x45, + 0x59, 0xd2, 0xab, 0xf9, 0x62, 0xc4, 0x82, 0x82, 0x5c, 0x98, 0xd7, 0x13, 0x31, 0x40, 0x0e, 0x6d, + 0x63, 0x34, 0x4b, 0x02, 0xb1, 0x66, 0x55, 0xc2, 0xcf, 0x27, 0x29, 0x38, 0xa5, 0x42, 0x25, 0x90, + 0xe2, 0x42, 0x1f, 0x42, 0xe9, 0x88, 0x31, 0x3b, 0xe3, 0xbc, 0x7a, 0x48, 0xe4, 0x09, 0x4d, 0xa8, + 0x88, 0xde, 0xed, 0xed, 0xb5, 0xb0, 0x80, 0x52, 0xff, 0x27, 0x1c, 0x8f, 0xb6, 0xe7, 0xe3, 0x41, + 0x3c, 0x55, 0x2e, 0x13, 0x4f, 0xa7, 0xb3, 0x62, 0x29, 0x7a, 0x0a, 0x45, 0x66, 0x8c, 0xba, 0x2d, + 0x94, 0x88, 0x7b, 0x5b, 0xed, 0x30, 0x20, 0xed, 0x6d, 0xb5, 0x31, 0x87, 0x40, 0xbb, 0x30, 0xc1, + 0xb3, 0x0f, 0x5f, 0x82, 0xc5, 0xd1, 0x97, 0x34, 0xef, 0x7f, 0xe8, 0x10, 0xfc, 0x8b, 0x62, 0x0f, + 0x47, 0xfd, 0x3e, 0xcc, 0xc4, 0xd6, 0x29, 0xfa, 0x04, 0xae, 0x1b, 0x96, 0xd6, 0x6d, 0x6a, 0x86, + 0x66, 0x76, 0x88, 0x7f, 0x39, 0x70, 0x37, 0x6b, 0x87, 0xb1, 0x15, 0xe1, 0x93, 0xab, 0x3c, 0xb8, + 0x4e, 0x8d, 0xd2, 0x70, 0x0c, 0x51, 0xd5, 0x00, 0xc2, 0x3e, 0xa2, 0x1a, 0x4c, 0x70, 0x3f, 0xf3, + 0xf2, 0xc9, 0x54, 0x73, 0x8a, 0x5b, 0xc8, 0xdd, 0x8f, 0x62, 0xaf, 0x1d, 0x3d, 0x04, 0xa0, 0xa4, + 0xe3, 0x10, 0x26, 0x82, 0x41, 0x21, 0x7e, 0x05, 0xdd, 0x0e, 0x28, 0x38, 0xc2, 0xa5, 0xfe, 0x8b, + 0x02, 0x33, 0x3b, 0x84, 0xfd, 0xc0, 0x72, 0x8e, 0x5b, 0x96, 0xa1, 0x77, 0x06, 0x57, 0x10, 0x6c, + 0x71, 0x2c, 0xd8, 0xbe, 0x35, 0x64, 0x66, 0x62, 0xd6, 0xe5, 0x85, 0x5c, 0xf5, 0x33, 0x05, 0x6e, + 0xc6, 0x38, 0x1f, 0x87, 0x4b, 0x77, 0x1f, 0x26, 0x6c, 0xcb, 0x61, 0x7e, 0x22, 0x1e, 0x4b, 0x21, + 0x0f, 0x63, 0x91, 0x54, 0xcc, 0x61, 0xb0, 0x87, 0x86, 0xb6, 0xa0, 0xc0, 0x2c, 0xe9, 0xaa, 0xe3, + 0x61, 0x12, 0xe2, 0x34, 0x41, 0x62, 0x16, 0xf6, 0x2c, 0x5c, 0x60, 0x16, 0x9f, 0x88, 0x6a, 0x8c, + 0x2b, 0x1a, 0x7c, 0x5e, 0x52, 0x0f, 0x30, 0x94, 0x0e, 0x1d, 0xab, 0x7f, 0xe9, 0x3e, 0x04, 0x13, + 0xb1, 0xe1, 0x58, 0x7d, 0x2c, 0xb0, 0xd4, 0x9f, 0x28, 0xb0, 0x10, 0xe3, 0xbc, 0x82, 0xc0, 0xff, + 0x61, 0x3c, 0xf0, 0xdf, 0x1f, 0xa7, 0x23, 0x39, 0xe1, 0xff, 0x27, 0x85, 0x44, 0x37, 0x78, 0x87, + 0xd1, 0x21, 0x4c, 0xdb, 0x56, 0xb7, 0xfd, 0x02, 0xae, 0x03, 0xe7, 0x78, 0xde, 0x6c, 0x85, 0x58, + 0x38, 0x0a, 0x8c, 0x4e, 0x61, 0xc1, 0xd4, 0xfa, 0x84, 0xda, 0x5a, 0x87, 0xb4, 0x5f, 0xc0, 0x01, + 0xc9, 0x0d, 0x71, 0xdf, 0x90, 0x44, 0xc4, 0x69, 0x25, 0x68, 0x1b, 0xca, 0xba, 0x2d, 0xea, 0x38, + 0x59, 0xbb, 0x0c, 0xcd, 0xa2, 0x5e, 0xd5, 0xe7, 0xc5, 0x73, 0xf9, 0x81, 0x7d, 0x0c, 0xf5, 0x6f, + 0x93, 0xde, 0xc0, 0xfd, 0x0f, 0x3d, 0x81, 0x8a, 0x78, 0x84, 0xd3, 0xb1, 0x0c, 0xff, 0x66, 0x80, + 0xcf, 0x6c, 0x4b, 0xb6, 0x3d, 0x3f, 0xab, 0xbd, 0x92, 0x71, 0xe8, 0xeb, 0x93, 0x71, 0x20, 0x8c, + 0x76, 0xa0, 0x64, 0x7f, 0x95, 
0x0a, 0x46, 0x24, 0x39, 0x51, 0xb6, 0x08, 0x1c, 0xf5, 0xf7, 0x8a, + 0x09, 0x73, 0x45, 0xaa, 0xfb, 0xf4, 0x85, 0xcd, 0x7a, 0x50, 0x31, 0xe5, 0xce, 0xfc, 0x01, 0x94, + 0x65, 0x86, 0x97, 0xce, 0xfc, 0x8d, 0x71, 0x9c, 0x39, 0x9a, 0xc5, 0x82, 0x0d, 0x8b, 0xdf, 0xe8, + 0x03, 0xa3, 0xef, 0xc1, 0x24, 0xf1, 0x54, 0x78, 0xb9, 0xf1, 0xbd, 0x71, 0x54, 0x84, 0x71, 0x35, + 0x2c, 0x54, 0x65, 0x9b, 0x44, 0x45, 0xdf, 0xe2, 0xe3, 0xc5, 0x79, 0xf9, 0x26, 0x90, 0x56, 0x4b, + 0x22, 0x5d, 0xdd, 0xf6, 0xba, 0x1d, 0x34, 0x3f, 0x3f, 0xab, 0x41, 0xf8, 0x89, 0xa3, 0x12, 0xea, + 0xbf, 0x29, 0xb0, 0x20, 0x46, 0xa8, 0xe3, 0x3a, 0x3a, 0x1b, 0x5c, 0x59, 0x62, 0x7a, 0x16, 0x4b, + 0x4c, 0xef, 0x0c, 0x19, 0x96, 0x94, 0x85, 0xb9, 0xc9, 0xe9, 0xa7, 0x0a, 0xdc, 0x48, 0x71, 0x5f, + 0x41, 0x5c, 0xdc, 0x8f, 0xc7, 0xc5, 0xb7, 0xc6, 0xed, 0x50, 0x4e, 0x6c, 0xfc, 0xab, 0xb9, 0x8c, + 0xee, 0x88, 0x95, 0xf2, 0x10, 0xc0, 0x76, 0xf4, 0x13, 0xdd, 0x20, 0x3d, 0x79, 0x09, 0x5e, 0x89, + 0x3c, 0x82, 0x0b, 0x28, 0x38, 0xc2, 0x85, 0x28, 0x2c, 0x77, 0xc9, 0xa1, 0xe6, 0x1a, 0x6c, 0xb5, + 0xdb, 0x5d, 0xd3, 0x6c, 0xed, 0x40, 0x37, 0x74, 0xa6, 0xcb, 0xe3, 0x82, 0xa9, 0xe6, 0x23, 0xef, + 0x72, 0x3a, 0x8b, 0xe3, 0xf9, 0x59, 0xed, 0x76, 0xd6, 0xed, 0x90, 0xcf, 0x32, 0xc0, 0x39, 0xd0, + 0x68, 0x00, 0x55, 0x87, 0x7c, 0xdf, 0xd5, 0x1d, 0xd2, 0x5d, 0x77, 0x2c, 0x3b, 0xa6, 0xb6, 0x28, + 0xd4, 0xfe, 0xea, 0xf9, 0x59, 0xad, 0x8a, 0x73, 0x78, 0x86, 0x2b, 0xce, 0x85, 0x47, 0x9f, 0xc2, + 0xa2, 0xe6, 0xbd, 0x1d, 0x8c, 0x69, 0xf5, 0x56, 0xc9, 0xfb, 0xe7, 0x67, 0xb5, 0xc5, 0xd5, 0x34, + 0x79, 0xb8, 0xc2, 0x2c, 0x50, 0xd4, 0x80, 0xf2, 0x89, 0x78, 0xd9, 0x48, 0xab, 0x13, 0x02, 0x9f, + 0x27, 0x82, 0xb2, 0xf7, 0xd8, 0x91, 0x63, 0x4e, 0x6e, 0xb4, 0xc5, 0xea, 0xf3, 0xb9, 0xf8, 0x86, + 0x92, 0xd7, 0x92, 0x72, 0xc5, 0x8b, 0x13, 0xe3, 0x4a, 0x18, 0xb5, 0x9e, 0x86, 0x24, 0x1c, 0xe5, + 0x43, 0x1f, 0xc3, 0xd4, 0x91, 0x3c, 0x95, 0xa0, 0xd5, 0xf2, 0x48, 0x49, 0x38, 0x76, 0x8a, 0xd1, + 0x5c, 0x90, 0x2a, 0xa6, 0xfc, 0x66, 0x8a, 0x43, 0x44, 0xf4, 0x3a, 0x94, 0xc5, 0xc7, 0xe6, 0xba, + 0x38, 0x8e, 0xab, 0x84, 0xb1, 0xed, 0xa9, 0xd7, 0x8c, 0x7d, 0xba, 0xcf, 0xba, 0xd9, 0x5a, 0x13, + 0xc7, 0xc2, 0x09, 0xd6, 0xcd, 0xd6, 0x1a, 0xf6, 0xe9, 0xe8, 0x13, 0x28, 0x53, 0xb2, 0xa5, 0x9b, + 0xee, 0x69, 0x15, 0x46, 0xba, 0x54, 0x6e, 0x3f, 0x16, 0xdc, 0x89, 0x83, 0xb1, 0x50, 0x83, 0xa4, + 0x63, 0x1f, 0x16, 0x1d, 0xc1, 0x94, 0xe3, 0x9a, 0xab, 0x74, 0x9f, 0x12, 0xa7, 0x3a, 0x2d, 0x74, + 0x0c, 0x0b, 0xe7, 0xd8, 0xe7, 0x4f, 0x6a, 0x09, 0x46, 0x28, 0xe0, 0xc0, 0x21, 0x38, 0xfa, 0x43, + 0x05, 0x10, 0x75, 0x6d, 0xdb, 0x20, 0x7d, 0x62, 0x32, 0xcd, 0x10, 0x67, 0x71, 0xb4, 0x7a, 0x5d, + 0xe8, 0xfc, 0xf6, 0xb0, 0x7e, 0xa5, 0x04, 0x93, 0xca, 0x83, 0x43, 0xef, 0x34, 0x2b, 0xce, 0xd0, + 0xcb, 0x87, 0xf6, 0x90, 0x8a, 0xdf, 0xd5, 0x99, 0x91, 0x86, 0x36, 0xfb, 0xcc, 0x31, 0x1c, 0x5a, + 0x49, 0xc7, 0x3e, 0x2c, 0x7a, 0x06, 0xcb, 0xfe, 0xc3, 0x58, 0x6c, 0x59, 0x6c, 0x43, 0x37, 0x08, + 0x1d, 0x50, 0x46, 0xfa, 0xd5, 0x59, 0x31, 0xed, 0xc1, 0xdb, 0x0f, 0x9c, 0xc9, 0x85, 0x73, 0xa4, + 0x51, 0x1f, 0x6a, 0x7e, 0xc8, 0xe0, 0xeb, 0x29, 0x88, 0x59, 0x8f, 0x69, 0x47, 0x33, 0xbc, 0x7b, + 0x80, 0x39, 0xa1, 0xe0, 0xb5, 0xf3, 0xb3, 0x5a, 0x6d, 0xfd, 0x62, 0x56, 0x3c, 0x0c, 0x0b, 0x7d, + 0x17, 0xaa, 0x5a, 0x9e, 0x9e, 0x79, 0xa1, 0xe7, 0x55, 0x1e, 0x87, 0x72, 0x15, 0xe4, 0x4a, 0x23, + 0x06, 0xf3, 0x5a, 0xfc, 0x89, 0x32, 0xad, 0x2e, 0x8c, 0x74, 0x10, 0x99, 0x78, 0xd9, 0x1c, 0x1e, + 0x46, 0x24, 0x08, 0x14, 0xa7, 0x34, 0xa0, 0xdf, 0x06, 0xa4, 0x25, 0x5f, 0x55, 0xd3, 0x2a, 0x1a, + 0x29, 0xfd, 0xa4, 0x9e, 0x63, 0x87, 0x6e, 0x97, 0x22, 
0x51, 0x9c, 0xa1, 0x07, 0x6d, 0xc1, 0x92, + 0x6c, 0xdd, 0x37, 0xa9, 0x76, 0x48, 0xda, 0x03, 0xda, 0x61, 0x06, 0xad, 0x2e, 0x8a, 0xd8, 0x27, + 0x2e, 0xbe, 0x56, 0x33, 0xe8, 0x38, 0x53, 0x0a, 0x7d, 0x1b, 0xe6, 0x0f, 0x2d, 0xe7, 0x40, 0xef, + 0x76, 0x89, 0xe9, 0x23, 0x2d, 0x09, 0xa4, 0x25, 0x3e, 0x1a, 0x1b, 0x09, 0x1a, 0x4e, 0x71, 0x23, + 0x0a, 0x37, 0x24, 0x72, 0xcb, 0xb1, 0x3a, 0xdb, 0x96, 0x6b, 0x32, 0xaf, 0x24, 0xba, 0x11, 0xa4, + 0x98, 0x1b, 0xab, 0x59, 0x0c, 0xcf, 0xcf, 0x6a, 0x77, 0xb2, 0x2b, 0xe0, 0x90, 0x09, 0x67, 0x63, + 0x8b, 0x17, 0x2c, 0xf2, 0x3e, 0xe3, 0x6a, 0x5e, 0x01, 0x8f, 0xf7, 0x82, 0x25, 0x34, 0xed, 0x85, + 0xbd, 0x60, 0x89, 0x40, 0x5e, 0x7c, 0x82, 0xfa, 0x5f, 0x05, 0x58, 0x0c, 0x99, 0x47, 0x7e, 0xc1, + 0x92, 0x21, 0xf2, 0x8b, 0x97, 0xc0, 0xc3, 0x5f, 0x02, 0x7f, 0xa6, 0xc0, 0x6c, 0x38, 0x74, 0xff, + 0xf7, 0x5e, 0x95, 0x84, 0xb6, 0xe5, 0xd4, 0xb9, 0x7f, 0x5f, 0x88, 0x76, 0xe0, 0xff, 0xfd, 0xd3, + 0x86, 0xaf, 0xfe, 0x7c, 0x57, 0xfd, 0x69, 0x11, 0xe6, 0x93, 0xab, 0x31, 0x76, 0x03, 0xae, 0x0c, + 0xbd, 0x01, 0x6f, 0xc1, 0xd2, 0xa1, 0x6b, 0x18, 0x03, 0x31, 0x0c, 0x91, 0x6b, 0x70, 0xef, 0x06, + 0xeb, 0x55, 0x29, 0xb9, 0xb4, 0x91, 0xc1, 0x83, 0x33, 0x25, 0x73, 0x6e, 0xf3, 0x8b, 0x97, 0xba, + 0xcd, 0x4f, 0x5d, 0x2e, 0x97, 0xc6, 0xb8, 0x5c, 0xce, 0xbc, 0x99, 0x9f, 0xb8, 0xc4, 0xcd, 0xfc, + 0x65, 0xae, 0xd2, 0x33, 0x82, 0xd8, 0xd0, 0x97, 0x9d, 0xaf, 0xc2, 0x2d, 0x29, 0xc6, 0xc4, 0x2d, + 0xb7, 0xc9, 0x1c, 0xcb, 0x30, 0x88, 0xb3, 0xee, 0xf6, 0xfb, 0x03, 0xf5, 0x9b, 0x30, 0x1b, 0x7f, + 0xbf, 0xe1, 0xcd, 0xb4, 0xf7, 0x84, 0x44, 0xde, 0x23, 0x46, 0x66, 0xda, 0x6b, 0xc7, 0x01, 0x87, + 0xfa, 0xfb, 0x0a, 0x2c, 0x67, 0xbf, 0xd3, 0x44, 0x06, 0xcc, 0xf6, 0xb5, 0xd3, 0xe8, 0xdb, 0x59, + 0xe5, 0x92, 0x27, 0x3c, 0xe2, 0xe2, 0x7e, 0x3b, 0x86, 0x85, 0x13, 0xd8, 0xea, 0x97, 0x0a, 0xdc, + 0xcc, 0xb9, 0x32, 0xbf, 0x5a, 0x4b, 0xd0, 0x47, 0x50, 0xe9, 0x6b, 0xa7, 0x6d, 0xd7, 0xe9, 0x91, + 0x4b, 0x9f, 0x69, 0x89, 0x88, 0xb1, 0x2d, 0x51, 0x70, 0x80, 0xa7, 0xfe, 0x48, 0x81, 0x6a, 0xde, + 0xee, 0x02, 0xbd, 0x1b, 0xbb, 0xdc, 0xff, 0x7a, 0xe2, 0x72, 0x7f, 0x21, 0x25, 0xf7, 0x92, 0xae, + 0xf6, 0xff, 0x4e, 0x81, 0xe5, 0xec, 0x5d, 0x16, 0x7a, 0x3b, 0x66, 0x61, 0x2d, 0x61, 0xe1, 0x5c, + 0x42, 0x4a, 0xda, 0xf7, 0x3d, 0x98, 0x95, 0x7b, 0x31, 0x09, 0x23, 0x47, 0x55, 0xcd, 0x8a, 0x95, + 0x12, 0xc2, 0xdf, 0x7b, 0x88, 0xf9, 0x8a, 0xb7, 0xe1, 0x04, 0x9a, 0xfa, 0x07, 0x05, 0x98, 0x68, + 0x77, 0x34, 0x83, 0x5c, 0x41, 0x99, 0xf5, 0x9d, 0x58, 0x99, 0x35, 0xec, 0x7f, 0x2e, 0xc2, 0xaa, + 0xdc, 0x0a, 0x0b, 0x27, 0x2a, 0xac, 0x37, 0x46, 0x42, 0xbb, 0xb8, 0xb8, 0xfa, 0x15, 0x98, 0x0a, + 0x94, 0x8e, 0x17, 0xf3, 0xd5, 0xbf, 0x2e, 0xc0, 0x74, 0x44, 0xc5, 0x98, 0x19, 0xe3, 0x30, 0x96, + 0x69, 0x47, 0xf9, 0x77, 0x61, 0x44, 0x57, 0xdd, 0xcf, 0xad, 0xde, 0x3b, 0xcd, 0xf0, 0x65, 0x5e, + 0x3a, 0xe5, 0x7e, 0x13, 0x66, 0x99, 0xf8, 0xf7, 0x5d, 0x70, 0x12, 0x5c, 0x14, 0xbe, 0x18, 0xbc, + 0xee, 0xdd, 0x8b, 0x51, 0x71, 0x82, 0xfb, 0xd6, 0x23, 0x98, 0x89, 0x29, 0x1b, 0xeb, 0x99, 0xe5, + 0x3f, 0x2a, 0xf0, 0xf5, 0xa1, 0xfb, 0x74, 0xd4, 0x8c, 0x2d, 0x92, 0x7a, 0x62, 0x91, 0xac, 0xe4, + 0x03, 0xbc, 0xbc, 0xe7, 0x3a, 0xcd, 0x37, 0x3f, 0xff, 0x62, 0xe5, 0xda, 0xcf, 0xbe, 0x58, 0xb9, + 0xf6, 0xf3, 0x2f, 0x56, 0xae, 0xfd, 0xee, 0xf9, 0x8a, 0xf2, 0xf9, 0xf9, 0x8a, 0xf2, 0xb3, 0xf3, + 0x15, 0xe5, 0xe7, 0xe7, 0x2b, 0xca, 0x7f, 0x9c, 0xaf, 0x28, 0x7f, 0xf2, 0xe5, 0xca, 0xb5, 0x8f, + 0xca, 0x12, 0xee, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xa7, 0xd7, 0xb5, 0x56, 0x5c, 0x3d, 0x00, + 0x00, +} diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto 
b/vendor/k8s.io/api/extensions/v1beta1/generated.proto index ef8367e..1a9b7eb 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ b/vendor/k8s.io/api/extensions/v1beta1/generated.proto @@ -22,6 +22,7 @@ syntax = 'proto2'; package k8s.io.api.extensions.v1beta1; import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -30,12 +31,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. // Deprecated: use AllowedFlexVolume from policy API Group instead. message AllowedFlexVolume { @@ -50,7 +45,7 @@ message AllowedHostPath { // pathPrefix is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -61,17 +56,42 @@ message AllowedHostPath { optional bool readOnly = 2; } +message CustomMetricCurrentStatus { + // Custom Metric name. + optional string name = 1; + + // Custom Metric value (average). + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; +} + +message CustomMetricCurrentStatusList { + repeated CustomMetricCurrentStatus items = 1; +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +message CustomMetricTarget { + // Custom Metric name. + optional string name = 1; + + // Custom Metric value (average). + optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; +} + +message CustomMetricTargetList { + repeated CustomMetricTarget items = 1; +} + // DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for // more information. // DaemonSet represents the configuration of a daemon set. message DaemonSet { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetSpec spec = 2; @@ -79,7 +99,7 @@ message DaemonSet { // out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional DaemonSetStatus status = 3; } @@ -108,7 +128,7 @@ message DaemonSetCondition { // DaemonSetList is a collection of daemon sets. message DaemonSetList { // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -315,8 +335,6 @@ message DeploymentSpec { // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. - // This is set to the max value of int32 (i.e. 2147483647) by default, which - // means "retaining all old RelicaSets". // +optional optional int32 revisionHistoryLimit = 6; @@ -408,33 +426,19 @@ message FSGroupStrategyOptions { repeated IDRange ranges = 2; } -// HTTPIngressPath associates a path with a backend. Incoming urls matching the -// path are forwarded to the backend. +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. // +optional optional string path = 1; - // PathType determines the interpretation of the Path matching. PathType can - // be one of the following values: - // * Exact: Matches the URL path exactly. - // * Prefix: Matches based on a URL path prefix split by '/'. Matching is - // done on a path element by element basis. A path element refers is the - // list of labels in the path split by the '/' separator. A request is a - // match for path p if every p is an element-wise prefix of p of the - // request path. Note that if the last element of the path is a substring - // of the last element in request path, it is not a match (e.g. /foo/bar - // matches /foo/bar/baz, but does not match /foo/barbaz). - // * ImplementationSpecific: Interpretation of the Path matching is up to - // the IngressClass. Implementations can treat this as a separate PathType - // or treat it identically to Prefix or Exact path types. - // Implementations are required to support all path types. - // Defaults to ImplementationSpecific. - optional string pathType = 3; - // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. optional IngressBackend backend = 2; @@ -472,16 +476,16 @@ message IDRange { } // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed -// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs -// that should not be included within this rule. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. 
message IPBlock { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" optional string cidr = 1; // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" // Except values will be rejected if they are outside the CIDR range // +optional repeated string except = 2; @@ -491,20 +495,19 @@ message IPBlock { // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. -// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. message Ingress { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional IngressSpec spec = 2; // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional IngressStatus status = 3; } @@ -512,24 +515,16 @@ message Ingress { // IngressBackend describes all endpoints for a given service and port. message IngressBackend { // Specifies the name of the referenced service. - // +optional optional string serviceName = 1; // Specifies the port of the referenced service. - // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - - // Resource is an ObjectRef to another Kubernetes resource in the namespace - // of the Ingress object. If resource is specified, serviceName and servicePort - // must not be specified. - // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; } // IngressList is a collection of Ingress. message IngressList { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -541,28 +536,18 @@ message IngressList { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. - // Note the following deviations from the "host" part of the - // URI as defined in RFC 3986: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - // the IP in the Spec of the parent Ingress. + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. 
Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. // 2. The `:` delimiter is not respected because ports are not allowed. // Currently the port of an Ingress is implicitly :80 for http and // :443 for https. // Both these may change in the future. - // Incoming requests are matched against the host before the - // IngressRuleValue. If the host is unspecified, the Ingress routes all - // traffic based on the specified IngressRuleValue. - // - // Host can be "precise" which is a domain name without the terminating dot of - // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - // prefixed with a single wildcard label (e.g. "*.foo.com"). - // The wildcard character '*' must appear by itself as the first DNS label and - // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header - // is to equal to the suffix (removing the first label) of the wildcard rule. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. // +optional optional string host = 1; @@ -586,19 +571,6 @@ message IngressRuleValue { // IngressSpec describes the Ingress the user wishes to exist. message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. - // +optional - optional string ingressClassName = 4; - // A default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to @@ -649,7 +621,7 @@ message IngressTLS { // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -694,7 +666,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). 
- // If this field is present and contains at least one item, this rule allows traffic only if the + // If this field is present and contains at least on item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; @@ -704,7 +676,7 @@ message NetworkPolicyIngressRule { // Network Policy List is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -716,7 +688,7 @@ message NetworkPolicyList { message NetworkPolicyPeer { // This is a label selector which selects Pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. - // + // // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. @@ -725,7 +697,7 @@ message NetworkPolicyPeer { // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. - // + // // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. @@ -784,7 +756,7 @@ message NetworkPolicySpec { repeated NetworkPolicyEgressRule egress = 3; // List of rule types that the NetworkPolicy relates to. - // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // Valid options are Ingress, Egress, or Ingress,Egress. // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -802,7 +774,7 @@ message NetworkPolicySpec { // Deprecated: use PodSecurityPolicy from policy API Group instead. message PodSecurityPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -815,7 +787,7 @@ message PodSecurityPolicy { // Deprecated: use PodSecurityPolicyList from policy API Group instead. message PodSecurityPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -875,12 +847,6 @@ message PodSecurityPolicySpec { // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. optional RunAsUserStrategyOptions runAsUser = 11; - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. 
This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. optional SupplementalGroupsStrategyOptions supplementalGroups = 12; @@ -916,16 +882,11 @@ message PodSecurityPolicySpec { // +optional repeated AllowedFlexVolume allowedFlexVolumes = 18; - // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // + // // Examples: // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. @@ -935,7 +896,7 @@ message PodSecurityPolicySpec { // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // + // // Examples: // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. @@ -947,12 +908,6 @@ message PodSecurityPolicySpec { // This requires the ProcMountType feature flag to be enabled. // +optional repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; } // DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for @@ -961,12 +916,12 @@ message PodSecurityPolicySpec { message ReplicaSet { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicaSetSpec spec = 2; @@ -974,7 +929,7 @@ message ReplicaSet { // This data may be out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional ReplicaSetStatus status = 3; } @@ -1003,7 +958,7 @@ message ReplicaSetCondition { // ReplicaSetList is a collection of ReplicaSets. message ReplicaSetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -1070,6 +1025,10 @@ message ReplicaSetStatus { repeated ReplicaSetCondition conditions = 6; } +// Dummy definition +message ReplicationControllerDummy { +} + // DEPRECATED. message RollbackConfig { // The revision to rollback to. If set to 0, rollback to the last revision. @@ -1122,23 +1081,11 @@ message RollingUpdateDeployment { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is atmost 130% of desired pods. // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. // Deprecated: use RunAsUserStrategyOptions from policy API Group instead. message RunAsUserStrategyOptions { @@ -1151,21 +1098,6 @@ message RunAsUserStrategyOptions { repeated IDRange ranges = 2; } -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. // Deprecated: use SELinuxStrategyOptions from policy API Group instead. message SELinuxStrategyOptions { @@ -1180,15 +1112,15 @@ message SELinuxStrategyOptions { // represents a scaling request for a resource. 
message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional optional ScaleSpec spec = 2; - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. // +optional optional ScaleStatus status = 3; } diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go index c69eff0..7625f67 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ b/vendor/k8s.io/api/extensions/v1beta1/register.go @@ -47,6 +47,7 @@ func addKnownTypes(scheme *runtime.Scheme) error { &Deployment{}, &DeploymentList{}, &DeploymentRollback{}, + &ReplicationControllerDummy{}, &Scale{}, &DaemonSetList{}, &DaemonSet{}, diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go index 8934c06..38e112d 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types.go @@ -18,7 +18,8 @@ package v1beta1 import ( appsv1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -49,24 +50,56 @@ type ScaleStatus struct { TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` } +// +genclient +// +genclient:noVerbs // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // represents a scaling request for a resource. type Scale struct { metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata. + // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. + // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. // +optional Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only. + // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. 
// +optional Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// Dummy definition +type ReplicationControllerDummy struct { + metav1.TypeMeta `json:",inline"` +} + +// Alpha-level support for Custom Metrics in HPA (as annotations). +type CustomMetricTarget struct { + // Custom Metric name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Custom Metric value (average). + TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type CustomMetricTargetList struct { + Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` +} + +type CustomMetricCurrentStatus struct { + // Custom Metric name. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // Custom Metric value (average). + CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` +} + +type CustomMetricCurrentStatusList struct { + Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` +} + // +genclient // +genclient:method=GetScale,verb=get,subresource=scale,result=Scale // +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale @@ -118,8 +151,6 @@ type DeploymentSpec struct { // The number of old ReplicaSets to retain to allow rollback. // This is a pointer to distinguish between explicit zero and not specified. - // This is set to the max value of int32 (i.e. 2147483647) by default, which - // means "retaining all old RelicaSets". // +optional RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` @@ -222,7 +253,7 @@ type RollingUpdateDeployment struct { // the rolling update starts, such that the total number of old and new pods do not exceed // 130% of desired pods. Once old pods have been killed, // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. + // at any time during the update is atmost 130% of desired pods. // +optional MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` } @@ -482,12 +513,12 @@ type DaemonSetCondition struct { type DaemonSet struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -495,7 +526,7 @@ type DaemonSet struct { // out of date by some window of time. // Populated by the system. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -519,7 +550,7 @@ const ( type DaemonSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -534,21 +565,20 @@ type DaemonSetList struct { // endpoints defined by a backend. An Ingress can be configured to give services // externally-reachable urls, load balance traffic, terminate SSL, offer name // based virtual hosting etc. -// DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information. type Ingress struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -559,7 +589,7 @@ type Ingress struct { type IngressList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -569,19 +599,6 @@ type IngressList struct { // IngressSpec describes the Ingress the user wishes to exist. type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. 
- // +optional - IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - // A default backend capable of servicing requests that don't match any // rule. At least one of 'backend' or 'rules' must be specified. This field // is optional to allow the loadbalancer controller or defaulting logic to @@ -633,28 +650,18 @@ type IngressStatus struct { // the related backend services. Incoming requests are first evaluated for a host // match, then routed to the backend associated with the matching IngressRuleValue. type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. - // Note the following deviations from the "host" part of the - // URI as defined in RFC 3986: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - // the IP in the Spec of the parent Ingress. + // Host is the fully qualified domain name of a network host, as defined + // by RFC 3986. Note the following deviations from the "host" part of the + // URI as defined in the RFC: + // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the + // IP in the Spec of the parent Ingress. // 2. The `:` delimiter is not respected because ports are not allowed. // Currently the port of an Ingress is implicitly :80 for http and // :443 for https. // Both these may change in the future. - // Incoming requests are matched against the host before the - // IngressRuleValue. If the host is unspecified, the Ingress routes all - // traffic based on the specified IngressRuleValue. - // - // Host can be "precise" which is a domain name without the terminating dot of - // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - // prefixed with a single wildcard label (e.g. "*.foo.com"). - // The wildcard character '*' must appear by itself as the first DNS label and - // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header - // is to equal to the suffix (removing the first label) of the wildcard rule. + // Incoming requests are matched against the host before the IngressRuleValue. + // If the host is unspecified, the Ingress routes all traffic based on the + // specified IngressRuleValue. // +optional Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` // IngressRuleValue represents a rule to route requests for this IngressRule. @@ -693,63 +700,19 @@ type HTTPIngressRuleValue struct { // options usable by a loadbalancer, like http keep-alive. } -// PathType represents the type of path referred to by a HTTPIngressPath. -type PathType string - -const ( - // PathTypeExact matches the URL path exactly and with case sensitivity. - PathTypeExact = PathType("Exact") - - // PathTypePrefix matches based on a URL path prefix split by '/'. Matching - // is case sensitive and done on a path element by element basis. A path - // element refers to the list of labels in the path split by the '/' - // separator. A request is a match for path p if every p is an element-wise - // prefix of p of the request path. Note that if the last element of the - // path is a substring of the last element in request path, it is not a - // match (e.g. 
/foo/bar matches /foo/bar/baz, but does not match - // /foo/barbaz). If multiple matching paths exist in an Ingress spec, the - // longest matching path is given priority. - // Examples: - // - /foo/bar does not match requests to /foo/barbaz - // - /foo/bar matches request to /foo/bar and /foo/bar/baz - // - /foo and /foo/ both match requests to /foo and /foo/. If both paths are - // present in an Ingress spec, the longest matching path (/foo/) is given - // priority. - PathTypePrefix = PathType("Prefix") - - // PathTypeImplementationSpecific matching is up to the IngressClass. - // Implementations can treat this as a separate PathType or treat it - // identically to Prefix or Exact path types. - PathTypeImplementationSpecific = PathType("ImplementationSpecific") -) - -// HTTPIngressPath associates a path with a backend. Incoming urls matching the -// path are forwarded to the backend. +// HTTPIngressPath associates a path regex with a backend. Incoming urls matching +// the path are forwarded to the backend. type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. + // Path is an extended POSIX regex as defined by IEEE Std 1003.1, + // (i.e this follows the egrep/unix syntax, not the perl syntax) + // matched against the path of an incoming request. Currently it can + // contain characters disallowed from the conventional "path" + // part of a URL as defined by RFC 3986. Paths must begin with + // a '/'. If unspecified, the path defaults to a catch all sending + // traffic to the backend. // +optional Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // PathType determines the interpretation of the Path matching. PathType can - // be one of the following values: - // * Exact: Matches the URL path exactly. - // * Prefix: Matches based on a URL path prefix split by '/'. Matching is - // done on a path element by element basis. A path element refers is the - // list of labels in the path split by the '/' separator. A request is a - // match for path p if every p is an element-wise prefix of p of the - // request path. Note that if the last element of the path is a substring - // of the last element in request path, it is not a match (e.g. /foo/bar - // matches /foo/bar/baz, but does not match /foo/barbaz). - // * ImplementationSpecific: Interpretation of the Path matching is up to - // the IngressClass. Implementations can treat this as a separate PathType - // or treat it identically to Prefix or Exact path types. - // Implementations are required to support all path types. - // Defaults to ImplementationSpecific. - PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - // Backend defines the referenced service endpoint to which the traffic // will be forwarded to. Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` @@ -758,18 +721,10 @@ type HTTPIngressPath struct { // IngressBackend describes all endpoints for a given service and port. type IngressBackend struct { // Specifies the name of the referenced service. - // +optional - ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` + ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` // Specifies the port of the referenced service. 
- // +optional - ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - - // Resource is an ObjectRef to another Kubernetes resource in the namespace - // of the Ingress object. If resource is specified, serviceName and servicePort - // must not be specified. - // +optional - Resource *v1.TypedLocalObjectReference `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"` + ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` } // +genclient @@ -785,12 +740,12 @@ type ReplicaSet struct { // If the Labels of a ReplicaSet are empty, they are defaulted to // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` @@ -798,7 +753,7 @@ type ReplicaSet struct { // This data may be out of date by some window of time. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -809,7 +764,7 @@ type ReplicaSet struct { type ReplicaSetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -913,7 +868,7 @@ type ReplicaSetCondition struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -963,11 +918,6 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. 
- // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -996,10 +946,6 @@ type PodSecurityPolicySpec struct { // is allowed in the "volumes" field. // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. @@ -1024,11 +970,6 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -1053,7 +994,7 @@ type AllowedHostPath struct { // Deprecated: use FSType from policy API Group instead. type FSType string -const ( +var ( AzureFile FSType = "azureFile" Flocker FSType = "flocker" FlexVolume FSType = "flexVolume" @@ -1075,7 +1016,6 @@ const ( ConfigMap FSType = "configMap" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" - CSI FSType = "csi" All FSType = "*" ) @@ -1086,12 +1026,6 @@ type AllowedFlexVolume struct { Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. // Deprecated: use HostPortRange from policy API Group instead. @@ -1138,17 +1072,6 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsGroupStrategyOptions from policy API Group instead. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. 
- Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - // IDRange provides a min/max of an allowed range of IDs. // Deprecated: use IDRange from policy API Group instead. type IDRange struct { @@ -1175,23 +1098,6 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -// Deprecated: use RunAsGroupStrategy from policy API Group instead. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use RunAsGroupStrategyMustRunAs from policy API Group instead. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsGroupStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use RunAsGroupStrategyRunAsAny from policy API Group instead. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. // Deprecated: use FSGroupStrategyOptions from policy API Group instead. type FSGroupStrategyOptions struct { @@ -1244,25 +1150,6 @@ const ( SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. @@ -1270,7 +1157,7 @@ const AllowAllRuntimeClassNames = "*" type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1278,7 +1165,6 @@ type PodSecurityPolicyList struct { Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` } -// +genclient // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. @@ -1286,7 +1172,7 @@ type PodSecurityPolicyList struct { type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -1337,7 +1223,7 @@ type NetworkPolicySpec struct { Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. - // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // Valid options are Ingress, Egress, or Ingress,Egress. // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -1364,7 +1250,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least one item, this rule allows traffic only if the + // If this field is present and contains at least on item, this rule allows traffic only if the // traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` @@ -1409,15 +1295,15 @@ type NetworkPolicyPort struct { } // DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed -// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs -// that should not be included within this rule. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. 
type IPBlock struct { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" // Except values will be rejected if they are outside the CIDR range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` @@ -1456,7 +1342,7 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go index 9ccad92..cdbc490 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go @@ -27,15 +27,6 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", "driver": "driver is the name of the Flexvolume driver.", @@ -55,11 +46,30 @@ func (AllowedHostPath) SwaggerDoc() map[string]string { return map_AllowedHostPath } +var map_CustomMetricCurrentStatus = map[string]string{ + "name": "Custom Metric name.", + "value": "Custom Metric value (average).", +} + +func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string { + return map_CustomMetricCurrentStatus +} + +var map_CustomMetricTarget = map[string]string{ + "": "Alpha-level support for Custom Metrics in HPA (as annotations).", + "name": "Custom Metric name.", + "value": "Custom Metric value (average).", +} + +func (CustomMetricTarget) SwaggerDoc() map[string]string { + return map_CustomMetricTarget +} + var map_DaemonSet = map[string]string{ "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (DaemonSet) SwaggerDoc() map[string]string { @@ -81,7 +91,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string { var map_DaemonSetList = map[string]string{ "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "A list of daemon sets.", } @@ -104,7 +114,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string { } var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", + "": "DaemonSetStatus represents the current status of a daemon set.", "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", @@ -183,7 +193,7 @@ var map_DeploymentSpec = map[string]string{ "template": "Template describes the pods that will be created.", "strategy": "The deployment strategy to use to replace existing pods with new ones.", "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"retaining all old RelicaSets\".", + "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 
2147483647) by default, which means \"no deadline\".", @@ -230,10 +240,9 @@ func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { } var map_HTTPIngressPath = map[string]string{ - "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", + "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.", + "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", + "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", } func (HTTPIngressPath) SwaggerDoc() map[string]string { @@ -270,9 +279,9 @@ func (IDRange) SwaggerDoc() map[string]string { } var map_IPBlock = map[string]string{ - "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", + "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -280,10 +289,10 @@ func (IPBlock) SwaggerDoc() map[string]string { } var map_Ingress = map[string]string{ - "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc. DEPRECATED - This group version of Ingress is deprecated by networking.k8s.io/v1beta1 Ingress. See the release notes for more information.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (Ingress) SwaggerDoc() map[string]string { @@ -294,7 +303,6 @@ var map_IngressBackend = map[string]string{ "": "IngressBackend describes all endpoints for a given service and port.", "serviceName": "Specifies the name of the referenced service.", "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", } func (IngressBackend) SwaggerDoc() map[string]string { @@ -303,7 +311,7 @@ func (IngressBackend) SwaggerDoc() map[string]string { var map_IngressList = map[string]string{ "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of Ingress.", } @@ -313,7 +321,7 @@ func (IngressList) SwaggerDoc() map[string]string { var map_IngressRule = map[string]string{ "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. 
Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", + "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", } func (IngressRule) SwaggerDoc() map[string]string { @@ -329,11 +337,10 @@ func (IngressRuleValue) SwaggerDoc() map[string]string { } var map_IngressSpec = map[string]string{ - "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. 
If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", + "": "IngressSpec describes the Ingress the user wishes to exist.", + "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", + "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", + "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", } func (IngressSpec) SwaggerDoc() map[string]string { @@ -361,7 +368,7 @@ func (IngressTLS) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -382,7 +389,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). 
If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -391,7 +398,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -425,7 +432,7 @@ var map_NetworkPolicySpec = map[string]string{ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. 
If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { @@ -434,7 +441,7 @@ func (NetworkPolicySpec) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -444,7 +451,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is a list of schema objects.", } @@ -465,7 +472,6 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -473,11 +479,9 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. 
This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { @@ -486,9 +490,9 @@ func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { var map_ReplicaSet = map[string]string{ "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", + "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", } func (ReplicaSet) SwaggerDoc() map[string]string { @@ -510,7 +514,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string { var map_ReplicaSetList = map[string]string{ "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", } @@ -544,6 +548,14 @@ func (ReplicaSetStatus) SwaggerDoc() map[string]string { return map_ReplicaSetStatus } +var map_ReplicationControllerDummy = map[string]string{ + "": "Dummy definition", +} + +func (ReplicationControllerDummy) SwaggerDoc() map[string]string { + return map_ReplicationControllerDummy +} + var map_RollbackConfig = map[string]string{ "": "DEPRECATED.", "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", @@ -565,23 +577,13 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { var map_RollingUpdateDeployment = map[string]string{ "": "Spec to control the desired behavior of rolling update.", "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", + "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", } func (RollingUpdateDeployment) SwaggerDoc() map[string]string { return map_RollingUpdateDeployment } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", @@ -592,16 +594,6 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", @@ -614,9 +606,9 @@ func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { var map_Scale = map[string]string{ "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.", + "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", + "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", + "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. 
Read-only.", } func (Scale) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go index 913f485..65801c2 100644 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go @@ -28,49 +28,113 @@ import ( ) // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { +func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. +func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { if in == nil { return nil } - out := new(AllowedCSIDriver) + out := new(AllowedFlexVolume) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { +func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. +func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { if in == nil { return nil } - out := new(AllowedFlexVolume) + out := new(AllowedHostPath) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { +func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { *out = *in + out.CurrentValue = in.CurrentValue.DeepCopy() return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. +func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { if in == nil { return nil } - out := new(AllowedHostPath) + out := new(CustomMetricCurrentStatus) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricCurrentStatus, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. +func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { + if in == nil { + return nil + } + out := new(CustomMetricCurrentStatusList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { + *out = *in + out.TargetValue = in.TargetValue.DeepCopy() + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. +func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { + if in == nil { + return nil + } + out := new(CustomMetricTarget) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { + *out = *in + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]CustomMetricTarget, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. +func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { + if in == nil { + return nil + } + out := new(CustomMetricTargetList) in.DeepCopyInto(out) return out } @@ -124,7 +188,7 @@ func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]DaemonSet, len(*in)) @@ -280,7 +344,7 @@ func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Deployment, len(*in)) @@ -458,12 +522,7 @@ func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { *out = *in - if in.PathType != nil { - in, out := &in.PathType, &out.PathType - *out = new(PathType) - **out = **in - } - in.Backend.DeepCopyInto(&out.Backend) + out.Backend = in.Backend return } @@ -483,9 +542,7 @@ func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { if in.Paths != nil { in, out := &in.Paths, &out.Paths *out = make([]HTTPIngressPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } + copy(*out, *in) } return } @@ -585,11 +642,6 @@ func (in *Ingress) DeepCopyObject() runtime.Object { func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { *out = *in out.ServicePort = in.ServicePort - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(corev1.TypedLocalObjectReference) - (*in).DeepCopyInto(*out) - } return } @@ -607,7 +659,7 @@ func (in *IngressBackend) DeepCopy() *IngressBackend { func (in *IngressList) DeepCopyInto(out *IngressList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Ingress, len(*in)) @@ -677,15 +729,10 @@ func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { *out = *in - if in.IngressClassName != nil { - in, out := &in.IngressClassName, &out.IngressClassName - *out = new(string) - **out = **in - } if in.Backend != nil { in, out := &in.Backend, &out.Backend *out = new(IngressBackend) - (*in).DeepCopyInto(*out) + **out = **in } if in.TLS != nil { in, out := &in.TLS, &out.TLS @@ -843,7 +890,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) @@ -996,7 +1043,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -1055,11 +1102,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -1082,11 +1124,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) @@ -1102,11 +1139,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } return } @@ -1169,7 +1201,7 @@ func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ReplicaSet, len(*in)) @@ -1248,6 +1280,31 @@ func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerDummy. +func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy { + if in == nil { + return nil + } + out := new(ReplicationControllerDummy) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { *out = *in @@ -1311,27 +1368,6 @@ func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in @@ -1353,32 +1389,6 @@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go index d3ffd5e..ef9ae2a 100644 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ b/vendor/k8s.io/api/networking/v1/doc.go @@ -15,8 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=networking.k8s.io - package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go index 1ff2339..7b1c04b 100644 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ b/vendor/k8s.io/api/networking/v1/generated.pb.go @@ -14,28 +14,42 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto + + It has these top-level messages: + IPBlock + NetworkPolicy + NetworkPolicyEgressRule + NetworkPolicyIngressRule + NetworkPolicyList + NetworkPolicyPeer + NetworkPolicyPort + NetworkPolicySpec +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import strings "strings" +import reflect "reflect" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -46,231 +60,41 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{0} -} -func (m *IPBlock) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IPBlock) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IPBlock) XXX_Merge(src proto.Message) { - xxx_messageInfo_IPBlock.Merge(m, src) -} -func (m *IPBlock) XXX_Size() int { - return m.Size() -} -func (m *IPBlock) XXX_DiscardUnknown() { - xxx_messageInfo_IPBlock.DiscardUnknown(m) -} +func (m *IPBlock) Reset() { *m = IPBlock{} } +func (*IPBlock) ProtoMessage() {} +func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_IPBlock proto.InternalMessageInfo - -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{1} -} -func (m *NetworkPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicy.Merge(m, src) -} -func (m *NetworkPolicy) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicy proto.InternalMessageInfo - -func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{2} -} -func (m *NetworkPolicyEgressRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*NetworkPolicyEgressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyEgressRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyEgressRule.Merge(m, src) -} -func (m *NetworkPolicyEgressRule) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyEgressRule) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyEgressRule.DiscardUnknown(m) -} +func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } +func (*NetworkPolicy) ProtoMessage() {} +func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_NetworkPolicyEgressRule proto.InternalMessageInfo +func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } +func (*NetworkPolicyEgressRule) ProtoMessage() {} +func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } func (*NetworkPolicyIngressRule) ProtoMessage() {} func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{3} -} -func (m *NetworkPolicyIngressRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyIngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyIngressRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyIngressRule.Merge(m, src) -} -func (m *NetworkPolicyIngressRule) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyIngressRule) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyIngressRule.DiscardUnknown(m) + return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_NetworkPolicyIngressRule proto.InternalMessageInfo +func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } +func (*NetworkPolicyList) ProtoMessage() {} +func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{4} -} -func (m *NetworkPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyList.Merge(m, src) -} -func (m *NetworkPolicyList) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyList.DiscardUnknown(m) -} +func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } +func (*NetworkPolicyPeer) ProtoMessage() {} +func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_NetworkPolicyList proto.InternalMessageInfo +func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } +func (*NetworkPolicyPort) ProtoMessage() {} +func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m 
*NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{5} -} -func (m *NetworkPolicyPeer) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyPeer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyPeer) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyPeer.Merge(m, src) -} -func (m *NetworkPolicyPeer) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyPeer) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyPeer.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyPeer proto.InternalMessageInfo - -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{6} -} -func (m *NetworkPolicyPort) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicyPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicyPort) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicyPort.Merge(m, src) -} -func (m *NetworkPolicyPort) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicyPort) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicyPort.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicyPort proto.InternalMessageInfo - -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_1c72867a70a7cc90, []int{7} -} -func (m *NetworkPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *NetworkPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *NetworkPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_NetworkPolicySpec.Merge(m, src) -} -func (m *NetworkPolicySpec) XXX_Size() int { - return m.Size() -} -func (m *NetworkPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_NetworkPolicySpec.DiscardUnknown(m) -} - -var xxx_messageInfo_NetworkPolicySpec proto.InternalMessageInfo +func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } +func (*NetworkPolicySpec) ProtoMessage() {} +func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } func init() { proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") @@ -282,70 +106,10 @@ func init() { proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptor_1c72867a70a7cc90) -} - -var fileDescriptor_1c72867a70a7cc90 = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 
0xc1, 0x17, 0x56, - 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, - 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, - 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, - 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, - 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, - 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, - 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, - 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, - 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, - 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, - 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, - 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, - 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, - 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 0x34, 0x42, 0x3a, - 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, - 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, - 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, - 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, - 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, - 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, - 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, - 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, - 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, - 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, - 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, - 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, - 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, - 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, - 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, - 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, - 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, - 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, - 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, - 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, - 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, - 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, - 0x3e, 
0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, - 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, - 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, - 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, - 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, - 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, - 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, - 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, - 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, - 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, - 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, - 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, - 0x67, 0x08, 0x00, 0x00, -} - func (m *IPBlock) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -353,36 +117,36 @@ func (m *IPBlock) Marshal() (dAtA []byte, err error) { } func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IPBlock) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) + i += copy(dAtA[i:], m.CIDR) if len(m.Except) > 0 { - for iNdEx := len(m.Except) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Except[iNdEx]) - copy(dAtA[i:], m.Except[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Except[iNdEx]))) - i-- + for _, s := range m.Except { dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - i -= len(m.CIDR) - copy(dAtA[i:], m.CIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -390,42 +154,33 @@ func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + return i, nil } func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -433,50 +188,41 @@ func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyEgressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.To) > 0 { - for iNdEx := len(m.To) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.To[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.To) > 0 { + for _, msg := range m.To { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -484,50 +230,41 @@ func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyIngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.From) > 0 { - for iNdEx := len(m.From) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.From[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Ports) > 0 { + for _, msg := range m.Ports { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - if len(m.Ports) > 0 { - for iNdEx := len(m.Ports) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ports[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.From) > 0 { + for _, msg := range m.From { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) 
+ n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -535,46 +272,37 @@ func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -582,58 +310,47 @@ func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyPeer) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.IPBlock != nil { - { - size, err := m.IPBlock.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.PodSelector != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) + n4, err := m.PodSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n4 } if m.NamespaceSelector != nil { - { - size, err := m.NamespaceSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) + n5, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 } - if m.PodSelector != nil { - { - size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.IPBlock != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IPBlock.Size())) + n6, err := m.IPBlock.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n6 } - return len(dAtA) - i, nil + return i, nil } func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -641,41 +358,33 @@ func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicyPort) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.Port != nil { - { - size, err := m.Port.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } if m.Protocol != nil { - i -= len(*m.Protocol) - copy(dAtA[i:], *m.Protocol) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) + i += copy(dAtA[i:], *m.Protocol) } - return len(dAtA) - i, nil + if m.Port != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) + n7, err := m.Port.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil } func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -683,80 +392,88 @@ func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *NetworkPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.PolicyTypes) > 0 { - for iNdEx := len(m.PolicyTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.PolicyTypes[iNdEx]) - copy(dAtA[i:], m.PolicyTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PolicyTypes[iNdEx]))) - i-- - dAtA[i] = 0x22 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) + n8, err := m.PodSelector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.Egress) > 0 { - for iNdEx := len(m.Egress) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Egress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n8 + if len(m.Ingress) > 0 { + for _, msg := range m.Ingress { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n } } - if len(m.Ingress) > 0 { - for iNdEx := len(m.Ingress) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ingress[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Egress) > 0 { + for _, msg := range m.Egress { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - { - size, err := m.PodSelector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.PolicyTypes) > 0 { + for _, s := range m.PolicyTypes { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v 
>> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *IPBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.CIDR) @@ -771,9 +488,6 @@ func (m *IPBlock) Size() (n int) { } func (m *NetworkPolicy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -784,9 +498,6 @@ func (m *NetworkPolicy) Size() (n int) { } func (m *NetworkPolicyEgressRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Ports) > 0 { @@ -805,9 +516,6 @@ func (m *NetworkPolicyEgressRule) Size() (n int) { } func (m *NetworkPolicyIngressRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Ports) > 0 { @@ -826,9 +534,6 @@ func (m *NetworkPolicyIngressRule) Size() (n int) { } func (m *NetworkPolicyList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -843,9 +548,6 @@ func (m *NetworkPolicyList) Size() (n int) { } func (m *NetworkPolicyPeer) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.PodSelector != nil { @@ -864,9 +566,6 @@ func (m *NetworkPolicyPeer) Size() (n int) { } func (m *NetworkPolicyPort) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Protocol != nil { @@ -881,9 +580,6 @@ func (m *NetworkPolicyPort) Size() (n int) { } func (m *NetworkPolicySpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.PodSelector.Size() @@ -910,7 +606,14 @@ func (m *NetworkPolicySpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -931,7 +634,7 @@ func (this *NetworkPolicy) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -941,19 +644,9 @@ func (this *NetworkPolicyEgressRule) String() string { if this == nil { return "nil" } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForTo := "[]NetworkPolicyPeer{" - for _, f := range this.To { - repeatedStringForTo += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForTo += "}" s := 
strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `To:` + repeatedStringForTo + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -962,19 +655,9 @@ func (this *NetworkPolicyIngressRule) String() string { if this == nil { return "nil" } - repeatedStringForPorts := "[]NetworkPolicyPort{" - for _, f := range this.Ports { - repeatedStringForPorts += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + "," - } - repeatedStringForPorts += "}" - repeatedStringForFrom := "[]NetworkPolicyPeer{" - for _, f := range this.From { - repeatedStringForFrom += strings.Replace(strings.Replace(f.String(), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + "," - } - repeatedStringForFrom += "}" s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + repeatedStringForPorts + `,`, - `From:` + repeatedStringForFrom + `,`, + `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, + `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -983,14 +666,9 @@ func (this *NetworkPolicyList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]NetworkPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1000,9 +678,9 @@ func (this *NetworkPolicyPeer) String() string { return "nil" } s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(this.IPBlock.String(), "IPBlock", "IPBlock", 1) + `,`, + `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, `}`, }, "") return s @@ -1013,7 +691,7 @@ func (this *NetworkPolicyPort) String() string { } s := strings.Join([]string{`&NetworkPolicyPort{`, `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "intstr.IntOrString", 1) + 
`,`, + `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -1022,20 +700,10 @@ func (this *NetworkPolicySpec) String() string { if this == nil { return "nil" } - repeatedStringForIngress := "[]NetworkPolicyIngressRule{" - for _, f := range this.Ingress { - repeatedStringForIngress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForIngress += "}" - repeatedStringForEgress := "[]NetworkPolicyEgressRule{" - for _, f := range this.Egress { - repeatedStringForEgress += strings.Replace(strings.Replace(f.String(), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForEgress += "}" s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + repeatedStringForIngress + `,`, - `Egress:` + repeatedStringForEgress + `,`, + `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, + `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, `}`, }, "") @@ -1064,7 +732,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1092,7 +760,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1102,9 +770,6 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1124,7 +789,7 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1134,9 +799,6 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1151,9 +813,6 @@ func (m *IPBlock) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1181,7 +840,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1209,7 +868,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1218,9 +877,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 
{ - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1242,7 +898,7 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1251,9 +907,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1270,9 +923,6 @@ func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1300,7 +950,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1328,7 +978,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1337,9 +987,6 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1362,7 +1009,7 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1371,9 +1018,6 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1391,9 +1035,6 @@ func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1421,7 +1062,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1449,7 +1090,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1458,9 +1099,6 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1483,7 +1121,7 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1492,9 +1130,6 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1512,9 +1147,6 @@ func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -1542,7 +1174,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1570,7 +1202,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1579,9 +1211,6 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1603,7 +1232,7 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1612,9 +1241,6 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1632,9 +1258,6 @@ func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1662,7 +1285,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1690,7 +1313,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1699,14 +1322,11 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.PodSelector == nil { - m.PodSelector = &v1.LabelSelector{} + m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1726,7 +1346,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1735,14 +1355,11 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.NamespaceSelector == nil { - m.NamespaceSelector = &v1.LabelSelector{} + m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1762,7 +1379,7 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1771,9 +1388,6 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1793,9 +1407,6 @@ func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + 
skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1823,7 +1434,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1851,7 +1462,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1861,9 +1472,6 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1884,7 +1492,7 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1893,14 +1501,11 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Port == nil { - m.Port = &intstr.IntOrString{} + m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -1915,9 +1520,6 @@ func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1945,7 +1547,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1973,7 +1575,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1982,9 +1584,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2006,7 +1605,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2015,9 +1614,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2040,7 +1636,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2049,9 +1645,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2074,7 +1667,7 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2084,9 +1677,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2101,9 +1691,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2119,7 +1706,6 @@ func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -2151,8 +1737,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -2169,34 +1757,112 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 804 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, + 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, + 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, + 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, + 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, + 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, + 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, + 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, + 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 
0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, + 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, + 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, + 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, + 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, + 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, + 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 0x34, 0x42, 0x3a, + 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, + 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, + 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, + 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, + 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, + 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, + 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, + 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, + 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, + 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, + 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, + 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, + 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, + 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 0xa0, + 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, + 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, + 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, + 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, + 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, + 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, + 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, + 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, + 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, + 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, + 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, + 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, + 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, + 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, + 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, + 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 
0xf4, 0x22, 0x25, 0x99, + 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, + 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, + 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, + 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, + 0x67, 0x08, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto index f2aa969..4e068d0 100644 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ b/vendor/k8s.io/api/networking/v1/generated.proto @@ -30,16 +30,16 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed -// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs -// that should not be included within this rule. +// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. message IPBlock { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" optional string cidr = 1; // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" // Except values will be rejected if they are outside the CIDR range // +optional repeated string except = 2; @@ -48,7 +48,7 @@ message IPBlock { // NetworkPolicy describes what network traffic is allowed for a set of Pods message NetworkPolicy { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -92,7 +92,7 @@ message NetworkPolicyIngressRule { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least one item, this rule + // source). If this field is present and contains at least on item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional repeated NetworkPolicyPeer from = 2; @@ -101,7 +101,7 @@ message NetworkPolicyIngressRule { // NetworkPolicyList is a list of NetworkPolicy objects. message NetworkPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -114,7 +114,7 @@ message NetworkPolicyList { message NetworkPolicyPeer { // This is a label selector which selects Pods. This field follows standard label // selector semantics; if present but empty, it selects all pods. 
- // + // // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. @@ -123,7 +123,7 @@ message NetworkPolicyPeer { // Selects Namespaces using cluster-scoped labels. This field follows standard label // selector semantics; if present but empty, it selects all namespaces. - // + // // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. @@ -180,7 +180,7 @@ message NetworkPolicySpec { repeated NetworkPolicyEgressRule egress = 3; // List of rule types that the NetworkPolicy relates to. - // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // Valid options are Ingress, Egress, or Ingress,Egress. // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go index 73580a5..ce70448 100644 --- a/vendor/k8s.io/api/networking/v1/types.go +++ b/vendor/k8s.io/api/networking/v1/types.go @@ -29,7 +29,7 @@ import ( type NetworkPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type NetworkPolicySpec struct { Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` // List of rule types that the NetworkPolicy relates to. - // Valid options are "Ingress", "Egress", or "Ingress,Egress". + // Valid options are Ingress, Egress, or Ingress,Egress. // If this field is not specified, it will default based on the existence of Ingress or Egress rules; // policies that contain an Egress section are assumed to affect Egress, and all policies // (whether or not they contain an Ingress section) are assumed to affect Ingress. @@ -107,7 +107,7 @@ type NetworkPolicyIngressRule struct { // List of sources which should be able to access the pods selected for this rule. // Items in this list are combined using a logical OR operation. If this field is // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least one item, this rule + // source). If this field is present and contains at least on item, this rule // allows traffic only if the traffic matches at least one item in the from list. // +optional From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` @@ -147,15 +147,15 @@ type NetworkPolicyPort struct { Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` } -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24","2001:db9::/64") that is allowed -// to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs -// that should not be included within this rule. 
+// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods +// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should +// not be included within this rule. type IPBlock struct { // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" or "2001:db9::/64" + // Valid examples are "192.168.1.1/24" // Except values will be rejected if they are outside the CIDR range // +optional Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` @@ -194,7 +194,7 @@ type NetworkPolicyPeer struct { type NetworkPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go index b404e5b..f4363bc 100644 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go @@ -28,9 +28,9 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_IPBlock = map[string]string{ - "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\",\"2001:db9::/64\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" or \"2001:db9::/64\" Except values will be rejected if they are outside the CIDR range", + "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", + "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", + "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", } func (IPBlock) SwaggerDoc() map[string]string { @@ -39,7 +39,7 @@ func (IPBlock) SwaggerDoc() map[string]string { var map_NetworkPolicy = map[string]string{ "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired behavior for this NetworkPolicy.", } @@ -60,7 +60,7 @@ func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyIngressRule = map[string]string{ "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the from list.", + "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", } func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { @@ -69,7 +69,7 @@ func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { var map_NetworkPolicyList = map[string]string{ "": "NetworkPolicyList is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } @@ -103,7 +103,7 @@ var map_NetworkPolicySpec = map[string]string{ "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", "egress": "List of egress rules to be applied to the selected pods. 
Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are \"Ingress\", \"Egress\", or \"Ingress,Egress\". If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", + "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", } func (NetworkPolicySpec) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go index 1833e97..d1e4e88 100644 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go @@ -139,7 +139,7 @@ func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]NetworkPolicy, len(*in)) diff --git a/vendor/k8s.io/api/networking/v1beta1/doc.go b/vendor/k8s.io/api/networking/v1beta1/doc.go deleted file mode 100644 index 12d3d4f..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true -// +groupName=networking.k8s.io - -package v1beta1 // import "k8s.io/api/networking/v1beta1" diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go b/vendor/k8s.io/api/networking/v1beta1/generated.pb.go deleted file mode 100644 index 6f51df8..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/generated.pb.go +++ /dev/null @@ -1,3183 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto - -package v1beta1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{0} -} -func (m *HTTPIngressPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPIngressPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPIngressPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPIngressPath.Merge(m, src) -} -func (m *HTTPIngressPath) XXX_Size() int { - return m.Size() -} -func (m *HTTPIngressPath) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPIngressPath.DiscardUnknown(m) -} - -var xxx_messageInfo_HTTPIngressPath proto.InternalMessageInfo - -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{1} -} -func (m *HTTPIngressRuleValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HTTPIngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HTTPIngressRuleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_HTTPIngressRuleValue.Merge(m, src) -} -func (m *HTTPIngressRuleValue) XXX_Size() int { - return m.Size() -} -func (m *HTTPIngressRuleValue) XXX_DiscardUnknown() { - xxx_messageInfo_HTTPIngressRuleValue.DiscardUnknown(m) -} - -var 
xxx_messageInfo_HTTPIngressRuleValue proto.InternalMessageInfo - -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{2} -} -func (m *Ingress) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Ingress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Ingress) XXX_Merge(src proto.Message) { - xxx_messageInfo_Ingress.Merge(m, src) -} -func (m *Ingress) XXX_Size() int { - return m.Size() -} -func (m *Ingress) XXX_DiscardUnknown() { - xxx_messageInfo_Ingress.DiscardUnknown(m) -} - -var xxx_messageInfo_Ingress proto.InternalMessageInfo - -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{3} -} -func (m *IngressBackend) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressBackend) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressBackend) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressBackend.Merge(m, src) -} -func (m *IngressBackend) XXX_Size() int { - return m.Size() -} -func (m *IngressBackend) XXX_DiscardUnknown() { - xxx_messageInfo_IngressBackend.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressBackend proto.InternalMessageInfo - -func (m *IngressClass) Reset() { *m = IngressClass{} } -func (*IngressClass) ProtoMessage() {} -func (*IngressClass) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{4} -} -func (m *IngressClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressClass.Merge(m, src) -} -func (m *IngressClass) XXX_Size() int { - return m.Size() -} -func (m *IngressClass) XXX_DiscardUnknown() { - xxx_messageInfo_IngressClass.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressClass proto.InternalMessageInfo - -func (m *IngressClassList) Reset() { *m = IngressClassList{} } -func (*IngressClassList) ProtoMessage() {} -func (*IngressClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{5} -} -func (m *IngressClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressClassList.Merge(m, src) -} -func (m *IngressClassList) XXX_Size() int { - return m.Size() -} -func (m *IngressClassList) XXX_DiscardUnknown() { - xxx_messageInfo_IngressClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressClassList proto.InternalMessageInfo - -func (m *IngressClassSpec) Reset() { *m = IngressClassSpec{} } -func (*IngressClassSpec) ProtoMessage() {} -func (*IngressClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, 
[]int{6} -} -func (m *IngressClassSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressClassSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressClassSpec.Merge(m, src) -} -func (m *IngressClassSpec) XXX_Size() int { - return m.Size() -} -func (m *IngressClassSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IngressClassSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressClassSpec proto.InternalMessageInfo - -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{7} -} -func (m *IngressList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressList) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressList.Merge(m, src) -} -func (m *IngressList) XXX_Size() int { - return m.Size() -} -func (m *IngressList) XXX_DiscardUnknown() { - xxx_messageInfo_IngressList.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressList proto.InternalMessageInfo - -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{8} -} -func (m *IngressRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressRule.Merge(m, src) -} -func (m *IngressRule) XXX_Size() int { - return m.Size() -} -func (m *IngressRule) XXX_DiscardUnknown() { - xxx_messageInfo_IngressRule.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressRule proto.InternalMessageInfo - -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{9} -} -func (m *IngressRuleValue) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressRuleValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressRuleValue) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressRuleValue.Merge(m, src) -} -func (m *IngressRuleValue) XXX_Size() int { - return m.Size() -} -func (m *IngressRuleValue) XXX_DiscardUnknown() { - xxx_messageInfo_IngressRuleValue.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressRuleValue proto.InternalMessageInfo - -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{10} -} -func (m *IngressSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if 
err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressSpec.Merge(m, src) -} -func (m *IngressSpec) XXX_Size() int { - return m.Size() -} -func (m *IngressSpec) XXX_DiscardUnknown() { - xxx_messageInfo_IngressSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressSpec proto.InternalMessageInfo - -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{11} -} -func (m *IngressStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressStatus.Merge(m, src) -} -func (m *IngressStatus) XXX_Size() int { - return m.Size() -} -func (m *IngressStatus) XXX_DiscardUnknown() { - xxx_messageInfo_IngressStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressStatus proto.InternalMessageInfo - -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { - return fileDescriptor_5bea11de0ceb8f53, []int{12} -} -func (m *IngressTLS) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IngressTLS) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IngressTLS) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngressTLS.Merge(m, src) -} -func (m *IngressTLS) XXX_Size() int { - return m.Size() -} -func (m *IngressTLS) XXX_DiscardUnknown() { - xxx_messageInfo_IngressTLS.DiscardUnknown(m) -} - -var xxx_messageInfo_IngressTLS proto.InternalMessageInfo - -func init() { - proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressPath") - proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*Ingress)(nil), "k8s.io.api.networking.v1beta1.Ingress") - proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.networking.v1beta1.IngressBackend") - proto.RegisterType((*IngressClass)(nil), "k8s.io.api.networking.v1beta1.IngressClass") - proto.RegisterType((*IngressClassList)(nil), "k8s.io.api.networking.v1beta1.IngressClassList") - proto.RegisterType((*IngressClassSpec)(nil), "k8s.io.api.networking.v1beta1.IngressClassSpec") - proto.RegisterType((*IngressList)(nil), "k8s.io.api.networking.v1beta1.IngressList") - proto.RegisterType((*IngressRule)(nil), "k8s.io.api.networking.v1beta1.IngressRule") - proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.networking.v1beta1.IngressRuleValue") - proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.networking.v1beta1.IngressSpec") - proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.networking.v1beta1.IngressStatus") - proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.networking.v1beta1.IngressTLS") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1beta1/generated.proto", fileDescriptor_5bea11de0ceb8f53) -} - -var fileDescriptor_5bea11de0ceb8f53 = []byte{ - // 990 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4f, 0x6f, 0xe3, 0x44, - 
0x14, 0xaf, 0x93, 0x66, 0x9b, 0x4e, 0xb2, 0xdd, 0x6a, 0xe8, 0x21, 0xaa, 0x84, 0x5b, 0xf9, 0x80, - 0xca, 0x9f, 0xda, 0x34, 0xbb, 0x20, 0x8e, 0xc8, 0x2b, 0xa1, 0x56, 0x04, 0x1a, 0x26, 0x16, 0x20, - 0x04, 0xd2, 0x4e, 0x9c, 0xb7, 0x8e, 0x89, 0x63, 0x9b, 0x99, 0x71, 0xd0, 0xde, 0xb8, 0x72, 0x82, - 0x2f, 0x01, 0x9f, 0x81, 0x23, 0x82, 0x4b, 0x8f, 0x7b, 0xdc, 0x53, 0x45, 0xc3, 0xb7, 0xe0, 0x84, - 0x66, 0x3c, 0xb5, 0x9d, 0xa4, 0xa5, 0x59, 0x0e, 0x7b, 0x8a, 0x67, 0xde, 0x7b, 0xbf, 0x37, 0xef, - 0xf7, 0x7e, 0x33, 0x2f, 0xe8, 0xa3, 0xc9, 0x07, 0xdc, 0x0e, 0x13, 0x67, 0x92, 0x0d, 0x81, 0xc5, - 0x20, 0x80, 0x3b, 0x33, 0x88, 0x47, 0x09, 0x73, 0xb4, 0x81, 0xa6, 0xa1, 0x13, 0x83, 0xf8, 0x3e, - 0x61, 0x93, 0x30, 0x0e, 0x9c, 0xd9, 0xc9, 0x10, 0x04, 0x3d, 0x71, 0x02, 0x88, 0x81, 0x51, 0x01, - 0x23, 0x3b, 0x65, 0x89, 0x48, 0xf0, 0xeb, 0xb9, 0xbb, 0x4d, 0xd3, 0xd0, 0x2e, 0xdd, 0x6d, 0xed, - 0xbe, 0x7f, 0x1c, 0x84, 0x62, 0x9c, 0x0d, 0x6d, 0x3f, 0x99, 0x3a, 0x41, 0x12, 0x24, 0x8e, 0x8a, - 0x1a, 0x66, 0x4f, 0xd5, 0x4a, 0x2d, 0xd4, 0x57, 0x8e, 0xb6, 0x6f, 0x55, 0x92, 0xfb, 0x09, 0x03, - 0x67, 0xb6, 0x92, 0x71, 0xff, 0x51, 0xe9, 0x33, 0xa5, 0xfe, 0x38, 0x8c, 0x81, 0x3d, 0x73, 0xd2, - 0x49, 0x20, 0x37, 0xb8, 0x33, 0x05, 0x41, 0x6f, 0x8a, 0x72, 0x6e, 0x8b, 0x62, 0x59, 0x2c, 0xc2, - 0x29, 0xac, 0x04, 0xbc, 0x7f, 0x57, 0x00, 0xf7, 0xc7, 0x30, 0xa5, 0x2b, 0x71, 0x0f, 0x6f, 0x8b, - 0xcb, 0x44, 0x18, 0x39, 0x61, 0x2c, 0xb8, 0x60, 0xcb, 0x41, 0xd6, 0x9f, 0x06, 0x7a, 0x70, 0xea, - 0x79, 0xfd, 0xb3, 0x38, 0x60, 0xc0, 0x79, 0x9f, 0x8a, 0x31, 0x3e, 0x44, 0x9b, 0x29, 0x15, 0xe3, - 0x8e, 0x71, 0x68, 0x1c, 0x6d, 0xbb, 0xed, 0x8b, 0xcb, 0x83, 0x8d, 0xf9, 0xe5, 0xc1, 0xa6, 0xb4, - 0x11, 0x65, 0xc1, 0x8f, 0x50, 0x53, 0xfe, 0x7a, 0xcf, 0x52, 0xe8, 0xd4, 0x95, 0x57, 0x67, 0x7e, - 0x79, 0xd0, 0xec, 0xeb, 0xbd, 0x7f, 0x2a, 0xdf, 0xa4, 0xf0, 0xc4, 0x5f, 0xa2, 0xad, 0x21, 0xf5, - 0x27, 0x10, 0x8f, 0x3a, 0xb5, 0x43, 0xe3, 0xa8, 0xd5, 0x3d, 0xb6, 0xff, 0xb3, 0x87, 0xb6, 0x3e, - 0x94, 0x9b, 0x07, 0xb9, 0x0f, 0xf4, 0x49, 0xb6, 0xf4, 0x06, 0xb9, 0x86, 0xb3, 0x26, 0x68, 0xaf, - 0x52, 0x04, 0xc9, 0x22, 0xf8, 0x9c, 0x46, 0x19, 0xe0, 0x01, 0x6a, 0xc8, 0xec, 0xbc, 0x63, 0x1c, - 0xd6, 0x8f, 0x5a, 0x5d, 0xfb, 0x8e, 0x7c, 0x4b, 0x44, 0xb8, 0xf7, 0x75, 0xc2, 0x86, 0x5c, 0x71, - 0x92, 0x63, 0x59, 0x3f, 0xd5, 0xd0, 0x96, 0xf6, 0xc2, 0x4f, 0x50, 0x53, 0xf6, 0x7d, 0x44, 0x05, - 0x55, 0x74, 0xb5, 0xba, 0xef, 0x56, 0x72, 0x14, 0x6d, 0xb0, 0xd3, 0x49, 0x20, 0x37, 0xb8, 0x2d, - 0xbd, 0xed, 0xd9, 0x89, 0x7d, 0x3e, 0xfc, 0x16, 0x7c, 0xf1, 0x09, 0x08, 0xea, 0x62, 0x9d, 0x05, - 0x95, 0x7b, 0xa4, 0x40, 0xc5, 0x3d, 0xb4, 0xc9, 0x53, 0xf0, 0x35, 0x63, 0x6f, 0xad, 0xc7, 0xd8, - 0x20, 0x05, 0xbf, 0x6c, 0x9c, 0x5c, 0x11, 0x85, 0x82, 0x3d, 0x74, 0x8f, 0x0b, 0x2a, 0x32, 0xae, - 0xda, 0xd6, 0xea, 0xbe, 0xb3, 0x26, 0x9e, 0x8a, 0x71, 0x77, 0x34, 0xe2, 0xbd, 0x7c, 0x4d, 0x34, - 0x96, 0xf5, 0x63, 0x0d, 0xed, 0x2c, 0xf6, 0x0a, 0xbf, 0x87, 0x5a, 0x1c, 0xd8, 0x2c, 0xf4, 0xe1, - 0x53, 0x3a, 0x05, 0x2d, 0xa5, 0xd7, 0x74, 0x7c, 0x6b, 0x50, 0x9a, 0x48, 0xd5, 0x0f, 0x07, 0x45, - 0x58, 0x3f, 0x61, 0x42, 0x17, 0x7d, 0x3b, 0xa5, 0x52, 0xd9, 0x76, 0xae, 0x6c, 0xfb, 0x2c, 0x16, - 0xe7, 0x6c, 0x20, 0x58, 0x18, 0x07, 0x2b, 0x89, 0x24, 0x18, 0xa9, 0x22, 0xe3, 0x2f, 0x50, 0x93, - 0x01, 0x4f, 0x32, 0xe6, 0x83, 0xa6, 0x62, 0x41, 0x8c, 0xf2, 0x09, 0x90, 0x6d, 0x92, 0xba, 0x1d, - 0xf5, 0x12, 0x9f, 0x46, 0x79, 0x73, 0x08, 0x3c, 0x05, 0x06, 0xb1, 0x0f, 0x6e, 0x5b, 0x0a, 0x9e, - 0x68, 0x08, 0x52, 0x80, 0xc9, 0x0b, 0xd5, 0xd6, 0x5c, 0x3c, 0x8e, 0xe8, 0x2b, 0x91, 0xc8, 0x67, - 0x0b, 0x12, 0x71, 0xd6, 
0x6b, 0xa9, 0x3a, 0xdc, 0x6d, 0x3a, 0xb1, 0xfe, 0x30, 0xd0, 0x6e, 0xd5, - 0xb1, 0x17, 0x72, 0x81, 0xbf, 0x5e, 0xa9, 0xc4, 0x5e, 0xaf, 0x12, 0x19, 0xad, 0xea, 0xd8, 0xd5, - 0xa9, 0x9a, 0xd7, 0x3b, 0x95, 0x2a, 0xfa, 0xa8, 0x11, 0x0a, 0x98, 0xf2, 0x4e, 0x4d, 0xdd, 0xd5, - 0xb7, 0x5f, 0xa2, 0x8c, 0xf2, 0xa2, 0x9e, 0x49, 0x04, 0x92, 0x03, 0x59, 0xbf, 0x2c, 0x15, 0x21, - 0xeb, 0xc3, 0x5d, 0x84, 0xfc, 0x24, 0x16, 0x2c, 0x89, 0x22, 0x60, 0x5a, 0x97, 0x05, 0xbd, 0x8f, - 0x0b, 0x0b, 0xa9, 0x78, 0xe1, 0x6f, 0x10, 0x4a, 0x29, 0xa3, 0x53, 0x10, 0xc0, 0xf8, 0x4d, 0x6f, - 0xd7, 0xdd, 0x72, 0xd9, 0x91, 0xf0, 0xfd, 0x02, 0x84, 0x54, 0x00, 0xad, 0xdf, 0x0c, 0xd4, 0xd2, - 0xe7, 0x7c, 0x05, 0x3c, 0x7f, 0xbc, 0xc8, 0xf3, 0x1b, 0x6b, 0xbe, 0xc1, 0x37, 0x53, 0xfc, 0x6b, - 0x79, 0x74, 0xf9, 0xea, 0xca, 0xd1, 0x31, 0x4e, 0xb8, 0x58, 0x1e, 0x1d, 0xa7, 0x09, 0x17, 0x44, - 0x59, 0x70, 0x86, 0x76, 0xc3, 0xa5, 0x67, 0xfa, 0xe5, 0x84, 0x5b, 0x84, 0xb9, 0x1d, 0x0d, 0xbf, - 0xbb, 0x6c, 0x21, 0x2b, 0x29, 0x2c, 0x40, 0x2b, 0x5e, 0xf2, 0xde, 0x8c, 0x85, 0x48, 0x35, 0xc7, - 0x0f, 0xd7, 0x1f, 0x0e, 0xe5, 0x11, 0x9a, 0xaa, 0x3a, 0xcf, 0xeb, 0x13, 0x05, 0x65, 0xfd, 0x5e, - 0x2b, 0xf8, 0x50, 0x6a, 0xfb, 0xb0, 0xa8, 0x56, 0x29, 0x50, 0xbd, 0x85, 0x9b, 0x8a, 0x9b, 0xbd, - 0xca, 0xc1, 0x0b, 0x1b, 0x59, 0xf1, 0xc6, 0x5e, 0x39, 0x34, 0x8d, 0xff, 0x33, 0x34, 0x5b, 0x37, - 0x0d, 0x4c, 0x7c, 0x8a, 0xea, 0x22, 0xba, 0x96, 0xc0, 0x9b, 0xeb, 0x21, 0x7a, 0xbd, 0x81, 0xdb, - 0xd2, 0x94, 0xd7, 0xbd, 0xde, 0x80, 0x48, 0x08, 0x7c, 0x8e, 0x1a, 0x2c, 0x8b, 0x40, 0x0e, 0x94, - 0xfa, 0xfa, 0x03, 0x4a, 0x32, 0x58, 0x4a, 0x4a, 0xae, 0x38, 0xc9, 0x71, 0xac, 0xef, 0xd0, 0xfd, - 0x85, 0xa9, 0x83, 0x9f, 0xa0, 0x76, 0x94, 0xd0, 0x91, 0x4b, 0x23, 0x1a, 0xfb, 0xfa, 0xce, 0x2e, - 0xe9, 0xf6, 0xfa, 0xfe, 0xf5, 0x2a, 0x7e, 0x7a, 0x66, 0xed, 0xe9, 0x24, 0xed, 0xaa, 0x8d, 0x2c, - 0x20, 0x5a, 0x14, 0xa1, 0xb2, 0x46, 0x7c, 0x80, 0x1a, 0x52, 0xa9, 0xf9, 0x9f, 0x86, 0x6d, 0x77, - 0x5b, 0x9e, 0x50, 0x0a, 0x98, 0x93, 0x7c, 0x5f, 0x3e, 0x21, 0x1c, 0x7c, 0x06, 0x42, 0xb5, 0xb3, - 0xb6, 0xf8, 0x84, 0x0c, 0x0a, 0x0b, 0xa9, 0x78, 0xb9, 0xc7, 0x17, 0x57, 0xe6, 0xc6, 0xf3, 0x2b, - 0x73, 0xe3, 0xc5, 0x95, 0xb9, 0xf1, 0xc3, 0xdc, 0x34, 0x2e, 0xe6, 0xa6, 0xf1, 0x7c, 0x6e, 0x1a, - 0x2f, 0xe6, 0xa6, 0xf1, 0xd7, 0xdc, 0x34, 0x7e, 0xfe, 0xdb, 0xdc, 0xf8, 0x6a, 0x4b, 0xd3, 0xf4, - 0x6f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6e, 0x54, 0x4d, 0x9d, 0x25, 0x0b, 0x00, 0x00, -} - -func (m *HTTPIngressPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPIngressPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPIngressPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PathType != nil { - i -= len(*m.PathType) - copy(dAtA[i:], *m.PathType) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PathType))) - i-- - dAtA[i] = 0x1a - } - { - size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Path) - copy(dAtA[i:], m.Path) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *HTTPIngressRuleValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPIngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HTTPIngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Paths) > 0 { - for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Paths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *Ingress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Ingress) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Ingress) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressBackend) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressBackend) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressBackend) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Resource != nil { - { - size, err := m.Resource.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - { - size, err := m.ServicePort.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.ServiceName) - copy(dAtA[i:], m.ServiceName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, 
err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressClassSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressClassSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Parameters != nil { - { - size, err := m.Parameters.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.Controller) - copy(dAtA[i:], m.Controller) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Controller))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.IngressRuleValue.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= 
size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressRuleValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressRuleValue) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressRuleValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.HTTP != nil { - { - size, err := m.HTTP.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *IngressSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.IngressClassName != nil { - i -= len(*m.IngressClassName) - copy(dAtA[i:], *m.IngressClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.IngressClassName))) - i-- - dAtA[i] = 0x22 - } - if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - } - if len(m.TLS) > 0 { - for iNdEx := len(m.TLS) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.TLS[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if m.Backend != nil { - { - size, err := m.Backend.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *IngressStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.LoadBalancer.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *IngressTLS) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IngressTLS) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IngressTLS) MarshalToSizedBuffer(dAtA []byte) (int, 
error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.SecretName) - copy(dAtA[i:], m.SecretName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i-- - dAtA[i] = 0x12 - if len(m.Hosts) > 0 { - for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Hosts[iNdEx]) - copy(dAtA[i:], m.Hosts[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hosts[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *HTTPIngressPath) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Backend.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.PathType != nil { - l = len(*m.PathType) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HTTPIngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Paths) > 0 { - for _, e := range m.Paths { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Ingress) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressBackend) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ServicePort.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *IngressClass) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *IngressClassSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Controller) - n += 1 + l + sovGenerated(uint64(l)) - if m.Parameters != nil { - l = m.Parameters.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *IngressList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *IngressRule) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - n += 1 + l + sovGenerated(uint64(l)) - l = m.IngressRuleValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressRuleValue) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.HTTP != nil { - l = m.HTTP.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *IngressSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Backend != nil { - l = m.Backend.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - if len(m.TLS) > 0 { - for _, e := range m.TLS { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.IngressClassName != nil { - l = len(*m.IngressClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *IngressStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.LoadBalancer.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *IngressTLS) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Hosts) > 0 { - for _, s := range m.Hosts { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *HTTPIngressPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPIngressPath{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Backend:` + strings.Replace(strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1), `&`, ``, 1) + `,`, - `PathType:` + valueToStringGenerated(this.PathType) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPIngressRuleValue) String() string { - if this == nil { - return "nil" - } - repeatedStringForPaths := "[]HTTPIngressPath{" - for _, f := range this.Paths { - repeatedStringForPaths += strings.Replace(strings.Replace(f.String(), "HTTPIngressPath", "HTTPIngressPath", 1), `&`, ``, 1) + "," - } - repeatedStringForPaths += "}" - s := strings.Join([]string{`&HTTPIngressRuleValue{`, - `Paths:` + repeatedStringForPaths + `,`, - `}`, - }, "") - return s -} -func (this *Ingress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Ingress{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressSpec", "IngressSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "IngressStatus", "IngressStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressBackend) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressBackend{`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `ServicePort:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServicePort), "IntOrString", "intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "IngressClassSpec", "IngressClassSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]IngressClass{" - for _, f := range this.Items { - 
repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "IngressClass", "IngressClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IngressClassSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressClassSpec{`, - `Controller:` + fmt.Sprintf("%v", this.Controller) + `,`, - `Parameters:` + strings.Replace(fmt.Sprintf("%v", this.Parameters), "TypedLocalObjectReference", "v11.TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]Ingress{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Ingress", "Ingress", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&IngressList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *IngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRule{`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `IngressRuleValue:` + strings.Replace(strings.Replace(this.IngressRuleValue.String(), "IngressRuleValue", "IngressRuleValue", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressRuleValue) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressRuleValue{`, - `HTTP:` + strings.Replace(this.HTTP.String(), "HTTPIngressRuleValue", "HTTPIngressRuleValue", 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForTLS := "[]IngressTLS{" - for _, f := range this.TLS { - repeatedStringForTLS += strings.Replace(strings.Replace(f.String(), "IngressTLS", "IngressTLS", 1), `&`, ``, 1) + "," - } - repeatedStringForTLS += "}" - repeatedStringForRules := "[]IngressRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "IngressRule", "IngressRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" - s := strings.Join([]string{`&IngressSpec{`, - `Backend:` + strings.Replace(this.Backend.String(), "IngressBackend", "IngressBackend", 1) + `,`, - `TLS:` + repeatedStringForTLS + `,`, - `Rules:` + repeatedStringForRules + `,`, - `IngressClassName:` + valueToStringGenerated(this.IngressClassName) + `,`, - `}`, - }, "") - return s -} -func (this *IngressStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.LoadBalancer), "LoadBalancerStatus", "v11.LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *IngressTLS) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IngressTLS{`, - `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return 
"nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PathType(dAtA[iNdEx:postIndex]) - m.PathType = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Ingress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressBackend) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &v11.TypedLocalObjectReference{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) 
- wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, IngressClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressClassSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressClassSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Controller = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Parameters == nil { - m.Parameters = &v11.TypedLocalObjectReference{} - } - if err := m.Parameters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*IngressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.IngressClassName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressTLS) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - 
case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/networking/v1beta1/generated.proto b/vendor/k8s.io/api/networking/v1beta1/generated.proto deleted file 
mode 100644 index 68bede8..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/generated.proto +++ /dev/null @@ -1,276 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.networking.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// HTTPIngressPath associates a path with a backend. Incoming urls matching the -// path are forwarded to the backend. -message HTTPIngressPath { - // Path is matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. - // +optional - optional string path = 1; - - // PathType determines the interpretation of the Path matching. PathType can - // be one of the following values: - // * Exact: Matches the URL path exactly. - // * Prefix: Matches based on a URL path prefix split by '/'. Matching is - // done on a path element by element basis. A path element refers is the - // list of labels in the path split by the '/' separator. A request is a - // match for path p if every p is an element-wise prefix of p of the - // request path. Note that if the last element of the path is a substring - // of the last element in request path, it is not a match (e.g. /foo/bar - // matches /foo/bar/baz, but does not match /foo/barbaz). - // * ImplementationSpecific: Interpretation of the Path matching is up to - // the IngressClass. Implementations can treat this as a separate PathType - // or treat it identically to Prefix or Exact path types. - // Implementations are required to support all path types. - // Defaults to ImplementationSpecific. - optional string pathType = 3; - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - optional IngressBackend backend = 2; -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http:///? -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. - repeated HTTPIngressPath paths = 1; -} - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. 
An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -message Ingress { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional IngressSpec spec = 2; - - // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional IngressStatus status = 3; -} - -// IngressBackend describes all endpoints for a given service and port. -message IngressBackend { - // Specifies the name of the referenced service. - // +optional - optional string serviceName = 1; - - // Specifies the port of the referenced service. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; - - // Resource is an ObjectRef to another Kubernetes resource in the namespace - // of the Ingress object. If resource is specified, serviceName and servicePort - // must not be specified. - // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference resource = 3; -} - -// IngressClass represents the class of the Ingress, referenced by the Ingress -// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be -// used to indicate that an IngressClass should be considered default. When a -// single IngressClass resource has this annotation set to true, new Ingress -// resources without a class specified will be assigned this default class. -message IngressClass { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec is the desired state of the IngressClass. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - optional IngressClassSpec spec = 2; -} - -// IngressClassList is a collection of IngressClasses. -message IngressClassList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of IngressClasses. - // +listType=set - repeated IngressClass items = 2; -} - -// IngressClassSpec provides information about the class of an Ingress. -message IngressClassSpec { - // Controller refers to the name of the controller that should handle this - // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the - // same implementing controller. This should be specified as a - // domain-prefixed path no more than 250 characters in length, e.g. - // "acme.io/ingress-controller". This field is immutable. - optional string controller = 1; - - // Parameters is a link to a custom resource containing additional - // configuration for the controller. This is optional if the controller does - // not require extra parameters. - // +optional - optional k8s.io.api.core.v1.TypedLocalObjectReference parameters = 2; -} - -// IngressList is a collection of Ingress. 
-message IngressList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of Ingress. - repeated Ingress items = 2; -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -message IngressRule { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. - // Note the following deviations from the "host" part of the - // URI as defined in RFC 3986: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - // the IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the - // IngressRuleValue. If the host is unspecified, the Ingress routes all - // traffic based on the specified IngressRuleValue. - // - // Host can be "precise" which is a domain name without the terminating dot of - // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - // prefixed with a single wildcard label (e.g. "*.foo.com"). - // The wildcard character '*' must appear by itself as the first DNS label and - // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header - // is to equal to the suffix (removing the first label) of the wildcard rule. - // +optional - optional string host = 1; - - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - // +optional - optional IngressRuleValue ingressRuleValue = 2; -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -message IngressRuleValue { - // +optional - optional HTTPIngressRuleValue http = 1; -} - -// IngressSpec describes the Ingress the user wishes to exist. -message IngressSpec { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. 
An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. - // +optional - optional string ingressClassName = 4; - - // A default backend capable of servicing requests that don't match any - // rule. At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - // +optional - optional IngressBackend backend = 1; - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - // +optional - repeated IngressTLS tls = 2; - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - // +optional - repeated IngressRule rules = 3; -} - -// IngressStatus describe the current state of the Ingress. -message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. - // +optional - optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; -} - -// IngressTLS describes the transport layer security associated with an Ingress. -message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - // +optional - repeated string hosts = 1; - - // SecretName is the name of the secret used to terminate TLS traffic on - // port 443. Field is left optional to allow TLS routing based on SNI - // hostname alone. If the SNI host in a listener conflicts with the "Host" - // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. - // +optional - optional string secretName = 2; -} - diff --git a/vendor/k8s.io/api/networking/v1beta1/register.go b/vendor/k8s.io/api/networking/v1beta1/register.go deleted file mode 100644 index 0423495..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "networking.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder holds functions that add things to a scheme - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - - // AddToScheme adds the types of this group into the given scheme. - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Ingress{}, - &IngressList{}, - &IngressClass{}, - &IngressClassList{}, - ) - // Add the watch version that applies - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/networking/v1beta1/types.go b/vendor/k8s.io/api/networking/v1beta1/types.go deleted file mode 100644 index 46f530b..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/types.go +++ /dev/null @@ -1,320 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -type Ingress struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current state of the Ingress. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// IngressList is a collection of Ingress. -type IngressList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Ingress. - Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// IngressSpec describes the Ingress the user wishes to exist. -type IngressSpec struct { - // IngressClassName is the name of the IngressClass cluster resource. The - // associated IngressClass defines which controller will implement the - // resource. This replaces the deprecated `kubernetes.io/ingress.class` - // annotation. For backwards compatibility, when that annotation is set, it - // must be given precedence over this field. The controller may emit a - // warning if the field and annotation have different values. - // Implementations of this API should ignore Ingresses without a class - // specified. An IngressClass resource may be marked as default, which can - // be used to set a default value for this field. For more information, - // refer to the IngressClass documentation. - // +optional - IngressClassName *string `json:"ingressClassName,omitempty" protobuf:"bytes,4,opt,name=ingressClassName"` - - // A default backend capable of servicing requests that don't match any - // rule. At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - // +optional - Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - // +optional - TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - // +optional - Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - // TODO: Add the ability to specify load-balancer IP through claims -} - -// IngressTLS describes the transport layer security associated with an Ingress. -type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - // +optional - Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate TLS traffic on - // port 443. Field is left optional to allow TLS routing based on SNI - // hostname alone. 
If the SNI host in a listener conflicts with the "Host" - // header field used by an IngressRule, the SNI host is used for termination - // and value of the Host header is used for routing. - // +optional - SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` - // TODO: Consider specifying different modes of termination, protocols etc. -} - -// IngressStatus describe the current state of the Ingress. -type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. - // +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined by RFC 3986. - // Note the following deviations from the "host" part of the - // URI as defined in RFC 3986: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to - // the IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the - // IngressRuleValue. If the host is unspecified, the Ingress routes all - // traffic based on the specified IngressRuleValue. - // - // Host can be "precise" which is a domain name without the terminating dot of - // a network host (e.g. "foo.bar.com") or "wildcard", which is a domain name - // prefixed with a single wildcard label (e.g. "*.foo.com"). - // The wildcard character '*' must appear by itself as the first DNS label and - // matches only a single label. You cannot have a wildcard label by itself (e.g. Host == "*"). - // Requests will be matched against the Host field in the following way: - // 1. If Host is precise, the request matches this rule if the http host header is equal to Host. - // 2. If Host is a wildcard, then the request matches this rule if the http host header - // is to equal to the suffix (removing the first label) of the wildcard rule. - // +optional - Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -type IngressRuleValue struct { - //TODO: - // 1. Consider renaming this resource and the associated rules so they - // aren't tied to Ingress. They can be used to route intra-cluster traffic. - // 2. 
Consider adding fields for ingress-type specific global options - // usable by a loadbalancer, like http keep-alive. - - // +optional - HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http:///? -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. - Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` - // TODO: Consider adding fields for ingress-type specific global - // options usable by a loadbalancer, like http keep-alive. -} - -// PathType represents the type of path referred to by a HTTPIngressPath. -type PathType string - -const ( - // PathTypeExact matches the URL path exactly and with case sensitivity. - PathTypeExact = PathType("Exact") - - // PathTypePrefix matches based on a URL path prefix split by '/'. Matching - // is case sensitive and done on a path element by element basis. A path - // element refers to the list of labels in the path split by the '/' - // separator. A request is a match for path p if every p is an element-wise - // prefix of p of the request path. Note that if the last element of the - // path is a substring of the last element in request path, it is not a - // match (e.g. /foo/bar matches /foo/bar/baz, but does not match - // /foo/barbaz). If multiple matching paths exist in an Ingress spec, the - // longest matching path is given priority. - // Examples: - // - /foo/bar does not match requests to /foo/barbaz - // - /foo/bar matches request to /foo/bar and /foo/bar/baz - // - /foo and /foo/ both match requests to /foo and /foo/. If both paths are - // present in an Ingress spec, the longest matching path (/foo/) is given - // priority. - PathTypePrefix = PathType("Prefix") - - // PathTypeImplementationSpecific matching is up to the IngressClass. - // Implementations can treat this as a separate PathType or treat it - // identically to Prefix or Exact path types. - PathTypeImplementationSpecific = PathType("ImplementationSpecific") -) - -// HTTPIngressPath associates a path with a backend. Incoming urls matching the -// path are forwarded to the backend. -type HTTPIngressPath struct { - // Path is matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" part of a URL - // as defined by RFC 3986. Paths must begin with a '/'. When unspecified, - // all paths from incoming requests are matched. - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - - // PathType determines the interpretation of the Path matching. PathType can - // be one of the following values: - // * Exact: Matches the URL path exactly. - // * Prefix: Matches based on a URL path prefix split by '/'. Matching is - // done on a path element by element basis. A path element refers is the - // list of labels in the path split by the '/' separator. A request is a - // match for path p if every p is an element-wise prefix of p of the - // request path. Note that if the last element of the path is a substring - // of the last element in request path, it is not a match (e.g. /foo/bar - // matches /foo/bar/baz, but does not match /foo/barbaz). 
- // * ImplementationSpecific: Interpretation of the Path matching is up to - // the IngressClass. Implementations can treat this as a separate PathType - // or treat it identically to Prefix or Exact path types. - // Implementations are required to support all path types. - // Defaults to ImplementationSpecific. - PathType *PathType `json:"pathType,omitempty" protobuf:"bytes,3,opt,name=pathType"` - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` -} - -// IngressBackend describes all endpoints for a given service and port. -type IngressBackend struct { - // Specifies the name of the referenced service. - // +optional - ServiceName string `json:"serviceName,omitempty" protobuf:"bytes,1,opt,name=serviceName"` - - // Specifies the port of the referenced service. - // +optional - ServicePort intstr.IntOrString `json:"servicePort,omitempty" protobuf:"bytes,2,opt,name=servicePort"` - - // Resource is an ObjectRef to another Kubernetes resource in the namespace - // of the Ingress object. If resource is specified, serviceName and servicePort - // must not be specified. - // +optional - Resource *v1.TypedLocalObjectReference `json:"resource,omitempty" protobuf:"bytes,3,opt,name=resource"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// IngressClass represents the class of the Ingress, referenced by the Ingress -// Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be -// used to indicate that an IngressClass should be considered default. When a -// single IngressClass resource has this annotation set to true, new Ingress -// resources without a class specified will be assigned this default class. -type IngressClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is the desired state of the IngressClass. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - // +optional - Spec IngressClassSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// IngressClassSpec provides information about the class of an Ingress. -type IngressClassSpec struct { - // Controller refers to the name of the controller that should handle this - // class. This allows for different "flavors" that are controlled by the - // same controller. For example, you may have different Parameters for the - // same implementing controller. This should be specified as a - // domain-prefixed path no more than 250 characters in length, e.g. - // "acme.io/ingress-controller". This field is immutable. - Controller string `json:"controller,omitempty" protobuf:"bytes,1,opt,name=controller"` - - // Parameters is a link to a custom resource containing additional - // configuration for the controller. This is optional if the controller does - // not require extra parameters. - // +optional - Parameters *v1.TypedLocalObjectReference `json:"parameters,omitempty" protobuf:"bytes,2,opt,name=parameters"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// IngressClassList is a collection of IngressClasses. 
-type IngressClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of IngressClasses. - // +listType=set - Items []IngressClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index c774249..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,160 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_HTTPIngressPath = map[string]string{ - "": "HTTPIngressPath associates a path with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. When unspecified, all paths from incoming requests are matched.", - "pathType": "PathType determines the interpretation of the Path matching. PathType can be one of the following values: * Exact: Matches the URL path exactly. * Prefix: Matches based on a URL path prefix split by '/'. Matching is\n done on a path element by element basis. A path element refers is the\n list of labels in the path split by the '/' separator. A request is a\n match for path p if every p is an element-wise prefix of p of the\n request path. Note that if the last element of the path is a substring\n of the last element in request path, it is not a match (e.g. /foo/bar\n matches /foo/bar/baz, but does not match /foo/barbaz).\n* ImplementationSpecific: Interpretation of the Path matching is up to\n the IngressClass. Implementations can treat this as a separate PathType\n or treat it identically to Prefix or Exact path types.\nImplementations are required to support all path types. 
Defaults to ImplementationSpecific.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", -} - -func (HTTPIngressPath) SwaggerDoc() map[string]string { - return map_HTTPIngressPath -} - -var map_HTTPIngressRuleValue = map[string]string{ - "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", -} - -func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { - return map_HTTPIngressRuleValue -} - -var map_Ingress = map[string]string{ - "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (Ingress) SwaggerDoc() map[string]string { - return map_Ingress -} - -var map_IngressBackend = map[string]string{ - "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", - "resource": "Resource is an ObjectRef to another Kubernetes resource in the namespace of the Ingress object. If resource is specified, serviceName and servicePort must not be specified.", -} - -func (IngressBackend) SwaggerDoc() map[string]string { - return map_IngressBackend -} - -var map_IngressClass = map[string]string{ - "": "IngressClass represents the class of the Ingress, referenced by the Ingress Spec. The `ingressclass.kubernetes.io/is-default-class` annotation can be used to indicate that an IngressClass should be considered default. When a single IngressClass resource has this annotation set to true, new Ingress resources without a class specified will be assigned this default class.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Spec is the desired state of the IngressClass. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (IngressClass) SwaggerDoc() map[string]string { - return map_IngressClass -} - -var map_IngressClassList = map[string]string{ - "": "IngressClassList is a collection of IngressClasses.", - "metadata": "Standard list metadata.", - "items": "Items is the list of IngressClasses.", -} - -func (IngressClassList) SwaggerDoc() map[string]string { - return map_IngressClassList -} - -var map_IngressClassSpec = map[string]string{ - "": "IngressClassSpec provides information about the class of an Ingress.", - "controller": "Controller refers to the name of the controller that should handle this class. 
This allows for different \"flavors\" that are controlled by the same controller. For example, you may have different Parameters for the same implementing controller. This should be specified as a domain-prefixed path no more than 250 characters in length, e.g. \"acme.io/ingress-controller\". This field is immutable.", - "parameters": "Parameters is a link to a custom resource containing additional configuration for the controller. This is optional if the controller does not require extra parameters.", -} - -func (IngressClassSpec) SwaggerDoc() map[string]string { - return map_IngressClassSpec -} - -var map_IngressList = map[string]string{ - "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", -} - -func (IngressList) SwaggerDoc() map[string]string { - return map_IngressList -} - -var map_IngressRule = map[string]string{ - "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in RFC 3986: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to\n the IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.\n\nHost can be \"precise\" which is a domain name without the terminating dot of a network host (e.g. \"foo.bar.com\") or \"wildcard\", which is a domain name prefixed with a single wildcard label (e.g. \"*.foo.com\"). The wildcard character '*' must appear by itself as the first DNS label and matches only a single label. You cannot have a wildcard label by itself (e.g. Host == \"*\"). Requests will be matched against the Host field in the following way: 1. If Host is precise, the request matches this rule if the http host header is equal to Host. 2. If Host is a wildcard, then the request matches this rule if the http host header is to equal to the suffix (removing the first label) of the wildcard rule.", -} - -func (IngressRule) SwaggerDoc() map[string]string { - return map_IngressRule -} - -var map_IngressRuleValue = map[string]string{ - "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", -} - -func (IngressRuleValue) SwaggerDoc() map[string]string { - return map_IngressRuleValue -} - -var map_IngressSpec = map[string]string{ - "": "IngressSpec describes the Ingress the user wishes to exist.", - "ingressClassName": "IngressClassName is the name of the IngressClass cluster resource. The associated IngressClass defines which controller will implement the resource. This replaces the deprecated `kubernetes.io/ingress.class` annotation. 
For backwards compatibility, when that annotation is set, it must be given precedence over this field. The controller may emit a warning if the field and annotation have different values. Implementations of this API should ignore Ingresses without a class specified. An IngressClass resource may be marked as default, which can be used to set a default value for this field. For more information, refer to the IngressClass documentation.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", -} - -func (IngressSpec) SwaggerDoc() map[string]string { - return map_IngressSpec -} - -var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", -} - -func (IngressStatus) SwaggerDoc() map[string]string { - return map_IngressStatus -} - -var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate TLS traffic on port 443. Field is left optional to allow TLS routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", -} - -func (IngressTLS) SwaggerDoc() map[string]string { - return map_IngressTLS -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go b/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go deleted file mode 100644 index 1629b5d..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/well_known_annotations.go +++ /dev/null @@ -1,32 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -const ( - // AnnotationIsDefaultIngressClass can be used to indicate that an - // IngressClass should be considered default. 
When a single IngressClass - // resource has this annotation set to true, new Ingress resources without a - // class specified will be assigned this default class. - AnnotationIsDefaultIngressClass = "ingressclass.kubernetes.io/is-default-class" - - // AnnotationIngressClass indicates the class of an Ingress to be used when - // determining which controller should implement the Ingress. Use of this - // annotation is deprecated. The Ingress class field should be used instead - // of this annotation. - // +deprecated - AnnotationIngressClass = "kubernetes.io/ingress.class" -) diff --git a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index d55ccde..0000000 --- a/vendor/k8s.io/api/networking/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,351 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { - *out = *in - if in.PathType != nil { - in, out := &in.PathType, &out.PathType - *out = new(PathType) - **out = **in - } - in.Backend.DeepCopyInto(&out.Backend) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath. -func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath { - if in == nil { - return nil - } - out := new(HTTPIngressPath) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]HTTPIngressPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue. -func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { - if in == nil { - return nil - } - out := new(HTTPIngressRuleValue) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Ingress) DeepCopyInto(out *Ingress) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. 
-func (in *Ingress) DeepCopy() *Ingress { - if in == nil { - return nil - } - out := new(Ingress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Ingress) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { - *out = *in - out.ServicePort = in.ServicePort - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(v1.TypedLocalObjectReference) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend. -func (in *IngressBackend) DeepCopy() *IngressBackend { - if in == nil { - return nil - } - out := new(IngressBackend) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressClass) DeepCopyInto(out *IngressClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClass. -func (in *IngressClass) DeepCopy() *IngressClass { - if in == nil { - return nil - } - out := new(IngressClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressClassList) DeepCopyInto(out *IngressClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]IngressClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassList. -func (in *IngressClassList) DeepCopy() *IngressClassList { - if in == nil { - return nil - } - out := new(IngressClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressClassSpec) DeepCopyInto(out *IngressClassSpec) { - *out = *in - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = new(v1.TypedLocalObjectReference) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressClassSpec. -func (in *IngressClassSpec) DeepCopy() *IngressClassSpec { - if in == nil { - return nil - } - out := new(IngressClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IngressList) DeepCopyInto(out *IngressList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Ingress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. -func (in *IngressList) DeepCopy() *IngressList { - if in == nil { - return nil - } - out := new(IngressList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressRule) DeepCopyInto(out *IngressRule) { - *out = *in - in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. -func (in *IngressRule) DeepCopy() *IngressRule { - if in == nil { - return nil - } - out := new(IngressRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) { - *out = *in - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(HTTPIngressRuleValue) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue. -func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { - if in == nil { - return nil - } - out := new(IngressRuleValue) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { - *out = *in - if in.IngressClassName != nil { - in, out := &in.IngressClassName, &out.IngressClassName - *out = new(string) - **out = **in - } - if in.Backend != nil { - in, out := &in.Backend, &out.Backend - *out = new(IngressBackend) - (*in).DeepCopyInto(*out) - } - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = make([]IngressTLS, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]IngressRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. -func (in *IngressSpec) DeepCopy() *IngressSpec { - if in == nil { - return nil - } - out := new(IngressSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { - *out = *in - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. 
-func (in *IngressStatus) DeepCopy() *IngressStatus { - if in == nil { - return nil - } - out := new(IngressStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressTLS) DeepCopyInto(out *IngressTLS) { - *out = *in - if in.Hosts != nil { - in, out := &in.Hosts, &out.Hosts - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS. -func (in *IngressTLS) DeepCopy() *IngressTLS { - if in == nil { - return nil - } - out := new(IngressTLS) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go b/vendor/k8s.io/api/node/v1alpha1/generated.pb.go deleted file mode 100644 index e6658a9..0000000 --- a/vendor/k8s.io/api/node/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1583 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto - -package v1alpha1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" - k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" - resource "k8s.io/apimachinery/pkg/api/resource" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *Overhead) Reset() { *m = Overhead{} } -func (*Overhead) ProtoMessage() {} -func (*Overhead) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{0} -} -func (m *Overhead) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Overhead) XXX_Merge(src proto.Message) { - xxx_messageInfo_Overhead.Merge(m, src) -} -func (m *Overhead) XXX_Size() int { - return m.Size() -} -func (m *Overhead) XXX_DiscardUnknown() { - xxx_messageInfo_Overhead.DiscardUnknown(m) -} - -var xxx_messageInfo_Overhead proto.InternalMessageInfo - -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{1} -} -func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClass.Merge(m, src) -} -func (m *RuntimeClass) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClass) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClass.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo - -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{2} -} -func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassList.Merge(m, src) -} -func (m *RuntimeClassList) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassList) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo - -func (m *RuntimeClassSpec) Reset() { *m = RuntimeClassSpec{} } -func (*RuntimeClassSpec) ProtoMessage() {} -func (*RuntimeClassSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_82a78945ab308218, []int{3} -} -func (m *RuntimeClassSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassSpec.Merge(m, src) -} -func (m *RuntimeClassSpec) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassSpec) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassSpec proto.InternalMessageInfo - -func (m *Scheduling) Reset() { *m = Scheduling{} } -func (*Scheduling) ProtoMessage() {} -func (*Scheduling) Descriptor() ([]byte, []int) { - return 
fileDescriptor_82a78945ab308218, []int{4} -} -func (m *Scheduling) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scheduling) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scheduling.Merge(m, src) -} -func (m *Scheduling) XXX_Size() int { - return m.Size() -} -func (m *Scheduling) XXX_DiscardUnknown() { - xxx_messageInfo_Scheduling.DiscardUnknown(m) -} - -var xxx_messageInfo_Scheduling proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1alpha1.Overhead") - proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1alpha1.Overhead.PodFixedEntry") - proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1alpha1.RuntimeClass") - proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassList") - proto.RegisterType((*RuntimeClassSpec)(nil), "k8s.io.api.node.v1alpha1.RuntimeClassSpec") - proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1alpha1.Scheduling") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1alpha1.Scheduling.NodeSelectorEntry") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1alpha1/generated.proto", fileDescriptor_82a78945ab308218) -} - -var fileDescriptor_82a78945ab308218 = []byte{ - // 698 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbf, 0x6f, 0xd3, 0x4e, - 0x14, 0xcf, 0xa5, 0xad, 0x94, 0x5e, 0xd2, 0xaa, 0x5f, 0x7f, 0x2b, 0x14, 0x65, 0x70, 0x2a, 0x0b, - 0xa1, 0x0a, 0xa9, 0x67, 0x5a, 0xa1, 0xaa, 0x62, 0x00, 0x61, 0x7e, 0x08, 0x44, 0x69, 0xc1, 0x2d, - 0x0b, 0x62, 0xe0, 0x62, 0x3f, 0x1c, 0x13, 0xdb, 0x67, 0xd9, 0xe7, 0x88, 0x6c, 0x88, 0x05, 0x89, - 0x89, 0x89, 0xff, 0x06, 0xe6, 0x8e, 0x9d, 0x50, 0xa7, 0x96, 0x86, 0xff, 0x81, 0x81, 0x09, 0x9d, - 0x7d, 0x4e, 0x9c, 0xa4, 0xa1, 0x61, 0xf3, 0xdd, 0x7d, 0x7e, 0xdc, 0xfb, 0xbc, 0x7b, 0xc6, 0x77, - 0x3b, 0x3b, 0x31, 0x71, 0x99, 0xde, 0x49, 0x5a, 0x10, 0x05, 0xc0, 0x21, 0xd6, 0xbb, 0x10, 0xd8, - 0x2c, 0xd2, 0xe5, 0x01, 0x0d, 0x5d, 0x3d, 0x60, 0x36, 0xe8, 0xdd, 0x4d, 0xea, 0x85, 0x6d, 0xba, - 0xa9, 0x3b, 0x10, 0x40, 0x44, 0x39, 0xd8, 0x24, 0x8c, 0x18, 0x67, 0x4a, 0x3d, 0x43, 0x12, 0x1a, - 0xba, 0x44, 0x20, 0x49, 0x8e, 0x6c, 0x6c, 0x38, 0x2e, 0x6f, 0x27, 0x2d, 0x62, 0x31, 0x5f, 0x77, - 0x98, 0xc3, 0xf4, 0x94, 0xd0, 0x4a, 0xde, 0xa4, 0xab, 0x74, 0x91, 0x7e, 0x65, 0x42, 0x0d, 0xad, - 0x60, 0x69, 0xb1, 0x48, 0x58, 0x8e, 0x9b, 0x35, 0x6e, 0x0e, 0x31, 0x3e, 0xb5, 0xda, 0x6e, 0x00, - 0x51, 0x4f, 0x0f, 0x3b, 0x4e, 0x4a, 0x8a, 0x20, 0x66, 0x49, 0x64, 0xc1, 0x3f, 0xb1, 0x62, 0xdd, - 0x07, 0x4e, 0x2f, 0xf2, 0xd2, 0xa7, 0xb1, 0xa2, 0x24, 0xe0, 0xae, 0x3f, 0x69, 0xb3, 0x7d, 0x19, - 0x21, 0xb6, 0xda, 0xe0, 0xd3, 0x71, 0x9e, 0x76, 0x5c, 0xc6, 0x95, 0xfd, 0x2e, 0x44, 0x6d, 0xa0, - 0xb6, 0xf2, 0x1d, 0xe1, 0x4a, 0xc8, 0xec, 0x87, 0xee, 0x3b, 0xb0, 0xeb, 0x68, 0x6d, 0x6e, 0xbd, - 0xba, 0x75, 0x83, 0x4c, 0x8b, 0x98, 0xe4, 0x34, 0xf2, 0x4c, 0x52, 0x1e, 0x04, 0x3c, 0xea, 0x19, - 0x1f, 0xd1, 0xd1, 0x69, 0xb3, 0xd4, 0x3f, 0x6d, 0x56, 0xf2, 0xfd, 0xdf, 0xa7, 0xcd, 0xe6, 0x64, - 0xbe, 0xc4, 0x94, 0x91, 0xed, 0xba, 0x31, 0xff, 0x70, 0xf6, 0x57, 0xc8, 0x1e, 0xf5, 0xe1, 0xd3, - 0x59, 0x73, 0x63, 0x96, 0x0e, 0x90, 0xe7, 0x09, 0x0d, 0xb8, 0xcb, 0x7b, 0xe6, 0xa0, 0x96, 0x46, - 0x07, 0x2f, 0x8d, 0x5c, 0x52, 0x59, 0xc1, 
0x73, 0x1d, 0xe8, 0xd5, 0xd1, 0x1a, 0x5a, 0x5f, 0x34, - 0xc5, 0xa7, 0x72, 0x1f, 0x2f, 0x74, 0xa9, 0x97, 0x40, 0xbd, 0xbc, 0x86, 0xd6, 0xab, 0x5b, 0xa4, - 0x50, 0xf7, 0xc0, 0x8b, 0x84, 0x1d, 0x27, 0x0d, 0x62, 0xd2, 0x2b, 0x23, 0xdf, 0x2a, 0xef, 0x20, - 0xed, 0x1b, 0xc2, 0x35, 0x33, 0x4b, 0xfd, 0x9e, 0x47, 0xe3, 0x58, 0x79, 0x8d, 0x2b, 0xa2, 0xcf, - 0x36, 0xe5, 0x34, 0x75, 0x1c, 0x4d, 0x75, 0x42, 0x3d, 0x26, 0x02, 0x4d, 0xba, 0x9b, 0x64, 0xbf, - 0xf5, 0x16, 0x2c, 0xfe, 0x14, 0x38, 0x35, 0x14, 0x19, 0x2a, 0x1e, 0xee, 0x99, 0x03, 0x55, 0x65, - 0x17, 0xcf, 0xc7, 0x21, 0x58, 0xf2, 0xee, 0xd7, 0xa7, 0xf7, 0xac, 0x78, 0xaf, 0x83, 0x10, 0x2c, - 0xa3, 0x26, 0x75, 0xe7, 0xc5, 0xca, 0x4c, 0x55, 0xb4, 0xaf, 0x08, 0xaf, 0x14, 0x81, 0xa2, 0x41, - 0xca, 0xab, 0x89, 0x22, 0xc8, 0x6c, 0x45, 0x08, 0x76, 0x5a, 0xc2, 0x4a, 0xfe, 0x2e, 0xf2, 0x9d, - 0x42, 0x01, 0x4f, 0xf0, 0x82, 0xcb, 0xc1, 0x8f, 0xeb, 0xe5, 0xf4, 0xd5, 0x5d, 0x9b, 0xad, 0x02, - 0x63, 0x49, 0x4a, 0x2e, 0x3c, 0x16, 0x64, 0x33, 0xd3, 0xd0, 0x7e, 0x8d, 0xdd, 0x5f, 0x94, 0xa6, - 0xdc, 0xc6, 0xcb, 0x72, 0x14, 0x1e, 0xd1, 0xc0, 0xf6, 0x20, 0xca, 0x9a, 0x6f, 0x5c, 0x91, 0x12, - 0xcb, 0xe6, 0xc8, 0xa9, 0x39, 0x86, 0x56, 0x76, 0x71, 0x85, 0xc9, 0x07, 0x2f, 0x63, 0xd6, 0x2e, - 0x1f, 0x0d, 0xa3, 0x26, 0xea, 0xcd, 0x57, 0xe6, 0x40, 0x41, 0x39, 0xc4, 0x58, 0x0c, 0xa4, 0x9d, - 0x78, 0x6e, 0xe0, 0xd4, 0xe7, 0x52, 0xbd, 0xab, 0xd3, 0xf5, 0x0e, 0x06, 0x58, 0x63, 0x59, 0x3c, - 0x82, 0xe1, 0xda, 0x2c, 0xe8, 0x68, 0x5f, 0xca, 0xb8, 0x70, 0xa4, 0x84, 0xb8, 0x26, 0x64, 0x0e, - 0xc0, 0x03, 0x8b, 0xb3, 0x48, 0x4e, 0xf4, 0xf6, 0x2c, 0x36, 0x64, 0xaf, 0x40, 0xcc, 0xe6, 0x7a, - 0x55, 0x06, 0x55, 0x2b, 0x1e, 0x99, 0x23, 0x0e, 0xca, 0x0b, 0x5c, 0xe5, 0xcc, 0x13, 0x3f, 0x18, - 0x97, 0x05, 0x79, 0x33, 0xd5, 0xa2, 0xa1, 0x98, 0x6c, 0xf1, 0x2a, 0x0e, 0x07, 0x30, 0xe3, 0x7f, - 0x29, 0x5c, 0x1d, 0xee, 0xc5, 0x66, 0x51, 0xa7, 0x71, 0x07, 0xff, 0x37, 0x71, 0x9f, 0x0b, 0x46, - 0x78, 0xb5, 0x38, 0xc2, 0x8b, 0x85, 0x91, 0x34, 0xc8, 0xd1, 0xb9, 0x5a, 0x3a, 0x3e, 0x57, 0x4b, - 0x27, 0xe7, 0x6a, 0xe9, 0x7d, 0x5f, 0x45, 0x47, 0x7d, 0x15, 0x1d, 0xf7, 0x55, 0x74, 0xd2, 0x57, - 0xd1, 0x8f, 0xbe, 0x8a, 0x3e, 0xff, 0x54, 0x4b, 0x2f, 0x2b, 0x79, 0x10, 0x7f, 0x02, 0x00, 0x00, - 0xff, 0xff, 0xa8, 0x77, 0xef, 0x80, 0x9b, 0x06, 0x00, 0x00, -} - -func (m *Overhead) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PodFixed) > 0 { - keysForPodFixed := make([]string, 0, len(m.PodFixed)) - for k := range m.PodFixed { - keysForPodFixed = append(keysForPodFixed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) - for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { - v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] - baseI := i - { - size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForPodFixed[iNdEx]) - copy(dAtA[i:], keysForPodFixed[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - 
i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RuntimeClassSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Scheduling != nil { - { - size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if m.Overhead != nil { - { - size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - i -= len(m.RuntimeHandler) - copy(dAtA[i:], m.RuntimeHandler) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RuntimeHandler))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Scheduling) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tolerations) > 0 { - for iNdEx := 
len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForNodeSelector[iNdEx]) - copy(dAtA[i:], keysForNodeSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Overhead) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.PodFixed) > 0 { - for k, v := range m.PodFixed { - _ = k - _ = v - l = ((*resource.Quantity)(&v)).Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *RuntimeClass) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RuntimeClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuntimeClassSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.RuntimeHandler) - n += 1 + l + sovGenerated(uint64(l)) - if m.Overhead != nil { - l = m.Overhead.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Scheduling != nil { - l = m.Scheduling.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Scheduling) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Overhead) String() string { - if this == nil { - return "nil" - } - keysForPodFixed := make([]string, 0, len(this.PodFixed)) - for k := range this.PodFixed { - keysForPodFixed = append(keysForPodFixed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) - 
mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" - for _, k := range keysForPodFixed { - mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) - } - mapStringForPodFixed += "}" - s := strings.Join([]string{`&Overhead{`, - `PodFixed:` + mapStringForPodFixed + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "RuntimeClassSpec", "RuntimeClassSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]RuntimeClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassSpec{`, - `RuntimeHandler:` + fmt.Sprintf("%v", this.RuntimeHandler) + `,`, - `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, - `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scheduling) String() string { - if this == nil { - return "nil" - } - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," - } - repeatedStringForTolerations += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) - } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&Scheduling{`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Overhead) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Overhead: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodFixed == nil { - m.PodFixed = make(k8s_io_api_core_v1.ResourceList) - } - var mapkey k8s_io_api_core_v1.ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RuntimeClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeHandler", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RuntimeHandler = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Overhead == nil { - m.Overhead = &Overhead{} - } - if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Scheduling == nil { - m.Scheduling = &Scheduling{} - } - if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Scheduling) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - 
return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.NodeSelector[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tolerations = append(m.Tolerations, v11.Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/node/v1alpha1/generated.proto b/vendor/k8s.io/api/node/v1alpha1/generated.proto deleted 
file mode 100644 index ac05f83..0000000 --- a/vendor/k8s.io/api/node/v1alpha1/generated.proto +++ /dev/null @@ -1,118 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.node.v1alpha1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// Overhead structure represents the resource overhead associated with running a pod. -message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. - // +optional - map podFixed = 1; -} - -// RuntimeClass defines a class of container runtime supported in the cluster. -// The RuntimeClass is used to determine which container runtime is used to run -// all containers in a pod. RuntimeClasses are (currently) manually defined by a -// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is -// responsible for resolving the RuntimeClassName reference before running the -// pod. For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md -message RuntimeClass { - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - optional RuntimeClassSpec spec = 2; -} - -// RuntimeClassList is a list of RuntimeClass objects. -message RuntimeClassList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated RuntimeClass items = 2; -} - -// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters -// that are required to describe the RuntimeClass to the Container Runtime -// Interface (CRI) implementation, as well as any other components that need to -// understand how the pod will be run. The RuntimeClassSpec is immutable. -message RuntimeClassSpec { - // RuntimeHandler specifies the underlying runtime and configuration that the - // CRI implementation will use to handle pods of this class. The possible - // values are specific to the node & CRI configuration. It is assumed that - // all handlers are available on every node, and handlers of the same name are - // equivalent on every node. 
- // For example, a handler called "runc" might specify that the runc OCI - // runtime (using native Linux containers) will be used to run the containers - // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. - optional string runtimeHandler = 1; - - // Overhead represents the resource overhead associated with running a pod for a - // given RuntimeClass. For more details, see - // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. - // +optional - optional Overhead overhead = 2; - - // Scheduling holds the scheduling constraints to ensure that pods running - // with this RuntimeClass are scheduled to nodes that support it. - // If scheduling is nil, this RuntimeClass is assumed to be supported by all - // nodes. - // +optional - optional Scheduling scheduling = 3; -} - -// Scheduling specifies the scheduling constraints for nodes supporting a -// RuntimeClass. -message Scheduling { - // nodeSelector lists labels that must be present on nodes that support this - // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a - // node matched by this selector. The RuntimeClass nodeSelector is merged - // with a pod's existing nodeSelector. Any conflicts will cause the pod to - // be rejected in admission. - // +optional - map nodeSelector = 1; - - // tolerations are appended (excluding duplicates) to pods running with this - // RuntimeClass during admission, effectively unioning the set of nodes - // tolerated by the pod and the RuntimeClass. - // +optional - // +listType=atomic - repeated k8s.io.api.core.v1.Toleration tolerations = 2; -} - diff --git a/vendor/k8s.io/api/node/v1alpha1/register.go b/vendor/k8s.io/api/node/v1alpha1/register.go deleted file mode 100644 index b608214..0000000 --- a/vendor/k8s.io/api/node/v1alpha1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "node.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // SchemeBuilder is the scheme builder with scheme init functions to run for this API package - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme - AddToScheme = SchemeBuilder.AddToScheme -) - -// addKnownTypes adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &RuntimeClass{}, - &RuntimeClassList{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/node/v1alpha1/types.go b/vendor/k8s.io/api/node/v1alpha1/types.go deleted file mode 100644 index b597671..0000000 --- a/vendor/k8s.io/api/node/v1alpha1/types.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RuntimeClass defines a class of container runtime supported in the cluster. -// The RuntimeClass is used to determine which container runtime is used to run -// all containers in a pod. RuntimeClasses are (currently) manually defined by a -// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is -// responsible for resolving the RuntimeClassName reference before running the -// pod. For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md -type RuntimeClass struct { - metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the RuntimeClass - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status - Spec RuntimeClassSpec `json:"spec" protobuf:"bytes,2,name=spec"` -} - -// RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters -// that are required to describe the RuntimeClass to the Container Runtime -// Interface (CRI) implementation, as well as any other components that need to -// understand how the pod will be run. 
The RuntimeClassSpec is immutable. -type RuntimeClassSpec struct { - // RuntimeHandler specifies the underlying runtime and configuration that the - // CRI implementation will use to handle pods of this class. The possible - // values are specific to the node & CRI configuration. It is assumed that - // all handlers are available on every node, and handlers of the same name are - // equivalent on every node. - // For example, a handler called "runc" might specify that the runc OCI - // runtime (using native Linux containers) will be used to run the containers - // in a pod. - // The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements - // and is immutable. - RuntimeHandler string `json:"runtimeHandler" protobuf:"bytes,1,opt,name=runtimeHandler"` - - // Overhead represents the resource overhead associated with running a pod for a - // given RuntimeClass. For more details, see - // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. - // +optional - Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,2,opt,name=overhead"` - - // Scheduling holds the scheduling constraints to ensure that pods running - // with this RuntimeClass are scheduled to nodes that support it. - // If scheduling is nil, this RuntimeClass is assumed to be supported by all - // nodes. - // +optional - Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,3,opt,name=scheduling"` -} - -// Overhead structure represents the resource overhead associated with running a pod. -type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. - // +optional - PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` -} - -// Scheduling specifies the scheduling constraints for nodes supporting a -// RuntimeClass. -type Scheduling struct { - // nodeSelector lists labels that must be present on nodes that support this - // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a - // node matched by this selector. The RuntimeClass nodeSelector is merged - // with a pod's existing nodeSelector. Any conflicts will cause the pod to - // be rejected in admission. - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - - // tolerations are appended (excluding duplicates) to pods running with this - // RuntimeClass during admission, effectively unioning the set of nodes - // tolerated by the pod and the RuntimeClass. - // +optional - // +listType=atomic - Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RuntimeClassList is a list of RuntimeClass objects. -type RuntimeClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. 
- Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 3900001..0000000 --- a/vendor/k8s.io/api/node/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Overhead = map[string]string{ - "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", -} - -func (Overhead) SwaggerDoc() map[string]string { - return map_Overhead -} - -var map_RuntimeClass = map[string]string{ - "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the RuntimeClass More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", -} - -func (RuntimeClass) SwaggerDoc() map[string]string { - return map_RuntimeClass -} - -var map_RuntimeClassList = map[string]string{ - "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (RuntimeClassList) SwaggerDoc() map[string]string { - return map_RuntimeClassList -} - -var map_RuntimeClassSpec = map[string]string{ - "": "RuntimeClassSpec is a specification of a RuntimeClass. It contains parameters that are required to describe the RuntimeClass to the Container Runtime Interface (CRI) implementation, as well as any other components that need to understand how the pod will be run. 
The RuntimeClassSpec is immutable.", - "runtimeHandler": "RuntimeHandler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The RuntimeHandler must conform to the DNS Label (RFC 1123) requirements and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", -} - -func (RuntimeClassSpec) SwaggerDoc() map[string]string { - return map_RuntimeClassSpec -} - -var map_Scheduling = map[string]string{ - "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", - "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", - "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", -} - -func (Scheduling) SwaggerDoc() map[string]string { - return map_Scheduling -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 20f8051..0000000 --- a/vendor/k8s.io/api/node/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,165 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Overhead) DeepCopyInto(out *Overhead) { - *out = *in - if in.PodFixed != nil { - in, out := &in.PodFixed, &out.PodFixed - *out = make(v1.ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Overhead. -func (in *Overhead) DeepCopy() *Overhead { - if in == nil { - return nil - } - out := new(Overhead) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClass) DeepCopyInto(out *RuntimeClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClass. -func (in *RuntimeClass) DeepCopy() *RuntimeClass { - if in == nil { - return nil - } - out := new(RuntimeClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RuntimeClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassList) DeepCopyInto(out *RuntimeClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RuntimeClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassList. -func (in *RuntimeClassList) DeepCopy() *RuntimeClassList { - if in == nil { - return nil - } - out := new(RuntimeClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RuntimeClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassSpec) DeepCopyInto(out *RuntimeClassSpec) { - *out = *in - if in.Overhead != nil { - in, out := &in.Overhead, &out.Overhead - *out = new(Overhead) - (*in).DeepCopyInto(*out) - } - if in.Scheduling != nil { - in, out := &in.Scheduling, &out.Scheduling - *out = new(Scheduling) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassSpec. -func (in *RuntimeClassSpec) DeepCopy() *RuntimeClassSpec { - if in == nil { - return nil - } - out := new(RuntimeClassSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Scheduling) DeepCopyInto(out *Scheduling) { - *out = *in - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]v1.Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scheduling. -func (in *Scheduling) DeepCopy() *Scheduling { - if in == nil { - return nil - } - out := new(Scheduling) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/node/v1beta1/doc.go b/vendor/k8s.io/api/node/v1beta1/doc.go deleted file mode 100644 index e87583c..0000000 --- a/vendor/k8s.io/api/node/v1beta1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true - -// +groupName=node.k8s.io - -package v1beta1 // import "k8s.io/api/node/v1beta1" diff --git a/vendor/k8s.io/api/node/v1beta1/generated.pb.go b/vendor/k8s.io/api/node/v1beta1/generated.pb.go deleted file mode 100644 index b85cbd2..0000000 --- a/vendor/k8s.io/api/node/v1beta1/generated.pb.go +++ /dev/null @@ -1,1412 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto - -package v1beta1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" - k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" - resource "k8s.io/apimachinery/pkg/api/resource" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *Overhead) Reset() { *m = Overhead{} } -func (*Overhead) ProtoMessage() {} -func (*Overhead) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{0} -} -func (m *Overhead) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Overhead) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Overhead) XXX_Merge(src proto.Message) { - xxx_messageInfo_Overhead.Merge(m, src) -} -func (m *Overhead) XXX_Size() int { - return m.Size() -} -func (m *Overhead) XXX_DiscardUnknown() { - xxx_messageInfo_Overhead.DiscardUnknown(m) -} - -var xxx_messageInfo_Overhead proto.InternalMessageInfo - -func (m *RuntimeClass) Reset() { *m = RuntimeClass{} } -func (*RuntimeClass) ProtoMessage() {} -func (*RuntimeClass) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{1} -} -func (m *RuntimeClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClass.Merge(m, src) -} -func (m *RuntimeClass) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClass) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClass.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClass proto.InternalMessageInfo - -func (m *RuntimeClassList) Reset() { *m = RuntimeClassList{} } -func (*RuntimeClassList) ProtoMessage() {} -func (*RuntimeClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{2} -} -func (m *RuntimeClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassList.Merge(m, src) -} -func (m *RuntimeClassList) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassList) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassList proto.InternalMessageInfo - -func (m *Scheduling) Reset() { *m = Scheduling{} } -func (*Scheduling) ProtoMessage() {} -func (*Scheduling) Descriptor() ([]byte, []int) { - return fileDescriptor_f977b0dddc93b4ec, []int{3} -} -func (m *Scheduling) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Scheduling) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Scheduling) XXX_Merge(src proto.Message) { - xxx_messageInfo_Scheduling.Merge(m, src) -} -func (m *Scheduling) XXX_Size() int { - return m.Size() -} -func (m *Scheduling) XXX_DiscardUnknown() { - xxx_messageInfo_Scheduling.DiscardUnknown(m) -} - -var xxx_messageInfo_Scheduling proto.InternalMessageInfo - -func init() { - proto.RegisterType((*Overhead)(nil), "k8s.io.api.node.v1beta1.Overhead") - proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.api.node.v1beta1.Overhead.PodFixedEntry") - 
proto.RegisterType((*RuntimeClass)(nil), "k8s.io.api.node.v1beta1.RuntimeClass") - proto.RegisterType((*RuntimeClassList)(nil), "k8s.io.api.node.v1beta1.RuntimeClassList") - proto.RegisterType((*Scheduling)(nil), "k8s.io.api.node.v1beta1.Scheduling") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.node.v1beta1.Scheduling.NodeSelectorEntry") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/node/v1beta1/generated.proto", fileDescriptor_f977b0dddc93b4ec) -} - -var fileDescriptor_f977b0dddc93b4ec = []byte{ - // 666 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xbd, 0x6f, 0xd3, 0x40, - 0x14, 0xcf, 0xa5, 0x54, 0x4d, 0x2f, 0x29, 0x14, 0x53, 0x89, 0x28, 0x83, 0x53, 0x82, 0x90, 0xca, - 0xd0, 0x33, 0xad, 0x00, 0x55, 0x2c, 0x20, 0xf3, 0x21, 0x3e, 0x5b, 0x70, 0x61, 0x41, 0x0c, 0x5c, - 0xec, 0x87, 0x63, 0x12, 0xfb, 0xa2, 0xf3, 0x39, 0x22, 0x1b, 0x62, 0x41, 0x62, 0x62, 0xe1, 0xbf, - 0x81, 0xbd, 0x1b, 0x5d, 0x90, 0x3a, 0xb5, 0x34, 0xfc, 0x17, 0x4c, 0xe8, 0xec, 0x73, 0x72, 0x6d, - 0x9a, 0xb6, 0x6c, 0xbe, 0xf3, 0xef, 0xe3, 0xbd, 0xdf, 0xbb, 0x87, 0xef, 0xb4, 0xd7, 0x62, 0x12, - 0x30, 0xab, 0x9d, 0x34, 0x81, 0x47, 0x20, 0x20, 0xb6, 0x7a, 0x10, 0x79, 0x8c, 0x5b, 0xea, 0x07, - 0xed, 0x06, 0x56, 0xc4, 0x3c, 0xb0, 0x7a, 0x2b, 0x4d, 0x10, 0x74, 0xc5, 0xf2, 0x21, 0x02, 0x4e, - 0x05, 0x78, 0xa4, 0xcb, 0x99, 0x60, 0xc6, 0xc5, 0x0c, 0x48, 0x68, 0x37, 0x20, 0x12, 0x48, 0x14, - 0xb0, 0xb6, 0xec, 0x07, 0xa2, 0x95, 0x34, 0x89, 0xcb, 0x42, 0xcb, 0x67, 0x3e, 0xb3, 0x52, 0x7c, - 0x33, 0x79, 0x97, 0x9e, 0xd2, 0x43, 0xfa, 0x95, 0xe9, 0xd4, 0x1a, 0x9a, 0xa1, 0xcb, 0xb8, 0x34, - 0x3c, 0xec, 0x55, 0xbb, 0x3e, 0xc2, 0x84, 0xd4, 0x6d, 0x05, 0x11, 0xf0, 0xbe, 0xd5, 0x6d, 0xfb, - 0x29, 0x89, 0x43, 0xcc, 0x12, 0xee, 0xc2, 0x7f, 0xb1, 0x62, 0x2b, 0x04, 0x41, 0x8f, 0xf2, 0xb2, - 0x26, 0xb1, 0x78, 0x12, 0x89, 0x20, 0x1c, 0xb7, 0xb9, 0x79, 0x12, 0x21, 0x76, 0x5b, 0x10, 0xd2, - 0xc3, 0xbc, 0xc6, 0xcf, 0x22, 0x2e, 0x6d, 0xf4, 0x80, 0xb7, 0x80, 0x7a, 0xc6, 0x2f, 0x84, 0x4b, - 0x5d, 0xe6, 0x3d, 0x08, 0x3e, 0x80, 0x57, 0x45, 0x8b, 0x53, 0x4b, 0xe5, 0x55, 0x8b, 0x4c, 0x48, - 0x98, 0xe4, 0x2c, 0xf2, 0x5c, 0x31, 0xee, 0x47, 0x82, 0xf7, 0xed, 0xcf, 0x68, 0x6b, 0xb7, 0x5e, - 0x18, 0xec, 0xd6, 0x4b, 0xf9, 0xfd, 0xdf, 0xdd, 0x7a, 0x7d, 0x3c, 0x5e, 0xe2, 0xa8, 0xc4, 0x9e, - 0x06, 0xb1, 0xf8, 0xb4, 0x77, 0x2c, 0x64, 0x9d, 0x86, 0xf0, 0x65, 0xaf, 0xbe, 0x7c, 0x9a, 0x01, - 0x90, 0x17, 0x09, 0x8d, 0x44, 0x20, 0xfa, 0xce, 0xb0, 0x95, 0x5a, 0x1b, 0xcf, 0x1d, 0x28, 0xd2, - 0x98, 0xc7, 0x53, 0x6d, 0xe8, 0x57, 0xd1, 0x22, 0x5a, 0x9a, 0x75, 0xe4, 0xa7, 0x71, 0x0f, 0x4f, - 0xf7, 0x68, 0x27, 0x81, 0x6a, 0x71, 0x11, 0x2d, 0x95, 0x57, 0x89, 0xd6, 0xf6, 0xd0, 0x8b, 0x74, - 0xdb, 0x7e, 0x9a, 0xc3, 0xb8, 0x57, 0x46, 0xbe, 0x55, 0x5c, 0x43, 0x8d, 0x1f, 0x45, 0x5c, 0x71, - 0xb2, 0xd0, 0xef, 0x76, 0x68, 0x1c, 0x1b, 0x6f, 0x71, 0x49, 0x8e, 0xd9, 0xa3, 0x82, 0xa6, 0x8e, - 0xe5, 0xd5, 0x6b, 0xc7, 0xa9, 0xc7, 0x44, 0xa2, 0x49, 0x6f, 0x85, 0x6c, 0x34, 0xdf, 0x83, 0x2b, - 0x9e, 0x81, 0xa0, 0xb6, 0xa1, 0x42, 0xc5, 0xa3, 0x3b, 0x67, 0xa8, 0x6a, 0x5c, 0xc5, 0x33, 0x2d, - 0x1a, 0x79, 0x1d, 0xe0, 0x69, 0xf9, 0xb3, 0xf6, 0x39, 0x05, 0x9f, 0x79, 0x98, 0x5d, 0x3b, 0xf9, - 0x7f, 0xe3, 0x09, 0x2e, 0x31, 0x35, 0xb8, 0xea, 0x54, 0x5a, 0xcc, 0xa5, 0x13, 0x27, 0x6c, 0x57, - 0xe4, 0x38, 0xf3, 0x93, 0x33, 0x14, 0x30, 0x36, 0x31, 0x96, 0xcf, 0xca, 0x4b, 0x3a, 0x41, 0xe4, - 0x57, 0xcf, 0xa4, 0x72, 0x97, 0x27, 0xca, 0x6d, 0x0e, 0xa1, 0xf6, 0x59, 0xd9, 0xca, 0xe8, 0xec, - 0x68, 0x32, 
0x8d, 0xef, 0x08, 0xcf, 0xeb, 0xf9, 0xc9, 0xf7, 0x61, 0xbc, 0x19, 0xcb, 0x90, 0x9c, - 0x2e, 0x43, 0xc9, 0x4e, 0x13, 0x9c, 0xcf, 0x9f, 0x65, 0x7e, 0xa3, 0xe5, 0xf7, 0x18, 0x4f, 0x07, - 0x02, 0xc2, 0xb8, 0x5a, 0x4c, 0xdf, 0xfc, 0x95, 0x89, 0x2d, 0xe8, 0x75, 0xd9, 0x73, 0x4a, 0x71, - 0xfa, 0x91, 0xe4, 0x3a, 0x99, 0x44, 0xe3, 0x5b, 0x11, 0x6b, 0x9d, 0x19, 0x0c, 0x57, 0xa4, 0xc2, - 0x26, 0x74, 0xc0, 0x15, 0x8c, 0xab, 0xad, 0xba, 0x71, 0x8a, 0x90, 0xc8, 0xba, 0xc6, 0xcb, 0x76, - 0x6b, 0x41, 0x39, 0x56, 0xf4, 0x5f, 0xce, 0x01, 0x03, 0xe3, 0x15, 0x2e, 0x0b, 0xd6, 0x91, 0x3b, - 0x1e, 0xb0, 0x28, 0xef, 0xc8, 0xd4, 0xfd, 0xe4, 0x76, 0xc9, 0x68, 0x5e, 0x0e, 0x61, 0xf6, 0x05, - 0x25, 0x5c, 0x1e, 0xdd, 0xc5, 0x8e, 0xae, 0x53, 0xbb, 0x8d, 0xcf, 0x8f, 0xd5, 0x73, 0xc4, 0x1a, - 0x2d, 0xe8, 0x6b, 0x34, 0xab, 0xad, 0x85, 0xbd, 0xbc, 0xb5, 0x6f, 0x16, 0xb6, 0xf7, 0xcd, 0xc2, - 0xce, 0xbe, 0x59, 0xf8, 0x38, 0x30, 0xd1, 0xd6, 0xc0, 0x44, 0xdb, 0x03, 0x13, 0xed, 0x0c, 0x4c, - 0xf4, 0x7b, 0x60, 0xa2, 0xaf, 0x7f, 0xcc, 0xc2, 0xeb, 0x19, 0x95, 0xc3, 0xbf, 0x00, 0x00, 0x00, - 0xff, 0xff, 0x5b, 0xcf, 0x13, 0x0c, 0x1b, 0x06, 0x00, 0x00, -} - -func (m *Overhead) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Overhead) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Overhead) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.PodFixed) > 0 { - keysForPodFixed := make([]string, 0, len(m.PodFixed)) - for k := range m.PodFixed { - keysForPodFixed = append(keysForPodFixed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) - for iNdEx := len(keysForPodFixed) - 1; iNdEx >= 0; iNdEx-- { - v := m.PodFixed[k8s_io_api_core_v1.ResourceName(keysForPodFixed[iNdEx])] - baseI := i - { - size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(keysForPodFixed[iNdEx]) - copy(dAtA[i:], keysForPodFixed[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForPodFixed[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RuntimeClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Scheduling != nil { - { - size, err := m.Scheduling.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.Overhead != nil { - { - size, err := m.Overhead.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - i -= len(m.Handler) - copy(dAtA[i:], m.Handler) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Handler))) - i-- - dAtA[i] = 0x12 - { - size, err := 
m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RuntimeClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *Scheduling) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Scheduling) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Scheduling) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Tolerations) > 0 { - for iNdEx := len(m.Tolerations) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Tolerations[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.NodeSelector) > 0 { - keysForNodeSelector := make([]string, 0, len(m.NodeSelector)) - for k := range m.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - for iNdEx := len(keysForNodeSelector) - 1; iNdEx >= 0; iNdEx-- { - v := m.NodeSelector[string(keysForNodeSelector[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForNodeSelector[iNdEx]) - copy(dAtA[i:], keysForNodeSelector[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForNodeSelector[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Overhead) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.PodFixed) > 0 { - for k, v := range m.PodFixed { - _ = k - _ = v - l = ((*resource.Quantity)(&v)).Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m *RuntimeClass) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = 
len(m.Handler) - n += 1 + l + sovGenerated(uint64(l)) - if m.Overhead != nil { - l = m.Overhead.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Scheduling != nil { - l = m.Scheduling.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RuntimeClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Scheduling) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.NodeSelector) > 0 { - for k, v := range m.NodeSelector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if len(m.Tolerations) > 0 { - for _, e := range m.Tolerations { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Overhead) String() string { - if this == nil { - return "nil" - } - keysForPodFixed := make([]string, 0, len(this.PodFixed)) - for k := range this.PodFixed { - keysForPodFixed = append(keysForPodFixed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForPodFixed) - mapStringForPodFixed := "k8s_io_api_core_v1.ResourceList{" - for _, k := range keysForPodFixed { - mapStringForPodFixed += fmt.Sprintf("%v: %v,", k, this.PodFixed[k8s_io_api_core_v1.ResourceName(k)]) - } - mapStringForPodFixed += "}" - s := strings.Join([]string{`&Overhead{`, - `PodFixed:` + mapStringForPodFixed + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Handler:` + fmt.Sprintf("%v", this.Handler) + `,`, - `Overhead:` + strings.Replace(this.Overhead.String(), "Overhead", "Overhead", 1) + `,`, - `Scheduling:` + strings.Replace(this.Scheduling.String(), "Scheduling", "Scheduling", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]RuntimeClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RuntimeClass", "RuntimeClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&RuntimeClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *Scheduling) String() string { - if this == nil { - return "nil" - } - repeatedStringForTolerations := "[]Toleration{" - for _, f := range this.Tolerations { - repeatedStringForTolerations += fmt.Sprintf("%v", f) + "," - } - repeatedStringForTolerations += "}" - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - 
for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) - } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&Scheduling{`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `Tolerations:` + repeatedStringForTolerations + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Overhead) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Overhead: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Overhead: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodFixed", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodFixed == nil { - m.PodFixed = make(k8s_io_api_core_v1.ResourceList) - } - var mapkey k8s_io_api_core_v1.ResourceName - mapvalue := &resource.Quantity{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &resource.Quantity{} - if err := 
mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.PodFixed[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Handler = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Overhead", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Overhead == nil { - 
m.Overhead = &Overhead{} - } - if err := m.Overhead.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scheduling", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Scheduling == nil { - m.Scheduling = &Scheduling{} - } - if err := m.Scheduling.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuntimeClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RuntimeClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + 
skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Scheduling) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Scheduling: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Scheduling: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeSelector == nil { - m.NodeSelector = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.NodeSelector[mapkey] = mapvalue - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Tolerations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Tolerations = append(m.Tolerations, v11.Toleration{}) - if err := m.Tolerations[len(m.Tolerations)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/node/v1beta1/generated.proto b/vendor/k8s.io/api/node/v1beta1/generated.proto deleted file mode 100644 index 4916679..0000000 --- a/vendor/k8s.io/api/node/v1beta1/generated.proto +++ /dev/null @@ -1,108 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.node.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Overhead structure represents the resource overhead associated with running a pod. -message Overhead { - // PodFixed represents the fixed resource overhead associated with running a pod. - // +optional - map<string, k8s.io.apimachinery.pkg.api.resource.Quantity> podFixed = 1; -} - -// RuntimeClass defines a class of container runtime supported in the cluster. -// The RuntimeClass is used to determine which container runtime is used to run -// all containers in a pod. RuntimeClasses are (currently) manually defined by a -// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is -// responsible for resolving the RuntimeClassName reference before running the -// pod. For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md -message RuntimeClass { - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Handler specifies the underlying runtime and configuration that the CRI - // implementation will use to handle pods of this class. The possible values - // are specific to the node & CRI configuration. It is assumed that all - // handlers are available on every node, and handlers of the same name are - // equivalent on every node. - // For example, a handler called "runc" might specify that the runc OCI - // runtime (using native Linux containers) will be used to run the containers - // in a pod. - // The Handler must conform to the DNS Label (RFC 1123) requirements, and is - // immutable. - optional string handler = 2; - - // Overhead represents the resource overhead associated with running a pod for a - // given RuntimeClass. For more details, see - // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. - // +optional - optional Overhead overhead = 3; - - // Scheduling holds the scheduling constraints to ensure that pods running - // with this RuntimeClass are scheduled to nodes that support it. - // If scheduling is nil, this RuntimeClass is assumed to be supported by all - // nodes. - // +optional - optional Scheduling scheduling = 4; -} - -// RuntimeClassList is a list of RuntimeClass objects. -message RuntimeClassList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects.
-  repeated RuntimeClass items = 2;
-}
-
-// Scheduling specifies the scheduling constraints for nodes supporting a
-// RuntimeClass.
-message Scheduling {
-  // nodeSelector lists labels that must be present on nodes that support this
-  // RuntimeClass. Pods using this RuntimeClass can only be scheduled to a
-  // node matched by this selector. The RuntimeClass nodeSelector is merged
-  // with a pod's existing nodeSelector. Any conflicts will cause the pod to
-  // be rejected in admission.
-  // +optional
-  map<string, string> nodeSelector = 1;
-
-  // tolerations are appended (excluding duplicates) to pods running with this
-  // RuntimeClass during admission, effectively unioning the set of nodes
-  // tolerated by the pod and the RuntimeClass.
-  // +optional
-  // +listType=atomic
-  repeated k8s.io.api.core.v1.Toleration tolerations = 2;
-}
-
diff --git a/vendor/k8s.io/api/node/v1beta1/register.go b/vendor/k8s.io/api/node/v1beta1/register.go
deleted file mode 100644
index 3c3b61b..0000000
--- a/vendor/k8s.io/api/node/v1beta1/register.go
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-// GroupName is the group name use in this package
-const GroupName = "node.k8s.io"
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
-	return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
-	// SchemeBuilder is the scheme builder with scheme init functions to run for this API package
-	SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
-	// AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme
-	AddToScheme = SchemeBuilder.AddToScheme
-)
-
-// addKnownTypes adds the list of known types to api.Scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&RuntimeClass{},
-		&RuntimeClassList{},
-	)
-
-	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
-	return nil
-}
diff --git a/vendor/k8s.io/api/node/v1beta1/types.go b/vendor/k8s.io/api/node/v1beta1/types.go
deleted file mode 100644
index 793a48f..0000000
--- a/vendor/k8s.io/api/node/v1beta1/types.go
+++ /dev/null
@@ -1,106 +0,0 @@
-/*
-Copyright 2019 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RuntimeClass defines a class of container runtime supported in the cluster. -// The RuntimeClass is used to determine which container runtime is used to run -// all containers in a pod. RuntimeClasses are (currently) manually defined by a -// user or cluster provisioner, and referenced in the PodSpec. The Kubelet is -// responsible for resolving the RuntimeClassName reference before running the -// pod. For more details, see -// https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md -type RuntimeClass struct { - metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Handler specifies the underlying runtime and configuration that the CRI - // implementation will use to handle pods of this class. The possible values - // are specific to the node & CRI configuration. It is assumed that all - // handlers are available on every node, and handlers of the same name are - // equivalent on every node. - // For example, a handler called "runc" might specify that the runc OCI - // runtime (using native Linux containers) will be used to run the containers - // in a pod. - // The Handler must conform to the DNS Label (RFC 1123) requirements, and is - // immutable. - Handler string `json:"handler" protobuf:"bytes,2,opt,name=handler"` - - // Overhead represents the resource overhead associated with running a pod for a - // given RuntimeClass. For more details, see - // https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md - // This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature. - // +optional - Overhead *Overhead `json:"overhead,omitempty" protobuf:"bytes,3,opt,name=overhead"` - - // Scheduling holds the scheduling constraints to ensure that pods running - // with this RuntimeClass are scheduled to nodes that support it. - // If scheduling is nil, this RuntimeClass is assumed to be supported by all - // nodes. - // +optional - Scheduling *Scheduling `json:"scheduling,omitempty" protobuf:"bytes,4,opt,name=scheduling"` -} - -// Overhead structure represents the resource overhead associated with running a pod. -type Overhead struct { - // PodFixed represents the fixed resource overhead associated with running a pod. - // +optional - PodFixed corev1.ResourceList `json:"podFixed,omitempty" protobuf:"bytes,1,opt,name=podFixed,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` -} - -// Scheduling specifies the scheduling constraints for nodes supporting a -// RuntimeClass. -type Scheduling struct { - // nodeSelector lists labels that must be present on nodes that support this - // RuntimeClass. 
Pods using this RuntimeClass can only be scheduled to a - // node matched by this selector. The RuntimeClass nodeSelector is merged - // with a pod's existing nodeSelector. Any conflicts will cause the pod to - // be rejected in admission. - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,1,opt,name=nodeSelector"` - - // tolerations are appended (excluding duplicates) to pods running with this - // RuntimeClass during admission, effectively unioning the set of nodes - // tolerated by the pod and the RuntimeClass. - // +optional - // +listType=atomic - Tolerations []corev1.Toleration `json:"tolerations,omitempty" protobuf:"bytes,2,rep,name=tolerations"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RuntimeClassList is a list of RuntimeClass objects. -type RuntimeClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []RuntimeClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 681f73f..0000000 --- a/vendor/k8s.io/api/node/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Overhead = map[string]string{ - "": "Overhead structure represents the resource overhead associated with running a pod.", - "podFixed": "PodFixed represents the fixed resource overhead associated with running a pod.", -} - -func (Overhead) SwaggerDoc() map[string]string { - return map_Overhead -} - -var map_RuntimeClass = map[string]string{ - "": "RuntimeClass defines a class of container runtime supported in the cluster. The RuntimeClass is used to determine which container runtime is used to run all containers in a pod. RuntimeClasses are (currently) manually defined by a user or cluster provisioner, and referenced in the PodSpec. The Kubelet is responsible for resolving the RuntimeClassName reference before running the pod. 
For more details, see https://git.k8s.io/enhancements/keps/sig-node/runtime-class.md", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "handler": "Handler specifies the underlying runtime and configuration that the CRI implementation will use to handle pods of this class. The possible values are specific to the node & CRI configuration. It is assumed that all handlers are available on every node, and handlers of the same name are equivalent on every node. For example, a handler called \"runc\" might specify that the runc OCI runtime (using native Linux containers) will be used to run the containers in a pod. The Handler must conform to the DNS Label (RFC 1123) requirements, and is immutable.", - "overhead": "Overhead represents the resource overhead associated with running a pod for a given RuntimeClass. For more details, see https://git.k8s.io/enhancements/keps/sig-node/20190226-pod-overhead.md This field is alpha-level as of Kubernetes v1.15, and is only honored by servers that enable the PodOverhead feature.", - "scheduling": "Scheduling holds the scheduling constraints to ensure that pods running with this RuntimeClass are scheduled to nodes that support it. If scheduling is nil, this RuntimeClass is assumed to be supported by all nodes.", -} - -func (RuntimeClass) SwaggerDoc() map[string]string { - return map_RuntimeClass -} - -var map_RuntimeClassList = map[string]string{ - "": "RuntimeClassList is a list of RuntimeClass objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (RuntimeClassList) SwaggerDoc() map[string]string { - return map_RuntimeClassList -} - -var map_Scheduling = map[string]string{ - "": "Scheduling specifies the scheduling constraints for nodes supporting a RuntimeClass.", - "nodeSelector": "nodeSelector lists labels that must be present on nodes that support this RuntimeClass. Pods using this RuntimeClass can only be scheduled to a node matched by this selector. The RuntimeClass nodeSelector is merged with a pod's existing nodeSelector. Any conflicts will cause the pod to be rejected in admission.", - "tolerations": "tolerations are appended (excluding duplicates) to pods running with this RuntimeClass during admission, effectively unioning the set of nodes tolerated by the pod and the RuntimeClass.", -} - -func (Scheduling) SwaggerDoc() map[string]string { - return map_Scheduling -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go index 05d8332..9c456f9 100644 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ b/vendor/k8s.io/api/policy/v1beta1/doc.go @@ -15,10 +15,9 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true // Package policy is for any kind of policy object. Suitable examples, even if // they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, // NetworkPolicy, etc. 
+// +k8s:openapi-gen=true package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go index 40ec7ef..d7d62dd 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go @@ -14,29 +14,51 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto + + It has these top-level messages: + AllowedFlexVolume + AllowedHostPath + Eviction + FSGroupStrategyOptions + HostPortRange + IDRange + PodDisruptionBudget + PodDisruptionBudgetList + PodDisruptionBudgetSpec + PodDisruptionBudgetStatus + PodSecurityPolicy + PodSecurityPolicyList + PodSecurityPolicySpec + RunAsUserStrategyOptions + SELinuxStrategyOptions + SupplementalGroupsStrategyOptions +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) +import strings "strings" +import reflect "reflect" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -47,542 +69,79 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AllowedCSIDriver) Reset() { *m = AllowedCSIDriver{} } -func (*AllowedCSIDriver) ProtoMessage() {} -func (*AllowedCSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{0} -} -func (m *AllowedCSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedCSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedCSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedCSIDriver.Merge(m, src) -} -func (m *AllowedCSIDriver) XXX_Size() int { - return m.Size() -} -func (m *AllowedCSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedCSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedCSIDriver proto.InternalMessageInfo - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{1} -} -func (m *AllowedFlexVolume) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedFlexVolume) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedFlexVolume) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedFlexVolume.Merge(m, src) -} -func (m *AllowedFlexVolume) XXX_Size() int { - return m.Size() -} -func (m *AllowedFlexVolume) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedFlexVolume.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedFlexVolume proto.InternalMessageInfo - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{2} -} -func (m *AllowedHostPath) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AllowedHostPath) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AllowedHostPath) XXX_Merge(src proto.Message) { - xxx_messageInfo_AllowedHostPath.Merge(m, src) -} -func (m *AllowedHostPath) XXX_Size() int { - return m.Size() -} -func (m *AllowedHostPath) XXX_DiscardUnknown() { - xxx_messageInfo_AllowedHostPath.DiscardUnknown(m) -} - -var xxx_messageInfo_AllowedHostPath proto.InternalMessageInfo - -func (m *Eviction) Reset() { *m = Eviction{} } -func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{3} -} -func (m *Eviction) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Eviction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Eviction) XXX_Merge(src proto.Message) { - xxx_messageInfo_Eviction.Merge(m, src) -} -func (m *Eviction) XXX_Size() int { - return m.Size() -} -func (m *Eviction) XXX_DiscardUnknown() { - xxx_messageInfo_Eviction.DiscardUnknown(m) -} - -var xxx_messageInfo_Eviction proto.InternalMessageInfo - -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) 
ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{4} -} -func (m *FSGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FSGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FSGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_FSGroupStrategyOptions.Merge(m, src) -} -func (m *FSGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *FSGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_FSGroupStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_FSGroupStrategyOptions proto.InternalMessageInfo - -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{5} -} -func (m *HostPortRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostPortRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostPortRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostPortRange.Merge(m, src) -} -func (m *HostPortRange) XXX_Size() int { - return m.Size() -} -func (m *HostPortRange) XXX_DiscardUnknown() { - xxx_messageInfo_HostPortRange.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_HostPortRange proto.InternalMessageInfo +func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } +func (*AllowedFlexVolume) ProtoMessage() {} +func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{6} -} -func (m *IDRange) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IDRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IDRange) XXX_Merge(src proto.Message) { - xxx_messageInfo_IDRange.Merge(m, src) -} -func (m *IDRange) XXX_Size() int { - return m.Size() -} -func (m *IDRange) XXX_DiscardUnknown() { - xxx_messageInfo_IDRange.DiscardUnknown(m) -} +func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } +func (*AllowedHostPath) ProtoMessage() {} +func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_IDRange proto.InternalMessageInfo +func (m *Eviction) Reset() { *m = Eviction{} } +func (*Eviction) ProtoMessage() {} +func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{7} -} -func (m *PodDisruptionBudget) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodDisruptionBudget) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != 
nil { - return nil, err - } - return b[:n], nil -} -func (m *PodDisruptionBudget) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodDisruptionBudget.Merge(m, src) -} -func (m *PodDisruptionBudget) XXX_Size() int { - return m.Size() -} -func (m *PodDisruptionBudget) XXX_DiscardUnknown() { - xxx_messageInfo_PodDisruptionBudget.DiscardUnknown(m) -} +func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } +func (*FSGroupStrategyOptions) ProtoMessage() {} +func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_PodDisruptionBudget proto.InternalMessageInfo +func (m *HostPortRange) Reset() { *m = HostPortRange{} } +func (*HostPortRange) ProtoMessage() {} +func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{8} -} -func (m *PodDisruptionBudgetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodDisruptionBudgetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodDisruptionBudgetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodDisruptionBudgetList.Merge(m, src) -} -func (m *PodDisruptionBudgetList) XXX_Size() int { - return m.Size() -} -func (m *PodDisruptionBudgetList) XXX_DiscardUnknown() { - xxx_messageInfo_PodDisruptionBudgetList.DiscardUnknown(m) -} +func (m *IDRange) Reset() { *m = IDRange{} } +func (*IDRange) ProtoMessage() {} +func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_PodDisruptionBudgetList proto.InternalMessageInfo +func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } +func (*PodDisruptionBudget) ProtoMessage() {} +func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{9} -} -func (m *PodDisruptionBudgetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodDisruptionBudgetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodDisruptionBudgetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodDisruptionBudgetSpec.Merge(m, src) -} -func (m *PodDisruptionBudgetSpec) XXX_Size() int { - return m.Size() -} -func (m *PodDisruptionBudgetSpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodDisruptionBudgetSpec.DiscardUnknown(m) -} +func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } +func (*PodDisruptionBudgetList) ProtoMessage() {} +func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_PodDisruptionBudgetSpec proto.InternalMessageInfo +func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } +func (*PodDisruptionBudgetSpec) ProtoMessage() {} +func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{8} } func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } func (*PodDisruptionBudgetStatus) ProtoMessage() {} func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{10} -} -func (m *PodDisruptionBudgetStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodDisruptionBudgetStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodDisruptionBudgetStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodDisruptionBudgetStatus.Merge(m, src) -} -func (m *PodDisruptionBudgetStatus) XXX_Size() int { - return m.Size() -} -func (m *PodDisruptionBudgetStatus) XXX_DiscardUnknown() { - xxx_messageInfo_PodDisruptionBudgetStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_PodDisruptionBudgetStatus proto.InternalMessageInfo - -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{11} -} -func (m *PodSecurityPolicy) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicy) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicy.Merge(m, src) -} -func (m *PodSecurityPolicy) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicy) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicy.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicy proto.InternalMessageInfo - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{12} -} -func (m *PodSecurityPolicyList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicyList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicyList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicyList.Merge(m, src) -} -func (m *PodSecurityPolicyList) XXX_Size() int { - return m.Size() -} -func (m *PodSecurityPolicyList) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicyList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodSecurityPolicyList proto.InternalMessageInfo - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{13} -} -func (m *PodSecurityPolicySpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodSecurityPolicySpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodSecurityPolicySpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodSecurityPolicySpec.Merge(m, src) -} -func (m *PodSecurityPolicySpec) XXX_Size() int { - return m.Size() -} 
-func (m *PodSecurityPolicySpec) XXX_DiscardUnknown() { - xxx_messageInfo_PodSecurityPolicySpec.DiscardUnknown(m) + return fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_PodSecurityPolicySpec proto.InternalMessageInfo +func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } +func (*PodSecurityPolicy) ProtoMessage() {} +func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *RunAsGroupStrategyOptions) Reset() { *m = RunAsGroupStrategyOptions{} } -func (*RunAsGroupStrategyOptions) ProtoMessage() {} -func (*RunAsGroupStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{14} -} -func (m *RunAsGroupStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsGroupStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsGroupStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsGroupStrategyOptions.Merge(m, src) -} -func (m *RunAsGroupStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsGroupStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsGroupStrategyOptions.DiscardUnknown(m) -} +func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } +func (*PodSecurityPolicyList) ProtoMessage() {} +func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -var xxx_messageInfo_RunAsGroupStrategyOptions proto.InternalMessageInfo +func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } +func (*PodSecurityPolicySpec) ProtoMessage() {} +func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } func (*RunAsUserStrategyOptions) ProtoMessage() {} func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{15} -} -func (m *RunAsUserStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RunAsUserStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RunAsUserStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RunAsUserStrategyOptions.Merge(m, src) -} -func (m *RunAsUserStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RunAsUserStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RunAsUserStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RunAsUserStrategyOptions proto.InternalMessageInfo - -func (m *RuntimeClassStrategyOptions) Reset() { *m = RuntimeClassStrategyOptions{} } -func (*RuntimeClassStrategyOptions) ProtoMessage() {} -func (*RuntimeClassStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{16} -} -func (m *RuntimeClassStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RuntimeClassStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RuntimeClassStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_RuntimeClassStrategyOptions.Merge(m, src) -} -func (m 
*RuntimeClassStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *RuntimeClassStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_RuntimeClassStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_RuntimeClassStrategyOptions proto.InternalMessageInfo - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{17} -} -func (m *SELinuxStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SELinuxStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SELinuxStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SELinuxStrategyOptions.Merge(m, src) -} -func (m *SELinuxStrategyOptions) XXX_Size() int { - return m.Size() -} -func (m *SELinuxStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SELinuxStrategyOptions.DiscardUnknown(m) + return fileDescriptorGenerated, []int{13} } -var xxx_messageInfo_SELinuxStrategyOptions proto.InternalMessageInfo +func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } +func (*SELinuxStrategyOptions) ProtoMessage() {} +func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_014060e454a820dc, []int{18} -} -func (m *SupplementalGroupsStrategyOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *SupplementalGroupsStrategyOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_SupplementalGroupsStrategyOptions.Merge(m, src) -} -func (m *SupplementalGroupsStrategyOptions) XXX_Size() int { - return m.Size() + return fileDescriptorGenerated, []int{15} } -func (m *SupplementalGroupsStrategyOptions) XXX_DiscardUnknown() { - xxx_messageInfo_SupplementalGroupsStrategyOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_SupplementalGroupsStrategyOptions proto.InternalMessageInfo func init() { - proto.RegisterType((*AllowedCSIDriver)(nil), "k8s.io.api.policy.v1beta1.AllowedCSIDriver") proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") @@ -593,175 +152,17 @@ func init() { proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") - proto.RegisterMapType((map[string]v1.Time)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus.DisruptedPodsEntry") proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") 
proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*RunAsGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsGroupStrategyOptions") proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*RuntimeClassStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RuntimeClassStrategyOptions") proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptor_014060e454a820dc) -} - -var fileDescriptor_014060e454a820dc = []byte{ - // 1878 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xdd, 0x6e, 0x1b, 0xc7, - 0x15, 0xd6, 0x9a, 0xfa, 0xa1, 0x46, 0x3f, 0x16, 0x47, 0x3f, 0x5e, 0x2b, 0x0d, 0xd7, 0xd9, 0x00, - 0x85, 0x9b, 0x26, 0xcb, 0x58, 0x76, 0x5c, 0xa3, 0x69, 0x8b, 0x68, 0x45, 0xc9, 0x56, 0x60, 0x59, - 0xec, 0xd0, 0x0e, 0xda, 0xc2, 0x2d, 0x3a, 0xe4, 0x8e, 0xa8, 0x8d, 0x96, 0xbb, 0xdb, 0x99, 0x59, - 0x46, 0xbc, 0xeb, 0x45, 0x2f, 0x7a, 0xd9, 0x17, 0x08, 0xfa, 0x00, 0x45, 0xaf, 0xfa, 0x12, 0x0e, - 0x50, 0x04, 0xb9, 0x0c, 0x7a, 0x41, 0xd4, 0xec, 0x5b, 0xf8, 0xaa, 0xd8, 0xe1, 0xec, 0x92, 0xfb, - 0x47, 0x5a, 0x01, 0xec, 0x3b, 0xee, 0x9c, 0xef, 0xfb, 0xce, 0xcc, 0x99, 0x33, 0x67, 0x0e, 0x07, - 0x98, 0x17, 0x0f, 0x98, 0x61, 0x7b, 0xb5, 0x8b, 0xa0, 0x45, 0xa8, 0x4b, 0x38, 0x61, 0xb5, 0x1e, - 0x71, 0x2d, 0x8f, 0xd6, 0xa4, 0x01, 0xfb, 0x76, 0xcd, 0xf7, 0x1c, 0xbb, 0xdd, 0xaf, 0xf5, 0xee, - 0xb4, 0x08, 0xc7, 0x77, 0x6a, 0x1d, 0xe2, 0x12, 0x8a, 0x39, 0xb1, 0x0c, 0x9f, 0x7a, 0xdc, 0x83, - 0x37, 0x47, 0x50, 0x03, 0xfb, 0xb6, 0x31, 0x82, 0x1a, 0x12, 0xba, 0xfb, 0x51, 0xc7, 0xe6, 0xe7, - 0x41, 0xcb, 0x68, 0x7b, 0xdd, 0x5a, 0xc7, 0xeb, 0x78, 0x35, 0xc1, 0x68, 0x05, 0x67, 0xe2, 0x4b, - 0x7c, 0x88, 0x5f, 0x23, 0xa5, 0x5d, 0x7d, 0xc2, 0x69, 0xdb, 0xa3, 0xa4, 0xd6, 0xcb, 0x78, 0xdb, - 0xbd, 0x37, 0xc6, 0x74, 0x71, 0xfb, 0xdc, 0x76, 0x09, 0xed, 0xd7, 0xfc, 0x8b, 0x4e, 0x38, 0xc0, - 0x6a, 0x5d, 0xc2, 0x71, 0x1e, 0xab, 0x56, 0xc4, 0xa2, 0x81, 0xcb, 0xed, 0x2e, 0xc9, 0x10, 0xee, - 0xcf, 0x22, 0xb0, 0xf6, 0x39, 0xe9, 0xe2, 0x0c, 0xef, 0x6e, 0x11, 0x2f, 0xe0, 0xb6, 0x53, 0xb3, - 0x5d, 0xce, 0x38, 0x4d, 0x93, 0xf4, 0x7b, 0x60, 0x63, 0xdf, 0x71, 0xbc, 0xaf, 0x88, 0x75, 0xd0, - 0x3c, 0xae, 0x53, 0xbb, 0x47, 0x28, 0xbc, 0x05, 0xe6, 0x5d, 0xdc, 0x25, 0xaa, 0x72, 0x4b, 0xb9, - 0xbd, 0x6c, 0xae, 0xbe, 0x18, 0x68, 0x73, 0xc3, 0x81, 0x36, 0xff, 0x04, 0x77, 0x09, 0x12, 0x16, - 0xfd, 0x53, 0x50, 0x91, 0xac, 0x23, 0x87, 0x5c, 0x7e, 0xe1, 0x39, 0x41, 0x97, 0xc0, 0x1f, 0x83, - 0x45, 0x4b, 0x08, 0x48, 0xe2, 0xba, 0x24, 0x2e, 0x8e, 0x64, 0x91, 0xb4, 0xea, 0x0c, 0x5c, 0x97, - 0xe4, 0x47, 0x1e, 0xe3, 0x0d, 0xcc, 0xcf, 0xe1, 0x1e, 0x00, 0x3e, 0xe6, 0xe7, 0x0d, 0x4a, 0xce, - 0xec, 0x4b, 0x49, 0x87, 0x92, 0x0e, 0x1a, 0xb1, 0x05, 0x4d, 0xa0, 0xe0, 0x87, 0xa0, 0x4c, 0x09, - 0xb6, 0x4e, 0x5d, 0xa7, 0xaf, 0x5e, 0xbb, 0xa5, 0xdc, 0x2e, 0x9b, 0x1b, 0x92, 0x51, 0x46, 0x72, - 0x1c, 0xc5, 0x08, 0xfd, 0x3f, 0x0a, 0x28, 0x1f, 0xf6, 0xec, 0x36, 0xb7, 0x3d, 0x17, 0xfe, 0x11, - 0x94, 0xc3, 0xdd, 0xb2, 0x30, 0xc7, 0xc2, 0xd9, 0xca, 0xde, 0xc7, 0xc6, 0x38, 0x93, 0xe2, 0xe0, 
- 0x19, 0xfe, 0x45, 0x27, 0x1c, 0x60, 0x46, 0x88, 0x36, 0x7a, 0x77, 0x8c, 0xd3, 0xd6, 0x97, 0xa4, - 0xcd, 0x4f, 0x08, 0xc7, 0xe3, 0xe9, 0x8d, 0xc7, 0x50, 0xac, 0x0a, 0x1d, 0xb0, 0x66, 0x11, 0x87, - 0x70, 0x72, 0xea, 0x87, 0x1e, 0x99, 0x98, 0xe1, 0xca, 0xde, 0xdd, 0xd7, 0x73, 0x53, 0x9f, 0xa4, - 0x9a, 0x95, 0xe1, 0x40, 0x5b, 0x4b, 0x0c, 0xa1, 0xa4, 0xb8, 0xfe, 0xb5, 0x02, 0x76, 0x8e, 0x9a, - 0x0f, 0xa9, 0x17, 0xf8, 0x4d, 0x1e, 0xee, 0x6e, 0xa7, 0x2f, 0x4d, 0xf0, 0x67, 0x60, 0x9e, 0x06, - 0x4e, 0xb4, 0x97, 0xef, 0x47, 0x7b, 0x89, 0x02, 0x87, 0xbc, 0x1a, 0x68, 0x9b, 0x29, 0xd6, 0xd3, - 0xbe, 0x4f, 0x90, 0x20, 0xc0, 0xcf, 0xc1, 0x22, 0xc5, 0x6e, 0x87, 0x84, 0x53, 0x2f, 0xdd, 0x5e, - 0xd9, 0xd3, 0x8d, 0xc2, 0xb3, 0x66, 0x1c, 0xd7, 0x51, 0x08, 0x1d, 0xef, 0xb8, 0xf8, 0x64, 0x48, - 0x2a, 0xe8, 0x27, 0x60, 0x4d, 0x6c, 0xb5, 0x47, 0xb9, 0xb0, 0xc0, 0x77, 0x41, 0xa9, 0x6b, 0xbb, - 0x62, 0x52, 0x0b, 0xe6, 0x8a, 0x64, 0x95, 0x4e, 0x6c, 0x17, 0x85, 0xe3, 0xc2, 0x8c, 0x2f, 0x45, - 0xcc, 0x26, 0xcd, 0xf8, 0x12, 0x85, 0xe3, 0xfa, 0x43, 0xb0, 0x24, 0x3d, 0x4e, 0x0a, 0x95, 0xa6, - 0x0b, 0x95, 0x72, 0x84, 0xfe, 0x71, 0x0d, 0x6c, 0x36, 0x3c, 0xab, 0x6e, 0x33, 0x1a, 0x88, 0x78, - 0x99, 0x81, 0xd5, 0x21, 0xfc, 0x2d, 0xe4, 0xc7, 0x53, 0x30, 0xcf, 0x7c, 0xd2, 0x96, 0x69, 0xb1, - 0x37, 0x25, 0xb6, 0x39, 0xf3, 0x6b, 0xfa, 0xa4, 0x3d, 0x3e, 0x96, 0xe1, 0x17, 0x12, 0x6a, 0xf0, - 0x39, 0x58, 0x64, 0x1c, 0xf3, 0x80, 0xa9, 0x25, 0xa1, 0x7b, 0xef, 0x8a, 0xba, 0x82, 0x3b, 0xde, - 0xc5, 0xd1, 0x37, 0x92, 0x9a, 0xfa, 0xbf, 0x15, 0x70, 0x23, 0x87, 0xf5, 0xd8, 0x66, 0x1c, 0x3e, - 0xcf, 0x44, 0xcc, 0x78, 0xbd, 0x88, 0x85, 0x6c, 0x11, 0xaf, 0xf8, 0xf0, 0x46, 0x23, 0x13, 0xd1, - 0x6a, 0x82, 0x05, 0x9b, 0x93, 0x6e, 0x94, 0x8a, 0xc6, 0xd5, 0x96, 0x65, 0xae, 0x49, 0xe9, 0x85, - 0xe3, 0x50, 0x04, 0x8d, 0xb4, 0xf4, 0x6f, 0xaf, 0xe5, 0x2e, 0x27, 0x0c, 0x27, 0x3c, 0x03, 0xab, - 0x5d, 0xdb, 0xdd, 0xef, 0x61, 0xdb, 0xc1, 0x2d, 0x79, 0x7a, 0xa6, 0x25, 0x41, 0x58, 0x61, 0x8d, - 0x51, 0x85, 0x35, 0x8e, 0x5d, 0x7e, 0x4a, 0x9b, 0x9c, 0xda, 0x6e, 0xc7, 0xdc, 0x18, 0x0e, 0xb4, - 0xd5, 0x93, 0x09, 0x25, 0x94, 0xd0, 0x85, 0xbf, 0x07, 0x65, 0x46, 0x1c, 0xd2, 0xe6, 0x1e, 0xbd, - 0x5a, 0x85, 0x78, 0x8c, 0x5b, 0xc4, 0x69, 0x4a, 0xaa, 0xb9, 0x1a, 0xc6, 0x2d, 0xfa, 0x42, 0xb1, - 0x24, 0x74, 0xc0, 0x7a, 0x17, 0x5f, 0x3e, 0x73, 0x71, 0xbc, 0x90, 0xd2, 0x0f, 0x5c, 0x08, 0x1c, - 0x0e, 0xb4, 0xf5, 0x93, 0x84, 0x16, 0x4a, 0x69, 0xeb, 0xc3, 0x79, 0x70, 0xb3, 0x30, 0xab, 0xe0, - 0xe7, 0x00, 0x7a, 0x2d, 0x46, 0x68, 0x8f, 0x58, 0x0f, 0x47, 0x77, 0x90, 0xed, 0x45, 0x07, 0x77, - 0x57, 0x6e, 0x10, 0x3c, 0xcd, 0x20, 0x50, 0x0e, 0x0b, 0xfe, 0x45, 0x01, 0x6b, 0xd6, 0xc8, 0x0d, - 0xb1, 0x1a, 0x9e, 0x15, 0x25, 0xc6, 0xc3, 0x1f, 0x92, 0xef, 0x46, 0x7d, 0x52, 0xe9, 0xd0, 0xe5, - 0xb4, 0x6f, 0x6e, 0xcb, 0x09, 0xad, 0x25, 0x6c, 0x28, 0xe9, 0x34, 0x5c, 0x92, 0x15, 0x4b, 0x32, - 0x79, 0xa7, 0x89, 0x10, 0x2f, 0x8c, 0x97, 0x54, 0xcf, 0x20, 0x50, 0x0e, 0x0b, 0xfe, 0x0a, 0xac, - 0xb7, 0x03, 0x4a, 0x89, 0xcb, 0x1f, 0x11, 0xec, 0xf0, 0xf3, 0xbe, 0x3a, 0x2f, 0x74, 0x76, 0xa4, - 0xce, 0xfa, 0x41, 0xc2, 0x8a, 0x52, 0xe8, 0x90, 0x6f, 0x11, 0x66, 0x53, 0x62, 0x45, 0xfc, 0x85, - 0x24, 0xbf, 0x9e, 0xb0, 0xa2, 0x14, 0x1a, 0x3e, 0x00, 0xab, 0xe4, 0xd2, 0x27, 0xed, 0x28, 0xa0, - 0x8b, 0x82, 0xbd, 0x25, 0xd9, 0xab, 0x87, 0x13, 0x36, 0x94, 0x40, 0xee, 0x3a, 0x00, 0x66, 0x23, - 0x08, 0x37, 0x40, 0xe9, 0x82, 0xf4, 0x47, 0xd7, 0x0e, 0x0a, 0x7f, 0xc2, 0xcf, 0xc0, 0x42, 0x0f, - 0x3b, 0x01, 0x91, 0x89, 0xfe, 0xc1, 0xeb, 0x25, 0xfa, 0x53, 0xbb, 0x4b, 0xd0, 0x88, 0xf8, 0xf3, - 0x6b, 0x0f, 0x14, 0xfd, 
0x1b, 0x05, 0x54, 0x1a, 0x9e, 0xd5, 0x24, 0xed, 0x80, 0xda, 0xbc, 0xdf, - 0x10, 0x9b, 0xfc, 0x16, 0x0a, 0x36, 0x4a, 0x14, 0xec, 0x8f, 0xa7, 0x27, 0x5a, 0x72, 0x76, 0x45, - 0xe5, 0x5a, 0x7f, 0xa1, 0x80, 0xed, 0x0c, 0xfa, 0x2d, 0x94, 0xd3, 0x5f, 0x27, 0xcb, 0xe9, 0x87, - 0x57, 0x59, 0x4c, 0x41, 0x31, 0xfd, 0xa6, 0x92, 0xb3, 0x14, 0x51, 0x4a, 0xc3, 0xd6, 0x8e, 0xda, - 0x3d, 0xdb, 0x21, 0x1d, 0x62, 0x89, 0xc5, 0x94, 0x27, 0x5a, 0xbb, 0xd8, 0x82, 0x26, 0x50, 0x90, - 0x81, 0x1d, 0x8b, 0x9c, 0xe1, 0xc0, 0xe1, 0xfb, 0x96, 0x75, 0x80, 0x7d, 0xdc, 0xb2, 0x1d, 0x9b, - 0xdb, 0xb2, 0x17, 0x59, 0x36, 0x3f, 0x1d, 0x0e, 0xb4, 0x9d, 0x7a, 0x2e, 0xe2, 0xd5, 0x40, 0x7b, - 0x37, 0xdb, 0xca, 0x1b, 0x31, 0xa4, 0x8f, 0x0a, 0xa4, 0x61, 0x1f, 0xa8, 0x94, 0xfc, 0x29, 0x08, - 0x0f, 0x45, 0x9d, 0x7a, 0x7e, 0xc2, 0x6d, 0x49, 0xb8, 0xfd, 0xe5, 0x70, 0xa0, 0xa9, 0xa8, 0x00, - 0x33, 0xdb, 0x71, 0xa1, 0x3c, 0xfc, 0x12, 0x6c, 0x62, 0xd9, 0x84, 0x4f, 0x7a, 0x9d, 0x17, 0x5e, - 0x1f, 0x0c, 0x07, 0xda, 0xe6, 0x7e, 0xd6, 0x3c, 0xdb, 0x61, 0x9e, 0x28, 0xac, 0x81, 0xa5, 0x9e, - 0xe8, 0xd7, 0x99, 0xba, 0x20, 0xf4, 0xb7, 0x87, 0x03, 0x6d, 0x69, 0xd4, 0xc2, 0x87, 0x9a, 0x8b, - 0x47, 0x4d, 0xd1, 0x05, 0x46, 0x28, 0xf8, 0x09, 0x58, 0x39, 0xf7, 0x18, 0x7f, 0x42, 0xf8, 0x57, - 0x1e, 0xbd, 0x10, 0x85, 0xa1, 0x6c, 0x6e, 0xca, 0x1d, 0x5c, 0x79, 0x34, 0x36, 0xa1, 0x49, 0x1c, - 0xfc, 0x2d, 0x58, 0x3e, 0x97, 0x3d, 0x1f, 0x53, 0x97, 0x44, 0xa2, 0xdd, 0x9e, 0x92, 0x68, 0x89, - 0xfe, 0xd0, 0xac, 0x48, 0xf9, 0xe5, 0x68, 0x98, 0xa1, 0xb1, 0x1a, 0xfc, 0x09, 0x58, 0x12, 0x1f, - 0xc7, 0x75, 0xb5, 0x2c, 0x66, 0x73, 0x5d, 0xc2, 0x97, 0x1e, 0x8d, 0x86, 0x51, 0x64, 0x8f, 0xa0, - 0xc7, 0x8d, 0x03, 0x75, 0x39, 0x0b, 0x3d, 0x6e, 0x1c, 0xa0, 0xc8, 0x0e, 0x9f, 0x83, 0x25, 0x46, - 0x1e, 0xdb, 0x6e, 0x70, 0xa9, 0x02, 0x71, 0xe4, 0xee, 0x4c, 0x99, 0x6e, 0xf3, 0x50, 0x20, 0x53, - 0xdd, 0xf6, 0x58, 0x5d, 0xda, 0x51, 0x24, 0x09, 0x2d, 0xb0, 0x4c, 0x03, 0x77, 0x9f, 0x3d, 0x63, - 0x84, 0xaa, 0x2b, 0x99, 0xab, 0x3e, 0xad, 0x8f, 0x22, 0x6c, 0xda, 0x43, 0x1c, 0x99, 0x18, 0x81, - 0xc6, 0xc2, 0xd0, 0x02, 0x40, 0x7c, 0x88, 0xa6, 0x5e, 0xdd, 0x99, 0xd9, 0x04, 0xa2, 0x18, 0x9c, - 0xf6, 0xb3, 0x1e, 0x1e, 0xcf, 0xb1, 0x19, 0x4d, 0xe8, 0xc2, 0xbf, 0x2a, 0x00, 0xb2, 0xc0, 0xf7, - 0x1d, 0xd2, 0x25, 0x2e, 0xc7, 0x8e, 0x18, 0x65, 0xea, 0xaa, 0x70, 0xf7, 0x8b, 0x69, 0x51, 0xcb, - 0x90, 0xd2, 0x6e, 0xe3, 0x6b, 0x33, 0x0b, 0x45, 0x39, 0x3e, 0xc3, 0x4d, 0x3b, 0x93, 0xab, 0x5d, - 0x9b, 0xb9, 0x69, 0xf9, 0x7f, 0x91, 0xc6, 0x9b, 0x26, 0xed, 0x28, 0x92, 0x84, 0x5f, 0x80, 0x9d, - 0xe8, 0x0f, 0x24, 0xf2, 0x3c, 0x7e, 0x64, 0x3b, 0x84, 0xf5, 0x19, 0x27, 0x5d, 0x75, 0x5d, 0x24, - 0x53, 0x55, 0x32, 0x77, 0x50, 0x2e, 0x0a, 0x15, 0xb0, 0x61, 0x17, 0x68, 0x51, 0x11, 0x0a, 0x4f, - 0x68, 0x5c, 0x05, 0x0f, 0x59, 0x1b, 0x3b, 0xa3, 0xc6, 0xe8, 0xba, 0x70, 0xf0, 0xfe, 0x70, 0xa0, - 0x69, 0xf5, 0xe9, 0x50, 0x34, 0x4b, 0x0b, 0xfe, 0x06, 0xa8, 0xb8, 0xc8, 0xcf, 0x86, 0xf0, 0xf3, - 0xa3, 0xb0, 0xb2, 0x15, 0x3a, 0x28, 0x64, 0x43, 0x1f, 0x6c, 0xe0, 0xe4, 0x5f, 0x79, 0xa6, 0x56, - 0xc4, 0x59, 0xff, 0x60, 0xca, 0x3e, 0xa4, 0xfe, 0xfd, 0x9b, 0xaa, 0x0c, 0xe3, 0x46, 0xca, 0xc0, - 0x50, 0x46, 0x1d, 0x5e, 0x02, 0x88, 0xd3, 0x2f, 0x0f, 0x4c, 0x85, 0x33, 0x2f, 0xb2, 0xcc, 0x73, - 0xc5, 0x38, 0xd5, 0x32, 0x26, 0x86, 0x72, 0x7c, 0x40, 0x0e, 0x2a, 0x38, 0xf5, 0x52, 0xc2, 0xd4, - 0x1b, 0xc2, 0xf1, 0x4f, 0x67, 0x3b, 0x8e, 0x39, 0xe6, 0x4d, 0xe9, 0xb7, 0x92, 0xb6, 0x30, 0x94, - 0x75, 0x00, 0x1f, 0x83, 0x2d, 0x39, 0xf8, 0xcc, 0x65, 0xf8, 0x8c, 0x34, 0xfb, 0xac, 0xcd, 0x1d, - 0xa6, 0x6e, 0x8a, 0xda, 0xad, 0x0e, 0x07, 0xda, 
0xd6, 0x7e, 0x8e, 0x1d, 0xe5, 0xb2, 0xe0, 0x67, - 0x60, 0xe3, 0xcc, 0xa3, 0x2d, 0xdb, 0xb2, 0x88, 0x1b, 0x29, 0x6d, 0x09, 0xa5, 0xad, 0x30, 0xfe, - 0x47, 0x29, 0x1b, 0xca, 0xa0, 0x21, 0x03, 0xdb, 0x52, 0xb9, 0x41, 0xbd, 0xf6, 0x89, 0x17, 0xb8, - 0x3c, 0xbc, 0x2e, 0x98, 0xba, 0x1d, 0x5f, 0x91, 0xdb, 0xfb, 0x79, 0x80, 0x57, 0x03, 0xed, 0x56, - 0xce, 0x75, 0x95, 0x00, 0xa1, 0x7c, 0x6d, 0xe8, 0x80, 0x55, 0xf9, 0xf6, 0x75, 0xe0, 0x60, 0xc6, - 0x54, 0x55, 0x1c, 0xf5, 0xfb, 0xd3, 0x0b, 0x5b, 0x0c, 0x4f, 0x9f, 0x77, 0xf1, 0xa7, 0x6c, 0x12, - 0x80, 0x12, 0xea, 0xfa, 0xdf, 0x15, 0x70, 0xb3, 0xb0, 0x30, 0xc2, 0xfb, 0x89, 0x07, 0x15, 0x3d, - 0xf5, 0xa0, 0x02, 0xb3, 0xc4, 0x37, 0xf0, 0x9e, 0xf2, 0xb5, 0x02, 0xd4, 0xa2, 0x1b, 0x02, 0x7e, - 0x92, 0x98, 0xe0, 0x7b, 0xa9, 0x09, 0x56, 0x32, 0xbc, 0x37, 0x30, 0xbf, 0x6f, 0x15, 0xf0, 0xce, - 0x94, 0x1d, 0x88, 0x0b, 0x12, 0xb1, 0x26, 0x51, 0x4f, 0x70, 0x78, 0x94, 0x15, 0x91, 0x47, 0xe3, - 0x82, 0x94, 0x83, 0x41, 0x85, 0x6c, 0xf8, 0x0c, 0xdc, 0x90, 0xd5, 0x30, 0x6d, 0x13, 0x9d, 0xfb, - 0xb2, 0xf9, 0xce, 0x70, 0xa0, 0xdd, 0xa8, 0xe7, 0x43, 0x50, 0x11, 0x57, 0xff, 0xa7, 0x02, 0x76, - 0xf2, 0xaf, 0x7c, 0x78, 0x37, 0x11, 0x6e, 0x2d, 0x15, 0xee, 0xeb, 0x29, 0x96, 0x0c, 0xf6, 0x1f, - 0xc0, 0xba, 0x6c, 0x0c, 0x92, 0xef, 0x83, 0x89, 0xa0, 0x87, 0x47, 0x24, 0xec, 0xe9, 0xa5, 0x44, - 0x94, 0xbe, 0xe2, 0xaf, 0x78, 0x72, 0x0c, 0xa5, 0xd4, 0xf4, 0x7f, 0x29, 0xe0, 0xbd, 0x99, 0x97, - 0x2d, 0x34, 0x13, 0x53, 0x37, 0x52, 0x53, 0xaf, 0x16, 0x0b, 0xbc, 0x99, 0x67, 0x42, 0xf3, 0xa3, - 0x17, 0x2f, 0xab, 0x73, 0xdf, 0xbd, 0xac, 0xce, 0x7d, 0xff, 0xb2, 0x3a, 0xf7, 0xe7, 0x61, 0x55, - 0x79, 0x31, 0xac, 0x2a, 0xdf, 0x0d, 0xab, 0xca, 0xf7, 0xc3, 0xaa, 0xf2, 0xdf, 0x61, 0x55, 0xf9, - 0xdb, 0xff, 0xaa, 0x73, 0xbf, 0x5b, 0x92, 0x72, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xe0, - 0x55, 0x1c, 0x41, 0x18, 0x00, 0x00, -} - -func (m *AllowedCSIDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedCSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedCSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -769,27 +170,21 @@ func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { } func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedFlexVolume) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Driver) - copy(dAtA[i:], m.Driver) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) + i += copy(dAtA[i:], m.Driver) + return i, nil } func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, 
err } @@ -797,35 +192,29 @@ func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { } func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AllowedHostPath) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) + i += copy(dAtA[i:], m.PathPrefix) + dAtA[i] = 0x10 + i++ if m.ReadOnly { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 - i -= len(m.PathPrefix) - copy(dAtA[i:], m.PathPrefix) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + return i, nil } func (m *Eviction) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -833,44 +222,35 @@ func (m *Eviction) Marshal() (dAtA []byte, err error) { } func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Eviction) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 if m.DeleteOptions != nil { - { - size, err := m.DeleteOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 - } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size())) + n2, err := m.DeleteOptions.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n2 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -878,41 +258,33 @@ func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FSGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Ranges { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *HostPortRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -920,28 +292,23 @@ func (m *HostPortRange) Marshal() (dAtA []byte, err error) { } func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostPortRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + return i, nil } func (m *IDRange) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -949,28 +316,23 @@ func (m *IDRange) Marshal() (dAtA []byte, err error) { } func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IDRange) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) + return i, nil } func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -978,52 +340,41 @@ func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodDisruptionBudget) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n3 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + return i, nil } func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1031,46 
+382,37 @@ func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodDisruptionBudgetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1078,58 +420,47 @@ func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodDisruptionBudgetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.MaxUnavailable != nil { - { - size, err := m.MaxUnavailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.MinAvailable != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MinAvailable.Size())) + n7, err := m.MinAvailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n7 } if m.Selector != nil { - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n8, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 } - if m.MinAvailable != nil { - { - size, err := m.MinAvailable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if m.MaxUnavailable != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) + n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa + i += n9 } - return len(dAtA) - i, nil + return i, nil } func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1137,66 +468,63 @@ func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { } func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodDisruptionBudgetStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) - i-- - dAtA[i] = 0x30 - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) - i-- - dAtA[i] = 0x28 - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) - i-- - dAtA[i] = 0x20 - i = encodeVarintGenerated(dAtA, i, uint64(m.DisruptionsAllowed)) - i-- - dAtA[i] = 0x18 + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) if len(m.DisruptedPods) > 0 { keysForDisruptedPods := make([]string, 0, len(m.DisruptedPods)) for k := range m.DisruptedPods { keysForDisruptedPods = append(keysForDisruptedPods, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - for iNdEx := len(keysForDisruptedPods) - 1; iNdEx >= 0; iNdEx-- { - v := m.DisruptedPods[string(keysForDisruptedPods[iNdEx])] - baseI := i - { - size, err := (&v).MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, k := range keysForDisruptedPods { dAtA[i] = 0x12 - i -= len(keysForDisruptedPods[iNdEx]) - copy(dAtA[i:], keysForDisruptedPods[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForDisruptedPods[iNdEx]))) - i-- + i++ + v := m.DisruptedPods[string(k)] + msgSize := 0 + if (&v) != nil { + msgSize = (&v).Size() + msgSize += 1 + sovGenerated(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) + n10, err := (&v).MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 } } - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + dAtA[i] = 0x18 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) + dAtA[i] = 0x20 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) + return i, nil } func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1204,42 +532,33 @@ func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSecurityPolicy) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n11 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = 
encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n12, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n12 + return i, nil } func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1247,46 +566,37 @@ func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSecurityPolicyList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n13, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n13 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1294,325 +604,262 @@ func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { } func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodSecurityPolicySpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.RuntimeClass != nil { - { - size, err := m.RuntimeClass.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xc2 + dAtA[i] = 0x8 + i++ + if m.Privileged { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - if len(m.AllowedCSIDrivers) > 0 { - for iNdEx := len(m.AllowedCSIDrivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedCSIDrivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + if len(m.DefaultAddCapabilities) > 0 { + for _, s := range m.DefaultAddCapabilities { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xba + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if m.RunAsGroup != nil { - { - size, err := m.RunAsGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.RequiredDropCapabilities) > 0 { + for _, s := range m.RequiredDropCapabilities { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + 
i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xb2 } - if len(m.AllowedProcMountTypes) > 0 { - for iNdEx := len(m.AllowedProcMountTypes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedProcMountTypes[iNdEx]) - copy(dAtA[i:], m.AllowedProcMountTypes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedProcMountTypes[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xaa - } - } - if len(m.ForbiddenSysctls) > 0 { - for iNdEx := len(m.ForbiddenSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ForbiddenSysctls[iNdEx]) - copy(dAtA[i:], m.ForbiddenSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ForbiddenSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0xa2 - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for iNdEx := len(m.AllowedUnsafeSysctls) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedUnsafeSysctls[iNdEx]) - copy(dAtA[i:], m.AllowedUnsafeSysctls[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedUnsafeSysctls[iNdEx]))) - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x9a - } - } - if len(m.AllowedFlexVolumes) > 0 { - for iNdEx := len(m.AllowedFlexVolumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedFlexVolumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.AllowedCapabilities) > 0 { + for _, s := range m.AllowedCapabilities { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x92 + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.AllowedHostPaths) > 0 { - for iNdEx := len(m.AllowedHostPaths) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedHostPaths[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Volumes) > 0 { + for _, s := range m.Volumes { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - if m.AllowPrivilegeEscalation != nil { - i-- - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x80 - } - if m.DefaultAllowPrivilegeEscalation != nil { - i-- - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x78 } - i-- - if m.ReadOnlyRootFilesystem { + dAtA[i] = 0x30 + i++ + if m.HostNetwork { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x70 - { - size, err := m.FSGroup.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - { - size, err := m.SupplementalGroups.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x62 - { - size, err := m.RunAsUser.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x5a - { - size, err := m.SELinux.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + if len(m.HostPorts) > 0 { + for _, msg := range m.HostPorts { + dAtA[i] = 0x3a + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x52 - i-- - if m.HostIPC { + dAtA[i] = 0x40 + i++ + if m.HostPID { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- + i++ dAtA[i] = 0x48 - i-- - if m.HostPID { + i++ + if m.HostIPC { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x40 - if len(m.HostPorts) > 0 { - for iNdEx := len(m.HostPorts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.HostPorts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x3a - } + i++ + dAtA[i] = 0x52 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinux.Size())) + n14, err := m.SELinux.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - if m.HostNetwork { + i += n14 + dAtA[i] = 0x5a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) + n15, err := m.RunAsUser.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n15 + dAtA[i] = 0x62 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) + n16, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n16 + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) + n17, err := m.FSGroup.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n17 + dAtA[i] = 0x70 + i++ + if m.ReadOnlyRootFilesystem { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x30 - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Volumes[iNdEx]) - copy(dAtA[i:], m.Volumes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Volumes[iNdEx]))) - i-- - dAtA[i] = 0x2a + i++ + if m.DefaultAllowPrivilegeEscalation != nil { + dAtA[i] = 0x78 + i++ + if *m.DefaultAllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - if len(m.AllowedCapabilities) > 0 { - for iNdEx := len(m.AllowedCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedCapabilities[iNdEx]) - copy(dAtA[i:], m.AllowedCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x22 + if m.AllowPrivilegeEscalation != nil { + dAtA[i] = 0x80 + i++ + dAtA[i] = 0x1 + i++ + if *m.AllowPrivilegeEscalation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ } - if len(m.RequiredDropCapabilities) > 0 { - for iNdEx := len(m.RequiredDropCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.RequiredDropCapabilities[iNdEx]) - copy(dAtA[i:], m.RequiredDropCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.RequiredDropCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x1a + if len(m.AllowedHostPaths) > 0 { + for _, msg := range m.AllowedHostPaths { + dAtA[i] = 0x8a + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.DefaultAddCapabilities) > 0 { - for iNdEx := len(m.DefaultAddCapabilities) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DefaultAddCapabilities[iNdEx]) - copy(dAtA[i:], m.DefaultAddCapabilities[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DefaultAddCapabilities[iNdEx]))) - i-- - dAtA[i] = 0x12 + if len(m.AllowedFlexVolumes) > 0 { + for _, msg := range m.AllowedFlexVolumes { + dAtA[i] = 0x92 + i++ + dAtA[i] = 0x1 + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i-- - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if len(m.AllowedUnsafeSysctls) > 0 { + for _, s := range m.AllowedUnsafeSysctls { + dAtA[i] = 0x9a + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *RunAsGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if len(m.ForbiddenSysctls) > 0 { + for _, s := range m.ForbiddenSysctls { + dAtA[i] = 0xa2 + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } } - return dAtA[:n], nil -} - -func (m *RunAsGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsGroupStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.AllowedProcMountTypes) > 0 { + for _, s := range m.AllowedProcMountTypes { + dAtA[i] = 0xaa + i++ + dAtA[i] = 0x1 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x12 + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1620,80 +867,33 @@ func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RunAsUserStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Ranges { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *RuntimeClassStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return 
nil, err - } - return dAtA[:n], nil -} - -func (m *RuntimeClassStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RuntimeClassStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DefaultRuntimeClassName != nil { - i -= len(*m.DefaultRuntimeClassName) - copy(dAtA[i:], *m.DefaultRuntimeClassName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.DefaultRuntimeClassName))) - i-- - dAtA[i] = 0x12 - } - if len(m.AllowedRuntimeClassNames) > 0 { - for iNdEx := len(m.AllowedRuntimeClassNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.AllowedRuntimeClassNames[iNdEx]) - copy(dAtA[i:], m.AllowedRuntimeClassNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.AllowedRuntimeClassNames[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil + return i, nil } func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1701,39 +901,31 @@ func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SELinuxStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) if m.SELinuxOptions != nil { - { - size, err := m.SELinuxOptions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) + n18, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n18 } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1741,63 +933,57 @@ func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { } func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *SupplementalGroupsStrategyOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) + i += copy(dAtA[i:], m.Rule) if len(m.Ranges) > 0 { - for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Ranges { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i -= len(m.Rule) - copy(dAtA[i:], m.Rule) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + 
return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } -func (m *AllowedCSIDriver) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *AllowedFlexVolume) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Driver) @@ -1806,9 +992,6 @@ func (m *AllowedFlexVolume) Size() (n int) { } func (m *AllowedHostPath) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.PathPrefix) @@ -1818,9 +1001,6 @@ func (m *AllowedHostPath) Size() (n int) { } func (m *Eviction) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1833,9 +1013,6 @@ func (m *Eviction) Size() (n int) { } func (m *FSGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Rule) @@ -1850,9 +1027,6 @@ func (m *FSGroupStrategyOptions) Size() (n int) { } func (m *HostPortRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1861,9 +1035,6 @@ func (m *HostPortRange) Size() (n int) { } func (m *IDRange) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Min)) @@ -1872,9 +1043,6 @@ func (m *IDRange) Size() (n int) { } func (m *PodDisruptionBudget) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1887,9 +1055,6 @@ func (m *PodDisruptionBudget) Size() (n int) { } func (m *PodDisruptionBudgetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1904,9 +1069,6 @@ func (m *PodDisruptionBudgetList) Size() (n int) { } func (m *PodDisruptionBudgetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MinAvailable != nil { @@ -1925,9 +1087,6 @@ func (m *PodDisruptionBudgetSpec) Size() (n int) { } func (m *PodDisruptionBudgetStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.ObservedGeneration)) @@ -1940,7 +1099,7 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) { n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - n += 1 + sovGenerated(uint64(m.DisruptionsAllowed)) + n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed)) n += 1 + sovGenerated(uint64(m.CurrentHealthy)) n += 1 + sovGenerated(uint64(m.DesiredHealthy)) n += 1 + sovGenerated(uint64(m.ExpectedPods)) @@ -1948,9 +1107,6 @@ func (m *PodDisruptionBudgetStatus) Size() (n int) { } func (m *PodSecurityPolicy) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1961,9 +1117,6 @@ func (m *PodSecurityPolicy) Size() (n int) { } func (m *PodSecurityPolicyList) Size() (n int) { - if m == nil { - return 0 - } 
var l int _ = l l = m.ListMeta.Size() @@ -1978,9 +1131,6 @@ func (m *PodSecurityPolicyList) Size() (n int) { } func (m *PodSecurityPolicySpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -2062,44 +1212,10 @@ func (m *PodSecurityPolicySpec) Size() (n int) { n += 2 + l + sovGenerated(uint64(l)) } } - if m.RunAsGroup != nil { - l = m.RunAsGroup.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedCSIDrivers) > 0 { - for _, e := range m.AllowedCSIDrivers { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if m.RuntimeClass != nil { - l = m.RuntimeClass.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RunAsGroupStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } return n } func (m *RunAsUserStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Rule) @@ -2113,29 +1229,7 @@ func (m *RunAsUserStrategyOptions) Size() (n int) { return n } -func (m *RuntimeClassStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.AllowedRuntimeClassNames) > 0 { - for _, s := range m.AllowedRuntimeClassNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.DefaultRuntimeClassName != nil { - l = len(*m.DefaultRuntimeClassName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *SELinuxStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Rule) @@ -2148,9 +1242,6 @@ func (m *SELinuxStrategyOptions) Size() (n int) { } func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Rule) @@ -2165,21 +1256,18 @@ func (m *SupplementalGroupsStrategyOptions) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (this *AllowedCSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedCSIDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} func (this *AllowedFlexVolume) String() string { if this == nil { return "nil" @@ -2206,8 +1294,8 @@ func (this *Eviction) String() string { return "nil" } s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "v1.DeleteOptions", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, `}`, }, "") return s @@ -2216,14 +1304,9 @@ func (this *FSGroupStrategyOptions) String() string { if this == nil { return "nil" } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - 
repeatedStringForRanges += "}" s := strings.Join([]string{`&FSGroupStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2255,7 +1338,7 @@ func (this *PodDisruptionBudget) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -2266,14 +1349,9 @@ func (this *PodDisruptionBudgetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PodDisruptionBudget{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2283,9 +1361,9 @@ func (this *PodDisruptionBudgetSpec) String() string { return "nil" } s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "intstr.IntOrString", 1) + `,`, + `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, + `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, + `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, `}`, }, "") return s @@ -2299,7 +1377,7 @@ func (this *PodDisruptionBudgetStatus) String() string { keysForDisruptedPods = append(keysForDisruptedPods, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]v1.Time{" + mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" for _, k := range keysForDisruptedPods { mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) } @@ -2307,7 +1385,7 @@ func (this *PodDisruptionBudgetStatus) String() string { s := 
strings.Join([]string{`&PodDisruptionBudgetStatus{`, `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, `DisruptedPods:` + mapStringForDisruptedPods + `,`, - `DisruptionsAllowed:` + fmt.Sprintf("%v", this.DisruptionsAllowed) + `,`, + `PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`, `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, @@ -2320,7 +1398,7 @@ func (this *PodSecurityPolicy) String() string { return "nil" } s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -2330,14 +1408,9 @@ func (this *PodSecurityPolicyList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PodSecurityPolicy{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2346,26 +1419,6 @@ func (this *PodSecurityPolicySpec) String() string { if this == nil { return "nil" } - repeatedStringForHostPorts := "[]HostPortRange{" - for _, f := range this.HostPorts { - repeatedStringForHostPorts += strings.Replace(strings.Replace(f.String(), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + "," - } - repeatedStringForHostPorts += "}" - repeatedStringForAllowedHostPaths := "[]AllowedHostPath{" - for _, f := range this.AllowedHostPaths { - repeatedStringForAllowedHostPaths += strings.Replace(strings.Replace(f.String(), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedHostPaths += "}" - repeatedStringForAllowedFlexVolumes := "[]AllowedFlexVolume{" - for _, f := range this.AllowedFlexVolumes { - repeatedStringForAllowedFlexVolumes += strings.Replace(strings.Replace(f.String(), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedFlexVolumes += "}" - repeatedStringForAllowedCSIDrivers := "[]AllowedCSIDriver{" - for _, f := range this.AllowedCSIDrivers { - repeatedStringForAllowedCSIDrivers += strings.Replace(strings.Replace(f.String(), "AllowedCSIDriver", "AllowedCSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForAllowedCSIDrivers += "}" s := strings.Join([]string{`&PodSecurityPolicySpec{`, `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, @@ -2373,7 +1426,7 @@ func (this *PodSecurityPolicySpec) 
String() string { `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + repeatedStringForHostPorts + `,`, + `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, @@ -2383,30 +1436,11 @@ func (this *PodSecurityPolicySpec) String() string { `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + repeatedStringForAllowedHostPaths + `,`, - `AllowedFlexVolumes:` + repeatedStringForAllowedFlexVolumes + `,`, + `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, + `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `RunAsGroup:` + strings.Replace(this.RunAsGroup.String(), "RunAsGroupStrategyOptions", "RunAsGroupStrategyOptions", 1) + `,`, - `AllowedCSIDrivers:` + repeatedStringForAllowedCSIDrivers + `,`, - `RuntimeClass:` + strings.Replace(this.RuntimeClass.String(), "RuntimeClassStrategyOptions", "RuntimeClassStrategyOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" - s := strings.Join([]string{`&RunAsGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, `}`, }, "") return s @@ -2415,25 +1449,9 @@ func (this *RunAsUserStrategyOptions) String() string { if this == nil { return "nil" } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" s := strings.Join([]string{`&RunAsUserStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, - `}`, - }, "") - return s -} -func (this *RuntimeClassStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuntimeClassStrategyOptions{`, - `AllowedRuntimeClassNames:` + fmt.Sprintf("%v", this.AllowedRuntimeClassNames) + `,`, - `DefaultRuntimeClassName:` + valueToStringGenerated(this.DefaultRuntimeClassName) + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ 
-2444,7 +1462,7 @@ func (this *SELinuxStrategyOptions) String() string { } s := strings.Join([]string{`&SELinuxStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "v11.SELinuxOptions", 1) + `,`, + `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, `}`, }, "") return s @@ -2453,14 +1471,9 @@ func (this *SupplementalGroupsStrategyOptions) String() string { if this == nil { return "nil" } - repeatedStringForRanges := "[]IDRange{" - for _, f := range this.Ranges { - repeatedStringForRanges += strings.Replace(strings.Replace(f.String(), "IDRange", "IDRange", 1), `&`, ``, 1) + "," - } - repeatedStringForRanges += "}" s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + repeatedStringForRanges + `,`, + `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -2473,91 +1486,6 @@ func valueToStringGenerated(v interface{}) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("*%v", pv) } -func (m *AllowedCSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedCSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedCSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -2573,7 +1501,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2601,7 +1529,7 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2611,9 +1539,6 @@ 
func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2628,9 +1553,6 @@ func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2658,7 +1580,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2686,7 +1608,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2696,9 +1618,6 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2718,7 +1637,7 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2733,9 +1652,6 @@ func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2763,7 +1679,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2791,7 +1707,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2800,9 +1716,6 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2824,7 +1737,7 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2833,14 +1746,11 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.DeleteOptions == nil { - m.DeleteOptions = &v1.DeleteOptions{} + m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} } if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -2855,9 +1765,6 @@ func (m *Eviction) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2885,7 +1792,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2913,7 +1820,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break 
} @@ -2923,9 +1830,6 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2945,7 +1849,7 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2954,9 +1858,6 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2974,9 +1875,6 @@ func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3004,7 +1902,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3032,7 +1930,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int32(b&0x7F) << shift + m.Min |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3051,7 +1949,7 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int32(b&0x7F) << shift + m.Max |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3065,9 +1963,6 @@ func (m *HostPortRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3095,7 +1990,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3123,7 +2018,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Min |= int64(b&0x7F) << shift + m.Min |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3142,7 +2037,7 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Max |= int64(b&0x7F) << shift + m.Max |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3156,9 +2051,6 @@ func (m *IDRange) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3186,7 +2078,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3214,7 +2106,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3223,9 +2115,6 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3247,7 +2136,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3256,9 +2145,6 @@ func (m 
*PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3280,7 +2166,7 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3289,9 +2175,6 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3308,9 +2191,6 @@ func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3338,7 +2218,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3366,7 +2246,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3375,9 +2255,6 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3399,7 +2276,7 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3408,9 +2285,6 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3428,9 +2302,6 @@ func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3458,7 +2329,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3486,7 +2357,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3495,14 +2366,11 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MinAvailable == nil { - m.MinAvailable = &intstr.IntOrString{} + m.MinAvailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3522,7 +2390,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3531,14 +2399,11 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.Selector == nil { - m.Selector = &v1.LabelSelector{} + m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} } if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3558,7 +2423,7 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3567,14 +2432,11 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.MaxUnavailable == nil { - m.MaxUnavailable = &intstr.IntOrString{} + m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} } if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -3589,9 +2451,6 @@ func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3619,7 +2478,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3647,7 +2506,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ObservedGeneration |= int64(b&0x7F) << shift + m.ObservedGeneration |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3666,7 +2525,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3675,20 +2534,69 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]v1.Time) + m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) } - var mapkey string - mapvalue := &v1.Time{} - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var mapmsglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3698,94 +2606,37 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + mapmsglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if postmsgIndex < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue = &v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthGenerated } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + m.DisruptedPods[mapkey] = *mapvalue + } else { + var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time + m.DisruptedPods[mapkey] = mapvalue } - m.DisruptedPods[mapkey] = *mapvalue iNdEx = postIndex case 3: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptionsAllowed", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType) } - m.DisruptionsAllowed = 0 + m.PodDisruptionsAllowed = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -3795,7 +2646,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DisruptionsAllowed |= int32(b&0x7F) << shift + m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3814,7 +2665,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.CurrentHealthy |= int32(b&0x7F) << shift + m.CurrentHealthy |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3833,7 +2684,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - m.DesiredHealthy |= int32(b&0x7F) << shift + m.DesiredHealthy |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3852,7 +2703,7 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.ExpectedPods |= int32(b&0x7F) << shift + m.ExpectedPods |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -3866,9 +2717,6 @@ func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3896,7 +2744,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3924,7 +2772,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3933,9 +2781,6 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3957,7 +2802,7 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3966,9 +2811,6 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3985,9 +2827,6 @@ func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4015,7 +2854,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4043,7 +2882,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4052,9 +2891,6 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4076,7 +2912,7 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4085,9 +2921,6 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4105,9 +2938,6 @@ func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4135,7 +2965,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= 
(uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4163,7 +2993,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4183,7 +3013,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4193,9 +3023,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4215,7 +3042,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4225,9 +3052,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4247,7 +3071,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4257,9 +3081,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4279,7 +3100,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4289,9 +3110,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4311,7 +3129,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4331,7 +3149,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4340,9 +3158,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4365,7 +3180,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4385,7 +3200,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4405,7 +3220,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4414,9 +3229,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4438,7 +3250,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4447,9 +3259,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4471,7 +3280,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4480,9 +3289,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4504,7 +3310,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4513,9 +3319,6 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4537,7 +3340,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4557,7 +3360,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4566,230 +3369,9 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { m.DefaultAllowPrivilegeEscalation = &b case 16: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RunAsGroup == nil { - m.RunAsGroup = &RunAsGroupStrategyOptions{} - } - if err := m.RunAsGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 23: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field AllowedCSIDrivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) } - var msglen int + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4799,29 +3381,16 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedCSIDrivers = append(m.AllowedCSIDrivers, AllowedCSIDriver{}) - if err := m.AllowedCSIDrivers[len(m.AllowedCSIDrivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 24: + b := bool(v != 0) + m.AllowPrivilegeEscalation = &b + case 17: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RuntimeClass", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -4833,7 +3402,7 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4842,77 +3411,19 @@ func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if m.RuntimeClass == nil { - m.RuntimeClass = &RuntimeClassStrategyOptions{} - } - if err := m.RuntimeClass.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) + if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 18: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4922,29 +3433,28 @@ func (m *RunAsGroupStrategyOptions) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsGroupStrategy(dAtA[iNdEx:postIndex]) + m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) + if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 2: + case 19: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4954,82 +3464,24 @@ func (m *RunAsGroupStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 20: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5041,7 +3493,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5051,19 +3503,16 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) + m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: + case 21: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5073,25 +3522,20 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex @@ -5102,9 +3546,6 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5117,7 +3558,7 @@ func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { +func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -5132,7 +3573,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5140,15 +3581,15 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: wiretype end group for non-group") + return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RuntimeClassStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedRuntimeClassNames", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -5160,7 +3601,7 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5170,19 +3611,16 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedRuntimeClassNames = 
append(m.AllowedRuntimeClassNames, string(dAtA[iNdEx:postIndex])) + m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRuntimeClassName", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5192,24 +3630,22 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - s := string(dAtA[iNdEx:postIndex]) - m.DefaultRuntimeClassName = &s + m.Ranges = append(m.Ranges, IDRange{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -5220,9 +3656,6 @@ func (m *RuntimeClassStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5250,7 +3683,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5278,7 +3711,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5288,9 +3721,6 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5310,7 +3740,7 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5319,14 +3749,11 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } if m.SELinuxOptions == nil { - m.SELinuxOptions = &v11.SELinuxOptions{} + m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} } if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err @@ -5341,9 +3768,6 @@ func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5371,7 +3795,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5399,7 +3823,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { 
break } @@ -5409,9 +3833,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5431,7 +3852,7 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5440,9 +3861,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5460,9 +3878,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5478,7 +3893,6 @@ func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -5510,8 +3924,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -5528,34 +3944,169 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 1715 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, + 0x15, 0xd7, 0x9a, 0x92, 0x48, 0x8d, 0x24, 0x5a, 0x1a, 0xfd, 0xe9, 0x46, 0xa8, 0xb9, 0x0e, 0x03, + 0x14, 0x6e, 0x90, 0x2c, 0x63, 0x39, 0x69, 0x8d, 0xa6, 0x2d, 0xa2, 0x35, 0x25, 0x5b, 0x81, 0x55, + 0xb1, 
0x43, 0x3b, 0x68, 0x0b, 0xb7, 0xe8, 0x70, 0x77, 0x44, 0x4e, 0xb4, 0xdc, 0xdd, 0xce, 0xcc, + 0x32, 0xe4, 0xad, 0x87, 0x1e, 0x7a, 0xec, 0x17, 0xc8, 0x27, 0x28, 0x7a, 0xea, 0x97, 0x50, 0x81, + 0xa2, 0xc8, 0x31, 0xe8, 0x81, 0xa8, 0x59, 0xf4, 0x4b, 0xf8, 0xd2, 0x60, 0x87, 0xb3, 0x24, 0xf7, + 0x0f, 0x29, 0x2b, 0x40, 0x7c, 0xdb, 0x9d, 0xf7, 0xfb, 0xfd, 0xde, 0x9b, 0x37, 0x6f, 0xde, 0xce, + 0x0e, 0xb0, 0x2e, 0x1f, 0x72, 0x93, 0xfa, 0xb5, 0xcb, 0xb0, 0x45, 0x98, 0x47, 0x04, 0xe1, 0xb5, + 0x1e, 0xf1, 0x1c, 0x9f, 0xd5, 0x94, 0x01, 0x07, 0xb4, 0x16, 0xf8, 0x2e, 0xb5, 0x07, 0xb5, 0xde, + 0xfd, 0x16, 0x11, 0xf8, 0x7e, 0xad, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x19, 0x30, 0x5f, 0xf8, + 0xf0, 0xad, 0x31, 0xd4, 0xc4, 0x01, 0x35, 0xc7, 0x50, 0x53, 0x41, 0x0f, 0xde, 0x6f, 0x53, 0xd1, + 0x09, 0x5b, 0xa6, 0xed, 0x77, 0x6b, 0x6d, 0xbf, 0xed, 0xd7, 0x24, 0xa3, 0x15, 0x5e, 0xc8, 0x37, + 0xf9, 0x22, 0x9f, 0xc6, 0x4a, 0x07, 0xd5, 0x19, 0xa7, 0xb6, 0xcf, 0x48, 0xad, 0x97, 0xf1, 0x76, + 0xf0, 0xe1, 0x14, 0xd3, 0xc5, 0x76, 0x87, 0x7a, 0x84, 0x0d, 0x6a, 0xc1, 0x65, 0x3b, 0x1a, 0xe0, + 0xb5, 0x2e, 0x11, 0x38, 0x8f, 0x55, 0x9b, 0xc7, 0x62, 0xa1, 0x27, 0x68, 0x97, 0x64, 0x08, 0x3f, + 0xba, 0x8e, 0xc0, 0xed, 0x0e, 0xe9, 0xe2, 0x0c, 0xef, 0xc1, 0x3c, 0x5e, 0x28, 0xa8, 0x5b, 0xa3, + 0x9e, 0xe0, 0x82, 0xa5, 0x49, 0xd5, 0x8f, 0xc1, 0xf6, 0x91, 0xeb, 0xfa, 0x5f, 0x10, 0xe7, 0xc4, + 0x25, 0xfd, 0xcf, 0x7c, 0x37, 0xec, 0x12, 0xf8, 0x03, 0xb0, 0xea, 0x30, 0xda, 0x23, 0x4c, 0xd7, + 0xee, 0x6a, 0xf7, 0xd6, 0xac, 0xf2, 0xd5, 0xd0, 0x58, 0x1a, 0x0d, 0x8d, 0xd5, 0xba, 0x1c, 0x45, + 0xca, 0x5a, 0xe5, 0xe0, 0xb6, 0x22, 0x3f, 0xf1, 0xb9, 0x68, 0x60, 0xd1, 0x81, 0x87, 0x00, 0x04, + 0x58, 0x74, 0x1a, 0x8c, 0x5c, 0xd0, 0xbe, 0xa2, 0x43, 0x45, 0x07, 0x8d, 0x89, 0x05, 0xcd, 0xa0, + 0xe0, 0x7b, 0xa0, 0xc4, 0x08, 0x76, 0xce, 0x3d, 0x77, 0xa0, 0xdf, 0xba, 0xab, 0xdd, 0x2b, 0x59, + 0x5b, 0x8a, 0x51, 0x42, 0x6a, 0x1c, 0x4d, 0x10, 0xd5, 0x7f, 0x6b, 0xa0, 0x74, 0xdc, 0xa3, 0xb6, + 0xa0, 0xbe, 0x07, 0x7f, 0x0f, 0x4a, 0x51, 0xde, 0x1d, 0x2c, 0xb0, 0x74, 0xb6, 0x7e, 0xf8, 0x81, + 0x39, 0xad, 0x89, 0x49, 0x1a, 0xcc, 0xe0, 0xb2, 0x1d, 0x0d, 0x70, 0x33, 0x42, 0x9b, 0xbd, 0xfb, + 0xe6, 0x79, 0xeb, 0x73, 0x62, 0x8b, 0x33, 0x22, 0xf0, 0x34, 0xbc, 0xe9, 0x18, 0x9a, 0xa8, 0x42, + 0x17, 0x6c, 0x3a, 0xc4, 0x25, 0x82, 0x9c, 0x07, 0x91, 0x47, 0x2e, 0x23, 0x5c, 0x3f, 0x7c, 0xf0, + 0x7a, 0x6e, 0xea, 0xb3, 0x54, 0x6b, 0x7b, 0x34, 0x34, 0x36, 0x13, 0x43, 0x28, 0x29, 0x5e, 0xfd, + 0x52, 0x03, 0xfb, 0x27, 0xcd, 0xc7, 0xcc, 0x0f, 0x83, 0xa6, 0x88, 0xd6, 0xa9, 0x3d, 0x50, 0x26, + 0xf8, 0x63, 0xb0, 0xcc, 0x42, 0x97, 0xa8, 0x9c, 0xbe, 0xa3, 0x82, 0x5e, 0x46, 0xa1, 0x4b, 0x5e, + 0x0d, 0x8d, 0x9d, 0x14, 0xeb, 0xd9, 0x20, 0x20, 0x48, 0x12, 0xe0, 0xa7, 0x60, 0x95, 0x61, 0xaf, + 0x4d, 0xa2, 0xd0, 0x0b, 0xf7, 0xd6, 0x0f, 0xab, 0xe6, 0xdc, 0x5d, 0x63, 0x9e, 0xd6, 0x51, 0x04, + 0x9d, 0xae, 0xb8, 0x7c, 0xe5, 0x48, 0x29, 0x54, 0xcf, 0xc0, 0xa6, 0x5c, 0x6a, 0x9f, 0x09, 0x69, + 0x81, 0x77, 0x40, 0xa1, 0x4b, 0x3d, 0x19, 0xd4, 0x8a, 0xb5, 0xae, 0x58, 0x85, 0x33, 0xea, 0xa1, + 0x68, 0x5c, 0x9a, 0x71, 0x5f, 0xe6, 0x6c, 0xd6, 0x8c, 0xfb, 0x28, 0x1a, 0xaf, 0x3e, 0x06, 0x45, + 0xe5, 0x71, 0x56, 0xa8, 0xb0, 0x58, 0xa8, 0x90, 0x23, 0xf4, 0xd7, 0x5b, 0x60, 0xa7, 0xe1, 0x3b, + 0x75, 0xca, 0x59, 0x28, 0xf3, 0x65, 0x85, 0x4e, 0x9b, 0x88, 0x37, 0x50, 0x1f, 0xcf, 0xc0, 0x32, + 0x0f, 0x88, 0xad, 0xca, 0xe2, 0x70, 0x41, 0x6e, 0x73, 0xe2, 0x6b, 0x06, 0xc4, 0xb6, 0x36, 0xe2, + 0xa5, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x2f, 0xc0, 0x2a, 0x17, 0x58, 0x84, 0x5c, 0x2f, 0x48, 0xdd, + 0x0f, 0x6f, 0xa8, 0x2b, 0xb9, 
0xd3, 0x55, 0x1c, 0xbf, 0x23, 0xa5, 0x59, 0xfd, 0xa7, 0x06, 0xbe, + 0x97, 0xc3, 0x7a, 0x4a, 0xb9, 0x80, 0x2f, 0x32, 0x19, 0x33, 0x5f, 0x2f, 0x63, 0x11, 0x5b, 0xe6, + 0x6b, 0xb2, 0x79, 0xe3, 0x91, 0x99, 0x6c, 0x35, 0xc1, 0x0a, 0x15, 0xa4, 0x1b, 0x97, 0xa2, 0x79, + 0xb3, 0x69, 0x59, 0x9b, 0x4a, 0x7a, 0xe5, 0x34, 0x12, 0x41, 0x63, 0xad, 0xea, 0xbf, 0x6e, 0xe5, + 0x4e, 0x27, 0x4a, 0x27, 0xbc, 0x00, 0x1b, 0x5d, 0xea, 0x1d, 0xf5, 0x30, 0x75, 0x71, 0x4b, 0xed, + 0x9e, 0x45, 0x45, 0x10, 0xf5, 0x4a, 0x73, 0xdc, 0x2b, 0xcd, 0x53, 0x4f, 0x9c, 0xb3, 0xa6, 0x60, + 0xd4, 0x6b, 0x5b, 0x5b, 0xa3, 0xa1, 0xb1, 0x71, 0x36, 0xa3, 0x84, 0x12, 0xba, 0xf0, 0xb7, 0xa0, + 0xc4, 0x89, 0x4b, 0x6c, 0xe1, 0xb3, 0x9b, 0x75, 0x88, 0xa7, 0xb8, 0x45, 0xdc, 0xa6, 0xa2, 0x5a, + 0x1b, 0x51, 0xde, 0xe2, 0x37, 0x34, 0x91, 0x84, 0x2e, 0x28, 0x77, 0x71, 0xff, 0xb9, 0x87, 0x27, + 0x13, 0x29, 0x7c, 0xcb, 0x89, 0xc0, 0xd1, 0xd0, 0x28, 0x9f, 0x25, 0xb4, 0x50, 0x4a, 0xbb, 0xfa, + 0xbf, 0x65, 0xf0, 0xd6, 0xdc, 0xaa, 0x82, 0x9f, 0x02, 0xe8, 0xb7, 0x38, 0x61, 0x3d, 0xe2, 0x3c, + 0x1e, 0x7f, 0x4d, 0xa8, 0x1f, 0x6f, 0xdc, 0x03, 0xb5, 0x40, 0xf0, 0x3c, 0x83, 0x40, 0x39, 0x2c, + 0xf8, 0x27, 0x0d, 0x6c, 0x3a, 0x63, 0x37, 0xc4, 0x69, 0xf8, 0x4e, 0x5c, 0x18, 0x8f, 0xbf, 0x4d, + 0xbd, 0x9b, 0xf5, 0x59, 0xa5, 0x63, 0x4f, 0xb0, 0x81, 0xb5, 0xa7, 0x02, 0xda, 0x4c, 0xd8, 0x50, + 0xd2, 0x29, 0x3c, 0x03, 0xd0, 0x99, 0x48, 0x72, 0xf5, 0x4d, 0x93, 0x29, 0x5e, 0xb1, 0xee, 0x28, + 0x85, 0xbd, 0x84, 0xdf, 0x18, 0x84, 0x72, 0x88, 0xf0, 0xe7, 0xa0, 0x6c, 0x87, 0x8c, 0x11, 0x4f, + 0x3c, 0x21, 0xd8, 0x15, 0x9d, 0x81, 0xbe, 0x2c, 0xa5, 0xf6, 0x95, 0x54, 0xf9, 0x51, 0xc2, 0x8a, + 0x52, 0xe8, 0x88, 0xef, 0x10, 0x4e, 0x19, 0x71, 0x62, 0xfe, 0x4a, 0x92, 0x5f, 0x4f, 0x58, 0x51, + 0x0a, 0x0d, 0x1f, 0x82, 0x0d, 0xd2, 0x0f, 0x88, 0x1d, 0xe7, 0x74, 0x55, 0xb2, 0x77, 0x15, 0x7b, + 0xe3, 0x78, 0xc6, 0x86, 0x12, 0xc8, 0x03, 0x17, 0xc0, 0x6c, 0x12, 0xe1, 0x16, 0x28, 0x5c, 0x92, + 0xc1, 0xf8, 0xcb, 0x83, 0xa2, 0x47, 0xf8, 0x09, 0x58, 0xe9, 0x61, 0x37, 0x24, 0xaa, 0xd6, 0xdf, + 0x7d, 0xbd, 0x5a, 0x7f, 0x46, 0xbb, 0x04, 0x8d, 0x89, 0x3f, 0xb9, 0xf5, 0x50, 0xab, 0xfe, 0x43, + 0x03, 0xdb, 0x0d, 0xdf, 0x69, 0x12, 0x3b, 0x64, 0x54, 0x0c, 0x1a, 0x72, 0x9d, 0xdf, 0x40, 0xcf, + 0x46, 0x89, 0x9e, 0xfd, 0xc1, 0xe2, 0x5a, 0x4b, 0x46, 0x37, 0xaf, 0x63, 0x57, 0xaf, 0x34, 0xb0, + 0x97, 0x41, 0xbf, 0x81, 0x8e, 0xfa, 0xcb, 0x64, 0x47, 0x7d, 0xef, 0x26, 0x93, 0x99, 0xd3, 0x4f, + 0xff, 0x5f, 0xce, 0x99, 0x8a, 0xec, 0xa6, 0xd1, 0xe9, 0x8e, 0xd1, 0x1e, 0x75, 0x49, 0x9b, 0x38, + 0x72, 0x32, 0xa5, 0x99, 0xd3, 0xdd, 0xc4, 0x82, 0x66, 0x50, 0x90, 0x83, 0x7d, 0x87, 0x5c, 0xe0, + 0xd0, 0x15, 0x47, 0x8e, 0xf3, 0x08, 0x07, 0xb8, 0x45, 0x5d, 0x2a, 0xa8, 0x3a, 0x8e, 0xac, 0x59, + 0x1f, 0x8f, 0x86, 0xc6, 0x7e, 0x3d, 0x17, 0xf1, 0x6a, 0x68, 0xdc, 0xc9, 0x9e, 0xcb, 0xcd, 0x09, + 0x64, 0x80, 0xe6, 0x48, 0xc3, 0x01, 0xd0, 0x19, 0xf9, 0x43, 0x18, 0x6d, 0x8a, 0x3a, 0xf3, 0x83, + 0x84, 0xdb, 0x82, 0x74, 0xfb, 0xb3, 0xd1, 0xd0, 0xd0, 0xd1, 0x1c, 0xcc, 0xf5, 0x8e, 0xe7, 0xca, + 0xc3, 0xcf, 0xc1, 0x0e, 0x1e, 0xf7, 0x81, 0x84, 0xd7, 0x65, 0xe9, 0xf5, 0xe1, 0x68, 0x68, 0xec, + 0x1c, 0x65, 0xcd, 0xd7, 0x3b, 0xcc, 0x13, 0x85, 0x35, 0x50, 0xec, 0xc9, 0x23, 0x3b, 0xd7, 0x57, + 0xa4, 0xfe, 0xde, 0x68, 0x68, 0x14, 0xc7, 0xa7, 0xf8, 0x48, 0x73, 0xf5, 0xa4, 0x29, 0x0f, 0x82, + 0x31, 0x0a, 0x7e, 0x04, 0xd6, 0x3b, 0x3e, 0x17, 0xbf, 0x20, 0xe2, 0x0b, 0x9f, 0x5d, 0xca, 0xc6, + 0x50, 0xb2, 0x76, 0xd4, 0x0a, 0xae, 0x3f, 0x99, 0x9a, 0xd0, 0x2c, 0x0e, 0xfe, 0x1a, 0xac, 0x75, + 0xd4, 0xb1, 0x8f, 0xeb, 0x45, 0x59, 0x68, 0xf7, 0x16, 
0x14, 0x5a, 0xe2, 0x88, 0x68, 0x6d, 0x2b, + 0xf9, 0xb5, 0x78, 0x98, 0xa3, 0xa9, 0x1a, 0xfc, 0x21, 0x28, 0xca, 0x97, 0xd3, 0xba, 0x5e, 0x92, + 0xd1, 0xdc, 0x56, 0xf0, 0xe2, 0x93, 0xf1, 0x30, 0x8a, 0xed, 0x31, 0xf4, 0xb4, 0xf1, 0x48, 0x5f, + 0xcb, 0x42, 0x4f, 0x1b, 0x8f, 0x50, 0x6c, 0x87, 0x2f, 0x40, 0x91, 0x93, 0xa7, 0xd4, 0x0b, 0xfb, + 0x3a, 0x90, 0x5b, 0xee, 0xfe, 0x82, 0x70, 0x9b, 0xc7, 0x12, 0x99, 0x3a, 0x70, 0x4f, 0xd5, 0x95, + 0x1d, 0xc5, 0x92, 0xd0, 0x01, 0x6b, 0x2c, 0xf4, 0x8e, 0xf8, 0x73, 0x4e, 0x98, 0xbe, 0x9e, 0xf9, + 0xda, 0xa7, 0xf5, 0x51, 0x8c, 0x4d, 0x7b, 0x98, 0x64, 0x66, 0x82, 0x40, 0x53, 0x61, 0xf8, 0x67, + 0x0d, 0x40, 0x1e, 0x06, 0x81, 0x4b, 0xba, 0xc4, 0x13, 0xd8, 0x95, 0xe7, 0x7b, 0xae, 0x6f, 0x48, + 0x7f, 0x3f, 0x5d, 0x34, 0x9f, 0x0c, 0x29, 0xed, 0x78, 0xf2, 0x99, 0xce, 0x42, 0x51, 0x8e, 0xcf, + 0x28, 0x9d, 0x17, 0x5c, 0x3e, 0xeb, 0x9b, 0xd7, 0xa6, 0x33, 0xff, 0xff, 0x65, 0x9a, 0x4e, 0x65, + 0x47, 0xb1, 0x24, 0xfc, 0x0c, 0xec, 0xc7, 0x7f, 0x77, 0xc8, 0xf7, 0xc5, 0x09, 0x75, 0x09, 0x1f, + 0x70, 0x41, 0xba, 0x7a, 0x59, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x51, 0x2e, 0x0a, 0xcd, 0x61, 0xc3, + 0x2e, 0x30, 0xe2, 0xf6, 0x10, 0xed, 0x9d, 0x49, 0x7f, 0x3a, 0xe6, 0x36, 0x76, 0xc7, 0xa7, 0x96, + 0xdb, 0xd2, 0xc1, 0x3b, 0xa3, 0xa1, 0x61, 0xd4, 0x17, 0x43, 0xd1, 0x75, 0x5a, 0xf0, 0x57, 0x40, + 0xc7, 0xf3, 0xfc, 0x6c, 0x49, 0x3f, 0xdf, 0x8f, 0x7a, 0xce, 0x5c, 0x07, 0x73, 0xd9, 0x30, 0x00, + 0x5b, 0x38, 0xf9, 0x9f, 0xcd, 0xf5, 0x6d, 0xb9, 0x0b, 0xdf, 0x5d, 0xb0, 0x0e, 0xa9, 0x5f, 0x73, + 0x4b, 0x57, 0x69, 0xdc, 0x4a, 0x19, 0x38, 0xca, 0xa8, 0xc3, 0x3e, 0x80, 0x38, 0x7d, 0x2d, 0xc0, + 0x75, 0x78, 0xed, 0x27, 0x26, 0x73, 0x97, 0x30, 0x2d, 0xb5, 0x8c, 0x89, 0xa3, 0x1c, 0x1f, 0xf0, + 0x29, 0xd8, 0x55, 0xa3, 0xcf, 0x3d, 0x8e, 0x2f, 0x48, 0x73, 0xc0, 0x6d, 0xe1, 0x72, 0x7d, 0x47, + 0xf6, 0x37, 0x7d, 0x34, 0x34, 0x76, 0x8f, 0x72, 0xec, 0x28, 0x97, 0x05, 0x3f, 0x01, 0x5b, 0x17, + 0x3e, 0x6b, 0x51, 0xc7, 0x21, 0x5e, 0xac, 0xb4, 0x2b, 0x95, 0x76, 0xa3, 0x4c, 0x9c, 0xa4, 0x6c, + 0x28, 0x83, 0x86, 0x1c, 0xec, 0x29, 0xe5, 0x06, 0xf3, 0xed, 0x33, 0x3f, 0xf4, 0x44, 0xd4, 0x52, + 0xb9, 0xbe, 0x37, 0xf9, 0x8c, 0xec, 0x1d, 0xe5, 0x01, 0x5e, 0x0d, 0x8d, 0xbb, 0x39, 0x2d, 0x3d, + 0x01, 0x42, 0xf9, 0xda, 0xd5, 0x2f, 0x35, 0xa0, 0xcf, 0xeb, 0x1a, 0xf0, 0xa3, 0xc4, 0x45, 0xc0, + 0xdb, 0xa9, 0x8b, 0x80, 0xed, 0x0c, 0xef, 0x3b, 0xb8, 0x06, 0xf8, 0x9b, 0x06, 0xf6, 0xf3, 0xbb, + 0x26, 0x7c, 0x90, 0x88, 0xce, 0x48, 0x45, 0x77, 0x3b, 0xc5, 0x52, 0xb1, 0xfd, 0x0e, 0x94, 0x55, + 0x6f, 0x4d, 0xde, 0xb2, 0x24, 0x62, 0x8c, 0x32, 0x18, 0x1d, 0x8b, 0x94, 0x44, 0xdc, 0x57, 0xe4, + 0x0f, 0x4d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, 0x7f, 0xd7, 0xc0, 0xdb, 0xd7, 0x76, 0x45, 0x68, 0x25, + 0x42, 0x37, 0x53, 0xa1, 0x57, 0xe6, 0x0b, 0x7c, 0x37, 0x97, 0x2d, 0xd6, 0xfb, 0x57, 0x2f, 0x2b, + 0x4b, 0x5f, 0xbd, 0xac, 0x2c, 0x7d, 0xfd, 0xb2, 0xb2, 0xf4, 0xc7, 0x51, 0x45, 0xbb, 0x1a, 0x55, + 0xb4, 0xaf, 0x46, 0x15, 0xed, 0xeb, 0x51, 0x45, 0xfb, 0xcf, 0xa8, 0xa2, 0xfd, 0xe5, 0xbf, 0x95, + 0xa5, 0xdf, 0x14, 0x95, 0xdc, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xba, 0x23, 0xa4, 0x51, + 0x15, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto index d044837..aa37d94 100644 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ b/vendor/k8s.io/api/policy/v1beta1/generated.proto @@ -30,12 +30,6 @@ import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; // Package-wide variables from generator "generated". 
option go_package = "v1beta1"; -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -message AllowedCSIDriver { - // Name is the registered name of the CSI driver - optional string name = 1; -} - // AllowedFlexVolume represents a single Flexvolume that is allowed to be used. message AllowedFlexVolume { // driver is the name of the Flexvolume driver. @@ -48,7 +42,7 @@ message AllowedHostPath { // pathPrefix is the path prefix that the host volume must match. // It does not support `*`. // Trailing slashes are trimmed when validating the path prefix with a host path. - // + // // Examples: // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` // `/foo` would not allow `/food` or `/etc/foo` @@ -64,11 +58,9 @@ message AllowedHostPath { // created by POSTing to .../pods//evictions. message Eviction { // ObjectMeta describes the pod that is being evicted. - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // DeleteOptions may be provided - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; } @@ -105,21 +97,17 @@ message IDRange { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods message PodDisruptionBudget { - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Specification of the desired behavior of the PodDisruptionBudget. - // +optional optional PodDisruptionBudgetSpec spec = 2; // Most recently observed status of the PodDisruptionBudget. - // +optional optional PodDisruptionBudgetStatus status = 3; } // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. message PodDisruptionBudgetList { - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; repeated PodDisruptionBudget items = 2; @@ -131,27 +119,24 @@ message PodDisruptionBudgetSpec { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". - // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; // Label query over pods whose evictions are managed by the disruption // budget. - // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". - // +optional optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; } // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. message PodDisruptionBudgetStatus { - // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other - // status information is valid only if observedGeneration equals to PDB's object generation. + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status informatio is valid only if observedGeneration equals to PDB's object generation. // +optional optional int64 observedGeneration = 1; @@ -186,7 +171,7 @@ message PodDisruptionBudgetStatus { // that will be applied to a pod and container. message PodSecurityPolicy { // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -198,7 +183,7 @@ message PodSecurityPolicy { // PodSecurityPolicyList is a list of PodSecurityPolicy objects. message PodSecurityPolicyList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -257,12 +242,6 @@ message PodSecurityPolicySpec { // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. optional RunAsUserStrategyOptions runAsUser = 11; - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - optional RunAsGroupStrategyOptions runAsGroup = 22; - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. optional SupplementalGroupsStrategyOptions supplementalGroups = 12; @@ -298,17 +277,11 @@ message PodSecurityPolicySpec { // +optional repeated AllowedFlexVolume allowedFlexVolumes = 18; - // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. - // +optional - repeated AllowedCSIDriver allowedCSIDrivers = 23; - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // + // // Examples: // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. @@ -318,7 +291,7 @@ message PodSecurityPolicySpec { // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // + // // Examples: // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. @@ -330,23 +303,6 @@ message PodSecurityPolicySpec { // This requires the ProcMountType feature flag to be enabled. // +optional repeated string allowedProcMountTypes = 21; - - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - optional RuntimeClassStrategyOptions runtimeClass = 24; -} - -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. 
-message RunAsGroupStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; } // RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. @@ -360,21 +316,6 @@ message RunAsUserStrategyOptions { repeated IDRange ranges = 2; } -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -message RuntimeClassStrategyOptions { - // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. - // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - repeated string allowedRuntimeClassNames = 1; - - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - optional string defaultRuntimeClassName = 2; -} - // SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. message SELinuxStrategyOptions { // rule is the strategy that will dictate the allowable labels that may be set. diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go index e6a5976..c1a2727 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ b/vendor/k8s.io/api/policy/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/intstr" ) @@ -28,27 +28,24 @@ type PodDisruptionBudgetSpec struct { // "selector" will still be available after the eviction, i.e. even in the // absence of the evicted pod. So for example you can prevent all voluntary // evictions by specifying "100%". - // +optional MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` // Label query over pods whose evictions are managed by the disruption // budget. - // +optional Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` // An eviction is allowed if at most "maxUnavailable" pods selected by // "selector" are unavailable after the eviction, i.e. even in absence of // the evicted pod. For example, one can prevent all voluntary evictions // by specifying 0. This is a mutually exclusive setting with "minAvailable". - // +optional MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` } // PodDisruptionBudgetStatus represents information about the status of a // PodDisruptionBudget. Status may trail the actual state of a system. type PodDisruptionBudgetStatus struct { - // Most recent generation observed when updating this PDB status. DisruptionsAllowed and other - // status information is valid only if observedGeneration equals to PDB's object generation. + // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other + // status informatio is valid only if observedGeneration equals to PDB's object generation. 
// +optional ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` @@ -67,7 +64,7 @@ type PodDisruptionBudgetStatus struct { DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty" protobuf:"bytes,2,rep,name=disruptedPods"` // Number of pod disruptions that are currently allowed. - DisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` + PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` // current number of healthy pods CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` @@ -84,15 +81,12 @@ type PodDisruptionBudgetStatus struct { // PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods type PodDisruptionBudget struct { - metav1.TypeMeta `json:",inline"` - // +optional + metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Specification of the desired behavior of the PodDisruptionBudget. - // +optional Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` // Most recently observed status of the PodDisruptionBudget. - // +optional Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` } @@ -101,7 +95,6 @@ type PodDisruptionBudget struct { // PodDisruptionBudgetList is a collection of PodDisruptionBudgets. type PodDisruptionBudgetList struct { metav1.TypeMeta `json:",inline"` - // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` } @@ -117,11 +110,9 @@ type Eviction struct { metav1.TypeMeta `json:",inline"` // ObjectMeta describes the pod that is being evicted. - // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // DeleteOptions may be provided - // +optional DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` } @@ -134,7 +125,7 @@ type Eviction struct { type PodSecurityPolicy struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -183,11 +174,6 @@ type PodSecurityPolicySpec struct { SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. - // If this field is omitted, the pod's RunAsGroup can take any value. This field requires the - // RunAsGroup feature gate to be enabled. - // +optional - RunAsGroup *RunAsGroupStrategyOptions `json:"runAsGroup,omitempty" protobuf:"bytes,22,opt,name=runAsGroup"` // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. 
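Note that the DisruptionsAllowed/PodDisruptionsAllowed change above only renames the Go identifier (the version this patch pins uses PodDisruptionsAllowed); the json and protobuf tags stay "disruptionsAllowed", so serialized objects are wire-compatible across the two vendored versions. A short sketch, assuming code is built against the vendored version in this patch, showing the stable wire name:

package main

import (
	"encoding/json"
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
)

func main() {
	status := policyv1beta1.PodDisruptionBudgetStatus{
		PodDisruptionsAllowed: 1, // Go name per this vendored version; JSON key stays "disruptionsAllowed"
		CurrentHealthy:        3,
		DesiredHealthy:        3,
		ExpectedPods:          3,
	}
	b, err := json.Marshal(status)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"disruptionsAllowed":1,"currentHealthy":3,"desiredHealthy":3,"expectedPods":3}
}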
SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. @@ -216,11 +202,6 @@ type PodSecurityPolicySpec struct { // is allowed in the "volumes" field. // +optional AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. - // An empty value indicates that any CSI driver can be used for inline ephemeral volumes. - // This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate. - // +optional - AllowedCSIDrivers []AllowedCSIDriver `json:"allowedCSIDrivers,omitempty" protobuf:"bytes,23,rep,name=allowedCSIDrivers"` // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. // Each entry is either a plain sysctl name or ends in "*" in which case it is considered // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. @@ -245,11 +226,6 @@ type PodSecurityPolicySpec struct { // This requires the ProcMountType feature flag to be enabled. // +optional AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` - // runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. - // If this field is omitted, the pod's runtimeClassName field is unrestricted. - // Enforcement of this field depends on the RuntimeClass feature gate being enabled. - // +optional - RuntimeClass *RuntimeClassStrategyOptions `json:"runtimeClass,omitempty" protobuf:"bytes,24,opt,name=runtimeClass"` } // AllowedHostPath defines the host volume conditions that will be enabled by a policy @@ -269,14 +245,10 @@ type AllowedHostPath struct { ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` } -// AllowAllCapabilities can be used as a value for the PodSecurityPolicy.AllowAllCapabilities -// field and means that any capabilities are allowed to be requested. -var AllowAllCapabilities v1.Capability = "*" - // FSType gives strong typing to different file systems that are used by volumes. type FSType string -const ( +var ( AzureFile FSType = "azureFile" Flocker FSType = "flocker" FlexVolume FSType = "flexVolume" @@ -296,15 +268,8 @@ const ( DownwardAPI FSType = "downwardAPI" FC FSType = "fc" ConfigMap FSType = "configMap" - VsphereVolume FSType = "vsphereVolume" Quobyte FSType = "quobyte" AzureDisk FSType = "azureDisk" - PhotonPersistentDisk FSType = "photonPersistentDisk" - StorageOS FSType = "storageos" - Projected FSType = "projected" - PortworxVolume FSType = "portworxVolume" - ScaleIO FSType = "scaleIO" - CSI FSType = "csi" All FSType = "*" ) @@ -314,12 +279,6 @@ type AllowedFlexVolume struct { Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` } -// AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used. -type AllowedCSIDriver struct { - // Name is the registered name of the CSI driver - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` -} - // HostPortRange defines a range of host ports that will be enabled by a policy // for pods to use. It requires both the start and end to be defined. 
type HostPortRange struct { @@ -360,16 +319,6 @@ type RunAsUserStrategyOptions struct { Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` } -// RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsGroupStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsGroup values that may be set. - Rule RunAsGroupStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsGroupStrategy"` - // ranges are the allowed ranges of gids that may be used. If you would like to force a single gid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - // IDRange provides a min/max of an allowed range of IDs. type IDRange struct { // min is the start of the range, inclusive. @@ -391,20 +340,6 @@ const ( RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" ) -// RunAsGroupStrategy denotes strategy types for generating RunAsGroup values for a -// Security Context. -type RunAsGroupStrategy string - -const ( - // RunAsGroupStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when RunAsGroup are specified, they have to fall in the defined range. - RunAsGroupStrategyMayRunAs RunAsGroupStrategy = "MayRunAs" - // RunAsGroupStrategyMustRunAs means that container must run as a particular gid. - RunAsGroupStrategyMustRunAs RunAsGroupStrategy = "MustRunAs" - // RunAsUserStrategyRunAsAny means that container may make requests for any gid. - RunAsGroupStrategyRunAsAny RunAsGroupStrategy = "RunAsAny" -) - // FSGroupStrategyOptions defines the strategy type and options used to create the strategy. type FSGroupStrategyOptions struct { // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. @@ -421,9 +356,6 @@ type FSGroupStrategyOptions struct { type FSGroupStrategyType string const ( - // FSGroupStrategyMayRunAs means that container does not need to have FSGroup of X applied. - // However, when FSGroups are specified, they have to fall in the defined range. - FSGroupStrategyMayRunAs FSGroupStrategyType = "MayRunAs" // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. @@ -446,41 +378,19 @@ type SupplementalGroupsStrategyOptions struct { type SupplementalGroupsStrategyType string const ( - // SupplementalGroupsStrategyMayRunAs means that container does not need to run with a particular gid. - // However, when gids are specified, they have to fall in the defined range. - SupplementalGroupsStrategyMayRunAs SupplementalGroupsStrategyType = "MayRunAs" // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" ) -// RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses -// for a pod. -type RuntimeClassStrategyOptions struct { - // allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. 
- // A value of "*" means that any RuntimeClass name is allowed, and must be the only item in the - // list. An empty list requires the RuntimeClassName field to be unset. - AllowedRuntimeClassNames []string `json:"allowedRuntimeClassNames" protobuf:"bytes,1,rep,name=allowedRuntimeClassNames"` - // defaultRuntimeClassName is the default RuntimeClassName to set on the pod. - // The default MUST be allowed by the allowedRuntimeClassNames list. - // A value of nil does not mutate the Pod. - // +optional - DefaultRuntimeClassName *string `json:"defaultRuntimeClassName,omitempty" protobuf:"bytes,2,opt,name=defaultRuntimeClassName"` -} - -// AllowAllRuntimeClassNames can be used as a value for the -// RuntimeClassStrategyOptions.AllowedRuntimeClassNames field and means that any RuntimeClassName is -// allowed. -const AllowAllRuntimeClassNames = "*" - // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // PodSecurityPolicyList is a list of PodSecurityPolicy objects. type PodSecurityPolicyList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go index 70f667c..df10b2a 100644 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go @@ -27,15 +27,6 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedCSIDriver = map[string]string{ - "": "AllowedCSIDriver represents a single inline CSI Driver that is allowed to be used.", - "name": "Name is the registered name of the CSI driver", -} - -func (AllowedCSIDriver) SwaggerDoc() map[string]string { - return map_AllowedCSIDriver -} - var map_AllowedFlexVolume = map[string]string{ "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", "driver": "driver is the name of the Flexvolume driver.", @@ -126,7 +117,7 @@ func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { var map_PodDisruptionBudgetStatus = map[string]string{ "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", - "observedGeneration": "Most recent generation observed when updating this PDB status. DisruptionsAllowed and other status information is valid only if observedGeneration equals to PDB's object generation.", + "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.", "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). 
The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.", "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", "currentHealthy": "current number of healthy pods", @@ -140,7 +131,7 @@ func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { var map_PodSecurityPolicy = map[string]string{ "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "spec defines the policy enforced.", } @@ -150,7 +141,7 @@ func (PodSecurityPolicy) SwaggerDoc() map[string]string { var map_PodSecurityPolicyList = map[string]string{ "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is a list of schema objects.", } @@ -171,7 +162,6 @@ var map_PodSecurityPolicySpec = map[string]string{ "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "runAsGroup": "RunAsGroup is the strategy that will dictate the allowable RunAsGroup values that may be set. If this field is omitted, the pod's RunAsGroup can take any value. This field requires the RunAsGroup feature gate to be enabled.", "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", @@ -179,27 +169,15 @@ var map_PodSecurityPolicySpec = map[string]string{ "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. 
This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedCSIDrivers": "AllowedCSIDrivers is a whitelist of inline CSI drivers that must be explicitly set to be embedded within a pod spec. An empty value indicates that any CSI driver can be used for inline ephemeral volumes. This is an alpha field, and is only honored if the API server enables the CSIInlineVolume feature gate.", "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", - "runtimeClass": "runtimeClass is the strategy that will dictate the allowable RuntimeClasses for a pod. If this field is omitted, the pod's runtimeClassName field is unrestricted. Enforcement of this field depends on the RuntimeClass feature gate being enabled.", } func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { return map_PodSecurityPolicySpec } -var map_RunAsGroupStrategyOptions = map[string]string{ - "": "RunAsGroupStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsGroup values that may be set.", - "ranges": "ranges are the allowed ranges of gids that may be used. If you would like to force a single gid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsGroupStrategyOptions -} - var map_RunAsUserStrategyOptions = map[string]string{ "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", @@ -210,16 +188,6 @@ func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { return map_RunAsUserStrategyOptions } -var map_RuntimeClassStrategyOptions = map[string]string{ - "": "RuntimeClassStrategyOptions define the strategy that will dictate the allowable RuntimeClasses for a pod.", - "allowedRuntimeClassNames": "allowedRuntimeClassNames is a whitelist of RuntimeClass names that may be specified on a pod. A value of \"*\" means that any RuntimeClass name is allowed, and must be the only item in the list. An empty list requires the RuntimeClassName field to be unset.", - "defaultRuntimeClassName": "defaultRuntimeClassName is the default RuntimeClassName to set on the pod. The default MUST be allowed by the allowedRuntimeClassNames list. 
A value of nil does not mutate the Pod.", -} - -func (RuntimeClassStrategyOptions) SwaggerDoc() map[string]string { - return map_RuntimeClassStrategyOptions -} - var map_SELinuxStrategyOptions = map[string]string{ "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", "rule": "rule is the strategy that will dictate the allowable labels that may be set.", diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go index 75851e1..9af268a 100644 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go @@ -27,22 +27,6 @@ import ( intstr "k8s.io/apimachinery/pkg/util/intstr" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedCSIDriver) DeepCopyInto(out *AllowedCSIDriver) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedCSIDriver. -func (in *AllowedCSIDriver) DeepCopy() *AllowedCSIDriver { - if in == nil { - return nil - } - out := new(AllowedCSIDriver) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { *out = *in @@ -191,7 +175,7 @@ func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object { func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodDisruptionBudget, len(*in)) @@ -305,7 +289,7 @@ func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodSecurityPolicy, len(*in)) @@ -364,11 +348,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { } in.SELinux.DeepCopyInto(&out.SELinux) in.RunAsUser.DeepCopyInto(&out.RunAsUser) - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(RunAsGroupStrategyOptions) - (*in).DeepCopyInto(*out) - } in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) in.FSGroup.DeepCopyInto(&out.FSGroup) if in.DefaultAllowPrivilegeEscalation != nil { @@ -391,11 +370,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]AllowedFlexVolume, len(*in)) copy(*out, *in) } - if in.AllowedCSIDrivers != nil { - in, out := &in.AllowedCSIDrivers, &out.AllowedCSIDrivers - *out = make([]AllowedCSIDriver, len(*in)) - copy(*out, *in) - } if in.AllowedUnsafeSysctls != nil { in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls *out = make([]string, len(*in)) @@ -411,11 +385,6 @@ func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { *out = make([]corev1.ProcMountType, len(*in)) copy(*out, *in) } - if in.RuntimeClass != nil { - in, out := &in.RuntimeClass, &out.RuntimeClass - *out = new(RuntimeClassStrategyOptions) - (*in).DeepCopyInto(*out) - } return } @@ -429,27 +398,6 @@ func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { return out } -// DeepCopyInto is an 
autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsGroupStrategyOptions) DeepCopyInto(out *RunAsGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsGroupStrategyOptions. -func (in *RunAsGroupStrategyOptions) DeepCopy() *RunAsGroupStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { *out = *in @@ -471,32 +419,6 @@ func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuntimeClassStrategyOptions) DeepCopyInto(out *RuntimeClassStrategyOptions) { - *out = *in - if in.AllowedRuntimeClassNames != nil { - in, out := &in.AllowedRuntimeClassNames, &out.AllowedRuntimeClassNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.DefaultRuntimeClassName != nil { - in, out := &in.DefaultRuntimeClassName, &out.DefaultRuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuntimeClassStrategyOptions. -func (in *RuntimeClassStrategyOptions) DeepCopy() *RuntimeClassStrategyOptions { - if in == nil { - return nil - } - out := new(RuntimeClassStrategyOptions) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { *out = *in diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go index 80f43ce..28ceb26 100644 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ b/vendor/k8s.io/api/rbac/v1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io - package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go index ba6872d..21010fb 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1/generated.pb.go @@ -14,24 +14,42 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto + + It has these top-level messages: + AggregationRule + ClusterRole + ClusterRoleBinding + ClusterRoleBindingList + ClusterRoleList + PolicyRule + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,343 +60,55 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{0} -} -func (m *AggregationRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AggregationRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_AggregationRule.Merge(m, src) -} -func (m *AggregationRule) XXX_Size() int { - return m.Size() -} -func (m *AggregationRule) XXX_DiscardUnknown() { - xxx_messageInfo_AggregationRule.DiscardUnknown(m) -} +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_AggregationRule proto.InternalMessageInfo +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{1} -} -func (m *ClusterRole) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRole) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRole.Merge(m, src) -} -func (m *ClusterRole) XXX_Size() int { - return m.Size() -} -func (m *ClusterRole) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRole.DiscardUnknown(m) -} +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_ClusterRole proto.InternalMessageInfo +func (m *ClusterRoleBindingList) Reset() { *m = 
ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{2} -} -func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleBinding.Merge(m, src) -} -func (m *ClusterRoleBinding) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleBinding) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo - -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{3} -} -func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) -} -func (m *ClusterRoleBindingList) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo - -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{4} -} -func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleList.Merge(m, src) -} -func (m *ClusterRoleList) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo - -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{5} -} -func (m *PolicyRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PolicyRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolicyRule.Merge(m, src) -} -func (m *PolicyRule) XXX_Size() int { - return m.Size() -} -func (m *PolicyRule) XXX_DiscardUnknown() { - 
xxx_messageInfo_PolicyRule.DiscardUnknown(m) -} - -var xxx_messageInfo_PolicyRule proto.InternalMessageInfo - -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{6} -} -func (m *Role) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Role) XXX_Merge(src proto.Message) { - xxx_messageInfo_Role.Merge(m, src) -} -func (m *Role) XXX_Size() int { - return m.Size() -} -func (m *Role) XXX_DiscardUnknown() { - xxx_messageInfo_Role.DiscardUnknown(m) -} - -var xxx_messageInfo_Role proto.InternalMessageInfo - -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{7} -} -func (m *RoleBinding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleBinding) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleBinding.Merge(m, src) -} -func (m *RoleBinding) XXX_Size() int { - return m.Size() -} -func (m *RoleBinding) XXX_DiscardUnknown() { - xxx_messageInfo_RoleBinding.DiscardUnknown(m) -} - -var xxx_messageInfo_RoleBinding proto.InternalMessageInfo - -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{8} -} -func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleBindingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleBindingList.Merge(m, src) -} -func (m *RoleBindingList) XXX_Size() int { - return m.Size() -} -func (m *RoleBindingList) XXX_DiscardUnknown() { - xxx_messageInfo_RoleBindingList.DiscardUnknown(m) -} +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{9} -} -func (m *RoleList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleList) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleList.Merge(m, src) -} -func (m *RoleList) XXX_Size() int { - return m.Size() -} -func (m *RoleList) XXX_DiscardUnknown() { - 
xxx_messageInfo_RoleList.DiscardUnknown(m) -} +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_RoleList proto.InternalMessageInfo +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{10} -} -func (m *RoleRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleRef.Merge(m, src) -} -func (m *RoleRef) XXX_Size() int { - return m.Size() -} -func (m *RoleRef) XXX_DiscardUnknown() { - xxx_messageInfo_RoleRef.DiscardUnknown(m) -} +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -var xxx_messageInfo_RoleRef proto.InternalMessageInfo +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_979ffd7b30c07419, []int{11} -} -func (m *Subject) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Subject) XXX_Merge(src proto.Message) { - xxx_messageInfo_Subject.Merge(m, src) -} -func (m *Subject) XXX_Size() int { - return m.Size() -} -func (m *Subject) XXX_DiscardUnknown() { - xxx_messageInfo_Subject.DiscardUnknown(m) -} +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -var xxx_messageInfo_Subject proto.InternalMessageInfo +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") @@ -394,70 +124,10 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptor_979ffd7b30c07419) -} - -var fileDescriptor_979ffd7b30c07419 = []byte{ - // 807 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, - 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, - 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 
0x2e, 0x08, 0x89, 0x03, - 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, - 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, - 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, - 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, - 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, - 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, - 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, - 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, - 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, - 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, - 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, - 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, - 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, - 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, - 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, - 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, - 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, - 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, - 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, - 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, - 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, - 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, - 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, - 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, - 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, - 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, - 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, - 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, - 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, - 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, - 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, - 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, - 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, - 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, - 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, - 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, - 
0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, - 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, - 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, - 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, - 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, - 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, - 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, - 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, - 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, - 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, - 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, -} - func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -465,36 +135,29 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.ClusterRoleSelectors { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -502,58 +165,47 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.AggregationRule != nil { - { - size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n1 if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Rules { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } 
- { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n2 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -561,56 +213,45 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n3 if len(m.Subjects) > 0 { - for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Subjects { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n4 + return i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -618,46 +259,37 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= 
size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -665,46 +297,37 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -712,67 +335,92 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.NonResourceURLs) > 0 { - for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.NonResourceURLs[iNdEx]) - copy(dAtA[i:], m.NonResourceURLs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) - i-- - dAtA[i] = 0x2a + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0x22 + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) - i-- + for _, s := range m.Resources { dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if 
len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) - i-- - dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Verbs) > 0 { - for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Verbs[iNdEx]) - copy(dAtA[i:], m.Verbs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -780,46 +428,37 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Rules { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -827,56 +466,45 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n8 if len(m.Subjects) > 0 { - for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } - i-- + for _, msg := range m.Subjects { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n9 + return i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -884,46 +512,37 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -931,46 +550,37 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -978,37 +588,29 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1016,53 +618,57 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base 
+ return offset + 1 } func (m *AggregationRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -1075,9 +681,6 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1096,9 +699,6 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1115,9 +715,6 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1132,9 +729,6 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1149,9 +743,6 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Verbs) > 0 { @@ -1188,9 +779,6 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1205,9 +793,6 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1224,9 +809,6 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1241,9 +823,6 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1258,9 +837,6 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.APIGroup) @@ -1273,9 +849,6 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -1290,7 +863,14 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1299,13 +879,8 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } - repeatedStringForClusterRoleSelectors := "[]LabelSelector{" - for _, f := range this.ClusterRoleSelectors { - repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," - } - repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1314,15 +889,10 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]PolicyRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + 
repeatedStringForRules + `,`, - `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -1331,14 +901,9 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } - repeatedStringForSubjects := "[]Subject{" - for _, f := range this.Subjects { - repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," - } - repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + repeatedStringForSubjects + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1348,14 +913,9 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ClusterRoleBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1364,14 +924,9 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ClusterRole{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1394,14 +949,9 @@ func (this *Role) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]PolicyRule{" - for _, f := range this.Rules { - 
repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1410,14 +960,9 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } - repeatedStringForSubjects := "[]Subject{" - for _, f := range this.Subjects { - repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," - } - repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + repeatedStringForSubjects + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1427,14 +972,9 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]RoleBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1443,14 +983,9 @@ func (this *RoleList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Role{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1503,7 +1038,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { 
break } @@ -1531,7 +1066,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1540,13 +1075,10 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1560,9 +1092,6 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1590,7 +1119,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1618,7 +1147,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1627,9 +1156,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1651,7 +1177,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1660,9 +1186,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1685,7 +1208,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1694,9 +1217,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1716,9 +1236,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1746,7 +1263,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1774,7 +1291,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1783,9 +1300,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1807,7 +1321,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ 
- msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1816,9 +1330,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1841,7 +1352,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1850,9 +1361,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1869,9 +1377,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1899,7 +1404,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1927,7 +1432,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1936,9 +1441,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1960,7 +1462,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1969,9 +1471,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1989,9 +1488,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2019,7 +1515,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2047,7 +1543,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2056,9 +1552,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2080,7 +1573,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2089,9 +1582,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2109,9 
+1599,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2139,7 +1626,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2167,7 +1654,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2177,9 +1664,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2199,7 +1683,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2209,9 +1693,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2231,7 +1712,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2241,9 +1722,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2263,7 +1741,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2273,9 +1751,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2295,7 +1770,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2305,9 +1780,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2322,9 +1794,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2352,7 +1821,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2380,7 +1849,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2389,9 +1858,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { 
return io.ErrUnexpectedEOF } @@ -2413,7 +1879,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2422,9 +1888,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2442,9 +1905,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2472,7 +1932,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2500,7 +1960,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2509,9 +1969,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2533,7 +1990,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2542,9 +1999,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2567,7 +2021,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2576,9 +2030,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2595,9 +2046,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2625,7 +2073,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2653,7 +2101,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2662,9 +2110,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2686,7 +2131,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2695,9 +2140,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF 
} @@ -2715,9 +2157,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2745,7 +2184,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2773,7 +2212,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2782,9 +2221,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2806,7 +2242,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2815,9 +2251,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2835,9 +2268,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2865,7 +2295,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2893,7 +2323,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2903,9 +2333,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2925,7 +2352,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2935,9 +2362,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2957,7 +2381,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2967,9 +2391,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2984,9 +2405,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3014,7 +2432,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3042,7 
+2460,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3052,9 +2470,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3074,7 +2489,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3084,9 +2499,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3106,7 +2518,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3116,9 +2528,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3138,7 +2547,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3148,9 +2557,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3165,9 +2571,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3183,7 +2586,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3215,8 +2617,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3233,34 +2637,112 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, 
io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 807 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, + 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, + 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, + 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, + 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, + 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, + 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, + 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, + 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, + 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, + 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, + 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, + 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, + 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, + 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, + 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, + 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, + 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, + 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, + 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, + 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, + 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, + 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, + 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, + 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, + 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, + 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, + 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, + 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, + 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 
0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, + 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, + 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, + 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, + 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 0x00, 0xce, + 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, + 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, + 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, + 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, + 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, + 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, + 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, + 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, + 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, + 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, + 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, + 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, + 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, + 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, + 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, + 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, + 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto index 71fa083..4b321a7 100644 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1/generated.proto @@ -43,7 +43,6 @@ message ClusterRole { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole - // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. 
@@ -122,7 +121,6 @@ message Role { optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role - // +optional repeated PolicyRule rules = 2; } diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go index 7ba7d05..17163cb 100644 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ b/vendor/k8s.io/api/rbac/v1/types.go @@ -108,7 +108,6 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -171,7 +170,6 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go index 83ce310..0ec20c8 100644 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go index 095a5e9..07eb321 100644 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go 
b/vendor/k8s.io/api/rbac/v1alpha1/doc.go index 918b8a3..5236a47 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io - package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go index 3b12526..71eced8 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go @@ -14,24 +14,42 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +// DO NOT EDIT! +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto + + It has these top-level messages: + AggregationRule + ClusterRole + ClusterRoleBinding + ClusterRoleBindingList + ClusterRoleList + PolicyRule + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ package v1alpha1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,343 +60,55 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{0} -} -func (m *AggregationRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AggregationRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_AggregationRule.Merge(m, src) -} -func (m *AggregationRule) XXX_Size() int { - return m.Size() -} -func (m *AggregationRule) XXX_DiscardUnknown() { - xxx_messageInfo_AggregationRule.DiscardUnknown(m) -} - -var xxx_messageInfo_AggregationRule proto.InternalMessageInfo - -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{1} -} -func (m *ClusterRole) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRole) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRole.Merge(m, src) -} -func (m *ClusterRole) XXX_Size() int { - return m.Size() -} -func (m *ClusterRole) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRole.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterRole proto.InternalMessageInfo - -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{2} -} -func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleBinding.Merge(m, src) -} -func (m *ClusterRoleBinding) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleBinding) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo - -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{3} -} -func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) -} -func (m *ClusterRoleBindingList) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the 
proto package -var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{4} -} -func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleList.Merge(m, src) -} -func (m *ClusterRoleList) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) -} +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{5} -} -func (m *PolicyRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PolicyRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_PolicyRule.Merge(m, src) -} -func (m *PolicyRule) XXX_Size() int { - return m.Size() -} -func (m *PolicyRule) XXX_DiscardUnknown() { - xxx_messageInfo_PolicyRule.DiscardUnknown(m) -} - -var xxx_messageInfo_PolicyRule proto.InternalMessageInfo +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{6} -} -func (m *Role) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Role) XXX_Merge(src proto.Message) { - xxx_messageInfo_Role.Merge(m, src) -} -func (m *Role) XXX_Size() int { - return m.Size() -} -func (m *Role) XXX_DiscardUnknown() { - xxx_messageInfo_Role.DiscardUnknown(m) -} - -var xxx_messageInfo_Role proto.InternalMessageInfo - -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{7} -} -func (m *RoleBinding) XXX_Unmarshal(b []byte) error { - return 
m.Unmarshal(b) -} -func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleBinding) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleBinding.Merge(m, src) -} -func (m *RoleBinding) XXX_Size() int { - return m.Size() -} -func (m *RoleBinding) XXX_DiscardUnknown() { - xxx_messageInfo_RoleBinding.DiscardUnknown(m) -} - -var xxx_messageInfo_RoleBinding proto.InternalMessageInfo - -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{8} -} -func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleBindingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleBindingList.Merge(m, src) -} -func (m *RoleBindingList) XXX_Size() int { - return m.Size() -} -func (m *RoleBindingList) XXX_DiscardUnknown() { - xxx_messageInfo_RoleBindingList.DiscardUnknown(m) -} +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{9} -} -func (m *RoleList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleList) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleList.Merge(m, src) -} -func (m *RoleList) XXX_Size() int { - return m.Size() -} -func (m *RoleList) XXX_DiscardUnknown() { - xxx_messageInfo_RoleList.DiscardUnknown(m) -} +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_RoleList proto.InternalMessageInfo +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{10} -} -func (m *RoleRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleRef.Merge(m, src) -} -func (m *RoleRef) XXX_Size() int { - return m.Size() -} -func (m *RoleRef) XXX_DiscardUnknown() { - 
xxx_messageInfo_RoleRef.DiscardUnknown(m) -} +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -var xxx_messageInfo_RoleRef proto.InternalMessageInfo +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_b59b0bd5e7cb9590, []int{11} -} -func (m *Subject) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Subject) XXX_Merge(src proto.Message) { - xxx_messageInfo_Subject.Merge(m, src) -} -func (m *Subject) XXX_Size() int { - return m.Size() -} -func (m *Subject) XXX_DiscardUnknown() { - xxx_messageInfo_Subject.DiscardUnknown(m) -} +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -var xxx_messageInfo_Subject proto.InternalMessageInfo +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") @@ -394,71 +124,10 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptor_b59b0bd5e7cb9590) -} - -var fileDescriptor_b59b0bd5e7cb9590 = []byte{ - // 830 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, - 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, - 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, - 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, - 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, - 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, - 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, - 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, - 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, - 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, - 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, - 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, - 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, - 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 
0x1e, 0xe0, 0xd8, - 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, - 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, - 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, - 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, - 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, - 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, - 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, - 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, - 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, - 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, - 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, - 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, - 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, - 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, - 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, - 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, - 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, - 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, - 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, - 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, - 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, - 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, - 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, - 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, - 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, - 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, - 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, - 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, - 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, - 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, - 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, - 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, - 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, - 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, - 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, - 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, - 0xff, 
0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, -} - func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -466,36 +135,29 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.ClusterRoleSelectors { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -503,58 +165,47 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.AggregationRule != nil { - { - size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n1 if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Rules { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n2 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -562,56 +213,45 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i 
:= len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n3 if len(m.Subjects) > 0 { - for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Subjects { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n4 + return i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -619,46 +259,37 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -666,46 +297,37 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { 
- { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -713,67 +335,92 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.NonResourceURLs) > 0 { - for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.NonResourceURLs[iNdEx]) - copy(dAtA[i:], m.NonResourceURLs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) - i-- - dAtA[i] = 0x32 + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0x2a + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) - i-- + for _, s := range m.Resources { dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) - i-- - dAtA[i] = 0x1a + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Verbs) > 0 { - for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Verbs[iNdEx]) - copy(dAtA[i:], m.Verbs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x32 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - 
return len(dAtA) - i, nil + return i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -781,46 +428,37 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Rules { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -828,56 +466,45 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n8 if len(m.Subjects) > 0 { - for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Subjects { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n9 + return i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -885,46 +512,37 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n10 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -932,46 +550,37 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -979,37 +588,29 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i 
+= copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1017,53 +618,57 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *AggregationRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -1076,9 +681,6 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1097,9 +699,6 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1116,9 +715,6 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1133,9 +729,6 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1150,9 +743,6 @@ func (m *ClusterRoleList) 
Size() (n int) { } func (m *PolicyRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Verbs) > 0 { @@ -1189,9 +779,6 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1206,9 +793,6 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1225,9 +809,6 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1242,9 +823,6 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1259,9 +837,6 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.APIGroup) @@ -1274,9 +849,6 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -1291,7 +863,14 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1300,13 +879,8 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } - repeatedStringForClusterRoleSelectors := "[]LabelSelector{" - for _, f := range this.ClusterRoleSelectors { - repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," - } - repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1315,15 +889,10 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]PolicyRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -1332,14 +901,9 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } - repeatedStringForSubjects := "[]Subject{" - for _, f := range this.Subjects { - repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," - } - repeatedStringForSubjects += 
"}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + repeatedStringForSubjects + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1349,14 +913,9 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ClusterRoleBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1365,14 +924,9 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ClusterRole{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1395,14 +949,9 @@ func (this *Role) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]PolicyRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1411,14 +960,9 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } - repeatedStringForSubjects := "[]Subject{" - for _, f := range this.Subjects { - repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), 
"Subject", "Subject", 1), `&`, ``, 1) + "," - } - repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + repeatedStringForSubjects + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1428,14 +972,9 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]RoleBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1444,14 +983,9 @@ func (this *RoleList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Role{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1504,7 +1038,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1532,7 +1066,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1541,13 +1075,10 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1561,9 +1092,6 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return 
ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1591,7 +1119,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1619,7 +1147,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1628,9 +1156,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1652,7 +1177,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1661,9 +1186,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1686,7 +1208,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1695,9 +1217,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1717,9 +1236,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1747,7 +1263,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1775,7 +1291,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1784,9 +1300,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1808,7 +1321,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1817,9 +1330,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1842,7 +1352,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1851,9 +1361,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1870,9 +1377,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if 
(iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1900,7 +1404,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1928,7 +1432,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1937,9 +1441,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1961,7 +1462,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1970,9 +1471,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1990,9 +1488,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2020,7 +1515,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2048,7 +1543,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2057,9 +1552,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2081,7 +1573,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2090,9 +1582,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2110,9 +1599,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2140,7 +1626,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2168,7 +1654,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2178,9 +1664,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2200,7 +1683,7 @@ func (m *PolicyRule) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2210,9 +1693,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2232,7 +1712,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2242,9 +1722,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2264,7 +1741,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2274,9 +1751,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2296,7 +1770,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2306,9 +1780,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2323,9 +1794,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2353,7 +1821,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2381,7 +1849,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2390,9 +1858,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2414,7 +1879,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2423,9 +1888,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2443,9 +1905,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2473,7 +1932,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2501,7 +1960,7 @@ func (m *RoleBinding) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2510,9 +1969,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2534,7 +1990,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2543,9 +1999,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2568,7 +2021,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2577,9 +2030,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2596,9 +2046,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2626,7 +2073,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2654,7 +2101,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2663,9 +2110,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2687,7 +2131,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2696,9 +2140,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2716,9 +2157,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2746,7 +2184,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2774,7 +2212,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2783,9 +2221,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2807,7 +2242,7 @@ func (m *RoleList) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2816,9 +2251,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2836,9 +2268,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2866,7 +2295,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2894,7 +2323,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2904,9 +2333,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2926,7 +2352,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2936,9 +2362,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2958,7 +2381,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2968,9 +2391,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2985,9 +2405,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3015,7 +2432,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3043,7 +2460,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3053,9 +2470,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3075,7 +2489,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3085,9 +2499,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3107,7 +2518,7 @@ func (m *Subject) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3117,9 +2528,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3139,7 +2547,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3149,9 +2557,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3166,9 +2571,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3184,7 +2586,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3216,8 +2617,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3234,34 +2637,113 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 830 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, + 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, + 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 
0x10, 0xf6, 0x96, 0x39, 0x71, + 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, + 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, + 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, + 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, + 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, + 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, + 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, + 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, + 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, + 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, + 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, + 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, + 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, + 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, + 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, + 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, + 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, + 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, + 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, + 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, + 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, + 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, + 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, + 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, + 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, + 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, + 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, + 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, + 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, + 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, + 0x00, 0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, + 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, + 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, + 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, + 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, + 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 
0x15, + 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, + 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, + 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, + 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, + 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, + 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, + 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, + 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, + 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, + 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, + 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, + 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, + 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto index 895ab62..cde3aaa 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto @@ -37,14 +37,12 @@ message AggregationRule { } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. message ClusterRole { // Standard object's metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole - // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -56,7 +54,6 @@ message ClusterRole { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. message ClusterRoleBinding { // Standard object's metadata. // +optional @@ -71,8 +68,7 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } -// ClusterRoleBindingList is a collection of ClusterRoleBindings. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20. +// ClusterRoleBindingList is a collection of ClusterRoleBindings message ClusterRoleBindingList { // Standard object's metadata. // +optional @@ -82,8 +78,7 @@ message ClusterRoleBindingList { repeated ClusterRoleBinding items = 2; } -// ClusterRoleList is a collection of ClusterRoles. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. +// ClusterRoleList is a collection of ClusterRoles message ClusterRoleList { // Standard object's metadata. // +optional @@ -113,6 +108,7 @@ message PolicyRule { repeated string resourceNames = 5; // NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional @@ -120,21 +116,18 @@ message PolicyRule { } // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. message Role { // Standard object's metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role - // +optional repeated PolicyRule rules = 2; } // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. message RoleBinding { // Standard object's metadata. // +optional @@ -150,7 +143,6 @@ message RoleBinding { } // RoleBindingList is a collection of RoleBindings -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. message RoleBindingList { // Standard object's metadata. // +optional @@ -160,8 +152,7 @@ message RoleBindingList { repeated RoleBinding items = 2; } -// RoleList is a collection of Roles. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. +// RoleList is a collection of Roles message RoleList { // Standard object's metadata. // +optional diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go index ba91ab3..398d6a1 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types.go @@ -62,6 +62,7 @@ type PolicyRule struct { ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path + // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. // +optional @@ -102,7 +103,6 @@ type RoleRef struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. type Role struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
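// Editor's note (not part of the vendored diff): the protobuf struct tags on these
// vendored types, for example protobuf:"bytes,5,rep,name=resourceNames" and
// protobuf:"bytes,2,rep,name=rules", are what the gogo code generator turns into the
// hard-coded key bytes seen in the generated marshal code later in this patch
// (dAtA[i] = 0xa for field 1, 0x12 for field 2, 0x1a for field 3). A minimal sketch of
// that relationship, assuming nothing beyond the proto wire format; for the small field
// numbers used here the key always fits in a single byte.
package main

import "fmt"

// wireKey computes the one-byte key written before a field on the wire:
// key = (field_number << 3) | wire_type, where wire type 2 means length-delimited
// (strings, bytes, and embedded messages such as ObjectMeta or PolicyRule).
func wireKey(fieldNumber, wireType uint8) uint8 {
	return fieldNumber<<3 | wireType
}

func main() {
	fmt.Printf("metadata (field 1): %#x\n", wireKey(1, 2)) // 0xa, matches dAtA[i] = 0xa
	fmt.Printf("rules    (field 2): %#x\n", wireKey(2, 2)) // 0x12, matches dAtA[i] = 0x12
	fmt.Printf("roleRef  (field 3): %#x\n", wireKey(3, 2)) // 0x1a, matches dAtA[i] = 0x1a
}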
@@ -110,7 +110,6 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -120,7 +119,6 @@ type Role struct { // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. type RoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -139,7 +137,6 @@ type RoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBindingList is a collection of RoleBindings -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. type RoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -152,8 +149,7 @@ type RoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// RoleList is a collection of Roles. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. +// RoleList is a collection of Roles type RoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -169,7 +165,6 @@ type RoleList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. type ClusterRole struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -177,7 +172,6 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -201,7 +195,6 @@ type AggregationRule struct { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. type ClusterRoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -219,8 +212,7 @@ type ClusterRoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleBindingList is a collection of ClusterRoleBindings. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20. +// ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -233,8 +225,7 @@ type ClusterRoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleList is a collection of ClusterRoles. 
-// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. +// ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go index eab08c5..1d6ef30 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } @@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string { } var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this ClusterRole", "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", @@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string { } var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.", + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string { } var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindings, and will no longer be served in v1.20.", + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoleBindings", } @@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string { } var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles. 
Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.", + "": "ClusterRoleList is a collection of ClusterRoles", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoles", } @@ -84,7 +84,7 @@ var map_PolicyRule = map[string]string{ "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", - "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", + "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", } func (PolicyRule) SwaggerDoc() map[string]string { @@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string { } var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.", + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this Role", } @@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string { } var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.", + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string { } var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.", + "": "RoleBindingList is a collection of RoleBindings", "metadata": "Standard object's metadata.", "items": "Items is a list of RoleBindings", } @@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string { } var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.", + "": "RoleList is a collection of Roles", "metadata": "Standard object's metadata.", "items": "Items is a list of Roles", } diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go index 0358227..97f6333 100644 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go index fe7aae9..4b77c9c 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ b/vendor/k8s.io/api/rbac/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=rbac.authorization.k8s.io - package v1beta1 // import "k8s.io/api/rbac/v1beta1" diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go index 53d3632..71e5799 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go @@ -14,24 +14,42 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. 
// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto + + It has these top-level messages: + AggregationRule + ClusterRole + ClusterRoleBinding + ClusterRoleBindingList + ClusterRoleList + PolicyRule + Role + RoleBinding + RoleBindingList + RoleList + RoleRef + Subject +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - proto "github.com/gogo/protobuf/proto" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,343 +60,55 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{0} -} -func (m *AggregationRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *AggregationRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *AggregationRule) XXX_Merge(src proto.Message) { - xxx_messageInfo_AggregationRule.Merge(m, src) -} -func (m *AggregationRule) XXX_Size() int { - return m.Size() -} -func (m *AggregationRule) XXX_DiscardUnknown() { - xxx_messageInfo_AggregationRule.DiscardUnknown(m) -} +func (m *AggregationRule) Reset() { *m = AggregationRule{} } +func (*AggregationRule) ProtoMessage() {} +func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_AggregationRule proto.InternalMessageInfo +func (m *ClusterRole) Reset() { *m = ClusterRole{} } +func (*ClusterRole) ProtoMessage() {} +func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{1} -} -func (m *ClusterRole) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRole) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRole) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRole.Merge(m, src) -} -func (m *ClusterRole) XXX_Size() int { - return m.Size() -} -func (m *ClusterRole) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRole.DiscardUnknown(m) -} +func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } +func (*ClusterRoleBinding) ProtoMessage() {} +func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { 
return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_ClusterRole proto.InternalMessageInfo +func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } +func (*ClusterRoleBindingList) ProtoMessage() {} +func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{2} -} -func (m *ClusterRoleBinding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleBinding) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleBinding.Merge(m, src) -} -func (m *ClusterRoleBinding) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleBinding) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleBinding.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterRoleBinding proto.InternalMessageInfo - -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{3} -} -func (m *ClusterRoleBindingList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleBindingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleBindingList.Merge(m, src) -} -func (m *ClusterRoleBindingList) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleBindingList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleBindingList.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterRoleBindingList proto.InternalMessageInfo - -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{4} -} -func (m *ClusterRoleList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ClusterRoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ClusterRoleList) XXX_Merge(src proto.Message) { - xxx_messageInfo_ClusterRoleList.Merge(m, src) -} -func (m *ClusterRoleList) XXX_Size() int { - return m.Size() -} -func (m *ClusterRoleList) XXX_DiscardUnknown() { - xxx_messageInfo_ClusterRoleList.DiscardUnknown(m) -} - -var xxx_messageInfo_ClusterRoleList proto.InternalMessageInfo - -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{5} -} -func (m *PolicyRule) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PolicyRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PolicyRule) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_PolicyRule.Merge(m, src) -} -func (m *PolicyRule) XXX_Size() int { - return m.Size() -} -func (m *PolicyRule) XXX_DiscardUnknown() { - xxx_messageInfo_PolicyRule.DiscardUnknown(m) -} - -var xxx_messageInfo_PolicyRule proto.InternalMessageInfo - -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{6} -} -func (m *Role) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Role) XXX_Merge(src proto.Message) { - xxx_messageInfo_Role.Merge(m, src) -} -func (m *Role) XXX_Size() int { - return m.Size() -} -func (m *Role) XXX_DiscardUnknown() { - xxx_messageInfo_Role.DiscardUnknown(m) -} - -var xxx_messageInfo_Role proto.InternalMessageInfo - -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{7} -} -func (m *RoleBinding) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleBinding) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleBinding) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleBinding.Merge(m, src) -} -func (m *RoleBinding) XXX_Size() int { - return m.Size() -} -func (m *RoleBinding) XXX_DiscardUnknown() { - xxx_messageInfo_RoleBinding.DiscardUnknown(m) -} - -var xxx_messageInfo_RoleBinding proto.InternalMessageInfo - -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{8} -} -func (m *RoleBindingList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleBindingList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleBindingList) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleBindingList.Merge(m, src) -} -func (m *RoleBindingList) XXX_Size() int { - return m.Size() -} -func (m *RoleBindingList) XXX_DiscardUnknown() { - xxx_messageInfo_RoleBindingList.DiscardUnknown(m) -} +func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } +func (*ClusterRoleList) ProtoMessage() {} +func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_RoleBindingList proto.InternalMessageInfo +func (m *PolicyRule) Reset() { *m = PolicyRule{} } +func (*PolicyRule) ProtoMessage() {} +func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{9} -} -func (m *RoleList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleList) XXX_Merge(src proto.Message) { - 
xxx_messageInfo_RoleList.Merge(m, src) -} -func (m *RoleList) XXX_Size() int { - return m.Size() -} -func (m *RoleList) XXX_DiscardUnknown() { - xxx_messageInfo_RoleList.DiscardUnknown(m) -} +func (m *Role) Reset() { *m = Role{} } +func (*Role) ProtoMessage() {} +func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_RoleList proto.InternalMessageInfo +func (m *RoleBinding) Reset() { *m = RoleBinding{} } +func (*RoleBinding) ProtoMessage() {} +func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{10} -} -func (m *RoleRef) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RoleRef) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RoleRef) XXX_Merge(src proto.Message) { - xxx_messageInfo_RoleRef.Merge(m, src) -} -func (m *RoleRef) XXX_Size() int { - return m.Size() -} -func (m *RoleRef) XXX_DiscardUnknown() { - xxx_messageInfo_RoleRef.DiscardUnknown(m) -} +func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } +func (*RoleBindingList) ProtoMessage() {} +func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -var xxx_messageInfo_RoleRef proto.InternalMessageInfo +func (m *RoleList) Reset() { *m = RoleList{} } +func (*RoleList) ProtoMessage() {} +func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { - return fileDescriptor_99f6bec96facc83d, []int{11} -} -func (m *Subject) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Subject) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Subject) XXX_Merge(src proto.Message) { - xxx_messageInfo_Subject.Merge(m, src) -} -func (m *Subject) XXX_Size() int { - return m.Size() -} -func (m *Subject) XXX_DiscardUnknown() { - xxx_messageInfo_Subject.DiscardUnknown(m) -} +func (m *RoleRef) Reset() { *m = RoleRef{} } +func (*RoleRef) ProtoMessage() {} +func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -var xxx_messageInfo_Subject proto.InternalMessageInfo +func (m *Subject) Reset() { *m = Subject{} } +func (*Subject) ProtoMessage() {} +func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } func init() { proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") @@ -394,70 +124,10 @@ func init() { proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptor_99f6bec96facc83d) -} - -var fileDescriptor_99f6bec96facc83d = []byte{ - // 808 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, - 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, - 0xa5, 
0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, - 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, - 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, - 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, - 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, - 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, - 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, - 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, - 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, - 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, - 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, - 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, - 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, - 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, - 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, - 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, - 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, - 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, - 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, - 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, - 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, - 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, - 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, - 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, - 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, - 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, - 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, - 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, - 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, - 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, - 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, - 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, - 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, - 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, - 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, - 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, - 0x7e, 0xe0, 0x17, 0x90, 0x2f, 
0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, - 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, - 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, - 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, - 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, - 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, - 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, - 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, - 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, - 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, - 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, - 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, -} - func (m *AggregationRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -465,36 +135,29 @@ func (m *AggregationRule) Marshal() (dAtA []byte, err error) { } func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *AggregationRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.ClusterRoleSelectors) > 0 { - for iNdEx := len(m.ClusterRoleSelectors) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ClusterRoleSelectors[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.ClusterRoleSelectors { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRole) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -502,58 +165,47 @@ func (m *ClusterRole) Marshal() (dAtA []byte, err error) { } func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRole) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.AggregationRule != nil { - { - size, err := m.AggregationRule.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n1 if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range 
m.Rules { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.AggregationRule != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) + n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n2 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -561,56 +213,45 @@ func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n3 if len(m.Subjects) > 0 { - for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Subjects { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n4, err := m.RoleRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n4 + return i, nil } func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -618,46 +259,37 @@ func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -665,46 +297,37 @@ func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { } func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ClusterRoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PolicyRule) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -712,67 +335,92 @@ func (m *PolicyRule) Marshal() (dAtA []byte, err error) { } func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PolicyRule) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.NonResourceURLs) > 0 { - for iNdEx := len(m.NonResourceURLs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.NonResourceURLs[iNdEx]) - copy(dAtA[i:], m.NonResourceURLs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NonResourceURLs[iNdEx]))) - i-- - dAtA[i] = 0x2a + if len(m.Verbs) > 0 { + for _, s := range m.Verbs { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.ResourceNames) > 0 { - for iNdEx := len(m.ResourceNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ResourceNames[iNdEx]) - copy(dAtA[i:], m.ResourceNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceNames[iNdEx]))) - i-- - dAtA[i] = 0x22 + if len(m.APIGroups) > 0 { + for _, s := range m.APIGroups { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } if len(m.Resources) > 0 { - for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Resources[iNdEx]) - copy(dAtA[i:], m.Resources[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resources[iNdEx]))) - i-- + for _, s := range m.Resources { dAtA[i] 
= 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.APIGroups) > 0 { - for iNdEx := len(m.APIGroups) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.APIGroups[iNdEx]) - copy(dAtA[i:], m.APIGroups[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroups[iNdEx]))) - i-- - dAtA[i] = 0x12 + if len(m.ResourceNames) > 0 { + for _, s := range m.ResourceNames { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Verbs) > 0 { - for iNdEx := len(m.Verbs) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Verbs[iNdEx]) - copy(dAtA[i:], m.Verbs[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verbs[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.NonResourceURLs) > 0 { + for _, s := range m.NonResourceURLs { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *Role) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -780,46 +428,37 @@ func (m *Role) Marshal() (dAtA []byte, err error) { } func (m *Role) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 if len(m.Rules) > 0 { - for iNdEx := len(m.Rules) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Rules[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Rules { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleBinding) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -827,56 +466,45 @@ func (m *RoleBinding) Marshal() (dAtA []byte, err error) { } func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleBinding) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.RoleRef.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x1a + i += n8 if len(m.Subjects) > 0 { - for iNdEx := len(m.Subjects) - 1; iNdEx >= 0; iNdEx-- { - 
{ - size, err := m.Subjects[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Subjects { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) + n9, err := m.RoleRef.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n9 + return i, nil } func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -884,46 +512,37 @@ func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { } func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleBindingList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n10, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -931,46 +550,37 @@ func (m *RoleList) Marshal() (dAtA []byte, err error) { } func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n11, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) 
- } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *RoleRef) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -978,37 +588,29 @@ func (m *RoleRef) Marshal() (dAtA []byte, err error) { } func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RoleRef) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil } func (m *Subject) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1016,53 +618,57 @@ func (m *Subject) Marshal() (dAtA []byte, err error) { } func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Subject) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.APIGroup) - copy(dAtA[i:], m.APIGroup) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) + i += copy(dAtA[i:], m.APIGroup) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - 
offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *AggregationRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.ClusterRoleSelectors) > 0 { @@ -1075,9 +681,6 @@ func (m *AggregationRule) Size() (n int) { } func (m *ClusterRole) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1096,9 +699,6 @@ func (m *ClusterRole) Size() (n int) { } func (m *ClusterRoleBinding) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1115,9 +715,6 @@ func (m *ClusterRoleBinding) Size() (n int) { } func (m *ClusterRoleBindingList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1132,9 +729,6 @@ func (m *ClusterRoleBindingList) Size() (n int) { } func (m *ClusterRoleList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1149,9 +743,6 @@ func (m *ClusterRoleList) Size() (n int) { } func (m *PolicyRule) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Verbs) > 0 { @@ -1188,9 +779,6 @@ func (m *PolicyRule) Size() (n int) { } func (m *Role) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1205,9 +793,6 @@ func (m *Role) Size() (n int) { } func (m *RoleBinding) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -1224,9 +809,6 @@ func (m *RoleBinding) Size() (n int) { } func (m *RoleBindingList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1241,9 +823,6 @@ func (m *RoleBindingList) Size() (n int) { } func (m *RoleList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -1258,9 +837,6 @@ func (m *RoleList) Size() (n int) { } func (m *RoleRef) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.APIGroup) @@ -1273,9 +849,6 @@ func (m *RoleRef) Size() (n int) { } func (m *Subject) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -1290,7 +863,14 @@ func (m *Subject) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -1299,13 +879,8 @@ func (this *AggregationRule) String() string { if this == nil { return "nil" } - repeatedStringForClusterRoleSelectors := "[]LabelSelector{" - for _, f := range this.ClusterRoleSelectors { - repeatedStringForClusterRoleSelectors += fmt.Sprintf("%v", f) + "," - } - repeatedStringForClusterRoleSelectors += "}" s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + repeatedStringForClusterRoleSelectors + `,`, + `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1314,15 +889,10 @@ func (this *ClusterRole) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]PolicyRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" s := strings.Join([]string{`&ClusterRole{`, - 
`ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, - `AggregationRule:` + strings.Replace(this.AggregationRule.String(), "AggregationRule", "AggregationRule", 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, + `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, `}`, }, "") return s @@ -1331,14 +901,9 @@ func (this *ClusterRoleBinding) String() string { if this == nil { return "nil" } - repeatedStringForSubjects := "[]Subject{" - for _, f := range this.Subjects { - repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," - } - repeatedStringForSubjects += "}" s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + repeatedStringForSubjects + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1348,14 +913,9 @@ func (this *ClusterRoleBindingList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ClusterRoleBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1364,14 +924,9 @@ func (this *ClusterRoleList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]ClusterRole{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1394,14 +949,9 @@ func (this 
*Role) String() string { if this == nil { return "nil" } - repeatedStringForRules := "[]PolicyRule{" - for _, f := range this.Rules { - repeatedStringForRules += strings.Replace(strings.Replace(f.String(), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + "," - } - repeatedStringForRules += "}" s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + repeatedStringForRules + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1410,14 +960,9 @@ func (this *RoleBinding) String() string { if this == nil { return "nil" } - repeatedStringForSubjects := "[]Subject{" - for _, f := range this.Subjects { - repeatedStringForSubjects += strings.Replace(strings.Replace(f.String(), "Subject", "Subject", 1), `&`, ``, 1) + "," - } - repeatedStringForSubjects += "}" s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + repeatedStringForSubjects + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -1427,14 +972,9 @@ func (this *RoleBindingList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]RoleBinding{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1443,14 +983,9 @@ func (this *RoleList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]Role{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "Role", "Role", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -1503,7 +1038,7 @@ func (m *AggregationRule) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1531,7 +1066,7 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1540,13 +1075,10 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, v1.LabelSelector{}) + m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -1560,9 +1092,6 @@ func (m *AggregationRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1590,7 +1119,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1618,7 +1147,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1627,9 +1156,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1651,7 +1177,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1660,9 +1186,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1685,7 +1208,7 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1694,9 +1217,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1716,9 +1236,6 @@ func (m *ClusterRole) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1746,7 +1263,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1774,7 +1291,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1783,9 +1300,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex 
> l { return io.ErrUnexpectedEOF } @@ -1807,7 +1321,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1816,9 +1330,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1841,7 +1352,7 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1850,9 +1361,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1869,9 +1377,6 @@ func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1899,7 +1404,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1927,7 +1432,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1936,9 +1441,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1960,7 +1462,7 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1969,9 +1471,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1989,9 +1488,6 @@ func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2019,7 +1515,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2047,7 +1543,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2056,9 +1552,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2080,7 +1573,7 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2089,9 +1582,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2109,9 +1599,6 @@ func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2139,7 +1626,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2167,7 +1654,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2177,9 +1664,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2199,7 +1683,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2209,9 +1693,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2231,7 +1712,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2241,9 +1722,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2263,7 +1741,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2273,9 +1751,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2295,7 +1770,7 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2305,9 +1780,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2322,9 +1794,6 @@ func (m *PolicyRule) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2352,7 +1821,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2380,7 +1849,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2389,9 +1858,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { 
return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2413,7 +1879,7 @@ func (m *Role) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2422,9 +1888,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2442,9 +1905,6 @@ func (m *Role) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2472,7 +1932,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2500,7 +1960,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2509,9 +1969,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2533,7 +1990,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2542,9 +1999,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2567,7 +2021,7 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2576,9 +2030,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2595,9 +2046,6 @@ func (m *RoleBinding) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2625,7 +2073,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2653,7 +2101,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2662,9 +2110,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2686,7 +2131,7 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2695,9 +2140,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { return 
ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2715,9 +2157,6 @@ func (m *RoleBindingList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2745,7 +2184,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2773,7 +2212,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2782,9 +2221,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2806,7 +2242,7 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2815,9 +2251,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2835,9 +2268,6 @@ func (m *RoleList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -2865,7 +2295,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2893,7 +2323,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2903,9 +2333,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2925,7 +2352,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2935,9 +2362,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2957,7 +2381,7 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2967,9 +2391,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -2984,9 +2405,6 @@ func (m *RoleRef) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3014,7 +2432,7 @@ func (m *Subject) 
Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3042,7 +2460,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3052,9 +2470,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3074,7 +2489,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3084,9 +2499,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3106,7 +2518,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3116,9 +2528,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3138,7 +2547,7 @@ func (m *Subject) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3148,9 +2557,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3165,9 +2571,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3183,7 +2586,6 @@ func (m *Subject) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -3215,8 +2617,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -3233,34 +2637,112 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: 
illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 808 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, + 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, + 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, + 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, + 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, + 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, + 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, + 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, + 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, + 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, + 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, + 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, + 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 0x4d, 0x55, 0x3c, + 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, + 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, + 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, + 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, + 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, + 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, + 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, + 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, + 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, + 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, + 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, + 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, + 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, + 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, + 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 
0x33, 0x1d, 0xce, + 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, + 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, + 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, + 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, + 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, + 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, + 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, + 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, + 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, + 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, + 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, + 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, + 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, + 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, + 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, + 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, + 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, + 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, + 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, + 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, + 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, + 0xd1, 0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, + 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto index 87e0dbd..27bd30c 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ b/vendor/k8s.io/api/rbac/v1beta1/generated.proto @@ -37,14 +37,12 @@ message AggregationRule { } // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. message ClusterRole { // Standard object's metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this ClusterRole - // +optional repeated PolicyRule rules = 2; // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. @@ -56,7 +54,6 @@ message ClusterRole { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. message ClusterRoleBinding { // Standard object's metadata. 
// +optional @@ -71,8 +68,7 @@ message ClusterRoleBinding { optional RoleRef roleRef = 3; } -// ClusterRoleBindingList is a collection of ClusterRoleBindings. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20. +// ClusterRoleBindingList is a collection of ClusterRoleBindings message ClusterRoleBindingList { // Standard object's metadata. // +optional @@ -82,8 +78,7 @@ message ClusterRoleBindingList { repeated ClusterRoleBinding items = 2; } -// ClusterRoleList is a collection of ClusterRoles. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. +// ClusterRoleList is a collection of ClusterRoles message ClusterRoleList { // Standard object's metadata. // +optional @@ -121,21 +116,18 @@ message PolicyRule { } // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. message Role { // Standard object's metadata. // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; // Rules holds all the PolicyRules for this Role - // +optional repeated PolicyRule rules = 2; } // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given // namespace only have effect in that namespace. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. message RoleBinding { // Standard object's metadata. // +optional @@ -151,7 +143,6 @@ message RoleBinding { } // RoleBindingList is a collection of RoleBindings -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. message RoleBindingList { // Standard object's metadata. // +optional @@ -162,7 +153,6 @@ message RoleBindingList { } // RoleList is a collection of Roles -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. message RoleList { // Standard object's metadata. // +optional diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go index 74c7093..857b67a 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types.go @@ -102,7 +102,6 @@ type RoleRef struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20. type Role struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -110,7 +109,6 @@ type Role struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this Role - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` } @@ -120,7 +118,6 @@ type Role struct { // RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. // It adds who information via Subjects and namespace information by which namespace it exists in. 
RoleBindings in a given // namespace only have effect in that namespace. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20. type RoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -139,7 +136,6 @@ type RoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleBindingList is a collection of RoleBindings -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20. type RoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -153,7 +149,6 @@ type RoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // RoleList is a collection of Roles -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20. type RoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -169,7 +164,6 @@ type RoleList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20. type ClusterRole struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -177,7 +171,6 @@ type ClusterRole struct { metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Rules holds all the PolicyRules for this ClusterRole - // +optional Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be @@ -200,7 +193,6 @@ type AggregationRule struct { // ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, // and adds who information via Subject. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20. type ClusterRoleBinding struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -218,8 +210,7 @@ type ClusterRoleBinding struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleBindingList is a collection of ClusterRoleBindings. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20. +// ClusterRoleBindingList is a collection of ClusterRoleBindings type ClusterRoleBindingList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. @@ -232,8 +223,7 @@ type ClusterRoleBindingList struct { // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ClusterRoleList is a collection of ClusterRoles. -// Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20. +// ClusterRoleList is a collection of ClusterRoles type ClusterRoleList struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go index 8e9d7ac..66dba6c 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go @@ -28,7 +28,7 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", + "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", } @@ -37,7 +37,7 @@ func (AggregationRule) SwaggerDoc() map[string]string { } var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRole, and will no longer be served in v1.20.", + "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this ClusterRole", "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", @@ -48,7 +48,7 @@ func (ClusterRole) SwaggerDoc() map[string]string { } var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBinding, and will no longer be served in v1.20.", + "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -59,7 +59,7 @@ func (ClusterRoleBinding) SwaggerDoc() map[string]string { } var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoleBindingList, and will no longer be served in v1.20.", + "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoleBindings", } @@ -69,7 +69,7 @@ func (ClusterRoleBindingList) SwaggerDoc() map[string]string { } var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles. 
Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 ClusterRoles, and will no longer be served in v1.20.", + "": "ClusterRoleList is a collection of ClusterRoles", "metadata": "Standard object's metadata.", "items": "Items is a list of ClusterRoles", } @@ -92,7 +92,7 @@ func (PolicyRule) SwaggerDoc() map[string]string { } var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 Role, and will no longer be served in v1.20.", + "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", "metadata": "Standard object's metadata.", "rules": "Rules holds all the PolicyRules for this Role", } @@ -102,7 +102,7 @@ func (Role) SwaggerDoc() map[string]string { } var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace. Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBinding, and will no longer be served in v1.20.", + "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", "metadata": "Standard object's metadata.", "subjects": "Subjects holds references to the objects the role applies to.", "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", @@ -113,7 +113,7 @@ func (RoleBinding) SwaggerDoc() map[string]string { } var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleBindingList, and will no longer be served in v1.20.", + "": "RoleBindingList is a collection of RoleBindings", "metadata": "Standard object's metadata.", "items": "Items is a list of RoleBindings", } @@ -123,7 +123,7 @@ func (RoleBindingList) SwaggerDoc() map[string]string { } var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles Deprecated in v1.17 in favor of rbac.authorization.k8s.io/v1 RoleList, and will no longer be served in v1.20.", + "": "RoleList is a collection of Roles", "metadata": "Standard object's metadata.", "items": "Items is a list of Roles", } diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go index 7ffe581..c085c90 100644 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go @@ -122,7 +122,7 @@ func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRoleBinding, len(*in)) @@ -155,7 +155,7 @@ func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterRole, len(*in)) @@ -294,7 +294,7 @@ func (in *RoleBinding) DeepCopyObject() runtime.Object { func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]RoleBinding, len(*in)) @@ -327,7 +327,7 @@ func (in *RoleBindingList) DeepCopyObject() runtime.Object { func (in *RoleList) DeepCopyInto(out *RoleList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]Role, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1/doc.go b/vendor/k8s.io/api/scheduling/v1/doc.go deleted file mode 100644 index 76c4da0..0000000 --- a/vendor/k8s.io/api/scheduling/v1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package -// +k8s:openapi-gen=true - -// +groupName=scheduling.k8s.io - -package v1 // import "k8s.io/api/scheduling/v1" diff --git a/vendor/k8s.io/api/scheduling/v1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1/generated.pb.go deleted file mode 100644 index efc3102..0000000 --- a/vendor/k8s.io/api/scheduling/v1/generated.pb.go +++ /dev/null @@ -1,735 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto - -package v1 - -import ( - fmt "fmt" - - io "io" - - proto "github.com/gogo/protobuf/proto" - - k8s_io_api_core_v1 "k8s.io/api/core/v1" - - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_277b2f43b72fffd5, []int{0} -} -func (m *PriorityClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PriorityClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_PriorityClass.Merge(m, src) -} -func (m *PriorityClass) XXX_Size() int { - return m.Size() -} -func (m *PriorityClass) XXX_DiscardUnknown() { - xxx_messageInfo_PriorityClass.DiscardUnknown(m) -} - -var xxx_messageInfo_PriorityClass proto.InternalMessageInfo - -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_277b2f43b72fffd5, []int{1} -} -func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PriorityClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PriorityClassList.Merge(m, src) -} -func (m *PriorityClassList) XXX_Size() int { - return m.Size() -} -func (m *PriorityClassList) XXX_DiscardUnknown() { - xxx_messageInfo_PriorityClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo - -func init() { - 
proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1.PriorityClass") - proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1.PriorityClassList") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1/generated.proto", fileDescriptor_277b2f43b72fffd5) -} - -var fileDescriptor_277b2f43b72fffd5 = []byte{ - // 488 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0xeb, 0x1e, 0x95, 0x0e, 0x57, 0x95, 0x4a, 0x10, 0x52, 0xd4, 0x21, 0xad, 0x7a, 0x03, - 0x59, 0xb0, 0xe9, 0x09, 0x10, 0xd2, 0x4d, 0x84, 0x93, 0x10, 0xd2, 0x21, 0xaa, 0x0c, 0x0c, 0x88, - 0x01, 0x27, 0x79, 0x2f, 0x35, 0x4d, 0xe2, 0xc8, 0x76, 0x22, 0x75, 0xe3, 0x23, 0xf0, 0x8d, 0x58, - 0x3b, 0xde, 0x78, 0x53, 0x45, 0xc3, 0x47, 0x60, 0x63, 0x42, 0x49, 0xc3, 0xa5, 0x7f, 0xee, 0x04, - 0x5b, 0xfc, 0x3e, 0xcf, 0xef, 0xb1, 0xfd, 0x24, 0xc1, 0xaf, 0xe6, 0x2f, 0x15, 0xe1, 0x82, 0xce, - 0x33, 0x0f, 0x64, 0x02, 0x1a, 0x14, 0xcd, 0x21, 0x09, 0x84, 0xa4, 0xb5, 0xc0, 0x52, 0x4e, 0x95, - 0x3f, 0x83, 0x20, 0x8b, 0x78, 0x12, 0xd2, 0x7c, 0x42, 0x43, 0x48, 0x40, 0x32, 0x0d, 0x01, 0x49, - 0xa5, 0xd0, 0xc2, 0x30, 0x37, 0x4e, 0xc2, 0x52, 0x4e, 0x1a, 0x27, 0xc9, 0x27, 0x83, 0x27, 0x21, - 0xd7, 0xb3, 0xcc, 0x23, 0xbe, 0x88, 0x69, 0x28, 0x42, 0x41, 0x2b, 0xc0, 0xcb, 0x2e, 0xab, 0x55, - 0xb5, 0xa8, 0x9e, 0x36, 0x41, 0x83, 0xf1, 0xd6, 0x96, 0xbe, 0x90, 0x70, 0xcb, 0x66, 0x83, 0x67, - 0x8d, 0x27, 0x66, 0xfe, 0x8c, 0x27, 0x20, 0x17, 0x34, 0x9d, 0x87, 0xe5, 0x40, 0xd1, 0x18, 0x34, - 0xbb, 0x8d, 0xa2, 0x77, 0x51, 0x32, 0x4b, 0x34, 0x8f, 0xe1, 0x00, 0x78, 0xf1, 0x2f, 0xa0, 0xbc, - 0x68, 0xcc, 0xf6, 0xb9, 0xf1, 0xaf, 0x36, 0xee, 0x4d, 0x25, 0x17, 0x92, 0xeb, 0xc5, 0xeb, 0x88, - 0x29, 0x65, 0x7c, 0xc6, 0xc7, 0xe5, 0xa9, 0x02, 0xa6, 0x99, 0x89, 0x46, 0xc8, 0xee, 0x9e, 0x3e, - 0x25, 0x4d, 0x61, 0x37, 0xe1, 0x24, 0x9d, 0x87, 0xe5, 0x40, 0x91, 0xd2, 0x4d, 0xf2, 0x09, 0x79, - 0xef, 0x7d, 0x01, 0x5f, 0xbf, 0x03, 0xcd, 0x1c, 0x63, 0xb9, 0x1a, 0xb6, 0x8a, 0xd5, 0x10, 0x37, - 0x33, 0xf7, 0x26, 0xd5, 0x38, 0xc1, 0x9d, 0x9c, 0x45, 0x19, 0x98, 0xed, 0x11, 0xb2, 0x3b, 0x4e, - 0xaf, 0x36, 0x77, 0x3e, 0x94, 0x43, 0x77, 0xa3, 0x19, 0x67, 0xb8, 0x17, 0x46, 0xc2, 0x63, 0xd1, - 0x39, 0x5c, 0xb2, 0x2c, 0xd2, 0xe6, 0xd1, 0x08, 0xd9, 0xc7, 0xce, 0xa3, 0xda, 0xdc, 0x7b, 0xb3, - 0x2d, 0xba, 0xbb, 0x5e, 0xe3, 0x39, 0xee, 0x06, 0xa0, 0x7c, 0xc9, 0x53, 0xcd, 0x45, 0x62, 0xde, - 0x1b, 0x21, 0xfb, 0xbe, 0xf3, 0xb0, 0x46, 0xbb, 0xe7, 0x8d, 0xe4, 0x6e, 0xfb, 0x8c, 0x10, 0xf7, - 0x53, 0x09, 0x10, 0x57, 0xab, 0xa9, 0x88, 0xb8, 0xbf, 0x30, 0x3b, 0x15, 0x7b, 0x56, 0xac, 0x86, - 0xfd, 0xe9, 0x9e, 0xf6, 0x7b, 0x35, 0x3c, 0x39, 0xfc, 0x02, 0xc8, 0xbe, 0xcd, 0x3d, 0x08, 0x1d, - 0x7f, 0x47, 0xf8, 0xc1, 0x4e, 0xeb, 0x17, 0x5c, 0x69, 0xe3, 0xd3, 0x41, 0xf3, 0xe4, 0xff, 0x9a, - 0x2f, 0xe9, 0xaa, 0xf7, 0x7e, 0x7d, 0xc5, 0xe3, 0xbf, 0x93, 0xad, 0xd6, 0x2f, 0x70, 0x87, 0x6b, - 0x88, 0x95, 0xd9, 0x1e, 0x1d, 0xd9, 0xdd, 0xd3, 0xc7, 0xe4, 0xae, 0xbf, 0x80, 0xec, 0x9c, 0xac, - 0x79, 0x3d, 0x6f, 0x4b, 0xda, 0xdd, 0x84, 0x38, 0xf6, 0x72, 0x6d, 0xb5, 0xae, 0xd6, 0x56, 0xeb, - 0x7a, 0x6d, 0xb5, 0xbe, 0x16, 0x16, 0x5a, 0x16, 0x16, 0xba, 0x2a, 0x2c, 0x74, 0x5d, 0x58, 0xe8, - 0x47, 0x61, 0xa1, 0x6f, 0x3f, 0xad, 0xd6, 0xc7, 0x76, 0x3e, 0xf9, 0x13, 0x00, 0x00, 0xff, 0xff, - 0x53, 0xd9, 0x28, 0x30, 0xb1, 0x03, 0x00, 0x00, -} - -func (m *PriorityClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.PreemptionPolicy != nil { - i -= len(*m.PreemptionPolicy) - copy(dAtA[i:], *m.PreemptionPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i-- - dAtA[i] = 0x2a - } - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x22 - i-- - if m.GlobalDefault { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x10 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *PriorityClass) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Value)) - n += 2 - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PriorityClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PriorityClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `GlobalDefault:` + 
fmt.Sprintf("%v", this.GlobalDefault) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, - `}`, - }, "") - return s -} -func (this *PriorityClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PriorityClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PriorityClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.GlobalDefault = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) - m.PreemptionPolicy = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PriorityClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PriorityClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") -) diff --git a/vendor/k8s.io/api/scheduling/v1/generated.proto b/vendor/k8s.io/api/scheduling/v1/generated.proto deleted file mode 100644 index 82f6e0a..0000000 --- a/vendor/k8s.io/api/scheduling/v1/generated.proto +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.scheduling.v1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// PriorityClass defines mapping from a priority class name to the priority -// integer value. The value can be any valid integer. -message PriorityClass { - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The value of this priority class. This is the actual priority that pods - // receive when they have the name of this class in their pod spec. - optional int32 value = 2; - - // globalDefault specifies whether this PriorityClass should be considered as - // the default priority for pods that do not have any priority class. - // Only one PriorityClass can be marked as `globalDefault`. However, if more than - // one PriorityClasses exists with their `globalDefault` field set to true, - // the smallest value of such global default PriorityClasses will be used as the default priority. - // +optional - optional bool globalDefault = 3; - - // description is an arbitrary string that usually provides guidelines on - // when this priority class should be used. - // +optional - optional string description = 4; - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. - // +optional - optional string preemptionPolicy = 5; -} - -// PriorityClassList is a collection of priority classes. -message PriorityClassList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of PriorityClasses - repeated PriorityClass items = 2; -} - diff --git a/vendor/k8s.io/api/scheduling/v1/register.go b/vendor/k8s.io/api/scheduling/v1/register.go deleted file mode 100644 index 33977fe..0000000 --- a/vendor/k8s.io/api/scheduling/v1/register.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "scheduling.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - - // SchemeBuilder is a collection of functions that add things to a scheme. 
- SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - // AddToScheme applies all the stored functions to the scheme. - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &PriorityClass{}, - &PriorityClassList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/scheduling/v1/types.go b/vendor/k8s.io/api/scheduling/v1/types.go deleted file mode 100644 index 087ee10..0000000 --- a/vendor/k8s.io/api/scheduling/v1/types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - apiv1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PriorityClass defines mapping from a priority class name to the priority -// integer value. The value can be any valid integer. -type PriorityClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The value of this priority class. This is the actual priority that pods - // receive when they have the name of this class in their pod spec. - Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` - - // globalDefault specifies whether this PriorityClass should be considered as - // the default priority for pods that do not have any priority class. - // Only one PriorityClass can be marked as `globalDefault`. However, if more than - // one PriorityClasses exists with their `globalDefault` field set to true, - // the smallest value of such global default PriorityClasses will be used as the default priority. - // +optional - GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` - - // description is an arbitrary string that usually provides guidelines on - // when this priority class should be used. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. - // +optional - PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PriorityClassList is a collection of priority classes. 
-type PriorityClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of PriorityClasses - Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go deleted file mode 100644 index 4cfb9d3..0000000 --- a/vendor/k8s.io/api/scheduling/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. 
This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", -} - -func (PriorityClass) SwaggerDoc() map[string]string { - return map_PriorityClass -} - -var map_PriorityClassList = map[string]string{ - "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of PriorityClasses", -} - -func (PriorityClassList) SwaggerDoc() map[string]string { - return map_PriorityClassList -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go deleted file mode 100644 index 63bfe64..0000000 --- a/vendor/k8s.io/api/scheduling/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,90 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.PreemptionPolicy != nil { - in, out := &in.PreemptionPolicy, &out.PreemptionPolicy - *out = new(corev1.PreemptionPolicy) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. -func (in *PriorityClass) DeepCopy() *PriorityClass { - if in == nil { - return nil - } - out := new(PriorityClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PriorityClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PriorityClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. -func (in *PriorityClassList) DeepCopy() *PriorityClassList { - if in == nil { - return nil - } - out := new(PriorityClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PriorityClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go index cff47e1..e10d07f 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io - package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1" diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go index 8a62104..97c07c9 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go @@ -14,25 +14,30 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +// DO NOT EDIT! -package v1alpha1 +/* + Package v1alpha1 is a generated protocol buffer package. -import ( - fmt "fmt" + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto - io "io" + It has these top-level messages: + PriorityClass + PriorityClassList +*/ +package v1alpha1 - proto "github.com/gogo/protobuf/proto" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - k8s_io_api_core_v1 "k8s.io/api/core/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -43,112 +48,24 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_f033641dd0b95dce, []int{0} -} -func (m *PriorityClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PriorityClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_PriorityClass.Merge(m, src) -} -func (m *PriorityClass) XXX_Size() int { - return m.Size() -} -func (m *PriorityClass) XXX_DiscardUnknown() { - xxx_messageInfo_PriorityClass.DiscardUnknown(m) -} +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_PriorityClass proto.InternalMessageInfo - -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_f033641dd0b95dce, []int{1} -} -func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PriorityClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PriorityClassList.Merge(m, src) -} -func (m *PriorityClassList) XXX_Size() int { - return m.Size() -} -func (m *PriorityClassList) XXX_DiscardUnknown() { - xxx_messageInfo_PriorityClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptor_f033641dd0b95dce) -} - -var fileDescriptor_f033641dd0b95dce = []byte{ - // 494 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x3b, 0x5d, 0x0b, 0x75, 0x4a, 0xa1, 0x46, 0x84, 0xd0, 0xc3, 0xb4, 0x74, 0x2f, 0xbd, - 0xec, 0x8c, 0x5d, 0x54, 0x84, 0xbd, 0xd5, 0x85, 0x45, 0x50, 0x2c, 0x39, 0x78, 0x10, 0x0f, 0x4e, - 0xd3, 0x77, 0xd3, 0xb1, 0x49, 0x26, 0xcc, 0x4c, 0x02, 0xbd, 0xf9, 0x11, 0xfc, 0x52, 0x42, 0x8f, - 0x7b, 0xdc, 0x53, 0xb1, 0xf1, 0x23, 0x78, 0xf3, 0x24, 0x49, 0xd3, 0x4d, 0xdb, 0xf8, 0x67, 0x6f, - 0x99, 0xf7, 0xf9, 0x3d, 0xcf, 0xcc, 0x3c, 0x49, 0xf0, 0xd5, 0xe2, 0xa5, 0xa6, 0x42, 0xb2, 0x45, - 0x3c, 0x05, 0x15, 0x82, 0x01, 0xcd, 0x12, 0x08, 0x67, 0x52, 0xb1, 0x42, 0xe0, 0x91, 0x60, 0xda, - 0x9d, 0xc3, 0x2c, 0xf6, 0x45, 0xe8, 0xb1, 0x64, 0xc4, 0xfd, 0x68, 0xce, 0x47, 0xcc, 0x83, 0x10, - 0x14, 0x37, 0x30, 0xa3, 0x91, 0x92, 0x46, 0x5a, 
0x64, 0xcb, 0x53, 0x1e, 0x09, 0x5a, 0xf2, 0x74, - 0xc7, 0x77, 0xcf, 0x3c, 0x61, 0xe6, 0xf1, 0x94, 0xba, 0x32, 0x60, 0x9e, 0xf4, 0x24, 0xcb, 0x6d, - 0xd3, 0xf8, 0x3a, 0x5f, 0xe5, 0x8b, 0xfc, 0x69, 0x1b, 0xd7, 0x1d, 0xec, 0x6d, 0xef, 0x4a, 0x05, - 0x2c, 0xa9, 0x6c, 0xd9, 0x7d, 0x56, 0x32, 0x01, 0x77, 0xe7, 0x22, 0x04, 0xb5, 0x64, 0xd1, 0xc2, - 0xcb, 0x06, 0x9a, 0x05, 0x60, 0xf8, 0x9f, 0x5c, 0xec, 0x6f, 0x2e, 0x15, 0x87, 0x46, 0x04, 0x50, - 0x31, 0xbc, 0xf8, 0x9f, 0x21, 0xbb, 0x6e, 0xc0, 0x8f, 0x7d, 0x83, 0x9f, 0x75, 0xdc, 0x9e, 0x28, - 0x21, 0x95, 0x30, 0xcb, 0x57, 0x3e, 0xd7, 0xda, 0xfa, 0x84, 0x9b, 0xd9, 0xa9, 0x66, 0xdc, 0x70, - 0x1b, 0xf5, 0xd1, 0xb0, 0x75, 0xfe, 0x94, 0x96, 0xb5, 0xdd, 0x85, 0xd3, 0x68, 0xe1, 0x65, 0x03, - 0x4d, 0x33, 0x9a, 0x26, 0x23, 0xfa, 0x6e, 0xfa, 0x19, 0x5c, 0xf3, 0x16, 0x0c, 0x1f, 0x5b, 0xab, - 0x75, 0xaf, 0x96, 0xae, 0x7b, 0xb8, 0x9c, 0x39, 0x77, 0xa9, 0xd6, 0x29, 0x6e, 0x24, 0xdc, 0x8f, - 0xc1, 0xae, 0xf7, 0xd1, 0xb0, 0x31, 0x6e, 0x17, 0x70, 0xe3, 0x7d, 0x36, 0x74, 0xb6, 0x9a, 0x75, - 0x81, 0xdb, 0x9e, 0x2f, 0xa7, 0xdc, 0xbf, 0x84, 0x6b, 0x1e, 0xfb, 0xc6, 0x3e, 0xe9, 0xa3, 0x61, - 0x73, 0xfc, 0xa4, 0x80, 0xdb, 0x57, 0xfb, 0xa2, 0x73, 0xc8, 0x5a, 0xcf, 0x71, 0x6b, 0x06, 0xda, - 0x55, 0x22, 0x32, 0x42, 0x86, 0xf6, 0x83, 0x3e, 0x1a, 0x3e, 0x1c, 0x3f, 0x2e, 0xac, 0xad, 0xcb, - 0x52, 0x72, 0xf6, 0x39, 0xcb, 0xc3, 0x9d, 0x48, 0x01, 0x04, 0xf9, 0x6a, 0x22, 0x7d, 0xe1, 0x2e, - 0xed, 0x46, 0xee, 0xbd, 0x48, 0xd7, 0xbd, 0xce, 0xe4, 0x48, 0xfb, 0xb5, 0xee, 0x9d, 0x56, 0xbf, - 0x00, 0x7a, 0x8c, 0x39, 0x95, 0xd0, 0xc1, 0x37, 0x84, 0x1f, 0x1d, 0xb4, 0xfe, 0x46, 0x68, 0x63, - 0x7d, 0xac, 0x34, 0x4f, 0xef, 0xd7, 0x7c, 0xe6, 0xce, 0x7b, 0xef, 0x14, 0x57, 0x6c, 0xee, 0x26, - 0x7b, 0xad, 0x3b, 0xb8, 0x21, 0x0c, 0x04, 0xda, 0xae, 0xf7, 0x4f, 0x86, 0xad, 0xf3, 0x33, 0xfa, - 0xef, 0x7f, 0x81, 0x1e, 0x9c, 0xaf, 0x7c, 0x49, 0xaf, 0xb3, 0x0c, 0x67, 0x1b, 0x35, 0xa6, 0xab, - 0x0d, 0xa9, 0xdd, 0x6c, 0x48, 0xed, 0x76, 0x43, 0x6a, 0x5f, 0x52, 0x82, 0x56, 0x29, 0x41, 0x37, - 0x29, 0x41, 0xb7, 0x29, 0x41, 0xdf, 0x53, 0x82, 0xbe, 0xfe, 0x20, 0xb5, 0x0f, 0xcd, 0x5d, 0xe6, - 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, 0x5c, 0x1a, 0x39, 0xc9, 0x03, 0x00, 0x00, -} - func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -156,55 +73,40 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.PreemptionPolicy != nil { - i -= len(*m.PreemptionPolicy) - copy(dAtA[i:], *m.PreemptionPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x22 - i-- + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + dAtA[i] = 0x18 + i++ if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x10 - { - size, err 
:= m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + return i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -212,57 +114,61 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *PriorityClass) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -271,17 +177,10 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *PriorityClassList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -296,7 +195,14 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -306,11 +212,10 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -319,14 +224,9 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PriorityClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -354,7 +254,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -382,7 +282,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -391,9 +291,6 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -415,7 +312,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= int32(b&0x7F) << shift + m.Value |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -434,7 +331,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -454,7 +351,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -464,47 +361,11 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return 
ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) - m.PreemptionPolicy = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -514,9 +375,6 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -544,7 +402,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -572,7 +430,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -581,9 +439,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -605,7 +460,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -614,9 +469,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -634,9 +486,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -652,7 +501,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -684,8 +532,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -702,34 +552,89 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = 
fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 447 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x8b, 0xd3, 0x40, + 0x18, 0xc6, 0x33, 0x5d, 0x0b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0xe8, 0x61, 0x36, 0xac, 0x97, 0x5c, + 0x76, 0xc6, 0x2e, 0x2a, 0x82, 0xb7, 0xb8, 0xb0, 0x08, 0x8a, 0x92, 0x83, 0x07, 0xf1, 0xe0, 0x24, + 0x79, 0x37, 0x1d, 0x9b, 0x64, 0xc2, 0xcc, 0x24, 0xb0, 0x37, 0xcf, 0x9e, 0xfc, 0x52, 0x42, 0x8f, + 0x7b, 0xdc, 0xd3, 0x62, 0xe3, 0x17, 0x91, 0xa4, 0x69, 0xd3, 0x5a, 0xfc, 0x73, 0xcb, 0x3c, 0xef, + 0xef, 0x79, 0xe6, 0xcd, 0xc3, 0xe0, 0x8b, 0xc5, 0x73, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, + 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, + 0xa6, 0x22, 0x4f, 0x58, 0x35, 0xe3, 0x69, 0x31, 0xe7, 0x33, 0x96, 0x40, 0x0e, 0x8a, 0x1b, 0x88, + 0x69, 0xa1, 0xa4, 0x91, 0x36, 0x59, 0xf3, 0x94, 0x17, 0x82, 0xf6, 0x3c, 0xdd, 0xf0, 0xd3, 0xd3, + 0x44, 0x98, 0x79, 0x19, 0xd2, 0x48, 0x66, 0x2c, 0x91, 0x89, 0x64, 0xad, 0x2d, 0x2c, 0x2f, 0xdb, + 0x53, 0x7b, 0x68, 0xbf, 0xd6, 0x71, 0xd3, 0x27, 0xfd, 0xf5, 0x19, 0x8f, 0xe6, 0x22, 0x07, 0x75, + 0xc5, 0x8a, 0x45, 0xd2, 0x08, 0x9a, 0x65, 0x60, 0x38, 0xab, 0x0e, 0x96, 0x98, 0xb2, 0x3f, 0xb9, + 0x54, 0x99, 0x1b, 0x91, 0xc1, 0x81, 0xe1, 0xd9, 0xbf, 0x0c, 0xcd, 0xaf, 0x64, 0xfc, 0x77, 0xdf, + 0xc9, 0xd7, 0x01, 0x9e, 0xbc, 0x53, 0x42, 0x2a, 0x61, 0xae, 0x5e, 0xa6, 0x5c, 0x6b, 0xfb, 0x13, + 0x1e, 0x35, 0x5b, 0xc5, 0xdc, 0x70, 0x07, 0xb9, 0xc8, 0x1b, 0x9f, 0x3d, 0xa6, 0x7d, 0x25, 0xdb, + 0x70, 0x5a, 0x2c, 0x92, 0x46, 0xd0, 0xb4, 0xa1, 0x69, 0x35, 0xa3, 0x6f, 0xc3, 0xcf, 0x10, 0x99, + 0x37, 0x60, 0xb8, 0x6f, 0x2f, 0x6f, 0x8f, 0xad, 0xfa, 0xf6, 0x18, 0xf7, 0x5a, 0xb0, 0x4d, 0xb5, + 0x1f, 0xe1, 0x61, 0xc5, 0xd3, 0x12, 0x9c, 0x81, 0x8b, 0xbc, 0xa1, 0x3f, 0xe9, 0xe0, 0xe1, 0xfb, + 0x46, 0x0c, 0xd6, 0x33, 0xfb, 0x05, 0x9e, 0x24, 0xa9, 0x0c, 0x79, 0x7a, 0x0e, 0x97, 0xbc, 0x4c, + 0x8d, 0x73, 0xe4, 0x22, 0x6f, 0xe4, 0x3f, 0xec, 0xe0, 0xc9, 0xc5, 0xee, 0x30, 0xd8, 0x67, 0xed, + 0xa7, 0x78, 0x1c, 0x83, 0x8e, 0x94, 0x28, 0x8c, 0x90, 0xb9, 0x73, 0xc7, 0x45, 0xde, 0x5d, 0xff, + 0x41, 0x67, 0x1d, 0x9f, 0xf7, 0xa3, 0x60, 0x97, 0x3b, 0xf9, 0x8e, 0xf0, 0xfd, 0xbd, 0x32, 0x5e, + 0x0b, 0x6d, 0xec, 0x8f, 0x07, 0x85, 0xd0, 0xff, 0x2b, 0xa4, 0x71, 0xb7, 0x75, 0xdc, 0xeb, 0x6e, + 0x1e, 0x6d, 0x94, 0x9d, 0x32, 0x02, 0x3c, 0x14, 0x06, 0x32, 0xed, 0x0c, 0xdc, 0x23, 0x6f, 0x7c, + 0x76, 0x4a, 0xff, 0xfe, 0xfc, 0xe8, 0xde, 0x7e, 0x7d, 0x77, 0xaf, 0x9a, 0x8c, 0x60, 0x1d, 0xe5, + 0xd3, 0xe5, 0x8a, 0x58, 0xd7, 0x2b, 0x62, 0xdd, 0xac, 0x88, 0xf5, 0xa5, 0x26, 0x68, 0x59, 0x13, + 0x74, 0x5d, 0x13, 0x74, 0x53, 0x13, 0xf4, 0xa3, 0x26, 0xe8, 0xdb, 0x4f, 0x62, 0x7d, 0x18, 0x6d, + 0x32, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xab, 0x20, 0x12, 0x63, 0x3c, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto index 682fb87..5fb5472 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto @@ -21,7 +21,6 @@ syntax = 
'proto2'; package k8s.io.api.scheduling.v1alpha1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,12 +28,11 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1alpha1"; -// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -54,19 +52,12 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. - // +optional - optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go index 86a2c51..21e3df0 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1alpha1 import ( - apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -25,13 +24,12 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -51,13 +49,6 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. 
- // +optional - PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -66,7 +57,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go index 63a9a35..f406f44 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go @@ -28,12 +28,11 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. 
However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -42,7 +41,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go index 0392823..fe0c860 100644 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,6 @@ limitations under the License. package v1alpha1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -30,11 +29,6 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.PreemptionPolicy != nil { - in, out := &in.PreemptionPolicy, &out.PreemptionPolicy - *out = new(v1.PreemptionPolicy) - **out = **in - } return } @@ -60,7 +54,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go index e661968..f2dd1cf 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/doc.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=scheduling.k8s.io - package v1beta1 // import "k8s.io/api/scheduling/v1beta1" diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go index b89af56..ea8f8d5 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go @@ -14,25 +14,30 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +// DO NOT EDIT! -package v1beta1 +/* + Package v1beta1 is a generated protocol buffer package. 
-import ( - fmt "fmt" + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto - io "io" + It has these top-level messages: + PriorityClass + PriorityClassList +*/ +package v1beta1 - proto "github.com/gogo/protobuf/proto" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - k8s_io_api_core_v1 "k8s.io/api/core/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -43,112 +48,24 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd406dede2d3f42, []int{0} -} -func (m *PriorityClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PriorityClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PriorityClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_PriorityClass.Merge(m, src) -} -func (m *PriorityClass) XXX_Size() int { - return m.Size() -} -func (m *PriorityClass) XXX_DiscardUnknown() { - xxx_messageInfo_PriorityClass.DiscardUnknown(m) -} +func (m *PriorityClass) Reset() { *m = PriorityClass{} } +func (*PriorityClass) ProtoMessage() {} +func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_PriorityClass proto.InternalMessageInfo - -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_6cd406dede2d3f42, []int{1} -} -func (m *PriorityClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PriorityClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PriorityClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PriorityClassList.Merge(m, src) -} -func (m *PriorityClassList) XXX_Size() int { - return m.Size() -} -func (m *PriorityClassList) XXX_DiscardUnknown() { - xxx_messageInfo_PriorityClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_PriorityClassList proto.InternalMessageInfo +func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } +func (*PriorityClassList) ProtoMessage() {} +func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func init() { proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass") proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptor_6cd406dede2d3f42) -} - -var fileDescriptor_6cd406dede2d3f42 = []byte{ - // 494 bytes of a gzipped 
FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x3f, 0x8f, 0xd3, 0x30, - 0x18, 0xc6, 0xeb, 0x1e, 0x15, 0xc5, 0x55, 0xa5, 0x12, 0x84, 0x14, 0x55, 0x22, 0xad, 0x7a, 0x4b, - 0x07, 0xce, 0xa6, 0x27, 0x40, 0x48, 0xb7, 0x95, 0x13, 0x08, 0x09, 0x44, 0xc9, 0xc0, 0x80, 0x18, - 0x70, 0x92, 0xf7, 0x52, 0xd3, 0x24, 0x8e, 0x6c, 0x27, 0x52, 0x37, 0x3e, 0x02, 0x1f, 0x8a, 0xa1, - 0xe3, 0x8d, 0x37, 0x55, 0x34, 0x7c, 0x04, 0x36, 0x26, 0x94, 0x34, 0x5c, 0xda, 0x86, 0x7f, 0x5b, - 0xfc, 0x3e, 0xbf, 0xe7, 0xb1, 0xfd, 0x24, 0xc1, 0xcf, 0x16, 0x4f, 0x14, 0xe1, 0x82, 0x2e, 0x12, - 0x07, 0x64, 0x04, 0x1a, 0x14, 0x4d, 0x21, 0xf2, 0x84, 0xa4, 0xa5, 0xc0, 0x62, 0x4e, 0x95, 0x3b, - 0x07, 0x2f, 0x09, 0x78, 0xe4, 0xd3, 0x74, 0xe2, 0x80, 0x66, 0x13, 0xea, 0x43, 0x04, 0x92, 0x69, - 0xf0, 0x48, 0x2c, 0x85, 0x16, 0xc6, 0xbd, 0x2d, 0x4e, 0x58, 0xcc, 0x49, 0x85, 0x93, 0x12, 0xef, - 0x9f, 0xf8, 0x5c, 0xcf, 0x13, 0x87, 0xb8, 0x22, 0xa4, 0xbe, 0xf0, 0x05, 0x2d, 0x5c, 0x4e, 0x72, - 0x51, 0xac, 0x8a, 0x45, 0xf1, 0xb4, 0x4d, 0xeb, 0x8f, 0x76, 0x36, 0x77, 0x85, 0x04, 0x9a, 0xd6, - 0x76, 0xec, 0x3f, 0xac, 0x98, 0x90, 0xb9, 0x73, 0x1e, 0x81, 0x5c, 0xd2, 0x78, 0xe1, 0xe7, 0x03, - 0x45, 0x43, 0xd0, 0xec, 0x77, 0x2e, 0xfa, 0x27, 0x97, 0x4c, 0x22, 0xcd, 0x43, 0xa8, 0x19, 0x1e, - 0xff, 0xcb, 0x90, 0xdf, 0x36, 0x64, 0x87, 0xbe, 0xd1, 0xf7, 0x26, 0xee, 0xce, 0x24, 0x17, 0x92, - 0xeb, 0xe5, 0xd3, 0x80, 0x29, 0x65, 0x7c, 0xc0, 0xed, 0xfc, 0x54, 0x1e, 0xd3, 0xcc, 0x44, 0x43, - 0x34, 0xee, 0x9c, 0x3e, 0x20, 0x55, 0x6b, 0xd7, 0xe1, 0x24, 0x5e, 0xf8, 0xf9, 0x40, 0x91, 0x9c, - 0x26, 0xe9, 0x84, 0xbc, 0x76, 0x3e, 0x82, 0xab, 0x5f, 0x81, 0x66, 0x53, 0x63, 0xb5, 0x1e, 0x34, - 0xb2, 0xf5, 0x00, 0x57, 0x33, 0xfb, 0x3a, 0xd5, 0x38, 0xc6, 0xad, 0x94, 0x05, 0x09, 0x98, 0xcd, - 0x21, 0x1a, 0xb7, 0xa6, 0xdd, 0x12, 0x6e, 0xbd, 0xcd, 0x87, 0xf6, 0x56, 0x33, 0xce, 0x70, 0xd7, - 0x0f, 0x84, 0xc3, 0x82, 0x73, 0xb8, 0x60, 0x49, 0xa0, 0xcd, 0xa3, 0x21, 0x1a, 0xb7, 0xa7, 0x77, - 0x4b, 0xb8, 0xfb, 0x7c, 0x57, 0xb4, 0xf7, 0x59, 0xe3, 0x11, 0xee, 0x78, 0xa0, 0x5c, 0xc9, 0x63, - 0xcd, 0x45, 0x64, 0xde, 0x18, 0xa2, 0xf1, 0xad, 0xe9, 0x9d, 0xd2, 0xda, 0x39, 0xaf, 0x24, 0x7b, - 0x97, 0x33, 0x7c, 0xdc, 0x8b, 0x25, 0x40, 0x58, 0xac, 0x66, 0x22, 0xe0, 0xee, 0xd2, 0x6c, 0x15, - 0xde, 0xb3, 0x6c, 0x3d, 0xe8, 0xcd, 0x0e, 0xb4, 0x1f, 0xeb, 0xc1, 0x71, 0xfd, 0x0b, 0x20, 0x87, - 0x98, 0x5d, 0x0b, 0x1d, 0x7d, 0x41, 0xf8, 0xf6, 0x5e, 0xeb, 0x2f, 0xb9, 0xd2, 0xc6, 0xfb, 0x5a, - 0xf3, 0xe4, 0xff, 0x9a, 0xcf, 0xdd, 0x45, 0xef, 0xbd, 0xf2, 0x8a, 0xed, 0x5f, 0x93, 0x9d, 0xd6, - 0xdf, 0xe0, 0x16, 0xd7, 0x10, 0x2a, 0xb3, 0x39, 0x3c, 0x1a, 0x77, 0x4e, 0xef, 0x93, 0xbf, 0xfe, - 0x0a, 0x64, 0xef, 0x78, 0xd5, 0x3b, 0x7a, 0x91, 0x47, 0xd8, 0xdb, 0xa4, 0xe9, 0xc9, 0x6a, 0x63, - 0x35, 0x2e, 0x37, 0x56, 0xe3, 0x6a, 0x63, 0x35, 0x3e, 0x65, 0x16, 0x5a, 0x65, 0x16, 0xba, 0xcc, - 0x2c, 0x74, 0x95, 0x59, 0xe8, 0x6b, 0x66, 0xa1, 0xcf, 0xdf, 0xac, 0xc6, 0xbb, 0x9b, 0x65, 0xe4, - 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1a, 0xc2, 0xc0, 0x1f, 0xc5, 0x03, 0x00, 0x00, -} - func (m *PriorityClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -156,55 +73,40 @@ func (m *PriorityClass) Marshal() (dAtA []byte, err error) { } func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PriorityClass) MarshalToSizedBuffer(dAtA []byte) (int, error) 
{ - i := len(dAtA) + var i int _ = i var l int _ = l - if m.PreemptionPolicy != nil { - i -= len(*m.PreemptionPolicy) - copy(dAtA[i:], *m.PreemptionPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PreemptionPolicy))) - i-- - dAtA[i] = 0x2a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i -= len(m.Description) - copy(dAtA[i:], m.Description) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i-- - dAtA[i] = 0x22 - i-- + i += n1 + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) + dAtA[i] = 0x18 + i++ if m.GlobalDefault { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - i-- - dAtA[i] = 0x10 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + return i, nil } func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -212,57 +114,61 @@ func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { } func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PriorityClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *PriorityClass) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = 
m.ObjectMeta.Size() @@ -271,17 +177,10 @@ func (m *PriorityClass) Size() (n int) { n += 2 l = len(m.Description) n += 1 + l + sovGenerated(uint64(l)) - if m.PreemptionPolicy != nil { - l = len(*m.PreemptionPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *PriorityClassList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -296,7 +195,14 @@ func (m *PriorityClassList) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -306,11 +212,10 @@ func (this *PriorityClass) String() string { return "nil" } s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Value:` + fmt.Sprintf("%v", this.Value) + `,`, `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `PreemptionPolicy:` + valueToStringGenerated(this.PreemptionPolicy) + `,`, `}`, }, "") return s @@ -319,14 +224,9 @@ func (this *PriorityClassList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PriorityClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -354,7 +254,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -382,7 +282,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -391,9 +291,6 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -415,7 +312,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Value |= int32(b&0x7F) << shift + m.Value |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -434,7 +331,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -454,7 +351,7 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -464,47 +361,11 @@ func (m *PriorityClass) 
Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Description = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreemptionPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PreemptionPolicy(dAtA[iNdEx:postIndex]) - m.PreemptionPolicy = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -514,9 +375,6 @@ func (m *PriorityClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -544,7 +402,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -572,7 +430,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -581,9 +439,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -605,7 +460,7 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -614,9 +469,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -634,9 +486,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -652,7 +501,6 @@ func (m *PriorityClassList) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -684,8 +532,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -702,34 +552,89 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, 
ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 448 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8b, 0xd3, 0x40, + 0x18, 0xc5, 0x33, 0x5d, 0x8b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0x28, 0x38, 0x1b, 0xd6, 0x4b, 0x0e, + 0xee, 0x8c, 0x5d, 0x54, 0x04, 0x6f, 0x71, 0x51, 0x04, 0x45, 0xcd, 0xc1, 0x83, 0x78, 0x70, 0x92, + 0x7c, 0x9b, 0x8e, 0x4d, 0x32, 0x61, 0x66, 0x12, 0xd8, 0x9b, 0x67, 0x4f, 0xfe, 0x51, 0x1e, 0x7a, + 0xdc, 0xe3, 0x9e, 0x16, 0x1b, 0xff, 0x11, 0x49, 0x1a, 0x37, 0xad, 0x45, 0xdd, 0x5b, 0xe6, 0x7d, + 0xbf, 0xf7, 0xe6, 0xcb, 0x63, 0xf0, 0xf3, 0xc5, 0x13, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, + 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, + 0xa6, 0x22, 0x4f, 0x58, 0x35, 0x0b, 0xc1, 0xf0, 0x19, 0x4b, 0x20, 0x07, 0xc5, 0x0d, 0xc4, 0xb4, + 0x50, 0xd2, 0x48, 0xfb, 0xee, 0x1a, 0xa7, 0xbc, 0x10, 0xb4, 0xc7, 0x69, 0x87, 0x4f, 0x0f, 0x13, + 0x61, 0xe6, 0x65, 0x48, 0x23, 0x99, 0xb1, 0x44, 0x26, 0x92, 0xb5, 0xae, 0xb0, 0x3c, 0x69, 0x4f, + 0xed, 0xa1, 0xfd, 0x5a, 0xa7, 0x4d, 0x1f, 0xf6, 0x97, 0x67, 0x3c, 0x9a, 0x8b, 0x1c, 0xd4, 0x29, + 0x2b, 0x16, 0x49, 0x23, 0x68, 0x96, 0x81, 0xe1, 0xac, 0xda, 0xd9, 0x61, 0xca, 0xfe, 0xe6, 0x52, + 0x65, 0x6e, 0x44, 0x06, 0x3b, 0x86, 0xc7, 0xff, 0x33, 0x34, 0x7f, 0x92, 0xf1, 0x3f, 0x7d, 0x07, + 0x5f, 0x07, 0x78, 0xf2, 0x56, 0x09, 0xa9, 0x84, 0x39, 0x7d, 0x96, 0x72, 0xad, 0xed, 0x4f, 0x78, + 0xd4, 0x6c, 0x15, 0x73, 0xc3, 0x1d, 0xe4, 0x22, 0x6f, 0x7c, 0xf4, 0x80, 0xf6, 0x8d, 0x5c, 0x86, + 0xd3, 0x62, 0x91, 0x34, 0x82, 0xa6, 0x0d, 0x4d, 0xab, 0x19, 0x7d, 0x13, 0x7e, 0x86, 0xc8, 0xbc, + 0x06, 0xc3, 0x7d, 0x7b, 0x79, 0xb1, 0x6f, 0xd5, 0x17, 0xfb, 0xb8, 0xd7, 0x82, 0xcb, 0x54, 0xfb, + 0x1e, 0x1e, 0x56, 0x3c, 0x2d, 0xc1, 0x19, 0xb8, 0xc8, 0x1b, 0xfa, 0x93, 0x0e, 0x1e, 0xbe, 0x6f, + 0xc4, 0x60, 0x3d, 0xb3, 0x9f, 0xe2, 0x49, 0x92, 0xca, 0x90, 0xa7, 0xc7, 0x70, 0xc2, 0xcb, 0xd4, + 0x38, 0x7b, 0x2e, 0xf2, 0x46, 0xfe, 0x9d, 0x0e, 0x9e, 0xbc, 0xd8, 0x1c, 0x06, 0xdb, 0xac, 0xfd, + 0x08, 0x8f, 0x63, 0xd0, 0x91, 0x12, 0x85, 0x11, 0x32, 0x77, 0xae, 0xb9, 0xc8, 0xbb, 0xe1, 0xdf, + 0xee, 0xac, 0xe3, 0xe3, 0x7e, 0x14, 0x6c, 0x72, 0x07, 0xdf, 0x11, 0xbe, 0xb5, 0x55, 0xc6, 0x2b, + 0xa1, 0x8d, 0xfd, 0x71, 0xa7, 0x10, 0x7a, 0xb5, 0x42, 
0x1a, 0x77, 0x5b, 0xc7, 0xcd, 0xee, 0xe6, + 0xd1, 0x6f, 0x65, 0xa3, 0x8c, 0x77, 0x78, 0x28, 0x0c, 0x64, 0xda, 0x19, 0xb8, 0x7b, 0xde, 0xf8, + 0xe8, 0x3e, 0xfd, 0xe7, 0xeb, 0xa3, 0x5b, 0xeb, 0xf5, 0xd5, 0xbd, 0x6c, 0x22, 0x82, 0x75, 0x92, + 0x7f, 0xb8, 0x5c, 0x11, 0xeb, 0x6c, 0x45, 0xac, 0xf3, 0x15, 0xb1, 0xbe, 0xd4, 0x04, 0x2d, 0x6b, + 0x82, 0xce, 0x6a, 0x82, 0xce, 0x6b, 0x82, 0x7e, 0xd4, 0x04, 0x7d, 0xfb, 0x49, 0xac, 0x0f, 0xd7, + 0xbb, 0xc8, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x41, 0x74, 0x8a, 0x60, 0x38, 0x03, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto index 2582891..0a95755 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto @@ -21,7 +21,6 @@ syntax = 'proto2'; package k8s.io.api.scheduling.v1beta1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -29,12 +28,11 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. message PriorityClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -54,19 +52,12 @@ message PriorityClass { // when this priority class should be used. // +optional optional string description = 4; - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. - // +optional - optional string preemptionPolicy = 5; } // PriorityClassList is a collection of priority classes. message PriorityClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go index f806ecd..a9aaa86 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types.go @@ -17,7 +17,6 @@ limitations under the License. package v1beta1 import ( - apiv1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -25,13 +24,12 @@ import ( // +genclient:nonNamespaced // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. // PriorityClass defines mapping from a priority class name to the priority // integer value. The value can be any valid integer. 
type PriorityClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -51,13 +49,6 @@ type PriorityClass struct { // when this priority class should be used. // +optional Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` - - // PreemptionPolicy is the Policy for preempting pods with lower priority. - // One of Never, PreemptLowerPriority. - // Defaults to PreemptLowerPriority if unset. - // This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature. - // +optional - PreemptionPolicy *apiv1.PreemptionPolicy `json:"preemptionPolicy,omitempty" protobuf:"bytes,5,opt,name=preemptionPolicy"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -66,7 +57,7 @@ type PriorityClass struct { type PriorityClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go index ffded9d..c18f54a 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go @@ -28,12 +28,11 @@ package v1beta1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_PriorityClass = map[string]string{ - "": "DEPRECATED - This group version of PriorityClass is deprecated by scheduling.k8s.io/v1/PriorityClass. PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", - "preemptionPolicy": "PreemptionPolicy is the Policy for preempting pods with lower priority. One of Never, PreemptLowerPriority. Defaults to PreemptLowerPriority if unset. This field is alpha-level and is only honored by servers that enable the NonPreemptingPriority feature.", + "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", + "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", + "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", + "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", } func (PriorityClass) SwaggerDoc() map[string]string { @@ -42,7 +41,7 @@ func (PriorityClass) SwaggerDoc() map[string]string { var map_PriorityClassList = map[string]string{ "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "items is the list of PriorityClasses", } diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go index 6e20085..6f68e4a 100644 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go @@ -21,7 +21,6 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -30,11 +29,6 @@ func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { *out = *in out.TypeMeta = in.TypeMeta in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.PreemptionPolicy != nil { - in, out := &in.PreemptionPolicy, &out.PreemptionPolicy - *out = new(v1.PreemptionPolicy) - **out = **in - } return } @@ -60,7 +54,7 @@ func (in *PriorityClass) DeepCopyObject() runtime.Object { func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PriorityClass, len(*in)) diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go index 60066bb..05a62c5 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ b/vendor/k8s.io/api/settings/v1alpha1/doc.go @@ -15,9 +15,7 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +k8s:openapi-gen=true // +groupName=settings.k8s.io - package v1alpha1 // import "k8s.io/api/settings/v1alpha1" diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go index 7ed066d..15285ba 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go @@ -14,24 +14,33 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto +// DO NOT EDIT! +/* + Package v1alpha1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto + + It has these top-level messages: + PodPreset + PodPresetList + PodPresetSpec +*/ package v1alpha1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" - proto "github.com/gogo/protobuf/proto" - v11 "k8s.io/api/core/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -42,144 +51,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{0} -} -func (m *PodPreset) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPreset) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPreset) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPreset.Merge(m, src) -} -func (m *PodPreset) XXX_Size() int { - return m.Size() -} -func (m *PodPreset) XXX_DiscardUnknown() { - xxx_messageInfo_PodPreset.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_PodPreset proto.InternalMessageInfo +func (m *PodPreset) Reset() { *m = PodPreset{} } +func (*PodPreset) ProtoMessage() {} +func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{1} -} -func (m *PodPresetList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPresetList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPresetList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPresetList.Merge(m, src) -} -func (m *PodPresetList) XXX_Size() int { - return m.Size() -} -func (m *PodPresetList) XXX_DiscardUnknown() { - xxx_messageInfo_PodPresetList.DiscardUnknown(m) -} - -var xxx_messageInfo_PodPresetList proto.InternalMessageInfo - -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_48fab0a6ea4b79ce, []int{2} -} -func (m *PodPresetSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PodPresetSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PodPresetSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_PodPresetSpec.Merge(m, src) -} -func (m *PodPresetSpec) XXX_Size() int { - return m.Size() -} -func (m *PodPresetSpec) XXX_DiscardUnknown() 
{ - xxx_messageInfo_PodPresetSpec.DiscardUnknown(m) -} +func (m *PodPresetList) Reset() { *m = PodPresetList{} } +func (*PodPresetList) ProtoMessage() {} +func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_PodPresetSpec proto.InternalMessageInfo +func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } +func (*PodPresetSpec) ProtoMessage() {} +func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func init() { proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptor_48fab0a6ea4b79ce) -} - -var fileDescriptor_48fab0a6ea4b79ce = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 
0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} - func (m *PodPreset) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -187,42 +81,33 @@ func (m *PodPreset) Marshal() (dAtA []byte, err error) { } func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPreset) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + return i, nil } func (m *PodPresetList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -230,46 +115,37 @@ func (m *PodPresetList) Marshal() (dAtA []byte, err error) { } func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPresetList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n3, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *PodPresetSpec) Marshal() (dAtA []byte, err 
error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -277,99 +153,97 @@ func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { } func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PodPresetSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.VolumeMounts) > 0 { - for iNdEx := len(m.VolumeMounts) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.VolumeMounts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) + n4, err := m.Selector.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.Volumes) > 0 { - for iNdEx := len(m.Volumes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Volumes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n4 + if len(m.Env) > 0 { + for _, msg := range m.Env { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n } } if len(m.EnvFrom) > 0 { - for iNdEx := len(m.EnvFrom) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.EnvFrom[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.EnvFrom { dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - if len(m.Env) > 0 { - for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Env[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Volumes) > 0 { + for _, msg := range m.Volumes { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - { - size, err := m.Selector.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if len(m.VolumeMounts) > 0 { + for _, msg := range m.VolumeMounts { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - 
base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *PodPreset) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -380,9 +254,6 @@ func (m *PodPreset) Size() (n int) { } func (m *PodPresetList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -397,9 +268,6 @@ func (m *PodPresetList) Size() (n int) { } func (m *PodPresetSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Selector.Size() @@ -432,7 +300,14 @@ func (m *PodPresetSpec) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -442,7 +317,7 @@ func (this *PodPreset) String() string { return "nil" } s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, `}`, }, "") @@ -452,14 +327,9 @@ func (this *PodPresetList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]PodPreset{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodPreset", "PodPreset", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -468,32 +338,12 @@ func (this *PodPresetSpec) String() string { if this == nil { return "nil" } - repeatedStringForEnv := "[]EnvVar{" - for _, f := range this.Env { - repeatedStringForEnv += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnv += "}" - repeatedStringForEnvFrom := "[]EnvFromSource{" - for _, f := range this.EnvFrom { - repeatedStringForEnvFrom += fmt.Sprintf("%v", f) + "," - } - repeatedStringForEnvFrom += "}" - repeatedStringForVolumes := "[]Volume{" - for _, f := range this.Volumes { - repeatedStringForVolumes += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumes += "}" - repeatedStringForVolumeMounts := "[]VolumeMount{" - for _, f := range this.VolumeMounts { - repeatedStringForVolumeMounts += fmt.Sprintf("%v", f) + "," - } - repeatedStringForVolumeMounts += "}" s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + repeatedStringForEnv + `,`, - `EnvFrom:` + repeatedStringForEnvFrom + `,`, - `Volumes:` + repeatedStringForVolumes + `,`, - `VolumeMounts:` + repeatedStringForVolumeMounts + `,`, + `Selector:` + 
strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, + `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, + `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_api_core_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, + `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_api_core_v1.Volume", 1), `&`, ``, 1) + `,`, + `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_api_core_v1.VolumeMount", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -521,7 +371,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -549,7 +399,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -558,9 +408,6 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -582,7 +429,7 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -591,9 +438,6 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -610,9 +454,6 @@ func (m *PodPreset) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -640,7 +481,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -668,7 +509,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -677,9 +518,6 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -701,7 +539,7 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -710,9 +548,6 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -730,9 +565,6 @@ func (m *PodPresetList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -760,7 +592,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) 
<< shift if b < 0x80 { break } @@ -788,7 +620,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -797,9 +629,6 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -821,7 +650,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -830,13 +659,10 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Env = append(m.Env, v11.EnvVar{}) + m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -855,7 +681,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -864,13 +690,10 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.EnvFrom = append(m.EnvFrom, v11.EnvFromSource{}) + m.EnvFrom = append(m.EnvFrom, k8s_io_api_core_v1.EnvFromSource{}) if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -889,7 +712,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -898,13 +721,10 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Volumes = append(m.Volumes, v11.Volume{}) + m.Volumes = append(m.Volumes, k8s_io_api_core_v1.Volume{}) if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -923,7 +743,7 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -932,13 +752,10 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.VolumeMounts = append(m.VolumeMounts, v11.VolumeMount{}) + m.VolumeMounts = append(m.VolumeMounts, k8s_io_api_core_v1.VolumeMount{}) if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -952,9 +769,6 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -970,7 +784,6 @@ func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := 
uint(0); ; shift += 7 { @@ -1002,8 +815,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1020,34 +835,95 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 542 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, + 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, + 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, + 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, + 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, + 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, + 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, + 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, + 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, + 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, + 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, + 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, + 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, + 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, + 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 0x75, 0xac, 0x53, 0xeb, + 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, + 0x9c, 
0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, + 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, + 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, + 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, + 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, + 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, + 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, + 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, + 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, + 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, + 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, + 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, + 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, + 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, + 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, + 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, + 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, + 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto index db1ec93..d5534c4 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/settings/v1alpha1/generated.proto @@ -42,7 +42,7 @@ message PodPreset { // PodPresetList is a list of PodPreset objects. message PodPresetList { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go index 8cc99d4..506aacf 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/types.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types.go @@ -61,7 +61,7 @@ type PodPresetSpec struct { type PodPresetList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go index 0501e0a..508c452 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go @@ -37,7 +37,7 @@ func (PodPreset) SwaggerDoc() map[string]string { var map_PodPresetList = map[string]string{ "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is a list of schema objects.", } diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go index ed6c31a..6397a88 100644 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go @@ -56,7 +56,7 @@ func (in *PodPreset) DeepCopyObject() runtime.Object { func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]PodPreset, len(*in)) diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go index 75a6489..8f4a404 100644 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ b/vendor/k8s.io/api/storage/v1/doc.go @@ -15,8 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true - -package v1 // import "k8s.io/api/storage/v1" +package v1 diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go index 9e57369..d43a982 100644 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1/generated.pb.go @@ -14,26 +14,34 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. 
+ + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import strings "strings" +import reflect "reflect" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -44,3603 +52,301 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *CSIDriver) Reset() { *m = CSIDriver{} } -func (*CSIDriver) ProtoMessage() {} -func (*CSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{0} -} -func (m *CSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriver.Merge(m, src) -} -func (m *CSIDriver) XXX_Size() int { - return m.Size() -} -func (m *CSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_CSIDriver proto.InternalMessageInfo - -func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } -func (*CSIDriverList) ProtoMessage() {} -func (*CSIDriverList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{1} -} -func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriverList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriverList.Merge(m, src) -} -func (m *CSIDriverList) XXX_Size() int { - return m.Size() -} -func (m *CSIDriverList) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriverList.DiscardUnknown(m) -} - -var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo - -func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } -func (*CSIDriverSpec) ProtoMessage() {} -func (*CSIDriverSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{2} -} -func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriverSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriverSpec.Merge(m, src) -} -func (m *CSIDriverSpec) XXX_Size() int { - return m.Size() -} -func (m *CSIDriverSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) -} - -var 
xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo - -func (m *CSINode) Reset() { *m = CSINode{} } -func (*CSINode) ProtoMessage() {} -func (*CSINode) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{3} -} -func (m *CSINode) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSINode) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSINode.Merge(m, src) -} -func (m *CSINode) XXX_Size() int { - return m.Size() -} -func (m *CSINode) XXX_DiscardUnknown() { - xxx_messageInfo_CSINode.DiscardUnknown(m) -} - -var xxx_messageInfo_CSINode proto.InternalMessageInfo - -func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } -func (*CSINodeDriver) ProtoMessage() {} -func (*CSINodeDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{4} -} -func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSINodeDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSINodeDriver.Merge(m, src) -} -func (m *CSINodeDriver) XXX_Size() int { - return m.Size() -} -func (m *CSINodeDriver) XXX_DiscardUnknown() { - xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) -} - -var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo - -func (m *CSINodeList) Reset() { *m = CSINodeList{} } -func (*CSINodeList) ProtoMessage() {} -func (*CSINodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{5} -} -func (m *CSINodeList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSINodeList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSINodeList.Merge(m, src) -} -func (m *CSINodeList) XXX_Size() int { - return m.Size() -} -func (m *CSINodeList) XXX_DiscardUnknown() { - xxx_messageInfo_CSINodeList.DiscardUnknown(m) -} - -var xxx_messageInfo_CSINodeList proto.InternalMessageInfo - -func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } -func (*CSINodeSpec) ProtoMessage() {} -func (*CSINodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{6} -} -func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSINodeSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSINodeSpec.Merge(m, src) -} -func (m *CSINodeSpec) XXX_Size() int { - return m.Size() -} -func (m *CSINodeSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo - -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{7} -} -func (m *StorageClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StorageClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_StorageClass.Merge(m, src) -} -func (m *StorageClass) XXX_Size() int { - return m.Size() -} -func (m *StorageClass) XXX_DiscardUnknown() { - xxx_messageInfo_StorageClass.DiscardUnknown(m) -} - -var xxx_messageInfo_StorageClass proto.InternalMessageInfo - -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{8} -} -func (m *StorageClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StorageClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_StorageClassList.Merge(m, src) -} -func (m *StorageClassList) XXX_Size() int { - return m.Size() -} -func (m *StorageClassList) XXX_DiscardUnknown() { - xxx_messageInfo_StorageClassList.DiscardUnknown(m) -} - -var xxx_messageInfo_StorageClassList proto.InternalMessageInfo - -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{9} -} -func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachment) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachment.Merge(m, src) -} -func (m *VolumeAttachment) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachment) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo - -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{10} -} -func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentList.Merge(m, src) -} -func (m *VolumeAttachmentList) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentList) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo - -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{11} -} -func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, 
deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) -} -func (m *VolumeAttachmentSource) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo - -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{12} -} -func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) -} -func (m *VolumeAttachmentSpec) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo - -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{13} -} -func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) -} -func (m *VolumeAttachmentStatus) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{14} -} -func (m *VolumeError) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeError) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeError.Merge(m, src) -} -func (m *VolumeError) XXX_Size() int { - return m.Size() -} -func (m *VolumeError) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeError.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeError proto.InternalMessageInfo - -func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } -func (*VolumeNodeResources) ProtoMessage() {} -func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_3b530c1983504d8d, []int{15} -} -func (m *VolumeNodeResources) 
XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeNodeResources.Merge(m, src) -} -func (m *VolumeNodeResources) XXX_Size() int { - return m.Size() -} -func (m *VolumeNodeResources) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) -} +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } func init() { - proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1.CSIDriver") - proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1.CSIDriverList") - proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1.CSIDriverSpec") - proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1.CSINode") - proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1.CSINodeDriver") - proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1.CSINodeList") - proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1.CSINodeSpec") proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.StorageClass.ParametersEntry") proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") - proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1.VolumeAttachment") - proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1.VolumeAttachmentList") - proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSource") - proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1.VolumeAttachmentSpec") - proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1.VolumeAttachmentStatus.AttachmentMetadataEntry") - proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1.VolumeError") - proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1.VolumeNodeResources") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptor_3b530c1983504d8d) -} - -var fileDescriptor_3b530c1983504d8d = []byte{ - // 1336 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4f, 0x6f, 0x1b, 0x45, - 0x14, 0xcf, 0xc6, 0xf9, 0x3b, 0x4e, 0x5a, 0x67, 0x1a, 0xc0, 0xe4, 0xe0, 0x8d, 0x96, 0x0a, 0x42, - 0xa1, 0xeb, 0xa6, 0x94, 0xaa, 0xaa, 0x04, 0x52, 0x36, 0x31, 0x22, 0x22, 0x4e, 0xa2, 0x49, 0xa9, - 0x10, 0x02, 0xc4, 0x64, 0xf7, 0xd5, 0xd9, 0xc6, 0xbb, 0xb3, 0xdd, 0x1d, 0x1b, 0x7c, 0xe3, 0xc4, - 0x0d, 0x09, 0xae, 0x7c, 0x0a, 0x90, 0xe0, 0xc2, 0x91, 0x53, 0xb9, 0x55, 0x9c, 0x7a, 0xb2, 0xe8, - 0x72, 0x05, 0x3e, 0x40, 0x4e, 0x68, 0x66, 0xc7, 0xde, 0xb5, 0xbd, 0x4e, 0xd3, 0x8b, 0x6f, 0x9e, - 0xf7, 0xde, 0xef, 0xf7, 0xde, 0x9b, 0xf7, 0x67, 0xd6, 0xe8, 0xfd, 0xd3, 
0x3b, 0x91, 0xe9, 0xb2, - 0xea, 0x69, 0xeb, 0x18, 0x42, 0x1f, 0x38, 0x44, 0xd5, 0x36, 0xf8, 0x0e, 0x0b, 0xab, 0x4a, 0x41, - 0x03, 0xb7, 0x1a, 0x71, 0x16, 0xd2, 0x06, 0x54, 0xdb, 0x9b, 0xd5, 0x06, 0xf8, 0x10, 0x52, 0x0e, - 0x8e, 0x19, 0x84, 0x8c, 0x33, 0xfc, 0x52, 0x62, 0x66, 0xd2, 0xc0, 0x35, 0x95, 0x99, 0xd9, 0xde, - 0x5c, 0xbb, 0xde, 0x70, 0xf9, 0x49, 0xeb, 0xd8, 0xb4, 0x99, 0x57, 0x6d, 0xb0, 0x06, 0xab, 0x4a, - 0xeb, 0xe3, 0xd6, 0x03, 0x79, 0x92, 0x07, 0xf9, 0x2b, 0x61, 0x59, 0x33, 0x32, 0xce, 0x6c, 0x16, - 0xe6, 0x79, 0x5a, 0xbb, 0x95, 0xda, 0x78, 0xd4, 0x3e, 0x71, 0x7d, 0x08, 0x3b, 0xd5, 0xe0, 0xb4, - 0x21, 0x04, 0x51, 0xd5, 0x03, 0x4e, 0xf3, 0x50, 0xd5, 0x71, 0xa8, 0xb0, 0xe5, 0x73, 0xd7, 0x83, - 0x11, 0xc0, 0xed, 0xe7, 0x01, 0x22, 0xfb, 0x04, 0x3c, 0x3a, 0x8c, 0x33, 0x7e, 0xd5, 0xd0, 0xe2, - 0xf6, 0xd1, 0xee, 0x4e, 0xe8, 0xb6, 0x21, 0xc4, 0x5f, 0xa2, 0x05, 0x11, 0x91, 0x43, 0x39, 0x2d, - 0x6b, 0xeb, 0xda, 0x46, 0xf1, 0xe6, 0x0d, 0x33, 0xbd, 0xa9, 0x3e, 0xb1, 0x19, 0x9c, 0x36, 0x84, - 0x20, 0x32, 0x85, 0xb5, 0xd9, 0xde, 0x34, 0x0f, 0x8e, 0x1f, 0x82, 0xcd, 0xeb, 0xc0, 0xa9, 0x85, - 0x1f, 0x77, 0xf5, 0xa9, 0xb8, 0xab, 0xa3, 0x54, 0x46, 0xfa, 0xac, 0xf8, 0x03, 0x34, 0x13, 0x05, - 0x60, 0x97, 0xa7, 0x25, 0xfb, 0x55, 0x33, 0xb7, 0x0e, 0x66, 0x3f, 0xa2, 0xa3, 0x00, 0x6c, 0x6b, - 0x49, 0x31, 0xce, 0x88, 0x13, 0x91, 0x78, 0xe3, 0x17, 0x0d, 0x2d, 0xf7, 0xad, 0xf6, 0xdc, 0x88, - 0xe3, 0xcf, 0x46, 0x62, 0x37, 0x2f, 0x16, 0xbb, 0x40, 0xcb, 0xc8, 0x4b, 0xca, 0xcf, 0x42, 0x4f, - 0x92, 0x89, 0xbb, 0x86, 0x66, 0x5d, 0x0e, 0x5e, 0x54, 0x9e, 0x5e, 0x2f, 0x6c, 0x14, 0x6f, 0xae, - 0x3f, 0x2f, 0x70, 0x6b, 0x59, 0x91, 0xcd, 0xee, 0x0a, 0x18, 0x49, 0xd0, 0xc6, 0x3f, 0xd9, 0xb0, - 0x45, 0x3a, 0xf8, 0x2e, 0xba, 0x44, 0x39, 0xa7, 0xf6, 0x09, 0x81, 0x47, 0x2d, 0x37, 0x04, 0x47, - 0x06, 0xbf, 0x60, 0xe1, 0xb8, 0xab, 0x5f, 0xda, 0x1a, 0xd0, 0x90, 0x21, 0x4b, 0x81, 0x0d, 0x98, - 0xb3, 0xeb, 0x3f, 0x60, 0x07, 0x7e, 0x9d, 0xb5, 0x7c, 0x2e, 0xaf, 0x55, 0x61, 0x0f, 0x07, 0x34, - 0x64, 0xc8, 0x12, 0xdb, 0x68, 0xb5, 0xcd, 0x9a, 0x2d, 0x0f, 0xf6, 0xdc, 0x07, 0x60, 0x77, 0xec, - 0x26, 0xd4, 0x99, 0x03, 0x51, 0xb9, 0xb0, 0x5e, 0xd8, 0x58, 0xb4, 0xaa, 0x71, 0x57, 0x5f, 0xbd, - 0x9f, 0xa3, 0x3f, 0xeb, 0xea, 0x57, 0x72, 0xe4, 0x24, 0x97, 0xcc, 0xf8, 0x59, 0x43, 0xf3, 0xdb, - 0x47, 0xbb, 0xfb, 0xcc, 0x81, 0x09, 0xf4, 0xd6, 0xce, 0x40, 0x6f, 0x19, 0xe3, 0x4b, 0x24, 0xe2, - 0x19, 0xdb, 0x59, 0xff, 0x25, 0x25, 0x12, 0x36, 0x6a, 0x2a, 0xd6, 0xd1, 0x8c, 0x4f, 0x3d, 0x90, - 0x51, 0x2f, 0xa6, 0x98, 0x7d, 0xea, 0x01, 0x91, 0x1a, 0xfc, 0x3a, 0x9a, 0xf3, 0x99, 0x03, 0xbb, - 0x3b, 0xd2, 0xf7, 0xa2, 0x75, 0x49, 0xd9, 0xcc, 0xed, 0x4b, 0x29, 0x51, 0x5a, 0x7c, 0x0b, 0x2d, - 0x71, 0x16, 0xb0, 0x26, 0x6b, 0x74, 0x3e, 0x82, 0x4e, 0xef, 0xb2, 0x4b, 0x71, 0x57, 0x5f, 0xba, - 0x97, 0x91, 0x93, 0x01, 0x2b, 0xfc, 0x39, 0x2a, 0xd2, 0x66, 0x93, 0xd9, 0x94, 0xd3, 0xe3, 0x26, - 0x94, 0x67, 0x64, 0x7a, 0xd7, 0xc6, 0xa4, 0x97, 0x14, 0x47, 0xf8, 0x25, 0x10, 0xb1, 0x56, 0x68, - 0x43, 0x64, 0x5d, 0x8e, 0xbb, 0x7a, 0x71, 0x2b, 0xa5, 0x20, 0x59, 0x3e, 0xe3, 0x27, 0x0d, 0x15, - 0x55, 0xc2, 0x13, 0x18, 0xa4, 0xed, 0xc1, 0x41, 0xaa, 0x9c, 0x5f, 0xa5, 0x31, 0x63, 0xf4, 0x45, - 0x3f, 0x62, 0x39, 0x43, 0x07, 0x68, 0xde, 0x91, 0xa5, 0x8a, 0xca, 0x9a, 0x64, 0xbd, 0x7a, 0x3e, - 0xab, 0x1a, 0xd1, 0xcb, 0x8a, 0x7b, 0x3e, 0x39, 0x47, 0xa4, 0xc7, 0x62, 0x7c, 0x37, 0x87, 0x96, - 0x8e, 0x12, 0xd8, 0x76, 0x93, 0x46, 0xd1, 0x04, 0x9a, 0xf7, 0x5d, 0x54, 0x0c, 0x42, 0xd6, 0x76, - 0x23, 0x97, 0xf9, 0x10, 0xaa, 0x3e, 0xba, 0xa2, 0x20, 0xc5, 0xc3, 0x54, 0x45, 0xb2, 0x76, 0xb8, - 
0x81, 0x50, 0x40, 0x43, 0xea, 0x01, 0x17, 0xd9, 0x17, 0x64, 0xf6, 0xef, 0x8c, 0xc9, 0x3e, 0x9b, - 0x91, 0x79, 0xd8, 0x47, 0xd5, 0x7c, 0x1e, 0x76, 0xd2, 0xe8, 0x52, 0x05, 0xc9, 0x50, 0xe3, 0x53, - 0xb4, 0x1c, 0x82, 0xdd, 0xa4, 0xae, 0x77, 0xc8, 0x9a, 0xae, 0xdd, 0x91, 0x6d, 0xb8, 0x68, 0xd5, - 0xe2, 0xae, 0xbe, 0x4c, 0xb2, 0x8a, 0xb3, 0xae, 0x7e, 0x63, 0xf4, 0x5d, 0x34, 0x0f, 0x21, 0x8c, - 0xdc, 0x88, 0x83, 0xcf, 0x93, 0x0e, 0x1d, 0xc0, 0x90, 0x41, 0x6e, 0x31, 0x27, 0x9e, 0xd8, 0x52, - 0x07, 0x01, 0x77, 0x99, 0x1f, 0x95, 0x67, 0xd3, 0x39, 0xa9, 0x67, 0xe4, 0x64, 0xc0, 0x0a, 0xef, - 0xa1, 0x55, 0xd1, 0xd7, 0x5f, 0x25, 0x0e, 0x6a, 0x5f, 0x07, 0xd4, 0x17, 0xb7, 0x54, 0x9e, 0x93, - 0x4b, 0xb1, 0x2c, 0x56, 0xda, 0x56, 0x8e, 0x9e, 0xe4, 0xa2, 0xf0, 0x27, 0x68, 0x25, 0xd9, 0x69, - 0x96, 0xeb, 0x3b, 0xae, 0xdf, 0x10, 0x1b, 0xad, 0x3c, 0x2f, 0x93, 0xbe, 0x16, 0x77, 0xf5, 0x95, - 0xfb, 0xc3, 0xca, 0xb3, 0x3c, 0x21, 0x19, 0x25, 0xc1, 0x8f, 0xd0, 0x8a, 0xf4, 0x08, 0x8e, 0x1a, - 0x7a, 0x17, 0xa2, 0xf2, 0x82, 0x2c, 0xdd, 0x46, 0xb6, 0x74, 0xe2, 0xea, 0x44, 0xdd, 0x7a, 0xab, - 0xe1, 0x08, 0x9a, 0x60, 0x73, 0x16, 0xde, 0x83, 0xd0, 0xb3, 0x5e, 0x55, 0xf5, 0x5a, 0xd9, 0x1a, - 0xa6, 0x22, 0xa3, 0xec, 0x6b, 0xef, 0xa1, 0xcb, 0x43, 0x05, 0xc7, 0x25, 0x54, 0x38, 0x85, 0x4e, - 0xb2, 0xd4, 0x88, 0xf8, 0x89, 0x57, 0xd1, 0x6c, 0x9b, 0x36, 0x5b, 0x90, 0x34, 0x1f, 0x49, 0x0e, - 0x77, 0xa7, 0xef, 0x68, 0xc6, 0x6f, 0x1a, 0x2a, 0x65, 0xbb, 0x67, 0x02, 0x7b, 0xe2, 0xc3, 0xc1, - 0x3d, 0xf1, 0xda, 0x05, 0x7a, 0x7a, 0xcc, 0xb2, 0xf8, 0x71, 0x1a, 0x95, 0x92, 0xba, 0x24, 0xcf, - 0xa9, 0x07, 0x3e, 0x9f, 0xc0, 0x40, 0xd7, 0x07, 0x5e, 0xa3, 0xb7, 0xce, 0x5d, 0xd7, 0x69, 0x60, - 0xe3, 0x9e, 0x25, 0xfc, 0x31, 0x9a, 0x8b, 0x38, 0xe5, 0x2d, 0x31, 0xe4, 0x82, 0xf0, 0xfa, 0x45, - 0x09, 0x25, 0x28, 0x7d, 0x91, 0x92, 0x33, 0x51, 0x64, 0xc6, 0xef, 0x1a, 0x5a, 0x1d, 0x86, 0x4c, - 0xa0, 0xba, 0x7b, 0x83, 0xd5, 0x7d, 0xe3, 0x82, 0xc9, 0x8c, 0xa9, 0xf0, 0x9f, 0x1a, 0x7a, 0x79, - 0x24, 0x6f, 0xf9, 0xf6, 0x89, 0x9d, 0x10, 0x0c, 0x6d, 0x9e, 0xfd, 0xf4, 0x2d, 0x97, 0x3b, 0xe1, - 0x30, 0x47, 0x4f, 0x72, 0x51, 0xf8, 0x21, 0x2a, 0xb9, 0x7e, 0xd3, 0xf5, 0x21, 0x91, 0x1d, 0xa5, - 0xf5, 0xcd, 0x1d, 0xdc, 0x61, 0x66, 0x59, 0xdc, 0xd5, 0xb8, 0xab, 0x97, 0x76, 0x87, 0x58, 0xc8, - 0x08, 0xaf, 0xf1, 0x47, 0x4e, 0x65, 0xe4, 0x6b, 0xf7, 0x36, 0x5a, 0x48, 0xbe, 0x03, 0x21, 0x54, - 0x69, 0xf4, 0x6f, 0x7a, 0x4b, 0xc9, 0x49, 0xdf, 0x42, 0xf6, 0x8d, 0xbc, 0x0a, 0x15, 0xe8, 0x85, - 0xfb, 0x46, 0x82, 0x32, 0x7d, 0x23, 0xcf, 0x44, 0x91, 0x89, 0x20, 0xc4, 0x37, 0x8d, 0xbc, 0xcb, - 0xc2, 0x60, 0x10, 0xfb, 0x4a, 0x4e, 0xfa, 0x16, 0xc6, 0xbf, 0x85, 0x9c, 0x02, 0xc9, 0x06, 0xcc, - 0x64, 0xd3, 0xfb, 0xf2, 0x1d, 0xce, 0xc6, 0xe9, 0x67, 0xe3, 0xe0, 0x1f, 0x34, 0x84, 0x69, 0x9f, - 0xa2, 0xde, 0x6b, 0xd0, 0xa4, 0x8b, 0x6a, 0x2f, 0x34, 0x12, 0xe6, 0xd6, 0x08, 0x4f, 0xf2, 0x12, - 0xae, 0x29, 0xff, 0x78, 0xd4, 0x80, 0xe4, 0x38, 0xc7, 0x0e, 0x2a, 0x26, 0xd2, 0x5a, 0x18, 0xb2, - 0x50, 0x8d, 0xa7, 0x71, 0x6e, 0x2c, 0xd2, 0xd2, 0xaa, 0xc8, 0xcf, 0xb2, 0x14, 0x7a, 0xd6, 0xd5, - 0x8b, 0x19, 0x3d, 0xc9, 0xd2, 0x0a, 0x2f, 0x0e, 0xa4, 0x5e, 0x66, 0x5e, 0xcc, 0xcb, 0x0e, 0x8c, - 0xf7, 0x92, 0xa1, 0x5d, 0xab, 0xa1, 0x57, 0xc6, 0x5c, 0xcb, 0x0b, 0xbd, 0x17, 0xdf, 0x6a, 0x28, - 0xeb, 0x03, 0xef, 0xa1, 0x19, 0xf1, 0x27, 0x54, 0x2d, 0x92, 0x6b, 0x17, 0x5b, 0x24, 0xf7, 0x5c, - 0x0f, 0xd2, 0x55, 0x28, 0x4e, 0x44, 0xb2, 0xe0, 0x37, 0xd1, 0xbc, 0x07, 0x51, 0x44, 0x1b, 0xca, - 0x73, 0xfa, 0x21, 0x57, 0x4f, 0xc4, 0xa4, 0xa7, 0x37, 0x6e, 0xa3, 0x2b, 0x39, 0x1f, 0xc4, 0x58, - 0x47, 0xb3, 0xb6, 0xfc, 
0xbf, 0x24, 0x02, 0x9a, 0xb5, 0x16, 0xc5, 0x46, 0xd9, 0x96, 0x7f, 0x93, - 0x12, 0xb9, 0xb5, 0xf1, 0xf8, 0x59, 0x65, 0xea, 0xc9, 0xb3, 0xca, 0xd4, 0xd3, 0x67, 0x95, 0xa9, - 0x6f, 0xe2, 0x8a, 0xf6, 0x38, 0xae, 0x68, 0x4f, 0xe2, 0x8a, 0xf6, 0x34, 0xae, 0x68, 0x7f, 0xc5, - 0x15, 0xed, 0xfb, 0xbf, 0x2b, 0x53, 0x9f, 0x4e, 0xb7, 0x37, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, - 0x9b, 0x74, 0xdf, 0x56, 0x8a, 0x10, 0x00, 0x00, } - -func (m *CSIDriver) Marshal() (dAtA []byte, err error) { +func (m *StorageClass) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i += copy(dAtA[i:], m.Provisioner) + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for _, k := range keysForParameters { + dAtA[i] = 0x1a + i++ + v := m.Parameters[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.ReclaimPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i += copy(dAtA[i:], *m.ReclaimPolicy) } - return dAtA[:n], nil -} - -func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x12 + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - { - 
size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.AllowVolumeExpansion != nil { + dAtA[i] = 0x30 + i++ + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } + if len(m.AllowedTopologies) > 0 { + for _, msg := range m.AllowedTopologies { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { +func (m *StorageClassList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.VolumeLifecycleModes) > 0 { - for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.VolumeLifecycleModes[iNdEx]) - copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.PodInfoOnMount != nil { - i-- - if *m.PodInfoOnMount { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.AttachRequired != nil { - i-- - if *m.AttachRequired { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CSINode) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return dAtA[:n], nil -} - -func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Allocatable != nil { - { - size, err := 
m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if len(m.TopologyKeys) > 0 { - for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.TopologyKeys[iNdEx]) - copy(dAtA[i:], m.TopologyKeys[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.NodeID) - copy(dAtA[i:], m.NodeID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CSINodeList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + return i, nil } -func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Drivers) > 0 { - for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 } - -func (m *StorageClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] 
[diff hunks for a vendored gogo-protobuf `generated.pb.go` covering Kubernetes storage API types. The newer generator's output — `Marshal`, `MarshalTo`, `MarshalToSizedBuffer`, `Size`, `String`, and `Unmarshal` methods, plus the offset-from-end `encodeVarintGenerated` and `math_bits`-based `sovGenerated` helpers, for CSIDriver, CSIDriverList, CSIDriverSpec, CSINode, CSINodeDriver, CSINodeList, CSINodeSpec, StorageClass, StorageClassList, VolumeAttachment, VolumeAttachmentList, VolumeAttachmentSource, VolumeAttachmentSpec, VolumeAttachmentStatus, VolumeError, and VolumeNodeResources — is removed and replaced with the older generator's forward-encoding `encodeVarintGenerated`, loop-based `sovGenerated`, and `Size`, `String`, and `Unmarshal` methods for StorageClass and StorageClassList only.]
iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Parameters[mapkey] = mapvalue + } else { + var mapvalue string + m.Parameters[mapkey] = mapvalue } - m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex - case 3: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4102,33 +564,27 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.AttachError == nil { - m.AttachError = &VolumeError{} - } - if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) + m.ReclaimPolicy = &s iNdEx = postIndex - case 4: + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4138,86 +594,47 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.DetachError == nil { - m.DetachError = &VolumeError{} - } - if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); 
err != nil { - return err - } + m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeError) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + b := bool(v != 0) + m.AllowVolumeExpansion = &b + case 7: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4227,30 +644,27 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + s := VolumeBindingMode(dAtA[iNdEx:postIndex]) + m.VolumeBindingMode = &s iNdEx = postIndex - case 2: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4260,23 +674,22 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = string(dAtA[iNdEx:postIndex]) + m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) + 
if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -4287,9 +700,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4302,7 +712,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } return nil } -func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { +func (m *StorageClassList) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -4317,7 +727,7 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4325,17 +735,47 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") + return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) } - var v int32 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4345,12 +785,23 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int32(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - m.Count = &v + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, StorageClass{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -4360,9 +811,6 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4378,7 +826,6 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -4410,8 +857,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return 
iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -4428,34 +877,102 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 656 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x4a, + 0x14, 0x8e, 0x93, 0x9b, 0xde, 0x74, 0xd2, 0xea, 0x26, 0xbe, 0xbd, 0x92, 0x6f, 0x16, 0x4e, 0x54, + 0x36, 0x11, 0x12, 0xe3, 0xa6, 0x14, 0x54, 0x21, 0x81, 0x54, 0xa3, 0x4a, 0x20, 0xb5, 0x6a, 0xe4, + 0x56, 0x15, 0x42, 0x2c, 0x98, 0x38, 0x07, 0x77, 0x88, 0xed, 0x31, 0x33, 0x63, 0x43, 0x76, 0xbc, + 0x00, 0x12, 0xcf, 0xc3, 0x13, 0x74, 0xd9, 0x65, 0x57, 0x16, 0x35, 0x6f, 0xd1, 0x15, 0xf2, 0x0f, + 0x8d, 0x9b, 0x04, 0xd1, 0xdd, 0xcc, 0x77, 0xbe, 0xef, 0x3b, 0x33, 0xe7, 0x07, 0x3d, 0x9b, 0xec, + 0x0a, 0x4c, 0x99, 0x31, 0x09, 0x47, 0xc0, 0x7d, 0x90, 0x20, 0x8c, 0x08, 0xfc, 0x31, 0xe3, 0x46, + 0x11, 0x20, 0x01, 0x35, 0x84, 0x64, 0x9c, 0x38, 0x60, 0x44, 0x03, 0xc3, 0x01, 0x1f, 0x38, 0x91, + 0x30, 0xc6, 0x01, 0x67, 0x92, 0xa9, 0xff, 0xe5, 0x34, 0x4c, 0x02, 0x8a, 0x0b, 0x1a, 0x8e, 0x06, + 0x9d, 0x07, 0x0e, 0x95, 0x67, 0xe1, 0x08, 0xdb, 0xcc, 0x33, 0x1c, 0xe6, 0x30, 0x23, 0x63, 0x8f, + 0xc2, 0x77, 0xd9, 0x2d, 0xbb, 0x64, 0xa7, 0xdc, 0xa5, 0xb3, 0x59, 0x4a, 0x66, 0x33, 0xbe, 0x2c, + 0x53, 0x67, 0x67, 0xc6, 0xf1, 0x88, 0x7d, 0x46, 0x7d, 0xe0, 0x53, 0x23, 0x98, 0x38, 0x29, 0x20, + 0x0c, 0x0f, 0x24, 0x59, 0xa6, 0x32, 0x7e, 0xa7, 0xe2, 0xa1, 0x2f, 0xa9, 0x07, 0x0b, 0x82, 0xc7, + 0x7f, 0x12, 0x08, 0xfb, 0x0c, 0x3c, 0x32, 0xaf, 0xdb, 0xfc, 0xb2, 0x82, 0xd6, 0x8e, 0xf3, 0x02, + 0x3c, 0x77, 0x89, 0x10, 0xea, 0x5b, 0xd4, 0x48, 0x1f, 0x35, 0x26, 0x92, 0x68, 0x4a, 0x4f, 0xe9, + 0x37, 0xb7, 0xb7, 0xf0, 0xac, 0x58, 0x37, 0xde, 0x38, 0x98, 0x38, 0x29, 0x20, 0x70, 0xca, 0xc6, + 0xd1, 0x00, 0x1f, 0x8d, 0xde, 0x83, 0x2d, 0x0f, 0x41, 0x12, 0x53, 0x3d, 0x8f, 0xbb, 0x95, 0x24, + 0xee, 0xa2, 0x19, 0x66, 0xdd, 
0xb8, 0xaa, 0x8f, 0x50, 0x33, 0xe0, 0x2c, 0xa2, 0x82, 0x32, 0x1f, + 0xb8, 0x56, 0xed, 0x29, 0xfd, 0x55, 0xf3, 0xdf, 0x42, 0xd2, 0x1c, 0xce, 0x42, 0x56, 0x99, 0xa7, + 0x3a, 0x08, 0x05, 0x84, 0x13, 0x0f, 0x24, 0x70, 0xa1, 0xd5, 0x7a, 0xb5, 0x7e, 0x73, 0xfb, 0x21, + 0x5e, 0xda, 0x47, 0x5c, 0xfe, 0x11, 0x1e, 0xde, 0xa8, 0xf6, 0x7d, 0xc9, 0xa7, 0xb3, 0xd7, 0xcd, + 0x02, 0x56, 0xc9, 0x5a, 0x9d, 0xa0, 0x75, 0x0e, 0xb6, 0x4b, 0xa8, 0x37, 0x64, 0x2e, 0xb5, 0xa7, + 0xda, 0x5f, 0xd9, 0x0b, 0xf7, 0x93, 0xb8, 0xbb, 0x6e, 0x95, 0x03, 0xd7, 0x71, 0x77, 0x6b, 0x71, + 0x02, 0xf0, 0x10, 0xb8, 0xa0, 0x42, 0x82, 0x2f, 0x4f, 0x99, 0x1b, 0x7a, 0x70, 0x4b, 0x63, 0xdd, + 0xf6, 0x56, 0x77, 0xd0, 0x9a, 0xc7, 0x42, 0x5f, 0x1e, 0x05, 0x92, 0x32, 0x5f, 0x68, 0xf5, 0x5e, + 0xad, 0xbf, 0x6a, 0xb6, 0x92, 0xb8, 0xbb, 0x76, 0x58, 0xc2, 0xad, 0x5b, 0x2c, 0xf5, 0x00, 0x6d, + 0x10, 0xd7, 0x65, 0x1f, 0xf3, 0x04, 0xfb, 0x9f, 0x02, 0xe2, 0xa7, 0x55, 0xd2, 0x56, 0x7a, 0x4a, + 0xbf, 0x61, 0x6a, 0x49, 0xdc, 0xdd, 0xd8, 0x5b, 0x12, 0xb7, 0x96, 0xaa, 0xd4, 0x57, 0xa8, 0x1d, + 0x65, 0x90, 0x49, 0xfd, 0x31, 0xf5, 0x9d, 0x43, 0x36, 0x06, 0xed, 0xef, 0xec, 0xd3, 0xf7, 0x93, + 0xb8, 0xdb, 0x3e, 0x9d, 0x0f, 0x5e, 0x2f, 0x03, 0xad, 0x45, 0x13, 0xf5, 0x03, 0x6a, 0x67, 0x19, + 0x61, 0x7c, 0xc2, 0x02, 0xe6, 0x32, 0x87, 0x82, 0xd0, 0x1a, 0x59, 0xeb, 0xfa, 0xe5, 0xd6, 0xa5, + 0xa5, 0x4b, 0xfb, 0x56, 0xb0, 0xa6, 0xc7, 0xe0, 0x82, 0x2d, 0x19, 0x3f, 0x01, 0xee, 0x99, 0xff, + 0x17, 0xfd, 0x6a, 0xef, 0xcd, 0x5b, 0x59, 0x8b, 0xee, 0x9d, 0xa7, 0xe8, 0x9f, 0xb9, 0x86, 0xab, + 0x2d, 0x54, 0x9b, 0xc0, 0x34, 0x9b, 0xe6, 0x55, 0x2b, 0x3d, 0xaa, 0x1b, 0xa8, 0x1e, 0x11, 0x37, + 0x84, 0x7c, 0xf8, 0xac, 0xfc, 0xf2, 0xa4, 0xba, 0xab, 0x6c, 0x7e, 0x53, 0x50, 0xab, 0x3c, 0x3d, + 0x07, 0x54, 0x48, 0xf5, 0xcd, 0xc2, 0x4e, 0xe0, 0xbb, 0xed, 0x44, 0xaa, 0xce, 0x36, 0xa2, 0x55, + 0xfc, 0xa1, 0xf1, 0x0b, 0x29, 0xed, 0xc3, 0x0b, 0x54, 0xa7, 0x12, 0x3c, 0xa1, 0x55, 0xb3, 0xc2, + 0xdc, 0xbb, 0xc3, 0x4c, 0x9b, 0xeb, 0x85, 0x5f, 0xfd, 0x65, 0xaa, 0xb4, 0x72, 0x03, 0xb3, 0x7f, + 0x7e, 0xa5, 0x57, 0x2e, 0xae, 0xf4, 0xca, 0xe5, 0x95, 0x5e, 0xf9, 0x9c, 0xe8, 0xca, 0x79, 0xa2, + 0x2b, 0x17, 0x89, 0xae, 0x5c, 0x26, 0xba, 0xf2, 0x3d, 0xd1, 0x95, 0xaf, 0x3f, 0xf4, 0xca, 0xeb, + 0x6a, 0x34, 0xf8, 0x19, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x64, 0x41, 0x83, 0x40, 0x05, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto index cb3c42c..d178565 100644 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ b/vendor/k8s.io/api/storage/v1/generated.proto @@ -29,179 +29,14 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1"; -// CSIDriver captures information about a Container Storage Interface (CSI) -// volume driver deployed on the cluster. -// Kubernetes attach detach controller uses this object to determine whether attach is required. -// Kubelet uses this object to determine whether pod information needs to be passed on mount. -// CSIDriver objects are non-namespaced. -message CSIDriver { - // Standard object metadata. - // metadata.Name indicates the name of the CSI driver that this object - // refers to; it MUST be the same name returned by the CSI GetPluginName() - // call for that driver. - // The driver name must be 63 characters or less, beginning and ending with - // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and - // alphanumerics between. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the CSI Driver. - optional CSIDriverSpec spec = 2; -} - -// CSIDriverList is a collection of CSIDriver objects. -message CSIDriverList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of CSIDriver - repeated CSIDriver items = 2; -} - -// CSIDriverSpec is the specification of a CSIDriver. -message CSIDriverSpec { - // attachRequired indicates this CSI volume driver requires an attach - // operation (because it implements the CSI ControllerPublishVolume() - // method), and that the Kubernetes attach detach controller should call - // the attach volume interface which checks the volumeattachment status - // and waits until the volume is attached before proceeding to mounting. - // The CSI external-attacher coordinates with CSI volume driver and updates - // the volumeattachment status when the attach operation is complete. - // If the CSIDriverRegistry feature gate is enabled and the value is - // specified to false, the attach operation will be skipped. - // Otherwise the attach operation will be called. - // +optional - optional bool attachRequired = 1; - - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. - // If set to false, pod information will not be passed on mount. - // Default is false. - // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. - // This list might grow, but the prefix will be used. - // "csi.storage.k8s.io/pod.name": pod.Name - // "csi.storage.k8s.io/pod.namespace": pod.Namespace - // "csi.storage.k8s.io/pod.uid": string(pod.UID) - // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume - // defined by a CSIVolumeSource, otherwise "false" - // - // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only - // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. - // Other drivers can leave pod info disabled and/or ignore this field. - // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when - // deployed on such a cluster and the deployment determines which mode that is, for example - // via a command line parameter of the driver. - // +optional - optional bool podInfoOnMount = 2; - - // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. 
A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. - // For more information about implementing this mode, see - // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. - // +optional - // +listType=set - repeated string volumeLifecycleModes = 3; -} - -// CSINode holds information about all CSI drivers installed on a node. -// CSI drivers do not need to create the CSINode object directly. As long as -// they use the node-driver-registrar sidecar container, the kubelet will -// automatically populate the CSINode object for the CSI driver as part of -// kubelet plugin registration. -// CSINode has the same name as a node. If the object is missing, it means either -// there are no CSI Drivers available on the node, or the Kubelet version is low -// enough that it doesn't create this object. -// CSINode has an OwnerReference that points to the corresponding node object. -message CSINode { - // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the specification of CSINode - optional CSINodeSpec spec = 2; -} - -// CSINodeDriver holds information about the specification of one CSI driver installed on a node -message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. - // This MUST be the same name returned by the CSI GetPluginName() call for - // that driver. - optional string name = 1; - - // nodeID of the node from the driver point of view. - // This field enables Kubernetes to communicate with storage systems that do - // not share the same nomenclature for nodes. For example, Kubernetes may - // refer to a given node as "node1", but the storage system may refer to - // the same node as "nodeA". When Kubernetes issues a command to the storage - // system to attach a volume to a specific node, it can use this field to - // refer to the node name using the ID that the storage system will - // understand, e.g. "nodeA" instead of "node1". This field is required. - optional string nodeID = 2; - - // topologyKeys is the list of keys supported by the driver. - // When a driver is initialized on a cluster, it provides a set of topology - // keys that it understands (e.g. "company.com/zone", "company.com/region"). - // When a driver is initialized on a node, it provides the same topology keys - // along with values. Kubelet will expose these topology keys as labels - // on its own node object. - // When Kubernetes does topology aware provisioning, it can use this list to - // determine which labels it should retrieve from the node object and pass - // back to the driver. - // It is possible for different nodes to use different topology keys. - // This can be empty if driver does not support topology. - // +optional - repeated string topologyKeys = 3; - - // allocatable represents the volume resources of a node that are available for scheduling. - // This field is beta. - // +optional - optional VolumeNodeResources allocatable = 4; -} - -// CSINodeList is a collection of CSINode objects. 
-message CSINodeList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of CSINode - repeated CSINode items = 2; -} - -// CSINodeSpec holds information about the specification of all CSI drivers installed on a node -message CSINodeSpec { - // drivers is a list of information of all CSI Drivers existing on a node. - // If all drivers in the list are uninstalled, this can become empty. - // +patchMergeKey=name - // +patchStrategy=merge - repeated CSINodeDriver drivers = 1; -} - // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -245,7 +80,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -253,118 +88,3 @@ message StorageClassList { repeated StorageClass items = 2; } -// VolumeAttachment captures the intent to attach or detach the specified volume -// to/from the specified node. -// -// VolumeAttachment objects are non-namespaced. -message VolumeAttachment { - // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired attach/detach volume behavior. - // Populated by the Kubernetes system. - optional VolumeAttachmentSpec spec = 2; - - // Status of the VolumeAttachment request. - // Populated by the entity completing the attach or detach - // operation, i.e. the external-attacher. - // +optional - optional VolumeAttachmentStatus status = 3; -} - -// VolumeAttachmentList is a collection of VolumeAttachment objects. -message VolumeAttachmentList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of VolumeAttachments - repeated VolumeAttachment items = 2; -} - -// VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. -// Exactly one member can be set. -message VolumeAttachmentSource { - // Name of the persistent volume to attach. - // +optional - optional string persistentVolumeName = 1; - - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. 
This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; -} - -// VolumeAttachmentSpec is the specification of a VolumeAttachment request. -message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this - // request. This is the name returned by GetPluginName(). - optional string attacher = 1; - - // Source represents the volume that should be attached. - optional VolumeAttachmentSource source = 2; - - // The node that the volume should be attached to. - optional string nodeName = 3; -} - -// VolumeAttachmentStatus is the status of a VolumeAttachment request. -message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - optional bool attached = 1; - - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed - // into subsequent WaitForAttach or Mount calls. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - map attachmentMetadata = 2; - - // The last error encountered during attach operation, if any. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - optional VolumeError attachError = 3; - - // The last error encountered during detach operation, if any. - // This field must only be set by the entity completing the detach - // operation, i.e. the external-attacher. - // +optional - optional VolumeError detachError = 4; -} - -// VolumeError captures an error encountered during a volume operation. -message VolumeError { - // Time the error was encountered. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - - // String detailing the error encountered during Attach or Detach operation. - // This string may be logged, so it should not contain sensitive - // information. - // +optional - optional string message = 2; -} - -// VolumeNodeResources is a set of resource limits for scheduling of volumes. -message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. - // A volume that is both attached and mounted on a node is considered to be used once, not twice. - // The same rule applies for a unique volume that is shared among multiple pods on the same node. - // If this field is not specified, then the supported number of volumes on this node is unbounded. 
- // +optional - optional int32 count = 1; -} - diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go index 1a2f83d..c058add 100644 --- a/vendor/k8s.io/api/storage/v1/register.go +++ b/vendor/k8s.io/api/storage/v1/register.go @@ -46,15 +46,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { scheme.AddKnownTypes(SchemeGroupVersion, &StorageClass{}, &StorageClassList{}, - - &VolumeAttachment{}, - &VolumeAttachmentList{}, - - &CSINode{}, - &CSINodeList{}, - - &CSIDriver{}, - &CSIDriverList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go index 556427b..30e6d6d 100644 --- a/vendor/k8s.io/api/storage/v1/types.go +++ b/vendor/k8s.io/api/storage/v1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -102,337 +102,3 @@ const ( // binding will occur during Pod scheduing. VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" ) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttachment captures the intent to attach or detach the specified volume -// to/from the specified node. -// -// VolumeAttachment objects are non-namespaced. -type VolumeAttachment struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired attach/detach volume behavior. - // Populated by the Kubernetes system. - Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status of the VolumeAttachment request. - // Populated by the entity completing the attach or detach - // operation, i.e. the external-attacher. - // +optional - Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttachmentList is a collection of VolumeAttachment objects. 
-type VolumeAttachmentList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of VolumeAttachments - Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VolumeAttachmentSpec is the specification of a VolumeAttachment request. -type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this - // request. This is the name returned by GetPluginName(). - Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - - // Source represents the volume that should be attached. - Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - - // The node that the volume should be attached to. - NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` -} - -// VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. -// Exactly one member can be set. -type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. - // +optional - PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` -} - -// VolumeAttachmentStatus is the status of a VolumeAttachment request. -type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed - // into subsequent WaitForAttach or Mount calls. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - - // The last error encountered during attach operation, if any. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - - // The last error encountered during detach operation, if any. - // This field must only be set by the entity completing the detach - // operation, i.e. the external-attacher. - // +optional - DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` -} - -// VolumeError captures an error encountered during a volume operation. 
-type VolumeError struct { - // Time the error was encountered. - // +optional - Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - - // String detailing the error encountered during Attach or Detach operation. - // This string may be logged, so it should not contain sensitive - // information. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSIDriver captures information about a Container Storage Interface (CSI) -// volume driver deployed on the cluster. -// Kubernetes attach detach controller uses this object to determine whether attach is required. -// Kubelet uses this object to determine whether pod information needs to be passed on mount. -// CSIDriver objects are non-namespaced. -type CSIDriver struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata. - // metadata.Name indicates the name of the CSI driver that this object - // refers to; it MUST be the same name returned by the CSI GetPluginName() - // call for that driver. - // The driver name must be 63 characters or less, beginning and ending with - // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and - // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the CSI Driver. - Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSIDriverList is a collection of CSIDriver objects. -type CSIDriverList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CSIDriver - Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// CSIDriverSpec is the specification of a CSIDriver. -type CSIDriverSpec struct { - // attachRequired indicates this CSI volume driver requires an attach - // operation (because it implements the CSI ControllerPublishVolume() - // method), and that the Kubernetes attach detach controller should call - // the attach volume interface which checks the volumeattachment status - // and waits until the volume is attached before proceeding to mounting. - // The CSI external-attacher coordinates with CSI volume driver and updates - // the volumeattachment status when the attach operation is complete. - // If the CSIDriverRegistry feature gate is enabled and the value is - // specified to false, the attach operation will be skipped. - // Otherwise the attach operation will be called. - // +optional - AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. - // If set to false, pod information will not be passed on mount. - // Default is false. - // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. 
- // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. - // This list might grow, but the prefix will be used. - // "csi.storage.k8s.io/pod.name": pod.Name - // "csi.storage.k8s.io/pod.namespace": pod.Namespace - // "csi.storage.k8s.io/pod.uid": string(pod.UID) - // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume - // defined by a CSIVolumeSource, otherwise "false" - // - // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only - // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. - // Other drivers can leave pod info disabled and/or ignore this field. - // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when - // deployed on such a cluster and the deployment determines which mode that is, for example - // via a command line parameter of the driver. - // +optional - PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - - // volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. - // For more information about implementing this mode, see - // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // This field is beta. - // +optional - // +listType=set - VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` -} - -// VolumeLifecycleMode is an enumeration of possible usage modes for a volume -// provided by a CSI driver. More modes may be added in the future. -type VolumeLifecycleMode string - -const ( - // VolumeLifecyclePersistent explicitly confirms that the driver implements - // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not - // set. Such volumes are managed in Kubernetes via the persistent volume - // claim mechanism and have a lifecycle that is independent of the pods which - // use them. - VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" - - // VolumeLifecycleEphemeral indicates that the driver can be used for - // ephemeral inline volumes. Such volumes are specified inside the pod - // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have - // a lifecycle that is tied to the lifecycle of the pod. For example, such - // a volume might contain data that gets created specifically for that pod, - // like secrets. - // But how the volume actually gets created and managed is entirely up to - // the driver. It might also use reference counting to share the same volume - // instance among different pods if the CSIVolumeSource of those pods is - // identical. 
- VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSINode holds information about all CSI drivers installed on a node. -// CSI drivers do not need to create the CSINode object directly. As long as -// they use the node-driver-registrar sidecar container, the kubelet will -// automatically populate the CSINode object for the CSI driver as part of -// kubelet plugin registration. -// CSINode has the same name as a node. If the object is missing, it means either -// there are no CSI Drivers available on the node, or the Kubelet version is low -// enough that it doesn't create this object. -// CSINode has an OwnerReference that points to the corresponding node object. -type CSINode struct { - metav1.TypeMeta `json:",inline"` - - // metadata.name must be the Kubernetes node name. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the specification of CSINode - Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -// CSINodeSpec holds information about the specification of all CSI drivers installed on a node -type CSINodeSpec struct { - // drivers is a list of information of all CSI Drivers existing on a node. - // If all drivers in the list are uninstalled, this can become empty. - // +patchMergeKey=name - // +patchStrategy=merge - Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` -} - -// CSINodeDriver holds information about the specification of one CSI driver installed on a node -type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. - // This MUST be the same name returned by the CSI GetPluginName() call for - // that driver. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // nodeID of the node from the driver point of view. - // This field enables Kubernetes to communicate with storage systems that do - // not share the same nomenclature for nodes. For example, Kubernetes may - // refer to a given node as "node1", but the storage system may refer to - // the same node as "nodeA". When Kubernetes issues a command to the storage - // system to attach a volume to a specific node, it can use this field to - // refer to the node name using the ID that the storage system will - // understand, e.g. "nodeA" instead of "node1". This field is required. - NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` - - // topologyKeys is the list of keys supported by the driver. - // When a driver is initialized on a cluster, it provides a set of topology - // keys that it understands (e.g. "company.com/zone", "company.com/region"). - // When a driver is initialized on a node, it provides the same topology keys - // along with values. Kubelet will expose these topology keys as labels - // on its own node object. - // When Kubernetes does topology aware provisioning, it can use this list to - // determine which labels it should retrieve from the node object and pass - // back to the driver. - // It is possible for different nodes to use different topology keys. - // This can be empty if driver does not support topology. - // +optional - TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` - - // allocatable represents the volume resources of a node that are available for scheduling. - // This field is beta. 
- // +optional - Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` -} - -// VolumeNodeResources is a set of resource limits for scheduling of volumes. -type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. - // A volume that is both attached and mounted on a node is considered to be used once, not twice. - // The same rule applies for a unique volume that is shared among multiple pods on the same node. - // If this field is not specified, then the supported number of volumes on this node is unbounded. - // +optional - Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSINodeList is a collection of CSINode objects. -type CSINodeList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CSINode - Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go index 0e524a2..23b76e2 100644 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go @@ -27,81 +27,9 @@ package v1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_CSIDriver = map[string]string{ - "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", - "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", -} - -func (CSIDriver) SwaggerDoc() map[string]string { - return map_CSIDriver -} - -var map_CSIDriverList = map[string]string{ - "": "CSIDriverList is a collection of CSIDriver objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of CSIDriver", -} - -func (CSIDriverList) SwaggerDoc() map[string]string { - return map_CSIDriverList -} - -var map_CSIDriverSpec = map[string]string{ - "": "CSIDriverSpec is the specification of a CSIDriver.", - "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. Otherwise the attach operation will be called.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", - "volumeLifecycleModes": "volumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. 
For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future. This field is beta.", -} - -func (CSIDriverSpec) SwaggerDoc() map[string]string { - return map_CSIDriverSpec -} - -var map_CSINode = map[string]string{ - "": "CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", - "spec": "spec is the specification of CSINode", -} - -func (CSINode) SwaggerDoc() map[string]string { - return map_CSINode -} - -var map_CSINodeDriver = map[string]string{ - "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. This MUST be the same name returned by the CSI GetPluginName() call for that driver.", - "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", - "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", - "allocatable": "allocatable represents the volume resources of a node that are available for scheduling. This field is beta.", -} - -func (CSINodeDriver) SwaggerDoc() map[string]string { - return map_CSINodeDriver -} - -var map_CSINodeList = map[string]string{ - "": "CSINodeList is a collection of CSINode objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of CSINode", -} - -func (CSINodeList) SwaggerDoc() map[string]string { - return map_CSINodeList -} - -var map_CSINodeSpec = map[string]string{ - "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", - "drivers": "drivers is a list of information of all CSI Drivers existing on a node. 
If all drivers in the list are uninstalled, this can become empty.", -} - -func (CSINodeSpec) SwaggerDoc() map[string]string { - return map_CSINodeSpec -} - var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", @@ -117,7 +45,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -125,76 +53,4 @@ func (StorageClassList) SwaggerDoc() map[string]string { return map_StorageClassList } -var map_VolumeAttachment = map[string]string{ - "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", -} - -func (VolumeAttachment) SwaggerDoc() map[string]string { - return map_VolumeAttachment -} - -var map_VolumeAttachmentList = map[string]string{ - "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", -} - -func (VolumeAttachmentList) SwaggerDoc() map[string]string { - return map_VolumeAttachmentList -} - -var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", -} - -func (VolumeAttachmentSource) SwaggerDoc() map[string]string { - return map_VolumeAttachmentSource -} - -var map_VolumeAttachmentSpec = map[string]string{ - "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", -} - -func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { - return map_VolumeAttachmentSpec -} - -var map_VolumeAttachmentStatus = map[string]string{ - "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", -} - -func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { - return map_VolumeAttachmentStatus -} - -var map_VolumeError = map[string]string{ - "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", -} - -func (VolumeError) SwaggerDoc() map[string]string { - return map_VolumeError -} - -var map_VolumeNodeResources = map[string]string{ - "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is not specified, then the supported number of volumes on this node is unbounded.", -} - -func (VolumeNodeResources) SwaggerDoc() map[string]string { - return map_VolumeNodeResources -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go index efaa40a..0e850dc 100644 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go @@ -25,206 +25,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriver) DeepCopyInto(out *CSIDriver) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver. -func (in *CSIDriver) DeepCopy() *CSIDriver { - if in == nil { - return nil - } - out := new(CSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *CSIDriver) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CSIDriver, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList. -func (in *CSIDriverList) DeepCopy() *CSIDriverList { - if in == nil { - return nil - } - out := new(CSIDriverList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSIDriverList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { - *out = *in - if in.AttachRequired != nil { - in, out := &in.AttachRequired, &out.AttachRequired - *out = new(bool) - **out = **in - } - if in.PodInfoOnMount != nil { - in, out := &in.PodInfoOnMount, &out.PodInfoOnMount - *out = new(bool) - **out = **in - } - if in.VolumeLifecycleModes != nil { - in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes - *out = make([]VolumeLifecycleMode, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. -func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { - if in == nil { - return nil - } - out := new(CSIDriverSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINode) DeepCopyInto(out *CSINode) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. -func (in *CSINode) DeepCopy() *CSINode { - if in == nil { - return nil - } - out := new(CSINode) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSINode) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { - *out = *in - if in.TopologyKeys != nil { - in, out := &in.TopologyKeys, &out.TopologyKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Allocatable != nil { - in, out := &in.Allocatable, &out.Allocatable - *out = new(VolumeNodeResources) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. -func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { - if in == nil { - return nil - } - out := new(CSINodeDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CSINode, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. -func (in *CSINodeList) DeepCopy() *CSINodeList { - if in == nil { - return nil - } - out := new(CSINodeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSINodeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { - *out = *in - if in.Drivers != nil { - in, out := &in.Drivers, &out.Drivers - *out = make([]CSINodeDriver, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. -func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { - if in == nil { - return nil - } - out := new(CSINodeSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -289,7 +89,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -317,178 +117,3 @@ func (in *StorageClassList) DeepCopyObject() runtime.Object { } return nil } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. -func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { - if in == nil { - return nil - } - out := new(VolumeAttachment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttachment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VolumeAttachment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. 
-func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { - if in == nil { - return nil - } - out := new(VolumeAttachmentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { - *out = *in - if in.PersistentVolumeName != nil { - in, out := &in.PersistentVolumeName, &out.PersistentVolumeName - *out = new(string) - **out = **in - } - if in.InlineVolumeSpec != nil { - in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec - *out = new(corev1.PersistentVolumeSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. -func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { - if in == nil { - return nil - } - out := new(VolumeAttachmentSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { - *out = *in - in.Source.DeepCopyInto(&out.Source) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. -func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { - if in == nil { - return nil - } - out := new(VolumeAttachmentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { - *out = *in - if in.AttachmentMetadata != nil { - in, out := &in.AttachmentMetadata, &out.AttachmentMetadata - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AttachError != nil { - in, out := &in.AttachError, &out.AttachError - *out = new(VolumeError) - (*in).DeepCopyInto(*out) - } - if in.DetachError != nil { - in, out := &in.DetachError, &out.DetachError - *out = new(VolumeError) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. -func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { - if in == nil { - return nil - } - out := new(VolumeAttachmentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeError) DeepCopyInto(out *VolumeError) { - *out = *in - in.Time.DeepCopyInto(&out.Time) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. -func (in *VolumeError) DeepCopy() *VolumeError { - if in == nil { - return nil - } - out := new(VolumeError) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { - *out = *in - if in.Count != nil { - in, out := &in.Count, &out.Count - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. -func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { - if in == nil { - return nil - } - out := new(VolumeNodeResources) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go index 6f7ad7e..aa94aff 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/doc.go +++ b/vendor/k8s.io/api/storage/v1alpha1/doc.go @@ -14,9 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package +// +k8s:deepcopy-gen=package,register // +groupName=storage.k8s.io // +k8s:openapi-gen=true - package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go index 1f9db7a..507b5c1 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go @@ -14,25 +14,36 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto +// DO NOT EDIT! +/* + Package v1alpha1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto + + It has these top-level messages: + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError +*/ package v1alpha1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - v11 "k8s.io/api/core/v1" +import strings "strings" +import reflect "reflect" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -43,175 +54,31 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{0} -} -func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachment) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachment.Merge(m, src) -} -func (m *VolumeAttachment) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachment) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo - -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{1} -} -func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentList.Merge(m, src) -} -func (m *VolumeAttachmentList) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentList) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) -} +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{2} -} -func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) -} -func (m *VolumeAttachmentSource) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) -} +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo +func (m 
*VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{3} -} -func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) -} -func (m *VolumeAttachmentSpec) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) -} +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo - -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{4} -} -func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) -} -func (m *VolumeAttachmentStatus) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo - -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_10f856db1e670dc4, []int{5} -} -func (m *VolumeError) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *VolumeError) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeError.Merge(m, src) -} -func (m *VolumeError) XXX_Size() int { - return m.Size() -} -func (m *VolumeError) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeError.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeError proto.InternalMessageInfo +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } func init() { proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") @@ -219,69 +86,12 @@ func init() { proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") 
proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus.AttachmentMetadataEntry") proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptor_10f856db1e670dc4) -} - -var fileDescriptor_10f856db1e670dc4 = []byte{ - // 745 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0x0d, 0x1f, 0xd1, 0x2a, 0x82, 0x28, 0x48, 0x4e, 0x95, 0x53, - 0x40, 0x74, 0x4d, 0x0a, 0x42, 0x15, 0xb7, 0x58, 0xed, 0xa1, 0xa2, 0x2d, 0x68, 0x8b, 0x38, 0x00, - 0x07, 0x36, 0xf6, 0xe2, 0xb8, 0x89, 0x3f, 0xe4, 0x5d, 0x47, 0xea, 0x8d, 0x13, 0x67, 0x6e, 0xbc, - 0x01, 0xcf, 0x92, 0x1b, 0x15, 0xa7, 0x9e, 0x22, 0x6a, 0xde, 0x82, 0x0b, 0x68, 0xd7, 0x9b, 0xc4, - 0x24, 0x29, 0xb4, 0xbd, 0x79, 0x66, 0x67, 0x7e, 0x33, 0xf3, 0xdf, 0xf1, 0x82, 0x9d, 0xfe, 0x36, - 0x43, 0x6e, 0x60, 0xf4, 0xe3, 0x2e, 0x8d, 0x7c, 0xca, 0x29, 0x33, 0x86, 0xd4, 0xb7, 0x83, 0xc8, - 0x50, 0x07, 0x24, 0x74, 0x0d, 0xc6, 0x83, 0x88, 0x38, 0xd4, 0x18, 0xb6, 0xc9, 0x20, 0xec, 0x91, - 0xb6, 0xe1, 0x50, 0x9f, 0x46, 0x84, 0x53, 0x1b, 0x85, 0x51, 0xc0, 0x03, 0x78, 0x2f, 0x0d, 0x46, - 0x24, 0x74, 0x91, 0x0a, 0x46, 0x93, 0xe0, 0xfa, 0xa6, 0xe3, 0xf2, 0x5e, 0xdc, 0x45, 0x56, 0xe0, - 0x19, 0x4e, 0xe0, 0x04, 0x86, 0xcc, 0xe9, 0xc6, 0x1f, 0xa4, 0x25, 0x0d, 0xf9, 0x95, 0xb2, 0xea, - 0xcd, 0x4c, 0x61, 0x2b, 0x88, 0x44, 0xd5, 0xf9, 0x7a, 0xf5, 0x27, 0xb3, 0x18, 0x8f, 0x58, 0x3d, - 0xd7, 0xa7, 0xd1, 0x89, 0x11, 0xf6, 0x1d, 0xe1, 0x60, 0x86, 0x47, 0x39, 0x59, 0x96, 0x65, 0x5c, - 0x94, 0x15, 0xc5, 0x3e, 0x77, 0x3d, 0xba, 0x90, 0xf0, 0xf4, 0x7f, 0x09, 0xcc, 0xea, 0x51, 0x8f, - 0xcc, 0xe7, 0x35, 0xbf, 0xe6, 0x41, 0xe5, 0x75, 0x30, 0x88, 0x3d, 0xda, 0xe1, 0x9c, 0x58, 0x3d, - 0x8f, 0xfa, 0x1c, 0xbe, 0x07, 0x25, 0xd1, 0x98, 0x4d, 0x38, 0xa9, 0x69, 0x1b, 0x5a, 0xab, 0xbc, - 0xf5, 0x08, 0xcd, 0x64, 0x9b, 0xf2, 0x51, 0xd8, 0x77, 0x84, 0x83, 0x21, 0x11, 0x8d, 0x86, 0x6d, - 0xf4, 0xa2, 0x7b, 0x4c, 0x2d, 0x7e, 0x40, 0x39, 0x31, 0xe1, 0x68, 0xdc, 0xc8, 0x25, 0xe3, 0x06, - 0x98, 0xf9, 0xf0, 0x94, 0x0a, 0x8f, 0x40, 0x91, 0x85, 0xd4, 0xaa, 0xe5, 0x25, 0xbd, 0x8d, 0xfe, - 0x71, 0x29, 0x68, 0xbe, 0xbd, 0xa3, 0x90, 0x5a, 0xe6, 0x0d, 0x85, 0x2f, 0x0a, 0x0b, 0x4b, 0x18, - 0x7c, 0x0b, 0x56, 0x19, 0x27, 0x3c, 0x66, 0xb5, 0x82, 0xc4, 0x3e, 0xbe, 0x1a, 0x56, 0xa6, 0x9a, - 0xb7, 0x14, 0x78, 0x35, 0xb5, 0xb1, 0x42, 0x36, 0x47, 0x1a, 0xa8, 0xce, 0xa7, 0xec, 0xbb, 0x8c, - 0xc3, 0x77, 0x0b, 0x62, 0xa1, 0xcb, 0x89, 0x25, 0xb2, 0xa5, 0x54, 0x15, 0x55, 0xb2, 0x34, 0xf1, - 0x64, 0x84, 0xc2, 0x60, 0xc5, 0xe5, 0xd4, 0x63, 0xb5, 0xfc, 0x46, 0xa1, 0x55, 0xde, 0xda, 0xbc, - 0xd2, 0x48, 0xe6, 0x4d, 0x45, 0x5e, 0xd9, 0x13, 0x0c, 0x9c, 0xa2, 0x9a, 0xdf, 0x35, 0x70, 0x67, - 0x61, 0xfa, 0x20, 0x8e, 0x2c, 0x0a, 0xf7, 0x41, 0x35, 0xa4, 0x11, 0x73, 0x19, 0xa7, 0x3e, 0x4f, - 0x63, 0x0e, 0x89, 0x47, 0xe5, 0x60, 0xeb, 0x66, 0x2d, 0x19, 0x37, 0xaa, 0x2f, 0x97, 0x9c, 0xe3, - 0xa5, 0x59, 0xf0, 0x18, 0x54, 0x5c, 0x7f, 0xe0, 0xfa, 0x34, 0xf5, 0x1d, 0xcd, 0x6e, 0xbc, 0x95, - 0x9d, 0x43, 0xfc, 0x3a, 0x42, 0x90, 0x79, 0xb2, 0xbc, 0xe8, 0x6a, 0x32, 0x6e, 0x54, 0xf6, 0xe6, - 0x28, 0x78, 0x81, 0xdb, 0xfc, 0xb6, 0xe4, 0x7e, 
0xc4, 0x01, 0x7c, 0x08, 0x4a, 0x44, 0x7a, 0x68, - 0xa4, 0xc6, 0x98, 0xea, 0xdd, 0x51, 0x7e, 0x3c, 0x8d, 0x90, 0x3b, 0x24, 0xa5, 0x50, 0x8d, 0x5e, - 0x71, 0x87, 0x64, 0x6a, 0x66, 0x87, 0xa4, 0x8d, 0x15, 0x52, 0xb4, 0xe2, 0x07, 0x76, 0xaa, 0x68, - 0xe1, 0xef, 0x56, 0x0e, 0x95, 0x1f, 0x4f, 0x23, 0x9a, 0xbf, 0x0b, 0x4b, 0xae, 0x49, 0x2e, 0x63, - 0x66, 0x26, 0x5b, 0xce, 0x54, 0x5a, 0x98, 0xc9, 0x9e, 0xce, 0x64, 0xc3, 0x2f, 0x1a, 0x80, 0x64, - 0x8a, 0x38, 0x98, 0x2c, 0x6b, 0xba, 0x51, 0xcf, 0xaf, 0xf1, 0x93, 0xa0, 0xce, 0x02, 0x6d, 0xd7, - 0xe7, 0xd1, 0x89, 0x59, 0x57, 0x5d, 0xc0, 0xc5, 0x00, 0xbc, 0xa4, 0x05, 0x78, 0x0c, 0xca, 0xa9, - 0x77, 0x37, 0x8a, 0x82, 0x48, 0xfd, 0xb6, 0xad, 0x4b, 0x74, 0x24, 0xe3, 0x4d, 0x3d, 0x19, 0x37, - 0xca, 0x9d, 0x19, 0xe0, 0xd7, 0xb8, 0x51, 0xce, 0x9c, 0xe3, 0x2c, 0x5c, 0xd4, 0xb2, 0xe9, 0xac, - 0x56, 0xf1, 0x3a, 0xb5, 0x76, 0xe8, 0xc5, 0xb5, 0x32, 0xf0, 0xfa, 0x2e, 0xb8, 0x7b, 0x81, 0x44, - 0xb0, 0x02, 0x0a, 0x7d, 0x7a, 0x92, 0x6e, 0x22, 0x16, 0x9f, 0xb0, 0x0a, 0x56, 0x86, 0x64, 0x10, - 0xa7, 0x1b, 0xb7, 0x8e, 0x53, 0xe3, 0x59, 0x7e, 0x5b, 0x6b, 0x7e, 0xd2, 0x40, 0xb6, 0x06, 0xdc, - 0x07, 0x45, 0xf1, 0x96, 0xab, 0x67, 0xe6, 0xc1, 0xe5, 0x9e, 0x99, 0x57, 0xae, 0x47, 0x67, 0xcf, - 0xa5, 0xb0, 0xb0, 0xa4, 0xc0, 0xfb, 0x60, 0xcd, 0xa3, 0x8c, 0x11, 0x47, 0x55, 0x36, 0x6f, 0xab, - 0xa0, 0xb5, 0x83, 0xd4, 0x8d, 0x27, 0xe7, 0x26, 0x1a, 0x9d, 0xeb, 0xb9, 0xd3, 0x73, 0x3d, 0x77, - 0x76, 0xae, 0xe7, 0x3e, 0x26, 0xba, 0x36, 0x4a, 0x74, 0xed, 0x34, 0xd1, 0xb5, 0xb3, 0x44, 0xd7, - 0x7e, 0x24, 0xba, 0xf6, 0xf9, 0xa7, 0x9e, 0x7b, 0x53, 0x9a, 0x08, 0xf7, 0x27, 0x00, 0x00, 0xff, - 0xff, 0xe8, 0x45, 0xe3, 0xba, 0xab, 0x07, 0x00, 0x00, -} - func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -289,52 +99,41 @@ func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- + i += n1 dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n2, err := m.Spec.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n2 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n3, err := m.Status.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + return i, nil } func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { 
return nil, err } @@ -342,46 +141,37 @@ func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n4, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Items { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -389,41 +179,23 @@ func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.InlineVolumeSpec != nil { - { - size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } if m.PersistentVolumeName != nil { - i -= len(*m.PersistentVolumeName) - copy(dAtA[i:], *m.PersistentVolumeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) } - return len(dAtA) - i, nil + return i, nil } func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -431,42 +203,33 @@ func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x1a - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Attacher) - copy(dAtA[i:], m.Attacher) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n5, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil } func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -474,78 +237,67 @@ func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { } func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.DetachError != nil { - { - size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.AttachError != nil { - { - size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 } + i++ if len(m.AttachmentMetadata) > 0 { keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) for k := range m.AttachmentMetadata { keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) } github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- + for _, k := range keysForAttachmentMetadata { dAtA[i] = 0x12 - i -= len(keysForAttachmentMetadata[iNdEx]) - copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) - i-- + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - i-- - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n6, err := m.AttachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n7, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil } func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ 
-553,48 +305,53 @@ func (m *VolumeError) Marshal() (dAtA []byte, err error) { } func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n8, err := m.Time.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *VolumeAttachment) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ObjectMeta.Size() @@ -607,9 +364,6 @@ func (m *VolumeAttachment) Size() (n int) { } func (m *VolumeAttachmentList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -624,26 +378,16 @@ func (m *VolumeAttachmentList) Size() (n int) { } func (m *VolumeAttachmentSource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.PersistentVolumeName != nil { l = len(*m.PersistentVolumeName) n += 1 + l + sovGenerated(uint64(l)) } - if m.InlineVolumeSpec != nil { - l = m.InlineVolumeSpec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *VolumeAttachmentSpec) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Attacher) @@ -656,9 +400,6 @@ func (m *VolumeAttachmentSpec) Size() (n int) { } func (m *VolumeAttachmentStatus) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -682,9 +423,6 @@ func (m *VolumeAttachmentStatus) Size() (n int) { } func (m *VolumeError) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Time.Size() @@ -695,7 +433,14 @@ func (m *VolumeError) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -705,7 +450,7 @@ func (this *VolumeAttachment) String() string { return "nil" } s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, `}`, @@ -716,14 +461,9 @@ func (this *VolumeAttachmentList) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]VolumeAttachment{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -734,7 +474,6 @@ func (this *VolumeAttachmentSource) String() string { } s := strings.Join([]string{`&VolumeAttachmentSource{`, `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, `}`, }, "") return s @@ -768,8 +507,8 @@ func (this *VolumeAttachmentStatus) String() string { s := strings.Join([]string{`&VolumeAttachmentStatus{`, `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, `}`, }, "") return s @@ -779,7 +518,7 @@ func (this *VolumeError) String() string { return "nil" } s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `}`, }, "") @@ -808,7 +547,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -836,7 +575,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -845,9 +584,6 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -869,7 +605,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA 
[]byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -878,9 +614,6 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -902,7 +635,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -911,9 +644,6 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -930,9 +660,6 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -960,7 +687,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -988,7 +715,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -997,9 +724,6 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1021,7 +745,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1030,9 +754,6 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1050,9 +771,6 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1080,7 +798,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1108,7 +826,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1118,51 +836,12 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) 
<< shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.InlineVolumeSpec == nil { - m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} - } - if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -1172,9 +851,6 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1202,7 +878,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1230,7 +906,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1240,9 +916,6 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1262,7 +935,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1271,9 +944,6 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1295,7 +965,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1305,9 +975,6 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1322,9 +989,6 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1352,7 +1016,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1380,7 +1044,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1400,7 +1064,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1409,20 +1073,54 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -1432,86 +1130,41 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue } - m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -1527,7 +1180,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1536,9 +1189,6 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1563,7 +1213,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1572,9 +1222,6 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1594,9 +1241,6 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1624,7 +1268,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1652,7 +1296,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -1661,9 +1305,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1685,7 +1326,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -1695,9 +1336,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -1712,9 +1350,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -1730,7 +1365,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -1762,8 +1396,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -1780,34 +1416,105 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var 
innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 704 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, + 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0xcd, 0xf3, 0x52, 0xad, 0xa2, 0xe7, 0x89, 0x82, 0xe4, 0x54, + 0x39, 0x15, 0x44, 0xd7, 0xa4, 0x20, 0x54, 0x71, 0x8b, 0xd5, 0x1e, 0x10, 0x6d, 0x41, 0x5b, 0xc4, + 0x01, 0x38, 0xb0, 0xb1, 0xa7, 0x8e, 0x9b, 0xfa, 0x45, 0xbb, 0xeb, 0x48, 0xbd, 0x71, 0xe2, 0xcc, + 0x8d, 0x6f, 0xc0, 0x67, 0xc9, 0x8d, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x16, 0x5c, 0x40, 0x5e, 0x6f, + 0x5e, 0x68, 0x52, 0x68, 0x7b, 0xf3, 0xcc, 0xce, 0xfc, 0x66, 0xe6, 0xbf, 0xb3, 0x46, 0x3b, 0xfd, + 0x6d, 0x41, 0xfc, 0xc8, 0xea, 0x27, 0x5d, 0xe0, 0x21, 0x48, 0x10, 0xd6, 0x00, 0x42, 0x37, 0xe2, + 0x96, 0x3e, 0x60, 0xb1, 0x6f, 0x09, 0x19, 0x71, 0xe6, 0x81, 0x35, 0x68, 0xb3, 0x93, 0xb8, 0xc7, + 0xda, 0x96, 0x07, 0x21, 0x70, 0x26, 0xc1, 0x25, 0x31, 0x8f, 0x64, 0x84, 0xef, 0xe4, 0xc1, 0x84, + 0xc5, 0x3e, 0xd1, 0xc1, 0x64, 0x1c, 0xdc, 0xd8, 0xf4, 0x7c, 0xd9, 0x4b, 0xba, 0xc4, 0x89, 0x02, + 0xcb, 0x8b, 0xbc, 0xc8, 0x52, 0x39, 0xdd, 0xe4, 0x48, 0x59, 0xca, 0x50, 0x5f, 0x39, 0xab, 0xf1, + 0x68, 0x5a, 0x38, 0x60, 0x4e, 0xcf, 0x0f, 0x81, 0x9f, 0x5a, 0x71, 0xdf, 0xcb, 0x1c, 0xc2, 0x0a, + 0x40, 0x32, 0x6b, 0x30, 0xd7, 0x41, 0xc3, 0xba, 0x2a, 0x8b, 0x27, 0xa1, 0xf4, 0x03, 0x98, 0x4b, + 0x78, 0xfc, 0xa7, 0x04, 0xe1, 0xf4, 0x20, 0x60, 0x97, 0xf3, 0x5a, 0x9f, 0x8b, 0x68, 0xed, 0x55, + 0x74, 0x92, 0x04, 0xd0, 0x91, 0x92, 0x39, 0xbd, 0x00, 0x42, 0x89, 0xdf, 0xa1, 0x4a, 0xd6, 0x98, + 0xcb, 0x24, 0xab, 0x1b, 0xeb, 0xc6, 0x46, 0x75, 0xeb, 0x01, 0x99, 0x4a, 0x32, 0xe1, 0x93, 0xb8, + 0xef, 0x65, 0x0e, 0x41, 0xb2, 0x68, 0x32, 0x68, 0x93, 0xe7, 0xdd, 0x63, 0x70, 0xe4, 0x3e, 0x48, + 0x66, 0xe3, 0xe1, 0xa8, 0x59, 0x48, 0x47, 0x4d, 0x34, 0xf5, 0xd1, 0x09, 0x15, 0x1f, 0xa2, 0xb2, + 0x88, 0xc1, 0xa9, 0x17, 0x15, 0xbd, 0x4d, 0x7e, 0x23, 0x38, 0xb9, 0xdc, 0xde, 0x61, 0x0c, 0x8e, + 0xfd, 0x97, 0xc6, 0x97, 0x33, 0x8b, 0x2a, 0x18, 0x7e, 0x83, 0x96, 0x85, 0x64, 0x32, 0x11, 0xf5, + 0x92, 0xc2, 0x3e, 0xbc, 0x19, 0x56, 0xa5, 0xda, 0xff, 0x68, 0xf0, 0x72, 0x6e, 0x53, 0x8d, 0x6c, + 0x0d, 0x0d, 0x54, 0xbb, 0x9c, 0xb2, 0xe7, 0x0b, 
0x89, 0xdf, 0xce, 0x89, 0x45, 0xae, 0x27, 0x56, + 0x96, 0xad, 0xa4, 0x5a, 0xd3, 0x25, 0x2b, 0x63, 0xcf, 0x8c, 0x50, 0x14, 0x2d, 0xf9, 0x12, 0x02, + 0x51, 0x2f, 0xae, 0x97, 0x36, 0xaa, 0x5b, 0x9b, 0x37, 0x1a, 0xc9, 0xfe, 0x5b, 0x93, 0x97, 0x9e, + 0x66, 0x0c, 0x9a, 0xa3, 0x5a, 0x47, 0xe8, 0xbf, 0xb9, 0xe1, 0xa3, 0x84, 0x3b, 0x80, 0xf7, 0x50, + 0x2d, 0x06, 0x2e, 0x7c, 0x21, 0x21, 0x94, 0x79, 0xcc, 0x01, 0x0b, 0x40, 0xcd, 0xb5, 0x6a, 0xd7, + 0xd3, 0x51, 0xb3, 0xf6, 0x62, 0xc1, 0x39, 0x5d, 0x98, 0xd5, 0xfa, 0xb2, 0x40, 0xb2, 0xec, 0xba, + 0xf0, 0x7d, 0x54, 0x61, 0xca, 0x03, 0x5c, 0xa3, 0x27, 0x12, 0x74, 0xb4, 0x9f, 0x4e, 0x22, 0xd4, + 0xb5, 0xaa, 0xf6, 0xf4, 0xb6, 0xdc, 0xf0, 0x5a, 0x55, 0xea, 0xcc, 0xb5, 0x2a, 0x9b, 0x6a, 0x64, + 0xd6, 0x4a, 0x18, 0xb9, 0xf9, 0x94, 0xa5, 0x5f, 0x5b, 0x39, 0xd0, 0x7e, 0x3a, 0x89, 0x68, 0xfd, + 0x28, 0x2d, 0x90, 0x4e, 0xed, 0xc7, 0xcc, 0x4c, 0xae, 0x9a, 0xa9, 0x32, 0x37, 0x93, 0x3b, 0x99, + 0xc9, 0xc5, 0x9f, 0x0c, 0x84, 0xd9, 0x04, 0xb1, 0x3f, 0xde, 0x9f, 0xfc, 0x92, 0x9f, 0xdd, 0x62, + 0x6f, 0x49, 0x67, 0x8e, 0xb6, 0x1b, 0x4a, 0x7e, 0x6a, 0x37, 0x74, 0x17, 0x78, 0x3e, 0x80, 0x2e, + 0x68, 0x01, 0x1f, 0xa3, 0x6a, 0xee, 0xdd, 0xe5, 0x3c, 0xe2, 0xfa, 0x25, 0x6d, 0x5c, 0xa3, 0x23, + 0x15, 0x6f, 0x9b, 0xe9, 0xa8, 0x59, 0xed, 0x4c, 0x01, 0xdf, 0x47, 0xcd, 0xea, 0xcc, 0x39, 0x9d, + 0x85, 0x67, 0xb5, 0x5c, 0x98, 0xd6, 0x2a, 0xdf, 0xa6, 0xd6, 0x0e, 0x5c, 0x5d, 0x6b, 0x06, 0xde, + 0xd8, 0x45, 0xff, 0x5f, 0x21, 0x11, 0x5e, 0x43, 0xa5, 0x3e, 0x9c, 0xe6, 0x9b, 0x48, 0xb3, 0x4f, + 0x5c, 0x43, 0x4b, 0x03, 0x76, 0x92, 0xe4, 0x1b, 0xb7, 0x4a, 0x73, 0xe3, 0x49, 0x71, 0xdb, 0x68, + 0x7d, 0x30, 0xd0, 0x6c, 0x0d, 0xbc, 0x87, 0xca, 0xd9, 0xef, 0x55, 0xbf, 0xfc, 0x7b, 0xd7, 0x7b, + 0xf9, 0x2f, 0xfd, 0x00, 0xa6, 0x7f, 0xb0, 0xcc, 0xa2, 0x8a, 0x82, 0xef, 0xa2, 0x95, 0x00, 0x84, + 0x60, 0x9e, 0xae, 0x6c, 0xff, 0xab, 0x83, 0x56, 0xf6, 0x73, 0x37, 0x1d, 0x9f, 0xdb, 0x64, 0x78, + 0x61, 0x16, 0xce, 0x2e, 0xcc, 0xc2, 0xf9, 0x85, 0x59, 0x78, 0x9f, 0x9a, 0xc6, 0x30, 0x35, 0x8d, + 0xb3, 0xd4, 0x34, 0xce, 0x53, 0xd3, 0xf8, 0x9a, 0x9a, 0xc6, 0xc7, 0x6f, 0x66, 0xe1, 0x75, 0x65, + 0x2c, 0xdc, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0xba, 0xdb, 0x12, 0x1a, 0x07, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto index 7601963..ccb9475 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ b/vendor/k8s.io/api/storage/v1alpha1/generated.proto @@ -21,7 +21,6 @@ syntax = 'proto2'; package k8s.io.api.storage.v1alpha1; -import "k8s.io/api/core/v1/generated.proto"; import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/generated.proto"; import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; @@ -31,11 +30,11 @@ option go_package = "v1alpha1"; // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. -// +// // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -53,7 +52,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. 
message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -69,15 +68,6 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; - - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go index 3940885..964bb5f 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types.go @@ -16,10 +16,7 @@ limitations under the License. package v1alpha1 -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // +genclient // +genclient:nonNamespaced @@ -33,7 +30,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -54,7 +51,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -84,14 +81,7 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` + // Placeholder for *VolumeSource to accommodate inline volumes in pods. } // VolumeAttachmentStatus is the status of a VolumeAttachment request. 
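Note (not part of the patch, added only as an illustration): the regenerated v1alpha1 files above differ from the previously vendored copies in two mechanical ways. The older protoc-gen-gogo output being restored spells each varint step as (int(b) & 0x7F) << shift rather than int(b&0x7F) << shift, which is semantically identical, and it omits the postIndex < 0 and (iNdEx + skippy) < 0 overflow guards that newer generators emit. The standalone Go sketch below mirrors the decode loop the hunks keep repeating; decodeLength, errOverflow, and errInvalidLength are illustrative names, not identifiers from the vendored code.

package main

import (
	"errors"
	"fmt"
	"io"
)

var (
	errOverflow      = errors.New("proto: integer overflow")
	errInvalidLength = errors.New("proto: negative length found during unmarshaling")
)

// decodeLength reads a varint-encoded field length starting at index i and
// returns the length plus the index just past the field payload, mirroring
// the shape of the generated Unmarshal loops in the hunks above.
func decodeLength(data []byte, i int) (int, int, error) {
	l := len(data)
	var msglen int
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errOverflow
		}
		if i >= l {
			return 0, 0, io.ErrUnexpectedEOF
		}
		b := data[i]
		i++
		// The two spellings in the diff are equivalent:
		// int(b&0x7F) << shift == (int(b) & 0x7F) << shift.
		msglen |= (int(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	if msglen < 0 {
		return 0, 0, errInvalidLength
	}
	postIndex := i + msglen
	// Newer gogo codegen additionally rejects postIndex < 0 (overflow of
	// i+msglen); the older generator restored by this patch omits that guard.
	if postIndex > l {
		return 0, 0, io.ErrUnexpectedEOF
	}
	return msglen, postIndex, nil
}

func main() {
	// 0x05 encodes a length of 5, followed by five payload bytes.
	buf := []byte{0x05, 'h', 'e', 'l', 'l', 'o'}
	n, end, err := decodeLength(buf, 0)
	if err != nil {
		panic(err)
	}
	fmt.Println(n, string(buf[1:end])) // 5 hello
}
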
diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go index 2e82161..32d7dcc 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go @@ -29,7 +29,7 @@ package v1alpha1 // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -40,7 +40,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } @@ -49,7 +49,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go index 3debf9d..e27c6ff 100644 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go @@ -21,7 +21,6 @@ limitations under the License. 
package v1alpha1 import ( - v1 "k8s.io/api/core/v1" runtime "k8s.io/apimachinery/pkg/runtime" ) @@ -57,7 +56,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -94,11 +93,6 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } - if in.InlineVolumeSpec != nil { - in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec - *out = new(v1.PersistentVolumeSpec) - (*in).DeepCopyInto(*out) - } return } diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go index e3e3626..8957a4c 100644 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ b/vendor/k8s.io/api/storage/v1beta1/doc.go @@ -15,8 +15,6 @@ limitations under the License. */ // +k8s:deepcopy-gen=package -// +k8s:protobuf-gen=package // +groupName=storage.k8s.io // +k8s:openapi-gen=true - package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go index af4ce59..fed8c7a 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go @@ -14,26 +14,40 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto +// DO NOT EDIT! +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto + + It has these top-level messages: + StorageClass + StorageClassList + VolumeAttachment + VolumeAttachmentList + VolumeAttachmentSource + VolumeAttachmentSpec + VolumeAttachmentStatus + VolumeError +*/ package v1beta1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_api_core_v1 "k8s.io/api/core/v1" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_api_core_v1 "k8s.io/api/core/v1" - v11 "k8s.io/api/core/v1" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" -) +import strings "strings" +import reflect "reflect" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -44,2606 +58,727 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *CSIDriver) Reset() { *m = CSIDriver{} } -func (*CSIDriver) ProtoMessage() {} -func (*CSIDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{0} -} -func (m *CSIDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriver.Merge(m, src) -} -func (m *CSIDriver) XXX_Size() int { - return m.Size() -} -func (m *CSIDriver) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriver.DiscardUnknown(m) -} +func (m *StorageClass) Reset() { *m = StorageClass{} } +func (*StorageClass) ProtoMessage() {} +func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_CSIDriver proto.InternalMessageInfo +func (m *StorageClassList) Reset() { *m = StorageClassList{} } +func (*StorageClassList) ProtoMessage() {} +func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *CSIDriverList) Reset() { *m = CSIDriverList{} } -func (*CSIDriverList) ProtoMessage() {} -func (*CSIDriverList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{1} -} -func (m *CSIDriverList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriverList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriverList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriverList.Merge(m, src) -} -func (m *CSIDriverList) XXX_Size() int { - return m.Size() -} -func (m *CSIDriverList) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriverList.DiscardUnknown(m) -} +func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } +func (*VolumeAttachment) ProtoMessage() {} +func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -var xxx_messageInfo_CSIDriverList proto.InternalMessageInfo +func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } +func (*VolumeAttachmentList) ProtoMessage() {} +func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -func (m *CSIDriverSpec) Reset() { *m = CSIDriverSpec{} } -func (*CSIDriverSpec) ProtoMessage() {} -func (*CSIDriverSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{2} -} -func (m *CSIDriverSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSIDriverSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSIDriverSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSIDriverSpec.Merge(m, src) -} -func (m *CSIDriverSpec) XXX_Size() int { - return m.Size() -} -func (m *CSIDriverSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CSIDriverSpec.DiscardUnknown(m) -} +func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } +func (*VolumeAttachmentSource) ProtoMessage() {} +func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{4} } -var xxx_messageInfo_CSIDriverSpec proto.InternalMessageInfo +func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } +func (*VolumeAttachmentSpec) ProtoMessage() {} +func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -func (m *CSINode) Reset() { *m = CSINode{} } -func (*CSINode) ProtoMessage() {} -func (*CSINode) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{3} -} -func (m *CSINode) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSINode) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CSINode) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSINode.Merge(m, src) -} -func (m *CSINode) XXX_Size() int { - return m.Size() -} -func (m *CSINode) XXX_DiscardUnknown() { - xxx_messageInfo_CSINode.DiscardUnknown(m) -} +func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } +func (*VolumeAttachmentStatus) ProtoMessage() {} +func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -var xxx_messageInfo_CSINode proto.InternalMessageInfo +func (m *VolumeError) Reset() { *m = VolumeError{} } +func (*VolumeError) ProtoMessage() {} +func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -func (m *CSINodeDriver) Reset() { *m = CSINodeDriver{} } -func (*CSINodeDriver) ProtoMessage() {} -func (*CSINodeDriver) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{4} -} -func (m *CSINodeDriver) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) +func init() { + proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") + proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") + proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") + proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") + proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") + proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") + proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") + proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") } -func (m *CSINodeDriver) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *StorageClass) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *CSINodeDriver) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSINodeDriver.Merge(m, src) -} -func (m *CSINodeDriver) XXX_Size() int { - return m.Size() -} -func (m *CSINodeDriver) XXX_DiscardUnknown() { - xxx_messageInfo_CSINodeDriver.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_CSINodeDriver proto.InternalMessageInfo - -func (m *CSINodeList) Reset() { *m = CSINodeList{} } -func (*CSINodeList) ProtoMessage() {} -func (*CSINodeList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{5} -} -func (m *CSINodeList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m 
*CSINodeList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *CSINodeList) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSINodeList.Merge(m, src) -} -func (m *CSINodeList) XXX_Size() int { - return m.Size() -} -func (m *CSINodeList) XXX_DiscardUnknown() { - xxx_messageInfo_CSINodeList.DiscardUnknown(m) -} - -var xxx_messageInfo_CSINodeList proto.InternalMessageInfo - -func (m *CSINodeSpec) Reset() { *m = CSINodeSpec{} } -func (*CSINodeSpec) ProtoMessage() {} -func (*CSINodeSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{6} -} -func (m *CSINodeSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CSINodeSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err + i += n1 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) + i += copy(dAtA[i:], m.Provisioner) + if len(m.Parameters) > 0 { + keysForParameters := make([]string, 0, len(m.Parameters)) + for k := range m.Parameters { + keysForParameters = append(keysForParameters, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + for _, k := range keysForParameters { + dAtA[i] = 0x1a + i++ + v := m.Parameters[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } } - return b[:n], nil -} -func (m *CSINodeSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_CSINodeSpec.Merge(m, src) -} -func (m *CSINodeSpec) XXX_Size() int { - return m.Size() -} -func (m *CSINodeSpec) XXX_DiscardUnknown() { - xxx_messageInfo_CSINodeSpec.DiscardUnknown(m) + if m.ReclaimPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) + i += copy(dAtA[i:], *m.ReclaimPolicy) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.AllowVolumeExpansion != nil { + dAtA[i] = 0x30 + i++ + if *m.AllowVolumeExpansion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.VolumeBindingMode != nil { + dAtA[i] = 0x3a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) + i += copy(dAtA[i:], *m.VolumeBindingMode) + } + if len(m.AllowedTopologies) > 0 { + for _, msg := range m.AllowedTopologies { + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -var xxx_messageInfo_CSINodeSpec proto.InternalMessageInfo - -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { - return 
fileDescriptor_7d2980599fd0de80, []int{7} -} -func (m *StorageClass) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StorageClass) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *StorageClassList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *StorageClass) XXX_Merge(src proto.Message) { - xxx_messageInfo_StorageClass.Merge(m, src) -} -func (m *StorageClass) XXX_Size() int { - return m.Size() -} -func (m *StorageClass) XXX_DiscardUnknown() { - xxx_messageInfo_StorageClass.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_StorageClass proto.InternalMessageInfo - -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{8} -} -func (m *StorageClassList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StorageClassList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n2, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *StorageClassList) XXX_Merge(src proto.Message) { - xxx_messageInfo_StorageClassList.Merge(m, src) -} -func (m *StorageClassList) XXX_Size() int { - return m.Size() -} -func (m *StorageClassList) XXX_DiscardUnknown() { - xxx_messageInfo_StorageClassList.DiscardUnknown(m) + i += n2 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -var xxx_messageInfo_StorageClassList proto.InternalMessageInfo - -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{9} -} -func (m *VolumeAttachment) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *VolumeAttachment) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachment.Merge(m, src) -} -func (m *VolumeAttachment) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachment) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachment.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_VolumeAttachment proto.InternalMessageInfo - -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{10} -} -func (m *VolumeAttachmentList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentList) 
XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *VolumeAttachmentList) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentList.Merge(m, src) -} -func (m *VolumeAttachmentList) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentList) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentList.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachmentList proto.InternalMessageInfo - -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{11} -} -func (m *VolumeAttachmentSource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentSource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + i += n3 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) + n4, err := m.Spec.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *VolumeAttachmentSource) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentSource.Merge(m, src) -} -func (m *VolumeAttachmentSource) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentSource) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentSource.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachmentSource proto.InternalMessageInfo - -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{12} -} -func (m *VolumeAttachmentSpec) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) + i += n4 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) + n5, err := m.Status.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *VolumeAttachmentSpec) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentSpec.Merge(m, src) -} -func (m *VolumeAttachmentSpec) XXX_Size() int { - return m.Size() + i += n5 + return i, nil } -func (m *VolumeAttachmentSpec) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentSpec.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeAttachmentSpec proto.InternalMessageInfo -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{13} -} -func (m *VolumeAttachmentStatus) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeAttachmentStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *VolumeAttachmentStatus) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeAttachmentStatus.Merge(m, src) -} -func (m *VolumeAttachmentStatus) XXX_Size() int { - return m.Size() -} -func (m *VolumeAttachmentStatus) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeAttachmentStatus.DiscardUnknown(m) + return dAtA[:n], nil } -var xxx_messageInfo_VolumeAttachmentStatus proto.InternalMessageInfo - -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{14} -} -func (m *VolumeError) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n6, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return b[:n], nil -} -func (m *VolumeError) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeError.Merge(m, src) -} -func (m *VolumeError) XXX_Size() int { - return m.Size() -} -func (m *VolumeError) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeError.DiscardUnknown(m) + i += n6 + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil } -var xxx_messageInfo_VolumeError proto.InternalMessageInfo - -func (m *VolumeNodeResources) Reset() { *m = VolumeNodeResources{} } -func (*VolumeNodeResources) ProtoMessage() {} -func (*VolumeNodeResources) Descriptor() ([]byte, []int) { - return fileDescriptor_7d2980599fd0de80, []int{15} -} -func (m *VolumeNodeResources) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *VolumeNodeResources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) +func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return b[:n], nil -} -func (m *VolumeNodeResources) XXX_Merge(src proto.Message) { - xxx_messageInfo_VolumeNodeResources.Merge(m, src) -} -func (m *VolumeNodeResources) XXX_Size() int { - return m.Size() -} -func (m *VolumeNodeResources) XXX_DiscardUnknown() { - xxx_messageInfo_VolumeNodeResources.DiscardUnknown(m) -} - -var xxx_messageInfo_VolumeNodeResources proto.InternalMessageInfo - -func init() { - proto.RegisterType((*CSIDriver)(nil), "k8s.io.api.storage.v1beta1.CSIDriver") - proto.RegisterType((*CSIDriverList)(nil), "k8s.io.api.storage.v1beta1.CSIDriverList") - proto.RegisterType((*CSIDriverSpec)(nil), "k8s.io.api.storage.v1beta1.CSIDriverSpec") - proto.RegisterType((*CSINode)(nil), "k8s.io.api.storage.v1beta1.CSINode") - proto.RegisterType((*CSINodeDriver)(nil), "k8s.io.api.storage.v1beta1.CSINodeDriver") - proto.RegisterType((*CSINodeList)(nil), "k8s.io.api.storage.v1beta1.CSINodeList") - proto.RegisterType((*CSINodeSpec)(nil), "k8s.io.api.storage.v1beta1.CSINodeSpec") - proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") 
- proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.StorageClass.ParametersEntry") - proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") - proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") - proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") - proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") - proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") - proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus.AttachmentMetadataEntry") - proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") - proto.RegisterType((*VolumeNodeResources)(nil), "k8s.io.api.storage.v1beta1.VolumeNodeResources") -} - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptor_7d2980599fd0de80) + return dAtA[:n], nil } -var fileDescriptor_7d2980599fd0de80 = []byte{ - // 1344 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0xbd, 0x6f, 0xdb, 0x46, - 0x1b, 0x37, 0x2d, 0x7f, 0x9e, 0xec, 0x44, 0xbe, 0x18, 0xef, 0xab, 0x57, 0x83, 0x64, 0xe8, 0x45, - 0x1b, 0x27, 0x48, 0xc8, 0x24, 0x48, 0x83, 0x20, 0x40, 0x07, 0xd3, 0x31, 0x50, 0x25, 0x96, 0xe3, - 0x9e, 0x8d, 0xa0, 0x08, 0x3a, 0xf4, 0x44, 0x3e, 0x91, 0x19, 0x93, 0x3c, 0x86, 0x3c, 0xa9, 0xd5, - 0xd6, 0xa9, 0x73, 0xd1, 0xa1, 0x7f, 0x41, 0xff, 0x85, 0x16, 0x68, 0x97, 0x8e, 0xcd, 0x54, 0x04, - 0x9d, 0x32, 0x09, 0x0d, 0xbb, 0x76, 0xeb, 0x66, 0x74, 0x28, 0xee, 0x78, 0x12, 0x29, 0x89, 0x8a, - 0xed, 0x0e, 0xde, 0x78, 0xcf, 0xc7, 0xef, 0xf9, 0x7e, 0xee, 0x88, 0xb6, 0x8f, 0xef, 0x47, 0xba, - 0xc3, 0x8c, 0xe3, 0x4e, 0x0b, 0x42, 0x1f, 0x38, 0x44, 0x46, 0x17, 0x7c, 0x9b, 0x85, 0x86, 0x62, - 0xd0, 0xc0, 0x31, 0x22, 0xce, 0x42, 0xda, 0x06, 0xa3, 0x7b, 0xbb, 0x05, 0x9c, 0xde, 0x36, 0xda, - 0xe0, 0x43, 0x48, 0x39, 0xd8, 0x7a, 0x10, 0x32, 0xce, 0x70, 0x25, 0x91, 0xd5, 0x69, 0xe0, 0xe8, - 0x4a, 0x56, 0x57, 0xb2, 0x95, 0x9b, 0x6d, 0x87, 0x1f, 0x75, 0x5a, 0xba, 0xc5, 0x3c, 0xa3, 0xcd, - 0xda, 0xcc, 0x90, 0x2a, 0xad, 0xce, 0x73, 0x79, 0x92, 0x07, 0xf9, 0x95, 0x40, 0x55, 0xea, 0x19, - 0xb3, 0x16, 0x0b, 0x85, 0xcd, 0x71, 0x73, 0x95, 0xbb, 0xa9, 0x8c, 0x47, 0xad, 0x23, 0xc7, 0x87, - 0xb0, 0x67, 0x04, 0xc7, 0x6d, 0x41, 0x88, 0x0c, 0x0f, 0x38, 0xcd, 0xd3, 0x32, 0xa6, 0x69, 0x85, - 0x1d, 0x9f, 0x3b, 0x1e, 0x4c, 0x28, 0xdc, 0x3b, 0x4d, 0x21, 0xb2, 0x8e, 0xc0, 0xa3, 0xe3, 0x7a, - 0xf5, 0x9f, 0x34, 0xb4, 0xbc, 0x7d, 0xd0, 0x78, 0x18, 0x3a, 0x5d, 0x08, 0xf1, 0x67, 0x68, 0x49, - 0x78, 0x64, 0x53, 0x4e, 0xcb, 0xda, 0x86, 0xb6, 0x59, 0xbc, 0x73, 0x4b, 0x4f, 0xd3, 0x35, 0x04, - 0xd6, 0x83, 0xe3, 0xb6, 0x20, 0x44, 0xba, 0x90, 0xd6, 0xbb, 0xb7, 0xf5, 0x27, 0xad, 0x17, 0x60, - 0xf1, 0x26, 0x70, 0x6a, 0xe2, 0x57, 0xfd, 0xda, 0x4c, 0xdc, 0xaf, 0xa1, 0x94, 0x46, 0x86, 0xa8, - 0xf8, 0x31, 0x9a, 0x8b, 0x02, 0xb0, 0xca, 0xb3, 0x12, 0xfd, 0x9a, 0x3e, 0xbd, 0x18, 0xfa, 0xd0, - 0xad, 0x83, 0x00, 0x2c, 0x73, 0x45, 0xc1, 0xce, 0x89, 0x13, 0x91, 0x20, 0xf5, 0x1f, 0x35, 0xb4, - 0x3a, 0x94, 0xda, 0x75, 0x22, 0x8e, 0x3f, 0x9d, 0x08, 0x40, 0x3f, 0x5b, 0x00, 0x42, 0x5b, 0xba, - 0x5f, 0x52, 0x76, 0x96, 0x06, 0x94, 0x8c, 0xf3, 0x8f, 0xd0, 0xbc, 0xc3, 0xc1, 0x8b, 0xca, 0xb3, - 0x1b, 
0x85, 0xcd, 0xe2, 0x9d, 0xf7, 0xce, 0xe4, 0xbd, 0xb9, 0xaa, 0x10, 0xe7, 0x1b, 0x42, 0x97, - 0x24, 0x10, 0xf5, 0x3f, 0xb3, 0xbe, 0x8b, 0x98, 0xf0, 0x03, 0x74, 0x89, 0x72, 0x4e, 0xad, 0x23, - 0x02, 0x2f, 0x3b, 0x4e, 0x08, 0xb6, 0x8c, 0x60, 0xc9, 0xc4, 0x71, 0xbf, 0x76, 0x69, 0x6b, 0x84, - 0x43, 0xc6, 0x24, 0x85, 0x6e, 0xc0, 0xec, 0x86, 0xff, 0x9c, 0x3d, 0xf1, 0x9b, 0xac, 0xe3, 0x73, - 0x99, 0x60, 0xa5, 0xbb, 0x3f, 0xc2, 0x21, 0x63, 0x92, 0xd8, 0x42, 0xeb, 0x5d, 0xe6, 0x76, 0x3c, - 0xd8, 0x75, 0x9e, 0x83, 0xd5, 0xb3, 0x5c, 0x68, 0x32, 0x1b, 0xa2, 0x72, 0x61, 0xa3, 0xb0, 0xb9, - 0x6c, 0x1a, 0x71, 0xbf, 0xb6, 0xfe, 0x34, 0x87, 0x7f, 0xd2, 0xaf, 0x5d, 0xc9, 0xa1, 0x93, 0x5c, - 0xb0, 0xfa, 0x0f, 0x1a, 0x5a, 0xdc, 0x3e, 0x68, 0xec, 0x31, 0x1b, 0x2e, 0xa0, 0xcb, 0x1a, 0x23, - 0x5d, 0x76, 0xf5, 0x94, 0x3a, 0x09, 0xa7, 0xa6, 0xf6, 0xd8, 0x5f, 0x49, 0x9d, 0x84, 0x8c, 0x1a, - 0x92, 0x0d, 0x34, 0xe7, 0x53, 0x0f, 0xa4, 0xeb, 0xcb, 0xa9, 0xce, 0x1e, 0xf5, 0x80, 0x48, 0x0e, - 0x7e, 0x1f, 0x2d, 0xf8, 0xcc, 0x86, 0xc6, 0x43, 0xe9, 0xc0, 0xb2, 0x79, 0x49, 0xc9, 0x2c, 0xec, - 0x49, 0x2a, 0x51, 0x5c, 0x7c, 0x17, 0xad, 0x70, 0x16, 0x30, 0x97, 0xb5, 0x7b, 0x8f, 0xa1, 0x37, - 0xc8, 0x78, 0x29, 0xee, 0xd7, 0x56, 0x0e, 0x33, 0x74, 0x32, 0x22, 0x85, 0x5b, 0xa8, 0x48, 0x5d, - 0x97, 0x59, 0x94, 0xd3, 0x96, 0x0b, 0xe5, 0x39, 0x19, 0xa3, 0xf1, 0xae, 0x18, 0x93, 0x32, 0x09, - 0xe3, 0x04, 0x22, 0xd6, 0x09, 0x2d, 0x88, 0xcc, 0xcb, 0x71, 0xbf, 0x56, 0xdc, 0x4a, 0x71, 0x48, - 0x16, 0xb4, 0xfe, 0xbd, 0x86, 0x8a, 0x2a, 0xea, 0x0b, 0x98, 0xab, 0x8f, 0x46, 0xe7, 0xea, 0xff, - 0x67, 0xa8, 0xd7, 0x94, 0xa9, 0xb2, 0x86, 0x6e, 0xcb, 0x91, 0x3a, 0x44, 0x8b, 0xb6, 0x2c, 0x5a, - 0x54, 0xd6, 0x24, 0xf4, 0xb5, 0x33, 0x40, 0xab, 0xb1, 0xbd, 0xac, 0x0c, 0x2c, 0x26, 0xe7, 0x88, - 0x0c, 0xa0, 0xea, 0xdf, 0x2c, 0xa0, 0x95, 0x83, 0x44, 0x77, 0xdb, 0xa5, 0x51, 0x74, 0x01, 0x0d, - 0xfd, 0x01, 0x2a, 0x06, 0x21, 0xeb, 0x3a, 0x91, 0xc3, 0x7c, 0x08, 0x55, 0x5b, 0x5d, 0x51, 0x2a, - 0xc5, 0xfd, 0x94, 0x45, 0xb2, 0x72, 0xd8, 0x45, 0x28, 0xa0, 0x21, 0xf5, 0x80, 0x8b, 0x14, 0x14, - 0x64, 0x0a, 0xee, 0xbf, 0x2b, 0x05, 0xd9, 0xb0, 0xf4, 0xfd, 0xa1, 0xea, 0x8e, 0xcf, 0xc3, 0x5e, - 0xea, 0x62, 0xca, 0x20, 0x19, 0x7c, 0x7c, 0x8c, 0x56, 0x43, 0xb0, 0x5c, 0xea, 0x78, 0xfb, 0xcc, - 0x75, 0xac, 0x9e, 0x6c, 0xcd, 0x65, 0x73, 0x27, 0xee, 0xd7, 0x56, 0x49, 0x96, 0x71, 0xd2, 0xaf, - 0xdd, 0x9a, 0xbc, 0x3a, 0xf5, 0x7d, 0x08, 0x23, 0x27, 0xe2, 0xe0, 0xf3, 0xa4, 0x61, 0x47, 0x74, - 0xc8, 0x28, 0xb6, 0x98, 0x1d, 0x4f, 0xac, 0xaf, 0x27, 0x01, 0x77, 0x98, 0x1f, 0x95, 0xe7, 0xd3, - 0xd9, 0x69, 0x66, 0xe8, 0x64, 0x44, 0x0a, 0xef, 0xa2, 0x75, 0xd1, 0xe6, 0x9f, 0x27, 0x06, 0x76, - 0xbe, 0x08, 0xa8, 0x2f, 0x52, 0x55, 0x5e, 0x90, 0xdb, 0xb2, 0x2c, 0x76, 0xdd, 0x56, 0x0e, 0x9f, - 0xe4, 0x6a, 0xe1, 0x4f, 0xd0, 0x5a, 0xb2, 0xec, 0x4c, 0xc7, 0xb7, 0x1d, 0xbf, 0x2d, 0x56, 0x5d, - 0x79, 0x51, 0x06, 0x7d, 0x3d, 0xee, 0xd7, 0xd6, 0x9e, 0x8e, 0x33, 0x4f, 0xf2, 0x88, 0x64, 0x12, - 0x04, 0xbf, 0x44, 0x6b, 0xd2, 0x22, 0xd8, 0x6a, 0x11, 0x38, 0x10, 0x95, 0x97, 0x64, 0xfd, 0x36, - 0xb3, 0xf5, 0x13, 0xa9, 0x13, 0x8d, 0x34, 0x58, 0x17, 0x07, 0xe0, 0x82, 0xc5, 0x59, 0x78, 0x08, - 0xa1, 0x67, 0xfe, 0x4f, 0xd5, 0x6b, 0x6d, 0x6b, 0x1c, 0x8a, 0x4c, 0xa2, 0x57, 0x3e, 0x44, 0x97, - 0xc7, 0x0a, 0x8e, 0x4b, 0xa8, 0x70, 0x0c, 0xbd, 0x64, 0xd1, 0x11, 0xf1, 0x89, 0xd7, 0xd1, 0x7c, - 0x97, 0xba, 0x1d, 0x48, 0x3a, 0x90, 0x24, 0x87, 0x07, 0xb3, 0xf7, 0xb5, 0xfa, 0xcf, 0x1a, 0x2a, - 0x65, 0xbb, 0xe7, 0x02, 0xd6, 0x46, 0x73, 0x74, 0x6d, 0x6c, 0x9e, 0xb5, 0xb1, 0xa7, 0xec, 0x8e, - 0xef, 0x66, 0x51, 0x29, 0x29, 
0x4e, 0x72, 0xd9, 0x7a, 0xe0, 0xf3, 0x0b, 0x18, 0x6d, 0x32, 0x72, - 0x57, 0xdd, 0x3a, 0x7d, 0x8f, 0xa7, 0xde, 0x4d, 0xbb, 0xb4, 0xf0, 0x33, 0xb4, 0x10, 0x71, 0xca, - 0x3b, 0x62, 0xe6, 0x05, 0xea, 0x9d, 0x73, 0xa1, 0x4a, 0xcd, 0xf4, 0xd2, 0x4a, 0xce, 0x44, 0x21, - 0xd6, 0x7f, 0xd1, 0xd0, 0xfa, 0xb8, 0xca, 0x05, 0x14, 0xfb, 0xe3, 0xd1, 0x62, 0xdf, 0x38, 0x4f, - 0x44, 0x53, 0x0a, 0xfe, 0x9b, 0x86, 0xfe, 0x33, 0x11, 0xbc, 0xbc, 0x1e, 0xc5, 0x9e, 0x08, 0xc6, - 0xb6, 0xd1, 0x5e, 0x7a, 0xe7, 0xcb, 0x3d, 0xb1, 0x9f, 0xc3, 0x27, 0xb9, 0x5a, 0xf8, 0x05, 0x2a, - 0x39, 0xbe, 0xeb, 0xf8, 0x90, 0xd0, 0x0e, 0xd2, 0x72, 0xe7, 0x0e, 0xf3, 0x38, 0xb2, 0x2c, 0xf3, - 0x7a, 0xdc, 0xaf, 0x95, 0x1a, 0x63, 0x28, 0x64, 0x02, 0xb7, 0xfe, 0x6b, 0x4e, 0x79, 0xe4, 0x5d, - 0x78, 0x03, 0x2d, 0x25, 0x8f, 0x46, 0x08, 0x55, 0x18, 0xc3, 0x74, 0x6f, 0x29, 0x3a, 0x19, 0x4a, - 0xc8, 0x0e, 0x92, 0xa9, 0x50, 0x8e, 0x9e, 0xaf, 0x83, 0xa4, 0x66, 0xa6, 0x83, 0xe4, 0x99, 0x28, - 0x44, 0xe1, 0x89, 0x78, 0x00, 0xc9, 0x84, 0x16, 0x46, 0x3d, 0xd9, 0x53, 0x74, 0x32, 0x94, 0xa8, - 0xff, 0x5d, 0xc8, 0xa9, 0x92, 0x6c, 0xc5, 0x4c, 0x48, 0x83, 0xb7, 0xf2, 0x78, 0x48, 0xf6, 0x30, - 0x24, 0x1b, 0x7f, 0xab, 0x21, 0x4c, 0x87, 0x10, 0xcd, 0x41, 0xab, 0x26, 0xfd, 0xf4, 0xe8, 0xfc, - 0x13, 0xa2, 0x6f, 0x4d, 0x80, 0x25, 0xf7, 0x64, 0x45, 0x39, 0x81, 0x27, 0x05, 0x48, 0x8e, 0x07, - 0xd8, 0x41, 0xc5, 0x84, 0xba, 0x13, 0x86, 0x2c, 0x54, 0x23, 0x7b, 0xf5, 0x74, 0x87, 0xa4, 0xb8, - 0x59, 0x95, 0x0f, 0xb9, 0x54, 0xff, 0xa4, 0x5f, 0x2b, 0x66, 0xf8, 0x24, 0x8b, 0x2d, 0x4c, 0xd9, - 0x90, 0x9a, 0x9a, 0xfb, 0x17, 0xa6, 0x1e, 0xc2, 0x74, 0x53, 0x19, 0xec, 0xca, 0x0e, 0xfa, 0xef, - 0x94, 0x04, 0x9d, 0xeb, 0x5e, 0xf9, 0x4a, 0x43, 0x59, 0x1b, 0x78, 0x17, 0xcd, 0x89, 0xff, 0x59, - 0xb5, 0x61, 0xae, 0x9f, 0x6d, 0xc3, 0x1c, 0x3a, 0x1e, 0xa4, 0x8b, 0x52, 0x9c, 0x88, 0x44, 0xc1, - 0xd7, 0xd0, 0xa2, 0x07, 0x51, 0x44, 0xdb, 0xca, 0x72, 0xfa, 0xea, 0x6b, 0x26, 0x64, 0x32, 0xe0, - 0xd7, 0xef, 0xa1, 0x2b, 0x39, 0xef, 0x68, 0x5c, 0x43, 0xf3, 0x96, 0xfc, 0xe1, 0x12, 0x0e, 0xcd, - 0x9b, 0xcb, 0x62, 0xcb, 0x6c, 0xcb, 0xff, 0xac, 0x84, 0x6e, 0xde, 0x7c, 0xf5, 0xb6, 0x3a, 0xf3, - 0xfa, 0x6d, 0x75, 0xe6, 0xcd, 0xdb, 0xea, 0xcc, 0x97, 0x71, 0x55, 0x7b, 0x15, 0x57, 0xb5, 0xd7, - 0x71, 0x55, 0x7b, 0x13, 0x57, 0xb5, 0xdf, 0xe3, 0xaa, 0xf6, 0xf5, 0x1f, 0xd5, 0x99, 0x67, 0x8b, - 0x2a, 0xdf, 0xff, 0x04, 0x00, 0x00, 0xff, 0xff, 0x72, 0xff, 0xde, 0x2e, 0xe4, 0x10, 0x00, 0x00, +func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.PersistentVolumeName != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) + i += copy(dAtA[i:], *m.PersistentVolumeName) + } + return i, nil } -func (m *CSIDriver) Marshal() (dAtA []byte, err error) { +func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CSIDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + dAtA[i] = 0xa + i++ + i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) + i += copy(dAtA[i:], m.Attacher) dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) + n7, err := m.Source.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += n7 + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) + i += copy(dAtA[i:], m.NodeName) + return i, nil } -func (m *CSIDriverList) Marshal() (dAtA []byte, err error) { +func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CSIDriverList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriverList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + dAtA[i] = 0x8 + i++ + if m.Attached { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + if len(m.AttachmentMetadata) > 0 { + keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) + for k := range m.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + for _, k := range keysForAttachmentMetadata { + dAtA[i] = 0x12 + i++ + v := m.AttachmentMetadata[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if m.AttachError != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) + n8, err := m.AttachError.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n8 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + if m.DetachError != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) + n9, err := m.DetachError.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil } -func (m *CSIDriverSpec) Marshal() (dAtA []byte, err error) { +func (m *VolumeError) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *CSIDriverSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSIDriverSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *VolumeError) MarshalTo(dAtA []byte) 
(int, error) { + var i int _ = i var l int _ = l - if len(m.VolumeLifecycleModes) > 0 { - for iNdEx := len(m.VolumeLifecycleModes) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.VolumeLifecycleModes[iNdEx]) - copy(dAtA[i:], m.VolumeLifecycleModes[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeLifecycleModes[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - if m.PodInfoOnMount != nil { - i-- - if *m.PodInfoOnMount { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if m.AttachRequired != nil { - i-- - if *m.AttachRequired { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func (m *CSINode) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) + n10, err := m.Time.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return dAtA[:n], nil + i += n10 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + return i, nil } -func (m *CSINode) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 } - -func (m *CSINode) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *StorageClass) Size() (n int) { var l int _ = l - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Provisioner) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Parameters) > 0 { + for k, v := range m.Parameters { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.ReclaimPolicy != nil { + l = len(*m.ReclaimPolicy) + n += 1 + l + sovGenerated(uint64(l)) + } + if len(m.MountOptions) > 0 { + for _, s := range m.MountOptions { + l = len(s) + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *CSINodeDriver) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.AllowVolumeExpansion != nil { + n += 2 } - return dAtA[:n], nil -} - -func (m *CSINodeDriver) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return 
m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CSINodeDriver) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Allocatable != nil { - { - size, err := m.Allocatable.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 + if m.VolumeBindingMode != nil { + l = len(*m.VolumeBindingMode) + n += 1 + l + sovGenerated(uint64(l)) } - if len(m.TopologyKeys) > 0 { - for iNdEx := len(m.TopologyKeys) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.TopologyKeys[iNdEx]) - copy(dAtA[i:], m.TopologyKeys[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TopologyKeys[iNdEx]))) - i-- - dAtA[i] = 0x1a + if len(m.AllowedTopologies) > 0 { + for _, e := range m.AllowedTopologies { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } } - i -= len(m.NodeID) - copy(dAtA[i:], m.NodeID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeID))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *CSINodeList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *StorageClassList) Size() (n int) { + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } } - return dAtA[:n], nil + return n } -func (m *CSINodeList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *VolumeAttachment) Size() (n int) { + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Spec.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Status.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *CSINodeList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *VolumeAttachmentList) Size() (n int) { var l int _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return n } -func (m *CSINodeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (m *VolumeAttachmentSource) Size() (n int) { + var l int + _ = l + if m.PersistentVolumeName != nil { + l = len(*m.PersistentVolumeName) + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil + return n } -func (m *CSINodeSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func (m *VolumeAttachmentSpec) Size() (n int) { + var l int + _ = l + l = len(m.Attacher) + n += 1 + l + sovGenerated(uint64(l)) + l = m.Source.Size() + n += 1 + l + 
sovGenerated(uint64(l)) + l = len(m.NodeName) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *CSINodeSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *VolumeAttachmentStatus) Size() (n int) { var l int _ = l - if len(m.Drivers) > 0 { - for iNdEx := len(m.Drivers) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Drivers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa + n += 2 + if len(m.AttachmentMetadata) > 0 { + for k, v := range m.AttachmentMetadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) } } - return len(dAtA) - i, nil -} - -func (m *StorageClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + if m.AttachError != nil { + l = m.AttachError.Size() + n += 1 + l + sovGenerated(uint64(l)) } - return dAtA[:n], nil -} - -func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + if m.DetachError != nil { + l = m.DetachError.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + return n } -func (m *StorageClass) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i +func (m *VolumeError) Size() (n int) { var l int _ = l - if len(m.AllowedTopologies) > 0 { - for iNdEx := len(m.AllowedTopologies) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.AllowedTopologies[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x42 - } - } - if m.VolumeBindingMode != nil { - i -= len(*m.VolumeBindingMode) - copy(dAtA[i:], *m.VolumeBindingMode) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i-- - dAtA[i] = 0x3a - } - if m.AllowVolumeExpansion != nil { - i-- - if *m.AllowVolumeExpansion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - if len(m.MountOptions) > 0 { - for iNdEx := len(m.MountOptions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.MountOptions[iNdEx]) - copy(dAtA[i:], m.MountOptions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountOptions[iNdEx]))) - i-- - dAtA[i] = 0x2a - } - } - if m.ReclaimPolicy != nil { - i -= len(*m.ReclaimPolicy) - copy(dAtA[i:], *m.ReclaimPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i-- - dAtA[i] = 0x22 - } - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for iNdEx := len(keysForParameters) - 1; iNdEx >= 0; iNdEx-- { - v := m.Parameters[string(keysForParameters[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForParameters[iNdEx]) - copy(dAtA[i:], keysForParameters[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForParameters[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.Provisioner) - copy(dAtA[i:], m.Provisioner) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - 
i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StorageClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StorageClassList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + l = m.Time.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = len(m.Message) + n += 1 + l + sovGenerated(uint64(l)) + return n } -func (m *VolumeAttachment) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - { - size, err := m.Status.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - { - size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err } - return dAtA[:n], nil + return n } - -func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } - -func (m *VolumeAttachmentList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } +func (this *StorageClass) String() string { + if this == nil { + return "nil" } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if 
err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + keysForParameters := make([]string, 0, len(this.Parameters)) + for k := range this.Parameters { + keysForParameters = append(keysForParameters, k) } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err + github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) + mapStringForParameters := "map[string]string{" + for _, k := range keysForParameters { + mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + mapStringForParameters += "}" + s := strings.Join([]string{`&StorageClass{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, + `Parameters:` + mapStringForParameters + `,`, + `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, + `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, + `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, + `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, + `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *VolumeAttachmentSource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.InlineVolumeSpec != nil { - { - size, err := m.InlineVolumeSpec.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - if m.PersistentVolumeName != nil { - i -= len(*m.PersistentVolumeName) - copy(dAtA[i:], *m.PersistentVolumeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i-- - dAtA[i] = 0xa +func (this *StorageClassList) String() string { + if this == nil { + return "nil" } - return len(dAtA) - i, nil + s := strings.Join([]string{`&StorageClassList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *VolumeAttachment) String() string { + if this == nil { + return "nil" } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) + s := strings.Join([]string{`&VolumeAttachment{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), 
"VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, + `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *VolumeAttachmentSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.NodeName) - copy(dAtA[i:], m.NodeName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i-- - dAtA[i] = 0x1a - { - size, err := m.Source.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) +func (this *VolumeAttachmentList) String() string { + if this == nil { + return "nil" } - i-- - dAtA[i] = 0x12 - i -= len(m.Attacher) - copy(dAtA[i:], m.Attacher) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + s := strings.Join([]string{`&VolumeAttachmentList{`, + `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s } - -func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err +func (this *VolumeAttachmentSource) String() string { + if this == nil { + return "nil" } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeAttachmentStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.DetachError != nil { - { - size, err := m.DetachError.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - if m.AttachError != nil { - { - size, err := m.AttachError.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x1a - } - if len(m.AttachmentMetadata) > 0 { - keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) - for k := range m.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for iNdEx := len(keysForAttachmentMetadata) - 1; iNdEx >= 0; iNdEx-- { - v := m.AttachmentMetadata[string(keysForAttachmentMetadata[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAttachmentMetadata[iNdEx]) - copy(dAtA[i:], keysForAttachmentMetadata[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAttachmentMetadata[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x12 - } - } - i-- - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *VolumeError) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], 
nil -} - -func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeError) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - { - size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *VolumeNodeResources) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeNodeResources) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *VolumeNodeResources) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Count != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.Count)) - i-- - dAtA[i] = 0x8 - } - return len(dAtA) - i, nil -} - -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *CSIDriver) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CSIDriverList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CSIDriverSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.AttachRequired != nil { - n += 2 - } - if m.PodInfoOnMount != nil { - n += 2 - } - if len(m.VolumeLifecycleModes) > 0 { - for _, s := range m.VolumeLifecycleModes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CSINode) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CSINodeDriver) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.TopologyKeys) > 0 { - for _, s := range m.TopologyKeys { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Allocatable != nil { - l = m.Allocatable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *CSINodeList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CSINodeSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Drivers) > 0 { - for _, e := range m.Drivers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StorageClass) Size() 
(n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.ReclaimPolicy != nil { - l = len(*m.ReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AllowVolumeExpansion != nil { - n += 2 - } - if m.VolumeBindingMode != nil { - l = len(*m.VolumeBindingMode) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedTopologies) > 0 { - for _, e := range m.AllowedTopologies { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StorageClassList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *VolumeAttachment) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeAttachmentList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *VolumeAttachmentSource) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.PersistentVolumeName != nil { - l = len(*m.PersistentVolumeName) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.InlineVolumeSpec != nil { - l = m.InlineVolumeSpec.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeAttachmentSpec) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Attacher) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeAttachmentStatus) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 2 - if len(m.AttachmentMetadata) > 0 { - for k, v := range m.AttachmentMetadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.AttachError != nil { - l = m.AttachError.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DetachError != nil { - l = m.DetachError.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeError) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.Time.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeNodeResources) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Count != nil { - n += 1 + sovGenerated(uint64(*m.Count)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - 
return (math_bits.Len64(x|1) + 6) / 7 -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CSIDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSIDriver{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSIDriverSpec", "CSIDriverSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIDriverList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]CSIDriver{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSIDriver", "CSIDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&CSIDriverList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *CSIDriverSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSIDriverSpec{`, - `AttachRequired:` + valueToStringGenerated(this.AttachRequired) + `,`, - `PodInfoOnMount:` + valueToStringGenerated(this.PodInfoOnMount) + `,`, - `VolumeLifecycleModes:` + fmt.Sprintf("%v", this.VolumeLifecycleModes) + `,`, - `}`, - }, "") - return s -} -func (this *CSINode) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSINode{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CSINodeSpec", "CSINodeSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSINodeDriver) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CSINodeDriver{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `NodeID:` + fmt.Sprintf("%v", this.NodeID) + `,`, - `TopologyKeys:` + fmt.Sprintf("%v", this.TopologyKeys) + `,`, - `Allocatable:` + strings.Replace(this.Allocatable.String(), "VolumeNodeResources", "VolumeNodeResources", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSINodeList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]CSINode{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "CSINode", "CSINode", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&CSINodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *CSINodeSpec) String() string { - if this == nil { - return "nil" - } - repeatedStringForDrivers := "[]CSINodeDriver{" - for _, f := range this.Drivers { - repeatedStringForDrivers += strings.Replace(strings.Replace(f.String(), "CSINodeDriver", "CSINodeDriver", 1), `&`, ``, 1) + "," - } - repeatedStringForDrivers += "}" - s := strings.Join([]string{`&CSINodeSpec{`, - `Drivers:` + repeatedStringForDrivers + `,`, - `}`, - }, "") - return s -} -func (this *StorageClass) String() string { - if this == nil { - return "nil" - } - repeatedStringForAllowedTopologies := "[]TopologySelectorTerm{" - for _, f := 
range this.AllowedTopologies { - repeatedStringForAllowedTopologies += fmt.Sprintf("%v", f) + "," - } - repeatedStringForAllowedTopologies += "}" - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) - } - mapStringForParameters += "}" - s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, - `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + repeatedStringForAllowedTopologies + `,`, - `}`, - }, "") - return s -} -func (this *StorageClassList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]StorageClass{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "StorageClass", "StorageClass", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]VolumeAttachment{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSource{`, - `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `InlineVolumeSpec:` + strings.Replace(fmt.Sprintf("%v", this.InlineVolumeSpec), "PersistentVolumeSpec", "v11.PersistentVolumeSpec", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSpec) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&VolumeAttachmentSpec{`, - `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentStatus) String() string { - if this == nil { - return "nil" - } - keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) - for k := range this.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - mapStringForAttachmentMetadata := "map[string]string{" - for _, k := range keysForAttachmentMetadata { - mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) - } - mapStringForAttachmentMetadata += "}" - s := strings.Join([]string{`&VolumeAttachmentStatus{`, - `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, - `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(this.AttachError.String(), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(this.DetachError.String(), "VolumeError", "VolumeError", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeError) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeNodeResources) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeNodeResources{`, - `Count:` + valueToStringGenerated(this.Count) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CSIDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } 
- var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIDriverList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriverList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIDriverList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CSIDriver{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIDriverSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx 
- var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIDriverSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIDriverSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachRequired", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AttachRequired = &b - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodInfoOnMount", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.PodInfoOnMount = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeLifecycleModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeLifecycleModes = append(m.VolumeLifecycleModes, VolumeLifecycleMode(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSINode) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSINode: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSINode: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { 
- break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSINodeDriver) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSINodeDriver: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSINodeDriver: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKeys", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKeys = append(m.TopologyKeys, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Allocatable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Allocatable == nil { - m.Allocatable = &VolumeNodeResources{} - } - if err := m.Allocatable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } + s := strings.Join([]string{`&VolumeAttachmentSource{`, + `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentSpec) String() string { + if this == nil { + return "nil" } - - if iNdEx > l { - return io.ErrUnexpectedEOF + s := strings.Join([]string{`&VolumeAttachmentSpec{`, + `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, + `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, + `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeAttachmentStatus) String() string { + if this == nil { + return "nil" } - return nil + keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) + for k := range this.AttachmentMetadata { + keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) + mapStringForAttachmentMetadata := "map[string]string{" + for _, k := range keysForAttachmentMetadata { + mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) + } + mapStringForAttachmentMetadata += "}" + s := strings.Join([]string{`&VolumeAttachmentStatus{`, + `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, + `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, + `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, + `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, + `}`, + }, "") + return s +} +func (this *VolumeError) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&VolumeError{`, + `Time:` + 
strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, + `Message:` + fmt.Sprintf("%v", this.Message) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) } -func (m *CSINodeList) Unmarshal(dAtA []byte) error { +func (m *StorageClass) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2658,7 +793,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -2666,15 +801,15 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: CSINodeList: wiretype end group for non-group") + return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: CSINodeList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2686,7 +821,7 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2695,108 +830,18 @@ func (m *CSINodeList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CSINode{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSINodeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSINodeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drivers", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2806,82 +851,24 @@ func (m *CSINodeSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Drivers = append(m.Drivers, CSINodeDriver{}) - if err := m.Drivers[len(m.Drivers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Provisioner = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -2893,7 +880,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -2902,21 +889,10 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) - } - var stringLen uint64 + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2926,29 +902,12 @@ func (m 
*StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provisioner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2958,29 +917,41 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.Parameters == nil { m.Parameters = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -2990,86 +961,26 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + stringLenmapvalue |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - 
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Parameters[mapkey] = mapvalue + } else { + var mapvalue string + m.Parameters[mapkey] = mapvalue } - m.Parameters[mapkey] = mapvalue iNdEx = postIndex case 4: if wireType != 2 { @@ -3085,7 +996,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3095,9 +1006,6 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3118,7 +1026,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3128,9 +1036,6 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3150,7 +1055,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3171,7 +1076,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3181,9 +1086,6 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3204,7 +1106,7 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3213,13 +1115,10 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.AllowedTopologies = append(m.AllowedTopologies, v11.TopologySelectorTerm{}) + m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -3233,9 +1132,6 @@ func (m *StorageClass) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3263,7 +1159,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << 
shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3291,7 +1187,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3300,9 +1196,6 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3324,7 +1217,7 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3333,9 +1226,6 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3353,9 +1243,6 @@ func (m *StorageClassList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3383,7 +1270,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3411,7 +1298,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3420,9 +1307,6 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3444,7 +1328,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3453,9 +1337,6 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3477,7 +1358,7 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3486,9 +1367,6 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3505,9 +1383,6 @@ func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3535,7 +1410,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3563,7 +1438,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3572,9 +1447,6 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } 
postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3596,7 +1468,7 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3605,9 +1477,6 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3625,9 +1494,6 @@ func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3655,7 +1521,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3683,7 +1549,7 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3693,51 +1559,12 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } s := string(dAtA[iNdEx:postIndex]) m.PersistentVolumeName = &s iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InlineVolumeSpec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.InlineVolumeSpec == nil { - m.InlineVolumeSpec = &v11.PersistentVolumeSpec{} - } - if err := m.InlineVolumeSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -3747,9 +1574,6 @@ func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3777,7 +1601,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3805,7 +1629,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3815,9 +1639,6 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3837,7 +1658,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << 
shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3846,9 +1667,6 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3870,7 +1688,7 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3880,9 +1698,6 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -3897,9 +1712,6 @@ func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -3927,7 +1739,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -3955,7 +1767,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3975,7 +1787,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -3984,20 +1796,54 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.AttachmentMetadata == nil { m.AttachmentMetadata = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -4007,86 +1853,41 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.AttachmentMetadata[mapkey] = mapvalue + } else { + var mapvalue string + m.AttachmentMetadata[mapkey] = mapvalue } - m.AttachmentMetadata[mapkey] = mapvalue iNdEx = postIndex case 3: if wireType != 2 { @@ -4102,7 +1903,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4111,9 +1912,6 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4138,7 +1936,7 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4147,9 +1945,6 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4169,9 +1964,6 @@ func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4199,7 +1991,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4227,7 +2019,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4236,9 +2028,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4260,7 +2049,7 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4270,9 +2059,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4287,82 +2073,6 @@ func (m *VolumeError) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeNodeResources: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeNodeResources: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int32(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.Count = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4378,7 +2088,6 @@ func (m *VolumeNodeResources) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -4410,8 +2119,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -4428,34 +2139,123 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 988 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45, + 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x4d, 0x86, 0x08, 0x8c, 0x0f, 0x76, 0xe4, 0x0b, 0xa6, + 0x6a, 0x77, 0x9b, 0xa8, 0xa0, 0x08, 0x89, 0x83, 0xb7, 0xe4, 0x00, 0x8a, 0xdb, 0x30, 0x89, 0x2a, + 0x54, 0x71, 0x60, 0xb2, 0xfb, 0x76, 0xb3, 0x78, 0x77, 0x67, 0x99, 0x19, 0x1b, 0x72, 0xe3, 0xc4, + 0x19, 0x71, 0xe0, 0x17, 0xf0, 0x3f, 0x38, 0x92, 0x13, 0xea, 0xb1, 0x27, 0x8b, 0x2c, 0xff, 0x22, + 0xe2, 0x80, 0x66, 0x76, 0x62, 0xaf, 0xbd, 0x0e, 0x6d, 0x7a, 0xe8, 0xcd, 0xef, 0xc7, 0xf3, 0xbc, + 0xdf, 0xb3, 0x46, 0x8f, 0xfa, 0xfb, 0xc2, 0x0e, 0x99, 0xd3, 0x1f, 0x9c, 0x02, 0x4f, 0x40, 0x82, + 0x70, 0x86, 0x90, 0xf8, 0x8c, 0x3b, 0xc6, 0x40, 0xd3, 0xd0, 0x11, 0x92, 0x71, 0x1a, 0x80, 0x33, + 0xdc, 0x3d, 0x05, 0x49, 0x77, 0x9d, 0x00, 0x12, 0xe0, 0x54, 0x82, 0x6f, 0xa7, 0x9c, 0x49, 0x86, + 0x1b, 0xb9, 0xaf, 0x4d, 0xd3, 0xd0, 0x36, 0xbe, 0xb6, 0xf1, 0x6d, 0xdc, 0x0f, 0x42, 0x79, 0x36, + 0x38, 0xb5, 0x3d, 0x16, 0x3b, 0x01, 0x0b, 0x98, 0xa3, 0x21, 0xa7, 0x83, 0xe7, 0x5a, 0xd2, 0x82, + 0xfe, 0x95, 0x53, 0x35, 0xda, 0x85, 0xb0, 0x1e, 0xe3, 0x2a, 0xe6, 0x6c, 0xb8, 0xc6, 0xc3, 0x89, + 0x4f, 0x4c, 0xbd, 0xb3, 0x30, 0x01, 0x7e, 0xee, 0xa4, 0xfd, 0x40, 0x29, 0x84, 0x13, 0x83, 0xa4, + 0xf3, 0x50, 0xce, 0x4d, 0x28, 0x3e, 0x48, 0x64, 0x18, 0x43, 0x09, 0xf0, 0xc9, 0xab, 0x00, 0xc2, + 0x3b, 0x83, 0x98, 0xce, 0xe2, 0xda, 0xbf, 0xae, 0xa0, 0xf5, 0xe3, 0xbc, 0x0b, 0x8f, 0x22, 0x2a, + 0x04, 0xfe, 0x16, 0x55, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x6e, 0xed, 0x58, 0x9d, 0xda, 0xde, 0x03, + 0x7b, 0xd2, 0xb1, 0x31, 0xb7, 0x9d, 0xf6, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x87, 0xbb, 0xf6, + 0x93, 0xd3, 0xef, 0xc0, 0x93, 0x3d, 0x90, 0xd4, 0xc5, 0x17, 0xa3, 0xd6, 0x42, 0x36, 0x6a, 0xa1, + 0x89, 0x8e, 0x8c, 0x59, 0xf1, 0xc7, 0xa8, 0x96, 0x72, 0x36, 0x0c, 0x45, 0xc8, 0x12, 0xe0, 0xf5, + 0xc5, 0x1d, 0xab, 0xb3, 0xe6, 0xbe, 0x6b, 0x20, 0xb5, 0xa3, 0x89, 0x89, 0x14, 0xfd, 0x70, 0x84, + 0x50, 0x4a, 0x39, 0x8d, 0x41, 0x02, 0x17, 0xf5, 0xca, 0x4e, 0xa5, 0x53, 0xdb, 0xdb, 0xb7, 0x6f, + 0x1e, 0xa6, 0x5d, 0x2c, 0xcb, 0x3e, 0x1a, 0x43, 0x0f, 0x12, 0xc9, 0xcf, 0x27, 0x29, 0x4e, 0x0c, + 0xa4, 0xc0, 0x8f, 0xfb, 0x68, 0x83, 0x83, 0x17, 
0xd1, 0x30, 0x3e, 0x62, 0x51, 0xe8, 0x9d, 0xd7, + 0x97, 0x74, 0x9a, 0x07, 0xd9, 0xa8, 0xb5, 0x41, 0x8a, 0x86, 0xab, 0x51, 0xeb, 0x41, 0x79, 0x0d, + 0xec, 0x23, 0xe0, 0x22, 0x14, 0x12, 0x12, 0xf9, 0x94, 0x45, 0x83, 0x18, 0xa6, 0x30, 0x64, 0x9a, + 0x1b, 0x3f, 0x44, 0xeb, 0x31, 0x1b, 0x24, 0xf2, 0x49, 0x2a, 0x43, 0x96, 0x88, 0xfa, 0xf2, 0x4e, + 0xa5, 0xb3, 0xe6, 0x6e, 0x66, 0xa3, 0xd6, 0x7a, 0xaf, 0xa0, 0x27, 0x53, 0x5e, 0xf8, 0x10, 0x6d, + 0xd3, 0x28, 0x62, 0x3f, 0xe4, 0x01, 0x0e, 0x7e, 0x4c, 0x69, 0xa2, 0x5a, 0x55, 0x5f, 0xd9, 0xb1, + 0x3a, 0x55, 0xb7, 0x9e, 0x8d, 0x5a, 0xdb, 0xdd, 0x39, 0x76, 0x32, 0x17, 0x85, 0xbf, 0x46, 0x5b, + 0x43, 0xad, 0x72, 0xc3, 0xc4, 0x0f, 0x93, 0xa0, 0xc7, 0x7c, 0xa8, 0xaf, 0xea, 0xa2, 0xef, 0x66, + 0xa3, 0xd6, 0xd6, 0xd3, 0x59, 0xe3, 0xd5, 0x3c, 0x25, 0x29, 0x93, 0xe0, 0xef, 0xd1, 0x96, 0x8e, + 0x08, 0xfe, 0x09, 0x4b, 0x59, 0xc4, 0x82, 0x10, 0x44, 0xbd, 0xaa, 0xe7, 0xd7, 0x29, 0xce, 0x4f, + 0xb5, 0x4e, 0x2d, 0x92, 0xf1, 0x3a, 0x3f, 0x86, 0x08, 0x3c, 0xc9, 0xf8, 0x09, 0xf0, 0xd8, 0xfd, + 0xc0, 0xcc, 0x6b, 0xab, 0x3b, 0x4b, 0x45, 0xca, 0xec, 0x8d, 0xcf, 0xd0, 0x9d, 0x99, 0x81, 0xe3, + 0x4d, 0x54, 0xe9, 0xc3, 0xb9, 0x5e, 0xe9, 0x35, 0xa2, 0x7e, 0xe2, 0x6d, 0xb4, 0x3c, 0xa4, 0xd1, + 0x00, 0xf2, 0x0d, 0x24, 0xb9, 0xf0, 0xe9, 0xe2, 0xbe, 0xd5, 0xfe, 0xc3, 0x42, 0x9b, 0xc5, 0xed, + 0x39, 0x0c, 0x85, 0xc4, 0xdf, 0x94, 0x0e, 0xc3, 0x7e, 0xbd, 0xc3, 0x50, 0x68, 0x7d, 0x16, 0x9b, + 0xa6, 0x86, 0xea, 0xb5, 0xa6, 0x70, 0x14, 0x3d, 0xb4, 0x1c, 0x4a, 0x88, 0x45, 0x7d, 0xb1, 0xdc, + 0x98, 0xff, 0x5b, 0x6c, 0x77, 0xc3, 0x90, 0x2e, 0x7f, 0xa1, 0xe0, 0x24, 0x67, 0x69, 0xff, 0xbe, + 0x88, 0x36, 0xf3, 0xe1, 0x74, 0xa5, 0xa4, 0xde, 0x59, 0x0c, 0x89, 0x7c, 0x0b, 0xa7, 0x4d, 0xd0, + 0x92, 0x48, 0xc1, 0xd3, 0x1d, 0x9d, 0x66, 0x2f, 0x15, 0x31, 0x9b, 0xdd, 0x71, 0x0a, 0x9e, 0xbb, + 0x6e, 0xd8, 0x97, 0x94, 0x44, 0x34, 0x17, 0x7e, 0x86, 0x56, 0x84, 0xa4, 0x72, 0xa0, 0x6e, 0x5e, + 0xb1, 0xee, 0xdd, 0x8a, 0x55, 0x23, 0xdd, 0x77, 0x0c, 0xef, 0x4a, 0x2e, 0x13, 0xc3, 0xd8, 0xfe, + 0xd3, 0x42, 0xdb, 0xb3, 0x90, 0xb7, 0x30, 0xec, 0xaf, 0xa6, 0x87, 0x7d, 0xef, 0x36, 0x15, 0xdd, + 0x30, 0xf0, 0xe7, 0xe8, 0xbd, 0x52, 0xed, 0x6c, 0xc0, 0x3d, 0x50, 0xcf, 0x44, 0x3a, 0xf3, 0x18, + 0x3d, 0xa6, 0x31, 0xe4, 0x97, 0x90, 0x3f, 0x13, 0x47, 0x73, 0xec, 0x64, 0x2e, 0xaa, 0xfd, 0xd7, + 0x9c, 0x8e, 0xa9, 0x61, 0xe1, 0x7b, 0xa8, 0x4a, 0xb5, 0x06, 0xb8, 0xa1, 0x1e, 0x77, 0xa0, 0x6b, + 0xf4, 0x64, 0xec, 0xa1, 0x87, 0xaa, 0xd3, 0x33, 0xab, 0x72, 0xbb, 0xa1, 0x6a, 0x64, 0x61, 0xa8, + 0x5a, 0x26, 0x86, 0x51, 0x65, 0x92, 0x30, 0x3f, 0x2f, 0xb2, 0x32, 0x9d, 0xc9, 0x63, 0xa3, 0x27, + 0x63, 0x8f, 0xf6, 0xbf, 0x95, 0x39, 0x9d, 0xd3, 0xdb, 0x51, 0x28, 0xc9, 0xd7, 0x25, 0x55, 0x4b, + 0x25, 0xf9, 0xe3, 0x92, 0x7c, 0xfc, 0x9b, 0x85, 0x30, 0x1d, 0x53, 0xf4, 0xae, 0xb7, 0x27, 0x1f, + 0xf1, 0x97, 0xb7, 0x5f, 0x5a, 0xbb, 0x5b, 0x22, 0xcb, 0x3f, 0x5d, 0x0d, 0x93, 0x04, 0x2e, 0x3b, + 0x90, 0x39, 0x19, 0xe0, 0x10, 0xd5, 0x72, 0xed, 0x01, 0xe7, 0x8c, 0x9b, 0x2b, 0xfa, 0xf0, 0xd5, + 0x09, 0x69, 0x77, 0xb7, 0xa9, 0x3e, 0xca, 0xdd, 0x09, 0xfe, 0x6a, 0xd4, 0xaa, 0x15, 0xec, 0xa4, + 0xc8, 0xad, 0x42, 0xf9, 0x30, 0x09, 0xb5, 0xf4, 0x06, 0xa1, 0x3e, 0x87, 0x9b, 0x43, 0x15, 0xb8, + 0x1b, 0x07, 0xe8, 0xfd, 0x1b, 0x1a, 0x74, 0xab, 0xa7, 0xfe, 0x67, 0x0b, 0x15, 0x63, 0xe0, 0x43, + 0xb4, 0xa4, 0xfe, 0x2e, 0x99, 0xa3, 0xbf, 0xfb, 0x7a, 0x47, 0x7f, 0x12, 0xc6, 0x30, 0x79, 0xbb, + 0x94, 0x44, 0x34, 0x0b, 0xfe, 0x08, 0xad, 0xc6, 0x20, 0x04, 0x0d, 0x4c, 0x64, 0xf7, 0x8e, 0x71, + 0x5a, 0xed, 0xe5, 0x6a, 0x72, 0x6d, 0x77, 0xef, 0x5f, 0x5c, 0x36, 0x17, 
0x5e, 0x5c, 0x36, 0x17, + 0x5e, 0x5e, 0x36, 0x17, 0x7e, 0xca, 0x9a, 0xd6, 0x45, 0xd6, 0xb4, 0x5e, 0x64, 0x4d, 0xeb, 0x65, + 0xd6, 0xb4, 0xfe, 0xce, 0x9a, 0xd6, 0x2f, 0xff, 0x34, 0x17, 0x9e, 0xad, 0x9a, 0xbe, 0xfd, 0x17, + 0x00, 0x00, 0xff, 0xff, 0xb4, 0x63, 0x7e, 0xa7, 0x0b, 0x0b, 0x00, 0x00, +} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto index 373a154..ecf53be 100644 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ b/vendor/k8s.io/api/storage/v1beta1/generated.proto @@ -29,181 +29,14 @@ import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; // Package-wide variables from generator "generated". option go_package = "v1beta1"; -// CSIDriver captures information about a Container Storage Interface (CSI) -// volume driver deployed on the cluster. -// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the -// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically -// creates a CSIDriver object representing the driver. -// Kubernetes attach detach controller uses this object to determine whether attach is required. -// Kubelet uses this object to determine whether pod information needs to be passed on mount. -// CSIDriver objects are non-namespaced. -message CSIDriver { - // Standard object metadata. - // metadata.Name indicates the name of the CSI driver that this object - // refers to; it MUST be the same name returned by the CSI GetPluginName() - // call for that driver. - // The driver name must be 63 characters or less, beginning and ending with - // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and - // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the CSI Driver. - optional CSIDriverSpec spec = 2; -} - -// CSIDriverList is a collection of CSIDriver objects. -message CSIDriverList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of CSIDriver - repeated CSIDriver items = 2; -} - -// CSIDriverSpec is the specification of a CSIDriver. -message CSIDriverSpec { - // attachRequired indicates this CSI volume driver requires an attach - // operation (because it implements the CSI ControllerPublishVolume() - // method), and that the Kubernetes attach detach controller should call - // the attach volume interface which checks the volumeattachment status - // and waits until the volume is attached before proceeding to mounting. - // The CSI external-attacher coordinates with CSI volume driver and updates - // the volumeattachment status when the attach operation is complete. - // If the CSIDriverRegistry feature gate is enabled and the value is - // specified to false, the attach operation will be skipped. - // Otherwise the attach operation will be called. - // +optional - optional bool attachRequired = 1; - - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. - // If set to false, pod information will not be passed on mount. - // Default is false. - // The CSI driver specifies podInfoOnMount as part of driver deployment. 
- // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. - // This list might grow, but the prefix will be used. - // "csi.storage.k8s.io/pod.name": pod.Name - // "csi.storage.k8s.io/pod.namespace": pod.Namespace - // "csi.storage.k8s.io/pod.uid": string(pod.UID) - // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume - // defined by a CSIVolumeSource, otherwise "false" - // - // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only - // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. - // Other drivers can leave pod info disabled and/or ignore this field. - // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when - // deployed on such a cluster and the deployment determines which mode that is, for example - // via a command line parameter of the driver. - // +optional - optional bool podInfoOnMount = 2; - - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. - // For more information about implementing this mode, see - // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // +optional - repeated string volumeLifecycleModes = 3; -} - -// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. -// See the release notes for more information. -// CSINode holds information about all CSI drivers installed on a node. -// CSI drivers do not need to create the CSINode object directly. As long as -// they use the node-driver-registrar sidecar container, the kubelet will -// automatically populate the CSINode object for the CSI driver as part of -// kubelet plugin registration. -// CSINode has the same name as a node. If the object is missing, it means either -// there are no CSI Drivers available on the node, or the Kubelet version is low -// enough that it doesn't create this object. -// CSINode has an OwnerReference that points to the corresponding node object. -message CSINode { - // metadata.name must be the Kubernetes node name. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the specification of CSINode - optional CSINodeSpec spec = 2; -} - -// CSINodeDriver holds information about the specification of one CSI driver installed on a node -message CSINodeDriver { - // This is the name of the CSI driver that this object refers to. - // This MUST be the same name returned by the CSI GetPluginName() call for - // that driver. - optional string name = 1; - - // nodeID of the node from the driver point of view. - // This field enables Kubernetes to communicate with storage systems that do - // not share the same nomenclature for nodes. 
For example, Kubernetes may - // refer to a given node as "node1", but the storage system may refer to - // the same node as "nodeA". When Kubernetes issues a command to the storage - // system to attach a volume to a specific node, it can use this field to - // refer to the node name using the ID that the storage system will - // understand, e.g. "nodeA" instead of "node1". This field is required. - optional string nodeID = 2; - - // topologyKeys is the list of keys supported by the driver. - // When a driver is initialized on a cluster, it provides a set of topology - // keys that it understands (e.g. "company.com/zone", "company.com/region"). - // When a driver is initialized on a node, it provides the same topology keys - // along with values. Kubelet will expose these topology keys as labels - // on its own node object. - // When Kubernetes does topology aware provisioning, it can use this list to - // determine which labels it should retrieve from the node object and pass - // back to the driver. - // It is possible for different nodes to use different topology keys. - // This can be empty if driver does not support topology. - // +optional - repeated string topologyKeys = 3; - - // allocatable represents the volume resources of a node that are available for scheduling. - // +optional - optional VolumeNodeResources allocatable = 4; -} - -// CSINodeList is a collection of CSINode objects. -message CSINodeList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of CSINode - repeated CSINode items = 2; -} - -// CSINodeSpec holds information about the specification of all CSI drivers installed on a node -message CSINodeSpec { - // drivers is a list of information of all CSI Drivers existing on a node. - // If all drivers in the list are uninstalled, this can become empty. - // +patchMergeKey=name - // +patchStrategy=merge - repeated CSINodeDriver drivers = 1; -} - // StorageClass describes the parameters for a class of storage for // which PersistentVolumes can be dynamically provisioned. -// +// // StorageClasses are non-namespaced; the name of the storage class // according to etcd is in ObjectMeta.Name. message StorageClass { // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -247,7 +80,7 @@ message StorageClass { // StorageClassList is a collection of storage classes. message StorageClassList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -257,11 +90,11 @@ message StorageClassList { // VolumeAttachment captures the intent to attach or detach the specified volume // to/from the specified node. -// +// // VolumeAttachment objects are non-namespaced. message VolumeAttachment { // Standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; @@ -279,7 +112,7 @@ message VolumeAttachment { // VolumeAttachmentList is a collection of VolumeAttachment objects. message VolumeAttachmentList { // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; @@ -295,15 +128,6 @@ message VolumeAttachmentSource { // Name of the persistent volume to attach. // +optional optional string persistentVolumeName = 1; - - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - optional k8s.io.api.core.v1.PersistentVolumeSpec inlineVolumeSpec = 2; } // VolumeAttachmentSpec is the specification of a VolumeAttachment request. @@ -354,19 +178,9 @@ message VolumeError { optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; // String detailing the error encountered during Attach or Detach operation. - // This string may be logged, so it should not contain sensitive + // This string maybe logged, so it should not contain sensitive // information. // +optional optional string message = 2; } -// VolumeNodeResources is a set of resource limits for scheduling of volumes. -message VolumeNodeResources { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. - // A volume that is both attached and mounted on a node is considered to be used once, not twice. - // The same rule applies for a unique volume that is shared among multiple pods on the same node. - // If this field is nil, then the supported number of volumes on this node is unbounded. - // +optional - optional int32 count = 1; -} - diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go index c270ace..06b0f3d 100644 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ b/vendor/k8s.io/api/storage/v1beta1/register.go @@ -49,12 +49,6 @@ func addKnownTypes(scheme *runtime.Scheme) error { &VolumeAttachment{}, &VolumeAttachmentList{}, - - &CSIDriver{}, - &CSIDriverList{}, - - &CSINode{}, - &CSINodeList{}, ) metav1.AddToGroupVersion(scheme, SchemeGroupVersion) diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go index a8faeb9..5702c21 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ b/vendor/k8s.io/api/storage/v1beta1/types.go @@ -17,7 +17,7 @@ limitations under the License. package v1beta1 import ( - v1 "k8s.io/api/core/v1" + "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -33,7 +33,7 @@ import ( type StorageClass struct { metav1.TypeMeta `json:",inline"` // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -80,7 +80,7 @@ type StorageClass struct { type StorageClassList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -115,7 +115,7 @@ type VolumeAttachment struct { metav1.TypeMeta `json:",inline"` // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -136,7 +136,7 @@ type VolumeAttachment struct { type VolumeAttachmentList struct { metav1.TypeMeta `json:",inline"` // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -166,14 +166,7 @@ type VolumeAttachmentSource struct { // +optional PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - // inlineVolumeSpec contains all the information necessary to attach - // a persistent volume defined by a pod's inline VolumeSource. This field - // is populated only for the CSIMigration feature. It contains - // translated fields from a pod's inline VolumeSource to a - // PersistentVolumeSpec. This field is alpha-level and is only - // honored by servers that enabled the CSIMigration feature. - // +optional - InlineVolumeSpec *v1.PersistentVolumeSpec `json:"inlineVolumeSpec,omitempty" protobuf:"bytes,2,opt,name=inlineVolumeSpec"` + // Placeholder for *VolumeSource to accommodate inline volumes in pods. } // VolumeAttachmentStatus is the status of a VolumeAttachment request. @@ -211,230 +204,8 @@ type VolumeError struct { Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` // String detailing the error encountered during Attach or Detach operation. - // This string may be logged, so it should not contain sensitive + // This string maybe logged, so it should not contain sensitive // information. // +optional Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` } - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSIDriver captures information about a Container Storage Interface (CSI) -// volume driver deployed on the cluster. -// CSI drivers do not need to create the CSIDriver object directly. Instead they may use the -// cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically -// creates a CSIDriver object representing the driver. -// Kubernetes attach detach controller uses this object to determine whether attach is required. 
-// Kubelet uses this object to determine whether pod information needs to be passed on mount. -// CSIDriver objects are non-namespaced. -type CSIDriver struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata. - // metadata.Name indicates the name of the CSI driver that this object - // refers to; it MUST be the same name returned by the CSI GetPluginName() - // call for that driver. - // The driver name must be 63 characters or less, beginning and ending with - // an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and - // alphanumerics between. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the CSI Driver. - Spec CSIDriverSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSIDriverList is a collection of CSIDriver objects. -type CSIDriverList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CSIDriver - Items []CSIDriver `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// CSIDriverSpec is the specification of a CSIDriver. -type CSIDriverSpec struct { - // attachRequired indicates this CSI volume driver requires an attach - // operation (because it implements the CSI ControllerPublishVolume() - // method), and that the Kubernetes attach detach controller should call - // the attach volume interface which checks the volumeattachment status - // and waits until the volume is attached before proceeding to mounting. - // The CSI external-attacher coordinates with CSI volume driver and updates - // the volumeattachment status when the attach operation is complete. - // If the CSIDriverRegistry feature gate is enabled and the value is - // specified to false, the attach operation will be skipped. - // Otherwise the attach operation will be called. - // +optional - AttachRequired *bool `json:"attachRequired,omitempty" protobuf:"varint,1,opt,name=attachRequired"` - - // If set to true, podInfoOnMount indicates this CSI volume driver - // requires additional pod information (like podName, podUID, etc.) during - // mount operations. - // If set to false, pod information will not be passed on mount. - // Default is false. - // The CSI driver specifies podInfoOnMount as part of driver deployment. - // If true, Kubelet will pass pod information as VolumeContext in the CSI - // NodePublishVolume() calls. - // The CSI driver is responsible for parsing and validating the information - // passed in as VolumeContext. - // The following VolumeConext will be passed if podInfoOnMount is set to true. - // This list might grow, but the prefix will be used. - // "csi.storage.k8s.io/pod.name": pod.Name - // "csi.storage.k8s.io/pod.namespace": pod.Namespace - // "csi.storage.k8s.io/pod.uid": string(pod.UID) - // "csi.storage.k8s.io/ephemeral": "true" iff the volume is an ephemeral inline volume - // defined by a CSIVolumeSource, otherwise "false" - // - // "csi.storage.k8s.io/ephemeral" is a new feature in Kubernetes 1.16. It is only - // required for drivers which support both the "Persistent" and "Ephemeral" VolumeLifecycleMode. 
- // Other drivers can leave pod info disabled and/or ignore this field. - // As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when - // deployed on such a cluster and the deployment determines which mode that is, for example - // via a command line parameter of the driver. - // +optional - PodInfoOnMount *bool `json:"podInfoOnMount,omitempty" protobuf:"bytes,2,opt,name=podInfoOnMount"` - - // VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. - // The default if the list is empty is "Persistent", which is the usage - // defined by the CSI specification and implemented in Kubernetes via the usual - // PV/PVC mechanism. - // The other mode is "Ephemeral". In this mode, volumes are defined inline - // inside the pod spec with CSIVolumeSource and their lifecycle is tied to - // the lifecycle of that pod. A driver has to be aware of this - // because it is only going to get a NodePublishVolume call for such a volume. - // For more information about implementing this mode, see - // https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html - // A driver can support one or more of these modes and - // more modes may be added in the future. - // +optional - VolumeLifecycleModes []VolumeLifecycleMode `json:"volumeLifecycleModes,omitempty" protobuf:"bytes,3,opt,name=volumeLifecycleModes"` -} - -// VolumeLifecycleMode is an enumeration of possible usage modes for a volume -// provided by a CSI driver. More modes may be added in the future. -type VolumeLifecycleMode string - -const ( - // VolumeLifecyclePersistent explicitly confirms that the driver implements - // the full CSI spec. It is the default when CSIDriverSpec.VolumeLifecycleModes is not - // set. Such volumes are managed in Kubernetes via the persistent volume - // claim mechanism and have a lifecycle that is independent of the pods which - // use them. - VolumeLifecyclePersistent VolumeLifecycleMode = "Persistent" - - // VolumeLifecycleEphemeral indicates that the driver can be used for - // ephemeral inline volumes. Such volumes are specified inside the pod - // spec with a CSIVolumeSource and, as far as Kubernetes is concerned, have - // a lifecycle that is tied to the lifecycle of the pod. For example, such - // a volume might contain data that gets created specifically for that pod, - // like secrets. - // But how the volume actually gets created and managed is entirely up to - // the driver. It might also use reference counting to share the same volume - // instance among different pods if the CSIVolumeSource of those pods is - // identical. - VolumeLifecycleEphemeral VolumeLifecycleMode = "Ephemeral" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. -// See the release notes for more information. -// CSINode holds information about all CSI drivers installed on a node. -// CSI drivers do not need to create the CSINode object directly. As long as -// they use the node-driver-registrar sidecar container, the kubelet will -// automatically populate the CSINode object for the CSI driver as part of -// kubelet plugin registration. -// CSINode has the same name as a node. If the object is missing, it means either -// there are no CSI Drivers available on the node, or the Kubelet version is low -// enough that it doesn't create this object. 
-// CSINode has an OwnerReference that points to the corresponding node object. -type CSINode struct { - metav1.TypeMeta `json:",inline"` - - // metadata.name must be the Kubernetes node name. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the specification of CSINode - Spec CSINodeSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` -} - -// CSINodeSpec holds information about the specification of all CSI drivers installed on a node -type CSINodeSpec struct { - // drivers is a list of information of all CSI Drivers existing on a node. - // If all drivers in the list are uninstalled, this can become empty. - // +patchMergeKey=name - // +patchStrategy=merge - Drivers []CSINodeDriver `json:"drivers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=drivers"` -} - -// CSINodeDriver holds information about the specification of one CSI driver installed on a node -type CSINodeDriver struct { - // This is the name of the CSI driver that this object refers to. - // This MUST be the same name returned by the CSI GetPluginName() call for - // that driver. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // nodeID of the node from the driver point of view. - // This field enables Kubernetes to communicate with storage systems that do - // not share the same nomenclature for nodes. For example, Kubernetes may - // refer to a given node as "node1", but the storage system may refer to - // the same node as "nodeA". When Kubernetes issues a command to the storage - // system to attach a volume to a specific node, it can use this field to - // refer to the node name using the ID that the storage system will - // understand, e.g. "nodeA" instead of "node1". This field is required. - NodeID string `json:"nodeID" protobuf:"bytes,2,opt,name=nodeID"` - - // topologyKeys is the list of keys supported by the driver. - // When a driver is initialized on a cluster, it provides a set of topology - // keys that it understands (e.g. "company.com/zone", "company.com/region"). - // When a driver is initialized on a node, it provides the same topology keys - // along with values. Kubelet will expose these topology keys as labels - // on its own node object. - // When Kubernetes does topology aware provisioning, it can use this list to - // determine which labels it should retrieve from the node object and pass - // back to the driver. - // It is possible for different nodes to use different topology keys. - // This can be empty if driver does not support topology. - // +optional - TopologyKeys []string `json:"topologyKeys" protobuf:"bytes,3,rep,name=topologyKeys"` - - // allocatable represents the volume resources of a node that are available for scheduling. - // +optional - Allocatable *VolumeNodeResources `json:"allocatable,omitempty" protobuf:"bytes,4,opt,name=allocatable"` -} - -// VolumeNodeResources is a set of resource limits for scheduling of volumes. -type VolumeNodeResources struct { - // Maximum number of unique volumes managed by the CSI driver that can be used on a node. - // A volume that is both attached and mounted on a node is considered to be used once, not twice. - // The same rule applies for a unique volume that is shared among multiple pods on the same node. - // If this field is nil, then the supported number of volumes on this node is unbounded. 
- // +optional - Count *int32 `json:"count,omitempty" protobuf:"varint,1,opt,name=count"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CSINodeList is a collection of CSINode objects. -type CSINodeList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CSINode - Items []CSINode `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go index 53fa666..044d69f 100644 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go @@ -27,81 +27,9 @@ package v1beta1 // Those methods can be generated by using hack/update-generated-swagger-docs.sh // AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_CSIDriver = map[string]string{ - "": "CSIDriver captures information about a Container Storage Interface (CSI) volume driver deployed on the cluster. CSI drivers do not need to create the CSIDriver object directly. Instead they may use the cluster-driver-registrar sidecar container. When deployed with a CSI driver it automatically creates a CSIDriver object representing the driver. Kubernetes attach detach controller uses this object to determine whether attach is required. Kubelet uses this object to determine whether pod information needs to be passed on mount. CSIDriver objects are non-namespaced.", - "metadata": "Standard object metadata. metadata.Name indicates the name of the CSI driver that this object refers to; it MUST be the same name returned by the CSI GetPluginName() call for that driver. The driver name must be 63 characters or less, beginning and ending with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), dots (.), and alphanumerics between. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "spec": "Specification of the CSI Driver.", -} - -func (CSIDriver) SwaggerDoc() map[string]string { - return map_CSIDriver -} - -var map_CSIDriverList = map[string]string{ - "": "CSIDriverList is a collection of CSIDriver objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of CSIDriver", -} - -func (CSIDriverList) SwaggerDoc() map[string]string { - return map_CSIDriverList -} - -var map_CSIDriverSpec = map[string]string{ - "": "CSIDriverSpec is the specification of a CSIDriver.", - "attachRequired": "attachRequired indicates this CSI volume driver requires an attach operation (because it implements the CSI ControllerPublishVolume() method), and that the Kubernetes attach detach controller should call the attach volume interface which checks the volumeattachment status and waits until the volume is attached before proceeding to mounting. The CSI external-attacher coordinates with CSI volume driver and updates the volumeattachment status when the attach operation is complete. If the CSIDriverRegistry feature gate is enabled and the value is specified to false, the attach operation will be skipped. 
Otherwise the attach operation will be called.", - "podInfoOnMount": "If set to true, podInfoOnMount indicates this CSI volume driver requires additional pod information (like podName, podUID, etc.) during mount operations. If set to false, pod information will not be passed on mount. Default is false. The CSI driver specifies podInfoOnMount as part of driver deployment. If true, Kubelet will pass pod information as VolumeContext in the CSI NodePublishVolume() calls. The CSI driver is responsible for parsing and validating the information passed in as VolumeContext. The following VolumeConext will be passed if podInfoOnMount is set to true. This list might grow, but the prefix will be used. \"csi.storage.k8s.io/pod.name\": pod.Name \"csi.storage.k8s.io/pod.namespace\": pod.Namespace \"csi.storage.k8s.io/pod.uid\": string(pod.UID) \"csi.storage.k8s.io/ephemeral\": \"true\" iff the volume is an ephemeral inline volume\n defined by a CSIVolumeSource, otherwise \"false\"\n\n\"csi.storage.k8s.io/ephemeral\" is a new feature in Kubernetes 1.16. It is only required for drivers which support both the \"Persistent\" and \"Ephemeral\" VolumeLifecycleMode. Other drivers can leave pod info disabled and/or ignore this field. As Kubernetes 1.15 doesn't support this field, drivers can only support one mode when deployed on such a cluster and the deployment determines which mode that is, for example via a command line parameter of the driver.", - "volumeLifecycleModes": "VolumeLifecycleModes defines what kind of volumes this CSI volume driver supports. The default if the list is empty is \"Persistent\", which is the usage defined by the CSI specification and implemented in Kubernetes via the usual PV/PVC mechanism. The other mode is \"Ephemeral\". In this mode, volumes are defined inline inside the pod spec with CSIVolumeSource and their lifecycle is tied to the lifecycle of that pod. A driver has to be aware of this because it is only going to get a NodePublishVolume call for such a volume. For more information about implementing this mode, see https://kubernetes-csi.github.io/docs/ephemeral-local-volumes.html A driver can support one or more of these modes and more modes may be added in the future.", -} - -func (CSIDriverSpec) SwaggerDoc() map[string]string { - return map_CSIDriverSpec -} - -var map_CSINode = map[string]string{ - "": "DEPRECATED - This group version of CSINode is deprecated by storage/v1/CSINode. See the release notes for more information. CSINode holds information about all CSI drivers installed on a node. CSI drivers do not need to create the CSINode object directly. As long as they use the node-driver-registrar sidecar container, the kubelet will automatically populate the CSINode object for the CSI driver as part of kubelet plugin registration. CSINode has the same name as a node. If the object is missing, it means either there are no CSI Drivers available on the node, or the Kubelet version is low enough that it doesn't create this object. CSINode has an OwnerReference that points to the corresponding node object.", - "metadata": "metadata.name must be the Kubernetes node name.", - "spec": "spec is the specification of CSINode", -} - -func (CSINode) SwaggerDoc() map[string]string { - return map_CSINode -} - -var map_CSINodeDriver = map[string]string{ - "": "CSINodeDriver holds information about the specification of one CSI driver installed on a node", - "name": "This is the name of the CSI driver that this object refers to. 
This MUST be the same name returned by the CSI GetPluginName() call for that driver.", - "nodeID": "nodeID of the node from the driver point of view. This field enables Kubernetes to communicate with storage systems that do not share the same nomenclature for nodes. For example, Kubernetes may refer to a given node as \"node1\", but the storage system may refer to the same node as \"nodeA\". When Kubernetes issues a command to the storage system to attach a volume to a specific node, it can use this field to refer to the node name using the ID that the storage system will understand, e.g. \"nodeA\" instead of \"node1\". This field is required.", - "topologyKeys": "topologyKeys is the list of keys supported by the driver. When a driver is initialized on a cluster, it provides a set of topology keys that it understands (e.g. \"company.com/zone\", \"company.com/region\"). When a driver is initialized on a node, it provides the same topology keys along with values. Kubelet will expose these topology keys as labels on its own node object. When Kubernetes does topology aware provisioning, it can use this list to determine which labels it should retrieve from the node object and pass back to the driver. It is possible for different nodes to use different topology keys. This can be empty if driver does not support topology.", - "allocatable": "allocatable represents the volume resources of a node that are available for scheduling.", -} - -func (CSINodeDriver) SwaggerDoc() map[string]string { - return map_CSINodeDriver -} - -var map_CSINodeList = map[string]string{ - "": "CSINodeList is a collection of CSINode objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "items": "items is the list of CSINode", -} - -func (CSINodeList) SwaggerDoc() map[string]string { - return map_CSINodeList -} - -var map_CSINodeSpec = map[string]string{ - "": "CSINodeSpec holds information about the specification of all CSI drivers installed on a node", - "drivers": "drivers is a list of information of all CSI Drivers existing on a node. If all drivers in the list are uninstalled, this can become empty.", -} - -func (CSINodeSpec) SwaggerDoc() map[string]string { - return map_CSINodeSpec -} - var map_StorageClass = map[string]string{ "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "provisioner": "Provisioner indicates the type of the provisioner.", "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. 
Defaults to Delete.", @@ -117,7 +45,7 @@ func (StorageClass) SwaggerDoc() map[string]string { var map_StorageClassList = map[string]string{ "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of StorageClasses", } @@ -127,7 +55,7 @@ func (StorageClassList) SwaggerDoc() map[string]string { var map_VolumeAttachment = map[string]string{ "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", } @@ -138,7 +66,7 @@ func (VolumeAttachment) SwaggerDoc() map[string]string { var map_VolumeAttachmentList = map[string]string{ "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "items": "Items is the list of VolumeAttachments", } @@ -147,7 +75,7 @@ func (VolumeAttachmentList) SwaggerDoc() map[string]string { } var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", + "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", "persistentVolumeName": "Name of the persistent volume to attach.", } @@ -181,20 +109,11 @@ func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { var map_VolumeError = map[string]string{ "": "VolumeError captures an error encountered during a volume operation.", "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string may be logged, so it should not contain sensitive information.", + "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", } func (VolumeError) SwaggerDoc() map[string]string { return map_VolumeError } -var map_VolumeNodeResources = map[string]string{ - "": "VolumeNodeResources is a set of resource limits for scheduling of volumes.", - "count": "Maximum number of unique volumes managed by the CSI driver that can be used on a node. A volume that is both attached and mounted on a node is considered to be used once, not twice. 
The same rule applies for a unique volume that is shared among multiple pods on the same node. If this field is nil, then the supported number of volumes on this node is unbounded.", -} - -func (VolumeNodeResources) SwaggerDoc() map[string]string { - return map_VolumeNodeResources -} - // AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go index 52433fc..8096dba 100644 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go @@ -25,206 +25,6 @@ import ( runtime "k8s.io/apimachinery/pkg/runtime" ) -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriver) DeepCopyInto(out *CSIDriver) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriver. -func (in *CSIDriver) DeepCopy() *CSIDriver { - if in == nil { - return nil - } - out := new(CSIDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSIDriver) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriverList) DeepCopyInto(out *CSIDriverList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CSIDriver, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverList. -func (in *CSIDriverList) DeepCopy() *CSIDriverList { - if in == nil { - return nil - } - out := new(CSIDriverList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSIDriverList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIDriverSpec) DeepCopyInto(out *CSIDriverSpec) { - *out = *in - if in.AttachRequired != nil { - in, out := &in.AttachRequired, &out.AttachRequired - *out = new(bool) - **out = **in - } - if in.PodInfoOnMount != nil { - in, out := &in.PodInfoOnMount, &out.PodInfoOnMount - *out = new(bool) - **out = **in - } - if in.VolumeLifecycleModes != nil { - in, out := &in.VolumeLifecycleModes, &out.VolumeLifecycleModes - *out = make([]VolumeLifecycleMode, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIDriverSpec. -func (in *CSIDriverSpec) DeepCopy() *CSIDriverSpec { - if in == nil { - return nil - } - out := new(CSIDriverSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CSINode) DeepCopyInto(out *CSINode) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINode. -func (in *CSINode) DeepCopy() *CSINode { - if in == nil { - return nil - } - out := new(CSINode) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSINode) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINodeDriver) DeepCopyInto(out *CSINodeDriver) { - *out = *in - if in.TopologyKeys != nil { - in, out := &in.TopologyKeys, &out.TopologyKeys - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Allocatable != nil { - in, out := &in.Allocatable, &out.Allocatable - *out = new(VolumeNodeResources) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeDriver. -func (in *CSINodeDriver) DeepCopy() *CSINodeDriver { - if in == nil { - return nil - } - out := new(CSINodeDriver) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINodeList) DeepCopyInto(out *CSINodeList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CSINode, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeList. -func (in *CSINodeList) DeepCopy() *CSINodeList { - if in == nil { - return nil - } - out := new(CSINodeList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CSINodeList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSINodeSpec) DeepCopyInto(out *CSINodeSpec) { - *out = *in - if in.Drivers != nil { - in, out := &in.Drivers, &out.Drivers - *out = make([]CSINodeDriver, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSINodeSpec. -func (in *CSINodeSpec) DeepCopy() *CSINodeSpec { - if in == nil { - return nil - } - out := new(CSINodeSpec) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StorageClass) DeepCopyInto(out *StorageClass) { *out = *in @@ -289,7 +89,7 @@ func (in *StorageClass) DeepCopyObject() runtime.Object { func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]StorageClass, len(*in)) @@ -350,7 +150,7 @@ func (in *VolumeAttachment) DeepCopyObject() runtime.Object { func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]VolumeAttachment, len(*in)) @@ -387,11 +187,6 @@ func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { *out = new(string) **out = **in } - if in.InlineVolumeSpec != nil { - in, out := &in.InlineVolumeSpec, &out.InlineVolumeSpec - *out = new(v1.PersistentVolumeSpec) - (*in).DeepCopyInto(*out) - } return } @@ -471,24 +266,3 @@ func (in *VolumeError) DeepCopy() *VolumeError { in.DeepCopyInto(out) return out } - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeNodeResources) DeepCopyInto(out *VolumeNodeResources) { - *out = *in - if in.Count != nil { - in, out := &in.Count, &out.Count - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeResources. -func (in *VolumeNodeResources) DeepCopy() *VolumeNodeResources { - if in == nil { - return nil - } - out := new(VolumeNodeResources) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/client-go/discovery/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached_discovery.go new file mode 100644 index 0000000..e98cad9 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/cached_discovery.go @@ -0,0 +1,282 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "errors" + "io/ioutil" + "net/http" + "os" + "path/filepath" + "sync" + "time" + + "github.com/golang/glog" + "github.com/googleapis/gnostic/OpenAPIv2" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/version" + "k8s.io/client-go/kubernetes/scheme" + restclient "k8s.io/client-go/rest" +) + +// CachedDiscoveryClient implements the functions that discovery server-supported API groups, +// versions and resources. +type CachedDiscoveryClient struct { + delegate DiscoveryInterface + + // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. 
+ cacheDirectory string + + // ttl is how long the cache should be considered valid + ttl time.Duration + + // mutex protects the variables below + mutex sync.Mutex + + // ourFiles are all filenames of cache files created by this process + ourFiles map[string]struct{} + // invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called) + invalidated bool + // fresh is true if all used cache files were ours + fresh bool +} + +var _ CachedDiscoveryInterface = &CachedDiscoveryClient{} + +// ServerResourcesForGroupVersion returns the supported resources for a group and version. +func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { + filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") + cachedBytes, err := d.getCachedFile(filename) + // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. + if err == nil { + cachedResources := &metav1.APIResourceList{} + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { + glog.V(10).Infof("returning cached discovery info from %v", filename) + return cachedResources, nil + } + } + + liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) + if err != nil { + glog.V(3).Infof("skipped caching discovery info due to %v", err) + return liveResources, err + } + if liveResources == nil || len(liveResources.APIResources) == 0 { + glog.V(3).Infof("skipped caching discovery info, no resources found") + return liveResources, err + } + + if err := d.writeCachedFile(filename, liveResources); err != nil { + glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + } + + return liveResources, nil +} + +// ServerResources returns the supported resources for all groups and versions. +func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { + return ServerResources(d) +} + +func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { + filename := filepath.Join(d.cacheDirectory, "servergroups.json") + cachedBytes, err := d.getCachedFile(filename) + // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
+ if err == nil { + cachedGroups := &metav1.APIGroupList{} + if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { + glog.V(10).Infof("returning cached discovery info from %v", filename) + return cachedGroups, nil + } + } + + liveGroups, err := d.delegate.ServerGroups() + if err != nil { + glog.V(3).Infof("skipped caching discovery info due to %v", err) + return liveGroups, err + } + if liveGroups == nil || len(liveGroups.Groups) == 0 { + glog.V(3).Infof("skipped caching discovery info, no groups found") + return liveGroups, err + } + + if err := d.writeCachedFile(filename, liveGroups); err != nil { + glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) + } + + return liveGroups, nil +} + +func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { + // after invalidation ignore cache files not created by this process + d.mutex.Lock() + _, ourFile := d.ourFiles[filename] + if d.invalidated && !ourFile { + d.mutex.Unlock() + return nil, errors.New("cache invalidated") + } + d.mutex.Unlock() + + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + fileInfo, err := file.Stat() + if err != nil { + return nil, err + } + + if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { + return nil, errors.New("cache expired") + } + + // the cache is present and its valid. Try to read and use it. + cachedBytes, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + + d.mutex.Lock() + defer d.mutex.Unlock() + d.fresh = d.fresh && ourFile + + return cachedBytes, nil +} + +func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { + if err := os.MkdirAll(filepath.Dir(filename), 0750); err != nil { + return err + } + + bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) + if err != nil { + return err + } + + f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".") + if err != nil { + return err + } + defer os.Remove(f.Name()) + _, err = f.Write(bytes) + if err != nil { + return err + } + + err = os.Chmod(f.Name(), 0660) + if err != nil { + return err + } + + name := f.Name() + err = f.Close() + if err != nil { + return err + } + + // atomic rename + d.mutex.Lock() + defer d.mutex.Unlock() + err = os.Rename(name, filename) + if err == nil { + d.ourFiles[filename] = struct{}{} + } + return err +} + +func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { + return d.delegate.RESTClient() +} + +func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredResources(d) +} + +func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredNamespacedResources(d) +} + +func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { + return d.delegate.ServerVersion() +} + +func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { + return d.delegate.OpenAPISchema() +} + +func (d *CachedDiscoveryClient) Fresh() bool { + d.mutex.Lock() + defer d.mutex.Unlock() + + return d.fresh +} + +func (d *CachedDiscoveryClient) Invalidate() { + d.mutex.Lock() + defer d.mutex.Unlock() + + d.ourFiles = map[string]struct{}{} + d.fresh = true + d.invalidated = true +} + +// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps +// the created client in a CachedDiscoveryClient. 
The provided configuration is updated with a +// custom transport that understands cache responses. +// We receive two distinct cache directories for now, in order to preserve old behavior +// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper, +// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing +// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not +// be updated with a roundtripper that understands cache responses. +// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory. +// TODO(juanvallejo): the value of "--cache-dir" should be honored. Consolidate discoveryCacheDir with httpCacheDir +// so that server resources and http-cache data are stored in the same location, provided via config flags. +func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) { + if len(httpCacheDir) > 0 { + // update the given restconfig with a custom roundtripper that + // understands how to handle cache responses. + wt := config.WrapTransport + config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } + return newCacheRoundTripper(httpCacheDir, rt) + } + } + + discoveryClient, err := NewDiscoveryClientForConfig(config) + if err != nil { + return nil, err + } + + return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil +} + +// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. +func newCachedDiscoveryClient(delegate DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { + return &CachedDiscoveryClient{ + delegate: delegate, + cacheDirectory: cacheDirectory, + ttl: ttl, + ourFiles: map[string]struct{}{}, + fresh: true, + } +} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go index 61b9c44..a966029 100644 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ b/vendor/k8s.io/client-go/discovery/discovery_client.go @@ -26,7 +26,7 @@ import ( "time" "github.com/golang/protobuf/proto" - openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2" + "github.com/googleapis/gnostic/OpenAPIv2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -60,9 +60,6 @@ type DiscoveryInterface interface { } // CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. -// Note that If the ServerResourcesForGroupVersion method returns a cache miss -// error, the user needs to explicitly call Invalidate to clear the cache, -// otherwise the same cache miss error will be returned next time. type CachedDiscoveryInterface interface { DiscoveryInterface // Fresh is supposed to tell the caller whether or not to retry if the cache @@ -71,8 +68,7 @@ type CachedDiscoveryInterface interface { // TODO: this needs to be revisited, this interface can't be locked properly // and doesn't make a lot of sense. Fresh() bool - // Invalidate enforces that no cached data that is older than the current time - // is used. + // Invalidate enforces that no cached data is used in the future that is older than the current time. 
Invalidate() } @@ -88,28 +84,12 @@ type ServerResourcesInterface interface { // ServerResourcesForGroupVersion returns the supported resources for a group and version. ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) // ServerResources returns the supported resources for all groups and versions. - // - // The returned resource list might be non-nil with partial results even in the case of - // non-nil error. - // - // Deprecated: use ServerGroupsAndResources instead. ServerResources() ([]*metav1.APIResourceList, error) - // ServerResources returns the supported groups and resources for all groups and versions. - // - // The returned group and resource lists might be non-nil with partial results even in the - // case of non-nil error. - ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) // ServerPreferredResources returns the supported resources with the version preferred by the // server. - // - // The returned group and resource lists might be non-nil with partial results even in the - // case of non-nil error. ServerPreferredResources() ([]*metav1.APIResourceList, error) // ServerPreferredNamespacedResources returns the supported namespaced resources with the // version preferred by the server. - // - // The returned resource list might be non-nil with partial results even in the case of - // non-nil error. ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) } @@ -207,18 +187,14 @@ func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (r return resources, nil } -// ServerResources returns the supported resources for all groups and versions. -// Deprecated: use ServerGroupsAndResources instead. -func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - _, rs, err := d.ServerGroupsAndResources() - return rs, err +// serverResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) serverResources() ([]*metav1.APIResourceList, error) { + return ServerResources(d) } -// ServerGroupsAndResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) ServerGroupsAndResources() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - return withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - return ServerGroupsAndResources(d) - }) +// ServerResources returns the supported resources for all groups and versions. +func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { + return withRetries(defaultRetries, d.serverResources) } // ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. @@ -244,28 +220,23 @@ func IsGroupDiscoveryFailedError(err error) bool { return err != nil && ok } -// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. -// Deprecated: use ServerGroupsAndResources instead. -func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - _, rs, err := ServerGroupsAndResources(d) - return rs, err +// serverPreferredResources returns the supported resources with the version preferred by the server. 
+func (d *DiscoveryClient) serverPreferredResources() ([]*metav1.APIResourceList, error) { + return ServerPreferredResources(d) } -func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - sgs, err := d.ServerGroups() - if sgs == nil { - return nil, nil, err - } - resultGroups := []*metav1.APIGroup{} - for i := range sgs.Groups { - resultGroups = append(resultGroups, &sgs.Groups[i]) +// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. +func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { + apiGroups, err := d.ServerGroups() + if err != nil { + return nil, err } - groupVersionResources, failedGroups := fetchGroupVersionResources(d, sgs) + groupVersionResources, failedGroups := fetchGroupVersionResources(d, apiGroups) // order results by group/version discovery order result := []*metav1.APIResourceList{} - for _, apiGroup := range sgs.Groups { + for _, apiGroup := range apiGroups.Groups { for _, version := range apiGroup.Versions { gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} if resources, ok := groupVersionResources[gv]; ok { @@ -275,10 +246,10 @@ func ServerGroupsAndResources(d DiscoveryInterface) ([]*metav1.APIGroup, []*meta } if len(failedGroups) == 0 { - return resultGroups, result, nil + return result, nil } - return resultGroups, result, &ErrGroupDiscoveryFailed{Groups: failedGroups} + return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } // ServerPreferredResources uses the provided discovery interface to look up preferred resources @@ -292,8 +263,8 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, result := []*metav1.APIResourceList{} grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource - grAPIResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource - gvAPIResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping + grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource + gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping for _, apiGroup := range serverGroupList.Groups { for _, version := range apiGroup.Versions { @@ -305,11 +276,11 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, } // create empty list which is filled later in another loop - emptyAPIResourceList := metav1.APIResourceList{ + emptyApiResourceList := metav1.APIResourceList{ GroupVersion: version.GroupVersion, } - gvAPIResourceLists[groupVersion] = &emptyAPIResourceList - result = append(result, &emptyAPIResourceList) + gvApiResourceLists[groupVersion] = &emptyApiResourceList + result = append(result, &emptyApiResourceList) for i := range apiResourceList.APIResources { apiResource := &apiResourceList.APIResources[i] @@ -317,21 +288,21 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, continue } gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} - if _, ok := grAPIResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { + if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { // only override with preferred version continue } grVersions[gv] = version.Version - grAPIResources[gv] = apiResource 
+ grApiResources[gv] = apiResource } } } // group selected APIResources according to GroupVersion into APIResourceLists - for groupResource, apiResource := range grAPIResources { + for groupResource, apiResource := range grApiResources { version := grVersions[groupResource] groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} - apiResourceList := gvAPIResourceLists[groupVersion] + apiResourceList := gvApiResourceLists[groupVersion] apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) } @@ -342,7 +313,7 @@ func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} } -// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel. +// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, map[schema.GroupVersion]error) { groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) failedGroups := make(map[schema.GroupVersion]error) @@ -366,9 +337,7 @@ func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroup if err != nil { // TODO: maybe restrict this to NotFound errors failedGroups[groupVersion] = err - } - if apiResourceList != nil { - // even in case of error, some fallback might have been returned + } else { groupVersionResources[groupVersion] = apiResourceList } }() @@ -382,11 +351,7 @@ func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroup // ServerPreferredResources returns the supported resources with the version preferred by the // server. func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - _, rs, err := withRetries(defaultRetries, func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { - rs, err := ServerPreferredResources(d) - return nil, rs, err - }) - return rs, err + return withRetries(defaultRetries, d.serverPreferredResources) } // ServerPreferredNamespacedResources returns the supported namespaced resources with the @@ -412,7 +377,7 @@ func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { var info version.Info err = json.Unmarshal(body, &info) if err != nil { - return nil, fmt.Errorf("unable to parse the server version: %v", err) + return nil, fmt.Errorf("got '%s': %v", string(body), err) } return &info, nil } @@ -423,7 +388,7 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { if err != nil { if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO: remove this when kubectl/client-go don't work with 1.9 server + // TODO(roycaihw): remove this in 1.11 data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() if err != nil { return nil, err @@ -441,20 +406,19 @@ func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { } // withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. 
-func withRetries(maxRetries int, f func() ([]*metav1.APIGroup, []*metav1.APIResourceList, error)) ([]*metav1.APIGroup, []*metav1.APIResourceList, error) { +func withRetries(maxRetries int, f func() ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { var result []*metav1.APIResourceList - var resultGroups []*metav1.APIGroup var err error for i := 0; i < maxRetries; i++ { - resultGroups, result, err = f() + result, err = f() if err == nil { - return resultGroups, result, nil + return result, nil } if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { - return nil, nil, err + return nil, err } } - return resultGroups, result, err + return result, err } func setDiscoveryDefaults(config *restclient.Config) error { @@ -500,9 +464,9 @@ func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. -func (d *DiscoveryClient) RESTClient() restclient.Interface { - if d == nil { +func (c *DiscoveryClient) RESTClient() restclient.Interface { + if c == nil { return nil } - return d.restClient + return c.restClient } diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go index 3bfe514..353d34b 100644 --- a/vendor/k8s.io/client-go/discovery/helper.go +++ b/vendor/k8s.io/client-go/discovery/helper.go @@ -31,11 +31,11 @@ import ( func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { sVer, err := client.ServerVersion() if err != nil { - return fmt.Errorf("couldn't read version from server: %v", err) + return fmt.Errorf("couldn't read version from server: %v\n", err) } // GitVersion includes GitCommit and GitTreeState, but best to be safe? if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)", sVer, clientVersion) + return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion) } return nil @@ -101,15 +101,12 @@ func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1 return result } -// ResourcePredicate has a method to check if a resource matches a given condition. type ResourcePredicate interface { Match(groupVersion string, r *metav1.APIResource) bool } -// ResourcePredicateFunc returns true if it matches a resource based on a custom condition. type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool -// Match is a wrapper around ResourcePredicateFunc. func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { return fn(groupVersion, r) } @@ -119,7 +116,6 @@ type SupportsAllVerbs struct { Verbs []string } -// Match checks if a resource contains all the given verbs. func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) } diff --git a/vendor/k8s.io/client-go/discovery/round_tripper.go b/vendor/k8s.io/client-go/discovery/round_tripper.go new file mode 100644 index 0000000..86081c1 --- /dev/null +++ b/vendor/k8s.io/client-go/discovery/round_tripper.go @@ -0,0 +1,65 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package discovery + +import ( + "net/http" + "os" + "path/filepath" + + "github.com/golang/glog" + "github.com/gregjones/httpcache" + "github.com/gregjones/httpcache/diskcache" + "github.com/peterbourgon/diskv" +) + +type cacheRoundTripper struct { + rt *httpcache.Transport +} + +// newCacheRoundTripper creates a roundtripper that reads the ETag on +// response headers and send the If-None-Match header on subsequent +// corresponding requests. +func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { + d := diskv.New(diskv.Options{ + PathPerm: os.FileMode(0750), + FilePerm: os.FileMode(0660), + BasePath: cacheDir, + TempDir: filepath.Join(cacheDir, ".diskv-temp"), + }) + t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) + t.Transport = rt + + return &cacheRoundTripper{rt: t} +} + +func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + return rt.rt.RoundTrip(req) +} + +func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := rt.rt.Transport.(canceler); ok { + cr.CancelRequest(req) + } else { + glog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) + } +} + +func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go index fb889e6..122e4bb 100644 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ b/vendor/k8s.io/client-go/kubernetes/clientset.go @@ -20,11 +20,11 @@ package kubernetes import ( discovery "k8s.io/client-go/discovery" + admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" - auditregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1" authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" @@ -36,20 +36,15 @@ import ( batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - coordinationv1 "k8s.io/client-go/kubernetes/typed/coordination/v1" coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" - networkingv1beta1 "k8s.io/client-go/kubernetes/typed/networking/v1beta1" - nodev1alpha1 "k8s.io/client-go/kubernetes/typed/node/v1alpha1" - nodev1beta1 
"k8s.io/client-go/kubernetes/typed/node/v1beta1" policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - schedulingv1 "k8s.io/client-go/kubernetes/typed/scheduling/v1" schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" @@ -62,41 +57,70 @@ import ( type Interface interface { Discovery() discovery.DiscoveryInterface + AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface - AppsV1() appsv1.AppsV1Interface + // Deprecated: please explicitly pick a version if possible. + Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface AppsV1beta1() appsv1beta1.AppsV1beta1Interface AppsV1beta2() appsv1beta2.AppsV1beta2Interface - AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface + AppsV1() appsv1.AppsV1Interface + // Deprecated: please explicitly pick a version if possible. + Apps() appsv1.AppsV1Interface AuthenticationV1() authenticationv1.AuthenticationV1Interface + // Deprecated: please explicitly pick a version if possible. + Authentication() authenticationv1.AuthenticationV1Interface AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface AuthorizationV1() authorizationv1.AuthorizationV1Interface + // Deprecated: please explicitly pick a version if possible. + Authorization() authorizationv1.AuthorizationV1Interface AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface AutoscalingV1() autoscalingv1.AutoscalingV1Interface + // Deprecated: please explicitly pick a version if possible. + Autoscaling() autoscalingv1.AutoscalingV1Interface AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface BatchV1() batchv1.BatchV1Interface + // Deprecated: please explicitly pick a version if possible. + Batch() batchv1.BatchV1Interface BatchV1beta1() batchv1beta1.BatchV1beta1Interface BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Certificates() certificatesv1beta1.CertificatesV1beta1Interface CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface - CoordinationV1() coordinationv1.CoordinationV1Interface + // Deprecated: please explicitly pick a version if possible. + Coordination() coordinationv1beta1.CoordinationV1beta1Interface CoreV1() corev1.CoreV1Interface + // Deprecated: please explicitly pick a version if possible. + Core() corev1.CoreV1Interface EventsV1beta1() eventsv1beta1.EventsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Events() eventsv1beta1.EventsV1beta1Interface ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface + // Deprecated: please explicitly pick a version if possible. 
+ Extensions() extensionsv1beta1.ExtensionsV1beta1Interface NetworkingV1() networkingv1.NetworkingV1Interface - NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface - NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface - NodeV1beta1() nodev1beta1.NodeV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Networking() networkingv1.NetworkingV1Interface PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface + // Deprecated: please explicitly pick a version if possible. + Policy() policyv1beta1.PolicyV1beta1Interface RbacV1() rbacv1.RbacV1Interface + // Deprecated: please explicitly pick a version if possible. + Rbac() rbacv1.RbacV1Interface RbacV1beta1() rbacv1beta1.RbacV1beta1Interface RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface - SchedulingV1() schedulingv1.SchedulingV1Interface + // Deprecated: please explicitly pick a version if possible. + Scheduling() schedulingv1beta1.SchedulingV1beta1Interface SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface + // Deprecated: please explicitly pick a version if possible. + Settings() settingsv1alpha1.SettingsV1alpha1Interface StorageV1beta1() storagev1beta1.StorageV1beta1Interface StorageV1() storagev1.StorageV1Interface + // Deprecated: please explicitly pick a version if possible. + Storage() storagev1.StorageV1Interface StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface } @@ -104,42 +128,42 @@ type Interface interface { // version included in a Clientset. type Clientset struct { *discovery.DiscoveryClient - admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client - appsV1 *appsv1.AppsV1Client - appsV1beta1 *appsv1beta1.AppsV1beta1Client - appsV1beta2 *appsv1beta2.AppsV1beta2Client - auditregistrationV1alpha1 *auditregistrationv1alpha1.AuditregistrationV1alpha1Client - authenticationV1 *authenticationv1.AuthenticationV1Client - authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client - authorizationV1 *authorizationv1.AuthorizationV1Client - authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client - autoscalingV1 *autoscalingv1.AutoscalingV1Client - autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client - autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client - batchV1 *batchv1.BatchV1Client - batchV1beta1 *batchv1beta1.BatchV1beta1Client - batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client - certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client - coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client - coordinationV1 *coordinationv1.CoordinationV1Client - coreV1 *corev1.CoreV1Client - eventsV1beta1 *eventsv1beta1.EventsV1beta1Client - extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - networkingV1 *networkingv1.NetworkingV1Client - networkingV1beta1 *networkingv1beta1.NetworkingV1beta1Client - nodeV1alpha1 *nodev1alpha1.NodeV1alpha1Client - nodeV1beta1 *nodev1beta1.NodeV1beta1Client - policyV1beta1 *policyv1beta1.PolicyV1beta1Client - rbacV1 *rbacv1.RbacV1Client - rbacV1beta1 *rbacv1beta1.RbacV1beta1Client - rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client - schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client - schedulingV1 *schedulingv1.SchedulingV1Client - settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client - storageV1beta1 *storagev1beta1.StorageV1beta1Client - 
storageV1 *storagev1.StorageV1Client - storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client + admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client + admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client + appsV1beta1 *appsv1beta1.AppsV1beta1Client + appsV1beta2 *appsv1beta2.AppsV1beta2Client + appsV1 *appsv1.AppsV1Client + authenticationV1 *authenticationv1.AuthenticationV1Client + authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client + authorizationV1 *authorizationv1.AuthorizationV1Client + authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client + autoscalingV1 *autoscalingv1.AutoscalingV1Client + autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client + autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client + batchV1 *batchv1.BatchV1Client + batchV1beta1 *batchv1beta1.BatchV1beta1Client + batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client + certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client + coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client + coreV1 *corev1.CoreV1Client + eventsV1beta1 *eventsv1beta1.EventsV1beta1Client + extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client + networkingV1 *networkingv1.NetworkingV1Client + policyV1beta1 *policyv1beta1.PolicyV1beta1Client + rbacV1 *rbacv1.RbacV1Client + rbacV1beta1 *rbacv1beta1.RbacV1beta1Client + rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client + schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client + schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client + settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client + storageV1beta1 *storagev1beta1.StorageV1beta1Client + storageV1 *storagev1.StorageV1Client + storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client +} + +// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client +func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { + return c.admissionregistrationV1alpha1 } // AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client @@ -147,9 +171,10 @@ func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1. return c.admissionregistrationV1beta1 } -// AppsV1 retrieves the AppsV1Client -func (c *Clientset) AppsV1() appsv1.AppsV1Interface { - return c.appsV1 +// Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. +// Please explicitly pick a version. +func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { + return c.admissionregistrationV1beta1 } // AppsV1beta1 retrieves the AppsV1beta1Client @@ -162,9 +187,15 @@ func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { return c.appsV1beta2 } -// AuditregistrationV1alpha1 retrieves the AuditregistrationV1alpha1Client -func (c *Clientset) AuditregistrationV1alpha1() auditregistrationv1alpha1.AuditregistrationV1alpha1Interface { - return c.auditregistrationV1alpha1 +// AppsV1 retrieves the AppsV1Client +func (c *Clientset) AppsV1() appsv1.AppsV1Interface { + return c.appsV1 +} + +// Deprecated: Apps retrieves the default version of AppsClient. +// Please explicitly pick a version. 
+func (c *Clientset) Apps() appsv1.AppsV1Interface { + return c.appsV1 } // AuthenticationV1 retrieves the AuthenticationV1Client @@ -172,6 +203,12 @@ func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interfac return c.authenticationV1 } +// Deprecated: Authentication retrieves the default version of AuthenticationClient. +// Please explicitly pick a version. +func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { + return c.authenticationV1 +} + // AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { return c.authenticationV1beta1 @@ -182,6 +219,12 @@ func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { return c.authorizationV1 } +// Deprecated: Authorization retrieves the default version of AuthorizationClient. +// Please explicitly pick a version. +func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { + return c.authorizationV1 +} + // AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { return c.authorizationV1beta1 @@ -192,6 +235,12 @@ func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { return c.autoscalingV1 } +// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. +// Please explicitly pick a version. +func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { + return c.autoscalingV1 +} + // AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { return c.autoscalingV2beta1 @@ -207,6 +256,12 @@ func (c *Clientset) BatchV1() batchv1.BatchV1Interface { return c.batchV1 } +// Deprecated: Batch retrieves the default version of BatchClient. +// Please explicitly pick a version. +func (c *Clientset) Batch() batchv1.BatchV1Interface { + return c.batchV1 +} + // BatchV1beta1 retrieves the BatchV1beta1Client func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { return c.batchV1beta1 @@ -222,14 +277,21 @@ func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta return c.certificatesV1beta1 } +// Deprecated: Certificates retrieves the default version of CertificatesClient. +// Please explicitly pick a version. +func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { + return c.certificatesV1beta1 +} + // CoordinationV1beta1 retrieves the CoordinationV1beta1Client func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { return c.coordinationV1beta1 } -// CoordinationV1 retrieves the CoordinationV1Client -func (c *Clientset) CoordinationV1() coordinationv1.CoordinationV1Interface { - return c.coordinationV1 +// Deprecated: Coordination retrieves the default version of CoordinationClient. +// Please explicitly pick a version. +func (c *Clientset) Coordination() coordinationv1beta1.CoordinationV1beta1Interface { + return c.coordinationV1beta1 } // CoreV1 retrieves the CoreV1Client @@ -237,34 +299,43 @@ func (c *Clientset) CoreV1() corev1.CoreV1Interface { return c.coreV1 } +// Deprecated: Core retrieves the default version of CoreClient. +// Please explicitly pick a version. 
+func (c *Clientset) Core() corev1.CoreV1Interface { + return c.coreV1 +} + // EventsV1beta1 retrieves the EventsV1beta1Client func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { return c.eventsV1beta1 } +// Deprecated: Events retrieves the default version of EventsClient. +// Please explicitly pick a version. +func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { + return c.eventsV1beta1 +} + // ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { return c.extensionsV1beta1 } +// Deprecated: Extensions retrieves the default version of ExtensionsClient. +// Please explicitly pick a version. +func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { + return c.extensionsV1beta1 +} + // NetworkingV1 retrieves the NetworkingV1Client func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { return c.networkingV1 } -// NetworkingV1beta1 retrieves the NetworkingV1beta1Client -func (c *Clientset) NetworkingV1beta1() networkingv1beta1.NetworkingV1beta1Interface { - return c.networkingV1beta1 -} - -// NodeV1alpha1 retrieves the NodeV1alpha1Client -func (c *Clientset) NodeV1alpha1() nodev1alpha1.NodeV1alpha1Interface { - return c.nodeV1alpha1 -} - -// NodeV1beta1 retrieves the NodeV1beta1Client -func (c *Clientset) NodeV1beta1() nodev1beta1.NodeV1beta1Interface { - return c.nodeV1beta1 +// Deprecated: Networking retrieves the default version of NetworkingClient. +// Please explicitly pick a version. +func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { + return c.networkingV1 } // PolicyV1beta1 retrieves the PolicyV1beta1Client @@ -272,11 +343,23 @@ func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { return c.policyV1beta1 } +// Deprecated: Policy retrieves the default version of PolicyClient. +// Please explicitly pick a version. +func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { + return c.policyV1beta1 +} + // RbacV1 retrieves the RbacV1Client func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { return c.rbacV1 } +// Deprecated: Rbac retrieves the default version of RbacClient. +// Please explicitly pick a version. +func (c *Clientset) Rbac() rbacv1.RbacV1Interface { + return c.rbacV1 +} + // RbacV1beta1 retrieves the RbacV1beta1Client func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { return c.rbacV1beta1 @@ -297,9 +380,10 @@ func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Inter return c.schedulingV1beta1 } -// SchedulingV1 retrieves the SchedulingV1Client -func (c *Clientset) SchedulingV1() schedulingv1.SchedulingV1Interface { - return c.schedulingV1 +// Deprecated: Scheduling retrieves the default version of SchedulingClient. +// Please explicitly pick a version. +func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface { + return c.schedulingV1beta1 } // SettingsV1alpha1 retrieves the SettingsV1alpha1Client @@ -307,6 +391,12 @@ func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interfac return c.settingsV1alpha1 } +// Deprecated: Settings retrieves the default version of SettingsClient. +// Please explicitly pick a version. 
+func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { + return c.settingsV1alpha1 +} + // StorageV1beta1 retrieves the StorageV1beta1Client func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { return c.storageV1beta1 @@ -317,6 +407,12 @@ func (c *Clientset) StorageV1() storagev1.StorageV1Interface { return c.storageV1 } +// Deprecated: Storage retrieves the default version of StorageClient. +// Please explicitly pick a version. +func (c *Clientset) Storage() storagev1.StorageV1Interface { + return c.storageV1 +} + // StorageV1alpha1 retrieves the StorageV1alpha1Client func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { return c.storageV1alpha1 @@ -338,11 +434,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { } var cs Clientset var err error - cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } - cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) + cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -354,7 +450,7 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.auditregistrationV1alpha1, err = auditregistrationv1alpha1.NewForConfig(&configShallowCopy) + cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) if err != nil { return nil, err } @@ -406,10 +502,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.coordinationV1, err = coordinationv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -426,18 +518,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.networkingV1beta1, err = networkingv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.nodeV1alpha1, err = nodev1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.nodeV1beta1, err = nodev1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -462,10 +542,6 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } - cs.schedulingV1, err = schedulingv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) if err != nil { return nil, err @@ -494,11 +570,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { // panics if there is an error in the config. 
func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset + cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) - cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) - cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.NewForConfigOrDie(c) + cs.appsV1 = appsv1.NewForConfigOrDie(c) cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) @@ -511,21 +587,16 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) - cs.coordinationV1 = coordinationv1.NewForConfigOrDie(c) cs.coreV1 = corev1.NewForConfigOrDie(c) cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) cs.networkingV1 = networkingv1.NewForConfigOrDie(c) - cs.networkingV1beta1 = networkingv1beta1.NewForConfigOrDie(c) - cs.nodeV1alpha1 = nodev1alpha1.NewForConfigOrDie(c) - cs.nodeV1beta1 = nodev1beta1.NewForConfigOrDie(c) cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) cs.rbacV1 = rbacv1.NewForConfigOrDie(c) cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) - cs.schedulingV1 = schedulingv1.NewForConfigOrDie(c) cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) cs.storageV1 = storagev1.NewForConfigOrDie(c) @@ -538,11 +609,11 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { // New creates a new Clientset for the given RESTClient. 
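// A minimal usage sketch for the constructors above, assuming an in-cluster
// rest.Config; the package name, the all-namespaces pod list, and the error
// handling are illustrative assumptions, not part of the vendored code:
package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func exampleListPods() error {
	config, err := rest.InClusterConfig() // succeeds only when running inside a pod
	if err != nil {
		return err
	}
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		return err
	}
	// Prefer the versioned accessors; Core() is only the deprecated alias this
	// older generated clientset re-exposes.
	pods, err := clientset.CoreV1().Pods("").List(metav1.ListOptions{})
	if err != nil {
		return err // may also be an RBAC denial, depending on the service account
	}
	_ = pods.Items
	return nil
}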
func New(c rest.Interface) *Clientset { var cs Clientset + cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) - cs.appsV1 = appsv1.New(c) cs.appsV1beta1 = appsv1beta1.New(c) cs.appsV1beta2 = appsv1beta2.New(c) - cs.auditregistrationV1alpha1 = auditregistrationv1alpha1.New(c) + cs.appsV1 = appsv1.New(c) cs.authenticationV1 = authenticationv1.New(c) cs.authenticationV1beta1 = authenticationv1beta1.New(c) cs.authorizationV1 = authorizationv1.New(c) @@ -555,21 +626,16 @@ func New(c rest.Interface) *Clientset { cs.batchV2alpha1 = batchv2alpha1.New(c) cs.certificatesV1beta1 = certificatesv1beta1.New(c) cs.coordinationV1beta1 = coordinationv1beta1.New(c) - cs.coordinationV1 = coordinationv1.New(c) cs.coreV1 = corev1.New(c) cs.eventsV1beta1 = eventsv1beta1.New(c) cs.extensionsV1beta1 = extensionsv1beta1.New(c) cs.networkingV1 = networkingv1.New(c) - cs.networkingV1beta1 = networkingv1beta1.New(c) - cs.nodeV1alpha1 = nodev1alpha1.New(c) - cs.nodeV1beta1 = nodev1beta1.New(c) cs.policyV1beta1 = policyv1beta1.New(c) cs.rbacV1 = rbacv1.New(c) cs.rbacV1beta1 = rbacv1beta1.New(c) cs.rbacV1alpha1 = rbacv1alpha1.New(c) cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) cs.schedulingV1beta1 = schedulingv1beta1.New(c) - cs.schedulingV1 = schedulingv1.New(c) cs.settingsV1alpha1 = settingsv1alpha1.New(c) cs.storageV1beta1 = storagev1beta1.New(c) cs.storageV1 = storagev1.New(c) diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go index 8346d26..9ca89b7 100644 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ b/vendor/k8s.io/client-go/kubernetes/scheme/register.go @@ -19,11 +19,11 @@ limitations under the License. 
package scheme import ( + admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" appsv1 "k8s.io/api/apps/v1" appsv1beta1 "k8s.io/api/apps/v1beta1" appsv1beta2 "k8s.io/api/apps/v1beta2" - auditregistrationv1alpha1 "k8s.io/api/auditregistration/v1alpha1" authenticationv1 "k8s.io/api/authentication/v1" authenticationv1beta1 "k8s.io/api/authentication/v1beta1" authorizationv1 "k8s.io/api/authorization/v1" @@ -35,20 +35,15 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" batchv2alpha1 "k8s.io/api/batch/v2alpha1" certificatesv1beta1 "k8s.io/api/certificates/v1beta1" - coordinationv1 "k8s.io/api/coordination/v1" coordinationv1beta1 "k8s.io/api/coordination/v1beta1" corev1 "k8s.io/api/core/v1" eventsv1beta1 "k8s.io/api/events/v1beta1" extensionsv1beta1 "k8s.io/api/extensions/v1beta1" networkingv1 "k8s.io/api/networking/v1" - networkingv1beta1 "k8s.io/api/networking/v1beta1" - nodev1alpha1 "k8s.io/api/node/v1alpha1" - nodev1beta1 "k8s.io/api/node/v1beta1" policyv1beta1 "k8s.io/api/policy/v1beta1" rbacv1 "k8s.io/api/rbac/v1" rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" rbacv1beta1 "k8s.io/api/rbac/v1beta1" - schedulingv1 "k8s.io/api/scheduling/v1" schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" settingsv1alpha1 "k8s.io/api/settings/v1alpha1" @@ -66,11 +61,11 @@ var Scheme = runtime.NewScheme() var Codecs = serializer.NewCodecFactory(Scheme) var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ + admissionregistrationv1alpha1.AddToScheme, admissionregistrationv1beta1.AddToScheme, - appsv1.AddToScheme, appsv1beta1.AddToScheme, appsv1beta2.AddToScheme, - auditregistrationv1alpha1.AddToScheme, + appsv1.AddToScheme, authenticationv1.AddToScheme, authenticationv1beta1.AddToScheme, authorizationv1.AddToScheme, @@ -83,21 +78,16 @@ var localSchemeBuilder = runtime.SchemeBuilder{ batchv2alpha1.AddToScheme, certificatesv1beta1.AddToScheme, coordinationv1beta1.AddToScheme, - coordinationv1.AddToScheme, corev1.AddToScheme, eventsv1beta1.AddToScheme, extensionsv1beta1.AddToScheme, networkingv1.AddToScheme, - networkingv1beta1.AddToScheme, - nodev1alpha1.AddToScheme, - nodev1beta1.AddToScheme, policyv1beta1.AddToScheme, rbacv1.AddToScheme, rbacv1beta1.AddToScheme, rbacv1alpha1.AddToScheme, schedulingv1alpha1.AddToScheme, schedulingv1beta1.AddToScheme, - schedulingv1.AddToScheme, settingsv1alpha1.AddToScheme, storagev1beta1.AddToScheme, storagev1.AddToScheme, diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go similarity index 59% rename from vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go index f007b05..5e02f72 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditregistration_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go @@ -19,28 +19,28 @@ limitations under the License. 
package v1alpha1 import ( - v1alpha1 "k8s.io/api/auditregistration/v1alpha1" + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" serializer "k8s.io/apimachinery/pkg/runtime/serializer" "k8s.io/client-go/kubernetes/scheme" rest "k8s.io/client-go/rest" ) -type AuditregistrationV1alpha1Interface interface { +type AdmissionregistrationV1alpha1Interface interface { RESTClient() rest.Interface - AuditSinksGetter + InitializerConfigurationsGetter } -// AuditregistrationV1alpha1Client is used to interact with features provided by the auditregistration.k8s.io group. -type AuditregistrationV1alpha1Client struct { +// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group. +type AdmissionregistrationV1alpha1Client struct { restClient rest.Interface } -func (c *AuditregistrationV1alpha1Client) AuditSinks() AuditSinkInterface { - return newAuditSinks(c) +func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { + return newInitializerConfigurations(c) } -// NewForConfig creates a new AuditregistrationV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) { +// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config. +func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) { config := *c if err := setConfigDefaults(&config); err != nil { return nil, err @@ -49,12 +49,12 @@ func NewForConfig(c *rest.Config) (*AuditregistrationV1alpha1Client, error) { if err != nil { return nil, err } - return &AuditregistrationV1alpha1Client{client}, nil + return &AdmissionregistrationV1alpha1Client{client}, nil } -// NewForConfigOrDie creates a new AuditregistrationV1alpha1Client for the given config and +// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and // panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client { +func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { client, err := NewForConfig(c) if err != nil { panic(err) @@ -62,9 +62,9 @@ func NewForConfigOrDie(c *rest.Config) *AuditregistrationV1alpha1Client { return client } -// New creates a new AuditregistrationV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *AuditregistrationV1alpha1Client { - return &AuditregistrationV1alpha1Client{c} +// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient. +func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { + return &AdmissionregistrationV1alpha1Client{c} } func setConfigDefaults(config *rest.Config) error { @@ -82,7 +82,7 @@ func setConfigDefaults(config *rest.Config) error { // RESTClient returns a RESTClient that is used to communicate // with API server by this client implementation. 
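// A brief sketch of reaching this group client through the top-level
// Clientset (assumes the same imports and clientset as the earlier sketch;
// the empty ListOptions is illustrative). InitializerConfiguration is
// cluster-scoped, so the generated methods build requests directly against
// Resource("initializerconfigurations") with no Namespace(...) segment.
func exampleListInitializerConfigurations(clientset kubernetes.Interface) error {
	configs, err := clientset.AdmissionregistrationV1alpha1().
		InitializerConfigurations().
		List(metav1.ListOptions{})
	if err != nil {
		return err
	}
	_ = configs.Items // e.g. inspect the returned configurations
	return nil
}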
-func (c *AuditregistrationV1alpha1Client) RESTClient() rest.Interface { +func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface { if c == nil { return nil } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go similarity index 100% rename from vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/doc.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go similarity index 92% rename from vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go index fcef31d..1e29b96 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go @@ -18,4 +18,4 @@ limitations under the License. package v1alpha1 -type RuntimeClassExpansion interface{} +type InitializerConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go new file mode 100644 index 0000000..e014ea7 --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go @@ -0,0 +1,147 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + scheme "k8s.io/client-go/kubernetes/scheme" + rest "k8s.io/client-go/rest" +) + +// InitializerConfigurationsGetter has a method to return a InitializerConfigurationInterface. +// A group's client should implement this interface. +type InitializerConfigurationsGetter interface { + InitializerConfigurations() InitializerConfigurationInterface +} + +// InitializerConfigurationInterface has methods to work with InitializerConfiguration resources. 
+type InitializerConfigurationInterface interface { + Create(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) + Update(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*v1alpha1.InitializerConfiguration, error) + List(opts v1.ListOptions) (*v1alpha1.InitializerConfigurationList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) + InitializerConfigurationExpansion +} + +// initializerConfigurations implements InitializerConfigurationInterface +type initializerConfigurations struct { + client rest.Interface +} + +// newInitializerConfigurations returns a InitializerConfigurations +func newInitializerConfigurations(c *AdmissionregistrationV1alpha1Client) *initializerConfigurations { + return &initializerConfigurations{ + client: c.RESTClient(), + } +} + +// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any. +func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Get(). + Resource("initializerconfigurations"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. +func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { + result = &v1alpha1.InitializerConfigurationList{} + err = c.client.Get(). + Resource("initializerconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested initializerConfigurations. +func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { + opts.Watch = true + return c.client.Get(). + Resource("initializerconfigurations"). + VersionedParams(&opts, scheme.ParameterCodec). + Watch() +} + +// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. +func (c *initializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Post(). + Resource("initializerconfigurations"). + Body(initializerConfiguration). + Do(). + Into(result) + return +} + +// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. +func (c *initializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Put(). + Resource("initializerconfigurations"). + Name(initializerConfiguration.Name). + Body(initializerConfiguration). + Do(). 
+ Into(result) + return +} + +// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs. +func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). + Resource("initializerconfigurations"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + return c.client.Delete(). + Resource("initializerconfigurations"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched initializerConfiguration. +func (c *initializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { + result = &v1alpha1.InitializerConfiguration{} + err = c.client.Patch(pt). + Resource("initializerconfigurations"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go index 4524896..cb01571 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) // List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.MutatingWebhookConfigurationList{} err = c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1bet // Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("mutatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOp // DeleteCollection deletes a collection of objects. func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). 
Resource("mutatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go index 7e711b3..3a9339f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/admissionregistration/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions // List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ValidatingWebhookConfigurationList{} err = c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1b // Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("validatingwebhookconfigurations"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *validatingWebhookConfigurations) Delete(name string, options *v1.Delete // DeleteCollection deletes a collection of objects. func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("validatingwebhookconfigurations"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go index e28e4d2..1ddaa1a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. 
func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.Controll // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go index a535cda..03a8706 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/apps/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.Dae // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, er // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
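// The Timeout(...) calls removed above mean this older generated code no
// longer derives a client-side request timeout from opts.TimeoutSeconds; the
// field is still encoded into the query string by VersionedParams, so any
// bound now comes from the server honoring it (or from the caller's own HTTP
// client settings). A watch sketch, assuming the clientset from the earlier
// sketch; the "kube-system" namespace and 30-second value are illustrative:
func exampleWatchDaemonSets(clientset kubernetes.Interface) error {
	timeoutSeconds := int64(30)
	w, err := clientset.AppsV1().DaemonSets("kube-system").Watch(metav1.ListOptions{
		TimeoutSeconds: &timeoutSeconds, // server ends the watch after roughly 30s
	})
	if err != nil {
		return err
	}
	defer w.Stop()
	for event := range w.ResultChan() {
		_ = event // handle watch events here
	}
	return nil
}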
func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go index f9799a4..73d46f8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go @@ -19,10 +19,7 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -47,9 +44,6 @@ type DeploymentInterface interface { List(opts metav1.ListOptions) (*v1.DeploymentList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) - GetScale(deploymentName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) - DeploymentExpansion } @@ -82,16 +76,11 @@ func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.De // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -99,16 +88,11 @@ func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -166,15 +150,10 @@ func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() @@ -193,31 +172,3 @@ func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } - -// GetScale takes name of the deployment, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. 
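// With GetScale/UpdateScale dropped from this older apps/v1 deployments client
// (removed above), one way to resize a Deployment is a plain Get/Update of
// Spec.Replicas rather than the scale subresource. A sketch, assuming the
// clientset from the earlier sketch; the namespace, name, and replica count
// are illustrative, and a real caller would retry on update conflicts:
func exampleResizeDeployment(clientset kubernetes.Interface) error {
	d, err := clientset.AppsV1().Deployments("default").Get("my-app", metav1.GetOptions{})
	if err != nil {
		return err
	}
	replicas := int32(3)
	d.Spec.Replicas = &replicas
	_, err = clientset.AppsV1().Deployments("default").Update(d)
	return err
}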
-func (c *deployments) GetScale(deploymentName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go index ff3504e..0779411 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go @@ -19,10 +19,7 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -47,9 +44,6 @@ type ReplicaSetInterface interface { List(opts metav1.ListOptions) (*v1.ReplicaSetList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) - GetScale(replicaSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) - ReplicaSetExpansion } @@ -82,16 +76,11 @@ func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.Re // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -99,16 +88,11 @@ func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -166,15 +150,10 @@ func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). 
Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() @@ -193,31 +172,3 @@ func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subres Into(result) return } - -// GetScale takes name of the replicaSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(replicaSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(replicaSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go index c12c470..54322d9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go @@ -19,10 +19,7 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/apps/v1" - autoscalingv1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -47,9 +44,6 @@ type StatefulSetInterface interface { List(opts metav1.ListOptions) (*v1.StatefulSetList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) - GetScale(statefulSetName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) - StatefulSetExpansion } @@ -82,16 +76,11 @@ func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.S // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -99,16 +88,11 @@ func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -166,15 +150,10 @@ func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() @@ -193,31 +172,3 @@ func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subre Into(result) return } - -// GetScale takes name of the statefulSet, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(statefulSetName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(statefulSetName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go index 2c9db88..4d882e2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go @@ -29,6 +29,7 @@ type AppsV1beta1Interface interface { RESTClient() rest.Interface ControllerRevisionsGetter DeploymentsGetter + ScalesGetter StatefulSetsGetter } @@ -45,6 +46,10 @@ func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { return newDeployments(c, namespace) } +func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go index 45ddb91..ec8fa92 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. 
func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go index 05fdcb7..365e06f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go index 113455d..b2bfd73 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go @@ -22,4 +22,6 @@ type ControllerRevisionExpansion interface{} type DeploymentExpansion interface{} +type ScaleExpansion interface{} + type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go similarity index 51% rename from vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go index 1442649..cef27bd 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go @@ -18,4 +18,31 @@ limitations under the License. package v1beta1 -type IngressExpansion interface{} +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *AppsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go index c4b35b4..6517454 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/apps/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. 
func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go index 99d677f..2754949 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go @@ -31,6 +31,7 @@ type AppsV1beta2Interface interface { DaemonSetsGetter DeploymentsGetter ReplicaSetsGetter + ScalesGetter StatefulSetsGetter } @@ -55,6 +56,10 @@ func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface { return newReplicaSets(c, namespace) } +func (c *AppsV1beta2Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface { return newStatefulSets(c, namespace) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go index e1d6025..1271cc6 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.ControllerRevisionList{} err = c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.Control // Watch returns a watch.Interface that watches the requested controllerRevisions. func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -145,15 +133,10 @@ func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("controllerrevisions"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go index f8b7ac2..683c068 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go index 510250b..9a04513 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go index 6a21749..bceae59 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go @@ -26,4 +26,6 @@ type DeploymentExpansion interface{} type ReplicaSetExpansion interface{} +type ScaleExpansion interface{} + type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go index 7b73877..9fd9de9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). 
Into(result) return @@ -95,16 +88,11 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go new file mode 100644 index 0000000..f8d6a7f --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go @@ -0,0 +1,48 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta2 + +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *AppsV1beta2Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go index de7c3db..095601e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta2 import ( - "time" - v1beta2 "k8s.io/api/apps/v1beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -81,16 +79,11 @@ func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2. // List takes label and field selectors, and returns the list of StatefulSets that match those selectors. 
func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta2.StatefulSetList{} err = c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -98,16 +91,11 @@ func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetLis // Watch returns a watch.Interface that watches the requested statefulSets. func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -165,15 +153,10 @@ func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("statefulsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go deleted file mode 100644 index 414d480..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/auditsink.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "time" - - v1alpha1 "k8s.io/api/auditregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// AuditSinksGetter has a method to return a AuditSinkInterface. -// A group's client should implement this interface. -type AuditSinksGetter interface { - AuditSinks() AuditSinkInterface -} - -// AuditSinkInterface has methods to work with AuditSink resources. 
-type AuditSinkInterface interface { - Create(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) - Update(*v1alpha1.AuditSink) (*v1alpha1.AuditSink, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.AuditSink, error) - List(opts v1.ListOptions) (*v1alpha1.AuditSinkList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) - AuditSinkExpansion -} - -// auditSinks implements AuditSinkInterface -type auditSinks struct { - client rest.Interface -} - -// newAuditSinks returns a AuditSinks -func newAuditSinks(c *AuditregistrationV1alpha1Client) *auditSinks { - return &auditSinks{ - client: c.RESTClient(), - } -} - -// Get takes name of the auditSink, and returns the corresponding auditSink object, and an error if there is any. -func (c *auditSinks) Get(name string, options v1.GetOptions) (result *v1alpha1.AuditSink, err error) { - result = &v1alpha1.AuditSink{} - err = c.client.Get(). - Resource("auditsinks"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of AuditSinks that match those selectors. -func (c *auditSinks) List(opts v1.ListOptions) (result *v1alpha1.AuditSinkList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.AuditSinkList{} - err = c.client.Get(). - Resource("auditsinks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested auditSinks. -func (c *auditSinks) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("auditsinks"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a auditSink and creates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *auditSinks) Create(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { - result = &v1alpha1.AuditSink{} - err = c.client.Post(). - Resource("auditsinks"). - Body(auditSink). - Do(). - Into(result) - return -} - -// Update takes the representation of a auditSink and updates it. Returns the server's representation of the auditSink, and an error, if there is any. -func (c *auditSinks) Update(auditSink *v1alpha1.AuditSink) (result *v1alpha1.AuditSink, err error) { - result = &v1alpha1.AuditSink{} - err = c.client.Put(). - Resource("auditsinks"). - Name(auditSink.Name). - Body(auditSink). - Do(). - Into(result) - return -} - -// Delete takes name of the auditSink and deletes it. Returns an error if one occurs. -func (c *auditSinks) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("auditsinks"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *auditSinks) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("auditsinks"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched auditSink. -func (c *auditSinks) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.AuditSink, err error) { - result = &v1alpha1.AuditSink{} - err = c.client.Patch(pt). - Resource("auditsinks"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go deleted file mode 100644 index f0f5117..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type AuditSinkExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go index 0e0839f..6891b6b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/autoscaling/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) ( // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.Hor // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. 
func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOpt // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go index 02d5cfb..4ac8cce 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go @@ -19,8 +19,6 @@ limitations under the License. package v2beta1 import ( - "time" - v2beta1 "k8s.io/api/autoscaling/v2beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v2beta1.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go index 91a0fa6..ddabda7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go @@ -19,8 +19,6 @@ limitations under the License. package v2beta2 import ( - "time" - v2beta2 "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (resu // List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v2beta2.HorizontalPodAutoscalerList{} err = c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.Ho // Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions // DeleteCollection deletes a collection of objects. func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("horizontalpodautoscalers"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go index b55c602..ba8332a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err // List takes label and field selectors, and returns the list of Jobs that match those selectors. func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.JobList{} err = c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). 
Into(result) return @@ -95,16 +88,11 @@ func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { // Watch returns a watch.Interface that watches the requested jobs. func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("jobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("jobs"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go index d89d2fa..04637c3 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/batch/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.Cron // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err e // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go index 19123b6..4d922f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go @@ -19,8 +19,6 @@ limitations under the License. package v2alpha1 import ( - "time" - v2alpha1 "k8s.io/api/batch/v2alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.Cro // List takes label and field selectors, and returns the list of CronJobs that match those selectors. func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v2alpha1.CronJobList{} err = c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err // Watch returns a watch.Interface that watches the requested cronJobs. func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("cronjobs"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go index 712d3a0..b39169a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/certificates/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (re // List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.CertificateSigningRequestList{} err = c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). 
Into(result) return @@ -91,15 +84,10 @@ func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1. // Watch returns a watch.Interface that watches the requested certificateSigningRequests. func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("certificatesigningrequests"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("certificatesigningrequests"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go deleted file mode 100644 index 9b566f3..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/coordination_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/coordination/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type CoordinationV1Interface interface { - RESTClient() rest.Interface - LeasesGetter -} - -// CoordinationV1Client is used to interact with features provided by the coordination.k8s.io group. -type CoordinationV1Client struct { - restClient rest.Interface -} - -func (c *CoordinationV1Client) Leases(namespace string) LeaseInterface { - return newLeases(c, namespace) -} - -// NewForConfig creates a new CoordinationV1Client for the given config. -func NewForConfig(c *rest.Config) (*CoordinationV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoordinationV1Client{client}, nil -} - -// NewForConfigOrDie creates a new CoordinationV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CoordinationV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoordinationV1Client for the given RESTClient. 
-func New(c rest.Interface) *CoordinationV1Client { - return &CoordinationV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CoordinationV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go deleted file mode 100644 index ab24f37..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type LeaseExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go deleted file mode 100644 index b6cf1b6..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1/lease.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "k8s.io/api/coordination/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// LeasesGetter has a method to return a LeaseInterface. -// A group's client should implement this interface. -type LeasesGetter interface { - Leases(namespace string) LeaseInterface -} - -// LeaseInterface has methods to work with Lease resources. -type LeaseInterface interface { - Create(*v1.Lease) (*v1.Lease, error) - Update(*v1.Lease) (*v1.Lease, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Lease, error) - List(opts metav1.ListOptions) (*v1.LeaseList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) - LeaseExpansion -} - -// leases implements LeaseInterface -type leases struct { - client rest.Interface - ns string -} - -// newLeases returns a Leases -func newLeases(c *CoordinationV1Client, namespace string) *leases { - return &leases{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. -func (c *leases) Get(name string, options metav1.GetOptions) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(opts metav1.ListOptions) (result *v1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(lease *v1.Lease) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - Body(lease). - Do(). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(lease *v1.Lease) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - Body(lease). - Do(). 
- Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Lease, err error) { - result = &v1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go index 490d815..1627725 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/coordination/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, // List takes label and field selectors, and returns the list of Leases that match those selectors. func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.LeaseList{} err = c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error // Watch returns a watch.Interface that watches the requested leases. func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("leases"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *leases) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("leases"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go index 302b2fd..e497661 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ComponentStatusList{} err = c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentS // Watch returns a watch.Interface that watches the requested componentStatuses. func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("componentstatuses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("componentstatuses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go index 18ce954..0984ae7 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.Con // List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ConfigMapList{} err = c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, er // Watch returns a watch.Interface that watches the requested configMaps. 
func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("configmaps"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go index 978a2a1..dd82167 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endp // List takes label and field selectors, and returns the list of Endpoints that match those selectors. func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.EndpointsList{} err = c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err // Watch returns a watch.Interface that watches the requested endpoints. func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("endpoints"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go index 55cfa09..57d30f9 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *events) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go index 2eeae11..5b38566 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.Li // List takes label and field selectors, and returns the list of LimitRanges that match those selectors. func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.LimitRangeList{} err = c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, // Watch returns a watch.Interface that watches the requested limitRanges. func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -145,15 +133,10 @@ func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("limitranges"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go index 8a81fe8..e22d07d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Nam // List takes label and field selectors, and returns the list of Namespaces that match those selectors. func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.NamespaceList{} err = c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, er // Watch returns a watch.Interface that watches the requested namespaces. func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("namespaces"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go index d19fab8..5c769c1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, er // List takes label and field selectors, and returns the list of Nodes that match those selectors. func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.NodeList{} err = c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { // Watch returns a watch.Interface that watches the requested nodes. 
func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("nodes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("nodes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go index 7451482..d5f19ae 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result // List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.PersistentVolumeList{} err = c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.Persistent // Watch returns a watch.Interface that watches the requested persistentVolumes. func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("persistentvolumes"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) e // DeleteCollection deletes a collection of objects. func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("persistentvolumes"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go index 410ab37..d32ae5d 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.PersistentVolumeClaimList{} err = c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.Persi // Watch returns a watch.Interface that watches the requested persistentVolumeClaims. func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("persistentvolumeclaims"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go index 8d6b6e8..b19c5a5 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err // List takes label and field selectors, and returns the list of Pods that match those selectors. func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.PodList{} err = c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { // Watch returns a watch.Interface that watches the requested pods. func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("pods"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -162,15 +150,10 @@ func (c *pods) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("pods"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go index 84d7c98..d644e17 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.P // List takes label and field selectors, and returns the list of PodTemplates that match those selectors. func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.PodTemplateList{} err = c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList // Watch returns a watch.Interface that watches the requested podTemplates. func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("podtemplates"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go index dd3182d..17622f1 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go @@ -19,10 +19,8 @@ limitations under the License. 
package v1 import ( - "time" - - autoscalingv1 "k8s.io/api/autoscaling/v1" v1 "k8s.io/api/core/v1" + v1beta1 "k8s.io/api/extensions/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" watch "k8s.io/apimachinery/pkg/watch" @@ -47,8 +45,8 @@ type ReplicationControllerInterface interface { List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error) Watch(opts metav1.ListOptions) (watch.Interface, error) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) - GetScale(replicationControllerName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) - UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (*autoscalingv1.Scale, error) + GetScale(replicationControllerName string, options metav1.GetOptions) (*v1beta1.Scale, error) + UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) ReplicationControllerExpansion } @@ -82,16 +80,11 @@ func (c *replicationControllers) Get(name string, options metav1.GetOptions) (re // List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ReplicationControllerList{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -99,16 +92,11 @@ func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.Repli // Watch returns a watch.Interface that watches the requested replicationControllers. func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -166,15 +154,10 @@ func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptio // DeleteCollection deletes a collection of objects. func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("replicationcontrollers"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() @@ -194,9 +177,9 @@ func (c *replicationControllers) Patch(name string, pt types.PatchType, data []b return } -// GetScale takes name of the replicationController, and returns the corresponding autoscalingv1.Scale object, and an error if there is any. -func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} +// GetScale takes name of the replicationController, and returns the corresponding v1beta1.Scale object, and an error if there is any. 
+func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} err = c.client.Get(). Namespace(c.ns). Resource("replicationcontrollers"). @@ -209,8 +192,8 @@ func (c *replicationControllers) GetScale(replicationControllerName string, opti } // UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *autoscalingv1.Scale) (result *autoscalingv1.Scale, err error) { - result = &autoscalingv1.Scale{} +func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} err = c.client.Put(). Namespace(c.ns). Resource("replicationcontrollers"). diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go index 5a17899..8b74a40 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ResourceQuotaList{} err = c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuota // Watch returns a watch.Interface that watches the requested resourceQuotas. func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("resourcequotas"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). 
Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go index 85c143b..4ea9796 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret // List takes label and field selectors, and returns the list of Secrets that match those selectors. func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.SecretList{} err = c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err erro // Watch returns a watch.Interface that watches the requested secrets. func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("secrets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("secrets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go index b0e0941..6c42ca8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Servi // List takes label and field selectors, and returns the list of Services that match those selectors. func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ServiceList{} err = c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err er // Watch returns a watch.Interface that watches the requested services. 
func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("services"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go index 50af6a2..f3ab7eb 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ServiceAccountList{} err = c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccou // Watch returns a watch.Interface that watches the requested serviceAccounts. func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("serviceaccounts"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go index 143281b..af7d060 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/events/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, // List takes label and field selectors, and returns the list of Events that match those selectors. 
func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.EventList{} err = c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error // Watch returns a watch.Interface that watches the requested events. func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("events"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *events) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("events"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go index 93b1ae9..85294be 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.Da // List takes label and field selectors, and returns the list of DaemonSets that match those selectors. func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.DaemonSetList{} err = c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, e // Watch returns a watch.Interface that watches the requested daemonSets. func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("daemonsets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go index 5557b9f..89183d2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -81,16 +79,11 @@ func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.D // List takes label and field selectors, and returns the list of Deployments that match those selectors. func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.DeploymentList{} err = c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -98,16 +91,11 @@ func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, // Watch returns a watch.Interface that watches the requested deployments. func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("deployments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -165,15 +153,10 @@ func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("deployments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go index 0e9edf5..1961ffc 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go @@ -32,6 +32,7 @@ type ExtensionsV1beta1Interface interface { IngressesGetter PodSecurityPoliciesGetter ReplicaSetsGetter + ScalesGetter } // ExtensionsV1beta1Client is used to interact with features provided by the extensions group. 
@@ -59,6 +60,10 @@ func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterf return newReplicaSets(c, namespace) } +func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { + return newScales(c, namespace) +} + // NewForConfig creates a new ExtensionsV1beta1Client for the given config. func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go index 4da51c3..f8b664c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ing // List takes label and field selectors, and returns the list of Ingresses that match those selectors. func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.IngressList{} err = c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err // Watch returns a watch.Interface that watches the requested ingresses. func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("ingresses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go index a947a54..8099d77 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. 
func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go index 4440290..7e61fa2 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/extensions/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -81,16 +79,11 @@ func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.R // List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ReplicaSetList{} err = c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -98,16 +91,11 @@ func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, // Watch returns a watch.Interface that watches the requested replicaSets. func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -165,15 +153,10 @@ func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("replicasets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go similarity index 51% rename from vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go rename to vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go index 669dd02..6ee677a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go @@ -18,4 +18,31 @@ limitations under the License. package v1beta1 -type RuntimeClassExpansion interface{} +import ( + rest "k8s.io/client-go/rest" +) + +// ScalesGetter has a method to return a ScaleInterface. +// A group's client should implement this interface. +type ScalesGetter interface { + Scales(namespace string) ScaleInterface +} + +// ScaleInterface has methods to work with Scale resources. +type ScaleInterface interface { + ScaleExpansion +} + +// scales implements ScaleInterface +type scales struct { + client rest.Interface + ns string +} + +// newScales returns a Scales +func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { + return &scales{ + client: c.RESTClient(), + ns: namespace, + } +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go new file mode 100644 index 0000000..c9733cb --- /dev/null +++ b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go @@ -0,0 +1,65 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/api/extensions/v1beta1" + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. +type ScaleExpansion interface { + Get(kind string, name string) (*v1beta1.Scale, error) + Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) +} + +// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. +func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) + + err = c.client.Get(). + Namespace(c.ns). + Resource(resource.Resource). + Name(name). + SubResource("scale"). + Do(). 
+ Into(result) + return +} + +func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { + result = &v1beta1.Scale{} + + // TODO this method needs to take a proper unambiguous kind + fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} + resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) + + err = c.client.Put(). + Namespace(scale.Namespace). + Resource(resource.Resource). + Name(scale.Name). + SubResource("scale"). + Body(scale). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go index 3f39be9..d8f0a6b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/networking/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v // List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.NetworkPolicyList{} err = c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolic // Watch returns a watch.Interface that watches the requested networkPolicies. func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("networkpolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go deleted file mode 100644 index 8d76678..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/ingress.go +++ /dev/null @@ -1,191 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/networking/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// IngressesGetter has a method to return a IngressInterface. -// A group's client should implement this interface. -type IngressesGetter interface { - Ingresses(namespace string) IngressInterface -} - -// IngressInterface has methods to work with Ingress resources. -type IngressInterface interface { - Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) - UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) - List(opts v1.ListOptions) (*v1beta1.IngressList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) - IngressExpansion -} - -// ingresses implements IngressInterface -type ingresses struct { - client rest.Interface - ns string -} - -// newIngresses returns a Ingresses -func newIngresses(c *NetworkingV1beta1Client, namespace string) *ingresses { - return &ingresses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. 
-func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - Body(ingress). - Do(). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - Body(ingress). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - Body(ingress). - Do(). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go deleted file mode 100644 index 541bf6a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1beta1/networking_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/networking/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type NetworkingV1beta1Interface interface { - RESTClient() rest.Interface - IngressesGetter -} - -// NetworkingV1beta1Client is used to interact with features provided by the networking.k8s.io group. -type NetworkingV1beta1Client struct { - restClient rest.Interface -} - -func (c *NetworkingV1beta1Client) Ingresses(namespace string) IngressInterface { - return newIngresses(c, namespace) -} - -// NewForConfig creates a new NetworkingV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*NetworkingV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &NetworkingV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new NetworkingV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NetworkingV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new NetworkingV1beta1Client for the given RESTClient. -func New(c rest.Interface) *NetworkingV1beta1Client { - return &NetworkingV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *NetworkingV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go deleted file mode 100644 index df51baa..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go deleted file mode 100644 index 863f2d4..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/node_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/node/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type NodeV1alpha1Interface interface { - RESTClient() rest.Interface - RuntimeClassesGetter -} - -// NodeV1alpha1Client is used to interact with features provided by the node.k8s.io group. -type NodeV1alpha1Client struct { - restClient rest.Interface -} - -func (c *NodeV1alpha1Client) RuntimeClasses() RuntimeClassInterface { - return newRuntimeClasses(c) -} - -// NewForConfig creates a new NodeV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*NodeV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &NodeV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new NodeV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NodeV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new NodeV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *NodeV1alpha1Client { - return &NodeV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *NodeV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go deleted file mode 100644 index 044460e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1alpha1/runtimeclass.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - "time" - - v1alpha1 "k8s.io/api/node/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// RuntimeClassesGetter has a method to return a RuntimeClassInterface. -// A group's client should implement this interface. -type RuntimeClassesGetter interface { - RuntimeClasses() RuntimeClassInterface -} - -// RuntimeClassInterface has methods to work with RuntimeClass resources. -type RuntimeClassInterface interface { - Create(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) - Update(*v1alpha1.RuntimeClass) (*v1alpha1.RuntimeClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.RuntimeClass, error) - List(opts v1.ListOptions) (*v1alpha1.RuntimeClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) - RuntimeClassExpansion -} - -// runtimeClasses implements RuntimeClassInterface -type runtimeClasses struct { - client rest.Interface -} - -// newRuntimeClasses returns a RuntimeClasses -func newRuntimeClasses(c *NodeV1alpha1Client) *runtimeClasses { - return &runtimeClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1alpha1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1alpha1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). 
- Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Create(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - Body(runtimeClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(runtimeClass *v1alpha1.RuntimeClass) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - Body(runtimeClass). - Do(). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RuntimeClass, err error) { - result = &v1alpha1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go deleted file mode 100644 index 52683e2..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/node_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/node/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type NodeV1beta1Interface interface { - RESTClient() rest.Interface - RuntimeClassesGetter -} - -// NodeV1beta1Client is used to interact with features provided by the node.k8s.io group. -type NodeV1beta1Client struct { - restClient rest.Interface -} - -func (c *NodeV1beta1Client) RuntimeClasses() RuntimeClassInterface { - return newRuntimeClasses(c) -} - -// NewForConfig creates a new NodeV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*NodeV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &NodeV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new NodeV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NodeV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new NodeV1beta1Client for the given RESTClient. -func New(c rest.Interface) *NodeV1beta1Client { - return &NodeV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *NodeV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go b/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go deleted file mode 100644 index b3f7c49..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/node/v1beta1/runtimeclass.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/node/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// RuntimeClassesGetter has a method to return a RuntimeClassInterface. -// A group's client should implement this interface. -type RuntimeClassesGetter interface { - RuntimeClasses() RuntimeClassInterface -} - -// RuntimeClassInterface has methods to work with RuntimeClass resources. -type RuntimeClassInterface interface { - Create(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) - Update(*v1beta1.RuntimeClass) (*v1beta1.RuntimeClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.RuntimeClass, error) - List(opts v1.ListOptions) (*v1beta1.RuntimeClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) - RuntimeClassExpansion -} - -// runtimeClasses implements RuntimeClassInterface -type runtimeClasses struct { - client rest.Interface -} - -// newRuntimeClasses returns a RuntimeClasses -func newRuntimeClasses(c *NodeV1beta1Client) *runtimeClasses { - return &runtimeClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the runtimeClass, and returns the corresponding runtimeClass object, and an error if there is any. -func (c *runtimeClasses) Get(name string, options v1.GetOptions) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Get(). - Resource("runtimeclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RuntimeClasses that match those selectors. -func (c *runtimeClasses) List(opts v1.ListOptions) (result *v1beta1.RuntimeClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.RuntimeClassList{} - err = c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested runtimeClasses. -func (c *runtimeClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("runtimeclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a runtimeClass and creates it. Returns the server's representation of the runtimeClass, and an error, if there is any. 
-func (c *runtimeClasses) Create(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Post(). - Resource("runtimeclasses"). - Body(runtimeClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a runtimeClass and updates it. Returns the server's representation of the runtimeClass, and an error, if there is any. -func (c *runtimeClasses) Update(runtimeClass *v1beta1.RuntimeClass) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Put(). - Resource("runtimeclasses"). - Name(runtimeClass.Name). - Body(runtimeClass). - Do(). - Into(result) - return -} - -// Delete takes name of the runtimeClass and deletes it. Returns an error if one occurs. -func (c *runtimeClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("runtimeclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *runtimeClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("runtimeclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched runtimeClass. -func (c *runtimeClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RuntimeClass, err error) { - result = &v1beta1.RuntimeClass{} - err = c.client.Patch(pt). - Resource("runtimeclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go index 864af9a..a11f27e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -78,16 +76,11 @@ func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result * // List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.PodDisruptionBudgetList{} err = c.client.Get(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -95,16 +88,11 @@ func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDis // Watch returns a watch.Interface that watches the requested podDisruptionBudgets. func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). 
Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -162,15 +150,10 @@ func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) er // DeleteCollection deletes a collection of objects. func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("poddisruptionbudgets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go index d02096d..355be1e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/policy/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.PodSecurityPolicyList{} err = c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecu // Watch returns a watch.Interface that watches the requested podSecurityPolicies. func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("podsecuritypolicies"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("podsecuritypolicies"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go index 0a47c44..c4299d4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1 import ( - "time" - v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.C // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go index c16ebc3..30c0469 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (resul // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterR // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). 
Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go index a17d791..81ea12a 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, er // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roles) Delete(name string, options *metav1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go index c87e457..17c6f99 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.R // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. 
func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go index 77e6687..37a5457 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleLi // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go index 0d1b9d2..6050789 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.Cluste // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go index 4a4b672..aa6954b 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, // List takes label and field selectors, and returns the list of Roles that match those selectors. 
func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go index bf4e5a1..0941b8e 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/rbac/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1 // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingLi // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go index 21d3cab..bac951c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ClusterRoleList{} err = c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleLis // Watch returns a watch.Interface that watches the requested clusterRoles. func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterroles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterroles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go index 47eb9e4..96c91de 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v // List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. 
func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.ClusterRoleBindingList{} err = c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.Cluster // Watch returns a watch.Interface that watches the requested clusterRoleBindings. func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("clusterrolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) err // DeleteCollection deletes a collection of objects. func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("clusterrolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go index 2b61aad..66f382c 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, e // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roles) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. 
func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go index 0bd118f..67d3d33 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/rbac/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1. // List takes label and field selectors, and returns the list of RoleBindings that match those selectors. func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.RoleBindingList{} err = c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingLis // Watch returns a watch.Interface that watches the requested roleBindings. func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("rolebindings"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go deleted file mode 100644 index cc32132..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type PriorityClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go deleted file mode 100644 index 3abbb7b..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/priorityclass.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "k8s.io/api/scheduling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PriorityClassesGetter has a method to return a PriorityClassInterface. -// A group's client should implement this interface. -type PriorityClassesGetter interface { - PriorityClasses() PriorityClassInterface -} - -// PriorityClassInterface has methods to work with PriorityClass resources. 
-type PriorityClassInterface interface { - Create(*v1.PriorityClass) (*v1.PriorityClass, error) - Update(*v1.PriorityClass) (*v1.PriorityClass, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PriorityClass, error) - List(opts metav1.ListOptions) (*v1.PriorityClassList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) - PriorityClassExpansion -} - -// priorityClasses implements PriorityClassInterface -type priorityClasses struct { - client rest.Interface -} - -// newPriorityClasses returns a PriorityClasses -func newPriorityClasses(c *SchedulingV1Client) *priorityClasses { - return &priorityClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any. -func (c *priorityClasses) Get(name string, options metav1.GetOptions) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Get(). - Resource("priorityclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. -func (c *priorityClasses) List(opts metav1.ListOptions) (result *v1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.PriorityClassList{} - err = c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested priorityClasses. -func (c *priorityClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("priorityclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Create(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Post(). - Resource("priorityclasses"). - Body(priorityClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any. -func (c *priorityClasses) Update(priorityClass *v1.PriorityClass) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - Body(priorityClass). - Do(). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *priorityClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PriorityClass, err error) { - result = &v1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go deleted file mode 100644 index bd7e1e5..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1/scheduling_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/scheduling/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type SchedulingV1Interface interface { - RESTClient() rest.Interface - PriorityClassesGetter -} - -// SchedulingV1Client is used to interact with features provided by the scheduling.k8s.io group. -type SchedulingV1Client struct { - restClient rest.Interface -} - -func (c *SchedulingV1Client) PriorityClasses() PriorityClassInterface { - return newPriorityClasses(c) -} - -// NewForConfig creates a new SchedulingV1Client for the given config. -func NewForConfig(c *rest.Config) (*SchedulingV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &SchedulingV1Client{client}, nil -} - -// NewForConfigOrDie creates a new SchedulingV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *SchedulingV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new SchedulingV1Client for the given RESTClient. 
-func New(c rest.Interface) *SchedulingV1Client { - return &SchedulingV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *SchedulingV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go index 29d646f..6845d25 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/scheduling/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alp // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityCl // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go index 5e402f8..57b9766 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go @@ -19,8 +19,6 @@ limitations under the License. 
package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/scheduling/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1bet // List takes label and field selectors, and returns the list of PriorityClasses that match those selectors. func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.PriorityClassList{} err = c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityCla // Watch returns a watch.Interface that watches the requested priorityClasses. func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("priorityclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("priorityclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go index 8fd6adc..f000ae4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/settings/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -77,16 +75,11 @@ func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.P // List takes label and field selectors, and returns the list of PodPresets that match those selectors. func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.PodPresetList{} err = c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -94,16 +87,11 @@ func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, // Watch returns a watch.Interface that watches the requested podPresets. func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&opts, scheme.ParameterCodec). 
- Timeout(timeout). Watch() } @@ -145,15 +133,10 @@ func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Namespace(c.ns). Resource("podpresets"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go index ccac161..2bea7ec 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go @@ -19,5 +19,3 @@ limitations under the License. package v1 type StorageClassExpansion interface{} - -type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go index 92378cf..ac48f49 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go @@ -28,7 +28,6 @@ import ( type StorageV1Interface interface { RESTClient() rest.Interface StorageClassesGetter - VolumeAttachmentsGetter } // StorageV1Client is used to interact with features provided by the storage.k8s.io group. @@ -40,10 +39,6 @@ func (c *StorageV1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } -func (c *StorageV1Client) VolumeAttachments() VolumeAttachmentInterface { - return newVolumeAttachments(c) -} - // NewForConfig creates a new StorageV1Client for the given config. func NewForConfig(c *rest.Config) (*StorageV1Client, error) { config := *c diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go index 3f4c48f..0f7f57f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go @@ -19,8 +19,6 @@ limitations under the License. package v1 import ( - "time" - v1 "k8s.io/api/storage/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1 // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassL // Watch returns a watch.Interface that watches the requested storageClasses. 
func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) erro // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go deleted file mode 100644 index 0f45097..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/volumeattachment.go +++ /dev/null @@ -1,180 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - "time" - - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. -// A group's client should implement this interface. -type VolumeAttachmentsGetter interface { - VolumeAttachments() VolumeAttachmentInterface -} - -// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. 
-type VolumeAttachmentInterface interface { - Create(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - Update(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - UpdateStatus(*v1.VolumeAttachment) (*v1.VolumeAttachment, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.VolumeAttachment, error) - List(opts metav1.ListOptions) (*v1.VolumeAttachmentList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) - VolumeAttachmentExpansion -} - -// volumeAttachments implements VolumeAttachmentInterface -type volumeAttachments struct { - client rest.Interface -} - -// newVolumeAttachments returns a VolumeAttachments -func newVolumeAttachments(c *StorageV1Client) *volumeAttachments { - return &volumeAttachments{ - client: c.RESTClient(), - } -} - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(name string, options metav1.GetOptions) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(opts metav1.ListOptions) (result *v1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(opts metav1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1.VolumeAttachment) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.VolumeAttachment, err error) { - result = &v1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go index 7fef94e..e6af001 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go @@ -19,8 +19,6 @@ limitations under the License. package v1alpha1 import ( - "time" - v1alpha1 "k8s.io/api/storage/v1alpha1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1a // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1alpha1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. 
func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go deleted file mode 100644 index 86cf9bf..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csidriver.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// CSIDriversGetter has a method to return a CSIDriverInterface. -// A group's client should implement this interface. -type CSIDriversGetter interface { - CSIDrivers() CSIDriverInterface -} - -// CSIDriverInterface has methods to work with CSIDriver resources. -type CSIDriverInterface interface { - Create(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) - Update(*v1beta1.CSIDriver) (*v1beta1.CSIDriver, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CSIDriver, error) - List(opts v1.ListOptions) (*v1beta1.CSIDriverList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) - CSIDriverExpansion -} - -// cSIDrivers implements CSIDriverInterface -type cSIDrivers struct { - client rest.Interface -} - -// newCSIDrivers returns a CSIDrivers -func newCSIDrivers(c *StorageV1beta1Client) *cSIDrivers { - return &cSIDrivers{ - client: c.RESTClient(), - } -} - -// Get takes name of the cSIDriver, and returns the corresponding cSIDriver object, and an error if there is any. -func (c *cSIDrivers) Get(name string, options v1.GetOptions) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Get(). - Resource("csidrivers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSIDrivers that match those selectors. -func (c *cSIDrivers) List(opts v1.ListOptions) (result *v1beta1.CSIDriverList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSIDriverList{} - err = c.client.Get(). 
- Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSIDrivers. -func (c *cSIDrivers) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csidrivers"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a cSIDriver and creates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Create(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Post(). - Resource("csidrivers"). - Body(cSIDriver). - Do(). - Into(result) - return -} - -// Update takes the representation of a cSIDriver and updates it. Returns the server's representation of the cSIDriver, and an error, if there is any. -func (c *cSIDrivers) Update(cSIDriver *v1beta1.CSIDriver) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Put(). - Resource("csidrivers"). - Name(cSIDriver.Name). - Body(cSIDriver). - Do(). - Into(result) - return -} - -// Delete takes name of the cSIDriver and deletes it. Returns an error if one occurs. -func (c *cSIDrivers) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csidrivers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSIDrivers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csidrivers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cSIDriver. -func (c *cSIDrivers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSIDriver, err error) { - result = &v1beta1.CSIDriver{} - err = c.client.Patch(pt). - Resource("csidrivers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go deleted file mode 100644 index e5540c1..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/csinode.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - "time" - - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// CSINodesGetter has a method to return a CSINodeInterface. -// A group's client should implement this interface. -type CSINodesGetter interface { - CSINodes() CSINodeInterface -} - -// CSINodeInterface has methods to work with CSINode resources. -type CSINodeInterface interface { - Create(*v1beta1.CSINode) (*v1beta1.CSINode, error) - Update(*v1beta1.CSINode) (*v1beta1.CSINode, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CSINode, error) - List(opts v1.ListOptions) (*v1beta1.CSINodeList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) - CSINodeExpansion -} - -// cSINodes implements CSINodeInterface -type cSINodes struct { - client rest.Interface -} - -// newCSINodes returns a CSINodes -func newCSINodes(c *StorageV1beta1Client) *cSINodes { - return &cSINodes{ - client: c.RESTClient(), - } -} - -// Get takes name of the cSINode, and returns the corresponding cSINode object, and an error if there is any. -func (c *cSINodes) Get(name string, options v1.GetOptions) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Get(). - Resource("csinodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CSINodes that match those selectors. -func (c *cSINodes) List(opts v1.ListOptions) (result *v1beta1.CSINodeList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - result = &v1beta1.CSINodeList{} - err = c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cSINodes. -func (c *cSINodes) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } - opts.Watch = true - return c.client.Get(). - Resource("csinodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). - Watch() -} - -// Create takes the representation of a cSINode and creates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Create(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Post(). - Resource("csinodes"). - Body(cSINode). - Do(). - Into(result) - return -} - -// Update takes the representation of a cSINode and updates it. Returns the server's representation of the cSINode, and an error, if there is any. -func (c *cSINodes) Update(cSINode *v1beta1.CSINode) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Put(). - Resource("csinodes"). - Name(cSINode.Name). - Body(cSINode). - Do(). - Into(result) - return -} - -// Delete takes name of the cSINode and deletes it. Returns an error if one occurs. 
-func (c *cSINodes) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("csinodes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cSINodes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } - return c.client.Delete(). - Resource("csinodes"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cSINode. -func (c *cSINodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CSINode, err error) { - result = &v1beta1.CSINode{} - err = c.client.Patch(pt). - Resource("csinodes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go index 7ba9314..559f88f 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go @@ -18,10 +18,6 @@ limitations under the License. package v1beta1 -type CSIDriverExpansion interface{} - -type CSINodeExpansion interface{} - type StorageClassExpansion interface{} type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go index e9916bc..4bdebb8 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go @@ -27,8 +27,6 @@ import ( type StorageV1beta1Interface interface { RESTClient() rest.Interface - CSIDriversGetter - CSINodesGetter StorageClassesGetter VolumeAttachmentsGetter } @@ -38,14 +36,6 @@ type StorageV1beta1Client struct { restClient rest.Interface } -func (c *StorageV1beta1Client) CSIDrivers() CSIDriverInterface { - return newCSIDrivers(c) -} - -func (c *StorageV1beta1Client) CSINodes() CSINodeInterface { - return newCSINodes(c) -} - func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { return newStorageClasses(c) } diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go index 8a8f389..fbe1fd4 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -74,15 +72,10 @@ func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta // List takes label and field selectors, and returns the list of StorageClasses that match those selectors. 
func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.StorageClassList{} err = c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -90,15 +83,10 @@ func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClass // Watch returns a watch.Interface that watches the requested storageClasses. func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("storageclasses"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -137,14 +125,9 @@ func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { // DeleteCollection deletes a collection of objects. func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("storageclasses"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go index d319407..5cd2d39 100644 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go @@ -19,8 +19,6 @@ limitations under the License. package v1beta1 import ( - "time" - v1beta1 "k8s.io/api/storage/v1beta1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" types "k8s.io/apimachinery/pkg/types" @@ -75,15 +73,10 @@ func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1b // List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } result = &v1beta1.VolumeAttachmentList{} err = c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Do(). Into(result) return @@ -91,15 +84,10 @@ func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAtt // Watch returns a watch.Interface that watches the requested volumeAttachments. func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { - var timeout time.Duration - if opts.TimeoutSeconds != nil { - timeout = time.Duration(*opts.TimeoutSeconds) * time.Second - } opts.Watch = true return c.client.Get(). Resource("volumeattachments"). VersionedParams(&opts, scheme.ParameterCodec). - Timeout(timeout). Watch() } @@ -153,14 +141,9 @@ func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error // DeleteCollection deletes a collection of objects. 
func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - var timeout time.Duration - if listOptions.TimeoutSeconds != nil { - timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second - } return c.client.Delete(). Resource("volumeattachments"). VersionedParams(&listOptions, scheme.ParameterCodec). - Timeout(timeout). Body(options). Do(). Error() diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS deleted file mode 100644 index e0ec62d..0000000 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -# approval on api packages bubbles to api-approvers -reviewers: -- sig-auth-authenticators-approvers -- sig-auth-authenticators-reviewers -labels: -- sig/auth - diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go index b994597..d06482d 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/doc.go @@ -16,5 +16,4 @@ limitations under the License. // +k8s:deepcopy-gen=package // +groupName=client.authentication.k8s.io - package clientauthentication // import "k8s.io/client-go/pkg/apis/clientauthentication" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go index 19ab776..016adb2 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/doc.go @@ -20,5 +20,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io - package v1alpha1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1" diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go index c714e24..921f3a2 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1/types.go @@ -22,7 +22,7 @@ import ( // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// ExecCredential is used by exec-based plugins to communicate credentials to +// ExecCredentials is used by exec-based plugins to communicate credentials to // HTTP transports. type ExecCredential struct { metav1.TypeMeta `json:",inline"` diff --git a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go index 22d1c58..fbcd9b7 100644 --- a/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go +++ b/vendor/k8s.io/client-go/pkg/apis/clientauthentication/v1beta1/doc.go @@ -20,5 +20,4 @@ limitations under the License. // +k8s:defaulter-gen=TypeMeta // +groupName=client.authentication.k8s.io - package v1beta1 // import "k8s.io/client-go/pkg/apis/clientauthentication/v1beta1" diff --git a/vendor/k8s.io/client-go/pkg/version/doc.go b/vendor/k8s.io/client-go/pkg/version/doc.go index 05e997e..30399fb 100644 --- a/vendor/k8s.io/client-go/pkg/version/doc.go +++ b/vendor/k8s.io/client-go/pkg/version/doc.go @@ -14,8 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:openapi-gen=true - // Package version supplies version information collected at build time to // kubernetes components. +// +k8s:openapi-gen=true package version // import "k8s.io/client-go/pkg/version" diff --git a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go index b88902c..cae9d0d 100644 --- a/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go +++ b/vendor/k8s.io/client-go/plugin/pkg/client/auth/exec/exec.go @@ -31,9 +31,9 @@ import ( "sync" "time" - "github.com/davecgh/go-spew/spew" + "github.com/golang/glog" "golang.org/x/crypto/ssh/terminal" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer" @@ -44,7 +44,6 @@ import ( "k8s.io/client-go/tools/clientcmd/api" "k8s.io/client-go/transport" "k8s.io/client-go/util/connrotation" - "k8s.io/klog" ) const execInfoEnv = "KUBERNETES_EXEC_INFO" @@ -74,10 +73,8 @@ func newCache() *cache { return &cache{m: make(map[string]*Authenticator)} } -var spewConfig = &spew.ConfigState{DisableMethods: true, Indent: " "} - func cacheKey(c *api.ExecConfig) string { - return spewConfig.Sprint(c) + return fmt.Sprintf("%#v", c) } type cache struct { @@ -175,9 +172,13 @@ type credentials struct { // UpdateTransportConfig updates the transport.Config to use credentials // returned by the plugin. func (a *Authenticator) UpdateTransportConfig(c *transport.Config) error { - c.Wrap(func(rt http.RoundTripper) http.RoundTripper { + wt := c.WrapTransport + c.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + if wt != nil { + rt = wt(rt) + } return &roundTripper{a, rt} - }) + } if c.TLS.GetCert != nil { return errors.New("can't add TLS certificate callback: transport.Config.TLS.GetCert already set") @@ -227,7 +228,7 @@ func (r *roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { Code: int32(res.StatusCode), } if err := r.a.maybeRefreshCreds(creds, resp); err != nil { - klog.Errorf("refreshing credentials: %v", err) + glog.Errorf("refreshing credentials: %v", err) } } return res, nil diff --git a/vendor/k8s.io/client-go/rest/OWNERS b/vendor/k8s.io/client-go/rest/OWNERS index 49dabc6..8d97da0 100644 --- a/vendor/k8s.io/client-go/rest/OWNERS +++ b/vendor/k8s.io/client-go/rest/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - reviewers: - thockin - smarterclayton diff --git a/vendor/k8s.io/client-go/rest/config.go b/vendor/k8s.io/client-go/rest/config.go index 3f6b9bc..10bef7d 100644 --- a/vendor/k8s.io/client-go/rest/config.go +++ b/vendor/k8s.io/client-go/rest/config.go @@ -29,15 +29,14 @@ import ( "strings" "time" + "github.com/golang/glog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/pkg/version" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" - "k8s.io/client-go/transport" certutil "k8s.io/client-go/util/cert" "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog" ) const ( @@ -96,16 +95,13 @@ type Config struct { // Transport may be used for custom HTTP behavior. This attribute may not // be specified with the TLS client certificate options. Use WrapTransport - // to provide additional per-server middleware behavior. + // for most client level operations. 
Transport http.RoundTripper // WrapTransport will be invoked for custom HTTP behavior after the underlying // transport is initialized (either the transport created from TLSClientConfig, // Transport, or http.DefaultTransport). The config may layer other RoundTrippers // on top of the returned RoundTripper. - // - // A future release will change this field to an array. Use config.Wrap() - // instead of setting this value directly. - WrapTransport transport.WrapperFunc + WrapTransport func(rt http.RoundTripper) http.RoundTripper // QPS indicates the maximum QPS to the master from this client. // If it's zero, the created RESTClient will use DefaultQPS: 5 @@ -129,47 +125,6 @@ type Config struct { // Version string } -var _ fmt.Stringer = new(Config) -var _ fmt.GoStringer = new(Config) - -type sanitizedConfig *Config - -type sanitizedAuthConfigPersister struct{ AuthProviderConfigPersister } - -func (sanitizedAuthConfigPersister) GoString() string { - return "rest.AuthProviderConfigPersister(--- REDACTED ---)" -} -func (sanitizedAuthConfigPersister) String() string { - return "rest.AuthProviderConfigPersister(--- REDACTED ---)" -} - -// GoString implements fmt.GoStringer and sanitizes sensitive fields of Config -// to prevent accidental leaking via logs. -func (c *Config) GoString() string { - return c.String() -} - -// String implements fmt.Stringer and sanitizes sensitive fields of Config to -// prevent accidental leaking via logs. -func (c *Config) String() string { - if c == nil { - return "" - } - cc := sanitizedConfig(CopyConfig(c)) - // Explicitly mark non-empty credential fields as redacted. - if cc.Password != "" { - cc.Password = "--- REDACTED ---" - } - if cc.BearerToken != "" { - cc.BearerToken = "--- REDACTED ---" - } - if cc.AuthConfigPersister != nil { - cc.AuthConfigPersister = sanitizedAuthConfigPersister{cc.AuthConfigPersister} - } - - return fmt.Sprintf("%#v", cc) -} - // ImpersonationConfig has all the available impersonation options type ImpersonationConfig struct { // UserName is the username to impersonate on each request. @@ -209,40 +164,6 @@ type TLSClientConfig struct { CAData []byte } -var _ fmt.Stringer = TLSClientConfig{} -var _ fmt.GoStringer = TLSClientConfig{} - -type sanitizedTLSClientConfig TLSClientConfig - -// GoString implements fmt.GoStringer and sanitizes sensitive fields of -// TLSClientConfig to prevent accidental leaking via logs. -func (c TLSClientConfig) GoString() string { - return c.String() -} - -// String implements fmt.Stringer and sanitizes sensitive fields of -// TLSClientConfig to prevent accidental leaking via logs. -func (c TLSClientConfig) String() string { - cc := sanitizedTLSClientConfig{ - Insecure: c.Insecure, - ServerName: c.ServerName, - CertFile: c.CertFile, - KeyFile: c.KeyFile, - CAFile: c.CAFile, - CertData: c.CertData, - KeyData: c.KeyData, - CAData: c.CAData, - } - // Explicitly mark non-empty credential fields as redacted. - if len(cc.CertData) != 0 { - cc.CertData = []byte("--- TRUNCATED ---") - } - if len(cc.KeyData) != 0 { - cc.KeyData = []byte("--- REDACTED ---") - } - return fmt.Sprintf("%#v", cc) -} - type ContentConfig struct { // AcceptContentTypes specifies the types the client will accept and is optional. 
// If not set, ContentType will be used to define the Accept header @@ -414,7 +335,7 @@ func InClusterConfig() (*Config, error) { tlsClientConfig := TLSClientConfig{} if _, err := certutil.NewPool(rootCAFile); err != nil { - klog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) + glog.Errorf("Expected to load root CA config from %s, but got err: %v", rootCAFile, err) } else { tlsClientConfig.CAFile = rootCAFile } diff --git a/vendor/k8s.io/client-go/rest/plugin.go b/vendor/k8s.io/client-go/rest/plugin.go index 83ef5ae..cf8fbab 100644 --- a/vendor/k8s.io/client-go/rest/plugin.go +++ b/vendor/k8s.io/client-go/rest/plugin.go @@ -21,7 +21,7 @@ import ( "net/http" "sync" - "k8s.io/klog" + "github.com/golang/glog" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" ) @@ -57,7 +57,7 @@ func RegisterAuthProviderPlugin(name string, plugin Factory) error { if _, found := plugins[name]; found { return fmt.Errorf("Auth Provider Plugin %q was registered twice", name) } - klog.V(4).Infof("Registered Auth Provider Plugin %q", name) + glog.V(4).Infof("Registered Auth Provider Plugin %q", name) plugins[name] = plugin return nil } diff --git a/vendor/k8s.io/client-go/rest/request.go b/vendor/k8s.io/client-go/rest/request.go index dd06303..9bb3114 100644 --- a/vendor/k8s.io/client-go/rest/request.go +++ b/vendor/k8s.io/client-go/rest/request.go @@ -32,6 +32,7 @@ import ( "strings" "time" + "github.com/golang/glog" "golang.org/x/net/http2" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -43,7 +44,6 @@ import ( restclientwatch "k8s.io/client-go/rest/watch" "k8s.io/client-go/tools/metrics" "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog" ) var ( @@ -114,7 +114,7 @@ type Request struct { // NewRequest creates a new request helper object for accessing runtime.Objects on a server. 
func NewRequest(client HTTPClient, verb string, baseURL *url.URL, versionedAPIPath string, content ContentConfig, serializers Serializers, backoff BackoffManager, throttle flowcontrol.RateLimiter, timeout time.Duration) *Request { if backoff == nil { - klog.V(2).Infof("Not implementing request backoff strategy.") + glog.V(2).Infof("Not implementing request backoff strategy.") backoff = &NoBackoff{} } @@ -527,7 +527,7 @@ func (r *Request) tryThrottle() { r.throttle.Accept() } if latency := time.Since(now); latency > longThrottleLatency { - klog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) + glog.V(4).Infof("Throttling request took %v, request: %s:%s", latency, r.verb, r.URL().String()) } } @@ -683,7 +683,7 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { }() if r.err != nil { - klog.V(4).Infof("Error in request: %v", r.err) + glog.V(4).Infof("Error in request: %v", r.err) return r.err } @@ -770,13 +770,13 @@ func (r *Request) request(fn func(*http.Request, *http.Response)) error { if seeker, ok := r.body.(io.Seeker); ok && r.body != nil { _, err := seeker.Seek(0, 0) if err != nil { - klog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) + glog.V(4).Infof("Could not retry request, can't Seek() back to beginning of body for %T", r.body) fn(req, resp) return true } } - klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) + glog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", seconds, retries, url) r.backoffMgr.Sleep(time.Duration(seconds) * time.Second) return false } @@ -844,13 +844,13 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu // 2. Apiserver sends back the headers and then part of the body // 3. Apiserver closes connection. // 4. client-go should catch this and return an error. - klog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) + glog.V(2).Infof("Stream error %#v when reading response body, may be caused by closed connection.", err) streamErr := fmt.Errorf("Stream error %#v when reading response body, may be caused by closed connection. Please retry.", err) return Result{ err: streamErr, } default: - klog.Errorf("Unexpected error when reading response body: %#v", err) + glog.Errorf("Unexpected error when reading response body: %#v", err) unexpectedErr := fmt.Errorf("Unexpected error %#v when reading response body. Please retry.", err) return Result{ err: unexpectedErr, @@ -914,11 +914,11 @@ func (r *Request) transformResponse(resp *http.Response, req *http.Request) Resu func truncateBody(body string) string { max := 0 switch { - case bool(klog.V(10)): + case bool(glog.V(10)): return body - case bool(klog.V(9)): + case bool(glog.V(9)): max = 10240 - case bool(klog.V(8)): + case bool(glog.V(8)): max = 1024 } @@ -933,13 +933,13 @@ func truncateBody(body string) string { // allocating a new string for the body output unless necessary. Uses a simple heuristic to determine // whether the body is printable. 
func glogBody(prefix string, body []byte) { - if klog.V(8) { + if glog.V(8) { if bytes.IndexFunc(body, func(r rune) bool { return r < 0x0a }) != -1 { - klog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) + glog.Infof("%s:\n%s", prefix, truncateBody(hex.Dump(body))) } else { - klog.Infof("%s: %s", prefix, truncateBody(string(body))) + glog.Infof("%s: %s", prefix, truncateBody(string(body))) } } } @@ -1100,8 +1100,7 @@ func (r Result) Into(obj runtime.Object) error { return fmt.Errorf("serializer for %s doesn't exist", r.contentType) } if len(r.body) == 0 { - return fmt.Errorf("0-length response with status code: %d and content type: %s", - r.statusCode, r.contentType) + return fmt.Errorf("0-length response") } out, _, err := r.decoder.Decode(r.body, nil, obj) @@ -1142,7 +1141,7 @@ func (r Result) Error() error { // to be backwards compatible with old servers that do not return a version, default to "v1" out, _, err := r.decoder.Decode(r.body, &schema.GroupVersionKind{Version: "v1"}, nil) if err != nil { - klog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) + glog.V(5).Infof("body was not decodable (unable to check for Status): %v", err) return r.err } switch t := out.(type) { @@ -1196,6 +1195,7 @@ func IsValidPathSegmentPrefix(name string) []string { func ValidatePathSegmentName(name string, prefix bool) []string { if prefix { return IsValidPathSegmentPrefix(name) + } else { + return IsValidPathSegmentName(name) } - return IsValidPathSegmentName(name) } diff --git a/vendor/k8s.io/client-go/rest/transport.go b/vendor/k8s.io/client-go/rest/transport.go index bd5749d..f81ff09 100644 --- a/vendor/k8s.io/client-go/rest/transport.go +++ b/vendor/k8s.io/client-go/rest/transport.go @@ -74,9 +74,10 @@ func (c *Config) TransportConfig() (*transport.Config, error) { KeyFile: c.KeyFile, KeyData: c.KeyData, }, - Username: c.Username, - Password: c.Password, - BearerToken: c.BearerToken, + Username: c.Username, + Password: c.Password, + BearerToken: c.BearerToken, + BearerTokenFile: c.BearerTokenFile, Impersonate: transport.ImpersonationConfig{ UserName: c.Impersonate.UserName, Groups: c.Impersonate.Groups, @@ -103,15 +104,14 @@ func (c *Config) TransportConfig() (*transport.Config, error) { if err != nil { return nil, err } - conf.Wrap(provider.WrapTransport) + wt := conf.WrapTransport + if wt != nil { + conf.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { + return provider.WrapTransport(wt(rt)) + } + } else { + conf.WrapTransport = provider.WrapTransport + } } return conf, nil } - -// Wrap adds a transport middleware function that will give the caller -// an opportunity to wrap the underlying http.RoundTripper prior to the -// first API call being made. The provided function is invoked after any -// existing transport wrappers are invoked. -func (c *Config) Wrap(fn transport.WrapperFunc) { - c.WrapTransport = transport.Wrappers(c.WrapTransport, fn) -} diff --git a/vendor/k8s.io/client-go/rest/urlbackoff.go b/vendor/k8s.io/client-go/rest/urlbackoff.go index d00e42f..eff848a 100644 --- a/vendor/k8s.io/client-go/rest/urlbackoff.go +++ b/vendor/k8s.io/client-go/rest/urlbackoff.go @@ -20,9 +20,9 @@ import ( "net/url" "time" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/util/flowcontrol" - "k8s.io/klog" ) // Set of resp. Codes that we backoff for. @@ -64,7 +64,7 @@ func (n *NoBackoff) Sleep(d time.Duration) { // Disable makes the backoff trivial, i.e., sets it to zero. 
This might be used // by tests which want to run 1000s of mock requests without slowing down. func (b *URLBackoff) Disable() { - klog.V(4).Infof("Disabling backoff strategy") + glog.V(4).Infof("Disabling backoff strategy") b.Backoff = flowcontrol.NewBackOff(0*time.Second, 0*time.Second) } @@ -76,7 +76,7 @@ func (b *URLBackoff) baseUrlKey(rawurl *url.URL) string { // in the future. host, err := url.Parse(rawurl.String()) if err != nil { - klog.V(4).Infof("Error extracting url: %v", rawurl) + glog.V(4).Infof("Error extracting url: %v", rawurl) panic("bad url!") } return host.Host @@ -89,7 +89,7 @@ func (b *URLBackoff) UpdateBackoff(actualUrl *url.URL, err error, responseCode i b.Backoff.Next(b.baseUrlKey(actualUrl), b.Backoff.Clock.Now()) return } else if responseCode >= 300 || err != nil { - klog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) + glog.V(4).Infof("Client is returning errors: code %v, error %v", responseCode, err) } //If we got this far, there is no backoff required for this URL anymore. diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go index 5871575..0a08187 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/doc.go @@ -15,5 +15,4 @@ limitations under the License. */ // +k8s:deepcopy-gen=package - package api diff --git a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go index 990a440..1391df7 100644 --- a/vendor/k8s.io/client-go/tools/clientcmd/api/types.go +++ b/vendor/k8s.io/client-go/tools/clientcmd/api/types.go @@ -17,8 +17,6 @@ limitations under the License. package api import ( - "fmt" - "k8s.io/apimachinery/pkg/runtime" ) @@ -152,25 +150,6 @@ type AuthProviderConfig struct { Config map[string]string `json:"config,omitempty"` } -var _ fmt.Stringer = new(AuthProviderConfig) -var _ fmt.GoStringer = new(AuthProviderConfig) - -// GoString implements fmt.GoStringer and sanitizes sensitive fields of -// AuthProviderConfig to prevent accidental leaking via logs. -func (c AuthProviderConfig) GoString() string { - return c.String() -} - -// String implements fmt.Stringer and sanitizes sensitive fields of -// AuthProviderConfig to prevent accidental leaking via logs. -func (c AuthProviderConfig) String() string { - cfg := "" - if c.Config != nil { - cfg = "--- REDACTED ---" - } - return fmt.Sprintf("api.AuthProviderConfig{Name: %q, Config: map[string]string{%s}}", c.Name, cfg) -} - // ExecConfig specifies a command to provide client credentials. The command is exec'd // and outputs structured stdout holding credentials. // @@ -193,29 +172,6 @@ type ExecConfig struct { APIVersion string `json:"apiVersion,omitempty"` } -var _ fmt.Stringer = new(ExecConfig) -var _ fmt.GoStringer = new(ExecConfig) - -// GoString implements fmt.GoStringer and sanitizes sensitive fields of -// ExecConfig to prevent accidental leaking via logs. -func (c ExecConfig) GoString() string { - return c.String() -} - -// String implements fmt.Stringer and sanitizes sensitive fields of ExecConfig -// to prevent accidental leaking via logs. 
-func (c ExecConfig) String() string { - var args []string - if len(c.Args) > 0 { - args = []string{"--- REDACTED ---"} - } - env := "[]ExecEnvVar(nil)" - if len(c.Env) > 0 { - env = "[]ExecEnvVar{--- REDACTED ---}" - } - return fmt.Sprintf("api.AuthProviderConfig{Command: %q, Args: %#v, Env: %s, APIVersion: %q}", c.Command, args, env, c.APIVersion) -} - // ExecEnvVar is used for setting environment variables when executing an exec-based // credential plugin. type ExecEnvVar struct { diff --git a/vendor/k8s.io/client-go/tools/metrics/OWNERS b/vendor/k8s.io/client-go/tools/metrics/OWNERS index f150be5..ff51798 100644 --- a/vendor/k8s.io/client-go/tools/metrics/OWNERS +++ b/vendor/k8s.io/client-go/tools/metrics/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - reviewers: - wojtek-t - eparis diff --git a/vendor/k8s.io/client-go/transport/OWNERS b/vendor/k8s.io/client-go/transport/OWNERS index a521769..bf0ba5b 100644 --- a/vendor/k8s.io/client-go/transport/OWNERS +++ b/vendor/k8s.io/client-go/transport/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - reviewers: - smarterclayton - wojtek-t diff --git a/vendor/k8s.io/client-go/transport/config.go b/vendor/k8s.io/client-go/transport/config.go index 5de0a2c..acb126d 100644 --- a/vendor/k8s.io/client-go/transport/config.go +++ b/vendor/k8s.io/client-go/transport/config.go @@ -57,10 +57,7 @@ type Config struct { // from TLSClientConfig, Transport, or http.DefaultTransport). The // config may layer other RoundTrippers on top of the returned // RoundTripper. - // - // A future release will change this field to an array. Use config.Wrap() - // instead of setting this value directly. - WrapTransport WrapperFunc + WrapTransport func(rt http.RoundTripper) http.RoundTripper // Dial specifies the dial function for creating unencrypted TCP connections. Dial func(ctx context.Context, network, address string) (net.Conn, error) @@ -101,14 +98,6 @@ func (c *Config) HasCertCallback() bool { return c.TLS.GetCert != nil } -// Wrap adds a transport middleware function that will give the caller -// an opportunity to wrap the underlying http.RoundTripper prior to the -// first API call being made. The provided function is invoked after any -// existing transport wrappers are invoked. -func (c *Config) Wrap(fn WrapperFunc) { - c.WrapTransport = Wrappers(c.WrapTransport, fn) -} - // TLSConfig holds the information needed to set up a TLS transport. type TLSConfig struct { CAFile string // Path of the PEM-encoded server trusted root certificates. diff --git a/vendor/k8s.io/client-go/transport/round_trippers.go b/vendor/k8s.io/client-go/transport/round_trippers.go index 117a9c8..4a210e4 100644 --- a/vendor/k8s.io/client-go/transport/round_trippers.go +++ b/vendor/k8s.io/client-go/transport/round_trippers.go @@ -22,8 +22,8 @@ import ( "strings" "time" + "github.com/golang/glog" "golang.org/x/oauth2" - "k8s.io/klog" utilnet "k8s.io/apimachinery/pkg/util/net" ) @@ -67,13 +67,13 @@ func HTTPWrappersForConfig(config *Config, rt http.RoundTripper) (http.RoundTrip // DebugWrappers wraps a round tripper and logs based on the current log level. 
func DebugWrappers(rt http.RoundTripper) http.RoundTripper { switch { - case bool(klog.V(9)): + case bool(glog.V(9)): rt = newDebuggingRoundTripper(rt, debugCurlCommand, debugURLTiming, debugResponseHeaders) - case bool(klog.V(8)): + case bool(glog.V(8)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus, debugResponseHeaders) - case bool(klog.V(7)): + case bool(glog.V(7)): rt = newDebuggingRoundTripper(rt, debugJustURL, debugRequestHeaders, debugResponseStatus) - case bool(klog.V(6)): + case bool(glog.V(6)): rt = newDebuggingRoundTripper(rt, debugURLTiming) } @@ -143,7 +143,7 @@ func (rt *authProxyRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) + glog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -171,7 +171,7 @@ func (rt *userAgentRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) + glog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -202,7 +202,7 @@ func (rt *basicAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) + glog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -262,7 +262,7 @@ func (rt *impersonatingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegate.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegate) + glog.Errorf("CancelRequest not implemented by %T", rt.delegate) } } @@ -321,7 +321,7 @@ func (rt *bearerAuthRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.rt.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.rt) + glog.Errorf("CancelRequest not implemented by %T", rt.rt) } } @@ -405,7 +405,7 @@ func (rt *debuggingRoundTripper) CancelRequest(req *http.Request) { if canceler, ok := rt.delegatedRoundTripper.(requestCanceler); ok { canceler.CancelRequest(req) } else { - klog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) + glog.Errorf("CancelRequest not implemented by %T", rt.delegatedRoundTripper) } } @@ -413,17 +413,17 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo := newRequestInfo(req) if rt.levels[debugJustURL] { - klog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) + glog.Infof("%s %s", reqInfo.RequestVerb, reqInfo.RequestURL) } if rt.levels[debugCurlCommand] { - klog.Infof("%s", reqInfo.toCurl()) + glog.Infof("%s", reqInfo.toCurl()) } if rt.levels[debugRequestHeaders] { - klog.Infof("Request Headers:") + glog.Infof("Request Headers:") for key, values := range reqInfo.RequestHeaders { for _, value := range values { - klog.Infof(" %s: %s", key, value) + glog.Infof(" %s: %s", key, value) } } } @@ -435,16 +435,16 @@ func (rt *debuggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, e reqInfo.complete(response, err) if rt.levels[debugURLTiming] { - klog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + glog.Infof("%s %s %s in %d milliseconds", reqInfo.RequestVerb, reqInfo.RequestURL, 
reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseStatus] { - klog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) + glog.Infof("Response Status: %s in %d milliseconds", reqInfo.ResponseStatus, reqInfo.Duration.Nanoseconds()/int64(time.Millisecond)) } if rt.levels[debugResponseHeaders] { - klog.Infof("Response Headers:") + glog.Infof("Response Headers:") for key, values := range reqInfo.ResponseHeaders { for _, value := range values { - klog.Infof(" %s: %s", key, value) + glog.Infof(" %s: %s", key, value) } } } diff --git a/vendor/k8s.io/client-go/transport/token_source.go b/vendor/k8s.io/client-go/transport/token_source.go index 8595df2..c9bdf9a 100644 --- a/vendor/k8s.io/client-go/transport/token_source.go +++ b/vendor/k8s.io/client-go/transport/token_source.go @@ -24,8 +24,8 @@ import ( "sync" "time" + "github.com/golang/glog" "golang.org/x/oauth2" - "k8s.io/klog" ) // TokenSourceWrapTransport returns a WrapTransport that injects bearer tokens @@ -131,7 +131,7 @@ func (ts *cachingTokenSource) Token() (*oauth2.Token, error) { if ts.tok == nil { return nil, err } - klog.Errorf("Unable to rotate token: %v", err) + glog.Errorf("Unable to rotate token: %v", err) return ts.tok, nil } diff --git a/vendor/k8s.io/client-go/transport/transport.go b/vendor/k8s.io/client-go/transport/transport.go index 2a145c9..c19739f 100644 --- a/vendor/k8s.io/client-go/transport/transport.go +++ b/vendor/k8s.io/client-go/transport/transport.go @@ -17,7 +17,6 @@ limitations under the License. package transport import ( - "context" "crypto/tls" "crypto/x509" "fmt" @@ -168,60 +167,3 @@ func rootCertPool(caData []byte) *x509.CertPool { certPool.AppendCertsFromPEM(caData) return certPool } - -// WrapperFunc wraps an http.RoundTripper when a new transport -// is created for a client, allowing per connection behavior -// to be injected. -type WrapperFunc func(rt http.RoundTripper) http.RoundTripper - -// Wrappers accepts any number of wrappers and returns a wrapper -// function that is the equivalent of calling each of them in order. Nil -// values are ignored, which makes this function convenient for incrementally -// wrapping a function. -func Wrappers(fns ...WrapperFunc) WrapperFunc { - if len(fns) == 0 { - return nil - } - // optimize the common case of wrapping a possibly nil transport wrapper - // with an additional wrapper - if len(fns) == 2 && fns[0] == nil { - return fns[1] - } - return func(rt http.RoundTripper) http.RoundTripper { - base := rt - for _, fn := range fns { - if fn != nil { - base = fn(base) - } - } - return base - } -} - -// ContextCanceller prevents new requests after the provided context is finished. -// err is returned when the context is closed, allowing the caller to provide a context -// appropriate error. 
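As a usage note for the transport change above: this pinned client-go drops transport.Wrappers, Config.Wrap, and ContextCanceller, leaving WrapTransport as a plain func(http.RoundTripper) http.RoundTripper, so callers chain extra wrappers by hand, the same way TransportConfig composes the auth-provider wrapper earlier in this patch. The sketch below is illustrative only; loggingRoundTripper and its log output are assumptions, not part of the vendored code.

package main

import (
	"log"
	"net/http"

	"k8s.io/client-go/rest"
)

// loggingRoundTripper is a hypothetical wrapper that logs each request
// before delegating to the underlying transport.
type loggingRoundTripper struct {
	next http.RoundTripper
}

func (l loggingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	log.Printf("%s %s", req.Method, req.URL)
	return l.next.RoundTrip(req)
}

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		log.Fatalf("not running inside a cluster: %v", err)
	}
	// Chain onto any wrapper already set on the config so existing
	// behavior (for example an auth-provider wrapper) is preserved.
	prev := cfg.WrapTransport
	cfg.WrapTransport = func(rt http.RoundTripper) http.RoundTripper {
		if prev != nil {
			rt = prev(rt)
		}
		return loggingRoundTripper{next: rt}
	}
	_ = cfg // hand cfg to kubernetes.NewForConfig as usual
}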
-func ContextCanceller(ctx context.Context, err error) WrapperFunc { - return func(rt http.RoundTripper) http.RoundTripper { - return &contextCanceller{ - ctx: ctx, - rt: rt, - err: err, - } - } -} - -type contextCanceller struct { - ctx context.Context - rt http.RoundTripper - err error -} - -func (b *contextCanceller) RoundTrip(req *http.Request) (*http.Response, error) { - select { - case <-b.ctx.Done(): - return nil, b.err - default: - return b.rt.RoundTrip(req) - } -} diff --git a/vendor/k8s.io/client-go/util/cert/OWNERS b/vendor/k8s.io/client-go/util/cert/OWNERS deleted file mode 100644 index 3cf0364..0000000 --- a/vendor/k8s.io/client-go/util/cert/OWNERS +++ /dev/null @@ -1,9 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- sig-auth-certificates-approvers -reviewers: -- sig-auth-certificates-reviewers -labels: -- sig/auth - diff --git a/vendor/k8s.io/client-go/util/cert/cert.go b/vendor/k8s.io/client-go/util/cert/cert.go index 9fd097a..fe2158b 100644 --- a/vendor/k8s.io/client-go/util/cert/cert.go +++ b/vendor/k8s.io/client-go/util/cert/cert.go @@ -18,24 +18,29 @@ package cert import ( "bytes" - "crypto" + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" cryptorand "crypto/rand" "crypto/rsa" "crypto/x509" "crypto/x509/pkix" "encoding/pem" + "errors" "fmt" "io/ioutil" + "math" "math/big" "net" "path" "strings" "time" - - "k8s.io/client-go/util/keyutil" ) -const duration365d = time.Hour * 24 * 365 +const ( + rsaKeySize = 2048 + duration365d = time.Hour * 24 * 365 +) // Config contains the basic fields required for creating a certificate type Config struct { @@ -53,8 +58,13 @@ type AltNames struct { IPs []net.IP } +// NewPrivateKey creates an RSA private key +func NewPrivateKey() (*rsa.PrivateKey, error) { + return rsa.GenerateKey(cryptorand.Reader, rsaKeySize) +} + // NewSelfSignedCACert creates a CA certificate -func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, error) { +func NewSelfSignedCACert(cfg Config, key *rsa.PrivateKey) (*x509.Certificate, error) { now := time.Now() tmpl := x509.Certificate{ SerialNumber: new(big.Int).SetInt64(0), @@ -66,7 +76,7 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro NotAfter: now.Add(duration365d * 10).UTC(), KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &tmpl, &tmpl, key.Public(), key) @@ -76,6 +86,58 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro return x509.ParseCertificate(certDERBytes) } +// NewSignedCert creates a signed certificate using the given CA certificate and key +func NewSignedCert(cfg Config, key *rsa.PrivateKey, caCert *x509.Certificate, caKey *rsa.PrivateKey) (*x509.Certificate, error) { + serial, err := rand.Int(rand.Reader, new(big.Int).SetInt64(math.MaxInt64)) + if err != nil { + return nil, err + } + if len(cfg.CommonName) == 0 { + return nil, errors.New("must specify a CommonName") + } + if len(cfg.Usages) == 0 { + return nil, errors.New("must specify at least one ExtKeyUsage") + } + + certTmpl := x509.Certificate{ + Subject: pkix.Name{ + CommonName: cfg.CommonName, + Organization: cfg.Organization, + }, + DNSNames: cfg.AltNames.DNSNames, + IPAddresses: cfg.AltNames.IPs, + SerialNumber: serial, + NotBefore: caCert.NotBefore, + NotAfter: time.Now().Add(duration365d).UTC(), + KeyUsage: x509.KeyUsageKeyEncipherment | 
x509.KeyUsageDigitalSignature, + ExtKeyUsage: cfg.Usages, + } + certDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &certTmpl, caCert, key.Public(), caKey) + if err != nil { + return nil, err + } + return x509.ParseCertificate(certDERBytes) +} + +// MakeEllipticPrivateKeyPEM creates an ECDSA private key +func MakeEllipticPrivateKeyPEM() ([]byte, error) { + privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) + if err != nil { + return nil, err + } + + derBytes, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return nil, err + } + + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil +} + // GenerateSelfSignedCertKey creates a self-signed certificate and key for the given host. // Host may be an IP or a DNS name // You may also specify additional subject alt names (either ip or dns names) for the certificate. @@ -125,7 +187,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign, BasicConstraintsValid: true, - IsCA: true, + IsCA: true, } caDERBytes, err := x509.CreateCertificate(cryptorand.Reader, &caTemplate, &caTemplate, &caKey.PublicKey, caKey) @@ -181,7 +243,7 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a // Generate key keyBuffer := bytes.Buffer{} - if err := pem.Encode(&keyBuffer, &pem.Block{Type: keyutil.RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { + if err := pem.Encode(&keyBuffer, &pem.Block{Type: RSAPrivateKeyBlockType, Bytes: x509.MarshalPKCS1PrivateKey(priv)}); err != nil { return nil, nil, err } @@ -197,6 +259,34 @@ func GenerateSelfSignedCertKeyWithFixtures(host string, alternateIPs []net.IP, a return certBuffer.Bytes(), keyBuffer.Bytes(), nil } +// FormatBytesCert receives byte array certificate and formats in human-readable format +func FormatBytesCert(cert []byte) (string, error) { + block, _ := pem.Decode(cert) + c, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return "", fmt.Errorf("failed to parse certificate [%v]", err) + } + return FormatCert(c), nil +} + +// FormatCert receives certificate and formats in human-readable format +func FormatCert(c *x509.Certificate) string { + var ips []string + for _, ip := range c.IPAddresses { + ips = append(ips, ip.String()) + } + altNames := append(ips, c.DNSNames...) + res := fmt.Sprintf( + "Issuer: CN=%s | Subject: CN=%s | CA: %t\n", + c.Issuer.CommonName, c.Subject.CommonName, c.IsCA, + ) + res += fmt.Sprintf("Not before: %s Not After: %s", c.NotBefore, c.NotAfter) + if len(altNames) > 0 { + res += fmt.Sprintf("\nAlternate Names: %v", altNames) + } + return res +} + func ipsToStrings(ips []net.IP) []string { ss := make([]string, 0, len(ips)) for _, ip := range ips { diff --git a/vendor/k8s.io/client-go/util/cert/io.go b/vendor/k8s.io/client-go/util/cert/io.go index 5efb248..a57bf09 100644 --- a/vendor/k8s.io/client-go/util/cert/io.go +++ b/vendor/k8s.io/client-go/util/cert/io.go @@ -17,7 +17,11 @@ limitations under the License. package cert import ( + "crypto" + "crypto/ecdsa" + "crypto/rsa" "crypto/x509" + "encoding/pem" "fmt" "io/ioutil" "os" @@ -69,6 +73,60 @@ func WriteCert(certPath string, data []byte) error { return ioutil.WriteFile(certPath, data, os.FileMode(0644)) } +// WriteKey writes the pem-encoded key data to keyPath. +// The key file will be created with file mode 0600. 
+// If the key file already exists, it will be overwritten. +// The parent directory of the keyPath will be created as needed with file mode 0755. +func WriteKey(keyPath string, data []byte) error { + if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { + return err + } + return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) +} + +// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it +// can't find one, it will generate a new key and store it there. +func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { + loadedData, err := ioutil.ReadFile(keyPath) + // Call verifyKeyData to ensure the file wasn't empty/corrupt. + if err == nil && verifyKeyData(loadedData) { + return loadedData, false, err + } + if !os.IsNotExist(err) { + return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) + } + + generatedData, err := MakeEllipticPrivateKeyPEM() + if err != nil { + return nil, false, fmt.Errorf("error generating key: %v", err) + } + if err := WriteKey(keyPath, generatedData); err != nil { + return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) + } + return generatedData, true, nil +} + +// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to +// a PEM encoded block or returns an error. +func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { + switch t := privateKey.(type) { + case *ecdsa.PrivateKey: + derBytes, err := x509.MarshalECPrivateKey(t) + if err != nil { + return nil, err + } + privateKeyPemBlock := &pem.Block{ + Type: ECPrivateKeyBlockType, + Bytes: derBytes, + } + return pem.EncodeToMemory(privateKeyPemBlock), nil + case *rsa.PrivateKey: + return EncodePrivateKeyPEM(t), nil + default: + return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) + } +} + // NewPool returns an x509.CertPool containing the certificates in the given PEM-encoded file. // Returns an error if the file could not be read, a certificate could not be parsed, or if the file does not contain any certificates func NewPool(filename string) (*x509.CertPool, error) { @@ -96,3 +154,40 @@ func CertsFromFile(file string) ([]*x509.Certificate, error) { } return certs, nil } + +// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. +// Returns an error if the file could not be read or if the private key could not be parsed. +func PrivateKeyFromFile(file string) (interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + key, err := ParsePrivateKeyPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading private key file %s: %v", file, err) + } + return key, nil +} + +// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. +// Reads public keys from both public and private key files. +func PublicKeysFromFile(file string) ([]interface{}, error) { + data, err := ioutil.ReadFile(file) + if err != nil { + return nil, err + } + keys, err := ParsePublicKeysPEM(data) + if err != nil { + return nil, fmt.Errorf("error reading public key file %s: %v", file, err) + } + return keys, nil +} + +// verifyKeyData returns true if the provided data appears to be a valid private key. 
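A minimal usage sketch for the key-file helpers vendored just above (WriteKey, LoadOrGenerateKeyFile, PrivateKeyFromFile), assuming the certutil import alias used elsewhere in this patch and a throwaway path under /tmp: the first call generates and persists an EC private key when the file is missing, and the second parses it back.

package main

import (
	"fmt"
	"log"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	const keyPath = "/tmp/example-client.key" // hypothetical path for illustration

	// Creates and stores a new EC private key on first run, reuses it afterwards.
	pemBytes, generated, err := certutil.LoadOrGenerateKeyFile(keyPath)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("generated=%v, %d PEM bytes\n", generated, len(pemBytes))

	// Returns *ecdsa.PrivateKey or *rsa.PrivateKey as an interface{}.
	key, err := certutil.PrivateKeyFromFile(keyPath)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("parsed key type: %T\n", key)
}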
+func verifyKeyData(data []byte) bool { + if len(data) == 0 { + return false + } + _, err := ParsePrivateKeyPEM(data) + return err == nil +} diff --git a/vendor/k8s.io/client-go/util/cert/pem.go b/vendor/k8s.io/client-go/util/cert/pem.go index 9185e2e..b99e366 100644 --- a/vendor/k8s.io/client-go/util/cert/pem.go +++ b/vendor/k8s.io/client-go/util/cert/pem.go @@ -17,18 +17,136 @@ limitations under the License. package cert import ( + "crypto/ecdsa" + "crypto/rsa" "crypto/x509" "encoding/pem" "errors" + "fmt" ) const ( + // ECPrivateKeyBlockType is a possible value for pem.Block.Type. + ECPrivateKeyBlockType = "EC PRIVATE KEY" + // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. + RSAPrivateKeyBlockType = "RSA PRIVATE KEY" + // PrivateKeyBlockType is a possible value for pem.Block.Type. + PrivateKeyBlockType = "PRIVATE KEY" + // PublicKeyBlockType is a possible value for pem.Block.Type. + PublicKeyBlockType = "PUBLIC KEY" // CertificateBlockType is a possible value for pem.Block.Type. CertificateBlockType = "CERTIFICATE" // CertificateRequestBlockType is a possible value for pem.Block.Type. CertificateRequestBlockType = "CERTIFICATE REQUEST" ) +// EncodePublicKeyPEM returns PEM-encoded public data +func EncodePublicKeyPEM(key *rsa.PublicKey) ([]byte, error) { + der, err := x509.MarshalPKIXPublicKey(key) + if err != nil { + return []byte{}, err + } + block := pem.Block{ + Type: PublicKeyBlockType, + Bytes: der, + } + return pem.EncodeToMemory(&block), nil +} + +// EncodePrivateKeyPEM returns PEM-encoded private key data +func EncodePrivateKeyPEM(key *rsa.PrivateKey) []byte { + block := pem.Block{ + Type: RSAPrivateKeyBlockType, + Bytes: x509.MarshalPKCS1PrivateKey(key), + } + return pem.EncodeToMemory(&block) +} + +// EncodeCertPEM returns PEM-endcoded certificate data +func EncodeCertPEM(cert *x509.Certificate) []byte { + block := pem.Block{ + Type: CertificateBlockType, + Bytes: cert.Raw, + } + return pem.EncodeToMemory(&block) +} + +// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. +// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" +func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { + var privateKeyPemBlock *pem.Block + for { + privateKeyPemBlock, keyData = pem.Decode(keyData) + if privateKeyPemBlock == nil { + break + } + + switch privateKeyPemBlock.Type { + case ECPrivateKeyBlockType: + // ECDSA Private Key in ASN.1 format + if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case RSAPrivateKeyBlockType: + // RSA Private Key in PKCS#1 format + if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + case PrivateKeyBlockType: + // RSA or ECDSA Private Key in unencrypted PKCS#8 format + if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { + return key, nil + } + } + + // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks + // originally, only the first PEM block was parsed and expected to be a key block + } + + // we read all the PEM blocks and didn't recognize one + return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") +} + +// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. +// Reads public keys from both public and private key files. 
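A small sketch of the PEM round trip these helpers provide in this version of the cert package: NewPrivateKey produces a 2048-bit RSA key, EncodePrivateKeyPEM emits it as an "RSA PRIVATE KEY" block, and ParsePublicKeysPEM then recovers the public key from that same private-key PEM, which is the "reads public keys from both public and private key files" behavior documented above. Error handling is shortened for brevity.

package main

import (
	"crypto/rsa"
	"fmt"
	"log"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	// RSA key generated by the vendored helper.
	priv, err := certutil.NewPrivateKey()
	if err != nil {
		log.Fatal(err)
	}

	// PKCS#1 "RSA PRIVATE KEY" PEM block.
	keyPEM := certutil.EncodePrivateKeyPEM(priv)

	// Public keys can be recovered from private-key PEM data as well.
	pubs, err := certutil.ParsePublicKeysPEM(keyPEM)
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pubs {
		if rsaPub, ok := p.(*rsa.PublicKey); ok {
			fmt.Printf("recovered RSA public key with %d-bit modulus\n", rsaPub.N.BitLen())
		}
	}
}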
+func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { + var block *pem.Block + keys := []interface{}{} + for { + // read the next block + block, keyData = pem.Decode(keyData) + if block == nil { + break + } + + // test block against parsing functions + if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { + keys = append(keys, &privateKey.PublicKey) + continue + } + if publicKey, err := parseECPublicKey(block.Bytes); err == nil { + keys = append(keys, publicKey) + continue + } + + // tolerate non-key PEM blocks for backwards compatibility + // originally, only the first PEM block was parsed and expected to be a key block + } + + if len(keys) == 0 { + return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") + } + return keys, nil +} + // ParseCertsPEM returns the x509.Certificates contained in the given PEM-encoded byte array // Returns an error if a certificate could not be parsed, or if the data does not contain any certificates func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { @@ -59,3 +177,93 @@ func ParseCertsPEM(pemCerts []byte) ([]*x509.Certificate, error) { } return certs, nil } + +// parseRSAPublicKey parses a single RSA public key from the provided data +func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an RSA Public Key + var pubKey *rsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") + } + + return pubKey, nil +} + +// parseRSAPrivateKey parses a single RSA private key from the provided data +func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { + return nil, err + } + } + + // Test if parsed key is an RSA Private Key + var privKey *rsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") + } + + return privKey, nil +} + +// parseECPublicKey parses a single ECDSA public key from the provided data +func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { + if cert, err := x509.ParseCertificate(data); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + // Test if parsed key is an ECDSA Public Key + var pubKey *ecdsa.PublicKey + var ok bool + if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") + } + + return pubKey, nil +} + +// parseECPrivateKey parses a single ECDSA private key from the provided data +func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse the key + var parsedKey interface{} + if parsedKey, 
err = x509.ParseECPrivateKey(data); err != nil { + return nil, err + } + + // Test if parsed key is an ECDSA Private Key + var privKey *ecdsa.PrivateKey + var ok bool + if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") + } + + return privKey, nil +} diff --git a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go index b7cb70e..71d442a 100644 --- a/vendor/k8s.io/client-go/util/flowcontrol/backoff.go +++ b/vendor/k8s.io/client-go/util/flowcontrol/backoff.go @@ -21,7 +21,7 @@ import ( "time" "k8s.io/apimachinery/pkg/util/clock" - "k8s.io/utils/integer" + "k8s.io/client-go/util/integer" ) type backoffEntry struct { diff --git a/vendor/k8s.io/utils/integer/integer.go b/vendor/k8s.io/client-go/util/integer/integer.go similarity index 81% rename from vendor/k8s.io/utils/integer/integer.go rename to vendor/k8s.io/client-go/util/integer/integer.go index e4e740c..c6ea106 100644 --- a/vendor/k8s.io/utils/integer/integer.go +++ b/vendor/k8s.io/client-go/util/integer/integer.go @@ -16,7 +16,6 @@ limitations under the License. package integer -// IntMax returns the maximum of the params func IntMax(a, b int) int { if b > a { return b @@ -24,7 +23,6 @@ func IntMax(a, b int) int { return a } -// IntMin returns the minimum of the params func IntMin(a, b int) int { if b < a { return b @@ -32,7 +30,6 @@ func IntMin(a, b int) int { return a } -// Int32Max returns the maximum of the params func Int32Max(a, b int32) int32 { if b > a { return b @@ -40,7 +37,6 @@ func Int32Max(a, b int32) int32 { return a } -// Int32Min returns the minimum of the params func Int32Min(a, b int32) int32 { if b < a { return b @@ -48,7 +44,6 @@ func Int32Min(a, b int32) int32 { return a } -// Int64Max returns the maximum of the params func Int64Max(a, b int64) int64 { if b > a { return b @@ -56,7 +51,6 @@ func Int64Max(a, b int64) int64 { return a } -// Int64Min returns the minimum of the params func Int64Min(a, b int64) int64 { if b < a { return b diff --git a/vendor/k8s.io/client-go/util/keyutil/OWNERS b/vendor/k8s.io/client-go/util/keyutil/OWNERS deleted file mode 100644 index 470b7a1..0000000 --- a/vendor/k8s.io/client-go/util/keyutil/OWNERS +++ /dev/null @@ -1,7 +0,0 @@ -approvers: -- sig-auth-certificates-approvers -reviewers: -- sig-auth-certificates-reviewers -labels: -- sig/auth - diff --git a/vendor/k8s.io/client-go/util/keyutil/key.go b/vendor/k8s.io/client-go/util/keyutil/key.go deleted file mode 100644 index 83c2c62..0000000 --- a/vendor/k8s.io/client-go/util/keyutil/key.go +++ /dev/null @@ -1,323 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package keyutil contains utilities for managing public/private key pairs. 
-package keyutil - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - cryptorand "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - "path/filepath" -) - -const ( - // ECPrivateKeyBlockType is a possible value for pem.Block.Type. - ECPrivateKeyBlockType = "EC PRIVATE KEY" - // RSAPrivateKeyBlockType is a possible value for pem.Block.Type. - RSAPrivateKeyBlockType = "RSA PRIVATE KEY" - // PrivateKeyBlockType is a possible value for pem.Block.Type. - PrivateKeyBlockType = "PRIVATE KEY" - // PublicKeyBlockType is a possible value for pem.Block.Type. - PublicKeyBlockType = "PUBLIC KEY" -) - -// MakeEllipticPrivateKeyPEM creates an ECDSA private key -func MakeEllipticPrivateKeyPEM() ([]byte, error) { - privateKey, err := ecdsa.GenerateKey(elliptic.P256(), cryptorand.Reader) - if err != nil { - return nil, err - } - - derBytes, err := x509.MarshalECPrivateKey(privateKey) - if err != nil { - return nil, err - } - - privateKeyPemBlock := &pem.Block{ - Type: ECPrivateKeyBlockType, - Bytes: derBytes, - } - return pem.EncodeToMemory(privateKeyPemBlock), nil -} - -// WriteKey writes the pem-encoded key data to keyPath. -// The key file will be created with file mode 0600. -// If the key file already exists, it will be overwritten. -// The parent directory of the keyPath will be created as needed with file mode 0755. -func WriteKey(keyPath string, data []byte) error { - if err := os.MkdirAll(filepath.Dir(keyPath), os.FileMode(0755)); err != nil { - return err - } - return ioutil.WriteFile(keyPath, data, os.FileMode(0600)) -} - -// LoadOrGenerateKeyFile looks for a key in the file at the given path. If it -// can't find one, it will generate a new key and store it there. -func LoadOrGenerateKeyFile(keyPath string) (data []byte, wasGenerated bool, err error) { - loadedData, err := ioutil.ReadFile(keyPath) - // Call verifyKeyData to ensure the file wasn't empty/corrupt. - if err == nil && verifyKeyData(loadedData) { - return loadedData, false, err - } - if !os.IsNotExist(err) { - return nil, false, fmt.Errorf("error loading key from %s: %v", keyPath, err) - } - - generatedData, err := MakeEllipticPrivateKeyPEM() - if err != nil { - return nil, false, fmt.Errorf("error generating key: %v", err) - } - if err := WriteKey(keyPath, generatedData); err != nil { - return nil, false, fmt.Errorf("error writing key to %s: %v", keyPath, err) - } - return generatedData, true, nil -} - -// MarshalPrivateKeyToPEM converts a known private key type of RSA or ECDSA to -// a PEM encoded block or returns an error. -func MarshalPrivateKeyToPEM(privateKey crypto.PrivateKey) ([]byte, error) { - switch t := privateKey.(type) { - case *ecdsa.PrivateKey: - derBytes, err := x509.MarshalECPrivateKey(t) - if err != nil { - return nil, err - } - block := &pem.Block{ - Type: ECPrivateKeyBlockType, - Bytes: derBytes, - } - return pem.EncodeToMemory(block), nil - case *rsa.PrivateKey: - block := &pem.Block{ - Type: RSAPrivateKeyBlockType, - Bytes: x509.MarshalPKCS1PrivateKey(t), - } - return pem.EncodeToMemory(block), nil - default: - return nil, fmt.Errorf("private key is not a recognized type: %T", privateKey) - } -} - -// PrivateKeyFromFile returns the private key in rsa.PrivateKey or ecdsa.PrivateKey format from a given PEM-encoded file. -// Returns an error if the file could not be read or if the private key could not be parsed. 
-func PrivateKeyFromFile(file string) (interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - key, err := ParsePrivateKeyPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading private key file %s: %v", file, err) - } - return key, nil -} - -// PublicKeysFromFile returns the public keys in rsa.PublicKey or ecdsa.PublicKey format from a given PEM-encoded file. -// Reads public keys from both public and private key files. -func PublicKeysFromFile(file string) ([]interface{}, error) { - data, err := ioutil.ReadFile(file) - if err != nil { - return nil, err - } - keys, err := ParsePublicKeysPEM(data) - if err != nil { - return nil, fmt.Errorf("error reading public key file %s: %v", file, err) - } - return keys, nil -} - -// verifyKeyData returns true if the provided data appears to be a valid private key. -func verifyKeyData(data []byte) bool { - if len(data) == 0 { - return false - } - _, err := ParsePrivateKeyPEM(data) - return err == nil -} - -// ParsePrivateKeyPEM returns a private key parsed from a PEM block in the supplied data. -// Recognizes PEM blocks for "EC PRIVATE KEY", "RSA PRIVATE KEY", or "PRIVATE KEY" -func ParsePrivateKeyPEM(keyData []byte) (interface{}, error) { - var privateKeyPemBlock *pem.Block - for { - privateKeyPemBlock, keyData = pem.Decode(keyData) - if privateKeyPemBlock == nil { - break - } - - switch privateKeyPemBlock.Type { - case ECPrivateKeyBlockType: - // ECDSA Private Key in ASN.1 format - if key, err := x509.ParseECPrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - case RSAPrivateKeyBlockType: - // RSA Private Key in PKCS#1 format - if key, err := x509.ParsePKCS1PrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - case PrivateKeyBlockType: - // RSA or ECDSA Private Key in unencrypted PKCS#8 format - if key, err := x509.ParsePKCS8PrivateKey(privateKeyPemBlock.Bytes); err == nil { - return key, nil - } - } - - // tolerate non-key PEM blocks for compatibility with things like "EC PARAMETERS" blocks - // originally, only the first PEM block was parsed and expected to be a key block - } - - // we read all the PEM blocks and didn't recognize one - return nil, fmt.Errorf("data does not contain a valid RSA or ECDSA private key") -} - -// ParsePublicKeysPEM is a helper function for reading an array of rsa.PublicKey or ecdsa.PublicKey from a PEM-encoded byte array. -// Reads public keys from both public and private key files. 
-func ParsePublicKeysPEM(keyData []byte) ([]interface{}, error) { - var block *pem.Block - keys := []interface{}{} - for { - // read the next block - block, keyData = pem.Decode(keyData) - if block == nil { - break - } - - // test block against parsing functions - if privateKey, err := parseRSAPrivateKey(block.Bytes); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := parseRSAPublicKey(block.Bytes); err == nil { - keys = append(keys, publicKey) - continue - } - if privateKey, err := parseECPrivateKey(block.Bytes); err == nil { - keys = append(keys, &privateKey.PublicKey) - continue - } - if publicKey, err := parseECPublicKey(block.Bytes); err == nil { - keys = append(keys, publicKey) - continue - } - - // tolerate non-key PEM blocks for backwards compatibility - // originally, only the first PEM block was parsed and expected to be a key block - } - - if len(keys) == 0 { - return nil, fmt.Errorf("data does not contain any valid RSA or ECDSA public keys") - } - return keys, nil -} - -// parseRSAPublicKey parses a single RSA public key from the provided data -func parseRSAPublicKey(data []byte) (*rsa.PublicKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { - if cert, err := x509.ParseCertificate(data); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - // Test if parsed key is an RSA Public Key - var pubKey *rsa.PublicKey - var ok bool - if pubKey, ok = parsedKey.(*rsa.PublicKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid RSA Public Key") - } - - return pubKey, nil -} - -// parseRSAPrivateKey parses a single RSA private key from the provided data -func parseRSAPrivateKey(data []byte) (*rsa.PrivateKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKCS1PrivateKey(data); err != nil { - if parsedKey, err = x509.ParsePKCS8PrivateKey(data); err != nil { - return nil, err - } - } - - // Test if parsed key is an RSA Private Key - var privKey *rsa.PrivateKey - var ok bool - if privKey, ok = parsedKey.(*rsa.PrivateKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid RSA Private Key") - } - - return privKey, nil -} - -// parseECPublicKey parses a single ECDSA public key from the provided data -func parseECPublicKey(data []byte) (*ecdsa.PublicKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParsePKIXPublicKey(data); err != nil { - if cert, err := x509.ParseCertificate(data); err == nil { - parsedKey = cert.PublicKey - } else { - return nil, err - } - } - - // Test if parsed key is an ECDSA Public Key - var pubKey *ecdsa.PublicKey - var ok bool - if pubKey, ok = parsedKey.(*ecdsa.PublicKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid ECDSA Public Key") - } - - return pubKey, nil -} - -// parseECPrivateKey parses a single ECDSA private key from the provided data -func parseECPrivateKey(data []byte) (*ecdsa.PrivateKey, error) { - var err error - - // Parse the key - var parsedKey interface{} - if parsedKey, err = x509.ParseECPrivateKey(data); err != nil { - return nil, err - } - - // Test if parsed key is an ECDSA Private Key - var privKey *ecdsa.PrivateKey - var ok bool - if privKey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { - return nil, fmt.Errorf("data doesn't contain valid ECDSA Private Key") - } - - return privKey, nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt 
index 89f8e31..488eb4b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -15,8 +15,6 @@ github.com/alicebob/miniredis/v2/server github.com/beorn7/perks/quantile # github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 github.com/bradfitz/gomemcache/memcache -# github.com/davecgh/go-spew v1.1.1 -github.com/davecgh/go-spew/spew # github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 github.com/gin-contrib/sse # github.com/gin-gonic/gin v1.4.0 @@ -27,6 +25,8 @@ github.com/gin-gonic/gin/render # github.com/gogo/protobuf v1.3.1 github.com/gogo/protobuf/proto github.com/gogo/protobuf/sortkeys +# github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b +github.com/golang/glog # github.com/golang/protobuf v1.4.2 github.com/golang/protobuf/proto github.com/golang/protobuf/ptypes @@ -36,6 +36,8 @@ github.com/golang/protobuf/ptypes/timestamp # github.com/gomodule/redigo v2.0.0+incompatible github.com/gomodule/redigo/internal github.com/gomodule/redigo/redis +# github.com/google/btree v1.0.0 +github.com/google/btree # github.com/google/gofuzz v1.1.0 github.com/google/gofuzz # github.com/google/uuid v1.1.1 @@ -45,6 +47,9 @@ github.com/googleapis/gnostic/OpenAPIv2 github.com/googleapis/gnostic/compiler github.com/googleapis/gnostic/extensions github.com/googleapis/gnostic/jsonschema +# github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 +github.com/gregjones/httpcache +github.com/gregjones/httpcache/diskcache # github.com/hashicorp/golang-lru v0.5.3 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -66,6 +71,8 @@ github.com/modern-go/reflect2 github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log +# github.com/peterbourgon/diskv v2.0.1+incompatible +github.com/peterbourgon/diskv # github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal @@ -161,12 +168,12 @@ gopkg.in/inf.v0 gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 gopkg.in/yaml.v3 -# k8s.io/api v0.18.6 +# k8s.io/api v0.0.0-20190708094356-59223ed9f6ce +k8s.io/api/admissionregistration/v1alpha1 k8s.io/api/admissionregistration/v1beta1 k8s.io/api/apps/v1 k8s.io/api/apps/v1beta1 k8s.io/api/apps/v1beta2 -k8s.io/api/auditregistration/v1alpha1 k8s.io/api/authentication/v1 k8s.io/api/authentication/v1beta1 k8s.io/api/authorization/v1 @@ -178,20 +185,15 @@ k8s.io/api/batch/v1 k8s.io/api/batch/v1beta1 k8s.io/api/batch/v2alpha1 k8s.io/api/certificates/v1beta1 -k8s.io/api/coordination/v1 k8s.io/api/coordination/v1beta1 k8s.io/api/core/v1 k8s.io/api/events/v1beta1 k8s.io/api/extensions/v1beta1 k8s.io/api/networking/v1 -k8s.io/api/networking/v1beta1 -k8s.io/api/node/v1alpha1 -k8s.io/api/node/v1beta1 k8s.io/api/policy/v1beta1 k8s.io/api/rbac/v1 k8s.io/api/rbac/v1alpha1 k8s.io/api/rbac/v1beta1 -k8s.io/api/scheduling/v1 k8s.io/api/scheduling/v1alpha1 k8s.io/api/scheduling/v1beta1 k8s.io/api/settings/v1alpha1 @@ -233,15 +235,15 @@ k8s.io/apimachinery/pkg/util/yaml k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/reflect -# k8s.io/client-go v11.0.0+incompatible +# k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df k8s.io/client-go/discovery k8s.io/client-go/kubernetes k8s.io/client-go/kubernetes/scheme +k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 
k8s.io/client-go/kubernetes/typed/apps/v1 k8s.io/client-go/kubernetes/typed/apps/v1beta1 k8s.io/client-go/kubernetes/typed/apps/v1beta2 -k8s.io/client-go/kubernetes/typed/auditregistration/v1alpha1 k8s.io/client-go/kubernetes/typed/authentication/v1 k8s.io/client-go/kubernetes/typed/authentication/v1beta1 k8s.io/client-go/kubernetes/typed/authorization/v1 @@ -253,20 +255,15 @@ k8s.io/client-go/kubernetes/typed/batch/v1 k8s.io/client-go/kubernetes/typed/batch/v1beta1 k8s.io/client-go/kubernetes/typed/batch/v2alpha1 k8s.io/client-go/kubernetes/typed/certificates/v1beta1 -k8s.io/client-go/kubernetes/typed/coordination/v1 k8s.io/client-go/kubernetes/typed/coordination/v1beta1 k8s.io/client-go/kubernetes/typed/core/v1 k8s.io/client-go/kubernetes/typed/events/v1beta1 k8s.io/client-go/kubernetes/typed/extensions/v1beta1 k8s.io/client-go/kubernetes/typed/networking/v1 -k8s.io/client-go/kubernetes/typed/networking/v1beta1 -k8s.io/client-go/kubernetes/typed/node/v1alpha1 -k8s.io/client-go/kubernetes/typed/node/v1beta1 k8s.io/client-go/kubernetes/typed/policy/v1beta1 k8s.io/client-go/kubernetes/typed/rbac/v1 k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 k8s.io/client-go/kubernetes/typed/rbac/v1beta1 -k8s.io/client-go/kubernetes/typed/scheduling/v1 k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 k8s.io/client-go/kubernetes/typed/settings/v1alpha1 @@ -287,11 +284,9 @@ k8s.io/client-go/transport k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation k8s.io/client-go/util/flowcontrol -k8s.io/client-go/util/keyutil +k8s.io/client-go/util/integer # k8s.io/klog v1.0.0 k8s.io/klog -# k8s.io/utils v0.0.0-20200724153422-f32512634ab7 -k8s.io/utils/integer # sigs.k8s.io/structured-merge-diff/v3 v3.0.0 sigs.k8s.io/structured-merge-diff/v3/value # sigs.k8s.io/yaml v1.2.0 From b65e441ab17e826158b56814051c8c4d584f3ce3 Mon Sep 17 00:00:00 2001 From: Ryan Rose Date: Mon, 27 Jul 2020 09:14:43 -0400 Subject: [PATCH 4/6] fix apimachinery version, inCluster bug --- go.mod | 21 +- go.sum | 2 + redispool.go | 2 +- .../ghodss}/yaml/.gitignore | 0 vendor/github.com/ghodss/yaml/.travis.yml | 7 + .../ghodss}/yaml/LICENSE | 0 .../ghodss}/yaml/README.md | 31 +- .../ghodss}/yaml/fields.go | 7 +- .../ghodss}/yaml/yaml.go | 133 +- .../k8s.io/apimachinery/pkg/api/errors/OWNERS | 3 +- .../apimachinery/pkg/api/errors/errors.go | 108 +- .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 6 +- .../k8s.io/apimachinery/pkg/api/meta/help.go | 79 +- .../k8s.io/apimachinery/pkg/api/meta/meta.go | 30 +- .../apimachinery/pkg/api/resource/OWNERS | 4 +- .../pkg/api/resource/generated.pb.go | 51 +- .../pkg/api/resource/generated.proto | 22 +- .../apimachinery/pkg/api/resource/math.go | 12 +- .../apimachinery/pkg/api/resource/quantity.go | 37 +- .../pkg/api/resource/quantity_proto.go | 34 +- .../apimachinery/pkg/apis/meta/v1/OWNERS | 3 +- .../pkg/apis/meta/v1/controller_ref.go | 19 +- .../pkg/apis/meta/v1/conversion.go | 148 +- .../apimachinery/pkg/apis/meta/v1/doc.go | 2 - .../apimachinery/pkg/apis/meta/v1/duration.go | 15 - .../pkg/apis/meta/v1/generated.pb.go | 6799 +++++------------ .../pkg/apis/meta/v1/generated.proto | 301 +- .../apimachinery/pkg/apis/meta/v1/helpers.go | 48 - .../apimachinery/pkg/apis/meta/v1/meta.go | 20 +- .../pkg/apis/meta/v1/micro_time.go | 31 +- .../pkg/apis/meta/v1/micro_time_proto.go | 8 - .../apimachinery/pkg/apis/meta/v1/register.go | 65 +- .../apimachinery/pkg/apis/meta/v1/time.go | 28 +- .../pkg/apis/meta/v1/time_proto.go | 12 +- 
.../apimachinery/pkg/apis/meta/v1/types.go | 413 +- .../meta/v1/types_swagger_doc_generated.go | 232 +- .../pkg/apis/meta/v1/unstructured/helpers.go | 79 +- .../apis/meta/v1/unstructured/unstructured.go | 98 +- .../meta/v1/unstructured/unstructured_list.go | 22 - .../apis/meta/v1/zz_generated.conversion.go | 523 -- .../pkg/apis/meta/v1/zz_generated.deepcopy.go | 310 +- .../pkg/apis/meta/v1beta1/conversion.go | 27 + .../pkg/apis/meta/{v1 => v1beta1}/deepcopy.go | 8 +- .../pkg/apis/meta/v1beta1}/doc.go | 13 +- .../pkg/apis/meta/v1beta1/generated.pb.go | 632 ++ .../pkg/apis/meta/v1beta1/generated.proto | 57 + .../pkg/apis/meta/v1beta1/register.go | 57 + .../pkg/apis/meta/v1beta1/types.go | 161 + .../v1beta1/types_swagger_doc_generated.go | 104 + .../meta/v1beta1/zz_generated.deepcopy.go | 189 + .../meta/v1beta1/zz_generated.defaults.go | 32 + .../apimachinery/pkg/conversion/converter.go | 155 +- .../pkg/conversion/queryparams/convert.go | 4 + .../k8s.io/apimachinery/pkg/labels/labels.go | 2 +- .../apimachinery/pkg/labels/selector.go | 57 +- .../k8s.io/apimachinery/pkg/runtime/codec.go | 92 +- .../apimachinery/pkg/runtime/conversion.go | 129 +- .../apimachinery/pkg/runtime/converter.go | 136 +- .../apimachinery/pkg/runtime/embedded.go | 15 +- .../k8s.io/apimachinery/pkg/runtime/error.go | 29 - .../apimachinery/pkg/runtime/generated.pb.go | 429 +- .../apimachinery/pkg/runtime/generated.proto | 20 +- .../k8s.io/apimachinery/pkg/runtime/helper.go | 53 +- .../apimachinery/pkg/runtime/interfaces.go | 92 - .../k8s.io/apimachinery/pkg/runtime/mapper.go | 98 - .../apimachinery/pkg/runtime/negotiate.go | 146 - .../apimachinery/pkg/runtime/register.go | 30 + .../pkg/runtime/schema/generated.pb.go | 25 +- .../pkg/runtime/schema/group_version.go | 24 +- .../k8s.io/apimachinery/pkg/runtime/scheme.go | 45 +- .../pkg/runtime/serializer/codec_factory.go | 141 +- .../pkg/runtime/serializer/json/json.go | 143 +- .../runtime/serializer/protobuf/protobuf.go | 117 +- .../runtime/serializer/protobuf_extension.go | 48 + .../serializer/versioning/versioning.go | 166 +- .../k8s.io/apimachinery/pkg/runtime/types.go | 19 +- .../apimachinery/pkg/runtime/types_proto.go | 90 +- .../pkg/runtime/zz_generated.deepcopy.go | 33 + vendor/k8s.io/apimachinery/pkg/types/patch.go | 1 - .../apimachinery/pkg/util/clock/clock.go | 117 +- .../apimachinery/pkg/util/errors/errors.go | 58 +- .../pkg/util/intstr/generated.pb.go | 243 +- .../pkg/util/intstr/generated.proto | 2 +- .../apimachinery/pkg/util/intstr/intstr.go | 13 +- .../k8s.io/apimachinery/pkg/util/json/json.go | 53 +- .../pkg/util/naming/from_stack.go | 2 +- .../k8s.io/apimachinery/pkg/util/net/http.go | 94 +- .../apimachinery/pkg/util/net/interface.go | 111 +- .../k8s.io/apimachinery/pkg/util/net/util.go | 17 - .../apimachinery/pkg/util/runtime/runtime.go | 54 +- .../k8s.io/apimachinery/pkg/util/sets/byte.go | 6 +- .../k8s.io/apimachinery/pkg/util/sets/int.go | 6 +- .../apimachinery/pkg/util/sets/int32.go | 205 - .../apimachinery/pkg/util/sets/int64.go | 6 +- .../apimachinery/pkg/util/sets/string.go | 6 +- .../pkg/util/validation/field/errors.go | 15 +- .../pkg/util/validation/validation.go | 97 +- .../apimachinery/pkg/util/yaml/decoder.go | 8 +- vendor/k8s.io/apimachinery/pkg/version/doc.go | 3 +- .../apimachinery/pkg/watch/streamwatcher.go | 35 +- vendor/k8s.io/apimachinery/pkg/watch/watch.go | 21 +- vendor/k8s.io/klog/.travis.yml | 16 - vendor/k8s.io/klog/CONTRIBUTING.md | 22 - vendor/k8s.io/klog/LICENSE | 191 - vendor/k8s.io/klog/OWNERS | 19 - vendor/k8s.io/klog/README.md 
| 97 - vendor/k8s.io/klog/RELEASE.md | 9 - vendor/k8s.io/klog/SECURITY_CONTACTS | 20 - vendor/k8s.io/klog/code-of-conduct.md | 3 - vendor/k8s.io/klog/go.mod | 5 - vendor/k8s.io/klog/go.sum | 2 - vendor/k8s.io/klog/klog.go | 1308 ---- vendor/k8s.io/klog/klog_file.go | 139 - vendor/modules.txt | 11 +- .../structured-merge-diff/v3/LICENSE | 201 - .../v3/value/allocator.go | 203 - .../structured-merge-diff/v3/value/fields.go | 97 - .../v3/value/jsontagutil.go | 91 - .../structured-merge-diff/v3/value/list.go | 139 - .../v3/value/listreflect.go | 98 - .../v3/value/listunstructured.go | 74 - .../structured-merge-diff/v3/value/map.go | 270 - .../v3/value/mapreflect.go | 209 - .../v3/value/mapunstructured.go | 190 - .../v3/value/reflectcache.go | 463 -- .../structured-merge-diff/v3/value/scalar.go | 50 - .../v3/value/structreflect.go | 208 - .../structured-merge-diff/v3/value/value.go | 347 - .../v3/value/valuereflect.go | 294 - .../v3/value/valueunstructured.go | 178 - vendor/sigs.k8s.io/yaml/.travis.yml | 13 - vendor/sigs.k8s.io/yaml/CONTRIBUTING.md | 31 - vendor/sigs.k8s.io/yaml/OWNERS | 27 - vendor/sigs.k8s.io/yaml/RELEASE.md | 9 - vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS | 17 - vendor/sigs.k8s.io/yaml/code-of-conduct.md | 3 - vendor/sigs.k8s.io/yaml/go.mod | 8 - vendor/sigs.k8s.io/yaml/go.sum | 9 - vendor/sigs.k8s.io/yaml/yaml_go110.go | 14 - 139 files changed, 5082 insertions(+), 14559 deletions(-) rename vendor/{sigs.k8s.io => github.com/ghodss}/yaml/.gitignore (100%) create mode 100644 vendor/github.com/ghodss/yaml/.travis.yml rename vendor/{sigs.k8s.io => github.com/ghodss}/yaml/LICENSE (100%) rename vendor/{sigs.k8s.io => github.com/ghodss}/yaml/README.md (76%) rename vendor/{sigs.k8s.io => github.com/ghodss}/yaml/fields.go (99%) rename vendor/{sigs.k8s.io => github.com/ghodss}/yaml/yaml.go (64%) delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go rename vendor/k8s.io/apimachinery/pkg/apis/meta/{v1 => v1beta1}/deepcopy.go (91%) rename vendor/{sigs.k8s.io/structured-merge-diff/v3/value => k8s.io/apimachinery/pkg/apis/meta/v1beta1}/doc.go (63%) create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go create mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/mapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go create mode 100644 vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/util/sets/int32.go delete mode 100644 vendor/k8s.io/klog/.travis.yml delete mode 100644 vendor/k8s.io/klog/CONTRIBUTING.md delete mode 100644 vendor/k8s.io/klog/LICENSE delete mode 100644 vendor/k8s.io/klog/OWNERS delete mode 100644 vendor/k8s.io/klog/README.md delete mode 100644 vendor/k8s.io/klog/RELEASE.md delete mode 100644 vendor/k8s.io/klog/SECURITY_CONTACTS delete mode 100644 vendor/k8s.io/klog/code-of-conduct.md delete mode 100644 
vendor/k8s.io/klog/go.mod delete mode 100644 vendor/k8s.io/klog/go.sum delete mode 100644 vendor/k8s.io/klog/klog.go delete mode 100644 vendor/k8s.io/klog/klog_file.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go delete mode 100644 vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go delete mode 100644 vendor/sigs.k8s.io/yaml/.travis.yml delete mode 100644 vendor/sigs.k8s.io/yaml/CONTRIBUTING.md delete mode 100644 vendor/sigs.k8s.io/yaml/OWNERS delete mode 100644 vendor/sigs.k8s.io/yaml/RELEASE.md delete mode 100644 vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS delete mode 100644 vendor/sigs.k8s.io/yaml/code-of-conduct.md delete mode 100644 vendor/sigs.k8s.io/yaml/go.mod delete mode 100644 vendor/sigs.k8s.io/yaml/go.sum delete mode 100644 vendor/sigs.k8s.io/yaml/yaml_go110.go diff --git a/go.mod b/go.mod index 5eea825..8257cf6 100644 --- a/go.mod +++ b/go.mod @@ -7,24 +7,43 @@ require ( github.com/FZambia/sentinel v1.1.0 github.com/Jim-Lambert-Bose/cache v1.0.3 github.com/alicebob/miniredis/v2 v2.11.0 + github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 // indirect + github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 // indirect + github.com/evanphx/json-patch v4.2.0+incompatible // indirect github.com/gin-gonic/gin v1.4.0 + github.com/gogo/protobuf v1.3.1 // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 // indirect github.com/gomodule/redigo v2.0.0+incompatible github.com/google/btree v1.0.0 // indirect + github.com/google/gofuzz v1.1.0 // indirect github.com/google/uuid v1.1.1 github.com/googleapis/gnostic v0.5.1 // indirect github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/golang-lru v0.5.3 + github.com/json-iterator/go v1.1.8 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/onsi/ginkgo v1.11.0 // indirect + github.com/onsi/gomega v1.7.0 // indirect github.com/opentracing/opentracing-go v1.1.0 github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/prometheus/client_golang v1.1.0 github.com/sirupsen/logrus v1.4.2 + github.com/spf13/pflag v1.0.5 // indirect github.com/ugorji/go v1.1.7 // indirect 
github.com/zsais/go-gin-prometheus v0.1.0 + golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 // indirect golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect + golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 // indirect + golang.org/x/text v0.3.2 // indirect golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect + gopkg.in/inf.v0 v0.9.1 // indirect k8s.io/api v0.0.0-20190708094356-59223ed9f6ce // indirect - k8s.io/apimachinery v0.18.6 // indirect + k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc // indirect k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df + k8s.io/klog v1.0.0 // indirect + k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 // indirect + sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect + sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/go.sum b/go.sum index 200e56d..324029b 100644 --- a/go.sum +++ b/go.sum @@ -253,6 +253,8 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.0.0-20190708094356-59223ed9f6ce h1:5dyrqZX6sr+cG/e26KF2p6GuTIABBACjjGSOSdjS9sI= k8s.io/api v0.0.0-20190708094356-59223ed9f6ce/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc h1:7z9/6jKWBqkK9GI1RRB0B5fZcmkatLQ/nv8kysch24o= +k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df h1:d28f1QoXWvL+FVVpHzCXdx5020GRqd3X3LZjKwJ3xss= diff --git a/redispool.go b/redispool.go index 6af2e43..0f523e4 100644 --- a/redispool.go +++ b/redispool.go @@ -133,7 +133,7 @@ func inCluster(logger *logrus.Entry) bool { logger.Infof("unable to create k8s config: %s", err) return false } - _, err := kubernetes.NewForConfig(config) + _, err = kubernetes.NewForConfig(config) if err != nil { logger.Infof("unable to create k8s clientset: %s", err) return false diff --git a/vendor/sigs.k8s.io/yaml/.gitignore b/vendor/github.com/ghodss/yaml/.gitignore similarity index 100% rename from vendor/sigs.k8s.io/yaml/.gitignore rename to vendor/github.com/ghodss/yaml/.gitignore diff --git a/vendor/github.com/ghodss/yaml/.travis.yml b/vendor/github.com/ghodss/yaml/.travis.yml new file mode 100644 index 0000000..0e9d6ed --- /dev/null +++ b/vendor/github.com/ghodss/yaml/.travis.yml @@ -0,0 +1,7 @@ +language: go +go: + - 1.3 + - 1.4 +script: + - go test + - go build diff --git a/vendor/sigs.k8s.io/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE similarity index 100% rename from vendor/sigs.k8s.io/yaml/LICENSE rename to vendor/github.com/ghodss/yaml/LICENSE diff --git a/vendor/sigs.k8s.io/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md similarity index 76% rename from vendor/sigs.k8s.io/yaml/README.md rename to vendor/github.com/ghodss/yaml/README.md index 5a651d9..f8f7e36 100644 --- a/vendor/sigs.k8s.io/yaml/README.md +++ b/vendor/github.com/ghodss/yaml/README.md @@ -1,18 +1,16 @@ # YAML marshaling and unmarshaling support for Go -[![Build Status](https://travis-ci.org/kubernetes-sigs/yaml.svg)](https://travis-ci.org/kubernetes-sigs/yaml) - -kubernetes-sigs/yaml is a permanent fork of [ghodss/yaml](https://github.com/ghodss/yaml). 
+[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) ## Introduction -A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. -In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://web.archive.org/web/20190603050330/http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). ## Compatibility -This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). +This package uses [go-yaml v2](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). ## Caveats @@ -34,29 +32,27 @@ GOOD: To install, run: ``` -$ go get sigs.k8s.io/yaml +$ go get github.com/ghodss/yaml ``` And import using: ``` -import "sigs.k8s.io/yaml" +import "github.com/ghodss/yaml" ``` Usage is very similar to the JSON library: ```go -package main - import ( "fmt" - "sigs.k8s.io/yaml" + "github.com/ghodss/yaml" ) type Person struct { - Name string `json:"name"` // Affects YAML field names too. - Age int `json:"age"` + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"name"` } func main() { @@ -69,13 +65,13 @@ func main() { } fmt.Println(string(y)) /* Output: - age: 30 name: John + age: 30 */ // Unmarshal the YAML back into a Person struct. var p2 Person - err = yaml.Unmarshal(y, &p2) + err := yaml.Unmarshal(y, &p2) if err != nil { fmt.Printf("err: %v\n", err) return @@ -90,14 +86,11 @@ func main() { `yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: ```go -package main - import ( "fmt" - "sigs.k8s.io/yaml" + "github.com/ghodss/yaml" ) - func main() { j := []byte(`{"name": "John", "age": 30}`) y, err := yaml.JSONToYAML(j) diff --git a/vendor/sigs.k8s.io/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go similarity index 99% rename from vendor/sigs.k8s.io/yaml/fields.go rename to vendor/github.com/ghodss/yaml/fields.go index 235b7f2..0bd3c2b 100644 --- a/vendor/sigs.k8s.io/yaml/fields.go +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -1,7 +1,6 @@ // Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
- package yaml import ( @@ -46,11 +45,7 @@ func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.Te break } if v.IsNil() { - if v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } else { - v = reflect.New(v.Type().Elem()) - } + v.Set(reflect.New(v.Type().Elem())) } if v.Type().NumMethod() > 0 { if u, ok := v.Interface().(json.Unmarshaler); ok { diff --git a/vendor/sigs.k8s.io/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go similarity index 64% rename from vendor/sigs.k8s.io/yaml/yaml.go rename to vendor/github.com/ghodss/yaml/yaml.go index efbc535..c02beac 100644 --- a/vendor/sigs.k8s.io/yaml/yaml.go +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -4,58 +4,37 @@ import ( "bytes" "encoding/json" "fmt" - "io" "reflect" "strconv" "gopkg.in/yaml.v2" ) -// Marshal marshals the object into JSON then converts JSON to YAML and returns the +// Marshals the object into JSON then converts JSON to YAML and returns the // YAML. func Marshal(o interface{}) ([]byte, error) { j, err := json.Marshal(o) if err != nil { - return nil, fmt.Errorf("error marshaling into JSON: %v", err) + return nil, fmt.Errorf("error marshaling into JSON: ", err) } y, err := JSONToYAML(j) if err != nil { - return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + return nil, fmt.Errorf("error converting JSON to YAML: ", err) } return y, nil } -// JSONOpt is a decoding option for decoding from JSON format. -type JSONOpt func(*json.Decoder) *json.Decoder - -// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, -// optionally configuring the behavior of the JSON unmarshal. -func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, false, opts...) -} - -// UnmarshalStrict strictly converts YAML to JSON then uses JSON to unmarshal -// into an object, optionally configuring the behavior of the JSON unmarshal. -func UnmarshalStrict(y []byte, o interface{}, opts ...JSONOpt) error { - return yamlUnmarshal(y, o, true, append(opts, DisallowUnknownFields)...) -} - -// yamlUnmarshal unmarshals the given YAML byte stream into the given interface, -// optionally performing the unmarshalling strictly -func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error { +// Converts YAML to JSON then uses JSON to unmarshal into an object. +func Unmarshal(y []byte, o interface{}) error { vo := reflect.ValueOf(o) - unmarshalFn := yaml.Unmarshal - if strict { - unmarshalFn = yaml.UnmarshalStrict - } - j, err := yamlToJSON(y, &vo, unmarshalFn) + j, err := yamlToJSON(y, &vo) if err != nil { return fmt.Errorf("error converting YAML to JSON: %v", err) } - err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + err = json.Unmarshal(j, o) if err != nil { return fmt.Errorf("error unmarshaling JSON: %v", err) } @@ -63,28 +42,13 @@ func yamlUnmarshal(y []byte, o interface{}, strict bool, opts ...JSONOpt) error return nil } -// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the -// object, optionally applying decoder options prior to decoding. We are not -// using json.Unmarshal directly as we want the chance to pass in non-default -// options. -func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { - d := json.NewDecoder(r) - for _, opt := range opts { - d = opt(d) - } - if err := d.Decode(&o); err != nil { - return fmt.Errorf("while decoding JSON: %v", err) - } - return nil -} - -// JSONToYAML Converts JSON to YAML. +// Convert JSON to YAML. 
func JSONToYAML(j []byte) ([]byte, error) { // Convert the JSON to an object. var jsonObj interface{} // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the // Go JSON library doesn't try to pick the right number type (int, float, - // etc.) when unmarshalling to interface{}, it just picks float64 + // etc.) when unmarshling to interface{}, it just picks float64 // universally. go-yaml does go through the effort of picking the right // number type, so we can preserve number type throughout this process. err := yaml.Unmarshal(j, &jsonObj) @@ -96,8 +60,8 @@ func JSONToYAML(j []byte) ([]byte, error) { return yaml.Marshal(jsonObj) } -// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, -// passing JSON through this method should be a no-op. +// Convert YAML to JSON. Since JSON is a subset of YAML, passing JSON through +// this method should be a no-op. // // Things YAML can do that are not supported by JSON: // * In YAML you can have binary and null keys in your maps. These are invalid @@ -106,22 +70,14 @@ func JSONToYAML(j []byte) ([]byte, error) { // use binary data with this library, encode the data as base64 as usual but do // not use the !!binary tag in your YAML. This will ensure the original base64 // encoded data makes it all the way through to the JSON. -// -// For strict decoding of YAML, use YAMLToJSONStrict. func YAMLToJSON(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.Unmarshal) + return yamlToJSON(y, nil) } -// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, -// returning an error on any duplicate field names. -func YAMLToJSONStrict(y []byte) ([]byte, error) { - return yamlToJSON(y, nil, yaml.UnmarshalStrict) -} - -func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { +func yamlToJSON(y []byte, jsonTarget *reflect.Value) ([]byte, error) { // Convert the YAML to an object. var yamlObj interface{} - err := yamlUnmarshal(y, &yamlObj) + err := yaml.Unmarshal(y, &yamlObj) if err != nil { return nil, err } @@ -316,65 +272,6 @@ func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (in } return yamlObj, nil } -} - -// JSONObjectToYAMLObject converts an in-memory JSON object into a YAML in-memory MapSlice, -// without going through a byte representation. A nil or empty map[string]interface{} input is -// converted to an empty map, i.e. yaml.MapSlice(nil). -// -// interface{} slices stay interface{} slices. map[string]interface{} becomes yaml.MapSlice. -// -// int64 and float64 are down casted following the logic of github.com/go-yaml/yaml: -// - float64s are down-casted as far as possible without data-loss to int, int64, uint64. -// - int64s are down-casted to int if possible without data-loss. -// -// Big int/int64/uint64 do not lose precision as in the json-yaml roundtripping case. -// -// string, bool and any other types are unchanged. 
-func JSONObjectToYAMLObject(j map[string]interface{}) yaml.MapSlice { - if len(j) == 0 { - return nil - } - ret := make(yaml.MapSlice, 0, len(j)) - for k, v := range j { - ret = append(ret, yaml.MapItem{Key: k, Value: jsonToYAMLValue(v)}) - } - return ret -} -func jsonToYAMLValue(j interface{}) interface{} { - switch j := j.(type) { - case map[string]interface{}: - if j == nil { - return interface{}(nil) - } - return JSONObjectToYAMLObject(j) - case []interface{}: - if j == nil { - return interface{}(nil) - } - ret := make([]interface{}, len(j)) - for i := range j { - ret[i] = jsonToYAMLValue(j[i]) - } - return ret - case float64: - // replicate the logic in https://github.com/go-yaml/yaml/blob/51d6538a90f86fe93ac480b35f37b2be17fef232/resolve.go#L151 - if i64 := int64(j); j == float64(i64) { - if i := int(i64); i64 == int64(i) { - return i - } - return i64 - } - if ui64 := uint64(j); j == float64(ui64) { - return ui64 - } - return j - case int64: - if i := int(j); j == int64(i) { - return i - } - return j - } - return j + return nil, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS index 435297a..dc6a4c7 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - reviewers: - thockin - lavalamp @@ -23,3 +21,4 @@ reviewers: - krousey - cjcullen - david-mcmahon +- goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go index e53c3e6..77c91bb 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/api/errors/errors.go @@ -20,7 +20,6 @@ import ( "encoding/json" "fmt" "net/http" - "reflect" "strings" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -32,9 +31,7 @@ import ( const ( // StatusTooManyRequests means the server experienced too many requests within a // given window and that the client must wait to perform the action again. - // DEPRECATED: please use http.StatusTooManyRequests, this will be removed in - // the future version. - StatusTooManyRequests = http.StatusTooManyRequests + StatusTooManyRequests = 429 ) // StatusError is an error intended for consumption by a REST API server; it can also be @@ -70,28 +67,6 @@ func (e *StatusError) DebugError() (string, []interface{}) { return "server response object: %#v", []interface{}{e.ErrStatus} } -// HasStatusCause returns true if the provided error has a details cause -// with the provided type name. -func HasStatusCause(err error, name metav1.CauseType) bool { - _, ok := StatusCause(err, name) - return ok -} - -// StatusCause returns the named cause from the provided error if it exists and -// the error is of the type APIStatus. Otherwise it returns false. -func StatusCause(err error, name metav1.CauseType) (metav1.StatusCause, bool) { - apierr, ok := err.(APIStatus) - if !ok || apierr == nil || apierr.Status().Details == nil { - return metav1.StatusCause{}, false - } - for _, cause := range apierr.Status().Details.Causes { - if cause.Type == name { - return cause, true - } - } - return metav1.StatusCause{}, false -} - // UnexpectedObjectError can be returned by FromObject if it's passed a non-status object. 
type UnexpectedObjectError struct { Object runtime.Object @@ -107,20 +82,7 @@ func (u *UnexpectedObjectError) Error() string { func FromObject(obj runtime.Object) error { switch t := obj.(type) { case *metav1.Status: - return &StatusError{ErrStatus: *t} - case runtime.Unstructured: - var status metav1.Status - obj := t.UnstructuredContent() - if !reflect.DeepEqual(obj["kind"], "Status") { - break - } - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(t.UnstructuredContent(), &status); err != nil { - return err - } - if status.APIVersion != "v1" && status.APIVersion != "meta.k8s.io/v1" { - break - } - return &StatusError{ErrStatus: status} + return &StatusError{*t} } return &UnexpectedObjectError{obj} } @@ -208,22 +170,7 @@ func NewConflict(qualifiedResource schema.GroupResource, name string, err error) }} } -// NewApplyConflict returns an error including details on the requests apply conflicts -func NewApplyConflict(causes []metav1.StatusCause, message string) *StatusError { - return &StatusError{ErrStatus: metav1.Status{ - Status: metav1.StatusFailure, - Code: http.StatusConflict, - Reason: metav1.StatusReasonConflict, - Details: &metav1.StatusDetails{ - // TODO: Get obj details here? - Causes: causes, - }, - Message: message, - }} -} - // NewGone returns an error indicating the item no longer available at the server and no forwarding address is known. -// DEPRECATED: Please use NewResourceExpired instead. func NewGone(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, @@ -374,7 +321,7 @@ func NewTimeoutError(message string, retryAfterSeconds int) *StatusError { func NewTooManyRequestsError(message string) *StatusError { return &StatusError{metav1.Status{ Status: metav1.StatusFailure, - Code: http.StatusTooManyRequests, + Code: StatusTooManyRequests, Reason: metav1.StatusReasonTooManyRequests, Message: fmt.Sprintf("Too many requests: %s", message), }} @@ -419,11 +366,7 @@ func NewGenericServerResponse(code int, verb string, qualifiedResource schema.Gr case http.StatusNotAcceptable: reason = metav1.StatusReasonNotAcceptable // the server message has details about what types are acceptable - if len(serverMessage) == 0 || serverMessage == "unknown" { - message = "the server was unable to respond with a content type that the client supports" - } else { - message = serverMessage - } + message = serverMessage case http.StatusUnsupportedMediaType: reason = metav1.StatusReasonUnsupportedMediaType // the server message has details about what types are acceptable @@ -646,46 +589,3 @@ func ReasonForError(err error) metav1.StatusReason { } return metav1.StatusReasonUnknown } - -// ErrorReporter converts generic errors into runtime.Object errors without -// requiring the caller to take a dependency on meta/v1 (where Status lives). -// This prevents circular dependencies in core watch code. -type ErrorReporter struct { - code int - verb string - reason string -} - -// NewClientErrorReporter will respond with valid v1.Status objects that report -// unexpected server responses. Primarily used by watch to report errors when -// we attempt to decode a response from the server and it is not in the form -// we expect. Because watch is a dependency of the core api, we can't return -// meta/v1.Status in that package and so much inject this interface to convert a -// generic error as appropriate. The reason is passed as a unique status cause -// on the returned status, otherwise the generic "ClientError" is returned. 
-func NewClientErrorReporter(code int, verb string, reason string) *ErrorReporter { - return &ErrorReporter{ - code: code, - verb: verb, - reason: reason, - } -} - -// AsObject returns a valid error runtime.Object (a v1.Status) for the given -// error, using the code and verb of the reporter type. The error is set to -// indicate that this was an unexpected server response. -func (r *ErrorReporter) AsObject(err error) runtime.Object { - status := NewGenericServerResponse(r.code, r.verb, schema.GroupResource{}, "", err.Error(), 0, true) - if status.ErrStatus.Details == nil { - status.ErrStatus.Details = &metav1.StatusDetails{} - } - reason := r.reason - if len(reason) == 0 { - reason = "ClientError" - } - status.ErrStatus.Details.Causes = append(status.ErrStatus.Details.Causes, metav1.StatusCause{ - Type: metav1.CauseType(reason), - Message: err.Error(), - }) - return &status.ErrStatus -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS index 96bccff..5f729ff 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - reviewers: - thockin - smarterclayton @@ -17,7 +15,11 @@ reviewers: - eparis - dims - krousey +- markturansky +- fabioy - resouer - david-mcmahon - mfojtik - jianhuiz +- feihujiang +- ghodss diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go index 50468b5..c70b3d2 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go @@ -17,76 +17,30 @@ limitations under the License. package meta import ( - "errors" "fmt" "reflect" - "sync" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" ) -var ( - // isListCache maintains a cache of types that are checked for lists - // which is used by IsListType. - // TODO: remove and replace with an interface check - isListCache = struct { - lock sync.RWMutex - byType map[reflect.Type]bool - }{ - byType: make(map[reflect.Type]bool, 1024), - } -) - -// IsListType returns true if the provided Object has a slice called Items. -// TODO: Replace the code in this check with an interface comparison by -// creating and enforcing that lists implement a list accessor. +// IsListType returns true if the provided Object has a slice called Items func IsListType(obj runtime.Object) bool { - switch t := obj.(type) { - case runtime.Unstructured: - return t.IsList() - } - t := reflect.TypeOf(obj) - - isListCache.lock.RLock() - ok, exists := isListCache.byType[t] - isListCache.lock.RUnlock() - - if !exists { - _, err := getItemsPtr(obj) - ok = err == nil - - // cache only the first 1024 types - isListCache.lock.Lock() - if len(isListCache.byType) < 1024 { - isListCache.byType[t] = ok - } - isListCache.lock.Unlock() + // if we're a runtime.Unstructured, check whether this is a list. + // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object + if unstructured, ok := obj.(runtime.Unstructured); ok { + return unstructured.IsList() } - return ok + _, err := GetItemsPtr(obj) + return err == nil } -var ( - errExpectFieldItems = errors.New("no Items field in this object") - errExpectSliceItems = errors.New("Items field must be a slice of objects") -) - // GetItemsPtr returns a pointer to the list object's Items member. // If 'list' doesn't have an Items member, it's not really a list type // and an error will be returned. 
// This function will either return a pointer to a slice, or an error, but not both. -// TODO: this will be replaced with an interface in the future func GetItemsPtr(list runtime.Object) (interface{}, error) { - obj, err := getItemsPtr(list) - if err != nil { - return nil, fmt.Errorf("%T is not a list: %v", list, err) - } - return obj, nil -} - -// getItemsPtr returns a pointer to the list object's Items member or an error. -func getItemsPtr(list runtime.Object) (interface{}, error) { v, err := conversion.EnforcePtr(list) if err != nil { return nil, err @@ -94,19 +48,19 @@ func getItemsPtr(list runtime.Object) (interface{}, error) { items := v.FieldByName("Items") if !items.IsValid() { - return nil, errExpectFieldItems + return nil, fmt.Errorf("no Items field in %#v", list) } switch items.Kind() { case reflect.Interface, reflect.Ptr: target := reflect.TypeOf(items.Interface()).Elem() if target.Kind() != reflect.Slice { - return nil, errExpectSliceItems + return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) } return items.Interface(), nil case reflect.Slice: return items.Addr().Interface(), nil default: - return nil, errExpectSliceItems + return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) } } @@ -204,19 +158,6 @@ func ExtractList(obj runtime.Object) ([]runtime.Object, error) { // objectSliceType is the type of a slice of Objects var objectSliceType = reflect.TypeOf([]runtime.Object{}) -// LenList returns the length of this list or 0 if it is not a list. -func LenList(list runtime.Object) int { - itemsPtr, err := GetItemsPtr(list) - if err != nil { - return 0 - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return 0 - } - return items.Len() -} - // SetList sets the given list object's Items member have the elements given in // objects. // Returns an error if list is not a List type (does not have an Items member), diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go index fa4b767..1c2a83c 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go @@ -20,12 +20,14 @@ import ( "fmt" "reflect" + "github.com/golang/glog" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" - "k8s.io/klog" ) // errNotList is returned when an object implements the Object style interfaces but not the List style @@ -113,12 +115,12 @@ func Accessor(obj interface{}) (metav1.Object, error) { // AsPartialObjectMetadata takes the metav1 interface and returns a partial object. // TODO: consider making this solely a conversion action. 
-func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { +func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata { switch t := m.(type) { case *metav1.ObjectMeta: - return &metav1.PartialObjectMetadata{ObjectMeta: *t} + return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t} default: - return &metav1.PartialObjectMetadata{ + return &metav1beta1.PartialObjectMetadata{ ObjectMeta: metav1.ObjectMeta{ Name: m.GetName(), GenerateName: m.GetGenerateName(), @@ -130,12 +132,12 @@ func AsPartialObjectMetadata(m metav1.Object) *metav1.PartialObjectMetadata { CreationTimestamp: m.GetCreationTimestamp(), DeletionTimestamp: m.GetDeletionTimestamp(), DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), - Labels: m.GetLabels(), - Annotations: m.GetAnnotations(), - OwnerReferences: m.GetOwnerReferences(), - Finalizers: m.GetFinalizers(), - ClusterName: m.GetClusterName(), - ManagedFields: m.GetManagedFields(), + Labels: m.GetLabels(), + Annotations: m.GetAnnotations(), + OwnerReferences: m.GetOwnerReferences(), + Finalizers: m.GetFinalizers(), + ClusterName: m.GetClusterName(), + Initializers: m.GetInitializers(), }, } } @@ -605,7 +607,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { var ret []metav1.OwnerReference s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - klog.Errorf("expect %v to be a pointer to slice", s) + glog.Errorf("expect %v to be a pointer to slice", s) return ret } s = s.Elem() @@ -613,7 +615,7 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) for i := 0; i < s.Len(); i++ { if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - klog.Errorf("extractFromOwnerReference failed: %v", err) + glog.Errorf("extractFromOwnerReference failed: %v", err) return ret } } @@ -623,13 +625,13 @@ func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { s := a.ownerReferences if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - klog.Errorf("expect %v to be a pointer to slice", s) + glog.Errorf("expect %v to be a pointer to slice", s) } s = s.Elem() newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) for i := 0; i < len(references); i++ { if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - klog.Errorf("setOwnerReference failed: %v", err) + glog.Errorf("setOwnerReference failed: %v", err) return } } diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS index dc77401..c430067 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - reviewers: - thockin - lavalamp @@ -11,6 +9,8 @@ reviewers: - janetkuo - tallclair - eparis +- jbeda - xiang90 - mbohlool - david-mcmahon +- goltermann diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go index 2e09f4f..802f22a 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.pb.go @@ -14,18 +14,24 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +// DO NOT EDIT! -package resource +/* +Package resource is a generated protocol buffer package. -import ( - fmt "fmt" +It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto - math "math" +It has these top-level messages: + Quantity +*/ +package resource - proto "github.com/gogo/protobuf/proto" -) +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -36,40 +42,21 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *Quantity) Reset() { *m = Quantity{} } -func (*Quantity) ProtoMessage() {} -func (*Quantity) Descriptor() ([]byte, []int) { - return fileDescriptor_612bba87bd70906c, []int{0} -} -func (m *Quantity) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Quantity.Unmarshal(m, b) -} -func (m *Quantity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Quantity.Marshal(b, m, deterministic) -} -func (m *Quantity) XXX_Merge(src proto.Message) { - xxx_messageInfo_Quantity.Merge(m, src) -} -func (m *Quantity) XXX_Size() int { - return xxx_messageInfo_Quantity.Size(m) -} -func (m *Quantity) XXX_DiscardUnknown() { - xxx_messageInfo_Quantity.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_Quantity proto.InternalMessageInfo +func (m *Quantity) Reset() { *m = Quantity{} } +func (*Quantity) ProtoMessage() {} +func (*Quantity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func init() { proto.RegisterType((*Quantity)(nil), "k8s.io.apimachinery.pkg.api.resource.Quantity") } func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptor_612bba87bd70906c) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto", fileDescriptorGenerated) } -var fileDescriptor_612bba87bd70906c = []byte{ +var fileDescriptorGenerated = []byte{ // 237 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8e, 0xb1, 0x4e, 0xc3, 0x30, 0x10, 0x40, 0xcf, 0x0b, 0x2a, 0x19, 0x2b, 0x84, 0x10, 0xc3, 0xa5, 0x42, 0x0c, 0x2c, 0xd8, 0x6b, diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto index 18a6c7c..2c615d5 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/generated.proto @@ -26,10 +26,10 @@ option go_package = "resource"; // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and AsInt64() accessors. -// +// in addition to String() and Int64() accessors. +// // The serialization format is: -// +// // ::= // (Note that may be empty, from the "" case in .) // ::= 0 | 1 | ... 
| 9 @@ -43,16 +43,16 @@ option go_package = "resource"; // ::= m | "" | k | M | G | T | P | E // (Note that 1024 = 1Ki but 1000 = 1k; I didn't choose the capitalization.) // ::= "e" | "E" -// +// // No matter which of the three exponent forms is used, no quantity may represent // a number greater than 2^63-1 in magnitude, nor may it have more than 3 decimal // places. Numbers larger or more precise will be capped or rounded up. // (E.g.: 0.1m will rounded up to 1m.) // This may be extended in the future if we require larger or smaller quantities. -// +// // When a Quantity is parsed from a string, it will remember the type of suffix // it had, and will use the same type again when it is serialized. -// +// // Before serializing, Quantity will be put in "canonical form". // This means that Exponent/suffix will be adjusted up or down (with a // corresponding increase or decrease in Mantissa) such that: @@ -60,22 +60,22 @@ option go_package = "resource"; // b. No fractional digits will be emitted // c. The exponent (or suffix) is as large as possible. // The sign will be omitted unless the number is negative. -// +// // Examples: // 1.5 will be serialized as "1500m" // 1.5Gi will be serialized as "1536Mi" -// +// // Note that the quantity will NEVER be internally represented by a // floating point number. That is the whole point of this exercise. -// +// // Non-canonical values will still parse as long as they are well formed, // but will be re-emitted in their canonical form. (So always use canonical // form, or don't diff.) -// +// // This format is intended to make it difficult to use these numbers without // writing some sort of special handling code in the hopes that that will // cause implementors to also use a fixed point implementation. -// +// // +protobuf=true // +protobuf.embed=string // +protobuf.options.marshal=false diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go index 8ffcb9f..72d3880 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/math.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/math.go @@ -37,8 +37,12 @@ var ( big1024 = big.NewInt(1024) // Commonly needed inf.Dec values-- treat as read only! - decZero = inf.NewDec(0, 0) - decOne = inf.NewDec(1, 0) + decZero = inf.NewDec(0, 0) + decOne = inf.NewDec(1, 0) + decMinusOne = inf.NewDec(-1, 0) + decThousand = inf.NewDec(1000, 0) + dec1024 = inf.NewDec(1024, 0) + decMinus1024 = inf.NewDec(-1024, 0) // Largest (in magnitude) number allowed. maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64 @@ -190,9 +194,9 @@ func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) { } if fraction { if base > 0 { - value++ + value += 1 } else { - value-- + value += -1 } } return value, !fraction diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go index d95e03a..b155a62 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity.go @@ -29,7 +29,7 @@ import ( // Quantity is a fixed-point representation of a number. // It provides convenient marshaling/unmarshaling in JSON and YAML, -// in addition to String() and AsInt64() accessors. +// in addition to String() and Int64() accessors. // // The serialization format is: // @@ -584,12 +584,6 @@ func (q *Quantity) Neg() { q.d.Dec.Neg(q.d.Dec) } -// Equal checks equality of two Quantities. This is useful for testing with -// cmp.Equal. 
-func (q Quantity) Equal(v Quantity) bool { - return q.Cmp(v) == 0 -} - // int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation // of most Quantity values. const int64QuantityExpectedBytes = 18 @@ -634,11 +628,6 @@ func (q Quantity) MarshalJSON() ([]byte, error) { return result, nil } -// ToUnstructured implements the value.UnstructuredConverter interface. -func (q Quantity) ToUnstructured() interface{} { - return q.String() -} - // UnmarshalJSON implements the json.Unmarshaller interface. // TODO: Remove support for leading/trailing whitespace func (q *Quantity) UnmarshalJSON(value []byte) error { @@ -691,7 +680,7 @@ func NewScaledQuantity(value int64, scale Scale) *Quantity { } } -// Value returns the unscaled value of q rounded up to the nearest integer away from 0. +// Value returns the value of q; any fractional part will be lost. func (q *Quantity) Value() int64 { return q.ScaledValue(0) } @@ -702,9 +691,7 @@ func (q *Quantity) MilliValue() int64 { return q.ScaledValue(Milli) } -// ScaledValue returns the value of ceil(q / 10^scale). -// For example, NewQuantity(1, DecimalSI).ScaledValue(Milli) returns 1000. -// This could overflow an int64. +// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64. // To detect overflow, call Value() first and verify the expected magnitude. func (q *Quantity) ScaledValue(scale Scale) int64 { if q.d.Dec == nil { @@ -731,3 +718,21 @@ func (q *Quantity) SetScaled(value int64, scale Scale) { q.d.Dec = nil q.i = int64Amount{value: value, scale: scale} } + +// Copy is a convenience function that makes a deep copy for you. Non-deep +// copies of quantities share pointers and you will regret that. +func (q *Quantity) Copy() *Quantity { + if q.d.Dec == nil { + return &Quantity{ + s: q.s, + i: q.i, + Format: q.Format, + } + } + tmp := &inf.Dec{} + return &Quantity{ + s: q.s, + d: infDecAmount{tmp.Set(q.d.Dec)}, + Format: q.Format, + } +} diff --git a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go index f89ca16..74dfb4e 100644 --- a/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/api/resource/quantity_proto.go @@ -19,7 +19,6 @@ package resource import ( "fmt" "io" - "math/bits" "github.com/gogo/protobuf/proto" ) @@ -29,7 +28,7 @@ var _ proto.Sizer = &Quantity{} func (m *Quantity) Marshal() (data []byte, err error) { size := m.Size() data = make([]byte, size) - n, err := m.MarshalToSizedBuffer(data[:size]) + n, err := m.MarshalTo(data) if err != nil { return nil, err } @@ -39,40 +38,30 @@ func (m *Quantity) Marshal() (data []byte, err error) { // MarshalTo is a customized version of the generated Protobuf unmarshaler for a struct // with a single string field. func (m *Quantity) MarshalTo(data []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(data[:size]) -} - -// MarshalToSizedBuffer is a customized version of the generated -// Protobuf unmarshaler for a struct with a single string field. 
-func (m *Quantity) MarshalToSizedBuffer(data []byte) (int, error) { - i := len(data) + var i int _ = i var l int _ = l + data[i] = 0xa + i++ // BEGIN CUSTOM MARSHAL out := m.String() - i -= len(out) - copy(data[i:], out) i = encodeVarintGenerated(data, i, uint64(len(out))) + i += copy(data[i:], out) // END CUSTOM MARSHAL - i-- - data[i] = 0xa - return len(data) - i, nil + return i, nil } func encodeVarintGenerated(data []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { data[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } data[offset] = uint8(v) - return base + return offset + 1 } func (m *Quantity) Size() (n int) { @@ -88,7 +77,14 @@ func (m *Quantity) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } // Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS index 77cfb0c..cdb125a 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/OWNERS @@ -1,5 +1,3 @@ -# See the OWNERS docs at https://go.k8s.io/owners - reviewers: - thockin - smarterclayton @@ -30,3 +28,4 @@ reviewers: - mqliang - kevin-wangzefeng - jianhuiz +- feihujiang diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go index 15b45ff..042cd5b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/controller_ref.go @@ -22,7 +22,7 @@ import ( // IsControlledBy checks if the object has a controllerRef set to the given owner func IsControlledBy(obj Object, owner Object) bool { - ref := GetControllerOfNoCopy(obj) + ref := GetControllerOf(obj) if ref == nil { return false } @@ -31,20 +31,9 @@ func IsControlledBy(obj Object, owner Object) bool { // GetControllerOf returns a pointer to a copy of the controllerRef if controllee has a controller func GetControllerOf(controllee Object) *OwnerReference { - ref := GetControllerOfNoCopy(controllee) - if ref == nil { - return nil - } - cp := *ref - return &cp -} - -// GetControllerOf returns a pointer to the controllerRef if controllee has a controller -func GetControllerOfNoCopy(controllee Object) *OwnerReference { - refs := controllee.GetOwnerReferences() - for i := range refs { - if refs[i].Controller != nil && *refs[i].Controller { - return &refs[i] + for _, ref := range controllee.GetOwnerReferences() { + if ref.Controller != nil && *ref.Controller { + return &ref } } return nil diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go index b937398..5c36f82 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/conversion.go @@ -18,7 +18,6 @@ package v1 import ( "fmt" - "net/url" "strconv" "strings" @@ -26,10 +25,61 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/types" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/intstr" ) +func AddConversionFuncs(scheme *runtime.Scheme) error { + return scheme.AddConversionFuncs( + Convert_v1_TypeMeta_To_v1_TypeMeta, + + Convert_v1_ListMeta_To_v1_ListMeta, + + 
Convert_intstr_IntOrString_To_intstr_IntOrString, + + Convert_Pointer_v1_Duration_To_v1_Duration, + Convert_v1_Duration_To_Pointer_v1_Duration, + + Convert_Slice_string_To_v1_Time, + + Convert_v1_Time_To_v1_Time, + Convert_v1_MicroTime_To_v1_MicroTime, + + Convert_resource_Quantity_To_resource_Quantity, + + Convert_string_To_labels_Selector, + Convert_labels_Selector_To_string, + + Convert_string_To_fields_Selector, + Convert_fields_Selector_To_string, + + Convert_Pointer_bool_To_bool, + Convert_bool_To_Pointer_bool, + + Convert_Pointer_string_To_string, + Convert_string_To_Pointer_string, + + Convert_Pointer_int64_To_int, + Convert_int_To_Pointer_int64, + + Convert_Pointer_int32_To_int32, + Convert_int32_To_Pointer_int32, + + Convert_Pointer_int64_To_int64, + Convert_int64_To_Pointer_int64, + + Convert_Pointer_float64_To_float64, + Convert_float64_To_Pointer_float64, + + Convert_Map_string_To_string_To_v1_LabelSelector, + Convert_v1_LabelSelector_To_Map_string_To_string, + + Convert_Slice_string_To_Slice_int32, + + Convert_Slice_string_To_v1_DeletionPropagation, + ) +} + func Convert_Pointer_float64_To_float64(in **float64, out *float64, s conversion.Scope) error { if *in == nil { *out = 0 @@ -142,33 +192,12 @@ func Convert_v1_ListMeta_To_v1_ListMeta(in, out *ListMeta, s conversion.Scope) e return nil } -// +k8s:conversion-fn=copy-only -func Convert_v1_DeleteOptions_To_v1_DeleteOptions(in, out *DeleteOptions, s conversion.Scope) error { - *out = *in - return nil -} - // +k8s:conversion-fn=copy-only func Convert_intstr_IntOrString_To_intstr_IntOrString(in, out *intstr.IntOrString, s conversion.Scope) error { *out = *in return nil } -func Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(in **intstr.IntOrString, out *intstr.IntOrString, s conversion.Scope) error { - if *in == nil { - *out = intstr.IntOrString{} // zero value - return nil - } - *out = **in // copy - return nil -} - -func Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(in *intstr.IntOrString, out **intstr.IntOrString, s conversion.Scope) error { - temp := *in // copy - *out = &temp - return nil -} - // +k8s:conversion-fn=copy-only func Convert_v1_Time_To_v1_Time(in *Time, out *Time, s conversion.Scope) error { // Cannot deep copy these, because time.Time has unexported fields. 
@@ -199,30 +228,14 @@ func Convert_v1_Duration_To_Pointer_v1_Duration(in *Duration, out **Duration, s } // Convert_Slice_string_To_v1_Time allows converting a URL query parameter value -func Convert_Slice_string_To_v1_Time(in *[]string, out *Time, s conversion.Scope) error { +func Convert_Slice_string_To_v1_Time(input *[]string, out *Time, s conversion.Scope) error { str := "" - if len(*in) > 0 { - str = (*in)[0] + if len(*input) > 0 { + str = (*input)[0] } return out.UnmarshalQueryParameter(str) } -func Convert_Slice_string_To_Pointer_v1_Time(in *[]string, out **Time, s conversion.Scope) error { - if in == nil { - return nil - } - str := "" - if len(*in) > 0 { - str = (*in)[0] - } - temp := Time{} - if err := temp.UnmarshalQueryParameter(str); err != nil { - return err - } - *out = &temp - return nil -} - func Convert_string_To_labels_Selector(in *string, out *labels.Selector, s conversion.Scope) error { selector, err := labels.Parse(*in) if err != nil { @@ -295,53 +308,12 @@ func Convert_Slice_string_To_Slice_int32(in *[]string, out *[]int32, s conversio return nil } -// Convert_Slice_string_To_Pointer_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy -func Convert_Slice_string_To_Pointer_v1_DeletionPropagation(in *[]string, out **DeletionPropagation, s conversion.Scope) error { - var str string - if len(*in) > 0 { - str = (*in)[0] +// Convert_Slice_string_To_v1_DeletionPropagation allows converting a URL query parameter propagationPolicy +func Convert_Slice_string_To_v1_DeletionPropagation(input *[]string, out *DeletionPropagation, s conversion.Scope) error { + if len(*input) > 0 { + *out = DeletionPropagation((*input)[0]) } else { - str = "" - } - temp := DeletionPropagation(str) - *out = &temp - return nil -} - -// Convert_Slice_string_To_v1_IncludeObjectPolicy allows converting a URL query parameter value -func Convert_Slice_string_To_v1_IncludeObjectPolicy(in *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { - if len(*in) > 0 { - *out = IncludeObjectPolicy((*in)[0]) - } - return nil -} - -// Convert_url_Values_To_v1_DeleteOptions allows converting a URL to DeleteOptions. -func Convert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { - if err := autoConvert_url_Values_To_v1_DeleteOptions(in, out, s); err != nil { - return err - } - - uid := types.UID("") - if values, ok := (*in)["uid"]; ok && len(values) > 0 { - uid = types.UID(values[0]) - } - - resourceVersion := "" - if values, ok := (*in)["resourceVersion"]; ok && len(values) > 0 { - resourceVersion = values[0] - } - - if len(uid) > 0 || len(resourceVersion) > 0 { - if out.Preconditions == nil { - out.Preconditions = &Preconditions{} - } - if len(uid) > 0 { - out.Preconditions.UID = &uid - } - if len(resourceVersion) > 0 { - out.Preconditions.ResourceVersion = &resourceVersion - } + *out = "" } return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go index 7736753..61f201c 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/doc.go @@ -14,11 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// +k8s:conversion-gen=false // +k8s:deepcopy-gen=package // +k8s:openapi-gen=true // +k8s:defaulter-gen=TypeMeta // +groupName=meta.k8s.io - package v1 // import "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go index a22b078..2eaabf0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/duration.go @@ -48,18 +48,3 @@ func (d *Duration) UnmarshalJSON(b []byte) error { func (d Duration) MarshalJSON() ([]byte, error) { return json.Marshal(d.Duration.String()) } - -// ToUnstructured implements the value.UnstructuredConverter interface. -func (d Duration) ToUnstructured() interface{} { - return d.Duration.String() -} - -// OpenAPISchemaType is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -// -// See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (_ Duration) OpenAPISchemaType() []string { return []string{"string"} } - -// OpenAPISchemaFormat is used by the kube-openapi generator when constructing -// the OpenAPI spec of this type. -func (_ Duration) OpenAPISchemaFormat() string { return "" } diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go index 3288c56..b7508f0 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.pb.go @@ -14,28 +14,74 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +// DO NOT EDIT! +/* + Package v1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto + + It has these top-level messages: + APIGroup + APIGroupList + APIResource + APIResourceList + APIVersions + CreateOptions + DeleteOptions + Duration + ExportOptions + GetOptions + GroupKind + GroupResource + GroupVersion + GroupVersionForDiscovery + GroupVersionKind + GroupVersionResource + Initializer + Initializers + LabelSelector + LabelSelectorRequirement + List + ListMeta + ListOptions + MicroTime + ObjectMeta + OwnerReference + Patch + Preconditions + RootPaths + ServerAddressByClientCIDR + Status + StatusCause + StatusDetails + Time + Timestamp + TypeMeta + UpdateOptions + Verbs + WatchEvent +*/ package v1 -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" +import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" - proto "github.com/gogo/protobuf/proto" - github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - runtime "k8s.io/apimachinery/pkg/runtime" +import time "time" +import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" - time "time" +import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" -) +import strings "strings" +import reflect "reflect" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal @@ -47,1201 +93,169 @@ var _ = time.Kitchen // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *APIGroup) Reset() { *m = APIGroup{} } -func (*APIGroup) ProtoMessage() {} -func (*APIGroup) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{0} -} -func (m *APIGroup) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *APIGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *APIGroup) XXX_Merge(src proto.Message) { - xxx_messageInfo_APIGroup.Merge(m, src) -} -func (m *APIGroup) XXX_Size() int { - return m.Size() -} -func (m *APIGroup) XXX_DiscardUnknown() { - xxx_messageInfo_APIGroup.DiscardUnknown(m) -} - -var xxx_messageInfo_APIGroup proto.InternalMessageInfo - -func (m *APIGroupList) Reset() { *m = APIGroupList{} } -func (*APIGroupList) ProtoMessage() {} -func (*APIGroupList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{1} -} -func (m *APIGroupList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *APIGroupList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *APIGroupList) XXX_Merge(src proto.Message) { - xxx_messageInfo_APIGroupList.Merge(m, src) -} -func (m *APIGroupList) XXX_Size() int { - return m.Size() -} -func (m *APIGroupList) XXX_DiscardUnknown() { - xxx_messageInfo_APIGroupList.DiscardUnknown(m) -} - -var xxx_messageInfo_APIGroupList proto.InternalMessageInfo - -func (m *APIResource) Reset() { *m = APIResource{} } -func (*APIResource) ProtoMessage() {} -func (*APIResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{2} -} -func (m *APIResource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *APIResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *APIResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_APIResource.Merge(m, src) -} -func (m *APIResource) XXX_Size() int { - return m.Size() -} -func (m *APIResource) XXX_DiscardUnknown() { - xxx_messageInfo_APIResource.DiscardUnknown(m) -} - -var xxx_messageInfo_APIResource proto.InternalMessageInfo - -func (m *APIResourceList) Reset() { *m = APIResourceList{} } -func (*APIResourceList) ProtoMessage() {} -func (*APIResourceList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{3} -} -func (m *APIResourceList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *APIResourceList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *APIResourceList) XXX_Merge(src proto.Message) { - xxx_messageInfo_APIResourceList.Merge(m, src) -} -func (m *APIResourceList) XXX_Size() int { - return m.Size() -} -func (m *APIResourceList) XXX_DiscardUnknown() { - xxx_messageInfo_APIResourceList.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please 
upgrade the proto package -var xxx_messageInfo_APIResourceList proto.InternalMessageInfo +func (m *APIGroup) Reset() { *m = APIGroup{} } +func (*APIGroup) ProtoMessage() {} +func (*APIGroup) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -func (m *APIVersions) Reset() { *m = APIVersions{} } -func (*APIVersions) ProtoMessage() {} -func (*APIVersions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{4} -} -func (m *APIVersions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *APIVersions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *APIVersions) XXX_Merge(src proto.Message) { - xxx_messageInfo_APIVersions.Merge(m, src) -} -func (m *APIVersions) XXX_Size() int { - return m.Size() -} -func (m *APIVersions) XXX_DiscardUnknown() { - xxx_messageInfo_APIVersions.DiscardUnknown(m) -} - -var xxx_messageInfo_APIVersions proto.InternalMessageInfo - -func (m *CreateOptions) Reset() { *m = CreateOptions{} } -func (*CreateOptions) ProtoMessage() {} -func (*CreateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{5} -} -func (m *CreateOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *CreateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *CreateOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_CreateOptions.Merge(m, src) -} -func (m *CreateOptions) XXX_Size() int { - return m.Size() -} -func (m *CreateOptions) XXX_DiscardUnknown() { - xxx_messageInfo_CreateOptions.DiscardUnknown(m) -} +func (m *APIGroupList) Reset() { *m = APIGroupList{} } +func (*APIGroupList) ProtoMessage() {} +func (*APIGroupList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -var xxx_messageInfo_CreateOptions proto.InternalMessageInfo +func (m *APIResource) Reset() { *m = APIResource{} } +func (*APIResource) ProtoMessage() {} +func (*APIResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } -func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } -func (*DeleteOptions) ProtoMessage() {} -func (*DeleteOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{6} -} -func (m *DeleteOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *DeleteOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *DeleteOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_DeleteOptions.Merge(m, src) -} -func (m *DeleteOptions) XXX_Size() int { - return m.Size() -} -func (m *DeleteOptions) XXX_DiscardUnknown() { - xxx_messageInfo_DeleteOptions.DiscardUnknown(m) -} +func (m *APIResourceList) Reset() { *m = APIResourceList{} } +func (*APIResourceList) ProtoMessage() {} +func (*APIResourceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } -var xxx_messageInfo_DeleteOptions proto.InternalMessageInfo +func (m *APIVersions) Reset() { *m = APIVersions{} } +func (*APIVersions) ProtoMessage() {} +func (*APIVersions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } -func (m *Duration) Reset() { *m = Duration{} } -func (*Duration) 
ProtoMessage() {} -func (*Duration) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{7} -} -func (m *Duration) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Duration) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Duration) XXX_Merge(src proto.Message) { - xxx_messageInfo_Duration.Merge(m, src) -} -func (m *Duration) XXX_Size() int { - return m.Size() -} -func (m *Duration) XXX_DiscardUnknown() { - xxx_messageInfo_Duration.DiscardUnknown(m) -} +func (m *CreateOptions) Reset() { *m = CreateOptions{} } +func (*CreateOptions) ProtoMessage() {} +func (*CreateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } -var xxx_messageInfo_Duration proto.InternalMessageInfo +func (m *DeleteOptions) Reset() { *m = DeleteOptions{} } +func (*DeleteOptions) ProtoMessage() {} +func (*DeleteOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } -func (m *ExportOptions) Reset() { *m = ExportOptions{} } -func (*ExportOptions) ProtoMessage() {} -func (*ExportOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{8} -} -func (m *ExportOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ExportOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ExportOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ExportOptions.Merge(m, src) -} -func (m *ExportOptions) XXX_Size() int { - return m.Size() -} -func (m *ExportOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ExportOptions.DiscardUnknown(m) -} +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } -var xxx_messageInfo_ExportOptions proto.InternalMessageInfo +func (m *ExportOptions) Reset() { *m = ExportOptions{} } +func (*ExportOptions) ProtoMessage() {} +func (*ExportOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } -func (m *FieldsV1) Reset() { *m = FieldsV1{} } -func (*FieldsV1) ProtoMessage() {} -func (*FieldsV1) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{9} -} -func (m *FieldsV1) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *FieldsV1) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *FieldsV1) XXX_Merge(src proto.Message) { - xxx_messageInfo_FieldsV1.Merge(m, src) -} -func (m *FieldsV1) XXX_Size() int { - return m.Size() -} -func (m *FieldsV1) XXX_DiscardUnknown() { - xxx_messageInfo_FieldsV1.DiscardUnknown(m) -} +func (m *GetOptions) Reset() { *m = GetOptions{} } +func (*GetOptions) ProtoMessage() {} +func (*GetOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } -var xxx_messageInfo_FieldsV1 proto.InternalMessageInfo +func (m *GroupKind) Reset() { *m = GroupKind{} } +func (*GroupKind) ProtoMessage() {} +func (*GroupKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } -func (m *GetOptions) Reset() { *m = GetOptions{} } -func (*GetOptions) ProtoMessage() {} -func (*GetOptions) Descriptor() ([]byte, []int) 
{ - return fileDescriptor_cf52fa777ced5367, []int{10} -} -func (m *GetOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GetOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GetOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_GetOptions.Merge(m, src) -} -func (m *GetOptions) XXX_Size() int { - return m.Size() -} -func (m *GetOptions) XXX_DiscardUnknown() { - xxx_messageInfo_GetOptions.DiscardUnknown(m) -} +func (m *GroupResource) Reset() { *m = GroupResource{} } +func (*GroupResource) ProtoMessage() {} +func (*GroupResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } -var xxx_messageInfo_GetOptions proto.InternalMessageInfo - -func (m *GroupKind) Reset() { *m = GroupKind{} } -func (*GroupKind) ProtoMessage() {} -func (*GroupKind) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{11} -} -func (m *GroupKind) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GroupKind) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupKind.Merge(m, src) -} -func (m *GroupKind) XXX_Size() int { - return m.Size() -} -func (m *GroupKind) XXX_DiscardUnknown() { - xxx_messageInfo_GroupKind.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupKind proto.InternalMessageInfo - -func (m *GroupResource) Reset() { *m = GroupResource{} } -func (*GroupResource) ProtoMessage() {} -func (*GroupResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{12} -} -func (m *GroupResource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GroupResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupResource.Merge(m, src) -} -func (m *GroupResource) XXX_Size() int { - return m.Size() -} -func (m *GroupResource) XXX_DiscardUnknown() { - xxx_messageInfo_GroupResource.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupResource proto.InternalMessageInfo - -func (m *GroupVersion) Reset() { *m = GroupVersion{} } -func (*GroupVersion) ProtoMessage() {} -func (*GroupVersion) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{13} -} -func (m *GroupVersion) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupVersion) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GroupVersion) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupVersion.Merge(m, src) -} -func (m *GroupVersion) XXX_Size() int { - return m.Size() -} -func (m *GroupVersion) XXX_DiscardUnknown() { - xxx_messageInfo_GroupVersion.DiscardUnknown(m) -} - -var xxx_messageInfo_GroupVersion proto.InternalMessageInfo +func (m *GroupVersion) Reset() { *m = GroupVersion{} } +func (*GroupVersion) ProtoMessage() {} +func (*GroupVersion) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } func (m *GroupVersionForDiscovery) Reset() { *m = GroupVersionForDiscovery{} } func 
(*GroupVersionForDiscovery) ProtoMessage() {} func (*GroupVersionForDiscovery) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{14} -} -func (m *GroupVersionForDiscovery) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupVersionForDiscovery) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GroupVersionForDiscovery) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupVersionForDiscovery.Merge(m, src) -} -func (m *GroupVersionForDiscovery) XXX_Size() int { - return m.Size() -} -func (m *GroupVersionForDiscovery) XXX_DiscardUnknown() { - xxx_messageInfo_GroupVersionForDiscovery.DiscardUnknown(m) + return fileDescriptorGenerated, []int{13} } -var xxx_messageInfo_GroupVersionForDiscovery proto.InternalMessageInfo +func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } +func (*GroupVersionKind) ProtoMessage() {} +func (*GroupVersionKind) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } -func (m *GroupVersionKind) Reset() { *m = GroupVersionKind{} } -func (*GroupVersionKind) ProtoMessage() {} -func (*GroupVersionKind) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{15} -} -func (m *GroupVersionKind) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupVersionKind) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GroupVersionKind) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupVersionKind.Merge(m, src) -} -func (m *GroupVersionKind) XXX_Size() int { - return m.Size() -} -func (m *GroupVersionKind) XXX_DiscardUnknown() { - xxx_messageInfo_GroupVersionKind.DiscardUnknown(m) -} +func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } +func (*GroupVersionResource) ProtoMessage() {} +func (*GroupVersionResource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } -var xxx_messageInfo_GroupVersionKind proto.InternalMessageInfo +func (m *Initializer) Reset() { *m = Initializer{} } +func (*Initializer) ProtoMessage() {} +func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } -func (m *GroupVersionResource) Reset() { *m = GroupVersionResource{} } -func (*GroupVersionResource) ProtoMessage() {} -func (*GroupVersionResource) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{16} -} -func (m *GroupVersionResource) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *GroupVersionResource) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *GroupVersionResource) XXX_Merge(src proto.Message) { - xxx_messageInfo_GroupVersionResource.Merge(m, src) -} -func (m *GroupVersionResource) XXX_Size() int { - return m.Size() -} -func (m *GroupVersionResource) XXX_DiscardUnknown() { - xxx_messageInfo_GroupVersionResource.DiscardUnknown(m) -} +func (m *Initializers) Reset() { *m = Initializers{} } +func (*Initializers) ProtoMessage() {} +func (*Initializers) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } -var xxx_messageInfo_GroupVersionResource proto.InternalMessageInfo - -func (m *LabelSelector) Reset() { *m = 
LabelSelector{} } -func (*LabelSelector) ProtoMessage() {} -func (*LabelSelector) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{17} -} -func (m *LabelSelector) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelSelector) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LabelSelector) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelSelector.Merge(m, src) -} -func (m *LabelSelector) XXX_Size() int { - return m.Size() -} -func (m *LabelSelector) XXX_DiscardUnknown() { - xxx_messageInfo_LabelSelector.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelSelector proto.InternalMessageInfo +func (m *LabelSelector) Reset() { *m = LabelSelector{} } +func (*LabelSelector) ProtoMessage() {} +func (*LabelSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } func (m *LabelSelectorRequirement) Reset() { *m = LabelSelectorRequirement{} } func (*LabelSelectorRequirement) ProtoMessage() {} func (*LabelSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{18} -} -func (m *LabelSelectorRequirement) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *LabelSelectorRequirement) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *LabelSelectorRequirement) XXX_Merge(src proto.Message) { - xxx_messageInfo_LabelSelectorRequirement.Merge(m, src) -} -func (m *LabelSelectorRequirement) XXX_Size() int { - return m.Size() -} -func (m *LabelSelectorRequirement) XXX_DiscardUnknown() { - xxx_messageInfo_LabelSelectorRequirement.DiscardUnknown(m) -} - -var xxx_messageInfo_LabelSelectorRequirement proto.InternalMessageInfo - -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{19} -} -func (m *List) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *List) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *List) XXX_Merge(src proto.Message) { - xxx_messageInfo_List.Merge(m, src) -} -func (m *List) XXX_Size() int { - return m.Size() -} -func (m *List) XXX_DiscardUnknown() { - xxx_messageInfo_List.DiscardUnknown(m) -} - -var xxx_messageInfo_List proto.InternalMessageInfo - -func (m *ListMeta) Reset() { *m = ListMeta{} } -func (*ListMeta) ProtoMessage() {} -func (*ListMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{20} -} -func (m *ListMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ListMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListMeta.Merge(m, src) -} -func (m *ListMeta) XXX_Size() int { - return m.Size() -} -func (m *ListMeta) XXX_DiscardUnknown() { - xxx_messageInfo_ListMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_ListMeta proto.InternalMessageInfo - -func (m *ListOptions) Reset() { *m = ListOptions{} } -func (*ListOptions) ProtoMessage() {} -func (*ListOptions) 
Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{21} -} -func (m *ListOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ListOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ListOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_ListOptions.Merge(m, src) -} -func (m *ListOptions) XXX_Size() int { - return m.Size() -} -func (m *ListOptions) XXX_DiscardUnknown() { - xxx_messageInfo_ListOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_ListOptions proto.InternalMessageInfo - -func (m *ManagedFieldsEntry) Reset() { *m = ManagedFieldsEntry{} } -func (*ManagedFieldsEntry) ProtoMessage() {} -func (*ManagedFieldsEntry) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{22} -} -func (m *ManagedFieldsEntry) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ManagedFieldsEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ManagedFieldsEntry) XXX_Merge(src proto.Message) { - xxx_messageInfo_ManagedFieldsEntry.Merge(m, src) -} -func (m *ManagedFieldsEntry) XXX_Size() int { - return m.Size() -} -func (m *ManagedFieldsEntry) XXX_DiscardUnknown() { - xxx_messageInfo_ManagedFieldsEntry.DiscardUnknown(m) -} - -var xxx_messageInfo_ManagedFieldsEntry proto.InternalMessageInfo - -func (m *MicroTime) Reset() { *m = MicroTime{} } -func (*MicroTime) ProtoMessage() {} -func (*MicroTime) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{23} -} -func (m *MicroTime) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_MicroTime.Unmarshal(m, b) -} -func (m *MicroTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_MicroTime.Marshal(b, m, deterministic) -} -func (m *MicroTime) XXX_Merge(src proto.Message) { - xxx_messageInfo_MicroTime.Merge(m, src) -} -func (m *MicroTime) XXX_Size() int { - return xxx_messageInfo_MicroTime.Size(m) -} -func (m *MicroTime) XXX_DiscardUnknown() { - xxx_messageInfo_MicroTime.DiscardUnknown(m) -} - -var xxx_messageInfo_MicroTime proto.InternalMessageInfo - -func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } -func (*ObjectMeta) ProtoMessage() {} -func (*ObjectMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{24} -} -func (m *ObjectMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ObjectMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ObjectMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_ObjectMeta.Merge(m, src) -} -func (m *ObjectMeta) XXX_Size() int { - return m.Size() -} -func (m *ObjectMeta) XXX_DiscardUnknown() { - xxx_messageInfo_ObjectMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_ObjectMeta proto.InternalMessageInfo - -func (m *OwnerReference) Reset() { *m = OwnerReference{} } -func (*OwnerReference) ProtoMessage() {} -func (*OwnerReference) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{25} -} -func (m *OwnerReference) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *OwnerReference) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] 
- n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *OwnerReference) XXX_Merge(src proto.Message) { - xxx_messageInfo_OwnerReference.Merge(m, src) -} -func (m *OwnerReference) XXX_Size() int { - return m.Size() -} -func (m *OwnerReference) XXX_DiscardUnknown() { - xxx_messageInfo_OwnerReference.DiscardUnknown(m) -} - -var xxx_messageInfo_OwnerReference proto.InternalMessageInfo - -func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } -func (*PartialObjectMetadata) ProtoMessage() {} -func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{26} -} -func (m *PartialObjectMetadata) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PartialObjectMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PartialObjectMetadata) XXX_Merge(src proto.Message) { - xxx_messageInfo_PartialObjectMetadata.Merge(m, src) -} -func (m *PartialObjectMetadata) XXX_Size() int { - return m.Size() -} -func (m *PartialObjectMetadata) XXX_DiscardUnknown() { - xxx_messageInfo_PartialObjectMetadata.DiscardUnknown(m) -} - -var xxx_messageInfo_PartialObjectMetadata proto.InternalMessageInfo - -func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } -func (*PartialObjectMetadataList) ProtoMessage() {} -func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{27} -} -func (m *PartialObjectMetadataList) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PartialObjectMetadataList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PartialObjectMetadataList) XXX_Merge(src proto.Message) { - xxx_messageInfo_PartialObjectMetadataList.Merge(m, src) -} -func (m *PartialObjectMetadataList) XXX_Size() int { - return m.Size() -} -func (m *PartialObjectMetadataList) XXX_DiscardUnknown() { - xxx_messageInfo_PartialObjectMetadataList.DiscardUnknown(m) + return fileDescriptorGenerated, []int{19} } -var xxx_messageInfo_PartialObjectMetadataList proto.InternalMessageInfo +func (m *List) Reset() { *m = List{} } +func (*List) ProtoMessage() {} +func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } -func (m *Patch) Reset() { *m = Patch{} } -func (*Patch) ProtoMessage() {} -func (*Patch) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{28} -} -func (m *Patch) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Patch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Patch) XXX_Merge(src proto.Message) { - xxx_messageInfo_Patch.Merge(m, src) -} -func (m *Patch) XXX_Size() int { - return m.Size() -} -func (m *Patch) XXX_DiscardUnknown() { - xxx_messageInfo_Patch.DiscardUnknown(m) -} +func (m *ListMeta) Reset() { *m = ListMeta{} } +func (*ListMeta) ProtoMessage() {} +func (*ListMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } -var xxx_messageInfo_Patch proto.InternalMessageInfo +func (m *ListOptions) Reset() { *m = ListOptions{} } +func (*ListOptions) ProtoMessage() {} +func 
(*ListOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } -func (m *PatchOptions) Reset() { *m = PatchOptions{} } -func (*PatchOptions) ProtoMessage() {} -func (*PatchOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{29} -} -func (m *PatchOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *PatchOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *PatchOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_PatchOptions.Merge(m, src) -} -func (m *PatchOptions) XXX_Size() int { - return m.Size() -} -func (m *PatchOptions) XXX_DiscardUnknown() { - xxx_messageInfo_PatchOptions.DiscardUnknown(m) -} +func (m *MicroTime) Reset() { *m = MicroTime{} } +func (*MicroTime) ProtoMessage() {} +func (*MicroTime) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } -var xxx_messageInfo_PatchOptions proto.InternalMessageInfo +func (m *ObjectMeta) Reset() { *m = ObjectMeta{} } +func (*ObjectMeta) ProtoMessage() {} +func (*ObjectMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } -func (m *Preconditions) Reset() { *m = Preconditions{} } -func (*Preconditions) ProtoMessage() {} -func (*Preconditions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{30} -} -func (m *Preconditions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Preconditions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Preconditions) XXX_Merge(src proto.Message) { - xxx_messageInfo_Preconditions.Merge(m, src) -} -func (m *Preconditions) XXX_Size() int { - return m.Size() -} -func (m *Preconditions) XXX_DiscardUnknown() { - xxx_messageInfo_Preconditions.DiscardUnknown(m) -} +func (m *OwnerReference) Reset() { *m = OwnerReference{} } +func (*OwnerReference) ProtoMessage() {} +func (*OwnerReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } -var xxx_messageInfo_Preconditions proto.InternalMessageInfo +func (m *Patch) Reset() { *m = Patch{} } +func (*Patch) ProtoMessage() {} +func (*Patch) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } -func (m *RootPaths) Reset() { *m = RootPaths{} } -func (*RootPaths) ProtoMessage() {} -func (*RootPaths) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{31} -} -func (m *RootPaths) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RootPaths) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RootPaths) XXX_Merge(src proto.Message) { - xxx_messageInfo_RootPaths.Merge(m, src) -} -func (m *RootPaths) XXX_Size() int { - return m.Size() -} -func (m *RootPaths) XXX_DiscardUnknown() { - xxx_messageInfo_RootPaths.DiscardUnknown(m) -} +func (m *Preconditions) Reset() { *m = Preconditions{} } +func (*Preconditions) ProtoMessage() {} +func (*Preconditions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } -var xxx_messageInfo_RootPaths proto.InternalMessageInfo +func (m *RootPaths) Reset() { *m = RootPaths{} } +func (*RootPaths) ProtoMessage() {} +func (*RootPaths) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{28} } func (m *ServerAddressByClientCIDR) Reset() { *m = ServerAddressByClientCIDR{} } func (*ServerAddressByClientCIDR) ProtoMessage() {} func (*ServerAddressByClientCIDR) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{32} -} -func (m *ServerAddressByClientCIDR) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *ServerAddressByClientCIDR) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *ServerAddressByClientCIDR) XXX_Merge(src proto.Message) { - xxx_messageInfo_ServerAddressByClientCIDR.Merge(m, src) -} -func (m *ServerAddressByClientCIDR) XXX_Size() int { - return m.Size() -} -func (m *ServerAddressByClientCIDR) XXX_DiscardUnknown() { - xxx_messageInfo_ServerAddressByClientCIDR.DiscardUnknown(m) + return fileDescriptorGenerated, []int{29} } -var xxx_messageInfo_ServerAddressByClientCIDR proto.InternalMessageInfo +func (m *Status) Reset() { *m = Status{} } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } -func (m *Status) Reset() { *m = Status{} } -func (*Status) ProtoMessage() {} -func (*Status) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{33} -} -func (m *Status) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Status) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Status) XXX_Merge(src proto.Message) { - xxx_messageInfo_Status.Merge(m, src) -} -func (m *Status) XXX_Size() int { - return m.Size() -} -func (m *Status) XXX_DiscardUnknown() { - xxx_messageInfo_Status.DiscardUnknown(m) -} +func (m *StatusCause) Reset() { *m = StatusCause{} } +func (*StatusCause) ProtoMessage() {} +func (*StatusCause) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } -var xxx_messageInfo_Status proto.InternalMessageInfo +func (m *StatusDetails) Reset() { *m = StatusDetails{} } +func (*StatusDetails) ProtoMessage() {} +func (*StatusDetails) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } -func (m *StatusCause) Reset() { *m = StatusCause{} } -func (*StatusCause) ProtoMessage() {} -func (*StatusCause) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{34} -} -func (m *StatusCause) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusCause) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatusCause) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusCause.Merge(m, src) -} -func (m *StatusCause) XXX_Size() int { - return m.Size() -} -func (m *StatusCause) XXX_DiscardUnknown() { - xxx_messageInfo_StatusCause.DiscardUnknown(m) -} +func (m *Time) Reset() { *m = Time{} } +func (*Time) ProtoMessage() {} +func (*Time) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } -var xxx_messageInfo_StatusCause proto.InternalMessageInfo +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } -func (m *StatusDetails) Reset() { *m = 
StatusDetails{} } -func (*StatusDetails) ProtoMessage() {} -func (*StatusDetails) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{35} -} -func (m *StatusDetails) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *StatusDetails) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *StatusDetails) XXX_Merge(src proto.Message) { - xxx_messageInfo_StatusDetails.Merge(m, src) -} -func (m *StatusDetails) XXX_Size() int { - return m.Size() -} -func (m *StatusDetails) XXX_DiscardUnknown() { - xxx_messageInfo_StatusDetails.DiscardUnknown(m) -} +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } -var xxx_messageInfo_StatusDetails proto.InternalMessageInfo +func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } +func (*UpdateOptions) ProtoMessage() {} +func (*UpdateOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{36} -} -func (m *TableOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TableOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TableOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_TableOptions.Merge(m, src) -} -func (m *TableOptions) XXX_Size() int { - return m.Size() -} -func (m *TableOptions) XXX_DiscardUnknown() { - xxx_messageInfo_TableOptions.DiscardUnknown(m) -} - -var xxx_messageInfo_TableOptions proto.InternalMessageInfo - -func (m *Time) Reset() { *m = Time{} } -func (*Time) ProtoMessage() {} -func (*Time) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{37} -} -func (m *Time) XXX_Unmarshal(b []byte) error { - return xxx_messageInfo_Time.Unmarshal(m, b) -} -func (m *Time) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - return xxx_messageInfo_Time.Marshal(b, m, deterministic) -} -func (m *Time) XXX_Merge(src proto.Message) { - xxx_messageInfo_Time.Merge(m, src) -} -func (m *Time) XXX_Size() int { - return xxx_messageInfo_Time.Size(m) -} -func (m *Time) XXX_DiscardUnknown() { - xxx_messageInfo_Time.DiscardUnknown(m) -} - -var xxx_messageInfo_Time proto.InternalMessageInfo - -func (m *Timestamp) Reset() { *m = Timestamp{} } -func (*Timestamp) ProtoMessage() {} -func (*Timestamp) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{38} -} -func (m *Timestamp) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Timestamp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Timestamp) XXX_Merge(src proto.Message) { - xxx_messageInfo_Timestamp.Merge(m, src) -} -func (m *Timestamp) XXX_Size() int { - return m.Size() -} -func (m *Timestamp) XXX_DiscardUnknown() { - xxx_messageInfo_Timestamp.DiscardUnknown(m) -} - -var xxx_messageInfo_Timestamp proto.InternalMessageInfo - -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func 
(*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{39} -} -func (m *TypeMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TypeMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_TypeMeta.Merge(m, src) -} -func (m *TypeMeta) XXX_Size() int { - return m.Size() -} -func (m *TypeMeta) XXX_DiscardUnknown() { - xxx_messageInfo_TypeMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_TypeMeta proto.InternalMessageInfo - -func (m *UpdateOptions) Reset() { *m = UpdateOptions{} } -func (*UpdateOptions) ProtoMessage() {} -func (*UpdateOptions) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{40} -} -func (m *UpdateOptions) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *UpdateOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *UpdateOptions) XXX_Merge(src proto.Message) { - xxx_messageInfo_UpdateOptions.Merge(m, src) -} -func (m *UpdateOptions) XXX_Size() int { - return m.Size() -} -func (m *UpdateOptions) XXX_DiscardUnknown() { - xxx_messageInfo_UpdateOptions.DiscardUnknown(m) -} +func (m *Verbs) Reset() { *m = Verbs{} } +func (*Verbs) ProtoMessage() {} +func (*Verbs) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } -var xxx_messageInfo_UpdateOptions proto.InternalMessageInfo - -func (m *Verbs) Reset() { *m = Verbs{} } -func (*Verbs) ProtoMessage() {} -func (*Verbs) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{41} -} -func (m *Verbs) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Verbs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Verbs) XXX_Merge(src proto.Message) { - xxx_messageInfo_Verbs.Merge(m, src) -} -func (m *Verbs) XXX_Size() int { - return m.Size() -} -func (m *Verbs) XXX_DiscardUnknown() { - xxx_messageInfo_Verbs.DiscardUnknown(m) -} - -var xxx_messageInfo_Verbs proto.InternalMessageInfo - -func (m *WatchEvent) Reset() { *m = WatchEvent{} } -func (*WatchEvent) ProtoMessage() {} -func (*WatchEvent) Descriptor() ([]byte, []int) { - return fileDescriptor_cf52fa777ced5367, []int{42} -} -func (m *WatchEvent) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *WatchEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *WatchEvent) XXX_Merge(src proto.Message) { - xxx_messageInfo_WatchEvent.Merge(m, src) -} -func (m *WatchEvent) XXX_Size() int { - return m.Size() -} -func (m *WatchEvent) XXX_DiscardUnknown() { - xxx_messageInfo_WatchEvent.DiscardUnknown(m) -} - -var xxx_messageInfo_WatchEvent proto.InternalMessageInfo +func (m *WatchEvent) Reset() { *m = WatchEvent{} } +func (*WatchEvent) ProtoMessage() {} +func (*WatchEvent) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } func init() { proto.RegisterType((*APIGroup)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.APIGroup") @@ -1253,7 +267,6 @@ func init() { 
proto.RegisterType((*DeleteOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions") proto.RegisterType((*Duration)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Duration") proto.RegisterType((*ExportOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ExportOptions") - proto.RegisterType((*FieldsV1)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.FieldsV1") proto.RegisterType((*GetOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GetOptions") proto.RegisterType((*GroupKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupKind") proto.RegisterType((*GroupResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupResource") @@ -1261,29 +274,23 @@ func init() { proto.RegisterType((*GroupVersionForDiscovery)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionForDiscovery") proto.RegisterType((*GroupVersionKind)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionKind") proto.RegisterType((*GroupVersionResource)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.GroupVersionResource") + proto.RegisterType((*Initializer)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializer") + proto.RegisterType((*Initializers)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Initializers") proto.RegisterType((*LabelSelector)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector.MatchLabelsEntry") proto.RegisterType((*LabelSelectorRequirement)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelectorRequirement") proto.RegisterType((*List)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.List") proto.RegisterType((*ListMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta") proto.RegisterType((*ListOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ListOptions") - proto.RegisterType((*ManagedFieldsEntry)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry") proto.RegisterType((*MicroTime)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime") proto.RegisterType((*ObjectMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.AnnotationsEntry") - proto.RegisterMapType((map[string]string)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta.LabelsEntry") proto.RegisterType((*OwnerReference)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference") - proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadata") - proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PartialObjectMetadataList") proto.RegisterType((*Patch)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Patch") - proto.RegisterType((*PatchOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.PatchOptions") proto.RegisterType((*Preconditions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Preconditions") proto.RegisterType((*RootPaths)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.RootPaths") proto.RegisterType((*ServerAddressByClientCIDR)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.ServerAddressByClientCIDR") proto.RegisterType((*Status)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Status") proto.RegisterType((*StatusCause)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusCause") proto.RegisterType((*StatusDetails)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.StatusDetails") - proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TableOptions") proto.RegisterType((*Time)(nil), 
"k8s.io.apimachinery.pkg.apis.meta.v1.Time") proto.RegisterType((*Timestamp)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Timestamp") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.TypeMeta") @@ -1291,189 +298,10 @@ func init() { proto.RegisterType((*Verbs)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.Verbs") proto.RegisterType((*WatchEvent)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1.WatchEvent") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptor_cf52fa777ced5367) -} - -var fileDescriptor_cf52fa777ced5367 = []byte{ - // 2713 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x19, 0xcd, 0x6f, 0x1b, 0x59, - 0x3d, 0x63, 0xc7, 0x8e, 0xfd, 0x73, 0x9c, 0x8f, 0x97, 0x16, 0xdc, 0x00, 0x71, 0x76, 0x16, 0xad, - 0x52, 0xe8, 0x3a, 0x9b, 0x02, 0xab, 0xd2, 0x65, 0x0b, 0x71, 0x9c, 0x74, 0xc3, 0x36, 0x4d, 0xf4, - 0xd2, 0x16, 0x28, 0x15, 0xea, 0x64, 0xe6, 0xc5, 0x19, 0x32, 0x9e, 0xf1, 0xbe, 0x19, 0x27, 0x35, - 0x1c, 0xd8, 0x03, 0x08, 0x90, 0x60, 0xd5, 0x23, 0xe2, 0x80, 0xb6, 0x82, 0xbf, 0x80, 0x13, 0x7f, - 0x00, 0x12, 0xbd, 0x20, 0xad, 0xc4, 0x65, 0x25, 0x90, 0xb5, 0x0d, 0x07, 0x8e, 0x88, 0x6b, 0x4e, - 0xe8, 0x7d, 0xcd, 0x87, 0x1d, 0x37, 0x63, 0xba, 0xac, 0xf6, 0xe6, 0xf9, 0x7d, 0xff, 0xde, 0xfb, - 0xbd, 0xdf, 0x97, 0x61, 0xeb, 0xf0, 0x9a, 0x5f, 0xb3, 0xbd, 0xe5, 0xc3, 0xce, 0x1e, 0xa1, 0x2e, - 0x09, 0x88, 0xbf, 0x7c, 0x44, 0x5c, 0xcb, 0xa3, 0xcb, 0x12, 0x61, 0xb4, 0xed, 0x96, 0x61, 0x1e, - 0xd8, 0x2e, 0xa1, 0xdd, 0xe5, 0xf6, 0x61, 0x93, 0x01, 0xfc, 0xe5, 0x16, 0x09, 0x8c, 0xe5, 0xa3, - 0x95, 0xe5, 0x26, 0x71, 0x09, 0x35, 0x02, 0x62, 0xd5, 0xda, 0xd4, 0x0b, 0x3c, 0xf4, 0x45, 0xc1, - 0x55, 0x8b, 0x73, 0xd5, 0xda, 0x87, 0x4d, 0x06, 0xf0, 0x6b, 0x8c, 0xab, 0x76, 0xb4, 0x32, 0xff, - 0x6a, 0xd3, 0x0e, 0x0e, 0x3a, 0x7b, 0x35, 0xd3, 0x6b, 0x2d, 0x37, 0xbd, 0xa6, 0xb7, 0xcc, 0x99, - 0xf7, 0x3a, 0xfb, 0xfc, 0x8b, 0x7f, 0xf0, 0x5f, 0x42, 0xe8, 0xfc, 0x50, 0x53, 0x68, 0xc7, 0x0d, - 0xec, 0x16, 0xe9, 0xb7, 0x62, 0xfe, 0xf5, 0xf3, 0x18, 0x7c, 0xf3, 0x80, 0xb4, 0x8c, 0x7e, 0x3e, - 0xfd, 0x2f, 0x59, 0x28, 0xac, 0xee, 0x6c, 0xde, 0xa4, 0x5e, 0xa7, 0x8d, 0x16, 0x61, 0xdc, 0x35, - 0x5a, 0xa4, 0xa2, 0x2d, 0x6a, 0x4b, 0xc5, 0xfa, 0xe4, 0xd3, 0x5e, 0x75, 0xec, 0xa4, 0x57, 0x1d, - 0xbf, 0x6d, 0xb4, 0x08, 0xe6, 0x18, 0xe4, 0x40, 0xe1, 0x88, 0x50, 0xdf, 0xf6, 0x5c, 0xbf, 0x92, - 0x59, 0xcc, 0x2e, 0x95, 0xae, 0xde, 0xa8, 0xa5, 0xf1, 0xbf, 0xc6, 0x15, 0xdc, 0x13, 0xac, 0x1b, - 0x1e, 0x6d, 0xd8, 0xbe, 0xe9, 0x1d, 0x11, 0xda, 0xad, 0xcf, 0x48, 0x2d, 0x05, 0x89, 0xf4, 0x71, - 0xa8, 0x01, 0xfd, 0x54, 0x83, 0x99, 0x36, 0x25, 0xfb, 0x84, 0x52, 0x62, 0x49, 0x7c, 0x25, 0xbb, - 0xa8, 0x7d, 0x0c, 0x6a, 0x2b, 0x52, 0xed, 0xcc, 0x4e, 0x9f, 0x7c, 0x3c, 0xa0, 0x11, 0xfd, 0x5e, - 0x83, 0x79, 0x9f, 0xd0, 0x23, 0x42, 0x57, 0x2d, 0x8b, 0x12, 0xdf, 0xaf, 0x77, 0xd7, 0x1c, 0x9b, - 0xb8, 0xc1, 0xda, 0x66, 0x03, 0xfb, 0x95, 0x71, 0x7e, 0x0e, 0xdf, 0x4c, 0x67, 0xd0, 0xee, 0x30, - 0x39, 0x75, 0x5d, 0x5a, 0x34, 0x3f, 0x94, 0xc4, 0xc7, 0xcf, 0x31, 0x43, 0xdf, 0x87, 0x49, 0x75, - 0x91, 0xb7, 0x6c, 0x3f, 0x40, 0xf7, 0x20, 0xdf, 0x64, 0x1f, 0x7e, 0x45, 0xe3, 0x06, 0xd6, 0xd2, - 0x19, 0xa8, 0x64, 0xd4, 0xa7, 0xa4, 0x3d, 0x79, 0xfe, 0xe9, 0x63, 0x29, 0x4d, 0xff, 0xe5, 0x38, - 0x94, 0x56, 0x77, 0x36, 0x31, 0xf1, 0xbd, 0x0e, 0x35, 0x49, 0x8a, 0xa0, 0xb9, 0x06, 0x93, 0xbe, - 0xed, 0x36, 0x3b, 0x8e, 0x41, 0x19, 0xb4, 0x92, 0xe7, 0x94, 0x17, 0x24, 0xe5, 0xe4, 0x6e, 0x0c, - 0x87, 0x13, 0x94, 0xe8, 
0x2a, 0x00, 0x93, 0xe0, 0xb7, 0x0d, 0x93, 0x58, 0x95, 0xcc, 0xa2, 0xb6, - 0x54, 0xa8, 0x23, 0xc9, 0x07, 0xb7, 0x43, 0x0c, 0x8e, 0x51, 0xa1, 0x97, 0x21, 0xc7, 0x2d, 0xad, - 0x14, 0xb8, 0x9a, 0xb2, 0x24, 0xcf, 0x71, 0x37, 0xb0, 0xc0, 0xa1, 0xcb, 0x30, 0x21, 0xa3, 0xac, - 0x52, 0xe4, 0x64, 0xd3, 0x92, 0x6c, 0x42, 0x85, 0x81, 0xc2, 0x33, 0xff, 0x0e, 0x6d, 0xd7, 0xe2, - 0x71, 0x17, 0xf3, 0xef, 0x6d, 0xdb, 0xb5, 0x30, 0xc7, 0xa0, 0x5b, 0x90, 0x3b, 0x22, 0x74, 0x8f, - 0x45, 0x02, 0x0b, 0xcd, 0x2f, 0xa7, 0x3b, 0xe8, 0x7b, 0x8c, 0xa5, 0x5e, 0x64, 0xa6, 0xf1, 0x9f, - 0x58, 0x08, 0x41, 0x35, 0x00, 0xff, 0xc0, 0xa3, 0x01, 0x77, 0xaf, 0x92, 0x5b, 0xcc, 0x2e, 0x15, - 0xeb, 0x53, 0xcc, 0xdf, 0xdd, 0x10, 0x8a, 0x63, 0x14, 0x8c, 0xde, 0x34, 0x02, 0xd2, 0xf4, 0xa8, - 0x4d, 0xfc, 0xca, 0x44, 0x44, 0xbf, 0x16, 0x42, 0x71, 0x8c, 0x02, 0x7d, 0x1b, 0x90, 0x1f, 0x78, - 0xd4, 0x68, 0x12, 0xe9, 0xea, 0x5b, 0x86, 0x7f, 0x50, 0x01, 0xee, 0xdd, 0xbc, 0xf4, 0x0e, 0xed, - 0x0e, 0x50, 0xe0, 0x33, 0xb8, 0xf4, 0x3f, 0x6a, 0x30, 0x1d, 0x8b, 0x05, 0x1e, 0x77, 0xd7, 0x60, - 0xb2, 0x19, 0x7b, 0x75, 0x32, 0x2e, 0xc2, 0xdb, 0x8e, 0xbf, 0x48, 0x9c, 0xa0, 0x44, 0x04, 0x8a, - 0x54, 0x4a, 0x52, 0xd9, 0x65, 0x25, 0x75, 0xd0, 0x2a, 0x1b, 0x22, 0x4d, 0x31, 0xa0, 0x8f, 0x23, - 0xc9, 0xfa, 0xbf, 0x34, 0x1e, 0xc0, 0x2a, 0xdf, 0xa0, 0xa5, 0x58, 0x4e, 0xd3, 0xf8, 0xf1, 0x4d, - 0x0e, 0xc9, 0x47, 0xe7, 0x24, 0x82, 0xcc, 0xa7, 0x22, 0x11, 0x5c, 0x2f, 0xfc, 0xe6, 0xfd, 0xea, - 0xd8, 0xbb, 0xff, 0x58, 0x1c, 0xd3, 0x5b, 0x50, 0x5e, 0xa3, 0xc4, 0x08, 0xc8, 0x76, 0x3b, 0xe0, - 0x0e, 0xe8, 0x90, 0xb7, 0x68, 0x17, 0x77, 0x5c, 0xe9, 0x28, 0xb0, 0xf7, 0xdd, 0xe0, 0x10, 0x2c, - 0x31, 0xec, 0xfe, 0xf6, 0x6d, 0xe2, 0x58, 0x5b, 0x86, 0x6b, 0x34, 0x09, 0x95, 0x71, 0x1f, 0x9e, - 0xea, 0x46, 0x0c, 0x87, 0x13, 0x94, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, 0x10, 0x87, 0x44, 0xfa, 0x36, - 0x00, 0x35, 0xa9, 0x61, 0x92, 0x1d, 0x42, 0x6d, 0xcf, 0xda, 0x25, 0xa6, 0xe7, 0x5a, 0x3e, 0x8f, - 0x88, 0x6c, 0xfd, 0x33, 0x2c, 0xce, 0x6e, 0x0e, 0x60, 0xf1, 0x19, 0x1c, 0xc8, 0x81, 0x72, 0x9b, - 0xf2, 0xdf, 0x76, 0x20, 0x6b, 0x0f, 0x7b, 0x69, 0x5f, 0x49, 0x77, 0xd4, 0x3b, 0x71, 0xd6, 0xfa, - 0xec, 0x49, 0xaf, 0x5a, 0x4e, 0x80, 0x70, 0x52, 0x38, 0xfa, 0x16, 0xcc, 0x78, 0xb4, 0x7d, 0x60, - 0xb8, 0x0d, 0xd2, 0x26, 0xae, 0x45, 0xdc, 0xc0, 0xe7, 0xa7, 0x50, 0xa8, 0x5f, 0x60, 0x15, 0x63, - 0xbb, 0x0f, 0x87, 0x07, 0xa8, 0xd1, 0x7d, 0x98, 0x6d, 0x53, 0xaf, 0x6d, 0x34, 0x0d, 0x26, 0x71, - 0xc7, 0x73, 0x6c, 0xb3, 0xcb, 0xb3, 0x43, 0xb1, 0x7e, 0xe5, 0xa4, 0x57, 0x9d, 0xdd, 0xe9, 0x47, - 0x9e, 0xf6, 0xaa, 0x73, 0xfc, 0xe8, 0x18, 0x24, 0x42, 0xe2, 0x41, 0x31, 0xb1, 0x3b, 0xcc, 0x0d, - 0xbb, 0x43, 0x7d, 0x13, 0x0a, 0x8d, 0x0e, 0xe5, 0x5c, 0xe8, 0x4d, 0x28, 0x58, 0xf2, 0xb7, 0x3c, - 0xf9, 0x97, 0x54, 0xc9, 0x55, 0x34, 0xa7, 0xbd, 0x6a, 0x99, 0x35, 0x09, 0x35, 0x05, 0xc0, 0x21, - 0x8b, 0xfe, 0x00, 0xca, 0xeb, 0x8f, 0xda, 0x1e, 0x0d, 0xd4, 0x9d, 0xbe, 0x02, 0x79, 0xc2, 0x01, - 0x5c, 0x5a, 0x21, 0xaa, 0x13, 0x82, 0x0c, 0x4b, 0x2c, 0xcb, 0xc3, 0xe4, 0x91, 0x61, 0x06, 0x32, - 0x6d, 0x87, 0x79, 0x78, 0x9d, 0x01, 0xb1, 0xc0, 0xe9, 0x9f, 0x87, 0x02, 0x0f, 0x28, 0xff, 0xde, - 0x0a, 0x9a, 0x81, 0x2c, 0x36, 0x8e, 0xb9, 0xd4, 0x49, 0x9c, 0xa5, 0xc6, 0xb1, 0xbe, 0x0d, 0x70, - 0x93, 0x84, 0x8a, 0x57, 0x61, 0x5a, 0x3d, 0xe2, 0x64, 0x6e, 0xf9, 0xac, 0x14, 0x3d, 0x8d, 0x93, - 0x68, 0xdc, 0x4f, 0xaf, 0x3f, 0x80, 0x22, 0xcf, 0x3f, 0x2c, 0x79, 0x47, 0x85, 0x42, 0x7b, 0x4e, - 0xa1, 0x50, 0xd9, 0x3f, 0x33, 0x2c, 0xfb, 0xc7, 0x9e, 0x9b, 0x03, 0x65, 0xc1, 0xab, 0x4a, 0x63, - 0x2a, 0x0d, 0x57, 0xa0, 0xa0, 0xcc, 0x94, 0x5a, 
0xc2, 0x96, 0x48, 0x09, 0xc2, 0x21, 0x45, 0x4c, - 0xdb, 0x01, 0x24, 0x72, 0x69, 0x3a, 0x65, 0xb1, 0xba, 0x97, 0x79, 0x7e, 0xdd, 0x8b, 0x69, 0xfa, - 0x09, 0x54, 0x86, 0xf5, 0x51, 0x2f, 0x90, 0xed, 0xd3, 0x9b, 0xa2, 0xbf, 0xa7, 0xc1, 0x4c, 0x5c, - 0x52, 0xfa, 0xeb, 0x4b, 0xaf, 0xe4, 0xfc, 0x3a, 0x1f, 0x3b, 0x91, 0xdf, 0x69, 0x70, 0x21, 0xe1, - 0xda, 0x48, 0x37, 0x3e, 0x82, 0x51, 0xf1, 0xe0, 0xc8, 0x8e, 0x10, 0x1c, 0x7f, 0xcb, 0x40, 0xf9, - 0x96, 0xb1, 0x47, 0x9c, 0x5d, 0xe2, 0x10, 0x33, 0xf0, 0x28, 0xfa, 0x31, 0x94, 0x5a, 0x46, 0x60, - 0x1e, 0x70, 0xa8, 0xea, 0x09, 0x1b, 0xe9, 0x12, 0x68, 0x42, 0x52, 0x6d, 0x2b, 0x12, 0xb3, 0xee, - 0x06, 0xb4, 0x5b, 0x9f, 0x93, 0x26, 0x95, 0x62, 0x18, 0x1c, 0xd7, 0xc6, 0x1b, 0x79, 0xfe, 0xbd, - 0xfe, 0xa8, 0xcd, 0x0a, 0xd6, 0xe8, 0xf3, 0x43, 0xc2, 0x04, 0x4c, 0xde, 0xe9, 0xd8, 0x94, 0xb4, - 0x88, 0x1b, 0x44, 0x8d, 0xfc, 0x56, 0x9f, 0x7c, 0x3c, 0xa0, 0x71, 0xfe, 0x06, 0xcc, 0xf4, 0x1b, - 0xcf, 0xb2, 0xce, 0x21, 0xe9, 0x8a, 0xfb, 0xc2, 0xec, 0x27, 0xba, 0x00, 0xb9, 0x23, 0xc3, 0xe9, - 0xc8, 0xd7, 0x88, 0xc5, 0xc7, 0xf5, 0xcc, 0x35, 0x4d, 0xff, 0x83, 0x06, 0x95, 0x61, 0x86, 0xa0, - 0x2f, 0xc4, 0x04, 0xd5, 0x4b, 0xd2, 0xaa, 0xec, 0xdb, 0xa4, 0x2b, 0xa4, 0xae, 0x43, 0xc1, 0x6b, - 0xb3, 0xd1, 0xcb, 0xa3, 0xf2, 0xd6, 0x2f, 0xab, 0x9b, 0xdc, 0x96, 0xf0, 0xd3, 0x5e, 0xf5, 0x62, - 0x42, 0xbc, 0x42, 0xe0, 0x90, 0x95, 0x65, 0x7f, 0x6e, 0x0f, 0xab, 0x48, 0x61, 0xf6, 0xbf, 0xc7, - 0x21, 0x58, 0x62, 0xf4, 0x3f, 0x69, 0x30, 0xce, 0x5b, 0xb1, 0x07, 0x50, 0x60, 0xe7, 0x67, 0x19, - 0x81, 0xc1, 0xed, 0x4a, 0x3d, 0x04, 0x30, 0xee, 0x2d, 0x12, 0x18, 0x51, 0xb4, 0x29, 0x08, 0x0e, - 0x25, 0x22, 0x0c, 0x39, 0x3b, 0x20, 0x2d, 0x75, 0x91, 0xaf, 0x0e, 0x15, 0x2d, 0x47, 0xd0, 0x1a, - 0x36, 0x8e, 0xd7, 0x1f, 0x05, 0xc4, 0x65, 0x97, 0x11, 0x3d, 0x8d, 0x4d, 0x26, 0x03, 0x0b, 0x51, - 0xfa, 0x7f, 0x34, 0x08, 0x55, 0xb1, 0xe0, 0xf7, 0x89, 0xb3, 0x7f, 0xcb, 0x76, 0x0f, 0xe5, 0xb1, - 0x86, 0xe6, 0xec, 0x4a, 0x38, 0x0e, 0x29, 0xce, 0x2a, 0x0f, 0x99, 0xd1, 0xca, 0x03, 0x53, 0x68, - 0x7a, 0x6e, 0x60, 0xbb, 0x9d, 0x81, 0xd7, 0xb6, 0x26, 0xe1, 0x38, 0xa4, 0x60, 0xcd, 0x0d, 0x25, - 0x2d, 0xc3, 0x76, 0x6d, 0xb7, 0xc9, 0x9c, 0x58, 0xf3, 0x3a, 0x6e, 0xc0, 0xab, 0xbc, 0x6c, 0x6e, - 0xf0, 0x00, 0x16, 0x9f, 0xc1, 0xa1, 0xff, 0x35, 0x0b, 0x25, 0xe6, 0xb3, 0xaa, 0x73, 0x6f, 0x40, - 0xd9, 0x89, 0x47, 0x81, 0xf4, 0xfd, 0xa2, 0x34, 0x25, 0xf9, 0xae, 0x71, 0x92, 0x96, 0x31, 0xf3, - 0x9e, 0x2c, 0x64, 0xce, 0x24, 0x99, 0x37, 0xe2, 0x48, 0x9c, 0xa4, 0x65, 0xd9, 0xeb, 0x98, 0xbd, - 0x0f, 0xd9, 0xed, 0x84, 0x57, 0xf4, 0x1d, 0x06, 0xc4, 0x02, 0x87, 0xb6, 0x60, 0xce, 0x70, 0x1c, - 0xef, 0x98, 0x03, 0xeb, 0x9e, 0x77, 0xd8, 0x32, 0xe8, 0xa1, 0xcf, 0xc7, 0xa8, 0x42, 0xfd, 0x73, - 0x92, 0x65, 0x6e, 0x75, 0x90, 0x04, 0x9f, 0xc5, 0x77, 0xd6, 0xb5, 0x8d, 0x8f, 0x78, 0x6d, 0xd7, - 0x61, 0x8a, 0xc5, 0x97, 0xd7, 0x09, 0x54, 0x87, 0x99, 0xe3, 0x97, 0x80, 0x4e, 0x7a, 0xd5, 0xa9, - 0x3b, 0x09, 0x0c, 0xee, 0xa3, 0x64, 0x2e, 0x3b, 0x76, 0xcb, 0x0e, 0x2a, 0x13, 0x9c, 0x25, 0x74, - 0xf9, 0x16, 0x03, 0x62, 0x81, 0x4b, 0xc4, 0x45, 0xe1, 0xbc, 0xb8, 0xd0, 0x7f, 0x9b, 0x05, 0x24, - 0x5a, 0x62, 0x4b, 0xf4, 0x36, 0x22, 0xd1, 0x5c, 0x86, 0x89, 0x96, 0x6c, 0xa9, 0xb5, 0x64, 0xd6, - 0x57, 0xdd, 0xb4, 0xc2, 0xa3, 0x2d, 0x28, 0x8a, 0x07, 0x1f, 0x05, 0xf1, 0xb2, 0x24, 0x2e, 0x6e, - 0x2b, 0xc4, 0x69, 0xaf, 0x3a, 0x9f, 0x50, 0x13, 0x62, 0xee, 0x74, 0xdb, 0x04, 0x47, 0x12, 0xd8, - 0x14, 0x6d, 0xb4, 0xed, 0xf8, 0xfe, 0xa4, 0x18, 0x4d, 0xd1, 0xd1, 0x24, 0x84, 0x63, 0x54, 0xe8, - 0x2d, 0x18, 0x67, 0x27, 0x25, 0x47, 0xda, 0x2f, 0xa5, 0x4b, 0x1b, 0xec, 
0xac, 0xeb, 0x05, 0x56, - 0x35, 0xd9, 0x2f, 0xcc, 0x25, 0x30, 0xed, 0x3c, 0xca, 0x7c, 0x66, 0x96, 0x9c, 0xfd, 0x43, 0xed, - 0x1b, 0x21, 0x06, 0xc7, 0xa8, 0xd0, 0x77, 0xa1, 0xb0, 0x2f, 0xdb, 0x42, 0x7e, 0x31, 0xa9, 0x13, - 0x97, 0x6a, 0x26, 0xc5, 0x08, 0xa7, 0xbe, 0x70, 0x28, 0x4d, 0x7f, 0x07, 0x8a, 0x5b, 0xb6, 0x49, - 0x3d, 0x66, 0x20, 0xbb, 0x12, 0x3f, 0x31, 0x93, 0x84, 0x57, 0xa2, 0xc2, 0x45, 0xe1, 0x59, 0x9c, - 0xb8, 0x86, 0xeb, 0x89, 0xc9, 0x23, 0x17, 0xc5, 0xc9, 0x6d, 0x06, 0xc4, 0x02, 0x77, 0xfd, 0x02, - 0xab, 0xbf, 0xbf, 0x78, 0x52, 0x1d, 0x7b, 0xfc, 0xa4, 0x3a, 0xf6, 0xfe, 0x13, 0x59, 0x8b, 0x4f, - 0x01, 0x60, 0x7b, 0xef, 0x87, 0xc4, 0x14, 0x59, 0x2d, 0xd5, 0xbe, 0x44, 0xad, 0xe9, 0xf8, 0xbe, - 0x24, 0xd3, 0xd7, 0x53, 0xc5, 0x70, 0x38, 0x41, 0x89, 0x96, 0xa1, 0x18, 0x6e, 0x42, 0xe4, 0x45, - 0xcf, 0xaa, 0xc0, 0x09, 0xd7, 0x25, 0x38, 0xa2, 0x49, 0xa4, 0xd8, 0xf1, 0x73, 0x53, 0x6c, 0x1d, - 0xb2, 0x1d, 0xdb, 0xe2, 0xaf, 0xab, 0x58, 0x7f, 0x4d, 0x95, 0xb8, 0xbb, 0x9b, 0x8d, 0xd3, 0x5e, - 0xf5, 0xa5, 0x61, 0x0b, 0xc8, 0xa0, 0xdb, 0x26, 0x7e, 0xed, 0xee, 0x66, 0x03, 0x33, 0xe6, 0xb3, - 0xde, 0x7b, 0x7e, 0xc4, 0xf7, 0x7e, 0x15, 0x40, 0x7a, 0xcd, 0xb8, 0xc5, 0xc3, 0x0d, 0x23, 0xea, - 0x66, 0x88, 0xc1, 0x31, 0x2a, 0xe4, 0xc3, 0xac, 0xc9, 0x46, 0x61, 0xf6, 0x3c, 0xec, 0x16, 0xf1, - 0x03, 0xa3, 0x25, 0x36, 0x44, 0xa3, 0x05, 0xf7, 0x25, 0xa9, 0x66, 0x76, 0xad, 0x5f, 0x18, 0x1e, - 0x94, 0x8f, 0x3c, 0x98, 0xb5, 0xe4, 0x50, 0x17, 0x29, 0x2d, 0x8e, 0xac, 0xf4, 0x22, 0x53, 0xd8, - 0xe8, 0x17, 0x84, 0x07, 0x65, 0xa3, 0x1f, 0xc0, 0xbc, 0x02, 0x0e, 0x4e, 0xd6, 0x7c, 0xc7, 0x93, - 0xad, 0x2f, 0x9c, 0xf4, 0xaa, 0xf3, 0x8d, 0xa1, 0x54, 0xf8, 0x39, 0x12, 0x90, 0x05, 0x79, 0x47, - 0xf4, 0x8f, 0x25, 0x5e, 0xf3, 0xbf, 0x91, 0xce, 0x8b, 0x28, 0xfa, 0x6b, 0xf1, 0xbe, 0x31, 0x9c, - 0x1c, 0x65, 0xcb, 0x28, 0x65, 0xa3, 0x47, 0x50, 0x32, 0x5c, 0xd7, 0x0b, 0x0c, 0x31, 0xeb, 0x4f, - 0x72, 0x55, 0xab, 0x23, 0xab, 0x5a, 0x8d, 0x64, 0xf4, 0xf5, 0xa9, 0x31, 0x0c, 0x8e, 0xab, 0x42, - 0xc7, 0x30, 0xed, 0x1d, 0xbb, 0x84, 0x62, 0xb2, 0x4f, 0x28, 0x71, 0x4d, 0xe2, 0x57, 0xca, 0x5c, - 0xfb, 0x57, 0x53, 0x6a, 0x4f, 0x30, 0x47, 0x21, 0x9d, 0x84, 0xfb, 0xb8, 0x5f, 0x0b, 0xaa, 0xb1, - 0x24, 0xe9, 0x1a, 0x8e, 0xfd, 0x23, 0x42, 0xfd, 0xca, 0x54, 0xb4, 0xc4, 0xdb, 0x08, 0xa1, 0x38, - 0x46, 0x81, 0xbe, 0x06, 0x25, 0xd3, 0xe9, 0xf8, 0x01, 0x11, 0x1b, 0xd5, 0x69, 0xfe, 0x82, 0x42, - 0xff, 0xd6, 0x22, 0x14, 0x8e, 0xd3, 0xa1, 0x0e, 0x94, 0x5b, 0xf1, 0x92, 0x51, 0x99, 0xe5, 0xde, - 0x5d, 0x4b, 0xe7, 0xdd, 0x60, 0x51, 0x8b, 0xfa, 0x8a, 0x04, 0x0e, 0x27, 0xb5, 0xcc, 0x7f, 0x1d, - 0x4a, 0xff, 0x63, 0xcb, 0xcd, 0x5a, 0xf6, 0xfe, 0x7b, 0x1c, 0xa9, 0x65, 0xff, 0x73, 0x06, 0xa6, - 0x92, 0xa7, 0xdf, 0x57, 0x0e, 0x73, 0xa9, 0xca, 0xa1, 0x1a, 0x0e, 0xb5, 0xa1, 0x4b, 0x60, 0x95, - 0xd6, 0xb3, 0x43, 0xd3, 0xba, 0xcc, 0x9e, 0xe3, 0x2f, 0x92, 0x3d, 0x6b, 0x00, 0xac, 0xcf, 0xa0, - 0x9e, 0xe3, 0x10, 0xca, 0x13, 0x67, 0x41, 0x2e, 0x7b, 0x43, 0x28, 0x8e, 0x51, 0xb0, 0x1e, 0x75, - 0xcf, 0xf1, 0xcc, 0x43, 0x7e, 0x04, 0xea, 0xd1, 0xf3, 0x94, 0x59, 0x10, 0x3d, 0x6a, 0x7d, 0x00, - 0x8b, 0xcf, 0xe0, 0xd0, 0xbb, 0x70, 0x71, 0xc7, 0xa0, 0x81, 0x6d, 0x38, 0xd1, 0x03, 0xe3, 0x43, - 0xc0, 0xc3, 0x81, 0x11, 0xe3, 0xb5, 0x51, 0x1f, 0x6a, 0x74, 0xf8, 0x11, 0x2c, 0x1a, 0x33, 0xf4, - 0xbf, 0x6b, 0x70, 0xe9, 0x4c, 0xdd, 0x9f, 0xc0, 0x88, 0xf3, 0x30, 0x39, 0xe2, 0xbc, 0x91, 0x72, - 0xdf, 0x78, 0x96, 0xb5, 0x43, 0x06, 0x9e, 0x09, 0xc8, 0xed, 0xb0, 0x86, 0x58, 0xff, 0xb5, 0x06, - 0x93, 0xfc, 0xd7, 0x28, 0xbb, 0xda, 0x2a, 0xe4, 0xf6, 0x3d, 0xb5, 0x38, 0x2a, 0x88, 0x3f, 0x13, - 
0x36, 0x18, 0x00, 0x0b, 0xf8, 0x0b, 0x2c, 0x73, 0xdf, 0xd3, 0x20, 0xb9, 0x25, 0x45, 0x37, 0x44, - 0xfc, 0x6a, 0xe1, 0x1a, 0x73, 0xc4, 0xd8, 0x7d, 0x73, 0xd8, 0x80, 0x36, 0x97, 0x6a, 0x77, 0x77, - 0x05, 0x8a, 0xd8, 0xf3, 0x82, 0x1d, 0x23, 0x38, 0xf0, 0x99, 0xe3, 0x6d, 0xf6, 0x43, 0x9e, 0x0d, - 0x77, 0x9c, 0x63, 0xb0, 0x80, 0xeb, 0xbf, 0xd2, 0xe0, 0xd2, 0xd0, 0xfd, 0x39, 0x4b, 0x01, 0x66, - 0xf8, 0x25, 0x3d, 0x0a, 0xa3, 0x30, 0xa2, 0xc3, 0x31, 0x2a, 0x36, 0x59, 0x25, 0x96, 0xee, 0xfd, - 0x93, 0x55, 0x42, 0x1b, 0x4e, 0xd2, 0xea, 0xff, 0xce, 0x40, 0x7e, 0x37, 0x30, 0x82, 0x8e, 0xff, - 0x7f, 0x8e, 0xd8, 0x57, 0x20, 0xef, 0x73, 0x3d, 0xd2, 0xbc, 0xb0, 0xc6, 0x0a, 0xed, 0x58, 0x62, - 0xf9, 0x34, 0x42, 0x7c, 0xdf, 0x68, 0xaa, 0x8c, 0x15, 0x4d, 0x23, 0x02, 0x8c, 0x15, 0x1e, 0xbd, - 0x0e, 0x79, 0x4a, 0x0c, 0x3f, 0x1c, 0xcc, 0x16, 0x94, 0x48, 0xcc, 0xa1, 0xa7, 0xbd, 0xea, 0xa4, - 0x14, 0xce, 0xbf, 0xb1, 0xa4, 0x46, 0xf7, 0x61, 0xc2, 0x22, 0x81, 0x61, 0x3b, 0x62, 0x1e, 0x4b, - 0xbd, 0xae, 0x17, 0xc2, 0x1a, 0x82, 0xb5, 0x5e, 0x62, 0x36, 0xc9, 0x0f, 0xac, 0x04, 0xb2, 0x6c, - 0x6b, 0x7a, 0x96, 0x18, 0x27, 0x72, 0x51, 0xb6, 0x5d, 0xf3, 0x2c, 0x82, 0x39, 0x46, 0x7f, 0xac, - 0x41, 0x49, 0x48, 0x5a, 0x33, 0x3a, 0x3e, 0x41, 0x2b, 0xa1, 0x17, 0xe2, 0xba, 0x55, 0x27, 0x37, - 0xce, 0x06, 0x8e, 0xd3, 0x5e, 0xb5, 0xc8, 0xc9, 0xf8, 0x24, 0xa2, 0x1c, 0x88, 0x9d, 0x51, 0xe6, - 0x9c, 0x33, 0x7a, 0x19, 0x72, 0xfc, 0xf5, 0xc8, 0xc3, 0x0c, 0xdf, 0x3a, 0x7f, 0x60, 0x58, 0xe0, - 0xf4, 0x8f, 0x32, 0x50, 0x4e, 0x38, 0x97, 0x62, 0x16, 0x08, 0x17, 0x8a, 0x99, 0x14, 0x4b, 0xea, - 0xe1, 0x7f, 0x51, 0xca, 0xda, 0x93, 0x7f, 0x91, 0xda, 0xf3, 0x3d, 0xc8, 0x9b, 0xec, 0x8c, 0xd4, - 0x3f, 0xde, 0x2b, 0xa3, 0x5c, 0x27, 0x3f, 0xdd, 0x28, 0x1a, 0xf9, 0xa7, 0x8f, 0xa5, 0x40, 0x74, - 0x13, 0x66, 0x29, 0x09, 0x68, 0x77, 0x75, 0x3f, 0x20, 0x34, 0x3e, 0xc4, 0xe7, 0xa2, 0x8e, 0x1b, - 0xf7, 0x13, 0xe0, 0x41, 0x1e, 0x7d, 0x0f, 0x26, 0xef, 0x18, 0x7b, 0x4e, 0xf8, 0x07, 0x14, 0x86, - 0xb2, 0xed, 0x9a, 0x4e, 0xc7, 0x22, 0x22, 0x1b, 0xab, 0xec, 0xa5, 0x1e, 0xed, 0x66, 0x1c, 0x79, - 0xda, 0xab, 0xce, 0x25, 0x00, 0xe2, 0x1f, 0x17, 0x9c, 0x14, 0xa1, 0x3b, 0x30, 0xfe, 0x09, 0x4e, - 0x8f, 0xdf, 0x87, 0x62, 0xd4, 0xdf, 0x7f, 0xcc, 0x2a, 0xf5, 0x87, 0x50, 0x60, 0x11, 0xaf, 0xe6, - 0xd2, 0x73, 0x5a, 0x9c, 0x64, 0xe3, 0x94, 0x49, 0xd3, 0x38, 0xe9, 0x2d, 0x28, 0xdf, 0x6d, 0x5b, - 0x2f, 0xf8, 0x17, 0x64, 0x26, 0x75, 0xd5, 0xba, 0x0a, 0xe2, 0xcf, 0x74, 0x56, 0x20, 0x44, 0xe5, - 0x8e, 0x15, 0x88, 0x78, 0xe1, 0x8d, 0xed, 0xca, 0x7f, 0xa6, 0x01, 0xf0, 0xa5, 0xd4, 0xfa, 0x11, - 0x71, 0x03, 0x76, 0x0e, 0x2c, 0xf0, 0xfb, 0xcf, 0x81, 0x67, 0x06, 0x8e, 0x41, 0x77, 0x21, 0xef, - 0x89, 0x68, 0x12, 0x7f, 0x43, 0x8e, 0xb8, 0xf9, 0x0c, 0x1f, 0x81, 0x88, 0x27, 0x2c, 0x85, 0xd5, - 0x97, 0x9e, 0x3e, 0x5b, 0x18, 0xfb, 0xe0, 0xd9, 0xc2, 0xd8, 0x87, 0xcf, 0x16, 0xc6, 0xde, 0x3d, - 0x59, 0xd0, 0x9e, 0x9e, 0x2c, 0x68, 0x1f, 0x9c, 0x2c, 0x68, 0x1f, 0x9e, 0x2c, 0x68, 0x1f, 0x9d, - 0x2c, 0x68, 0x8f, 0xff, 0xb9, 0x30, 0x76, 0x3f, 0x73, 0xb4, 0xf2, 0xdf, 0x00, 0x00, 0x00, 0xff, - 0xff, 0x61, 0xb7, 0xc5, 0x7c, 0xc2, 0x24, 0x00, 0x00, -} - func (m *APIGroup) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1481,65 +309,53 @@ func (m *APIGroup) Marshal() (dAtA []byte, err error) { } func (m *APIGroup) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *APIGroup) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + if len(m.Versions) > 0 { + for _, msg := range m.Versions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x22 + i += n } } - { - size, err := m.PreferredVersion.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0x1a - if len(m.Versions) > 0 { - for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Versions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.PreferredVersion.Size())) + n1, err := m.PreferredVersion.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - i-- - dAtA[i] = 0x12 + i += n } } - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *APIGroupList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1547,127 +363,115 @@ func (m *APIGroupList) Marshal() (dAtA []byte, err error) { } func (m *APIGroupList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *APIGroupList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Groups) > 0 { - for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.Groups { dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *APIResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *APIResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *APIResource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.StorageVersionHash) - copy(dAtA[i:], m.StorageVersionHash) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.StorageVersionHash))) - i-- - dAtA[i] = 0x52 - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x4a - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0x42 - if len(m.Categories) > 0 { - for iNdEx := len(m.Categories) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Categories[iNdEx]) - copy(dAtA[i:], m.Categories[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Categories[iNdEx]))) - i-- - dAtA[i] = 0x3a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x10 + i++ + if m.Namespaced { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + if m.Verbs != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Verbs.Size())) + n2, err := m.Verbs.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } + i += n2 } - i -= len(m.SingularName) - copy(dAtA[i:], m.SingularName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) - i-- - dAtA[i] = 0x32 if len(m.ShortNames) > 0 { - for iNdEx := len(m.ShortNames) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.ShortNames[iNdEx]) - copy(dAtA[i:], m.ShortNames[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ShortNames[iNdEx]))) - i-- + for _, s := range m.ShortNames { dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if m.Verbs != nil { - { - size, err := m.Verbs.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SingularName))) + i += copy(dAtA[i:], m.SingularName) + if len(m.Categories) > 0 { + for _, s := range m.Categories { + dAtA[i] = 0x3a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x22 - } - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x1a - i-- - if m.Namespaced { - dAtA[i] = 1 - } else { - dAtA[i] = 0 } - i-- - dAtA[i] = 0x10 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + return i, nil } func (m *APIResourceList) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1675,41 +479,33 @@ func (m *APIResourceList) Marshal() (dAtA []byte, err error) { } func (m *APIResourceList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *APIResourceList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i += copy(dAtA[i:], 
m.GroupVersion) if len(m.APIResources) > 0 { - for iNdEx := len(m.APIResources) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.APIResources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + for _, msg := range m.APIResources { dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - i -= len(m.GroupVersion) - copy(dAtA[i:], m.GroupVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } func (m *APIVersions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1717,45 +513,44 @@ func (m *APIVersions) Marshal() (dAtA []byte, err error) { } func (m *APIVersions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *APIVersions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.ServerAddressByClientCIDRs) > 0 { - for iNdEx := len(m.ServerAddressByClientCIDRs) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ServerAddressByClientCIDRs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.Versions) > 0 { + for _, s := range m.Versions { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x12 + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.Versions) > 0 { - for iNdEx := len(m.Versions) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Versions[iNdEx]) - copy(dAtA[i:], m.Versions[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Versions[iNdEx]))) - i-- - dAtA[i] = 0xa + if len(m.ServerAddressByClientCIDRs) > 0 { + for _, msg := range m.ServerAddressByClientCIDRs { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n } } - return len(dAtA) - i, nil + return i, nil } func (m *CreateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1763,36 +558,40 @@ func (m *CreateOptions) Marshal() (dAtA []byte, err error) { } func (m *CreateOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *CreateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.FieldManager) - copy(dAtA[i:], m.FieldManager) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i-- - dAtA[i] = 0x1a if len(m.DryRun) > 0 { - for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DryRun[iNdEx]) - copy(dAtA[i:], m.DryRun[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) - i-- + for _, s := range m.DryRun { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + dAtA[i] = 0x10 + i++ + if m.IncludeUninitialized { + 
dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1800,65 +599,63 @@ func (m *DeleteOptions) Marshal() (dAtA []byte, err error) { } func (m *DeleteOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *DeleteOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if len(m.DryRun) > 0 { - for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DryRun[iNdEx]) - copy(dAtA[i:], m.DryRun[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) - i-- - dAtA[i] = 0x2a - } + if m.GracePeriodSeconds != nil { + dAtA[i] = 0x8 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) } - if m.PropagationPolicy != nil { - i -= len(*m.PropagationPolicy) - copy(dAtA[i:], *m.PropagationPolicy) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) - i-- - dAtA[i] = 0x22 + if m.Preconditions != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Preconditions.Size())) + n3, err := m.Preconditions.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 } if m.OrphanDependents != nil { - i-- + dAtA[i] = 0x18 + i++ if *m.OrphanDependents { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x18 + i++ } - if m.Preconditions != nil { - { - size, err := m.Preconditions.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + if m.PropagationPolicy != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PropagationPolicy))) + i += copy(dAtA[i:], *m.PropagationPolicy) + } + if len(m.DryRun) > 0 { + for _, s := range m.DryRun { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } - i-- - dAtA[i] = 0x12 - } - if m.GracePeriodSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.GracePeriodSeconds)) - i-- - dAtA[i] = 0x8 } - return len(dAtA) - i, nil + return i, nil } func (m *Duration) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1866,25 +663,20 @@ func (m *Duration) Marshal() (dAtA []byte, err error) { } func (m *Duration) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Duration) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Duration)) + return i, nil } func (m *ExportOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1892,68 +684,33 @@ func (m *ExportOptions) Marshal() (dAtA []byte, err error) { } func (m *ExportOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ExportOptions) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i-- - if m.Exact { + dAtA[i] = 0x8 + i++ + if m.Export { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- + i++ dAtA[i] = 0x10 - i-- - if m.Export { + i++ + if m.Exact { dAtA[i] = 1 } else { dAtA[i] = 0 } - i-- - dAtA[i] = 0x8 - return len(dAtA) - i, nil -} - -func (m *FieldsV1) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FieldsV1) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *FieldsV1) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if m.Raw != nil { - i -= len(m.Raw) - copy(dAtA[i:], m.Raw) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + i++ + return i, nil } func (m *GetOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1961,27 +718,29 @@ func (m *GetOptions) Marshal() (dAtA []byte, err error) { } func (m *GetOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GetOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.ResourceVersion) - copy(dAtA[i:], m.ResourceVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x10 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil } func (m *GroupKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -1989,32 +748,25 @@ func (m *GroupKind) Marshal() (dAtA []byte, err error) { } func (m *GroupKind) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil } func (m *GroupResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2022,889 +774,645 @@ func (m *GroupResource) Marshal() (dAtA []byte, err error) { } func (m *GroupResource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var 
i int _ = i var l int _ = l - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GroupVersion) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersion) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupVersion) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) + i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupVersionForDiscovery) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - i -= len(m.GroupVersion) - copy(dAtA[i:], m.GroupVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupVersionKind) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x1a - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- + i += copy(dAtA[i:], m.Group) dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + return i, nil } -func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { +func (m *GroupVersion) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *GroupVersionResource) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *GroupVersion) MarshalTo(dAtA 
[]byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.Resource) - copy(dAtA[i:], m.Resource) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i-- - dAtA[i] = 0x1a - i -= len(m.Version) - copy(dAtA[i:], m.Version) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i-- - dAtA[i] = 0x12 - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + return i, nil } -func (m *LabelSelector) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionForDiscovery) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelSelector) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *GroupVersionForDiscovery) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.MatchExpressions) > 0 { - for iNdEx := len(m.MatchExpressions) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.MatchExpressions[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - if len(m.MatchLabels) > 0 { - keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) - for k := range m.MatchLabels { - keysForMatchLabels = append(keysForMatchLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) - for iNdEx := len(keysForMatchLabels) - 1; iNdEx >= 0; iNdEx-- { - v := m.MatchLabels[string(keysForMatchLabels[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForMatchLabels[iNdEx]) - copy(dAtA[i:], keysForMatchLabels[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForMatchLabels[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GroupVersion))) + i += copy(dAtA[i:], m.GroupVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + return i, nil } -func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionKind) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *LabelSelectorRequirement) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *GroupVersionKind) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Values) > 0 { - for iNdEx := len(m.Values) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Values[iNdEx]) - copy(dAtA[i:], m.Values[iNdEx]) - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Values[iNdEx]))) - i-- - dAtA[i] = 0x1a - } - } - i -= len(m.Operator) - copy(dAtA[i:], m.Operator) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i-- - dAtA[i] = 0x12 - i -= len(m.Key) - copy(dAtA[i:], m.Key) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil } -func (m *List) Marshal() (dAtA []byte, err error) { +func (m *GroupVersionResource) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *List) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *List) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *GroupVersionResource) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x12 - } - } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) + i += copy(dAtA[i:], m.Resource) + return i, nil } -func (m *ListMeta) Marshal() (dAtA []byte, err error) { +func (m *Initializer) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.RemainingItemCount != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.RemainingItemCount)) - i-- - dAtA[i] = 0x20 - } - i -= len(m.Continue) - copy(dAtA[i:], m.Continue) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i-- - dAtA[i] = 0x1a - i -= len(m.ResourceVersion) - copy(dAtA[i:], m.ResourceVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i-- - dAtA[i] = 0x12 - i -= len(m.SelfLink) - copy(dAtA[i:], m.SelfLink) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + return i, nil } -func (m *ListOptions) Marshal() (dAtA []byte, err error) { +func (m 
*Initializers) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ListOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Initializers) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i-- - if m.AllowWatchBookmarks { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x48 - i -= len(m.Continue) - copy(dAtA[i:], m.Continue) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) - i-- - dAtA[i] = 0x42 - i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) - i-- - dAtA[i] = 0x38 - if m.TimeoutSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) - i-- - dAtA[i] = 0x28 + if len(m.Pending) > 0 { + for _, msg := range m.Pending { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } } - i -= len(m.ResourceVersion) - copy(dAtA[i:], m.ResourceVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i-- - dAtA[i] = 0x22 - i-- - if m.Watch { - dAtA[i] = 1 - } else { - dAtA[i] = 0 + if m.Result != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Result.Size())) + n4, err := m.Result.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 } - i-- - dAtA[i] = 0x18 - i -= len(m.FieldSelector) - copy(dAtA[i:], m.FieldSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) - i-- - dAtA[i] = 0x12 - i -= len(m.LabelSelector) - copy(dAtA[i:], m.LabelSelector) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ManagedFieldsEntry) Marshal() (dAtA []byte, err error) { +func (m *LabelSelector) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ManagedFieldsEntry) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ManagedFieldsEntry) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LabelSelector) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.FieldsV1 != nil { - { - size, err := m.FieldsV1.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + if len(m.MatchLabels) > 0 { + keysForMatchLabels := make([]string, 0, len(m.MatchLabels)) + for k := range m.MatchLabels { + keysForMatchLabels = append(keysForMatchLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMatchLabels) + for _, k := range keysForMatchLabels { + dAtA[i] = 0xa + i++ + v := m.MatchLabels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } - i-- - dAtA[i] = 0x3a } - i 
-= len(m.FieldsType) - copy(dAtA[i:], m.FieldsType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldsType))) - i-- - dAtA[i] = 0x32 - if m.Time != nil { - { - size, err := m.Time.MarshalToSizedBuffer(dAtA[:i]) + if len(m.MatchExpressions) > 0 { + for _, msg := range m.MatchExpressions { + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n } - i-- - dAtA[i] = 0x22 } - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x1a - i -= len(m.Operation) - copy(dAtA[i:], m.Operation) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operation))) - i-- - dAtA[i] = 0x12 - i -= len(m.Manager) - copy(dAtA[i:], m.Manager) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Manager))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { +func (m *LabelSelectorRequirement) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ObjectMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *LabelSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.ManagedFields) > 0 { - for iNdEx := len(m.ManagedFields) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.ManagedFields[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) + i += copy(dAtA[i:], m.Operator) + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ } - i-- - dAtA[i] = 0x1 - i-- - dAtA[i] = 0x8a - } - } - i -= len(m.ClusterName) - copy(dAtA[i:], m.ClusterName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) - i-- - dAtA[i] = 0x7a - if len(m.Finalizers) > 0 { - for iNdEx := len(m.Finalizers) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Finalizers[iNdEx]) - copy(dAtA[i:], m.Finalizers[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Finalizers[iNdEx]))) - i-- - dAtA[i] = 0x72 + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - if len(m.OwnerReferences) > 0 { - for iNdEx := len(m.OwnerReferences) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.OwnerReferences[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x6a - } + return i, nil +} + +func (m *List) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err } - if len(m.Annotations) > 0 { - keysForAnnotations := make([]string, 0, len(m.Annotations)) - for k := range m.Annotations { - keysForAnnotations = append(keysForAnnotations, string(k)) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) - for iNdEx := len(keysForAnnotations) - 1; iNdEx >= 0; iNdEx-- { - v := m.Annotations[string(keysForAnnotations[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- - dAtA[i] = 0x12 - i -= len(keysForAnnotations[iNdEx]) - copy(dAtA[i:], keysForAnnotations[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForAnnotations[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x62 - } + return dAtA[:n], nil +} + +func (m *List) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n5, err := m.ListMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err } - if len(m.Labels) > 0 { - keysForLabels := make([]string, 0, len(m.Labels)) - for k := range m.Labels { - keysForLabels = append(keysForLabels, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) - for iNdEx := len(keysForLabels) - 1; iNdEx >= 0; iNdEx-- { - v := m.Labels[string(keysForLabels[iNdEx])] - baseI := i - i -= len(v) - copy(dAtA[i:], v) - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i-- + i += n5 + if len(m.Items) > 0 { + for _, msg := range m.Items { dAtA[i] = 0x12 - i -= len(keysForLabels[iNdEx]) - copy(dAtA[i:], keysForLabels[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(keysForLabels[iNdEx]))) - i-- - dAtA[i] = 0xa - i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) - i-- - dAtA[i] = 0x5a - } - } - if m.DeletionGracePeriodSeconds != nil { - i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) - i-- - dAtA[i] = 0x50 - } - if m.DeletionTimestamp != nil { - { - size, err := m.DeletionTimestamp.MarshalToSizedBuffer(dAtA[:i]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x4a - } - { - size, err := m.CreationTimestamp.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err + i += n } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) } - i-- - dAtA[i] = 0x42 - i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) - i-- - dAtA[i] = 0x38 - i -= len(m.ResourceVersion) - copy(dAtA[i:], m.ResourceVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) - i-- - dAtA[i] = 0x32 - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x2a - i -= len(m.SelfLink) - copy(dAtA[i:], m.SelfLink) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) - i-- - dAtA[i] = 0x22 - i -= len(m.Namespace) - copy(dAtA[i:], m.Namespace) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i-- - dAtA[i] = 0x1a - i -= len(m.GenerateName) - copy(dAtA[i:], m.GenerateName) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *OwnerReference) Marshal() (dAtA []byte, err error) { +func (m *ListMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } 
-func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *OwnerReference) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ListMeta) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if m.BlockOwnerDeletion != nil { - i-- - if *m.BlockOwnerDeletion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x38 - } - if m.Controller != nil { - i-- - if *m.Controller { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x30 - } - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x2a - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x22 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x1a - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) + return i, nil } -func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { +func (m *ListOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PartialObjectMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ListOptions) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - { - size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.LabelSelector))) + i += copy(dAtA[i:], m.LabelSelector) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldSelector))) + i += copy(dAtA[i:], m.FieldSelector) + dAtA[i] = 0x18 + i++ + if m.Watch { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + if m.TimeoutSeconds != nil { + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds)) + } + dAtA[i] = 0x30 + i++ + if m.IncludeUninitialized { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Limit)) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Continue))) + i += copy(dAtA[i:], m.Continue) + return i, nil } -func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { +func (m *ObjectMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], 
nil } -func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PartialObjectMetadataList) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *ObjectMeta) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - if len(m.Items) > 0 { - for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.GenerateName))) + i += copy(dAtA[i:], m.GenerateName) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) + i += copy(dAtA[i:], m.Namespace) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.SelfLink))) + i += copy(dAtA[i:], m.SelfLink) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ResourceVersion))) + i += copy(dAtA[i:], m.ResourceVersion) + dAtA[i] = 0x38 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Generation)) + dAtA[i] = 0x42 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.CreationTimestamp.Size())) + n6, err := m.CreationTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + if m.DeletionTimestamp != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.DeletionTimestamp.Size())) + n7, err := m.DeletionTimestamp.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + if m.DeletionGracePeriodSeconds != nil { + dAtA[i] = 0x50 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(*m.DeletionGracePeriodSeconds)) + } + if len(m.Labels) > 0 { + keysForLabels := make([]string, 0, len(m.Labels)) + for k := range m.Labels { + keysForLabels = append(keysForLabels, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForLabels) + for _, k := range keysForLabels { + dAtA[i] = 0x5a + i++ + v := m.Labels[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.Annotations) > 0 { + keysForAnnotations := make([]string, 0, len(m.Annotations)) + for k := range m.Annotations { + keysForAnnotations = append(keysForAnnotations, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForAnnotations) + for _, k := range keysForAnnotations { + dAtA[i] = 0x62 + i++ + v := m.Annotations[string(k)] + mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) + i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if len(m.OwnerReferences) > 0 { + for _, msg := range m.OwnerReferences { + dAtA[i] = 0x6a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if 
err != nil { + return 0, err + } + i += n + } + } + if len(m.Finalizers) > 0 { + for _, s := range m.Finalizers { + dAtA[i] = 0x72 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + dAtA[i] = 0x7a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterName))) + i += copy(dAtA[i:], m.ClusterName) + if m.Initializers != nil { + dAtA[i] = 0x82 + i++ + dAtA[i] = 0x1 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Initializers.Size())) + n8, err := m.Initializers.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) + i += n8 } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + return i, nil } -func (m *Patch) Marshal() (dAtA []byte, err error) { +func (m *OwnerReference) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *Patch) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Patch) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *OwnerReference) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - return len(dAtA) - i, nil + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + if m.Controller != nil { + dAtA[i] = 0x30 + i++ + if *m.Controller { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if m.BlockOwnerDeletion != nil { + dAtA[i] = 0x38 + i++ + if *m.BlockOwnerDeletion { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil } -func (m *PatchOptions) Marshal() (dAtA []byte, err error) { +func (m *Patch) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *PatchOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *PatchOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *Patch) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.FieldManager) - copy(dAtA[i:], m.FieldManager) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i-- - dAtA[i] = 0x1a - if m.Force != nil { - i-- - if *m.Force { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i-- - dAtA[i] = 0x10 - } - if len(m.DryRun) > 0 { - for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DryRun[iNdEx]) - copy(dAtA[i:], m.DryRun[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil + return i, nil } func (m *Preconditions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { 
return nil, err } @@ -2912,36 +1420,23 @@ func (m *Preconditions) Marshal() (dAtA []byte, err error) { } func (m *Preconditions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Preconditions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - if m.ResourceVersion != nil { - i -= len(*m.ResourceVersion) - copy(dAtA[i:], *m.ResourceVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ResourceVersion))) - i-- - dAtA[i] = 0x12 - } if m.UID != nil { - i -= len(*m.UID) - copy(dAtA[i:], *m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(*m.UID))) + i += copy(dAtA[i:], *m.UID) } - return len(dAtA) - i, nil + return i, nil } func (m *RootPaths) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2949,31 +1444,32 @@ func (m *RootPaths) Marshal() (dAtA []byte, err error) { } func (m *RootPaths) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RootPaths) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m.Paths) > 0 { - for iNdEx := len(m.Paths) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Paths[iNdEx]) - copy(dAtA[i:], m.Paths[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Paths[iNdEx]))) - i-- + for _, s := range m.Paths { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -2981,32 +1477,25 @@ func (m *ServerAddressByClientCIDR) Marshal() (dAtA []byte, err error) { } func (m *ServerAddressByClientCIDR) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *ServerAddressByClientCIDR) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.ServerAddress) - copy(dAtA[i:], m.ServerAddress) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) - i-- - dAtA[i] = 0x12 - i -= len(m.ClientCIDR) - copy(dAtA[i:], m.ClientCIDR) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClientCIDR))) + i += copy(dAtA[i:], m.ClientCIDR) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServerAddress))) + i += copy(dAtA[i:], m.ServerAddress) + return i, nil } func (m *Status) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3014,188 +1503,129 @@ func (m *Status) Marshal() (dAtA []byte, err error) { } func (m *Status) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Status) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = 
encodeVarintGenerated(dAtA, i, uint64(m.Code)) - i-- - dAtA[i] = 0x30 - if m.Details != nil { - { - size, err := m.Details.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0x2a - } - i -= len(m.Reason) - copy(dAtA[i:], m.Reason) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i-- - dAtA[i] = 0x22 - i -= len(m.Message) - copy(dAtA[i:], m.Message) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x1a - i -= len(m.Status) - copy(dAtA[i:], m.Status) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i-- - dAtA[i] = 0x12 - { - size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil -} - -func (m *StatusCause) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) + n9, err := m.ListMeta.MarshalTo(dAtA[i:]) if err != nil { - return nil, err + return 0, err } - return dAtA[:n], nil -} - -func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusCause) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - i -= len(m.Field) - copy(dAtA[i:], m.Field) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) - i-- + i += n9 + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) + i += copy(dAtA[i:], m.Status) dAtA[i] = 0x1a - i -= len(m.Message) - copy(dAtA[i:], m.Message) + i++ i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) + i += copy(dAtA[i:], m.Reason) + if m.Details != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Details.Size())) + n10, err := m.Details.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n10 + } + dAtA[i] = 0x30 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Code)) + return i, nil } -func (m *StatusDetails) Marshal() (dAtA []byte, err error) { +func (m *StatusCause) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *StatusDetails) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *StatusCause) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.UID) - copy(dAtA[i:], m.UID) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i-- - dAtA[i] = 0x32 - i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) - i-- - dAtA[i] = 0x28 - if len(m.Causes) > 0 { - for iNdEx := len(m.Causes) - 1; iNdEx >= 0; iNdEx-- { - { - size, err := m.Causes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x22 - } - } - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x1a - i -= len(m.Group) - copy(dAtA[i:], m.Group) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i-- - dAtA[i] = 0x12 - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Field))) + i += copy(dAtA[i:], m.Field) + return i, nil } -func (m *TableOptions) Marshal() (dAtA []byte, err error) { +func (m *StatusDetails) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } -func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TableOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) +func (m *StatusDetails) MarshalTo(dAtA []byte) (int, error) { + var i int _ = i var l int _ = l - i -= len(m.IncludeObject) - copy(dAtA[i:], m.IncludeObject) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) + i += copy(dAtA[i:], m.Group) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + if len(m.Causes) > 0 { + for _, msg := range m.Causes { + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + dAtA[i] = 0x28 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.RetryAfterSeconds)) + dAtA[i] = 0x32 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) + i += copy(dAtA[i:], m.UID) + return i, nil } func (m *Timestamp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3203,28 +1633,23 @@ func (m *Timestamp) Marshal() (dAtA []byte, err error) { } func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Timestamp) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Seconds)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Nanos)) + return i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3232,32 +1657,25 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) 
MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- - dAtA[i] = 0x12 - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + return i, nil } func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3265,36 +1683,32 @@ func (m *UpdateOptions) Marshal() (dAtA []byte, err error) { } func (m *UpdateOptions) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *UpdateOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.FieldManager) - copy(dAtA[i:], m.FieldManager) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FieldManager))) - i-- - dAtA[i] = 0x12 if len(m.DryRun) > 0 { - for iNdEx := len(m.DryRun) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.DryRun[iNdEx]) - copy(dAtA[i:], m.DryRun[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DryRun[iNdEx]))) - i-- + for _, s := range m.DryRun { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m Verbs) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3302,31 +1716,32 @@ func (m Verbs) Marshal() (dAtA []byte, err error) { } func (m Verbs) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m Verbs) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if len(m) > 0 { - for iNdEx := len(m) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m[iNdEx]) - copy(dAtA[i:], m[iNdEx]) - i = encodeVarintGenerated(dAtA, i, uint64(len(m[iNdEx]))) - i-- + for _, s := range m { dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) } } - return len(dAtA) - i, nil + return i, nil } func (m *WatchEvent) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -3334,48 +1749,53 @@ func (m *WatchEvent) Marshal() (dAtA []byte, err error) { } func (m *WatchEvent) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *WatchEvent) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - { - size, err := m.Object.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, 
uint64(size)) - } - i-- - dAtA[i] = 0x12 - i -= len(m.Type) - copy(dAtA[i:], m.Type) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) + n11, err := m.Object.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n11 + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *APIGroup) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -3398,9 +1818,6 @@ func (m *APIGroup) Size() (n int) { } func (m *APIGroupList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Groups) > 0 { @@ -3413,9 +1830,6 @@ func (m *APIGroupList) Size() (n int) { } func (m *APIResource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -3445,15 +1859,10 @@ func (m *APIResource) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Version) n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StorageVersionHash) - n += 1 + l + sovGenerated(uint64(l)) return n } func (m *APIResourceList) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.GroupVersion) @@ -3468,9 +1877,6 @@ func (m *APIResourceList) Size() (n int) { } func (m *APIVersions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Versions) > 0 { @@ -3489,9 +1895,6 @@ func (m *APIVersions) Size() (n int) { } func (m *CreateOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.DryRun) > 0 { @@ -3500,15 +1903,11 @@ func (m *CreateOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.FieldManager) - n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } func (m *DeleteOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.GracePeriodSeconds != nil { @@ -3535,9 +1934,6 @@ func (m *DeleteOptions) Size() (n int) { } func (m *Duration) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Duration)) @@ -3545,9 +1941,6 @@ func (m *Duration) Size() (n int) { } func (m *ExportOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 2 @@ -3555,34 +1948,16 @@ func (m *ExportOptions) Size() (n int) { return n } -func (m *FieldsV1) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Raw != nil { - l = len(m.Raw) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - func (m *GetOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.ResourceVersion) n += 1 + l + sovGenerated(uint64(l)) + n += 2 return n } func (m *GroupKind) Size() (n 
int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Group) @@ -3593,9 +1968,6 @@ func (m *GroupKind) Size() (n int) { } func (m *GroupResource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Group) @@ -3606,9 +1978,6 @@ func (m *GroupResource) Size() (n int) { } func (m *GroupVersion) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Group) @@ -3619,9 +1988,6 @@ func (m *GroupVersion) Size() (n int) { } func (m *GroupVersionForDiscovery) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.GroupVersion) @@ -3632,9 +1998,6 @@ func (m *GroupVersionForDiscovery) Size() (n int) { } func (m *GroupVersionKind) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Group) @@ -3647,9 +2010,6 @@ func (m *GroupVersionKind) Size() (n int) { } func (m *GroupVersionResource) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Group) @@ -3661,10 +2021,31 @@ func (m *GroupVersionResource) Size() (n int) { return n } -func (m *LabelSelector) Size() (n int) { - if m == nil { - return 0 +func (m *Initializer) Size() (n int) { + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *Initializers) Size() (n int) { + var l int + _ = l + if len(m.Pending) > 0 { + for _, e := range m.Pending { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + if m.Result != nil { + l = m.Result.Size() + n += 1 + l + sovGenerated(uint64(l)) } + return n +} + +func (m *LabelSelector) Size() (n int) { var l int _ = l if len(m.MatchLabels) > 0 { @@ -3685,9 +2066,6 @@ func (m *LabelSelector) Size() (n int) { } func (m *LabelSelectorRequirement) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -3704,9 +2082,6 @@ func (m *LabelSelectorRequirement) Size() (n int) { } func (m *List) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -3721,9 +2096,6 @@ func (m *List) Size() (n int) { } func (m *ListMeta) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.SelfLink) @@ -3732,16 +2104,10 @@ func (m *ListMeta) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) - if m.RemainingItemCount != nil { - n += 1 + sovGenerated(uint64(*m.RemainingItemCount)) - } return n } func (m *ListOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.LabelSelector) @@ -3754,42 +2120,14 @@ func (m *ListOptions) Size() (n int) { if m.TimeoutSeconds != nil { n += 1 + sovGenerated(uint64(*m.TimeoutSeconds)) } + n += 2 n += 1 + sovGenerated(uint64(m.Limit)) l = len(m.Continue) n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *ManagedFieldsEntry) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Manager) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Operation) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - if m.Time != nil { - l = m.Time.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.FieldsType) - n += 1 + l + sovGenerated(uint64(l)) - if m.FieldsV1 != nil { - l = m.FieldsV1.Size() - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *ObjectMeta) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -3844,19 +2182,14 @@ func (m *ObjectMeta) Size() (n int) { } l = len(m.ClusterName) n += 1 + l + sovGenerated(uint64(l)) - if len(m.ManagedFields) > 
0 { - for _, e := range m.ManagedFields { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } + if m.Initializers != nil { + l = m.Initializers.Size() + n += 2 + l + sovGenerated(uint64(l)) } return n } func (m *OwnerReference) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -3876,84 +2209,23 @@ func (m *OwnerReference) Size() (n int) { return n } -func (m *PartialObjectMetadata) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PartialObjectMetadataList) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - func (m *Patch) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - return n -} - -func (m *PatchOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l - if len(m.DryRun) > 0 { - for _, s := range m.DryRun { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Force != nil { - n += 2 - } - l = len(m.FieldManager) - n += 1 + l + sovGenerated(uint64(l)) return n } func (m *Preconditions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.UID != nil { l = len(*m.UID) n += 1 + l + sovGenerated(uint64(l)) } - if m.ResourceVersion != nil { - l = len(*m.ResourceVersion) - n += 1 + l + sovGenerated(uint64(l)) - } return n } func (m *RootPaths) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Paths) > 0 { @@ -3966,9 +2238,6 @@ func (m *RootPaths) Size() (n int) { } func (m *ServerAddressByClientCIDR) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.ClientCIDR) @@ -3979,9 +2248,6 @@ func (m *ServerAddressByClientCIDR) Size() (n int) { } func (m *Status) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.ListMeta.Size() @@ -4001,9 +2267,6 @@ func (m *Status) Size() (n int) { } func (m *StatusCause) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -4016,9 +2279,6 @@ func (m *StatusCause) Size() (n int) { } func (m *StatusDetails) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Name) @@ -4039,21 +2299,7 @@ func (m *StatusDetails) Size() (n int) { return n } -func (m *TableOptions) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.IncludeObject) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - func (m *Timestamp) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Seconds)) @@ -4062,9 +2308,6 @@ func (m *Timestamp) Size() (n int) { } func (m *TypeMeta) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Kind) @@ -4075,9 +2318,6 @@ func (m *TypeMeta) Size() (n int) { } func (m *UpdateOptions) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.DryRun) > 0 { @@ -4086,15 +2326,10 @@ func (m *UpdateOptions) Size() (n int) { n += 1 + l + sovGenerated(uint64(l)) } } - l = len(m.FieldManager) - n += 1 + l + sovGenerated(uint64(l)) return n } func (m Verbs) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m) > 0 { @@ -4107,9 +2342,6 @@ func (m Verbs) Size() (n int) { } func (m *WatchEvent) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -4120,7 +2352,14 @@ func (m *WatchEvent) Size() (n 
int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -4129,21 +2368,11 @@ func (this *APIGroup) String() string { if this == nil { return "nil" } - repeatedStringForVersions := "[]GroupVersionForDiscovery{" - for _, f := range this.Versions { - repeatedStringForVersions += strings.Replace(strings.Replace(f.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + "," - } - repeatedStringForVersions += "}" - repeatedStringForServerAddressByClientCIDRs := "[]ServerAddressByClientCIDR{" - for _, f := range this.ServerAddressByClientCIDRs { - repeatedStringForServerAddressByClientCIDRs += strings.Replace(strings.Replace(f.String(), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + "," - } - repeatedStringForServerAddressByClientCIDRs += "}" s := strings.Join([]string{`&APIGroup{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Versions:` + repeatedStringForVersions + `,`, + `Versions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Versions), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, `PreferredVersion:` + strings.Replace(strings.Replace(this.PreferredVersion.String(), "GroupVersionForDiscovery", "GroupVersionForDiscovery", 1), `&`, ``, 1) + `,`, - `ServerAddressByClientCIDRs:` + repeatedStringForServerAddressByClientCIDRs + `,`, + `ServerAddressByClientCIDRs:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ServerAddressByClientCIDRs), "ServerAddressByClientCIDR", "ServerAddressByClientCIDR", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4152,13 +2381,8 @@ func (this *APIGroupList) String() string { if this == nil { return "nil" } - repeatedStringForGroups := "[]APIGroup{" - for _, f := range this.Groups { - repeatedStringForGroups += strings.Replace(strings.Replace(f.String(), "APIGroup", "APIGroup", 1), `&`, ``, 1) + "," - } - repeatedStringForGroups += "}" s := strings.Join([]string{`&APIGroupList{`, - `Groups:` + repeatedStringForGroups + `,`, + `Groups:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Groups), "APIGroup", "APIGroup", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4177,7 +2401,6 @@ func (this *APIResource) String() string { `Categories:` + fmt.Sprintf("%v", this.Categories) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `StorageVersionHash:` + fmt.Sprintf("%v", this.StorageVersionHash) + `,`, `}`, }, "") return s @@ -4186,14 +2409,9 @@ func (this *APIResourceList) String() string { if this == nil { return "nil" } - repeatedStringForAPIResources := "[]APIResource{" - for _, f := range this.APIResources { - repeatedStringForAPIResources += strings.Replace(strings.Replace(f.String(), "APIResource", "APIResource", 1), `&`, ``, 1) + "," - } - repeatedStringForAPIResources += "}" s := strings.Join([]string{`&APIResourceList{`, `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `APIResources:` + repeatedStringForAPIResources + `,`, + `APIResources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.APIResources), "APIResource", "APIResource", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4204,7 +2422,7 @@ func (this *CreateOptions) String() string { } s := strings.Join([]string{`&CreateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `FieldManager:` + 
fmt.Sprintf("%v", this.FieldManager) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `}`, }, "") return s @@ -4215,7 +2433,7 @@ func (this *DeleteOptions) String() string { } s := strings.Join([]string{`&DeleteOptions{`, `GracePeriodSeconds:` + valueToStringGenerated(this.GracePeriodSeconds) + `,`, - `Preconditions:` + strings.Replace(this.Preconditions.String(), "Preconditions", "Preconditions", 1) + `,`, + `Preconditions:` + strings.Replace(fmt.Sprintf("%v", this.Preconditions), "Preconditions", "Preconditions", 1) + `,`, `OrphanDependents:` + valueToStringGenerated(this.OrphanDependents) + `,`, `PropagationPolicy:` + valueToStringGenerated(this.PropagationPolicy) + `,`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, @@ -4244,33 +2462,45 @@ func (this *ExportOptions) String() string { }, "") return s } -func (this *FieldsV1) String() string { +func (this *GetOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&GetOptions{`, + `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, + `}`, + }, "") + return s +} +func (this *GroupVersionForDiscovery) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&FieldsV1{`, - `Raw:` + valueToStringGenerated(this.Raw) + `,`, + s := strings.Join([]string{`&GroupVersionForDiscovery{`, + `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, `}`, }, "") return s } -func (this *GetOptions) String() string { +func (this *Initializer) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&GetOptions{`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, + s := strings.Join([]string{`&Initializer{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `}`, }, "") return s } -func (this *GroupVersionForDiscovery) String() string { +func (this *Initializers) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&GroupVersionForDiscovery{`, - `GroupVersion:` + fmt.Sprintf("%v", this.GroupVersion) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + s := strings.Join([]string{`&Initializers{`, + `Pending:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Pending), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, + `Result:` + strings.Replace(fmt.Sprintf("%v", this.Result), "Status", "Status", 1) + `,`, `}`, }, "") return s @@ -4279,11 +2509,6 @@ func (this *LabelSelector) String() string { if this == nil { return "nil" } - repeatedStringForMatchExpressions := "[]LabelSelectorRequirement{" - for _, f := range this.MatchExpressions { - repeatedStringForMatchExpressions += strings.Replace(strings.Replace(f.String(), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + "," - } - repeatedStringForMatchExpressions += "}" keysForMatchLabels := make([]string, 0, len(this.MatchLabels)) for k := range this.MatchLabels { keysForMatchLabels = append(keysForMatchLabels, k) @@ -4296,7 +2521,7 @@ func (this *LabelSelector) String() string { mapStringForMatchLabels += "}" s := strings.Join([]string{`&LabelSelector{`, `MatchLabels:` + mapStringForMatchLabels + `,`, - `MatchExpressions:` + repeatedStringForMatchExpressions + `,`, + `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "LabelSelectorRequirement", "LabelSelectorRequirement", 1), `&`, ``, 1) + `,`, `}`, }, 
"") return s @@ -4317,14 +2542,9 @@ func (this *List) String() string { if this == nil { return "nil" } - repeatedStringForItems := "[]RawExtension{" - for _, f := range this.Items { - repeatedStringForItems += fmt.Sprintf("%v", f) + "," - } - repeatedStringForItems += "}" s := strings.Join([]string{`&List{`, `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, + `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4337,7 +2557,6 @@ func (this *ListMeta) String() string { `SelfLink:` + fmt.Sprintf("%v", this.SelfLink) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, - `RemainingItemCount:` + valueToStringGenerated(this.RemainingItemCount) + `,`, `}`, }, "") return s @@ -4352,24 +2571,9 @@ func (this *ListOptions) String() string { `Watch:` + fmt.Sprintf("%v", this.Watch) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, + `IncludeUninitialized:` + fmt.Sprintf("%v", this.IncludeUninitialized) + `,`, `Limit:` + fmt.Sprintf("%v", this.Limit) + `,`, `Continue:` + fmt.Sprintf("%v", this.Continue) + `,`, - `AllowWatchBookmarks:` + fmt.Sprintf("%v", this.AllowWatchBookmarks) + `,`, - `}`, - }, "") - return s -} -func (this *ManagedFieldsEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ManagedFieldsEntry{`, - `Manager:` + fmt.Sprintf("%v", this.Manager) + `,`, - `Operation:` + fmt.Sprintf("%v", this.Operation) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Time:` + strings.Replace(fmt.Sprintf("%v", this.Time), "Time", "Time", 1) + `,`, - `FieldsType:` + fmt.Sprintf("%v", this.FieldsType) + `,`, - `FieldsV1:` + strings.Replace(this.FieldsV1.String(), "FieldsV1", "FieldsV1", 1) + `,`, `}`, }, "") return s @@ -4378,16 +2582,6 @@ func (this *ObjectMeta) String() string { if this == nil { return "nil" } - repeatedStringForOwnerReferences := "[]OwnerReference{" - for _, f := range this.OwnerReferences { - repeatedStringForOwnerReferences += strings.Replace(strings.Replace(f.String(), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + "," - } - repeatedStringForOwnerReferences += "}" - repeatedStringForManagedFields := "[]ManagedFieldsEntry{" - for _, f := range this.ManagedFields { - repeatedStringForManagedFields += strings.Replace(strings.Replace(f.String(), "ManagedFieldsEntry", "ManagedFieldsEntry", 1), `&`, ``, 1) + "," - } - repeatedStringForManagedFields += "}" keysForLabels := make([]string, 0, len(this.Labels)) for k := range this.Labels { keysForLabels = append(keysForLabels, k) @@ -4416,15 +2610,15 @@ func (this *ObjectMeta) String() string { `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, `Generation:` + fmt.Sprintf("%v", this.Generation) + `,`, - `CreationTimestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CreationTimestamp), "Time", "Time", 1), `&`, ``, 1) + `,`, + `CreationTimestamp:` + strings.Replace(strings.Replace(this.CreationTimestamp.String(), "Time", "Time", 1), `&`, ``, 1) + `,`, `DeletionTimestamp:` + strings.Replace(fmt.Sprintf("%v", this.DeletionTimestamp), "Time", "Time", 1) + `,`, `DeletionGracePeriodSeconds:` + 
valueToStringGenerated(this.DeletionGracePeriodSeconds) + `,`, `Labels:` + mapStringForLabels + `,`, `Annotations:` + mapStringForAnnotations + `,`, - `OwnerReferences:` + repeatedStringForOwnerReferences + `,`, + `OwnerReferences:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.OwnerReferences), "OwnerReference", "OwnerReference", 1), `&`, ``, 1) + `,`, `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, `ClusterName:` + fmt.Sprintf("%v", this.ClusterName) + `,`, - `ManagedFields:` + repeatedStringForManagedFields + `,`, + `Initializers:` + strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializers", "Initializers", 1) + `,`, `}`, }, "") return s @@ -4444,32 +2638,6 @@ func (this *OwnerReference) String() string { }, "") return s } -func (this *PartialObjectMetadata) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadata{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "ObjectMeta", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PartialObjectMetadataList) String() string { - if this == nil { - return "nil" - } - repeatedStringForItems := "[]PartialObjectMetadata{" - for _, f := range this.Items { - repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PartialObjectMetadata", "PartialObjectMetadata", 1), `&`, ``, 1) + "," - } - repeatedStringForItems += "}" - s := strings.Join([]string{`&PartialObjectMetadataList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + repeatedStringForItems + `,`, - `}`, - }, "") - return s -} func (this *Patch) String() string { if this == nil { return "nil" @@ -4479,25 +2647,12 @@ func (this *Patch) String() string { }, "") return s } -func (this *PatchOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PatchOptions{`, - `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `Force:` + valueToStringGenerated(this.Force) + `,`, - `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, - `}`, - }, "") - return s -} func (this *Preconditions) String() string { if this == nil { return "nil" } s := strings.Join([]string{`&Preconditions{`, `UID:` + valueToStringGenerated(this.UID) + `,`, - `ResourceVersion:` + valueToStringGenerated(this.ResourceVersion) + `,`, `}`, }, "") return s @@ -4532,7 +2687,7 @@ func (this *Status) String() string { `Status:` + fmt.Sprintf("%v", this.Status) + `,`, `Message:` + fmt.Sprintf("%v", this.Message) + `,`, `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Details:` + strings.Replace(this.Details.String(), "StatusDetails", "StatusDetails", 1) + `,`, + `Details:` + strings.Replace(fmt.Sprintf("%v", this.Details), "StatusDetails", "StatusDetails", 1) + `,`, `Code:` + fmt.Sprintf("%v", this.Code) + `,`, `}`, }, "") @@ -4554,32 +2709,17 @@ func (this *StatusDetails) String() string { if this == nil { return "nil" } - repeatedStringForCauses := "[]StatusCause{" - for _, f := range this.Causes { - repeatedStringForCauses += strings.Replace(strings.Replace(f.String(), "StatusCause", "StatusCause", 1), `&`, ``, 1) + "," - } - repeatedStringForCauses += "}" s := strings.Join([]string{`&StatusDetails{`, `Name:` + fmt.Sprintf("%v", this.Name) + `,`, `Group:` + fmt.Sprintf("%v", this.Group) + `,`, `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Causes:` + repeatedStringForCauses + `,`, + `Causes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.Causes), "StatusCause", "StatusCause", 1), `&`, ``, 1) + `,`, `RetryAfterSeconds:` + fmt.Sprintf("%v", this.RetryAfterSeconds) + `,`, `UID:` + fmt.Sprintf("%v", this.UID) + `,`, `}`, }, "") return s } -func (this *TableOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TableOptions{`, - `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, - `}`, - }, "") - return s -} func (this *Timestamp) String() string { if this == nil { return "nil" @@ -4608,7 +2748,6 @@ func (this *UpdateOptions) String() string { } s := strings.Join([]string{`&UpdateOptions{`, `DryRun:` + fmt.Sprintf("%v", this.DryRun) + `,`, - `FieldManager:` + fmt.Sprintf("%v", this.FieldManager) + `,`, `}`, }, "") return s @@ -4619,7 +2758,7 @@ func (this *WatchEvent) String() string { } s := strings.Join([]string{`&WatchEvent{`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Object), "RawExtension", "runtime.RawExtension", 1), `&`, ``, 1) + `,`, + `Object:` + strings.Replace(strings.Replace(this.Object.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, `}`, }, "") return s @@ -4647,7 +2786,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4675,7 +2814,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4685,9 +2824,6 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4707,7 +2843,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4716,9 +2852,6 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4741,7 +2874,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4750,9 +2883,6 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4774,7 +2904,7 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4783,9 +2913,6 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4803,9 +2930,6 @@ func (m *APIGroup) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4833,7 +2957,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift 
+ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4861,7 +2985,7 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -4870,9 +2994,6 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4890,9 +3011,6 @@ func (m *APIGroupList) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -4920,7 +3038,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4948,7 +3066,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -4958,9 +3076,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -4980,7 +3095,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5000,7 +3115,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5010,9 +3125,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5032,7 +3144,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5041,9 +3153,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5068,7 +3177,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5078,9 +3187,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5100,7 +3206,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5110,9 +3216,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5132,7 +3235,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + 
stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5142,9 +3245,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5164,7 +3264,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5174,9 +3274,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5196,7 +3293,7 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5206,46 +3303,11 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageVersionHash", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StorageVersionHash = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5255,9 +3317,6 @@ func (m *APIResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5285,7 +3344,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5313,7 +3372,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5323,9 +3382,6 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5345,7 +3401,7 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5354,9 +3410,6 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5374,9 +3427,6 @@ func (m *APIResourceList) Unmarshal(dAtA []byte) error { if skippy < 0 { return 
ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5404,7 +3454,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5432,7 +3482,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5442,9 +3492,6 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5464,7 +3511,7 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5473,9 +3520,6 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5493,9 +3537,6 @@ func (m *APIVersions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5523,7 +3564,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5551,7 +3592,7 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5561,19 +3602,16 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -5583,24 +3621,12 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldManager = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + m.IncludeUninitialized = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -5610,9 +3636,6 @@ func (m *CreateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5640,7 +3663,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := 
dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5668,7 +3691,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5688,7 +3711,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5697,9 +3720,6 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5724,7 +3744,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5745,7 +3765,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5755,9 +3775,6 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5778,7 +3795,7 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5788,9 +3805,6 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -5805,9 +3819,6 @@ func (m *DeleteOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5835,7 +3846,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5863,7 +3874,7 @@ func (m *Duration) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Duration |= time.Duration(b&0x7F) << shift + m.Duration |= (time.Duration(b) & 0x7F) << shift if b < 0x80 { break } @@ -5877,9 +3888,6 @@ func (m *Duration) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -5907,7 +3915,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -5935,7 +3943,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5955,7 +3963,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -5970,9 +3978,6 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l 
{ return io.ErrUnexpectedEOF } @@ -5985,7 +3990,7 @@ func (m *ExportOptions) Unmarshal(dAtA []byte) error { } return nil } -func (m *FieldsV1) Unmarshal(dAtA []byte) error { +func (m *GetOptions) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6000,7 +4005,7 @@ func (m *FieldsV1) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6008,17 +4013,17 @@ func (m *FieldsV1) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: FieldsV1: wiretype end group for non-group") + return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: FieldsV1: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Raw", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) } - var byteLen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6028,26 +4033,41 @@ func (m *FieldsV1) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Raw = append(m.Raw[:0], dAtA[iNdEx:postIndex]...) 
- if m.Raw == nil { - m.Raw = []byte{} - } + m.ResourceVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.IncludeUninitialized = bool(v != 0) default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -6057,9 +4077,6 @@ func (m *FieldsV1) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6072,7 +4089,7 @@ func (m *FieldsV1) Unmarshal(dAtA []byte) error { } return nil } -func (m *GetOptions) Unmarshal(dAtA []byte) error { +func (m *GroupKind) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6087,7 +4104,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6095,15 +4112,15 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetOptions: wiretype end group for non-group") + return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetOptions: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6115,7 +4132,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6125,13 +4142,39 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Group = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.ResourceVersion = string(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6142,9 +4185,6 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6157,7 +4197,7 @@ func (m *GetOptions) Unmarshal(dAtA []byte) error { } return 
nil } -func (m *GroupKind) Unmarshal(dAtA []byte) error { +func (m *GroupResource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6172,7 +4212,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6180,10 +4220,10 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupKind: wiretype end group for non-group") + return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupKind: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -6200,7 +4240,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6210,9 +4250,6 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6220,7 +4257,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6232,7 +4269,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6242,13 +4279,10 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6259,9 +4293,6 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6274,7 +4305,7 @@ func (m *GroupKind) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupResource) Unmarshal(dAtA []byte) error { +func (m *GroupVersion) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6289,7 +4320,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6297,10 +4328,10 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupResource: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupResource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -6317,7 +4348,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { 
} b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6327,9 +4358,6 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6337,7 +4365,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6349,7 +4377,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6359,13 +4387,10 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) + m.Version = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6376,9 +4401,6 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6391,7 +4413,7 @@ func (m *GroupResource) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersion) Unmarshal(dAtA []byte) error { +func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6406,7 +4428,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6414,15 +4436,15 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersion: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersion: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6434,7 +4456,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6444,13 +4466,10 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(dAtA[iNdEx:postIndex]) + m.GroupVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6466,7 +4485,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) 
<< shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6476,9 +4495,6 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6493,9 +4509,6 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6508,7 +4521,7 @@ func (m *GroupVersion) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { +func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6523,7 +4536,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6531,15 +4544,15 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionForDiscovery: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionForDiscovery: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GroupVersion", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6551,7 +4564,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6561,13 +4574,10 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.GroupVersion = string(dAtA[iNdEx:postIndex]) + m.Group = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 2: if wireType != 2 { @@ -6583,7 +4593,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6593,13 +4603,39 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return 
io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6610,9 +4646,6 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6625,7 +4658,7 @@ func (m *GroupVersionForDiscovery) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { +func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6640,7 +4673,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6648,10 +4681,10 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionKind: wiretype end group for non-group") + return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionKind: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: @@ -6668,7 +4701,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6678,9 +4711,6 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6700,7 +4730,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6710,9 +4740,6 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -6720,7 +4747,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { iNdEx = postIndex case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6732,7 +4759,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6742,13 +4769,10 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Kind = string(dAtA[iNdEx:postIndex]) + m.Resource = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -6759,9 +4783,6 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return 
io.ErrUnexpectedEOF } @@ -6774,7 +4795,7 @@ func (m *GroupVersionKind) Unmarshal(dAtA []byte) error { } return nil } -func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { +func (m *Initializer) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -6789,7 +4810,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6797,15 +4818,15 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GroupVersionResource: wiretype end group for non-group") + return fmt.Errorf("proto: Initializer: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GroupVersionResource: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -6817,7 +4838,7 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6827,19 +4848,66 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Group = string(dAtA[iNdEx:postIndex]) + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Initializers) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Initializers: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Initializers: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Pending", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6849,29 +4917,28 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen 
< 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Version = string(dAtA[iNdEx:postIndex]) + m.Pending = append(m.Pending, Initializer{}) + if err := m.Pending[len(m.Pending)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex - case 3: + case 2: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6881,23 +4948,24 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Resource = string(dAtA[iNdEx:postIndex]) + if m.Result == nil { + m.Result = &Status{} + } + if err := m.Result.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -6908,9 +4976,6 @@ func (m *GroupVersionResource) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -6938,7 +5003,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -6966,7 +5031,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -6975,20 +5040,54 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey if m.MatchLabels == nil { m.MatchLabels = make(map[string]string) } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 + if iNdEx < postIndex { + var valuekey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -6998,86 +5097,41 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= 
uint64(b&0x7F) << shift + valuekey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated } - if postStringIndexmapvalue > l { + if iNdEx >= l { return io.ErrUnexpectedEOF } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break } - iNdEx += skippy } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.MatchLabels[mapkey] = mapvalue + } else { + var mapvalue string + m.MatchLabels[mapkey] = mapvalue } - m.MatchLabels[mapkey] = mapvalue iNdEx = postIndex case 2: if wireType != 2 { @@ -7093,7 +5147,7 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7102,9 +5156,6 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7122,9 +5173,6 @@ func (m *LabelSelector) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7152,7 +5200,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7180,7 +5228,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b 
:= dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7190,9 +5238,6 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7212,7 +5257,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7222,9 +5267,6 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7244,7 +5286,7 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7254,9 +5296,6 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7271,9 +5310,6 @@ func (m *LabelSelectorRequirement) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7301,7 +5337,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7329,7 +5365,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7338,9 +5374,6 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7362,7 +5395,7 @@ func (m *List) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7371,13 +5404,10 @@ func (m *List) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, runtime.RawExtension{}) + m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } @@ -7391,9 +5421,6 @@ func (m *List) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7421,7 +5448,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7449,7 +5476,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7459,9 +5486,6 @@ func (m 
*ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7481,7 +5505,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7491,9 +5515,6 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7513,7 +5534,7 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7523,34 +5544,11 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RemainingItemCount", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.RemainingItemCount = &v default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -7560,9 +5558,6 @@ func (m *ListMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -7590,7 +5585,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7618,7 +5613,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7628,9 +5623,6 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7650,7 +5642,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7660,9 +5652,6 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7682,7 +5671,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -7702,7 +5691,7 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -7712,9 +5701,6 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if 
postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -7734,66 +5720,15 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } m.TimeoutSeconds = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) - } - m.Limit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Limit |= int64(b&0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Continue = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: + case 6: if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowWatchBookmarks", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field IncludeUninitialized", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -7805,202 +5740,17 @@ func (m *ListOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - m.AllowWatchBookmarks = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ManagedFieldsEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ManagedFieldsEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Manager", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Manager = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operation = ManagedFieldsOperationType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Time == nil { - m.Time = &Time{} - } - if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldsType", wireType) + m.IncludeUninitialized = bool(v != 0) + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType) } - var stringLen uint64 + m.Limit = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8010,29 +5760,16 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + m.Limit |= (int64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldsType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: + case 8: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldsV1", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Continue", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8042,27 +5779,20 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) 
error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if m.FieldsV1 == nil { - m.FieldsV1 = &FieldsV1{} - } - if err := m.FieldsV1.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Continue = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex default: iNdEx = preIndex @@ -8073,9 +5803,6 @@ func (m *ManagedFieldsEntry) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -8103,7 +5830,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8131,7 +5858,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8141,9 +5868,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8163,7 +5887,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8173,9 +5897,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8195,7 +5916,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8205,9 +5926,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8227,7 +5945,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8237,9 +5955,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8259,7 +5974,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8269,9 +5984,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8291,7 +6003,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) 
<< shift if b < 0x80 { break } @@ -8301,9 +6013,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8323,7 +6032,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Generation |= int64(b&0x7F) << shift + m.Generation |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8342,7 +6051,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8351,9 +6060,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8375,7 +6081,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8384,9 +6090,6 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -8411,7 +6114,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int64(b&0x7F) << shift + v |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -8431,359 +6134,7 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Labels == nil { - m.Labels = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue 
- if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Labels[mapkey] = mapvalue - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Annotations == nil { - m.Annotations = make(map[string]string) - } - var mapkey string - var mapvalue string - for iNdEx < postIndex { - entryPreIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - if fieldNum == 1 { - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - } else if fieldNum == 2 { - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue < 0 { - return ErrInvalidLengthGenerated - } - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - } else { - iNdEx = entryPreIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > postIndex { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - m.Annotations[mapkey] = mapvalue - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) - if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ManagedFields", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -8792,75 +6143,10 @@ func (m *ObjectMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.ManagedFields = append(m.ManagedFields, ManagedFieldsEntry{}) - if err := m.ManagedFields[len(m.ManagedFields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *OwnerReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8870,29 +6156,12 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 + var stringLenmapkey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8902,29 +6171,76 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLenmapkey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - m.Name = string(dAtA[iNdEx:postIndex]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Labels == nil { + m.Labels = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Labels[mapkey] = mapvalue + } else { + var mapvalue string + m.Labels[mapkey] = mapvalue + } iNdEx = postIndex - case 4: + case 12: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Annotations", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8934,29 +6250,19 @@ 
func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if msglen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 + var keykey uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8966,29 +6272,91 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + keykey |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { return io.ErrUnexpectedEOF } - m.APIVersion = string(dAtA[iNdEx:postIndex]) + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.Annotations == nil { + m.Annotations = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.Annotations[mapkey] = mapvalue + } else { + var mapvalue string + m.Annotations[mapkey] = mapvalue + } iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OwnerReferences", wireType) } - var v int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -8998,18 +6366,28 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.Controller = &b - case 7: - if wireType != 0 { 
- return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) + if msglen < 0 { + return ErrInvalidLengthGenerated } - var v int + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.OwnerReferences = append(m.OwnerReferences, OwnerReference{}) + if err := m.OwnerReferences[len(m.OwnerReferences)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) + } + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9019,69 +6397,53 @@ func (m *OwnerReference) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - b := bool(v != 0) - m.BlockOwnerDeletion = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated + m.Finalizers = append(m.Finalizers, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ClusterName", wireType) } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ClusterName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 16: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) } var msglen int for shift := uint(0); ; shift += 7 { @@ -9093,7 +6455,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9102,13 +6464,13 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return 
io.ErrUnexpectedEOF } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + if m.Initializers == nil { + m.Initializers = &Initializers{} + } + if err := m.Initializers.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -9121,9 +6483,6 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9136,7 +6495,7 @@ func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { } return nil } -func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { +func (m *OwnerReference) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -9151,7 +6510,7 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9159,17 +6518,17 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + return fmt.Errorf("proto: OwnerReference: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: OwnerReference: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9179,30 +6538,26 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Kind = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) } - var msglen int + var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9212,135 +6567,53 @@ func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } + postIndex := iNdEx + intStringLen if postIndex > l { return io.ErrUnexpectedEOF } - m.Items = append(m.Items, PartialObjectMetadata{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } + m.Name = string(dAtA[iNdEx:postIndex]) iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Patch) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Patch: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) } - if skippy < 0 { - return ErrInvalidLengthGenerated + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) < 0 { + intStringLen := int(stringLen) + if intStringLen < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PatchOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { + postIndex := iNdEx + intStringLen + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PatchOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PatchOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DryRun", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -9352,7 +6625,7 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9362,17 +6635,14 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } - m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) + m.APIVersion = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - case 2: + case 6: 
if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Force", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Controller", wireType) } var v int for shift := uint(0); ; shift += 7 { @@ -9384,18 +6654,18 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - v |= int(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } b := bool(v != 0) - m.Force = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) + m.Controller = &b + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BlockOwnerDeletion", wireType) } - var stringLen uint64 + var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowGenerated @@ -9405,24 +6675,63 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + v |= (int(b) & 0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated + b := bool(v != 0) + m.BlockOwnerDeletion = &b + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err } - postIndex := iNdEx + intStringLen - if postIndex < 0 { + if skippy < 0 { return ErrInvalidLengthGenerated } - if postIndex > l { + if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.FieldManager = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Patch) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Patch: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Patch: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9432,9 +6741,6 @@ func (m *PatchOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9462,7 +6768,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9490,7 +6796,7 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9500,48 +6806,12 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) m.UID = &s iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ResourceVersion = &s - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -9551,9 +6821,6 @@ func (m *Preconditions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9581,7 +6848,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9609,7 +6876,7 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9619,9 +6886,6 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9636,9 +6900,6 @@ func (m *RootPaths) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9666,7 +6927,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9694,7 +6955,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9704,9 +6965,6 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9726,7 +6984,7 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9736,9 +6994,6 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9753,9 +7008,6 @@ func (m *ServerAddressByClientCIDR) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -9783,7 +7035,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9811,7 +7063,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { 
break } @@ -9820,9 +7072,6 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9844,7 +7093,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9854,9 +7103,6 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9876,7 +7122,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9886,9 +7132,6 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9908,7 +7151,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -9918,9 +7161,6 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9940,7 +7180,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -9949,9 +7189,6 @@ func (m *Status) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -9976,7 +7213,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Code |= int32(b&0x7F) << shift + m.Code |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -9990,9 +7227,6 @@ func (m *Status) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10020,7 +7254,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10048,7 +7282,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10058,9 +7292,6 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10080,7 +7311,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10090,9 +7321,6 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return 
io.ErrUnexpectedEOF } @@ -10112,7 +7340,7 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10122,9 +7350,6 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10139,9 +7364,6 @@ func (m *StatusCause) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10169,7 +7391,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10197,7 +7419,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10207,9 +7429,6 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10229,7 +7448,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10239,9 +7458,6 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10261,7 +7477,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10271,9 +7487,6 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10293,7 +7506,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10302,9 +7515,6 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10327,7 +7537,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.RetryAfterSeconds |= int32(b&0x7F) << shift + m.RetryAfterSeconds |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -10346,7 +7556,7 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10356,9 +7566,6 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10373,94 +7580,6 @@ func (m *StatusDetails) Unmarshal(dAtA []byte) error { if skippy < 0 { 
return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TableOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10488,7 +7607,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10516,7 +7635,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Seconds |= int64(b&0x7F) << shift + m.Seconds |= (int64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10535,7 +7654,7 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Nanos |= int32(b&0x7F) << shift + m.Nanos |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -10549,9 +7668,6 @@ func (m *Timestamp) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10579,7 +7695,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10607,7 +7723,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10617,9 +7733,6 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10639,7 +7752,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10649,9 +7762,6 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10666,9 +7776,6 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10696,7 +7803,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10724,7 +7831,7 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10734,46 +7841,11 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } m.DryRun = append(m.DryRun, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldManager", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FieldManager = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGenerated(dAtA[iNdEx:]) @@ -10783,9 +7855,6 @@ func (m *UpdateOptions) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10813,7 +7882,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10841,7 +7910,7 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10851,9 +7920,6 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10868,9 +7934,6 @@ func (m *Verbs) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -10898,7 +7961,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10926,7 +7989,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= 
uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -10936,9 +7999,6 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10958,7 +8018,7 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -10967,9 +8027,6 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -10986,9 +8043,6 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -11004,7 +8058,6 @@ func (m *WatchEvent) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -11036,8 +8089,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -11054,34 +8109,216 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 2465 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x6c, 0x23, 0x49, + 0xf5, 0x4f, 0xdb, 0xb1, 0x63, 0x3f, 0xc7, 0xf9, 0xa8, 0xcd, 0xfe, 0xff, 0xde, 0x08, 0xec, 0x6c, + 0x2f, 0x5a, 0x65, 0x61, 0xd6, 0x26, 0x59, 0x58, 0x0d, 0x03, 0x2c, 0xc4, 0x71, 0x66, 0x14, 0xed, + 0x64, 0xc6, 0xaa, 0xec, 
0x0c, 0x62, 0x18, 0x21, 0x3a, 0xdd, 0x15, 0xa7, 0x49, 0xbb, 0xdb, 0x5b, + 0xd5, 0xce, 0x8c, 0xe1, 0xc0, 0x1e, 0x40, 0x70, 0x40, 0x68, 0x8e, 0x9c, 0xd0, 0x8e, 0xe0, 0xc2, + 0x95, 0x13, 0x17, 0x38, 0x21, 0x31, 0xc7, 0x91, 0xb8, 0xec, 0x01, 0x59, 0x3b, 0xe6, 0xc0, 0x09, + 0x71, 0xcf, 0x09, 0x55, 0x75, 0x75, 0x75, 0xb7, 0x1d, 0x4f, 0xda, 0x3b, 0xbb, 0x88, 0x53, 0xd2, + 0xef, 0xe3, 0xf7, 0x5e, 0x55, 0xbd, 0x7a, 0xef, 0xd5, 0x33, 0x1c, 0x9c, 0x5e, 0x65, 0x75, 0xdb, + 0x6b, 0x9c, 0xf6, 0x8f, 0x08, 0x75, 0x89, 0x4f, 0x58, 0xe3, 0x8c, 0xb8, 0x96, 0x47, 0x1b, 0x92, + 0x61, 0xf4, 0xec, 0xae, 0x61, 0x9e, 0xd8, 0x2e, 0xa1, 0x83, 0x46, 0xef, 0xb4, 0xc3, 0x09, 0xac, + 0xd1, 0x25, 0xbe, 0xd1, 0x38, 0xdb, 0x6a, 0x74, 0x88, 0x4b, 0xa8, 0xe1, 0x13, 0xab, 0xde, 0xa3, + 0x9e, 0xef, 0xa1, 0x2f, 0x04, 0x5a, 0xf5, 0xb8, 0x56, 0xbd, 0x77, 0xda, 0xe1, 0x04, 0x56, 0xe7, + 0x5a, 0xf5, 0xb3, 0xad, 0xf5, 0x37, 0x3b, 0xb6, 0x7f, 0xd2, 0x3f, 0xaa, 0x9b, 0x5e, 0xb7, 0xd1, + 0xf1, 0x3a, 0x5e, 0x43, 0x28, 0x1f, 0xf5, 0x8f, 0xc5, 0x97, 0xf8, 0x10, 0xff, 0x05, 0xa0, 0xeb, + 0x53, 0x5d, 0xa1, 0x7d, 0xd7, 0xb7, 0xbb, 0x64, 0xdc, 0x8b, 0xf5, 0xb7, 0x2f, 0x53, 0x60, 0xe6, + 0x09, 0xe9, 0x1a, 0xe3, 0x7a, 0xfa, 0x5f, 0xb3, 0x50, 0xd8, 0x69, 0xef, 0xdf, 0xa0, 0x5e, 0xbf, + 0x87, 0x36, 0x60, 0xde, 0x35, 0xba, 0xa4, 0xa2, 0x6d, 0x68, 0x9b, 0xc5, 0xe6, 0xe2, 0x93, 0x61, + 0x6d, 0x6e, 0x34, 0xac, 0xcd, 0xdf, 0x32, 0xba, 0x04, 0x0b, 0x0e, 0x72, 0xa0, 0x70, 0x46, 0x28, + 0xb3, 0x3d, 0x97, 0x55, 0x32, 0x1b, 0xd9, 0xcd, 0xd2, 0xf6, 0x3b, 0xf5, 0x34, 0xeb, 0xaf, 0x0b, + 0x03, 0x77, 0x03, 0xd5, 0xeb, 0x1e, 0x6d, 0xd9, 0xcc, 0xf4, 0xce, 0x08, 0x1d, 0x34, 0x57, 0xa4, + 0x95, 0x82, 0x64, 0x32, 0xac, 0x2c, 0xa0, 0x9f, 0x6a, 0xb0, 0xd2, 0xa3, 0xe4, 0x98, 0x50, 0x4a, + 0x2c, 0xc9, 0xaf, 0x64, 0x37, 0xb4, 0x4f, 0xc1, 0x6c, 0x45, 0x9a, 0x5d, 0x69, 0x8f, 0xe1, 0xe3, + 0x09, 0x8b, 0xe8, 0xb7, 0x1a, 0xac, 0x33, 0x42, 0xcf, 0x08, 0xdd, 0xb1, 0x2c, 0x4a, 0x18, 0x6b, + 0x0e, 0x76, 0x1d, 0x9b, 0xb8, 0xfe, 0xee, 0x7e, 0x0b, 0xb3, 0xca, 0xbc, 0xd8, 0x87, 0x6f, 0xa5, + 0x73, 0xe8, 0x70, 0x1a, 0x4e, 0x53, 0x97, 0x1e, 0xad, 0x4f, 0x15, 0x61, 0xf8, 0x39, 0x6e, 0xe8, + 0xc7, 0xb0, 0x18, 0x1e, 0xe4, 0x4d, 0x9b, 0xf9, 0xe8, 0x2e, 0xe4, 0x3b, 0xfc, 0x83, 0x55, 0x34, + 0xe1, 0x60, 0x3d, 0x9d, 0x83, 0x21, 0x46, 0x73, 0x49, 0xfa, 0x93, 0x17, 0x9f, 0x0c, 0x4b, 0x34, + 0xfd, 0x4f, 0x59, 0x28, 0xed, 0xb4, 0xf7, 0x31, 0x61, 0x5e, 0x9f, 0x9a, 0x24, 0x45, 0xd0, 0x6c, + 0x03, 0xf0, 0xbf, 0xac, 0x67, 0x98, 0xc4, 0xaa, 0x64, 0x36, 0xb4, 0xcd, 0x42, 0x13, 0x49, 0x39, + 0xb8, 0xa5, 0x38, 0x38, 0x26, 0xc5, 0x51, 0x4f, 0x6d, 0xd7, 0x12, 0xa7, 0x1d, 0x43, 0x7d, 0xd7, + 0x76, 0x2d, 0x2c, 0x38, 0xe8, 0x26, 0xe4, 0xce, 0x08, 0x3d, 0xe2, 0xfb, 0xcf, 0x03, 0xe2, 0x4b, + 0xe9, 0x96, 0x77, 0x97, 0xab, 0x34, 0x8b, 0xa3, 0x61, 0x2d, 0x27, 0xfe, 0xc5, 0x01, 0x08, 0xaa, + 0x03, 0xb0, 0x13, 0x8f, 0xfa, 0xc2, 0x9d, 0x4a, 0x6e, 0x23, 0xbb, 0x59, 0x6c, 0x2e, 0x71, 0xff, + 0x0e, 0x15, 0x15, 0xc7, 0x24, 0xd0, 0x55, 0x58, 0x64, 0xb6, 0xdb, 0xe9, 0x3b, 0x06, 0xe5, 0x84, + 0x4a, 0x5e, 0xf8, 0xb9, 0x26, 0xfd, 0x5c, 0x3c, 0x8c, 0xf1, 0x70, 0x42, 0x92, 0x5b, 0x32, 0x0d, + 0x9f, 0x74, 0x3c, 0x6a, 0x13, 0x56, 0x59, 0x88, 0x2c, 0xed, 0x2a, 0x2a, 0x8e, 0x49, 0xa0, 0xd7, + 0x20, 0x27, 0x76, 0xbe, 0x52, 0x10, 0x26, 0xca, 0xd2, 0x44, 0x4e, 0x1c, 0x0b, 0x0e, 0x78, 0xe8, + 0x0d, 0x58, 0x90, 0xb7, 0xa6, 0x52, 0x14, 0x62, 0xcb, 0x52, 0x6c, 0x21, 0x0c, 0xeb, 0x90, 0xaf, + 0xff, 0x41, 0x83, 0xe5, 0xd8, 0xf9, 0x89, 0x58, 0xb9, 0x0a, 0x8b, 0x9d, 0xd8, 0x4d, 0x91, 0x67, + 0xa9, 0x56, 0x13, 0xbf, 0x45, 0x38, 0x21, 0x89, 
0x08, 0x14, 0xa9, 0x44, 0x0a, 0x33, 0xc2, 0x56, + 0xea, 0x40, 0x0b, 0x7d, 0x88, 0x2c, 0xc5, 0x88, 0x0c, 0x47, 0xc8, 0xfa, 0x3f, 0x35, 0x11, 0x74, + 0x61, 0x8e, 0x40, 0x9b, 0xb1, 0x3c, 0xa4, 0x89, 0x2d, 0x5c, 0x9c, 0x92, 0x43, 0x2e, 0xb9, 0xbc, + 0x99, 0xff, 0x89, 0xcb, 0x7b, 0xad, 0xf0, 0xeb, 0x0f, 0x6b, 0x73, 0x1f, 0xfc, 0x7d, 0x63, 0x4e, + 0xff, 0x99, 0x06, 0xe5, 0x5d, 0x4a, 0x0c, 0x9f, 0xdc, 0xee, 0xf9, 0x62, 0x05, 0x3a, 0xe4, 0x2d, + 0x3a, 0xc0, 0x7d, 0x57, 0xae, 0x14, 0xf8, 0xa5, 0x6c, 0x09, 0x0a, 0x96, 0x1c, 0xd4, 0x86, 0x35, + 0xdb, 0x35, 0x9d, 0xbe, 0x45, 0xee, 0xb8, 0xb6, 0x6b, 0xfb, 0xb6, 0xe1, 0xd8, 0x3f, 0x52, 0x97, + 0xed, 0x73, 0xd2, 0xbb, 0xb5, 0xfd, 0x0b, 0x64, 0xf0, 0x85, 0x9a, 0xfa, 0xcf, 0xb3, 0x50, 0x6e, + 0x11, 0x87, 0x44, 0x7e, 0x5c, 0x07, 0xd4, 0xa1, 0x86, 0x49, 0xda, 0x84, 0xda, 0x9e, 0x75, 0x48, + 0x4c, 0xcf, 0xb5, 0x98, 0x08, 0x95, 0x6c, 0xf3, 0xff, 0x46, 0xc3, 0x1a, 0xba, 0x31, 0xc1, 0xc5, + 0x17, 0x68, 0x20, 0x07, 0xca, 0x3d, 0x2a, 0xfe, 0xb7, 0x7d, 0x59, 0x48, 0xf8, 0x05, 0x7e, 0x2b, + 0xdd, 0x19, 0xb4, 0xe3, 0xaa, 0xcd, 0xd5, 0xd1, 0xb0, 0x56, 0x4e, 0x90, 0x70, 0x12, 0x1c, 0x7d, + 0x1b, 0x56, 0x3c, 0xda, 0x3b, 0x31, 0xdc, 0x16, 0xe9, 0x11, 0xd7, 0x22, 0xae, 0xcf, 0x44, 0x52, + 0x29, 0x34, 0xd7, 0x78, 0xfa, 0xbf, 0x3d, 0xc6, 0xc3, 0x13, 0xd2, 0xe8, 0x1e, 0xac, 0xf6, 0xa8, + 0xd7, 0x33, 0x3a, 0x06, 0x47, 0x6c, 0x7b, 0x8e, 0x6d, 0x0e, 0x44, 0xd2, 0x29, 0x36, 0xaf, 0x8c, + 0x86, 0xb5, 0xd5, 0xf6, 0x38, 0xf3, 0x7c, 0x58, 0x7b, 0x49, 0x6c, 0x1d, 0xa7, 0x44, 0x4c, 0x3c, + 0x09, 0x13, 0x3b, 0xdb, 0xdc, 0xb4, 0xb3, 0xd5, 0xf7, 0xa1, 0xd0, 0xea, 0x53, 0xa1, 0x85, 0xbe, + 0x09, 0x05, 0x4b, 0xfe, 0x2f, 0x77, 0xfe, 0xd5, 0xb0, 0x7e, 0x86, 0x32, 0xe7, 0xc3, 0x5a, 0x99, + 0x57, 0xfc, 0x7a, 0x48, 0xc0, 0x4a, 0x45, 0xbf, 0x0f, 0xe5, 0xbd, 0x87, 0x3d, 0x8f, 0xfa, 0xe1, + 0x99, 0xbe, 0x0e, 0x79, 0x22, 0x08, 0x02, 0xad, 0x10, 0x25, 0xfd, 0x40, 0x0c, 0x4b, 0x2e, 0x4f, + 0x42, 0xe4, 0xa1, 0x61, 0xfa, 0x32, 0xa0, 0x54, 0x12, 0xda, 0xe3, 0x44, 0x1c, 0xf0, 0xf4, 0xc7, + 0x1a, 0xc0, 0x0d, 0xa2, 0xb0, 0x77, 0x60, 0x39, 0xbc, 0xc0, 0xc9, 0xbc, 0xf2, 0xff, 0x52, 0x7b, + 0x19, 0x27, 0xd9, 0x78, 0x5c, 0xfe, 0x33, 0x08, 0xeb, 0xfb, 0x50, 0x14, 0xd9, 0x8c, 0x17, 0x92, + 0x28, 0xb5, 0x6a, 0xcf, 0x49, 0xad, 0x61, 0x25, 0xca, 0x4c, 0xab, 0x44, 0xb1, 0xcb, 0xeb, 0x40, + 0x39, 0xd0, 0x0d, 0x8b, 0x63, 0x2a, 0x0b, 0x57, 0xa0, 0x10, 0x2e, 0x5c, 0x5a, 0x51, 0x4d, 0x51, + 0x08, 0x84, 0x95, 0x44, 0xcc, 0xda, 0x09, 0x24, 0x32, 0x73, 0x3a, 0x63, 0xb1, 0x4a, 0x91, 0x79, + 0x7e, 0xa5, 0x88, 0x59, 0xfa, 0x09, 0x54, 0xa6, 0x75, 0x52, 0x2f, 0x50, 0x3b, 0xd2, 0xbb, 0xa2, + 0xff, 0x4a, 0x83, 0x95, 0x38, 0x52, 0xfa, 0xe3, 0x4b, 0x6f, 0xe4, 0xf2, 0x9e, 0x23, 0xb6, 0x23, + 0xbf, 0xd1, 0x60, 0x2d, 0xb1, 0xb4, 0x99, 0x4e, 0x7c, 0x06, 0xa7, 0xe2, 0xc1, 0x91, 0x9d, 0x21, + 0x38, 0x1a, 0x50, 0xda, 0x57, 0x71, 0x4f, 0x2f, 0xef, 0xd2, 0xf4, 0x3f, 0x6b, 0xb0, 0x18, 0xd3, + 0x60, 0xe8, 0x3e, 0x2c, 0xf0, 0x1c, 0x68, 0xbb, 0x1d, 0xd9, 0x41, 0xa6, 0x2c, 0xec, 0x31, 0x90, + 0x68, 0x5d, 0xed, 0x00, 0x09, 0x87, 0x90, 0xa8, 0x0d, 0x79, 0x4a, 0x58, 0xdf, 0xf1, 0x65, 0xfa, + 0xbf, 0x92, 0xb2, 0x04, 0xfb, 0x86, 0xdf, 0x67, 0x41, 0x9e, 0xc4, 0x42, 0x1f, 0x4b, 0x1c, 0xfd, + 0x6f, 0x19, 0x28, 0xdf, 0x34, 0x8e, 0x88, 0x73, 0x48, 0x1c, 0x62, 0xfa, 0x1e, 0x45, 0x3f, 0x86, + 0x52, 0xd7, 0xf0, 0xcd, 0x13, 0x41, 0x0d, 0xfb, 0xe0, 0x56, 0x3a, 0x43, 0x09, 0xa4, 0xfa, 0x41, + 0x04, 0xb3, 0xe7, 0xfa, 0x74, 0xd0, 0x7c, 0x49, 0x2e, 0xac, 0x14, 0xe3, 0xe0, 0xb8, 0x35, 0xf1, + 0x78, 0x11, 0xdf, 0x7b, 0x0f, 0x7b, 0xbc, 0xe0, 0xcf, 0xfe, 0x66, 0x4a, 
0xb8, 0x80, 0xc9, 0xfb, + 0x7d, 0x9b, 0x92, 0x2e, 0x71, 0xfd, 0xe8, 0xf1, 0x72, 0x30, 0x86, 0x8f, 0x27, 0x2c, 0xae, 0xbf, + 0x03, 0x2b, 0xe3, 0xce, 0xa3, 0x15, 0xc8, 0x9e, 0x92, 0x41, 0x10, 0x0b, 0x98, 0xff, 0x8b, 0xd6, + 0x20, 0x77, 0x66, 0x38, 0x7d, 0x99, 0x7f, 0x70, 0xf0, 0x71, 0x2d, 0x73, 0x55, 0xd3, 0x7f, 0xa7, + 0x41, 0x65, 0x9a, 0x23, 0xe8, 0xf3, 0x31, 0xa0, 0x66, 0x49, 0x7a, 0x95, 0x7d, 0x97, 0x0c, 0x02, + 0xd4, 0x3d, 0x28, 0x78, 0x3d, 0xfe, 0xdc, 0xf4, 0xa8, 0x8c, 0xf3, 0x37, 0xc2, 0xd8, 0xbd, 0x2d, + 0xe9, 0xe7, 0xc3, 0xda, 0xcb, 0x09, 0xf8, 0x90, 0x81, 0x95, 0x2a, 0x2f, 0x92, 0xc2, 0x1f, 0x5e, + 0xb8, 0x55, 0x91, 0xbc, 0x2b, 0x28, 0x58, 0x72, 0xf4, 0x3f, 0x6a, 0x30, 0x2f, 0x5a, 0xd9, 0xfb, + 0x50, 0xe0, 0xfb, 0x67, 0x19, 0xbe, 0x21, 0xfc, 0x4a, 0xfd, 0xf0, 0xe1, 0xda, 0x07, 0xc4, 0x37, + 0xa2, 0xfb, 0x15, 0x52, 0xb0, 0x42, 0x44, 0x18, 0x72, 0xb6, 0x4f, 0xba, 0xe1, 0x41, 0xbe, 0x39, + 0x15, 0x5a, 0x3e, 0xbb, 0xeb, 0xd8, 0x78, 0xb0, 0xf7, 0xd0, 0x27, 0x2e, 0x3f, 0x8c, 0x28, 0x19, + 0xec, 0x73, 0x0c, 0x1c, 0x40, 0xe9, 0xbf, 0xd7, 0x40, 0x99, 0xe2, 0xd7, 0x9d, 0x11, 0xe7, 0xf8, + 0xa6, 0xed, 0x9e, 0xca, 0x6d, 0x55, 0xee, 0x1c, 0x4a, 0x3a, 0x56, 0x12, 0x17, 0x95, 0xd8, 0xcc, + 0x8c, 0x25, 0xf6, 0x0a, 0x14, 0x4c, 0xcf, 0xf5, 0x6d, 0xb7, 0x3f, 0x91, 0x5f, 0x76, 0x25, 0x1d, + 0x2b, 0x09, 0xfd, 0x69, 0x16, 0x4a, 0xdc, 0xd7, 0xb0, 0xc6, 0x7f, 0x1d, 0xca, 0x4e, 0xfc, 0xf4, + 0xa4, 0xcf, 0x2f, 0x4b, 0x88, 0xe4, 0x7d, 0xc4, 0x49, 0x59, 0xae, 0x7c, 0x6c, 0x13, 0xc7, 0x52, + 0xca, 0x99, 0xa4, 0xf2, 0xf5, 0x38, 0x13, 0x27, 0x65, 0x79, 0x9e, 0x7d, 0xc0, 0xe3, 0x5a, 0x36, + 0x73, 0x6a, 0x6b, 0xbf, 0xc3, 0x89, 0x38, 0xe0, 0x5d, 0xb4, 0x3f, 0xf3, 0x33, 0xee, 0xcf, 0x35, + 0x58, 0xe2, 0x07, 0xe9, 0xf5, 0xfd, 0xb0, 0xe3, 0xcd, 0x89, 0xbe, 0x0b, 0x8d, 0x86, 0xb5, 0xa5, + 0xf7, 0x12, 0x1c, 0x3c, 0x26, 0x39, 0xb5, 0x7d, 0xc9, 0x7f, 0xd2, 0xf6, 0x85, 0xaf, 0xda, 0xb1, + 0xbb, 0xb6, 0x5f, 0x59, 0x10, 0x4e, 0xa8, 0x55, 0xdf, 0xe4, 0x44, 0x1c, 0xf0, 0x12, 0x47, 0x5a, + 0xb8, 0xf4, 0x48, 0xdf, 0x87, 0xe2, 0x81, 0x6d, 0x52, 0x8f, 0xaf, 0x85, 0x17, 0x26, 0x96, 0x68, + 0xec, 0x55, 0x02, 0x0f, 0xd7, 0x18, 0xf2, 0xb9, 0x2b, 0xae, 0xe1, 0x7a, 0x41, 0xfb, 0x9e, 0x8b, + 0x5c, 0xb9, 0xc5, 0x89, 0x38, 0xe0, 0x5d, 0x5b, 0xe3, 0xf5, 0xe8, 0x17, 0x8f, 0x6b, 0x73, 0x8f, + 0x1e, 0xd7, 0xe6, 0x3e, 0x7c, 0x2c, 0x6b, 0xd3, 0xbf, 0x00, 0xe0, 0xf6, 0xd1, 0x0f, 0x89, 0x19, + 0xc4, 0xfc, 0xe5, 0x13, 0x04, 0xde, 0x63, 0xc8, 0xc1, 0x95, 0x78, 0x6d, 0x67, 0xc6, 0x7a, 0x8c, + 0x18, 0x0f, 0x27, 0x24, 0x51, 0x03, 0x8a, 0x6a, 0xaa, 0x20, 0xe3, 0x7b, 0x55, 0xaa, 0x15, 0xd5, + 0xe8, 0x01, 0x47, 0x32, 0x89, 0x0b, 0x38, 0x7f, 0xe9, 0x05, 0x6c, 0x42, 0xb6, 0x6f, 0x5b, 0x22, + 0x24, 0x8a, 0xcd, 0x2f, 0x87, 0x09, 0xf0, 0xce, 0x7e, 0xeb, 0x7c, 0x58, 0x7b, 0x75, 0xda, 0x48, + 0xce, 0x1f, 0xf4, 0x08, 0xab, 0xdf, 0xd9, 0x6f, 0x61, 0xae, 0x7c, 0x51, 0x90, 0xe6, 0x67, 0x0c, + 0xd2, 0x6d, 0x00, 0xb9, 0x6a, 0xae, 0x1d, 0xc4, 0x86, 0x9a, 0xb0, 0xdc, 0x50, 0x1c, 0x1c, 0x93, + 0x42, 0x0c, 0x56, 0x4d, 0xfe, 0xce, 0xb4, 0x3d, 0x97, 0x1f, 0x3d, 0xf3, 0x8d, 0x6e, 0x30, 0x63, + 0x28, 0x6d, 0x7f, 0x31, 0x5d, 0xc6, 0xe4, 0x6a, 0xcd, 0x57, 0xa4, 0x99, 0xd5, 0xdd, 0x71, 0x30, + 0x3c, 0x89, 0x8f, 0x3c, 0x58, 0xb5, 0xe4, 0xcb, 0x28, 0x32, 0x5a, 0x9c, 0xd9, 0xe8, 0xcb, 0xdc, + 0x60, 0x6b, 0x1c, 0x08, 0x4f, 0x62, 0xa3, 0xef, 0xc3, 0x7a, 0x48, 0x9c, 0x7c, 0x9e, 0x56, 0x40, + 0xec, 0x54, 0x95, 0x3f, 0xdc, 0x5b, 0x53, 0xa5, 0xf0, 0x73, 0x10, 0x90, 0x05, 0x79, 0x27, 0xe8, + 0x2e, 0x4a, 0xa2, 0x22, 0x7c, 0x23, 0xdd, 0x2a, 0xa2, 0xe8, 0xaf, 0xc7, 0xbb, 0x0a, 0xf5, 0xfc, + 
0x92, 0x0d, 0x85, 0xc4, 0x46, 0x0f, 0xa1, 0x64, 0xb8, 0xae, 0xe7, 0x1b, 0xc1, 0x83, 0x79, 0x51, + 0x98, 0xda, 0x99, 0xd9, 0xd4, 0x4e, 0x84, 0x31, 0xd6, 0xc5, 0xc4, 0x38, 0x38, 0x6e, 0x0a, 0x3d, + 0x80, 0x65, 0xef, 0x81, 0x4b, 0x28, 0x26, 0xc7, 0x84, 0x12, 0xd7, 0x24, 0xac, 0x52, 0x16, 0xd6, + 0xbf, 0x92, 0xd2, 0x7a, 0x42, 0x39, 0x0a, 0xe9, 0x24, 0x9d, 0xe1, 0x71, 0x2b, 0xa8, 0x0e, 0x70, + 0x6c, 0xbb, 0xb2, 0x17, 0xad, 0x2c, 0x45, 0x63, 0xb2, 0xeb, 0x8a, 0x8a, 0x63, 0x12, 0xe8, 0xab, + 0x50, 0x32, 0x9d, 0x3e, 0xf3, 0x49, 0x30, 0x8f, 0x5b, 0x16, 0x37, 0x48, 0xad, 0x6f, 0x37, 0x62, + 0xe1, 0xb8, 0x1c, 0x3a, 0x81, 0x45, 0x3b, 0xd6, 0xf4, 0x56, 0x56, 0x44, 0x2c, 0x6e, 0xcf, 0xdc, + 0xe9, 0xb2, 0xe6, 0x0a, 0xcf, 0x44, 0x71, 0x0a, 0x4e, 0x20, 0xaf, 0x7f, 0x0d, 0x4a, 0x9f, 0xb0, + 0x07, 0xe3, 0x3d, 0xdc, 0xf8, 0xd1, 0xcd, 0xd4, 0xc3, 0xfd, 0x25, 0x03, 0x4b, 0xc9, 0x0d, 0x57, + 0x6f, 0x1d, 0x6d, 0xea, 0x7c, 0x35, 0xcc, 0xca, 0xd9, 0xa9, 0x59, 0x59, 0x26, 0xbf, 0xf9, 0x17, + 0x49, 0x7e, 0xdb, 0x00, 0x46, 0xcf, 0x0e, 0xf3, 0x5e, 0x90, 0x47, 0x55, 0xe6, 0x8a, 0x26, 0x7e, + 0x38, 0x26, 0x25, 0x26, 0xa8, 0x9e, 0xeb, 0x53, 0xcf, 0x71, 0x08, 0x95, 0xc5, 0x34, 0x98, 0xa0, + 0x2a, 0x2a, 0x8e, 0x49, 0xa0, 0xeb, 0x80, 0x8e, 0x1c, 0xcf, 0x3c, 0x15, 0x5b, 0x10, 0xde, 0x73, + 0x91, 0x25, 0x0b, 0xc1, 0xe0, 0xaa, 0x39, 0xc1, 0xc5, 0x17, 0x68, 0xe8, 0x0b, 0x90, 0x6b, 0xf3, + 0xb6, 0x42, 0xbf, 0x0d, 0xc9, 0x99, 0x13, 0x7a, 0x27, 0xd8, 0x09, 0x4d, 0x0d, 0x85, 0x66, 0xdb, + 0x05, 0xfd, 0x0a, 0x14, 0xb1, 0xe7, 0xf9, 0x6d, 0xc3, 0x3f, 0x61, 0xa8, 0x06, 0xb9, 0x1e, 0xff, + 0x47, 0x8e, 0xfb, 0xc4, 0xac, 0x5a, 0x70, 0x70, 0x40, 0xd7, 0x7f, 0xa9, 0xc1, 0x2b, 0x53, 0xe7, + 0x8c, 0x7c, 0x47, 0x4d, 0xf5, 0x25, 0x5d, 0x52, 0x3b, 0x1a, 0xc9, 0xe1, 0x98, 0x14, 0xef, 0xc4, + 0x12, 0xc3, 0xc9, 0xf1, 0x4e, 0x2c, 0x61, 0x0d, 0x27, 0x65, 0xf5, 0x7f, 0x67, 0x20, 0x1f, 0x3c, + 0xcb, 0x3e, 0xe3, 0xe6, 0xfb, 0x75, 0xc8, 0x33, 0x61, 0x47, 0xba, 0xa7, 0xb2, 0x65, 0x60, 0x1d, + 0x4b, 0x2e, 0x6f, 0x62, 0xba, 0x84, 0x31, 0xa3, 0x13, 0x06, 0xaf, 0x6a, 0x62, 0x0e, 0x02, 0x32, + 0x0e, 0xf9, 0xe8, 0x6d, 0xfe, 0x0a, 0x35, 0x98, 0xea, 0x0b, 0xab, 0x21, 0x24, 0x16, 0xd4, 0xf3, + 0x61, 0x6d, 0x51, 0x82, 0x8b, 0x6f, 0x2c, 0xa5, 0xd1, 0x3d, 0x58, 0xb0, 0x88, 0x6f, 0xd8, 0x4e, + 0xd0, 0x0e, 0xa6, 0x9e, 0x5e, 0x06, 0x60, 0xad, 0x40, 0xb5, 0x59, 0xe2, 0x3e, 0xc9, 0x0f, 0x1c, + 0x02, 0xf2, 0x8b, 0x67, 0x7a, 0x56, 0xf0, 0x93, 0x42, 0x2e, 0xba, 0x78, 0xbb, 0x9e, 0x45, 0xb0, + 0xe0, 0xe8, 0x8f, 0x34, 0x28, 0x05, 0x48, 0xbb, 0x46, 0x9f, 0x11, 0xb4, 0xa5, 0x56, 0x11, 0x1c, + 0x77, 0x58, 0x93, 0xe7, 0xdf, 0x1b, 0xf4, 0xc8, 0xf9, 0xb0, 0x56, 0x14, 0x62, 0xfc, 0x43, 0x2d, + 0x20, 0xb6, 0x47, 0x99, 0x4b, 0xf6, 0xe8, 0x35, 0xc8, 0x89, 0xd6, 0x5b, 0x6e, 0xa6, 0x6a, 0xf4, + 0x44, 0x7b, 0x8e, 0x03, 0x9e, 0xfe, 0x71, 0x06, 0xca, 0x89, 0xc5, 0xa5, 0xe8, 0xea, 0xd4, 0xa8, + 0x24, 0x93, 0x62, 0xfc, 0x36, 0xfd, 0x87, 0xa0, 0xef, 0x42, 0xde, 0xe4, 0xeb, 0x0b, 0x7f, 0x89, + 0xdb, 0x9a, 0xe5, 0x28, 0xc4, 0xce, 0x44, 0x91, 0x24, 0x3e, 0x19, 0x96, 0x80, 0xe8, 0x06, 0xac, + 0x52, 0xe2, 0xd3, 0xc1, 0xce, 0xb1, 0x4f, 0x68, 0xbc, 0xff, 0xcf, 0x45, 0x7d, 0x0f, 0x1e, 0x17, + 0xc0, 0x93, 0x3a, 0x61, 0xaa, 0xcc, 0xbf, 0x40, 0xaa, 0xd4, 0x1d, 0x98, 0xff, 0x2f, 0xf6, 0xe8, + 0xdf, 0x83, 0x62, 0xd4, 0x45, 0x7d, 0xca, 0x26, 0xf5, 0x1f, 0x40, 0x81, 0x47, 0x63, 0xd8, 0xfd, + 0x5f, 0x52, 0x89, 0x92, 0x35, 0x22, 0x93, 0xa6, 0x46, 0xe8, 0x6f, 0x41, 0xf9, 0x4e, 0xcf, 0x9a, + 0xed, 0x57, 0x14, 0x7d, 0x1b, 0x82, 0x1f, 0x05, 0x79, 0x0a, 0x0e, 0x9e, 0xf9, 0xb1, 0x14, 0x1c, + 0x7f, 0xb3, 0x27, 0x7f, 
0xaf, 0x01, 0xf1, 0xe6, 0xdc, 0x3b, 0x23, 0xae, 0xcf, 0x57, 0xc3, 0x8f, + 0x6d, 0x7c, 0x35, 0xe2, 0xee, 0x09, 0x0e, 0xba, 0x03, 0x79, 0x4f, 0xb4, 0x64, 0x72, 0xf0, 0x35, + 0xe3, 0x0c, 0x41, 0x85, 0x6a, 0xd0, 0xd7, 0x61, 0x09, 0xd6, 0xdc, 0x7c, 0xf2, 0xac, 0x3a, 0xf7, + 0xf4, 0x59, 0x75, 0xee, 0xa3, 0x67, 0xd5, 0xb9, 0x0f, 0x46, 0x55, 0xed, 0xc9, 0xa8, 0xaa, 0x3d, + 0x1d, 0x55, 0xb5, 0x8f, 0x46, 0x55, 0xed, 0xe3, 0x51, 0x55, 0x7b, 0xf4, 0x8f, 0xea, 0xdc, 0xbd, + 0xcc, 0xd9, 0xd6, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xab, 0xec, 0x02, 0x4a, 0x00, 0x21, 0x00, + 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto index ba1194d..eb3237f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto @@ -92,16 +92,6 @@ message APIResource { // categories is a list of the grouped resources this resource belongs to (e.g. 'all') repeated string categories = 7; - - // The hash value of the storage version, the version this resource is - // converted to when written to the data store. Value must be treated - // as opaque by clients. Only equality comparison on the value is valid. - // This is an alpha feature and may change or be removed in the future. - // The field is populated by the apiserver only if the - // StorageVersionHash feature gate is enabled. - // This field will remain optional even if it graduates. - // +optional - optional string storageVersionHash = 10; } // APIResourceList is a list of APIResource, it is used to expose the name of the @@ -117,7 +107,7 @@ message APIResourceList { // APIVersions lists the versions that are available, to allow clients to // discover the API at /api, which is the root path of the legacy v1 API. -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object message APIVersions { @@ -144,12 +134,9 @@ message CreateOptions { // +optional repeated string dryRun = 1; - // fieldManager is a name associated with the actor or entity - // that is making these changes. The value must be less than or - // 128 characters long, and only contain printable characters, - // as defined by https://golang.org/pkg/unicode/#IsPrint. - // +optional - optional string fieldManager = 3; + // If IncludeUninitialized is specified, the object may be + // returned without completing initialization. + optional bool includeUninitialized = 2; } // DeleteOptions may be provided when deleting an API object. @@ -163,7 +150,6 @@ message DeleteOptions { // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be // returned. - // +k8s:conversion-gen=false // +optional optional Preconditions preconditions = 2; @@ -202,33 +188,14 @@ message Duration { } // ExportOptions is the query options to the standard REST get call. -// Deprecated. Planned for removal in 1.18. message ExportOptions { // Should this value be exported. Export strips fields that a user can not specify. - // Deprecated. Planned for removal in 1.18. optional bool export = 1; // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. - // Deprecated. Planned for removal in 1.18. optional bool exact = 2; } -// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. -// -// Each key is either a '.' 
representing the field itself, and will always map to an empty set, -// or a string representing a sub-field or item. The string will follow one of these four formats: -// 'f:', where is the name of a field in a struct, or key in a map -// 'v:', where is the exact json formatted value of a list item -// 'i:', where is position of a item in a list -// 'k:', where is a map of a list item's key fields to their unique values -// If a key maps to an empty Fields value, the field that key represents is part of the set. -// -// The exact format is defined in sigs.k8s.io/structured-merge-diff -message FieldsV1 { - // Raw is the underlying serialization of this object. - optional bytes Raw = 1; -} - // GetOptions is the standard query options to the standard REST get call. message GetOptions { // When specified: @@ -236,11 +203,15 @@ message GetOptions { // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. optional string resourceVersion = 1; + + // If true, partially initialized resources are included in the response. + // +optional + optional bool includeUninitialized = 2; } // GroupKind specifies a Group and a Kind, but does not force a version. This is useful for identifying // concepts during lookup stages without having partially valid types -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupKind { optional string group = 1; @@ -250,7 +221,7 @@ message GroupKind { // GroupResource specifies a Group and a Resource, but does not force a version. This is useful for identifying // concepts during lookup stages without having partially valid types -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupResource { optional string group = 1; @@ -259,7 +230,7 @@ message GroupResource { } // GroupVersion contains the "group" and the "version", which uniquely identifies the API. -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersion { optional string group = 1; @@ -280,7 +251,7 @@ message GroupVersionForDiscovery { // GroupVersionKind unambiguously identifies a kind. It doesn't anonymously include GroupVersion // to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionKind { optional string group = 1; @@ -292,7 +263,7 @@ message GroupVersionKind { // GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion // to avoid automatic coersion. It doesn't use a GroupVersion to avoid custom marshalling -// +// // +protobuf.options.(gogoproto.goproto_stringer)=false message GroupVersionResource { optional string group = 1; @@ -302,6 +273,27 @@ message GroupVersionResource { optional string resource = 3; } +// Initializer is information about an initializer that has not yet completed. +message Initializer { + // name of the process that is responsible for initializing this object. + optional string name = 1; +} + +// Initializers tracks the progress of initialization. +message Initializers { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. 
+ // +patchMergeKey=name + // +patchStrategy=merge + repeated Initializer pending = 1; + + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + optional Status result = 2; +} + // A label selector is a label query over a set of resources. The result of matchLabels and // matchExpressions are ANDed. An empty label selector matches all objects. A null // label selector matches no objects. @@ -340,7 +332,7 @@ message LabelSelectorRequirement { // List holds a list of objects, which may not be known by the server. message List { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; @@ -354,10 +346,6 @@ message ListMeta { // selfLink is a URL representing this object. // Populated by the system. // Read-only. - // - // DEPRECATED - // Kubernetes will stop propagating this field in 1.20 release and the field is planned - // to be removed in 1.21 release. // +optional optional string selfLink = 1; @@ -366,7 +354,7 @@ message ListMeta { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 2; @@ -378,18 +366,6 @@ message ListMeta { // identical to the value in the first response, unless you have received this token from an error // message. optional string continue = 3; - - // remainingItemCount is the number of subsequent items in the list which are not included in this - // list response. If the list request contained label or field selectors, then the number of - // remaining items is unknown and the field will be left unset and omitted during serialization. - // If the list is complete (either because it is not chunking or because this is the last chunk), - // then there are no more remaining items and this field will be left unset and omitted during - // serialization. - // Servers older than v1.15 do not set this field. - // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients - // should not rely on the remainingItemCount to be set or to be exact. - // +optional - optional int64 remainingItemCount = 4; } // ListOptions is the query options to a standard REST list call. @@ -404,22 +380,15 @@ message ListOptions { // +optional optional string fieldSelector = 2; + // If true, partially initialized resources are included in the response. + // +optional + optional bool includeUninitialized = 6; + // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional optional bool watch = 3; - // allowWatchBookmarks requests watch events with type "BOOKMARK". - // Servers that do not implement bookmarks may ignore this flag and - // bookmarks are sent at the server's discretion. Clients should not - // assume bookmarks are returned at any specific interval, nor may they - // assume the server will send any BOOKMARK event during a session. 
- // If this is not a watch, this field is ignored. - // If the feature gate WatchBookmarks is not enabled in apiserver, - // this field is ignored. - // +optional - optional bool allowWatchBookmarks = 9; - // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -442,7 +411,7 @@ message ListOptions { // more results are available. Servers may choose not to support the limit argument and will return // all of the available results. If limit is specified and the continue field is empty, clients may // assume that no more results are available. This field is not supported if watch is true. - // + // // The server guarantees that the objects returned when using continue will be identical to issuing // a single list call without a limit - that is, no objects created, modified, or deleted after the // first request is issued will be included in any subsequent continued requests. This is sometimes @@ -463,43 +432,14 @@ message ListOptions { // a list starting from the next key, but from the latest snapshot, which is inconsistent from the // previous list results - objects that are created, modified, or deleted after the first list request // will be included in the response, as long as their keys are after the "next key". - // + // // This field is not supported when watch is true. Clients may start a watch from the last // resourceVersion value returned by the server and not miss any modifications. optional string continue = 8; } -// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource -// that the fieldset applies to. -message ManagedFieldsEntry { - // Manager is an identifier of the workflow managing these fields. - optional string manager = 1; - - // Operation is the type of operation which lead to this ManagedFieldsEntry being created. - // The only valid values for this field are 'Apply' and 'Update'. - optional string operation = 2; - - // APIVersion defines the version of this resource that this field set - // applies to. The format is "group/version" just like the top-level - // APIVersion field. It is necessary to track the version of a field - // set because it cannot be automatically converted. - optional string apiVersion = 3; - - // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' - // +optional - optional Time time = 4; - - // FieldsType is the discriminator for the different fields format and version. - // There is currently only one possible value: "FieldsV1" - optional string fieldsType = 6; - - // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. - // +optional - optional FieldsV1 fieldsV1 = 7; -} - // MicroTime is version of Time with microsecond level precision. -// +// // +protobuf.options.marshal=false // +protobuf.as=Timestamp // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -535,14 +475,14 @@ message ObjectMeta { // The provided value has the same validation rules as the Name field, // and may be truncated by the length of the suffix required to make the value // unique on the server. 
- // + // // If this field is specified and the generated name exists, the server will // NOT return a 409 - instead, it will either return 201 Created or 500 with Reason // ServerTimeout indicating a unique name could not be found in the time allotted, and the client // should retry (optionally after the time indicated in the Retry-After header). - // + // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional optional string generateName = 2; @@ -550,7 +490,7 @@ message ObjectMeta { // equivalent to the "default" namespace, but "default" is the canonical representation. // Not all objects are required to be scoped to a namespace - the value of this field for // those objects will be empty. - // + // // Must be a DNS_LABEL. // Cannot be updated. // More info: http://kubernetes.io/docs/user-guide/namespaces @@ -560,17 +500,13 @@ message ObjectMeta { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. - // - // DEPRECATED - // Kubernetes will stop propagating this field in 1.20 release and the field is planned - // to be removed in 1.21 release. // +optional optional string selfLink = 4; // UID is the unique in time and space value for this object. It is typically generated by // the server on successful creation of a resource and is not allowed to change on PUT // operations. - // + // // Populated by the system. // Read-only. // More info: http://kubernetes.io/docs/user-guide/identifiers#uids @@ -582,11 +518,11 @@ message ObjectMeta { // concurrency, change detection, and the watch operation on a resource or set of resources. // Clients must treat these values as opaque and passed unmodified back to the server. // They may only be valid for a particular resource or set of resources. - // + // // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional optional string resourceVersion = 6; @@ -598,11 +534,11 @@ message ObjectMeta { // CreationTimestamp is a timestamp representing the server time when this object was // created. It is not guaranteed to be set in happens-before order across separate operations. // Clients may not set this value. It is represented in RFC3339 form and is in UTC. - // + // // Populated by the system. // Read-only. // Null for lists. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional Time creationTimestamp = 8; @@ -620,10 +556,10 @@ message ObjectMeta { // exist after this timestamp, until an administrator or automated process can determine the // resource is fully terminated. // If not set, graceful deletion of the object has not been requested. - // + // // Populated by the system when a graceful deletion is requested. // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional optional Time deletionTimestamp = 9; @@ -657,19 +593,21 @@ message ObjectMeta { // +patchStrategy=merge repeated OwnerReference ownerReferences = 13; + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + optional Initializers initializers = 16; + // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. - // Finalizers may be processed and removed in any order. Order is NOT enforced - // because it introduces significant risk of stuck finalizers. - // finalizers is a shared field, any actor with permission can reorder it. - // If the finalizer list is processed in order, then this can lead to a situation - // in which the component responsible for the first finalizer in the list is - // waiting for a signal (field value, external system, or other) produced by a - // component responsible for a finalizer later in the list, resulting in a deadlock. - // Without enforced ordering finalizers are free to order amongst themselves and - // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge repeated string finalizers = 14; @@ -679,28 +617,17 @@ message ObjectMeta { // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. // +optional optional string clusterName = 15; - - // ManagedFields maps workflow-id and version to the set of fields - // that are managed by that workflow. This is mostly for internal - // housekeeping, and users typically shouldn't need to set or - // understand this field. A workflow can be the user's name, a - // controller's name, or the name of a specific apply path like - // "ci-cd". The set of fields is always in the version that the - // workflow used when modifying the object. - // - // +optional - repeated ManagedFieldsEntry managedFields = 17; } // OwnerReference contains enough information to let you identify an owning -// object. An owning object must be in the same namespace as the dependent, or -// be cluster-scoped, so there is no namespace field. +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. message OwnerReference { // API version of the referent. optional string apiVersion = 5; // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds optional string kind = 1; // Name of the referent. 
@@ -725,69 +652,15 @@ message OwnerReference { optional bool blockOwnerDeletion = 7; } -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadata { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - optional ObjectMeta metadata = 1; -} - -// PartialObjectMetadataList contains a list of objects containing only their metadata -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadataList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - // +optional - optional ListMeta metadata = 1; - - // items contains each of the included items. - repeated PartialObjectMetadata items = 2; -} - // Patch is provided to give a concrete name and type to the Kubernetes PATCH request body. message Patch { } -// PatchOptions may be provided when patching an API object. -// PatchOptions is meant to be a superset of UpdateOptions. -message PatchOptions { - // When present, indicates that modifications should not be - // persisted. An invalid or unrecognized dryRun directive will - // result in an error response and no further processing of the - // request. Valid values are: - // - All: all dry run stages will be processed - // +optional - repeated string dryRun = 1; - - // Force is going to "force" Apply requests. It means user will - // re-acquire conflicting fields owned by other people. Force - // flag must be unset for non-apply patch requests. - // +optional - optional bool force = 2; - - // fieldManager is a name associated with the actor or entity - // that is making these changes. The value must be less than or - // 128 characters long, and only contain printable characters, - // as defined by https://golang.org/pkg/unicode/#IsPrint. This - // field is required for apply requests - // (application/apply-patch) but optional for non-apply patch - // types (JsonPatch, MergePatch, StrategicMergePatch). - // +optional - optional string fieldManager = 3; -} - // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. message Preconditions { // Specifies the target UID. // +optional optional string uid = 1; - - // Specifies the target ResourceVersion - // +optional - optional string resourceVersion = 2; } // RootPaths lists the paths available at root. @@ -810,13 +683,13 @@ message ServerAddressByClientCIDR { // Status is a return value for calls that don't return other objects. message Status { // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional ListMeta metadata = 1; // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional optional string status = 2; @@ -861,7 +734,7 @@ message StatusCause { // Arrays are zero-indexed. 
Fields may appear more than once in an array of // causes due to fields having multiple errors. // Optional. - // + // // Examples: // "name" - the field "name" on the current resource // "items[0].name" - the field "name" on the first array entry in "items" @@ -887,7 +760,7 @@ message StatusDetails { // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional string kind = 3; @@ -909,20 +782,10 @@ message StatusDetails { optional int32 retryAfterSeconds = 5; } -// TableOptions are used when a Table is requested by the caller. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message TableOptions { - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - optional string includeObject = 1; -} - // Time is a wrapper around time.Time which supports correct // marshaling to YAML and JSON. Wrappers are provided for many // of the factory methods that the time package offers. -// +// // +protobuf.options.marshal=false // +protobuf.as=Timestamp // +protobuf.options.(gogoproto.goproto_stringer)=false @@ -958,27 +821,26 @@ message Timestamp { // TypeMeta describes an individual object in an API response or request // with strings representing the type of the object and its API schema version. // Structures that are versioned or persisted should inline TypeMeta. -// +// // +k8s:deepcopy-gen=false message TypeMeta { // Kind is a string value representing the REST resource this object represents. // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional optional string kind = 1; // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources // +optional optional string apiVersion = 2; } // UpdateOptions may be provided when updating an API object. -// All fields in UpdateOptions should also be present in PatchOptions. message UpdateOptions { // When present, indicates that modifications should not be // persisted. An invalid or unrecognized dryRun directive will @@ -987,17 +849,10 @@ message UpdateOptions { // - All: all dry run stages will be processed // +optional repeated string dryRun = 1; - - // fieldManager is a name associated with the actor or entity - // that is making these changes. The value must be less than or - // 128 characters long, and only contain printable characters, - // as defined by https://golang.org/pkg/unicode/#IsPrint. 
- // +optional - optional string fieldManager = 2; } // Verbs masks the value so protobuf can generate -// +// // +protobuf.nullable=true // +protobuf.options.(gogoproto.goproto_stringer)=false message Verbs { @@ -1007,7 +862,7 @@ message Verbs { } // Event represents a single event to a watched resource. -// +// // +protobuf=true // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go index ad989ad..d845d7b 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/helpers.go @@ -17,9 +17,6 @@ limitations under the License. package v1 import ( - "bytes" - "encoding/json" - "errors" "fmt" "k8s.io/apimachinery/pkg/fields" @@ -230,53 +227,8 @@ func NewUIDPreconditions(uid string) *Preconditions { return &Preconditions{UID: &u} } -// NewRVDeletionPrecondition returns a DeleteOptions with a ResourceVersion precondition set. -func NewRVDeletionPrecondition(rv string) *DeleteOptions { - p := Preconditions{ResourceVersion: &rv} - return &DeleteOptions{Preconditions: &p} -} - // HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values. func HasObjectMetaSystemFieldValues(meta Object) bool { return !meta.GetCreationTimestamp().Time.IsZero() || len(meta.GetUID()) != 0 } - -// ResetObjectMetaForStatus forces the meta fields for a status update to match the meta fields -// for a pre-existing object. This is opt-in for new objects with Status subresource. -func ResetObjectMetaForStatus(meta, existingMeta Object) { - meta.SetDeletionTimestamp(existingMeta.GetDeletionTimestamp()) - meta.SetGeneration(existingMeta.GetGeneration()) - meta.SetSelfLink(existingMeta.GetSelfLink()) - meta.SetLabels(existingMeta.GetLabels()) - meta.SetAnnotations(existingMeta.GetAnnotations()) - meta.SetFinalizers(existingMeta.GetFinalizers()) - meta.SetOwnerReferences(existingMeta.GetOwnerReferences()) - // managedFields must be preserved since it's been modified to - // track changed fields in the status update. - //meta.SetManagedFields(existingMeta.GetManagedFields()) -} - -// MarshalJSON implements json.Marshaler -// MarshalJSON may get called on pointers or values, so implement MarshalJSON on value. -// http://stackoverflow.com/questions/21390979/custom-marshaljson-never-gets-called-in-go -func (f FieldsV1) MarshalJSON() ([]byte, error) { - if f.Raw == nil { - return []byte("null"), nil - } - return f.Raw, nil -} - -// UnmarshalJSON implements json.Unmarshaler -func (f *FieldsV1) UnmarshalJSON(b []byte) error { - if f == nil { - return errors.New("metav1.Fields: UnmarshalJSON on nil pointer") - } - if !bytes.Equal(b, []byte("null")) { - f.Raw = append(f.Raw[0:0], b...) 
- } - return nil -} - -var _ json.Marshaler = FieldsV1{} -var _ json.Unmarshaler = &FieldsV1{} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go index 912cf20..ee14475 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/meta.go @@ -55,14 +55,14 @@ type Object interface { SetLabels(labels map[string]string) GetAnnotations() map[string]string SetAnnotations(annotations map[string]string) + GetInitializers() *Initializers + SetInitializers(initializers *Initializers) GetFinalizers() []string SetFinalizers(finalizers []string) GetOwnerReferences() []OwnerReference SetOwnerReferences([]OwnerReference) GetClusterName() string SetClusterName(clusterName string) - GetManagedFields() []ManagedFieldsEntry - SetManagedFields(managedFields []ManagedFieldsEntry) } // ListMetaAccessor retrieves the list interface from an object @@ -92,8 +92,6 @@ type ListInterface interface { SetSelfLink(selfLink string) GetContinue() string SetContinue(c string) - GetRemainingItemCount() *int64 - SetRemainingItemCount(c *int64) } // Type exposes the type and APIVersion of versioned or internal API objects. @@ -105,16 +103,12 @@ type Type interface { SetKind(kind string) } -var _ ListInterface = &ListMeta{} - func (meta *ListMeta) GetResourceVersion() string { return meta.ResourceVersion } func (meta *ListMeta) SetResourceVersion(version string) { meta.ResourceVersion = version } func (meta *ListMeta) GetSelfLink() string { return meta.SelfLink } func (meta *ListMeta) SetSelfLink(selfLink string) { meta.SelfLink = selfLink } func (meta *ListMeta) GetContinue() string { return meta.Continue } func (meta *ListMeta) SetContinue(c string) { meta.Continue = c } -func (meta *ListMeta) GetRemainingItemCount() *int64 { return meta.RemainingItemCount } -func (meta *ListMeta) SetRemainingItemCount(c *int64) { meta.RemainingItemCount = c } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } @@ -164,15 +158,13 @@ func (meta *ObjectMeta) GetLabels() map[string]string { return m func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels } func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations } func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations } +func (meta *ObjectMeta) GetInitializers() *Initializers { return meta.Initializers } +func (meta *ObjectMeta) SetInitializers(initializers *Initializers) { meta.Initializers = initializers } func (meta *ObjectMeta) GetFinalizers() []string { return meta.Finalizers } func (meta *ObjectMeta) SetFinalizers(finalizers []string) { meta.Finalizers = finalizers } func (meta *ObjectMeta) GetOwnerReferences() []OwnerReference { return meta.OwnerReferences } func (meta *ObjectMeta) SetOwnerReferences(references []OwnerReference) { meta.OwnerReferences = references } -func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } -func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } -func (meta *ObjectMeta) GetManagedFields() []ManagedFieldsEntry { return meta.ManagedFields } -func (meta *ObjectMeta) SetManagedFields(managedFields []ManagedFieldsEntry) { - meta.ManagedFields = managedFields -} +func (meta *ObjectMeta) GetClusterName() string { return meta.ClusterName } +func (meta *ObjectMeta) SetClusterName(clusterName string) { meta.ClusterName = clusterName } diff --git 
a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go index cdd9a6a..6f6c511 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time.go @@ -41,6 +41,11 @@ func (t *MicroTime) DeepCopyInto(out *MicroTime) { *out = *t } +// String returns the representation of the time. +func (t MicroTime) String() string { + return t.Time.String() +} + // NewMicroTime returns a wrapped instance of the provided time func NewMicroTime(time time.Time) MicroTime { return MicroTime{time} @@ -67,40 +72,22 @@ func (t *MicroTime) IsZero() bool { // Before reports whether the time instant t is before u. func (t *MicroTime) Before(u *MicroTime) bool { - if t != nil && u != nil { - return t.Time.Before(u.Time) - } - return false + return t.Time.Before(u.Time) } // Equal reports whether the time instant t is equal to u. func (t *MicroTime) Equal(u *MicroTime) bool { - if t == nil && u == nil { - return true - } - if t != nil && u != nil { - return t.Time.Equal(u.Time) - } - return false + return t.Time.Equal(u.Time) } // BeforeTime reports whether the time instant t is before second-lever precision u. func (t *MicroTime) BeforeTime(u *Time) bool { - if t != nil && u != nil { - return t.Time.Before(u.Time) - } - return false + return t.Time.Before(u.Time) } // EqualTime reports whether the time instant t is equal to second-lever precision u. func (t *MicroTime) EqualTime(u *Time) bool { - if t == nil && u == nil { - return true - } - if t != nil && u != nil { - return t.Time.Equal(u.Time) - } - return false + return t.Time.Equal(u.Time) } // UnixMicro returns the local time corresponding to the given Unix time diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go index 6dd6d89..14841be 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/micro_time_proto.go @@ -70,11 +70,3 @@ func (m *MicroTime) MarshalTo(data []byte) (int, error) { } return m.ProtoMicroTime().MarshalTo(data) } - -// MarshalToSizedBuffer implements the protobuf marshalling interface. -func (m *MicroTime) MarshalToSizedBuffer(data []byte) (int, error) { - if m == nil || m.Time.IsZero() { - return 0, nil - } - return m.ProtoMicroTime().MarshalToSizedBuffer(data) -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go index c1a0771..0827729 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/register.go @@ -25,13 +25,6 @@ import ( // GroupName is the group name for this API. const GroupName = "meta.k8s.io" -var ( - // localSchemeBuilder is used to make compiler happy for autogenerated - // conversions. However, it's not used. - schemeBuilder runtime.SchemeBuilder - localSchemeBuilder = &schemeBuilder -) - // SchemeGroupVersion is group version used to register these objects var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} @@ -47,22 +40,6 @@ func Kind(kind string) schema.GroupKind { return SchemeGroupVersion.WithKind(kind).GroupKind() } -// scheme is the registry for the common types that adhere to the meta v1 API spec. -var scheme = runtime.NewScheme() - -// ParameterCodec knows about query parameters used with the meta v1 API spec. 
-var ParameterCodec = runtime.NewParameterCodec(scheme) - -var optionsTypes = []runtime.Object{ - &ListOptions{}, - &ExportOptions{}, - &GetOptions{}, - &DeleteOptions{}, - &CreateOptions{}, - &UpdateOptions{}, - &PatchOptions{}, -} - // AddToGroupVersion registers common meta types into schemas. func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) { scheme.AddKnownTypeWithName(groupVersion.WithKind(WatchEventKind), &WatchEvent{}) @@ -71,7 +48,20 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) &InternalEvent{}, ) // Supports legacy code paths, most callers should use metav1.ParameterCodec for now - scheme.AddKnownTypes(groupVersion, optionsTypes...) + scheme.AddKnownTypes(groupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + &CreateOptions{}, + &UpdateOptions{}, + ) + utilruntime.Must(scheme.AddConversionFuncs( + Convert_v1_WatchEvent_To_watch_Event, + Convert_v1_InternalEvent_To_v1_WatchEvent, + Convert_watch_Event_To_v1_WatchEvent, + Convert_v1_WatchEvent_To_v1_InternalEvent, + )) // Register Unversioned types under their own special group scheme.AddUnversionedTypes(Unversioned, &Status{}, @@ -82,26 +72,25 @@ func AddToGroupVersion(scheme *runtime.Scheme, groupVersion schema.GroupVersion) ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - utilruntime.Must(RegisterConversions(scheme)) + utilruntime.Must(AddConversionFuncs(scheme)) utilruntime.Must(RegisterDefaults(scheme)) } -// AddMetaToScheme registers base meta types into schemas. -func AddMetaToScheme(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Table{}, - &TableOptions{}, - &PartialObjectMetadata{}, - &PartialObjectMetadataList{}, - ) +// scheme is the registry for the common types that adhere to the meta v1 API spec. +var scheme = runtime.NewScheme() - return nil -} +// ParameterCodec knows about query parameters used with the meta v1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) func init() { - scheme.AddUnversionedTypes(SchemeGroupVersion, optionsTypes...) - - utilruntime.Must(AddMetaToScheme(scheme)) + scheme.AddUnversionedTypes(SchemeGroupVersion, + &ListOptions{}, + &ExportOptions{}, + &GetOptions{}, + &DeleteOptions{}, + &CreateOptions{}, + &UpdateOptions{}, + ) // register manually. This usually goes through the SchemeBuilder, which we cannot use here. utilruntime.Must(RegisterDefaults(scheme)) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go index 4a1d89c..efff656 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time.go @@ -20,7 +20,7 @@ import ( "encoding/json" "time" - fuzz "github.com/google/gofuzz" + "github.com/google/gofuzz" ) // Time is a wrapper around time.Time which supports correct @@ -41,6 +41,11 @@ func (t *Time) DeepCopyInto(out *Time) { *out = *t } +// String returns the representation of the time. +func (t Time) String() string { + return t.Time.String() +} + // NewTime returns a wrapped instance of the provided time func NewTime(time time.Time) Time { return Time{time} @@ -67,10 +72,7 @@ func (t *Time) IsZero() bool { // Before reports whether the time instant t is before u. func (t *Time) Before(u *Time) bool { - if t != nil && u != nil { - return t.Time.Before(u.Time) - } - return false + return t.Time.Before(u.Time) } // Equal reports whether the time instant t is equal to u. 
@@ -145,22 +147,8 @@ func (t Time) MarshalJSON() ([]byte, error) { // Encode unset/nil objects as JSON's "null". return []byte("null"), nil } - buf := make([]byte, 0, len(time.RFC3339)+2) - buf = append(buf, '"') - // time cannot contain non escapable JSON characters - buf = t.UTC().AppendFormat(buf, time.RFC3339) - buf = append(buf, '"') - return buf, nil -} -// ToUnstructured implements the value.UnstructuredConverter interface. -func (t Time) ToUnstructured() interface{} { - if t.IsZero() { - return nil - } - buf := make([]byte, 0, len(time.RFC3339)) - buf = t.UTC().AppendFormat(buf, time.RFC3339) - return string(buf) + return json.Marshal(t.UTC().Format(time.RFC3339)) } // OpenAPISchemaType is used by the kube-openapi generator when constructing diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go index eac8d96..ed72186 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/time_proto.go @@ -75,7 +75,7 @@ func (m *Time) Unmarshal(data []byte) error { return nil } -// Marshal implements the protobuf marshaling interface. +// Marshal implements the protobuf marshalling interface. func (m *Time) Marshal() (data []byte, err error) { if m == nil || m.Time.IsZero() { return nil, nil @@ -83,18 +83,10 @@ func (m *Time) Marshal() (data []byte, err error) { return m.ProtoTime().Marshal() } -// MarshalTo implements the protobuf marshaling interface. +// MarshalTo implements the protobuf marshalling interface. func (m *Time) MarshalTo(data []byte) (int, error) { if m == nil || m.Time.IsZero() { return 0, nil } return m.ProtoTime().MarshalTo(data) } - -// MarshalToSizedBuffer implements the protobuf reverse marshaling interface. -func (m *Time) MarshalToSizedBuffer(data []byte) (int, error) { - if m == nil || m.Time.IsZero() { - return 0, nil - } - return m.ProtoTime().MarshalToSizedBuffer(data) -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go index e7aaead..c174338 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types.go @@ -43,14 +43,14 @@ type TypeMeta struct { // Servers may infer this from the endpoint the client submits requests to. // Cannot be updated. // In CamelCase. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` // APIVersion defines the versioned schema of this representation of an object. // Servers should convert recognized schemas to the latest internal value, and // may reject unrecognized values. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources // +optional APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=apiVersion"` } @@ -61,10 +61,6 @@ type ListMeta struct { // selfLink is a URL representing this object. // Populated by the system. // Read-only. - // - // DEPRECATED - // Kubernetes will stop propagating this field in 1.20 release and the field is planned - // to be removed in 1.21 release. 
// +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,1,opt,name=selfLink"` @@ -73,7 +69,7 @@ type ListMeta struct { // Value must be treated as opaque by clients and passed unmodified back to the server. // Populated by the system. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` @@ -85,18 +81,6 @@ type ListMeta struct { // identical to the value in the first response, unless you have received this token from an error // message. Continue string `json:"continue,omitempty" protobuf:"bytes,3,opt,name=continue"` - - // remainingItemCount is the number of subsequent items in the list which are not included in this - // list response. If the list request contained label or field selectors, then the number of - // remaining items is unknown and the field will be left unset and omitted during serialization. - // If the list is complete (either because it is not chunking or because this is the last chunk), - // then there are no more remaining items and this field will be left unset and omitted during - // serialization. - // Servers older than v1.15 do not set this field. - // The intended use of the remainingItemCount is *estimating* the size of a collection. Clients - // should not rely on the remainingItemCount to be set or to be exact. - // +optional - RemainingItemCount *int64 `json:"remainingItemCount,omitempty" protobuf:"bytes,4,opt,name=remainingItemCount"` } // These are internal finalizer values for Kubernetes-like APIs, must be qualified name unless defined here @@ -131,7 +115,7 @@ type ObjectMeta struct { // should retry (optionally after the time indicated in the Retry-After header). // // Applied only if Name is not specified. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency // +optional GenerateName string `json:"generateName,omitempty" protobuf:"bytes,2,opt,name=generateName"` @@ -149,10 +133,6 @@ type ObjectMeta struct { // SelfLink is a URL representing this object. // Populated by the system. // Read-only. - // - // DEPRECATED - // Kubernetes will stop propagating this field in 1.20 release and the field is planned - // to be removed in 1.21 release. // +optional SelfLink string `json:"selfLink,omitempty" protobuf:"bytes,4,opt,name=selfLink"` @@ -175,7 +155,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Value must be treated as opaque by clients and . - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency // +optional ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` @@ -191,7 +171,7 @@ type ObjectMeta struct { // Populated by the system. // Read-only. // Null for lists. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional CreationTimestamp Time `json:"creationTimestamp,omitempty" protobuf:"bytes,8,opt,name=creationTimestamp"` @@ -212,7 +192,7 @@ type ObjectMeta struct { // // Populated by the system when a graceful deletion is requested. // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata // +optional DeletionTimestamp *Time `json:"deletionTimestamp,omitempty" protobuf:"bytes,9,opt,name=deletionTimestamp"` @@ -246,19 +226,21 @@ type ObjectMeta struct { // +patchStrategy=merge OwnerReferences []OwnerReference `json:"ownerReferences,omitempty" patchStrategy:"merge" patchMergeKey:"uid" protobuf:"bytes,13,rep,name=ownerReferences"` + // An initializer is a controller which enforces some system invariant at object creation time. + // This field is a list of initializers that have not yet acted on this object. If nil or empty, + // this object has been completely initialized. Otherwise, the object is considered uninitialized + // and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to + // observe uninitialized objects. + // + // When an object is created, the system will populate this list with the current set of initializers. + // Only privileged users may set or modify this list. Once it is empty, it may not be modified further + // by any user. + Initializers *Initializers `json:"initializers,omitempty" protobuf:"bytes,16,opt,name=initializers"` + // Must be empty before the object is deleted from the registry. Each entry // is an identifier for the responsible component that will remove the entry // from the list. If the deletionTimestamp of the object is non-nil, entries // in this list can only be removed. - // Finalizers may be processed and removed in any order. Order is NOT enforced - // because it introduces significant risk of stuck finalizers. - // finalizers is a shared field, any actor with permission can reorder it. - // If the finalizer list is processed in order, then this can lead to a situation - // in which the component responsible for the first finalizer in the list is - // waiting for a signal (field value, external system, or other) produced by a - // component responsible for a finalizer later in the list, resulting in a deadlock. - // Without enforced ordering finalizers are free to order amongst themselves and - // are not vulnerable to ordering changes in the list. // +optional // +patchStrategy=merge Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"` @@ -268,17 +250,26 @@ type ObjectMeta struct { // This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request. // +optional ClusterName string `json:"clusterName,omitempty" protobuf:"bytes,15,opt,name=clusterName"` +} - // ManagedFields maps workflow-id and version to the set of fields - // that are managed by that workflow. This is mostly for internal - // housekeeping, and users typically shouldn't need to set or - // understand this field. A workflow can be the user's name, a - // controller's name, or the name of a specific apply path like - // "ci-cd". 
The set of fields is always in the version that the - // workflow used when modifying the object. - // - // +optional - ManagedFields []ManagedFieldsEntry `json:"managedFields,omitempty" protobuf:"bytes,17,rep,name=managedFields"` +// Initializers tracks the progress of initialization. +type Initializers struct { + // Pending is a list of initializers that must execute in order before this object is visible. + // When the last pending initializer is removed, and no failing result is set, the initializers + // struct will be set to nil and the object is considered as initialized and visible to all + // clients. + // +patchMergeKey=name + // +patchStrategy=merge + Pending []Initializer `json:"pending" protobuf:"bytes,1,rep,name=pending" patchStrategy:"merge" patchMergeKey:"name"` + // If result is set with the Failure field, the object will be persisted to storage and then deleted, + // ensuring that other clients can observe the deletion. + Result *Status `json:"result,omitempty" protobuf:"bytes,2,opt,name=result"` +} + +// Initializer is information about an initializer that has not yet completed. +type Initializer struct { + // name of the process that is responsible for initializing this object. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` } const ( @@ -295,13 +286,13 @@ const ( ) // OwnerReference contains enough information to let you identify an owning -// object. An owning object must be in the same namespace as the dependent, or -// be cluster-scoped, so there is no namespace field. +// object. Currently, an owning object must be in the same namespace, so there +// is no namespace field. type OwnerReference struct { // API version of the referent. APIVersion string `json:"apiVersion" protobuf:"bytes,5,opt,name=apiVersion"` // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` // Name of the referent. // More info: http://kubernetes.io/docs/user-guide/identifiers#names @@ -322,7 +313,6 @@ type OwnerReference struct { BlockOwnerDeletion *bool `json:"blockOwnerDeletion,omitempty" protobuf:"varint,7,opt,name=blockOwnerDeletion"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ListOptions is the query options to a standard REST list call. @@ -337,24 +327,13 @@ type ListOptions struct { // Defaults to everything. // +optional FieldSelector string `json:"fieldSelector,omitempty" protobuf:"bytes,2,opt,name=fieldSelector"` - - // +k8s:deprecated=includeUninitialized,protobuf=6 - + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,6,opt,name=includeUninitialized"` // Watch for changes to the described resources and return them as a stream of // add, update, and remove notifications. Specify resourceVersion. // +optional Watch bool `json:"watch,omitempty" protobuf:"varint,3,opt,name=watch"` - // allowWatchBookmarks requests watch events with type "BOOKMARK". - // Servers that do not implement bookmarks may ignore this flag and - // bookmarks are sent at the server's discretion. Clients should not - // assume bookmarks are returned at any specific interval, nor may they - // assume the server will send any BOOKMARK event during a session. 
- // If this is not a watch, this field is ignored. - // If the feature gate WatchBookmarks is not enabled in apiserver, - // this field is ignored. - // +optional - AllowWatchBookmarks bool `json:"allowWatchBookmarks,omitempty" protobuf:"varint,9,opt,name=allowWatchBookmarks"` - // When specified with a watch call, shows changes that occur after that particular version of a resource. // Defaults to changes from the beginning of history. // When specified for list: @@ -402,22 +381,17 @@ type ListOptions struct { Continue string `json:"continue,omitempty" protobuf:"bytes,8,opt,name=continue"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // ExportOptions is the query options to the standard REST get call. -// Deprecated. Planned for removal in 1.18. type ExportOptions struct { TypeMeta `json:",inline"` // Should this value be exported. Export strips fields that a user can not specify. - // Deprecated. Planned for removal in 1.18. Export bool `json:"export" protobuf:"varint,1,opt,name=export"` // Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. - // Deprecated. Planned for removal in 1.18. Exact bool `json:"exact" protobuf:"varint,2,opt,name=exact"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // GetOptions is the standard query options to the standard REST get call. @@ -428,7 +402,9 @@ type GetOptions struct { // - if it's 0, then we simply return what we currently have in cache, no guarantee; // - if set to non zero, then the result is at least as fresh as given rv. ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,1,opt,name=resourceVersion"` - // +k8s:deprecated=includeUninitialized,protobuf=2 + // If true, partially initialized resources are included in the response. + // +optional + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` } // DeletionPropagation decides if a deletion will propagate to the dependents of @@ -455,7 +431,6 @@ const ( DryRunAll = "All" ) -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // DeleteOptions may be provided when deleting an API object. @@ -471,7 +446,6 @@ type DeleteOptions struct { // Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be // returned. - // +k8s:conversion-gen=false // +optional Preconditions *Preconditions `json:"preconditions,omitempty" protobuf:"bytes,2,opt,name=preconditions"` @@ -502,38 +476,12 @@ type DeleteOptions struct { DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,5,rep,name=dryRun"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // CreateOptions may be provided when creating an API object. type CreateOptions struct { TypeMeta `json:",inline"` - // When present, indicates that modifications should not be - // persisted. An invalid or unrecognized dryRun directive will - // result in an error response and no further processing of the - // request. 
Valid values are: - // - All: all dry run stages will be processed - // +optional - DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` - // +k8s:deprecated=includeUninitialized,protobuf=2 - - // fieldManager is a name associated with the actor or entity - // that is making these changes. The value must be less than or - // 128 characters long, and only contain printable characters, - // as defined by https://golang.org/pkg/unicode/#IsPrint. - // +optional - FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` -} - -// +k8s:conversion-gen:explicit-from=net/url.Values -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PatchOptions may be provided when patching an API object. -// PatchOptions is meant to be a superset of UpdateOptions. -type PatchOptions struct { - TypeMeta `json:",inline"` - // When present, indicates that modifications should not be // persisted. An invalid or unrecognized dryRun directive will // result in an error response and no further processing of the @@ -542,28 +490,14 @@ type PatchOptions struct { // +optional DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` - // Force is going to "force" Apply requests. It means user will - // re-acquire conflicting fields owned by other people. Force - // flag must be unset for non-apply patch requests. - // +optional - Force *bool `json:"force,omitempty" protobuf:"varint,2,opt,name=force"` - - // fieldManager is a name associated with the actor or entity - // that is making these changes. The value must be less than or - // 128 characters long, and only contain printable characters, - // as defined by https://golang.org/pkg/unicode/#IsPrint. This - // field is required for apply requests - // (application/apply-patch) but optional for non-apply patch - // types (JsonPatch, MergePatch, StrategicMergePatch). - // +optional - FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,3,name=fieldManager"` + // If IncludeUninitialized is specified, the object may be + // returned without completing initialization. + IncludeUninitialized bool `json:"includeUninitialized,omitempty" protobuf:"varint,2,opt,name=includeUninitialized"` } -// +k8s:conversion-gen:explicit-from=net/url.Values // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // UpdateOptions may be provided when updating an API object. -// All fields in UpdateOptions should also be present in PatchOptions. type UpdateOptions struct { TypeMeta `json:",inline"` @@ -574,13 +508,6 @@ type UpdateOptions struct { // - All: all dry run stages will be processed // +optional DryRun []string `json:"dryRun,omitempty" protobuf:"bytes,1,rep,name=dryRun"` - - // fieldManager is a name associated with the actor or entity - // that is making these changes. The value must be less than or - // 128 characters long, and only contain printable characters, - // as defined by https://golang.org/pkg/unicode/#IsPrint. - // +optional - FieldManager string `json:"fieldManager,omitempty" protobuf:"bytes,2,name=fieldManager"` } // Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. @@ -588,9 +515,6 @@ type Preconditions struct { // Specifies the target UID. 
// +optional UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` - // Specifies the target ResourceVersion - // +optional - ResourceVersion *string `json:"resourceVersion,omitempty" protobuf:"bytes,2,opt,name=resourceVersion"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -599,13 +523,13 @@ type Preconditions struct { type Status struct { TypeMeta `json:",inline"` // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` // Status of the operation. // One of: "Success" or "Failure". - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status // +optional Status string `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` // A human-readable description of the status of this operation. @@ -644,7 +568,7 @@ type StatusDetails struct { Group string `json:"group,omitempty" protobuf:"bytes,2,opt,name=group"` // The kind attribute of the resource associated with the status StatusReason. // On some operations may differ from the requested resource Kind. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional Kind string `json:"kind,omitempty" protobuf:"bytes,3,opt,name=kind"` // UID of the resource. @@ -776,13 +700,11 @@ const ( // doesn't make any sense, for example deleting a read-only object. This is different than // StatusReasonInvalid above which indicates that the API call could possibly succeed, but the // data was invalid. API calls that return BadRequest can never succeed. - // Status code 400 StatusReasonBadRequest StatusReason = "BadRequest" // StatusReasonMethodNotAllowed means that the action the client attempted to perform on the // resource was not supported by the code - for instance, attempting to delete a resource that // can only be created. API calls that return MethodNotAllowed can never succeed. - // Status code 405 StatusReasonMethodNotAllowed StatusReason = "MethodNotAllowed" // StatusReasonNotAcceptable means that the accept types indicated by the client were not acceptable @@ -870,12 +792,6 @@ const ( // without the expected return type. The presence of this cause indicates the error may be // due to an intervening proxy or the server software malfunctioning. CauseTypeUnexpectedServerResponse CauseType = "UnexpectedServerResponse" - // FieldManagerConflict is used to report when another client claims to manage this field, - // It should only be returned for a request using server-side apply. - CauseTypeFieldManagerConflict CauseType = "FieldManagerConflict" - // CauseTypeResourceVersionTooLarge is used to report that the requested resource version - // is newer than the data observed by the API server, so the request cannot be served. - CauseTypeResourceVersionTooLarge CauseType = "ResourceVersionTooLarge" ) // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -884,7 +800,7 @@ const ( type List struct { TypeMeta `json:",inline"` // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds // +optional ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` @@ -990,15 +906,6 @@ type APIResource struct { ShortNames []string `json:"shortNames,omitempty" protobuf:"bytes,5,rep,name=shortNames"` // categories is a list of the grouped resources this resource belongs to (e.g. 'all') Categories []string `json:"categories,omitempty" protobuf:"bytes,7,rep,name=categories"` - // The hash value of the storage version, the version this resource is - // converted to when written to the data store. Value must be treated - // as opaque by clients. Only equality comparison on the value is valid. - // This is an alpha feature and may change or be removed in the future. - // The field is populated by the apiserver only if the - // StorageVersionHash feature gate is enabled. - // This field will remain optional even if it graduates. - // +optional - StorageVersionHash string `json:"storageVersionHash,omitempty" protobuf:"bytes,10,opt,name=storageVersionHash"` } // Verbs masks the value so protobuf can generate @@ -1100,217 +1007,3 @@ const ( LabelSelectorOpExists LabelSelectorOperator = "Exists" LabelSelectorOpDoesNotExist LabelSelectorOperator = "DoesNotExist" ) - -// ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource -// that the fieldset applies to. -type ManagedFieldsEntry struct { - // Manager is an identifier of the workflow managing these fields. - Manager string `json:"manager,omitempty" protobuf:"bytes,1,opt,name=manager"` - // Operation is the type of operation which lead to this ManagedFieldsEntry being created. - // The only valid values for this field are 'Apply' and 'Update'. - Operation ManagedFieldsOperationType `json:"operation,omitempty" protobuf:"bytes,2,opt,name=operation,casttype=ManagedFieldsOperationType"` - // APIVersion defines the version of this resource that this field set - // applies to. The format is "group/version" just like the top-level - // APIVersion field. It is necessary to track the version of a field - // set because it cannot be automatically converted. - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` - // Time is timestamp of when these fields were set. It should always be empty if Operation is 'Apply' - // +optional - Time *Time `json:"time,omitempty" protobuf:"bytes,4,opt,name=time"` - - // Fields is tombstoned to show why 5 is a reserved protobuf tag. - //Fields *Fields `json:"fields,omitempty" protobuf:"bytes,5,opt,name=fields,casttype=Fields"` - - // FieldsType is the discriminator for the different fields format and version. - // There is currently only one possible value: "FieldsV1" - FieldsType string `json:"fieldsType,omitempty" protobuf:"bytes,6,opt,name=fieldsType"` - // FieldsV1 holds the first JSON version format as described in the "FieldsV1" type. - // +optional - FieldsV1 *FieldsV1 `json:"fieldsV1,omitempty" protobuf:"bytes,7,opt,name=fieldsV1"` -} - -// ManagedFieldsOperationType is the type of operation which lead to a ManagedFieldsEntry being created. -type ManagedFieldsOperationType string - -const ( - ManagedFieldsOperationApply ManagedFieldsOperationType = "Apply" - ManagedFieldsOperationUpdate ManagedFieldsOperationType = "Update" -) - -// FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format. 
-// -// Each key is either a '.' representing the field itself, and will always map to an empty set, -// or a string representing a sub-field or item. The string will follow one of these four formats: -// 'f:', where is the name of a field in a struct, or key in a map -// 'v:', where is the exact json formatted value of a list item -// 'i:', where is position of a item in a list -// 'k:', where is a map of a list item's key fields to their unique values -// If a key maps to an empty Fields value, the field that key represents is part of the set. -// -// The exact format is defined in sigs.k8s.io/structured-merge-diff -type FieldsV1 struct { - // Raw is the underlying serialization of this object. - Raw []byte `json:"-" protobuf:"bytes,1,opt,name=Raw"` -} - -// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf -// generation to support a meta type that can accept any valid JSON. This can be introduced -// in a v1 because clients a) receive an error if they try to access proto today, and b) -// once introduced they would be able to gracefully switch over to using it. - -// Table is a tabular representation of a set of API resources. The server transforms the -// object into a set of preferred columns for quickly reviewing the objects. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -// +protobuf=false -type Table struct { - TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - // +optional - ListMeta `json:"metadata,omitempty"` - - // columnDefinitions describes each column in the returned items array. The number of cells per row - // will always match the number of column definitions. - ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` - // rows is the list of items in the table. - Rows []TableRow `json:"rows"` -} - -// TableColumnDefinition contains information about a column returned in the Table. -// +protobuf=false -type TableColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name"` - // type is an OpenAPI type definition for this column, such as number, integer, string, or - // array. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Type string `json:"type"` - // format is an optional OpenAPI type modifier for this column. A format modifies the type and - // imposes additional rules, like date or time formatting for a string. The 'name' format is applied - // to the primary identifier column which has type 'string' to assist in clients identifying column - // is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Format string `json:"format"` - // description is a human readable description of this column. - Description string `json:"description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. - Priority int32 `json:"priority"` -} - -// TableRow is an individual row in a table. -// +protobuf=false -type TableRow struct { - // cells will be as wide as the column definitions array and may contain strings, numbers (float64 or - // int64), booleans, simple maps, lists, or null. 
See the type field of the column definition for a - // more detailed description. - Cells []interface{} `json:"cells"` - // conditions describe additional status of a row that are relevant for a human user. These conditions - // apply to the row, not to the object, and will be specific to table output. The only defined - // condition type is 'Completed', for a row that indicates a resource that has run to completion and - // can be given less visual priority. - // +optional - Conditions []TableRowCondition `json:"conditions,omitempty"` - // This field contains the requested additional information about each object based on the includeObject - // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the - // default serialization of the object for the current API version, and if "Metadata" (the default) will - // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. - // The media type of the object will always match the enclosing list - if this as a JSON table, these - // will be JSON encoded objects. - // +optional - Object runtime.RawExtension `json:"object,omitempty"` -} - -// TableRowCondition allows a row to be marked with additional information. -// +protobuf=false -type TableRowCondition struct { - // Type of row condition. The only defined value is 'Completed' indicating that the - // object this row represents has reached a completed state and may be given less visual - // priority than other rows. Clients are not required to honor any conditions but should - // be consistent where possible about handling the conditions. - Type RowConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` - // (brief) machine readable reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - -type RowConditionType string - -// These are valid conditions of a row. This list is not exhaustive and new conditions may be -// included by other resources. -const ( - // RowCompleted means the underlying resource has reached completion and may be given less - // visual priority than other resources. - RowCompleted RowConditionType = "Completed" -) - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// IncludeObjectPolicy controls which portion of the object is returned with a Table. -type IncludeObjectPolicy string - -const ( - // IncludeNone returns no object. - IncludeNone IncludeObjectPolicy = "None" - // IncludeMetadata serializes the object containing only its metadata field. - IncludeMetadata IncludeObjectPolicy = "Metadata" - // IncludeObject contains the full object. - IncludeObject IncludeObjectPolicy = "Object" -) - -// TableOptions are used when a Table is requested by the caller. 
-// +k8s:conversion-gen:explicit-from=net/url.Values -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type TableOptions struct { - TypeMeta `json:",inline"` - - // NoHeaders is only exposed for internal callers. It is not included in our OpenAPI definitions - // and may be removed as a field in a future release. - NoHeaders bool `json:"-"` - - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` -} - -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadata struct { - TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata - // +optional - ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` -} - -// PartialObjectMetadataList contains a list of objects containing only their metadata -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadataList struct { - TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds - // +optional - ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items contains each of the included items. - Items []PartialObjectMetadata `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go index b62e591..35e800f 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/types_swagger_doc_generated.go @@ -49,17 +49,16 @@ func (APIGroupList) SwaggerDoc() map[string]string { } var map_APIResource = map[string]string{ - "": "APIResource specifies the name of a resource and whether it is namespaced.", - "name": "name is the plural name of the resource.", - "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", - "namespaced": "namespaced indicates if a resource is namespaced or not.", - "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", - "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", - "kind": "kind is the kind for the resource (e.g. 
'Foo' is the kind for a resource 'foo')", - "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", - "shortNames": "shortNames is a list of suggested short names of the resource.", - "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", - "storageVersionHash": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.", + "": "APIResource specifies the name of a resource and whether it is namespaced.", + "name": "name is the plural name of the resource.", + "singularName": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.", + "namespaced": "namespaced indicates if a resource is namespaced or not.", + "group": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\".", + "version": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\".", + "kind": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')", + "verbs": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)", + "shortNames": "shortNames is a list of suggested short names of the resource.", + "categories": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')", } func (APIResource) SwaggerDoc() map[string]string { @@ -87,9 +86,9 @@ func (APIVersions) SwaggerDoc() map[string]string { } var map_CreateOptions = map[string]string{ - "": "CreateOptions may be provided when creating an API object.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "": "CreateOptions may be provided when creating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", + "includeUninitialized": "If IncludeUninitialized is specified, the object may be returned without completing initialization.", } func (CreateOptions) SwaggerDoc() map[string]string { @@ -110,26 +109,19 @@ func (DeleteOptions) SwaggerDoc() map[string]string { } var map_ExportOptions = map[string]string{ - "": "ExportOptions is the query options to the standard REST get call. Deprecated. Planned for removal in 1.18.", - "export": "Should this value be exported. Export strips fields that a user can not specify. Deprecated. Planned for removal in 1.18.", - "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'. Deprecated. Planned for removal in 1.18.", + "": "ExportOptions is the query options to the standard REST get call.", + "export": "Should this value be exported. Export strips fields that a user can not specify.", + "exact": "Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.", } func (ExportOptions) SwaggerDoc() map[string]string { return map_ExportOptions } -var map_FieldsV1 = map[string]string{ - "": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\n\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:', where is the name of a field in a struct, or key in a map 'v:', where is the exact json formatted value of a list item 'i:', where is position of a item in a list 'k:', where is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\n\nThe exact format is defined in sigs.k8s.io/structured-merge-diff", -} - -func (FieldsV1) SwaggerDoc() map[string]string { - return map_FieldsV1 -} - var map_GetOptions = map[string]string{ - "": "GetOptions is the standard query options to the standard REST get call.", - "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "": "GetOptions is the standard query options to the standard REST get call.", + "resourceVersion": "When specified: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", } func (GetOptions) SwaggerDoc() map[string]string { @@ -146,6 +138,25 @@ func (GroupVersionForDiscovery) SwaggerDoc() map[string]string { return map_GroupVersionForDiscovery } +var map_Initializer = map[string]string{ + "": "Initializer is information about an initializer that has not yet completed.", + "name": "name of the process that is responsible for initializing this object.", +} + +func (Initializer) SwaggerDoc() map[string]string { + return map_Initializer +} + +var map_Initializers = map[string]string{ + "": "Initializers tracks the progress of initialization.", + "pending": "Pending is a list of initializers that must execute in order before this object is visible. 
When the last pending initializer is removed, and no failing result is set, the initializers struct will be set to nil and the object is considered as initialized and visible to all clients.", + "result": "If result is set with the Failure field, the object will be persisted to storage and then deleted, ensuring that other clients can observe the deletion.", +} + +func (Initializers) SwaggerDoc() map[string]string { + return map_Initializers +} + var map_LabelSelector = map[string]string{ "": "A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.", "matchLabels": "matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is \"key\", the operator is \"In\", and the values array contains only \"value\". The requirements are ANDed.", @@ -169,7 +180,7 @@ func (LabelSelectorRequirement) SwaggerDoc() map[string]string { var map_List = map[string]string{ "": "List holds a list of objects, which may not be known by the server.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "items": "List of objects", } @@ -178,11 +189,10 @@ func (List) SwaggerDoc() map[string]string { } var map_ListMeta = map[string]string{ - "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", - "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", - "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", - "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", - "remainingItemCount": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. 
Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.", + "": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.", + "selfLink": "selfLink is a URL representing this object. Populated by the system. Read-only.", + "resourceVersion": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", + "continue": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.", } func (ListMeta) SwaggerDoc() map[string]string { @@ -190,53 +200,39 @@ func (ListMeta) SwaggerDoc() map[string]string { } var map_ListOptions = map[string]string{ - "": "ListOptions is the query options to a standard REST list call.", - "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", - "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", - "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", - "allowWatchBookmarks": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored. If the feature gate WatchBookmarks is not enabled in apiserver, this field is ignored.", - "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", - "timeoutSeconds": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.", - "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. 
Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", - "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", + "": "ListOptions is the query options to a standard REST list call.", + "labelSelector": "A selector to restrict the list of returned objects by their labels. Defaults to everything.", + "fieldSelector": "A selector to restrict the list of returned objects by their fields. Defaults to everything.", + "includeUninitialized": "If true, partially initialized resources are included in the response.", + "watch": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.", + "resourceVersion": "When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.", + "timeoutSeconds": "Timeout for the list/watch call. 
This limits the duration of the call, regardless of any activity or inactivity.", + "limit": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.", + "continue": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.", } func (ListOptions) SwaggerDoc() map[string]string { return map_ListOptions } -var map_ManagedFieldsEntry = map[string]string{ - "": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.", - "manager": "Manager is an identifier of the workflow managing these fields.", - "operation": "Operation is the type of operation which lead to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.", - "apiVersion": "APIVersion defines the version of this resource that this field set applies to. The format is \"group/version\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.", - "time": "Time is timestamp of when these fields were set. 
It should always be empty if Operation is 'Apply'", - "fieldsType": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \"FieldsV1\"", - "fieldsV1": "FieldsV1 holds the first JSON version format as described in the \"FieldsV1\" type.", -} - -func (ManagedFieldsEntry) SwaggerDoc() map[string]string { - return map_ManagedFieldsEntry -} - var map_ObjectMeta = map[string]string{ "": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.", "name": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency", + "generateName": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\n\nIf this field is specified and the generated name exists, the server will NOT return a 409 - instead, it will either return 201 Created or 500 with Reason ServerTimeout indicating a unique name could not be found in the time allotted, and the client should retry (optionally after the time indicated in the Retry-After header).\n\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#idempotency", "namespace": "Namespace defines the space within each name must be unique. An empty namespace is equivalent to the \"default\" namespace, but \"default\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\n\nMust be a DNS_LABEL. Cannot be updated. More info: http://kubernetes.io/docs/user-guide/namespaces", - "selfLink": "SelfLink is a URL representing this object. Populated by the system. Read-only.\n\nDEPRECATED Kubernetes will stop propagating this field in 1.20 release and the field is planned to be removed in 1.21 release.", + "selfLink": "SelfLink is a URL representing this object. Populated by the system. 
Read-only.", "uid": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\n\nPopulated by the system. Read-only. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", - "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency", + "resourceVersion": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and passed unmodified back to the server. They may only be valid for a particular resource or set of resources.\n\nPopulated by the system. Read-only. Value must be treated as opaque by clients and . More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency", "generation": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.", - "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", - "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", + "creationTimestamp": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\n\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", + "deletionTimestamp": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\n\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", "deletionGracePeriodSeconds": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.", "labels": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: http://kubernetes.io/docs/user-guide/labels", "annotations": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: http://kubernetes.io/docs/user-guide/annotations", "ownerReferences": "List of objects depended by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.", - "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field, any actor with permission can reorder it. 
If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.", + "initializers": "An initializer is a controller which enforces some system invariant at object creation time. This field is a list of initializers that have not yet acted on this object. If nil or empty, this object has been completely initialized. Otherwise, the object is considered uninitialized and is hidden (in list/watch and get calls) from clients that haven't explicitly asked to observe uninitialized objects.\n\nWhen an object is created, the system will populate this list with the current set of initializers. Only privileged users may set or modify this list. Once it is empty, it may not be modified further by any user.", + "finalizers": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed.", "clusterName": "The name of the cluster which the object belongs to. This is used to distinguish resources with same name and namespace in different clusters. This field is not set anywhere right now and apiserver is going to ignore it if set in create or update request.", - "managedFields": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \"ci-cd\". The set of fields is always in the version that the workflow used when modifying the object.", } func (ObjectMeta) SwaggerDoc() map[string]string { @@ -244,9 +240,9 @@ func (ObjectMeta) SwaggerDoc() map[string]string { } var map_OwnerReference = map[string]string{ - "": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.", + "": "OwnerReference contains enough information to let you identify an owning object. Currently, an owning object must be in the same namespace, so there is no namespace field.", "apiVersion": "API version of the referent.", - "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "name": "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names", "uid": "UID of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "controller": "If true, this reference points to the managing controller.", @@ -257,25 +253,6 @@ func (OwnerReference) SwaggerDoc() map[string]string { return map_OwnerReference } -var map_PartialObjectMetadata = map[string]string{ - "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. 
It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata", -} - -func (PartialObjectMetadata) SwaggerDoc() map[string]string { - return map_PartialObjectMetadata -} - -var map_PartialObjectMetadataList = map[string]string{ - "": "PartialObjectMetadataList contains a list of objects containing only their metadata", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "items": "items contains each of the included items.", -} - -func (PartialObjectMetadataList) SwaggerDoc() map[string]string { - return map_PartialObjectMetadataList -} - var map_Patch = map[string]string{ "": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.", } @@ -284,21 +261,9 @@ func (Patch) SwaggerDoc() map[string]string { return map_Patch } -var map_PatchOptions = map[string]string{ - "": "PatchOptions may be provided when patching an API object. PatchOptions is meant to be a superset of UpdateOptions.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "force": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).", -} - -func (PatchOptions) SwaggerDoc() map[string]string { - return map_PatchOptions -} - var map_Preconditions = map[string]string{ - "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", - "uid": "Specifies the target UID.", - "resourceVersion": "Specifies the target ResourceVersion", + "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", + "uid": "Specifies the target UID.", } func (Preconditions) SwaggerDoc() map[string]string { @@ -326,8 +291,8 @@ func (ServerAddressByClientCIDR) SwaggerDoc() map[string]string { var map_Status = map[string]string{ "": "Status is a return value for calls that don't return other objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "status": "Status of the operation. One of: \"Success\" or \"Failure\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "status": "Status of the operation. One of: \"Success\" or \"Failure\". 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", "message": "A human-readable description of the status of this operation.", "reason": "A machine-readable description of why this operation is in the \"Failure\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.", "details": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type.", @@ -353,7 +318,7 @@ var map_StatusDetails = map[string]string{ "": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under defined.", "name": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).", "group": "The group attribute of the resource associated with the status StatusReason.", - "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + "kind": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", "uid": "UID of the resource. (when there is a single resource which can be described). More info: http://kubernetes.io/docs/user-guide/identifiers#uids", "causes": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.", "retryAfterSeconds": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.", @@ -363,66 +328,10 @@ func (StatusDetails) SwaggerDoc() map[string]string { return map_StatusDetails } -var map_Table = map[string]string{ - "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", - "rows": "rows is the list of items in the table.", -} - -func (Table) SwaggerDoc() map[string]string { - return map_Table -} - -var map_TableColumnDefinition = map[string]string{ - "": "TableColumnDefinition contains information about a column returned in the Table.", - "name": "name is a human readable name for the column.", - "type": "type is an OpenAPI type definition for this column, such as number, integer, string, or array. 
See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "format": "format is an optional OpenAPI type modifier for this column. A format modifies the type and imposes additional rules, like date or time formatting for a string. The 'name' format is applied to the primary identifier column which has type 'string' to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "description": "description is a human readable description of this column.", - "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", -} - -func (TableColumnDefinition) SwaggerDoc() map[string]string { - return map_TableColumnDefinition -} - -var map_TableOptions = map[string]string{ - "": "TableOptions are used when a Table is requested by the caller.", - "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", -} - -func (TableOptions) SwaggerDoc() map[string]string { - return map_TableOptions -} - -var map_TableRow = map[string]string{ - "": "TableRow is an individual row in a table.", - "cells": "cells will be as wide as the column definitions array and may contain strings, numbers (float64 or int64), booleans, simple maps, lists, or null. See the type field of the column definition for a more detailed description.", - "conditions": "conditions describe additional status of a row that are relevant for a human user. These conditions apply to the row, not to the object, and will be specific to table output. The only defined condition type is 'Completed', for a row that indicates a resource that has run to completion and can be given less visual priority.", - "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing. The media type of the object will always match the enclosing list - if this as a JSON table, these will be JSON encoded objects.", -} - -func (TableRow) SwaggerDoc() map[string]string { - return map_TableRow -} - -var map_TableRowCondition = map[string]string{ - "": "TableRowCondition allows a row to be marked with additional information.", - "type": "Type of row condition. The only defined value is 'Completed' indicating that the object this row represents has reached a completed state and may be given less visual priority than other rows. 
Clients are not required to honor any conditions but should be consistent where possible about handling the conditions.", - "status": "Status of the condition, one of True, False, Unknown.", - "reason": "(brief) machine readable reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (TableRowCondition) SwaggerDoc() map[string]string { - return map_TableRowCondition -} - var map_TypeMeta = map[string]string{ "": "TypeMeta describes an individual object in an API response or request with strings representing the type of the object and its API schema version. Structures that are versioned or persisted should inline TypeMeta.", - "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", - "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + "kind": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "apiVersion": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources", } func (TypeMeta) SwaggerDoc() map[string]string { @@ -430,9 +339,8 @@ func (TypeMeta) SwaggerDoc() map[string]string { } var map_UpdateOptions = map[string]string{ - "": "UpdateOptions may be provided when updating an API object. All fields in UpdateOptions should also be present in PatchOptions.", - "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed", - "fieldManager": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.", + "": "UpdateOptions may be provided when updating an API object.", + "dryRun": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. 
Valid values are: - All: all dry run stages will be processed", } func (UpdateOptions) SwaggerDoc() map[string]string { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go index 4244b8a..fc138e7 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/helpers.go @@ -27,15 +27,11 @@ import ( "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/json" - "k8s.io/klog" ) // NestedFieldCopy returns a deep copy of the value of a nested field. // Returns false if the value is missing. // No error is returned for a nil field. -// -// Note: fields passed to this function are treated as keys within the passed -// object; no array/slice syntax is supported. func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { val, found, err := NestedFieldNoCopy(obj, fields...) if !found || err != nil { @@ -47,16 +43,10 @@ func NestedFieldCopy(obj map[string]interface{}, fields ...string) (interface{}, // NestedFieldNoCopy returns a reference to a nested field. // Returns false if value is not found and an error if unable // to traverse obj. -// -// Note: fields passed to this function are treated as keys within the passed -// object; no array/slice syntax is supported. func NestedFieldNoCopy(obj map[string]interface{}, fields ...string) (interface{}, bool, error) { var val interface{} = obj for i, field := range fields { - if val == nil { - return nil, false, nil - } if m, ok := val.(map[string]interface{}); ok { val, ok = m[field] if !ok { @@ -282,22 +272,6 @@ func getNestedString(obj map[string]interface{}, fields ...string) string { return val } -func getNestedInt64(obj map[string]interface{}, fields ...string) int64 { - val, found, err := NestedInt64(obj, fields...) - if !found || err != nil { - return 0 - } - return val -} - -func getNestedInt64Pointer(obj map[string]interface{}, fields ...string) *int64 { - val, found, err := NestedInt64(obj, fields...) - if !found || err != nil { - return nil - } - return &val -} - func jsonPath(fields []string) string { return "." + strings.Join(fields, ".") } @@ -330,8 +304,6 @@ var UnstructuredJSONScheme runtime.Codec = unstructuredJSONScheme{} type unstructuredJSONScheme struct{} -const unstructuredJSONSchemeIdentifier runtime.Identifier = "unstructuredJSON" - func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, obj runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { var err error if obj != nil { @@ -352,14 +324,7 @@ func (s unstructuredJSONScheme) Decode(data []byte, _ *schema.GroupVersionKind, return obj, &gvk, nil } -func (s unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { - if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(s.Identifier(), s.doEncode, w) - } - return s.doEncode(obj, w) -} - -func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error { +func (unstructuredJSONScheme) Encode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case *Unstructured: return json.NewEncoder(w).Encode(t.Object) @@ -383,11 +348,6 @@ func (unstructuredJSONScheme) doEncode(obj runtime.Object, w io.Writer) error { } } -// Identifier implements runtime.Encoder interface. 
-func (unstructuredJSONScheme) Identifier() runtime.Identifier { - return unstructuredJSONSchemeIdentifier -} - func (s unstructuredJSONScheme) decode(data []byte) (runtime.Object, error) { type detector struct { Items gojson.RawMessage @@ -415,6 +375,12 @@ func (s unstructuredJSONScheme) decodeInto(data []byte, obj runtime.Object) erro return s.decodeToUnstructured(data, x) case *UnstructuredList: return s.decodeToList(data, x) + case *runtime.VersionedObjects: + o, err := s.decode(data) + if err == nil { + x.Objects = []runtime.Object{o} + } + return err default: return json.Unmarshal(data, x) } @@ -469,30 +435,12 @@ func (s unstructuredJSONScheme) decodeToList(data []byte, list *UnstructuredList return nil } -type jsonFallbackEncoder struct { - encoder runtime.Encoder - identifier runtime.Identifier +type JSONFallbackEncoder struct { + runtime.Encoder } -func NewJSONFallbackEncoder(encoder runtime.Encoder) runtime.Encoder { - result := map[string]string{ - "name": "fallback", - "base": string(encoder.Identifier()), - } - identifier, err := gojson.Marshal(result) - if err != nil { - klog.Fatalf("Failed marshaling identifier for jsonFallbackEncoder: %v", err) - } - return &jsonFallbackEncoder{ - encoder: encoder, - identifier: runtime.Identifier(identifier), - } -} - -func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { - // There is no need to handle runtime.CacheableObject, as we only - // fallback to other encoders here. - err := c.encoder.Encode(obj, w) +func (c JSONFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { + err := c.Encoder.Encode(obj, w) if runtime.IsNotRegisteredError(err) { switch obj.(type) { case *Unstructured, *UnstructuredList: @@ -501,8 +449,3 @@ func (c *jsonFallbackEncoder) Encode(obj runtime.Object, w io.Writer) error { } return err } - -// Identifier implements runtime.Encoder interface. -func (c *jsonFallbackEncoder) Identifier() runtime.Identifier { - return c.identifier -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go index d190339..781469e 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured.go @@ -47,7 +47,6 @@ type Unstructured struct { var _ metav1.Object = &Unstructured{} var _ runtime.Unstructured = &Unstructured{} -var _ metav1.ListInterface = &Unstructured{} func (obj *Unstructured) GetObjectKind() schema.ObjectKind { return obj } @@ -127,16 +126,6 @@ func (u *Unstructured) UnmarshalJSON(b []byte) error { return err } -// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. -// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. -func (in *Unstructured) NewEmptyInstance() runtime.Unstructured { - out := new(Unstructured) - if in != nil { - out.GetObjectKind().SetGroupVersionKind(in.GetObjectKind().GroupVersionKind()) - } - return out -} - func (in *Unstructured) DeepCopy() *Unstructured { if in == nil { return nil @@ -154,20 +143,13 @@ func (u *Unstructured) setNestedField(value interface{}, fields ...string) { SetNestedField(u.Object, value, fields...) 
} -func (u *Unstructured) setNestedStringSlice(value []string, fields ...string) { +func (u *Unstructured) setNestedSlice(value []string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) } SetNestedStringSlice(u.Object, value, fields...) } -func (u *Unstructured) setNestedSlice(value []interface{}, fields ...string) { - if u.Object == nil { - u.Object = make(map[string]interface{}) - } - SetNestedSlice(u.Object, value, fields...) -} - func (u *Unstructured) setNestedMap(value map[string]string, fields ...string) { if u.Object == nil { u.Object = make(map[string]interface{}) @@ -330,18 +312,6 @@ func (u *Unstructured) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } -func (u *Unstructured) GetRemainingItemCount() *int64 { - return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") -} - -func (u *Unstructured) SetRemainingItemCount(c *int64) { - if c == nil { - RemoveNestedField(u.Object, "metadata", "remainingItemCount") - } else { - u.setNestedField(*c, "metadata", "remainingItemCount") - } -} - func (u *Unstructured) GetCreationTimestamp() metav1.Time { var timestamp metav1.Time timestamp.UnmarshalQueryParameter(getNestedString(u.Object, "metadata", "creationTimestamp")) @@ -431,6 +401,31 @@ func (u *Unstructured) GroupVersionKind() schema.GroupVersionKind { return gvk } +func (u *Unstructured) GetInitializers() *metav1.Initializers { + m, found, err := nestedMapNoCopy(u.Object, "metadata", "initializers") + if !found || err != nil { + return nil + } + out := &metav1.Initializers{} + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, out); err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + return nil + } + return out +} + +func (u *Unstructured) SetInitializers(initializers *metav1.Initializers) { + if initializers == nil { + RemoveNestedField(u.Object, "metadata", "initializers") + return + } + out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(initializers) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to retrieve initializers for object: %v", err)) + } + u.setNestedField(out, "metadata", "initializers") +} + func (u *Unstructured) GetFinalizers() []string { val, _, _ := NestedStringSlice(u.Object, "metadata", "finalizers") return val @@ -441,7 +436,7 @@ func (u *Unstructured) SetFinalizers(finalizers []string) { RemoveNestedField(u.Object, "metadata", "finalizers") return } - u.setNestedStringSlice(finalizers, "metadata", "finalizers") + u.setNestedSlice(finalizers, "metadata", "finalizers") } func (u *Unstructured) GetClusterName() string { @@ -455,42 +450,3 @@ func (u *Unstructured) SetClusterName(clusterName string) { } u.setNestedField(clusterName, "metadata", "clusterName") } - -func (u *Unstructured) GetManagedFields() []metav1.ManagedFieldsEntry { - items, found, err := NestedSlice(u.Object, "metadata", "managedFields") - if !found || err != nil { - return nil - } - managedFields := []metav1.ManagedFieldsEntry{} - for _, item := range items { - m, ok := item.(map[string]interface{}) - if !ok { - utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object, item %v is not a map", item)) - return nil - } - out := metav1.ManagedFieldsEntry{} - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(m, &out); err != nil { - utilruntime.HandleError(fmt.Errorf("unable to retrieve managedFields for object: %v", err)) - return nil - } - managedFields = append(managedFields, out) - } - return 
managedFields -} - -func (u *Unstructured) SetManagedFields(managedFields []metav1.ManagedFieldsEntry) { - if managedFields == nil { - RemoveNestedField(u.Object, "metadata", "managedFields") - return - } - items := []interface{}{} - for _, managedFieldsEntry := range managedFields { - out, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&managedFieldsEntry) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to set managedFields for object: %v", err)) - return - } - items = append(items, out) - } - u.setNestedSlice(items, "metadata", "managedFields") -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go index 5028f5f..bf3fd02 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured/unstructured_list.go @@ -52,16 +52,6 @@ func (u *UnstructuredList) EachListItem(fn func(runtime.Object) error) error { return nil } -// NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. -// This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. -func (u *UnstructuredList) NewEmptyInstance() runtime.Unstructured { - out := new(UnstructuredList) - if u != nil { - out.SetGroupVersionKind(u.GroupVersionKind()) - } - return out -} - // UnstructuredContent returns a map contain an overlay of the Items field onto // the Object field. Items always overwrites overlay. func (u *UnstructuredList) UnstructuredContent() map[string]interface{} { @@ -176,18 +166,6 @@ func (u *UnstructuredList) SetContinue(c string) { u.setNestedField(c, "metadata", "continue") } -func (u *UnstructuredList) GetRemainingItemCount() *int64 { - return getNestedInt64Pointer(u.Object, "metadata", "remainingItemCount") -} - -func (u *UnstructuredList) SetRemainingItemCount(c *int64) { - if c == nil { - RemoveNestedField(u.Object, "metadata", "remainingItemCount") - } else { - u.setNestedField(*c, "metadata", "remainingItemCount") - } -} - func (u *UnstructuredList) SetGroupVersionKind(gvk schema.GroupVersionKind) { u.SetAPIVersion(gvk.GroupVersion().String()) u.SetKind(gvk.Kind) diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go deleted file mode 100644 index 2ade69d..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.conversion.go +++ /dev/null @@ -1,523 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by conversion-gen. DO NOT EDIT. 
- -package v1 - -import ( - url "net/url" - unsafe "unsafe" - - resource "k8s.io/apimachinery/pkg/api/resource" - conversion "k8s.io/apimachinery/pkg/conversion" - fields "k8s.io/apimachinery/pkg/fields" - labels "k8s.io/apimachinery/pkg/labels" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" - watch "k8s.io/apimachinery/pkg/watch" -) - -func init() { - localSchemeBuilder.Register(RegisterConversions) -} - -// RegisterConversions adds conversion functions to the given scheme. -// Public to allow building arbitrary schemes. -func RegisterConversions(s *runtime.Scheme) error { - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*CreateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_CreateOptions(a.(*url.Values), b.(*CreateOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ExportOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_ExportOptions(a.(*url.Values), b.(*ExportOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*GetOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_GetOptions(a.(*url.Values), b.(*GetOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*ListOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_ListOptions(a.(*url.Values), b.(*ListOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*PatchOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_PatchOptions(a.(*url.Values), b.(*PatchOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*TableOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_TableOptions(a.(*url.Values), b.(*TableOptions), scope) - }); err != nil { - return err - } - if err := s.AddGeneratedConversionFunc((*url.Values)(nil), (*UpdateOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_UpdateOptions(a.(*url.Values), b.(*UpdateOptions), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*map[string]string)(nil), (*LabelSelector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Map_string_To_string_To_v1_LabelSelector(a.(*map[string]string), b.(*LabelSelector), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((**bool)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Pointer_bool_To_bool(a.(**bool), b.(*bool), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((**float64)(nil), (*float64)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Pointer_float64_To_float64(a.(**float64), b.(*float64), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((**int32)(nil), (*int32)(nil), func(a, b 
interface{}, scope conversion.Scope) error { - return Convert_Pointer_int32_To_int32(a.(**int32), b.(*int32), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((**int64)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Pointer_int64_To_int(a.(**int64), b.(*int), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((**int64)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Pointer_int64_To_int64(a.(**int64), b.(*int64), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((**intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Pointer_intstr_IntOrString_To_intstr_IntOrString(a.(**intstr.IntOrString), b.(*intstr.IntOrString), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((**string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Pointer_string_To_string(a.(**string), b.(*string), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((**Duration)(nil), (*Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Pointer_v1_Duration_To_v1_Duration(a.(**Duration), b.(*Duration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]string)(nil), (**DeletionPropagation)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_Pointer_v1_DeletionPropagation(a.(*[]string), b.(**DeletionPropagation), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]string)(nil), (**Time)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_Pointer_v1_Time(a.(*[]string), b.(**Time), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]string)(nil), (*[]int32)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_Slice_int32(a.(*[]string), b.(*[]int32), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]string)(nil), (*IncludeObjectPolicy)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_v1_IncludeObjectPolicy(a.(*[]string), b.(*IncludeObjectPolicy), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]string)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_v1_Time(a.(*[]string), b.(*Time), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*bool)(nil), (**bool)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_bool_To_Pointer_bool(a.(*bool), b.(**bool), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*fields.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_fields_Selector_To_string(a.(*fields.Selector), b.(*string), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*float64)(nil), (**float64)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_float64_To_Pointer_float64(a.(*float64), b.(**float64), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*int32)(nil), (**int32)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_int32_To_Pointer_int32(a.(*int32), b.(**int32), scope) - }); 
err != nil { - return err - } - if err := s.AddConversionFunc((*int64)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_int64_To_Pointer_int64(a.(*int64), b.(**int64), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*int)(nil), (**int64)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_int_To_Pointer_int64(a.(*int), b.(**int64), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (**intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_intstr_IntOrString_To_Pointer_intstr_IntOrString(a.(*intstr.IntOrString), b.(**intstr.IntOrString), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*intstr.IntOrString)(nil), (*intstr.IntOrString)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_intstr_IntOrString_To_intstr_IntOrString(a.(*intstr.IntOrString), b.(*intstr.IntOrString), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*labels.Selector)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_labels_Selector_To_string(a.(*labels.Selector), b.(*string), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*resource.Quantity)(nil), (*resource.Quantity)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_resource_Quantity_To_resource_Quantity(a.(*resource.Quantity), b.(*resource.Quantity), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*string)(nil), (**string)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_string_To_Pointer_string(a.(*string), b.(**string), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*string)(nil), (*fields.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_string_To_fields_Selector(a.(*string), b.(*fields.Selector), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*string)(nil), (*labels.Selector)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_string_To_labels_Selector(a.(*string), b.(*labels.Selector), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*url.Values)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_url_Values_To_v1_DeleteOptions(a.(*url.Values), b.(*DeleteOptions), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*DeleteOptions)(nil), (*DeleteOptions)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_DeleteOptions_To_v1_DeleteOptions(a.(*DeleteOptions), b.(*DeleteOptions), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*Duration)(nil), (**Duration)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Duration_To_Pointer_v1_Duration(a.(*Duration), b.(**Duration), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*InternalEvent)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_InternalEvent_To_v1_WatchEvent(a.(*InternalEvent), b.(*WatchEvent), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*LabelSelector)(nil), (*map[string]string)(nil), func(a, b interface{}, scope conversion.Scope) error { - return 
Convert_v1_LabelSelector_To_Map_string_To_string(a.(*LabelSelector), b.(*map[string]string), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*ListMeta)(nil), (*ListMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_ListMeta_To_v1_ListMeta(a.(*ListMeta), b.(*ListMeta), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*MicroTime)(nil), (*MicroTime)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_MicroTime_To_v1_MicroTime(a.(*MicroTime), b.(*MicroTime), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*Time)(nil), (*Time)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_Time_To_v1_Time(a.(*Time), b.(*Time), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*TypeMeta)(nil), (*TypeMeta)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_TypeMeta_To_v1_TypeMeta(a.(*TypeMeta), b.(*TypeMeta), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*WatchEvent)(nil), (*InternalEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_WatchEvent_To_v1_InternalEvent(a.(*WatchEvent), b.(*InternalEvent), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*WatchEvent)(nil), (*watch.Event)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_v1_WatchEvent_To_watch_Event(a.(*WatchEvent), b.(*watch.Event), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*watch.Event)(nil), (*WatchEvent)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_watch_Event_To_v1_WatchEvent(a.(*watch.Event), b.(*WatchEvent), scope) - }); err != nil { - return err - } - return nil -} - -func autoConvert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { - // WARNING: Field TypeMeta does not have json tag, skipping. - - if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { - out.DryRun = *(*[]string)(unsafe.Pointer(&values)) - } else { - out.DryRun = nil - } - if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { - return err - } - } else { - out.FieldManager = "" - } - return nil -} - -// Convert_url_Values_To_v1_CreateOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_CreateOptions(in *url.Values, out *CreateOptions, s conversion.Scope) error { - return autoConvert_url_Values_To_v1_CreateOptions(in, out, s) -} - -func autoConvert_url_Values_To_v1_DeleteOptions(in *url.Values, out *DeleteOptions, s conversion.Scope) error { - // WARNING: Field TypeMeta does not have json tag, skipping. 
- - if values, ok := map[string][]string(*in)["gracePeriodSeconds"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.GracePeriodSeconds, s); err != nil { - return err - } - } else { - out.GracePeriodSeconds = nil - } - // INFO: in.Preconditions opted out of conversion generation - if values, ok := map[string][]string(*in)["orphanDependents"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.OrphanDependents, s); err != nil { - return err - } - } else { - out.OrphanDependents = nil - } - if values, ok := map[string][]string(*in)["propagationPolicy"]; ok && len(values) > 0 { - if err := Convert_Slice_string_To_Pointer_v1_DeletionPropagation(&values, &out.PropagationPolicy, s); err != nil { - return err - } - } else { - out.PropagationPolicy = nil - } - if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { - out.DryRun = *(*[]string)(unsafe.Pointer(&values)) - } else { - out.DryRun = nil - } - return nil -} - -func autoConvert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { - // WARNING: Field TypeMeta does not have json tag, skipping. - - if values, ok := map[string][]string(*in)["export"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_bool(&values, &out.Export, s); err != nil { - return err - } - } else { - out.Export = false - } - if values, ok := map[string][]string(*in)["exact"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_bool(&values, &out.Exact, s); err != nil { - return err - } - } else { - out.Exact = false - } - return nil -} - -// Convert_url_Values_To_v1_ExportOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_ExportOptions(in *url.Values, out *ExportOptions, s conversion.Scope) error { - return autoConvert_url_Values_To_v1_ExportOptions(in, out, s) -} - -func autoConvert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { - // WARNING: Field TypeMeta does not have json tag, skipping. - - if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { - return err - } - } else { - out.ResourceVersion = "" - } - return nil -} - -// Convert_url_Values_To_v1_GetOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_GetOptions(in *url.Values, out *GetOptions, s conversion.Scope) error { - return autoConvert_url_Values_To_v1_GetOptions(in, out, s) -} - -func autoConvert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { - // WARNING: Field TypeMeta does not have json tag, skipping. 
- - if values, ok := map[string][]string(*in)["labelSelector"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_string(&values, &out.LabelSelector, s); err != nil { - return err - } - } else { - out.LabelSelector = "" - } - if values, ok := map[string][]string(*in)["fieldSelector"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldSelector, s); err != nil { - return err - } - } else { - out.FieldSelector = "" - } - if values, ok := map[string][]string(*in)["watch"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_bool(&values, &out.Watch, s); err != nil { - return err - } - } else { - out.Watch = false - } - if values, ok := map[string][]string(*in)["allowWatchBookmarks"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_bool(&values, &out.AllowWatchBookmarks, s); err != nil { - return err - } - } else { - out.AllowWatchBookmarks = false - } - if values, ok := map[string][]string(*in)["resourceVersion"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_string(&values, &out.ResourceVersion, s); err != nil { - return err - } - } else { - out.ResourceVersion = "" - } - if values, ok := map[string][]string(*in)["timeoutSeconds"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_Pointer_int64(&values, &out.TimeoutSeconds, s); err != nil { - return err - } - } else { - out.TimeoutSeconds = nil - } - if values, ok := map[string][]string(*in)["limit"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_int64(&values, &out.Limit, s); err != nil { - return err - } - } else { - out.Limit = 0 - } - if values, ok := map[string][]string(*in)["continue"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_string(&values, &out.Continue, s); err != nil { - return err - } - } else { - out.Continue = "" - } - return nil -} - -// Convert_url_Values_To_v1_ListOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_ListOptions(in *url.Values, out *ListOptions, s conversion.Scope) error { - return autoConvert_url_Values_To_v1_ListOptions(in, out, s) -} - -func autoConvert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { - // WARNING: Field TypeMeta does not have json tag, skipping. - - if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { - out.DryRun = *(*[]string)(unsafe.Pointer(&values)) - } else { - out.DryRun = nil - } - if values, ok := map[string][]string(*in)["force"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_Pointer_bool(&values, &out.Force, s); err != nil { - return err - } - } else { - out.Force = nil - } - if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { - return err - } - } else { - out.FieldManager = "" - } - return nil -} - -// Convert_url_Values_To_v1_PatchOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_PatchOptions(in *url.Values, out *PatchOptions, s conversion.Scope) error { - return autoConvert_url_Values_To_v1_PatchOptions(in, out, s) -} - -func autoConvert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { - // WARNING: Field TypeMeta does not have json tag, skipping. 
- - if values, ok := map[string][]string(*in)["-"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_bool(&values, &out.NoHeaders, s); err != nil { - return err - } - } else { - out.NoHeaders = false - } - if values, ok := map[string][]string(*in)["includeObject"]; ok && len(values) > 0 { - if err := Convert_Slice_string_To_v1_IncludeObjectPolicy(&values, &out.IncludeObject, s); err != nil { - return err - } - } else { - out.IncludeObject = "" - } - return nil -} - -// Convert_url_Values_To_v1_TableOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_TableOptions(in *url.Values, out *TableOptions, s conversion.Scope) error { - return autoConvert_url_Values_To_v1_TableOptions(in, out, s) -} - -func autoConvert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { - // WARNING: Field TypeMeta does not have json tag, skipping. - - if values, ok := map[string][]string(*in)["dryRun"]; ok && len(values) > 0 { - out.DryRun = *(*[]string)(unsafe.Pointer(&values)) - } else { - out.DryRun = nil - } - if values, ok := map[string][]string(*in)["fieldManager"]; ok && len(values) > 0 { - if err := runtime.Convert_Slice_string_To_string(&values, &out.FieldManager, s); err != nil { - return err - } - } else { - out.FieldManager = "" - } - return nil -} - -// Convert_url_Values_To_v1_UpdateOptions is an autogenerated conversion function. -func Convert_url_Values_To_v1_UpdateOptions(in *url.Values, out *UpdateOptions, s conversion.Scope) error { - return autoConvert_url_Values_To_v1_UpdateOptions(in, out, s) -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go index b82fdf2..1084599 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/zz_generated.deepcopy.go @@ -312,27 +312,6 @@ func (in *ExportOptions) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FieldsV1) DeepCopyInto(out *FieldsV1) { - *out = *in - if in.Raw != nil { - in, out := &in.Raw, &out.Raw - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FieldsV1. -func (in *FieldsV1) DeepCopy() *FieldsV1 { - if in == nil { - return nil - } - out := new(FieldsV1) - in.DeepCopyInto(out) - return out -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *GetOptions) DeepCopyInto(out *GetOptions) { *out = *in @@ -454,6 +433,48 @@ func (in *GroupVersionResource) DeepCopy() *GroupVersionResource { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Initializer) DeepCopyInto(out *Initializer) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. +func (in *Initializer) DeepCopy() *Initializer { + if in == nil { + return nil + } + out := new(Initializer) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *Initializers) DeepCopyInto(out *Initializers) { + *out = *in + if in.Pending != nil { + in, out := &in.Pending, &out.Pending + *out = make([]Initializer, len(*in)) + copy(*out, *in) + } + if in.Result != nil { + in, out := &in.Result, &out.Result + *out = new(Status) + (*in).DeepCopyInto(*out) + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializers. +func (in *Initializers) DeepCopy() *Initializers { + if in == nil { + return nil + } + out := new(Initializers) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InternalEvent) DeepCopyInto(out *InternalEvent) { *out = *in @@ -528,7 +549,7 @@ func (in *LabelSelectorRequirement) DeepCopy() *LabelSelectorRequirement { func (in *List) DeepCopyInto(out *List) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]runtime.RawExtension, len(*in)) @@ -560,11 +581,6 @@ func (in *List) DeepCopyObject() runtime.Object { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *ListMeta) DeepCopyInto(out *ListMeta) { *out = *in - if in.RemainingItemCount != nil { - in, out := &in.RemainingItemCount, &out.RemainingItemCount - *out = new(int64) - **out = **in - } return } @@ -608,31 +624,6 @@ func (in *ListOptions) DeepCopyObject() runtime.Object { return nil } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ManagedFieldsEntry) DeepCopyInto(out *ManagedFieldsEntry) { - *out = *in - if in.Time != nil { - in, out := &in.Time, &out.Time - *out = (*in).DeepCopy() - } - if in.FieldsV1 != nil { - in, out := &in.FieldsV1, &out.FieldsV1 - *out = new(FieldsV1) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedFieldsEntry. -func (in *ManagedFieldsEntry) DeepCopy() *ManagedFieldsEntry { - if in == nil { - return nil - } - out := new(ManagedFieldsEntry) - in.DeepCopyInto(out) - return out -} - // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MicroTime. func (in *MicroTime) DeepCopy() *MicroTime { if in == nil { @@ -677,18 +668,16 @@ func (in *ObjectMeta) DeepCopyInto(out *ObjectMeta) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Initializers != nil { + in, out := &in.Initializers, &out.Initializers + *out = new(Initializers) + (*in).DeepCopyInto(*out) + } if in.Finalizers != nil { in, out := &in.Finalizers, &out.Finalizers *out = make([]string, len(*in)) copy(*out, *in) } - if in.ManagedFields != nil { - in, out := &in.ManagedFields, &out.ManagedFields - *out = make([]ManagedFieldsEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } return } @@ -728,65 +717,6 @@ func (in *OwnerReference) DeepCopy() *OwnerReference { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. 
-func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { - if in == nil { - return nil - } - out := new(PartialObjectMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PartialObjectMetadata, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. -func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { - if in == nil { - return nil - } - out := new(PartialObjectMetadataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *Patch) DeepCopyInto(out *Patch) { *out = *in @@ -803,41 +733,6 @@ func (in *Patch) DeepCopy() *Patch { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PatchOptions) DeepCopyInto(out *PatchOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.DryRun != nil { - in, out := &in.DryRun, &out.DryRun - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Force != nil { - in, out := &in.Force, &out.Force - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatchOptions. -func (in *PatchOptions) DeepCopy() *PatchOptions { - if in == nil { - return nil - } - out := new(PatchOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PatchOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = *in @@ -846,11 +741,6 @@ func (in *Preconditions) DeepCopyInto(out *Preconditions) { *out = new(types.UID) **out = **in } - if in.ResourceVersion != nil { - in, out := &in.ResourceVersion, &out.ResourceVersion - *out = new(string) - **out = **in - } return } @@ -905,7 +795,7 @@ func (in *ServerAddressByClientCIDR) DeepCopy() *ServerAddressByClientCIDR { func (in *Status) DeepCopyInto(out *Status) { *out = *in out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) + out.ListMeta = in.ListMeta if in.Details != nil { in, out := &in.Details, &out.Details *out = new(StatusDetails) @@ -969,108 +859,6 @@ func (in *StatusDetails) DeepCopy() *StatusDetails { return out } -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Table) DeepCopyInto(out *Table) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ListMeta.DeepCopyInto(&out.ListMeta) - if in.ColumnDefinitions != nil { - in, out := &in.ColumnDefinitions, &out.ColumnDefinitions - *out = make([]TableColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Rows != nil { - in, out := &in.Rows, &out.Rows - *out = make([]TableRow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. -func (in *Table) DeepCopy() *Table { - if in == nil { - return nil - } - out := new(Table) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Table) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. -func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { - if in == nil { - return nil - } - out := new(TableColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableOptions) DeepCopyInto(out *TableOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. -func (in *TableOptions) DeepCopy() *TableOptions { - if in == nil { - return nil - } - out := new(TableOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TableOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRow) DeepCopyInto(out *TableRow) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. -func (in *TableRowCondition) DeepCopy() *TableRowCondition { - if in == nil { - return nil - } - out := new(TableRowCondition) - in.DeepCopyInto(out) - return out -} - // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Time. func (in *Time) DeepCopy() *Time { if in == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go new file mode 100644 index 0000000..f3e5e4c --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go @@ -0,0 +1,27 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import "k8s.io/apimachinery/pkg/conversion" + +// Convert_Slice_string_To_v1beta1_IncludeObjectPolicy allows converting a URL query parameter value +func Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { + if len(*input) > 0 { + *out = IncludeObjectPolicy((*input)[0]) + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go similarity index 91% rename from vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go index 8751d05..3b2bedd 100644 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1/deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,11 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. */ -package v1 +package v1beta1 -import ( - "k8s.io/apimachinery/pkg/runtime" -) +import "k8s.io/apimachinery/pkg/runtime" func (in *TableRow) DeepCopy() *TableRow { if in == nil { diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go similarity index 63% rename from vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go rename to vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go index 84d7f0f..dc461cc 100644 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go @@ -1,5 +1,5 @@ /* -Copyright 2018 The Kubernetes Authors. +Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -14,8 +14,9 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ -// Package value defines types for an in-memory representation of yaml or json -// objects, organized for convenient comparison with a schema (as defined by -// the sibling schema package). Functions for reading and writing the objects -// are also provided. -package value +// +k8s:deepcopy-gen=package +// +k8s:openapi-gen=true +// +k8s:defaulter-gen=TypeMeta + +// +groupName=meta.k8s.io +package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go new file mode 100644 index 0000000..fe3df69 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go @@ -0,0 +1,632 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. +// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +// DO NOT EDIT! + +/* + Package v1beta1 is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto + + It has these top-level messages: + PartialObjectMetadata + PartialObjectMetadataList + TableOptions +*/ +package v1beta1 + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } +func (*PartialObjectMetadata) ProtoMessage() {} +func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } + +func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } +func (*PartialObjectMetadataList) ProtoMessage() {} +func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { + return fileDescriptorGenerated, []int{1} +} + +func (m *TableOptions) Reset() { *m = TableOptions{} } +func (*TableOptions) ProtoMessage() {} +func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } + +func init() { + proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadata") + proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") + proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.TableOptions") +} +func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) + n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + return i, nil +} + +func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for _, msg := range m.Items { + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *TableOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) + i += copy(dAtA[i:], m.IncludeObject) + return i, nil +} + +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PartialObjectMetadata) Size() (n int) { + var l int + _ = l + l = 
m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func (m *PartialObjectMetadataList) Size() (n int) { + var l int + _ = l + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *TableOptions) Size() (n int) { + var l int + _ = l + l = len(m.IncludeObject) + n += 1 + l + sovGenerated(uint64(l)) + return n +} + +func sovGenerated(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *PartialObjectMetadata) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadata{`, + `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `}`, + }, "") + return s +} +func (this *PartialObjectMetadataList) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&PartialObjectMetadataList{`, + `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, + `}`, + }, "") + return s +} +func (this *TableOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&TableOptions{`, + `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift 
+= 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, &PartialObjectMetadata{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TableOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + 
if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") +) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 375 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcd, 0x0a, 0xd3, 0x40, + 0x10, 0xc7, 0xb3, 0x48, 0xd1, 0x6e, 0xed, 0x25, 0x22, 0xd4, 0x1e, 0x36, 0xa5, 0xa7, 0x0a, 0x76, + 0xd7, 0x16, 0x11, 0x8f, 0x92, 0x5b, 0x41, 0x69, 0x09, 0x9e, 0x3c, 0xb9, 0x49, 0xc6, 0x74, 0xcd, + 0xc7, 0x86, 0xec, 0xa6, 0xd0, 0x8b, 0xf8, 0x08, 0x3e, 0x56, 0x8f, 0x3d, 0xf6, 0x14, 0x6c, 0x7c, + 0x0b, 0x4f, 0x92, 0x0f, 0xec, 0x87, 0x15, 0x7b, 0x9b, 0xf9, 0x0f, 0xbf, 0x5f, 0x66, 0xb2, 0xd8, + 0x09, 0xdf, 0x28, 0x2a, 0x24, 0x0b, 0x73, 0x17, 0xb2, 0x04, 0x34, 0x28, 0xb6, 0x81, 0xc4, 0x97, + 0x19, 0x6b, 0x07, 0x3c, 0x15, 0x31, 0xf7, 0xd6, 0x22, 0x81, 0x6c, 0xcb, 0xd2, 0x30, 0xa8, 0x02, + 0xc5, 0x62, 0xd0, 0x9c, 0x6d, 0x66, 0x2e, 0x68, 0x3e, 0x63, 0x01, 0x24, 0x90, 0x71, 0x0d, 0x3e, + 0x4d, 0x33, 0xa9, 0xa5, 0xf9, 0xbc, 0x41, 0xe9, 0x39, 0x4a, 0xd3, 0x30, 0xa8, 0x02, 0x45, 0x2b, + 0x94, 0xb6, 0xe8, 0x70, 0x1a, 0x08, 0xbd, 0xce, 0x5d, 0xea, 0xc9, 0x98, 0x05, 0x32, 0x90, 0xac, + 0x36, 0xb8, 0xf9, 0xe7, 0xba, 0xab, 0x9b, 0xba, 0x6a, 0xcc, 0xc3, 0x57, 0xf7, 0x2c, 0x75, 0xbd, + 0xcf, 0xf0, 0x9f, 0xa7, 0x64, 0x79, 0xa2, 0x45, 0x0c, 0x7f, 0x01, 0xaf, 0xff, 0x07, 0x28, 0x6f, + 0x0d, 0x31, 0xbf, 0xe6, 0xc6, 0x5b, 0xfc, 0x74, 0xc5, 0x33, 0x2d, 0x78, 0xb4, 0x74, 0xbf, 0x80, + 0xa7, 0xdf, 0x83, 0xe6, 0x3e, 0xd7, 0xdc, 0xfc, 0x84, 0x1f, 0xc5, 0x6d, 0x3d, 0x40, 0x23, 0x34, + 0xe9, 0xcd, 0x5f, 0xd2, 0x7b, 0x7e, 0x12, 0x3d, 0x79, 0x6c, 0x73, 0x57, 0x58, 0x46, 0x59, 0x58, + 0xf8, 0x94, 0x39, 0x7f, 0xac, 0xe3, 0xaf, 0xf8, 0xd9, 0xcd, 0x4f, 0xbf, 0x13, 0x4a, 0x9b, 0x1c, + 0x77, 0x84, 0x86, 0x58, 0x0d, 0xd0, 0xe8, 0xc1, 0xa4, 0x37, 0x7f, 0x4b, 0xef, 0x7e, 0x20, 0x7a, + 0x53, 0x6a, 0x77, 0xcb, 0xc2, 
0xea, 0x2c, 0x2a, 0xa5, 0xd3, 0x98, 0xc7, 0x2e, 0x7e, 0xfc, 0x81, + 0xbb, 0x11, 0x2c, 0x53, 0x2d, 0x64, 0xa2, 0x4c, 0x07, 0xf7, 0x45, 0xe2, 0x45, 0xb9, 0x0f, 0x0d, + 0x5a, 0x9f, 0xdd, 0xb5, 0x5f, 0xb4, 0x47, 0xf4, 0x17, 0xe7, 0xc3, 0x5f, 0x85, 0xf5, 0xe4, 0x22, + 0x58, 0xc9, 0x48, 0x78, 0x5b, 0xe7, 0x52, 0x61, 0x4f, 0x77, 0x47, 0x62, 0xec, 0x8f, 0xc4, 0x38, + 0x1c, 0x89, 0xf1, 0xad, 0x24, 0x68, 0x57, 0x12, 0xb4, 0x2f, 0x09, 0x3a, 0x94, 0x04, 0xfd, 0x28, + 0x09, 0xfa, 0xfe, 0x93, 0x18, 0x1f, 0x1f, 0xb6, 0xab, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xf3, + 0xe1, 0xde, 0x86, 0xdb, 0x02, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto new file mode 100644 index 0000000..83be997 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto @@ -0,0 +1,57 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = 'proto2'; + +package k8s.io.apimachinery.pkg.apis.meta.v1beta1; + +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "v1beta1"; + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadata { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message PartialObjectMetadataList { + // items contains each of the included items. + repeated PartialObjectMetadata items = 1; +} + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +message TableOptions { + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. 
+ optional string includeObject = 1; +} + diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go new file mode 100644 index 0000000..d13254b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go @@ -0,0 +1,57 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name for this API. +const GroupName = "meta.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Kind takes an unqualified kind and returns a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// scheme is the registry for the common types that adhere to the meta v1beta1 API spec. +var scheme = runtime.NewScheme() + +// ParameterCodec knows about query parameters used with the meta v1beta1 API spec. +var ParameterCodec = runtime.NewParameterCodec(scheme) + +func init() { + scheme.AddKnownTypes(SchemeGroupVersion, + &Table{}, + &TableOptions{}, + &PartialObjectMetadata{}, + &PartialObjectMetadataList{}, + ) + + if err := scheme.AddConversionFuncs( + Convert_Slice_string_To_v1beta1_IncludeObjectPolicy, + ); err != nil { + panic(err) + } + + // register manually. This usually goes through the SchemeBuilder, which we cannot use here. + //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go new file mode 100644 index 0000000..344c533 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go @@ -0,0 +1,161 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// package v1beta1 is alpha objects from meta that will be introduced. +package v1beta1 + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" +) + +// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf +// generation to support a meta type that can accept any valid JSON. + +// Table is a tabular representation of a set of API resources. The server transforms the +// object into a set of preferred columns for quickly reviewing the objects. 
+// +protobuf=false +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type Table struct { + v1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds + // +optional + v1.ListMeta `json:"metadata,omitempty"` + + // columnDefinitions describes each column in the returned items array. The number of cells per row + // will always match the number of column definitions. + ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` + // rows is the list of items in the table. + Rows []TableRow `json:"rows"` +} + +// TableColumnDefinition contains information about a column returned in the Table. +// +protobuf=false +type TableColumnDefinition struct { + // name is a human readable name for the column. + Name string `json:"name"` + // type is an OpenAPI type definition for this column. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Type string `json:"type"` + // format is an optional OpenAPI type definition for this column. The 'name' format is applied + // to the primary identifier column to assist in clients identifying column is the resource name. + // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. + Format string `json:"format"` + // description is a human readable description of this column. + Description string `json:"description"` + // priority is an integer defining the relative importance of this column compared to others. Lower + // numbers are considered higher priority. Columns that may be omitted in limited space scenarios + // should be given a higher priority. + Priority int32 `json:"priority"` +} + +// TableRow is an individual row in a table. +// +protobuf=false +type TableRow struct { + // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple + // maps, or lists, or null. See the type field of the column definition for a more detailed description. + Cells []interface{} `json:"cells"` + // conditions describe additional status of a row that are relevant for a human user. + // +optional + Conditions []TableRowCondition `json:"conditions,omitempty"` + // This field contains the requested additional information about each object based on the includeObject + // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the + // default serialization of the object for the current API version, and if "Metadata" (the default) will + // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. + // +optional + Object runtime.RawExtension `json:"object,omitempty"` +} + +// TableRowCondition allows a row to be marked with additional information. +// +protobuf=false +type TableRowCondition struct { + // Type of row condition. + Type RowConditionType `json:"type"` + // Status of the condition, one of True, False, Unknown. + Status ConditionStatus `json:"status"` + // (brief) machine readable reason for the condition's last transition. + // +optional + Reason string `json:"reason,omitempty"` + // Human readable message indicating details about last transition. + // +optional + Message string `json:"message,omitempty"` +} + +type RowConditionType string + +// These are valid conditions of a row. This list is not exhaustive and new conditions may be +// included by other resources. 
+const ( + // RowCompleted means the underlying resource has reached completion and may be given less + // visual priority than other resources. + RowCompleted RowConditionType = "Completed" +) + +type ConditionStatus string + +// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. +// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes +// can't decide if a resource is in the condition or not. In the future, we could add other +// intermediate conditions, e.g. ConditionDegraded. +const ( + ConditionTrue ConditionStatus = "True" + ConditionFalse ConditionStatus = "False" + ConditionUnknown ConditionStatus = "Unknown" +) + +// IncludeObjectPolicy controls which portion of the object is returned with a Table. +type IncludeObjectPolicy string + +const ( + // IncludeNone returns no object. + IncludeNone IncludeObjectPolicy = "None" + // IncludeMetadata serializes the object containing only its metadata field. + IncludeMetadata IncludeObjectPolicy = "Metadata" + // IncludeObject contains the full object. + IncludeObject IncludeObjectPolicy = "Object" +) + +// TableOptions are used when a Table is requested by the caller. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type TableOptions struct { + v1.TypeMeta `json:",inline"` + // includeObject decides whether to include each object along with its columnar information. + // Specifying "None" will return no object, specifying "Object" will return the full object contents, and + // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind + // in version v1beta1 of the meta.k8s.io API group. + IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` +} + +// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients +// to get access to a particular ObjectMeta schema without knowing the details of the version. +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadata struct { + v1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata + // +optional + v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` +} + +// PartialObjectMetadataList contains a list of objects containing only their metadata +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +type PartialObjectMetadataList struct { + v1.TypeMeta `json:",inline"` + + // items contains each of the included items. + Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go new file mode 100644 index 0000000..7394535 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go @@ -0,0 +1,104 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +// This file contains a collection of methods that can be used from go-restful to +// generate Swagger API documentation for its models. Please read this PR for more +// information on the implementation: https://github.com/emicklei/go-restful/pull/215 +// +// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if +// they are on one line! For multiple line or blocks that you want to ignore use ---. +// Any context after a --- is ignored. +// +// Those methods can be generated by using hack/update-generated-swagger-docs.sh + +// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. +var map_PartialObjectMetadata = map[string]string{ + "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", + "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", +} + +func (PartialObjectMetadata) SwaggerDoc() map[string]string { + return map_PartialObjectMetadata +} + +var map_PartialObjectMetadataList = map[string]string{ + "": "PartialObjectMetadataList contains a list of objects containing only their metadata", + "items": "items contains each of the included items.", +} + +func (PartialObjectMetadataList) SwaggerDoc() map[string]string { + return map_PartialObjectMetadataList +} + +var map_Table = map[string]string{ + "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", + "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", + "columnDefinitions": "columnDefinitions describes each column in the returned items array. The number of cells per row will always match the number of column definitions.", + "rows": "rows is the list of items in the table.", +} + +func (Table) SwaggerDoc() map[string]string { + return map_Table +} + +var map_TableColumnDefinition = map[string]string{ + "": "TableColumnDefinition contains information about a column returned in the Table.", + "name": "name is a human readable name for the column.", + "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", + "description": "description is a human readable description of this column.", + "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. 
Columns that may be omitted in limited space scenarios should be given a higher priority.", +} + +func (TableColumnDefinition) SwaggerDoc() map[string]string { + return map_TableColumnDefinition +} + +var map_TableOptions = map[string]string{ + "": "TableOptions are used when a Table is requested by the caller.", + "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", +} + +func (TableOptions) SwaggerDoc() map[string]string { + return map_TableOptions +} + +var map_TableRow = map[string]string{ + "": "TableRow is an individual row in a table.", + "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", + "conditions": "conditions describe additional status of a row that are relevant for a human user.", + "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. Check the returned kind and apiVersion of the object before parsing.", +} + +func (TableRow) SwaggerDoc() map[string]string { + return map_TableRow +} + +var map_TableRowCondition = map[string]string{ + "": "TableRowCondition allows a row to be marked with additional information.", + "type": "Type of row condition.", + "status": "Status of the condition, one of True, False, Unknown.", + "reason": "(brief) machine readable reason for the condition's last transition.", + "message": "Human readable message indicating details about last transition.", +} + +func (TableRowCondition) SwaggerDoc() map[string]string { + return map_TableRowCondition +} + +// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 0000000..b77db1b --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,189 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. +func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { + if in == nil { + return nil + } + out := new(PartialObjectMetadata) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { + *out = *in + out.TypeMeta = in.TypeMeta + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]*PartialObjectMetadata, len(*in)) + for i := range *in { + if (*in)[i] != nil { + in, out := &(*in)[i], &(*out)[i] + *out = new(PartialObjectMetadata) + (*in).DeepCopyInto(*out) + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. +func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { + if in == nil { + return nil + } + out := new(PartialObjectMetadataList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *Table) DeepCopyInto(out *Table) { + *out = *in + out.TypeMeta = in.TypeMeta + out.ListMeta = in.ListMeta + if in.ColumnDefinitions != nil { + in, out := &in.ColumnDefinitions, &out.ColumnDefinitions + *out = make([]TableColumnDefinition, len(*in)) + copy(*out, *in) + } + if in.Rows != nil { + in, out := &in.Rows, &out.Rows + *out = make([]TableRow, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. +func (in *Table) DeepCopy() *Table { + if in == nil { + return nil + } + out := new(Table) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *Table) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. +func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { + if in == nil { + return nil + } + out := new(TableColumnDefinition) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *TableOptions) DeepCopyInto(out *TableOptions) { + *out = *in + out.TypeMeta = in.TypeMeta + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. +func (in *TableOptions) DeepCopy() *TableOptions { + if in == nil { + return nil + } + out := new(TableOptions) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TableOptions) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRow) DeepCopyInto(out *TableRow) { + clone := in.DeepCopy() + *out = *clone + return +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. +func (in *TableRowCondition) DeepCopy() *TableRowCondition { + if in == nil { + return nil + } + out := new(TableRowCondition) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go new file mode 100644 index 0000000..73e63fc --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1beta1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. +func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go index 2d7c8bd..bc615dc 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/converter.go @@ -54,8 +54,7 @@ type Converter struct { generatedConversionFuncs ConversionFuncs // Set of conversions that should be treated as a no-op - ignoredConversions map[typePair]struct{} - ignoredUntypedConversions map[typePair]struct{} + ignoredConversions map[typePair]struct{} // This is a map from a source field type and name, to a list of destination // field type and name. @@ -84,23 +83,17 @@ type Converter struct { // NewConverter creates a new Converter object. 
func NewConverter(nameFn NameFunc) *Converter { c := &Converter{ - conversionFuncs: NewConversionFuncs(), - generatedConversionFuncs: NewConversionFuncs(), - ignoredConversions: make(map[typePair]struct{}), - ignoredUntypedConversions: make(map[typePair]struct{}), - nameFunc: nameFn, - structFieldDests: make(map[typeNamePair][]typeNamePair), - structFieldSources: make(map[typeNamePair][]typeNamePair), + conversionFuncs: NewConversionFuncs(), + generatedConversionFuncs: NewConversionFuncs(), + ignoredConversions: make(map[typePair]struct{}), + nameFunc: nameFn, + structFieldDests: make(map[typeNamePair][]typeNamePair), + structFieldSources: make(map[typeNamePair][]typeNamePair), inputFieldMappingFuncs: make(map[reflect.Type]FieldMappingFunc), inputDefaultFlags: make(map[reflect.Type]FieldMatchingFlags), } - c.RegisterUntypedConversionFunc( - (*[]byte)(nil), (*[]byte)(nil), - func(a, b interface{}, s Scope) error { - return Convert_Slice_byte_To_Slice_byte(a.(*[]byte), b.(*[]byte), s) - }, - ) + c.RegisterConversionFunc(Convert_Slice_byte_To_Slice_byte) return c } @@ -138,6 +131,10 @@ type Scope interface { // parameters, you'll run out of stack space before anything useful happens. Convert(src, dest interface{}, flags FieldMatchingFlags) error + // DefaultConvert performs the default conversion, without calling a conversion func + // on the current stack frame. This makes it safe to call from a conversion func. + DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error + // SrcTags and DestTags contain the struct tags that src and dest had, respectively. // If the enclosing object was not a struct, then these will contain no tags, of course. SrcTag() reflect.StructTag @@ -156,14 +153,31 @@ type FieldMappingFunc func(key string, sourceTag, destTag reflect.StructTag) (so func NewConversionFuncs() ConversionFuncs { return ConversionFuncs{ + fns: make(map[typePair]reflect.Value), untyped: make(map[typePair]ConversionFunc), } } type ConversionFuncs struct { + fns map[typePair]reflect.Value untyped map[typePair]ConversionFunc } +// Add adds the provided conversion functions to the lookup table - they must have the signature +// `func(type1, type2, Scope) error`. Functions are added in the order passed and will override +// previously registered pairs. +func (c ConversionFuncs) Add(fns ...interface{}) error { + for _, fn := range fns { + fv := reflect.ValueOf(fn) + ft := fv.Type() + if err := verifyConversionFunctionSignature(ft); err != nil { + return err + } + c.fns[typePair{ft.In(0).Elem(), ft.In(1).Elem()}] = fv + } + return nil +} + // AddUntyped adds the provided conversion function to the lookup table for the types that are // supplied as a and b. a and b must be pointers or an error is returned. This method overwrites // previously defined functions. @@ -183,6 +197,12 @@ func (c ConversionFuncs) AddUntyped(a, b interface{}, fn ConversionFunc) error { // both other and c, with other conversions taking precedence. func (c ConversionFuncs) Merge(other ConversionFuncs) ConversionFuncs { merged := NewConversionFuncs() + for k, v := range c.fns { + merged.fns[k] = v + } + for k, v := range other.fns { + merged.fns[k] = v + } for k, v := range c.untyped { merged.untyped[k] = v } @@ -270,6 +290,12 @@ func (s *scope) Convert(src, dest interface{}, flags FieldMatchingFlags) error { return s.converter.Convert(src, dest, flags, s.meta) } +// DefaultConvert continues a conversion, performing a default conversion (no conversion func) +// for the current stack frame. 
+func (s *scope) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags) error { + return s.converter.DefaultConvert(src, dest, flags, s.meta) +} + // SrcTag returns the tag of the struct containing the current source item, if any. func (s *scope) SrcTag() reflect.StructTag { return s.srcStack.top().tag @@ -334,6 +360,29 @@ func verifyConversionFunctionSignature(ft reflect.Type) error { return nil } +// RegisterConversionFunc registers a conversion func with the +// Converter. conversionFunc must take three parameters: a pointer to the input +// type, a pointer to the output type, and a conversion.Scope (which should be +// used if recursive conversion calls are desired). It must return an error. +// +// Example: +// c.RegisterConversionFunc( +// func(in *Pod, out *v1.Pod, s Scope) error { +// // conversion logic... +// return nil +// }) +// DEPRECATED: Will be removed in favor of RegisterUntypedConversionFunc +func (c *Converter) RegisterConversionFunc(conversionFunc interface{}) error { + return c.conversionFuncs.Add(conversionFunc) +} + +// Similar to RegisterConversionFunc, but registers conversion function that were +// automatically generated. +// DEPRECATED: Will be removed in favor of RegisterGeneratedUntypedConversionFunc +func (c *Converter) RegisterGeneratedConversionFunc(conversionFunc interface{}) error { + return c.generatedConversionFuncs.Add(conversionFunc) +} + // RegisterUntypedConversionFunc registers a function that converts between a and b by passing objects of those // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce // any other guarantee. @@ -360,7 +409,6 @@ func (c *Converter) RegisterIgnoredConversion(from, to interface{}) error { return fmt.Errorf("expected pointer arg for 'to' param 1, got: %v", typeTo) } c.ignoredConversions[typePair{typeFrom.Elem(), typeTo.Elem()}] = struct{}{} - c.ignoredUntypedConversions[typePair{typeFrom, typeTo}] = struct{}{} return nil } @@ -422,6 +470,18 @@ func (c *Converter) Convert(src, dest interface{}, flags FieldMatchingFlags, met return c.doConversion(src, dest, flags, meta, c.convert) } +// DefaultConvert will translate src to dest if it knows how. Both must be pointers. +// No conversion func is used. If the default copying mechanism +// doesn't work on this type pair, an error will be returned. +// Read the comments on the various FieldMatchingFlags constants to understand +// what the 'flags' parameter does. +// 'meta' is given to allow you to pass information to conversion functions, +// it is not used by DefaultConvert() other than storing it in the scope. +// Not safe for objects with cyclic references! +func (c *Converter) DefaultConvert(src, dest interface{}, flags FieldMatchingFlags, meta *Meta) error { + return c.doConversion(src, dest, flags, meta, c.defaultConvert) +} + type conversionFunc func(sv, dv reflect.Value, scope *scope) error func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags, meta *Meta, f conversionFunc) error { @@ -431,11 +491,6 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags flags: flags, meta: meta, } - - // ignore conversions of this type - if _, ok := c.ignoredUntypedConversions[pair]; ok { - return nil - } if fn, ok := c.conversionFuncs.untyped[pair]; ok { return fn(src, dest, scope) } @@ -462,20 +517,33 @@ func (c *Converter) doConversion(src, dest interface{}, flags FieldMatchingFlags return f(sv, dv, scope) } -// callUntyped calls predefined conversion func. 
-func (c *Converter) callUntyped(sv, dv reflect.Value, f ConversionFunc, scope *scope) error { - if !dv.CanAddr() { - return scope.errorf("cant addr dest") +// callCustom calls 'custom' with sv & dv. custom must be a conversion function. +func (c *Converter) callCustom(sv, dv, custom reflect.Value, scope *scope) error { + if !sv.CanAddr() { + sv2 := reflect.New(sv.Type()) + sv2.Elem().Set(sv) + sv = sv2 + } else { + sv = sv.Addr() } - var svPointer reflect.Value - if sv.CanAddr() { - svPointer = sv.Addr() + if !dv.CanAddr() { + if !dv.CanSet() { + return scope.errorf("can't addr or set dest.") + } + dvOrig := dv + dv := reflect.New(dvOrig.Type()) + defer func() { dvOrig.Set(dv) }() } else { - svPointer = reflect.New(sv.Type()) - svPointer.Elem().Set(sv) + dv = dv.Addr() } - dvPointer := dv.Addr() - return f(svPointer.Interface(), dvPointer.Interface(), scope) + args := []reflect.Value{sv, dv, reflect.ValueOf(scope)} + ret := custom.Call(args)[0].Interface() + // This convolution is necessary because nil interfaces won't convert + // to errors. + if ret == nil { + return nil + } + return ret.(error) } // convert recursively copies sv into dv, calling an appropriate conversion function if @@ -493,14 +561,27 @@ func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error { } // Convert sv to dv. - pair = typePair{reflect.PtrTo(sv.Type()), reflect.PtrTo(dv.Type())} - if f, ok := c.conversionFuncs.untyped[pair]; ok { - return c.callUntyped(sv, dv, f, scope) + if fv, ok := c.conversionFuncs.fns[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Calling custom conversion of '%v' to '%v'", st, dt) + } + return c.callCustom(sv, dv, fv, scope) } - if f, ok := c.generatedConversionFuncs.untyped[pair]; ok { - return c.callUntyped(sv, dv, f, scope) + if fv, ok := c.generatedConversionFuncs.fns[pair]; ok { + if c.Debug != nil { + c.Debug.Logf("Calling generated conversion of '%v' to '%v'", st, dt) + } + return c.callCustom(sv, dv, fv, scope) } + return c.defaultConvert(sv, dv, scope) +} + +// defaultConvert recursively copies sv into dv. no conversion function is called +// for the current stack frame (but conversion functions may be called for nested objects) +func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error { + dt, st := dv.Type(), sv.Type() + if !dv.CanSet() { return scope.errorf("Cannot set dest. 
(Tried to deep copy something with unexported fields?)") } diff --git a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go index 2f0dd00..b3804aa 100644 --- a/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go +++ b/vendor/k8s.io/apimachinery/pkg/conversion/queryparams/convert.go @@ -54,6 +54,10 @@ func jsonTag(field reflect.StructField) (string, bool) { return tag, omitempty } +func formatValue(value interface{}) string { + return fmt.Sprintf("%v", value) +} + func isPointerKind(kind reflect.Kind) bool { return kind == reflect.Ptr } diff --git a/vendor/k8s.io/apimachinery/pkg/labels/labels.go b/vendor/k8s.io/apimachinery/pkg/labels/labels.go index abf3ace..32db4d9 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/labels.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/labels.go @@ -172,7 +172,7 @@ func ConvertSelectorToLabelsMap(selector string) (Set, error) { return labelsMap, err } value := strings.TrimSpace(l[1]) - if err := validateLabelValue(key, value); err != nil { + if err := validateLabelValue(value); err != nil { return labelsMap, err } labelsMap[key] = value diff --git a/vendor/k8s.io/apimachinery/pkg/labels/selector.go b/vendor/k8s.io/apimachinery/pkg/labels/selector.go index 2f8e1e2..374d2ef 100644 --- a/vendor/k8s.io/apimachinery/pkg/labels/selector.go +++ b/vendor/k8s.io/apimachinery/pkg/labels/selector.go @@ -23,10 +23,10 @@ import ( "strconv" "strings" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/apimachinery/pkg/util/validation" - "k8s.io/klog" ) // Requirements is AND of all requirements. @@ -54,11 +54,6 @@ type Selector interface { // Make a deep copy of the selector. DeepCopySelector() Selector - - // RequiresExactMatch allows a caller to introspect whether a given selector - // requires a single specific label to be set, and if so returns the value it - // requires. - RequiresExactMatch(label string) (value string, found bool) } // Everything returns a selector that matches all labels. 
@@ -68,13 +63,12 @@ func Everything() Selector { type nothingSelector struct{} -func (n nothingSelector) Matches(_ Labels) bool { return false } -func (n nothingSelector) Empty() bool { return false } -func (n nothingSelector) String() string { return "" } -func (n nothingSelector) Add(_ ...Requirement) Selector { return n } -func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } -func (n nothingSelector) DeepCopySelector() Selector { return n } -func (n nothingSelector) RequiresExactMatch(label string) (value string, found bool) { return "", false } +func (n nothingSelector) Matches(_ Labels) bool { return false } +func (n nothingSelector) Empty() bool { return false } +func (n nothingSelector) String() string { return "" } +func (n nothingSelector) Add(_ ...Requirement) Selector { return n } +func (n nothingSelector) Requirements() (Requirements, bool) { return nil, false } +func (n nothingSelector) DeepCopySelector() Selector { return n } // Nothing returns a selector that matches no labels func Nothing() Selector { @@ -168,7 +162,7 @@ func NewRequirement(key string, op selection.Operator, vals []string) (*Requirem } for i := range vals { - if err := validateLabelValue(key, vals[i]); err != nil { + if err := validateLabelValue(vals[i]); err != nil { return nil, err } } @@ -217,13 +211,13 @@ func (r *Requirement) Matches(ls Labels) bool { } lsValue, err := strconv.ParseInt(ls.Get(r.key), 10, 64) if err != nil { - klog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) + glog.V(10).Infof("ParseInt failed for value %+v in label %+v, %+v", ls.Get(r.key), ls, err) return false } // There should be only one strValue in r.strValues, and can be converted to a integer. if len(r.strValues) != 1 { - klog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) + glog.V(10).Infof("Invalid values count %+v of requirement %#v, for 'Gt', 'Lt' operators, exactly one value is required", len(r.strValues), r) return false } @@ -231,7 +225,7 @@ func (r *Requirement) Matches(ls Labels) bool { for i := range r.strValues { rValue, err = strconv.ParseInt(r.strValues[i], 10, 64) if err != nil { - klog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) + glog.V(10).Infof("ParseInt failed for value %+v in requirement %#v, for 'Gt', 'Lt' operators, the value must be an integer", r.strValues[i], r) return false } } @@ -364,23 +358,6 @@ func (lsel internalSelector) String() string { return strings.Join(reqs, ",") } -// RequiresExactMatch introspect whether a given selector requires a single specific field -// to be set, and if so returns the value it requires. 
-func (lsel internalSelector) RequiresExactMatch(label string) (value string, found bool) { - for ix := range lsel { - if lsel[ix].key == label { - switch lsel[ix].operator { - case selection.Equals, selection.DoubleEquals, selection.In: - if len(lsel[ix].strValues) == 1 { - return lsel[ix].strValues[0], true - } - } - return "", false - } - } - return "", false -} - // Token represents constant definition for lexer token type Token int @@ -860,9 +837,9 @@ func validateLabelKey(k string) error { return nil } -func validateLabelValue(k, v string) error { +func validateLabelValue(v string) error { if errs := validation.IsValidLabelValue(v); len(errs) != 0 { - return fmt.Errorf("invalid label value: %q: at key: %q: %s", v, k, strings.Join(errs, "; ")) + return fmt.Errorf("invalid label value: %q: %s", v, strings.Join(errs, "; ")) } return nil } @@ -873,7 +850,7 @@ func SelectorFromSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} } - requirements := make([]Requirement, 0, len(ls)) + var requirements internalSelector for label, value := range ls { r, err := NewRequirement(label, selection.Equals, []string{value}) if err == nil { @@ -885,7 +862,7 @@ func SelectorFromSet(ls Set) Selector { } // sort to have deterministic string representation sort.Sort(ByKey(requirements)) - return internalSelector(requirements) + return requirements } // SelectorFromValidatedSet returns a Selector which will match exactly the given Set. @@ -895,13 +872,13 @@ func SelectorFromValidatedSet(ls Set) Selector { if ls == nil || len(ls) == 0 { return internalSelector{} } - requirements := make([]Requirement, 0, len(ls)) + var requirements internalSelector for label, value := range ls { requirements = append(requirements, Requirement{key: label, operator: selection.Equals, strValues: []string{value}}) } // sort to have deterministic string representation sort.Sort(ByKey(requirements)) - return internalSelector(requirements) + return requirements } // ParseToRequirements takes a string representing a selector and returns a list of diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go index 0bccf9d..284e32b 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/codec.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/codec.go @@ -19,17 +19,13 @@ package runtime import ( "bytes" "encoding/base64" - "encoding/json" "fmt" "io" "net/url" "reflect" - "strconv" - "strings" "k8s.io/apimachinery/pkg/conversion/queryparams" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/klog" ) // codec binds an encoder and decoder. @@ -104,19 +100,10 @@ type NoopEncoder struct { var _ Serializer = NoopEncoder{} -const noopEncoderIdentifier Identifier = "noop" - func (n NoopEncoder) Encode(obj Object, w io.Writer) error { - // There is no need to handle runtime.CacheableObject, as we don't - // process the obj at all. return fmt.Errorf("encoding is not allowed for this codec: %v", reflect.TypeOf(n.Decoder)) } -// Identifier implements runtime.Encoder interface. -func (n NoopEncoder) Identifier() Identifier { - return noopEncoderIdentifier -} - // NoopDecoder converts an Encoder to a Serializer or Codec for code that expects them but only uses encoding. 
type NoopDecoder struct { Encoder @@ -206,51 +193,19 @@ func (c *parameterCodec) EncodeParameters(obj Object, to schema.GroupVersion) (u type base64Serializer struct { Encoder Decoder - - identifier Identifier } func NewBase64Serializer(e Encoder, d Decoder) Serializer { - return &base64Serializer{ - Encoder: e, - Decoder: d, - identifier: identifier(e), - } -} - -func identifier(e Encoder) Identifier { - result := map[string]string{ - "name": "base64", - } - if e != nil { - result["encoder"] = string(e.Identifier()) - } - identifier, err := json.Marshal(result) - if err != nil { - klog.Fatalf("Failed marshaling identifier for base64Serializer: %v", err) - } - return Identifier(identifier) + return &base64Serializer{e, d} } func (s base64Serializer) Encode(obj Object, stream io.Writer) error { - if co, ok := obj.(CacheableObject); ok { - return co.CacheEncode(s.Identifier(), s.doEncode, stream) - } - return s.doEncode(obj, stream) -} - -func (s base64Serializer) doEncode(obj Object, stream io.Writer) error { e := base64.NewEncoder(base64.StdEncoding, stream) err := s.Encoder.Encode(obj, e) e.Close() return err } -// Identifier implements runtime.Encoder interface. -func (s base64Serializer) Identifier() Identifier { - return s.identifier -} - func (s base64Serializer) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { out := make([]byte, base64.StdEncoding.DecodedLen(len(data))) n, err := base64.StdEncoding.Decode(out, data) @@ -283,11 +238,6 @@ var ( DisabledGroupVersioner GroupVersioner = disabledGroupVersioner{} ) -const ( - internalGroupVersionerIdentifier = "internal" - disabledGroupVersionerIdentifier = "disabled" -) - type internalGroupVersioner struct{} // KindForGroupVersionKinds returns an internal Kind if one is found, or converts the first provided kind to the internal version. @@ -303,11 +253,6 @@ func (internalGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi return schema.GroupVersionKind{}, false } -// Identifier implements GroupVersioner interface. -func (internalGroupVersioner) Identifier() string { - return internalGroupVersionerIdentifier -} - type disabledGroupVersioner struct{} // KindForGroupVersionKinds returns false for any input. @@ -315,9 +260,19 @@ func (disabledGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersi return schema.GroupVersionKind{}, false } -// Identifier implements GroupVersioner interface. -func (disabledGroupVersioner) Identifier() string { - return disabledGroupVersionerIdentifier +// GroupVersioners implements GroupVersioner and resolves to the first exact match for any kind. +type GroupVersioners []GroupVersioner + +// KindForGroupVersionKinds returns the first match of any of the group versioners, or false if no match occurred. +func (gvs GroupVersioners) KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (schema.GroupVersionKind, bool) { + for _, gv := range gvs { + target, ok := gv.KindForGroupVersionKinds(kinds) + if !ok { + continue + } + return target, true + } + return schema.GroupVersionKind{}, false } // Assert that schema.GroupVersion and GroupVersions implement GroupVersioner @@ -375,22 +330,3 @@ func (v multiGroupVersioner) KindForGroupVersionKinds(kinds []schema.GroupVersio } return schema.GroupVersionKind{}, false } - -// Identifier implements GroupVersioner interface. 
-func (v multiGroupVersioner) Identifier() string { - groupKinds := make([]string, 0, len(v.acceptedGroupKinds)) - for _, gk := range v.acceptedGroupKinds { - groupKinds = append(groupKinds, gk.String()) - } - result := map[string]string{ - "name": "multi", - "target": v.target.String(), - "accepted": strings.Join(groupKinds, ","), - "coerce": strconv.FormatBool(v.coerce), - } - identifier, err := json.Marshal(result) - if err != nil { - klog.Fatalf("Failed marshaling Identifier for %#v: %v", v, err) - } - return string(identifier) -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go index d04d701..08d2abf 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/conversion.go @@ -53,21 +53,27 @@ func JSONKeyMapper(key string, sourceTag, destTag reflect.StructTag) (string, st return key, key } -func Convert_Slice_string_To_string(in *[]string, out *string, s conversion.Scope) error { - if len(*in) == 0 { +// DefaultStringConversions are helpers for converting []string and string to real values. +var DefaultStringConversions = []interface{}{ + Convert_Slice_string_To_string, + Convert_Slice_string_To_int, + Convert_Slice_string_To_bool, + Convert_Slice_string_To_int64, +} + +func Convert_Slice_string_To_string(input *[]string, out *string, s conversion.Scope) error { + if len(*input) == 0 { *out = "" - return nil } - *out = (*in)[0] + *out = (*input)[0] return nil } -func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) error { - if len(*in) == 0 { +func Convert_Slice_string_To_int(input *[]string, out *int, s conversion.Scope) error { + if len(*input) == 0 { *out = 0 - return nil } - str := (*in)[0] + str := (*input)[0] i, err := strconv.Atoi(str) if err != nil { return err @@ -77,16 +83,15 @@ func Convert_Slice_string_To_int(in *[]string, out *int, s conversion.Scope) err } // Convert_Slice_string_To_bool will convert a string parameter to boolean. -// Only the absence of a value (i.e. zero-length slice), a value of "false", or a -// value of "0" resolve to false. +// Only the absence of a value, a value of "false", or a value of "0" resolve to false. // Any other value (including empty string) resolves to true. -func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) error { - if len(*in) == 0 { +func Convert_Slice_string_To_bool(input *[]string, out *bool, s conversion.Scope) error { + if len(*input) == 0 { *out = false return nil } - switch { - case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): + switch strings.ToLower((*input)[0]) { + case "false", "0": *out = false default: *out = true @@ -94,103 +99,15 @@ func Convert_Slice_string_To_bool(in *[]string, out *bool, s conversion.Scope) e return nil } -// Convert_Slice_string_To_bool will convert a string parameter to boolean. -// Only the absence of a value (i.e. zero-length slice), a value of "false", or a -// value of "0" resolve to false. -// Any other value (including empty string) resolves to true. 
-func Convert_Slice_string_To_Pointer_bool(in *[]string, out **bool, s conversion.Scope) error { - if len(*in) == 0 { - boolVar := false - *out = &boolVar - return nil - } - switch { - case (*in)[0] == "0", strings.EqualFold((*in)[0], "false"): - boolVar := false - *out = &boolVar - default: - boolVar := true - *out = &boolVar - } - return nil -} - -func string_to_int64(in string) (int64, error) { - return strconv.ParseInt(in, 10, 64) -} - -func Convert_string_To_int64(in *string, out *int64, s conversion.Scope) error { - if in == nil { +func Convert_Slice_string_To_int64(input *[]string, out *int64, s conversion.Scope) error { + if len(*input) == 0 { *out = 0 - return nil } - i, err := string_to_int64(*in) + str := (*input)[0] + i, err := strconv.ParseInt(str, 10, 64) if err != nil { return err } *out = i return nil } - -func Convert_Slice_string_To_int64(in *[]string, out *int64, s conversion.Scope) error { - if len(*in) == 0 { - *out = 0 - return nil - } - i, err := string_to_int64((*in)[0]) - if err != nil { - return err - } - *out = i - return nil -} - -func Convert_string_To_Pointer_int64(in *string, out **int64, s conversion.Scope) error { - if in == nil { - *out = nil - return nil - } - i, err := string_to_int64(*in) - if err != nil { - return err - } - *out = &i - return nil -} - -func Convert_Slice_string_To_Pointer_int64(in *[]string, out **int64, s conversion.Scope) error { - if len(*in) == 0 { - *out = nil - return nil - } - i, err := string_to_int64((*in)[0]) - if err != nil { - return err - } - *out = &i - return nil -} - -func RegisterStringConversions(s *Scheme) error { - if err := s.AddConversionFunc((*[]string)(nil), (*string)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_string(a.(*[]string), b.(*string), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]string)(nil), (*int)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_int(a.(*[]string), b.(*int), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]string)(nil), (*bool)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_bool(a.(*[]string), b.(*bool), scope) - }); err != nil { - return err - } - if err := s.AddConversionFunc((*[]string)(nil), (*int64)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_Slice_string_To_int64(a.(*[]string), b.(*int64), scope) - }); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go index 918d083..291d7a4 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/converter.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/converter.go @@ -17,6 +17,7 @@ limitations under the License. 
package runtime import ( + "bytes" encodingjson "encoding/json" "fmt" "math" @@ -31,9 +32,8 @@ import ( "k8s.io/apimachinery/pkg/conversion" "k8s.io/apimachinery/pkg/util/json" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "sigs.k8s.io/structured-merge-diff/v3/value" - "k8s.io/klog" + "github.com/golang/glog" ) // UnstructuredConverter is an interface for converting between interface{} @@ -68,8 +68,13 @@ func newFieldsCache() *fieldsCache { } var ( + marshalerType = reflect.TypeOf(new(encodingjson.Marshaler)).Elem() + unmarshalerType = reflect.TypeOf(new(encodingjson.Unmarshaler)).Elem() mapStringInterfaceType = reflect.TypeOf(map[string]interface{}{}) stringType = reflect.TypeOf(string("")) + int64Type = reflect.TypeOf(int64(0)) + float64Type = reflect.TypeOf(float64(0)) + boolType = reflect.TypeOf(bool(false)) fieldCache = newFieldsCache() // DefaultUnstructuredConverter performs unstructured to Go typed object conversions. @@ -89,7 +94,7 @@ func parseBool(key string) bool { } value, err := strconv.ParseBool(key) if err != nil { - utilruntime.HandleError(fmt.Errorf("couldn't parse '%s' as bool for unstructured mismatch detection", key)) + utilruntime.HandleError(fmt.Errorf("Couldn't parse '%s' as bool for unstructured mismatch detection", key)) } return value } @@ -128,10 +133,10 @@ func (c *unstructuredConverter) FromUnstructured(u map[string]interface{}, obj i newObj := reflect.New(t.Elem()).Interface() newErr := fromUnstructuredViaJSON(u, newObj) if (err != nil) != (newErr != nil) { - klog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) + glog.Fatalf("FromUnstructured unexpected error for %v: error: %v", u, err) } if err == nil && !c.comparison.DeepEqual(obj, newObj) { - klog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) + glog.Fatalf("FromUnstructured mismatch\nobj1: %#v\nobj2: %#v", obj, newObj) } } return err @@ -203,9 +208,13 @@ func fromUnstructured(sv, dv reflect.Value) error { } // Check if the object has a custom JSON marshaller/unmarshaller. 
- entry := value.TypeReflectEntryOf(dv.Type()) - if entry.CanConvertFromUnstructured() { - return entry.FromUnstructured(sv, dv) + if reflect.PtrTo(dt).Implements(unmarshalerType) { + data, err := json.Marshal(sv.Interface()) + if err != nil { + return fmt.Errorf("error encoding %s to json: %v", st.String(), err) + } + unmarshaler := dv.Addr().Interface().(encodingjson.Unmarshaler) + return unmarshaler.UnmarshalJSON(data) } switch dt.Kind() { @@ -247,7 +256,6 @@ func fieldInfoFromField(structType reflect.Type, field int) *fieldInfo { for i := range items { if items[i] == "omitempty" { info.omitempty = true - break } } } @@ -416,10 +424,10 @@ func (c *unstructuredConverter) ToUnstructured(obj interface{}) (map[string]inte newUnstr := map[string]interface{}{} newErr := toUnstructuredViaJSON(obj, &newUnstr) if (err != nil) != (newErr != nil) { - klog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) + glog.Fatalf("ToUnstructured unexpected error for %v: error: %v; newErr: %v", obj, err, newErr) } if err == nil && !c.comparison.DeepEqual(u, newUnstr) { - klog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) + glog.Fatalf("ToUnstructured mismatch\nobj1: %#v\nobj2: %#v", u, newUnstr) } } if err != nil { @@ -475,28 +483,112 @@ func toUnstructuredViaJSON(obj interface{}, u *map[string]interface{}) error { return json.Unmarshal(data, u) } +var ( + nullBytes = []byte("null") + trueBytes = []byte("true") + falseBytes = []byte("false") +) + +func getMarshaler(v reflect.Value) (encodingjson.Marshaler, bool) { + // Check value receivers if v is not a pointer and pointer receivers if v is a pointer + if v.Type().Implements(marshalerType) { + return v.Interface().(encodingjson.Marshaler), true + } + // Check pointer receivers if v is not a pointer + if v.Kind() != reflect.Ptr && v.CanAddr() { + v = v.Addr() + if v.Type().Implements(marshalerType) { + return v.Interface().(encodingjson.Marshaler), true + } + } + return nil, false +} + func toUnstructured(sv, dv reflect.Value) error { - // Check if the object has a custom string converter. - entry := value.TypeReflectEntryOf(sv.Type()) - if entry.CanConvertToUnstructured() { - v, err := entry.ToUnstructured(sv) + // Check if the object has a custom JSON marshaller/unmarshaller. + if marshaler, ok := getMarshaler(sv); ok { + if sv.Kind() == reflect.Ptr && sv.IsNil() { + // We're done - we don't need to store anything. + return nil + } + + data, err := marshaler.MarshalJSON() if err != nil { return err } - if v != nil { - dv.Set(reflect.ValueOf(v)) + switch { + case len(data) == 0: + return fmt.Errorf("error decoding from json: empty value") + + case bytes.Equal(data, nullBytes): + // We're done - we don't need to store anything. 
+ + case bytes.Equal(data, trueBytes): + dv.Set(reflect.ValueOf(true)) + + case bytes.Equal(data, falseBytes): + dv.Set(reflect.ValueOf(false)) + + case data[0] == '"': + var result string + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding string from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + case data[0] == '{': + result := make(map[string]interface{}) + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding object from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + case data[0] == '[': + result := make([]interface{}, 0) + err := json.Unmarshal(data, &result) + if err != nil { + return fmt.Errorf("error decoding array from json: %v", err) + } + dv.Set(reflect.ValueOf(result)) + + default: + var ( + resultInt int64 + resultFloat float64 + err error + ) + if err = json.Unmarshal(data, &resultInt); err == nil { + dv.Set(reflect.ValueOf(resultInt)) + } else if err = json.Unmarshal(data, &resultFloat); err == nil { + dv.Set(reflect.ValueOf(resultFloat)) + } else { + return fmt.Errorf("error decoding number from json: %v", err) + } } + return nil } - st := sv.Type() + + st, dt := sv.Type(), dv.Type() switch st.Kind() { case reflect.String: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(stringType)) + } dv.Set(reflect.ValueOf(sv.String())) return nil case reflect.Bool: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(boolType)) + } dv.Set(reflect.ValueOf(sv.Bool())) return nil case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(int64Type)) + } dv.Set(reflect.ValueOf(sv.Int())) return nil case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: @@ -504,9 +596,15 @@ func toUnstructured(sv, dv reflect.Value) error { if uVal > math.MaxInt64 { return fmt.Errorf("unsigned value %d does not fit into int64 (overflow)", uVal) } + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(int64Type)) + } dv.Set(reflect.ValueOf(int64(uVal))) return nil case reflect.Float32, reflect.Float64: + if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { + dv.Set(reflect.New(float64Type)) + } dv.Set(reflect.ValueOf(sv.Float())) return nil case reflect.Map: @@ -648,7 +746,7 @@ func isZero(v reflect.Value) bool { func structToUnstructured(sv, dv reflect.Value) error { st, dt := sv.Type(), dv.Type() if dt.Kind() == reflect.Interface && dv.NumMethod() == 0 { - dv.Set(reflect.MakeMapWithSize(mapStringInterfaceType, st.NumField())) + dv.Set(reflect.MakeMap(mapStringInterfaceType)) dv = dv.Elem() dt = dv.Type() } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go index 7251e65..db11eb8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/embedded.go @@ -134,16 +134,9 @@ func Convert_runtime_RawExtension_To_runtime_Object(in *RawExtension, out *Objec return nil } -func RegisterEmbeddedConversions(s *Scheme) error { - if err := s.AddConversionFunc((*Object)(nil), (*RawExtension)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_runtime_Object_To_runtime_RawExtension(a.(*Object), b.(*RawExtension), scope) - }); err != nil { - return err +func DefaultEmbeddedConversions() []interface{} { + return []interface{}{ + 
Convert_runtime_Object_To_runtime_RawExtension, + Convert_runtime_RawExtension_To_runtime_Object, } - if err := s.AddConversionFunc((*RawExtension)(nil), (*Object)(nil), func(a, b interface{}, scope conversion.Scope) error { - return Convert_runtime_RawExtension_To_runtime_Object(a.(*RawExtension), b.(*Object), scope) - }); err != nil { - return err - } - return nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/error.go b/vendor/k8s.io/apimachinery/pkg/runtime/error.go index be0c5ed..322b031 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/error.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/error.go @@ -120,32 +120,3 @@ func IsMissingVersion(err error) bool { _, ok := err.(*missingVersionErr) return ok } - -// strictDecodingError is a base error type that is returned by a strict Decoder such -// as UniversalStrictDecoder. -type strictDecodingError struct { - message string - data string -} - -// NewStrictDecodingError creates a new strictDecodingError object. -func NewStrictDecodingError(message string, data string) error { - return &strictDecodingError{ - message: message, - data: data, - } -} - -func (e *strictDecodingError) Error() string { - return fmt.Sprintf("strict decoder error for %s: %s", e.data, e.message) -} - -// IsStrictDecodingError returns true if the error indicates that the provided object -// strictness violations. -func IsStrictDecodingError(err error) bool { - if err == nil { - return false - } - _, ok := err.(*strictDecodingError) - return ok -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go index 0719718..967e0f5 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.pb.go @@ -14,22 +14,31 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +// DO NOT EDIT! +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto + + It has these top-level messages: + RawExtension + TypeMeta + Unknown +*/ package runtime -import ( - fmt "fmt" +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" - io "io" - math "math" - math_bits "math/bits" - reflect "reflect" - strings "strings" +import strings "strings" +import reflect "reflect" - proto "github.com/gogo/protobuf/proto" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -40,134 +49,29 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -func (m *RawExtension) Reset() { *m = RawExtension{} } -func (*RawExtension) ProtoMessage() {} -func (*RawExtension) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{0} -} -func (m *RawExtension) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *RawExtension) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *RawExtension) XXX_Merge(src proto.Message) { - xxx_messageInfo_RawExtension.Merge(m, src) -} -func (m *RawExtension) XXX_Size() int { - return m.Size() -} -func (m *RawExtension) XXX_DiscardUnknown() { - xxx_messageInfo_RawExtension.DiscardUnknown(m) -} +func (m *RawExtension) Reset() { *m = RawExtension{} } +func (*RawExtension) ProtoMessage() {} +func (*RawExtension) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } -var xxx_messageInfo_RawExtension proto.InternalMessageInfo +func (m *TypeMeta) Reset() { *m = TypeMeta{} } +func (*TypeMeta) ProtoMessage() {} +func (*TypeMeta) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } -func (m *TypeMeta) Reset() { *m = TypeMeta{} } -func (*TypeMeta) ProtoMessage() {} -func (*TypeMeta) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{1} -} -func (m *TypeMeta) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *TypeMeta) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *TypeMeta) XXX_Merge(src proto.Message) { - xxx_messageInfo_TypeMeta.Merge(m, src) -} -func (m *TypeMeta) XXX_Size() int { - return m.Size() -} -func (m *TypeMeta) XXX_DiscardUnknown() { - xxx_messageInfo_TypeMeta.DiscardUnknown(m) -} - -var xxx_messageInfo_TypeMeta proto.InternalMessageInfo - -func (m *Unknown) Reset() { *m = Unknown{} } -func (*Unknown) ProtoMessage() {} -func (*Unknown) Descriptor() ([]byte, []int) { - return fileDescriptor_9d3c45d7f546725c, []int{2} -} -func (m *Unknown) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *Unknown) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *Unknown) XXX_Merge(src proto.Message) { - xxx_messageInfo_Unknown.Merge(m, src) -} -func (m *Unknown) XXX_Size() int { - return m.Size() -} -func (m *Unknown) XXX_DiscardUnknown() { - xxx_messageInfo_Unknown.DiscardUnknown(m) -} - -var xxx_messageInfo_Unknown proto.InternalMessageInfo +func (m *Unknown) Reset() { *m = Unknown{} } +func (*Unknown) ProtoMessage() {} +func (*Unknown) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } func init() { proto.RegisterType((*RawExtension)(nil), "k8s.io.apimachinery.pkg.runtime.RawExtension") proto.RegisterType((*TypeMeta)(nil), "k8s.io.apimachinery.pkg.runtime.TypeMeta") proto.RegisterType((*Unknown)(nil), "k8s.io.apimachinery.pkg.runtime.Unknown") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptor_9d3c45d7f546725c) -} - -var fileDescriptor_9d3c45d7f546725c = []byte{ - // 378 bytes of a gzipped FileDescriptorProto - 0x1f, 
0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, - 0x14, 0xc5, 0x27, 0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, - 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, - 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, - 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, - 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, - 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, - 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, - 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, - 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, - 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, - 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, - 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, - 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, - 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, - 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, - 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, - 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, - 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, - 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, - 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, - 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, - 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, - 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, -} - func (m *RawExtension) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -175,29 +79,23 @@ func (m *RawExtension) Marshal() (dAtA []byte, err error) { } func (m *RawExtension) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *RawExtension) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l if m.Raw != nil { - i -= len(m.Raw) - copy(dAtA[i:], m.Raw) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i-- dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) } - return len(dAtA) - i, nil + return i, nil } func (m *TypeMeta) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -205,32 +103,25 @@ func (m *TypeMeta) Marshal() (dAtA []byte, err error) { } func (m *TypeMeta) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) 
-} - -func (m *TypeMeta) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.Kind) - copy(dAtA[i:], m.Kind) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i-- - dAtA[i] = 0x12 - i -= len(m.APIVersion) - copy(dAtA[i:], m.APIVersion) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i-- dAtA[i] = 0xa - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) + i += copy(dAtA[i:], m.APIVersion) + dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) + i += copy(dAtA[i:], m.Kind) + return i, nil } func (m *Unknown) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -238,60 +129,63 @@ func (m *Unknown) Marshal() (dAtA []byte, err error) { } func (m *Unknown) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *Unknown) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.ContentType) - copy(dAtA[i:], m.ContentType) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) - i-- - dAtA[i] = 0x22 - i -= len(m.ContentEncoding) - copy(dAtA[i:], m.ContentEncoding) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) - i-- - dAtA[i] = 0x1a + dAtA[i] = 0xa + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 if m.Raw != nil { - i -= len(m.Raw) - copy(dAtA[i:], m.Raw) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) - i-- dAtA[i] = 0x12 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Raw))) + i += copy(dAtA[i:], m.Raw) } - { - size, err := m.TypeMeta.MarshalToSizedBuffer(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarintGenerated(dAtA, i, uint64(size)) - } - i-- - dAtA[i] = 0xa - return len(dAtA) - i, nil + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentEncoding))) + i += copy(dAtA[i:], m.ContentEncoding) + dAtA[i] = 0x22 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContentType))) + i += copy(dAtA[i:], m.ContentType) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *RawExtension) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Raw != nil { @@ -302,9 +196,6 @@ func (m *RawExtension) Size() (n int) { } func (m *TypeMeta) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.APIVersion) @@ -315,9 +206,6 @@ func (m *TypeMeta) Size() (n int) { } func (m 
*Unknown) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.TypeMeta.Size() @@ -334,7 +222,14 @@ func (m *Unknown) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -396,7 +291,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -424,7 +319,7 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -433,9 +328,6 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -453,9 +345,6 @@ func (m *RawExtension) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -483,7 +372,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -511,7 +400,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -521,9 +410,6 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -543,7 +429,7 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -553,9 +439,6 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -570,9 +453,6 @@ func (m *TypeMeta) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -600,7 +480,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -628,7 +508,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + msglen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -637,9 +517,6 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -661,7 +538,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= int(b&0x7F) << shift + byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { break } @@ -670,9 +547,6 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + 
byteLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -695,7 +569,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -705,9 +579,6 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -727,7 +598,7 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -737,9 +608,6 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -754,9 +622,6 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -772,7 +637,6 @@ func (m *Unknown) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -804,8 +668,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -822,34 +688,85 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 378 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x8f, 0x4f, 0xab, 0x13, 0x31, + 0x14, 0xc5, 0x27, 
0xaf, 0x85, 0x3e, 0xd3, 0xc2, 0x93, 0xb8, 0x70, 0x74, 0x91, 0x79, 0x74, 0xe5, + 0x5b, 0xbc, 0x04, 0x1e, 0x08, 0x6e, 0x3b, 0xa5, 0xa0, 0x88, 0x20, 0xc1, 0x3f, 0xe0, 0xca, 0x74, + 0x26, 0x4e, 0xc3, 0xd0, 0x9b, 0x21, 0xcd, 0x38, 0x76, 0xe7, 0x47, 0xf0, 0x63, 0x75, 0xd9, 0x65, + 0x57, 0xc5, 0x8e, 0x1f, 0xc2, 0xad, 0x34, 0x4d, 0x6b, 0xd5, 0x85, 0xbb, 0xe4, 0x9e, 0xf3, 0x3b, + 0xf7, 0x1e, 0xfc, 0xbc, 0x7c, 0xb6, 0x60, 0xda, 0xf0, 0xb2, 0x9e, 0x2a, 0x0b, 0xca, 0xa9, 0x05, + 0xff, 0xac, 0x20, 0x37, 0x96, 0x07, 0x41, 0x56, 0x7a, 0x2e, 0xb3, 0x99, 0x06, 0x65, 0x97, 0xbc, + 0x2a, 0x0b, 0x6e, 0x6b, 0x70, 0x7a, 0xae, 0x78, 0xa1, 0x40, 0x59, 0xe9, 0x54, 0xce, 0x2a, 0x6b, + 0x9c, 0x21, 0xc9, 0x01, 0x60, 0xe7, 0x00, 0xab, 0xca, 0x82, 0x05, 0xe0, 0xf1, 0x6d, 0xa1, 0xdd, + 0xac, 0x9e, 0xb2, 0xcc, 0xcc, 0x79, 0x61, 0x0a, 0xc3, 0x3d, 0x37, 0xad, 0x3f, 0xf9, 0x9f, 0xff, + 0xf8, 0xd7, 0x21, 0x6f, 0x78, 0x83, 0x07, 0x42, 0x36, 0x93, 0x2f, 0x4e, 0xc1, 0x42, 0x1b, 0x20, + 0x8f, 0x70, 0xc7, 0xca, 0x26, 0x46, 0xd7, 0xe8, 0xc9, 0x20, 0xed, 0xb5, 0xdb, 0xa4, 0x23, 0x64, + 0x23, 0xf6, 0xb3, 0xe1, 0x47, 0x7c, 0xf9, 0x66, 0x59, 0xa9, 0x57, 0xca, 0x49, 0x72, 0x87, 0xb1, + 0xac, 0xf4, 0x3b, 0x65, 0xf7, 0x90, 0x77, 0xdf, 0x4b, 0xc9, 0x6a, 0x9b, 0x44, 0xed, 0x36, 0xc1, + 0xa3, 0xd7, 0x2f, 0x82, 0x22, 0xce, 0x5c, 0xe4, 0x1a, 0x77, 0x4b, 0x0d, 0x79, 0x7c, 0xe1, 0xdd, + 0x83, 0xe0, 0xee, 0xbe, 0xd4, 0x90, 0x0b, 0xaf, 0x0c, 0x7f, 0x22, 0xdc, 0x7b, 0x0b, 0x25, 0x98, + 0x06, 0xc8, 0x7b, 0x7c, 0xe9, 0xc2, 0x36, 0x9f, 0xdf, 0xbf, 0xbb, 0x61, 0xff, 0xe9, 0xce, 0x8e, + 0xe7, 0xa5, 0xf7, 0x43, 0xf8, 0xe9, 0x60, 0x71, 0x0a, 0x3b, 0x36, 0xbc, 0xf8, 0xb7, 0x21, 0x19, + 0xe1, 0xab, 0xcc, 0x80, 0x53, 0xe0, 0x26, 0x90, 0x99, 0x5c, 0x43, 0x11, 0x77, 0xfc, 0xb1, 0x0f, + 0x43, 0xde, 0xd5, 0xf8, 0x4f, 0x59, 0xfc, 0xed, 0x27, 0x4f, 0x71, 0x3f, 0x8c, 0xf6, 0xab, 0xe3, + 0xae, 0xc7, 0x1f, 0x04, 0xbc, 0x3f, 0xfe, 0x2d, 0x89, 0x73, 0x5f, 0x7a, 0xbb, 0xda, 0xd1, 0x68, + 0xbd, 0xa3, 0xd1, 0x66, 0x47, 0xa3, 0xaf, 0x2d, 0x45, 0xab, 0x96, 0xa2, 0x75, 0x4b, 0xd1, 0xa6, + 0xa5, 0xe8, 0x7b, 0x4b, 0xd1, 0xb7, 0x1f, 0x34, 0xfa, 0xd0, 0x0b, 0x45, 0x7f, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe3, 0x33, 0x18, 0x0b, 0x50, 0x02, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto index 0e212ec..fb61ac9 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/runtime/generated.proto @@ -25,11 +25,11 @@ package k8s.io.apimachinery.pkg.runtime; option go_package = "runtime"; // RawExtension is used to hold extensions in external versions. -// +// // To use this, make a field which has RawExtension as its type in your external, versioned // struct, and Object in your internal struct. You also need to register your // various plugin types. -// +// // // Internal package: // type MyAPIObject struct { // runtime.TypeMeta `json:",inline"` @@ -38,7 +38,7 @@ option go_package = "runtime"; // type PluginA struct { // AOption string `json:"aOption"` // } -// +// // // External package: // type MyAPIObject struct { // runtime.TypeMeta `json:",inline"` @@ -47,7 +47,7 @@ option go_package = "runtime"; // type PluginA struct { // AOption string `json:"aOption"` // } -// +// // // On the wire, the JSON will look something like this: // { // "kind":"MyAPIObject", @@ -57,7 +57,7 @@ option go_package = "runtime"; // "aOption":"foo", // }, // } -// +// // So what happens? 
Decode first uses json or yaml to unmarshal the serialized data into // your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. // The next step is to copy (using pkg/conversion) into the internal struct. The runtime @@ -65,13 +65,13 @@ option go_package = "runtime"; // JSON stored in RawExtension, turning it into the correct object type, and storing it // in the Object. (TODO: In the case where the object is of an unknown type, a // runtime.Unknown object will be created and stored.) -// +// // +k8s:deepcopy-gen=true // +protobuf=true // +k8s:openapi-gen=true message RawExtension { // Raw is the underlying serialization of this object. - // + // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. optional bytes raw = 1; } @@ -83,10 +83,10 @@ message RawExtension { // ... // other fields // } // func (obj *MyAwesomeAPIObject) SetGroupVersionKind(gvk *metav1.GroupVersionKind) { metav1.UpdateTypeMeta(obj,gvk) }; GroupVersionKind() *GroupVersionKind -// +// // TypeMeta is provided here for convenience. You may use it directly from this package or define // your own with the same fields. -// +// // +k8s:deepcopy-gen=false // +protobuf=true // +k8s:openapi-gen=true @@ -103,7 +103,7 @@ message TypeMeta { // TypeMeta features-- kind, version, etc. // TODO: Make this object have easy access to field based accessors and settors for // metadata and field mutatation. -// +// // +k8s:deepcopy-gen=true // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // +protobuf=true diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go index 7bd1a3a..33f11eb 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/helper.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/helper.go @@ -51,7 +51,7 @@ func UnsafeObjectConvertor(scheme *Scheme) ObjectConvertor { func SetField(src interface{}, v reflect.Value, fieldName string) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) } srcValue := reflect.ValueOf(src) if srcValue.Type().AssignableTo(field.Type()) { @@ -70,7 +70,7 @@ func SetField(src interface{}, v reflect.Value, fieldName string) error { func Field(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) } destValue, err := conversion.EnforcePtr(dest) if err != nil { @@ -93,7 +93,7 @@ func Field(v reflect.Value, fieldName string, dest interface{}) error { func FieldPtr(v reflect.Value, fieldName string, dest interface{}) error { field := v.FieldByName(fieldName) if !field.IsValid() { - return fmt.Errorf("couldn't find %v field in %T", fieldName, v.Interface()) + return fmt.Errorf("couldn't find %v field in %#v", fieldName, v.Interface()) } v, err := conversion.EnforcePtr(dest) if err != nil { @@ -210,50 +210,3 @@ type defaultFramer struct{} func (defaultFramer) NewFrameReader(r io.ReadCloser) io.ReadCloser { return r } func (defaultFramer) NewFrameWriter(w io.Writer) io.Writer { return w } - -// WithVersionEncoder serializes an object and ensures the GVK is set. -type WithVersionEncoder struct { - Version GroupVersioner - Encoder - ObjectTyper -} - -// Encode does not do conversion. 
It sets the gvk during serialization. -func (e WithVersionEncoder) Encode(obj Object, stream io.Writer) error { - gvks, _, err := e.ObjectTyper.ObjectKinds(obj) - if err != nil { - if IsNotRegisteredError(err) { - return e.Encoder.Encode(obj, stream) - } - return err - } - kind := obj.GetObjectKind() - oldGVK := kind.GroupVersionKind() - gvk := gvks[0] - if e.Version != nil { - preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) - if ok { - gvk = preferredGVK - } - } - kind.SetGroupVersionKind(gvk) - err = e.Encoder.Encode(obj, stream) - kind.SetGroupVersionKind(oldGVK) - return err -} - -// WithoutVersionDecoder clears the group version kind of a deserialized object. -type WithoutVersionDecoder struct { - Decoder -} - -// Decode does not do conversion. It removes the gvk during deserialization. -func (d WithoutVersionDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into Object) (Object, *schema.GroupVersionKind, error) { - obj, gvk, err := d.Decoder.Decode(data, defaults, into) - if obj != nil { - kind := obj.GetObjectKind() - // clearing the gvk is just a convention of a codec - kind.SetGroupVersionKind(schema.GroupVersionKind{}) - } - return obj, gvk, err -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go index f44693c..699ff13 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/interfaces.go @@ -37,36 +37,13 @@ type GroupVersioner interface { // Scheme.New(target) and then perform a conversion between the current Go type and the destination Go type. // Sophisticated implementations may use additional information about the input kinds to pick a destination kind. KindForGroupVersionKinds(kinds []schema.GroupVersionKind) (target schema.GroupVersionKind, ok bool) - // Identifier returns string representation of the object. - // Identifiers of two different encoders should be equal only if for every input - // kinds they return the same result. - Identifier() string } -// Identifier represents an identifier. -// Identitier of two different objects should be equal if and only if for every -// input the output they produce is exactly the same. -type Identifier string - // Encoder writes objects to a serialized form type Encoder interface { // Encode writes an object to a stream. Implementations may return errors if the versions are // incompatible, or if no conversion is defined. Encode(obj Object, w io.Writer) error - // Identifier returns an identifier of the encoder. - // Identifiers of two different encoders should be equal if and only if for every input - // object it will be encoded to the same representation by both of them. - // - // Identifier is inteted for use with CacheableObject#CacheEncode method. In order to - // correctly handle CacheableObject, Encode() method should look similar to below, where - // doEncode() is the encoding logic of implemented encoder: - // func (e *MyEncoder) Encode(obj Object, w io.Writer) error { - // if co, ok := obj.(CacheableObject); ok { - // return co.CacheEncode(e.Identifier(), e.doEncode, w) - // } - // return e.doEncode(obj, w) - // } - Identifier() Identifier } // Decoder attempts to load an object from data. @@ -114,10 +91,6 @@ type Framer interface { type SerializerInfo struct { // MediaType is the value that represents this serializer over the wire. MediaType string - // MediaTypeType is the first part of the MediaType ("application" in "application/json"). 
- MediaTypeType string - // MediaTypeSubType is the second part of the MediaType ("json" in "application/json"). - MediaTypeSubType string // EncodesAsText indicates this serializer can be encoded to UTF-8 safely. EncodesAsText bool // Serializer is the individual object serializer for this media type. @@ -155,28 +128,6 @@ type NegotiatedSerializer interface { DecoderToVersion(serializer Decoder, gv GroupVersioner) Decoder } -// ClientNegotiator handles turning an HTTP content type into the appropriate encoder. -// Use NewClientNegotiator or NewVersionedClientNegotiator to create this interface from -// a NegotiatedSerializer. -type ClientNegotiator interface { - // Encoder returns the appropriate encoder for the provided contentType (e.g. application/json) - // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found - // a NegotiateError will be returned. The current client implementations consider params to be - // optional modifiers to the contentType and will ignore unrecognized parameters. - Encoder(contentType string, params map[string]string) (Encoder, error) - // Decoder returns the appropriate decoder for the provided contentType (e.g. application/json) - // and any optional mediaType parameters (e.g. pretty=1), or an error. If no serializer is found - // a NegotiateError will be returned. The current client implementations consider params to be - // optional modifiers to the contentType and will ignore unrecognized parameters. - Decoder(contentType string, params map[string]string) (Decoder, error) - // StreamDecoder returns the appropriate stream decoder for the provided contentType (e.g. - // application/json) and any optional mediaType parameters (e.g. pretty=1), or an error. If no - // serializer is found a NegotiateError will be returned. The Serializer and Framer will always - // be returned if a Decoder is returned. The current client implementations consider params to be - // optional modifiers to the contentType and will ignore unrecognized parameters. - StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) -} - // StorageSerializer is an interface used for obtaining encoders, decoders, and serializers // that can read and write data at rest. This would commonly be used by client tools that must // read files, or server side storage interfaces that persist restful objects. @@ -255,25 +206,6 @@ type ObjectCreater interface { New(kind schema.GroupVersionKind) (out Object, err error) } -// EquivalentResourceMapper provides information about resources that address the same underlying data as a specified resource -type EquivalentResourceMapper interface { - // EquivalentResourcesFor returns a list of resources that address the same underlying data as resource. - // If subresource is specified, only equivalent resources which also have the same subresource are included. - // The specified resource can be included in the returned list. - EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource - // KindFor returns the kind expected by the specified resource[/subresource]. - // A zero value is returned if the kind is unknown. 
- KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind -} - -// EquivalentResourceRegistry provides an EquivalentResourceMapper interface, -// and allows registering known resource[/subresource] -> kind -type EquivalentResourceRegistry interface { - EquivalentResourceMapper - // RegisterKindFor registers the existence of the specified resource[/subresource] along with its expected kind. - RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) -} - // ResourceVersioner provides methods for setting and retrieving // the resource version from an API object. type ResourceVersioner interface { @@ -301,34 +233,10 @@ type Object interface { DeepCopyObject() Object } -// CacheableObject allows an object to cache its different serializations -// to avoid performing the same serialization multiple times. -type CacheableObject interface { - // CacheEncode writes an object to a stream. The function will - // be used in case of cache miss. The function takes ownership - // of the object. - // If CacheableObject is a wrapper, then deep-copy of the wrapped object - // should be passed to function. - // CacheEncode assumes that for two different calls with the same , - // function will also be the same. - CacheEncode(id Identifier, encode func(Object, io.Writer) error, w io.Writer) error - // GetObject returns a deep-copy of an object to be encoded - the caller of - // GetObject() is the owner of returned object. The reason for making a copy - // is to avoid bugs, where caller modifies the object and forgets to copy it, - // thus modifying the object for everyone. - // The object returned by GetObject should be the same as the one that is supposed - // to be passed to function in CacheEncode method. - // If CacheableObject is a wrapper, the copy of wrapped object should be returned. - GetObject() Object -} - // Unstructured objects store values as map[string]interface{}, with only values that can be serialized // to JSON allowed. type Unstructured interface { Object - // NewEmptyInstance returns a new instance of the concrete type containing only kind/apiVersion and no other data. - // This should be called instead of reflect.New() for unstructured types because the go type alone does not preserve kind/apiVersion info. - NewEmptyInstance() Unstructured // UnstructuredContent returns a non-nil map with this object's contents. Values may be // []interface{}, map[string]interface{}, or any primitive type. Contents are typically serialized to // and from JSON. SetUnstructuredContent should be used to mutate the contents. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go b/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go deleted file mode 100644 index 3ff8461..0000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/mapper.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package runtime - -import ( - "sync" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -type equivalentResourceRegistry struct { - // keyFunc computes a key for the specified resource (this allows honoring colocated resources across API groups). - // if null, or if "" is returned, resource.String() is used as the key - keyFunc func(resource schema.GroupResource) string - // resources maps key -> subresource -> equivalent resources (subresource is not included in the returned resources). - // main resources are stored with subresource="". - resources map[string]map[string][]schema.GroupVersionResource - // kinds maps resource -> subresource -> kind - kinds map[schema.GroupVersionResource]map[string]schema.GroupVersionKind - // keys caches the computed key for each GroupResource - keys map[schema.GroupResource]string - - mutex sync.RWMutex -} - -var _ EquivalentResourceMapper = (*equivalentResourceRegistry)(nil) -var _ EquivalentResourceRegistry = (*equivalentResourceRegistry)(nil) - -// NewEquivalentResourceRegistry creates a resource registry that considers all versions of a GroupResource to be equivalent. -func NewEquivalentResourceRegistry() EquivalentResourceRegistry { - return &equivalentResourceRegistry{} -} - -// NewEquivalentResourceRegistryWithIdentity creates a resource mapper with a custom identity function. -// If "" is returned by the function, GroupResource#String is used as the identity. -// GroupResources with the same identity string are considered equivalent. -func NewEquivalentResourceRegistryWithIdentity(keyFunc func(schema.GroupResource) string) EquivalentResourceRegistry { - return &equivalentResourceRegistry{keyFunc: keyFunc} -} - -func (r *equivalentResourceRegistry) EquivalentResourcesFor(resource schema.GroupVersionResource, subresource string) []schema.GroupVersionResource { - r.mutex.RLock() - defer r.mutex.RUnlock() - return r.resources[r.keys[resource.GroupResource()]][subresource] -} -func (r *equivalentResourceRegistry) KindFor(resource schema.GroupVersionResource, subresource string) schema.GroupVersionKind { - r.mutex.RLock() - defer r.mutex.RUnlock() - return r.kinds[resource][subresource] -} -func (r *equivalentResourceRegistry) RegisterKindFor(resource schema.GroupVersionResource, subresource string, kind schema.GroupVersionKind) { - r.mutex.Lock() - defer r.mutex.Unlock() - if r.kinds == nil { - r.kinds = map[schema.GroupVersionResource]map[string]schema.GroupVersionKind{} - } - if r.kinds[resource] == nil { - r.kinds[resource] = map[string]schema.GroupVersionKind{} - } - r.kinds[resource][subresource] = kind - - // get the shared key of the parent resource - key := "" - gr := resource.GroupResource() - if r.keyFunc != nil { - key = r.keyFunc(gr) - } - if key == "" { - key = gr.String() - } - - if r.keys == nil { - r.keys = map[schema.GroupResource]string{} - } - r.keys[gr] = key - - if r.resources == nil { - r.resources = map[string]map[string][]schema.GroupVersionResource{} - } - if r.resources[key] == nil { - r.resources[key] = map[string][]schema.GroupVersionResource{} - } - r.resources[key][subresource] = append(r.resources[key][subresource], resource) -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go b/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go deleted file mode 100644 index 159b301..0000000 --- a/vendor/k8s.io/apimachinery/pkg/runtime/negotiate.go +++ /dev/null @@ -1,146 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package runtime - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// NegotiateError is returned when a ClientNegotiator is unable to locate -// a serializer for the requested operation. -type NegotiateError struct { - ContentType string - Stream bool -} - -func (e NegotiateError) Error() string { - if e.Stream { - return fmt.Sprintf("no stream serializers registered for %s", e.ContentType) - } - return fmt.Sprintf("no serializers registered for %s", e.ContentType) -} - -type clientNegotiator struct { - serializer NegotiatedSerializer - encode, decode GroupVersioner -} - -func (n *clientNegotiator) Encoder(contentType string, params map[string]string) (Encoder, error) { - // TODO: `pretty=1` is handled in NegotiateOutputMediaType, consider moving it to this method - // if client negotiators truly need to use it - mediaTypes := n.serializer.SupportedMediaTypes() - info, ok := SerializerInfoForMediaType(mediaTypes, contentType) - if !ok { - if len(contentType) != 0 || len(mediaTypes) == 0 { - return nil, NegotiateError{ContentType: contentType} - } - info = mediaTypes[0] - } - return n.serializer.EncoderForVersion(info.Serializer, n.encode), nil -} - -func (n *clientNegotiator) Decoder(contentType string, params map[string]string) (Decoder, error) { - mediaTypes := n.serializer.SupportedMediaTypes() - info, ok := SerializerInfoForMediaType(mediaTypes, contentType) - if !ok { - if len(contentType) != 0 || len(mediaTypes) == 0 { - return nil, NegotiateError{ContentType: contentType} - } - info = mediaTypes[0] - } - return n.serializer.DecoderToVersion(info.Serializer, n.decode), nil -} - -func (n *clientNegotiator) StreamDecoder(contentType string, params map[string]string) (Decoder, Serializer, Framer, error) { - mediaTypes := n.serializer.SupportedMediaTypes() - info, ok := SerializerInfoForMediaType(mediaTypes, contentType) - if !ok { - if len(contentType) != 0 || len(mediaTypes) == 0 { - return nil, nil, nil, NegotiateError{ContentType: contentType, Stream: true} - } - info = mediaTypes[0] - } - if info.StreamSerializer == nil { - return nil, nil, nil, NegotiateError{ContentType: info.MediaType, Stream: true} - } - return n.serializer.DecoderToVersion(info.Serializer, n.decode), info.StreamSerializer.Serializer, info.StreamSerializer.Framer, nil -} - -// NewClientNegotiator will attempt to retrieve the appropriate encoder, decoder, or -// stream decoder for a given content type. Does not perform any conversion, but will -// encode the object to the desired group, version, and kind. Use when creating a client. -func NewClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { - return &clientNegotiator{ - serializer: serializer, - encode: gv, - } -} - -// NewInternalClientNegotiator applies the default client rules for connecting to a Kubernetes apiserver -// where objects are converted to gv prior to sending and decoded to their internal representation prior -// to retrieval. 
-// -// DEPRECATED: Internal clients are deprecated and will be removed in a future Kubernetes release. -func NewInternalClientNegotiator(serializer NegotiatedSerializer, gv schema.GroupVersion) ClientNegotiator { - decode := schema.GroupVersions{ - { - Group: gv.Group, - Version: APIVersionInternal, - }, - // always include the legacy group as a decoding target to handle non-error `Status` return types - { - Group: "", - Version: APIVersionInternal, - }, - } - return &clientNegotiator{ - encode: gv, - decode: decode, - serializer: serializer, - } -} - -// NewSimpleClientNegotiator will negotiate for a single serializer. This should only be used -// for testing or when the caller is taking responsibility for setting the GVK on encoded objects. -func NewSimpleClientNegotiator(info SerializerInfo, gv schema.GroupVersion) ClientNegotiator { - return &clientNegotiator{ - serializer: &simpleNegotiatedSerializer{info: info}, - encode: gv, - } -} - -type simpleNegotiatedSerializer struct { - info SerializerInfo -} - -func NewSimpleNegotiatedSerializer(info SerializerInfo) NegotiatedSerializer { - return &simpleNegotiatedSerializer{info: info} -} - -func (n *simpleNegotiatedSerializer) SupportedMediaTypes() []SerializerInfo { - return []SerializerInfo{n.info} -} - -func (n *simpleNegotiatedSerializer) EncoderForVersion(e Encoder, _ GroupVersioner) Encoder { - return e -} - -func (n *simpleNegotiatedSerializer) DecoderToVersion(d Decoder, _gv GroupVersioner) Decoder { - return d -} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/register.go b/vendor/k8s.io/apimachinery/pkg/runtime/register.go index 1cd2e4c..eeb380c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/register.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/register.go @@ -29,3 +29,33 @@ func (obj *TypeMeta) GroupVersionKind() schema.GroupVersionKind { } func (obj *TypeMeta) GetObjectKind() schema.ObjectKind { return obj } + +// GetObjectKind implements Object for VersionedObjects, returning an empty ObjectKind +// interface if no objects are provided, or the ObjectKind interface of the object in the +// highest array position. +func (obj *VersionedObjects) GetObjectKind() schema.ObjectKind { + last := obj.Last() + if last == nil { + return schema.EmptyObjectKind + } + return last.GetObjectKind() +} + +// First returns the leftmost object in the VersionedObjects array, which is usually the +// object as serialized on the wire. +func (obj *VersionedObjects) First() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[0] +} + +// Last is the rightmost object in the VersionedObjects array, which is the object after +// all transformations have been applied. This is the same object that would be returned +// by Decode in a normal invocation (without VersionedObjects in the into argument). +func (obj *VersionedObjects) Last() Object { + if len(obj.Objects) == 0 { + return nil + } + return obj.Objects[len(obj.Objects)-1] +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go index 29d3ac4..5c9934c 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.pb.go @@ -14,18 +14,23 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. 
// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto +// DO NOT EDIT! -package schema +/* +Package schema is a generated protocol buffer package. -import ( - fmt "fmt" +It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto - math "math" +It has these top-level messages: +*/ +package schema - proto "github.com/gogo/protobuf/proto" -) +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -36,13 +41,13 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptor_0462724132518e0d) + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/runtime/schema/generated.proto", fileDescriptorGenerated) } -var fileDescriptor_0462724132518e0d = []byte{ +var fileDescriptorGenerated = []byte{ // 185 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0xcc, 0xaf, 0x6e, 0xc3, 0x30, 0x10, 0xc7, 0x71, 0x9b, 0x0c, 0x0c, 0x0e, 0x0e, 0x1c, 0x1c, 0xda, 0x7c, 0x74, 0xb8, 0x2f, 0x50, diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go index 6361033..5f02961 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/schema/group_version.go @@ -66,7 +66,7 @@ func (gr GroupResource) Empty() bool { return len(gr.Group) == 0 && len(gr.Resource) == 0 } -func (gr GroupResource) String() string { +func (gr *GroupResource) String() string { if len(gr.Group) == 0 { return gr.Resource } @@ -111,7 +111,7 @@ func (gvr GroupVersionResource) GroupVersion() GroupVersion { return GroupVersion{Group: gvr.Group, Version: gvr.Version} } -func (gvr GroupVersionResource) String() string { +func (gvr *GroupVersionResource) String() string { return strings.Join([]string{gvr.Group, "/", gvr.Version, ", Resource=", gvr.Resource}, "") } @@ -130,7 +130,7 @@ func (gk GroupKind) WithVersion(version string) GroupVersionKind { return GroupVersionKind{Group: gk.Group, Version: version, Kind: gk.Kind} } -func (gk GroupKind) String() string { +func (gk *GroupKind) String() string { if len(gk.Group) == 0 { return gk.Kind } @@ -191,11 +191,6 @@ func (gv GroupVersion) String() string { return gv.Version } -// Identifier implements runtime.GroupVersioner interface. -func (gv GroupVersion) Identifier() string { - return gv.String() -} - // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. It prefers a match to group and version over just group. // TODO: Move GroupVersion to a package under pkg/runtime, since it's used by scheme. @@ -251,15 +246,6 @@ func (gv GroupVersion) WithResource(resource string) GroupVersionResource { // in fewer places. type GroupVersions []GroupVersion -// Identifier implements runtime.GroupVersioner interface. 
-func (gv GroupVersions) Identifier() string { - groupVersions := make([]string, 0, len(gv)) - for i := range gv { - groupVersions = append(groupVersions, gv[i].String()) - } - return fmt.Sprintf("[%s]", strings.Join(groupVersions, ",")) -} - // KindForGroupVersionKinds identifies the preferred GroupVersionKind out of a list. It returns ok false // if none of the options match the group. func (gvs GroupVersions) KindForGroupVersionKinds(kinds []GroupVersionKind) (GroupVersionKind, bool) { @@ -295,8 +281,8 @@ func bestMatch(kinds []GroupVersionKind, targets []GroupVersionKind) GroupVersio // ToAPIVersionAndKind is a convenience method for satisfying runtime.Object on types that // do not use TypeMeta. -func (gvk GroupVersionKind) ToAPIVersionAndKind() (string, string) { - if gvk.Empty() { +func (gvk *GroupVersionKind) ToAPIVersionAndKind() (string, string) { + if gvk == nil { return "", "" } return gvk.GroupVersion().String(), gvk.Kind diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go index 4b739ec..fd37e29 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/scheme.go @@ -102,10 +102,10 @@ func NewScheme() *Scheme { } s.converter = conversion.NewConverter(s.nameFunc) - // Enable couple default conversions by default. - utilruntime.Must(RegisterEmbeddedConversions(s)) - utilruntime.Must(RegisterStringConversions(s)) + utilruntime.Must(s.AddConversionFuncs(DefaultEmbeddedConversions()...)) + // Enable map[string][]string conversions by default + utilruntime.Must(s.AddConversionFuncs(DefaultStringConversions...)) utilruntime.Must(s.RegisterInputDefaults(&map[string][]string{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) utilruntime.Must(s.RegisterInputDefaults(&url.Values{}, JSONKeyMapper, conversion.AllowDifferentFieldTypeNames|conversion.IgnoreMissingFields)) return s @@ -308,6 +308,45 @@ func (s *Scheme) AddIgnoredConversionType(from, to interface{}) error { return s.converter.RegisterIgnoredConversion(from, to) } +// AddConversionFuncs adds functions to the list of conversion functions. The given +// functions should know how to convert between two of your API objects, or their +// sub-objects. We deduce how to call these functions from the types of their two +// parameters; see the comment for Converter.Register. +// +// Note that, if you need to copy sub-objects that didn't change, you can use the +// conversion.Scope object that will be passed to your conversion function. +// Additionally, all conversions started by Scheme will set the SrcVersion and +// DestVersion fields on the Meta object. Example: +// +// s.AddConversionFuncs( +// func(in *InternalObject, out *ExternalObject, scope conversion.Scope) error { +// // You can depend on Meta() being non-nil, and this being set to +// // the source version, e.g., "" +// s.Meta().SrcVersion +// // You can depend on this being set to the destination version, +// // e.g., "v1". +// s.Meta().DestVersion +// // Call scope.Convert to copy sub-fields. +// s.Convert(&in.SubFieldThatMoved, &out.NewLocation.NewName, 0) +// return nil +// }, +// ) +// +// (For more detail about conversion functions, see Converter.Register's comment.) +// +// Also note that the default behavior, if you don't add a conversion function, is to +// sanely copy fields that have the same names and same type names. It's OK if the +// destination type has extra fields, but it must not remove any. 
So you only need to +// add conversion functions for things with changed/removed fields. +func (s *Scheme) AddConversionFuncs(conversionFuncs ...interface{}) error { + for _, f := range conversionFuncs { + if err := s.converter.RegisterConversionFunc(f); err != nil { + return err + } + } + return nil +} + // AddConversionFunc registers a function that converts between a and b by passing objects of those // types to the provided function. The function *must* accept objects of a and b - this machinery will not enforce // any other guarantee. diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go index f21b0ef..65f4511 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/codec_factory.go @@ -17,13 +17,9 @@ limitations under the License. package serializer import ( - "mime" - "strings" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/json" - "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/runtime/serializer/versioning" ) @@ -48,52 +44,29 @@ type serializerType struct { StreamSerializer runtime.Serializer } -func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory, options CodecFactoryOptions) []serializerType { - jsonSerializer := json.NewSerializerWithOptions( - mf, scheme, scheme, - json.SerializerOptions{Yaml: false, Pretty: false, Strict: options.Strict}, - ) - jsonSerializerType := serializerType{ - AcceptContentTypes: []string{runtime.ContentTypeJSON}, - ContentType: runtime.ContentTypeJSON, - FileExtensions: []string{"json"}, - EncodesAsText: true, - Serializer: jsonSerializer, - - Framer: json.Framer, - StreamSerializer: jsonSerializer, - } - if options.Pretty { - jsonSerializerType.PrettySerializer = json.NewSerializerWithOptions( - mf, scheme, scheme, - json.SerializerOptions{Yaml: false, Pretty: true, Strict: options.Strict}, - ) - } - - yamlSerializer := json.NewSerializerWithOptions( - mf, scheme, scheme, - json.SerializerOptions{Yaml: true, Pretty: false, Strict: options.Strict}, - ) - protoSerializer := protobuf.NewSerializer(scheme, scheme) - protoRawSerializer := protobuf.NewRawSerializer(scheme, scheme) +func newSerializersForScheme(scheme *runtime.Scheme, mf json.MetaFactory) []serializerType { + jsonSerializer := json.NewSerializer(mf, scheme, scheme, false) + jsonPrettySerializer := json.NewSerializer(mf, scheme, scheme, true) + yamlSerializer := json.NewYAMLSerializer(mf, scheme, scheme) serializers := []serializerType{ - jsonSerializerType, { - AcceptContentTypes: []string{runtime.ContentTypeYAML}, - ContentType: runtime.ContentTypeYAML, - FileExtensions: []string{"yaml"}, + AcceptContentTypes: []string{"application/json"}, + ContentType: "application/json", + FileExtensions: []string{"json"}, EncodesAsText: true, - Serializer: yamlSerializer, + Serializer: jsonSerializer, + PrettySerializer: jsonPrettySerializer, + + Framer: json.Framer, + StreamSerializer: jsonSerializer, }, { - AcceptContentTypes: []string{runtime.ContentTypeProtobuf}, - ContentType: runtime.ContentTypeProtobuf, - FileExtensions: []string{"pb"}, - Serializer: protoSerializer, - - Framer: protobuf.LengthDelimitedFramer, - StreamSerializer: protoRawSerializer, + AcceptContentTypes: []string{"application/yaml"}, + ContentType: "application/yaml", + 
FileExtensions: []string{"yaml"}, + EncodesAsText: true, + Serializer: yamlSerializer, }, } @@ -116,56 +89,14 @@ type CodecFactory struct { legacySerializer runtime.Serializer } -// CodecFactoryOptions holds the options for configuring CodecFactory behavior -type CodecFactoryOptions struct { - // Strict configures all serializers in strict mode - Strict bool - // Pretty includes a pretty serializer along with the non-pretty one - Pretty bool -} - -// CodecFactoryOptionsMutator takes a pointer to an options struct and then modifies it. -// Functions implementing this type can be passed to the NewCodecFactory() constructor. -type CodecFactoryOptionsMutator func(*CodecFactoryOptions) - -// EnablePretty enables including a pretty serializer along with the non-pretty one -func EnablePretty(options *CodecFactoryOptions) { - options.Pretty = true -} - -// DisablePretty disables including a pretty serializer along with the non-pretty one -func DisablePretty(options *CodecFactoryOptions) { - options.Pretty = false -} - -// EnableStrict enables configuring all serializers in strict mode -func EnableStrict(options *CodecFactoryOptions) { - options.Strict = true -} - -// DisableStrict disables configuring all serializers in strict mode -func DisableStrict(options *CodecFactoryOptions) { - options.Strict = false -} - // NewCodecFactory provides methods for retrieving serializers for the supported wire formats // and conversion wrappers to define preferred internal and external versions. In the future, // as the internal version is used less, callers may instead use a defaulting serializer and // only convert objects which are shared internally (Status, common API machinery). -// -// Mutators can be passed to change the CodecFactoryOptions before construction of the factory. -// It is recommended to explicitly pass mutators instead of relying on defaults. -// By default, Pretty is enabled -- this is conformant with previously supported behavior. -// // TODO: allow other codecs to be compiled in? // TODO: accept a scheme interface -func NewCodecFactory(scheme *runtime.Scheme, mutators ...CodecFactoryOptionsMutator) CodecFactory { - options := CodecFactoryOptions{Pretty: true} - for _, fn := range mutators { - fn(&options) - } - - serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory, options) +func NewCodecFactory(scheme *runtime.Scheme) CodecFactory { + serializers := newSerializersForScheme(scheme, json.DefaultMetaFactory) return newCodecFactory(scheme, serializers) } @@ -189,15 +120,6 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec Serializer: d.Serializer, PrettySerializer: d.PrettySerializer, } - - mediaType, _, err := mime.ParseMediaType(info.MediaType) - if err != nil { - panic(err) - } - parts := strings.SplitN(mediaType, "/", 2) - info.MediaTypeType = parts[0] - info.MediaTypeSubType = parts[1] - if d.StreamSerializer != nil { info.StreamSerializer = &runtime.StreamSerializerInfo{ Serializer: d.StreamSerializer, @@ -226,12 +148,6 @@ func newCodecFactory(scheme *runtime.Scheme, serializers []serializerType) Codec } } -// WithoutConversion returns a NegotiatedSerializer that performs no conversion, even if the -// caller requests it. -func (f CodecFactory) WithoutConversion() runtime.NegotiatedSerializer { - return WithoutConversionCodecFactory{f} -} - // SupportedMediaTypes returns the RFC2046 media types that this factory has serializers for. 
func (f CodecFactory) SupportedMediaTypes() []runtime.SerializerInfo { return f.accepts @@ -299,26 +215,23 @@ func (f CodecFactory) EncoderForVersion(encoder runtime.Encoder, gv runtime.Grou return f.CodecForVersions(encoder, nil, gv, nil) } -// WithoutConversionCodecFactory is a CodecFactory that will explicitly ignore requests to perform conversion. -// This wrapper is used while code migrates away from using conversion (such as external clients) and in the future -// will be unnecessary when we change the signature of NegotiatedSerializer. -type WithoutConversionCodecFactory struct { +// DirectCodecFactory provides methods for retrieving "DirectCodec"s, which do not do conversion. +type DirectCodecFactory struct { CodecFactory } -// EncoderForVersion returns an encoder that does not do conversion, but does set the group version kind of the object -// when serialized. -func (f WithoutConversionCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { - return runtime.WithVersionEncoder{ +// EncoderForVersion returns an encoder that does not do conversion. +func (f DirectCodecFactory) EncoderForVersion(serializer runtime.Encoder, version runtime.GroupVersioner) runtime.Encoder { + return versioning.DirectEncoder{ Version: version, Encoder: serializer, ObjectTyper: f.CodecFactory.scheme, } } -// DecoderToVersion returns an decoder that does not do conversion. -func (f WithoutConversionCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { - return runtime.WithoutVersionDecoder{ +// DecoderToVersion returns an decoder that does not do conversion. gv is ignored. +func (f DirectCodecFactory) DecoderToVersion(serializer runtime.Decoder, _ runtime.GroupVersioner) runtime.Decoder { + return versioning.DirectDecoder{ Decoder: serializer, } } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go index 9d17f09..382c485 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/json/json.go @@ -22,87 +22,47 @@ import ( "strconv" "unsafe" + "github.com/ghodss/yaml" jsoniter "github.com/json-iterator/go" "github.com/modern-go/reflect2" - "sigs.k8s.io/yaml" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/serializer/recognizer" "k8s.io/apimachinery/pkg/util/framer" utilyaml "k8s.io/apimachinery/pkg/util/yaml" - "k8s.io/klog" ) // NewSerializer creates a JSON serializer that handles encoding versioned objects into the proper JSON form. If typer // is not nil, the object has the group, version, and kind fields set. -// Deprecated: use NewSerializerWithOptions instead. func NewSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, pretty bool) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{false, pretty, false}) + return &Serializer{ + meta: meta, + creater: creater, + typer: typer, + yaml: false, + pretty: pretty, + } } // NewYAMLSerializer creates a YAML serializer that handles encoding versioned objects into the proper YAML form. If typer // is not nil, the object has the group, version, and kind fields set. This serializer supports only the subset of YAML that // matches JSON, and will error if constructs are used that do not serialize to JSON. -// Deprecated: use NewSerializerWithOptions instead. 
func NewYAMLSerializer(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { - return NewSerializerWithOptions(meta, creater, typer, SerializerOptions{true, false, false}) -} - -// NewSerializerWithOptions creates a JSON/YAML serializer that handles encoding versioned objects into the proper JSON/YAML -// form. If typer is not nil, the object has the group, version, and kind fields set. Options are copied into the Serializer -// and are immutable. -func NewSerializerWithOptions(meta MetaFactory, creater runtime.ObjectCreater, typer runtime.ObjectTyper, options SerializerOptions) *Serializer { return &Serializer{ - meta: meta, - creater: creater, - typer: typer, - options: options, - identifier: identifier(options), - } -} - -// identifier computes Identifier of Encoder based on the given options. -func identifier(options SerializerOptions) runtime.Identifier { - result := map[string]string{ - "name": "json", - "yaml": strconv.FormatBool(options.Yaml), - "pretty": strconv.FormatBool(options.Pretty), + meta: meta, + creater: creater, + typer: typer, + yaml: true, } - identifier, err := json.Marshal(result) - if err != nil { - klog.Fatalf("Failed marshaling identifier for json Serializer: %v", err) - } - return runtime.Identifier(identifier) -} - -// SerializerOptions holds the options which are used to configure a JSON/YAML serializer. -// example: -// (1) To configure a JSON serializer, set `Yaml` to `false`. -// (2) To configure a YAML serializer, set `Yaml` to `true`. -// (3) To configure a strict serializer that can return strictDecodingError, set `Strict` to `true`. -type SerializerOptions struct { - // Yaml: configures the Serializer to work with JSON(false) or YAML(true). - // When `Yaml` is enabled, this serializer only supports the subset of YAML that - // matches JSON, and will error if constructs are used that do not serialize to JSON. - Yaml bool - - // Pretty: configures a JSON enabled Serializer(`Yaml: false`) to produce human-readable output. - // This option is silently ignored when `Yaml` is `true`. - Pretty bool - - // Strict: configures the Serializer to return strictDecodingError's when duplicate fields are present decoding JSON or YAML. - // Note that enabling this option is not as performant as the non-strict variant, and should not be used in fast paths. - Strict bool } type Serializer struct { meta MetaFactory - options SerializerOptions creater runtime.ObjectCreater typer runtime.ObjectTyper - - identifier runtime.Identifier + yaml bool + pretty bool } // Serializer implements Serializer @@ -159,28 +119,11 @@ func CaseSensitiveJsonIterator() jsoniter.API { return config } -// StrictCaseSensitiveJsonIterator returns a jsoniterator API that's configured to be -// case-sensitive, but also disallows unknown fields when unmarshalling. It is compatible with -// the encoding/json standard library. -func StrictCaseSensitiveJsonIterator() jsoniter.API { - config := jsoniter.Config{ - EscapeHTML: true, - SortMapKeys: true, - ValidateJsonRawMessage: true, - CaseSensitive: true, - DisallowUnknownFields: true, - }.Froze() - // Force jsoniter to decode number to interface{} via int64/float64, if possible. - config.RegisterExtension(&customNumberExtension{}) - return config -} - -// Private copies of jsoniter to try to shield against possible mutations +// Private copy of jsoniter to try to shield against possible mutations // from outside. 
Still does not protect from package level jsoniter.Register*() functions - someone calling them // in some other library will mess with every usage of the jsoniter library in the whole program. // See https://github.com/json-iterator/go/issues/265 var caseSensitiveJsonIterator = CaseSensitiveJsonIterator() -var strictCaseSensitiveJsonIterator = StrictCaseSensitiveJsonIterator() // gvkWithDefaults returns group kind and version defaulting from provided default func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVersionKind { @@ -206,8 +149,18 @@ func gvkWithDefaults(actual, defaultGVK schema.GroupVersionKind) schema.GroupVer // On success or most errors, the method will return the calculated schema kind. // The gvk calculate priority will be originalData > default gvk > into func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + versioned.Objects = []runtime.Object{obj} + return versioned, actual, nil + } + data := originalData - if s.options.Yaml { + if s.yaml { altered, err := yaml.YAMLToJSON(data) if err != nil { return nil, nil, err @@ -263,45 +216,12 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if err := caseSensitiveJsonIterator.Unmarshal(data, obj); err != nil { return nil, actual, err } - - // If the deserializer is non-strict, return successfully here. - if !s.options.Strict { - return obj, actual, nil - } - - // In strict mode pass the data trough the YAMLToJSONStrict converter. - // This is done to catch duplicate fields regardless of encoding (JSON or YAML). For JSON data, - // the output would equal the input, unless there is a parsing error such as duplicate fields. - // As we know this was successful in the non-strict case, the only error that may be returned here - // is because of the newly-added strictness. hence we know we can return the typed strictDecoderError - // the actual error is that the object contains duplicate fields. - altered, err := yaml.YAMLToJSONStrict(originalData) - if err != nil { - return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) - } - // As performance is not an issue for now for the strict deserializer (one has regardless to do - // the unmarshal twice), we take the sanitized, altered data that is guaranteed to have no duplicated - // fields, and unmarshal this into a copy of the already-populated obj. Any error that occurs here is - // due to that a matching field doesn't exist in the object. hence we can return a typed strictDecoderError, - // the actual error is that the object contains unknown field. - strictObj := obj.DeepCopyObject() - if err := strictCaseSensitiveJsonIterator.Unmarshal(altered, strictObj); err != nil { - return nil, actual, runtime.NewStrictDecodingError(err.Error(), string(originalData)) - } - // Always return the same object as the non-strict serializer to avoid any deviations. return obj, actual, nil } // Encode serializes the provided object to the given writer. 
func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(s.Identifier(), s.doEncode, w) - } - return s.doEncode(obj, w) -} - -func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { - if s.options.Yaml { + if s.yaml { json, err := caseSensitiveJsonIterator.Marshal(obj) if err != nil { return err @@ -314,7 +234,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { return err } - if s.options.Pretty { + if s.pretty { data, err := caseSensitiveJsonIterator.MarshalIndent(obj, "", " ") if err != nil { return err @@ -326,14 +246,9 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { return encoder.Encode(obj) } -// Identifier implements runtime.Encoder interface. -func (s *Serializer) Identifier() runtime.Identifier { - return s.identifier -} - // RecognizesData implements the RecognizingDecoder interface. func (s *Serializer) RecognizesData(peek io.Reader) (ok, unknown bool, err error) { - if s.options.Yaml { + if s.yaml { // we could potentially look for '---' return false, true, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go index f606b7d..b99ba25 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf/protobuf.go @@ -69,25 +69,27 @@ func IsNotMarshalable(err error) bool { // NewSerializer creates a Protobuf serializer that handles encoding versioned objects into the proper wire form. If a typer // is passed, the encoded object will have group, version, and kind fields set. If typer is nil, the objects will be written // as-is (any type info passed with the object will be used). -func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *Serializer { +// +// This encoding scheme is experimental, and is subject to change at any time. +func NewSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *Serializer { return &Serializer{ - prefix: protoEncodingPrefix, - creater: creater, - typer: typer, + prefix: protoEncodingPrefix, + creater: creater, + typer: typer, + contentType: defaultContentType, } } type Serializer struct { - prefix []byte - creater runtime.ObjectCreater - typer runtime.ObjectTyper + prefix []byte + creater runtime.ObjectCreater + typer runtime.ObjectTyper + contentType string } var _ runtime.Serializer = &Serializer{} var _ recognizer.RecognizingDecoder = &Serializer{} -const serializerIdentifier runtime.Identifier = "protobuf" - // Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default // gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, // the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will @@ -95,6 +97,23 @@ const serializerIdentifier runtime.Identifier = "protobuf" // not fully qualified with kind/version/group, the type of the into will be used to alter the returned gvk. On success or most // errors, the method will return the calculated schema kind. 
func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + // the last item in versioned becomes into, so if versioned was not originally empty we reset the object + // array so the first position is the decoded object and the second position is the outermost object. + // if there were no objects in the versioned list passed to us, only add ourselves. + if into != nil && into != obj { + versioned.Objects = []runtime.Object{obj, into} + } else { + versioned.Objects = []runtime.Object{obj} + } + return versioned, actual, err + } + prefixLen := len(s.prefix) switch { case len(originalData) == 0: @@ -119,7 +138,7 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { *intoUnknown = unk if ok, _, _ := s.RecognizesData(bytes.NewBuffer(unk.Raw)); ok { - intoUnknown.ContentType = runtime.ContentTypeProtobuf + intoUnknown.ContentType = s.contentType } return intoUnknown, &actual, nil } @@ -161,13 +180,6 @@ func (s *Serializer) Decode(originalData []byte, gvk *schema.GroupVersionKind, i // Encode serializes the provided object to the given writer. func (s *Serializer) Encode(obj runtime.Object, w io.Writer) error { - if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(s.Identifier(), s.doEncode, w) - } - return s.doEncode(obj, w) -} - -func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { prefixSize := uint64(len(s.prefix)) var unk runtime.Unknown @@ -195,7 +207,7 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalToSizedBuffer methods + // the more efficient Size and MarshalTo methods encodedSize := uint64(t.Size()) estimatedSize := prefixSize + estimateUnknownSize(&unk, encodedSize) data := make([]byte, estimatedSize) @@ -237,11 +249,6 @@ func (s *Serializer) doEncode(obj runtime.Object, w io.Writer) error { } } -// Identifier implements runtime.Encoder interface. -func (s *Serializer) Identifier() runtime.Identifier { - return serializerIdentifier -} - // RecognizesData implements the RecognizingDecoder interface. func (s *Serializer) RecognizesData(peek io.Reader) (bool, bool, error) { prefix := make([]byte, 4) @@ -280,12 +287,6 @@ type bufferedMarshaller interface { runtime.ProtobufMarshaller } -// Like bufferedMarshaller, but is able to marshal backwards, which is more efficient since it doesn't call Size() as frequently. -type bufferedReverseMarshaller interface { - proto.Sizer - runtime.ProtobufReverseMarshaller -} - // estimateUnknownSize returns the expected bytes consumed by a given runtime.Unknown // object with a nil RawJSON struct and the expected size of the provided buffer. The // returned size will not be correct if RawJSOn is set on unk. @@ -302,24 +303,24 @@ func estimateUnknownSize(unk *runtime.Unknown, byteSize uint64) uint64 { // encoded object, and thus is not self describing (callers must know what type is being described in order to decode). // // This encoding scheme is experimental, and is subject to change at any time. 
-func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper) *RawSerializer { +func NewRawSerializer(creater runtime.ObjectCreater, typer runtime.ObjectTyper, defaultContentType string) *RawSerializer { return &RawSerializer{ - creater: creater, - typer: typer, + creater: creater, + typer: typer, + contentType: defaultContentType, } } // RawSerializer encodes and decodes objects without adding a runtime.Unknown wrapper (objects are encoded without identifying // type). type RawSerializer struct { - creater runtime.ObjectCreater - typer runtime.ObjectTyper + creater runtime.ObjectCreater + typer runtime.ObjectTyper + contentType string } var _ runtime.Serializer = &RawSerializer{} -const rawSerializerIdentifier runtime.Identifier = "raw-protobuf" - // Decode attempts to convert the provided data into a protobuf message, extract the stored schema kind, apply the provided default // gvk, and then load that data into an object matching the desired schema kind or the provided into. If into is *runtime.Unknown, // the raw data will be extracted and no decoding will be performed. If into is not registered with the typer, then the object will @@ -331,6 +332,20 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind return nil, nil, fmt.Errorf("this serializer requires an object to decode into: %#v", s) } + if versioned, ok := into.(*runtime.VersionedObjects); ok { + into = versioned.Last() + obj, actual, err := s.Decode(originalData, gvk, into) + if err != nil { + return nil, actual, err + } + if into != nil && into != obj { + versioned.Objects = []runtime.Object{obj, into} + } else { + versioned.Objects = []runtime.Object{obj} + } + return versioned, actual, err + } + if len(originalData) == 0 { // TODO: treat like decoding {} from JSON with defaulting return nil, nil, fmt.Errorf("empty data") @@ -343,7 +358,7 @@ func (s *RawSerializer) Decode(originalData []byte, gvk *schema.GroupVersionKind if intoUnknown, ok := into.(*runtime.Unknown); ok && intoUnknown != nil { intoUnknown.Raw = data intoUnknown.ContentEncoding = "" - intoUnknown.ContentType = runtime.ContentTypeProtobuf + intoUnknown.ContentType = s.contentType intoUnknown.SetGroupVersionKind(*actual) return intoUnknown, actual, nil } @@ -396,35 +411,12 @@ func unmarshalToObject(typer runtime.ObjectTyper, creater runtime.ObjectCreater, if err := proto.Unmarshal(data, pb); err != nil { return nil, actual, err } - if actual != nil { - obj.GetObjectKind().SetGroupVersionKind(*actual) - } return obj, actual, nil } // Encode serializes the provided object to the given writer. Overrides is ignored. 
func (s *RawSerializer) Encode(obj runtime.Object, w io.Writer) error { - if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(s.Identifier(), s.doEncode, w) - } - return s.doEncode(obj, w) -} - -func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error { switch t := obj.(type) { - case bufferedReverseMarshaller: - // this path performs a single allocation during write but requires the caller to implement - // the more efficient Size and MarshalToSizedBuffer methods - encodedSize := uint64(t.Size()) - data := make([]byte, encodedSize) - - n, err := t.MarshalToSizedBuffer(data) - if err != nil { - return err - } - _, err = w.Write(data[:n]) - return err - case bufferedMarshaller: // this path performs a single allocation during write but requires the caller to implement // the more efficient Size and MarshalTo methods @@ -452,11 +444,6 @@ func (s *RawSerializer) doEncode(obj runtime.Object, w io.Writer) error { } } -// Identifier implements runtime.Encoder interface. -func (s *RawSerializer) Identifier() runtime.Identifier { - return rawSerializerIdentifier -} - var LengthDelimitedFramer = lengthDelimitedFramer{} type lengthDelimitedFramer struct{} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go new file mode 100644 index 0000000..545cf78 --- /dev/null +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/protobuf_extension.go @@ -0,0 +1,48 @@ +/* +Copyright 2014 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package serializer + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/serializer/protobuf" +) + +const ( + // contentTypeProtobuf is the protobuf type exposed for Kubernetes. It is private to prevent others from + // depending on it unintentionally. + // TODO: potentially move to pkg/api (since it's part of the Kube public API) and pass it in to the + // CodecFactory on initialization. 
+ contentTypeProtobuf = "application/vnd.kubernetes.protobuf" +) + +func protobufSerializer(scheme *runtime.Scheme) (serializerType, bool) { + serializer := protobuf.NewSerializer(scheme, scheme, contentTypeProtobuf) + raw := protobuf.NewRawSerializer(scheme, scheme, contentTypeProtobuf) + return serializerType{ + AcceptContentTypes: []string{contentTypeProtobuf}, + ContentType: contentTypeProtobuf, + FileExtensions: []string{"pb"}, + Serializer: serializer, + + Framer: protobuf.LengthDelimitedFramer, + StreamSerializer: raw, + }, true +} + +func init() { + serializerExtensions = append(serializerExtensions, protobufSerializer) +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go index ced184c..a5ae3ac 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/serializer/versioning/versioning.go @@ -17,15 +17,11 @@ limitations under the License. package versioning import ( - "encoding/json" "io" - "reflect" - "sync" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/klog" ) // NewDefaultingCodecForScheme is a convenience method for callers that are using a scheme. @@ -65,8 +61,6 @@ func NewCodec( encodeVersion: encodeVersion, decodeVersion: decodeVersion, - identifier: identifier(encodeVersion, encoder), - originalSchemeName: originalSchemeName, } return internal @@ -83,86 +77,69 @@ type codec struct { encodeVersion runtime.GroupVersioner decodeVersion runtime.GroupVersioner - identifier runtime.Identifier - // originalSchemeName is optional, but when filled in it holds the name of the scheme from which this codec originates originalSchemeName string } -var identifiersMap sync.Map - -type codecIdentifier struct { - EncodeGV string `json:"encodeGV,omitempty"` - Encoder string `json:"encoder,omitempty"` - Name string `json:"name,omitempty"` -} - -// identifier computes Identifier of Encoder based on codec parameters. -func identifier(encodeGV runtime.GroupVersioner, encoder runtime.Encoder) runtime.Identifier { - result := codecIdentifier{ - Name: "versioning", - } - - if encodeGV != nil { - result.EncodeGV = encodeGV.Identifier() - } - if encoder != nil { - result.Encoder = string(encoder.Identifier()) - } - if id, ok := identifiersMap.Load(result); ok { - return id.(runtime.Identifier) - } - identifier, err := json.Marshal(result) - if err != nil { - klog.Fatalf("Failed marshaling identifier for codec: %v", err) - } - identifiersMap.Store(result, runtime.Identifier(identifier)) - return runtime.Identifier(identifier) -} - // Decode attempts a decode of the object, then tries to convert it to the internal version. If into is provided and the decoding is // successful, the returned runtime.Object will be the value passed as into. Note that this may bypass conversion if you pass an // into that matches the serialized version. 
func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { - // If the into object is unstructured and expresses an opinion about its group/version, - // create a new instance of the type so we always exercise the conversion path (skips short-circuiting on `into == obj`) - decodeInto := into - if into != nil { - if _, ok := into.(runtime.Unstructured); ok && !into.GetObjectKind().GroupVersionKind().GroupVersion().Empty() { - decodeInto = reflect.New(reflect.TypeOf(into).Elem()).Interface().(runtime.Object) - } + versioned, isVersioned := into.(*runtime.VersionedObjects) + if isVersioned { + into = versioned.Last() } - obj, gvk, err := c.decoder.Decode(data, defaultGVK, decodeInto) + obj, gvk, err := c.decoder.Decode(data, defaultGVK, into) if err != nil { return nil, gvk, err } if d, ok := obj.(runtime.NestedObjectDecoder); ok { - if err := d.DecodeNestedObjects(runtime.WithoutVersionDecoder{c.decoder}); err != nil { + if err := d.DecodeNestedObjects(DirectDecoder{c.decoder}); err != nil { return nil, gvk, err } } // if we specify a target, use generic conversion. if into != nil { + if into == obj { + if isVersioned { + return versioned, gvk, nil + } + return into, gvk, nil + } + // perform defaulting if requested if c.defaulter != nil { + // create a copy to ensure defaulting is not applied to the original versioned objects + if isVersioned { + versioned.Objects = []runtime.Object{obj.DeepCopyObject()} + } c.defaulter.Default(obj) - } - - // Short-circuit conversion if the into object is same object - if into == obj { - return into, gvk, nil + } else { + if isVersioned { + versioned.Objects = []runtime.Object{obj} + } } if err := c.convertor.Convert(obj, into, c.decodeVersion); err != nil { return nil, gvk, err } + if isVersioned { + versioned.Objects = append(versioned.Objects, into) + return versioned, gvk, nil + } return into, gvk, nil } + // Convert if needed. + if isVersioned { + // create a copy, because ConvertToVersion does not guarantee non-mutation of objects + versioned.Objects = []runtime.Object{obj.DeepCopyObject()} + } + // perform defaulting if requested if c.defaulter != nil { c.defaulter.Default(obj) @@ -172,19 +149,18 @@ func (c *codec) Decode(data []byte, defaultGVK *schema.GroupVersionKind, into ru if err != nil { return nil, gvk, err } + if isVersioned { + if versioned.Last() != out { + versioned.Objects = append(versioned.Objects, out) + } + return versioned, gvk, nil + } return out, gvk, nil } // Encode ensures the provided object is output in the appropriate group and version, invoking // conversion if necessary. Unversioned objects (according to the ObjectTyper) are output as is. 
func (c *codec) Encode(obj runtime.Object, w io.Writer) error { - if co, ok := obj.(runtime.CacheableObject); ok { - return co.CacheEncode(c.Identifier(), c.doEncode, w) - } - return c.doEncode(obj, w) -} - -func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { switch obj := obj.(type) { case *runtime.Unknown: return c.encoder.Encode(obj, w) @@ -213,38 +189,84 @@ func (c *codec) doEncode(obj runtime.Object, w io.Writer) error { return err } - objectKind := obj.GetObjectKind() - old := objectKind.GroupVersionKind() - // restore the old GVK after encoding - defer objectKind.SetGroupVersionKind(old) - if c.encodeVersion == nil || isUnversioned { if e, ok := obj.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(DirectEncoder{Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() objectKind.SetGroupVersionKind(gvks[0]) - return c.encoder.Encode(obj, w) + err = c.encoder.Encode(obj, w) + objectKind.SetGroupVersionKind(old) + return err } // Perform a conversion if necessary + objectKind := obj.GetObjectKind() + old := objectKind.GroupVersionKind() out, err := c.convertor.ConvertToVersion(obj, c.encodeVersion) if err != nil { return err } if e, ok := out.(runtime.NestedObjectEncoder); ok { - if err := e.EncodeNestedObjects(runtime.WithVersionEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { + if err := e.EncodeNestedObjects(DirectEncoder{Version: c.encodeVersion, Encoder: c.encoder, ObjectTyper: c.typer}); err != nil { return err } } // Conversion is responsible for setting the proper group, version, and kind onto the outgoing object - return c.encoder.Encode(out, w) + err = c.encoder.Encode(out, w) + // restore the old GVK, in case conversion returned the same object + objectKind.SetGroupVersionKind(old) + return err } -// Identifier implements runtime.Encoder interface. -func (c *codec) Identifier() runtime.Identifier { - return c.identifier +// DirectEncoder serializes an object and ensures the GVK is set. +type DirectEncoder struct { + Version runtime.GroupVersioner + runtime.Encoder + runtime.ObjectTyper +} + +// Encode does not do conversion. It sets the gvk during serialization. +func (e DirectEncoder) Encode(obj runtime.Object, stream io.Writer) error { + gvks, _, err := e.ObjectTyper.ObjectKinds(obj) + if err != nil { + if runtime.IsNotRegisteredError(err) { + return e.Encoder.Encode(obj, stream) + } + return err + } + kind := obj.GetObjectKind() + oldGVK := kind.GroupVersionKind() + gvk := gvks[0] + if e.Version != nil { + preferredGVK, ok := e.Version.KindForGroupVersionKinds(gvks) + if ok { + gvk = preferredGVK + } + } + kind.SetGroupVersionKind(gvk) + err = e.Encoder.Encode(obj, stream) + kind.SetGroupVersionKind(oldGVK) + return err +} + +// DirectDecoder clears the group version kind of a deserialized object. +type DirectDecoder struct { + runtime.Decoder +} + +// Decode does not do conversion. It removes the gvk during deserialization. 
+func (d DirectDecoder) Decode(data []byte, defaults *schema.GroupVersionKind, into runtime.Object) (runtime.Object, *schema.GroupVersionKind, error) { + obj, gvk, err := d.Decoder.Decode(data, defaults, into) + if obj != nil { + kind := obj.GetObjectKind() + // clearing the gvk is just a convention of a codec + kind.SetGroupVersionKind(schema.GroupVersionKind{}) + } + return obj, gvk, err } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types.go b/vendor/k8s.io/apimachinery/pkg/runtime/types.go index 31359f3..e4515d8 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types.go @@ -41,9 +41,7 @@ type TypeMeta struct { } const ( - ContentTypeJSON string = "application/json" - ContentTypeYAML string = "application/yaml" - ContentTypeProtobuf string = "application/vnd.kubernetes.protobuf" + ContentTypeJSON string = "application/json" ) // RawExtension is used to hold extensions in external versions. @@ -95,7 +93,7 @@ type RawExtension struct { // Raw is the underlying serialization of this object. // // TODO: Determine how to detect ContentType and ContentEncoding of 'Raw' data. - Raw []byte `json:"-" protobuf:"bytes,1,opt,name=raw"` + Raw []byte `protobuf:"bytes,1,opt,name=raw"` // Object can hold a representation of this extension - useful for working with versioned // structs. Object Object `json:"-"` @@ -124,3 +122,16 @@ type Unknown struct { // Unspecified means ContentTypeJSON. ContentType string `protobuf:"bytes,4,opt,name=contentType"` } + +// VersionedObjects is used by Decoders to give callers a way to access all versions +// of an object during the decoding process. +// +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object +// +k8s:deepcopy-gen=true +type VersionedObjects struct { + // Objects is the set of objects retrieved during decoding, in order of conversion. + // The 0 index is the object as serialized on the wire. If conversion has occurred, + // other objects may be present. The right most object is the same as would be returned + // by a normal Decode call. + Objects []Object +} diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go index a82227b..ead96ee 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/types_proto.go @@ -24,66 +24,46 @@ type ProtobufMarshaller interface { MarshalTo(data []byte) (int, error) } -type ProtobufReverseMarshaller interface { - MarshalToSizedBuffer(data []byte) (int, error) -} - // NestedMarshalTo allows a caller to avoid extra allocations during serialization of an Unknown -// that will contain an object that implements ProtobufMarshaller or ProtobufReverseMarshaller. +// that will contain an object that implements ProtobufMarshaller. func (m *Unknown) NestedMarshalTo(data []byte, b ProtobufMarshaller, size uint64) (int, error) { - // Calculate the full size of the message. - msgSize := m.Size() - if b != nil { - msgSize += int(size) + sovGenerated(size) + 1 + var i int + _ = i + var l int + _ = l + data[i] = 0xa + i++ + i = encodeVarintGenerated(data, i, uint64(m.TypeMeta.Size())) + n1, err := m.TypeMeta.MarshalTo(data[i:]) + if err != nil { + return 0, err } + i += n1 - // Reverse marshal the fields of m. 
- i := msgSize - i -= len(m.ContentType) - copy(data[i:], m.ContentType) - i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) - i-- - data[i] = 0x22 - i -= len(m.ContentEncoding) - copy(data[i:], m.ContentEncoding) - i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) - i-- - data[i] = 0x1a if b != nil { - if r, ok := b.(ProtobufReverseMarshaller); ok { - n1, err := r.MarshalToSizedBuffer(data[:i]) - if err != nil { - return 0, err - } - i -= int(size) - if uint64(n1) != size { - // programmer error: the Size() method for protobuf does not match the results of LashramOt, which means the proto - // struct returned would be wrong. - return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) - } - } else { - i -= int(size) - n1, err := b.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - if uint64(n1) != size { - // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto - // struct returned would be wrong. - return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n1) - } - } - i = encodeVarintGenerated(data, i, size) - i-- data[i] = 0x12 + i++ + i = encodeVarintGenerated(data, i, size) + n2, err := b.MarshalTo(data[i:]) + if err != nil { + return 0, err + } + if uint64(n2) != size { + // programmer error: the Size() method for protobuf does not match the results of MarshalTo, which means the proto + // struct returned would be wrong. + return 0, fmt.Errorf("the Size() value of %T was %d, but NestedMarshalTo wrote %d bytes to data", b, size, n2) + } + i += n2 } - n2, err := m.TypeMeta.MarshalToSizedBuffer(data[:i]) - if err != nil { - return 0, err - } - i -= n2 - i = encodeVarintGenerated(data, i, uint64(n2)) - i-- - data[i] = 0xa - return msgSize - i, nil + + data[i] = 0x1a + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentEncoding))) + i += copy(data[i:], m.ContentEncoding) + + data[i] = 0x22 + i++ + i = encodeVarintGenerated(data, i, uint64(len(m.ContentType))) + i += copy(data[i:], m.ContentType) + return i, nil } diff --git a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go index b039383..8b9182f 100644 --- a/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go +++ b/vendor/k8s.io/apimachinery/pkg/runtime/zz_generated.deepcopy.go @@ -73,3 +73,36 @@ func (in *Unknown) DeepCopyObject() Object { } return nil } + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *VersionedObjects) DeepCopyInto(out *VersionedObjects) { + *out = *in + if in.Objects != nil { + in, out := &in.Objects, &out.Objects + *out = make([]Object, len(*in)) + for i := range *in { + if (*in)[i] != nil { + (*out)[i] = (*in)[i].DeepCopyObject() + } + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VersionedObjects. +func (in *VersionedObjects) DeepCopy() *VersionedObjects { + if in == nil { + return nil + } + out := new(VersionedObjects) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new Object. 
+func (in *VersionedObjects) DeepCopyObject() Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/apimachinery/pkg/types/patch.go b/vendor/k8s.io/apimachinery/pkg/types/patch.go index fe8ecaa..d522d1d 100644 --- a/vendor/k8s.io/apimachinery/pkg/types/patch.go +++ b/vendor/k8s.io/apimachinery/pkg/types/patch.go @@ -25,5 +25,4 @@ const ( JSONPatchType PatchType = "application/json-patch+json" MergePatchType PatchType = "application/merge-patch+json" StrategicMergePatchType PatchType = "application/strategic-merge-patch+json" - ApplyPatchType PatchType = "application/apply-patch+yaml" ) diff --git a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go index 6cf13d8..9567f90 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go +++ b/vendor/k8s.io/apimachinery/pkg/util/clock/clock.go @@ -21,18 +21,11 @@ import ( "time" ) -// PassiveClock allows for injecting fake or real clocks into code -// that needs to read the current time but does not support scheduling -// activity in the future. -type PassiveClock interface { - Now() time.Time - Since(time.Time) time.Duration -} - // Clock allows for injecting fake or real clocks into code that // needs to do arbitrary things based on time. type Clock interface { - PassiveClock + Now() time.Time + Since(time.Time) time.Duration After(time.Duration) <-chan time.Time NewTimer(time.Duration) Timer Sleep(time.Duration) @@ -52,39 +45,31 @@ func (RealClock) Since(ts time.Time) time.Duration { return time.Since(ts) } -// After is the same as time.After(d). +// Same as time.After(d). func (RealClock) After(d time.Duration) <-chan time.Time { return time.After(d) } -// NewTimer returns a new Timer. func (RealClock) NewTimer(d time.Duration) Timer { return &realTimer{ timer: time.NewTimer(d), } } -// NewTicker returns a new Ticker. func (RealClock) NewTicker(d time.Duration) Ticker { return &realTicker{ ticker: time.NewTicker(d), } } -// Sleep pauses the RealClock for duration d. func (RealClock) Sleep(d time.Duration) { time.Sleep(d) } -// FakePassiveClock implements PassiveClock, but returns an arbitrary time. -type FakePassiveClock struct { - lock sync.RWMutex - time time.Time -} - // FakeClock implements Clock, but returns an arbitrary time. type FakeClock struct { - FakePassiveClock + lock sync.RWMutex + time time.Time // waiters are waiting for the fake time to pass their specified time waiters []fakeClockWaiter @@ -95,44 +80,30 @@ type fakeClockWaiter struct { stepInterval time.Duration skipIfBlocked bool destChan chan time.Time + fired bool } -// NewFakePassiveClock returns a new FakePassiveClock. -func NewFakePassiveClock(t time.Time) *FakePassiveClock { - return &FakePassiveClock{ - time: t, - } -} - -// NewFakeClock returns a new FakeClock func NewFakeClock(t time.Time) *FakeClock { return &FakeClock{ - FakePassiveClock: *NewFakePassiveClock(t), + time: t, } } // Now returns f's time. -func (f *FakePassiveClock) Now() time.Time { +func (f *FakeClock) Now() time.Time { f.lock.RLock() defer f.lock.RUnlock() return f.time } // Since returns time since the time in f. -func (f *FakePassiveClock) Since(ts time.Time) time.Duration { +func (f *FakeClock) Since(ts time.Time) time.Duration { f.lock.RLock() defer f.lock.RUnlock() return f.time.Sub(ts) } -// SetTime sets the time on the FakePassiveClock. 
-func (f *FakePassiveClock) SetTime(t time.Time) { - f.lock.Lock() - defer f.lock.Unlock() - f.time = t -} - -// After is the Fake version of time.After(d). +// Fake version of time.After(d). func (f *FakeClock) After(d time.Duration) <-chan time.Time { f.lock.Lock() defer f.lock.Unlock() @@ -145,7 +116,7 @@ func (f *FakeClock) After(d time.Duration) <-chan time.Time { return ch } -// NewTimer is the Fake version of time.NewTimer(d). +// Fake version of time.NewTimer(d). func (f *FakeClock) NewTimer(d time.Duration) Timer { f.lock.Lock() defer f.lock.Unlock() @@ -162,7 +133,6 @@ func (f *FakeClock) NewTimer(d time.Duration) Timer { return timer } -// NewTicker returns a new Ticker. func (f *FakeClock) NewTicker(d time.Duration) Ticker { f.lock.Lock() defer f.lock.Unlock() @@ -180,14 +150,14 @@ func (f *FakeClock) NewTicker(d time.Duration) Ticker { } } -// Step moves clock by Duration, notifies anyone that's called After, Tick, or NewTimer +// Move clock by Duration, notify anyone that's called After, Tick, or NewTimer func (f *FakeClock) Step(d time.Duration) { f.lock.Lock() defer f.lock.Unlock() f.setTimeLocked(f.time.Add(d)) } -// SetTime sets the time on a FakeClock. +// Sets the time. func (f *FakeClock) SetTime(t time.Time) { f.lock.Lock() defer f.lock.Unlock() @@ -205,10 +175,12 @@ func (f *FakeClock) setTimeLocked(t time.Time) { if w.skipIfBlocked { select { case w.destChan <- t: + w.fired = true default: } } else { w.destChan <- t + w.fired = true } if w.stepInterval > 0 { @@ -225,7 +197,7 @@ func (f *FakeClock) setTimeLocked(t time.Time) { f.waiters = newWaiters } -// HasWaiters returns true if After has been called on f but not yet satisfied (so you can +// Returns true if After has been called on f but not yet satisfied (so you can // write race-free tests). func (f *FakeClock) HasWaiters() bool { f.lock.RLock() @@ -233,7 +205,6 @@ func (f *FakeClock) HasWaiters() bool { return len(f.waiters) > 0 } -// Sleep pauses the FakeClock for duration d. func (f *FakeClock) Sleep(d time.Duration) { f.Step(d) } @@ -255,25 +226,24 @@ func (i *IntervalClock) Since(ts time.Time) time.Duration { return i.Time.Sub(ts) } -// After is currently unimplemented, will panic. +// Unimplemented, will panic. // TODO: make interval clock use FakeClock so this can be implemented. func (*IntervalClock) After(d time.Duration) <-chan time.Time { panic("IntervalClock doesn't implement After") } -// NewTimer is currently unimplemented, will panic. +// Unimplemented, will panic. // TODO: make interval clock use FakeClock so this can be implemented. func (*IntervalClock) NewTimer(d time.Duration) Timer { panic("IntervalClock doesn't implement NewTimer") } -// NewTicker is currently unimplemented, will panic. +// Unimplemented, will panic. // TODO: make interval clock use FakeClock so this can be implemented. func (*IntervalClock) NewTicker(d time.Duration) Ticker { panic("IntervalClock doesn't implement NewTicker") } -// Sleep is currently unimplemented; will panic. func (*IntervalClock) Sleep(d time.Duration) { panic("IntervalClock doesn't implement Sleep") } @@ -317,53 +287,38 @@ func (f *fakeTimer) C() <-chan time.Time { return f.waiter.destChan } -// Stop conditionally stops the timer. If the timer has neither fired -// nor been stopped then this call stops the timer and returns true, -// otherwise this call returns false. This is like time.Timer::Stop. +// Stop stops the timer and returns true if the timer has not yet fired, or false otherwise. 
func (f *fakeTimer) Stop() bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - // The timer has already fired or been stopped, unless it is found - // among the clock's waiters. - stopped := false - oldWaiters := f.fakeClock.waiters - newWaiters := make([]fakeClockWaiter, 0, len(oldWaiters)) - seekChan := f.waiter.destChan - for i := range oldWaiters { - // Identify the timer's fakeClockWaiter by the identity of the - // destination channel, nothing else is necessarily unique and - // constant since the timer's creation. - if oldWaiters[i].destChan == seekChan { - stopped = true - } else { - newWaiters = append(newWaiters, oldWaiters[i]) + + newWaiters := make([]fakeClockWaiter, 0, len(f.fakeClock.waiters)) + for i := range f.fakeClock.waiters { + w := &f.fakeClock.waiters[i] + if w != &f.waiter { + newWaiters = append(newWaiters, *w) } } f.fakeClock.waiters = newWaiters - return stopped + return !f.waiter.fired } -// Reset conditionally updates the firing time of the timer. If the -// timer has neither fired nor been stopped then this call resets the -// timer to the fake clock's "now" + d and returns true, otherwise -// this call returns false. This is like time.Timer::Reset. +// Reset resets the timer to the fake clock's "now" + d. It returns true if the timer has not yet +// fired, or false otherwise. func (f *fakeTimer) Reset(d time.Duration) bool { f.fakeClock.lock.Lock() defer f.fakeClock.lock.Unlock() - waiters := f.fakeClock.waiters - seekChan := f.waiter.destChan - for i := range waiters { - if waiters[i].destChan == seekChan { - waiters[i].targetTime = f.fakeClock.time.Add(d) - return true - } - } - return false + + active := !f.waiter.fired + + f.waiter.fired = false + f.waiter.targetTime = f.fakeClock.time.Add(d) + + return active } -// Ticker defines the Ticker interface type Ticker interface { C() <-chan time.Time Stop() diff --git a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go index 5bafc21..88e9376 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/errors/errors.go @@ -19,8 +19,6 @@ package errors import ( "errors" "fmt" - - "k8s.io/apimachinery/pkg/util/sets" ) // MessageCountMap contains occurrence for each error message. @@ -28,14 +26,9 @@ type MessageCountMap map[string]int // Aggregate represents an object that contains multiple errors, but does not // necessarily have singular semantic meaning. -// The aggregate can be used with `errors.Is()` to check for the occurrence of -// a specific error type. -// Errors.As() is not supported, because the caller presumably cares about a -// specific error of potentially multiple that match the given type. 
type Aggregate interface { error Errors() []error - Is(error) bool } // NewAggregate converts a slice of errors into an Aggregate interface, which @@ -74,53 +67,12 @@ func (agg aggregate) Error() string { if len(agg) == 1 { return agg[0].Error() } - seenerrs := sets.NewString() - result := "" - agg.visit(func(err error) bool { - msg := err.Error() - if seenerrs.Has(msg) { - return false - } - seenerrs.Insert(msg) - if len(seenerrs) > 1 { - result += ", " - } - result += msg - return false - }) - if len(seenerrs) == 1 { - return result + result := fmt.Sprintf("[%s", agg[0].Error()) + for i := 1; i < len(agg); i++ { + result += fmt.Sprintf(", %s", agg[i].Error()) } - return "[" + result + "]" -} - -func (agg aggregate) Is(target error) bool { - return agg.visit(func(err error) bool { - return errors.Is(err, target) - }) -} - -func (agg aggregate) visit(f func(err error) bool) bool { - for _, err := range agg { - switch err := err.(type) { - case aggregate: - if match := err.visit(f); match { - return match - } - case Aggregate: - for _, nestedErr := range err.Errors() { - if match := f(nestedErr); match { - return match - } - } - default: - if match := f(err); match { - return match - } - } - } - - return false + result += "]" + return result } // Errors is part of the Aggregate interface. diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go index ec1cb70..5c2ac4f 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.pb.go @@ -14,20 +14,26 @@ See the License for the specific language governing permissions and limitations under the License. */ -// Code generated by protoc-gen-gogo. DO NOT EDIT. +// Code generated by protoc-gen-gogo. // source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +// DO NOT EDIT! -package intstr +/* + Package intstr is a generated protocol buffer package. -import ( - fmt "fmt" + It is generated from these files: + k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto - io "io" - math "math" - math_bits "math/bits" + It has these top-level messages: + IntOrString +*/ +package intstr - proto "github.com/gogo/protobuf/proto" -) +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -38,71 +44,19 @@ var _ = math.Inf // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package - -func (m *IntOrString) Reset() { *m = IntOrString{} } -func (*IntOrString) ProtoMessage() {} -func (*IntOrString) Descriptor() ([]byte, []int) { - return fileDescriptor_94e046ae3ce6121c, []int{0} -} -func (m *IntOrString) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *IntOrString) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *IntOrString) XXX_Merge(src proto.Message) { - xxx_messageInfo_IntOrString.Merge(m, src) -} -func (m *IntOrString) XXX_Size() int { - return m.Size() -} -func (m *IntOrString) XXX_DiscardUnknown() { - xxx_messageInfo_IntOrString.DiscardUnknown(m) -} +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -var xxx_messageInfo_IntOrString proto.InternalMessageInfo +func (m *IntOrString) Reset() { *m = IntOrString{} } +func (*IntOrString) ProtoMessage() {} +func (*IntOrString) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } func init() { proto.RegisterType((*IntOrString)(nil), "k8s.io.apimachinery.pkg.util.intstr.IntOrString") } - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptor_94e046ae3ce6121c) -} - -var fileDescriptor_94e046ae3ce6121c = []byte{ - // 292 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, - 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, - 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, - 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, - 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, - 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, - 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, - 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, - 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, - 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, - 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, - 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, - 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, - 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, - 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, - 0xb7, 0xf7, 0x31, 0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, - 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, - 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, - 0x64, 0x01, 0x00, 0x00, -} - func (m *IntOrString) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } @@ -110,44 +64,51 @@ func (m 
*IntOrString) Marshal() (dAtA []byte, err error) { } func (m *IntOrString) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *IntOrString) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) + var i int _ = i var l int _ = l - i -= len(m.StrVal) - copy(dAtA[i:], m.StrVal) - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) - i-- - dAtA[i] = 0x1a - i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) - i-- - dAtA[i] = 0x10 - i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) - i-- dAtA[i] = 0x8 - return len(dAtA) - i, nil + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.Type)) + dAtA[i] = 0x10 + i++ + i = encodeVarintGenerated(dAtA, i, uint64(m.IntVal)) + dAtA[i] = 0x1a + i++ + i = encodeVarintGenerated(dAtA, i, uint64(len(m.StrVal))) + i += copy(dAtA[i:], m.StrVal) + return i, nil } +func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - offset -= sovGenerated(v) - base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) - return base + return offset + 1 } func (m *IntOrString) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l n += 1 + sovGenerated(uint64(m.Type)) @@ -158,7 +119,14 @@ func (m *IntOrString) Size() (n int) { } func sovGenerated(x uint64) (n int) { - return (math_bits.Len64(x|1) + 6) / 7 + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n } func sozGenerated(x uint64) (n int) { return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) @@ -178,7 +146,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - wire |= uint64(b&0x7F) << shift + wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -206,7 +174,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.Type |= Type(b&0x7F) << shift + m.Type |= (Type(b) & 0x7F) << shift if b < 0x80 { break } @@ -225,7 +193,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - m.IntVal |= int32(b&0x7F) << shift + m.IntVal |= (int32(b) & 0x7F) << shift if b < 0x80 { break } @@ -244,7 +212,7 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } @@ -254,9 +222,6 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { return ErrInvalidLengthGenerated } postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthGenerated - } if postIndex > l { return io.ErrUnexpectedEOF } @@ -271,9 +236,6 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { if skippy < 0 { return ErrInvalidLengthGenerated } - if (iNdEx + skippy) < 0 { - return ErrInvalidLengthGenerated - } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } @@ -289,7 +251,6 @@ func (m *IntOrString) Unmarshal(dAtA []byte) error { func skipGenerated(dAtA []byte) (n int, err error) { l 
:= len(dAtA) iNdEx := 0 - depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { @@ -321,8 +282,10 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + return iNdEx, nil case 1: iNdEx += 8 + return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { @@ -339,34 +302,80 @@ func skipGenerated(dAtA []byte) (n int, err error) { break } } + iNdEx += length if length < 0 { return 0, ErrInvalidLengthGenerated } - iNdEx += length + return iNdEx, nil case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroupGenerated + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipGenerated(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next } - depth-- + return iNdEx, nil + case 4: + return iNdEx, nil case 5: iNdEx += 4 + return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } - if iNdEx < 0 { - return 0, ErrInvalidLengthGenerated - } - if depth == 0 { - return iNdEx, nil - } } - return 0, io.ErrUnexpectedEOF + panic("unreachable") } var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") ) + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto", fileDescriptorGenerated) +} + +var fileDescriptorGenerated = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x8f, 0x31, 0x4b, 0x33, 0x31, + 0x1c, 0xc6, 0x93, 0xb7, 0x7d, 0x8b, 0x9e, 0xe0, 0x50, 0x1c, 0x8a, 0x43, 0x7a, 0x28, 0xc8, 0x0d, + 0x9a, 0xac, 0xe2, 0xd8, 0xad, 0x20, 0x08, 0x57, 0x71, 0x70, 0xbb, 0x6b, 0x63, 0x1a, 0xae, 0x4d, + 0x42, 0xee, 0x7f, 0xc2, 0x6d, 0xfd, 0x08, 0xba, 0x39, 0xfa, 0x71, 0x6e, 0xec, 0xd8, 0x41, 0x8a, + 0x17, 0xbf, 0x85, 0x93, 0x5c, 0xee, 0x40, 0xa7, 0xe4, 0x79, 0x9e, 0xdf, 0x2f, 0x90, 0xe0, 0x36, + 0xbb, 0xce, 0xa9, 0xd4, 0x2c, 0x2b, 0x52, 0x6e, 0x15, 0x07, 0x9e, 0xb3, 0x67, 0xae, 0x16, 0xda, + 0xb2, 0x6e, 0x48, 0x8c, 0x5c, 0x27, 0xf3, 0xa5, 0x54, 0xdc, 0x96, 0xcc, 0x64, 0x82, 0x15, 0x20, + 0x57, 0x4c, 0x2a, 0xc8, 0xc1, 0x32, 0xc1, 0x15, 0xb7, 0x09, 0xf0, 0x05, 0x35, 0x56, 0x83, 0x1e, + 0x9e, 0xb7, 0x12, 0xfd, 0x2b, 0x51, 0x93, 0x09, 0xda, 0x48, 0xb4, 0x95, 0x4e, 0xaf, 0x84, 0x84, + 0x65, 0x91, 0xd2, 0xb9, 0x5e, 0x33, 0xa1, 0x85, 0x66, 0xde, 0x4d, 0x8b, 0x27, 0x9f, 0x7c, 0xf0, + 0xb7, 0xf6, 0xcd, 0xb3, 0x57, 0x1c, 0x1c, 0x4d, 0x15, 0xdc, 0xd9, 0x19, 0x58, 0xa9, 0xc4, 0x30, + 0x0a, 0xfa, 0x50, 0x1a, 0x3e, 0xc2, 0x21, 0x8e, 0x7a, 0x93, 0x93, 0x6a, 0x3f, 0x46, 0x6e, 0x3f, + 0xee, 0xdf, 0x97, 0x86, 0x7f, 0x77, 0x67, 0xec, 0x89, 0xe1, 0x45, 0x30, 0x90, 0x0a, 0x1e, 0x92, + 0xd5, 0xe8, 0x5f, 0x88, 0xa3, 0xff, 0x93, 0xe3, 0x8e, 0x1d, 0x4c, 0x7d, 0x1b, 0x77, 0x6b, 0xc3, + 0xe5, 0x60, 0x1b, 0xae, 0x17, 0xe2, 0xe8, 0xf0, 0x97, 0x9b, 0xf9, 0x36, 0xee, 0xd6, 0x9b, 0x83, + 0xb7, 0xf7, 0x31, 
0xda, 0x7c, 0x84, 0x68, 0x72, 0x59, 0xd5, 0x04, 0x6d, 0x6b, 0x82, 0x76, 0x35, + 0x41, 0x1b, 0x47, 0x70, 0xe5, 0x08, 0xde, 0x3a, 0x82, 0x77, 0x8e, 0xe0, 0x4f, 0x47, 0xf0, 0xcb, + 0x17, 0x41, 0x8f, 0x83, 0xf6, 0xc3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x52, 0xa0, 0xb5, 0xc9, + 0x64, 0x01, 0x00, 0x00, +} diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto index e79fb9e..1c3ec73 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/generated.proto @@ -29,7 +29,7 @@ option go_package = "intstr"; // inner type. This allows you to have, for example, a JSON field that can // accept a name or number. // TODO: Rename to Int32OrString -// +// // +protobuf=true // +protobuf.options.(gogoproto.goproto_stringer)=false // +k8s:openapi-gen=true diff --git a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go index cb974dc..642b83c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go +++ b/vendor/k8s.io/apimachinery/pkg/util/intstr/intstr.go @@ -25,8 +25,8 @@ import ( "strconv" "strings" + "github.com/golang/glog" "github.com/google/gofuzz" - "k8s.io/klog" ) // IntOrString is a type that can hold an int32 or a string. When used in @@ -45,7 +45,7 @@ type IntOrString struct { } // Type represents the stored type of IntOrString. -type Type int64 +type Type int const ( Int Type = iota // The IntOrString holds an int. @@ -58,7 +58,7 @@ const ( // TODO: convert to (val int32) func FromInt(val int) IntOrString { if val > math.MaxInt32 || val < math.MinInt32 { - klog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) + glog.Errorf("value: %d overflows int32\n%s\n", val, debug.Stack()) } return IntOrString{Type: Int, IntVal: int32(val)} } @@ -97,8 +97,7 @@ func (intstr *IntOrString) String() string { } // IntValue returns the IntVal if type Int, or if -// it is a String, will attempt a conversion to int, -// returning 0 if a parsing error occurs. +// it is a String, will attempt a conversion to int. func (intstr *IntOrString) IntValue() int { if intstr.Type == String { i, _ := strconv.Atoi(intstr.StrVal) @@ -123,11 +122,11 @@ func (intstr IntOrString) MarshalJSON() ([]byte, error) { // the OpenAPI spec of this type. // // See: https://github.com/kubernetes/kube-openapi/tree/master/pkg/generators -func (IntOrString) OpenAPISchemaType() []string { return []string{"string"} } +func (_ IntOrString) OpenAPISchemaType() []string { return []string{"string"} } // OpenAPISchemaFormat is used by the kube-openapi generator when constructing // the OpenAPI spec of this type. 
-func (IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } +func (_ IntOrString) OpenAPISchemaFormat() string { return "int-or-string" } func (intstr *IntOrString) Fuzz(c fuzz.Continue) { if intstr == nil { diff --git a/vendor/k8s.io/apimachinery/pkg/util/json/json.go b/vendor/k8s.io/apimachinery/pkg/util/json/json.go index 2048348..10c8cb8 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/json/json.go +++ b/vendor/k8s.io/apimachinery/pkg/util/json/json.go @@ -19,7 +19,6 @@ package json import ( "bytes" "encoding/json" - "fmt" "io" ) @@ -35,9 +34,6 @@ func Marshal(v interface{}) ([]byte, error) { return json.Marshal(v) } -// limit recursive depth to prevent stack overflow errors -const maxDepth = 10000 - // Unmarshal unmarshals the given data // If v is a *map[string]interface{}, numbers are converted to int64 or float64 func Unmarshal(data []byte, v interface{}) error { @@ -52,7 +48,7 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v, 0) + return convertMapNumbers(*v) case *[]interface{}: // Build a decoder from the given data @@ -64,54 +60,25 @@ func Unmarshal(data []byte, v interface{}) error { return err } // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v, 0) - - case *interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertInterfaceNumbers(v, 0) + return convertSliceNumbers(*v) default: return json.Unmarshal(data, v) } } -func convertInterfaceNumbers(v *interface{}, depth int) error { - var err error - switch v2 := (*v).(type) { - case json.Number: - *v, err = convertNumber(v2) - case map[string]interface{}: - err = convertMapNumbers(v2, depth+1) - case []interface{}: - err = convertSliceNumbers(v2, depth+1) - } - return err -} - // convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. // values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}, depth int) error { - if depth > maxDepth { - return fmt.Errorf("exceeded max depth of %d", maxDepth) - } - +func convertMapNumbers(m map[string]interface{}) error { var err error for k, v := range m { switch v := v.(type) { case json.Number: m[k], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v, depth+1) + err = convertMapNumbers(v) case []interface{}: - err = convertSliceNumbers(v, depth+1) + err = convertSliceNumbers(v) } if err != nil { return err @@ -122,20 +89,16 @@ func convertMapNumbers(m map[string]interface{}, depth int) error { // convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. 
// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}, depth int) error { - if depth > maxDepth { - return fmt.Errorf("exceeded max depth of %d", maxDepth) - } - +func convertSliceNumbers(s []interface{}) error { var err error for i, v := range s { switch v := v.(type) { case json.Number: s[i], err = convertNumber(v) case map[string]interface{}: - err = convertMapNumbers(v, depth+1) + err = convertMapNumbers(v) case []interface{}: - err = convertSliceNumbers(v, depth+1) + err = convertSliceNumbers(v) } if err != nil { return err diff --git a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go index d69bf32..2965d5a 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go +++ b/vendor/k8s.io/apimachinery/pkg/util/naming/from_stack.go @@ -82,7 +82,7 @@ var stackCreator = regexp.MustCompile(`(?m)^created by (.*)\n\s+(.*):(\d+) \+0x[ func extractStackCreator() (string, int, bool) { stack := debug.Stack() matches := stackCreator.FindStringSubmatch(string(stack)) - if len(matches) != 4 { + if matches == nil || len(matches) != 4 { return "", 0, false } line, err := strconv.Atoi(matches[3]) diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/http.go b/vendor/k8s.io/apimachinery/pkg/util/net/http.go index 7b64e68..7c2a5e6 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/http.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/http.go @@ -31,8 +31,8 @@ import ( "strconv" "strings" + "github.com/golang/glog" "golang.org/x/net/http2" - "k8s.io/klog" ) // JoinPreservingTrailingSlash does a path.Join of the specified elements, @@ -55,12 +55,6 @@ func JoinPreservingTrailingSlash(elem ...string) string { return result } -// IsTimeout returns true if the given error is a network timeout error -func IsTimeout(err error) bool { - neterr, ok := err.(net.Error) - return ok && neterr != nil && neterr.Timeout() -} - // IsProbableEOF returns true if the given error resembles a connection termination // scenario that would justify assuming that the watch is empty. // These errors are what the Go http stack returns back to us which are general @@ -74,17 +68,14 @@ func IsProbableEOF(err error) bool { if uerr, ok := err.(*url.Error); ok { err = uerr.Err } - msg := err.Error() switch { case err == io.EOF: return true - case msg == "http: can't write HTTP request on broken connection": - return true - case strings.Contains(msg, "http2: server sent GOAWAY and closed the connection"): + case err.Error() == "http: can't write HTTP request on broken connection": return true - case strings.Contains(msg, "connection reset by peer"): + case strings.Contains(err.Error(), "connection reset by peer"): return true - case strings.Contains(strings.ToLower(msg), "use of closed network connection"): + case strings.Contains(strings.ToLower(err.Error()), "use of closed network connection"): return true } return false @@ -107,9 +98,6 @@ func SetOldTransportDefaults(t *http.Transport) *http.Transport { if t.TLSHandshakeTimeout == 0 { t.TLSHandshakeTimeout = defaultTransport.TLSHandshakeTimeout } - if t.IdleConnTimeout == 0 { - t.IdleConnTimeout = defaultTransport.IdleConnTimeout - } return t } @@ -119,30 +107,15 @@ func SetTransportDefaults(t *http.Transport) *http.Transport { t = SetOldTransportDefaults(t) // Allow clients to disable http2 if needed. 
if s := os.Getenv("DISABLE_HTTP2"); len(s) > 0 { - klog.Infof("HTTP2 has been explicitly disabled") - } else if allowsHTTP2(t) { + glog.Infof("HTTP2 has been explicitly disabled") + } else { if err := http2.ConfigureTransport(t); err != nil { - klog.Warningf("Transport failed http2 configuration: %v", err) + glog.Warningf("Transport failed http2 configuration: %v", err) } } return t } -func allowsHTTP2(t *http.Transport) bool { - if t.TLSClientConfig == nil || len(t.TLSClientConfig.NextProtos) == 0 { - // the transport expressed no NextProto preference, allow - return true - } - for _, p := range t.TLSClientConfig.NextProtos { - if p == http2.NextProtoTLS { - // the transport explicitly allowed http/2 - return true - } - } - // the transport explicitly set NextProtos and excluded http/2 - return false -} - type RoundTripperWrapper interface { http.RoundTripper WrappedRoundTripper() http.RoundTripper @@ -212,17 +185,13 @@ func GetHTTPClient(req *http.Request) string { return "unknown" } -// SourceIPs splits the comma separated X-Forwarded-For header and joins it with -// the X-Real-Ip header and/or req.RemoteAddr, ignoring invalid IPs. -// The X-Real-Ip is omitted if it's already present in the X-Forwarded-For chain. -// The req.RemoteAddr is always the last IP in the returned list. -// It returns nil if all of these are empty or invalid. +// SourceIPs splits the comma separated X-Forwarded-For header or returns the X-Real-Ip header or req.RemoteAddr, +// in that order, ignoring invalid IPs. It returns nil if all of these are empty or invalid. func SourceIPs(req *http.Request) []net.IP { - var srcIPs []net.IP - hdr := req.Header // First check the X-Forwarded-For header for requests via proxy. hdrForwardedFor := hdr.Get("X-Forwarded-For") + forwardedForIPs := []net.IP{} if hdrForwardedFor != "" { // X-Forwarded-For can be a csv of IPs in case of multiple proxies. // Use the first valid one. @@ -230,49 +199,38 @@ func SourceIPs(req *http.Request) []net.IP { for _, part := range parts { ip := net.ParseIP(strings.TrimSpace(part)) if ip != nil { - srcIPs = append(srcIPs, ip) + forwardedForIPs = append(forwardedForIPs, ip) } } } + if len(forwardedForIPs) > 0 { + return forwardedForIPs + } // Try the X-Real-Ip header. hdrRealIp := hdr.Get("X-Real-Ip") if hdrRealIp != "" { ip := net.ParseIP(hdrRealIp) - // Only append the X-Real-Ip if it's not already contained in the X-Forwarded-For chain. - if ip != nil && !containsIP(srcIPs, ip) { - srcIPs = append(srcIPs, ip) + if ip != nil { + return []net.IP{ip} } } - // Always include the request Remote Address as it cannot be easily spoofed. - var remoteIP net.IP + // Fallback to Remote Address in request, which will give the correct client IP when there is no proxy. // Remote Address in Go's HTTP server is in the form host:port so we need to split that first. host, _, err := net.SplitHostPort(req.RemoteAddr) if err == nil { - remoteIP = net.ParseIP(host) - } - // Fallback if Remote Address was just IP. - if remoteIP == nil { - remoteIP = net.ParseIP(req.RemoteAddr) + if remoteIP := net.ParseIP(host); remoteIP != nil { + return []net.IP{remoteIP} + } } - // Don't duplicate remote IP if it's already the last address in the chain. - if remoteIP != nil && (len(srcIPs) == 0 || !remoteIP.Equal(srcIPs[len(srcIPs)-1])) { - srcIPs = append(srcIPs, remoteIP) + // Fallback if Remote Address was just IP. 
+ if remoteIP := net.ParseIP(req.RemoteAddr); remoteIP != nil { + return []net.IP{remoteIP} } - return srcIPs -} - -// Checks whether the given IP address is contained in the list of IPs. -func containsIP(ips []net.IP, ip net.IP) bool { - for _, v := range ips { - if v.Equal(ip) { - return true - } - } - return false + return nil } // Extracts and returns the clients IP from the given request. @@ -410,7 +368,7 @@ redirectLoop: resp, err := http.ReadResponse(respReader, nil) if err != nil { // Unable to read the backend response; let the client handle it. - klog.Warningf("Error reading backend response: %v", err) + glog.Warningf("Error reading backend response: %v", err) break redirectLoop } @@ -446,7 +404,7 @@ redirectLoop: // Only follow redirects to the same host. Otherwise, propagate the redirect response back. if requireSameHostRedirects && location.Hostname() != originalLocation.Hostname() { - return nil, nil, fmt.Errorf("hostname mismatch: expected %s, found %s", originalLocation.Hostname(), location.Hostname()) + break redirectLoop } // Reset the connection. diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go index 836494d..0ab9b36 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/interface.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/interface.go @@ -26,7 +26,7 @@ import ( "strings" - "k8s.io/klog" + "github.com/golang/glog" ) type AddressFamily uint @@ -36,18 +36,6 @@ const ( familyIPv6 AddressFamily = 6 ) -type AddressFamilyPreference []AddressFamily - -var ( - preferIPv4 = AddressFamilyPreference{familyIPv4, familyIPv6} - preferIPv6 = AddressFamilyPreference{familyIPv6, familyIPv4} -) - -const ( - // LoopbackInterfaceName is the default name of the loopback interface - LoopbackInterfaceName = "lo" -) - const ( ipv4RouteFile = "/proc/net/route" ipv6RouteFile = "/proc/net/ipv6_route" @@ -65,7 +53,7 @@ type RouteFile struct { parse func(input io.Reader) ([]Route, error) } -// noRoutesError can be returned in case of no routes +// noRoutesError can be returned by ChooseBindAddress() in case of no routes type noRoutesError struct { message string } @@ -205,7 +193,7 @@ func isInterfaceUp(intf *net.Interface) bool { return false } if intf.Flags&net.FlagUp != 0 { - klog.V(4).Infof("Interface %v is up", intf.Name) + glog.V(4).Infof("Interface %v is up", intf.Name) return true } return false @@ -220,20 +208,20 @@ func isLoopbackOrPointToPoint(intf *net.Interface) bool { func getMatchingGlobalIP(addrs []net.Addr, family AddressFamily) (net.IP, error) { if len(addrs) > 0 { for i := range addrs { - klog.V(4).Infof("Checking addr %s.", addrs[i].String()) + glog.V(4).Infof("Checking addr %s.", addrs[i].String()) ip, _, err := net.ParseCIDR(addrs[i].String()) if err != nil { return nil, err } if memberOf(ip, family) { if ip.IsGlobalUnicast() { - klog.V(4).Infof("IP found %v", ip) + glog.V(4).Infof("IP found %v", ip) return ip, nil } else { - klog.V(4).Infof("Non-global unicast address found %v", ip) + glog.V(4).Infof("Non-global unicast address found %v", ip) } } else { - klog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) + glog.V(4).Infof("%v is not an IPv%d address", ip, int(family)) } } @@ -253,20 +241,20 @@ func getIPFromInterface(intfName string, forFamily AddressFamily, nw networkInte if err != nil { return nil, err } - klog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) + glog.V(4).Infof("Interface %q has %d addresses :%v.", intfName, len(addrs), addrs) matchingIP, err 
:= getMatchingGlobalIP(addrs, forFamily) if err != nil { return nil, err } if matchingIP != nil { - klog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) + glog.V(4).Infof("Found valid IPv%d address %v for interface %q.", int(forFamily), matchingIP, intfName) return matchingIP, nil } } return nil, nil } -// memberOf tells if the IP is of the desired family. Used for checking interface addresses. +// memberOF tells if the IP is of the desired family. Used for checking interface addresses. func memberOf(ip net.IP, family AddressFamily) bool { if ip.To4() != nil { return family == familyIPv4 @@ -277,8 +265,8 @@ func memberOf(ip net.IP, family AddressFamily) bool { // chooseIPFromHostInterfaces looks at all system interfaces, trying to find one that is up that // has a global unicast address (non-loopback, non-link local, non-point2point), and returns the IP. -// addressFamilies determines whether it prefers IPv4 or IPv6 -func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { +// Searches for IPv4 addresses, and then IPv6 addresses. +func chooseIPFromHostInterfaces(nw networkInterfacer) (net.IP, error) { intfs, err := nw.Interfaces() if err != nil { return nil, err @@ -286,15 +274,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam if len(intfs) == 0 { return nil, fmt.Errorf("no interfaces found on host.") } - for _, family := range addressFamilies { - klog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + glog.V(4).Infof("Looking for system interface with a global IPv%d address", uint(family)) for _, intf := range intfs { if !isInterfaceUp(&intf) { - klog.V(4).Infof("Skipping: down interface %q", intf.Name) + glog.V(4).Infof("Skipping: down interface %q", intf.Name) continue } if isLoopbackOrPointToPoint(&intf) { - klog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) + glog.V(4).Infof("Skipping: LB or P2P interface %q", intf.Name) continue } addrs, err := nw.Addrs(&intf) @@ -302,7 +290,7 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam return nil, err } if len(addrs) == 0 { - klog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) + glog.V(4).Infof("Skipping: no addresses on interface %q", intf.Name) continue } for _, addr := range addrs { @@ -311,15 +299,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam return nil, fmt.Errorf("Unable to parse CIDR for interface %q: %s", intf.Name, err) } if !memberOf(ip, family) { - klog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) + glog.V(4).Infof("Skipping: no address family match for %q on interface %q.", ip, intf.Name) continue } // TODO: Decide if should open up to allow IPv6 LLAs in future. if !ip.IsGlobalUnicast() { - klog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) + glog.V(4).Infof("Skipping: non-global address %q on interface %q.", ip, intf.Name) continue } - klog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) + glog.V(4).Infof("Found global unicast address %q on interface %q.", ip, intf.Name) return ip, nil } } @@ -333,19 +321,15 @@ func chooseIPFromHostInterfaces(nw networkInterfacer, addressFamilies AddressFam // IP of the interface with a gateway on it (with priority given to IPv4). 
For a node // with no internet connection, it returns error. func ChooseHostInterface() (net.IP, error) { - return chooseHostInterface(preferIPv4) -} - -func chooseHostInterface(addressFamilies AddressFamilyPreference) (net.IP, error) { var nw networkInterfacer = networkInterface{} if _, err := os.Stat(ipv4RouteFile); os.IsNotExist(err) { - return chooseIPFromHostInterfaces(nw, addressFamilies) + return chooseIPFromHostInterfaces(nw) } routes, err := getAllDefaultRoutes() if err != nil { return nil, err } - return chooseHostInterfaceFromRoute(routes, nw, addressFamilies) + return chooseHostInterfaceFromRoute(routes, nw) } // networkInterfacer defines an interface for several net library functions. Production @@ -393,43 +377,36 @@ func getAllDefaultRoutes() ([]Route, error) { } // chooseHostInterfaceFromRoute cycles through each default route provided, looking for a -// global IP address from the interface for the route. addressFamilies determines whether it -// prefers IPv4 or IPv6 -func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer, addressFamilies AddressFamilyPreference) (net.IP, error) { - for _, family := range addressFamilies { - klog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) +// global IP address from the interface for the route. Will first look all each IPv4 route for +// an IPv4 IP, and then will look at each IPv6 route for an IPv6 IP. +func chooseHostInterfaceFromRoute(routes []Route, nw networkInterfacer) (net.IP, error) { + for _, family := range []AddressFamily{familyIPv4, familyIPv6} { + glog.V(4).Infof("Looking for default routes with IPv%d addresses", uint(family)) for _, route := range routes { if route.Family != family { continue } - klog.V(4).Infof("Default route transits interface %q", route.Interface) + glog.V(4).Infof("Default route transits interface %q", route.Interface) finalIP, err := getIPFromInterface(route.Interface, family, nw) if err != nil { return nil, err } if finalIP != nil { - klog.V(4).Infof("Found active IP %v ", finalIP) + glog.V(4).Infof("Found active IP %v ", finalIP) return finalIP, nil } } } - klog.V(4).Infof("No active IP found by looking at default routes") + glog.V(4).Infof("No active IP found by looking at default routes") return nil, fmt.Errorf("unable to select an IP from default routes.") } -// ResolveBindAddress returns the IP address of a daemon, based on the given bindAddress: -// If bindAddress is unset, it returns the host's default IP, as with ChooseHostInterface(). -// If bindAddress is unspecified or loopback, it returns the default IP of the same -// address family as bindAddress. -// Otherwise, it just returns bindAddress. -func ResolveBindAddress(bindAddress net.IP) (net.IP, error) { - addressFamilies := preferIPv4 - if bindAddress != nil && memberOf(bindAddress, familyIPv6) { - addressFamilies = preferIPv6 - } - +// If bind-address is usable, return it directly +// If bind-address is not usable (unset, 0.0.0.0, or loopback), we will use the host's default +// interface. +func ChooseBindAddress(bindAddress net.IP) (net.IP, error) { if bindAddress == nil || bindAddress.IsUnspecified() || bindAddress.IsLoopback() { - hostIP, err := chooseHostInterface(addressFamilies) + hostIP, err := ChooseHostInterface() if err != nil { return nil, err } @@ -437,21 +414,3 @@ func ResolveBindAddress(bindAddress net.IP) (net.IP, error) { } return bindAddress, nil } - -// ChooseBindAddressForInterface choose a global IP for a specific interface, with priority given to IPv4. 
-// This is required in case of network setups where default routes are present, but network -// interfaces use only link-local addresses (e.g. as described in RFC5549). -// e.g when using BGP to announce a host IP over link-local ip addresses and this ip address is attached to the lo interface. -func ChooseBindAddressForInterface(intfName string) (net.IP, error) { - var nw networkInterfacer = networkInterface{} - for _, family := range preferIPv4 { - ip, err := getIPFromInterface(intfName, family, nw) - if err != nil { - return nil, err - } - if ip != nil { - return ip, nil - } - } - return nil, fmt.Errorf("unable to select an IP from %s network interface", intfName) -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/net/util.go b/vendor/k8s.io/apimachinery/pkg/util/net/util.go index 2e7cb94..8344d10 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/net/util.go +++ b/vendor/k8s.io/apimachinery/pkg/util/net/util.go @@ -54,20 +54,3 @@ func IsConnectionReset(err error) bool { } return false } - -// Returns if the given err is "connection refused" error -func IsConnectionRefused(err error) bool { - if urlErr, ok := err.(*url.Error); ok { - err = urlErr.Err - } - if opErr, ok := err.(*net.OpError); ok { - err = opErr.Err - } - if osErr, ok := err.(*os.SyscallError); ok { - err = osErr.Err - } - if errno, ok := err.(syscall.Errno); ok && errno == syscall.ECONNREFUSED { - return true - } - return false -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go index 1428443..76e203b 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go +++ b/vendor/k8s.io/apimachinery/pkg/util/runtime/runtime.go @@ -18,12 +18,11 @@ package runtime import ( "fmt" - "net/http" "runtime" "sync" "time" - "k8s.io/klog" + "github.com/golang/glog" ) var ( @@ -41,7 +40,11 @@ var PanicHandlers = []func(interface{}){logPanic} // called in case of panic. HandleCrash actually crashes, after calling the // handlers and logging the panic message. // -// E.g., you can provide one or more additional handlers for something like shutting down go routines gracefully. +// TODO: remove this function. We are switching to a world where it's safe for +// apiserver to panic, since it will be restarted by kubelet. At the beginning +// of the Kubernetes project, nothing was going to restart apiserver and so +// catching panics was important. But it's actually much simpler for monitoring +// software if we just exit when an unexpected panic happens. func HandleCrash(additionalHandlers ...func(interface{})) { if r := recover(); r != nil { for _, fn := range PanicHandlers { @@ -57,26 +60,27 @@ func HandleCrash(additionalHandlers ...func(interface{})) { } } -// logPanic logs the caller tree when a panic occurs (except in the special case of http.ErrAbortHandler). +// logPanic logs the caller tree when a panic occurs. func logPanic(r interface{}) { - if r == http.ErrAbortHandler { - // honor the http.ErrAbortHandler sentinel panic value: - // ErrAbortHandler is a sentinel panic value to abort a handler. - // While any panic from ServeHTTP aborts the response to the client, - // panicking with ErrAbortHandler also suppresses logging of a stack trace to the server's error log. - return - } - - // Same as stdlib http server code. 
Manually allocate stack trace buffer size - // to prevent excessively large logs - const size = 64 << 10 - stacktrace := make([]byte, size) - stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + callers := getCallers(r) if _, ok := r.(string); ok { - klog.Errorf("Observed a panic: %s\n%s", r, stacktrace) + glog.Errorf("Observed a panic: %s\n%v", r, callers) } else { - klog.Errorf("Observed a panic: %#v (%v)\n%s", r, r, stacktrace) + glog.Errorf("Observed a panic: %#v (%v)\n%v", r, r, callers) + } +} + +func getCallers(r interface{}) string { + callers := "" + for i := 0; true; i++ { + _, file, line, ok := runtime.Caller(i) + if !ok { + break + } + callers = callers + fmt.Sprintf("%v:%v\n", file, line) } + + return callers } // ErrorHandlers is a list of functions which will be invoked when an unreturnable @@ -111,7 +115,7 @@ func HandleError(err error) { // logError prints an error with the call stack of the location it was reported func logError(err error) { - klog.ErrorDepth(2, err) + glog.ErrorDepth(2, err) } type rudimentaryErrorBackoff struct { @@ -151,17 +155,13 @@ func GetCaller() string { // handlers to handle errors and panics the same way. func RecoverFromPanic(err *error) { if r := recover(); r != nil { - // Same as stdlib http server code. Manually allocate stack trace buffer size - // to prevent excessively large logs - const size = 64 << 10 - stacktrace := make([]byte, size) - stacktrace = stacktrace[:runtime.Stack(stacktrace, false)] + callers := getCallers(r) *err = fmt.Errorf( - "recovered from panic %q. (err=%v) Call stack:\n%s", + "recovered from panic %q. (err=%v) Call stack:\n%v", r, *err, - stacktrace) + callers) } } diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go index 9bfa85d..766f450 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/byte.go @@ -46,19 +46,17 @@ func ByteKeySet(theMap interface{}) Byte { } // Insert adds items to the set. -func (s Byte) Insert(items ...byte) Byte { +func (s Byte) Insert(items ...byte) { for _, item := range items { s[item] = Empty{} } - return s } // Delete removes all items from the set. -func (s Byte) Delete(items ...byte) Byte { +func (s Byte) Delete(items ...byte) { for _, item := range items { delete(s, item) } - return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go index 88bd709..a0a513c 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int.go @@ -46,19 +46,17 @@ func IntKeySet(theMap interface{}) Int { } // Insert adds items to the set. -func (s Int) Insert(items ...int) Int { +func (s Int) Insert(items ...int) { for _, item := range items { s[item] = Empty{} } - return s } // Delete removes all items from the set. -func (s Int) Delete(items ...int) Int { +func (s Int) Delete(items ...int) { for _, item := range items { delete(s, item) } - return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go deleted file mode 100644 index 96a4855..0000000 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int32.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by set-gen. DO NOT EDIT. - -package sets - -import ( - "reflect" - "sort" -) - -// sets.Int32 is a set of int32s, implemented via map[int32]struct{} for minimal memory consumption. -type Int32 map[int32]Empty - -// NewInt32 creates a Int32 from a list of values. -func NewInt32(items ...int32) Int32 { - ss := Int32{} - ss.Insert(items...) - return ss -} - -// Int32KeySet creates a Int32 from a keys of a map[int32](? extends interface{}). -// If the value passed in is not actually a map, this will panic. -func Int32KeySet(theMap interface{}) Int32 { - v := reflect.ValueOf(theMap) - ret := Int32{} - - for _, keyValue := range v.MapKeys() { - ret.Insert(keyValue.Interface().(int32)) - } - return ret -} - -// Insert adds items to the set. -func (s Int32) Insert(items ...int32) Int32 { - for _, item := range items { - s[item] = Empty{} - } - return s -} - -// Delete removes all items from the set. -func (s Int32) Delete(items ...int32) Int32 { - for _, item := range items { - delete(s, item) - } - return s -} - -// Has returns true if and only if item is contained in the set. -func (s Int32) Has(item int32) bool { - _, contained := s[item] - return contained -} - -// HasAll returns true if and only if all items are contained in the set. -func (s Int32) HasAll(items ...int32) bool { - for _, item := range items { - if !s.Has(item) { - return false - } - } - return true -} - -// HasAny returns true if any items are contained in the set. -func (s Int32) HasAny(items ...int32) bool { - for _, item := range items { - if s.Has(item) { - return true - } - } - return false -} - -// Difference returns a set of objects that are not in s2 -// For example: -// s1 = {a1, a2, a3} -// s2 = {a1, a2, a4, a5} -// s1.Difference(s2) = {a3} -// s2.Difference(s1) = {a4, a5} -func (s Int32) Difference(s2 Int32) Int32 { - result := NewInt32() - for key := range s { - if !s2.Has(key) { - result.Insert(key) - } - } - return result -} - -// Union returns a new set which includes items in either s1 or s2. -// For example: -// s1 = {a1, a2} -// s2 = {a3, a4} -// s1.Union(s2) = {a1, a2, a3, a4} -// s2.Union(s1) = {a1, a2, a3, a4} -func (s1 Int32) Union(s2 Int32) Int32 { - result := NewInt32() - for key := range s1 { - result.Insert(key) - } - for key := range s2 { - result.Insert(key) - } - return result -} - -// Intersection returns a new set which includes the item in BOTH s1 and s2 -// For example: -// s1 = {a1, a2} -// s2 = {a2, a3} -// s1.Intersection(s2) = {a2} -func (s1 Int32) Intersection(s2 Int32) Int32 { - var walk, other Int32 - result := NewInt32() - if s1.Len() < s2.Len() { - walk = s1 - other = s2 - } else { - walk = s2 - other = s1 - } - for key := range walk { - if other.Has(key) { - result.Insert(key) - } - } - return result -} - -// IsSuperset returns true if and only if s1 is a superset of s2. 
-func (s1 Int32) IsSuperset(s2 Int32) bool { - for item := range s2 { - if !s1.Has(item) { - return false - } - } - return true -} - -// Equal returns true if and only if s1 is equal (as a set) to s2. -// Two sets are equal if their membership is identical. -// (In practice, this means same elements, order doesn't matter) -func (s1 Int32) Equal(s2 Int32) bool { - return len(s1) == len(s2) && s1.IsSuperset(s2) -} - -type sortableSliceOfInt32 []int32 - -func (s sortableSliceOfInt32) Len() int { return len(s) } -func (s sortableSliceOfInt32) Less(i, j int) bool { return lessInt32(s[i], s[j]) } -func (s sortableSliceOfInt32) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// List returns the contents as a sorted int32 slice. -func (s Int32) List() []int32 { - res := make(sortableSliceOfInt32, 0, len(s)) - for key := range s { - res = append(res, key) - } - sort.Sort(res) - return []int32(res) -} - -// UnsortedList returns the slice with contents in random order. -func (s Int32) UnsortedList() []int32 { - res := make([]int32, 0, len(s)) - for key := range s { - res = append(res, key) - } - return res -} - -// Returns a single element from the set. -func (s Int32) PopAny() (int32, bool) { - for key := range s { - s.Delete(key) - return key, true - } - var zeroValue int32 - return zeroValue, false -} - -// Len returns the size of the set. -func (s Int32) Len() int { - return len(s) -} - -func lessInt32(lhs, rhs int32) bool { - return lhs < rhs -} diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go index b375a1b..9ca9af0 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/int64.go @@ -46,19 +46,17 @@ func Int64KeySet(theMap interface{}) Int64 { } // Insert adds items to the set. -func (s Int64) Insert(items ...int64) Int64 { +func (s Int64) Insert(items ...int64) { for _, item := range items { s[item] = Empty{} } - return s } // Delete removes all items from the set. -func (s Int64) Delete(items ...int64) Int64 { +func (s Int64) Delete(items ...int64) { for _, item := range items { delete(s, item) } - return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go index e6f37db..ba00ad7 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/sets/string.go +++ b/vendor/k8s.io/apimachinery/pkg/util/sets/string.go @@ -46,19 +46,17 @@ func StringKeySet(theMap interface{}) String { } // Insert adds items to the set. -func (s String) Insert(items ...string) String { +func (s String) Insert(items ...string) { for _, item := range items { s[item] = Empty{} } - return s } // Delete removes all items from the set. -func (s String) Delete(items ...string) String { +func (s String) Delete(items ...string) { for _, item := range items { delete(s, item) } - return s } // Has returns true if and only if item is contained in the set. diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go index 0cd5d65..4767fd1 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/field/errors.go @@ -116,10 +116,6 @@ const ( // This is similar to ErrorTypeInvalid, but the error will not include the // too-long value. See TooLong(). ErrorTypeTooLong ErrorType = "FieldValueTooLong" - // ErrorTypeTooMany is used to report "too many". 
This is used to - // report that a given list has too many items. This is similar to FieldValueTooLong, - // but the error indicates quantity instead of length. - ErrorTypeTooMany ErrorType = "FieldValueTooMany" // ErrorTypeInternal is used to report other errors that are not related // to user input. See InternalError(). ErrorTypeInternal ErrorType = "InternalError" @@ -142,8 +138,6 @@ func (t ErrorType) String() string { return "Forbidden" case ErrorTypeTooLong: return "Too long" - case ErrorTypeTooMany: - return "Too many" case ErrorTypeInternal: return "Internal error" default: @@ -204,14 +198,7 @@ func Forbidden(field *Path, detail string) *Error { // Invalid, but the returned error will not include the too-long // value. func TooLong(field *Path, value interface{}, maxLength int) *Error { - return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d bytes", maxLength)} -} - -// TooMany returns a *Error indicating "too many". This is used to -// report that a given list has too many items. This is similar to TooLong, -// but the returned error indicates quantity instead of length. -func TooMany(field *Path, actualQuantity, maxQuantity int) *Error { - return &Error{ErrorTypeTooMany, field.String(), actualQuantity, fmt.Sprintf("must have at most %d items", maxQuantity)} + return &Error{ErrorTypeTooLong, field.String(), value, fmt.Sprintf("must have at most %d characters", maxLength)} } // InternalError returns a *Error indicating "internal error". This is used diff --git a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go index 915231f..e0d1715 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go +++ b/vendor/k8s.io/apimachinery/pkg/util/validation/validation.go @@ -70,11 +70,7 @@ func IsQualifiedName(value string) []string { return errs } -// IsFullyQualifiedName checks if the name is fully qualified. This is similar -// to IsFullyQualifiedDomainName but requires a minimum of 3 segments instead of -// 2 and does not accept a trailing . as valid. -// TODO: This function is deprecated and preserved until all callers migrate to -// IsFullyQualifiedDomainName; please don't add new callers. +// IsFullyQualifiedName checks if the name is fully qualified. func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { var allErrors field.ErrorList if len(name) == 0 { @@ -89,68 +85,8 @@ func IsFullyQualifiedName(fldPath *field.Path, name string) field.ErrorList { return allErrors } -// IsFullyQualifiedDomainName checks if the domain name is fully qualified. This -// is similar to IsFullyQualifiedName but only requires a minimum of 2 segments -// instead of 3 and accepts a trailing . as valid. -func IsFullyQualifiedDomainName(fldPath *field.Path, name string) field.ErrorList { - var allErrors field.ErrorList - if len(name) == 0 { - return append(allErrors, field.Required(fldPath, "")) - } - if strings.HasSuffix(name, ".") { - name = name[:len(name)-1] - } - if errs := IsDNS1123Subdomain(name); len(errs) > 0 { - return append(allErrors, field.Invalid(fldPath, name, strings.Join(errs, ","))) - } - if len(strings.Split(name, ".")) < 2 { - return append(allErrors, field.Invalid(fldPath, name, "should be a domain with at least two segments separated by dots")) - } - return allErrors -} - -// Allowed characters in an HTTP Path as defined by RFC 3986. 
A HTTP path may -// contain: -// * unreserved characters (alphanumeric, '-', '.', '_', '~') -// * percent-encoded octets -// * sub-delims ("!", "$", "&", "'", "(", ")", "*", "+", ",", ";", "=") -// * a colon character (":") -const httpPathFmt string = `[A-Za-z0-9/\-._~%!$&'()*+,;=:]+` - -var httpPathRegexp = regexp.MustCompile("^" + httpPathFmt + "$") - -// IsDomainPrefixedPath checks if the given string is a domain-prefixed path -// (e.g. acme.io/foo). All characters before the first "/" must be a valid -// subdomain as defined by RFC 1123. All characters trailing the first "/" must -// be valid HTTP Path characters as defined by RFC 3986. -func IsDomainPrefixedPath(fldPath *field.Path, dpPath string) field.ErrorList { - var allErrs field.ErrorList - if len(dpPath) == 0 { - return append(allErrs, field.Required(fldPath, "")) - } - - segments := strings.SplitN(dpPath, "/", 2) - if len(segments) != 2 || len(segments[0]) == 0 || len(segments[1]) == 0 { - return append(allErrs, field.Invalid(fldPath, dpPath, "must be a domain-prefixed path (such as \"acme.io/foo\")")) - } - - host := segments[0] - for _, err := range IsDNS1123Subdomain(host) { - allErrs = append(allErrs, field.Invalid(fldPath, host, err)) - } - - path := segments[1] - if !httpPathRegexp.MatchString(path) { - return append(allErrs, field.Invalid(fldPath, path, RegexError("Invalid path", httpPathFmt))) - } - - return allErrs -} - const labelValueFmt string = "(" + qualifiedNameFmt + ")?" const labelValueErrMsg string = "a valid label must be an empty string or consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character" - -// LabelValueMaxLength is a label's max length const LabelValueMaxLength int = 63 var labelValueRegexp = regexp.MustCompile("^" + labelValueFmt + "$") @@ -171,8 +107,6 @@ func IsValidLabelValue(value string) []string { const dns1123LabelFmt string = "[a-z0-9]([-a-z0-9]*[a-z0-9])?" const dns1123LabelErrMsg string = "a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character" - -// DNS1123LabelMaxLength is a label's max length in DNS (RFC 1123) const DNS1123LabelMaxLength int = 63 var dns1123LabelRegexp = regexp.MustCompile("^" + dns1123LabelFmt + "$") @@ -192,8 +126,6 @@ func IsDNS1123Label(value string) []string { const dns1123SubdomainFmt string = dns1123LabelFmt + "(\\." + dns1123LabelFmt + ")*" const dns1123SubdomainErrorMsg string = "a DNS-1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character" - -// DNS1123SubdomainMaxLength is a subdomain's max length in DNS (RFC 1123) const DNS1123SubdomainMaxLength int = 253 var dns1123SubdomainRegexp = regexp.MustCompile("^" + dns1123SubdomainFmt + "$") @@ -213,8 +145,6 @@ func IsDNS1123Subdomain(value string) []string { const dns1035LabelFmt string = "[a-z]([-a-z0-9]*[a-z0-9])?" const dns1035LabelErrMsg string = "a DNS-1035 label must consist of lower case alphanumeric characters or '-', start with an alphabetic character, and end with an alphanumeric character" - -// DNS1035LabelMaxLength is a label's max length in DNS (RFC 1035) const DNS1035LabelMaxLength int = 63 var dns1035LabelRegexp = regexp.MustCompile("^" + dns1035LabelFmt + "$") @@ -347,32 +277,11 @@ func IsValidIP(value string) []string { return nil } -// IsValidIPv4Address tests that the argument is a valid IPv4 address. 
-func IsValidIPv4Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := net.ParseIP(value) - if ip == nil || ip.To4() == nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv4 address")) - } - return allErrors -} - -// IsValidIPv6Address tests that the argument is a valid IPv6 address. -func IsValidIPv6Address(fldPath *field.Path, value string) field.ErrorList { - var allErrors field.ErrorList - ip := net.ParseIP(value) - if ip == nil || ip.To4() != nil { - allErrors = append(allErrors, field.Invalid(fldPath, value, "must be a valid IPv6 address")) - } - return allErrors -} - const percentFmt string = "[0-9]+%" const percentErrMsg string = "a valid percent string must be a numeric string followed by an ending '%'" var percentRegexp = regexp.MustCompile("^" + percentFmt + "$") -// IsValidPercent checks that string is in the form of a percentage func IsValidPercent(percent string) []string { if !percentRegexp.MatchString(percent) { return []string{RegexError(percentErrMsg, percentFmt, "1%", "93%")} @@ -482,13 +391,13 @@ func hasChDirPrefix(value string) []string { return errs } -// IsValidSocketAddr checks that string represents a valid socket address +// IsSocketAddr checks that a string conforms is a valid socket address // as defined in RFC 789. (e.g 0.0.0.0:10254 or [::]:10254)) func IsValidSocketAddr(value string) []string { var errs []string ip, port, err := net.SplitHostPort(value) if err != nil { - errs = append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") + return append(errs, "must be a valid socket address format, (e.g. 0.0.0.0:10254 or [::]:10254)") return errs } portInt, _ := strconv.Atoi(port) diff --git a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go index a9a3853..3cd8551 100644 --- a/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go +++ b/vendor/k8s.io/apimachinery/pkg/util/yaml/decoder.go @@ -26,8 +26,8 @@ import ( "strings" "unicode" - "k8s.io/klog" - "sigs.k8s.io/yaml" + "github.com/ghodss/yaml" + "github.com/golang/glog" ) // ToJSON converts a single YAML document into a JSON document @@ -217,9 +217,11 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if d.decoder == nil { buffer, origData, isJSON := GuessJSONStream(d.r, d.bufferSize) if isJSON { + glog.V(4).Infof("decoding stream as JSON") d.decoder = json.NewDecoder(buffer) d.rawData = origData } else { + glog.V(4).Infof("decoding stream as YAML") d.decoder = NewYAMLToJSONDecoder(buffer) } } @@ -228,7 +230,7 @@ func (d *YAMLOrJSONDecoder) Decode(into interface{}) error { if syntax, ok := err.(*json.SyntaxError); ok { data, readErr := ioutil.ReadAll(jsonDecoder.Buffered()) if readErr != nil { - klog.V(4).Infof("reading stream failed: %v", readErr) + glog.V(4).Infof("reading stream failed: %v", readErr) } js := string(data) diff --git a/vendor/k8s.io/apimachinery/pkg/version/doc.go b/vendor/k8s.io/apimachinery/pkg/version/doc.go index 29574fd..5e77af7 100644 --- a/vendor/k8s.io/apimachinery/pkg/version/doc.go +++ b/vendor/k8s.io/apimachinery/pkg/version/doc.go @@ -14,7 +14,6 @@ See the License for the specific language governing permissions and limitations under the License. */ -// +k8s:openapi-gen=true - // Package version supplies the type for version information collected at build time. 
+// +k8s:openapi-gen=true package version // import "k8s.io/apimachinery/pkg/version" diff --git a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go index 4269a83..93bb1cd 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/streamwatcher.go @@ -17,12 +17,10 @@ limitations under the License. package watch import ( - "fmt" "io" "sync" - "k8s.io/klog" - + "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/util/net" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -41,28 +39,19 @@ type Decoder interface { Close() } -// Reporter hides the details of how an error is turned into a runtime.Object for -// reporting on a watch stream since this package may not import a higher level report. -type Reporter interface { - // AsObject must convert err into a valid runtime.Object for the watch stream. - AsObject(err error) runtime.Object -} - // StreamWatcher turns any stream for which you can write a Decoder interface // into a watch.Interface. type StreamWatcher struct { sync.Mutex - source Decoder - reporter Reporter - result chan Event - stopped bool + source Decoder + result chan Event + stopped bool } // NewStreamWatcher creates a StreamWatcher from the given decoder. -func NewStreamWatcher(d Decoder, r Reporter) *StreamWatcher { +func NewStreamWatcher(d Decoder) *StreamWatcher { sw := &StreamWatcher{ - source: d, - reporter: r, + source: d, // It's easy for a consumer to add buffering via an extra // goroutine/channel, but impossible for them to remove it, // so nonbuffered is better. @@ -111,15 +100,13 @@ func (sw *StreamWatcher) receive() { case io.EOF: // watch closed normally case io.ErrUnexpectedEOF: - klog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) + glog.V(1).Infof("Unexpected EOF during watch stream event decoding: %v", err) default: - if net.IsProbableEOF(err) || net.IsTimeout(err) { - klog.V(5).Infof("Unable to decode an event from the watch stream: %v", err) + msg := "Unable to decode an event from the watch stream: %v" + if net.IsProbableEOF(err) { + glog.V(5).Infof(msg, err) } else { - sw.result <- Event{ - Type: Error, - Object: sw.reporter.AsObject(fmt.Errorf("unable to decode an event from the watch stream: %v", err)), - } + glog.Errorf(msg, err) } } return diff --git a/vendor/k8s.io/apimachinery/pkg/watch/watch.go b/vendor/k8s.io/apimachinery/pkg/watch/watch.go index 988aba3..a627d1d 100644 --- a/vendor/k8s.io/apimachinery/pkg/watch/watch.go +++ b/vendor/k8s.io/apimachinery/pkg/watch/watch.go @@ -20,7 +20,7 @@ import ( "fmt" "sync" - "k8s.io/klog" + "github.com/golang/glog" "k8s.io/apimachinery/pkg/runtime" ) @@ -44,7 +44,6 @@ const ( Added EventType = "ADDED" Modified EventType = "MODIFIED" Deleted EventType = "DELETED" - Bookmark EventType = "BOOKMARK" Error EventType = "ERROR" DefaultChanSize int32 = 100 @@ -58,10 +57,6 @@ type Event struct { // Object is: // * If Type is Added or Modified: the new state of the object. // * If Type is Deleted: the state of the object immediately before deletion. - // * If Type is Bookmark: the object (instance of a type being watched) where - // only ResourceVersion field is set. On successful restart of watch from a - // bookmark resourceVersion, client is guaranteed to not get repeat event - // nor miss any events. // * If Type is Error: *api.Status is recommended; other types may make sense // depending on context. 
Object runtime.Object @@ -90,7 +85,7 @@ func (w emptyWatch) ResultChan() <-chan Event { // FakeWatcher lets you test anything that consumes a watch.Interface; threadsafe. type FakeWatcher struct { result chan Event - stopped bool + Stopped bool sync.Mutex } @@ -110,24 +105,24 @@ func NewFakeWithChanSize(size int, blocking bool) *FakeWatcher { func (f *FakeWatcher) Stop() { f.Lock() defer f.Unlock() - if !f.stopped { - klog.V(4).Infof("Stopping fake watcher.") + if !f.Stopped { + glog.V(4).Infof("Stopping fake watcher.") close(f.result) - f.stopped = true + f.Stopped = true } } func (f *FakeWatcher) IsStopped() bool { f.Lock() defer f.Unlock() - return f.stopped + return f.Stopped } // Reset prepares the watcher to be reused. func (f *FakeWatcher) Reset() { f.Lock() defer f.Unlock() - f.stopped = false + f.Stopped = false f.result = make(chan Event) } @@ -178,7 +173,7 @@ func (f *RaceFreeFakeWatcher) Stop() { f.Lock() defer f.Unlock() if !f.Stopped { - klog.V(4).Infof("Stopping fake watcher.") + glog.V(4).Infof("Stopping fake watcher.") close(f.result) f.Stopped = true } diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml deleted file mode 100644 index 5677664..0000000 --- a/vendor/k8s.io/klog/.travis.yml +++ /dev/null @@ -1,16 +0,0 @@ -language: go -go_import_path: k8s.io/klog -dist: xenial -go: - - 1.9.x - - 1.10.x - - 1.11.x - - 1.12.x -script: - - go get -t -v ./... - - diff -u <(echo -n) <(gofmt -d .) - - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . || go vet . - - go test -v -race ./... -install: - - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/CONTRIBUTING.md b/vendor/k8s.io/klog/CONTRIBUTING.md deleted file mode 100644 index 574a56a..0000000 --- a/vendor/k8s.io/klog/CONTRIBUTING.md +++ /dev/null @@ -1,22 +0,0 @@ -# Contributing Guidelines - -Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: - -_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ - -## Getting Started - -We have full documentation on how to get started contributing here: - -- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests -- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) -- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers - -## Mentorship - -- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
- -## Contact Information - -- [Slack](https://kubernetes.slack.com/messages/sig-architecture) -- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) diff --git a/vendor/k8s.io/klog/LICENSE b/vendor/k8s.io/klog/LICENSE deleted file mode 100644 index 37ec93a..0000000 --- a/vendor/k8s.io/klog/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. 
- -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. 
- -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. - -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/klog/OWNERS b/vendor/k8s.io/klog/OWNERS deleted file mode 100644 index 380e514..0000000 --- a/vendor/k8s.io/klog/OWNERS +++ /dev/null @@ -1,19 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners -reviewers: - - jayunit100 - - hoegaarden - - andyxning - - neolit123 - - pohly - - yagonobre - - vincepri - - detiber -approvers: - - dims - - thockin - - justinsb - - tallclair - - piosz - - brancz - - DirectXMan12 - - lavalamp diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md deleted file mode 100644 index 841468b..0000000 --- a/vendor/k8s.io/klog/README.md +++ /dev/null @@ -1,97 +0,0 @@ -klog -==== - -klog is a permanent fork of https://github.com/golang/glog. - -## Why was klog created? - -The decision to create klog was one that wasn't made lightly, but it was necessary due to some -drawbacks that are present in [glog](https://github.com/golang/glog). Ultimately, the fork was created due to glog not being under active development; this can be seen in the glog README: - -> The code in this repo [...] is not itself under development - -This makes us unable to solve many use cases without a fork. The factors that contributed to needing feature development are listed below: - - * `glog` [presents a lot "gotchas"](https://github.com/kubernetes/kubernetes/issues/61006) and introduces challenges in containerized environments, all of which aren't well documented. - * `glog` doesn't provide an easy way to test logs, which detracts from the stability of software using it - * A long term goal is to implement a logging interface that allows us to add context, change output format, etc. - -Historical context is available here: - - * https://github.com/kubernetes/kubernetes/issues/61006 - * https://github.com/kubernetes/kubernetes/issues/70264 - * https://groups.google.com/forum/#!msg/kubernetes-sig-architecture/wCWiWf3Juzs/hXRVBH90CgAJ - * https://groups.google.com/forum/#!msg/kubernetes-dev/7vnijOMhLS0/1oRiNtigBgAJ - ----- - -How to use klog -=============== -- Replace imports for `github.com/golang/glog` with `k8s.io/klog` -- Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags -- You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) -- If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. (See `examples/set_output/usage_set_output.go`) -- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) - -### Coexisting with glog -This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. 
- -## Community, discussion, contribution, and support - -Learn how to engage with the Kubernetes community on the [community page](http://kubernetes.io/community/). - -You can reach the maintainers of this project at: - -- [Slack](https://kubernetes.slack.com/messages/sig-architecture) -- [Mailing List](https://groups.google.com/forum/#!forum/kubernetes-sig-architecture) - -### Code of conduct - -Participation in the Kubernetes community is governed by the [Kubernetes Code of Conduct](code-of-conduct.md). - ----- - -glog -==== - -Leveled execution logs for Go. - -This is an efficient pure Go implementation of leveled logs in the -manner of the open source C++ package - https://github.com/google/glog - -By binding methods to booleans it is possible to use the log package -without paying the expense of evaluating the arguments to the log. -Through the -vmodule flag, the package also provides fine-grained -control over logging at the file level. - -The comment from glog.go introduces the ideas: - - Package glog implements logging analogous to the Google-internal - C++ INFO/ERROR/V setup. It provides functions Info, Warning, - Error, Fatal, plus formatting variants such as Infof. It - also provides V-style logging controlled by the -v and - -vmodule=file=2 flags. - - Basic examples: - - glog.Info("Prepare to repel boarders") - - glog.Fatalf("Initialization failed: %s", err) - - See the documentation for the V function for an explanation - of these examples: - - if glog.V(2) { - glog.Info("Starting transaction...") - } - - glog.V(2).Infoln("Processed", nItems, "elements") - - -The repository contains an open source version of the log package -used inside Google. The master copy of the source lives inside -Google, not here. The code in this repo is for export only and is not itself -under development. Feature requests will be ignored. - -Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/k8s.io/klog/RELEASE.md b/vendor/k8s.io/klog/RELEASE.md deleted file mode 100644 index b53eb96..0000000 --- a/vendor/k8s.io/klog/RELEASE.md +++ /dev/null @@ -1,9 +0,0 @@ -# Release Process - -The `klog` is released on an as-needed basis. The process is as follows: - -1. An issue is proposing a new release with a changelog since the last release -1. All [OWNERS](OWNERS) must LGTM this release -1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` -1. The release issue is closed -1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/k8s.io/klog/SECURITY_CONTACTS b/vendor/k8s.io/klog/SECURITY_CONTACTS deleted file mode 100644 index 6128a58..0000000 --- a/vendor/k8s.io/klog/SECURITY_CONTACTS +++ /dev/null @@ -1,20 +0,0 @@ -# Defined below are the security contacts for this repo. -# -# They are the contact point for the Product Security Committee to reach out -# to for triaging and handling of incoming issues. -# -# The below names agree to abide by the -# [Embargo Policy](https://git.k8s.io/security/private-distributors-list.md#embargo-policy) -# and will be removed and replaced if they violate that agreement. 
-# -# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://kubernetes.io/security/ - -dims -thockin -justinsb -tallclair -piosz -brancz -DirectXMan12 -lavalamp diff --git a/vendor/k8s.io/klog/code-of-conduct.md b/vendor/k8s.io/klog/code-of-conduct.md deleted file mode 100644 index 0d15c00..0000000 --- a/vendor/k8s.io/klog/code-of-conduct.md +++ /dev/null @@ -1,3 +0,0 @@ -# Kubernetes Community Code of Conduct - -Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod deleted file mode 100644 index 3877d85..0000000 --- a/vendor/k8s.io/klog/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module k8s.io/klog - -go 1.12 - -require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum deleted file mode 100644 index fb64d27..0000000 --- a/vendor/k8s.io/klog/go.sum +++ /dev/null @@ -1,2 +0,0 @@ -github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go deleted file mode 100644 index 2712ce0..0000000 --- a/vendor/k8s.io/klog/klog.go +++ /dev/null @@ -1,1308 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package klog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. -// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as -// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. -// -// Basic examples: -// -// klog.Info("Prepare to repel boarders") -// -// klog.Fatalf("Initialization failed: %s", err) -// -// See the documentation for the V function for an explanation of these examples: -// -// if klog.V(2) { -// klog.Info("Starting transaction...") -// } -// -// klog.V(2).Infoln("Processed", nItems, "elements") -// -// Log output is buffered and written periodically using Flush. Programs -// should call Flush before exiting to guarantee all log output is written. -// -// By default, all log statements write to standard error. -// This package provides several flags that modify this behavior. -// As a result, flag.Parse must be called before any logging is done. -// -// -logtostderr=true -// Logs are written to standard error instead of to files. -// -alsologtostderr=false -// Logs are written to standard error as well as to files. -// -stderrthreshold=ERROR -// Log events at or above this severity are logged to standard -// error as well as to files. -// -log_dir="" -// Log files will be written to this directory instead of the -// default temporary directory. -// -// Other flags provide aids to debugging. 
-// -// -log_backtrace_at="" -// When set to a file and line number holding a logging statement, -// such as -// -log_backtrace_at=gopherflakes.go:234 -// a stack trace will be written to the Info log whenever execution -// hits that statement. (Unlike with -vmodule, the ".go" must be -// present.) -// -v=0 -// Enable V-leveled logging at the specified level. -// -vmodule="" -// The syntax of the argument is a comma-separated list of pattern=N, -// where pattern is a literal file name (minus the ".go" suffix) or -// "glob" pattern and N is a V level. For instance, -// -vmodule=gopher*=3 -// sets the V level to 3 in all Go files whose names begin "gopher". -// -package klog - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "io" - stdLog "log" - "math" - "os" - "path/filepath" - "runtime" - "strconv" - "strings" - "sync" - "sync/atomic" - "time" -) - -// severity identifies the sort of log: info, warning etc. It also implements -// the flag.Value interface. The -stderrthreshold flag is of type severity and -// should be modified only through the flag.Value interface. The values match -// the corresponding constants in C++. -type severity int32 // sync/atomic int32 - -// These constants identify the log levels in order of increasing severity. -// A message written to a high-severity log file is also written to each -// lower-severity log file. -const ( - infoLog severity = iota - warningLog - errorLog - fatalLog - numSeverity = 4 -) - -const severityChar = "IWEF" - -var severityName = []string{ - infoLog: "INFO", - warningLog: "WARNING", - errorLog: "ERROR", - fatalLog: "FATAL", -} - -// get returns the value of the severity. -func (s *severity) get() severity { - return severity(atomic.LoadInt32((*int32)(s))) -} - -// set sets the value of the severity. -func (s *severity) set(val severity) { - atomic.StoreInt32((*int32)(s), int32(val)) -} - -// String is part of the flag.Value interface. -func (s *severity) String() string { - return strconv.FormatInt(int64(*s), 10) -} - -// Get is part of the flag.Value interface. -func (s *severity) Get() interface{} { - return *s -} - -// Set is part of the flag.Value interface. -func (s *severity) Set(value string) error { - var threshold severity - // Is it a known name? - if v, ok := severityByName(value); ok { - threshold = v - } else { - v, err := strconv.ParseInt(value, 10, 32) - if err != nil { - return err - } - threshold = severity(v) - } - logging.stderrThreshold.set(threshold) - return nil -} - -func severityByName(s string) (severity, bool) { - s = strings.ToUpper(s) - for i, name := range severityName { - if name == s { - return severity(i), true - } - } - return 0, false -} - -// OutputStats tracks the number of output lines and bytes written. -type OutputStats struct { - lines int64 - bytes int64 -} - -// Lines returns the number of lines written. -func (s *OutputStats) Lines() int64 { - return atomic.LoadInt64(&s.lines) -} - -// Bytes returns the number of bytes written. -func (s *OutputStats) Bytes() int64 { - return atomic.LoadInt64(&s.bytes) -} - -// Stats tracks the number of lines of output and number of bytes -// per severity level. Values must be read with atomic.LoadInt64. -var Stats struct { - Info, Warning, Error OutputStats -} - -var severityStats = [numSeverity]*OutputStats{ - infoLog: &Stats.Info, - warningLog: &Stats.Warning, - errorLog: &Stats.Error, -} - -// Level is exported because it appears in the arguments to V and is -// the type of the v flag, which can be set programmatically. 
-// It's a distinct type because we want to discriminate it from logType. -// Variables of type level are only changed under logging.mu. -// The -v flag is read only with atomic ops, so the state of the logging -// module is consistent. - -// Level is treated as a sync/atomic int32. - -// Level specifies a level of verbosity for V logs. *Level implements -// flag.Value; the -v flag is of type Level and should be modified -// only through the flag.Value interface. -type Level int32 - -// get returns the value of the Level. -func (l *Level) get() Level { - return Level(atomic.LoadInt32((*int32)(l))) -} - -// set sets the value of the Level. -func (l *Level) set(val Level) { - atomic.StoreInt32((*int32)(l), int32(val)) -} - -// String is part of the flag.Value interface. -func (l *Level) String() string { - return strconv.FormatInt(int64(*l), 10) -} - -// Get is part of the flag.Value interface. -func (l *Level) Get() interface{} { - return *l -} - -// Set is part of the flag.Value interface. -func (l *Level) Set(value string) error { - v, err := strconv.ParseInt(value, 10, 32) - if err != nil { - return err - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(Level(v), logging.vmodule.filter, false) - return nil -} - -// moduleSpec represents the setting of the -vmodule flag. -type moduleSpec struct { - filter []modulePat -} - -// modulePat contains a filter for the -vmodule flag. -// It holds a verbosity level and a file pattern to match. -type modulePat struct { - pattern string - literal bool // The pattern is a literal string - level Level -} - -// match reports whether the file matches the pattern. It uses a string -// comparison if the pattern contains no metacharacters. -func (m *modulePat) match(file string) bool { - if m.literal { - return file == m.pattern - } - match, _ := filepath.Match(m.pattern, file) - return match -} - -func (m *moduleSpec) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - var b bytes.Buffer - for i, f := range m.filter { - if i > 0 { - b.WriteRune(',') - } - fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) - } - return b.String() -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported. -func (m *moduleSpec) Get() interface{} { - return nil -} - -var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") - -// Syntax: -vmodule=recordio=2,file=1,gfs*=3 -func (m *moduleSpec) Set(value string) error { - var filter []modulePat - for _, pat := range strings.Split(value, ",") { - if len(pat) == 0 { - // Empty strings such as from a trailing comma can be ignored. - continue - } - patLev := strings.Split(pat, "=") - if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { - return errVmoduleSyntax - } - pattern := patLev[0] - v, err := strconv.ParseInt(patLev[1], 10, 32) - if err != nil { - return errors.New("syntax error: expect comma-separated list of filename=N") - } - if v < 0 { - return errors.New("negative value for vmodule level") - } - if v == 0 { - continue // Ignore. It's harmless but no point in paying the overhead. - } - // TODO: check syntax of filter? 
- filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) - } - logging.mu.Lock() - defer logging.mu.Unlock() - logging.setVState(logging.verbosity, filter, true) - return nil -} - -// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters -// that require filepath.Match to be called to match the pattern. -func isLiteral(pattern string) bool { - return !strings.ContainsAny(pattern, `\*?[]`) -} - -// traceLocation represents the setting of the -log_backtrace_at flag. -type traceLocation struct { - file string - line int -} - -// isSet reports whether the trace location has been specified. -// logging.mu is held. -func (t *traceLocation) isSet() bool { - return t.line > 0 -} - -// match reports whether the specified file and line matches the trace location. -// The argument file name is the full path, not the basename specified in the flag. -// logging.mu is held. -func (t *traceLocation) match(file string, line int) bool { - if t.line != line { - return false - } - if i := strings.LastIndex(file, "/"); i >= 0 { - file = file[i+1:] - } - return t.file == file -} - -func (t *traceLocation) String() string { - // Lock because the type is not atomic. TODO: clean this up. - logging.mu.Lock() - defer logging.mu.Unlock() - return fmt.Sprintf("%s:%d", t.file, t.line) -} - -// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the -// struct is not exported -func (t *traceLocation) Get() interface{} { - return nil -} - -var errTraceSyntax = errors.New("syntax error: expect file.go:234") - -// Syntax: -log_backtrace_at=gopherflakes.go:234 -// Note that unlike vmodule the file extension is included here. -func (t *traceLocation) Set(value string) error { - if value == "" { - // Unset. - t.line = 0 - t.file = "" - } - fields := strings.Split(value, ":") - if len(fields) != 2 { - return errTraceSyntax - } - file, line := fields[0], fields[1] - if !strings.Contains(file, ".") { - return errTraceSyntax - } - v, err := strconv.Atoi(line) - if err != nil { - return errTraceSyntax - } - if v <= 0 { - return errors.New("negative or zero value for level") - } - logging.mu.Lock() - defer logging.mu.Unlock() - t.line = v - t.file = file - return nil -} - -// flushSyncWriter is the interface satisfied by logging destinations. -type flushSyncWriter interface { - Flush() error - Sync() error - io.Writer -} - -// init sets up the defaults and runs flushDaemon. -func init() { - logging.stderrThreshold = errorLog // Default stderrThreshold is ERROR. - logging.setVState(0, nil, false) - logging.logDir = "" - logging.logFile = "" - logging.logFileMaxSizeMB = 1800 - logging.toStderr = true - logging.alsoToStderr = false - logging.skipHeaders = false - logging.addDirHeader = false - logging.skipLogHeaders = false - go logging.flushDaemon() -} - -// InitFlags is for explicitly initializing the flags. -func InitFlags(flagset *flag.FlagSet) { - if flagset == nil { - flagset = flag.CommandLine - } - - flagset.StringVar(&logging.logDir, "log_dir", logging.logDir, "If non-empty, write log files in this directory") - flagset.StringVar(&logging.logFile, "log_file", logging.logFile, "If non-empty, use this log file") - flagset.Uint64Var(&logging.logFileMaxSizeMB, "log_file_max_size", logging.logFileMaxSizeMB, - "Defines the maximum size a log file can grow to. Unit is megabytes. 
"+ - "If the value is 0, the maximum file size is unlimited.") - flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files") - flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files") - flagset.Var(&logging.verbosity, "v", "number for the log level verbosity") - flagset.BoolVar(&logging.skipHeaders, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header") - flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages") - flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files") - flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") - flagset.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") - flagset.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") -} - -// Flush flushes all pending log I/O. -func Flush() { - logging.lockAndFlushAll() -} - -// loggingT collects all the global state of the logging setup. -type loggingT struct { - // Boolean flags. Not handled atomically because the flag.Value interface - // does not let us avoid the =true, and that shorthand is necessary for - // compatibility. TODO: does this matter enough to fix? Seems unlikely. - toStderr bool // The -logtostderr flag. - alsoToStderr bool // The -alsologtostderr flag. - - // Level flag. Handled atomically. - stderrThreshold severity // The -stderrthreshold flag. - - // freeList is a list of byte buffers, maintained under freeListMu. - freeList *buffer - // freeListMu maintains the free list. It is separate from the main mutex - // so buffers can be grabbed and printed to without holding the main lock, - // for better parallelization. - freeListMu sync.Mutex - - // mu protects the remaining elements of this structure and is - // used to synchronize logging. - mu sync.Mutex - // file holds writer for each of the log types. - file [numSeverity]flushSyncWriter - // pcs is used in V to avoid an allocation when computing the caller's PC. - pcs [1]uintptr - // vmap is a cache of the V Level for each V() call site, identified by PC. - // It is wiped whenever the vmodule flag changes state. - vmap map[uintptr]Level - // filterLength stores the length of the vmodule filter chain. If greater - // than zero, it means vmodule is enabled. It may be read safely - // using sync.LoadInt32, but is only modified under mu. - filterLength int32 - // traceLocation is the state of the -log_backtrace_at flag. - traceLocation traceLocation - // These flags are modified only under lock, although verbosity may be fetched - // safely using atomic.LoadInt32. - vmodule moduleSpec // The state of the -vmodule flag. - verbosity Level // V logging level, the value of the -v flag/ - - // If non-empty, overrides the choice of directory in which to write logs. - // See createLogDirs for the full list of possible destinations. - logDir string - - // If non-empty, specifies the path of the file to write logs. mutually exclusive - // with the log-dir option. - logFile string - - // When logFile is specified, this limiter makes sure the logFile won't exceeds a certain size. When exceeds, the - // logFile will be cleaned up. If this value is 0, no size limitation will be applied to logFile. 
- logFileMaxSizeMB uint64 - - // If true, do not add the prefix headers, useful when used with SetOutput - skipHeaders bool - - // If true, do not add the headers to log files - skipLogHeaders bool - - // If true, add the file directory to the header - addDirHeader bool -} - -// buffer holds a byte Buffer for reuse. The zero value is ready for use. -type buffer struct { - bytes.Buffer - tmp [64]byte // temporary byte array for creating headers. - next *buffer -} - -var logging loggingT - -// setVState sets a consistent state for V logging. -// l.mu is held. -func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { - // Turn verbosity off so V will not fire while we are in transition. - logging.verbosity.set(0) - // Ditto for filter length. - atomic.StoreInt32(&logging.filterLength, 0) - - // Set the new filters and wipe the pc->Level map if the filter has changed. - if setFilter { - logging.vmodule.filter = filter - logging.vmap = make(map[uintptr]Level) - } - - // Things are consistent now, so enable filtering and verbosity. - // They are enabled in order opposite to that in V. - atomic.StoreInt32(&logging.filterLength, int32(len(filter))) - logging.verbosity.set(verbosity) -} - -// getBuffer returns a new, ready-to-use buffer. -func (l *loggingT) getBuffer() *buffer { - l.freeListMu.Lock() - b := l.freeList - if b != nil { - l.freeList = b.next - } - l.freeListMu.Unlock() - if b == nil { - b = new(buffer) - } else { - b.next = nil - b.Reset() - } - return b -} - -// putBuffer returns a buffer to the free list. -func (l *loggingT) putBuffer(b *buffer) { - if b.Len() >= 256 { - // Let big buffers die a natural death. - return - } - l.freeListMu.Lock() - b.next = l.freeList - l.freeList = b - l.freeListMu.Unlock() -} - -var timeNow = time.Now // Stubbed out for testing. - -/* -header formats a log header as defined by the C++ implementation. -It returns a buffer containing the formatted header and the user's file and line number. -The depth specifies how many stack frames above lives the source line to be identified in the log message. - -Log lines have this form: - Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... -where the fields are defined as follows: - L A single character, representing the log level (eg 'I' for INFO) - mm The month (zero padded; ie May is '05') - dd The day (zero padded) - hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds - threadid The space-padded thread ID as returned by GetTID() - file The file name - line The line number - msg The user-supplied message -*/ -func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { - _, file, line, ok := runtime.Caller(3 + depth) - if !ok { - file = "???" - line = 1 - } else { - if slash := strings.LastIndex(file, "/"); slash >= 0 { - path := file - file = path[slash+1:] - if l.addDirHeader { - if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { - file = path[dirsep+1:] - } - } - } - } - return l.formatHeader(s, file, line), file, line -} - -// formatHeader formats a log header using the provided file name and line number. -func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { - now := timeNow() - if line < 0 { - line = 0 // not a real line number, but acceptable to someDigits - } - if s > fatalLog { - s = infoLog // for safety. - } - buf := l.getBuffer() - if l.skipHeaders { - return buf - } - - // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. - // It's worth about 3X. Fprintf is hard. 
- _, month, day := now.Date() - hour, minute, second := now.Clock() - // Lmmdd hh:mm:ss.uuuuuu threadid file:line] - buf.tmp[0] = severityChar[s] - buf.twoDigits(1, int(month)) - buf.twoDigits(3, day) - buf.tmp[5] = ' ' - buf.twoDigits(6, hour) - buf.tmp[8] = ':' - buf.twoDigits(9, minute) - buf.tmp[11] = ':' - buf.twoDigits(12, second) - buf.tmp[14] = '.' - buf.nDigits(6, 15, now.Nanosecond()/1000, '0') - buf.tmp[21] = ' ' - buf.nDigits(7, 22, pid, ' ') // TODO: should be TID - buf.tmp[29] = ' ' - buf.Write(buf.tmp[:30]) - buf.WriteString(file) - buf.tmp[0] = ':' - n := buf.someDigits(1, line) - buf.tmp[n+1] = ']' - buf.tmp[n+2] = ' ' - buf.Write(buf.tmp[:n+3]) - return buf -} - -// Some custom tiny helper functions to print the log header efficiently. - -const digits = "0123456789" - -// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. -func (buf *buffer) twoDigits(i, d int) { - buf.tmp[i+1] = digits[d%10] - d /= 10 - buf.tmp[i] = digits[d%10] -} - -// nDigits formats an n-digit integer at buf.tmp[i], -// padding with pad on the left. -// It assumes d >= 0. -func (buf *buffer) nDigits(n, i, d int, pad byte) { - j := n - 1 - for ; j >= 0 && d > 0; j-- { - buf.tmp[i+j] = digits[d%10] - d /= 10 - } - for ; j >= 0; j-- { - buf.tmp[i+j] = pad - } -} - -// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. -func (buf *buffer) someDigits(i, d int) int { - // Print into the top, then copy down. We know there's space for at least - // a 10-digit number. - j := len(buf.tmp) - for { - j-- - buf.tmp[j] = digits[d%10] - d /= 10 - if d == 0 { - break - } - } - return copy(buf.tmp[i:], buf.tmp[j:]) -} - -func (l *loggingT) println(s severity, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintln(buf, args...) - l.output(s, buf, file, line, false) -} - -func (l *loggingT) print(s severity, args ...interface{}) { - l.printDepth(s, 1, args...) -} - -func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { - buf, file, line := l.header(s, depth) - fmt.Fprint(buf, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -func (l *loggingT) printf(s severity, format string, args ...interface{}) { - buf, file, line := l.header(s, 0) - fmt.Fprintf(buf, format, args...) - if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, false) -} - -// printWithFileLine behaves like print but uses the provided file and line number. If -// alsoLogToStderr is true, the log message always appears on standard error; it -// will also appear in the log file unless --logtostderr is set. -func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { - buf := l.formatHeader(s, file, line) - fmt.Fprint(buf, args...) 
- if buf.Bytes()[buf.Len()-1] != '\n' { - buf.WriteByte('\n') - } - l.output(s, buf, file, line, alsoToStderr) -} - -// redirectBuffer is used to set an alternate destination for the logs -type redirectBuffer struct { - w io.Writer -} - -func (rb *redirectBuffer) Sync() error { - return nil -} - -func (rb *redirectBuffer) Flush() error { - return nil -} - -func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { - return rb.w.Write(bytes) -} - -// SetOutput sets the output destination for all severities -func SetOutput(w io.Writer) { - logging.mu.Lock() - defer logging.mu.Unlock() - for s := fatalLog; s >= infoLog; s-- { - rb := &redirectBuffer{ - w: w, - } - logging.file[s] = rb - } -} - -// SetOutputBySeverity sets the output destination for specific severity -func SetOutputBySeverity(name string, w io.Writer) { - logging.mu.Lock() - defer logging.mu.Unlock() - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) - } - rb := &redirectBuffer{ - w: w, - } - logging.file[sev] = rb -} - -// output writes the data to the log files and releases the buffer. -func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { - l.mu.Lock() - if l.traceLocation.isSet() { - if l.traceLocation.match(file, line) { - buf.Write(stacks(false)) - } - } - data := buf.Bytes() - if l.toStderr { - os.Stderr.Write(data) - } else { - if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { - os.Stderr.Write(data) - } - - if logging.logFile != "" { - // Since we are using a single log file, all of the items in l.file array - // will point to the same file, so just use one of them to write data. - if l.file[infoLog] == nil { - if err := l.createFiles(infoLog); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - l.file[infoLog].Write(data) - } else { - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) - } - } - - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: - l.file[infoLog].Write(data) - } - } - } - if s == fatalLog { - // If we got here via Exit rather than Fatal, print no stacks. - if atomic.LoadUint32(&fatalNoStacks) > 0 { - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(1) - } - // Dump all goroutine stacks before exiting. - // First, make sure we see the trace for the current goroutine on standard error. - // If -logtostderr has been specified, the loop below will do that anyway - // as the first stack in the full dump. - if !l.toStderr { - os.Stderr.Write(stacks(false)) - } - // Write the stack trace for all goroutines to the files. - trace := stacks(true) - logExitFunc = func(error) {} // If we get a write error, we'll still exit below. - for log := fatalLog; log >= infoLog; log-- { - if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. - f.Write(trace) - } - } - l.mu.Unlock() - timeoutFlush(10 * time.Second) - os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. 
- } - l.putBuffer(buf) - l.mu.Unlock() - if stats := severityStats[s]; stats != nil { - atomic.AddInt64(&stats.lines, 1) - atomic.AddInt64(&stats.bytes, int64(len(data))) - } -} - -// timeoutFlush calls Flush and returns when it completes or after timeout -// elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when klog.Fatal is called from a hook that holds -// a lock. -func timeoutFlush(timeout time.Duration) { - done := make(chan bool, 1) - go func() { - Flush() // calls logging.lockAndFlushAll() - done <- true - }() - select { - case <-done: - case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) - } -} - -// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. -func stacks(all bool) []byte { - // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. - n := 10000 - if all { - n = 100000 - } - var trace []byte - for i := 0; i < 5; i++ { - trace = make([]byte, n) - nbytes := runtime.Stack(trace, all) - if nbytes < len(trace) { - return trace[:nbytes] - } - n *= 2 - } - return trace -} - -// logExitFunc provides a simple mechanism to override the default behavior -// of exiting on error. Used in testing and to guarantee we reach a required exit -// for fatal logs. Instead, exit could be a function rather than a method but that -// would make its use clumsier. -var logExitFunc func(error) - -// exit is called if there is trouble creating or writing log files. -// It flushes the logs and exits the program; there's no point in hanging around. -// l.mu is held. -func (l *loggingT) exit(err error) { - fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) - // If logExitFunc is set, we do that instead of exiting. - if logExitFunc != nil { - logExitFunc(err) - return - } - l.flushAll() - os.Exit(2) -} - -// syncBuffer joins a bufio.Writer to its underlying file, providing access to the -// file's Sync method and providing a wrapper for the Write method that provides log -// file rotation. There are conflicting methods, so the file cannot be embedded. -// l.mu is held for all its methods. -type syncBuffer struct { - logger *loggingT - *bufio.Writer - file *os.File - sev severity - nbytes uint64 // The number of bytes written to this file - maxbytes uint64 // The max number of bytes this syncBuffer.file can hold before cleaning up. -} - -func (sb *syncBuffer) Sync() error { - return sb.file.Sync() -} - -// CalculateMaxSize returns the real max size in bytes after considering the default max size and the flag options. -func CalculateMaxSize() uint64 { - if logging.logFile != "" { - if logging.logFileMaxSizeMB == 0 { - // If logFileMaxSizeMB is zero, we don't have limitations on the log size. - return math.MaxUint64 - } - // Flag logFileMaxSizeMB is in MB for user convenience. - return logging.logFileMaxSizeMB * 1024 * 1024 - } - // If "log_file" flag is not specified, the target file (sb.file) will be cleaned up when reaches a fixed size. - return MaxSize -} - -func (sb *syncBuffer) Write(p []byte) (n int, err error) { - if sb.nbytes+uint64(len(p)) >= sb.maxbytes { - if err := sb.rotateFile(time.Now(), false); err != nil { - sb.logger.exit(err) - } - } - n, err = sb.Writer.Write(p) - sb.nbytes += uint64(n) - if err != nil { - sb.logger.exit(err) - } - return -} - -// rotateFile closes the syncBuffer's file and starts a new one. -// The startup argument indicates whether this is the initial startup of klog. 
-// If startup is true, existing files are opened for appending instead of truncated. -func (sb *syncBuffer) rotateFile(now time.Time, startup bool) error { - if sb.file != nil { - sb.Flush() - sb.file.Close() - } - var err error - sb.file, _, err = create(severityName[sb.sev], now, startup) - sb.nbytes = 0 - if err != nil { - return err - } - - sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) - - if sb.logger.skipLogHeaders { - return nil - } - - // Write header. - var buf bytes.Buffer - fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) - fmt.Fprintf(&buf, "Running on machine: %s\n", host) - fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) - fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") - n, err := sb.file.Write(buf.Bytes()) - sb.nbytes += uint64(n) - return err -} - -// bufferSize sizes the buffer associated with each log file. It's large -// so that log records can accumulate without the logging thread blocking -// on disk I/O. The flushDaemon will block instead. -const bufferSize = 256 * 1024 - -// createFiles creates all the log files for severity from sev down to infoLog. -// l.mu is held. -func (l *loggingT) createFiles(sev severity) error { - now := time.Now() - // Files are created in decreasing severity order, so as soon as we find one - // has already been created, we can stop. - for s := sev; s >= infoLog && l.file[s] == nil; s-- { - sb := &syncBuffer{ - logger: l, - sev: s, - maxbytes: CalculateMaxSize(), - } - if err := sb.rotateFile(now, true); err != nil { - return err - } - l.file[s] = sb - } - return nil -} - -const flushInterval = 5 * time.Second - -// flushDaemon periodically flushes the log file buffers. -func (l *loggingT) flushDaemon() { - for range time.NewTicker(flushInterval).C { - l.lockAndFlushAll() - } -} - -// lockAndFlushAll is like flushAll but locks l.mu first. -func (l *loggingT) lockAndFlushAll() { - l.mu.Lock() - l.flushAll() - l.mu.Unlock() -} - -// flushAll flushes all the logs and attempts to "sync" their data to disk. -// l.mu is held. -func (l *loggingT) flushAll() { - // Flush from fatal down, in case there's trouble flushing. - for s := fatalLog; s >= infoLog; s-- { - file := l.file[s] - if file != nil { - file.Flush() // ignore error - file.Sync() // ignore error - } - } -} - -// CopyStandardLogTo arranges for messages written to the Go "log" package's -// default logs to also appear in the Google logs for the named and lower -// severities. Subsequent changes to the standard log's default output location -// or format may break this behavior. -// -// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not -// recognized, CopyStandardLogTo panics. -func CopyStandardLogTo(name string) { - sev, ok := severityByName(name) - if !ok { - panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) - } - // Set a log format that captures the user's file and line: - // d.go:23: message - stdLog.SetFlags(stdLog.Lshortfile) - stdLog.SetOutput(logBridge(sev)) -} - -// logBridge provides the Write method that enables CopyStandardLogTo to connect -// Go's standard logs to the logs provided by this package. -type logBridge severity - -// Write parses the standard logging line and passes its components to the -// logger for severity(lb). -func (lb logBridge) Write(b []byte) (n int, err error) { - var ( - file = "???" 
- line = 1 - text string - ) - // Split "d.go:23: message" into "d.go", "23", and "message". - if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { - text = fmt.Sprintf("bad log format: %s", b) - } else { - file = string(parts[0]) - text = string(parts[2][1:]) // skip leading space - line, err = strconv.Atoi(string(parts[1])) - if err != nil { - text = fmt.Sprintf("bad line number: %s", b) - line = 1 - } - } - // printWithFileLine with alsoToStderr=true, so standard log messages - // always appear on standard error. - logging.printWithFileLine(severity(lb), file, line, true, text) - return len(b), nil -} - -// setV computes and remembers the V level for a given PC -// when vmodule is enabled. -// File pattern matching takes the basename of the file, stripped -// of its .go suffix, and uses filepath.Match, which is a little more -// general than the *? matching used in C++. -// l.mu is held. -func (l *loggingT) setV(pc uintptr) Level { - fn := runtime.FuncForPC(pc) - file, _ := fn.FileLine(pc) - // The file is something like /a/b/c/d.go. We want just the d. - if strings.HasSuffix(file, ".go") { - file = file[:len(file)-3] - } - if slash := strings.LastIndex(file, "/"); slash >= 0 { - file = file[slash+1:] - } - for _, filter := range l.vmodule.filter { - if filter.match(file) { - l.vmap[pc] = filter.level - return filter.level - } - } - l.vmap[pc] = 0 - return 0 -} - -// Verbose is a boolean type that implements Infof (like Printf) etc. -// See the documentation of V for more information. -type Verbose bool - -// V reports whether verbosity at the call site is at least the requested level. -// The returned value is a boolean of type Verbose, which implements Info, Infoln -// and Infof. These methods will write to the Info log if called. -// Thus, one may write either -// if klog.V(2) { klog.Info("log this") } -// or -// klog.V(2).Info("log this") -// The second form is shorter but the first is cheaper if logging is off because it does -// not evaluate its arguments. -// -// Whether an individual call to V generates a log record depends on the setting of -// the -v and --vmodule flags; both are off by default. If the level in the call to -// V is at least the value of -v, or of -vmodule for the source file containing the -// call, the V call will log. -func V(level Level) Verbose { - // This function tries hard to be cheap unless there's work to do. - // The fast path is two atomic loads and compares. - - // Here is a cheap but safe test to see if V logging is enabled globally. - if logging.verbosity.get() >= level { - return Verbose(true) - } - - // It's off globally but it vmodule may still be set. - // Here is another cheap but safe test to see if vmodule is enabled. - if atomic.LoadInt32(&logging.filterLength) > 0 { - // Now we need a proper lock to use the logging structure. The pcs field - // is shared so we must lock before accessing it. This is fairly expensive, - // but if V logging is enabled we're slow anyway. - logging.mu.Lock() - defer logging.mu.Unlock() - if runtime.Callers(2, logging.pcs[:]) == 0 { - return Verbose(false) - } - v, ok := logging.vmap[logging.pcs[0]] - if !ok { - v = logging.setV(logging.pcs[0]) - } - return Verbose(v >= level) - } - return Verbose(false) -} - -// Info is equivalent to the global Info function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Info(args ...interface{}) { - if v { - logging.print(infoLog, args...) 
- } -} - -// Infoln is equivalent to the global Infoln function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infoln(args ...interface{}) { - if v { - logging.println(infoLog, args...) - } -} - -// Infof is equivalent to the global Infof function, guarded by the value of v. -// See the documentation of V for usage. -func (v Verbose) Infof(format string, args ...interface{}) { - if v { - logging.printf(infoLog, format, args...) - } -} - -// Info logs to the INFO log. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Info(args ...interface{}) { - logging.print(infoLog, args...) -} - -// InfoDepth acts as Info but uses depth to determine which call frame to log. -// InfoDepth(0, "msg") is the same as Info("msg"). -func InfoDepth(depth int, args ...interface{}) { - logging.printDepth(infoLog, depth, args...) -} - -// Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is always appended. -func Infoln(args ...interface{}) { - logging.println(infoLog, args...) -} - -// Infof logs to the INFO log. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Infof(format string, args ...interface{}) { - logging.printf(infoLog, format, args...) -} - -// Warning logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Warning(args ...interface{}) { - logging.print(warningLog, args...) -} - -// WarningDepth acts as Warning but uses depth to determine which call frame to log. -// WarningDepth(0, "msg") is the same as Warning("msg"). -func WarningDepth(depth int, args ...interface{}) { - logging.printDepth(warningLog, depth, args...) -} - -// Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is always appended. -func Warningln(args ...interface{}) { - logging.println(warningLog, args...) -} - -// Warningf logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Warningf(format string, args ...interface{}) { - logging.printf(warningLog, format, args...) -} - -// Error logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Error(args ...interface{}) { - logging.print(errorLog, args...) -} - -// ErrorDepth acts as Error but uses depth to determine which call frame to log. -// ErrorDepth(0, "msg") is the same as Error("msg"). -func ErrorDepth(depth int, args ...interface{}) { - logging.printDepth(errorLog, depth, args...) -} - -// Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is always appended. -func Errorln(args ...interface{}) { - logging.println(errorLog, args...) -} - -// Errorf logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Errorf(format string, args ...interface{}) { - logging.printf(errorLog, format, args...) -} - -// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Fatal(args ...interface{}) { - logging.print(fatalLog, args...) -} - -// FatalDepth acts as Fatal but uses depth to determine which call frame to log. 
-// FatalDepth(0, "msg") is the same as Fatal("msg"). -func FatalDepth(depth int, args ...interface{}) { - logging.printDepth(fatalLog, depth, args...) -} - -// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is always appended. -func Fatalln(args ...interface{}) { - logging.println(fatalLog, args...) -} - -// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, -// including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Fatalf(format string, args ...interface{}) { - logging.printf(fatalLog, format, args...) -} - -// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. -// It allows Exit and relatives to use the Fatal logs. -var fatalNoStacks uint32 - -// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. -func Exit(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.print(fatalLog, args...) -} - -// ExitDepth acts as Exit but uses depth to determine which call frame to log. -// ExitDepth(0, "msg") is the same as Exit("msg"). -func ExitDepth(depth int, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printDepth(fatalLog, depth, args...) -} - -// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -func Exitln(args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.println(fatalLog, args...) -} - -// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). -// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. -func Exitf(format string, args ...interface{}) { - atomic.StoreUint32(&fatalNoStacks, 1) - logging.printf(fatalLog, format, args...) -} diff --git a/vendor/k8s.io/klog/klog_file.go b/vendor/k8s.io/klog/klog_file.go deleted file mode 100644 index e4010ad..0000000 --- a/vendor/k8s.io/klog/klog_file.go +++ /dev/null @@ -1,139 +0,0 @@ -// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ -// -// Copyright 2013 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// File I/O for logs. - -package klog - -import ( - "errors" - "fmt" - "os" - "os/user" - "path/filepath" - "strings" - "sync" - "time" -) - -// MaxSize is the maximum size of a log file in bytes. -var MaxSize uint64 = 1024 * 1024 * 1800 - -// logDirs lists the candidate directories for new log files. 
-var logDirs []string - -func createLogDirs() { - if logging.logDir != "" { - logDirs = append(logDirs, logging.logDir) - } - logDirs = append(logDirs, os.TempDir()) -} - -var ( - pid = os.Getpid() - program = filepath.Base(os.Args[0]) - host = "unknownhost" - userName = "unknownuser" -) - -func init() { - h, err := os.Hostname() - if err == nil { - host = shortHostname(h) - } - - current, err := user.Current() - if err == nil { - userName = current.Username - } - - // Sanitize userName since it may contain filepath separators on Windows. - userName = strings.Replace(userName, `\`, "_", -1) -} - -// shortHostname returns its argument, truncating at the first period. -// For instance, given "www.google.com" it returns "www". -func shortHostname(hostname string) string { - if i := strings.Index(hostname, "."); i >= 0 { - return hostname[:i] - } - return hostname -} - -// logName returns a new log file name containing tag, with start time t, and -// the name for the symlink for tag. -func logName(tag string, t time.Time) (name, link string) { - name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", - program, - host, - userName, - tag, - t.Year(), - t.Month(), - t.Day(), - t.Hour(), - t.Minute(), - t.Second(), - pid) - return name, program + "." + tag -} - -var onceLogDirs sync.Once - -// create creates a new log file and returns the file and its filename, which -// contains tag ("INFO", "FATAL", etc.) and t. If the file is created -// successfully, create also attempts to update the symlink for that tag, ignoring -// errors. -// The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for appending instead of truncated. -func create(tag string, t time.Time, startup bool) (f *os.File, filename string, err error) { - if logging.logFile != "" { - f, err := openOrCreate(logging.logFile, startup) - if err == nil { - return f, logging.logFile, nil - } - return nil, "", fmt.Errorf("log: unable to create log: %v", err) - } - onceLogDirs.Do(createLogDirs) - if len(logDirs) == 0 { - return nil, "", errors.New("log: no log dirs") - } - name, link := logName(tag, t) - var lastErr error - for _, dir := range logDirs { - fname := filepath.Join(dir, name) - f, err := openOrCreate(fname, startup) - if err == nil { - symlink := filepath.Join(dir, link) - os.Remove(symlink) // ignore err - os.Symlink(name, symlink) // ignore err - return f, fname, nil - } - lastErr = err - } - return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) -} - -// The startup argument indicates whether this is the initial startup of klog. -// If startup is true, existing files are opened for appending instead of truncated. 
-func openOrCreate(name string, startup bool) (*os.File, error) { - if startup { - f, err := os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666) - return f, err - } - f, err := os.Create(name) - return f, err -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 488eb4b..a353e95 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -15,6 +15,8 @@ github.com/alicebob/miniredis/v2/server github.com/beorn7/perks/quantile # github.com/bradfitz/gomemcache v0.0.0-20190329173943-551aad21a668 github.com/bradfitz/gomemcache/memcache +# github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 +github.com/ghodss/yaml # github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 github.com/gin-contrib/sse # github.com/gin-gonic/gin v1.4.0 @@ -200,12 +202,13 @@ k8s.io/api/settings/v1alpha1 k8s.io/api/storage/v1 k8s.io/api/storage/v1alpha1 k8s.io/api/storage/v1beta1 -# k8s.io/apimachinery v0.18.6 +# k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc k8s.io/apimachinery/pkg/api/errors k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured +k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields @@ -285,9 +288,3 @@ k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation k8s.io/client-go/util/flowcontrol k8s.io/client-go/util/integer -# k8s.io/klog v1.0.0 -k8s.io/klog -# sigs.k8s.io/structured-merge-diff/v3 v3.0.0 -sigs.k8s.io/structured-merge-diff/v3/value -# sigs.k8s.io/yaml v1.2.0 -sigs.k8s.io/yaml diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE b/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE deleted file mode 100644 index 8dada3e..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go deleted file mode 100644 index f70cd41..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/allocator.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -// Allocator provides a value object allocation strategy. 
-// Value objects can be allocated by passing an allocator to the "Using" -// receiver functions on the value interfaces, e.g. Map.ZipUsing(allocator, ...). -// Value objects returned from "Using" functions should be given back to the allocator -// once longer needed by calling Allocator.Free(Value). -type Allocator interface { - // Free gives the allocator back any value objects returned by the "Using" - // receiver functions on the value interfaces. - // interface{} may be any of: Value, Map, List or Range. - Free(interface{}) - - // The unexported functions are for "Using" receiver functions of the value types - // to request what they need from the allocator. - allocValueUnstructured() *valueUnstructured - allocListUnstructuredRange() *listUnstructuredRange - allocValueReflect() *valueReflect - allocMapReflect() *mapReflect - allocStructReflect() *structReflect - allocListReflect() *listReflect - allocListReflectRange() *listReflectRange -} - -// HeapAllocator simply allocates objects to the heap. It is the default -// allocator used receiver functions on the value interfaces that do not accept -// an allocator and should be used whenever allocating objects that will not -// be given back to an allocator by calling Allocator.Free(Value). -var HeapAllocator = &heapAllocator{} - -type heapAllocator struct{} - -func (p *heapAllocator) allocValueUnstructured() *valueUnstructured { - return &valueUnstructured{} -} - -func (p *heapAllocator) allocListUnstructuredRange() *listUnstructuredRange { - return &listUnstructuredRange{vv: &valueUnstructured{}} -} - -func (p *heapAllocator) allocValueReflect() *valueReflect { - return &valueReflect{} -} - -func (p *heapAllocator) allocStructReflect() *structReflect { - return &structReflect{} -} - -func (p *heapAllocator) allocMapReflect() *mapReflect { - return &mapReflect{} -} - -func (p *heapAllocator) allocListReflect() *listReflect { - return &listReflect{} -} - -func (p *heapAllocator) allocListReflectRange() *listReflectRange { - return &listReflectRange{vr: &valueReflect{}} -} - -func (p *heapAllocator) Free(_ interface{}) {} - -// NewFreelistAllocator creates freelist based allocator. -// This allocator provides fast allocation and freeing of short lived value objects. -// -// The freelists are bounded in size by freelistMaxSize. If more than this amount of value objects is -// allocated at once, the excess will be returned to the heap for garbage collection when freed. -// -// This allocator is unsafe and must not be accessed concurrently by goroutines. -// -// This allocator works well for traversal of value data trees. Typical usage is to acquire -// a freelist at the beginning of the traversal and use it through out -// for all temporary value access. -func NewFreelistAllocator() Allocator { - return &freelistAllocator{ - valueUnstructured: &freelist{new: func() interface{} { - return &valueUnstructured{} - }}, - listUnstructuredRange: &freelist{new: func() interface{} { - return &listUnstructuredRange{vv: &valueUnstructured{}} - }}, - valueReflect: &freelist{new: func() interface{} { - return &valueReflect{} - }}, - mapReflect: &freelist{new: func() interface{} { - return &mapReflect{} - }}, - structReflect: &freelist{new: func() interface{} { - return &structReflect{} - }}, - listReflect: &freelist{new: func() interface{} { - return &listReflect{} - }}, - listReflectRange: &freelist{new: func() interface{} { - return &listReflectRange{vr: &valueReflect{}} - }}, - } -} - -// Bound memory usage of freelists. 
This prevents the processing of very large lists from leaking memory. -// This limit is large enough for endpoints objects containing 1000 IP address entries. Freed objects -// that don't fit into the freelist are orphaned on the heap to be garbage collected. -const freelistMaxSize = 1000 - -type freelistAllocator struct { - valueUnstructured *freelist - listUnstructuredRange *freelist - valueReflect *freelist - mapReflect *freelist - structReflect *freelist - listReflect *freelist - listReflectRange *freelist -} - -type freelist struct { - list []interface{} - new func() interface{} -} - -func (f *freelist) allocate() interface{} { - var w2 interface{} - if n := len(f.list); n > 0 { - w2, f.list = f.list[n-1], f.list[:n-1] - } else { - w2 = f.new() - } - return w2 -} - -func (f *freelist) free(v interface{}) { - if len(f.list) < freelistMaxSize { - f.list = append(f.list, v) - } -} - -func (w *freelistAllocator) Free(value interface{}) { - switch v := value.(type) { - case *valueUnstructured: - v.Value = nil // don't hold references to unstructured objects - w.valueUnstructured.free(v) - case *listUnstructuredRange: - v.vv.Value = nil // don't hold references to unstructured objects - w.listUnstructuredRange.free(v) - case *valueReflect: - v.ParentMapKey = nil - v.ParentMap = nil - w.valueReflect.free(v) - case *mapReflect: - w.mapReflect.free(v) - case *structReflect: - w.structReflect.free(v) - case *listReflect: - w.listReflect.free(v) - case *listReflectRange: - v.vr.ParentMapKey = nil - v.vr.ParentMap = nil - w.listReflectRange.free(v) - } -} - -func (w *freelistAllocator) allocValueUnstructured() *valueUnstructured { - return w.valueUnstructured.allocate().(*valueUnstructured) -} - -func (w *freelistAllocator) allocListUnstructuredRange() *listUnstructuredRange { - return w.listUnstructuredRange.allocate().(*listUnstructuredRange) -} - -func (w *freelistAllocator) allocValueReflect() *valueReflect { - return w.valueReflect.allocate().(*valueReflect) -} - -func (w *freelistAllocator) allocStructReflect() *structReflect { - return w.structReflect.allocate().(*structReflect) -} - -func (w *freelistAllocator) allocMapReflect() *mapReflect { - return w.mapReflect.allocate().(*mapReflect) -} - -func (w *freelistAllocator) allocListReflect() *listReflect { - return w.listReflect.allocate().(*listReflect) -} - -func (w *freelistAllocator) allocListReflectRange() *listReflectRange { - return w.listReflectRange.allocate().(*listReflectRange) -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go deleted file mode 100644 index be3c672..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/fields.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import ( - "sort" - "strings" -) - -// Field is an individual key-value pair. -type Field struct { - Name string - Value Value -} - -// FieldList is a list of key-value pairs. 
Each field is expected to -// have a different name. -type FieldList []Field - -// Sort sorts the field list by Name. -func (f FieldList) Sort() { - if len(f) < 2 { - return - } - if len(f) == 2 { - if f[1].Name < f[0].Name { - f[0], f[1] = f[1], f[0] - } - return - } - sort.SliceStable(f, func(i, j int) bool { - return f[i].Name < f[j].Name - }) -} - -// Less compares two lists lexically. -func (f FieldList) Less(rhs FieldList) bool { - return f.Compare(rhs) == -1 -} - -// Compare compares two lists lexically. The result will be 0 if f==rhs, -1 -// if f < rhs, and +1 if f > rhs. -func (f FieldList) Compare(rhs FieldList) int { - i := 0 - for { - if i >= len(f) && i >= len(rhs) { - // Maps are the same length and all items are equal. - return 0 - } - if i >= len(f) { - // F is shorter. - return -1 - } - if i >= len(rhs) { - // RHS is shorter. - return 1 - } - if c := strings.Compare(f[i].Name, rhs[i].Name); c != 0 { - return c - } - if c := Compare(f[i].Value, rhs[i].Value); c != 0 { - return c - } - // The items are equal; continue. - i++ - } -} - -// Equals returns true if the two fieldslist are equals, false otherwise. -func (f FieldList) Equals(rhs FieldList) bool { - if len(f) != len(rhs) { - return false - } - for i := range f { - if f[i].Name != rhs[i].Name { - return false - } - if !Equals(f[i].Value, rhs[i].Value) { - return false - } - } - return true -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go deleted file mode 100644 index d4adb8f..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/jsontagutil.go +++ /dev/null @@ -1,91 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package value - -import ( - "fmt" - "reflect" - "strings" -) - -// TODO: This implements the same functionality as https://github.com/kubernetes/kubernetes/blob/master/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L236 -// but is based on the highly efficient approach from https://golang.org/src/encoding/json/encode.go - -func lookupJsonTags(f reflect.StructField) (name string, omit bool, inline bool, omitempty bool) { - tag := f.Tag.Get("json") - if tag == "-" { - return "", true, false, false - } - name, opts := parseTag(tag) - if name == "" { - name = f.Name - } - return name, false, opts.Contains("inline"), opts.Contains("omitempty") -} - -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Chan, reflect.Func: - panic(fmt.Sprintf("unsupported type: %v", v.Type())) - } - return false -} - -type tagOptions string - -// parseTag splits a struct field's json tag into its name and -// comma-separated options. -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// Contains reports whether a comma-separated list of options -// contains a particular substr flag. substr must be surrounded by a -// string boundary or commas. -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go deleted file mode 100644 index 0748f18..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/list.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -// List represents a list object. -type List interface { - // Length returns how many items can be found in the map. - Length() int - // At returns the item at the given position in the map. It will - // panic if the index is out of range. - At(int) Value - // AtUsing uses the provided allocator and returns the item at the given - // position in the map. It will panic if the index is out of range. - // The returned Value should be given back to the Allocator when no longer needed - // by calling Allocator.Free(Value). 
- AtUsing(Allocator, int) Value - // Range returns a ListRange for iterating over the items in the list. - Range() ListRange - // RangeUsing uses the provided allocator and returns a ListRange for - // iterating over the items in the list. - // The returned Range should be given back to the Allocator when no longer needed - // by calling Allocator.Free(Value). - RangeUsing(Allocator) ListRange - // Equals compares the two lists, and return true if they are the same, false otherwise. - // Implementations can use ListEquals as a general implementation for this methods. - Equals(List) bool - // EqualsUsing uses the provided allocator and compares the two lists, and return true if - // they are the same, false otherwise. Implementations can use ListEqualsUsing as a general - // implementation for this methods. - EqualsUsing(Allocator, List) bool -} - -// ListRange represents a single iteration across the items of a list. -type ListRange interface { - // Next increments to the next item in the range, if there is one, and returns true, or returns false if there are no more items. - Next() bool - // Item returns the index and value of the current item in the range. or panics if there is no current item. - // For efficiency, Item may reuse the values returned by previous Item calls. Callers should be careful avoid holding - // pointers to the value returned by Item() that escape the iteration loop since they become invalid once either - // Item() or Allocator.Free() is called. - Item() (index int, value Value) -} - -var EmptyRange = &emptyRange{} - -type emptyRange struct{} - -func (_ *emptyRange) Next() bool { - return false -} - -func (_ *emptyRange) Item() (index int, value Value) { - panic("Item called on empty ListRange") -} - -// ListEquals compares two lists lexically. -// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. -func ListEquals(lhs, rhs List) bool { - return ListEqualsUsing(HeapAllocator, lhs, rhs) -} - -// ListEqualsUsing uses the provided allocator and compares two lists lexically. -// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. -func ListEqualsUsing(a Allocator, lhs, rhs List) bool { - if lhs.Length() != rhs.Length() { - return false - } - - lhsRange := lhs.RangeUsing(a) - defer a.Free(lhsRange) - rhsRange := rhs.RangeUsing(a) - defer a.Free(rhsRange) - - for lhsRange.Next() && rhsRange.Next() { - _, lv := lhsRange.Item() - _, rv := rhsRange.Item() - if !EqualsUsing(a, lv, rv) { - return false - } - } - return true -} - -// ListLess compares two lists lexically. -func ListLess(lhs, rhs List) bool { - return ListCompare(lhs, rhs) == -1 -} - -// ListCompare compares two lists lexically. The result will be 0 if l==rhs, -1 -// if l < rhs, and +1 if l > rhs. -func ListCompare(lhs, rhs List) int { - return ListCompareUsing(HeapAllocator, lhs, rhs) -} - -// ListCompareUsing uses the provided allocator and compares two lists lexically. The result will be 0 if l==rhs, -1 -// if l < rhs, and +1 if l > rhs. -func ListCompareUsing(a Allocator, lhs, rhs List) int { - lhsRange := lhs.RangeUsing(a) - defer a.Free(lhsRange) - rhsRange := rhs.RangeUsing(a) - defer a.Free(rhsRange) - - for { - lhsOk := lhsRange.Next() - rhsOk := rhsRange.Next() - if !lhsOk && !rhsOk { - // Lists are the same length and all items are equal. - return 0 - } - if !lhsOk { - // LHS is shorter. - return -1 - } - if !rhsOk { - // RHS is shorter. 
- return 1 - } - _, lv := lhsRange.Item() - _, rv := rhsRange.Item() - if c := CompareUsing(a, lv, rv); c != 0 { - return c - } - // The items are equal; continue. - } -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go deleted file mode 100644 index 197d4c9..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listreflect.go +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import ( - "reflect" -) - -type listReflect struct { - Value reflect.Value -} - -func (r listReflect) Length() int { - val := r.Value - return val.Len() -} - -func (r listReflect) At(i int) Value { - val := r.Value - return mustWrapValueReflect(val.Index(i), nil, nil) -} - -func (r listReflect) AtUsing(a Allocator, i int) Value { - val := r.Value - return a.allocValueReflect().mustReuse(val.Index(i), nil, nil, nil) -} - -func (r listReflect) Unstructured() interface{} { - l := r.Length() - result := make([]interface{}, l) - for i := 0; i < l; i++ { - result[i] = r.At(i).Unstructured() - } - return result -} - -func (r listReflect) Range() ListRange { - return r.RangeUsing(HeapAllocator) -} - -func (r listReflect) RangeUsing(a Allocator) ListRange { - length := r.Value.Len() - if length == 0 { - return EmptyRange - } - rr := a.allocListReflectRange() - rr.list = r.Value - rr.i = -1 - rr.entry = TypeReflectEntryOf(r.Value.Type().Elem()) - return rr -} - -func (r listReflect) Equals(other List) bool { - return r.EqualsUsing(HeapAllocator, other) -} -func (r listReflect) EqualsUsing(a Allocator, other List) bool { - if otherReflectList, ok := other.(*listReflect); ok { - return reflect.DeepEqual(r.Value.Interface(), otherReflectList.Value.Interface()) - } - return ListEqualsUsing(a, &r, other) -} - -type listReflectRange struct { - list reflect.Value - vr *valueReflect - i int - entry *TypeReflectCacheEntry -} - -func (r *listReflectRange) Next() bool { - r.i += 1 - return r.i < r.list.Len() -} - -func (r *listReflectRange) Item() (index int, value Value) { - if r.i < 0 { - panic("Item() called before first calling Next()") - } - if r.i >= r.list.Len() { - panic("Item() called on ListRange with no more items") - } - v := r.list.Index(r.i) - return r.i, r.vr.mustReuse(v, r.entry, nil, nil) -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go deleted file mode 100644 index 64cd8e7..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/listunstructured.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -type listUnstructured []interface{} - -func (l listUnstructured) Length() int { - return len(l) -} - -func (l listUnstructured) At(i int) Value { - return NewValueInterface(l[i]) -} - -func (l listUnstructured) AtUsing(a Allocator, i int) Value { - return a.allocValueUnstructured().reuse(l[i]) -} - -func (l listUnstructured) Equals(other List) bool { - return l.EqualsUsing(HeapAllocator, other) -} - -func (l listUnstructured) EqualsUsing(a Allocator, other List) bool { - return ListEqualsUsing(a, &l, other) -} - -func (l listUnstructured) Range() ListRange { - return l.RangeUsing(HeapAllocator) -} - -func (l listUnstructured) RangeUsing(a Allocator) ListRange { - if len(l) == 0 { - return EmptyRange - } - r := a.allocListUnstructuredRange() - r.list = l - r.i = -1 - return r -} - -type listUnstructuredRange struct { - list listUnstructured - vv *valueUnstructured - i int -} - -func (r *listUnstructuredRange) Next() bool { - r.i += 1 - return r.i < len(r.list) -} - -func (r *listUnstructuredRange) Item() (index int, value Value) { - if r.i < 0 { - panic("Item() called before first calling Next()") - } - if r.i >= len(r.list) { - panic("Item() called on ListRange with no more items") - } - return r.i, r.vv.reuse(r.list[r.i]) -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go deleted file mode 100644 index 168b9fa..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/map.go +++ /dev/null @@ -1,270 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import ( - "sort" -) - -// Map represents a Map or go structure. -type Map interface { - // Set changes or set the value of the given key. - Set(key string, val Value) - // Get returns the value for the given key, if present, or (nil, false) otherwise. - Get(key string) (Value, bool) - // GetUsing uses the provided allocator and returns the value for the given key, - // if present, or (nil, false) otherwise. - // The returned Value should be given back to the Allocator when no longer needed - // by calling Allocator.Free(Value). - GetUsing(a Allocator, key string) (Value, bool) - // Has returns true if the key is present, or false otherwise. - Has(key string) bool - // Delete removes the key from the map. - Delete(key string) - // Equals compares the two maps, and return true if they are the same, false otherwise. - // Implementations can use MapEquals as a general implementation for this methods. 
- Equals(other Map) bool - // EqualsUsing uses the provided allocator and compares the two maps, and return true if - // they are the same, false otherwise. Implementations can use MapEqualsUsing as a general - // implementation for this methods. - EqualsUsing(a Allocator, other Map) bool - // Iterate runs the given function for each key/value in the - // map. Returning false in the closure prematurely stops the - // iteration. - Iterate(func(key string, value Value) bool) bool - // IterateUsing uses the provided allocator and runs the given function for each key/value - // in the map. Returning false in the closure prematurely stops the iteration. - IterateUsing(Allocator, func(key string, value Value) bool) bool - // Length returns the number of items in the map. - Length() int - // Empty returns true if the map is empty. - Empty() bool - // Zip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called - // with the values from both maps, otherwise it is called with the value of the map that contains the key and nil - // for the map that does not contain the key. Returning false in the closure prematurely stops the iteration. - Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool - // ZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps - // contain a value for a given key, fn is called with the values from both maps, otherwise it is called with - // the value of the map that contains the key and nil for the map that does not contain the key. Returning - // false in the closure prematurely stops the iteration. - ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool -} - -// MapTraverseOrder defines the map traversal ordering available. -type MapTraverseOrder int - -const ( - // Unordered indicates that the map traversal has no ordering requirement. - Unordered = iota - // LexicalKeyOrder indicates that the map traversal is ordered by key, lexically. - LexicalKeyOrder -) - -// MapZip iterates over the entries of two maps together. If both maps contain a value for a given key, fn is called -// with the values from both maps, otherwise it is called with the value of the map that contains the key and nil -// for the other map. Returning false in the closure prematurely stops the iteration. -func MapZip(lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - return MapZipUsing(HeapAllocator, lhs, rhs, order, fn) -} - -// MapZipUsing uses the provided allocator and iterates over the entries of two maps together. If both maps -// contain a value for a given key, fn is called with the values from both maps, otherwise it is called with -// the value of the map that contains the key and nil for the other map. Returning false in the closure -// prematurely stops the iteration. -func MapZipUsing(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - if lhs != nil { - return lhs.ZipUsing(a, rhs, order, fn) - } - if rhs != nil { - return rhs.ZipUsing(a, lhs, order, func(key string, rhs, lhs Value) bool { // arg positions of lhs and rhs deliberately swapped - return fn(key, lhs, rhs) - }) - } - return true -} - -// defaultMapZip provides a default implementation of Zip for implementations that do not need to provide -// their own optimized implementation. 
-func defaultMapZip(a Allocator, lhs, rhs Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - switch order { - case Unordered: - return unorderedMapZip(a, lhs, rhs, fn) - case LexicalKeyOrder: - return lexicalKeyOrderedMapZip(a, lhs, rhs, fn) - default: - panic("Unsupported map order") - } -} - -func unorderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { - if (lhs == nil || lhs.Empty()) && (rhs == nil || rhs.Empty()) { - return true - } - - if lhs != nil { - ok := lhs.IterateUsing(a, func(key string, lhsValue Value) bool { - var rhsValue Value - if rhs != nil { - if item, ok := rhs.GetUsing(a, key); ok { - rhsValue = item - defer a.Free(rhsValue) - } - } - return fn(key, lhsValue, rhsValue) - }) - if !ok { - return false - } - } - if rhs != nil { - return rhs.IterateUsing(a, func(key string, rhsValue Value) bool { - if lhs == nil || !lhs.Has(key) { - return fn(key, nil, rhsValue) - } - return true - }) - } - return true -} - -func lexicalKeyOrderedMapZip(a Allocator, lhs, rhs Map, fn func(key string, lhs, rhs Value) bool) bool { - var lhsLength, rhsLength int - var orderedLength int // rough estimate of length of union of map keys - if lhs != nil { - lhsLength = lhs.Length() - orderedLength = lhsLength - } - if rhs != nil { - rhsLength = rhs.Length() - if rhsLength > orderedLength { - orderedLength = rhsLength - } - } - if lhsLength == 0 && rhsLength == 0 { - return true - } - - ordered := make([]string, 0, orderedLength) - if lhs != nil { - lhs.IterateUsing(a, func(key string, _ Value) bool { - ordered = append(ordered, key) - return true - }) - } - if rhs != nil { - rhs.IterateUsing(a, func(key string, _ Value) bool { - if lhs == nil || !lhs.Has(key) { - ordered = append(ordered, key) - } - return true - }) - } - sort.Strings(ordered) - for _, key := range ordered { - var litem, ritem Value - if lhs != nil { - litem, _ = lhs.GetUsing(a, key) - } - if rhs != nil { - ritem, _ = rhs.GetUsing(a, key) - } - ok := fn(key, litem, ritem) - if litem != nil { - a.Free(litem) - } - if ritem != nil { - a.Free(ritem) - } - if !ok { - return false - } - } - return true -} - -// MapLess compares two maps lexically. -func MapLess(lhs, rhs Map) bool { - return MapCompare(lhs, rhs) == -1 -} - -// MapCompare compares two maps lexically. -func MapCompare(lhs, rhs Map) int { - return MapCompareUsing(HeapAllocator, lhs, rhs) -} - -// MapCompareUsing uses the provided allocator and compares two maps lexically. -func MapCompareUsing(a Allocator, lhs, rhs Map) int { - c := 0 - var llength, rlength int - if lhs != nil { - llength = lhs.Length() - } - if rhs != nil { - rlength = rhs.Length() - } - if llength == 0 && rlength == 0 { - return 0 - } - i := 0 - MapZipUsing(a, lhs, rhs, LexicalKeyOrder, func(key string, lhs, rhs Value) bool { - switch { - case i == llength: - c = -1 - case i == rlength: - c = 1 - case lhs == nil: - c = 1 - case rhs == nil: - c = -1 - default: - c = CompareUsing(a, lhs, rhs) - } - i++ - return c == 0 - }) - return c -} - -// MapEquals returns true if lhs == rhs, false otherwise. This function -// acts on generic types and should not be used by callers, but can help -// implement Map.Equals. -// WARN: This is a naive implementation, calling lhs.Equals(rhs) is typically the most efficient. -func MapEquals(lhs, rhs Map) bool { - return MapEqualsUsing(HeapAllocator, lhs, rhs) -} - -// MapEqualsUsing uses the provided allocator and returns true if lhs == rhs, -// false otherwise. 
This function acts on generic types and should not be used -// by callers, but can help implement Map.Equals. -// WARN: This is a naive implementation, calling lhs.EqualsUsing(allocator, rhs) is typically the most efficient. -func MapEqualsUsing(a Allocator, lhs, rhs Map) bool { - if lhs == nil && rhs == nil { - return true - } - if lhs == nil || rhs == nil { - return false - } - if lhs.Length() != rhs.Length() { - return false - } - return MapZipUsing(a, lhs, rhs, Unordered, func(key string, lhs, rhs Value) bool { - if lhs == nil || rhs == nil { - return false - } - return EqualsUsing(a, lhs, rhs) - }) -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go deleted file mode 100644 index dc8b8c7..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapreflect.go +++ /dev/null @@ -1,209 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import ( - "reflect" -) - -type mapReflect struct { - valueReflect -} - -func (r mapReflect) Length() int { - val := r.Value - return val.Len() -} - -func (r mapReflect) Empty() bool { - val := r.Value - return val.Len() == 0 -} - -func (r mapReflect) Get(key string) (Value, bool) { - return r.GetUsing(HeapAllocator, key) -} - -func (r mapReflect) GetUsing(a Allocator, key string) (Value, bool) { - k, v, ok := r.get(key) - if !ok { - return nil, false - } - return a.allocValueReflect().mustReuse(v, nil, &r.Value, &k), true -} - -func (r mapReflect) get(k string) (key, value reflect.Value, ok bool) { - mapKey := r.toMapKey(k) - val := r.Value.MapIndex(mapKey) - return mapKey, val, val.IsValid() && val != reflect.Value{} -} - -func (r mapReflect) Has(key string) bool { - var val reflect.Value - val = r.Value.MapIndex(r.toMapKey(key)) - if !val.IsValid() { - return false - } - return val != reflect.Value{} -} - -func (r mapReflect) Set(key string, val Value) { - r.Value.SetMapIndex(r.toMapKey(key), reflect.ValueOf(val.Unstructured())) -} - -func (r mapReflect) Delete(key string) { - val := r.Value - val.SetMapIndex(r.toMapKey(key), reflect.Value{}) -} - -// TODO: Do we need to support types that implement json.Marshaler and are used as string keys? 
-func (r mapReflect) toMapKey(key string) reflect.Value { - val := r.Value - return reflect.ValueOf(key).Convert(val.Type().Key()) -} - -func (r mapReflect) Iterate(fn func(string, Value) bool) bool { - return r.IterateUsing(HeapAllocator, fn) -} - -func (r mapReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { - if r.Value.Len() == 0 { - return true - } - v := a.allocValueReflect() - defer a.Free(v) - return eachMapEntry(r.Value, func(e *TypeReflectCacheEntry, key reflect.Value, value reflect.Value) bool { - return fn(key.String(), v.mustReuse(value, e, &r.Value, &key)) - }) -} - -func eachMapEntry(val reflect.Value, fn func(*TypeReflectCacheEntry, reflect.Value, reflect.Value) bool) bool { - iter := val.MapRange() - entry := TypeReflectEntryOf(val.Type().Elem()) - for iter.Next() { - next := iter.Value() - if !next.IsValid() { - continue - } - if !fn(entry, iter.Key(), next) { - return false - } - } - return true -} - -func (r mapReflect) Unstructured() interface{} { - result := make(map[string]interface{}, r.Length()) - r.Iterate(func(s string, value Value) bool { - result[s] = value.Unstructured() - return true - }) - return result -} - -func (r mapReflect) Equals(m Map) bool { - return r.EqualsUsing(HeapAllocator, m) -} - -func (r mapReflect) EqualsUsing(a Allocator, m Map) bool { - lhsLength := r.Length() - rhsLength := m.Length() - if lhsLength != rhsLength { - return false - } - if lhsLength == 0 { - return true - } - vr := a.allocValueReflect() - defer a.Free(vr) - entry := TypeReflectEntryOf(r.Value.Type().Elem()) - return m.Iterate(func(key string, value Value) bool { - _, lhsVal, ok := r.get(key) - if !ok { - return false - } - return Equals(vr.mustReuse(lhsVal, entry, nil, nil), value) - }) -} - -func (r mapReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - return r.ZipUsing(HeapAllocator, other, order, fn) -} - -func (r mapReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - if otherMapReflect, ok := other.(*mapReflect); ok && order == Unordered { - return r.unorderedReflectZip(a, otherMapReflect, fn) - } - return defaultMapZip(a, &r, other, order, fn) -} - -// unorderedReflectZip provides an optimized unordered zip for mapReflect types. 
-func (r mapReflect) unorderedReflectZip(a Allocator, other *mapReflect, fn func(key string, lhs, rhs Value) bool) bool { - if r.Empty() && (other == nil || other.Empty()) { - return true - } - - lhs := r.Value - lhsEntry := TypeReflectEntryOf(lhs.Type().Elem()) - - // map lookup via reflection is expensive enough that it is better to keep track of visited keys - visited := map[string]struct{}{} - - vlhs, vrhs := a.allocValueReflect(), a.allocValueReflect() - defer a.Free(vlhs) - defer a.Free(vrhs) - - if other != nil { - rhs := other.Value - rhsEntry := TypeReflectEntryOf(rhs.Type().Elem()) - iter := rhs.MapRange() - - for iter.Next() { - key := iter.Key() - keyString := key.String() - next := iter.Value() - if !next.IsValid() { - continue - } - rhsVal := vrhs.mustReuse(next, rhsEntry, &rhs, &key) - visited[keyString] = struct{}{} - var lhsVal Value - if _, v, ok := r.get(keyString); ok { - lhsVal = vlhs.mustReuse(v, lhsEntry, &lhs, &key) - } - if !fn(keyString, lhsVal, rhsVal) { - return false - } - } - } - - iter := lhs.MapRange() - for iter.Next() { - key := iter.Key() - if _, ok := visited[key.String()]; ok { - continue - } - next := iter.Value() - if !next.IsValid() { - continue - } - if !fn(key.String(), vlhs.mustReuse(next, lhsEntry, &lhs, &key), nil) { - return false - } - } - return true -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go deleted file mode 100644 index d8e2086..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/mapunstructured.go +++ /dev/null @@ -1,190 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package value - -type mapUnstructuredInterface map[interface{}]interface{} - -func (m mapUnstructuredInterface) Set(key string, val Value) { - m[key] = val.Unstructured() -} - -func (m mapUnstructuredInterface) Get(key string) (Value, bool) { - return m.GetUsing(HeapAllocator, key) -} - -func (m mapUnstructuredInterface) GetUsing(a Allocator, key string) (Value, bool) { - if v, ok := m[key]; !ok { - return nil, false - } else { - return a.allocValueUnstructured().reuse(v), true - } -} - -func (m mapUnstructuredInterface) Has(key string) bool { - _, ok := m[key] - return ok -} - -func (m mapUnstructuredInterface) Delete(key string) { - delete(m, key) -} - -func (m mapUnstructuredInterface) Iterate(fn func(key string, value Value) bool) bool { - return m.IterateUsing(HeapAllocator, fn) -} - -func (m mapUnstructuredInterface) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { - if len(m) == 0 { - return true - } - vv := a.allocValueUnstructured() - defer a.Free(vv) - for k, v := range m { - if ks, ok := k.(string); !ok { - continue - } else { - if !fn(ks, vv.reuse(v)) { - return false - } - } - } - return true -} - -func (m mapUnstructuredInterface) Length() int { - return len(m) -} - -func (m mapUnstructuredInterface) Empty() bool { - return len(m) == 0 -} - -func (m mapUnstructuredInterface) Equals(other Map) bool { - return m.EqualsUsing(HeapAllocator, other) -} - -func (m mapUnstructuredInterface) EqualsUsing(a Allocator, other Map) bool { - lhsLength := m.Length() - rhsLength := other.Length() - if lhsLength != rhsLength { - return false - } - if lhsLength == 0 { - return true - } - vv := a.allocValueUnstructured() - defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { - lhsVal, ok := m[key] - if !ok { - return false - } - return Equals(vv.reuse(lhsVal), value) - }) -} - -func (m mapUnstructuredInterface) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - return m.ZipUsing(HeapAllocator, other, order, fn) -} - -func (m mapUnstructuredInterface) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - return defaultMapZip(a, m, other, order, fn) -} - -type mapUnstructuredString map[string]interface{} - -func (m mapUnstructuredString) Set(key string, val Value) { - m[key] = val.Unstructured() -} - -func (m mapUnstructuredString) Get(key string) (Value, bool) { - return m.GetUsing(HeapAllocator, key) -} -func (m mapUnstructuredString) GetUsing(a Allocator, key string) (Value, bool) { - if v, ok := m[key]; !ok { - return nil, false - } else { - return a.allocValueUnstructured().reuse(v), true - } -} - -func (m mapUnstructuredString) Has(key string) bool { - _, ok := m[key] - return ok -} - -func (m mapUnstructuredString) Delete(key string) { - delete(m, key) -} - -func (m mapUnstructuredString) Iterate(fn func(key string, value Value) bool) bool { - return m.IterateUsing(HeapAllocator, fn) -} - -func (m mapUnstructuredString) IterateUsing(a Allocator, fn func(key string, value Value) bool) bool { - if len(m) == 0 { - return true - } - vv := a.allocValueUnstructured() - defer a.Free(vv) - for k, v := range m { - if !fn(k, vv.reuse(v)) { - return false - } - } - return true -} - -func (m mapUnstructuredString) Length() int { - return len(m) -} - -func (m mapUnstructuredString) Equals(other Map) bool { - return m.EqualsUsing(HeapAllocator, other) -} - -func (m mapUnstructuredString) EqualsUsing(a Allocator, other Map) bool { - lhsLength := m.Length() - 
rhsLength := other.Length() - if lhsLength != rhsLength { - return false - } - if lhsLength == 0 { - return true - } - vv := a.allocValueUnstructured() - defer a.Free(vv) - return other.Iterate(func(key string, value Value) bool { - lhsVal, ok := m[key] - if !ok { - return false - } - return Equals(vv.reuse(lhsVal), value) - }) -} - -func (m mapUnstructuredString) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - return m.ZipUsing(HeapAllocator, other, order, fn) -} - -func (m mapUnstructuredString) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - return defaultMapZip(a, m, other, order, fn) -} - -func (m mapUnstructuredString) Empty() bool { - return len(m) == 0 -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go deleted file mode 100644 index 49e6dd1..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/reflectcache.go +++ /dev/null @@ -1,463 +0,0 @@ -/* -Copyright 2020 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "sort" - "sync" - "sync/atomic" -) - -// UnstructuredConverter defines how a type can be converted directly to unstructured. -// Types that implement json.Marshaler may also optionally implement this interface to provide a more -// direct and more efficient conversion. All types that choose to implement this interface must still -// implement this same conversion via json.Marshaler. -type UnstructuredConverter interface { - json.Marshaler // require that json.Marshaler is implemented - - // ToUnstructured returns the unstructured representation. - ToUnstructured() interface{} -} - -// TypeReflectCacheEntry keeps data gathered using reflection about how a type is converted to/from unstructured. -type TypeReflectCacheEntry struct { - isJsonMarshaler bool - ptrIsJsonMarshaler bool - isJsonUnmarshaler bool - ptrIsJsonUnmarshaler bool - isStringConvertable bool - ptrIsStringConvertable bool - - structFields map[string]*FieldCacheEntry - orderedStructFields []*FieldCacheEntry -} - -// FieldCacheEntry keeps data gathered using reflection about how the field of a struct is converted to/from -// unstructured. -type FieldCacheEntry struct { - // JsonName returns the name of the field according to the json tags on the struct field. - JsonName string - // isOmitEmpty is true if the field has the json 'omitempty' tag. - isOmitEmpty bool - // fieldPath is a list of field indices (see FieldByIndex) to lookup the value of - // a field in a reflect.Value struct. The field indices in the list form a path used - // to traverse through intermediary 'inline' fields. 
- fieldPath [][]int - - fieldType reflect.Type - TypeEntry *TypeReflectCacheEntry -} - -func (f *FieldCacheEntry) CanOmit(fieldVal reflect.Value) bool { - return f.isOmitEmpty && (safeIsNil(fieldVal) || isZero(fieldVal)) -} - -// GetUsing returns the field identified by this FieldCacheEntry from the provided struct. -func (f *FieldCacheEntry) GetFrom(structVal reflect.Value) reflect.Value { - // field might be nested within 'inline' structs - for _, elem := range f.fieldPath { - structVal = structVal.FieldByIndex(elem) - } - return structVal -} - -var marshalerType = reflect.TypeOf(new(json.Marshaler)).Elem() -var unmarshalerType = reflect.TypeOf(new(json.Unmarshaler)).Elem() -var unstructuredConvertableType = reflect.TypeOf(new(UnstructuredConverter)).Elem() -var defaultReflectCache = newReflectCache() - -// TypeReflectEntryOf returns the TypeReflectCacheEntry of the provided reflect.Type. -func TypeReflectEntryOf(t reflect.Type) *TypeReflectCacheEntry { - cm := defaultReflectCache.get() - if record, ok := cm[t]; ok { - return record - } - updates := reflectCacheMap{} - result := typeReflectEntryOf(cm, t, updates) - if len(updates) > 0 { - defaultReflectCache.update(updates) - } - return result -} - -// TypeReflectEntryOf returns all updates needed to add provided reflect.Type, and the types its fields transitively -// depend on, to the cache. -func typeReflectEntryOf(cm reflectCacheMap, t reflect.Type, updates reflectCacheMap) *TypeReflectCacheEntry { - if record, ok := cm[t]; ok { - return record - } - if record, ok := updates[t]; ok { - return record - } - typeEntry := &TypeReflectCacheEntry{ - isJsonMarshaler: t.Implements(marshalerType), - ptrIsJsonMarshaler: reflect.PtrTo(t).Implements(marshalerType), - isJsonUnmarshaler: reflect.PtrTo(t).Implements(unmarshalerType), - isStringConvertable: t.Implements(unstructuredConvertableType), - ptrIsStringConvertable: reflect.PtrTo(t).Implements(unstructuredConvertableType), - } - if t.Kind() == reflect.Struct { - fieldEntries := map[string]*FieldCacheEntry{} - buildStructCacheEntry(t, fieldEntries, nil) - typeEntry.structFields = fieldEntries - sortedByJsonName := make([]*FieldCacheEntry, len(fieldEntries)) - i := 0 - for _, entry := range fieldEntries { - sortedByJsonName[i] = entry - i++ - } - sort.Slice(sortedByJsonName, func(i, j int) bool { - return sortedByJsonName[i].JsonName < sortedByJsonName[j].JsonName - }) - typeEntry.orderedStructFields = sortedByJsonName - } - - // cyclic type references are allowed, so we must add the typeEntry to the updates map before resolving - // the field.typeEntry references, or creating them if they are not already in the cache - updates[t] = typeEntry - - for _, field := range typeEntry.structFields { - if field.TypeEntry == nil { - field.TypeEntry = typeReflectEntryOf(cm, field.fieldType, updates) - } - } - return typeEntry -} - -func buildStructCacheEntry(t reflect.Type, infos map[string]*FieldCacheEntry, fieldPath [][]int) { - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - jsonName, omit, isInline, isOmitempty := lookupJsonTags(field) - if omit { - continue - } - if isInline { - buildStructCacheEntry(field.Type, infos, append(fieldPath, field.Index)) - continue - } - info := &FieldCacheEntry{JsonName: jsonName, isOmitEmpty: isOmitempty, fieldPath: append(fieldPath, field.Index), fieldType: field.Type} - infos[jsonName] = info - } -} - -// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs. 
-func (e TypeReflectCacheEntry) Fields() map[string]*FieldCacheEntry { - return e.structFields -} - -// Fields returns a map of JSON field name to FieldCacheEntry for structs, or nil for non-structs. -func (e TypeReflectCacheEntry) OrderedFields() []*FieldCacheEntry { - return e.orderedStructFields -} - -// CanConvertToUnstructured returns true if this TypeReflectCacheEntry can convert values of its type to unstructured. -func (e TypeReflectCacheEntry) CanConvertToUnstructured() bool { - return e.isJsonMarshaler || e.ptrIsJsonMarshaler || e.isStringConvertable || e.ptrIsStringConvertable -} - -// ToUnstructured converts the provided value to unstructured and returns it. -func (e TypeReflectCacheEntry) ToUnstructured(sv reflect.Value) (interface{}, error) { - // This is based on https://github.com/kubernetes/kubernetes/blob/82c9e5c814eb7acc6cc0a090c057294d0667ad66/staging/src/k8s.io/apimachinery/pkg/runtime/converter.go#L505 - // and is intended to replace it. - - // Check if the object has a custom string converter and use it if available, since it is much more efficient - // than round tripping through json. - if converter, ok := e.getUnstructuredConverter(sv); ok { - return converter.ToUnstructured(), nil - } - // Check if the object has a custom JSON marshaller/unmarshaller. - if marshaler, ok := e.getJsonMarshaler(sv); ok { - if sv.Kind() == reflect.Ptr && sv.IsNil() { - // We're done - we don't need to store anything. - return nil, nil - } - - data, err := marshaler.MarshalJSON() - if err != nil { - return nil, err - } - switch { - case len(data) == 0: - return nil, fmt.Errorf("error decoding from json: empty value") - - case bytes.Equal(data, nullBytes): - // We're done - we don't need to store anything. - return nil, nil - - case bytes.Equal(data, trueBytes): - return true, nil - - case bytes.Equal(data, falseBytes): - return false, nil - - case data[0] == '"': - var result string - err := unmarshal(data, &result) - if err != nil { - return nil, fmt.Errorf("error decoding string from json: %v", err) - } - return result, nil - - case data[0] == '{': - result := make(map[string]interface{}) - err := unmarshal(data, &result) - if err != nil { - return nil, fmt.Errorf("error decoding object from json: %v", err) - } - return result, nil - - case data[0] == '[': - result := make([]interface{}, 0) - err := unmarshal(data, &result) - if err != nil { - return nil, fmt.Errorf("error decoding array from json: %v", err) - } - return result, nil - - default: - var ( - resultInt int64 - resultFloat float64 - err error - ) - if err = unmarshal(data, &resultInt); err == nil { - return resultInt, nil - } else if err = unmarshal(data, &resultFloat); err == nil { - return resultFloat, nil - } else { - return nil, fmt.Errorf("error decoding number from json: %v", err) - } - } - } - - return nil, fmt.Errorf("provided type cannot be converted: %v", sv.Type()) -} - -// CanConvertFromUnstructured returns true if this TypeReflectCacheEntry can convert objects of the type from unstructured. -func (e TypeReflectCacheEntry) CanConvertFromUnstructured() bool { - return e.isJsonUnmarshaler -} - -// FromUnstructured converts the provided source value from unstructured into the provided destination value. -func (e TypeReflectCacheEntry) FromUnstructured(sv, dv reflect.Value) error { - // TODO: this could be made much more efficient using direct conversions like - // UnstructuredConverter.ToUnstructured provides. 
- st := dv.Type() - data, err := json.Marshal(sv.Interface()) - if err != nil { - return fmt.Errorf("error encoding %s to json: %v", st.String(), err) - } - if unmarshaler, ok := e.getJsonUnmarshaler(dv); ok { - return unmarshaler.UnmarshalJSON(data) - } - return fmt.Errorf("unable to unmarshal %v into %v", sv.Type(), dv.Type()) -} - -var ( - nullBytes = []byte("null") - trueBytes = []byte("true") - falseBytes = []byte("false") -) - -func (e TypeReflectCacheEntry) getJsonMarshaler(v reflect.Value) (json.Marshaler, bool) { - if e.isJsonMarshaler { - return v.Interface().(json.Marshaler), true - } - if e.ptrIsJsonMarshaler { - // Check pointer receivers if v is not a pointer - if v.Kind() != reflect.Ptr && v.CanAddr() { - v = v.Addr() - return v.Interface().(json.Marshaler), true - } - } - return nil, false -} - -func (e TypeReflectCacheEntry) getJsonUnmarshaler(v reflect.Value) (json.Unmarshaler, bool) { - if !e.isJsonUnmarshaler { - return nil, false - } - return v.Addr().Interface().(json.Unmarshaler), true -} - -func (e TypeReflectCacheEntry) getUnstructuredConverter(v reflect.Value) (UnstructuredConverter, bool) { - if e.isStringConvertable { - return v.Interface().(UnstructuredConverter), true - } - if e.ptrIsStringConvertable { - // Check pointer receivers if v is not a pointer - if v.CanAddr() { - v = v.Addr() - return v.Interface().(UnstructuredConverter), true - } - } - return nil, false -} - -type typeReflectCache struct { - // use an atomic and copy-on-write since there are a fixed (typically very small) number of structs compiled into any - // go program using this cache - value atomic.Value - // mu is held by writers when performing load/modify/store operations on the cache, readers do not need to hold a - // read-lock since the atomic value is always read-only - mu sync.Mutex -} - -func newReflectCache() *typeReflectCache { - cache := &typeReflectCache{} - cache.value.Store(make(reflectCacheMap)) - return cache -} - -type reflectCacheMap map[reflect.Type]*TypeReflectCacheEntry - -// get returns the reflectCacheMap. -func (c *typeReflectCache) get() reflectCacheMap { - return c.value.Load().(reflectCacheMap) -} - -// update merges the provided updates into the cache. -func (c *typeReflectCache) update(updates reflectCacheMap) { - c.mu.Lock() - defer c.mu.Unlock() - - currentCacheMap := c.value.Load().(reflectCacheMap) - - hasNewEntries := false - for t := range updates { - if _, ok := currentCacheMap[t]; !ok { - hasNewEntries = true - break - } - } - if !hasNewEntries { - // Bail if the updates have been set while waiting for lock acquisition. - // This is safe since setting entries is idempotent. 
- return - } - - newCacheMap := make(reflectCacheMap, len(currentCacheMap)+len(updates)) - for k, v := range currentCacheMap { - newCacheMap[k] = v - } - for t, update := range updates { - newCacheMap[t] = update - } - c.value.Store(newCacheMap) -} - -// Below json Unmarshal is fromk8s.io/apimachinery/pkg/util/json -// to handle number conversions as expected by Kubernetes - -// limit recursive depth to prevent stack overflow errors -const maxDepth = 10000 - -// unmarshal unmarshals the given data -// If v is a *map[string]interface{}, numbers are converted to int64 or float64 -func unmarshal(data []byte, v interface{}) error { - switch v := v.(type) { - case *map[string]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertMapNumbers(*v, 0) - - case *[]interface{}: - // Build a decoder from the given data - decoder := json.NewDecoder(bytes.NewBuffer(data)) - // Preserve numbers, rather than casting to float64 automatically - decoder.UseNumber() - // Run the decode - if err := decoder.Decode(v); err != nil { - return err - } - // If the decode succeeds, post-process the map to convert json.Number objects to int64 or float64 - return convertSliceNumbers(*v, 0) - - default: - return json.Unmarshal(data, v) - } -} - -// convertMapNumbers traverses the map, converting any json.Number values to int64 or float64. -// values which are map[string]interface{} or []interface{} are recursively visited -func convertMapNumbers(m map[string]interface{}, depth int) error { - if depth > maxDepth { - return fmt.Errorf("exceeded max depth of %d", maxDepth) - } - - var err error - for k, v := range m { - switch v := v.(type) { - case json.Number: - m[k], err = convertNumber(v) - case map[string]interface{}: - err = convertMapNumbers(v, depth+1) - case []interface{}: - err = convertSliceNumbers(v, depth+1) - } - if err != nil { - return err - } - } - return nil -} - -// convertSliceNumbers traverses the slice, converting any json.Number values to int64 or float64. -// values which are map[string]interface{} or []interface{} are recursively visited -func convertSliceNumbers(s []interface{}, depth int) error { - if depth > maxDepth { - return fmt.Errorf("exceeded max depth of %d", maxDepth) - } - - var err error - for i, v := range s { - switch v := v.(type) { - case json.Number: - s[i], err = convertNumber(v) - case map[string]interface{}: - err = convertMapNumbers(v, depth+1) - case []interface{}: - err = convertSliceNumbers(v, depth+1) - } - if err != nil { - return err - } - } - return nil -} - -// convertNumber converts a json.Number to an int64 or float64, or returns an error -func convertNumber(n json.Number) (interface{}, error) { - // Attempt to convert to an int64 first - if i, err := n.Int64(); err == nil { - return i, nil - } - // Return a float64 (default json.Decode() behavior) - // An overflow will return an error - return n.Float64() -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go deleted file mode 100644 index c78a4c1..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/scalar.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -// Compare compares floats. The result will be 0 if lhs==rhs, -1 if f < -// rhs, and +1 if f > rhs. -func FloatCompare(lhs, rhs float64) int { - if lhs > rhs { - return 1 - } else if lhs < rhs { - return -1 - } - return 0 -} - -// IntCompare compares integers. The result will be 0 if i==rhs, -1 if i < -// rhs, and +1 if i > rhs. -func IntCompare(lhs, rhs int64) int { - if lhs > rhs { - return 1 - } else if lhs < rhs { - return -1 - } - return 0 -} - -// Compare compares booleans. The result will be 0 if b==rhs, -1 if b < -// rhs, and +1 if b > rhs. -func BoolCompare(lhs, rhs bool) int { - if lhs == rhs { - return 0 - } else if lhs == false { - return -1 - } - return 1 -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go deleted file mode 100644 index 4a7bb5c..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/structreflect.go +++ /dev/null @@ -1,208 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package value - -import ( - "fmt" - "reflect" -) - -type structReflect struct { - valueReflect -} - -func (r structReflect) Length() int { - i := 0 - eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { - i++ - return true - }) - return i -} - -func (r structReflect) Empty() bool { - return eachStructField(r.Value, func(_ *TypeReflectCacheEntry, s string, value reflect.Value) bool { - return false // exit early if the struct is non-empty - }) -} - -func (r structReflect) Get(key string) (Value, bool) { - return r.GetUsing(HeapAllocator, key) -} - -func (r structReflect) GetUsing(a Allocator, key string) (Value, bool) { - if val, ok := r.findJsonNameField(key); ok { - return a.allocValueReflect().mustReuse(val, nil, nil, nil), true - } - return nil, false -} - -func (r structReflect) Has(key string) bool { - _, ok := r.findJsonNameField(key) - return ok -} - -func (r structReflect) Set(key string, val Value) { - fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] - if !ok { - panic(fmt.Sprintf("key %s may not be set on struct %T: field does not exist", key, r.Value.Interface())) - } - oldVal := fieldEntry.GetFrom(r.Value) - newVal := reflect.ValueOf(val.Unstructured()) - r.update(fieldEntry, key, oldVal, newVal) -} - -func (r structReflect) Delete(key string) { - fieldEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[key] - if !ok { - panic(fmt.Sprintf("key %s may not be deleted on struct %T: field does not exist", key, r.Value.Interface())) - } - oldVal := fieldEntry.GetFrom(r.Value) - if oldVal.Kind() != reflect.Ptr && !fieldEntry.isOmitEmpty { - panic(fmt.Sprintf("key %s may not be deleted on struct: %T: value is neither a pointer nor an omitempty field", key, r.Value.Interface())) - } - r.update(fieldEntry, key, oldVal, reflect.Zero(oldVal.Type())) -} - -func (r structReflect) update(fieldEntry *FieldCacheEntry, key string, oldVal, newVal reflect.Value) { - if oldVal.CanSet() { - oldVal.Set(newVal) - return - } - - // map items are not addressable, so if a struct is contained in a map, the only way to modify it is - // to write a replacement fieldEntry into the map. - if r.ParentMap != nil { - if r.ParentMapKey == nil { - panic("ParentMapKey must not be nil if ParentMap is not nil") - } - replacement := reflect.New(r.Value.Type()).Elem() - fieldEntry.GetFrom(replacement).Set(newVal) - r.ParentMap.SetMapIndex(*r.ParentMapKey, replacement) - return - } - - // This should never happen since NewValueReflect ensures that the root object reflected on is a pointer and map - // item replacement is handled above. 
- panic(fmt.Sprintf("key %s may not be modified on struct: %T: struct is not settable", key, r.Value.Interface())) -} - -func (r structReflect) Iterate(fn func(string, Value) bool) bool { - return r.IterateUsing(HeapAllocator, fn) -} - -func (r structReflect) IterateUsing(a Allocator, fn func(string, Value) bool) bool { - vr := a.allocValueReflect() - defer a.Free(vr) - return eachStructField(r.Value, func(e *TypeReflectCacheEntry, s string, value reflect.Value) bool { - return fn(s, vr.mustReuse(value, e, nil, nil)) - }) -} - -func eachStructField(structVal reflect.Value, fn func(*TypeReflectCacheEntry, string, reflect.Value) bool) bool { - for _, fieldCacheEntry := range TypeReflectEntryOf(structVal.Type()).OrderedFields() { - fieldVal := fieldCacheEntry.GetFrom(structVal) - if fieldCacheEntry.CanOmit(fieldVal) { - // omit it - continue - } - ok := fn(fieldCacheEntry.TypeEntry, fieldCacheEntry.JsonName, fieldVal) - if !ok { - return false - } - } - return true -} - -func (r structReflect) Unstructured() interface{} { - // Use number of struct fields as a cheap way to rough estimate map size - result := make(map[string]interface{}, r.Value.NumField()) - r.Iterate(func(s string, value Value) bool { - result[s] = value.Unstructured() - return true - }) - return result -} - -func (r structReflect) Equals(m Map) bool { - return r.EqualsUsing(HeapAllocator, m) -} - -func (r structReflect) EqualsUsing(a Allocator, m Map) bool { - // MapEquals uses zip and is fairly efficient for structReflect - return MapEqualsUsing(a, &r, m) -} - -func (r structReflect) findJsonNameFieldAndNotEmpty(jsonName string) (reflect.Value, bool) { - structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] - if !ok { - return reflect.Value{}, false - } - fieldVal := structCacheEntry.GetFrom(r.Value) - return fieldVal, !structCacheEntry.CanOmit(fieldVal) -} - -func (r structReflect) findJsonNameField(jsonName string) (val reflect.Value, ok bool) { - structCacheEntry, ok := TypeReflectEntryOf(r.Value.Type()).Fields()[jsonName] - if !ok { - return reflect.Value{}, false - } - fieldVal := structCacheEntry.GetFrom(r.Value) - return fieldVal, !structCacheEntry.CanOmit(fieldVal) -} - -func (r structReflect) Zip(other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - return r.ZipUsing(HeapAllocator, other, order, fn) -} - -func (r structReflect) ZipUsing(a Allocator, other Map, order MapTraverseOrder, fn func(key string, lhs, rhs Value) bool) bool { - if otherStruct, ok := other.(*structReflect); ok && r.Value.Type() == otherStruct.Value.Type() { - lhsvr, rhsvr := a.allocValueReflect(), a.allocValueReflect() - defer a.Free(lhsvr) - defer a.Free(rhsvr) - return r.structZip(otherStruct, lhsvr, rhsvr, fn) - } - return defaultMapZip(a, &r, other, order, fn) -} - -// structZip provides an optimized zip for structReflect types. The zip is always lexical key ordered since there is -// no additional cost to ordering the zip for structured types. 
-func (r structReflect) structZip(other *structReflect, lhsvr, rhsvr *valueReflect, fn func(key string, lhs, rhs Value) bool) bool { - lhsVal := r.Value - rhsVal := other.Value - - for _, fieldCacheEntry := range TypeReflectEntryOf(lhsVal.Type()).OrderedFields() { - lhsFieldVal := fieldCacheEntry.GetFrom(lhsVal) - rhsFieldVal := fieldCacheEntry.GetFrom(rhsVal) - lhsOmit := fieldCacheEntry.CanOmit(lhsFieldVal) - rhsOmit := fieldCacheEntry.CanOmit(rhsFieldVal) - if lhsOmit && rhsOmit { - continue - } - var lhsVal, rhsVal Value - if !lhsOmit { - lhsVal = lhsvr.mustReuse(lhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil) - } - if !rhsOmit { - rhsVal = rhsvr.mustReuse(rhsFieldVal, fieldCacheEntry.TypeEntry, nil, nil) - } - if !fn(fieldCacheEntry.JsonName, lhsVal, rhsVal) { - return false - } - } - return true -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go deleted file mode 100644 index ea79e3a..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/value.go +++ /dev/null @@ -1,347 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import ( - "bytes" - "fmt" - "io" - "strings" - - jsoniter "github.com/json-iterator/go" - "gopkg.in/yaml.v2" -) - -var ( - readPool = jsoniter.NewIterator(jsoniter.ConfigCompatibleWithStandardLibrary).Pool() - writePool = jsoniter.NewStream(jsoniter.ConfigCompatibleWithStandardLibrary, nil, 1024).Pool() -) - -// A Value corresponds to an 'atom' in the schema. It should return true -// for at least one of the IsXXX methods below, or the value is -// considered "invalid" -type Value interface { - // IsMap returns true if the Value is a Map, false otherwise. - IsMap() bool - // IsList returns true if the Value is a List, false otherwise. - IsList() bool - // IsBool returns true if the Value is a bool, false otherwise. - IsBool() bool - // IsInt returns true if the Value is a int64, false otherwise. - IsInt() bool - // IsFloat returns true if the Value is a float64, false - // otherwise. - IsFloat() bool - // IsString returns true if the Value is a string, false - // otherwise. - IsString() bool - // IsMap returns true if the Value is null, false otherwise. - IsNull() bool - - // AsMap converts the Value into a Map (or panic if the type - // doesn't allow it). - AsMap() Map - // AsMapUsing uses the provided allocator and converts the Value - // into a Map (or panic if the type doesn't allow it). - AsMapUsing(Allocator) Map - // AsList converts the Value into a List (or panic if the type - // doesn't allow it). - AsList() List - // AsListUsing uses the provided allocator and converts the Value - // into a List (or panic if the type doesn't allow it). - AsListUsing(Allocator) List - // AsBool converts the Value into a bool (or panic if the type - // doesn't allow it). - AsBool() bool - // AsInt converts the Value into an int64 (or panic if the type - // doesn't allow it). 
- AsInt() int64 - // AsFloat converts the Value into a float64 (or panic if the type - // doesn't allow it). - AsFloat() float64 - // AsString converts the Value into a string (or panic if the type - // doesn't allow it). - AsString() string - - // Unstructured converts the Value into an Unstructured interface{}. - Unstructured() interface{} -} - -// FromJSON is a helper function for reading a JSON document. -func FromJSON(input []byte) (Value, error) { - return FromJSONFast(input) -} - -// FromJSONFast is a helper function for reading a JSON document. -func FromJSONFast(input []byte) (Value, error) { - iter := readPool.BorrowIterator(input) - defer readPool.ReturnIterator(iter) - return ReadJSONIter(iter) -} - -// ToJSON is a helper function for producing a JSon document. -func ToJSON(v Value) ([]byte, error) { - buf := bytes.Buffer{} - stream := writePool.BorrowStream(&buf) - defer writePool.ReturnStream(stream) - WriteJSONStream(v, stream) - b := stream.Buffer() - err := stream.Flush() - // Help jsoniter manage its buffers--without this, the next - // use of the stream is likely to require an allocation. Look - // at the jsoniter stream code to understand why. They were probably - // optimizing for folks using the buffer directly. - stream.SetBuffer(b[:0]) - return buf.Bytes(), err -} - -// ReadJSONIter reads a Value from a JSON iterator. -func ReadJSONIter(iter *jsoniter.Iterator) (Value, error) { - v := iter.Read() - if iter.Error != nil && iter.Error != io.EOF { - return nil, iter.Error - } - return NewValueInterface(v), nil -} - -// WriteJSONStream writes a value into a JSON stream. -func WriteJSONStream(v Value, stream *jsoniter.Stream) { - stream.WriteVal(v.Unstructured()) -} - -// ToYAML marshals a value as YAML. -func ToYAML(v Value) ([]byte, error) { - return yaml.Marshal(v.Unstructured()) -} - -// Equals returns true iff the two values are equal. -func Equals(lhs, rhs Value) bool { - return EqualsUsing(HeapAllocator, lhs, rhs) -} - -// EqualsUsing uses the provided allocator and returns true iff the two values are equal. 
-func EqualsUsing(a Allocator, lhs, rhs Value) bool { - if lhs.IsFloat() || rhs.IsFloat() { - var lf float64 - if lhs.IsFloat() { - lf = lhs.AsFloat() - } else if lhs.IsInt() { - lf = float64(lhs.AsInt()) - } else { - return false - } - var rf float64 - if rhs.IsFloat() { - rf = rhs.AsFloat() - } else if rhs.IsInt() { - rf = float64(rhs.AsInt()) - } else { - return false - } - return lf == rf - } - if lhs.IsInt() { - if rhs.IsInt() { - return lhs.AsInt() == rhs.AsInt() - } - return false - } else if rhs.IsInt() { - return false - } - if lhs.IsString() { - if rhs.IsString() { - return lhs.AsString() == rhs.AsString() - } - return false - } else if rhs.IsString() { - return false - } - if lhs.IsBool() { - if rhs.IsBool() { - return lhs.AsBool() == rhs.AsBool() - } - return false - } else if rhs.IsBool() { - return false - } - if lhs.IsList() { - if rhs.IsList() { - lhsList := lhs.AsListUsing(a) - defer a.Free(lhsList) - rhsList := rhs.AsListUsing(a) - defer a.Free(rhsList) - return lhsList.EqualsUsing(a, rhsList) - } - return false - } else if rhs.IsList() { - return false - } - if lhs.IsMap() { - if rhs.IsMap() { - lhsList := lhs.AsMapUsing(a) - defer a.Free(lhsList) - rhsList := rhs.AsMapUsing(a) - defer a.Free(rhsList) - return lhsList.EqualsUsing(a, rhsList) - } - return false - } else if rhs.IsMap() { - return false - } - if lhs.IsNull() { - if rhs.IsNull() { - return true - } - return false - } else if rhs.IsNull() { - return false - } - // No field is set, on either objects. - return true -} - -// ToString returns a human-readable representation of the value. -func ToString(v Value) string { - if v.IsNull() { - return "null" - } - switch { - case v.IsFloat(): - return fmt.Sprintf("%v", v.AsFloat()) - case v.IsInt(): - return fmt.Sprintf("%v", v.AsInt()) - case v.IsString(): - return fmt.Sprintf("%q", v.AsString()) - case v.IsBool(): - return fmt.Sprintf("%v", v.AsBool()) - case v.IsList(): - strs := []string{} - list := v.AsList() - for i := 0; i < list.Length(); i++ { - strs = append(strs, ToString(list.At(i))) - } - return "[" + strings.Join(strs, ",") + "]" - case v.IsMap(): - strs := []string{} - v.AsMap().Iterate(func(k string, v Value) bool { - strs = append(strs, fmt.Sprintf("%v=%v", k, ToString(v))) - return true - }) - return strings.Join(strs, "") - } - // No field is set, on either objects. - return "{{undefined}}" -} - -// Less provides a total ordering for Value (so that they can be sorted, even -// if they are of different types). -func Less(lhs, rhs Value) bool { - return Compare(lhs, rhs) == -1 -} - -// Compare provides a total ordering for Value (so that they can be -// sorted, even if they are of different types). The result will be 0 if -// v==rhs, -1 if v < rhs, and +1 if v > rhs. -func Compare(lhs, rhs Value) int { - return CompareUsing(HeapAllocator, lhs, rhs) -} - -// CompareUsing uses the provided allocator and provides a total -// ordering for Value (so that they can be sorted, even if they -// are of different types). The result will be 0 if v==rhs, -1 -// if v < rhs, and +1 if v > rhs. -func CompareUsing(a Allocator, lhs, rhs Value) int { - if lhs.IsFloat() { - if !rhs.IsFloat() { - // Extra: compare floats and ints numerically. - if rhs.IsInt() { - return FloatCompare(lhs.AsFloat(), float64(rhs.AsInt())) - } - return -1 - } - return FloatCompare(lhs.AsFloat(), rhs.AsFloat()) - } else if rhs.IsFloat() { - // Extra: compare floats and ints numerically. 
- if lhs.IsInt() { - return FloatCompare(float64(lhs.AsInt()), rhs.AsFloat()) - } - return 1 - } - - if lhs.IsInt() { - if !rhs.IsInt() { - return -1 - } - return IntCompare(lhs.AsInt(), rhs.AsInt()) - } else if rhs.IsInt() { - return 1 - } - - if lhs.IsString() { - if !rhs.IsString() { - return -1 - } - return strings.Compare(lhs.AsString(), rhs.AsString()) - } else if rhs.IsString() { - return 1 - } - - if lhs.IsBool() { - if !rhs.IsBool() { - return -1 - } - return BoolCompare(lhs.AsBool(), rhs.AsBool()) - } else if rhs.IsBool() { - return 1 - } - - if lhs.IsList() { - if !rhs.IsList() { - return -1 - } - lhsList := lhs.AsListUsing(a) - defer a.Free(lhsList) - rhsList := rhs.AsListUsing(a) - defer a.Free(rhsList) - return ListCompareUsing(a, lhsList, rhsList) - } else if rhs.IsList() { - return 1 - } - if lhs.IsMap() { - if !rhs.IsMap() { - return -1 - } - lhsMap := lhs.AsMapUsing(a) - defer a.Free(lhsMap) - rhsMap := rhs.AsMapUsing(a) - defer a.Free(rhsMap) - return MapCompareUsing(a, lhsMap, rhsMap) - } else if rhs.IsMap() { - return 1 - } - if lhs.IsNull() { - if !rhs.IsNull() { - return -1 - } - return 0 - } else if rhs.IsNull() { - return 1 - } - - // Invalid Value-- nothing is set. - return 0 -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go deleted file mode 100644 index 05e70de..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valuereflect.go +++ /dev/null @@ -1,294 +0,0 @@ -/* -Copyright 2019 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import ( - "encoding/base64" - "fmt" - "reflect" -) - -// NewValueReflect creates a Value backed by an "interface{}" type, -// typically an structured object in Kubernetes world that is uses reflection to expose. -// The provided "interface{}" value must be a pointer so that the value can be modified via reflection. -// The provided "interface{}" may contain structs and types that are converted to Values -// by the jsonMarshaler interface. -func NewValueReflect(value interface{}) (Value, error) { - if value == nil { - return NewValueInterface(nil), nil - } - v := reflect.ValueOf(value) - if v.Kind() != reflect.Ptr { - // The root value to reflect on must be a pointer so that map.Set() and map.Delete() operations are possible. - return nil, fmt.Errorf("value provided to NewValueReflect must be a pointer") - } - return wrapValueReflect(v, nil, nil) -} - -// wrapValueReflect wraps the provide reflect.Value as a value. If parent in the data tree is a map, parentMap -// and parentMapKey must be provided so that the returned value may be set and deleted. -func wrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) (Value, error) { - val := HeapAllocator.allocValueReflect() - return val.reuse(value, nil, parentMap, parentMapKey) -} - -// wrapValueReflect wraps the provide reflect.Value as a value, and panics if there is an error. 
If parent in the data -// tree is a map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. -func mustWrapValueReflect(value reflect.Value, parentMap, parentMapKey *reflect.Value) Value { - v, err := wrapValueReflect(value, parentMap, parentMapKey) - if err != nil { - panic(err) - } - return v -} - -// the value interface doesn't care about the type for value.IsNull, so we can use a constant -var nilType = reflect.TypeOf(&struct{}{}) - -// reuse replaces the value of the valueReflect. If parent in the data tree is a map, parentMap and parentMapKey -// must be provided so that the returned value may be set and deleted. -func (r *valueReflect) reuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) (Value, error) { - if cacheEntry == nil { - cacheEntry = TypeReflectEntryOf(value.Type()) - } - if cacheEntry.CanConvertToUnstructured() { - u, err := cacheEntry.ToUnstructured(value) - if err != nil { - return nil, err - } - if u == nil { - value = reflect.Zero(nilType) - } else { - value = reflect.ValueOf(u) - } - } - r.Value = dereference(value) - r.ParentMap = parentMap - r.ParentMapKey = parentMapKey - r.kind = kind(r.Value) - return r, nil -} - -// mustReuse replaces the value of the valueReflect and panics if there is an error. If parent in the data tree is a -// map, parentMap and parentMapKey must be provided so that the returned value may be set and deleted. -func (r *valueReflect) mustReuse(value reflect.Value, cacheEntry *TypeReflectCacheEntry, parentMap, parentMapKey *reflect.Value) Value { - v, err := r.reuse(value, cacheEntry, parentMap, parentMapKey) - if err != nil { - panic(err) - } - return v -} - -func dereference(val reflect.Value) reflect.Value { - kind := val.Kind() - if (kind == reflect.Interface || kind == reflect.Ptr) && !safeIsNil(val) { - return val.Elem() - } - return val -} - -type valueReflect struct { - ParentMap *reflect.Value - ParentMapKey *reflect.Value - Value reflect.Value - kind reflectType -} - -func (r valueReflect) IsMap() bool { - return r.kind == mapType || r.kind == structMapType -} - -func (r valueReflect) IsList() bool { - return r.kind == listType -} - -func (r valueReflect) IsBool() bool { - return r.kind == boolType -} - -func (r valueReflect) IsInt() bool { - return r.kind == intType || r.kind == uintType -} - -func (r valueReflect) IsFloat() bool { - return r.kind == floatType -} - -func (r valueReflect) IsString() bool { - return r.kind == stringType || r.kind == byteStringType -} - -func (r valueReflect) IsNull() bool { - return r.kind == nullType -} - -type reflectType = int - -const ( - mapType = iota - structMapType - listType - intType - uintType - floatType - stringType - byteStringType - boolType - nullType -) - -func kind(v reflect.Value) reflectType { - typ := v.Type() - rk := typ.Kind() - switch rk { - case reflect.Map: - if v.IsNil() { - return nullType - } - return mapType - case reflect.Struct: - return structMapType - case reflect.Int, reflect.Int64, reflect.Int32, reflect.Int16, reflect.Int8: - return intType - case reflect.Uint, reflect.Uint32, reflect.Uint16, reflect.Uint8: - // Uint64 deliberately excluded, see valueUnstructured.Int. 
- return uintType - case reflect.Float64, reflect.Float32: - return floatType - case reflect.String: - return stringType - case reflect.Bool: - return boolType - case reflect.Slice: - if v.IsNil() { - return nullType - } - elemKind := typ.Elem().Kind() - if elemKind == reflect.Uint8 { - return byteStringType - } - return listType - case reflect.Chan, reflect.Func, reflect.Ptr, reflect.UnsafePointer, reflect.Interface: - if v.IsNil() { - return nullType - } - panic(fmt.Sprintf("unsupported type: %v", v.Type())) - default: - panic(fmt.Sprintf("unsupported type: %v", v.Type())) - } -} - -// TODO find a cleaner way to avoid panics from reflect.IsNil() -func safeIsNil(v reflect.Value) bool { - k := v.Kind() - switch k { - case reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr, reflect.UnsafePointer, reflect.Interface, reflect.Slice: - return v.IsNil() - } - return false -} - -func (r valueReflect) AsMap() Map { - return r.AsMapUsing(HeapAllocator) -} - -func (r valueReflect) AsMapUsing(a Allocator) Map { - switch r.kind { - case structMapType: - v := a.allocStructReflect() - v.valueReflect = r - return v - case mapType: - v := a.allocMapReflect() - v.valueReflect = r - return v - default: - panic("value is not a map or struct") - } -} - -func (r valueReflect) AsList() List { - return r.AsListUsing(HeapAllocator) -} - -func (r valueReflect) AsListUsing(a Allocator) List { - if r.IsList() { - v := a.allocListReflect() - v.Value = r.Value - return v - } - panic("value is not a list") -} - -func (r valueReflect) AsBool() bool { - if r.IsBool() { - return r.Value.Bool() - } - panic("value is not a bool") -} - -func (r valueReflect) AsInt() int64 { - if r.kind == intType { - return r.Value.Int() - } - if r.kind == uintType { - return int64(r.Value.Uint()) - } - - panic("value is not an int") -} - -func (r valueReflect) AsFloat() float64 { - if r.IsFloat() { - return r.Value.Float() - } - panic("value is not a float") -} - -func (r valueReflect) AsString() string { - switch r.kind { - case stringType: - return r.Value.String() - case byteStringType: - return base64.StdEncoding.EncodeToString(r.Value.Bytes()) - } - panic("value is not a string") -} - -func (r valueReflect) Unstructured() interface{} { - val := r.Value - switch { - case r.IsNull(): - return nil - case val.Kind() == reflect.Struct: - return structReflect{r}.Unstructured() - case val.Kind() == reflect.Map: - return mapReflect{valueReflect: r}.Unstructured() - case r.IsList(): - return listReflect{r.Value}.Unstructured() - case r.IsString(): - return r.AsString() - case r.IsInt(): - return r.AsInt() - case r.IsBool(): - return r.AsBool() - case r.IsFloat(): - return r.AsFloat() - default: - panic(fmt.Sprintf("value of type %s is not a supported by value reflector", val.Type())) - } -} diff --git a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go b/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go deleted file mode 100644 index ac5a926..0000000 --- a/vendor/sigs.k8s.io/structured-merge-diff/v3/value/valueunstructured.go +++ /dev/null @@ -1,178 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package value - -import ( - "fmt" -) - -// NewValueInterface creates a Value backed by an "interface{}" type, -// typically an unstructured object in Kubernetes world. -// interface{} must be one of: map[string]interface{}, map[interface{}]interface{}, []interface{}, int types, float types, -// string or boolean. Nested interface{} must also be one of these types. -func NewValueInterface(v interface{}) Value { - return Value(HeapAllocator.allocValueUnstructured().reuse(v)) -} - -type valueUnstructured struct { - Value interface{} -} - -// reuse replaces the value of the valueUnstructured. -func (vi *valueUnstructured) reuse(value interface{}) Value { - vi.Value = value - return vi -} - -func (v valueUnstructured) IsMap() bool { - if _, ok := v.Value.(map[string]interface{}); ok { - return true - } - if _, ok := v.Value.(map[interface{}]interface{}); ok { - return true - } - return false -} - -func (v valueUnstructured) AsMap() Map { - return v.AsMapUsing(HeapAllocator) -} - -func (v valueUnstructured) AsMapUsing(_ Allocator) Map { - if v.Value == nil { - panic("invalid nil") - } - switch t := v.Value.(type) { - case map[string]interface{}: - return mapUnstructuredString(t) - case map[interface{}]interface{}: - return mapUnstructuredInterface(t) - } - panic(fmt.Errorf("not a map: %#v", v)) -} - -func (v valueUnstructured) IsList() bool { - if v.Value == nil { - return false - } - _, ok := v.Value.([]interface{}) - return ok -} - -func (v valueUnstructured) AsList() List { - return v.AsListUsing(HeapAllocator) -} - -func (v valueUnstructured) AsListUsing(_ Allocator) List { - return listUnstructured(v.Value.([]interface{})) -} - -func (v valueUnstructured) IsFloat() bool { - if v.Value == nil { - return false - } else if _, ok := v.Value.(float64); ok { - return true - } else if _, ok := v.Value.(float32); ok { - return true - } - return false -} - -func (v valueUnstructured) AsFloat() float64 { - if f, ok := v.Value.(float32); ok { - return float64(f) - } - return v.Value.(float64) -} - -func (v valueUnstructured) IsInt() bool { - if v.Value == nil { - return false - } else if _, ok := v.Value.(int); ok { - return true - } else if _, ok := v.Value.(int8); ok { - return true - } else if _, ok := v.Value.(int16); ok { - return true - } else if _, ok := v.Value.(int32); ok { - return true - } else if _, ok := v.Value.(int64); ok { - return true - } else if _, ok := v.Value.(uint); ok { - return true - } else if _, ok := v.Value.(uint8); ok { - return true - } else if _, ok := v.Value.(uint16); ok { - return true - } else if _, ok := v.Value.(uint32); ok { - return true - } - return false -} - -func (v valueUnstructured) AsInt() int64 { - if i, ok := v.Value.(int); ok { - return int64(i) - } else if i, ok := v.Value.(int8); ok { - return int64(i) - } else if i, ok := v.Value.(int16); ok { - return int64(i) - } else if i, ok := v.Value.(int32); ok { - return int64(i) - } else if i, ok := v.Value.(uint); ok { - return int64(i) - } else if i, ok := v.Value.(uint8); ok { - return int64(i) - } else if i, ok := v.Value.(uint16); ok { - return int64(i) - } else if i, ok := v.Value.(uint32); ok { - 
return int64(i) - } - return v.Value.(int64) -} - -func (v valueUnstructured) IsString() bool { - if v.Value == nil { - return false - } - _, ok := v.Value.(string) - return ok -} - -func (v valueUnstructured) AsString() string { - return v.Value.(string) -} - -func (v valueUnstructured) IsBool() bool { - if v.Value == nil { - return false - } - _, ok := v.Value.(bool) - return ok -} - -func (v valueUnstructured) AsBool() bool { - return v.Value.(bool) -} - -func (v valueUnstructured) IsNull() bool { - return v.Value == nil -} - -func (v valueUnstructured) Unstructured() interface{} { - return v.Value -} diff --git a/vendor/sigs.k8s.io/yaml/.travis.yml b/vendor/sigs.k8s.io/yaml/.travis.yml deleted file mode 100644 index d20e23e..0000000 --- a/vendor/sigs.k8s.io/yaml/.travis.yml +++ /dev/null @@ -1,13 +0,0 @@ -language: go -dist: xenial -go: - - 1.12.x - - 1.13.x -script: - - diff -u <(echo -n) <(gofmt -d *.go) - - diff -u <(echo -n) <(golint $(go list -e ./...) | grep -v YAMLToJSON) - - GO111MODULE=on go vet . - - GO111MODULE=on go test -v -race ./... - - git diff --exit-code -install: - - GO111MODULE=off go get golang.org/x/lint/golint diff --git a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md b/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md deleted file mode 100644 index de47115..0000000 --- a/vendor/sigs.k8s.io/yaml/CONTRIBUTING.md +++ /dev/null @@ -1,31 +0,0 @@ -# Contributing Guidelines - -Welcome to Kubernetes. We are excited about the prospect of you joining our [community](https://github.com/kubernetes/community)! The Kubernetes community abides by the CNCF [code of conduct](code-of-conduct.md). Here is an excerpt: - -_As contributors and maintainers of this project, and in the interest of fostering an open and welcoming community, we pledge to respect all people who contribute through reporting issues, posting feature requests, updating documentation, submitting pull requests or patches, and other activities._ - -## Getting Started - -We have full documentation on how to get started contributing here: - - - -- [Contributor License Agreement](https://git.k8s.io/community/CLA.md) Kubernetes projects require that you sign a Contributor License Agreement (CLA) before we can accept your pull requests -- [Kubernetes Contributor Guide](http://git.k8s.io/community/contributors/guide) - Main contributor documentation, or you can just jump directly to the [contributing section](http://git.k8s.io/community/contributors/guide#contributing) -- [Contributor Cheat Sheet](https://git.k8s.io/community/contributors/guide/contributor-cheatsheet.md) - Common resources for existing developers - -## Mentorship - -- [Mentoring Initiatives](https://git.k8s.io/community/mentoring) - We have a diverse set of mentorship programs available that are always looking for volunteers! 
- - diff --git a/vendor/sigs.k8s.io/yaml/OWNERS b/vendor/sigs.k8s.io/yaml/OWNERS deleted file mode 100644 index 325b40b..0000000 --- a/vendor/sigs.k8s.io/yaml/OWNERS +++ /dev/null @@ -1,27 +0,0 @@ -# See the OWNERS docs at https://go.k8s.io/owners - -approvers: -- dims -- lavalamp -- smarterclayton -- deads2k -- sttts -- liggitt -- caesarxuchao -reviewers: -- dims -- thockin -- lavalamp -- smarterclayton -- wojtek-t -- deads2k -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- gmarek -- sttts -- ncdc -- tallclair -labels: -- sig/api-machinery diff --git a/vendor/sigs.k8s.io/yaml/RELEASE.md b/vendor/sigs.k8s.io/yaml/RELEASE.md deleted file mode 100644 index 6b64246..0000000 --- a/vendor/sigs.k8s.io/yaml/RELEASE.md +++ /dev/null @@ -1,9 +0,0 @@ -# Release Process - -The `yaml` Project is released on an as-needed basis. The process is as follows: - -1. An issue is proposing a new release with a changelog since the last release -1. All [OWNERS](OWNERS) must LGTM this release -1. An OWNER runs `git tag -s $VERSION` and inserts the changelog and pushes the tag with `git push $VERSION` -1. The release issue is closed -1. An announcement email is sent to `kubernetes-dev@googlegroups.com` with the subject `[ANNOUNCE] kubernetes-template-project $VERSION is released` diff --git a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS b/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS deleted file mode 100644 index 0648a8e..0000000 --- a/vendor/sigs.k8s.io/yaml/SECURITY_CONTACTS +++ /dev/null @@ -1,17 +0,0 @@ -# Defined below are the security contacts for this repo. -# -# They are the contact point for the Product Security Team to reach out -# to for triaging and handling of incoming issues. -# -# The below names agree to abide by the -# [Embargo Policy](https://github.com/kubernetes/sig-release/blob/master/security-release-process-documentation/security-release-process.md#embargo-policy) -# and will be removed and replaced if they violate that agreement. 
-# -# DO NOT REPORT SECURITY VULNERABILITIES DIRECTLY TO THESE NAMES, FOLLOW THE -# INSTRUCTIONS AT https://kubernetes.io/security/ - -cjcullen -jessfraz -liggitt -philips -tallclair diff --git a/vendor/sigs.k8s.io/yaml/code-of-conduct.md b/vendor/sigs.k8s.io/yaml/code-of-conduct.md deleted file mode 100644 index 0d15c00..0000000 --- a/vendor/sigs.k8s.io/yaml/code-of-conduct.md +++ /dev/null @@ -1,3 +0,0 @@ -# Kubernetes Community Code of Conduct - -Please refer to our [Kubernetes Community Code of Conduct](https://git.k8s.io/community/code-of-conduct.md) diff --git a/vendor/sigs.k8s.io/yaml/go.mod b/vendor/sigs.k8s.io/yaml/go.mod deleted file mode 100644 index 7224f34..0000000 --- a/vendor/sigs.k8s.io/yaml/go.mod +++ /dev/null @@ -1,8 +0,0 @@ -module sigs.k8s.io/yaml - -go 1.12 - -require ( - github.com/davecgh/go-spew v1.1.1 - gopkg.in/yaml.v2 v2.2.8 -) diff --git a/vendor/sigs.k8s.io/yaml/go.sum b/vendor/sigs.k8s.io/yaml/go.sum deleted file mode 100644 index 76e4948..0000000 --- a/vendor/sigs.k8s.io/yaml/go.sum +++ /dev/null @@ -1,9 +0,0 @@ -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/sigs.k8s.io/yaml/yaml_go110.go b/vendor/sigs.k8s.io/yaml/yaml_go110.go deleted file mode 100644 index ab3e06a..0000000 --- a/vendor/sigs.k8s.io/yaml/yaml_go110.go +++ /dev/null @@ -1,14 +0,0 @@ -// This file contains changes that are only compatible with go 1.10 and onwards. - -// +build go1.10 - -package yaml - -import "encoding/json" - -// DisallowUnknownFields configures the JSON decoder to error out if unknown -// fields come along, instead of dropping them by default. -func DisallowUnknownFields(d *json.Decoder) *json.Decoder { - d.DisallowUnknownFields() - return d -} From 60bdd34681d95e2b0bad2957602f58378ec68c15 Mon Sep 17 00:00:00 2001 From: Ryan Rose Date: Mon, 27 Jul 2020 10:37:38 -0400 Subject: [PATCH 5/6] clean up inCluster --- redispool.go | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/redispool.go b/redispool.go index 0f523e4..c6187bc 100644 --- a/redispool.go +++ b/redispool.go @@ -113,21 +113,7 @@ func NewSentinelPool( // inCluster - are we executing in the K8s cluster func inCluster(logger *logrus.Entry) bool { - // // globals.InCluster = true - // // return - // namespace := os.Getenv("MY_POD_NAMESPACE") - // logger.Infof("inCluster: env == %s", namespace) - // // defer recoverInCluster() // maybe it won't panic, so we don't need this - // _, err := k8s.NewInClusterClient() - // // standard service account has no privs to list pods!!! 
- // // so we're done - // if err != nil { - // logger.Infof("inCluster: false") - // return false - // } - // logger.Infof("inCluster: true") - // return true - + // Try to create an in-cluster k8s client config, err := rest.InClusterConfig() if err != nil { logger.Infof("unable to create k8s config: %s", err) From 2ee3308b80397cdcfa7e530254a21011bc8680d2 Mon Sep 17 00:00:00 2001 From: Ryan Rose Date: Mon, 27 Jul 2020 10:43:55 -0400 Subject: [PATCH 6/6] remove unnecessary client creation --- go.mod | 21 +- go.sum | 67 - redispool.go | 10 +- .../github.com/golang/protobuf/ptypes/any.go | 165 - .../golang/protobuf/ptypes/any/any.pb.go | 62 - .../github.com/golang/protobuf/ptypes/doc.go | 6 - .../golang/protobuf/ptypes/duration.go | 72 - .../protobuf/ptypes/duration/duration.pb.go | 63 - .../golang/protobuf/ptypes/timestamp.go | 103 - .../protobuf/ptypes/timestamp/timestamp.pb.go | 64 - vendor/github.com/google/btree/.travis.yml | 1 - vendor/github.com/google/btree/LICENSE | 202 - vendor/github.com/google/btree/README.md | 12 - vendor/github.com/google/btree/btree.go | 890 - vendor/github.com/googleapis/gnostic/LICENSE | 203 - .../googleapis/gnostic/OpenAPIv2/OpenAPIv2.go | 8789 --- .../gnostic/OpenAPIv2/OpenAPIv2.pb.go | 7347 --- .../gnostic/OpenAPIv2/OpenAPIv2.proto | 666 - .../googleapis/gnostic/OpenAPIv2/README.md | 14 - .../googleapis/gnostic/OpenAPIv2/document.go | 26 - .../gnostic/OpenAPIv2/openapi-2.0.json | 1610 - .../googleapis/gnostic/compiler/README.md | 4 - .../googleapis/gnostic/compiler/context.go | 43 - .../googleapis/gnostic/compiler/error.go | 61 - .../googleapis/gnostic/compiler/extensions.go | 85 - .../googleapis/gnostic/compiler/helpers.go | 385 - .../googleapis/gnostic/compiler/main.go | 16 - .../googleapis/gnostic/compiler/reader.go | 307 - .../googleapis/gnostic/extensions/README.md | 13 - .../gnostic/extensions/extension.pb.go | 465 - .../gnostic/extensions/extension.proto | 96 - .../gnostic/extensions/extensions.go | 64 - .../googleapis/gnostic/jsonschema/README.md | 4 - .../googleapis/gnostic/jsonschema/base.go | 84 - .../googleapis/gnostic/jsonschema/display.go | 229 - .../googleapis/gnostic/jsonschema/models.go | 228 - .../gnostic/jsonschema/operations.go | 394 - .../googleapis/gnostic/jsonschema/reader.go | 442 - .../googleapis/gnostic/jsonschema/schema.json | 150 - .../googleapis/gnostic/jsonschema/writer.go | 369 - .../gregjones/httpcache/.travis.yml | 18 - .../gregjones/httpcache/LICENSE.txt | 7 - .../github.com/gregjones/httpcache/README.md | 29 - .../httpcache/diskcache/diskcache.go | 61 - .../gregjones/httpcache/httpcache.go | 551 - vendor/github.com/peterbourgon/diskv/LICENSE | 19 - .../github.com/peterbourgon/diskv/README.md | 141 - .../peterbourgon/diskv/compression.go | 64 - vendor/github.com/peterbourgon/diskv/diskv.go | 624 - vendor/github.com/peterbourgon/diskv/index.go | 115 - .../protobuf/types/known/anypb/any.pb.go | 287 - .../types/known/durationpb/duration.pb.go | 249 - .../types/known/timestamppb/timestamp.pb.go | 271 - vendor/gopkg.in/yaml.v3/.travis.yml | 17 - vendor/gopkg.in/yaml.v3/LICENSE | 50 - vendor/gopkg.in/yaml.v3/NOTICE | 13 - vendor/gopkg.in/yaml.v3/README.md | 150 - vendor/gopkg.in/yaml.v3/apic.go | 747 - vendor/gopkg.in/yaml.v3/decode.go | 948 - vendor/gopkg.in/yaml.v3/emitterc.go | 2022 - vendor/gopkg.in/yaml.v3/encode.go | 572 - vendor/gopkg.in/yaml.v3/go.mod | 5 - vendor/gopkg.in/yaml.v3/parserc.go | 1249 - vendor/gopkg.in/yaml.v3/readerc.go | 434 - vendor/gopkg.in/yaml.v3/resolve.go | 326 - vendor/gopkg.in/yaml.v3/scannerc.go 
| 3028 - vendor/gopkg.in/yaml.v3/sorter.go | 134 - vendor/gopkg.in/yaml.v3/writerc.go | 48 - vendor/gopkg.in/yaml.v3/yaml.go | 693 - vendor/gopkg.in/yaml.v3/yamlh.go | 807 - vendor/gopkg.in/yaml.v3/yamlprivateh.go | 198 - vendor/k8s.io/api/LICENSE | 202 - .../api/admissionregistration/v1alpha1/doc.go | 25 - .../v1alpha1/generated.pb.go | 1027 - .../v1alpha1/generated.proto | 107 - .../v1alpha1/register.go | 51 - .../admissionregistration/v1alpha1/types.go | 106 - .../v1alpha1/types_swagger_doc_generated.go | 71 - .../v1alpha1/zz_generated.deepcopy.go | 145 - .../api/admissionregistration/v1beta1/doc.go | 25 - .../v1beta1/generated.pb.go | 2190 - .../v1beta1/generated.proto | 269 - .../admissionregistration/v1beta1/register.go | 53 - .../admissionregistration/v1beta1/types.go | 306 - .../v1beta1/types_swagger_doc_generated.go | 126 - .../v1beta1/zz_generated.deepcopy.go | 302 - vendor/k8s.io/api/apps/v1/doc.go | 20 - vendor/k8s.io/api/apps/v1/generated.pb.go | 6943 -- vendor/k8s.io/api/apps/v1/generated.proto | 700 - vendor/k8s.io/api/apps/v1/register.go | 60 - vendor/k8s.io/api/apps/v1/types.go | 819 - .../apps/v1/types_swagger_doc_generated.go | 365 - .../api/apps/v1/zz_generated.deepcopy.go | 772 - vendor/k8s.io/api/apps/v1beta1/doc.go | 20 - .../k8s.io/api/apps/v1beta1/generated.pb.go | 5290 -- .../k8s.io/api/apps/v1beta1/generated.proto | 483 - vendor/k8s.io/api/apps/v1beta1/register.go | 58 - vendor/k8s.io/api/apps/v1beta1/types.go | 568 - .../v1beta1/types_swagger_doc_generated.go | 273 - .../api/apps/v1beta1/zz_generated.deepcopy.go | 594 - vendor/k8s.io/api/apps/v1beta2/doc.go | 20 - .../k8s.io/api/apps/v1beta2/generated.pb.go | 7584 --- .../k8s.io/api/apps/v1beta2/generated.proto | 751 - vendor/k8s.io/api/apps/v1beta2/register.go | 61 - vendor/k8s.io/api/apps/v1beta2/types.go | 877 - .../v1beta2/types_swagger_doc_generated.go | 396 - .../api/apps/v1beta2/zz_generated.deepcopy.go | 839 - vendor/k8s.io/api/authentication/v1/doc.go | 20 - .../api/authentication/v1/generated.pb.go | 2147 - .../api/authentication/v1/generated.proto | 159 - .../k8s.io/api/authentication/v1/register.go | 52 - vendor/k8s.io/api/authentication/v1/types.go | 168 - .../v1/types_swagger_doc_generated.go | 113 - .../v1/zz_generated.deepcopy.go | 234 - .../k8s.io/api/authentication/v1beta1/doc.go | 20 - .../authentication/v1beta1/generated.pb.go | 1301 - .../authentication/v1beta1/generated.proto | 98 - .../api/authentication/v1beta1/register.go | 51 - .../api/authentication/v1beta1/types.go | 92 - .../v1beta1/types_swagger_doc_generated.go | 72 - .../v1beta1/zz_generated.deepcopy.go | 142 - vendor/k8s.io/api/authorization/v1/doc.go | 21 - .../api/authorization/v1/generated.pb.go | 3528 -- .../api/authorization/v1/generated.proto | 272 - .../k8s.io/api/authorization/v1/register.go | 55 - vendor/k8s.io/api/authorization/v1/types.go | 268 - .../v1/types_swagger_doc_generated.go | 173 - .../authorization/v1/zz_generated.deepcopy.go | 385 - .../k8s.io/api/authorization/v1beta1/doc.go | 21 - .../api/authorization/v1beta1/generated.pb.go | 3528 -- .../api/authorization/v1beta1/generated.proto | 272 - .../api/authorization/v1beta1/register.go | 55 - .../k8s.io/api/authorization/v1beta1/types.go | 268 - .../v1beta1/types_swagger_doc_generated.go | 173 - .../v1beta1/zz_generated.deepcopy.go | 385 - vendor/k8s.io/api/autoscaling/v1/doc.go | 20 - .../k8s.io/api/autoscaling/v1/generated.pb.go | 4710 -- .../k8s.io/api/autoscaling/v1/generated.proto | 415 - vendor/k8s.io/api/autoscaling/v1/register.go | 53 - 
vendor/k8s.io/api/autoscaling/v1/types.go | 428 - .../v1/types_swagger_doc_generated.go | 250 - .../autoscaling/v1/zz_generated.deepcopy.go | 515 - vendor/k8s.io/api/autoscaling/v2beta1/doc.go | 20 - .../api/autoscaling/v2beta1/generated.pb.go | 4326 -- .../api/autoscaling/v2beta1/generated.proto | 397 - .../api/autoscaling/v2beta1/register.go | 52 - .../k8s.io/api/autoscaling/v2beta1/types.go | 405 - .../v2beta1/types_swagger_doc_generated.go | 221 - .../v2beta1/zz_generated.deepcopy.go | 466 - vendor/k8s.io/api/autoscaling/v2beta2/doc.go | 20 - .../api/autoscaling/v2beta2/generated.pb.go | 4438 -- .../api/autoscaling/v2beta2/generated.proto | 369 - .../api/autoscaling/v2beta2/register.go | 50 - .../k8s.io/api/autoscaling/v2beta2/types.go | 393 - .../v2beta2/types_swagger_doc_generated.go | 240 - .../v2beta2/zz_generated.deepcopy.go | 487 - vendor/k8s.io/api/batch/v1/doc.go | 20 - vendor/k8s.io/api/batch/v1/generated.pb.go | 1646 - vendor/k8s.io/api/batch/v1/generated.proto | 184 - vendor/k8s.io/api/batch/v1/register.go | 52 - vendor/k8s.io/api/batch/v1/types.go | 193 - .../batch/v1/types_swagger_doc_generated.go | 95 - .../api/batch/v1/zz_generated.deepcopy.go | 188 - vendor/k8s.io/api/batch/v1beta1/doc.go | 20 - .../k8s.io/api/batch/v1beta1/generated.pb.go | 1509 - .../k8s.io/api/batch/v1beta1/generated.proto | 137 - vendor/k8s.io/api/batch/v1beta1/register.go | 53 - vendor/k8s.io/api/batch/v1beta1/types.go | 158 - .../v1beta1/types_swagger_doc_generated.go | 96 - .../batch/v1beta1/zz_generated.deepcopy.go | 194 - vendor/k8s.io/api/batch/v2alpha1/doc.go | 20 - .../k8s.io/api/batch/v2alpha1/generated.pb.go | 1509 - .../k8s.io/api/batch/v2alpha1/generated.proto | 135 - vendor/k8s.io/api/batch/v2alpha1/register.go | 53 - vendor/k8s.io/api/batch/v2alpha1/types.go | 156 - .../v2alpha1/types_swagger_doc_generated.go | 96 - .../batch/v2alpha1/zz_generated.deepcopy.go | 194 - vendor/k8s.io/api/certificates/v1beta1/doc.go | 21 - .../api/certificates/v1beta1/generated.pb.go | 1693 - .../api/certificates/v1beta1/generated.proto | 121 - .../api/certificates/v1beta1/register.go | 59 - .../k8s.io/api/certificates/v1beta1/types.go | 155 - .../v1beta1/types_swagger_doc_generated.go | 74 - .../v1beta1/zz_generated.deepcopy.go | 197 - vendor/k8s.io/api/coordination/v1beta1/doc.go | 21 - .../api/coordination/v1beta1/generated.pb.go | 883 - .../api/coordination/v1beta1/generated.proto | 80 - .../api/coordination/v1beta1/register.go | 53 - .../k8s.io/api/coordination/v1beta1/types.go | 74 - .../v1beta1/types_swagger_doc_generated.go | 63 - .../v1beta1/zz_generated.deepcopy.go | 124 - .../api/core/v1/annotation_key_constants.go | 81 - vendor/k8s.io/api/core/v1/doc.go | 21 - vendor/k8s.io/api/core/v1/generated.pb.go | 52149 ---------------- vendor/k8s.io/api/core/v1/generated.proto | 4757 -- vendor/k8s.io/api/core/v1/objectreference.go | 33 - vendor/k8s.io/api/core/v1/register.go | 99 - vendor/k8s.io/api/core/v1/resource.go | 56 - vendor/k8s.io/api/core/v1/taint.go | 33 - vendor/k8s.io/api/core/v1/toleration.go | 56 - vendor/k8s.io/api/core/v1/types.go | 5320 -- .../core/v1/types_swagger_doc_generated.go | 2333 - .../api/core/v1/zz_generated.deepcopy.go | 5404 -- vendor/k8s.io/api/events/v1beta1/doc.go | 21 - .../k8s.io/api/events/v1beta1/generated.pb.go | 1306 - .../k8s.io/api/events/v1beta1/generated.proto | 121 - vendor/k8s.io/api/events/v1beta1/register.go | 53 - vendor/k8s.io/api/events/v1beta1/types.go | 122 - .../v1beta1/types_swagger_doc_generated.go | 73 - .../events/v1beta1/zz_generated.deepcopy.go 
| 117 - vendor/k8s.io/api/extensions/v1beta1/doc.go | 20 - .../api/extensions/v1beta1/generated.pb.go | 12708 ---- .../api/extensions/v1beta1/generated.proto | 1166 - .../k8s.io/api/extensions/v1beta1/register.go | 66 - vendor/k8s.io/api/extensions/v1beta1/types.go | 1351 - .../v1beta1/types_swagger_doc_generated.go | 648 - .../v1beta1/zz_generated.deepcopy.go | 1499 - vendor/k8s.io/api/networking/v1/doc.go | 20 - .../k8s.io/api/networking/v1/generated.pb.go | 1868 - .../k8s.io/api/networking/v1/generated.proto | 195 - vendor/k8s.io/api/networking/v1/register.go | 53 - vendor/k8s.io/api/networking/v1/types.go | 203 - .../v1/types_swagger_doc_generated.go | 113 - .../networking/v1/zz_generated.deepcopy.go | 262 - vendor/k8s.io/api/policy/v1beta1/doc.go | 23 - .../k8s.io/api/policy/v1beta1/generated.pb.go | 4112 -- .../k8s.io/api/policy/v1beta1/generated.proto | 341 - vendor/k8s.io/api/policy/v1beta1/register.go | 56 - vendor/k8s.io/api/policy/v1beta1/types.go | 399 - .../v1beta1/types_swagger_doc_generated.go | 211 - .../policy/v1beta1/zz_generated.deepcopy.go | 462 - vendor/k8s.io/api/rbac/v1/doc.go | 21 - vendor/k8s.io/api/rbac/v1/generated.pb.go | 2748 - vendor/k8s.io/api/rbac/v1/generated.proto | 197 - vendor/k8s.io/api/rbac/v1/register.go | 58 - vendor/k8s.io/api/rbac/v1/types.go | 235 - .../rbac/v1/types_swagger_doc_generated.go | 158 - .../api/rbac/v1/zz_generated.deepcopy.go | 389 - vendor/k8s.io/api/rbac/v1alpha1/doc.go | 21 - .../k8s.io/api/rbac/v1alpha1/generated.pb.go | 2749 - .../k8s.io/api/rbac/v1alpha1/generated.proto | 199 - vendor/k8s.io/api/rbac/v1alpha1/register.go | 58 - vendor/k8s.io/api/rbac/v1alpha1/types.go | 237 - .../v1alpha1/types_swagger_doc_generated.go | 158 - .../rbac/v1alpha1/zz_generated.deepcopy.go | 389 - vendor/k8s.io/api/rbac/v1beta1/doc.go | 21 - .../k8s.io/api/rbac/v1beta1/generated.pb.go | 2748 - .../k8s.io/api/rbac/v1beta1/generated.proto | 198 - vendor/k8s.io/api/rbac/v1beta1/register.go | 58 - vendor/k8s.io/api/rbac/v1beta1/types.go | 235 - .../v1beta1/types_swagger_doc_generated.go | 158 - .../api/rbac/v1beta1/zz_generated.deepcopy.go | 389 - vendor/k8s.io/api/scheduling/v1alpha1/doc.go | 21 - .../api/scheduling/v1alpha1/generated.pb.go | 640 - .../api/scheduling/v1alpha1/generated.proto | 67 - .../api/scheduling/v1alpha1/register.go | 52 - .../k8s.io/api/scheduling/v1alpha1/types.go | 66 - .../v1alpha1/types_swagger_doc_generated.go | 52 - .../v1alpha1/zz_generated.deepcopy.go | 84 - vendor/k8s.io/api/scheduling/v1beta1/doc.go | 21 - .../api/scheduling/v1beta1/generated.pb.go | 640 - .../api/scheduling/v1beta1/generated.proto | 67 - .../k8s.io/api/scheduling/v1beta1/register.go | 52 - vendor/k8s.io/api/scheduling/v1beta1/types.go | 66 - .../v1beta1/types_swagger_doc_generated.go | 52 - .../v1beta1/zz_generated.deepcopy.go | 84 - vendor/k8s.io/api/settings/v1alpha1/doc.go | 21 - .../api/settings/v1alpha1/generated.pb.go | 929 - .../api/settings/v1alpha1/generated.proto | 75 - .../k8s.io/api/settings/v1alpha1/register.go | 52 - vendor/k8s.io/api/settings/v1alpha1/types.go | 70 - .../v1alpha1/types_swagger_doc_generated.go | 61 - .../v1alpha1/zz_generated.deepcopy.go | 131 - vendor/k8s.io/api/storage/v1/doc.go | 20 - vendor/k8s.io/api/storage/v1/generated.pb.go | 978 - vendor/k8s.io/api/storage/v1/generated.proto | 90 - vendor/k8s.io/api/storage/v1/register.go | 53 - vendor/k8s.io/api/storage/v1/types.go | 104 - .../storage/v1/types_swagger_doc_generated.go | 56 - .../api/storage/v1/zz_generated.deepcopy.go | 119 - 
vendor/k8s.io/api/storage/v1alpha1/doc.go | 20 - .../api/storage/v1alpha1/generated.pb.go | 1520 - .../api/storage/v1alpha1/generated.proto | 126 - .../k8s.io/api/storage/v1alpha1/register.go | 50 - vendor/k8s.io/api/storage/v1alpha1/types.go | 126 - .../v1alpha1/types_swagger_doc_generated.go | 93 - .../storage/v1alpha1/zz_generated.deepcopy.go | 174 - vendor/k8s.io/api/storage/v1beta1/doc.go | 20 - .../api/storage/v1beta1/generated.pb.go | 2261 - .../api/storage/v1beta1/generated.proto | 186 - vendor/k8s.io/api/storage/v1beta1/register.go | 56 - vendor/k8s.io/api/storage/v1beta1/types.go | 211 - .../v1beta1/types_swagger_doc_generated.go | 119 - .../storage/v1beta1/zz_generated.deepcopy.go | 268 - .../k8s.io/apimachinery/pkg/api/meta/OWNERS | 25 - .../k8s.io/apimachinery/pkg/api/meta/doc.go | 19 - .../apimachinery/pkg/api/meta/errors.go | 121 - .../pkg/api/meta/firsthit_restmapper.go | 97 - .../k8s.io/apimachinery/pkg/api/meta/help.go | 205 - .../apimachinery/pkg/api/meta/interfaces.go | 134 - .../k8s.io/apimachinery/pkg/api/meta/lazy.go | 104 - .../k8s.io/apimachinery/pkg/api/meta/meta.go | 650 - .../pkg/api/meta/multirestmapper.go | 210 - .../apimachinery/pkg/api/meta/priority.go | 222 - .../apimachinery/pkg/api/meta/restmapper.go | 518 - .../pkg/apis/meta/v1beta1/conversion.go | 27 - .../pkg/apis/meta/v1beta1/deepcopy.go | 44 - .../apimachinery/pkg/apis/meta/v1beta1/doc.go | 22 - .../pkg/apis/meta/v1beta1/generated.pb.go | 632 - .../pkg/apis/meta/v1beta1/generated.proto | 57 - .../pkg/apis/meta/v1beta1/register.go | 57 - .../pkg/apis/meta/v1beta1/types.go | 161 - .../v1beta1/types_swagger_doc_generated.go | 104 - .../meta/v1beta1/zz_generated.deepcopy.go | 189 - .../meta/v1beta1/zz_generated.defaults.go | 32 - .../client-go/discovery/cached_discovery.go | 282 - .../client-go/discovery/discovery_client.go | 472 - vendor/k8s.io/client-go/discovery/doc.go | 19 - vendor/k8s.io/client-go/discovery/helper.go | 121 - .../client-go/discovery/round_tripper.go | 65 - .../k8s.io/client-go/kubernetes/clientset.go | 646 - vendor/k8s.io/client-go/kubernetes/doc.go | 20 - vendor/k8s.io/client-go/kubernetes/import.go | 19 - .../k8s.io/client-go/kubernetes/scheme/doc.go | 20 - .../client-go/kubernetes/scheme/register.go | 116 - .../v1alpha1/admissionregistration_client.go | 90 - .../admissionregistration/v1alpha1/doc.go | 20 - .../v1alpha1/generated_expansion.go | 21 - .../v1alpha1/initializerconfiguration.go | 147 - .../v1beta1/admissionregistration_client.go | 95 - .../admissionregistration/v1beta1/doc.go | 20 - .../v1beta1/generated_expansion.go | 23 - .../v1beta1/mutatingwebhookconfiguration.go | 147 - .../v1beta1/validatingwebhookconfiguration.go | 147 - .../kubernetes/typed/apps/v1/apps_client.go | 110 - .../typed/apps/v1/controllerrevision.go | 157 - .../kubernetes/typed/apps/v1/daemonset.go | 174 - .../kubernetes/typed/apps/v1/deployment.go | 174 - .../client-go/kubernetes/typed/apps/v1/doc.go | 20 - .../typed/apps/v1/generated_expansion.go | 29 - .../kubernetes/typed/apps/v1/replicaset.go | 174 - .../kubernetes/typed/apps/v1/statefulset.go | 174 - .../typed/apps/v1beta1/apps_client.go | 105 - .../typed/apps/v1beta1/controllerrevision.go | 157 - .../typed/apps/v1beta1/deployment.go | 174 - .../kubernetes/typed/apps/v1beta1/doc.go | 20 - .../typed/apps/v1beta1/generated_expansion.go | 27 - .../kubernetes/typed/apps/v1beta1/scale.go | 48 - .../typed/apps/v1beta1/statefulset.go | 174 - .../typed/apps/v1beta2/apps_client.go | 115 - .../typed/apps/v1beta2/controllerrevision.go | 157 - 
.../typed/apps/v1beta2/daemonset.go | 174 - .../typed/apps/v1beta2/deployment.go | 174 - .../kubernetes/typed/apps/v1beta2/doc.go | 20 - .../typed/apps/v1beta2/generated_expansion.go | 31 - .../typed/apps/v1beta2/replicaset.go | 174 - .../kubernetes/typed/apps/v1beta2/scale.go | 48 - .../typed/apps/v1beta2/statefulset.go | 205 - .../v1/authentication_client.go | 90 - .../kubernetes/typed/authentication/v1/doc.go | 20 - .../authentication/v1/generated_expansion.go | 19 - .../typed/authentication/v1/tokenreview.go | 46 - .../v1/tokenreview_expansion.go | 35 - .../v1beta1/authentication_client.go | 90 - .../typed/authentication/v1beta1/doc.go | 20 - .../v1beta1/generated_expansion.go | 19 - .../authentication/v1beta1/tokenreview.go | 46 - .../v1beta1/tokenreview_expansion.go | 35 - .../authorization/v1/authorization_client.go | 105 - .../kubernetes/typed/authorization/v1/doc.go | 20 - .../authorization/v1/generated_expansion.go | 19 - .../v1/localsubjectaccessreview.go | 48 - .../v1/localsubjectaccessreview_expansion.go | 36 - .../v1/selfsubjectaccessreview.go | 46 - .../v1/selfsubjectaccessreview_expansion.go | 35 - .../v1/selfsubjectrulesreview.go | 46 - .../v1/selfsubjectrulesreview_expansion.go | 35 - .../authorization/v1/subjectaccessreview.go | 46 - .../v1/subjectaccessreview_expansion.go | 36 - .../v1beta1/authorization_client.go | 105 - .../typed/authorization/v1beta1/doc.go | 20 - .../v1beta1/generated_expansion.go | 19 - .../v1beta1/localsubjectaccessreview.go | 48 - .../localsubjectaccessreview_expansion.go | 36 - .../v1beta1/selfsubjectaccessreview.go | 46 - .../selfsubjectaccessreview_expansion.go | 35 - .../v1beta1/selfsubjectrulesreview.go | 46 - .../selfsubjectrulesreview_expansion.go | 35 - .../v1beta1/subjectaccessreview.go | 46 - .../v1beta1/subjectaccessreview_expansion.go | 36 - .../autoscaling/v1/autoscaling_client.go | 90 - .../kubernetes/typed/autoscaling/v1/doc.go | 20 - .../autoscaling/v1/generated_expansion.go | 21 - .../autoscaling/v1/horizontalpodautoscaler.go | 174 - .../autoscaling/v2beta1/autoscaling_client.go | 90 - .../typed/autoscaling/v2beta1/doc.go | 20 - .../v2beta1/generated_expansion.go | 21 - .../v2beta1/horizontalpodautoscaler.go | 174 - .../autoscaling/v2beta2/autoscaling_client.go | 90 - .../typed/autoscaling/v2beta2/doc.go | 20 - .../v2beta2/generated_expansion.go | 21 - .../v2beta2/horizontalpodautoscaler.go | 174 - .../kubernetes/typed/batch/v1/batch_client.go | 90 - .../kubernetes/typed/batch/v1/doc.go | 20 - .../typed/batch/v1/generated_expansion.go | 21 - .../kubernetes/typed/batch/v1/job.go | 174 - .../typed/batch/v1beta1/batch_client.go | 90 - .../kubernetes/typed/batch/v1beta1/cronjob.go | 174 - .../kubernetes/typed/batch/v1beta1/doc.go | 20 - .../batch/v1beta1/generated_expansion.go | 21 - .../typed/batch/v2alpha1/batch_client.go | 90 - .../typed/batch/v2alpha1/cronjob.go | 174 - .../kubernetes/typed/batch/v2alpha1/doc.go | 20 - .../batch/v2alpha1/generated_expansion.go | 21 - .../v1beta1/certificates_client.go | 90 - .../v1beta1/certificatesigningrequest.go | 163 - .../certificatesigningrequest_expansion.go | 37 - .../typed/certificates/v1beta1/doc.go | 20 - .../v1beta1/generated_expansion.go | 19 - .../v1beta1/coordination_client.go | 90 - .../typed/coordination/v1beta1/doc.go | 20 - .../v1beta1/generated_expansion.go | 21 - .../typed/coordination/v1beta1/lease.go | 157 - .../typed/core/v1/componentstatus.go | 147 - .../kubernetes/typed/core/v1/configmap.go | 157 - .../kubernetes/typed/core/v1/core_client.go | 165 - 
.../client-go/kubernetes/typed/core/v1/doc.go | 20 - .../kubernetes/typed/core/v1/endpoints.go | 157 - .../kubernetes/typed/core/v1/event.go | 157 - .../typed/core/v1/event_expansion.go | 164 - .../typed/core/v1/generated_expansion.go | 39 - .../kubernetes/typed/core/v1/limitrange.go | 157 - .../kubernetes/typed/core/v1/namespace.go | 152 - .../typed/core/v1/namespace_expansion.go | 31 - .../kubernetes/typed/core/v1/node.go | 163 - .../typed/core/v1/node_expansion.go | 43 - .../typed/core/v1/persistentvolume.go | 163 - .../typed/core/v1/persistentvolumeclaim.go | 174 - .../client-go/kubernetes/typed/core/v1/pod.go | 174 - .../kubernetes/typed/core/v1/pod_expansion.go | 45 - .../kubernetes/typed/core/v1/podtemplate.go | 157 - .../typed/core/v1/replicationcontroller.go | 206 - .../kubernetes/typed/core/v1/resourcequota.go | 174 - .../kubernetes/typed/core/v1/secret.go | 157 - .../kubernetes/typed/core/v1/service.go | 162 - .../typed/core/v1/service_expansion.go | 41 - .../typed/core/v1/serviceaccount.go | 157 - .../typed/core/v1/serviceaccount_expansion.go | 41 - .../kubernetes/typed/events/v1beta1/doc.go | 20 - .../kubernetes/typed/events/v1beta1/event.go | 157 - .../typed/events/v1beta1/events_client.go | 90 - .../events/v1beta1/generated_expansion.go | 21 - .../typed/extensions/v1beta1/daemonset.go | 174 - .../typed/extensions/v1beta1/deployment.go | 205 - .../v1beta1/deployment_expansion.go | 29 - .../typed/extensions/v1beta1/doc.go | 20 - .../extensions/v1beta1/extensions_client.go | 115 - .../extensions/v1beta1/generated_expansion.go | 27 - .../typed/extensions/v1beta1/ingress.go | 174 - .../extensions/v1beta1/podsecuritypolicy.go | 147 - .../typed/extensions/v1beta1/replicaset.go | 205 - .../typed/extensions/v1beta1/scale.go | 48 - .../extensions/v1beta1/scale_expansion.go | 65 - .../kubernetes/typed/networking/v1/doc.go | 20 - .../networking/v1/generated_expansion.go | 21 - .../typed/networking/v1/networking_client.go | 90 - .../typed/networking/v1/networkpolicy.go | 157 - .../kubernetes/typed/policy/v1beta1/doc.go | 20 - .../typed/policy/v1beta1/eviction.go | 48 - .../policy/v1beta1/eviction_expansion.go | 38 - .../policy/v1beta1/generated_expansion.go | 23 - .../policy/v1beta1/poddisruptionbudget.go | 174 - .../typed/policy/v1beta1/podsecuritypolicy.go | 147 - .../typed/policy/v1beta1/policy_client.go | 100 - .../kubernetes/typed/rbac/v1/clusterrole.go | 147 - .../typed/rbac/v1/clusterrolebinding.go | 147 - .../client-go/kubernetes/typed/rbac/v1/doc.go | 20 - .../typed/rbac/v1/generated_expansion.go | 27 - .../kubernetes/typed/rbac/v1/rbac_client.go | 105 - .../kubernetes/typed/rbac/v1/role.go | 157 - .../kubernetes/typed/rbac/v1/rolebinding.go | 157 - .../typed/rbac/v1alpha1/clusterrole.go | 147 - .../typed/rbac/v1alpha1/clusterrolebinding.go | 147 - .../kubernetes/typed/rbac/v1alpha1/doc.go | 20 - .../rbac/v1alpha1/generated_expansion.go | 27 - .../typed/rbac/v1alpha1/rbac_client.go | 105 - .../kubernetes/typed/rbac/v1alpha1/role.go | 157 - .../typed/rbac/v1alpha1/rolebinding.go | 157 - .../typed/rbac/v1beta1/clusterrole.go | 147 - .../typed/rbac/v1beta1/clusterrolebinding.go | 147 - .../kubernetes/typed/rbac/v1beta1/doc.go | 20 - .../typed/rbac/v1beta1/generated_expansion.go | 27 - .../typed/rbac/v1beta1/rbac_client.go | 105 - .../kubernetes/typed/rbac/v1beta1/role.go | 157 - .../typed/rbac/v1beta1/rolebinding.go | 157 - .../typed/scheduling/v1alpha1/doc.go | 20 - .../v1alpha1/generated_expansion.go | 21 - .../scheduling/v1alpha1/priorityclass.go | 147 - 
.../scheduling/v1alpha1/scheduling_client.go | 90 - .../typed/scheduling/v1beta1/doc.go | 20 - .../scheduling/v1beta1/generated_expansion.go | 21 - .../typed/scheduling/v1beta1/priorityclass.go | 147 - .../scheduling/v1beta1/scheduling_client.go | 90 - .../kubernetes/typed/settings/v1alpha1/doc.go | 20 - .../settings/v1alpha1/generated_expansion.go | 21 - .../typed/settings/v1alpha1/podpreset.go | 157 - .../settings/v1alpha1/settings_client.go | 90 - .../kubernetes/typed/storage/v1/doc.go | 20 - .../typed/storage/v1/generated_expansion.go | 21 - .../typed/storage/v1/storage_client.go | 90 - .../typed/storage/v1/storageclass.go | 147 - .../kubernetes/typed/storage/v1alpha1/doc.go | 20 - .../storage/v1alpha1/generated_expansion.go | 21 - .../typed/storage/v1alpha1/storage_client.go | 90 - .../storage/v1alpha1/volumeattachment.go | 163 - .../kubernetes/typed/storage/v1beta1/doc.go | 20 - .../storage/v1beta1/generated_expansion.go | 23 - .../typed/storage/v1beta1/storage_client.go | 95 - .../typed/storage/v1beta1/storageclass.go | 147 - .../typed/storage/v1beta1/volumeattachment.go | 163 - .../k8s.io/client-go/tools/reference/ref.go | 126 - vendor/modules.txt | 90 - 522 files changed, 8 insertions(+), 254825 deletions(-) delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/any/any.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/doc.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp.go delete mode 100644 vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go delete mode 100644 vendor/github.com/google/btree/.travis.yml delete mode 100644 vendor/github.com/google/btree/LICENSE delete mode 100644 vendor/github.com/google/btree/README.md delete mode 100644 vendor/github.com/google/btree/btree.go delete mode 100644 vendor/github.com/googleapis/gnostic/LICENSE delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go delete mode 100644 vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/README.md delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/context.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/error.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/extensions.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/helpers.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/main.go delete mode 100644 vendor/github.com/googleapis/gnostic/compiler/reader.go delete mode 100644 vendor/github.com/googleapis/gnostic/extensions/README.md delete mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.pb.go delete mode 100644 vendor/github.com/googleapis/gnostic/extensions/extension.proto delete mode 100644 vendor/github.com/googleapis/gnostic/extensions/extensions.go delete mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/README.md delete mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/base.go delete 
mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/display.go delete mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/models.go delete mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/operations.go delete mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/reader.go delete mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/schema.json delete mode 100644 vendor/github.com/googleapis/gnostic/jsonschema/writer.go delete mode 100644 vendor/github.com/gregjones/httpcache/.travis.yml delete mode 100644 vendor/github.com/gregjones/httpcache/LICENSE.txt delete mode 100644 vendor/github.com/gregjones/httpcache/README.md delete mode 100644 vendor/github.com/gregjones/httpcache/diskcache/diskcache.go delete mode 100644 vendor/github.com/gregjones/httpcache/httpcache.go delete mode 100644 vendor/github.com/peterbourgon/diskv/LICENSE delete mode 100644 vendor/github.com/peterbourgon/diskv/README.md delete mode 100644 vendor/github.com/peterbourgon/diskv/compression.go delete mode 100644 vendor/github.com/peterbourgon/diskv/diskv.go delete mode 100644 vendor/github.com/peterbourgon/diskv/index.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go delete mode 100644 vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go delete mode 100644 vendor/gopkg.in/yaml.v3/.travis.yml delete mode 100644 vendor/gopkg.in/yaml.v3/LICENSE delete mode 100644 vendor/gopkg.in/yaml.v3/NOTICE delete mode 100644 vendor/gopkg.in/yaml.v3/README.md delete mode 100644 vendor/gopkg.in/yaml.v3/apic.go delete mode 100644 vendor/gopkg.in/yaml.v3/decode.go delete mode 100644 vendor/gopkg.in/yaml.v3/emitterc.go delete mode 100644 vendor/gopkg.in/yaml.v3/encode.go delete mode 100644 vendor/gopkg.in/yaml.v3/go.mod delete mode 100644 vendor/gopkg.in/yaml.v3/parserc.go delete mode 100644 vendor/gopkg.in/yaml.v3/readerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/resolve.go delete mode 100644 vendor/gopkg.in/yaml.v3/scannerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/sorter.go delete mode 100644 vendor/gopkg.in/yaml.v3/writerc.go delete mode 100644 vendor/gopkg.in/yaml.v3/yaml.go delete mode 100644 vendor/gopkg.in/yaml.v3/yamlh.go delete mode 100644 vendor/gopkg.in/yaml.v3/yamlprivateh.go delete mode 100644 vendor/k8s.io/api/LICENSE delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go delete mode 100644 
vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1/generated.proto delete mode 100644 vendor/k8s.io/api/apps/v1/register.go delete mode 100644 vendor/k8s.io/api/apps/v1/types.go delete mode 100644 vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/apps/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/doc.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.pb.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/generated.proto delete mode 100644 vendor/k8s.io/api/apps/v1beta2/register.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/types.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authentication/v1/doc.go delete mode 100644 vendor/k8s.io/api/authentication/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authentication/v1/generated.proto delete mode 100644 vendor/k8s.io/api/authentication/v1/register.go delete mode 100644 vendor/k8s.io/api/authentication/v1/types.go delete mode 100644 vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authorization/v1/doc.go delete mode 100644 vendor/k8s.io/api/authorization/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authorization/v1/generated.proto delete mode 100644 vendor/k8s.io/api/authorization/v1/register.go delete mode 100644 vendor/k8s.io/api/authorization/v1/types.go delete mode 100644 vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/doc.go 
delete mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/generated.proto delete mode 100644 vendor/k8s.io/api/autoscaling/v1/register.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/types.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/doc.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/generated.proto delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/register.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/doc.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/generated.proto delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/register.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v1/generated.proto delete mode 100644 vendor/k8s.io/api/batch/v1/register.go delete mode 100644 vendor/k8s.io/api/batch/v1/types.go delete mode 100644 vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/batch/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/register.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/types.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/generated.pb.go delete mode 100644 
vendor/k8s.io/api/coordination/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/core/v1/annotation_key_constants.go delete mode 100644 vendor/k8s.io/api/core/v1/doc.go delete mode 100644 vendor/k8s.io/api/core/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/core/v1/generated.proto delete mode 100644 vendor/k8s.io/api/core/v1/objectreference.go delete mode 100644 vendor/k8s.io/api/core/v1/register.go delete mode 100644 vendor/k8s.io/api/core/v1/resource.go delete mode 100644 vendor/k8s.io/api/core/v1/taint.go delete mode 100644 vendor/k8s.io/api/core/v1/toleration.go delete mode 100644 vendor/k8s.io/api/core/v1/types.go delete mode 100644 vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/events/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/networking/v1/doc.go delete mode 100644 vendor/k8s.io/api/networking/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/networking/v1/generated.proto delete mode 100644 vendor/k8s.io/api/networking/v1/register.go delete mode 100644 vendor/k8s.io/api/networking/v1/types.go delete mode 100644 vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/policy/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1/doc.go delete mode 100644 vendor/k8s.io/api/rbac/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/rbac/v1/generated.proto delete mode 100644 vendor/k8s.io/api/rbac/v1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/doc.go delete mode 100644 
vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1/generated.proto delete mode 100644 vendor/k8s.io/api/storage/v1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1/types.go delete mode 100644 vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/generated.proto delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/types.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/generated.proto delete mode 100644 
vendor/k8s.io/api/storage/v1beta1/register.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/types.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/errors.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/help.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/meta.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/priority.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go delete mode 100644 vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go delete mode 100644 vendor/k8s.io/client-go/discovery/cached_discovery.go delete mode 100644 vendor/k8s.io/client-go/discovery/discovery_client.go delete mode 100644 vendor/k8s.io/client-go/discovery/doc.go delete mode 100644 vendor/k8s.io/client-go/discovery/helper.go delete mode 100644 vendor/k8s.io/client-go/discovery/round_tripper.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/clientset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/import.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/scheme/register.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go 
delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go delete 
mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go delete mode 100644 
vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go delete mode 100644 vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go delete mode 100644 vendor/k8s.io/client-go/tools/reference/ref.go diff --git a/go.mod b/go.mod index 8257cf6..ba9fd5d 100644 --- a/go.mod +++ b/go.mod @@ -7,30 +7,23 @@ require ( github.com/FZambia/sentinel v1.1.0 github.com/Jim-Lambert-Bose/cache v1.0.3 github.com/alicebob/miniredis/v2 v2.11.0 - github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 
// indirect - github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 // indirect - github.com/evanphx/json-patch v4.2.0+incompatible // indirect + github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 // indirect github.com/gin-gonic/gin v1.4.0 github.com/gogo/protobuf v1.3.1 // indirect github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect - github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 // indirect + github.com/golang/protobuf v1.4.2 // indirect github.com/gomodule/redigo v2.0.0+incompatible - github.com/google/btree v1.0.0 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/uuid v1.1.1 - github.com/googleapis/gnostic v0.5.1 // indirect - github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/hashicorp/golang-lru v0.5.3 github.com/json-iterator/go v1.1.8 // indirect github.com/konsorten/go-windows-terminal-sequences v1.0.2 // indirect - github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/onsi/ginkgo v1.11.0 // indirect - github.com/onsi/gomega v1.7.0 // indirect + github.com/kr/pretty v0.2.0 // indirect github.com/opentracing/opentracing-go v1.1.0 - github.com/peterbourgon/diskv v2.0.1+incompatible // indirect github.com/prometheus/client_golang v1.1.0 github.com/sirupsen/logrus v1.4.2 github.com/spf13/pflag v1.0.5 // indirect + github.com/stretchr/testify v1.5.1 // indirect github.com/ugorji/go v1.1.7 // indirect github.com/zsais/go-gin-prometheus v0.1.0 golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 // indirect @@ -38,12 +31,10 @@ require ( golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 // indirect golang.org/x/text v0.3.2 // indirect golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect + gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/yaml.v2 v2.2.8 // indirect k8s.io/api v0.0.0-20190708094356-59223ed9f6ce // indirect k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc // indirect k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df - k8s.io/klog v1.0.0 // indirect - k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 // indirect - sigs.k8s.io/structured-merge-diff/v3 v3.0.0 // indirect - sigs.k8s.io/yaml v1.2.0 // indirect ) diff --git a/go.sum b/go.sum index 324029b..a57599d 100644 --- a/go.sum +++ b/go.sum @@ -6,9 +6,6 @@ github.com/FZambia/sentinel v1.1.0 h1:qrCBfxc8SvJihYNjBWgwUI93ZCvFe/PJIPTHKmlp8a github.com/FZambia/sentinel v1.1.0/go.mod h1:ytL1Am/RLlAoAXG6Kj5LNuw/TRRQrv2rt2FT26vP5gI= github.com/Jim-Lambert-Bose/cache v1.0.3 h1:kgkWQcKRtswM08CStWuucbLbjeK8TmaWBTr/S2Ei/x4= github.com/Jim-Lambert-Bose/cache v1.0.3/go.mod h1:rZHUsMi0xjPwopKMTEGC3HfmnrMvqPsIPvaQ70bDzps= -github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 h1:45bxf7AZMwWcqkLzDAQugVEwedisr5nRJ1r+7LYnv0U= @@ -27,12 +24,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn 
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= -github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= -github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= -github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680 h1:ZktWZesgun21uEDrwW7iEV1zPCGQldM2atlJZ3TdvVM= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.0.0-20190301062529-5545eab6dad3 h1:t8FVkw33L+wilf2QiWkw0UV77qRpcH/JHPKGpKa2E8g= @@ -42,19 +33,12 @@ github.com/gin-gonic/gin v1.4.0/go.mod h1:OW2EZn3DO8Ln9oIKOvM++LBO+5UPHJJDH72/q/ github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= @@ -69,8 +53,6 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/gomodule/redigo v1.7.1-0.20190322064113-39e2c31b7ca3/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= github.com/gomodule/redigo v2.0.0+incompatible 
h1:K/R+8tc58AaqLkqG2Ol3Qk+DR/TlNuhuh457pBFPtt0= github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= -github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= @@ -80,16 +62,8 @@ github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.5.1 h1:A8Yhf6EtqTv9RMsU6MQTyrtV1TjWlR6xU9BsZIwuTCM= -github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= -github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.3 h1:YPkqC67at8FYaadspW/6uE0COsBxS2656RLEr8Bppgk= github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -102,14 +76,11 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxv github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/matryer/is v1.2.0 h1:92UTHpy8CDwaJ08GqLDzhhuixiBUUD1p3AU6PHddz4A= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= github.com/mattn/go-isatty v0.0.7 h1:UvyT9uN+3r7yLEYSlJsbQGdsaB/a0DlgWP3pql6iwOc= @@ -124,18 +95,9 @@ 
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= -github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -158,16 +120,13 @@ github.com/robfig/go-cache v0.0.0-20130306151617-9fc39e0dbf62/go.mod h1:65XQgovT github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -183,9 +142,7 @@ github.com/zsais/go-gin-prometheus v0.1.0/go.mod h1:Slirjzuz8uM8Cw0jmPNqbneoqcUt golang.org/x/crypto 
v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= @@ -194,12 +151,9 @@ golang.org/x/net v0.0.0-20191004110552-13f9640d40b9 h1:rjwSpXsdiK0dV8/Naq3kAw9ym golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -209,7 +163,6 @@ golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3 h1:4y9KwBHBgBNwDbtu44R5o1fdO golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7 h1:HmbHVPwrPEKPGLAcHSrMe6+hqSUlvZU0rab6x5EXfGU= golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= @@ -217,7 +170,6 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools 
v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -232,41 +184,22 @@ google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyz google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/go-playground/assert.v1 v1.2.1 h1:xoYuJVE7KT85PYWrN730RguIQO0ePzVRfFMXadIrXTM= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v8 v8.18.2 h1:lFB4DoMU6B626w8ny76MV7VX6W2VHct2GVOI3xgiMrQ= gopkg.in/go-playground/validator.v8 v8.18.2/go.mod h1:RX2a/7Ha8BgOhfk7j780h4/u/RRjR0eouCJSH80/M2Y= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= k8s.io/api v0.0.0-20190708094356-59223ed9f6ce h1:5dyrqZX6sr+cG/e26KF2p6GuTIABBACjjGSOSdjS9sI= k8s.io/api v0.0.0-20190708094356-59223ed9f6ce/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc h1:7z9/6jKWBqkK9GI1RRB0B5fZcmkatLQ/nv8kysch24o= k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= -k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag= -k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df h1:d28f1QoXWvL+FVVpHzCXdx5020GRqd3X3LZjKwJ3xss= k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0 
h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/redispool.go b/redispool.go index c6187bc..dc717f3 100644 --- a/redispool.go +++ b/redispool.go @@ -11,7 +11,6 @@ import ( "github.com/Jim-Lambert-Bose/cache/persistence" "github.com/gomodule/redigo/redis" "github.com/sirupsen/logrus" - "k8s.io/client-go/kubernetes" "k8s.io/client-go/rest" ) @@ -114,14 +113,9 @@ func NewSentinelPool( // inCluster - are we executing in the K8s cluster func inCluster(logger *logrus.Entry) bool { // Try to create an in-cluster k8s client - config, err := rest.InClusterConfig() + _, err := rest.InClusterConfig() if err != nil { - logger.Infof("unable to create k8s config: %s", err) - return false - } - _, err = kubernetes.NewForConfig(config) - if err != nil { - logger.Infof("unable to create k8s clientset: %s", err) + logger.Infof("unable to create k8s config, assuming outside of cluster: %s", err) return false } return true diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go deleted file mode 100644 index e729dcf..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any.go +++ /dev/null @@ -1,165 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "fmt" - "strings" - - "github.com/golang/protobuf/proto" - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" - - anypb "github.com/golang/protobuf/ptypes/any" -) - -const urlPrefix = "type.googleapis.com/" - -// AnyMessageName returns the message name contained in an anypb.Any message. -// Most type assertions should use the Is function instead. -func AnyMessageName(any *anypb.Any) (string, error) { - name, err := anyMessageName(any) - return string(name), err -} -func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) { - if any == nil { - return "", fmt.Errorf("message is nil") - } - name := protoreflect.FullName(any.TypeUrl) - if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 { - name = name[i+len("/"):] - } - if !name.IsValid() { - return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) - } - return name, nil -} - -// MarshalAny marshals the given message m into an anypb.Any message. -func MarshalAny(m proto.Message) (*anypb.Any, error) { - switch dm := m.(type) { - case DynamicAny: - m = dm.Message - case *DynamicAny: - if dm == nil { - return nil, proto.ErrNil - } - m = dm.Message - } - b, err := proto.Marshal(m) - if err != nil { - return nil, err - } - return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil -} - -// Empty returns a new message of the type specified in an anypb.Any message. 
-// It returns protoregistry.NotFound if the corresponding message type could not -// be resolved in the global registry. -func Empty(any *anypb.Any) (proto.Message, error) { - name, err := anyMessageName(any) - if err != nil { - return nil, err - } - mt, err := protoregistry.GlobalTypes.FindMessageByName(name) - if err != nil { - return nil, err - } - return proto.MessageV1(mt.New().Interface()), nil -} - -// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message -// into the provided message m. It returns an error if the target message -// does not match the type in the Any message or if an unmarshal error occurs. -// -// The target message m may be a *DynamicAny message. If the underlying message -// type could not be resolved, then this returns protoregistry.NotFound. -func UnmarshalAny(any *anypb.Any, m proto.Message) error { - if dm, ok := m.(*DynamicAny); ok { - if dm.Message == nil { - var err error - dm.Message, err = Empty(any) - if err != nil { - return err - } - } - m = dm.Message - } - - anyName, err := AnyMessageName(any) - if err != nil { - return err - } - msgName := proto.MessageName(m) - if anyName != msgName { - return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName) - } - return proto.Unmarshal(any.Value, m) -} - -// Is reports whether the Any message contains a message of the specified type. -func Is(any *anypb.Any, m proto.Message) bool { - if any == nil || m == nil { - return false - } - name := proto.MessageName(m) - if !strings.HasSuffix(any.TypeUrl, name) { - return false - } - return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/' -} - -// DynamicAny is a value that can be passed to UnmarshalAny to automatically -// allocate a proto.Message for the type specified in an anypb.Any message. -// The allocated message is stored in the embedded proto.Message. -// -// Example: -// var x ptypes.DynamicAny -// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } -// fmt.Printf("unmarshaled message: %v", x.Message) -type DynamicAny struct{ proto.Message } - -func (m DynamicAny) String() string { - if m.Message == nil { - return "" - } - return m.Message.String() -} -func (m DynamicAny) Reset() { - if m.Message == nil { - return - } - m.Message.Reset() -} -func (m DynamicAny) ProtoMessage() { - return -} -func (m DynamicAny) ProtoReflect() protoreflect.Message { - if m.Message == nil { - return nil - } - return dynamicAny{proto.MessageReflect(m.Message)} -} - -type dynamicAny struct{ protoreflect.Message } - -func (m dynamicAny) Type() protoreflect.MessageType { - return dynamicAnyType{m.Message.Type()} -} -func (m dynamicAny) New() protoreflect.Message { - return dynamicAnyType{m.Message.Type()}.New() -} -func (m dynamicAny) Interface() protoreflect.ProtoMessage { - return DynamicAny{proto.MessageV1(m.Message.Interface())} -} - -type dynamicAnyType struct{ protoreflect.MessageType } - -func (t dynamicAnyType) New() protoreflect.Message { - return dynamicAny{t.MessageType.New()} -} -func (t dynamicAnyType) Zero() protoreflect.Message { - return dynamicAny{t.MessageType.Zero()} -} diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go deleted file mode 100644 index 0ef27d3..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ /dev/null @@ -1,62 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// source: github.com/golang/protobuf/ptypes/any/any.proto - -package any - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - anypb "google.golang.org/protobuf/types/known/anypb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/any.proto. - -type Any = anypb.Any - -var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{ - 0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29, - 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, - 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, - 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() } -func file_github_com_golang_protobuf_ptypes_any_any_proto_init() { - if File_github_com_golang_protobuf_ptypes_any_any_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_any_any_proto = out.File - file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go deleted file mode 100644 index fb9edd5..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/doc.go +++ /dev/null @@ -1,6 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ptypes provides functionality for interacting with well-known types. -package ptypes diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go deleted file mode 100644 index 6110ae8..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration.go +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright 2016 The Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - durationpb "github.com/golang/protobuf/ptypes/duration" -) - -// Range of google.protobuf.Duration as specified in duration.proto. -// This is about 10,000 years in seconds. -const ( - maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) - minSeconds = -maxSeconds -) - -// Duration converts a durationpb.Duration to a time.Duration. -// Duration returns an error if dur is invalid or overflows a time.Duration. -func Duration(dur *durationpb.Duration) (time.Duration, error) { - if err := validateDuration(dur); err != nil { - return 0, err - } - d := time.Duration(dur.Seconds) * time.Second - if int64(d/time.Second) != dur.Seconds { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - if dur.Nanos != 0 { - d += time.Duration(dur.Nanos) * time.Nanosecond - if (d < 0) != (dur.Nanos < 0) { - return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur) - } - } - return d, nil -} - -// DurationProto converts a time.Duration to a durationpb.Duration. -func DurationProto(d time.Duration) *durationpb.Duration { - nanos := d.Nanoseconds() - secs := nanos / 1e9 - nanos -= secs * 1e9 - return &durationpb.Duration{ - Seconds: int64(secs), - Nanos: int32(nanos), - } -} - -// validateDuration determines whether the durationpb.Duration is valid -// according to the definition in google/protobuf/duration.proto. -// A valid durpb.Duration may still be too large to fit into a time.Duration -// Note that the range of durationpb.Duration is about 10,000 years, -// while the range of time.Duration is about 290 years. -func validateDuration(dur *durationpb.Duration) error { - if dur == nil { - return errors.New("duration: nil Duration") - } - if dur.Seconds < minSeconds || dur.Seconds > maxSeconds { - return fmt.Errorf("duration: %v: seconds out of range", dur) - } - if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 { - return fmt.Errorf("duration: %v: nanos out of range", dur) - } - // Seconds and Nanos must have the same sign, unless d.Nanos is zero. - if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) { - return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go deleted file mode 100644 index d0079ee..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ /dev/null @@ -1,63 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto - -package duration - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - durationpb "google.golang.org/protobuf/types/known/durationpb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/duration.proto. 
- -type Duration = durationpb.Duration - -var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{ - 0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67, - 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, - 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() } -func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() { - if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File - file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go deleted file mode 100644 index 026d0d4..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package ptypes - -import ( - "errors" - "fmt" - "time" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" -) - -// Range of google.protobuf.Duration as specified in timestamp.proto. -const ( - // Seconds field of the earliest valid Timestamp. - // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - minValidSeconds = -62135596800 - // Seconds field just after the latest valid Timestamp. 
- // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). - maxValidSeconds = 253402300800 -) - -// Timestamp converts a timestamppb.Timestamp to a time.Time. -// It returns an error if the argument is invalid. -// -// Unlike most Go functions, if Timestamp returns an error, the first return -// value is not the zero time.Time. Instead, it is the value obtained from the -// time.Unix function when passed the contents of the Timestamp, in the UTC -// locale. This may or may not be a meaningful time; many invalid Timestamps -// do map to valid time.Times. -// -// A nil Timestamp returns an error. The first return value in that case is -// undefined. -func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) { - // Don't return the zero value on error, because corresponds to a valid - // timestamp. Instead return whatever time.Unix gives us. - var t time.Time - if ts == nil { - t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp - } else { - t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() - } - return t, validateTimestamp(ts) -} - -// TimestampNow returns a google.protobuf.Timestamp for the current time. -func TimestampNow() *timestamppb.Timestamp { - ts, err := TimestampProto(time.Now()) - if err != nil { - panic("ptypes: time.Now() out of Timestamp range") - } - return ts -} - -// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. -// It returns an error if the resulting Timestamp is invalid. -func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) { - ts := &timestamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } - if err := validateTimestamp(ts); err != nil { - return nil, err - } - return ts, nil -} - -// TimestampString returns the RFC 3339 string for valid Timestamps. -// For invalid Timestamps, it returns an error message in parentheses. -func TimestampString(ts *timestamppb.Timestamp) string { - t, err := Timestamp(ts) - if err != nil { - return fmt.Sprintf("(%v)", err) - } - return t.Format(time.RFC3339Nano) -} - -// validateTimestamp determines whether a Timestamp is valid. -// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01) -// and has a Nanos field in the range [0, 1e9). -// -// If the Timestamp is valid, validateTimestamp returns nil. -// Otherwise, it returns an error that describes the problem. -// -// Every valid Timestamp can be represented by a time.Time, -// but the converse is not true. -func validateTimestamp(ts *timestamppb.Timestamp) error { - if ts == nil { - return errors.New("timestamp: nil Timestamp") - } - if ts.Seconds < minValidSeconds { - return fmt.Errorf("timestamp: %v before 0001-01-01", ts) - } - if ts.Seconds >= maxValidSeconds { - return fmt.Errorf("timestamp: %v after 10000-01-01", ts) - } - if ts.Nanos < 0 || ts.Nanos >= 1e9 { - return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts) - } - return nil -} diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go deleted file mode 100644 index a76f807..0000000 --- a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT.
-// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto - -package timestamp - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" - reflect "reflect" -) - -// Symbols defined in public import of google/protobuf/timestamp.proto. - -type Timestamp = timestamppb.Timestamp - -var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, - 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37, - 0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, - 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{} -var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() } -func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() { - if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 0, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes, - DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs, - }.Build() - File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil - file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil -} diff --git a/vendor/github.com/google/btree/.travis.yml b/vendor/github.com/google/btree/.travis.yml deleted file mode 100644 index 4f2ee4d..0000000 --- a/vendor/github.com/google/btree/.travis.yml +++ /dev/null @@ -1 +0,0 @@ -language: go diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE deleted file mode 100644 index d645695..0000000 --- 
a/vendor/github.com/google/btree/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md deleted file mode 100644 index 6062a4d..0000000 --- a/vendor/github.com/google/btree/README.md +++ /dev/null @@ -1,12 +0,0 @@ -# BTree implementation for Go - -![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) - -This package provides an in-memory B-Tree implementation for Go, useful as -an ordered, mutable data structure. - -The API is based off of the wonderful -http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to -act as a drop-in replacement for gollrb trees. - -See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go deleted file mode 100644 index 6ff062f..0000000 --- a/vendor/github.com/google/btree/btree.go +++ /dev/null @@ -1,890 +0,0 @@ -// Copyright 2014 Google Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package btree implements in-memory B-Trees of arbitrary degree. -// -// btree implements an in-memory B-Tree for use as an ordered data structure. -// It is not meant for persistent storage solutions. -// -// It has a flatter structure than an equivalent red-black or other binary tree, -// which in some cases yields better memory usage and/or performance. -// See some discussion on the matter here: -// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html -// Note, though, that this project is in no way related to the C++ B-Tree -// implementation written about there. -// -// Within this tree, each node contains a slice of items and a (possibly nil) -// slice of children. For basic numeric values or raw structs, this can cause -// efficiency differences when compared to equivalent C++ template code that -// stores values in arrays within the node: -// * Due to the overhead of storing values as interfaces (each -// value needs to be stored as the value itself, then 2 words for the -// interface pointing to that value and its type), resulting in higher -// memory use. -// * Since interfaces can point to values anywhere in memory, values are -// most likely not stored in contiguous blocks, resulting in a higher -// number of cache misses. -// These issues don't tend to matter, though, when working with strings or other -// heap-allocated structures, since C++-equivalent structures also must store -// pointers and also distribute their values across the heap. 
-//
-// This implementation is designed to be a drop-in replacement to gollrb.LLRB
-// trees, (http://github.com/petar/gollrb), an excellent and probably the most
-// widely used ordered tree implementation in the Go ecosystem currently.
-// Its functions, therefore, exactly mirror those of
-// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
-// support storing multiple equivalent values.
-package btree
-
-import (
- "fmt"
- "io"
- "sort"
- "strings"
- "sync"
-)
-
-// Item represents a single object in the tree.
-type Item interface {
- // Less tests whether the current item is less than the given argument.
- //
- // This must provide a strict weak ordering.
- // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
- // hold one of either a or b in the tree).
- Less(than Item) bool
-}
-
-const (
- DefaultFreeListSize = 32
-)
-
-var (
- nilItems = make(items, 16)
- nilChildren = make(children, 16)
-)
-
-// FreeList represents a free list of btree nodes. By default each
-// BTree has its own FreeList, but multiple BTrees can share the same
-// FreeList.
-// Two Btrees using the same freelist are safe for concurrent write access.
-type FreeList struct {
- mu sync.Mutex
- freelist []*node
-}
-
-// NewFreeList creates a new free list.
-// size is the maximum size of the returned free list.
-func NewFreeList(size int) *FreeList {
- return &FreeList{freelist: make([]*node, 0, size)}
-}
-
-func (f *FreeList) newNode() (n *node) {
- f.mu.Lock()
- index := len(f.freelist) - 1
- if index < 0 {
- f.mu.Unlock()
- return new(node)
- }
- n = f.freelist[index]
- f.freelist[index] = nil
- f.freelist = f.freelist[:index]
- f.mu.Unlock()
- return
-}
-
-// freeNode adds the given node to the list, returning true if it was added
-// and false if it was discarded.
-func (f *FreeList) freeNode(n *node) (out bool) {
- f.mu.Lock()
- if len(f.freelist) < cap(f.freelist) {
- f.freelist = append(f.freelist, n)
- out = true
- }
- f.mu.Unlock()
- return
-}
-
-// ItemIterator allows callers of Ascend* to iterate in-order over portions of
-// the tree. When this function returns false, iteration will stop and the
-// associated Ascend* function will immediately return.
-type ItemIterator func(i Item) bool
-
-// New creates a new B-Tree with the given degree.
-//
-// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
-// and 2-4 children).
-func New(degree int) *BTree {
- return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
-}
-
-// NewWithFreeList creates a new B-Tree that uses the given node free list.
-func NewWithFreeList(degree int, f *FreeList) *BTree {
- if degree <= 1 {
- panic("bad degree")
- }
- return &BTree{
- degree: degree,
- cow: &copyOnWriteContext{freelist: f},
- }
-}
-
-// items stores items in a node.
-type items []Item
-
-// insertAt inserts a value into the given index, pushing all subsequent values
-// forward.
-func (s *items) insertAt(index int, item Item) {
- *s = append(*s, nil)
- if index < len(*s) {
- copy((*s)[index+1:], (*s)[index:])
- }
- (*s)[index] = item
-}
-
-// removeAt removes a value at a given index, pulling all subsequent values
-// back.
-func (s *items) removeAt(index int) Item {
- item := (*s)[index]
- copy((*s)[index:], (*s)[index+1:])
- (*s)[len(*s)-1] = nil
- *s = (*s)[:len(*s)-1]
- return item
-}
-
-// pop removes and returns the last element in the list.
-func (s *items) pop() (out Item) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index items. index must be less than or equal to length. -func (s *items) truncate(index int) { - var toClear items - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilItems):] - } -} - -// find returns the index where the given item should be inserted into this -// list. 'found' is true if the item already exists in the list at the given -// index. -func (s items) find(item Item) (index int, found bool) { - i := sort.Search(len(s), func(i int) bool { - return item.Less(s[i]) - }) - if i > 0 && !s[i-1].Less(item) { - return i - 1, true - } - return i, false -} - -// children stores child nodes in a node. -type children []*node - -// insertAt inserts a value into the given index, pushing all subsequent values -// forward. -func (s *children) insertAt(index int, n *node) { - *s = append(*s, nil) - if index < len(*s) { - copy((*s)[index+1:], (*s)[index:]) - } - (*s)[index] = n -} - -// removeAt removes a value at a given index, pulling all subsequent values -// back. -func (s *children) removeAt(index int) *node { - n := (*s)[index] - copy((*s)[index:], (*s)[index+1:]) - (*s)[len(*s)-1] = nil - *s = (*s)[:len(*s)-1] - return n -} - -// pop removes and returns the last element in the list. -func (s *children) pop() (out *node) { - index := len(*s) - 1 - out = (*s)[index] - (*s)[index] = nil - *s = (*s)[:index] - return -} - -// truncate truncates this instance at index so that it contains only the -// first index children. index must be less than or equal to length. -func (s *children) truncate(index int) { - var toClear children - *s, toClear = (*s)[:index], (*s)[index:] - for len(toClear) > 0 { - toClear = toClear[copy(toClear, nilChildren):] - } -} - -// node is an internal node in a tree. -// -// It must at all times maintain the invariant that either -// * len(children) == 0, len(items) unconstrained -// * len(children) == len(items) + 1 -type node struct { - items items - children children - cow *copyOnWriteContext -} - -func (n *node) mutableFor(cow *copyOnWriteContext) *node { - if n.cow == cow { - return n - } - out := cow.newNode() - if cap(out.items) >= len(n.items) { - out.items = out.items[:len(n.items)] - } else { - out.items = make(items, len(n.items), cap(n.items)) - } - copy(out.items, n.items) - // Copy children - if cap(out.children) >= len(n.children) { - out.children = out.children[:len(n.children)] - } else { - out.children = make(children, len(n.children), cap(n.children)) - } - copy(out.children, n.children) - return out -} - -func (n *node) mutableChild(i int) *node { - c := n.children[i].mutableFor(n.cow) - n.children[i] = c - return c -} - -// split splits the given node at the given index. The current node shrinks, -// and this function returns the item that existed at that index and a new node -// containing all items/children after it. -func (n *node) split(i int) (Item, *node) { - item := n.items[i] - next := n.cow.newNode() - next.items = append(next.items, n.items[i+1:]...) - n.items.truncate(i) - if len(n.children) > 0 { - next.children = append(next.children, n.children[i+1:]...) - n.children.truncate(i + 1) - } - return item, next -} - -// maybeSplitChild checks if a child should be split, and if so splits it. -// Returns whether or not a split occurred. 
-func (n *node) maybeSplitChild(i, maxItems int) bool { - if len(n.children[i].items) < maxItems { - return false - } - first := n.mutableChild(i) - item, second := first.split(maxItems / 2) - n.items.insertAt(i, item) - n.children.insertAt(i+1, second) - return true -} - -// insert inserts an item into the subtree rooted at this node, making sure -// no nodes in the subtree exceed maxItems items. Should an equivalent item be -// be found/replaced by insert, it will be returned. -func (n *node) insert(item Item, maxItems int) Item { - i, found := n.items.find(item) - if found { - out := n.items[i] - n.items[i] = item - return out - } - if len(n.children) == 0 { - n.items.insertAt(i, item) - return nil - } - if n.maybeSplitChild(i, maxItems) { - inTree := n.items[i] - switch { - case item.Less(inTree): - // no change, we want first split node - case inTree.Less(item): - i++ // we want second split node - default: - out := n.items[i] - n.items[i] = item - return out - } - } - return n.mutableChild(i).insert(item, maxItems) -} - -// get finds the given key in the subtree and returns it. -func (n *node) get(key Item) Item { - i, found := n.items.find(key) - if found { - return n.items[i] - } else if len(n.children) > 0 { - return n.children[i].get(key) - } - return nil -} - -// min returns the first item in the subtree. -func min(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[0] - } - if len(n.items) == 0 { - return nil - } - return n.items[0] -} - -// max returns the last item in the subtree. -func max(n *node) Item { - if n == nil { - return nil - } - for len(n.children) > 0 { - n = n.children[len(n.children)-1] - } - if len(n.items) == 0 { - return nil - } - return n.items[len(n.items)-1] -} - -// toRemove details what item to remove in a node.remove call. -type toRemove int - -const ( - removeItem toRemove = iota // removes the given item - removeMin // removes smallest item in the subtree - removeMax // removes largest item in the subtree -) - -// remove removes an item from the subtree rooted at this node. -func (n *node) remove(item Item, minItems int, typ toRemove) Item { - var i int - var found bool - switch typ { - case removeMax: - if len(n.children) == 0 { - return n.items.pop() - } - i = len(n.items) - case removeMin: - if len(n.children) == 0 { - return n.items.removeAt(0) - } - i = 0 - case removeItem: - i, found = n.items.find(item) - if len(n.children) == 0 { - if found { - return n.items.removeAt(i) - } - return nil - } - default: - panic("invalid type") - } - // If we get to here, we have children. - if len(n.children[i].items) <= minItems { - return n.growChildAndRemove(i, item, minItems, typ) - } - child := n.mutableChild(i) - // Either we had enough items to begin with, or we've done some - // merging/stealing, because we've got enough now and we're ready to return - // stuff. - if found { - // The item exists at index 'i', and the child we've selected can give us a - // predecessor, since if we've gotten here it's got > minItems items in it. - out := n.items[i] - // We use our special-case 'remove' call with typ=maxItem to pull the - // predecessor of item i (the rightmost leaf of our immediate left child) - // and set it into where we pulled the item from. - n.items[i] = child.remove(nil, minItems, removeMax) - return out - } - // Final recursive call. Once we're here, we know that the item isn't in this - // node and that the child is big enough to remove from. 
- return child.remove(item, minItems, typ) -} - -// growChildAndRemove grows child 'i' to make sure it's possible to remove an -// item from it while keeping it at minItems, then calls remove to actually -// remove it. -// -// Most documentation says we have to do two sets of special casing: -// 1) item is in this node -// 2) item is in child -// In both cases, we need to handle the two subcases: -// A) node has enough values that it can spare one -// B) node doesn't have enough values -// For the latter, we have to check: -// a) left sibling has node to spare -// b) right sibling has node to spare -// c) we must merge -// To simplify our code here, we handle cases #1 and #2 the same: -// If a node doesn't have enough items, we make sure it does (using a,b,c). -// We then simply redo our remove call, and the second time (regardless of -// whether we're in case 1 or 2), we'll have enough items and can guarantee -// that we hit case A. -func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { - if i > 0 && len(n.children[i-1].items) > minItems { - // Steal from left child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i - 1) - stolenItem := stealFrom.items.pop() - child.items.insertAt(0, n.items[i-1]) - n.items[i-1] = stolenItem - if len(stealFrom.children) > 0 { - child.children.insertAt(0, stealFrom.children.pop()) - } - } else if i < len(n.items) && len(n.children[i+1].items) > minItems { - // steal from right child - child := n.mutableChild(i) - stealFrom := n.mutableChild(i + 1) - stolenItem := stealFrom.items.removeAt(0) - child.items = append(child.items, n.items[i]) - n.items[i] = stolenItem - if len(stealFrom.children) > 0 { - child.children = append(child.children, stealFrom.children.removeAt(0)) - } - } else { - if i >= len(n.items) { - i-- - } - child := n.mutableChild(i) - // merge with right child - mergeItem := n.items.removeAt(i) - mergeChild := n.children.removeAt(i + 1) - child.items = append(child.items, mergeItem) - child.items = append(child.items, mergeChild.items...) - child.children = append(child.children, mergeChild.children...) - n.cow.freeNode(mergeChild) - } - return n.remove(item, minItems, typ) -} - -type direction int - -const ( - descend = direction(-1) - ascend = direction(+1) -) - -// iterate provides a simple method for iterating over elements in the tree. -// -// When ascending, the 'start' should be less than 'stop' and when descending, -// the 'start' should be greater than 'stop'. Setting 'includeStart' to true -// will force the iterator to include the first item when it equals 'start', -// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a -// "greaterThan" or "lessThan" queries. 
-func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { - var ok, found bool - var index int - switch dir { - case ascend: - if start != nil { - index, _ = n.items.find(start) - } - for i := index; i < len(n.items); i++ { - if len(n.children) > 0 { - if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { - hit = true - continue - } - hit = true - if stop != nil && !n.items[i].Less(stop) { - return hit, false - } - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - case descend: - if start != nil { - index, found = n.items.find(start) - if !found { - index = index - 1 - } - } else { - index = len(n.items) - 1 - } - for i := index; i >= 0; i-- { - if start != nil && !n.items[i].Less(start) { - if !includeStart || hit || start.Less(n.items[i]) { - continue - } - } - if len(n.children) > 0 { - if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - if stop != nil && !stop.Less(n.items[i]) { - return hit, false // continue - } - hit = true - if !iter(n.items[i]) { - return hit, false - } - } - if len(n.children) > 0 { - if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { - return hit, false - } - } - } - return hit, true -} - -// Used for testing/debugging purposes. -func (n *node) print(w io.Writer, level int) { - fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) - for _, c := range n.children { - c.print(w, level+1) - } -} - -// BTree is an implementation of a B-Tree. -// -// BTree stores Item instances in an ordered structure, allowing easy insertion, -// removal, and iteration. -// -// Write operations are not safe for concurrent mutation by multiple -// goroutines, but Read operations are. -type BTree struct { - degree int - length int - root *node - cow *copyOnWriteContext -} - -// copyOnWriteContext pointers determine node ownership... a tree with a write -// context equivalent to a node's write context is allowed to modify that node. -// A tree whose write context does not match a node's is not allowed to modify -// it, and must create a new, writable copy (IE: it's a Clone). -// -// When doing any write operation, we maintain the invariant that the current -// node's context is equal to the context of the tree that requested the write. -// We do this by, before we descend into any node, creating a copy with the -// correct context if the contexts don't match. -// -// Since the node we're currently visiting on any write has the requesting -// tree's context, that node is modifiable in place. Children of that node may -// not share context, but before we descend into them, we'll make a mutable -// copy. -type copyOnWriteContext struct { - freelist *FreeList -} - -// Clone clones the btree, lazily. Clone should not be called concurrently, -// but the original tree (t) and the new tree (t2) can be used concurrently -// once the Clone call completes. -// -// The internal tree structure of b is marked read-only and shared between t and -// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes -// whenever one of b's original nodes would have been modified. Read operations -// should have no performance degredation. 
Write operations for both t and t2 -// will initially experience minor slow-downs caused by additional allocs and -// copies due to the aforementioned copy-on-write logic, but should converge to -// the original performance characteristics of the original tree. -func (t *BTree) Clone() (t2 *BTree) { - // Create two entirely new copy-on-write contexts. - // This operation effectively creates three trees: - // the original, shared nodes (old b.cow) - // the new b.cow nodes - // the new out.cow nodes - cow1, cow2 := *t.cow, *t.cow - out := *t - t.cow = &cow1 - out.cow = &cow2 - return &out -} - -// maxItems returns the max number of items to allow per node. -func (t *BTree) maxItems() int { - return t.degree*2 - 1 -} - -// minItems returns the min number of items to allow per node (ignored for the -// root node). -func (t *BTree) minItems() int { - return t.degree - 1 -} - -func (c *copyOnWriteContext) newNode() (n *node) { - n = c.freelist.newNode() - n.cow = c - return -} - -type freeType int - -const ( - ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) - ftStored // node was stored in the freelist for later use - ftNotOwned // node was ignored by COW, since it's owned by another one -) - -// freeNode frees a node within a given COW context, if it's owned by that -// context. It returns what happened to the node (see freeType const -// documentation). -func (c *copyOnWriteContext) freeNode(n *node) freeType { - if n.cow == c { - // clear to allow GC - n.items.truncate(0) - n.children.truncate(0) - n.cow = nil - if c.freelist.freeNode(n) { - return ftStored - } else { - return ftFreelistFull - } - } else { - return ftNotOwned - } -} - -// ReplaceOrInsert adds the given item to the tree. If an item in the tree -// already equals the given one, it is removed from the tree and returned. -// Otherwise, nil is returned. -// -// nil cannot be added to the tree (will panic). -func (t *BTree) ReplaceOrInsert(item Item) Item { - if item == nil { - panic("nil item being added to BTree") - } - if t.root == nil { - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item) - t.length++ - return nil - } else { - t.root = t.root.mutableFor(t.cow) - if len(t.root.items) >= t.maxItems() { - item2, second := t.root.split(t.maxItems() / 2) - oldroot := t.root - t.root = t.cow.newNode() - t.root.items = append(t.root.items, item2) - t.root.children = append(t.root.children, oldroot, second) - } - } - out := t.root.insert(item, t.maxItems()) - if out == nil { - t.length++ - } - return out -} - -// Delete removes an item equal to the passed in item from the tree, returning -// it. If no such item exists, returns nil. -func (t *BTree) Delete(item Item) Item { - return t.deleteItem(item, removeItem) -} - -// DeleteMin removes the smallest item in the tree and returns it. -// If no such item exists, returns nil. -func (t *BTree) DeleteMin() Item { - return t.deleteItem(nil, removeMin) -} - -// DeleteMax removes the largest item in the tree and returns it. -// If no such item exists, returns nil. 
-func (t *BTree) DeleteMax() Item { - return t.deleteItem(nil, removeMax) -} - -func (t *BTree) deleteItem(item Item, typ toRemove) Item { - if t.root == nil || len(t.root.items) == 0 { - return nil - } - t.root = t.root.mutableFor(t.cow) - out := t.root.remove(item, t.minItems(), typ) - if len(t.root.items) == 0 && len(t.root.children) > 0 { - oldroot := t.root - t.root = t.root.children[0] - t.cow.freeNode(oldroot) - } - if out != nil { - t.length-- - } - return out -} - -// AscendRange calls the iterator for every value in the tree within the range -// [greaterOrEqual, lessThan), until iterator returns false. -func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) -} - -// AscendLessThan calls the iterator for every value in the tree within the range -// [first, pivot), until iterator returns false. -func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, pivot, false, false, iterator) -} - -// AscendGreaterOrEqual calls the iterator for every value in the tree within -// the range [pivot, last], until iterator returns false. -func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, pivot, nil, true, false, iterator) -} - -// Ascend calls the iterator for every value in the tree within the range -// [first, last], until iterator returns false. -func (t *BTree) Ascend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(ascend, nil, nil, false, false, iterator) -} - -// DescendRange calls the iterator for every value in the tree within the range -// [lessOrEqual, greaterThan), until iterator returns false. -func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) -} - -// DescendLessOrEqual calls the iterator for every value in the tree within the range -// [pivot, first], until iterator returns false. -func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, pivot, nil, true, false, iterator) -} - -// DescendGreaterThan calls the iterator for every value in the tree within -// the range (pivot, last], until iterator returns false. -func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, pivot, false, false, iterator) -} - -// Descend calls the iterator for every value in the tree within the range -// [last, first], until iterator returns false. -func (t *BTree) Descend(iterator ItemIterator) { - if t.root == nil { - return - } - t.root.iterate(descend, nil, nil, false, false, iterator) -} - -// Get looks for the key item in the tree, returning it. It returns nil if -// unable to find that item. -func (t *BTree) Get(key Item) Item { - if t.root == nil { - return nil - } - return t.root.get(key) -} - -// Min returns the smallest item in the tree, or nil if the tree is empty. -func (t *BTree) Min() Item { - return min(t.root) -} - -// Max returns the largest item in the tree, or nil if the tree is empty. -func (t *BTree) Max() Item { - return max(t.root) -} - -// Has returns true if the given key is in the tree. 
-func (t *BTree) Has(key Item) bool { - return t.Get(key) != nil -} - -// Len returns the number of items currently in the tree. -func (t *BTree) Len() int { - return t.length -} - -// Clear removes all items from the btree. If addNodesToFreelist is true, -// t's nodes are added to its freelist as part of this call, until the freelist -// is full. Otherwise, the root node is simply dereferenced and the subtree -// left to Go's normal GC processes. -// -// This can be much faster -// than calling Delete on all elements, because that requires finding/removing -// each element in the tree and updating the tree accordingly. It also is -// somewhat faster than creating a new tree to replace the old one, because -// nodes from the old tree are reclaimed into the freelist for use by the new -// one, instead of being lost to the garbage collector. -// -// This call takes: -// O(1): when addNodesToFreelist is false, this is a single operation. -// O(1): when the freelist is already full, it breaks out immediately -// O(freelist size): when the freelist is empty and the nodes are all owned -// by this tree, nodes are added to the freelist until full. -// O(tree size): when all nodes are owned by another tree, all nodes are -// iterated over looking for nodes to add to the freelist, and due to -// ownership, none are. -func (t *BTree) Clear(addNodesToFreelist bool) { - if t.root != nil && addNodesToFreelist { - t.root.reset(t.cow) - } - t.root, t.length = nil, 0 -} - -// reset returns a subtree to the freelist. It breaks out immediately if the -// freelist is full, since the only benefit of iterating is to fill that -// freelist up. Returns true if parent reset call should continue. -func (n *node) reset(c *copyOnWriteContext) bool { - for _, child := range n.children { - if !child.reset(c) { - return false - } - } - return c.freeNode(n) != ftFreelistFull -} - -// Int implements the Item interface for integers. -type Int int - -// Less returns true if int(a) < int(b). -func (a Int) Less(b Item) bool { - return a < b.(Int) -} diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE deleted file mode 100644 index 6b0b127..0000000 --- a/vendor/github.com/googleapis/gnostic/LICENSE +++ /dev/null @@ -1,203 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. 
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go deleted file mode 100644 index af6c0ee..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go +++ /dev/null @@ -1,8789 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. 
- -package openapi_v2 - -import ( - "fmt" - "github.com/googleapis/gnostic/compiler" - "gopkg.in/yaml.v3" - "regexp" - "strings" -) - -// Version returns the package name (and OpenAPI version). -func Version() string { - return "openapi_v2" -} - -// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. -func NewAdditionalPropertiesItem(in *yaml.Node, context *compiler.Context) (*AdditionalPropertiesItem, error) { - errors := make([]error, 0) - x := &AdditionalPropertiesItem{} - matched := false - // Schema schema = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) - if matchingError == nil { - x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // bool boolean = 2; - boolValue, ok := compiler.BoolForScalarNode(in) - if ok { - x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} - matched = true - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewAny creates an object of type Any if possible, returning an error if not. -func NewAny(in *yaml.Node, context *compiler.Context) (*Any, error) { - errors := make([]error, 0) - x := &Any{} - bytes := compiler.Marshal(in) - x.Yaml = string(bytes) - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. -func NewApiKeySecurity(in *yaml.Node, context *compiler.Context) (*ApiKeySecurity, error) { - errors := make([]error, 0) - x := &ApiKeySecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [apiKey] - if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value 
for name: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header query] - if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. 
-func NewBasicAuthenticationSecurity(in *yaml.Node, context *compiler.Context) (*BasicAuthenticationSecurity, error) { - errors := make([]error, 0) - x := &BasicAuthenticationSecurity{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [basic] - if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 2; - v2 := compiler.MapValueForKey(m, "description") - if v2 != nil { - x.Description, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
-func NewBodyParameter(in *yaml.Node, context *compiler.Context) (*BodyParameter, error) { - errors := make([]error, 0) - x := &BodyParameter{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"in", "name", "schema"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "in", "name", "required", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 2; - v2 := compiler.MapValueForKey(m, "name") - if v2 != nil { - x.Name, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 3; - v3 := compiler.MapValueForKey(m, "in") - if v3 != nil { - x.In, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [body] - if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool required = 4; - v4 := compiler.MapValueForKey(m, "required") - if v4 != nil { - x.Required, ok = compiler.BoolForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Schema schema = 5; - v5 := compiler.MapValueForKey(m, "schema") - if v5 != nil { - var err error - x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 6; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = 
append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewContact creates an object of type Contact if possible, returning an error if not. -func NewContact(in *yaml.Node, context *compiler.Context) (*Contact, error) { - errors := make([]error, 0) - x := &Contact{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"email", "name", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string name = 1; - v1 := compiler.MapValueForKey(m, "name") - if v1 != nil { - x.Name, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string email = 3; - v3 := compiler.MapValueForKey(m, "email") - if v3 != nil { - x.Email, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for email: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 4; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefault creates an object of type Default if possible, returning an error if not. 
-func NewDefault(in *yaml.Node, context *compiler.Context) (*Default, error) { - errors := make([]error, 0) - x := &Default{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDefinitions creates an object of type Definitions if possible, returning an error if not. -func NewDefinitions(in *yaml.Node, context *compiler.Context) (*Definitions, error) { - errors := make([]error, 0) - x := &Definitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewDocument creates an object of type Document if possible, returning an error if not. 
-func NewDocument(in *yaml.Node, context *compiler.Context) (*Document, error) { - errors := make([]error, 0) - x := &Document{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"info", "paths", "swagger"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string swagger = 1; - v1 := compiler.MapValueForKey(m, "swagger") - if v1 != nil { - x.Swagger, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [2.0] - if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { - message := fmt.Sprintf("has unexpected value for swagger: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Info info = 2; - v2 := compiler.MapValueForKey(m, "info") - if v2 != nil { - var err error - x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) - if err != nil { - errors = append(errors, err) - } - } - // string host = 3; - v3 := compiler.MapValueForKey(m, "host") - if v3 != nil { - x.Host, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for host: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string base_path = 4; - v4 := compiler.MapValueForKey(m, "basePath") - if v4 != nil { - x.BasePath, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for basePath: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string schemes = 5; - v5 := compiler.MapValueForKey(m, "schemes") - if v5 != nil { - v, ok := compiler.SequenceNodeForNode(v5) - if ok { - x.Schemes = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [http https ws wss] - if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { - message := fmt.Sprintf("has unexpected value for schemes: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string consumes = 6; - v6 := compiler.MapValueForKey(m, "consumes") - if v6 != nil { - v, ok := compiler.SequenceNodeForNode(v6) - if ok { - x.Consumes = compiler.StringArrayForSequenceNode(v) - } else { - message := 
fmt.Sprintf("has unexpected value for consumes: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string produces = 7; - v7 := compiler.MapValueForKey(m, "produces") - if v7 != nil { - v, ok := compiler.SequenceNodeForNode(v7) - if ok { - x.Produces = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for produces: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Paths paths = 8; - v8 := compiler.MapValueForKey(m, "paths") - if v8 != nil { - var err error - x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) - if err != nil { - errors = append(errors, err) - } - } - // Definitions definitions = 9; - v9 := compiler.MapValueForKey(m, "definitions") - if v9 != nil { - var err error - x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // ParameterDefinitions parameters = 10; - v10 := compiler.MapValueForKey(m, "parameters") - if v10 != nil { - var err error - x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) - if err != nil { - errors = append(errors, err) - } - } - // ResponseDefinitions responses = 11; - v11 := compiler.MapValueForKey(m, "responses") - if v11 != nil { - var err error - x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated SecurityRequirement security = 12; - v12 := compiler.MapValueForKey(m, "security") - if v12 != nil { - // repeated SecurityRequirement - x.Security = make([]*SecurityRequirement, 0) - a, ok := compiler.SequenceNodeForNode(v12) - if ok { - for _, item := range a.Content { - y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) - if err != nil { - errors = append(errors, err) - } - x.Security = append(x.Security, y) - } - } - } - // SecurityDefinitions security_definitions = 13; - v13 := compiler.MapValueForKey(m, "securityDefinitions") - if v13 != nil { - var err error - x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Tag tags = 14; - v14 := compiler.MapValueForKey(m, "tags") - if v14 != nil { - // repeated Tag - x.Tags = make([]*Tag, 0) - a, ok := compiler.SequenceNodeForNode(v14) - if ok { - for _, item := range a.Content { - y, err := NewTag(item, compiler.NewContext("tags", context)) - if err != nil { - errors = append(errors, err) - } - x.Tags = append(x.Tags, y) - } - } - } - // ExternalDocs external_docs = 15; - v15 := compiler.MapValueForKey(m, "externalDocs") - if v15 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 16; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = 
resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExamples creates an object of type Examples if possible, returning an error if not. -func NewExamples(in *yaml.Node, context *compiler.Context) (*Examples, error) { - errors := make([]error, 0) - x := &Examples{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedAny additional_properties = 1; - // MAP: Any - x.AdditionalProperties = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. -func NewExternalDocs(in *yaml.Node, context *compiler.Context) (*ExternalDocs, error) { - errors := make([]error, 0) - x := &ExternalDocs{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"url"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "url"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string url = 2; - v2 := compiler.MapValueForKey(m, "url") - if v2 != nil { - x.Url, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for url: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 3; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - 
pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. -func NewFileSchema(in *yaml.Node, context *compiler.Context) (*FileSchema, error) { - errors := make([]error, 0) - x := &FileSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string format = 1; - v1 := compiler.MapValueForKey(m, "format") - if v1 != nil { - x.Format, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 2; - v2 := compiler.MapValueForKey(m, "title") - if v2 != nil { - x.Title, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 4; - v4 := compiler.MapValueForKey(m, "default") - if v4 != nil { - var err error - x.Default, err = NewAny(v4, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated string required = 5; - v5 := compiler.MapValueForKey(m, "required") - if v5 != nil { - v, ok := compiler.SequenceNodeForNode(v5) - if ok { - x.Required = compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - 
errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [file] - if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 7; - v7 := compiler.MapValueForKey(m, "readOnly") - if v7 != nil { - x.ReadOnly, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // ExternalDocs external_docs = 8; - v8 := compiler.MapValueForKey(m, "externalDocs") - if v8 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 9; - v9 := compiler.MapValueForKey(m, "example") - if v9 != nil { - var err error - x.Example, err = NewAny(v9, compiler.NewContext("example", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 10; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. 
-func NewFormDataParameterSubSchema(in *yaml.Node, context *compiler.Context) (*FormDataParameterSubSchema, error) { - errors := make([]error, 0) - x := &FormDataParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [formData] - if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array file] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := 
compiler.MapValueForKey(m, "format") - if v7 != nil { - x.Format, ok = compiler.StringForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - v, ok := compiler.FloatForScalarNode(v11) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - v, ok := compiler.FloatForScalarNode(v13) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := compiler.IntForScalarNode(v16) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, "pattern") - 
if v17 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v17) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := compiler.IntForScalarNode(v19) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v20) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v21) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - v, ok := compiler.FloatForScalarNode(v22) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeader creates an object of type Header if possible, returning an error if not. 
-func NewHeader(in *yaml.Node, context *compiler.Context) (*Header, error) { - errors := make([]error, 0) - x := &Header{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"type"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - v, ok := compiler.FloatForScalarNode(v6) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for 
maximum: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v7 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - v, ok := compiler.FloatForScalarNode(v8) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := compiler.IntForScalarNode(v10) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := compiler.IntForScalarNode(v11) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := compiler.IntForScalarNode(v13) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v15) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v16) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := 
compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - v, ok := compiler.FloatForScalarNode(v17) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 18; - v18 := compiler.MapValueForKey(m, "description") - if v18 != nil { - x.Description, ok = compiler.StringForScalarNode(v18) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 19; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. -func NewHeaderParameterSubSchema(in *yaml.Node, context *compiler.Context) (*HeaderParameterSubSchema, error) { - errors := make([]error, 0) - x := &HeaderParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [header] - if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil 
[Generated OpenAPI v2 (Swagger 2.0) model code omitted from this hunk listing: the original diff newlines were lost in extraction, leaving the hunk unreadable. It is a removal-only hunk ("-" lines) that begins partway through a parameter sub-schema constructor (validation of the description, name, type, format, items, collectionFormat, default, maximum/minimum, exclusiveMaximum/exclusiveMinimum, maxLength/minLength, pattern, maxItems/minItems, uniqueItems, enum, multipleOf, and x- vendor-extension fields) and then deletes, in full or in part, the generated constructors NewHeaders, NewInfo, NewItemsItem, NewJsonReference, NewLicense, NewNamedAny, NewNamedHeader, NewNamedParameter, NewNamedPathItem, NewNamedResponse, NewNamedResponseValue, NewNamedSchema, NewNamedSecurityDefinitionsItem, NewNamedString, NewNamedStringArray, NewNonBodyParameter, NewOauth2AccessCodeSecurity, NewOauth2ApplicationSecurity, NewOauth2ImplicitSecurity, NewOauth2PasswordSecurity, NewOauth2Scopes, NewOperation, NewParameter, NewParameterDefinitions, NewParametersItem, NewPathItem, and NewPathParameterSubSchema (the last cut off at the end of this section). Every constructor follows the same generated pattern: unpack the input yaml.Node as a map via compiler.UnpackMap, report missing required keys and invalid keys, convert each known key to its expected scalar type (string, bool, int64, or float) and check any enum constraints, recurse into nested constructors for structured fields, collect "x-" keys as NamedAny vendor extensions via compiler.CallExtension, and return the object together with compiler.NewErrorGroupOrNil(errors).]
compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 7; - v7 := compiler.MapValueForKey(m, "items") - if v7 != nil { - var err error - x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 8; - v8 := compiler.MapValueForKey(m, "collectionFormat") - if v8 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 9; - v9 := compiler.MapValueForKey(m, "default") - if v9 != nil { - var err error - x.Default, err = NewAny(v9, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 10; - v10 := compiler.MapValueForKey(m, "maximum") - if v10 != nil { - v, ok := compiler.FloatForScalarNode(v10) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 11; - v11 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v11 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v11) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 12; - v12 := compiler.MapValueForKey(m, "minimum") - if v12 != nil { - v, ok := compiler.FloatForScalarNode(v12) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 13; - v13 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v13 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v13) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 14; - v14 := compiler.MapValueForKey(m, "maxLength") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 15; - v15 := compiler.MapValueForKey(m, "minLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 16; - v16 := compiler.MapValueForKey(m, "pattern") - if v16 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v16) - if !ok { - message 
:= fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 17; - v17 := compiler.MapValueForKey(m, "maxItems") - if v17 != nil { - t, ok := compiler.IntForScalarNode(v17) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 18; - v18 := compiler.MapValueForKey(m, "minItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 19; - v19 := compiler.MapValueForKey(m, "uniqueItems") - if v19 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v19) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v20) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 21; - v21 := compiler.MapValueForKey(m, "multipleOf") - if v21 != nil { - v, ok := compiler.FloatForScalarNode(v21) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v21)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 22; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPaths creates an object of type Paths if possible, returning an error if not. 
-func NewPaths(in *yaml.Node, context *compiler.Context) (*Paths, error) { - errors := make([]error, 0) - x := &Paths{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern0, pattern1} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedAny vendor_extension = 1; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - // repeated NamedPathItem path = 2; - // MAP: PathItem ^/ - x.Path = make([]*NamedPathItem, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "/") { - pair := &NamedPathItem{} - pair.Name = k - var err error - pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.Path = append(x.Path, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
-func NewPrimitivesItems(in *yaml.Node, context *compiler.Context) (*PrimitivesItems, error) { - errors := make([]error, 0) - x := &PrimitivesItems{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string type = 1; - v1 := compiler.MapValueForKey(m, "type") - if v1 != nil { - x.Type, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number integer boolean array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 3; - v3 := compiler.MapValueForKey(m, "items") - if v3 != nil { - var err error - x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 4; - v4 := compiler.MapValueForKey(m, "collectionFormat") - if v4 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 6; - v6 := compiler.MapValueForKey(m, "maximum") - if v6 != nil { - v, ok := compiler.FloatForScalarNode(v6) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 7; - v7 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v7 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has 
unexpected value for exclusiveMaximum: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 8; - v8 := compiler.MapValueForKey(m, "minimum") - if v8 != nil { - v, ok := compiler.FloatForScalarNode(v8) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 9; - v9 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v9 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 10; - v10 := compiler.MapValueForKey(m, "maxLength") - if v10 != nil { - t, ok := compiler.IntForScalarNode(v10) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 11; - v11 := compiler.MapValueForKey(m, "minLength") - if v11 != nil { - t, ok := compiler.IntForScalarNode(v11) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 12; - v12 := compiler.MapValueForKey(m, "pattern") - if v12 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 13; - v13 := compiler.MapValueForKey(m, "maxItems") - if v13 != nil { - t, ok := compiler.IntForScalarNode(v13) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 14; - v14 := compiler.MapValueForKey(m, "minItems") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 15; - v15 := compiler.MapValueForKey(m, "uniqueItems") - if v15 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v15) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 16; - v16 := compiler.MapValueForKey(m, "enum") - if v16 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v16) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 17; - v17 := compiler.MapValueForKey(m, "multipleOf") - if v17 != nil { - v, ok := compiler.FloatForScalarNode(v17) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated 
NamedAny vendor_extension = 18; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewProperties creates an object of type Properties if possible, returning an error if not. -func NewProperties(in *yaml.Node, context *compiler.Context) (*Properties, error) { - errors := make([]error, 0) - x := &Properties{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedSchema additional_properties = 1; - // MAP: Schema - x.AdditionalProperties = make([]*NamedSchema, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedSchema{} - pair.Name = k - var err error - pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
-func NewQueryParameterSubSchema(in *yaml.Node, context *compiler.Context) (*QueryParameterSubSchema, error) { - errors := make([]error, 0) - x := &QueryParameterSubSchema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // bool required = 1; - v1 := compiler.MapValueForKey(m, "required") - if v1 != nil { - x.Required, ok = compiler.BoolForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string in = 2; - v2 := compiler.MapValueForKey(m, "in") - if v2 != nil { - x.In, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [query] - if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { - message := fmt.Sprintf("has unexpected value for in: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 3; - v3 := compiler.MapValueForKey(m, "description") - if v3 != nil { - x.Description, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string name = 4; - v4 := compiler.MapValueForKey(m, "name") - if v4 != nil { - x.Name, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for name: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool allow_empty_value = 5; - v5 := compiler.MapValueForKey(m, "allowEmptyValue") - if v5 != nil { - x.AllowEmptyValue, ok = compiler.BoolForScalarNode(v5) - if !ok { - message := fmt.Sprintf("has unexpected value for allowEmptyValue: %s", compiler.Display(v5)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string type = 6; - v6 := compiler.MapValueForKey(m, "type") - if v6 != nil { - x.Type, ok = compiler.StringForScalarNode(v6) - if !ok { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [string number boolean integer array] - if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { - message := fmt.Sprintf("has unexpected value for type: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 7; - v7 := compiler.MapValueForKey(m, "format") - if v7 != nil 
{ - x.Format, ok = compiler.StringForScalarNode(v7) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // PrimitivesItems items = 8; - v8 := compiler.MapValueForKey(m, "items") - if v8 != nil { - var err error - x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // string collection_format = 9; - v9 := compiler.MapValueForKey(m, "collectionFormat") - if v9 != nil { - x.CollectionFormat, ok = compiler.StringForScalarNode(v9) - if !ok { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - // check for valid enum values - // [csv ssv tsv pipes multi] - if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { - message := fmt.Sprintf("has unexpected value for collectionFormat: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 10; - v10 := compiler.MapValueForKey(m, "default") - if v10 != nil { - var err error - x.Default, err = NewAny(v10, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float maximum = 11; - v11 := compiler.MapValueForKey(m, "maximum") - if v11 != nil { - v, ok := compiler.FloatForScalarNode(v11) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 12; - v12 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v12 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v12) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 13; - v13 := compiler.MapValueForKey(m, "minimum") - if v13 != nil { - v, ok := compiler.FloatForScalarNode(v13) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 14; - v14 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v14 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v14) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 15; - v15 := compiler.MapValueForKey(m, "maxLength") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 16; - v16 := compiler.MapValueForKey(m, "minLength") - if v16 != nil { - t, ok := compiler.IntForScalarNode(v16) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 17; - v17 := compiler.MapValueForKey(m, "pattern") - if v17 != nil { - x.Pattern, ok = 
compiler.StringForScalarNode(v17) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 18; - v18 := compiler.MapValueForKey(m, "maxItems") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 19; - v19 := compiler.MapValueForKey(m, "minItems") - if v19 != nil { - t, ok := compiler.IntForScalarNode(v19) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 20; - v20 := compiler.MapValueForKey(m, "uniqueItems") - if v20 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v20) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v20)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 21; - v21 := compiler.MapValueForKey(m, "enum") - if v21 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v21) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // float multiple_of = 22; - v22 := compiler.MapValueForKey(m, "multipleOf") - if v22 != nil { - v, ok := compiler.FloatForScalarNode(v22) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v22)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated NamedAny vendor_extension = 23; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponse creates an object of type Response if possible, returning an error if not. 
-func NewResponse(in *yaml.Node, context *compiler.Context) (*Response, error) { - errors := make([]error, 0) - x := &Response{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - requiredKeys := []string{"description"} - missingKeys := compiler.MissingKeysInMap(m, requiredKeys) - if len(missingKeys) > 0 { - message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - allowedKeys := []string{"description", "examples", "headers", "schema"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string description = 1; - v1 := compiler.MapValueForKey(m, "description") - if v1 != nil { - x.Description, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // SchemaItem schema = 2; - v2 := compiler.MapValueForKey(m, "schema") - if v2 != nil { - var err error - x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) - if err != nil { - errors = append(errors, err) - } - } - // Headers headers = 3; - v3 := compiler.MapValueForKey(m, "headers") - if v3 != nil { - var err error - x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) - if err != nil { - errors = append(errors, err) - } - } - // Examples examples = 4; - v4 := compiler.MapValueForKey(m, "examples") - if v4 != nil { - var err error - x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 5; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
-func NewResponseDefinitions(in *yaml.Node, context *compiler.Context) (*ResponseDefinitions, error) { - errors := make([]error, 0) - x := &ResponseDefinitions{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - // repeated NamedResponse additional_properties = 1; - // MAP: Response - x.AdditionalProperties = make([]*NamedResponse, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - pair := &NamedResponse{} - pair.Name = k - var err error - pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.AdditionalProperties = append(x.AdditionalProperties, pair) - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. -func NewResponseValue(in *yaml.Node, context *compiler.Context) (*ResponseValue, error) { - errors := make([]error, 0) - x := &ResponseValue{} - matched := false - // Response response = 1; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewResponse(m, compiler.NewContext("response", context)) - if matchingError == nil { - x.Oneof = &ResponseValue_Response{Response: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - // JsonReference json_reference = 2; - { - m, ok := compiler.UnpackMap(in) - if ok { - // errors might be ok here, they mean we just don't have the right subtype - t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) - if matchingError == nil { - x.Oneof = &ResponseValue_JsonReference{JsonReference: t} - matched = true - } else { - errors = append(errors, matchingError) - } - } - } - if matched { - // since the oneof matched one of its possibilities, discard any matching errors - errors = make([]error, 0) - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewResponses creates an object of type Responses if possible, returning an error if not. 
-func NewResponses(in *yaml.Node, context *compiler.Context) (*Responses, error) { - errors := make([]error, 0) - x := &Responses{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{} - allowedPatterns := []*regexp.Regexp{pattern2, pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // repeated NamedResponseValue response_code = 1; - // MAP: ResponseValue ^([0-9]{3})$|^(default)$ - x.ResponseCode = make([]*NamedResponseValue, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if pattern2.MatchString(k) { - pair := &NamedResponseValue{} - pair.Name = k - var err error - pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - x.ResponseCode = append(x.ResponseCode, pair) - } - } - } - // repeated NamedAny vendor_extension = 2; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < len(m.Content); i += 2 { - k, ok := compiler.StringForScalarNode(m.Content[i]) - if ok { - v := m.Content[i+1] - if strings.HasPrefix(k, "x-") { - pair := &NamedAny{} - pair.Name = k - result := &Any{} - handled, resultFromExt, err := compiler.CallExtension(context, v, k) - if handled { - if err != nil { - errors = append(errors, err) - } else { - bytes := compiler.Marshal(v) - result.Yaml = string(bytes) - result.Value = resultFromExt - pair.Value = result - } - } else { - pair.Value, err = NewAny(v, compiler.NewContext(k, context)) - if err != nil { - errors = append(errors, err) - } - } - x.VendorExtension = append(x.VendorExtension, pair) - } - } - } - } - return x, compiler.NewErrorGroupOrNil(errors) -} - -// NewSchema creates an object of type Schema if possible, returning an error if not. 
-func NewSchema(in *yaml.Node, context *compiler.Context) (*Schema, error) { - errors := make([]error, 0) - x := &Schema{} - m, ok := compiler.UnpackMap(in) - if !ok { - message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) - errors = append(errors, compiler.NewError(context, message)) - } else { - allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} - allowedPatterns := []*regexp.Regexp{pattern0} - invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) - if len(invalidKeys) > 0 { - message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) - errors = append(errors, compiler.NewError(context, message)) - } - // string _ref = 1; - v1 := compiler.MapValueForKey(m, "$ref") - if v1 != nil { - x.XRef, ok = compiler.StringForScalarNode(v1) - if !ok { - message := fmt.Sprintf("has unexpected value for $ref: %s", compiler.Display(v1)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string format = 2; - v2 := compiler.MapValueForKey(m, "format") - if v2 != nil { - x.Format, ok = compiler.StringForScalarNode(v2) - if !ok { - message := fmt.Sprintf("has unexpected value for format: %s", compiler.Display(v2)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string title = 3; - v3 := compiler.MapValueForKey(m, "title") - if v3 != nil { - x.Title, ok = compiler.StringForScalarNode(v3) - if !ok { - message := fmt.Sprintf("has unexpected value for title: %s", compiler.Display(v3)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string description = 4; - v4 := compiler.MapValueForKey(m, "description") - if v4 != nil { - x.Description, ok = compiler.StringForScalarNode(v4) - if !ok { - message := fmt.Sprintf("has unexpected value for description: %s", compiler.Display(v4)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Any default = 5; - v5 := compiler.MapValueForKey(m, "default") - if v5 != nil { - var err error - x.Default, err = NewAny(v5, compiler.NewContext("default", context)) - if err != nil { - errors = append(errors, err) - } - } - // float multiple_of = 6; - v6 := compiler.MapValueForKey(m, "multipleOf") - if v6 != nil { - v, ok := compiler.FloatForScalarNode(v6) - if ok { - x.MultipleOf = v - } else { - message := fmt.Sprintf("has unexpected value for multipleOf: %s", compiler.Display(v6)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float maximum = 7; - v7 := compiler.MapValueForKey(m, "maximum") - if v7 != nil { - v, ok := compiler.FloatForScalarNode(v7) - if ok { - x.Maximum = v - } else { - message := fmt.Sprintf("has unexpected value for maximum: %s", compiler.Display(v7)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_maximum = 8; - v8 := compiler.MapValueForKey(m, "exclusiveMaximum") - if v8 != nil { - x.ExclusiveMaximum, ok = compiler.BoolForScalarNode(v8) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %s", compiler.Display(v8)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // float minimum = 9; - v9 := 
compiler.MapValueForKey(m, "minimum") - if v9 != nil { - v, ok := compiler.FloatForScalarNode(v9) - if ok { - x.Minimum = v - } else { - message := fmt.Sprintf("has unexpected value for minimum: %s", compiler.Display(v9)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool exclusive_minimum = 10; - v10 := compiler.MapValueForKey(m, "exclusiveMinimum") - if v10 != nil { - x.ExclusiveMinimum, ok = compiler.BoolForScalarNode(v10) - if !ok { - message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %s", compiler.Display(v10)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_length = 11; - v11 := compiler.MapValueForKey(m, "maxLength") - if v11 != nil { - t, ok := compiler.IntForScalarNode(v11) - if ok { - x.MaxLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxLength: %s", compiler.Display(v11)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_length = 12; - v12 := compiler.MapValueForKey(m, "minLength") - if v12 != nil { - t, ok := compiler.IntForScalarNode(v12) - if ok { - x.MinLength = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minLength: %s", compiler.Display(v12)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // string pattern = 13; - v13 := compiler.MapValueForKey(m, "pattern") - if v13 != nil { - x.Pattern, ok = compiler.StringForScalarNode(v13) - if !ok { - message := fmt.Sprintf("has unexpected value for pattern: %s", compiler.Display(v13)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_items = 14; - v14 := compiler.MapValueForKey(m, "maxItems") - if v14 != nil { - t, ok := compiler.IntForScalarNode(v14) - if ok { - x.MaxItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxItems: %s", compiler.Display(v14)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_items = 15; - v15 := compiler.MapValueForKey(m, "minItems") - if v15 != nil { - t, ok := compiler.IntForScalarNode(v15) - if ok { - x.MinItems = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minItems: %s", compiler.Display(v15)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool unique_items = 16; - v16 := compiler.MapValueForKey(m, "uniqueItems") - if v16 != nil { - x.UniqueItems, ok = compiler.BoolForScalarNode(v16) - if !ok { - message := fmt.Sprintf("has unexpected value for uniqueItems: %s", compiler.Display(v16)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 max_properties = 17; - v17 := compiler.MapValueForKey(m, "maxProperties") - if v17 != nil { - t, ok := compiler.IntForScalarNode(v17) - if ok { - x.MaxProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for maxProperties: %s", compiler.Display(v17)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // int64 min_properties = 18; - v18 := compiler.MapValueForKey(m, "minProperties") - if v18 != nil { - t, ok := compiler.IntForScalarNode(v18) - if ok { - x.MinProperties = int64(t) - } else { - message := fmt.Sprintf("has unexpected value for minProperties: %s", compiler.Display(v18)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated string required = 19; - v19 := compiler.MapValueForKey(m, "required") - if v19 != nil { - v, ok := compiler.SequenceNodeForNode(v19) - if ok { - x.Required = 
compiler.StringArrayForSequenceNode(v) - } else { - message := fmt.Sprintf("has unexpected value for required: %s", compiler.Display(v19)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // repeated Any enum = 20; - v20 := compiler.MapValueForKey(m, "enum") - if v20 != nil { - // repeated Any - x.Enum = make([]*Any, 0) - a, ok := compiler.SequenceNodeForNode(v20) - if ok { - for _, item := range a.Content { - y, err := NewAny(item, compiler.NewContext("enum", context)) - if err != nil { - errors = append(errors, err) - } - x.Enum = append(x.Enum, y) - } - } - } - // AdditionalPropertiesItem additional_properties = 21; - v21 := compiler.MapValueForKey(m, "additionalProperties") - if v21 != nil { - var err error - x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) - if err != nil { - errors = append(errors, err) - } - } - // TypeItem type = 22; - v22 := compiler.MapValueForKey(m, "type") - if v22 != nil { - var err error - x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) - if err != nil { - errors = append(errors, err) - } - } - // ItemsItem items = 23; - v23 := compiler.MapValueForKey(m, "items") - if v23 != nil { - var err error - x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated Schema all_of = 24; - v24 := compiler.MapValueForKey(m, "allOf") - if v24 != nil { - // repeated Schema - x.AllOf = make([]*Schema, 0) - a, ok := compiler.SequenceNodeForNode(v24) - if ok { - for _, item := range a.Content { - y, err := NewSchema(item, compiler.NewContext("allOf", context)) - if err != nil { - errors = append(errors, err) - } - x.AllOf = append(x.AllOf, y) - } - } - } - // Properties properties = 25; - v25 := compiler.MapValueForKey(m, "properties") - if v25 != nil { - var err error - x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) - if err != nil { - errors = append(errors, err) - } - } - // string discriminator = 26; - v26 := compiler.MapValueForKey(m, "discriminator") - if v26 != nil { - x.Discriminator, ok = compiler.StringForScalarNode(v26) - if !ok { - message := fmt.Sprintf("has unexpected value for discriminator: %s", compiler.Display(v26)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // bool read_only = 27; - v27 := compiler.MapValueForKey(m, "readOnly") - if v27 != nil { - x.ReadOnly, ok = compiler.BoolForScalarNode(v27) - if !ok { - message := fmt.Sprintf("has unexpected value for readOnly: %s", compiler.Display(v27)) - errors = append(errors, compiler.NewError(context, message)) - } - } - // Xml xml = 28; - v28 := compiler.MapValueForKey(m, "xml") - if v28 != nil { - var err error - x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) - if err != nil { - errors = append(errors, err) - } - } - // ExternalDocs external_docs = 29; - v29 := compiler.MapValueForKey(m, "externalDocs") - if v29 != nil { - var err error - x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) - if err != nil { - errors = append(errors, err) - } - } - // Any example = 30; - v30 := compiler.MapValueForKey(m, "example") - if v30 != nil { - var err error - x.Example, err = NewAny(v30, compiler.NewContext("example", context)) - if err != nil { - errors = append(errors, err) - } - } - // repeated NamedAny vendor_extension = 31; - // MAP: Any ^x- - x.VendorExtension = make([]*NamedAny, 0) - for i := 0; i < 
[vendored-file deletion hunk: this portion of the patch removes generated OpenAPI v2 (Swagger) model code from a vendored dependency — the remaining New* builder functions (NewSchemaItem, NewSecurityDefinitions, NewSecurityDefinitionsItem, NewSecurityRequirement, NewStringArray, NewTag, NewTypeItem, NewVendorExtension, NewXml), a ResolveReferences(root string) method for each model type (AdditionalPropertiesItem through Xml), and the ToRawInfo() export methods beginning with AdditionalPropertiesItem; the hunk continues below.]
- info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
-func (m *HeaderParameterSubSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Required != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - } - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, 
compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Headers suitable for JSON or YAML export. -func (m *Headers) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Info suitable for JSON or YAML export. -func (m *Info) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("version")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Version)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.TermsOfService != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("termsOfService")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TermsOfService)) - } - if m.Contact != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("contact")) - info.Content = append(info.Content, m.Contact.ToRawInfo()) - } - if m.License != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("license")) - info.Content = append(info.Content, m.License.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. 
-func (m *ItemsItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if len(m.Schema) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Schema { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) - info.Content = append(info.Content, items) - } - return info -} - -// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. -func (m *JsonReference) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - return info -} - -// ToRawInfo returns a description of License suitable for JSON or YAML export. -func (m *License) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - if m.Url != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("url")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Url)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. -func (m *NamedAny) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. -func (m *NamedHeader) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. -func (m *NamedParameter) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. 
-func (m *NamedPathItem) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export.
-func (m *NamedResponse) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export.
-func (m *NamedResponseValue) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export.
-func (m *NamedSchema) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export.
-func (m *NamedSecurityDefinitionsItem) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	// &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value}
-	return info
-}
-
-// ToRawInfo returns a description of NamedString suitable for JSON or YAML export.
-func (m *NamedString) ToRawInfo() *yaml.Node {
-	info := compiler.NewMappingNode()
-	if m == nil {
-		return info
-	}
-	if m.Name != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("name"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name))
-	}
-	if m.Value != "" {
-		info.Content = append(info.Content, compiler.NewScalarNodeForString("value"))
-		info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Value))
-	}
-	return info
-}
-
-// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export.
-func (m *NamedStringArray) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} - return info -} - -// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. -func (m *NonBodyParameter) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // NonBodyParameter - // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetHeaderParameterSubSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFormDataParameterSubSchema() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetQueryParameterSubSchema() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetPathParameterSubSchema() - if v3 != nil { - return v3.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. -func (m *Oauth2AccessCodeSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) - if m.Scopes != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) - info.Content = append(info.Content, m.Scopes.ToRawInfo()) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. -func (m *Oauth2ApplicationSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) - if m.Scopes != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) - info.Content = append(info.Content, m.Scopes.ToRawInfo()) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. -func (m *Oauth2ImplicitSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) - if m.Scopes != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) - info.Content = append(info.Content, m.Scopes.ToRawInfo()) - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("authorizationUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.AuthorizationUrl)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. -func (m *Oauth2PasswordSecurity) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("flow")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Flow)) - if m.Scopes != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("scopes")) - info.Content = append(info.Content, m.Scopes.ToRawInfo()) - } - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("tokenUrl")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.TokenUrl)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. -func (m *Oauth2Scopes) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} - return info -} - -// ToRawInfo returns a description of Operation suitable for JSON or YAML export. -func (m *Operation) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if len(m.Tags) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("tags")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Tags)) - } - if m.Summary != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("summary")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Summary)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.OperationId != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("operationId")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.OperationId)) - } - if len(m.Produces) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("produces")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Produces)) - } - if len(m.Consumes) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("consumes")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Consumes)) - } - if len(m.Parameters) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Parameters { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) - info.Content = append(info.Content, items) - } - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("responses")) - info.Content = append(info.Content, m.Responses.ToRawInfo()) - if len(m.Schemes) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("schemes")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Schemes)) - } - if m.Deprecated != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("deprecated")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Deprecated)) - } - if len(m.Security) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Security { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("security")) - info.Content = append(info.Content, items) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. -func (m *Parameter) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // Parameter - // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBodyParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetNonBodyParameter() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. -func (m *ParameterDefinitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. -func (m *ParametersItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // ParametersItem - // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetParameter() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. 
-func (m *PathItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.XRef != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) - } - if m.Get != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("get")) - info.Content = append(info.Content, m.Get.ToRawInfo()) - } - if m.Put != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("put")) - info.Content = append(info.Content, m.Put.ToRawInfo()) - } - if m.Post != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("post")) - info.Content = append(info.Content, m.Post.ToRawInfo()) - } - if m.Delete != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("delete")) - info.Content = append(info.Content, m.Delete.ToRawInfo()) - } - if m.Options != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("options")) - info.Content = append(info.Content, m.Options.ToRawInfo()) - } - if m.Head != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("head")) - info.Content = append(info.Content, m.Head.ToRawInfo()) - } - if m.Patch != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("patch")) - info.Content = append(info.Content, m.Patch.ToRawInfo()) - } - if len(m.Parameters) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Parameters { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("parameters")) - info.Content = append(info.Content, items) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. -func (m *PathParameterSubSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. 
- info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - 
info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Paths suitable for JSON or YAML export. -func (m *Paths) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - if m.Path != nil { - for _, item := range m.Path { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. -func (m *PrimitivesItems) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = 
append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Properties suitable for JSON or YAML export. -func (m *Properties) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. 
-func (m *QueryParameterSubSchema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Required != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Required)) - } - if m.In != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("in")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.In)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.AllowEmptyValue != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("allowEmptyValue")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.AllowEmptyValue)) - } - if m.Type != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Items != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, m.Items.ToRawInfo()) - } - if m.CollectionFormat != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("collectionFormat")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.CollectionFormat)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = 
append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Response suitable for JSON or YAML export. -func (m *Response) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - if m.Schema != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("schema")) - info.Content = append(info.Content, m.Schema.ToRawInfo()) - } - if m.Headers != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("headers")) - info.Content = append(info.Content, m.Headers.ToRawInfo()) - } - if m.Examples != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("examples")) - info.Content = append(info.Content, m.Examples.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. -func (m *ResponseDefinitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. -func (m *ResponseValue) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // ResponseValue - // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetResponse() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetJsonReference() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of Responses suitable for JSON or YAML export. 
-func (m *Responses) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.ResponseCode != nil { - for _, item := range m.ResponseCode { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Schema suitable for JSON or YAML export. -func (m *Schema) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.XRef != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("$ref")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.XRef)) - } - if m.Format != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("format")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Format)) - } - if m.Title != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("title")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Title)) - } - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.Default != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("default")) - info.Content = append(info.Content, m.Default.ToRawInfo()) - } - if m.MultipleOf != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("multipleOf")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.MultipleOf)) - } - if m.Maximum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Maximum)) - } - if m.ExclusiveMaximum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMaximum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMaximum)) - } - if m.Minimum != 0.0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForFloat(m.Minimum)) - } - if m.ExclusiveMinimum != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("exclusiveMinimum")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ExclusiveMinimum)) - } - if m.MaxLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxLength)) - } - if m.MinLength != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minLength")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinLength)) - } - if m.Pattern != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("pattern")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Pattern)) - } - if m.MaxItems != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxItems)) - } - if m.MinItems != 0 { - info.Content = append(info.Content, 
compiler.NewScalarNodeForString("minItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinItems)) - } - if m.UniqueItems != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("uniqueItems")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.UniqueItems)) - } - if m.MaxProperties != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("maxProperties")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MaxProperties)) - } - if m.MinProperties != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("minProperties")) - info.Content = append(info.Content, compiler.NewScalarNodeForInt(m.MinProperties)) - } - if len(m.Required) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("required")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Required)) - } - if len(m.Enum) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.Enum { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("enum")) - info.Content = append(info.Content, items) - } - if m.AdditionalProperties != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("additionalProperties")) - info.Content = append(info.Content, m.AdditionalProperties.ToRawInfo()) - } - if m.Type != nil { - if len(m.Type.Value) == 1 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Type.Value[0])) - } else { - info.Content = append(info.Content, compiler.NewScalarNodeForString("type")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Type.Value)) - } - } - if m.Items != nil { - items := compiler.NewSequenceNode() - for _, item := range m.Items.Schema { - items.Content = append(items.Content, item.ToRawInfo()) - } - if len(items.Content) == 1 { - items = items.Content[0] - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("items")) - info.Content = append(info.Content, items) - } - if len(m.AllOf) != 0 { - items := compiler.NewSequenceNode() - for _, item := range m.AllOf { - items.Content = append(items.Content, item.ToRawInfo()) - } - info.Content = append(info.Content, compiler.NewScalarNodeForString("allOf")) - info.Content = append(info.Content, items) - } - if m.Properties != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("properties")) - info.Content = append(info.Content, m.Properties.ToRawInfo()) - } - if m.Discriminator != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("discriminator")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Discriminator)) - } - if m.ReadOnly != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("readOnly")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.ReadOnly)) - } - if m.Xml != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("xml")) - info.Content = append(info.Content, m.Xml.ToRawInfo()) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.Example != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("example")) - info.Content = 
append(info.Content, m.Example.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export. -func (m *SchemaItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // SchemaItem - // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetSchema() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetFileSchema() - if v1 != nil { - return v1.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export. -func (m *SecurityDefinitions) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export. -func (m *SecurityDefinitionsItem) ToRawInfo() *yaml.Node { - // ONE OF WRAPPER - // SecurityDefinitionsItem - // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v0 := m.GetBasicAuthenticationSecurity() - if v0 != nil { - return v0.ToRawInfo() - } - // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v1 := m.GetApiKeySecurity() - if v1 != nil { - return v1.ToRawInfo() - } - // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v2 := m.GetOauth2ImplicitSecurity() - if v2 != nil { - return v2.ToRawInfo() - } - // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v3 := m.GetOauth2PasswordSecurity() - if v3 != nil { - return v3.ToRawInfo() - } - // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v4 := m.GetOauth2ApplicationSecurity() - if v4 != nil { - return v4.ToRawInfo() - } - // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} - v5 := m.GetOauth2AccessCodeSecurity() - if v5 != nil { - return v5.ToRawInfo() - } - return nil -} - -// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. -func (m *SecurityRequirement) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. 
-func (m *StringArray) ToRawInfo() *yaml.Node { - return compiler.NewSequenceNodeForStringArray(m.Value) -} - -// ToRawInfo returns a description of Tag suitable for JSON or YAML export. -func (m *Tag) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - // always include this required field. - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - if m.Description != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("description")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Description)) - } - if m.ExternalDocs != nil { - info.Content = append(info.Content, compiler.NewScalarNodeForString("externalDocs")) - info.Content = append(info.Content, m.ExternalDocs.ToRawInfo()) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. -func (m *TypeItem) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if len(m.Value) != 0 { - info.Content = append(info.Content, compiler.NewScalarNodeForString("value")) - info.Content = append(info.Content, compiler.NewSequenceNodeForStringArray(m.Value)) - } - return info -} - -// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. -func (m *VendorExtension) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.AdditionalProperties != nil { - for _, item := range m.AdditionalProperties { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -// ToRawInfo returns a description of Xml suitable for JSON or YAML export. 
-func (m *Xml) ToRawInfo() *yaml.Node { - info := compiler.NewMappingNode() - if m == nil { - return info - } - if m.Name != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("name")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Name)) - } - if m.Namespace != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("namespace")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Namespace)) - } - if m.Prefix != "" { - info.Content = append(info.Content, compiler.NewScalarNodeForString("prefix")) - info.Content = append(info.Content, compiler.NewScalarNodeForString(m.Prefix)) - } - if m.Attribute != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("attribute")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Attribute)) - } - if m.Wrapped != false { - info.Content = append(info.Content, compiler.NewScalarNodeForString("wrapped")) - info.Content = append(info.Content, compiler.NewScalarNodeForBool(m.Wrapped)) - } - if m.VendorExtension != nil { - for _, item := range m.VendorExtension { - info.Content = append(info.Content, compiler.NewScalarNodeForString(item.Name)) - info.Content = append(info.Content, item.Value.ToRawInfo()) - } - } - return info -} - -var ( - pattern0 = regexp.MustCompile("^x-") - pattern1 = regexp.MustCompile("^/") - pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") -) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go deleted file mode 100644 index 559ddea..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go +++ /dev/null @@ -1,7347 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.23.0 -// protoc v3.12.3 -// source: openapiv2/OpenAPIv2.proto - -package openapi_v2 - -import ( - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. 
-const _ = proto.ProtoPackageIsVersion4 - -type AdditionalPropertiesItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *AdditionalPropertiesItem_Schema - // *AdditionalPropertiesItem_Boolean - Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` -} - -func (x *AdditionalPropertiesItem) Reset() { - *x = AdditionalPropertiesItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AdditionalPropertiesItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AdditionalPropertiesItem) ProtoMessage() {} - -func (x *AdditionalPropertiesItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AdditionalPropertiesItem.ProtoReflect.Descriptor instead. -func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{0} -} - -func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *AdditionalPropertiesItem) GetSchema() *Schema { - if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Schema); ok { - return x.Schema - } - return nil -} - -func (x *AdditionalPropertiesItem) GetBoolean() bool { - if x, ok := x.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { - return x.Boolean - } - return false -} - -type isAdditionalPropertiesItem_Oneof interface { - isAdditionalPropertiesItem_Oneof() -} - -type AdditionalPropertiesItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` -} - -type AdditionalPropertiesItem_Boolean struct { - Boolean bool `protobuf:"varint,2,opt,name=boolean,proto3,oneof"` -} - -func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} - -func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} - -type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value *any.Any `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` - Yaml string `protobuf:"bytes,2,opt,name=yaml,proto3" json:"yaml,omitempty"` -} - -func (x *Any) Reset() { - *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Any) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Any) ProtoMessage() {} - -func (x *Any) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
-func (*Any) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{1} -} - -func (x *Any) GetValue() *any.Any { - if x != nil { - return x.Value - } - return nil -} - -func (x *Any) GetYaml() string { - if x != nil { - return x.Yaml - } - return "" -} - -type ApiKeySecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *ApiKeySecurity) Reset() { - *x = ApiKeySecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ApiKeySecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ApiKeySecurity) ProtoMessage() {} - -func (x *ApiKeySecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ApiKeySecurity.ProtoReflect.Descriptor instead. -func (*ApiKeySecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{2} -} - -func (x *ApiKeySecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *ApiKeySecurity) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ApiKeySecurity) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *ApiKeySecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ApiKeySecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type BasicAuthenticationSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *BasicAuthenticationSecurity) Reset() { - *x = BasicAuthenticationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BasicAuthenticationSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BasicAuthenticationSecurity) ProtoMessage() {} - -func (x *BasicAuthenticationSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// 
Deprecated: Use BasicAuthenticationSecurity.ProtoReflect.Descriptor instead. -func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{3} -} - -func (x *BasicAuthenticationSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *BasicAuthenticationSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type BodyParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,3,opt,name=in,proto3" json:"in,omitempty"` - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,4,opt,name=required,proto3" json:"required,omitempty"` - Schema *Schema `protobuf:"bytes,5,opt,name=schema,proto3" json:"schema,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *BodyParameter) Reset() { - *x = BodyParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *BodyParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BodyParameter) ProtoMessage() {} - -func (x *BodyParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BodyParameter.ProtoReflect.Descriptor instead. -func (*BodyParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{4} -} - -func (x *BodyParameter) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *BodyParameter) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *BodyParameter) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *BodyParameter) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *BodyParameter) GetSchema() *Schema { - if x != nil { - return x.Schema - } - return nil -} - -func (x *BodyParameter) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// Contact information for the owners of the API. -type Contact struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The identifying name of the contact person/organization. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL pointing to the contact information. 
- Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - // The email address of the contact person/organization. - Email string `protobuf:"bytes,3,opt,name=email,proto3" json:"email,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Contact) Reset() { - *x = Contact{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Contact) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Contact) ProtoMessage() {} - -func (x *Contact) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Contact.ProtoReflect.Descriptor instead. -func (*Contact) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{5} -} - -func (x *Contact) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Contact) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *Contact) GetEmail() string { - if x != nil { - return x.Email - } - return "" -} - -func (x *Contact) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Default struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Default) Reset() { - *x = Default{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Default) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Default) ProtoMessage() {} - -func (x *Default) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Default.ProtoReflect.Descriptor instead. -func (*Default) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{6} -} - -func (x *Default) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. 
-type Definitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Definitions) Reset() { - *x = Definitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Definitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Definitions) ProtoMessage() {} - -func (x *Definitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Definitions.ProtoReflect.Descriptor instead. -func (*Definitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{7} -} - -func (x *Definitions) GetAdditionalProperties() []*NamedSchema { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Document struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The Swagger version of this document. - Swagger string `protobuf:"bytes,1,opt,name=swagger,proto3" json:"swagger,omitempty"` - Info *Info `protobuf:"bytes,2,opt,name=info,proto3" json:"info,omitempty"` - // The host (name or ip) of the API. Example: 'swagger.io' - Host string `protobuf:"bytes,3,opt,name=host,proto3" json:"host,omitempty"` - // The base path to the API. Example: '/api'. - BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath,proto3" json:"base_path,omitempty"` - // The transfer protocol of the API. - Schemes []string `protobuf:"bytes,5,rep,name=schemes,proto3" json:"schemes,omitempty"` - // A list of MIME types accepted by the API. - Consumes []string `protobuf:"bytes,6,rep,name=consumes,proto3" json:"consumes,omitempty"` - // A list of MIME types the API can produce. 
- Produces []string `protobuf:"bytes,7,rep,name=produces,proto3" json:"produces,omitempty"` - Paths *Paths `protobuf:"bytes,8,opt,name=paths,proto3" json:"paths,omitempty"` - Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions,proto3" json:"definitions,omitempty"` - Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters,proto3" json:"parameters,omitempty"` - Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses,proto3" json:"responses,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions,proto3" json:"security_definitions,omitempty"` - Tags []*Tag `protobuf:"bytes,14,rep,name=tags,proto3" json:"tags,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Document) Reset() { - *x = Document{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Document) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Document) ProtoMessage() {} - -func (x *Document) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Document.ProtoReflect.Descriptor instead. 
-func (*Document) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{8} -} - -func (x *Document) GetSwagger() string { - if x != nil { - return x.Swagger - } - return "" -} - -func (x *Document) GetInfo() *Info { - if x != nil { - return x.Info - } - return nil -} - -func (x *Document) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (x *Document) GetBasePath() string { - if x != nil { - return x.BasePath - } - return "" -} - -func (x *Document) GetSchemes() []string { - if x != nil { - return x.Schemes - } - return nil -} - -func (x *Document) GetConsumes() []string { - if x != nil { - return x.Consumes - } - return nil -} - -func (x *Document) GetProduces() []string { - if x != nil { - return x.Produces - } - return nil -} - -func (x *Document) GetPaths() *Paths { - if x != nil { - return x.Paths - } - return nil -} - -func (x *Document) GetDefinitions() *Definitions { - if x != nil { - return x.Definitions - } - return nil -} - -func (x *Document) GetParameters() *ParameterDefinitions { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *Document) GetResponses() *ResponseDefinitions { - if x != nil { - return x.Responses - } - return nil -} - -func (x *Document) GetSecurity() []*SecurityRequirement { - if x != nil { - return x.Security - } - return nil -} - -func (x *Document) GetSecurityDefinitions() *SecurityDefinitions { - if x != nil { - return x.SecurityDefinitions - } - return nil -} - -func (x *Document) GetTags() []*Tag { - if x != nil { - return x.Tags - } - return nil -} - -func (x *Document) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Document) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Examples struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Examples) Reset() { - *x = Examples{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Examples) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Examples) ProtoMessage() {} - -func (x *Examples) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Examples.ProtoReflect.Descriptor instead. 
-func (*Examples) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{9} -} - -func (x *Examples) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// information about external documentation -type ExternalDocs struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *ExternalDocs) Reset() { - *x = ExternalDocs{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExternalDocs) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExternalDocs) ProtoMessage() {} - -func (x *ExternalDocs) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExternalDocs.ProtoReflect.Descriptor instead. -func (*ExternalDocs) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{10} -} - -func (x *ExternalDocs) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *ExternalDocs) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *ExternalDocs) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. 
-type FileSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Format string `protobuf:"bytes,1,opt,name=format,proto3" json:"format,omitempty"` - Title string `protobuf:"bytes,2,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - Default *Any `protobuf:"bytes,4,opt,name=default,proto3" json:"default,omitempty"` - Required []string `protobuf:"bytes,5,rep,name=required,proto3" json:"required,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,9,opt,name=example,proto3" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *FileSchema) Reset() { - *x = FileSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FileSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FileSchema) ProtoMessage() {} - -func (x *FileSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FileSchema.ProtoReflect.Descriptor instead. -func (*FileSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{11} -} - -func (x *FileSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *FileSchema) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *FileSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *FileSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *FileSchema) GetRequired() []string { - if x != nil { - return x.Required - } - return nil -} - -func (x *FileSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *FileSchema) GetReadOnly() bool { - if x != nil { - return x.ReadOnly - } - return false -} - -func (x *FileSchema) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *FileSchema) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *FileSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type FormDataParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *FormDataParameterSubSchema) Reset() { - *x = FormDataParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *FormDataParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*FormDataParameterSubSchema) ProtoMessage() {} - -func (x *FormDataParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use FormDataParameterSubSchema.ProtoReflect.Descriptor instead. 
-func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{12} -} - -func (x *FormDataParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *FormDataParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *FormDataParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *FormDataParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *FormDataParameterSubSchema) GetAllowEmptyValue() bool { - if x != nil { - return x.AllowEmptyValue - } - return false -} - -func (x *FormDataParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *FormDataParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *FormDataParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *FormDataParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *FormDataParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *FormDataParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *FormDataParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *FormDataParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *FormDataParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *FormDataParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *FormDataParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Header struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 
`protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Description string `protobuf:"bytes,18,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Header) Reset() { - *x = Header{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Header) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Header) ProtoMessage() {} - -func (x *Header) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Header.ProtoReflect.Descriptor instead. 
-func (*Header) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{13} -} - -func (x *Header) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Header) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *Header) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *Header) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *Header) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *Header) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *Header) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *Header) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *Header) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *Header) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *Header) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *Header) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *Header) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *Header) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *Header) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *Header) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *Header) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *Header) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Header) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type HeaderParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *HeaderParameterSubSchema) Reset() { - *x = HeaderParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *HeaderParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HeaderParameterSubSchema) ProtoMessage() {} - -func (x *HeaderParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HeaderParameterSubSchema.ProtoReflect.Descriptor instead. 
-func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{14} -} - -func (x *HeaderParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *HeaderParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *HeaderParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *HeaderParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *HeaderParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *HeaderParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *HeaderParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *HeaderParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *HeaderParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *HeaderParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *HeaderParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *HeaderParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *HeaderParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *HeaderParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *HeaderParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Headers struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Headers) Reset() { - *x = Headers{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Headers) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Headers) ProtoMessage() {} - -func (x *Headers) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Headers.ProtoReflect.Descriptor instead. -func (*Headers) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{15} -} - -func (x *Headers) GetAdditionalProperties() []*NamedHeader { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -// General information about the API. -type Info struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A unique and precise title of the API. - Title string `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"` - // A semantic version number of the API. - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The terms of service for the API. - TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService,proto3" json:"terms_of_service,omitempty"` - Contact *Contact `protobuf:"bytes,5,opt,name=contact,proto3" json:"contact,omitempty"` - License *License `protobuf:"bytes,6,opt,name=license,proto3" json:"license,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Info) Reset() { - *x = Info{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Info) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Info) ProtoMessage() {} - -func (x *Info) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Info.ProtoReflect.Descriptor instead. 
-func (*Info) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{16} -} - -func (x *Info) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *Info) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *Info) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Info) GetTermsOfService() string { - if x != nil { - return x.TermsOfService - } - return "" -} - -func (x *Info) GetContact() *Contact { - if x != nil { - return x.Contact - } - return nil -} - -func (x *Info) GetLicense() *License { - if x != nil { - return x.License - } - return nil -} - -func (x *Info) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type ItemsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Schema []*Schema `protobuf:"bytes,1,rep,name=schema,proto3" json:"schema,omitempty"` -} - -func (x *ItemsItem) Reset() { - *x = ItemsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ItemsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ItemsItem) ProtoMessage() {} - -func (x *ItemsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ItemsItem.ProtoReflect.Descriptor instead. -func (*ItemsItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{17} -} - -func (x *ItemsItem) GetSchema() []*Schema { - if x != nil { - return x.Schema - } - return nil -} - -type JsonReference struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` -} - -func (x *JsonReference) Reset() { - *x = JsonReference{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *JsonReference) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*JsonReference) ProtoMessage() {} - -func (x *JsonReference) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use JsonReference.ProtoReflect.Descriptor instead. 
-func (*JsonReference) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{18} -} - -func (x *JsonReference) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *JsonReference) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -type License struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The name of the license type. It's encouraged to use an OSI compatible license. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The URL pointing to the license. - Url string `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *License) Reset() { - *x = License{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *License) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*License) ProtoMessage() {} - -func (x *License) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use License.ProtoReflect.Descriptor instead. -func (*License) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{19} -} - -func (x *License) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *License) GetUrl() string { - if x != nil { - return x.Url - } - return "" -} - -func (x *License) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -type NamedAny struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Any `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedAny) Reset() { - *x = NamedAny{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedAny) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedAny) ProtoMessage() {} - -func (x *NamedAny) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedAny.ProtoReflect.Descriptor instead. 
-func (*NamedAny) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{20} -} - -func (x *NamedAny) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedAny) GetValue() *Any { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -type NamedHeader struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Header `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedHeader) Reset() { - *x = NamedHeader{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedHeader) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedHeader) ProtoMessage() {} - -func (x *NamedHeader) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedHeader.ProtoReflect.Descriptor instead. -func (*NamedHeader) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{21} -} - -func (x *NamedHeader) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedHeader) GetValue() *Header { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -type NamedParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Parameter `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedParameter) Reset() { - *x = NamedParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedParameter) ProtoMessage() {} - -func (x *NamedParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedParameter.ProtoReflect.Descriptor instead. -func (*NamedParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{22} -} - -func (x *NamedParameter) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedParameter) GetValue() *Parameter { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-type NamedPathItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *PathItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedPathItem) Reset() { - *x = NamedPathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedPathItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedPathItem) ProtoMessage() {} - -func (x *NamedPathItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedPathItem.ProtoReflect.Descriptor instead. -func (*NamedPathItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{23} -} - -func (x *NamedPathItem) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedPathItem) GetValue() *PathItem { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -type NamedResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Response `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedResponse) Reset() { - *x = NamedResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedResponse) ProtoMessage() {} - -func (x *NamedResponse) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedResponse.ProtoReflect.Descriptor instead. -func (*NamedResponse) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{24} -} - -func (x *NamedResponse) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedResponse) GetValue() *Response { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. 
-type NamedResponseValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *ResponseValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedResponseValue) Reset() { - *x = NamedResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedResponseValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedResponseValue) ProtoMessage() {} - -func (x *NamedResponseValue) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedResponseValue.ProtoReflect.Descriptor instead. -func (*NamedResponseValue) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{25} -} - -func (x *NamedResponseValue) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedResponseValue) GetValue() *ResponseValue { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -type NamedSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *Schema `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedSchema) Reset() { - *x = NamedSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedSchema) ProtoMessage() {} - -func (x *NamedSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedSchema.ProtoReflect.Descriptor instead. -func (*NamedSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{26} -} - -func (x *NamedSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedSchema) GetValue() *Schema { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
-type NamedSecurityDefinitionsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedSecurityDefinitionsItem) Reset() { - *x = NamedSecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedSecurityDefinitionsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedSecurityDefinitionsItem) ProtoMessage() {} - -func (x *NamedSecurityDefinitionsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedSecurityDefinitionsItem.ProtoReflect.Descriptor instead. -func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{27} -} - -func (x *NamedSecurityDefinitionsItem) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { - if x != nil { - return x.Value - } - return nil -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -type NamedString struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedString) Reset() { - *x = NamedString{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedString) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedString) ProtoMessage() {} - -func (x *NamedString) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[28] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedString.ProtoReflect.Descriptor instead. -func (*NamedString) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{28} -} - -func (x *NamedString) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedString) GetValue() string { - if x != nil { - return x.Value - } - return "" -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. 
-type NamedStringArray struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Map key - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Mapped value - Value *StringArray `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *NamedStringArray) Reset() { - *x = NamedStringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NamedStringArray) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NamedStringArray) ProtoMessage() {} - -func (x *NamedStringArray) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[29] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NamedStringArray.ProtoReflect.Descriptor instead. -func (*NamedStringArray) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{29} -} - -func (x *NamedStringArray) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *NamedStringArray) GetValue() *StringArray { - if x != nil { - return x.Value - } - return nil -} - -type NonBodyParameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *NonBodyParameter_HeaderParameterSubSchema - // *NonBodyParameter_FormDataParameterSubSchema - // *NonBodyParameter_QueryParameterSubSchema - // *NonBodyParameter_PathParameterSubSchema - Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (x *NonBodyParameter) Reset() { - *x = NonBodyParameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *NonBodyParameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*NonBodyParameter) ProtoMessage() {} - -func (x *NonBodyParameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[30] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use NonBodyParameter.ProtoReflect.Descriptor instead. 
-func (*NonBodyParameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{30} -} - -func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { - return x.HeaderParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { - return x.FormDataParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { - return x.QueryParameterSubSchema - } - return nil -} - -func (x *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { - if x, ok := x.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { - return x.PathParameterSubSchema - } - return nil -} - -type isNonBodyParameter_Oneof interface { - isNonBodyParameter_Oneof() -} - -type NonBodyParameter_HeaderParameterSubSchema struct { - HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_FormDataParameterSubSchema struct { - FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_QueryParameterSubSchema struct { - QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,proto3,oneof"` -} - -type NonBodyParameter_PathParameterSubSchema struct { - PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,proto3,oneof"` -} - -func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} - -func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} - -type Oauth2AccessCodeSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2AccessCodeSecurity) Reset() { - *x = Oauth2AccessCodeSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x 
*Oauth2AccessCodeSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2AccessCodeSecurity) ProtoMessage() {} - -func (x *Oauth2AccessCodeSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[31] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2AccessCodeSecurity.ProtoReflect.Descriptor instead. -func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{31} -} - -func (x *Oauth2AccessCodeSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { - if x != nil { - return x.AuthorizationUrl - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2ApplicationSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2ApplicationSecurity) Reset() { - *x = Oauth2ApplicationSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2ApplicationSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2ApplicationSecurity) ProtoMessage() {} - -func (x *Oauth2ApplicationSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[32] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2ApplicationSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{32} -} - -func (x *Oauth2ApplicationSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2ApplicationSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2ImplicitSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl,proto3" json:"authorization_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2ImplicitSecurity) Reset() { - *x = Oauth2ImplicitSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2ImplicitSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2ImplicitSecurity) ProtoMessage() {} - -func (x *Oauth2ImplicitSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[33] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2ImplicitSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{33} -} - -func (x *Oauth2ImplicitSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { - if x != nil { - return x.AuthorizationUrl - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2PasswordSecurity struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Flow string `protobuf:"bytes,2,opt,name=flow,proto3" json:"flow,omitempty"` - Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes,proto3" json:"scopes,omitempty"` - TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl,proto3" json:"token_url,omitempty"` - Description string `protobuf:"bytes,5,opt,name=description,proto3" json:"description,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Oauth2PasswordSecurity) Reset() { - *x = Oauth2PasswordSecurity{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2PasswordSecurity) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2PasswordSecurity) ProtoMessage() {} - -func (x *Oauth2PasswordSecurity) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[34] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2PasswordSecurity.ProtoReflect.Descriptor instead. 
-func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{34} -} - -func (x *Oauth2PasswordSecurity) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetFlow() string { - if x != nil { - return x.Flow - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { - if x != nil { - return x.Scopes - } - return nil -} - -func (x *Oauth2PasswordSecurity) GetTokenUrl() string { - if x != nil { - return x.TokenUrl - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Oauth2Scopes struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Oauth2Scopes) Reset() { - *x = Oauth2Scopes{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Oauth2Scopes) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Oauth2Scopes) ProtoMessage() {} - -func (x *Oauth2Scopes) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[35] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Oauth2Scopes.ProtoReflect.Descriptor instead. -func (*Oauth2Scopes) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{35} -} - -func (x *Oauth2Scopes) GetAdditionalProperties() []*NamedString { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Operation struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Tags []string `protobuf:"bytes,1,rep,name=tags,proto3" json:"tags,omitempty"` - // A brief summary of the operation. - Summary string `protobuf:"bytes,2,opt,name=summary,proto3" json:"summary,omitempty"` - // A longer description of the operation, GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - // A unique identifier of the operation. - OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"` - // A list of MIME types the API can produce. - Produces []string `protobuf:"bytes,6,rep,name=produces,proto3" json:"produces,omitempty"` - // A list of MIME types the API can consume. - Consumes []string `protobuf:"bytes,7,rep,name=consumes,proto3" json:"consumes,omitempty"` - // The parameters needed to send a valid API call. - Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters,proto3" json:"parameters,omitempty"` - Responses *Responses `protobuf:"bytes,9,opt,name=responses,proto3" json:"responses,omitempty"` - // The transfer protocol of the API. 
- Schemes []string `protobuf:"bytes,10,rep,name=schemes,proto3" json:"schemes,omitempty"` - Deprecated bool `protobuf:"varint,11,opt,name=deprecated,proto3" json:"deprecated,omitempty"` - Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security,proto3" json:"security,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Operation) Reset() { - *x = Operation{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Operation) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Operation) ProtoMessage() {} - -func (x *Operation) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[36] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Operation.ProtoReflect.Descriptor instead. -func (*Operation) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{36} -} - -func (x *Operation) GetTags() []string { - if x != nil { - return x.Tags - } - return nil -} - -func (x *Operation) GetSummary() string { - if x != nil { - return x.Summary - } - return "" -} - -func (x *Operation) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Operation) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Operation) GetOperationId() string { - if x != nil { - return x.OperationId - } - return "" -} - -func (x *Operation) GetProduces() []string { - if x != nil { - return x.Produces - } - return nil -} - -func (x *Operation) GetConsumes() []string { - if x != nil { - return x.Consumes - } - return nil -} - -func (x *Operation) GetParameters() []*ParametersItem { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *Operation) GetResponses() *Responses { - if x != nil { - return x.Responses - } - return nil -} - -func (x *Operation) GetSchemes() []string { - if x != nil { - return x.Schemes - } - return nil -} - -func (x *Operation) GetDeprecated() bool { - if x != nil { - return x.Deprecated - } - return false -} - -func (x *Operation) GetSecurity() []*SecurityRequirement { - if x != nil { - return x.Security - } - return nil -} - -func (x *Operation) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Parameter struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *Parameter_BodyParameter - // *Parameter_NonBodyParameter - Oneof isParameter_Oneof `protobuf_oneof:"oneof"` -} - -func (x *Parameter) Reset() { - *x = Parameter{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Parameter) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Parameter) ProtoMessage() {} - -func (x *Parameter) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[37] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if 
ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Parameter.ProtoReflect.Descriptor instead. -func (*Parameter) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{37} -} - -func (m *Parameter) GetOneof() isParameter_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *Parameter) GetBodyParameter() *BodyParameter { - if x, ok := x.GetOneof().(*Parameter_BodyParameter); ok { - return x.BodyParameter - } - return nil -} - -func (x *Parameter) GetNonBodyParameter() *NonBodyParameter { - if x, ok := x.GetOneof().(*Parameter_NonBodyParameter); ok { - return x.NonBodyParameter - } - return nil -} - -type isParameter_Oneof interface { - isParameter_Oneof() -} - -type Parameter_BodyParameter struct { - BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,proto3,oneof"` -} - -type Parameter_NonBodyParameter struct { - NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,proto3,oneof"` -} - -func (*Parameter_BodyParameter) isParameter_Oneof() {} - -func (*Parameter_NonBodyParameter) isParameter_Oneof() {} - -// One or more JSON representations for parameters -type ParameterDefinitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *ParameterDefinitions) Reset() { - *x = ParameterDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ParameterDefinitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ParameterDefinitions) ProtoMessage() {} - -func (x *ParameterDefinitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[38] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ParameterDefinitions.ProtoReflect.Descriptor instead. 
-func (*ParameterDefinitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{38} -} - -func (x *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type ParametersItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *ParametersItem_Parameter - // *ParametersItem_JsonReference - Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` -} - -func (x *ParametersItem) Reset() { - *x = ParametersItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ParametersItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ParametersItem) ProtoMessage() {} - -func (x *ParametersItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[39] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ParametersItem.ProtoReflect.Descriptor instead. -func (*ParametersItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{39} -} - -func (m *ParametersItem) GetOneof() isParametersItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *ParametersItem) GetParameter() *Parameter { - if x, ok := x.GetOneof().(*ParametersItem_Parameter); ok { - return x.Parameter - } - return nil -} - -func (x *ParametersItem) GetJsonReference() *JsonReference { - if x, ok := x.GetOneof().(*ParametersItem_JsonReference); ok { - return x.JsonReference - } - return nil -} - -type isParametersItem_Oneof interface { - isParametersItem_Oneof() -} - -type ParametersItem_Parameter struct { - Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,proto3,oneof"` -} - -type ParametersItem_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` -} - -func (*ParametersItem_Parameter) isParametersItem_Oneof() {} - -func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} - -type PathItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Get *Operation `protobuf:"bytes,2,opt,name=get,proto3" json:"get,omitempty"` - Put *Operation `protobuf:"bytes,3,opt,name=put,proto3" json:"put,omitempty"` - Post *Operation `protobuf:"bytes,4,opt,name=post,proto3" json:"post,omitempty"` - Delete *Operation `protobuf:"bytes,5,opt,name=delete,proto3" json:"delete,omitempty"` - Options *Operation `protobuf:"bytes,6,opt,name=options,proto3" json:"options,omitempty"` - Head *Operation `protobuf:"bytes,7,opt,name=head,proto3" json:"head,omitempty"` - Patch *Operation `protobuf:"bytes,8,opt,name=patch,proto3" json:"patch,omitempty"` - // The parameters needed to send a valid API call. 
- Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters,proto3" json:"parameters,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *PathItem) Reset() { - *x = PathItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PathItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PathItem) ProtoMessage() {} - -func (x *PathItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[40] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PathItem.ProtoReflect.Descriptor instead. -func (*PathItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{40} -} - -func (x *PathItem) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *PathItem) GetGet() *Operation { - if x != nil { - return x.Get - } - return nil -} - -func (x *PathItem) GetPut() *Operation { - if x != nil { - return x.Put - } - return nil -} - -func (x *PathItem) GetPost() *Operation { - if x != nil { - return x.Post - } - return nil -} - -func (x *PathItem) GetDelete() *Operation { - if x != nil { - return x.Delete - } - return nil -} - -func (x *PathItem) GetOptions() *Operation { - if x != nil { - return x.Options - } - return nil -} - -func (x *PathItem) GetHead() *Operation { - if x != nil { - return x.Head - } - return nil -} - -func (x *PathItem) GetPatch() *Operation { - if x != nil { - return x.Patch - } - return nil -} - -func (x *PathItem) GetParameters() []*ParametersItem { - if x != nil { - return x.Parameters - } - return nil -} - -func (x *PathItem) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type PathParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. 
- Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - Type string `protobuf:"bytes,5,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,6,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,9,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,10,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,12,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,16,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *PathParameterSubSchema) Reset() { - *x = PathParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PathParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PathParameterSubSchema) ProtoMessage() {} - -func (x *PathParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[41] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PathParameterSubSchema.ProtoReflect.Descriptor instead. 
-func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{41} -} - -func (x *PathParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *PathParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *PathParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *PathParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *PathParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *PathParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *PathParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *PathParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *PathParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *PathParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *PathParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *PathParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *PathParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *PathParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *PathParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *PathParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *PathParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *PathParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *PathParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *PathParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *PathParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *PathParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. 
-type Paths struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` - Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path,proto3" json:"path,omitempty"` -} - -func (x *Paths) Reset() { - *x = Paths{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Paths) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Paths) ProtoMessage() {} - -func (x *Paths) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[42] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Paths.ProtoReflect.Descriptor instead. -func (*Paths) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{42} -} - -func (x *Paths) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -func (x *Paths) GetPath() []*NamedPathItem { - if x != nil { - return x.Path - } - return nil -} - -type PrimitivesItems struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,6,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,8,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,12,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,16,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *PrimitivesItems) Reset() { - *x = PrimitivesItems{} - if protoimpl.UnsafeEnabled { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[43] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PrimitivesItems) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PrimitivesItems) ProtoMessage() {} - -func (x *PrimitivesItems) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[43] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PrimitivesItems.ProtoReflect.Descriptor instead. -func (*PrimitivesItems) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{43} -} - -func (x *PrimitivesItems) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *PrimitivesItems) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *PrimitivesItems) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *PrimitivesItems) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *PrimitivesItems) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *PrimitivesItems) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *PrimitivesItems) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *PrimitivesItems) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *PrimitivesItems) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *PrimitivesItems) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *PrimitivesItems) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *PrimitivesItems) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *PrimitivesItems) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *PrimitivesItems) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *PrimitivesItems) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *PrimitivesItems) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *PrimitivesItems) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *PrimitivesItems) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Properties struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *Properties) Reset() { - *x = Properties{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Properties) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Properties) ProtoMessage() {} - -func (x *Properties) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[44] - if 
protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Properties.ProtoReflect.Descriptor instead. -func (*Properties) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{44} -} - -func (x *Properties) GetAdditionalProperties() []*NamedSchema { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type QueryParameterSubSchema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Determines whether or not this parameter is required or optional. - Required bool `protobuf:"varint,1,opt,name=required,proto3" json:"required,omitempty"` - // Determines the location of the parameter. - In string `protobuf:"bytes,2,opt,name=in,proto3" json:"in,omitempty"` - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` - // The name of the parameter. - Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` - // allows sending a parameter by name only or with an empty value. - AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue,proto3" json:"allow_empty_value,omitempty"` - Type string `protobuf:"bytes,6,opt,name=type,proto3" json:"type,omitempty"` - Format string `protobuf:"bytes,7,opt,name=format,proto3" json:"format,omitempty"` - Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items,proto3" json:"items,omitempty"` - CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat,proto3" json:"collection_format,omitempty"` - Default *Any `protobuf:"bytes,10,opt,name=default,proto3" json:"default,omitempty"` - Maximum float64 `protobuf:"fixed64,11,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,13,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,17,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - Enum []*Any `protobuf:"bytes,21,rep,name=enum,proto3" json:"enum,omitempty"` - MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *QueryParameterSubSchema) Reset() { - *x = QueryParameterSubSchema{} - if protoimpl.UnsafeEnabled { - mi := 
&file_openapiv2_OpenAPIv2_proto_msgTypes[45] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *QueryParameterSubSchema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*QueryParameterSubSchema) ProtoMessage() {} - -func (x *QueryParameterSubSchema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[45] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use QueryParameterSubSchema.ProtoReflect.Descriptor instead. -func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{45} -} - -func (x *QueryParameterSubSchema) GetRequired() bool { - if x != nil { - return x.Required - } - return false -} - -func (x *QueryParameterSubSchema) GetIn() string { - if x != nil { - return x.In - } - return "" -} - -func (x *QueryParameterSubSchema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *QueryParameterSubSchema) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *QueryParameterSubSchema) GetAllowEmptyValue() bool { - if x != nil { - return x.AllowEmptyValue - } - return false -} - -func (x *QueryParameterSubSchema) GetType() string { - if x != nil { - return x.Type - } - return "" -} - -func (x *QueryParameterSubSchema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *QueryParameterSubSchema) GetItems() *PrimitivesItems { - if x != nil { - return x.Items - } - return nil -} - -func (x *QueryParameterSubSchema) GetCollectionFormat() string { - if x != nil { - return x.CollectionFormat - } - return "" -} - -func (x *QueryParameterSubSchema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *QueryParameterSubSchema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *QueryParameterSubSchema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *QueryParameterSubSchema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *QueryParameterSubSchema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *QueryParameterSubSchema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *QueryParameterSubSchema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *QueryParameterSubSchema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *QueryParameterSubSchema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *QueryParameterSubSchema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *QueryParameterSubSchema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *QueryParameterSubSchema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *QueryParameterSubSchema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type Response 
struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Description string `protobuf:"bytes,1,opt,name=description,proto3" json:"description,omitempty"` - Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema,proto3" json:"schema,omitempty"` - Headers *Headers `protobuf:"bytes,3,opt,name=headers,proto3" json:"headers,omitempty"` - Examples *Examples `protobuf:"bytes,4,opt,name=examples,proto3" json:"examples,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Response) Reset() { - *x = Response{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Response) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Response) ProtoMessage() {} - -func (x *Response) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[46] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Response.ProtoReflect.Descriptor instead. -func (*Response) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{46} -} - -func (x *Response) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Response) GetSchema() *SchemaItem { - if x != nil { - return x.Schema - } - return nil -} - -func (x *Response) GetHeaders() *Headers { - if x != nil { - return x.Headers - } - return nil -} - -func (x *Response) GetExamples() *Examples { - if x != nil { - return x.Examples - } - return nil -} - -func (x *Response) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// One or more JSON representations for responses -type ResponseDefinitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *ResponseDefinitions) Reset() { - *x = ResponseDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponseDefinitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponseDefinitions) ProtoMessage() {} - -func (x *ResponseDefinitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[47] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponseDefinitions.ProtoReflect.Descriptor instead. 
-func (*ResponseDefinitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{47} -} - -func (x *ResponseDefinitions) GetAdditionalProperties() []*NamedResponse { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type ResponseValue struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *ResponseValue_Response - // *ResponseValue_JsonReference - Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` -} - -func (x *ResponseValue) Reset() { - *x = ResponseValue{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ResponseValue) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ResponseValue) ProtoMessage() {} - -func (x *ResponseValue) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[48] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ResponseValue.ProtoReflect.Descriptor instead. -func (*ResponseValue) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{48} -} - -func (m *ResponseValue) GetOneof() isResponseValue_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *ResponseValue) GetResponse() *Response { - if x, ok := x.GetOneof().(*ResponseValue_Response); ok { - return x.Response - } - return nil -} - -func (x *ResponseValue) GetJsonReference() *JsonReference { - if x, ok := x.GetOneof().(*ResponseValue_JsonReference); ok { - return x.JsonReference - } - return nil -} - -type isResponseValue_Oneof interface { - isResponseValue_Oneof() -} - -type ResponseValue_Response struct { - Response *Response `protobuf:"bytes,1,opt,name=response,proto3,oneof"` -} - -type ResponseValue_JsonReference struct { - JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,proto3,oneof"` -} - -func (*ResponseValue_Response) isResponseValue_Oneof() {} - -func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} - -// Response objects names can either be any valid HTTP status code or 'default'. 
-type Responses struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode,proto3" json:"response_code,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Responses) Reset() { - *x = Responses{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Responses) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Responses) ProtoMessage() {} - -func (x *Responses) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[49] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Responses.ProtoReflect.Descriptor instead. -func (*Responses) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{49} -} - -func (x *Responses) GetResponseCode() []*NamedResponseValue { - if x != nil { - return x.ResponseCode - } - return nil -} - -func (x *Responses) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -// A deterministic version of a JSON Schema object. -type Schema struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref,proto3" json:"_ref,omitempty"` - Format string `protobuf:"bytes,2,opt,name=format,proto3" json:"format,omitempty"` - Title string `protobuf:"bytes,3,opt,name=title,proto3" json:"title,omitempty"` - Description string `protobuf:"bytes,4,opt,name=description,proto3" json:"description,omitempty"` - Default *Any `protobuf:"bytes,5,opt,name=default,proto3" json:"default,omitempty"` - MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf,proto3" json:"multiple_of,omitempty"` - Maximum float64 `protobuf:"fixed64,7,opt,name=maximum,proto3" json:"maximum,omitempty"` - ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum,proto3" json:"exclusive_maximum,omitempty"` - Minimum float64 `protobuf:"fixed64,9,opt,name=minimum,proto3" json:"minimum,omitempty"` - ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum,proto3" json:"exclusive_minimum,omitempty"` - MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength,proto3" json:"max_length,omitempty"` - MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength,proto3" json:"min_length,omitempty"` - Pattern string `protobuf:"bytes,13,opt,name=pattern,proto3" json:"pattern,omitempty"` - MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems,proto3" json:"max_items,omitempty"` - MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems,proto3" json:"min_items,omitempty"` - UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems,proto3" json:"unique_items,omitempty"` - MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties,proto3" json:"max_properties,omitempty"` - MinProperties int64 
`protobuf:"varint,18,opt,name=min_properties,json=minProperties,proto3" json:"min_properties,omitempty"` - Required []string `protobuf:"bytes,19,rep,name=required,proto3" json:"required,omitempty"` - Enum []*Any `protobuf:"bytes,20,rep,name=enum,proto3" json:"enum,omitempty"` - AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` - Type *TypeItem `protobuf:"bytes,22,opt,name=type,proto3" json:"type,omitempty"` - Items *ItemsItem `protobuf:"bytes,23,opt,name=items,proto3" json:"items,omitempty"` - AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf,proto3" json:"all_of,omitempty"` - Properties *Properties `protobuf:"bytes,25,opt,name=properties,proto3" json:"properties,omitempty"` - Discriminator string `protobuf:"bytes,26,opt,name=discriminator,proto3" json:"discriminator,omitempty"` - ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly,proto3" json:"read_only,omitempty"` - Xml *Xml `protobuf:"bytes,28,opt,name=xml,proto3" json:"xml,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - Example *Any `protobuf:"bytes,30,opt,name=example,proto3" json:"example,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Schema) Reset() { - *x = Schema{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Schema) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Schema) ProtoMessage() {} - -func (x *Schema) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[50] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Schema.ProtoReflect.Descriptor instead. 
-func (*Schema) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{50} -} - -func (x *Schema) GetXRef() string { - if x != nil { - return x.XRef - } - return "" -} - -func (x *Schema) GetFormat() string { - if x != nil { - return x.Format - } - return "" -} - -func (x *Schema) GetTitle() string { - if x != nil { - return x.Title - } - return "" -} - -func (x *Schema) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Schema) GetDefault() *Any { - if x != nil { - return x.Default - } - return nil -} - -func (x *Schema) GetMultipleOf() float64 { - if x != nil { - return x.MultipleOf - } - return 0 -} - -func (x *Schema) GetMaximum() float64 { - if x != nil { - return x.Maximum - } - return 0 -} - -func (x *Schema) GetExclusiveMaximum() bool { - if x != nil { - return x.ExclusiveMaximum - } - return false -} - -func (x *Schema) GetMinimum() float64 { - if x != nil { - return x.Minimum - } - return 0 -} - -func (x *Schema) GetExclusiveMinimum() bool { - if x != nil { - return x.ExclusiveMinimum - } - return false -} - -func (x *Schema) GetMaxLength() int64 { - if x != nil { - return x.MaxLength - } - return 0 -} - -func (x *Schema) GetMinLength() int64 { - if x != nil { - return x.MinLength - } - return 0 -} - -func (x *Schema) GetPattern() string { - if x != nil { - return x.Pattern - } - return "" -} - -func (x *Schema) GetMaxItems() int64 { - if x != nil { - return x.MaxItems - } - return 0 -} - -func (x *Schema) GetMinItems() int64 { - if x != nil { - return x.MinItems - } - return 0 -} - -func (x *Schema) GetUniqueItems() bool { - if x != nil { - return x.UniqueItems - } - return false -} - -func (x *Schema) GetMaxProperties() int64 { - if x != nil { - return x.MaxProperties - } - return 0 -} - -func (x *Schema) GetMinProperties() int64 { - if x != nil { - return x.MinProperties - } - return 0 -} - -func (x *Schema) GetRequired() []string { - if x != nil { - return x.Required - } - return nil -} - -func (x *Schema) GetEnum() []*Any { - if x != nil { - return x.Enum - } - return nil -} - -func (x *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -func (x *Schema) GetType() *TypeItem { - if x != nil { - return x.Type - } - return nil -} - -func (x *Schema) GetItems() *ItemsItem { - if x != nil { - return x.Items - } - return nil -} - -func (x *Schema) GetAllOf() []*Schema { - if x != nil { - return x.AllOf - } - return nil -} - -func (x *Schema) GetProperties() *Properties { - if x != nil { - return x.Properties - } - return nil -} - -func (x *Schema) GetDiscriminator() string { - if x != nil { - return x.Discriminator - } - return "" -} - -func (x *Schema) GetReadOnly() bool { - if x != nil { - return x.ReadOnly - } - return false -} - -func (x *Schema) GetXml() *Xml { - if x != nil { - return x.Xml - } - return nil -} - -func (x *Schema) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Schema) GetExample() *Any { - if x != nil { - return x.Example - } - return nil -} - -func (x *Schema) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type SchemaItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *SchemaItem_Schema - // *SchemaItem_FileSchema - Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` -} - -func 
(x *SchemaItem) Reset() { - *x = SchemaItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SchemaItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SchemaItem) ProtoMessage() {} - -func (x *SchemaItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[51] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SchemaItem.ProtoReflect.Descriptor instead. -func (*SchemaItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{51} -} - -func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *SchemaItem) GetSchema() *Schema { - if x, ok := x.GetOneof().(*SchemaItem_Schema); ok { - return x.Schema - } - return nil -} - -func (x *SchemaItem) GetFileSchema() *FileSchema { - if x, ok := x.GetOneof().(*SchemaItem_FileSchema); ok { - return x.FileSchema - } - return nil -} - -type isSchemaItem_Oneof interface { - isSchemaItem_Oneof() -} - -type SchemaItem_Schema struct { - Schema *Schema `protobuf:"bytes,1,opt,name=schema,proto3,oneof"` -} - -type SchemaItem_FileSchema struct { - FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,proto3,oneof"` -} - -func (*SchemaItem_Schema) isSchemaItem_Oneof() {} - -func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} - -type SecurityDefinitions struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *SecurityDefinitions) Reset() { - *x = SecurityDefinitions{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityDefinitions) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityDefinitions) ProtoMessage() {} - -func (x *SecurityDefinitions) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[52] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityDefinitions.ProtoReflect.Descriptor instead. 
-func (*SecurityDefinitions) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{52} -} - -func (x *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type SecurityDefinitionsItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Types that are assignable to Oneof: - // *SecurityDefinitionsItem_BasicAuthenticationSecurity - // *SecurityDefinitionsItem_ApiKeySecurity - // *SecurityDefinitionsItem_Oauth2ImplicitSecurity - // *SecurityDefinitionsItem_Oauth2PasswordSecurity - // *SecurityDefinitionsItem_Oauth2ApplicationSecurity - // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity - Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` -} - -func (x *SecurityDefinitionsItem) Reset() { - *x = SecurityDefinitionsItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityDefinitionsItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityDefinitionsItem) ProtoMessage() {} - -func (x *SecurityDefinitionsItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[53] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityDefinitionsItem.ProtoReflect.Descriptor instead. -func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{53} -} - -func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { - if m != nil { - return m.Oneof - } - return nil -} - -func (x *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { - return x.BasicAuthenticationSecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { - return x.ApiKeySecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { - return x.Oauth2ImplicitSecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { - return x.Oauth2PasswordSecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { - return x.Oauth2ApplicationSecurity - } - return nil -} - -func (x *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { - if x, ok := x.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { - return x.Oauth2AccessCodeSecurity - } - return nil -} - -type isSecurityDefinitionsItem_Oneof interface { - isSecurityDefinitionsItem_Oneof() -} - -type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { - BasicAuthenticationSecurity *BasicAuthenticationSecurity 
`protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_ApiKeySecurity struct { - ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { - Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { - Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { - Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,proto3,oneof"` -} - -type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { - Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,proto3,oneof"` -} - -func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} - -func (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} - -type SecurityRequirement struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *SecurityRequirement) Reset() { - *x = SecurityRequirement{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SecurityRequirement) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SecurityRequirement) ProtoMessage() {} - -func (x *SecurityRequirement) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[54] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SecurityRequirement.ProtoReflect.Descriptor instead. 
-func (*SecurityRequirement) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{54} -} - -func (x *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type StringArray struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` -} - -func (x *StringArray) Reset() { - *x = StringArray{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *StringArray) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*StringArray) ProtoMessage() {} - -func (x *StringArray) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[55] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use StringArray.ProtoReflect.Descriptor instead. -func (*StringArray) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{55} -} - -func (x *StringArray) GetValue() []string { - if x != nil { - return x.Value - } - return nil -} - -type Tag struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` - ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs,proto3" json:"external_docs,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Tag) Reset() { - *x = Tag{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Tag) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Tag) ProtoMessage() {} - -func (x *Tag) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[56] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Tag.ProtoReflect.Descriptor instead. 
-func (*Tag) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{56} -} - -func (x *Tag) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Tag) GetDescription() string { - if x != nil { - return x.Description - } - return "" -} - -func (x *Tag) GetExternalDocs() *ExternalDocs { - if x != nil { - return x.ExternalDocs - } - return nil -} - -func (x *Tag) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -type TypeItem struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Value []string `protobuf:"bytes,1,rep,name=value,proto3" json:"value,omitempty"` -} - -func (x *TypeItem) Reset() { - *x = TypeItem{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *TypeItem) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*TypeItem) ProtoMessage() {} - -func (x *TypeItem) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[57] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use TypeItem.ProtoReflect.Descriptor instead. -func (*TypeItem) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{57} -} - -func (x *TypeItem) GetValue() []string { - if x != nil { - return x.Value - } - return nil -} - -// Any property starting with x- is valid. -type VendorExtension struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties,proto3" json:"additional_properties,omitempty"` -} - -func (x *VendorExtension) Reset() { - *x = VendorExtension{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *VendorExtension) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*VendorExtension) ProtoMessage() {} - -func (x *VendorExtension) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[58] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use VendorExtension.ProtoReflect.Descriptor instead. 
-func (*VendorExtension) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{58} -} - -func (x *VendorExtension) GetAdditionalProperties() []*NamedAny { - if x != nil { - return x.AdditionalProperties - } - return nil -} - -type Xml struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - Prefix string `protobuf:"bytes,3,opt,name=prefix,proto3" json:"prefix,omitempty"` - Attribute bool `protobuf:"varint,4,opt,name=attribute,proto3" json:"attribute,omitempty"` - Wrapped bool `protobuf:"varint,5,opt,name=wrapped,proto3" json:"wrapped,omitempty"` - VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension,proto3" json:"vendor_extension,omitempty"` -} - -func (x *Xml) Reset() { - *x = Xml{} - if protoimpl.UnsafeEnabled { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Xml) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Xml) ProtoMessage() {} - -func (x *Xml) ProtoReflect() protoreflect.Message { - mi := &file_openapiv2_OpenAPIv2_proto_msgTypes[59] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Xml.ProtoReflect.Descriptor instead. -func (*Xml) Descriptor() ([]byte, []int) { - return file_openapiv2_OpenAPIv2_proto_rawDescGZIP(), []int{59} -} - -func (x *Xml) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *Xml) GetNamespace() string { - if x != nil { - return x.Namespace - } - return "" -} - -func (x *Xml) GetPrefix() string { - if x != nil { - return x.Prefix - } - return "" -} - -func (x *Xml) GetAttribute() bool { - if x != nil { - return x.Attribute - } - return false -} - -func (x *Xml) GetWrapped() bool { - if x != nil { - return x.Wrapped - } - return false -} - -func (x *Xml) GetVendorExtension() []*NamedAny { - if x != nil { - return x.VendorExtension - } - return nil -} - -var File_openapiv2_OpenAPIv2_proto protoreflect.FileDescriptor - -var file_openapiv2_OpenAPIv2_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, 0x32, 0x2f, 0x4f, 0x70, 0x65, 0x6e, - 0x41, 0x50, 0x49, 0x76, 0x32, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x6f, 0x70, 0x65, - 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x22, 0x6d, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x2c, - 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, - 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x1a, 0x0a, 0x07, - 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, - 0x07, 0x62, 0x6f, 0x6f, 0x6c, 0x65, 0x61, 0x6e, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 
0x6f, - 0x66, 0x22, 0x45, 0x0a, 0x03, 0x41, 0x6e, 0x79, 0x12, 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x22, 0xab, 0x01, 0x0a, 0x0e, 0x41, 0x70, 0x69, - 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, - 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x02, 0x69, 0x6e, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, - 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, - 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, - 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x94, 0x01, 0x0a, 0x1b, 0x42, 0x61, 0x73, 0x69, 0x63, - 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x0a, 0x10, - 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, - 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, - 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0xde, 0x01, - 0x0a, 0x0d, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x12, - 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x6e, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, - 0x64, 0x12, 0x2a, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x3f, 0x0a, - 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x06, 
0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, - 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x86, - 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x6e, 0x74, 0x61, 0x63, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x10, - 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, - 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x54, 0x0a, 0x07, 0x44, 0x65, 0x66, 0x61, 0x75, - 0x6c, 0x74, 0x12, 0x49, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, - 0x61, 0x6d, 0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, - 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x5b, 0x0a, - 0x0b, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x4c, 0x0a, 0x15, - 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, - 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x63, - 0x68, 0x65, 0x6d, 0x61, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, - 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xe8, 0x05, 0x0a, 0x08, 0x44, - 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, - 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x73, 0x77, 0x61, 0x67, 0x67, 0x65, - 0x72, 0x12, 0x24, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x10, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x62, - 0x61, 0x73, 0x65, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, - 0x62, 0x61, 0x73, 0x65, 0x50, 0x61, 0x74, 0x68, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x65, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, - 0x65, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x18, 0x06, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x73, 0x75, 0x6d, 0x65, 0x73, 0x12, 0x1a, - 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x08, 0x70, 0x72, 0x6f, 0x64, 0x75, 0x63, 0x65, 0x73, 0x12, 0x27, 0x0a, 0x05, 0x70, 0x61, - 0x74, 0x68, 0x73, 0x18, 0x08, 0x20, 0x01, 
[elided: a long run of removed diff lines consisting solely of raw protobuf fileDescriptor bytes for the openapi.v2 schema, apparently from a vendored, machine-generated Go file. The line structure of this part of the patch was mangled in extraction (many "-"-prefixed hex lines were run together), and the bytes carry no hand-written content; the encoded descriptor covers the OpenAPI v2 message types visible in the data, including Examples, ExternalDocs, FileSchema, FormDataParameterSubSchema, Header, HeaderParameterSubSchema, Headers, Info, ItemsItem, JsonReference, License, the Named* wrapper messages, NonBodyParameter, the Oauth2* security schemes, Operation, Parameter, ParameterDefinitions, ParametersItem, PathItem, PathParameterSubSchema, Paths, PrimitivesItems, Properties, QueryParameterSubSchema, Response, ResponseDefinitions, ResponseValue, Responses, Schema, and SchemaItem. The byte array continues past this excerpt.]
0x06, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x06, - 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12, 0x39, 0x0a, 0x0b, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x73, - 0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6f, 0x70, - 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x46, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x48, 0x00, 0x52, 0x0a, 0x66, 0x69, 0x6c, 0x65, 0x53, 0x63, 0x68, 0x65, 0x6d, - 0x61, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x74, 0x0a, 0x13, 0x53, 0x65, - 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x5d, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, - 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, - 0x6d, 0x65, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, 0x69, 0x6e, - 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, - 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, - 0x22, 0xe9, 0x04, 0x0a, 0x17, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x44, 0x65, 0x66, - 0x69, 0x6e, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x6d, 0x0a, 0x1d, - 0x62, 0x61, 0x73, 0x69, 0x63, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x1b, - 0x62, 0x61, 0x73, 0x69, 0x63, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x46, 0x0a, 0x10, 0x61, - 0x70, 0x69, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x41, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, - 0x79, 0x48, 0x00, 0x52, 0x0e, 0x61, 0x70, 0x69, 0x4b, 0x65, 0x79, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x69, 0x6d, - 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, - 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x32, 0x49, 0x6d, 0x70, 0x6c, 0x69, 0x63, 0x69, 0x74, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x5e, 0x0a, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x70, 0x61, - 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 
0x2e, - 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, - 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x16, 0x6f, 0x61, 0x75, - 0x74, 0x68, 0x32, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x12, 0x67, 0x0a, 0x1b, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x70, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, - 0x74, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x25, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, - 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, - 0x00, 0x52, 0x19, 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x12, 0x65, 0x0a, 0x1b, - 0x6f, 0x61, 0x75, 0x74, 0x68, 0x32, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x5f, 0x63, 0x6f, - 0x64, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x18, 0x06, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4f, - 0x61, 0x75, 0x74, 0x68, 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, - 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x48, 0x00, 0x52, 0x18, 0x6f, 0x61, 0x75, 0x74, 0x68, - 0x32, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x53, 0x65, 0x63, 0x75, 0x72, - 0x69, 0x74, 0x79, 0x42, 0x07, 0x0a, 0x05, 0x6f, 0x6e, 0x65, 0x6f, 0x66, 0x22, 0x68, 0x0a, 0x13, - 0x53, 0x65, 0x63, 0x75, 0x72, 0x69, 0x74, 0x79, 0x52, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x6d, - 0x65, 0x6e, 0x74, 0x12, 0x51, 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, - 0x6c, 0x5f, 0x70, 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x4e, 0x61, 0x6d, 0x65, 0x64, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x41, 0x72, 0x72, 0x61, 0x79, - 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, 0x72, 0x6f, 0x70, - 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0x23, 0x0a, 0x0b, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, - 0x41, 0x72, 0x72, 0x61, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xbb, 0x01, 0x0a, 0x03, - 0x54, 0x61, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, - 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65, - 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3d, 0x0a, 0x0d, 0x65, 0x78, 0x74, - 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x64, 0x6f, 0x63, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, - 0x32, 0x18, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, - 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, - 0x72, 0x6e, 0x61, 0x6c, 0x44, 0x6f, 0x63, 0x73, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, - 0x6f, 0x72, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, - 0x4e, 0x61, 0x6d, 
0x65, 0x64, 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, - 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x20, 0x0a, 0x08, 0x54, 0x79, 0x70, - 0x65, 0x49, 0x74, 0x65, 0x6d, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5c, 0x0a, 0x0f, 0x56, - 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x49, - 0x0a, 0x15, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x5f, 0x70, 0x72, 0x6f, - 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x14, 0x61, 0x64, 0x64, 0x69, 0x74, 0x69, 0x6f, 0x6e, 0x61, 0x6c, 0x50, - 0x72, 0x6f, 0x70, 0x65, 0x72, 0x74, 0x69, 0x65, 0x73, 0x22, 0xc8, 0x01, 0x0a, 0x03, 0x58, 0x6d, - 0x6c, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, - 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, - 0x61, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x1c, 0x0a, 0x09, 0x61, - 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, - 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x77, 0x72, 0x61, - 0x70, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x77, 0x72, 0x61, 0x70, - 0x70, 0x65, 0x64, 0x12, 0x3f, 0x0a, 0x10, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x5f, 0x65, 0x78, - 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, - 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x61, 0x6d, 0x65, 0x64, - 0x41, 0x6e, 0x79, 0x52, 0x0f, 0x76, 0x65, 0x6e, 0x64, 0x6f, 0x72, 0x45, 0x78, 0x74, 0x65, 0x6e, - 0x73, 0x69, 0x6f, 0x6e, 0x42, 0x3c, 0x0a, 0x0e, 0x6f, 0x72, 0x67, 0x2e, 0x6f, 0x70, 0x65, 0x6e, - 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0x42, 0x0c, 0x4f, 0x70, 0x65, 0x6e, 0x41, 0x50, 0x49, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x14, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x76, - 0x32, 0x3b, 0x6f, 0x70, 0x65, 0x6e, 0x61, 0x70, 0x69, 0x5f, 0x76, 0x32, 0xa2, 0x02, 0x03, 0x4f, - 0x41, 0x53, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_openapiv2_OpenAPIv2_proto_rawDescOnce sync.Once - file_openapiv2_OpenAPIv2_proto_rawDescData = file_openapiv2_OpenAPIv2_proto_rawDesc -) - -func file_openapiv2_OpenAPIv2_proto_rawDescGZIP() []byte { - file_openapiv2_OpenAPIv2_proto_rawDescOnce.Do(func() { - file_openapiv2_OpenAPIv2_proto_rawDescData = protoimpl.X.CompressGZIP(file_openapiv2_OpenAPIv2_proto_rawDescData) - }) - return file_openapiv2_OpenAPIv2_proto_rawDescData -} - -var file_openapiv2_OpenAPIv2_proto_msgTypes = make([]protoimpl.MessageInfo, 60) -var file_openapiv2_OpenAPIv2_proto_goTypes = []interface{}{ - (*AdditionalPropertiesItem)(nil), // 0: openapi.v2.AdditionalPropertiesItem - (*Any)(nil), // 1: openapi.v2.Any - (*ApiKeySecurity)(nil), // 2: openapi.v2.ApiKeySecurity - (*BasicAuthenticationSecurity)(nil), // 3: openapi.v2.BasicAuthenticationSecurity - (*BodyParameter)(nil), // 4: openapi.v2.BodyParameter - (*Contact)(nil), // 5: openapi.v2.Contact - 
(*Default)(nil), // 6: openapi.v2.Default - (*Definitions)(nil), // 7: openapi.v2.Definitions - (*Document)(nil), // 8: openapi.v2.Document - (*Examples)(nil), // 9: openapi.v2.Examples - (*ExternalDocs)(nil), // 10: openapi.v2.ExternalDocs - (*FileSchema)(nil), // 11: openapi.v2.FileSchema - (*FormDataParameterSubSchema)(nil), // 12: openapi.v2.FormDataParameterSubSchema - (*Header)(nil), // 13: openapi.v2.Header - (*HeaderParameterSubSchema)(nil), // 14: openapi.v2.HeaderParameterSubSchema - (*Headers)(nil), // 15: openapi.v2.Headers - (*Info)(nil), // 16: openapi.v2.Info - (*ItemsItem)(nil), // 17: openapi.v2.ItemsItem - (*JsonReference)(nil), // 18: openapi.v2.JsonReference - (*License)(nil), // 19: openapi.v2.License - (*NamedAny)(nil), // 20: openapi.v2.NamedAny - (*NamedHeader)(nil), // 21: openapi.v2.NamedHeader - (*NamedParameter)(nil), // 22: openapi.v2.NamedParameter - (*NamedPathItem)(nil), // 23: openapi.v2.NamedPathItem - (*NamedResponse)(nil), // 24: openapi.v2.NamedResponse - (*NamedResponseValue)(nil), // 25: openapi.v2.NamedResponseValue - (*NamedSchema)(nil), // 26: openapi.v2.NamedSchema - (*NamedSecurityDefinitionsItem)(nil), // 27: openapi.v2.NamedSecurityDefinitionsItem - (*NamedString)(nil), // 28: openapi.v2.NamedString - (*NamedStringArray)(nil), // 29: openapi.v2.NamedStringArray - (*NonBodyParameter)(nil), // 30: openapi.v2.NonBodyParameter - (*Oauth2AccessCodeSecurity)(nil), // 31: openapi.v2.Oauth2AccessCodeSecurity - (*Oauth2ApplicationSecurity)(nil), // 32: openapi.v2.Oauth2ApplicationSecurity - (*Oauth2ImplicitSecurity)(nil), // 33: openapi.v2.Oauth2ImplicitSecurity - (*Oauth2PasswordSecurity)(nil), // 34: openapi.v2.Oauth2PasswordSecurity - (*Oauth2Scopes)(nil), // 35: openapi.v2.Oauth2Scopes - (*Operation)(nil), // 36: openapi.v2.Operation - (*Parameter)(nil), // 37: openapi.v2.Parameter - (*ParameterDefinitions)(nil), // 38: openapi.v2.ParameterDefinitions - (*ParametersItem)(nil), // 39: openapi.v2.ParametersItem - (*PathItem)(nil), // 40: openapi.v2.PathItem - (*PathParameterSubSchema)(nil), // 41: openapi.v2.PathParameterSubSchema - (*Paths)(nil), // 42: openapi.v2.Paths - (*PrimitivesItems)(nil), // 43: openapi.v2.PrimitivesItems - (*Properties)(nil), // 44: openapi.v2.Properties - (*QueryParameterSubSchema)(nil), // 45: openapi.v2.QueryParameterSubSchema - (*Response)(nil), // 46: openapi.v2.Response - (*ResponseDefinitions)(nil), // 47: openapi.v2.ResponseDefinitions - (*ResponseValue)(nil), // 48: openapi.v2.ResponseValue - (*Responses)(nil), // 49: openapi.v2.Responses - (*Schema)(nil), // 50: openapi.v2.Schema - (*SchemaItem)(nil), // 51: openapi.v2.SchemaItem - (*SecurityDefinitions)(nil), // 52: openapi.v2.SecurityDefinitions - (*SecurityDefinitionsItem)(nil), // 53: openapi.v2.SecurityDefinitionsItem - (*SecurityRequirement)(nil), // 54: openapi.v2.SecurityRequirement - (*StringArray)(nil), // 55: openapi.v2.StringArray - (*Tag)(nil), // 56: openapi.v2.Tag - (*TypeItem)(nil), // 57: openapi.v2.TypeItem - (*VendorExtension)(nil), // 58: openapi.v2.VendorExtension - (*Xml)(nil), // 59: openapi.v2.Xml - (*any.Any)(nil), // 60: google.protobuf.Any -} -var file_openapiv2_OpenAPIv2_proto_depIdxs = []int32{ - 50, // 0: openapi.v2.AdditionalPropertiesItem.schema:type_name -> openapi.v2.Schema - 60, // 1: openapi.v2.Any.value:type_name -> google.protobuf.Any - 20, // 2: openapi.v2.ApiKeySecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 3: openapi.v2.BasicAuthenticationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 50, 
// 4: openapi.v2.BodyParameter.schema:type_name -> openapi.v2.Schema - 20, // 5: openapi.v2.BodyParameter.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 6: openapi.v2.Contact.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 7: openapi.v2.Default.additional_properties:type_name -> openapi.v2.NamedAny - 26, // 8: openapi.v2.Definitions.additional_properties:type_name -> openapi.v2.NamedSchema - 16, // 9: openapi.v2.Document.info:type_name -> openapi.v2.Info - 42, // 10: openapi.v2.Document.paths:type_name -> openapi.v2.Paths - 7, // 11: openapi.v2.Document.definitions:type_name -> openapi.v2.Definitions - 38, // 12: openapi.v2.Document.parameters:type_name -> openapi.v2.ParameterDefinitions - 47, // 13: openapi.v2.Document.responses:type_name -> openapi.v2.ResponseDefinitions - 54, // 14: openapi.v2.Document.security:type_name -> openapi.v2.SecurityRequirement - 52, // 15: openapi.v2.Document.security_definitions:type_name -> openapi.v2.SecurityDefinitions - 56, // 16: openapi.v2.Document.tags:type_name -> openapi.v2.Tag - 10, // 17: openapi.v2.Document.external_docs:type_name -> openapi.v2.ExternalDocs - 20, // 18: openapi.v2.Document.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 19: openapi.v2.Examples.additional_properties:type_name -> openapi.v2.NamedAny - 20, // 20: openapi.v2.ExternalDocs.vendor_extension:type_name -> openapi.v2.NamedAny - 1, // 21: openapi.v2.FileSchema.default:type_name -> openapi.v2.Any - 10, // 22: openapi.v2.FileSchema.external_docs:type_name -> openapi.v2.ExternalDocs - 1, // 23: openapi.v2.FileSchema.example:type_name -> openapi.v2.Any - 20, // 24: openapi.v2.FileSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 43, // 25: openapi.v2.FormDataParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems - 1, // 26: openapi.v2.FormDataParameterSubSchema.default:type_name -> openapi.v2.Any - 1, // 27: openapi.v2.FormDataParameterSubSchema.enum:type_name -> openapi.v2.Any - 20, // 28: openapi.v2.FormDataParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 43, // 29: openapi.v2.Header.items:type_name -> openapi.v2.PrimitivesItems - 1, // 30: openapi.v2.Header.default:type_name -> openapi.v2.Any - 1, // 31: openapi.v2.Header.enum:type_name -> openapi.v2.Any - 20, // 32: openapi.v2.Header.vendor_extension:type_name -> openapi.v2.NamedAny - 43, // 33: openapi.v2.HeaderParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems - 1, // 34: openapi.v2.HeaderParameterSubSchema.default:type_name -> openapi.v2.Any - 1, // 35: openapi.v2.HeaderParameterSubSchema.enum:type_name -> openapi.v2.Any - 20, // 36: openapi.v2.HeaderParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 21, // 37: openapi.v2.Headers.additional_properties:type_name -> openapi.v2.NamedHeader - 5, // 38: openapi.v2.Info.contact:type_name -> openapi.v2.Contact - 19, // 39: openapi.v2.Info.license:type_name -> openapi.v2.License - 20, // 40: openapi.v2.Info.vendor_extension:type_name -> openapi.v2.NamedAny - 50, // 41: openapi.v2.ItemsItem.schema:type_name -> openapi.v2.Schema - 20, // 42: openapi.v2.License.vendor_extension:type_name -> openapi.v2.NamedAny - 1, // 43: openapi.v2.NamedAny.value:type_name -> openapi.v2.Any - 13, // 44: openapi.v2.NamedHeader.value:type_name -> openapi.v2.Header - 37, // 45: openapi.v2.NamedParameter.value:type_name -> openapi.v2.Parameter - 40, // 46: openapi.v2.NamedPathItem.value:type_name -> openapi.v2.PathItem - 46, // 47: openapi.v2.NamedResponse.value:type_name -> openapi.v2.Response 
- 48, // 48: openapi.v2.NamedResponseValue.value:type_name -> openapi.v2.ResponseValue - 50, // 49: openapi.v2.NamedSchema.value:type_name -> openapi.v2.Schema - 53, // 50: openapi.v2.NamedSecurityDefinitionsItem.value:type_name -> openapi.v2.SecurityDefinitionsItem - 55, // 51: openapi.v2.NamedStringArray.value:type_name -> openapi.v2.StringArray - 14, // 52: openapi.v2.NonBodyParameter.header_parameter_sub_schema:type_name -> openapi.v2.HeaderParameterSubSchema - 12, // 53: openapi.v2.NonBodyParameter.form_data_parameter_sub_schema:type_name -> openapi.v2.FormDataParameterSubSchema - 45, // 54: openapi.v2.NonBodyParameter.query_parameter_sub_schema:type_name -> openapi.v2.QueryParameterSubSchema - 41, // 55: openapi.v2.NonBodyParameter.path_parameter_sub_schema:type_name -> openapi.v2.PathParameterSubSchema - 35, // 56: openapi.v2.Oauth2AccessCodeSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes - 20, // 57: openapi.v2.Oauth2AccessCodeSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 35, // 58: openapi.v2.Oauth2ApplicationSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes - 20, // 59: openapi.v2.Oauth2ApplicationSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 35, // 60: openapi.v2.Oauth2ImplicitSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes - 20, // 61: openapi.v2.Oauth2ImplicitSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 35, // 62: openapi.v2.Oauth2PasswordSecurity.scopes:type_name -> openapi.v2.Oauth2Scopes - 20, // 63: openapi.v2.Oauth2PasswordSecurity.vendor_extension:type_name -> openapi.v2.NamedAny - 28, // 64: openapi.v2.Oauth2Scopes.additional_properties:type_name -> openapi.v2.NamedString - 10, // 65: openapi.v2.Operation.external_docs:type_name -> openapi.v2.ExternalDocs - 39, // 66: openapi.v2.Operation.parameters:type_name -> openapi.v2.ParametersItem - 49, // 67: openapi.v2.Operation.responses:type_name -> openapi.v2.Responses - 54, // 68: openapi.v2.Operation.security:type_name -> openapi.v2.SecurityRequirement - 20, // 69: openapi.v2.Operation.vendor_extension:type_name -> openapi.v2.NamedAny - 4, // 70: openapi.v2.Parameter.body_parameter:type_name -> openapi.v2.BodyParameter - 30, // 71: openapi.v2.Parameter.non_body_parameter:type_name -> openapi.v2.NonBodyParameter - 22, // 72: openapi.v2.ParameterDefinitions.additional_properties:type_name -> openapi.v2.NamedParameter - 37, // 73: openapi.v2.ParametersItem.parameter:type_name -> openapi.v2.Parameter - 18, // 74: openapi.v2.ParametersItem.json_reference:type_name -> openapi.v2.JsonReference - 36, // 75: openapi.v2.PathItem.get:type_name -> openapi.v2.Operation - 36, // 76: openapi.v2.PathItem.put:type_name -> openapi.v2.Operation - 36, // 77: openapi.v2.PathItem.post:type_name -> openapi.v2.Operation - 36, // 78: openapi.v2.PathItem.delete:type_name -> openapi.v2.Operation - 36, // 79: openapi.v2.PathItem.options:type_name -> openapi.v2.Operation - 36, // 80: openapi.v2.PathItem.head:type_name -> openapi.v2.Operation - 36, // 81: openapi.v2.PathItem.patch:type_name -> openapi.v2.Operation - 39, // 82: openapi.v2.PathItem.parameters:type_name -> openapi.v2.ParametersItem - 20, // 83: openapi.v2.PathItem.vendor_extension:type_name -> openapi.v2.NamedAny - 43, // 84: openapi.v2.PathParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems - 1, // 85: openapi.v2.PathParameterSubSchema.default:type_name -> openapi.v2.Any - 1, // 86: openapi.v2.PathParameterSubSchema.enum:type_name -> openapi.v2.Any - 20, // 87: 
openapi.v2.PathParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 88: openapi.v2.Paths.vendor_extension:type_name -> openapi.v2.NamedAny - 23, // 89: openapi.v2.Paths.path:type_name -> openapi.v2.NamedPathItem - 43, // 90: openapi.v2.PrimitivesItems.items:type_name -> openapi.v2.PrimitivesItems - 1, // 91: openapi.v2.PrimitivesItems.default:type_name -> openapi.v2.Any - 1, // 92: openapi.v2.PrimitivesItems.enum:type_name -> openapi.v2.Any - 20, // 93: openapi.v2.PrimitivesItems.vendor_extension:type_name -> openapi.v2.NamedAny - 26, // 94: openapi.v2.Properties.additional_properties:type_name -> openapi.v2.NamedSchema - 43, // 95: openapi.v2.QueryParameterSubSchema.items:type_name -> openapi.v2.PrimitivesItems - 1, // 96: openapi.v2.QueryParameterSubSchema.default:type_name -> openapi.v2.Any - 1, // 97: openapi.v2.QueryParameterSubSchema.enum:type_name -> openapi.v2.Any - 20, // 98: openapi.v2.QueryParameterSubSchema.vendor_extension:type_name -> openapi.v2.NamedAny - 51, // 99: openapi.v2.Response.schema:type_name -> openapi.v2.SchemaItem - 15, // 100: openapi.v2.Response.headers:type_name -> openapi.v2.Headers - 9, // 101: openapi.v2.Response.examples:type_name -> openapi.v2.Examples - 20, // 102: openapi.v2.Response.vendor_extension:type_name -> openapi.v2.NamedAny - 24, // 103: openapi.v2.ResponseDefinitions.additional_properties:type_name -> openapi.v2.NamedResponse - 46, // 104: openapi.v2.ResponseValue.response:type_name -> openapi.v2.Response - 18, // 105: openapi.v2.ResponseValue.json_reference:type_name -> openapi.v2.JsonReference - 25, // 106: openapi.v2.Responses.response_code:type_name -> openapi.v2.NamedResponseValue - 20, // 107: openapi.v2.Responses.vendor_extension:type_name -> openapi.v2.NamedAny - 1, // 108: openapi.v2.Schema.default:type_name -> openapi.v2.Any - 1, // 109: openapi.v2.Schema.enum:type_name -> openapi.v2.Any - 0, // 110: openapi.v2.Schema.additional_properties:type_name -> openapi.v2.AdditionalPropertiesItem - 57, // 111: openapi.v2.Schema.type:type_name -> openapi.v2.TypeItem - 17, // 112: openapi.v2.Schema.items:type_name -> openapi.v2.ItemsItem - 50, // 113: openapi.v2.Schema.all_of:type_name -> openapi.v2.Schema - 44, // 114: openapi.v2.Schema.properties:type_name -> openapi.v2.Properties - 59, // 115: openapi.v2.Schema.xml:type_name -> openapi.v2.Xml - 10, // 116: openapi.v2.Schema.external_docs:type_name -> openapi.v2.ExternalDocs - 1, // 117: openapi.v2.Schema.example:type_name -> openapi.v2.Any - 20, // 118: openapi.v2.Schema.vendor_extension:type_name -> openapi.v2.NamedAny - 50, // 119: openapi.v2.SchemaItem.schema:type_name -> openapi.v2.Schema - 11, // 120: openapi.v2.SchemaItem.file_schema:type_name -> openapi.v2.FileSchema - 27, // 121: openapi.v2.SecurityDefinitions.additional_properties:type_name -> openapi.v2.NamedSecurityDefinitionsItem - 3, // 122: openapi.v2.SecurityDefinitionsItem.basic_authentication_security:type_name -> openapi.v2.BasicAuthenticationSecurity - 2, // 123: openapi.v2.SecurityDefinitionsItem.api_key_security:type_name -> openapi.v2.ApiKeySecurity - 33, // 124: openapi.v2.SecurityDefinitionsItem.oauth2_implicit_security:type_name -> openapi.v2.Oauth2ImplicitSecurity - 34, // 125: openapi.v2.SecurityDefinitionsItem.oauth2_password_security:type_name -> openapi.v2.Oauth2PasswordSecurity - 32, // 126: openapi.v2.SecurityDefinitionsItem.oauth2_application_security:type_name -> openapi.v2.Oauth2ApplicationSecurity - 31, // 127: 
openapi.v2.SecurityDefinitionsItem.oauth2_access_code_security:type_name -> openapi.v2.Oauth2AccessCodeSecurity - 29, // 128: openapi.v2.SecurityRequirement.additional_properties:type_name -> openapi.v2.NamedStringArray - 10, // 129: openapi.v2.Tag.external_docs:type_name -> openapi.v2.ExternalDocs - 20, // 130: openapi.v2.Tag.vendor_extension:type_name -> openapi.v2.NamedAny - 20, // 131: openapi.v2.VendorExtension.additional_properties:type_name -> openapi.v2.NamedAny - 20, // 132: openapi.v2.Xml.vendor_extension:type_name -> openapi.v2.NamedAny - 133, // [133:133] is the sub-list for method output_type - 133, // [133:133] is the sub-list for method input_type - 133, // [133:133] is the sub-list for extension type_name - 133, // [133:133] is the sub-list for extension extendee - 0, // [0:133] is the sub-list for field type_name -} - -func init() { file_openapiv2_OpenAPIv2_proto_init() } -func file_openapiv2_OpenAPIv2_proto_init() { - if File_openapiv2_OpenAPIv2_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_openapiv2_OpenAPIv2_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AdditionalPropertiesItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApiKeySecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BasicAuthenticationSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*BodyParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Contact); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Default); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Definitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Document); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Examples); i { - case 0: - return 
&v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExternalDocs); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FileSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*FormDataParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Header); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*HeaderParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Headers); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ItemsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*JsonReference); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*License); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedAny); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedHeader); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_openapiv2_OpenAPIv2_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedPathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedResponseValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedSecurityDefinitionsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedString); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NamedStringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*NonBodyParameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2AccessCodeSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2ApplicationSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2ImplicitSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2PasswordSecurity); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Oauth2Scopes); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[36].Exporter = 
func(v interface{}, i int) interface{} { - switch v := v.(*Operation); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Parameter); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParameterDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ParametersItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PathParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Paths); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PrimitivesItems); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Properties); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*QueryParameterSubSchema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Response); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ResponseValue); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Responses); i { - case 0: - return &v.state - case 1: - 
return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Schema); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SchemaItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityDefinitions); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityDefinitionsItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SecurityRequirement); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StringArray); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Tag); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*TypeItem); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*VendorExtension); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Xml); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - file_openapiv2_OpenAPIv2_proto_msgTypes[0].OneofWrappers = []interface{}{ - (*AdditionalPropertiesItem_Schema)(nil), - (*AdditionalPropertiesItem_Boolean)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[30].OneofWrappers = []interface{}{ - (*NonBodyParameter_HeaderParameterSubSchema)(nil), - (*NonBodyParameter_FormDataParameterSubSchema)(nil), - (*NonBodyParameter_QueryParameterSubSchema)(nil), - (*NonBodyParameter_PathParameterSubSchema)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[37].OneofWrappers = []interface{}{ - (*Parameter_BodyParameter)(nil), - (*Parameter_NonBodyParameter)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[39].OneofWrappers = []interface{}{ - (*ParametersItem_Parameter)(nil), - (*ParametersItem_JsonReference)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[48].OneofWrappers = 
[]interface{}{ - (*ResponseValue_Response)(nil), - (*ResponseValue_JsonReference)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[51].OneofWrappers = []interface{}{ - (*SchemaItem_Schema)(nil), - (*SchemaItem_FileSchema)(nil), - } - file_openapiv2_OpenAPIv2_proto_msgTypes[53].OneofWrappers = []interface{}{ - (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), - (*SecurityDefinitionsItem_ApiKeySecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), - (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_openapiv2_OpenAPIv2_proto_rawDesc, - NumEnums: 0, - NumMessages: 60, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_openapiv2_OpenAPIv2_proto_goTypes, - DependencyIndexes: file_openapiv2_OpenAPIv2_proto_depIdxs, - MessageInfos: file_openapiv2_OpenAPIv2_proto_msgTypes, - }.Build() - File_openapiv2_OpenAPIv2_proto = out.File - file_openapiv2_OpenAPIv2_proto_rawDesc = nil - file_openapiv2_OpenAPIv2_proto_goTypes = nil - file_openapiv2_OpenAPIv2_proto_depIdxs = nil -} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto deleted file mode 100644 index 00ac1b0..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto +++ /dev/null @@ -1,666 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// THIS FILE IS AUTOMATICALLY GENERATED. - -syntax = "proto3"; - -package openapi.v2; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "OpenAPIProto"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.openapi_v2"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -option objc_class_prefix = "OAS"; - -// The Go package name. 
-option go_package = "openapiv2;openapi_v2"; - -message AdditionalPropertiesItem { - oneof oneof { - Schema schema = 1; - bool boolean = 2; - } -} - -message Any { - google.protobuf.Any value = 1; - string yaml = 2; -} - -message ApiKeySecurity { - string type = 1; - string name = 2; - string in = 3; - string description = 4; - repeated NamedAny vendor_extension = 5; -} - -message BasicAuthenticationSecurity { - string type = 1; - string description = 2; - repeated NamedAny vendor_extension = 3; -} - -message BodyParameter { - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 1; - // The name of the parameter. - string name = 2; - // Determines the location of the parameter. - string in = 3; - // Determines whether or not this parameter is required or optional. - bool required = 4; - Schema schema = 5; - repeated NamedAny vendor_extension = 6; -} - -// Contact information for the owners of the API. -message Contact { - // The identifying name of the contact person/organization. - string name = 1; - // The URL pointing to the contact information. - string url = 2; - // The email address of the contact person/organization. - string email = 3; - repeated NamedAny vendor_extension = 4; -} - -message Default { - repeated NamedAny additional_properties = 1; -} - -// One or more JSON objects describing the schemas being consumed and produced by the API. -message Definitions { - repeated NamedSchema additional_properties = 1; -} - -message Document { - // The Swagger version of this document. - string swagger = 1; - Info info = 2; - // The host (name or ip) of the API. Example: 'swagger.io' - string host = 3; - // The base path to the API. Example: '/api'. - string base_path = 4; - // The transfer protocol of the API. - repeated string schemes = 5; - // A list of MIME types accepted by the API. - repeated string consumes = 6; - // A list of MIME types the API can produce. - repeated string produces = 7; - Paths paths = 8; - Definitions definitions = 9; - ParameterDefinitions parameters = 10; - ResponseDefinitions responses = 11; - repeated SecurityRequirement security = 12; - SecurityDefinitions security_definitions = 13; - repeated Tag tags = 14; - ExternalDocs external_docs = 15; - repeated NamedAny vendor_extension = 16; -} - -message Examples { - repeated NamedAny additional_properties = 1; -} - -// information about external documentation -message ExternalDocs { - string description = 1; - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// A deterministic version of a JSON Schema object. -message FileSchema { - string format = 1; - string title = 2; - string description = 3; - Any default = 4; - repeated string required = 5; - string type = 6; - bool read_only = 7; - ExternalDocs external_docs = 8; - Any example = 9; - repeated NamedAny vendor_extension = 10; -} - -message FormDataParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Header { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - string description = 18; - repeated NamedAny vendor_extension = 19; -} - -message HeaderParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -message Headers { - repeated NamedHeader additional_properties = 1; -} - -// General information about the API. -message Info { - // A unique and precise title of the API. - string title = 1; - // A semantic version number of the API. - string version = 2; - // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. - string description = 3; - // The terms of service for the API. - string terms_of_service = 4; - Contact contact = 5; - License license = 6; - repeated NamedAny vendor_extension = 7; -} - -message ItemsItem { - repeated Schema schema = 1; -} - -message JsonReference { - string _ref = 1; - string description = 2; -} - -message License { - // The name of the license type. It's encouraged to use an OSI compatible license. - string name = 1; - // The URL pointing to the license. - string url = 2; - repeated NamedAny vendor_extension = 3; -} - -// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. -message NamedAny { - // Map key - string name = 1; - // Mapped value - Any value = 2; -} - -// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. -message NamedHeader { - // Map key - string name = 1; - // Mapped value - Header value = 2; -} - -// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. -message NamedParameter { - // Map key - string name = 1; - // Mapped value - Parameter value = 2; -} - -// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
-message NamedPathItem { - // Map key - string name = 1; - // Mapped value - PathItem value = 2; -} - -// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. -message NamedResponse { - // Map key - string name = 1; - // Mapped value - Response value = 2; -} - -// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. -message NamedResponseValue { - // Map key - string name = 1; - // Mapped value - ResponseValue value = 2; -} - -// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. -message NamedSchema { - // Map key - string name = 1; - // Mapped value - Schema value = 2; -} - -// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. -message NamedSecurityDefinitionsItem { - // Map key - string name = 1; - // Mapped value - SecurityDefinitionsItem value = 2; -} - -// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. -message NamedString { - // Map key - string name = 1; - // Mapped value - string value = 2; -} - -// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. -message NamedStringArray { - // Map key - string name = 1; - // Mapped value - StringArray value = 2; -} - -message NonBodyParameter { - oneof oneof { - HeaderParameterSubSchema header_parameter_sub_schema = 1; - FormDataParameterSubSchema form_data_parameter_sub_schema = 2; - QueryParameterSubSchema query_parameter_sub_schema = 3; - PathParameterSubSchema path_parameter_sub_schema = 4; - } -} - -message Oauth2AccessCodeSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string token_url = 5; - string description = 6; - repeated NamedAny vendor_extension = 7; -} - -message Oauth2ApplicationSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2ImplicitSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string authorization_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2PasswordSecurity { - string type = 1; - string flow = 2; - Oauth2Scopes scopes = 3; - string token_url = 4; - string description = 5; - repeated NamedAny vendor_extension = 6; -} - -message Oauth2Scopes { - repeated NamedString additional_properties = 1; -} - -message Operation { - repeated string tags = 1; - // A brief summary of the operation. - string summary = 2; - // A longer description of the operation, GitHub Flavored Markdown is allowed. - string description = 3; - ExternalDocs external_docs = 4; - // A unique identifier of the operation. - string operation_id = 5; - // A list of MIME types the API can produce. - repeated string produces = 6; - // A list of MIME types the API can consume. - repeated string consumes = 7; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 8; - Responses responses = 9; - // The transfer protocol of the API. 
- repeated string schemes = 10; - bool deprecated = 11; - repeated SecurityRequirement security = 12; - repeated NamedAny vendor_extension = 13; -} - -message Parameter { - oneof oneof { - BodyParameter body_parameter = 1; - NonBodyParameter non_body_parameter = 2; - } -} - -// One or more JSON representations for parameters -message ParameterDefinitions { - repeated NamedParameter additional_properties = 1; -} - -message ParametersItem { - oneof oneof { - Parameter parameter = 1; - JsonReference json_reference = 2; - } -} - -message PathItem { - string _ref = 1; - Operation get = 2; - Operation put = 3; - Operation post = 4; - Operation delete = 5; - Operation options = 6; - Operation head = 7; - Operation patch = 8; - // The parameters needed to send a valid API call. - repeated ParametersItem parameters = 9; - repeated NamedAny vendor_extension = 10; -} - -message PathParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - string type = 5; - string format = 6; - PrimitivesItems items = 7; - string collection_format = 8; - Any default = 9; - double maximum = 10; - bool exclusive_maximum = 11; - double minimum = 12; - bool exclusive_minimum = 13; - int64 max_length = 14; - int64 min_length = 15; - string pattern = 16; - int64 max_items = 17; - int64 min_items = 18; - bool unique_items = 19; - repeated Any enum = 20; - double multiple_of = 21; - repeated NamedAny vendor_extension = 22; -} - -// Relative paths to the individual endpoints. They must be relative to the 'basePath'. -message Paths { - repeated NamedAny vendor_extension = 1; - repeated NamedPathItem path = 2; -} - -message PrimitivesItems { - string type = 1; - string format = 2; - PrimitivesItems items = 3; - string collection_format = 4; - Any default = 5; - double maximum = 6; - bool exclusive_maximum = 7; - double minimum = 8; - bool exclusive_minimum = 9; - int64 max_length = 10; - int64 min_length = 11; - string pattern = 12; - int64 max_items = 13; - int64 min_items = 14; - bool unique_items = 15; - repeated Any enum = 16; - double multiple_of = 17; - repeated NamedAny vendor_extension = 18; -} - -message Properties { - repeated NamedSchema additional_properties = 1; -} - -message QueryParameterSubSchema { - // Determines whether or not this parameter is required or optional. - bool required = 1; - // Determines the location of the parameter. - string in = 2; - // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. - string description = 3; - // The name of the parameter. - string name = 4; - // allows sending a parameter by name only or with an empty value. 
- bool allow_empty_value = 5; - string type = 6; - string format = 7; - PrimitivesItems items = 8; - string collection_format = 9; - Any default = 10; - double maximum = 11; - bool exclusive_maximum = 12; - double minimum = 13; - bool exclusive_minimum = 14; - int64 max_length = 15; - int64 min_length = 16; - string pattern = 17; - int64 max_items = 18; - int64 min_items = 19; - bool unique_items = 20; - repeated Any enum = 21; - double multiple_of = 22; - repeated NamedAny vendor_extension = 23; -} - -message Response { - string description = 1; - SchemaItem schema = 2; - Headers headers = 3; - Examples examples = 4; - repeated NamedAny vendor_extension = 5; -} - -// One or more JSON representations for responses -message ResponseDefinitions { - repeated NamedResponse additional_properties = 1; -} - -message ResponseValue { - oneof oneof { - Response response = 1; - JsonReference json_reference = 2; - } -} - -// Response objects names can either be any valid HTTP status code or 'default'. -message Responses { - repeated NamedResponseValue response_code = 1; - repeated NamedAny vendor_extension = 2; -} - -// A deterministic version of a JSON Schema object. -message Schema { - string _ref = 1; - string format = 2; - string title = 3; - string description = 4; - Any default = 5; - double multiple_of = 6; - double maximum = 7; - bool exclusive_maximum = 8; - double minimum = 9; - bool exclusive_minimum = 10; - int64 max_length = 11; - int64 min_length = 12; - string pattern = 13; - int64 max_items = 14; - int64 min_items = 15; - bool unique_items = 16; - int64 max_properties = 17; - int64 min_properties = 18; - repeated string required = 19; - repeated Any enum = 20; - AdditionalPropertiesItem additional_properties = 21; - TypeItem type = 22; - ItemsItem items = 23; - repeated Schema all_of = 24; - Properties properties = 25; - string discriminator = 26; - bool read_only = 27; - Xml xml = 28; - ExternalDocs external_docs = 29; - Any example = 30; - repeated NamedAny vendor_extension = 31; -} - -message SchemaItem { - oneof oneof { - Schema schema = 1; - FileSchema file_schema = 2; - } -} - -message SecurityDefinitions { - repeated NamedSecurityDefinitionsItem additional_properties = 1; -} - -message SecurityDefinitionsItem { - oneof oneof { - BasicAuthenticationSecurity basic_authentication_security = 1; - ApiKeySecurity api_key_security = 2; - Oauth2ImplicitSecurity oauth2_implicit_security = 3; - Oauth2PasswordSecurity oauth2_password_security = 4; - Oauth2ApplicationSecurity oauth2_application_security = 5; - Oauth2AccessCodeSecurity oauth2_access_code_security = 6; - } -} - -message SecurityRequirement { - repeated NamedStringArray additional_properties = 1; -} - -message StringArray { - repeated string value = 1; -} - -message Tag { - string name = 1; - string description = 2; - ExternalDocs external_docs = 3; - repeated NamedAny vendor_extension = 4; -} - -message TypeItem { - repeated string value = 1; -} - -// Any property starting with x- is valid. 
-message VendorExtension { - repeated NamedAny additional_properties = 1; -} - -message Xml { - string name = 1; - string namespace = 2; - string prefix = 3; - bool attribute = 4; - bool wrapped = 5; - repeated NamedAny vendor_extension = 6; -} - diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md deleted file mode 100644 index 5276128..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# OpenAPI v2 Protocol Buffer Models - -This directory contains a Protocol Buffer-language model and related code for -supporting OpenAPI v2. - -Gnostic applications and plugins can use OpenAPIv2.proto to generate Protocol -Buffer support code for their preferred languages. - -OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI descriptions into -the Protocol Buffer-based datastructures generated from OpenAPIv2.proto. - -OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic compiler -generator, and OpenAPIv2.pb.go is generated by protoc, the Protocol Buffer -compiler, and protoc-gen-go, the Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go deleted file mode 100644 index ddeed5c..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/document.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2020 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package openapi_v2 - -import "github.com/googleapis/gnostic/compiler" - -// ParseDocument reads an OpenAPI v2 description from a YAML/JSON representation. -func ParseDocument(b []byte) (*Document, error) { - info, err := compiler.ReadInfoFromBytes("", b) - if err != nil { - return nil, err - } - return NewDocument(info.Content[0], compiler.NewContextWithExtensions("$root", nil, nil)) -} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json deleted file mode 100644 index afa12b7..0000000 --- a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json +++ /dev/null @@ -1,1610 +0,0 @@ -{ - "title": "A JSON Schema for Swagger 2.0 API.", - "id": "http://swagger.io/v2/schema.json#", - "$schema": "http://json-schema.org/draft-04/schema#", - "type": "object", - "required": [ - "swagger", - "info", - "paths" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "swagger": { - "type": "string", - "enum": [ - "2.0" - ], - "description": "The Swagger version of this document." - }, - "info": { - "$ref": "#/definitions/info" - }, - "host": { - "type": "string", - "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", - "description": "The host (name or ip) of the API. Example: 'swagger.io'" - }, - "basePath": { - "type": "string", - "pattern": "^/", - "description": "The base path to the API. 
Example: '/api'." - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "consumes": { - "description": "A list of MIME types accepted by the API.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "paths": { - "$ref": "#/definitions/paths" - }, - "definitions": { - "$ref": "#/definitions/definitions" - }, - "parameters": { - "$ref": "#/definitions/parameterDefinitions" - }, - "responses": { - "$ref": "#/definitions/responseDefinitions" - }, - "security": { - "$ref": "#/definitions/security" - }, - "securityDefinitions": { - "$ref": "#/definitions/securityDefinitions" - }, - "tags": { - "type": "array", - "items": { - "$ref": "#/definitions/tag" - }, - "uniqueItems": true - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "definitions": { - "info": { - "type": "object", - "description": "General information about the API.", - "required": [ - "version", - "title" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "title": { - "type": "string", - "description": "A unique and precise title of the API." - }, - "version": { - "type": "string", - "description": "A semantic version number of the API." - }, - "description": { - "type": "string", - "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." - }, - "termsOfService": { - "type": "string", - "description": "The terms of service for the API." - }, - "contact": { - "$ref": "#/definitions/contact" - }, - "license": { - "$ref": "#/definitions/license" - } - } - }, - "contact": { - "type": "object", - "description": "Contact information for the owners of the API.", - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The identifying name of the contact person/organization." - }, - "url": { - "type": "string", - "description": "The URL pointing to the contact information.", - "format": "uri" - }, - "email": { - "type": "string", - "description": "The email address of the contact person/organization.", - "format": "email" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "license": { - "type": "object", - "required": [ - "name" - ], - "additionalProperties": false, - "properties": { - "name": { - "type": "string", - "description": "The name of the license type. It's encouraged to use an OSI compatible license." - }, - "url": { - "type": "string", - "description": "The URL pointing to the license.", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "paths": { - "type": "object", - "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - }, - "^/": { - "$ref": "#/definitions/pathItem" - } - }, - "additionalProperties": false - }, - "definitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
- }, - "parameterDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/parameter" - }, - "description": "One or more JSON representations for parameters" - }, - "responseDefinitions": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/response" - }, - "description": "One or more JSON representations for responses" - }, - "externalDocs": { - "type": "object", - "additionalProperties": false, - "description": "information about external documentation", - "required": [ - "url" - ], - "properties": { - "description": { - "type": "string" - }, - "url": { - "type": "string", - "format": "uri" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "examples": { - "type": "object", - "additionalProperties": true - }, - "mimeType": { - "type": "string", - "description": "The MIME type of the HTTP message." - }, - "operation": { - "type": "object", - "required": [ - "responses" - ], - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "tags": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - }, - "summary": { - "type": "string", - "description": "A brief summary of the operation." - }, - "description": { - "type": "string", - "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "operationId": { - "type": "string", - "description": "A unique identifier of the operation." - }, - "produces": { - "description": "A list of MIME types the API can produce.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "consumes": { - "description": "A list of MIME types the API can consume.", - "allOf": [ - { - "$ref": "#/definitions/mediaTypeList" - } - ] - }, - "parameters": { - "$ref": "#/definitions/parametersList" - }, - "responses": { - "$ref": "#/definitions/responses" - }, - "schemes": { - "$ref": "#/definitions/schemesList" - }, - "deprecated": { - "type": "boolean", - "default": false - }, - "security": { - "$ref": "#/definitions/security" - } - } - }, - "pathItem": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "get": { - "$ref": "#/definitions/operation" - }, - "put": { - "$ref": "#/definitions/operation" - }, - "post": { - "$ref": "#/definitions/operation" - }, - "delete": { - "$ref": "#/definitions/operation" - }, - "options": { - "$ref": "#/definitions/operation" - }, - "head": { - "$ref": "#/definitions/operation" - }, - "patch": { - "$ref": "#/definitions/operation" - }, - "parameters": { - "$ref": "#/definitions/parametersList" - } - } - }, - "responses": { - "type": "object", - "description": "Response objects names can either be any valid HTTP status code or 'default'.", - "minProperties": 1, - "additionalProperties": false, - "patternProperties": { - "^([0-9]{3})$|^(default)$": { - "$ref": "#/definitions/responseValue" - }, - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "not": { - "type": "object", - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - } - }, - "responseValue": { - "oneOf": [ - { - "$ref": "#/definitions/response" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "response": { - 
"type": "object", - "required": [ - "description" - ], - "properties": { - "description": { - "type": "string" - }, - "schema": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "$ref": "#/definitions/fileSchema" - } - ] - }, - "headers": { - "$ref": "#/definitions/headers" - }, - "examples": { - "$ref": "#/definitions/examples" - } - }, - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "headers": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/header" - } - }, - "header": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "vendorExtension": { - "description": "Any property starting with x- is valid.", - "additionalProperties": true, - "additionalItems": true - }, - "bodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "schema" - ], - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "body" - ] - }, - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "schema": { - "$ref": "#/definitions/schema" - } - }, - "additionalProperties": false - }, - "headerParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "header" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "queryParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "query" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "formDataParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "required": { - "type": "boolean", - "description": "Determines whether or not this parameter is required or optional.", - "default": false - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "formData" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." - }, - "allowEmptyValue": { - "type": "boolean", - "default": false, - "description": "allows sending a parameter by name only or with an empty value." - }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array", - "file" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormatWithMulti" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "pathParameterSubSchema": { - "additionalProperties": false, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "required" - ], - "properties": { - "required": { - "type": "boolean", - "enum": [ - true - ], - "description": "Determines whether or not this parameter is required or optional." - }, - "in": { - "type": "string", - "description": "Determines the location of the parameter.", - "enum": [ - "path" - ] - }, - "description": { - "type": "string", - "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." - }, - "name": { - "type": "string", - "description": "The name of the parameter." 
- }, - "type": { - "type": "string", - "enum": [ - "string", - "number", - "boolean", - "integer", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - } - }, - "nonBodyParameter": { - "type": "object", - "required": [ - "name", - "in", - "type" - ], - "oneOf": [ - { - "$ref": "#/definitions/headerParameterSubSchema" - }, - { - "$ref": "#/definitions/formDataParameterSubSchema" - }, - { - "$ref": "#/definitions/queryParameterSubSchema" - }, - { - "$ref": "#/definitions/pathParameterSubSchema" - } - ] - }, - "parameter": { - "oneOf": [ - { - "$ref": "#/definitions/bodyParameter" - }, - { - "$ref": "#/definitions/nonBodyParameter" - } - ] - }, - "schema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "properties": { - "$ref": { - "type": "string" - }, - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "maxProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minProperties": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "enum": { - "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" - }, - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "boolean" - } - ], - "default": {} - }, - "type": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/type" - }, - "items": { - "anyOf": [ - { - "$ref": "#/definitions/schema" - }, - { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - } - ], - "default": {} - }, - "allOf": { - "type": "array", - "minItems": 1, - "items": { - "$ref": "#/definitions/schema" - } - }, - "properties": { - "type": "object", - "additionalProperties": { - "$ref": "#/definitions/schema" - }, - "default": {} - }, - "discriminator": { - "type": "string" - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "xml": { - "$ref": "#/definitions/xml" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "fileSchema": { - "type": "object", - "description": "A deterministic version of a JSON Schema object.", - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - }, - "required": [ - "type" - ], - "properties": { - "format": { - "type": "string" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "required": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" - }, - "type": { - "type": "string", - "enum": [ - "file" - ] - }, - "readOnly": { - "type": "boolean", - "default": false - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - }, - "example": {} - }, - "additionalProperties": false - }, - "primitivesItems": { - "type": "object", - "additionalProperties": false, - "properties": { - "type": { - "type": "string", - "enum": [ - "string", - "number", - "integer", - "boolean", - "array" - ] - }, - "format": { - "type": "string" - }, - "items": { - "$ref": "#/definitions/primitivesItems" - }, - "collectionFormat": { - "$ref": "#/definitions/collectionFormat" - }, - "default": { - "$ref": "#/definitions/default" - }, - "maximum": { - "$ref": "#/definitions/maximum" - }, - "exclusiveMaximum": { - "$ref": "#/definitions/exclusiveMaximum" - }, - "minimum": { - "$ref": "#/definitions/minimum" - }, - "exclusiveMinimum": { - "$ref": "#/definitions/exclusiveMinimum" - }, - "maxLength": { - "$ref": "#/definitions/maxLength" - }, - "minLength": { - "$ref": "#/definitions/minLength" - }, - "pattern": { - "$ref": "#/definitions/pattern" - }, - "maxItems": { - "$ref": "#/definitions/maxItems" - }, - "minItems": { - "$ref": "#/definitions/minItems" - }, - "uniqueItems": { - "$ref": "#/definitions/uniqueItems" - }, - "enum": { - "$ref": "#/definitions/enum" - }, - "multipleOf": { - "$ref": "#/definitions/multipleOf" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "security": { - "type": "array", - "items": { - "$ref": "#/definitions/securityRequirement" - }, - "uniqueItems": true - }, - "securityRequirement": { - "type": "object", - "additionalProperties": { - "type": "array", - "items": { - "type": "string" - }, - "uniqueItems": true - } - }, - "xml": { - "type": "object", - "additionalProperties": false, - "properties": { - "name": { - "type": "string" - }, - "namespace": { - "type": "string" - }, - 
"prefix": { - "type": "string" - }, - "attribute": { - "type": "boolean", - "default": false - }, - "wrapped": { - "type": "boolean", - "default": false - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "tag": { - "type": "object", - "additionalProperties": false, - "required": [ - "name" - ], - "properties": { - "name": { - "type": "string" - }, - "description": { - "type": "string" - }, - "externalDocs": { - "$ref": "#/definitions/externalDocs" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "securityDefinitions": { - "type": "object", - "additionalProperties": { - "oneOf": [ - { - "$ref": "#/definitions/basicAuthenticationSecurity" - }, - { - "$ref": "#/definitions/apiKeySecurity" - }, - { - "$ref": "#/definitions/oauth2ImplicitSecurity" - }, - { - "$ref": "#/definitions/oauth2PasswordSecurity" - }, - { - "$ref": "#/definitions/oauth2ApplicationSecurity" - }, - { - "$ref": "#/definitions/oauth2AccessCodeSecurity" - } - ] - } - }, - "basicAuthenticationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "basic" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "apiKeySecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "name", - "in" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "apiKey" - ] - }, - "name": { - "type": "string" - }, - "in": { - "type": "string", - "enum": [ - "header", - "query" - ] - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ImplicitSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "implicit" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2PasswordSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "password" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2ApplicationSecurity": { - "type": "object", - "additionalProperties": false, - "required": [ - "type", - "flow", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "application" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2AccessCodeSecurity": { - "type": "object", - 
"additionalProperties": false, - "required": [ - "type", - "flow", - "authorizationUrl", - "tokenUrl" - ], - "properties": { - "type": { - "type": "string", - "enum": [ - "oauth2" - ] - }, - "flow": { - "type": "string", - "enum": [ - "accessCode" - ] - }, - "scopes": { - "$ref": "#/definitions/oauth2Scopes" - }, - "authorizationUrl": { - "type": "string", - "format": "uri" - }, - "tokenUrl": { - "type": "string", - "format": "uri" - }, - "description": { - "type": "string" - } - }, - "patternProperties": { - "^x-": { - "$ref": "#/definitions/vendorExtension" - } - } - }, - "oauth2Scopes": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "mediaTypeList": { - "type": "array", - "items": { - "$ref": "#/definitions/mimeType" - }, - "uniqueItems": true - }, - "parametersList": { - "type": "array", - "description": "The parameters needed to send a valid API call.", - "additionalItems": false, - "items": { - "oneOf": [ - { - "$ref": "#/definitions/parameter" - }, - { - "$ref": "#/definitions/jsonReference" - } - ] - }, - "uniqueItems": true - }, - "schemesList": { - "type": "array", - "description": "The transfer protocol of the API.", - "items": { - "type": "string", - "enum": [ - "http", - "https", - "ws", - "wss" - ] - }, - "uniqueItems": true - }, - "collectionFormat": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes" - ], - "default": "csv" - }, - "collectionFormatWithMulti": { - "type": "string", - "enum": [ - "csv", - "ssv", - "tsv", - "pipes", - "multi" - ], - "default": "csv" - }, - "title": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/title" - }, - "description": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/description" - }, - "default": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/default" - }, - "multipleOf": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" - }, - "maximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" - }, - "exclusiveMaximum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" - }, - "minimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" - }, - "exclusiveMinimum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" - }, - "maxLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minLength": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "pattern": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" - }, - "maxItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" - }, - "minItems": { - "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" - }, - "uniqueItems": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" - }, - "enum": { - "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" - }, - "jsonReference": { - "type": "object", - "required": [ - "$ref" - ], - "additionalProperties": false, - "properties": { - "$ref": { - "type": "string" - }, - "description": { - "type": "string" - } - } - } - } -} \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md deleted file mode 100644 index ee9783d..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Compiler 
support code - -This directory contains compiler support code used by Gnostic and Gnostic -extensions. diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go deleted file mode 100644 index 1250a6c..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/context.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Context contains state of the compiler as it traverses a document. -type Context struct { - Parent *Context - Name string - ExtensionHandlers *[]ExtensionHandler -} - -// NewContextWithExtensions returns a new object representing the compiler state -func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { - return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} -} - -// NewContext returns a new object representing the compiler state -func NewContext(name string, parent *Context) *Context { - if parent != nil { - return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} - } - return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} -} - -// Description returns a text description of the compiler state -func (context *Context) Description() string { - if context.Parent != nil { - return context.Parent.Description() + "." + context.Name - } - return context.Name -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go deleted file mode 100644 index 7db2399..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/error.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -// Error represents compiler errors and their location in the document. -type Error struct { - Context *Context - Message string -} - -// NewError creates an Error. -func NewError(context *Context, message string) *Error { - return &Error{Context: context, Message: message} -} - -// Error returns the string value of an Error. -func (err *Error) Error() string { - if err.Context == nil { - return "ERROR " + err.Message - } - return "ERROR " + err.Context.Description() + " " + err.Message -} - -// ErrorGroup is a container for groups of Error values. 
-type ErrorGroup struct { - Errors []error -} - -// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. -func NewErrorGroupOrNil(errors []error) error { - if len(errors) == 0 { - return nil - } else if len(errors) == 1 { - return errors[0] - } else { - return &ErrorGroup{Errors: errors} - } -} - -func (group *ErrorGroup) Error() string { - result := "" - for i, err := range group.Errors { - if i > 0 { - result += "\n" - } - result += err.Error() - } - return result -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extensions.go b/vendor/github.com/googleapis/gnostic/compiler/extensions.go deleted file mode 100644 index 20848a0..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/extensions.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "bytes" - "fmt" - "os/exec" - "strings" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes/any" - extensions "github.com/googleapis/gnostic/extensions" - yaml "gopkg.in/yaml.v3" -) - -// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. -type ExtensionHandler struct { - Name string -} - -// CallExtension calls a binary extension handler. -func CallExtension(context *Context, in *yaml.Node, extensionName string) (handled bool, response *any.Any, err error) { - if context == nil || context.ExtensionHandlers == nil { - return false, nil, nil - } - handled = false - for _, handler := range *(context.ExtensionHandlers) { - response, err = handler.handle(in, extensionName) - if response == nil { - continue - } else { - handled = true - break - } - } - return handled, response, err -} - -func (extensionHandlers *ExtensionHandler) handle(in *yaml.Node, extensionName string) (*any.Any, error) { - if extensionHandlers.Name != "" { - yamlData, _ := yaml.Marshal(in) - request := &extensions.ExtensionHandlerRequest{ - CompilerVersion: &extensions.Version{ - Major: 0, - Minor: 1, - Patch: 0, - }, - Wrapper: &extensions.Wrapper{ - Version: "unknown", // TODO: set this to the type/version of spec being parsed. - Yaml: string(yamlData), - ExtensionName: extensionName, - }, - } - requestBytes, _ := proto.Marshal(request) - cmd := exec.Command(extensionHandlers.Name) - cmd.Stdin = bytes.NewReader(requestBytes) - output, err := cmd.Output() - if err != nil { - return nil, err - } - response := &extensions.ExtensionHandlerResponse{} - err = proto.Unmarshal(output, response) - if err != nil || !response.Handled { - return nil, err - } - if len(response.Errors) != 0 { - return nil, fmt.Errorf("Errors when parsing: %+v for field %s by vendor extension handler %s. 
Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Errors, ",")) - } - return response.Value, nil - } - return nil, nil -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go deleted file mode 100644 index a580f40..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/helpers.go +++ /dev/null @@ -1,385 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "regexp" - "sort" - "strconv" - - "github.com/googleapis/gnostic/jsonschema" - "gopkg.in/yaml.v3" -) - -// compiler helper functions, usually called from generated code - -// UnpackMap gets a *yaml.Node if possible. -func UnpackMap(in *yaml.Node) (*yaml.Node, bool) { - if in == nil { - return nil, false - } - return in, true -} - -// SortedKeysForMap returns the sorted keys of a yamlv2.MapSlice. -func SortedKeysForMap(m *yaml.Node) []string { - keys := make([]string, 0) - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - keys = append(keys, m.Content[i].Value) - } - } - sort.Strings(keys) - return keys -} - -// MapHasKey returns true if a yamlv2.MapSlice contains a specified key. -func MapHasKey(m *yaml.Node, key string) bool { - if m == nil { - return false - } - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - itemKey := m.Content[i].Value - if key == itemKey { - return true - } - } - } - return false -} - -// MapValueForKey gets the value of a map value for a specified key. -func MapValueForKey(m *yaml.Node, key string) *yaml.Node { - if m == nil { - return nil - } - if m.Kind == yaml.MappingNode { - for i := 0; i < len(m.Content); i += 2 { - itemKey := m.Content[i].Value - if key == itemKey { - return m.Content[i+1] - } - } - } - return nil -} - -// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. -func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { - stringArray := make([]string, 0) - for _, item := range interfaceArray { - v, ok := item.(string) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// SequenceNodeForNode returns a node if it is a SequenceNode. -func SequenceNodeForNode(node *yaml.Node) (*yaml.Node, bool) { - if node.Kind != yaml.SequenceNode { - return nil, false - } - return node, true -} - -// BoolForScalarNode returns the bool value of a node. -func BoolForScalarNode(node *yaml.Node) (bool, bool) { - if node == nil { - return false, false - } - if node.Kind == yaml.DocumentNode { - return BoolForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return false, false - } - if node.Tag != "!!bool" { - return false, false - } - v, err := strconv.ParseBool(node.Value) - if err != nil { - return false, false - } - return v, true -} - -// IntForScalarNode returns the integer value of a node. 
-func IntForScalarNode(node *yaml.Node) (int64, bool) { - if node == nil { - return 0, false - } - if node.Kind == yaml.DocumentNode { - return IntForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return 0, false - } - if node.Tag != "!!int" { - return 0, false - } - v, err := strconv.ParseInt(node.Value, 10, 64) - if err != nil { - return 0, false - } - return v, true -} - -// FloatForScalarNode returns the float value of a node. -func FloatForScalarNode(node *yaml.Node) (float64, bool) { - if node == nil { - return 0.0, false - } - if node.Kind == yaml.DocumentNode { - return FloatForScalarNode(node.Content[0]) - } - if node.Kind != yaml.ScalarNode { - return 0.0, false - } - if (node.Tag != "!!int") && (node.Tag != "!!float") { - return 0.0, false - } - v, err := strconv.ParseFloat(node.Value, 64) - if err != nil { - return 0.0, false - } - return v, true -} - -// StringForScalarNode returns the string value of a node. -func StringForScalarNode(node *yaml.Node) (string, bool) { - if node == nil { - return "", false - } - if node.Kind == yaml.DocumentNode { - return StringForScalarNode(node.Content[0]) - } - switch node.Kind { - case yaml.ScalarNode: - switch node.Tag { - case "!!int": - return node.Value, true - case "!!str": - return node.Value, true - case "!!null": - return "", true - default: - return "", false - } - default: - return "", false - } -} - -// StringArrayForSequenceNode converts a sequence node to an array of strings, if possible. -func StringArrayForSequenceNode(node *yaml.Node) []string { - stringArray := make([]string, 0) - for _, item := range node.Content { - v, ok := StringForScalarNode(item) - if ok { - stringArray = append(stringArray, v) - } - } - return stringArray -} - -// MissingKeysInMap identifies which keys from a list of required keys are not in a map. -func MissingKeysInMap(m *yaml.Node, requiredKeys []string) []string { - missingKeys := make([]string, 0) - for _, k := range requiredKeys { - if !MapHasKey(m, k) { - missingKeys = append(missingKeys, k) - } - } - return missingKeys -} - -// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. -func InvalidKeysInMap(m *yaml.Node, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { - invalidKeys := make([]string, 0) - if m == nil || m.Kind != yaml.MappingNode { - return invalidKeys - } - for i := 0; i < len(m.Content); i += 2 { - key := m.Content[i].Value - found := false - // does the key match an allowed key? - for _, allowedKey := range allowedKeys { - if key == allowedKey { - found = true - break - } - } - if !found { - // does the key match an allowed pattern? - for _, allowedPattern := range allowedPatterns { - if allowedPattern.MatchString(key) { - found = true - break - } - } - if !found { - invalidKeys = append(invalidKeys, key) - } - } - } - return invalidKeys -} - -// NewMappingNode creates a new Mapping node. -func NewMappingNode() *yaml.Node { - return &yaml.Node{ - Kind: yaml.MappingNode, - Content: make([]*yaml.Node, 0), - } -} - -// NewSequenceNode creates a new Sequence node. -func NewSequenceNode() *yaml.Node { - node := &yaml.Node{ - Kind: yaml.SequenceNode, - Content: make([]*yaml.Node, 0), - } - return node -} - -// NewScalarNodeForString creates a new node to hold a string. -func NewScalarNodeForString(s string) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: s, - } -} - -// NewSequenceNodeForStringArray creates a new node to hold an array of strings. 
-func NewSequenceNodeForStringArray(strings []string) *yaml.Node { - node := &yaml.Node{ - Kind: yaml.SequenceNode, - Content: make([]*yaml.Node, 0), - } - for _, s := range strings { - node.Content = append(node.Content, NewScalarNodeForString(s)) - } - return node -} - -// NewScalarNodeForBool creates a new node to hold a bool. -func NewScalarNodeForBool(b bool) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!bool", - Value: fmt.Sprintf("%t", b), - } -} - -// NewScalarNodeForFloat creates a new node to hold a float. -func NewScalarNodeForFloat(f float64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!float", - Value: fmt.Sprintf("%g", f), - } -} - -// NewScalarNodeForInt creates a new node to hold an integer. -func NewScalarNodeForInt(i int64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: fmt.Sprintf("%d", i), - } -} - -// PluralProperties returns the string "properties" pluralized. -func PluralProperties(count int) string { - if count == 1 { - return "property" - } - return "properties" -} - -// StringArrayContainsValue returns true if a string array contains a specified value. -func StringArrayContainsValue(array []string, value string) bool { - for _, item := range array { - if item == value { - return true - } - } - return false -} - -// StringArrayContainsValues returns true if a string array contains all of a list of specified values. -func StringArrayContainsValues(array []string, values []string) bool { - for _, value := range values { - if !StringArrayContainsValue(array, value) { - return false - } - } - return true -} - -// StringValue returns the string value of an item. -func StringValue(item interface{}) (value string, ok bool) { - value, ok = item.(string) - if ok { - return value, ok - } - intValue, ok := item.(int) - if ok { - return strconv.Itoa(intValue), true - } - return "", false -} - -// Description returns a human-readable represention of an item. -func Description(item interface{}) string { - value, ok := item.(*yaml.Node) - if ok { - return jsonschema.Render(value) - } - return fmt.Sprintf("%+v", item) -} - -// Display returns a description of a node for use in error messages. -func Display(node *yaml.Node) string { - switch node.Kind { - case yaml.ScalarNode: - switch node.Tag { - case "!!str": - return fmt.Sprintf("%s (string)", node.Value) - } - } - return fmt.Sprintf("%+v (%T)", node, node) -} - -// Marshal creates a yaml version of a structure in our preferred style -func Marshal(in *yaml.Node) []byte { - clearStyle(in) - //bytes, _ := yaml.Marshal(&yaml.Node{Kind: yaml.DocumentNode, Content: []*yaml.Node{in}}) - bytes, _ := yaml.Marshal(in) - - return bytes -} - -func clearStyle(node *yaml.Node) { - node.Style = 0 - for _, c := range node.Content { - clearStyle(c) - } -} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go deleted file mode 100644 index ce9fcc4..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/main.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package compiler provides support functions to generated compiler code. -package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go deleted file mode 100644 index be0e8b4..0000000 --- a/vendor/github.com/googleapis/gnostic/compiler/reader.go +++ /dev/null @@ -1,307 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package compiler - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/url" - "path/filepath" - "strings" - "sync" - - yaml "gopkg.in/yaml.v3" -) - -var verboseReader = false - -var fileCache map[string][]byte -var infoCache map[string]*yaml.Node - -var fileCacheEnable = true -var infoCacheEnable = true - -// These locks are used to synchronize accesses to the fileCache and infoCache -// maps (above). They are global state and can throw thread-related errors -// when modified from separate goroutines. The general strategy is to protect -// all public functions in this file with mutex Lock() calls. As a result, to -// avoid deadlock, these public functions should not call other public -// functions, so some public functions have private equivalents. -// In the future, we might consider replacing the maps with sync.Map and -// eliminating these mutexes. -var fileCacheMutex sync.Mutex -var infoCacheMutex sync.Mutex - -func initializeFileCache() { - if fileCache == nil { - fileCache = make(map[string][]byte, 0) - } -} - -func initializeInfoCache() { - if infoCache == nil { - infoCache = make(map[string]*yaml.Node, 0) - } -} - -// EnableFileCache turns on file caching. -func EnableFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCacheEnable = true -} - -// EnableInfoCache turns on parsed info caching. -func EnableInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCacheEnable = true -} - -// DisableFileCache turns off file caching. -func DisableFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCacheEnable = false -} - -// DisableInfoCache turns off parsed info caching. -func DisableInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCacheEnable = false -} - -// RemoveFromFileCache removes an entry from the file cache. -func RemoveFromFileCache(fileurl string) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - if !fileCacheEnable { - return - } - initializeFileCache() - delete(fileCache, fileurl) -} - -// RemoveFromInfoCache removes an entry from the info cache. 
-func RemoveFromInfoCache(filename string) { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - if !infoCacheEnable { - return - } - initializeInfoCache() - delete(infoCache, filename) -} - -// GetInfoCache returns the info cache map. -func GetInfoCache() map[string]*yaml.Node { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - if infoCache == nil { - initializeInfoCache() - } - return infoCache -} - -// ClearFileCache clears the file cache. -func ClearFileCache() { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - fileCache = make(map[string][]byte, 0) -} - -// ClearInfoCache clears the info cache. -func ClearInfoCache() { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - infoCache = make(map[string]*yaml.Node) -} - -// ClearCaches clears all caches. -func ClearCaches() { - ClearFileCache() - ClearInfoCache() -} - -// FetchFile gets a specified file from the local filesystem or a remote location. -func FetchFile(fileurl string) ([]byte, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - return fetchFile(fileurl) -} - -func fetchFile(fileurl string) ([]byte, error) { - var bytes []byte - initializeFileCache() - if fileCacheEnable { - bytes, ok := fileCache[fileurl] - if ok { - if verboseReader { - log.Printf("Cache hit %s", fileurl) - } - return bytes, nil - } - if verboseReader { - log.Printf("Fetching %s", fileurl) - } - } - response, err := http.Get(fileurl) - if err != nil { - return nil, err - } - defer response.Body.Close() - if response.StatusCode != 200 { - return nil, fmt.Errorf("Error downloading %s: %s", fileurl, response.Status) - } - bytes, err = ioutil.ReadAll(response.Body) - if fileCacheEnable && err == nil { - fileCache[fileurl] = bytes - } - return bytes, err -} - -// ReadBytesForFile reads the bytes of a file. -func ReadBytesForFile(filename string) ([]byte, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - return readBytesForFile(filename) -} - -func readBytesForFile(filename string) ([]byte, error) { - // is the filename a url? - fileurl, _ := url.Parse(filename) - if fileurl.Scheme != "" { - // yes, fetch it - bytes, err := fetchFile(filename) - if err != nil { - return nil, err - } - return bytes, nil - } - // no, it's a local filename - bytes, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - return bytes, nil -} - -// ReadInfoFromBytes unmarshals a file as a *yaml.Node. -func ReadInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - return readInfoFromBytes(filename, bytes) -} - -func readInfoFromBytes(filename string, bytes []byte) (*yaml.Node, error) { - initializeInfoCache() - if infoCacheEnable { - cachedInfo, ok := infoCache[filename] - if ok { - if verboseReader { - log.Printf("Cache hit info for file %s", filename) - } - return cachedInfo, nil - } - if verboseReader { - log.Printf("Reading info for file %s", filename) - } - } - var info yaml.Node - err := yaml.Unmarshal(bytes, &info) - if err != nil { - return nil, err - } - if infoCacheEnable && len(filename) > 0 { - infoCache[filename] = &info - } - return &info, nil -} - -// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. 
-func ReadInfoForRef(basefile string, ref string) (*yaml.Node, error) { - fileCacheMutex.Lock() - defer fileCacheMutex.Unlock() - infoCacheMutex.Lock() - defer infoCacheMutex.Unlock() - initializeInfoCache() - if infoCacheEnable { - info, ok := infoCache[ref] - if ok { - if verboseReader { - log.Printf("Cache hit for ref %s#%s", basefile, ref) - } - return info, nil - } - if verboseReader { - log.Printf("Reading info for ref %s#%s", basefile, ref) - } - } - basedir, _ := filepath.Split(basefile) - parts := strings.Split(ref, "#") - var filename string - if parts[0] != "" { - filename = parts[0] - if _, err := url.ParseRequestURI(parts[0]); err != nil { - // It is not an URL, so the file is local - filename = basedir + parts[0] - } - } else { - filename = basefile - } - bytes, err := readBytesForFile(filename) - if err != nil { - return nil, err - } - info, err := readInfoFromBytes(filename, bytes) - if info != nil && info.Kind == yaml.DocumentNode { - info = info.Content[0] - } - if err != nil { - log.Printf("File error: %v\n", err) - } else { - if info == nil { - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - if len(parts) > 1 { - path := strings.Split(parts[1], "/") - for i, key := range path { - if i > 0 { - m := info - if true { - found := false - for i := 0; i < len(m.Content); i += 2 { - if m.Content[i].Value == key { - info = m.Content[i+1] - found = true - } - } - if !found { - infoCache[ref] = nil - return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) - } - } - } - } - } - } - if infoCacheEnable { - infoCache[ref] = info - } - return info, nil -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md deleted file mode 100644 index 4b5d63e..0000000 --- a/vendor/github.com/googleapis/gnostic/extensions/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Extensions - -**Extension Support is experimental.** - -This directory contains support code for building Gnostic extensio handlers and -associated examples. - -Extension handlers can be used to compile vendor or specification extensions -into protocol buffer structures. - -Like plugins, extension handlers are built as separate executables. Extension -bodies are written to extension handlers as serialized -ExtensionHandlerRequests. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go deleted file mode 100644 index 4198fa1..0000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go +++ /dev/null @@ -1,465 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.23.0 -// protoc v3.12.3 -// source: extensions/extension.proto - -package gnostic_extension_v1 - -import ( - proto "github.com/golang/protobuf/proto" - any "github.com/golang/protobuf/ptypes/any" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -// This is a compile-time assertion that a sufficiently up-to-date version -// of the legacy proto package is being used. -const _ = proto.ProtoPackageIsVersion4 - -// The version number of Gnostic. -type Version struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Major int32 `protobuf:"varint,1,opt,name=major,proto3" json:"major,omitempty"` - Minor int32 `protobuf:"varint,2,opt,name=minor,proto3" json:"minor,omitempty"` - Patch int32 `protobuf:"varint,3,opt,name=patch,proto3" json:"patch,omitempty"` - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - Suffix string `protobuf:"bytes,4,opt,name=suffix,proto3" json:"suffix,omitempty"` -} - -func (x *Version) Reset() { - *x = Version{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Version) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Version) ProtoMessage() {} - -func (x *Version) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Version.ProtoReflect.Descriptor instead. -func (*Version) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{0} -} - -func (x *Version) GetMajor() int32 { - if x != nil { - return x.Major - } - return 0 -} - -func (x *Version) GetMinor() int32 { - if x != nil { - return x.Minor - } - return 0 -} - -func (x *Version) GetPatch() int32 { - if x != nil { - return x.Patch - } - return 0 -} - -func (x *Version) GetSuffix() string { - if x != nil { - return x.Suffix - } - return "" -} - -// An encoded Request is written to the ExtensionHandler's stdin. -type ExtensionHandlerRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The extension to process. - Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper,proto3" json:"wrapper,omitempty"` - // The version number of Gnostic. 
- CompilerVersion *Version `protobuf:"bytes,2,opt,name=compiler_version,json=compilerVersion,proto3" json:"compiler_version,omitempty"` -} - -func (x *ExtensionHandlerRequest) Reset() { - *x = ExtensionHandlerRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionHandlerRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionHandlerRequest) ProtoMessage() {} - -func (x *ExtensionHandlerRequest) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionHandlerRequest.ProtoReflect.Descriptor instead. -func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{1} -} - -func (x *ExtensionHandlerRequest) GetWrapper() *Wrapper { - if x != nil { - return x.Wrapper - } - return nil -} - -func (x *ExtensionHandlerRequest) GetCompilerVersion() *Version { - if x != nil { - return x.CompilerVersion - } - return nil -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -type ExtensionHandlerResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // true if the extension is handled by the extension handler; false otherwise - Handled bool `protobuf:"varint,1,opt,name=handled,proto3" json:"handled,omitempty"` - // Error message(s). If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - Errors []string `protobuf:"bytes,2,rep,name=errors,proto3" json:"errors,omitempty"` - // text output - Value *any.Any `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *ExtensionHandlerResponse) Reset() { - *x = ExtensionHandlerResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ExtensionHandlerResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ExtensionHandlerResponse) ProtoMessage() {} - -func (x *ExtensionHandlerResponse) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ExtensionHandlerResponse.ProtoReflect.Descriptor instead. 
-func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{2} -} - -func (x *ExtensionHandlerResponse) GetHandled() bool { - if x != nil { - return x.Handled - } - return false -} - -func (x *ExtensionHandlerResponse) GetErrors() []string { - if x != nil { - return x.Errors - } - return nil -} - -func (x *ExtensionHandlerResponse) GetValue() *any.Any { - if x != nil { - return x.Value - } - return nil -} - -type Wrapper struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // version of the OpenAPI specification in which this extension was written. - Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"` - // Name of the extension. - ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName,proto3" json:"extension_name,omitempty"` - // YAML-formatted extension value. - Yaml string `protobuf:"bytes,3,opt,name=yaml,proto3" json:"yaml,omitempty"` -} - -func (x *Wrapper) Reset() { - *x = Wrapper{} - if protoimpl.UnsafeEnabled { - mi := &file_extensions_extension_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Wrapper) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Wrapper) ProtoMessage() {} - -func (x *Wrapper) ProtoReflect() protoreflect.Message { - mi := &file_extensions_extension_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Wrapper.ProtoReflect.Descriptor instead. -func (*Wrapper) Descriptor() ([]byte, []int) { - return file_extensions_extension_proto_rawDescGZIP(), []int{3} -} - -func (x *Wrapper) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *Wrapper) GetExtensionName() string { - if x != nil { - return x.ExtensionName - } - return "" -} - -func (x *Wrapper) GetYaml() string { - if x != nil { - return x.Yaml - } - return "" -} - -var File_extensions_extension_proto protoreflect.FileDescriptor - -var file_extensions_extension_proto_rawDesc = []byte{ - 0x0a, 0x1a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x65, 0x78, 0x74, - 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x14, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, - 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x63, 0x0a, - 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x6d, 0x61, 0x6a, 0x6f, - 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, 0x61, 0x6a, 0x6f, 0x72, 0x12, 0x14, - 0x0a, 0x05, 0x6d, 0x69, 0x6e, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6d, - 0x69, 0x6e, 0x6f, 0x72, 0x12, 0x14, 0x0a, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x05, 0x52, 0x05, 0x70, 0x61, 0x74, 0x63, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x75, - 0x66, 0x66, 0x69, 0x78, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x75, 0x66, 0x66, - 0x69, 0x78, 0x22, 0x9c, 0x01, 0x0a, 0x17, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, - 0x48, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x37, - 0x0a, 0x07, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, - 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x57, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x52, 0x07, - 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x48, 0x0a, 0x10, 0x63, 0x6f, 0x6d, 0x70, 0x69, - 0x6c, 0x65, 0x72, 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x65, 0x78, 0x74, 0x65, - 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x0f, 0x63, 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x72, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x22, 0x78, 0x0a, 0x18, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x48, 0x61, - 0x6e, 0x64, 0x6c, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, - 0x68, 0x61, 0x6e, 0x64, 0x6c, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x12, - 0x2a, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x5e, 0x0a, 0x07, 0x57, - 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x25, 0x0a, 0x0e, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, - 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x79, 0x61, 0x6d, 0x6c, 0x42, 0x4b, 0x0a, 0x0e, 0x6f, - 0x72, 0x67, 0x2e, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x2e, 0x76, 0x31, 0x42, 0x10, 0x47, - 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x45, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x50, - 0x01, 0x5a, 0x1f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3b, 0x67, 0x6e, - 0x6f, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x5f, - 0x76, 0x31, 0xa2, 0x02, 0x03, 0x47, 0x4e, 0x58, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_extensions_extension_proto_rawDescOnce sync.Once - file_extensions_extension_proto_rawDescData = file_extensions_extension_proto_rawDesc -) - -func file_extensions_extension_proto_rawDescGZIP() []byte { - file_extensions_extension_proto_rawDescOnce.Do(func() { - file_extensions_extension_proto_rawDescData = protoimpl.X.CompressGZIP(file_extensions_extension_proto_rawDescData) - }) - return file_extensions_extension_proto_rawDescData -} - -var file_extensions_extension_proto_msgTypes = make([]protoimpl.MessageInfo, 4) -var file_extensions_extension_proto_goTypes = []interface{}{ - (*Version)(nil), // 0: gnostic.extension.v1.Version - (*ExtensionHandlerRequest)(nil), // 1: gnostic.extension.v1.ExtensionHandlerRequest - (*ExtensionHandlerResponse)(nil), // 2: gnostic.extension.v1.ExtensionHandlerResponse - (*Wrapper)(nil), // 3: 
gnostic.extension.v1.Wrapper - (*any.Any)(nil), // 4: google.protobuf.Any -} -var file_extensions_extension_proto_depIdxs = []int32{ - 3, // 0: gnostic.extension.v1.ExtensionHandlerRequest.wrapper:type_name -> gnostic.extension.v1.Wrapper - 0, // 1: gnostic.extension.v1.ExtensionHandlerRequest.compiler_version:type_name -> gnostic.extension.v1.Version - 4, // 2: gnostic.extension.v1.ExtensionHandlerResponse.value:type_name -> google.protobuf.Any - 3, // [3:3] is the sub-list for method output_type - 3, // [3:3] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_extensions_extension_proto_init() } -func file_extensions_extension_proto_init() { - if File_extensions_extension_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_extensions_extension_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Version); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ExtensionHandlerResponse); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_extensions_extension_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Wrapper); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_extensions_extension_proto_rawDesc, - NumEnums: 0, - NumMessages: 4, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_extensions_extension_proto_goTypes, - DependencyIndexes: file_extensions_extension_proto_depIdxs, - MessageInfos: file_extensions_extension_proto_msgTypes, - }.Build() - File_extensions_extension_proto = out.File - file_extensions_extension_proto_rawDesc = nil - file_extensions_extension_proto_goTypes = nil - file_extensions_extension_proto_depIdxs = nil -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto deleted file mode 100644 index 8ac1faf..0000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extension.proto +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -syntax = "proto3"; - -package gnostic.extension.v1; - -import "google/protobuf/any.proto"; - -// This option lets the proto compiler generate Java code inside the package -// name (see below) instead of inside an outer class. It creates a simpler -// developer experience by reducing one-level of name nesting and be -// consistent with most programming languages that don't support outer classes. -option java_multiple_files = true; - -// The Java outer classname should be the filename in UpperCamelCase. This -// class is only used to hold proto descriptor, so developers don't need to -// work with it directly. -option java_outer_classname = "GnosticExtension"; - -// The Java package name must be proto package name with proper prefix. -option java_package = "org.gnostic.v1"; - -// A reasonable prefix for the Objective-C symbols generated from the package. -// It should at a minimum be 3 characters long, all uppercase, and convention -// is to use an abbreviation of the package name. Something short, but -// hopefully unique enough to not conflict with things that may come along in -// the future. 'GPB' is reserved for the protocol buffer implementation itself. -// -option objc_class_prefix = "GNX"; // "Gnostic Extension" - -// The Go package name. -option go_package = "extensions;gnostic_extension_v1"; - -// The version number of Gnostic. -message Version { - int32 major = 1; - int32 minor = 2; - int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - string suffix = 4; -} - -// An encoded Request is written to the ExtensionHandler's stdin. -message ExtensionHandlerRequest { - - // The extension to process. - Wrapper wrapper = 1; - - // The version number of Gnostic. - Version compiler_version = 2; -} - -// The extensions writes an encoded ExtensionHandlerResponse to stdout. -message ExtensionHandlerResponse { - - // true if the extension is handled by the extension handler; false otherwise - bool handled = 1; - - // Error message(s). If non-empty, the extension handling failed. - // The extension handler process should exit with status code zero - // even if it reports an error in this way. - // - // This should be used to indicate errors which prevent the extension from - // operating as intended. Errors which indicate a problem in gnostic - // itself -- such as the input Document being unparseable -- should be - // reported by writing a message to stderr and exiting with a non-zero - // status code. - repeated string errors = 2; - - // text output - google.protobuf.Any value = 3; -} - -message Wrapper { - // version of the OpenAPI specification in which this extension was written. - string version = 1; - - // Name of the extension. - string extension_name = 2; - - // YAML-formatted extension value. - string yaml = 3; -} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go deleted file mode 100644 index ec8afd0..0000000 --- a/vendor/github.com/googleapis/gnostic/extensions/extensions.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package gnostic_extension_v1 - -import ( - "io/ioutil" - "log" - "os" - - "github.com/golang/protobuf/proto" - "github.com/golang/protobuf/ptypes" -) - -type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) - -// Main implements the main program of an extension handler. -func Main(handler extensionHandler) { - // unpack the request - data, err := ioutil.ReadAll(os.Stdin) - if err != nil { - log.Println("File error:", err.Error()) - os.Exit(1) - } - if len(data) == 0 { - log.Println("No input data.") - os.Exit(1) - } - request := &ExtensionHandlerRequest{} - err = proto.Unmarshal(data, request) - if err != nil { - log.Println("Input error:", err.Error()) - os.Exit(1) - } - // call the handler - handled, output, err := handler(request.Wrapper.ExtensionName, request.Wrapper.Yaml) - // respond with the output of the handler - response := &ExtensionHandlerResponse{ - Handled: false, // default assumption - Errors: make([]string, 0), - } - if err != nil { - response.Errors = append(response.Errors, err.Error()) - } else if handled { - response.Handled = true - response.Value, err = ptypes.MarshalAny(output) - if err != nil { - response.Errors = append(response.Errors, err.Error()) - } - } - responseBytes, _ := proto.Marshal(response) - os.Stdout.Write(responseBytes) -} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/README.md b/vendor/github.com/googleapis/gnostic/jsonschema/README.md deleted file mode 100644 index 6793c51..0000000 --- a/vendor/github.com/googleapis/gnostic/jsonschema/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# jsonschema - -This directory contains code for reading, writing, and manipulating JSON -schemas. diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/base.go b/vendor/github.com/googleapis/gnostic/jsonschema/base.go deleted file mode 100644 index 0af8b14..0000000 --- a/vendor/github.com/googleapis/gnostic/jsonschema/base.go +++ /dev/null @@ -1,84 +0,0 @@ - -// THIS FILE IS AUTOMATICALLY GENERATED. 
- -package jsonschema - -import ( - "encoding/base64" -) - -func baseSchemaBytes() ([]byte, error){ - return base64.StdEncoding.DecodeString( -`ewogICAgImlkIjogImh0dHA6Ly9qc29uLXNjaGVtYS5vcmcvZHJhZnQtMDQvc2NoZW1hIyIsCiAgICAi -JHNjaGVtYSI6ICJodHRwOi8vanNvbi1zY2hlbWEub3JnL2RyYWZ0LTA0L3NjaGVtYSMiLAogICAgImRl -c2NyaXB0aW9uIjogIkNvcmUgc2NoZW1hIG1ldGEtc2NoZW1hIiwKICAgICJkZWZpbml0aW9ucyI6IHsK -ICAgICAgICAic2NoZW1hQXJyYXkiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImFycmF5IiwKICAgICAg -ICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjIiB9CiAg -ICAgICAgfSwKICAgICAgICAicG9zaXRpdmVJbnRlZ2VyIjogewogICAgICAgICAgICAidHlwZSI6ICJp -bnRlZ2VyIiwKICAgICAgICAgICAgIm1pbmltdW0iOiAwCiAgICAgICAgfSwKICAgICAgICAicG9zaXRp -dmVJbnRlZ2VyRGVmYXVsdDAiOiB7CiAgICAgICAgICAgICJhbGxPZiI6IFsgeyAiJHJlZiI6ICIjL2Rl -ZmluaXRpb25zL3Bvc2l0aXZlSW50ZWdlciIgfSwgeyAiZGVmYXVsdCI6IDAgfSBdCiAgICAgICAgfSwK -ICAgICAgICAic2ltcGxlVHlwZXMiOiB7CiAgICAgICAgICAgICJlbnVtIjogWyAiYXJyYXkiLCAiYm9v -bGVhbiIsICJpbnRlZ2VyIiwgIm51bGwiLCAibnVtYmVyIiwgIm9iamVjdCIsICJzdHJpbmciIF0KICAg -ICAgICB9LAogICAgICAgICJzdHJpbmdBcnJheSI6IHsKICAgICAgICAgICAgInR5cGUiOiAiYXJyYXki -LAogICAgICAgICAgICAiaXRlbXMiOiB7ICJ0eXBlIjogInN0cmluZyIgfSwKICAgICAgICAgICAgIm1p -bkl0ZW1zIjogMSwKICAgICAgICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0KICAgIH0s -CiAgICAidHlwZSI6ICJvYmplY3QiLAogICAgInByb3BlcnRpZXMiOiB7CiAgICAgICAgImlkIjogewog -ICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAogICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAg -ICAgICB9LAogICAgICAgICIkc2NoZW1hIjogewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciLAog -ICAgICAgICAgICAiZm9ybWF0IjogInVyaSIKICAgICAgICB9LAogICAgICAgICJ0aXRsZSI6IHsKICAg -ICAgICAgICAgInR5cGUiOiAic3RyaW5nIgogICAgICAgIH0sCiAgICAgICAgImRlc2NyaXB0aW9uIjog -ewogICAgICAgICAgICAidHlwZSI6ICJzdHJpbmciCiAgICAgICAgfSwKICAgICAgICAiZGVmYXVsdCI6 -IHt9LAogICAgICAgICJtdWx0aXBsZU9mIjogewogICAgICAgICAgICAidHlwZSI6ICJudW1iZXIiLAog -ICAgICAgICAgICAibWluaW11bSI6IDAsCiAgICAgICAgICAgICJleGNsdXNpdmVNaW5pbXVtIjogdHJ1 -ZQogICAgICAgIH0sCiAgICAgICAgIm1heGltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJl -ciIKICAgICAgICB9LAogICAgICAgICJleGNsdXNpdmVNYXhpbXVtIjogewogICAgICAgICAgICAidHlw -ZSI6ICJib29sZWFuIiwKICAgICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAg -ICAgIm1pbmltdW0iOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm51bWJlciIKICAgICAgICB9LAogICAg -ICAgICJleGNsdXNpdmVNaW5pbXVtIjogewogICAgICAgICAgICAidHlwZSI6ICJib29sZWFuIiwKICAg -ICAgICAgICAgImRlZmF1bHQiOiBmYWxzZQogICAgICAgIH0sCiAgICAgICAgIm1heExlbmd0aCI6IHsg -IiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pbkxlbmd0 -aCI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAg -ICAgICAicGF0dGVybiI6IHsKICAgICAgICAgICAgInR5cGUiOiAic3RyaW5nIiwKICAgICAgICAgICAg -ImZvcm1hdCI6ICJyZWdleCIKICAgICAgICB9LAogICAgICAgICJhZGRpdGlvbmFsSXRlbXMiOiB7CiAg -ICAgICAgICAgICJhbnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgInR5cGUiOiAiYm9vbGVhbiIgfSwK -ICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfQogICAgICAgICAgICBdLAogICAgICAgICAgICAi -ZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAiaXRlbXMiOiB7CiAgICAgICAgICAgICJhbnlP -ZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgICAgIHsgIiRy -ZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIgfQogICAgICAgICAgICBdLAogICAgICAgICAg -ICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAibWF4SXRlbXMiOiB7ICIkcmVmIjogIiMv -ZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyIiB9LAogICAgICAgICJtaW5JdGVtcyI6IHsgIiRyZWYi -OiAiIy9kZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXJEZWZhdWx0MCIgfSwKICAgICAgICAidW5pcXVl -SXRlbXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogImJvb2xlYW4iLAogICAgICAgICAgICAiZGVmYXVs 
-dCI6IGZhbHNlCiAgICAgICAgfSwKICAgICAgICAibWF4UHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIy9k -ZWZpbml0aW9ucy9wb3NpdGl2ZUludGVnZXIiIH0sCiAgICAgICAgIm1pblByb3BlcnRpZXMiOiB7ICIk -cmVmIjogIiMvZGVmaW5pdGlvbnMvcG9zaXRpdmVJbnRlZ2VyRGVmYXVsdDAiIH0sCiAgICAgICAgInJl -cXVpcmVkIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3N0cmluZ0FycmF5IiB9LAogICAgICAgICJh -ZGRpdGlvbmFsUHJvcGVydGllcyI6IHsKICAgICAgICAgICAgImFueU9mIjogWwogICAgICAgICAgICAg -ICAgeyAidHlwZSI6ICJib29sZWFuIiB9LAogICAgICAgICAgICAgICAgeyAiJHJlZiI6ICIjIiB9CiAg -ICAgICAgICAgIF0sCiAgICAgICAgICAgICJkZWZhdWx0Ijoge30KICAgICAgICB9LAogICAgICAgICJk -ZWZpbml0aW9ucyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2JqZWN0IiwKICAgICAgICAgICAgImFk -ZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9LAogICAgICAgICAgICAiZGVmYXVsdCI6 -IHt9CiAgICAgICAgfSwKICAgICAgICAicHJvcGVydGllcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAi -b2JqZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogeyAiJHJlZiI6ICIjIiB9 -LAogICAgICAgICAgICAiZGVmYXVsdCI6IHt9CiAgICAgICAgfSwKICAgICAgICAicGF0dGVyblByb3Bl -cnRpZXMiOiB7CiAgICAgICAgICAgICJ0eXBlIjogIm9iamVjdCIsCiAgICAgICAgICAgICJhZGRpdGlv -bmFsUHJvcGVydGllcyI6IHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAgImRlZmF1bHQiOiB7fQog -ICAgICAgIH0sCiAgICAgICAgImRlcGVuZGVuY2llcyI6IHsKICAgICAgICAgICAgInR5cGUiOiAib2Jq -ZWN0IiwKICAgICAgICAgICAgImFkZGl0aW9uYWxQcm9wZXJ0aWVzIjogewogICAgICAgICAgICAgICAg -ImFueU9mIjogWwogICAgICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIyIgfSwKICAgICAgICAgICAg -ICAgICAgICB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc3RyaW5nQXJyYXkiIH0KICAgICAgICAgICAg -ICAgIF0KICAgICAgICAgICAgfQogICAgICAgIH0sCiAgICAgICAgImVudW0iOiB7CiAgICAgICAgICAg -ICJ0eXBlIjogImFycmF5IiwKICAgICAgICAgICAgIm1pbkl0ZW1zIjogMSwKICAgICAgICAgICAgInVu -aXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgIH0sCiAgICAgICAgInR5cGUiOiB7CiAgICAgICAgICAgICJh -bnlPZiI6IFsKICAgICAgICAgICAgICAgIHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zaW1wbGVUeXBl -cyIgfSwKICAgICAgICAgICAgICAgIHsKICAgICAgICAgICAgICAgICAgICAidHlwZSI6ICJhcnJheSIs -CiAgICAgICAgICAgICAgICAgICAgIml0ZW1zIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NpbXBs -ZVR5cGVzIiB9LAogICAgICAgICAgICAgICAgICAgICJtaW5JdGVtcyI6IDEsCiAgICAgICAgICAgICAg -ICAgICAgInVuaXF1ZUl0ZW1zIjogdHJ1ZQogICAgICAgICAgICAgICAgfQogICAgICAgICAgICBdCiAg -ICAgICAgfSwKICAgICAgICAiYWxsT2YiOiB7ICIkcmVmIjogIiMvZGVmaW5pdGlvbnMvc2NoZW1hQXJy -YXkiIH0sCiAgICAgICAgImFueU9mIjogeyAiJHJlZiI6ICIjL2RlZmluaXRpb25zL3NjaGVtYUFycmF5 -IiB9LAogICAgICAgICJvbmVPZiI6IHsgIiRyZWYiOiAiIy9kZWZpbml0aW9ucy9zY2hlbWFBcnJheSIg -fSwKICAgICAgICAibm90IjogeyAiJHJlZiI6ICIjIiB9CiAgICB9LAogICAgImRlcGVuZGVuY2llcyI6 -IHsKICAgICAgICAiZXhjbHVzaXZlTWF4aW11bSI6IFsgIm1heGltdW0iIF0sCiAgICAgICAgImV4Y2x1 -c2l2ZU1pbmltdW0iOiBbICJtaW5pbXVtIiBdCiAgICB9LAogICAgImRlZmF1bHQiOiB7fQp9Cg==`)} \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/display.go b/vendor/github.com/googleapis/gnostic/jsonschema/display.go deleted file mode 100644 index 028a760..0000000 --- a/vendor/github.com/googleapis/gnostic/jsonschema/display.go +++ /dev/null @@ -1,229 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package jsonschema - -import ( - "fmt" - "strings" -) - -// -// DISPLAY -// The following methods display Schemas. -// - -// Description returns a string representation of a string or string array. -func (s *StringOrStringArray) Description() string { - if s.String != nil { - return *s.String - } - if s.StringArray != nil { - return strings.Join(*s.StringArray, ", ") - } - return "" -} - -// Returns a string representation of a Schema. -func (schema *Schema) String() string { - return schema.describeSchema("") -} - -// Helper: Returns a string representation of a Schema indented by a specified string. -func (schema *Schema) describeSchema(indent string) string { - result := "" - if schema.Schema != nil { - result += indent + "$schema: " + *(schema.Schema) + "\n" - } - if schema.ID != nil { - result += indent + "id: " + *(schema.ID) + "\n" - } - if schema.MultipleOf != nil { - result += indent + fmt.Sprintf("multipleOf: %+v\n", *(schema.MultipleOf)) - } - if schema.Maximum != nil { - result += indent + fmt.Sprintf("maximum: %+v\n", *(schema.Maximum)) - } - if schema.ExclusiveMaximum != nil { - result += indent + fmt.Sprintf("exclusiveMaximum: %+v\n", *(schema.ExclusiveMaximum)) - } - if schema.Minimum != nil { - result += indent + fmt.Sprintf("minimum: %+v\n", *(schema.Minimum)) - } - if schema.ExclusiveMinimum != nil { - result += indent + fmt.Sprintf("exclusiveMinimum: %+v\n", *(schema.ExclusiveMinimum)) - } - if schema.MaxLength != nil { - result += indent + fmt.Sprintf("maxLength: %+v\n", *(schema.MaxLength)) - } - if schema.MinLength != nil { - result += indent + fmt.Sprintf("minLength: %+v\n", *(schema.MinLength)) - } - if schema.Pattern != nil { - result += indent + fmt.Sprintf("pattern: %+v\n", *(schema.Pattern)) - } - if schema.AdditionalItems != nil { - s := schema.AdditionalItems.Schema - if s != nil { - result += indent + "additionalItems:\n" - result += s.describeSchema(indent + " ") - } else { - b := *(schema.AdditionalItems.Boolean) - result += indent + fmt.Sprintf("additionalItems: %+v\n", b) - } - } - if schema.Items != nil { - result += indent + "items:\n" - items := schema.Items - if items.SchemaArray != nil { - for i, s := range *(items.SchemaArray) { - result += indent + " " + fmt.Sprintf("%d", i) + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } else if items.Schema != nil { - result += items.Schema.describeSchema(indent + " " + " ") - } - } - if schema.MaxItems != nil { - result += indent + fmt.Sprintf("maxItems: %+v\n", *(schema.MaxItems)) - } - if schema.MinItems != nil { - result += indent + fmt.Sprintf("minItems: %+v\n", *(schema.MinItems)) - } - if schema.UniqueItems != nil { - result += indent + fmt.Sprintf("uniqueItems: %+v\n", *(schema.UniqueItems)) - } - if schema.MaxProperties != nil { - result += indent + fmt.Sprintf("maxProperties: %+v\n", *(schema.MaxProperties)) - } - if schema.MinProperties != nil { - result += indent + fmt.Sprintf("minProperties: %+v\n", *(schema.MinProperties)) - } - if schema.Required != nil { - result += indent + fmt.Sprintf("required: %+v\n", *(schema.Required)) - } - if schema.AdditionalProperties != nil { - s := schema.AdditionalProperties.Schema - if s != nil { - result += indent + "additionalProperties:\n" - result += s.describeSchema(indent + " ") - } else { - b := *(schema.AdditionalProperties.Boolean) - result += indent + fmt.Sprintf("additionalProperties: %+v\n", b) - } - } - if schema.Properties != nil { - result += indent + "properties:\n" - for _, pair := range *(schema.Properties) { - name := pair.Name 
- s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.PatternProperties != nil { - result += indent + "patternProperties:\n" - for _, pair := range *(schema.PatternProperties) { - name := pair.Name - s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.Dependencies != nil { - result += indent + "dependencies:\n" - for _, pair := range *(schema.Dependencies) { - name := pair.Name - schemaOrStringArray := pair.Value - s := schemaOrStringArray.Schema - if s != nil { - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } else { - a := schemaOrStringArray.StringArray - if a != nil { - result += indent + " " + name + ":\n" - for _, s2 := range *a { - result += indent + " " + " " + s2 + "\n" - } - } - } - - } - } - if schema.Enumeration != nil { - result += indent + "enumeration:\n" - for _, value := range *(schema.Enumeration) { - if value.String != nil { - result += indent + " " + fmt.Sprintf("%+v\n", *value.String) - } else { - result += indent + " " + fmt.Sprintf("%+v\n", *value.Bool) - } - } - } - if schema.Type != nil { - result += indent + fmt.Sprintf("type: %+v\n", schema.Type.Description()) - } - if schema.AllOf != nil { - result += indent + "allOf:\n" - for _, s := range *(schema.AllOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.AnyOf != nil { - result += indent + "anyOf:\n" - for _, s := range *(schema.AnyOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.OneOf != nil { - result += indent + "oneOf:\n" - for _, s := range *(schema.OneOf) { - result += s.describeSchema(indent + " ") - result += indent + "-\n" - } - } - if schema.Not != nil { - result += indent + "not:\n" - result += schema.Not.describeSchema(indent + " ") - } - if schema.Definitions != nil { - result += indent + "definitions:\n" - for _, pair := range *(schema.Definitions) { - name := pair.Name - s := pair.Value - result += indent + " " + name + ":\n" - result += s.describeSchema(indent + " " + " ") - } - } - if schema.Title != nil { - result += indent + "title: " + *(schema.Title) + "\n" - } - if schema.Description != nil { - result += indent + "description: " + *(schema.Description) + "\n" - } - if schema.Default != nil { - result += indent + "default:\n" - result += indent + fmt.Sprintf(" %+v\n", *(schema.Default)) - } - if schema.Format != nil { - result += indent + "format: " + *(schema.Format) + "\n" - } - if schema.Ref != nil { - result += indent + "$ref: " + *(schema.Ref) + "\n" - } - return result -} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/models.go b/vendor/github.com/googleapis/gnostic/jsonschema/models.go deleted file mode 100644 index 4781bdc..0000000 --- a/vendor/github.com/googleapis/gnostic/jsonschema/models.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Package jsonschema supports the reading, writing, and manipulation -// of JSON Schemas. -package jsonschema - -import "gopkg.in/yaml.v3" - -// The Schema struct models a JSON Schema and, because schemas are -// defined hierarchically, contains many references to itself. -// All fields are pointers and are nil if the associated values -// are not specified. -type Schema struct { - Schema *string // $schema - ID *string // id keyword used for $ref resolution scope - Ref *string // $ref, i.e. JSON Pointers - - // http://json-schema.org/latest/json-schema-validation.html - // 5.1. Validation keywords for numeric instances (number and integer) - MultipleOf *SchemaNumber - Maximum *SchemaNumber - ExclusiveMaximum *bool - Minimum *SchemaNumber - ExclusiveMinimum *bool - - // 5.2. Validation keywords for strings - MaxLength *int64 - MinLength *int64 - Pattern *string - - // 5.3. Validation keywords for arrays - AdditionalItems *SchemaOrBoolean - Items *SchemaOrSchemaArray - MaxItems *int64 - MinItems *int64 - UniqueItems *bool - - // 5.4. Validation keywords for objects - MaxProperties *int64 - MinProperties *int64 - Required *[]string - AdditionalProperties *SchemaOrBoolean - Properties *[]*NamedSchema - PatternProperties *[]*NamedSchema - Dependencies *[]*NamedSchemaOrStringArray - - // 5.5. Validation keywords for any instance type - Enumeration *[]SchemaEnumValue - Type *StringOrStringArray - AllOf *[]*Schema - AnyOf *[]*Schema - OneOf *[]*Schema - Not *Schema - Definitions *[]*NamedSchema - - // 6. Metadata keywords - Title *string - Description *string - Default *yaml.Node - - // 7. Semantic validation with "format" - Format *string -} - -// These helper structs represent "combination" types that generally can -// have values of one type or another. All are used to represent parts -// of Schemas. - -// SchemaNumber represents a value that can be either an Integer or a Float. -type SchemaNumber struct { - Integer *int64 - Float *float64 -} - -// NewSchemaNumberWithInteger creates and returns a new object -func NewSchemaNumberWithInteger(i int64) *SchemaNumber { - result := &SchemaNumber{} - result.Integer = &i - return result -} - -// NewSchemaNumberWithFloat creates and returns a new object -func NewSchemaNumberWithFloat(f float64) *SchemaNumber { - result := &SchemaNumber{} - result.Float = &f - return result -} - -// SchemaOrBoolean represents a value that can be either a Schema or a Boolean. -type SchemaOrBoolean struct { - Schema *Schema - Boolean *bool -} - -// NewSchemaOrBooleanWithSchema creates and returns a new object -func NewSchemaOrBooleanWithSchema(s *Schema) *SchemaOrBoolean { - result := &SchemaOrBoolean{} - result.Schema = s - return result -} - -// NewSchemaOrBooleanWithBoolean creates and returns a new object -func NewSchemaOrBooleanWithBoolean(b bool) *SchemaOrBoolean { - result := &SchemaOrBoolean{} - result.Boolean = &b - return result -} - -// StringOrStringArray represents a value that can be either -// a String or an Array of Strings. 
-type StringOrStringArray struct { - String *string - StringArray *[]string -} - -// NewStringOrStringArrayWithString creates and returns a new object -func NewStringOrStringArrayWithString(s string) *StringOrStringArray { - result := &StringOrStringArray{} - result.String = &s - return result -} - -// NewStringOrStringArrayWithStringArray creates and returns a new object -func NewStringOrStringArrayWithStringArray(a []string) *StringOrStringArray { - result := &StringOrStringArray{} - result.StringArray = &a - return result -} - -// SchemaOrStringArray represents a value that can be either -// a Schema or an Array of Strings. -type SchemaOrStringArray struct { - Schema *Schema - StringArray *[]string -} - -// SchemaOrSchemaArray represents a value that can be either -// a Schema or an Array of Schemas. -type SchemaOrSchemaArray struct { - Schema *Schema - SchemaArray *[]*Schema -} - -// NewSchemaOrSchemaArrayWithSchema creates and returns a new object -func NewSchemaOrSchemaArrayWithSchema(s *Schema) *SchemaOrSchemaArray { - result := &SchemaOrSchemaArray{} - result.Schema = s - return result -} - -// NewSchemaOrSchemaArrayWithSchemaArray creates and returns a new object -func NewSchemaOrSchemaArrayWithSchemaArray(a []*Schema) *SchemaOrSchemaArray { - result := &SchemaOrSchemaArray{} - result.SchemaArray = &a - return result -} - -// SchemaEnumValue represents a value that can be part of an -// enumeration in a Schema. -type SchemaEnumValue struct { - String *string - Bool *bool -} - -// NamedSchema is a name-value pair that is used to emulate maps -// with ordered keys. -type NamedSchema struct { - Name string - Value *Schema -} - -// NewNamedSchema creates and returns a new object -func NewNamedSchema(name string, value *Schema) *NamedSchema { - return &NamedSchema{Name: name, Value: value} -} - -// NamedSchemaOrStringArray is a name-value pair that is used -// to emulate maps with ordered keys. -type NamedSchemaOrStringArray struct { - Name string - Value *SchemaOrStringArray -} - -// Access named subschemas by name - -func namedSchemaArrayElementWithName(array *[]*NamedSchema, name string) *Schema { - if array == nil { - return nil - } - for _, pair := range *array { - if pair.Name == name { - return pair.Value - } - } - return nil -} - -// PropertyWithName returns the selected element. -func (s *Schema) PropertyWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.Properties, name) -} - -// PatternPropertyWithName returns the selected element. -func (s *Schema) PatternPropertyWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.PatternProperties, name) -} - -// DefinitionWithName returns the selected element. -func (s *Schema) DefinitionWithName(name string) *Schema { - return namedSchemaArrayElementWithName(s.Definitions, name) -} - -// AddProperty adds a named property. -func (s *Schema) AddProperty(name string, property *Schema) { - *s.Properties = append(*s.Properties, NewNamedSchema(name, property)) -} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/operations.go b/vendor/github.com/googleapis/gnostic/jsonschema/operations.go deleted file mode 100644 index ba8dd4a..0000000 --- a/vendor/github.com/googleapis/gnostic/jsonschema/operations.go +++ /dev/null @@ -1,394 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jsonschema - -import ( - "fmt" - "log" - "strings" -) - -// -// OPERATIONS -// The following methods perform operations on Schemas. -// - -// IsEmpty returns true if no members of the Schema are specified. -func (schema *Schema) IsEmpty() bool { - return (schema.Schema == nil) && - (schema.ID == nil) && - (schema.MultipleOf == nil) && - (schema.Maximum == nil) && - (schema.ExclusiveMaximum == nil) && - (schema.Minimum == nil) && - (schema.ExclusiveMinimum == nil) && - (schema.MaxLength == nil) && - (schema.MinLength == nil) && - (schema.Pattern == nil) && - (schema.AdditionalItems == nil) && - (schema.Items == nil) && - (schema.MaxItems == nil) && - (schema.MinItems == nil) && - (schema.UniqueItems == nil) && - (schema.MaxProperties == nil) && - (schema.MinProperties == nil) && - (schema.Required == nil) && - (schema.AdditionalProperties == nil) && - (schema.Properties == nil) && - (schema.PatternProperties == nil) && - (schema.Dependencies == nil) && - (schema.Enumeration == nil) && - (schema.Type == nil) && - (schema.AllOf == nil) && - (schema.AnyOf == nil) && - (schema.OneOf == nil) && - (schema.Not == nil) && - (schema.Definitions == nil) && - (schema.Title == nil) && - (schema.Description == nil) && - (schema.Default == nil) && - (schema.Format == nil) && - (schema.Ref == nil) -} - -// IsEqual returns true if two schemas are equal. -func (schema *Schema) IsEqual(schema2 *Schema) bool { - return schema.String() == schema2.String() -} - -// SchemaOperation represents a function that can be applied to a Schema. -type SchemaOperation func(schema *Schema, context string) - -// Applies a specified function to a Schema and all of the Schemas that it contains. 
-func (schema *Schema) applyToSchemas(operation SchemaOperation, context string) { - - if schema.AdditionalItems != nil { - s := schema.AdditionalItems.Schema - if s != nil { - s.applyToSchemas(operation, "AdditionalItems") - } - } - - if schema.Items != nil { - if schema.Items.SchemaArray != nil { - for _, s := range *(schema.Items.SchemaArray) { - s.applyToSchemas(operation, "Items.SchemaArray") - } - } else if schema.Items.Schema != nil { - schema.Items.Schema.applyToSchemas(operation, "Items.Schema") - } - } - - if schema.AdditionalProperties != nil { - s := schema.AdditionalProperties.Schema - if s != nil { - s.applyToSchemas(operation, "AdditionalProperties") - } - } - - if schema.Properties != nil { - for _, pair := range *(schema.Properties) { - s := pair.Value - s.applyToSchemas(operation, "Properties") - } - } - if schema.PatternProperties != nil { - for _, pair := range *(schema.PatternProperties) { - s := pair.Value - s.applyToSchemas(operation, "PatternProperties") - } - } - - if schema.Dependencies != nil { - for _, pair := range *(schema.Dependencies) { - schemaOrStringArray := pair.Value - s := schemaOrStringArray.Schema - if s != nil { - s.applyToSchemas(operation, "Dependencies") - } - } - } - - if schema.AllOf != nil { - for _, s := range *(schema.AllOf) { - s.applyToSchemas(operation, "AllOf") - } - } - if schema.AnyOf != nil { - for _, s := range *(schema.AnyOf) { - s.applyToSchemas(operation, "AnyOf") - } - } - if schema.OneOf != nil { - for _, s := range *(schema.OneOf) { - s.applyToSchemas(operation, "OneOf") - } - } - if schema.Not != nil { - schema.Not.applyToSchemas(operation, "Not") - } - - if schema.Definitions != nil { - for _, pair := range *(schema.Definitions) { - s := pair.Value - s.applyToSchemas(operation, "Definitions") - } - } - - operation(schema, context) -} - -// CopyProperties copies all non-nil properties from the source Schema to the schema Schema. 
-func (schema *Schema) CopyProperties(source *Schema) { - if source.Schema != nil { - schema.Schema = source.Schema - } - if source.ID != nil { - schema.ID = source.ID - } - if source.MultipleOf != nil { - schema.MultipleOf = source.MultipleOf - } - if source.Maximum != nil { - schema.Maximum = source.Maximum - } - if source.ExclusiveMaximum != nil { - schema.ExclusiveMaximum = source.ExclusiveMaximum - } - if source.Minimum != nil { - schema.Minimum = source.Minimum - } - if source.ExclusiveMinimum != nil { - schema.ExclusiveMinimum = source.ExclusiveMinimum - } - if source.MaxLength != nil { - schema.MaxLength = source.MaxLength - } - if source.MinLength != nil { - schema.MinLength = source.MinLength - } - if source.Pattern != nil { - schema.Pattern = source.Pattern - } - if source.AdditionalItems != nil { - schema.AdditionalItems = source.AdditionalItems - } - if source.Items != nil { - schema.Items = source.Items - } - if source.MaxItems != nil { - schema.MaxItems = source.MaxItems - } - if source.MinItems != nil { - schema.MinItems = source.MinItems - } - if source.UniqueItems != nil { - schema.UniqueItems = source.UniqueItems - } - if source.MaxProperties != nil { - schema.MaxProperties = source.MaxProperties - } - if source.MinProperties != nil { - schema.MinProperties = source.MinProperties - } - if source.Required != nil { - schema.Required = source.Required - } - if source.AdditionalProperties != nil { - schema.AdditionalProperties = source.AdditionalProperties - } - if source.Properties != nil { - schema.Properties = source.Properties - } - if source.PatternProperties != nil { - schema.PatternProperties = source.PatternProperties - } - if source.Dependencies != nil { - schema.Dependencies = source.Dependencies - } - if source.Enumeration != nil { - schema.Enumeration = source.Enumeration - } - if source.Type != nil { - schema.Type = source.Type - } - if source.AllOf != nil { - schema.AllOf = source.AllOf - } - if source.AnyOf != nil { - schema.AnyOf = source.AnyOf - } - if source.OneOf != nil { - schema.OneOf = source.OneOf - } - if source.Not != nil { - schema.Not = source.Not - } - if source.Definitions != nil { - schema.Definitions = source.Definitions - } - if source.Title != nil { - schema.Title = source.Title - } - if source.Description != nil { - schema.Description = source.Description - } - if source.Default != nil { - schema.Default = source.Default - } - if source.Format != nil { - schema.Format = source.Format - } - if source.Ref != nil { - schema.Ref = source.Ref - } -} - -// TypeIs returns true if the Type of a Schema includes the specified type -func (schema *Schema) TypeIs(typeName string) bool { - if schema.Type != nil { - // the schema Type is either a string or an array of strings - if schema.Type.String != nil { - return (*(schema.Type.String) == typeName) - } else if schema.Type.StringArray != nil { - for _, n := range *(schema.Type.StringArray) { - if n == typeName { - return true - } - } - } - } - return false -} - -// ResolveRefs resolves "$ref" elements in a Schema and its children. -// But if a reference refers to an object type, is inside a oneOf, or contains a oneOf, -// the reference is kept and we expect downstream tools to separately model these -// referenced schemas. 
-func (schema *Schema) ResolveRefs() { - rootSchema := schema - count := 1 - for count > 0 { - count = 0 - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.Ref != nil { - resolvedRef, err := rootSchema.resolveJSONPointer(*(schema.Ref)) - if err != nil { - log.Printf("%+v", err) - } else if resolvedRef.TypeIs("object") { - // don't substitute for objects, we'll model the referenced schema with a class - } else if context == "OneOf" { - // don't substitute for references inside oneOf declarations - } else if resolvedRef.OneOf != nil { - // don't substitute for references that contain oneOf declarations - } else if resolvedRef.AdditionalProperties != nil { - // don't substitute for references that look like objects - } else { - schema.Ref = nil - schema.CopyProperties(resolvedRef) - count++ - } - } - }, "") - } -} - -// resolveJSONPointer resolves JSON pointers. -// This current implementation is very crude and custom for OpenAPI 2.0 schemas. -// It panics for any pointer that it is unable to resolve. -func (schema *Schema) resolveJSONPointer(ref string) (result *Schema, err error) { - parts := strings.Split(ref, "#") - if len(parts) == 2 { - documentName := parts[0] + "#" - if documentName == "#" && schema.ID != nil { - documentName = *(schema.ID) - } - path := parts[1] - document := schemas[documentName] - pathParts := strings.Split(path, "/") - - // we currently do a very limited (hard-coded) resolution of certain paths and log errors for missed cases - if len(pathParts) == 1 { - return document, nil - } else if len(pathParts) == 3 { - switch pathParts[1] { - case "definitions": - dictionary := document.Definitions - for _, pair := range *dictionary { - if pair.Name == pathParts[2] { - result = pair.Value - } - } - case "properties": - dictionary := document.Properties - for _, pair := range *dictionary { - if pair.Name == pathParts[2] { - result = pair.Value - } - } - default: - break - } - } - } - if result == nil { - return nil, fmt.Errorf("unresolved pointer: %+v", ref) - } - return result, nil -} - -// ResolveAllOfs replaces "allOf" elements by merging their properties into the parent Schema. -func (schema *Schema) ResolveAllOfs() { - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.AllOf != nil { - for _, allOf := range *(schema.AllOf) { - schema.CopyProperties(allOf) - } - schema.AllOf = nil - } - }, "resolveAllOfs") -} - -// ResolveAnyOfs replaces all "anyOf" elements with "oneOf". 
-func (schema *Schema) ResolveAnyOfs() { - schema.applyToSchemas( - func(schema *Schema, context string) { - if schema.AnyOf != nil { - schema.OneOf = schema.AnyOf - schema.AnyOf = nil - } - }, "resolveAnyOfs") -} - -// return a pointer to a copy of a passed-in string -func stringptr(input string) (output *string) { - return &input -} - -// CopyOfficialSchemaProperty copies a named property from the official JSON Schema definition -func (schema *Schema) CopyOfficialSchemaProperty(name string) { - *schema.Properties = append(*schema.Properties, - NewNamedSchema(name, - &Schema{Ref: stringptr("http://json-schema.org/draft-04/schema#/properties/" + name)})) -} - -// CopyOfficialSchemaProperties copies named properties from the official JSON Schema definition -func (schema *Schema) CopyOfficialSchemaProperties(names []string) { - for _, name := range names { - schema.CopyOfficialSchemaProperty(name) - } -} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/reader.go b/vendor/github.com/googleapis/gnostic/jsonschema/reader.go deleted file mode 100644 index b8583d4..0000000 --- a/vendor/github.com/googleapis/gnostic/jsonschema/reader.go +++ /dev/null @@ -1,442 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -//go:generate go run generate-base.go - -package jsonschema - -import ( - "fmt" - "io/ioutil" - "strconv" - - "gopkg.in/yaml.v3" -) - -// This is a global map of all known Schemas. -// It is initialized when the first Schema is created and inserted. -var schemas map[string]*Schema - -// NewBaseSchema builds a schema object from an embedded json representation. -func NewBaseSchema() (schema *Schema, err error) { - b, err := baseSchemaBytes() - if err != nil { - return nil, err - } - var node yaml.Node - err = yaml.Unmarshal(b, &node) - if err != nil { - return nil, err - } - return NewSchemaFromObject(&node), nil -} - -// NewSchemaFromFile reads a schema from a file. -// Currently this assumes that schemas are stored in the source distribution of this project. -func NewSchemaFromFile(filename string) (schema *Schema, err error) { - file, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - var node yaml.Node - err = yaml.Unmarshal(file, &node) - if err != nil { - return nil, err - } - return NewSchemaFromObject(&node), nil -} - -// NewSchemaFromObject constructs a schema from a parsed JSON object. -// Due to the complexity of the schema representation, this is a -// custom reader and not the standard Go JSON reader (encoding/json). 
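For context on the vendored `jsonschema` helpers being dropped above, here is a minimal, hedged sketch of how the reader and the `Resolve*` passes are typically chained. It uses only exported functions visible in this hunk; the input filename is a placeholder, not something referenced by the patch.

```go
package main

import (
	"fmt"
	"log"

	"github.com/googleapis/gnostic/jsonschema"
)

func main() {
	// "schema.json" is an illustrative path, not part of this patch.
	schema, err := jsonschema.NewSchemaFromFile("schema.json")
	if err != nil {
		log.Fatal(err)
	}
	schema.ResolveRefs()   // substitute resolvable "$ref" elements in place
	schema.ResolveAllOfs() // merge "allOf" members into their parent schemas
	schema.ResolveAnyOfs() // rewrite "anyOf" as "oneOf"
	fmt.Println(schema.JSONString())
}
```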
-func NewSchemaFromObject(jsonData *yaml.Node) *Schema { - switch jsonData.Kind { - case yaml.DocumentNode: - return NewSchemaFromObject(jsonData.Content[0]) - case yaml.MappingNode: - schema := &Schema{} - - for i := 0; i < len(jsonData.Content); i += 2 { - k := jsonData.Content[i].Value - v := jsonData.Content[i+1] - - switch k { - case "$schema": - schema.Schema = schema.stringValue(v) - case "id": - schema.ID = schema.stringValue(v) - - case "multipleOf": - schema.MultipleOf = schema.numberValue(v) - case "maximum": - schema.Maximum = schema.numberValue(v) - case "exclusiveMaximum": - schema.ExclusiveMaximum = schema.boolValue(v) - case "minimum": - schema.Minimum = schema.numberValue(v) - case "exclusiveMinimum": - schema.ExclusiveMinimum = schema.boolValue(v) - - case "maxLength": - schema.MaxLength = schema.intValue(v) - case "minLength": - schema.MinLength = schema.intValue(v) - case "pattern": - schema.Pattern = schema.stringValue(v) - - case "additionalItems": - schema.AdditionalItems = schema.schemaOrBooleanValue(v) - case "items": - schema.Items = schema.schemaOrSchemaArrayValue(v) - case "maxItems": - schema.MaxItems = schema.intValue(v) - case "minItems": - schema.MinItems = schema.intValue(v) - case "uniqueItems": - schema.UniqueItems = schema.boolValue(v) - - case "maxProperties": - schema.MaxProperties = schema.intValue(v) - case "minProperties": - schema.MinProperties = schema.intValue(v) - case "required": - schema.Required = schema.arrayOfStringsValue(v) - case "additionalProperties": - schema.AdditionalProperties = schema.schemaOrBooleanValue(v) - case "properties": - schema.Properties = schema.mapOfSchemasValue(v) - case "patternProperties": - schema.PatternProperties = schema.mapOfSchemasValue(v) - case "dependencies": - schema.Dependencies = schema.mapOfSchemasOrStringArraysValue(v) - - case "enum": - schema.Enumeration = schema.arrayOfEnumValuesValue(v) - - case "type": - schema.Type = schema.stringOrStringArrayValue(v) - case "allOf": - schema.AllOf = schema.arrayOfSchemasValue(v) - case "anyOf": - schema.AnyOf = schema.arrayOfSchemasValue(v) - case "oneOf": - schema.OneOf = schema.arrayOfSchemasValue(v) - case "not": - schema.Not = NewSchemaFromObject(v) - case "definitions": - schema.Definitions = schema.mapOfSchemasValue(v) - - case "title": - schema.Title = schema.stringValue(v) - case "description": - schema.Description = schema.stringValue(v) - - case "default": - schema.Default = v - - case "format": - schema.Format = schema.stringValue(v) - case "$ref": - schema.Ref = schema.stringValue(v) - default: - fmt.Printf("UNSUPPORTED (%s)\n", k) - } - } - - // insert schema in global map - if schema.ID != nil { - if schemas == nil { - schemas = make(map[string]*Schema, 0) - } - schemas[*(schema.ID)] = schema - } - return schema - - default: - fmt.Printf("schemaValue: unexpected node %+v\n", jsonData) - return nil - } - - return nil -} - -// -// BUILDERS -// The following methods build elements of Schemas from interface{} values. -// Each returns nil if it is unable to build the desired element. -// - -// Gets the string value of an interface{} value if possible. -func (schema *Schema) stringValue(v *yaml.Node) *string { - switch v.Kind { - case yaml.ScalarNode: - return &v.Value - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the numeric value of an interface{} value if possible. 
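The readers above all step through `Content` two entries at a time. As a standalone illustration (not part of the vendored code), this is the yaml.v3 convention where a mapping node interleaves key and value nodes:

```go
package main

import (
	"fmt"
	"log"

	"gopkg.in/yaml.v3"
)

func main() {
	var node yaml.Node
	if err := yaml.Unmarshal([]byte(`{"type": "string", "minLength": 1}`), &node); err != nil {
		log.Fatal(err)
	}
	// The DocumentNode wraps a single MappingNode whose Content alternates
	// key, value, key, value, ... which is why the loops above advance by two.
	mapping := node.Content[0]
	for i := 0; i < len(mapping.Content); i += 2 {
		k, v := mapping.Content[i], mapping.Content[i+1]
		fmt.Printf("%s -> %s (tag %s)\n", k.Value, v.Value, v.Tag)
	}
}
```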
-func (schema *Schema) numberValue(v *yaml.Node) *SchemaNumber { - number := &SchemaNumber{} - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!float": - v2, _ := strconv.ParseFloat(v.Value, 64) - number.Float = &v2 - return number - case "!!int": - v2, _ := strconv.ParseInt(v.Value, 10, 64) - number.Integer = &v2 - return number - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("stringValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the integer value of an interface{} value if possible. -func (schema *Schema) intValue(v *yaml.Node) *int64 { - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!float": - v2, _ := strconv.ParseFloat(v.Value, 64) - v3 := int64(v2) - return &v3 - case "!!int": - v2, _ := strconv.ParseInt(v.Value, 10, 64) - return &v2 - default: - fmt.Printf("intValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("intValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets the bool value of an interface{} value if possible. -func (schema *Schema) boolValue(v *yaml.Node) *bool { - switch v.Kind { - case yaml.ScalarNode: - switch v.Tag { - case "!!bool": - v2, _ := strconv.ParseBool(v.Value) - return &v2 - default: - fmt.Printf("boolValue: unexpected node %+v\n", v) - } - default: - fmt.Printf("boolValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a map of Schemas from an interface{} value if possible. -func (schema *Schema) mapOfSchemasValue(v *yaml.Node) *[]*NamedSchema { - switch v.Kind { - case yaml.MappingNode: - m := make([]*NamedSchema, 0) - for i := 0; i < len(v.Content); i += 2 { - k2 := v.Content[i].Value - v2 := v.Content[i+1] - pair := &NamedSchema{Name: k2, Value: NewSchemaFromObject(v2)} - m = append(m, pair) - } - return &m - default: - fmt.Printf("mapOfSchemasValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of Schemas from an interface{} value if possible. -func (schema *Schema) arrayOfSchemasValue(v *yaml.Node) *[]*Schema { - switch v.Kind { - case yaml.SequenceNode: - m := make([]*Schema, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.MappingNode: - s := NewSchemaFromObject(v2) - m = append(m, s) - default: - fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v2) - } - } - return &m - case yaml.MappingNode: - m := make([]*Schema, 0) - s := NewSchemaFromObject(v) - m = append(m, s) - return &m - default: - fmt.Printf("arrayOfSchemasValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a Schema or an array of Schemas from an interface{} value if possible. -func (schema *Schema) schemaOrSchemaArrayValue(v *yaml.Node) *SchemaOrSchemaArray { - switch v.Kind { - case yaml.SequenceNode: - m := make([]*Schema, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.MappingNode: - s := NewSchemaFromObject(v2) - m = append(m, s) - default: - fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v2) - } - } - return &SchemaOrSchemaArray{SchemaArray: &m} - case yaml.MappingNode: - s := NewSchemaFromObject(v) - return &SchemaOrSchemaArray{Schema: s} - default: - fmt.Printf("schemaOrSchemaArrayValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of strings from an interface{} value if possible. 
-func (schema *Schema) arrayOfStringsValue(v *yaml.Node) *[]string { - switch v.Kind { - case yaml.ScalarNode: - a := []string{v.Value} - return &a - case yaml.SequenceNode: - a := make([]string, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - a = append(a, v2.Value) - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) - } - } - return &a - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets a string or an array of strings from an interface{} value if possible. -func (schema *Schema) stringOrStringArrayValue(v *yaml.Node) *StringOrStringArray { - switch v.Kind { - case yaml.ScalarNode: - s := &StringOrStringArray{} - s.String = &v.Value - return s - case yaml.SequenceNode: - a := make([]string, 0) - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - a = append(a, v2.Value) - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v2) - } - } - s := &StringOrStringArray{} - s.StringArray = &a - return s - default: - fmt.Printf("arrayOfStringsValue: unexpected node %+v\n", v) - } - return nil -} - -// Gets an array of enum values from an interface{} value if possible. -func (schema *Schema) arrayOfEnumValuesValue(v *yaml.Node) *[]SchemaEnumValue { - a := make([]SchemaEnumValue, 0) - switch v.Kind { - case yaml.SequenceNode: - for _, v2 := range v.Content { - switch v2.Kind { - case yaml.ScalarNode: - switch v2.Tag { - case "!!str": - a = append(a, SchemaEnumValue{String: &v2.Value}) - case "!!bool": - v3, _ := strconv.ParseBool(v2.Value) - a = append(a, SchemaEnumValue{Bool: &v3}) - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected type %s\n", v2.Tag) - } - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v2) - } - } - default: - fmt.Printf("arrayOfEnumValuesValue: unexpected node %+v\n", v) - } - return &a -} - -// Gets a map of schemas or string arrays from an interface{} value if possible. -func (schema *Schema) mapOfSchemasOrStringArraysValue(v *yaml.Node) *[]*NamedSchemaOrStringArray { - m := make([]*NamedSchemaOrStringArray, 0) - switch v.Kind { - case yaml.MappingNode: - for i := 0; i < len(v.Content); i += 2 { - k2 := v.Content[i].Value - v2 := v.Content[i+1] - switch v2.Kind { - case yaml.SequenceNode: - a := make([]string, 0) - for _, v3 := range v2.Content { - switch v3.Kind { - case yaml.ScalarNode: - a = append(a, v3.Value) - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v3) - } - } - s := &SchemaOrStringArray{} - s.StringArray = &a - pair := &NamedSchemaOrStringArray{Name: k2, Value: s} - m = append(m, pair) - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v2) - } - } - default: - fmt.Printf("mapOfSchemasOrStringArraysValue: unexpected node %+v\n", v) - } - return &m -} - -// Gets a schema or a boolean value from an interface{} value if possible. 
-func (schema *Schema) schemaOrBooleanValue(v *yaml.Node) *SchemaOrBoolean { - schemaOrBoolean := &SchemaOrBoolean{} - switch v.Kind { - case yaml.ScalarNode: - v2, _ := strconv.ParseBool(v.Value) - schemaOrBoolean.Boolean = &v2 - case yaml.MappingNode: - schemaOrBoolean.Schema = NewSchemaFromObject(v) - default: - fmt.Printf("schemaOrBooleanValue: unexpected node %+v\n", v) - } - return schemaOrBoolean -} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/schema.json b/vendor/github.com/googleapis/gnostic/jsonschema/schema.json deleted file mode 100644 index 85eb502..0000000 --- a/vendor/github.com/googleapis/gnostic/jsonschema/schema.json +++ /dev/null @@ -1,150 +0,0 @@ -{ - "id": "http://json-schema.org/draft-04/schema#", - "$schema": "http://json-schema.org/draft-04/schema#", - "description": "Core schema meta-schema", - "definitions": { - "schemaArray": { - "type": "array", - "minItems": 1, - "items": { "$ref": "#" } - }, - "positiveInteger": { - "type": "integer", - "minimum": 0 - }, - "positiveIntegerDefault0": { - "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] - }, - "simpleTypes": { - "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] - }, - "stringArray": { - "type": "array", - "items": { "type": "string" }, - "minItems": 1, - "uniqueItems": true - } - }, - "type": "object", - "properties": { - "id": { - "type": "string", - "format": "uri" - }, - "$schema": { - "type": "string", - "format": "uri" - }, - "title": { - "type": "string" - }, - "description": { - "type": "string" - }, - "default": {}, - "multipleOf": { - "type": "number", - "minimum": 0, - "exclusiveMinimum": true - }, - "maximum": { - "type": "number" - }, - "exclusiveMaximum": { - "type": "boolean", - "default": false - }, - "minimum": { - "type": "number" - }, - "exclusiveMinimum": { - "type": "boolean", - "default": false - }, - "maxLength": { "$ref": "#/definitions/positiveInteger" }, - "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "pattern": { - "type": "string", - "format": "regex" - }, - "additionalItems": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "items": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/schemaArray" } - ], - "default": {} - }, - "maxItems": { "$ref": "#/definitions/positiveInteger" }, - "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "uniqueItems": { - "type": "boolean", - "default": false - }, - "maxProperties": { "$ref": "#/definitions/positiveInteger" }, - "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, - "required": { "$ref": "#/definitions/stringArray" }, - "additionalProperties": { - "anyOf": [ - { "type": "boolean" }, - { "$ref": "#" } - ], - "default": {} - }, - "definitions": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "properties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "patternProperties": { - "type": "object", - "additionalProperties": { "$ref": "#" }, - "default": {} - }, - "dependencies": { - "type": "object", - "additionalProperties": { - "anyOf": [ - { "$ref": "#" }, - { "$ref": "#/definitions/stringArray" } - ] - } - }, - "enum": { - "type": "array", - "minItems": 1, - "uniqueItems": true - }, - "type": { - "anyOf": [ - { "$ref": "#/definitions/simpleTypes" }, - { - "type": "array", - "items": { "$ref": "#/definitions/simpleTypes" }, - "minItems": 1, - "uniqueItems": true - } - ] - }, - "allOf": { 
"$ref": "#/definitions/schemaArray" }, - "anyOf": { "$ref": "#/definitions/schemaArray" }, - "oneOf": { "$ref": "#/definitions/schemaArray" }, - "not": { "$ref": "#" } - }, - "dependencies": { - "exclusiveMaximum": [ "maximum" ], - "exclusiveMinimum": [ "minimum" ] - }, - "default": {} -} diff --git a/vendor/github.com/googleapis/gnostic/jsonschema/writer.go b/vendor/github.com/googleapis/gnostic/jsonschema/writer.go deleted file mode 100644 index 340dc5f..0000000 --- a/vendor/github.com/googleapis/gnostic/jsonschema/writer.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2017 Google LLC. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jsonschema - -import ( - "fmt" - - "gopkg.in/yaml.v3" -) - -const indentation = " " - -func renderMappingNode(node *yaml.Node, indent string) (result string) { - result = "{\n" - innerIndent := indent + indentation - for i := 0; i < len(node.Content); i += 2 { - // first print the key - key := node.Content[i].Value - result += fmt.Sprintf("%s\"%+v\": ", innerIndent, key) - // then the value - value := node.Content[i+1] - switch value.Kind { - case yaml.ScalarNode: - result += "\"" + value.Value + "\"" - case yaml.MappingNode: - result += renderMappingNode(value, innerIndent) - case yaml.SequenceNode: - result += renderSequenceNode(value, innerIndent) - default: - result += fmt.Sprintf("???MapItem(Key:%+v, Value:%T)", value, value) - } - if i < len(node.Content)-2 { - result += "," - } - result += "\n" - } - - result += indent + "}" - return result -} - -func renderSequenceNode(node *yaml.Node, indent string) (result string) { - result = "[\n" - innerIndent := indent + indentation - for i := 0; i < len(node.Content); i++ { - item := node.Content[i] - switch item.Kind { - case yaml.ScalarNode: - result += innerIndent + "\"" + item.Value + "\"" - case yaml.MappingNode: - result += innerIndent + renderMappingNode(item, innerIndent) + "" - default: - result += innerIndent + fmt.Sprintf("???ArrayItem(%+v)", item) - } - if i < len(node.Content)-1 { - result += "," - } - result += "\n" - } - result += indent + "]" - return result -} - -func renderStringArray(array []string, indent string) (result string) { - result = "[\n" - innerIndent := indent + indentation - for i, item := range array { - result += innerIndent + "\"" + item + "\"" - if i < len(array)-1 { - result += "," - } - result += "\n" - } - result += indent + "]" - return result -} - -// Render renders a yaml.Node as JSON -func Render(node *yaml.Node) string { - if node.Kind == yaml.DocumentNode { - if len(node.Content) == 1 { - return Render(node.Content[0]) - } - } else if node.Kind == yaml.MappingNode { - return renderMappingNode(node, "") + "\n" - } else if node.Kind == yaml.SequenceNode { - return renderSequenceNode(node, "") + "\n" - } - return "" -} - -func (object *SchemaNumber) nodeValue() *yaml.Node { - if object.Integer != nil { - return nodeForInt64(*object.Integer) - } else if object.Float != nil { - return nodeForFloat64(*object.Float) - } else { - 
return nil - } -} - -func (object *SchemaOrBoolean) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.Boolean != nil { - return nodeForBoolean(*object.Boolean) - } else { - return nil - } -} - -func nodeForStringArray(array []string) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range array { - content = append(content, nodeForString(item)) - } - return nodeForSequence(content) -} - -func nodeForSchemaArray(array []*Schema) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range array { - content = append(content, item.nodeValue()) - } - return nodeForSequence(content) -} - -func (object *StringOrStringArray) nodeValue() *yaml.Node { - if object.String != nil { - return nodeForString(*object.String) - } else if object.StringArray != nil { - return nodeForStringArray(*(object.StringArray)) - } else { - return nil - } -} - -func (object *SchemaOrStringArray) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.StringArray != nil { - return nodeForStringArray(*(object.StringArray)) - } else { - return nil - } -} - -func (object *SchemaOrSchemaArray) nodeValue() *yaml.Node { - if object.Schema != nil { - return object.Schema.nodeValue() - } else if object.SchemaArray != nil { - return nodeForSchemaArray(*(object.SchemaArray)) - } else { - return nil - } -} - -func (object *SchemaEnumValue) nodeValue() *yaml.Node { - if object.String != nil { - return nodeForString(*object.String) - } else if object.Bool != nil { - return nodeForBoolean(*object.Bool) - } else { - return nil - } -} - -func nodeForNamedSchemaArray(array *[]*NamedSchema) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, pair := range *(array) { - content = appendPair(content, pair.Name, pair.Value.nodeValue()) - } - return nodeForMapping(content) -} - -func nodeForNamedSchemaOrStringArray(array *[]*NamedSchemaOrStringArray) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, pair := range *(array) { - content = appendPair(content, pair.Name, pair.Value.nodeValue()) - } - return nodeForMapping(content) -} - -func nodeForSchemaEnumArray(array *[]SchemaEnumValue) *yaml.Node { - content := make([]*yaml.Node, 0) - for _, item := range *array { - content = append(content, item.nodeValue()) - } - return nodeForSequence(content) -} - -func nodeForMapping(content []*yaml.Node) *yaml.Node { - return &yaml.Node{ - Kind: yaml.MappingNode, - Content: content, - } -} - -func nodeForSequence(content []*yaml.Node) *yaml.Node { - return &yaml.Node{ - Kind: yaml.SequenceNode, - Content: content, - } -} - -func nodeForString(value string) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!str", - Value: value, - } -} - -func nodeForBoolean(value bool) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!bool", - Value: fmt.Sprintf("%t", value), - } -} - -func nodeForInt64(value int64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!int", - Value: fmt.Sprintf("%d", value), - } -} - -func nodeForFloat64(value float64) *yaml.Node { - return &yaml.Node{ - Kind: yaml.ScalarNode, - Tag: "!!float", - Value: fmt.Sprintf("%f", value), - } -} - -func appendPair(nodes []*yaml.Node, name string, value *yaml.Node) []*yaml.Node { - nodes = append(nodes, nodeForString(name)) - nodes = append(nodes, value) - return nodes -} - -func (schema *Schema) nodeValue() *yaml.Node { - n := &yaml.Node{Kind: yaml.MappingNode} - content := make([]*yaml.Node, 0) - if 
schema.Title != nil { - content = appendPair(content, "title", nodeForString(*schema.Title)) - } - if schema.ID != nil { - content = appendPair(content, "id", nodeForString(*schema.ID)) - } - if schema.Schema != nil { - content = appendPair(content, "$schema", nodeForString(*schema.Schema)) - } - if schema.Type != nil { - content = appendPair(content, "type", schema.Type.nodeValue()) - } - if schema.Items != nil { - content = appendPair(content, "items", schema.Items.nodeValue()) - } - if schema.Description != nil { - content = appendPair(content, "description", nodeForString(*schema.Description)) - } - if schema.Required != nil { - content = appendPair(content, "required", nodeForStringArray(*schema.Required)) - } - if schema.AdditionalProperties != nil { - content = appendPair(content, "additionalProperties", schema.AdditionalProperties.nodeValue()) - } - if schema.PatternProperties != nil { - content = appendPair(content, "patternProperties", nodeForNamedSchemaArray(schema.PatternProperties)) - } - if schema.Properties != nil { - content = appendPair(content, "properties", nodeForNamedSchemaArray(schema.Properties)) - } - if schema.Dependencies != nil { - content = appendPair(content, "dependencies", nodeForNamedSchemaOrStringArray(schema.Dependencies)) - } - if schema.Ref != nil { - content = appendPair(content, "$ref", nodeForString(*schema.Ref)) - } - if schema.MultipleOf != nil { - content = appendPair(content, "multipleOf", schema.MultipleOf.nodeValue()) - } - if schema.Maximum != nil { - content = appendPair(content, "maximum", schema.Maximum.nodeValue()) - } - if schema.ExclusiveMaximum != nil { - content = appendPair(content, "exclusiveMaximum", nodeForBoolean(*schema.ExclusiveMaximum)) - } - if schema.Minimum != nil { - content = appendPair(content, "minimum", schema.Minimum.nodeValue()) - } - if schema.ExclusiveMinimum != nil { - content = appendPair(content, "exclusiveMinimum", nodeForBoolean(*schema.ExclusiveMinimum)) - } - if schema.MaxLength != nil { - content = appendPair(content, "maxLength", nodeForInt64(*schema.MaxLength)) - } - if schema.MinLength != nil { - content = appendPair(content, "minLength", nodeForInt64(*schema.MinLength)) - } - if schema.Pattern != nil { - content = appendPair(content, "pattern", nodeForString(*schema.Pattern)) - } - if schema.AdditionalItems != nil { - content = appendPair(content, "additionalItems", schema.AdditionalItems.nodeValue()) - } - if schema.MaxItems != nil { - content = appendPair(content, "maxItems", nodeForInt64(*schema.MaxItems)) - } - if schema.MinItems != nil { - content = appendPair(content, "minItems", nodeForInt64(*schema.MinItems)) - } - if schema.UniqueItems != nil { - content = appendPair(content, "uniqueItems", nodeForBoolean(*schema.UniqueItems)) - } - if schema.MaxProperties != nil { - content = appendPair(content, "maxProperties", nodeForInt64(*schema.MaxProperties)) - } - if schema.MinProperties != nil { - content = appendPair(content, "minProperties", nodeForInt64(*schema.MinProperties)) - } - if schema.Enumeration != nil { - content = appendPair(content, "enum", nodeForSchemaEnumArray(schema.Enumeration)) - } - if schema.AllOf != nil { - content = appendPair(content, "allOf", nodeForSchemaArray(*schema.AllOf)) - } - if schema.AnyOf != nil { - content = appendPair(content, "anyOf", nodeForSchemaArray(*schema.AnyOf)) - } - if schema.OneOf != nil { - content = appendPair(content, "oneOf", nodeForSchemaArray(*schema.OneOf)) - } - if schema.Not != nil { - content = appendPair(content, "not", schema.Not.nodeValue()) 
- } - if schema.Definitions != nil { - content = appendPair(content, "definitions", nodeForNamedSchemaArray(schema.Definitions)) - } - if schema.Default != nil { - // m = append(m, yaml.MapItem{Key: "default", Value: *schema.Default}) - } - if schema.Format != nil { - content = appendPair(content, "format", nodeForString(*schema.Format)) - } - n.Content = content - return n -} - -// JSONString returns a json representation of a schema. -func (schema *Schema) JSONString() string { - node := schema.nodeValue() - return Render(node) -} diff --git a/vendor/github.com/gregjones/httpcache/.travis.yml b/vendor/github.com/gregjones/httpcache/.travis.yml deleted file mode 100644 index 597bc99..0000000 --- a/vendor/github.com/gregjones/httpcache/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: false -language: go -matrix: - allow_failures: - - go: master - fast_finish: true - include: - - go: 1.10.x - - go: 1.11.x - env: GOFMT=1 - - go: master -install: - - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). -script: - - go get -t -v ./... - - if test -n "${GOFMT}"; then gofmt -w -s . && git diff --exit-code; fi - - go tool vet . - - go test -v -race ./... diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt deleted file mode 100644 index 81316be..0000000 --- a/vendor/github.com/gregjones/httpcache/LICENSE.txt +++ /dev/null @@ -1,7 +0,0 @@ -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md deleted file mode 100644 index 51e7d23..0000000 --- a/vendor/github.com/gregjones/httpcache/README.md +++ /dev/null @@ -1,29 +0,0 @@ -httpcache -========= - -[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) - -Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. - -It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). 
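As a rough usage sketch of the httpcache package described in the README above (the target URL is a placeholder), wrapping a client with the built-in memory cache looks roughly like this; replies served from the cache are flagged with the `X-From-Cache` header:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/gregjones/httpcache"
)

func main() {
	// NewMemoryCacheTransport wires the in-memory Cache into a Transport;
	// Client() wraps it in a standard *http.Client.
	client := httpcache.NewMemoryCacheTransport().Client()

	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/")
		if err != nil {
			log.Fatal(err)
		}
		ioutil.ReadAll(resp.Body) // a GET body must be drained before the response is cached
		resp.Body.Close()
		fmt.Println("served from cache:", resp.Header.Get(httpcache.XFromCache) == "1")
	}
}
```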
- -This project isn't actively maintained; it works for what I, and seemingly others, want to do with it, and I consider it "done". That said, if you find any issues, please open a Pull Request and I will try to review it. Any changes now that change the public API won't be considered. - -Cache Backends --------------- - -- The built-in 'memory' cache stores responses in an in-memory map. -- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. -- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. -- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. -- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). -- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. -- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. -- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). - -If you implement any other backend and wish it to be linked here, please send a PR editing this file. - -License -------- - -- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go deleted file mode 100644 index 42e3129..0000000 --- a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go +++ /dev/null @@ -1,61 +0,0 @@ -// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package -// to supplement an in-memory map with persistent storage -// -package diskcache - -import ( - "bytes" - "crypto/md5" - "encoding/hex" - "github.com/peterbourgon/diskv" - "io" -) - -// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage -type Cache struct { - d *diskv.Diskv -} - -// Get returns the response corresponding to key if present -func (c *Cache) Get(key string) (resp []byte, ok bool) { - key = keyToFilename(key) - resp, err := c.d.Read(key) - if err != nil { - return []byte{}, false - } - return resp, true -} - -// Set saves a response to the cache as key -func (c *Cache) Set(key string, resp []byte) { - key = keyToFilename(key) - c.d.WriteStream(key, bytes.NewReader(resp), true) -} - -// Delete removes the response with key from the cache -func (c *Cache) Delete(key string) { - key = keyToFilename(key) - c.d.Erase(key) -} - -func keyToFilename(key string) string { - h := md5.New() - io.WriteString(h, key) - return hex.EncodeToString(h.Sum(nil)) -} - -// New returns a new Cache that will store files in basePath -func New(basePath string) *Cache { - return &Cache{ - d: diskv.New(diskv.Options{ - BasePath: basePath, - CacheSizeMax: 100 * 1024 * 1024, // 100MB - }), - } -} - -// NewWithDiskv returns a new Cache using the provided Diskv as underlying -// storage. 
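Continuing the backends list above, a short hedged sketch (the base path is arbitrary) that swaps the memory cache for the diskv-backed `diskcache` shown in this hunk:

```go
package main

import (
	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// diskcache.New returns a value satisfying the httpcache.Cache interface,
	// so it can be handed straight to NewTransport. The path is illustrative.
	transport := httpcache.NewTransport(diskcache.New("/tmp/httpcache-demo"))
	client := transport.Client() // a plain *http.Client backed by the on-disk cache
	_ = client                   // use client.Get / client.Do as usual
}
```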
-func NewWithDiskv(d *diskv.Diskv) *Cache { - return &Cache{d} -} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go deleted file mode 100644 index b41a63d..0000000 --- a/vendor/github.com/gregjones/httpcache/httpcache.go +++ /dev/null @@ -1,551 +0,0 @@ -// Package httpcache provides a http.RoundTripper implementation that works as a -// mostly RFC-compliant cache for http responses. -// -// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client -// and not for a shared proxy). -// -package httpcache - -import ( - "bufio" - "bytes" - "errors" - "io" - "io/ioutil" - "net/http" - "net/http/httputil" - "strings" - "sync" - "time" -) - -const ( - stale = iota - fresh - transparent - // XFromCache is the header added to responses that are returned from the cache - XFromCache = "X-From-Cache" -) - -// A Cache interface is used by the Transport to store and retrieve responses. -type Cache interface { - // Get returns the []byte representation of a cached response and a bool - // set to true if the value isn't empty - Get(key string) (responseBytes []byte, ok bool) - // Set stores the []byte representation of a response against a key - Set(key string, responseBytes []byte) - // Delete removes the value associated with the key - Delete(key string) -} - -// cacheKey returns the cache key for req. -func cacheKey(req *http.Request) string { - if req.Method == http.MethodGet { - return req.URL.String() - } else { - return req.Method + " " + req.URL.String() - } -} - -// CachedResponse returns the cached http.Response for req if present, and nil -// otherwise. -func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) { - cachedVal, ok := c.Get(cacheKey(req)) - if !ok { - return - } - - b := bytes.NewBuffer(cachedVal) - return http.ReadResponse(bufio.NewReader(b), req) -} - -// MemoryCache is an implemtation of Cache that stores responses in an in-memory map. 
-type MemoryCache struct { - mu sync.RWMutex - items map[string][]byte -} - -// Get returns the []byte representation of the response and true if present, false if not -func (c *MemoryCache) Get(key string) (resp []byte, ok bool) { - c.mu.RLock() - resp, ok = c.items[key] - c.mu.RUnlock() - return resp, ok -} - -// Set saves response resp to the cache with key -func (c *MemoryCache) Set(key string, resp []byte) { - c.mu.Lock() - c.items[key] = resp - c.mu.Unlock() -} - -// Delete removes key from the cache -func (c *MemoryCache) Delete(key string) { - c.mu.Lock() - delete(c.items, key) - c.mu.Unlock() -} - -// NewMemoryCache returns a new Cache that will store items in an in-memory map -func NewMemoryCache() *MemoryCache { - c := &MemoryCache{items: map[string][]byte{}} - return c -} - -// Transport is an implementation of http.RoundTripper that will return values from a cache -// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since) -// to repeated requests allowing servers to return 304 / Not Modified -type Transport struct { - // The RoundTripper interface actually used to make requests - // If nil, http.DefaultTransport is used - Transport http.RoundTripper - Cache Cache - // If true, responses returned from the cache will be given an extra header, X-From-Cache - MarkCachedResponses bool -} - -// NewTransport returns a new Transport with the -// provided Cache implementation and MarkCachedResponses set to true -func NewTransport(c Cache) *Transport { - return &Transport{Cache: c, MarkCachedResponses: true} -} - -// Client returns an *http.Client that caches responses. -func (t *Transport) Client() *http.Client { - return &http.Client{Transport: t} -} - -// varyMatches will return false unless all of the cached values for the headers listed in Vary -// match the new request -func varyMatches(cachedResp *http.Response, req *http.Request) bool { - for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") { - header = http.CanonicalHeaderKey(header) - if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) { - return false - } - } - return true -} - -// RoundTrip takes a Request and returns a Response -// -// If there is a fresh Response already in cache, then it will be returned without connecting to -// the server. -// -// If there is a stale Response, then any validators it contains will be set on the new request -// to give the server a chance to respond with NotModified. If this happens, then the cached Response -// will be returned. 
-func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { - cacheKey := cacheKey(req) - cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" - var cachedResp *http.Response - if cacheable { - cachedResp, err = CachedResponse(t.Cache, req) - } else { - // Need to invalidate an existing value - t.Cache.Delete(cacheKey) - } - - transport := t.Transport - if transport == nil { - transport = http.DefaultTransport - } - - if cacheable && cachedResp != nil && err == nil { - if t.MarkCachedResponses { - cachedResp.Header.Set(XFromCache, "1") - } - - if varyMatches(cachedResp, req) { - // Can only use cached value if the new request doesn't Vary significantly - freshness := getFreshness(cachedResp.Header, req.Header) - if freshness == fresh { - return cachedResp, nil - } - - if freshness == stale { - var req2 *http.Request - // Add validators if caller hasn't already done so - etag := cachedResp.Header.Get("etag") - if etag != "" && req.Header.Get("etag") == "" { - req2 = cloneRequest(req) - req2.Header.Set("if-none-match", etag) - } - lastModified := cachedResp.Header.Get("last-modified") - if lastModified != "" && req.Header.Get("last-modified") == "" { - if req2 == nil { - req2 = cloneRequest(req) - } - req2.Header.Set("if-modified-since", lastModified) - } - if req2 != nil { - req = req2 - } - } - } - - resp, err = transport.RoundTrip(req) - if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { - // Replace the 304 response with the one from cache, but update with some new headers - endToEndHeaders := getEndToEndHeaders(resp.Header) - for _, header := range endToEndHeaders { - cachedResp.Header[header] = resp.Header[header] - } - resp = cachedResp - } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && - req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { - // In case of transport failure and stale-if-error activated, returns cached content - // when available - return cachedResp, nil - } else { - if err != nil || resp.StatusCode != http.StatusOK { - t.Cache.Delete(cacheKey) - } - if err != nil { - return nil, err - } - } - } else { - reqCacheControl := parseCacheControl(req.Header) - if _, ok := reqCacheControl["only-if-cached"]; ok { - resp = newGatewayTimeoutResponse(req) - } else { - resp, err = transport.RoundTrip(req) - if err != nil { - return nil, err - } - } - } - - if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { - for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { - varyKey = http.CanonicalHeaderKey(varyKey) - fakeHeader := "X-Varied-" + varyKey - reqValue := req.Header.Get(varyKey) - if reqValue != "" { - resp.Header.Set(fakeHeader, reqValue) - } - } - switch req.Method { - case "GET": - // Delay caching until EOF is reached. - resp.Body = &cachingReadCloser{ - R: resp.Body, - OnEOF: func(r io.Reader) { - resp := *resp - resp.Body = ioutil.NopCloser(r) - respBytes, err := httputil.DumpResponse(&resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - }, - } - default: - respBytes, err := httputil.DumpResponse(resp, true) - if err == nil { - t.Cache.Set(cacheKey, respBytes) - } - } - } else { - t.Cache.Delete(cacheKey) - } - return resp, nil -} - -// ErrNoDateHeader indicates that the HTTP headers contained no Date header. -var ErrNoDateHeader = errors.New("no Date header") - -// Date parses and returns the value of the Date header. 
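To make the fresh/stale branches of RoundTrip above concrete, here is a self-contained sketch (the test server and ETag value are invented for illustration) in which the second request is revalidated with `If-None-Match`, answered 304 by the server, and served from the cache:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"net/http/httptest"

	"github.com/gregjones/httpcache"
)

func main() {
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Header.Get("If-None-Match") == `"v1"` {
			w.WriteHeader(http.StatusNotModified) // cached copy is still good
			return
		}
		w.Header().Set("Etag", `"v1"`)
		fmt.Fprint(w, "payload")
	}))
	defer srv.Close()

	client := httpcache.NewMemoryCacheTransport().Client()
	for i := 0; i < 2; i++ {
		resp, err := client.Get(srv.URL)
		if err != nil {
			panic(err)
		}
		ioutil.ReadAll(resp.Body) // drain so the GET response is stored on EOF
		resp.Body.Close()
		// Second iteration prints "1": the stale entry was revalidated and reused.
		fmt.Println("X-From-Cache:", resp.Header.Get(httpcache.XFromCache))
	}
}
```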
-func Date(respHeaders http.Header) (date time.Time, err error) { - dateHeader := respHeaders.Get("date") - if dateHeader == "" { - err = ErrNoDateHeader - return - } - - return time.Parse(time.RFC1123, dateHeader) -} - -type realClock struct{} - -func (c *realClock) since(d time.Time) time.Duration { - return time.Since(d) -} - -type timer interface { - since(d time.Time) time.Duration -} - -var clock timer = &realClock{} - -// getFreshness will return one of fresh/stale/transparent based on the cache-control -// values of the request and the response -// -// fresh indicates the response can be returned -// stale indicates that the response needs validating before it is returned -// transparent indicates the response should not be used to fulfil the request -// -// Because this is only a private cache, 'public' and 'private' in cache-control aren't -// signficant. Similarly, smax-age isn't used. -func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - if _, ok := reqCacheControl["no-cache"]; ok { - return transparent - } - if _, ok := respCacheControl["no-cache"]; ok { - return stale - } - if _, ok := reqCacheControl["only-if-cached"]; ok { - return fresh - } - - date, err := Date(respHeaders) - if err != nil { - return stale - } - currentAge := clock.since(date) - - var lifetime time.Duration - var zeroDuration time.Duration - - // If a response includes both an Expires header and a max-age directive, - // the max-age directive overrides the Expires header, even if the Expires header is more restrictive. - if maxAge, ok := respCacheControl["max-age"]; ok { - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } else { - expiresHeader := respHeaders.Get("Expires") - if expiresHeader != "" { - expires, err := time.Parse(time.RFC1123, expiresHeader) - if err != nil { - lifetime = zeroDuration - } else { - lifetime = expires.Sub(date) - } - } - } - - if maxAge, ok := reqCacheControl["max-age"]; ok { - // the client is willing to accept a response whose age is no greater than the specified time in seconds - lifetime, err = time.ParseDuration(maxAge + "s") - if err != nil { - lifetime = zeroDuration - } - } - if minfresh, ok := reqCacheControl["min-fresh"]; ok { - // the client wants a response that will still be fresh for at least the specified number of seconds. - minfreshDuration, err := time.ParseDuration(minfresh + "s") - if err == nil { - currentAge = time.Duration(currentAge + minfreshDuration) - } - } - - if maxstale, ok := reqCacheControl["max-stale"]; ok { - // Indicates that the client is willing to accept a response that has exceeded its expiration time. - // If max-stale is assigned a value, then the client is willing to accept a response that has exceeded - // its expiration time by no more than the specified number of seconds. - // If no value is assigned to max-stale, then the client is willing to accept a stale response of any age. - // - // Responses served only because of a max-stale value are supposed to have a Warning header added to them, - // but that seems like a hassle, and is it actually useful? If so, then there needs to be a different - // return-value available here. 
- if maxstale == "" { - return fresh - } - maxstaleDuration, err := time.ParseDuration(maxstale + "s") - if err == nil { - currentAge = time.Duration(currentAge - maxstaleDuration) - } - } - - if lifetime > currentAge { - return fresh - } - - return stale -} - -// Returns true if either the request or the response includes the stale-if-error -// cache control extension: https://tools.ietf.org/html/rfc5861 -func canStaleOnError(respHeaders, reqHeaders http.Header) bool { - respCacheControl := parseCacheControl(respHeaders) - reqCacheControl := parseCacheControl(reqHeaders) - - var err error - lifetime := time.Duration(-1) - - if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { - if staleMaxAge != "" { - lifetime, err = time.ParseDuration(staleMaxAge + "s") - if err != nil { - return false - } - } else { - return true - } - } - - if lifetime >= 0 { - date, err := Date(respHeaders) - if err != nil { - return false - } - currentAge := clock.since(date) - if lifetime > currentAge { - return true - } - } - - return false -} - -func getEndToEndHeaders(respHeaders http.Header) []string { - // These headers are always hop-by-hop - hopByHopHeaders := map[string]struct{}{ - "Connection": {}, - "Keep-Alive": {}, - "Proxy-Authenticate": {}, - "Proxy-Authorization": {}, - "Te": {}, - "Trailers": {}, - "Transfer-Encoding": {}, - "Upgrade": {}, - } - - for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { - // any header listed in connection, if present, is also considered hop-by-hop - if strings.Trim(extra, " ") != "" { - hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} - } - } - endToEndHeaders := []string{} - for respHeader := range respHeaders { - if _, ok := hopByHopHeaders[respHeader]; !ok { - endToEndHeaders = append(endToEndHeaders, respHeader) - } - } - return endToEndHeaders -} - -func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { - if _, ok := respCacheControl["no-store"]; ok { - return false - } - if _, ok := reqCacheControl["no-store"]; ok { - return false - } - return true -} - -func newGatewayTimeoutResponse(req *http.Request) *http.Response { - var braw bytes.Buffer - braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") - resp, err := http.ReadResponse(bufio.NewReader(&braw), req) - if err != nil { - panic(err) - } - return resp -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. 
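The precedence rule in the comments above (max-age beats Expires, both measured against the Date header) can be restated in a few lines. This is a simplified, hedged re-derivation for illustration, not the vendored implementation:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
	"time"
)

// freshnessLifetime restates the documented rule: a max-age directive
// overrides the Expires header; with neither, the lifetime is zero (stale).
func freshnessLifetime(h http.Header, date time.Time) time.Duration {
	for _, part := range strings.Split(h.Get("Cache-Control"), ",") {
		part = strings.TrimSpace(part)
		if strings.HasPrefix(part, "max-age=") {
			if d, err := time.ParseDuration(strings.TrimPrefix(part, "max-age=") + "s"); err == nil {
				return d
			}
		}
	}
	if expires, err := time.Parse(time.RFC1123, h.Get("Expires")); err == nil {
		return expires.Sub(date)
	}
	return 0
}

func main() {
	date := time.Now().Add(-30 * time.Second) // response generated 30s ago
	h := http.Header{}
	h.Set("Expires", date.Add(10*time.Second).Format(time.RFC1123))
	h.Set("Cache-Control", "max-age=60")
	lifetime := freshnessLifetime(h, date)
	// max-age wins: lifetime is 60s, current age is ~30s, so the response is fresh.
	fmt.Println(lifetime, lifetime > 30*time.Second)
}
```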
-// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} - -type cacheControl map[string]string - -func parseCacheControl(headers http.Header) cacheControl { - cc := cacheControl{} - ccHeader := headers.Get("Cache-Control") - for _, part := range strings.Split(ccHeader, ",") { - part = strings.Trim(part, " ") - if part == "" { - continue - } - if strings.ContainsRune(part, '=') { - keyval := strings.Split(part, "=") - cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") - } else { - cc[part] = "" - } - } - return cc -} - -// headerAllCommaSepValues returns all comma-separated values (each -// with whitespace trimmed) for header name in headers. According to -// Section 4.2 of the HTTP/1.1 spec -// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), -// values from multiple occurrences of a header should be concatenated, if -// the header's value is a comma-separated list. -func headerAllCommaSepValues(headers http.Header, name string) []string { - var vals []string - for _, val := range headers[http.CanonicalHeaderKey(name)] { - fields := strings.Split(val, ",") - for i, f := range fields { - fields[i] = strings.TrimSpace(f) - } - vals = append(vals, fields...) - } - return vals -} - -// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF -// handler with a full copy of the content read from R when EOF is -// reached. -type cachingReadCloser struct { - // Underlying ReadCloser. - R io.ReadCloser - // OnEOF is called with a copy of the content of R when EOF is reached. - OnEOF func(io.Reader) - - buf bytes.Buffer // buf stores a copy of the content of R. -} - -// Read reads the next len(p) bytes from R or until R is drained. The -// return value n is the number of bytes read. If R has no data to -// return, err is io.EOF and OnEOF is called with a full copy of what -// has been read so far. -func (r *cachingReadCloser) Read(p []byte) (n int, err error) { - n, err = r.R.Read(p) - r.buf.Write(p[:n]) - if err == io.EOF { - r.OnEOF(bytes.NewReader(r.buf.Bytes())) - } - return n, err -} - -func (r *cachingReadCloser) Close() error { - return r.R.Close() -} - -// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation -func NewMemoryCacheTransport() *Transport { - c := NewMemoryCache() - t := NewTransport(c) - return t -} diff --git a/vendor/github.com/peterbourgon/diskv/LICENSE b/vendor/github.com/peterbourgon/diskv/LICENSE deleted file mode 100644 index 41ce7f1..0000000 --- a/vendor/github.com/peterbourgon/diskv/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2011-2012 Peter Bourgon - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/github.com/peterbourgon/diskv/README.md b/vendor/github.com/peterbourgon/diskv/README.md deleted file mode 100644 index 3474739..0000000 --- a/vendor/github.com/peterbourgon/diskv/README.md +++ /dev/null @@ -1,141 +0,0 @@ -# What is diskv? - -Diskv (disk-vee) is a simple, persistent key-value store written in the Go -language. It starts with an incredibly simple API for storing arbitrary data on -a filesystem by key, and builds several layers of performance-enhancing -abstraction on top. The end result is a conceptually simple, but highly -performant, disk-backed storage system. - -[![Build Status][1]][2] - -[1]: https://drone.io/github.com/peterbourgon/diskv/status.png -[2]: https://drone.io/github.com/peterbourgon/diskv/latest - - -# Installing - -Install [Go 1][3], either [from source][4] or [with a prepackaged binary][5]. -Then, - -```bash -$ go get github.com/peterbourgon/diskv -``` - -[3]: http://golang.org -[4]: http://golang.org/doc/install/source -[5]: http://golang.org/doc/install - - -# Usage - -```go -package main - -import ( - "fmt" - "github.com/peterbourgon/diskv" -) - -func main() { - // Simplest transform function: put all the data files into the base dir. - flatTransform := func(s string) []string { return []string{} } - - // Initialize a new diskv store, rooted at "my-data-dir", with a 1MB cache. - d := diskv.New(diskv.Options{ - BasePath: "my-data-dir", - Transform: flatTransform, - CacheSizeMax: 1024 * 1024, - }) - - // Write three bytes to the key "alpha". - key := "alpha" - d.Write(key, []byte{'1', '2', '3'}) - - // Read the value back out of the store. - value, _ := d.Read(key) - fmt.Printf("%v\n", value) - - // Erase the key+value from the store (and the disk). - d.Erase(key) -} -``` - -More complex examples can be found in the "examples" subdirectory. - - -# Theory - -## Basic idea - -At its core, diskv is a map of a key (`string`) to arbitrary data (`[]byte`). -The data is written to a single file on disk, with the same name as the key. -The key determines where that file will be stored, via a user-provided -`TransformFunc`, which takes a key and returns a slice (`[]string`) -corresponding to a path list where the key file will be stored. The simplest -TransformFunc, - -```go -func SimpleTransform (key string) []string { - return []string{} -} -``` - -will place all keys in the same, base directory. The design is inspired by -[Redis diskstore][6]; a TransformFunc which emulates the default diskstore -behavior is available in the content-addressable-storage example. - -[6]: http://groups.google.com/group/redis-db/browse_thread/thread/d444bc786689bde9?pli=1 - -**Note** that your TransformFunc should ensure that one valid key doesn't -transform to a subset of another valid key. That is, it shouldn't be possible -to construct valid keys that resolve to directory names. 
As a concrete example, -if your TransformFunc splits on every 3 characters, then - -```go -d.Write("abcabc", val) // OK: written to /abc/abc/abcabc -d.Write("abc", val) // Error: attempted write to /abc/abc, but it's a directory -``` - -This will be addressed in an upcoming version of diskv. - -Probably the most important design principle behind diskv is that your data is -always flatly available on the disk. diskv will never do anything that would -prevent you from accessing, copying, backing up, or otherwise interacting with -your data via common UNIX commandline tools. - -## Adding a cache - -An in-memory caching layer is provided by combining the BasicStore -functionality with a simple map structure, and keeping it up-to-date as -appropriate. Since the map structure in Go is not threadsafe, it's combined -with a RWMutex to provide safe concurrent access. - -## Adding order - -diskv is a key-value store and therefore inherently unordered. An ordering -system can be injected into the store by passing something which satisfies the -diskv.Index interface. (A default implementation, using Google's -[btree][7] package, is provided.) Basically, diskv keeps an ordered (by a -user-provided Less function) index of the keys, which can be queried. - -[7]: https://github.com/google/btree - -## Adding compression - -Something which implements the diskv.Compression interface may be passed -during store creation, so that all Writes and Reads are filtered through -a compression/decompression pipeline. Several default implementations, -using stdlib compression algorithms, are provided. Note that data is cached -compressed; the cost of decompression is borne with each Read. - -## Streaming - -diskv also now provides ReadStream and WriteStream methods, to allow very large -data to be handled efficiently. - - -# Future plans - - * Needs plenty of robust testing: huge datasets, etc... - * More thorough benchmarking - * Your suggestions for use-cases I haven't thought of diff --git a/vendor/github.com/peterbourgon/diskv/compression.go b/vendor/github.com/peterbourgon/diskv/compression.go deleted file mode 100644 index 5192b02..0000000 --- a/vendor/github.com/peterbourgon/diskv/compression.go +++ /dev/null @@ -1,64 +0,0 @@ -package diskv - -import ( - "compress/flate" - "compress/gzip" - "compress/zlib" - "io" -) - -// Compression is an interface that Diskv uses to implement compression of -// data. Writer takes a destination io.Writer and returns a WriteCloser that -// compresses all data written through it. Reader takes a source io.Reader and -// returns a ReadCloser that decompresses all data read through it. You may -// define these methods on your own type, or use one of the NewCompression -// helpers. -type Compression interface { - Writer(dst io.Writer) (io.WriteCloser, error) - Reader(src io.Reader) (io.ReadCloser, error) -} - -// NewGzipCompression returns a Gzip-based Compression. -func NewGzipCompression() Compression { - return NewGzipCompressionLevel(flate.DefaultCompression) -} - -// NewGzipCompressionLevel returns a Gzip-based Compression with the given level. -func NewGzipCompressionLevel(level int) Compression { - return &genericCompression{ - wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) }, - rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) }, - } -} - -// NewZlibCompression returns a Zlib-based Compression. 
-func NewZlibCompression() Compression { - return NewZlibCompressionLevel(flate.DefaultCompression) -} - -// NewZlibCompressionLevel returns a Zlib-based Compression with the given level. -func NewZlibCompressionLevel(level int) Compression { - return NewZlibCompressionLevelDict(level, nil) -} - -// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given -// level, based on the given dictionary. -func NewZlibCompressionLevelDict(level int, dict []byte) Compression { - return &genericCompression{ - func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) }, - func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) }, - } -} - -type genericCompression struct { - wf func(w io.Writer) (io.WriteCloser, error) - rf func(r io.Reader) (io.ReadCloser, error) -} - -func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) { - return g.wf(dst) -} - -func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) { - return g.rf(src) -} diff --git a/vendor/github.com/peterbourgon/diskv/diskv.go b/vendor/github.com/peterbourgon/diskv/diskv.go deleted file mode 100644 index 524dc0a..0000000 --- a/vendor/github.com/peterbourgon/diskv/diskv.go +++ /dev/null @@ -1,624 +0,0 @@ -// Diskv (disk-vee) is a simple, persistent, key-value store. -// It stores all data flatly on the filesystem. - -package diskv - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "sync" - "syscall" -) - -const ( - defaultBasePath = "diskv" - defaultFilePerm os.FileMode = 0666 - defaultPathPerm os.FileMode = 0777 -) - -var ( - defaultTransform = func(s string) []string { return []string{} } - errCanceled = errors.New("canceled") - errEmptyKey = errors.New("empty key") - errBadKey = errors.New("bad key") - errImportDirectory = errors.New("can't import a directory") -) - -// TransformFunction transforms a key into a slice of strings, with each -// element in the slice representing a directory in the file path where the -// key's entry will eventually be stored. -// -// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"], -// the final location of the data file will be /ab/cde/f/abcdef -type TransformFunction func(s string) []string - -// Options define a set of properties that dictate Diskv behavior. -// All values are optional. -type Options struct { - BasePath string - Transform TransformFunction - CacheSizeMax uint64 // bytes - PathPerm os.FileMode - FilePerm os.FileMode - // If TempDir is set, it will enable filesystem atomic writes by - // writing temporary files to that location before being moved - // to BasePath. - // Note that TempDir MUST be on the same device/partition as - // BasePath. - TempDir string - - Index Index - IndexLess LessFunction - - Compression Compression -} - -// Diskv implements the Diskv interface. You shouldn't construct Diskv -// structures directly; instead, use the New constructor. -type Diskv struct { - Options - mu sync.RWMutex - cache map[string][]byte - cacheSize uint64 -} - -// New returns an initialized Diskv structure, ready to use. -// If the path identified by baseDir already contains data, -// it will be accessible, but not yet cached. 
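For reference, the Options fields above are normally wired together in a single `diskv.New` call. A hedged sketch, assuming an illustrative store path and a hash-bucket transform (the md5 prefix keeps key files from ever colliding with directory names, the caveat raised in the README earlier in this diff); `NewGzipCompression` and `TempDir` come straight from the code shown above:

```go
package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	// Bucket keys by a one-byte md5 prefix: every data file lives inside a
	// two-hex-character directory, so a key can never clash with a directory.
	transform := func(key string) []string {
		sum := md5.Sum([]byte(key))
		return []string{hex.EncodeToString(sum[:1])} // 256 buckets: 00/ .. ff/
	}

	d := diskv.New(diskv.Options{
		BasePath:     "my-data-dir",
		Transform:    transform,
		CacheSizeMax: 1024 * 1024,                // 1MB in-memory cache
		Compression:  diskv.NewGzipCompression(), // from compression.go above
		TempDir:      "my-data-dir-tmp",          // must share a partition with BasePath
	})

	if err := d.Write("alpha", []byte("123")); err != nil {
		panic(err)
	}
	val, err := d.Read("alpha")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val))
}
```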
-func New(o Options) *Diskv { - if o.BasePath == "" { - o.BasePath = defaultBasePath - } - if o.Transform == nil { - o.Transform = defaultTransform - } - if o.PathPerm == 0 { - o.PathPerm = defaultPathPerm - } - if o.FilePerm == 0 { - o.FilePerm = defaultFilePerm - } - - d := &Diskv{ - Options: o, - cache: map[string][]byte{}, - cacheSize: 0, - } - - if d.Index != nil && d.IndexLess != nil { - d.Index.Initialize(d.IndexLess, d.Keys(nil)) - } - - return d -} - -// Write synchronously writes the key-value pair to disk, making it immediately -// available for reads. Write relies on the filesystem to perform an eventual -// sync to physical media. If you need stronger guarantees, see WriteStream. -func (d *Diskv) Write(key string, val []byte) error { - return d.WriteStream(key, bytes.NewBuffer(val), false) -} - -// WriteStream writes the data represented by the io.Reader to the disk, under -// the provided key. If sync is true, WriteStream performs an explicit sync on -// the file as soon as it's written. -// -// bytes.Buffer provides io.Reader semantics for basic data types. -func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error { - if len(key) <= 0 { - return errEmptyKey - } - - d.mu.Lock() - defer d.mu.Unlock() - - return d.writeStreamWithLock(key, r, sync) -} - -// createKeyFileWithLock either creates the key file directly, or -// creates a temporary file in TempDir if it is set. -func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) { - if d.TempDir != "" { - if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil { - return nil, fmt.Errorf("temp mkdir: %s", err) - } - f, err := ioutil.TempFile(d.TempDir, "") - if err != nil { - return nil, fmt.Errorf("temp file: %s", err) - } - - if err := f.Chmod(d.FilePerm); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return nil, fmt.Errorf("chmod: %s", err) - } - return f, nil - } - - mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists - f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm) - if err != nil { - return nil, fmt.Errorf("open file: %s", err) - } - return f, nil -} - -// writeStream does no input validation checking. 
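`WriteStream` above takes any `io.Reader`, and with `sync` set to true it performs an explicit fsync before the temp file (when `TempDir` is configured) is renamed into place. A hedged sketch of streaming a large file into a store; the paths `stream-data`, `stream-data-tmp`, and `big.bin` are illustrative:

```go
package main

import (
	"os"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath: "stream-data",
		TempDir:  "stream-data-tmp", // enables write-to-temp-then-rename
	})

	// Stream a large file into the store without buffering it all in memory.
	f, err := os.Open("big.bin") // illustrative input file
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// sync=true asks for an explicit fsync before the file lands in place.
	if err := d.WriteStream("big.bin", f, true); err != nil {
		panic(err)
	}
}
```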
-func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error { - if err := d.ensurePathWithLock(key); err != nil { - return fmt.Errorf("ensure path: %s", err) - } - - f, err := d.createKeyFileWithLock(key) - if err != nil { - return fmt.Errorf("create key file: %s", err) - } - - wc := io.WriteCloser(&nopWriteCloser{f}) - if d.Compression != nil { - wc, err = d.Compression.Writer(f) - if err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("compression writer: %s", err) - } - } - - if _, err := io.Copy(wc, r); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("i/o copy: %s", err) - } - - if err := wc.Close(); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("compression close: %s", err) - } - - if sync { - if err := f.Sync(); err != nil { - f.Close() // error deliberately ignored - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("file sync: %s", err) - } - } - - if err := f.Close(); err != nil { - return fmt.Errorf("file close: %s", err) - } - - if f.Name() != d.completeFilename(key) { - if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil { - os.Remove(f.Name()) // error deliberately ignored - return fmt.Errorf("rename: %s", err) - } - } - - if d.Index != nil { - d.Index.Insert(key) - } - - d.bustCacheWithLock(key) // cache only on read - - return nil -} - -// Import imports the source file into diskv under the destination key. If the -// destination key already exists, it's overwritten. If move is true, the -// source file is removed after a successful import. -func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) { - if dstKey == "" { - return errEmptyKey - } - - if fi, err := os.Stat(srcFilename); err != nil { - return err - } else if fi.IsDir() { - return errImportDirectory - } - - d.mu.Lock() - defer d.mu.Unlock() - - if err := d.ensurePathWithLock(dstKey); err != nil { - return fmt.Errorf("ensure path: %s", err) - } - - if move { - if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil { - d.bustCacheWithLock(dstKey) - return nil - } else if err != syscall.EXDEV { - // If it failed due to being on a different device, fall back to copying - return err - } - } - - f, err := os.Open(srcFilename) - if err != nil { - return err - } - defer f.Close() - err = d.writeStreamWithLock(dstKey, f, false) - if err == nil && move { - err = os.Remove(srcFilename) - } - return err -} - -// Read reads the key and returns the value. -// If the key is available in the cache, Read won't touch the disk. -// If the key is not in the cache, Read will have the side-effect of -// lazily caching the value. -func (d *Diskv) Read(key string) ([]byte, error) { - rc, err := d.ReadStream(key, false) - if err != nil { - return []byte{}, err - } - defer rc.Close() - return ioutil.ReadAll(rc) -} - -// ReadStream reads the key and returns the value (data) as an io.ReadCloser. -// If the value is cached from a previous read, and direct is false, -// ReadStream will use the cached value. Otherwise, it will return a handle to -// the file on disk, and cache the data on read. -// -// If direct is true, ReadStream will lazily delete any cached value for the -// key, and return a direct handle to the file on disk. 
-// -// If compression is enabled, ReadStream taps into the io.Reader stream prior -// to decompression, and caches the compressed data. -func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) { - d.mu.RLock() - defer d.mu.RUnlock() - - if val, ok := d.cache[key]; ok { - if !direct { - buf := bytes.NewBuffer(val) - if d.Compression != nil { - return d.Compression.Reader(buf) - } - return ioutil.NopCloser(buf), nil - } - - go func() { - d.mu.Lock() - defer d.mu.Unlock() - d.uncacheWithLock(key, uint64(len(val))) - }() - } - - return d.readWithRLock(key) -} - -// read ignores the cache, and returns an io.ReadCloser representing the -// decompressed data for the given key, streamed from the disk. Clients should -// acquire a read lock on the Diskv and check the cache themselves before -// calling read. -func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) { - filename := d.completeFilename(key) - - fi, err := os.Stat(filename) - if err != nil { - return nil, err - } - if fi.IsDir() { - return nil, os.ErrNotExist - } - - f, err := os.Open(filename) - if err != nil { - return nil, err - } - - var r io.Reader - if d.CacheSizeMax > 0 { - r = newSiphon(f, d, key) - } else { - r = &closingReader{f} - } - - var rc = io.ReadCloser(ioutil.NopCloser(r)) - if d.Compression != nil { - rc, err = d.Compression.Reader(r) - if err != nil { - return nil, err - } - } - - return rc, nil -} - -// closingReader provides a Reader that automatically closes the -// embedded ReadCloser when it reaches EOF -type closingReader struct { - rc io.ReadCloser -} - -func (cr closingReader) Read(p []byte) (int, error) { - n, err := cr.rc.Read(p) - if err == io.EOF { - if closeErr := cr.rc.Close(); closeErr != nil { - return n, closeErr // close must succeed for Read to succeed - } - } - return n, err -} - -// siphon is like a TeeReader: it copies all data read through it to an -// internal buffer, and moves that buffer to the cache at EOF. -type siphon struct { - f *os.File - d *Diskv - key string - buf *bytes.Buffer -} - -// newSiphon constructs a siphoning reader that represents the passed file. -// When a successful series of reads ends in an EOF, the siphon will write -// the buffered data to Diskv's cache under the given key. -func newSiphon(f *os.File, d *Diskv, key string) io.Reader { - return &siphon{ - f: f, - d: d, - key: key, - buf: &bytes.Buffer{}, - } -} - -// Read implements the io.Reader interface for siphon. -func (s *siphon) Read(p []byte) (int, error) { - n, err := s.f.Read(p) - - if err == nil { - return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed - } - - if err == io.EOF { - s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail - if closeErr := s.f.Close(); closeErr != nil { - return n, closeErr // close must succeed for Read to succeed - } - return n, err - } - - return n, err -} - -// Erase synchronously erases the given key from the disk and the cache. -func (d *Diskv) Erase(key string) error { - d.mu.Lock() - defer d.mu.Unlock() - - d.bustCacheWithLock(key) - - // erase from index - if d.Index != nil { - d.Index.Delete(key) - } - - // erase from disk - filename := d.completeFilename(key) - if s, err := os.Stat(filename); err == nil { - if s.IsDir() { - return errBadKey - } - if err = os.Remove(filename); err != nil { - return err - } - } else { - // Return err as-is so caller can do os.IsNotExist(err). 
- return err - } - - // clean up and return - d.pruneDirsWithLock(key) - return nil -} - -// EraseAll will delete all of the data from the store, both in the cache and on -// the disk. Note that EraseAll doesn't distinguish diskv-related data from non- -// diskv-related data. Care should be taken to always specify a diskv base -// directory that is exclusively for diskv data. -func (d *Diskv) EraseAll() error { - d.mu.Lock() - defer d.mu.Unlock() - d.cache = make(map[string][]byte) - d.cacheSize = 0 - if d.TempDir != "" { - os.RemoveAll(d.TempDir) // errors ignored - } - return os.RemoveAll(d.BasePath) -} - -// Has returns true if the given key exists. -func (d *Diskv) Has(key string) bool { - d.mu.Lock() - defer d.mu.Unlock() - - if _, ok := d.cache[key]; ok { - return true - } - - filename := d.completeFilename(key) - s, err := os.Stat(filename) - if err != nil { - return false - } - if s.IsDir() { - return false - } - - return true -} - -// Keys returns a channel that will yield every key accessible by the store, -// in undefined order. If a cancel channel is provided, closing it will -// terminate and close the keys channel. -func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string { - return d.KeysPrefix("", cancel) -} - -// KeysPrefix returns a channel that will yield every key accessible by the -// store with the given prefix, in undefined order. If a cancel channel is -// provided, closing it will terminate and close the keys channel. If the -// provided prefix is the empty string, all keys will be yielded. -func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string { - var prepath string - if prefix == "" { - prepath = d.BasePath - } else { - prepath = d.pathFor(prefix) - } - c := make(chan string) - go func() { - filepath.Walk(prepath, walker(c, prefix, cancel)) - close(c) - }() - return c -} - -// walker returns a function which satisfies the filepath.WalkFunc interface. -// It sends every non-directory file entry down the channel c. -func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc { - return func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) { - return nil // "pass" - } - - select { - case c <- info.Name(): - case <-cancel: - return errCanceled - } - - return nil - } -} - -// pathFor returns the absolute path for location on the filesystem where the -// data for the given key will be stored. -func (d *Diskv) pathFor(key string) string { - return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...)) -} - -// ensurePathWithLock is a helper function that generates all necessary -// directories on the filesystem for the given key. -func (d *Diskv) ensurePathWithLock(key string) error { - return os.MkdirAll(d.pathFor(key), d.PathPerm) -} - -// completeFilename returns the absolute path to the file for the given key. -func (d *Diskv) completeFilename(key string) string { - return filepath.Join(d.pathFor(key), key) -} - -// cacheWithLock attempts to cache the given key-value pair in the store's -// cache. It can fail if the value is larger than the cache's maximum size. 
-func (d *Diskv) cacheWithLock(key string, val []byte) error { - valueSize := uint64(len(val)) - if err := d.ensureCacheSpaceWithLock(valueSize); err != nil { - return fmt.Errorf("%s; not caching", err) - } - - // be very strict about memory guarantees - if (d.cacheSize + valueSize) > d.CacheSizeMax { - panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax)) - } - - d.cache[key] = val - d.cacheSize += valueSize - return nil -} - -// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock. -func (d *Diskv) cacheWithoutLock(key string, val []byte) error { - d.mu.Lock() - defer d.mu.Unlock() - return d.cacheWithLock(key, val) -} - -func (d *Diskv) bustCacheWithLock(key string) { - if val, ok := d.cache[key]; ok { - d.uncacheWithLock(key, uint64(len(val))) - } -} - -func (d *Diskv) uncacheWithLock(key string, sz uint64) { - d.cacheSize -= sz - delete(d.cache, key) -} - -// pruneDirsWithLock deletes empty directories in the path walk leading to the -// key k. Typically this function is called after an Erase is made. -func (d *Diskv) pruneDirsWithLock(key string) error { - pathlist := d.Transform(key) - for i := range pathlist { - dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...)) - - // thanks to Steven Blenkinsop for this snippet - switch fi, err := os.Stat(dir); true { - case err != nil: - return err - case !fi.IsDir(): - panic(fmt.Sprintf("corrupt dirstate at %s", dir)) - } - - nlinks, err := filepath.Glob(filepath.Join(dir, "*")) - if err != nil { - return err - } else if len(nlinks) > 0 { - return nil // has subdirs -- do not prune - } - if err = os.Remove(dir); err != nil { - return err - } - } - - return nil -} - -// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order -// until the cache has at least valueSize bytes available. -func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error { - if valueSize > d.CacheSizeMax { - return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax) - } - - safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax } - - for key, val := range d.cache { - if safe() { - break - } - - d.uncacheWithLock(key, uint64(len(val))) - } - - if !safe() { - panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax)) - } - - return nil -} - -// nopWriteCloser wraps an io.Writer and provides a no-op Close method to -// satisfy the io.WriteCloser interface. -type nopWriteCloser struct { - io.Writer -} - -func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) } -func (wc *nopWriteCloser) Close() error { return nil } diff --git a/vendor/github.com/peterbourgon/diskv/index.go b/vendor/github.com/peterbourgon/diskv/index.go deleted file mode 100644 index 96fee51..0000000 --- a/vendor/github.com/peterbourgon/diskv/index.go +++ /dev/null @@ -1,115 +0,0 @@ -package diskv - -import ( - "sync" - - "github.com/google/btree" -) - -// Index is a generic interface for things that can -// provide an ordered list of keys. -type Index interface { - Initialize(less LessFunction, keys <-chan string) - Insert(key string) - Delete(key string) - Keys(from string, n int) []string -} - -// LessFunction is used to initialize an Index of keys in a specific order. -type LessFunction func(string, string) bool - -// btreeString is a custom data type that satisfies the BTree Less interface, -// making the strings it wraps sortable by the BTree package. 
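The `Index` interface and `LessFunction` above are what the README's "Adding order" section refers to: pass a `BTreeIndex` and a comparison function in `Options` and diskv maintains an ordered view of the keys. A hedged sketch with an illustrative store path and sample keys:

```go
package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	d := diskv.New(diskv.Options{
		BasePath:     "ordered-data",
		CacheSizeMax: 1024 * 1024,
		Index:        &diskv.BTreeIndex{},
		IndexLess:    func(a, b string) bool { return a < b }, // plain lexical order
	})

	for _, k := range []string{"cherry", "apple", "banana"} {
		if err := d.Write(k, []byte(k)); err != nil {
			panic(err)
		}
	}

	// Keys("", n) walks the B-tree and returns the first n keys in order.
	fmt.Println(d.Index.Keys("", 10)) // [apple banana cherry]
}
```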
-type btreeString struct { - s string - l LessFunction -} - -// Less satisfies the BTree.Less interface using the btreeString's LessFunction. -func (s btreeString) Less(i btree.Item) bool { - return s.l(s.s, i.(btreeString).s) -} - -// BTreeIndex is an implementation of the Index interface using google/btree. -type BTreeIndex struct { - sync.RWMutex - LessFunction - *btree.BTree -} - -// Initialize populates the BTree tree with data from the keys channel, -// according to the passed less function. It's destructive to the BTreeIndex. -func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) { - i.Lock() - defer i.Unlock() - i.LessFunction = less - i.BTree = rebuild(less, keys) -} - -// Insert inserts the given key (only) into the BTree tree. -func (i *BTreeIndex) Insert(key string) { - i.Lock() - defer i.Unlock() - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction}) -} - -// Delete removes the given key (only) from the BTree tree. -func (i *BTreeIndex) Delete(key string) { - i.Lock() - defer i.Unlock() - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - i.BTree.Delete(btreeString{s: key, l: i.LessFunction}) -} - -// Keys yields a maximum of n keys in order. If the passed 'from' key is empty, -// Keys will return the first n keys. If the passed 'from' key is non-empty, the -// first key in the returned slice will be the key that immediately follows the -// passed key, in key order. -func (i *BTreeIndex) Keys(from string, n int) []string { - i.RLock() - defer i.RUnlock() - - if i.BTree == nil || i.LessFunction == nil { - panic("uninitialized index") - } - - if i.BTree.Len() <= 0 { - return []string{} - } - - btreeFrom := btreeString{s: from, l: i.LessFunction} - skipFirst := true - if len(from) <= 0 || !i.BTree.Has(btreeFrom) { - // no such key, so fabricate an always-smallest item - btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }} - skipFirst = false - } - - keys := []string{} - iterator := func(i btree.Item) bool { - keys = append(keys, i.(btreeString).s) - return len(keys) < n - } - i.BTree.AscendGreaterOrEqual(btreeFrom, iterator) - - if skipFirst && len(keys) > 0 { - keys = keys[1:] - } - - return keys -} - -// rebuildIndex does the work of regenerating the index -// with the given keys. -func rebuild(less LessFunction, keys <-chan string) *btree.BTree { - tree := btree.New(2) - for key := range keys { - tree.ReplaceOrInsert(btreeString{s: key, l: less}) - } - return tree -} diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go deleted file mode 100644 index 5f9498e..0000000 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ /dev/null @@ -1,287 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. 
-// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/any.proto - -package anypb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... -// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. 
Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -type Any struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. This string must contain at least - // one "/" character. The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` - // Must be a valid serialized protocol buffer of the above specified type. - Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` -} - -func (x *Any) Reset() { - *x = Any{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_any_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Any) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Any) ProtoMessage() {} - -func (x *Any) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_any_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Any.ProtoReflect.Descriptor instead. 
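An `Any`, as described above, is just a type URL plus the payload's serialized bytes. The doc comment's Go snippet uses the older `github.com/golang/protobuf/ptypes` helpers; assuming those are available to the caller, a round trip looks roughly like this, with `google.protobuf.Duration` standing in for an application-defined message:

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	msg := &durpb.Duration{Seconds: 90}

	// Pack: the Any records a type URL plus the serialized bytes of msg.
	packed, err := ptypes.MarshalAny(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(packed.TypeUrl) // type.googleapis.com/google.protobuf.Duration

	// Unpack into a fresh message of the expected concrete type.
	out := &durpb.Duration{}
	if err := ptypes.UnmarshalAny(packed, out); err != nil {
		panic(err)
	}
	fmt.Println(out.Seconds) // 90
}
```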
-func (*Any) Descriptor() ([]byte, []int) { - return file_google_protobuf_any_proto_rawDescGZIP(), []int{0} -} - -func (x *Any) GetTypeUrl() string { - if x != nil { - return x.TypeUrl - } - return "" -} - -func (x *Any) GetValue() []byte { - if x != nil { - return x.Value - } - return nil -} - -var File_google_protobuf_any_proto protoreflect.FileDescriptor - -var file_google_protobuf_any_proto_rawDesc = []byte{ - 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, - 0x41, 0x6e, 0x79, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x75, 0x72, 0x6c, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x79, 0x70, 0x65, 0x55, 0x72, 0x6c, 0x12, 0x14, - 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x42, 0x6f, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x08, 0x41, 0x6e, 0x79, - 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x25, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0xa2, 0x02, - 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, - 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc -) - -func file_google_protobuf_any_proto_rawDescGZIP() []byte { - file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) - }) - return file_google_protobuf_any_proto_rawDescData -} - -var file_google_protobuf_any_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_any_proto_goTypes = []interface{}{ - (*Any)(nil), // 0: google.protobuf.Any -} -var file_google_protobuf_any_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_any_proto_init() } -func file_google_protobuf_any_proto_init() { - if File_google_protobuf_any_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_any_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Any); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_any_proto_goTypes, - DependencyIndexes: file_google_protobuf_any_proto_depIdxs, - 
MessageInfos: file_google_protobuf_any_proto_msgTypes, - }.Build() - File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil - file_google_protobuf_any_proto_goTypes = nil - file_google_protobuf_any_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go b/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go deleted file mode 100644 index 3997c60..0000000 --- a/vendor/google.golang.org/protobuf/types/known/durationpb/duration.pb.go +++ /dev/null @@ -1,249 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/duration.proto - -package durationpb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. 
-// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (duration.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. -// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -type Duration struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (x *Duration) Reset() { - *x = Duration{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Duration) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Duration) ProtoMessage() {} - -func (x *Duration) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_duration_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Duration.ProtoReflect.Descriptor instead. 
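This vendored copy of `durationpb` appears to predate the `New`/`AsDuration` helpers, so callers fill `Seconds` and `Nanos` by hand. A hedged sketch of that split, following the sign rules spelled out in the field comments above (the `toProto` name is illustrative):

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/durationpb"
)

// toProto splits a time.Duration into the seconds/nanos pair described above.
// Go's truncating division and remainder keep both fields on the same sign,
// as the field comments require.
func toProto(d time.Duration) *durationpb.Duration {
	nanos := d.Nanoseconds()
	return &durationpb.Duration{
		Seconds: nanos / int64(time.Second),
		Nanos:   int32(nanos % int64(time.Second)),
	}
}

func main() {
	a := toProto(1500 * time.Millisecond)
	fmt.Println(a.GetSeconds(), a.GetNanos()) // 1 500000000

	b := toProto(-500 * time.Millisecond)
	fmt.Println(b.GetSeconds(), b.GetNanos()) // 0 -500000000
}
```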
-func (*Duration) Descriptor() ([]byte, []int) { - return file_google_protobuf_duration_proto_rawDescGZIP(), []int{0} -} - -func (x *Duration) GetSeconds() int64 { - if x != nil { - return x.Seconds - } - return 0 -} - -func (x *Duration) GetNanos() int32 { - if x != nil { - return x.Nanos - } - return 0 -} - -var File_google_protobuf_duration_proto protoreflect.FileDescriptor - -var file_google_protobuf_duration_proto_rawDesc = []byte{ - 0x0a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x22, 0x3a, 0x0a, 0x08, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x07, - 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, 0x7c, 0x0a, - 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0d, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, - 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_duration_proto_rawDescOnce sync.Once - file_google_protobuf_duration_proto_rawDescData = file_google_protobuf_duration_proto_rawDesc -) - -func file_google_protobuf_duration_proto_rawDescGZIP() []byte { - file_google_protobuf_duration_proto_rawDescOnce.Do(func() { - file_google_protobuf_duration_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_duration_proto_rawDescData) - }) - return file_google_protobuf_duration_proto_rawDescData -} - -var file_google_protobuf_duration_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var file_google_protobuf_duration_proto_goTypes = []interface{}{ - (*Duration)(nil), // 0: google.protobuf.Duration -} -var file_google_protobuf_duration_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_duration_proto_init() } -func file_google_protobuf_duration_proto_init() { - if File_google_protobuf_duration_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_duration_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Duration); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: 
reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_duration_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_duration_proto_goTypes, - DependencyIndexes: file_google_protobuf_duration_proto_depIdxs, - MessageInfos: file_google_protobuf_duration_proto_msgTypes, - }.Build() - File_google_protobuf_duration_proto = out.File - file_google_protobuf_duration_proto_rawDesc = nil - file_google_protobuf_duration_proto_goTypes = nil - file_google_protobuf_duration_proto_depIdxs = nil -} diff --git a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go b/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go deleted file mode 100644 index 6fe6d42..0000000 --- a/vendor/google.golang.org/protobuf/types/known/timestamppb/timestamp.pb.go +++ /dev/null @@ -1,271 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Code generated by protoc-gen-go. DO NOT EDIT. -// source: google/protobuf/timestamp.proto - -package timestamppb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -// A Timestamp represents a point in time independent of any time zone or local -// calendar, encoded as a count of seconds and fractions of seconds at -// nanosecond resolution. The count is relative to an epoch at UTC midnight on -// January 1, 1970, in the proleptic Gregorian calendar which extends the -// Gregorian calendar backwards to year one. -// -// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap -// second table is needed for interpretation, using a [24-hour linear -// smear](https://developers.google.com/time/smear). -// -// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By -// restricting to that range, we ensure that we can convert to and from [RFC -// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard -// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using -// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with -// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use -// the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -type Timestamp struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` - // Non-negative fractions of a second at nanosecond resolution. 
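In Go the same seconds/nanos split maps directly onto `time.Time`. A hedged sketch using only the two `Timestamp` fields (this vendored copy appears to predate the `timestamppb.New`/`AsTime` helpers, and the `toProto` name is illustrative); the sample instant matches the JSON-mapping example above:

```go
package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

// toProto converts a time.Time to the Timestamp wire form: whole seconds
// since the Unix epoch plus a non-negative nanosecond remainder.
func toProto(t time.Time) *timestamppb.Timestamp {
	return &timestamppb.Timestamp{
		Seconds: t.Unix(),
		Nanos:   int32(t.Nanosecond()), // always in [0, 1e9), even before 1970
	}
}

func main() {
	// 2017-01-15T01:30:15.01Z, the instant used in the JSON example above.
	ts := toProto(time.Date(2017, time.January, 15, 1, 30, 15, 10000000, time.UTC))
	fmt.Println(ts.GetSeconds(), ts.GetNanos()) // 1484443815 10000000
}
```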
Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` -} - -func (x *Timestamp) Reset() { - *x = Timestamp{} - if protoimpl.UnsafeEnabled { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Timestamp) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Timestamp) ProtoMessage() {} - -func (x *Timestamp) ProtoReflect() protoreflect.Message { - mi := &file_google_protobuf_timestamp_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Timestamp.ProtoReflect.Descriptor instead. -func (*Timestamp) Descriptor() ([]byte, []int) { - return file_google_protobuf_timestamp_proto_rawDescGZIP(), []int{0} -} - -func (x *Timestamp) GetSeconds() int64 { - if x != nil { - return x.Seconds - } - return 0 -} - -func (x *Timestamp) GetNanos() int32 { - if x != nil { - return x.Nanos - } - return 0 -} - -var File_google_protobuf_timestamp_proto protoreflect.FileDescriptor - -var file_google_protobuf_timestamp_proto_rawDesc = []byte{ - 0x0a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x22, 0x3b, 0x0a, 0x09, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, - 0x18, 0x0a, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, - 0x52, 0x07, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x14, 0x0a, 0x05, 0x6e, 0x61, 0x6e, - 0x6f, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x6e, 0x61, 0x6e, 0x6f, 0x73, 0x42, - 0x7e, 0x0a, 0x13, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x42, 0x0e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, - 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, - 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} - -var ( - file_google_protobuf_timestamp_proto_rawDescOnce sync.Once - file_google_protobuf_timestamp_proto_rawDescData = file_google_protobuf_timestamp_proto_rawDesc -) - -func file_google_protobuf_timestamp_proto_rawDescGZIP() []byte { - file_google_protobuf_timestamp_proto_rawDescOnce.Do(func() { - file_google_protobuf_timestamp_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_timestamp_proto_rawDescData) - }) - return file_google_protobuf_timestamp_proto_rawDescData -} - -var file_google_protobuf_timestamp_proto_msgTypes = make([]protoimpl.MessageInfo, 1) -var 
file_google_protobuf_timestamp_proto_goTypes = []interface{}{ - (*Timestamp)(nil), // 0: google.protobuf.Timestamp -} -var file_google_protobuf_timestamp_proto_depIdxs = []int32{ - 0, // [0:0] is the sub-list for method output_type - 0, // [0:0] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_google_protobuf_timestamp_proto_init() } -func file_google_protobuf_timestamp_proto_init() { - if File_google_protobuf_timestamp_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_google_protobuf_timestamp_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*Timestamp); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_timestamp_proto_rawDesc, - NumEnums: 0, - NumMessages: 1, - NumExtensions: 0, - NumServices: 0, - }, - GoTypes: file_google_protobuf_timestamp_proto_goTypes, - DependencyIndexes: file_google_protobuf_timestamp_proto_depIdxs, - MessageInfos: file_google_protobuf_timestamp_proto_msgTypes, - }.Build() - File_google_protobuf_timestamp_proto = out.File - file_google_protobuf_timestamp_proto_rawDesc = nil - file_google_protobuf_timestamp_proto_goTypes = nil - file_google_protobuf_timestamp_proto_depIdxs = nil -} diff --git a/vendor/gopkg.in/yaml.v3/.travis.yml b/vendor/gopkg.in/yaml.v3/.travis.yml deleted file mode 100644 index a130fe8..0000000 --- a/vendor/gopkg.in/yaml.v3/.travis.yml +++ /dev/null @@ -1,17 +0,0 @@ -language: go - -go: - - "1.4.x" - - "1.5.x" - - "1.6.x" - - "1.7.x" - - "1.8.x" - - "1.9.x" - - "1.10.x" - - "1.11.x" - - "1.12.x" - - "1.13.x" - - "1.14.x" - - "tip" - -go_import_path: gopkg.in/yaml.v3 diff --git a/vendor/gopkg.in/yaml.v3/LICENSE b/vendor/gopkg.in/yaml.v3/LICENSE deleted file mode 100644 index 2683e4b..0000000 --- a/vendor/gopkg.in/yaml.v3/LICENSE +++ /dev/null @@ -1,50 +0,0 @@ - -This project is covered by two different licenses: MIT and Apache. - -#### MIT License #### - -The following files were ported to Go from C files of libyaml, and thus -are still covered by their original MIT license, with the additional -copyright staring in 2011 when the project was ported over: - - apic.go emitterc.go parserc.go readerc.go scannerc.go - writerc.go yamlh.go yamlprivateh.go - -Copyright (c) 2006-2010 Kirill Simonov -Copyright (c) 2006-2011 Kirill Simonov - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-### Apache License ###
-
-All the remaining project files are covered by the Apache license:
-
-Copyright (c) 2011-2019 Canonical Ltd
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/NOTICE b/vendor/gopkg.in/yaml.v3/NOTICE
deleted file mode 100644
index 866d74a..0000000
--- a/vendor/gopkg.in/yaml.v3/NOTICE
+++ /dev/null
@@ -1,13 +0,0 @@
-Copyright 2011-2016 Canonical Ltd.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/vendor/gopkg.in/yaml.v3/README.md b/vendor/gopkg.in/yaml.v3/README.md
deleted file mode 100644
index 08eb1ba..0000000
--- a/vendor/gopkg.in/yaml.v3/README.md
+++ /dev/null
@@ -1,150 +0,0 @@
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.2, but preserves some behavior
-from 1.1 for backwards compatibility.
-
-Specifically, as of v3 of the yaml package:
-
- - YAML 1.1 bools (_yes/no, on/off_) are supported as long as they are being
- decoded into a typed bool value. Otherwise they behave as a string. Booleans
- in YAML 1.2 are _true/false_ only.
- - Octals encode and decode as _0777_ per YAML 1.1, rather than _0o777_
- as specified in YAML 1.2, because most parsers still use the old format.
- Octals in the _0o777_ format are supported though, so new files work.
- - Does not support base-60 floats. These are gone from YAML 1.2, and were
- actually never supported by this package as it's clearly a poor choice.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v3*.
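As a hedged illustration of the YAML 1.1-style boolean handling described in the Compatibility notes above (the `enabled` key and surrounding document are invented for this sketch, assuming stock gopkg.in/yaml.v3 semantics):

```Go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	src := []byte("enabled: on\n")

	// Into a typed bool, the YAML 1.1 literal "on" decodes as true.
	var typed struct {
		Enabled bool `yaml:"enabled"`
	}
	if err := yaml.Unmarshal(src, &typed); err != nil {
		panic(err)
	}
	fmt.Println(typed.Enabled) // true

	// Into an untyped map, the same scalar stays a plain string,
	// per YAML 1.2 resolution.
	var loose map[string]interface{}
	if err := yaml.Unmarshal(src, &loose); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", loose["enabled"]) // string
}
```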
- -To install it, run: - - go get gopkg.in/yaml.v3 - -API documentation ------------------ - -If opened in a browser, the import path itself leads to the API documentation: - - - [https://gopkg.in/yaml.v3](https://gopkg.in/yaml.v3) - -API stability -------------- - -The package API for yaml v3 will remain stable as described in [gopkg.in](https://gopkg.in). - - -License -------- - -The yaml package is licensed under the MIT and Apache License 2.0 licenses. -Please see the LICENSE file for details. - - -Example -------- - -```Go -package main - -import ( - "fmt" - "log" - - "gopkg.in/yaml.v3" -) - -var data = ` -a: Easy! -b: - c: 2 - d: [3, 4] -` - -// Note: struct fields must be public in order for unmarshal to -// correctly populate the data. -type T struct { - A string - B struct { - RenamedC int `yaml:"c"` - D []int `yaml:",flow"` - } -} - -func main() { - t := T{} - - err := yaml.Unmarshal([]byte(data), &t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t:\n%v\n\n", t) - - d, err := yaml.Marshal(&t) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- t dump:\n%s\n\n", string(d)) - - m := make(map[interface{}]interface{}) - - err = yaml.Unmarshal([]byte(data), &m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m:\n%v\n\n", m) - - d, err = yaml.Marshal(&m) - if err != nil { - log.Fatalf("error: %v", err) - } - fmt.Printf("--- m dump:\n%s\n\n", string(d)) -} -``` - -This example will generate the following output: - -``` ---- t: -{Easy! {2 [3 4]}} - ---- t dump: -a: Easy! -b: - c: 2 - d: [3, 4] - - ---- m: -map[a:Easy! b:map[c:2 d:[3 4]]] - ---- m dump: -a: Easy! -b: - c: 2 - d: - - 3 - - 4 -``` - diff --git a/vendor/gopkg.in/yaml.v3/apic.go b/vendor/gopkg.in/yaml.v3/apic.go deleted file mode 100644 index ae7d049..0000000 --- a/vendor/gopkg.in/yaml.v3/apic.go +++ /dev/null @@ -1,747 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "io" -) - -func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { - //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) - - // Check if we can move the queue at the beginning of the buffer. 
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { - if parser.tokens_head != len(parser.tokens) { - copy(parser.tokens, parser.tokens[parser.tokens_head:]) - } - parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] - parser.tokens_head = 0 - } - parser.tokens = append(parser.tokens, *token) - if pos < 0 { - return - } - copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) - parser.tokens[parser.tokens_head+pos] = *token -} - -// Create a new parser object. -func yaml_parser_initialize(parser *yaml_parser_t) bool { - *parser = yaml_parser_t{ - raw_buffer: make([]byte, 0, input_raw_buffer_size), - buffer: make([]byte, 0, input_buffer_size), - } - return true -} - -// Destroy a parser object. -func yaml_parser_delete(parser *yaml_parser_t) { - *parser = yaml_parser_t{} -} - -// String read handler. -func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - if parser.input_pos == len(parser.input) { - return 0, io.EOF - } - n = copy(buffer, parser.input[parser.input_pos:]) - parser.input_pos += n - return n, nil -} - -// Reader read handler. -func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { - return parser.input_reader.Read(buffer) -} - -// Set a string input. -func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_string_read_handler - parser.input = input - parser.input_pos = 0 -} - -// Set a file input. -func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { - if parser.read_handler != nil { - panic("must set the input source only once") - } - parser.read_handler = yaml_reader_read_handler - parser.input_reader = r -} - -// Set the source encoding. -func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { - if parser.encoding != yaml_ANY_ENCODING { - panic("must set the encoding only once") - } - parser.encoding = encoding -} - -// Create a new emitter object. -func yaml_emitter_initialize(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{ - buffer: make([]byte, output_buffer_size), - raw_buffer: make([]byte, 0, output_raw_buffer_size), - states: make([]yaml_emitter_state_t, 0, initial_stack_size), - events: make([]yaml_event_t, 0, initial_queue_size), - best_width: -1, - } -} - -// Destroy an emitter object. -func yaml_emitter_delete(emitter *yaml_emitter_t) { - *emitter = yaml_emitter_t{} -} - -// String write handler. -func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - *emitter.output_buffer = append(*emitter.output_buffer, buffer...) - return nil -} - -// yaml_writer_write_handler uses emitter.output_writer to write the -// emitted text. -func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { - _, err := emitter.output_writer.Write(buffer) - return err -} - -// Set a string output. -func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_string_write_handler - emitter.output_buffer = output_buffer -} - -// Set a file output. 
-func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { - if emitter.write_handler != nil { - panic("must set the output target only once") - } - emitter.write_handler = yaml_writer_write_handler - emitter.output_writer = w -} - -// Set the output encoding. -func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { - if emitter.encoding != yaml_ANY_ENCODING { - panic("must set the output encoding only once") - } - emitter.encoding = encoding -} - -// Set the canonical output style. -func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { - emitter.canonical = canonical -} - -// Set the indentation increment. -func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { - if indent < 2 || indent > 9 { - indent = 2 - } - emitter.best_indent = indent -} - -// Set the preferred line width. -func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { - if width < 0 { - width = -1 - } - emitter.best_width = width -} - -// Set if unescaped non-ASCII characters are allowed. -func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { - emitter.unicode = unicode -} - -// Set the preferred line break character. -func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { - emitter.line_break = line_break -} - -///* -// * Destroy a token object. -// */ -// -//YAML_DECLARE(void) -//yaml_token_delete(yaml_token_t *token) -//{ -// assert(token); // Non-NULL token object expected. -// -// switch (token.type) -// { -// case YAML_TAG_DIRECTIVE_TOKEN: -// yaml_free(token.data.tag_directive.handle); -// yaml_free(token.data.tag_directive.prefix); -// break; -// -// case YAML_ALIAS_TOKEN: -// yaml_free(token.data.alias.value); -// break; -// -// case YAML_ANCHOR_TOKEN: -// yaml_free(token.data.anchor.value); -// break; -// -// case YAML_TAG_TOKEN: -// yaml_free(token.data.tag.handle); -// yaml_free(token.data.tag.suffix); -// break; -// -// case YAML_SCALAR_TOKEN: -// yaml_free(token.data.scalar.value); -// break; -// -// default: -// break; -// } -// -// memset(token, 0, sizeof(yaml_token_t)); -//} -// -///* -// * Check if a string is a valid UTF-8 sequence. -// * -// * Check 'reader.c' for more details on UTF-8 encoding. -// */ -// -//static int -//yaml_check_utf8(yaml_char_t *start, size_t length) -//{ -// yaml_char_t *end = start+length; -// yaml_char_t *pointer = start; -// -// while (pointer < end) { -// unsigned char octet; -// unsigned int width; -// unsigned int value; -// size_t k; -// -// octet = pointer[0]; -// width = (octet & 0x80) == 0x00 ? 1 : -// (octet & 0xE0) == 0xC0 ? 2 : -// (octet & 0xF0) == 0xE0 ? 3 : -// (octet & 0xF8) == 0xF0 ? 4 : 0; -// value = (octet & 0x80) == 0x00 ? octet & 0x7F : -// (octet & 0xE0) == 0xC0 ? octet & 0x1F : -// (octet & 0xF0) == 0xE0 ? octet & 0x0F : -// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; -// if (!width) return 0; -// if (pointer+width > end) return 0; -// for (k = 1; k < width; k ++) { -// octet = pointer[k]; -// if ((octet & 0xC0) != 0x80) return 0; -// value = (value << 6) + (octet & 0x3F); -// } -// if (!((width == 1) || -// (width == 2 && value >= 0x80) || -// (width == 3 && value >= 0x800) || -// (width == 4 && value >= 0x10000))) return 0; -// -// pointer += width; -// } -// -// return 1; -//} -// - -// Create STREAM-START. -func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - encoding: encoding, - } -} - -// Create STREAM-END. 
-func yaml_stream_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - } -} - -// Create DOCUMENT-START. -func yaml_document_start_event_initialize( - event *yaml_event_t, - version_directive *yaml_version_directive_t, - tag_directives []yaml_tag_directive_t, - implicit bool, -) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: implicit, - } -} - -// Create DOCUMENT-END. -func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - implicit: implicit, - } -} - -// Create ALIAS. -func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) bool { - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - anchor: anchor, - } - return true -} - -// Create SCALAR. -func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - anchor: anchor, - tag: tag, - value: value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-START. -func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } - return true -} - -// Create SEQUENCE-END. -func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - } - return true -} - -// Create MAPPING-START. -func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(style), - } -} - -// Create MAPPING-END. -func yaml_mapping_end_event_initialize(event *yaml_event_t) { - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - } -} - -// Destroy an event object. -func yaml_event_delete(event *yaml_event_t) { - *event = yaml_event_t{} -} - -///* -// * Create a document object. -// */ -// -//YAML_DECLARE(int) -//yaml_document_initialize(document *yaml_document_t, -// version_directive *yaml_version_directive_t, -// tag_directives_start *yaml_tag_directive_t, -// tag_directives_end *yaml_tag_directive_t, -// start_implicit int, end_implicit int) -//{ -// struct { -// error yaml_error_type_t -// } context -// struct { -// start *yaml_node_t -// end *yaml_node_t -// top *yaml_node_t -// } nodes = { NULL, NULL, NULL } -// version_directive_copy *yaml_version_directive_t = NULL -// struct { -// start *yaml_tag_directive_t -// end *yaml_tag_directive_t -// top *yaml_tag_directive_t -// } tag_directives_copy = { NULL, NULL, NULL } -// value yaml_tag_directive_t = { NULL, NULL } -// mark yaml_mark_t = { 0, 0, 0 } -// -// assert(document) // Non-NULL document object is expected. -// assert((tag_directives_start && tag_directives_end) || -// (tag_directives_start == tag_directives_end)) -// // Valid tag directives are expected. 
-// -// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error -// -// if (version_directive) { -// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) -// if (!version_directive_copy) goto error -// version_directive_copy.major = version_directive.major -// version_directive_copy.minor = version_directive.minor -// } -// -// if (tag_directives_start != tag_directives_end) { -// tag_directive *yaml_tag_directive_t -// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) -// goto error -// for (tag_directive = tag_directives_start -// tag_directive != tag_directives_end; tag_directive ++) { -// assert(tag_directive.handle) -// assert(tag_directive.prefix) -// if (!yaml_check_utf8(tag_directive.handle, -// strlen((char *)tag_directive.handle))) -// goto error -// if (!yaml_check_utf8(tag_directive.prefix, -// strlen((char *)tag_directive.prefix))) -// goto error -// value.handle = yaml_strdup(tag_directive.handle) -// value.prefix = yaml_strdup(tag_directive.prefix) -// if (!value.handle || !value.prefix) goto error -// if (!PUSH(&context, tag_directives_copy, value)) -// goto error -// value.handle = NULL -// value.prefix = NULL -// } -// } -// -// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, -// tag_directives_copy.start, tag_directives_copy.top, -// start_implicit, end_implicit, mark, mark) -// -// return 1 -// -//error: -// STACK_DEL(&context, nodes) -// yaml_free(version_directive_copy) -// while (!STACK_EMPTY(&context, tag_directives_copy)) { -// value yaml_tag_directive_t = POP(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// } -// STACK_DEL(&context, tag_directives_copy) -// yaml_free(value.handle) -// yaml_free(value.prefix) -// -// return 0 -//} -// -///* -// * Destroy a document object. -// */ -// -//YAML_DECLARE(void) -//yaml_document_delete(document *yaml_document_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// tag_directive *yaml_tag_directive_t -// -// context.error = YAML_NO_ERROR // Eliminate a compiler warning. -// -// assert(document) // Non-NULL document object is expected. -// -// while (!STACK_EMPTY(&context, document.nodes)) { -// node yaml_node_t = POP(&context, document.nodes) -// yaml_free(node.tag) -// switch (node.type) { -// case YAML_SCALAR_NODE: -// yaml_free(node.data.scalar.value) -// break -// case YAML_SEQUENCE_NODE: -// STACK_DEL(&context, node.data.sequence.items) -// break -// case YAML_MAPPING_NODE: -// STACK_DEL(&context, node.data.mapping.pairs) -// break -// default: -// assert(0) // Should not happen. -// } -// } -// STACK_DEL(&context, document.nodes) -// -// yaml_free(document.version_directive) -// for (tag_directive = document.tag_directives.start -// tag_directive != document.tag_directives.end -// tag_directive++) { -// yaml_free(tag_directive.handle) -// yaml_free(tag_directive.prefix) -// } -// yaml_free(document.tag_directives.start) -// -// memset(document, 0, sizeof(yaml_document_t)) -//} -// -///** -// * Get a document node. -// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_node(document *yaml_document_t, index int) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (index > 0 && document.nodes.start + index <= document.nodes.top) { -// return document.nodes.start + index - 1 -// } -// return NULL -//} -// -///** -// * Get the root object. 
-// */ -// -//YAML_DECLARE(yaml_node_t *) -//yaml_document_get_root_node(document *yaml_document_t) -//{ -// assert(document) // Non-NULL document object is expected. -// -// if (document.nodes.top != document.nodes.start) { -// return document.nodes.start -// } -// return NULL -//} -// -///* -// * Add a scalar node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_scalar(document *yaml_document_t, -// tag *yaml_char_t, value *yaml_char_t, length int, -// style yaml_scalar_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// value_copy *yaml_char_t = NULL -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// assert(value) // Non-NULL value is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (length < 0) { -// length = strlen((char *)value) -// } -// -// if (!yaml_check_utf8(value, length)) goto error -// value_copy = yaml_malloc(length+1) -// if (!value_copy) goto error -// memcpy(value_copy, value, length) -// value_copy[length] = '\0' -// -// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// yaml_free(tag_copy) -// yaml_free(value_copy) -// -// return 0 -//} -// -///* -// * Add a sequence node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_sequence(document *yaml_document_t, -// tag *yaml_char_t, style yaml_sequence_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_item_t -// end *yaml_node_item_t -// top *yaml_node_item_t -// } items = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. -// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error -// -// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, items) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Add a mapping node to a document. -// */ -// -//YAML_DECLARE(int) -//yaml_document_add_mapping(document *yaml_document_t, -// tag *yaml_char_t, style yaml_mapping_style_t) -//{ -// struct { -// error yaml_error_type_t -// } context -// mark yaml_mark_t = { 0, 0, 0 } -// tag_copy *yaml_char_t = NULL -// struct { -// start *yaml_node_pair_t -// end *yaml_node_pair_t -// top *yaml_node_pair_t -// } pairs = { NULL, NULL, NULL } -// node yaml_node_t -// -// assert(document) // Non-NULL document object is expected. 
-// -// if (!tag) { -// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG -// } -// -// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error -// tag_copy = yaml_strdup(tag) -// if (!tag_copy) goto error -// -// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error -// -// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, -// style, mark, mark) -// if (!PUSH(&context, document.nodes, node)) goto error -// -// return document.nodes.top - document.nodes.start -// -//error: -// STACK_DEL(&context, pairs) -// yaml_free(tag_copy) -// -// return 0 -//} -// -///* -// * Append an item to a sequence node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_sequence_item(document *yaml_document_t, -// sequence int, item int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// assert(document) // Non-NULL document is required. -// assert(sequence > 0 -// && document.nodes.start + sequence <= document.nodes.top) -// // Valid sequence id is required. -// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) -// // A sequence node is required. -// assert(item > 0 && document.nodes.start + item <= document.nodes.top) -// // Valid item id is required. -// -// if (!PUSH(&context, -// document.nodes.start[sequence-1].data.sequence.items, item)) -// return 0 -// -// return 1 -//} -// -///* -// * Append a pair of a key and a value to a mapping node. -// */ -// -//YAML_DECLARE(int) -//yaml_document_append_mapping_pair(document *yaml_document_t, -// mapping int, key int, value int) -//{ -// struct { -// error yaml_error_type_t -// } context -// -// pair yaml_node_pair_t -// -// assert(document) // Non-NULL document is required. -// assert(mapping > 0 -// && document.nodes.start + mapping <= document.nodes.top) -// // Valid mapping id is required. -// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) -// // A mapping node is required. -// assert(key > 0 && document.nodes.start + key <= document.nodes.top) -// // Valid key id is required. -// assert(value > 0 && document.nodes.start + value <= document.nodes.top) -// // Valid value id is required. -// -// pair.key = key -// pair.value = value -// -// if (!PUSH(&context, -// document.nodes.start[mapping-1].data.mapping.pairs, pair)) -// return 0 -// -// return 1 -//} -// -// diff --git a/vendor/gopkg.in/yaml.v3/decode.go b/vendor/gopkg.in/yaml.v3/decode.go deleted file mode 100644 index 21c0dac..0000000 --- a/vendor/gopkg.in/yaml.v3/decode.go +++ /dev/null @@ -1,948 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package yaml - -import ( - "encoding" - "encoding/base64" - "fmt" - "io" - "math" - "reflect" - "strconv" - "time" -) - -// ---------------------------------------------------------------------------- -// Parser, produces a node tree out of a libyaml event stream. 
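The section comment above introduces the internal parser that turns libyaml events into a node tree. As a rough sketch of how that tree surfaces through the public `yaml.Node` API (the sample document is invented; this is not part of the vendored file), decoding directly into a node exposes the same structure that parser.mapping() and friends build below:

```Go
package main

import (
	"fmt"

	"gopkg.in/yaml.v3"
)

func main() {
	var doc yaml.Node
	if err := yaml.Unmarshal([]byte("a: 1\nb: [x, y]\n"), &doc); err != nil {
		panic(err)
	}
	// The document node wraps a single mapping node; mapping content
	// alternates key and value nodes.
	root := doc.Content[0]
	for i := 0; i < len(root.Content); i += 2 {
		k, v := root.Content[i], root.Content[i+1]
		fmt.Printf("%s -> tag=%s line=%d children=%d\n", k.Value, v.Tag, v.Line, len(v.Content))
	}
	// Expected output (assuming the invented document above):
	// a -> tag=!!int line=1 children=0
	// b -> tag=!!seq line=2 children=2
}
```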
- -type parser struct { - parser yaml_parser_t - event yaml_event_t - doc *Node - anchors map[string]*Node - doneInit bool - textless bool -} - -func newParser(b []byte) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - if len(b) == 0 { - b = []byte{'\n'} - } - yaml_parser_set_input_string(&p.parser, b) - return &p -} - -func newParserFromReader(r io.Reader) *parser { - p := parser{} - if !yaml_parser_initialize(&p.parser) { - panic("failed to initialize YAML emitter") - } - yaml_parser_set_input_reader(&p.parser, r) - return &p -} - -func (p *parser) init() { - if p.doneInit { - return - } - p.anchors = make(map[string]*Node) - p.expect(yaml_STREAM_START_EVENT) - p.doneInit = true -} - -func (p *parser) destroy() { - if p.event.typ != yaml_NO_EVENT { - yaml_event_delete(&p.event) - } - yaml_parser_delete(&p.parser) -} - -// expect consumes an event from the event stream and -// checks that it's of the expected type. -func (p *parser) expect(e yaml_event_type_t) { - if p.event.typ == yaml_NO_EVENT { - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - } - if p.event.typ == yaml_STREAM_END_EVENT { - failf("attempted to go past the end of stream; corrupted value?") - } - if p.event.typ != e { - p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ) - p.fail() - } - yaml_event_delete(&p.event) - p.event.typ = yaml_NO_EVENT -} - -// peek peeks at the next event in the event stream, -// puts the results into p.event and returns the event type. -func (p *parser) peek() yaml_event_type_t { - if p.event.typ != yaml_NO_EVENT { - return p.event.typ - } - if !yaml_parser_parse(&p.parser, &p.event) { - p.fail() - } - return p.event.typ -} - -func (p *parser) fail() { - var where string - var line int - if p.parser.context_mark.line != 0 { - line = p.parser.context_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } else if p.parser.problem_mark.line != 0 { - line = p.parser.problem_mark.line - // Scanner errors don't iterate line before returning error - if p.parser.error == yaml_SCANNER_ERROR { - line++ - } - } - if line != 0 { - where = "line " + strconv.Itoa(line) + ": " - } - var msg string - if len(p.parser.problem) > 0 { - msg = p.parser.problem - } else { - msg = "unknown problem parsing YAML content" - } - failf("%s%s", where, msg) -} - -func (p *parser) anchor(n *Node, anchor []byte) { - if anchor != nil { - n.Anchor = string(anchor) - p.anchors[n.Anchor] = n - } -} - -func (p *parser) parse() *Node { - p.init() - switch p.peek() { - case yaml_SCALAR_EVENT: - return p.scalar() - case yaml_ALIAS_EVENT: - return p.alias() - case yaml_MAPPING_START_EVENT: - return p.mapping() - case yaml_SEQUENCE_START_EVENT: - return p.sequence() - case yaml_DOCUMENT_START_EVENT: - return p.document() - case yaml_STREAM_END_EVENT: - // Happens when attempting to decode an empty buffer. - return nil - case yaml_TAIL_COMMENT_EVENT: - panic("internal error: unexpected tail comment event (please report)") - default: - panic("internal error: attempted to parse unknown event (please report): " + p.event.typ.String()) - } -} - -func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { - var style Style - if tag != "" && tag != "!" 
{ - tag = shortTag(tag) - style = TaggedStyle - } else if defaultTag != "" { - tag = defaultTag - } else if kind == ScalarNode { - tag, _ = resolve("", value) - } - n := &Node{ - Kind: kind, - Tag: tag, - Value: value, - Style: style, - } - if !p.textless { - n.Line = p.event.start_mark.line + 1 - n.Column = p.event.start_mark.column + 1 - n.HeadComment = string(p.event.head_comment) - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - } - return n -} - -func (p *parser) parseChild(parent *Node) *Node { - child := p.parse() - parent.Content = append(parent.Content, child) - return child -} - -func (p *parser) document() *Node { - n := p.node(DocumentNode, "", "", "") - p.doc = n - p.expect(yaml_DOCUMENT_START_EVENT) - p.parseChild(n) - if p.peek() == yaml_DOCUMENT_END_EVENT { - n.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_DOCUMENT_END_EVENT) - return n -} - -func (p *parser) alias() *Node { - n := p.node(AliasNode, "", "", string(p.event.anchor)) - n.Alias = p.anchors[n.Value] - if n.Alias == nil { - failf("unknown anchor '%s' referenced", n.Value) - } - p.expect(yaml_ALIAS_EVENT) - return n -} - -func (p *parser) scalar() *Node { - var parsedStyle = p.event.scalar_style() - var nodeStyle Style - switch { - case parsedStyle&yaml_DOUBLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = DoubleQuotedStyle - case parsedStyle&yaml_SINGLE_QUOTED_SCALAR_STYLE != 0: - nodeStyle = SingleQuotedStyle - case parsedStyle&yaml_LITERAL_SCALAR_STYLE != 0: - nodeStyle = LiteralStyle - case parsedStyle&yaml_FOLDED_SCALAR_STYLE != 0: - nodeStyle = FoldedStyle - } - var nodeValue = string(p.event.value) - var nodeTag = string(p.event.tag) - var defaultTag string - if nodeStyle == 0 { - if nodeValue == "<<" { - defaultTag = mergeTag - } - } else { - defaultTag = strTag - } - n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) - n.Style |= nodeStyle - p.anchor(n, p.event.anchor) - p.expect(yaml_SCALAR_EVENT) - return n -} - -func (p *parser) sequence() *Node { - n := p.node(SequenceNode, seqTag, string(p.event.tag), "") - if p.event.sequence_style()&yaml_FLOW_SEQUENCE_STYLE != 0 { - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_SEQUENCE_START_EVENT) - for p.peek() != yaml_SEQUENCE_END_EVENT { - p.parseChild(n) - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - p.expect(yaml_SEQUENCE_END_EVENT) - return n -} - -func (p *parser) mapping() *Node { - n := p.node(MappingNode, mapTag, string(p.event.tag), "") - block := true - if p.event.mapping_style()&yaml_FLOW_MAPPING_STYLE != 0 { - block = false - n.Style |= FlowStyle - } - p.anchor(n, p.event.anchor) - p.expect(yaml_MAPPING_START_EVENT) - for p.peek() != yaml_MAPPING_END_EVENT { - k := p.parseChild(n) - if block && k.FootComment != "" { - // Must be a foot comment for the prior value when being dedented. 
- if len(n.Content) > 2 { - n.Content[len(n.Content)-3].FootComment = k.FootComment - k.FootComment = "" - } - } - v := p.parseChild(n) - if k.FootComment == "" && v.FootComment != "" { - k.FootComment = v.FootComment - v.FootComment = "" - } - if p.peek() == yaml_TAIL_COMMENT_EVENT { - if k.FootComment == "" { - k.FootComment = string(p.event.foot_comment) - } - p.expect(yaml_TAIL_COMMENT_EVENT) - } - } - n.LineComment = string(p.event.line_comment) - n.FootComment = string(p.event.foot_comment) - if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { - n.Content[len(n.Content)-2].FootComment = n.FootComment - n.FootComment = "" - } - p.expect(yaml_MAPPING_END_EVENT) - return n -} - -// ---------------------------------------------------------------------------- -// Decoder, unmarshals a node into a provided value. - -type decoder struct { - doc *Node - aliases map[*Node]bool - terrors []string - - stringMapType reflect.Type - generalMapType reflect.Type - - knownFields bool - uniqueKeys bool - decodeCount int - aliasCount int - aliasDepth int -} - -var ( - nodeType = reflect.TypeOf(Node{}) - durationType = reflect.TypeOf(time.Duration(0)) - stringMapType = reflect.TypeOf(map[string]interface{}{}) - generalMapType = reflect.TypeOf(map[interface{}]interface{}{}) - ifaceType = generalMapType.Elem() - timeType = reflect.TypeOf(time.Time{}) - ptrTimeType = reflect.TypeOf(&time.Time{}) -) - -func newDecoder() *decoder { - d := &decoder{ - stringMapType: stringMapType, - generalMapType: generalMapType, - uniqueKeys: true, - } - d.aliases = make(map[*Node]bool) - return d -} - -func (d *decoder) terror(n *Node, tag string, out reflect.Value) { - if n.Tag != "" { - tag = n.Tag - } - value := n.Value - if tag != seqTag && tag != mapTag { - if len(value) > 10 { - value = " `" + value[:7] + "...`" - } else { - value = " `" + value + "`" - } - } - d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.Line, shortTag(tag), value, out.Type())) -} - -func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { - err := u.UnmarshalYAML(n) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { - terrlen := len(d.terrors) - err := u.UnmarshalYAML(func(v interface{}) (err error) { - defer handleErr(&err) - d.unmarshal(n, reflect.ValueOf(v)) - if len(d.terrors) > terrlen { - issues := d.terrors[terrlen:] - d.terrors = d.terrors[:terrlen] - return &TypeError{issues} - } - return nil - }) - if e, ok := err.(*TypeError); ok { - d.terrors = append(d.terrors, e.Errors...) - return false - } - if err != nil { - fail(err) - } - return true -} - -// d.prepare initializes and dereferences pointers and calls UnmarshalYAML -// if a value is found to implement it. -// It returns the initialized and dereferenced out value, whether -// unmarshalling was already done by UnmarshalYAML, and if so whether -// its types unmarshalled appropriately. -// -// If n holds a null value, prepare returns before doing anything. 
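The `prepare` helper documented just above is what detects custom unmarshalers before normal decoding. Purely as an illustrative sketch under that contract (the `CSV` type, its input, and the field names are invented, not part of this patch), a type implementing the v3 `Unmarshaler` interface might look like:

```Go
package main

import (
	"fmt"
	"strings"

	"gopkg.in/yaml.v3"
)

// CSV accepts either a YAML sequence or a comma-separated scalar.
type CSV []string

// UnmarshalYAML receives the raw *yaml.Node instead of a decoded value,
// which is the hook prepare() looks for on addressable values.
func (c *CSV) UnmarshalYAML(n *yaml.Node) error {
	switch n.Kind {
	case yaml.SequenceNode:
		var items []string
		if err := n.Decode(&items); err != nil {
			return err
		}
		*c = items
		return nil
	case yaml.ScalarNode:
		*c = strings.Split(n.Value, ",")
		return nil
	default:
		return fmt.Errorf("line %d: cannot decode node kind %d into CSV", n.Line, n.Kind)
	}
}

func main() {
	var v struct {
		Tags CSV
	}
	if err := yaml.Unmarshal([]byte("tags: a,b,c\n"), &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Tags) // [a b c]
}
```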
-func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { - if n.ShortTag() == nullTag || n.Kind == 0 && n.IsZero() { - return out, false, false - } - again := true - for again { - again = false - if out.Kind() == reflect.Ptr { - if out.IsNil() { - out.Set(reflect.New(out.Type().Elem())) - } - out = out.Elem() - again = true - } - if out.CanAddr() { - outi := out.Addr().Interface() - if u, ok := outi.(Unmarshaler); ok { - good = d.callUnmarshaler(n, u) - return out, true, good - } - if u, ok := outi.(obsoleteUnmarshaler); ok { - good = d.callObsoleteUnmarshaler(n, u) - return out, true, good - } - } - } - return out, false, false -} - -func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { - if n.ShortTag() == nullTag { - return reflect.Value{} - } - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -const ( - // 400,000 decode operations is ~500kb of dense object declarations, or - // ~5kb of dense object declarations with 10000% alias expansion - alias_ratio_range_low = 400000 - - // 4,000,000 decode operations is ~5MB of dense object declarations, or - // ~4.5MB of dense object declarations with 10% alias expansion - alias_ratio_range_high = 4000000 - - // alias_ratio_range is the range over which we scale allowed alias ratios - alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) -) - -func allowedAliasRatio(decodeCount int) float64 { - switch { - case decodeCount <= alias_ratio_range_low: - // allow 99% to come from alias expansion for small-to-medium documents - return 0.99 - case decodeCount >= alias_ratio_range_high: - // allow 10% to come from alias expansion for very large documents - return 0.10 - default: - // scale smoothly from 99% down to 10% over the range. - // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. - // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). - return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) - } -} - -func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { - d.decodeCount++ - if d.aliasDepth > 0 { - d.aliasCount++ - } - if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { - failf("document contains excessive aliasing") - } - if out.Type() == nodeType { - out.Set(reflect.ValueOf(n).Elem()) - return true - } - switch n.Kind { - case DocumentNode: - return d.document(n, out) - case AliasNode: - return d.alias(n, out) - } - out, unmarshaled, good := d.prepare(n, out) - if unmarshaled { - return good - } - switch n.Kind { - case ScalarNode: - good = d.scalar(n, out) - case MappingNode: - good = d.mapping(n, out) - case SequenceNode: - good = d.sequence(n, out) - case 0: - if n.IsZero() { - return d.null(out) - } - fallthrough - default: - failf("cannot decode node with unknown kind %d", n.Kind) - } - return good -} - -func (d *decoder) document(n *Node, out reflect.Value) (good bool) { - if len(n.Content) == 1 { - d.doc = n - d.unmarshal(n.Content[0], out) - return true - } - return false -} - -func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { - if d.aliases[n] { - // TODO this could actually be allowed in some circumstances. 
- failf("anchor '%s' value contains itself", n.Value) - } - d.aliases[n] = true - d.aliasDepth++ - good = d.unmarshal(n.Alias, out) - d.aliasDepth-- - delete(d.aliases, n) - return good -} - -var zeroValue reflect.Value - -func resetMap(out reflect.Value) { - for _, k := range out.MapKeys() { - out.SetMapIndex(k, zeroValue) - } -} - -func (d *decoder) null(out reflect.Value) bool { - if out.CanAddr() { - switch out.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - out.Set(reflect.Zero(out.Type())) - return true - } - } - return false -} - -func (d *decoder) scalar(n *Node, out reflect.Value) bool { - var tag string - var resolved interface{} - if n.indicatedString() { - tag = strTag - resolved = n.Value - } else { - tag, resolved = resolve(n.Tag, n.Value) - if tag == binaryTag { - data, err := base64.StdEncoding.DecodeString(resolved.(string)) - if err != nil { - failf("!!binary value contains invalid base64 data") - } - resolved = string(data) - } - } - if resolved == nil { - return d.null(out) - } - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - // We've resolved to exactly the type we want, so use that. - out.Set(resolvedv) - return true - } - // Perhaps we can use the value as a TextUnmarshaler to - // set its value. - if out.CanAddr() { - u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) - if ok { - var text []byte - if tag == binaryTag { - text = []byte(resolved.(string)) - } else { - // We let any value be unmarshaled into TextUnmarshaler. - // That might be more lax than we'd like, but the - // TextUnmarshaler itself should bowl out any dubious values. - text = []byte(n.Value) - } - err := u.UnmarshalText(text) - if err != nil { - fail(err) - } - return true - } - } - switch out.Kind() { - case reflect.String: - if tag == binaryTag { - out.SetString(resolved.(string)) - return true - } - out.SetString(n.Value) - return true - case reflect.Interface: - out.Set(reflect.ValueOf(resolved)) - return true - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - // This used to work in v2, but it's very unfriendly. 
- isDuration := out.Type() == durationType - - switch resolved := resolved.(type) { - case int: - if !isDuration && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case int64: - if !isDuration && !out.OverflowInt(resolved) { - out.SetInt(resolved) - return true - } - case uint64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case float64: - if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { - out.SetInt(int64(resolved)) - return true - } - case string: - if out.Type() == durationType { - d, err := time.ParseDuration(resolved) - if err == nil { - out.SetInt(int64(d)) - return true - } - } - } - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - switch resolved := resolved.(type) { - case int: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case int64: - if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case uint64: - if !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - case float64: - if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { - out.SetUint(uint64(resolved)) - return true - } - } - case reflect.Bool: - switch resolved := resolved.(type) { - case bool: - out.SetBool(resolved) - return true - case string: - // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). - // It only works if explicitly attempting to unmarshal into a typed bool value. - switch resolved { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": - out.SetBool(true) - return true - case "n", "N", "no", "No", "NO", "off", "Off", "OFF": - out.SetBool(false) - return true - } - } - case reflect.Float32, reflect.Float64: - switch resolved := resolved.(type) { - case int: - out.SetFloat(float64(resolved)) - return true - case int64: - out.SetFloat(float64(resolved)) - return true - case uint64: - out.SetFloat(float64(resolved)) - return true - case float64: - out.SetFloat(resolved) - return true - } - case reflect.Struct: - if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { - out.Set(resolvedv) - return true - } - case reflect.Ptr: - panic("yaml internal error: please report the issue") - } - d.terror(n, tag, out) - return false -} - -func settableValueOf(i interface{}) reflect.Value { - v := reflect.ValueOf(i) - sv := reflect.New(v.Type()).Elem() - sv.Set(v) - return sv -} - -func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - - var iface reflect.Value - switch out.Kind() { - case reflect.Slice: - out.Set(reflect.MakeSlice(out.Type(), l, l)) - case reflect.Array: - if l != out.Len() { - failf("invalid array: want %d elements but got %d", out.Len(), l) - } - case reflect.Interface: - // No type hints. Will have to use a generic sequence. 
- iface = out - out = settableValueOf(make([]interface{}, l)) - default: - d.terror(n, seqTag, out) - return false - } - et := out.Type().Elem() - - j := 0 - for i := 0; i < l; i++ { - e := reflect.New(et).Elem() - if ok := d.unmarshal(n.Content[i], e); ok { - out.Index(j).Set(e) - j++ - } - } - if out.Kind() != reflect.Array { - out.Set(out.Slice(0, j)) - } - if iface.IsValid() { - iface.Set(out) - } - return true -} - -func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { - l := len(n.Content) - if d.uniqueKeys { - nerrs := len(d.terrors) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - for j := i + 2; j < l; j += 2 { - nj := n.Content[j] - if ni.Kind == nj.Kind && ni.Value == nj.Value { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: mapping key %#v already defined at line %d", nj.Line, nj.Value, ni.Line)) - } - } - } - if len(d.terrors) > nerrs { - return false - } - } - switch out.Kind() { - case reflect.Struct: - return d.mappingStruct(n, out) - case reflect.Map: - // okay - case reflect.Interface: - iface := out - if isStringMap(n) { - out = reflect.MakeMap(d.stringMapType) - } else { - out = reflect.MakeMap(d.generalMapType) - } - iface.Set(out) - default: - d.terror(n, mapTag, out) - return false - } - - outt := out.Type() - kt := outt.Key() - et := outt.Elem() - - stringMapType := d.stringMapType - generalMapType := d.generalMapType - if outt.Elem() == ifaceType { - if outt.Key().Kind() == reflect.String { - d.stringMapType = outt - } else if outt.Key() == ifaceType { - d.generalMapType = outt - } - } - - if out.IsNil() { - out.Set(reflect.MakeMap(outt)) - } - for i := 0; i < l; i += 2 { - if isMerge(n.Content[i]) { - d.merge(n.Content[i+1], out) - continue - } - k := reflect.New(kt).Elem() - if d.unmarshal(n.Content[i], k) { - kkind := k.Kind() - if kkind == reflect.Interface { - kkind = k.Elem().Kind() - } - if kkind == reflect.Map || kkind == reflect.Slice { - failf("invalid map key: %#v", k.Interface()) - } - e := reflect.New(et).Elem() - if d.unmarshal(n.Content[i+1], e) { - out.SetMapIndex(k, e) - } - } - } - d.stringMapType = stringMapType - d.generalMapType = generalMapType - return true -} - -func isStringMap(n *Node) bool { - if n.Kind != MappingNode { - return false - } - l := len(n.Content) - for i := 0; i < l; i += 2 { - if n.Content[i].ShortTag() != strTag { - return false - } - } - return true -} - -func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { - sinfo, err := getStructInfo(out.Type()) - if err != nil { - panic(err) - } - - var inlineMap reflect.Value - var elemType reflect.Type - if sinfo.InlineMap != -1 { - inlineMap = out.Field(sinfo.InlineMap) - inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) - elemType = inlineMap.Type().Elem() - } - - for _, index := range sinfo.InlineUnmarshalers { - field := d.fieldByIndex(n, out, index) - d.prepare(n, field) - } - - var doneFields []bool - if d.uniqueKeys { - doneFields = make([]bool, len(sinfo.FieldsList)) - } - name := settableValueOf("") - l := len(n.Content) - for i := 0; i < l; i += 2 { - ni := n.Content[i] - if isMerge(ni) { - d.merge(n.Content[i+1], out) - continue - } - if !d.unmarshal(ni, name) { - continue - } - if info, ok := sinfo.FieldsMap[name.String()]; ok { - if d.uniqueKeys { - if doneFields[info.Id] { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s already set in type %s", ni.Line, name.String(), out.Type())) - continue - } - doneFields[info.Id] = true - } - var field reflect.Value - if info.Inline == nil { - field = 
out.Field(info.Num) - } else { - field = d.fieldByIndex(n, out, info.Inline) - } - d.unmarshal(n.Content[i+1], field) - } else if sinfo.InlineMap != -1 { - if inlineMap.IsNil() { - inlineMap.Set(reflect.MakeMap(inlineMap.Type())) - } - value := reflect.New(elemType).Elem() - d.unmarshal(n.Content[i+1], value) - inlineMap.SetMapIndex(name, value) - } else if d.knownFields { - d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.Line, name.String(), out.Type())) - } - } - return true -} - -func failWantMap() { - failf("map merge requires map or sequence of maps as the value") -} - -func (d *decoder) merge(n *Node, out reflect.Value) { - switch n.Kind { - case MappingNode: - d.unmarshal(n, out) - case AliasNode: - if n.Alias != nil && n.Alias.Kind != MappingNode { - failWantMap() - } - d.unmarshal(n, out) - case SequenceNode: - // Step backwards as earlier nodes take precedence. - for i := len(n.Content) - 1; i >= 0; i-- { - ni := n.Content[i] - if ni.Kind == AliasNode { - if ni.Alias != nil && ni.Alias.Kind != MappingNode { - failWantMap() - } - } else if ni.Kind != MappingNode { - failWantMap() - } - d.unmarshal(ni, out) - } - default: - failWantMap() - } -} - -func isMerge(n *Node) bool { - return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) -} diff --git a/vendor/gopkg.in/yaml.v3/emitterc.go b/vendor/gopkg.in/yaml.v3/emitterc.go deleted file mode 100644 index c29217e..0000000 --- a/vendor/gopkg.in/yaml.v3/emitterc.go +++ /dev/null @@ -1,2022 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" - "fmt" -) - -// Flush the buffer if needed. -func flush(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) { - return yaml_emitter_flush(emitter) - } - return true -} - -// Put a character to the output buffer. -func put(emitter *yaml_emitter_t, value byte) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - emitter.buffer[emitter.buffer_pos] = value - emitter.buffer_pos++ - emitter.column++ - return true -} - -// Put a line break to the output buffer. 
-func put_break(emitter *yaml_emitter_t) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - switch emitter.line_break { - case yaml_CR_BREAK: - emitter.buffer[emitter.buffer_pos] = '\r' - emitter.buffer_pos += 1 - case yaml_LN_BREAK: - emitter.buffer[emitter.buffer_pos] = '\n' - emitter.buffer_pos += 1 - case yaml_CRLN_BREAK: - emitter.buffer[emitter.buffer_pos+0] = '\r' - emitter.buffer[emitter.buffer_pos+1] = '\n' - emitter.buffer_pos += 2 - default: - panic("unknown line break setting") - } - if emitter.column == 0 { - emitter.space_above = true - } - emitter.column = 0 - emitter.line++ - // [Go] Do this here and below and drop from everywhere else (see commented lines). - emitter.indention = true - return true -} - -// Copy a character from a string into buffer. -func write(emitter *yaml_emitter_t, s []byte, i *int) bool { - if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { - return false - } - p := emitter.buffer_pos - w := width(s[*i]) - switch w { - case 4: - emitter.buffer[p+3] = s[*i+3] - fallthrough - case 3: - emitter.buffer[p+2] = s[*i+2] - fallthrough - case 2: - emitter.buffer[p+1] = s[*i+1] - fallthrough - case 1: - emitter.buffer[p+0] = s[*i+0] - default: - panic("unknown character width") - } - emitter.column++ - emitter.buffer_pos += w - *i += w - return true -} - -// Write a whole string into buffer. -func write_all(emitter *yaml_emitter_t, s []byte) bool { - for i := 0; i < len(s); { - if !write(emitter, s, &i) { - return false - } - } - return true -} - -// Copy a line break character from a string into buffer. -func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { - if s[*i] == '\n' { - if !put_break(emitter) { - return false - } - *i++ - } else { - if !write(emitter, s, i) { - return false - } - if emitter.column == 0 { - emitter.space_above = true - } - emitter.column = 0 - emitter.line++ - // [Go] Do this here and above and drop from everywhere else (see commented lines). - emitter.indention = true - } - return true -} - -// Set an emitter error and return false. -func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_EMITTER_ERROR - emitter.problem = problem - return false -} - -// Emit an event. -func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.events = append(emitter.events, *event) - for !yaml_emitter_need_more_events(emitter) { - event := &emitter.events[emitter.events_head] - if !yaml_emitter_analyze_event(emitter, event) { - return false - } - if !yaml_emitter_state_machine(emitter, event) { - return false - } - yaml_event_delete(event) - emitter.events_head++ - } - return true -} - -// Check if we need to accumulate more events before emitting. 
-// -// We accumulate extra -// - 1 event for DOCUMENT-START -// - 2 events for SEQUENCE-START -// - 3 events for MAPPING-START -// -func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { - if emitter.events_head == len(emitter.events) { - return true - } - var accumulate int - switch emitter.events[emitter.events_head].typ { - case yaml_DOCUMENT_START_EVENT: - accumulate = 1 - break - case yaml_SEQUENCE_START_EVENT: - accumulate = 2 - break - case yaml_MAPPING_START_EVENT: - accumulate = 3 - break - default: - return false - } - if len(emitter.events)-emitter.events_head > accumulate { - return false - } - var level int - for i := emitter.events_head; i < len(emitter.events); i++ { - switch emitter.events[i].typ { - case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: - level++ - case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: - level-- - } - if level == 0 { - return false - } - } - return true -} - -// Append a directive to the directives stack. -func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { - for i := 0; i < len(emitter.tag_directives); i++ { - if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") - } - } - - // [Go] Do we actually need to copy this given garbage collection - // and the lack of deallocating destructors? - tag_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(tag_copy.handle, value.handle) - copy(tag_copy.prefix, value.prefix) - emitter.tag_directives = append(emitter.tag_directives, tag_copy) - return true -} - -// Increase the indentation level. -func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { - emitter.indents = append(emitter.indents, emitter.indent) - if emitter.indent < 0 { - if flow { - emitter.indent = emitter.best_indent - } else { - emitter.indent = 0 - } - } else if !indentless { - // [Go] This was changed so that indentations are more regular. - if emitter.states[len(emitter.states)-1] == yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE { - // The first indent inside a sequence will just skip the "- " indicator. - emitter.indent += 2 - } else { - // Everything else aligns to the chosen indentation. - emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) - } - } - return true -} - -// State dispatcher. 
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { - switch emitter.state { - default: - case yaml_EMIT_STREAM_START_STATE: - return yaml_emitter_emit_stream_start(emitter, event) - - case yaml_EMIT_FIRST_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, true) - - case yaml_EMIT_DOCUMENT_START_STATE: - return yaml_emitter_emit_document_start(emitter, event, false) - - case yaml_EMIT_DOCUMENT_CONTENT_STATE: - return yaml_emitter_emit_document_content(emitter, event) - - case yaml_EMIT_DOCUMENT_END_STATE: - return yaml_emitter_emit_document_end(emitter, event) - - case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, true, false) - - case yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false, true) - - case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_flow_sequence_item(emitter, event, false, false) - - case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, true, false) - - case yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false, true) - - case yaml_EMIT_FLOW_MAPPING_KEY_STATE: - return yaml_emitter_emit_flow_mapping_key(emitter, event, false, false) - - case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, true) - - case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: - return yaml_emitter_emit_flow_mapping_value(emitter, event, false) - - case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, true) - - case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: - return yaml_emitter_emit_block_sequence_item(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: - return yaml_emitter_emit_block_mapping_key(emitter, event, false) - - case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, true) - - case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: - return yaml_emitter_emit_block_mapping_value(emitter, event, false) - - case yaml_EMIT_END_STATE: - return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") - } - panic("invalid emitter state") -} - -// Expect STREAM-START. 
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_STREAM_START_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") - } - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = event.encoding - if emitter.encoding == yaml_ANY_ENCODING { - emitter.encoding = yaml_UTF8_ENCODING - } - } - if emitter.best_indent < 2 || emitter.best_indent > 9 { - emitter.best_indent = 2 - } - if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { - emitter.best_width = 80 - } - if emitter.best_width < 0 { - emitter.best_width = 1<<31 - 1 - } - if emitter.line_break == yaml_ANY_BREAK { - emitter.line_break = yaml_LN_BREAK - } - - emitter.indent = -1 - emitter.line = 0 - emitter.column = 0 - emitter.whitespace = true - emitter.indention = true - emitter.space_above = true - emitter.foot_indent = -1 - - if emitter.encoding != yaml_UTF8_ENCODING { - if !yaml_emitter_write_bom(emitter) { - return false - } - } - emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE - return true -} - -// Expect DOCUMENT-START or STREAM-END. -func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - - if event.typ == yaml_DOCUMENT_START_EVENT { - - if event.version_directive != nil { - if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { - return false - } - } - - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { - return false - } - if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { - return false - } - } - - for i := 0; i < len(default_tag_directives); i++ { - tag_directive := &default_tag_directives[i] - if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { - return false - } - } - - implicit := event.implicit - if !first || emitter.canonical { - implicit = false - } - - if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if event.version_directive != nil { - implicit = false - if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if len(event.tag_directives) > 0 { - implicit = false - for i := 0; i < len(event.tag_directives); i++ { - tag_directive := &event.tag_directives[i] - if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { - return false - } - if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if yaml_emitter_check_empty_document(emitter) { - implicit = false - } - if !implicit { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { - return false - } - if emitter.canonical || true { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - - if len(emitter.head_comment) > 0 { - if !yaml_emitter_process_head_comment(emitter) { - return false 
- } - if !put_break(emitter) { - return false - } - } - - emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE - return true - } - - if event.typ == yaml_STREAM_END_EVENT { - if emitter.open_ended { - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_END_STATE - return true - } - - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") -} - -// Expect the root node. -func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { - emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if !yaml_emitter_emit_node(emitter, event, true, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect DOCUMENT-END. -func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if event.typ != yaml_DOCUMENT_END_EVENT { - return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") - } - // [Go] Force document foot separation. - emitter.foot_indent = 0 - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.foot_indent = -1 - if !yaml_emitter_write_indent(emitter) { - return false - } - if !event.implicit { - // [Go] Allocate the slice elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_flush(emitter) { - return false - } - emitter.state = yaml_EMIT_DOCUMENT_START_STATE - emitter.tag_directives = emitter.tag_directives[:0] - return true -} - -// Expect a flow item node. 
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_SEQUENCE_END_EVENT { - if emitter.canonical && !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.column == 0 || emitter.canonical && !first { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - - return true - } - - if !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if emitter.column == 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE) - } else { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) - } - if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { - return false - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a flow key node. 
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first, trail bool) bool { - if first { - if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - emitter.flow_level++ - } - - if event.typ == yaml_MAPPING_END_EVENT { - if (emitter.canonical || len(emitter.head_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0) && !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - emitter.flow_level-- - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - if emitter.canonical && !first { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - - if !first && !trail { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - - if !yaml_emitter_process_head_comment(emitter) { - return false - } - - if emitter.column == 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - - if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a flow value node. -func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if emitter.canonical || emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { - return false - } - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE) - } else { - emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) - } - if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { - return false - } - if len(emitter.line_comment)+len(emitter.foot_comment)+len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { - return false - } - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a block item node. 
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if event.typ == yaml_SEQUENCE_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) - if !yaml_emitter_emit_node(emitter, event, false, true, false, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -// Expect a block key node. -func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { - if first { - if !yaml_emitter_increase_indent(emitter, false, false) { - return false - } - } - if !yaml_emitter_process_head_comment(emitter) { - return false - } - if event.typ == yaml_MAPPING_END_EVENT { - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if len(emitter.line_comment) > 0 { - // [Go] A line comment was provided for the key. That's unusual as the - // scanner associates line comments with the value. Either way, - // save the line comment and render it appropriately later. - emitter.key_line_comment = emitter.line_comment - emitter.line_comment = nil - } - if yaml_emitter_check_simple_key(emitter) { - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, true) - } - if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { - return false - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) - return yaml_emitter_emit_node(emitter, event, false, false, true, false) -} - -// Expect a block value node. -func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { - if simple { - if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { - return false - } - } else { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { - return false - } - } - if len(emitter.key_line_comment) > 0 { - // [Go] A line comment was previously provided for the key. Handle it before - // the value so the inline comments are placed correctly. - if yaml_emitter_silent_nil_event(emitter, event) && len(emitter.line_comment) == 0 { - // Nothing other than the line comment will be written on the line. - emitter.line_comment = emitter.key_line_comment - emitter.key_line_comment = nil - } else { - // An actual value is coming, so emit the comment line. 
- emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment - if !yaml_emitter_process_line_comment(emitter) { - return false - } - emitter.line_comment, emitter.key_line_comment = emitter.key_line_comment, emitter.line_comment - // Indent in unless it's a block that will reindent anyway. - if event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || (event.typ != yaml_MAPPING_START_EVENT && event.typ != yaml_SEQUENCE_START_EVENT) { - emitter.indent = emitter.best_indent*((emitter.indent+emitter.best_indent)/emitter.best_indent) - if !yaml_emitter_write_indent(emitter) { - return false - } - } - } - } - emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) - if !yaml_emitter_emit_node(emitter, event, false, false, true, false) { - return false - } - if !yaml_emitter_process_line_comment(emitter) { - return false - } - if !yaml_emitter_process_foot_comment(emitter) { - return false - } - return true -} - -func yaml_emitter_silent_nil_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - return event.typ == yaml_SCALAR_EVENT && event.implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0 -} - -// Expect a node. -func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, - root bool, sequence bool, mapping bool, simple_key bool) bool { - - emitter.root_context = root - emitter.sequence_context = sequence - emitter.mapping_context = mapping - emitter.simple_key_context = simple_key - - switch event.typ { - case yaml_ALIAS_EVENT: - return yaml_emitter_emit_alias(emitter, event) - case yaml_SCALAR_EVENT: - return yaml_emitter_emit_scalar(emitter, event) - case yaml_SEQUENCE_START_EVENT: - return yaml_emitter_emit_sequence_start(emitter, event) - case yaml_MAPPING_START_EVENT: - return yaml_emitter_emit_mapping_start(emitter, event) - default: - return yaml_emitter_set_emitter_error(emitter, - fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) - } -} - -// Expect ALIAS. -func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SCALAR. -func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_select_scalar_style(emitter, event) { - return false - } - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if !yaml_emitter_increase_indent(emitter, true, false) { - return false - } - if !yaml_emitter_process_scalar(emitter) { - return false - } - emitter.indent = emitter.indents[len(emitter.indents)-1] - emitter.indents = emitter.indents[:len(emitter.indents)-1] - emitter.state = emitter.states[len(emitter.states)-1] - emitter.states = emitter.states[:len(emitter.states)-1] - return true -} - -// Expect SEQUENCE-START. 
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || - yaml_emitter_check_empty_sequence(emitter) { - emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE - } - return true -} - -// Expect MAPPING-START. -func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { - if !yaml_emitter_process_anchor(emitter) { - return false - } - if !yaml_emitter_process_tag(emitter) { - return false - } - if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || - yaml_emitter_check_empty_mapping(emitter) { - emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE - } else { - emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE - } - return true -} - -// Check if the document content is an empty scalar. -func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { - return false // [Go] Huh? -} - -// Check if the next events represent an empty sequence. -func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT -} - -// Check if the next events represent an empty mapping. -func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { - if len(emitter.events)-emitter.events_head < 2 { - return false - } - return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && - emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT -} - -// Check if the next node can be expressed as a simple key. -func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { - length := 0 - switch emitter.events[emitter.events_head].typ { - case yaml_ALIAS_EVENT: - length += len(emitter.anchor_data.anchor) - case yaml_SCALAR_EVENT: - if emitter.scalar_data.multiline { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) + - len(emitter.scalar_data.value) - case yaml_SEQUENCE_START_EVENT: - if !yaml_emitter_check_empty_sequence(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - case yaml_MAPPING_START_EVENT: - if !yaml_emitter_check_empty_mapping(emitter) { - return false - } - length += len(emitter.anchor_data.anchor) + - len(emitter.tag_data.handle) + - len(emitter.tag_data.suffix) - default: - return false - } - return length <= 128 -} - -// Determine an acceptable scalar style. 
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 - if no_tag && !event.implicit && !event.quoted_implicit { - return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") - } - - style := event.scalar_style() - if style == yaml_ANY_SCALAR_STYLE { - style = yaml_PLAIN_SCALAR_STYLE - } - if emitter.canonical { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - if emitter.simple_key_context && emitter.scalar_data.multiline { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - if style == yaml_PLAIN_SCALAR_STYLE { - if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || - emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - if no_tag && !event.implicit { - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { - if !emitter.scalar_data.single_quoted_allowed { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { - if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - } - - if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { - emitter.tag_data.handle = []byte{'!'} - } - emitter.scalar_data.style = style - return true -} - -// Write an anchor. -func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { - if emitter.anchor_data.anchor == nil { - return true - } - c := []byte{'&'} - if emitter.anchor_data.alias { - c[0] = '*' - } - if !yaml_emitter_write_indicator(emitter, c, true, false, false) { - return false - } - return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) -} - -// Write a tag. -func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { - if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { - return true - } - if len(emitter.tag_data.handle) > 0 { - if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { - return false - } - if len(emitter.tag_data.suffix) > 0 { - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - } - } else { - // [Go] Allocate these slices elsewhere. - if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { - return false - } - if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { - return false - } - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { - return false - } - } - return true -} - -// Write a scalar. 
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { - switch emitter.scalar_data.style { - case yaml_PLAIN_SCALAR_STYLE: - return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_SINGLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_DOUBLE_QUOTED_SCALAR_STYLE: - return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) - - case yaml_LITERAL_SCALAR_STYLE: - return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) - - case yaml_FOLDED_SCALAR_STYLE: - return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) - } - panic("unknown scalar style") -} - -// Write a head comment. -func yaml_emitter_process_head_comment(emitter *yaml_emitter_t) bool { - if len(emitter.tail_comment) > 0 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.tail_comment) { - return false - } - emitter.tail_comment = emitter.tail_comment[:0] - emitter.foot_indent = emitter.indent - if emitter.foot_indent < 0 { - emitter.foot_indent = 0 - } - } - - if len(emitter.head_comment) == 0 { - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.head_comment) { - return false - } - emitter.head_comment = emitter.head_comment[:0] - return true -} - -// Write an line comment. -func yaml_emitter_process_line_comment(emitter *yaml_emitter_t) bool { - if len(emitter.line_comment) == 0 { - return true - } - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !yaml_emitter_write_comment(emitter, emitter.line_comment) { - return false - } - emitter.line_comment = emitter.line_comment[:0] - return true -} - -// Write a foot comment. -func yaml_emitter_process_foot_comment(emitter *yaml_emitter_t) bool { - if len(emitter.foot_comment) == 0 { - return true - } - if !yaml_emitter_write_indent(emitter) { - return false - } - if !yaml_emitter_write_comment(emitter, emitter.foot_comment) { - return false - } - emitter.foot_comment = emitter.foot_comment[:0] - emitter.foot_indent = emitter.indent - if emitter.foot_indent < 0 { - emitter.foot_indent = 0 - } - return true -} - -// Check if a %YAML directive is valid. -func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { - if version_directive.major != 1 || version_directive.minor != 1 { - return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") - } - return true -} - -// Check if a %TAG directive is valid. -func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { - handle := tag_directive.handle - prefix := tag_directive.prefix - if len(handle) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") - } - if handle[0] != '!' { - return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") - } - if handle[len(handle)-1] != '!' 
{ - return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") - } - for i := 1; i < len(handle)-1; i += width(handle[i]) { - if !is_alpha(handle, i) { - return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") - } - } - if len(prefix) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") - } - return true -} - -// Check if an anchor is valid. -func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { - if len(anchor) == 0 { - problem := "anchor value must not be empty" - if alias { - problem = "alias value must not be empty" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - for i := 0; i < len(anchor); i += width(anchor[i]) { - if !is_alpha(anchor, i) { - problem := "anchor value must contain alphanumerical characters only" - if alias { - problem = "alias value must contain alphanumerical characters only" - } - return yaml_emitter_set_emitter_error(emitter, problem) - } - } - emitter.anchor_data.anchor = anchor - emitter.anchor_data.alias = alias - return true -} - -// Check if a tag is valid. -func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { - if len(tag) == 0 { - return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") - } - for i := 0; i < len(emitter.tag_directives); i++ { - tag_directive := &emitter.tag_directives[i] - if bytes.HasPrefix(tag, tag_directive.prefix) { - emitter.tag_data.handle = tag_directive.handle - emitter.tag_data.suffix = tag[len(tag_directive.prefix):] - return true - } - } - emitter.tag_data.suffix = tag - return true -} - -// Check if a scalar is valid. -func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { - var ( - block_indicators = false - flow_indicators = false - line_breaks = false - special_characters = false - tab_characters = false - - leading_space = false - leading_break = false - trailing_space = false - trailing_break = false - break_space = false - space_break = false - - preceded_by_whitespace = false - followed_by_whitespace = false - previous_space = false - previous_break = false - ) - - emitter.scalar_data.value = value - - if len(value) == 0 { - emitter.scalar_data.multiline = false - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = false - return true - } - - if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' 
&& value[2] == '.')) { - block_indicators = true - flow_indicators = true - } - - preceded_by_whitespace = true - for i, w := 0, 0; i < len(value); i += w { - w = width(value[i]) - followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) - - if i == 0 { - switch value[i] { - case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': - flow_indicators = true - block_indicators = true - case '?', ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '-': - if followed_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } else { - switch value[i] { - case ',', '?', '[', ']', '{', '}': - flow_indicators = true - case ':': - flow_indicators = true - if followed_by_whitespace { - block_indicators = true - } - case '#': - if preceded_by_whitespace { - flow_indicators = true - block_indicators = true - } - } - } - - if value[i] == '\t' { - tab_characters = true - } else if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { - special_characters = true - } - if is_space(value, i) { - if i == 0 { - leading_space = true - } - if i+width(value[i]) == len(value) { - trailing_space = true - } - if previous_break { - break_space = true - } - previous_space = true - previous_break = false - } else if is_break(value, i) { - line_breaks = true - if i == 0 { - leading_break = true - } - if i+width(value[i]) == len(value) { - trailing_break = true - } - if previous_space { - space_break = true - } - previous_space = false - previous_break = true - } else { - previous_space = false - previous_break = false - } - - // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. - preceded_by_whitespace = is_blankz(value, i) - } - - emitter.scalar_data.multiline = line_breaks - emitter.scalar_data.flow_plain_allowed = true - emitter.scalar_data.block_plain_allowed = true - emitter.scalar_data.single_quoted_allowed = true - emitter.scalar_data.block_allowed = true - - if leading_space || leading_break || trailing_space || trailing_break { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if trailing_space { - emitter.scalar_data.block_allowed = false - } - if break_space { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || tab_characters || special_characters { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - emitter.scalar_data.single_quoted_allowed = false - } - if space_break || special_characters { - emitter.scalar_data.block_allowed = false - } - if line_breaks { - emitter.scalar_data.flow_plain_allowed = false - emitter.scalar_data.block_plain_allowed = false - } - if flow_indicators { - emitter.scalar_data.flow_plain_allowed = false - } - if block_indicators { - emitter.scalar_data.block_plain_allowed = false - } - return true -} - -// Check if the event data is valid. 
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { - - emitter.anchor_data.anchor = nil - emitter.tag_data.handle = nil - emitter.tag_data.suffix = nil - emitter.scalar_data.value = nil - - if len(event.head_comment) > 0 { - emitter.head_comment = event.head_comment - } - if len(event.line_comment) > 0 { - emitter.line_comment = event.line_comment - } - if len(event.foot_comment) > 0 { - emitter.foot_comment = event.foot_comment - } - if len(event.tail_comment) > 0 { - emitter.tail_comment = event.tail_comment - } - - switch event.typ { - case yaml_ALIAS_EVENT: - if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { - return false - } - - case yaml_SCALAR_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - if !yaml_emitter_analyze_scalar(emitter, event.value) { - return false - } - - case yaml_SEQUENCE_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - - case yaml_MAPPING_START_EVENT: - if len(event.anchor) > 0 { - if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { - return false - } - } - if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { - if !yaml_emitter_analyze_tag(emitter, event.tag) { - return false - } - } - } - return true -} - -// Write the BOM character. -func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { - if !flush(emitter) { - return false - } - pos := emitter.buffer_pos - emitter.buffer[pos+0] = '\xEF' - emitter.buffer[pos+1] = '\xBB' - emitter.buffer[pos+2] = '\xBF' - emitter.buffer_pos += 3 - return true -} - -func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { - indent := emitter.indent - if indent < 0 { - indent = 0 - } - if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { - if !put_break(emitter) { - return false - } - } - if emitter.foot_indent == indent { - if !put_break(emitter) { - return false - } - } - for emitter.column < indent { - if !put(emitter, ' ') { - return false - } - } - emitter.whitespace = true - //emitter.indention = true - emitter.space_above = false - emitter.foot_indent = -1 - return true -} - -func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, indicator) { - return false - } - emitter.whitespace = is_whitespace - emitter.indention = (emitter.indention && is_indention) - emitter.open_ended = false - return true -} - -func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { - if !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - if !write_all(emitter, value) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func 
yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { - if need_whitespace && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - for i := 0; i < len(value); { - var must_write bool - switch value[i] { - case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': - must_write = true - default: - must_write = is_alpha(value, i) - } - if must_write { - if !write(emitter, value, &i) { - return false - } - } else { - w := width(value[i]) - for k := 0; k < w; k++ { - octet := value[i] - i++ - if !put(emitter, '%') { - return false - } - - c := octet >> 4 - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - - c = octet & 0x0f - if c < 10 { - c += '0' - } else { - c += 'A' - 10 - } - if !put(emitter, c) { - return false - } - } - } - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - if len(value) > 0 && !emitter.whitespace { - if !put(emitter, ' ') { - return false - } - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - - if len(value) > 0 { - emitter.whitespace = false - } - emitter.indention = false - if emitter.root_context { - emitter.open_ended = true - } - - return true -} - -func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { - return false - } - - spaces := false - breaks := false - for i := 0; i < len(value); { - if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - spaces = true - } else if is_break(value, i) { - if !breaks && value[i] == '\n' { - if !put_break(emitter) { - return false - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if value[i] == '\'' { - if !put(emitter, '\'') { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - spaces = false - breaks = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { - spaces := false - if 
!yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { - return false - } - - for i := 0; i < len(value); { - if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || - is_bom(value, i) || is_break(value, i) || - value[i] == '"' || value[i] == '\\' { - - octet := value[i] - - var w int - var v rune - switch { - case octet&0x80 == 0x00: - w, v = 1, rune(octet&0x7F) - case octet&0xE0 == 0xC0: - w, v = 2, rune(octet&0x1F) - case octet&0xF0 == 0xE0: - w, v = 3, rune(octet&0x0F) - case octet&0xF8 == 0xF0: - w, v = 4, rune(octet&0x07) - } - for k := 1; k < w; k++ { - octet = value[i+k] - v = (v << 6) + (rune(octet) & 0x3F) - } - i += w - - if !put(emitter, '\\') { - return false - } - - var ok bool - switch v { - case 0x00: - ok = put(emitter, '0') - case 0x07: - ok = put(emitter, 'a') - case 0x08: - ok = put(emitter, 'b') - case 0x09: - ok = put(emitter, 't') - case 0x0A: - ok = put(emitter, 'n') - case 0x0b: - ok = put(emitter, 'v') - case 0x0c: - ok = put(emitter, 'f') - case 0x0d: - ok = put(emitter, 'r') - case 0x1b: - ok = put(emitter, 'e') - case 0x22: - ok = put(emitter, '"') - case 0x5c: - ok = put(emitter, '\\') - case 0x85: - ok = put(emitter, 'N') - case 0xA0: - ok = put(emitter, '_') - case 0x2028: - ok = put(emitter, 'L') - case 0x2029: - ok = put(emitter, 'P') - default: - if v <= 0xFF { - ok = put(emitter, 'x') - w = 2 - } else if v <= 0xFFFF { - ok = put(emitter, 'u') - w = 4 - } else { - ok = put(emitter, 'U') - w = 8 - } - for k := (w - 1) * 4; ok && k >= 0; k -= 4 { - digit := byte((v >> uint(k)) & 0x0F) - if digit < 10 { - ok = put(emitter, digit+'0') - } else { - ok = put(emitter, digit+'A'-10) - } - } - } - if !ok { - return false - } - spaces = false - } else if is_space(value, i) { - if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { - if !yaml_emitter_write_indent(emitter) { - return false - } - if is_space(value, i+1) { - if !put(emitter, '\\') { - return false - } - } - i += width(value[i]) - } else if !write(emitter, value, &i) { - return false - } - spaces = true - } else { - if !write(emitter, value, &i) { - return false - } - spaces = false - } - } - if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { - return false - } - emitter.whitespace = false - emitter.indention = false - return true -} - -func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { - if is_space(value, 0) || is_break(value, 0) { - indent_hint := []byte{'0' + byte(emitter.best_indent)} - if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { - return false - } - } - - emitter.open_ended = false - - var chomp_hint [1]byte - if len(value) == 0 { - chomp_hint[0] = '-' - } else { - i := len(value) - 1 - for value[i]&0xC0 == 0x80 { - i-- - } - if !is_break(value, i) { - chomp_hint[0] = '-' - } else if i == 0 { - chomp_hint[0] = '+' - emitter.open_ended = true - } else { - i-- - for value[i]&0xC0 == 0x80 { - i-- - } - if is_break(value, i) { - chomp_hint[0] = '+' - emitter.open_ended = true - } - } - } - if chomp_hint[0] != 0 { - if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { - return false - } - } - return true -} - -func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - if !put_break(emitter) { - 
return false - } - //emitter.indention = true - emitter.whitespace = true - breaks := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - } - if !write(emitter, value, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - - return true -} - -func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { - if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { - return false - } - if !yaml_emitter_write_block_scalar_hints(emitter, value) { - return false - } - - if !put_break(emitter) { - return false - } - //emitter.indention = true - emitter.whitespace = true - - breaks := true - leading_spaces := true - for i := 0; i < len(value); { - if is_break(value, i) { - if !breaks && !leading_spaces && value[i] == '\n' { - k := 0 - for is_break(value, k) { - k += width(value[k]) - } - if !is_blankz(value, k) { - if !put_break(emitter) { - return false - } - } - } - if !write_break(emitter, value, &i) { - return false - } - //emitter.indention = true - breaks = true - } else { - if breaks { - if !yaml_emitter_write_indent(emitter) { - return false - } - leading_spaces = is_blank(value, i) - } - if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { - if !yaml_emitter_write_indent(emitter) { - return false - } - i += width(value[i]) - } else { - if !write(emitter, value, &i) { - return false - } - } - emitter.indention = false - breaks = false - } - } - return true -} - -func yaml_emitter_write_comment(emitter *yaml_emitter_t, comment []byte) bool { - breaks := false - pound := false - for i := 0; i < len(comment); { - if is_break(comment, i) { - if !write_break(emitter, comment, &i) { - return false - } - //emitter.indention = true - breaks = true - pound = false - } else { - if breaks && !yaml_emitter_write_indent(emitter) { - return false - } - if !pound { - if comment[i] != '#' && (!put(emitter, '#') || !put(emitter, ' ')) { - return false - } - pound = true - } - if !write(emitter, comment, &i) { - return false - } - emitter.indention = false - breaks = false - } - } - if !breaks && !put_break(emitter) { - return false - } - - emitter.whitespace = true - //emitter.indention = true - return true -} diff --git a/vendor/gopkg.in/yaml.v3/encode.go b/vendor/gopkg.in/yaml.v3/encode.go deleted file mode 100644 index 45e8d1e..0000000 --- a/vendor/gopkg.in/yaml.v3/encode.go +++ /dev/null @@ -1,572 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package yaml - -import ( - "encoding" - "fmt" - "io" - "reflect" - "regexp" - "sort" - "strconv" - "strings" - "time" - "unicode/utf8" -) - -type encoder struct { - emitter yaml_emitter_t - event yaml_event_t - out []byte - flow bool - indent int - doneInit bool -} - -func newEncoder() *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_string(&e.emitter, &e.out) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func newEncoderWithWriter(w io.Writer) *encoder { - e := &encoder{} - yaml_emitter_initialize(&e.emitter) - yaml_emitter_set_output_writer(&e.emitter, w) - yaml_emitter_set_unicode(&e.emitter, true) - return e -} - -func (e *encoder) init() { - if e.doneInit { - return - } - if e.indent == 0 { - e.indent = 4 - } - e.emitter.best_indent = e.indent - yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) - e.emit() - e.doneInit = true -} - -func (e *encoder) finish() { - e.emitter.open_ended = false - yaml_stream_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) destroy() { - yaml_emitter_delete(&e.emitter) -} - -func (e *encoder) emit() { - // This will internally delete the e.event value. - e.must(yaml_emitter_emit(&e.emitter, &e.event)) -} - -func (e *encoder) must(ok bool) { - if !ok { - msg := e.emitter.problem - if msg == "" { - msg = "unknown problem generating YAML content" - } - failf("%s", msg) - } -} - -func (e *encoder) marshalDoc(tag string, in reflect.Value) { - e.init() - var node *Node - if in.IsValid() { - node, _ = in.Interface().(*Node) - } - if node != nil && node.Kind == DocumentNode { - e.nodev(in) - } else { - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.emit() - e.marshal(tag, in) - yaml_document_end_event_initialize(&e.event, true) - e.emit() - } -} - -func (e *encoder) marshal(tag string, in reflect.Value) { - tag = shortTag(tag) - if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { - e.nilv() - return - } - iface := in.Interface() - switch value := iface.(type) { - case *Node: - e.nodev(in) - return - case Node: - e.nodev(in.Addr()) - return - case time.Time: - e.timev(tag, in) - return - case *time.Time: - e.timev(tag, in.Elem()) - return - case time.Duration: - e.stringv(tag, reflect.ValueOf(value.String())) - return - case Marshaler: - v, err := value.MarshalYAML() - if err != nil { - fail(err) - } - if v == nil { - e.nilv() - return - } - e.marshal(tag, reflect.ValueOf(v)) - return - case encoding.TextMarshaler: - text, err := value.MarshalText() - if err != nil { - fail(err) - } - in = reflect.ValueOf(string(text)) - case nil: - e.nilv() - return - } - switch in.Kind() { - case reflect.Interface: - e.marshal(tag, in.Elem()) - case reflect.Map: - e.mapv(tag, in) - case reflect.Ptr: - e.marshal(tag, in.Elem()) - case reflect.Struct: - e.structv(tag, in) - case reflect.Slice, reflect.Array: - e.slicev(tag, in) - case reflect.String: - e.stringv(tag, in) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - e.intv(tag, in) - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - e.uintv(tag, in) - case reflect.Float32, reflect.Float64: - e.floatv(tag, in) - case reflect.Bool: - e.boolv(tag, in) - default: - panic("cannot marshal type: " + in.Type().String()) - } -} - -func (e *encoder) mapv(tag string, in reflect.Value) { - e.mappingv(tag, func() { - keys := keyList(in.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - e.marshal("", k) - e.marshal("", in.MapIndex(k)) 
- } - }) -} - -func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { - for _, num := range index { - for { - if v.Kind() == reflect.Ptr { - if v.IsNil() { - return reflect.Value{} - } - v = v.Elem() - continue - } - break - } - v = v.Field(num) - } - return v -} - -func (e *encoder) structv(tag string, in reflect.Value) { - sinfo, err := getStructInfo(in.Type()) - if err != nil { - panic(err) - } - e.mappingv(tag, func() { - for _, info := range sinfo.FieldsList { - var value reflect.Value - if info.Inline == nil { - value = in.Field(info.Num) - } else { - value = e.fieldByIndex(in, info.Inline) - if !value.IsValid() { - continue - } - } - if info.OmitEmpty && isZero(value) { - continue - } - e.marshal("", reflect.ValueOf(info.Key)) - e.flow = info.Flow - e.marshal("", value) - } - if sinfo.InlineMap >= 0 { - m := in.Field(sinfo.InlineMap) - if m.Len() > 0 { - e.flow = false - keys := keyList(m.MapKeys()) - sort.Sort(keys) - for _, k := range keys { - if _, found := sinfo.FieldsMap[k.String()]; found { - panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) - } - e.marshal("", k) - e.flow = false - e.marshal("", m.MapIndex(k)) - } - } - } - }) -} - -func (e *encoder) mappingv(tag string, f func()) { - implicit := tag == "" - style := yaml_BLOCK_MAPPING_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) - e.emit() - f() - yaml_mapping_end_event_initialize(&e.event) - e.emit() -} - -func (e *encoder) slicev(tag string, in reflect.Value) { - implicit := tag == "" - style := yaml_BLOCK_SEQUENCE_STYLE - if e.flow { - e.flow = false - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) - e.emit() - n := in.Len() - for i := 0; i < n; i++ { - e.marshal("", in.Index(i)) - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.emit() -} - -// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. -// -// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported -// in YAML 1.2 and by this package, but these should be marshalled quoted for -// the time being for compatibility with other parsers. -func isBase60Float(s string) (result bool) { - // Fast path. - if s == "" { - return false - } - c := s[0] - if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { - return false - } - // Do the full match. - return base60float.MatchString(s) -} - -// From http://yaml.org/type/float.html, except the regular expression there -// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. -var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) - -// isOldBool returns whether s is bool notation as defined in YAML 1.1. -// -// We continue to force strings that YAML 1.1 would interpret as booleans to be -// rendered as quotes strings so that the marshalled output valid for YAML 1.1 -// parsing. 
-func isOldBool(s string) (result bool) { - switch s { - case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", - "n", "N", "no", "No", "NO", "off", "Off", "OFF": - return true - default: - return false - } -} - -func (e *encoder) stringv(tag string, in reflect.Value) { - var style yaml_scalar_style_t - s := in.String() - canUsePlain := true - switch { - case !utf8.ValidString(s): - if tag == binaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if tag != "" { - failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. - tag = binaryTag - s = encodeBase64(s) - case tag == "": - // Check to see if it would resolve to a specific - // tag when encoded unquoted. If it doesn't, - // there's no need to quote it. - rtag, _ := resolve("", s) - canUsePlain = rtag == strTag && !(isBase60Float(s) || isOldBool(s)) - } - // Note: it's possible for user code to emit invalid YAML - // if they explicitly specify a tag and a string containing - // text that's incompatible with that tag. - switch { - case strings.Contains(s, "\n"): - if e.flow { - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } else { - style = yaml_LITERAL_SCALAR_STYLE - } - case canUsePlain: - style = yaml_PLAIN_SCALAR_STYLE - default: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - e.emitScalar(s, "", tag, style, nil, nil, nil, nil) -} - -func (e *encoder) boolv(tag string, in reflect.Value) { - var s string - if in.Bool() { - s = "true" - } else { - s = "false" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) intv(tag string, in reflect.Value) { - s := strconv.FormatInt(in.Int(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) uintv(tag string, in reflect.Value) { - s := strconv.FormatUint(in.Uint(), 10) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) timev(tag string, in reflect.Value) { - t := in.Interface().(time.Time) - s := t.Format(time.RFC3339Nano) - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) floatv(tag string, in reflect.Value) { - // Issue #352: When formatting, use the precision of the underlying value - precision := 64 - if in.Kind() == reflect.Float32 { - precision = 32 - } - - s := strconv.FormatFloat(in.Float(), 'g', -1, precision) - switch s { - case "+Inf": - s = ".inf" - case "-Inf": - s = "-.inf" - case "NaN": - s = ".nan" - } - e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) nilv() { - e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE, nil, nil, nil, nil) -} - -func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t, head, line, foot, tail []byte) { - // TODO Kill this function. Replace all initialize calls by their underlining Go literals. - implicit := tag == "" - if !implicit { - tag = longTag(tag) - } - e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) - e.event.head_comment = head - e.event.line_comment = line - e.event.foot_comment = foot - e.event.tail_comment = tail - e.emit() -} - -func (e *encoder) nodev(in reflect.Value) { - e.node(in.Interface().(*Node), "") -} - -func (e *encoder) node(node *Node, tail string) { - // Zero nodes behave as nil. 
- if node.Kind == 0 && node.IsZero() { - e.nilv() - return - } - - // If the tag was not explicitly requested, and dropping it won't change the - // implicit tag of the value, don't include it in the presentation. - var tag = node.Tag - var stag = shortTag(tag) - var forceQuoting bool - if tag != "" && node.Style&TaggedStyle == 0 { - if node.Kind == ScalarNode { - if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { - tag = "" - } else { - rtag, _ := resolve("", node.Value) - if rtag == stag { - tag = "" - } else if stag == strTag { - tag = "" - forceQuoting = true - } - } - } else { - var rtag string - switch node.Kind { - case MappingNode: - rtag = mapTag - case SequenceNode: - rtag = seqTag - } - if rtag == stag { - tag = "" - } - } - } - - switch node.Kind { - case DocumentNode: - yaml_document_start_event_initialize(&e.event, nil, nil, true) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - for _, node := range node.Content { - e.node(node, "") - } - yaml_document_end_event_initialize(&e.event, true) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case SequenceNode: - style := yaml_BLOCK_SEQUENCE_STYLE - if node.Style&FlowStyle != 0 { - style = yaml_FLOW_SEQUENCE_STYLE - } - e.must(yaml_sequence_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style)) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - for _, node := range node.Content { - e.node(node, "") - } - e.must(yaml_sequence_end_event_initialize(&e.event)) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case MappingNode: - style := yaml_BLOCK_MAPPING_STYLE - if node.Style&FlowStyle != 0 { - style = yaml_FLOW_MAPPING_STYLE - } - yaml_mapping_start_event_initialize(&e.event, []byte(node.Anchor), []byte(longTag(tag)), tag == "", style) - e.event.tail_comment = []byte(tail) - e.event.head_comment = []byte(node.HeadComment) - e.emit() - - // The tail logic below moves the foot comment of prior keys to the following key, - // since the value for each key may be a nested structure and the foot needs to be - // processed only the entirety of the value is streamed. The last tail is processed - // with the mapping end event. - var tail string - for i := 0; i+1 < len(node.Content); i += 2 { - k := node.Content[i] - foot := k.FootComment - if foot != "" { - kopy := *k - kopy.FootComment = "" - k = &kopy - } - e.node(k, tail) - tail = foot - - v := node.Content[i+1] - e.node(v, "") - } - - yaml_mapping_end_event_initialize(&e.event) - e.event.tail_comment = []byte(tail) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case AliasNode: - yaml_alias_event_initialize(&e.event, []byte(node.Value)) - e.event.head_comment = []byte(node.HeadComment) - e.event.line_comment = []byte(node.LineComment) - e.event.foot_comment = []byte(node.FootComment) - e.emit() - - case ScalarNode: - value := node.Value - if !utf8.ValidString(value) { - if stag == binaryTag { - failf("explicitly tagged !!binary data must be base64-encoded") - } - if stag != "" { - failf("cannot marshal invalid UTF-8 data as %s", stag) - } - // It can't be encoded directly as YAML so use a binary tag - // and encode it as base64. 
- tag = binaryTag - value = encodeBase64(value) - } - - style := yaml_PLAIN_SCALAR_STYLE - switch { - case node.Style&DoubleQuotedStyle != 0: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - case node.Style&SingleQuotedStyle != 0: - style = yaml_SINGLE_QUOTED_SCALAR_STYLE - case node.Style&LiteralStyle != 0: - style = yaml_LITERAL_SCALAR_STYLE - case node.Style&FoldedStyle != 0: - style = yaml_FOLDED_SCALAR_STYLE - case strings.Contains(value, "\n"): - style = yaml_LITERAL_SCALAR_STYLE - case forceQuoting: - style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - - e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) - default: - failf("cannot encode node with unknown kind %d", node.Kind) - } -} diff --git a/vendor/gopkg.in/yaml.v3/go.mod b/vendor/gopkg.in/yaml.v3/go.mod deleted file mode 100644 index f407ea3..0000000 --- a/vendor/gopkg.in/yaml.v3/go.mod +++ /dev/null @@ -1,5 +0,0 @@ -module "gopkg.in/yaml.v3" - -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) diff --git a/vendor/gopkg.in/yaml.v3/parserc.go b/vendor/gopkg.in/yaml.v3/parserc.go deleted file mode 100644 index ac66fcc..0000000 --- a/vendor/gopkg.in/yaml.v3/parserc.go +++ /dev/null @@ -1,1249 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" -) - -// The parser implements the following grammar: -// -// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END -// implicit_document ::= block_node DOCUMENT-END* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// block_node_or_indentless_sequence ::= -// ALIAS -// | properties (block_content | indentless_block_sequence)? -// | block_content -// | indentless_block_sequence -// block_node ::= ALIAS -// | properties block_content? -// | block_content -// flow_node ::= ALIAS -// | properties flow_content? -// | flow_content -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// block_content ::= block_collection | flow_collection | SCALAR -// flow_content ::= flow_collection | SCALAR -// block_collection ::= block_sequence | block_mapping -// flow_collection ::= flow_sequence | flow_mapping -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// block_mapping ::= BLOCK-MAPPING_START -// ((KEY block_node_or_indentless_sequence?)? -// (VALUE block_node_or_indentless_sequence?)?)* -// BLOCK-END -// flow_sequence ::= FLOW-SEQUENCE-START -// (flow_sequence_entry FLOW-ENTRY)* -// flow_sequence_entry? -// FLOW-SEQUENCE-END -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// flow_mapping ::= FLOW-MAPPING-START -// (flow_mapping_entry FLOW-ENTRY)* -// flow_mapping_entry? -// FLOW-MAPPING-END -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? - -// Peek the next token in the token queue. -func peek_token(parser *yaml_parser_t) *yaml_token_t { - if parser.token_available || yaml_parser_fetch_more_tokens(parser) { - token := &parser.tokens[parser.tokens_head] - yaml_parser_unfold_comments(parser, token) - return token - } - return nil -} - -// yaml_parser_unfold_comments walks through the comments queue and joins all -// comments behind the position of the provided token into the respective -// top-level comment slices in the parser. -func yaml_parser_unfold_comments(parser *yaml_parser_t, token *yaml_token_t) { - for parser.comments_head < len(parser.comments) && token.start_mark.index >= parser.comments[parser.comments_head].token_mark.index { - comment := &parser.comments[parser.comments_head] - if len(comment.head) > 0 { - if token.typ == yaml_BLOCK_END_TOKEN { - // No heads on ends, so keep comment.head for a follow up token. - break - } - if len(parser.head_comment) > 0 { - parser.head_comment = append(parser.head_comment, '\n') - } - parser.head_comment = append(parser.head_comment, comment.head...) - } - if len(comment.foot) > 0 { - if len(parser.foot_comment) > 0 { - parser.foot_comment = append(parser.foot_comment, '\n') - } - parser.foot_comment = append(parser.foot_comment, comment.foot...) - } - if len(comment.line) > 0 { - if len(parser.line_comment) > 0 { - parser.line_comment = append(parser.line_comment, '\n') - } - parser.line_comment = append(parser.line_comment, comment.line...) - } - *comment = yaml_comment_t{} - parser.comments_head++ - } -} - -// Remove the next token from the queue (must be called after peek_token). -func skip_token(parser *yaml_parser_t) { - parser.token_available = false - parser.tokens_parsed++ - parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN - parser.tokens_head++ -} - -// Get the next event. -func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { - // Erase the event object. - *event = yaml_event_t{} - - // No events after the end of the stream or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { - return true - } - - // Generate the next event. - return yaml_parser_state_machine(parser, event) -} - -// Set parser error. 
-func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { - parser.error = yaml_PARSER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = problem_mark - return false -} - -// State dispatcher. -func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { - //trace("yaml_parser_state_machine", "state:", parser.state.String()) - - switch parser.state { - case yaml_PARSE_STREAM_START_STATE: - return yaml_parser_parse_stream_start(parser, event) - - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, true) - - case yaml_PARSE_DOCUMENT_START_STATE: - return yaml_parser_parse_document_start(parser, event, false) - - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return yaml_parser_parse_document_content(parser, event) - - case yaml_PARSE_DOCUMENT_END_STATE: - return yaml_parser_parse_document_end(parser, event) - - case yaml_PARSE_BLOCK_NODE_STATE: - return yaml_parser_parse_node(parser, event, true, false) - - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return yaml_parser_parse_node(parser, event, true, true) - - case yaml_PARSE_FLOW_NODE_STATE: - return yaml_parser_parse_node(parser, event, false, false) - - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, true) - - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_block_sequence_entry(parser, event, false) - - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_indentless_sequence_entry(parser, event) - - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, true) - - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return yaml_parser_parse_block_mapping_key(parser, event, false) - - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return yaml_parser_parse_block_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, true) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return yaml_parser_parse_flow_sequence_entry(parser, event, false) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) - - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) - - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, true) - - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return yaml_parser_parse_flow_mapping_key(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, false) - - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return yaml_parser_parse_flow_mapping_value(parser, event, true) - - default: - panic("invalid parser state") - } -} - -// Parse the production: -// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END -// ************ -func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_STREAM_START_TOKEN { - return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark) - } - parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - encoding: token.encoding, - } - skip_token(parser) - return true -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// * -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// ************************* -func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { - - token := peek_token(parser) - if token == nil { - return false - } - - // Parse extra document end indicators. - if !implicit { - for token.typ == yaml_DOCUMENT_END_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && - token.typ != yaml_TAG_DIRECTIVE_TOKEN && - token.typ != yaml_DOCUMENT_START_TOKEN && - token.typ != yaml_STREAM_END_TOKEN { - // Parse an implicit document. - if !yaml_parser_process_directives(parser, nil, nil) { - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_BLOCK_NODE_STATE - - var head_comment []byte - if len(parser.head_comment) > 0 { - // [Go] Scan the header comment backwards, and if an empty line is found, break - // the header so the part before the last empty line goes into the - // document header, while the bottom of it goes into a follow up event. - for i := len(parser.head_comment) - 1; i > 0; i-- { - if parser.head_comment[i] == '\n' { - if i == len(parser.head_comment)-1 { - head_comment = parser.head_comment[:i] - parser.head_comment = parser.head_comment[i+1:] - break - } else if parser.head_comment[i-1] == '\n' { - head_comment = parser.head_comment[:i-1] - parser.head_comment = parser.head_comment[i+1:] - break - } - } - } - } - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - - head_comment: head_comment, - } - - } else if token.typ != yaml_STREAM_END_TOKEN { - // Parse an explicit document. - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - start_mark := token.start_mark - if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { - return false - } - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_DOCUMENT_START_TOKEN { - yaml_parser_set_parser_error(parser, - "did not find expected ", token.start_mark) - return false - } - parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) - parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE - end_mark := token.end_mark - - *event = yaml_event_t{ - typ: yaml_DOCUMENT_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - version_directive: version_directive, - tag_directives: tag_directives, - implicit: false, - } - skip_token(parser) - - } else { - // Parse the stream end. 
- parser.state = yaml_PARSE_END_STATE - *event = yaml_event_t{ - typ: yaml_STREAM_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - skip_token(parser) - } - - return true -} - -// Parse the productions: -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// *********** -// -func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || - token.typ == yaml_TAG_DIRECTIVE_TOKEN || - token.typ == yaml_DOCUMENT_START_TOKEN || - token.typ == yaml_DOCUMENT_END_TOKEN || - token.typ == yaml_STREAM_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - return yaml_parser_process_empty_scalar(parser, event, - token.start_mark) - } - return yaml_parser_parse_node(parser, event, true, false) -} - -// Parse the productions: -// implicit_document ::= block_node DOCUMENT-END* -// ************* -// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* -// -func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - start_mark := token.start_mark - end_mark := token.start_mark - - implicit := true - if token.typ == yaml_DOCUMENT_END_TOKEN { - end_mark = token.end_mark - skip_token(parser) - implicit = false - } - - parser.tag_directives = parser.tag_directives[:0] - - parser.state = yaml_PARSE_DOCUMENT_START_STATE - *event = yaml_event_t{ - typ: yaml_DOCUMENT_END_EVENT, - start_mark: start_mark, - end_mark: end_mark, - implicit: implicit, - } - yaml_parser_set_event_comments(parser, event) - if len(event.head_comment) > 0 && len(event.foot_comment) == 0 { - event.foot_comment = event.head_comment - event.head_comment = nil - } - return true -} - -func yaml_parser_set_event_comments(parser *yaml_parser_t, event *yaml_event_t) { - event.head_comment = parser.head_comment - event.line_comment = parser.line_comment - event.foot_comment = parser.foot_comment - parser.head_comment = nil - parser.line_comment = nil - parser.foot_comment = nil - parser.tail_comment = nil - parser.stem_comment = nil -} - -// Parse the productions: -// block_node_or_indentless_sequence ::= -// ALIAS -// ***** -// | properties (block_content | indentless_block_sequence)? -// ********** * -// | block_content | indentless_block_sequence -// * -// block_node ::= ALIAS -// ***** -// | properties block_content? -// ********** * -// | block_content -// * -// flow_node ::= ALIAS -// ***** -// | properties flow_content? -// ********** * -// | flow_content -// * -// properties ::= TAG ANCHOR? | ANCHOR TAG? 
-// ************************* -// block_content ::= block_collection | flow_collection | SCALAR -// ****** -// flow_content ::= flow_collection | SCALAR -// ****** -func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { - //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_ALIAS_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - *event = yaml_event_t{ - typ: yaml_ALIAS_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - anchor: token.value, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - - start_mark := token.start_mark - end_mark := token.start_mark - - var tag_token bool - var tag_handle, tag_suffix, anchor []byte - var tag_mark yaml_mark_t - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - start_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } else if token.typ == yaml_TAG_TOKEN { - tag_token = true - tag_handle = token.value - tag_suffix = token.suffix - start_mark = token.start_mark - tag_mark = token.start_mark - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_ANCHOR_TOKEN { - anchor = token.value - end_mark = token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - } - - var tag []byte - if tag_token { - if len(tag_handle) == 0 { - tag = tag_suffix - tag_suffix = nil - } else { - for i := range parser.tag_directives { - if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { - tag = append([]byte(nil), parser.tag_directives[i].prefix...) - tag = append(tag, tag_suffix...) 
- break - } - } - if len(tag) == 0 { - yaml_parser_set_parser_error_context(parser, - "while parsing a node", start_mark, - "found undefined tag handle", tag_mark) - return false - } - } - } - - implicit := len(tag) == 0 - if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - return true - } - if token.typ == yaml_SCALAR_TOKEN { - var plain_implicit, quoted_implicit bool - end_mark = token.end_mark - if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { - plain_implicit = true - } else if len(tag) == 0 { - quoted_implicit = true - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - value: token.value, - implicit: plain_implicit, - quoted_implicit: quoted_implicit, - style: yaml_style_t(token.style), - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { - // [Go] Some of the events below can be merged as they differ only on style. - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), - } - yaml_parser_set_event_comments(parser, event) - return true - } - if token.typ == yaml_FLOW_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - yaml_parser_set_event_comments(parser, event) - return true - } - if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_SEQUENCE_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), - } - if parser.stem_comment != nil { - event.head_comment = parser.stem_comment - parser.stem_comment = nil - } - return true - } - if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { - end_mark = token.end_mark - parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), - } - if parser.stem_comment != nil { - event.head_comment = parser.stem_comment - parser.stem_comment = nil - } - return true - } - if len(anchor) > 0 || len(tag) > 0 { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: start_mark, - end_mark: end_mark, - anchor: anchor, - tag: tag, - implicit: implicit, - quoted_implicit: 
false, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true - } - - context := "while parsing a flow node" - if block { - context = "while parsing a block node" - } - yaml_parser_set_parser_error_context(parser, context, start_mark, - "did not find expected node content", token.start_mark) - return false -} - -// Parse the productions: -// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END -// ******************** *********** * ********* -// -func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - prior_head_len := len(parser.head_comment) - skip_token(parser) - yaml_parser_split_stem_comment(parser, prior_head_len) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } else { - parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } - if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block collection", context_mark, - "did not find expected '-' indicator", token.start_mark) -} - -// Parse the productions: -// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ -// *********** * -func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ == yaml_BLOCK_ENTRY_TOKEN { - mark := token.end_mark - prior_head_len := len(parser.head_comment) - skip_token(parser) - yaml_parser_split_stem_comment(parser, prior_head_len) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_BLOCK_ENTRY_TOKEN && - token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, true, false) - } - parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? - } - return true -} - -// Split stem comment from head comment. 
-// -// When a sequence or map is found under a sequence entry, the former head comment -// is assigned to the underlying sequence or map as a whole, not the individual -// sequence or map entry as would be expected otherwise. To handle this case the -// previous head comment is moved aside as the stem comment. -func yaml_parser_split_stem_comment(parser *yaml_parser_t, stem_len int) { - if stem_len == 0 { - return - } - - token := peek_token(parser) - if token.typ != yaml_BLOCK_SEQUENCE_START_TOKEN && token.typ != yaml_BLOCK_MAPPING_START_TOKEN { - return - } - - parser.stem_comment = parser.head_comment[:stem_len] - if len(parser.head_comment) == stem_len { - parser.head_comment = nil - } else { - // Copy suffix to prevent very strange bugs if someone ever appends - // further bytes to the prefix in the stem_comment slice above. - parser.head_comment = append([]byte(nil), parser.head_comment[stem_len+1:]...) - } -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// ******************* -// ((KEY block_node_or_indentless_sequence?)? -// *** * -// (VALUE block_node_or_indentless_sequence?)?)* -// -// BLOCK-END -// ********* -// -func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - // [Go] A tail comment was left from the prior mapping value processed. Emit an event - // as it needs to be processed with that value and not the following key. - if len(parser.tail_comment) > 0 { - *event = yaml_event_t{ - typ: yaml_TAIL_COMMENT_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - foot_comment: parser.tail_comment, - } - parser.tail_comment = nil - return true - } - - if token.typ == yaml_KEY_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } else { - parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - } else if token.typ == yaml_BLOCK_END_TOKEN { - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true - } - - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a block mapping", context_mark, - "did not find expected key", token.start_mark) -} - -// Parse the productions: -// block_mapping ::= BLOCK-MAPPING_START -// -// ((KEY block_node_or_indentless_sequence?)? 
-// -// (VALUE block_node_or_indentless_sequence?)?)* -// ***** * -// BLOCK-END -// -// -func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - mark := token.end_mark - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_KEY_TOKEN && - token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_BLOCK_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, true, true) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) - } - parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence ::= FLOW-SEQUENCE-START -// ******************* -// (flow_sequence_entry FLOW-ENTRY)* -// * ********** -// flow_sequence_entry? -// * -// FLOW-SEQUENCE-END -// ***************** -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * -// -func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow sequence", context_mark, - "did not find expected ',' or ']'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_START_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - implicit: true, - style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), - } - skip_token(parser) - return true - } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - - *event = yaml_event_t{ - typ: yaml_SEQUENCE_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - - skip_token(parser) - return true -} - -// -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// *** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - mark := token.end_mark - skip_token(parser) - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// ***** * -// -func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token := peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Parse the productions: -// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * -// -func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { - token := peek_token(parser) - if token == nil { - return false - } - parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? - } - return true -} - -// Parse the productions: -// flow_mapping ::= FLOW-MAPPING-START -// ****************** -// (flow_mapping_entry FLOW-ENTRY)* -// * ********** -// flow_mapping_entry? -// ****************** -// FLOW-MAPPING-END -// **************** -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
-// * *** * -// -func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { - if first { - token := peek_token(parser) - parser.marks = append(parser.marks, token.start_mark) - skip_token(parser) - } - - token := peek_token(parser) - if token == nil { - return false - } - - if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - if !first { - if token.typ == yaml_FLOW_ENTRY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } else { - context_mark := parser.marks[len(parser.marks)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - return yaml_parser_set_parser_error_context(parser, - "while parsing a flow mapping", context_mark, - "did not find expected ',' or '}'", token.start_mark) - } - } - - if token.typ == yaml_KEY_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_VALUE_TOKEN && - token.typ != yaml_FLOW_ENTRY_TOKEN && - token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } else { - parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - - parser.state = parser.states[len(parser.states)-1] - parser.states = parser.states[:len(parser.states)-1] - parser.marks = parser.marks[:len(parser.marks)-1] - *event = yaml_event_t{ - typ: yaml_MAPPING_END_EVENT, - start_mark: token.start_mark, - end_mark: token.end_mark, - } - yaml_parser_set_event_comments(parser, event) - skip_token(parser) - return true -} - -// Parse the productions: -// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? -// * ***** * -// -func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { - token := peek_token(parser) - if token == nil { - return false - } - if empty { - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) - } - if token.typ == yaml_VALUE_TOKEN { - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { - parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) - return yaml_parser_parse_node(parser, event, false, false) - } - } - parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE - return yaml_parser_process_empty_scalar(parser, event, token.start_mark) -} - -// Generate an empty scalar event. -func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { - *event = yaml_event_t{ - typ: yaml_SCALAR_EVENT, - start_mark: mark, - end_mark: mark, - value: nil, // Empty - implicit: true, - style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), - } - return true -} - -var default_tag_directives = []yaml_tag_directive_t{ - {[]byte("!"), []byte("!")}, - {[]byte("!!"), []byte("tag:yaml.org,2002:")}, -} - -// Parse directives. 
-func yaml_parser_process_directives(parser *yaml_parser_t, - version_directive_ref **yaml_version_directive_t, - tag_directives_ref *[]yaml_tag_directive_t) bool { - - var version_directive *yaml_version_directive_t - var tag_directives []yaml_tag_directive_t - - token := peek_token(parser) - if token == nil { - return false - } - - for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { - if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { - if version_directive != nil { - yaml_parser_set_parser_error(parser, - "found duplicate %YAML directive", token.start_mark) - return false - } - if token.major != 1 || token.minor != 1 { - yaml_parser_set_parser_error(parser, - "found incompatible YAML document", token.start_mark) - return false - } - version_directive = &yaml_version_directive_t{ - major: token.major, - minor: token.minor, - } - } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { - value := yaml_tag_directive_t{ - handle: token.value, - prefix: token.prefix, - } - if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { - return false - } - tag_directives = append(tag_directives, value) - } - - skip_token(parser) - token = peek_token(parser) - if token == nil { - return false - } - } - - for i := range default_tag_directives { - if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { - return false - } - } - - if version_directive_ref != nil { - *version_directive_ref = version_directive - } - if tag_directives_ref != nil { - *tag_directives_ref = tag_directives - } - return true -} - -// Append a tag directive to the directives stack. -func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { - for i := range parser.tag_directives { - if bytes.Equal(value.handle, parser.tag_directives[i].handle) { - if allow_duplicates { - return true - } - return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) - } - } - - // [Go] I suspect the copy is unnecessary. This was likely done - // because there was no way to track ownership of the data. - value_copy := yaml_tag_directive_t{ - handle: make([]byte, len(value.handle)), - prefix: make([]byte, len(value.prefix)), - } - copy(value_copy.handle, value.handle) - copy(value_copy.prefix, value.prefix) - parser.tag_directives = append(parser.tag_directives, value_copy) - return true -} diff --git a/vendor/gopkg.in/yaml.v3/readerc.go b/vendor/gopkg.in/yaml.v3/readerc.go deleted file mode 100644 index b7de0a8..0000000 --- a/vendor/gopkg.in/yaml.v3/readerc.go +++ /dev/null @@ -1,434 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. 
-// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "io" -) - -// Set the reader error and return 0. -func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { - parser.error = yaml_READER_ERROR - parser.problem = problem - parser.problem_offset = offset - parser.problem_value = value - return false -} - -// Byte order marks. -const ( - bom_UTF8 = "\xef\xbb\xbf" - bom_UTF16LE = "\xff\xfe" - bom_UTF16BE = "\xfe\xff" -) - -// Determine the input stream encoding by checking the BOM symbol. If no BOM is -// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. -func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { - // Ensure that we had enough bytes in the raw buffer. - for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { - if !yaml_parser_update_raw_buffer(parser) { - return false - } - } - - // Determine the encoding. - buf := parser.raw_buffer - pos := parser.raw_buffer_pos - avail := len(buf) - pos - if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { - parser.encoding = yaml_UTF16LE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { - parser.encoding = yaml_UTF16BE_ENCODING - parser.raw_buffer_pos += 2 - parser.offset += 2 - } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { - parser.encoding = yaml_UTF8_ENCODING - parser.raw_buffer_pos += 3 - parser.offset += 3 - } else { - parser.encoding = yaml_UTF8_ENCODING - } - return true -} - -// Update the raw buffer. -func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { - size_read := 0 - - // Return if the raw buffer is full. - if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { - return true - } - - // Return on EOF. - if parser.eof { - return true - } - - // Move the remaining bytes in the raw buffer to the beginning. - if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { - copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) - } - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] - parser.raw_buffer_pos = 0 - - // Call the read handler to fill the buffer. - size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) - parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] - if err == io.EOF { - parser.eof = true - } else if err != nil { - return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) - } - return true -} - -// Ensure that the buffer contains at least `length` characters. -// Return true on success, false on failure. -// -// The length is supposed to be significantly less that the buffer size. 
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { - if parser.read_handler == nil { - panic("read handler must be set") - } - - // [Go] This function was changed to guarantee the requested length size at EOF. - // The fact we need to do this is pretty awful, but the description above implies - // for that to be the case, and there are tests - - // If the EOF flag is set and the raw buffer is empty, do nothing. - if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { - // [Go] ACTUALLY! Read the documentation of this function above. - // This is just broken. To return true, we need to have the - // given length in the buffer. Not doing that means every single - // check that calls this function to make sure the buffer has a - // given length is Go) panicking; or C) accessing invalid memory. - //return true - } - - // Return if the buffer contains enough characters. - if parser.unread >= length { - return true - } - - // Determine the input encoding if it is not known yet. - if parser.encoding == yaml_ANY_ENCODING { - if !yaml_parser_determine_encoding(parser) { - return false - } - } - - // Move the unread characters to the beginning of the buffer. - buffer_len := len(parser.buffer) - if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { - copy(parser.buffer, parser.buffer[parser.buffer_pos:]) - buffer_len -= parser.buffer_pos - parser.buffer_pos = 0 - } else if parser.buffer_pos == buffer_len { - buffer_len = 0 - parser.buffer_pos = 0 - } - - // Open the whole buffer for writing, and cut it before returning. - parser.buffer = parser.buffer[:cap(parser.buffer)] - - // Fill the buffer until it has enough characters. - first := true - for parser.unread < length { - - // Fill the raw buffer if necessary. - if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { - if !yaml_parser_update_raw_buffer(parser) { - parser.buffer = parser.buffer[:buffer_len] - return false - } - } - first = false - - // Decode the raw buffer. - inner: - for parser.raw_buffer_pos != len(parser.raw_buffer) { - var value rune - var width int - - raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos - - // Decode the next character. - switch parser.encoding { - case yaml_UTF8_ENCODING: - // Decode a UTF-8 character. Check RFC 3629 - // (http://www.ietf.org/rfc/rfc3629.txt) for more details. - // - // The following table (taken from the RFC) is used for - // decoding. - // - // Char. number range | UTF-8 octet sequence - // (hexadecimal) | (binary) - // --------------------+------------------------------------ - // 0000 0000-0000 007F | 0xxxxxxx - // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx - // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx - // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - // - // Additionally, the characters in the range 0xD800-0xDFFF - // are prohibited as they are reserved for use with UTF-16 - // surrogate pairs. - - // Determine the length of the UTF-8 sequence. - octet := parser.raw_buffer[parser.raw_buffer_pos] - switch { - case octet&0x80 == 0x00: - width = 1 - case octet&0xE0 == 0xC0: - width = 2 - case octet&0xF0 == 0xE0: - width = 3 - case octet&0xF8 == 0xF0: - width = 4 - default: - // The leading octet is invalid. - return yaml_parser_set_reader_error(parser, - "invalid leading UTF-8 octet", - parser.offset, int(octet)) - } - - // Check if the raw buffer contains an incomplete character. 
- if width > raw_unread { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-8 octet sequence", - parser.offset, -1) - } - break inner - } - - // Decode the leading octet. - switch { - case octet&0x80 == 0x00: - value = rune(octet & 0x7F) - case octet&0xE0 == 0xC0: - value = rune(octet & 0x1F) - case octet&0xF0 == 0xE0: - value = rune(octet & 0x0F) - case octet&0xF8 == 0xF0: - value = rune(octet & 0x07) - default: - value = 0 - } - - // Check and decode the trailing octets. - for k := 1; k < width; k++ { - octet = parser.raw_buffer[parser.raw_buffer_pos+k] - - // Check if the octet is valid. - if (octet & 0xC0) != 0x80 { - return yaml_parser_set_reader_error(parser, - "invalid trailing UTF-8 octet", - parser.offset+k, int(octet)) - } - - // Decode the octet. - value = (value << 6) + rune(octet&0x3F) - } - - // Check the length of the sequence against the value. - switch { - case width == 1: - case width == 2 && value >= 0x80: - case width == 3 && value >= 0x800: - case width == 4 && value >= 0x10000: - default: - return yaml_parser_set_reader_error(parser, - "invalid length of a UTF-8 sequence", - parser.offset, -1) - } - - // Check the range of the value. - if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { - return yaml_parser_set_reader_error(parser, - "invalid Unicode character", - parser.offset, int(value)) - } - - case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: - var low, high int - if parser.encoding == yaml_UTF16LE_ENCODING { - low, high = 0, 1 - } else { - low, high = 1, 0 - } - - // The UTF-16 encoding is not as simple as one might - // naively think. Check RFC 2781 - // (http://www.ietf.org/rfc/rfc2781.txt). - // - // Normally, two subsequent bytes describe a Unicode - // character. However a special technique (called a - // surrogate pair) is used for specifying character - // values larger than 0xFFFF. - // - // A surrogate pair consists of two pseudo-characters: - // high surrogate area (0xD800-0xDBFF) - // low surrogate area (0xDC00-0xDFFF) - // - // The following formulas are used for decoding - // and encoding characters using surrogate pairs: - // - // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) - // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) - // W1 = 110110yyyyyyyyyy - // W2 = 110111xxxxxxxxxx - // - // where U is the character value, W1 is the high surrogate - // area, W2 is the low surrogate area. - - // Check for incomplete UTF-16 character. - if raw_unread < 2 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 character", - parser.offset, -1) - } - break inner - } - - // Get the character. - value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) - - // Check for unexpected low surrogate area. - if value&0xFC00 == 0xDC00 { - return yaml_parser_set_reader_error(parser, - "unexpected low surrogate area", - parser.offset, int(value)) - } - - // Check for a high surrogate area. - if value&0xFC00 == 0xD800 { - width = 4 - - // Check for incomplete surrogate pair. - if raw_unread < 4 { - if parser.eof { - return yaml_parser_set_reader_error(parser, - "incomplete UTF-16 surrogate pair", - parser.offset, -1) - } - break inner - } - - // Get the next character. - value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + - (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) - - // Check for a low surrogate area. 
- if value2&0xFC00 != 0xDC00 { - return yaml_parser_set_reader_error(parser, - "expected low surrogate area", - parser.offset+2, int(value2)) - } - - // Generate the value of the surrogate pair. - value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) - } else { - width = 2 - } - - default: - panic("impossible") - } - - // Check if the character is in the allowed range: - // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) - // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) - // | [#x10000-#x10FFFF] (32 bit) - switch { - case value == 0x09: - case value == 0x0A: - case value == 0x0D: - case value >= 0x20 && value <= 0x7E: - case value == 0x85: - case value >= 0xA0 && value <= 0xD7FF: - case value >= 0xE000 && value <= 0xFFFD: - case value >= 0x10000 && value <= 0x10FFFF: - default: - return yaml_parser_set_reader_error(parser, - "control characters are not allowed", - parser.offset, int(value)) - } - - // Move the raw pointers. - parser.raw_buffer_pos += width - parser.offset += width - - // Finally put the character into the buffer. - if value <= 0x7F { - // 0000 0000-0000 007F . 0xxxxxxx - parser.buffer[buffer_len+0] = byte(value) - buffer_len += 1 - } else if value <= 0x7FF { - // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) - parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) - buffer_len += 2 - } else if value <= 0xFFFF { - // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) - buffer_len += 3 - } else { - // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx - parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) - parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) - parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) - parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) - buffer_len += 4 - } - - parser.unread++ - } - - // On EOF, put NUL into the buffer and return. - if parser.eof { - parser.buffer[buffer_len] = 0 - buffer_len++ - parser.unread++ - break - } - } - // [Go] Read the documentation of this function above. To return true, - // we need to have the given length in the buffer. Not doing that means - // every single check that calls this function to make sure the buffer - // has a given length is Go) panicking; or C) accessing invalid memory. - // This happens here due to the EOF above breaking early. - for buffer_len < length { - parser.buffer[buffer_len] = 0 - buffer_len++ - } - parser.buffer = parser.buffer[:buffer_len] - return true -} diff --git a/vendor/gopkg.in/yaml.v3/resolve.go b/vendor/gopkg.in/yaml.v3/resolve.go deleted file mode 100644 index 64ae888..0000000 --- a/vendor/gopkg.in/yaml.v3/resolve.go +++ /dev/null @@ -1,326 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package yaml - -import ( - "encoding/base64" - "math" - "regexp" - "strconv" - "strings" - "time" -) - -type resolveMapItem struct { - value interface{} - tag string -} - -var resolveTable = make([]byte, 256) -var resolveMap = make(map[string]resolveMapItem) - -func init() { - t := resolveTable - t[int('+')] = 'S' // Sign - t[int('-')] = 'S' - for _, c := range "0123456789" { - t[int(c)] = 'D' // Digit - } - for _, c := range "yYnNtTfFoO~" { - t[int(c)] = 'M' // In map - } - t[int('.')] = '.' // Float (potentially in map) - - var resolveMapList = []struct { - v interface{} - tag string - l []string - }{ - {true, boolTag, []string{"true", "True", "TRUE"}}, - {false, boolTag, []string{"false", "False", "FALSE"}}, - {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, - {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, - {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, - {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, - {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, - {"<<", mergeTag, []string{"<<"}}, - } - - m := resolveMap - for _, item := range resolveMapList { - for _, s := range item.l { - m[s] = resolveMapItem{item.v, item.tag} - } - } -} - -const ( - nullTag = "!!null" - boolTag = "!!bool" - strTag = "!!str" - intTag = "!!int" - floatTag = "!!float" - timestampTag = "!!timestamp" - seqTag = "!!seq" - mapTag = "!!map" - binaryTag = "!!binary" - mergeTag = "!!merge" -) - -var longTags = make(map[string]string) -var shortTags = make(map[string]string) - -func init() { - for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { - ltag := longTag(stag) - longTags[stag] = ltag - shortTags[ltag] = stag - } -} - -const longTagPrefix = "tag:yaml.org,2002:" - -func shortTag(tag string) string { - if strings.HasPrefix(tag, longTagPrefix) { - if stag, ok := shortTags[tag]; ok { - return stag - } - return "!!" + tag[len(longTagPrefix):] - } - return tag -} - -func longTag(tag string) string { - if strings.HasPrefix(tag, "!!") { - if ltag, ok := longTags[tag]; ok { - return ltag - } - return longTagPrefix + tag[2:] - } - return tag -} - -func resolvableTag(tag string) bool { - switch tag { - case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: - return true - } - return false -} - -var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) - -func resolve(tag string, in string) (rtag string, out interface{}) { - tag = shortTag(tag) - if !resolvableTag(tag) { - return tag, in - } - - defer func() { - switch tag { - case "", rtag, strTag, binaryTag: - return - case floatTag: - if rtag == intTag { - switch v := out.(type) { - case int64: - rtag = floatTag - out = float64(v) - return - case int: - rtag = floatTag - out = float64(v) - return - } - } - } - failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) - }() - - // Any data is accepted as a !!str or !!binary. - // Otherwise, the prefix is enough of a hint about what it might be. - hint := byte('N') - if in != "" { - hint = resolveTable[in[0]] - } - if hint != 0 && tag != strTag && tag != binaryTag { - // Handle things we can lookup in a map. - if item, ok := resolveMap[in]; ok { - return item.tag, item.value - } - - // Base 60 floats are a bad idea, were dropped in YAML 1.2, and - // are purposefully unsupported here. They're still quoted on - // the way out for compatibility with other parser, though. 
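As a side note, the special plain scalars and the "!!" shorthand handled by resolve.go above can be illustrated with a small standalone program. The table below is a cut-down copy of the values listed in resolveMapList; the helper and variable names are illustrative, not the vendored API.

package main

import (
    "fmt"
    "math"
)

const longTagPrefix = "tag:yaml.org,2002:"

// specials is a trimmed version of the lookup table that resolve.go builds
// in init(): a few plain scalars resolve directly to a tag and a value.
var specials = map[string]struct {
    tag   string
    value interface{}
}{
    "true":  {"!!bool", true},
    "false": {"!!bool", false},
    "null":  {"!!null", nil},
    ".inf":  {"!!float", math.Inf(+1)},
}

// expandTag turns the "!!" shorthand into the canonical long form, the same
// mapping the longTag helper above performs.
func expandTag(tag string) string {
    if len(tag) > 2 && tag[:2] == "!!" {
        return longTagPrefix + tag[2:]
    }
    return tag
}

func main() {
    for _, s := range []string{"true", "null", ".inf", "plain text"} {
        if item, ok := specials[s]; ok {
            fmt.Printf("%q -> %s (%s) = %v\n", s, item.tag, expandTag(item.tag), item.value)
            continue
        }
        // Anything not in the table (and not numeric-looking) falls back to !!str.
        fmt.Printf("%q -> !!str\n", s)
    }
}

Scalars that are not in the table are resolved by the switch that follows below, which tries integers, floats and timestamps based on the first character.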
- - switch hint { - case 'M': - // We've already checked the map above. - - case '.': - // Not in the map, so maybe a normal float. - floatv, err := strconv.ParseFloat(in, 64) - if err == nil { - return floatTag, floatv - } - - case 'D', 'S': - // Int, float, or timestamp. - // Only try values as a timestamp if the value is unquoted or there's an explicit - // !!timestamp tag. - if tag == "" || tag == timestampTag { - t, ok := parseTimestamp(in) - if ok { - return timestampTag, t - } - } - - plain := strings.Replace(in, "_", "", -1) - intv, err := strconv.ParseInt(plain, 0, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain, 0, 64) - if err == nil { - return intTag, uintv - } - if yamlStyleFloat.MatchString(plain) { - floatv, err := strconv.ParseFloat(plain, 64) - if err == nil { - return floatTag, floatv - } - } - if strings.HasPrefix(plain, "0b") { - intv, err := strconv.ParseInt(plain[2:], 2, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 2, 64) - if err == nil { - return intTag, uintv - } - } else if strings.HasPrefix(plain, "-0b") { - intv, err := strconv.ParseInt("-"+plain[3:], 2, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - } - // Octals as introduced in version 1.2 of the spec. - // Octals from the 1.1 spec, spelled as 0777, are still - // decoded by default in v3 as well for compatibility. - // May be dropped in v4 depending on how usage evolves. - if strings.HasPrefix(plain, "0o") { - intv, err := strconv.ParseInt(plain[2:], 8, 64) - if err == nil { - if intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - uintv, err := strconv.ParseUint(plain[2:], 8, 64) - if err == nil { - return intTag, uintv - } - } else if strings.HasPrefix(plain, "-0o") { - intv, err := strconv.ParseInt("-"+plain[3:], 8, 64) - if err == nil { - if true || intv == int64(int(intv)) { - return intTag, int(intv) - } else { - return intTag, intv - } - } - } - default: - panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") - } - } - return strTag, in -} - -// encodeBase64 encodes s as base64 that is broken up into multiple lines -// as appropriate for the resulting length. -func encodeBase64(s string) string { - const lineLen = 70 - encLen := base64.StdEncoding.EncodedLen(len(s)) - lines := encLen/lineLen + 1 - buf := make([]byte, encLen*2+lines) - in := buf[0:encLen] - out := buf[encLen:] - base64.StdEncoding.Encode(in, []byte(s)) - k := 0 - for i := 0; i < len(in); i += lineLen { - j := i + lineLen - if j > len(in) { - j = len(in) - } - k += copy(out[k:], in[i:j]) - if lines > 1 { - out[k] = '\n' - k++ - } - } - return string(out[:k]) -} - -// This is a subset of the formats allowed by the regular expression -// defined at http://yaml.org/type/timestamp.html. -var allowedTimestampFormats = []string{ - "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. - "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". - "2006-1-2 15:4:5.999999999", // space separated with no time zone - "2006-1-2", // date only - // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" - // from the set of examples. 
-} - -// parseTimestamp parses s as a timestamp string and -// returns the timestamp and reports whether it succeeded. -// Timestamp formats are defined at http://yaml.org/type/timestamp.html -func parseTimestamp(s string) (time.Time, bool) { - // TODO write code to check all the formats supported by - // http://yaml.org/type/timestamp.html instead of using time.Parse. - - // Quick check: all date formats start with YYYY-. - i := 0 - for ; i < len(s); i++ { - if c := s[i]; c < '0' || c > '9' { - break - } - } - if i != 4 || i == len(s) || s[i] != '-' { - return time.Time{}, false - } - for _, format := range allowedTimestampFormats { - if t, err := time.Parse(format, s); err == nil { - return t, true - } - } - return time.Time{}, false -} diff --git a/vendor/gopkg.in/yaml.v3/scannerc.go b/vendor/gopkg.in/yaml.v3/scannerc.go deleted file mode 100644 index d9a539c..0000000 --- a/vendor/gopkg.in/yaml.v3/scannerc.go +++ /dev/null @@ -1,3028 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "bytes" - "fmt" -) - -// Introduction -// ************ -// -// The following notes assume that you are familiar with the YAML specification -// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in -// some cases we are less restrictive that it requires. -// -// The process of transforming a YAML stream into a sequence of events is -// divided on two steps: Scanning and Parsing. -// -// The Scanner transforms the input stream into a sequence of tokens, while the -// parser transform the sequence of tokens produced by the Scanner into a -// sequence of parsing events. -// -// The Scanner is rather clever and complicated. The Parser, on the contrary, -// is a straightforward implementation of a recursive-descendant parser (or, -// LL(1) parser, as it is usually called). -// -// Actually there are two issues of Scanning that might be called "clever", the -// rest is quite straightforward. The issues are "block collection start" and -// "simple keys". Both issues are explained below in details. -// -// Here the Scanning step is explained and implemented. We start with the list -// of all the tokens produced by the Scanner together with short descriptions. -// -// Now, tokens: -// -// STREAM-START(encoding) # The stream start. -// STREAM-END # The stream end. -// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. 
-// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. -// DOCUMENT-START # '---' -// DOCUMENT-END # '...' -// BLOCK-SEQUENCE-START # Indentation increase denoting a block -// BLOCK-MAPPING-START # sequence or a block mapping. -// BLOCK-END # Indentation decrease. -// FLOW-SEQUENCE-START # '[' -// FLOW-SEQUENCE-END # ']' -// BLOCK-SEQUENCE-START # '{' -// BLOCK-SEQUENCE-END # '}' -// BLOCK-ENTRY # '-' -// FLOW-ENTRY # ',' -// KEY # '?' or nothing (simple keys). -// VALUE # ':' -// ALIAS(anchor) # '*anchor' -// ANCHOR(anchor) # '&anchor' -// TAG(handle,suffix) # '!handle!suffix' -// SCALAR(value,style) # A scalar. -// -// The following two tokens are "virtual" tokens denoting the beginning and the -// end of the stream: -// -// STREAM-START(encoding) -// STREAM-END -// -// We pass the information about the input stream encoding with the -// STREAM-START token. -// -// The next two tokens are responsible for tags: -// -// VERSION-DIRECTIVE(major,minor) -// TAG-DIRECTIVE(handle,prefix) -// -// Example: -// -// %YAML 1.1 -// %TAG ! !foo -// %TAG !yaml! tag:yaml.org,2002: -// --- -// -// The correspoding sequence of tokens: -// -// STREAM-START(utf-8) -// VERSION-DIRECTIVE(1,1) -// TAG-DIRECTIVE("!","!foo") -// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") -// DOCUMENT-START -// STREAM-END -// -// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole -// line. -// -// The document start and end indicators are represented by: -// -// DOCUMENT-START -// DOCUMENT-END -// -// Note that if a YAML stream contains an implicit document (without '---' -// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be -// produced. -// -// In the following examples, we present whole documents together with the -// produced tokens. -// -// 1. An implicit document: -// -// 'a scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// STREAM-END -// -// 2. An explicit document: -// -// --- -// 'a scalar' -// ... -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// SCALAR("a scalar",single-quoted) -// DOCUMENT-END -// STREAM-END -// -// 3. Several documents in a stream: -// -// 'a scalar' -// --- -// 'another scalar' -// --- -// 'yet another scalar' -// -// Tokens: -// -// STREAM-START(utf-8) -// SCALAR("a scalar",single-quoted) -// DOCUMENT-START -// SCALAR("another scalar",single-quoted) -// DOCUMENT-START -// SCALAR("yet another scalar",single-quoted) -// STREAM-END -// -// We have already introduced the SCALAR token above. The following tokens are -// used to describe aliases, anchors, tag, and scalars: -// -// ALIAS(anchor) -// ANCHOR(anchor) -// TAG(handle,suffix) -// SCALAR(value,style) -// -// The following series of examples illustrate the usage of these tokens: -// -// 1. A recursive sequence: -// -// &A [ *A ] -// -// Tokens: -// -// STREAM-START(utf-8) -// ANCHOR("A") -// FLOW-SEQUENCE-START -// ALIAS("A") -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A tagged scalar: -// -// !!float "3.14" # A good approximation. -// -// Tokens: -// -// STREAM-START(utf-8) -// TAG("!!","float") -// SCALAR("3.14",double-quoted) -// STREAM-END -// -// 3. Various scalar styles: -// -// --- # Implicit empty plain scalars do not produce tokens. 
-// --- a plain scalar -// --- 'a single-quoted scalar' -// --- "a double-quoted scalar" -// --- |- -// a literal scalar -// --- >- -// a folded -// scalar -// -// Tokens: -// -// STREAM-START(utf-8) -// DOCUMENT-START -// DOCUMENT-START -// SCALAR("a plain scalar",plain) -// DOCUMENT-START -// SCALAR("a single-quoted scalar",single-quoted) -// DOCUMENT-START -// SCALAR("a double-quoted scalar",double-quoted) -// DOCUMENT-START -// SCALAR("a literal scalar",literal) -// DOCUMENT-START -// SCALAR("a folded scalar",folded) -// STREAM-END -// -// Now it's time to review collection-related tokens. We will start with -// flow collections: -// -// FLOW-SEQUENCE-START -// FLOW-SEQUENCE-END -// FLOW-MAPPING-START -// FLOW-MAPPING-END -// FLOW-ENTRY -// KEY -// VALUE -// -// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and -// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' -// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the -// indicators '?' and ':', which are used for denoting mapping keys and values, -// are represented by the KEY and VALUE tokens. -// -// The following examples show flow collections: -// -// 1. A flow sequence: -// -// [item 1, item 2, item 3] -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-SEQUENCE-START -// SCALAR("item 1",plain) -// FLOW-ENTRY -// SCALAR("item 2",plain) -// FLOW-ENTRY -// SCALAR("item 3",plain) -// FLOW-SEQUENCE-END -// STREAM-END -// -// 2. A flow mapping: -// -// { -// a simple key: a value, # Note that the KEY token is produced. -// ? a complex key: another value, -// } -// -// Tokens: -// -// STREAM-START(utf-8) -// FLOW-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// FLOW-ENTRY -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// FLOW-ENTRY -// FLOW-MAPPING-END -// STREAM-END -// -// A simple key is a key which is not denoted by the '?' indicator. Note that -// the Scanner still produce the KEY token whenever it encounters a simple key. -// -// For scanning block collections, the following tokens are used (note that we -// repeat KEY and VALUE here): -// -// BLOCK-SEQUENCE-START -// BLOCK-MAPPING-START -// BLOCK-END -// BLOCK-ENTRY -// KEY -// VALUE -// -// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation -// increase that precedes a block collection (cf. the INDENT token in Python). -// The token BLOCK-END denote indentation decrease that ends a block collection -// (cf. the DEDENT token in Python). However YAML has some syntax pecularities -// that makes detections of these tokens more complex. -// -// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators -// '-', '?', and ':' correspondingly. -// -// The following examples show how the tokens BLOCK-SEQUENCE-START, -// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: -// -// 1. 
Block sequences: -// -// - item 1 -// - item 2 -// - -// - item 3.1 -// - item 3.2 -// - -// key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 3.1",plain) -// BLOCK-ENTRY -// SCALAR("item 3.2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Block mappings: -// -// a simple key: a value # The KEY token is produced here. -// ? a complex key -// : another value -// a mapping: -// key 1: value 1 -// key 2: value 2 -// a sequence: -// - item 1 -// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a simple key",plain) -// VALUE -// SCALAR("a value",plain) -// KEY -// SCALAR("a complex key",plain) -// VALUE -// SCALAR("another value",plain) -// KEY -// SCALAR("a mapping",plain) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML does not always require to start a new block collection from a new -// line. If the current line contains only '-', '?', and ':' indicators, a new -// block collection may start at the current line. The following examples -// illustrate this case: -// -// 1. Collections in a sequence: -// -// - - item 1 -// - item 2 -// - key 1: value 1 -// key 2: value 2 -// - ? complex key -// : complex value -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-ENTRY -// BLOCK-MAPPING-START -// KEY -// SCALAR("complex key") -// VALUE -// SCALAR("complex value") -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// 2. Collections in a mapping: -// -// ? a sequence -// : - item 1 -// - item 2 -// ? a mapping -// : key 1: value 1 -// key 2: value 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("a sequence",plain) -// VALUE -// BLOCK-SEQUENCE-START -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// KEY -// SCALAR("a mapping",plain) -// VALUE -// BLOCK-MAPPING-START -// KEY -// SCALAR("key 1",plain) -// VALUE -// SCALAR("value 1",plain) -// KEY -// SCALAR("key 2",plain) -// VALUE -// SCALAR("value 2",plain) -// BLOCK-END -// BLOCK-END -// STREAM-END -// -// YAML also permits non-indented sequences if they are included into a block -// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: -// -// key: -// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
-// - item 2 -// -// Tokens: -// -// STREAM-START(utf-8) -// BLOCK-MAPPING-START -// KEY -// SCALAR("key",plain) -// VALUE -// BLOCK-ENTRY -// SCALAR("item 1",plain) -// BLOCK-ENTRY -// SCALAR("item 2",plain) -// BLOCK-END -// - -// Ensure that the buffer contains the required number of characters. -// Return true on success, false on failure (reader error or memory error). -func cache(parser *yaml_parser_t, length int) bool { - // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) - return parser.unread >= length || yaml_parser_update_buffer(parser, length) -} - -// Advance the buffer pointer. -func skip(parser *yaml_parser_t) { - if !is_blank(parser.buffer, parser.buffer_pos) { - parser.newlines = 0 - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) -} - -func skip_line(parser *yaml_parser_t) { - if is_crlf(parser.buffer, parser.buffer_pos) { - parser.mark.index += 2 - parser.mark.column = 0 - parser.mark.line++ - parser.unread -= 2 - parser.buffer_pos += 2 - parser.newlines++ - } else if is_break(parser.buffer, parser.buffer_pos) { - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) - parser.newlines++ - } -} - -// Copy a character to a string buffer and advance pointers. -func read(parser *yaml_parser_t, s []byte) []byte { - if !is_blank(parser.buffer, parser.buffer_pos) { - parser.newlines = 0 - } - w := width(parser.buffer[parser.buffer_pos]) - if w == 0 { - panic("invalid character sequence") - } - if len(s) == 0 { - s = make([]byte, 0, 32) - } - if w == 1 && len(s)+w <= cap(s) { - s = s[:len(s)+1] - s[len(s)-1] = parser.buffer[parser.buffer_pos] - parser.buffer_pos++ - } else { - s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) - parser.buffer_pos += w - } - parser.mark.index++ - parser.mark.column++ - parser.unread-- - return s -} - -// Copy a line break character to a string buffer and advance pointers. -func read_line(parser *yaml_parser_t, s []byte) []byte { - buf := parser.buffer - pos := parser.buffer_pos - switch { - case buf[pos] == '\r' && buf[pos+1] == '\n': - // CR LF . LF - s = append(s, '\n') - parser.buffer_pos += 2 - parser.mark.index++ - parser.unread-- - case buf[pos] == '\r' || buf[pos] == '\n': - // CR|LF . LF - s = append(s, '\n') - parser.buffer_pos += 1 - case buf[pos] == '\xC2' && buf[pos+1] == '\x85': - // NEL . LF - s = append(s, '\n') - parser.buffer_pos += 2 - case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): - // LS|PS . LS|PS - s = append(s, buf[parser.buffer_pos:pos+3]...) - parser.buffer_pos += 3 - default: - return s - } - parser.mark.index++ - parser.mark.column = 0 - parser.mark.line++ - parser.unread-- - parser.newlines++ - return s -} - -// Get the next token. -func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { - // Erase the token object. - *token = yaml_token_t{} // [Go] Is this necessary? - - // No tokens after STREAM-END or error. - if parser.stream_end_produced || parser.error != yaml_NO_ERROR { - return true - } - - // Ensure that the tokens queue contains enough tokens. - if !parser.token_available { - if !yaml_parser_fetch_more_tokens(parser) { - return false - } - } - - // Fetch the next token from the queue. 
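For reference, the line-break normalization performed by read_line above can be written as a small self-contained helper. The function name and return convention here are illustrative; only the byte patterns and the resulting '\n' come from the deleted code.

package main

import "fmt"

// normalizeBreak folds CR LF, a lone CR or LF, and NEL (bytes C2 85) into a
// single '\n', and copies LS/PS (E2 80 A8 / E2 80 A9) through unchanged.
// It returns the normalized bytes and how many input bytes the break
// consumed; a width of 0 means the input did not start with a line break.
func normalizeBreak(b []byte) ([]byte, int) {
    switch {
    case len(b) >= 2 && b[0] == '\r' && b[1] == '\n':
        return []byte{'\n'}, 2
    case len(b) >= 1 && (b[0] == '\r' || b[0] == '\n'):
        return []byte{'\n'}, 1
    case len(b) >= 2 && b[0] == 0xC2 && b[1] == 0x85:
        return []byte{'\n'}, 2
    case len(b) >= 3 && b[0] == 0xE2 && b[1] == 0x80 && (b[2] == 0xA8 || b[2] == 0xA9):
        return b[:3], 3
    }
    return nil, 0
}

func main() {
    for _, in := range [][]byte{[]byte("\r\nx"), []byte("\nx"), {0xC2, 0x85}, []byte("x")} {
        out, w := normalizeBreak(in)
        fmt.Printf("%q -> %q (width %d)\n", in, out, w)
    }
}

The scanner itself resumes below, popping the next token from the queue.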
- *token = parser.tokens[parser.tokens_head] - parser.tokens_head++ - parser.tokens_parsed++ - parser.token_available = false - - if token.typ == yaml_STREAM_END_TOKEN { - parser.stream_end_produced = true - } - return true -} - -// Set the scanner error and return false. -func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { - parser.error = yaml_SCANNER_ERROR - parser.context = context - parser.context_mark = context_mark - parser.problem = problem - parser.problem_mark = parser.mark - return false -} - -func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { - context := "while parsing a tag" - if directive { - context = "while parsing a %TAG directive" - } - return yaml_parser_set_scanner_error(parser, context, context_mark, problem) -} - -func trace(args ...interface{}) func() { - pargs := append([]interface{}{"+++"}, args...) - fmt.Println(pargs...) - pargs = append([]interface{}{"---"}, args...) - return func() { fmt.Println(pargs...) } -} - -// Ensure that the tokens queue contains at least one token which can be -// returned to the Parser. -func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { - // While we need more tokens to fetch, do it. - for { - // [Go] The comment parsing logic requires a lookahead of two tokens - // so that foot comments may be parsed in time of associating them - // with the tokens that are parsed before them, and also for line - // comments to be transformed into head comments in some edge cases. - if parser.tokens_head < len(parser.tokens)-2 { - // If a potential simple key is at the head position, we need to fetch - // the next token to disambiguate it. - head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] - if !ok { - break - } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { - return false - } else if !valid { - break - } - } - // Fetch the next token. - if !yaml_parser_fetch_next_token(parser) { - return false - } - } - - parser.token_available = true - return true -} - -// The dispatcher for token fetchers. -func yaml_parser_fetch_next_token(parser *yaml_parser_t) (ok bool) { - // Ensure that the buffer is initialized. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we just started scanning. Fetch STREAM-START then. - if !parser.stream_start_produced { - return yaml_parser_fetch_stream_start(parser) - } - - scan_mark := parser.mark - - // Eat whitespaces and comments until we reach the next token. - if !yaml_parser_scan_to_next_token(parser) { - return false - } - - // [Go] While unrolling indents, transform the head comments of prior - // indentation levels observed after scan_start into foot comments at - // the respective indexes. - - // Check the indentation level against the current column. - if !yaml_parser_unroll_indent(parser, parser.mark.column, scan_mark) { - return false - } - - // Ensure that the buffer contains at least 4 characters. 4 is the length - // of the longest indicators ('--- ' and '... '). - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - // Is it the end of the stream? - if is_z(parser.buffer, parser.buffer_pos) { - return yaml_parser_fetch_stream_end(parser) - } - - // Is it a directive? 
- if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { - return yaml_parser_fetch_directive(parser) - } - - buf := parser.buffer - pos := parser.buffer_pos - - // Is it the document start indicator? - if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) - } - - // Is it the document end indicator? - if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { - return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) - } - - comment_mark := parser.mark - if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { - // Associate any following comments with the prior token. - comment_mark = parser.tokens[len(parser.tokens)-1].start_mark - } - defer func() { - if !ok { - return - } - if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].typ == yaml_BLOCK_ENTRY_TOKEN { - // Sequence indicators alone have no line comments. It becomes - // a head comment for whatever follows. - return - } - if !yaml_parser_scan_line_comment(parser, comment_mark) { - ok = false - return - } - }() - - // Is it the flow sequence start indicator? - if buf[pos] == '[' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) - } - - // Is it the flow mapping start indicator? - if parser.buffer[parser.buffer_pos] == '{' { - return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) - } - - // Is it the flow sequence end indicator? - if parser.buffer[parser.buffer_pos] == ']' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_SEQUENCE_END_TOKEN) - } - - // Is it the flow mapping end indicator? - if parser.buffer[parser.buffer_pos] == '}' { - return yaml_parser_fetch_flow_collection_end(parser, - yaml_FLOW_MAPPING_END_TOKEN) - } - - // Is it the flow entry indicator? - if parser.buffer[parser.buffer_pos] == ',' { - return yaml_parser_fetch_flow_entry(parser) - } - - // Is it the block entry indicator? - if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { - return yaml_parser_fetch_block_entry(parser) - } - - // Is it the key indicator? - if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_key(parser) - } - - // Is it the value indicator? - if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_value(parser) - } - - // Is it an alias? - if parser.buffer[parser.buffer_pos] == '*' { - return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) - } - - // Is it an anchor? - if parser.buffer[parser.buffer_pos] == '&' { - return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) - } - - // Is it a tag? - if parser.buffer[parser.buffer_pos] == '!' { - return yaml_parser_fetch_tag(parser) - } - - // Is it a literal scalar? - if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, true) - } - - // Is it a folded scalar? - if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { - return yaml_parser_fetch_block_scalar(parser, false) - } - - // Is it a single-quoted scalar? 
- if parser.buffer[parser.buffer_pos] == '\'' { - return yaml_parser_fetch_flow_scalar(parser, true) - } - - // Is it a double-quoted scalar? - if parser.buffer[parser.buffer_pos] == '"' { - return yaml_parser_fetch_flow_scalar(parser, false) - } - - // Is it a plain scalar? - // - // A plain scalar may start with any non-blank characters except - // - // '-', '?', ':', ',', '[', ']', '{', '}', - // '#', '&', '*', '!', '|', '>', '\'', '\"', - // '%', '@', '`'. - // - // In the block context (and, for the '-' indicator, in the flow context - // too), it may also start with the characters - // - // '-', '?', ':' - // - // if it is followed by a non-space character. - // - // The last rule is more restrictive than the specification requires. - // [Go] TODO Make this logic more reasonable. - //switch parser.buffer[parser.buffer_pos] { - //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': - //} - if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || - parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || - parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || - (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level == 0 && - (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && - !is_blankz(parser.buffer, parser.buffer_pos+1)) { - return yaml_parser_fetch_plain_scalar(parser) - } - - // If we don't determine the token type so far, it is an error. - return yaml_parser_set_scanner_error(parser, - "while scanning for the next token", parser.mark, - "found character that cannot start any token") -} - -func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { - if !simple_key.possible { - return false, true - } - - // The 1.2 specification says: - // - // "If the ? indicator is omitted, parsing needs to see past the - // implicit key to recognize it as such. To limit the amount of - // lookahead required, the “:” indicator must appear at most 1024 - // Unicode characters beyond the start of the key. In addition, the key - // is restricted to a single line." - // - if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { - // Check if the potential simple key to be removed is required. - if simple_key.required { - return false, yaml_parser_set_scanner_error(parser, - "while scanning a simple key", simple_key.mark, - "could not find expected ':'") - } - simple_key.possible = false - return false, true - } - return true, true -} - -// Check if a simple key may start at the current position and add it if -// needed. 
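The 1024-character rule quoted from the 1.2 specification above is easy to state on its own. The types and names below are simplified stand-ins (only the index and line of a mark are kept); the thresholds are the ones used by yaml_simple_key_is_valid.

package main

import "fmt"

// mark keeps just the pieces of the parser's position that the rule needs.
type mark struct {
    index, line int
}

// simpleKeyStillValid reports whether an implicit key that started at
// keyStart can still be closed by a ':' at current: both must be on the
// same line, and the ':' must appear within 1024 characters of the start.
func simpleKeyStillValid(keyStart, current mark) bool {
    if current.line > keyStart.line {
        return false // key and ':' are on different lines
    }
    if current.index > keyStart.index+1024 {
        return false // the ':' is too far from the start of the key
    }
    return true
}

func main() {
    key := mark{index: 10, line: 3}
    fmt.Println(simpleKeyStillValid(key, mark{index: 200, line: 3}))  // true
    fmt.Println(simpleKeyStillValid(key, mark{index: 2000, line: 3})) // false
    fmt.Println(simpleKeyStillValid(key, mark{index: 20, line: 4}))   // false
}

The function that records such a candidate key, yaml_parser_save_simple_key, follows below.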
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool { - // A simple key is required at the current position if the scanner is in - // the block context and the current column coincides with the indentation - // level. - - required := parser.flow_level == 0 && parser.indent == parser.mark.column - - // - // If the current position may start a simple key, save it. - // - if parser.simple_key_allowed { - simple_key := yaml_simple_key_t{ - possible: true, - required: required, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - } - - if !yaml_parser_remove_simple_key(parser) { - return false - } - parser.simple_keys[len(parser.simple_keys)-1] = simple_key - parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 - } - return true -} - -// Remove a potential simple key at the current flow level. -func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool { - i := len(parser.simple_keys) - 1 - if parser.simple_keys[i].possible { - // If the key is required, it is an error. - if parser.simple_keys[i].required { - return yaml_parser_set_scanner_error(parser, - "while scanning a simple key", parser.simple_keys[i].mark, - "could not find expected ':'") - } - // Remove the key from the stack. - parser.simple_keys[i].possible = false - delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) - } - return true -} - -// max_flow_level limits the flow_level -const max_flow_level = 10000 - -// Increase the flow level and resize the simple key list if needed. -func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool { - // Reset the simple key on the next level. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{ - possible: false, - required: false, - token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), - mark: parser.mark, - }) - - // Increase the flow level. - parser.flow_level++ - if parser.flow_level > max_flow_level { - return yaml_parser_set_scanner_error(parser, - "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_flow_level)) - } - return true -} - -// Decrease the flow level. -func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool { - if parser.flow_level > 0 { - parser.flow_level-- - last := len(parser.simple_keys) - 1 - delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) - parser.simple_keys = parser.simple_keys[:last] - } - return true -} - -// max_indents limits the indents stack size -const max_indents = 10000 - -// Push the current indentation level to the stack and set the new level -// the current column is greater than the indentation level. In this case, -// append or insert the specified token into the token queue. -func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - if parser.indent < column { - // Push the current indentation level to the stack and set the new - // indentation level. - parser.indents = append(parser.indents, parser.indent) - parser.indent = column - if len(parser.indents) > max_indents { - return yaml_parser_set_scanner_error(parser, - "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, - fmt.Sprintf("exceeded max depth of %d", max_indents)) - } - - // Create a token and insert it into the queue. 
- token := yaml_token_t{ - typ: typ, - start_mark: mark, - end_mark: mark, - } - if number > -1 { - number -= parser.tokens_parsed - } - yaml_insert_token(parser, number, &token) - } - return true -} - -// Pop indentation levels from the indents stack until the current level -// becomes less or equal to the column. For each indentation level, append -// the BLOCK-END token. -func yaml_parser_unroll_indent(parser *yaml_parser_t, column int, scan_mark yaml_mark_t) bool { - // In the flow context, do nothing. - if parser.flow_level > 0 { - return true - } - - block_mark := scan_mark - block_mark.index-- - - // Loop through the indentation levels in the stack. - for parser.indent > column { - - // [Go] Reposition the end token before potential following - // foot comments of parent blocks. For that, search - // backwards for recent comments that were at the same - // indent as the block that is ending now. - stop_index := block_mark.index - for i := len(parser.comments) - 1; i >= 0; i-- { - comment := &parser.comments[i] - - if comment.end_mark.index < stop_index { - // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. - // If requested indent column is < 0, then the document is over and everything else - // is a foot anyway. - break - } - if comment.start_mark.column == parser.indent+1 { - // This is a good match. But maybe there's a former comment - // at that same indent level, so keep searching. - block_mark = comment.start_mark - } - - // While the end of the former comment matches with - // the start of the following one, we know there's - // nothing in between and scanning is still safe. - stop_index = comment.scan_mark.index - } - - // Create a token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_END_TOKEN, - start_mark: block_mark, - end_mark: block_mark, - } - yaml_insert_token(parser, -1, &token) - - // Pop the indentation level. - parser.indent = parser.indents[len(parser.indents)-1] - parser.indents = parser.indents[:len(parser.indents)-1] - } - return true -} - -// Initialize the scanner and produce the STREAM-START token. -func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { - - // Set the initial indentation. - parser.indent = -1 - - // Initialize the simple key stack. - parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) - - parser.simple_keys_by_tok = make(map[int]int) - - // A simple key is allowed at the beginning of the stream. - parser.simple_key_allowed = true - - // We have started. - parser.stream_start_produced = true - - // Create the STREAM-START token and append it to the queue. - token := yaml_token_t{ - typ: yaml_STREAM_START_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - encoding: parser.encoding, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the STREAM-END token and shut down the scanner. -func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { - - // Force new line. - if parser.mark.column != 0 { - parser.mark.column = 0 - parser.mark.line++ - } - - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the STREAM-END token and append it to the queue. 
- token := yaml_token_t{ - typ: yaml_STREAM_END_TOKEN, - start_mark: parser.mark, - end_mark: parser.mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. -func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. - token := yaml_token_t{} - if !yaml_parser_scan_directive(parser, &token) { - return false - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the DOCUMENT-START or DOCUMENT-END token. -func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset the indentation level. - if !yaml_parser_unroll_indent(parser, -1, parser.mark) { - return false - } - - // Reset simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - parser.simple_key_allowed = false - - // Consume the token. - start_mark := parser.mark - - skip(parser) - skip(parser) - skip(parser) - - end_mark := parser.mark - - // Create the DOCUMENT-START or DOCUMENT-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. -func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool { - - // The indicators '[' and '{' may start a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // Increase the flow level. - if !yaml_parser_increase_flow_level(parser) { - return false - } - - // A simple key may follow the indicators '[' and '{'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. -func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // Reset any potential simple key on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Decrease the flow level. - if !yaml_parser_decrease_flow_level(parser) { - return false - } - - // No simple keys after the indicators ']' and '}'. - parser.simple_key_allowed = false - - // Consume the token. - - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. - token := yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - } - // Append the token to the queue. - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the FLOW-ENTRY token. -func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool { - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after ','. - parser.simple_key_allowed = true - - // Consume the token. 
- start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the FLOW-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_FLOW_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the BLOCK-ENTRY token. -func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool { - // Check if the scanner is in the block context. - if parser.flow_level == 0 { - // Check if we are allowed to start a new entry. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "block sequence entries are not allowed in this context") - } - // Add the BLOCK-SEQUENCE-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) { - return false - } - } else { - // It is an error for the '-' indicator to occur in the flow context, - // but we let the Parser detect and report about it because the Parser - // is able to point to the context. - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '-'. - parser.simple_key_allowed = true - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the BLOCK-ENTRY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_BLOCK_ENTRY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the KEY token. -func yaml_parser_fetch_key(parser *yaml_parser_t) bool { - - // In the block context, additional checks are required. - if parser.flow_level == 0 { - // Check if we are allowed to start a new key (not nessesary simple). - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping keys are not allowed in this context") - } - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Reset any potential simple keys on the current flow level. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // Simple keys are allowed after '?' in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the KEY token and append it to the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the VALUE token. -func yaml_parser_fetch_value(parser *yaml_parser_t) bool { - - simple_key := &parser.simple_keys[len(parser.simple_keys)-1] - - // Have we found a simple key? - if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok { - return false - - } else if valid { - - // Create the KEY token and insert it into the queue. - token := yaml_token_t{ - typ: yaml_KEY_TOKEN, - start_mark: simple_key.mark, - end_mark: simple_key.mark, - } - yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token) - - // In the block context, we may need to add the BLOCK-MAPPING-START token. 
- if !yaml_parser_roll_indent(parser, simple_key.mark.column, - simple_key.token_number, - yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) { - return false - } - - // Remove the simple key. - simple_key.possible = false - delete(parser.simple_keys_by_tok, simple_key.token_number) - - // A simple key cannot follow another simple key. - parser.simple_key_allowed = false - - } else { - // The ':' indicator follows a complex key. - - // In the block context, extra checks are required. - if parser.flow_level == 0 { - - // Check if we are allowed to start a complex value. - if !parser.simple_key_allowed { - return yaml_parser_set_scanner_error(parser, "", parser.mark, - "mapping values are not allowed in this context") - } - - // Add the BLOCK-MAPPING-START token if needed. - if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) { - return false - } - } - - // Simple keys after ':' are allowed in the block context. - parser.simple_key_allowed = parser.flow_level == 0 - } - - // Consume the token. - start_mark := parser.mark - skip(parser) - end_mark := parser.mark - - // Create the VALUE token and append it to the queue. - token := yaml_token_t{ - typ: yaml_VALUE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the ALIAS or ANCHOR token. -func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { - // An anchor or an alias could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow an anchor or an alias. - parser.simple_key_allowed = false - - // Create the ALIAS or ANCHOR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_anchor(parser, &token, typ) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the TAG token. -func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { - // A tag could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a tag. - parser.simple_key_allowed = false - - // Create the TAG token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_tag(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. -func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { - // Remove any potential simple keys. - if !yaml_parser_remove_simple_key(parser) { - return false - } - - // A simple key may follow a block scalar. - parser.simple_key_allowed = true - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_block_scalar(parser, &token, literal) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. -func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_flow_scalar(parser, &token, single) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Produce the SCALAR(...,plain) token. 
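One detail worth calling out from fetch_value above: when a ':' confirms a pending simple key, the KEY token is not appended but inserted back at the queue position recorded when the key candidate was saved. Below is a hypothetical, stripped-down illustration of that insert-at-index step; the token type and helper are illustrative, not the vendored code.

package main

import "fmt"

// token is an illustrative stand-in for yaml_token_t; only a name is kept.
type token struct{ kind string }

// insertAt places tok at position pos, shifting later tokens right. This is
// the essence of what yaml_insert_token does when fetch_value retroactively
// adds a KEY token at the saved simple-key position.
func insertAt(queue []token, pos int, tok token) []token {
    queue = append(queue, token{}) // grow by one
    copy(queue[pos+1:], queue[pos:])
    queue[pos] = tok
    return queue
}

func main() {
    // The scalar "a" was scanned as a potential simple key...
    queue := []token{{`SCALAR("a")`}}
    // ...then ':' confirmed it: KEY goes back in front of the scalar,
    // and the VALUE token for the ':' is appended as usual.
    queue = insertAt(queue, 0, token{"KEY"})
    queue = append(queue, token{"VALUE"})
    fmt.Println(queue) // [{KEY} {SCALAR("a")} {VALUE}]
}

The plain-scalar fetcher announced by the comment above follows below.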
-func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { - // A plain scalar could be a simple key. - if !yaml_parser_save_simple_key(parser) { - return false - } - - // A simple key cannot follow a flow scalar. - parser.simple_key_allowed = false - - // Create the SCALAR token and append it to the queue. - var token yaml_token_t - if !yaml_parser_scan_plain_scalar(parser, &token) { - return false - } - yaml_insert_token(parser, -1, &token) - return true -} - -// Eat whitespaces and comments until the next token is found. -func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { - - scan_mark := parser.mark - - // Until the next token is not found. - for { - // Allow the BOM mark to start a line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { - skip(parser) - } - - // Eat whitespaces. - // Tabs are allowed: - // - in the flow context - // - in the block context, but not at the beginning of the line or - // after '-', '?', or ':' (complex value). - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if we just had a line comment under a sequence entry that - // looks more like a header to the following content. Similar to this: - // - // - # The comment - // - Some data - // - // If so, transform the line comment to a head comment and reposition. - if len(parser.comments) > 0 && len(parser.tokens) > 1 { - tokenA := parser.tokens[len(parser.tokens)-2] - tokenB := parser.tokens[len(parser.tokens)-1] - comment := &parser.comments[len(parser.comments)-1] - if tokenA.typ == yaml_BLOCK_SEQUENCE_START_TOKEN && tokenB.typ == yaml_BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !is_break(parser.buffer, parser.buffer_pos) { - // If it was in the prior line, reposition so it becomes a - // header of the follow up token. Otherwise, keep it in place - // so it becomes a header of the former. - comment.head = comment.line - comment.line = nil - if comment.start_mark.line == parser.mark.line-1 { - comment.token_mark = parser.mark - } - } - } - - // Eat a comment until a line break. - if parser.buffer[parser.buffer_pos] == '#' { - if !yaml_parser_scan_comments(parser, scan_mark) { - return false - } - } - - // If it is a line break, eat it. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - - // In the block context, a new line may start a simple key. - if parser.flow_level == 0 { - parser.simple_key_allowed = true - } - } else { - break // We have found a token. - } - } - - return true -} - -// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { - // Eat '%'. - start_mark := parser.mark - skip(parser) - - // Scan the directive name. - var name []byte - if !yaml_parser_scan_directive_name(parser, start_mark, &name) { - return false - } - - // Is it a YAML directive? 
- if bytes.Equal(name, []byte("YAML")) { - // Scan the VERSION directive value. - var major, minor int8 - if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { - return false - } - end_mark := parser.mark - - // Create a VERSION-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_VERSION_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - major: major, - minor: minor, - } - - // Is it a TAG directive? - } else if bytes.Equal(name, []byte("TAG")) { - // Scan the TAG directive value. - var handle, prefix []byte - if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { - return false - } - end_mark := parser.mark - - // Create a TAG-DIRECTIVE token. - *token = yaml_token_t{ - typ: yaml_TAG_DIRECTIVE_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - prefix: prefix, - } - - // Unknown directive. - } else { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unknown directive name") - return false - } - - // Eat the rest of the line including any comments. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - if parser.buffer[parser.buffer_pos] == '#' { - // [Go] Discard this inline comment for the time being. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - return true -} - -// Scan the directive name. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^ -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^ -// -func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool { - // Consume the directive name. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - var s []byte - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the name is empty. - if len(s) == 0 { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "could not find expected directive name") - return false - } - - // Check for an blank character after the name. - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a directive", - start_mark, "found unexpected non-alphabetical character") - return false - } - *name = s - return true -} - -// Scan the value of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^^^^^^ -func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool { - // Eat whitespaces. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the major version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, major) { - return false - } - - // Eat '.'. - if parser.buffer[parser.buffer_pos] != '.' { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected digit or '.' character") - } - - skip(parser) - - // Consume the minor version number. - if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) { - return false - } - return true -} - -const max_number_length = 2 - -// Scan the version number of VERSION-DIRECTIVE. -// -// Scope: -// %YAML 1.1 # a comment \n -// ^ -// %YAML 1.1 # a comment \n -// ^ -func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool { - - // Repeat while the next character is digit. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var value, length int8 - for is_digit(parser.buffer, parser.buffer_pos) { - // Check if the number is too long. - length++ - if length > max_number_length { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "found extremely long version number") - } - value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos)) - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the number was present. - if length == 0 { - return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive", - start_mark, "did not find expected version number") - } - *number = value - return true -} - -// Scan the value of a TAG-DIRECTIVE token. -// -// Scope: -// %TAG !yaml! tag:yaml.org,2002: \n -// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -// -func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool { - var handle_value, prefix_value []byte - - // Eat whitespaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a handle. - if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { - return false - } - - // Expect a whitespace. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blank(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace") - return false - } - - // Eat whitespaces. - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Scan a prefix. - if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { - return false - } - - // Expect a whitespace or line break. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", - start_mark, "did not find expected whitespace or line break") - return false - } - - *handle = handle_value - *prefix = prefix_value - return true -} - -func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { - var s []byte - - // Eat the indicator character. - start_mark := parser.mark - skip(parser) - - // Consume the value. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - end_mark := parser.mark - - /* - * Check if length of the anchor is greater than 0 and it is followed by - * a whitespace character or one of the indicators: - * - * '?', ':', ',', ']', '}', '%', '@', '`'. - */ - - if len(s) == 0 || - !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || - parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '`') { - context := "while scanning an alias" - if typ == yaml_ANCHOR_TOKEN { - context = "while scanning an anchor" - } - yaml_parser_set_scanner_error(parser, context, start_mark, - "did not find expected alphabetic or numeric character") - return false - } - - // Create a token. - *token = yaml_token_t{ - typ: typ, - start_mark: start_mark, - end_mark: end_mark, - value: s, - } - - return true -} - -/* - * Scan a TAG token. - */ - -func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { - var handle, suffix []byte - - start_mark := parser.mark - - // Check if the tag is in the canonical form. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - if parser.buffer[parser.buffer_pos+1] == '<' { - // Keep the handle as '' - - // Eat '!<' - skip(parser) - skip(parser) - - // Consume the tag value. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - - // Check for '>' and eat it. - if parser.buffer[parser.buffer_pos] != '>' { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find the expected '>'") - return false - } - - skip(parser) - } else { - // The tag has either the '!suffix' or the '!handle!suffix' form. - - // First, try to scan a handle. - if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { - return false - } - - // Check if it is, indeed, handle. - if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { - // Scan the suffix now. - if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { - return false - } - } else { - // It wasn't a handle after all. Scan the rest of the tag. - if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { - return false - } - - // Set the handle to '!'. - handle = []byte{'!'} - - // A special case: the '!' tag. Set the handle to '' and the - // suffix to '!'. - if len(suffix) == 0 { - handle, suffix = suffix, handle - } - } - } - - // Check the character which ends the tag. 
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if !is_blankz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a tag", - start_mark, "did not find expected whitespace or line break") - return false - } - - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_TAG_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: handle, - suffix: suffix, - } - return true -} - -// Scan a tag handle. -func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { - // Check the initial '!' character. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] != '!' { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - - var s []byte - - // Copy the '!' character. - s = read(parser, s) - - // Copy all subsequent alphabetical and numerical characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_alpha(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check if the trailing character is '!' and copy it. - if parser.buffer[parser.buffer_pos] == '!' { - s = read(parser, s) - } else { - // It's either the '!' tag or not really a tag handle. If it's a %TAG - // directive, it's an error. If it's a tag token, it must be a part of URI. - if directive && string(s) != "!" { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected '!'") - return false - } - } - - *handle = s - return true -} - -// Scan a tag. -func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { - //size_t length = head ? strlen((char *)head) : 0 - var s []byte - hasTag := len(head) > 0 - - // Copy the head if needed. - // - // Note that we don't copy the leading '!' character. - if len(head) > 1 { - s = append(s, head[1:]...) - } - - // Scan the tag. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // The set of characters that may appear in URI is as follows: - // - // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', - // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', - // '%'. - // [Go] TODO Convert this into more reasonable logic. - for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || - parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || - parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || - parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || - parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || - parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || - parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || - parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || - parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || - parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || - parser.buffer[parser.buffer_pos] == '%' { - // Check if it is a URI-escape sequence. 
- if parser.buffer[parser.buffer_pos] == '%' { - if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { - return false - } - } else { - s = read(parser, s) - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - hasTag = true - } - - if !hasTag { - yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find expected tag URI") - return false - } - *uri = s - return true -} - -// Decode an URI-escape sequence corresponding to a single UTF-8 character. -func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { - - // Decode the required number of characters. - w := 1024 - for w > 0 { - // Check for a URI-escaped octet. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - - if !(parser.buffer[parser.buffer_pos] == '%' && - is_hex(parser.buffer, parser.buffer_pos+1) && - is_hex(parser.buffer, parser.buffer_pos+2)) { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "did not find URI escaped octet") - } - - // Get the octet. - octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) - - // If it is the leading octet, determine the length of the UTF-8 sequence. - if w == 1024 { - w = width(octet) - if w == 0 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect leading UTF-8 octet") - } - } else { - // Check if the trailing octet is correct. - if octet&0xC0 != 0x80 { - return yaml_parser_set_scanner_tag_error(parser, directive, - start_mark, "found an incorrect trailing UTF-8 octet") - } - } - - // Copy the octet and move the pointers. - *s = append(*s, octet) - skip(parser) - skip(parser) - skip(parser) - w-- - } - return true -} - -// Scan a block scalar. -func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { - // Eat the indicator '|' or '>'. - start_mark := parser.mark - skip(parser) - - // Scan the additional block scalar indicators. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check for a chomping indicator. - var chomping, increment int - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - // Set the chomping method and eat the indicator. - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - - // Check for an indentation indicator. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_digit(parser.buffer, parser.buffer_pos) { - // Check that the indentation is greater than 0. - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - - // Get the indentation level and eat the indicator. - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - } - - } else if is_digit(parser.buffer, parser.buffer_pos) { - // Do the same as above, but in the opposite order. 
- - if parser.buffer[parser.buffer_pos] == '0' { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found an indentation indicator equal to 0") - return false - } - increment = as_digit(parser.buffer, parser.buffer_pos) - skip(parser) - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { - if parser.buffer[parser.buffer_pos] == '+' { - chomping = +1 - } else { - chomping = -1 - } - skip(parser) - } - } - - // Eat whitespaces and comments to the end of the line. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for is_blank(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.buffer[parser.buffer_pos] == '#' { - // TODO Test this and then re-enable it. - //if !yaml_parser_scan_line_comment(parser, start_mark) { - // return false - //} - for !is_breakz(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - } - - // Check if we are at the end of the line. - if !is_breakz(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "did not find expected comment or line break") - return false - } - - // Eat a line break. - if is_break(parser.buffer, parser.buffer_pos) { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } - - end_mark := parser.mark - - // Set the indentation level if it was specified. - var indent int - if increment > 0 { - if parser.indent >= 0 { - indent = parser.indent + increment - } else { - indent = increment - } - } - - // Scan the leading line breaks and determine the indentation level if needed. - var s, leading_break, trailing_breaks []byte - if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - - // Scan the block scalar content. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - var leading_blank, trailing_blank bool - for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { - // We are at the beginning of a non-empty line. - - // Is it a trailing whitespace? - trailing_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Check if we need to fold the leading line break. - if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { - // Do we need to join the lines by space? - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } - } else { - s = append(s, leading_break...) - } - leading_break = leading_break[:0] - - // Append the remaining line breaks. - s = append(s, trailing_breaks...) - trailing_breaks = trailing_breaks[:0] - - // Is it a leading whitespace? - leading_blank = is_blank(parser.buffer, parser.buffer_pos) - - // Consume the current line. - for !is_breakz(parser.buffer, parser.buffer_pos) { - s = read(parser, s) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - leading_break = read_line(parser, leading_break) - - // Eat the following indentation spaces and line breaks. 
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { - return false - } - } - - // Chomp the tail. - if chomping != -1 { - s = append(s, leading_break...) - } - if chomping == 1 { - s = append(s, trailing_breaks...) - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_LITERAL_SCALAR_STYLE, - } - if !literal { - token.style = yaml_FOLDED_SCALAR_STYLE - } - return true -} - -// Scan indentation spaces and line breaks for a block scalar. Determine the -// indentation level if needed. -func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { - *end_mark = parser.mark - - // Eat the indentation spaces and line breaks. - max_indent := 0 - for { - // Eat the indentation spaces. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { - skip(parser) - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - if parser.mark.column > max_indent { - max_indent = parser.mark.column - } - - // Check for a tab character messing the indentation. - if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { - return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", - start_mark, "found a tab character where an indentation space is expected") - } - - // Have we found a non-empty line? - if !is_break(parser.buffer, parser.buffer_pos) { - break - } - - // Consume the line break. - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - // [Go] Should really be returning breaks instead. - *breaks = read_line(parser, *breaks) - *end_mark = parser.mark - } - - // Determine the indentation level if needed. - if *indent == 0 { - *indent = max_indent - if *indent < parser.indent+1 { - *indent = parser.indent + 1 - } - if *indent < 1 { - *indent = 1 - } - } - return true -} - -// Scan a quoted scalar. -func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { - // Eat the left quote. - start_mark := parser.mark - skip(parser) - - // Consume the content of the quoted scalar. - var s, leading_break, trailing_breaks, whitespaces []byte - for { - // Check that there are no document indicators at the beginning of the line. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected document indicator") - return false - } - - // Check for EOF. - if is_z(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar", - start_mark, "found unexpected end of stream") - return false - } - - // Consume non-blank characters. 
- leading_blanks := false - for !is_blankz(parser.buffer, parser.buffer_pos) { - if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { - // Is is an escaped single quote. - s = append(s, '\'') - skip(parser) - skip(parser) - - } else if single && parser.buffer[parser.buffer_pos] == '\'' { - // It is a right single quote. - break - } else if !single && parser.buffer[parser.buffer_pos] == '"' { - // It is a right double quote. - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) { - // It is an escaped line break. - if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { - return false - } - skip(parser) - skip_line(parser) - leading_blanks = true - break - - } else if !single && parser.buffer[parser.buffer_pos] == '\\' { - // It is an escape sequence. - code_length := 0 - - // Check the escape character. - switch parser.buffer[parser.buffer_pos+1] { - case '0': - s = append(s, 0) - case 'a': - s = append(s, '\x07') - case 'b': - s = append(s, '\x08') - case 't', '\t': - s = append(s, '\x09') - case 'n': - s = append(s, '\x0A') - case 'v': - s = append(s, '\x0B') - case 'f': - s = append(s, '\x0C') - case 'r': - s = append(s, '\x0D') - case 'e': - s = append(s, '\x1B') - case ' ': - s = append(s, '\x20') - case '"': - s = append(s, '"') - case '\'': - s = append(s, '\'') - case '\\': - s = append(s, '\\') - case 'N': // NEL (#x85) - s = append(s, '\xC2') - s = append(s, '\x85') - case '_': // #xA0 - s = append(s, '\xC2') - s = append(s, '\xA0') - case 'L': // LS (#x2028) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA8') - case 'P': // PS (#x2029) - s = append(s, '\xE2') - s = append(s, '\x80') - s = append(s, '\xA9') - case 'x': - code_length = 2 - case 'u': - code_length = 4 - case 'U': - code_length = 8 - default: - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found unknown escape character") - return false - } - - skip(parser) - skip(parser) - - // Consume an arbitrary escape code. - if code_length > 0 { - var value int - - // Scan the character value. - if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) { - return false - } - for k := 0; k < code_length; k++ { - if !is_hex(parser.buffer, parser.buffer_pos+k) { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "did not find expected hexdecimal number") - return false - } - value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k) - } - - // Check the value and write the character. - if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { - yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", - start_mark, "found invalid Unicode character escape code") - return false - } - if value <= 0x7F { - s = append(s, byte(value)) - } else if value <= 0x7FF { - s = append(s, byte(0xC0+(value>>6))) - s = append(s, byte(0x80+(value&0x3F))) - } else if value <= 0xFFFF { - s = append(s, byte(0xE0+(value>>12))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } else { - s = append(s, byte(0xF0+(value>>18))) - s = append(s, byte(0x80+((value>>12)&0x3F))) - s = append(s, byte(0x80+((value>>6)&0x3F))) - s = append(s, byte(0x80+(value&0x3F))) - } - - // Advance the pointer. - for k := 0; k < code_length; k++ { - skip(parser) - } - } - } else { - // It is a non-escaped non-blank character. 
- s = read(parser, s) - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - // Check if we are at the end of the scalar. - if single { - if parser.buffer[parser.buffer_pos] == '\'' { - break - } - } else { - if parser.buffer[parser.buffer_pos] == '"' { - break - } - } - - // Consume blank characters. - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Join the whitespaces or fold line breaks. - if leading_blanks { - // Do we need to fold line breaks? - if len(leading_break) > 0 && leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Eat the right quote. - skip(parser) - end_mark := parser.mark - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_SINGLE_QUOTED_SCALAR_STYLE, - } - if !single { - token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE - } - return true -} - -// Scan a plain scalar. -func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { - - var s, leading_break, trailing_breaks, whitespaces []byte - var leading_blanks bool - var indent = parser.indent + 1 - - start_mark := parser.mark - end_mark := parser.mark - - // Consume the content of the plain scalar. - for { - // Check for a document indicator. - if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { - return false - } - if parser.mark.column == 0 && - ((parser.buffer[parser.buffer_pos+0] == '-' && - parser.buffer[parser.buffer_pos+1] == '-' && - parser.buffer[parser.buffer_pos+2] == '-') || - (parser.buffer[parser.buffer_pos+0] == '.' && - parser.buffer[parser.buffer_pos+1] == '.' && - parser.buffer[parser.buffer_pos+2] == '.')) && - is_blankz(parser.buffer, parser.buffer_pos+3) { - break - } - - // Check for a comment. - if parser.buffer[parser.buffer_pos] == '#' { - break - } - - // Consume non-blank characters. - for !is_blankz(parser.buffer, parser.buffer_pos) { - - // Check for indicators that may end a plain scalar. - if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || - (parser.flow_level > 0 && - (parser.buffer[parser.buffer_pos] == ',' || - parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || - parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || - parser.buffer[parser.buffer_pos] == '}')) { - break - } - - // Check if we need to join whitespaces and breaks. 
- if leading_blanks || len(whitespaces) > 0 { - if leading_blanks { - // Do we need to fold line breaks? - if leading_break[0] == '\n' { - if len(trailing_breaks) == 0 { - s = append(s, ' ') - } else { - s = append(s, trailing_breaks...) - } - } else { - s = append(s, leading_break...) - s = append(s, trailing_breaks...) - } - trailing_breaks = trailing_breaks[:0] - leading_break = leading_break[:0] - leading_blanks = false - } else { - s = append(s, whitespaces...) - whitespaces = whitespaces[:0] - } - } - - // Copy the character. - s = read(parser, s) - - end_mark = parser.mark - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - } - - // Is it the end? - if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { - break - } - - // Consume blank characters. - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - - for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { - if is_blank(parser.buffer, parser.buffer_pos) { - - // Check for tab characters that abuse indentation. - if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { - yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", - start_mark, "found a tab character that violates indentation") - return false - } - - // Consume a space or a tab character. - if !leading_blanks { - whitespaces = read(parser, whitespaces) - } else { - skip(parser) - } - } else { - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - - // Check if it is a first line break. - if !leading_blanks { - whitespaces = whitespaces[:0] - leading_break = read_line(parser, leading_break) - leading_blanks = true - } else { - trailing_breaks = read_line(parser, trailing_breaks) - } - } - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - } - - // Check indentation level. - if parser.flow_level == 0 && parser.mark.column < indent { - break - } - } - - // Create a token. - *token = yaml_token_t{ - typ: yaml_SCALAR_TOKEN, - start_mark: start_mark, - end_mark: end_mark, - value: s, - style: yaml_PLAIN_SCALAR_STYLE, - } - - // Note that we change the 'simple_key_allowed' flag. 
- if leading_blanks { - parser.simple_key_allowed = true - } - return true -} - -func yaml_parser_scan_line_comment(parser *yaml_parser_t, token_mark yaml_mark_t) bool { - if parser.newlines > 0 { - return true - } - - var start_mark yaml_mark_t - var text []byte - - for peek := 0; peek < 512; peek++ { - if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { - break - } - if is_blank(parser.buffer, parser.buffer_pos+peek) { - continue - } - if parser.buffer[parser.buffer_pos+peek] == '#' { - seen := parser.mark.index+peek - for { - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_breakz(parser.buffer, parser.buffer_pos) { - if parser.mark.index >= seen { - break - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } else if parser.mark.index >= seen { - if len(text) == 0 { - start_mark = parser.mark - } - text = read(parser, text) - } else { - skip(parser) - } - } - } - break - } - if len(text) > 0 { - parser.comments = append(parser.comments, yaml_comment_t{ - token_mark: token_mark, - start_mark: start_mark, - line: text, - }) - } - return true -} - -func yaml_parser_scan_comments(parser *yaml_parser_t, scan_mark yaml_mark_t) bool { - token := parser.tokens[len(parser.tokens)-1] - - if token.typ == yaml_FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { - token = parser.tokens[len(parser.tokens)-2] - } - - var token_mark = token.start_mark - var start_mark yaml_mark_t - - var recent_empty = false - var first_empty = parser.newlines <= 1 - - var line = parser.mark.line - var column = parser.mark.column - - var text []byte - - // The foot line is the place where a comment must start to - // still be considered as a foot of the prior content. - // If there's some content in the currently parsed line, then - // the foot is the line below it. - var foot_line = -1 - if scan_mark.line > 0 { - foot_line = parser.mark.line-parser.newlines+1 - if parser.newlines == 0 && parser.mark.column > 1 { - foot_line++ - } - } - - var peek = 0 - for ; peek < 512; peek++ { - if parser.unread < peek+1 && !yaml_parser_update_buffer(parser, peek+1) { - break - } - column++ - if is_blank(parser.buffer, parser.buffer_pos+peek) { - continue - } - c := parser.buffer[parser.buffer_pos+peek] - if is_breakz(parser.buffer, parser.buffer_pos+peek) || parser.flow_level > 0 && (c == ']' || c == '}') { - // Got line break or terminator. - if !recent_empty { - if first_empty && (start_mark.line == foot_line || start_mark.column-1 < parser.indent) { - // This is the first empty line and there were no empty lines before, - // so this initial part of the comment is a foot of the prior token - // instead of being a head for the following one. Split it up. - if len(text) > 0 { - if start_mark.column-1 < parser.indent { - // If dedented it's unrelated to the prior token. 
- token_mark = start_mark - } - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: token_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, - foot: text, - }) - scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} - token_mark = scan_mark - text = nil - } - } else { - if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { - text = append(text, '\n') - } - } - } - if !is_break(parser.buffer, parser.buffer_pos+peek) { - break - } - first_empty = false - recent_empty = true - column = 0 - line++ - continue - } - - if len(text) > 0 && column < parser.indent+1 && column != start_mark.column { - // The comment at the different indentation is a foot of the - // preceding data rather than a head of the upcoming one. - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: token_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek, line, column}, - foot: text, - }) - scan_mark = yaml_mark_t{parser.mark.index + peek, line, column} - token_mark = scan_mark - text = nil - } - - if parser.buffer[parser.buffer_pos+peek] != '#' { - break - } - - if len(text) == 0 { - start_mark = yaml_mark_t{parser.mark.index + peek, line, column} - } else { - text = append(text, '\n') - } - - recent_empty = false - - // Consume until after the consumed comment line. - seen := parser.mark.index+peek - for { - if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { - return false - } - if is_breakz(parser.buffer, parser.buffer_pos) { - if parser.mark.index >= seen { - break - } - if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { - return false - } - skip_line(parser) - } else if parser.mark.index >= seen { - text = read(parser, text) - } else { - skip(parser) - } - } - - peek = 0 - column = 0 - line = parser.mark.line - } - - if len(text) > 0 { - parser.comments = append(parser.comments, yaml_comment_t{ - scan_mark: scan_mark, - token_mark: start_mark, - start_mark: start_mark, - end_mark: yaml_mark_t{parser.mark.index + peek - 1, line, column}, - head: text, - }) - } - return true -} diff --git a/vendor/gopkg.in/yaml.v3/sorter.go b/vendor/gopkg.in/yaml.v3/sorter.go deleted file mode 100644 index 9210ece..0000000 --- a/vendor/gopkg.in/yaml.v3/sorter.go +++ /dev/null @@ -1,134 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package yaml - -import ( - "reflect" - "unicode" -) - -type keyList []reflect.Value - -func (l keyList) Len() int { return len(l) } -func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } -func (l keyList) Less(i, j int) bool { - a := l[i] - b := l[j] - ak := a.Kind() - bk := b.Kind() - for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { - a = a.Elem() - ak = a.Kind() - } - for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { - b = b.Elem() - bk = b.Kind() - } - af, aok := keyFloat(a) - bf, bok := keyFloat(b) - if aok && bok { - if af != bf { - return af < bf - } - if ak != bk { - return ak < bk - } - return numLess(a, b) - } - if ak != reflect.String || bk != reflect.String { - return ak < bk - } - ar, br := []rune(a.String()), []rune(b.String()) - digits := false - for i := 0; i < len(ar) && i < len(br); i++ { - if ar[i] == br[i] { - digits = unicode.IsDigit(ar[i]) - continue - } - al := unicode.IsLetter(ar[i]) - bl := unicode.IsLetter(br[i]) - if al && bl { - return ar[i] < br[i] - } - if al || bl { - if digits { - return al - } else { - return bl - } - } - var ai, bi int - var an, bn int64 - if ar[i] == '0' || br[i] == '0' { - for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { - if ar[j] != '0' { - an = 1 - bn = 1 - break - } - } - } - for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { - an = an*10 + int64(ar[ai]-'0') - } - for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { - bn = bn*10 + int64(br[bi]-'0') - } - if an != bn { - return an < bn - } - if ai != bi { - return ai < bi - } - return ar[i] < br[i] - } - return len(ar) < len(br) -} - -// keyFloat returns a float value for v if it is a number/bool -// and whether it is a number/bool or not. -func keyFloat(v reflect.Value) (f float64, ok bool) { - switch v.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return float64(v.Int()), true - case reflect.Float32, reflect.Float64: - return v.Float(), true - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return float64(v.Uint()), true - case reflect.Bool: - if v.Bool() { - return 1, true - } - return 0, true - } - return 0, false -} - -// numLess returns whether a < b. -// a and b must necessarily have the same kind. 
-func numLess(a, b reflect.Value) bool { - switch a.Kind() { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return a.Int() < b.Int() - case reflect.Float32, reflect.Float64: - return a.Float() < b.Float() - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return a.Uint() < b.Uint() - case reflect.Bool: - return !a.Bool() && b.Bool() - } - panic("not a number") -} diff --git a/vendor/gopkg.in/yaml.v3/writerc.go b/vendor/gopkg.in/yaml.v3/writerc.go deleted file mode 100644 index b8a116b..0000000 --- a/vendor/gopkg.in/yaml.v3/writerc.go +++ /dev/null @@ -1,48 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -// Set the writer error and return false. -func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { - emitter.error = yaml_WRITER_ERROR - emitter.problem = problem - return false -} - -// Flush the output buffer. -func yaml_emitter_flush(emitter *yaml_emitter_t) bool { - if emitter.write_handler == nil { - panic("write handler not set") - } - - // Check if the buffer is empty. - if emitter.buffer_pos == 0 { - return true - } - - if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { - return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) - } - emitter.buffer_pos = 0 - return true -} diff --git a/vendor/gopkg.in/yaml.v3/yaml.go b/vendor/gopkg.in/yaml.v3/yaml.go deleted file mode 100644 index 56e8a84..0000000 --- a/vendor/gopkg.in/yaml.v3/yaml.go +++ /dev/null @@ -1,693 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package yaml implements YAML support for the Go language. 
-// -// Source code and other details for the project are available at GitHub: -// -// https://github.com/go-yaml/yaml -// -package yaml - -import ( - "errors" - "fmt" - "io" - "reflect" - "strings" - "sync" - "unicode/utf8" -) - -// The Unmarshaler interface may be implemented by types to customize their -// behavior when being unmarshaled from a YAML document. -type Unmarshaler interface { - UnmarshalYAML(value *Node) error -} - -type obsoleteUnmarshaler interface { - UnmarshalYAML(unmarshal func(interface{}) error) error -} - -// The Marshaler interface may be implemented by types to customize their -// behavior when being marshaled into a YAML document. The returned value -// is marshaled in place of the original value implementing Marshaler. -// -// If an error is returned by MarshalYAML, the marshaling procedure stops -// and returns with the provided error. -type Marshaler interface { - MarshalYAML() (interface{}, error) -} - -// Unmarshal decodes the first document found within the in byte slice -// and assigns decoded values into the out value. -// -// Maps and pointers (to a struct, string, int, etc) are accepted as out -// values. If an internal pointer within a struct is not initialized, -// the yaml package will initialize it if necessary for unmarshalling -// the provided data. The out parameter must not be nil. -// -// The type of the decoded values should be compatible with the respective -// values in out. If one or more values cannot be decoded due to a type -// mismatches, decoding continues partially until the end of the YAML -// content, and a *yaml.TypeError is returned with details for all -// missed values. -// -// Struct fields are only unmarshalled if they are exported (have an -// upper case first letter), and are unmarshalled using the field name -// lowercased as the default key. Custom keys may be defined via the -// "yaml" name in the field tag: the content preceding the first comma -// is used as the key, and the following comma-separated options are -// used to tweak the marshalling process (see Marshal). -// Conflicting names result in a runtime error. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// var t T -// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) -// -// See the documentation of Marshal for the format of tags and a list of -// supported tag options. -// -func Unmarshal(in []byte, out interface{}) (err error) { - return unmarshal(in, out, false) -} - -// A Decoder reads and decodes YAML values from an input stream. -type Decoder struct { - parser *parser - knownFields bool -} - -// NewDecoder returns a new decoder that reads from r. -// -// The decoder introduces its own buffering and may read -// data from r beyond the YAML values requested. -func NewDecoder(r io.Reader) *Decoder { - return &Decoder{ - parser: newParserFromReader(r), - } -} - -// KnownFields ensures that the keys in decoded mappings to -// exist as fields in the struct being decoded into. -func (dec *Decoder) KnownFields(enable bool) { - dec.knownFields = enable -} - -// Decode reads the next YAML-encoded value from its input -// and stores it in the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. 
-func (dec *Decoder) Decode(v interface{}) (err error) { - d := newDecoder() - d.knownFields = dec.knownFields - defer handleErr(&err) - node := dec.parser.parse() - if node == nil { - return io.EOF - } - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(node, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Decode decodes the node and stores its data into the value pointed to by v. -// -// See the documentation for Unmarshal for details about the -// conversion of YAML into a Go value. -func (n *Node) Decode(v interface{}) (err error) { - d := newDecoder() - defer handleErr(&err) - out := reflect.ValueOf(v) - if out.Kind() == reflect.Ptr && !out.IsNil() { - out = out.Elem() - } - d.unmarshal(n, out) - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -func unmarshal(in []byte, out interface{}, strict bool) (err error) { - defer handleErr(&err) - d := newDecoder() - p := newParser(in) - defer p.destroy() - node := p.parse() - if node != nil { - v := reflect.ValueOf(out) - if v.Kind() == reflect.Ptr && !v.IsNil() { - v = v.Elem() - } - d.unmarshal(node, v) - } - if len(d.terrors) > 0 { - return &TypeError{d.terrors} - } - return nil -} - -// Marshal serializes the value provided into a YAML document. The structure -// of the generated document will reflect the structure of the value itself. -// Maps and pointers (to struct, string, int, etc) are accepted as the in value. -// -// Struct fields are only marshalled if they are exported (have an upper case -// first letter), and are marshalled using the field name lowercased as the -// default key. Custom keys may be defined via the "yaml" name in the field -// tag: the content preceding the first comma is used as the key, and the -// following comma-separated options are used to tweak the marshalling process. -// Conflicting names result in a runtime error. -// -// The field tag format accepted is: -// -// `(...) yaml:"[][,[,]]" (...)` -// -// The following flags are currently supported: -// -// omitempty Only include the field if it's not set to the zero -// value for the type or to empty slices or maps. -// Zero valued structs will be omitted if all their public -// fields are zero, unless they implement an IsZero -// method (see the IsZeroer interface type), in which -// case the field will be excluded if IsZero returns true. -// -// flow Marshal using a flow style (useful for structs, -// sequences and maps). -// -// inline Inline the field, which must be a struct or a map, -// causing all of its fields or keys to be processed as if -// they were part of the outer struct. For maps, keys must -// not conflict with the yaml keys of other struct fields. -// -// In addition, if the key is "-", the field is ignored. -// -// For example: -// -// type T struct { -// F int `yaml:"a,omitempty"` -// B int -// } -// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" -// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" -// -func Marshal(in interface{}) (out []byte, err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(in)) - e.finish() - out = e.out - return -} - -// An Encoder writes YAML values to an output stream. -type Encoder struct { - encoder *encoder -} - -// NewEncoder returns a new encoder that writes to w. -// The Encoder should be closed after use to flush all data -// to w. 
-func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - encoder: newEncoderWithWriter(w), - } -} - -// Encode writes the YAML encoding of v to the stream. -// If multiple items are encoded to the stream, the -// second and subsequent document will be preceded -// with a "---" document separator, but the first will not. -// -// See the documentation for Marshal for details about the conversion of Go -// values to YAML. -func (e *Encoder) Encode(v interface{}) (err error) { - defer handleErr(&err) - e.encoder.marshalDoc("", reflect.ValueOf(v)) - return nil -} - -// Encode encodes value v and stores its representation in n. -// -// See the documentation for Marshal for details about the -// conversion of Go values into YAML. -func (n *Node) Encode(v interface{}) (err error) { - defer handleErr(&err) - e := newEncoder() - defer e.destroy() - e.marshalDoc("", reflect.ValueOf(v)) - e.finish() - p := newParser(e.out) - p.textless = true - defer p.destroy() - doc := p.parse() - *n = *doc.Content[0] - return nil -} - -// SetIndent changes the used indentation used when encoding. -func (e *Encoder) SetIndent(spaces int) { - if spaces < 0 { - panic("yaml: cannot indent to a negative number of spaces") - } - e.encoder.indent = spaces -} - -// Close closes the encoder by writing any remaining data. -// It does not write a stream terminating string "...". -func (e *Encoder) Close() (err error) { - defer handleErr(&err) - e.encoder.finish() - return nil -} - -func handleErr(err *error) { - if v := recover(); v != nil { - if e, ok := v.(yamlError); ok { - *err = e.err - } else { - panic(v) - } - } -} - -type yamlError struct { - err error -} - -func fail(err error) { - panic(yamlError{err}) -} - -func failf(format string, args ...interface{}) { - panic(yamlError{fmt.Errorf("yaml: "+format, args...)}) -} - -// A TypeError is returned by Unmarshal when one or more fields in -// the YAML document cannot be properly decoded into the requested -// types. When this error is returned, the value is still -// unmarshaled partially. -type TypeError struct { - Errors []string -} - -func (e *TypeError) Error() string { - return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n ")) -} - -type Kind uint32 - -const ( - DocumentNode Kind = 1 << iota - SequenceNode - MappingNode - ScalarNode - AliasNode -) - -type Style uint32 - -const ( - TaggedStyle Style = 1 << iota - DoubleQuotedStyle - SingleQuotedStyle - LiteralStyle - FoldedStyle - FlowStyle -) - -// Node represents an element in the YAML document hierarchy. While documents -// are typically encoded and decoded into higher level types, such as structs -// and maps, Node is an intermediate representation that allows detailed -// control over the content being decoded or encoded. -// -// It's worth noting that although Node offers access into details such as -// line numbers, colums, and comments, the content when re-encoded will not -// have its original textual representation preserved. An effort is made to -// render the data plesantly, and to preserve comments near the data they -// describe, though. -// -// Values that make use of the Node type interact with the yaml package in the -// same way any other type would do, by encoding and decoding yaml data -// directly or indirectly into them. 
-// -// For example: -// -// var person struct { -// Name string -// Address yaml.Node -// } -// err := yaml.Unmarshal(data, &person) -// -// Or by itself: -// -// var person Node -// err := yaml.Unmarshal(data, &person) -// -type Node struct { - // Kind defines whether the node is a document, a mapping, a sequence, - // a scalar value, or an alias to another node. The specific data type of - // scalar nodes may be obtained via the ShortTag and LongTag methods. - Kind Kind - - // Style allows customizing the apperance of the node in the tree. - Style Style - - // Tag holds the YAML tag defining the data type for the value. - // When decoding, this field will always be set to the resolved tag, - // even when it wasn't explicitly provided in the YAML content. - // When encoding, if this field is unset the value type will be - // implied from the node properties, and if it is set, it will only - // be serialized into the representation if TaggedStyle is used or - // the implicit tag diverges from the provided one. - Tag string - - // Value holds the unescaped and unquoted represenation of the value. - Value string - - // Anchor holds the anchor name for this node, which allows aliases to point to it. - Anchor string - - // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. - Alias *Node - - // Content holds contained nodes for documents, mappings, and sequences. - Content []*Node - - // HeadComment holds any comments in the lines preceding the node and - // not separated by an empty line. - HeadComment string - - // LineComment holds any comments at the end of the line where the node is in. - LineComment string - - // FootComment holds any comments following the node and before empty lines. - FootComment string - - // Line and Column hold the node position in the decoded YAML text. - // These fields are not respected when encoding the node. - Line int - Column int -} - -// IsZero returns whether the node has all of its fields unset. -func (n *Node) IsZero() bool { - return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && - n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 -} - - -// LongTag returns the long form of the tag that indicates the data type for -// the node. If the Tag field isn't explicitly defined, one will be computed -// based on the node properties. -func (n *Node) LongTag() string { - return longTag(n.ShortTag()) -} - -// ShortTag returns the short form of the YAML tag that indicates data type for -// the node. If the Tag field isn't explicitly defined, one will be computed -// based on the node properties. -func (n *Node) ShortTag() string { - if n.indicatedString() { - return strTag - } - if n.Tag == "" || n.Tag == "!" { - switch n.Kind { - case MappingNode: - return mapTag - case SequenceNode: - return seqTag - case AliasNode: - if n.Alias != nil { - return n.Alias.ShortTag() - } - case ScalarNode: - tag, _ := resolve("", n.Value) - return tag - } - return "" - } - return shortTag(n.Tag) -} - -func (n *Node) indicatedString() bool { - return n.Kind == ScalarNode && - (shortTag(n.Tag) == strTag || - (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) -} - -// SetString is a convenience function that sets the node to a string value -// and defines its style in a pleasant way depending on its content. 
-func (n *Node) SetString(s string) { - n.Kind = ScalarNode - if utf8.ValidString(s) { - n.Value = s - n.Tag = strTag - } else { - n.Value = encodeBase64(s) - n.Tag = binaryTag - } - if strings.Contains(n.Value, "\n") { - n.Style = LiteralStyle - } -} - -// -------------------------------------------------------------------------- -// Maintain a mapping of keys to structure field indexes - -// The code in this section was copied from mgo/bson. - -// structInfo holds details for the serialization of fields of -// a given struct. -type structInfo struct { - FieldsMap map[string]fieldInfo - FieldsList []fieldInfo - - // InlineMap is the number of the field in the struct that - // contains an ,inline map, or -1 if there's none. - InlineMap int - - // InlineUnmarshalers holds indexes to inlined fields that - // contain unmarshaler values. - InlineUnmarshalers [][]int -} - -type fieldInfo struct { - Key string - Num int - OmitEmpty bool - Flow bool - // Id holds the unique field identifier, so we can cheaply - // check for field duplicates without maintaining an extra map. - Id int - - // Inline holds the field index if the field is part of an inlined struct. - Inline []int -} - -var structMap = make(map[reflect.Type]*structInfo) -var fieldMapMutex sync.RWMutex -var unmarshalerType reflect.Type - -func init() { - var v Unmarshaler - unmarshalerType = reflect.ValueOf(&v).Elem().Type() -} - -func getStructInfo(st reflect.Type) (*structInfo, error) { - fieldMapMutex.RLock() - sinfo, found := structMap[st] - fieldMapMutex.RUnlock() - if found { - return sinfo, nil - } - - n := st.NumField() - fieldsMap := make(map[string]fieldInfo) - fieldsList := make([]fieldInfo, 0, n) - inlineMap := -1 - inlineUnmarshalers := [][]int(nil) - for i := 0; i != n; i++ { - field := st.Field(i) - if field.PkgPath != "" && !field.Anonymous { - continue // Private field - } - - info := fieldInfo{Num: i} - - tag := field.Tag.Get("yaml") - if tag == "" && strings.Index(string(field.Tag), ":") < 0 { - tag = string(field.Tag) - } - if tag == "-" { - continue - } - - inline := false - fields := strings.Split(tag, ",") - if len(fields) > 1 { - for _, flag := range fields[1:] { - switch flag { - case "omitempty": - info.OmitEmpty = true - case "flow": - info.Flow = true - case "inline": - inline = true - default: - return nil, errors.New(fmt.Sprintf("unsupported flag %q in tag %q of type %s", flag, tag, st)) - } - } - tag = fields[0] - } - - if inline { - switch field.Type.Kind() { - case reflect.Map: - if inlineMap >= 0 { - return nil, errors.New("multiple ,inline maps in struct " + st.String()) - } - if field.Type.Key() != reflect.TypeOf("") { - return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) - } - inlineMap = info.Num - case reflect.Struct, reflect.Ptr: - ftype := field.Type - for ftype.Kind() == reflect.Ptr { - ftype = ftype.Elem() - } - if ftype.Kind() != reflect.Struct { - return nil, errors.New("option ,inline may only be used on a struct or map field") - } - if reflect.PtrTo(ftype).Implements(unmarshalerType) { - inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) - } else { - sinfo, err := getStructInfo(ftype) - if err != nil { - return nil, err - } - for _, index := range sinfo.InlineUnmarshalers { - inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) - } - for _, finfo := range sinfo.FieldsList { - if _, found := fieldsMap[finfo.Key]; found { - msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() - return nil, errors.New(msg) 
- } - if finfo.Inline == nil { - finfo.Inline = []int{i, finfo.Num} - } else { - finfo.Inline = append([]int{i}, finfo.Inline...) - } - finfo.Id = len(fieldsList) - fieldsMap[finfo.Key] = finfo - fieldsList = append(fieldsList, finfo) - } - } - default: - return nil, errors.New("option ,inline may only be used on a struct or map field") - } - continue - } - - if tag != "" { - info.Key = tag - } else { - info.Key = strings.ToLower(field.Name) - } - - if _, found = fieldsMap[info.Key]; found { - msg := "duplicated key '" + info.Key + "' in struct " + st.String() - return nil, errors.New(msg) - } - - info.Id = len(fieldsList) - fieldsList = append(fieldsList, info) - fieldsMap[info.Key] = info - } - - sinfo = &structInfo{ - FieldsMap: fieldsMap, - FieldsList: fieldsList, - InlineMap: inlineMap, - InlineUnmarshalers: inlineUnmarshalers, - } - - fieldMapMutex.Lock() - structMap[st] = sinfo - fieldMapMutex.Unlock() - return sinfo, nil -} - -// IsZeroer is used to check whether an object is zero to -// determine whether it should be omitted when marshaling -// with the omitempty flag. One notable implementation -// is time.Time. -type IsZeroer interface { - IsZero() bool -} - -func isZero(v reflect.Value) bool { - kind := v.Kind() - if z, ok := v.Interface().(IsZeroer); ok { - if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { - return true - } - return z.IsZero() - } - switch kind { - case reflect.String: - return len(v.String()) == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - case reflect.Slice: - return v.Len() == 0 - case reflect.Map: - return v.Len() == 0 - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Struct: - vt := v.Type() - for i := v.NumField() - 1; i >= 0; i-- { - if vt.Field(i).PkgPath != "" { - continue // Private field - } - if !isZero(v.Field(i)) { - return false - } - } - return true - } - return false -} diff --git a/vendor/gopkg.in/yaml.v3/yamlh.go b/vendor/gopkg.in/yaml.v3/yamlh.go deleted file mode 100644 index 7c6d007..0000000 --- a/vendor/gopkg.in/yaml.v3/yamlh.go +++ /dev/null @@ -1,807 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -import ( - "fmt" - "io" -) - -// The version directive data. -type yaml_version_directive_t struct { - major int8 // The major version number. - minor int8 // The minor version number. -} - -// The tag directive data. -type yaml_tag_directive_t struct { - handle []byte // The tag handle. - prefix []byte // The tag prefix. -} - -type yaml_encoding_t int - -// The stream encoding. -const ( - // Let the parser choose the encoding. - yaml_ANY_ENCODING yaml_encoding_t = iota - - yaml_UTF8_ENCODING // The default UTF-8 encoding. - yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. - yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. -) - -type yaml_break_t int - -// Line break types. -const ( - // Let the parser choose the break type. - yaml_ANY_BREAK yaml_break_t = iota - - yaml_CR_BREAK // Use CR for line breaks (Mac style). - yaml_LN_BREAK // Use LN for line breaks (Unix style). - yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). -) - -type yaml_error_type_t int - -// Many bad things could happen with the parser and emitter. -const ( - // No error is produced. - yaml_NO_ERROR yaml_error_type_t = iota - - yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. - yaml_READER_ERROR // Cannot read or decode the input stream. - yaml_SCANNER_ERROR // Cannot scan the input stream. - yaml_PARSER_ERROR // Cannot parse the input stream. - yaml_COMPOSER_ERROR // Cannot compose a YAML document. - yaml_WRITER_ERROR // Cannot write to the output stream. - yaml_EMITTER_ERROR // Cannot emit a YAML stream. -) - -// The pointer position. -type yaml_mark_t struct { - index int // The position index. - line int // The position line. - column int // The position column. -} - -// Node Styles - -type yaml_style_t int8 - -type yaml_scalar_style_t yaml_style_t - -// Scalar styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = 0 - - yaml_PLAIN_SCALAR_STYLE yaml_scalar_style_t = 1 << iota // The plain scalar style. - yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. - yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. - yaml_LITERAL_SCALAR_STYLE // The literal scalar style. - yaml_FOLDED_SCALAR_STYLE // The folded scalar style. -) - -type yaml_sequence_style_t yaml_style_t - -// Sequence styles. -const ( - // Let the emitter choose the style. - yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota - - yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. - yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. -) - -type yaml_mapping_style_t yaml_style_t - -// Mapping styles. -const ( - // Let the emitter choose the style. - yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota - - yaml_BLOCK_MAPPING_STYLE // The block mapping style. - yaml_FLOW_MAPPING_STYLE // The flow mapping style. -) - -// Tokens - -type yaml_token_type_t int - -// Token types. -const ( - // An empty token. - yaml_NO_TOKEN yaml_token_type_t = iota - - yaml_STREAM_START_TOKEN // A STREAM-START token. - yaml_STREAM_END_TOKEN // A STREAM-END token. - - yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. - yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. - yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. 
- yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. - - yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. - yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. - yaml_BLOCK_END_TOKEN // A BLOCK-END token. - - yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. - yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. - yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. - yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. - - yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. - yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. - yaml_KEY_TOKEN // A KEY token. - yaml_VALUE_TOKEN // A VALUE token. - - yaml_ALIAS_TOKEN // An ALIAS token. - yaml_ANCHOR_TOKEN // An ANCHOR token. - yaml_TAG_TOKEN // A TAG token. - yaml_SCALAR_TOKEN // A SCALAR token. -) - -func (tt yaml_token_type_t) String() string { - switch tt { - case yaml_NO_TOKEN: - return "yaml_NO_TOKEN" - case yaml_STREAM_START_TOKEN: - return "yaml_STREAM_START_TOKEN" - case yaml_STREAM_END_TOKEN: - return "yaml_STREAM_END_TOKEN" - case yaml_VERSION_DIRECTIVE_TOKEN: - return "yaml_VERSION_DIRECTIVE_TOKEN" - case yaml_TAG_DIRECTIVE_TOKEN: - return "yaml_TAG_DIRECTIVE_TOKEN" - case yaml_DOCUMENT_START_TOKEN: - return "yaml_DOCUMENT_START_TOKEN" - case yaml_DOCUMENT_END_TOKEN: - return "yaml_DOCUMENT_END_TOKEN" - case yaml_BLOCK_SEQUENCE_START_TOKEN: - return "yaml_BLOCK_SEQUENCE_START_TOKEN" - case yaml_BLOCK_MAPPING_START_TOKEN: - return "yaml_BLOCK_MAPPING_START_TOKEN" - case yaml_BLOCK_END_TOKEN: - return "yaml_BLOCK_END_TOKEN" - case yaml_FLOW_SEQUENCE_START_TOKEN: - return "yaml_FLOW_SEQUENCE_START_TOKEN" - case yaml_FLOW_SEQUENCE_END_TOKEN: - return "yaml_FLOW_SEQUENCE_END_TOKEN" - case yaml_FLOW_MAPPING_START_TOKEN: - return "yaml_FLOW_MAPPING_START_TOKEN" - case yaml_FLOW_MAPPING_END_TOKEN: - return "yaml_FLOW_MAPPING_END_TOKEN" - case yaml_BLOCK_ENTRY_TOKEN: - return "yaml_BLOCK_ENTRY_TOKEN" - case yaml_FLOW_ENTRY_TOKEN: - return "yaml_FLOW_ENTRY_TOKEN" - case yaml_KEY_TOKEN: - return "yaml_KEY_TOKEN" - case yaml_VALUE_TOKEN: - return "yaml_VALUE_TOKEN" - case yaml_ALIAS_TOKEN: - return "yaml_ALIAS_TOKEN" - case yaml_ANCHOR_TOKEN: - return "yaml_ANCHOR_TOKEN" - case yaml_TAG_TOKEN: - return "yaml_TAG_TOKEN" - case yaml_SCALAR_TOKEN: - return "yaml_SCALAR_TOKEN" - } - return "" -} - -// The token structure. -type yaml_token_t struct { - // The token type. - typ yaml_token_type_t - - // The start/end of the token. - start_mark, end_mark yaml_mark_t - - // The stream encoding (for yaml_STREAM_START_TOKEN). - encoding yaml_encoding_t - - // The alias/anchor/scalar value or tag/tag directive handle - // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). - value []byte - - // The tag suffix (for yaml_TAG_TOKEN). - suffix []byte - - // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). - prefix []byte - - // The scalar style (for yaml_SCALAR_TOKEN). - style yaml_scalar_style_t - - // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). - major, minor int8 -} - -// Events - -type yaml_event_type_t int8 - -// Event types. -const ( - // An empty event. - yaml_NO_EVENT yaml_event_type_t = iota - - yaml_STREAM_START_EVENT // A STREAM-START event. - yaml_STREAM_END_EVENT // A STREAM-END event. - yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. - yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. - yaml_ALIAS_EVENT // An ALIAS event. - yaml_SCALAR_EVENT // A SCALAR event. 
- yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. - yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. - yaml_MAPPING_START_EVENT // A MAPPING-START event. - yaml_MAPPING_END_EVENT // A MAPPING-END event. - yaml_TAIL_COMMENT_EVENT -) - -var eventStrings = []string{ - yaml_NO_EVENT: "none", - yaml_STREAM_START_EVENT: "stream start", - yaml_STREAM_END_EVENT: "stream end", - yaml_DOCUMENT_START_EVENT: "document start", - yaml_DOCUMENT_END_EVENT: "document end", - yaml_ALIAS_EVENT: "alias", - yaml_SCALAR_EVENT: "scalar", - yaml_SEQUENCE_START_EVENT: "sequence start", - yaml_SEQUENCE_END_EVENT: "sequence end", - yaml_MAPPING_START_EVENT: "mapping start", - yaml_MAPPING_END_EVENT: "mapping end", - yaml_TAIL_COMMENT_EVENT: "tail comment", -} - -func (e yaml_event_type_t) String() string { - if e < 0 || int(e) >= len(eventStrings) { - return fmt.Sprintf("unknown event %d", e) - } - return eventStrings[e] -} - -// The event structure. -type yaml_event_t struct { - - // The event type. - typ yaml_event_type_t - - // The start and end of the event. - start_mark, end_mark yaml_mark_t - - // The document encoding (for yaml_STREAM_START_EVENT). - encoding yaml_encoding_t - - // The version directive (for yaml_DOCUMENT_START_EVENT). - version_directive *yaml_version_directive_t - - // The list of tag directives (for yaml_DOCUMENT_START_EVENT). - tag_directives []yaml_tag_directive_t - - // The comments - head_comment []byte - line_comment []byte - foot_comment []byte - tail_comment []byte - - // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). - anchor []byte - - // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - tag []byte - - // The scalar value (for yaml_SCALAR_EVENT). - value []byte - - // Is the document start/end indicator implicit, or the tag optional? - // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). - implicit bool - - // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). - quoted_implicit bool - - // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). - style yaml_style_t -} - -func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } -func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } -func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } - -// Nodes - -const ( - yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. - yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. - yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. - yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. - yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. - yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. - - yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. - yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. - - // Not in original libyaml. - yaml_BINARY_TAG = "tag:yaml.org,2002:binary" - yaml_MERGE_TAG = "tag:yaml.org,2002:merge" - - yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. 
- yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. - yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. -) - -type yaml_node_type_t int - -// Node types. -const ( - // An empty node. - yaml_NO_NODE yaml_node_type_t = iota - - yaml_SCALAR_NODE // A scalar node. - yaml_SEQUENCE_NODE // A sequence node. - yaml_MAPPING_NODE // A mapping node. -) - -// An element of a sequence node. -type yaml_node_item_t int - -// An element of a mapping node. -type yaml_node_pair_t struct { - key int // The key of the element. - value int // The value of the element. -} - -// The node structure. -type yaml_node_t struct { - typ yaml_node_type_t // The node type. - tag []byte // The node tag. - - // The node data. - - // The scalar parameters (for yaml_SCALAR_NODE). - scalar struct { - value []byte // The scalar value. - length int // The length of the scalar value. - style yaml_scalar_style_t // The scalar style. - } - - // The sequence parameters (for YAML_SEQUENCE_NODE). - sequence struct { - items_data []yaml_node_item_t // The stack of sequence items. - style yaml_sequence_style_t // The sequence style. - } - - // The mapping parameters (for yaml_MAPPING_NODE). - mapping struct { - pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). - pairs_start *yaml_node_pair_t // The beginning of the stack. - pairs_end *yaml_node_pair_t // The end of the stack. - pairs_top *yaml_node_pair_t // The top of the stack. - style yaml_mapping_style_t // The mapping style. - } - - start_mark yaml_mark_t // The beginning of the node. - end_mark yaml_mark_t // The end of the node. - -} - -// The document structure. -type yaml_document_t struct { - - // The document nodes. - nodes []yaml_node_t - - // The version directive. - version_directive *yaml_version_directive_t - - // The list of tag directives. - tag_directives_data []yaml_tag_directive_t - tag_directives_start int // The beginning of the tag directives list. - tag_directives_end int // The end of the tag directives list. - - start_implicit int // Is the document start indicator implicit? - end_implicit int // Is the document end indicator implicit? - - // The start/end of the document. - start_mark, end_mark yaml_mark_t -} - -// The prototype of a read handler. -// -// The read handler is called when the parser needs to read more bytes from the -// source. The handler should write not more than size bytes to the buffer. -// The number of written bytes should be set to the size_read variable. -// -// [in,out] data A pointer to an application data specified by -// yaml_parser_set_input(). -// [out] buffer The buffer to write the data from the source. -// [in] size The size of the buffer. -// [out] size_read The actual number of bytes read from the source. -// -// On success, the handler should return 1. If the handler failed, -// the returned value should be 0. On EOF, the handler should set the -// size_read to 0 and return 1. -type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error) - -// This structure holds information about a potential simple key. -type yaml_simple_key_t struct { - possible bool // Is a simple key possible? - required bool // Is a simple key required? - token_number int // The number of the token. - mark yaml_mark_t // The position mark. -} - -// The states of the parser. 
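The read-handler contract documented above maps directly onto io.Reader semantics. A standalone analogue is sketched below with hypothetical names (readHandler, handlerFromReader) rather than the package's internal yaml_read_handler_t plumbing:

package main

import (
	"fmt"
	"io"
	"strings"
)

// readHandler mirrors the documented contract: fill buffer with at most
// len(buffer) bytes, report how many were written, and signal end of input.
type readHandler func(buffer []byte) (n int, err error)

// handlerFromReader adapts any io.Reader to that contract, which is roughly
// what wiring the parser to string or file input amounts to.
func handlerFromReader(r io.Reader) readHandler {
	return func(buffer []byte) (int, error) {
		return r.Read(buffer)
	}
}

func main() {
	h := handlerFromReader(strings.NewReader("key: value\n"))
	buf := make([]byte, 8)
	for {
		n, err := h(buf)
		fmt.Printf("read %d bytes: %q\n", n, buf[:n])
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
	}
}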
-type yaml_parser_state_t int - -const ( - yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota - - yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. - yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. - yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_PARSE_BLOCK_NODE_STATE // Expect a block node. - yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence. - yaml_PARSE_FLOW_NODE_STATE // Expect a flow node. - yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. - yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. - yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. - yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. - yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. - yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. - yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. - yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. - yaml_PARSE_END_STATE // Expect nothing. 
-) - -func (ps yaml_parser_state_t) String() string { - switch ps { - case yaml_PARSE_STREAM_START_STATE: - return "yaml_PARSE_STREAM_START_STATE" - case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: - return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_START_STATE: - return "yaml_PARSE_DOCUMENT_START_STATE" - case yaml_PARSE_DOCUMENT_CONTENT_STATE: - return "yaml_PARSE_DOCUMENT_CONTENT_STATE" - case yaml_PARSE_DOCUMENT_END_STATE: - return "yaml_PARSE_DOCUMENT_END_STATE" - case yaml_PARSE_BLOCK_NODE_STATE: - return "yaml_PARSE_BLOCK_NODE_STATE" - case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: - return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE" - case yaml_PARSE_FLOW_NODE_STATE: - return "yaml_PARSE_FLOW_NODE_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: - return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE" - case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: - return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: - return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" - case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_KEY_STATE: - return "yaml_PARSE_FLOW_MAPPING_KEY_STATE" - case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE" - case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: - return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" - case yaml_PARSE_END_STATE: - return "yaml_PARSE_END_STATE" - } - return "" -} - -// This structure holds aliases data. -type yaml_alias_data_t struct { - anchor []byte // The anchor. - index int // The node id. - mark yaml_mark_t // The anchor mark. -} - -// The parser structure. -// -// All members are internal. Manage the structure using the -// yaml_parser_ family of functions. -type yaml_parser_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - - problem string // Error description. - - // The byte about which the problem occurred. - problem_offset int - problem_value int - problem_mark yaml_mark_t - - // The error context. - context string - context_mark yaml_mark_t - - // Reader stuff - - read_handler yaml_read_handler_t // Read handler. - - input_reader io.Reader // File input data. - input []byte // String input data. - input_pos int - - eof bool // EOF flag - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - unread int // The number of unread characters in the buffer. - - newlines int // The number of line breaks since last non-break/non-blank character - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. 
- - encoding yaml_encoding_t // The input encoding. - - offset int // The offset of the current position (in bytes). - mark yaml_mark_t // The mark of the current position. - - // Comments - - head_comment []byte // The current head comments - line_comment []byte // The current line comments - foot_comment []byte // The current foot comments - tail_comment []byte // Foot comment that happens at the end of a block. - stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) - - comments []yaml_comment_t // The folded comments for all parsed tokens - comments_head int - - // Scanner stuff - - stream_start_produced bool // Have we started to scan the input stream? - stream_end_produced bool // Have we reached the end of the input stream? - - flow_level int // The number of unclosed '[' and '{' indicators. - - tokens []yaml_token_t // The tokens queue. - tokens_head int // The head of the tokens queue. - tokens_parsed int // The number of tokens fetched from the queue. - token_available bool // Does the tokens queue contain a token ready for dequeueing. - - indent int // The current indentation level. - indents []int // The indentation levels stack. - - simple_key_allowed bool // May a simple key occur at the current position? - simple_keys []yaml_simple_key_t // The stack of simple keys. - simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number - - // Parser stuff - - state yaml_parser_state_t // The current parser state. - states []yaml_parser_state_t // The parser states stack. - marks []yaml_mark_t // The stack of marks. - tag_directives []yaml_tag_directive_t // The list of TAG directives. - - // Dumper stuff - - aliases []yaml_alias_data_t // The alias data. - - document *yaml_document_t // The currently parsed document. -} - -type yaml_comment_t struct { - - scan_mark yaml_mark_t // Position where scanning for comments started - token_mark yaml_mark_t // Position after which tokens will be associated with this comment - start_mark yaml_mark_t // Position of '#' comment mark - end_mark yaml_mark_t // Position where comment terminated - - head []byte - line []byte - foot []byte -} - -// Emitter Definitions - -// The prototype of a write handler. -// -// The write handler is called when the emitter needs to flush the accumulated -// characters to the output. The handler should write @a size bytes of the -// @a buffer to the output. -// -// @param[in,out] data A pointer to an application data specified by -// yaml_emitter_set_output(). -// @param[in] buffer The buffer with bytes to be written. -// @param[in] size The size of the buffer. -// -// @returns On success, the handler should return @c 1. If the handler failed, -// the returned value should be @c 0. -// -type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error - -type yaml_emitter_state_t int - -// The emitter states. -const ( - // Expect STREAM-START. - yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota - - yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. - yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. - yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. - yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. 
- yaml_EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out - yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. - yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out - yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. - yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. - yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. - yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. - yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. - yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. - yaml_EMIT_END_STATE // Expect nothing. -) - -// The emitter structure. -// -// All members are internal. Manage the structure using the @c yaml_emitter_ -// family of functions. -type yaml_emitter_t struct { - - // Error handling - - error yaml_error_type_t // Error type. - problem string // Error description. - - // Writer stuff - - write_handler yaml_write_handler_t // Write handler. - - output_buffer *[]byte // String output data. - output_writer io.Writer // File output data. - - buffer []byte // The working buffer. - buffer_pos int // The current position of the buffer. - - raw_buffer []byte // The raw buffer. - raw_buffer_pos int // The current position of the buffer. - - encoding yaml_encoding_t // The stream encoding. - - // Emitter stuff - - canonical bool // If the output is in the canonical style? - best_indent int // The number of indentation spaces. - best_width int // The preferred width of the output lines. - unicode bool // Allow unescaped non-ASCII characters? - line_break yaml_break_t // The preferred line break. - - state yaml_emitter_state_t // The current emitter state. - states []yaml_emitter_state_t // The stack of states. - - events []yaml_event_t // The event queue. - events_head int // The head of the event queue. - - indents []int // The stack of indentation levels. - - tag_directives []yaml_tag_directive_t // The list of tag directives. - - indent int // The current indentation level. - - flow_level int // The current flow level. - - root_context bool // Is it the document root context? - sequence_context bool // Is it a sequence context? - mapping_context bool // Is it a mapping context? - simple_key_context bool // Is it a simple mapping key context? - - line int // The current line. - column int // The current column. - whitespace bool // If the last character was a whitespace? - indention bool // If the last character was an indentation character (' ', '-', '?', ':')? - open_ended bool // If an explicit document end is required? - - space_above bool // Is there's an empty line above? - foot_indent int // The indent used to write the foot comment above, or -1 if none. - - // Anchor analysis. - anchor_data struct { - anchor []byte // The anchor value. - alias bool // Is it an alias? - } - - // Tag analysis. - tag_data struct { - handle []byte // The tag handle. - suffix []byte // The tag suffix. - } - - // Scalar analysis. 
- scalar_data struct { - value []byte // The scalar value. - multiline bool // Does the scalar contain line breaks? - flow_plain_allowed bool // Can the scalar be expessed in the flow plain style? - block_plain_allowed bool // Can the scalar be expressed in the block plain style? - single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? - block_allowed bool // Can the scalar be expressed in the literal or folded styles? - style yaml_scalar_style_t // The output style. - } - - // Comments - head_comment []byte - line_comment []byte - foot_comment []byte - tail_comment []byte - - key_line_comment []byte - - // Dumper stuff - - opened bool // If the stream was already opened? - closed bool // If the stream was already closed? - - // The information associated with the document nodes. - anchors *struct { - references int // The number of references. - anchor int // The anchor id. - serialized bool // If the node has been emitted? - } - - last_anchor_id int // The last assigned anchor id. - - document *yaml_document_t // The currently emitted document. -} diff --git a/vendor/gopkg.in/yaml.v3/yamlprivateh.go b/vendor/gopkg.in/yaml.v3/yamlprivateh.go deleted file mode 100644 index e88f9c5..0000000 --- a/vendor/gopkg.in/yaml.v3/yamlprivateh.go +++ /dev/null @@ -1,198 +0,0 @@ -// -// Copyright (c) 2011-2019 Canonical Ltd -// Copyright (c) 2006-2010 Kirill Simonov -// -// Permission is hereby granted, free of charge, to any person obtaining a copy of -// this software and associated documentation files (the "Software"), to deal in -// the Software without restriction, including without limitation the rights to -// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -// of the Software, and to permit persons to whom the Software is furnished to do -// so, subject to the following conditions: -// -// The above copyright notice and this permission notice shall be included in all -// copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -// SOFTWARE. - -package yaml - -const ( - // The size of the input raw buffer. - input_raw_buffer_size = 512 - - // The size of the input buffer. - // It should be possible to decode the whole raw buffer. - input_buffer_size = input_raw_buffer_size * 3 - - // The size of the output buffer. - output_buffer_size = 128 - - // The size of the output raw buffer. - // It should be possible to encode the whole output buffer. - output_raw_buffer_size = (output_buffer_size*2 + 2) - - // The size of other stacks and queues. - initial_stack_size = 16 - initial_queue_size = 16 - initial_string_size = 16 -) - -// Check if the character at the specified position is an alphabetical -// character, a digit, '_', or '-'. -func is_alpha(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' -} - -// Check if the character at the specified position is a digit. -func is_digit(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' -} - -// Get the value of a digit. 
-func as_digit(b []byte, i int) int { - return int(b[i]) - '0' -} - -// Check if the character at the specified position is a hex-digit. -func is_hex(b []byte, i int) bool { - return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f' -} - -// Get the value of a hex-digit. -func as_hex(b []byte, i int) int { - bi := b[i] - if bi >= 'A' && bi <= 'F' { - return int(bi) - 'A' + 10 - } - if bi >= 'a' && bi <= 'f' { - return int(bi) - 'a' + 10 - } - return int(bi) - '0' -} - -// Check if the character is ASCII. -func is_ascii(b []byte, i int) bool { - return b[i] <= 0x7F -} - -// Check if the character at the start of the buffer can be printed unescaped. -func is_printable(b []byte, i int) bool { - return ((b[i] == 0x0A) || // . == #x0A - (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E - (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF - (b[i] > 0xC2 && b[i] < 0xED) || - (b[i] == 0xED && b[i+1] < 0xA0) || - (b[i] == 0xEE) || - (b[i] == 0xEF && // #xE000 <= . <= #xFFFD - !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF - !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) -} - -// Check if the character at the specified position is NUL. -func is_z(b []byte, i int) bool { - return b[i] == 0x00 -} - -// Check if the beginning of the buffer is a BOM. -func is_bom(b []byte, i int) bool { - return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF -} - -// Check if the character at the specified position is space. -func is_space(b []byte, i int) bool { - return b[i] == ' ' -} - -// Check if the character at the specified position is tab. -func is_tab(b []byte, i int) bool { - return b[i] == '\t' -} - -// Check if the character at the specified position is blank (space or tab). -func is_blank(b []byte, i int) bool { - //return is_space(b, i) || is_tab(b, i) - return b[i] == ' ' || b[i] == '\t' -} - -// Check if the character at the specified position is a line break. -func is_break(b []byte, i int) bool { - return (b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) -} - -func is_crlf(b []byte, i int) bool { - return b[i] == '\r' && b[i+1] == '\n' -} - -// Check if the character is a line break or NUL. -func is_breakz(b []byte, i int) bool { - //return is_break(b, i) || is_z(b, i) - return ( - // is_break: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - // is_z: - b[i] == 0) -} - -// Check if the character is a line break, space, or NUL. -func is_spacez(b []byte, i int) bool { - //return is_space(b, i) || is_breakz(b, i) - return ( - // is_space: - b[i] == ' ' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Check if the character is a line break, space, tab, or NUL. 
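The hard-coded byte sequences in is_break, is_breakz and friends are simply the UTF-8 encodings of the Unicode line separators NEL, LS and PS. A quick standalone check, for illustration only:

package main

import (
	"fmt"
	"unicode/utf8"
)

func main() {
	// NEL (U+0085), LS (U+2028) and PS (U+2029) are the multi-byte breaks
	// recognised above alongside plain '\r' and '\n'.
	for _, r := range []rune{0x85, 0x2028, 0x2029} {
		buf := make([]byte, utf8.RuneLen(r))
		utf8.EncodeRune(buf, r)
		fmt.Printf("U+%04X -> % X\n", r, buf)
	}
	// Output:
	// U+0085 -> C2 85
	// U+2028 -> E2 80 A8
	// U+2029 -> E2 80 A9
}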
-func is_blankz(b []byte, i int) bool { - //return is_blank(b, i) || is_breakz(b, i) - return ( - // is_blank: - b[i] == ' ' || b[i] == '\t' || - // is_breakz: - b[i] == '\r' || // CR (#xD) - b[i] == '\n' || // LF (#xA) - b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) - b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) - b[i] == 0) -} - -// Determine the width of the character. -func width(b byte) int { - // Don't replace these by a switch without first - // confirming that it is being inlined. - if b&0x80 == 0x00 { - return 1 - } - if b&0xE0 == 0xC0 { - return 2 - } - if b&0xF0 == 0xE0 { - return 3 - } - if b&0xF8 == 0xF0 { - return 4 - } - return 0 - -} diff --git a/vendor/k8s.io/api/LICENSE b/vendor/k8s.io/api/LICENSE deleted file mode 100644 index d645695..0000000 --- a/vendor/k8s.io/api/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go deleted file mode 100644 index 8a5d1fb..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// Package v1alpha1 is the v1alpha1 version of the API. -// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and validatingWebhookConfiguration is for the -// new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io -package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1" diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go deleted file mode 100644 index b87f74e..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1027 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto -// DO NOT EDIT! 
- -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto - - It has these top-level messages: - Initializer - InitializerConfiguration - InitializerConfigurationList - Rule -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *Initializer) Reset() { *m = Initializer{} } -func (*Initializer) ProtoMessage() {} -func (*Initializer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *InitializerConfiguration) Reset() { *m = InitializerConfiguration{} } -func (*InitializerConfiguration) ProtoMessage() {} -func (*InitializerConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *InitializerConfigurationList) Reset() { *m = InitializerConfigurationList{} } -func (*InitializerConfigurationList) ProtoMessage() {} -func (*InitializerConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func init() { - proto.RegisterType((*Initializer)(nil), "k8s.io.api.admissionregistration.v1alpha1.Initializer") - proto.RegisterType((*InitializerConfiguration)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfiguration") - proto.RegisterType((*InitializerConfigurationList)(nil), "k8s.io.api.admissionregistration.v1alpha1.InitializerConfigurationList") - proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1alpha1.Rule") -} -func (m *Initializer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Initializer) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *InitializerConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InitializerConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Initializers) > 0 { - for _, msg := range m.Initializers { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *InitializerConfigurationList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *InitializerConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Rule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Rule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Initializer) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *InitializerConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Initializers) > 0 { - for _, e := range m.Initializers { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *InitializerConfigurationList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Rule) 
Size() (n int) { - var l int - _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Initializer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Initializer{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *InitializerConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InitializerConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Initializers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Initializers), "Initializer", "Initializer", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *InitializerConfigurationList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&InitializerConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "InitializerConfiguration", "InitializerConfiguration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Rule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Initializer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Initializer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Initializer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, Rule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *InitializerConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InitializerConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InitializerConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Initializers", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Initializers = append(m.Initializers, Initializer{}) - if err := m.Initializers[len(m.Initializers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - 
return io.ErrUnexpectedEOF - } - return nil -} -func (m *InitializerConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: InitializerConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: InitializerConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, InitializerConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Rule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - 
} - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal 
wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 531 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x51, 0x4d, 0x8b, 0x13, 0x31, - 0x18, 0x6e, 0x6c, 0x0b, 0x6d, 0xda, 0x45, 0x19, 0x3c, 0x94, 0x22, 0xd3, 0xd2, 0x53, 0x45, 0x4c, - 0xec, 0x22, 0x8b, 0xd7, 0x9d, 0x3d, 0x48, 0xc1, 0x8f, 0x25, 0x88, 0x07, 0xf1, 0x60, 0xda, 0xbe, - 0x3b, 0x8d, 0xed, 0x4c, 0x86, 0x24, 0x53, 0xd0, 0x93, 0x17, 0xef, 0x82, 0x7f, 0xaa, 0xc7, 0x3d, - 0xee, 0xa9, 0xd8, 0x11, 0x3c, 0xfa, 0x1b, 0x24, 0x33, 0x9d, 0x9d, 0x59, 0xeb, 0xe2, 0xea, 0x2d, - 0xef, 0xf3, 0xe6, 0xf9, 0x4a, 0x30, 0x5b, 0x3c, 0xd1, 0x44, 0x48, 0xba, 0x88, 0x27, 0xa0, 0x42, - 0x30, 0xa0, 0xe9, 0x0a, 0xc2, 0x99, 0x54, 0x74, 0xb7, 0xe0, 0x91, 0xa0, 0x7c, 0x16, 0x08, 0xad, - 0x85, 0x0c, 0x15, 0xf8, 0x42, 0x1b, 0xc5, 0x8d, 0x90, 0x21, 0x5d, 0x8d, 0xf8, 0x32, 0x9a, 0xf3, - 0x11, 0xf5, 0x21, 0x04, 0xc5, 0x0d, 0xcc, 0x48, 0xa4, 0xa4, 0x91, 0xce, 0xfd, 0x8c, 0x4a, 0x78, - 0x24, 0xc8, 0x1f, 0xa9, 0x24, 0xa7, 0x76, 0x1f, 0xfa, 0xc2, 0xcc, 0xe3, 0x09, 0x99, 0xca, 0x80, - 0xfa, 0xd2, 0x97, 0x34, 0x55, 0x98, 0xc4, 0x67, 0xe9, 0x94, 0x0e, 0xe9, 0x29, 0x53, 0xee, 0x3e, - 0x2e, 0x42, 0x05, 0x7c, 0x3a, 0x17, 0x21, 0xa8, 0x0f, 0x34, 0x5a, 0xf8, 0x16, 0xd0, 0x34, 0x00, - 0xc3, 0xe9, 0x6a, 0x2f, 0x4f, 0x97, 0x5e, 0xc7, 0x52, 0x71, 0x68, 0x44, 0x00, 0x7b, 0x84, 0xa3, - 0xbf, 0x11, 0xf4, 0x74, 0x0e, 0x01, 0xff, 0x9d, 0x37, 0xf8, 0x8c, 0x70, 0x6b, 0x1c, 0x0a, 0x23, - 0xf8, 0x52, 0x7c, 0x04, 0xe5, 0xf4, 0x71, 0x2d, 0xe4, 0x01, 0x74, 0x50, 0x1f, 0x0d, 0x9b, 0x5e, - 0x7b, 0xbd, 0xe9, 0x55, 0x92, 0x4d, 0xaf, 0xf6, 0x82, 0x07, 0xc0, 0xd2, 0x8d, 0xf3, 0x0a, 0xd7, - 0x55, 0xbc, 0x04, 0xdd, 0xb9, 0xd5, 0xaf, 0x0e, 0x5b, 0x87, 0x94, 0xdc, 0xf8, 0xe9, 0x08, 0x8b, - 0x97, 0xe0, 0x1d, 0xec, 0x34, 0xeb, 0x76, 0xd2, 0x2c, 0x13, 0x1b, 0xfc, 0x44, 0xb8, 0x53, 0xca, - 0x71, 0x22, 0xc3, 0x33, 0xe1, 0xc7, 0x99, 0x80, 0xf3, 0x0e, 0x37, 0xec, 0x43, 0xcd, 0xb8, 0xe1, - 0x69, 0xb0, 0xd6, 0xe1, 0xa3, 0x92, 0xeb, 0x65, 0x5f, 0x12, 0x2d, 0x7c, 0x0b, 0x68, 0x62, 0x6f, - 0x93, 0xd5, 0x88, 0xbc, 0x9c, 0xbc, 0x87, 0xa9, 0x79, 0x0e, 0x86, 0x7b, 0xce, 0xce, 0x16, 0x17, - 0x18, 0xbb, 0x54, 0x75, 0x22, 0xdc, 0x16, 0x85, 0x7b, 0xde, 0xed, 0xe8, 0x1f, 0xba, 0x95, 0xc2, - 0x7b, 0x77, 0x77, 0x5e, 0xed, 0x12, 0xa8, 0xd9, 0x15, 0x87, 0xc1, 0x0f, 0x84, 0xef, 0x5d, 0x57, - 0xf8, 0x99, 0xd0, 0xc6, 0x79, 0xbb, 0x57, 0x9a, 0xdc, 0xac, 0xb4, 0x65, 0xa7, 0x95, 0xef, 0xec, - 0x62, 0x34, 0x72, 0xa4, 0x54, 0x78, 0x8e, 0xeb, 0xc2, 0x40, 0x90, 0x37, 0x3d, 0xf9, 0xbf, 0xa6, - 0x57, 0x52, 0x17, 0x3f, 0x3b, 0xb6, 0xca, 0x2c, 0x33, 0x18, 0x7c, 0x45, 0xb8, 0x66, 0xbf, 0xda, - 0x79, 0x80, 0x9b, 0x3c, 0x12, 0x4f, 0x95, 0x8c, 0x23, 0xdd, 0x41, 0xfd, 0xea, 0xb0, 0xe9, 0x1d, - 0x24, 0x9b, 0x5e, 0xf3, 0xf8, 0x74, 0x9c, 0x81, 0xac, 0xd8, 0x3b, 0x23, 0xdc, 0xe2, 0x91, 0x78, - 0x0d, 0xca, 0xe6, 0xc8, 0x52, 0x36, 0xbd, 0xdb, 0xc9, 0xa6, 0xd7, 0x3a, 0x3e, 0x1d, 0xe7, 0x30, - 0x2b, 0xdf, 0xb1, 0xfa, 0x0a, 0xb4, 0x8c, 0xd5, 0x14, 0x74, 0xa7, 0x5a, 0xe8, 0xb3, 0x1c, 0x64, - 0xc5, 0xde, 0x23, 0xeb, 0xad, 0x5b, 0x39, 0xdf, 0xba, 0x95, 0x8b, 0xad, 0x5b, 0xf9, 0x94, 0xb8, - 0x68, 0x9d, 0xb8, 0xe8, 0x3c, 0x71, 0xd1, 
0x45, 0xe2, 0xa2, 0x6f, 0x89, 0x8b, 0xbe, 0x7c, 0x77, - 0x2b, 0x6f, 0x1a, 0x79, 0xe9, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa2, 0x06, 0xa3, 0xcb, 0x75, - 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto deleted file mode 100644 index e17b559..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/generated.proto +++ /dev/null @@ -1,107 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.admissionregistration.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// Initializer describes the name and the failure policy of an initializer, and -// what resources it applies to. -message Initializer { - // Name is the identifier of the initializer. It will be added to the - // object that needs to be initialized. - // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where - // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required - optional string name = 1; - - // Rules describes what resources/subresources the initializer cares about. - // The initializer cares about an operation if it matches _any_ Rule. - // Rule.Resources must not include subresources. - repeated Rule rules = 2; -} - -// InitializerConfiguration describes the configuration of initializers. -message InitializerConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Initializers is a list of resources and their default initializers - // Order-sensitive. - // When merging multiple InitializerConfigurations, we sort the initializers - // from different InitializerConfigurations by the name of the - // InitializerConfigurations; the order of the initializers from the same - // InitializerConfiguration is preserved. - // +patchMergeKey=name - // +patchStrategy=merge - // +optional - repeated Initializer initializers = 2; -} - -// InitializerConfigurationList is a list of InitializerConfiguration. -message InitializerConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of InitializerConfiguration. - repeated InitializerConfiguration items = 2; -} - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. 
-message Rule { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string apiGroups = 1; - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string apiVersions = 2; - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - repeated string resources = 3; -} - diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go deleted file mode 100644 index e9a4164..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/register.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "admissionregistration.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &InitializerConfiguration{}, - &InitializerConfigurationList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go deleted file mode 100644 index a245f1e..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types.go +++ /dev/null @@ -1,106 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitializerConfiguration describes the configuration of initializers. -type InitializerConfiguration struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Initializers is a list of resources and their default initializers - // Order-sensitive. - // When merging multiple InitializerConfigurations, we sort the initializers - // from different InitializerConfigurations by the name of the - // InitializerConfigurations; the order of the initializers from the same - // InitializerConfiguration is preserved. - // +patchMergeKey=name - // +patchStrategy=merge - // +optional - Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// InitializerConfigurationList is a list of InitializerConfiguration. -type InitializerConfigurationList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of InitializerConfiguration. - Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Initializer describes the name and the failure policy of an initializer, and -// what resources it applies to. -type Initializer struct { - // Name is the identifier of the initializer. It will be added to the - // object that needs to be initialized. - // Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where - // "alwayspullimages" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // Rules describes what resources/subresources the initializer cares about. - // The initializer cares about an operation if it matches _any_ Rule. - // Rule.Resources must not include subresources. - Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"` -} - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -type Rule struct { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. 
- APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` -} diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 69e4b7c..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,71 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Initializer = map[string]string{ - "": "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.", - "name": "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required", - "rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.", -} - -func (Initializer) SwaggerDoc() map[string]string { - return map_Initializer -} - -var map_InitializerConfiguration = map[string]string{ - "": "InitializerConfiguration describes the configuration of initializers.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "initializers": "Initializers is a list of resources and their default initializers Order-sensitive. 
When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.", -} - -func (InitializerConfiguration) SwaggerDoc() map[string]string { - return map_InitializerConfiguration -} - -var map_InitializerConfigurationList = map[string]string{ - "": "InitializerConfigurationList is a list of InitializerConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of InitializerConfiguration.", -} - -func (InitializerConfigurationList) SwaggerDoc() map[string]string { - return map_InitializerConfigurationList -} - -var map_Rule = map[string]string{ - "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", - "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", - "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", - "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", -} - -func (Rule) SwaggerDoc() map[string]string { - return map_Rule -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 9f636b4..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,145 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Initializer) DeepCopyInto(out *Initializer) { - *out = *in - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]Rule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Initializer. 
-func (in *Initializer) DeepCopy() *Initializer { - if in == nil { - return nil - } - out := new(Initializer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfiguration) DeepCopyInto(out *InitializerConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Initializers != nil { - in, out := &in.Initializers, &out.Initializers - *out = make([]Initializer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfiguration. -func (in *InitializerConfiguration) DeepCopy() *InitializerConfiguration { - if in == nil { - return nil - } - out := new(InitializerConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *InitializerConfigurationList) DeepCopyInto(out *InitializerConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]InitializerConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InitializerConfigurationList. -func (in *InitializerConfigurationList) DeepCopy() *InitializerConfigurationList { - if in == nil { - return nil - } - out := new(InitializerConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *InitializerConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Rule) DeepCopyInto(out *Rule) { - *out = *in - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIVersions != nil { - in, out := &in.APIVersions, &out.APIVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. -func (in *Rule) DeepCopy() *Rule { - if in == nil { - return nil - } - out := new(Rule) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go deleted file mode 100644 index afbb3d6..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/doc.go +++ /dev/null @@ -1,25 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// Package v1beta1 is the v1beta1 version of the API. -// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration -// InitializerConfiguration and validatingWebhookConfiguration is for the -// new dynamic admission controller configuration. -// +groupName=admissionregistration.k8s.io -package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1" diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go deleted file mode 100644 index d6c9d95..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.pb.go +++ /dev/null @@ -1,2190 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto - - It has these top-level messages: - MutatingWebhookConfiguration - MutatingWebhookConfigurationList - Rule - RuleWithOperations - ServiceReference - ValidatingWebhookConfiguration - ValidatingWebhookConfigurationList - Webhook - WebhookClientConfig -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *MutatingWebhookConfiguration) Reset() { *m = MutatingWebhookConfiguration{} } -func (*MutatingWebhookConfiguration) ProtoMessage() {} -func (*MutatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *MutatingWebhookConfigurationList) Reset() { *m = MutatingWebhookConfigurationList{} } -func (*MutatingWebhookConfigurationList) ProtoMessage() {} -func (*MutatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *Rule) Reset() { *m = Rule{} } -func (*Rule) ProtoMessage() {} -func (*Rule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *RuleWithOperations) Reset() { *m = RuleWithOperations{} } -func (*RuleWithOperations) ProtoMessage() {} -func (*RuleWithOperations) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ServiceReference) Reset() { *m = ServiceReference{} } -func (*ServiceReference) ProtoMessage() {} -func (*ServiceReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *ValidatingWebhookConfiguration) Reset() { *m = ValidatingWebhookConfiguration{} } -func (*ValidatingWebhookConfiguration) ProtoMessage() {} -func (*ValidatingWebhookConfiguration) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func (m *ValidatingWebhookConfigurationList) Reset() { *m = ValidatingWebhookConfigurationList{} } -func (*ValidatingWebhookConfigurationList) ProtoMessage() {} -func (*ValidatingWebhookConfigurationList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} -} - -func (m *Webhook) Reset() { *m = Webhook{} } -func (*Webhook) ProtoMessage() {} -func (*Webhook) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *WebhookClientConfig) Reset() { *m = WebhookClientConfig{} } -func (*WebhookClientConfig) ProtoMessage() {} -func (*WebhookClientConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func init() { - proto.RegisterType((*MutatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfiguration") - proto.RegisterType((*MutatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.MutatingWebhookConfigurationList") - proto.RegisterType((*Rule)(nil), "k8s.io.api.admissionregistration.v1beta1.Rule") - proto.RegisterType((*RuleWithOperations)(nil), "k8s.io.api.admissionregistration.v1beta1.RuleWithOperations") - proto.RegisterType((*ServiceReference)(nil), "k8s.io.api.admissionregistration.v1beta1.ServiceReference") - proto.RegisterType((*ValidatingWebhookConfiguration)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfiguration") - proto.RegisterType((*ValidatingWebhookConfigurationList)(nil), "k8s.io.api.admissionregistration.v1beta1.ValidatingWebhookConfigurationList") - proto.RegisterType((*Webhook)(nil), "k8s.io.api.admissionregistration.v1beta1.Webhook") - proto.RegisterType((*WebhookClientConfig)(nil), "k8s.io.api.admissionregistration.v1beta1.WebhookClientConfig") -} -func (m *MutatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MutatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ 
= i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MutatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MutatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Rule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Rule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *RuleWithOperations) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RuleWithOperations) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Rule.Size())) - n3, err := m.Rule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *ServiceReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], 
m.Name) - if m.Path != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Path))) - i += copy(dAtA[i:], *m.Path) - } - return i, nil -} - -func (m *ValidatingWebhookConfiguration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidatingWebhookConfiguration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Webhooks) > 0 { - for _, msg := range m.Webhooks { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ValidatingWebhookConfigurationList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ValidatingWebhookConfigurationList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Webhook) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Webhook) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientConfig.Size())) - n6, err := m.ClientConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.FailurePolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FailurePolicy))) - i += copy(dAtA[i:], *m.FailurePolicy) - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n7, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.SideEffects != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects))) - i += copy(dAtA[i:], *m.SideEffects) - } - return i, nil -} - -func (m *WebhookClientConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Service != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Service.Size())) - n8, err := m.Service.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.CABundle != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CABundle))) - i += copy(dAtA[i:], m.CABundle) - } - if m.URL != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.URL))) - i += copy(dAtA[i:], *m.URL) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *MutatingWebhookConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *MutatingWebhookConfigurationList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Rule) Size() (n int) { - var l int - _ = l - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.APIVersions) > 0 { - for _, s := range m.APIVersions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RuleWithOperations) Size() (n int) { - var l int - _ = l - if len(m.Operations) > 0 { - for _, s := range m.Operations { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.Rule.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ServiceReference) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Path != nil { - l = len(*m.Path) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ValidatingWebhookConfiguration) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Webhooks) > 0 { - for _, e := range m.Webhooks { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ValidatingWebhookConfigurationList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Webhook) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ClientConfig.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - 
n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.FailurePolicy != nil { - l = len(*m.FailurePolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.SideEffects != nil { - l = len(*m.SideEffects) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *WebhookClientConfig) Size() (n int) { - var l int - _ = l - if m.Service != nil { - l = m.Service.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CABundle != nil { - l = len(m.CABundle) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.URL != nil { - l = len(*m.URL) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *MutatingWebhookConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MutatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *MutatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MutatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "MutatingWebhookConfiguration", "MutatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Rule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Rule{`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `}`, - }, "") - return s -} -func (this *RuleWithOperations) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RuleWithOperations{`, - `Operations:` + fmt.Sprintf("%v", this.Operations) + `,`, - `Rule:` + strings.Replace(strings.Replace(this.Rule.String(), "Rule", "Rule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceReference{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Path:` + valueToStringGenerated(this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingWebhookConfiguration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ValidatingWebhookConfiguration{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Webhooks:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Webhooks), "Webhook", "Webhook", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ValidatingWebhookConfigurationList) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&ValidatingWebhookConfigurationList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ValidatingWebhookConfiguration", "ValidatingWebhookConfiguration", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Webhook) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Webhook{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ClientConfig:` + strings.Replace(strings.Replace(this.ClientConfig.String(), "WebhookClientConfig", "WebhookClientConfig", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "RuleWithOperations", "RuleWithOperations", 1), `&`, ``, 1) + `,`, - `FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`, - `}`, - }, "") - return s -} -func (this *WebhookClientConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WebhookClientConfig{`, - `Service:` + strings.Replace(fmt.Sprintf("%v", this.Service), "ServiceReference", "ServiceReference", 1) + `,`, - `CABundle:` + valueToStringGenerated(this.CABundle) + `,`, - `URL:` + valueToStringGenerated(this.URL) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *MutatingWebhookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Webhooks = append(m.Webhooks, Webhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MutatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MutatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, MutatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Rule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Rule: wiretype end group for non-group") - } - if fieldNum <= 0 { - 
return fmt.Errorf("proto: Rule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersions = append(m.APIVersions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RuleWithOperations) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RuleWithOperations: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RuleWithOperations: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operations", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operations = append(m.Operations, OperationType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Rule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := 
int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Path = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidatingWebhookConfiguration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ValidatingWebhookConfiguration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Webhooks", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Webhooks = append(m.Webhooks, Webhook{}) - if err := m.Webhooks[len(m.Webhooks)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ValidatingWebhookConfigurationList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ValidatingWebhookConfigurationList: wiretype end group for non-group") - } - if fieldNum <= 0 { - 
return fmt.Errorf("proto: ValidatingWebhookConfigurationList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ValidatingWebhookConfiguration{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Webhook) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Webhook: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Webhook: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex 
> l { - return io.ErrUnexpectedEOF - } - if err := m.ClientConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, RuleWithOperations{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FailurePolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := FailurePolicyType(dAtA[iNdEx:postIndex]) - m.FailurePolicy = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SideEffects", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := SideEffectClass(dAtA[iNdEx:postIndex]) - m.SideEffects = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WebhookClientConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WebhookClientConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WebhookClientConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Service == nil { - m.Service = &ServiceReference{} - } - if err := m.Service.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CABundle", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CABundle = append(m.CABundle[:0], dAtA[iNdEx:postIndex]...) - if m.CABundle == nil { - m.CABundle = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field URL", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.URL = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 906 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0x8e, 0x37, 0x29, 0x49, 0x26, 0x89, 0x76, 0x3b, 0x80, 0x14, 0xaa, 0x95, 0x1d, 0xe5, 0x80, - 0x22, 0xa1, 0xb5, 0x49, 0x41, 0x08, 0x21, 0x10, 0xaa, 0x0b, 0x0b, 0x95, 0xba, 0xbb, 0x61, 0x0a, - 0xbb, 0x12, 0xe2, 0xc0, 0xc4, 0x79, 0x49, 0x86, 0xf8, 0x97, 0x66, 0xc6, 0x59, 0x7a, 0x43, 0xe2, - 0x1f, 0x40, 0x42, 0xfc, 0x0d, 0xfc, 0x15, 0xdc, 0x7b, 0xdc, 0x0b, 0x62, 0x4f, 0x16, 0x35, 0x67, - 0x0e, 0x5c, 0x7b, 0x42, 0x63, 0x3b, 0x71, 0xd2, 0x6c, 0xbb, 0xe9, 0x85, 0x03, 0x37, 0xcf, 0xf7, - 0xe6, 0xfb, 0xde, 0xfb, 0x9e, 0xdf, 0x1b, 0xf4, 0xc5, 0xec, 0x7d, 0x61, 0xb2, 0xc0, 0x9a, 0x45, - 0x43, 0xe0, 0x3e, 0x48, 0x10, 0xd6, 0x1c, 0xfc, 0x51, 0xc0, 0xad, 0x3c, 0x40, 0x43, 0x66, 0xd1, - 0x91, 0xc7, 0x84, 0x60, 0x81, 0xcf, 0x61, 0xc2, 0x84, 0xe4, 0x54, 0xb2, 0xc0, 0xb7, 0xe6, 0xfd, - 0x21, 0x48, 0xda, 0xb7, 0x26, 0xe0, 0x03, 0xa7, 0x12, 0x46, 0x66, 0xc8, 0x03, 0x19, 0xe0, 0x5e, - 0xc6, 0x34, 0x69, 0xc8, 0xcc, 0x17, 0x32, 0xcd, 0x9c, 0xb9, 0x77, 0x6f, 0xc2, 0xe4, 0x34, 0x1a, - 0x9a, 0x4e, 0xe0, 0x59, 0x93, 0x60, 0x12, 0x58, 0xa9, 0xc0, 0x30, 0x1a, 0xa7, 0xa7, 0xf4, 0x90, - 0x7e, 0x65, 0xc2, 0x7b, 0xef, 0x16, 0x25, 0x79, 0xd4, 0x99, 0x32, 0x1f, 0xf8, 0xa9, 0x15, 0xce, - 0x26, 0x0a, 0x10, 0x96, 0x07, 0x92, 0x5a, 0xf3, 0x8d, 0x72, 0xf6, 0xac, 0xab, 0x58, 0x3c, 0xf2, - 0x25, 0xf3, 0x60, 0x83, 0xf0, 0xde, 0xcb, 0x08, 0xc2, 0x99, 0x82, 0x47, 0x2f, 0xf3, 0xba, 0xbf, - 0x6b, 0xe8, 0xee, 0x83, 0x48, 0x52, 0xc9, 0xfc, 0xc9, 0x13, 0x18, 0x4e, 0x83, 0x60, 0x76, 0x18, - 0xf8, 0x63, 0x36, 0x89, 0x32, 0xdb, 0xf8, 0x5b, 0x54, 0x53, 0x45, 0x8e, 0xa8, 0xa4, 0x6d, 0xad, - 0xa3, 0xf5, 0x1a, 0xfb, 0x6f, 0x9b, 0x45, 0xaf, 0x96, 0xb9, 0xcc, 0x70, 0x36, 0x51, 0x80, 0x30, - 0xd5, 0x6d, 0x73, 0xde, 0x37, 0x1f, 0x0d, 0xbf, 0x03, 0x47, 0x3e, 0x00, 0x49, 0x6d, 0x7c, 0x16, - 0x1b, 0xa5, 0x24, 0x36, 0x50, 0x81, 0x91, 0xa5, 0x2a, 0x3e, 0x41, 0xb5, 0x3c, 0xb3, 0x68, 0xdf, - 0xea, 0x94, 0x7b, 0x8d, 0xfd, 0xbe, 0xb9, 0xed, 0xdf, 0x30, 0x73, 0xa6, 0x5d, 0x51, 0x29, 0x48, - 0xed, 0x69, 0x2e, 0xd4, 0xfd, 0x5b, 0x43, 0x9d, 0xeb, 0x7c, 0x1d, 0x33, 0x21, 0xf1, 0x37, 0x1b, - 0xde, 0xcc, 0xed, 0xbc, 0x29, 0x76, 0xea, 0xec, 
0x4e, 0xee, 0xac, 0xb6, 0x40, 0x56, 0x7c, 0xcd, - 0xd0, 0x0e, 0x93, 0xe0, 0x2d, 0x4c, 0xdd, 0xdf, 0xde, 0xd4, 0x75, 0x85, 0xdb, 0xad, 0x3c, 0xe5, - 0xce, 0x91, 0x12, 0x27, 0x59, 0x8e, 0xee, 0xcf, 0x1a, 0xaa, 0x90, 0xc8, 0x05, 0xfc, 0x16, 0xaa, - 0xd3, 0x90, 0x7d, 0xc6, 0x83, 0x28, 0x14, 0x6d, 0xad, 0x53, 0xee, 0xd5, 0xed, 0x56, 0x12, 0x1b, - 0xf5, 0x83, 0xc1, 0x51, 0x06, 0x92, 0x22, 0x8e, 0xfb, 0xa8, 0x41, 0x43, 0xf6, 0x18, 0xb8, 0x2a, - 0x25, 0x2b, 0xb4, 0x6e, 0xdf, 0x4e, 0x62, 0xa3, 0x71, 0x30, 0x38, 0x5a, 0xc0, 0x64, 0xf5, 0x8e, - 0xd2, 0xe7, 0x20, 0x82, 0x88, 0x3b, 0x20, 0xda, 0xe5, 0x42, 0x9f, 0x2c, 0x40, 0x52, 0xc4, 0xbb, - 0xbf, 0x6a, 0x08, 0xab, 0xaa, 0x9e, 0x30, 0x39, 0x7d, 0x14, 0x42, 0xe6, 0x40, 0xe0, 0x8f, 0x11, - 0x0a, 0x96, 0xa7, 0xbc, 0x48, 0x23, 0x9d, 0x8f, 0x25, 0x7a, 0x11, 0x1b, 0xad, 0xe5, 0xe9, 0xcb, - 0xd3, 0x10, 0xc8, 0x0a, 0x05, 0x0f, 0x50, 0x85, 0x47, 0x2e, 0xb4, 0x6f, 0x6d, 0xfc, 0xb4, 0x97, - 0x74, 0x56, 0x15, 0x63, 0x37, 0xf3, 0x0e, 0xa6, 0x0d, 0x23, 0xa9, 0x52, 0xf7, 0x47, 0x0d, 0xdd, - 0x39, 0x01, 0x3e, 0x67, 0x0e, 0x10, 0x18, 0x03, 0x07, 0xdf, 0x01, 0x6c, 0xa1, 0xba, 0x4f, 0x3d, - 0x10, 0x21, 0x75, 0x20, 0x1d, 0x90, 0xba, 0xbd, 0x9b, 0x73, 0xeb, 0x0f, 0x17, 0x01, 0x52, 0xdc, - 0xc1, 0x1d, 0x54, 0x51, 0x87, 0xb4, 0xae, 0x7a, 0x91, 0x47, 0xdd, 0x25, 0x69, 0x04, 0xdf, 0x45, - 0x95, 0x90, 0xca, 0x69, 0xbb, 0x9c, 0xde, 0xa8, 0xa9, 0xe8, 0x80, 0xca, 0x29, 0x49, 0xd1, 0xee, - 0x1f, 0x1a, 0xd2, 0x1f, 0x53, 0x97, 0x8d, 0xfe, 0x77, 0xfb, 0xf8, 0x8f, 0x86, 0xba, 0xd7, 0x3b, - 0xfb, 0x0f, 0x36, 0xd2, 0x5b, 0xdf, 0xc8, 0xcf, 0xb7, 0xb7, 0x75, 0x7d, 0xe9, 0x57, 0xec, 0xe4, - 0x2f, 0x15, 0x54, 0xcd, 0xaf, 0x2f, 0x27, 0x43, 0xbb, 0x72, 0x32, 0x9e, 0xa2, 0xa6, 0xe3, 0x32, - 0xf0, 0x65, 0x26, 0x9d, 0xcf, 0xf6, 0x47, 0x37, 0x6e, 0xfd, 0xe1, 0x8a, 0x88, 0xfd, 0x5a, 0x9e, - 0xa8, 0xb9, 0x8a, 0x92, 0xb5, 0x44, 0x98, 0xa2, 0x1d, 0xb5, 0x02, 0xd9, 0x36, 0x37, 0xf6, 0x3f, - 0xbc, 0xd9, 0x36, 0xad, 0xaf, 0x76, 0xd1, 0x09, 0x15, 0x13, 0x24, 0x53, 0xc6, 0xc7, 0xa8, 0x35, - 0xa6, 0xcc, 0x8d, 0x38, 0x0c, 0x02, 0x97, 0x39, 0xa7, 0xed, 0x4a, 0xda, 0x86, 0x37, 0x93, 0xd8, - 0x68, 0xdd, 0x5f, 0x0d, 0x5c, 0xc4, 0xc6, 0xee, 0x1a, 0x90, 0xae, 0xfe, 0x3a, 0x19, 0x7f, 0x8f, - 0x76, 0x97, 0x2b, 0x77, 0x02, 0x2e, 0x38, 0x32, 0xe0, 0xed, 0x9d, 0xb4, 0x5d, 0xef, 0x6c, 0x39, - 0x2d, 0x74, 0x08, 0xee, 0x82, 0x6a, 0xbf, 0x9e, 0xc4, 0xc6, 0xee, 0xc3, 0xcb, 0x8a, 0x64, 0x33, - 0x09, 0xfe, 0x04, 0x35, 0x04, 0x1b, 0xc1, 0xa7, 0xe3, 0x31, 0x38, 0x52, 0xb4, 0x5f, 0x49, 0x5d, - 0x74, 0xd5, 0x7b, 0x79, 0x52, 0xc0, 0x17, 0xb1, 0x71, 0xbb, 0x38, 0x1e, 0xba, 0x54, 0x08, 0xb2, - 0x4a, 0xeb, 0xfe, 0xa6, 0xa1, 0x57, 0x5f, 0xf0, 0xb3, 0x30, 0x45, 0x55, 0x91, 0x3d, 0x41, 0xf9, - 0xec, 0x7f, 0xb0, 0xfd, 0xaf, 0xb8, 0xfc, 0x76, 0xd9, 0x8d, 0x24, 0x36, 0xaa, 0x0b, 0x74, 0xa1, - 0x8b, 0x7b, 0xa8, 0xe6, 0x50, 0x3b, 0xf2, 0x47, 0xf9, 0xe3, 0xd9, 0xb4, 0x9b, 0x6a, 0x57, 0x0e, - 0x0f, 0x32, 0x8c, 0x2c, 0xa3, 0xf8, 0x0d, 0x54, 0x8e, 0xb8, 0x9b, 0xbf, 0x53, 0xd5, 0x24, 0x36, - 0xca, 0x5f, 0x91, 0x63, 0xa2, 0x30, 0xfb, 0xde, 0xd9, 0xb9, 0x5e, 0x7a, 0x76, 0xae, 0x97, 0x9e, - 0x9f, 0xeb, 0xa5, 0x1f, 0x12, 0x5d, 0x3b, 0x4b, 0x74, 0xed, 0x59, 0xa2, 0x6b, 0xcf, 0x13, 0x5d, - 0xfb, 0x33, 0xd1, 0xb5, 0x9f, 0xfe, 0xd2, 0x4b, 0x5f, 0x57, 0xf3, 0xd2, 0xfe, 0x0d, 0x00, 0x00, - 0xff, 0xff, 0x85, 0x06, 0x8c, 0x7f, 0xae, 0x09, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto b/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto deleted file mode 100644 index 4d55ca8..0000000 --- 
a/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto +++ /dev/null @@ -1,269 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.admissionregistration.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. -message MutatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Webhooks is a list of webhooks and the affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated Webhook Webhooks = 2; -} - -// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. -message MutatingWebhookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of MutatingWebhookConfiguration. - repeated MutatingWebhookConfiguration items = 2; -} - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -message Rule { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string apiGroups = 1; - - // APIVersions is the API versions the resources belong to. '*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - repeated string apiVersions = 2; - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - repeated string resources = 3; -} - -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -message RuleWithOperations { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. 
- // If '*' is present, the length of the slice must be one. - // Required. - repeated string operations = 1; - - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - optional Rule rule = 2; -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -message ServiceReference { - // `namespace` is the namespace of the service. - // Required - optional string namespace = 1; - - // `name` is the name of the service. - // Required - optional string name = 2; - - // `path` is an optional URL path which will be sent in any request to - // this service. - // +optional - optional string path = 3; -} - -// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. -message ValidatingWebhookConfiguration { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Webhooks is a list of webhooks and the affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated Webhook Webhooks = 2; -} - -// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. -message ValidatingWebhookConfigurationList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ValidatingWebhookConfiguration. - repeated ValidatingWebhookConfiguration items = 2; -} - -// Webhook describes an admission webhook and the resources and operations it applies to. -message Webhook { - // The name of the admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - optional string name = 1; - - // ClientConfig defines how to communicate with the hook. - // Required - optional WebhookClientConfig clientConfig = 2; - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks - // from putting the cluster in a state which cannot be recovered from without completely - // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called - // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - repeated RuleWithOperations rules = 3; - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - optional string failurePolicy = 4; - - // NamespaceSelector decides whether to run the webhook on an object based - // on whether the namespace for that object matches the selector. If the - // object itself is a namespace, the matching is performed on - // object.metadata.labels. If the object is another cluster scoped resource, - // it never skips the webhook. 
- // - // For example, to run the webhook on any objects whose namespace is not - // associated with "runlevel" of "0" or "1"; you will set the selector as - // follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "runlevel", - // "operator": "NotIn", - // "values": [ - // "0", - // "1" - // ] - // } - // ] - // } - // - // If instead you want to only run the webhook on any objects whose - // namespace is associated with the "environment" of "prod" or "staging"; - // you will set the selector as follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "environment", - // "operator": "In", - // "values": [ - // "prod", - // "staging" - // ] - // } - // ] - // } - // - // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - // for more examples of label selectors. - // - // Default to the empty LabelSelector, which matches everything. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 5; - - // SideEffects states whether this webhookk has side effects. - // Acceptable values are: Unknown, None, Some, NoneOnDryRun - // Webhooks with side effects MUST implement a reconciliation system, since a request may be - // rejected by a future step in the admission change and the side effects therefore need to be undone. - // Requests with the dryRun attribute will be auto-rejected if they match a webhook with - // sideEffects == Unknown or Some. Defaults to Unknown. - // +optional - optional string sideEffects = 6; -} - -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook -message WebhookClientConfig { - // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - optional string url = 3; - - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // Port 443 will be used if it is open, otherwise it is an error. - // - // +optional - optional ServiceReference service = 1; - - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. 
- optional bytes caBundle = 2; -} - diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go b/vendor/k8s.io/api/admissionregistration/v1beta1/register.go deleted file mode 100644 index d126da9..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "admissionregistration.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &ValidatingWebhookConfiguration{}, - &ValidatingWebhookConfigurationList{}, - &MutatingWebhookConfiguration{}, - &MutatingWebhookConfigurationList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types.go deleted file mode 100644 index 0b948ba..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types.go +++ /dev/null @@ -1,306 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended -// to make sure that all the tuple expansions are valid. -type Rule struct { - // APIGroups is the API groups the resources belong to. '*' is all groups. - // If '*' is present, the length of the slice must be one. - // Required. - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"` - - // APIVersions is the API versions the resources belong to. 
'*' is all versions. - // If '*' is present, the length of the slice must be one. - // Required. - APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"` - - // Resources is a list of resources this rule applies to. - // - // For example: - // 'pods' means pods. - // 'pods/log' means the log subresource of pods. - // '*' means all resources, but not subresources. - // 'pods/*' means all subresources of pods. - // '*/scale' means all scale subresources. - // '*/*' means all resources and their subresources. - // - // If wildcard is present, the validation rule will ensure resources do not - // overlap with each other. - // - // Depending on the enclosing object, subresources might not be allowed. - // Required. - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` -} - -type FailurePolicyType string - -const ( - // Ignore means that an error calling the webhook is ignored. - Ignore FailurePolicyType = "Ignore" - // Fail means that an error calling the webhook causes the admission to fail. - Fail FailurePolicyType = "Fail" -) - -type SideEffectClass string - -const ( - // SideEffectClassUnknown means that no information is known about the side effects of calling the webhook. - // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. - SideEffectClassUnknown SideEffectClass = "Unknown" - // SideEffectClassNone means that calling the webhook will have no side effects. - SideEffectClassNone SideEffectClass = "None" - // SideEffectClassSome means that calling the webhook will possibly have side effects. - // If a request with the dry-run attribute would trigger a call to this webhook, the request will instead fail. - SideEffectClassSome SideEffectClass = "Some" - // SideEffectClassNoneOnDryRun means that calling the webhook will possibly have side effects, but if the - // request being reviewed has the dry-run attribute, the side effects will be suppressed. - SideEffectClassNoneOnDryRun SideEffectClass = "NoneOnDryRun" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it. -type ValidatingWebhookConfiguration struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Webhooks is a list of webhooks and the affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration. -type ValidatingWebhookConfigurationList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of ValidatingWebhookConfiguration. 
- Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object. -type MutatingWebhookConfiguration struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Webhooks is a list of webhooks and the affected resources and operations. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration. -type MutatingWebhookConfigurationList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of MutatingWebhookConfiguration. - Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Webhook describes an admission webhook and the resources and operations it applies to. -type Webhook struct { - // The name of the admission webhook. - // Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where - // "imagepolicy" is the name of the webhook, and kubernetes.io is the name - // of the organization. - // Required. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // ClientConfig defines how to communicate with the hook. - // Required - ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"` - - // Rules describes what operations on what resources/subresources the webhook cares about. - // The webhook cares about an operation if it matches _any_ Rule. - // However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks - // from putting the cluster in a state which cannot be recovered from without completely - // disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called - // on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects. - Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - - // FailurePolicy defines how unrecognized errors from the admission endpoint are handled - - // allowed values are Ignore or Fail. Defaults to Ignore. - // +optional - FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"` - - // NamespaceSelector decides whether to run the webhook on an object based - // on whether the namespace for that object matches the selector. If the - // object itself is a namespace, the matching is performed on - // object.metadata.labels. If the object is another cluster scoped resource, - // it never skips the webhook. 
- // - // For example, to run the webhook on any objects whose namespace is not - // associated with "runlevel" of "0" or "1"; you will set the selector as - // follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "runlevel", - // "operator": "NotIn", - // "values": [ - // "0", - // "1" - // ] - // } - // ] - // } - // - // If instead you want to only run the webhook on any objects whose - // namespace is associated with the "environment" of "prod" or "staging"; - // you will set the selector as follows: - // "namespaceSelector": { - // "matchExpressions": [ - // { - // "key": "environment", - // "operator": "In", - // "values": [ - // "prod", - // "staging" - // ] - // } - // ] - // } - // - // See - // https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ - // for more examples of label selectors. - // - // Default to the empty LabelSelector, which matches everything. - // +optional - NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"` - - // SideEffects states whether this webhookk has side effects. - // Acceptable values are: Unknown, None, Some, NoneOnDryRun - // Webhooks with side effects MUST implement a reconciliation system, since a request may be - // rejected by a future step in the admission change and the side effects therefore need to be undone. - // Requests with the dryRun attribute will be auto-rejected if they match a webhook with - // sideEffects == Unknown or Some. Defaults to Unknown. - // +optional - SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"` -} - -// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make -// sure that all the tuple expansions are valid. -type RuleWithOperations struct { - // Operations is the operations the admission hook cares about - CREATE, UPDATE, or * - // for all operations. - // If '*' is present, the length of the slice must be one. - // Required. - Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"` - // Rule is embedded, it describes other criteria of the rule, like - // APIGroups, APIVersions, Resources, etc. - Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"` -} - -type OperationType string - -// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go. -const ( - OperationAll OperationType = "*" - Create OperationType = "CREATE" - Update OperationType = "UPDATE" - Delete OperationType = "DELETE" - Connect OperationType = "CONNECT" -) - -// WebhookClientConfig contains the information to make a TLS -// connection with the webhook -type WebhookClientConfig struct { - // `url` gives the location of the webhook, in standard URL form - // (`[scheme://]host:port/path`). Exactly one of `url` or `service` - // must be specified. - // - // The `host` should not refer to a service running in the cluster; use - // the `service` field instead. The host might be resolved via external - // DNS in some apiservers (e.g., `kube-apiserver` cannot resolve - // in-cluster DNS as that would be a layering violation). `host` may - // also be an IP address. - // - // Please note that using `localhost` or `127.0.0.1` as a `host` is - // risky unless you take great care to run this webhook on all hosts - // which run an apiserver which might need to make calls to this - // webhook. 
Such installs are likely to be non-portable, i.e., not easy - // to turn up in a new cluster. - // - // The scheme must be "https"; the URL must begin with "https://". - // - // A path is optional, and if present may be any string permissible in - // a URL. You may use the path to pass an arbitrary string to the - // webhook, for example, a cluster identifier. - // - // Attempting to use a user or basic auth e.g. "user:password@" is not - // allowed. Fragments ("#...") and query parameters ("?...") are not - // allowed, either. - // - // +optional - URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"` - - // `service` is a reference to the service for this webhook. Either - // `service` or `url` must be specified. - // - // If the webhook is running within the cluster, then you should use `service`. - // - // Port 443 will be used if it is open, otherwise it is an error. - // - // +optional - Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"` - - // `caBundle` is a PEM encoded CA bundle which will be used to validate - // the webhook's server certificate. - // Required. - CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"` -} - -// ServiceReference holds a reference to Service.legacy.k8s.io -type ServiceReference struct { - // `namespace` is the namespace of the service. - // Required - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - // `name` is the name of the service. - // Required - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - - // `path` is an optional URL path which will be sent in any request to - // this service. - // +optional - Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"` -} diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index aab917a..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
-var map_MutatingWebhookConfiguration = map[string]string{ - "": "MutatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and may change the object.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", -} - -func (MutatingWebhookConfiguration) SwaggerDoc() map[string]string { - return map_MutatingWebhookConfiguration -} - -var map_MutatingWebhookConfigurationList = map[string]string{ - "": "MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of MutatingWebhookConfiguration.", -} - -func (MutatingWebhookConfigurationList) SwaggerDoc() map[string]string { - return map_MutatingWebhookConfigurationList -} - -var map_Rule = map[string]string{ - "": "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.", - "apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.", - "apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.", - "resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.", -} - -func (Rule) SwaggerDoc() map[string]string { - return map_Rule -} - -var map_RuleWithOperations = map[string]string{ - "": "RuleWithOperations is a tuple of Operations and Resources. It is recommended to make sure that all the tuple expansions are valid.", - "operations": "Operations is the operations the admission hook cares about - CREATE, UPDATE, or * for all operations. If '*' is present, the length of the slice must be one. Required.", -} - -func (RuleWithOperations) SwaggerDoc() map[string]string { - return map_RuleWithOperations -} - -var map_ServiceReference = map[string]string{ - "": "ServiceReference holds a reference to Service.legacy.k8s.io", - "namespace": "`namespace` is the namespace of the service. Required", - "name": "`name` is the name of the service. 
Required", - "path": "`path` is an optional URL path which will be sent in any request to this service.", -} - -func (ServiceReference) SwaggerDoc() map[string]string { - return map_ServiceReference -} - -var map_ValidatingWebhookConfiguration = map[string]string{ - "": "ValidatingWebhookConfiguration describes the configuration of and admission webhook that accept or reject and object without changing it.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "webhooks": "Webhooks is a list of webhooks and the affected resources and operations.", -} - -func (ValidatingWebhookConfiguration) SwaggerDoc() map[string]string { - return map_ValidatingWebhookConfiguration -} - -var map_ValidatingWebhookConfigurationList = map[string]string{ - "": "ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ValidatingWebhookConfiguration.", -} - -func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string { - return map_ValidatingWebhookConfigurationList -} - -var map_Webhook = map[string]string{ - "": "Webhook describes an admission webhook and the resources and operations it applies to.", - "name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.", - "clientConfig": "ClientConfig defines how to communicate with the hook. Required", - "rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.", - "failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.", - "namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. 
If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"runlevel\",\n \"operator\": \"NotIn\",\n \"values\": [\n \"0\",\n \"1\"\n ]\n }\n ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n \"matchExpressions\": [\n {\n \"key\": \"environment\",\n \"operator\": \"In\",\n \"values\": [\n \"prod\",\n \"staging\"\n ]\n }\n ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefault to the empty LabelSelector, which matches everything.", - "sideEffects": "SideEffects states whether this webhookk has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission change and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.", -} - -func (Webhook) SwaggerDoc() map[string]string { - return map_Webhook -} - -var map_WebhookClientConfig = map[string]string{ - "": "WebhookClientConfig contains the information to make a TLS connection with the webhook", - "url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.", - "service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.", - "caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. 
Required.", -} - -func (WebhookClientConfig) SwaggerDoc() map[string]string { - return map_WebhookClientConfig -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index c6867be..0000000 --- a/vendor/k8s.io/api/admissionregistration/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,302 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MutatingWebhookConfiguration) DeepCopyInto(out *MutatingWebhookConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Webhooks != nil { - in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfiguration. -func (in *MutatingWebhookConfiguration) DeepCopy() *MutatingWebhookConfiguration { - if in == nil { - return nil - } - out := new(MutatingWebhookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MutatingWebhookConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MutatingWebhookConfigurationList) DeepCopyInto(out *MutatingWebhookConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]MutatingWebhookConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MutatingWebhookConfigurationList. -func (in *MutatingWebhookConfigurationList) DeepCopy() *MutatingWebhookConfigurationList { - if in == nil { - return nil - } - out := new(MutatingWebhookConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *MutatingWebhookConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Rule) DeepCopyInto(out *Rule) { - *out = *in - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIVersions != nil { - in, out := &in.APIVersions, &out.APIVersions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule. -func (in *Rule) DeepCopy() *Rule { - if in == nil { - return nil - } - out := new(Rule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RuleWithOperations) DeepCopyInto(out *RuleWithOperations) { - *out = *in - if in.Operations != nil { - in, out := &in.Operations, &out.Operations - *out = make([]OperationType, len(*in)) - copy(*out, *in) - } - in.Rule.DeepCopyInto(&out.Rule) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleWithOperations. -func (in *RuleWithOperations) DeepCopy() *RuleWithOperations { - if in == nil { - return nil - } - out := new(RuleWithOperations) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceReference) DeepCopyInto(out *ServiceReference) { - *out = *in - if in.Path != nil { - in, out := &in.Path, &out.Path - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceReference. -func (in *ServiceReference) DeepCopy() *ServiceReference { - if in == nil { - return nil - } - out := new(ServiceReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ValidatingWebhookConfiguration) DeepCopyInto(out *ValidatingWebhookConfiguration) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Webhooks != nil { - in, out := &in.Webhooks, &out.Webhooks - *out = make([]Webhook, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfiguration. -func (in *ValidatingWebhookConfiguration) DeepCopy() *ValidatingWebhookConfiguration { - if in == nil { - return nil - } - out := new(ValidatingWebhookConfiguration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ValidatingWebhookConfiguration) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ValidatingWebhookConfigurationList) DeepCopyInto(out *ValidatingWebhookConfigurationList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ValidatingWebhookConfiguration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ValidatingWebhookConfigurationList. -func (in *ValidatingWebhookConfigurationList) DeepCopy() *ValidatingWebhookConfigurationList { - if in == nil { - return nil - } - out := new(ValidatingWebhookConfigurationList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ValidatingWebhookConfigurationList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Webhook) DeepCopyInto(out *Webhook) { - *out = *in - in.ClientConfig.DeepCopyInto(&out.ClientConfig) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]RuleWithOperations, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.FailurePolicy != nil { - in, out := &in.FailurePolicy, &out.FailurePolicy - *out = new(FailurePolicyType) - **out = **in - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.SideEffects != nil { - in, out := &in.SideEffects, &out.SideEffects - *out = new(SideEffectClass) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Webhook. -func (in *Webhook) DeepCopy() *Webhook { - if in == nil { - return nil - } - out := new(Webhook) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WebhookClientConfig) DeepCopyInto(out *WebhookClientConfig) { - *out = *in - if in.URL != nil { - in, out := &in.URL, &out.URL - *out = new(string) - **out = **in - } - if in.Service != nil { - in, out := &in.Service, &out.Service - *out = new(ServiceReference) - (*in).DeepCopyInto(*out) - } - if in.CABundle != nil { - in, out := &in.CABundle, &out.CABundle - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WebhookClientConfig. -func (in *WebhookClientConfig) DeepCopy() *WebhookClientConfig { - if in == nil { - return nil - } - out := new(WebhookClientConfig) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/apps/v1/doc.go b/vendor/k8s.io/api/apps/v1/doc.go deleted file mode 100644 index 1d66c22..0000000 --- a/vendor/k8s.io/api/apps/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -package v1 // import "k8s.io/api/apps/v1" diff --git a/vendor/k8s.io/api/apps/v1/generated.pb.go b/vendor/k8s.io/api/apps/v1/generated.pb.go deleted file mode 100644 index eac6ef2..0000000 --- a/vendor/k8s.io/api/apps/v1/generated.pb.go +++ /dev/null @@ -1,6943 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentSpec - DeploymentStatus - DeploymentStrategy - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - RollingUpdateDaemonSet - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) 
Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (*RollingUpdateDeployment) ProtoMessage() {} -func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} -} - -func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } -func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} -func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} -} - -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } - -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } - -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } - -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } - -func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } -func (*StatefulSetUpdateStrategy) ProtoMessage() {} -func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{27} -} - -func init() { - proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1.ControllerRevision") - proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1.ControllerRevisionList") - proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1.DaemonSet") - proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1.DaemonSetCondition") - proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1.DaemonSetList") - proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1.DaemonSetSpec") - proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1.DaemonSetStatus") - proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.DaemonSetUpdateStrategy") - proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1.DeploymentList") - proto.RegisterType((*DeploymentSpec)(nil), 
"k8s.io.api.apps.v1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1.DeploymentStrategy") - proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1.ReplicaSet") - proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1.ReplicaSetCondition") - proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1.ReplicaSetList") - proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1.ReplicaSetSpec") - proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1.ReplicaSetStatus") - proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1.RollingUpdateDaemonSet") - proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1.RollingUpdateDeployment") - proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1.RollingUpdateStatefulSetStrategy") - proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1.StatefulSet") - proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1.StatefulSetCondition") - proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1.StatefulSetList") - proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1.StatefulSetSpec") - proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1.StatefulSetStatus") - proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1.StatefulSetUpdateStrategy") -} -func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil -} - -func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - return i, nil -} - -func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - return i, nil -} - -func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - return i, nil -} - -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - return i, nil -} - -func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += 
n16 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - return i, nil -} - -func (m *DeploymentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) - } - return i, nil -} - -func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil -} - -func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - return i, nil -} - -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - return i, nil -} - -func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - 
return dAtA[:n], nil -} - -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil -} - -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 - } - return i, nil -} - -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } - if m.MaxSurge != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - } - return i, nil -} - -func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Partition != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(*m.Partition)) - } - return i, nil -} - -func (m *StatefulSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - return i, nil -} - -func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n36, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n37, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n38, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n39, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { - 
dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n40, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - return i, nil -} - -func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n41, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = 
uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *ControllerRevision) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Data.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Revision)) - return n -} - -func (m *ControllerRevisionList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonSetCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSetSpec) Size() (n int) { - var l int - _ = l - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - return n -} - -func (m *DaemonSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberAvailable)) - n += 1 + sovGenerated(uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSetUpdateStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Deployment) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DeploymentSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } - return n -} - -func (m *DeploymentStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - return n -} - -func (m *DeploymentStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ReplicaSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ReplicaSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n -} - -func (m *ReplicaSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - 
} - } - return n -} - -func (m *RollingUpdateDaemonSet) Size() (n int) { - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RollingUpdateDeployment) Size() (n int) { - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { - var l int - _ = l - if m.Partition != nil { - n += 1 + sovGenerated(uint64(*m.Partition)) - } - return n -} - -func (m *StatefulSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatefulSetCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatefulSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StatefulSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeClaimTemplates) > 0 { - for _, e := range m.VolumeClaimTemplates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodManagementPolicy) - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - return n -} - -func (m *StatefulSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - l = len(m.CurrentRevision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UpdateRevision) - n += 1 + l + sovGenerated(uint64(l)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StatefulSetUpdateStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ControllerRevision) String() string { - if this == nil { 
- return "nil" - } - s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *ControllerRevisionList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetStatus{`, - `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, - `NumberMisscheduled:` + fmt.Sprintf("%v", 
this.NumberMisscheduled) + `,`, - `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, - `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, - `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, - `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Deployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - 
`RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", 
this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateStatefulSetStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, - `Partition:` + valueToStringGenerated(this.Partition) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetSpec{`, - `Replicas:` + 
valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, - `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ControllerRevision) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated 
- } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ControllerRevision{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx 
= preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = 
preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { 
- case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) - } - m.UpdatedNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) - } - m.NumberAvailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) - } - m.NumberUnavailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DaemonSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return 
io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum 
<= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) - } - m.UpdatedReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) - } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) - } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil 
-} -func (m *ReplicaSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ReplicaSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ReplicaSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetSpec: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicaSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicaSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicaSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) - } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) - } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ReplicaSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollingUpdateDaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Partition = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, StatefulSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - 
} - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, k8s_io_api_core_v1.PersistentVolumeClaim{}) - if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - m.CurrentReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) - } - m.UpdatedReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CurrentRevision = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UpdateRevision = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, StatefulSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2037 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcd, 0x6f, 0x24, 0x47, - 0x1d, 0x75, 0xcf, 0x87, 0x3d, 0x2e, 0xaf, 0xed, 0xdd, 0xb2, 0xb1, 0x27, 0xbb, 0x64, 0x66, 0x19, - 0x60, 0xe3, 0x64, 0xb3, 0x3d, 0xec, 0x66, 0x13, 0xa1, 0x2c, 0x02, 0x79, 0xc6, 0x21, 0x84, 0x78, - 0x6c, 0x53, 0x5e, 0xef, 0x61, 0x09, 0x12, 0xe5, 0xe9, 0xda, 0x71, 0xc7, 0xfd, 0xa5, 0xee, 0xea, - 0x61, 0x47, 0x5c, 0x10, 0x12, 0x9c, 0x38, 0xf0, 0x9f, 0x20, 0x84, 0xe0, 0x86, 0x22, 0xc4, 0x65, - 0x2f, 0x48, 0x11, 0x17, 0x72, 0xb2, 0xd8, 0xc9, 0x09, 0xa1, 0x1c, 0xb9, 0xe4, 0x02, 0xaa, 0xea, - 0xea, 0xef, 0x6a, 0xcf, 0xd8, 0x9b, 0x75, 0x50, 0x94, 0x9b, 0xa7, 0xea, 0xfd, 0x5e, 0xff, 0xaa, - 0xea, 0x57, 0xf5, 0x5e, 0x57, 0x1b, 0xdc, 0x3b, 0xfe, 0xb6, 0xa7, 0xea, 0x76, 0xfb, 0xd8, 0x3f, - 0x24, 0xae, 0x45, 0x28, 0xf1, 0xda, 0x43, 0x62, 0x69, 0xb6, 0xdb, 0x16, 0x1d, 0xd8, 0xd1, 0xdb, - 0xd8, 0x71, 0xbc, 0xf6, 0xf0, 0x76, 0x7b, 0x40, 0x2c, 0xe2, 0x62, 0x4a, 0x34, 0xd5, 0x71, 0x6d, - 0x6a, 0x43, 0x18, 0x60, 0x54, 0xec, 0xe8, 0x2a, 0xc3, 0xa8, 0xc3, 0xdb, 0x57, 0x6f, 0x0d, 0x74, - 0x7a, 0xe4, 0x1f, 0xaa, 0x7d, 0xdb, 0x6c, 0x0f, 0xec, 0x81, 0xdd, 0xe6, 0xd0, 0x43, 0xff, 0x11, - 0xff, 0xc5, 0x7f, 0xf0, 0xbf, 0x02, 0x8a, 0xab, 0xad, 0xc4, 0x63, 0xfa, 0xb6, 0x4b, 0x24, 0x8f, - 0xb9, 0x7a, 0x37, 0xc6, 0x98, 0xb8, 0x7f, 0xa4, 0x5b, 0xc4, 0x1d, 0xb5, 0x9d, 0xe3, 0x01, 0x6b, - 0xf0, 0xda, 0x26, 0xa1, 0x58, 0x16, 0xd5, 0x2e, 0x8a, 0x72, 0x7d, 0x8b, 0xea, 0x26, 0xc9, 0x05, - 0xbc, 0x31, 0x29, 0xc0, 0xeb, 0x1f, 0x11, 0x13, 0xe7, 0xe2, 0x5e, 0x2b, 0x8a, 0xf3, 0xa9, 0x6e, - 0xb4, 0x75, 0x8b, 0x7a, 0xd4, 0xcd, 0x06, 0xb5, 0xfe, 0xa3, 
0x00, 0xd8, 0xb5, 0x2d, 0xea, 0xda, - 0x86, 0x41, 0x5c, 0x44, 0x86, 0xba, 0xa7, 0xdb, 0x16, 0xfc, 0x29, 0xa8, 0xb1, 0xf1, 0x68, 0x98, - 0xe2, 0xba, 0x72, 0x5d, 0xd9, 0x58, 0xb8, 0xf3, 0x2d, 0x35, 0x9e, 0xe4, 0x88, 0x5e, 0x75, 0x8e, - 0x07, 0xac, 0xc1, 0x53, 0x19, 0x5a, 0x1d, 0xde, 0x56, 0x77, 0x0f, 0xdf, 0x27, 0x7d, 0xda, 0x23, - 0x14, 0x77, 0xe0, 0x93, 0x93, 0xe6, 0xcc, 0xf8, 0xa4, 0x09, 0xe2, 0x36, 0x14, 0xb1, 0xc2, 0x5d, - 0x50, 0xe1, 0xec, 0x25, 0xce, 0x7e, 0xab, 0x90, 0x5d, 0x0c, 0x5a, 0x45, 0xf8, 0x67, 0x6f, 0x3d, - 0xa6, 0xc4, 0x62, 0xe9, 0x75, 0x2e, 0x09, 0xea, 0xca, 0x16, 0xa6, 0x18, 0x71, 0x22, 0xf8, 0x2a, - 0xa8, 0xb9, 0x22, 0xfd, 0x7a, 0xf9, 0xba, 0xb2, 0x51, 0xee, 0x5c, 0x16, 0xa8, 0x5a, 0x38, 0x2c, - 0x14, 0x21, 0x5a, 0x7f, 0x55, 0xc0, 0x5a, 0x7e, 0xdc, 0xdb, 0xba, 0x47, 0xe1, 0x7b, 0xb9, 0xb1, - 0xab, 0xd3, 0x8d, 0x9d, 0x45, 0xf3, 0x91, 0x47, 0x0f, 0x0e, 0x5b, 0x12, 0xe3, 0x7e, 0x17, 0x54, - 0x75, 0x4a, 0x4c, 0xaf, 0x5e, 0xba, 0x5e, 0xde, 0x58, 0xb8, 0x73, 0x43, 0xcd, 0xd7, 0xae, 0x9a, - 0x4f, 0xac, 0xb3, 0x28, 0x28, 0xab, 0xef, 0xb0, 0x60, 0x14, 0x70, 0xb4, 0xfe, 0xab, 0x80, 0xf9, - 0x2d, 0x4c, 0x4c, 0xdb, 0xda, 0x27, 0xf4, 0x02, 0x16, 0xad, 0x0b, 0x2a, 0x9e, 0x43, 0xfa, 0x62, - 0xd1, 0xbe, 0x26, 0xcb, 0x3d, 0x4a, 0x67, 0xdf, 0x21, 0xfd, 0x78, 0xa1, 0xd8, 0x2f, 0xc4, 0x83, - 0xe1, 0xbb, 0x60, 0xd6, 0xa3, 0x98, 0xfa, 0x1e, 0x5f, 0xa6, 0x85, 0x3b, 0x5f, 0x3f, 0x9d, 0x86, - 0x43, 0x3b, 0x4b, 0x82, 0x68, 0x36, 0xf8, 0x8d, 0x04, 0x45, 0xeb, 0x5f, 0x25, 0x00, 0x23, 0x6c, - 0xd7, 0xb6, 0x34, 0x9d, 0xb2, 0xfa, 0x7d, 0x13, 0x54, 0xe8, 0xc8, 0x21, 0x7c, 0x1a, 0xe6, 0x3b, - 0x37, 0xc2, 0x2c, 0xee, 0x8f, 0x1c, 0xf2, 0xe9, 0x49, 0x73, 0x2d, 0x1f, 0xc1, 0x7a, 0x10, 0x8f, - 0x81, 0xdb, 0x51, 0x7e, 0x25, 0x1e, 0x7d, 0x37, 0xfd, 0xe8, 0x4f, 0x4f, 0x9a, 0x92, 0xc3, 0x42, - 0x8d, 0x98, 0xd2, 0x09, 0xc2, 0x21, 0x80, 0x06, 0xf6, 0xe8, 0x7d, 0x17, 0x5b, 0x5e, 0xf0, 0x24, - 0xdd, 0x24, 0x62, 0xe4, 0xaf, 0x4c, 0xb7, 0x3c, 0x2c, 0xa2, 0x73, 0x55, 0x64, 0x01, 0xb7, 0x73, - 0x6c, 0x48, 0xf2, 0x04, 0x78, 0x03, 0xcc, 0xba, 0x04, 0x7b, 0xb6, 0x55, 0xaf, 0xf0, 0x51, 0x44, - 0x13, 0x88, 0x78, 0x2b, 0x12, 0xbd, 0xf0, 0x65, 0x30, 0x67, 0x12, 0xcf, 0xc3, 0x03, 0x52, 0xaf, - 0x72, 0xe0, 0xb2, 0x00, 0xce, 0xf5, 0x82, 0x66, 0x14, 0xf6, 0xb7, 0x7e, 0xaf, 0x80, 0xc5, 0x68, - 0xe6, 0x2e, 0x60, 0xab, 0x74, 0xd2, 0x5b, 0xe5, 0xc5, 0x53, 0xeb, 0xa4, 0x60, 0x87, 0x7c, 0x50, - 0x4e, 0xe4, 0xcc, 0x8a, 0x10, 0xfe, 0x04, 0xd4, 0x3c, 0x62, 0x90, 0x3e, 0xb5, 0x5d, 0x91, 0xf3, - 0x6b, 0x53, 0xe6, 0x8c, 0x0f, 0x89, 0xb1, 0x2f, 0x42, 0x3b, 0x97, 0x58, 0xd2, 0xe1, 0x2f, 0x14, - 0x51, 0xc2, 0x1f, 0x81, 0x1a, 0x25, 0xa6, 0x63, 0x60, 0x4a, 0xc4, 0x36, 0x49, 0xd5, 0x37, 0x2b, - 0x17, 0x46, 0xb6, 0x67, 0x6b, 0xf7, 0x05, 0x8c, 0x6f, 0x94, 0x68, 0x1e, 0xc2, 0x56, 0x14, 0xd1, - 0xc0, 0x63, 0xb0, 0xe4, 0x3b, 0x1a, 0x43, 0x52, 0x76, 0x74, 0x0f, 0x46, 0xa2, 0x7c, 0x6e, 0x9e, - 0x3a, 0x21, 0x07, 0xa9, 0x90, 0xce, 0x9a, 0x78, 0xc0, 0x52, 0xba, 0x1d, 0x65, 0xa8, 0xe1, 0x26, - 0x58, 0x36, 0x75, 0x0b, 0x11, 0xac, 0x8d, 0xf6, 0x49, 0xdf, 0xb6, 0x34, 0x8f, 0x17, 0x50, 0xb5, - 0xb3, 0x2e, 0x08, 0x96, 0x7b, 0xe9, 0x6e, 0x94, 0xc5, 0xc3, 0x6d, 0xb0, 0x1a, 0x9e, 0xb3, 0x3f, - 0xd0, 0x3d, 0x6a, 0xbb, 0xa3, 0x6d, 0xdd, 0xd4, 0x69, 0x7d, 0x96, 0xf3, 0xd4, 0xc7, 0x27, 0xcd, - 0x55, 0x24, 0xe9, 0x47, 0xd2, 0xa8, 0xd6, 0x6f, 0x66, 0xc1, 0x72, 0xe6, 0x34, 0x80, 0x0f, 0xc0, - 0x5a, 0xdf, 0x77, 0x5d, 0x62, 0xd1, 0x1d, 0xdf, 0x3c, 0x24, 0xee, 0x7e, 0xff, 0x88, 0x68, 0xbe, - 0x41, 0x34, 0xbe, 0xa2, 0xd5, 0x4e, 0x43, 0xe4, 0xba, 0xd6, 0x95, 0xa2, 0x50, 0x41, 
0x34, 0xfc, - 0x21, 0x80, 0x16, 0x6f, 0xea, 0xe9, 0x9e, 0x17, 0x71, 0x96, 0x38, 0x67, 0xb4, 0x01, 0x77, 0x72, - 0x08, 0x24, 0x89, 0x62, 0x39, 0x6a, 0xc4, 0xd3, 0x5d, 0xa2, 0x65, 0x73, 0x2c, 0xa7, 0x73, 0xdc, - 0x92, 0xa2, 0x50, 0x41, 0x34, 0x7c, 0x1d, 0x2c, 0x04, 0x4f, 0xe3, 0x73, 0x2e, 0x16, 0x67, 0x45, - 0x90, 0x2d, 0xec, 0xc4, 0x5d, 0x28, 0x89, 0x63, 0x43, 0xb3, 0x0f, 0x3d, 0xe2, 0x0e, 0x89, 0xf6, - 0x76, 0xe0, 0x01, 0x98, 0x50, 0x56, 0xb9, 0x50, 0x46, 0x43, 0xdb, 0xcd, 0x21, 0x90, 0x24, 0x8a, - 0x0d, 0x2d, 0xa8, 0x9a, 0xdc, 0xd0, 0x66, 0xd3, 0x43, 0x3b, 0x90, 0xa2, 0x50, 0x41, 0x34, 0xab, - 0xbd, 0x20, 0xe5, 0xcd, 0x21, 0xd6, 0x0d, 0x7c, 0x68, 0x90, 0xfa, 0x5c, 0xba, 0xf6, 0x76, 0xd2, - 0xdd, 0x28, 0x8b, 0x87, 0x6f, 0x83, 0x2b, 0x41, 0xd3, 0x81, 0x85, 0x23, 0x92, 0x1a, 0x27, 0x79, - 0x41, 0x90, 0x5c, 0xd9, 0xc9, 0x02, 0x50, 0x3e, 0x06, 0xbe, 0x09, 0x96, 0xfa, 0xb6, 0x61, 0xf0, - 0x7a, 0xec, 0xda, 0xbe, 0x45, 0xeb, 0xf3, 0x9c, 0x05, 0xb2, 0x3d, 0xd4, 0x4d, 0xf5, 0xa0, 0x0c, - 0x12, 0x3e, 0x04, 0xa0, 0x1f, 0xca, 0x81, 0x57, 0x07, 0xc5, 0x42, 0x9f, 0xd7, 0xa1, 0x58, 0x80, - 0xa3, 0x26, 0x0f, 0x25, 0xd8, 0x5a, 0x1f, 0x28, 0x60, 0xbd, 0x60, 0x8f, 0xc3, 0xef, 0xa5, 0x54, - 0xef, 0x66, 0x46, 0xf5, 0xae, 0x15, 0x84, 0x25, 0xa4, 0xaf, 0x0f, 0x16, 0x99, 0xef, 0xd0, 0xad, - 0x41, 0x00, 0x11, 0x27, 0xd8, 0x2b, 0xb2, 0xdc, 0x51, 0x12, 0x18, 0x1f, 0xc3, 0x57, 0xc6, 0x27, - 0xcd, 0xc5, 0x54, 0x1f, 0x4a, 0x73, 0xb6, 0x7e, 0x59, 0x02, 0x60, 0x8b, 0x38, 0x86, 0x3d, 0x32, - 0x89, 0x75, 0x11, 0xae, 0x65, 0x2b, 0xe5, 0x5a, 0x5a, 0xd2, 0x85, 0x88, 0xf2, 0x29, 0xb4, 0x2d, - 0xdb, 0x19, 0xdb, 0xf2, 0x8d, 0x09, 0x3c, 0xa7, 0xfb, 0x96, 0x7f, 0x94, 0xc1, 0x4a, 0x0c, 0x8e, - 0x8d, 0xcb, 0xbd, 0xd4, 0x12, 0xbe, 0x94, 0x59, 0xc2, 0x75, 0x49, 0xc8, 0x73, 0x73, 0x2e, 0x9f, - 0xbd, 0x83, 0x80, 0xef, 0x83, 0x25, 0x66, 0x55, 0x82, 0x42, 0xe0, 0x46, 0x68, 0xf6, 0xcc, 0x46, - 0x28, 0x12, 0xb2, 0xed, 0x14, 0x13, 0xca, 0x30, 0x17, 0x18, 0xaf, 0xb9, 0xe7, 0x6d, 0xbc, 0x5a, - 0x7f, 0x50, 0xc0, 0x52, 0xbc, 0x4c, 0x17, 0x60, 0x93, 0xba, 0x69, 0x9b, 0xd4, 0x38, 0xbd, 0x2e, - 0x0b, 0x7c, 0xd2, 0xdf, 0x2b, 0xc9, 0xac, 0xb9, 0x51, 0xda, 0x60, 0x2f, 0x54, 0x8e, 0xa1, 0xf7, - 0xb1, 0x27, 0x64, 0xf5, 0x52, 0xf0, 0x32, 0x15, 0xb4, 0xa1, 0xa8, 0x37, 0x65, 0xa9, 0x4a, 0xcf, - 0xd7, 0x52, 0x95, 0x3f, 0x1b, 0x4b, 0x75, 0x1f, 0xd4, 0xbc, 0xd0, 0x4c, 0x55, 0x38, 0xe5, 0x8d, - 0x49, 0xdb, 0x59, 0xf8, 0xa8, 0x88, 0x35, 0x72, 0x50, 0x11, 0x93, 0xcc, 0x3b, 0x55, 0x3f, 0x4f, - 0xef, 0xc4, 0xb6, 0xb0, 0x83, 0x7d, 0x8f, 0x68, 0xbc, 0xee, 0x6b, 0xf1, 0x16, 0xde, 0xe3, 0xad, - 0x48, 0xf4, 0xc2, 0x03, 0xb0, 0xee, 0xb8, 0xf6, 0xc0, 0x25, 0x9e, 0xb7, 0x45, 0xb0, 0x66, 0xe8, - 0x16, 0x09, 0x07, 0x10, 0xa8, 0xde, 0xb5, 0xf1, 0x49, 0x73, 0x7d, 0x4f, 0x0e, 0x41, 0x45, 0xb1, - 0xad, 0x3f, 0x57, 0xc0, 0xe5, 0xec, 0x89, 0x58, 0x60, 0x44, 0x94, 0x73, 0x19, 0x91, 0x57, 0x13, - 0x25, 0x1a, 0xb8, 0xb4, 0xc4, 0x3b, 0x7f, 0xae, 0x4c, 0x37, 0xc1, 0xb2, 0x30, 0x1e, 0x61, 0xa7, - 0xb0, 0x62, 0xd1, 0xf2, 0x1c, 0xa4, 0xbb, 0x51, 0x16, 0xcf, 0xec, 0x45, 0xec, 0x1a, 0x42, 0x92, - 0x4a, 0xda, 0x5e, 0x6c, 0x66, 0x01, 0x28, 0x1f, 0x03, 0x7b, 0x60, 0xc5, 0xb7, 0xf2, 0x54, 0x41, - 0xb9, 0x5c, 0x13, 0x54, 0x2b, 0x07, 0x79, 0x08, 0x92, 0xc5, 0xc1, 0x1f, 0xa7, 0x1c, 0xc7, 0x2c, - 0x3f, 0x08, 0x5e, 0x3a, 0xbd, 0xa2, 0xa7, 0xb6, 0x1c, 0xf0, 0x1e, 0x58, 0x74, 0xb9, 0xa1, 0x0c, - 0xb3, 0x0c, 0x4c, 0xd9, 0x57, 0x44, 0xd8, 0x22, 0x4a, 0x76, 0xa2, 0x34, 0x56, 0xe2, 0xa3, 0x6a, - 0xd3, 0xfa, 0xa8, 0xd6, 0x9f, 0x14, 0x00, 0xf3, 0x5b, 0x70, 0xe2, 0xcb, 0x7d, 0x2e, 0x22, 0x21, - 0x91, 0x9a, 
0xdc, 0xe1, 0xdc, 0x9c, 0xec, 0x70, 0xe2, 0x13, 0x74, 0x3a, 0x8b, 0x23, 0x66, 0xe0, - 0x62, 0x2e, 0x66, 0xa6, 0xb0, 0x38, 0x71, 0x3e, 0xcf, 0x66, 0x71, 0x12, 0x3c, 0xa7, 0x5b, 0x9c, - 0x7f, 0x97, 0xc0, 0x4a, 0x0c, 0x9e, 0xda, 0xe2, 0x48, 0x42, 0xbe, 0xbc, 0x9c, 0x99, 0x7c, 0x39, - 0xc3, 0x6c, 0x47, 0x3c, 0x75, 0xff, 0x27, 0xb6, 0x23, 0x4e, 0xa8, 0xc0, 0x76, 0xfc, 0xae, 0x94, - 0xcc, 0xfa, 0x0b, 0x6f, 0x3b, 0x9e, 0xfd, 0x72, 0xa5, 0xf5, 0x97, 0x32, 0xb8, 0x9c, 0xdd, 0x82, - 0x29, 0x1d, 0x54, 0x26, 0xea, 0xe0, 0x1e, 0x58, 0x7d, 0xe4, 0x1b, 0xc6, 0x88, 0x4f, 0x43, 0x42, - 0x0c, 0x03, 0x05, 0xfd, 0xaa, 0x88, 0x5c, 0xfd, 0xbe, 0x04, 0x83, 0xa4, 0x91, 0x05, 0x9a, 0x5e, - 0x3e, 0x97, 0xa6, 0xe7, 0xd4, 0xa6, 0x72, 0x06, 0xb5, 0x91, 0xea, 0x73, 0xf5, 0x1c, 0xfa, 0x3c, - 0xb5, 0xa0, 0x4a, 0x8e, 0xab, 0x89, 0xef, 0xf0, 0xbf, 0x56, 0xc0, 0x9a, 0xfc, 0xf5, 0x19, 0x1a, - 0x60, 0xc9, 0xc4, 0x8f, 0x93, 0x97, 0x17, 0x93, 0x04, 0xc3, 0xa7, 0xba, 0xa1, 0x06, 0x5f, 0x77, - 0xd4, 0x77, 0x2c, 0xba, 0xeb, 0xee, 0x53, 0x57, 0xb7, 0x06, 0x81, 0xc0, 0xf6, 0x52, 0x5c, 0x28, - 0xc3, 0xdd, 0xfa, 0x58, 0x01, 0xeb, 0x05, 0x2a, 0x77, 0xb1, 0x99, 0xc0, 0x87, 0xa0, 0x66, 0xe2, - 0xc7, 0xfb, 0xbe, 0x3b, 0x08, 0x25, 0xf9, 0xec, 0xcf, 0xe1, 0x1b, 0xb9, 0x27, 0x58, 0x50, 0xc4, - 0xd7, 0xda, 0x05, 0xd7, 0x53, 0x83, 0x64, 0x9b, 0x86, 0x3c, 0xf2, 0x0d, 0xbe, 0x7f, 0x84, 0xa7, - 0xb8, 0x09, 0xe6, 0x1d, 0xec, 0x52, 0x3d, 0x32, 0xa3, 0xd5, 0xce, 0xe2, 0xf8, 0xa4, 0x39, 0xbf, - 0x17, 0x36, 0xa2, 0xb8, 0xbf, 0xf5, 0xab, 0x12, 0x58, 0x48, 0x90, 0x5c, 0x80, 0xbe, 0xbf, 0x95, - 0xd2, 0x77, 0xe9, 0x17, 0x93, 0xe4, 0xa8, 0x8a, 0x04, 0xbe, 0x97, 0x11, 0xf8, 0x6f, 0x4e, 0x22, - 0x3a, 0x5d, 0xe1, 0x3f, 0x29, 0x81, 0xd5, 0x04, 0x3a, 0x96, 0xf8, 0xef, 0xa4, 0x24, 0x7e, 0x23, - 0x23, 0xf1, 0x75, 0x59, 0xcc, 0x97, 0x1a, 0x3f, 0x59, 0xe3, 0xff, 0xa8, 0x80, 0xe5, 0xc4, 0xdc, - 0x5d, 0x80, 0xc8, 0x6f, 0xa5, 0x45, 0xbe, 0x39, 0xa1, 0x5e, 0x0a, 0x54, 0xfe, 0x49, 0x35, 0x95, - 0xf7, 0x17, 0x5e, 0xe6, 0x7f, 0x0e, 0x56, 0x87, 0xb6, 0xe1, 0x9b, 0xa4, 0x6b, 0x60, 0xdd, 0x0c, - 0x01, 0x4c, 0xc9, 0xd8, 0x24, 0xbe, 0x2c, 0xa5, 0x27, 0xae, 0xa7, 0x7b, 0x94, 0x58, 0xf4, 0x41, - 0x1c, 0x19, 0x6b, 0xf1, 0x03, 0x09, 0x1d, 0x92, 0x3e, 0x04, 0xbe, 0x0e, 0x16, 0x98, 0xa6, 0xea, - 0x7d, 0xb2, 0x83, 0xcd, 0xb0, 0xa6, 0xa2, 0xef, 0x03, 0xfb, 0x71, 0x17, 0x4a, 0xe2, 0xe0, 0x11, - 0x58, 0x71, 0x6c, 0xad, 0x87, 0x2d, 0x3c, 0x20, 0xec, 0xfc, 0xdf, 0xb3, 0x0d, 0xbd, 0x3f, 0xe2, - 0xf7, 0x0e, 0xf3, 0x9d, 0x37, 0xc2, 0x17, 0xd2, 0xbd, 0x3c, 0x84, 0x79, 0x76, 0x49, 0x33, 0xdf, - 0xcf, 0x32, 0x4a, 0x68, 0xe6, 0x3e, 0x67, 0xcd, 0xe5, 0xfe, 0x07, 0x40, 0x56, 0x5c, 0xe7, 0xfc, - 0xa0, 0x55, 0x74, 0xa3, 0x52, 0x3b, 0xd7, 0xd7, 0xa8, 0x4f, 0x2a, 0xe0, 0x4a, 0xee, 0x80, 0xfc, - 0x1c, 0xef, 0x34, 0x72, 0x6e, 0xa9, 0x7c, 0x06, 0xb7, 0xb4, 0x09, 0x96, 0xc5, 0x87, 0xb0, 0x8c, - 0xd9, 0x8a, 0xec, 0x68, 0x37, 0xdd, 0x8d, 0xb2, 0x78, 0xd9, 0x9d, 0x4a, 0xf5, 0x8c, 0x77, 0x2a, - 0xc9, 0x2c, 0xc4, 0xff, 0x6f, 0x04, 0x55, 0x97, 0xcf, 0x42, 0xfc, 0x1b, 0x47, 0x16, 0x0f, 0xbf, - 0x1b, 0x96, 0x54, 0xc4, 0x30, 0xc7, 0x19, 0x32, 0x35, 0x12, 0x11, 0x64, 0xd0, 0xcf, 0xf4, 0xb1, - 0xe7, 0x3d, 0xc9, 0xc7, 0x9e, 0x8d, 0x09, 0xa5, 0x3c, 0xbd, 0x55, 0xfc, 0x9b, 0x02, 0x5e, 0x28, - 0xdc, 0x03, 0x70, 0x33, 0xa5, 0xb3, 0xb7, 0x32, 0x3a, 0xfb, 0x62, 0x61, 0x60, 0x42, 0x6c, 0x4d, - 0xf9, 0x85, 0xc8, 0xdd, 0x89, 0x17, 0x22, 0x12, 0x17, 0x35, 0xf9, 0x66, 0xa4, 0xb3, 0xf1, 0xe4, - 0x69, 0x63, 0xe6, 0xc3, 0xa7, 0x8d, 0x99, 0x8f, 0x9e, 0x36, 0x66, 0x7e, 0x31, 0x6e, 0x28, 0x4f, - 0xc6, 0x0d, 0xe5, 0xc3, 0x71, 0x43, 
0xf9, 0x68, 0xdc, 0x50, 0xfe, 0x39, 0x6e, 0x28, 0xbf, 0xfd, - 0xb8, 0x31, 0xf3, 0xb0, 0x34, 0xbc, 0xfd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x6b, 0x01, - 0x7b, 0x12, 0x26, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1/generated.proto b/vendor/k8s.io/api/apps/v1/generated.proto deleted file mode 100644 index c8a957a..0000000 --- a/vendor/k8s.io/api/apps/v1/generated.proto +++ /dev/null @@ -1,700 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.apps.v1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// ControllerRevision implements an immutable snapshot of state data. Clients -// are responsible for serializing and deserializing the objects that contain -// their internal state. -// Once a ControllerRevision has been successfully created, it can not be updated. -// The API Server will fail validation of all requests that attempt to mutate -// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both -// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, -// it may be subject to name and representation changes in future releases, and clients should not -// depend on its stability. It is primarily for internal use by controllers. -message ControllerRevision { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; - - // Revision indicates the revision of the state represented by Data. - optional int64 revision = 3; -} - -// ControllerRevisionList is a resource containing a list of ControllerRevision objects. -message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ControllerRevisions - repeated ControllerRevision items = 2; -} - -// DaemonSet represents the configuration of a daemon set. -message DaemonSet { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The desired behavior of this daemon set. 
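The generated Unmarshal functions removed above repeat the same base-128 varint loop for every field: each byte contributes its low seven bits, least-significant group first, and a byte below 0x80 ends the value. As a standalone sketch of that decoding step (plain Go written for illustration, not code from this patch):

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the inlined loops in the deleted generated code:
// accumulate 7 bits per byte, lowest group first; a clear high bit
// (b < 0x80) marks the final byte, and more than 64 bits is an overflow.
func decodeVarint(data []byte) (value uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("varint overflows 64 bits")
		}
		if n >= len(data) {
			return 0, 0, errors.New("unexpected end of data")
		}
		b := data[n]
		n++
		value |= uint64(b&0x7F) << shift
		if b < 0x80 {
			return value, n, nil
		}
	}
}

func main() {
	v, n, err := decodeVarint([]byte{0xAC, 0x02}) // two-byte encoding of 300
	fmt.Println(v, n, err)                        // 300 2 <nil>
}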
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional DaemonSetSpec spec = 2; - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional DaemonSetStatus status = 3; -} - -// DaemonSetCondition describes the state of a DaemonSet at a certain point. -message DaemonSetCondition { - // Type of DaemonSet condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// DaemonSetList is a collection of daemon sets. -message DaemonSetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // A list of daemon sets. - repeated DaemonSet items = 2; -} - -// DaemonSetSpec is the specification of a daemon set. -message DaemonSetSpec { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - optional DaemonSetUpdateStrategy updateStrategy = 3; - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - optional int32 minReadySeconds = 4; - - // The number of old history to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - optional int32 revisionHistoryLimit = 6; -} - -// DaemonSetStatus represents the current status of a daemon set. -message DaemonSetStatus { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 currentNumberScheduled = 1; - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. 
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 numberMisscheduled = 2; - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 desiredNumberScheduled = 3; - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - optional int32 numberReady = 4; - - // The most recent generation observed by the daemon set controller. - // +optional - optional int64 observedGeneration = 5; - - // The total number of nodes that are running updated daemon pod - // +optional - optional int32 updatedNumberScheduled = 6; - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - optional int32 numberAvailable = 7; - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - optional int32 numberUnavailable = 8; - - // Count of hash collisions for the DaemonSet. The DaemonSet controller - // uses this field as a collision avoidance mechanism when it needs to - // create the name for the newest ControllerRevision. - // +optional - optional int32 collisionCount = 9; - - // Represents the latest available observations of a DaemonSet's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated DaemonSetCondition conditions = 10; -} - -// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. -message DaemonSetUpdateStrategy { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. - // +optional - optional string type = 1; - - // Rolling update config params. Present only if type = "RollingUpdate". - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as Deployment `strategy.rollingUpdate`. - // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - optional RollingUpdateDaemonSet rollingUpdate = 2; -} - -// Deployment enables declarative updates for Pods and ReplicaSets. -message Deployment { - // Standard object metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the Deployment. - // +optional - optional DeploymentSpec spec = 2; - - // Most recently observed status of the Deployment. - // +optional - optional DeploymentStatus status = 3; -} - -// DeploymentCondition describes the state of a deployment at a certain point. -message DeploymentCondition { - // Type of deployment condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; - - // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; - - // The reason for the condition's last transition. - optional string reason = 4; - - // A human readable message indicating details about the transition. 
- optional string message = 5; -} - -// DeploymentList is a list of Deployments. -message DeploymentList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of Deployments. - repeated Deployment items = 2; -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - optional int32 replicas = 1; - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - optional DeploymentStrategy strategy = 4; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - optional int32 minReadySeconds = 5; - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - optional int32 revisionHistoryLimit = 6; - - // Indicates that the deployment is paused. - // +optional - optional bool paused = 7; - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - optional int32 progressDeadlineSeconds = 9; -} - -// DeploymentStatus is the most recently observed status of the Deployment. -message DeploymentStatus { - // The generation observed by the deployment controller. - // +optional - optional int64 observedGeneration = 1; - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - optional int32 replicas = 2; - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - optional int32 updatedReplicas = 3; - - // Total number of ready pods targeted by this deployment. - // +optional - optional int32 readyReplicas = 7; - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - optional int32 availableReplicas = 4; - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - optional int32 unavailableReplicas = 5; - - // Represents the latest available observations of a deployment's current state. - // +patchMergeKey=type - // +patchStrategy=merge - repeated DeploymentCondition conditions = 6; - - // Count of hash collisions for the Deployment. 
The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - optional int32 collisionCount = 8; -} - -// DeploymentStrategy describes how to replace existing pods with new ones. -message DeploymentStrategy { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - optional string type = 1; - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - optional RollingUpdateDeployment rollingUpdate = 2; -} - -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -message ReplicaSet { - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ReplicaSetSpec spec = 2; - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ReplicaSetStatus status = 3; -} - -// ReplicaSetCondition describes the state of a replica set at a certain point. -message ReplicaSetCondition { - // Type of replica set condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // The last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// ReplicaSetList is a collection of ReplicaSets. -message ReplicaSetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ReplicaSets. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller - repeated ReplicaSet items = 2; -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -message ReplicaSetSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - // +optional - optional int32 replicas = 1; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. 
- // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - optional int32 minReadySeconds = 4; - - // Selector is a label query over pods that should match the replica count. - // Label keys and values that must match in order to be controlled by this replica set. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - optional int32 fullyLabeledReplicas = 2; - - // The number of ready replicas for this replica set. - // +optional - optional int32 readyReplicas = 4; - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - optional int32 availableReplicas = 5; - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. - // +optional - optional int64 observedGeneration = 3; - - // Represents the latest available observations of a replica set's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated ReplicaSetCondition conditions = 6; -} - -// Spec to control the desired behavior of daemon set rolling update. -message RollingUpdateDaemonSet { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; -} - -// Spec to control the desired behavior of rolling update. -message RollingUpdateDeployment { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // Defaults to 25%. 
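Per the comments above, a percentage maxUnavailable is converted to an absolute pod count by rounding down, while maxSurge rounds up. A small arithmetic sketch of that conversion, using a hypothetical helper rather than the real apimachinery intstr logic:

package main

import "fmt"

// resolvePercent turns a percentage-style value (e.g. 25 for "25%") into an
// absolute pod count against the desired replica count, rounding down for
// maxUnavailable and up for maxSurge, as the comments above describe.
func resolvePercent(percent, desired int, roundUp bool) int {
	v := percent * desired
	if roundUp {
		return (v + 99) / 100
	}
	return v / 100
}

func main() {
	desired := 10
	fmt.Println(resolvePercent(25, desired, false)) // maxUnavailable: 25% of 10 -> 2
	fmt.Println(resolvePercent(25, desired, true))  // maxSurge: 25% of 10 -> 3
}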
- // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet - // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // Defaults to 25%. - // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; -} - -// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. -message RollingUpdateStatefulSetStrategy { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - // Default value is 0. - // +optional - optional int32 partition = 1; -} - -// StatefulSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The StatefulSet guarantees that a given network identity will always -// map to the same storage identity. -message StatefulSet { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the desired identities of pods in this set. - // +optional - optional StatefulSetSpec spec = 2; - - // Status is the current status of Pods in this StatefulSet. This data - // may be out of date by some window of time. - // +optional - optional StatefulSetStatus status = 3; -} - -// StatefulSetCondition describes the state of a statefulset at a certain point. -message StatefulSetCondition { - // Type of statefulset condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// StatefulSetList is a collection of StatefulSets. -message StatefulSetList { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated StatefulSet items = 2; -} - -// A StatefulSetSpec is the specification of a StatefulSet. -message StatefulSetSpec { - // replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. 
- // TODO: Consider a rename of this field. - // +optional - optional int32 replicas = 1; - - // selector is a label query over pods that should match the replica count. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the StatefulSet - // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; - - // volumeClaimTemplates is a list of claims that pods are allowed to reference. - // The StatefulSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pod. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - // +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; - - // serviceName is the name of the service that governs this StatefulSet. - // This service must exist before the StatefulSet, and is responsible for - // the network identity of the set. Pods get DNS/hostnames that follow the - // pattern: pod-specific-string.serviceName.default.svc.cluster.local - // where "pod-specific-string" is managed by the StatefulSet controller. - optional string serviceName = 5; - - // podManagementPolicy controls how pods are created during initial scale up, - // when replacing pods on nodes, or when scaling down. The default policy is - // `OrderedReady`, where pods are created in increasing order (pod-0, then - // pod-1, etc) and the controller will wait until each pod is ready before - // continuing. When scaling down, the pods are removed in the opposite order. - // The alternative policy is `Parallel` which will create pods in parallel - // to match the desired scale without waiting, and on scale down will delete - // all pods at once. - // +optional - optional string podManagementPolicy = 6; - - // updateStrategy indicates the StatefulSetUpdateStrategy that will be - // employed to update Pods in the StatefulSet when a revision is made to - // Template. - optional StatefulSetUpdateStrategy updateStrategy = 7; - - // revisionHistoryLimit is the maximum number of revisions that will - // be maintained in the StatefulSet's revision history. The revision history - // consists of all revisions not represented by a currently applied - // StatefulSetSpec version. The default value is 10. - optional int32 revisionHistoryLimit = 8; -} - -// StatefulSetStatus represents the current state of a StatefulSet. -message StatefulSetStatus { - // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the - // StatefulSet's generation, which is updated on mutation by the API Server. - // +optional - optional int64 observedGeneration = 1; - - // replicas is the number of Pods created by the StatefulSet controller. - optional int32 replicas = 2; - - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. 
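The updateStrategy and partition fields above control which pod ordinals move to a new revision during a rolling update; as the StatefulSet controller applies it, ordinals below the partition keep the current revision. A toy illustration of that split (hypothetical revision names, not controller code):

package main

import "fmt"

// revisionFor is a toy illustration of a partitioned rolling update: pods
// below the partition ordinal keep the current revision, pods at or above it
// receive the update revision.
func revisionFor(ordinal, partition int, currentRevision, updateRevision string) string {
	if ordinal < partition {
		return currentRevision
	}
	return updateRevision
}

func main() {
	for ordinal := 0; ordinal < 5; ordinal++ {
		fmt.Printf("pod-%d -> %s\n", ordinal, revisionFor(ordinal, 3, "web-abc", "web-def"))
	}
	// pods 0-2 stay on web-abc, pods 3-4 move to web-def
}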
- optional int32 readyReplicas = 3; - - // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by currentRevision. - optional int32 currentReplicas = 4; - - // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by updateRevision. - optional int32 updatedReplicas = 5; - - // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the - // sequence [0,currentReplicas). - optional string currentRevision = 6; - - // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence - // [replicas-updatedReplicas,replicas) - optional string updateRevision = 7; - - // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller - // uses this field as a collision avoidance mechanism when it needs to create the name for the - // newest ControllerRevision. - // +optional - optional int32 collisionCount = 9; - - // Represents the latest available observations of a statefulset's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated StatefulSetCondition conditions = 10; -} - -// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet -// controller will use to perform updates. It includes any additional parameters -// necessary to perform the update for the indicated strategy. -message StatefulSetUpdateStrategy { - // Type indicates the type of the StatefulSetUpdateStrategy. - // Default is RollingUpdate. - // +optional - optional string type = 1; - - // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. - // +optional - optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; -} - diff --git a/vendor/k8s.io/api/apps/v1/register.go b/vendor/k8s.io/api/apps/v1/register.go deleted file mode 100644 index 0271010..0000000 --- a/vendor/k8s.io/api/apps/v1/register.go +++ /dev/null @@ -1,60 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "apps" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. 
- SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &StatefulSet{}, - &StatefulSetList{}, - &DaemonSet{}, - &DaemonSetList{}, - &ReplicaSet{}, - &ReplicaSetList{}, - &ControllerRevision{}, - &ControllerRevisionList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/apps/v1/types.go b/vendor/k8s.io/api/apps/v1/types.go deleted file mode 100644 index 4431ca2..0000000 --- a/vendor/k8s.io/api/apps/v1/types.go +++ /dev/null @@ -1,819 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - ControllerRevisionHashLabelKey = "controller-revision-hash" - StatefulSetRevisionLabel = ControllerRevisionHashLabelKey - DeprecatedRollbackTo = "deprecated.deployment.rollback.to" - DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" - StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// StatefulSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The StatefulSet guarantees that a given network identity will always -// map to the same storage identity. -type StatefulSet struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired identities of pods in this set. - // +optional - Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current status of Pods in this StatefulSet. This data - // may be out of date by some window of time. - // +optional - Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodManagementPolicyType defines the policy for creating pods under a stateful set. -type PodManagementPolicyType string - -const ( - // OrderedReadyPodManagement will create pods in strictly increasing order on - // scale up and strictly decreasing order on scale down, progressing only when - // the previous pod is ready or terminated. At most one pod will be changed - // at any time. - OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" - // ParallelPodManagement will create and delete pods as soon as the stateful set - // replica count is changed, and will not wait for pods to be ready or complete - // termination. 
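The deleted register.go wires these types into a runtime.Scheme through addKnownTypes. A short sketch of the same registration pattern, assuming k8s.io/api and k8s.io/apimachinery remain resolvable on the module path even though parts of the vendored k8s.io/api tree are removed in this patch:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Build a scheme and register a couple of apps/v1 kinds, mirroring
	// what the deleted addKnownTypes did for the whole group.
	scheme := runtime.NewScheme()
	gv := schema.GroupVersion{Group: "apps", Version: "v1"}
	scheme.AddKnownTypes(gv, &appsv1.Deployment{}, &appsv1.DeploymentList{})
	metav1.AddToGroupVersion(scheme, gv)

	// Look up the registered kind for a Deployment object.
	kinds, _, err := scheme.ObjectKinds(&appsv1.Deployment{})
	fmt.Println(kinds, err)
}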
- ParallelPodManagement = "Parallel" -) - -// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet -// controller will use to perform updates. It includes any additional parameters -// necessary to perform the update for the indicated strategy. -type StatefulSetUpdateStrategy struct { - // Type indicates the type of the StatefulSetUpdateStrategy. - // Default is RollingUpdate. - // +optional - Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` - // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. - // +optional - RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -// StatefulSetUpdateStrategyType is a string enumeration type that enumerates -// all possible update strategies for the StatefulSet controller. -type StatefulSetUpdateStrategyType string - -const ( - // RollingUpdateStatefulSetStrategyType indicates that update will be - // applied to all Pods in the StatefulSet with respect to the StatefulSet - // ordering constraints. When a scale operation is performed with this - // strategy, new Pods will be created from the specification version indicated - // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" - // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version - // tracking and ordered rolling restarts are disabled. Pods are recreated - // from the StatefulSetSpec when they are manually deleted. When a scale - // operation is performed with this strategy,specification version indicated - // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" -) - -// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. -type RollingUpdateStatefulSetStrategy struct { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - // Default value is 0. - // +optional - Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` -} - -// A StatefulSetSpec is the specification of a StatefulSet. -type StatefulSetSpec struct { - // replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // selector is a label query over pods that should match the replica count. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` - - // template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the StatefulSet - // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // volumeClaimTemplates is a list of claims that pods are allowed to reference. - // The StatefulSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pod. 
Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - // +optional - VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` - - // serviceName is the name of the service that governs this StatefulSet. - // This service must exist before the StatefulSet, and is responsible for - // the network identity of the set. Pods get DNS/hostnames that follow the - // pattern: pod-specific-string.serviceName.default.svc.cluster.local - // where "pod-specific-string" is managed by the StatefulSet controller. - ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` - - // podManagementPolicy controls how pods are created during initial scale up, - // when replacing pods on nodes, or when scaling down. The default policy is - // `OrderedReady`, where pods are created in increasing order (pod-0, then - // pod-1, etc) and the controller will wait until each pod is ready before - // continuing. When scaling down, the pods are removed in the opposite order. - // The alternative policy is `Parallel` which will create pods in parallel - // to match the desired scale without waiting, and on scale down will delete - // all pods at once. - // +optional - PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` - - // updateStrategy indicates the StatefulSetUpdateStrategy that will be - // employed to update Pods in the StatefulSet when a revision is made to - // Template. - UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` - - // revisionHistoryLimit is the maximum number of revisions that will - // be maintained in the StatefulSet's revision history. The revision history - // consists of all revisions not represented by a currently applied - // StatefulSetSpec version. The default value is 10. - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` -} - -// StatefulSetStatus represents the current state of a StatefulSet. -type StatefulSetStatus struct { - // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the - // StatefulSet's generation, which is updated on mutation by the API Server. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // replicas is the number of Pods created by the StatefulSet controller. - Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` - - // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by currentRevision. - CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` - - // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by updateRevision. 
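For reference, the StatefulSetSpec fields above can be populated directly from Go. A minimal construction sketch with hypothetical names (a "redis" StatefulSet), again assuming the k8s.io/api and k8s.io/apimachinery packages are importable:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	replicas := int32(3) // pointer distinguishes "unset" (defaults to 1) from an explicit 0
	labels := map[string]string{"app": "redis"}

	ss := appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{Name: "redis"},
		Spec: appsv1.StatefulSetSpec{
			Replicas:    &replicas,
			ServiceName: "redis", // governing (headless) service for pod DNS names
			Selector:    &metav1.LabelSelector{MatchLabels: labels},
			Template: corev1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{Labels: labels}, // must match the selector
			},
			UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
				Type: appsv1.RollingUpdateStatefulSetStrategyType,
			},
		},
	}
	fmt.Println(ss.Name, *ss.Spec.Replicas, ss.Spec.UpdateStrategy.Type)
}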
- UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` - - // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the - // sequence [0,currentReplicas). - CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` - - // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence - // [replicas-updatedReplicas,replicas) - UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` - - // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller - // uses this field as a collision avoidance mechanism when it needs to create the name for the - // newest ControllerRevision. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - - // Represents the latest available observations of a statefulset's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` -} - -type StatefulSetConditionType string - -// StatefulSetCondition describes the state of a statefulset at a certain point. -type StatefulSetCondition struct { - // Type of statefulset condition. - Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// StatefulSetList is a collection of StatefulSets. -type StatefulSetList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Deployment enables declarative updates for Pods and ReplicaSets. -type Deployment struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the Deployment. - // +optional - Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the Deployment. - // +optional - Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. 
- // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // It must match the pod template's labels. - Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` - - // Template describes the pods that will be created. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - - // Indicates that the deployment is paused. - // +optional - Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets - // to select new pods (and old pods being select by new ReplicaSet). - DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -// DeploymentStrategy describes how to replace existing pods with new ones. -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one. - RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. 
-type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // Defaults to 25%. - // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet - // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // Defaults to 25%. - // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is at most 130% of desired pods. - // +optional - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` -} - -// DeploymentStatus is the most recently observed status of the Deployment. -type DeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - - // Total number of ready pods targeted by this deployment. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - - // Represents the latest available observations of a deployment's current state. 
- // +patchMergeKey=type - // +patchStrategy=merge - Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` -} - -type DeploymentConditionType string - -// These are valid conditions of a deployment. -const ( - // Available means the deployment is available, ie. at least the minimum available - // replicas required are up and running for at least minReadySeconds. - DeploymentAvailable DeploymentConditionType = "Available" - // Progressing means the deployment is progressing. Progress for a deployment is - // considered when a new replica set is created or adopted, and when new pods scale - // up or old pods scale down. Progress is not estimated for paused deployments or - // when progressDeadlineSeconds is not specified. - DeploymentProgressing DeploymentConditionType = "Progressing" - // ReplicaFailure is added in a deployment when one of its pods fails to be created - // or deleted. - DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" -) - -// DeploymentCondition describes the state of a deployment at a certain point. -type DeploymentCondition struct { - // Type of deployment condition. - Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // The last time this condition was updated. - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DeploymentList is a list of Deployments. -type DeploymentList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Deployments. - Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. -type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. - // +optional - Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` - - // Rolling update config params. Present only if type = "RollingUpdate". - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as Deployment `strategy.rollingUpdate`. 
- // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -type DaemonSetUpdateStrategyType string - -const ( - // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. - RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" - - // Replace the old daemons only when it's killed - OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" -) - -// Spec to control the desired behavior of daemon set rolling update. -type RollingUpdateDaemonSet struct { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` -} - -// DaemonSetSpec is the specification of a daemon set. -type DaemonSetSpec struct { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` - - // The number of old history to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` -} - -// DaemonSetStatus represents the current status of a daemon set. 
-type DaemonSetStatus struct { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` - - // The most recent generation observed by the daemon set controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` - - // The total number of nodes that are running updated daemon pod - // +optional - UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` - - // Count of hash collisions for the DaemonSet. The DaemonSet controller - // uses this field as a collision avoidance mechanism when it needs to - // create the name for the newest ControllerRevision. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - - // Represents the latest available observations of a DaemonSet's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` -} - -type DaemonSetConditionType string - -// TODO: Add valid condition types of a DaemonSet. - -// DaemonSetCondition describes the state of a DaemonSet at a certain point. -type DaemonSetCondition struct { - // Type of DaemonSet condition. - Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DaemonSet represents the configuration of a daemon set. -type DaemonSet struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -const ( - // DefaultDaemonSetUniqueLabelKey is the default label key that is added - // to existing DaemonSet pods to distinguish between old and new - // DaemonSet pods during DaemonSet template updates. - DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DaemonSetList is a collection of daemon sets. -type DaemonSetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // A list of daemon sets. - Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -type ReplicaSet struct { - metav1.TypeMeta `json:",inline"` - - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSetList is a collection of ReplicaSets. -type ReplicaSetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of ReplicaSets. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller - Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` - - // Selector is a label query over pods that should match the replica count. - // Label keys and values that must match in order to be controlled by this replica set. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - // +optional - Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - - // The number of ready replicas for this replica set. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. 
- // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` - - // Represents the latest available observations of a replica set's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` -} - -type ReplicaSetConditionType string - -// These are valid conditions of a replica set. -const ( - // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created - // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted - // due to kubelet being down or finalizers are failing. - ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" -) - -// ReplicaSetCondition describes the state of a replica set at a certain point. -type ReplicaSetCondition struct { - // Type of replica set condition. - Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ControllerRevision implements an immutable snapshot of state data. Clients -// are responsible for serializing and deserializing the objects that contain -// their internal state. -// Once a ControllerRevision has been successfully created, it can not be updated. -// The API Server will fail validation of all requests that attempt to mutate -// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both -// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, -// it may be subject to name and representation changes in future releases, and clients should not -// depend on its stability. It is primarily for internal use by controllers. -type ControllerRevision struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data is the serialized representation of the state. - Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - - // Revision indicates the revision of the state represented by Data. - Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ControllerRevisionList is a resource containing a list of ControllerRevision objects. 
-type ControllerRevisionList struct { - metav1.TypeMeta `json:",inline"` - - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ControllerRevisions - Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go deleted file mode 100644 index 85fb159..0000000 --- a/vendor/k8s.io/api/apps/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,365 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_ControllerRevision = map[string]string{ - "": "ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "data": "Data is the serialized representation of the state.", - "revision": "Revision indicates the revision of the state represented by Data.", -} - -func (ControllerRevision) SwaggerDoc() map[string]string { - return map_ControllerRevision -} - -var map_ControllerRevisionList = map[string]string{ - "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of ControllerRevisions", -} - -func (ControllerRevisionList) SwaggerDoc() map[string]string { - return map_ControllerRevisionList -} - -var map_DaemonSet = map[string]string{ - "": "DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (DaemonSet) SwaggerDoc() map[string]string { - return map_DaemonSet -} - -var map_DaemonSetCondition = map[string]string{ - "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", - "type": "Type of DaemonSet condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (DaemonSetCondition) SwaggerDoc() map[string]string { - return map_DaemonSetCondition -} - -var map_DaemonSetList = map[string]string{ - "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "A list of daemon sets.", -} - -func (DaemonSetList) SwaggerDoc() map[string]string { - return map_DaemonSetList -} - -var map_DaemonSetSpec = map[string]string{ - "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", - "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", - "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", - "revisionHistoryLimit": "The number of old history to retain to allow rollback. 
This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", -} - -func (DaemonSetSpec) SwaggerDoc() map[string]string { - return map_DaemonSetSpec -} - -var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", - "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", - "observedGeneration": "The most recent generation observed by the daemon set controller.", - "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", - "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", - "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", - "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a DaemonSet's current state.", -} - -func (DaemonSetStatus) SwaggerDoc() map[string]string { - return map_DaemonSetStatus -} - -var map_DaemonSetUpdateStrategy = map[string]string{ - "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", - "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", - "rollingUpdate": "Rolling update config params. 
Present only if type = \"RollingUpdate\".", -} - -func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { - return map_DaemonSetUpdateStrategy -} - -var map_Deployment = map[string]string{ - "": "Deployment enables declarative updates for Pods and ReplicaSets.", - "metadata": "Standard object metadata.", - "spec": "Specification of the desired behavior of the Deployment.", - "status": "Most recently observed status of the Deployment.", -} - -func (Deployment) SwaggerDoc() map[string]string { - return map_Deployment -} - -var map_DeploymentCondition = map[string]string{ - "": "DeploymentCondition describes the state of a deployment at a certain point.", - "type": "Type of deployment condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastUpdateTime": "The last time this condition was updated.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (DeploymentCondition) SwaggerDoc() map[string]string { - return map_DeploymentCondition -} - -var map_DeploymentList = map[string]string{ - "": "DeploymentList is a list of Deployments.", - "metadata": "Standard list metadata.", - "items": "Items is the list of Deployments.", -} - -func (DeploymentList) SwaggerDoc() map[string]string { - return map_DeploymentList -} - -var map_DeploymentSpec = map[string]string{ - "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", - "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", - "paused": "Indicates that the deployment is paused.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", -} - -func (DeploymentSpec) SwaggerDoc() map[string]string { - return map_DeploymentSpec -} - -var map_DeploymentStatus = map[string]string{ - "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", -} - -func (DeploymentStatus) SwaggerDoc() map[string]string { - return map_DeploymentStatus -} - -var map_DeploymentStrategy = map[string]string{ - "": "DeploymentStrategy describes how to replace existing pods with new ones.", - "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", - "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", -} - -func (DeploymentStrategy) SwaggerDoc() map[string]string { - return map_DeploymentStrategy -} - -var map_ReplicaSet = map[string]string{ - "": "ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (ReplicaSet) SwaggerDoc() map[string]string { - return map_ReplicaSet -} - -var map_ReplicaSetCondition = map[string]string{ - "": "ReplicaSetCondition describes the state of a replica set at a certain point.", - "type": "Type of replica set condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "The last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (ReplicaSetCondition) SwaggerDoc() map[string]string { - return map_ReplicaSetCondition -} - -var map_ReplicaSetList = map[string]string{ - "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", -} - -func (ReplicaSetList) SwaggerDoc() map[string]string { - return map_ReplicaSetList -} - -var map_ReplicaSetSpec = map[string]string{ - "": "ReplicaSetSpec is the specification of a ReplicaSet.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", -} - -func (ReplicaSetSpec) SwaggerDoc() map[string]string { - return map_ReplicaSetSpec -} - -var map_ReplicaSetStatus = map[string]string{ - "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", - "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "readyReplicas": "The number of ready replicas for this replica set.", - "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", - "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", - "conditions": "Represents the latest available observations of a replica set's current state.", -} - -func (ReplicaSetStatus) SwaggerDoc() map[string]string { - return map_ReplicaSetStatus -} - -var map_RollingUpdateDaemonSet = map[string]string{ - "": "Spec to control the desired behavior of daemon set rolling update.", - "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", -} - -func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { - return map_RollingUpdateDaemonSet -} - -var map_RollingUpdateDeployment = map[string]string{ - "": "Spec to control the desired behavior of rolling update.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.", -} - -func (RollingUpdateDeployment) SwaggerDoc() map[string]string { - return map_RollingUpdateDeployment -} - -var map_RollingUpdateStatefulSetStrategy = map[string]string{ - "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", - "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", -} - -func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { - return map_RollingUpdateStatefulSetStrategy -} - -var map_StatefulSet = map[string]string{ - "": "StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", - "spec": "Spec defines the desired identities of pods in this set.", - "status": "Status is the current status of Pods in this StatefulSet. This data may be out of date by some window of time.", -} - -func (StatefulSet) SwaggerDoc() map[string]string { - return map_StatefulSet -} - -var map_StatefulSetCondition = map[string]string{ - "": "StatefulSetCondition describes the state of a statefulset at a certain point.", - "type": "Type of statefulset condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (StatefulSetCondition) SwaggerDoc() map[string]string { - return map_StatefulSetCondition -} - -var map_StatefulSetList = map[string]string{ - "": "StatefulSetList is a collection of StatefulSets.", -} - -func (StatefulSetList) SwaggerDoc() map[string]string { - return map_StatefulSetList -} - -var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. 
This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", -} - -func (StatefulSetSpec) SwaggerDoc() map[string]string { - return map_StatefulSetSpec -} - -var map_StatefulSetStatus = map[string]string{ - "": "StatefulSetStatus represents the current state of a StatefulSet.", - "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", - "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", - "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", - "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", - "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", - "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", - "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a statefulset's current state.", -} - -func (StatefulSetStatus) SwaggerDoc() map[string]string { - return map_StatefulSetStatus -} - -var map_StatefulSetUpdateStrategy = map[string]string{ - "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", - "type": "Type indicates the type of the StatefulSetUpdateStrategy. 
Default is RollingUpdate.", - "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", -} - -func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { - return map_StatefulSetUpdateStrategy -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go deleted file mode 100644 index 885203f..0000000 --- a/vendor/k8s.io/api/apps/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,772 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Data.DeepCopyInto(&out.Data) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. -func (in *ControllerRevision) DeepCopy() *ControllerRevision { - if in == nil { - return nil - } - out := new(ControllerRevision) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ControllerRevision) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ControllerRevision, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. -func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { - if in == nil { - return nil - } - out := new(ControllerRevisionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DaemonSet) DeepCopyInto(out *DaemonSet) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
-	in.Spec.DeepCopyInto(&out.Spec)
-	in.Status.DeepCopyInto(&out.Status)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet.
-func (in *DaemonSet) DeepCopy() *DaemonSet {
-	if in == nil {
-		return nil
-	}
-	out := new(DaemonSet)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DaemonSet) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) {
-	*out = *in
-	in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition.
-func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition {
-	if in == nil {
-		return nil
-	}
-	out := new(DaemonSetCondition)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) {
-	*out = *in
-	out.TypeMeta = in.TypeMeta
-	out.ListMeta = in.ListMeta
-	if in.Items != nil {
-		in, out := &in.Items, &out.Items
-		*out = make([]DaemonSet, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList.
-func (in *DaemonSetList) DeepCopy() *DaemonSetList {
-	if in == nil {
-		return nil
-	}
-	out := new(DaemonSetList)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
-func (in *DaemonSetList) DeepCopyObject() runtime.Object {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) {
-	*out = *in
-	if in.Selector != nil {
-		in, out := &in.Selector, &out.Selector
-		*out = new(metav1.LabelSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	in.Template.DeepCopyInto(&out.Template)
-	in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy)
-	if in.RevisionHistoryLimit != nil {
-		in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit
-		*out = new(int32)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec.
-func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(DaemonSetSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { - *out = *in - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DaemonSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. -func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { - if in == nil { - return nil - } - out := new(DaemonSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDaemonSet) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. -func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { - if in == nil { - return nil - } - out := new(DaemonSetUpdateStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Deployment) DeepCopyInto(out *Deployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. -func (in *Deployment) DeepCopy() *Deployment { - if in == nil { - return nil - } - out := new(Deployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Deployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. -func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { - if in == nil { - return nil - } - out := new(DeploymentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. 
-func (in *DeploymentList) DeepCopy() *DeploymentList { - if in == nil { - return nil - } - out := new(DeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.Strategy.DeepCopyInto(&out.Strategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. -func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { - if in == nil { - return nil - } - out := new(DeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. -func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { - if in == nil { - return nil - } - out := new(DeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. -func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { - if in == nil { - return nil - } - out := new(DeploymentStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. 
-func (in *ReplicaSet) DeepCopy() *ReplicaSet { - if in == nil { - return nil - } - out := new(ReplicaSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. -func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { - if in == nil { - return nil - } - out := new(ReplicaSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. -func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { - if in == nil { - return nil - } - out := new(ReplicaSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. -func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { - if in == nil { - return nil - } - out := new(ReplicaSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicaSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. -func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { - if in == nil { - return nil - } - out := new(ReplicaSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. -func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { - if in == nil { - return nil - } - out := new(RollingUpdateDaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. -func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { - if in == nil { - return nil - } - out := new(RollingUpdateDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { - *out = *in - if in.Partition != nil { - in, out := &in.Partition, &out.Partition - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. -func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { - if in == nil { - return nil - } - out := new(RollingUpdateStatefulSetStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. -func (in *StatefulSet) DeepCopy() *StatefulSet { - if in == nil { - return nil - } - out := new(StatefulSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StatefulSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. -func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { - if in == nil { - return nil - } - out := new(StatefulSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StatefulSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. -func (in *StatefulSetList) DeepCopy() *StatefulSetList { - if in == nil { - return nil - } - out := new(StatefulSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StatefulSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]corev1.PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. -func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { - if in == nil { - return nil - } - out := new(StatefulSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { - *out = *in - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]StatefulSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. -func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { - if in == nil { - return nil - } - out := new(StatefulSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateStatefulSetStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. 
-func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { - if in == nil { - return nil - } - out := new(StatefulSetUpdateStrategy) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/apps/v1beta1/doc.go b/vendor/k8s.io/api/apps/v1beta1/doc.go deleted file mode 100644 index 6047ed5..0000000 --- a/vendor/k8s.io/api/apps/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -package v1beta1 // import "k8s.io/api/apps/v1beta1" diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go b/vendor/k8s.io/api/apps/v1beta1/generated.pb.go deleted file mode 100644 index ef9aa8e..0000000 --- a/vendor/k8s.io/api/apps/v1beta1/generated.pb.go +++ /dev/null @@ -1,5290 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto - - It has these top-level messages: - ControllerRevision - ControllerRevisionList - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - RollbackConfig - RollingUpdateDeployment - RollingUpdateStatefulSetStrategy - Scale - ScaleSpec - ScaleStatus - StatefulSet - StatefulSetCondition - StatefulSetList - StatefulSetSpec - StatefulSetStatus - StatefulSetUpdateStrategy -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (*RollingUpdateDeployment) ProtoMessage() {} -func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} -} - -func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } -func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} -func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} -} - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) 
ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } -func (*StatefulSetUpdateStrategy) ProtoMessage() {} -func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} -} - -func init() { - proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta1.ControllerRevision") - proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta1.ControllerRevisionList") - proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta1.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta1.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta1.DeploymentList") - proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.apps.v1beta1.DeploymentRollback") - proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta1.DeploymentStrategy") - proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.apps.v1beta1.RollbackConfig") - proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta1.RollingUpdateStatefulSetStrategy") - proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta1.ScaleStatus") - proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta1.StatefulSet") - proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta1.StatefulSetCondition") - proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta1.StatefulSetList") - proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta1.StatefulSetSpec") - proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta1.StatefulSetStatus") - proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta1.StatefulSetUpdateStrategy") -} -func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := 
m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil -} - -func (m *ControllerRevisionList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - return i, nil -} - -func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n7, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - return i, nil -} - -func (m *DeploymentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.UpdatedAnnotations) > 0 { - keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) - for k := range m.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { - dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n10, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - return i, nil -} - -func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n11, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n12, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n13, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n14, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) - } - return i, nil -} - -func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var 
l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil -} - -func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n15, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - return i, nil -} - -func (m *RollbackConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollbackConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil -} - -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n16, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.MaxSurge != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n17, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - return i, nil -} - -func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Partition != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) - } - return i, nil -} - -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l 
int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n18, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n19, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n20, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - return i, nil -} - -func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil -} - -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil -} - -func (m *StatefulSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n21, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n22, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n23, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - return i, nil -} - -func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n24, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n25, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n26, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n27, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n28, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - return i, nil -} - -func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - 
dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n29, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *ControllerRevision) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Data.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Revision)) - return n -} - -func (m *ControllerRevisionList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Deployment) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentList) Size() (n 
int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DeploymentRollback) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.UpdatedAnnotations) > 0 { - for k, v := range m.UpdatedAnnotations { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.RollbackTo != nil { - l = m.RollbackTo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } - return n -} - -func (m *DeploymentStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - return n -} - -func (m *DeploymentStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RollbackConfig) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Revision)) - return n -} - -func (m *RollingUpdateDeployment) Size() (n int) { - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { - var l int - _ = l - if m.Partition != nil { - n += 1 + sovGenerated(uint64(*m.Partition)) - } - return n -} - -func (m *Scale) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + 
sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatefulSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatefulSetCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatefulSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StatefulSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeClaimTemplates) > 0 { - for _, e := range m.VolumeClaimTemplates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodManagementPolicy) - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - return n -} - -func (m *StatefulSetStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - l = len(m.CurrentRevision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UpdateRevision) - n += 1 + l + sovGenerated(uint64(l)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StatefulSetUpdateStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ControllerRevision) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this 
*ControllerRevisionList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Deployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentRollback) String() string { - if this == nil { - return "nil" - } - keysForUpdatedAnnotations := make([]string, 0, len(this.UpdatedAnnotations)) - for k := range this.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - mapStringForUpdatedAnnotations := "map[string]string{" - for _, k := range keysForUpdatedAnnotations { - mapStringForUpdatedAnnotations += fmt.Sprintf("%v: %v,", k, this.UpdatedAnnotations[k]) - } - mapStringForUpdatedAnnotations += "}" - s := strings.Join([]string{`&DeploymentRollback{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UpdatedAnnotations:` + mapStringForUpdatedAnnotations + `,`, - `RollbackTo:` + strings.Replace(strings.Replace(this.RollbackTo.String(), "RollbackConfig", "RollbackConfig", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", 
"k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `RollbackTo:` + strings.Replace(fmt.Sprintf("%v", this.RollbackTo), "RollbackConfig", "RollbackConfig", 1) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollbackConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateStatefulSetStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, - `Partition:` + valueToStringGenerated(this.Partition) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this 
*ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetStatus{`, - 
`ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, - `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ControllerRevision) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ControllerRevision{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } 
- var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 
- for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := 
iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return 
fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) - } - m.UpdatedReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) - } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) - } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollbackConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollbackConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollbackConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollingUpdateDeployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateDeployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxSurge", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxSurge == nil { - m.MaxSurge = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxSurge.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RollingUpdateStatefulSetStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RollingUpdateStatefulSetStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Partition = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += 
skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Scale) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Scale: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Selector == nil { - m.Selector = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetSelector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetSelector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } 
- if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = StatefulSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { 
- return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Items = append(m.Items, StatefulSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeClaimTemplates", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeClaimTemplates = append(m.VolumeClaimTemplates, 
k8s_io_api_core_v1.PersistentVolumeClaim{}) - if err := m.VolumeClaimTemplates[len(m.VolumeClaimTemplates)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodManagementPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PodManagementPolicy = PodManagementPolicyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetStatus: illegal tag %d (wire type %d)", fieldNum, 
wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - m.CurrentReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) - } - m.UpdatedReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentRevision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CurrentRevision = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateRevision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UpdateRevision = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, StatefulSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StatefulSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StatefulSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StatefulSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = StatefulSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateStatefulSetStrategy{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - 
if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1859 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcd, 0x6f, 0x24, 0x47, - 0x15, 0x77, 0x8f, 0x67, 0xec, 0xf1, 0x73, 0x3c, 0xde, 0x2d, 0x9b, 0xf5, 0xc4, 0x81, 0xb1, 0xd5, - 0x44, 0x89, 0xf3, 0xe1, 0x9e, 0xac, 0x13, 0xa2, 0x64, 0x17, 0x45, 0x78, 0xbc, 0x4b, 0xb2, 0x91, - 0x8d, 0x9d, 0xb2, 0x1d, 0x44, 0x00, 0x29, 0x35, 0x3d, 0xb5, 0xb3, 0x1d, 0xf7, 0x97, 0xba, 0x6b, - 0x86, 0x1d, 0x71, 0xe1, 0x0f, 0x40, 0x0a, 0x67, 0xfe, 0x0a, 0x8e, 0x08, 0x6e, 0x9c, 0xf6, 0x82, - 0x14, 0x71, 0x21, 0x27, 0x8b, 0x9d, 0x5c, 0x81, 0x1b, 0x97, 0x95, 0x90, 0x50, 0x55, 0x57, 0x7f, - 0x77, 0xdb, 0x6d, 0xa4, 0xf5, 0x21, 0xb7, 0xe9, 0x7a, 0xef, 0xfd, 0x5e, 0x7d, 0xbc, 0xaf, 0xdf, - 0xc0, 0x8f, 0xce, 0xde, 0xf3, 0x35, 0xc3, 0xe9, 0x9e, 0x8d, 0xfa, 0xd4, 0xb3, 0x29, 0xa3, 0x7e, - 0x77, 0x4c, 0xed, 0x81, 0xe3, 0x75, 0xa5, 0x80, 0xb8, 0x46, 0x97, 0xb8, 0xae, 0xdf, 0x1d, 0xdf, - 0xee, 0x53, 0x46, 0x6e, 0x77, 0x87, 0xd4, 0xa6, 0x1e, 0x61, 0x74, 0xa0, 0xb9, 0x9e, 0xc3, 0x1c, - 0xb4, 0x16, 0x28, 0x6a, 0xc4, 0x35, 0x34, 0xae, 0xa8, 0x49, 0xc5, 0xf5, 0xed, 0xa1, 0xc1, 0x1e, - 0x8d, 0xfa, 0x9a, 0xee, 0x58, 0xdd, 0xa1, 0x33, 0x74, 0xba, 0x42, 0xbf, 0x3f, 0x7a, 0x28, 0xbe, - 0xc4, 0x87, 0xf8, 0x15, 0xe0, 0xac, 0xab, 0x09, 0x87, 0xba, 0xe3, 0xd1, 0xee, 0x38, 0xe7, 0x6b, - 0xfd, 0x9d, 0x58, 0xc7, 0x22, 0xfa, 
0x23, 0xc3, 0xa6, 0xde, 0xa4, 0xeb, 0x9e, 0x0d, 0xf9, 0x82, - 0xdf, 0xb5, 0x28, 0x23, 0x45, 0x56, 0xdd, 0x32, 0x2b, 0x6f, 0x64, 0x33, 0xc3, 0xa2, 0x39, 0x83, - 0x77, 0x2f, 0x33, 0xf0, 0xf5, 0x47, 0xd4, 0x22, 0x39, 0xbb, 0xb7, 0xcb, 0xec, 0x46, 0xcc, 0x30, - 0xbb, 0x86, 0xcd, 0x7c, 0xe6, 0x65, 0x8d, 0xd4, 0xff, 0x28, 0x80, 0xf6, 0x1c, 0x9b, 0x79, 0x8e, - 0x69, 0x52, 0x0f, 0xd3, 0xb1, 0xe1, 0x1b, 0x8e, 0x8d, 0x3e, 0x87, 0x26, 0x3f, 0xcf, 0x80, 0x30, - 0xd2, 0x56, 0x36, 0x95, 0xad, 0xc5, 0x9d, 0xb7, 0xb4, 0xf8, 0xa6, 0x23, 0x78, 0xcd, 0x3d, 0x1b, - 0xf2, 0x05, 0x5f, 0xe3, 0xda, 0xda, 0xf8, 0xb6, 0x76, 0xd8, 0xff, 0x82, 0xea, 0xec, 0x80, 0x32, - 0xd2, 0x43, 0x4f, 0xce, 0x37, 0x66, 0xa6, 0xe7, 0x1b, 0x10, 0xaf, 0xe1, 0x08, 0x15, 0x1d, 0x42, - 0x5d, 0xa0, 0xd7, 0x04, 0xfa, 0x76, 0x29, 0xba, 0x3c, 0xb4, 0x86, 0xc9, 0xaf, 0xee, 0x3f, 0x66, - 0xd4, 0xe6, 0xdb, 0xeb, 0xbd, 0x20, 0xa1, 0xeb, 0xf7, 0x08, 0x23, 0x58, 0x00, 0xa1, 0x37, 0xa1, - 0xe9, 0xc9, 0xed, 0xb7, 0x67, 0x37, 0x95, 0xad, 0xd9, 0xde, 0x0d, 0xa9, 0xd5, 0x0c, 0x8f, 0x85, - 0x23, 0x0d, 0xf5, 0x89, 0x02, 0xb7, 0xf2, 0xe7, 0xde, 0x37, 0x7c, 0x86, 0x7e, 0x91, 0x3b, 0xbb, - 0x56, 0xed, 0xec, 0xdc, 0x5a, 0x9c, 0x3c, 0x72, 0x1c, 0xae, 0x24, 0xce, 0x7d, 0x04, 0x0d, 0x83, - 0x51, 0xcb, 0x6f, 0xd7, 0x36, 0x67, 0xb7, 0x16, 0x77, 0xde, 0xd0, 0x4a, 0x02, 0x58, 0xcb, 0xef, - 0xae, 0xb7, 0x24, 0x71, 0x1b, 0x0f, 0x38, 0x02, 0x0e, 0x80, 0xd4, 0xdf, 0xd6, 0x00, 0xee, 0x51, - 0xd7, 0x74, 0x26, 0x16, 0xb5, 0xd9, 0x35, 0x3c, 0xdd, 0x03, 0xa8, 0xfb, 0x2e, 0xd5, 0xe5, 0xd3, - 0xbd, 0x5a, 0x7a, 0x82, 0x78, 0x53, 0xc7, 0x2e, 0xd5, 0xe3, 0x47, 0xe3, 0x5f, 0x58, 0x40, 0xa0, - 0x4f, 0x60, 0xce, 0x67, 0x84, 0x8d, 0x7c, 0xf1, 0x64, 0x8b, 0x3b, 0xaf, 0x55, 0x01, 0x13, 0x06, - 0xbd, 0x96, 0x84, 0x9b, 0x0b, 0xbe, 0xb1, 0x04, 0x52, 0xff, 0x3e, 0x0b, 0x2b, 0xb1, 0xf2, 0x9e, - 0x63, 0x0f, 0x0c, 0xc6, 0x43, 0xfa, 0x2e, 0xd4, 0xd9, 0xc4, 0xa5, 0xe2, 0x4e, 0x16, 0x7a, 0xaf, - 0x86, 0x9b, 0x39, 0x99, 0xb8, 0xf4, 0xd9, 0xf9, 0xc6, 0x5a, 0x81, 0x09, 0x17, 0x61, 0x61, 0x84, - 0xf6, 0xa3, 0x7d, 0xd6, 0x84, 0xf9, 0x3b, 0x69, 0xe7, 0xcf, 0xce, 0x37, 0x0a, 0x0a, 0x88, 0x16, - 0x21, 0xa5, 0xb7, 0x88, 0x5e, 0x81, 0x39, 0x8f, 0x12, 0xdf, 0xb1, 0xdb, 0x75, 0x81, 0x16, 0x1d, - 0x05, 0x8b, 0x55, 0x2c, 0xa5, 0xe8, 0x35, 0x98, 0xb7, 0xa8, 0xef, 0x93, 0x21, 0x6d, 0x37, 0x84, - 0xe2, 0xb2, 0x54, 0x9c, 0x3f, 0x08, 0x96, 0x71, 0x28, 0x47, 0x5f, 0x40, 0xcb, 0x24, 0x3e, 0x3b, - 0x75, 0x07, 0x84, 0xd1, 0x13, 0xc3, 0xa2, 0xed, 0x39, 0x71, 0xa1, 0xaf, 0x57, 0x7b, 0x7b, 0x6e, - 0xd1, 0xbb, 0x25, 0xd1, 0x5b, 0xfb, 0x29, 0x24, 0x9c, 0x41, 0x46, 0x63, 0x40, 0x7c, 0xe5, 0xc4, - 0x23, 0xb6, 0x1f, 0x5c, 0x14, 0xf7, 0x37, 0x7f, 0x65, 0x7f, 0xeb, 0xd2, 0x1f, 0xda, 0xcf, 0xa1, - 0xe1, 0x02, 0x0f, 0xea, 0x1f, 0x15, 0x68, 0xc5, 0xcf, 0x74, 0x0d, 0xb9, 0xfa, 0x51, 0x3a, 0x57, - 0xbf, 0x5f, 0x21, 0x38, 0x4b, 0x72, 0xf4, 0x9f, 0x35, 0x40, 0xb1, 0x12, 0x76, 0x4c, 0xb3, 0x4f, - 0xf4, 0x33, 0xb4, 0x09, 0x75, 0x9b, 0x58, 0x61, 0x4c, 0x46, 0x09, 0xf2, 0x13, 0x62, 0x51, 0x2c, - 0x24, 0xe8, 0x4b, 0x05, 0xd0, 0x48, 0x5c, 0xfd, 0x60, 0xd7, 0xb6, 0x1d, 0x46, 0xf8, 0x6d, 0x84, - 0x1b, 0xda, 0xab, 0xb0, 0xa1, 0xd0, 0x97, 0x76, 0x9a, 0x43, 0xb9, 0x6f, 0x33, 0x6f, 0x12, 0xbf, - 0x42, 0x5e, 0x01, 0x17, 0xb8, 0x46, 0x3f, 0x07, 0xf0, 0x24, 0xe6, 0x89, 0x23, 0xd3, 0xb6, 0xbc, - 0x06, 0x84, 0xee, 0xf7, 0x1c, 0xfb, 0xa1, 0x31, 0x8c, 0x0b, 0x0b, 0x8e, 0x20, 0x70, 0x02, 0x6e, - 0xfd, 0x3e, 0xac, 0x95, 0xec, 0x13, 0xdd, 0x80, 0xd9, 0x33, 0x3a, 0x09, 0xae, 0x0a, 0xf3, 0x9f, - 0x68, 0x15, 0x1a, 0x63, 0x62, 0x8e, 0x68, 0x90, 0x93, 0x38, 
0xf8, 0xb8, 0x53, 0x7b, 0x4f, 0x51, - 0xff, 0xd0, 0x48, 0x46, 0x0a, 0xaf, 0x37, 0x68, 0x8b, 0xb7, 0x07, 0xd7, 0x34, 0x74, 0xe2, 0x0b, - 0x8c, 0x46, 0xef, 0x85, 0xa0, 0x35, 0x04, 0x6b, 0x38, 0x92, 0xa2, 0x5f, 0x42, 0xd3, 0xa7, 0x26, - 0xd5, 0x99, 0xe3, 0xc9, 0x12, 0xf7, 0x76, 0xc5, 0x98, 0x22, 0x7d, 0x6a, 0x1e, 0x4b, 0xd3, 0x00, - 0x3e, 0xfc, 0xc2, 0x11, 0x24, 0xfa, 0x04, 0x9a, 0x8c, 0x5a, 0xae, 0x49, 0x18, 0x95, 0xb7, 0x97, - 0x8a, 0x2b, 0x5e, 0x3b, 0x38, 0xd8, 0x91, 0x33, 0x38, 0x91, 0x6a, 0xa2, 0x7a, 0x46, 0x71, 0x1a, - 0xae, 0xe2, 0x08, 0x06, 0xfd, 0x0c, 0x9a, 0x3e, 0xe3, 0x5d, 0x7d, 0x38, 0x11, 0x15, 0xe5, 0xa2, - 0xb6, 0x92, 0xac, 0xa3, 0x81, 0x49, 0x0c, 0x1d, 0xae, 0xe0, 0x08, 0x0e, 0xed, 0xc2, 0xb2, 0x65, - 0xd8, 0x98, 0x92, 0xc1, 0xe4, 0x98, 0xea, 0x8e, 0x3d, 0xf0, 0x45, 0x29, 0x6a, 0xf4, 0xd6, 0xa4, - 0xd1, 0xf2, 0x41, 0x5a, 0x8c, 0xb3, 0xfa, 0x68, 0x1f, 0x56, 0xc3, 0xb6, 0xfb, 0x91, 0xe1, 0x33, - 0xc7, 0x9b, 0xec, 0x1b, 0x96, 0xc1, 0x44, 0x81, 0x6a, 0xf4, 0xda, 0xd3, 0xf3, 0x8d, 0x55, 0x5c, - 0x20, 0xc7, 0x85, 0x56, 0xbc, 0x76, 0xba, 0x64, 0xe4, 0xd3, 0x81, 0x28, 0x38, 0xcd, 0xb8, 0x76, - 0x1e, 0x89, 0x55, 0x2c, 0xa5, 0xe8, 0xa7, 0xa9, 0x30, 0x6d, 0x5e, 0x2d, 0x4c, 0x5b, 0xe5, 0x21, - 0x8a, 0x4e, 0x61, 0xcd, 0xf5, 0x9c, 0xa1, 0x47, 0x7d, 0xff, 0x1e, 0x25, 0x03, 0xd3, 0xb0, 0x69, - 0x78, 0x33, 0x0b, 0xe2, 0x44, 0x2f, 0x4d, 0xcf, 0x37, 0xd6, 0x8e, 0x8a, 0x55, 0x70, 0x99, 0xad, - 0xfa, 0x97, 0x3a, 0xdc, 0xc8, 0xf6, 0x38, 0xf4, 0x31, 0x20, 0xa7, 0xef, 0x53, 0x6f, 0x4c, 0x07, - 0x1f, 0x06, 0x83, 0x1b, 0x9f, 0x6e, 0x14, 0x31, 0xdd, 0x44, 0x79, 0x7b, 0x98, 0xd3, 0xc0, 0x05, - 0x56, 0xc1, 0x7c, 0x24, 0x13, 0xa0, 0x26, 0x36, 0x9a, 0x98, 0x8f, 0x72, 0x49, 0xb0, 0x0b, 0xcb, - 0x32, 0xf7, 0x43, 0xa1, 0x08, 0xd6, 0xc4, 0xbb, 0x9f, 0xa6, 0xc5, 0x38, 0xab, 0x8f, 0x3e, 0x84, - 0x9b, 0x64, 0x4c, 0x0c, 0x93, 0xf4, 0x4d, 0x1a, 0x81, 0xd4, 0x05, 0xc8, 0x8b, 0x12, 0xe4, 0xe6, - 0x6e, 0x56, 0x01, 0xe7, 0x6d, 0xd0, 0x01, 0xac, 0x8c, 0xec, 0x3c, 0x54, 0x10, 0x87, 0x2f, 0x49, - 0xa8, 0x95, 0xd3, 0xbc, 0x0a, 0x2e, 0xb2, 0x43, 0x9f, 0x03, 0xe8, 0x61, 0x63, 0xf6, 0xdb, 0x73, - 0xa2, 0x92, 0xbe, 0x59, 0x21, 0x5f, 0xa2, 0x6e, 0x1e, 0x57, 0xb1, 0x68, 0xc9, 0xc7, 0x09, 0x4c, - 0x74, 0x17, 0x96, 0x3c, 0x9e, 0x01, 0xd1, 0x56, 0xe7, 0xc5, 0x56, 0xbf, 0x23, 0xcd, 0x96, 0x70, - 0x52, 0x88, 0xd3, 0xba, 0xe8, 0x0e, 0xb4, 0x74, 0xc7, 0x34, 0x45, 0xe4, 0xef, 0x39, 0x23, 0x9b, - 0x89, 0xe0, 0x6d, 0xf4, 0x10, 0xef, 0xcc, 0x7b, 0x29, 0x09, 0xce, 0x68, 0xaa, 0x7f, 0x56, 0x92, - 0x6d, 0x26, 0x4c, 0x67, 0x74, 0x27, 0x35, 0xfa, 0xbc, 0x92, 0x19, 0x7d, 0x6e, 0xe5, 0x2d, 0x12, - 0x93, 0x8f, 0x01, 0x4b, 0x3c, 0xf8, 0x0d, 0x7b, 0x18, 0x3c, 0xb8, 0x2c, 0x89, 0x6f, 0x5d, 0x98, - 0x4a, 0x91, 0x76, 0xa2, 0x31, 0xde, 0x14, 0x27, 0x4f, 0x0a, 0x71, 0x1a, 0x59, 0xfd, 0x00, 0x5a, - 0xe9, 0x3c, 0x4c, 0xcd, 0xf4, 0xca, 0xa5, 0x33, 0xfd, 0x37, 0x0a, 0xac, 0x95, 0x78, 0x47, 0x26, - 0xb4, 0x2c, 0xf2, 0x38, 0x11, 0x23, 0x97, 0xce, 0xc6, 0x9c, 0x35, 0x69, 0x01, 0x6b, 0xd2, 0x1e, - 0xd8, 0xec, 0xd0, 0x3b, 0x66, 0x9e, 0x61, 0x0f, 0x83, 0x77, 0x38, 0x48, 0x61, 0xe1, 0x0c, 0x36, - 0xfa, 0x0c, 0x9a, 0x16, 0x79, 0x7c, 0x3c, 0xf2, 0x86, 0x45, 0xf7, 0x55, 0xcd, 0x8f, 0xe8, 0x1f, - 0x07, 0x12, 0x05, 0x47, 0x78, 0xea, 0x21, 0x6c, 0xa6, 0x0e, 0xc9, 0x4b, 0x05, 0x7d, 0x38, 0x32, - 0x8f, 0x69, 0xfc, 0xe0, 0x6f, 0xc0, 0x82, 0x4b, 0x3c, 0x66, 0x44, 0xe5, 0xa2, 0xd1, 0x5b, 0x9a, - 0x9e, 0x6f, 0x2c, 0x1c, 0x85, 0x8b, 0x38, 0x96, 0xab, 0xff, 0x55, 0xa0, 0x71, 0xac, 0x13, 0x93, - 0x5e, 0x03, 0x75, 0xb8, 0x97, 0xa2, 0x0e, 0x6a, 0x69, 0x10, 0x89, 0xfd, 0x94, 0xb2, 
0x86, 0xfd, - 0x0c, 0x6b, 0x78, 0xf9, 0x12, 0x9c, 0x8b, 0x09, 0xc3, 0xfb, 0xb0, 0x10, 0xb9, 0x4b, 0x55, 0x49, - 0xe5, 0xb2, 0x2a, 0xa9, 0xfe, 0xbe, 0x06, 0x8b, 0x09, 0x17, 0x57, 0xb3, 0xe6, 0xd7, 0x9d, 0x18, - 0x34, 0x78, 0x19, 0xda, 0xa9, 0x72, 0x10, 0x2d, 0x1c, 0x2a, 0x82, 0xf9, 0x2d, 0xee, 0xde, 0xf9, - 0x59, 0xe3, 0x03, 0x68, 0x31, 0xe2, 0x0d, 0x29, 0x0b, 0x65, 0xe2, 0xc2, 0x16, 0xe2, 0x49, 0xff, - 0x24, 0x25, 0xc5, 0x19, 0xed, 0xf5, 0xbb, 0xb0, 0x94, 0x72, 0x76, 0xa5, 0x21, 0xec, 0x4b, 0x7e, - 0x39, 0x71, 0x70, 0x5e, 0x43, 0x74, 0x7d, 0x9c, 0x8a, 0xae, 0xad, 0xf2, 0xcb, 0x4c, 0xa4, 0x4c, - 0x59, 0x8c, 0xe1, 0x4c, 0x8c, 0xbd, 0x5e, 0x09, 0xed, 0xe2, 0x48, 0xfb, 0x57, 0x0d, 0x56, 0x13, - 0xda, 0x31, 0x37, 0xfd, 0x61, 0xaa, 0x40, 0x6f, 0x65, 0x0a, 0x74, 0xbb, 0xc8, 0xe6, 0xb9, 0x91, - 0xd3, 0x62, 0x76, 0x37, 0xfb, 0xbc, 0xd9, 0xdd, 0x73, 0x20, 0xc5, 0xea, 0x9f, 0x14, 0x58, 0x4e, - 0xdc, 0xdd, 0x35, 0x30, 0xc6, 0x07, 0x69, 0xc6, 0xf8, 0x72, 0x95, 0xa0, 0x29, 0xa1, 0x8c, 0x7f, - 0x6d, 0xa4, 0x36, 0xff, 0xad, 0x27, 0x31, 0xbf, 0x86, 0xd5, 0xb1, 0x63, 0x8e, 0x2c, 0xba, 0x67, - 0x12, 0xc3, 0x0a, 0x15, 0xf8, 0xc4, 0x38, 0x9b, 0xfd, 0x63, 0x28, 0x82, 0xa7, 0x9e, 0x6f, 0xf8, - 0x8c, 0xda, 0xec, 0xd3, 0xd8, 0xb2, 0xf7, 0x5d, 0xe9, 0x64, 0xf5, 0xd3, 0x02, 0x38, 0x5c, 0xe8, - 0x04, 0xfd, 0x00, 0x16, 0xf9, 0xc0, 0x6c, 0xe8, 0x94, 0x73, 0x6f, 0x19, 0x58, 0x2b, 0x12, 0x68, - 0xf1, 0x38, 0x16, 0xe1, 0xa4, 0x1e, 0x7a, 0x04, 0x2b, 0xae, 0x33, 0x38, 0x20, 0x36, 0x19, 0x52, - 0x3e, 0x66, 0x1c, 0x39, 0xa6, 0xa1, 0x4f, 0x04, 0xb3, 0x59, 0xe8, 0xbd, 0x1b, 0x4e, 0xa6, 0x47, - 0x79, 0x95, 0x67, 0x9c, 0x22, 0xe4, 0x97, 0x45, 0x52, 0x17, 0x41, 0x22, 0x0f, 0x5a, 0x23, 0xd9, - 0xee, 0x25, 0xd1, 0x0b, 0xfe, 0x6f, 0xd9, 0xa9, 0x12, 0x61, 0xa7, 0x29, 0xcb, 0xb8, 0xfa, 0xa7, - 0xd7, 0x71, 0xc6, 0x43, 0x29, 0x71, 0x6b, 0xfe, 0x3f, 0xc4, 0x4d, 0xfd, 0x77, 0x1d, 0x6e, 0xe6, - 0x4a, 0x25, 0xfa, 0xf1, 0x05, 0x0c, 0xe7, 0xd6, 0x73, 0x63, 0x37, 0xb9, 0x01, 0x7d, 0xf6, 0x0a, - 0x03, 0xfa, 0x2e, 0x2c, 0xeb, 0x23, 0xcf, 0xa3, 0x36, 0xcb, 0xb0, 0x9a, 0x88, 0x1a, 0xed, 0xa5, - 0xc5, 0x38, 0xab, 0x5f, 0xc4, 0xae, 0x1a, 0x57, 0x64, 0x57, 0xc9, 0x5d, 0xc8, 0x09, 0x39, 0x08, - 0xbb, 0xfc, 0x2e, 0xe4, 0xa0, 0x9c, 0xd5, 0xe7, 0xd3, 0x41, 0x80, 0x1a, 0x21, 0xcc, 0xa7, 0xa7, - 0x83, 0xd3, 0x94, 0x14, 0x67, 0xb4, 0x0b, 0x98, 0xca, 0x42, 0x55, 0xa6, 0x82, 0x48, 0x8a, 0x84, - 0x81, 0xc8, 0xf1, 0xed, 0x2a, 0xb1, 0x5c, 0x99, 0x85, 0xa9, 0x7f, 0x53, 0xe0, 0xc5, 0xd2, 0x24, - 0x40, 0xbb, 0xa9, 0x96, 0xbb, 0x9d, 0x69, 0xb9, 0xdf, 0x2b, 0x35, 0x4c, 0xf4, 0x5d, 0xaf, 0x98, - 0x1a, 0xbd, 0x5f, 0x8d, 0x1a, 0x15, 0xcc, 0xed, 0x97, 0x73, 0xa4, 0xde, 0xf6, 0x93, 0xa7, 0x9d, - 0x99, 0xaf, 0x9e, 0x76, 0x66, 0xbe, 0x7e, 0xda, 0x99, 0xf9, 0xcd, 0xb4, 0xa3, 0x3c, 0x99, 0x76, - 0x94, 0xaf, 0xa6, 0x1d, 0xe5, 0xeb, 0x69, 0x47, 0xf9, 0xc7, 0xb4, 0xa3, 0xfc, 0xee, 0x9b, 0xce, - 0xcc, 0x67, 0xf3, 0xd2, 0xe3, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe9, 0x89, 0x29, 0x5c, 0x61, - 0x1b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta1/generated.proto b/vendor/k8s.io/api/apps/v1beta1/generated.proto deleted file mode 100644 index 6f41f06..0000000 --- a/vendor/k8s.io/api/apps/v1beta1/generated.proto +++ /dev/null @@ -1,483 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.apps.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the -// release notes for more information. -// ControllerRevision implements an immutable snapshot of state data. Clients -// are responsible for serializing and deserializing the objects that contain -// their internal state. -// Once a ControllerRevision has been successfully created, it can not be updated. -// The API Server will fail validation of all requests that attempt to mutate -// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both -// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, -// it may be subject to name and representation changes in future releases, and clients should not -// depend on its stability. It is primarily for internal use by controllers. -message ControllerRevision { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; - - // Revision indicates the revision of the state represented by Data. - optional int64 revision = 3; -} - -// ControllerRevisionList is a resource containing a list of ControllerRevision objects. -message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ControllerRevisions - repeated ControllerRevision items = 2; -} - -// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for -// more information. -// Deployment enables declarative updates for Pods and ReplicaSets. -message Deployment { - // Standard object metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the Deployment. - // +optional - optional DeploymentSpec spec = 2; - - // Most recently observed status of the Deployment. - // +optional - optional DeploymentStatus status = 3; -} - -// DeploymentCondition describes the state of a deployment at a certain point. -message DeploymentCondition { - // Type of deployment condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. 
- optional string status = 2; - - // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; - - // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; - - // The reason for the condition's last transition. - optional string reason = 4; - - // A human readable message indicating details about the transition. - optional string message = 5; -} - -// DeploymentList is a list of Deployments. -message DeploymentList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of Deployments. - repeated Deployment items = 2; -} - -// DEPRECATED. -// DeploymentRollback stores the information required to rollback a deployment. -message DeploymentRollback { - // Required: This must match the Name of a deployment. - optional string name = 1; - - // The annotations to be updated to a deployment - // +optional - map updatedAnnotations = 2; - - // The config of this deployment rollback. - optional RollbackConfig rollbackTo = 3; -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - optional int32 replicas = 1; - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - optional DeploymentStrategy strategy = 4; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - optional int32 minReadySeconds = 5; - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 2. - // +optional - optional int32 revisionHistoryLimit = 6; - - // Indicates that the deployment is paused. - // +optional - optional bool paused = 7; - - // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. - // +optional - optional RollbackConfig rollbackTo = 8; - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - // +optional - optional int32 progressDeadlineSeconds = 9; -} - -// DeploymentStatus is the most recently observed status of the Deployment. -message DeploymentStatus { - // The generation observed by the deployment controller. - // +optional - optional int64 observedGeneration = 1; - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). 
- // +optional - optional int32 replicas = 2; - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - optional int32 updatedReplicas = 3; - - // Total number of ready pods targeted by this deployment. - // +optional - optional int32 readyReplicas = 7; - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - optional int32 availableReplicas = 4; - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - optional int32 unavailableReplicas = 5; - - // Represents the latest available observations of a deployment's current state. - // +patchMergeKey=type - // +patchStrategy=merge - repeated DeploymentCondition conditions = 6; - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - optional int32 collisionCount = 8; -} - -// DeploymentStrategy describes how to replace existing pods with new ones. -message DeploymentStrategy { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - optional string type = 1; - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - optional RollingUpdateDeployment rollingUpdate = 2; -} - -// DEPRECATED. -message RollbackConfig { - // The revision to rollback to. If set to 0, rollback to the last revision. - // +optional - optional int64 revision = 1; -} - -// Spec to control the desired behavior of rolling update. -message RollingUpdateDeployment { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // Defaults to 25%. - // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet - // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // Defaults to 25%. - // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. 
- // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; -} - -// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. -message RollingUpdateStatefulSetStrategy { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - optional int32 partition = 1; -} - -// Scale represents a scaling request for a resource. -message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - optional ScaleSpec spec = 2; - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - optional ScaleStatus status = 3; -} - -// ScaleSpec describes the attributes of a scale subresource -message ScaleSpec { - // desired number of instances for the scaled object. - // +optional - optional int32 replicas = 1; -} - -// ScaleStatus represents the current status of a scale subresource. -message ScaleStatus { - // actual number of observed instances of the scaled object. - optional int32 replicas = 1; - - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - map selector = 2; - - // label selector for pods that should match the replicas count. This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. The string will be in the same format as the - // query-param syntax. If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - optional string targetSelector = 3; -} - -// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for -// more information. -// StatefulSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The StatefulSet guarantees that a given network identity will always -// map to the same storage identity. -message StatefulSet { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the desired identities of pods in this set. - // +optional - optional StatefulSetSpec spec = 2; - - // Status is the current status of Pods in this StatefulSet. This data - // may be out of date by some window of time. - // +optional - optional StatefulSetStatus status = 3; -} - -// StatefulSetCondition describes the state of a statefulset at a certain point. -message StatefulSetCondition { - // Type of statefulset condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. 
- // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// StatefulSetList is a collection of StatefulSets. -message StatefulSetList { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated StatefulSet items = 2; -} - -// A StatefulSetSpec is the specification of a StatefulSet. -message StatefulSetSpec { - // replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - // +optional - optional int32 replicas = 1; - - // selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the StatefulSet - // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; - - // volumeClaimTemplates is a list of claims that pods are allowed to reference. - // The StatefulSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pod. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - // +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; - - // serviceName is the name of the service that governs this StatefulSet. - // This service must exist before the StatefulSet, and is responsible for - // the network identity of the set. Pods get DNS/hostnames that follow the - // pattern: pod-specific-string.serviceName.default.svc.cluster.local - // where "pod-specific-string" is managed by the StatefulSet controller. - optional string serviceName = 5; - - // podManagementPolicy controls how pods are created during initial scale up, - // when replacing pods on nodes, or when scaling down. The default policy is - // `OrderedReady`, where pods are created in increasing order (pod-0, then - // pod-1, etc) and the controller will wait until each pod is ready before - // continuing. When scaling down, the pods are removed in the opposite order. - // The alternative policy is `Parallel` which will create pods in parallel - // to match the desired scale without waiting, and on scale down will delete - // all pods at once. - // +optional - optional string podManagementPolicy = 6; - - // updateStrategy indicates the StatefulSetUpdateStrategy that will be - // employed to update Pods in the StatefulSet when a revision is made to - // Template. - optional StatefulSetUpdateStrategy updateStrategy = 7; - - // revisionHistoryLimit is the maximum number of revisions that will - // be maintained in the StatefulSet's revision history. 
The revision history - // consists of all revisions not represented by a currently applied - // StatefulSetSpec version. The default value is 10. - optional int32 revisionHistoryLimit = 8; -} - -// StatefulSetStatus represents the current state of a StatefulSet. -message StatefulSetStatus { - // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the - // StatefulSet's generation, which is updated on mutation by the API Server. - // +optional - optional int64 observedGeneration = 1; - - // replicas is the number of Pods created by the StatefulSet controller. - optional int32 replicas = 2; - - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. - optional int32 readyReplicas = 3; - - // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by currentRevision. - optional int32 currentReplicas = 4; - - // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by updateRevision. - optional int32 updatedReplicas = 5; - - // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the - // sequence [0,currentReplicas). - optional string currentRevision = 6; - - // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence - // [replicas-updatedReplicas,replicas) - optional string updateRevision = 7; - - // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller - // uses this field as a collision avoidance mechanism when it needs to create the name for the - // newest ControllerRevision. - // +optional - optional int32 collisionCount = 9; - - // Represents the latest available observations of a statefulset's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated StatefulSetCondition conditions = 10; -} - -// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet -// controller will use to perform updates. It includes any additional parameters -// necessary to perform the update for the indicated strategy. -message StatefulSetUpdateStrategy { - // Type indicates the type of the StatefulSetUpdateStrategy. - optional string type = 1; - - // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. - optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; -} - diff --git a/vendor/k8s.io/api/apps/v1beta1/register.go b/vendor/k8s.io/api/apps/v1beta1/register.go deleted file mode 100644 index 5b16819..0000000 --- a/vendor/k8s.io/api/apps/v1beta1/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "apps" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &DeploymentRollback{}, - &Scale{}, - &StatefulSet{}, - &StatefulSetList{}, - &ControllerRevision{}, - &ControllerRevisionList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/apps/v1beta1/types.go b/vendor/k8s.io/api/apps/v1beta1/types.go deleted file mode 100644 index d462604..0000000 --- a/vendor/k8s.io/api/apps/v1beta1/types.go +++ /dev/null @@ -1,568 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - ControllerRevisionHashLabelKey = "controller-revision-hash" - StatefulSetRevisionLabel = ControllerRevisionHashLabelKey - StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" -) - -// ScaleSpec describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` -} - -// ScaleStatus represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - - // label selector for pods that should match the replicas count. This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. The string will be in the same format as the - // query-param syntax. 
If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Scale represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for -// more information. -// StatefulSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The StatefulSet guarantees that a given network identity will always -// map to the same storage identity. -type StatefulSet struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired identities of pods in this set. - // +optional - Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current status of Pods in this StatefulSet. This data - // may be out of date by some window of time. - // +optional - Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodManagementPolicyType defines the policy for creating pods under a stateful set. -type PodManagementPolicyType string - -const ( - // OrderedReadyPodManagement will create pods in strictly increasing order on - // scale up and strictly decreasing order on scale down, progressing only when - // the previous pod is ready or terminated. At most one pod will be changed - // at any time. - OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" - // ParallelPodManagement will create and delete pods as soon as the stateful set - // replica count is changed, and will not wait for pods to be ready or complete - // termination. - ParallelPodManagement = "Parallel" -) - -// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet -// controller will use to perform updates. It includes any additional parameters -// necessary to perform the update for the indicated strategy. -type StatefulSetUpdateStrategy struct { - // Type indicates the type of the StatefulSetUpdateStrategy. 
- Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` - // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. - RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -// StatefulSetUpdateStrategyType is a string enumeration type that enumerates -// all possible update strategies for the StatefulSet controller. -type StatefulSetUpdateStrategyType string - -const ( - // RollingUpdateStatefulSetStrategyType indicates that update will be - // applied to all Pods in the StatefulSet with respect to the StatefulSet - // ordering constraints. When a scale operation is performed with this - // strategy, new Pods will be created from the specification version indicated - // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" - // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version - // tracking and ordered rolling restarts are disabled. Pods are recreated - // from the StatefulSetSpec when they are manually deleted. When a scale - // operation is performed with this strategy,specification version indicated - // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" -) - -// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. -type RollingUpdateStatefulSetStrategy struct { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` -} - -// A StatefulSetSpec is the specification of a StatefulSet. -type StatefulSetSpec struct { - // replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // selector is a label query over pods that should match the replica count. - // If empty, defaulted to labels on the pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - - // template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the StatefulSet - // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // volumeClaimTemplates is a list of claims that pods are allowed to reference. - // The StatefulSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pod. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. 
- // +optional - VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` - - // serviceName is the name of the service that governs this StatefulSet. - // This service must exist before the StatefulSet, and is responsible for - // the network identity of the set. Pods get DNS/hostnames that follow the - // pattern: pod-specific-string.serviceName.default.svc.cluster.local - // where "pod-specific-string" is managed by the StatefulSet controller. - ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` - - // podManagementPolicy controls how pods are created during initial scale up, - // when replacing pods on nodes, or when scaling down. The default policy is - // `OrderedReady`, where pods are created in increasing order (pod-0, then - // pod-1, etc) and the controller will wait until each pod is ready before - // continuing. When scaling down, the pods are removed in the opposite order. - // The alternative policy is `Parallel` which will create pods in parallel - // to match the desired scale without waiting, and on scale down will delete - // all pods at once. - // +optional - PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` - - // updateStrategy indicates the StatefulSetUpdateStrategy that will be - // employed to update Pods in the StatefulSet when a revision is made to - // Template. - UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` - - // revisionHistoryLimit is the maximum number of revisions that will - // be maintained in the StatefulSet's revision history. The revision history - // consists of all revisions not represented by a currently applied - // StatefulSetSpec version. The default value is 10. - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` -} - -// StatefulSetStatus represents the current state of a StatefulSet. -type StatefulSetStatus struct { - // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the - // StatefulSet's generation, which is updated on mutation by the API Server. - // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // replicas is the number of Pods created by the StatefulSet controller. - Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` - - // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by currentRevision. - CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` - - // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by updateRevision. - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` - - // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the - // sequence [0,currentReplicas). 
- CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` - - // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence - // [replicas-updatedReplicas,replicas) - UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` - - // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller - // uses this field as a collision avoidance mechanism when it needs to create the name for the - // newest ControllerRevision. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - - // Represents the latest available observations of a statefulset's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` -} - -type StatefulSetConditionType string - -// StatefulSetCondition describes the state of a statefulset at a certain point. -type StatefulSetCondition struct { - // Type of statefulset condition. - Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// StatefulSetList is a collection of StatefulSets. -type StatefulSetList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for -// more information. -// Deployment enables declarative updates for Pods and ReplicaSets. -type Deployment struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the Deployment. - // +optional - Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the Deployment. - // +optional - Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. 
- // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - - // Template describes the pods that will be created. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 2. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - - // Indicates that the deployment is paused. - // +optional - Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` - - // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. - // +optional - RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - // +optional - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED. -// DeploymentRollback stores the information required to rollback a deployment. -type DeploymentRollback struct { - metav1.TypeMeta `json:",inline"` - // Required: This must match the Name of a deployment. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // The annotations to be updated to a deployment - // +optional - UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` - // The config of this deployment rollback. - RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` -} - -// DEPRECATED. -type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollback to the last revision. - // +optional - Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets - // to select new pods (and old pods being select by new ReplicaSet). 
- DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -// DeploymentStrategy describes how to replace existing pods with new ones. -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one. - RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. -type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // Defaults to 25%. - // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet - // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // Defaults to 25%. - // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. - // +optional - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` -} - -// DeploymentStatus is the most recently observed status of the Deployment. -type DeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. 
- // +optional - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - - // Total number of ready pods targeted by this deployment. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - - // Represents the latest available observations of a deployment's current state. - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` -} - -type DeploymentConditionType string - -// These are valid conditions of a deployment. -const ( - // Available means the deployment is available, ie. at least the minimum available - // replicas required are up and running for at least minReadySeconds. - DeploymentAvailable DeploymentConditionType = "Available" - // Progressing means the deployment is progressing. Progress for a deployment is - // considered when a new replica set is created or adopted, and when new pods scale - // up or old pods scale down. Progress is not estimated for paused deployments or - // when progressDeadlineSeconds is not specified. - DeploymentProgressing DeploymentConditionType = "Progressing" - // ReplicaFailure is added in a deployment when one of its pods fails to be created - // or deleted. - DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" -) - -// DeploymentCondition describes the state of a deployment at a certain point. -type DeploymentCondition struct { - // Type of deployment condition. - Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // The last time this condition was updated. - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. 
- Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DeploymentList is a list of Deployments. -type DeploymentList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Deployments. - Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the -// release notes for more information. -// ControllerRevision implements an immutable snapshot of state data. Clients -// are responsible for serializing and deserializing the objects that contain -// their internal state. -// Once a ControllerRevision has been successfully created, it can not be updated. -// The API Server will fail validation of all requests that attempt to mutate -// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both -// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, -// it may be subject to name and representation changes in future releases, and clients should not -// depend on its stability. It is primarily for internal use by controllers. -type ControllerRevision struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data is the serialized representation of the state. - Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - - // Revision indicates the revision of the state represented by Data. - Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ControllerRevisionList is a resource containing a list of ControllerRevision objects. -type ControllerRevisionList struct { - metav1.TypeMeta `json:",inline"` - - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ControllerRevisions - Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 68ebef3..0000000 --- a/vendor/k8s.io/api/apps/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,273 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_ControllerRevision = map[string]string{ - "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1beta2/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "data": "Data is the serialized representation of the state.", - "revision": "Revision indicates the revision of the state represented by Data.", -} - -func (ControllerRevision) SwaggerDoc() map[string]string { - return map_ControllerRevision -} - -var map_ControllerRevisionList = map[string]string{ - "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of ControllerRevisions", -} - -func (ControllerRevisionList) SwaggerDoc() map[string]string { - return map_ControllerRevisionList -} - -var map_Deployment = map[string]string{ - "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. 
Deployment enables declarative updates for Pods and ReplicaSets.", - "metadata": "Standard object metadata.", - "spec": "Specification of the desired behavior of the Deployment.", - "status": "Most recently observed status of the Deployment.", -} - -func (Deployment) SwaggerDoc() map[string]string { - return map_Deployment -} - -var map_DeploymentCondition = map[string]string{ - "": "DeploymentCondition describes the state of a deployment at a certain point.", - "type": "Type of deployment condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastUpdateTime": "The last time this condition was updated.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (DeploymentCondition) SwaggerDoc() map[string]string { - return map_DeploymentCondition -} - -var map_DeploymentList = map[string]string{ - "": "DeploymentList is a list of Deployments.", - "metadata": "Standard list metadata.", - "items": "Items is the list of Deployments.", -} - -func (DeploymentList) SwaggerDoc() map[string]string { - return map_DeploymentList -} - -var map_DeploymentRollback = map[string]string{ - "": "DEPRECATED. DeploymentRollback stores the information required to rollback a deployment.", - "name": "Required: This must match the Name of a deployment.", - "updatedAnnotations": "The annotations to be updated to a deployment", - "rollbackTo": "The config of this deployment rollback.", -} - -func (DeploymentRollback) SwaggerDoc() map[string]string { - return map_DeploymentRollback -} - -var map_DeploymentSpec = map[string]string{ - "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", - "template": "Template describes the pods that will be created.", - "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 2.", - "paused": "Indicates that the deployment is paused.", - "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", -} - -func (DeploymentSpec) SwaggerDoc() map[string]string { - return map_DeploymentSpec -} - -var map_DeploymentStatus = map[string]string{ - "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", -} - -func (DeploymentStatus) SwaggerDoc() map[string]string { - return map_DeploymentStatus -} - -var map_DeploymentStrategy = map[string]string{ - "": "DeploymentStrategy describes how to replace existing pods with new ones.", - "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", - "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", -} - -func (DeploymentStrategy) SwaggerDoc() map[string]string { - return map_DeploymentStrategy -} - -var map_RollbackConfig = map[string]string{ - "": "DEPRECATED.", - "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", -} - -func (RollbackConfig) SwaggerDoc() map[string]string { - return map_RollbackConfig -} - -var map_RollingUpdateDeployment = map[string]string{ - "": "Spec to control the desired behavior of rolling update.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", -} - -func (RollingUpdateDeployment) SwaggerDoc() map[string]string { - return map_RollingUpdateDeployment -} - -var map_RollingUpdateStatefulSetStrategy = map[string]string{ - "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", - "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned.", -} - -func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { - return map_RollingUpdateStatefulSetStrategy -} - -var map_Scale = map[string]string{ - "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", -} - -func (Scale) SwaggerDoc() map[string]string { - return map_Scale -} - -var map_ScaleSpec = map[string]string{ - "": "ScaleSpec describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", -} - -func (ScaleSpec) SwaggerDoc() map[string]string { - return map_ScaleSpec -} - -var map_ScaleStatus = map[string]string{ - "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", -} - -func (ScaleStatus) SwaggerDoc() map[string]string { - return map_ScaleStatus -} - -var map_StatefulSet = map[string]string{ - "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1beta2/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", - "spec": "Spec defines the desired identities of pods in this set.", - "status": "Status is the current status of Pods in this StatefulSet. 
This data may be out of date by some window of time.", -} - -func (StatefulSet) SwaggerDoc() map[string]string { - return map_StatefulSet -} - -var map_StatefulSetCondition = map[string]string{ - "": "StatefulSetCondition describes the state of a statefulset at a certain point.", - "type": "Type of statefulset condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (StatefulSetCondition) SwaggerDoc() map[string]string { - return map_StatefulSetCondition -} - -var map_StatefulSetList = map[string]string{ - "": "StatefulSetList is a collection of StatefulSets.", -} - -func (StatefulSetList) SwaggerDoc() map[string]string { - return map_StatefulSetList -} - -var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. If empty, defaulted to labels on the pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. 
The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", -} - -func (StatefulSetSpec) SwaggerDoc() map[string]string { - return map_StatefulSetSpec -} - -var map_StatefulSetStatus = map[string]string{ - "": "StatefulSetStatus represents the current state of a StatefulSet.", - "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", - "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", - "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", - "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", - "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", - "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", - "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a statefulset's current state.", -} - -func (StatefulSetStatus) SwaggerDoc() map[string]string { - return map_StatefulSetStatus -} - -var map_StatefulSetUpdateStrategy = map[string]string{ - "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", - "type": "Type indicates the type of the StatefulSetUpdateStrategy.", - "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", -} - -func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { - return map_StatefulSetUpdateStrategy -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 93892bf..0000000 --- a/vendor/k8s.io/api/apps/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,594 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Data.DeepCopyInto(&out.Data) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. -func (in *ControllerRevision) DeepCopy() *ControllerRevision { - if in == nil { - return nil - } - out := new(ControllerRevision) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ControllerRevision) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ControllerRevision, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. -func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { - if in == nil { - return nil - } - out := new(ControllerRevisionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Deployment) DeepCopyInto(out *Deployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. -func (in *Deployment) DeepCopy() *Deployment { - if in == nil { - return nil - } - out := new(Deployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Deployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. 
-func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { - if in == nil { - return nil - } - out := new(DeploymentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. -func (in *DeploymentList) DeepCopy() *DeploymentList { - if in == nil { - return nil - } - out := new(DeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.UpdatedAnnotations != nil { - in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.RollbackTo = in.RollbackTo - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. -func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { - if in == nil { - return nil - } - out := new(DeploymentRollback) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentRollback) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.Strategy.DeepCopyInto(&out.Strategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.RollbackTo != nil { - in, out := &in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. -func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { - if in == nil { - return nil - } - out := new(DeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. -func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { - if in == nil { - return nil - } - out := new(DeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. -func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { - if in == nil { - return nil - } - out := new(DeploymentStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. -func (in *RollbackConfig) DeepCopy() *RollbackConfig { - if in == nil { - return nil - } - out := new(RollbackConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. -func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { - if in == nil { - return nil - } - out := new(RollingUpdateDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { - *out = *in - if in.Partition != nil { - in, out := &in.Partition, &out.Partition - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. -func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { - if in == nil { - return nil - } - out := new(RollingUpdateStatefulSetStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Scale) DeepCopyInto(out *Scale) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. -func (in *Scale) DeepCopy() *Scale { - if in == nil { - return nil - } - out := new(Scale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Scale) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. -func (in *ScaleSpec) DeepCopy() *ScaleSpec { - if in == nil { - return nil - } - out := new(ScaleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. -func (in *ScaleStatus) DeepCopy() *ScaleStatus { - if in == nil { - return nil - } - out := new(ScaleStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. -func (in *StatefulSet) DeepCopy() *StatefulSet { - if in == nil { - return nil - } - out := new(StatefulSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StatefulSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. -func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { - if in == nil { - return nil - } - out := new(StatefulSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StatefulSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. -func (in *StatefulSetList) DeepCopy() *StatefulSetList { - if in == nil { - return nil - } - out := new(StatefulSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StatefulSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]corev1.PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. -func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { - if in == nil { - return nil - } - out := new(StatefulSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { - *out = *in - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]StatefulSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. -func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { - if in == nil { - return nil - } - out := new(StatefulSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateStatefulSetStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. 
-func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy {
-	if in == nil {
-		return nil
-	}
-	out := new(StatefulSetUpdateStrategy)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/vendor/k8s.io/api/apps/v1beta2/doc.go b/vendor/k8s.io/api/apps/v1beta2/doc.go
deleted file mode 100644
index e93e164..0000000
--- a/vendor/k8s.io/api/apps/v1beta2/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-// +k8s:openapi-gen=true
-
-package v1beta2 // import "k8s.io/api/apps/v1beta2"
diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go b/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
deleted file mode 100644
index 72d832c..0000000
--- a/vendor/k8s.io/api/apps/v1beta2/generated.pb.go
+++ /dev/null
@@ -1,7584 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by protoc-gen-gogo.
-// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto
-// DO NOT EDIT!
-
-/*
-	Package v1beta2 is a generated protocol buffer package.
-
-	It is generated from these files:
-		k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto
-
-	It has these top-level messages:
-		ControllerRevision
-		ControllerRevisionList
-		DaemonSet
-		DaemonSetCondition
-		DaemonSetList
-		DaemonSetSpec
-		DaemonSetStatus
-		DaemonSetUpdateStrategy
-		Deployment
-		DeploymentCondition
-		DeploymentList
-		DeploymentSpec
-		DeploymentStatus
-		DeploymentStrategy
-		ReplicaSet
-		ReplicaSetCondition
-		ReplicaSetList
-		ReplicaSetSpec
-		ReplicaSetStatus
-		RollingUpdateDaemonSet
-		RollingUpdateDeployment
-		RollingUpdateStatefulSetStrategy
-		Scale
-		ScaleSpec
-		ScaleStatus
-		StatefulSet
-		StatefulSetCondition
-		StatefulSetList
-		StatefulSetSpec
-		StatefulSetStatus
-		StatefulSetUpdateStrategy
-*/
-package v1beta2
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import k8s_io_api_core_v1 "k8s.io/api/core/v1"
-import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr"
-
-import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *ControllerRevision) Reset() { *m = ControllerRevision{} } -func (*ControllerRevision) ProtoMessage() {} -func (*ControllerRevision) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ControllerRevisionList) Reset() { *m = ControllerRevisionList{} } -func (*ControllerRevisionList) ProtoMessage() {} -func (*ControllerRevisionList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *ReplicaSetCondition) 
Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func (m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (*RollingUpdateDeployment) ProtoMessage() {} -func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{20} -} - -func (m *RollingUpdateStatefulSetStrategy) Reset() { *m = RollingUpdateStatefulSetStrategy{} } -func (*RollingUpdateStatefulSetStrategy) ProtoMessage() {} -func (*RollingUpdateStatefulSetStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{21} -} - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } - -func (m *StatefulSet) Reset() { *m = StatefulSet{} } -func (*StatefulSet) ProtoMessage() {} -func (*StatefulSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } - -func (m *StatefulSetCondition) Reset() { *m = StatefulSetCondition{} } -func (*StatefulSetCondition) ProtoMessage() {} -func (*StatefulSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } - -func (m *StatefulSetList) Reset() { *m = StatefulSetList{} } -func (*StatefulSetList) ProtoMessage() {} -func (*StatefulSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } - -func (m *StatefulSetSpec) Reset() { *m = StatefulSetSpec{} } -func (*StatefulSetSpec) ProtoMessage() {} -func (*StatefulSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } - -func (m *StatefulSetStatus) Reset() { *m = StatefulSetStatus{} } -func (*StatefulSetStatus) ProtoMessage() {} -func (*StatefulSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } - -func (m *StatefulSetUpdateStrategy) Reset() { *m = StatefulSetUpdateStrategy{} } -func (*StatefulSetUpdateStrategy) ProtoMessage() {} -func (*StatefulSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} -} - -func init() { - proto.RegisterType((*ControllerRevision)(nil), "k8s.io.api.apps.v1beta2.ControllerRevision") - 
proto.RegisterType((*ControllerRevisionList)(nil), "k8s.io.api.apps.v1beta2.ControllerRevisionList") - proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.apps.v1beta2.DaemonSet") - proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.apps.v1beta2.DaemonSetCondition") - proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.apps.v1beta2.DaemonSetList") - proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.apps.v1beta2.DaemonSetSpec") - proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.apps.v1beta2.DaemonSetStatus") - proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.DaemonSetUpdateStrategy") - proto.RegisterType((*Deployment)(nil), "k8s.io.api.apps.v1beta2.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.apps.v1beta2.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.apps.v1beta2.DeploymentList") - proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.apps.v1beta2.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), "k8s.io.api.apps.v1beta2.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.apps.v1beta2.DeploymentStrategy") - proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.apps.v1beta2.ReplicaSet") - proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetCondition") - proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetList") - proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetSpec") - proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.apps.v1beta2.ReplicaSetStatus") - proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDaemonSet") - proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateDeployment") - proto.RegisterType((*RollingUpdateStatefulSetStrategy)(nil), "k8s.io.api.apps.v1beta2.RollingUpdateStatefulSetStrategy") - proto.RegisterType((*Scale)(nil), "k8s.io.api.apps.v1beta2.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.apps.v1beta2.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.apps.v1beta2.ScaleStatus") - proto.RegisterType((*StatefulSet)(nil), "k8s.io.api.apps.v1beta2.StatefulSet") - proto.RegisterType((*StatefulSetCondition)(nil), "k8s.io.api.apps.v1beta2.StatefulSetCondition") - proto.RegisterType((*StatefulSetList)(nil), "k8s.io.api.apps.v1beta2.StatefulSetList") - proto.RegisterType((*StatefulSetSpec)(nil), "k8s.io.api.apps.v1beta2.StatefulSetSpec") - proto.RegisterType((*StatefulSetStatus)(nil), "k8s.io.api.apps.v1beta2.StatefulSetStatus") - proto.RegisterType((*StatefulSetUpdateStrategy)(nil), "k8s.io.api.apps.v1beta2.StatefulSetUpdateStrategy") -} -func (m *ControllerRevision) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ControllerRevision) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Data.Size())) - n2, err := m.Data.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Revision)) - return i, nil -} - -func (m *ControllerRevisionList) Marshal() 
(dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ControllerRevisionList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - return i, nil -} - -func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n7, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n8, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, 
error) { - var i int - _ = i - var l int - _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n9, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n10, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n11, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - return i, nil -} - -func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n12, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - return i, nil -} - -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n13, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n14, err 
:= m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n15, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - return i, nil -} - -func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n16, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n17, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - return i, nil -} - -func (m *DeploymentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n18, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.ProgressDeadlineSeconds != nil { - 
dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) - } - return i, nil -} - -func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil -} - -func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n22, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - return i, nil -} - -func (m *ReplicaSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n23, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n24, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n25, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - return i, nil -} - -func (m *ReplicaSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n26, err := 
m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *ReplicaSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n27, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ReplicaSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n29, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - return i, nil -} - -func (m *ReplicaSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ReplicaSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FullyLabeledReplicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RollingUpdateDaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateDaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n30, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - 
i += n30 - } - return i, nil -} - -func (m *RollingUpdateDeployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MaxUnavailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n31, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } - if m.MaxSurge != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxSurge.Size())) - n32, err := m.MaxSurge.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - } - return i, nil -} - -func (m *RollingUpdateStatefulSetStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RollingUpdateStatefulSetStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Partition != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Partition)) - } - return i, nil -} - -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n33, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n34, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n35, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - return i, nil -} - -func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil -} - -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetSelector))) - i += copy(dAtA[i:], m.TargetSelector) - return i, nil -} - -func (m *StatefulSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - return i, nil -} - -func (m *StatefulSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n39, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *StatefulSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n40, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatefulSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n41, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Template.Size())) - n42, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 - if len(m.VolumeClaimTemplates) > 0 { - for _, msg := range m.VolumeClaimTemplates { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ServiceName))) - i += copy(dAtA[i:], m.ServiceName) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PodManagementPolicy))) - i += copy(dAtA[i:], m.PodManagementPolicy) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n43, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - return i, nil -} - -func (m *StatefulSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CurrentRevision))) - i += copy(dAtA[i:], m.CurrentRevision) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UpdateRevision))) - i += copy(dAtA[i:], m.UpdateRevision) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StatefulSetUpdateStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n44, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 
8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *ControllerRevision) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Data.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Revision)) - return n -} - -func (m *ControllerRevisionList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonSetCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DaemonSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSetSpec) Size() (n int) { - var l int - _ = l - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - return n -} - -func (m *DaemonSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.CurrentNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberMisscheduled)) - n += 1 + sovGenerated(uint64(m.DesiredNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberReady)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.UpdatedNumberScheduled)) - n += 1 + sovGenerated(uint64(m.NumberAvailable)) - n += 1 + sovGenerated(uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DaemonSetUpdateStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *Deployment) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 
+ l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *DeploymentList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *DeploymentSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Strategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - n += 2 - if m.ProgressDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ProgressDeadlineSeconds)) - } - return n -} - -func (m *DeploymentStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - n += 1 + sovGenerated(uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 1 + sovGenerated(uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - return n -} - -func (m *DeploymentStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ReplicaSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ReplicaSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ReplicaSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.MinReadySeconds)) - return n -} - -func (m *ReplicaSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + sovGenerated(uint64(m.FullyLabeledReplicas)) - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + 
sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.AvailableReplicas)) - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RollingUpdateDaemonSet) Size() (n int) { - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RollingUpdateDeployment) Size() (n int) { - var l int - _ = l - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxSurge != nil { - l = m.MaxSurge.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *RollingUpdateStatefulSetStrategy) Size() (n int) { - var l int - _ = l - if m.Partition != nil { - n += 1 + sovGenerated(uint64(*m.Partition)) - } - return n -} - -func (m *Scale) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - if len(m.Selector) > 0 { - for k, v := range m.Selector { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.TargetSelector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatefulSet) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatefulSetCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *StatefulSetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StatefulSetSpec) Size() (n int) { - var l int - _ = l - if m.Replicas != nil { - n += 1 + sovGenerated(uint64(*m.Replicas)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeClaimTemplates) > 0 { - for _, e := range m.VolumeClaimTemplates { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = len(m.ServiceName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.PodManagementPolicy) - n += 1 + l + sovGenerated(uint64(l)) - l = m.UpdateStrategy.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.RevisionHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.RevisionHistoryLimit)) - } - return n -} - -func (m *StatefulSetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - n += 1 + sovGenerated(uint64(m.Replicas)) - n += 1 + 
sovGenerated(uint64(m.ReadyReplicas)) - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.UpdatedReplicas)) - l = len(m.CurrentRevision) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UpdateRevision) - n += 1 + l + sovGenerated(uint64(l)) - if m.CollisionCount != nil { - n += 1 + sovGenerated(uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StatefulSetUpdateStrategy) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.RollingUpdate != nil { - l = m.RollingUpdate.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *ControllerRevision) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ControllerRevision{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + strings.Replace(strings.Replace(this.Data.String(), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *ControllerRevisionList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ControllerRevisionList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ControllerRevision", "ControllerRevision", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DaemonSetSpec", "DaemonSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DaemonSetStatus", "DaemonSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DaemonSet", "DaemonSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func 
(this *DaemonSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetSpec{`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "DaemonSetUpdateStrategy", "DaemonSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetStatus{`, - `CurrentNumberScheduled:` + fmt.Sprintf("%v", this.CurrentNumberScheduled) + `,`, - `NumberMisscheduled:` + fmt.Sprintf("%v", this.NumberMisscheduled) + `,`, - `DesiredNumberScheduled:` + fmt.Sprintf("%v", this.DesiredNumberScheduled) + `,`, - `NumberReady:` + fmt.Sprintf("%v", this.NumberReady) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `UpdatedNumberScheduled:` + fmt.Sprintf("%v", this.UpdatedNumberScheduled) + `,`, - `NumberAvailable:` + fmt.Sprintf("%v", this.NumberAvailable) + `,`, - `NumberUnavailable:` + fmt.Sprintf("%v", this.NumberUnavailable) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DaemonSetCondition", "DaemonSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DaemonSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDaemonSet", "RollingUpdateDaemonSet", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Deployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Deployment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "DeploymentSpec", "DeploymentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "DeploymentStatus", "DeploymentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentList) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&DeploymentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Deployment", "Deployment", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `Strategy:` + strings.Replace(strings.Replace(this.Strategy.String(), "DeploymentStrategy", "DeploymentStrategy", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `Paused:` + fmt.Sprintf("%v", this.Paused) + `,`, - `ProgressDeadlineSeconds:` + valueToStringGenerated(this.ProgressDeadlineSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `UnavailableReplicas:` + fmt.Sprintf("%v", this.UnavailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "DeploymentCondition", "DeploymentCondition", 1), `&`, ``, 1) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `}`, - }, "") - return s -} -func (this *DeploymentStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DeploymentStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateDeployment", "RollingUpdateDeployment", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicaSetSpec", "ReplicaSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicaSetStatus", "ReplicaSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + 
fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateStatefulSetStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateStatefulSetStrategy{`, - `Partition:` + valueToStringGenerated(this.Partition) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + 
fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSet{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "StatefulSetSpec", "StatefulSetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "StatefulSetStatus", "StatefulSetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StatefulSet", "StatefulSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `VolumeClaimTemplates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeClaimTemplates), "PersistentVolumeClaim", "k8s_io_api_core_v1.PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, - `ServiceName:` + fmt.Sprintf("%v", this.ServiceName) + `,`, - `PodManagementPolicy:` + fmt.Sprintf("%v", this.PodManagementPolicy) + `,`, - `UpdateStrategy:` + strings.Replace(strings.Replace(this.UpdateStrategy.String(), "StatefulSetUpdateStrategy", "StatefulSetUpdateStrategy", 1), `&`, ``, 1) + `,`, - `RevisionHistoryLimit:` + valueToStringGenerated(this.RevisionHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetStatus) String() string { - if this == nil { - 
return "nil" - } - s := strings.Join([]string{`&StatefulSetStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, - `UpdatedReplicas:` + fmt.Sprintf("%v", this.UpdatedReplicas) + `,`, - `CurrentRevision:` + fmt.Sprintf("%v", this.CurrentRevision) + `,`, - `UpdateRevision:` + fmt.Sprintf("%v", this.UpdateRevision) + `,`, - `CollisionCount:` + valueToStringGenerated(this.CollisionCount) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "StatefulSetCondition", "StatefulSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StatefulSetUpdateStrategy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StatefulSetUpdateStrategy{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `RollingUpdate:` + strings.Replace(fmt.Sprintf("%v", this.RollingUpdate), "RollingUpdateStatefulSetStrategy", "RollingUpdateStatefulSetStrategy", 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ControllerRevision) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ControllerRevision: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ControllerRevision: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Data.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType) - } - m.Revision = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Revision |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ControllerRevisionList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ControllerRevisionList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ControllerRevisionList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ControllerRevision{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - 
return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: 
- if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) - } - m.UpdatedNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) - } - m.NumberAvailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) - } - m.NumberUnavailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DaemonSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire 
[This hunk is collapsed here with its original diff line breaks lost. It continues the removal of vendored, machine-generated protobuf code for the Kubernetes apps API types: it deletes the generated Unmarshal methods for DeploymentList, DeploymentSpec, DeploymentStatus, DeploymentStrategy, ReplicaSet, ReplicaSetCondition, ReplicaSetList, ReplicaSetSpec, ReplicaSetStatus, RollingUpdateDaemonSet, RollingUpdateDeployment, RollingUpdateStatefulSetStrategy, Scale, ScaleSpec, ScaleStatus, StatefulSet, StatefulSetCondition, StatefulSetList, StatefulSetSpec, StatefulSetStatus, and StatefulSetUpdateStrategy, with the excerpt cutting off mid-way through the last of these. Every deleted method follows the same generated decoding loop: read a varint field key, split it into field number and wire type, then decode each field as a varint, a string, or a length-delimited sub-message (delegating to the nested type's Unmarshal), advancing iNdEx while bounds-checking against ErrIntOverflowGenerated, ErrInvalidLengthGenerated, and io.ErrUnexpectedEOF.]
!= nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 2176 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x5a, 0xcb, 0x6f, 0x1c, 0xb7, - 0x19, 0xd7, 0xec, 0x43, 0x5a, 0x51, 0x91, 0x64, 0x53, 0xaa, 0xb4, 0x91, 0xdb, 0x95, 0xb1, 0x09, - 0x1c, 0x25, 0xb6, 0x66, 0x6d, 0xe5, 0x81, 0xc4, 0x2e, 0xda, 0x6a, 0xa5, 0xd4, 0x76, 0xa0, 0x57, - 0x28, 0xcb, 0x40, 0x83, 0x16, 0x35, 0xb5, 0x4b, 0xaf, 0x26, 0x9a, 0x17, 0x66, 0x38, 0x5b, 0x2f, - 0x7a, 0xe9, 0xa9, 0x40, 0x81, 0x02, 0x6d, 0xaf, 0xfd, 0x27, 0x7a, 0x2b, 0x8a, 0xf6, 0x56, 0x04, - 0x85, 0x2f, 0x05, 0x82, 0x5e, 0x92, 0x93, 0x50, 0x6f, 0x4e, 0x45, 0xd1, 0x4b, 0x81, 0x5e, 0x02, - 0x14, 0x28, 0xc8, 0xe1, 0x3c, 0x38, 0x0f, 0xef, 0x48, 0xb1, 0x95, 0x22, 0xc8, 0x6d, 0x87, 0xfc, - 0x7d, 0x3f, 0x7e, 0x24, 0xbf, 0x8f, 0xdf, 0x6f, 0x38, 0x0b, 0xbe, 0x77, 0xfc, 0xb6, 0xab, 0x6a, - 0x56, 0xeb, 0xd8, 0x3b, 0x24, 0x8e, 0x49, 0x28, 0x71, 0x5b, 0x7d, 0x62, 0x76, 0x2d, 0xa7, 0x25, - 0x3a, 0xb0, 0xad, 0xb5, 0xb0, 0x6d, 0xbb, 0xad, 0xfe, 0x8d, 0x43, 0x42, 0xf1, 0x5a, 0xab, 0x47, - 0x4c, 0xe2, 0x60, 0x4a, 
0xba, 0xaa, 0xed, 0x58, 0xd4, 0x82, 0x8b, 0x3e, 0x50, 0xc5, 0xb6, 0xa6, - 0x32, 0xa0, 0x2a, 0x80, 0x4b, 0xab, 0x3d, 0x8d, 0x1e, 0x79, 0x87, 0x6a, 0xc7, 0x32, 0x5a, 0x3d, - 0xab, 0x67, 0xb5, 0x38, 0xfe, 0xd0, 0x7b, 0xc8, 0x9f, 0xf8, 0x03, 0xff, 0xe5, 0xf3, 0x2c, 0x35, - 0x63, 0x03, 0x76, 0x2c, 0x87, 0xb4, 0xfa, 0x37, 0x92, 0x63, 0x2d, 0xbd, 0x11, 0x61, 0x0c, 0xdc, - 0x39, 0xd2, 0x4c, 0xe2, 0x0c, 0x5a, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2d, 0x83, 0x50, 0x9c, 0x65, - 0xd5, 0xca, 0xb3, 0x72, 0x3c, 0x93, 0x6a, 0x06, 0x49, 0x19, 0xbc, 0x35, 0xca, 0xc0, 0xed, 0x1c, - 0x11, 0x03, 0xa7, 0xec, 0x5e, 0xcf, 0xb3, 0xf3, 0xa8, 0xa6, 0xb7, 0x34, 0x93, 0xba, 0xd4, 0x49, - 0x1a, 0x35, 0xff, 0xa3, 0x00, 0xb8, 0x61, 0x99, 0xd4, 0xb1, 0x74, 0x9d, 0x38, 0x88, 0xf4, 0x35, - 0x57, 0xb3, 0x4c, 0xf8, 0x00, 0xd4, 0xd8, 0x7c, 0xba, 0x98, 0xe2, 0xba, 0x72, 0x59, 0x59, 0x99, - 0x5a, 0xbb, 0xae, 0x46, 0x2b, 0x1d, 0xd2, 0xab, 0xf6, 0x71, 0x8f, 0x35, 0xb8, 0x2a, 0x43, 0xab, - 0xfd, 0x1b, 0xea, 0xee, 0xe1, 0x87, 0xa4, 0x43, 0xb7, 0x09, 0xc5, 0x6d, 0xf8, 0xf8, 0x64, 0x79, - 0x6c, 0x78, 0xb2, 0x0c, 0xa2, 0x36, 0x14, 0xb2, 0xc2, 0x5d, 0x50, 0xe1, 0xec, 0x25, 0xce, 0xbe, - 0x9a, 0xcb, 0x2e, 0x26, 0xad, 0x22, 0xfc, 0x93, 0x77, 0x1f, 0x51, 0x62, 0x32, 0xf7, 0xda, 0x2f, - 0x08, 0xea, 0xca, 0x26, 0xa6, 0x18, 0x71, 0x22, 0x78, 0x0d, 0xd4, 0x1c, 0xe1, 0x7e, 0xbd, 0x7c, - 0x59, 0x59, 0x29, 0xb7, 0x2f, 0x08, 0x54, 0x2d, 0x98, 0x16, 0x0a, 0x11, 0xcd, 0xc7, 0x0a, 0x58, - 0x48, 0xcf, 0x7b, 0x4b, 0x73, 0x29, 0xfc, 0x61, 0x6a, 0xee, 0x6a, 0xb1, 0xb9, 0x33, 0x6b, 0x3e, - 0xf3, 0x70, 0xe0, 0xa0, 0x25, 0x36, 0xef, 0x3d, 0x50, 0xd5, 0x28, 0x31, 0xdc, 0x7a, 0xe9, 0x72, - 0x79, 0x65, 0x6a, 0xed, 0xaa, 0x9a, 0x13, 0xc0, 0x6a, 0xda, 0xbb, 0xf6, 0xb4, 0xe0, 0xad, 0xde, - 0x65, 0x0c, 0xc8, 0x27, 0x6a, 0xfe, 0xa2, 0x04, 0x26, 0x37, 0x31, 0x31, 0x2c, 0x73, 0x9f, 0xd0, - 0x73, 0xd8, 0xb9, 0x3b, 0xa0, 0xe2, 0xda, 0xa4, 0x23, 0x76, 0xee, 0x4a, 0xee, 0x04, 0x42, 0x9f, - 0xf6, 0x6d, 0xd2, 0x89, 0xb6, 0x8c, 0x3d, 0x21, 0xce, 0x00, 0xf7, 0xc0, 0xb8, 0x4b, 0x31, 0xf5, - 0x5c, 0xbe, 0x61, 0x53, 0x6b, 0x2b, 0x05, 0xb8, 0x38, 0xbe, 0x3d, 0x23, 0xd8, 0xc6, 0xfd, 0x67, - 0x24, 0x78, 0x9a, 0xff, 0x28, 0x01, 0x18, 0x62, 0x37, 0x2c, 0xb3, 0xab, 0x51, 0x16, 0xce, 0x37, - 0x41, 0x85, 0x0e, 0x6c, 0xc2, 0x17, 0x64, 0xb2, 0x7d, 0x25, 0x70, 0xe5, 0xde, 0xc0, 0x26, 0x9f, - 0x9f, 0x2c, 0x2f, 0xa4, 0x2d, 0x58, 0x0f, 0xe2, 0x36, 0x70, 0x2b, 0x74, 0xb2, 0xc4, 0xad, 0xdf, - 0x90, 0x87, 0xfe, 0xfc, 0x64, 0x39, 0xe3, 0xec, 0x50, 0x43, 0x26, 0xd9, 0x41, 0xd8, 0x07, 0x50, - 0xc7, 0x2e, 0xbd, 0xe7, 0x60, 0xd3, 0xf5, 0x47, 0xd2, 0x0c, 0x22, 0xa6, 0xff, 0x5a, 0xb1, 0x8d, - 0x62, 0x16, 0xed, 0x25, 0xe1, 0x05, 0xdc, 0x4a, 0xb1, 0xa1, 0x8c, 0x11, 0xe0, 0x15, 0x30, 0xee, - 0x10, 0xec, 0x5a, 0x66, 0xbd, 0xc2, 0x67, 0x11, 0x2e, 0x20, 0xe2, 0xad, 0x48, 0xf4, 0xc2, 0x57, - 0xc1, 0x84, 0x41, 0x5c, 0x17, 0xf7, 0x48, 0xbd, 0xca, 0x81, 0xb3, 0x02, 0x38, 0xb1, 0xed, 0x37, - 0xa3, 0xa0, 0xbf, 0xf9, 0x7b, 0x05, 0x4c, 0x87, 0x2b, 0x77, 0x0e, 0x99, 0x73, 0x5b, 0xce, 0x9c, - 0xe6, 0xe8, 0x60, 0xc9, 0x49, 0x98, 0x8f, 0xca, 0x31, 0xc7, 0x59, 0x38, 0xc2, 0x1f, 0x81, 0x9a, - 0x4b, 0x74, 0xd2, 0xa1, 0x96, 0x23, 0x1c, 0x7f, 0xbd, 0xa0, 0xe3, 0xf8, 0x90, 0xe8, 0xfb, 0xc2, - 0xb4, 0xfd, 0x02, 0xf3, 0x3c, 0x78, 0x42, 0x21, 0x25, 0x7c, 0x1f, 0xd4, 0x28, 0x31, 0x6c, 0x1d, - 0x53, 0x22, 0xb2, 0xe6, 0xa5, 0xb8, 0xf3, 0x2c, 0x66, 0x18, 0xd9, 0x9e, 0xd5, 0xbd, 0x27, 0x60, - 0x3c, 0x65, 0xc2, 0xc5, 0x08, 0x5a, 0x51, 0x48, 0x03, 0x6d, 0x30, 0xe3, 0xd9, 0x5d, 0x86, 0xa4, - 0xec, 0x38, 0xef, 0x0d, 0x44, 0x0c, 0x5d, 0x1f, 
0xbd, 0x2a, 0x07, 0x92, 0x5d, 0x7b, 0x41, 0x8c, - 0x32, 0x23, 0xb7, 0xa3, 0x04, 0x3f, 0x5c, 0x07, 0xb3, 0x86, 0x66, 0x22, 0x82, 0xbb, 0x83, 0x7d, - 0xd2, 0xb1, 0xcc, 0xae, 0xcb, 0x43, 0xa9, 0xda, 0x5e, 0x14, 0x04, 0xb3, 0xdb, 0x72, 0x37, 0x4a, - 0xe2, 0xe1, 0x16, 0x98, 0x0f, 0x0e, 0xe0, 0x3b, 0x9a, 0x4b, 0x2d, 0x67, 0xb0, 0xa5, 0x19, 0x1a, - 0xad, 0x8f, 0x73, 0x9e, 0xfa, 0xf0, 0x64, 0x79, 0x1e, 0x65, 0xf4, 0xa3, 0x4c, 0xab, 0xe6, 0x6f, - 0xc6, 0xc1, 0x6c, 0xe2, 0x5c, 0x80, 0xf7, 0xc1, 0x42, 0xc7, 0x73, 0x1c, 0x62, 0xd2, 0x1d, 0xcf, - 0x38, 0x24, 0xce, 0x7e, 0xe7, 0x88, 0x74, 0x3d, 0x9d, 0x74, 0xf9, 0xb6, 0x56, 0xdb, 0x0d, 0xe1, - 0xeb, 0xc2, 0x46, 0x26, 0x0a, 0xe5, 0x58, 0xc3, 0xf7, 0x00, 0x34, 0x79, 0xd3, 0xb6, 0xe6, 0xba, - 0x21, 0x67, 0x89, 0x73, 0x86, 0xa9, 0xb8, 0x93, 0x42, 0xa0, 0x0c, 0x2b, 0xe6, 0x63, 0x97, 0xb8, - 0x9a, 0x43, 0xba, 0x49, 0x1f, 0xcb, 0xb2, 0x8f, 0x9b, 0x99, 0x28, 0x94, 0x63, 0x0d, 0xdf, 0x04, - 0x53, 0xfe, 0x68, 0x7c, 0xcd, 0xc5, 0xe6, 0xcc, 0x09, 0xb2, 0xa9, 0x9d, 0xa8, 0x0b, 0xc5, 0x71, - 0x6c, 0x6a, 0xd6, 0xa1, 0x4b, 0x9c, 0x3e, 0xe9, 0xde, 0xf6, 0xc5, 0x01, 0xab, 0xa0, 0x55, 0x5e, - 0x41, 0xc3, 0xa9, 0xed, 0xa6, 0x10, 0x28, 0xc3, 0x8a, 0x4d, 0xcd, 0x8f, 0x9a, 0xd4, 0xd4, 0xc6, - 0xe5, 0xa9, 0x1d, 0x64, 0xa2, 0x50, 0x8e, 0x35, 0x8b, 0x3d, 0xdf, 0xe5, 0xf5, 0x3e, 0xd6, 0x74, - 0x7c, 0xa8, 0x93, 0xfa, 0x84, 0x1c, 0x7b, 0x3b, 0x72, 0x37, 0x4a, 0xe2, 0xe1, 0x6d, 0x70, 0xd1, - 0x6f, 0x3a, 0x30, 0x71, 0x48, 0x52, 0xe3, 0x24, 0x2f, 0x0a, 0x92, 0x8b, 0x3b, 0x49, 0x00, 0x4a, - 0xdb, 0xc0, 0x9b, 0x60, 0xa6, 0x63, 0xe9, 0x3a, 0x8f, 0xc7, 0x0d, 0xcb, 0x33, 0x69, 0x7d, 0x92, - 0xb3, 0x40, 0x96, 0x43, 0x1b, 0x52, 0x0f, 0x4a, 0x20, 0xe1, 0x8f, 0x01, 0xe8, 0x04, 0x85, 0xc1, - 0xad, 0x83, 0x11, 0x0a, 0x20, 0x5d, 0x96, 0xa2, 0xca, 0x1c, 0x36, 0xb9, 0x28, 0x46, 0xd9, 0xfc, - 0x48, 0x01, 0x8b, 0x39, 0x89, 0x0e, 0xbf, 0x2b, 0x15, 0xc1, 0xab, 0x89, 0x22, 0x78, 0x29, 0xc7, - 0x2c, 0x56, 0x09, 0x8f, 0xc0, 0x34, 0x13, 0x24, 0x9a, 0xd9, 0xf3, 0x21, 0xe2, 0x2c, 0x6b, 0xe5, - 0x4e, 0x00, 0xc5, 0xd1, 0xd1, 0xa9, 0x7c, 0x71, 0x78, 0xb2, 0x3c, 0x2d, 0xf5, 0x21, 0x99, 0xb8, - 0xf9, 0xcb, 0x12, 0x00, 0x9b, 0xc4, 0xd6, 0xad, 0x81, 0x41, 0xcc, 0xf3, 0xd0, 0x34, 0x77, 0x25, - 0x4d, 0xf3, 0x4a, 0xfe, 0x96, 0x84, 0x4e, 0xe5, 0x8a, 0x9a, 0xf7, 0x13, 0xa2, 0xe6, 0xd5, 0x22, - 0x64, 0x4f, 0x57, 0x35, 0x9f, 0x94, 0xc1, 0x5c, 0x04, 0x8e, 0x64, 0xcd, 0x2d, 0x69, 0x47, 0x5f, - 0x49, 0xec, 0xe8, 0x62, 0x86, 0xc9, 0x73, 0xd3, 0x35, 0xcf, 0x5e, 0x5f, 0xc0, 0x0f, 0xc1, 0x0c, - 0x13, 0x32, 0x7e, 0x48, 0x70, 0x99, 0x34, 0x7e, 0x6a, 0x99, 0x14, 0x16, 0xb7, 0x2d, 0x89, 0x09, - 0x25, 0x98, 0x73, 0x64, 0xd9, 0xc4, 0xf3, 0x96, 0x65, 0xcd, 0x3f, 0x28, 0x60, 0x26, 0xda, 0xa6, - 0x73, 0x10, 0x51, 0x77, 0x64, 0x11, 0xf5, 0x52, 0x81, 0xe0, 0xcc, 0x51, 0x51, 0x9f, 0x54, 0xe2, - 0xae, 0x73, 0x19, 0xb5, 0xc2, 0x5e, 0xc1, 0x6c, 0x5d, 0xeb, 0x60, 0x57, 0xd4, 0xdb, 0x17, 0xfc, - 0xd7, 0x2f, 0xbf, 0x0d, 0x85, 0xbd, 0x92, 0xe0, 0x2a, 0x3d, 0x5f, 0xc1, 0x55, 0x7e, 0x36, 0x82, - 0xeb, 0x07, 0xa0, 0xe6, 0x06, 0x52, 0xab, 0xc2, 0x29, 0xaf, 0x16, 0x4a, 0x6c, 0xa1, 0xb2, 0x42, - 0xea, 0x50, 0x5f, 0x85, 0x74, 0x59, 0xca, 0xaa, 0xfa, 0x65, 0x2a, 0x2b, 0x96, 0xcc, 0x36, 0xf6, - 0x5c, 0xd2, 0xe5, 0x19, 0x50, 0x8b, 0x92, 0x79, 0x8f, 0xb7, 0x22, 0xd1, 0x0b, 0x0f, 0xc0, 0xa2, - 0xed, 0x58, 0x3d, 0x87, 0xb8, 0xee, 0x26, 0xc1, 0x5d, 0x5d, 0x33, 0x49, 0x30, 0x01, 0xbf, 0x26, - 0x5e, 0x1a, 0x9e, 0x2c, 0x2f, 0xee, 0x65, 0x43, 0x50, 0x9e, 0x6d, 0xf3, 0xcf, 0x15, 0x70, 0x21, - 0x79, 0x36, 0xe6, 0xc8, 0x14, 0xe5, 0x4c, 0x32, 0xe5, 0x5a, 0x2c, 0x4e, 
0x7d, 0x0d, 0x17, 0xbb, - 0x2a, 0x48, 0xc5, 0xea, 0x3a, 0x98, 0x15, 0xb2, 0x24, 0xe8, 0x14, 0x42, 0x2d, 0xdc, 0x9e, 0x03, - 0xb9, 0x1b, 0x25, 0xf1, 0x4c, 0x7c, 0x44, 0x9a, 0x22, 0x20, 0xa9, 0xc8, 0xe2, 0x63, 0x3d, 0x09, - 0x40, 0x69, 0x1b, 0xb8, 0x0d, 0xe6, 0x3c, 0x33, 0x4d, 0xe5, 0x87, 0xcb, 0x25, 0x41, 0x35, 0x77, - 0x90, 0x86, 0xa0, 0x2c, 0x3b, 0xf8, 0x40, 0xd2, 0x23, 0xe3, 0xfc, 0x48, 0xb8, 0x56, 0x20, 0xac, - 0x0b, 0x0b, 0x12, 0x78, 0x0b, 0x4c, 0x3b, 0x5c, 0x73, 0x06, 0xae, 0xfa, 0xba, 0xed, 0x1b, 0xc2, - 0x6c, 0x1a, 0xc5, 0x3b, 0x91, 0x8c, 0xcd, 0x90, 0x5a, 0xb5, 0xa2, 0x52, 0xab, 0xf9, 0x27, 0x05, - 0xc0, 0x74, 0x1e, 0x8e, 0xbc, 0x09, 0x48, 0x59, 0xc4, 0x2a, 0xa6, 0x96, 0xad, 0x7f, 0xae, 0x17, - 0xd4, 0x3f, 0xd1, 0x81, 0x5a, 0x4c, 0x00, 0x89, 0x65, 0x38, 0x9f, 0x4b, 0x9d, 0xa2, 0x02, 0x28, - 0x72, 0xea, 0x19, 0x08, 0xa0, 0x18, 0xd9, 0xd3, 0x05, 0xd0, 0x3f, 0x4b, 0x60, 0x2e, 0x02, 0x17, - 0x16, 0x40, 0x19, 0x26, 0x5f, 0x5f, 0xec, 0x8c, 0xbe, 0xd8, 0x61, 0xa2, 0x24, 0x5a, 0xba, 0xff, - 0x27, 0x51, 0x12, 0x79, 0x95, 0x23, 0x4a, 0x7e, 0x57, 0x8a, 0xbb, 0xfe, 0x95, 0x17, 0x25, 0x5f, - 0xfc, 0x4e, 0xa6, 0xf9, 0x97, 0x32, 0xb8, 0x90, 0xcc, 0x43, 0xa9, 0x40, 0x2a, 0x23, 0x0b, 0xe4, - 0x1e, 0x98, 0x7f, 0xe8, 0xe9, 0xfa, 0x80, 0x2f, 0x43, 0xac, 0x4a, 0xfa, 0xa5, 0xf5, 0x9b, 0xc2, - 0x72, 0xfe, 0xfb, 0x19, 0x18, 0x94, 0x69, 0x99, 0x53, 0xec, 0xcb, 0x67, 0x2a, 0xf6, 0xa9, 0x0a, - 0x54, 0x39, 0x45, 0x05, 0xca, 0x2c, 0xdc, 0xd5, 0x33, 0x14, 0xee, 0xd3, 0x55, 0xda, 0x8c, 0x83, - 0x6b, 0xe4, 0xab, 0xff, 0xcf, 0x15, 0xb0, 0x90, 0xfd, 0xc2, 0x0d, 0x75, 0x30, 0x63, 0xe0, 0x47, - 0xf1, 0x8b, 0x8f, 0x51, 0x45, 0xc4, 0xa3, 0x9a, 0xae, 0xfa, 0x9f, 0x8c, 0xd4, 0xbb, 0x26, 0xdd, - 0x75, 0xf6, 0xa9, 0xa3, 0x99, 0x3d, 0xbf, 0xf2, 0x6e, 0x4b, 0x5c, 0x28, 0xc1, 0xdd, 0xfc, 0x4c, - 0x01, 0x8b, 0x39, 0x95, 0xef, 0x7c, 0x3d, 0x81, 0x1f, 0x80, 0x9a, 0x81, 0x1f, 0xed, 0x7b, 0x4e, - 0x2f, 0xab, 0x56, 0x17, 0x1b, 0x87, 0x67, 0xf3, 0xb6, 0x60, 0x41, 0x21, 0x5f, 0x73, 0x17, 0x5c, - 0x96, 0x26, 0xc9, 0x32, 0x87, 0x3c, 0xf4, 0x74, 0x9e, 0x44, 0x42, 0x6c, 0x5c, 0x05, 0x93, 0x36, - 0x76, 0xa8, 0x16, 0x4a, 0xd5, 0x6a, 0x7b, 0x7a, 0x78, 0xb2, 0x3c, 0xb9, 0x17, 0x34, 0xa2, 0xa8, - 0xbf, 0xf9, 0x5f, 0x05, 0x54, 0xf7, 0x3b, 0x58, 0x27, 0xe7, 0x50, 0xed, 0x37, 0xa5, 0x6a, 0x9f, - 0x7f, 0x93, 0xce, 0xfd, 0xc9, 0x2d, 0xf4, 0x5b, 0x89, 0x42, 0xff, 0xf2, 0x08, 0x9e, 0xa7, 0xd7, - 0xf8, 0x77, 0xc0, 0x64, 0x38, 0xdc, 0xe9, 0x0e, 0xa0, 0xe6, 0x6f, 0x4b, 0x60, 0x2a, 0x36, 0xc4, - 0x29, 0x8f, 0xaf, 0x07, 0xd2, 0xb1, 0xcf, 0x12, 0x73, 0xad, 0xc8, 0x44, 0xd4, 0xe0, 0x88, 0x7f, - 0xd7, 0xa4, 0x4e, 0xfc, 0x05, 0x2f, 0x7d, 0xf2, 0x7f, 0x07, 0xcc, 0x50, 0xec, 0xf4, 0x08, 0x0d, - 0xfa, 0xf8, 0x82, 0x4d, 0x46, 0xb7, 0x13, 0xf7, 0xa4, 0x5e, 0x94, 0x40, 0x2f, 0xdd, 0x02, 0xd3, - 0xd2, 0x60, 0xf0, 0x02, 0x28, 0x1f, 0x93, 0x81, 0x2f, 0x7b, 0x10, 0xfb, 0x09, 0xe7, 0x41, 0xb5, - 0x8f, 0x75, 0xcf, 0x8f, 0xf3, 0x49, 0xe4, 0x3f, 0xdc, 0x2c, 0xbd, 0xad, 0x34, 0x7f, 0xc5, 0x16, - 0x27, 0x0a, 0xce, 0x73, 0x88, 0xae, 0xf7, 0xa4, 0xe8, 0xca, 0xff, 0xa8, 0x17, 0x4f, 0x99, 0xbc, - 0x18, 0x43, 0x89, 0x18, 0x7b, 0xad, 0x10, 0xdb, 0xd3, 0x23, 0xed, 0x5f, 0x25, 0x30, 0x1f, 0x43, - 0x47, 0x72, 0xf2, 0xdb, 0x92, 0x9c, 0x5c, 0x49, 0xc8, 0xc9, 0x7a, 0x96, 0xcd, 0xd7, 0x7a, 0x72, - 0xb4, 0x9e, 0xfc, 0xa3, 0x02, 0x66, 0x63, 0x6b, 0x77, 0x0e, 0x82, 0xf2, 0xae, 0x2c, 0x28, 0x5f, - 0x2e, 0x12, 0x34, 0x39, 0x8a, 0xf2, 0xaf, 0x55, 0xc9, 0xf9, 0xaf, 0xbc, 0xa4, 0xfc, 0x29, 0x98, - 0xef, 0x5b, 0xba, 0x67, 0x90, 0x0d, 0x1d, 0x6b, 0x46, 0x00, 0x60, 0xaa, 0xa9, 0x9c, 0x7c, 0x97, - 
0x0b, 0xe9, 0x89, 0xe3, 0x6a, 0x2e, 0x25, 0x26, 0xbd, 0x1f, 0x59, 0x46, 0xba, 0xef, 0x7e, 0x06, - 0x1d, 0xca, 0x1c, 0x04, 0xbe, 0x09, 0xa6, 0x98, 0x7e, 0xd3, 0x3a, 0x64, 0x07, 0x1b, 0x41, 0x60, - 0x85, 0x9f, 0xb0, 0xf6, 0xa3, 0x2e, 0x14, 0xc7, 0xc1, 0x23, 0x30, 0x67, 0x5b, 0xdd, 0x6d, 0x6c, - 0xe2, 0x1e, 0x61, 0x32, 0x63, 0xcf, 0xd2, 0xb5, 0xce, 0x80, 0x5f, 0x7e, 0x4d, 0xb6, 0xdf, 0x0a, - 0x6e, 0x45, 0xf6, 0xd2, 0x10, 0xf6, 0x92, 0x98, 0xd1, 0xcc, 0x93, 0x3a, 0x8b, 0x12, 0x3a, 0xa9, - 0xcf, 0xae, 0xfe, 0x1d, 0xf1, 0x5a, 0x91, 0x08, 0x3b, 0xe3, 0x87, 0xd7, 0xbc, 0xbb, 0xbd, 0xda, - 0x99, 0xbe, 0x9a, 0xfe, 0xbb, 0x02, 0x2e, 0xa6, 0x8e, 0xca, 0x2f, 0xf1, 0x76, 0x2d, 0x25, 0xcf, - 0xcb, 0xa7, 0x90, 0xe7, 0xeb, 0x60, 0x56, 0x7c, 0xb0, 0x4d, 0xa8, 0xfb, 0xf0, 0xfd, 0x67, 0x43, - 0xee, 0x46, 0x49, 0x7c, 0xd6, 0xed, 0x5e, 0xf5, 0x94, 0xb7, 0x7b, 0x71, 0x2f, 0xc4, 0x1f, 0x90, - 0xfc, 0xd0, 0x4b, 0x7b, 0x21, 0xfe, 0x87, 0x94, 0xc4, 0x33, 0x85, 0xe0, 0xb3, 0x86, 0x0c, 0x13, - 0xb2, 0x42, 0x38, 0x90, 0x7a, 0x51, 0x02, 0xfd, 0x85, 0x3e, 0x4a, 0xe2, 0x8c, 0x8f, 0x92, 0xab, - 0x45, 0xe2, 0xb9, 0xf8, 0xbb, 0xc9, 0xdf, 0x14, 0xf0, 0x62, 0x6e, 0x22, 0xc0, 0x75, 0xa9, 0xec, - 0xae, 0x26, 0xca, 0xee, 0xb7, 0x72, 0x0d, 0x63, 0xb5, 0xd7, 0xc9, 0xbe, 0x9a, 0x7b, 0xa7, 0xd8, - 0xd5, 0x5c, 0x86, 0x76, 0x1f, 0x7d, 0x47, 0xd7, 0x5e, 0x7d, 0xfc, 0xa4, 0x31, 0xf6, 0xf1, 0x93, - 0xc6, 0xd8, 0xa7, 0x4f, 0x1a, 0x63, 0x3f, 0x1b, 0x36, 0x94, 0xc7, 0xc3, 0x86, 0xf2, 0xf1, 0xb0, - 0xa1, 0x7c, 0x3a, 0x6c, 0x28, 0x7f, 0x1f, 0x36, 0x94, 0x5f, 0x7f, 0xd6, 0x18, 0xfb, 0x60, 0x42, - 0x8c, 0xf8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x80, 0x85, 0x43, 0x0a, 0xec, 0x28, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/apps/v1beta2/generated.proto b/vendor/k8s.io/api/apps/v1beta2/generated.proto deleted file mode 100644 index cc3656d..0000000 --- a/vendor/k8s.io/api/apps/v1beta2/generated.proto +++ /dev/null @@ -1,751 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.apps.v1beta2; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta2"; - -// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the -// release notes for more information. -// ControllerRevision implements an immutable snapshot of state data. Clients -// are responsible for serializing and deserializing the objects that contain -// their internal state. -// Once a ControllerRevision has been successfully created, it can not be updated. -// The API Server will fail validation of all requests that attempt to mutate -// the Data field. 
ControllerRevisions may, however, be deleted. Note that, due to its use by both -// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, -// it may be subject to name and representation changes in future releases, and clients should not -// depend on its stability. It is primarily for internal use by controllers. -message ControllerRevision { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data is the serialized representation of the state. - optional k8s.io.apimachinery.pkg.runtime.RawExtension data = 2; - - // Revision indicates the revision of the state represented by Data. - optional int64 revision = 3; -} - -// ControllerRevisionList is a resource containing a list of ControllerRevision objects. -message ControllerRevisionList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ControllerRevisions - repeated ControllerRevision items = 2; -} - -// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for -// more information. -// DaemonSet represents the configuration of a daemon set. -message DaemonSet { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional DaemonSetSpec spec = 2; - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional DaemonSetStatus status = 3; -} - -// DaemonSetCondition describes the state of a DaemonSet at a certain point. -message DaemonSetCondition { - // Type of DaemonSet condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// DaemonSetList is a collection of daemon sets. -message DaemonSetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // A list of daemon sets. - repeated DaemonSet items = 2; -} - -// DaemonSetSpec is the specification of a daemon set. -message DaemonSetSpec { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // It must match the pod template's labels. 
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - optional DaemonSetUpdateStrategy updateStrategy = 3; - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - optional int32 minReadySeconds = 4; - - // The number of old history to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - optional int32 revisionHistoryLimit = 6; -} - -// DaemonSetStatus represents the current status of a daemon set. -message DaemonSetStatus { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 currentNumberScheduled = 1; - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 numberMisscheduled = 2; - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 desiredNumberScheduled = 3; - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - optional int32 numberReady = 4; - - // The most recent generation observed by the daemon set controller. - // +optional - optional int64 observedGeneration = 5; - - // The total number of nodes that are running updated daemon pod - // +optional - optional int32 updatedNumberScheduled = 6; - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - optional int32 numberAvailable = 7; - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - optional int32 numberUnavailable = 8; - - // Count of hash collisions for the DaemonSet. The DaemonSet controller - // uses this field as a collision avoidance mechanism when it needs to - // create the name for the newest ControllerRevision. - // +optional - optional int32 collisionCount = 9; - - // Represents the latest available observations of a DaemonSet's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated DaemonSetCondition conditions = 10; -} - -// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. 
-message DaemonSetUpdateStrategy { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. - // +optional - optional string type = 1; - - // Rolling update config params. Present only if type = "RollingUpdate". - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as Deployment `strategy.rollingUpdate`. - // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - optional RollingUpdateDaemonSet rollingUpdate = 2; -} - -// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for -// more information. -// Deployment enables declarative updates for Pods and ReplicaSets. -message Deployment { - // Standard object metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the Deployment. - // +optional - optional DeploymentSpec spec = 2; - - // Most recently observed status of the Deployment. - // +optional - optional DeploymentStatus status = 3; -} - -// DeploymentCondition describes the state of a deployment at a certain point. -message DeploymentCondition { - // Type of deployment condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; - - // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; - - // The reason for the condition's last transition. - optional string reason = 4; - - // A human readable message indicating details about the transition. - optional string message = 5; -} - -// DeploymentList is a list of Deployments. -message DeploymentList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of Deployments. - repeated Deployment items = 2; -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - optional int32 replicas = 1; - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // It must match the pod template's labels. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - optional DeploymentStrategy strategy = 4; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - optional int32 minReadySeconds = 5; - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - optional int32 revisionHistoryLimit = 6; - - // Indicates that the deployment is paused. 
- // +optional - optional bool paused = 7; - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - optional int32 progressDeadlineSeconds = 9; -} - -// DeploymentStatus is the most recently observed status of the Deployment. -message DeploymentStatus { - // The generation observed by the deployment controller. - // +optional - optional int64 observedGeneration = 1; - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - optional int32 replicas = 2; - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - optional int32 updatedReplicas = 3; - - // Total number of ready pods targeted by this deployment. - // +optional - optional int32 readyReplicas = 7; - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - optional int32 availableReplicas = 4; - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - optional int32 unavailableReplicas = 5; - - // Represents the latest available observations of a deployment's current state. - // +patchMergeKey=type - // +patchStrategy=merge - repeated DeploymentCondition conditions = 6; - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - optional int32 collisionCount = 8; -} - -// DeploymentStrategy describes how to replace existing pods with new ones. -message DeploymentStrategy { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - optional string type = 1; - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - optional RollingUpdateDeployment rollingUpdate = 2; -} - -// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for -// more information. -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -message ReplicaSet { - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ReplicaSetSpec spec = 2; - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. 
- // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ReplicaSetStatus status = 3; -} - -// ReplicaSetCondition describes the state of a replica set at a certain point. -message ReplicaSetCondition { - // Type of replica set condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // The last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// ReplicaSetList is a collection of ReplicaSets. -message ReplicaSetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ReplicaSets. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller - repeated ReplicaSet items = 2; -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -message ReplicaSetSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - // +optional - optional int32 replicas = 1; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - optional int32 minReadySeconds = 4; - - // Selector is a label query over pods that should match the replica count. - // Label keys and values that must match in order to be controlled by this replica set. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - optional int32 fullyLabeledReplicas = 2; - - // The number of ready replicas for this replica set. - // +optional - optional int32 readyReplicas = 4; - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - optional int32 availableReplicas = 5; - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. 
- // +optional - optional int64 observedGeneration = 3; - - // Represents the latest available observations of a replica set's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated ReplicaSetCondition conditions = 6; -} - -// Spec to control the desired behavior of daemon set rolling update. -message RollingUpdateDaemonSet { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; -} - -// Spec to control the desired behavior of rolling update. -message RollingUpdateDeployment { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // Defaults to 25%. - // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet - // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // Defaults to 25%. - // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; -} - -// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. -message RollingUpdateStatefulSetStrategy { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - // Default value is 0. - // +optional - optional int32 partition = 1; -} - -// Scale represents a scaling request for a resource. 
-message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - optional ScaleSpec spec = 2; - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - optional ScaleStatus status = 3; -} - -// ScaleSpec describes the attributes of a scale subresource -message ScaleSpec { - // desired number of instances for the scaled object. - // +optional - optional int32 replicas = 1; -} - -// ScaleStatus represents the current status of a scale subresource. -message ScaleStatus { - // actual number of observed instances of the scaled object. - optional int32 replicas = 1; - - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - map selector = 2; - - // label selector for pods that should match the replicas count. This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. The string will be in the same format as the - // query-param syntax. If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - optional string targetSelector = 3; -} - -// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for -// more information. -// StatefulSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The StatefulSet guarantees that a given network identity will always -// map to the same storage identity. -message StatefulSet { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the desired identities of pods in this set. - // +optional - optional StatefulSetSpec spec = 2; - - // Status is the current status of Pods in this StatefulSet. This data - // may be out of date by some window of time. - // +optional - optional StatefulSetStatus status = 3; -} - -// StatefulSetCondition describes the state of a statefulset at a certain point. -message StatefulSetCondition { - // Type of statefulset condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// StatefulSetList is a collection of StatefulSets. -message StatefulSetList { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated StatefulSet items = 2; -} - -// A StatefulSetSpec is the specification of a StatefulSet. 
-message StatefulSetSpec { - // replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - // +optional - optional int32 replicas = 1; - - // selector is a label query over pods that should match the replica count. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the StatefulSet - // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; - - // volumeClaimTemplates is a list of claims that pods are allowed to reference. - // The StatefulSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pod. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - // +optional - repeated k8s.io.api.core.v1.PersistentVolumeClaim volumeClaimTemplates = 4; - - // serviceName is the name of the service that governs this StatefulSet. - // This service must exist before the StatefulSet, and is responsible for - // the network identity of the set. Pods get DNS/hostnames that follow the - // pattern: pod-specific-string.serviceName.default.svc.cluster.local - // where "pod-specific-string" is managed by the StatefulSet controller. - optional string serviceName = 5; - - // podManagementPolicy controls how pods are created during initial scale up, - // when replacing pods on nodes, or when scaling down. The default policy is - // `OrderedReady`, where pods are created in increasing order (pod-0, then - // pod-1, etc) and the controller will wait until each pod is ready before - // continuing. When scaling down, the pods are removed in the opposite order. - // The alternative policy is `Parallel` which will create pods in parallel - // to match the desired scale without waiting, and on scale down will delete - // all pods at once. - // +optional - optional string podManagementPolicy = 6; - - // updateStrategy indicates the StatefulSetUpdateStrategy that will be - // employed to update Pods in the StatefulSet when a revision is made to - // Template. - optional StatefulSetUpdateStrategy updateStrategy = 7; - - // revisionHistoryLimit is the maximum number of revisions that will - // be maintained in the StatefulSet's revision history. The revision history - // consists of all revisions not represented by a currently applied - // StatefulSetSpec version. The default value is 10. - optional int32 revisionHistoryLimit = 8; -} - -// StatefulSetStatus represents the current state of a StatefulSet. -message StatefulSetStatus { - // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the - // StatefulSet's generation, which is updated on mutation by the API Server. 
- // +optional - optional int64 observedGeneration = 1; - - // replicas is the number of Pods created by the StatefulSet controller. - optional int32 replicas = 2; - - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. - optional int32 readyReplicas = 3; - - // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by currentRevision. - optional int32 currentReplicas = 4; - - // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by updateRevision. - optional int32 updatedReplicas = 5; - - // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the - // sequence [0,currentReplicas). - optional string currentRevision = 6; - - // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence - // [replicas-updatedReplicas,replicas) - optional string updateRevision = 7; - - // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller - // uses this field as a collision avoidance mechanism when it needs to create the name for the - // newest ControllerRevision. - // +optional - optional int32 collisionCount = 9; - - // Represents the latest available observations of a statefulset's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated StatefulSetCondition conditions = 10; -} - -// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet -// controller will use to perform updates. It includes any additional parameters -// necessary to perform the update for the indicated strategy. -message StatefulSetUpdateStrategy { - // Type indicates the type of the StatefulSetUpdateStrategy. - // Default is RollingUpdate. - // +optional - optional string type = 1; - - // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. - // +optional - optional RollingUpdateStatefulSetStrategy rollingUpdate = 2; -} - diff --git a/vendor/k8s.io/api/apps/v1beta2/register.go b/vendor/k8s.io/api/apps/v1beta2/register.go deleted file mode 100644 index 2784ee3..0000000 --- a/vendor/k8s.io/api/apps/v1beta2/register.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "apps" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta2"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &Scale{}, - &StatefulSet{}, - &StatefulSetList{}, - &DaemonSet{}, - &DaemonSetList{}, - &ReplicaSet{}, - &ReplicaSetList{}, - &ControllerRevision{}, - &ControllerRevisionList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/apps/v1beta2/types.go b/vendor/k8s.io/api/apps/v1beta2/types.go deleted file mode 100644 index e552522..0000000 --- a/vendor/k8s.io/api/apps/v1beta2/types.go +++ /dev/null @@ -1,877 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - ControllerRevisionHashLabelKey = "controller-revision-hash" - StatefulSetRevisionLabel = ControllerRevisionHashLabelKey - DeprecatedRollbackTo = "deprecated.deployment.rollback.to" - DeprecatedTemplateGeneration = "deprecated.daemonset.template.generation" - StatefulSetPodNameLabel = "statefulset.kubernetes.io/pod-name" -) - -// ScaleSpec describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` -} - -// ScaleStatus represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - - // label selector for pods that should match the replicas count. 
This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. The string will be in the same format as the - // query-param syntax. If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Scale represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for -// more information. -// StatefulSet represents a set of pods with consistent identities. -// Identities are defined as: -// - Network: A single stable DNS and hostname. -// - Storage: As many VolumeClaims as requested. -// The StatefulSet guarantees that a given network identity will always -// map to the same storage identity. -type StatefulSet struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired identities of pods in this set. - // +optional - Spec StatefulSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current status of Pods in this StatefulSet. This data - // may be out of date by some window of time. - // +optional - Status StatefulSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PodManagementPolicyType defines the policy for creating pods under a stateful set. -type PodManagementPolicyType string - -const ( - // OrderedReadyPodManagement will create pods in strictly increasing order on - // scale up and strictly decreasing order on scale down, progressing only when - // the previous pod is ready or terminated. At most one pod will be changed - // at any time. - OrderedReadyPodManagement PodManagementPolicyType = "OrderedReady" - // ParallelPodManagement will create and delete pods as soon as the stateful set - // replica count is changed, and will not wait for pods to be ready or complete - // termination. - ParallelPodManagement = "Parallel" -) - -// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet -// controller will use to perform updates. 
It includes any additional parameters -// necessary to perform the update for the indicated strategy. -type StatefulSetUpdateStrategy struct { - // Type indicates the type of the StatefulSetUpdateStrategy. - // Default is RollingUpdate. - // +optional - Type StatefulSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetStrategyType"` - // RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType. - // +optional - RollingUpdate *RollingUpdateStatefulSetStrategy `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -// StatefulSetUpdateStrategyType is a string enumeration type that enumerates -// all possible update strategies for the StatefulSet controller. -type StatefulSetUpdateStrategyType string - -const ( - // RollingUpdateStatefulSetStrategyType indicates that update will be - // applied to all Pods in the StatefulSet with respect to the StatefulSet - // ordering constraints. When a scale operation is performed with this - // strategy, new Pods will be created from the specification version indicated - // by the StatefulSet's updateRevision. - RollingUpdateStatefulSetStrategyType = "RollingUpdate" - // OnDeleteStatefulSetStrategyType triggers the legacy behavior. Version - // tracking and ordered rolling restarts are disabled. Pods are recreated - // from the StatefulSetSpec when they are manually deleted. When a scale - // operation is performed with this strategy,specification version indicated - // by the StatefulSet's currentRevision. - OnDeleteStatefulSetStrategyType = "OnDelete" -) - -// RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType. -type RollingUpdateStatefulSetStrategy struct { - // Partition indicates the ordinal at which the StatefulSet should be - // partitioned. - // Default value is 0. - // +optional - Partition *int32 `json:"partition,omitempty" protobuf:"varint,1,opt,name=partition"` -} - -// A StatefulSetSpec is the specification of a StatefulSet. -type StatefulSetSpec struct { - // replicas is the desired number of replicas of the given Template. - // These are replicas in the sense that they are instantiations of the - // same Template, but individual replicas also have a consistent identity. - // If unspecified, defaults to 1. - // TODO: Consider a rename of this field. - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // selector is a label query over pods that should match the replica count. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` - - // template is the object that describes the pod that will be created if - // insufficient replicas are detected. Each pod stamped out by the StatefulSet - // will fulfill this Template, but have a unique identity from the rest - // of the StatefulSet. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // volumeClaimTemplates is a list of claims that pods are allowed to reference. - // The StatefulSet controller is responsible for mapping network identities to - // claims in a way that maintains the identity of a pod. Every claim in - // this list must have at least one matching (by name) volumeMount in one - // container in the template. 
A claim in this list takes precedence over - // any volumes in the template, with the same name. - // TODO: Define the behavior if a claim already exists with the same name. - // +optional - VolumeClaimTemplates []v1.PersistentVolumeClaim `json:"volumeClaimTemplates,omitempty" protobuf:"bytes,4,rep,name=volumeClaimTemplates"` - - // serviceName is the name of the service that governs this StatefulSet. - // This service must exist before the StatefulSet, and is responsible for - // the network identity of the set. Pods get DNS/hostnames that follow the - // pattern: pod-specific-string.serviceName.default.svc.cluster.local - // where "pod-specific-string" is managed by the StatefulSet controller. - ServiceName string `json:"serviceName" protobuf:"bytes,5,opt,name=serviceName"` - - // podManagementPolicy controls how pods are created during initial scale up, - // when replacing pods on nodes, or when scaling down. The default policy is - // `OrderedReady`, where pods are created in increasing order (pod-0, then - // pod-1, etc) and the controller will wait until each pod is ready before - // continuing. When scaling down, the pods are removed in the opposite order. - // The alternative policy is `Parallel` which will create pods in parallel - // to match the desired scale without waiting, and on scale down will delete - // all pods at once. - // +optional - PodManagementPolicy PodManagementPolicyType `json:"podManagementPolicy,omitempty" protobuf:"bytes,6,opt,name=podManagementPolicy,casttype=PodManagementPolicyType"` - - // updateStrategy indicates the StatefulSetUpdateStrategy that will be - // employed to update Pods in the StatefulSet when a revision is made to - // Template. - UpdateStrategy StatefulSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,7,opt,name=updateStrategy"` - - // revisionHistoryLimit is the maximum number of revisions that will - // be maintained in the StatefulSet's revision history. The revision history - // consists of all revisions not represented by a currently applied - // StatefulSetSpec version. The default value is 10. - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,8,opt,name=revisionHistoryLimit"` -} - -// StatefulSetStatus represents the current state of a StatefulSet. -type StatefulSetStatus struct { - // observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the - // StatefulSet's generation, which is updated on mutation by the API Server. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // replicas is the number of Pods created by the StatefulSet controller. - Replicas int32 `json:"replicas" protobuf:"varint,2,opt,name=replicas"` - - // readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition. - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,3,opt,name=readyReplicas"` - - // currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by currentRevision. - CurrentReplicas int32 `json:"currentReplicas,omitempty" protobuf:"varint,4,opt,name=currentReplicas"` - - // updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version - // indicated by updateRevision. 
- UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,5,opt,name=updatedReplicas"` - - // currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the - // sequence [0,currentReplicas). - CurrentRevision string `json:"currentRevision,omitempty" protobuf:"bytes,6,opt,name=currentRevision"` - - // updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence - // [replicas-updatedReplicas,replicas) - UpdateRevision string `json:"updateRevision,omitempty" protobuf:"bytes,7,opt,name=updateRevision"` - - // collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller - // uses this field as a collision avoidance mechanism when it needs to create the name for the - // newest ControllerRevision. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - - // Represents the latest available observations of a statefulset's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []StatefulSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` -} - -type StatefulSetConditionType string - -// StatefulSetCondition describes the state of a statefulset at a certain point. -type StatefulSetCondition struct { - // Type of statefulset condition. - Type StatefulSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=StatefulSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// StatefulSetList is a collection of StatefulSets. -type StatefulSetList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []StatefulSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for -// more information. -// Deployment enables declarative updates for Pods and ReplicaSets. -type Deployment struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the Deployment. - // +optional - Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the Deployment. - // +optional - Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. 
-type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // It must match the pod template's labels. - Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` - - // Template describes the pods that will be created. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - - // Indicates that the deployment is paused. - // +optional - Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. Defaults to 600s. - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing ReplicaSets (and label key that is added to its pods) to prevent the existing ReplicaSets - // to select new pods (and old pods being select by new ReplicaSet). - DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -// DeploymentStrategy describes how to replace existing pods with new ones. -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old ReplicaSets by new one using rolling update i.e gradually scale down the old ReplicaSets and scale up the new one. 
- RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. -type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // Defaults to 25%. - // Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old ReplicaSet - // can be scaled down further, followed by scaling up the new ReplicaSet, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // Defaults to 25%. - // Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new ReplicaSet can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. - // +optional - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` -} - -// DeploymentStatus is the most recently observed status of the Deployment. -type DeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - - // Total number of ready pods targeted by this deployment. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - - // Represents the latest available observations of a deployment's current state. 
- // +patchMergeKey=type - // +patchStrategy=merge - Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` -} - -type DeploymentConditionType string - -// These are valid conditions of a deployment. -const ( - // Available means the deployment is available, ie. at least the minimum available - // replicas required are up and running for at least minReadySeconds. - DeploymentAvailable DeploymentConditionType = "Available" - // Progressing means the deployment is progressing. Progress for a deployment is - // considered when a new replica set is created or adopted, and when new pods scale - // up or old pods scale down. Progress is not estimated for paused deployments or - // when progressDeadlineSeconds is not specified. - DeploymentProgressing DeploymentConditionType = "Progressing" - // ReplicaFailure is added in a deployment when one of its pods fails to be created - // or deleted. - DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" -) - -// DeploymentCondition describes the state of a deployment at a certain point. -type DeploymentCondition struct { - // Type of deployment condition. - Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // The last time this condition was updated. - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DeploymentList is a list of Deployments. -type DeploymentList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Deployments. - Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet. -type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". Default is RollingUpdate. - // +optional - Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` - - // Rolling update config params. Present only if type = "RollingUpdate". - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as Deployment `strategy.rollingUpdate`. 
- // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -type DaemonSetUpdateStrategyType string - -const ( - // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. - RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" - - // Replace the old daemons only when it's killed - OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" -) - -// Spec to control the desired behavior of daemon set rolling update. -type RollingUpdateDaemonSet struct { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` -} - -// DaemonSetSpec is the specification of a daemon set. -type DaemonSetSpec struct { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,1,opt,name=selector"` - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` - - // The number of old history to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` -} - -// DaemonSetStatus represents the current status of a daemon set. 
-type DaemonSetStatus struct { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` - - // The most recent generation observed by the daemon set controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` - - // The total number of nodes that are running updated daemon pod - // +optional - UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` - - // Count of hash collisions for the DaemonSet. The DaemonSet controller - // uses this field as a collision avoidance mechanism when it needs to - // create the name for the newest ControllerRevision. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - - // Represents the latest available observations of a DaemonSet's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` -} - -type DaemonSetConditionType string - -// TODO: Add valid condition types of a DaemonSet. - -// DaemonSetCondition describes the state of a DaemonSet at a certain point. -type DaemonSetCondition struct { - // Type of DaemonSet condition. - Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for -// more information. -// DaemonSet represents the configuration of a daemon set. -type DaemonSet struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -const ( - // DefaultDaemonSetUniqueLabelKey is the default label key that is added - // to existing DaemonSet pods to distinguish between old and new - // DaemonSet pods during DaemonSet template updates. - DefaultDaemonSetUniqueLabelKey = ControllerRevisionHashLabelKey -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DaemonSetList is a collection of daemon sets. -type DaemonSetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // A list of daemon sets. - Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for -// more information. -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -type ReplicaSet struct { - metav1.TypeMeta `json:",inline"` - - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. 
- // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSetList is a collection of ReplicaSets. -type ReplicaSetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of ReplicaSets. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller - Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` - - // Selector is a label query over pods that should match the replica count. - // Label keys and values that must match in order to be controlled by this replica set. - // It must match the pod template's labels. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - Selector *metav1.LabelSelector `json:"selector" protobuf:"bytes,2,opt,name=selector"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - // +optional - Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - - // The number of ready replicas for this replica set. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. 
- // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` - - // Represents the latest available observations of a replica set's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` -} - -type ReplicaSetConditionType string - -// These are valid conditions of a replica set. -const ( - // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created - // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted - // due to kubelet being down or finalizers are failing. - ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" -) - -// ReplicaSetCondition describes the state of a replica set at a certain point. -type ReplicaSetCondition struct { - // Type of replica set condition. - Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the -// release notes for more information. -// ControllerRevision implements an immutable snapshot of state data. Clients -// are responsible for serializing and deserializing the objects that contain -// their internal state. -// Once a ControllerRevision has been successfully created, it can not be updated. -// The API Server will fail validation of all requests that attempt to mutate -// the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both -// the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, -// it may be subject to name and representation changes in future releases, and clients should not -// depend on its stability. It is primarily for internal use by controllers. -type ControllerRevision struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data is the serialized representation of the state. - Data runtime.RawExtension `json:"data,omitempty" protobuf:"bytes,2,opt,name=data"` - - // Revision indicates the revision of the state represented by Data. - Revision int64 `json:"revision" protobuf:"varint,3,opt,name=revision"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ControllerRevisionList is a resource containing a list of ControllerRevision objects. 
-type ControllerRevisionList struct { - metav1.TypeMeta `json:",inline"` - - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ControllerRevisions - Items []ControllerRevision `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go deleted file mode 100644 index 627df3a..0000000 --- a/vendor/k8s.io/api/apps/v1beta2/types_swagger_doc_generated.go +++ /dev/null @@ -1,396 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta2 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_ControllerRevision = map[string]string{ - "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "data": "Data is the serialized representation of the state.", - "revision": "Revision indicates the revision of the state represented by Data.", -} - -func (ControllerRevision) SwaggerDoc() map[string]string { - return map_ControllerRevision -} - -var map_ControllerRevisionList = map[string]string{ - "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of ControllerRevisions", -} - -func (ControllerRevisionList) SwaggerDoc() map[string]string { - return map_ControllerRevisionList -} - -var map_DaemonSet = map[string]string{ - "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (DaemonSet) SwaggerDoc() map[string]string { - return map_DaemonSet -} - -var map_DaemonSetCondition = map[string]string{ - "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", - "type": "Type of DaemonSet condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (DaemonSetCondition) SwaggerDoc() map[string]string { - return map_DaemonSetCondition -} - -var map_DaemonSetList = map[string]string{ - "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "A list of daemon sets.", -} - -func (DaemonSetList) SwaggerDoc() map[string]string { - return map_DaemonSetList -} - -var map_DaemonSetSpec = map[string]string{ - "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", - "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", - "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. 
Defaults to 0 (pod will be considered available as soon as it is ready).", - "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", -} - -func (DaemonSetSpec) SwaggerDoc() map[string]string { - return map_DaemonSetSpec -} - -var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", - "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", - "observedGeneration": "The most recent generation observed by the daemon set controller.", - "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", - "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", - "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", - "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a DaemonSet's current state.", -} - -func (DaemonSetStatus) SwaggerDoc() map[string]string { - return map_DaemonSetStatus -} - -var map_DaemonSetUpdateStrategy = map[string]string{ - "": "DaemonSetUpdateStrategy is a struct used to control the update strategy for a DaemonSet.", - "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is RollingUpdate.", - "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", -} - -func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { - return map_DaemonSetUpdateStrategy -} - -var map_Deployment = map[string]string{ - "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1/Deployment. See the release notes for more information. 
Deployment enables declarative updates for Pods and ReplicaSets.", - "metadata": "Standard object metadata.", - "spec": "Specification of the desired behavior of the Deployment.", - "status": "Most recently observed status of the Deployment.", -} - -func (Deployment) SwaggerDoc() map[string]string { - return map_Deployment -} - -var map_DeploymentCondition = map[string]string{ - "": "DeploymentCondition describes the state of a deployment at a certain point.", - "type": "Type of deployment condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastUpdateTime": "The last time this condition was updated.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (DeploymentCondition) SwaggerDoc() map[string]string { - return map_DeploymentCondition -} - -var map_DeploymentList = map[string]string{ - "": "DeploymentList is a list of Deployments.", - "metadata": "Standard list metadata.", - "items": "Items is the list of Deployments.", -} - -func (DeploymentList) SwaggerDoc() map[string]string { - return map_DeploymentList -} - -var map_DeploymentSpec = map[string]string{ - "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment. It must match the pod template's labels.", - "template": "Template describes the pods that will be created.", - "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", - "paused": "Indicates that the deployment is paused.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. 
Defaults to 600s.", -} - -func (DeploymentSpec) SwaggerDoc() map[string]string { - return map_DeploymentSpec -} - -var map_DeploymentStatus = map[string]string{ - "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", -} - -func (DeploymentStatus) SwaggerDoc() map[string]string { - return map_DeploymentStatus -} - -var map_DeploymentStrategy = map[string]string{ - "": "DeploymentStrategy describes how to replace existing pods with new ones.", - "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", - "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", -} - -func (DeploymentStrategy) SwaggerDoc() map[string]string { - return map_DeploymentStrategy -} - -var map_ReplicaSet = map[string]string{ - "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (ReplicaSet) SwaggerDoc() map[string]string { - return map_ReplicaSet -} - -var map_ReplicaSetCondition = map[string]string{ - "": "ReplicaSetCondition describes the state of a replica set at a certain point.", - "type": "Type of replica set condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "The last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (ReplicaSetCondition) SwaggerDoc() map[string]string { - return map_ReplicaSetCondition -} - -var map_ReplicaSetList = map[string]string{ - "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", -} - -func (ReplicaSetList) SwaggerDoc() map[string]string { - return map_ReplicaSetList -} - -var map_ReplicaSetSpec = map[string]string{ - "": "ReplicaSetSpec is the specification of a ReplicaSet.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "selector": "Selector is a label query over pods that should match the replica count. Label keys and values that must match in order to be controlled by this replica set. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", -} - -func (ReplicaSetSpec) SwaggerDoc() map[string]string { - return map_ReplicaSetSpec -} - -var map_ReplicaSetStatus = map[string]string{ - "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", - "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "readyReplicas": "The number of ready replicas for this replica set.", - "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", - "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", - "conditions": "Represents the latest available observations of a replica set's current state.", -} - -func (ReplicaSetStatus) SwaggerDoc() map[string]string { - return map_ReplicaSetStatus -} - -var map_RollingUpdateDaemonSet = map[string]string{ - "": "Spec to control the desired behavior of daemon set rolling update.", - "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", -} - -func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { - return map_RollingUpdateDaemonSet -} - -var map_RollingUpdateDeployment = map[string]string{ - "": "Spec to control the desired behavior of rolling update.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. 
Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", -} - -func (RollingUpdateDeployment) SwaggerDoc() map[string]string { - return map_RollingUpdateDeployment -} - -var map_RollingUpdateStatefulSetStrategy = map[string]string{ - "": "RollingUpdateStatefulSetStrategy is used to communicate parameter for RollingUpdateStatefulSetStrategyType.", - "partition": "Partition indicates the ordinal at which the StatefulSet should be partitioned. Default value is 0.", -} - -func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string { - return map_RollingUpdateStatefulSetStrategy -} - -var map_Scale = map[string]string{ - "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", -} - -func (Scale) SwaggerDoc() map[string]string { - return map_Scale -} - -var map_ScaleSpec = map[string]string{ - "": "ScaleSpec describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", -} - -func (ScaleSpec) SwaggerDoc() map[string]string { - return map_ScaleSpec -} - -var map_ScaleStatus = map[string]string{ - "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", -} - -func (ScaleStatus) SwaggerDoc() map[string]string { - return map_ScaleStatus -} - -var map_StatefulSet = map[string]string{ - "": "DEPRECATED - This group version of StatefulSet is deprecated by apps/v1/StatefulSet. See the release notes for more information. StatefulSet represents a set of pods with consistent identities. Identities are defined as:\n - Network: A single stable DNS and hostname.\n - Storage: As many VolumeClaims as requested.\nThe StatefulSet guarantees that a given network identity will always map to the same storage identity.", - "spec": "Spec defines the desired identities of pods in this set.", - "status": "Status is the current status of Pods in this StatefulSet. 
This data may be out of date by some window of time.", -} - -func (StatefulSet) SwaggerDoc() map[string]string { - return map_StatefulSet -} - -var map_StatefulSetCondition = map[string]string{ - "": "StatefulSetCondition describes the state of a statefulset at a certain point.", - "type": "Type of statefulset condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (StatefulSetCondition) SwaggerDoc() map[string]string { - return map_StatefulSetCondition -} - -var map_StatefulSetList = map[string]string{ - "": "StatefulSetList is a collection of StatefulSets.", -} - -func (StatefulSetList) SwaggerDoc() map[string]string { - return map_StatefulSetList -} - -var map_StatefulSetSpec = map[string]string{ - "": "A StatefulSetSpec is the specification of a StatefulSet.", - "replicas": "replicas is the desired number of replicas of the given Template. These are replicas in the sense that they are instantiations of the same Template, but individual replicas also have a consistent identity. If unspecified, defaults to 1.", - "selector": "selector is a label query over pods that should match the replica count. It must match the pod template's labels. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "template is the object that describes the pod that will be created if insufficient replicas are detected. Each pod stamped out by the StatefulSet will fulfill this Template, but have a unique identity from the rest of the StatefulSet.", - "volumeClaimTemplates": "volumeClaimTemplates is a list of claims that pods are allowed to reference. The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. A claim in this list takes precedence over any volumes in the template, with the same name.", - "serviceName": "serviceName is the name of the service that governs this StatefulSet. This service must exist before the StatefulSet, and is responsible for the network identity of the set. Pods get DNS/hostnames that follow the pattern: pod-specific-string.serviceName.default.svc.cluster.local where \"pod-specific-string\" is managed by the StatefulSet controller.", - "podManagementPolicy": "podManagementPolicy controls how pods are created during initial scale up, when replacing pods on nodes, or when scaling down. The default policy is `OrderedReady`, where pods are created in increasing order (pod-0, then pod-1, etc) and the controller will wait until each pod is ready before continuing. When scaling down, the pods are removed in the opposite order. The alternative policy is `Parallel` which will create pods in parallel to match the desired scale without waiting, and on scale down will delete all pods at once.", - "updateStrategy": "updateStrategy indicates the StatefulSetUpdateStrategy that will be employed to update Pods in the StatefulSet when a revision is made to Template.", - "revisionHistoryLimit": "revisionHistoryLimit is the maximum number of revisions that will be maintained in the StatefulSet's revision history. 
The revision history consists of all revisions not represented by a currently applied StatefulSetSpec version. The default value is 10.", -} - -func (StatefulSetSpec) SwaggerDoc() map[string]string { - return map_StatefulSetSpec -} - -var map_StatefulSetStatus = map[string]string{ - "": "StatefulSetStatus represents the current state of a StatefulSet.", - "observedGeneration": "observedGeneration is the most recent generation observed for this StatefulSet. It corresponds to the StatefulSet's generation, which is updated on mutation by the API Server.", - "replicas": "replicas is the number of Pods created by the StatefulSet controller.", - "readyReplicas": "readyReplicas is the number of Pods created by the StatefulSet controller that have a Ready Condition.", - "currentReplicas": "currentReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by currentRevision.", - "updatedReplicas": "updatedReplicas is the number of Pods created by the StatefulSet controller from the StatefulSet version indicated by updateRevision.", - "currentRevision": "currentRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [0,currentReplicas).", - "updateRevision": "updateRevision, if not empty, indicates the version of the StatefulSet used to generate Pods in the sequence [replicas-updatedReplicas,replicas)", - "collisionCount": "collisionCount is the count of hash collisions for the StatefulSet. The StatefulSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a statefulset's current state.", -} - -func (StatefulSetStatus) SwaggerDoc() map[string]string { - return map_StatefulSetStatus -} - -var map_StatefulSetUpdateStrategy = map[string]string{ - "": "StatefulSetUpdateStrategy indicates the strategy that the StatefulSet controller will use to perform updates. It includes any additional parameters necessary to perform the update for the indicated strategy.", - "type": "Type indicates the type of the StatefulSetUpdateStrategy. Default is RollingUpdate.", - "rollingUpdate": "RollingUpdate is used to communicate parameters when Type is RollingUpdateStatefulSetStrategyType.", -} - -func (StatefulSetUpdateStrategy) SwaggerDoc() map[string]string { - return map_StatefulSetUpdateStrategy -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go deleted file mode 100644 index 8a0bad2..0000000 --- a/vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go +++ /dev/null @@ -1,839 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. 
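The file deleted below, vendor/k8s.io/api/apps/v1beta2/zz_generated.deepcopy.go, is deepcopy-gen output: each API type carries a DeepCopyInto/DeepCopy pair (plus DeepCopyObject on top-level objects) that re-allocates pointer, slice, and map fields so the copy shares no storage with the original. As a rough illustration of that pattern only, here is a minimal, self-contained Go sketch; the Widget type, its fields, and main are invented for the example and do not appear in the vendored code.

// Illustrative sketch of the deepcopy-gen convention followed by the
// generated file removed below. Widget and its fields are hypothetical;
// only the shape of the DeepCopyInto/DeepCopy pair mirrors the vendored code.
package main

import "fmt"

type Widget struct {
	Name     string
	Replicas *int32
	Labels   map[string]string
}

// DeepCopyInto copies the receiver into out, allocating fresh storage for
// the pointer and map fields so the copy shares nothing with the original.
func (in *Widget) DeepCopyInto(out *Widget) {
	*out = *in
	if in.Replicas != nil {
		out.Replicas = new(int32)
		*out.Replicas = *in.Replicas
	}
	if in.Labels != nil {
		out.Labels = make(map[string]string, len(in.Labels))
		for k, v := range in.Labels {
			out.Labels[k] = v
		}
	}
}

// DeepCopy allocates a new Widget and delegates to DeepCopyInto, matching
// the two-method convention deepcopy-gen emits for every type.
func (in *Widget) DeepCopy() *Widget {
	if in == nil {
		return nil
	}
	out := new(Widget)
	in.DeepCopyInto(out)
	return out
}

func main() {
	r := int32(3)
	a := &Widget{Name: "a", Replicas: &r, Labels: map[string]string{"app": "demo"}}
	b := a.DeepCopy()
	*b.Replicas = 5
	b.Labels["app"] = "copy"
	fmt.Println(*a.Replicas, a.Labels["app"]) // prints "3 demo": the original is untouched
}
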
- -package v1beta2 - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ControllerRevision) DeepCopyInto(out *ControllerRevision) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Data.DeepCopyInto(&out.Data) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevision. -func (in *ControllerRevision) DeepCopy() *ControllerRevision { - if in == nil { - return nil - } - out := new(ControllerRevision) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ControllerRevision) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ControllerRevisionList) DeepCopyInto(out *ControllerRevisionList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ControllerRevision, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ControllerRevisionList. -func (in *ControllerRevisionList) DeepCopy() *ControllerRevisionList { - if in == nil { - return nil - } - out := new(ControllerRevisionList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ControllerRevisionList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. -func (in *DaemonSet) DeepCopy() *DaemonSet { - if in == nil { - return nil - } - out := new(DaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DaemonSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. -func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { - if in == nil { - return nil - } - out := new(DaemonSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DaemonSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. -func (in *DaemonSetList) DeepCopy() *DaemonSetList { - if in == nil { - return nil - } - out := new(DaemonSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DaemonSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. -func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { - if in == nil { - return nil - } - out := new(DaemonSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { - *out = *in - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DaemonSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. -func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { - if in == nil { - return nil - } - out := new(DaemonSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDaemonSet) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. -func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { - if in == nil { - return nil - } - out := new(DaemonSetUpdateStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Deployment) DeepCopyInto(out *Deployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. 
-func (in *Deployment) DeepCopy() *Deployment { - if in == nil { - return nil - } - out := new(Deployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Deployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. -func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { - if in == nil { - return nil - } - out := new(DeploymentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. -func (in *DeploymentList) DeepCopy() *DeploymentList { - if in == nil { - return nil - } - out := new(DeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.Strategy.DeepCopyInto(&out.Strategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. -func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { - if in == nil { - return nil - } - out := new(DeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. -func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { - if in == nil { - return nil - } - out := new(DeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. -func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { - if in == nil { - return nil - } - out := new(DeploymentStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. -func (in *ReplicaSet) DeepCopy() *ReplicaSet { - if in == nil { - return nil - } - out := new(ReplicaSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. -func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { - if in == nil { - return nil - } - out := new(ReplicaSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. -func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { - if in == nil { - return nil - } - out := new(ReplicaSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ReplicaSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. -func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { - if in == nil { - return nil - } - out := new(ReplicaSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicaSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. -func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { - if in == nil { - return nil - } - out := new(ReplicaSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. -func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { - if in == nil { - return nil - } - out := new(RollingUpdateDaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. -func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { - if in == nil { - return nil - } - out := new(RollingUpdateDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateStatefulSetStrategy) DeepCopyInto(out *RollingUpdateStatefulSetStrategy) { - *out = *in - if in.Partition != nil { - in, out := &in.Partition, &out.Partition - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateStatefulSetStrategy. 
-func (in *RollingUpdateStatefulSetStrategy) DeepCopy() *RollingUpdateStatefulSetStrategy { - if in == nil { - return nil - } - out := new(RollingUpdateStatefulSetStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Scale) DeepCopyInto(out *Scale) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. -func (in *Scale) DeepCopy() *Scale { - if in == nil { - return nil - } - out := new(Scale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Scale) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. -func (in *ScaleSpec) DeepCopy() *ScaleSpec { - if in == nil { - return nil - } - out := new(ScaleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. -func (in *ScaleStatus) DeepCopy() *ScaleStatus { - if in == nil { - return nil - } - out := new(ScaleStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSet) DeepCopyInto(out *StatefulSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSet. -func (in *StatefulSet) DeepCopy() *StatefulSet { - if in == nil { - return nil - } - out := new(StatefulSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StatefulSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetCondition) DeepCopyInto(out *StatefulSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetCondition. 
-func (in *StatefulSetCondition) DeepCopy() *StatefulSetCondition { - if in == nil { - return nil - } - out := new(StatefulSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetList) DeepCopyInto(out *StatefulSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StatefulSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetList. -func (in *StatefulSetList) DeepCopy() *StatefulSetList { - if in == nil { - return nil - } - out := new(StatefulSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StatefulSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetSpec) DeepCopyInto(out *StatefulSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - if in.VolumeClaimTemplates != nil { - in, out := &in.VolumeClaimTemplates, &out.VolumeClaimTemplates - *out = make([]corev1.PersistentVolumeClaim, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetSpec. -func (in *StatefulSetSpec) DeepCopy() *StatefulSetSpec { - if in == nil { - return nil - } - out := new(StatefulSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StatefulSetStatus) DeepCopyInto(out *StatefulSetStatus) { - *out = *in - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]StatefulSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetStatus. -func (in *StatefulSetStatus) DeepCopy() *StatefulSetStatus { - if in == nil { - return nil - } - out := new(StatefulSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StatefulSetUpdateStrategy) DeepCopyInto(out *StatefulSetUpdateStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateStatefulSetStrategy) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StatefulSetUpdateStrategy. -func (in *StatefulSetUpdateStrategy) DeepCopy() *StatefulSetUpdateStrategy { - if in == nil { - return nil - } - out := new(StatefulSetUpdateStrategy) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/authentication/v1/doc.go b/vendor/k8s.io/api/authentication/v1/doc.go deleted file mode 100644 index 2d2ed2e..0000000 --- a/vendor/k8s.io/api/authentication/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=authentication.k8s.io -// +k8s:openapi-gen=true -package v1 // import "k8s.io/api/authentication/v1" diff --git a/vendor/k8s.io/api/authentication/v1/generated.pb.go b/vendor/k8s.io/api/authentication/v1/generated.pb.go deleted file mode 100644 index 2ce2e2d..0000000 --- a/vendor/k8s.io/api/authentication/v1/generated.pb.go +++ /dev/null @@ -1,2147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto - - It has these top-level messages: - BoundObjectReference - ExtraValue - TokenRequest - TokenRequestSpec - TokenRequestStatus - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *BoundObjectReference) Reset() { *m = BoundObjectReference{} } -func (*BoundObjectReference) ProtoMessage() {} -func (*BoundObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *TokenRequest) Reset() { *m = TokenRequest{} } -func (*TokenRequest) ProtoMessage() {} -func (*TokenRequest) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *TokenRequestSpec) Reset() { *m = TokenRequestSpec{} } -func (*TokenRequestSpec) ProtoMessage() {} -func (*TokenRequestSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *TokenRequestStatus) Reset() { *m = TokenRequestStatus{} } -func (*TokenRequestStatus) ProtoMessage() {} -func (*TokenRequestStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func init() { - proto.RegisterType((*BoundObjectReference)(nil), "k8s.io.api.authentication.v1.BoundObjectReference") - proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1.ExtraValue") - proto.RegisterType((*TokenRequest)(nil), "k8s.io.api.authentication.v1.TokenRequest") - proto.RegisterType((*TokenRequestSpec)(nil), "k8s.io.api.authentication.v1.TokenRequestSpec") - proto.RegisterType((*TokenRequestStatus)(nil), "k8s.io.api.authentication.v1.TokenRequestStatus") - proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1.TokenReview") - proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1.TokenReviewSpec") - proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1.TokenReviewStatus") - proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1.UserInfo") -} -func (m *BoundObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *BoundObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil -} - -func (m ExtraValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *TokenRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *TokenRequestSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.BoundObjectRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.BoundObjectRef.Size())) - n4, err := m.BoundObjectRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.ExpirationSeconds != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) - } - return i, nil -} - -func (m *TokenRequestStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpirationTimestamp.Size())) - n5, err := m.ExpirationTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *TokenReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n6, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - 
} - i += n6 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n7, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n8, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - return i, nil -} - -func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - return i, nil -} - -func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Authenticated { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n9, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil -} - -func (m *UserInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - 
dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *BoundObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m ExtraValue) Size() (n int) { - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *TokenRequest) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenRequestSpec) Size() (n int) { - var l int - _ = l - if len(m.Audiences) > 0 { - for _, s := range m.Audiences { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.BoundObjectRef != nil { - l = m.BoundObjectRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ExpirationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ExpirationSeconds)) - } - return n -} - -func (m *TokenRequestStatus) Size() (n int) { - var l int - _ = l - l = len(m.Token) - n += 1 + l + sovGenerated(uint64(l)) - l = m.ExpirationTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReviewSpec) Size() (n int) { - var l int - _ = l - l = len(m.Token) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReviewStatus) Size() (n int) { - var l int - _ = l - n += 2 - l = m.User.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *UserInfo) Size() (n int) { - var l int - _ = l - l = len(m.Username) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Extra) > 0 { - for k, v := range m.Extra { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *BoundObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&BoundObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *TokenRequest) String() string { - if this == nil { - return "nil" - } 
- s := strings.Join([]string{`&TokenRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenRequestSpec", "TokenRequestSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenRequestStatus", "TokenRequestStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TokenRequestSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenRequestSpec{`, - `Audiences:` + fmt.Sprintf("%v", this.Audiences) + `,`, - `BoundObjectRef:` + strings.Replace(fmt.Sprintf("%v", this.BoundObjectRef), "BoundObjectReference", "BoundObjectReference", 1) + `,`, - `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *TokenRequestStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenRequestStatus{`, - `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `ExpirationTimestamp:` + strings.Replace(strings.Replace(this.ExpirationTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReviewSpec{`, - `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReviewStatus{`, - `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, - `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *UserInfo) String() string { - if this == nil { - return "nil" - } - keysForExtra := make([]string, 0, len(this.Extra)) - for k := range this.Extra { - keysForExtra = append(keysForExtra, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - mapStringForExtra := "map[string]ExtraValue{" - for _, k := range keysForExtra { - mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) - } - mapStringForExtra += "}" - s := strings.Join([]string{`&UserInfo{`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `Extra:` + mapStringForExtra + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *BoundObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := 
iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: BoundObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: BoundObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExtraValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for 
iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - *m = append(*m, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenRequestSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenRequestSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audiences", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audiences = append(m.Audiences, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field BoundObjectRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.BoundObjectRef == nil { - m.BoundObjectRef = &BoundObjectReference{} - } - if err := m.BoundObjectRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ExpirationSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenRequestStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenRequestStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpirationTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ExpirationTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx 
+ msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReviewStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Authenticated = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Extra == nil { - m.Extra = make(map[string]ExtraValue) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if 
mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 892 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0xf3, 0x63, 0xb5, 0x99, 0x74, 0x97, 0xdd, 0x29, 0x95, 0xa2, 0x05, 0xec, 0x60, 0x24, - 0x14, 0x01, 0xb5, 0x9b, 0x08, 0x95, 0xaa, 0x48, 0x48, 0x6b, 0x36, 0x82, 0x08, 0x41, 0xab, 0x69, - 0x77, 0x41, 0x9c, 0x98, 0xd8, 0x6f, 0xb3, 0x26, 0x78, 0x6c, 0xec, 0x71, 0x68, 0x6e, 0xfd, 0x13, - 0x38, 0x82, 0xc4, 0x81, 0x3f, 0x02, 0x89, 0x23, 0xd7, 0x3d, 0x56, 0x9c, 0x7a, 0x40, 0x11, 0x6b, - 0xfe, 0x05, 0x4e, 0x9c, 0xd0, 0x8c, 0x67, 0xe3, 0xfc, 0xd8, 0x4d, 0x73, 0xea, 0x2d, 0xf3, 
0xde, - 0xf7, 0xbe, 0x79, 0xef, 0x9b, 0x2f, 0xcf, 0xa8, 0x37, 0xba, 0x97, 0x58, 0x7e, 0x68, 0x8f, 0xd2, - 0x01, 0xc4, 0x0c, 0x38, 0x24, 0xf6, 0x18, 0x98, 0x17, 0xc6, 0xb6, 0x4a, 0xd0, 0xc8, 0xb7, 0x69, - 0xca, 0xcf, 0x80, 0x71, 0xdf, 0xa5, 0xdc, 0x0f, 0x99, 0x3d, 0xee, 0xd8, 0x43, 0x60, 0x10, 0x53, - 0x0e, 0x9e, 0x15, 0xc5, 0x21, 0x0f, 0xf1, 0xeb, 0x39, 0xda, 0xa2, 0x91, 0x6f, 0x2d, 0xa2, 0xad, - 0x71, 0xe7, 0xe0, 0xf6, 0xd0, 0xe7, 0x67, 0xe9, 0xc0, 0x72, 0xc3, 0xc0, 0x1e, 0x86, 0xc3, 0xd0, - 0x96, 0x45, 0x83, 0xf4, 0x54, 0x9e, 0xe4, 0x41, 0xfe, 0xca, 0xc9, 0x0e, 0xde, 0x2f, 0xae, 0x0e, - 0xa8, 0x7b, 0xe6, 0x33, 0x88, 0x27, 0x76, 0x34, 0x1a, 0x8a, 0x40, 0x62, 0x07, 0xc0, 0xe9, 0x15, - 0x2d, 0x1c, 0xd8, 0xd7, 0x55, 0xc5, 0x29, 0xe3, 0x7e, 0x00, 0x2b, 0x05, 0x77, 0x5f, 0x54, 0x90, - 0xb8, 0x67, 0x10, 0xd0, 0xe5, 0x3a, 0xf3, 0x4f, 0x0d, 0xbd, 0xea, 0x84, 0x29, 0xf3, 0x1e, 0x0c, - 0xbe, 0x05, 0x97, 0x13, 0x38, 0x85, 0x18, 0x98, 0x0b, 0xb8, 0x85, 0xaa, 0x23, 0x9f, 0x79, 0x4d, - 0xad, 0xa5, 0xb5, 0xeb, 0xce, 0x8d, 0xf3, 0xa9, 0x51, 0xca, 0xa6, 0x46, 0xf5, 0x33, 0x9f, 0x79, - 0x44, 0x66, 0x70, 0x17, 0x21, 0xfa, 0xb0, 0x7f, 0x02, 0x71, 0xe2, 0x87, 0xac, 0x59, 0x96, 0x38, - 0xac, 0x70, 0xe8, 0x70, 0x96, 0x21, 0x73, 0x28, 0xc1, 0xca, 0x68, 0x00, 0xcd, 0xca, 0x22, 0xeb, - 0x17, 0x34, 0x00, 0x22, 0x33, 0xd8, 0x41, 0x95, 0xb4, 0x7f, 0xd4, 0xac, 0x4a, 0xc0, 0x1d, 0x05, - 0xa8, 0x1c, 0xf7, 0x8f, 0xfe, 0x9b, 0x1a, 0x6f, 0x5e, 0x37, 0x24, 0x9f, 0x44, 0x90, 0x58, 0xc7, - 0xfd, 0x23, 0x22, 0x8a, 0xcd, 0x0f, 0x10, 0xea, 0x3d, 0xe1, 0x31, 0x3d, 0xa1, 0xdf, 0xa5, 0x80, - 0x0d, 0x54, 0xf3, 0x39, 0x04, 0x49, 0x53, 0x6b, 0x55, 0xda, 0x75, 0xa7, 0x9e, 0x4d, 0x8d, 0x5a, - 0x5f, 0x04, 0x48, 0x1e, 0xbf, 0xbf, 0xfd, 0xd3, 0xaf, 0x46, 0xe9, 0xe9, 0x5f, 0xad, 0x92, 0xf9, - 0x4b, 0x19, 0xdd, 0x78, 0x1c, 0x8e, 0x80, 0x11, 0xf8, 0x3e, 0x85, 0x84, 0xe3, 0x6f, 0xd0, 0xb6, - 0x78, 0x22, 0x8f, 0x72, 0x2a, 0x95, 0x68, 0x74, 0xef, 0x58, 0x85, 0x3b, 0x66, 0x4d, 0x58, 0xd1, - 0x68, 0x28, 0x02, 0x89, 0x25, 0xd0, 0xd6, 0xb8, 0x63, 0xe5, 0x72, 0x7e, 0x0e, 0x9c, 0x16, 0x9a, - 0x14, 0x31, 0x32, 0x63, 0xc5, 0x0f, 0x51, 0x35, 0x89, 0xc0, 0x95, 0xfa, 0x35, 0xba, 0x96, 0xb5, - 0xce, 0x7b, 0xd6, 0x7c, 0x6f, 0x8f, 0x22, 0x70, 0x0b, 0x05, 0xc5, 0x89, 0x48, 0x26, 0xfc, 0x15, - 0xda, 0x4a, 0x38, 0xe5, 0x69, 0x22, 0x55, 0x5e, 0xec, 0xf8, 0x45, 0x9c, 0xb2, 0xce, 0xd9, 0x55, - 0xac, 0x5b, 0xf9, 0x99, 0x28, 0x3e, 0xf3, 0x5f, 0x0d, 0xed, 0x2d, 0xb7, 0x80, 0xdf, 0x45, 0x75, - 0x9a, 0x7a, 0xbe, 0x30, 0xcd, 0xa5, 0xc4, 0x3b, 0xd9, 0xd4, 0xa8, 0x1f, 0x5e, 0x06, 0x49, 0x91, - 0xc7, 0x0c, 0xed, 0x0e, 0x16, 0xdc, 0xa6, 0x7a, 0xec, 0xae, 0xef, 0xf1, 0x2a, 0x87, 0x3a, 0x38, - 0x9b, 0x1a, 0xbb, 0x8b, 0x19, 0xb2, 0xc4, 0x8e, 0x3f, 0x46, 0xfb, 0xf0, 0x24, 0xf2, 0x63, 0xc9, - 0xf4, 0x08, 0xdc, 0x90, 0x79, 0x89, 0xf4, 0x56, 0xc5, 0xb9, 0x95, 0x4d, 0x8d, 0xfd, 0xde, 0x72, - 0x92, 0xac, 0xe2, 0xcd, 0xdf, 0x34, 0x84, 0x57, 0x55, 0xc2, 0x6f, 0xa1, 0x1a, 0x17, 0x51, 0xf5, - 0x17, 0xd9, 0x51, 0xa2, 0xd5, 0x72, 0x68, 0x9e, 0xc3, 0x13, 0x74, 0xb3, 0x20, 0x7c, 0xec, 0x07, - 0x90, 0x70, 0x1a, 0x44, 0xea, 0xb5, 0xdf, 0xd9, 0xcc, 0x4b, 0xa2, 0xcc, 0x79, 0x4d, 0xd1, 0xdf, - 0xec, 0xad, 0xd2, 0x91, 0xab, 0xee, 0x30, 0x7f, 0x2e, 0xa3, 0x86, 0x6a, 0x7b, 0xec, 0xc3, 0x0f, - 0x2f, 0xc1, 0xcb, 0x0f, 0x16, 0xbc, 0x7c, 0x7b, 0x23, 0xdf, 0x89, 0xd6, 0xae, 0xb5, 0xf2, 0x97, - 0x4b, 0x56, 0xb6, 0x37, 0xa7, 0x5c, 0xef, 0xe4, 0xbb, 0xe8, 0x95, 0xa5, 0xfb, 0x37, 0x7a, 0x4e, - 0xf3, 0x0f, 0x0d, 0xed, 0xaf, 0xdc, 0x82, 0x3f, 0x44, 0x3b, 0x73, 0xcd, 0x40, 0xbe, 0x34, 0xb7, - 0x9d, 0x5b, 0x8a, 
0x62, 0xe7, 0x70, 0x3e, 0x49, 0x16, 0xb1, 0xf8, 0x53, 0x54, 0x4d, 0x13, 0x88, - 0x95, 0x68, 0x6f, 0xaf, 0x9f, 0xf0, 0x38, 0x81, 0xb8, 0xcf, 0x4e, 0xc3, 0x42, 0x2d, 0x11, 0x21, - 0x92, 0x41, 0x4c, 0x00, 0x71, 0x1c, 0xc6, 0x6a, 0xbb, 0xce, 0x26, 0xe8, 0x89, 0x20, 0xc9, 0x73, - 0xe6, 0xef, 0x65, 0xb4, 0x7d, 0xc9, 0x82, 0xdf, 0x43, 0xdb, 0xa2, 0x52, 0xae, 0xe4, 0x7c, 0xec, - 0x3d, 0x55, 0x24, 0x31, 0x22, 0x4e, 0x66, 0x08, 0xfc, 0x06, 0xaa, 0xa4, 0xbe, 0xa7, 0x36, 0x7d, - 0x63, 0x6e, 0x35, 0x13, 0x11, 0xc7, 0x26, 0xda, 0x1a, 0xc6, 0x61, 0x1a, 0x89, 0xc7, 0x12, 0x5b, - 0x00, 0x09, 0xdd, 0x3f, 0x91, 0x11, 0xa2, 0x32, 0xf8, 0x04, 0xd5, 0x40, 0x6c, 0xe6, 0x66, 0xb5, - 0x55, 0x69, 0x37, 0xba, 0x9d, 0xcd, 0xa6, 0xb5, 0xe4, 0x36, 0xef, 0x31, 0x1e, 0x4f, 0xe6, 0xa6, - 0x12, 0x31, 0x92, 0xd3, 0x1d, 0x0c, 0xd4, 0xc6, 0x97, 0x18, 0xbc, 0x87, 0x2a, 0x23, 0x98, 0xe4, - 0x13, 0x11, 0xf1, 0x13, 0x7f, 0x84, 0x6a, 0x63, 0xf1, 0x31, 0x50, 0x2a, 0xb7, 0xd7, 0xdf, 0x5b, - 0x7c, 0x3c, 0x48, 0x5e, 0x76, 0xbf, 0x7c, 0x4f, 0x73, 0xda, 0xe7, 0x17, 0x7a, 0xe9, 0xd9, 0x85, - 0x5e, 0x7a, 0x7e, 0xa1, 0x97, 0x9e, 0x66, 0xba, 0x76, 0x9e, 0xe9, 0xda, 0xb3, 0x4c, 0xd7, 0x9e, - 0x67, 0xba, 0xf6, 0x77, 0xa6, 0x6b, 0x3f, 0xfe, 0xa3, 0x97, 0xbe, 0x2e, 0x8f, 0x3b, 0xff, 0x07, - 0x00, 0x00, 0xff, 0xff, 0x5e, 0x8d, 0x94, 0x78, 0x88, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authentication/v1/generated.proto b/vendor/k8s.io/api/authentication/v1/generated.proto deleted file mode 100644 index 10c7921..0000000 --- a/vendor/k8s.io/api/authentication/v1/generated.proto +++ /dev/null @@ -1,159 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.authentication.v1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// BoundObjectReference is a reference to an object that a token is bound to. -message BoundObjectReference { - // Kind of the referent. Valid kinds are 'Pod' and 'Secret'. - // +optional - optional string kind = 1; - - // API version of the referent. - // +optional - optional string aPIVersion = 2; - - // Name of the referent. - // +optional - optional string name = 3; - - // UID of the referent. - // +optional - optional string uID = 4; -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ExtraValue { - // items, if empty, will result in an empty slice - - repeated string items = 1; -} - -// TokenRequest requests a token for a given service account. 
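
The Unmarshal and skipGenerated helpers in the generated.pb.go hunk above all decode the protobuf wire format with the same hand-rolled loop: every field begins with a varint tag whose low three bits carry the wire type and whose remaining bits carry the field number, and every length or integer value is read seven bits per byte with the high bit marking continuation. The following stand-alone sketch of that loop is illustrative only and is not part of the patch; the generated code above remains the authoritative implementation.

package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the shift/mask loop used throughout the generated
// Unmarshal methods: 7 payload bits per byte, high bit set on continuation.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("integer overflow")
		}
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= (uint64(c) & 0x7F) << shift
		if c < 0x80 {
			break
		}
	}
	return v, n, nil
}

func main() {
	// A tag byte 0x0a means field 1, wire type 2 (length-delimited bytes).
	wire, n, _ := decodeVarint([]byte{0x0a})
	fmt.Println("field:", wire>>3, "wireType:", wire&0x7, "bytes read:", n)

	// The two-byte varint 0x96 0x01 encodes the value 150.
	v, _, _ := decodeVarint([]byte{0x96, 0x01})
	fmt.Println("value:", v)
}

Running it prints field 1 / wire type 2 for the tag byte and 150 for the two-byte varint, matching the fieldNum/wireType split and the stringLen/msglen loops in the deleted code.
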
-message TokenRequest { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - optional TokenRequestSpec spec = 2; - - // +optional - optional TokenRequestStatus status = 3; -} - -// TokenRequestSpec contains client provided parameters of a token request. -message TokenRequestSpec { - // Audiences are the intendend audiences of the token. A recipient of a - // token must identitfy themself with an identifier in the list of - // audiences of the token, and otherwise should reject the token. A - // token issued for multiple audiences may be used to authenticate - // against any of the audiences listed but implies a high degree of - // trust between the target audiences. - repeated string audiences = 1; - - // ExpirationSeconds is the requested duration of validity of the request. The - // token issuer may return a token with a different validity duration so a - // client needs to check the 'expiration' field in a response. - // +optional - optional int64 expirationSeconds = 4; - - // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound objet exists. - // +optional - optional BoundObjectReference boundObjectRef = 3; -} - -// TokenRequestStatus is the result of a token request. -message TokenRequestStatus { - // Token is the opaque bearer token. - optional string token = 1; - - // ExpirationTimestamp is the time of expiration of the returned token. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time expirationTimestamp = 2; -} - -// TokenReview attempts to authenticate a token to a known user. -// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. -message TokenReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated - optional TokenReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request can be authenticated. - // +optional - optional TokenReviewStatus status = 3; -} - -// TokenReviewSpec is a description of the token authentication request. -message TokenReviewSpec { - // Token is the opaque bearer token. - // +optional - optional string token = 1; -} - -// TokenReviewStatus is the result of the token authentication request. -message TokenReviewStatus { - // Authenticated indicates that the token was associated with a known user. - // +optional - optional bool authenticated = 1; - - // User is the UserInfo associated with the provided token. - // +optional - optional UserInfo user = 2; - - // Error indicates that the token couldn't be checked - // +optional - optional string error = 3; -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -message UserInfo { - // The name that uniquely identifies this user among all active users. - // +optional - optional string username = 1; - - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - // +optional - optional string uid = 2; - - // The names of groups this user is a part of. - // +optional - repeated string groups = 3; - - // Any additional information provided by the authenticator. 
- // +optional - map extra = 4; -} - diff --git a/vendor/k8s.io/api/authentication/v1/register.go b/vendor/k8s.io/api/authentication/v1/register.go deleted file mode 100644 index c522e4a..0000000 --- a/vendor/k8s.io/api/authentication/v1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "authentication.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &TokenReview{}, - &TokenRequest{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/authentication/v1/types.go b/vendor/k8s.io/api/authentication/v1/types.go deleted file mode 100644 index 723457a..0000000 --- a/vendor/k8s.io/api/authentication/v1/types.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" -) - -const ( - // ImpersonateUserHeader is used to impersonate a particular user during an API server request - ImpersonateUserHeader = "Impersonate-User" - - // ImpersonateGroupHeader is used to impersonate a particular group during an API server request. - // It can be repeated multiplied times for multiple groups. - ImpersonateGroupHeader = "Impersonate-Group" - - // ImpersonateUserExtraHeaderPrefix is a prefix for any header used to impersonate an entry in the - // extra map[string][]string for user.Info. The key will be every after the prefix. 
- // It can be repeated multiplied times for multiple map keys and the same key can be repeated multiple - // times to have multiple elements in the slice under a single key - ImpersonateUserExtraHeaderPrefix = "Impersonate-Extra-" -) - -// +genclient -// +genclient:nonNamespaced -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TokenReview attempts to authenticate a token to a known user. -// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. -type TokenReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated - Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request can be authenticated. - // +optional - Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// TokenReviewSpec is a description of the token authentication request. -type TokenReviewSpec struct { - // Token is the opaque bearer token. - // +optional - Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` -} - -// TokenReviewStatus is the result of the token authentication request. -type TokenReviewStatus struct { - // Authenticated indicates that the token was associated with a known user. - // +optional - Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` - // User is the UserInfo associated with the provided token. - // +optional - User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` - // Error indicates that the token couldn't be checked - // +optional - Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - // +optional - Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - // +optional - UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` - // The names of groups this user is a part of. - // +optional - Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` - // Any additional information provided by the authenticator. - // +optional - Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -type ExtraValue []string - -func (t ExtraValue) String() string { - return fmt.Sprintf("%v", []string(t)) -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TokenRequest requests a token for a given service account. 
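
The deleted types.go above defines the authentication.k8s.io/v1 API surface: a client posts a TokenReview whose spec carries the opaque bearer token, and the server fills in status with the authenticated flag and the UserInfo (username, uid, groups, extra). The sketch below shows the resulting JSON shape using simplified local stand-ins for the vendored structs; the field names and json tags are copied from the hunk above, while the apiVersion/kind wiring and everything else is an assumption for illustration, not the vendored implementation.

package main

import (
	"encoding/json"
	"fmt"
)

// Local stand-ins for the v1 types removed from vendor/ above; the json tags
// mirror the deleted types.go so the serialized shape matches.
type tokenReviewSpec struct {
	Token string `json:"token,omitempty"`
}

type userInfo struct {
	Username string              `json:"username,omitempty"`
	UID      string              `json:"uid,omitempty"`
	Groups   []string            `json:"groups,omitempty"`
	Extra    map[string][]string `json:"extra,omitempty"` // ExtraValue is a []string
}

type tokenReviewStatus struct {
	Authenticated bool     `json:"authenticated,omitempty"`
	User          userInfo `json:"user,omitempty"`
	Error         string   `json:"error,omitempty"`
}

type tokenReview struct {
	APIVersion string            `json:"apiVersion"` // group/version from the deleted register.go
	Kind       string            `json:"kind"`
	Spec       tokenReviewSpec   `json:"spec"`
	Status     tokenReviewStatus `json:"status,omitempty"`
}

func main() {
	tr := tokenReview{
		APIVersion: "authentication.k8s.io/v1",
		Kind:       "TokenReview",
		Spec:       tokenReviewSpec{Token: "<bearer token>"},
	}
	out, err := json.MarshalIndent(tr, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}

In a real client the vendored k8s.io/api/authentication/v1 structs (or a generated client) would be used instead of these local mirrors; the sketch only shows why the json tags in the deleted file look the way they do.
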
-type TokenRequest struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - Spec TokenRequestSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - // +optional - Status TokenRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// TokenRequestSpec contains client provided parameters of a token request. -type TokenRequestSpec struct { - // Audiences are the intendend audiences of the token. A recipient of a - // token must identitfy themself with an identifier in the list of - // audiences of the token, and otherwise should reject the token. A - // token issued for multiple audiences may be used to authenticate - // against any of the audiences listed but implies a high degree of - // trust between the target audiences. - Audiences []string `json:"audiences" protobuf:"bytes,1,rep,name=audiences"` - - // ExpirationSeconds is the requested duration of validity of the request. The - // token issuer may return a token with a different validity duration so a - // client needs to check the 'expiration' field in a response. - // +optional - ExpirationSeconds *int64 `json:"expirationSeconds" protobuf:"varint,4,opt,name=expirationSeconds"` - - // BoundObjectRef is a reference to an object that the token will be bound to. - // The token will only be valid for as long as the bound objet exists. - // +optional - BoundObjectRef *BoundObjectReference `json:"boundObjectRef" protobuf:"bytes,3,opt,name=boundObjectRef"` -} - -// TokenRequestStatus is the result of a token request. -type TokenRequestStatus struct { - // Token is the opaque bearer token. - Token string `json:"token" protobuf:"bytes,1,opt,name=token"` - // ExpirationTimestamp is the time of expiration of the returned token. - ExpirationTimestamp metav1.Time `json:"expirationTimestamp" protobuf:"bytes,2,opt,name=expirationTimestamp"` -} - -// BoundObjectReference is a reference to an object that a token is bound to. -type BoundObjectReference struct { - // Kind of the referent. Valid kinds are 'Pod' and 'Secret'. - // +optional - Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - // API version of the referent. - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt,name=aPIVersion"` - - // Name of the referent. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` - // UID of the referent. - // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uID,casttype=k8s.io/apimachinery/pkg/types.UID"` -} diff --git a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go deleted file mode 100644 index 6632a5d..0000000 --- a/vendor/k8s.io/api/authentication/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_BoundObjectReference = map[string]string{ - "": "BoundObjectReference is a reference to an object that a token is bound to.", - "kind": "Kind of the referent. Valid kinds are 'Pod' and 'Secret'.", - "apiVersion": "API version of the referent.", - "name": "Name of the referent.", - "uid": "UID of the referent.", -} - -func (BoundObjectReference) SwaggerDoc() map[string]string { - return map_BoundObjectReference -} - -var map_TokenRequest = map[string]string{ - "": "TokenRequest requests a token for a given service account.", -} - -func (TokenRequest) SwaggerDoc() map[string]string { - return map_TokenRequest -} - -var map_TokenRequestSpec = map[string]string{ - "": "TokenRequestSpec contains client provided parameters of a token request.", - "audiences": "Audiences are the intendend audiences of the token. A recipient of a token must identitfy themself with an identifier in the list of audiences of the token, and otherwise should reject the token. A token issued for multiple audiences may be used to authenticate against any of the audiences listed but implies a high degree of trust between the target audiences.", - "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the request. The token issuer may return a token with a different validity duration so a client needs to check the 'expiration' field in a response.", - "boundObjectRef": "BoundObjectRef is a reference to an object that the token will be bound to. The token will only be valid for as long as the bound objet exists.", -} - -func (TokenRequestSpec) SwaggerDoc() map[string]string { - return map_TokenRequestSpec -} - -var map_TokenRequestStatus = map[string]string{ - "": "TokenRequestStatus is the result of a token request.", - "token": "Token is the opaque bearer token.", - "expirationTimestamp": "ExpirationTimestamp is the time of expiration of the returned token.", -} - -func (TokenRequestStatus) SwaggerDoc() map[string]string { - return map_TokenRequestStatus -} - -var map_TokenReview = map[string]string{ - "": "TokenReview attempts to authenticate a token to a known user. 
Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request can be authenticated.", -} - -func (TokenReview) SwaggerDoc() map[string]string { - return map_TokenReview -} - -var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", -} - -func (TokenReviewSpec) SwaggerDoc() map[string]string { - return map_TokenReviewSpec -} - -var map_TokenReviewStatus = map[string]string{ - "": "TokenReviewStatus is the result of the token authentication request.", - "authenticated": "Authenticated indicates that the token was associated with a known user.", - "user": "User is the UserInfo associated with the provided token.", - "error": "Error indicates that the token couldn't be checked", -} - -func (TokenReviewStatus) SwaggerDoc() map[string]string { - return map_TokenReviewStatus -} - -var map_UserInfo = map[string]string{ - "": "UserInfo holds the information about the user needed to implement the user.Info interface.", - "username": "The name that uniquely identifies this user among all active users.", - "uid": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", - "groups": "The names of groups this user is a part of.", - "extra": "Any additional information provided by the authenticator.", -} - -func (UserInfo) SwaggerDoc() map[string]string { - return map_UserInfo -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go deleted file mode 100644 index f36c253..0000000 --- a/vendor/k8s.io/api/authentication/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,234 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *BoundObjectReference) DeepCopyInto(out *BoundObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BoundObjectReference. -func (in *BoundObjectReference) DeepCopy() *BoundObjectReference { - if in == nil { - return nil - } - out := new(BoundObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in ExtraValue) DeepCopyInto(out *ExtraValue) { - { - in := &in - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. -func (in ExtraValue) DeepCopy() ExtraValue { - if in == nil { - return nil - } - out := new(ExtraValue) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenRequest) DeepCopyInto(out *TokenRequest) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequest. -func (in *TokenRequest) DeepCopy() *TokenRequest { - if in == nil { - return nil - } - out := new(TokenRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TokenRequest) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenRequestSpec) DeepCopyInto(out *TokenRequestSpec) { - *out = *in - if in.Audiences != nil { - in, out := &in.Audiences, &out.Audiences - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ExpirationSeconds != nil { - in, out := &in.ExpirationSeconds, &out.ExpirationSeconds - *out = new(int64) - **out = **in - } - if in.BoundObjectRef != nil { - in, out := &in.BoundObjectRef, &out.BoundObjectRef - *out = new(BoundObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestSpec. -func (in *TokenRequestSpec) DeepCopy() *TokenRequestSpec { - if in == nil { - return nil - } - out := new(TokenRequestSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenRequestStatus) DeepCopyInto(out *TokenRequestStatus) { - *out = *in - in.ExpirationTimestamp.DeepCopyInto(&out.ExpirationTimestamp) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenRequestStatus. -func (in *TokenRequestStatus) DeepCopy() *TokenRequestStatus { - if in == nil { - return nil - } - out := new(TokenRequestStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenReview) DeepCopyInto(out *TokenReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview. -func (in *TokenReview) DeepCopy() *TokenReview { - if in == nil { - return nil - } - out := new(TokenReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
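
The zz_generated.deepcopy.go hunk above is mechanical, but the pattern it encodes matters: plain value fields are copied by assignment, while slice and map fields must be re-allocated element by element or the copy would share backing storage with the original. Below is a small hand-written sketch of the same idea for a map of string slices, shaped like the Extra field on UserInfo; it is illustrative only and is not the generated code.

package main

import "fmt"

// deepCopyExtra copies a map of string slices the way the generated
// DeepCopyInto for UserInfo handles Extra: a new map with freshly
// allocated slices, preserving nil entries.
func deepCopyExtra(in map[string][]string) map[string][]string {
	if in == nil {
		return nil
	}
	out := make(map[string][]string, len(in))
	for k, v := range in {
		if v == nil {
			out[k] = nil // keep nil values distinct from empty slices
			continue
		}
		cp := make([]string, len(v))
		copy(cp, v)
		out[k] = cp
	}
	return out
}

func main() {
	orig := map[string][]string{"scopes": {"read"}}
	shallow := orig             // shares the same map and slices
	deep := deepCopyExtra(orig) // independent storage

	orig["scopes"][0] = "write"
	fmt.Println(shallow["scopes"][0]) // "write" - mutation is visible
	fmt.Println(deep["scopes"][0])    // "read"  - unaffected
}

This is why the generated DeepCopy functions return safely mutable copies even for nested maps of slices, whereas a struct assignment alone would not.
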
-func (in *TokenReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec. -func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { - if in == nil { - return nil - } - out := new(TokenReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { - *out = *in - in.User.DeepCopyInto(&out.User) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus. -func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus { - if in == nil { - return nil - } - out := new(TokenReviewStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserInfo) DeepCopyInto(out *UserInfo) { - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo. -func (in *UserInfo) DeepCopy() *UserInfo { - if in == nil { - return nil - } - out := new(UserInfo) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/authentication/v1beta1/doc.go b/vendor/k8s.io/api/authentication/v1beta1/doc.go deleted file mode 100644 index e0de315..0000000 --- a/vendor/k8s.io/api/authentication/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=authentication.k8s.io -// +k8s:openapi-gen=true -package v1beta1 // import "k8s.io/api/authentication/v1beta1" diff --git a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go b/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go deleted file mode 100644 index 8503d21..0000000 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.pb.go +++ /dev/null @@ -1,1301 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - TokenReview - TokenReviewSpec - TokenReviewStatus - UserInfo -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *TokenReview) Reset() { *m = TokenReview{} } -func (*TokenReview) ProtoMessage() {} -func (*TokenReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *TokenReviewSpec) Reset() { *m = TokenReviewSpec{} } -func (*TokenReviewSpec) ProtoMessage() {} -func (*TokenReviewSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *TokenReviewStatus) Reset() { *m = TokenReviewStatus{} } -func (*TokenReviewStatus) ProtoMessage() {} -func (*TokenReviewStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *UserInfo) Reset() { *m = UserInfo{} } -func (*UserInfo) ProtoMessage() {} -func (*UserInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func init() { - proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authentication.v1beta1.ExtraValue") - proto.RegisterType((*TokenReview)(nil), "k8s.io.api.authentication.v1beta1.TokenReview") - proto.RegisterType((*TokenReviewSpec)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewSpec") - proto.RegisterType((*TokenReviewStatus)(nil), "k8s.io.api.authentication.v1beta1.TokenReviewStatus") - proto.RegisterType((*UserInfo)(nil), "k8s.io.api.authentication.v1beta1.UserInfo") -} -func (m ExtraValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - 
return i, nil -} - -func (m *TokenReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *TokenReviewSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Token))) - i += copy(dAtA[i:], m.Token) - return i, nil -} - -func (m *TokenReviewStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TokenReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Authenticated { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.User.Size())) - n4, err := m.User.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil -} - -func (m *UserInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *UserInfo) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x22 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n5, err := (&v).MarshalTo(dAtA[i:]) - if err != nil 
{ - return 0, err - } - i += n5 - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m ExtraValue) Size() (n int) { - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *TokenReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReviewSpec) Size() (n int) { - var l int - _ = l - l = len(m.Token) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *TokenReviewStatus) Size() (n int) { - var l int - _ = l - n += 2 - l = m.User.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Error) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *UserInfo) Size() (n int) { - var l int - _ = l - l = len(m.Username) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Extra) > 0 { - for k, v := range m.Extra { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *TokenReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "TokenReviewSpec", "TokenReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "TokenReviewStatus", "TokenReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReviewSpec{`, - `Token:` + fmt.Sprintf("%v", this.Token) + `,`, - `}`, - }, "") - return s -} -func (this *TokenReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TokenReviewStatus{`, - `Authenticated:` + fmt.Sprintf("%v", this.Authenticated) + `,`, - `User:` + strings.Replace(strings.Replace(this.User.String(), "UserInfo", "UserInfo", 1), `&`, ``, 1) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - 
`}`, - }, "") - return s -} -func (this *UserInfo) String() string { - if this == nil { - return "nil" - } - keysForExtra := make([]string, 0, len(this.Extra)) - for k := range this.Extra { - keysForExtra = append(keysForExtra, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - mapStringForExtra := "map[string]ExtraValue{" - for _, k := range keysForExtra { - mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) - } - mapStringForExtra += "}" - s := strings.Join([]string{`&UserInfo{`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `Extra:` + mapStringForExtra + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ExtraValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - *m = append(*m, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TokenReviewSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Token = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*TokenReviewStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TokenReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TokenReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Authenticated", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Authenticated = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.User.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *UserInfo) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: UserInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: UserInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Extra == nil { - m.Extra = make(map[string]ExtraValue) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authentication/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 635 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x53, 0xcf, 0x4f, 0xd4, 0x40, - 0x14, 0x6e, 0xf7, 0x07, 0xee, 0xce, 0x8a, 0xe2, 0x24, 0x26, 0x9b, 0x4d, 0xec, 0xae, 0xeb, 0x85, - 0x44, 0x99, 0x0a, 0x21, 0x48, 0xf0, 0x64, 0x95, 0x18, 0x4c, 0x88, 0xc9, 0x08, 0x1e, 0xd4, 0x83, - 0xb3, 0xdd, 0x47, 0xb7, 0xae, 0xed, 0x34, 0xd3, 0x69, 0x95, 0x1b, 0x7f, 0x82, 0x47, 0x8f, 0x26, - 
0xfe, 0x25, 0x26, 0x1e, 0x38, 0x72, 0xe4, 0x60, 0x88, 0xd4, 0x7f, 0xc4, 0xcc, 0x74, 0x64, 0x17, - 0x88, 0x01, 0x6e, 0xf3, 0xbe, 0xf7, 0xbe, 0x6f, 0xde, 0xf7, 0x66, 0x1e, 0x7a, 0x31, 0x5e, 0x4d, - 0x49, 0xc8, 0xdd, 0x71, 0x36, 0x00, 0x11, 0x83, 0x84, 0xd4, 0xcd, 0x21, 0x1e, 0x72, 0xe1, 0x9a, - 0x04, 0x4b, 0x42, 0x97, 0x65, 0x72, 0x04, 0xb1, 0x0c, 0x7d, 0x26, 0x43, 0x1e, 0xbb, 0xf9, 0xe2, - 0x00, 0x24, 0x5b, 0x74, 0x03, 0x88, 0x41, 0x30, 0x09, 0x43, 0x92, 0x08, 0x2e, 0x39, 0xbe, 0x5b, - 0x52, 0x08, 0x4b, 0x42, 0x72, 0x9a, 0x42, 0x0c, 0xa5, 0xb3, 0x10, 0x84, 0x72, 0x94, 0x0d, 0x88, - 0xcf, 0x23, 0x37, 0xe0, 0x01, 0x77, 0x35, 0x73, 0x90, 0xed, 0xe8, 0x48, 0x07, 0xfa, 0x54, 0x2a, - 0x76, 0x96, 0x27, 0x4d, 0x44, 0xcc, 0x1f, 0x85, 0x31, 0x88, 0x5d, 0x37, 0x19, 0x07, 0x0a, 0x48, - 0xdd, 0x08, 0x24, 0x73, 0xf3, 0x73, 0x7d, 0x74, 0xdc, 0xff, 0xb1, 0x44, 0x16, 0xcb, 0x30, 0x82, - 0x73, 0x84, 0x95, 0x8b, 0x08, 0xa9, 0x3f, 0x82, 0x88, 0x9d, 0xe5, 0xf5, 0x1f, 0x21, 0xb4, 0xfe, - 0x59, 0x0a, 0xf6, 0x9a, 0x7d, 0xcc, 0x00, 0x77, 0x51, 0x3d, 0x94, 0x10, 0xa5, 0x6d, 0xbb, 0x57, - 0x9d, 0x6f, 0x7a, 0xcd, 0xe2, 0xa8, 0x5b, 0xdf, 0x50, 0x00, 0x2d, 0xf1, 0xb5, 0xc6, 0xd7, 0x6f, - 0x5d, 0x6b, 0xef, 0x57, 0xcf, 0xea, 0x7f, 0xaf, 0xa0, 0xd6, 0x16, 0x1f, 0x43, 0x4c, 0x21, 0x0f, - 0xe1, 0x13, 0x7e, 0x8f, 0x1a, 0xca, 0xcc, 0x90, 0x49, 0xd6, 0xb6, 0x7b, 0xf6, 0x7c, 0x6b, 0xe9, - 0x21, 0x99, 0x0c, 0xf3, 0xa4, 0x27, 0x92, 0x8c, 0x03, 0x05, 0xa4, 0x44, 0x55, 0x93, 0x7c, 0x91, - 0xbc, 0x1c, 0x7c, 0x00, 0x5f, 0x6e, 0x82, 0x64, 0x1e, 0xde, 0x3f, 0xea, 0x5a, 0xc5, 0x51, 0x17, - 0x4d, 0x30, 0x7a, 0xa2, 0x8a, 0xb7, 0x50, 0x2d, 0x4d, 0xc0, 0x6f, 0x57, 0xb4, 0xfa, 0x12, 0xb9, - 0xf0, 0xa9, 0xc8, 0x54, 0x7f, 0xaf, 0x12, 0xf0, 0xbd, 0xeb, 0x46, 0xbf, 0xa6, 0x22, 0xaa, 0xd5, - 0xf0, 0x3b, 0x34, 0x93, 0x4a, 0x26, 0xb3, 0xb4, 0x5d, 0xd5, 0xba, 0xcb, 0x57, 0xd4, 0xd5, 0x5c, - 0xef, 0x86, 0x51, 0x9e, 0x29, 0x63, 0x6a, 0x34, 0xfb, 0x2b, 0xe8, 0xe6, 0x99, 0x26, 0xf0, 0x3d, - 0x54, 0x97, 0x0a, 0xd2, 0x53, 0x6a, 0x7a, 0xb3, 0x86, 0x59, 0x2f, 0xeb, 0xca, 0x5c, 0xff, 0xa7, - 0x8d, 0x6e, 0x9d, 0xbb, 0x05, 0x3f, 0x46, 0xb3, 0x53, 0x1d, 0xc1, 0x50, 0x4b, 0x34, 0xbc, 0xdb, - 0x46, 0x62, 0xf6, 0xc9, 0x74, 0x92, 0x9e, 0xae, 0xc5, 0x9b, 0xa8, 0x96, 0xa5, 0x20, 0xcc, 0xf8, - 0xee, 0x5f, 0xc2, 0xe6, 0x76, 0x0a, 0x62, 0x23, 0xde, 0xe1, 0x93, 0xb9, 0x29, 0x84, 0x6a, 0x19, - 0x65, 0x03, 0x84, 0xe0, 0x42, 0x8f, 0x6d, 0xca, 0xc6, 0xba, 0x02, 0x69, 0x99, 0xeb, 0xff, 0xa8, - 0xa0, 0xc6, 0x3f, 0x15, 0xfc, 0x00, 0x35, 0x14, 0x33, 0x66, 0x11, 0x18, 0xef, 0x73, 0x86, 0xa4, - 0x6b, 0x14, 0x4e, 0x4f, 0x2a, 0xf0, 0x1d, 0x54, 0xcd, 0xc2, 0xa1, 0xee, 0xb6, 0xe9, 0xb5, 0x4c, - 0x61, 0x75, 0x7b, 0xe3, 0x19, 0x55, 0x38, 0xee, 0xa3, 0x99, 0x40, 0xf0, 0x2c, 0x51, 0xcf, 0xa6, - 0xbe, 0x2a, 0x52, 0xc3, 0x7f, 0xae, 0x11, 0x6a, 0x32, 0xf8, 0x2d, 0xaa, 0x83, 0xfa, 0xdb, 0xed, - 0x5a, 0xaf, 0x3a, 0xdf, 0x5a, 0x5a, 0xb9, 0x82, 0x65, 0xa2, 0x97, 0x62, 0x3d, 0x96, 0x62, 0x77, - 0xca, 0x9a, 0xc2, 0x68, 0xa9, 0xd9, 0x09, 0xcc, 0xe2, 0xe8, 0x1a, 0x3c, 0x87, 0xaa, 0x63, 0xd8, - 0x2d, 0x6d, 0x51, 0x75, 0xc4, 0x4f, 0x51, 0x3d, 0x57, 0x3b, 0x65, 0xe6, 0xbd, 0x70, 0x89, 0xcb, - 0x27, 0x8b, 0x48, 0x4b, 0xee, 0x5a, 0x65, 0xd5, 0xf6, 0x16, 0xf6, 0x8f, 0x1d, 0xeb, 0xe0, 0xd8, - 0xb1, 0x0e, 0x8f, 0x1d, 0x6b, 0xaf, 0x70, 0xec, 0xfd, 0xc2, 0xb1, 0x0f, 0x0a, 0xc7, 0x3e, 0x2c, - 0x1c, 0xfb, 0x77, 0xe1, 0xd8, 0x5f, 0xfe, 0x38, 0xd6, 0x9b, 0x6b, 0x46, 0xe4, 0x6f, 0x00, 0x00, - 0x00, 0xff, 0xff, 0x39, 0x00, 0xe7, 0xfa, 0x0e, 0x05, 0x00, 0x00, -} diff --git 
a/vendor/k8s.io/api/authentication/v1beta1/generated.proto b/vendor/k8s.io/api/authentication/v1beta1/generated.proto deleted file mode 100644 index a057bc5..0000000 --- a/vendor/k8s.io/api/authentication/v1beta1/generated.proto +++ /dev/null @@ -1,98 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.authentication.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ExtraValue { - // items, if empty, will result in an empty slice - - repeated string items = 1; -} - -// TokenReview attempts to authenticate a token to a known user. -// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. -message TokenReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated - optional TokenReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request can be authenticated. - // +optional - optional TokenReviewStatus status = 3; -} - -// TokenReviewSpec is a description of the token authentication request. -message TokenReviewSpec { - // Token is the opaque bearer token. - // +optional - optional string token = 1; -} - -// TokenReviewStatus is the result of the token authentication request. -message TokenReviewStatus { - // Authenticated indicates that the token was associated with a known user. - // +optional - optional bool authenticated = 1; - - // User is the UserInfo associated with the provided token. - // +optional - optional UserInfo user = 2; - - // Error indicates that the token couldn't be checked - // +optional - optional string error = 3; -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -message UserInfo { - // The name that uniquely identifies this user among all active users. - // +optional - optional string username = 1; - - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - // +optional - optional string uid = 2; - - // The names of groups this user is a part of. - // +optional - repeated string groups = 3; - - // Any additional information provided by the authenticator. 
- // +optional - map extra = 4; -} - diff --git a/vendor/k8s.io/api/authentication/v1beta1/register.go b/vendor/k8s.io/api/authentication/v1beta1/register.go deleted file mode 100644 index ed23e50..0000000 --- a/vendor/k8s.io/api/authentication/v1beta1/register.go +++ /dev/null @@ -1,51 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "authentication.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &TokenReview{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types.go b/vendor/k8s.io/api/authentication/v1beta1/types.go deleted file mode 100644 index a90949d..0000000 --- a/vendor/k8s.io/api/authentication/v1beta1/types.go +++ /dev/null @@ -1,92 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// TokenReview attempts to authenticate a token to a known user. -// Note: TokenReview requests may be cached by the webhook token authenticator -// plugin in the kube-apiserver. 
-type TokenReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated - Spec TokenReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request can be authenticated. - // +optional - Status TokenReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// TokenReviewSpec is a description of the token authentication request. -type TokenReviewSpec struct { - // Token is the opaque bearer token. - // +optional - Token string `json:"token,omitempty" protobuf:"bytes,1,opt,name=token"` -} - -// TokenReviewStatus is the result of the token authentication request. -type TokenReviewStatus struct { - // Authenticated indicates that the token was associated with a known user. - // +optional - Authenticated bool `json:"authenticated,omitempty" protobuf:"varint,1,opt,name=authenticated"` - // User is the UserInfo associated with the provided token. - // +optional - User UserInfo `json:"user,omitempty" protobuf:"bytes,2,opt,name=user"` - // Error indicates that the token couldn't be checked - // +optional - Error string `json:"error,omitempty" protobuf:"bytes,3,opt,name=error"` -} - -// UserInfo holds the information about the user needed to implement the -// user.Info interface. -type UserInfo struct { - // The name that uniquely identifies this user among all active users. - // +optional - Username string `json:"username,omitempty" protobuf:"bytes,1,opt,name=username"` - // A unique value that identifies this user across time. If this user is - // deleted and another user by the same name is added, they will have - // different UIDs. - // +optional - UID string `json:"uid,omitempty" protobuf:"bytes,2,opt,name=uid"` - // The names of groups this user is a part of. - // +optional - Groups []string `json:"groups,omitempty" protobuf:"bytes,3,rep,name=groups"` - // Any additional information provided by the authenticator. - // +optional - Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,4,rep,name=extra"` -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -type ExtraValue []string - -func (t ExtraValue) String() string { - return fmt.Sprintf("%v", []string(t)) -} diff --git a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 968999d..0000000 --- a/vendor/k8s.io/api/authentication/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,72 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. 
Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_TokenReview = map[string]string{ - "": "TokenReview attempts to authenticate a token to a known user. Note: TokenReview requests may be cached by the webhook token authenticator plugin in the kube-apiserver.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request can be authenticated.", -} - -func (TokenReview) SwaggerDoc() map[string]string { - return map_TokenReview -} - -var map_TokenReviewSpec = map[string]string{ - "": "TokenReviewSpec is a description of the token authentication request.", - "token": "Token is the opaque bearer token.", -} - -func (TokenReviewSpec) SwaggerDoc() map[string]string { - return map_TokenReviewSpec -} - -var map_TokenReviewStatus = map[string]string{ - "": "TokenReviewStatus is the result of the token authentication request.", - "authenticated": "Authenticated indicates that the token was associated with a known user.", - "user": "User is the UserInfo associated with the provided token.", - "error": "Error indicates that the token couldn't be checked", -} - -func (TokenReviewStatus) SwaggerDoc() map[string]string { - return map_TokenReviewStatus -} - -var map_UserInfo = map[string]string{ - "": "UserInfo holds the information about the user needed to implement the user.Info interface.", - "username": "The name that uniquely identifies this user among all active users.", - "uid": "A unique value that identifies this user across time. If this user is deleted and another user by the same name is added, they will have different UIDs.", - "groups": "The names of groups this user is a part of.", - "extra": "Any additional information provided by the authenticator.", -} - -func (UserInfo) SwaggerDoc() map[string]string { - return map_UserInfo -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 3a5f6d5..0000000 --- a/vendor/k8s.io/api/authentication/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,142 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in ExtraValue) DeepCopyInto(out *ExtraValue) { - { - in := &in - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. -func (in ExtraValue) DeepCopy() ExtraValue { - if in == nil { - return nil - } - out := new(ExtraValue) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenReview) DeepCopyInto(out *TokenReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReview. -func (in *TokenReview) DeepCopy() *TokenReview { - if in == nil { - return nil - } - out := new(TokenReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TokenReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenReviewSpec) DeepCopyInto(out *TokenReviewSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewSpec. -func (in *TokenReviewSpec) DeepCopy() *TokenReviewSpec { - if in == nil { - return nil - } - out := new(TokenReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TokenReviewStatus) DeepCopyInto(out *TokenReviewStatus) { - *out = *in - in.User.DeepCopyInto(&out.User) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TokenReviewStatus. -func (in *TokenReviewStatus) DeepCopy() *TokenReviewStatus { - if in == nil { - return nil - } - out := new(TokenReviewStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *UserInfo) DeepCopyInto(out *UserInfo) { - *out = *in - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo. -func (in *UserInfo) DeepCopy() *UserInfo { - if in == nil { - return nil - } - out := new(UserInfo) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/authorization/v1/doc.go b/vendor/k8s.io/api/authorization/v1/doc.go deleted file mode 100644 index c06b798..0000000 --- a/vendor/k8s.io/api/authorization/v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=authorization.k8s.io -package v1 // import "k8s.io/api/authorization/v1" diff --git a/vendor/k8s.io/api/authorization/v1/generated.pb.go b/vendor/k8s.io/api/authorization/v1/generated.pb.go deleted file mode 100644 index e9145af..0000000 --- a/vendor/k8s.io/api/authorization/v1/generated.pb.go +++ /dev/null @@ -1,3528 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } -func (*LocalSubjectAccessReview) ProtoMessage() {} -func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } -func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} -func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} -} - -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } -func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} -func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} -} - -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func (*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } -func (*SubjectAccessReviewSpec) ProtoMessage() {} -func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} -} - -func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } -func (*SubjectAccessReviewStatus) ProtoMessage() {} -func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} -} - -func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } -func (*SubjectRulesReviewStatus) ProtoMessage() {} -func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} -} - -func init() { - proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1.ExtraValue") - proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.LocalSubjectAccessReview") - 
	proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1.NonResourceAttributes")
-	proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1.NonResourceRule")
-	proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1.ResourceAttributes")
-	proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1.ResourceRule")
-	proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReview")
-	proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectAccessReviewSpec")
-	proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReview")
-	proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1.SelfSubjectRulesReviewSpec")
-	proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1.SubjectAccessReview")
-	proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewSpec")
-	proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectAccessReviewStatus")
-	proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1.SubjectRulesReviewStatus")
-}
-func (m ExtraValue) Marshal() (dAtA []byte, err error) {
-	size := m.Size()
-	dAtA = make([]byte, size)
-	n, err := m.MarshalTo(dAtA)
-	if err != nil {
-		return nil, err
-	}
-	return dAtA[:n], nil
-}
-
-func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) {
-	var i int
-	_ = i
-	var l int
-	_ = l
-	if len(m) > 0 {
-		for _, s := range m {
-			dAtA[i] = 0xa
-			i++
-			l = len(s)
-			for l >= 1<<7 {
-				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
-				l >>= 7
-				i++
-			}
-			dAtA[i] = uint8(l)
-			i++
-			i += copy(dAtA[i:], s)
-		}
-	}
-	return i, nil
-}
-
[... remainder of this removed hunk: the generated Marshal/MarshalTo, Size, String, and Unmarshal methods for LocalSubjectAccessReview, NonResourceAttributes, NonResourceRule, ResourceAttributes, ResourceRule, SelfSubjectAccessReview(Spec), SelfSubjectRulesReview(Spec), SubjectAccessReview(Spec), SubjectAccessReviewStatus, and SubjectRulesReviewStatus, plus the encodeFixed64Generated/encodeFixed32Generated/encodeVarintGenerated and sovGenerated/sozGenerated helpers, all following the same mechanically generated pattern ...]
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EvaluationError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1140 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xf7, 0xae, 0xed, 0xc4, 0x1e, 0x37, 0xff, 0xa4, 0x13, 0xa5, 0xd9, 0xa6, 0xfa, 0xdb, 0xd1, - 0x22, 0x41, 0x2a, 0xca, 0x2e, 0xb1, 0xda, 0x26, 0xaa, 0x54, 0xa1, 0x58, 0x89, 0x50, 0xa4, 0xb6, - 0x54, 0x13, 0x25, 0x12, 0x45, 0x20, 0xc6, 0xeb, 0x89, 0xbd, 0xc4, 0xde, 0x5d, 0x66, 0x66, 0x1d, - 0xc2, 0xa9, 0x12, 
0x5f, 0x80, 0x23, 0x07, 0x0e, 0x7c, 0x03, 0x2e, 0x48, 0xdc, 0x38, 0x70, 0x40, - 0x39, 0xf6, 0x58, 0x24, 0x64, 0x91, 0xe5, 0xcc, 0x77, 0x40, 0x33, 0x3b, 0xf6, 0xae, 0x93, 0xb5, - 0x9b, 0x70, 0xa0, 0x97, 0xde, 0x76, 0x9f, 0xdf, 0xef, 0x79, 0x99, 0xe7, 0x65, 0xe6, 0x01, 0xdb, - 0x47, 0x9b, 0xcc, 0x72, 0x7d, 0xfb, 0x28, 0x6c, 0x12, 0xea, 0x11, 0x4e, 0x98, 0xdd, 0x27, 0x5e, - 0xcb, 0xa7, 0xb6, 0x02, 0x70, 0xe0, 0xda, 0x38, 0xe4, 0x1d, 0x9f, 0xba, 0x5f, 0x63, 0xee, 0xfa, - 0x9e, 0xdd, 0x5f, 0xb7, 0xdb, 0xc4, 0x23, 0x14, 0x73, 0xd2, 0xb2, 0x02, 0xea, 0x73, 0x1f, 0xde, - 0x8a, 0xc9, 0x16, 0x0e, 0x5c, 0x6b, 0x8c, 0x6c, 0xf5, 0xd7, 0x57, 0xde, 0x6b, 0xbb, 0xbc, 0x13, - 0x36, 0x2d, 0xc7, 0xef, 0xd9, 0x6d, 0xbf, 0xed, 0xdb, 0x52, 0xa7, 0x19, 0x1e, 0xca, 0x3f, 0xf9, - 0x23, 0xbf, 0x62, 0x5b, 0x2b, 0x77, 0x13, 0xc7, 0x3d, 0xec, 0x74, 0x5c, 0x8f, 0xd0, 0x13, 0x3b, - 0x38, 0x6a, 0x0b, 0x01, 0xb3, 0x7b, 0x84, 0xe3, 0x8c, 0x08, 0x56, 0xec, 0x49, 0x5a, 0x34, 0xf4, - 0xb8, 0xdb, 0x23, 0x17, 0x14, 0xee, 0xbf, 0x4a, 0x81, 0x39, 0x1d, 0xd2, 0xc3, 0xe7, 0xf5, 0xcc, - 0x0d, 0x00, 0x76, 0xbe, 0xe2, 0x14, 0x1f, 0xe0, 0x6e, 0x48, 0x60, 0x0d, 0x14, 0x5d, 0x4e, 0x7a, - 0xcc, 0xd0, 0x56, 0xf3, 0x6b, 0xe5, 0x46, 0x39, 0x1a, 0xd4, 0x8a, 0xbb, 0x42, 0x80, 0x62, 0xf9, - 0x83, 0xd2, 0x77, 0x3f, 0xd4, 0x72, 0xcf, 0xff, 0x58, 0xcd, 0x99, 0x3f, 0xe9, 0xc0, 0x78, 0xe4, - 0x3b, 0xb8, 0xbb, 0x17, 0x36, 0xbf, 0x20, 0x0e, 0xdf, 0x72, 0x1c, 0xc2, 0x18, 0x22, 0x7d, 0x97, - 0x1c, 0xc3, 0xcf, 0x41, 0x49, 0x9c, 0xac, 0x85, 0x39, 0x36, 0xb4, 0x55, 0x6d, 0xad, 0x52, 0x7f, - 0xdf, 0x4a, 0x72, 0x3a, 0x0a, 0xd0, 0x0a, 0x8e, 0xda, 0x42, 0xc0, 0x2c, 0xc1, 0xb6, 0xfa, 0xeb, - 0xd6, 0x47, 0xd2, 0xd6, 0x63, 0xc2, 0x71, 0x03, 0x9e, 0x0e, 0x6a, 0xb9, 0x68, 0x50, 0x03, 0x89, - 0x0c, 0x8d, 0xac, 0xc2, 0x03, 0x50, 0x60, 0x01, 0x71, 0x0c, 0x5d, 0x5a, 0xbf, 0x6b, 0x4d, 0xa9, - 0x98, 0x95, 0x11, 0xe1, 0x5e, 0x40, 0x9c, 0xc6, 0x35, 0xe5, 0xa1, 0x20, 0xfe, 0x90, 0xb4, 0x07, - 0x3f, 0x03, 0x33, 0x8c, 0x63, 0x1e, 0x32, 0x23, 0x2f, 0x2d, 0xdf, 0xbf, 0xb2, 0x65, 0xa9, 0xdd, - 0xf8, 0x9f, 0xb2, 0x3d, 0x13, 0xff, 0x23, 0x65, 0xd5, 0xfc, 0x04, 0x2c, 0x3d, 0xf1, 0x3d, 0x44, - 0x98, 0x1f, 0x52, 0x87, 0x6c, 0x71, 0x4e, 0xdd, 0x66, 0xc8, 0x09, 0x83, 0xab, 0xa0, 0x10, 0x60, - 0xde, 0x91, 0xe9, 0x2a, 0x27, 0xa1, 0x3d, 0xc5, 0xbc, 0x83, 0x24, 0x22, 0x18, 0x7d, 0x42, 0x9b, - 0xf2, 0xc8, 0x29, 0xc6, 0x01, 0xa1, 0x4d, 0x24, 0x11, 0xf3, 0x4b, 0x30, 0x9f, 0x32, 0x8e, 0xc2, - 0xae, 0xac, 0xa8, 0x80, 0xc6, 0x2a, 0x2a, 0x34, 0x18, 0x8a, 0xe5, 0xf0, 0x21, 0x98, 0xf7, 0x12, - 0x9d, 0x7d, 0xf4, 0x88, 0x19, 0xba, 0xa4, 0x2e, 0x46, 0x83, 0x5a, 0xda, 0x9c, 0x80, 0xd0, 0x79, - 0xae, 0xf9, 0x8b, 0x0e, 0x60, 0xc6, 0x69, 0x6c, 0x50, 0xf6, 0x70, 0x8f, 0xb0, 0x00, 0x3b, 0x44, - 0x1d, 0xe9, 0xba, 0x0a, 0xb8, 0xfc, 0x64, 0x08, 0xa0, 0x84, 0xf3, 0xea, 0xc3, 0xc1, 0xb7, 0x40, - 0xb1, 0x4d, 0xfd, 0x30, 0x90, 0x85, 0x29, 0x37, 0xe6, 0x14, 0xa5, 0xf8, 0xa1, 0x10, 0xa2, 0x18, - 0x83, 0xb7, 0xc1, 0x6c, 0x9f, 0x50, 0xe6, 0xfa, 0x9e, 0x51, 0x90, 0xb4, 0x79, 0x45, 0x9b, 0x3d, - 0x88, 0xc5, 0x68, 0x88, 0xc3, 0x3b, 0xa0, 0x44, 0x55, 0xe0, 0x46, 0x51, 0x72, 0x17, 0x14, 0xb7, - 0x34, 0xca, 0xe0, 0x88, 0x01, 0xef, 0x81, 0x0a, 0x0b, 0x9b, 0x23, 0x85, 0x19, 0xa9, 0xb0, 0xa8, - 0x14, 0x2a, 0x7b, 0x09, 0x84, 0xd2, 0x3c, 0x71, 0x2c, 0x71, 0x46, 0x63, 0x76, 0xfc, 0x58, 0x22, - 0x05, 0x48, 0x22, 0xe6, 0xaf, 0x1a, 0xb8, 0x76, 0xb5, 0x8a, 0xbd, 0x0b, 0xca, 0x38, 0x70, 0xe5, - 0xb1, 0x87, 0xb5, 0x9a, 0x13, 0x79, 0xdd, 0x7a, 0xba, 0x1b, 0x0b, 0x51, 0x82, 0x0b, 0xf2, 0x30, - 0x18, 0xd1, 0xd2, 0x23, 0xf2, 0xd0, 0x25, 
0x43, 0x09, 0x0e, 0x37, 0xc0, 0xdc, 0xf0, 0x47, 0x16, - 0xc9, 0x28, 0x48, 0x85, 0xeb, 0xd1, 0xa0, 0x36, 0x87, 0xd2, 0x00, 0x1a, 0xe7, 0x99, 0x3f, 0xeb, - 0x60, 0x79, 0x8f, 0x74, 0x0f, 0x5f, 0xcf, 0x5d, 0xf0, 0x6c, 0xec, 0x2e, 0xd8, 0x9c, 0x3e, 0xb1, - 0xd9, 0x51, 0xbe, 0xb6, 0xfb, 0xe0, 0x7b, 0x1d, 0xdc, 0x9a, 0x12, 0x13, 0x3c, 0x06, 0x90, 0x5e, - 0x18, 0x2f, 0x95, 0x47, 0x7b, 0x6a, 0x2c, 0x17, 0xa7, 0xb2, 0x71, 0x23, 0x1a, 0xd4, 0x32, 0xa6, - 0x15, 0x65, 0xb8, 0x80, 0xdf, 0x68, 0x60, 0xc9, 0xcb, 0xba, 0xa9, 0x54, 0x9a, 0xeb, 0x53, 0x9d, - 0x67, 0xde, 0x71, 0x8d, 0x9b, 0xd1, 0xa0, 0x96, 0x7d, 0xfd, 0xa1, 0x6c, 0x5f, 0xe2, 0x95, 0xb9, - 0x91, 0x4a, 0x8f, 0x18, 0x90, 0xff, 0xae, 0xaf, 0x3e, 0x1e, 0xeb, 0xab, 0x8d, 0xcb, 0xf6, 0x55, - 0x2a, 0xc8, 0x89, 0x6d, 0xf5, 0xe9, 0xb9, 0xb6, 0xba, 0x77, 0x99, 0xb6, 0x4a, 0x1b, 0x9e, 0xde, - 0x55, 0x8f, 0xc1, 0xca, 0xe4, 0x80, 0xae, 0x7c, 0x39, 0x9b, 0x3f, 0xea, 0x60, 0xf1, 0xcd, 0x33, - 0x7f, 0x95, 0xb1, 0xfe, 0xad, 0x00, 0x96, 0xdf, 0x8c, 0xf4, 0xa4, 0x45, 0x27, 0x64, 0x84, 0xaa, - 0x67, 0x7c, 0x54, 0x9c, 0x7d, 0x46, 0x28, 0x92, 0x08, 0x34, 0xc1, 0x4c, 0x3b, 0x7e, 0xdd, 0xe2, - 0xf7, 0x07, 0x88, 0x04, 0xab, 0xa7, 0x4d, 0x21, 0xb0, 0x05, 0x8a, 0x44, 0xec, 0xad, 0x46, 0x71, - 0x35, 0xbf, 0x56, 0xa9, 0x7f, 0xf0, 0x6f, 0x3a, 0xc3, 0x92, 0x9b, 0xef, 0x8e, 0xc7, 0xe9, 0x49, - 0xb2, 0x4e, 0x48, 0x19, 0x8a, 0x8d, 0xc3, 0xff, 0x83, 0x7c, 0xe8, 0xb6, 0xd4, 0x6b, 0x5f, 0x51, - 0x94, 0xfc, 0xfe, 0xee, 0x36, 0x12, 0xf2, 0x15, 0xac, 0x96, 0x67, 0x69, 0x02, 0x2e, 0x80, 0xfc, - 0x11, 0x39, 0x89, 0x07, 0x0a, 0x89, 0x4f, 0xf8, 0x10, 0x14, 0xfb, 0x62, 0xaf, 0x56, 0xf9, 0x7d, - 0x67, 0x6a, 0x90, 0xc9, 0x1a, 0x8e, 0x62, 0xad, 0x07, 0xfa, 0xa6, 0x66, 0xfe, 0xae, 0x81, 0x9b, - 0x13, 0xdb, 0x4f, 0xac, 0x3b, 0xb8, 0xdb, 0xf5, 0x8f, 0x49, 0x4b, 0xba, 0x2d, 0x25, 0xeb, 0xce, - 0x56, 0x2c, 0x46, 0x43, 0x1c, 0xbe, 0x0d, 0x66, 0x28, 0xc1, 0xcc, 0xf7, 0xd4, 0x8a, 0x35, 0xea, - 0x5c, 0x24, 0xa5, 0x48, 0xa1, 0x70, 0x0b, 0xcc, 0x13, 0xe1, 0x5e, 0xc6, 0xb5, 0x43, 0xa9, 0x3f, - 0xac, 0xd4, 0xb2, 0x52, 0x98, 0xdf, 0x19, 0x87, 0xd1, 0x79, 0xbe, 0x70, 0xd5, 0x22, 0x9e, 0x4b, - 0x5a, 0x72, 0x07, 0x2b, 0x25, 0xae, 0xb6, 0xa5, 0x14, 0x29, 0xd4, 0xfc, 0x5b, 0x07, 0xc6, 0xa4, - 0xab, 0x0d, 0x1e, 0x26, 0xbb, 0x88, 0x04, 0xe5, 0x3a, 0x54, 0xa9, 0xdf, 0xbe, 0xd4, 0x80, 0x08, - 0x8d, 0xc6, 0x92, 0x72, 0x3b, 0x97, 0x96, 0xa6, 0x56, 0x17, 0xf9, 0x0b, 0x29, 0x58, 0xf0, 0xc6, - 0x77, 0xe6, 0x78, 0xa9, 0xaa, 0xd4, 0xef, 0x5c, 0x76, 0x1c, 0xa4, 0x37, 0x43, 0x79, 0x5b, 0x38, - 0x07, 0x30, 0x74, 0xc1, 0x3e, 0xac, 0x03, 0xe0, 0x7a, 0x8e, 0xdf, 0x0b, 0xba, 0x84, 0x13, 0x99, - 0xde, 0x52, 0x72, 0x0f, 0xee, 0x8e, 0x10, 0x94, 0x62, 0x65, 0xd5, 0xa5, 0x70, 0xb5, 0xba, 0x34, - 0xd6, 0x4e, 0xcf, 0xaa, 0xb9, 0x17, 0x67, 0xd5, 0xdc, 0xcb, 0xb3, 0x6a, 0xee, 0x79, 0x54, 0xd5, - 0x4e, 0xa3, 0xaa, 0xf6, 0x22, 0xaa, 0x6a, 0x2f, 0xa3, 0xaa, 0xf6, 0x67, 0x54, 0xd5, 0xbe, 0xfd, - 0xab, 0x9a, 0x7b, 0xa6, 0xf7, 0xd7, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x9f, 0x85, 0x45, 0x74, - 0x47, 0x0f, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/authorization/v1/generated.proto b/vendor/k8s.io/api/authorization/v1/generated.proto deleted file mode 100644 index f68a04e..0000000 --- a/vendor/k8s.io/api/authorization/v1/generated.proto +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.authorization.v1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ExtraValue { - // items, if empty, will result in an empty slice - - repeated string items = 1; -} - -// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. -// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions -// checking. -message LocalSubjectAccessReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace - // you made the request against. If empty, it is defaulted. - optional SubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - optional SubjectAccessReviewStatus status = 3; -} - -// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface -message NonResourceAttributes { - // Path is the URL path of the request - // +optional - optional string path = 1; - - // Verb is the standard HTTP verb - // +optional - optional string verb = 2; -} - -// NonResourceRule holds information that describes a rule for the non-resource -message NonResourceRule { - // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. - repeated string verbs = 1; - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, - // final step in the path. "*" means all. - // +optional - repeated string nonResourceURLs = 2; -} - -// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface -message ResourceAttributes { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces - // "" (empty) is defaulted for LocalSubjectAccessReviews - // "" (empty) is empty for cluster-scoped resources - // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview - // +optional - optional string namespace = 1; - - // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. - // +optional - optional string verb = 2; - - // Group is the API Group of the Resource. "*" means all. - // +optional - optional string group = 3; - - // Version is the API Version of the Resource. "*" means all. 
- // +optional - optional string version = 4; - - // Resource is one of the existing resource types. "*" means all. - // +optional - optional string resource = 5; - - // Subresource is one of the existing resource types. "" means none. - // +optional - optional string subresource = 6; - - // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. - // +optional - optional string name = 7; -} - -// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, -// may contain duplicates, and possibly be incomplete. -message ResourceRule { - // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. - repeated string verbs = 1; - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. "*" means all. - // +optional - repeated string apiGroups = 2; - - // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. - // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. - // +optional - repeated string resources = 3; - - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. - // +optional - repeated string resourceNames = 4; -} - -// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a -// spec.namespace means "in all namespaces". Self is a special case, because users should always be able -// to check whether they can perform an action -message SelfSubjectAccessReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated. user and groups must be empty - optional SelfSubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - optional SubjectAccessReviewStatus status = 3; -} - -// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -message SelfSubjectAccessReviewSpec { - // ResourceAuthorizationAttributes describes information for a resource access request - // +optional - optional ResourceAttributes resourceAttributes = 1; - - // NonResourceAttributes describes information for a non-resource access request - // +optional - optional NonResourceAttributes nonResourceAttributes = 2; -} - -// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. -// The returned list of actions may be incomplete depending on the server's authorization mode, -// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, -// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to -// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. -// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. 
-message SelfSubjectRulesReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated. - optional SelfSubjectRulesReviewSpec spec = 2; - - // Status is filled in by the server and indicates the set of actions a user can perform. - // +optional - optional SubjectRulesReviewStatus status = 3; -} - -message SelfSubjectRulesReviewSpec { - // Namespace to evaluate rules for. Required. - optional string namespace = 1; -} - -// SubjectAccessReview checks whether or not a user or group can perform an action. -message SubjectAccessReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated - optional SubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - optional SubjectAccessReviewStatus status = 3; -} - -// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -message SubjectAccessReviewSpec { - // ResourceAuthorizationAttributes describes information for a resource access request - // +optional - optional ResourceAttributes resourceAttributes = 1; - - // NonResourceAttributes describes information for a non-resource access request - // +optional - optional NonResourceAttributes nonResourceAttributes = 2; - - // User is the user you're testing for. - // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups - // +optional - optional string user = 3; - - // Groups is the groups you're testing for. - // +optional - repeated string groups = 4; - - // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer - // it needs a reflection here. - // +optional - map extra = 5; - - // UID information about the requesting user. - // +optional - optional string uid = 6; -} - -// SubjectAccessReviewStatus -message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. - optional bool allowed = 1; - - // Denied is optional. True if the action would be denied, otherwise - // false. If both allowed is false and denied is false, then the - // authorizer has no opinion on whether to authorize the action. Denied - // may not be true if Allowed is true. - // +optional - optional bool denied = 4; - - // Reason is optional. It indicates why a request was allowed or denied. - // +optional - optional string reason = 2; - - // EvaluationError is an indication that some error occurred during the authorization check. - // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. - // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. - // +optional - optional string evaluationError = 3; -} - -// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on -// the set of authorizers the server is configured with and any errors experienced during evaluation. -// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, -// even if that list is incomplete. 
-message SubjectRulesReviewStatus { - // ResourceRules is the list of actions the subject is allowed to perform on resources. - // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. - repeated ResourceRule resourceRules = 1; - - // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. - // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. - repeated NonResourceRule nonResourceRules = 2; - - // Incomplete is true when the rules returned by this call are incomplete. This is most commonly - // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. - optional bool incomplete = 3; - - // EvaluationError can appear in combination with Rules. It indicates an error occurred during - // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that - // ResourceRules and/or NonResourceRules may be incomplete. - // +optional - optional string evaluationError = 4; -} - diff --git a/vendor/k8s.io/api/authorization/v1/register.go b/vendor/k8s.io/api/authorization/v1/register.go deleted file mode 100644 index 5931198..0000000 --- a/vendor/k8s.io/api/authorization/v1/register.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "authorization.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &SelfSubjectRulesReview{}, - &SelfSubjectAccessReview{}, - &SubjectAccessReview{}, - &LocalSubjectAccessReview{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/authorization/v1/types.go b/vendor/k8s.io/api/authorization/v1/types.go deleted file mode 100644 index 86b05c5..0000000 --- a/vendor/k8s.io/api/authorization/v1/types.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SubjectAccessReview checks whether or not a user or group can perform an action. -type SubjectAccessReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated - Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +genclient -// +genclient:nonNamespaced -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a -// spec.namespace means "in all namespaces". Self is a special case, because users should always be able -// to check whether they can perform an action -type SelfSubjectAccessReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated. user and groups must be empty - Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. -// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions -// checking. -type LocalSubjectAccessReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace - // you made the request against. If empty, it is defaulted. - Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface -type ResourceAttributes struct { - // Namespace is the namespace of the action being requested. 
Currently, there is no distinction between no namespace and all namespaces - // "" (empty) is defaulted for LocalSubjectAccessReviews - // "" (empty) is empty for cluster-scoped resources - // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` - // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. - // +optional - Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` - // Group is the API Group of the Resource. "*" means all. - // +optional - Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` - // Version is the API Version of the Resource. "*" means all. - // +optional - Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` - // Resource is one of the existing resource types. "*" means all. - // +optional - Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"` - // Subresource is one of the existing resource types. "" means none. - // +optional - Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"` - // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` -} - -// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface -type NonResourceAttributes struct { - // Path is the URL path of the request - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // Verb is the standard HTTP verb - // +optional - Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` -} - -// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -type SubjectAccessReviewSpec struct { - // ResourceAuthorizationAttributes describes information for a resource access request - // +optional - ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` - // NonResourceAttributes describes information for a non-resource access request - // +optional - NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` - - // User is the user you're testing for. - // If you specify "User" but not "Groups", then is it interpreted as "What if User were not a member of any groups - // +optional - User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` - // Groups is the groups you're testing for. - // +optional - Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` - // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer - // it needs a reflection here. - // +optional - Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` - // UID information about the requesting user. 
- // +optional - UID string `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid"` -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -type ExtraValue []string - -func (t ExtraValue) String() string { - return fmt.Sprintf("%v", []string(t)) -} - -// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -type SelfSubjectAccessReviewSpec struct { - // ResourceAuthorizationAttributes describes information for a resource access request - // +optional - ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` - // NonResourceAttributes describes information for a non-resource access request - // +optional - NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` -} - -// SubjectAccessReviewStatus -type SubjectAccessReviewStatus struct { - // Allowed is required. True if the action would be allowed, false otherwise. - Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` - // Denied is optional. True if the action would be denied, otherwise - // false. If both allowed is false and denied is false, then the - // authorizer has no opinion on whether to authorize the action. Denied - // may not be true if Allowed is true. - // +optional - Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` - // Reason is optional. It indicates why a request was allowed or denied. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` - // EvaluationError is an indication that some error occurred during the authorization check. - // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. - // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. - // +optional - EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` -} - -// +genclient -// +genclient:nonNamespaced -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. -// The returned list of actions may be incomplete depending on the server's authorization mode, -// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, -// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to -// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. -// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. -type SelfSubjectRulesReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated. - Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates the set of actions a user can perform. 
- // +optional - Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -type SelfSubjectRulesReviewSpec struct { - // Namespace to evaluate rules for. Required. - Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` -} - -// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on -// the set of authorizers the server is configured with and any errors experienced during evaluation. -// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, -// even if that list is incomplete. -type SubjectRulesReviewStatus struct { - // ResourceRules is the list of actions the subject is allowed to perform on resources. - // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. - ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` - // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. - // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. - NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` - // Incomplete is true when the rules returned by this call are incomplete. This is most commonly - // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. - Incomplete bool `json:"incomplete" protobuf:"bytes,3,rep,name=incomplete"` - // EvaluationError can appear in combination with Rules. It indicates an error occurred during - // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that - // ResourceRules and/or NonResourceRules may be incomplete. - // +optional - EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` -} - -// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, -// may contain duplicates, and possibly be incomplete. -type ResourceRule struct { - // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. - Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. "*" means all. - // +optional - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. - // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. - // +optional - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. - // +optional - ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` -} - -// NonResourceRule holds information that describes a rule for the non-resource -type NonResourceRule struct { - // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. 
- Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, - // final step in the path. "*" means all. - // +optional - NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` -} diff --git a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go deleted file mode 100644 index 8445f71..0000000 --- a/vendor/k8s.io/api/authorization/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_LocalSubjectAccessReview = map[string]string{ - "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", - "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { - return map_LocalSubjectAccessReview -} - -var map_NonResourceAttributes = map[string]string{ - "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", - "path": "Path is the URL path of the request", - "verb": "Verb is the standard HTTP verb", -} - -func (NonResourceAttributes) SwaggerDoc() map[string]string { - return map_NonResourceAttributes -} - -var map_NonResourceRule = map[string]string{ - "": "NonResourceRule holds information that describes a rule for the non-resource", - "verbs": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", - "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. 
\"*\" means all.", -} - -func (NonResourceRule) SwaggerDoc() map[string]string { - return map_NonResourceRule -} - -var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". \"\" (empty) means all.", -} - -func (ResourceAttributes) SwaggerDoc() map[string]string { - return map_ResourceAttributes -} - -var map_ResourceRule = map[string]string{ - "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", - "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", - "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", -} - -func (ResourceRule) SwaggerDoc() map[string]string { - return map_ResourceRule -} - -var map_SelfSubjectAccessReview = map[string]string{ - "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", - "spec": "Spec holds information about the request being evaluated. user and groups must be empty", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { - return map_SelfSubjectAccessReview -} - -var map_SelfSubjectAccessReviewSpec = map[string]string{ - "": "SelfSubjectAccessReviewSpec is a description of the access request. 
Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", - "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", - "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", -} - -func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { - return map_SelfSubjectAccessReviewSpec -} - -var map_SelfSubjectRulesReview = map[string]string{ - "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", - "spec": "Spec holds information about the request being evaluated.", - "status": "Status is filled in by the server and indicates the set of actions a user can perform.", -} - -func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { - return map_SelfSubjectRulesReview -} - -var map_SelfSubjectRulesReviewSpec = map[string]string{ - "namespace": "Namespace to evaluate rules for. Required.", -} - -func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { - return map_SelfSubjectRulesReviewSpec -} - -var map_SubjectAccessReview = map[string]string{ - "": "SubjectAccessReview checks whether or not a user or group can perform an action.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (SubjectAccessReview) SwaggerDoc() map[string]string { - return map_SubjectAccessReview -} - -var map_SubjectAccessReviewSpec = map[string]string{ - "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", - "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", - "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", - "user": "User is the user you're testing for. If you specify \"User\" but not \"Groups\", then is it interpreted as \"What if User were not a member of any groups", - "groups": "Groups is the groups you're testing for.", - "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", - "uid": "UID information about the requesting user.", -} - -func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { - return map_SubjectAccessReviewSpec -} - -var map_SubjectAccessReviewStatus = map[string]string{ - "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", - "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", - "reason": "Reason is optional. 
It indicates why a request was allowed or denied.", - "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", -} - -func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { - return map_SubjectAccessReviewStatus -} - -var map_SubjectRulesReviewStatus = map[string]string{ - "": "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.", - "resourceRules": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", - "nonResourceRules": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", - "incomplete": "Incomplete is true when the rules returned by this call are incomplete. This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.", - "evaluationError": "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.", -} - -func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { - return map_SubjectRulesReviewStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go deleted file mode 100644 index 1d11b38..0000000 --- a/vendor/k8s.io/api/authorization/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,385 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ExtraValue) DeepCopyInto(out *ExtraValue) { - { - in := &in - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. 
-func (in ExtraValue) DeepCopy() ExtraValue { - if in == nil { - return nil - } - out := new(ExtraValue) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. -func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { - if in == nil { - return nil - } - out := new(LocalSubjectAccessReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes. -func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes { - if in == nil { - return nil - } - out := new(NonResourceAttributes) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) { - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NonResourceURLs != nil { - in, out := &in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule. -func (in *NonResourceRule) DeepCopy() *NonResourceRule { - if in == nil { - return nil - } - out := new(NonResourceRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes. -func (in *ResourceAttributes) DeepCopy() *ResourceAttributes { - if in == nil { - return nil - } - out := new(ResourceAttributes) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceRule) DeepCopyInto(out *ResourceRule) { - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceNames != nil { - in, out := &in.ResourceNames, &out.ResourceNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule. -func (in *ResourceRule) DeepCopy() *ResourceRule { - if in == nil { - return nil - } - out := new(ResourceRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview. -func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview { - if in == nil { - return nil - } - out := new(SelfSubjectAccessReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) { - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec. -func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec { - if in == nil { - return nil - } - out := new(SelfSubjectAccessReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. -func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { - if in == nil { - return nil - } - out := new(SelfSubjectRulesReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. -func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { - if in == nil { - return nil - } - out := new(SelfSubjectRulesReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. -func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { - if in == nil { - return nil - } - out := new(SubjectAccessReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec. -func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec { - if in == nil { - return nil - } - out := new(SubjectAccessReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus. -func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus { - if in == nil { - return nil - } - out := new(SubjectAccessReviewStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { - *out = *in - if in.ResourceRules != nil { - in, out := &in.ResourceRules, &out.ResourceRules - *out = make([]ResourceRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.NonResourceRules != nil { - in, out := &in.NonResourceRules, &out.NonResourceRules - *out = make([]NonResourceRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. -func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { - if in == nil { - return nil - } - out := new(SubjectRulesReviewStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/authorization/v1beta1/doc.go b/vendor/k8s.io/api/authorization/v1beta1/doc.go deleted file mode 100644 index ea4f802..0000000 --- a/vendor/k8s.io/api/authorization/v1beta1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=authorization.k8s.io -package v1beta1 // import "k8s.io/api/authorization/v1beta1" diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go b/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go deleted file mode 100644 index 75ee6cf..0000000 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.pb.go +++ /dev/null @@ -1,3528 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto - - It has these top-level messages: - ExtraValue - LocalSubjectAccessReview - NonResourceAttributes - NonResourceRule - ResourceAttributes - ResourceRule - SelfSubjectAccessReview - SelfSubjectAccessReviewSpec - SelfSubjectRulesReview - SelfSubjectRulesReviewSpec - SubjectAccessReview - SubjectAccessReviewSpec - SubjectAccessReviewStatus - SubjectRulesReviewStatus -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *LocalSubjectAccessReview) Reset() { *m = LocalSubjectAccessReview{} } -func (*LocalSubjectAccessReview) ProtoMessage() {} -func (*LocalSubjectAccessReview) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *NonResourceAttributes) Reset() { *m = NonResourceAttributes{} } -func (*NonResourceAttributes) ProtoMessage() {} -func (*NonResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *NonResourceRule) Reset() { *m = NonResourceRule{} } -func (*NonResourceRule) ProtoMessage() {} -func (*NonResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ResourceAttributes) Reset() { *m = ResourceAttributes{} } -func (*ResourceAttributes) ProtoMessage() {} -func (*ResourceAttributes) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *ResourceRule) Reset() { *m = ResourceRule{} } -func (*ResourceRule) ProtoMessage() {} -func (*ResourceRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *SelfSubjectAccessReview) Reset() { *m = SelfSubjectAccessReview{} } -func (*SelfSubjectAccessReview) ProtoMessage() {} -func (*SelfSubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *SelfSubjectAccessReviewSpec) Reset() { *m = SelfSubjectAccessReviewSpec{} } -func (*SelfSubjectAccessReviewSpec) ProtoMessage() {} -func (*SelfSubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} -} - -func (m *SelfSubjectRulesReview) Reset() { *m = SelfSubjectRulesReview{} } -func (*SelfSubjectRulesReview) ProtoMessage() {} -func (*SelfSubjectRulesReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *SelfSubjectRulesReviewSpec) Reset() { *m = SelfSubjectRulesReviewSpec{} } -func (*SelfSubjectRulesReviewSpec) ProtoMessage() {} -func (*SelfSubjectRulesReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{9} -} - -func (m *SubjectAccessReview) Reset() { *m = SubjectAccessReview{} } -func 
(*SubjectAccessReview) ProtoMessage() {} -func (*SubjectAccessReview) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *SubjectAccessReviewSpec) Reset() { *m = SubjectAccessReviewSpec{} } -func (*SubjectAccessReviewSpec) ProtoMessage() {} -func (*SubjectAccessReviewSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} -} - -func (m *SubjectAccessReviewStatus) Reset() { *m = SubjectAccessReviewStatus{} } -func (*SubjectAccessReviewStatus) ProtoMessage() {} -func (*SubjectAccessReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} -} - -func (m *SubjectRulesReviewStatus) Reset() { *m = SubjectRulesReviewStatus{} } -func (*SubjectRulesReviewStatus) ProtoMessage() {} -func (*SubjectRulesReviewStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} -} - -func init() { - proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.authorization.v1beta1.ExtraValue") - proto.RegisterType((*LocalSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.LocalSubjectAccessReview") - proto.RegisterType((*NonResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.NonResourceAttributes") - proto.RegisterType((*NonResourceRule)(nil), "k8s.io.api.authorization.v1beta1.NonResourceRule") - proto.RegisterType((*ResourceAttributes)(nil), "k8s.io.api.authorization.v1beta1.ResourceAttributes") - proto.RegisterType((*ResourceRule)(nil), "k8s.io.api.authorization.v1beta1.ResourceRule") - proto.RegisterType((*SelfSubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReview") - proto.RegisterType((*SelfSubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectAccessReviewSpec") - proto.RegisterType((*SelfSubjectRulesReview)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReview") - proto.RegisterType((*SelfSubjectRulesReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SelfSubjectRulesReviewSpec") - proto.RegisterType((*SubjectAccessReview)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReview") - proto.RegisterType((*SubjectAccessReviewSpec)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewSpec") - proto.RegisterType((*SubjectAccessReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectAccessReviewStatus") - proto.RegisterType((*SubjectRulesReviewStatus)(nil), "k8s.io.api.authorization.v1beta1.SubjectRulesReviewStatus") -} -func (m ExtraValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *LocalSubjectAccessReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LocalSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *NonResourceAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NonResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - return i, nil -} - -func (m *NonResourceRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NonResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *ResourceAttributes) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceAttributes) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Verb))) - i += copy(dAtA[i:], m.Verb) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Group))) - i += copy(dAtA[i:], m.Group) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Version))) - i += copy(dAtA[i:], m.Version) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Resource))) - i += copy(dAtA[i:], m.Resource) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Subresource))) - i += copy(dAtA[i:], m.Subresource) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m *ResourceRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - 
} - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *SelfSubjectAccessReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SelfSubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n4, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n5, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n6, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - return i, nil -} - -func (m *SelfSubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SelfSubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n7, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n8, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - return i, nil -} - -func (m *SelfSubjectRulesReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SelfSubjectRulesReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n11, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - return i, nil -} - -func (m *SelfSubjectRulesReviewSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SelfSubjectRulesReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil -} - -func (m *SubjectAccessReview) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SubjectAccessReview) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, nil -} - -func (m *SubjectAccessReviewSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SubjectAccessReviewSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ResourceAttributes != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceAttributes.Size())) - n15, err := m.ResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.NonResourceAttributes != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NonResourceAttributes.Size())) - n16, err := m.NonResourceAttributes.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x2a - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n17, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - return i, nil -} - -func (m *SubjectAccessReviewStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SubjectAccessReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Allowed { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - dAtA[i] = 0x20 - i++ - if m.Denied { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *SubjectRulesReviewStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SubjectRulesReviewStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ResourceRules) > 0 { - for _, msg := range m.ResourceRules { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.NonResourceRules) > 0 { - for _, msg := range m.NonResourceRules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x18 - i++ - if m.Incomplete { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EvaluationError))) - i += copy(dAtA[i:], m.EvaluationError) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m ExtraValue) Size() (n int) { - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LocalSubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NonResourceAttributes) Size() (n int) { - var l int - _ = l - l = len(m.Path) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Verb) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NonResourceRule) Size() (n int) { - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ResourceAttributes) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Verb) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Group) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Version) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Resource) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Subresource) - n += 1 + l + 
sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceRule) Size() (n int) { - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *SelfSubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SelfSubjectAccessReviewSpec) Size() (n int) { - var l int - _ = l - if m.ResourceAttributes != nil { - l = m.ResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NonResourceAttributes != nil { - l = m.NonResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SelfSubjectRulesReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SelfSubjectRulesReviewSpec) Size() (n int) { - var l int - _ = l - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SubjectAccessReview) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SubjectAccessReviewSpec) Size() (n int) { - var l int - _ = l - if m.ResourceAttributes != nil { - l = m.ResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NonResourceAttributes != nil { - l = m.NonResourceAttributes.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.User) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Extra) > 0 { - for k, v := range m.Extra { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *SubjectAccessReviewStatus) Size() (n int) { - var l int - _ = l - n += 2 - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.EvaluationError) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *SubjectRulesReviewStatus) Size() (n int) { - var l int - _ = l - if len(m.ResourceRules) > 0 { - for _, e := range m.ResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.NonResourceRules) > 0 { - for _, e := range m.NonResourceRules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - l = len(m.EvaluationError) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) 
(n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *LocalSubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LocalSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NonResourceAttributes) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NonResourceAttributes{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, - `}`, - }, "") - return s -} -func (this *NonResourceRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NonResourceRule{`, - `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, - `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceAttributes) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceAttributes{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Verb:` + fmt.Sprintf("%v", this.Verb) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `Version:` + fmt.Sprintf("%v", this.Version) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Subresource:` + fmt.Sprintf("%v", this.Subresource) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceRule{`, - `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, - `}`, - }, "") - return s -} -func (this *SelfSubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SelfSubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectAccessReviewSpec", "SelfSubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SelfSubjectAccessReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SelfSubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SelfSubjectRulesReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SelfSubjectRulesReview{`, - `ObjectMeta:` + 
strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SelfSubjectRulesReviewSpec", "SelfSubjectRulesReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectRulesReviewStatus", "SubjectRulesReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SelfSubjectRulesReviewSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SelfSubjectRulesReviewSpec{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReview) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubjectAccessReview{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "SubjectAccessReviewSpec", "SubjectAccessReviewSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "SubjectAccessReviewStatus", "SubjectAccessReviewStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReviewSpec) String() string { - if this == nil { - return "nil" - } - keysForExtra := make([]string, 0, len(this.Extra)) - for k := range this.Extra { - keysForExtra = append(keysForExtra, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - mapStringForExtra := "map[string]ExtraValue{" - for _, k := range keysForExtra { - mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) - } - mapStringForExtra += "}" - s := strings.Join([]string{`&SubjectAccessReviewSpec{`, - `ResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.ResourceAttributes), "ResourceAttributes", "ResourceAttributes", 1) + `,`, - `NonResourceAttributes:` + strings.Replace(fmt.Sprintf("%v", this.NonResourceAttributes), "NonResourceAttributes", "NonResourceAttributes", 1) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `Extra:` + mapStringForExtra + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *SubjectAccessReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubjectAccessReviewStatus{`, - `Allowed:` + fmt.Sprintf("%v", this.Allowed) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, - `Denied:` + fmt.Sprintf("%v", this.Denied) + `,`, - `}`, - }, "") - return s -} -func (this *SubjectRulesReviewStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SubjectRulesReviewStatus{`, - `ResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ResourceRules), "ResourceRule", "ResourceRule", 1), `&`, ``, 1) + `,`, - `NonResourceRules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NonResourceRules), "NonResourceRule", "NonResourceRule", 1), `&`, ``, 1) + `,`, - `Incomplete:` + fmt.Sprintf("%v", this.Incomplete) + `,`, - `EvaluationError:` + fmt.Sprintf("%v", this.EvaluationError) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := 
reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *ExtraValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - *m = append(*m, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LocalSubjectAccessReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LocalSubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LocalSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NonResourceAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NonResourceAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NonResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verb", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verb = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NonResourceRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift 
>= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NonResourceRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NonResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceAttributes) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceAttributes: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceAttributes: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field Verb", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verb = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Version = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subresource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subresource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen 
< 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SelfSubjectAccessReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SelfSubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SelfSubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*SelfSubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SelfSubjectAccessReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceAttributes == nil { - m.ResourceAttributes = &ResourceAttributes{} - } - if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NonResourceAttributes == nil { - m.NonResourceAttributes = &NonResourceAttributes{} - } - if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SelfSubjectRulesReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SelfSubjectRulesReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SelfSubjectRulesReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SelfSubjectRulesReviewSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SelfSubjectRulesReviewSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} 
-func (m *SubjectAccessReview) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReview: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReview: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectAccessReviewSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReviewSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReviewSpec: illegal tag %d (wire 
type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceAttributes == nil { - m.ResourceAttributes = &ResourceAttributes{} - } - if err := m.ResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NonResourceAttributes == nil { - m.NonResourceAttributes = &NonResourceAttributes{} - } - if err := m.NonResourceAttributes.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift 
- if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Extra == nil { - m.Extra = make(map[string]ExtraValue) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectAccessReviewStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectAccessReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectAccessReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Allowed", wireType) - } - var v int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Allowed = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EvaluationError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Denied", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Denied = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SubjectRulesReviewStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SubjectRulesReviewStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SubjectRulesReviewStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceRules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceRules = append(m.ResourceRules, ResourceRule{}) - if 
err := m.ResourceRules[len(m.ResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceRules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NonResourceRules = append(m.NonResourceRules, NonResourceRule{}) - if err := m.NonResourceRules[len(m.NonResourceRules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Incomplete", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Incomplete = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvaluationError", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EvaluationError = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/authorization/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1137 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xcb, 0x6f, 0x1b, 0x45, - 0x18, 0xf7, 0xfa, 0x91, 0xd8, 0xe3, 0x86, 0xa4, 0x13, 0xa5, 0xd9, 0x06, 0x61, 0x5b, 0x46, 0x42, - 0x41, 0xb4, 0xbb, 0x24, 0x2a, 0xa4, 0x04, 0x7a, 0x88, 0x95, 0x08, 0x45, 0x6a, 0x4b, 0x35, 0x51, - 0x72, 0xa0, 0x12, 0x30, 0xbb, 0x9e, 0xd8, 0x8b, 0xed, 0xdd, 0x65, 0x66, 0xd6, 0x21, 0x88, 0x43, - 0x8f, 0x1c, 0x39, 0x72, 0xe4, 0xc4, 0xff, 0xc0, 0x05, 0x09, 0x4e, 0x39, 0xf6, 0x18, 0x24, 0x64, - 0x91, 0xe5, 0x8f, 0xe0, 0x8a, 0x66, 0x76, 0xec, 0x5d, 0x27, 0x9b, 0x38, 0xce, 0x81, 0x5e, 0x7a, - 0xdb, 0xf9, 0x7e, 0xdf, 0xfb, 0xb5, 0x1f, 0xd8, 0xed, 0x3c, 0x64, 0x86, 0xe3, 0x99, 0x9d, 0xc0, - 0x22, 0xd4, 0x25, 0x9c, 0x30, 0xb3, 0x4f, 0xdc, 0xa6, 0x47, 0x4d, 0x05, 0x60, 0xdf, 0x31, 0x71, - 0xc0, 0xdb, 0x1e, 0x75, 0xbe, 0xc3, 0xdc, 0xf1, 0x5c, 0xb3, 0xbf, 0x66, 0x11, 0x8e, 0xd7, 0xcc, - 0x16, 0x71, 0x09, 0xc5, 0x9c, 0x34, 0x0d, 0x9f, 0x7a, 0xdc, 0x83, 0xb5, 0x48, 0xc2, 0xc0, 0xbe, - 0x63, 0x8c, 0x49, 0x18, 0x4a, 0x62, 0xe5, 0x7e, 0xcb, 0xe1, 0xed, 0xc0, 0x32, 0x6c, 0xaf, 0x67, - 0xb6, 0xbc, 0x96, 0x67, 0x4a, 0x41, 0x2b, 0x38, 0x94, 0x2f, 0xf9, 0x90, 0x5f, 0x91, 0xc2, 0x95, - 0x07, 0xb1, 0x0b, 0x3d, 0x6c, 0xb7, 0x1d, 0x97, 0xd0, 0x63, 0xd3, 0xef, 0xb4, 0x04, 0x81, 0x99, - 0x3d, 0xc2, 0xb1, 0xd9, 0xbf, 0xe0, 0xc6, 0x8a, 0x79, 0x99, 0x14, 0x0d, 0x5c, 0xee, 0xf4, 0xc8, - 0x05, 0x81, 0x0f, 0x27, 0x09, 0x30, 0xbb, 0x4d, 0x7a, 0xf8, 0xbc, 0x5c, 0x7d, 0x03, 0x80, 0x9d, - 0x6f, 0x39, 0xc5, 0x07, 0xb8, 0x1b, 0x10, 0x58, 0x05, 0x05, 0x87, 0x93, 0x1e, 0xd3, 0xb5, 0x5a, - 0x6e, 0xb5, 0xd4, 0x28, 0x85, 0x83, 0x6a, 0x61, 0x57, 0x10, 0x50, 0x44, 0xdf, 0x2c, 0xfe, 0xf4, - 0x73, 0x35, 0xf3, 0xe2, 0xaf, 0x5a, 0xa6, 0xfe, 0x5b, 0x16, 0xe8, 0x8f, 0x3d, 0x1b, 0x77, 0xf7, - 0x02, 0xeb, 0x6b, 0x62, 0xf3, 0x2d, 0xdb, 0x26, 0x8c, 0x21, 0xd2, 0x77, 0xc8, 0x11, 0xfc, 0x0a, - 0x14, 0x45, 0x64, 0x4d, 0xcc, 0xb1, 0xae, 0xd5, 0xb4, 0xd5, 0xf2, 0xfa, 0xfb, 0x46, 0x9c, 0xd8, - 0x91, 0x83, 0x86, 0xdf, 0x69, 0x09, 0x02, 0x33, 0x04, 0xb7, 0xd1, 0x5f, 0x33, 0x3e, 0x93, 0xba, - 0x9e, 0x10, 0x8e, 0x1b, 0xf0, 0x64, 0x50, 0xcd, 0x84, 0x83, 0x2a, 0x88, 0x69, 0x68, 0xa4, 0x15, - 0x3e, 0x07, 0x79, 0xe6, 0x13, 0x5b, 0xcf, 0x4a, 0xed, 0x1f, 0x19, 0x93, 0xca, 0x66, 0xa4, 0xb8, - 0xb9, 0xe7, 0x13, 0xbb, 0x71, 0x4b, 0x99, 0xc9, 0x8b, 0x17, 0x92, 0x4a, 0xa1, 0x0d, 0x66, 0x18, - 0xc7, 0x3c, 0x60, 0x7a, 0x4e, 0xaa, 0xff, 0xf8, 0x66, 0xea, 0xa5, 0x8a, 0xc6, 0x1b, 0xca, 0xc0, - 0x4c, 0xf4, 0x46, 0x4a, 0x75, 0xfd, 0x39, 0x58, 0x7a, 0xea, 0xb9, 0x88, 0x30, 0x2f, 0xa0, 0x36, - 0xd9, 0xe2, 0x9c, 0x3a, 0x56, 0xc0, 0x09, 0x83, 0x35, 0x90, 0xf7, 0x31, 0x6f, 0xcb, 0xc4, 0x95, - 0x62, 0xff, 0x9e, 0x61, 0xde, 
0x46, 0x12, 0x11, 0x1c, 0x7d, 0x42, 0x2d, 0x19, 0x7c, 0x82, 0xe3, - 0x80, 0x50, 0x0b, 0x49, 0xa4, 0xfe, 0x0d, 0x98, 0x4f, 0x28, 0x47, 0x41, 0x57, 0xd6, 0x56, 0x40, - 0x63, 0xb5, 0x15, 0x12, 0x0c, 0x45, 0x74, 0xf8, 0x08, 0xcc, 0xbb, 0xb1, 0xcc, 0x3e, 0x7a, 0xcc, - 0xf4, 0xac, 0x64, 0x5d, 0x0c, 0x07, 0xd5, 0xa4, 0x3a, 0x01, 0xa1, 0xf3, 0xbc, 0xa2, 0x21, 0x60, - 0x4a, 0x34, 0x26, 0x28, 0xb9, 0xb8, 0x47, 0x98, 0x8f, 0x6d, 0xa2, 0x42, 0xba, 0xad, 0x1c, 0x2e, - 0x3d, 0x1d, 0x02, 0x28, 0xe6, 0x99, 0x1c, 0x1c, 0x7c, 0x1b, 0x14, 0x5a, 0xd4, 0x0b, 0x7c, 0x59, - 0x9d, 0x52, 0x63, 0x4e, 0xb1, 0x14, 0x3e, 0x15, 0x44, 0x14, 0x61, 0xf0, 0x5d, 0x30, 0xdb, 0x27, - 0x94, 0x39, 0x9e, 0xab, 0xe7, 0x25, 0xdb, 0xbc, 0x62, 0x9b, 0x3d, 0x88, 0xc8, 0x68, 0x88, 0xc3, - 0x7b, 0xa0, 0x48, 0x95, 0xe3, 0x7a, 0x41, 0xf2, 0x2e, 0x28, 0xde, 0xe2, 0x28, 0x83, 0x23, 0x0e, - 0xf8, 0x01, 0x28, 0xb3, 0xc0, 0x1a, 0x09, 0xcc, 0x48, 0x81, 0x45, 0x25, 0x50, 0xde, 0x8b, 0x21, - 0x94, 0xe4, 0x13, 0x61, 0x89, 0x18, 0xf5, 0xd9, 0xf1, 0xb0, 0x44, 0x0a, 0x90, 0x44, 0xea, 0x7f, - 0x68, 0xe0, 0xd6, 0x74, 0x15, 0x7b, 0x0f, 0x94, 0xb0, 0xef, 0xc8, 0xb0, 0x87, 0xb5, 0x9a, 0x13, - 0x79, 0xdd, 0x7a, 0xb6, 0x1b, 0x11, 0x51, 0x8c, 0x0b, 0xe6, 0xa1, 0x33, 0xa2, 0xaf, 0x47, 0xcc, - 0x43, 0x93, 0x0c, 0xc5, 0x38, 0xdc, 0x00, 0x73, 0xc3, 0x87, 0x2c, 0x92, 0x9e, 0x97, 0x02, 0xb7, - 0xc3, 0x41, 0x75, 0x0e, 0x25, 0x01, 0x34, 0xce, 0x57, 0xff, 0x3d, 0x0b, 0x96, 0xf7, 0x48, 0xf7, - 0xf0, 0xd5, 0x6c, 0x85, 0x2f, 0xc7, 0xb6, 0xc2, 0xa3, 0x6b, 0x8c, 0x6d, 0xba, 0xab, 0xaf, 0x76, - 0x33, 0xfc, 0x92, 0x05, 0x6f, 0x5e, 0xe1, 0x18, 0xfc, 0x1e, 0x40, 0x7a, 0x61, 0xd0, 0x54, 0x46, - 0x1f, 0x4c, 0x76, 0xe8, 0xe2, 0x90, 0x36, 0xee, 0x84, 0x83, 0x6a, 0xca, 0xf0, 0xa2, 0x14, 0x3b, - 0xf0, 0x07, 0x0d, 0x2c, 0xb9, 0x69, 0x8b, 0x4b, 0x65, 0x7d, 0x63, 0xb2, 0x07, 0xa9, 0x7b, 0xaf, - 0x71, 0x37, 0x1c, 0x54, 0xd3, 0x57, 0x22, 0x4a, 0x37, 0x28, 0x56, 0xce, 0x9d, 0x44, 0xa2, 0xc4, - 0xd0, 0xfc, 0x7f, 0xbd, 0xf6, 0xc5, 0x58, 0xaf, 0x7d, 0x32, 0x55, 0xaf, 0x25, 0x3c, 0xbd, 0xb4, - 0xd5, 0xac, 0x73, 0xad, 0xb6, 0x79, 0xed, 0x56, 0x4b, 0x6a, 0xbf, 0xba, 0xd3, 0x9e, 0x80, 0x95, - 0xcb, 0xbd, 0x9a, 0x7a, 0x75, 0xd7, 0x7f, 0xcd, 0x82, 0xc5, 0xd7, 0xe7, 0xc0, 0xcd, 0x86, 0xfe, - 0x34, 0x0f, 0x96, 0x5f, 0x0f, 0xfc, 0xd5, 0x03, 0x2f, 0x7e, 0xa2, 0x01, 0x23, 0x54, 0xfd, 0xf8, - 0x47, 0xb5, 0xda, 0x67, 0x84, 0x22, 0x89, 0xc0, 0xda, 0xf0, 0x36, 0x88, 0x7e, 0x58, 0x40, 0x64, - 0x5a, 0xfd, 0x0b, 0xd5, 0x61, 0xe0, 0x80, 0x02, 0x11, 0x17, 0xaf, 0x5e, 0xa8, 0xe5, 0x56, 0xcb, - 0xeb, 0xdb, 0x37, 0xee, 0x15, 0x43, 0x1e, 0xce, 0x3b, 0x2e, 0xa7, 0xc7, 0xf1, 0x0d, 0x22, 0x69, - 0x28, 0xb2, 0x00, 0xdf, 0x02, 0xb9, 0xc0, 0x69, 0xaa, 0x13, 0xa1, 0xac, 0x58, 0x72, 0xfb, 0xbb, - 0xdb, 0x48, 0xd0, 0x57, 0x0e, 0xd5, 0xed, 0x2d, 0x55, 0xc0, 0x05, 0x90, 0xeb, 0x90, 0xe3, 0x68, - 0xce, 0x90, 0xf8, 0x84, 0x0d, 0x50, 0xe8, 0x8b, 0xb3, 0x5c, 0xe5, 0xf9, 0xde, 0x64, 0x4f, 0xe3, - 0x53, 0x1e, 0x45, 0xa2, 0x9b, 0xd9, 0x87, 0x5a, 0xfd, 0x4f, 0x0d, 0xdc, 0xbd, 0xb4, 0x21, 0xc5, - 0xa1, 0x84, 0xbb, 0x5d, 0xef, 0x88, 0x34, 0xa5, 0xed, 0x62, 0x7c, 0x28, 0x6d, 0x45, 0x64, 0x34, - 0xc4, 0xe1, 0x3b, 0x60, 0x86, 0x12, 0xcc, 0x3c, 0x57, 0x1d, 0x67, 0xa3, 0x5e, 0x46, 0x92, 0x8a, - 0x14, 0x0a, 0xb7, 0xc0, 0x3c, 0x11, 0xe6, 0xa5, 0x73, 0x3b, 0x94, 0x7a, 0xc3, 0x8a, 0x2d, 0x2b, - 0x81, 0xf9, 0x9d, 0x71, 0x18, 0x9d, 0xe7, 0x17, 0xa6, 0x9a, 0xc4, 0x75, 0x48, 0x53, 0x5e, 0x6f, - 0xc5, 0xd8, 0xd4, 0xb6, 0xa4, 0x22, 0x85, 0xd6, 0xff, 0xcd, 0x02, 0xfd, 0xb2, 0xb5, 0x07, 0x3b, - 0xf1, 0x15, 0x23, 0x41, 0x79, 0x48, 0x95, 0xd7, 0x8d, 
0xeb, 0x8f, 0x8c, 0x10, 0x6b, 0x2c, 0x29, - 0xdb, 0x73, 0x49, 0x6a, 0xe2, 0xf2, 0x91, 0x4f, 0x78, 0x04, 0x16, 0xdc, 0xf1, 0x93, 0x3b, 0xba, - 0xc9, 0xca, 0xeb, 0x6b, 0x53, 0x0d, 0x88, 0x34, 0xa9, 0x2b, 0x93, 0x0b, 0xe7, 0x00, 0x86, 0x2e, - 0x18, 0x81, 0xeb, 0x00, 0x38, 0xae, 0xed, 0xf5, 0xfc, 0x2e, 0xe1, 0x44, 0x26, 0xba, 0x18, 0x6f, - 0xcb, 0xdd, 0x11, 0x82, 0x12, 0x5c, 0x69, 0x15, 0xca, 0x4f, 0x57, 0xa1, 0xc6, 0xfd, 0x93, 0xb3, - 0x4a, 0xe6, 0xe5, 0x59, 0x25, 0x73, 0x7a, 0x56, 0xc9, 0xbc, 0x08, 0x2b, 0xda, 0x49, 0x58, 0xd1, - 0x5e, 0x86, 0x15, 0xed, 0x34, 0xac, 0x68, 0x7f, 0x87, 0x15, 0xed, 0xc7, 0x7f, 0x2a, 0x99, 0xcf, - 0x67, 0x55, 0x84, 0xff, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc5, 0xba, 0xf8, 0x96, 0xa4, 0x0f, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/authorization/v1beta1/generated.proto b/vendor/k8s.io/api/authorization/v1beta1/generated.proto deleted file mode 100644 index 3876a3e..0000000 --- a/vendor/k8s.io/api/authorization/v1beta1/generated.proto +++ /dev/null @@ -1,272 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.authorization.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -message ExtraValue { - // items, if empty, will result in an empty slice - - repeated string items = 1; -} - -// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. -// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions -// checking. -message LocalSubjectAccessReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace - // you made the request against. If empty, it is defaulted. 
- optional SubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - optional SubjectAccessReviewStatus status = 3; -} - -// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface -message NonResourceAttributes { - // Path is the URL path of the request - // +optional - optional string path = 1; - - // Verb is the standard HTTP verb - // +optional - optional string verb = 2; -} - -// NonResourceRule holds information that describes a rule for the non-resource -message NonResourceRule { - // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. - repeated string verbs = 1; - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, - // final step in the path. "*" means all. - // +optional - repeated string nonResourceURLs = 2; -} - -// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface -message ResourceAttributes { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces - // "" (empty) is defaulted for LocalSubjectAccessReviews - // "" (empty) is empty for cluster-scoped resources - // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview - // +optional - optional string namespace = 1; - - // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. - // +optional - optional string verb = 2; - - // Group is the API Group of the Resource. "*" means all. - // +optional - optional string group = 3; - - // Version is the API Version of the Resource. "*" means all. - // +optional - optional string version = 4; - - // Resource is one of the existing resource types. "*" means all. - // +optional - optional string resource = 5; - - // Subresource is one of the existing resource types. "" means none. - // +optional - optional string subresource = 6; - - // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. - // +optional - optional string name = 7; -} - -// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, -// may contain duplicates, and possibly be incomplete. -message ResourceRule { - // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. - repeated string verbs = 1; - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. "*" means all. - // +optional - repeated string apiGroups = 2; - - // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. - // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. - // +optional - repeated string resources = 3; - - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. - // +optional - repeated string resourceNames = 4; -} - -// SelfSubjectAccessReview checks whether or the current user can perform an action. 
Not filling in a -// spec.namespace means "in all namespaces". Self is a special case, because users should always be able -// to check whether they can perform an action -message SelfSubjectAccessReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated. user and groups must be empty - optional SelfSubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - optional SubjectAccessReviewStatus status = 3; -} - -// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -message SelfSubjectAccessReviewSpec { - // ResourceAuthorizationAttributes describes information for a resource access request - // +optional - optional ResourceAttributes resourceAttributes = 1; - - // NonResourceAttributes describes information for a non-resource access request - // +optional - optional NonResourceAttributes nonResourceAttributes = 2; -} - -// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. -// The returned list of actions may be incomplete depending on the server's authorization mode, -// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, -// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to -// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. -// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. -message SelfSubjectRulesReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated. - optional SelfSubjectRulesReviewSpec spec = 2; - - // Status is filled in by the server and indicates the set of actions a user can perform. - // +optional - optional SubjectRulesReviewStatus status = 3; -} - -message SelfSubjectRulesReviewSpec { - // Namespace to evaluate rules for. Required. - optional string namespace = 1; -} - -// SubjectAccessReview checks whether or not a user or group can perform an action. -message SubjectAccessReview { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec holds information about the request being evaluated - optional SubjectAccessReviewSpec spec = 2; - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - optional SubjectAccessReviewStatus status = 3; -} - -// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -message SubjectAccessReviewSpec { - // ResourceAuthorizationAttributes describes information for a resource access request - // +optional - optional ResourceAttributes resourceAttributes = 1; - - // NonResourceAttributes describes information for a non-resource access request - // +optional - optional NonResourceAttributes nonResourceAttributes = 2; - - // User is the user you're testing for. 
- // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups - // +optional - optional string user = 3; - - // Groups is the groups you're testing for. - // +optional - repeated string group = 4; - - // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer - // it needs a reflection here. - // +optional - map extra = 5; - - // UID information about the requesting user. - // +optional - optional string uid = 6; -} - -// SubjectAccessReviewStatus -message SubjectAccessReviewStatus { - // Allowed is required. True if the action would be allowed, false otherwise. - optional bool allowed = 1; - - // Denied is optional. True if the action would be denied, otherwise - // false. If both allowed is false and denied is false, then the - // authorizer has no opinion on whether to authorize the action. Denied - // may not be true if Allowed is true. - // +optional - optional bool denied = 4; - - // Reason is optional. It indicates why a request was allowed or denied. - // +optional - optional string reason = 2; - - // EvaluationError is an indication that some error occurred during the authorization check. - // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. - // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. - // +optional - optional string evaluationError = 3; -} - -// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on -// the set of authorizers the server is configured with and any errors experienced during evaluation. -// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, -// even if that list is incomplete. -message SubjectRulesReviewStatus { - // ResourceRules is the list of actions the subject is allowed to perform on resources. - // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. - repeated ResourceRule resourceRules = 1; - - // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. - // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. - repeated NonResourceRule nonResourceRules = 2; - - // Incomplete is true when the rules returned by this call are incomplete. This is most commonly - // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. - optional bool incomplete = 3; - - // EvaluationError can appear in combination with Rules. It indicates an error occurred during - // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that - // ResourceRules and/or NonResourceRules may be incomplete. - // +optional - optional string evaluationError = 4; -} - diff --git a/vendor/k8s.io/api/authorization/v1beta1/register.go b/vendor/k8s.io/api/authorization/v1beta1/register.go deleted file mode 100644 index 84255dd..0000000 --- a/vendor/k8s.io/api/authorization/v1beta1/register.go +++ /dev/null @@ -1,55 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "authorization.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &SelfSubjectRulesReview{}, - &SelfSubjectAccessReview{}, - &SubjectAccessReview{}, - &LocalSubjectAccessReview{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/authorization/v1beta1/types.go b/vendor/k8s.io/api/authorization/v1beta1/types.go deleted file mode 100644 index 618ff8c..0000000 --- a/vendor/k8s.io/api/authorization/v1beta1/types.go +++ /dev/null @@ -1,268 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SubjectAccessReview checks whether or not a user or group can perform an action. 
-type SubjectAccessReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated - Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +genclient -// +genclient:nonNamespaced -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a -// spec.namespace means "in all namespaces". Self is a special case, because users should always be able -// to check whether they can perform an action -type SelfSubjectAccessReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated. user and groups must be empty - Spec SelfSubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. -// Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions -// checking. -type LocalSubjectAccessReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace - // you made the request against. If empty, it is defaulted. - Spec SubjectAccessReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates whether the request is allowed or not - // +optional - Status SubjectAccessReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface -type ResourceAttributes struct { - // Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces - // "" (empty) is defaulted for LocalSubjectAccessReviews - // "" (empty) is empty for cluster-scoped resources - // "" (empty) means "all" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` - // Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. "*" means all. - // +optional - Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` - // Group is the API Group of the Resource. "*" means all. - // +optional - Group string `json:"group,omitempty" protobuf:"bytes,3,opt,name=group"` - // Version is the API Version of the Resource. "*" means all. 
- // +optional - Version string `json:"version,omitempty" protobuf:"bytes,4,opt,name=version"` - // Resource is one of the existing resource types. "*" means all. - // +optional - Resource string `json:"resource,omitempty" protobuf:"bytes,5,opt,name=resource"` - // Subresource is one of the existing resource types. "" means none. - // +optional - Subresource string `json:"subresource,omitempty" protobuf:"bytes,6,opt,name=subresource"` - // Name is the name of the resource being requested for a "get" or deleted for a "delete". "" (empty) means all. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,7,opt,name=name"` -} - -// NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface -type NonResourceAttributes struct { - // Path is the URL path of the request - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // Verb is the standard HTTP verb - // +optional - Verb string `json:"verb,omitempty" protobuf:"bytes,2,opt,name=verb"` -} - -// SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -type SubjectAccessReviewSpec struct { - // ResourceAuthorizationAttributes describes information for a resource access request - // +optional - ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` - // NonResourceAttributes describes information for a non-resource access request - // +optional - NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` - - // User is the user you're testing for. - // If you specify "User" but not "Group", then is it interpreted as "What if User were not a member of any groups - // +optional - User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` - // Groups is the groups you're testing for. - // +optional - Groups []string `json:"group,omitempty" protobuf:"bytes,4,rep,name=group"` - // Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer - // it needs a reflection here. - // +optional - Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,5,rep,name=extra"` - // UID information about the requesting user. - // +optional - UID string `json:"uid,omitempty" protobuf:"bytes,6,opt,name=uid"` -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -type ExtraValue []string - -func (t ExtraValue) String() string { - return fmt.Sprintf("%v", []string(t)) -} - -// SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes -// and NonResourceAuthorizationAttributes must be set -type SelfSubjectAccessReviewSpec struct { - // ResourceAuthorizationAttributes describes information for a resource access request - // +optional - ResourceAttributes *ResourceAttributes `json:"resourceAttributes,omitempty" protobuf:"bytes,1,opt,name=resourceAttributes"` - // NonResourceAttributes describes information for a non-resource access request - // +optional - NonResourceAttributes *NonResourceAttributes `json:"nonResourceAttributes,omitempty" protobuf:"bytes,2,opt,name=nonResourceAttributes"` -} - -// SubjectAccessReviewStatus -type SubjectAccessReviewStatus struct { - // Allowed is required. 
True if the action would be allowed, false otherwise. - Allowed bool `json:"allowed" protobuf:"varint,1,opt,name=allowed"` - // Denied is optional. True if the action would be denied, otherwise - // false. If both allowed is false and denied is false, then the - // authorizer has no opinion on whether to authorize the action. Denied - // may not be true if Allowed is true. - // +optional - Denied bool `json:"denied,omitempty" protobuf:"varint,4,opt,name=denied"` - // Reason is optional. It indicates why a request was allowed or denied. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` - // EvaluationError is an indication that some error occurred during the authorization check. - // It is entirely possible to get an error and be able to continue determine authorization status in spite of it. - // For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request. - // +optional - EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,3,opt,name=evaluationError"` -} - -// +genclient -// +genclient:nonNamespaced -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. -// The returned list of actions may be incomplete depending on the server's authorization mode, -// and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, -// or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to -// drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. -// SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server. -type SelfSubjectRulesReview struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec holds information about the request being evaluated. - Spec SelfSubjectRulesReviewSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status is filled in by the server and indicates the set of actions a user can perform. - // +optional - Status SubjectRulesReviewStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -type SelfSubjectRulesReviewSpec struct { - // Namespace to evaluate rules for. Required. - Namespace string `json:"namespace,omitempty" protobuf:"bytes,1,opt,name=namespace"` -} - -// SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on -// the set of authorizers the server is configured with and any errors experienced during evaluation. -// Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, -// even if that list is incomplete. -type SubjectRulesReviewStatus struct { - // ResourceRules is the list of actions the subject is allowed to perform on resources. - // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. - ResourceRules []ResourceRule `json:"resourceRules" protobuf:"bytes,1,rep,name=resourceRules"` - // NonResourceRules is the list of actions the subject is allowed to perform on non-resources. - // The list ordering isn't significant, may contain duplicates, and possibly be incomplete. 
- NonResourceRules []NonResourceRule `json:"nonResourceRules" protobuf:"bytes,2,rep,name=nonResourceRules"` - // Incomplete is true when the rules returned by this call are incomplete. This is most commonly - // encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation. - Incomplete bool `json:"incomplete" protobuf:"bytes,3,rep,name=incomplete"` - // EvaluationError can appear in combination with Rules. It indicates an error occurred during - // rule evaluation, such as an authorizer that doesn't support rule evaluation, and that - // ResourceRules and/or NonResourceRules may be incomplete. - // +optional - EvaluationError string `json:"evaluationError,omitempty" protobuf:"bytes,4,opt,name=evaluationError"` -} - -// ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, -// may contain duplicates, and possibly be incomplete. -type ResourceRule struct { - // Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. "*" means all. - Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. "*" means all. - // +optional - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. "*" means all in the specified apiGroups. - // "*/foo" represents the subresource 'foo' for all resources in the specified apiGroups. - // +optional - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. "*" means all. - // +optional - ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` -} - -// NonResourceRule holds information that describes a rule for the non-resource -type NonResourceRule struct { - // Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. "*" means all. - Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, - // final step in the path. "*" means all. - // +optional - NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,2,rep,name=nonResourceURLs"` -} diff --git a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 3ae6e72..0000000 --- a/vendor/k8s.io/api/authorization/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,173 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_LocalSubjectAccessReview = map[string]string{ - "": "LocalSubjectAccessReview checks whether or not a user or group can perform an action in a given namespace. Having a namespace scoped resource makes it much easier to grant namespace scoped policy that includes permissions checking.", - "spec": "Spec holds information about the request being evaluated. spec.namespace must be equal to the namespace you made the request against. If empty, it is defaulted.", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (LocalSubjectAccessReview) SwaggerDoc() map[string]string { - return map_LocalSubjectAccessReview -} - -var map_NonResourceAttributes = map[string]string{ - "": "NonResourceAttributes includes the authorization attributes available for non-resource requests to the Authorizer interface", - "path": "Path is the URL path of the request", - "verb": "Verb is the standard HTTP verb", -} - -func (NonResourceAttributes) SwaggerDoc() map[string]string { - return map_NonResourceAttributes -} - -var map_NonResourceRule = map[string]string{ - "": "NonResourceRule holds information that describes a rule for the non-resource", - "verbs": "Verb is a list of kubernetes non-resource API verbs, like: get, post, put, delete, patch, head, options. \"*\" means all.", - "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path. \"*\" means all.", -} - -func (NonResourceRule) SwaggerDoc() map[string]string { - return map_NonResourceRule -} - -var map_ResourceAttributes = map[string]string{ - "": "ResourceAttributes includes the authorization attributes available for resource requests to the Authorizer interface", - "namespace": "Namespace is the namespace of the action being requested. Currently, there is no distinction between no namespace and all namespaces \"\" (empty) is defaulted for LocalSubjectAccessReviews \"\" (empty) is empty for cluster-scoped resources \"\" (empty) means \"all\" for namespace scoped resources from a SubjectAccessReview or SelfSubjectAccessReview", - "verb": "Verb is a kubernetes resource API verb, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "group": "Group is the API Group of the Resource. \"*\" means all.", - "version": "Version is the API Version of the Resource. \"*\" means all.", - "resource": "Resource is one of the existing resource types. \"*\" means all.", - "subresource": "Subresource is one of the existing resource types. \"\" means none.", - "name": "Name is the name of the resource being requested for a \"get\" or deleted for a \"delete\". 
\"\" (empty) means all.", -} - -func (ResourceAttributes) SwaggerDoc() map[string]string { - return map_ResourceAttributes -} - -var map_ResourceRule = map[string]string{ - "": "ResourceRule is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", - "verbs": "Verb is a list of kubernetes resource API verbs, like: get, list, watch, create, update, delete, proxy. \"*\" means all.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed. \"*\" means all.", - "resources": "Resources is a list of resources this rule applies to. \"*\" means all in the specified apiGroups.\n \"*/foo\" represents the subresource 'foo' for all resources in the specified apiGroups.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. \"*\" means all.", -} - -func (ResourceRule) SwaggerDoc() map[string]string { - return map_ResourceRule -} - -var map_SelfSubjectAccessReview = map[string]string{ - "": "SelfSubjectAccessReview checks whether or the current user can perform an action. Not filling in a spec.namespace means \"in all namespaces\". Self is a special case, because users should always be able to check whether they can perform an action", - "spec": "Spec holds information about the request being evaluated. user and groups must be empty", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (SelfSubjectAccessReview) SwaggerDoc() map[string]string { - return map_SelfSubjectAccessReview -} - -var map_SelfSubjectAccessReviewSpec = map[string]string{ - "": "SelfSubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", - "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", - "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", -} - -func (SelfSubjectAccessReviewSpec) SwaggerDoc() map[string]string { - return map_SelfSubjectAccessReviewSpec -} - -var map_SelfSubjectRulesReview = map[string]string{ - "": "SelfSubjectRulesReview enumerates the set of actions the current user can perform within a namespace. The returned list of actions may be incomplete depending on the server's authorization mode, and any errors experienced during the evaluation. SelfSubjectRulesReview should be used by UIs to show/hide actions, or to quickly let an end user reason about their permissions. It should NOT Be used by external systems to drive authorization decisions as this raises confused deputy, cache lifetime/revocation, and correctness concerns. SubjectAccessReview, and LocalAccessReview are the correct way to defer authorization decisions to the API server.", - "spec": "Spec holds information about the request being evaluated.", - "status": "Status is filled in by the server and indicates the set of actions a user can perform.", -} - -func (SelfSubjectRulesReview) SwaggerDoc() map[string]string { - return map_SelfSubjectRulesReview -} - -var map_SelfSubjectRulesReviewSpec = map[string]string{ - "namespace": "Namespace to evaluate rules for. 
Required.", -} - -func (SelfSubjectRulesReviewSpec) SwaggerDoc() map[string]string { - return map_SelfSubjectRulesReviewSpec -} - -var map_SubjectAccessReview = map[string]string{ - "": "SubjectAccessReview checks whether or not a user or group can perform an action.", - "spec": "Spec holds information about the request being evaluated", - "status": "Status is filled in by the server and indicates whether the request is allowed or not", -} - -func (SubjectAccessReview) SwaggerDoc() map[string]string { - return map_SubjectAccessReview -} - -var map_SubjectAccessReviewSpec = map[string]string{ - "": "SubjectAccessReviewSpec is a description of the access request. Exactly one of ResourceAuthorizationAttributes and NonResourceAuthorizationAttributes must be set", - "resourceAttributes": "ResourceAuthorizationAttributes describes information for a resource access request", - "nonResourceAttributes": "NonResourceAttributes describes information for a non-resource access request", - "user": "User is the user you're testing for. If you specify \"User\" but not \"Group\", then is it interpreted as \"What if User were not a member of any groups", - "group": "Groups is the groups you're testing for.", - "extra": "Extra corresponds to the user.Info.GetExtra() method from the authenticator. Since that is input to the authorizer it needs a reflection here.", - "uid": "UID information about the requesting user.", -} - -func (SubjectAccessReviewSpec) SwaggerDoc() map[string]string { - return map_SubjectAccessReviewSpec -} - -var map_SubjectAccessReviewStatus = map[string]string{ - "": "SubjectAccessReviewStatus", - "allowed": "Allowed is required. True if the action would be allowed, false otherwise.", - "denied": "Denied is optional. True if the action would be denied, otherwise false. If both allowed is false and denied is false, then the authorizer has no opinion on whether to authorize the action. Denied may not be true if Allowed is true.", - "reason": "Reason is optional. It indicates why a request was allowed or denied.", - "evaluationError": "EvaluationError is an indication that some error occurred during the authorization check. It is entirely possible to get an error and be able to continue determine authorization status in spite of it. For instance, RBAC can be missing a role, but enough roles are still present and bound to reason about the request.", -} - -func (SubjectAccessReviewStatus) SwaggerDoc() map[string]string { - return map_SubjectAccessReviewStatus -} - -var map_SubjectRulesReviewStatus = map[string]string{ - "": "SubjectRulesReviewStatus contains the result of a rules check. This check can be incomplete depending on the set of authorizers the server is configured with and any errors experienced during evaluation. Because authorization rules are additive, if a rule appears in a list it's safe to assume the subject has that permission, even if that list is incomplete.", - "resourceRules": "ResourceRules is the list of actions the subject is allowed to perform on resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", - "nonResourceRules": "NonResourceRules is the list of actions the subject is allowed to perform on non-resources. The list ordering isn't significant, may contain duplicates, and possibly be incomplete.", - "incomplete": "Incomplete is true when the rules returned by this call are incomplete. 
This is most commonly encountered when an authorizer, such as an external authorizer, doesn't support rules evaluation.", - "evaluationError": "EvaluationError can appear in combination with Rules. It indicates an error occurred during rule evaluation, such as an authorizer that doesn't support rule evaluation, and that ResourceRules and/or NonResourceRules may be incomplete.", -} - -func (SubjectRulesReviewStatus) SwaggerDoc() map[string]string { - return map_SubjectRulesReviewStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 58b2dfe..0000000 --- a/vendor/k8s.io/api/authorization/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,385 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ExtraValue) DeepCopyInto(out *ExtraValue) { - { - in := &in - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. -func (in ExtraValue) DeepCopy() ExtraValue { - if in == nil { - return nil - } - out := new(ExtraValue) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LocalSubjectAccessReview) DeepCopyInto(out *LocalSubjectAccessReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LocalSubjectAccessReview. -func (in *LocalSubjectAccessReview) DeepCopy() *LocalSubjectAccessReview { - if in == nil { - return nil - } - out := new(LocalSubjectAccessReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LocalSubjectAccessReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NonResourceAttributes) DeepCopyInto(out *NonResourceAttributes) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceAttributes. 
-func (in *NonResourceAttributes) DeepCopy() *NonResourceAttributes { - if in == nil { - return nil - } - out := new(NonResourceAttributes) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NonResourceRule) DeepCopyInto(out *NonResourceRule) { - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NonResourceURLs != nil { - in, out := &in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NonResourceRule. -func (in *NonResourceRule) DeepCopy() *NonResourceRule { - if in == nil { - return nil - } - out := new(NonResourceRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceAttributes) DeepCopyInto(out *ResourceAttributes) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceAttributes. -func (in *ResourceAttributes) DeepCopy() *ResourceAttributes { - if in == nil { - return nil - } - out := new(ResourceAttributes) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRule) DeepCopyInto(out *ResourceRule) { - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceNames != nil { - in, out := &in.ResourceNames, &out.ResourceNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRule. -func (in *ResourceRule) DeepCopy() *ResourceRule { - if in == nil { - return nil - } - out := new(ResourceRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SelfSubjectAccessReview) DeepCopyInto(out *SelfSubjectAccessReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReview. -func (in *SelfSubjectAccessReview) DeepCopy() *SelfSubjectAccessReview { - if in == nil { - return nil - } - out := new(SelfSubjectAccessReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SelfSubjectAccessReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SelfSubjectAccessReviewSpec) DeepCopyInto(out *SelfSubjectAccessReviewSpec) { - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectAccessReviewSpec. -func (in *SelfSubjectAccessReviewSpec) DeepCopy() *SelfSubjectAccessReviewSpec { - if in == nil { - return nil - } - out := new(SelfSubjectAccessReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SelfSubjectRulesReview) DeepCopyInto(out *SelfSubjectRulesReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReview. -func (in *SelfSubjectRulesReview) DeepCopy() *SelfSubjectRulesReview { - if in == nil { - return nil - } - out := new(SelfSubjectRulesReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SelfSubjectRulesReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SelfSubjectRulesReviewSpec) DeepCopyInto(out *SelfSubjectRulesReviewSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SelfSubjectRulesReviewSpec. -func (in *SelfSubjectRulesReviewSpec) DeepCopy() *SelfSubjectRulesReviewSpec { - if in == nil { - return nil - } - out := new(SelfSubjectRulesReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubjectAccessReview) DeepCopyInto(out *SubjectAccessReview) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReview. -func (in *SubjectAccessReview) DeepCopy() *SubjectAccessReview { - if in == nil { - return nil - } - out := new(SubjectAccessReview) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SubjectAccessReview) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SubjectAccessReviewSpec) DeepCopyInto(out *SubjectAccessReviewSpec) { - *out = *in - if in.ResourceAttributes != nil { - in, out := &in.ResourceAttributes, &out.ResourceAttributes - *out = new(ResourceAttributes) - **out = **in - } - if in.NonResourceAttributes != nil { - in, out := &in.NonResourceAttributes, &out.NonResourceAttributes - *out = new(NonResourceAttributes) - **out = **in - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewSpec. -func (in *SubjectAccessReviewSpec) DeepCopy() *SubjectAccessReviewSpec { - if in == nil { - return nil - } - out := new(SubjectAccessReviewSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubjectAccessReviewStatus) DeepCopyInto(out *SubjectAccessReviewStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectAccessReviewStatus. -func (in *SubjectAccessReviewStatus) DeepCopy() *SubjectAccessReviewStatus { - if in == nil { - return nil - } - out := new(SubjectAccessReviewStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SubjectRulesReviewStatus) DeepCopyInto(out *SubjectRulesReviewStatus) { - *out = *in - if in.ResourceRules != nil { - in, out := &in.ResourceRules, &out.ResourceRules - *out = make([]ResourceRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.NonResourceRules != nil { - in, out := &in.NonResourceRules, &out.NonResourceRules - *out = make([]NonResourceRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubjectRulesReviewStatus. -func (in *SubjectRulesReviewStatus) DeepCopy() *SubjectRulesReviewStatus { - if in == nil { - return nil - } - out := new(SubjectRulesReviewStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/autoscaling/v1/doc.go b/vendor/k8s.io/api/autoscaling/v1/doc.go deleted file mode 100644 index 9c3be84..0000000 --- a/vendor/k8s.io/api/autoscaling/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -package v1 // import "k8s.io/api/autoscaling/v1" diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v1/generated.pb.go deleted file mode 100644 index 47a46a5..0000000 --- a/vendor/k8s.io/api/autoscaling/v1/generated.pb.go +++ /dev/null @@ -1,4710 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus - Scale - ScaleSpec - ScaleStatus -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } -func (*CrossVersionObjectReference) ProtoMessage() {} -func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } -func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} -func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } -func (*HorizontalPodAutoscalerList) ProtoMessage() {} -func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } -func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} -func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} -} - -func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } -func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} -func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} -} - -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - 
-func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v1.CrossVersionObjectReference") - proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricSource") - proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ExternalMetricStatus") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerCondition") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v1.ResourceMetricStatus") - proto.RegisterType((*Scale)(nil), "k8s.io.api.autoscaling.v1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.autoscaling.v1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.autoscaling.v1.ScaleStatus") -} -func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil -} - -func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } 
- return dAtA[:n], nil -} - -func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - return i, nil -} - -func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} - -func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil -} - -func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - 
i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) - } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if m.TargetCPUUtilizationPercentage != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetCPUUtilizationPercentage)) - } - return i, nil -} - -func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentCPUUtilizationPercentage)) - } - return i, nil -} - -func (m *MetricSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += 
copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - return i, nil -} - -func (m *MetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - } - return i, nil -} - -func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - return i, nil -} - -func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - 
return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - } - return i, nil -} - -func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 - if m.Selector != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } - return i, nil -} - -func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - if m.Selector != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - } - return i, nil -} - -func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, 
err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - } - return i, nil -} - -func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - return i, nil -} - -func (m *Scale) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Scale) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n36, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n37, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n38, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - return i, nil -} - -func (m *ScaleSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScaleSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - return i, nil -} - -func (m *ScaleStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScaleStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Selector))) - i += copy(dAtA[i:], m.Selector) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CrossVersionObjectReference) Size() (n int) { - var l int - _ = l - l 
= len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExternalMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - if m.MetricSelector != nil { - l = m.MetricSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TargetValue != nil { - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TargetAverageValue != nil { - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ExternalMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - if m.MetricSelector != nil { - l = m.MetricSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.CurrentAverageValue != nil { - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HorizontalPodAutoscaler) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - var l int - _ = l - l = m.ScaleTargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.MinReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MinReplicas)) - } - n += 1 + sovGenerated(uint64(m.MaxReplicas)) - if m.TargetCPUUtilizationPercentage != nil { - n += 1 + sovGenerated(uint64(*m.TargetCPUUtilizationPercentage)) - } - return n -} - -func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - l = m.LastScaleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.DesiredReplicas)) - if m.CurrentCPUUtilizationPercentage != nil { - n += 1 + sovGenerated(uint64(*m.CurrentCPUUtilizationPercentage)) - } - return n -} - -func (m *MetricSpec) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.External != nil { - l = m.External.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *MetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + 
sovGenerated(uint64(l)) - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.External != nil { - l = m.External.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ObjectMetricSource) Size() (n int) { - var l int - _ = l - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AverageValue != nil { - l = m.AverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ObjectMetricStatus) Size() (n int) { - var l int - _ = l - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AverageValue != nil { - l = m.AverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodsMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodsMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.TargetAverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.CurrentAverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) - } - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Scale) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ScaleSpec) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - return n -} - -func (m *ScaleStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Replicas)) - l = len(m.Selector) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CrossVersionObjectReference) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&CrossVersionObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalMetricSource{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalMetricStatus{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscaler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, - 
`ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, - `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `TargetCPUUtilizationPercentage:` + valueToStringGenerated(this.TargetCPUUtilizationPercentage) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, - `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, - `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentCPUUtilizationPercentage:` + valueToStringGenerated(this.CurrentCPUUtilizationPercentage) + `,`, - `}`, - }, "") - return s -} -func (this *MetricSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricSpec{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricStatus{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectMetricSource{`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectMetricStatus{`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", 
"CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodsMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodsMetricSource{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodsMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodsMetricStatus{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricSource{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + fmt.Sprintf("%v", this.Selector) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > 
l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := 
m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, HorizontalPodAutoscaler{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.MinReplicas = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) - } - m.MaxReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetCPUUtilizationPercentage", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetCPUUtilizationPercentage = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - m.CurrentReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) - } - m.DesiredReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentCPUUtilizationPercentage", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentCPUUtilizationPercentage = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - 
var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricSource{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricSource{} - } - if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricSource{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.External == nil { - m.External = &ExternalMetricSource{} - } - if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricStatus: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricStatus{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricStatus{} - } - if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricStatus{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.External == nil { - m.External = &ExternalMetricStatus{} - } - if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - 
} - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 
2 { - return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - 
} - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetAverageUtilization = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentAverageUtilization = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Scale) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Scale: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Scale: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*ScaleStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Selector = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - 
return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1516 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcf, 0x6f, 0x13, 0xc7, - 0x17, 0x8f, 0x7f, 0x24, 0x24, 0xe3, 0x90, 0xe4, 0x3b, 0x20, 0x08, 0xe1, 0x8b, 0x37, 0xda, 0x22, - 0x44, 0x7f, 0xb0, 0x6e, 0x52, 0x8a, 0xe8, 0x31, 0x76, 0x4b, 0x41, 0x8d, 0x21, 0x4c, 0x02, 0xa5, - 0x3f, 0xc5, 0x64, 0x3d, 0x38, 0x43, 0xbc, 0xbb, 0xd6, 0xec, 0xd8, 0x22, 0x48, 0x95, 0xda, 0x43, - 0xef, 0xbd, 0xb4, 0xea, 0xb1, 0x95, 0x7a, 0xed, 0x99, 0x73, 0x6f, 0x1c, 0x39, 0x20, 0x95, 0xd3, - 0xaa, 0x6c, 0x8f, 0xfd, 0x0f, 0x38, 0x55, 0xf3, 0xc3, 0xeb, 0x5d, 0xdb, 0xeb, 0x24, 0x26, 0x44, - 0x6d, 0x6f, 0x3b, 0x33, 0xef, 0x7d, 0xde, 0xec, 0x7b, 0x6f, 0xde, 0x2f, 0x50, 0xde, 0xbe, 0xec, - 0x5b, 0xd4, 0x2b, 0x6d, 0xb7, 0x36, 0x09, 0x73, 0x09, 0x27, 0x7e, 0xa9, 0x4d, 0xdc, 0x9a, 0xc7, - 0x4a, 0xfa, 0x00, 0x37, 0x69, 0x09, 0xb7, 0xb8, 0xe7, 0xdb, 0xb8, 0x41, 0xdd, 0x7a, 0xa9, 0xbd, - 0x54, 0xaa, 0x13, 0x97, 0x30, 0xcc, 0x49, 0xcd, 0x6a, 0x32, 0x8f, 0x7b, 0xf0, 0x94, 0x22, 0xb5, - 0x70, 0x93, 0x5a, 0x31, 0x52, 0xab, 0xbd, 0xb4, 0x70, 0xa1, 0x4e, 0xf9, 0x56, 0x6b, 0xd3, 0xb2, - 0x3d, 0xa7, 0x54, 0xf7, 0xea, 0x5e, 0x49, 0x72, 0x6c, 0xb6, 0xee, 0xc9, 0x95, 0x5c, 0xc8, 0x2f, - 0x85, 0xb4, 0x60, 0xc6, 0x84, 0xda, 0x1e, 0x23, 0x03, 0xa4, 0x2d, 0x5c, 0xec, 0xd2, 0x38, 0xd8, - 0xde, 0xa2, 0x2e, 0x61, 0x3b, 0xa5, 0xe6, 0x76, 0x5d, 0x32, 0x31, 0xe2, 0x7b, 0x2d, 0x66, 0x93, - 0x7d, 0x71, 0xf9, 0x25, 0x87, 0x70, 0x3c, 0x48, 0x56, 0x29, 0x8d, 0x8b, 0xb5, 0x5c, 0x4e, 0x9d, - 0x7e, 0x31, 0x97, 0x76, 0x63, 0xf0, 0xed, 0x2d, 0xe2, 0xe0, 0x5e, 0x3e, 0xf3, 0xfb, 0x0c, 0x38, - 0x5d, 0x61, 0x9e, 0xef, 0xdf, 0x26, 0xcc, 0xa7, 0x9e, 0x7b, 0x63, 0xf3, 0x3e, 0xb1, 0x39, 0x22, - 0xf7, 0x08, 0x23, 0xae, 0x4d, 0xe0, 0x22, 0xc8, 0x6f, 0x53, 0xb7, 0x36, 0x9f, 0x59, 0xcc, 0x9c, - 0x9f, 0x2a, 0x4f, 0x3f, 0x0e, 0x8c, 0xb1, 0x30, 0x30, 0xf2, 0x1f, 0x51, 0xb7, 0x86, 0xe4, 0x89, - 0xa0, 0x70, 0xb1, 0x43, 0xe6, 0xb3, 0x49, 0x8a, 0xeb, 0xd8, 0x21, 0x48, 0x9e, 0xc0, 0x65, 0x00, - 0x70, 0x93, 0x6a, 0x01, 0xf3, 0x39, 0x49, 0x07, 0x35, 0x1d, 0x58, 0x59, 0xbb, 0xa6, 0x4f, 0x50, - 0x8c, 0xca, 0xfc, 0x21, 0x07, 0x8e, 0x7f, 0xf0, 0x80, 0x13, 0xe6, 0xe2, 0x46, 0x95, 0x70, 0x46, - 0xed, 0x75, 0xa9, 0x5f, 0x01, 0xe6, 0xc8, 0xb5, 0x10, 0xa0, 0xaf, 0x15, 0x81, 0x55, 0xa3, 0x13, - 0x14, 0xa3, 0x82, 0x1e, 0x98, 0x51, 0xab, 0x75, 0xd2, 0x20, 0x36, 0xf7, 0x98, 0xbc, 0x6c, 0x61, - 0xf9, 0x1d, 0xab, 0xeb, 0x40, 0x91, 0xd6, 0xac, 0xe6, 0x76, 0x5d, 0x6c, 0xf8, 0x96, 0x30, 0x8e, - 0xd5, 0x5e, 0xb2, 0x56, 0xf1, 0x26, 0x69, 0x74, 0x58, 0xcb, 0x30, 0x0c, 0x8c, 0x99, 0x6a, 0x02, - 0x0e, 0xf5, 0xc0, 0x43, 0x0c, 0x0a, 0x1c, 0xb3, 0x3a, 0xe1, 0xb7, 0x71, 0xa3, 0x45, 0xe4, 0x2f, - 0x17, 0x96, 0xad, 0x61, 0xd2, 0xac, 0x8e, 0x03, 0x59, 0x37, 0x5b, 0xd8, 0xe5, 0x94, 0xef, 0x94, - 0x67, 0xc3, 0xc0, 0x28, 0x6c, 0x74, 0x61, 0x50, 0x1c, 0x13, 0xb6, 0x01, 0x54, 0xcb, 0x95, 0x36, - 0x61, 0xb8, 0x4e, 0x94, 0xa4, 0xfc, 0x48, 0x92, 0x4e, 0x84, 0x81, 
0x01, 0x37, 0xfa, 0xd0, 0xd0, - 0x00, 0x09, 0xe6, 0x4f, 0xfd, 0x86, 0xe1, 0x98, 0xb7, 0xfc, 0x7f, 0x87, 0x61, 0xb6, 0xc0, 0xb4, - 0xdd, 0x62, 0x8c, 0xb8, 0x2f, 0x65, 0x99, 0xe3, 0xfa, 0xb7, 0xa6, 0x2b, 0x31, 0x2c, 0x94, 0x40, - 0x86, 0x3b, 0xe0, 0x98, 0x5e, 0x1f, 0x80, 0x81, 0x4e, 0x86, 0x81, 0x71, 0xac, 0xd2, 0x0f, 0x87, - 0x06, 0xc9, 0x30, 0x1f, 0x65, 0xc1, 0xc9, 0xab, 0x1e, 0xa3, 0x0f, 0x3d, 0x97, 0xe3, 0xc6, 0x9a, - 0x57, 0x5b, 0xd1, 0xb1, 0x91, 0x30, 0x78, 0x17, 0x4c, 0x0a, 0xed, 0xd5, 0x30, 0xc7, 0xd2, 0x46, - 0x85, 0xe5, 0xb7, 0xf7, 0xa6, 0x6b, 0x15, 0x18, 0xaa, 0x84, 0xe3, 0xae, 0x55, 0xbb, 0x7b, 0x28, - 0x42, 0x85, 0x77, 0x40, 0xde, 0x6f, 0x12, 0x5b, 0x5b, 0xf2, 0x92, 0x95, 0x1a, 0xa3, 0xad, 0x94, - 0x3b, 0xae, 0x37, 0x89, 0xdd, 0x8d, 0x23, 0x62, 0x85, 0x24, 0x22, 0xbc, 0x0b, 0x26, 0x7c, 0xe9, - 0x6b, 0xda, 0x6c, 0x97, 0x47, 0xc0, 0x96, 0xfc, 0xe5, 0x19, 0x8d, 0x3e, 0xa1, 0xd6, 0x48, 0xe3, - 0x9a, 0xdf, 0xe6, 0xc0, 0x62, 0x0a, 0x67, 0xc5, 0x73, 0x6b, 0x94, 0x53, 0xcf, 0x85, 0x57, 0x41, - 0x9e, 0xef, 0x34, 0x3b, 0x2e, 0x7e, 0xb1, 0x73, 0xd1, 0x8d, 0x9d, 0x26, 0x79, 0x11, 0x18, 0x67, - 0x77, 0xe3, 0x17, 0x74, 0x48, 0x22, 0xc0, 0xd5, 0xe8, 0x87, 0xb2, 0x09, 0x2c, 0x7d, 0xad, 0x17, - 0x81, 0x31, 0x20, 0x2f, 0x59, 0x11, 0x52, 0xf2, 0xf2, 0x22, 0x22, 0x34, 0xb0, 0xcf, 0x37, 0x18, - 0x76, 0x7d, 0x25, 0x89, 0x3a, 0x1d, 0x0f, 0x7f, 0x63, 0x6f, 0x46, 0x16, 0x1c, 0xe5, 0x05, 0x7d, - 0x0b, 0xb8, 0xda, 0x87, 0x86, 0x06, 0x48, 0x80, 0xe7, 0xc0, 0x04, 0x23, 0xd8, 0xf7, 0x5c, 0xe9, - 0xdc, 0x53, 0x5d, 0xe5, 0x22, 0xb9, 0x8b, 0xf4, 0x29, 0x7c, 0x1d, 0x1c, 0x71, 0x88, 0xef, 0xe3, - 0x3a, 0x99, 0x1f, 0x97, 0x84, 0xb3, 0x9a, 0xf0, 0x48, 0x55, 0x6d, 0xa3, 0xce, 0xb9, 0xf9, 0x34, - 0x03, 0x4e, 0xa7, 0xe8, 0x71, 0x95, 0xfa, 0x1c, 0x7e, 0xde, 0xe7, 0xc5, 0xd6, 0x1e, 0x23, 0x06, - 0xf5, 0x95, 0x0f, 0xcf, 0x69, 0xd9, 0x93, 0x9d, 0x9d, 0x98, 0x07, 0x7f, 0x0c, 0xc6, 0x29, 0x27, - 0x8e, 0xb0, 0x4a, 0xee, 0x7c, 0x61, 0x79, 0x79, 0xff, 0x6e, 0x56, 0x3e, 0xaa, 0xe1, 0xc7, 0xaf, - 0x09, 0x20, 0xa4, 0xf0, 0xcc, 0xbf, 0xb2, 0xa9, 0xbf, 0x25, 0xdc, 0x1c, 0xb6, 0xc1, 0x8c, 0x5c, - 0xa9, 0x50, 0x8c, 0xc8, 0x3d, 0xfd, 0x73, 0xc3, 0x1e, 0xd1, 0x90, 0xe4, 0x5d, 0x3e, 0xa1, 0x6f, - 0x31, 0xb3, 0x9e, 0x40, 0x45, 0x3d, 0x52, 0xe0, 0x12, 0x28, 0x38, 0xd4, 0x45, 0xa4, 0xd9, 0xa0, - 0x36, 0x56, 0xce, 0x38, 0xae, 0xd2, 0x4f, 0xb5, 0xbb, 0x8d, 0xe2, 0x34, 0xf0, 0x5d, 0x50, 0x70, - 0xf0, 0x83, 0x88, 0x25, 0x27, 0x59, 0x8e, 0x69, 0x79, 0x85, 0x6a, 0xf7, 0x08, 0xc5, 0xe9, 0xe0, - 0x7d, 0x50, 0x54, 0x39, 0xa5, 0xb2, 0x76, 0xeb, 0x16, 0xa7, 0x0d, 0xfa, 0x10, 0x0b, 0x3f, 0x5a, - 0x23, 0xcc, 0x26, 0x2e, 0x17, 0xae, 0x91, 0x97, 0x48, 0x66, 0x18, 0x18, 0xc5, 0x8d, 0xa1, 0x94, - 0x68, 0x17, 0x24, 0xf3, 0xb7, 0x1c, 0x38, 0x33, 0x34, 0x0c, 0xc0, 0x2b, 0x00, 0x7a, 0x9b, 0x3e, - 0x61, 0x6d, 0x52, 0xfb, 0x50, 0xd5, 0x45, 0xa2, 0x40, 0x11, 0x3a, 0xcf, 0xa9, 0x9c, 0x78, 0xa3, - 0xef, 0x14, 0x0d, 0xe0, 0x80, 0x36, 0x38, 0x2a, 0xde, 0x85, 0xd2, 0x32, 0xd5, 0xb5, 0xd0, 0xfe, - 0x1e, 0xdd, 0xff, 0xc2, 0xc0, 0x38, 0xba, 0x1a, 0x07, 0x41, 0x49, 0x4c, 0xb8, 0x02, 0x66, 0x75, - 0xb0, 0xef, 0xd1, 0xfa, 0x49, 0xad, 0xf5, 0xd9, 0x4a, 0xf2, 0x18, 0xf5, 0xd2, 0x0b, 0x88, 0x1a, - 0xf1, 0x29, 0x23, 0xb5, 0x08, 0x22, 0x9f, 0x84, 0x78, 0x3f, 0x79, 0x8c, 0x7a, 0xe9, 0xa1, 0x03, - 0x0c, 0x8d, 0x9a, 0x6a, 0xc1, 0x71, 0x09, 0xf9, 0x5a, 0x18, 0x18, 0x46, 0x65, 0x38, 0x29, 0xda, - 0x0d, 0x4b, 0x94, 0x81, 0xba, 0x76, 0x90, 0x0f, 0xe4, 0x62, 0x22, 0xf4, 0x2e, 0xf6, 0x84, 0xde, - 0xb9, 0x78, 0xa1, 0x18, 0x0b, 0xb3, 0x37, 0xc1, 0x84, 0x27, 0x5f, 0x86, 0xb6, 0xcb, 0x85, 
0x21, - 0xcf, 0x29, 0x4a, 0x69, 0x11, 0x50, 0x19, 0x88, 0x58, 0xa6, 0x9f, 0x96, 0x06, 0x82, 0xd7, 0x40, - 0xbe, 0xe9, 0xd5, 0x3a, 0x89, 0xe8, 0xcd, 0x21, 0x80, 0x6b, 0x5e, 0xcd, 0x4f, 0xc0, 0x4d, 0x8a, - 0x1b, 0x8b, 0x5d, 0x24, 0x21, 0xe0, 0x27, 0x60, 0xb2, 0x93, 0xf0, 0x75, 0x75, 0x50, 0x1a, 0x02, - 0x87, 0x34, 0x69, 0x02, 0x72, 0x5a, 0x04, 0xb2, 0xce, 0x09, 0x8a, 0xe0, 0x04, 0x34, 0xd1, 0xa5, - 0x9a, 0xb4, 0xca, 0x70, 0xe8, 0x41, 0xe5, 0xb6, 0x82, 0xee, 0x9c, 0xa0, 0x08, 0xce, 0xfc, 0x31, - 0x07, 0xa6, 0x13, 0xe5, 0xdf, 0x21, 0x9b, 0x46, 0xe5, 0xf1, 0x03, 0x33, 0x8d, 0x82, 0x3b, 0x50, - 0xd3, 0x28, 0xc8, 0x57, 0x62, 0x9a, 0x18, 0xf4, 0x00, 0xd3, 0x3c, 0xcd, 0x01, 0xd8, 0xef, 0xc6, - 0xf0, 0x4b, 0x30, 0xa1, 0x02, 0xe6, 0x4b, 0x26, 0x95, 0x28, 0xbd, 0xeb, 0xfc, 0xa1, 0x51, 0x7b, - 0xea, 0xff, 0xec, 0x9e, 0xea, 0x7f, 0x72, 0x10, 0x7d, 0x52, 0x94, 0x75, 0x52, 0x7b, 0xa5, 0x2f, - 0xc0, 0xa4, 0xdf, 0x69, 0x30, 0xf2, 0xa3, 0x37, 0x18, 0x52, 0xe1, 0x51, 0x6b, 0x11, 0x41, 0xc2, - 0x1a, 0x98, 0xc6, 0xf1, 0x1a, 0x7f, 0x7c, 0xa4, 0xdf, 0x98, 0x13, 0x0d, 0x45, 0xa2, 0xb8, 0x4f, - 0xa0, 0x9a, 0xbf, 0xf7, 0x9a, 0x55, 0xbd, 0xbb, 0x7f, 0xa2, 0x59, 0x0f, 0xaf, 0xcb, 0xfa, 0x4f, - 0x58, 0xf6, 0xe7, 0x2c, 0x98, 0xeb, 0x4d, 0x13, 0x23, 0xb5, 0xd3, 0x0f, 0x07, 0xce, 0x04, 0xb2, - 0x23, 0x5d, 0x3a, 0xea, 0x02, 0xf6, 0x36, 0x17, 0x48, 0x58, 0x22, 0x77, 0xe0, 0x96, 0x30, 0x7f, - 0x49, 0xea, 0x68, 0xf4, 0x91, 0xc3, 0x57, 0x83, 0xfb, 0xf2, 0xd1, 0x94, 0x74, 0x5a, 0x0b, 0xdb, - 0x73, 0x6f, 0xfe, 0xaa, 0xd5, 0xf4, 0x6b, 0x16, 0x1c, 0x1f, 0x54, 0x22, 0xc0, 0x8a, 0x9e, 0xd2, - 0x29, 0x25, 0x95, 0xe2, 0x53, 0xba, 0x17, 0x81, 0x61, 0x0c, 0x68, 0x33, 0x3b, 0x30, 0xb1, 0x41, - 0xde, 0x1d, 0x30, 0x9f, 0xb0, 0x7c, 0xac, 0x66, 0xd3, 0x4d, 0xc3, 0xff, 0xc3, 0xc0, 0x98, 0xdf, - 0x48, 0xa1, 0x41, 0xa9, 0xdc, 0x29, 0xd3, 0xac, 0xdc, 0x2b, 0x9f, 0x66, 0x3d, 0xea, 0xd7, 0x97, - 0x72, 0xad, 0x03, 0xd1, 0xd7, 0x67, 0xe0, 0x54, 0xd2, 0x07, 0xfa, 0x15, 0x76, 0x26, 0x0c, 0x8c, - 0x53, 0x95, 0x34, 0x22, 0x94, 0xce, 0x9f, 0xe6, 0xc8, 0xb9, 0xc3, 0x71, 0x64, 0xf3, 0x9b, 0x2c, - 0x18, 0x97, 0xcd, 0xc9, 0x21, 0x8c, 0x94, 0xae, 0x24, 0x46, 0x4a, 0x67, 0x87, 0x64, 0x38, 0x79, - 0xa3, 0xd4, 0x01, 0xd2, 0xf5, 0x9e, 0x01, 0xd2, 0xb9, 0x5d, 0x91, 0x86, 0x8f, 0x8b, 0xde, 0x03, - 0x53, 0x91, 0x40, 0xf8, 0x96, 0x28, 0x16, 0x75, 0x57, 0x95, 0x91, 0xb6, 0x8d, 0x66, 0x0c, 0x51, - 0x3b, 0x15, 0x51, 0x98, 0x14, 0x14, 0x62, 0x12, 0xf6, 0xc7, 0x2c, 0xa8, 0xfd, 0xf8, 0xc0, 0x74, - 0xaa, 0x4b, 0xdd, 0x1f, 0x13, 0xca, 0xe7, 0x1f, 0x3f, 0x2f, 0x8e, 0x3d, 0x79, 0x5e, 0x1c, 0x7b, - 0xf6, 0xbc, 0x38, 0xf6, 0x75, 0x58, 0xcc, 0x3c, 0x0e, 0x8b, 0x99, 0x27, 0x61, 0x31, 0xf3, 0x2c, - 0x2c, 0x66, 0xfe, 0x08, 0x8b, 0x99, 0xef, 0xfe, 0x2c, 0x8e, 0x7d, 0x9a, 0x6d, 0x2f, 0xfd, 0x1d, - 0x00, 0x00, 0xff, 0xff, 0x3c, 0x26, 0x41, 0xcb, 0x94, 0x19, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v1/generated.proto b/vendor/k8s.io/api/autoscaling/v1/generated.proto deleted file mode 100644 index 5b56b2a..0000000 --- a/vendor/k8s.io/api/autoscaling/v1/generated.proto +++ /dev/null @@ -1,415 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.autoscaling.v1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" - optional string kind = 1; - - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - optional string name = 2; - - // API version of the referent - // +optional - optional string apiVersion = 3; -} - -// ExternalMetricSource indicates how to scale on a metric not associated with -// any Kubernetes object (for example length of queue in cloud -// messaging service, or QPS from loadbalancer running outside of cluster). -message ExternalMetricSource { - // metricName is the name of the metric in question. - optional string metricName = 1; - - // metricSelector is used to identify a specific time series - // within a given metric. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; - - // targetValue is the target value of the metric (as a quantity). - // Mutually exclusive with TargetAverageValue. - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; - - // targetAverageValue is the target per-pod value of global metric (as a quantity). - // Mutually exclusive with TargetValue. - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; -} - -// ExternalMetricStatus indicates the current value of a global metric -// not associated with any Kubernetes object. -message ExternalMetricStatus { - // metricName is the name of a metric used for autoscaling in - // metric system. - optional string metricName = 1; - - // metricSelector is used to identify a specific time series - // within a given metric. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; - - // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; - - // currentAverageValue is the current value of metric averaged over autoscaled pods. - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; -} - -// configuration of a horizontal pod autoscaler. -message HorizontalPodAutoscaler { - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - optional HorizontalPodAutoscalerSpec spec = 2; - - // current information about the autoscaler. 
- // +optional - optional HorizontalPodAutoscalerStatus status = 3; -} - -// HorizontalPodAutoscalerCondition describes the state of -// a HorizontalPodAutoscaler at a certain point. -message HorizontalPodAutoscalerCondition { - // type describes the current condition - optional string type = 1; - - // status is the status of the condition (True, False, Unknown) - optional string status = 2; - - // lastTransitionTime is the last time the condition transitioned from - // one status to another - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // reason is the reason for the condition's last transition. - // +optional - optional string reason = 4; - - // message is a human-readable explanation containing details about - // the transition - // +optional - optional string message = 5; -} - -// list of horizontal pod autoscaler objects. -message HorizontalPodAutoscalerList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // list of horizontal pod autoscaler objects. - repeated HorizontalPodAutoscaler items = 2; -} - -// specification of a horizontal pod autoscaler. -message HorizontalPodAutoscalerSpec { - // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption - // and will set the desired number of pods by using its Scale subresource. - optional CrossVersionObjectReference scaleTargetRef = 1; - - // lower limit for the number of pods that can be set by the autoscaler, default 1. - // +optional - optional int32 minReplicas = 2; - - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - optional int32 maxReplicas = 3; - - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified the default autoscaling policy will be used. - // +optional - optional int32 targetCPUUtilizationPercentage = 4; -} - -// current status of a horizontal pod autoscaler -message HorizontalPodAutoscalerStatus { - // most recent generation observed by this autoscaler. - // +optional - optional int64 observedGeneration = 1; - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; - - // current number of replicas of pods managed by this autoscaler. - optional int32 currentReplicas = 3; - - // desired number of replicas of pods managed by this autoscaler. - optional int32 desiredReplicas = 4; - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - // +optional - optional int32 currentCPUUtilizationPercentage = 5; -} - -// MetricSpec specifies how to scale based on a single metric -// (only `type` and one other matching field should be set at once). -message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. - optional string type = 1; - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). 
- // +optional - optional ObjectMetricSource object = 2; - - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - optional PodsMetricSource pods = 3; - - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - optional ResourceMetricSource resource = 4; - - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - optional ExternalMetricSource external = 5; -} - -// MetricStatus describes the last-read state of a single metric. -message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. - optional string type = 1; - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - optional ObjectMetricStatus object = 2; - - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - optional PodsMetricStatus pods = 3; - - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - optional ResourceMetricStatus resource = 4; - - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - optional ExternalMetricStatus external = 5; -} - -// ObjectMetricSource indicates how to scale on a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -message ObjectMetricSource { - // target is the described Kubernetes object. - optional CrossVersionObjectReference target = 1; - - // metricName is the name of the metric in question. - optional string metricName = 2; - - // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric. - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping - // When unset, just the metricName will be used to gather metrics. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; - - // averageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; -} - -// ObjectMetricStatus indicates the current value of a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -message ObjectMetricStatus { - // target is the described Kubernetes object. - optional CrossVersionObjectReference target = 1; - - // metricName is the name of the metric in question. - optional string metricName = 2; - - // currentValue is the current value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. - // When unset, just the metricName will be used to gather metrics. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; - - // averageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; -} - -// PodsMetricSource indicates how to scale on a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -// The values will be averaged together before being compared to the target -// value. -message PodsMetricSource { - // metricName is the name of the metric in question - optional string metricName = 1; - - // targetAverageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping - // When unset, just the metricName will be used to gather metrics. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; -} - -// PodsMetricStatus indicates the current value of a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -message PodsMetricStatus { - // metricName is the name of the metric in question - optional string metricName = 1; - - // currentAverageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. - // When unset, just the metricName will be used to gather metrics. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; -} - -// ResourceMetricSource indicates how to scale on a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). The values will be averaged -// together before being compared to the target. 
Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. Only one "target" type -// should be set. -message ResourceMetricSource { - // name is the name of the resource in question. - optional string name = 1; - - // targetAverageUtilization is the target value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. - // +optional - optional int32 targetAverageUtilization = 2; - - // targetAverageValue is the target value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; -} - -// ResourceMetricStatus indicates the current value of a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. -message ResourceMetricStatus { - // name is the name of the resource in question. - optional string name = 1; - - // currentAverageUtilization is the current value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. It will only be - // present if `targetAverageValue` was set in the corresponding metric - // specification. - // +optional - optional int32 currentAverageUtilization = 2; - - // currentAverageValue is the current value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; -} - -// Scale represents a scaling request for a resource. -message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - optional ScaleSpec spec = 2; - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - optional ScaleStatus status = 3; -} - -// ScaleSpec describes the attributes of a scale subresource. -message ScaleSpec { - // desired number of instances for the scaled object. - // +optional - optional int32 replicas = 1; -} - -// ScaleStatus represents the current status of a scale subresource. -message ScaleStatus { - // actual number of observed instances of the scaled object. - optional int32 replicas = 1; - - // label query over pods that should match the replicas count. This is same - // as the label selector but in the string format to avoid introspection - // by clients. The string will be in the same format as the query-param syntax. 
- // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - optional string selector = 2; -} - diff --git a/vendor/k8s.io/api/autoscaling/v1/register.go b/vendor/k8s.io/api/autoscaling/v1/register.go deleted file mode 100644 index 8dfe361..0000000 --- a/vendor/k8s.io/api/autoscaling/v1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "autoscaling" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - &Scale{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/autoscaling/v1/types.go b/vendor/k8s.io/api/autoscaling/v1/types.go deleted file mode 100644 index c03af13..0000000 --- a/vendor/k8s.io/api/autoscaling/v1/types.go +++ /dev/null @@ -1,428 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
-type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` -} - -// specification of a horizontal pod autoscaler. -type HorizontalPodAutoscalerSpec struct { - // reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption - // and will set the desired number of pods by using its Scale subresource. - ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // lower limit for the number of pods that can be set by the autoscaler, default 1. - // +optional - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas. - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // target average CPU utilization (represented as a percentage of requested CPU) over all the pods; - // if not specified the default autoscaling policy will be used. - // +optional - TargetCPUUtilizationPercentage *int32 `json:"targetCPUUtilizationPercentage,omitempty" protobuf:"varint,4,opt,name=targetCPUUtilizationPercentage"` -} - -// current status of a horizontal pod autoscaler -type HorizontalPodAutoscalerStatus struct { - // most recent generation observed by this autoscaler. - // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // last time the HorizontalPodAutoscaler scaled the number of pods; - // used by the autoscaler to control how often the number of pods is changed. - // +optional - LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - - // current number of replicas of pods managed by this autoscaler. - CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - - // desired number of replicas of pods managed by this autoscaler. - DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - - // current average CPU utilization over all pods, represented as a percentage of requested CPU, - // e.g. 70 means that an average pod is using now 70% of its requested CPU. - // +optional - CurrentCPUUtilizationPercentage *int32 `json:"currentCPUUtilizationPercentage,omitempty" protobuf:"varint,5,opt,name=currentCPUUtilizationPercentage"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// configuration of a horizontal pod autoscaler. -type HorizontalPodAutoscaler struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // behaviour of autoscaler. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. 
- // +optional - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current information about the autoscaler. - // +optional - Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Scale represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// ScaleSpec describes the attributes of a scale subresource. -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` -} - -// ScaleStatus represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // label query over pods that should match the replicas count. This is same - // as the label selector but in the string format to avoid introspection - // by clients. The string will be in the same format as the query-param syntax. - // More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector string `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` -} - -// the types below are used in the alpha metrics annotation - -// MetricSourceType indicates the type of metric. -type MetricSourceType string - -var ( - // ObjectMetricSourceType is a metric describing a kubernetes object - // (for example, hits-per-second on an Ingress object). - ObjectMetricSourceType MetricSourceType = "Object" - // PodsMetricSourceType is a metric describing each pod in the current scale - // target (for example, transactions-processed-per-second). The values - // will be averaged together before being compared to the target value. - PodsMetricSourceType MetricSourceType = "Pods" - // ResourceMetricSourceType is a resource metric known to Kubernetes, as - // specified in requests and limits, describing each pod in the current - // scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics (the "pods" source). 
- ResourceMetricSourceType MetricSourceType = "Resource" - // ExternalMetricSourceType is a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - ExternalMetricSourceType MetricSourceType = "External" -) - -// MetricSpec specifies how to scale based on a single metric -// (only `type` and one other matching field should be set at once). -type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. - Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` -} - -// ObjectMetricSource indicates how to scale on a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricSource struct { - // target is the described Kubernetes object. - Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` - - // metricName is the name of the metric in question. - MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` - // targetValue is the target value of the metric (as a quantity). - TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric. - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping - // When unset, just the metricName will be used to gather metrics. 
- // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` - // averageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"` -} - -// PodsMetricSource indicates how to scale on a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -// The values will be averaged together before being compared to the target -// value. -type PodsMetricSource struct { - // metricName is the name of the metric in question - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // targetAverageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping - // When unset, just the metricName will be used to gather metrics. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"` -} - -// ResourceMetricSource indicates how to scale on a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). The values will be averaged -// together before being compared to the target. Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. Only one "target" type -// should be set. -type ResourceMetricSource struct { - // name is the name of the resource in question. - Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` - // targetAverageUtilization is the target value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. - // +optional - TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` - // targetAverageValue is the target value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // +optional - TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` -} - -// ExternalMetricSource indicates how to scale on a metric not associated with -// any Kubernetes object (for example length of queue in cloud -// messaging service, or QPS from loadbalancer running outside of cluster). -type ExternalMetricSource struct { - // metricName is the name of the metric in question. - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // metricSelector is used to identify a specific time series - // within a given metric. - // +optional - MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` - // targetValue is the target value of the metric (as a quantity). - // Mutually exclusive with TargetAverageValue. 
- // +optional - TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` - // targetAverageValue is the target per-pod value of global metric (as a quantity). - // Mutually exclusive with TargetValue. - // +optional - TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"` -} - -// MetricStatus describes the last-read state of a single metric. -type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. - Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` -} - -// HorizontalPodAutoscalerConditionType are the valid conditions of -// a HorizontalPodAutoscaler. -type HorizontalPodAutoscalerConditionType string - -var ( - // ScalingActive indicates that the HPA controller is able to scale if necessary: - // it's correctly configured, can fetch the desired metrics, and isn't disabled. - ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" - // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, - // such as being in a backoff window, or being unable to access/update the target scale. - AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" - // ScalingLimited indicates that the calculated scale based on metrics would be above or - // below the range for the HPA, and has thus been capped. - ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" -) - -// HorizontalPodAutoscalerCondition describes the state of -// a HorizontalPodAutoscaler at a certain point. 
-type HorizontalPodAutoscalerCondition struct { - // type describes the current condition - Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` - // status is the status of the condition (True, False, Unknown) - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` - // lastTransitionTime is the last time the condition transitioned from - // one status to another - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // reason is the reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // message is a human-readable explanation containing details about - // the transition - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// ObjectMetricStatus indicates the current value of a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricStatus struct { - // target is the described Kubernetes object. - Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` - - // metricName is the name of the metric in question. - MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` - // currentValue is the current value of the metric (as a quantity). - CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. - // When unset, just the metricName will be used to gather metrics. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` - // averageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"` -} - -// PodsMetricStatus indicates the current value of a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -type PodsMetricStatus struct { - // metricName is the name of the metric in question - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // currentAverageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. - // When unset, just the metricName will be used to gather metrics. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"` -} - -// ResourceMetricStatus indicates the current value of a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. 
-type ResourceMetricStatus struct { - // name is the name of the resource in question. - Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` - // currentAverageUtilization is the current value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. It will only be - // present if `targetAverageValue` was set in the corresponding metric - // specification. - // +optional - CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` - // currentAverageValue is the current value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // It will always be set, regardless of the corresponding metric specification. - CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` -} - -// ExternalMetricStatus indicates the current value of a global metric -// not associated with any Kubernetes object. -type ExternalMetricStatus struct { - // metricName is the name of a metric used for autoscaling in - // metric system. - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // metricSelector is used to identify a specific time series - // within a given metric. - // +optional - MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` - // currentValue is the current value of the metric (as a quantity) - CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` - // currentAverageValue is the current value of metric averaged over autoscaled pods. - // +optional - CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` -} diff --git a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go deleted file mode 100644 index a6e874f..0000000 --- a/vendor/k8s.io/api/autoscaling/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,250 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. 
-var map_CrossVersionObjectReference = map[string]string{ - "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", -} - -func (CrossVersionObjectReference) SwaggerDoc() map[string]string { - return map_CrossVersionObjectReference -} - -var map_ExternalMetricSource = map[string]string{ - "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", - "metricName": "metricName is the name of the metric in question.", - "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", - "targetValue": "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.", - "targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.", -} - -func (ExternalMetricSource) SwaggerDoc() map[string]string { - return map_ExternalMetricSource -} - -var map_ExternalMetricStatus = map[string]string{ - "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", - "metricName": "metricName is the name of a metric used for autoscaling in metric system.", - "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", - "currentValue": "currentValue is the current value of the metric (as a quantity)", - "currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.", -} - -func (ExternalMetricStatus) SwaggerDoc() map[string]string { - return map_ExternalMetricStatus -} - -var map_HorizontalPodAutoscaler = map[string]string{ - "": "configuration of a horizontal pod autoscaler.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "behaviour of autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current information about the autoscaler.", -} - -func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscaler -} - -var map_HorizontalPodAutoscalerCondition = map[string]string{ - "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", - "type": "type describes the current condition", - "status": "status is the status of the condition (True, False, Unknown)", - "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", - "reason": "reason is the reason for the condition's last transition.", - "message": "message is a human-readable explanation containing details about the transition", -} - -func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerCondition -} - -var map_HorizontalPodAutoscalerList = map[string]string{ - "": "list of horizontal pod autoscaler objects.", - "metadata": "Standard list metadata.", - "items": "list of horizontal pod autoscaler objects.", -} - -func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerList -} - -var map_HorizontalPodAutoscalerSpec = map[string]string{ - "": "specification of a horizontal pod autoscaler.", - "scaleTargetRef": "reference to scaled resource; horizontal pod autoscaler will learn the current resource consumption and will set the desired number of pods by using its Scale subresource.", - "minReplicas": "lower limit for the number of pods that can be set by the autoscaler, default 1.", - "maxReplicas": "upper limit for the number of pods that can be set by the autoscaler; cannot be smaller than MinReplicas.", - "targetCPUUtilizationPercentage": "target average CPU utilization (represented as a percentage of requested CPU) over all the pods; if not specified the default autoscaling policy will be used.", -} - -func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerSpec -} - -var map_HorizontalPodAutoscalerStatus = map[string]string{ - "": "current status of a horizontal pod autoscaler", - "observedGeneration": "most recent generation observed by this autoscaler.", - "lastScaleTime": "last time the HorizontalPodAutoscaler scaled the number of pods; used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "current number of replicas of pods managed by this autoscaler.", - "desiredReplicas": "desired number of replicas of pods managed by this autoscaler.", - "currentCPUUtilizationPercentage": "current average CPU utilization over all pods, represented as a percentage of requested CPU, e.g. 70 means that an average pod is using now 70% of its requested CPU.", -} - -func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerStatus -} - -var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. 
It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", -} - -func (MetricSpec) SwaggerDoc() map[string]string { - return map_MetricSpec -} - -var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", -} - -func (MetricStatus) SwaggerDoc() map[string]string { - return map_MetricStatus -} - -var map_ObjectMetricSource = map[string]string{ - "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", - "target": "target is the described Kubernetes object.", - "metricName": "metricName is the name of the metric in question.", - "targetValue": "targetValue is the target value of the metric (as a quantity).", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric. 
When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.", - "averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)", -} - -func (ObjectMetricSource) SwaggerDoc() map[string]string { - return map_ObjectMetricSource -} - -var map_ObjectMetricStatus = map[string]string{ - "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", - "target": "target is the described Kubernetes object.", - "metricName": "metricName is the name of the metric in question.", - "currentValue": "currentValue is the current value of the metric (as a quantity).", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", - "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", -} - -func (ObjectMetricStatus) SwaggerDoc() map[string]string { - return map_ObjectMetricStatus -} - -var map_PodsMetricSource = map[string]string{ - "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "metricName": "metricName is the name of the metric in question", - "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.", -} - -func (PodsMetricSource) SwaggerDoc() map[string]string { - return map_PodsMetricSource -} - -var map_PodsMetricStatus = map[string]string{ - "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", - "metricName": "metricName is the name of the metric in question", - "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", -} - -func (PodsMetricStatus) SwaggerDoc() map[string]string { - return map_PodsMetricStatus -} - -var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", - "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", - "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", -} - -func (ResourceMetricSource) SwaggerDoc() map[string]string { - return map_ResourceMetricSource -} - -var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", - "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. It will only be present if `targetAverageValue` was set in the corresponding metric specification.", - "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", -} - -func (ResourceMetricStatus) SwaggerDoc() map[string]string { - return map_ResourceMetricStatus -} - -var map_Scale = map[string]string{ - "": "Scale represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", -} - -func (Scale) SwaggerDoc() map[string]string { - return map_Scale -} - -var map_ScaleSpec = map[string]string{ - "": "ScaleSpec describes the attributes of a scale subresource.", - "replicas": "desired number of instances for the scaled object.", -} - -func (ScaleSpec) SwaggerDoc() map[string]string { - return map_ScaleSpec -} - -var map_ScaleStatus = map[string]string{ - "": "ScaleStatus represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. This is same as the label selector but in the string format to avoid introspection by clients. The string will be in the same format as the query-param syntax. 
More info about label selectors: http://kubernetes.io/docs/user-guide/labels#label-selectors", -} - -func (ScaleStatus) SwaggerDoc() map[string]string { - return map_ScaleStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go deleted file mode 100644 index 3fda47d..0000000 --- a/vendor/k8s.io/api/autoscaling/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,515 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. -func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { - if in == nil { - return nil - } - out := new(CrossVersionObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { - *out = *in - if in.MetricSelector != nil { - in, out := &in.MetricSelector, &out.MetricSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.TargetValue != nil { - in, out := &in.TargetValue, &out.TargetValue - x := (*in).DeepCopy() - *out = &x - } - if in.TargetAverageValue != nil { - in, out := &in.TargetAverageValue, &out.TargetAverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. -func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { - if in == nil { - return nil - } - out := new(ExternalMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { - *out = *in - if in.MetricSelector != nil { - in, out := &in.MetricSelector, &out.MetricSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - out.CurrentValue = in.CurrentValue.DeepCopy() - if in.CurrentAverageValue != nil { - in, out := &in.CurrentAverageValue, &out.CurrentAverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. 
-func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { - if in == nil { - return nil - } - out := new(ExternalMetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. -func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscaler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. -func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. -func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { - *out = *in - out.ScaleTargetRef = in.ScaleTargetRef - if in.MinReplicas != nil { - in, out := &in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = **in - } - if in.TargetCPUUtilizationPercentage != nil { - in, out := &in.TargetCPUUtilizationPercentage, &out.TargetCPUUtilizationPercentage - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. 
-func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { - *out = *in - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - if in.LastScaleTime != nil { - in, out := &in.LastScaleTime, &out.LastScaleTime - *out = (*in).DeepCopy() - } - if in.CurrentCPUUtilizationPercentage != nil { - in, out := &in.CurrentCPUUtilizationPercentage, &out.CurrentCPUUtilizationPercentage - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. -func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricSource) - (*in).DeepCopyInto(*out) - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricSource) - (*in).DeepCopyInto(*out) - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricSource) - (*in).DeepCopyInto(*out) - } - if in.External != nil { - in, out := &in.External, &out.External - *out = new(ExternalMetricSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. -func (in *MetricSpec) DeepCopy() *MetricSpec { - if in == nil { - return nil - } - out := new(MetricSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricStatus) - (*in).DeepCopyInto(*out) - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricStatus) - (*in).DeepCopyInto(*out) - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricStatus) - (*in).DeepCopyInto(*out) - } - if in.External != nil { - in, out := &in.External, &out.External - *out = new(ExternalMetricStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. -func (in *MetricStatus) DeepCopy() *MetricStatus { - if in == nil { - return nil - } - out := new(MetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { - *out = *in - out.Target = in.Target - out.TargetValue = in.TargetValue.DeepCopy() - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.AverageValue != nil { - in, out := &in.AverageValue, &out.AverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. -func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { - if in == nil { - return nil - } - out := new(ObjectMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { - *out = *in - out.Target = in.Target - out.CurrentValue = in.CurrentValue.DeepCopy() - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.AverageValue != nil { - in, out := &in.AverageValue, &out.AverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. -func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { - if in == nil { - return nil - } - out := new(ObjectMetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { - *out = *in - out.TargetAverageValue = in.TargetAverageValue.DeepCopy() - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. -func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { - if in == nil { - return nil - } - out := new(PodsMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { - *out = *in - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. -func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { - if in == nil { - return nil - } - out := new(PodsMetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { - *out = *in - if in.TargetAverageUtilization != nil { - in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization - *out = new(int32) - **out = **in - } - if in.TargetAverageValue != nil { - in, out := &in.TargetAverageValue, &out.TargetAverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. 
-func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { - if in == nil { - return nil - } - out := new(ResourceMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { - *out = *in - if in.CurrentAverageUtilization != nil { - in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization - *out = new(int32) - **out = **in - } - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. -func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { - if in == nil { - return nil - } - out := new(ResourceMetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Scale) DeepCopyInto(out *Scale) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - out.Status = in.Status - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. -func (in *Scale) DeepCopy() *Scale { - if in == nil { - return nil - } - out := new(Scale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Scale) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. -func (in *ScaleSpec) DeepCopy() *ScaleSpec { - if in == nil { - return nil - } - out := new(ScaleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. -func (in *ScaleStatus) DeepCopy() *ScaleStatus { - if in == nil { - return nil - } - out := new(ScaleStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go b/vendor/k8s.io/api/autoscaling/v2beta1/doc.go deleted file mode 100644 index da9789e..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -package v2beta1 // import "k8s.io/api/autoscaling/v2beta1" diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go deleted file mode 100644 index bee9412..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.pb.go +++ /dev/null @@ -1,4326 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto -// DO NOT EDIT! - -/* - Package v2beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricSpec - MetricStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ -package v2beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } -func (*CrossVersionObjectReference) ProtoMessage() {} -func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } -func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} -func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } -func (*HorizontalPodAutoscalerList) ProtoMessage() {} -func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } -func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} -func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} -} - -func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } -func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} -func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} -} - -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *PodsMetricSource) Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - 
-func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta1.CrossVersionObjectReference") - proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricSource") - proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ExternalMetricStatus") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerCondition") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2beta1.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2beta1.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.MetricStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta1.ResourceMetricStatus") -} -func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - return i, nil -} - -func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n1, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.TargetValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i 
+= n2 - } - if m.TargetAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n3, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - return i, nil -} - -func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - if m.MetricSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MetricSelector.Size())) - n4, err := m.MetricSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n5, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.CurrentAverageValue != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n6, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} - -func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n8, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n9, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil -} - -func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n10, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n12, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) - } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n13, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MetricSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n14, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - 
i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n15, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n16, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n17, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - return i, nil -} - -func (m *MetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n18, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n19, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n20, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n21, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - } - return i, nil -} - -func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n22, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n23, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n24, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n25, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - } - return i, nil -} - -func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := 
m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n27, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n28, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - } - if m.AverageValue != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n29, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - } - return i, nil -} - -func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n30, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 - if m.Selector != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n31, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - } - return i, nil -} - -func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodsMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MetricName))) - i += copy(dAtA[i:], m.MetricName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n32, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - if m.Selector != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n33, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - } - return i, nil -} - -func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.TargetAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetAverageValue.Size())) - n34, err := m.TargetAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - } - return i, nil -} - -func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.CurrentAverageUtilization != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CurrentAverageUtilization)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentAverageValue.Size())) - n35, err := m.CurrentAverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CrossVersionObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExternalMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - if m.MetricSelector != nil { - l = m.MetricSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TargetValue != nil { - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.TargetAverageValue != nil { - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ExternalMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - if m.MetricSelector != nil { - l = m.MetricSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.CurrentAverageValue != nil { - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *HorizontalPodAutoscaler) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return 
n -} - -func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - var l int - _ = l - l = m.ScaleTargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.MinReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MinReplicas)) - } - n += 1 + sovGenerated(uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - l = m.LastScaleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.DesiredReplicas)) - if len(m.CurrentMetrics) > 0 { - for _, e := range m.CurrentMetrics { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *MetricSpec) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.External != nil { - l = m.External.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *MetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.External != nil { - l = m.External.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ObjectMetricSource) Size() (n int) { - var l int - _ = l - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AverageValue != nil { - l = m.AverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ObjectMetricStatus) Size() (n int) { - var l int - _ = l - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AverageValue != nil { - l = m.AverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodsMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodsMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.MetricName) - n += 1 + l + sovGenerated(uint64(l)) - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.TargetAverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.TargetAverageUtilization)) - } - if m.TargetAverageValue != nil { - l = m.TargetAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ResourceMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.CurrentAverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.CurrentAverageUtilization)) - } - l = m.CurrentAverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CrossVersionObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CrossVersionObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalMetricSource{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `TargetValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalMetricStatus{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `MetricSelector:` + strings.Replace(fmt.Sprintf("%v", this.MetricSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `CurrentAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.CurrentAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscaler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, - `Type:` + fmt.Sprintf("%v", 
this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, - `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, - `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, - `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, - `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricSpec{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", "ExternalMetricSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricStatus{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", 
"ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectMetricSource{`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetValue:` + strings.Replace(strings.Replace(this.TargetValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectMetricStatus{`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentValue:` + strings.Replace(strings.Replace(this.CurrentValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodsMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodsMetricSource{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `TargetAverageValue:` + strings.Replace(strings.Replace(this.TargetAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodsMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodsMetricStatus{`, - `MetricName:` + fmt.Sprintf("%v", this.MetricName) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricSource{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `TargetAverageUtilization:` + valueToStringGenerated(this.TargetAverageUtilization) + `,`, - `TargetAverageValue:` + strings.Replace(fmt.Sprintf("%v", this.TargetAverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricStatus) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&ResourceMetricStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `CurrentAverageUtilization:` + valueToStringGenerated(this.CurrentAverageUtilization) + `,`, - `CurrentAverageValue:` + strings.Replace(strings.Replace(this.CurrentAverageValue.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*ExternalMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetValue == nil { - m.TargetValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MetricSelector == nil { - m.MetricSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.MetricSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex 
> l { - return io.ErrUnexpectedEOF - } - if m.CurrentAverageValue == nil { - m.CurrentAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA 
[]byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, HorizontalPodAutoscaler{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.MinReplicas = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) - } - m.MaxReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, MetricSpec{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - m.CurrentReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) - } - m.DesiredReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) - if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, HorizontalPodAutoscalerCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil 
-} -func (m *MetricSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricSource{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricSource{} - } - if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricSource{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.External == nil { - m.External = &ExternalMetricSource{} - } - if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricStatus{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricStatus{} - } - if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricStatus{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.External == nil { - m.External = &ExternalMetricStatus{} - } - if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d 
for field MetricName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MetricName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = 
k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TargetAverageUtilization = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetAverageValue == nil { - m.TargetAverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.TargetAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CurrentAverageUtilization = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentAverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentAverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1475 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0xcb, 0x8f, 0x1b, 0x45, - 0x13, 0x5f, 0x3f, 0x76, 0xb3, 0x69, 0x6f, 0x76, 0xf7, 0xeb, 0x44, 0x89, 0xb3, 0xf9, 0x62, 0xaf, - 0x2c, 0x84, 0x42, 0x44, 0x66, 0x12, 0xb3, 0x3c, 0x24, 0x84, 0xc4, 0xda, 0x40, 0x12, 0xb1, 0x4e, - 0x42, 0xef, 0x26, 0x42, 0x90, 0x20, 0xda, 0x33, 0x1d, 0x6f, 0xb3, 0x9e, 0x19, 0x6b, 0xba, 0x6d, - 0x65, 0x83, 0x90, 0xb8, 0x70, 0xe7, 0x02, 0x67, 0x90, 0x38, 0x21, 0xb8, 0xc2, 0x99, 0x5b, 0x8e, - 0x39, 0x26, 0x02, 0x59, 0x64, 0xf8, 0x2f, 0x72, 0x42, 0xfd, 0x98, 0xf1, 0x8c, 0x1f, 0x6b, 0xc7, - 0x38, 
0xe1, 0x71, 0x9b, 0xee, 0xaa, 0xfa, 0x55, 0x4f, 0xfd, 0xaa, 0xab, 0xbb, 0x1a, 0x5c, 0xdc, - 0x7b, 0x8d, 0x19, 0xd4, 0x33, 0xf7, 0xda, 0x75, 0xe2, 0xbb, 0x84, 0x13, 0x66, 0x76, 0x88, 0x6b, - 0x7b, 0xbe, 0xa9, 0x05, 0xb8, 0x45, 0x4d, 0xdc, 0xe6, 0x1e, 0xb3, 0x70, 0x93, 0xba, 0x0d, 0xb3, - 0x53, 0xae, 0x13, 0x8e, 0x2f, 0x98, 0x0d, 0xe2, 0x12, 0x1f, 0x73, 0x62, 0x1b, 0x2d, 0xdf, 0xe3, - 0x1e, 0x2c, 0x28, 0x7d, 0x03, 0xb7, 0xa8, 0x11, 0xd3, 0x37, 0xb4, 0xfe, 0xda, 0xb9, 0x06, 0xe5, - 0xbb, 0xed, 0xba, 0x61, 0x79, 0x8e, 0xd9, 0xf0, 0x1a, 0x9e, 0x29, 0xcd, 0xea, 0xed, 0xdb, 0x72, - 0x24, 0x07, 0xf2, 0x4b, 0xc1, 0xad, 0x95, 0x62, 0xee, 0x2d, 0xcf, 0x27, 0x66, 0x67, 0xc0, 0xe5, - 0xda, 0x46, 0x4f, 0xc7, 0xc1, 0xd6, 0x2e, 0x75, 0x89, 0xbf, 0x6f, 0xb6, 0xf6, 0x1a, 0xd2, 0xc8, - 0x27, 0xcc, 0x6b, 0xfb, 0x16, 0x79, 0x22, 0x2b, 0x66, 0x3a, 0x84, 0xe3, 0x61, 0xbe, 0xcc, 0x51, - 0x56, 0x7e, 0xdb, 0xe5, 0xd4, 0x19, 0x74, 0xf3, 0xca, 0x38, 0x03, 0x66, 0xed, 0x12, 0x07, 0xf7, - 0xdb, 0x95, 0xbe, 0x4a, 0x81, 0x53, 0x55, 0xdf, 0x63, 0xec, 0x06, 0xf1, 0x19, 0xf5, 0xdc, 0xab, - 0xf5, 0x4f, 0x88, 0xc5, 0x11, 0xb9, 0x4d, 0x7c, 0xe2, 0x5a, 0x04, 0xae, 0x83, 0xec, 0x1e, 0x75, - 0xed, 0x7c, 0x6a, 0x3d, 0x75, 0xe6, 0x70, 0x65, 0xe9, 0x5e, 0xb7, 0x38, 0x17, 0x74, 0x8b, 0xd9, - 0x77, 0xa9, 0x6b, 0x23, 0x29, 0x11, 0x1a, 0x2e, 0x76, 0x48, 0x3e, 0x9d, 0xd4, 0xb8, 0x82, 0x1d, - 0x82, 0xa4, 0x04, 0x96, 0x01, 0xc0, 0x2d, 0xaa, 0x1d, 0xe4, 0x33, 0x52, 0x0f, 0x6a, 0x3d, 0xb0, - 0x79, 0xed, 0xb2, 0x96, 0xa0, 0x98, 0x56, 0xe9, 0xeb, 0x0c, 0x38, 0xf6, 0xf6, 0x1d, 0x4e, 0x7c, - 0x17, 0x37, 0x6b, 0x84, 0xfb, 0xd4, 0xda, 0x96, 0xf1, 0x15, 0x60, 0x8e, 0x1c, 0x0b, 0x07, 0x7a, - 0x59, 0x11, 0x58, 0x2d, 0x92, 0xa0, 0x98, 0x16, 0xf4, 0xc0, 0xb2, 0x1a, 0x6d, 0x93, 0x26, 0xb1, - 0xb8, 0xe7, 0xcb, 0xc5, 0xe6, 0xca, 0x2f, 0x19, 0xbd, 0x2c, 0x8a, 0xa2, 0x66, 0xb4, 0xf6, 0x1a, - 0x62, 0x82, 0x19, 0x82, 0x1c, 0xa3, 0x73, 0xc1, 0xd8, 0xc2, 0x75, 0xd2, 0x0c, 0x4d, 0x2b, 0x30, - 0xe8, 0x16, 0x97, 0x6b, 0x09, 0x38, 0xd4, 0x07, 0x0f, 0x31, 0xc8, 0x71, 0xec, 0x37, 0x08, 0xbf, - 0x81, 0x9b, 0x6d, 0x22, 0x7f, 0x39, 0x57, 0x36, 0x0e, 0xf2, 0x66, 0x84, 0x09, 0x64, 0xbc, 0xd7, - 0xc6, 0x2e, 0xa7, 0x7c, 0xbf, 0xb2, 0x12, 0x74, 0x8b, 0xb9, 0x9d, 0x1e, 0x0c, 0x8a, 0x63, 0xc2, - 0x0e, 0x80, 0x6a, 0xb8, 0xd9, 0x21, 0x3e, 0x6e, 0x10, 0xe5, 0x29, 0x3b, 0x95, 0xa7, 0xe3, 0x41, - 0xb7, 0x08, 0x77, 0x06, 0xd0, 0xd0, 0x10, 0x0f, 0xa5, 0x6f, 0x06, 0x89, 0xe1, 0x98, 0xb7, 0xd9, - 0xbf, 0x83, 0x98, 0x5d, 0xb0, 0x64, 0xb5, 0x7d, 0x9f, 0xb8, 0x7f, 0x89, 0x99, 0x63, 0xfa, 0xb7, - 0x96, 0xaa, 0x31, 0x2c, 0x94, 0x40, 0x86, 0xfb, 0xe0, 0xa8, 0x1e, 0xcf, 0x80, 0xa0, 0x13, 0x41, - 0xb7, 0x78, 0xb4, 0x3a, 0x08, 0x87, 0x86, 0xf9, 0x28, 0xfd, 0x92, 0x06, 0x27, 0x2e, 0x79, 0x3e, - 0xbd, 0xeb, 0xb9, 0x1c, 0x37, 0xaf, 0x79, 0xf6, 0xa6, 0x2e, 0x90, 0xc4, 0x87, 0x1f, 0x83, 0x45, - 0x11, 0x3d, 0x1b, 0x73, 0x2c, 0x39, 0xca, 0x95, 0xcf, 0x4f, 0x16, 0x6b, 0x55, 0x18, 0x6a, 0x84, - 0xe3, 0x1e, 0xab, 0xbd, 0x39, 0x14, 0xa1, 0xc2, 0x5b, 0x20, 0xcb, 0x5a, 0xc4, 0xd2, 0x4c, 0xbe, - 0x6e, 0x1c, 0x5c, 0xa8, 0x8d, 0x11, 0x0b, 0xdd, 0x6e, 0x11, 0xab, 0x57, 0x4c, 0xc4, 0x08, 0x49, - 0x58, 0x48, 0xc0, 0x02, 0x93, 0x09, 0xa7, 0xb9, 0x7b, 0x63, 0x5a, 0x07, 0x12, 0xa4, 0xb2, 0xac, - 0x5d, 0x2c, 0xa8, 0x31, 0xd2, 0xe0, 0xa5, 0x2f, 0x32, 0x60, 0x7d, 0x84, 0x65, 0xd5, 0x73, 0x6d, - 0xca, 0xa9, 0xe7, 0xc2, 0x4b, 0x20, 0xcb, 0xf7, 0x5b, 0x61, 0xb2, 0x6f, 0x84, 0xab, 0xdd, 0xd9, - 0x6f, 0x91, 0xc7, 0xdd, 0xe2, 0x73, 0xe3, 0xec, 0x85, 0x1e, 0x92, 0x08, 0x70, 0x2b, 0xfa, 0xab, - 0x74, 0x02, 0x4b, 0x2f, 0xeb, 
0x71, 0xb7, 0x38, 0xe4, 0x84, 0x32, 0x22, 0xa4, 0xe4, 0xe2, 0x45, - 0x6d, 0x68, 0x62, 0xc6, 0x77, 0x7c, 0xec, 0x32, 0xe5, 0x89, 0x3a, 0x61, 0xae, 0x9f, 0x9d, 0x8c, - 0x6e, 0x61, 0x51, 0x59, 0xd3, 0xab, 0x80, 0x5b, 0x03, 0x68, 0x68, 0x88, 0x07, 0xf8, 0x3c, 0x58, - 0xf0, 0x09, 0x66, 0x9e, 0x2b, 0xd3, 0xfc, 0x70, 0x2f, 0xb8, 0x48, 0xce, 0x22, 0x2d, 0x85, 0x2f, - 0x80, 0x43, 0x0e, 0x61, 0x0c, 0x37, 0x48, 0x7e, 0x5e, 0x2a, 0xae, 0x68, 0xc5, 0x43, 0x35, 0x35, - 0x8d, 0x42, 0x79, 0xe9, 0x61, 0x0a, 0x9c, 0x1a, 0x11, 0xc7, 0x2d, 0xca, 0x38, 0xbc, 0x39, 0x90, - 0xcf, 0xc6, 0x84, 0xb5, 0x83, 0x32, 0x95, 0xcd, 0xab, 0xda, 0xf7, 0x62, 0x38, 0x13, 0xcb, 0xe5, - 0x9b, 0x60, 0x9e, 0x72, 0xe2, 0x08, 0x56, 0x32, 0x67, 0x72, 0xe5, 0x57, 0xa7, 0xcc, 0xb5, 0xca, - 0x11, 0xed, 0x63, 0xfe, 0xb2, 0x40, 0x43, 0x0a, 0xb4, 0xf4, 0x6b, 0x7a, 0xe4, 0xbf, 0x89, 0x84, - 0x87, 0x9f, 0x82, 0x65, 0x39, 0x52, 0x95, 0x19, 0x91, 0xdb, 0xfa, 0x0f, 0xc7, 0xee, 0xa9, 0x03, - 0x0e, 0xf4, 0xca, 0x71, 0xbd, 0x94, 0xe5, 0xed, 0x04, 0x34, 0xea, 0x73, 0x05, 0x2f, 0x80, 0x9c, - 0x43, 0x5d, 0x44, 0x5a, 0x4d, 0x6a, 0x61, 0x95, 0x96, 0xf3, 0xea, 0x48, 0xaa, 0xf5, 0xa6, 0x51, - 0x5c, 0x07, 0xbe, 0x0c, 0x72, 0x0e, 0xbe, 0x13, 0x99, 0x64, 0xa4, 0xc9, 0x51, 0xed, 0x2f, 0x57, - 0xeb, 0x89, 0x50, 0x5c, 0x0f, 0x5e, 0x17, 0xd9, 0x20, 0xaa, 0x34, 0xcb, 0x67, 0x65, 0x98, 0xcf, - 0x8e, 0xfb, 0x3f, 0x5d, 0xe4, 0x45, 0x89, 0x88, 0x65, 0x8e, 0x84, 0x40, 0x21, 0x56, 0xe9, 0xa7, - 0x2c, 0x38, 0x7d, 0xe0, 0xde, 0x87, 0xef, 0x00, 0xe8, 0xd5, 0x19, 0xf1, 0x3b, 0xc4, 0xbe, 0xa8, - 0xae, 0x45, 0xe2, 0x7e, 0x22, 0x62, 0x9c, 0x51, 0x47, 0xe2, 0xd5, 0x01, 0x29, 0x1a, 0x62, 0x01, - 0x2d, 0x70, 0x44, 0x6c, 0x06, 0x15, 0x50, 0xaa, 0xaf, 0x42, 0x4f, 0xb6, 0xd3, 0xfe, 0x17, 0x74, - 0x8b, 0x47, 0xb6, 0xe2, 0x20, 0x28, 0x89, 0x09, 0x37, 0xc1, 0x8a, 0xae, 0xf5, 0x7d, 0x01, 0x3e, - 0xa1, 0x23, 0xb0, 0x52, 0x4d, 0x8a, 0x51, 0xbf, 0xbe, 0x80, 0xb0, 0x09, 0xa3, 0x3e, 0xb1, 0x23, - 0x88, 0x6c, 0x12, 0xe2, 0xad, 0xa4, 0x18, 0xf5, 0xeb, 0xc3, 0x26, 0x58, 0xd6, 0xa8, 0x3a, 0xde, - 0xf9, 0x79, 0x49, 0xd9, 0x8b, 0x13, 0x52, 0xa6, 0x8a, 0x6e, 0x94, 0x83, 0xd5, 0x04, 0x16, 0xea, - 0xc3, 0x86, 0x1c, 0x00, 0x2b, 0x2c, 0x71, 0x2c, 0xbf, 0x20, 0x3d, 0xbd, 0x39, 0xe5, 0x1e, 0x8c, - 0x6a, 0x65, 0xef, 0xf8, 0x8a, 0xa6, 0x18, 0x8a, 0xf9, 0x29, 0x7d, 0x9f, 0x01, 0xa0, 0x97, 0x61, - 0x70, 0x23, 0x51, 0xe4, 0xd7, 0xfb, 0x8a, 0xfc, 0x6a, 0xfc, 0x72, 0x1a, 0x2b, 0xe8, 0x37, 0xc0, - 0x82, 0x27, 0x77, 0x9e, 0x4e, 0x86, 0xf2, 0xb8, 0x65, 0x47, 0x67, 0x69, 0x84, 0x56, 0x01, 0xa2, - 0x74, 0xea, 0xfd, 0xab, 0xd1, 0xe0, 0x15, 0x90, 0x6d, 0x79, 0x76, 0x78, 0xf8, 0x9d, 0x1f, 0x87, - 0x7a, 0xcd, 0xb3, 0x59, 0x02, 0x73, 0x51, 0xac, 0x5d, 0xcc, 0x22, 0x89, 0x03, 0x3f, 0x02, 0x8b, - 0xe1, 0x75, 0x43, 0xdf, 0x4d, 0x36, 0xc6, 0x61, 0x22, 0xad, 0x9f, 0xc0, 0x5d, 0x12, 0x15, 0x34, - 0x94, 0xa0, 0x08, 0x53, 0xe0, 0x13, 0x7d, 0x5b, 0x94, 0xb5, 0x7e, 0x02, 0xfc, 0x61, 0xd7, 0x7e, - 0x85, 0x1f, 0x4a, 0x50, 0x84, 0x59, 0xfa, 0x21, 0x03, 0x96, 0x12, 0xd7, 0xd0, 0xbf, 0x83, 0x2e, - 0x95, 0xd5, 0xb3, 0xa5, 0x4b, 0x61, 0xce, 0x9e, 0x2e, 0x85, 0xfb, 0xf4, 0xe8, 0x8a, 0xe1, 0x0f, - 0xa1, 0xeb, 0x61, 0x06, 0xc0, 0xc1, 0x4c, 0x87, 0x16, 0x58, 0x50, 0xad, 0xc6, 0x2c, 0x4e, 0xb8, - 0xe8, 0xd6, 0xa1, 0x0f, 0x33, 0x0d, 0xdd, 0xd7, 0xa0, 0xa4, 0x27, 0x6a, 0x50, 0xc8, 0x2c, 0x1a, - 0xb9, 0xe8, 0x08, 0x1c, 0xd9, 0xcc, 0xdd, 0x02, 0x8b, 0x2c, 0xec, 0x80, 0xb2, 0xd3, 0x77, 0x40, - 0x32, 0xea, 0x51, 0xef, 0x13, 0x41, 0x42, 0x1b, 0x2c, 0xe1, 0x78, 0x13, 0x32, 0x3f, 0xd5, 0x6f, - 0xac, 0x8a, 0x8e, 0x27, 0xd1, 0x7d, 0x24, 0x50, 0x4b, 
0xbf, 0xf5, 0x73, 0xab, 0x36, 0xe4, 0x3f, - 0x96, 0xdb, 0x67, 0xd7, 0x0b, 0xfe, 0x27, 0xe8, 0xfd, 0x36, 0x0d, 0x56, 0xfb, 0x8f, 0x93, 0xa9, - 0x9a, 0xfe, 0xbb, 0x43, 0x5f, 0x2e, 0xd2, 0x53, 0x2d, 0x3a, 0xea, 0x50, 0x26, 0x7b, 0xbd, 0x48, - 0x30, 0x91, 0x99, 0x39, 0x13, 0xa5, 0xef, 0x92, 0x31, 0x9a, 0xfe, 0x61, 0xe4, 0xb3, 0xe1, 0xaf, - 0x07, 0xd3, 0x05, 0xe9, 0x94, 0x76, 0x36, 0xf1, 0x0b, 0xc2, 0xd3, 0x0e, 0xd3, 0x8f, 0x69, 0x70, - 0x6c, 0xd8, 0x2d, 0x02, 0x56, 0xf5, 0x5b, 0xa2, 0x0a, 0x92, 0x19, 0x7f, 0x4b, 0x7c, 0xdc, 0x2d, - 0x16, 0x87, 0xb4, 0xc0, 0x21, 0x4c, 0xec, 0xb9, 0xf1, 0x7d, 0x90, 0x4f, 0x30, 0x7f, 0x9d, 0xd3, - 0x26, 0xbd, 0xab, 0x2e, 0xf7, 0xaa, 0x8d, 0xf9, 0x7f, 0xd0, 0x2d, 0xe6, 0x77, 0x46, 0xe8, 0xa0, - 0x91, 0xd6, 0x23, 0xde, 0xdc, 0x32, 0x4f, 0xfd, 0xcd, 0xed, 0xe7, 0xc1, 0x78, 0xa9, 0xd4, 0x9a, - 0x49, 0xbc, 0x3e, 0x04, 0x27, 0x93, 0x39, 0x30, 0x18, 0xb0, 0xd3, 0x41, 0xb7, 0x78, 0xb2, 0x3a, - 0x4a, 0x09, 0x8d, 0xb6, 0x1f, 0x95, 0xc8, 0x99, 0x67, 0x93, 0xc8, 0x95, 0x73, 0xf7, 0x1e, 0x15, - 0xe6, 0xee, 0x3f, 0x2a, 0xcc, 0x3d, 0x78, 0x54, 0x98, 0xfb, 0x3c, 0x28, 0xa4, 0xee, 0x05, 0x85, - 0xd4, 0xfd, 0xa0, 0x90, 0x7a, 0x10, 0x14, 0x52, 0xbf, 0x07, 0x85, 0xd4, 0x97, 0x7f, 0x14, 0xe6, - 0x3e, 0x38, 0xa4, 0x8f, 0x9e, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x05, 0x26, 0x31, 0x5d, 0x9f, - 0x18, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto deleted file mode 100644 index 04bc0ed..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta1/generated.proto +++ /dev/null @@ -1,397 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.autoscaling.v2beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v2beta1"; - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -message CrossVersionObjectReference { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" - optional string kind = 1; - - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - optional string name = 2; - - // API version of the referent - // +optional - optional string apiVersion = 3; -} - -// ExternalMetricSource indicates how to scale on a metric not associated with -// any Kubernetes object (for example length of queue in cloud -// messaging service, or QPS from loadbalancer running outside of cluster). -// Exactly one "target" type should be set. 
-message ExternalMetricSource { - // metricName is the name of the metric in question. - optional string metricName = 1; - - // metricSelector is used to identify a specific time series - // within a given metric. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; - - // targetValue is the target value of the metric (as a quantity). - // Mutually exclusive with TargetAverageValue. - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; - - // targetAverageValue is the target per-pod value of global metric (as a quantity). - // Mutually exclusive with TargetValue. - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 4; -} - -// ExternalMetricStatus indicates the current value of a global metric -// not associated with any Kubernetes object. -message ExternalMetricStatus { - // metricName is the name of a metric used for autoscaling in - // metric system. - optional string metricName = 1; - - // metricSelector is used to identify a specific time series - // within a given metric. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector metricSelector = 2; - - // currentValue is the current value of the metric (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; - - // currentAverageValue is the current value of metric averaged over autoscaled pods. - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 4; -} - -// HorizontalPodAutoscaler is the configuration for a horizontal pod -// autoscaler, which automatically manages the replica count of any resource -// implementing the scale subresource based on the metrics specified. -message HorizontalPodAutoscaler { - // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - optional HorizontalPodAutoscalerSpec spec = 2; - - // status is the current information about the autoscaler. - // +optional - optional HorizontalPodAutoscalerStatus status = 3; -} - -// HorizontalPodAutoscalerCondition describes the state of -// a HorizontalPodAutoscaler at a certain point. -message HorizontalPodAutoscalerCondition { - // type describes the current condition - optional string type = 1; - - // status is the status of the condition (True, False, Unknown) - optional string status = 2; - - // lastTransitionTime is the last time the condition transitioned from - // one status to another - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // reason is the reason for the condition's last transition. - // +optional - optional string reason = 4; - - // message is a human-readable explanation containing details about - // the transition - // +optional - optional string message = 5; -} - -// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. -message HorizontalPodAutoscalerList { - // metadata is the standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of horizontal pod autoscaler objects. 
- repeated HorizontalPodAutoscaler items = 2; -} - -// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. -message HorizontalPodAutoscalerSpec { - // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics - // should be collected, as well as to actually change the replica count. - optional CrossVersionObjectReference scaleTargetRef = 1; - - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. - // +optional - optional int32 minReplicas = 2; - - // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. - // It cannot be less that minReplicas. - optional int32 maxReplicas = 3; - - // metrics contains the specifications for which to use to calculate the - // desired replica count (the maximum replica count across all metrics will - // be used). The desired replica count is calculated multiplying the - // ratio between the target value and the current value by the current - // number of pods. Ergo, metrics used must decrease as the pod count is - // increased, and vice-versa. See the individual metric source types for - // more information about how each type of metric must respond. - // +optional - repeated MetricSpec metrics = 4; -} - -// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. -message HorizontalPodAutoscalerStatus { - // observedGeneration is the most recent generation observed by this autoscaler. - // +optional - optional int64 observedGeneration = 1; - - // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, - // used by the autoscaler to control how often the number of pods is changed. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2; - - // currentReplicas is current number of replicas of pods managed by this autoscaler, - // as last seen by the autoscaler. - optional int32 currentReplicas = 3; - - // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, - // as last calculated by the autoscaler. - optional int32 desiredReplicas = 4; - - // currentMetrics is the last read state of the metrics used by this autoscaler. - // +optional - repeated MetricStatus currentMetrics = 5; - - // conditions is the set of conditions required for this autoscaler to scale its target, - // and indicates whether or not those conditions are met. - repeated HorizontalPodAutoscalerCondition conditions = 6; -} - -// MetricSpec specifies how to scale based on a single metric -// (only `type` and one other matching field should be set at once). -message MetricSpec { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. - optional string type = 1; - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - optional ObjectMetricSource object = 2; - - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - optional PodsMetricSource pods = 3; - - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. 
CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - optional ResourceMetricSource resource = 4; - - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - optional ExternalMetricSource external = 5; -} - -// MetricStatus describes the last-read state of a single metric. -message MetricStatus { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. - optional string type = 1; - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - optional ObjectMetricStatus object = 2; - - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - optional PodsMetricStatus pods = 3; - - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - optional ResourceMetricStatus resource = 4; - - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - optional ExternalMetricStatus external = 5; -} - -// ObjectMetricSource indicates how to scale on a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -message ObjectMetricSource { - // target is the described Kubernetes object. - optional CrossVersionObjectReference target = 1; - - // metricName is the name of the metric in question. - optional string metricName = 2; - - // targetValue is the target value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetValue = 3; - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping - // When unset, just the metricName will be used to gather metrics. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; - - // averageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; -} - -// ObjectMetricStatus indicates the current value of a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -message ObjectMetricStatus { - // target is the described Kubernetes object. 
- optional CrossVersionObjectReference target = 1; - - // metricName is the name of the metric in question. - optional string metricName = 2; - - // currentValue is the current value of the metric (as a quantity). - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentValue = 3; - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. - // When unset, just the metricName will be used to gather metrics. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; - - // averageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 5; -} - -// PodsMetricSource indicates how to scale on a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -// The values will be averaged together before being compared to the target -// value. -message PodsMetricSource { - // metricName is the name of the metric in question - optional string metricName = 1; - - // targetAverageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 2; - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping - // When unset, just the metricName will be used to gather metrics. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; -} - -// PodsMetricStatus indicates the current value of a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -message PodsMetricStatus { - // metricName is the name of the metric in question - optional string metricName = 1; - - // currentAverageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 2; - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. - // When unset, just the metricName will be used to gather metrics. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 3; -} - -// ResourceMetricSource indicates how to scale on a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). The values will be averaged -// together before being compared to the target. Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. Only one "target" type -// should be set. -message ResourceMetricSource { - // name is the name of the resource in question. - optional string name = 1; - - // targetAverageUtilization is the target value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. 
- // +optional - optional int32 targetAverageUtilization = 2; - - // targetAverageValue is the target value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity targetAverageValue = 3; -} - -// ResourceMetricStatus indicates the current value of a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. -message ResourceMetricStatus { - // name is the name of the resource in question. - optional string name = 1; - - // currentAverageUtilization is the current value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. It will only be - // present if `targetAverageValue` was set in the corresponding metric - // specification. - // +optional - optional int32 currentAverageUtilization = 2; - - // currentAverageValue is the current value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // It will always be set, regardless of the corresponding metric specification. - optional k8s.io.apimachinery.pkg.api.resource.Quantity currentAverageValue = 3; -} - diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/register.go b/vendor/k8s.io/api/autoscaling/v2beta1/register.go deleted file mode 100644 index 12d697f..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "autoscaling" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types.go b/vendor/k8s.io/api/autoscaling/v2beta1/types.go deleted file mode 100644 index 6a30e67..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types.go +++ /dev/null @@ -1,405 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2beta1 - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` -} - -// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. -type HorizontalPodAutoscalerSpec struct { - // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics - // should be collected, as well as to actually change the replica count. - ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. - // +optional - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. - // It cannot be less that minReplicas. - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // metrics contains the specifications for which to use to calculate the - // desired replica count (the maximum replica count across all metrics will - // be used). The desired replica count is calculated multiplying the - // ratio between the target value and the current value by the current - // number of pods. Ergo, metrics used must decrease as the pod count is - // increased, and vice-versa. See the individual metric source types for - // more information about how each type of metric must respond. - // +optional - Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` -} - -// MetricSourceType indicates the type of metric. 
-type MetricSourceType string - -var ( - // ObjectMetricSourceType is a metric describing a kubernetes object - // (for example, hits-per-second on an Ingress object). - ObjectMetricSourceType MetricSourceType = "Object" - // PodsMetricSourceType is a metric describing each pod in the current scale - // target (for example, transactions-processed-per-second). The values - // will be averaged together before being compared to the target value. - PodsMetricSourceType MetricSourceType = "Pods" - // ResourceMetricSourceType is a resource metric known to Kubernetes, as - // specified in requests and limits, describing each pod in the current - // scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics (the "pods" source). - ResourceMetricSourceType MetricSourceType = "Resource" - // ExternalMetricSourceType is a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - ExternalMetricSourceType MetricSourceType = "External" -) - -// MetricSpec specifies how to scale based on a single metric -// (only `type` and one other matching field should be set at once). -type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. - Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` -} - -// ObjectMetricSource indicates how to scale on a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricSource struct { - // target is the described Kubernetes object. - Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` - - // metricName is the name of the metric in question. 
- MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` - // targetValue is the target value of the metric (as a quantity). - TargetValue resource.Quantity `json:"targetValue" protobuf:"bytes,3,name=targetValue"` - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping - // When unset, just the metricName will be used to gather metrics. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` - // averageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"` -} - -// PodsMetricSource indicates how to scale on a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -// The values will be averaged together before being compared to the target -// value. -type PodsMetricSource struct { - // metricName is the name of the metric in question - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // targetAverageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - TargetAverageValue resource.Quantity `json:"targetAverageValue" protobuf:"bytes,2,name=targetAverageValue"` - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping - // When unset, just the metricName will be used to gather metrics. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"` -} - -// ResourceMetricSource indicates how to scale on a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). The values will be averaged -// together before being compared to the target. Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. Only one "target" type -// should be set. -type ResourceMetricSource struct { - // name is the name of the resource in question. - Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` - // targetAverageUtilization is the target value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. - // +optional - TargetAverageUtilization *int32 `json:"targetAverageUtilization,omitempty" protobuf:"varint,2,opt,name=targetAverageUtilization"` - // targetAverageValue is the target value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // +optional - TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,3,opt,name=targetAverageValue"` -} - -// ExternalMetricSource indicates how to scale on a metric not associated with -// any Kubernetes object (for example length of queue in cloud -// messaging service, or QPS from loadbalancer running outside of cluster). -// Exactly one "target" type should be set. 
-type ExternalMetricSource struct { - // metricName is the name of the metric in question. - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // metricSelector is used to identify a specific time series - // within a given metric. - // +optional - MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` - // targetValue is the target value of the metric (as a quantity). - // Mutually exclusive with TargetAverageValue. - // +optional - TargetValue *resource.Quantity `json:"targetValue,omitempty" protobuf:"bytes,3,opt,name=targetValue"` - // targetAverageValue is the target per-pod value of global metric (as a quantity). - // Mutually exclusive with TargetValue. - // +optional - TargetAverageValue *resource.Quantity `json:"targetAverageValue,omitempty" protobuf:"bytes,4,opt,name=targetAverageValue"` -} - -// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. -type HorizontalPodAutoscalerStatus struct { - // observedGeneration is the most recent generation observed by this autoscaler. - // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, - // used by the autoscaler to control how often the number of pods is changed. - // +optional - LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - - // currentReplicas is current number of replicas of pods managed by this autoscaler, - // as last seen by the autoscaler. - CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - - // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, - // as last calculated by the autoscaler. - DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - - // currentMetrics is the last read state of the metrics used by this autoscaler. - // +optional - CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` - - // conditions is the set of conditions required for this autoscaler to scale its target, - // and indicates whether or not those conditions are met. - Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` -} - -// HorizontalPodAutoscalerConditionType are the valid conditions of -// a HorizontalPodAutoscaler. -type HorizontalPodAutoscalerConditionType string - -var ( - // ScalingActive indicates that the HPA controller is able to scale if necessary: - // it's correctly configured, can fetch the desired metrics, and isn't disabled. - ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" - // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, - // such as being in a backoff window, or being unable to access/update the target scale. - AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" - // ScalingLimited indicates that the calculated scale based on metrics would be above or - // below the range for the HPA, and has thus been capped. - ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" -) - -// HorizontalPodAutoscalerCondition describes the state of -// a HorizontalPodAutoscaler at a certain point. 
-type HorizontalPodAutoscalerCondition struct { - // type describes the current condition - Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` - // status is the status of the condition (True, False, Unknown) - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` - // lastTransitionTime is the last time the condition transitioned from - // one status to another - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // reason is the reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // message is a human-readable explanation containing details about - // the transition - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// MetricStatus describes the last-read state of a single metric. -type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. - Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` -} - -// ObjectMetricStatus indicates the current value of a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricStatus struct { - // target is the described Kubernetes object. - Target CrossVersionObjectReference `json:"target" protobuf:"bytes,1,name=target"` - - // metricName is the name of the metric in question. - MetricName string `json:"metricName" protobuf:"bytes,2,name=metricName"` - // currentValue is the current value of the metric (as a quantity). - CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. 
- // When unset, just the metricName will be used to gather metrics. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,name=selector"` - // averageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,5,name=averageValue"` -} - -// PodsMetricStatus indicates the current value of a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -type PodsMetricStatus struct { - // metricName is the name of the metric in question - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // currentAverageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,2,name=currentAverageValue"` - - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. - // When unset, just the metricName will be used to gather metrics. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,3,name=selector"` -} - -// ResourceMetricStatus indicates the current value of a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. -type ResourceMetricStatus struct { - // name is the name of the resource in question. - Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` - // currentAverageUtilization is the current value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. It will only be - // present if `targetAverageValue` was set in the corresponding metric - // specification. - // +optional - CurrentAverageUtilization *int32 `json:"currentAverageUtilization,omitempty" protobuf:"bytes,2,opt,name=currentAverageUtilization"` - // currentAverageValue is the current value of the average of the - // resource metric across all relevant pods, as a raw value (instead of as - // a percentage of the request), similar to the "pods" metric source type. - // It will always be set, regardless of the corresponding metric specification. - CurrentAverageValue resource.Quantity `json:"currentAverageValue" protobuf:"bytes,3,name=currentAverageValue"` -} - -// ExternalMetricStatus indicates the current value of a global metric -// not associated with any Kubernetes object. -type ExternalMetricStatus struct { - // metricName is the name of a metric used for autoscaling in - // metric system. - MetricName string `json:"metricName" protobuf:"bytes,1,name=metricName"` - // metricSelector is used to identify a specific time series - // within a given metric. 
- // +optional - MetricSelector *metav1.LabelSelector `json:"metricSelector,omitempty" protobuf:"bytes,2,opt,name=metricSelector"` - // currentValue is the current value of the metric (as a quantity) - CurrentValue resource.Quantity `json:"currentValue" protobuf:"bytes,3,name=currentValue"` - // currentAverageValue is the current value of metric averaged over autoscaled pods. - // +optional - CurrentAverageValue *resource.Quantity `json:"currentAverageValue,omitempty" protobuf:"bytes,4,opt,name=currentAverageValue"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// HorizontalPodAutoscaler is the configuration for a horizontal pod -// autoscaler, which automatically manages the replica count of any resource -// implementing the scale subresource based on the metrics specified. -type HorizontalPodAutoscaler struct { - metav1.TypeMeta `json:",inline"` - // metadata is the standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // status is the current information about the autoscaler. - // +optional - Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - metav1.TypeMeta `json:",inline"` - // metadata is the standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go deleted file mode 100644 index 411b817..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,221 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. 
-// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_CrossVersionObjectReference = map[string]string{ - "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", -} - -func (CrossVersionObjectReference) SwaggerDoc() map[string]string { - return map_CrossVersionObjectReference -} - -var map_ExternalMetricSource = map[string]string{ - "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster). Exactly one \"target\" type should be set.", - "metricName": "metricName is the name of the metric in question.", - "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", - "targetValue": "targetValue is the target value of the metric (as a quantity). Mutually exclusive with TargetAverageValue.", - "targetAverageValue": "targetAverageValue is the target per-pod value of global metric (as a quantity). Mutually exclusive with TargetValue.", -} - -func (ExternalMetricSource) SwaggerDoc() map[string]string { - return map_ExternalMetricSource -} - -var map_ExternalMetricStatus = map[string]string{ - "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", - "metricName": "metricName is the name of a metric used for autoscaling in metric system.", - "metricSelector": "metricSelector is used to identify a specific time series within a given metric.", - "currentValue": "currentValue is the current value of the metric (as a quantity)", - "currentAverageValue": "currentAverageValue is the current value of metric averaged over autoscaled pods.", -} - -func (ExternalMetricStatus) SwaggerDoc() map[string]string { - return map_ExternalMetricStatus -} - -var map_HorizontalPodAutoscaler = map[string]string{ - "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "status is the current information about the autoscaler.", -} - -func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscaler -} - -var map_HorizontalPodAutoscalerCondition = map[string]string{ - "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", - "type": "type describes the current condition", - "status": "status is the status of the condition (True, False, Unknown)", - "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", - "reason": "reason is the reason for the condition's last transition.", - "message": "message is a human-readable explanation containing details about the transition", -} - -func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerCondition -} - -var map_HorizontalPodAutoscalerList = map[string]string{ - "": "HorizontalPodAutoscaler is a list of horizontal pod autoscaler objects.", - "metadata": "metadata is the standard list metadata.", - "items": "items is the list of horizontal pod autoscaler objects.", -} - -func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerList -} - -var map_HorizontalPodAutoscalerSpec = map[string]string{ - "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", - "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", - "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", - "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. 
See the individual metric source types for more information about how each type of metric must respond.", -} - -func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerSpec -} - -var map_HorizontalPodAutoscalerStatus = map[string]string{ - "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", - "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", - "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", - "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", - "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", - "conditions": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", -} - -func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerStatus -} - -var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", -} - -func (MetricSpec) SwaggerDoc() map[string]string { - return map_MetricSpec -} - -var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. 
CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", -} - -func (MetricStatus) SwaggerDoc() map[string]string { - return map_MetricStatus -} - -var map_ObjectMetricSource = map[string]string{ - "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", - "target": "target is the described Kubernetes object.", - "metricName": "metricName is the name of the metric in question.", - "targetValue": "targetValue is the target value of the metric (as a quantity).", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.", - "averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)", -} - -func (ObjectMetricSource) SwaggerDoc() map[string]string { - return map_ObjectMetricSource -} - -var map_ObjectMetricStatus = map[string]string{ - "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", - "target": "target is the described Kubernetes object.", - "metricName": "metricName is the name of the metric in question.", - "currentValue": "currentValue is the current value of the metric (as a quantity).", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the ObjectMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", - "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", -} - -func (ObjectMetricStatus) SwaggerDoc() map[string]string { - return map_ObjectMetricStatus -} - -var map_PodsMetricSource = map[string]string{ - "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). 
The values will be averaged together before being compared to the target value.", - "metricName": "metricName is the name of the metric in question", - "targetAverageValue": "targetAverageValue is the target value of the average of the metric across all relevant pods (as a quantity)", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping When unset, just the metricName will be used to gather metrics.", -} - -func (PodsMetricSource) SwaggerDoc() map[string]string { - return map_PodsMetricSource -} - -var map_PodsMetricStatus = map[string]string{ - "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", - "metricName": "metricName is the name of the metric in question", - "currentAverageValue": "currentAverageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set in the PodsMetricSource, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", -} - -func (PodsMetricStatus) SwaggerDoc() map[string]string { - return map_PodsMetricStatus -} - -var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", - "targetAverageUtilization": "targetAverageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", - "targetAverageValue": "targetAverageValue is the target value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type.", -} - -func (ResourceMetricSource) SwaggerDoc() map[string]string { - return map_ResourceMetricSource -} - -var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "name is the name of the resource in question.", - "currentAverageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
It will only be present if `targetAverageValue` was set in the corresponding metric specification.", - "currentAverageValue": "currentAverageValue is the current value of the average of the resource metric across all relevant pods, as a raw value (instead of as a percentage of the request), similar to the \"pods\" metric source type. It will always be set, regardless of the corresponding metric specification.", -} - -func (ResourceMetricStatus) SwaggerDoc() map[string]string { - return map_ResourceMetricStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go deleted file mode 100644 index 2ec7e61..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,466 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v2beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. -func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { - if in == nil { - return nil - } - out := new(CrossVersionObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { - *out = *in - if in.MetricSelector != nil { - in, out := &in.MetricSelector, &out.MetricSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.TargetValue != nil { - in, out := &in.TargetValue, &out.TargetValue - x := (*in).DeepCopy() - *out = &x - } - if in.TargetAverageValue != nil { - in, out := &in.TargetAverageValue, &out.TargetAverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. -func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { - if in == nil { - return nil - } - out := new(ExternalMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { - *out = *in - if in.MetricSelector != nil { - in, out := &in.MetricSelector, &out.MetricSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - out.CurrentValue = in.CurrentValue.DeepCopy() - if in.CurrentAverageValue != nil { - in, out := &in.CurrentAverageValue, &out.CurrentAverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. -func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { - if in == nil { - return nil - } - out := new(ExternalMetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. -func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscaler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. -func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. -func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) { - *out = *in - out.ScaleTargetRef = in.ScaleTargetRef - if in.MinReplicas != nil { - in, out := &in.MinReplicas, &out.MinReplicas - *out = new(int32) - **out = **in - } - if in.Metrics != nil { - in, out := &in.Metrics, &out.Metrics - *out = make([]MetricSpec, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec. -func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) { - *out = *in - if in.ObservedGeneration != nil { - in, out := &in.ObservedGeneration, &out.ObservedGeneration - *out = new(int64) - **out = **in - } - if in.LastScaleTime != nil { - in, out := &in.LastScaleTime, &out.LastScaleTime - *out = (*in).DeepCopy() - } - if in.CurrentMetrics != nil { - in, out := &in.CurrentMetrics, &out.CurrentMetrics - *out = make([]MetricStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]HorizontalPodAutoscalerCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus. -func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *MetricSpec) DeepCopyInto(out *MetricSpec) { - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricSource) - (*in).DeepCopyInto(*out) - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricSource) - (*in).DeepCopyInto(*out) - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricSource) - (*in).DeepCopyInto(*out) - } - if in.External != nil { - in, out := &in.External, &out.External - *out = new(ExternalMetricSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec. -func (in *MetricSpec) DeepCopy() *MetricSpec { - if in == nil { - return nil - } - out := new(MetricSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *MetricStatus) DeepCopyInto(out *MetricStatus) { - *out = *in - if in.Object != nil { - in, out := &in.Object, &out.Object - *out = new(ObjectMetricStatus) - (*in).DeepCopyInto(*out) - } - if in.Pods != nil { - in, out := &in.Pods, &out.Pods - *out = new(PodsMetricStatus) - (*in).DeepCopyInto(*out) - } - if in.Resource != nil { - in, out := &in.Resource, &out.Resource - *out = new(ResourceMetricStatus) - (*in).DeepCopyInto(*out) - } - if in.External != nil { - in, out := &in.External, &out.External - *out = new(ExternalMetricStatus) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus. -func (in *MetricStatus) DeepCopy() *MetricStatus { - if in == nil { - return nil - } - out := new(MetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) { - *out = *in - out.Target = in.Target - out.TargetValue = in.TargetValue.DeepCopy() - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.AverageValue != nil { - in, out := &in.AverageValue, &out.AverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource. -func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource { - if in == nil { - return nil - } - out := new(ObjectMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) { - *out = *in - out.Target = in.Target - out.CurrentValue = in.CurrentValue.DeepCopy() - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.AverageValue != nil { - in, out := &in.AverageValue, &out.AverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus. -func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus { - if in == nil { - return nil - } - out := new(ObjectMetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) { - *out = *in - out.TargetAverageValue = in.TargetAverageValue.DeepCopy() - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource. -func (in *PodsMetricSource) DeepCopy() *PodsMetricSource { - if in == nil { - return nil - } - out := new(PodsMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) { - *out = *in - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus. -func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus { - if in == nil { - return nil - } - out := new(PodsMetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) { - *out = *in - if in.TargetAverageUtilization != nil { - in, out := &in.TargetAverageUtilization, &out.TargetAverageUtilization - *out = new(int32) - **out = **in - } - if in.TargetAverageValue != nil { - in, out := &in.TargetAverageValue, &out.TargetAverageValue - x := (*in).DeepCopy() - *out = &x - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource. -func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource { - if in == nil { - return nil - } - out := new(ResourceMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) { - *out = *in - if in.CurrentAverageUtilization != nil { - in, out := &in.CurrentAverageUtilization, &out.CurrentAverageUtilization - *out = new(int32) - **out = **in - } - out.CurrentAverageValue = in.CurrentAverageValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus. -func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus { - if in == nil { - return nil - } - out := new(ResourceMetricStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go b/vendor/k8s.io/api/autoscaling/v2beta2/doc.go deleted file mode 100644 index 7c7d2b6..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta2/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -package v2beta2 // import "k8s.io/api/autoscaling/v2beta2" diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go b/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go deleted file mode 100644 index be752a1..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.pb.go +++ /dev/null @@ -1,4438 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto -// DO NOT EDIT! - -/* - Package v2beta2 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto - - It has these top-level messages: - CrossVersionObjectReference - ExternalMetricSource - ExternalMetricStatus - HorizontalPodAutoscaler - HorizontalPodAutoscalerCondition - HorizontalPodAutoscalerList - HorizontalPodAutoscalerSpec - HorizontalPodAutoscalerStatus - MetricIdentifier - MetricSpec - MetricStatus - MetricTarget - MetricValueStatus - ObjectMetricSource - ObjectMetricStatus - PodsMetricSource - PodsMetricStatus - ResourceMetricSource - ResourceMetricStatus -*/ -package v2beta2 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *CrossVersionObjectReference) Reset() { *m = CrossVersionObjectReference{} } -func (*CrossVersionObjectReference) ProtoMessage() {} -func (*CrossVersionObjectReference) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *ExternalMetricSource) Reset() { *m = ExternalMetricSource{} } -func (*ExternalMetricSource) ProtoMessage() {} -func (*ExternalMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ExternalMetricStatus) Reset() { *m = ExternalMetricStatus{} } -func (*ExternalMetricStatus) ProtoMessage() {} -func (*ExternalMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *HorizontalPodAutoscaler) Reset() { *m = HorizontalPodAutoscaler{} } -func (*HorizontalPodAutoscaler) ProtoMessage() {} -func (*HorizontalPodAutoscaler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *HorizontalPodAutoscalerCondition) Reset() { *m = HorizontalPodAutoscalerCondition{} } -func (*HorizontalPodAutoscalerCondition) ProtoMessage() {} -func (*HorizontalPodAutoscalerCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func (m *HorizontalPodAutoscalerList) Reset() { *m = HorizontalPodAutoscalerList{} } -func (*HorizontalPodAutoscalerList) ProtoMessage() {} -func (*HorizontalPodAutoscalerList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func (m *HorizontalPodAutoscalerSpec) Reset() { *m = HorizontalPodAutoscalerSpec{} } -func (*HorizontalPodAutoscalerSpec) ProtoMessage() {} -func (*HorizontalPodAutoscalerSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{6} -} - -func (m *HorizontalPodAutoscalerStatus) Reset() { *m = HorizontalPodAutoscalerStatus{} } -func (*HorizontalPodAutoscalerStatus) ProtoMessage() {} -func (*HorizontalPodAutoscalerStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{7} -} - -func (m *MetricIdentifier) Reset() { *m = MetricIdentifier{} } -func (*MetricIdentifier) ProtoMessage() {} -func (*MetricIdentifier) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *MetricSpec) Reset() { *m = MetricSpec{} } -func (*MetricSpec) ProtoMessage() {} -func (*MetricSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *MetricStatus) Reset() { *m = MetricStatus{} } -func (*MetricStatus) ProtoMessage() {} -func (*MetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *MetricTarget) Reset() { *m = MetricTarget{} } -func (*MetricTarget) ProtoMessage() {} -func (*MetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *MetricValueStatus) Reset() { *m = MetricValueStatus{} } -func (*MetricValueStatus) ProtoMessage() {} -func (*MetricValueStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *ObjectMetricSource) Reset() { *m = ObjectMetricSource{} } -func (*ObjectMetricSource) ProtoMessage() {} -func (*ObjectMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ObjectMetricStatus) Reset() { *m = ObjectMetricStatus{} } -func (*ObjectMetricStatus) ProtoMessage() {} -func (*ObjectMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *PodsMetricSource) 
Reset() { *m = PodsMetricSource{} } -func (*PodsMetricSource) ProtoMessage() {} -func (*PodsMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *PodsMetricStatus) Reset() { *m = PodsMetricStatus{} } -func (*PodsMetricStatus) ProtoMessage() {} -func (*PodsMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ResourceMetricSource) Reset() { *m = ResourceMetricSource{} } -func (*ResourceMetricSource) ProtoMessage() {} -func (*ResourceMetricSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ResourceMetricStatus) Reset() { *m = ResourceMetricStatus{} } -func (*ResourceMetricStatus) ProtoMessage() {} -func (*ResourceMetricStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func init() { - proto.RegisterType((*CrossVersionObjectReference)(nil), "k8s.io.api.autoscaling.v2beta2.CrossVersionObjectReference") - proto.RegisterType((*ExternalMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricSource") - proto.RegisterType((*ExternalMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ExternalMetricStatus") - proto.RegisterType((*HorizontalPodAutoscaler)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscaler") - proto.RegisterType((*HorizontalPodAutoscalerCondition)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerCondition") - proto.RegisterType((*HorizontalPodAutoscalerList)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerList") - proto.RegisterType((*HorizontalPodAutoscalerSpec)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerSpec") - proto.RegisterType((*HorizontalPodAutoscalerStatus)(nil), "k8s.io.api.autoscaling.v2beta2.HorizontalPodAutoscalerStatus") - proto.RegisterType((*MetricIdentifier)(nil), "k8s.io.api.autoscaling.v2beta2.MetricIdentifier") - proto.RegisterType((*MetricSpec)(nil), "k8s.io.api.autoscaling.v2beta2.MetricSpec") - proto.RegisterType((*MetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.MetricStatus") - proto.RegisterType((*MetricTarget)(nil), "k8s.io.api.autoscaling.v2beta2.MetricTarget") - proto.RegisterType((*MetricValueStatus)(nil), "k8s.io.api.autoscaling.v2beta2.MetricValueStatus") - proto.RegisterType((*ObjectMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ObjectMetricSource") - proto.RegisterType((*ObjectMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ObjectMetricStatus") - proto.RegisterType((*PodsMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.PodsMetricSource") - proto.RegisterType((*PodsMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.PodsMetricStatus") - proto.RegisterType((*ResourceMetricSource)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricSource") - proto.RegisterType((*ResourceMetricStatus)(nil), "k8s.io.api.autoscaling.v2beta2.ResourceMetricStatus") -} -func (m *CrossVersionObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CrossVersionObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], 
m.APIVersion) - return i, nil -} - -func (m *ExternalMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n1, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n2, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *ExternalMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExternalMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n3, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n4, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - return i, nil -} - -func (m *HorizontalPodAutoscaler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscaler) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n5, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n6, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n7, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - return i, nil -} - -func (m *HorizontalPodAutoscalerCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n8, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *HorizontalPodAutoscalerList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
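[editor's note] A minimal standalone sketch (not part of the patch) of the wire format the generated MarshalTo methods in this removed vendor file emit: a one-byte key such as 0xa (field 1, wire type 2, length-delimited), a base-128 varint length, then the payload bytes. The helpers below mirror the removed sovGenerated/encodeVarintGenerated functions but are renamed to make clear they are illustrative only.

// Sketch only: mirrors the varint helpers used by the deleted generated code.
package main

import "fmt"

// sovVarint returns how many bytes the varint encoding of x occupies.
func sovVarint(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

// putVarint writes x at dAtA[offset:] in base-128 little-endian order and
// returns the offset just past the written bytes.
func putVarint(dAtA []byte, offset int, x uint64) int {
	for x >= 1<<7 {
		dAtA[offset] = uint8(x&0x7f | 0x80)
		x >>= 7
		offset++
	}
	dAtA[offset] = uint8(x)
	return offset + 1
}

func main() {
	name := "cpu" // hypothetical MetricIdentifier.Name value
	buf := make([]byte, 1+sovVarint(uint64(len(name)))+len(name))
	i := 0
	buf[i] = 0xa // key: field number 1 << 3 | wire type 2
	i++
	i = putVarint(buf, i, uint64(len(name)))
	i += copy(buf[i:], name)
	fmt.Printf("%x\n", buf[:i]) // 0a0363707075 -> tag, length 3, "cpu"
}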
-func (m *HorizontalPodAutoscalerList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleTargetRef.Size())) - n10, err := m.ScaleTargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - if m.MinReplicas != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.MinReplicas)) - } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, msg := range m.Metrics { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HorizontalPodAutoscalerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HorizontalPodAutoscalerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ObservedGeneration != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScaleTime.Size())) - n11, err := m.LastScaleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredReplicas)) - if len(m.CurrentMetrics) > 0 { - for _, msg := range m.CurrentMetrics { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *MetricIdentifier) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricIdentifier) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n12, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - } - return i, nil -} - 
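[editor's note] A companion standalone sketch (assumed, not from the patch) of the decode side used by the Unmarshal methods further down in this hunk: read a varint key, split it into the field number (key >> 3) and wire type (key & 0x7), then consume the length-delimited payload.

// Sketch only: decodes the bytes produced by the encoding sketch above.
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// 0a 03 63 70 75: field 1, wire type 2, 3-byte payload "cpu".
	data := []byte{0x0a, 0x03, 'c', 'p', 'u'}

	key, n := binary.Uvarint(data) // key varint
	fieldNum := key >> 3
	wireType := key & 0x7

	length, m := binary.Uvarint(data[n:]) // length varint for wire type 2
	payload := data[n+m : n+m+int(length)]

	fmt.Println(fieldNum, wireType, string(payload)) // 1 2 cpu
}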
-func (m *MetricSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n13, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n14, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - } - if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n15, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n16, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - } - return i, nil -} - -func (m *MetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Object != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Object.Size())) - n17, err := m.Object.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - } - if m.Pods != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Pods.Size())) - n18, err := m.Pods.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - if m.Resource != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Resource.Size())) - n19, err := m.Resource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - if m.External != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.External.Size())) - n20, err := m.External.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - } - return i, nil -} - -func (m *MetricTarget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.Value != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n21, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - } - if m.AverageValue != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n22, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - if m.AverageUtilization != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) - } - return i, nil -} - -func (m *MetricValueStatus) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *MetricValueStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Value != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value.Size())) - n23, err := m.Value.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - if m.AverageValue != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AverageValue.Size())) - n24, err := m.AverageValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n24 - } - if m.AverageUtilization != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.AverageUtilization)) - } - return i, nil -} - -func (m *ObjectMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n25, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n25 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n26, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n26 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n27, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n27 - return i, nil -} - -func (m *ObjectMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ObjectMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n28, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n28 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n29, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n29 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DescribedObject.Size())) - n30, err := m.DescribedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n30 - return i, nil -} - -func (m *PodsMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodsMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n31, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n31 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n32, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n32 - return i, nil -} - -func (m *PodsMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodsMetricStatus) 
MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Metric.Size())) - n33, err := m.Metric.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n33 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n34, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - return i, nil -} - -func (m *ResourceMetricSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetricSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Target.Size())) - n35, err := m.Target.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - return i, nil -} - -func (m *ResourceMetricStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceMetricStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Current.Size())) - n36, err := m.Current.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CrossVersionObjectReference) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExternalMetricSource) Size() (n int) { - var l int - _ = l - l = m.Metric.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ExternalMetricStatus) Size() (n int) { - var l int - _ = l - l = m.Metric.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Current.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscaler) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n 
+= 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *HorizontalPodAutoscalerList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscalerSpec) Size() (n int) { - var l int - _ = l - l = m.ScaleTargetRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.MinReplicas != nil { - n += 1 + sovGenerated(uint64(*m.MinReplicas)) - } - n += 1 + sovGenerated(uint64(m.MaxReplicas)) - if len(m.Metrics) > 0 { - for _, e := range m.Metrics { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HorizontalPodAutoscalerStatus) Size() (n int) { - var l int - _ = l - if m.ObservedGeneration != nil { - n += 1 + sovGenerated(uint64(*m.ObservedGeneration)) - } - if m.LastScaleTime != nil { - l = m.LastScaleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.CurrentReplicas)) - n += 1 + sovGenerated(uint64(m.DesiredReplicas)) - if len(m.CurrentMetrics) > 0 { - for _, e := range m.CurrentMetrics { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *MetricIdentifier) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *MetricSpec) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.External != nil { - l = m.External.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *MetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Object != nil { - l = m.Object.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Pods != nil { - l = m.Pods.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Resource != nil { - l = m.Resource.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.External != nil { - l = m.External.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *MetricTarget) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - if m.Value != nil { - l = m.Value.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AverageValue != nil { - l = m.AverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AverageUtilization != nil { - n += 1 + sovGenerated(uint64(*m.AverageUtilization)) - } - return n -} - -func (m *MetricValueStatus) Size() (n int) { - var l int - _ = l - if m.Value != nil { - l = m.Value.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AverageValue != nil { - l = m.AverageValue.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AverageUtilization != nil { - n += 1 + 
sovGenerated(uint64(*m.AverageUtilization)) - } - return n -} - -func (m *ObjectMetricSource) Size() (n int) { - var l int - _ = l - l = m.DescribedObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Metric.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ObjectMetricStatus) Size() (n int) { - var l int - _ = l - l = m.Metric.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Current.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.DescribedObject.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodsMetricSource) Size() (n int) { - var l int - _ = l - l = m.Metric.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodsMetricStatus) Size() (n int) { - var l int - _ = l - l = m.Metric.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Current.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceMetricSource) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ResourceMetricStatus) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Current.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CrossVersionObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CrossVersionObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalMetricSource{`, - `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ExternalMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExternalMetricStatus{`, - `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, - `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscaler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscaler{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "HorizontalPodAutoscalerSpec", "HorizontalPodAutoscalerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "HorizontalPodAutoscalerStatus", "HorizontalPodAutoscalerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*HorizontalPodAutoscalerCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "HorizontalPodAutoscaler", "HorizontalPodAutoscaler", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerSpec{`, - `ScaleTargetRef:` + strings.Replace(strings.Replace(this.ScaleTargetRef.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `MinReplicas:` + valueToStringGenerated(this.MinReplicas) + `,`, - `MaxReplicas:` + fmt.Sprintf("%v", this.MaxReplicas) + `,`, - `Metrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Metrics), "MetricSpec", "MetricSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HorizontalPodAutoscalerStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HorizontalPodAutoscalerStatus{`, - `ObservedGeneration:` + valueToStringGenerated(this.ObservedGeneration) + `,`, - `LastScaleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScaleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CurrentReplicas:` + fmt.Sprintf("%v", this.CurrentReplicas) + `,`, - `DesiredReplicas:` + fmt.Sprintf("%v", this.DesiredReplicas) + `,`, - `CurrentMetrics:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.CurrentMetrics), "MetricStatus", "MetricStatus", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "HorizontalPodAutoscalerCondition", "HorizontalPodAutoscalerCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricIdentifier) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricIdentifier{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricSpec{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricSource", "ObjectMetricSource", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricSource", "PodsMetricSource", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricSource", "ResourceMetricSource", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricSource", 
"ExternalMetricSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricStatus{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Object:` + strings.Replace(fmt.Sprintf("%v", this.Object), "ObjectMetricStatus", "ObjectMetricStatus", 1) + `,`, - `Pods:` + strings.Replace(fmt.Sprintf("%v", this.Pods), "PodsMetricStatus", "PodsMetricStatus", 1) + `,`, - `Resource:` + strings.Replace(fmt.Sprintf("%v", this.Resource), "ResourceMetricStatus", "ResourceMetricStatus", 1) + `,`, - `External:` + strings.Replace(fmt.Sprintf("%v", this.External), "ExternalMetricStatus", "ExternalMetricStatus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *MetricTarget) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricTarget{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, - `}`, - }, "") - return s -} -func (this *MetricValueStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&MetricValueStatus{`, - `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageValue:` + strings.Replace(fmt.Sprintf("%v", this.AverageValue), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `AverageUtilization:` + valueToStringGenerated(this.AverageUtilization) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectMetricSource{`, - `DescribedObject:` + strings.Replace(strings.Replace(this.DescribedObject.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, - `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectMetricStatus{`, - `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, - `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, - `DescribedObject:` + strings.Replace(strings.Replace(this.DescribedObject.String(), "CrossVersionObjectReference", "CrossVersionObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodsMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodsMetricSource{`, - `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodsMetricStatus) String() string { - if this == nil { - return "nil" - } - s 
:= strings.Join([]string{`&PodsMetricStatus{`, - `Metric:` + strings.Replace(strings.Replace(this.Metric.String(), "MetricIdentifier", "MetricIdentifier", 1), `&`, ``, 1) + `,`, - `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricSource{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "MetricTarget", "MetricTarget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceMetricStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceMetricStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Current:` + strings.Replace(strings.Replace(this.Current.String(), "MetricValueStatus", "MetricValueStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CrossVersionObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CrossVersionObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CrossVersionObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExternalMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExternalMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExternalMetricStatus: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscaler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscaler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = HorizontalPodAutoscalerConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, HorizontalPodAutoscaler{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleTargetRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ScaleTargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReplicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.MinReplicas = &v - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxReplicas", wireType) - } - m.MaxReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MaxReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Metrics = append(m.Metrics, MetricSpec{}) - if err := m.Metrics[len(m.Metrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if 
(iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HorizontalPodAutoscalerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HorizontalPodAutoscalerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ObservedGeneration = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScaleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScaleTime == nil { - m.LastScaleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.LastScaleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentReplicas", wireType) - } - m.CurrentReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredReplicas", wireType) - } - m.DesiredReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentMetrics", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CurrentMetrics = append(m.CurrentMetrics, MetricStatus{}) - if err := m.CurrentMetrics[len(m.CurrentMetrics)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, HorizontalPodAutoscalerCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricIdentifier) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricIdentifier: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricIdentifier: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - 
for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricSource{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricSource{} - } - if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricSource{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.External == nil { - m.External = &ExternalMetricSource{} - } - if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricSourceType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Object", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Object == nil { - m.Object = &ObjectMetricStatus{} - } - if err := m.Object.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Pods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Pods == nil { - m.Pods = &PodsMetricStatus{} - } - if err := m.Pods.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Resource == nil { - m.Resource = &ResourceMetricStatus{} - } - if err := m.Resource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field External", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.External == nil { - m.External = &ExternalMetricStatus{} - } - if err := m.External.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricTarget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricTarget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = MetricTargetType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.AverageUtilization = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *MetricValueStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: MetricValueStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: MetricValueStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Value == nil { - m.Value = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AverageValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AverageValue == nil { - m.AverageValue = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.AverageValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return 
err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AverageUtilization", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.AverageUtilization = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DescribedObject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DescribedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ObjectMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ObjectMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ObjectMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DescribedObject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DescribedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricSource: wiretype end 
group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodsMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodsMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodsMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metric", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } 
- if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceMetricStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceMetricStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceMetricStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - 
var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Current", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Current.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto", 
fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1425 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xd4, 0x58, 0xdd, 0x6f, 0x1b, 0xc5, - 0x16, 0xcf, 0xda, 0x8e, 0x93, 0x8e, 0xd3, 0x24, 0x9d, 0x5b, 0xb5, 0x56, 0xaa, 0x6b, 0x47, 0xab, - 0xab, 0xab, 0x52, 0xd1, 0x35, 0x31, 0xe1, 0x43, 0x42, 0x48, 0xc4, 0x01, 0xda, 0x8a, 0xa4, 0x2d, - 0x93, 0xb4, 0x42, 0xa8, 0x45, 0x8c, 0x77, 0x4f, 0xdc, 0x21, 0xde, 0x5d, 0x6b, 0x76, 0x6c, 0x35, - 0x45, 0x42, 0xbc, 0xf0, 0x8e, 0x40, 0xfc, 0x13, 0x88, 0x17, 0x5e, 0x90, 0x78, 0xe4, 0x43, 0xa8, - 0x42, 0x08, 0xf5, 0xb1, 0x08, 0xc9, 0xa2, 0xe6, 0xbf, 0xe8, 0x13, 0xda, 0x99, 0xd9, 0xf5, 0xae, - 0xed, 0xc4, 0x4e, 0x95, 0x14, 0xf5, 0xcd, 0x33, 0xe7, 0x9c, 0xdf, 0xf9, 0x9c, 0x73, 0xce, 0x1a, - 0x5d, 0xda, 0x7d, 0x35, 0xb0, 0x98, 0x5f, 0xd9, 0x6d, 0xd7, 0x81, 0x7b, 0x20, 0x20, 0xa8, 0x74, - 0xc0, 0x73, 0x7c, 0x5e, 0xd1, 0x04, 0xda, 0x62, 0x15, 0xda, 0x16, 0x7e, 0x60, 0xd3, 0x26, 0xf3, - 0x1a, 0x95, 0x4e, 0xb5, 0x0e, 0x82, 0x56, 0x2b, 0x0d, 0xf0, 0x80, 0x53, 0x01, 0x8e, 0xd5, 0xe2, - 0xbe, 0xf0, 0x71, 0x49, 0xf1, 0x5b, 0xb4, 0xc5, 0xac, 0x04, 0xbf, 0xa5, 0xf9, 0x97, 0x2e, 0x36, - 0x98, 0xb8, 0xd3, 0xae, 0x5b, 0xb6, 0xef, 0x56, 0x1a, 0x7e, 0xc3, 0xaf, 0x48, 0xb1, 0x7a, 0x7b, - 0x47, 0x9e, 0xe4, 0x41, 0xfe, 0x52, 0x70, 0x4b, 0x66, 0x42, 0xbd, 0xed, 0x73, 0xa8, 0x74, 0x56, - 0x06, 0x55, 0x2e, 0xad, 0xf6, 0x79, 0x5c, 0x6a, 0xdf, 0x61, 0x1e, 0xf0, 0xbd, 0x4a, 0x6b, 0xb7, - 0x21, 0x85, 0x38, 0x04, 0x7e, 0x9b, 0xdb, 0x70, 0x28, 0xa9, 0xa0, 0xe2, 0x82, 0xa0, 0xa3, 0x74, - 0x55, 0xf6, 0x93, 0xe2, 0x6d, 0x4f, 0x30, 0x77, 0x58, 0xcd, 0xcb, 0xe3, 0x04, 0x02, 0xfb, 0x0e, - 0xb8, 0x74, 0x50, 0xce, 0xfc, 0xca, 0x40, 0xe7, 0xd6, 0xb9, 0x1f, 0x04, 0x37, 0x81, 0x07, 0xcc, - 0xf7, 0xae, 0xd5, 0x3f, 0x02, 0x5b, 0x10, 0xd8, 0x01, 0x0e, 0x9e, 0x0d, 0x78, 0x19, 0xe5, 0x76, - 0x99, 0xe7, 0x14, 0x8d, 0x65, 0xe3, 0xfc, 0x89, 0xda, 0xdc, 0xfd, 0x6e, 0x79, 0xaa, 0xd7, 0x2d, - 0xe7, 0xde, 0x61, 0x9e, 0x43, 0x24, 0x25, 0xe4, 0xf0, 0xa8, 0x0b, 0xc5, 0x4c, 0x9a, 0xe3, 0x2a, - 0x75, 0x81, 0x48, 0x0a, 0xae, 0x22, 0x44, 0x5b, 0x4c, 0x2b, 0x28, 0x66, 0x25, 0x1f, 0xd6, 0x7c, - 0x68, 0xed, 0xfa, 0x15, 0x4d, 0x21, 0x09, 0x2e, 0xf3, 0x17, 0x03, 0x9d, 0x7e, 0xeb, 0xae, 0x00, - 0xee, 0xd1, 0xe6, 0x26, 0x08, 0xce, 0xec, 0x2d, 0x19, 0x5f, 0xfc, 0x1e, 0xca, 0xbb, 0xf2, 0x2c, - 0x4d, 0x2a, 0x54, 0x5f, 0xb0, 0x0e, 0xae, 0x04, 0x4b, 0x49, 0x5f, 0x71, 0xc0, 0x13, 0x6c, 0x87, - 0x01, 0xaf, 0xcd, 0x6b, 0xd5, 0x79, 0x45, 0x21, 0x1a, 0x0f, 0x6f, 0xa3, 0xbc, 0xa0, 0xbc, 0x01, - 0x42, 0xba, 0x52, 0xa8, 0x3e, 0x3f, 0x19, 0xf2, 0xb6, 0x94, 0xe9, 0xa3, 0xaa, 0x33, 0xd1, 0x58, - 0xe6, 0xef, 0xc3, 0x8e, 0x08, 0x2a, 0xda, 0xc1, 0x31, 0x3a, 0x72, 0x0b, 0xcd, 0xd8, 0x6d, 0xce, - 0xc1, 0x8b, 0x3c, 0x59, 0x99, 0x0c, 0xfa, 0x26, 0x6d, 0xb6, 0x41, 0x59, 0x57, 0x5b, 0xd0, 0xd8, - 0x33, 0xeb, 0x0a, 0x89, 0x44, 0x90, 0xe6, 0x0f, 0x19, 0x74, 0xf6, 0xb2, 0xcf, 0xd9, 0x3d, 0xdf, - 0x13, 0xb4, 0x79, 0xdd, 0x77, 0xd6, 0x34, 0x20, 0x70, 0xfc, 0x21, 0x9a, 0x0d, 0x2b, 0xda, 0xa1, - 0x82, 0x8e, 0xf0, 0x2a, 0x2e, 0x4c, 0xab, 0xb5, 0xdb, 0x08, 0x2f, 0x02, 0x2b, 0xe4, 0xb6, 0x3a, - 0x2b, 0x96, 0x2a, 0xbb, 0x4d, 0x10, 0xb4, 0x5f, 0x19, 0xfd, 0x3b, 0x12, 0xa3, 0xe2, 0xdb, 0x28, - 0x17, 0xb4, 0xc0, 0xd6, 0x8e, 0xbd, 0x36, 0xce, 0xb1, 0x7d, 0x0c, 0xdd, 0x6a, 0x81, 0xdd, 0x2f, - 0xd5, 0xf0, 0x44, 0x24, 0x2c, 0x06, 0x94, 0x0f, 0x64, 0x00, 0x64, 0x99, 0x16, 0xaa, 0xaf, 0x3f, - 0xa9, 0x02, 0x15, 0xc5, 0x38, 0x43, 0xea, 0x4c, 0x34, 0xb8, 0xf9, 0x59, 0x16, 0x2d, 0xef, 0x23, - 
0xb9, 0xee, 0x7b, 0x0e, 0x13, 0xcc, 0xf7, 0xf0, 0x65, 0x94, 0x13, 0x7b, 0x2d, 0xd0, 0x4f, 0x6f, - 0x35, 0xb2, 0x76, 0x7b, 0xaf, 0x05, 0x8f, 0xbb, 0xe5, 0xff, 0x8d, 0x93, 0x0f, 0xf9, 0x88, 0x44, - 0xc0, 0x1b, 0xb1, 0x57, 0x99, 0x14, 0x96, 0x36, 0xeb, 0x71, 0xb7, 0x3c, 0xa2, 0xff, 0x59, 0x31, - 0x52, 0xda, 0x78, 0xdc, 0x41, 0xb8, 0x49, 0x03, 0xb1, 0xcd, 0xa9, 0x17, 0x28, 0x4d, 0xcc, 0x05, - 0x1d, 0xaf, 0x0b, 0x93, 0xa5, 0x3b, 0x94, 0xa8, 0x2d, 0x69, 0x2b, 0xf0, 0xc6, 0x10, 0x1a, 0x19, - 0xa1, 0x01, 0xff, 0x1f, 0xe5, 0x39, 0xd0, 0xc0, 0xf7, 0x8a, 0x39, 0xe9, 0x45, 0x1c, 0x5c, 0x22, - 0x6f, 0x89, 0xa6, 0xe2, 0xe7, 0xd0, 0x8c, 0x0b, 0x41, 0x40, 0x1b, 0x50, 0x9c, 0x96, 0x8c, 0x71, - 0x2d, 0x6f, 0xaa, 0x6b, 0x12, 0xd1, 0xcd, 0x3f, 0x0c, 0x74, 0x6e, 0x9f, 0x38, 0x6e, 0xb0, 0x40, - 0xe0, 0x5b, 0x43, 0xf5, 0x6c, 0x4d, 0xe6, 0x60, 0x28, 0x2d, 0xab, 0x79, 0x51, 0xeb, 0x9e, 0x8d, - 0x6e, 0x12, 0xb5, 0x7c, 0x0b, 0x4d, 0x33, 0x01, 0x6e, 0x98, 0x95, 0xec, 0xf9, 0x42, 0xf5, 0x95, - 0x27, 0xac, 0xb5, 0xda, 0x49, 0xad, 0x63, 0xfa, 0x4a, 0x88, 0x46, 0x14, 0xa8, 0xf9, 0x67, 0x66, - 0x5f, 0xdf, 0xc2, 0x82, 0xc7, 0x1f, 0xa3, 0x79, 0x79, 0xd2, 0xfd, 0x0a, 0x76, 0xb4, 0x87, 0x63, - 0xdf, 0xd4, 0x01, 0xe3, 0xa2, 0x76, 0x46, 0x9b, 0x32, 0xbf, 0x95, 0x82, 0x26, 0x03, 0xaa, 0xf0, - 0x0a, 0x2a, 0xb8, 0xcc, 0x23, 0xd0, 0x6a, 0x32, 0x9b, 0xaa, 0xb2, 0x9c, 0xae, 0x2d, 0xf4, 0xba, - 0xe5, 0xc2, 0x66, 0xff, 0x9a, 0x24, 0x79, 0xf0, 0x4b, 0xa8, 0xe0, 0xd2, 0xbb, 0xb1, 0x48, 0x56, - 0x8a, 0xfc, 0x47, 0xeb, 0x2b, 0x6c, 0xf6, 0x49, 0x24, 0xc9, 0x87, 0x6f, 0x84, 0xd5, 0x10, 0x76, - 0xb7, 0xa0, 0x98, 0x93, 0x61, 0xbe, 0x30, 0x59, 0x33, 0x94, 0x2d, 0x22, 0x51, 0x39, 0x12, 0x82, - 0x44, 0x58, 0xe6, 0x77, 0x39, 0xf4, 0xdf, 0x03, 0xdf, 0x3e, 0x7e, 0x1b, 0x61, 0xbf, 0x1e, 0x00, - 0xef, 0x80, 0x73, 0x49, 0x0d, 0xdd, 0x70, 0xfa, 0x85, 0x31, 0xce, 0xd6, 0xce, 0x84, 0x65, 0x7f, - 0x6d, 0x88, 0x4a, 0x46, 0x48, 0x60, 0x1b, 0x9d, 0x0c, 0x1f, 0x83, 0x0a, 0x28, 0xd3, 0x83, 0xf6, - 0x70, 0x2f, 0xed, 0x54, 0xaf, 0x5b, 0x3e, 0xb9, 0x91, 0x04, 0x21, 0x69, 0x4c, 0xbc, 0x86, 0x16, - 0x74, 0x7f, 0x1f, 0x08, 0xf0, 0x59, 0x1d, 0x81, 0x85, 0xf5, 0x34, 0x99, 0x0c, 0xf2, 0x87, 0x10, - 0x0e, 0x04, 0x8c, 0x83, 0x13, 0x43, 0xe4, 0xd2, 0x10, 0x6f, 0xa6, 0xc9, 0x64, 0x90, 0x1f, 0x37, - 0xd1, 0xbc, 0x46, 0xd5, 0xf1, 0x2e, 0x4e, 0xcb, 0x94, 0x4d, 0x38, 0x89, 0x75, 0xd3, 0x8d, 0x6b, - 0x70, 0x3d, 0x85, 0x45, 0x06, 0xb0, 0xb1, 0x40, 0xc8, 0x8e, 0x5a, 0x5c, 0x50, 0xcc, 0x4b, 0x4d, - 0x6f, 0x3c, 0xe1, 0x1b, 0x8c, 0x7b, 0x65, 0x7f, 0x7c, 0xc5, 0x57, 0x01, 0x49, 0xe8, 0x31, 0xbf, - 0x34, 0xd0, 0xe2, 0xe0, 0x24, 0x8f, 0x77, 0x28, 0x63, 0xdf, 0x1d, 0xea, 0x36, 0x9a, 0x0d, 0xa0, - 0x09, 0xb6, 0xf0, 0xb9, 0x2e, 0x80, 0x17, 0x27, 0xec, 0x44, 0xb4, 0x0e, 0xcd, 0x2d, 0x2d, 0x5a, - 0x9b, 0x0b, 0x5b, 0x51, 0x74, 0x22, 0x31, 0xa4, 0xf9, 0x75, 0x16, 0xa1, 0x7e, 0xdd, 0xe3, 0xd5, - 0xd4, 0xe8, 0x59, 0x1e, 0x18, 0x3d, 0x8b, 0xc9, 0x85, 0x2c, 0x31, 0x66, 0x6e, 0xa2, 0xbc, 0x2f, - 0xfb, 0x81, 0xb6, 0xb0, 0x3a, 0x2e, 0x98, 0xf1, 0x84, 0x8f, 0xd1, 0x6a, 0x28, 0x6c, 0xe8, 0xba, - 0xab, 0x68, 0x34, 0x7c, 0x15, 0xe5, 0x5a, 0xbe, 0x13, 0x8d, 0xe4, 0xb1, 0x7b, 0xd2, 0x75, 0xdf, - 0x09, 0x52, 0x98, 0xb3, 0xa1, 0xed, 0xe1, 0x2d, 0x91, 0x38, 0xf8, 0x03, 0x34, 0x1b, 0xad, 0xeb, - 0xb2, 0x44, 0x0b, 0xd5, 0xd5, 0x71, 0x98, 0x44, 0xf3, 0xa7, 0x70, 0x65, 0x30, 0x23, 0x0a, 0x89, - 0x31, 0x43, 0x7c, 0xd0, 0x1b, 0x9f, 0x9c, 0x40, 0x13, 0xe0, 0x8f, 0x5a, 0x75, 0x15, 0x7e, 0x44, - 0x21, 0x31, 0xa6, 0xf9, 0x4d, 0x16, 0xcd, 0xa5, 0x56, 0xc9, 0x7f, 0x23, 0x5d, 0xea, 0xad, 0x1d, - 0x6d, 0xba, 0x14, 0xe6, 
0xd1, 0xa7, 0x4b, 0xe1, 0x1e, 0x5f, 0xba, 0x12, 0xf8, 0x23, 0xd2, 0xf5, - 0x53, 0x26, 0x4a, 0x97, 0x9a, 0x7f, 0x93, 0xa5, 0x4b, 0xf1, 0x26, 0xd2, 0x75, 0x0d, 0x4d, 0x77, - 0xc2, 0x05, 0x5d, 0x67, 0xeb, 0xc0, 0x45, 0xc4, 0x8a, 0x9c, 0xb3, 0xde, 0x6d, 0x53, 0x4f, 0x30, - 0xb1, 0x57, 0x3b, 0x11, 0x2e, 0x08, 0x72, 0xc3, 0x27, 0x0a, 0x07, 0x3b, 0x68, 0x8e, 0x76, 0x80, - 0xd3, 0x06, 0xc8, 0x6b, 0x9d, 0xaf, 0xc3, 0xe2, 0x2e, 0xf6, 0xba, 0xe5, 0xb9, 0xb5, 0x04, 0x0e, - 0x49, 0xa1, 0x86, 0x63, 0x50, 0x9f, 0x6f, 0x08, 0xd6, 0x64, 0xf7, 0xd4, 0x18, 0x54, 0x93, 0x41, - 0x8e, 0xc1, 0xb5, 0x21, 0x2a, 0x19, 0x21, 0x61, 0x7e, 0x91, 0x41, 0xa7, 0x86, 0x3e, 0x53, 0xfa, - 0x41, 0x31, 0x8e, 0x29, 0x28, 0x99, 0xa7, 0x18, 0x94, 0xec, 0xa1, 0x83, 0xf2, 0x73, 0x06, 0xe1, - 0xe1, 0x26, 0x8a, 0x3f, 0x91, 0xa3, 0xd8, 0xe6, 0xac, 0x0e, 0x8e, 0x22, 0x1f, 0xc5, 0x6e, 0x97, - 0x9c, 0xe3, 0x49, 0x6c, 0x32, 0xa8, 0xec, 0x78, 0xbe, 0xa4, 0x13, 0x1f, 0xcc, 0xd9, 0xa3, 0xfd, - 0x60, 0x36, 0x7f, 0x1b, 0x0c, 0xe3, 0x33, 0xfd, 0x85, 0x3e, 0x2a, 0xfd, 0xd9, 0xa7, 0x98, 0x7e, - 0xf3, 0x47, 0x03, 0x2d, 0x0e, 0x0e, 0xe1, 0x67, 0xee, 0x7f, 0x9b, 0x5f, 0xd3, 0x4e, 0x3c, 0xdb, - 0xff, 0xd9, 0x7c, 0x6b, 0xa0, 0xd3, 0xa3, 0x56, 0x18, 0xbc, 0x9e, 0x5a, 0x3c, 0x2b, 0xc9, 0xc5, - 0xf3, 0x71, 0xb7, 0x5c, 0x1e, 0xf1, 0xaf, 0x40, 0x04, 0x93, 0xd8, 0x4d, 0x8f, 0x27, 0x01, 0xdf, - 0x0f, 0xdb, 0xac, 0x92, 0x70, 0x24, 0x36, 0x1f, 0x6b, 0xbc, 0x6b, 0x17, 0xef, 0x3f, 0x2a, 0x4d, - 0x3d, 0x78, 0x54, 0x9a, 0x7a, 0xf8, 0xa8, 0x34, 0xf5, 0x69, 0xaf, 0x64, 0xdc, 0xef, 0x95, 0x8c, - 0x07, 0xbd, 0x92, 0xf1, 0xb0, 0x57, 0x32, 0xfe, 0xea, 0x95, 0x8c, 0xcf, 0xff, 0x2e, 0x4d, 0xbd, - 0x3f, 0xa3, 0xa1, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x7e, 0xa0, 0xce, 0xf5, 0x16, 0x17, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto b/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto deleted file mode 100644 index b4e4c95..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta2/generated.proto +++ /dev/null @@ -1,369 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.autoscaling.v2beta2; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v2beta2"; - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. 
-message CrossVersionObjectReference {
-  // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
-  optional string kind = 1;
-
-  // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names
-  optional string name = 2;
-
-  // API version of the referent
-  // +optional
-  optional string apiVersion = 3;
-}
-
-// ExternalMetricSource indicates how to scale on a metric not associated with
-// any Kubernetes object (for example length of queue in cloud
-// messaging service, or QPS from loadbalancer running outside of cluster).
-message ExternalMetricSource {
-  // metric identifies the target metric by name and selector
-  optional MetricIdentifier metric = 1;
-
-  // target specifies the target value for the given metric
-  optional MetricTarget target = 2;
-}
-
-// ExternalMetricStatus indicates the current value of a global metric
-// not associated with any Kubernetes object.
-message ExternalMetricStatus {
-  // metric identifies the target metric by name and selector
-  optional MetricIdentifier metric = 1;
-
-  // current contains the current value for the given metric
-  optional MetricValueStatus current = 2;
-}
-
-// HorizontalPodAutoscaler is the configuration for a horizontal pod
-// autoscaler, which automatically manages the replica count of any resource
-// implementing the scale subresource based on the metrics specified.
-message HorizontalPodAutoscaler {
-  // metadata is the standard object metadata.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  // spec is the specification for the behaviour of the autoscaler.
-  // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
-  // +optional
-  optional HorizontalPodAutoscalerSpec spec = 2;
-
-  // status is the current information about the autoscaler.
-  // +optional
-  optional HorizontalPodAutoscalerStatus status = 3;
-}
-
-// HorizontalPodAutoscalerCondition describes the state of
-// a HorizontalPodAutoscaler at a certain point.
-message HorizontalPodAutoscalerCondition {
-  // type describes the current condition
-  optional string type = 1;
-
-  // status is the status of the condition (True, False, Unknown)
-  optional string status = 2;
-
-  // lastTransitionTime is the last time the condition transitioned from
-  // one status to another
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3;
-
-  // reason is the reason for the condition's last transition.
-  // +optional
-  optional string reason = 4;
-
-  // message is a human-readable explanation containing details about
-  // the transition
-  // +optional
-  optional string message = 5;
-}
-
-// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.
-message HorizontalPodAutoscalerList {
-  // metadata is the standard list metadata.
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  // items is the list of horizontal pod autoscaler objects.
-  repeated HorizontalPodAutoscaler items = 2;
-}
-
-// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.
-message HorizontalPodAutoscalerSpec {
-  // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics
-  // should be collected, as well as to actually change the replica count.
-  optional CrossVersionObjectReference scaleTargetRef = 1;
-
-  // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down.
-  // It defaults to 1 pod.
-  // +optional
-  optional int32 minReplicas = 2;
-
-  // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up.
-  // It cannot be less that minReplicas.
-  optional int32 maxReplicas = 3;
-
-  // metrics contains the specifications for which to use to calculate the
-  // desired replica count (the maximum replica count across all metrics will
-  // be used). The desired replica count is calculated multiplying the
-  // ratio between the target value and the current value by the current
-  // number of pods. Ergo, metrics used must decrease as the pod count is
-  // increased, and vice-versa. See the individual metric source types for
-  // more information about how each type of metric must respond.
-  // If not set, the default metric will be set to 80% average CPU utilization.
-  // +optional
-  repeated MetricSpec metrics = 4;
-}
-
-// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.
-message HorizontalPodAutoscalerStatus {
-  // observedGeneration is the most recent generation observed by this autoscaler.
-  // +optional
-  optional int64 observedGeneration = 1;
-
-  // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods,
-  // used by the autoscaler to control how often the number of pods is changed.
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScaleTime = 2;
-
-  // currentReplicas is current number of replicas of pods managed by this autoscaler,
-  // as last seen by the autoscaler.
-  optional int32 currentReplicas = 3;
-
-  // desiredReplicas is the desired number of replicas of pods managed by this autoscaler,
-  // as last calculated by the autoscaler.
-  optional int32 desiredReplicas = 4;
-
-  // currentMetrics is the last read state of the metrics used by this autoscaler.
-  // +optional
-  repeated MetricStatus currentMetrics = 5;
-
-  // conditions is the set of conditions required for this autoscaler to scale its target,
-  // and indicates whether or not those conditions are met.
-  repeated HorizontalPodAutoscalerCondition conditions = 6;
-}
-
-// MetricIdentifier defines the name and optionally selector for a metric
-message MetricIdentifier {
-  // name is the name of the given metric
-  optional string name = 1;
-
-  // selector is the string-encoded form of a standard kubernetes label selector for the given metric
-  // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping.
-  // When unset, just the metricName will be used to gather metrics.
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2;
-}
-
-// MetricSpec specifies how to scale based on a single metric
-// (only `type` and one other matching field should be set at once).
-message MetricSpec {
-  // type is the type of metric source. It should be one of "Object",
-  // "Pods" or "Resource", each mapping to a matching field in the object.
-  optional string type = 1;
-
-  // object refers to a metric describing a single kubernetes object
-  // (for example, hits-per-second on an Ingress object).
-  // +optional
-  optional ObjectMetricSource object = 2;
-
-  // pods refers to a metric describing each pod in the current scale target
-  // (for example, transactions-processed-per-second). The values will be
-  // averaged together before being compared to the target value.
-  // +optional
-  optional PodsMetricSource pods = 3;
-
-  // resource refers to a resource metric (such as those specified in
-  // requests and limits) known to Kubernetes describing each pod in the
-  // current scale target (e.g. CPU or memory). Such metrics are built in to
-  // Kubernetes, and have special scaling options on top of those available
-  // to normal per-pod metrics using the "pods" source.
-  // +optional
-  optional ResourceMetricSource resource = 4;
-
-  // external refers to a global metric that is not associated
-  // with any Kubernetes object. It allows autoscaling based on information
-  // coming from components running outside of cluster
-  // (for example length of queue in cloud messaging service, or
-  // QPS from loadbalancer running outside of cluster).
-  // +optional
-  optional ExternalMetricSource external = 5;
-}
-
-// MetricStatus describes the last-read state of a single metric.
-message MetricStatus {
-  // type is the type of metric source. It will be one of "Object",
-  // "Pods" or "Resource", each corresponds to a matching field in the object.
-  optional string type = 1;
-
-  // object refers to a metric describing a single kubernetes object
-  // (for example, hits-per-second on an Ingress object).
-  // +optional
-  optional ObjectMetricStatus object = 2;
-
-  // pods refers to a metric describing each pod in the current scale target
-  // (for example, transactions-processed-per-second). The values will be
-  // averaged together before being compared to the target value.
-  // +optional
-  optional PodsMetricStatus pods = 3;
-
-  // resource refers to a resource metric (such as those specified in
-  // requests and limits) known to Kubernetes describing each pod in the
-  // current scale target (e.g. CPU or memory). Such metrics are built in to
-  // Kubernetes, and have special scaling options on top of those available
-  // to normal per-pod metrics using the "pods" source.
-  // +optional
-  optional ResourceMetricStatus resource = 4;
-
-  // external refers to a global metric that is not associated
-  // with any Kubernetes object. It allows autoscaling based on information
-  // coming from components running outside of cluster
-  // (for example length of queue in cloud messaging service, or
-  // QPS from loadbalancer running outside of cluster).
-  // +optional
-  optional ExternalMetricStatus external = 5;
-}
-
-// MetricTarget defines the target value, average value, or average utilization of a specific metric
-message MetricTarget {
-  // type represents whether the metric type is Utilization, Value, or AverageValue
-  optional string type = 1;
-
-  // value is the target value of the metric (as a quantity).
-  // +optional
-  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2;
-
-  // averageValue is the target value of the average of the
-  // metric across all relevant pods (as a quantity)
-  // +optional
-  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 3;
-
-  // averageUtilization is the target value of the average of the
-  // resource metric across all relevant pods, represented as a percentage of
-  // the requested value of the resource for the pods.
-  // Currently only valid for Resource metric source type
-  // +optional
-  optional int32 averageUtilization = 4;
-}
-
-// MetricValueStatus holds the current value for a metric
-message MetricValueStatus {
-  // value is the current value of the metric (as a quantity).
-  // +optional
-  optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 1;
-
-  // averageValue is the current value of the average of the
-  // metric across all relevant pods (as a quantity)
-  // +optional
-  optional k8s.io.apimachinery.pkg.api.resource.Quantity averageValue = 2;
-
-  // currentAverageUtilization is the current value of the average of the
-  // resource metric across all relevant pods, represented as a percentage of
-  // the requested value of the resource for the pods.
-  // +optional
-  optional int32 averageUtilization = 3;
-}
-
-// ObjectMetricSource indicates how to scale on a metric describing a
-// kubernetes object (for example, hits-per-second on an Ingress object).
-message ObjectMetricSource {
-  optional CrossVersionObjectReference describedObject = 1;
-
-  // target specifies the target value for the given metric
-  optional MetricTarget target = 2;
-
-  // metric identifies the target metric by name and selector
-  optional MetricIdentifier metric = 3;
-}
-
-// ObjectMetricStatus indicates the current value of a metric describing a
-// kubernetes object (for example, hits-per-second on an Ingress object).
-message ObjectMetricStatus {
-  // metric identifies the target metric by name and selector
-  optional MetricIdentifier metric = 1;
-
-  // current contains the current value for the given metric
-  optional MetricValueStatus current = 2;
-
-  optional CrossVersionObjectReference describedObject = 3;
-}
-
-// PodsMetricSource indicates how to scale on a metric describing each pod in
-// the current scale target (for example, transactions-processed-per-second).
-// The values will be averaged together before being compared to the target
-// value.
-message PodsMetricSource {
-  // metric identifies the target metric by name and selector
-  optional MetricIdentifier metric = 1;
-
-  // target specifies the target value for the given metric
-  optional MetricTarget target = 2;
-}
-
-// PodsMetricStatus indicates the current value of a metric describing each pod in
-// the current scale target (for example, transactions-processed-per-second).
-message PodsMetricStatus {
-  // metric identifies the target metric by name and selector
-  optional MetricIdentifier metric = 1;
-
-  // current contains the current value for the given metric
-  optional MetricValueStatus current = 2;
-}
-
-// ResourceMetricSource indicates how to scale on a resource metric known to
-// Kubernetes, as specified in requests and limits, describing each pod in the
-// current scale target (e.g. CPU or memory). The values will be averaged
-// together before being compared to the target. Such metrics are built in to
-// Kubernetes, and have special scaling options on top of those available to
-// normal per-pod metrics using the "pods" source. Only one "target" type
-// should be set.
-message ResourceMetricSource {
-  // name is the name of the resource in question.
-  optional string name = 1;
-
-  // target specifies the target value for the given metric
-  optional MetricTarget target = 2;
-}
-
-// ResourceMetricStatus indicates the current value of a resource metric known to
-// Kubernetes, as specified in requests and limits, describing each pod in the
-// current scale target (e.g. CPU or memory). Such metrics are built in to
-// Kubernetes, and have special scaling options on top of those available to
-// normal per-pod metrics using the "pods" source.
-message ResourceMetricStatus {
-  // Name is the name of the resource in question.
- optional string name = 1; - - // current contains the current value for the given metric - optional MetricValueStatus current = 2; -} - diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/register.go b/vendor/k8s.io/api/autoscaling/v2beta2/register.go deleted file mode 100644 index eb1265c..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta2/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2beta2 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "autoscaling" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2beta2"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &HorizontalPodAutoscaler{}, - &HorizontalPodAutoscalerList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types.go b/vendor/k8s.io/api/autoscaling/v2beta2/types.go deleted file mode 100644 index 2d33795..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types.go +++ /dev/null @@ -1,393 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:openapi-gen=true - -package v2beta2 - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// HorizontalPodAutoscaler is the configuration for a horizontal pod -// autoscaler, which automatically manages the replica count of any resource -// implementing the scale subresource based on the metrics specified. -type HorizontalPodAutoscaler struct { - metav1.TypeMeta `json:",inline"` - // metadata is the standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec is the specification for the behaviour of the autoscaler. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec HorizontalPodAutoscalerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // status is the current information about the autoscaler. - // +optional - Status HorizontalPodAutoscalerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler. -type HorizontalPodAutoscalerSpec struct { - // scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics - // should be collected, as well as to actually change the replica count. - ScaleTargetRef CrossVersionObjectReference `json:"scaleTargetRef" protobuf:"bytes,1,opt,name=scaleTargetRef"` - // minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. - // It defaults to 1 pod. - // +optional - MinReplicas *int32 `json:"minReplicas,omitempty" protobuf:"varint,2,opt,name=minReplicas"` - // maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. - // It cannot be less that minReplicas. - MaxReplicas int32 `json:"maxReplicas" protobuf:"varint,3,opt,name=maxReplicas"` - // metrics contains the specifications for which to use to calculate the - // desired replica count (the maximum replica count across all metrics will - // be used). The desired replica count is calculated multiplying the - // ratio between the target value and the current value by the current - // number of pods. Ergo, metrics used must decrease as the pod count is - // increased, and vice-versa. See the individual metric source types for - // more information about how each type of metric must respond. - // If not set, the default metric will be set to 80% average CPU utilization. - // +optional - Metrics []MetricSpec `json:"metrics,omitempty" protobuf:"bytes,4,rep,name=metrics"` -} - -// CrossVersionObjectReference contains enough information to let you identify the referred resource. -type CrossVersionObjectReference struct { - // Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds" - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names - Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - // API version of the referent - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,3,opt,name=apiVersion"` -} - -// MetricSpec specifies how to scale based on a single metric -// (only `type` and one other matching field should be set at once). -type MetricSpec struct { - // type is the type of metric source. It should be one of "Object", - // "Pods" or "Resource", each mapping to a matching field in the object. - Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). 
- // +optional - Object *ObjectMetricSource `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricSource `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricSource `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - External *ExternalMetricSource `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` -} - -// MetricSourceType indicates the type of metric. -type MetricSourceType string - -var ( - // ObjectMetricSourceType is a metric describing a kubernetes object - // (for example, hits-per-second on an Ingress object). - ObjectMetricSourceType MetricSourceType = "Object" - // PodsMetricSourceType is a metric describing each pod in the current scale - // target (for example, transactions-processed-per-second). The values - // will be averaged together before being compared to the target value. - PodsMetricSourceType MetricSourceType = "Pods" - // ResourceMetricSourceType is a resource metric known to Kubernetes, as - // specified in requests and limits, describing each pod in the current - // scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics (the "pods" source). - ResourceMetricSourceType MetricSourceType = "Resource" - // ExternalMetricSourceType is a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - ExternalMetricSourceType MetricSourceType = "External" -) - -// ObjectMetricSource indicates how to scale on a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricSource struct { - DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,1,name=describedObject"` - // target specifies the target value for the given metric - Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` - // metric identifies the target metric by name and selector - Metric MetricIdentifier `json:"metric" protobuf:"bytes,3,name=metric"` -} - -// PodsMetricSource indicates how to scale on a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). -// The values will be averaged together before being compared to the target -// value. 
-type PodsMetricSource struct { - // metric identifies the target metric by name and selector - Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` - // target specifies the target value for the given metric - Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` -} - -// ResourceMetricSource indicates how to scale on a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). The values will be averaged -// together before being compared to the target. Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. Only one "target" type -// should be set. -type ResourceMetricSource struct { - // name is the name of the resource in question. - Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` - // target specifies the target value for the given metric - Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` -} - -// ExternalMetricSource indicates how to scale on a metric not associated with -// any Kubernetes object (for example length of queue in cloud -// messaging service, or QPS from loadbalancer running outside of cluster). -type ExternalMetricSource struct { - // metric identifies the target metric by name and selector - Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` - // target specifies the target value for the given metric - Target MetricTarget `json:"target" protobuf:"bytes,2,name=target"` -} - -// MetricIdentifier defines the name and optionally selector for a metric -type MetricIdentifier struct { - // name is the name of the given metric - Name string `json:"name" protobuf:"bytes,1,name=name"` - // selector is the string-encoded form of a standard kubernetes label selector for the given metric - // When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. - // When unset, just the metricName will be used to gather metrics. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,name=selector"` -} - -// MetricTarget defines the target value, average value, or average utilization of a specific metric -type MetricTarget struct { - // type represents whether the metric type is Utilization, Value, or AverageValue - Type MetricTargetType `json:"type" protobuf:"bytes,1,name=type"` - // value is the target value of the metric (as a quantity). - // +optional - Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` - // averageValue is the target value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,3,opt,name=averageValue"` - // averageUtilization is the target value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. 
- // Currently only valid for Resource metric source type - // +optional - AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,4,opt,name=averageUtilization"` -} - -// MetricTargetType specifies the type of metric being targeted, and should be either -// "Value", "AverageValue", or "Utilization" -type MetricTargetType string - -var ( - // UtilizationMetricType declares a MetricTarget is an AverageUtilization value - UtilizationMetricType MetricTargetType = "Utilization" - // ValueMetricType declares a MetricTarget is a raw value - ValueMetricType MetricTargetType = "Value" - // AverageValueMetricType declares a MetricTarget is an - AverageValueMetricType MetricTargetType = "AverageValue" -) - -// HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler. -type HorizontalPodAutoscalerStatus struct { - // observedGeneration is the most recent generation observed by this autoscaler. - // +optional - ObservedGeneration *int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, - // used by the autoscaler to control how often the number of pods is changed. - // +optional - LastScaleTime *metav1.Time `json:"lastScaleTime,omitempty" protobuf:"bytes,2,opt,name=lastScaleTime"` - - // currentReplicas is current number of replicas of pods managed by this autoscaler, - // as last seen by the autoscaler. - CurrentReplicas int32 `json:"currentReplicas" protobuf:"varint,3,opt,name=currentReplicas"` - - // desiredReplicas is the desired number of replicas of pods managed by this autoscaler, - // as last calculated by the autoscaler. - DesiredReplicas int32 `json:"desiredReplicas" protobuf:"varint,4,opt,name=desiredReplicas"` - - // currentMetrics is the last read state of the metrics used by this autoscaler. - // +optional - CurrentMetrics []MetricStatus `json:"currentMetrics" protobuf:"bytes,5,rep,name=currentMetrics"` - - // conditions is the set of conditions required for this autoscaler to scale its target, - // and indicates whether or not those conditions are met. - Conditions []HorizontalPodAutoscalerCondition `json:"conditions" protobuf:"bytes,6,rep,name=conditions"` -} - -// HorizontalPodAutoscalerConditionType are the valid conditions of -// a HorizontalPodAutoscaler. -type HorizontalPodAutoscalerConditionType string - -var ( - // ScalingActive indicates that the HPA controller is able to scale if necessary: - // it's correctly configured, can fetch the desired metrics, and isn't disabled. - ScalingActive HorizontalPodAutoscalerConditionType = "ScalingActive" - // AbleToScale indicates a lack of transient issues which prevent scaling from occurring, - // such as being in a backoff window, or being unable to access/update the target scale. - AbleToScale HorizontalPodAutoscalerConditionType = "AbleToScale" - // ScalingLimited indicates that the calculated scale based on metrics would be above or - // below the range for the HPA, and has thus been capped. - ScalingLimited HorizontalPodAutoscalerConditionType = "ScalingLimited" -) - -// HorizontalPodAutoscalerCondition describes the state of -// a HorizontalPodAutoscaler at a certain point. 
-type HorizontalPodAutoscalerCondition struct { - // type describes the current condition - Type HorizontalPodAutoscalerConditionType `json:"type" protobuf:"bytes,1,name=type"` - // status is the status of the condition (True, False, Unknown) - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,name=status"` - // lastTransitionTime is the last time the condition transitioned from - // one status to another - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // reason is the reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // message is a human-readable explanation containing details about - // the transition - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// MetricStatus describes the last-read state of a single metric. -type MetricStatus struct { - // type is the type of metric source. It will be one of "Object", - // "Pods" or "Resource", each corresponds to a matching field in the object. - Type MetricSourceType `json:"type" protobuf:"bytes,1,name=type"` - - // object refers to a metric describing a single kubernetes object - // (for example, hits-per-second on an Ingress object). - // +optional - Object *ObjectMetricStatus `json:"object,omitempty" protobuf:"bytes,2,opt,name=object"` - // pods refers to a metric describing each pod in the current scale target - // (for example, transactions-processed-per-second). The values will be - // averaged together before being compared to the target value. - // +optional - Pods *PodsMetricStatus `json:"pods,omitempty" protobuf:"bytes,3,opt,name=pods"` - // resource refers to a resource metric (such as those specified in - // requests and limits) known to Kubernetes describing each pod in the - // current scale target (e.g. CPU or memory). Such metrics are built in to - // Kubernetes, and have special scaling options on top of those available - // to normal per-pod metrics using the "pods" source. - // +optional - Resource *ResourceMetricStatus `json:"resource,omitempty" protobuf:"bytes,4,opt,name=resource"` - // external refers to a global metric that is not associated - // with any Kubernetes object. It allows autoscaling based on information - // coming from components running outside of cluster - // (for example length of queue in cloud messaging service, or - // QPS from loadbalancer running outside of cluster). - // +optional - External *ExternalMetricStatus `json:"external,omitempty" protobuf:"bytes,5,opt,name=external"` -} - -// ObjectMetricStatus indicates the current value of a metric describing a -// kubernetes object (for example, hits-per-second on an Ingress object). -type ObjectMetricStatus struct { - // metric identifies the target metric by name and selector - Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` - // current contains the current value for the given metric - Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` - - DescribedObject CrossVersionObjectReference `json:"describedObject" protobuf:"bytes,3,name=describedObject"` -} - -// PodsMetricStatus indicates the current value of a metric describing each pod in -// the current scale target (for example, transactions-processed-per-second). 
-type PodsMetricStatus struct { - // metric identifies the target metric by name and selector - Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` - // current contains the current value for the given metric - Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` -} - -// ResourceMetricStatus indicates the current value of a resource metric known to -// Kubernetes, as specified in requests and limits, describing each pod in the -// current scale target (e.g. CPU or memory). Such metrics are built in to -// Kubernetes, and have special scaling options on top of those available to -// normal per-pod metrics using the "pods" source. -type ResourceMetricStatus struct { - // Name is the name of the resource in question. - Name v1.ResourceName `json:"name" protobuf:"bytes,1,name=name"` - // current contains the current value for the given metric - Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` -} - -// ExternalMetricStatus indicates the current value of a global metric -// not associated with any Kubernetes object. -type ExternalMetricStatus struct { - // metric identifies the target metric by name and selector - Metric MetricIdentifier `json:"metric" protobuf:"bytes,1,name=metric"` - // current contains the current value for the given metric - Current MetricValueStatus `json:"current" protobuf:"bytes,2,name=current"` -} - -// MetricValueStatus holds the current value for a metric -type MetricValueStatus struct { - // value is the current value of the metric (as a quantity). - // +optional - Value *resource.Quantity `json:"value,omitempty" protobuf:"bytes,1,opt,name=value"` - // averageValue is the current value of the average of the - // metric across all relevant pods (as a quantity) - // +optional - AverageValue *resource.Quantity `json:"averageValue,omitempty" protobuf:"bytes,2,opt,name=averageValue"` - // currentAverageUtilization is the current value of the average of the - // resource metric across all relevant pods, represented as a percentage of - // the requested value of the resource for the pods. - // +optional - AverageUtilization *int32 `json:"averageUtilization,omitempty" protobuf:"bytes,3,opt,name=averageUtilization"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects. -type HorizontalPodAutoscalerList struct { - metav1.TypeMeta `json:",inline"` - // metadata is the standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of horizontal pod autoscaler objects. - Items []HorizontalPodAutoscaler `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go b/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go deleted file mode 100644 index 996dc18..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta2/types_swagger_doc_generated.go +++ /dev/null @@ -1,240 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2beta2 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_CrossVersionObjectReference = map[string]string{ - "": "CrossVersionObjectReference contains enough information to let you identify the referred resource.", - "kind": "Kind of the referent; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds\"", - "name": "Name of the referent; More info: http://kubernetes.io/docs/user-guide/identifiers#names", - "apiVersion": "API version of the referent", -} - -func (CrossVersionObjectReference) SwaggerDoc() map[string]string { - return map_CrossVersionObjectReference -} - -var map_ExternalMetricSource = map[string]string{ - "": "ExternalMetricSource indicates how to scale on a metric not associated with any Kubernetes object (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", - "metric": "metric identifies the target metric by name and selector", - "target": "target specifies the target value for the given metric", -} - -func (ExternalMetricSource) SwaggerDoc() map[string]string { - return map_ExternalMetricSource -} - -var map_ExternalMetricStatus = map[string]string{ - "": "ExternalMetricStatus indicates the current value of a global metric not associated with any Kubernetes object.", - "metric": "metric identifies the target metric by name and selector", - "current": "current contains the current value for the given metric", -} - -func (ExternalMetricStatus) SwaggerDoc() map[string]string { - return map_ExternalMetricStatus -} - -var map_HorizontalPodAutoscaler = map[string]string{ - "": "HorizontalPodAutoscaler is the configuration for a horizontal pod autoscaler, which automatically manages the replica count of any resource implementing the scale subresource based on the metrics specified.", - "metadata": "metadata is the standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec is the specification for the behaviour of the autoscaler. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "status is the current information about the autoscaler.", -} - -func (HorizontalPodAutoscaler) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscaler -} - -var map_HorizontalPodAutoscalerCondition = map[string]string{ - "": "HorizontalPodAutoscalerCondition describes the state of a HorizontalPodAutoscaler at a certain point.", - "type": "type describes the current condition", - "status": "status is the status of the condition (True, False, Unknown)", - "lastTransitionTime": "lastTransitionTime is the last time the condition transitioned from one status to another", - "reason": "reason is the reason for the condition's last transition.", - "message": "message is a human-readable explanation containing details about the transition", -} - -func (HorizontalPodAutoscalerCondition) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerCondition -} - -var map_HorizontalPodAutoscalerList = map[string]string{ - "": "HorizontalPodAutoscalerList is a list of horizontal pod autoscaler objects.", - "metadata": "metadata is the standard list metadata.", - "items": "items is the list of horizontal pod autoscaler objects.", -} - -func (HorizontalPodAutoscalerList) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerList -} - -var map_HorizontalPodAutoscalerSpec = map[string]string{ - "": "HorizontalPodAutoscalerSpec describes the desired functionality of the HorizontalPodAutoscaler.", - "scaleTargetRef": "scaleTargetRef points to the target resource to scale, and is used to the pods for which metrics should be collected, as well as to actually change the replica count.", - "minReplicas": "minReplicas is the lower limit for the number of replicas to which the autoscaler can scale down. It defaults to 1 pod.", - "maxReplicas": "maxReplicas is the upper limit for the number of replicas to which the autoscaler can scale up. It cannot be less that minReplicas.", - "metrics": "metrics contains the specifications for which to use to calculate the desired replica count (the maximum replica count across all metrics will be used). The desired replica count is calculated multiplying the ratio between the target value and the current value by the current number of pods. Ergo, metrics used must decrease as the pod count is increased, and vice-versa. See the individual metric source types for more information about how each type of metric must respond. 
If not set, the default metric will be set to 80% average CPU utilization.", -} - -func (HorizontalPodAutoscalerSpec) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerSpec -} - -var map_HorizontalPodAutoscalerStatus = map[string]string{ - "": "HorizontalPodAutoscalerStatus describes the current status of a horizontal pod autoscaler.", - "observedGeneration": "observedGeneration is the most recent generation observed by this autoscaler.", - "lastScaleTime": "lastScaleTime is the last time the HorizontalPodAutoscaler scaled the number of pods, used by the autoscaler to control how often the number of pods is changed.", - "currentReplicas": "currentReplicas is current number of replicas of pods managed by this autoscaler, as last seen by the autoscaler.", - "desiredReplicas": "desiredReplicas is the desired number of replicas of pods managed by this autoscaler, as last calculated by the autoscaler.", - "currentMetrics": "currentMetrics is the last read state of the metrics used by this autoscaler.", - "conditions": "conditions is the set of conditions required for this autoscaler to scale its target, and indicates whether or not those conditions are met.", -} - -func (HorizontalPodAutoscalerStatus) SwaggerDoc() map[string]string { - return map_HorizontalPodAutoscalerStatus -} - -var map_MetricIdentifier = map[string]string{ - "": "MetricIdentifier defines the name and optionally selector for a metric", - "name": "name is the name of the given metric", - "selector": "selector is the string-encoded form of a standard kubernetes label selector for the given metric When set, it is passed as an additional parameter to the metrics server for more specific metrics scoping. When unset, just the metricName will be used to gather metrics.", -} - -func (MetricIdentifier) SwaggerDoc() map[string]string { - return map_MetricIdentifier -} - -var map_MetricSpec = map[string]string{ - "": "MetricSpec specifies how to scale based on a single metric (only `type` and one other matching field should be set at once).", - "type": "type is the type of metric source. It should be one of \"Object\", \"Pods\" or \"Resource\", each mapping to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", -} - -func (MetricSpec) SwaggerDoc() map[string]string { - return map_MetricSpec -} - -var map_MetricStatus = map[string]string{ - "": "MetricStatus describes the last-read state of a single metric.", - "type": "type is the type of metric source. 
It will be one of \"Object\", \"Pods\" or \"Resource\", each corresponds to a matching field in the object.", - "object": "object refers to a metric describing a single kubernetes object (for example, hits-per-second on an Ingress object).", - "pods": "pods refers to a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "resource": "resource refers to a resource metric (such as those specified in requests and limits) known to Kubernetes describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "external": "external refers to a global metric that is not associated with any Kubernetes object. It allows autoscaling based on information coming from components running outside of cluster (for example length of queue in cloud messaging service, or QPS from loadbalancer running outside of cluster).", -} - -func (MetricStatus) SwaggerDoc() map[string]string { - return map_MetricStatus -} - -var map_MetricTarget = map[string]string{ - "": "MetricTarget defines the target value, average value, or average utilization of a specific metric", - "type": "type represents whether the metric type is Utilization, Value, or AverageValue", - "value": "value is the target value of the metric (as a quantity).", - "averageValue": "averageValue is the target value of the average of the metric across all relevant pods (as a quantity)", - "averageUtilization": "averageUtilization is the target value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods. 
Currently only valid for Resource metric source type", -} - -func (MetricTarget) SwaggerDoc() map[string]string { - return map_MetricTarget -} - -var map_MetricValueStatus = map[string]string{ - "": "MetricValueStatus holds the current value for a metric", - "value": "value is the current value of the metric (as a quantity).", - "averageValue": "averageValue is the current value of the average of the metric across all relevant pods (as a quantity)", - "averageUtilization": "currentAverageUtilization is the current value of the average of the resource metric across all relevant pods, represented as a percentage of the requested value of the resource for the pods.", -} - -func (MetricValueStatus) SwaggerDoc() map[string]string { - return map_MetricValueStatus -} - -var map_ObjectMetricSource = map[string]string{ - "": "ObjectMetricSource indicates how to scale on a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", - "target": "target specifies the target value for the given metric", - "metric": "metric identifies the target metric by name and selector", -} - -func (ObjectMetricSource) SwaggerDoc() map[string]string { - return map_ObjectMetricSource -} - -var map_ObjectMetricStatus = map[string]string{ - "": "ObjectMetricStatus indicates the current value of a metric describing a kubernetes object (for example, hits-per-second on an Ingress object).", - "metric": "metric identifies the target metric by name and selector", - "current": "current contains the current value for the given metric", -} - -func (ObjectMetricStatus) SwaggerDoc() map[string]string { - return map_ObjectMetricStatus -} - -var map_PodsMetricSource = map[string]string{ - "": "PodsMetricSource indicates how to scale on a metric describing each pod in the current scale target (for example, transactions-processed-per-second). The values will be averaged together before being compared to the target value.", - "metric": "metric identifies the target metric by name and selector", - "target": "target specifies the target value for the given metric", -} - -func (PodsMetricSource) SwaggerDoc() map[string]string { - return map_PodsMetricSource -} - -var map_PodsMetricStatus = map[string]string{ - "": "PodsMetricStatus indicates the current value of a metric describing each pod in the current scale target (for example, transactions-processed-per-second).", - "metric": "metric identifies the target metric by name and selector", - "current": "current contains the current value for the given metric", -} - -func (PodsMetricStatus) SwaggerDoc() map[string]string { - return map_PodsMetricStatus -} - -var map_ResourceMetricSource = map[string]string{ - "": "ResourceMetricSource indicates how to scale on a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). The values will be averaged together before being compared to the target. Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source. 
Only one \"target\" type should be set.", - "name": "name is the name of the resource in question.", - "target": "target specifies the target value for the given metric", -} - -func (ResourceMetricSource) SwaggerDoc() map[string]string { - return map_ResourceMetricSource -} - -var map_ResourceMetricStatus = map[string]string{ - "": "ResourceMetricStatus indicates the current value of a resource metric known to Kubernetes, as specified in requests and limits, describing each pod in the current scale target (e.g. CPU or memory). Such metrics are built in to Kubernetes, and have special scaling options on top of those available to normal per-pod metrics using the \"pods\" source.", - "name": "Name is the name of the resource in question.", - "current": "current contains the current value for the given metric", -} - -func (ResourceMetricStatus) SwaggerDoc() map[string]string { - return map_ResourceMetricStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go b/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go deleted file mode 100644 index a6a9565..0000000 --- a/vendor/k8s.io/api/autoscaling/v2beta2/zz_generated.deepcopy.go +++ /dev/null @@ -1,487 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v2beta2 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CrossVersionObjectReference) DeepCopyInto(out *CrossVersionObjectReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrossVersionObjectReference. -func (in *CrossVersionObjectReference) DeepCopy() *CrossVersionObjectReference { - if in == nil { - return nil - } - out := new(CrossVersionObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ExternalMetricSource) DeepCopyInto(out *ExternalMetricSource) { - *out = *in - in.Metric.DeepCopyInto(&out.Metric) - in.Target.DeepCopyInto(&out.Target) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricSource. -func (in *ExternalMetricSource) DeepCopy() *ExternalMetricSource { - if in == nil { - return nil - } - out := new(ExternalMetricSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ExternalMetricStatus) DeepCopyInto(out *ExternalMetricStatus) { - *out = *in - in.Metric.DeepCopyInto(&out.Metric) - in.Current.DeepCopyInto(&out.Current) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExternalMetricStatus. -func (in *ExternalMetricStatus) DeepCopy() *ExternalMetricStatus { - if in == nil { - return nil - } - out := new(ExternalMetricStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscaler) DeepCopyInto(out *HorizontalPodAutoscaler) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscaler. -func (in *HorizontalPodAutoscaler) DeepCopy() *HorizontalPodAutoscaler { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscaler) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HorizontalPodAutoscaler) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerCondition) DeepCopyInto(out *HorizontalPodAutoscalerCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerCondition. -func (in *HorizontalPodAutoscalerCondition) DeepCopy() *HorizontalPodAutoscalerCondition { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HorizontalPodAutoscalerList) DeepCopyInto(out *HorizontalPodAutoscalerList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]HorizontalPodAutoscaler, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerList. -func (in *HorizontalPodAutoscalerList) DeepCopy() *HorizontalPodAutoscalerList { - if in == nil { - return nil - } - out := new(HorizontalPodAutoscalerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *HorizontalPodAutoscalerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *HorizontalPodAutoscalerSpec) DeepCopyInto(out *HorizontalPodAutoscalerSpec) {
-	*out = *in
-	out.ScaleTargetRef = in.ScaleTargetRef
-	if in.MinReplicas != nil {
-		in, out := &in.MinReplicas, &out.MinReplicas
-		*out = new(int32)
-		**out = **in
-	}
-	if in.Metrics != nil {
-		in, out := &in.Metrics, &out.Metrics
-		*out = make([]MetricSpec, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerSpec.
-func (in *HorizontalPodAutoscalerSpec) DeepCopy() *HorizontalPodAutoscalerSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(HorizontalPodAutoscalerSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *HorizontalPodAutoscalerStatus) DeepCopyInto(out *HorizontalPodAutoscalerStatus) {
-	*out = *in
-	if in.ObservedGeneration != nil {
-		in, out := &in.ObservedGeneration, &out.ObservedGeneration
-		*out = new(int64)
-		**out = **in
-	}
-	if in.LastScaleTime != nil {
-		in, out := &in.LastScaleTime, &out.LastScaleTime
-		*out = (*in).DeepCopy()
-	}
-	if in.CurrentMetrics != nil {
-		in, out := &in.CurrentMetrics, &out.CurrentMetrics
-		*out = make([]MetricStatus, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	if in.Conditions != nil {
-		in, out := &in.Conditions, &out.Conditions
-		*out = make([]HorizontalPodAutoscalerCondition, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HorizontalPodAutoscalerStatus.
-func (in *HorizontalPodAutoscalerStatus) DeepCopy() *HorizontalPodAutoscalerStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(HorizontalPodAutoscalerStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MetricIdentifier) DeepCopyInto(out *MetricIdentifier) {
-	*out = *in
-	if in.Selector != nil {
-		in, out := &in.Selector, &out.Selector
-		*out = new(v1.LabelSelector)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricIdentifier.
-func (in *MetricIdentifier) DeepCopy() *MetricIdentifier {
-	if in == nil {
-		return nil
-	}
-	out := new(MetricIdentifier)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MetricSpec) DeepCopyInto(out *MetricSpec) {
-	*out = *in
-	if in.Object != nil {
-		in, out := &in.Object, &out.Object
-		*out = new(ObjectMetricSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Pods != nil {
-		in, out := &in.Pods, &out.Pods
-		*out = new(PodsMetricSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Resource != nil {
-		in, out := &in.Resource, &out.Resource
-		*out = new(ResourceMetricSource)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.External != nil {
-		in, out := &in.External, &out.External
-		*out = new(ExternalMetricSource)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricSpec.
-func (in *MetricSpec) DeepCopy() *MetricSpec {
-	if in == nil {
-		return nil
-	}
-	out := new(MetricSpec)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MetricStatus) DeepCopyInto(out *MetricStatus) {
-	*out = *in
-	if in.Object != nil {
-		in, out := &in.Object, &out.Object
-		*out = new(ObjectMetricStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Pods != nil {
-		in, out := &in.Pods, &out.Pods
-		*out = new(PodsMetricStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.Resource != nil {
-		in, out := &in.Resource, &out.Resource
-		*out = new(ResourceMetricStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	if in.External != nil {
-		in, out := &in.External, &out.External
-		*out = new(ExternalMetricStatus)
-		(*in).DeepCopyInto(*out)
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricStatus.
-func (in *MetricStatus) DeepCopy() *MetricStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(MetricStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MetricTarget) DeepCopyInto(out *MetricTarget) {
-	*out = *in
-	if in.Value != nil {
-		in, out := &in.Value, &out.Value
-		x := (*in).DeepCopy()
-		*out = &x
-	}
-	if in.AverageValue != nil {
-		in, out := &in.AverageValue, &out.AverageValue
-		x := (*in).DeepCopy()
-		*out = &x
-	}
-	if in.AverageUtilization != nil {
-		in, out := &in.AverageUtilization, &out.AverageUtilization
-		*out = new(int32)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricTarget.
-func (in *MetricTarget) DeepCopy() *MetricTarget {
-	if in == nil {
-		return nil
-	}
-	out := new(MetricTarget)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MetricValueStatus) DeepCopyInto(out *MetricValueStatus) {
-	*out = *in
-	if in.Value != nil {
-		in, out := &in.Value, &out.Value
-		x := (*in).DeepCopy()
-		*out = &x
-	}
-	if in.AverageValue != nil {
-		in, out := &in.AverageValue, &out.AverageValue
-		x := (*in).DeepCopy()
-		*out = &x
-	}
-	if in.AverageUtilization != nil {
-		in, out := &in.AverageUtilization, &out.AverageUtilization
-		*out = new(int32)
-		**out = **in
-	}
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MetricValueStatus.
-func (in *MetricValueStatus) DeepCopy() *MetricValueStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(MetricValueStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectMetricSource) DeepCopyInto(out *ObjectMetricSource) {
-	*out = *in
-	out.DescribedObject = in.DescribedObject
-	in.Target.DeepCopyInto(&out.Target)
-	in.Metric.DeepCopyInto(&out.Metric)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricSource.
-func (in *ObjectMetricSource) DeepCopy() *ObjectMetricSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ObjectMetricSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ObjectMetricStatus) DeepCopyInto(out *ObjectMetricStatus) {
-	*out = *in
-	in.Metric.DeepCopyInto(&out.Metric)
-	in.Current.DeepCopyInto(&out.Current)
-	out.DescribedObject = in.DescribedObject
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ObjectMetricStatus.
-func (in *ObjectMetricStatus) DeepCopy() *ObjectMetricStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ObjectMetricStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodsMetricSource) DeepCopyInto(out *PodsMetricSource) {
-	*out = *in
-	in.Metric.DeepCopyInto(&out.Metric)
-	in.Target.DeepCopyInto(&out.Target)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricSource.
-func (in *PodsMetricSource) DeepCopy() *PodsMetricSource {
-	if in == nil {
-		return nil
-	}
-	out := new(PodsMetricSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *PodsMetricStatus) DeepCopyInto(out *PodsMetricStatus) {
-	*out = *in
-	in.Metric.DeepCopyInto(&out.Metric)
-	in.Current.DeepCopyInto(&out.Current)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodsMetricStatus.
-func (in *PodsMetricStatus) DeepCopy() *PodsMetricStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(PodsMetricStatus)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceMetricSource) DeepCopyInto(out *ResourceMetricSource) {
-	*out = *in
-	in.Target.DeepCopyInto(&out.Target)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricSource.
-func (in *ResourceMetricSource) DeepCopy() *ResourceMetricSource {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceMetricSource)
-	in.DeepCopyInto(out)
-	return out
-}
-
-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *ResourceMetricStatus) DeepCopyInto(out *ResourceMetricStatus) {
-	*out = *in
-	in.Current.DeepCopyInto(&out.Current)
-	return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceMetricStatus.
-func (in *ResourceMetricStatus) DeepCopy() *ResourceMetricStatus {
-	if in == nil {
-		return nil
-	}
-	out := new(ResourceMetricStatus)
-	in.DeepCopyInto(out)
-	return out
-}
diff --git a/vendor/k8s.io/api/batch/v1/doc.go b/vendor/k8s.io/api/batch/v1/doc.go
deleted file mode 100644
index 0449180..0000000
--- a/vendor/k8s.io/api/batch/v1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// +k8s:deepcopy-gen=package
-// +k8s:openapi-gen=true
-
-package v1 // import "k8s.io/api/batch/v1"
diff --git a/vendor/k8s.io/api/batch/v1/generated.pb.go b/vendor/k8s.io/api/batch/v1/generated.pb.go
deleted file mode 100644
index 097a6ff..0000000
--- a/vendor/k8s.io/api/batch/v1/generated.pb.go
+++ /dev/null
@@ -1,1646 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by protoc-gen-gogo.
-// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto
-// DO NOT EDIT!
-
-/*
-	Package v1 is a generated protocol buffer package.
-
-	It is generated from these files:
-		k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto
-
-	It has these top-level messages:
-		Job
-		JobCondition
-		JobList
-		JobSpec
-		JobStatus
-*/
-package v1
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import k8s_io_api_core_v1 "k8s.io/api/core/v1"
-import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-
-import strings "strings"
-import reflect "reflect"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *Job) Reset() { *m = Job{} } -func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *JobCondition) Reset() { *m = JobCondition{} } -func (*JobCondition) ProtoMessage() {} -func (*JobCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *JobList) Reset() { *m = JobList{} } -func (*JobList) ProtoMessage() {} -func (*JobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *JobSpec) Reset() { *m = JobSpec{} } -func (*JobSpec) ProtoMessage() {} -func (*JobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *JobStatus) Reset() { *m = JobStatus{} } -func (*JobStatus) ProtoMessage() {} -func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func init() { - proto.RegisterType((*Job)(nil), "k8s.io.api.batch.v1.Job") - proto.RegisterType((*JobCondition)(nil), "k8s.io.api.batch.v1.JobCondition") - proto.RegisterType((*JobList)(nil), "k8s.io.api.batch.v1.JobList") - proto.RegisterType((*JobSpec)(nil), "k8s.io.api.batch.v1.JobSpec") - proto.RegisterType((*JobStatus)(nil), "k8s.io.api.batch.v1.JobStatus") -} -func (m *Job) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Job) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *JobCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastProbeTime.Size())) - n4, err := m.LastProbeTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n5, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *JobList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func 
(m *JobList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *JobSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Parallelism != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Parallelism)) - } - if m.Completions != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n7, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.ManualSelector != nil { - dAtA[i] = 0x28 - i++ - if *m.ManualSelector { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if m.BackoffLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.BackoffLimit)) - } - if m.TTLSecondsAfterFinished != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TTLSecondsAfterFinished)) - } - return i, nil -} - -func (m *JobStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.StartTime != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StartTime.Size())) - n9, err := m.StartTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - if m.CompletionTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CompletionTime.Size())) - n10, err := m.CompletionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Succeeded)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Failed)) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - 
return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Job) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Status) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastProbeTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastTransitionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *JobSpec) Size() (n int) { - var l int - _ = l - if m.Parallelism != nil { - n += 1 + sovGenerated(uint64(*m.Parallelism)) - } - if m.Completions != nil { - n += 1 + sovGenerated(uint64(*m.Completions)) - } - if m.ActiveDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.ActiveDeadlineSeconds)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ManualSelector != nil { - n += 2 - } - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.BackoffLimit != nil { - n += 1 + sovGenerated(uint64(*m.BackoffLimit)) - } - if m.TTLSecondsAfterFinished != nil { - n += 1 + sovGenerated(uint64(*m.TTLSecondsAfterFinished)) - } - return n -} - -func (m *JobStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.StartTime != nil { - l = m.StartTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CompletionTime != nil { - l = m.CompletionTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - n += 1 + sovGenerated(uint64(m.Active)) - n += 1 + sovGenerated(uint64(m.Succeeded)) - n += 1 + sovGenerated(uint64(m.Failed)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Job) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Job{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "JobSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "JobStatus", "JobStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobCondition{`, - `Type:` + fmt.Sprintf("%v", 
this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *JobList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Job", "Job", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobSpec{`, - `Parallelism:` + valueToStringGenerated(this.Parallelism) + `,`, - `Completions:` + valueToStringGenerated(this.Completions) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `ManualSelector:` + valueToStringGenerated(this.ManualSelector) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `BackoffLimit:` + valueToStringGenerated(this.BackoffLimit) + `,`, - `TTLSecondsAfterFinished:` + valueToStringGenerated(this.TTLSecondsAfterFinished) + `,`, - `}`, - }, "") - return s -} -func (this *JobStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "JobCondition", "JobCondition", 1), `&`, ``, 1) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `CompletionTime:` + strings.Replace(fmt.Sprintf("%v", this.CompletionTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `Active:` + fmt.Sprintf("%v", this.Active) + `,`, - `Succeeded:` + fmt.Sprintf("%v", this.Succeeded) + `,`, - `Failed:` + fmt.Sprintf("%v", this.Failed) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Job) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Job: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Job: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var 
msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = JobConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastProbeTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastProbeTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { 
- return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Job{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Parallelism", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Parallelism = &v - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Completions", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Completions = &v - case 3: - 
if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ActiveDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ActiveDeadlineSeconds = &v - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ManualSelector", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ManualSelector = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field BackoffLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.BackoffLimit = &v - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TTLSecondsAfterFinished", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TTLSecondsAfterFinished = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, JobCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StartTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StartTime == nil { - m.StartTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.StartTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CompletionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CompletionTime == nil { - m.CompletionTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.CompletionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - m.Active = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Active |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType) - } - m.Succeeded = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Succeeded |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Failed", wireType) - } - m.Failed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Failed |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 929 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5d, 0x6f, 0xe3, 0x44, - 0x14, 0xad, 0x9b, 0xa6, 0x4d, 0xa6, 0x1f, 0x5b, 0x06, 0x55, 0x1b, 0x0a, 0xb2, 0x97, 0x20, 0xa1, - 0x82, 0x84, 0x4d, 0x4b, 0x85, 0x10, 0x02, 0xa4, 0x75, 0x51, 0x25, 0xaa, 0x54, 0x5b, 0x26, 0x59, - 0x21, 0x21, 0x90, 0x18, 0xdb, 0x37, 0x89, 0x89, 0xed, 0xb1, 0x3c, 0x93, 0x48, 0x7d, 0xe3, 0x27, - 0xf0, 0x23, 0x10, 0x7f, 0x82, 0x77, 0xd4, 0xc7, 0x7d, 0xdc, 0x27, 0x8b, 0x9a, 0x1f, 0xc0, 0xfb, - 0x3e, 0xa1, 0x19, 0x3b, 0xb6, 0xd3, 0x26, 0xa2, 0xcb, 0x5b, 0xe6, 0xcc, 0x39, 0xe7, 0x5e, 0xdf, - 0x39, 0xb9, 0xe8, 0x8b, 0xc9, 0x67, 0xdc, 0xf4, 0x99, 0x35, 0x99, 0x3a, 0x90, 0x44, 0x20, 0x80, - 0x5b, 0x33, 0x88, 0x3c, 0x96, 0x58, 0xc5, 0x05, 0x8d, 0x7d, 0xcb, 0xa1, 0xc2, 0x1d, 0x5b, 0xb3, - 0x63, 0x6b, 0x04, 0x11, 0x24, 0x54, 0x80, 0x67, 0xc6, 0x09, 0x13, 0x0c, 0xbf, 0x99, 0x93, 0x4c, - 0x1a, 0xfb, 0xa6, 0x22, 
0x99, 0xb3, 0xe3, 0xc3, 0x8f, 0x46, 0xbe, 0x18, 0x4f, 0x1d, 0xd3, 0x65, - 0xa1, 0x35, 0x62, 0x23, 0x66, 0x29, 0xae, 0x33, 0x1d, 0xaa, 0x93, 0x3a, 0xa8, 0x5f, 0xb9, 0xc7, - 0x61, 0xb7, 0x56, 0xc8, 0x65, 0x09, 0x2c, 0xa9, 0x73, 0x78, 0x5a, 0x71, 0x42, 0xea, 0x8e, 0xfd, - 0x08, 0x92, 0x6b, 0x2b, 0x9e, 0x8c, 0x24, 0xc0, 0xad, 0x10, 0x04, 0x5d, 0xa6, 0xb2, 0x56, 0xa9, - 0x92, 0x69, 0x24, 0xfc, 0x10, 0xee, 0x09, 0x3e, 0xfd, 0x2f, 0x01, 0x77, 0xc7, 0x10, 0xd2, 0xbb, - 0xba, 0xee, 0x3f, 0x1a, 0x6a, 0x5c, 0x30, 0x07, 0xff, 0x84, 0x5a, 0xb2, 0x17, 0x8f, 0x0a, 0xda, - 0xd1, 0x9e, 0x68, 0x47, 0xdb, 0x27, 0x1f, 0x9b, 0xd5, 0x84, 0x4a, 0x4b, 0x33, 0x9e, 0x8c, 0x24, - 0xc0, 0x4d, 0xc9, 0x36, 0x67, 0xc7, 0xe6, 0x33, 0xe7, 0x67, 0x70, 0xc5, 0x25, 0x08, 0x6a, 0xe3, - 0x9b, 0xd4, 0x58, 0xcb, 0x52, 0x03, 0x55, 0x18, 0x29, 0x5d, 0xf1, 0x57, 0x68, 0x83, 0xc7, 0xe0, - 0x76, 0xd6, 0x95, 0xfb, 0x3b, 0xe6, 0x92, 0xf9, 0x9b, 0x17, 0xcc, 0xe9, 0xc7, 0xe0, 0xda, 0x3b, - 0x85, 0xd3, 0x86, 0x3c, 0x11, 0xa5, 0xc3, 0xe7, 0x68, 0x93, 0x0b, 0x2a, 0xa6, 0xbc, 0xd3, 0x50, - 0x0e, 0xfa, 0x4a, 0x07, 0xc5, 0xb2, 0xf7, 0x0a, 0x8f, 0xcd, 0xfc, 0x4c, 0x0a, 0x75, 0xf7, 0xcf, - 0x06, 0xda, 0xb9, 0x60, 0xce, 0x19, 0x8b, 0x3c, 0x5f, 0xf8, 0x2c, 0xc2, 0xa7, 0x68, 0x43, 0x5c, - 0xc7, 0xa0, 0x3e, 0xbb, 0x6d, 0x3f, 0x99, 0x97, 0x1e, 0x5c, 0xc7, 0xf0, 0x2a, 0x35, 0xf6, 0xeb, - 0x5c, 0x89, 0x11, 0xc5, 0xc6, 0xbd, 0xb2, 0x9d, 0x75, 0xa5, 0x3b, 0x5d, 0x2c, 0xf7, 0x2a, 0x35, - 0x96, 0xa4, 0xc3, 0x2c, 0x9d, 0x16, 0x9b, 0xc2, 0x23, 0xb4, 0x1b, 0x50, 0x2e, 0xae, 0x12, 0xe6, - 0xc0, 0xc0, 0x0f, 0xa1, 0xf8, 0xc6, 0x0f, 0x1f, 0xf6, 0x06, 0x52, 0x61, 0x1f, 0x14, 0x0d, 0xec, - 0xf6, 0xea, 0x46, 0x64, 0xd1, 0x17, 0xcf, 0x10, 0x96, 0xc0, 0x20, 0xa1, 0x11, 0xcf, 0x3f, 0x49, - 0x56, 0xdb, 0x78, 0xed, 0x6a, 0x87, 0x45, 0x35, 0xdc, 0xbb, 0xe7, 0x46, 0x96, 0x54, 0xc0, 0xef, - 0xa3, 0xcd, 0x04, 0x28, 0x67, 0x51, 0xa7, 0xa9, 0xc6, 0x55, 0xbe, 0x0e, 0x51, 0x28, 0x29, 0x6e, - 0xf1, 0x07, 0x68, 0x2b, 0x04, 0xce, 0xe9, 0x08, 0x3a, 0x9b, 0x8a, 0xf8, 0xa8, 0x20, 0x6e, 0x5d, - 0xe6, 0x30, 0x99, 0xdf, 0x77, 0x7f, 0xd7, 0xd0, 0xd6, 0x05, 0x73, 0x7a, 0x3e, 0x17, 0xf8, 0x87, - 0x7b, 0xf1, 0x35, 0x1f, 0xf6, 0x31, 0x52, 0xad, 0xc2, 0xbb, 0x5f, 0xd4, 0x69, 0xcd, 0x91, 0x5a, - 0x74, 0xbf, 0x44, 0x4d, 0x5f, 0x40, 0x28, 0x9f, 0xba, 0x71, 0xb4, 0x7d, 0xd2, 0x59, 0x95, 0x3c, - 0x7b, 0xb7, 0x30, 0x69, 0x7e, 0x23, 0xe9, 0x24, 0x57, 0x75, 0xff, 0xd8, 0x50, 0x8d, 0xca, 0x2c, - 0xe3, 0x63, 0xb4, 0x1d, 0xd3, 0x84, 0x06, 0x01, 0x04, 0x3e, 0x0f, 0x55, 0xaf, 0x4d, 0xfb, 0x51, - 0x96, 0x1a, 0xdb, 0x57, 0x15, 0x4c, 0xea, 0x1c, 0x29, 0x71, 0x59, 0x18, 0x07, 0x20, 0x87, 0x99, - 0xc7, 0xad, 0x90, 0x9c, 0x55, 0x30, 0xa9, 0x73, 0xf0, 0x33, 0x74, 0x40, 0x5d, 0xe1, 0xcf, 0xe0, - 0x6b, 0xa0, 0x5e, 0xe0, 0x47, 0xd0, 0x07, 0x97, 0x45, 0x5e, 0xfe, 0xd7, 0x69, 0xd8, 0x6f, 0x65, - 0xa9, 0x71, 0xf0, 0x74, 0x19, 0x81, 0x2c, 0xd7, 0xe1, 0x1f, 0x51, 0x8b, 0x43, 0x00, 0xae, 0x60, - 0x49, 0x11, 0x96, 0x4f, 0x1e, 0x38, 0x5f, 0xea, 0x40, 0xd0, 0x2f, 0xa4, 0xf6, 0x8e, 0x1c, 0xf0, - 0xfc, 0x44, 0x4a, 0x4b, 0xfc, 0x39, 0xda, 0x0b, 0x69, 0x34, 0xa5, 0x25, 0x53, 0xa5, 0xa4, 0x65, - 0xe3, 0x2c, 0x35, 0xf6, 0x2e, 0x17, 0x6e, 0xc8, 0x1d, 0x26, 0xfe, 0x16, 0xb5, 0x04, 0x84, 0x71, - 0x40, 0x45, 0x1e, 0x99, 0xed, 0x93, 0xf7, 0xea, 0xef, 0x23, 0xff, 0x79, 0xb2, 0x91, 0x2b, 0xe6, - 0x0d, 0x0a, 0x9a, 0x5a, 0x31, 0xe5, 0x7b, 0xcf, 0x51, 0x52, 0xda, 0xe0, 0x53, 0xb4, 0xe3, 0x50, - 0x77, 0xc2, 0x86, 0xc3, 0x9e, 0x1f, 0xfa, 0xa2, 0xb3, 0xa5, 0x46, 0xbe, 0x9f, 0xa5, 0xc6, 0x8e, - 0x5d, 0xc3, 0xc9, 0x02, 0x0b, 0x3f, 0x47, 0x8f, 
0x85, 0x08, 0x8a, 0x89, 0x3d, 0x1d, 0x0a, 0x48, - 0xce, 0xfd, 0xc8, 0xe7, 0x63, 0xf0, 0x3a, 0x2d, 0x65, 0xf0, 0x76, 0x96, 0x1a, 0x8f, 0x07, 0x83, - 0xde, 0x32, 0x0a, 0x59, 0xa5, 0xed, 0xfe, 0xd6, 0x40, 0xed, 0x72, 0xab, 0xe1, 0xe7, 0x08, 0xb9, - 0xf3, 0x1d, 0xc2, 0x3b, 0x9a, 0xca, 0xe3, 0xbb, 0xab, 0xf2, 0x58, 0x6e, 0x9b, 0x6a, 0x35, 0x97, - 0x10, 0x27, 0x35, 0x23, 0xfc, 0x1d, 0x6a, 0x73, 0x41, 0x13, 0xa1, 0xb6, 0xc1, 0xfa, 0x6b, 0x6f, - 0x83, 0xdd, 0x2c, 0x35, 0xda, 0xfd, 0xb9, 0x01, 0xa9, 0xbc, 0xf0, 0x10, 0xed, 0x55, 0xc1, 0xfc, - 0x9f, 0x9b, 0x4d, 0xa5, 0xe0, 0x6c, 0xc1, 0x85, 0xdc, 0x71, 0x95, 0xfb, 0x25, 0x4f, 0xae, 0x8a, - 0x67, 0xb3, 0xda, 0x2f, 0x79, 0xcc, 0x49, 0x71, 0x8b, 0x2d, 0xd4, 0xe6, 0x53, 0xd7, 0x05, 0xf0, - 0xc0, 0x53, 0x21, 0x6b, 0xda, 0x6f, 0x14, 0xd4, 0x76, 0x7f, 0x7e, 0x41, 0x2a, 0x8e, 0x34, 0x1e, - 0x52, 0x3f, 0x00, 0x4f, 0x85, 0xab, 0x66, 0x7c, 0xae, 0x50, 0x52, 0xdc, 0xda, 0x47, 0x37, 0xb7, - 0xfa, 0xda, 0x8b, 0x5b, 0x7d, 0xed, 0xe5, 0xad, 0xbe, 0xf6, 0x4b, 0xa6, 0x6b, 0x37, 0x99, 0xae, - 0xbd, 0xc8, 0x74, 0xed, 0x65, 0xa6, 0x6b, 0x7f, 0x65, 0xba, 0xf6, 0xeb, 0xdf, 0xfa, 0xda, 0xf7, - 0xeb, 0xb3, 0xe3, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x13, 0xdb, 0x98, 0xf9, 0xb8, 0x08, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1/generated.proto b/vendor/k8s.io/api/batch/v1/generated.proto deleted file mode 100644 index 039149d..0000000 --- a/vendor/k8s.io/api/batch/v1/generated.proto +++ /dev/null @@ -1,184 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.batch.v1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// Job represents the configuration of a single job. -message Job { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional JobSpec spec = 2; - - // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional JobStatus status = 3; -} - -// JobCondition describes current state of a job. -message JobCondition { - // Type of job condition, Complete or Failed. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition was checked. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; - - // Last time the condition transit from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - // +optional - optional string reason = 5; - - // Human readable message indicating details about last transition. - // +optional - optional string message = 6; -} - -// JobList is a collection of jobs. -message JobList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of Jobs. - repeated Job items = 2; -} - -// JobSpec describes how the job execution will look like. -message JobSpec { - // Specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - // +optional - optional int32 parallelism = 1; - - // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - // +optional - optional int32 completions = 2; - - // Specifies the duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - // +optional - optional int64 activeDeadlineSeconds = 3; - - // Specifies the number of retries before marking this job failed. - // Defaults to 6 - // +optional - optional int32 backoffLimit = 7; - - // A label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; - - // manualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector - // +optional - optional bool manualSelector = 5; - - // Describes the pod that will be created when executing a job. 
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - optional k8s.io.api.core.v1.PodTemplateSpec template = 6; - - // ttlSecondsAfterFinished limits the lifetime of a Job that has finished - // execution (either Complete or Failed). If this field is set, - // ttlSecondsAfterFinished after the Job finishes, it is eligible to be - // automatically deleted. When the Job is being deleted, its lifecycle - // guarantees (e.g. finalizers) will be honored. If this field is unset, - // the Job won't be automatically deleted. If this field is set to zero, - // the Job becomes eligible to be deleted immediately after it finishes. - // This field is alpha-level and is only honored by servers that enable the - // TTLAfterFinished feature. - // +optional - optional int32 ttlSecondsAfterFinished = 8; -} - -// JobStatus represents the current state of a Job. -message JobStatus { - // The latest available observations of an object's current state. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated JobCondition conditions = 1; - - // Represents time when the job was acknowledged by the job controller. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 2; - - // Represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time completionTime = 3; - - // The number of actively running pods. - // +optional - optional int32 active = 4; - - // The number of pods which reached phase Succeeded. - // +optional - optional int32 succeeded = 5; - - // The number of pods which reached phase Failed. - // +optional - optional int32 failed = 6; -} - diff --git a/vendor/k8s.io/api/batch/v1/register.go b/vendor/k8s.io/api/batch/v1/register.go deleted file mode 100644 index 32fa51f..0000000 --- a/vendor/k8s.io/api/batch/v1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "batch" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Job{}, - &JobList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/batch/v1/types.go b/vendor/k8s.io/api/batch/v1/types.go deleted file mode 100644 index 8dad904..0000000 --- a/vendor/k8s.io/api/batch/v1/types.go +++ /dev/null @@ -1,193 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Job represents the configuration of a single job. -type Job struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Current status of a job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status JobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// JobList is a collection of jobs. -type JobList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of Jobs. - Items []Job `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// JobSpec describes how the job execution will look like. 
-type JobSpec struct { - - // Specifies the maximum desired number of pods the job should - // run at any given time. The actual number of pods running in steady state will - // be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), - // i.e. when the work left to do is less than max parallelism. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - // +optional - Parallelism *int32 `json:"parallelism,omitempty" protobuf:"varint,1,opt,name=parallelism"` - - // Specifies the desired number of successfully finished pods the - // job should be run with. Setting to nil means that the success of any - // pod signals the success of all pods, and allows parallelism to have any positive - // value. Setting to 1 means that parallelism is limited to 1 and the success of that - // pod signals the success of the job. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - // +optional - Completions *int32 `json:"completions,omitempty" protobuf:"varint,2,opt,name=completions"` - - // Specifies the duration in seconds relative to the startTime that the job may be active - // before the system tries to terminate it; value must be positive integer - // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,3,opt,name=activeDeadlineSeconds"` - - // Specifies the number of retries before marking this job failed. - // Defaults to 6 - // +optional - BackoffLimit *int32 `json:"backoffLimit,omitempty" protobuf:"varint,7,opt,name=backoffLimit"` - - // TODO enabled it when https://github.com/kubernetes/kubernetes/issues/28486 has been fixed - // Optional number of failed pods to retain. - // +optional - // FailedPodsLimit *int32 `json:"failedPodsLimit,omitempty" protobuf:"varint,9,opt,name=failedPodsLimit"` - - // A label query over pods that should match the pod count. - // Normally, the system sets this field for you. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - - // manualSelector controls generation of pod labels and pod selectors. - // Leave `manualSelector` unset unless you are certain what you are doing. - // When false or unset, the system pick labels unique to this job - // and appends those labels to the pod template. When true, - // the user is responsible for picking unique labels and specifying - // the selector. Failure to pick a unique label may cause this - // and other jobs to not function correctly. However, You may see - // `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` - // API. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector - // +optional - ManualSelector *bool `json:"manualSelector,omitempty" protobuf:"varint,5,opt,name=manualSelector"` - - // Describes the pod that will be created when executing a job. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,6,opt,name=template"` - - // ttlSecondsAfterFinished limits the lifetime of a Job that has finished - // execution (either Complete or Failed). If this field is set, - // ttlSecondsAfterFinished after the Job finishes, it is eligible to be - // automatically deleted. 
When the Job is being deleted, its lifecycle - // guarantees (e.g. finalizers) will be honored. If this field is unset, - // the Job won't be automatically deleted. If this field is set to zero, - // the Job becomes eligible to be deleted immediately after it finishes. - // This field is alpha-level and is only honored by servers that enable the - // TTLAfterFinished feature. - // +optional - TTLSecondsAfterFinished *int32 `json:"ttlSecondsAfterFinished,omitempty" protobuf:"varint,8,opt,name=ttlSecondsAfterFinished"` -} - -// JobStatus represents the current state of a Job. -type JobStatus struct { - // The latest available observations of an object's current state. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/ - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []JobCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,1,rep,name=conditions"` - - // Represents time when the job was acknowledged by the job controller. - // It is not guaranteed to be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,2,opt,name=startTime"` - - // Represents time when the job was completed. It is not guaranteed to - // be set in happens-before order across separate operations. - // It is represented in RFC3339 form and is in UTC. - // +optional - CompletionTime *metav1.Time `json:"completionTime,omitempty" protobuf:"bytes,3,opt,name=completionTime"` - - // The number of actively running pods. - // +optional - Active int32 `json:"active,omitempty" protobuf:"varint,4,opt,name=active"` - - // The number of pods which reached phase Succeeded. - // +optional - Succeeded int32 `json:"succeeded,omitempty" protobuf:"varint,5,opt,name=succeeded"` - - // The number of pods which reached phase Failed. - // +optional - Failed int32 `json:"failed,omitempty" protobuf:"varint,6,opt,name=failed"` -} - -type JobConditionType string - -// These are valid conditions of a job. -const ( - // JobComplete means the job has completed its execution. - JobComplete JobConditionType = "Complete" - // JobFailed means the job has failed its execution. - JobFailed JobConditionType = "Failed" -) - -// JobCondition describes current state of a job. -type JobCondition struct { - // Type of job condition, Complete or Failed. - Type JobConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=JobConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Last time the condition was checked. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. 
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} diff --git a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go deleted file mode 100644 index d8e2bdd..0000000 --- a/vendor/k8s.io/api/batch/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Job = map[string]string{ - "": "Job represents the configuration of a single job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (Job) SwaggerDoc() map[string]string { - return map_Job -} - -var map_JobCondition = map[string]string{ - "": "JobCondition describes current state of a job.", - "type": "Type of job condition, Complete or Failed.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastProbeTime": "Last time the condition was checked.", - "lastTransitionTime": "Last time the condition transit from one status to another.", - "reason": "(brief) reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (JobCondition) SwaggerDoc() map[string]string { - return map_JobCondition -} - -var map_JobList = map[string]string{ - "": "JobList is a collection of jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is the list of Jobs.", -} - -func (JobList) SwaggerDoc() map[string]string { - return map_JobList -} - -var map_JobSpec = map[string]string{ - "": "JobSpec describes how the job execution will look like.", - "parallelism": "Specifies the maximum desired number of pods the job should run at any given time. The actual number of pods running in steady state will be less than this number when ((.spec.completions - .status.successful) < .spec.parallelism), i.e. when the work left to do is less than max parallelism. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "completions": "Specifies the desired number of successfully finished pods the job should be run with. Setting to nil means that the success of any pod signals the success of all pods, and allows parallelism to have any positive value. Setting to 1 means that parallelism is limited to 1 and the success of that pod signals the success of the job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "activeDeadlineSeconds": "Specifies the duration in seconds relative to the startTime that the job may be active before the system tries to terminate it; value must be positive integer", - "backoffLimit": "Specifies the number of retries before marking this job failed. Defaults to 6", - "selector": "A label query over pods that should match the pod count. Normally, the system sets this field for you. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "manualSelector": "manualSelector controls generation of pod labels and pod selectors. Leave `manualSelector` unset unless you are certain what you are doing. When false or unset, the system pick labels unique to this job and appends those labels to the pod template. When true, the user is responsible for picking unique labels and specifying the selector. Failure to pick a unique label may cause this and other jobs to not function correctly. However, You may see `manualSelector=true` in jobs that were created with the old `extensions/v1beta1` API. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/#specifying-your-own-pod-selector", - "template": "Describes the pod that will be created when executing a job. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "ttlSecondsAfterFinished": "ttlSecondsAfterFinished limits the lifetime of a Job that has finished execution (either Complete or Failed). If this field is set, ttlSecondsAfterFinished after the Job finishes, it is eligible to be automatically deleted. When the Job is being deleted, its lifecycle guarantees (e.g. finalizers) will be honored. If this field is unset, the Job won't be automatically deleted. If this field is set to zero, the Job becomes eligible to be deleted immediately after it finishes. This field is alpha-level and is only honored by servers that enable the TTLAfterFinished feature.", -} - -func (JobSpec) SwaggerDoc() map[string]string { - return map_JobSpec -} - -var map_JobStatus = map[string]string{ - "": "JobStatus represents the current state of a Job.", - "conditions": "The latest available observations of an object's current state. More info: https://kubernetes.io/docs/concepts/workloads/controllers/jobs-run-to-completion/", - "startTime": "Represents time when the job was acknowledged by the job controller. It is not guaranteed to be set in happens-before order across separate operations. It is represented in RFC3339 form and is in UTC.", - "completionTime": "Represents time when the job was completed. It is not guaranteed to be set in happens-before order across separate operations. 
It is represented in RFC3339 form and is in UTC.", - "active": "The number of actively running pods.", - "succeeded": "The number of pods which reached phase Succeeded.", - "failed": "The number of pods which reached phase Failed.", -} - -func (JobStatus) SwaggerDoc() map[string]string { - return map_JobStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go deleted file mode 100644 index 88cb016..0000000 --- a/vendor/k8s.io/api/batch/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,188 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Job) DeepCopyInto(out *Job) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Job. -func (in *Job) DeepCopy() *Job { - if in == nil { - return nil - } - out := new(Job) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Job) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobCondition) DeepCopyInto(out *JobCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobCondition. -func (in *JobCondition) DeepCopy() *JobCondition { - if in == nil { - return nil - } - out := new(JobCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobList) DeepCopyInto(out *JobList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Job, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobList. -func (in *JobList) DeepCopy() *JobList { - if in == nil { - return nil - } - out := new(JobList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *JobList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobSpec) DeepCopyInto(out *JobSpec) { - *out = *in - if in.Parallelism != nil { - in, out := &in.Parallelism, &out.Parallelism - *out = new(int32) - **out = **in - } - if in.Completions != nil { - in, out := &in.Completions, &out.Completions - *out = new(int32) - **out = **in - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.BackoffLimit != nil { - in, out := &in.BackoffLimit, &out.BackoffLimit - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.ManualSelector != nil { - in, out := &in.ManualSelector, &out.ManualSelector - *out = new(bool) - **out = **in - } - in.Template.DeepCopyInto(&out.Template) - if in.TTLSecondsAfterFinished != nil { - in, out := &in.TTLSecondsAfterFinished, &out.TTLSecondsAfterFinished - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSpec. -func (in *JobSpec) DeepCopy() *JobSpec { - if in == nil { - return nil - } - out := new(JobSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobStatus) DeepCopyInto(out *JobStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]JobCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() - } - if in.CompletionTime != nil { - in, out := &in.CompletionTime, &out.CompletionTime - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus. -func (in *JobStatus) DeepCopy() *JobStatus { - if in == nil { - return nil - } - out := new(JobStatus) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/batch/v1beta1/doc.go b/vendor/k8s.io/api/batch/v1beta1/doc.go deleted file mode 100644 index 43020ed..0000000 --- a/vendor/k8s.io/api/batch/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -package v1beta1 // import "k8s.io/api/batch/v1beta1" diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go b/vendor/k8s.io/api/batch/v1beta1/generated.pb.go deleted file mode 100644 index ece2204..0000000 --- a/vendor/k8s.io/api/batch/v1beta1/generated.pb.go +++ /dev/null @@ -1,1509 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func init() { - proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v1beta1.CronJob") - proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v1beta1.CronJobList") - proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v1beta1.CronJobSpec") - proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v1beta1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v1beta1.JobTemplate") - proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v1beta1.JobTemplateSpec") -} -func (m *CronJob) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err 
!= nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *CronJobList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) - if m.Suspend != nil { - dAtA[i] = 0x20 - i++ - if *m.Suspend { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) - } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) - } - return i, nil -} - -func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.LastScheduleTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} - -func (m 
*JobTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - return i, nil -} - -func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CronJob) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CronJobList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CronJobSpec) Size() (n int) { - var l int - _ = l - l = len(m.Schedule) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartingDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) - } - l = len(m.ConcurrencyPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.Suspend != nil { - n += 2 - } - l = m.JobTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.SuccessfulJobsHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) - } - if m.FailedJobsHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) - } - return n -} - -func (m *CronJobStatus) Size() (n int) { - var l int - _ = l - if len(m.Active) > 0 { - for _, e := range m.Active { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.LastScheduleTime != nil { - l = m.LastScheduleTime.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - return n -} - -func (m *JobTemplate) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CronJob) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CronJobList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJobList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CronJobSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJobSpec{`, - `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, - `StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, - `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, - `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, - `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, - `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *CronJobStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobTemplateSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), 
"ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CronJob) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJob: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CronJob{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schedule = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.StartingDeadlineSeconds = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Suspend = &b - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field JobTemplate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SuccessfulJobsHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.FailedJobsHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return 
fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) - if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if 
dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 771 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xc7, 0xe3, 0x34, 0xbf, 0x76, 0xc2, 0x42, 0xd7, 0xa0, 0x5d, 0x2b, 0x20, 0x27, 0x64, 0xb5, - 0x22, 0x20, 0x76, 0x4c, 0x2b, 0x84, 0x38, 0x21, 0xad, 0x17, 0x2d, 0x50, 0x8a, 0x16, 0x39, 0x45, - 0x48, 0xa8, 0x42, 0x1d, 0x8f, 0x5f, 0x92, 0x69, 0x6c, 0x8f, 0xe5, 0x19, 0x47, 0xca, 0x8d, 0x0b, - 0x77, 0xfe, 0x11, 0x4e, 0xfc, 0x13, 0x11, 0xa7, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x2f, 0x38, 0x21, - 0x4f, 0x9c, 0x1f, 0xcd, 0x8f, 0xb6, 0x7b, 0xe9, 0xcd, 0xf3, 0xe6, 0xfb, 0xfd, 0xcc, 0xf3, 0x7b, - 0x6f, 0x06, 0xbd, 0x18, 0x7e, 0x29, 0x30, 0xe3, 0xd6, 0x30, 0x71, 0x21, 0x0e, 0x41, 0x82, 0xb0, - 0x46, 0x10, 0x7a, 0x3c, 0xb6, 0xf2, 0x0d, 0x12, 0x31, 0xcb, 0x25, 0x92, 0x0e, 0xac, 0xd1, 0x81, - 0x0b, 0x92, 0x1c, 0x58, 0x7d, 0x08, 0x21, 0x26, 0x12, 0x3c, 0x1c, 0xc5, 0x5c, 0x72, 0xdd, 0x98, - 0x29, 0x31, 0x89, 0x18, 0x56, 0x4a, 0x9c, 0x2b, 0x1b, 0xcf, 0xfb, 0x4c, 0x0e, 0x12, 0x17, 0x53, - 0x1e, 0x58, 0x7d, 0xde, 0xe7, 0x96, 0x32, 0xb8, 0x49, 0x4f, 0xad, 0xd4, 0x42, 0x7d, 0xcd, 0x40, - 0x8d, 0xa7, 0x5b, 0x8e, 0x5c, 0x3f, 0xad, 0xd1, 0x5e, 0x11, 0x51, 0x1e, 0xc3, 0x36, 0xcd, 0xe7, - 0x4b, 0x4d, 0x40, 0xe8, 0x80, 0x85, 0x10, 0x8f, 0xad, 0x68, 0xd8, 0xcf, 0x02, 0xc2, 0x0a, 0x40, - 0x92, 0x6d, 0x2e, 0x6b, 0x97, 0x2b, 0x4e, 0x42, 0xc9, 0x02, 0xd8, 0x30, 0x7c, 0x71, 0x9b, 0x41, - 0xd0, 0x01, 0x04, 0x64, 0xdd, 0xd7, 0xfe, 0xbd, 0x88, 0xaa, 0x2f, 0x63, 0x1e, 0x1e, 0x71, 0x57, - 0x3f, 0x43, 0xb5, 0x2c, 0x1f, 0x8f, 0x48, 0x62, 0x68, 0x2d, 0xad, 0x53, 0x3f, 0xfc, 0x0c, 0x2f, - 0xeb, 0xb9, 0xc0, 0xe2, 0x68, 0xd8, 0xcf, 0x02, 0x02, 0x67, 0x6a, 0x3c, 0x3a, 0xc0, 0xaf, 0xdd, - 0x73, 0xa0, 0xf2, 0x07, 0x90, 0xc4, 0xd6, 0x27, 0xd3, 0x66, 0x21, 0x9d, 0x36, 0xd1, 0x32, 0xe6, - 0x2c, 0xa8, 0xfa, 0x37, 0xa8, 0x24, 0x22, 0xa0, 0x46, 0x51, 0xd1, 0x9f, 0xe1, 0x5d, 0xdd, 0xc2, - 0x79, 0x4a, 0xdd, 0x08, 0xa8, 0xfd, 0x56, 0x8e, 0x2c, 0x65, 0x2b, 0x47, 0x01, 0xf4, 0xd7, 0xa8, - 0x22, 0x24, 
0x91, 0x89, 0x30, 0xf6, 0x14, 0xea, 0xa3, 0xdb, 0x51, 0x4a, 0x6e, 0xbf, 0x9d, 0xc3, - 0x2a, 0xb3, 0xb5, 0x93, 0x63, 0xda, 0x7f, 0x69, 0xa8, 0x9e, 0x2b, 0x8f, 0x99, 0x90, 0xfa, 0xe9, - 0x46, 0x2d, 0xf0, 0xdd, 0x6a, 0x91, 0xb9, 0x55, 0x25, 0xf6, 0xf3, 0x93, 0x6a, 0xf3, 0xc8, 0x4a, - 0x1d, 0x5e, 0xa1, 0x32, 0x93, 0x10, 0x08, 0xa3, 0xd8, 0xda, 0xeb, 0xd4, 0x0f, 0x3f, 0xbc, 0x35, - 0x7b, 0xfb, 0x61, 0x4e, 0x2b, 0x7f, 0x97, 0xf9, 0x9c, 0x99, 0xbd, 0xfd, 0x67, 0x69, 0x91, 0x75, - 0x56, 0x1c, 0xfd, 0x53, 0x54, 0xcb, 0xfa, 0xec, 0x25, 0x3e, 0xa8, 0xac, 0x1f, 0x2c, 0xb3, 0xe8, - 0xe6, 0x71, 0x67, 0xa1, 0xd0, 0x7f, 0x42, 0x4f, 0x84, 0x24, 0xb1, 0x64, 0x61, 0xff, 0x6b, 0x20, - 0x9e, 0xcf, 0x42, 0xe8, 0x02, 0xe5, 0xa1, 0x27, 0x54, 0x83, 0xf6, 0xec, 0xf7, 0xd3, 0x69, 0xf3, - 0x49, 0x77, 0xbb, 0xc4, 0xd9, 0xe5, 0xd5, 0x4f, 0xd1, 0x23, 0xca, 0x43, 0x9a, 0xc4, 0x31, 0x84, - 0x74, 0xfc, 0x23, 0xf7, 0x19, 0x1d, 0xab, 0x36, 0x3d, 0xb0, 0x71, 0x9e, 0xcd, 0xa3, 0x97, 0xeb, - 0x82, 0xff, 0xb6, 0x05, 0x9d, 0x4d, 0x90, 0xfe, 0x0c, 0x55, 0x45, 0x22, 0x22, 0x08, 0x3d, 0xa3, - 0xd4, 0xd2, 0x3a, 0x35, 0xbb, 0x9e, 0x4e, 0x9b, 0xd5, 0xee, 0x2c, 0xe4, 0xcc, 0xf7, 0xf4, 0x33, - 0x54, 0x3f, 0xe7, 0xee, 0x09, 0x04, 0x91, 0x4f, 0x24, 0x18, 0x65, 0xd5, 0xc2, 0x8f, 0x77, 0xd7, - 0xf9, 0x68, 0x29, 0x56, 0x43, 0xf7, 0x6e, 0x9e, 0x69, 0x7d, 0x65, 0xc3, 0x59, 0x45, 0xea, 0xbf, - 0xa2, 0x86, 0x48, 0x28, 0x05, 0x21, 0x7a, 0x89, 0x7f, 0xc4, 0x5d, 0xf1, 0x2d, 0x13, 0x92, 0xc7, - 0xe3, 0x63, 0x16, 0x30, 0x69, 0x54, 0x5a, 0x5a, 0xa7, 0x6c, 0x9b, 0xe9, 0xb4, 0xd9, 0xe8, 0xee, - 0x54, 0x39, 0x37, 0x10, 0x74, 0x07, 0x3d, 0xee, 0x11, 0xe6, 0x83, 0xb7, 0xc1, 0xae, 0x2a, 0x76, - 0x23, 0x9d, 0x36, 0x1f, 0xbf, 0xda, 0xaa, 0x70, 0x76, 0x38, 0xdb, 0x7f, 0x6b, 0xe8, 0xe1, 0xb5, - 0xfb, 0xa0, 0x7f, 0x8f, 0x2a, 0x84, 0x4a, 0x36, 0xca, 0xe6, 0x25, 0x1b, 0xc5, 0xa7, 0xab, 0x25, - 0xca, 0xde, 0xb4, 0xe5, 0xfd, 0x76, 0xa0, 0x07, 0x59, 0x27, 0x60, 0x79, 0x89, 0x5e, 0x28, 0xab, - 0x93, 0x23, 0x74, 0x1f, 0xed, 0xfb, 0x44, 0xc8, 0xf9, 0xa8, 0x9d, 0xb0, 0x00, 0x54, 0x93, 0xea, - 0x87, 0x9f, 0xdc, 0xed, 0xf2, 0x64, 0x0e, 0xfb, 0xbd, 0x74, 0xda, 0xdc, 0x3f, 0x5e, 0xe3, 0x38, - 0x1b, 0xe4, 0xf6, 0x44, 0x43, 0xab, 0xdd, 0xb9, 0x87, 0xe7, 0xeb, 0x67, 0x54, 0x93, 0xf3, 0x89, - 0x2a, 0xbe, 0xe9, 0x44, 0x2d, 0x6e, 0xe2, 0x62, 0x9c, 0x16, 0xb0, 0xec, 0xf5, 0x79, 0x67, 0x4d, - 0x7f, 0x0f, 0xbf, 0xf3, 0xd5, 0xb5, 0xd7, 0xf8, 0x83, 0x6d, 0xbf, 0x82, 0x6f, 0x78, 0x84, 0xed, - 0xe7, 0x93, 0x2b, 0xb3, 0x70, 0x71, 0x65, 0x16, 0x2e, 0xaf, 0xcc, 0xc2, 0x6f, 0xa9, 0xa9, 0x4d, - 0x52, 0x53, 0xbb, 0x48, 0x4d, 0xed, 0x32, 0x35, 0xb5, 0x7f, 0x52, 0x53, 0xfb, 0xe3, 0x5f, 0xb3, - 0xf0, 0x4b, 0x35, 0x2f, 0xc8, 0xff, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf6, 0x9f, 0xb3, 0xdd, 0xdf, - 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v1beta1/generated.proto b/vendor/k8s.io/api/batch/v1beta1/generated.proto deleted file mode 100644 index 043b355..0000000 --- a/vendor/k8s.io/api/batch/v1beta1/generated.proto +++ /dev/null @@ -1,137 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.batch.v1beta1; - -import "k8s.io/api/batch/v1/generated.proto"; -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// CronJob represents the configuration of a single cron job. -message CronJob { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional CronJobSpec spec = 2; - - // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional CronJobStatus status = 3; -} - -// CronJobList is a collection of cron jobs. -message CronJobList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of CronJobs. - repeated CronJob items = 2; -} - -// CronJobSpec describes how the job execution will look like and when it will actually run. -message CronJobSpec { - // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - optional string schedule = 1; - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - // +optional - optional int64 startingDeadlineSeconds = 2; - - // Specifies how to treat concurrent executions of a Job. - // Valid values are: - // - "Allow" (default): allows CronJobs to run concurrently; - // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - // - "Replace": cancels currently running job and replaces it with a new one - // +optional - optional string concurrencyPolicy = 3; - - // This flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - // +optional - optional bool suspend = 4; - - // Specifies the job that will be created when executing a CronJob. - optional JobTemplateSpec jobTemplate = 5; - - // The number of successful finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 3. - // +optional - optional int32 successfulJobsHistoryLimit = 6; - - // The number of failed finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 1. - // +optional - optional int32 failedJobsHistoryLimit = 7; -} - -// CronJobStatus represents the current state of a cron job. -message CronJobStatus { - // A list of pointers to currently running jobs. - // +optional - repeated k8s.io.api.core.v1.ObjectReference active = 1; - - // Information when was the last time the job was successfully scheduled. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; -} - -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional JobTemplateSpec template = 2; -} - -// JobTemplateSpec describes the data a Job should have when created from a template -message JobTemplateSpec { - // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional k8s.io.api.batch.v1.JobSpec spec = 2; -} - diff --git a/vendor/k8s.io/api/batch/v1beta1/register.go b/vendor/k8s.io/api/batch/v1beta1/register.go deleted file mode 100644 index 226de49..0000000 --- a/vendor/k8s.io/api/batch/v1beta1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "batch" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &JobTemplate{}, - &CronJob{}, - &CronJobList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/batch/v1beta1/types.go b/vendor/k8s.io/api/batch/v1beta1/types.go deleted file mode 100644 index cb5c9ba..0000000 --- a/vendor/k8s.io/api/batch/v1beta1/types.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - batchv1 "k8s.io/api/batch/v1" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - -// JobTemplateSpec describes the data a Job should have when created from a template -type JobTemplateSpec struct { - // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CronJob represents the configuration of a single cron job. -type CronJob struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CronJobList is a collection of cron jobs. -type CronJobList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CronJobs. 
- Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// CronJobSpec describes how the job execution will look like and when it will actually run. -type CronJobSpec struct { - - // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - // +optional - StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` - - // Specifies how to treat concurrent executions of a Job. - // Valid values are: - // - "Allow" (default): allows CronJobs to run concurrently; - // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - // - "Replace": cancels currently running job and replaces it with a new one - // +optional - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` - - // This flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - // +optional - Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` - - // Specifies the job that will be created when executing a CronJob. - JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` - - // The number of successful finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 3. - // +optional - SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` - - // The number of failed finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 1. - // +optional - FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` -} - -// ConcurrencyPolicy describes how the job will be handled. -// Only one of the following concurrent policies may be specified. -// If none of the following policies is specified, the default one -// is AllowConcurrent. -type ConcurrencyPolicy string - -const ( - // AllowConcurrent allows CronJobs to run concurrently. - AllowConcurrent ConcurrencyPolicy = "Allow" - - // ForbidConcurrent forbids concurrent runs, skipping next run if previous - // hasn't finished yet. - ForbidConcurrent ConcurrencyPolicy = "Forbid" - - // ReplaceConcurrent cancels currently running job and replaces it with a new one. - ReplaceConcurrent ConcurrencyPolicy = "Replace" -) - -// CronJobStatus represents the current state of a cron job. -type CronJobStatus struct { - // A list of pointers to currently running jobs. - // +optional - Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` - - // Information when was the last time the job was successfully scheduled. 
- // +optional - LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` -} diff --git a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index abbdfec..0000000 --- a/vendor/k8s.io/api/batch/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_CronJob = map[string]string{ - "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (CronJob) SwaggerDoc() map[string]string { - return map_CronJob -} - -var map_CronJobList = map[string]string{ - "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is the list of CronJobs.", -} - -func (CronJobList) SwaggerDoc() map[string]string { - return map_CronJobList -} - -var map_CronJobSpec = map[string]string{ - "": "CronJobSpec describes how the job execution will look like and when it will actually run.", - "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", - "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. 
Defaults to false.", - "jobTemplate": "Specifies the job that will be created when executing a CronJob.", - "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 3.", - "failedJobsHistoryLimit": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", -} - -func (CronJobSpec) SwaggerDoc() map[string]string { - return map_CronJobSpec -} - -var map_CronJobStatus = map[string]string{ - "": "CronJobStatus represents the current state of a cron job.", - "active": "A list of pointers to currently running jobs.", - "lastScheduleTime": "Information when was the last time the job was successfully scheduled.", -} - -func (CronJobStatus) SwaggerDoc() map[string]string { - return map_CronJobStatus -} - -var map_JobTemplate = map[string]string{ - "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (JobTemplate) SwaggerDoc() map[string]string { - return map_JobTemplate -} - -var map_JobTemplateSpec = map[string]string{ - "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (JobTemplateSpec) SwaggerDoc() map[string]string { - return map_JobTemplateSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 1c8bc44..0000000 --- a/vendor/k8s.io/api/batch/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,194 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronJob) DeepCopyInto(out *CronJob) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob. 
-func (in *CronJob) DeepCopy() *CronJob { - if in == nil { - return nil - } - out := new(CronJob) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CronJob) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronJobList) DeepCopyInto(out *CronJobList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CronJob, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList. -func (in *CronJobList) DeepCopy() *CronJobList { - if in == nil { - return nil - } - out := new(CronJobList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CronJobList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) { - *out = *in - if in.StartingDeadlineSeconds != nil { - in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.Suspend != nil { - in, out := &in.Suspend, &out.Suspend - *out = new(bool) - **out = **in - } - in.JobTemplate.DeepCopyInto(&out.JobTemplate) - if in.SuccessfulJobsHistoryLimit != nil { - in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit - *out = new(int32) - **out = **in - } - if in.FailedJobsHistoryLimit != nil { - in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec. -func (in *CronJobSpec) DeepCopy() *CronJobSpec { - if in == nil { - return nil - } - out := new(CronJobSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) { - *out = *in - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = make([]v1.ObjectReference, len(*in)) - copy(*out, *in) - } - if in.LastScheduleTime != nil { - in, out := &in.LastScheduleTime, &out.LastScheduleTime - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus. -func (in *CronJobStatus) DeepCopy() *CronJobStatus { - if in == nil { - return nil - } - out := new(CronJobStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. 
-func (in *JobTemplate) DeepCopy() *JobTemplate { - if in == nil { - return nil - } - out := new(JobTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *JobTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec. -func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { - if in == nil { - return nil - } - out := new(JobTemplateSpec) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/batch/v2alpha1/doc.go b/vendor/k8s.io/api/batch/v2alpha1/doc.go deleted file mode 100644 index f4ed01a..0000000 --- a/vendor/k8s.io/api/batch/v2alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -package v2alpha1 // import "k8s.io/api/batch/v2alpha1" diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go b/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go deleted file mode 100644 index 6ab41eb..0000000 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.pb.go +++ /dev/null @@ -1,1509 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v2alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto - - It has these top-level messages: - CronJob - CronJobList - CronJobSpec - CronJobStatus - JobTemplate - JobTemplateSpec -*/ -package v2alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *CronJob) Reset() { *m = CronJob{} } -func (*CronJob) ProtoMessage() {} -func (*CronJob) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *CronJobList) Reset() { *m = CronJobList{} } -func (*CronJobList) ProtoMessage() {} -func (*CronJobList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *CronJobSpec) Reset() { *m = CronJobSpec{} } -func (*CronJobSpec) ProtoMessage() {} -func (*CronJobSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *CronJobStatus) Reset() { *m = CronJobStatus{} } -func (*CronJobStatus) ProtoMessage() {} -func (*CronJobStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *JobTemplate) Reset() { *m = JobTemplate{} } -func (*JobTemplate) ProtoMessage() {} -func (*JobTemplate) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *JobTemplateSpec) Reset() { *m = JobTemplateSpec{} } -func (*JobTemplateSpec) ProtoMessage() {} -func (*JobTemplateSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func init() { - proto.RegisterType((*CronJob)(nil), "k8s.io.api.batch.v2alpha1.CronJob") - proto.RegisterType((*CronJobList)(nil), "k8s.io.api.batch.v2alpha1.CronJobList") - proto.RegisterType((*CronJobSpec)(nil), "k8s.io.api.batch.v2alpha1.CronJobSpec") - proto.RegisterType((*CronJobStatus)(nil), "k8s.io.api.batch.v2alpha1.CronJobStatus") - proto.RegisterType((*JobTemplate)(nil), "k8s.io.api.batch.v2alpha1.JobTemplate") - proto.RegisterType((*JobTemplateSpec)(nil), "k8s.io.api.batch.v2alpha1.JobTemplateSpec") -} -func (m *CronJob) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronJob) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *CronJobList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronJobList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CronJobSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronJobSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Schedule))) - i += copy(dAtA[i:], m.Schedule) - if m.StartingDeadlineSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.StartingDeadlineSeconds)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ConcurrencyPolicy))) - i += copy(dAtA[i:], m.ConcurrencyPolicy) - if m.Suspend != nil { - dAtA[i] = 0x20 - i++ - if *m.Suspend { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.JobTemplate.Size())) - n5, err := m.JobTemplate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if m.SuccessfulJobsHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.SuccessfulJobsHistoryLimit)) - } - if m.FailedJobsHistoryLimit != nil { - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.FailedJobsHistoryLimit)) - } - return i, nil -} - -func (m *CronJobStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CronJobStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Active) > 0 { - for _, msg := range m.Active { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.LastScheduleTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastScheduleTime.Size())) - n6, err := m.LastScheduleTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} - -func (m *JobTemplate) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplate) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n8, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - return i, nil -} - -func (m *JobTemplateSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *JobTemplateSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n9, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n10, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += 
n10 - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CronJob) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CronJobList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CronJobSpec) Size() (n int) { - var l int - _ = l - l = len(m.Schedule) - n += 1 + l + sovGenerated(uint64(l)) - if m.StartingDeadlineSeconds != nil { - n += 1 + sovGenerated(uint64(*m.StartingDeadlineSeconds)) - } - l = len(m.ConcurrencyPolicy) - n += 1 + l + sovGenerated(uint64(l)) - if m.Suspend != nil { - n += 2 - } - l = m.JobTemplate.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.SuccessfulJobsHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.SuccessfulJobsHistoryLimit)) - } - if m.FailedJobsHistoryLimit != nil { - n += 1 + sovGenerated(uint64(*m.FailedJobsHistoryLimit)) - } - return n -} - -func (m *CronJobStatus) Size() (n int) { - var l int - _ = l - if len(m.Active) > 0 { - for _, e := range m.Active { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.LastScheduleTime != nil { - l = m.LastScheduleTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *JobTemplate) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Template.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *JobTemplateSpec) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CronJob) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJob{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CronJobSpec", "CronJobSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CronJobStatus", "CronJobStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CronJobList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJobList{`, - 
`ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CronJob", "CronJob", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CronJobSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJobSpec{`, - `Schedule:` + fmt.Sprintf("%v", this.Schedule) + `,`, - `StartingDeadlineSeconds:` + valueToStringGenerated(this.StartingDeadlineSeconds) + `,`, - `ConcurrencyPolicy:` + fmt.Sprintf("%v", this.ConcurrencyPolicy) + `,`, - `Suspend:` + valueToStringGenerated(this.Suspend) + `,`, - `JobTemplate:` + strings.Replace(strings.Replace(this.JobTemplate.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `SuccessfulJobsHistoryLimit:` + valueToStringGenerated(this.SuccessfulJobsHistoryLimit) + `,`, - `FailedJobsHistoryLimit:` + valueToStringGenerated(this.FailedJobsHistoryLimit) + `,`, - `}`, - }, "") - return s -} -func (this *CronJobStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CronJobStatus{`, - `Active:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Active), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `LastScheduleTime:` + strings.Replace(fmt.Sprintf("%v", this.LastScheduleTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "JobTemplateSpec", "JobTemplateSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *JobTemplateSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&JobTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "JobSpec", "k8s_io_api_batch_v1.JobSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CronJob) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJob: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJob: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen 
|= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CronJob{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schedule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schedule = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StartingDeadlineSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.StartingDeadlineSeconds = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConcurrencyPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ConcurrencyPolicy = ConcurrencyPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Suspend", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Suspend = &b - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field JobTemplate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.JobTemplate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessfulJobsHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SuccessfulJobsHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailedJobsHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.FailedJobsHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CronJobStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CronJobStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CronJobStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Active = append(m.Active, k8s_io_api_core_v1.ObjectReference{}) - if err := m.Active[len(m.Active)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastScheduleTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } 
- postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastScheduleTime == nil { - m.LastScheduleTime = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.LastScheduleTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobTemplate) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplate: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplate: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *JobTemplateSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: JobTemplateSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: JobTemplateSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - 
switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - 
proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/batch/v2alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 774 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x94, 0x4d, 0x6f, 0xdb, 0x36, - 0x18, 0xc7, 0x2d, 0xc7, 0x6f, 0xa1, 0x97, 0x2d, 0xd1, 0x86, 0xc4, 0xf3, 0x06, 0xd9, 0x50, 0xb0, - 0xc1, 0x18, 0x36, 0x6a, 0x09, 0x86, 0x61, 0xa7, 0x01, 0x53, 0x86, 0x36, 0x4d, 0x53, 0x34, 0x90, - 0x53, 0xa0, 0x28, 0x82, 0xa2, 0x14, 0x45, 0xdb, 0x8c, 0x25, 0x51, 0x10, 0x29, 0x03, 0xbe, 0xf5, - 0xd6, 0x6b, 0x3f, 0x49, 0x2f, 0xed, 0x87, 0x48, 0x7b, 0xca, 0x31, 0x27, 0xa3, 0x51, 0xbf, 0x45, - 0x4f, 0x85, 0x68, 0xf9, 0x25, 0x7e, 0x49, 0xd2, 0x4b, 0x6e, 0xe2, 0xa3, 0xff, 0xff, 0xc7, 0x87, - 0xcf, 0xf3, 0x90, 0xc0, 0xec, 0xfe, 0xc3, 0x21, 0x65, 0x46, 0x37, 0xb2, 0x49, 0xe8, 0x13, 0x41, - 0xb8, 0xd1, 0x23, 0xbe, 0xc3, 0x42, 0x23, 0xfd, 0x81, 0x02, 0x6a, 0xd8, 0x48, 0xe0, 0x8e, 0xd1, - 0xdb, 0x45, 0x6e, 0xd0, 0x41, 0x3b, 0x46, 0x9b, 0xf8, 0x24, 0x44, 0x82, 0x38, 0x30, 0x08, 0x99, - 0x60, 0xea, 0x8f, 0x43, 0x29, 0x44, 0x01, 0x85, 0x52, 0x0a, 0x47, 0xd2, 0xea, 0x1f, 0x6d, 0x2a, - 0x3a, 0x91, 0x0d, 0x31, 0xf3, 0x8c, 0x36, 0x6b, 0x33, 0x43, 0x3a, 0xec, 0xa8, 0x25, 0x57, 0x72, - 0x21, 0xbf, 0x86, 0xa4, 0xea, 0xf6, 0xfc, 0xa6, 0x73, 0xdb, 0x55, 0xf5, 0x29, 0x11, 0x66, 0x21, - 0x59, 0xa4, 0xf9, 0x6b, 0xa2, 0xf1, 0x10, 0xee, 0x50, 0x9f, 0x84, 0x7d, 0x23, 0xe8, 0xb6, 0x93, - 0x00, 0x37, 0x3c, 0x22, 0xd0, 0x22, 0x97, 0xb1, 0xcc, 0x15, 0x46, 0xbe, 0xa0, 0x1e, 0x99, 0x33, - 0xfc, 0x7d, 0x93, 0x81, 0xe3, 0x0e, 0xf1, 0xd0, 0xac, 0x4f, 0x7f, 0x95, 0x05, 0xc5, 0xbd, 0x90, - 0xf9, 0x07, 0xcc, 0x56, 0x5f, 0x80, 0x52, 0x92, 0x8f, 0x83, 0x04, 0xaa, 0x28, 0x75, 0xa5, 0x51, - 0xde, 0xfd, 0x13, 0x4e, 0x0a, 0x3a, 0xc6, 0xc2, 0xa0, 0xdb, 0x4e, 0x02, 0x1c, 0x26, 0x6a, 0xd8, - 0xdb, 0x81, 0x8f, 0xed, 0x53, 0x82, 0xc5, 0x23, 0x22, 0x90, 0xa9, 0x9e, 0x0d, 0x6a, 0x99, 0x78, - 0x50, 0x03, 0x93, 0x98, 0x35, 0xa6, 0xaa, 0xfb, 0x20, 0xc7, 0x03, 0x82, 0x2b, 0x59, 0x49, 0xff, - 0x15, 0x2e, 0x6d, 0x17, 0x4c, 0x73, 0x6a, 0x06, 0x04, 0x9b, 0xdf, 0xa4, 0xcc, 0x5c, 0xb2, 0xb2, - 0x24, 0x41, 0x3d, 0x02, 0x05, 0x2e, 0x90, 0x88, 0x78, 0x65, 0x45, 0xb2, 0x1a, 0xb7, 0x60, 0x49, - 0xbd, 0xf9, 0x6d, 0x4a, 0x2b, 0x0c, 0xd7, 0x56, 0xca, 0xd1, 0xdf, 0x29, 0xa0, 0x9c, 0x2a, 0x0f, - 0x29, 0x17, 0xea, 0xc9, 0x5c, 0x35, 0xe0, 0xed, 0xaa, 0x91, 0xb8, 0x65, 0x2d, 0xd6, 0xd3, 0x9d, - 0x4a, 0xa3, 0xc8, 0x54, 0x25, 0xee, 0x83, 0x3c, 0x15, 0xc4, 0xe3, 0x95, 0x6c, 0x7d, 0xa5, 0x51, - 0xde, 0xd5, 0x6f, 0x4e, 0xdf, 0x5c, 0x4b, 0x71, 0xf9, 0x07, 0x89, 0xd1, 0x1a, 0xfa, 0xf5, 0x37, - 0xb9, 0x71, 0xda, 0x49, 0x79, 0xd4, 0xdf, 0x41, 0x29, 0x69, 0xb5, 0x13, 0xb9, 0x44, 0xa6, 0xbd, - 0x3a, 0x49, 0xa3, 0x99, 0xc6, 0xad, 0xb1, 0x42, 0x7d, 0x02, 0xb6, 0xb8, 0x40, 0xa1, 0xa0, 0x7e, - 0xfb, 0x7f, 0x82, 0x1c, 0x97, 0xfa, 0xa4, 0x49, 0x30, 0xf3, 0x1d, 0x2e, 0x7b, 0xb4, 0x62, 0xfe, - 0x14, 0x0f, 0x6a, 0x5b, 0xcd, 0xc5, 0x12, 0x6b, 0x99, 0x57, 0x3d, 0x01, 0x1b, 0x98, 0xf9, 0x38, - 0x0a, 0x43, 0xe2, 0xe3, 0xfe, 0x11, 0x73, 0x29, 0xee, 0xcb, 0x46, 0xad, 0x9a, 0x30, 0xcd, 0x66, - 0x63, 0x6f, 0x56, 0xf0, 0x79, 0x51, 0xd0, 0x9a, 0x07, 0xa9, 0xbf, 0x80, 0x22, 0x8f, 0x78, 0x40, - 0x7c, 0xa7, 0x92, 0xab, 0x2b, 0x8d, 0x92, 0x59, 0x8e, 0x07, 0xb5, 0x62, 0x73, 0x18, 0xb2, 0x46, - 0xff, 0x54, 0x04, 0xca, 0xa7, 0xcc, 0x3e, 0x26, 0x5e, 0xe0, 0x22, 0x41, 0x2a, 0x79, 0xd9, 0xc3, - 0xdf, 0xae, 0x29, 0xf4, 0xc1, 0x44, 0x2d, 0xe7, 0xee, 0xfb, 0x34, 0xd5, 0xf2, 0xd4, 0x0f, 0x6b, - 0x9a, 0xa9, 
0x3e, 0x07, 0x55, 0x1e, 0x61, 0x4c, 0x38, 0x6f, 0x45, 0xee, 0x01, 0xb3, 0xf9, 0x3e, - 0xe5, 0x82, 0x85, 0xfd, 0x43, 0xea, 0x51, 0x51, 0x29, 0xd4, 0x95, 0x46, 0xde, 0xd4, 0xe2, 0x41, - 0xad, 0xda, 0x5c, 0xaa, 0xb2, 0xae, 0x21, 0xa8, 0x16, 0xd8, 0x6c, 0x21, 0xea, 0x12, 0x67, 0x8e, - 0x5d, 0x94, 0xec, 0x6a, 0x3c, 0xa8, 0x6d, 0xde, 0x5b, 0xa8, 0xb0, 0x96, 0x38, 0xf5, 0x0f, 0x0a, - 0x58, 0xbb, 0x72, 0x23, 0xd4, 0x87, 0xa0, 0x80, 0xb0, 0xa0, 0xbd, 0x64, 0x60, 0x92, 0x61, 0xdc, - 0x9e, 0xae, 0x51, 0xf2, 0xae, 0x4d, 0xee, 0xb8, 0x45, 0x5a, 0x24, 0x69, 0x05, 0x99, 0x5c, 0xa3, - 0xff, 0xa4, 0xd5, 0x4a, 0x11, 0xaa, 0x0b, 0xd6, 0x5d, 0xc4, 0xc5, 0x68, 0xd6, 0x8e, 0xa9, 0x47, - 0x64, 0x97, 0xae, 0x96, 0xfe, 0x9a, 0xeb, 0x93, 0x38, 0xcc, 0x1f, 0xe2, 0x41, 0x6d, 0xfd, 0x70, - 0x86, 0x63, 0xcd, 0x91, 0xf5, 0xf7, 0x0a, 0x98, 0xee, 0xce, 0x1d, 0x3c, 0x61, 0x4f, 0x41, 0x49, - 0x8c, 0x46, 0x2a, 0xfb, 0xd5, 0x23, 0x35, 0xbe, 0x8b, 0xe3, 0x79, 0x1a, 0xd3, 0xf4, 0xb7, 0x0a, - 0xf8, 0x6e, 0x46, 0x7f, 0x07, 0xe7, 0xf9, 0xf7, 0xca, 0x93, 0xfc, 0xf3, 0x82, 0xb3, 0xc8, 0x53, - 0x2c, 0x7b, 0x88, 0x4d, 0x78, 0x76, 0xa9, 0x65, 0xce, 0x2f, 0xb5, 0xcc, 0xc5, 0xa5, 0x96, 0x79, - 0x19, 0x6b, 0xca, 0x59, 0xac, 0x29, 0xe7, 0xb1, 0xa6, 0x5c, 0xc4, 0x9a, 0xf2, 0x31, 0xd6, 0x94, - 0xd7, 0x9f, 0xb4, 0xcc, 0xb3, 0xd2, 0xa8, 0x22, 0x5f, 0x02, 0x00, 0x00, 0xff, 0xff, 0x20, 0x1c, - 0xcf, 0x94, 0xe7, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/batch/v2alpha1/generated.proto b/vendor/k8s.io/api/batch/v2alpha1/generated.proto deleted file mode 100644 index 4321c33..0000000 --- a/vendor/k8s.io/api/batch/v2alpha1/generated.proto +++ /dev/null @@ -1,135 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.batch.v2alpha1; - -import "k8s.io/api/batch/v1/generated.proto"; -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v2alpha1"; - -// CronJob represents the configuration of a single cron job. -message CronJob { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional CronJobSpec spec = 2; - - // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional CronJobStatus status = 3; -} - -// CronJobList is a collection of cron jobs. 
-message CronJobList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of CronJobs. - repeated CronJob items = 2; -} - -// CronJobSpec describes how the job execution will look like and when it will actually run. -message CronJobSpec { - // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - optional string schedule = 1; - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - // +optional - optional int64 startingDeadlineSeconds = 2; - - // Specifies how to treat concurrent executions of a Job. - // Valid values are: - // - "Allow" (default): allows CronJobs to run concurrently; - // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - // - "Replace": cancels currently running job and replaces it with a new one - // +optional - optional string concurrencyPolicy = 3; - - // This flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - // +optional - optional bool suspend = 4; - - // Specifies the job that will be created when executing a CronJob. - optional JobTemplateSpec jobTemplate = 5; - - // The number of successful finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - optional int32 successfulJobsHistoryLimit = 6; - - // The number of failed finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - optional int32 failedJobsHistoryLimit = 7; -} - -// CronJobStatus represents the current state of a cron job. -message CronJobStatus { - // A list of pointers to currently running jobs. - // +optional - repeated k8s.io.api.core.v1.ObjectReference active = 1; - - // Information when was the last time the job was successfully scheduled. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastScheduleTime = 4; -} - -// JobTemplate describes a template for creating copies of a predefined pod. -message JobTemplate { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional JobTemplateSpec template = 2; -} - -// JobTemplateSpec describes the data a Job should have when created from a template -message JobTemplateSpec { - // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the job. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional k8s.io.api.batch.v1.JobSpec spec = 2; -} - diff --git a/vendor/k8s.io/api/batch/v2alpha1/register.go b/vendor/k8s.io/api/batch/v2alpha1/register.go deleted file mode 100644 index ac7fa50..0000000 --- a/vendor/k8s.io/api/batch/v2alpha1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "batch" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v2alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &JobTemplate{}, - &CronJob{}, - &CronJobList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/batch/v2alpha1/types.go b/vendor/k8s.io/api/batch/v2alpha1/types.go deleted file mode 100644 index cccff94..0000000 --- a/vendor/k8s.io/api/batch/v2alpha1/types.go +++ /dev/null @@ -1,156 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -import ( - batchv1 "k8s.io/api/batch/v1" - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// JobTemplate describes a template for creating copies of a predefined pod. -type JobTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Defines jobs that will be created from this template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Template JobTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - -// JobTemplateSpec describes the data a Job should have when created from a template -type JobTemplateSpec struct { - // Standard object's metadata of the jobs created from this template. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec batchv1.JobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CronJob represents the configuration of a single cron job. -type CronJob struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of a cron job, including the schedule. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec CronJobSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Current status of a cron job. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status CronJobStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// CronJobList is a collection of cron jobs. -type CronJobList struct { - metav1.TypeMeta `json:",inline"` - - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of CronJobs. - Items []CronJob `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// CronJobSpec describes how the job execution will look like and when it will actually run. -type CronJobSpec struct { - - // The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron. - Schedule string `json:"schedule" protobuf:"bytes,1,opt,name=schedule"` - - // Optional deadline in seconds for starting the job if it misses scheduled - // time for any reason. Missed jobs executions will be counted as failed ones. - // +optional - StartingDeadlineSeconds *int64 `json:"startingDeadlineSeconds,omitempty" protobuf:"varint,2,opt,name=startingDeadlineSeconds"` - - // Specifies how to treat concurrent executions of a Job. 
- // Valid values are: - // - "Allow" (default): allows CronJobs to run concurrently; - // - "Forbid": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - // - "Replace": cancels currently running job and replaces it with a new one - // +optional - ConcurrencyPolicy ConcurrencyPolicy `json:"concurrencyPolicy,omitempty" protobuf:"bytes,3,opt,name=concurrencyPolicy,casttype=ConcurrencyPolicy"` - - // This flag tells the controller to suspend subsequent executions, it does - // not apply to already started executions. Defaults to false. - // +optional - Suspend *bool `json:"suspend,omitempty" protobuf:"varint,4,opt,name=suspend"` - - // Specifies the job that will be created when executing a CronJob. - JobTemplate JobTemplateSpec `json:"jobTemplate" protobuf:"bytes,5,opt,name=jobTemplate"` - - // The number of successful finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - SuccessfulJobsHistoryLimit *int32 `json:"successfulJobsHistoryLimit,omitempty" protobuf:"varint,6,opt,name=successfulJobsHistoryLimit"` - - // The number of failed finished jobs to retain. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - FailedJobsHistoryLimit *int32 `json:"failedJobsHistoryLimit,omitempty" protobuf:"varint,7,opt,name=failedJobsHistoryLimit"` -} - -// ConcurrencyPolicy describes how the job will be handled. -// Only one of the following concurrent policies may be specified. -// If none of the following policies is specified, the default one -// is AllowConcurrent. -type ConcurrencyPolicy string - -const ( - // AllowConcurrent allows CronJobs to run concurrently. - AllowConcurrent ConcurrencyPolicy = "Allow" - - // ForbidConcurrent forbids concurrent runs, skipping next run if previous - // hasn't finished yet. - ForbidConcurrent ConcurrencyPolicy = "Forbid" - - // ReplaceConcurrent cancels currently running job and replaces it with a new one. - ReplaceConcurrent ConcurrencyPolicy = "Replace" -) - -// CronJobStatus represents the current state of a cron job. -type CronJobStatus struct { - // A list of pointers to currently running jobs. - // +optional - Active []v1.ObjectReference `json:"active,omitempty" protobuf:"bytes,1,rep,name=active"` - - // Information when was the last time the job was successfully scheduled. - // +optional - LastScheduleTime *metav1.Time `json:"lastScheduleTime,omitempty" protobuf:"bytes,4,opt,name=lastScheduleTime"` -} diff --git a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go deleted file mode 100644 index f448a92..0000000 --- a/vendor/k8s.io/api/batch/v2alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,96 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v2alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. 
Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_CronJob = map[string]string{ - "": "CronJob represents the configuration of a single cron job.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (CronJob) SwaggerDoc() map[string]string { - return map_CronJob -} - -var map_CronJobList = map[string]string{ - "": "CronJobList is a collection of cron jobs.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is the list of CronJobs.", -} - -func (CronJobList) SwaggerDoc() map[string]string { - return map_CronJobList -} - -var map_CronJobSpec = map[string]string{ - "": "CronJobSpec describes how the job execution will look like and when it will actually run.", - "schedule": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.", - "startingDeadlineSeconds": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.", - "concurrencyPolicy": "Specifies how to treat concurrent executions of a Job. Valid values are: - \"Allow\" (default): allows CronJobs to run concurrently; - \"Forbid\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \"Replace\": cancels currently running job and replaces it with a new one", - "suspend": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.", - "jobTemplate": "Specifies the job that will be created when executing a CronJob.", - "successfulJobsHistoryLimit": "The number of successful finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", - "failedJobsHistoryLimit": "The number of failed finished jobs to retain. This is a pointer to distinguish between explicit zero and not specified.", -} - -func (CronJobSpec) SwaggerDoc() map[string]string { - return map_CronJobSpec -} - -var map_CronJobStatus = map[string]string{ - "": "CronJobStatus represents the current state of a cron job.", - "active": "A list of pointers to currently running jobs.", - "lastScheduleTime": "Information when was the last time the job was successfully scheduled.", -} - -func (CronJobStatus) SwaggerDoc() map[string]string { - return map_CronJobStatus -} - -var map_JobTemplate = map[string]string{ - "": "JobTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Defines jobs that will be created from this template. 
https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (JobTemplate) SwaggerDoc() map[string]string { - return map_JobTemplate -} - -var map_JobTemplateSpec = map[string]string{ - "": "JobTemplateSpec describes the data a Job should have when created from a template", - "metadata": "Standard object's metadata of the jobs created from this template. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the job. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (JobTemplateSpec) SwaggerDoc() map[string]string { - return map_JobTemplateSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 20d87e7..0000000 --- a/vendor/k8s.io/api/batch/v2alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,194 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v2alpha1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronJob) DeepCopyInto(out *CronJob) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJob. -func (in *CronJob) DeepCopy() *CronJob { - if in == nil { - return nil - } - out := new(CronJob) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CronJob) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronJobList) DeepCopyInto(out *CronJobList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CronJob, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobList. -func (in *CronJobList) DeepCopy() *CronJobList { - if in == nil { - return nil - } - out := new(CronJobList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *CronJobList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronJobSpec) DeepCopyInto(out *CronJobSpec) { - *out = *in - if in.StartingDeadlineSeconds != nil { - in, out := &in.StartingDeadlineSeconds, &out.StartingDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.Suspend != nil { - in, out := &in.Suspend, &out.Suspend - *out = new(bool) - **out = **in - } - in.JobTemplate.DeepCopyInto(&out.JobTemplate) - if in.SuccessfulJobsHistoryLimit != nil { - in, out := &in.SuccessfulJobsHistoryLimit, &out.SuccessfulJobsHistoryLimit - *out = new(int32) - **out = **in - } - if in.FailedJobsHistoryLimit != nil { - in, out := &in.FailedJobsHistoryLimit, &out.FailedJobsHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobSpec. -func (in *CronJobSpec) DeepCopy() *CronJobSpec { - if in == nil { - return nil - } - out := new(CronJobSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CronJobStatus) DeepCopyInto(out *CronJobStatus) { - *out = *in - if in.Active != nil { - in, out := &in.Active, &out.Active - *out = make([]v1.ObjectReference, len(*in)) - copy(*out, *in) - } - if in.LastScheduleTime != nil { - in, out := &in.LastScheduleTime, &out.LastScheduleTime - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CronJobStatus. -func (in *CronJobStatus) DeepCopy() *CronJobStatus { - if in == nil { - return nil - } - out := new(CronJobStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplate) DeepCopyInto(out *JobTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplate. -func (in *JobTemplate) DeepCopy() *JobTemplate { - if in == nil { - return nil - } - out := new(JobTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *JobTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *JobTemplateSpec) DeepCopyInto(out *JobTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobTemplateSpec. -func (in *JobTemplateSpec) DeepCopy() *JobTemplateSpec { - if in == nil { - return nil - } - out := new(JobTemplateSpec) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/certificates/v1beta1/doc.go b/vendor/k8s.io/api/certificates/v1beta1/doc.go deleted file mode 100644 index fb23aad..0000000 --- a/vendor/k8s.io/api/certificates/v1beta1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=certificates.k8s.io -package v1beta1 // import "k8s.io/api/certificates/v1beta1" diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go b/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go deleted file mode 100644 index eda1599..0000000 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.pb.go +++ /dev/null @@ -1,1693 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto - - It has these top-level messages: - CertificateSigningRequest - CertificateSigningRequestCondition - CertificateSigningRequestList - CertificateSigningRequestSpec - CertificateSigningRequestStatus - ExtraValue -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *CertificateSigningRequest) Reset() { *m = CertificateSigningRequest{} } -func (*CertificateSigningRequest) ProtoMessage() {} -func (*CertificateSigningRequest) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *CertificateSigningRequestCondition) Reset() { *m = CertificateSigningRequestCondition{} } -func (*CertificateSigningRequestCondition) ProtoMessage() {} -func (*CertificateSigningRequestCondition) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *CertificateSigningRequestList) Reset() { *m = CertificateSigningRequestList{} } -func (*CertificateSigningRequestList) ProtoMessage() {} -func (*CertificateSigningRequestList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *CertificateSigningRequestSpec) Reset() { *m = CertificateSigningRequestSpec{} } -func (*CertificateSigningRequestSpec) ProtoMessage() {} -func (*CertificateSigningRequestSpec) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *CertificateSigningRequestStatus) Reset() { *m = CertificateSigningRequestStatus{} } -func (*CertificateSigningRequestStatus) ProtoMessage() {} -func (*CertificateSigningRequestStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{4} -} - -func (m *ExtraValue) Reset() { *m = ExtraValue{} } -func (*ExtraValue) ProtoMessage() {} -func (*ExtraValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func init() { - proto.RegisterType((*CertificateSigningRequest)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequest") - proto.RegisterType((*CertificateSigningRequestCondition)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestCondition") - proto.RegisterType((*CertificateSigningRequestList)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestList") - proto.RegisterType((*CertificateSigningRequestSpec)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestSpec") - proto.RegisterType((*CertificateSigningRequestStatus)(nil), "k8s.io.api.certificates.v1beta1.CertificateSigningRequestStatus") - proto.RegisterType((*ExtraValue)(nil), "k8s.io.api.certificates.v1beta1.ExtraValue") -} -func (m *CertificateSigningRequest) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequest) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *CertificateSigningRequestCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequestCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = 
i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n4, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - return i, nil -} - -func (m *CertificateSigningRequestList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequestList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CertificateSigningRequestSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CertificateSigningRequestSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Request != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Request))) - i += copy(dAtA[i:], m.Request) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Username))) - i += copy(dAtA[i:], m.Username) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.UID))) - i += copy(dAtA[i:], m.UID) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Usages) > 0 { - for _, s := range m.Usages { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Extra) > 0 { - keysForExtra := make([]string, 0, len(m.Extra)) - for k := range m.Extra { - keysForExtra = append(keysForExtra, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - for _, k := range keysForExtra { - dAtA[i] = 0x32 - i++ - v := m.Extra[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n6, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - } - return i, nil -} - -func (m *CertificateSigningRequestStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return 
dAtA[:n], nil -} - -func (m *CertificateSigningRequestStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Certificate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Certificate))) - i += copy(dAtA[i:], m.Certificate) - } - return i, nil -} - -func (m ExtraValue) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m ExtraValue) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *CertificateSigningRequest) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CertificateSigningRequestCondition) Size() (n int) { - var l int - _ = l - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - l = m.LastUpdateTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CertificateSigningRequestList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *CertificateSigningRequestSpec) Size() (n int) { - var l int - _ = l - if m.Request != nil { - l = len(m.Request) - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Username) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.UID) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Groups) > 0 { - for _, s := range m.Groups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Usages) > 0 { - for _, s := range m.Usages { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Extra) > 0 { - for k, v := range m.Extra { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - return n -} - -func (m 
*CertificateSigningRequestStatus) Size() (n int) { - var l int - _ = l - if len(m.Conditions) > 0 { - for _, e := range m.Conditions { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.Certificate != nil { - l = len(m.Certificate) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m ExtraValue) Size() (n int) { - var l int - _ = l - if len(m) > 0 { - for _, s := range m { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *CertificateSigningRequest) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequest{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "CertificateSigningRequestSpec", "CertificateSigningRequestSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "CertificateSigningRequestStatus", "CertificateSigningRequestStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CertificateSigningRequestCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequestCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `LastUpdateTime:` + strings.Replace(strings.Replace(this.LastUpdateTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CertificateSigningRequestList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequestList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "CertificateSigningRequest", "CertificateSigningRequest", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CertificateSigningRequestSpec) String() string { - if this == nil { - return "nil" - } - keysForExtra := make([]string, 0, len(this.Extra)) - for k := range this.Extra { - keysForExtra = append(keysForExtra, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForExtra) - mapStringForExtra := "map[string]ExtraValue{" - for _, k := range keysForExtra { - mapStringForExtra += fmt.Sprintf("%v: %v,", k, this.Extra[k]) - } - mapStringForExtra += "}" - s := strings.Join([]string{`&CertificateSigningRequestSpec{`, - `Request:` + valueToStringGenerated(this.Request) + `,`, - `Username:` + fmt.Sprintf("%v", this.Username) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `Groups:` + fmt.Sprintf("%v", this.Groups) + `,`, - `Usages:` + fmt.Sprintf("%v", this.Usages) + `,`, - `Extra:` + mapStringForExtra + `,`, - `}`, - }, "") - return s -} -func (this *CertificateSigningRequestStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CertificateSigningRequestStatus{`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "CertificateSigningRequestCondition", 
"CertificateSigningRequestCondition", 1), `&`, ``, 1) + `,`, - `Certificate:` + valueToStringGenerated(this.Certificate) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *CertificateSigningRequest) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = RequestConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CertificateSigningRequest{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Request", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Request = append(m.Request[:0], dAtA[iNdEx:postIndex]...) 
- if m.Request == nil { - m.Request = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Username = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.UID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Groups = append(m.Groups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Usages", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Usages = append(m.Usages, KeyUsage(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Extra", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Extra == nil { - m.Extra = make(map[string]ExtraValue) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &ExtraValue{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Extra[mapkey] = *mapvalue - } else { - var mapvalue ExtraValue - m.Extra[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CertificateSigningRequestStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CertificateSigningRequestStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CertificateSigningRequestStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, CertificateSigningRequestCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Certificate", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Certificate = append(m.Certificate[:0], dAtA[iNdEx:postIndex]...) - if m.Certificate == nil { - m.Certificate = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExtraValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExtraValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExtraValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - *m = append(*m, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire 
uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/certificates/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0x4d, 0x8f, 0xdb, 0x44, - 0x18, 0x8e, 0xf3, 0xb5, 0xc9, 0x64, 0xd9, 0x56, 0x23, 0x54, 0x99, 0x95, 0x6a, 0xaf, 0x2c, 0x40, - 0xcb, 0x47, 0xc7, 0x6c, 0x85, 0x60, 0xb5, 0x07, 0x04, 0x5e, 0x2a, 0x58, 0xd1, 0x0a, 0x69, 0xda, - 0x70, 0x40, 0x48, 0x74, 0xe2, 0xbc, 0x75, 0xa6, 0xa9, 0x3f, 0xf0, 0x8c, 0x03, 0xb9, 0xf5, 0x27, - 0x70, 0xe4, 0x82, 0xc4, 0x2f, 0xe1, 0xbc, 0x1c, 0x90, 0x7a, 0xec, 0x01, 0x45, 0x6c, 0xf8, 0x17, - 0x3d, 0xa1, 0x19, 0x4f, 0xe2, 0x90, 0x55, 0x48, 0xd5, 0xbd, 0x79, 0x9e, 0xf7, 0x79, 0x9e, 0xf7, - 0x63, 0xde, 0x31, 0xfa, 0x72, 0x7c, 0x2c, 0x08, 0x4f, 0xfd, 0x71, 0x31, 0x80, 0x3c, 0x01, 0x09, - 0xc2, 0x9f, 0x40, 0x32, 0x4c, 0x73, 0xdf, 0x04, 0x58, 0xc6, 0xfd, 0x10, 0x72, 0xc9, 0x1f, 0xf1, - 0x90, 0xe9, 0xf0, 0xd1, 0x00, 0x24, 0x3b, 0xf2, 0x23, 0x48, 0x20, 0x67, 0x12, 0x86, 0x24, 0xcb, - 0x53, 0x99, 0x62, 0xb7, 0x14, 0x10, 0x96, 0x71, 0xb2, 0x2a, 0x20, 0x46, 0xb0, 0x7f, 0x2b, 0xe2, - 0x72, 0x54, 0x0c, 0x48, 0x98, 0xc6, 0x7e, 0x94, 0x46, 0xa9, 0xaf, 0x75, 0x83, 0xe2, 0x91, 0x3e, - 0xe9, 0x83, 0xfe, 0x2a, 0xfd, 0xf6, 0x3f, 0xac, 0x0a, 0x88, 0x59, 0x38, 0xe2, 0x09, 0xe4, 0x53, - 0x3f, 0x1b, 0x47, 0x0a, 0x10, 0x7e, 0x0c, 0x92, 0xf9, 0x93, 0x4b, 0x55, 0xec, 0xfb, 0x9b, 0x54, - 0x79, 0x91, 0x48, 0x1e, 0xc3, 0x25, 0xc1, 0x47, 0xdb, 0x04, 0x22, 0x1c, 0x41, 0xcc, 0xd6, 0x75, - 0xde, 0x1f, 0x75, 0xf4, 0xc6, 0x69, 0xd5, 0xe6, 0x7d, 0x1e, 0x25, 0x3c, 0x89, 0x28, 0xfc, 0x50, - 0x80, 0x90, 0xf8, 0x21, 0xea, 0xa8, 0x0a, 0x87, 0x4c, 0x32, 0xdb, 0x3a, 0xb0, 0x0e, 0x7b, 0xb7, - 0x3f, 0x20, 0xd5, 0x7c, 0x96, 0x89, 0x48, 0x36, 0x8e, 0x14, 0x20, 0x88, 0x62, 0x93, 0xc9, 0x11, - 0xf9, 0x7a, 0xf0, 0x18, 0x42, 0x79, 0x0f, 0x24, 0x0b, 0xf0, 0xf9, 0xcc, 0xad, 0xcd, 0x67, 0x2e, - 0xaa, 0x30, 0xba, 0x74, 0xc5, 0x0f, 0x51, 0x53, 0x64, 0x10, 0xda, 0x75, 0xed, 0xfe, 0x09, 0xd9, - 0x32, 0x7d, 0xb2, 0xb1, 0xd6, 0xfb, 0x19, 0x84, 0xc1, 0xae, 0xc9, 0xd5, 0x54, 0x27, 0xaa, 0x9d, - 0xf1, 0x08, 0xb5, 0x85, 0x64, 0xb2, 0x10, 0x76, 0x43, 0xe7, 0xf8, 0xf4, 0x0a, 0x39, 0xb4, 0x4f, - 0xb0, 0x67, 0xb2, 0xb4, 0xcb, 0x33, 0x35, 0xfe, 0xde, 0xaf, 0x75, 0xe4, 0x6d, 0xd4, 0x9e, 0xa6, - 0xc9, 0x90, 0x4b, 0x9e, 0x26, 0xf8, 0x18, 0x35, 0xe5, 0x34, 0x03, 0x3d, 0xd0, 0x6e, 0xf0, 0xe6, - 0xa2, 0xe4, 0x07, 0xd3, 0x0c, 0x5e, 0xcc, 0xdc, 0xd7, 0xd7, 0xf9, 0x0a, 0xa7, 0x5a, 0x81, 0xdf, - 0x46, 0xed, 0x1c, 0x98, 0x48, 0x13, 0x3d, 0xae, 0x6e, 0x55, 0x08, 0xd5, 0x28, 0x35, 0x51, 0xfc, - 0x0e, 0xda, 0x89, 0x41, 
0x08, 0x16, 0x81, 0xee, 0xb9, 0x1b, 0x5c, 0x33, 0xc4, 0x9d, 0x7b, 0x25, - 0x4c, 0x17, 0x71, 0xfc, 0x18, 0xed, 0x3d, 0x61, 0x42, 0xf6, 0xb3, 0x21, 0x93, 0xf0, 0x80, 0xc7, - 0x60, 0x37, 0xf5, 0x94, 0xde, 0x7d, 0xb9, 0x7b, 0x56, 0x8a, 0xe0, 0x86, 0x71, 0xdf, 0xbb, 0xfb, - 0x1f, 0x27, 0xba, 0xe6, 0xec, 0xcd, 0x2c, 0x74, 0x73, 0xe3, 0x7c, 0xee, 0x72, 0x21, 0xf1, 0x77, - 0x97, 0xf6, 0x8d, 0xbc, 0x5c, 0x1d, 0x4a, 0xad, 0xb7, 0xed, 0xba, 0xa9, 0xa5, 0xb3, 0x40, 0x56, - 0x76, 0xed, 0x7b, 0xd4, 0xe2, 0x12, 0x62, 0x61, 0xd7, 0x0f, 0x1a, 0x87, 0xbd, 0xdb, 0x27, 0xaf, - 0xbe, 0x08, 0xc1, 0x6b, 0x26, 0x4d, 0xeb, 0x4c, 0x19, 0xd2, 0xd2, 0xd7, 0xfb, 0xbd, 0xf1, 0x3f, - 0x0d, 0xaa, 0x95, 0xc4, 0x6f, 0xa1, 0x9d, 0xbc, 0x3c, 0xea, 0xfe, 0x76, 0x83, 0x9e, 0xba, 0x15, - 0xc3, 0xa0, 0x8b, 0x18, 0x7e, 0x1f, 0x75, 0x0a, 0x01, 0x79, 0xc2, 0x62, 0x30, 0x57, 0xbd, 0xec, - 0xab, 0x6f, 0x70, 0xba, 0x64, 0xe0, 0x9b, 0xa8, 0x51, 0xf0, 0xa1, 0xb9, 0xea, 0x9e, 0x21, 0x36, - 0xfa, 0x67, 0x9f, 0x53, 0x85, 0x63, 0x0f, 0xb5, 0xa3, 0x3c, 0x2d, 0x32, 0x61, 0x37, 0x0f, 0x1a, - 0x87, 0xdd, 0x00, 0xa9, 0x8d, 0xf9, 0x42, 0x23, 0xd4, 0x44, 0x30, 0x41, 0xed, 0x42, 0xed, 0x83, - 0xb0, 0x5b, 0x9a, 0x73, 0x43, 0x71, 0xfa, 0x1a, 0x79, 0x31, 0x73, 0x3b, 0x5f, 0xc1, 0x54, 0x1f, - 0xa8, 0x61, 0xe1, 0x04, 0xb5, 0xe0, 0x27, 0x99, 0x33, 0xbb, 0xad, 0x47, 0x79, 0x76, 0xb5, 0x77, - 0x4b, 0xee, 0x28, 0xaf, 0x3b, 0x89, 0xcc, 0xa7, 0xd5, 0x64, 0x35, 0x46, 0xcb, 0x34, 0xfb, 0x80, - 0x50, 0xc5, 0xc1, 0xd7, 0x51, 0x63, 0x0c, 0xd3, 0xf2, 0x01, 0x51, 0xf5, 0x89, 0x3f, 0x43, 0xad, - 0x09, 0x7b, 0x52, 0x80, 0xf9, 0x8f, 0xbc, 0xb7, 0xb5, 0x1e, 0xed, 0xf6, 0x8d, 0x92, 0xd0, 0x52, - 0x79, 0x52, 0x3f, 0xb6, 0xbc, 0x3f, 0x2d, 0xe4, 0x6e, 0x79, 0xfd, 0xf8, 0x47, 0x84, 0xc2, 0xc5, - 0xdb, 0x14, 0xb6, 0xa5, 0xfb, 0x3f, 0x7d, 0xf5, 0xfe, 0x97, 0xef, 0xbc, 0xfa, 0x51, 0x2e, 0x21, - 0x41, 0x57, 0x52, 0xe1, 0x23, 0xd4, 0x5b, 0xb1, 0xd6, 0x9d, 0xee, 0x06, 0xd7, 0xe6, 0x33, 0xb7, - 0xb7, 0x62, 0x4e, 0x57, 0x39, 0xde, 0xc7, 0x66, 0x6c, 0xba, 0x51, 0xec, 0x2e, 0xf6, 0xdf, 0xd2, - 0x77, 0xdc, 0x5d, 0xdf, 0xdf, 0x93, 0xce, 0x2f, 0xbf, 0xb9, 0xb5, 0xa7, 0x7f, 0x1d, 0xd4, 0x82, - 0x5b, 0xe7, 0x17, 0x4e, 0xed, 0xd9, 0x85, 0x53, 0x7b, 0x7e, 0xe1, 0xd4, 0x9e, 0xce, 0x1d, 0xeb, - 0x7c, 0xee, 0x58, 0xcf, 0xe6, 0x8e, 0xf5, 0x7c, 0xee, 0x58, 0x7f, 0xcf, 0x1d, 0xeb, 0xe7, 0x7f, - 0x9c, 0xda, 0xb7, 0x3b, 0xa6, 0xbb, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xb7, 0x6b, 0x5b, 0xf9, - 0x7f, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/certificates/v1beta1/generated.proto b/vendor/k8s.io/api/certificates/v1beta1/generated.proto deleted file mode 100644 index 5200224..0000000 --- a/vendor/k8s.io/api/certificates/v1beta1/generated.proto +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
-
-syntax = 'proto2';
-
-package k8s.io.api.certificates.v1beta1;
-
-import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/generated.proto";
-import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto";
-
-// Package-wide variables from generator "generated".
-option go_package = "v1beta1";
-
-// Describes a certificate signing request
-message CertificateSigningRequest {
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1;
-
-  // The certificate request itself and any additional information.
-  // +optional
-  optional CertificateSigningRequestSpec spec = 2;
-
-  // Derived information about the request.
-  // +optional
-  optional CertificateSigningRequestStatus status = 3;
-}
-
-message CertificateSigningRequestCondition {
-  // request approval state, currently Approved or Denied.
-  optional string type = 1;
-
-  // brief reason for the request state
-  // +optional
-  optional string reason = 2;
-
-  // human readable message with details about the request state
-  // +optional
-  optional string message = 3;
-
-  // timestamp for the last update to this condition
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 4;
-}
-
-message CertificateSigningRequestList {
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1;
-
-  repeated CertificateSigningRequest items = 2;
-}
-
-// This information is immutable after the request is created. Only the Request
-// and Usages fields can be set on creation, other fields are derived by
-// Kubernetes and cannot be modified by users.
-message CertificateSigningRequestSpec {
-  // Base64-encoded PKCS#10 CSR data
-  optional bytes request = 1;
-
-  // allowedUsages specifies a set of usage contexts the key will be
-  // valid for.
-  // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
-  // https://tools.ietf.org/html/rfc5280#section-4.2.1.12
-  repeated string usages = 5;
-
-  // Information about the requesting user.
-  // See user.Info interface for details.
-  // +optional
-  optional string username = 2;
-
-  // UID information about the requesting user.
-  // See user.Info interface for details.
-  // +optional
-  optional string uid = 3;
-
-  // Group information about the requesting user.
-  // See user.Info interface for details.
-  // +optional
-  repeated string groups = 4;
-
-  // Extra information about the requesting user.
-  // See user.Info interface for details.
-  // +optional
-  map<string, ExtraValue> extra = 6;
-}
-
-message CertificateSigningRequestStatus {
-  // Conditions applied to the request, such as approval or denial.
-  // +optional
-  repeated CertificateSigningRequestCondition conditions = 1;
-
-  // If request was approved, the controller will place the issued certificate here.
-  // +optional
-  optional bytes certificate = 2;
-}
-
-// ExtraValue masks the value so protobuf can generate
-// +protobuf.nullable=true
-// +protobuf.options.(gogoproto.goproto_stringer)=false
-message ExtraValue {
-  // items, if empty, will result in an empty slice
-
-  repeated string items = 1;
-}
-
diff --git a/vendor/k8s.io/api/certificates/v1beta1/register.go b/vendor/k8s.io/api/certificates/v1beta1/register.go
deleted file mode 100644
index b4f3af9..0000000
--- a/vendor/k8s.io/api/certificates/v1beta1/register.go
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "certificates.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &CertificateSigningRequest{}, - &CertificateSigningRequestList{}, - ) - - // Add the watch version that applies - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/certificates/v1beta1/types.go b/vendor/k8s.io/api/certificates/v1beta1/types.go deleted file mode 100644 index bb9e82d..0000000 --- a/vendor/k8s.io/api/certificates/v1beta1/types.go +++ /dev/null @@ -1,155 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Describes a certificate signing request -type CertificateSigningRequest struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The certificate request itself and any additional information. - // +optional - Spec CertificateSigningRequestSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Derived information about the request. - // +optional - Status CertificateSigningRequestStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// This information is immutable after the request is created. 
Only the Request -// and Usages fields can be set on creation, other fields are derived by -// Kubernetes and cannot be modified by users. -type CertificateSigningRequestSpec struct { - // Base64-encoded PKCS#10 CSR data - Request []byte `json:"request" protobuf:"bytes,1,opt,name=request"` - - // allowedUsages specifies a set of usage contexts the key will be - // valid for. - // See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3 - // https://tools.ietf.org/html/rfc5280#section-4.2.1.12 - Usages []KeyUsage `json:"usages,omitempty" protobuf:"bytes,5,opt,name=usages"` - - // Information about the requesting user. - // See user.Info interface for details. - // +optional - Username string `json:"username,omitempty" protobuf:"bytes,2,opt,name=username"` - // UID information about the requesting user. - // See user.Info interface for details. - // +optional - UID string `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` - // Group information about the requesting user. - // See user.Info interface for details. - // +optional - Groups []string `json:"groups,omitempty" protobuf:"bytes,4,rep,name=groups"` - // Extra information about the requesting user. - // See user.Info interface for details. - // +optional - Extra map[string]ExtraValue `json:"extra,omitempty" protobuf:"bytes,6,rep,name=extra"` -} - -// ExtraValue masks the value so protobuf can generate -// +protobuf.nullable=true -// +protobuf.options.(gogoproto.goproto_stringer)=false -type ExtraValue []string - -func (t ExtraValue) String() string { - return fmt.Sprintf("%v", []string(t)) -} - -type CertificateSigningRequestStatus struct { - // Conditions applied to the request, such as approval or denial. - // +optional - Conditions []CertificateSigningRequestCondition `json:"conditions,omitempty" protobuf:"bytes,1,rep,name=conditions"` - - // If request was approved, the controller will place the issued certificate here. - // +optional - Certificate []byte `json:"certificate,omitempty" protobuf:"bytes,2,opt,name=certificate"` -} - -type RequestConditionType string - -// These are the possible conditions for a certificate request. -const ( - CertificateApproved RequestConditionType = "Approved" - CertificateDenied RequestConditionType = "Denied" -) - -type CertificateSigningRequestCondition struct { - // request approval state, currently Approved or Denied. - Type RequestConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=RequestConditionType"` - // brief reason for the request state - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,2,opt,name=reason"` - // human readable message with details about the request state - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // timestamp for the last update to this condition - // +optional - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,4,opt,name=lastUpdateTime"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -type CertificateSigningRequestList struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - Items []CertificateSigningRequest `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// KeyUsages specifies valid usage contexts for keys. 
-// See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3
-// https://tools.ietf.org/html/rfc5280#section-4.2.1.12
-type KeyUsage string
-
-const (
-	UsageSigning KeyUsage = "signing"
-	UsageDigitalSignature KeyUsage = "digital signature"
-	UsageContentCommittment KeyUsage = "content commitment"
-	UsageKeyEncipherment KeyUsage = "key encipherment"
-	UsageKeyAgreement KeyUsage = "key agreement"
-	UsageDataEncipherment KeyUsage = "data encipherment"
-	UsageCertSign KeyUsage = "cert sign"
-	UsageCRLSign KeyUsage = "crl sign"
-	UsageEncipherOnly KeyUsage = "encipher only"
-	UsageDecipherOnly KeyUsage = "decipher only"
-	UsageAny KeyUsage = "any"
-	UsageServerAuth KeyUsage = "server auth"
-	UsageClientAuth KeyUsage = "client auth"
-	UsageCodeSigning KeyUsage = "code signing"
-	UsageEmailProtection KeyUsage = "email protection"
-	UsageSMIME KeyUsage = "s/mime"
-	UsageIPsecEndSystem KeyUsage = "ipsec end system"
-	UsageIPsecTunnel KeyUsage = "ipsec tunnel"
-	UsageIPsecUser KeyUsage = "ipsec user"
-	UsageTimestamping KeyUsage = "timestamping"
-	UsageOCSPSigning KeyUsage = "ocsp signing"
-	UsageMicrosoftSGC KeyUsage = "microsoft sgc"
-	UsageNetscapSGC KeyUsage = "netscape sgc"
-)
diff --git a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
deleted file mode 100644
index f6a7e16..0000000
--- a/vendor/k8s.io/api/certificates/v1beta1/types_swagger_doc_generated.go
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1beta1
-
-// This file contains a collection of methods that can be used from go-restful to
-// generate Swagger API documentation for its models. Please read this PR for more
-// information on the implementation: https://github.com/emicklei/go-restful/pull/215
-//
-// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
-// they are on one line! For multiple line or blocks that you want to ignore use ---.
-// Any context after a --- is ignored.
-//
-// Those methods can be generated by using hack/update-generated-swagger-docs.sh
-
-// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
-var map_CertificateSigningRequest = map[string]string{ - "": "Describes a certificate signing request", - "spec": "The certificate request itself and any additional information.", - "status": "Derived information about the request.", -} - -func (CertificateSigningRequest) SwaggerDoc() map[string]string { - return map_CertificateSigningRequest -} - -var map_CertificateSigningRequestCondition = map[string]string{ - "type": "request approval state, currently Approved or Denied.", - "reason": "brief reason for the request state", - "message": "human readable message with details about the request state", - "lastUpdateTime": "timestamp for the last update to this condition", -} - -func (CertificateSigningRequestCondition) SwaggerDoc() map[string]string { - return map_CertificateSigningRequestCondition -} - -var map_CertificateSigningRequestSpec = map[string]string{ - "": "This information is immutable after the request is created. Only the Request and Usages fields can be set on creation, other fields are derived by Kubernetes and cannot be modified by users.", - "request": "Base64-encoded PKCS#10 CSR data", - "usages": "allowedUsages specifies a set of usage contexts the key will be valid for. See: https://tools.ietf.org/html/rfc5280#section-4.2.1.3\n https://tools.ietf.org/html/rfc5280#section-4.2.1.12", - "username": "Information about the requesting user. See user.Info interface for details.", - "uid": "UID information about the requesting user. See user.Info interface for details.", - "groups": "Group information about the requesting user. See user.Info interface for details.", - "extra": "Extra information about the requesting user. See user.Info interface for details.", -} - -func (CertificateSigningRequestSpec) SwaggerDoc() map[string]string { - return map_CertificateSigningRequestSpec -} - -var map_CertificateSigningRequestStatus = map[string]string{ - "conditions": "Conditions applied to the request, such as approval or denial.", - "certificate": "If request was approved, the controller will place the issued certificate here.", -} - -func (CertificateSigningRequestStatus) SwaggerDoc() map[string]string { - return map_CertificateSigningRequestStatus -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 1b103f1..0000000 --- a/vendor/k8s.io/api/certificates/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,197 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *CertificateSigningRequest) DeepCopyInto(out *CertificateSigningRequest) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequest. -func (in *CertificateSigningRequest) DeepCopy() *CertificateSigningRequest { - if in == nil { - return nil - } - out := new(CertificateSigningRequest) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CertificateSigningRequest) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CertificateSigningRequestCondition) DeepCopyInto(out *CertificateSigningRequestCondition) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestCondition. -func (in *CertificateSigningRequestCondition) DeepCopy() *CertificateSigningRequestCondition { - if in == nil { - return nil - } - out := new(CertificateSigningRequestCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CertificateSigningRequestList) DeepCopyInto(out *CertificateSigningRequestList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CertificateSigningRequest, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestList. -func (in *CertificateSigningRequestList) DeepCopy() *CertificateSigningRequestList { - if in == nil { - return nil - } - out := new(CertificateSigningRequestList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *CertificateSigningRequestList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CertificateSigningRequestSpec) DeepCopyInto(out *CertificateSigningRequestSpec) { - *out = *in - if in.Request != nil { - in, out := &in.Request, &out.Request - *out = make([]byte, len(*in)) - copy(*out, *in) - } - if in.Usages != nil { - in, out := &in.Usages, &out.Usages - *out = make([]KeyUsage, len(*in)) - copy(*out, *in) - } - if in.Groups != nil { - in, out := &in.Groups, &out.Groups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Extra != nil { - in, out := &in.Extra, &out.Extra - *out = make(map[string]ExtraValue, len(*in)) - for key, val := range *in { - var outVal []string - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestSpec. 
-func (in *CertificateSigningRequestSpec) DeepCopy() *CertificateSigningRequestSpec { - if in == nil { - return nil - } - out := new(CertificateSigningRequestSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CertificateSigningRequestStatus) DeepCopyInto(out *CertificateSigningRequestStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]CertificateSigningRequestCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Certificate != nil { - in, out := &in.Certificate, &out.Certificate - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CertificateSigningRequestStatus. -func (in *CertificateSigningRequestStatus) DeepCopy() *CertificateSigningRequestStatus { - if in == nil { - return nil - } - out := new(CertificateSigningRequestStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ExtraValue) DeepCopyInto(out *ExtraValue) { - { - in := &in - *out = make(ExtraValue, len(*in)) - copy(*out, *in) - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExtraValue. -func (in ExtraValue) DeepCopy() ExtraValue { - if in == nil { - return nil - } - out := new(ExtraValue) - in.DeepCopyInto(out) - return *out -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/doc.go b/vendor/k8s.io/api/coordination/v1beta1/doc.go deleted file mode 100644 index fecb513..0000000 --- a/vendor/k8s.io/api/coordination/v1beta1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=coordination.k8s.io -package v1beta1 // import "k8s.io/api/coordination/v1beta1" diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go b/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go deleted file mode 100644 index 6c2dbd9..0000000 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.pb.go +++ /dev/null @@ -1,883 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. 
-// source: k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto - - It has these top-level messages: - Lease - LeaseList - LeaseSpec -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *Lease) Reset() { *m = Lease{} } -func (*Lease) ProtoMessage() {} -func (*Lease) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *LeaseList) Reset() { *m = LeaseList{} } -func (*LeaseList) ProtoMessage() {} -func (*LeaseList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *LeaseSpec) Reset() { *m = LeaseSpec{} } -func (*LeaseSpec) ProtoMessage() {} -func (*LeaseSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func init() { - proto.RegisterType((*Lease)(nil), "k8s.io.api.coordination.v1beta1.Lease") - proto.RegisterType((*LeaseList)(nil), "k8s.io.api.coordination.v1beta1.LeaseList") - proto.RegisterType((*LeaseSpec)(nil), "k8s.io.api.coordination.v1beta1.LeaseSpec") -} -func (m *Lease) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lease) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *LeaseList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LeaseSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LeaseSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = 
i - var l int - _ = l - if m.HolderIdentity != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.HolderIdentity))) - i += copy(dAtA[i:], *m.HolderIdentity) - } - if m.LeaseDurationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseDurationSeconds)) - } - if m.AcquireTime != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AcquireTime.Size())) - n4, err := m.AcquireTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.RenewTime != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RenewTime.Size())) - n5, err := m.RenewTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if m.LeaseTransitions != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.LeaseTransitions)) - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Lease) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *LeaseList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *LeaseSpec) Size() (n int) { - var l int - _ = l - if m.HolderIdentity != nil { - l = len(*m.HolderIdentity) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.LeaseDurationSeconds != nil { - n += 1 + sovGenerated(uint64(*m.LeaseDurationSeconds)) - } - if m.AcquireTime != nil { - l = m.AcquireTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RenewTime != nil { - l = m.RenewTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.LeaseTransitions != nil { - n += 1 + sovGenerated(uint64(*m.LeaseTransitions)) - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Lease) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Lease{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LeaseSpec", "LeaseSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LeaseList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LeaseList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", 
"k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Lease", "Lease", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LeaseSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LeaseSpec{`, - `HolderIdentity:` + valueToStringGenerated(this.HolderIdentity) + `,`, - `LeaseDurationSeconds:` + valueToStringGenerated(this.LeaseDurationSeconds) + `,`, - `AcquireTime:` + strings.Replace(fmt.Sprintf("%v", this.AcquireTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `RenewTime:` + strings.Replace(fmt.Sprintf("%v", this.RenewTime), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1) + `,`, - `LeaseTransitions:` + valueToStringGenerated(this.LeaseTransitions) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Lease) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lease: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Lease{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LeaseSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LeaseSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LeaseSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HolderIdentity", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.HolderIdentity = &s - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseDurationSeconds", wireType) - } - var v int32 - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.LeaseDurationSeconds = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AcquireTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AcquireTime == nil { - m.AcquireTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} - } - if err := m.AcquireTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RenewTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RenewTime == nil { - m.RenewTime = &k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime{} - } - if err := m.RenewTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LeaseTransitions", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.LeaseTransitions = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = 
iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/coordination/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 540 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x91, 0xc1, 0x6e, 0xd3, 0x40, - 0x10, 0x86, 0xe3, 0xb6, 0x11, 0xcd, 0x86, 0x96, 0xc8, 0xca, 0xc1, 0xca, 0xc1, 0xae, 0x72, 0x40, - 0x15, 0x52, 0x77, 0x49, 0x85, 0x10, 0xe2, 0x04, 0x16, 0x87, 0x56, 0xb8, 0x42, 0x72, 0x7b, 0x42, - 0x3d, 0xb0, 0xb6, 0x07, 0x67, 0x49, 0xed, 0x35, 0xbb, 0xeb, 0xa0, 0xde, 0x78, 0x04, 0xae, 0xbc, - 0x08, 0xbc, 0x42, 0x8e, 0x3d, 0xf6, 0x64, 0x11, 0xf3, 0x22, 0xc8, 0x1b, 0xb7, 0x09, 0x49, 0x51, - 0x23, 0x6e, 0xde, 0x99, 0xf9, 0xbf, 0xf9, 0xe7, 0x37, 0x3a, 0x1a, 0xbd, 0x90, 0x98, 0x71, 0x32, - 0xca, 0x03, 0x10, 0x29, 0x28, 0x90, 0x64, 0x0c, 0x69, 0xc4, 0x05, 0xa9, 0x1b, 0x34, 0x63, 0x24, - 0xe4, 0x5c, 0x44, 0x2c, 0xa5, 0x8a, 0xf1, 0x94, 0x8c, 0x07, 0x01, 0x28, 0x3a, 0x20, 0x31, 0xa4, - 0x20, 0xa8, 0x82, 0x08, 0x67, 0x82, 0x2b, 0x6e, 0x3a, 0x33, 0x01, 0xa6, 0x19, 0xc3, 0x8b, 0x02, - 0x5c, 0x0b, 0x7a, 0x07, 0x31, 0x53, 0xc3, 0x3c, 0xc0, 0x21, 0x4f, 0x48, 0xcc, 0x63, 0x4e, 0xb4, - 0x2e, 0xc8, 0x3f, 0xea, 0x97, 0x7e, 0xe8, 0xaf, 0x19, 0xaf, 0xf7, 0x6c, 0x6e, 0x20, 0xa1, 0xe1, - 0x90, 0xa5, 0x20, 0x2e, 0x49, 0x36, 0x8a, 0xab, 0x82, 0x24, 0x09, 0x28, 0x4a, 0xc6, 0x2b, 0x2e, - 0x7a, 0xe4, 0x5f, 0x2a, 0x91, 0xa7, 0x8a, 0x25, 0xb0, 0x22, 0x78, 0x7e, 0x9f, 0x40, 0x86, 0x43, - 0x48, 0xe8, 0xb2, 0xae, 0xff, 0xd3, 0x40, 0x4d, 0x0f, 0xa8, 0x04, 0xf3, 0x03, 0xda, 0xae, 0xdc, - 0x44, 0x54, 0x51, 0xcb, 0xd8, 0x33, 0xf6, 0xdb, 0x87, 0x4f, 0xf1, 0x3c, 0x8b, 0x5b, 0x28, 0xce, - 0x46, 0x71, 0x55, 0x90, 0xb8, 0x9a, 0xc6, 0xe3, 0x01, 0x7e, 0x17, 0x7c, 0x82, 0x50, 0x9d, 0x80, - 0xa2, 0xae, 0x39, 0x29, 0x9c, 0x46, 0x59, 0x38, 0x68, 0x5e, 0xf3, 0x6f, 0xa9, 0xa6, 0x87, 0xb6, - 0x64, 0x06, 0xa1, 0xb5, 0xa1, 0xe9, 0x4f, 0xf0, 0x3d, 0x49, 0x63, 0xed, 0xeb, 0x34, 0x83, 0xd0, - 0x7d, 0x58, 0x73, 0xb7, 0xaa, 0x97, 0xaf, 0x29, 0xfd, 0x1f, 0x06, 0x6a, 0xe9, 0x09, 0x8f, 0x49, - 0x65, 0x9e, 0xaf, 0xb8, 0xc7, 0xeb, 0xb9, 0xaf, 0xd4, 0xda, 0x7b, 0xa7, 0xde, 0xb1, 0x7d, 0x53, - 0x59, 0x70, 0xfe, 0x16, 0x35, 0x99, 0x82, 0x44, 0x5a, 0x1b, 0x7b, 0x9b, 0xfb, 0xed, 0xc3, 0xc7, - 0xeb, 0x59, 0x77, 0x77, 0x6a, 0x64, 0xf3, 0xb8, 0x12, 0xfb, 0x33, 0x46, 0xff, 0xfb, 0x66, 0x6d, - 0xbc, 0x3a, 0xc6, 0x7c, 0x89, 0x76, 0x87, 0xfc, 0x22, 0x02, 0x71, 0x1c, 0x41, 0xaa, 0x98, 0xba, - 0xd4, 0xf6, 0x5b, 0xae, 0x59, 0x16, 0xce, 0xee, 0xd1, 0x5f, 0x1d, 0x7f, 0x69, 0xd2, 0xf4, 0x50, - 0xf7, 0xa2, 0x02, 0xbd, 0xc9, 0x85, 0x5e, 0x7f, 0x0a, 0x21, 0x4f, 0x23, 0xa9, 0x03, 0x6e, 0xba, - 0x56, 0x59, 0x38, 0x5d, 0xef, 0x8e, 0xbe, 0x7f, 
0xa7, 0xca, 0x0c, 0x50, 0x9b, 0x86, 0x9f, 0x73, - 0x26, 0xe0, 0x8c, 0x25, 0x60, 0x6d, 0xea, 0x14, 0xc9, 0x7a, 0x29, 0x9e, 0xb0, 0x50, 0xf0, 0x4a, - 0xe6, 0x3e, 0x2a, 0x0b, 0xa7, 0xfd, 0x7a, 0xce, 0xf1, 0x17, 0xa1, 0xe6, 0x39, 0x6a, 0x09, 0x48, - 0xe1, 0x8b, 0xde, 0xb0, 0xf5, 0x7f, 0x1b, 0x76, 0xca, 0xc2, 0x69, 0xf9, 0x37, 0x14, 0x7f, 0x0e, - 0x34, 0x5f, 0xa1, 0x8e, 0xbe, 0xec, 0x4c, 0xd0, 0x54, 0xb2, 0xea, 0x36, 0x69, 0x35, 0x75, 0x16, - 0xdd, 0xb2, 0x70, 0x3a, 0xde, 0x52, 0xcf, 0x5f, 0x99, 0x76, 0x0f, 0x26, 0x53, 0xbb, 0x71, 0x35, - 0xb5, 0x1b, 0xd7, 0x53, 0xbb, 0xf1, 0xb5, 0xb4, 0x8d, 0x49, 0x69, 0x1b, 0x57, 0xa5, 0x6d, 0x5c, - 0x97, 0xb6, 0xf1, 0xab, 0xb4, 0x8d, 0x6f, 0xbf, 0xed, 0xc6, 0xfb, 0x07, 0xf5, 0x6f, 0xfe, 0x13, - 0x00, 0x00, 0xff, 0xff, 0x51, 0x34, 0x6a, 0x0f, 0x77, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/generated.proto b/vendor/k8s.io/api/coordination/v1beta1/generated.proto deleted file mode 100644 index 918e0de..0000000 --- a/vendor/k8s.io/api/coordination/v1beta1/generated.proto +++ /dev/null @@ -1,80 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.coordination.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Lease defines a lease concept. -message Lease { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional LeaseSpec spec = 2; -} - -// LeaseList is a list of Lease objects. -message LeaseList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated Lease items = 2; -} - -// LeaseSpec is a specification of a Lease. -message LeaseSpec { - // holderIdentity contains the identity of the holder of a current lease. - // +optional - optional string holderIdentity = 1; - - // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last - // observed RenewTime. - // +optional - optional int32 leaseDurationSeconds = 2; - - // acquireTime is a time when the current lease was acquired. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime acquireTime = 3; - - // renewTime is a time when the current holder of a lease has last - // updated the lease. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime renewTime = 4; - - // leaseTransitions is the number of transitions of a lease between - // holders. - // +optional - optional int32 leaseTransitions = 5; -} - diff --git a/vendor/k8s.io/api/coordination/v1beta1/register.go b/vendor/k8s.io/api/coordination/v1beta1/register.go deleted file mode 100644 index 85efaa6..0000000 --- a/vendor/k8s.io/api/coordination/v1beta1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "coordination.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Lease{}, - &LeaseList{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/types.go b/vendor/k8s.io/api/coordination/v1beta1/types.go deleted file mode 100644 index 846f728..0000000 --- a/vendor/k8s.io/api/coordination/v1beta1/types.go +++ /dev/null @@ -1,74 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Lease defines a lease concept. 
-type Lease struct { - metav1.TypeMeta `json:",inline"` - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the Lease. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec LeaseSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// LeaseSpec is a specification of a Lease. -type LeaseSpec struct { - // holderIdentity contains the identity of the holder of a current lease. - // +optional - HolderIdentity *string `json:"holderIdentity,omitempty" protobuf:"bytes,1,opt,name=holderIdentity"` - // leaseDurationSeconds is a duration that candidates for a lease need - // to wait to force acquire it. This is measure against time of last - // observed RenewTime. - // +optional - LeaseDurationSeconds *int32 `json:"leaseDurationSeconds,omitempty" protobuf:"varint,2,opt,name=leaseDurationSeconds"` - // acquireTime is a time when the current lease was acquired. - // +optional - AcquireTime *metav1.MicroTime `json:"acquireTime,omitempty" protobuf:"bytes,3,opt,name=acquireTime"` - // renewTime is a time when the current holder of a lease has last - // updated the lease. - // +optional - RenewTime *metav1.MicroTime `json:"renewTime,omitempty" protobuf:"bytes,4,opt,name=renewTime"` - // leaseTransitions is the number of transitions of a lease between - // holders. - // +optional - LeaseTransitions *int32 `json:"leaseTransitions,omitempty" protobuf:"varint,5,opt,name=leaseTransitions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LeaseList is a list of Lease objects. -type LeaseList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []Lease `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 4532d32..0000000 --- a/vendor/k8s.io/api/coordination/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,63 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. 
-// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Lease = map[string]string{ - "": "Lease defines a lease concept.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the Lease. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (Lease) SwaggerDoc() map[string]string { - return map_Lease -} - -var map_LeaseList = map[string]string{ - "": "LeaseList is a list of Lease objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (LeaseList) SwaggerDoc() map[string]string { - return map_LeaseList -} - -var map_LeaseSpec = map[string]string{ - "": "LeaseSpec is a specification of a Lease.", - "holderIdentity": "holderIdentity contains the identity of the holder of a current lease.", - "leaseDurationSeconds": "leaseDurationSeconds is a duration that candidates for a lease need to wait to force acquire it. This is measure against time of last observed RenewTime.", - "acquireTime": "acquireTime is a time when the current lease was acquired.", - "renewTime": "renewTime is a time when the current holder of a lease has last updated the lease.", - "leaseTransitions": "leaseTransitions is the number of transitions of a lease between holders.", -} - -func (LeaseSpec) SwaggerDoc() map[string]string { - return map_LeaseSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index a628ac1..0000000 --- a/vendor/k8s.io/api/coordination/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Lease) DeepCopyInto(out *Lease) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Lease. -func (in *Lease) DeepCopy() *Lease { - if in == nil { - return nil - } - out := new(Lease) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Lease) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. 
in must be non-nil. -func (in *LeaseList) DeepCopyInto(out *LeaseList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Lease, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseList. -func (in *LeaseList) DeepCopy() *LeaseList { - if in == nil { - return nil - } - out := new(LeaseList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *LeaseList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *LeaseSpec) DeepCopyInto(out *LeaseSpec) { - *out = *in - if in.HolderIdentity != nil { - in, out := &in.HolderIdentity, &out.HolderIdentity - *out = new(string) - **out = **in - } - if in.LeaseDurationSeconds != nil { - in, out := &in.LeaseDurationSeconds, &out.LeaseDurationSeconds - *out = new(int32) - **out = **in - } - if in.AcquireTime != nil { - in, out := &in.AcquireTime, &out.AcquireTime - *out = (*in).DeepCopy() - } - if in.RenewTime != nil { - in, out := &in.RenewTime, &out.RenewTime - *out = (*in).DeepCopy() - } - if in.LeaseTransitions != nil { - in, out := &in.LeaseTransitions, &out.LeaseTransitions - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new LeaseSpec. -func (in *LeaseSpec) DeepCopy() *LeaseSpec { - if in == nil { - return nil - } - out := new(LeaseSpec) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/core/v1/annotation_key_constants.go b/vendor/k8s.io/api/core/v1/annotation_key_constants.go deleted file mode 100644 index 16a0cfc..0000000 --- a/vendor/k8s.io/api/core/v1/annotation_key_constants.go +++ /dev/null @@ -1,81 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file should be consistent with pkg/api/annotation_key_constants.go. - -package v1 - -const ( - // ImagePolicyFailedOpenKey is added to pods created by failing open when the image policy - // webhook backend fails. - ImagePolicyFailedOpenKey string = "alpha.image-policy.k8s.io/failed-open" - - // PodPresetOptOutAnnotationKey represents the annotation key for a pod to exempt itself from pod preset manipulation - PodPresetOptOutAnnotationKey string = "podpreset.admission.kubernetes.io/exclude" - - // MirrorAnnotationKey represents the annotation key set by kubelets when creating mirror pods - MirrorPodAnnotationKey string = "kubernetes.io/config.mirror" - - // TolerationsAnnotationKey represents the key of tolerations data (json serialized) - // in the Annotations of a Pod. 
- TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations" - - // TaintsAnnotationKey represents the key of taints data (json serialized) - // in the Annotations of a Node. - TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints" - - // SeccompPodAnnotationKey represents the key of a seccomp profile applied - // to all containers of a pod. - SeccompPodAnnotationKey string = "seccomp.security.alpha.kubernetes.io/pod" - - // SeccompContainerAnnotationKeyPrefix represents the key of a seccomp profile applied - // to one container of a pod. - SeccompContainerAnnotationKeyPrefix string = "container.seccomp.security.alpha.kubernetes.io/" - - // SeccompProfileRuntimeDefault represents the default seccomp profile used by container runtime. - SeccompProfileRuntimeDefault string = "runtime/default" - - // DeprecatedSeccompProfileDockerDefault represents the default seccomp profile used by docker. - // This is now deprecated and should be replaced by SeccompProfileRuntimeDefault. - DeprecatedSeccompProfileDockerDefault string = "docker/default" - - // PreferAvoidPodsAnnotationKey represents the key of preferAvoidPods data (json serialized) - // in the Annotations of a Node. - PreferAvoidPodsAnnotationKey string = "scheduler.alpha.kubernetes.io/preferAvoidPods" - - // ObjectTTLAnnotations represents a suggestion for kubelet for how long it can cache - // an object (e.g. secret, config map) before fetching it again from apiserver. - // This annotation can be attached to node. - ObjectTTLAnnotationKey string = "node.alpha.kubernetes.io/ttl" - - // annotation key prefix used to identify non-convertible json paths. - NonConvertibleAnnotationPrefix = "non-convertible.kubernetes.io" - - kubectlPrefix = "kubectl.kubernetes.io/" - - // LastAppliedConfigAnnotation is the annotation used to store the previous - // configuration of a resource for use in a three way diff by UpdateApplyAnnotation. - LastAppliedConfigAnnotation = kubectlPrefix + "last-applied-configuration" - - // AnnotationLoadBalancerSourceRangesKey is the key of the annotation on a service to set allowed ingress ranges on their LoadBalancers - // - // It should be a comma-separated list of CIDRs, e.g. `0.0.0.0/0` to - // allow full access (the default) or `18.0.0.0/8,56.0.0.0/8` to allow - // access only from the CIDRs currently allocated to MIT & the USPS. - // - // Not all cloud providers support this annotation, though AWS & GCE do. - AnnotationLoadBalancerSourceRangesKey = "service.beta.kubernetes.io/load-balancer-source-ranges" -) diff --git a/vendor/k8s.io/api/core/v1/doc.go b/vendor/k8s.io/api/core/v1/doc.go deleted file mode 100644 index 96994c6..0000000 --- a/vendor/k8s.io/api/core/v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:openapi-gen=true -// +k8s:deepcopy-gen=package - -// Package v1 is the v1 version of the core API. 
-package v1 // import "k8s.io/api/core/v1" diff --git a/vendor/k8s.io/api/core/v1/generated.pb.go b/vendor/k8s.io/api/core/v1/generated.pb.go deleted file mode 100644 index b569ea8..0000000 --- a/vendor/k8s.io/api/core/v1/generated.pb.go +++ /dev/null @@ -1,52149 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto - - It has these top-level messages: - AWSElasticBlockStoreVolumeSource - Affinity - AttachedVolume - AvoidPods - AzureDiskVolumeSource - AzureFilePersistentVolumeSource - AzureFileVolumeSource - Binding - CSIPersistentVolumeSource - Capabilities - CephFSPersistentVolumeSource - CephFSVolumeSource - CinderPersistentVolumeSource - CinderVolumeSource - ClientIPConfig - ComponentCondition - ComponentStatus - ComponentStatusList - ConfigMap - ConfigMapEnvSource - ConfigMapKeySelector - ConfigMapList - ConfigMapNodeConfigSource - ConfigMapProjection - ConfigMapVolumeSource - Container - ContainerImage - ContainerPort - ContainerState - ContainerStateRunning - ContainerStateTerminated - ContainerStateWaiting - ContainerStatus - DaemonEndpoint - DownwardAPIProjection - DownwardAPIVolumeFile - DownwardAPIVolumeSource - EmptyDirVolumeSource - EndpointAddress - EndpointPort - EndpointSubset - Endpoints - EndpointsList - EnvFromSource - EnvVar - EnvVarSource - Event - EventList - EventSeries - EventSource - ExecAction - FCVolumeSource - FlexPersistentVolumeSource - FlexVolumeSource - FlockerVolumeSource - GCEPersistentDiskVolumeSource - GitRepoVolumeSource - GlusterfsVolumeSource - HTTPGetAction - HTTPHeader - Handler - HostAlias - HostPathVolumeSource - ISCSIPersistentVolumeSource - ISCSIVolumeSource - KeyToPath - Lifecycle - LimitRange - LimitRangeItem - LimitRangeList - LimitRangeSpec - List - LoadBalancerIngress - LoadBalancerStatus - LocalObjectReference - LocalVolumeSource - NFSVolumeSource - Namespace - NamespaceList - NamespaceSpec - NamespaceStatus - Node - NodeAddress - NodeAffinity - NodeCondition - NodeConfigSource - NodeConfigStatus - NodeDaemonEndpoints - NodeList - NodeProxyOptions - NodeResources - NodeSelector - NodeSelectorRequirement - NodeSelectorTerm - NodeSpec - NodeStatus - NodeSystemInfo - ObjectFieldSelector - ObjectReference - PersistentVolume - PersistentVolumeClaim - PersistentVolumeClaimCondition - PersistentVolumeClaimList - PersistentVolumeClaimSpec - PersistentVolumeClaimStatus - PersistentVolumeClaimVolumeSource - PersistentVolumeList - PersistentVolumeSource - PersistentVolumeSpec - PersistentVolumeStatus - PhotonPersistentDiskVolumeSource - Pod - PodAffinity - PodAffinityTerm - PodAntiAffinity - PodAttachOptions - PodCondition - PodDNSConfig - PodDNSConfigOption - PodExecOptions - PodList - PodLogOptions - PodPortForwardOptions - PodProxyOptions - 
PodReadinessGate - PodSecurityContext - PodSignature - PodSpec - PodStatus - PodStatusResult - PodTemplate - PodTemplateList - PodTemplateSpec - PortworxVolumeSource - Preconditions - PreferAvoidPodsEntry - PreferredSchedulingTerm - Probe - ProjectedVolumeSource - QuobyteVolumeSource - RBDPersistentVolumeSource - RBDVolumeSource - RangeAllocation - ReplicationController - ReplicationControllerCondition - ReplicationControllerList - ReplicationControllerSpec - ReplicationControllerStatus - ResourceFieldSelector - ResourceQuota - ResourceQuotaList - ResourceQuotaSpec - ResourceQuotaStatus - ResourceRequirements - SELinuxOptions - ScaleIOPersistentVolumeSource - ScaleIOVolumeSource - ScopeSelector - ScopedResourceSelectorRequirement - Secret - SecretEnvSource - SecretKeySelector - SecretList - SecretProjection - SecretReference - SecretVolumeSource - SecurityContext - SerializedReference - Service - ServiceAccount - ServiceAccountList - ServiceAccountTokenProjection - ServiceList - ServicePort - ServiceProxyOptions - ServiceSpec - ServiceStatus - SessionAffinityConfig - StorageOSPersistentVolumeSource - StorageOSVolumeSource - Sysctl - TCPSocketAction - Taint - Toleration - TopologySelectorLabelRequirement - TopologySelectorTerm - TypedLocalObjectReference - Volume - VolumeDevice - VolumeMount - VolumeNodeAffinity - VolumeProjection - VolumeSource - VsphereVirtualDiskVolumeSource - WeightedPodAffinityTerm -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" -import k8s_io_apimachinery_pkg_runtime "k8s.io/apimachinery/pkg/runtime" - -import k8s_io_apimachinery_pkg_types "k8s.io/apimachinery/pkg/types" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *AWSElasticBlockStoreVolumeSource) Reset() { *m = AWSElasticBlockStoreVolumeSource{} } -func (*AWSElasticBlockStoreVolumeSource) ProtoMessage() {} -func (*AWSElasticBlockStoreVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{0} -} - -func (m *Affinity) Reset() { *m = Affinity{} } -func (*Affinity) ProtoMessage() {} -func (*Affinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *AttachedVolume) Reset() { *m = AttachedVolume{} } -func (*AttachedVolume) ProtoMessage() {} -func (*AttachedVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *AvoidPods) Reset() { *m = AvoidPods{} } -func (*AvoidPods) ProtoMessage() {} -func (*AvoidPods) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *AzureDiskVolumeSource) Reset() { *m = AzureDiskVolumeSource{} } -func (*AzureDiskVolumeSource) ProtoMessage() {} -func (*AzureDiskVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *AzureFilePersistentVolumeSource) Reset() { *m = AzureFilePersistentVolumeSource{} } -func (*AzureFilePersistentVolumeSource) ProtoMessage() {} -func (*AzureFilePersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{5} -} - -func (m *AzureFileVolumeSource) Reset() { *m = AzureFileVolumeSource{} } -func (*AzureFileVolumeSource) ProtoMessage() {} -func (*AzureFileVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *Binding) Reset() { *m = Binding{} } -func (*Binding) ProtoMessage() {} -func (*Binding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *CSIPersistentVolumeSource) Reset() { *m = CSIPersistentVolumeSource{} } -func (*CSIPersistentVolumeSource) ProtoMessage() {} -func (*CSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{8} -} - -func (m *Capabilities) Reset() { *m = Capabilities{} } -func (*Capabilities) ProtoMessage() {} -func (*Capabilities) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *CephFSPersistentVolumeSource) Reset() { *m = CephFSPersistentVolumeSource{} } -func (*CephFSPersistentVolumeSource) ProtoMessage() {} -func (*CephFSPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{10} -} - -func (m *CephFSVolumeSource) Reset() { *m = CephFSVolumeSource{} } -func (*CephFSVolumeSource) ProtoMessage() {} -func (*CephFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *CinderPersistentVolumeSource) Reset() { *m = CinderPersistentVolumeSource{} } -func (*CinderPersistentVolumeSource) ProtoMessage() {} -func (*CinderPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{12} -} - -func (m *CinderVolumeSource) Reset() { *m = CinderVolumeSource{} } -func (*CinderVolumeSource) ProtoMessage() {} -func (*CinderVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *ClientIPConfig) Reset() { *m = ClientIPConfig{} } -func (*ClientIPConfig) ProtoMessage() {} -func (*ClientIPConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *ComponentCondition) Reset() { *m = ComponentCondition{} } -func (*ComponentCondition) ProtoMessage() {} 
-func (*ComponentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m *ComponentStatus) Reset() { *m = ComponentStatus{} } -func (*ComponentStatus) ProtoMessage() {} -func (*ComponentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *ComponentStatusList) Reset() { *m = ComponentStatusList{} } -func (*ComponentStatusList) ProtoMessage() {} -func (*ComponentStatusList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *ConfigMap) Reset() { *m = ConfigMap{} } -func (*ConfigMap) ProtoMessage() {} -func (*ConfigMap) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func (m *ConfigMapEnvSource) Reset() { *m = ConfigMapEnvSource{} } -func (*ConfigMapEnvSource) ProtoMessage() {} -func (*ConfigMapEnvSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *ConfigMapKeySelector) Reset() { *m = ConfigMapKeySelector{} } -func (*ConfigMapKeySelector) ProtoMessage() {} -func (*ConfigMapKeySelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } - -func (m *ConfigMapList) Reset() { *m = ConfigMapList{} } -func (*ConfigMapList) ProtoMessage() {} -func (*ConfigMapList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } - -func (m *ConfigMapNodeConfigSource) Reset() { *m = ConfigMapNodeConfigSource{} } -func (*ConfigMapNodeConfigSource) ProtoMessage() {} -func (*ConfigMapNodeConfigSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{22} -} - -func (m *ConfigMapProjection) Reset() { *m = ConfigMapProjection{} } -func (*ConfigMapProjection) ProtoMessage() {} -func (*ConfigMapProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *ConfigMapVolumeSource) Reset() { *m = ConfigMapVolumeSource{} } -func (*ConfigMapVolumeSource) ProtoMessage() {} -func (*ConfigMapVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } - -func (m *Container) Reset() { *m = Container{} } -func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } - -func (m *ContainerImage) Reset() { *m = ContainerImage{} } -func (*ContainerImage) ProtoMessage() {} -func (*ContainerImage) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } - -func (m *ContainerPort) Reset() { *m = ContainerPort{} } -func (*ContainerPort) ProtoMessage() {} -func (*ContainerPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } - -func (m *ContainerState) Reset() { *m = ContainerState{} } -func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } - -func (m *ContainerStateRunning) Reset() { *m = ContainerStateRunning{} } -func (*ContainerStateRunning) ProtoMessage() {} -func (*ContainerStateRunning) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } - -func (m *ContainerStateTerminated) Reset() { *m = ContainerStateTerminated{} } -func (*ContainerStateTerminated) ProtoMessage() {} -func (*ContainerStateTerminated) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{30} -} - -func (m *ContainerStateWaiting) Reset() { *m = ContainerStateWaiting{} } -func (*ContainerStateWaiting) ProtoMessage() {} -func (*ContainerStateWaiting) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, 
[]int{31} } - -func (m *ContainerStatus) Reset() { *m = ContainerStatus{} } -func (*ContainerStatus) ProtoMessage() {} -func (*ContainerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } - -func (m *DaemonEndpoint) Reset() { *m = DaemonEndpoint{} } -func (*DaemonEndpoint) ProtoMessage() {} -func (*DaemonEndpoint) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{33} } - -func (m *DownwardAPIProjection) Reset() { *m = DownwardAPIProjection{} } -func (*DownwardAPIProjection) ProtoMessage() {} -func (*DownwardAPIProjection) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{34} } - -func (m *DownwardAPIVolumeFile) Reset() { *m = DownwardAPIVolumeFile{} } -func (*DownwardAPIVolumeFile) ProtoMessage() {} -func (*DownwardAPIVolumeFile) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{35} } - -func (m *DownwardAPIVolumeSource) Reset() { *m = DownwardAPIVolumeSource{} } -func (*DownwardAPIVolumeSource) ProtoMessage() {} -func (*DownwardAPIVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{36} -} - -func (m *EmptyDirVolumeSource) Reset() { *m = EmptyDirVolumeSource{} } -func (*EmptyDirVolumeSource) ProtoMessage() {} -func (*EmptyDirVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } - -func (m *EndpointAddress) Reset() { *m = EndpointAddress{} } -func (*EndpointAddress) ProtoMessage() {} -func (*EndpointAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } - -func (m *EndpointPort) Reset() { *m = EndpointPort{} } -func (*EndpointPort) ProtoMessage() {} -func (*EndpointPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } - -func (m *EndpointSubset) Reset() { *m = EndpointSubset{} } -func (*EndpointSubset) ProtoMessage() {} -func (*EndpointSubset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } - -func (m *Endpoints) Reset() { *m = Endpoints{} } -func (*Endpoints) ProtoMessage() {} -func (*Endpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } - -func (m *EndpointsList) Reset() { *m = EndpointsList{} } -func (*EndpointsList) ProtoMessage() {} -func (*EndpointsList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } - -func (m *EnvFromSource) Reset() { *m = EnvFromSource{} } -func (*EnvFromSource) ProtoMessage() {} -func (*EnvFromSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } - -func (m *EnvVar) Reset() { *m = EnvVar{} } -func (*EnvVar) ProtoMessage() {} -func (*EnvVar) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } - -func (m *EnvVarSource) Reset() { *m = EnvVarSource{} } -func (*EnvVarSource) ProtoMessage() {} -func (*EnvVarSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } - -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } - -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } - -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{48} } - -func (m *EventSource) Reset() { *m = EventSource{} } -func (*EventSource) ProtoMessage() {} -func 
(*EventSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } - -func (m *ExecAction) Reset() { *m = ExecAction{} } -func (*ExecAction) ProtoMessage() {} -func (*ExecAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } - -func (m *FCVolumeSource) Reset() { *m = FCVolumeSource{} } -func (*FCVolumeSource) ProtoMessage() {} -func (*FCVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{51} } - -func (m *FlexPersistentVolumeSource) Reset() { *m = FlexPersistentVolumeSource{} } -func (*FlexPersistentVolumeSource) ProtoMessage() {} -func (*FlexPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{52} -} - -func (m *FlexVolumeSource) Reset() { *m = FlexVolumeSource{} } -func (*FlexVolumeSource) ProtoMessage() {} -func (*FlexVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } - -func (m *FlockerVolumeSource) Reset() { *m = FlockerVolumeSource{} } -func (*FlockerVolumeSource) ProtoMessage() {} -func (*FlockerVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } - -func (m *GCEPersistentDiskVolumeSource) Reset() { *m = GCEPersistentDiskVolumeSource{} } -func (*GCEPersistentDiskVolumeSource) ProtoMessage() {} -func (*GCEPersistentDiskVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{55} -} - -func (m *GitRepoVolumeSource) Reset() { *m = GitRepoVolumeSource{} } -func (*GitRepoVolumeSource) ProtoMessage() {} -func (*GitRepoVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } - -func (m *GlusterfsVolumeSource) Reset() { *m = GlusterfsVolumeSource{} } -func (*GlusterfsVolumeSource) ProtoMessage() {} -func (*GlusterfsVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{57} } - -func (m *HTTPGetAction) Reset() { *m = HTTPGetAction{} } -func (*HTTPGetAction) ProtoMessage() {} -func (*HTTPGetAction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{58} } - -func (m *HTTPHeader) Reset() { *m = HTTPHeader{} } -func (*HTTPHeader) ProtoMessage() {} -func (*HTTPHeader) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{59} } - -func (m *Handler) Reset() { *m = Handler{} } -func (*Handler) ProtoMessage() {} -func (*Handler) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{60} } - -func (m *HostAlias) Reset() { *m = HostAlias{} } -func (*HostAlias) ProtoMessage() {} -func (*HostAlias) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{61} } - -func (m *HostPathVolumeSource) Reset() { *m = HostPathVolumeSource{} } -func (*HostPathVolumeSource) ProtoMessage() {} -func (*HostPathVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{62} } - -func (m *ISCSIPersistentVolumeSource) Reset() { *m = ISCSIPersistentVolumeSource{} } -func (*ISCSIPersistentVolumeSource) ProtoMessage() {} -func (*ISCSIPersistentVolumeSource) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{63} -} - -func (m *ISCSIVolumeSource) Reset() { *m = ISCSIVolumeSource{} } -func (*ISCSIVolumeSource) ProtoMessage() {} -func (*ISCSIVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{64} } - -func (m *KeyToPath) Reset() { *m = KeyToPath{} } -func (*KeyToPath) ProtoMessage() {} -func (*KeyToPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{65} } - -func (m *Lifecycle) Reset() { *m = 
Lifecycle{} } -func (*Lifecycle) ProtoMessage() {} -func (*Lifecycle) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{66} } - -func (m *LimitRange) Reset() { *m = LimitRange{} } -func (*LimitRange) ProtoMessage() {} -func (*LimitRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{67} } - -func (m *LimitRangeItem) Reset() { *m = LimitRangeItem{} } -func (*LimitRangeItem) ProtoMessage() {} -func (*LimitRangeItem) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{68} } - -func (m *LimitRangeList) Reset() { *m = LimitRangeList{} } -func (*LimitRangeList) ProtoMessage() {} -func (*LimitRangeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{69} } - -func (m *LimitRangeSpec) Reset() { *m = LimitRangeSpec{} } -func (*LimitRangeSpec) ProtoMessage() {} -func (*LimitRangeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{70} } - -func (m *List) Reset() { *m = List{} } -func (*List) ProtoMessage() {} -func (*List) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{71} } - -func (m *LoadBalancerIngress) Reset() { *m = LoadBalancerIngress{} } -func (*LoadBalancerIngress) ProtoMessage() {} -func (*LoadBalancerIngress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{72} } - -func (m *LoadBalancerStatus) Reset() { *m = LoadBalancerStatus{} } -func (*LoadBalancerStatus) ProtoMessage() {} -func (*LoadBalancerStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{73} } - -func (m *LocalObjectReference) Reset() { *m = LocalObjectReference{} } -func (*LocalObjectReference) ProtoMessage() {} -func (*LocalObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{74} } - -func (m *LocalVolumeSource) Reset() { *m = LocalVolumeSource{} } -func (*LocalVolumeSource) ProtoMessage() {} -func (*LocalVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{75} } - -func (m *NFSVolumeSource) Reset() { *m = NFSVolumeSource{} } -func (*NFSVolumeSource) ProtoMessage() {} -func (*NFSVolumeSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{76} } - -func (m *Namespace) Reset() { *m = Namespace{} } -func (*Namespace) ProtoMessage() {} -func (*Namespace) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{77} } - -func (m *NamespaceList) Reset() { *m = NamespaceList{} } -func (*NamespaceList) ProtoMessage() {} -func (*NamespaceList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{78} } - -func (m *NamespaceSpec) Reset() { *m = NamespaceSpec{} } -func (*NamespaceSpec) ProtoMessage() {} -func (*NamespaceSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{79} } - -func (m *NamespaceStatus) Reset() { *m = NamespaceStatus{} } -func (*NamespaceStatus) ProtoMessage() {} -func (*NamespaceStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{80} } - -func (m *Node) Reset() { *m = Node{} } -func (*Node) ProtoMessage() {} -func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{81} } - -func (m *NodeAddress) Reset() { *m = NodeAddress{} } -func (*NodeAddress) ProtoMessage() {} -func (*NodeAddress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{82} } - -func (m *NodeAffinity) Reset() { *m = NodeAffinity{} } -func (*NodeAffinity) ProtoMessage() {} -func (*NodeAffinity) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{83} } - -func (m 
*NodeCondition) Reset() { *m = NodeCondition{} } -func (*NodeCondition) ProtoMessage() {} -func (*NodeCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{84} } - -func (m *NodeConfigSource) Reset() { *m = NodeConfigSource{} } -func (*NodeConfigSource) ProtoMessage() {} -func (*NodeConfigSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{85} } - -func (m *NodeConfigStatus) Reset() { *m = NodeConfigStatus{} } -func (*NodeConfigStatus) ProtoMessage() {} -func (*NodeConfigStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{86} } - -func (m *NodeDaemonEndpoints) Reset() { *m = NodeDaemonEndpoints{} } -func (*NodeDaemonEndpoints) ProtoMessage() {} -func (*NodeDaemonEndpoints) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{87} } - -func (m *NodeList) Reset() { *m = NodeList{} } -func (*NodeList) ProtoMessage() {} -func (*NodeList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{88} } - -func (m *NodeProxyOptions) Reset() { *m = NodeProxyOptions{} } -func (*NodeProxyOptions) ProtoMessage() {} -func (*NodeProxyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{89} } - -func (m *NodeResources) Reset() { *m = NodeResources{} } -func (*NodeResources) ProtoMessage() {} -func (*NodeResources) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{90} } - -func (m *NodeSelector) Reset() { *m = NodeSelector{} } -func (*NodeSelector) ProtoMessage() {} -func (*NodeSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{91} } - -func (m *NodeSelectorRequirement) Reset() { *m = NodeSelectorRequirement{} } -func (*NodeSelectorRequirement) ProtoMessage() {} -func (*NodeSelectorRequirement) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{92} -} - -func (m *NodeSelectorTerm) Reset() { *m = NodeSelectorTerm{} } -func (*NodeSelectorTerm) ProtoMessage() {} -func (*NodeSelectorTerm) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{93} } - -func (m *NodeSpec) Reset() { *m = NodeSpec{} } -func (*NodeSpec) ProtoMessage() {} -func (*NodeSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{94} } - -func (m *NodeStatus) Reset() { *m = NodeStatus{} } -func (*NodeStatus) ProtoMessage() {} -func (*NodeStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{95} } - -func (m *NodeSystemInfo) Reset() { *m = NodeSystemInfo{} } -func (*NodeSystemInfo) ProtoMessage() {} -func (*NodeSystemInfo) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{96} } - -func (m *ObjectFieldSelector) Reset() { *m = ObjectFieldSelector{} } -func (*ObjectFieldSelector) ProtoMessage() {} -func (*ObjectFieldSelector) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{97} } - -func (m *ObjectReference) Reset() { *m = ObjectReference{} } -func (*ObjectReference) ProtoMessage() {} -func (*ObjectReference) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{98} } - -func (m *PersistentVolume) Reset() { *m = PersistentVolume{} } -func (*PersistentVolume) ProtoMessage() {} -func (*PersistentVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{99} } - -func (m *PersistentVolumeClaim) Reset() { *m = PersistentVolumeClaim{} } -func (*PersistentVolumeClaim) ProtoMessage() {} -func (*PersistentVolumeClaim) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{100} } - -func (m 
[Elided: deleted machine-generated protobuf code for the k8s.io.api.core.v1 types. The removed lines in this hunk are the Reset()/ProtoMessage()/Descriptor() boilerplate for PersistentVolumeClaimCondition through WeightedPodAffinityTerm (fileDescriptorGenerated indices 101-194), the init() block of proto.RegisterType calls registering every core/v1 type, and the generated Marshal()/MarshalTo() implementations from AWSElasticBlockStoreVolumeSource through ContainerStatus, where the hunk continues.]
i, uint64(len(m.Image))) - i += copy(dAtA[i:], m.Image) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ImageID))) - i += copy(dAtA[i:], m.ImageID) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ContainerID))) - i += copy(dAtA[i:], m.ContainerID) - return i, nil -} - -func (m *DaemonEndpoint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonEndpoint) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - return i, nil -} - -func (m *DownwardAPIProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DownwardAPIProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DownwardAPIVolumeFile) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DownwardAPIVolumeFile) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n34, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n34 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n35, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n35 - } - if m.Mode != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - } - return i, nil -} - -func (m *DownwardAPIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DownwardAPIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - return i, nil -} - -func (m *EmptyDirVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EmptyDirVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Medium))) - i += copy(dAtA[i:], m.Medium) - if m.SizeLimit != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SizeLimit.Size())) - n36, 
err := m.SizeLimit.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n36 - } - return i, nil -} - -func (m *EndpointAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EndpointAddress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if m.TargetRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetRef.Size())) - n37, err := m.TargetRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n37 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - if m.NodeName != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.NodeName))) - i += copy(dAtA[i:], *m.NodeName) - } - return i, nil -} - -func (m *EndpointPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EndpointPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - return i, nil -} - -func (m *EndpointSubset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EndpointSubset) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Addresses) > 0 { - for _, msg := range m.Addresses { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.NotReadyAddresses) > 0 { - for _, msg := range m.NotReadyAddresses { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *Endpoints) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Endpoints) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n38, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n38 - if len(m.Subsets) > 0 { - for _, msg := range m.Subsets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EndpointsList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = 
make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EndpointsList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n39, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n39 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EnvFromSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EnvFromSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Prefix))) - i += copy(dAtA[i:], m.Prefix) - if m.ConfigMapRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapRef.Size())) - n40, err := m.ConfigMapRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n40 - } - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n41, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n41 - } - return i, nil -} - -func (m *EnvVar) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EnvVar) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - if m.ValueFrom != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ValueFrom.Size())) - n42, err := m.ValueFrom.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n42 - } - return i, nil -} - -func (m *EnvVarSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EnvVarSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.FieldRef != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FieldRef.Size())) - n43, err := m.FieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n43 - } - if m.ResourceFieldRef != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ResourceFieldRef.Size())) - n44, err := m.ResourceFieldRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n44 - } - if m.ConfigMapKeyRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMapKeyRef.Size())) - n45, err := m.ConfigMapKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n45 - } - if m.SecretKeyRef != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretKeyRef.Size())) - n46, err := m.SecretKeyRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n46 - } - return i, nil -} - -func (m *Event) Marshal() (dAtA []byte, err 
error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n47, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n47 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.InvolvedObject.Size())) - n48, err := m.InvolvedObject.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n48 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n49, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n49 - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FirstTimestamp.Size())) - n50, err := m.FirstTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n50 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTimestamp.Size())) - n51, err := m.LastTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n51 - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n52, err := m.EventTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n52 - if m.Series != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n53, err := m.Series.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n53 - } - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - if m.Related != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n54, err := m.Related.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n54 - } - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - return i, nil -} - -func (m *EventList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n55, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n55 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EventSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - 
-func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n56, err := m.LastObservedTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n56 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil -} - -func (m *EventSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Component))) - i += copy(dAtA[i:], m.Component) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil -} - -func (m *ExecAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ExecAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Command) > 0 { - for _, s := range m.Command { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *FCVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FCVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.TargetWWNs) > 0 { - for _, s := range m.TargetWWNs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.Lun != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Lun)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.WWIDs) > 0 { - for _, s := range m.WWIDs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *FlexPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FlexPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n57, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n57 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - 
dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func (m *FlexVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FlexVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n58, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n58 - } - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Options) > 0 { - keysForOptions := make([]string, 0, len(m.Options)) - for k := range m.Options { - keysForOptions = append(keysForOptions, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - for _, k := range keysForOptions { - dAtA[i] = 0x2a - i++ - v := m.Options[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func (m *FlockerVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FlockerVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetName))) - i += copy(dAtA[i:], m.DatasetName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DatasetUUID))) - i += copy(dAtA[i:], m.DatasetUUID) - return i, nil -} - -func (m *GCEPersistentDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GCEPersistentDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PDName))) - i += copy(dAtA[i:], m.PDName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Partition)) - dAtA[i] = 0x20 - 
i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *GitRepoVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GitRepoVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Repository))) - i += copy(dAtA[i:], m.Repository) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Revision))) - i += copy(dAtA[i:], m.Revision) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Directory))) - i += copy(dAtA[i:], m.Directory) - return i, nil -} - -func (m *GlusterfsVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GlusterfsVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.EndpointsName))) - i += copy(dAtA[i:], m.EndpointsName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *HTTPGetAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPGetAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n59, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n59 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Scheme))) - i += copy(dAtA[i:], m.Scheme) - if len(m.HTTPHeaders) > 0 { - for _, msg := range m.HTTPHeaders { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HTTPHeader) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HTTPHeader) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - return i, nil -} - -func (m *Handler) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Handler) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Exec != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Exec.Size())) - n60, err := m.Exec.MarshalTo(dAtA[i:]) - if err != 
nil { - return 0, err - } - i += n60 - } - if m.HTTPGet != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HTTPGet.Size())) - n61, err := m.HTTPGet.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n61 - } - if m.TCPSocket != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TCPSocket.Size())) - n62, err := m.TCPSocket.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n62 - } - return i, nil -} - -func (m *HostAlias) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostAlias) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - if len(m.Hostnames) > 0 { - for _, s := range m.Hostnames { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *HostPathVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostPathVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Type != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Type))) - i += copy(dAtA[i:], *m.Type) - } - return i, nil -} - -func (m *ISCSIPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ISCSIPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n63, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n63 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil -} - 
-func (m *ISCSIVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ISCSIVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.TargetPortal))) - i += copy(dAtA[i:], m.TargetPortal) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IQN))) - i += copy(dAtA[i:], m.IQN) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Lun)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ISCSIInterface))) - i += copy(dAtA[i:], m.ISCSIInterface) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x30 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.Portals) > 0 { - for _, s := range m.Portals { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x40 - i++ - if m.DiscoveryCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n64, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n64 - } - dAtA[i] = 0x58 - i++ - if m.SessionCHAPAuth { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.InitiatorName != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.InitiatorName))) - i += copy(dAtA[i:], *m.InitiatorName) - } - return i, nil -} - -func (m *KeyToPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *KeyToPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.Mode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Mode)) - } - return i, nil -} - -func (m *Lifecycle) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Lifecycle) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PostStart != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PostStart.Size())) - n65, err := m.PostStart.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n65 - } - if m.PreStop != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PreStop.Size())) - n66, err := m.PreStop.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n66 - } - return i, nil -} - -func (m *LimitRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LimitRange) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ObjectMeta.Size())) - n67, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n67 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n68, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n68 - return i, nil -} - -func (m *LimitRangeItem) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LimitRangeItem) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.Max) > 0 { - keysForMax := make([]string, 0, len(m.Max)) - for k := range m.Max { - keysForMax = append(keysForMax, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - for _, k := range keysForMax { - dAtA[i] = 0x12 - i++ - v := m.Max[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n69, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n69 - } - } - if len(m.Min) > 0 { - keysForMin := make([]string, 0, len(m.Min)) - for k := range m.Min { - keysForMin = append(keysForMin, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - for _, k := range keysForMin { - dAtA[i] = 0x1a - i++ - v := m.Min[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n70, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n70 - } - } - if len(m.Default) > 0 { - keysForDefault := make([]string, 0, len(m.Default)) - for k := range m.Default { - keysForDefault = append(keysForDefault, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - for _, k := range keysForDefault { - dAtA[i] = 0x22 - i++ - v := m.Default[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n71, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n71 - } - } - if len(m.DefaultRequest) > 0 { - keysForDefaultRequest := make([]string, 0, len(m.DefaultRequest)) - for k := range m.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - for _, k := range keysForDefaultRequest { - dAtA[i] = 0x2a - i++ - v := m.DefaultRequest[ResourceName(k)] - msgSize := 0 - if (&v) != 
nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n72, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n72 - } - } - if len(m.MaxLimitRequestRatio) > 0 { - keysForMaxLimitRequestRatio := make([]string, 0, len(m.MaxLimitRequestRatio)) - for k := range m.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - for _, k := range keysForMaxLimitRequestRatio { - dAtA[i] = 0x32 - i++ - v := m.MaxLimitRequestRatio[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n73, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n73 - } - } - return i, nil -} - -func (m *LimitRangeList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LimitRangeList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n74, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n74 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LimitRangeSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LimitRangeSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - for _, msg := range m.Limits { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *List) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *List) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n75, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n75 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LoadBalancerIngress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA 
= make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LoadBalancerIngress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IP))) - i += copy(dAtA[i:], m.IP) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Hostname))) - i += copy(dAtA[i:], m.Hostname) - return i, nil -} - -func (m *LoadBalancerStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LoadBalancerStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *LocalObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m *LocalVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *LocalVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - if m.FSType != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.FSType))) - i += copy(dAtA[i:], *m.FSType) - } - return i, nil -} - -func (m *NFSVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NFSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Server))) - i += copy(dAtA[i:], m.Server) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - dAtA[i] = 0x18 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *Namespace) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Namespace) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n76, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n76 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n77, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n77 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n78, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - 
return 0, err - } - i += n78 - return i, nil -} - -func (m *NamespaceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamespaceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n79, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n79 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NamespaceSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamespaceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Finalizers) > 0 { - for _, s := range m.Finalizers { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *NamespaceStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NamespaceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Phase))) - i += copy(dAtA[i:], m.Phase) - return i, nil -} - -func (m *Node) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Node) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n80, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n80 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n81, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n81 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n82, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n82 - return i, nil -} - -func (m *NodeAddress) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeAddress) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Address))) - i += copy(dAtA[i:], m.Address) - return i, nil -} - -func (m *NodeAffinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if 
m.RequiredDuringSchedulingIgnoredDuringExecution != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RequiredDuringSchedulingIgnoredDuringExecution.Size())) - n83, err := m.RequiredDuringSchedulingIgnoredDuringExecution.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n83 - } - if len(m.PreferredDuringSchedulingIgnoredDuringExecution) > 0 { - for _, msg := range m.PreferredDuringSchedulingIgnoredDuringExecution { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NodeCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastHeartbeatTime.Size())) - n84, err := m.LastHeartbeatTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n84 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n85, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n85 - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *NodeConfigSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeConfigSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ConfigMap != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n86, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n86 - } - return i, nil -} - -func (m *NodeConfigStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeConfigStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Assigned != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Assigned.Size())) - n87, err := m.Assigned.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n87 - } - if m.Active != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Active.Size())) - n88, err := m.Active.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n88 - } - if m.LastKnownGood != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastKnownGood.Size())) - n89, err := m.LastKnownGood.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n89 - } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Error))) - i += copy(dAtA[i:], m.Error) - return i, nil -} - -func (m *NodeDaemonEndpoints) Marshal() (dAtA []byte, err error) { - size := 
[diff hunk condensed: removal of auto-generated protobuf Marshal()/MarshalTo() implementations for the vendored Kubernetes core v1 types — NodeDaemonEndpoints, NodeList, NodeProxyOptions, NodeResources, NodeSelector, NodeSelectorRequirement, NodeSelectorTerm, NodeSpec, NodeStatus, NodeSystemInfo, ObjectFieldSelector, ObjectReference, PersistentVolume, PersistentVolumeClaim and its Condition/List/Spec/Status/VolumeSource types, PersistentVolumeList, PersistentVolumeSource, PersistentVolumeSpec, PersistentVolumeStatus, PhotonPersistentDiskVolumeSource, Pod, PodAffinity, PodAffinityTerm, PodAntiAffinity, PodAttachOptions, PodCondition, PodDNSConfig, PodDNSConfigOption, PodExecOptions, PodList, PodLogOptions, PodPortForwardOptions, PodProxyOptions, PodReadinessGate, PodSecurityContext, PodSignature, PodSpec, PodStatus, PodStatusResult, PodTemplate, PodTemplateList, PodTemplateSpec, PortworxVolumeSource, Preconditions, PreferAvoidPodsEntry, PreferredSchedulingTerm, Probe, ProjectedVolumeSource, QuobyteVolumeSource, RBDPersistentVolumeSource, RBDVolumeSource, RangeAllocation, ReplicationController and its Condition/List/Spec/Status types, ResourceFieldSelector, ResourceQuota, ResourceQuotaList, ResourceQuotaSpec, and ResourceQuotaStatus — from the vendored generated.pb.go]
string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - for _, k := range keysForHard { - dAtA[i] = 0xa - i++ - v := m.Hard[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n180, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n180 - } - } - if len(m.Used) > 0 { - keysForUsed := make([]string, 0, len(m.Used)) - for k := range m.Used { - keysForUsed = append(keysForUsed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - for _, k := range keysForUsed { - dAtA[i] = 0x12 - i++ - v := m.Used[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n181, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n181 - } - } - return i, nil -} - -func (m *ResourceRequirements) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResourceRequirements) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Limits) > 0 { - keysForLimits := make([]string, 0, len(m.Limits)) - for k := range m.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - for _, k := range keysForLimits { - dAtA[i] = 0xa - i++ - v := m.Limits[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n182, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n182 - } - } - if len(m.Requests) > 0 { - keysForRequests := make([]string, 0, len(m.Requests)) - for k := range m.Requests { - keysForRequests = append(keysForRequests, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - for _, k := range keysForRequests { - dAtA[i] = 0x12 - i++ - v := m.Requests[ResourceName(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n183, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n183 - } - } - return i, nil -} - -func (m *SELinuxOptions) Marshal() (dAtA []byte, err error) { - size 
:= m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SELinuxOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.User))) - i += copy(dAtA[i:], m.User) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Role))) - i += copy(dAtA[i:], m.Role) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Level))) - i += copy(dAtA[i:], m.Level) - return i, nil -} - -func (m *ScaleIOPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScaleIOPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n184, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n184 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *ScaleIOVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScaleIOVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Gateway))) - i += copy(dAtA[i:], m.Gateway) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.System))) - i += copy(dAtA[i:], m.System) - if m.SecretRef != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n185, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n185 - } - dAtA[i] = 0x20 - i++ - if m.SSLEnabled { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ProtectionDomain))) - i += copy(dAtA[i:], m.ProtectionDomain) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePool))) - i += copy(dAtA[i:], m.StoragePool) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StorageMode))) - i += copy(dAtA[i:], 
m.StorageMode) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x50 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *ScopeSelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScopeSelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchExpressions) > 0 { - for _, msg := range m.MatchExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ScopedResourceSelectorRequirement) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ScopedResourceSelectorRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ScopeName))) - i += copy(dAtA[i:], m.ScopeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *Secret) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Secret) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n186, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n186 - if len(m.Data) > 0 { - keysForData := make([]string, 0, len(m.Data)) - for k := range m.Data { - keysForData = append(keysForData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - for _, k := range keysForData { - dAtA[i] = 0x12 - i++ - v := m.Data[string(k)] - byteSize := 0 - if v != nil { - byteSize = 1 + len(v) + sovGenerated(uint64(len(v))) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + byteSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - if v != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.StringData) > 0 { - keysForStringData := make([]string, 0, len(m.StringData)) - for k := range m.StringData { - keysForStringData = append(keysForStringData, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - for _, k := range keysForStringData { - dAtA[i] = 0x22 - i++ - v := m.StringData[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = 
encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - return i, nil -} - -func (m *SecretEnvSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecretEnvSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n187, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n187 - if m.Optional != nil { - dAtA[i] = 0x10 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecretKeySelector) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecretKeySelector) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n188, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n188 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if m.Optional != nil { - dAtA[i] = 0x18 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecretList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecretList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n189, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n189 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *SecretProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecretProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LocalObjectReference.Size())) - n190, err := m.LocalObjectReference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n190 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecretReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*SecretReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil -} - -func (m *SecretVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecretVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SecretName))) - i += copy(dAtA[i:], m.SecretName) - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.DefaultMode != nil { - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.DefaultMode)) - } - if m.Optional != nil { - dAtA[i] = 0x20 - i++ - if *m.Optional { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *SecurityContext) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SecurityContext) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Capabilities != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Capabilities.Size())) - n191, err := m.Capabilities.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n191 - } - if m.Privileged != nil { - dAtA[i] = 0x10 - i++ - if *m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.SELinuxOptions != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n192, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n192 - } - if m.RunAsUser != nil { - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsUser)) - } - if m.RunAsNonRoot != nil { - dAtA[i] = 0x28 - i++ - if *m.RunAsNonRoot { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.ReadOnlyRootFilesystem != nil { - dAtA[i] = 0x30 - i++ - if *m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x38 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.RunAsGroup != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RunAsGroup)) - } - if m.ProcMount != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ProcMount))) - i += copy(dAtA[i:], *m.ProcMount) - } - return i, nil -} - -func (m *SerializedReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SerializedReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Reference.Size())) - n193, err := m.Reference.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n193 - return i, nil -} - -func (m *Service) Marshal() (dAtA 
[]byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Service) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n194, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n194 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n195, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n195 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n196, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n196 - return i, nil -} - -func (m *ServiceAccount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceAccount) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n197, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n197 - if len(m.Secrets) > 0 { - for _, msg := range m.Secrets { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.ImagePullSecrets) > 0 { - for _, msg := range m.ImagePullSecrets { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AutomountServiceAccountToken != nil { - dAtA[i] = 0x20 - i++ - if *m.AutomountServiceAccountToken { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - return i, nil -} - -func (m *ServiceAccountList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceAccountList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n198, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n198 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ServiceAccountTokenProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceAccountTokenProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Audience))) - i += copy(dAtA[i:], m.Audience) - if m.ExpirationSeconds != nil { - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ExpirationSeconds)) - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil -} - -func (m *ServiceList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, 
size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n199, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n199 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ServicePort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServicePort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Protocol))) - i += copy(dAtA[i:], m.Protocol) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port)) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetPort.Size())) - n200, err := m.TargetPort.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n200 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NodePort)) - return i, nil -} - -func (m *ServiceProxyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceProxyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Path))) - i += copy(dAtA[i:], m.Path) - return i, nil -} - -func (m *ServiceSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Selector) > 0 { - keysForSelector := make([]string, 0, len(m.Selector)) - for k := range m.Selector { - keysForSelector = append(keysForSelector, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - for _, k := range keysForSelector { - dAtA[i] = 0x12 - i++ - v := m.Selector[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ClusterIP))) - i += copy(dAtA[i:], m.ClusterIP) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if len(m.ExternalIPs) > 0 { - for _, s := range m.ExternalIPs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 
{ - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SessionAffinity))) - i += copy(dAtA[i:], m.SessionAffinity) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.LoadBalancerIP))) - i += copy(dAtA[i:], m.LoadBalancerIP) - if len(m.LoadBalancerSourceRanges) > 0 { - for _, s := range m.LoadBalancerSourceRanges { - dAtA[i] = 0x4a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalName))) - i += copy(dAtA[i:], m.ExternalName) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ExternalTrafficPolicy))) - i += copy(dAtA[i:], m.ExternalTrafficPolicy) - dAtA[i] = 0x60 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HealthCheckNodePort)) - dAtA[i] = 0x68 - i++ - if m.PublishNotReadyAddresses { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SessionAffinityConfig != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SessionAffinityConfig.Size())) - n201, err := m.SessionAffinityConfig.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n201 - } - return i, nil -} - -func (m *ServiceStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ServiceStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LoadBalancer.Size())) - n202, err := m.LoadBalancer.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n202 - return i, nil -} - -func (m *SessionAffinityConfig) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SessionAffinityConfig) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.ClientIP != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ClientIP.Size())) - n203, err := m.ClientIP.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n203 - } - return i, nil -} - -func (m *StorageOSPersistentVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageOSPersistentVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i += copy(dAtA[i:], m.VolumeNamespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n204, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n204 - } - return i, nil -} - -func (m *StorageOSVolumeSource) 
Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageOSVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeName))) - i += copy(dAtA[i:], m.VolumeName) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumeNamespace))) - i += copy(dAtA[i:], m.VolumeNamespace) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x20 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.SecretRef != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SecretRef.Size())) - n205, err := m.SecretRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n205 - } - return i, nil -} - -func (m *Sysctl) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Sysctl) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - return i, nil -} - -func (m *TCPSocketAction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TCPSocketAction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n206, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n206 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Host))) - i += copy(dAtA[i:], m.Host) - return i, nil -} - -func (m *Taint) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Taint) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i += copy(dAtA[i:], m.Effect) - if m.TimeAdded != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TimeAdded.Size())) - n207, err := m.TimeAdded.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n207 - } - return i, nil -} - -func (m *Toleration) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Toleration) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Operator))) - i += copy(dAtA[i:], m.Operator) - 
dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Value))) - i += copy(dAtA[i:], m.Value) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Effect))) - i += copy(dAtA[i:], m.Effect) - if m.TolerationSeconds != nil { - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.TolerationSeconds)) - } - return i, nil -} - -func (m *TopologySelectorLabelRequirement) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TopologySelectorLabelRequirement) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Key))) - i += copy(dAtA[i:], m.Key) - if len(m.Values) > 0 { - for _, s := range m.Values { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *TopologySelectorTerm) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TopologySelectorTerm) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.MatchLabelExpressions) > 0 { - for _, msg := range m.MatchLabelExpressions { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *TypedLocalObjectReference) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TypedLocalObjectReference) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.APIGroup != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.APIGroup))) - i += copy(dAtA[i:], *m.APIGroup) - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m *Volume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Volume) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VolumeSource.Size())) - n208, err := m.VolumeSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n208 - return i, nil -} - -func (m *VolumeDevice) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeDevice) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.DevicePath))) - i += 
copy(dAtA[i:], m.DevicePath) - return i, nil -} - -func (m *VolumeMount) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeMount) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.MountPath))) - i += copy(dAtA[i:], m.MountPath) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.SubPath))) - i += copy(dAtA[i:], m.SubPath) - if m.MountPropagation != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.MountPropagation))) - i += copy(dAtA[i:], *m.MountPropagation) - } - return i, nil -} - -func (m *VolumeNodeAffinity) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeNodeAffinity) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Required != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Required.Size())) - n209, err := m.Required.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n209 - } - return i, nil -} - -func (m *VolumeProjection) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeProjection) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Secret != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n210, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n210 - } - if m.DownwardAPI != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n211, err := m.DownwardAPI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n211 - } - if m.ConfigMap != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ConfigMap.Size())) - n212, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n212 - } - if m.ServiceAccountToken != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ServiceAccountToken.Size())) - n213, err := m.ServiceAccountToken.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n213 - } - return i, nil -} - -func (m *VolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.HostPath != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.HostPath.Size())) - n214, err := m.HostPath.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n214 - } - if m.EmptyDir != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EmptyDir.Size())) - n215, err := m.EmptyDir.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n215 - } - if m.GCEPersistentDisk != nil { - dAtA[i] = 0x1a - i++ - 
i = encodeVarintGenerated(dAtA, i, uint64(m.GCEPersistentDisk.Size())) - n216, err := m.GCEPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n216 - } - if m.AWSElasticBlockStore != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AWSElasticBlockStore.Size())) - n217, err := m.AWSElasticBlockStore.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n217 - } - if m.GitRepo != nil { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.GitRepo.Size())) - n218, err := m.GitRepo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n218 - } - if m.Secret != nil { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Secret.Size())) - n219, err := m.Secret.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n219 - } - if m.NFS != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NFS.Size())) - n220, err := m.NFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n220 - } - if m.ISCSI != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ISCSI.Size())) - n221, err := m.ISCSI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n221 - } - if m.Glusterfs != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Glusterfs.Size())) - n222, err := m.Glusterfs.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n222 - } - if m.PersistentVolumeClaim != nil { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PersistentVolumeClaim.Size())) - n223, err := m.PersistentVolumeClaim.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n223 - } - if m.RBD != nil { - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RBD.Size())) - n224, err := m.RBD.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n224 - } - if m.FlexVolume != nil { - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FlexVolume.Size())) - n225, err := m.FlexVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n225 - } - if m.Cinder != nil { - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Cinder.Size())) - n226, err := m.Cinder.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n226 - } - if m.CephFS != nil { - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CephFS.Size())) - n227, err := m.CephFS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n227 - } - if m.Flocker != nil { - dAtA[i] = 0x7a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Flocker.Size())) - n228, err := m.Flocker.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n228 - } - if m.DownwardAPI != nil { - dAtA[i] = 0x82 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DownwardAPI.Size())) - n229, err := m.DownwardAPI.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n229 - } - if m.FC != nil { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FC.Size())) - n230, err := m.FC.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n230 - } - if m.AzureFile != nil { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureFile.Size())) - n231, err := m.AzureFile.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n231 - } - if m.ConfigMap != nil { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.ConfigMap.Size())) - n232, err := m.ConfigMap.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n232 - } - if m.VsphereVolume != nil { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.VsphereVolume.Size())) - n233, err := m.VsphereVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n233 - } - if m.Quobyte != nil { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Quobyte.Size())) - n234, err := m.Quobyte.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n234 - } - if m.AzureDisk != nil { - dAtA[i] = 0xb2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AzureDisk.Size())) - n235, err := m.AzureDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n235 - } - if m.PhotonPersistentDisk != nil { - dAtA[i] = 0xba - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PhotonPersistentDisk.Size())) - n236, err := m.PhotonPersistentDisk.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n236 - } - if m.PortworxVolume != nil { - dAtA[i] = 0xc2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PortworxVolume.Size())) - n237, err := m.PortworxVolume.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n237 - } - if m.ScaleIO != nil { - dAtA[i] = 0xca - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ScaleIO.Size())) - n238, err := m.ScaleIO.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n238 - } - if m.Projected != nil { - dAtA[i] = 0xd2 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Projected.Size())) - n239, err := m.Projected.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n239 - } - if m.StorageOS != nil { - dAtA[i] = 0xda - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.StorageOS.Size())) - n240, err := m.StorageOS.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n240 - } - return i, nil -} - -func (m *VsphereVirtualDiskVolumeSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VsphereVirtualDiskVolumeSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.VolumePath))) - i += copy(dAtA[i:], m.VolumePath) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.FSType))) - i += copy(dAtA[i:], m.FSType) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyName))) - i += copy(dAtA[i:], m.StoragePolicyName) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.StoragePolicyID))) - i += copy(dAtA[i:], m.StoragePolicyID) - return i, nil -} - -func (m *WeightedPodAffinityTerm) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *WeightedPodAffinityTerm) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Weight)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodAffinityTerm.Size())) - n241, err := m.PodAffinityTerm.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err 
- } - i += n241 - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AWSElasticBlockStoreVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumeID) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Partition)) - n += 2 - return n -} - -func (m *Affinity) Size() (n int) { - var l int - _ = l - if m.NodeAffinity != nil { - l = m.NodeAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PodAffinity != nil { - l = m.PodAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PodAntiAffinity != nil { - l = m.PodAntiAffinity.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AttachedVolume) Size() (n int) { - var l int - _ = l - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DevicePath) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AvoidPods) Size() (n int) { - var l int - _ = l - if len(m.PreferAvoidPods) > 0 { - for _, e := range m.PreferAvoidPods { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *AzureDiskVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.DiskName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.DataDiskURI) - n += 1 + l + sovGenerated(uint64(l)) - if m.CachingMode != nil { - l = len(*m.CachingMode) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FSType != nil { - l = len(*m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ReadOnly != nil { - n += 2 - } - if m.Kind != nil { - l = len(*m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AzureFilePersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.SecretNamespace != nil { - l = len(*m.SecretNamespace) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *AzureFileVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.SecretName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ShareName) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *Binding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Target.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *CSIPersistentVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.VolumeHandle) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.VolumeAttributes) > 0 { - for k, v := range m.VolumeAttributes { - _ = k - _ = v - mapEntrySize := 1 + len(k) + 
[diff hunk elided: this span of the patch deletes the generated protobuf Size()
 methods for the vendored github.com/ericchiang/k8s core v1 API types (Capabilities,
 the CephFS/Cinder/Flex/Flocker/GCE/Glusterfs/ISCSI/NFS/RBD/ScaleIO/StorageOS volume
 sources, ConfigMap, Container and ContainerStatus, Endpoints, Env*, Event, LimitRange,
 Namespace, Node, PersistentVolume and PersistentVolumeClaim, Pod spec/status,
 ReplicationController, ResourceQuota, Secret, Service and ServiceAccount, Taint,
 Toleration, Volume and related types). Each removed method sums per-field sizes as
 tag byte + varint length prefix + payload via the sovGenerated helper, with map
 fields sized as nested key/value entries.]
m.GCEPersistentDisk.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.AWSElasticBlockStore != nil { - l = m.AWSElasticBlockStore.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.GitRepo != nil { - l = m.GitRepo.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Secret != nil { - l = m.Secret.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NFS != nil { - l = m.NFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.ISCSI != nil { - l = m.ISCSI.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Glusterfs != nil { - l = m.Glusterfs.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.PersistentVolumeClaim != nil { - l = m.PersistentVolumeClaim.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.RBD != nil { - l = m.RBD.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.FlexVolume != nil { - l = m.FlexVolume.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Cinder != nil { - l = m.Cinder.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.CephFS != nil { - l = m.CephFS.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Flocker != nil { - l = m.Flocker.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DownwardAPI != nil { - l = m.DownwardAPI.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.FC != nil { - l = m.FC.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.AzureFile != nil { - l = m.AzureFile.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ConfigMap != nil { - l = m.ConfigMap.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.VsphereVolume != nil { - l = m.VsphereVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Quobyte != nil { - l = m.Quobyte.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.AzureDisk != nil { - l = m.AzureDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PhotonPersistentDisk != nil { - l = m.PhotonPersistentDisk.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.PortworxVolume != nil { - l = m.PortworxVolume.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.ScaleIO != nil { - l = m.ScaleIO.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.Projected != nil { - l = m.Projected.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - if m.StorageOS != nil { - l = m.StorageOS.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VsphereVirtualDiskVolumeSource) Size() (n int) { - var l int - _ = l - l = len(m.VolumePath) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.FSType) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyName) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.StoragePolicyID) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *WeightedPodAffinityTerm) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Weight)) - l = m.PodAffinityTerm.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AWSElasticBlockStoreVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AWSElasticBlockStoreVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func 
(this *Affinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Affinity{`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "NodeAffinity", "NodeAffinity", 1) + `,`, - `PodAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAffinity), "PodAffinity", "PodAffinity", 1) + `,`, - `PodAntiAffinity:` + strings.Replace(fmt.Sprintf("%v", this.PodAntiAffinity), "PodAntiAffinity", "PodAntiAffinity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *AttachedVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AttachedVolume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s -} -func (this *AvoidPods) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AvoidPods{`, - `PreferAvoidPods:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferAvoidPods), "PreferAvoidPodsEntry", "PreferAvoidPodsEntry", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *AzureDiskVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AzureDiskVolumeSource{`, - `DiskName:` + fmt.Sprintf("%v", this.DiskName) + `,`, - `DataDiskURI:` + fmt.Sprintf("%v", this.DataDiskURI) + `,`, - `CachingMode:` + valueToStringGenerated(this.CachingMode) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `ReadOnly:` + valueToStringGenerated(this.ReadOnly) + `,`, - `Kind:` + valueToStringGenerated(this.Kind) + `,`, - `}`, - }, "") - return s -} -func (this *AzureFilePersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AzureFilePersistentVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretNamespace:` + valueToStringGenerated(this.SecretNamespace) + `,`, - `}`, - }, "") - return s -} -func (this *AzureFileVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AzureFileVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `ShareName:` + fmt.Sprintf("%v", this.ShareName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Binding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Binding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Target:` + strings.Replace(strings.Replace(this.Target.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *CSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - keysForVolumeAttributes := make([]string, 0, len(this.VolumeAttributes)) - for k := range this.VolumeAttributes { - keysForVolumeAttributes = append(keysForVolumeAttributes, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForVolumeAttributes) - mapStringForVolumeAttributes := "map[string]string{" - for _, k := range keysForVolumeAttributes { - mapStringForVolumeAttributes += fmt.Sprintf("%v: %v,", k, this.VolumeAttributes[k]) - } - mapStringForVolumeAttributes += "}" - s := strings.Join([]string{`&CSIPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + 
`,`, - `VolumeHandle:` + fmt.Sprintf("%v", this.VolumeHandle) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `VolumeAttributes:` + mapStringForVolumeAttributes + `,`, - `ControllerPublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.ControllerPublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodeStageSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodeStageSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `NodePublishSecretRef:` + strings.Replace(fmt.Sprintf("%v", this.NodePublishSecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Capabilities) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Capabilities{`, - `Add:` + fmt.Sprintf("%v", this.Add) + `,`, - `Drop:` + fmt.Sprintf("%v", this.Drop) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CephFSPersistentVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CephFSVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CephFSVolumeSource{`, - `Monitors:` + fmt.Sprintf("%v", this.Monitors) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `SecretFile:` + fmt.Sprintf("%v", this.SecretFile) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *CinderPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CinderPersistentVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *CinderVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&CinderVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClientIPConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClientIPConfig{`, - `TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ComponentCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - 
`Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ComponentStatus{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ComponentCondition", "ComponentCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ComponentStatusList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ComponentStatusList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ComponentStatus", "ComponentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMap) String() string { - if this == nil { - return "nil" - } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string]string{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) - } - mapStringForData += "}" - keysForBinaryData := make([]string, 0, len(this.BinaryData)) - for k := range this.BinaryData { - keysForBinaryData = append(keysForBinaryData, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForBinaryData) - mapStringForBinaryData := "map[string][]byte{" - for _, k := range keysForBinaryData { - mapStringForBinaryData += fmt.Sprintf("%v: %v,", k, this.BinaryData[k]) - } - mapStringForBinaryData += "}" - s := strings.Join([]string{`&ConfigMap{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `BinaryData:` + mapStringForBinaryData + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapEnvSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConfigMapEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapKeySelector) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConfigMapKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConfigMapList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ConfigMap", "ConfigMap", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapNodeConfigSource) String() string { - if this 
== nil { - return "nil" - } - s := strings.Join([]string{`&ConfigMapNodeConfigSource{`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `KubeletConfigKey:` + fmt.Sprintf("%v", this.KubeletConfigKey) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapProjection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConfigMapProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *ConfigMapVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ConfigMapVolumeSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *Container) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Container{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `Args:` + fmt.Sprintf("%v", this.Args) + `,`, - `WorkingDir:` + fmt.Sprintf("%v", this.WorkingDir) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ContainerPort", "ContainerPort", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "EnvVar", 1), `&`, ``, 1) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "VolumeMount", 1), `&`, ``, 1) + `,`, - `LivenessProbe:` + strings.Replace(fmt.Sprintf("%v", this.LivenessProbe), "Probe", "Probe", 1) + `,`, - `ReadinessProbe:` + strings.Replace(fmt.Sprintf("%v", this.ReadinessProbe), "Probe", "Probe", 1) + `,`, - `Lifecycle:` + strings.Replace(fmt.Sprintf("%v", this.Lifecycle), "Lifecycle", "Lifecycle", 1) + `,`, - `TerminationMessagePath:` + fmt.Sprintf("%v", this.TerminationMessagePath) + `,`, - `ImagePullPolicy:` + fmt.Sprintf("%v", this.ImagePullPolicy) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "SecurityContext", "SecurityContext", 1) + `,`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `StdinOnce:` + fmt.Sprintf("%v", this.StdinOnce) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "EnvFromSource", 1), `&`, ``, 1) + `,`, - `TerminationMessagePolicy:` + fmt.Sprintf("%v", this.TerminationMessagePolicy) + `,`, - `VolumeDevices:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeDevices), "VolumeDevice", 
"VolumeDevice", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerImage) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerImage{`, - `Names:` + fmt.Sprintf("%v", this.Names) + `,`, - `SizeBytes:` + fmt.Sprintf("%v", this.SizeBytes) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `HostPort:` + fmt.Sprintf("%v", this.HostPort) + `,`, - `ContainerPort:` + fmt.Sprintf("%v", this.ContainerPort) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerState) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerState{`, - `Waiting:` + strings.Replace(fmt.Sprintf("%v", this.Waiting), "ContainerStateWaiting", "ContainerStateWaiting", 1) + `,`, - `Running:` + strings.Replace(fmt.Sprintf("%v", this.Running), "ContainerStateRunning", "ContainerStateRunning", 1) + `,`, - `Terminated:` + strings.Replace(fmt.Sprintf("%v", this.Terminated), "ContainerStateTerminated", "ContainerStateTerminated", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateRunning) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerStateRunning{`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateTerminated) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerStateTerminated{`, - `ExitCode:` + fmt.Sprintf("%v", this.ExitCode) + `,`, - `Signal:` + fmt.Sprintf("%v", this.Signal) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `StartedAt:` + strings.Replace(strings.Replace(this.StartedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `FinishedAt:` + strings.Replace(strings.Replace(this.FinishedAt.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStateWaiting) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerStateWaiting{`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ContainerStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ContainerStatus{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `State:` + strings.Replace(strings.Replace(this.State.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `LastTerminationState:` + strings.Replace(strings.Replace(this.LastTerminationState.String(), "ContainerState", "ContainerState", 1), `&`, ``, 1) + `,`, - `Ready:` + fmt.Sprintf("%v", this.Ready) + `,`, - `RestartCount:` + fmt.Sprintf("%v", this.RestartCount) + `,`, - `Image:` + fmt.Sprintf("%v", this.Image) + `,`, - `ImageID:` + fmt.Sprintf("%v", this.ImageID) + `,`, - `ContainerID:` + fmt.Sprintf("%v", this.ContainerID) + `,`, - `}`, - }, "") - return s -} -func (this *DaemonEndpoint) String() string { - if this == nil { - return 
"nil" - } - s := strings.Join([]string{`&DaemonEndpoint{`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIProjection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DownwardAPIProjection{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIVolumeFile) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DownwardAPIVolumeFile{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s -} -func (this *DownwardAPIVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&DownwardAPIVolumeSource{`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "DownwardAPIVolumeFile", "DownwardAPIVolumeFile", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s -} -func (this *EmptyDirVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EmptyDirVolumeSource{`, - `Medium:` + fmt.Sprintf("%v", this.Medium) + `,`, - `SizeLimit:` + strings.Replace(fmt.Sprintf("%v", this.SizeLimit), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointAddress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EndpointAddress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `TargetRef:` + strings.Replace(fmt.Sprintf("%v", this.TargetRef), "ObjectReference", "ObjectReference", 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `NodeName:` + valueToStringGenerated(this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EndpointPort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `}`, - }, "") - return s -} -func (this *EndpointSubset) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EndpointSubset{`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `NotReadyAddresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NotReadyAddresses), "EndpointAddress", "EndpointAddress", 1), `&`, ``, 1) + `,`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "EndpointPort", "EndpointPort", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Endpoints) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Endpoints{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subsets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subsets), "EndpointSubset", "EndpointSubset", 1), `&`, ``, 1) + `,`, 
- `}`, - }, "") - return s -} -func (this *EndpointsList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EndpointsList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Endpoints", "Endpoints", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvFromSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EnvFromSource{`, - `Prefix:` + fmt.Sprintf("%v", this.Prefix) + `,`, - `ConfigMapRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapRef), "ConfigMapEnvSource", "ConfigMapEnvSource", 1) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretEnvSource", "SecretEnvSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvVar) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EnvVar{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `ValueFrom:` + strings.Replace(fmt.Sprintf("%v", this.ValueFrom), "EnvVarSource", "EnvVarSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *EnvVarSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EnvVarSource{`, - `FieldRef:` + strings.Replace(fmt.Sprintf("%v", this.FieldRef), "ObjectFieldSelector", "ObjectFieldSelector", 1) + `,`, - `ResourceFieldRef:` + strings.Replace(fmt.Sprintf("%v", this.ResourceFieldRef), "ResourceFieldSelector", "ResourceFieldSelector", 1) + `,`, - `ConfigMapKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMapKeyRef), "ConfigMapKeySelector", "ConfigMapKeySelector", 1) + `,`, - `SecretKeyRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretKeyRef), "SecretKeySelector", "SecretKeySelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Event) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `InvolvedObject:` + strings.Replace(strings.Replace(this.InvolvedObject.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "EventSource", "EventSource", 1), `&`, ``, 1) + `,`, - `FirstTimestamp:` + strings.Replace(strings.Replace(this.FirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTimestamp:` + strings.Replace(strings.Replace(this.LastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "ObjectReference", 1) + `,`, - `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, - 
`ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, - `}`, - }, "") - return s -} -func (this *EventList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventSeries) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EventSeries{`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `}`, - }, "") - return s -} -func (this *EventSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EventSource{`, - `Component:` + fmt.Sprintf("%v", this.Component) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *ExecAction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ExecAction{`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s -} -func (this *FCVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FCVolumeSource{`, - `TargetWWNs:` + fmt.Sprintf("%v", this.TargetWWNs) + `,`, - `Lun:` + valueToStringGenerated(this.Lun) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `WWIDs:` + fmt.Sprintf("%v", this.WWIDs) + `,`, - `}`, - }, "") - return s -} -func (this *FlexPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) - } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexPersistentVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Options:` + mapStringForOptions + `,`, - `}`, - }, "") - return s -} -func (this *FlexVolumeSource) String() string { - if this == nil { - return "nil" - } - keysForOptions := make([]string, 0, len(this.Options)) - for k := range this.Options { - keysForOptions = append(keysForOptions, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForOptions) - mapStringForOptions := "map[string]string{" - for _, k := range keysForOptions { - mapStringForOptions += fmt.Sprintf("%v: %v,", k, this.Options[k]) - } - mapStringForOptions += "}" - s := strings.Join([]string{`&FlexVolumeSource{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) 
+ `,`, - `Options:` + mapStringForOptions + `,`, - `}`, - }, "") - return s -} -func (this *FlockerVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FlockerVolumeSource{`, - `DatasetName:` + fmt.Sprintf("%v", this.DatasetName) + `,`, - `DatasetUUID:` + fmt.Sprintf("%v", this.DatasetUUID) + `,`, - `}`, - }, "") - return s -} -func (this *GCEPersistentDiskVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GCEPersistentDiskVolumeSource{`, - `PDName:` + fmt.Sprintf("%v", this.PDName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `Partition:` + fmt.Sprintf("%v", this.Partition) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *GitRepoVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GitRepoVolumeSource{`, - `Repository:` + fmt.Sprintf("%v", this.Repository) + `,`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `Directory:` + fmt.Sprintf("%v", this.Directory) + `,`, - `}`, - }, "") - return s -} -func (this *GlusterfsVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&GlusterfsVolumeSource{`, - `EndpointsName:` + fmt.Sprintf("%v", this.EndpointsName) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPGetAction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPGetAction{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `Scheme:` + fmt.Sprintf("%v", this.Scheme) + `,`, - `HTTPHeaders:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPHeaders), "HTTPHeader", "HTTPHeader", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HTTPHeader) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HTTPHeader{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *Handler) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Handler{`, - `Exec:` + strings.Replace(fmt.Sprintf("%v", this.Exec), "ExecAction", "ExecAction", 1) + `,`, - `HTTPGet:` + strings.Replace(fmt.Sprintf("%v", this.HTTPGet), "HTTPGetAction", "HTTPGetAction", 1) + `,`, - `TCPSocket:` + strings.Replace(fmt.Sprintf("%v", this.TCPSocket), "TCPSocketAction", "TCPSocketAction", 1) + `,`, - `}`, - }, "") - return s -} -func (this *HostAlias) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostAlias{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostnames:` + fmt.Sprintf("%v", this.Hostnames) + `,`, - `}`, - }, "") - return s -} -func (this *HostPathVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPathVolumeSource{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Type:` + valueToStringGenerated(this.Type) + `,`, - `}`, - }, "") - return s -} -func (this *ISCSIPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ISCSIPersistentVolumeSource{`, - `TargetPortal:` + fmt.Sprintf("%v", 
this.TargetPortal) + `,`, - `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, - `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, - `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, - `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, - `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, - `}`, - }, "") - return s -} -func (this *ISCSIVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ISCSIVolumeSource{`, - `TargetPortal:` + fmt.Sprintf("%v", this.TargetPortal) + `,`, - `IQN:` + fmt.Sprintf("%v", this.IQN) + `,`, - `Lun:` + fmt.Sprintf("%v", this.Lun) + `,`, - `ISCSIInterface:` + fmt.Sprintf("%v", this.ISCSIInterface) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `Portals:` + fmt.Sprintf("%v", this.Portals) + `,`, - `DiscoveryCHAPAuth:` + fmt.Sprintf("%v", this.DiscoveryCHAPAuth) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SessionCHAPAuth:` + fmt.Sprintf("%v", this.SessionCHAPAuth) + `,`, - `InitiatorName:` + valueToStringGenerated(this.InitiatorName) + `,`, - `}`, - }, "") - return s -} -func (this *KeyToPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&KeyToPath{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `Mode:` + valueToStringGenerated(this.Mode) + `,`, - `}`, - }, "") - return s -} -func (this *Lifecycle) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Lifecycle{`, - `PostStart:` + strings.Replace(fmt.Sprintf("%v", this.PostStart), "Handler", "Handler", 1) + `,`, - `PreStop:` + strings.Replace(fmt.Sprintf("%v", this.PreStop), "Handler", "Handler", 1) + `,`, - `}`, - }, "") - return s -} -func (this *LimitRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LimitRange{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "LimitRangeSpec", "LimitRangeSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LimitRangeItem) String() string { - if this == nil { - return "nil" - } - keysForMax := make([]string, 0, len(this.Max)) - for k := range this.Max { - keysForMax = append(keysForMax, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMax) - mapStringForMax := "ResourceList{" - for _, k := range keysForMax { - mapStringForMax += fmt.Sprintf("%v: %v,", k, this.Max[ResourceName(k)]) - } - mapStringForMax += "}" - keysForMin := make([]string, 0, len(this.Min)) - for k := range this.Min { - keysForMin = append(keysForMin, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMin) - mapStringForMin := "ResourceList{" - for _, k := range keysForMin { - mapStringForMin += fmt.Sprintf("%v: %v,", k, this.Min[ResourceName(k)]) - } - mapStringForMin += "}" - keysForDefault := make([]string, 0, len(this.Default)) - 
for k := range this.Default { - keysForDefault = append(keysForDefault, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefault) - mapStringForDefault := "ResourceList{" - for _, k := range keysForDefault { - mapStringForDefault += fmt.Sprintf("%v: %v,", k, this.Default[ResourceName(k)]) - } - mapStringForDefault += "}" - keysForDefaultRequest := make([]string, 0, len(this.DefaultRequest)) - for k := range this.DefaultRequest { - keysForDefaultRequest = append(keysForDefaultRequest, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDefaultRequest) - mapStringForDefaultRequest := "ResourceList{" - for _, k := range keysForDefaultRequest { - mapStringForDefaultRequest += fmt.Sprintf("%v: %v,", k, this.DefaultRequest[ResourceName(k)]) - } - mapStringForDefaultRequest += "}" - keysForMaxLimitRequestRatio := make([]string, 0, len(this.MaxLimitRequestRatio)) - for k := range this.MaxLimitRequestRatio { - keysForMaxLimitRequestRatio = append(keysForMaxLimitRequestRatio, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForMaxLimitRequestRatio) - mapStringForMaxLimitRequestRatio := "ResourceList{" - for _, k := range keysForMaxLimitRequestRatio { - mapStringForMaxLimitRequestRatio += fmt.Sprintf("%v: %v,", k, this.MaxLimitRequestRatio[ResourceName(k)]) - } - mapStringForMaxLimitRequestRatio += "}" - s := strings.Join([]string{`&LimitRangeItem{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Max:` + mapStringForMax + `,`, - `Min:` + mapStringForMin + `,`, - `Default:` + mapStringForDefault + `,`, - `DefaultRequest:` + mapStringForDefaultRequest + `,`, - `MaxLimitRequestRatio:` + mapStringForMaxLimitRequestRatio + `,`, - `}`, - }, "") - return s -} -func (this *LimitRangeList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LimitRangeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "LimitRange", "LimitRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LimitRangeSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LimitRangeSpec{`, - `Limits:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Limits), "LimitRangeItem", "LimitRangeItem", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *List) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&List{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RawExtension", "k8s_io_apimachinery_pkg_runtime.RawExtension", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *LoadBalancerIngress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LoadBalancerIngress{`, - `IP:` + fmt.Sprintf("%v", this.IP) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `}`, - }, "") - return s -} -func (this *LoadBalancerStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LoadBalancerStatus{`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "LoadBalancerIngress", "LoadBalancerIngress", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*LocalObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LocalObjectReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *LocalVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&LocalVolumeSource{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `FSType:` + valueToStringGenerated(this.FSType) + `,`, - `}`, - }, "") - return s -} -func (this *NFSVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NFSVolumeSource{`, - `Server:` + fmt.Sprintf("%v", this.Server) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Namespace) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Namespace{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NamespaceSpec", "NamespaceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NamespaceStatus", "NamespaceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamespaceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Namespace", "Namespace", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamespaceSpec{`, - `Finalizers:` + fmt.Sprintf("%v", this.Finalizers) + `,`, - `}`, - }, "") - return s -} -func (this *NamespaceStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NamespaceStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `}`, - }, "") - return s -} -func (this *Node) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Node{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NodeSpec", "NodeSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "NodeStatus", "NodeStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeAddress) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeAddress{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Address:` + fmt.Sprintf("%v", this.Address) + `,`, - `}`, - }, "") - return s -} -func (this *NodeAffinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "NodeSelector", "NodeSelector", 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), 
"PreferredSchedulingTerm", "PreferredSchedulingTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastHeartbeatTime:` + strings.Replace(strings.Replace(this.LastHeartbeatTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *NodeConfigSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeConfigSource{`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapNodeConfigSource", "ConfigMapNodeConfigSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeConfigStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeConfigStatus{`, - `Assigned:` + strings.Replace(fmt.Sprintf("%v", this.Assigned), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `Active:` + strings.Replace(fmt.Sprintf("%v", this.Active), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `LastKnownGood:` + strings.Replace(fmt.Sprintf("%v", this.LastKnownGood), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `Error:` + fmt.Sprintf("%v", this.Error) + `,`, - `}`, - }, "") - return s -} -func (this *NodeDaemonEndpoints) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeDaemonEndpoints{`, - `KubeletEndpoint:` + strings.Replace(strings.Replace(this.KubeletEndpoint.String(), "DaemonEndpoint", "DaemonEndpoint", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Node", "Node", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeProxyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *NodeResources) String() string { - if this == nil { - return "nil" - } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) - } - mapStringForCapacity += "}" - s := strings.Join([]string{`&NodeResources{`, - `Capacity:` + mapStringForCapacity + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelector) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeSelector{`, - `NodeSelectorTerms:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.NodeSelectorTerms), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - 
`}`, - }, "") - return s -} -func (this *NodeSelectorRequirement) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeSelectorRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSelectorTerm) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeSelectorTerm{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, - `MatchFields:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchFields), "NodeSelectorRequirement", "NodeSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeSpec{`, - `PodCIDR:` + fmt.Sprintf("%v", this.PodCIDR) + `,`, - `DoNotUse_ExternalID:` + fmt.Sprintf("%v", this.DoNotUse_ExternalID) + `,`, - `ProviderID:` + fmt.Sprintf("%v", this.ProviderID) + `,`, - `Unschedulable:` + fmt.Sprintf("%v", this.Unschedulable) + `,`, - `Taints:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Taints), "Taint", "Taint", 1), `&`, ``, 1) + `,`, - `ConfigSource:` + strings.Replace(fmt.Sprintf("%v", this.ConfigSource), "NodeConfigSource", "NodeConfigSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeStatus) String() string { - if this == nil { - return "nil" - } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) - } - mapStringForCapacity += "}" - keysForAllocatable := make([]string, 0, len(this.Allocatable)) - for k := range this.Allocatable { - keysForAllocatable = append(keysForAllocatable, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAllocatable) - mapStringForAllocatable := "ResourceList{" - for _, k := range keysForAllocatable { - mapStringForAllocatable += fmt.Sprintf("%v: %v,", k, this.Allocatable[ResourceName(k)]) - } - mapStringForAllocatable += "}" - s := strings.Join([]string{`&NodeStatus{`, - `Capacity:` + mapStringForCapacity + `,`, - `Allocatable:` + mapStringForAllocatable + `,`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "NodeCondition", "NodeCondition", 1), `&`, ``, 1) + `,`, - `Addresses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Addresses), "NodeAddress", "NodeAddress", 1), `&`, ``, 1) + `,`, - `DaemonEndpoints:` + strings.Replace(strings.Replace(this.DaemonEndpoints.String(), "NodeDaemonEndpoints", "NodeDaemonEndpoints", 1), `&`, ``, 1) + `,`, - `NodeInfo:` + strings.Replace(strings.Replace(this.NodeInfo.String(), "NodeSystemInfo", "NodeSystemInfo", 1), `&`, ``, 1) + `,`, - `Images:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Images), "ContainerImage", "ContainerImage", 1), `&`, ``, 1) + `,`, - `VolumesInUse:` + fmt.Sprintf("%v", this.VolumesInUse) + `,`, - `VolumesAttached:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumesAttached), "AttachedVolume", "AttachedVolume", 1), 
`&`, ``, 1) + `,`, - `Config:` + strings.Replace(fmt.Sprintf("%v", this.Config), "NodeConfigStatus", "NodeConfigStatus", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NodeSystemInfo) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NodeSystemInfo{`, - `MachineID:` + fmt.Sprintf("%v", this.MachineID) + `,`, - `SystemUUID:` + fmt.Sprintf("%v", this.SystemUUID) + `,`, - `BootID:` + fmt.Sprintf("%v", this.BootID) + `,`, - `KernelVersion:` + fmt.Sprintf("%v", this.KernelVersion) + `,`, - `OSImage:` + fmt.Sprintf("%v", this.OSImage) + `,`, - `ContainerRuntimeVersion:` + fmt.Sprintf("%v", this.ContainerRuntimeVersion) + `,`, - `KubeletVersion:` + fmt.Sprintf("%v", this.KubeletVersion) + `,`, - `KubeProxyVersion:` + fmt.Sprintf("%v", this.KubeProxyVersion) + `,`, - `OperatingSystem:` + fmt.Sprintf("%v", this.OperatingSystem) + `,`, - `Architecture:` + fmt.Sprintf("%v", this.Architecture) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectFieldSelector) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectFieldSelector{`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `}`, - }, "") - return s -} -func (this *ObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ObjectReference{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `UID:` + fmt.Sprintf("%v", this.UID) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `ResourceVersion:` + fmt.Sprintf("%v", this.ResourceVersion) + `,`, - `FieldPath:` + fmt.Sprintf("%v", this.FieldPath) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolume{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeSpec", "PersistentVolumeSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeStatus", "PersistentVolumeStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaim) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeClaim{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PersistentVolumeClaimSpec", "PersistentVolumeClaimSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PersistentVolumeClaimStatus", "PersistentVolumeClaimStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeClaimCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + 
strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeClaimList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolumeClaim", "PersistentVolumeClaim", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeClaimSpec{`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Resources:` + strings.Replace(strings.Replace(this.Resources.String(), "ResourceRequirements", "ResourceRequirements", 1), `&`, ``, 1) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `StorageClassName:` + valueToStringGenerated(this.StorageClassName) + `,`, - `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, - `DataSource:` + strings.Replace(fmt.Sprintf("%v", this.DataSource), "TypedLocalObjectReference", "TypedLocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimStatus) String() string { - if this == nil { - return "nil" - } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) - } - mapStringForCapacity += "}" - s := strings.Join([]string{`&PersistentVolumeClaimStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `Capacity:` + mapStringForCapacity + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PersistentVolumeClaimCondition", "PersistentVolumeClaimCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeClaimVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeClaimVolumeSource{`, - `ClaimName:` + fmt.Sprintf("%v", this.ClaimName) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PersistentVolume", "PersistentVolume", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeSource{`, - `GCEPersistentDisk:` + 
strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDPersistentVolumeSource", "RBDPersistentVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIPersistentVolumeSource", "ISCSIPersistentVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderPersistentVolumeSource", "CinderPersistentVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSPersistentVolumeSource", "CephFSPersistentVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexPersistentVolumeSource", "FlexPersistentVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFilePersistentVolumeSource", "AzureFilePersistentVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), "ScaleIOPersistentVolumeSource", "ScaleIOPersistentVolumeSource", 1) + `,`, - `Local:` + strings.Replace(fmt.Sprintf("%v", this.Local), "LocalVolumeSource", "LocalVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSPersistentVolumeSource", "StorageOSPersistentVolumeSource", 1) + `,`, - `CSI:` + strings.Replace(fmt.Sprintf("%v", this.CSI), "CSIPersistentVolumeSource", "CSIPersistentVolumeSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeSpec) String() string { - if this == nil { - return "nil" - } - keysForCapacity := make([]string, 0, len(this.Capacity)) - for k := range this.Capacity { - keysForCapacity = append(keysForCapacity, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForCapacity) - mapStringForCapacity := "ResourceList{" - for _, k := range keysForCapacity { - mapStringForCapacity += fmt.Sprintf("%v: %v,", k, this.Capacity[ResourceName(k)]) - } - mapStringForCapacity += "}" - s := strings.Join([]string{`&PersistentVolumeSpec{`, - `Capacity:` + 
mapStringForCapacity + `,`, - `PersistentVolumeSource:` + strings.Replace(strings.Replace(this.PersistentVolumeSource.String(), "PersistentVolumeSource", "PersistentVolumeSource", 1), `&`, ``, 1) + `,`, - `AccessModes:` + fmt.Sprintf("%v", this.AccessModes) + `,`, - `ClaimRef:` + strings.Replace(fmt.Sprintf("%v", this.ClaimRef), "ObjectReference", "ObjectReference", 1) + `,`, - `PersistentVolumeReclaimPolicy:` + fmt.Sprintf("%v", this.PersistentVolumeReclaimPolicy) + `,`, - `StorageClassName:` + fmt.Sprintf("%v", this.StorageClassName) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `VolumeMode:` + valueToStringGenerated(this.VolumeMode) + `,`, - `NodeAffinity:` + strings.Replace(fmt.Sprintf("%v", this.NodeAffinity), "VolumeNodeAffinity", "VolumeNodeAffinity", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PersistentVolumeStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PersistentVolumeStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `}`, - }, "") - return s -} -func (this *PhotonPersistentDiskVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PhotonPersistentDiskVolumeSource{`, - `PdID:` + fmt.Sprintf("%v", this.PdID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `}`, - }, "") - return s -} -func (this *Pod) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Pod{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodAffinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodAffinityTerm) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodAffinityTerm{`, - `LabelSelector:` + strings.Replace(fmt.Sprintf("%v", this.LabelSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Namespaces:` + fmt.Sprintf("%v", this.Namespaces) + `,`, - `TopologyKey:` + fmt.Sprintf("%v", this.TopologyKey) + `,`, - `}`, - }, "") - return s -} -func (this *PodAntiAffinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodAntiAffinity{`, - `RequiredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.RequiredDuringSchedulingIgnoredDuringExecution), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `PreferredDuringSchedulingIgnoredDuringExecution:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", 
this.PreferredDuringSchedulingIgnoredDuringExecution), "WeightedPodAffinityTerm", "WeightedPodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodAttachOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodAttachOptions{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `}`, - }, "") - return s -} -func (this *PodCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastProbeTime:` + strings.Replace(strings.Replace(this.LastProbeTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *PodDNSConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDNSConfig{`, - `Nameservers:` + fmt.Sprintf("%v", this.Nameservers) + `,`, - `Searches:` + fmt.Sprintf("%v", this.Searches) + `,`, - `Options:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Options), "PodDNSConfigOption", "PodDNSConfigOption", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDNSConfigOption) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDNSConfigOption{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + valueToStringGenerated(this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *PodExecOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodExecOptions{`, - `Stdin:` + fmt.Sprintf("%v", this.Stdin) + `,`, - `Stdout:` + fmt.Sprintf("%v", this.Stdout) + `,`, - `Stderr:` + fmt.Sprintf("%v", this.Stderr) + `,`, - `TTY:` + fmt.Sprintf("%v", this.TTY) + `,`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `Command:` + fmt.Sprintf("%v", this.Command) + `,`, - `}`, - }, "") - return s -} -func (this *PodList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Pod", "Pod", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodLogOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodLogOptions{`, - `Container:` + fmt.Sprintf("%v", this.Container) + `,`, - `Follow:` + fmt.Sprintf("%v", this.Follow) + `,`, - `Previous:` + fmt.Sprintf("%v", this.Previous) + `,`, - `SinceSeconds:` + valueToStringGenerated(this.SinceSeconds) + `,`, - `SinceTime:` + strings.Replace(fmt.Sprintf("%v", this.SinceTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `Timestamps:` + fmt.Sprintf("%v", this.Timestamps) + `,`, - `TailLines:` + valueToStringGenerated(this.TailLines) + `,`, - `LimitBytes:` + valueToStringGenerated(this.LimitBytes) 
+ `,`, - `}`, - }, "") - return s -} -func (this *PodPortForwardOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodPortForwardOptions{`, - `Ports:` + fmt.Sprintf("%v", this.Ports) + `,`, - `}`, - }, "") - return s -} -func (this *PodProxyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *PodReadinessGate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodReadinessGate{`, - `ConditionType:` + fmt.Sprintf("%v", this.ConditionType) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityContext) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityContext{`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `SupplementalGroups:` + fmt.Sprintf("%v", this.SupplementalGroups) + `,`, - `FSGroup:` + valueToStringGenerated(this.FSGroup) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `Sysctls:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sysctls), "Sysctl", "Sysctl", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSignature) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSignature{`, - `PodController:` + strings.Replace(fmt.Sprintf("%v", this.PodController), "OwnerReference", "k8s_io_apimachinery_pkg_apis_meta_v1.OwnerReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSpec) String() string { - if this == nil { - return "nil" - } - keysForNodeSelector := make([]string, 0, len(this.NodeSelector)) - for k := range this.NodeSelector { - keysForNodeSelector = append(keysForNodeSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForNodeSelector) - mapStringForNodeSelector := "map[string]string{" - for _, k := range keysForNodeSelector { - mapStringForNodeSelector += fmt.Sprintf("%v: %v,", k, this.NodeSelector[k]) - } - mapStringForNodeSelector += "}" - s := strings.Join([]string{`&PodSpec{`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "Volume", 1), `&`, ``, 1) + `,`, - `Containers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Containers), "Container", "Container", 1), `&`, ``, 1) + `,`, - `RestartPolicy:` + fmt.Sprintf("%v", this.RestartPolicy) + `,`, - `TerminationGracePeriodSeconds:` + valueToStringGenerated(this.TerminationGracePeriodSeconds) + `,`, - `ActiveDeadlineSeconds:` + valueToStringGenerated(this.ActiveDeadlineSeconds) + `,`, - `DNSPolicy:` + fmt.Sprintf("%v", this.DNSPolicy) + `,`, - `NodeSelector:` + mapStringForNodeSelector + `,`, - `ServiceAccountName:` + fmt.Sprintf("%v", this.ServiceAccountName) + `,`, - `DeprecatedServiceAccount:` + fmt.Sprintf("%v", this.DeprecatedServiceAccount) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPID:` + fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SecurityContext:` + strings.Replace(fmt.Sprintf("%v", this.SecurityContext), "PodSecurityContext", "PodSecurityContext", 1) + `,`, - `ImagePullSecrets:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Hostname:` + fmt.Sprintf("%v", this.Hostname) + `,`, - `Subdomain:` + fmt.Sprintf("%v", this.Subdomain) + `,`, - `Affinity:` + strings.Replace(fmt.Sprintf("%v", this.Affinity), "Affinity", "Affinity", 1) + `,`, - `SchedulerName:` + fmt.Sprintf("%v", this.SchedulerName) + `,`, - `InitContainers:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainers), "Container", "Container", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `Tolerations:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Tolerations), "Toleration", "Toleration", 1), `&`, ``, 1) + `,`, - `HostAliases:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostAliases), "HostAlias", "HostAlias", 1), `&`, ``, 1) + `,`, - `PriorityClassName:` + fmt.Sprintf("%v", this.PriorityClassName) + `,`, - `Priority:` + valueToStringGenerated(this.Priority) + `,`, - `DNSConfig:` + strings.Replace(fmt.Sprintf("%v", this.DNSConfig), "PodDNSConfig", "PodDNSConfig", 1) + `,`, - `ShareProcessNamespace:` + valueToStringGenerated(this.ShareProcessNamespace) + `,`, - `ReadinessGates:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ReadinessGates), "PodReadinessGate", "PodReadinessGate", 1), `&`, ``, 1) + `,`, - `RuntimeClassName:` + valueToStringGenerated(this.RuntimeClassName) + `,`, - `}`, - }, "") - return s -} -func (this *PodStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodStatus{`, - `Phase:` + fmt.Sprintf("%v", this.Phase) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "PodCondition", "PodCondition", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `HostIP:` + fmt.Sprintf("%v", this.HostIP) + `,`, - `PodIP:` + fmt.Sprintf("%v", this.PodIP) + `,`, - `StartTime:` + strings.Replace(fmt.Sprintf("%v", this.StartTime), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `ContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, - `QOSClass:` + fmt.Sprintf("%v", this.QOSClass) + `,`, - `InitContainerStatuses:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.InitContainerStatuses), "ContainerStatus", "ContainerStatus", 1), `&`, ``, 1) + `,`, - `NominatedNodeName:` + fmt.Sprintf("%v", this.NominatedNodeName) + `,`, - `}`, - }, "") - return s -} -func (this *PodStatusResult) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodStatusResult{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodStatus", "PodStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplate) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodTemplate{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "PodTemplateSpec", 1), `&`, ``, 1) + 
`,`, - `}`, - }, "") - return s -} -func (this *PodTemplateList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodTemplateList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodTemplate", "PodTemplate", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodTemplateSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodTemplateSpec{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSpec", "PodSpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PortworxVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PortworxVolumeSource{`, - `VolumeID:` + fmt.Sprintf("%v", this.VolumeID) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Preconditions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Preconditions{`, - `UID:` + valueToStringGenerated(this.UID) + `,`, - `}`, - }, "") - return s -} -func (this *PreferAvoidPodsEntry) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PreferAvoidPodsEntry{`, - `PodSignature:` + strings.Replace(strings.Replace(this.PodSignature.String(), "PodSignature", "PodSignature", 1), `&`, ``, 1) + `,`, - `EvictionTime:` + strings.Replace(strings.Replace(this.EvictionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *PreferredSchedulingTerm) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PreferredSchedulingTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `Preference:` + strings.Replace(strings.Replace(this.Preference.String(), "NodeSelectorTerm", "NodeSelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Probe) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Probe{`, - `Handler:` + strings.Replace(strings.Replace(this.Handler.String(), "Handler", "Handler", 1), `&`, ``, 1) + `,`, - `InitialDelaySeconds:` + fmt.Sprintf("%v", this.InitialDelaySeconds) + `,`, - `TimeoutSeconds:` + fmt.Sprintf("%v", this.TimeoutSeconds) + `,`, - `PeriodSeconds:` + fmt.Sprintf("%v", this.PeriodSeconds) + `,`, - `SuccessThreshold:` + fmt.Sprintf("%v", this.SuccessThreshold) + `,`, - `FailureThreshold:` + fmt.Sprintf("%v", this.FailureThreshold) + `,`, - `}`, - }, "") - return s -} -func (this *ProjectedVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ProjectedVolumeSource{`, - `Sources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Sources), "VolumeProjection", "VolumeProjection", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `}`, - }, "") - return s -} -func (this *QuobyteVolumeSource) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&QuobyteVolumeSource{`, - `Registry:` + fmt.Sprintf("%v", this.Registry) + `,`, - `Volume:` + fmt.Sprintf("%v", this.Volume) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Group:` + fmt.Sprintf("%v", this.Group) + `,`, - `}`, - }, "") - return s -} -func (this *RBDPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RBDPersistentVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *RBDVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RBDVolumeSource{`, - `CephMonitors:` + fmt.Sprintf("%v", this.CephMonitors) + `,`, - `RBDImage:` + fmt.Sprintf("%v", this.RBDImage) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `RBDPool:` + fmt.Sprintf("%v", this.RBDPool) + `,`, - `RadosUser:` + fmt.Sprintf("%v", this.RadosUser) + `,`, - `Keyring:` + fmt.Sprintf("%v", this.Keyring) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *RangeAllocation) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RangeAllocation{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Range:` + fmt.Sprintf("%v", this.Range) + `,`, - `Data:` + valueToStringGenerated(this.Data) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationController) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicationController{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ReplicationControllerSpec", "ReplicationControllerSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ReplicationControllerStatus", "ReplicationControllerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerCondition) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicationControllerCondition{`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Status:` + fmt.Sprintf("%v", this.Status) + `,`, - `LastTransitionTime:` + strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicationControllerList{`, - `ListMeta:` + 
strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicationController", "ReplicationController", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerSpec) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ReplicationControllerSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `Template:` + strings.Replace(fmt.Sprintf("%v", this.Template), "PodTemplateSpec", "PodTemplateSpec", 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicationControllerStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicationControllerCondition", "ReplicationControllerCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceFieldSelector) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceFieldSelector{`, - `ContainerName:` + fmt.Sprintf("%v", this.ContainerName) + `,`, - `Resource:` + fmt.Sprintf("%v", this.Resource) + `,`, - `Divisor:` + strings.Replace(strings.Replace(this.Divisor.String(), "Quantity", "k8s_io_apimachinery_pkg_api_resource.Quantity", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuota) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceQuota{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ResourceQuotaSpec", "ResourceQuotaSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ResourceQuotaStatus", "ResourceQuotaStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuotaList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ResourceQuotaList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ResourceQuota", "ResourceQuota", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuotaSpec) String() string { - if this == nil { - return "nil" - } - keysForHard := make([]string, 0, len(this.Hard)) - 
for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) - } - mapStringForHard += "}" - s := strings.Join([]string{`&ResourceQuotaSpec{`, - `Hard:` + mapStringForHard + `,`, - `Scopes:` + fmt.Sprintf("%v", this.Scopes) + `,`, - `ScopeSelector:` + strings.Replace(fmt.Sprintf("%v", this.ScopeSelector), "ScopeSelector", "ScopeSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ResourceQuotaStatus) String() string { - if this == nil { - return "nil" - } - keysForHard := make([]string, 0, len(this.Hard)) - for k := range this.Hard { - keysForHard = append(keysForHard, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForHard) - mapStringForHard := "ResourceList{" - for _, k := range keysForHard { - mapStringForHard += fmt.Sprintf("%v: %v,", k, this.Hard[ResourceName(k)]) - } - mapStringForHard += "}" - keysForUsed := make([]string, 0, len(this.Used)) - for k := range this.Used { - keysForUsed = append(keysForUsed, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUsed) - mapStringForUsed := "ResourceList{" - for _, k := range keysForUsed { - mapStringForUsed += fmt.Sprintf("%v: %v,", k, this.Used[ResourceName(k)]) - } - mapStringForUsed += "}" - s := strings.Join([]string{`&ResourceQuotaStatus{`, - `Hard:` + mapStringForHard + `,`, - `Used:` + mapStringForUsed + `,`, - `}`, - }, "") - return s -} -func (this *ResourceRequirements) String() string { - if this == nil { - return "nil" - } - keysForLimits := make([]string, 0, len(this.Limits)) - for k := range this.Limits { - keysForLimits = append(keysForLimits, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForLimits) - mapStringForLimits := "ResourceList{" - for _, k := range keysForLimits { - mapStringForLimits += fmt.Sprintf("%v: %v,", k, this.Limits[ResourceName(k)]) - } - mapStringForLimits += "}" - keysForRequests := make([]string, 0, len(this.Requests)) - for k := range this.Requests { - keysForRequests = append(keysForRequests, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForRequests) - mapStringForRequests := "ResourceList{" - for _, k := range keysForRequests { - mapStringForRequests += fmt.Sprintf("%v: %v,", k, this.Requests[ResourceName(k)]) - } - mapStringForRequests += "}" - s := strings.Join([]string{`&ResourceRequirements{`, - `Limits:` + mapStringForLimits + `,`, - `Requests:` + mapStringForRequests + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxOptions{`, - `User:` + fmt.Sprintf("%v", this.User) + `,`, - `Role:` + fmt.Sprintf("%v", this.Role) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `Level:` + fmt.Sprintf("%v", this.Level) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleIOPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleIOPersistentVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "SecretReference", "SecretReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", 
this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleIOVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleIOVolumeSource{`, - `Gateway:` + fmt.Sprintf("%v", this.Gateway) + `,`, - `System:` + fmt.Sprintf("%v", this.System) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `SSLEnabled:` + fmt.Sprintf("%v", this.SSLEnabled) + `,`, - `ProtectionDomain:` + fmt.Sprintf("%v", this.ProtectionDomain) + `,`, - `StoragePool:` + fmt.Sprintf("%v", this.StoragePool) + `,`, - `StorageMode:` + fmt.Sprintf("%v", this.StorageMode) + `,`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *ScopeSelector) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScopeSelector{`, - `MatchExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchExpressions), "ScopedResourceSelectorRequirement", "ScopedResourceSelectorRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScopedResourceSelectorRequirement) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScopedResourceSelectorRequirement{`, - `ScopeName:` + fmt.Sprintf("%v", this.ScopeName) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *Secret) String() string { - if this == nil { - return "nil" - } - keysForData := make([]string, 0, len(this.Data)) - for k := range this.Data { - keysForData = append(keysForData, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForData) - mapStringForData := "map[string][]byte{" - for _, k := range keysForData { - mapStringForData += fmt.Sprintf("%v: %v,", k, this.Data[k]) - } - mapStringForData += "}" - keysForStringData := make([]string, 0, len(this.StringData)) - for k := range this.StringData { - keysForStringData = append(keysForStringData, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForStringData) - mapStringForStringData := "map[string]string{" - for _, k := range keysForStringData { - mapStringForStringData += fmt.Sprintf("%v: %v,", k, this.StringData[k]) - } - mapStringForStringData += "}" - s := strings.Join([]string{`&Secret{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Data:` + mapStringForData + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `StringData:` + mapStringForStringData + `,`, - `}`, - }, "") - return s -} -func (this *SecretEnvSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretEnvSource{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretKeySelector) String() string { - if this == nil { - return "nil" - } - s := 
strings.Join([]string{`&SecretKeySelector{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Secret", "Secret", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SecretProjection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretProjection{`, - `LocalObjectReference:` + strings.Replace(strings.Replace(this.LocalObjectReference.String(), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecretReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretReference{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func (this *SecretVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecretVolumeSource{`, - `SecretName:` + fmt.Sprintf("%v", this.SecretName) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "KeyToPath", "KeyToPath", 1), `&`, ``, 1) + `,`, - `DefaultMode:` + valueToStringGenerated(this.DefaultMode) + `,`, - `Optional:` + valueToStringGenerated(this.Optional) + `,`, - `}`, - }, "") - return s -} -func (this *SecurityContext) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SecurityContext{`, - `Capabilities:` + strings.Replace(fmt.Sprintf("%v", this.Capabilities), "Capabilities", "Capabilities", 1) + `,`, - `Privileged:` + valueToStringGenerated(this.Privileged) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "SELinuxOptions", 1) + `,`, - `RunAsUser:` + valueToStringGenerated(this.RunAsUser) + `,`, - `RunAsNonRoot:` + valueToStringGenerated(this.RunAsNonRoot) + `,`, - `ReadOnlyRootFilesystem:` + valueToStringGenerated(this.ReadOnlyRootFilesystem) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `RunAsGroup:` + valueToStringGenerated(this.RunAsGroup) + `,`, - `ProcMount:` + valueToStringGenerated(this.ProcMount) + `,`, - `}`, - }, "") - return s -} -func (this *SerializedReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SerializedReference{`, - `Reference:` + strings.Replace(strings.Replace(this.Reference.String(), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *Service) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Service{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 
1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ServiceSpec", "ServiceSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ServiceStatus", "ServiceStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccount) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceAccount{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Secrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Secrets), "ObjectReference", "ObjectReference", 1), `&`, ``, 1) + `,`, - `ImagePullSecrets:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ImagePullSecrets), "LocalObjectReference", "LocalObjectReference", 1), `&`, ``, 1) + `,`, - `AutomountServiceAccountToken:` + valueToStringGenerated(this.AutomountServiceAccountToken) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccountList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceAccountList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ServiceAccount", "ServiceAccount", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceAccountTokenProjection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceAccountTokenProjection{`, - `Audience:` + fmt.Sprintf("%v", this.Audience) + `,`, - `ExpirationSeconds:` + valueToStringGenerated(this.ExpirationSeconds) + `,`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Service", "Service", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServicePort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServicePort{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Protocol:` + fmt.Sprintf("%v", this.Protocol) + `,`, - `Port:` + fmt.Sprintf("%v", this.Port) + `,`, - `TargetPort:` + strings.Replace(strings.Replace(this.TargetPort.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `NodePort:` + fmt.Sprintf("%v", this.NodePort) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceProxyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceProxyOptions{`, - `Path:` + fmt.Sprintf("%v", this.Path) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceSpec) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := 
strings.Join([]string{`&ServiceSpec{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "ServicePort", "ServicePort", 1), `&`, ``, 1) + `,`, - `Selector:` + mapStringForSelector + `,`, - `ClusterIP:` + fmt.Sprintf("%v", this.ClusterIP) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `ExternalIPs:` + fmt.Sprintf("%v", this.ExternalIPs) + `,`, - `SessionAffinity:` + fmt.Sprintf("%v", this.SessionAffinity) + `,`, - `LoadBalancerIP:` + fmt.Sprintf("%v", this.LoadBalancerIP) + `,`, - `LoadBalancerSourceRanges:` + fmt.Sprintf("%v", this.LoadBalancerSourceRanges) + `,`, - `ExternalName:` + fmt.Sprintf("%v", this.ExternalName) + `,`, - `ExternalTrafficPolicy:` + fmt.Sprintf("%v", this.ExternalTrafficPolicy) + `,`, - `HealthCheckNodePort:` + fmt.Sprintf("%v", this.HealthCheckNodePort) + `,`, - `PublishNotReadyAddresses:` + fmt.Sprintf("%v", this.PublishNotReadyAddresses) + `,`, - `SessionAffinityConfig:` + strings.Replace(fmt.Sprintf("%v", this.SessionAffinityConfig), "SessionAffinityConfig", "SessionAffinityConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ServiceStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ServiceStatus{`, - `LoadBalancer:` + strings.Replace(strings.Replace(this.LoadBalancer.String(), "LoadBalancerStatus", "LoadBalancerStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SessionAffinityConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SessionAffinityConfig{`, - `ClientIP:` + strings.Replace(fmt.Sprintf("%v", this.ClientIP), "ClientIPConfig", "ClientIPConfig", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSPersistentVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageOSPersistentVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "ObjectReference", "ObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageOSVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageOSVolumeSource{`, - `VolumeName:` + fmt.Sprintf("%v", this.VolumeName) + `,`, - `VolumeNamespace:` + fmt.Sprintf("%v", this.VolumeNamespace) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `SecretRef:` + strings.Replace(fmt.Sprintf("%v", this.SecretRef), "LocalObjectReference", "LocalObjectReference", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Sysctl) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Sysctl{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `}`, - }, "") - return s -} -func (this *TCPSocketAction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TCPSocketAction{`, - `Port:` + strings.Replace(strings.Replace(this.Port.String(), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1), `&`, ``, 1) + `,`, - `Host:` + fmt.Sprintf("%v", this.Host) + `,`, - `}`, - }, "") - return s -} -func (this *Taint) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Taint{`, - `Key:` + fmt.Sprintf("%v", 
this.Key) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TimeAdded:` + strings.Replace(fmt.Sprintf("%v", this.TimeAdded), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Toleration) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Toleration{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Operator:` + fmt.Sprintf("%v", this.Operator) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `Effect:` + fmt.Sprintf("%v", this.Effect) + `,`, - `TolerationSeconds:` + valueToStringGenerated(this.TolerationSeconds) + `,`, - `}`, - }, "") - return s -} -func (this *TopologySelectorLabelRequirement) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TopologySelectorLabelRequirement{`, - `Key:` + fmt.Sprintf("%v", this.Key) + `,`, - `Values:` + fmt.Sprintf("%v", this.Values) + `,`, - `}`, - }, "") - return s -} -func (this *TopologySelectorTerm) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TopologySelectorTerm{`, - `MatchLabelExpressions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.MatchLabelExpressions), "TopologySelectorLabelRequirement", "TopologySelectorLabelRequirement", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *TypedLocalObjectReference) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TypedLocalObjectReference{`, - `APIGroup:` + valueToStringGenerated(this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Volume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Volume{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `VolumeSource:` + strings.Replace(strings.Replace(this.VolumeSource.String(), "VolumeSource", "VolumeSource", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeDevice) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeDevice{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `DevicePath:` + fmt.Sprintf("%v", this.DevicePath) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeMount) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeMount{`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `MountPath:` + fmt.Sprintf("%v", this.MountPath) + `,`, - `SubPath:` + fmt.Sprintf("%v", this.SubPath) + `,`, - `MountPropagation:` + valueToStringGenerated(this.MountPropagation) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeNodeAffinity) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeNodeAffinity{`, - `Required:` + strings.Replace(fmt.Sprintf("%v", this.Required), "NodeSelector", "NodeSelector", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeProjection) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeProjection{`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretProjection", "SecretProjection", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIProjection", "DownwardAPIProjection", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapProjection", 
"ConfigMapProjection", 1) + `,`, - `ServiceAccountToken:` + strings.Replace(fmt.Sprintf("%v", this.ServiceAccountToken), "ServiceAccountTokenProjection", "ServiceAccountTokenProjection", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeSource{`, - `HostPath:` + strings.Replace(fmt.Sprintf("%v", this.HostPath), "HostPathVolumeSource", "HostPathVolumeSource", 1) + `,`, - `EmptyDir:` + strings.Replace(fmt.Sprintf("%v", this.EmptyDir), "EmptyDirVolumeSource", "EmptyDirVolumeSource", 1) + `,`, - `GCEPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.GCEPersistentDisk), "GCEPersistentDiskVolumeSource", "GCEPersistentDiskVolumeSource", 1) + `,`, - `AWSElasticBlockStore:` + strings.Replace(fmt.Sprintf("%v", this.AWSElasticBlockStore), "AWSElasticBlockStoreVolumeSource", "AWSElasticBlockStoreVolumeSource", 1) + `,`, - `GitRepo:` + strings.Replace(fmt.Sprintf("%v", this.GitRepo), "GitRepoVolumeSource", "GitRepoVolumeSource", 1) + `,`, - `Secret:` + strings.Replace(fmt.Sprintf("%v", this.Secret), "SecretVolumeSource", "SecretVolumeSource", 1) + `,`, - `NFS:` + strings.Replace(fmt.Sprintf("%v", this.NFS), "NFSVolumeSource", "NFSVolumeSource", 1) + `,`, - `ISCSI:` + strings.Replace(fmt.Sprintf("%v", this.ISCSI), "ISCSIVolumeSource", "ISCSIVolumeSource", 1) + `,`, - `Glusterfs:` + strings.Replace(fmt.Sprintf("%v", this.Glusterfs), "GlusterfsVolumeSource", "GlusterfsVolumeSource", 1) + `,`, - `PersistentVolumeClaim:` + strings.Replace(fmt.Sprintf("%v", this.PersistentVolumeClaim), "PersistentVolumeClaimVolumeSource", "PersistentVolumeClaimVolumeSource", 1) + `,`, - `RBD:` + strings.Replace(fmt.Sprintf("%v", this.RBD), "RBDVolumeSource", "RBDVolumeSource", 1) + `,`, - `FlexVolume:` + strings.Replace(fmt.Sprintf("%v", this.FlexVolume), "FlexVolumeSource", "FlexVolumeSource", 1) + `,`, - `Cinder:` + strings.Replace(fmt.Sprintf("%v", this.Cinder), "CinderVolumeSource", "CinderVolumeSource", 1) + `,`, - `CephFS:` + strings.Replace(fmt.Sprintf("%v", this.CephFS), "CephFSVolumeSource", "CephFSVolumeSource", 1) + `,`, - `Flocker:` + strings.Replace(fmt.Sprintf("%v", this.Flocker), "FlockerVolumeSource", "FlockerVolumeSource", 1) + `,`, - `DownwardAPI:` + strings.Replace(fmt.Sprintf("%v", this.DownwardAPI), "DownwardAPIVolumeSource", "DownwardAPIVolumeSource", 1) + `,`, - `FC:` + strings.Replace(fmt.Sprintf("%v", this.FC), "FCVolumeSource", "FCVolumeSource", 1) + `,`, - `AzureFile:` + strings.Replace(fmt.Sprintf("%v", this.AzureFile), "AzureFileVolumeSource", "AzureFileVolumeSource", 1) + `,`, - `ConfigMap:` + strings.Replace(fmt.Sprintf("%v", this.ConfigMap), "ConfigMapVolumeSource", "ConfigMapVolumeSource", 1) + `,`, - `VsphereVolume:` + strings.Replace(fmt.Sprintf("%v", this.VsphereVolume), "VsphereVirtualDiskVolumeSource", "VsphereVirtualDiskVolumeSource", 1) + `,`, - `Quobyte:` + strings.Replace(fmt.Sprintf("%v", this.Quobyte), "QuobyteVolumeSource", "QuobyteVolumeSource", 1) + `,`, - `AzureDisk:` + strings.Replace(fmt.Sprintf("%v", this.AzureDisk), "AzureDiskVolumeSource", "AzureDiskVolumeSource", 1) + `,`, - `PhotonPersistentDisk:` + strings.Replace(fmt.Sprintf("%v", this.PhotonPersistentDisk), "PhotonPersistentDiskVolumeSource", "PhotonPersistentDiskVolumeSource", 1) + `,`, - `PortworxVolume:` + strings.Replace(fmt.Sprintf("%v", this.PortworxVolume), "PortworxVolumeSource", "PortworxVolumeSource", 1) + `,`, - `ScaleIO:` + strings.Replace(fmt.Sprintf("%v", this.ScaleIO), 
"ScaleIOVolumeSource", "ScaleIOVolumeSource", 1) + `,`, - `Projected:` + strings.Replace(fmt.Sprintf("%v", this.Projected), "ProjectedVolumeSource", "ProjectedVolumeSource", 1) + `,`, - `StorageOS:` + strings.Replace(fmt.Sprintf("%v", this.StorageOS), "StorageOSVolumeSource", "StorageOSVolumeSource", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VsphereVirtualDiskVolumeSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VsphereVirtualDiskVolumeSource{`, - `VolumePath:` + fmt.Sprintf("%v", this.VolumePath) + `,`, - `FSType:` + fmt.Sprintf("%v", this.FSType) + `,`, - `StoragePolicyName:` + fmt.Sprintf("%v", this.StoragePolicyName) + `,`, - `StoragePolicyID:` + fmt.Sprintf("%v", this.StoragePolicyID) + `,`, - `}`, - }, "") - return s -} -func (this *WeightedPodAffinityTerm) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&WeightedPodAffinityTerm{`, - `Weight:` + fmt.Sprintf("%v", this.Weight) + `,`, - `PodAffinityTerm:` + strings.Replace(strings.Replace(this.PodAffinityTerm.String(), "PodAffinityTerm", "PodAffinityTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AWSElasticBlockStoreVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AWSElasticBlockStoreVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 
{ - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Affinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Affinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Affinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeAffinity == nil { - m.NodeAffinity = &NodeAffinity{} - } - if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodAffinity == nil { - m.PodAffinity = &PodAffinity{} - } - if err := m.PodAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAntiAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodAntiAffinity == nil { - 
m.PodAntiAffinity = &PodAntiAffinity{} - } - if err := m.PodAntiAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AttachedVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AttachedVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AttachedVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = UniqueVolumeName(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DevicePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AvoidPods) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AvoidPods: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AvoidPods: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferAvoidPods", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PreferAvoidPods = append(m.PreferAvoidPods, PreferAvoidPodsEntry{}) - if err := m.PreferAvoidPods[len(m.PreferAvoidPods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureDiskVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DiskName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DiskName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DataDiskURI", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DataDiskURI = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CachingMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := 
AzureDataDiskCachingMode(dAtA[iNdEx:postIndex]) - m.CachingMode = &s - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnly = &b - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := AzureDataDiskKind(dAtA[iNdEx:postIndex]) - m.Kind = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureFilePersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFilePersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: 
wrong wireType = %d for field ShareName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShareName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretNamespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.SecretNamespace = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AzureFileVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AzureFileVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AzureFileVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ShareName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ShareName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Binding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Binding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Binding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Target.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeHandle", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeHandle = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeAttributes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.VolumeAttributes == nil { - m.VolumeAttributes = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.VolumeAttributes[mapkey] = mapvalue - } else { - var mapvalue string - m.VolumeAttributes[mapkey] = mapvalue - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ControllerPublishSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ControllerPublishSecretRef == nil { - m.ControllerPublishSecretRef = &SecretReference{} - } - if err := m.ControllerPublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeStageSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeStageSecretRef == nil { - m.NodeStageSecretRef = &SecretReference{} - } - if err := m.NodeStageSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePublishSecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodePublishSecretRef == nil { - m.NodePublishSecretRef = &SecretReference{} - } - if err := m.NodePublishSecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Capabilities) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Capabilities: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Capabilities: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Add", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Add = append(m.Add, Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Drop", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Drop = append(m.Drop, Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CephFSPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := 
int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CephFSVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CephFSVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CephFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Monitors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Monitors = append(m.Monitors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretFile", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretFile = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CinderPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CinderPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 
{ - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CinderVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CinderVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CinderVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - 
return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClientIPConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClientIPConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClientIPConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TimeoutSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ComponentCondition: illegal 
tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ComponentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ComponentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ComponentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
 [elided: deletion of machine-generated protobuf decoding code from a vendored
  ericchiang/k8s generated.pb.go (line breaks in this hunk were lost in
  extraction, so the removed lines are summarized rather than reproduced).
  The hunk removes the tail of the generated Unmarshal method for
  ComponentStatus and the generated Unmarshal methods for ComponentStatusList,
  ConfigMap, ConfigMapEnvSource, ConfigMapKeySelector, ConfigMapList,
  ConfigMapNodeConfigSource, ConfigMapProjection, ConfigMapVolumeSource,
  Container, ContainerImage, ContainerPort, ContainerState,
  ContainerStateRunning, ContainerStateTerminated, ContainerStateWaiting,
  ContainerStatus, DaemonEndpoint, DownwardAPIProjection, and
  DownwardAPIVolumeFile; the deletion continues beyond this excerpt.]
iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DownwardAPIVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DownwardAPIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DownwardAPIVolumeFile{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EmptyDirVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EmptyDirVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EmptyDirVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Medium", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Medium = StorageMedium(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SizeLimit", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SizeLimit == nil { - m.SizeLimit = &k8s_io_apimachinery_pkg_api_resource.Quantity{} - } - if err := m.SizeLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointAddress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointAddress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointAddress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetRef", wireType) - } - var msglen int - for shift := uint(0); ; 
shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TargetRef == nil { - m.TargetRef = &ObjectReference{} - } - if err := m.TargetRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.NodeName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointPort) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointPort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointPort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", 
wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointSubset) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointSubset: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointSubset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addresses = append(m.Addresses, EndpointAddress{}) - if err := m.Addresses[len(m.Addresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NotReadyAddresses", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NotReadyAddresses = append(m.NotReadyAddresses, EndpointAddress{}) - if err := m.NotReadyAddresses[len(m.NotReadyAddresses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, EndpointPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Endpoints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Endpoints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Endpoints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subsets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subsets = append(m.Subsets, EndpointSubset{}) - if err := m.Subsets[len(m.Subsets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EndpointsList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType 
:= int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EndpointsList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EndpointsList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Endpoints{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvFromSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvFromSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvFromSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Prefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Prefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapRef == nil { - m.ConfigMapRef = &ConfigMapEnvSource{} - } - if err := m.ConfigMapRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretEnvSource{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVar) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVar: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVar: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ValueFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ValueFrom == nil { - m.ValueFrom = &EnvVarSource{} - } - if err := m.ValueFrom.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EnvVarSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EnvVarSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EnvVarSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FieldRef == nil { - m.FieldRef = &ObjectFieldSelector{} - } - if err := m.FieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceFieldRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ResourceFieldRef == nil { - m.ResourceFieldRef = &ResourceFieldSelector{} - } - if err := m.ResourceFieldRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMapKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMapKeyRef == nil { - m.ConfigMapKeyRef = &ConfigMapKeySelector{} - } - if err := m.ConfigMapKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretKeyRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretKeyRef == nil { - m.SecretKeyRef = &SecretKeySelector{} - } - if err := m.SecretKeyRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InvolvedObject", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.InvolvedObject.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FirstTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) 
- iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Series == nil { - m.Series = &EventSeries{} - } - if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Action = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Related == nil { - m.Related = &ObjectReference{} - } - if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ReportingController = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx 
>= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ReportingInstance = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Event{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSeries) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSeries: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = EventSeriesState(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Component", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Component = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - 
} - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ExecAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ExecAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ExecAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FCVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FCVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FCVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetWWNs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetWWNs = append(m.TargetWWNs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Lun = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field WWIDs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.WWIDs = append(m.WWIDs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlexPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlexPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlexPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := 
string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Options == nil { - m.Options = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlexVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlexVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlexVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Options == nil { - m.Options = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Options[mapkey] = mapvalue - } else { - var mapvalue string - m.Options[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FlockerVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FlockerVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FlockerVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DatasetName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DatasetUUID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DatasetUUID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GCEPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GCEPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PDName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PDName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Partition", wireType) - } - m.Partition = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Partition |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GitRepoVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GitRepoVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GitRepoVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Repository", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Repository = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field Revision", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Revision = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Directory", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Directory = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GlusterfsVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GlusterfsVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GlusterfsVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EndpointsName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EndpointsName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { 
- return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPGetAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPGetAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPGetAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scheme", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scheme = URIScheme(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPHeaders", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HTTPHeaders = append(m.HTTPHeaders, HTTPHeader{}) - if err := m.HTTPHeaders[len(m.HTTPHeaders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPHeader) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPHeader: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPHeader: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - 
} - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Handler) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Handler: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Handler: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Exec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Exec == nil { - m.Exec = &ExecAction{} - } - if err := m.Exec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTPGet", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTPGet == nil { - m.HTTPGet = &HTTPGetAction{} - } - if err := m.HTTPGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TCPSocket", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TCPSocket == nil { - m.TCPSocket = &TCPSocketAction{} - } - if err := m.TCPSocket.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostAlias) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType 
== 4 { - return fmt.Errorf("proto: HostAlias: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostAlias: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostnames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostnames = append(m.Hostnames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPathVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPathVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPathVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - 
intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := HostPathType(dAtA[iNdEx:postIndex]) - m.Type = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ISCSIPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ISCSIPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetPortal = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IQN = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - m.Lun = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ISCSIInterface = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DiscoveryCHAPAuth = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SessionCHAPAuth = bool(v != 0) - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.InitiatorName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ISCSIVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ISCSIVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ISCSIVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPortal", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TargetPortal = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IQN", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IQN = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Lun", wireType) - } - m.Lun = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Lun |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSIInterface", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l 
{ - return io.ErrUnexpectedEOF - } - m.ISCSIInterface = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Portals", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Portals = append(m.Portals, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DiscoveryCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DiscoveryCHAPAuth = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionCHAPAuth", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SessionCHAPAuth = bool(v != 0) - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InitiatorName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= 
l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.InitiatorName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *KeyToPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: KeyToPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: KeyToPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Mode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Lifecycle) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 
64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Lifecycle: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Lifecycle: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PostStart", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PostStart == nil { - m.PostStart = &Handler{} - } - if err := m.PostStart.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreStop", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PreStop == nil { - m.PreStop = &Handler{} - } - if err := m.PreStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LimitRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LimitRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for 
field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LimitRangeItem) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LimitRangeItem: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRangeItem: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = LimitType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = 
postStringIndexmapkey - if m.Max == nil { - m.Max = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Max[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Max[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Min == nil { - m.Min = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil 
{ - return err - } - iNdEx = postmsgIndex - m.Min[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Min[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Default", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Default == nil { - m.Default = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Default[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Default[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultRequest", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.DefaultRequest == nil { - m.DefaultRequest = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DefaultRequest[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.DefaultRequest[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxLimitRequestRatio", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.MaxLimitRequestRatio == nil { - m.MaxLimitRequestRatio = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.MaxLimitRequestRatio[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.MaxLimitRequestRatio[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LimitRangeList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LimitRangeList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRangeList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, LimitRange{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } 
- } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LimitRangeSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LimitRangeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LimitRangeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Limits = append(m.Limits, LimitRangeItem{}) - if err := m.Limits[len(m.Limits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *List) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: List: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: List: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Items = append(m.Items, k8s_io_apimachinery_pkg_runtime.RawExtension{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LoadBalancerIngress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LoadBalancerIngress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LoadBalancerIngress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hostname", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hostname = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LoadBalancerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LoadBalancerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LoadBalancerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ingress = append(m.Ingress, LoadBalancerIngress{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LocalObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LocalObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *LocalVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: LocalVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: LocalVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.FSType = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NFSVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NFSVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NFSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Server", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Server = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Namespace) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Namespace: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Namespace: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Namespace{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Finalizers", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Finalizers = append(m.Finalizers, FinalizerName(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NamespaceStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NamespaceStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NamespaceStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = NamespacePhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Node) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Node: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeAddress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeAddress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeAddress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = NodeAddressType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Address", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Address = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeAffinity) Unmarshal(dAtA []byte) error 
{ - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeAffinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequiredDuringSchedulingIgnoredDuringExecution == nil { - m.RequiredDuringSchedulingIgnoredDuringExecution = &NodeSelector{} - } - if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, PreferredSchedulingTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = NodeConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastHeartbeatTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastHeartbeatTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeConfigSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeConfigSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeConfigSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMap == nil { - m.ConfigMap = &ConfigMapNodeConfigSource{} - } - if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeConfigStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeConfigStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeConfigStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Assigned", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Assigned == nil { - m.Assigned = &NodeConfigSource{} - } - if err := m.Assigned.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Active", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Active == nil { - m.Active = &NodeConfigSource{} - } - if err := m.Active.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastKnownGood", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LastKnownGood == nil { - m.LastKnownGood = &NodeConfigSource{} - } - if err := m.LastKnownGood.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Error = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeDaemonEndpoints) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeDaemonEndpoints: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeDaemonEndpoints: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field KubeletEndpoint", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - 
 [vendored generated protobuf code elided: this portion of the hunk deletes the machine-generated Unmarshal methods for NodeList, NodeProxyOptions, NodeResources, NodeSelector, NodeSelectorRequirement, NodeSelectorTerm, NodeSpec, NodeStatus, NodeSystemInfo, ObjectFieldSelector, ObjectReference, PersistentVolume, PersistentVolumeClaim, PersistentVolumeClaimCondition, PersistentVolumeClaimList, PersistentVolumeClaimSpec, and PersistentVolumeClaimStatus from a vendored Kubernetes generated.pb.go; the deletion continues below]
{ - return io.ErrUnexpectedEOF - } - m.Phase = PersistentVolumeClaimPhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Capacity == nil { - m.Capacity = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, PersistentVolumeClaimCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeClaimVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeClaimVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClaimName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeList: wiretype end group for 
non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PersistentVolume{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GCEPersistentDisk == nil { - m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} - } - if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AWSElasticBlockStore == nil { - m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} - } - if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HostPath == nil { - m.HostPath = &HostPathVolumeSource{} - } - if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Glusterfs == nil { - m.Glusterfs = &GlusterfsVolumeSource{} - } - if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NFS == nil { - m.NFS = &NFSVolumeSource{} - } - if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RBD == nil { - m.RBD = &RBDPersistentVolumeSource{} - } - if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ISCSI == nil { - m.ISCSI = &ISCSIPersistentVolumeSource{} - } - if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Cinder == nil { - m.Cinder = &CinderPersistentVolumeSource{} - } - if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CephFS == nil { - m.CephFS = &CephFSPersistentVolumeSource{} - } - if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FC == nil { - m.FC = &FCVolumeSource{} - } - if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Flocker == nil { - m.Flocker = &FlockerVolumeSource{} - } - if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if 
m.FlexVolume == nil { - m.FlexVolume = &FlexPersistentVolumeSource{} - } - if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AzureFile == nil { - m.AzureFile = &AzureFilePersistentVolumeSource{} - } - if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VsphereVolume == nil { - m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} - } - if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Quobyte == nil { - m.Quobyte = &QuobyteVolumeSource{} - } - if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AzureDisk == nil { - m.AzureDisk = &AzureDiskVolumeSource{} - } - if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PhotonPersistentDisk == 
nil { - m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} - } - if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PortworxVolume == nil { - m.PortworxVolume = &PortworxVolumeSource{} - } - if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ScaleIO == nil { - m.ScaleIO = &ScaleIOPersistentVolumeSource{} - } - if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Local", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Local == nil { - m.Local = &LocalVolumeSource{} - } - if err := m.Local.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StorageOS == nil { - m.StorageOS = &StorageOSPersistentVolumeSource{} - } - if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CSI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CSI == nil { - m.CSI = 
&CSIPersistentVolumeSource{} - } - if err := m.CSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capacity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Capacity == nil { - m.Capacity = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - 
iNdEx = postmsgIndex - m.Capacity[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Capacity[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PersistentVolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AccessModes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AccessModes = append(m.AccessModes, PersistentVolumeAccessMode(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClaimRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClaimRef == nil { - m.ClaimRef = &ObjectReference{} - } - if err := m.ClaimRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeReclaimPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PersistentVolumeReclaimPolicy = PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageClassName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StorageClassName = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := PersistentVolumeMode(dAtA[iNdEx:postIndex]) - m.VolumeMode = &s - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeAffinity", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeAffinity == nil { - m.NodeAffinity = &VolumeNodeAffinity{} - } - if err := m.NodeAffinity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PersistentVolumeStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PersistentVolumeStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PersistentVolumeStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Phase", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Phase = PersistentVolumePhase(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PhotonPersistentDiskVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PhotonPersistentDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PdID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PdID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 
0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Pod) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Pod: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Pod: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAffinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := 
iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAffinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) - if err := m.RequiredDuringSchedulingIgnoredDuringExecution[len(m.RequiredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PreferredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PreferredDuringSchedulingIgnoredDuringExecution = append(m.PreferredDuringSchedulingIgnoredDuringExecution, WeightedPodAffinityTerm{}) - if err := m.PreferredDuringSchedulingIgnoredDuringExecution[len(m.PreferredDuringSchedulingIgnoredDuringExecution)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAffinityTerm) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAffinityTerm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LabelSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 
{ - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.LabelSelector == nil { - m.LabelSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.LabelSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespaces", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespaces = append(m.Namespaces, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TopologyKey", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TopologyKey = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodAntiAffinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodAntiAffinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodAntiAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDuringSchedulingIgnoredDuringExecution", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequiredDuringSchedulingIgnoredDuringExecution = append(m.RequiredDuringSchedulingIgnoredDuringExecution, PodAffinityTerm{}) 
[... removed vendored generated protobuf code elided: this flattened stretch of the deleted generated.pb.go hunk
    carries the tail of the preceding Pod affinity Unmarshal method followed by the machine-generated Unmarshal
    methods for PodAttachOptions, PodCondition, PodDNSConfig, PodDNSConfigOption, PodExecOptions, PodList,
    PodLogOptions, PodPortForwardOptions, PodProxyOptions, PodReadinessGate, PodSecurityContext, PodSignature,
    PodSpec, PodStatus, PodStatusResult, PodTemplate, PodTemplateList, and PodTemplateSpec, each repeating the
    same varint / length-delimited wire-format decoding loop over dAtA ...]
- return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PortworxVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PortworxVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PortworxVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Preconditions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Preconditions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Preconditions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_apimachinery_pkg_types.UID(dAtA[iNdEx:postIndex]) - m.UID = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PreferAvoidPodsEntry) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PreferAvoidPodsEntry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PreferAvoidPodsEntry: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSignature", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PodSignature.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EvictionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EvictionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PreferredSchedulingTerm) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PreferredSchedulingTerm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PreferredSchedulingTerm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) - } - m.Weight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Preference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Preference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Probe) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Probe: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Probe: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Handler", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Handler.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InitialDelaySeconds", wireType) - } - m.InitialDelaySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.InitialDelaySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType) - } - m.TimeoutSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TimeoutSeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PeriodSeconds", wireType) - } - m.PeriodSeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PeriodSeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SuccessThreshold", wireType) - } - m.SuccessThreshold = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.SuccessThreshold |= (int32(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FailureThreshold", wireType) - } - m.FailureThreshold = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FailureThreshold |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ProjectedVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ProjectedVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ProjectedVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Sources", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Sources = append(m.Sources, VolumeProjection{}) - if err := m.Sources[len(m.Sources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *QuobyteVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: QuobyteVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: QuobyteVolumeSource: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Registry", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Registry = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volume", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volume = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Group = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RBDPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l 
{ - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RBDPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RBDPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RBDImage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RBDPool = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RadosUser = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyring = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RBDVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RBDVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RBDVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CephMonitors", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CephMonitors = append(m.CephMonitors, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBDImage", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RBDImage = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBDPool", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RBDPool = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RadosUser", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RadosUser = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Keyring", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Keyring = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RangeAllocation) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RangeAllocation: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RangeAllocation: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Range = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationController) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationController: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationController: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationControllerCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationControllerCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationControllerCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ReplicationControllerConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + 
intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationControllerList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationControllerList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationControllerList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ReplicationController{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationControllerSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationControllerSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationControllerSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType 
!= 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Selector == nil { - m.Selector = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Template == nil { - m.Template = &PodTemplateSpec{} - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ReplicationControllerStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ReplicationControllerStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ReplicationControllerStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field FullyLabeledReplicas", wireType) - } - m.FullyLabeledReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.FullyLabeledReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) - } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, ReplicationControllerCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceFieldSelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceFieldSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceFieldSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ContainerName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ContainerName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resource", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resource = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Divisor", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Divisor.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceQuota) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceQuota: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuota: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceQuotaList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceQuotaList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuotaList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ResourceQuota{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceQuotaSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceQuotaSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuotaSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Hard == nil { - m.Hard = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Hard[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Scopes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Scopes = append(m.Scopes, ResourceQuotaScope(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ScopeSelector == nil { - m.ScopeSelector = &ScopeSelector{} - } - if err := m.ScopeSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceQuotaStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceQuotaStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceQuotaStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Hard == nil { - m.Hard = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Hard[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Hard[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Used", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Used == nil { - m.Used = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Used[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Used[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ResourceRequirements) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResourceRequirements: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResourceRequirements: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Limits", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if 
msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Limits == nil { - m.Limits = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Limits[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Limits[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Requests", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := 
ResourceName(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Requests == nil { - m.Requests = make(ResourceList) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_api_resource.Quantity{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.Requests[ResourceName(mapkey)] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_api_resource.Quantity - m.Requests[ResourceName(mapkey)] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SELinuxOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SELinuxOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field User", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.User = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Role = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex 
- case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Level = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleIOPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleIOPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Gateway = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.System = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &SecretReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SSLEnabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProtectionDomain = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoragePool = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StorageMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScaleIOVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScaleIOVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScaleIOVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Gateway", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Gateway = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field System", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.System = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field SSLEnabled", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.SSLEnabled = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProtectionDomain", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ProtectionDomain = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoragePool", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoragePool = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StorageMode = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.VolumeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScopeSelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopeSelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScopeSelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchExpressions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchExpressions = append(m.MatchExpressions, ScopedResourceSelectorRequirement{}) - if err := m.MatchExpressions[len(m.MatchExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ScopedResourceSelectorRequirement) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ScopedResourceSelectorRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ScopedResourceSelectorRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScopeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ScopeName = ResourceQuotaScope(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = ScopeSelectorOperator(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Secret) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Secret: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Secret: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Data == nil { - m.Data = make(map[string][]byte) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapbyteLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapbyteLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intMapbyteLen := int(mapbyteLen) - if intMapbyteLen < 0 { - return ErrInvalidLengthGenerated - } - postbytesIndex := iNdEx + intMapbyteLen - if postbytesIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := make([]byte, mapbyteLen) - copy(mapvalue, dAtA[iNdEx:postbytesIndex]) - iNdEx = postbytesIndex - m.Data[mapkey] = mapvalue - } else { - var mapvalue []byte - m.Data[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = SecretType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field StringData", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.StringData == nil { - m.StringData = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.StringData[mapkey] = mapvalue - } else { - var mapvalue string - m.StringData[mapkey] = mapvalue - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretEnvSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretEnvSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretEnvSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong 
wireType = %d for field LocalObjectReference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretKeySelector) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretKeySelector: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretKeySelector: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - 
b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Secret{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LocalObjectReference", 
wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LocalObjectReference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecretVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecretVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecretVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, KeyToPath{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultMode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.DefaultMode = &v - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Optional = &b - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SecurityContext) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SecurityContext: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SecurityContext: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Capabilities", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Capabilities == nil { - m.Capabilities = &Capabilities{} - } - if err := m.Capabilities.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.Privileged = &b - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &SELinuxOptions{} - } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RunAsUser = &v - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsNonRoot", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v 
!= 0) - m.RunAsNonRoot = &b - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.ReadOnlyRootFilesystem = &b - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsGroup", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RunAsGroup = &v - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ProcMount", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := ProcMountType(dAtA[iNdEx:postIndex]) - m.ProcMount = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SerializedReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SerializedReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SerializedReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reference", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Reference.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{ - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Service) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Service: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceAccount) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceAccount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secrets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Secrets = append(m.Secrets, ObjectReference{}) - if err := m.Secrets[len(m.Secrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ImagePullSecrets", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ImagePullSecrets = append(m.ImagePullSecrets, LocalObjectReference{}) - if err := m.ImagePullSecrets[len(m.ImagePullSecrets)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AutomountServiceAccountToken", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AutomountServiceAccountToken = &b - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceAccountList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break 
- } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceAccountList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccountList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ServiceAccount{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceAccountTokenProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceAccountTokenProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceAccountTokenProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Audience", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Audience = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpirationSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ExpirationSeconds = &v - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Service{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServicePort) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire 
uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServicePort: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServicePort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Protocol = Protocol(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - m.Port = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Port |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetPort", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetPort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodePort", wireType) - } - m.NodePort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NodePort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceProxyOptions) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceProxyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceProxyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, ServicePort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 
7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Selector == nil { - m.Selector = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Selector[mapkey] = mapvalue - } else { - var mapvalue string - m.Selector[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterIP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = ServiceType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalIPs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) 
<< shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalIPs = append(m.ExternalIPs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinity", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SessionAffinity = ServiceAffinity(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerIP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LoadBalancerIP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancerSourceRanges", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LoadBalancerSourceRanges = append(m.LoadBalancerSourceRanges, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ExternalName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ExternalTrafficPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.ExternalTrafficPolicy = ServiceExternalTrafficPolicyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HealthCheckNodePort", wireType) - } - m.HealthCheckNodePort = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.HealthCheckNodePort |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 13: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PublishNotReadyAddresses", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PublishNotReadyAddresses = bool(v != 0) - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SessionAffinityConfig", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SessionAffinityConfig == nil { - m.SessionAffinityConfig = &SessionAffinityConfig{} - } - if err := m.SessionAffinityConfig.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ServiceStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ServiceStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ServiceStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - 
return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SessionAffinityConfig) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SessionAffinityConfig: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SessionAffinityConfig: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClientIP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ClientIP == nil { - m.ClientIP = &ClientIPConfig{} - } - if err := m.ClientIP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageOSPersistentVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageOSPersistentVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageOSPersistentVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeNamespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &ObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageOSVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageOSVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageOSVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeNamespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeNamespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SecretRef == nil { - m.SecretRef = &LocalObjectReference{} - } - if err := m.SecretRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Sysctl) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Sysctl: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Sysctl: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - 
case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TCPSocketAction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TCPSocketAction: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TCPSocketAction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - 
skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Taint) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Taint: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Taint: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TimeAdded", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.TimeAdded == nil { - m.TimeAdded = &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - } - if err := m.TimeAdded.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Toleration) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Toleration: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Toleration: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Operator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Operator = TolerationOperator(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Value = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Effect", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Effect = TaintEffect(dAtA[iNdEx:postIndex]) - iNdEx 
= postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TolerationSeconds", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.TolerationSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TopologySelectorLabelRequirement) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TopologySelectorLabelRequirement: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TopologySelectorLabelRequirement: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Key = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TopologySelectorTerm) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType 
== 4 { - return fmt.Errorf("proto: TopologySelectorTerm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TopologySelectorTerm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MatchLabelExpressions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MatchLabelExpressions = append(m.MatchLabelExpressions, TopologySelectorLabelRequirement{}) - if err := m.MatchLabelExpressions[len(m.MatchLabelExpressions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TypedLocalObjectReference) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TypedLocalObjectReference: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: TypedLocalObjectReference: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.APIGroup = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Volume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Volume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Volume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.VolumeSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeDevice) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeDevice: wiretype end group for non-group") - } - if 
fieldNum <= 0 { - return fmt.Errorf("proto: VolumeDevice: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DevicePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DevicePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeMount) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeMount: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeMount: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountPath", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MountPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SubPath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SubPath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountPropagation", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := MountPropagationMode(dAtA[iNdEx:postIndex]) - m.MountPropagation = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeNodeAffinity) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeNodeAffinity: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeNodeAffinity: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Required", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Required == nil { - m.Required = &NodeSelector{} - } - if err := m.Required.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - 
iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeProjection) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeProjection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeProjection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Secret == nil { - m.Secret = &SecretProjection{} - } - if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DownwardAPI == nil { - m.DownwardAPI = &DownwardAPIProjection{} - } - if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMap == nil { - m.ConfigMap = &ConfigMapProjection{} - } - if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceAccountToken", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen 
- if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ServiceAccountToken == nil { - m.ServiceAccountToken = &ServiceAccountTokenProjection{} - } - if err := m.ServiceAccountToken.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPath", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HostPath == nil { - m.HostPath = &HostPathVolumeSource{} - } - if err := m.HostPath.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EmptyDir", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.EmptyDir == nil { - m.EmptyDir = &EmptyDirVolumeSource{} - } - if err := m.EmptyDir.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GCEPersistentDisk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GCEPersistentDisk == nil { - m.GCEPersistentDisk = &GCEPersistentDiskVolumeSource{} - } - if err := m.GCEPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AWSElasticBlockStore", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AWSElasticBlockStore == nil { - m.AWSElasticBlockStore = &AWSElasticBlockStoreVolumeSource{} - } - if err := m.AWSElasticBlockStore.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field GitRepo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.GitRepo == nil { - m.GitRepo = &GitRepoVolumeSource{} - } - if err := m.GitRepo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Secret", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Secret == nil { - m.Secret = &SecretVolumeSource{} - } - if err := m.Secret.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NFS == nil { - m.NFS = &NFSVolumeSource{} - } - if err := m.NFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ISCSI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ISCSI == nil { - m.ISCSI = &ISCSIVolumeSource{} - } - if err := m.ISCSI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Glusterfs", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Glusterfs == nil { - m.Glusterfs = &GlusterfsVolumeSource{} - } - if err := m.Glusterfs.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeClaim", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PersistentVolumeClaim == nil { - m.PersistentVolumeClaim = &PersistentVolumeClaimVolumeSource{} - } - if err := m.PersistentVolumeClaim.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RBD", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RBD == nil { - m.RBD = &RBDVolumeSource{} - } - if err := m.RBD.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FlexVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FlexVolume == nil { - m.FlexVolume = &FlexVolumeSource{} - } - if err := m.FlexVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cinder", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Cinder == nil { - m.Cinder = &CinderVolumeSource{} - } - if err := m.Cinder.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CephFS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - 
iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.CephFS == nil { - m.CephFS = &CephFSVolumeSource{} - } - if err := m.CephFS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Flocker", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Flocker == nil { - m.Flocker = &FlockerVolumeSource{} - } - if err := m.Flocker.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 16: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DownwardAPI", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DownwardAPI == nil { - m.DownwardAPI = &DownwardAPIVolumeSource{} - } - if err := m.DownwardAPI.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FC", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.FC == nil { - m.FC = &FCVolumeSource{} - } - if err := m.FC.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureFile", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AzureFile == nil { - m.AzureFile = &AzureFileVolumeSource{} - } - if err := m.AzureFile.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfigMap", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ConfigMap == nil { - m.ConfigMap = &ConfigMapVolumeSource{} - } - if err := m.ConfigMap.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VsphereVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.VsphereVolume == nil { - m.VsphereVolume = &VsphereVirtualDiskVolumeSource{} - } - if err := m.VsphereVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Quobyte", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Quobyte == nil { - m.Quobyte = &QuobyteVolumeSource{} - } - if err := m.Quobyte.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 22: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AzureDisk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AzureDisk == nil { - m.AzureDisk = &AzureDiskVolumeSource{} - } - if err := m.AzureDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 23: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PhotonPersistentDisk", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PhotonPersistentDisk == nil { - m.PhotonPersistentDisk = &PhotonPersistentDiskVolumeSource{} - } - if err := m.PhotonPersistentDisk.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 24: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PortworxVolume", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PortworxVolume == nil { - m.PortworxVolume = &PortworxVolumeSource{} - } - if err := m.PortworxVolume.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 25: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ScaleIO", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ScaleIO == nil { - m.ScaleIO = &ScaleIOVolumeSource{} - } - if err := m.ScaleIO.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 26: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Projected", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Projected == nil { - m.Projected = &ProjectedVolumeSource{} - } - if err := m.Projected.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 27: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StorageOS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.StorageOS == nil { - m.StorageOS = &StorageOSVolumeSource{} - } - if err := m.StorageOS.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VsphereVirtualDiskVolumeSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VsphereVirtualDiskVolumeSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
VolumePath", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumePath = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSType", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.FSType = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoragePolicyName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field StoragePolicyID", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.StoragePolicyID = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *WeightedPodAffinityTerm) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: WeightedPodAffinityTerm: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: WeightedPodAffinityTerm: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Weight", wireType) - } - m.Weight = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Weight |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodAffinityTerm", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PodAffinityTerm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/core/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 12780 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xec, 0x7d, 0x6b, 0x6c, 0x24, 0x47, - 0x7a, 0xd8, 0xf5, 0xcc, 0x90, 0x9c, 0xf9, 0xf8, 0xae, 0x7d, 0x88, 0x4b, 0x69, 0x77, 0x56, 0xad, - 0xbb, 0xd5, 0xea, 0x24, 0x91, 0xa7, 0x95, 0x74, 0x92, 0x4f, 0x3a, 0xd9, 0x24, 0x87, 0xdc, 0x1d, - 0xed, 0x92, 0x3b, 0xaa, 0xe1, 0xee, 0xde, 0xc9, 0xba, 0xf3, 0x35, 0x67, 0x8a, 0x64, 0x8b, 0xc3, - 0xee, 0x51, 0x77, 0x0f, 0x77, 0xa9, 0xd8, 0x40, 0x72, 0x8e, 0x9d, 0x5c, 0x6c, 0x04, 0x87, 0xd8, - 0xc8, 0xc3, 0x36, 0x1c, 0xc0, 0x71, 0x60, 0x3b, 0x4e, 0x82, 0x38, 0x76, 0x6c, 0xc7, 0x67, 0x27, - 0x8e, 0x9d, 0x1f, 0x0e, 0x10, 0x5c, 0x9c, 0x00, 0xc1, 0x19, 0x30, 0xc2, 0xd8, 0x74, 0x1e, 0xf0, - 0x8f, 0x3c, 0x10, 0xe7, 0x47, 0xcc, 0x18, 0x71, 0x50, 0xcf, 0xae, 0xea, 0xe9, 0x9e, 0x19, 0xae, - 0xb8, 0x94, 0x7c, 0xb8, 0x7f, 0x33, 0xf5, 0x7d, 0xf5, 0x55, 0x75, 0x3d, 0xbf, 0xef, 0xab, 0xef, - 0x01, 0xaf, 0xed, 0xbc, 0x1a, 0xce, 0xb9, 0xfe, 0xfc, 0x4e, 0x67, 0x83, 0x04, 0x1e, 0x89, 0x48, - 0x38, 0xbf, 0x47, 0xbc, 0xa6, 0x1f, 0xcc, 0x0b, 0x80, 0xd3, 0x76, 0xe7, 0x1b, 0x7e, 0x40, 0xe6, - 0xf7, 0x5e, 0x98, 0xdf, 0x22, 0x1e, 0x09, 0x9c, 0x88, 0x34, 0xe7, 0xda, 0x81, 0x1f, 0xf9, 0x08, - 0x71, 0x9c, 0x39, 0xa7, 0xed, 0xce, 0x51, 0x9c, 0xb9, 0xbd, 0x17, 0x66, 0x9f, 0xdf, 0x72, 0xa3, - 0xed, 0xce, 0xc6, 0x5c, 0xc3, 0xdf, 0x9d, 0xdf, 0xf2, 0xb7, 0xfc, 0x79, 0x86, 0xba, 0xd1, 0xd9, - 0x64, 0xff, 0xd8, 0x1f, 0xf6, 0x8b, 0x93, 0x98, 0x7d, 0x29, 0x6e, 0x66, 0xd7, 0x69, 0x6c, 0xbb, - 0x1e, 0x09, 0xf6, 0xe7, 0xdb, 0x3b, 0x5b, 0xac, 0xdd, 0x80, 0x84, 0x7e, 0x27, 0x68, 0x90, 0x64, - 0xc3, 0x3d, 0x6b, 0x85, 0xf3, 0xbb, 0x24, 0x72, 0x52, 0xba, 0x3b, 0x3b, 0x9f, 0x55, 0x2b, 0xe8, - 0x78, 0x91, 0xbb, 0xdb, 0xdd, 0xcc, 0xa7, 0xfb, 0x55, 0x08, 0x1b, 0xdb, 0x64, 0xd7, 0xe9, 0xaa, - 0xf7, 0x62, 0x56, 0xbd, 0x4e, 0xe4, 0xb6, 0xe6, 0x5d, 0x2f, 0x0a, 0xa3, 0x20, 0x59, 0xc9, 0xfe, - 0x86, 0x05, 0x97, 0x17, 0xee, 0xd5, 0x97, 0x5b, 0x4e, 0x18, 0xb9, 0x8d, 0xc5, 0x96, 0xdf, 0xd8, - 0xa9, 0x47, 0x7e, 0x40, 0xee, 0xfa, 0xad, 0xce, 0x2e, 0xa9, 0xb3, 0x81, 0x40, 0xcf, 0x41, 0x71, - 0x8f, 0xfd, 0xaf, 0x56, 0x66, 0xac, 0xcb, 0xd6, 0xd5, 0xd2, 0xe2, 0xd4, 0x6f, 0x1d, 0x94, 0x3f, - 0x76, 0x78, 0x50, 0x2e, 0xde, 0x15, 0xe5, 0x58, 0x61, 0xa0, 0x2b, 0x30, 0xbc, 0x19, 0xae, 0xef, - 0xb7, 0xc9, 0x4c, 0x8e, 0xe1, 0x4e, 0x08, 0xdc, 0xe1, 0x95, 0x3a, 0x2d, 0xc5, 0x02, 0x8a, 0xe6, - 0xa1, 0xd4, 0x76, 0x82, 0xc8, 0x8d, 0x5c, 0xdf, 0x9b, 0xc9, 0x5f, 0xb6, 0xae, 0x0e, 0x2d, 0x4e, - 0x0b, 0xd4, 0x52, 0x4d, 0x02, 0x70, 0x8c, 0x43, 0xbb, 0x11, 0x10, 0xa7, 0x79, 0xdb, 0x6b, 0xed, - 0xcf, 0x14, 0x2e, 0x5b, 0x57, 0x8b, 0x71, 0x37, 0xb0, 0x28, 0xc7, 0x0a, 0xc3, 0xfe, 0x91, 0x1c, - 0x14, 0x17, 0x36, 0x37, 0x5d, 0xcf, 0x8d, 0xf6, 0xd1, 0x5d, 0x18, 0xf3, 0xfc, 0x26, 0x91, 0xff, - 0xd9, 0x57, 0x8c, 0x5e, 0xbb, 0x3c, 0xd7, 0xbd, 0x94, 0xe6, 0xd6, 0x34, 0xbc, 0xc5, 0xa9, 0xc3, - 0x83, 0xf2, 0x98, 0x5e, 0x82, 0x0d, 0x3a, 0x08, 0xc3, 0x68, 0xdb, 0x6f, 0x2a, 0xb2, 0x39, 0x46, - 0xb6, 0x9c, 0x46, 0xb6, 0x16, 0xa3, 0x2d, 0x4e, 0x1e, 0x1e, 0x94, 0x47, 0xb5, 0x02, 0xac, 0x13, - 0x41, 0x1b, 0x30, 0x49, 0xff, 0x7a, 0x91, 0xab, 0xe8, 0xe6, 0x19, 0xdd, 0xa7, 0xb2, 0xe8, 0x6a, - 0xa8, 0x8b, 0x67, 0x0e, 0x0f, 0xca, 0x93, 0x89, 0x42, 0x9c, 0x24, 0x68, 0xbf, 0x0f, 0x13, 0x0b, - 0x51, 0xe4, 0x34, 0xb6, 0x49, 0x93, 0xcf, 0x20, 0x7a, 0x09, 0x0a, 0x9e, 0xb3, 0x4b, 0xc4, 0xfc, - 0x5e, 0x16, 0x03, 0x5b, 0x58, 0x73, 0x76, 0xc9, 0xd1, 0x41, 0x79, 0xea, 0x8e, 0xe7, 0xbe, 0xd7, - 0x11, 0xab, 0x82, 0x96, 0x61, 0x86, 0x8d, 0xae, 0x01, 0x34, 0xc9, 0x9e, 0xdb, 0x20, 0x35, 0x27, - 0xda, 0x16, 0xf3, 0x8d, 0x44, 0x5d, 0xa8, 0x28, 0x08, 0xd6, 0xb0, 0xec, 0x07, 0x50, 
0x5a, 0xd8, - 0xf3, 0xdd, 0x66, 0xcd, 0x6f, 0x86, 0x68, 0x07, 0x26, 0xdb, 0x01, 0xd9, 0x24, 0x81, 0x2a, 0x9a, - 0xb1, 0x2e, 0xe7, 0xaf, 0x8e, 0x5e, 0xbb, 0x9a, 0xfa, 0xb1, 0x26, 0xea, 0xb2, 0x17, 0x05, 0xfb, - 0x8b, 0x8f, 0x89, 0xf6, 0x26, 0x13, 0x50, 0x9c, 0xa4, 0x6c, 0xff, 0xcb, 0x1c, 0x9c, 0x5b, 0x78, - 0xbf, 0x13, 0x90, 0x8a, 0x1b, 0xee, 0x24, 0x57, 0x78, 0xd3, 0x0d, 0x77, 0xd6, 0xe2, 0x11, 0x50, - 0x4b, 0xab, 0x22, 0xca, 0xb1, 0xc2, 0x40, 0xcf, 0xc3, 0x08, 0xfd, 0x7d, 0x07, 0x57, 0xc5, 0x27, - 0x9f, 0x11, 0xc8, 0xa3, 0x15, 0x27, 0x72, 0x2a, 0x1c, 0x84, 0x25, 0x0e, 0x5a, 0x85, 0xd1, 0x06, - 0xdb, 0x90, 0x5b, 0xab, 0x7e, 0x93, 0xb0, 0xc9, 0x2c, 0x2d, 0x3e, 0x4b, 0xd1, 0x97, 0xe2, 0xe2, - 0xa3, 0x83, 0xf2, 0x0c, 0xef, 0x9b, 0x20, 0xa1, 0xc1, 0xb0, 0x5e, 0x1f, 0xd9, 0x6a, 0x7f, 0x15, - 0x18, 0x25, 0x48, 0xd9, 0x5b, 0x57, 0xb5, 0xad, 0x32, 0xc4, 0xb6, 0xca, 0x58, 0xfa, 0x36, 0x41, - 0x2f, 0x40, 0x61, 0xc7, 0xf5, 0x9a, 0x33, 0xc3, 0x8c, 0xd6, 0x45, 0x3a, 0xe7, 0x37, 0x5d, 0xaf, - 0x79, 0x74, 0x50, 0x9e, 0x36, 0xba, 0x43, 0x0b, 0x31, 0x43, 0xb5, 0xff, 0xc8, 0x82, 0x32, 0x83, - 0xad, 0xb8, 0x2d, 0x52, 0x23, 0x41, 0xe8, 0x86, 0x11, 0xf1, 0x22, 0x63, 0x40, 0xaf, 0x01, 0x84, - 0xa4, 0x11, 0x90, 0x48, 0x1b, 0x52, 0xb5, 0x30, 0xea, 0x0a, 0x82, 0x35, 0x2c, 0x7a, 0x20, 0x84, - 0xdb, 0x4e, 0xc0, 0xd6, 0x97, 0x18, 0x58, 0x75, 0x20, 0xd4, 0x25, 0x00, 0xc7, 0x38, 0xc6, 0x81, - 0x90, 0xef, 0x77, 0x20, 0xa0, 0xcf, 0xc2, 0x64, 0xdc, 0x58, 0xd8, 0x76, 0x1a, 0x72, 0x00, 0xd9, - 0x96, 0xa9, 0x9b, 0x20, 0x9c, 0xc4, 0xb5, 0xff, 0x9e, 0x25, 0x16, 0x0f, 0xfd, 0xea, 0x8f, 0xf8, - 0xb7, 0xda, 0xbf, 0x6c, 0xc1, 0xc8, 0xa2, 0xeb, 0x35, 0x5d, 0x6f, 0x0b, 0x7d, 0x09, 0x8a, 0xf4, - 0x6e, 0x6a, 0x3a, 0x91, 0x23, 0xce, 0xbd, 0x4f, 0x69, 0x7b, 0x4b, 0x5d, 0x15, 0x73, 0xed, 0x9d, - 0x2d, 0x5a, 0x10, 0xce, 0x51, 0x6c, 0xba, 0xdb, 0x6e, 0x6f, 0xbc, 0x4b, 0x1a, 0xd1, 0x2a, 0x89, - 0x9c, 0xf8, 0x73, 0xe2, 0x32, 0xac, 0xa8, 0xa2, 0x9b, 0x30, 0x1c, 0x39, 0xc1, 0x16, 0x89, 0xc4, - 0x01, 0x98, 0x7a, 0x50, 0xf1, 0x9a, 0x98, 0xee, 0x48, 0xe2, 0x35, 0x48, 0x7c, 0x2d, 0xac, 0xb3, - 0xaa, 0x58, 0x90, 0xb0, 0xff, 0xca, 0x30, 0x5c, 0x58, 0xaa, 0x57, 0x33, 0xd6, 0xd5, 0x15, 0x18, - 0x6e, 0x06, 0xee, 0x1e, 0x09, 0xc4, 0x38, 0x2b, 0x2a, 0x15, 0x56, 0x8a, 0x05, 0x14, 0xbd, 0x0a, - 0x63, 0xfc, 0x42, 0xba, 0xe1, 0x78, 0xcd, 0x96, 0x1c, 0xe2, 0xb3, 0x02, 0x7b, 0xec, 0xae, 0x06, - 0xc3, 0x06, 0xe6, 0x31, 0x17, 0xd5, 0x95, 0xc4, 0x66, 0xcc, 0xba, 0xec, 0xbe, 0x62, 0xc1, 0x14, - 0x6f, 0x66, 0x21, 0x8a, 0x02, 0x77, 0xa3, 0x13, 0x91, 0x70, 0x66, 0x88, 0x9d, 0x74, 0x4b, 0x69, - 0xa3, 0x95, 0x39, 0x02, 0x73, 0x77, 0x13, 0x54, 0xf8, 0x21, 0x38, 0x23, 0xda, 0x9d, 0x4a, 0x82, - 0x71, 0x57, 0xb3, 0xe8, 0x7b, 0x2d, 0x98, 0x6d, 0xf8, 0x5e, 0x14, 0xf8, 0xad, 0x16, 0x09, 0x6a, - 0x9d, 0x8d, 0x96, 0x1b, 0x6e, 0xf3, 0x75, 0x8a, 0xc9, 0x26, 0x3b, 0x09, 0x32, 0xe6, 0x50, 0x21, - 0x89, 0x39, 0xbc, 0x74, 0x78, 0x50, 0x9e, 0x5d, 0xca, 0x24, 0x85, 0x7b, 0x34, 0x83, 0x76, 0x00, - 0xd1, 0xab, 0xb4, 0x1e, 0x39, 0x5b, 0x24, 0x6e, 0x7c, 0x64, 0xf0, 0xc6, 0xcf, 0x1f, 0x1e, 0x94, - 0xd1, 0x5a, 0x17, 0x09, 0x9c, 0x42, 0x16, 0xbd, 0x07, 0x67, 0x69, 0x69, 0xd7, 0xb7, 0x16, 0x07, - 0x6f, 0x6e, 0xe6, 0xf0, 0xa0, 0x7c, 0x76, 0x2d, 0x85, 0x08, 0x4e, 0x25, 0x3d, 0xbb, 0x04, 0xe7, - 0x52, 0xa7, 0x0a, 0x4d, 0x41, 0x7e, 0x87, 0x70, 0x16, 0xa4, 0x84, 0xe9, 0x4f, 0x74, 0x16, 0x86, - 0xf6, 0x9c, 0x56, 0x47, 0xac, 0x52, 0xcc, 0xff, 0x7c, 0x26, 0xf7, 0xaa, 0x65, 0x37, 0x60, 0x6c, - 0xc9, 0x69, 0x3b, 0x1b, 0x6e, 0xcb, 0x8d, 0x5c, 0x12, 0xa2, 0xa7, 0x21, 0xef, 0x34, 0x9b, 0xec, - 0x8a, 0x2c, 
0x2d, 0x9e, 0x3b, 0x3c, 0x28, 0xe7, 0x17, 0x9a, 0xf4, 0xac, 0x06, 0x85, 0xb5, 0x8f, - 0x29, 0x06, 0xfa, 0x24, 0x14, 0x9a, 0x81, 0xdf, 0x9e, 0xc9, 0x31, 0x4c, 0x3a, 0x54, 0x85, 0x4a, - 0xe0, 0xb7, 0x13, 0xa8, 0x0c, 0xc7, 0xfe, 0xf5, 0x1c, 0x3c, 0xb1, 0x44, 0xda, 0xdb, 0x2b, 0xf5, - 0x8c, 0x4d, 0x77, 0x15, 0x8a, 0xbb, 0xbe, 0xe7, 0x46, 0x7e, 0x10, 0x8a, 0xa6, 0xd9, 0x6d, 0xb2, - 0x2a, 0xca, 0xb0, 0x82, 0xa2, 0xcb, 0x50, 0x68, 0xc7, 0x9c, 0xc0, 0x98, 0xe4, 0x22, 0x18, 0x0f, - 0xc0, 0x20, 0x14, 0xa3, 0x13, 0x92, 0x40, 0xdc, 0x82, 0x0a, 0xe3, 0x4e, 0x48, 0x02, 0xcc, 0x20, - 0xf1, 0x71, 0x4a, 0x0f, 0x5a, 0xb1, 0xad, 0x12, 0xc7, 0x29, 0x85, 0x60, 0x0d, 0x0b, 0xd5, 0xa0, - 0x14, 0xaa, 0x49, 0x1d, 0x1a, 0x7c, 0x52, 0xc7, 0xd9, 0x79, 0xab, 0x66, 0x32, 0x26, 0x62, 0x1c, - 0x03, 0xc3, 0x7d, 0xcf, 0xdb, 0xaf, 0xe5, 0x00, 0xf1, 0x21, 0xfc, 0x33, 0x36, 0x70, 0x77, 0xba, - 0x07, 0x2e, 0x95, 0xf3, 0xba, 0xe5, 0x37, 0x9c, 0x56, 0xf2, 0x08, 0x3f, 0xa9, 0xd1, 0xfb, 0xdf, - 0x16, 0x3c, 0xb1, 0xe4, 0x7a, 0x4d, 0x12, 0x64, 0x2c, 0xc0, 0x47, 0x23, 0x80, 0x1c, 0xef, 0xa4, - 0x37, 0x96, 0x58, 0xe1, 0x04, 0x96, 0x98, 0xfd, 0x3f, 0x2c, 0x40, 0xfc, 0xb3, 0x3f, 0x72, 0x1f, - 0x7b, 0xa7, 0xfb, 0x63, 0x4f, 0x60, 0x59, 0xd8, 0xb7, 0x60, 0x62, 0xa9, 0xe5, 0x12, 0x2f, 0xaa, - 0xd6, 0x96, 0x7c, 0x6f, 0xd3, 0xdd, 0x42, 0x9f, 0x81, 0x09, 0x2a, 0xd3, 0xfa, 0x9d, 0xa8, 0x4e, - 0x1a, 0xbe, 0xc7, 0xd8, 0x7f, 0x2a, 0x09, 0xa2, 0xc3, 0x83, 0xf2, 0xc4, 0xba, 0x01, 0xc1, 0x09, - 0x4c, 0xfb, 0x77, 0xe9, 0xf8, 0xf9, 0xbb, 0x6d, 0xdf, 0x23, 0x5e, 0xb4, 0xe4, 0x7b, 0x4d, 0x2e, - 0x26, 0x7e, 0x06, 0x0a, 0x11, 0x1d, 0x0f, 0x3e, 0x76, 0x57, 0xe4, 0x46, 0xa1, 0xa3, 0x70, 0x74, - 0x50, 0x3e, 0xdf, 0x5d, 0x83, 0x8d, 0x13, 0xab, 0x83, 0xbe, 0x0d, 0x86, 0xc3, 0xc8, 0x89, 0x3a, - 0xa1, 0x18, 0xcd, 0x27, 0xe5, 0x68, 0xd6, 0x59, 0xe9, 0xd1, 0x41, 0x79, 0x52, 0x55, 0xe3, 0x45, - 0x58, 0x54, 0x40, 0xcf, 0xc0, 0xc8, 0x2e, 0x09, 0x43, 0x67, 0x4b, 0x72, 0xf8, 0x93, 0xa2, 0xee, - 0xc8, 0x2a, 0x2f, 0xc6, 0x12, 0x8e, 0x9e, 0x82, 0x21, 0x12, 0x04, 0x7e, 0x20, 0xf6, 0xe8, 0xb8, - 0x40, 0x1c, 0x5a, 0xa6, 0x85, 0x98, 0xc3, 0xec, 0x7f, 0x63, 0xc1, 0xa4, 0xea, 0x2b, 0x6f, 0xeb, - 0x14, 0x58, 0xb9, 0xb7, 0x01, 0x1a, 0xf2, 0x03, 0x43, 0x76, 0x7b, 0x8c, 0x5e, 0xbb, 0x92, 0xca, - 0xa0, 0x74, 0x0d, 0x63, 0x4c, 0x59, 0x15, 0x85, 0x58, 0xa3, 0x66, 0xff, 0x9a, 0x05, 0x67, 0x12, - 0x5f, 0x74, 0xcb, 0x0d, 0x23, 0xf4, 0x4e, 0xd7, 0x57, 0xcd, 0x0d, 0xf6, 0x55, 0xb4, 0x36, 0xfb, - 0x26, 0xb5, 0x94, 0x65, 0x89, 0xf6, 0x45, 0x37, 0x60, 0xc8, 0x8d, 0xc8, 0xae, 0xfc, 0x98, 0xa7, - 0x7a, 0x7e, 0x0c, 0xef, 0x55, 0x3c, 0x23, 0x55, 0x5a, 0x13, 0x73, 0x02, 0xf6, 0x0f, 0xe5, 0xa1, - 0xc4, 0x97, 0xed, 0xaa, 0xd3, 0x3e, 0x85, 0xb9, 0xa8, 0x42, 0x81, 0x51, 0xe7, 0x1d, 0x7f, 0x3a, - 0xbd, 0xe3, 0xa2, 0x3b, 0x73, 0x54, 0x4e, 0xe3, 0xac, 0xa0, 0xba, 0x1a, 0x68, 0x11, 0x66, 0x24, - 0x90, 0x03, 0xb0, 0xe1, 0x7a, 0x4e, 0xb0, 0x4f, 0xcb, 0x66, 0xf2, 0x8c, 0xe0, 0xf3, 0xbd, 0x09, - 0x2e, 0x2a, 0x7c, 0x4e, 0x56, 0xf5, 0x35, 0x06, 0x60, 0x8d, 0xe8, 0xec, 0x2b, 0x50, 0x52, 0xc8, - 0xc7, 0xe1, 0x71, 0x66, 0x3f, 0x0b, 0x93, 0x89, 0xb6, 0xfa, 0x55, 0x1f, 0xd3, 0x59, 0xa4, 0x5f, - 0x61, 0xa7, 0x80, 0xe8, 0xf5, 0xb2, 0xb7, 0x27, 0x4e, 0xd1, 0xf7, 0xe1, 0x6c, 0x2b, 0xe5, 0x70, - 0x12, 0x53, 0x35, 0xf8, 0x61, 0xf6, 0x84, 0xf8, 0xec, 0xb3, 0x69, 0x50, 0x9c, 0xda, 0x06, 0xbd, - 0xf6, 0xfd, 0x36, 0x5d, 0xf3, 0x4e, 0x8b, 0xf5, 0x57, 0x48, 0xdf, 0xb7, 0x45, 0x19, 0x56, 0x50, - 0x7a, 0x84, 0x9d, 0x55, 0x9d, 0xbf, 0x49, 0xf6, 0xeb, 0xa4, 0x45, 0x1a, 0x91, 0x1f, 0x7c, 0xa8, - 0xdd, 0xbf, 0xc8, 0x47, 0x9f, 0x9f, 
0x80, 0xa3, 0x82, 0x40, 0xfe, 0x26, 0xd9, 0xe7, 0x53, 0xa1, - 0x7f, 0x5d, 0xbe, 0xe7, 0xd7, 0xfd, 0x9c, 0x05, 0xe3, 0xea, 0xeb, 0x4e, 0x61, 0xab, 0x2f, 0x9a, - 0x5b, 0xfd, 0x62, 0xcf, 0x05, 0x9e, 0xb1, 0xc9, 0xbf, 0x96, 0x83, 0x0b, 0x0a, 0x87, 0xb2, 0xfb, - 0xfc, 0x8f, 0x58, 0x55, 0xf3, 0x50, 0xf2, 0x94, 0xf6, 0xc0, 0x32, 0xc5, 0xf6, 0x58, 0x77, 0x10, - 0xe3, 0x50, 0xae, 0xcd, 0x8b, 0x45, 0xfc, 0x31, 0x5d, 0xad, 0x26, 0x54, 0x68, 0x8b, 0x90, 0xef, - 0xb8, 0x4d, 0x71, 0x67, 0x7c, 0x4a, 0x8e, 0xf6, 0x9d, 0x6a, 0xe5, 0xe8, 0xa0, 0xfc, 0x64, 0x96, - 0x4a, 0x97, 0x5e, 0x56, 0xe1, 0xdc, 0x9d, 0x6a, 0x05, 0xd3, 0xca, 0x68, 0x01, 0x26, 0xa5, 0xd6, - 0xfa, 0x2e, 0xe5, 0xa0, 0x7c, 0x4f, 0x5c, 0x2d, 0x4a, 0x37, 0x86, 0x4d, 0x30, 0x4e, 0xe2, 0xa3, - 0x0a, 0x4c, 0xed, 0x74, 0x36, 0x48, 0x8b, 0x44, 0xfc, 0x83, 0x6f, 0x12, 0xae, 0x39, 0x2a, 0xc5, - 0xa2, 0xe5, 0xcd, 0x04, 0x1c, 0x77, 0xd5, 0xb0, 0xff, 0x94, 0x1d, 0xf1, 0x62, 0xf4, 0x6a, 0x81, - 0x4f, 0x17, 0x16, 0xa5, 0xfe, 0x61, 0x2e, 0xe7, 0x41, 0x56, 0xc5, 0x4d, 0xb2, 0xbf, 0xee, 0x53, - 0x66, 0x3b, 0x7d, 0x55, 0x18, 0x6b, 0xbe, 0xd0, 0x73, 0xcd, 0xff, 0x42, 0x0e, 0xce, 0xa9, 0x11, - 0x30, 0xf8, 0xba, 0x3f, 0xeb, 0x63, 0xf0, 0x02, 0x8c, 0x36, 0xc9, 0xa6, 0xd3, 0x69, 0x45, 0x4a, - 0x8d, 0x39, 0xc4, 0x55, 0xd9, 0x95, 0xb8, 0x18, 0xeb, 0x38, 0xc7, 0x18, 0xb6, 0x9f, 0x1c, 0x65, - 0x77, 0x6b, 0xe4, 0xd0, 0x35, 0xae, 0x76, 0x8d, 0x95, 0xb9, 0x6b, 0x9e, 0x82, 0x21, 0x77, 0x97, - 0xf2, 0x5a, 0x39, 0x93, 0x85, 0xaa, 0xd2, 0x42, 0xcc, 0x61, 0xe8, 0x13, 0x30, 0xd2, 0xf0, 0x77, - 0x77, 0x1d, 0xaf, 0xc9, 0xae, 0xbc, 0xd2, 0xe2, 0x28, 0x65, 0xc7, 0x96, 0x78, 0x11, 0x96, 0x30, - 0xf4, 0x04, 0x14, 0x9c, 0x60, 0x2b, 0x9c, 0x29, 0x30, 0x9c, 0x22, 0x6d, 0x69, 0x21, 0xd8, 0x0a, - 0x31, 0x2b, 0xa5, 0x52, 0xd5, 0x7d, 0x3f, 0xd8, 0x71, 0xbd, 0xad, 0x8a, 0x1b, 0x88, 0x2d, 0xa1, - 0xee, 0xc2, 0x7b, 0x0a, 0x82, 0x35, 0x2c, 0xb4, 0x02, 0x43, 0x6d, 0x3f, 0x88, 0xc2, 0x99, 0x61, - 0x36, 0xdc, 0x4f, 0x66, 0x1c, 0x44, 0xfc, 0x6b, 0x6b, 0x7e, 0x10, 0xc5, 0x1f, 0x40, 0xff, 0x85, - 0x98, 0x57, 0x47, 0xdf, 0x06, 0x79, 0xe2, 0xed, 0xcd, 0x8c, 0x30, 0x2a, 0xb3, 0x69, 0x54, 0x96, - 0xbd, 0xbd, 0xbb, 0x4e, 0x10, 0x9f, 0xd2, 0xcb, 0xde, 0x1e, 0xa6, 0x75, 0xd0, 0xe7, 0xa1, 0x24, - 0xb7, 0x78, 0x28, 0xd4, 0x1c, 0xa9, 0x4b, 0x4c, 0x1e, 0x0c, 0x98, 0xbc, 0xd7, 0x71, 0x03, 0xb2, - 0x4b, 0xbc, 0x28, 0x8c, 0xcf, 0x34, 0x09, 0x0d, 0x71, 0x4c, 0x0d, 0x7d, 0x5e, 0xea, 0xd6, 0x56, - 0xfd, 0x8e, 0x17, 0x85, 0x33, 0x25, 0xd6, 0xbd, 0xd4, 0x57, 0x8f, 0xbb, 0x31, 0x5e, 0x52, 0xf9, - 0xc6, 0x2b, 0x63, 0x83, 0x14, 0xc2, 0x30, 0xde, 0x72, 0xf7, 0x88, 0x47, 0xc2, 0xb0, 0x16, 0xf8, - 0x1b, 0x64, 0x06, 0x58, 0xcf, 0x2f, 0xa4, 0x3f, 0x06, 0xf8, 0x1b, 0x64, 0x71, 0xfa, 0xf0, 0xa0, - 0x3c, 0x7e, 0x4b, 0xaf, 0x83, 0x4d, 0x12, 0xe8, 0x0e, 0x4c, 0x50, 0xb9, 0xc6, 0x8d, 0x89, 0x8e, - 0xf6, 0x23, 0xca, 0xa4, 0x0f, 0x6c, 0x54, 0xc2, 0x09, 0x22, 0xe8, 0x4d, 0x28, 0xb5, 0xdc, 0x4d, - 0xd2, 0xd8, 0x6f, 0xb4, 0xc8, 0xcc, 0x18, 0xa3, 0x98, 0xba, 0xad, 0x6e, 0x49, 0x24, 0x2e, 0x17, - 0xa9, 0xbf, 0x38, 0xae, 0x8e, 0xee, 0xc2, 0xf9, 0x88, 0x04, 0xbb, 0xae, 0xe7, 0xd0, 0xed, 0x20, - 0xe4, 0x05, 0xf6, 0xa4, 0x32, 0xce, 0xd6, 0xdb, 0x25, 0x31, 0x74, 0xe7, 0xd7, 0x53, 0xb1, 0x70, - 0x46, 0x6d, 0x74, 0x1b, 0x26, 0xd9, 0x4e, 0xa8, 0x75, 0x5a, 0xad, 0x9a, 0xdf, 0x72, 0x1b, 0xfb, - 0x33, 0x13, 0x8c, 0xe0, 0x27, 0xe4, 0xbd, 0x50, 0x35, 0xc1, 0x47, 0x07, 0x65, 0x88, 0xff, 0xe1, - 0x64, 0x6d, 0xb4, 0xc1, 0x74, 0xe8, 0x9d, 0xc0, 0x8d, 0xf6, 0xe9, 0xfa, 0x25, 0x0f, 0xa2, 0x99, - 0xc9, 0x9e, 0xa2, 0xb0, 0x8e, 0xaa, 0x14, 0xed, 0x7a, 0x21, 
0x4e, 0x12, 0xa4, 0x5b, 0x3b, 0x8c, - 0x9a, 0xae, 0x37, 0x33, 0xc5, 0x4e, 0x0c, 0xb5, 0x33, 0xea, 0xb4, 0x10, 0x73, 0x18, 0xd3, 0x9f, - 0xd3, 0x1f, 0xb7, 0xe9, 0x09, 0x3a, 0xcd, 0x10, 0x63, 0xfd, 0xb9, 0x04, 0xe0, 0x18, 0x87, 0x32, - 0x35, 0x51, 0xb4, 0x3f, 0x83, 0x18, 0xaa, 0xda, 0x2e, 0xeb, 0xeb, 0x9f, 0xc7, 0xb4, 0x1c, 0xdd, - 0x82, 0x11, 0xe2, 0xed, 0xad, 0x04, 0xfe, 0xee, 0xcc, 0x99, 0xec, 0x3d, 0xbb, 0xcc, 0x51, 0xf8, - 0x81, 0x1e, 0x0b, 0x78, 0xa2, 0x18, 0x4b, 0x12, 0xe8, 0x01, 0xcc, 0xa4, 0xcc, 0x08, 0x9f, 0x80, - 0xb3, 0x6c, 0x02, 0x5e, 0x17, 0x75, 0x67, 0xd6, 0x33, 0xf0, 0x8e, 0x7a, 0xc0, 0x70, 0x26, 0x75, - 0xf4, 0x05, 0x18, 0xe7, 0x1b, 0x8a, 0x3f, 0xbe, 0x85, 0x33, 0xe7, 0xd8, 0xd7, 0x5c, 0xce, 0xde, - 0x9c, 0x1c, 0x71, 0xf1, 0x9c, 0xe8, 0xd0, 0xb8, 0x5e, 0x1a, 0x62, 0x93, 0x9a, 0xbd, 0x01, 0x13, - 0xea, 0xdc, 0x62, 0x4b, 0x07, 0x95, 0x61, 0x88, 0x71, 0x3b, 0x42, 0xbf, 0x55, 0xa2, 0x33, 0xc5, - 0x38, 0x21, 0xcc, 0xcb, 0xd9, 0x4c, 0xb9, 0xef, 0x93, 0xc5, 0xfd, 0x88, 0x70, 0xa9, 0x3a, 0xaf, - 0xcd, 0x94, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0x7f, 0x9c, 0x6b, 0x8c, 0x0f, 0xc7, 0x01, 0xae, 0x83, - 0xe7, 0xa0, 0xb8, 0xed, 0x87, 0x11, 0xc5, 0x66, 0x6d, 0x0c, 0xc5, 0x7c, 0xe2, 0x0d, 0x51, 0x8e, - 0x15, 0x06, 0x7a, 0x0d, 0xc6, 0x1b, 0x7a, 0x03, 0xe2, 0x2e, 0x53, 0x43, 0x60, 0xb4, 0x8e, 0x4d, - 0x5c, 0xf4, 0x2a, 0x14, 0xd9, 0xd3, 0x79, 0xc3, 0x6f, 0x09, 0x26, 0x4b, 0x5e, 0xc8, 0xc5, 0x9a, - 0x28, 0x3f, 0xd2, 0x7e, 0x63, 0x85, 0x8d, 0xae, 0xc0, 0x30, 0xed, 0x42, 0xb5, 0x26, 0x6e, 0x11, - 0xa5, 0xaa, 0xb9, 0xc1, 0x4a, 0xb1, 0x80, 0xda, 0x7f, 0x2d, 0xa7, 0x8d, 0x32, 0x95, 0x48, 0x09, - 0xaa, 0xc1, 0xc8, 0x7d, 0xc7, 0x8d, 0x5c, 0x6f, 0x4b, 0xb0, 0x0b, 0xcf, 0xf4, 0xbc, 0x52, 0x58, - 0xa5, 0x7b, 0xbc, 0x02, 0xbf, 0xf4, 0xc4, 0x1f, 0x2c, 0xc9, 0x50, 0x8a, 0x41, 0xc7, 0xf3, 0x28, - 0xc5, 0xdc, 0xa0, 0x14, 0x31, 0xaf, 0xc0, 0x29, 0x8a, 0x3f, 0x58, 0x92, 0x41, 0xef, 0x00, 0xc8, - 0x65, 0x49, 0x9a, 0xe2, 0xc9, 0xfa, 0xb9, 0xfe, 0x44, 0xd7, 0x55, 0x9d, 0xc5, 0x09, 0x7a, 0xa5, - 0xc6, 0xff, 0xb1, 0x46, 0xcf, 0x8e, 0x18, 0x5b, 0xd5, 0xdd, 0x19, 0xf4, 0x9d, 0xf4, 0x24, 0x70, - 0x82, 0x88, 0x34, 0x17, 0x22, 0x31, 0x38, 0x9f, 0x1c, 0x4c, 0xa6, 0x58, 0x77, 0x77, 0x89, 0x7e, - 0x6a, 0x08, 0x22, 0x38, 0xa6, 0x67, 0xff, 0x52, 0x1e, 0x66, 0xb2, 0xba, 0x4b, 0x17, 0x1d, 0x79, - 0xe0, 0x46, 0x4b, 0x94, 0x1b, 0xb2, 0xcc, 0x45, 0xb7, 0x2c, 0xca, 0xb1, 0xc2, 0xa0, 0xb3, 0x1f, - 0xba, 0x5b, 0x52, 0x24, 0x1c, 0x8a, 0x67, 0xbf, 0xce, 0x4a, 0xb1, 0x80, 0x52, 0xbc, 0x80, 0x38, - 0xa1, 0xb0, 0x89, 0xd0, 0x56, 0x09, 0x66, 0xa5, 0x58, 0x40, 0x75, 0x7d, 0x53, 0xa1, 0x8f, 0xbe, - 0xc9, 0x18, 0xa2, 0xa1, 0x93, 0x1d, 0x22, 0xf4, 0x45, 0x80, 0x4d, 0xd7, 0x73, 0xc3, 0x6d, 0x46, - 0x7d, 0xf8, 0xd8, 0xd4, 0x15, 0x2f, 0xb5, 0xa2, 0xa8, 0x60, 0x8d, 0x22, 0x7a, 0x19, 0x46, 0xd5, - 0x06, 0xac, 0x56, 0xd8, 0x03, 0x91, 0xf6, 0xe0, 0x1e, 0x9f, 0x46, 0x15, 0xac, 0xe3, 0xd9, 0xef, - 0x26, 0xd7, 0x8b, 0xd8, 0x01, 0xda, 0xf8, 0x5a, 0x83, 0x8e, 0x6f, 0xae, 0xf7, 0xf8, 0xda, 0xbf, - 0x91, 0x87, 0x49, 0xa3, 0xb1, 0x4e, 0x38, 0xc0, 0x99, 0x75, 0x9d, 0xde, 0x73, 0x4e, 0x44, 0xc4, - 0xfe, 0xb3, 0xfb, 0x6f, 0x15, 0xfd, 0x2e, 0xa4, 0x3b, 0x80, 0xd7, 0x47, 0x5f, 0x84, 0x52, 0xcb, - 0x09, 0x99, 0xee, 0x8a, 0x88, 0x7d, 0x37, 0x08, 0xb1, 0x58, 0x8e, 0x70, 0xc2, 0x48, 0xbb, 0x6a, - 0x38, 0xed, 0x98, 0x24, 0xbd, 0x90, 0x29, 0xef, 0x23, 0x8d, 0x6e, 0x54, 0x27, 0x28, 0x83, 0xb4, - 0x8f, 0x39, 0x0c, 0xbd, 0x0a, 0x63, 0x01, 0x61, 0xab, 0x62, 0x89, 0xb2, 0x72, 0x6c, 0x99, 0x0d, - 0xc5, 0x3c, 0x1f, 0xd6, 0x60, 0xd8, 0xc0, 0x8c, 0x59, 0xf9, 0xe1, 0x1e, 0xac, 0xfc, 
0x33, 0x30, - 0xc2, 0x7e, 0xa8, 0x15, 0xa0, 0x66, 0xa3, 0xca, 0x8b, 0xb1, 0x84, 0x27, 0x17, 0x4c, 0x71, 0xc0, - 0x05, 0xf3, 0x49, 0x98, 0xa8, 0x38, 0x64, 0xd7, 0xf7, 0x96, 0xbd, 0x66, 0xdb, 0x77, 0xbd, 0x08, - 0xcd, 0x40, 0x81, 0xdd, 0x0e, 0x7c, 0x6f, 0x17, 0x28, 0x05, 0x5c, 0xa0, 0x8c, 0xb9, 0xbd, 0x05, - 0xe7, 0x2a, 0xfe, 0x7d, 0xef, 0xbe, 0x13, 0x34, 0x17, 0x6a, 0x55, 0x4d, 0xce, 0x5d, 0x93, 0x72, - 0x16, 0x37, 0x62, 0x49, 0x3d, 0x53, 0xb5, 0x9a, 0xfc, 0xae, 0x5d, 0x71, 0x5b, 0x24, 0x43, 0x1b, - 0xf1, 0x37, 0x72, 0x46, 0x4b, 0x31, 0xbe, 0x7a, 0x30, 0xb2, 0x32, 0x1f, 0x8c, 0xde, 0x82, 0xe2, - 0xa6, 0x4b, 0x5a, 0x4d, 0x4c, 0x36, 0xc5, 0x12, 0x7b, 0x3a, 0xfb, 0x5d, 0x7e, 0x85, 0x62, 0x4a, - 0xed, 0x13, 0x97, 0xd2, 0x56, 0x44, 0x65, 0xac, 0xc8, 0xa0, 0x1d, 0x98, 0x92, 0x62, 0x80, 0x84, - 0x8a, 0x05, 0xf7, 0x4c, 0x2f, 0xd9, 0xc2, 0x24, 0x7e, 0xf6, 0xf0, 0xa0, 0x3c, 0x85, 0x13, 0x64, - 0x70, 0x17, 0x61, 0x2a, 0x96, 0xed, 0xd2, 0xa3, 0xb5, 0xc0, 0x86, 0x9f, 0x89, 0x65, 0x4c, 0xc2, - 0x64, 0xa5, 0xf6, 0x8f, 0x59, 0xf0, 0x58, 0xd7, 0xc8, 0x08, 0x49, 0xfb, 0x84, 0x67, 0x21, 0x29, - 0xf9, 0xe6, 0xfa, 0x4b, 0xbe, 0xf6, 0xdf, 0xb7, 0xe0, 0xec, 0xf2, 0x6e, 0x3b, 0xda, 0xaf, 0xb8, - 0xe6, 0xeb, 0xce, 0x2b, 0x30, 0xbc, 0x4b, 0x9a, 0x6e, 0x67, 0x57, 0xcc, 0x5c, 0x59, 0x1e, 0x3f, - 0xab, 0xac, 0xf4, 0xe8, 0xa0, 0x3c, 0x5e, 0x8f, 0xfc, 0xc0, 0xd9, 0x22, 0xbc, 0x00, 0x0b, 0x74, - 0x76, 0x88, 0xbb, 0xef, 0x93, 0x5b, 0xee, 0xae, 0x2b, 0xed, 0x2c, 0x7a, 0xea, 0xce, 0xe6, 0xe4, - 0x80, 0xce, 0xbd, 0xd5, 0x71, 0xbc, 0xc8, 0x8d, 0xf6, 0xc5, 0xc3, 0x8c, 0x24, 0x82, 0x63, 0x7a, - 0xf6, 0x37, 0x2c, 0x98, 0x94, 0xeb, 0x7e, 0xa1, 0xd9, 0x0c, 0x48, 0x18, 0xa2, 0x59, 0xc8, 0xb9, - 0x6d, 0xd1, 0x4b, 0x10, 0xbd, 0xcc, 0x55, 0x6b, 0x38, 0xe7, 0xb6, 0x51, 0x0d, 0x4a, 0xdc, 0x5c, - 0x23, 0x5e, 0x5c, 0x03, 0x19, 0x7d, 0xb0, 0x1e, 0xac, 0xcb, 0x9a, 0x38, 0x26, 0x22, 0x39, 0x38, - 0x76, 0x66, 0xe6, 0xcd, 0x57, 0xaf, 0x1b, 0xa2, 0x1c, 0x2b, 0x0c, 0x74, 0x15, 0x8a, 0x9e, 0xdf, - 0xe4, 0xd6, 0x33, 0xfc, 0xf6, 0x63, 0x4b, 0x76, 0x4d, 0x94, 0x61, 0x05, 0xb5, 0x7f, 0xd0, 0x82, - 0x31, 0xf9, 0x65, 0x03, 0x32, 0x93, 0x74, 0x6b, 0xc5, 0x8c, 0x64, 0xbc, 0xb5, 0x28, 0x33, 0xc8, - 0x20, 0x06, 0x0f, 0x98, 0x3f, 0x0e, 0x0f, 0x68, 0xff, 0x68, 0x0e, 0x26, 0x64, 0x77, 0xea, 0x9d, - 0x8d, 0x90, 0x44, 0x68, 0x1d, 0x4a, 0x0e, 0x1f, 0x72, 0x22, 0x57, 0xec, 0x53, 0xe9, 0xc2, 0x87, - 0x31, 0x3f, 0xf1, 0xb5, 0xbc, 0x20, 0x6b, 0xe3, 0x98, 0x10, 0x6a, 0xc1, 0xb4, 0xe7, 0x47, 0xec, - 0x88, 0x56, 0xf0, 0x5e, 0x4f, 0x20, 0x49, 0xea, 0x17, 0x04, 0xf5, 0xe9, 0xb5, 0x24, 0x15, 0xdc, - 0x4d, 0x18, 0x2d, 0x4b, 0x85, 0x47, 0x3e, 0x5b, 0xdc, 0xd0, 0x67, 0x21, 0x5d, 0xdf, 0x61, 0xff, - 0xaa, 0x05, 0x25, 0x89, 0x76, 0x1a, 0xaf, 0x5d, 0xab, 0x30, 0x12, 0xb2, 0x49, 0x90, 0x43, 0x63, - 0xf7, 0xea, 0x38, 0x9f, 0xaf, 0xf8, 0xe6, 0xe1, 0xff, 0x43, 0x2c, 0x69, 0x30, 0x7d, 0xb7, 0xea, - 0xfe, 0x47, 0x44, 0xdf, 0xad, 0xfa, 0x93, 0x71, 0xc3, 0xfc, 0x57, 0xd6, 0x67, 0x4d, 0xac, 0xa5, - 0x0c, 0x52, 0x3b, 0x20, 0x9b, 0xee, 0x83, 0x24, 0x83, 0x54, 0x63, 0xa5, 0x58, 0x40, 0xd1, 0x3b, - 0x30, 0xd6, 0x90, 0x8a, 0xce, 0xf8, 0x18, 0xb8, 0xd2, 0x53, 0xe9, 0xae, 0xde, 0x67, 0xb8, 0x65, - 0xed, 0x92, 0x56, 0x1f, 0x1b, 0xd4, 0xcc, 0xe7, 0xf6, 0x7c, 0xbf, 0xe7, 0xf6, 0x98, 0x6e, 0xf6, - 0xe3, 0xf3, 0x8f, 0x5b, 0x30, 0xcc, 0xd5, 0x65, 0x83, 0xe9, 0x17, 0xb5, 0xe7, 0xaa, 0x78, 0xec, - 0xee, 0xd2, 0x42, 0xf1, 0xfc, 0x84, 0x56, 0xa1, 0xc4, 0x7e, 0x30, 0xb5, 0x41, 0x3e, 0xdb, 0xa4, - 0x98, 0xb7, 0xaa, 0x77, 0xf0, 0xae, 0xac, 0x86, 0x63, 0x0a, 0xf6, 0x0f, 0xe7, 0xe9, 0x51, 0x15, - 0xa3, 0x1a, 
0x37, 0xb8, 0xf5, 0xe8, 0x6e, 0xf0, 0xdc, 0xa3, 0xba, 0xc1, 0xb7, 0x60, 0xb2, 0xa1, - 0x3d, 0x6e, 0xc5, 0x33, 0x79, 0xb5, 0xe7, 0x22, 0xd1, 0xde, 0xc1, 0xb8, 0xca, 0x68, 0xc9, 0x24, - 0x82, 0x93, 0x54, 0xd1, 0x77, 0xc2, 0x18, 0x9f, 0x67, 0xd1, 0x0a, 0xb7, 0x58, 0xf8, 0x44, 0xf6, - 0x7a, 0xd1, 0x9b, 0x60, 0x2b, 0xb1, 0xae, 0x55, 0xc7, 0x06, 0x31, 0xfb, 0x97, 0x8a, 0x30, 0xb4, - 0xbc, 0x47, 0xbc, 0xe8, 0x14, 0x0e, 0xa4, 0x06, 0x4c, 0xb8, 0xde, 0x9e, 0xdf, 0xda, 0x23, 0x4d, - 0x0e, 0x3f, 0xce, 0xe5, 0x7a, 0x5e, 0x90, 0x9e, 0xa8, 0x1a, 0x24, 0x70, 0x82, 0xe4, 0xa3, 0x90, - 0x30, 0xaf, 0xc3, 0x30, 0x9f, 0x7b, 0x21, 0x5e, 0xa6, 0x2a, 0x83, 0xd9, 0x20, 0x8a, 0x5d, 0x10, - 0x4b, 0xbf, 0x5c, 0xfb, 0x2c, 0xaa, 0xa3, 0x77, 0x61, 0x62, 0xd3, 0x0d, 0xc2, 0x88, 0x8a, 0x86, - 0x61, 0xe4, 0xec, 0xb6, 0x1f, 0x42, 0xa2, 0x54, 0xe3, 0xb0, 0x62, 0x50, 0xc2, 0x09, 0xca, 0x68, - 0x0b, 0xc6, 0xa9, 0x90, 0x13, 0x37, 0x35, 0x72, 0xec, 0xa6, 0x94, 0xca, 0xe8, 0x96, 0x4e, 0x08, - 0x9b, 0x74, 0xe9, 0x61, 0xd2, 0x60, 0x42, 0x51, 0x91, 0x71, 0x14, 0xea, 0x30, 0xe1, 0xd2, 0x10, - 0x87, 0xd1, 0x33, 0x89, 0x99, 0xad, 0x94, 0xcc, 0x33, 0x49, 0x33, 0x4e, 0xf9, 0x12, 0x94, 0x08, - 0x1d, 0x42, 0x4a, 0x58, 0x28, 0xc6, 0xe7, 0x07, 0xeb, 0xeb, 0xaa, 0xdb, 0x08, 0x7c, 0x53, 0x96, - 0x5f, 0x96, 0x94, 0x70, 0x4c, 0x14, 0x2d, 0xc1, 0x70, 0x48, 0x02, 0x97, 0x84, 0x42, 0x45, 0xde, - 0x63, 0x1a, 0x19, 0x1a, 0xb7, 0x3d, 0xe7, 0xbf, 0xb1, 0xa8, 0x4a, 0x97, 0x97, 0xc3, 0xa4, 0x21, - 0xa6, 0x15, 0xd7, 0x96, 0xd7, 0x02, 0x2b, 0xc5, 0x02, 0x8a, 0xde, 0x84, 0x91, 0x80, 0xb4, 0x98, - 0xb2, 0x68, 0x7c, 0xf0, 0x45, 0xce, 0x75, 0x4f, 0xbc, 0x1e, 0x96, 0x04, 0xd0, 0x4d, 0x40, 0x01, - 0xa1, 0x3c, 0x84, 0xeb, 0x6d, 0x29, 0x63, 0x0e, 0xa1, 0xeb, 0x7e, 0x5c, 0xb4, 0x7f, 0x06, 0xc7, - 0x18, 0xd2, 0x2a, 0x15, 0xa7, 0x54, 0x43, 0xd7, 0x61, 0x5a, 0x95, 0x56, 0xbd, 0x30, 0x72, 0xbc, - 0x06, 0x61, 0x6a, 0xee, 0x52, 0xcc, 0x15, 0xe1, 0x24, 0x02, 0xee, 0xae, 0x63, 0xff, 0x0c, 0x65, - 0x67, 0xe8, 0x68, 0x9d, 0x02, 0x2f, 0xf0, 0x86, 0xc9, 0x0b, 0x5c, 0xc8, 0x9c, 0xb9, 0x0c, 0x3e, - 0xe0, 0xd0, 0x82, 0x51, 0x6d, 0x66, 0xe3, 0x35, 0x6b, 0xf5, 0x58, 0xb3, 0x1d, 0x98, 0xa2, 0x2b, - 0xfd, 0xf6, 0x46, 0x48, 0x82, 0x3d, 0xd2, 0x64, 0x0b, 0x33, 0xf7, 0x70, 0x0b, 0x53, 0xbd, 0x32, - 0xdf, 0x4a, 0x10, 0xc4, 0x5d, 0x4d, 0xa0, 0x57, 0xa4, 0xe6, 0x24, 0x6f, 0x18, 0x69, 0x71, 0xad, - 0xc8, 0xd1, 0x41, 0x79, 0x4a, 0xfb, 0x10, 0x5d, 0x53, 0x62, 0x7f, 0x49, 0x7e, 0xa3, 0x7a, 0xcd, - 0x6f, 0xa8, 0xc5, 0x92, 0x78, 0xcd, 0x57, 0xcb, 0x01, 0xc7, 0x38, 0x74, 0x8f, 0x52, 0x11, 0x24, - 0xf9, 0x9a, 0x4f, 0x05, 0x14, 0xcc, 0x20, 0xf6, 0x8b, 0x00, 0xcb, 0x0f, 0x48, 0x83, 0x2f, 0x75, - 0xfd, 0x01, 0xd2, 0xca, 0x7e, 0x80, 0xb4, 0xff, 0x9d, 0x05, 0x13, 0x2b, 0x4b, 0x86, 0x98, 0x38, - 0x07, 0xc0, 0x65, 0xa3, 0x7b, 0xf7, 0xd6, 0xa4, 0x6e, 0x9d, 0xab, 0x47, 0x55, 0x29, 0xd6, 0x30, - 0xd0, 0x05, 0xc8, 0xb7, 0x3a, 0x9e, 0x10, 0x59, 0x46, 0x0e, 0x0f, 0xca, 0xf9, 0x5b, 0x1d, 0x0f, - 0xd3, 0x32, 0xcd, 0x42, 0x30, 0x3f, 0xb0, 0x85, 0x60, 0x5f, 0xf7, 0x2a, 0x54, 0x86, 0xa1, 0xfb, - 0xf7, 0xdd, 0x26, 0x37, 0x62, 0x17, 0x7a, 0xff, 0x7b, 0xf7, 0xaa, 0x95, 0x10, 0xf3, 0x72, 0xfb, - 0xab, 0x79, 0x98, 0x5d, 0x69, 0x91, 0x07, 0x1f, 0xd0, 0x90, 0x7f, 0x50, 0xfb, 0xc6, 0xe3, 0xf1, - 0x8b, 0xc7, 0xb5, 0x61, 0xed, 0x3f, 0x1e, 0x9b, 0x30, 0xc2, 0x1f, 0xb3, 0xa5, 0x59, 0xff, 0x6b, - 0x69, 0xad, 0x67, 0x0f, 0xc8, 0x1c, 0x7f, 0x14, 0x17, 0xe6, 0xfc, 0xea, 0xa6, 0x15, 0xa5, 0x58, - 0x12, 0x9f, 0xfd, 0x0c, 0x8c, 0xe9, 0x98, 0xc7, 0xb2, 0x26, 0xff, 0x0b, 0x79, 0x98, 0xa2, 0x3d, - 0x78, 0xa4, 0x13, 0x71, 0xa7, 0x7b, 
0x22, 0x4e, 0xda, 0xa2, 0xb8, 0xff, 0x6c, 0xbc, 0x93, 0x9c, - 0x8d, 0x17, 0xb2, 0x66, 0xe3, 0xb4, 0xe7, 0xe0, 0x7b, 0x2d, 0x38, 0xb3, 0xd2, 0xf2, 0x1b, 0x3b, - 0x09, 0xab, 0xdf, 0x97, 0x61, 0x94, 0x9e, 0xe3, 0xa1, 0xe1, 0x45, 0x64, 0xf8, 0x95, 0x09, 0x10, - 0xd6, 0xf1, 0xb4, 0x6a, 0x77, 0xee, 0x54, 0x2b, 0x69, 0xee, 0x68, 0x02, 0x84, 0x75, 0x3c, 0xfb, - 0xeb, 0x16, 0x5c, 0xbc, 0xbe, 0xb4, 0x1c, 0x2f, 0xc5, 0x2e, 0x8f, 0x38, 0x2a, 0x05, 0x36, 0xb5, - 0xae, 0xc4, 0x52, 0x60, 0x85, 0xf5, 0x42, 0x40, 0x3f, 0x2a, 0xde, 0x9e, 0x3f, 0x6d, 0xc1, 0x99, - 0xeb, 0x6e, 0x44, 0xaf, 0xe5, 0xa4, 0x6f, 0x16, 0xbd, 0x97, 0x43, 0x37, 0xf2, 0x83, 0xfd, 0xa4, - 0x6f, 0x16, 0x56, 0x10, 0xac, 0x61, 0xf1, 0x96, 0xf7, 0x5c, 0x66, 0x46, 0x95, 0x33, 0x55, 0x51, - 0x58, 0x94, 0x63, 0x85, 0x41, 0x3f, 0xac, 0xe9, 0x06, 0x4c, 0x94, 0xd8, 0x17, 0x27, 0xac, 0xfa, - 0xb0, 0x8a, 0x04, 0xe0, 0x18, 0xc7, 0xfe, 0x31, 0x0b, 0xce, 0x5d, 0x6f, 0x75, 0xc2, 0x88, 0x04, - 0x9b, 0xa1, 0xd1, 0xd9, 0x17, 0xa1, 0x44, 0xa4, 0xb8, 0x2e, 0xfa, 0xaa, 0x18, 0x4c, 0x25, 0xc7, - 0x73, 0xc7, 0x30, 0x85, 0x37, 0x80, 0xe7, 0xc0, 0xf1, 0x5c, 0xc7, 0x7e, 0x3e, 0x07, 0xe3, 0x37, - 0xd6, 0xd7, 0x6b, 0xd7, 0x49, 0x24, 0x6e, 0xb1, 0xfe, 0xaa, 0x66, 0xac, 0x69, 0xcc, 0x7a, 0x09, - 0x45, 0x9d, 0xc8, 0x6d, 0xcd, 0x71, 0x4f, 0xe4, 0xb9, 0xaa, 0x17, 0xdd, 0x0e, 0xea, 0x51, 0xe0, - 0x7a, 0x5b, 0xa9, 0x3a, 0x36, 0x79, 0xd7, 0xe6, 0xb3, 0xee, 0x5a, 0xf4, 0x22, 0x0c, 0x33, 0x57, - 0x68, 0x29, 0x9e, 0x3c, 0xae, 0x64, 0x0a, 0x56, 0x7a, 0x74, 0x50, 0x2e, 0xdd, 0xc1, 0x55, 0xfe, - 0x07, 0x0b, 0x54, 0x74, 0x07, 0x46, 0xb7, 0xa3, 0xa8, 0x7d, 0x83, 0x38, 0x4d, 0x12, 0xc8, 0xd3, - 0xe1, 0x52, 0xda, 0xe9, 0x40, 0x07, 0x81, 0xa3, 0xc5, 0x1b, 0x2a, 0x2e, 0x0b, 0xb1, 0x4e, 0xc7, - 0xae, 0x03, 0xc4, 0xb0, 0x13, 0xd2, 0x2f, 0xd8, 0x7f, 0x60, 0xc1, 0x08, 0xf7, 0x4a, 0x0b, 0xd0, - 0xeb, 0x50, 0x20, 0x0f, 0x48, 0x43, 0x70, 0x8e, 0xa9, 0x1d, 0x8e, 0x19, 0x0f, 0xae, 0x2d, 0xa7, - 0xff, 0x31, 0xab, 0x85, 0x6e, 0xc0, 0x08, 0xed, 0xed, 0x75, 0xe5, 0xa2, 0xf7, 0x64, 0xd6, 0x17, - 0xab, 0x69, 0xe7, 0xbc, 0x8a, 0x28, 0xc2, 0xb2, 0x3a, 0xd3, 0xfc, 0x36, 0xda, 0x75, 0x7a, 0x80, - 0x45, 0xbd, 0xee, 0xd9, 0xf5, 0xa5, 0x1a, 0x47, 0x12, 0xd4, 0xb8, 0xe6, 0x57, 0x16, 0xe2, 0x98, - 0x88, 0xbd, 0x0e, 0x25, 0x3a, 0xa9, 0x0b, 0x2d, 0xd7, 0xe9, 0xad, 0x74, 0x7e, 0x16, 0x4a, 0x52, - 0x01, 0x1c, 0x0a, 0xc7, 0x26, 0x46, 0x55, 0xea, 0x87, 0x43, 0x1c, 0xc3, 0xed, 0x4d, 0x38, 0xcb, - 0x5e, 0xfe, 0x9d, 0x68, 0xdb, 0xd8, 0x63, 0xfd, 0x17, 0xf3, 0x73, 0x42, 0x10, 0xe3, 0x33, 0x33, - 0xa3, 0xf9, 0x0e, 0x8c, 0x49, 0x8a, 0xb1, 0x50, 0x66, 0xff, 0x61, 0x01, 0x1e, 0xaf, 0xd6, 0xb3, - 0x1d, 0x16, 0x5f, 0x85, 0x31, 0xce, 0xa6, 0xd1, 0xa5, 0xed, 0xb4, 0x44, 0xbb, 0xea, 0x5d, 0x6c, - 0x5d, 0x83, 0x61, 0x03, 0x13, 0x5d, 0x84, 0xbc, 0xfb, 0x9e, 0x97, 0x34, 0xc3, 0xad, 0xbe, 0xb5, - 0x86, 0x69, 0x39, 0x05, 0x53, 0x8e, 0x8f, 0x1f, 0xa5, 0x0a, 0xac, 0xb8, 0xbe, 0x37, 0x60, 0xc2, - 0x0d, 0x1b, 0xa1, 0x5b, 0xf5, 0xe8, 0x39, 0x13, 0x3b, 0xbb, 0xc6, 0x4a, 0x02, 0xda, 0x69, 0x05, - 0xc5, 0x09, 0x6c, 0xed, 0x5c, 0x1f, 0x1a, 0x98, 0x6b, 0xec, 0xeb, 0xe9, 0x43, 0x19, 0xe2, 0x36, - 0xfb, 0xba, 0x90, 0x19, 0xb5, 0x09, 0x86, 0x98, 0x7f, 0x70, 0x88, 0x25, 0x8c, 0x4a, 0x60, 0x8d, - 0x6d, 0xa7, 0xbd, 0xd0, 0x89, 0xb6, 0x2b, 0x6e, 0xd8, 0xf0, 0xf7, 0x48, 0xb0, 0xcf, 0x84, 0xe7, - 0x62, 0x2c, 0x81, 0x29, 0xc0, 0xd2, 0x8d, 0x85, 0x1a, 0xc5, 0xc4, 0xdd, 0x75, 0x4c, 0xae, 0x10, - 0x4e, 0x82, 0x2b, 0x5c, 0x80, 0x49, 0xd9, 0x4c, 0x9d, 0x84, 0xec, 0x8e, 0x18, 0x65, 0x1d, 0x53, - 0xa6, 0xb6, 0xa2, 0x58, 0x75, 0x2b, 0x89, 0x8f, 0x5e, 0x81, 
0x71, 0xd7, 0x73, 0x23, 0xd7, 0x89, - 0xfc, 0x80, 0xdd, 0xb0, 0x5c, 0x4e, 0x66, 0x96, 0x6c, 0x55, 0x1d, 0x80, 0x4d, 0x3c, 0xfb, 0x3f, - 0x15, 0x60, 0x9a, 0x4d, 0xdb, 0xb7, 0x56, 0xd8, 0x47, 0x66, 0x85, 0xdd, 0xe9, 0x5e, 0x61, 0x27, - 0xc1, 0xee, 0x7e, 0x98, 0xcb, 0xec, 0x5d, 0x28, 0x29, 0x5b, 0x60, 0xe9, 0x0c, 0x60, 0x65, 0x38, - 0x03, 0xf4, 0xe7, 0x3e, 0xe4, 0x33, 0x6e, 0x3e, 0xf5, 0x19, 0xf7, 0x6f, 0x59, 0x10, 0x9b, 0x44, - 0xa2, 0x1b, 0x50, 0x6a, 0xfb, 0xcc, 0xec, 0x20, 0x90, 0xb6, 0x3c, 0x8f, 0xa7, 0x5e, 0x54, 0xfc, - 0x52, 0xe4, 0xe3, 0x57, 0x93, 0x35, 0x70, 0x5c, 0x19, 0x2d, 0xc2, 0x48, 0x3b, 0x20, 0xf5, 0x88, - 0xb9, 0xc0, 0xf6, 0xa5, 0xc3, 0xd7, 0x08, 0xc7, 0xc7, 0xb2, 0xa2, 0xfd, 0x0b, 0x16, 0x00, 0x7f, - 0x29, 0x75, 0xbc, 0x2d, 0x72, 0x0a, 0xda, 0xdf, 0x0a, 0x14, 0xc2, 0x36, 0x69, 0xf4, 0x32, 0x08, - 0x89, 0xfb, 0x53, 0x6f, 0x93, 0x46, 0x3c, 0xe0, 0xf4, 0x1f, 0x66, 0xb5, 0xed, 0xef, 0x03, 0x98, - 0x88, 0xd1, 0xaa, 0x11, 0xd9, 0x45, 0xcf, 0x1b, 0x2e, 0x71, 0x17, 0x12, 0x2e, 0x71, 0x25, 0x86, - 0xad, 0x29, 0x1a, 0xdf, 0x85, 0xfc, 0xae, 0xf3, 0x40, 0x68, 0x92, 0x9e, 0xed, 0xdd, 0x0d, 0x4a, - 0x7f, 0x6e, 0xd5, 0x79, 0xc0, 0x65, 0xa6, 0x67, 0xe5, 0x02, 0x59, 0x75, 0x1e, 0x1c, 0x71, 0xb3, - 0x0f, 0x76, 0x48, 0xdd, 0x72, 0xc3, 0xe8, 0xcb, 0xff, 0x31, 0xfe, 0xcf, 0x96, 0x1d, 0x6d, 0x84, - 0xb5, 0xe5, 0x7a, 0xe2, 0xdd, 0x70, 0xa0, 0xb6, 0x5c, 0x2f, 0xd9, 0x96, 0xeb, 0x0d, 0xd0, 0x96, - 0xeb, 0xa1, 0xf7, 0x61, 0x44, 0xbc, 0xd1, 0x33, 0x5b, 0x6f, 0x53, 0x4b, 0x95, 0xd5, 0x9e, 0x78, - 0xe2, 0xe7, 0x6d, 0xce, 0x4b, 0x99, 0x50, 0x94, 0xf6, 0x6d, 0x57, 0x36, 0x88, 0xfe, 0xba, 0x05, - 0x13, 0xe2, 0x37, 0x26, 0xef, 0x75, 0x48, 0x18, 0x09, 0xde, 0xf3, 0xd3, 0x83, 0xf7, 0x41, 0x54, - 0xe4, 0x5d, 0xf9, 0xb4, 0x3c, 0x66, 0x4d, 0x60, 0xdf, 0x1e, 0x25, 0x7a, 0x81, 0xfe, 0xa1, 0x05, - 0x67, 0x77, 0x9d, 0x07, 0xbc, 0x45, 0x5e, 0x86, 0x9d, 0xc8, 0xf5, 0x85, 0xed, 0xfa, 0xeb, 0x83, - 0x4d, 0x7f, 0x57, 0x75, 0xde, 0x49, 0x69, 0xe6, 0x7a, 0x36, 0x0d, 0xa5, 0x6f, 0x57, 0x53, 0xfb, - 0x35, 0xbb, 0x09, 0x45, 0xb9, 0xde, 0x52, 0x24, 0xef, 0x8a, 0xce, 0x58, 0x1f, 0xdb, 0x44, 0x42, - 0xf7, 0x4b, 0xa3, 0xed, 0x88, 0xb5, 0xf6, 0x48, 0xdb, 0x79, 0x17, 0xc6, 0xf4, 0x35, 0xf6, 0x48, - 0xdb, 0x7a, 0x0f, 0xce, 0xa4, 0xac, 0xa5, 0x47, 0xda, 0xe4, 0x7d, 0xb8, 0x90, 0xb9, 0x3e, 0x1e, - 0x65, 0xc3, 0xf6, 0xcf, 0x5b, 0xfa, 0x39, 0x78, 0x0a, 0x2a, 0xf8, 0x25, 0x53, 0x05, 0x7f, 0xa9, - 0xf7, 0xce, 0xc9, 0xd0, 0xc3, 0xbf, 0xa3, 0x77, 0x9a, 0x9e, 0xea, 0xe8, 0x4d, 0x18, 0x6e, 0xd1, - 0x12, 0x69, 0x1c, 0x62, 0xf7, 0xdf, 0x91, 0x31, 0x2f, 0xc5, 0xca, 0x43, 0x2c, 0x28, 0xd8, 0xbf, - 0x6c, 0x41, 0xe1, 0x14, 0x46, 0x02, 0x9b, 0x23, 0xf1, 0x7c, 0x26, 0x69, 0x11, 0xd2, 0x6c, 0x0e, - 0x3b, 0xf7, 0x97, 0x1f, 0x44, 0xc4, 0x0b, 0x99, 0xa8, 0x98, 0x3a, 0x30, 0xdf, 0x05, 0x67, 0x6e, - 0xf9, 0x4e, 0x73, 0xd1, 0x69, 0x39, 0x5e, 0x83, 0x04, 0x55, 0x6f, 0xab, 0xaf, 0x95, 0x92, 0x6e, - 0x53, 0x94, 0xeb, 0x67, 0x53, 0x64, 0x6f, 0x03, 0xd2, 0x1b, 0x10, 0x76, 0x9c, 0x18, 0x46, 0x5c, - 0xde, 0x94, 0x18, 0xfe, 0xa7, 0xd3, 0xb9, 0xbb, 0xae, 0x9e, 0x69, 0x16, 0x8a, 0xbc, 0x00, 0x4b, - 0x42, 0xf6, 0xab, 0x90, 0xea, 0xbb, 0xd5, 0x5f, 0x6d, 0x60, 0x7f, 0x1e, 0xa6, 0x59, 0xcd, 0x63, - 0x8a, 0xb4, 0x76, 0x42, 0x49, 0x97, 0x12, 0x32, 0xca, 0xfe, 0x8a, 0x05, 0x93, 0x6b, 0x89, 0xf8, - 0x15, 0x57, 0xd8, 0x7b, 0x60, 0x8a, 0x6e, 0xb8, 0xce, 0x4a, 0xb1, 0x80, 0x9e, 0xb8, 0x0e, 0xea, - 0x4f, 0x2d, 0x88, 0xdd, 0x29, 0x4f, 0x81, 0xf1, 0x5a, 0x32, 0x18, 0xaf, 0x54, 0xdd, 0x88, 0xea, - 0x4e, 0x16, 0xdf, 0x85, 0x6e, 0xaa, 0xd8, 0x01, 0x3d, 0xd4, 0x22, 0x31, 0x19, 0xee, 
0x69, 0x3e, - 0x61, 0x06, 0x18, 0x90, 0xd1, 0x04, 0x98, 0x29, 0x91, 0xc2, 0xfd, 0x88, 0x98, 0x12, 0xa9, 0xfe, - 0x64, 0xec, 0xd0, 0x9a, 0xd6, 0x65, 0x76, 0x72, 0x7d, 0x3b, 0x33, 0x0d, 0x77, 0x5a, 0xee, 0xfb, - 0x44, 0x05, 0x40, 0x29, 0x0b, 0x53, 0x6f, 0x51, 0x7a, 0x74, 0x50, 0x1e, 0x57, 0xff, 0x78, 0x94, - 0xac, 0xb8, 0x8a, 0x7d, 0x03, 0x26, 0x13, 0x03, 0x86, 0x5e, 0x86, 0xa1, 0xf6, 0xb6, 0x13, 0x92, - 0x84, 0xf9, 0xe4, 0x50, 0x8d, 0x16, 0x1e, 0x1d, 0x94, 0x27, 0x54, 0x05, 0x56, 0x82, 0x39, 0xb6, - 0xfd, 0x3f, 0x2d, 0x28, 0xac, 0xf9, 0xcd, 0xd3, 0x58, 0x4c, 0x6f, 0x18, 0x8b, 0xe9, 0x89, 0xac, - 0x18, 0x83, 0x99, 0xeb, 0x68, 0x25, 0xb1, 0x8e, 0x2e, 0x65, 0x52, 0xe8, 0xbd, 0x84, 0x76, 0x61, - 0x94, 0x45, 0x2e, 0x14, 0xe6, 0x9c, 0x2f, 0x1a, 0x32, 0x40, 0x39, 0x21, 0x03, 0x4c, 0x6a, 0xa8, - 0x9a, 0x24, 0xf0, 0x0c, 0x8c, 0x08, 0x93, 0xc2, 0xa4, 0x11, 0xbc, 0xc0, 0xc5, 0x12, 0x6e, 0xff, - 0x78, 0x1e, 0x8c, 0x48, 0x89, 0xe8, 0x57, 0x2d, 0x98, 0x0b, 0xb8, 0x57, 0x61, 0xb3, 0xd2, 0x09, - 0x5c, 0x6f, 0xab, 0xde, 0xd8, 0x26, 0xcd, 0x4e, 0xcb, 0xf5, 0xb6, 0xaa, 0x5b, 0x9e, 0xaf, 0x8a, - 0x97, 0x1f, 0x90, 0x46, 0x87, 0xbd, 0x0b, 0xf4, 0x09, 0xcb, 0xa8, 0x4c, 0x76, 0xae, 0x1d, 0x1e, - 0x94, 0xe7, 0xf0, 0xb1, 0x68, 0xe3, 0x63, 0xf6, 0x05, 0x7d, 0xdd, 0x82, 0x79, 0x1e, 0x40, 0x70, - 0xf0, 0xfe, 0xf7, 0x90, 0x98, 0x6a, 0x92, 0x54, 0x4c, 0x64, 0x9d, 0x04, 0xbb, 0x8b, 0xaf, 0x88, - 0x01, 0x9d, 0xaf, 0x1d, 0xaf, 0x2d, 0x7c, 0xdc, 0xce, 0xd9, 0xff, 0x22, 0x0f, 0xe3, 0xc2, 0xa1, - 0x5d, 0x44, 0x4a, 0x79, 0xd9, 0x58, 0x12, 0x4f, 0x26, 0x96, 0xc4, 0xb4, 0x81, 0x7c, 0x32, 0x41, - 0x52, 0x42, 0x98, 0x6e, 0x39, 0x61, 0x74, 0x83, 0x38, 0x41, 0xb4, 0x41, 0x1c, 0x6e, 0xca, 0x92, - 0x3f, 0xb6, 0xd9, 0x8d, 0x52, 0xd1, 0xdc, 0x4a, 0x12, 0xc3, 0xdd, 0xf4, 0xd1, 0x1e, 0x20, 0x66, - 0x8f, 0x13, 0x38, 0x5e, 0xc8, 0xbf, 0xc5, 0x15, 0x6f, 0x06, 0xc7, 0x6b, 0x75, 0x56, 0xb4, 0x8a, - 0x6e, 0x75, 0x51, 0xc3, 0x29, 0x2d, 0x68, 0x76, 0x56, 0x43, 0x83, 0xda, 0x59, 0x0d, 0xf7, 0xf1, - 0x34, 0xf1, 0x60, 0xaa, 0x2b, 0x26, 0xc1, 0xdb, 0x50, 0x52, 0xf6, 0x70, 0xe2, 0xd0, 0xe9, 0x1d, - 0xda, 0x23, 0x49, 0x81, 0xab, 0x51, 0x62, 0x5b, 0xcc, 0x98, 0x9c, 0xfd, 0x8f, 0x72, 0x46, 0x83, - 0x7c, 0x12, 0xd7, 0xa0, 0xe8, 0x84, 0xa1, 0xbb, 0xe5, 0x91, 0xa6, 0xd8, 0xb1, 0x1f, 0xcf, 0xda, - 0xb1, 0x46, 0x33, 0xcc, 0x26, 0x71, 0x41, 0xd4, 0xc4, 0x8a, 0x06, 0xba, 0xc1, 0x0d, 0x86, 0xf6, - 0x24, 0xcf, 0x3f, 0x18, 0x35, 0x90, 0x26, 0x45, 0x7b, 0x04, 0x8b, 0xfa, 0xe8, 0x0b, 0xdc, 0xa2, - 0xeb, 0xa6, 0xe7, 0xdf, 0xf7, 0xae, 0xfb, 0xbe, 0xf4, 0x42, 0x1b, 0x8c, 0xe0, 0xb4, 0xb4, 0xe3, - 0x52, 0xd5, 0xb1, 0x49, 0x6d, 0xb0, 0xb8, 0x3d, 0xdf, 0x0d, 0x67, 0x28, 0x69, 0xd3, 0x97, 0x24, - 0x44, 0x04, 0x26, 0x45, 0xb4, 0x04, 0x59, 0x26, 0xc6, 0x2e, 0x95, 0x9d, 0x37, 0x6b, 0xc7, 0x4a, - 0xbf, 0x9b, 0x26, 0x09, 0x9c, 0xa4, 0x69, 0xff, 0x94, 0x05, 0xcc, 0x0a, 0xfe, 0x14, 0x58, 0x86, - 0xcf, 0x9a, 0x2c, 0xc3, 0x4c, 0xd6, 0x20, 0x67, 0x70, 0x0b, 0x2f, 0xf1, 0x95, 0x55, 0x0b, 0xfc, - 0x07, 0xfb, 0xe2, 0x35, 0xbd, 0x3f, 0x27, 0x6b, 0xff, 0x5f, 0x8b, 0x1f, 0x62, 0xca, 0x31, 0x1d, - 0x7d, 0x0f, 0x14, 0x1b, 0x4e, 0xdb, 0x69, 0xf0, 0xb0, 0xbe, 0x99, 0x5a, 0x1d, 0xa3, 0xd2, 0xdc, - 0x92, 0xa8, 0xc1, 0xb5, 0x14, 0x32, 0xea, 0x46, 0x51, 0x16, 0xf7, 0xd5, 0x4c, 0xa8, 0x26, 0x67, - 0x77, 0x60, 0xdc, 0x20, 0xf6, 0x48, 0x45, 0xda, 0xef, 0xe1, 0x57, 0xac, 0x8a, 0x12, 0xb3, 0x0b, - 0xd3, 0x9e, 0xf6, 0x9f, 0x5e, 0x28, 0x52, 0x4c, 0xf9, 0x78, 0xbf, 0x4b, 0x94, 0xdd, 0x3e, 0x9a, - 0x95, 0x7f, 0x82, 0x0c, 0xee, 0xa6, 0x6c, 0xff, 0x84, 0x05, 0x8f, 0xe9, 0x88, 0x5a, 0xcc, 0x80, - 0x7e, 0x7a, 
0xe2, 0x0a, 0x14, 0xfd, 0x36, 0x09, 0x9c, 0xc8, 0x0f, 0xc4, 0xad, 0x71, 0x55, 0x0e, - 0xfa, 0x6d, 0x51, 0x7e, 0x24, 0xe2, 0x2b, 0x4a, 0xea, 0xb2, 0x1c, 0xab, 0x9a, 0x54, 0x8e, 0x61, - 0x83, 0x11, 0x8a, 0x78, 0x0e, 0xec, 0x0c, 0x60, 0x4f, 0xa6, 0x21, 0x16, 0x10, 0xfb, 0x0f, 0x2d, - 0xbe, 0xb0, 0xf4, 0xae, 0xa3, 0xf7, 0x60, 0x6a, 0xd7, 0x89, 0x1a, 0xdb, 0xcb, 0x0f, 0xda, 0x01, - 0x57, 0x8f, 0xcb, 0x71, 0x7a, 0xb6, 0xdf, 0x38, 0x69, 0x1f, 0x19, 0x1b, 0xa9, 0xad, 0x26, 0x88, - 0xe1, 0x2e, 0xf2, 0x68, 0x03, 0x46, 0x59, 0x19, 0xb3, 0x86, 0x0e, 0x7b, 0xb1, 0x06, 0x59, 0xad, - 0xa9, 0x57, 0xe7, 0xd5, 0x98, 0x0e, 0xd6, 0x89, 0xda, 0x5f, 0xce, 0xf3, 0xdd, 0xce, 0xb8, 0xed, - 0x67, 0x60, 0xa4, 0xed, 0x37, 0x97, 0xaa, 0x15, 0x2c, 0x66, 0x41, 0x5d, 0x23, 0x35, 0x5e, 0x8c, - 0x25, 0x1c, 0xbd, 0x06, 0x40, 0x1e, 0x44, 0x24, 0xf0, 0x9c, 0x96, 0x32, 0x1a, 0x51, 0x66, 0x92, - 0x15, 0x7f, 0xcd, 0x8f, 0xee, 0x84, 0xe4, 0xbb, 0x96, 0x15, 0x0a, 0xd6, 0xd0, 0xd1, 0x35, 0x80, - 0x76, 0xe0, 0xef, 0xb9, 0x4d, 0xe6, 0x5e, 0x97, 0x37, 0x4d, 0x2a, 0x6a, 0x0a, 0x82, 0x35, 0x2c, - 0xf4, 0x1a, 0x8c, 0x77, 0xbc, 0x90, 0x73, 0x28, 0xce, 0x86, 0x88, 0x4e, 0x58, 0x8c, 0xad, 0x1b, - 0xee, 0xe8, 0x40, 0x6c, 0xe2, 0xa2, 0x05, 0x18, 0x8e, 0x1c, 0x66, 0x13, 0x31, 0x94, 0x6d, 0xdb, - 0xb8, 0x4e, 0x31, 0xf4, 0xa0, 0xb2, 0xb4, 0x02, 0x16, 0x15, 0xd1, 0xdb, 0xd2, 0x57, 0x81, 0x9f, - 0xf5, 0xc2, 0xa8, 0x78, 0xb0, 0x7b, 0x41, 0xf3, 0x54, 0x10, 0xc6, 0xca, 0x06, 0x2d, 0xfb, 0xeb, - 0x25, 0x80, 0x98, 0x1d, 0x47, 0xef, 0x77, 0x9d, 0x47, 0xcf, 0xf5, 0x66, 0xe0, 0x4f, 0xee, 0x30, - 0x42, 0xdf, 0x6f, 0xc1, 0xa8, 0xd3, 0x6a, 0xf9, 0x0d, 0x27, 0x62, 0xa3, 0x9c, 0xeb, 0x7d, 0x1e, - 0x8a, 0xf6, 0x17, 0xe2, 0x1a, 0xbc, 0x0b, 0x2f, 0xca, 0x85, 0xa7, 0x41, 0xfa, 0xf6, 0x42, 0x6f, - 0x18, 0x7d, 0x4a, 0x4a, 0x69, 0x7c, 0x79, 0xcc, 0x26, 0xa5, 0xb4, 0x12, 0x3b, 0xfa, 0x35, 0x01, - 0x0d, 0xdd, 0x31, 0x02, 0xcf, 0x15, 0xb2, 0x63, 0x30, 0x18, 0x5c, 0x69, 0xbf, 0x98, 0x73, 0xa8, - 0xa6, 0x3b, 0x57, 0x0d, 0x65, 0x07, 0x2a, 0xd1, 0xc4, 0x9f, 0x3e, 0x8e, 0x55, 0xef, 0xc2, 0x64, - 0xd3, 0xbc, 0xdb, 0xc5, 0x6a, 0x7a, 0x3a, 0x8b, 0x6e, 0x82, 0x15, 0x88, 0x6f, 0xf3, 0x04, 0x00, - 0x27, 0x09, 0xa3, 0x1a, 0x77, 0x73, 0xab, 0x7a, 0x9b, 0xbe, 0x30, 0x4e, 0xb7, 0x33, 0xe7, 0x72, - 0x3f, 0x8c, 0xc8, 0x2e, 0xc5, 0x8c, 0x2f, 0xed, 0x35, 0x51, 0x17, 0x2b, 0x2a, 0xe8, 0x4d, 0x18, - 0x66, 0x7e, 0xb2, 0xe1, 0x4c, 0x31, 0x5b, 0x99, 0x68, 0x86, 0x78, 0x88, 0x37, 0x15, 0xfb, 0x1b, - 0x62, 0x41, 0x01, 0xdd, 0x90, 0x71, 0x60, 0xc2, 0xaa, 0x77, 0x27, 0x24, 0x2c, 0x0e, 0x4c, 0x69, - 0xf1, 0xe3, 0x71, 0x88, 0x17, 0x5e, 0x9e, 0x1a, 0x3e, 0xde, 0xa8, 0x49, 0x99, 0x23, 0xf1, 0x5f, - 0x46, 0xa5, 0x9f, 0x81, 0xec, 0xee, 0x99, 0x91, 0xeb, 0xe3, 0xe1, 0xbc, 0x6b, 0x92, 0xc0, 0x49, - 0x9a, 0x94, 0xd1, 0xe4, 0x3b, 0x57, 0x98, 0xb7, 0xf7, 0xdb, 0xff, 0x5c, 0xbe, 0x66, 0x97, 0x0c, - 0x2f, 0xc1, 0xa2, 0xfe, 0xa9, 0xde, 0xfa, 0xb3, 0x1e, 0x4c, 0x25, 0xb7, 0xe8, 0x23, 0xe5, 0x32, - 0xfe, 0xa0, 0x00, 0x13, 0xe6, 0x92, 0x42, 0xf3, 0x50, 0x12, 0x44, 0x54, 0x50, 0x52, 0xb5, 0x4b, - 0x56, 0x25, 0x00, 0xc7, 0x38, 0x2c, 0x16, 0x2d, 0xab, 0xae, 0x99, 0x25, 0xc6, 0xb1, 0x68, 0x15, - 0x04, 0x6b, 0x58, 0x54, 0x5e, 0xda, 0xf0, 0xfd, 0x48, 0x5d, 0x2a, 0x6a, 0xdd, 0x2d, 0xb2, 0x52, - 0x2c, 0xa0, 0xf4, 0x32, 0xd9, 0x21, 0x81, 0x47, 0x5a, 0x66, 0xac, 0x33, 0x75, 0x99, 0xdc, 0xd4, - 0x81, 0xd8, 0xc4, 0xa5, 0xb7, 0xa4, 0x1f, 0xb2, 0x85, 0x2c, 0xa4, 0xb2, 0xd8, 0xcc, 0xb3, 0xce, - 0x3d, 0xce, 0x25, 0x1c, 0x7d, 0x1e, 0x1e, 0x53, 0x0e, 0xe2, 0x98, 0x2b, 0xaa, 0x65, 0x8b, 0xc3, - 0x86, 0x12, 0xe5, 0xb1, 0xa5, 0x74, 
0x34, 0x9c, 0x55, 0x1f, 0xbd, 0x01, 0x13, 0x82, 0x73, 0x97, - 0x14, 0x47, 0x4c, 0xdb, 0x89, 0x9b, 0x06, 0x14, 0x27, 0xb0, 0x65, 0xb4, 0x36, 0xc6, 0x3c, 0x4b, - 0x0a, 0xc5, 0xee, 0x68, 0x6d, 0x3a, 0x1c, 0x77, 0xd5, 0x40, 0x0b, 0x30, 0xc9, 0x59, 0x2b, 0xd7, - 0xdb, 0xe2, 0x73, 0x22, 0xbc, 0x4f, 0xd4, 0x96, 0xba, 0x6d, 0x82, 0x71, 0x12, 0x1f, 0xbd, 0x0a, - 0x63, 0x4e, 0xd0, 0xd8, 0x76, 0x23, 0xd2, 0x88, 0x3a, 0x01, 0x77, 0x4b, 0xd1, 0x8c, 0x4f, 0x16, - 0x34, 0x18, 0x36, 0x30, 0xed, 0xf7, 0xe1, 0x4c, 0x8a, 0xe3, 0x1a, 0x5d, 0x38, 0x4e, 0xdb, 0x95, - 0xdf, 0x94, 0x30, 0xd8, 0x5c, 0xa8, 0x55, 0xe5, 0xd7, 0x68, 0x58, 0x74, 0x75, 0x32, 0x07, 0x37, - 0x2d, 0x09, 0x85, 0x5a, 0x9d, 0x2b, 0x12, 0x80, 0x63, 0x1c, 0xfb, 0x7f, 0xe5, 0x60, 0x32, 0x45, - 0xf9, 0xce, 0x12, 0x21, 0x24, 0x64, 0x8f, 0x38, 0xef, 0x81, 0x19, 0xfc, 0x2f, 0x77, 0x8c, 0xe0, - 0x7f, 0xf9, 0x7e, 0xc1, 0xff, 0x0a, 0x1f, 0x24, 0xf8, 0x9f, 0x39, 0x62, 0x43, 0x03, 0x8d, 0x58, - 0x4a, 0xc0, 0xc0, 0xe1, 0x63, 0x06, 0x0c, 0x34, 0x06, 0x7d, 0x64, 0x80, 0x41, 0xff, 0xe1, 0x1c, - 0x4c, 0x25, 0x8d, 0xe4, 0x4e, 0x41, 0x1d, 0xfb, 0xa6, 0xa1, 0x8e, 0x4d, 0x4f, 0x2b, 0x92, 0x34, - 0xdd, 0xcb, 0x52, 0xcd, 0xe2, 0x84, 0x6a, 0xf6, 0x93, 0x03, 0x51, 0xeb, 0xad, 0xa6, 0xfd, 0x3b, - 0x39, 0x38, 0x97, 0xac, 0xb2, 0xd4, 0x72, 0xdc, 0xdd, 0x53, 0x18, 0x9b, 0xdb, 0xc6, 0xd8, 0x3c, - 0x3f, 0xc8, 0xd7, 0xb0, 0xae, 0x65, 0x0e, 0xd0, 0xbd, 0xc4, 0x00, 0xcd, 0x0f, 0x4e, 0xb2, 0xf7, - 0x28, 0x7d, 0x23, 0x0f, 0x97, 0x52, 0xeb, 0xc5, 0xda, 0xcc, 0x15, 0x43, 0x9b, 0x79, 0x2d, 0xa1, - 0xcd, 0xb4, 0x7b, 0xd7, 0x3e, 0x19, 0xf5, 0xa6, 0xf0, 0x28, 0x64, 0x01, 0xe2, 0x1e, 0x52, 0xb5, - 0x69, 0x78, 0x14, 0x2a, 0x42, 0xd8, 0xa4, 0xfb, 0xcd, 0xa4, 0xd2, 0xfc, 0x57, 0x16, 0x5c, 0x48, - 0x9d, 0x9b, 0x53, 0x50, 0x61, 0xad, 0x99, 0x2a, 0xac, 0x67, 0x06, 0x5e, 0xad, 0x19, 0x3a, 0xad, - 0xdf, 0x2c, 0x64, 0x7c, 0x0b, 0x13, 0xd0, 0x6f, 0xc3, 0xa8, 0xd3, 0x68, 0x90, 0x30, 0x5c, 0xf5, - 0x9b, 0x2a, 0x60, 0xda, 0xf3, 0x4c, 0xce, 0x8a, 0x8b, 0x8f, 0x0e, 0xca, 0xb3, 0x49, 0x12, 0x31, - 0x18, 0xeb, 0x14, 0xcc, 0x18, 0x8f, 0xb9, 0x13, 0x8d, 0xf1, 0x78, 0x0d, 0x60, 0x4f, 0x71, 0xeb, - 0x49, 0x21, 0x5f, 0xe3, 0xe3, 0x35, 0x2c, 0xf4, 0x05, 0x28, 0x86, 0xe2, 0x1a, 0x17, 0x4b, 0xf1, - 0xc5, 0x01, 0xe7, 0xca, 0xd9, 0x20, 0x2d, 0xd3, 0x75, 0x5d, 0xe9, 0x43, 0x14, 0x49, 0xf4, 0x1d, - 0x30, 0x15, 0xf2, 0xc8, 0x28, 0x4b, 0x2d, 0x27, 0x64, 0x7e, 0x10, 0x62, 0x15, 0x32, 0x7f, 0xf4, - 0x7a, 0x02, 0x86, 0xbb, 0xb0, 0xd1, 0x8a, 0xfc, 0x28, 0x16, 0xc6, 0x85, 0x2f, 0xcc, 0x2b, 0xf1, - 0x07, 0x89, 0x34, 0x4c, 0x67, 0x93, 0xc3, 0xcf, 0x06, 0x5e, 0xab, 0x89, 0xbe, 0x00, 0x40, 0x97, - 0x8f, 0xd0, 0x25, 0x8c, 0x64, 0x1f, 0x9e, 0xf4, 0x54, 0x69, 0xa6, 0x5a, 0x7e, 0x32, 0x5f, 0xbe, - 0x8a, 0x22, 0x82, 0x35, 0x82, 0xf6, 0x0f, 0x17, 0xe0, 0xf1, 0x1e, 0x67, 0x24, 0x5a, 0x30, 0x9f, - 0x40, 0x9f, 0x4d, 0x0a, 0xd7, 0xb3, 0xa9, 0x95, 0x0d, 0x69, 0x3b, 0xb1, 0x14, 0x73, 0x1f, 0x78, - 0x29, 0xfe, 0x80, 0xa5, 0xa9, 0x3d, 0xb8, 0x31, 0xdf, 0x67, 0x8f, 0x79, 0xf6, 0x9f, 0xa0, 0x1e, - 0x64, 0x33, 0x45, 0x99, 0x70, 0x6d, 0xe0, 0xee, 0x0c, 0xac, 0x5d, 0x38, 0x5d, 0xe5, 0xef, 0x97, - 0x2d, 0x78, 0x32, 0xb5, 0xbf, 0x86, 0xc9, 0xc6, 0x3c, 0x94, 0x1a, 0xb4, 0x50, 0x73, 0xdd, 0x8a, - 0x7d, 0x5a, 0x25, 0x00, 0xc7, 0x38, 0x86, 0x65, 0x46, 0xae, 0xaf, 0x65, 0xc6, 0x3f, 0xb7, 0xa0, - 0x6b, 0x7f, 0x9c, 0xc2, 0x41, 0x5d, 0x35, 0x0f, 0xea, 0x8f, 0x0f, 0x32, 0x97, 0x19, 0x67, 0xf4, - 0x7f, 0x9e, 0x84, 0xf3, 0x19, 0xbe, 0x1a, 0x7b, 0x30, 0xbd, 0xd5, 0x20, 0xa6, 0x53, 0x9c, 0xf8, - 0x98, 0x54, 0xff, 0xc1, 0x9e, 0x1e, 0x74, 0x2c, 0x3d, 0xcf, 
0x74, 0x17, 0x0a, 0xee, 0x6e, 0x02, - 0x7d, 0xd9, 0x82, 0xb3, 0xce, 0xfd, 0xb0, 0x2b, 0x09, 0xa3, 0x58, 0x33, 0x2f, 0xa5, 0x2a, 0x41, - 0xfa, 0x24, 0x6d, 0xe4, 0xf9, 0x8a, 0xd2, 0xb0, 0x70, 0x6a, 0x5b, 0x08, 0x8b, 0x10, 0x9a, 0x94, - 0x9d, 0xef, 0xe1, 0xb6, 0x99, 0xe6, 0x54, 0xc3, 0x8f, 0x6c, 0x09, 0xc1, 0x8a, 0x0e, 0xba, 0x0b, - 0xa5, 0x2d, 0xe9, 0xe9, 0x26, 0xae, 0x84, 0xd4, 0x3b, 0x36, 0xd5, 0x1d, 0x8e, 0x3f, 0x4b, 0x2a, - 0x10, 0x8e, 0x49, 0xa1, 0x37, 0x20, 0xef, 0x6d, 0x86, 0xbd, 0x12, 0xfd, 0x24, 0x2c, 0x99, 0xb8, - 0x4b, 0xf4, 0xda, 0x4a, 0x1d, 0xd3, 0x8a, 0xe8, 0x06, 0xe4, 0x83, 0x8d, 0xa6, 0xd0, 0xdb, 0xa5, - 0x9e, 0xdc, 0x78, 0xb1, 0x92, 0xbe, 0x48, 0x38, 0x25, 0xbc, 0x58, 0xc1, 0x94, 0x04, 0xaa, 0xc1, - 0x10, 0x73, 0x6b, 0x10, 0xb7, 0x40, 0x2a, 0xbf, 0xdb, 0xc3, 0x3d, 0x88, 0xfb, 0x4d, 0x33, 0x04, - 0xcc, 0x09, 0xa1, 0x75, 0x18, 0x6e, 0xb0, 0xa4, 0x30, 0x22, 0x6a, 0xf3, 0xa7, 0x52, 0x35, 0x74, - 0x3d, 0xb2, 0xe5, 0x08, 0x85, 0x15, 0xc3, 0xc0, 0x82, 0x16, 0xa3, 0x4a, 0xda, 0xdb, 0x9b, 0x21, - 0x93, 0xf0, 0xb3, 0xa8, 0xf6, 0x48, 0x02, 0x25, 0xa8, 0x32, 0x0c, 0x2c, 0x68, 0xa1, 0xcf, 0x40, - 0x6e, 0xb3, 0x21, 0xbc, 0x1e, 0x52, 0x55, 0x75, 0xa6, 0x57, 0xfb, 0xe2, 0xf0, 0xe1, 0x41, 0x39, - 0xb7, 0xb2, 0x84, 0x73, 0x9b, 0x0d, 0xb4, 0x06, 0x23, 0x9b, 0xdc, 0x0f, 0x56, 0x68, 0xe3, 0x9e, - 0x4e, 0x77, 0xd1, 0xed, 0x72, 0x95, 0xe5, 0xd6, 0xfa, 0x02, 0x80, 0x25, 0x11, 0x16, 0x87, 0x52, - 0xf9, 0xf3, 0x8a, 0x80, 0xcc, 0x73, 0xc7, 0xf3, 0xc1, 0xe6, 0xb7, 0x72, 0xec, 0x15, 0x8c, 0x35, - 0x8a, 0xe8, 0x4b, 0x50, 0x72, 0x64, 0xfa, 0x3f, 0x11, 0xb0, 0xe2, 0xc5, 0xd4, 0x8d, 0xd9, 0x3b, - 0x33, 0x22, 0x5f, 0xd5, 0x0a, 0x09, 0xc7, 0x44, 0xd1, 0x0e, 0x8c, 0xef, 0x85, 0xed, 0x6d, 0x22, - 0x37, 0x32, 0x8b, 0x5f, 0x91, 0x71, 0x71, 0xdd, 0x15, 0x88, 0x6e, 0x10, 0x75, 0x9c, 0x56, 0xd7, - 0xd9, 0xc3, 0xde, 0xb2, 0xef, 0xea, 0xc4, 0xb0, 0x49, 0x9b, 0x0e, 0xff, 0x7b, 0x1d, 0x7f, 0x63, - 0x3f, 0x22, 0x22, 0x82, 0x73, 0xea, 0xf0, 0xbf, 0xc5, 0x51, 0xba, 0x87, 0x5f, 0x00, 0xb0, 0x24, - 0x42, 0xb7, 0xba, 0x23, 0x53, 0x6b, 0xb2, 0xc8, 0xcd, 0x19, 0x5b, 0x3d, 0x35, 0xff, 0xa6, 0x36, - 0x28, 0xec, 0x8c, 0x8c, 0x49, 0xb1, 0xb3, 0xb1, 0xbd, 0xed, 0x47, 0xbe, 0x97, 0x38, 0x97, 0xa7, - 0xb3, 0xcf, 0xc6, 0x5a, 0x0a, 0x7e, 0xf7, 0xd9, 0x98, 0x86, 0x85, 0x53, 0xdb, 0x42, 0x4d, 0x98, - 0x68, 0xfb, 0x41, 0x74, 0xdf, 0x0f, 0xe4, 0xfa, 0x42, 0x3d, 0xb4, 0x09, 0x06, 0xa6, 0x68, 0x91, - 0x45, 0x14, 0x37, 0x21, 0x38, 0x41, 0x13, 0x7d, 0x0e, 0x46, 0xc2, 0x86, 0xd3, 0x22, 0xd5, 0xdb, - 0x33, 0x67, 0xb2, 0x2f, 0x9d, 0x3a, 0x47, 0xc9, 0x58, 0x5d, 0x6c, 0x72, 0x04, 0x0a, 0x96, 0xe4, - 0xd0, 0x0a, 0x0c, 0xb1, 0xb4, 0x00, 0x2c, 0xf8, 0x74, 0x46, 0x60, 0xa4, 0x2e, 0xbb, 0x52, 0x7e, - 0x36, 0xb1, 0x62, 0xcc, 0xab, 0xd3, 0x3d, 0x20, 0x98, 0x6a, 0x3f, 0x9c, 0x39, 0x97, 0xbd, 0x07, - 0x04, 0x2f, 0x7e, 0xbb, 0xde, 0x6b, 0x0f, 0x28, 0x24, 0x1c, 0x13, 0xa5, 0x27, 0x33, 0x3d, 0x4d, - 0xcf, 0xf7, 0x30, 0x63, 0xc9, 0x3c, 0x4b, 0xd9, 0xc9, 0x4c, 0x4f, 0x52, 0x4a, 0xc2, 0xfe, 0xbd, - 0x91, 0x6e, 0x4e, 0x85, 0x89, 0x61, 0x7f, 0xd1, 0xea, 0x7a, 0xa1, 0xfb, 0xf4, 0xa0, 0x5a, 0xa1, - 0x13, 0xe4, 0x51, 0xbf, 0x6c, 0xc1, 0xf9, 0x76, 0xea, 0x87, 0x88, 0x6b, 0x7f, 0x30, 0xe5, 0x12, - 0xff, 0x74, 0x15, 0x20, 0x3e, 0x1d, 0x8e, 0x33, 0x5a, 0x4a, 0xca, 0x01, 0xf9, 0x0f, 0x2c, 0x07, - 0xac, 0x42, 0x91, 0xb1, 0x96, 0x7d, 0x92, 0xa4, 0x25, 0xc5, 0x21, 0xc6, 0x40, 0x2c, 0x89, 0x8a, - 0x58, 0x91, 0x40, 0x3f, 0x68, 0xc1, 0xc5, 0x64, 0xd7, 0x31, 0x61, 0x60, 0x11, 0x4e, 0x9d, 0x4b, - 0x80, 0x2b, 0xe2, 0xfb, 0x2f, 0xd6, 0x7a, 0x21, 0x1f, 0xf5, 0x43, 0xc0, 0xbd, 0x1b, 
0x43, 0x95, - 0x14, 0x11, 0x74, 0xd8, 0x54, 0xbb, 0x0f, 0x20, 0x86, 0xbe, 0x04, 0x63, 0xbb, 0x7e, 0xc7, 0x8b, - 0x84, 0xd5, 0x8b, 0xf0, 0x53, 0x64, 0xcf, 0xcc, 0xab, 0x5a, 0x39, 0x36, 0xb0, 0x12, 0xc2, 0x6b, - 0xf1, 0xa1, 0x85, 0xd7, 0x77, 0x12, 0xa9, 0xb0, 0x4b, 0xd9, 0x61, 0xfb, 0x84, 0x9c, 0x7f, 0x8c, - 0x84, 0xd8, 0xa7, 0x2b, 0x11, 0xfd, 0x8c, 0x95, 0xc2, 0xca, 0x73, 0x19, 0xf9, 0x75, 0x53, 0x46, - 0xbe, 0x92, 0x94, 0x91, 0xbb, 0x54, 0xae, 0x86, 0x78, 0x3c, 0x78, 0xec, 0xe7, 0x41, 0x83, 0xa9, - 0xd9, 0x2d, 0xb8, 0xdc, 0xef, 0x5a, 0x62, 0xe6, 0x4f, 0x4d, 0xf5, 0xc0, 0x16, 0x9b, 0x3f, 0x35, - 0xab, 0x15, 0xcc, 0x20, 0x83, 0x46, 0xdb, 0xb0, 0xff, 0x9b, 0x05, 0xf9, 0x9a, 0xdf, 0x3c, 0x05, - 0x15, 0xf2, 0x67, 0x0d, 0x15, 0xf2, 0xe3, 0x19, 0x29, 0xca, 0x33, 0x15, 0xc6, 0xcb, 0x09, 0x85, - 0xf1, 0xc5, 0x2c, 0x02, 0xbd, 0xd5, 0xc3, 0x3f, 0x99, 0x07, 0x3d, 0xa1, 0x3a, 0xfa, 0xcd, 0x87, - 0xb1, 0x3d, 0xce, 0xf7, 0xca, 0xb1, 0x2e, 0x28, 0x33, 0xab, 0x29, 0xe9, 0x7a, 0xf7, 0x67, 0xcc, - 0x04, 0xf9, 0x1e, 0x71, 0xb7, 0xb6, 0x23, 0xd2, 0x4c, 0x7e, 0xce, 0xe9, 0x99, 0x20, 0xff, 0x17, - 0x0b, 0x26, 0x13, 0xad, 0xa3, 0x16, 0x8c, 0xb7, 0x74, 0xfd, 0x9f, 0x58, 0xa7, 0x0f, 0xa5, 0x3a, - 0x14, 0x26, 0x9c, 0x5a, 0x11, 0x36, 0x89, 0xa3, 0x39, 0x00, 0xf5, 0x3e, 0x27, 0xf5, 0x5e, 0x8c, - 0xeb, 0x57, 0x0f, 0x78, 0x21, 0xd6, 0x30, 0xd0, 0xcb, 0x30, 0x1a, 0xf9, 0x6d, 0xbf, 0xe5, 0x6f, - 0xed, 0xdf, 0x24, 0x32, 0xbe, 0x8b, 0x32, 0xcc, 0x5a, 0x8f, 0x41, 0x58, 0xc7, 0xb3, 0x7f, 0x3a, - 0x0f, 0xc9, 0x24, 0xfc, 0xdf, 0x5a, 0x93, 0x1f, 0xcd, 0x35, 0xf9, 0x0d, 0x0b, 0xa6, 0x68, 0xeb, - 0xcc, 0x48, 0x44, 0x5e, 0xb6, 0x2a, 0x07, 0x8d, 0xd5, 0x23, 0x07, 0xcd, 0x15, 0x7a, 0x76, 0x35, - 0xfd, 0x4e, 0x24, 0xf4, 0x66, 0xda, 0xe1, 0x44, 0x4b, 0xb1, 0x80, 0x0a, 0x3c, 0x12, 0x04, 0xc2, - 0xf3, 0x49, 0xc7, 0x23, 0x41, 0x80, 0x05, 0x54, 0xa6, 0xa8, 0x29, 0x64, 0xa4, 0xa8, 0x61, 0xd1, - 0xea, 0x84, 0x39, 0x81, 0x60, 0x7b, 0xb4, 0x68, 0x75, 0xd2, 0xce, 0x20, 0xc6, 0xb1, 0x7f, 0x3e, - 0x0f, 0x63, 0x35, 0xbf, 0x19, 0xbf, 0x90, 0xbd, 0x64, 0xbc, 0x90, 0x5d, 0x4e, 0xbc, 0x90, 0x4d, - 0xe9, 0xb8, 0xdf, 0x7a, 0x0f, 0xfb, 0xb0, 0xde, 0xc3, 0xfe, 0x99, 0xc5, 0x66, 0xad, 0xb2, 0x56, - 0x17, 0x29, 0x72, 0x5f, 0x80, 0x51, 0x76, 0x20, 0x31, 0x57, 0x3b, 0xf9, 0x6c, 0xc4, 0xa2, 0xcf, - 0xaf, 0xc5, 0xc5, 0x58, 0xc7, 0x41, 0x57, 0xa1, 0x18, 0x12, 0x27, 0x68, 0x6c, 0xab, 0x33, 0x4e, - 0x3c, 0xaa, 0xf0, 0x32, 0xac, 0xa0, 0xe8, 0xad, 0x38, 0x50, 0x5a, 0x3e, 0x3b, 0xd9, 0xab, 0xde, - 0x1f, 0xbe, 0x45, 0xb2, 0xa3, 0xa3, 0xd9, 0xf7, 0x00, 0x75, 0xe3, 0x0f, 0x10, 0x12, 0xa9, 0x6c, - 0x86, 0x44, 0x2a, 0x75, 0x85, 0x43, 0xfa, 0x13, 0x0b, 0x26, 0x6a, 0x7e, 0x93, 0x6e, 0xdd, 0x6f, - 0xa6, 0x7d, 0xaa, 0x47, 0x89, 0x1c, 0xee, 0x11, 0x25, 0xf2, 0xef, 0x5a, 0x30, 0x52, 0xf3, 0x9b, - 0xa7, 0xa0, 0x6d, 0x7f, 0xdd, 0xd4, 0xb6, 0x3f, 0x96, 0xb1, 0x24, 0x32, 0x14, 0xec, 0xbf, 0x98, - 0x87, 0x71, 0xda, 0x4f, 0x7f, 0x4b, 0xce, 0x92, 0x31, 0x22, 0xd6, 0x00, 0x23, 0x42, 0xd9, 0x5c, - 0xbf, 0xd5, 0xf2, 0xef, 0x27, 0x67, 0x6c, 0x85, 0x95, 0x62, 0x01, 0x45, 0xcf, 0x41, 0xb1, 0x1d, - 0x90, 0x3d, 0xd7, 0x17, 0xfc, 0xa3, 0xf6, 0x76, 0x51, 0x13, 0xe5, 0x58, 0x61, 0x50, 0xb9, 0x2b, - 0x74, 0xbd, 0x06, 0x91, 0x99, 0xa6, 0x0b, 0x2c, 0x19, 0x15, 0x0f, 0xff, 0xac, 0x95, 0x63, 0x03, - 0x0b, 0xdd, 0x83, 0x12, 0xfb, 0xcf, 0x4e, 0x94, 0xe3, 0x27, 0xcf, 0x11, 0x39, 0x17, 0x04, 0x01, - 0x1c, 0xd3, 0x42, 0xd7, 0x00, 0x22, 0x19, 0x22, 0x38, 0x14, 0x91, 0x6d, 0x14, 0xaf, 0xad, 0x82, - 0x07, 0x87, 0x58, 0xc3, 0x42, 0xcf, 0x42, 0x29, 0x72, 0xdc, 0xd6, 0x2d, 0xd7, 0x23, 0x21, 0x53, - 0x39, 0xe7, 
0x65, 0x4a, 0x05, 0x51, 0x88, 0x63, 0x38, 0xe5, 0x75, 0x98, 0xdb, 0x37, 0x4f, 0xbd, - 0x55, 0x64, 0xd8, 0x8c, 0xd7, 0xb9, 0xa5, 0x4a, 0xb1, 0x86, 0x61, 0xbf, 0x0a, 0xe7, 0x6a, 0x7e, - 0xb3, 0xe6, 0x07, 0xd1, 0x8a, 0x1f, 0xdc, 0x77, 0x82, 0xa6, 0x9c, 0xbf, 0xb2, 0x8c, 0xee, 0x4f, - 0xcf, 0x9e, 0x21, 0xbe, 0x33, 0x8d, 0xb8, 0xfd, 0x2f, 0x32, 0x6e, 0xe7, 0x98, 0xae, 0x1c, 0x0d, - 0x76, 0xef, 0xaa, 0x2c, 0x7b, 0xd7, 0x9d, 0x88, 0xa0, 0xdb, 0x2c, 0x33, 0x57, 0x7c, 0x05, 0x89, - 0xea, 0xcf, 0x68, 0x99, 0xb9, 0x62, 0x60, 0xea, 0x9d, 0x65, 0xd6, 0xb7, 0x7f, 0x2d, 0xcf, 0x4e, - 0xa3, 0x44, 0xd2, 0x39, 0xf4, 0x45, 0x98, 0x08, 0xc9, 0x2d, 0xd7, 0xeb, 0x3c, 0x90, 0x42, 0x78, - 0x0f, 0x67, 0x9c, 0xfa, 0xb2, 0x8e, 0xc9, 0x55, 0x79, 0x66, 0x19, 0x4e, 0x50, 0xa3, 0xf3, 0x14, - 0x74, 0xbc, 0x85, 0xf0, 0x4e, 0x48, 0x02, 0x91, 0xf4, 0x8c, 0xcd, 0x13, 0x96, 0x85, 0x38, 0x86, - 0xd3, 0x75, 0xc9, 0xfe, 0xac, 0xf9, 0x1e, 0xf6, 0xfd, 0x48, 0xae, 0x64, 0x96, 0x36, 0x47, 0x2b, - 0xc7, 0x06, 0x16, 0x5a, 0x01, 0x14, 0x76, 0xda, 0xed, 0x16, 0x7b, 0xce, 0x77, 0x5a, 0xd7, 0x03, - 0xbf, 0xd3, 0xe6, 0x6f, 0x9d, 0xf9, 0xc5, 0xf3, 0xf4, 0x0a, 0xab, 0x77, 0x41, 0x71, 0x4a, 0x0d, - 0x7a, 0xfa, 0x6c, 0x86, 0xec, 0x37, 0x5b, 0xdd, 0x79, 0xa1, 0x5e, 0xaf, 0xb3, 0x22, 0x2c, 0x61, - 0x74, 0x31, 0xb1, 0xe6, 0x39, 0xe6, 0x70, 0xbc, 0x98, 0xb0, 0x2a, 0xc5, 0x1a, 0x06, 0x5a, 0x86, - 0x91, 0x70, 0x3f, 0x6c, 0x44, 0x22, 0x0e, 0x53, 0x46, 0xfa, 0xca, 0x3a, 0x43, 0xd1, 0x52, 0x2a, - 0xf0, 0x2a, 0x58, 0xd6, 0xb5, 0xbf, 0x87, 0x5d, 0x86, 0x2c, 0x45, 0x56, 0xd4, 0x09, 0x08, 0xda, - 0x85, 0xf1, 0x36, 0x9b, 0x72, 0x11, 0xc0, 0x59, 0xcc, 0xdb, 0x4b, 0x03, 0x4a, 0xb5, 0xf7, 0xe9, - 0x41, 0xa3, 0xb4, 0x4e, 0x4c, 0x5c, 0xa8, 0xe9, 0xe4, 0xb0, 0x49, 0xdd, 0xfe, 0xd7, 0xd3, 0xec, - 0xcc, 0xad, 0x73, 0x51, 0x75, 0x44, 0x18, 0x14, 0x0b, 0xbe, 0x7c, 0x36, 0x5b, 0x67, 0x12, 0x7f, - 0x91, 0x30, 0x4a, 0xc6, 0xb2, 0x2e, 0x7a, 0x8b, 0xbd, 0x4d, 0xf3, 0x83, 0xae, 0x5f, 0xa6, 0x62, - 0x8e, 0x65, 0x3c, 0x43, 0x8b, 0x8a, 0x58, 0x23, 0x82, 0x6e, 0xc1, 0xb8, 0xc8, 0xa8, 0x24, 0x94, - 0x62, 0x79, 0x43, 0xe9, 0x31, 0x8e, 0x75, 0xe0, 0x51, 0xb2, 0x00, 0x9b, 0x95, 0xd1, 0x16, 0x5c, - 0xd4, 0xd2, 0x0b, 0x5e, 0x0f, 0x1c, 0xf6, 0x5e, 0xe9, 0xb2, 0x4d, 0xa4, 0x9d, 0x9b, 0x4f, 0x1e, - 0x1e, 0x94, 0x2f, 0xae, 0xf7, 0x42, 0xc4, 0xbd, 0xe9, 0xa0, 0xdb, 0x70, 0x8e, 0xfb, 0xed, 0x55, - 0x88, 0xd3, 0x6c, 0xb9, 0x9e, 0x3a, 0x98, 0xf9, 0x3a, 0xbc, 0x70, 0x78, 0x50, 0x3e, 0xb7, 0x90, - 0x86, 0x80, 0xd3, 0xeb, 0xa1, 0xd7, 0xa1, 0xd4, 0xf4, 0x42, 0x31, 0x06, 0xc3, 0x46, 0xe6, 0xcc, - 0x52, 0x65, 0xad, 0xae, 0xbe, 0x3f, 0xfe, 0x83, 0xe3, 0x0a, 0x68, 0x8b, 0x2b, 0xc6, 0x94, 0x1c, - 0x3a, 0x92, 0x9d, 0x25, 0x5d, 0x2c, 0x09, 0xc3, 0x73, 0x87, 0x6b, 0x84, 0x95, 0xe5, 0xab, 0xe1, - 0xd4, 0x63, 0x10, 0x46, 0x6f, 0x02, 0xa2, 0x8c, 0x9a, 0xdb, 0x20, 0x0b, 0x0d, 0x16, 0x47, 0x9b, - 0xe9, 0x11, 0x8b, 0x86, 0xa7, 0x04, 0xaa, 0x77, 0x61, 0xe0, 0x94, 0x5a, 0xe8, 0x06, 0x3d, 0xc8, - 0xf4, 0x52, 0x61, 0xc1, 0x2b, 0x99, 0xfb, 0x99, 0x0a, 0x69, 0x07, 0xa4, 0xe1, 0x44, 0xa4, 0x69, - 0x52, 0xc4, 0x89, 0x7a, 0xf4, 0x2e, 0x55, 0x29, 0x75, 0xc0, 0x0c, 0x96, 0xd1, 0x9d, 0x56, 0x87, - 0xca, 0xc5, 0xdb, 0x7e, 0x18, 0xad, 0x91, 0xe8, 0xbe, 0x1f, 0xec, 0x88, 0xd8, 0x64, 0x71, 0x98, - 0xcc, 0x18, 0x84, 0x75, 0x3c, 0xca, 0x07, 0xb3, 0xc7, 0xe1, 0x6a, 0x85, 0xbd, 0xd0, 0x15, 0xe3, - 0x7d, 0x72, 0x83, 0x17, 0x63, 0x09, 0x97, 0xa8, 0xd5, 0xda, 0x12, 0x7b, 0x6d, 0x4b, 0xa0, 0x56, - 0x6b, 0x4b, 0x58, 0xc2, 0x11, 0xe9, 0xce, 0x4a, 0x3a, 0x91, 0xad, 0xd5, 0xec, 0xbe, 0x0e, 0x06, - 0x4c, 0x4c, 0xea, 0xc1, 0x94, 0xca, 
- ... remaining compressed fileDescriptor bytes of the deleted vendor/k8s.io/api/core/v1/generated.pb.go ...
0xff, 0xff, 0x20, 0x56, 0xf9, 0x8e, 0x0d, 0xe7, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/core/v1/generated.proto b/vendor/k8s.io/api/core/v1/generated.proto deleted file mode 100644 index f76251d..0000000 --- a/vendor/k8s.io/api/core/v1/generated.proto +++ /dev/null @@ -1,4757 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.core.v1; - -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -message AWSElasticBlockStoreVolumeSource { - // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - optional string volumeID = 1; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - optional string fsType = 2; - - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // +optional - optional int32 partition = 3; - - // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - // If omitted, the default is "false". - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - // +optional - optional bool readOnly = 4; -} - -// Affinity is a group of affinity scheduling rules. -message Affinity { - // Describes node affinity scheduling rules for the pod. - // +optional - optional NodeAffinity nodeAffinity = 1; - - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - // +optional - optional PodAffinity podAffinity = 2; - - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). 
- // +optional - optional PodAntiAffinity podAntiAffinity = 3; -} - -// AttachedVolume describes a volume attached to a node -message AttachedVolume { - // Name of the attached volume - optional string name = 1; - - // DevicePath represents the device path where the volume should be available - optional string devicePath = 2; -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -message AvoidPods { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. - // +optional - repeated PreferAvoidPodsEntry preferAvoidPods = 1; -} - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. -message AzureDiskVolumeSource { - // The Name of the data disk in the blob storage - optional string diskName = 1; - - // The URI the data disk in the blob storage - optional string diskURI = 2; - - // Host Caching mode: None, Read Only, Read Write. - // +optional - optional string cachingMode = 3; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - optional string fsType = 4; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 5; - - // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared - optional string kind = 6; -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -message AzureFilePersistentVolumeSource { - // the name of secret that contains Azure Storage Account Name and Key - optional string secretName = 1; - - // Share Name - optional string shareName = 2; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 3; - - // the namespace of the secret that contains Azure Storage Account Name and Key - // default is the same as the Pod - // +optional - optional string secretNamespace = 4; -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -message AzureFileVolumeSource { - // the name of secret that contains Azure Storage Account Name and Key - optional string secretName = 1; - - // Share Name - optional string shareName = 2; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 3; -} - -// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. -message Binding { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The target object that you want to bind to the standard object. 
- optional ObjectReference target = 2; -} - -// Represents storage that is managed by an external CSI volume driver (Beta feature) -message CSIPersistentVolumeSource { - // Driver is the name of the driver to use for this volume. - // Required. - optional string driver = 1; - - // VolumeHandle is the unique volume name returned by the CSI volume - // plugin’s CreateVolume to refer to the volume on all subsequent calls. - // Required. - optional string volumeHandle = 2; - - // Optional: The value to pass to ControllerPublishVolumeRequest. - // Defaults to false (read/write). - // +optional - optional bool readOnly = 3; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // +optional - optional string fsType = 4; - - // Attributes of the volume to publish. - // +optional - map volumeAttributes = 5; - - // ControllerPublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - optional SecretReference controllerPublishSecretRef = 6; - - // NodeStageSecretRef is a reference to the secret object containing sensitive - // information to pass to the CSI driver to complete the CSI NodeStageVolume - // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - optional SecretReference nodeStageSecretRef = 7; - - // NodePublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - optional SecretReference nodePublishSecretRef = 8; -} - -// Adds and removes POSIX capabilities from running containers. -message Capabilities { - // Added capabilities - // +optional - repeated string add = 1; - - // Removed capabilities - // +optional - repeated string drop = 2; -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -message CephFSPersistentVolumeSource { - // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - repeated string monitors = 1; - - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - optional string path = 2; - - // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - optional string user = 3; - - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - optional string secretFile = 4; - - // Optional: SecretRef is reference to the authentication secret for User, default is empty. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - optional SecretReference secretRef = 5; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - optional bool readOnly = 6; -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -message CephFSVolumeSource { - // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - repeated string monitors = 1; - - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - optional string path = 2; - - // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - optional string user = 3; - - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - optional string secretFile = 4; - - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - optional LocalObjectReference secretRef = 5; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - optional bool readOnly = 6; -} - -// Represents a cinder volume resource in Openstack. -// A Cinder volume must exist before mounting to a container. -// The volume must also be in the same region as the kubelet. -// Cinder volumes support ownership management and SELinux relabeling. -message CinderPersistentVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - optional string volumeID = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - optional string fsType = 2; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - optional bool readOnly = 3; - - // Optional: points to a secret object containing parameters used to connect - // to OpenStack. - // +optional - optional SecretReference secretRef = 4; -} - -// Represents a cinder volume resource in Openstack. -// A Cinder volume must exist before mounting to a container. -// The volume must also be in the same region as the kubelet. -// Cinder volumes support ownership management and SELinux relabeling. -message CinderVolumeSource { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - optional string volumeID = 1; - - // Filesystem type to mount. 
- // Must be a filesystem type supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - optional string fsType = 2; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - optional bool readOnly = 3; - - // Optional: points to a secret object containing parameters used to connect - // to OpenStack. - // +optional - optional LocalObjectReference secretRef = 4; -} - -// ClientIPConfig represents the configurations of Client IP based session affinity. -message ClientIPConfig { - // timeoutSeconds specifies the seconds of ClientIP type session sticky time. - // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - // Default value is 10800(for 3 hours). - // +optional - optional int32 timeoutSeconds = 1; -} - -// Information about the condition of a component. -message ComponentCondition { - // Type of condition for a component. - // Valid value: "Healthy" - optional string type = 1; - - // Status of the condition for a component. - // Valid values for "Healthy": "True", "False", or "Unknown". - optional string status = 2; - - // Message about the condition for a component. - // For example, information about a health check. - // +optional - optional string message = 3; - - // Condition error code for a component. - // For example, a health check error code. - // +optional - optional string error = 4; -} - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -message ComponentStatus { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // List of component conditions observed - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated ComponentCondition conditions = 2; -} - -// Status of all the conditions for the component as a list of ComponentStatus objects. -message ComponentStatusList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ComponentStatus objects. - repeated ComponentStatus items = 2; -} - -// ConfigMap holds configuration data for pods to consume. -message ConfigMap { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data contains the configuration data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // Values with non-UTF-8 byte sequences must use the BinaryData field. - // The keys stored in Data must not overlap with the keys in - // the BinaryData field, this is enforced during validation process. - // +optional - map data = 2; - - // BinaryData contains the binary data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // BinaryData can contain byte sequences that are not in the UTF-8 range. - // The keys stored in BinaryData must not overlap with the ones in - // the Data field, this is enforced during validation process. 
- // Using this field will require 1.10+ apiserver and - // kubelet. - // +optional - map binaryData = 3; -} - -// ConfigMapEnvSource selects a ConfigMap to populate the environment -// variables with. -// -// The contents of the target ConfigMap's Data field will represent the -// key-value pairs as environment variables. -message ConfigMapEnvSource { - // The ConfigMap to select from. - optional LocalObjectReference localObjectReference = 1; - - // Specify whether the ConfigMap must be defined - // +optional - optional bool optional = 2; -} - -// Selects a key from a ConfigMap. -message ConfigMapKeySelector { - // The ConfigMap to select from. - optional LocalObjectReference localObjectReference = 1; - - // The key to select. - optional string key = 2; - - // Specify whether the ConfigMap or it's key must be defined - // +optional - optional bool optional = 3; -} - -// ConfigMapList is a resource containing a list of ConfigMap objects. -message ConfigMapList { - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of ConfigMaps. - repeated ConfigMap items = 2; -} - -// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. -message ConfigMapNodeConfigSource { - // Namespace is the metadata.namespace of the referenced ConfigMap. - // This field is required in all cases. - optional string namespace = 1; - - // Name is the metadata.name of the referenced ConfigMap. - // This field is required in all cases. - optional string name = 2; - - // UID is the metadata.UID of the referenced ConfigMap. - // This field is forbidden in Node.Spec, and required in Node.Status. - // +optional - optional string uid = 3; - - // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. - // This field is forbidden in Node.Spec, and required in Node.Status. - // +optional - optional string resourceVersion = 4; - - // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure - // This field is required in all cases. - optional string kubeletConfigKey = 5; -} - -// Adapts a ConfigMap into a projected volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names, -// unless the items element is populated with specific mappings of keys to paths. -// Note that this is identical to a configmap volume source without the default -// mode. -message ConfigMapProjection { - optional LocalObjectReference localObjectReference = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - repeated KeyToPath items = 2; - - // Specify whether the ConfigMap or it's keys must be defined - // +optional - optional bool optional = 4; -} - -// Adapts a ConfigMap into a volume. 
-// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -message ConfigMapVolumeSource { - optional LocalObjectReference localObjectReference = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - repeated KeyToPath items = 2; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - optional int32 defaultMode = 3; - - // Specify whether the ConfigMap or it's keys must be defined - // +optional - optional bool optional = 4; -} - -// A single application container that you want to run within a pod. -message Container { - // Name of the container specified as a DNS_LABEL. - // Each container in a pod must have a unique name (DNS_LABEL). - // Cannot be updated. - optional string name = 1; - - // Docker image name. - // More info: https://kubernetes.io/docs/concepts/containers/images - // This field is optional to allow higher level config management to default or override - // container images in workload controllers like Deployments and StatefulSets. - // +optional - optional string image = 2; - - // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - repeated string command = 3; - - // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - repeated string args = 4; - - // Container's working directory. - // If not specified, the container runtime's default will be used, which - // might be configured in the container image. 
- // Cannot be updated. - // +optional - optional string workingDir = 5; - - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here - // DOES NOT prevent that port from being exposed. Any port which is - // listening on the default "0.0.0.0" address inside a container will be - // accessible from the network. - // Cannot be updated. - // +optional - // +patchMergeKey=containerPort - // +patchStrategy=merge - repeated ContainerPort ports = 6; - - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - repeated EnvFromSource envFrom = 19; - - // List of environment variables to set in the container. - // Cannot be updated. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated EnvVar env = 7; - - // Compute Resources required by this container. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - // +optional - optional ResourceRequirements resources = 8; - - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - // +optional - // +patchMergeKey=mountPath - // +patchStrategy=merge - repeated VolumeMount volumeMounts = 9; - - // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. - // +patchMergeKey=devicePath - // +patchStrategy=merge - // +optional - repeated VolumeDevice volumeDevices = 21; - - // Periodic probe of container liveness. - // Container will be restarted if the probe fails. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - optional Probe livenessProbe = 10; - - // Periodic probe of container service readiness. - // Container will be removed from service endpoints if the probe fails. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - optional Probe readinessProbe = 11; - - // Actions that the management system should take in response to container lifecycle events. - // Cannot be updated. - // +optional - optional Lifecycle lifecycle = 12; - - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. - // Message written is intended to be brief final status, such as an assertion failure message. - // Will be truncated by the node if greater than 4096 bytes. The total message length across - // all containers will be limited to 12kb. - // Defaults to /dev/termination-log. - // Cannot be updated. - // +optional - optional string terminationMessagePath = 13; - - // Indicate how the termination message should be populated. File will use the contents of - // terminationMessagePath to populate the container status message on both success and failure. 
- // FallbackToLogsOnError will use the last chunk of container log output if the termination - // message file is empty and the container exited with an error. - // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - // Defaults to File. - // Cannot be updated. - // +optional - optional string terminationMessagePolicy = 20; - - // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - // +optional - optional string imagePullPolicy = 14; - - // Security options the pod should run with. - // More info: https://kubernetes.io/docs/concepts/policy/security-context/ - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional SecurityContext securityContext = 15; - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - // +optional - optional bool stdin = 16; - - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - // +optional - optional bool stdinOnce = 17; - - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - // +optional - optional bool tty = 18; -} - -// Describe a container image -message ContainerImage { - // Names by which this image is known. - // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] - repeated string names = 1; - - // The size of the image in bytes. - // +optional - optional int64 sizeBytes = 2; -} - -// ContainerPort represents a network port in a single container. -message ContainerPort { - // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - // named port in a pod must have a unique name. Name for the port that can be - // referred to by services. - // +optional - optional string name = 1; - - // Number of port to expose on the host. - // If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // Most containers do not need this. - // +optional - optional int32 hostPort = 2; - - // Number of port to expose on the pod's IP address. - // This must be a valid port number, 0 < x < 65536. - optional int32 containerPort = 3; - - // Protocol for port. Must be UDP, TCP, or SCTP. - // Defaults to "TCP". - // +optional - optional string protocol = 4; - - // What host IP to bind the external port to. - // +optional - optional string hostIP = 5; -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. 
-message ContainerState { - // Details about a waiting container - // +optional - optional ContainerStateWaiting waiting = 1; - - // Details about a running container - // +optional - optional ContainerStateRunning running = 2; - - // Details about a terminated container - // +optional - optional ContainerStateTerminated terminated = 3; -} - -// ContainerStateRunning is a running state of a container. -message ContainerStateRunning { - // Time at which the container was last (re-)started - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 1; -} - -// ContainerStateTerminated is a terminated state of a container. -message ContainerStateTerminated { - // Exit status from the last termination of the container - optional int32 exitCode = 1; - - // Signal from the last termination of the container - // +optional - optional int32 signal = 2; - - // (brief) reason from the last termination of the container - // +optional - optional string reason = 3; - - // Message regarding the last termination of the container - // +optional - optional string message = 4; - - // Time at which previous execution of the container started - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startedAt = 5; - - // Time at which the container last terminated - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time finishedAt = 6; - - // Container's ID in the format 'docker://' - // +optional - optional string containerID = 7; -} - -// ContainerStateWaiting is a waiting state of a container. -message ContainerStateWaiting { - // (brief) reason the container is not yet running. - // +optional - optional string reason = 1; - - // Message regarding why the container is not yet running. - // +optional - optional string message = 2; -} - -// ContainerStatus contains details for the current status of this container. -message ContainerStatus { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. - // Cannot be updated. - optional string name = 1; - - // Details about the container's current condition. - // +optional - optional ContainerState state = 2; - - // Details about the container's last termination condition. - // +optional - optional ContainerState lastState = 3; - - // Specifies whether the container has passed its readiness probe. - optional bool ready = 4; - - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - optional int32 restartCount = 5; - - // The image the container is running. - // More info: https://kubernetes.io/docs/concepts/containers/images - // TODO(dchen1107): Which image the container is running with? - optional string image = 6; - - // ImageID of the container's image. - optional string imageID = 7; - - // Container's ID in the format 'docker://'. - // +optional - optional string containerID = 8; -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -message DaemonEndpoint { - // Port number of the given endpoint. - optional int32 Port = 1; -} - -// Represents downward API info for projecting into a projected volume. -// Note that this is identical to a downwardAPI volume source without the default -// mode. 
-message DownwardAPIProjection { - // Items is a list of DownwardAPIVolume file - // +optional - repeated DownwardAPIVolumeFile items = 1; -} - -// DownwardAPIVolumeFile represents information to create the file containing the pod field -message DownwardAPIVolumeFile { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - optional string path = 1; - - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - // +optional - optional ObjectFieldSelector fieldRef = 2; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - // +optional - optional ResourceFieldSelector resourceFieldRef = 3; - - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - optional int32 mode = 4; -} - -// DownwardAPIVolumeSource represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -message DownwardAPIVolumeSource { - // Items is a list of downward API volume file - // +optional - repeated DownwardAPIVolumeFile items = 1; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - optional int32 defaultMode = 2; -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -message EmptyDirVolumeSource { - // What type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // Must be an empty string (default) or Memory. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - // +optional - optional string medium = 1; - - // Total amount of local storage required for this EmptyDir volume. - // The size limit is also applicable for memory medium. - // The maximum usage on memory medium EmptyDir would be the minimum value between - // the SizeLimit specified here and the sum of memory limits of all containers in a pod. - // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity sizeLimit = 2; -} - -// EndpointAddress is a tuple that describes single IP address. -message EndpointAddress { - // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. - optional string ip = 1; - - // The Hostname of this endpoint - // +optional - optional string hostname = 3; - - // Optional: Node hosting this endpoint. 
This can be used to determine endpoints local to a node. - // +optional - optional string nodeName = 4; - - // Reference to object providing the endpoint. - // +optional - optional ObjectReference targetRef = 2; -} - -// EndpointPort is a tuple that describes a single port. -message EndpointPort { - // The name of this port (corresponds to ServicePort.Name). - // Must be a DNS_LABEL. - // Optional only if one port is defined. - // +optional - optional string name = 1; - - // The port number of the endpoint. - optional int32 port = 2; - - // The IP protocol for this port. - // Must be UDP, TCP, or SCTP. - // Default is TCP. - // +optional - optional string protocol = 3; -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -message EndpointSubset { - // IP addresses which offer the related ports that are marked as ready. These endpoints - // should be considered safe for load balancers and clients to utilize. - // +optional - repeated EndpointAddress addresses = 1; - - // IP addresses which offer the related ports but are not currently marked as ready - // because they have not yet finished starting, have recently failed a readiness check, - // or have recently failed a liveness check. - // +optional - repeated EndpointAddress notReadyAddresses = 2; - - // Port numbers available on the related IP addresses. - // +optional - repeated EndpointPort ports = 3; -} - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -message Endpoints { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The set of all endpoints is the union of all subsets. Addresses are placed into - // subsets according to the IPs they share. A single address with multiple ports, - // some of which are ready and some of which are not (because they come from - // different containers) will result in the address being displayed in different - // subsets for the different ports. No address will appear in both Addresses and - // NotReadyAddresses in the same subset. - // Sets of addresses and ports that comprise a service. - // +optional - repeated EndpointSubset subsets = 2; -} - -// EndpointsList is a list of endpoints. -message EndpointsList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of endpoints. - repeated Endpoints items = 2; -} - -// EnvFromSource represents the source of a set of ConfigMaps -message EnvFromSource { - // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. 
- // +optional - optional string prefix = 1; - - // The ConfigMap to select from - // +optional - optional ConfigMapEnvSource configMapRef = 2; - - // The Secret to select from - // +optional - optional SecretEnvSource secretRef = 3; -} - -// EnvVar represents an environment variable present in a Container. -message EnvVar { - // Name of the environment variable. Must be a C_IDENTIFIER. - optional string name = 1; - - // Variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // Defaults to "". - // +optional - optional string value = 2; - - // Source for the environment variable's value. Cannot be used if value is not empty. - // +optional - optional EnvVarSource valueFrom = 3; -} - -// EnvVarSource represents a source for the value of an EnvVar. -message EnvVarSource { - // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. - // +optional - optional ObjectFieldSelector fieldRef = 1; - - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - // +optional - optional ResourceFieldSelector resourceFieldRef = 2; - - // Selects a key of a ConfigMap. - // +optional - optional ConfigMapKeySelector configMapKeyRef = 3; - - // Selects a key of a secret in the pod's namespace - // +optional - optional SecretKeySelector secretKeyRef = 4; -} - -// Event is a report of an event somewhere in the cluster. -message Event { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The object that this event is about. - optional ObjectReference involvedObject = 2; - - // This should be a short, machine understandable string that gives the reason - // for the transition into the object's current status. - // TODO: provide exact specification for format. - // +optional - optional string reason = 3; - - // A human-readable description of the status of this operation. - // TODO: decide on maximum length. - // +optional - optional string message = 4; - - // The component reporting this event. Should be a short machine understandable string. - // +optional - optional EventSource source = 5; - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time firstTimestamp = 6; - - // The time at which the most recent occurrence of this event was recorded. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTimestamp = 7; - - // The number of times this event has occurred. - // +optional - optional int32 count = 8; - - // Type of this event (Normal, Warning), new types could be added in the future - // +optional - optional string type = 9; - - // Time when this Event was first observed. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 10; - - // Data about the Event series this event represents or nil if it's a singleton Event. - // +optional - optional EventSeries series = 11; - - // What action was taken/failed regarding to the Regarding object. - // +optional - optional string action = 12; - - // Optional secondary object for more complex actions. - // +optional - optional ObjectReference related = 13; - - // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. - // +optional - optional string reportingComponent = 14; - - // ID of the controller instance, e.g. `kubelet-xyzf`. - // +optional - optional string reportingInstance = 15; -} - -// EventList is a list of events. -message EventList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of events - repeated Event items = 2; -} - -// EventSeries contain information on series of events, i.e. thing that was/is happening -// continuously for some time. -message EventSeries { - // Number of occurrences in this series up to the last heartbeat time - optional int32 count = 1; - - // Time of the last occurrence observed - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; - - // State of this Series: Ongoing or Finished - optional string state = 3; -} - -// EventSource contains information for an event. -message EventSource { - // Component from which the event is generated. - // +optional - optional string component = 1; - - // Node name on which the event is generated. - // +optional - optional string host = 2; -} - -// ExecAction describes a "run in container" action. -message ExecAction { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - // +optional - repeated string command = 1; -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -message FCVolumeSource { - // Optional: FC target worldwide names (WWNs) - // +optional - repeated string targetWWNs = 1; - - // Optional: FC target lun number - // +optional - optional int32 lun = 2; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - optional string fsType = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 4; - - // Optional: FC volume world wide identifiers (wwids) - // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. - // +optional - repeated string wwids = 5; -} - -// FlexPersistentVolumeSource represents a generic persistent volume resource that is -// provisioned/attached using an exec based plugin. 
-message FlexPersistentVolumeSource { - // Driver is the name of the driver to use for this volume. - optional string driver = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - optional string fsType = 2; - - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - optional SecretReference secretRef = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 4; - - // Optional: Extra command options if any. - // +optional - map options = 5; -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. -message FlexVolumeSource { - // Driver is the name of the driver to use for this volume. - optional string driver = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - optional string fsType = 2; - - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - optional LocalObjectReference secretRef = 3; - - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 4; - - // Optional: Extra command options if any. - // +optional - map options = 5; -} - -// Represents a Flocker volume mounted by the Flocker agent. -// One and only one of datasetName and datasetUUID should be set. -// Flocker volumes do not support ownership management or SELinux relabeling. -message FlockerVolumeSource { - // Name of the dataset stored as metadata -> name on the dataset for Flocker - // should be considered as deprecated - // +optional - optional string datasetName = 1; - - // UUID of the dataset. This is unique identifier of a Flocker dataset - // +optional - optional string datasetUUID = 2; -} - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -message GCEPersistentDiskVolumeSource { - // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - optional string pdName = 1; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. 
- // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - optional string fsType = 2; - - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // +optional - optional int32 partition = 3; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // +optional - optional bool readOnly = 4; -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -// -// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an -// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir -// into the Pod's container. -message GitRepoVolumeSource { - // Repository URL - optional string repository = 1; - - // Commit hash for the specified revision. - // +optional - optional string revision = 2; - - // Target directory name. - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - // +optional - optional string directory = 3; -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -message GlusterfsVolumeSource { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - optional string endpoints = 1; - - // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - optional string path = 2; - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - // +optional - optional bool readOnly = 3; -} - -// HTTPGetAction describes an action based on HTTP Get requests. -message HTTPGetAction { - // Path to access on the HTTP server. - // +optional - optional string path = 1; - - // Name or number of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; - - // Host name to connect to, defaults to the pod IP. You probably want to set - // "Host" in httpHeaders instead. - // +optional - optional string host = 3; - - // Scheme to use for connecting to the host. - // Defaults to HTTP. - // +optional - optional string scheme = 4; - - // Custom headers to set in the request. HTTP allows repeated headers. 
- // +optional - repeated HTTPHeader httpHeaders = 5; -} - -// HTTPHeader describes a custom header to be used in HTTP probes -message HTTPHeader { - // The header field name - optional string name = 1; - - // The header field value - optional string value = 2; -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -message Handler { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - optional ExecAction exec = 1; - - // HTTPGet specifies the http request to perform. - // +optional - optional HTTPGetAction httpGet = 2; - - // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook - // +optional - optional TCPSocketAction tcpSocket = 3; -} - -// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the -// pod's hosts file. -message HostAlias { - // IP address of the host file entry. - optional string ip = 1; - - // Hostnames for the above IP address. - repeated string hostnames = 2; -} - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -message HostPathVolumeSource { - // Path of the directory on the host. - // If the path is a symlink, it will follow the link to the real path. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - optional string path = 1; - - // Type for HostPath Volume - // Defaults to "" - // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - // +optional - optional string type = 2; -} - -// ISCSIPersistentVolumeSource represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -message ISCSIPersistentVolumeSource { - // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - optional string targetPortal = 1; - - // Target iSCSI Qualified Name. - optional string iqn = 2; - - // iSCSI Target Lun number. - optional int32 lun = 3; - - // iSCSI Interface Name that uses an iSCSI transport. - // Defaults to 'default' (tcp). - // +optional - optional string iscsiInterface = 4; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - optional string fsType = 5; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // +optional - optional bool readOnly = 6; - - // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - // +optional - repeated string portals = 7; - - // whether support iSCSI Discovery CHAP authentication - // +optional - optional bool chapAuthDiscovery = 8; - - // whether support iSCSI Session CHAP authentication - // +optional - optional bool chapAuthSession = 11; - - // CHAP Secret for iSCSI target and initiator authentication - // +optional - optional SecretReference secretRef = 10; - - // Custom iSCSI Initiator Name. 
- // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - optional string initiatorName = 12; -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -message ISCSIVolumeSource { - // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - optional string targetPortal = 1; - - // Target iSCSI Qualified Name. - optional string iqn = 2; - - // iSCSI Target Lun number. - optional int32 lun = 3; - - // iSCSI Interface Name that uses an iSCSI transport. - // Defaults to 'default' (tcp). - // +optional - optional string iscsiInterface = 4; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - optional string fsType = 5; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // +optional - optional bool readOnly = 6; - - // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - // +optional - repeated string portals = 7; - - // whether support iSCSI Discovery CHAP authentication - // +optional - optional bool chapAuthDiscovery = 8; - - // whether support iSCSI Session CHAP authentication - // +optional - optional bool chapAuthSession = 11; - - // CHAP Secret for iSCSI target and initiator authentication - // +optional - optional LocalObjectReference secretRef = 10; - - // Custom iSCSI Initiator Name. - // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - optional string initiatorName = 12; -} - -// Maps a string key to a path within a volume. -message KeyToPath { - // The key to project. - optional string key = 1; - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - optional string path = 2; - - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - optional int32 mode = 3; -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -message Lifecycle { - // PostStart is called immediately after a container is created. If the handler fails, - // the container is terminated and restarted according to its restart policy. - // Other management of the container blocks until the hook completes. 
- // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - // +optional - optional Handler postStart = 1; - - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. - // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. - // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - // +optional - optional Handler preStop = 2; -} - -// LimitRange sets resource usage limits for each kind of resource in a Namespace. -message LimitRange { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional LimitRangeSpec spec = 2; -} - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. -message LimitRangeItem { - // Type of resource that this limit applies to. - // +optional - optional string type = 1; - - // Max usage constraints on this kind by resource name. - // +optional - map max = 2; - - // Min usage constraints on this kind by resource name. - // +optional - map min = 3; - - // Default resource requirement limit value by resource name if resource limit is omitted. - // +optional - map default = 4; - - // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. - // +optional - map defaultRequest = 5; - - // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. - // +optional - map maxLimitRequestRatio = 6; -} - -// LimitRangeList is a list of LimitRange items. -message LimitRangeList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of LimitRange objects. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - repeated LimitRange items = 2; -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind. -message LimitRangeSpec { - // Limits is the list of LimitRangeItem objects that are enforced. - repeated LimitRangeItem limits = 1; -} - -// List holds a list of objects, which may not be known by the server. -message List { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of objects - repeated k8s.io.apimachinery.pkg.runtime.RawExtension items = 2; -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. 
-message LoadBalancerIngress { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - // +optional - optional string ip = 1; - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - // +optional - optional string hostname = 2; -} - -// LoadBalancerStatus represents the status of a load-balancer. -message LoadBalancerStatus { - // Ingress is a list containing ingress points for the load-balancer. - // Traffic intended for the service should be sent to these ingress points. - // +optional - repeated LoadBalancerIngress ingress = 1; -} - -// LocalObjectReference contains enough information to let you locate the -// referenced object inside the same namespace. -message LocalObjectReference { - // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - // TODO: Add other useful fields. apiVersion, kind, uid? - // +optional - optional string name = 1; -} - -// Local represents directly-attached storage with node affinity (Beta feature) -message LocalVolumeSource { - // The full path to the volume on the node. - // It can be either a directory or block device (disk, partition, ...). - optional string path = 1; - - // Filesystem type to mount. - // It applies only when the Path is a block device. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. - // +optional - optional string fsType = 2; -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -message NFSVolumeSource { - // Server is the hostname or IP address of the NFS server. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - optional string server = 1; - - // Path that is exported by the NFS server. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - optional string path = 2; - - // ReadOnly here will force - // the NFS export to be mounted with read-only permissions. - // Defaults to false. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - // +optional - optional bool readOnly = 3; -} - -// Namespace provides a scope for Names. -// Use of multiple namespaces is optional. -message Namespace { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional NamespaceSpec spec = 2; - - // Status describes the current status of a Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional NamespaceStatus status = 3; -} - -// NamespaceList is a list of Namespaces. -message NamespaceList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of Namespace objects in the list. 
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - repeated Namespace items = 2; -} - -// NamespaceSpec describes the attributes on a Namespace. -message NamespaceSpec { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ - // +optional - repeated string finalizers = 1; -} - -// NamespaceStatus is information about the current status of a Namespace. -message NamespaceStatus { - // Phase is the current lifecycle phase of the namespace. - // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ - // +optional - optional string phase = 1; -} - -// Node is a worker node in Kubernetes. -// Each node will have a unique identifier in the cache (i.e. in etcd). -message Node { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional NodeSpec spec = 2; - - // Most recently observed status of the node. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional NodeStatus status = 3; -} - -// NodeAddress contains information for the node's address. -message NodeAddress { - // Node address type, one of Hostname, ExternalIP or InternalIP. - optional string type = 1; - - // The node address. - optional string address = 2; -} - -// Node affinity is a group of node affinity scheduling rules. -message NodeAffinity { - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - // +optional - optional NodeSelector requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - // +optional - repeated PreferredSchedulingTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// NodeCondition contains condition information for a node. -message NodeCondition { - // Type of node condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time we got an update on a given condition. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastHeartbeatTime = 3; - - // Last time the condition transit from one status to another. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; - - // (brief) reason for the condition's last transition. - // +optional - optional string reason = 5; - - // Human readable message indicating details about last transition. - // +optional - optional string message = 6; -} - -// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. -message NodeConfigSource { - // ConfigMap is a reference to a Node's ConfigMap - optional ConfigMapNodeConfigSource configMap = 2; -} - -// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. -message NodeConfigStatus { - // Assigned reports the checkpointed config the node will try to use. - // When Node.Spec.ConfigSource is updated, the node checkpoints the associated - // config payload to local disk, along with a record indicating intended - // config. The node refers to this record to choose its config checkpoint, and - // reports this record in Assigned. Assigned only updates in the status after - // the record has been checkpointed to disk. When the Kubelet is restarted, - // it tries to make the Assigned config the Active config by loading and - // validating the checkpointed payload identified by Assigned. - // +optional - optional NodeConfigSource assigned = 1; - - // Active reports the checkpointed config the node is actively using. - // Active will represent either the current version of the Assigned config, - // or the current LastKnownGood config, depending on whether attempting to use the - // Assigned config results in an error. - // +optional - optional NodeConfigSource active = 2; - - // LastKnownGood reports the checkpointed config the node will fall back to - // when it encounters an error attempting to use the Assigned config. - // The Assigned config becomes the LastKnownGood config when the node determines - // that the Assigned config is stable and correct. - // This is currently implemented as a 10-minute soak period starting when the local - // record of Assigned config is updated. If the Assigned config is Active at the end - // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is - // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, - // because the local default config is always assumed good. - // You should not make assumptions about the node's method of determining config stability - // and correctness, as this may change or become configurable in the future. - // +optional - optional NodeConfigSource lastKnownGood = 3; - - // Error describes any problems reconciling the Spec.ConfigSource to the Active config. - // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned - // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting - // to load or validate the Assigned config, etc. - // Errors may occur at different points while syncing config. Earlier errors (e.g. download or - // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across - // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in - // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error - // by fixing the config assigned in Spec.ConfigSource. - // You can find additional information for debugging by searching the error message in the Kubelet log. 
- // Error is a human-readable description of the error state; machines can check whether or not Error - // is empty, but should not rely on the stability of the Error text across Kubelet versions. - // +optional - optional string error = 4; -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -message NodeDaemonEndpoints { - // Endpoint on which Kubelet is listening. - // +optional - optional DaemonEndpoint kubeletEndpoint = 1; -} - -// NodeList is the whole list of all Nodes which have been registered with master. -message NodeList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of nodes - repeated Node items = 2; -} - -// NodeProxyOptions is the query options to a Node's proxy call. -message NodeProxyOptions { - // Path is the URL path to use for the current proxy request to node. - // +optional - optional string path = 1; -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -message NodeResources { - // Capacity represents the available resources of a node - map capacity = 1; -} - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -message NodeSelector { - // Required. A list of node selector terms. The terms are ORed. - repeated NodeSelectorTerm nodeSelectorTerms = 1; -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -message NodeSelectorRequirement { - // The label key that the selector applies to. - optional string key = 1; - - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. - optional string operator = 2; - - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - repeated string values = 3; -} - -// A null or empty node selector term matches no objects. The requirements of -// them are ANDed. -// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. -message NodeSelectorTerm { - // A list of node selector requirements by node's labels. - // +optional - repeated NodeSelectorRequirement matchExpressions = 1; - - // A list of node selector requirements by node's fields. - // +optional - repeated NodeSelectorRequirement matchFields = 2; -} - -// NodeSpec describes the attributes that a node is created with. -message NodeSpec { - // PodCIDR represents the pod IP range assigned to the node. - // +optional - optional string podCIDR = 1; - - // ID of the node assigned by the cloud provider in the format: :// - // +optional - optional string providerID = 3; - - // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration - // +optional - optional bool unschedulable = 4; - - // If specified, the node's taints. 
- // +optional - repeated Taint taints = 5; - - // If specified, the source to get node configuration from - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field - // +optional - optional NodeConfigSource configSource = 6; - - // Deprecated. Not all kubelets will set this field. Remove field after 1.13. - // see: https://issues.k8s.io/61966 - // +optional - optional string externalID = 2; -} - -// NodeStatus is information about the current status of a node. -message NodeStatus { - // Capacity represents the total resources of a node. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity - // +optional - map capacity = 1; - - // Allocatable represents the resources of a node that are available for scheduling. - // Defaults to Capacity. - // +optional - map allocatable = 2; - - // NodePhase is the recently observed lifecycle phase of the node. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase - // The field is never populated, and now is deprecated. - // +optional - optional string phase = 3; - - // Conditions is an array of current observed node conditions. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated NodeCondition conditions = 4; - - // List of addresses reachable to the node. - // Queried from cloud provider, if available. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated NodeAddress addresses = 5; - - // Endpoints of daemons running on the Node. - // +optional - optional NodeDaemonEndpoints daemonEndpoints = 6; - - // Set of ids/uuids to uniquely identify the node. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#info - // +optional - optional NodeSystemInfo nodeInfo = 7; - - // List of container images on this node - // +optional - repeated ContainerImage images = 8; - - // List of attachable volumes in use (mounted) by the node. - // +optional - repeated string volumesInUse = 9; - - // List of volumes that are attached to the node. - // +optional - repeated AttachedVolume volumesAttached = 10; - - // Status of the config assigned to the node via the dynamic Kubelet config feature. - // +optional - optional NodeConfigStatus config = 11; -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -message NodeSystemInfo { - // MachineID reported by the node. For unique machine identification - // in the cluster this field is preferred. Learn more from man(5) - // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html - optional string machineID = 1; - - // SystemUUID reported by the node. For unique machine identification - // MachineID is preferred. This field is specific to Red Hat hosts - // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html - optional string systemUUID = 2; - - // Boot ID reported by the node. - optional string bootID = 3; - - // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). - optional string kernelVersion = 4; - - // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). - optional string osImage = 5; - - // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). - optional string containerRuntimeVersion = 6; - - // Kubelet Version reported by the node. 
- optional string kubeletVersion = 7; - - // KubeProxy Version reported by the node. - optional string kubeProxyVersion = 8; - - // The Operating System reported by the node - optional string operatingSystem = 9; - - // The Architecture reported by the node - optional string architecture = 10; -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -message ObjectFieldSelector { - // Version of the schema the FieldPath is written in terms of, defaults to "v1". - // +optional - optional string apiVersion = 1; - - // Path of the field to select in the specified API version. - optional string fieldPath = 2; -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message ObjectReference { - // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional string kind = 1; - - // Namespace of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - // +optional - optional string namespace = 2; - - // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - // +optional - optional string name = 3; - - // UID of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - // +optional - optional string uid = 4; - - // API version of the referent. - // +optional - optional string apiVersion = 5; - - // Specific resourceVersion to which this reference is made, if any. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - optional string resourceVersion = 6; - - // If referring to a piece of an object instead of an entire object, this string - // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - // For example, if the object reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - // +optional - optional string fieldPath = 7; -} - -// PersistentVolume (PV) is a storage resource provisioned by an administrator. -// It is analogous to a node. -// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes -message PersistentVolume { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines a specification of a persistent volume owned by the cluster. - // Provisioned by an administrator. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes - // +optional - optional PersistentVolumeSpec spec = 2; - - // Status represents the current information/status for the persistent volume. - // Populated by the system. - // Read-only. 
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes - // +optional - optional PersistentVolumeStatus status = 3; -} - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -message PersistentVolumeClaim { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - // +optional - optional PersistentVolumeClaimSpec spec = 2; - - // Status represents the current information/status of a persistent volume claim. - // Read-only. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - // +optional - optional PersistentVolumeClaimStatus status = 3; -} - -// PersistentVolumeClaimCondition contails details about state of pvc -message PersistentVolumeClaimCondition { - optional string type = 1; - - optional string status = 2; - - // Last time we probed the condition. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; - - // Last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; - - // Unique, this should be a short, machine understandable string that gives the reason - // for condition's last transition. If it reports "ResizeStarted" that means the underlying - // persistent volume is being resized. - // +optional - optional string reason = 5; - - // Human-readable message indicating details about last transition. - // +optional - optional string message = 6; -} - -// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. -message PersistentVolumeClaimList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // A list of persistent volume claims. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - repeated PersistentVolumeClaim items = 2; -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -message PersistentVolumeClaimSpec { - // AccessModes contains the desired access modes the volume should have. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - // +optional - repeated string accessModes = 1; - - // A label query over volumes to consider for binding. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 4; - - // Resources represents the minimum resources the volume should have. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - // +optional - optional ResourceRequirements resources = 2; - - // VolumeName is the binding reference to the PersistentVolume backing this claim. - // +optional - optional string volumeName = 3; - - // Name of the StorageClass required by the claim. 
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - // +optional - optional string storageClassName = 5; - - // volumeMode defines what type of volume is required by the claim. - // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. - // +optional - optional string volumeMode = 6; - - // This field requires the VolumeSnapshotDataSource alpha feature gate to be - // enabled and currently VolumeSnapshot is the only supported data source. - // If the provisioner can support VolumeSnapshot data source, it will create - // a new volume and data will be restored to the volume at the same time. - // If the provisioner does not support VolumeSnapshot data source, volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. - // +optional - optional TypedLocalObjectReference dataSource = 7; -} - -// PersistentVolumeClaimStatus is the current status of a persistent volume claim. -message PersistentVolumeClaimStatus { - // Phase represents the current phase of PersistentVolumeClaim. - // +optional - optional string phase = 1; - - // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - // +optional - repeated string accessModes = 2; - - // Represents the actual resources of the underlying volume. - // +optional - map capacity = 3; - - // Current Condition of persistent volume claim. If underlying persistent volume is being - // resized then the Condition will be set to 'ResizeStarted'. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated PersistentVolumeClaimCondition conditions = 4; -} - -// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. -// This volume finds the bound PV and mounts that volume for the pod. A -// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another -// type of volume that is owned by someone else (the system). -message PersistentVolumeClaimVolumeSource { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - optional string claimName = 1; - - // Will force the ReadOnly setting in VolumeMounts. - // Default false. - // +optional - optional bool readOnly = 2; -} - -// PersistentVolumeList is a list of PersistentVolume items. -message PersistentVolumeList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of persistent volumes. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes - repeated PersistentVolume items = 2; -} - -// PersistentVolumeSource is similar to VolumeSource but meant for the -// administrator who creates PVs. Exactly one of its members must be set. -message PersistentVolumeSource { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. 
- // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // +optional - optional GCEPersistentDiskVolumeSource gcePersistentDisk = 1; - - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - // +optional - optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 2; - - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - // +optional - optional HostPathVolumeSource hostPath = 3; - - // Glusterfs represents a Glusterfs volume that is attached to a host and - // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md - // +optional - optional GlusterfsVolumeSource glusterfs = 4; - - // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - // +optional - optional NFSVolumeSource nfs = 5; - - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md - // +optional - optional RBDPersistentVolumeSource rbd = 6; - - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // +optional - optional ISCSIPersistentVolumeSource iscsi = 7; - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - optional CinderPersistentVolumeSource cinder = 8; - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - optional CephFSPersistentVolumeSource cephfs = 9; - - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - optional FCVolumeSource fc = 10; - - // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running - // +optional - optional FlockerVolumeSource flocker = 11; - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. - // +optional - optional FlexPersistentVolumeSource flexVolume = 12; - - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - optional AzureFilePersistentVolumeSource azureFile = 13; - - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - optional VsphereVirtualDiskVolumeSource vsphereVolume = 14; - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - optional QuobyteVolumeSource quobyte = 15; - - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
- // +optional - optional AzureDiskVolumeSource azureDisk = 16; - - // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine - optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 17; - - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - optional PortworxVolumeSource portworxVolume = 18; - - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - optional ScaleIOPersistentVolumeSource scaleIO = 19; - - // Local represents directly-attached storage with node affinity - // +optional - optional LocalVolumeSource local = 20; - - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md - // +optional - optional StorageOSPersistentVolumeSource storageos = 21; - - // CSI represents storage that handled by an external CSI driver (Beta feature). - // +optional - optional CSIPersistentVolumeSource csi = 22; -} - -// PersistentVolumeSpec is the specification of a persistent volume. -message PersistentVolumeSpec { - // A description of the persistent volume's resources and capacity. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity - // +optional - map capacity = 1; - - // The actual volume backing the persistent volume. - optional PersistentVolumeSource persistentVolumeSource = 2; - - // AccessModes contains all ways the volume can be mounted. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes - // +optional - repeated string accessModes = 3; - - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // Expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding - // +optional - optional ObjectReference claimRef = 4; - - // What happens to a persistent volume when released from its claim. - // Valid options are Retain (default for manually created PersistentVolumes), Delete (default - // for dynamically provisioned PersistentVolumes), and Recycle (deprecated). - // Recycle must be supported by the volume plugin underlying this PersistentVolume. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming - // +optional - optional string persistentVolumeReclaimPolicy = 5; - - // Name of StorageClass to which this persistent volume belongs. Empty value - // means that this volume does not belong to any StorageClass. - // +optional - optional string storageClassName = 6; - - // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will - // simply fail if one is invalid. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options - // +optional - repeated string mountOptions = 7; - - // volumeMode defines if a volume is intended to be used with a formatted filesystem - // or to remain in raw block state. Value of Filesystem is implied when not included in spec. - // This is an alpha feature and may change in the future. - // +optional - optional string volumeMode = 8; - - // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. - // This field influences the scheduling of pods that use this volume. 
- // +optional - optional VolumeNodeAffinity nodeAffinity = 9; -} - -// PersistentVolumeStatus is the current status of a persistent volume. -message PersistentVolumeStatus { - // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase - // +optional - optional string phase = 1; - - // A human-readable message indicating details about why the volume is in this state. - // +optional - optional string message = 2; - - // Reason is a brief CamelCase string that describes any failure and is meant - // for machine parsing and tidy display in the CLI. - // +optional - optional string reason = 3; -} - -// Represents a Photon Controller persistent disk resource. -message PhotonPersistentDiskVolumeSource { - // ID that identifies Photon Controller persistent disk - optional string pdID = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - optional string fsType = 2; -} - -// Pod is a collection of containers that can run on a host. This resource is created -// by clients and scheduled onto hosts. -message Pod { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional PodSpec spec = 2; - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional PodStatus status = 3; -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -message PodAffinity { - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. 
- // +optional - repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key matches that of any node on which -// a pod of the set of pods is running -message PodAffinityTerm { - // A label query over a set of resources, in this case pods. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector labelSelector = 1; - - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // null or empty list means "this pod's namespace" - // +optional - repeated string namespaces = 2; - - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // Empty topologyKey is not allowed. - optional string topologyKey = 3; -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -message PodAntiAffinity { - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - repeated PodAffinityTerm requiredDuringSchedulingIgnoredDuringExecution = 1; - - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - repeated WeightedPodAffinityTerm preferredDuringSchedulingIgnoredDuringExecution = 2; -} - -// PodAttachOptions is the query options to a Pod's remote attach call. -// --- -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -message PodAttachOptions { - // Stdin if true, redirects the standard input stream of the pod for this call. - // Defaults to false. - // +optional - optional bool stdin = 1; - - // Stdout if true indicates that stdout is to be redirected for the attach call. - // Defaults to true. - // +optional - optional bool stdout = 2; - - // Stderr if true indicates that stderr is to be redirected for the attach call. - // Defaults to true. 
- // +optional - optional bool stderr = 3; - - // TTY if true indicates that a tty will be allocated for the attach call. - // This is passed through the container runtime so the tty - // is allocated on the worker node by the container runtime. - // Defaults to false. - // +optional - optional bool tty = 4; - - // The container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - // +optional - optional string container = 5; -} - -// PodCondition contains details for the current condition of this pod. -message PodCondition { - // Type is the type of the condition. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - optional string type = 1; - - // Status is the status of the condition. - // Can be True, False, Unknown. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - optional string status = 2; - - // Last time we probed the condition. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastProbeTime = 3; - - // Last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 4; - - // Unique, one-word, CamelCase reason for the condition's last transition. - // +optional - optional string reason = 5; - - // Human-readable message indicating details about last transition. - // +optional - optional string message = 6; -} - -// PodDNSConfig defines the DNS parameters of a pod in addition to -// those generated from DNSPolicy. -message PodDNSConfig { - // A list of DNS name server IP addresses. - // This will be appended to the base nameservers generated from DNSPolicy. - // Duplicated nameservers will be removed. - // +optional - repeated string nameservers = 1; - - // A list of DNS search domains for host-name lookup. - // This will be appended to the base search paths generated from DNSPolicy. - // Duplicated search paths will be removed. - // +optional - repeated string searches = 2; - - // A list of DNS resolver options. - // This will be merged with the base options generated from DNSPolicy. - // Duplicated entries will be removed. Resolution options given in Options - // will override those that appear in the base DNSPolicy. - // +optional - repeated PodDNSConfigOption options = 3; -} - -// PodDNSConfigOption defines DNS resolver options of a pod. -message PodDNSConfigOption { - // Required. - optional string name = 1; - - // +optional - optional string value = 2; -} - -// PodExecOptions is the query options to a Pod's remote exec call. -// --- -// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -message PodExecOptions { - // Redirect the standard input stream of the pod for this call. - // Defaults to false. - // +optional - optional bool stdin = 1; - - // Redirect the standard output stream of the pod for this call. - // Defaults to true. - // +optional - optional bool stdout = 2; - - // Redirect the standard error stream of the pod for this call. - // Defaults to true. - // +optional - optional bool stderr = 3; - - // TTY if true indicates that a tty will be allocated for the exec call. - // Defaults to false. - // +optional - optional bool tty = 4; - - // Container in which to execute the command. 
- // Defaults to only container if there is only one container in the pod. - // +optional - optional string container = 5; - - // Command is the remote command to execute. argv array. Not executed within a shell. - repeated string command = 6; -} - -// PodList is a list of Pods. -message PodList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md - repeated Pod items = 2; -} - -// PodLogOptions is the query options for a Pod's logs REST call. -message PodLogOptions { - // The container for which to stream logs. Defaults to only container if there is one container in the pod. - // +optional - optional string container = 1; - - // Follow the log stream of the pod. Defaults to false. - // +optional - optional bool follow = 2; - - // Return previous terminated container logs. Defaults to false. - // +optional - optional bool previous = 3; - - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - // +optional - optional int64 sinceSeconds = 4; - - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time sinceTime = 5; - - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. Defaults to false. - // +optional - optional bool timestamps = 6; - - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - // +optional - optional int64 tailLines = 7; - - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - // +optional - optional int64 limitBytes = 8; -} - -// PodPortForwardOptions is the query options to a Pod's port forward call -// when using WebSockets. -// The `port` query parameter must specify the port or -// ports (comma separated) to forward over. -// Port forwarding over SPDY does not use these options. It requires the port -// to be passed in the `port` header as part of request. -message PodPortForwardOptions { - // List of ports to forward - // Required when using WebSockets - // +optional - repeated int32 ports = 1; -} - -// PodProxyOptions is the query options to a Pod's proxy call. -message PodProxyOptions { - // Path is the URL path to use for the current proxy request to pod. - // +optional - optional string path = 1; -} - -// PodReadinessGate contains the reference to a pod condition -message PodReadinessGate { - // ConditionType refers to a condition in the pod's condition list with matching type. - optional string conditionType = 1; -} - -// PodSecurityContext holds pod-level security attributes and common container settings. 
-// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -message PodSecurityContext { - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - // +optional - optional SELinuxOptions seLinuxOptions = 1; - - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - optional int64 runAsUser = 2; - - // The GID to run the entrypoint of the container process. - // Uses runtime default if unset. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - optional int64 runAsGroup = 6; - - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - optional bool runAsNonRoot = 3; - - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - // +optional - repeated int64 supplementalGroups = 4; - - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - // +optional - optional int64 fsGroup = 5; - - // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - // sysctls (by the container runtime) might fail to launch. - // +optional - repeated Sysctl sysctls = 7; -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -message PodSignature { - // Reference to controller whose pods should avoid this node. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.OwnerReference podController = 1; -} - -// PodSpec is a description of a pod. -message PodSpec { - // List of volumes that can be mounted by containers belonging to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes - // +optional - // +patchMergeKey=name - // +patchStrategy=merge,retainKeys - repeated Volume volumes = 1; - - // List of initialization containers belonging to the pod. - // Init containers are executed in order prior to containers being started. 
If any - // init container fails, the pod is considered to have failed and is handled according - // to its restartPolicy. The name for an init container or normal container must be - // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. - // The resourceRequirements of an init container are taken into account during scheduling - // by finding the highest request/limit for each resource type, and then using the max of - // of that value or the sum of the normal containers. Limits are applied to init containers - // in a similar fashion. - // Init containers cannot currently be added or removed. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // +patchMergeKey=name - // +patchStrategy=merge - repeated Container initContainers = 20; - - // List of containers belonging to the pod. - // Containers cannot currently be added or removed. - // There must be at least one container in a Pod. - // Cannot be updated. - // +patchMergeKey=name - // +patchStrategy=merge - repeated Container containers = 2; - - // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. - // Default to Always. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - // +optional - optional string restartPolicy = 3; - - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. - // +optional - optional int64 terminationGracePeriodSeconds = 4; - - // Optional duration in seconds the pod may be active on the node relative to - // StartTime before the system will actively try to mark it failed and kill associated containers. - // Value must be a positive integer. - // +optional - optional int64 activeDeadlineSeconds = 5; - - // Set DNS policy for the pod. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. - // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. - // +optional - optional string dnsPolicy = 6; - - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // +optional - map nodeSelector = 7; - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - // +optional - optional string serviceAccountName = 8; - - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - // Deprecated: Use serviceAccountName instead. 
- // +k8s:conversion-gen=false - // +optional - optional string serviceAccount = 9; - - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - // +optional - optional bool automountServiceAccountToken = 21; - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - optional string nodeName = 10; - - // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. - // Default to false. - // +k8s:conversion-gen=false - // +optional - optional bool hostNetwork = 11; - - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - optional bool hostPID = 12; - - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - optional bool hostIPC = 13; - - // Share a single process namespace between all of the containers in a pod. - // When this is set containers will be able to view and signal processes from other containers - // in the same pod, and the first process in each container will not be assigned PID 1. - // HostPID and ShareProcessNamespace cannot both be set. - // Optional: Default to false. - // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. - // +k8s:conversion-gen=false - // +optional - optional bool shareProcessNamespace = 27; - - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. - // +optional - optional PodSecurityContext securityContext = 14; - - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated LocalObjectReference imagePullSecrets = 15; - - // Specifies the hostname of the Pod - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - optional string hostname = 16; - - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - // +optional - optional string subdomain = 17; - - // If specified, the pod's scheduling constraints - // +optional - optional Affinity affinity = 18; - - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - optional string schedulerName = 19; - - // If specified, the pod's tolerations. - // +optional - repeated Toleration tolerations = 22; - - // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. - // +optional - // +patchMergeKey=ip - // +patchStrategy=merge - repeated HostAlias hostAliases = 23; - - // If specified, indicates the pod's priority. 
"system-node-critical" and - // "system-cluster-critical" are two special keywords which indicate the - // highest priorities with the former being the highest priority. Any other - // name must be defined by creating a PriorityClass object with that name. - // If not specified, the pod priority will be default or zero if there is no - // default. - // +optional - optional string priorityClassName = 24; - - // The priority value. Various system components use this field to find the - // priority of the pod. When Priority Admission Controller is enabled, it - // prevents users from setting this field. The admission controller populates - // this field from PriorityClassName. - // The higher the value, the higher the priority. - // +optional - optional int32 priority = 25; - - // Specifies the DNS parameters of a pod. - // Parameters specified here will be merged to the generated DNS - // configuration based on DNSPolicy. - // +optional - optional PodDNSConfig dnsConfig = 26; - - // If specified, all readiness gates will be evaluated for pod readiness. - // A pod is ready when all its containers are ready AND - // all conditions specified in the readiness gates have status equal to "True" - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md - // +optional - repeated PodReadinessGate readinessGates = 28; - - // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - // empty definition that uses the default runtime handler. - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md - // This is an alpha feature and may change in the future. - // +optional - optional string runtimeClassName = 29; -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system, especially if the node that hosts the pod cannot contact the control -// plane. -message PodStatus { - // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. - // The conditions array, the reason and message fields, and the individual container status - // arrays contain more detail about the pod's status. - // There are five possible phase values: - // - // Pending: The pod has been accepted by the Kubernetes system, but one or more of the - // container images has not been created. This includes time before being scheduled as - // well as time spent downloading images over the network, which could take a while. - // Running: The pod has been bound to a node, and all of the containers have been created. - // At least one container is still running, or is in the process of starting or restarting. - // Succeeded: All containers in the pod have terminated in success, and will not be restarted. - // Failed: All containers in the pod have terminated, and at least one container has - // terminated in failure. The container either exited with non-zero status or was terminated - // by the system. - // Unknown: For some reason the state of the pod could not be obtained, typically due to an - // error in communicating with the host of the pod. - // - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase - // +optional - optional string phase = 1; - - // Current service state of pod. 
- // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated PodCondition conditions = 2; - - // A human readable message indicating details about why the pod is in this condition. - // +optional - optional string message = 3; - - // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'Evicted' - // +optional - optional string reason = 4; - - // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be - // scheduled right away as preemption victims receive their graceful termination periods. - // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide - // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to - // give the resources on this node to a higher priority pod that is created after preemption. - // As a result, this field may be different than PodSpec.nodeName when the pod is - // scheduled. - // +optional - optional string nominatedNodeName = 11; - - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. - // +optional - optional string hostIP = 5; - - // IP address allocated to the pod. Routable at least within the cluster. - // Empty if not yet allocated. - // +optional - optional string podIP = 6; - - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time startTime = 7; - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status - repeated ContainerStatus initContainerStatuses = 10; - - // The list has one entry per container in the manifest. Each entry is currently the output - // of `docker inspect`. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status - // +optional - repeated ContainerStatus containerStatuses = 8; - - // The Quality of Service (QOS) classification assigned to the pod based on resource requirements - // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md - // +optional - optional string qosClass = 9; -} - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -message PodStatusResult { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional PodStatus status = 2; -} - -// PodTemplate describes a template for creating copies of a predefined pod. -message PodTemplate { - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional PodTemplateSpec template = 2; -} - -// PodTemplateList is a list of PodTemplates. -message PodTemplateList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of pod templates - repeated PodTemplate items = 2; -} - -// PodTemplateSpec describes the data a pod should have when created from a template -message PodTemplateSpec { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional PodSpec spec = 2; -} - -// PortworxVolumeSource represents a Portworx volume resource. -message PortworxVolumeSource { - // VolumeID uniquely identifies a Portworx volume - optional string volumeID = 1; - - // FSType represents the filesystem type to mount - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - optional string fsType = 2; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 3; -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -// +k8s:openapi-gen=false -message Preconditions { - // Specifies the target UID. - // +optional - optional string uid = 1; -} - -// Describes a class of pods that should avoid this node. -message PreferAvoidPodsEntry { - // The class of pods. - optional PodSignature podSignature = 1; - - // Time at which this entry was added to the list. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time evictionTime = 2; - - // (brief) reason why this entry was added to the list. - // +optional - optional string reason = 3; - - // Human readable message indicating why this entry was added to the list. - // +optional - optional string message = 4; -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -message PreferredSchedulingTerm { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - optional int32 weight = 1; - - // A node selector term, associated with the corresponding weight. - optional NodeSelectorTerm preference = 2; -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -message Probe { - // The action taken to determine the health of a container - optional Handler handler = 1; - - // Number of seconds after the container has started before liveness probes are initiated. 
- // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - optional int32 initialDelaySeconds = 2; - - // Number of seconds after which the probe times out. - // Defaults to 1 second. Minimum value is 1. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - optional int32 timeoutSeconds = 3; - - // How often (in seconds) to perform the probe. - // Default to 10 seconds. Minimum value is 1. - // +optional - optional int32 periodSeconds = 4; - - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. - // +optional - optional int32 successThreshold = 5; - - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // Defaults to 3. Minimum value is 1. - // +optional - optional int32 failureThreshold = 6; -} - -// Represents a projected volume source -message ProjectedVolumeSource { - // list of volume projections - repeated VolumeProjection sources = 1; - - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - optional int32 defaultMode = 2; -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -message QuobyteVolumeSource { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - optional string registry = 1; - - // Volume is a string that references an already created Quobyte volume by name. - optional string volume = 2; - - // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. - // Defaults to false. - // +optional - optional bool readOnly = 3; - - // User to map volume access to - // Defaults to serivceaccount user - // +optional - optional string user = 4; - - // Group to map volume access to - // Default is no group - // +optional - optional string group = 5; -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -message RBDPersistentVolumeSource { - // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - repeated string monitors = 1; - - // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - optional string image = 2; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - optional string fsType = 3; - - // The rados pool name. - // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional string pool = 4; - - // The rados user name. 
- // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional string user = 5; - - // Keyring is the path to key ring for RBDUser. - // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional string keyring = 6; - - // SecretRef is name of the authentication secret for RBDUser. If provided - // overrides keyring. - // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional SecretReference secretRef = 7; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional bool readOnly = 8; -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -message RBDVolumeSource { - // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - repeated string monitors = 1; - - // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - optional string image = 2; - - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - optional string fsType = 3; - - // The rados pool name. - // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional string pool = 4; - - // The rados user name. - // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional string user = 5; - - // Keyring is the path to key ring for RBDUser. - // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional string keyring = 6; - - // SecretRef is name of the authentication secret for RBDUser. If provided - // overrides keyring. - // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional LocalObjectReference secretRef = 7; - - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - optional bool readOnly = 8; -} - -// RangeAllocation is not a public type. -message RangeAllocation { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Range is string that identifies the range represented by 'data'. - optional string range = 2; - - // Data is a bit array containing all allocated addresses in the previous segment. - optional bytes data = 3; -} - -// ReplicationController represents the configuration of a replication controller. 
-message ReplicationController { - // If the Labels of a ReplicationController are empty, they are defaulted to - // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ReplicationControllerSpec spec = 2; - - // Status is the most recently observed status of the replication controller. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ReplicationControllerStatus status = 3; -} - -// ReplicationControllerCondition describes the state of a replication controller at a certain point. -message ReplicationControllerCondition { - // Type of replication controller condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // The last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// ReplicationControllerList is a collection of replication controllers. -message ReplicationControllerList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of replication controllers. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller - repeated ReplicationController items = 2; -} - -// ReplicationControllerSpec is the specification of a replication controller. -message ReplicationControllerSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller - // +optional - optional int32 replicas = 1; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - optional int32 minReadySeconds = 4; - - // Selector is a label query over pods that should match the Replicas count. - // If Selector is empty, it is defaulted to the labels present on the Pod template. - // Label keys and values that must match in order to be controlled by this replication - // controller, if empty defaulted to labels on Pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - map selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. 
This takes precedence over a TemplateRef. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - // +optional - optional PodTemplateSpec template = 3; -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -message ReplicationControllerStatus { - // Replicas is the most recently oberved number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - // +optional - optional int32 fullyLabeledReplicas = 2; - - // The number of ready replicas for this replication controller. - // +optional - optional int32 readyReplicas = 4; - - // The number of available replicas (ready for at least minReadySeconds) for this replication controller. - // +optional - optional int32 availableReplicas = 5; - - // ObservedGeneration reflects the generation of the most recently observed replication controller. - // +optional - optional int64 observedGeneration = 3; - - // Represents the latest available observations of a replication controller's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated ReplicationControllerCondition conditions = 6; -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -message ResourceFieldSelector { - // Container name: required for volumes, optional for env vars - // +optional - optional string containerName = 1; - - // Required: resource to select - optional string resource = 2; - - // Specifies the output format of the exposed resources, defaults to "1" - // +optional - optional k8s.io.apimachinery.pkg.api.resource.Quantity divisor = 3; -} - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -message ResourceQuota { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ResourceQuotaSpec spec = 2; - - // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ResourceQuotaStatus status = 3; -} - -// ResourceQuotaList is a list of ResourceQuota items. -message ResourceQuotaList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of ResourceQuota objects. - // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ - repeated ResourceQuota items = 2; -} - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. -message ResourceQuotaSpec { - // hard is the set of desired hard limits for each named resource. - // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ - // +optional - map hard = 1; - - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. 
- // +optional - repeated string scopes = 2; - - // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota - // but expressed using ScopeSelectorOperator in combination with possible values. - // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. - // +optional - optional ScopeSelector scopeSelector = 3; -} - -// ResourceQuotaStatus defines the enforced hard limits and observed use. -message ResourceQuotaStatus { - // Hard is the set of enforced hard limits for each named resource. - // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ - // +optional - map hard = 1; - - // Used is the current observed total usage of the resource in the namespace. - // +optional - map used = 2; -} - -// ResourceRequirements describes the compute resource requirements. -message ResourceRequirements { - // Limits describes the maximum amount of compute resources allowed. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - // +optional - map limits = 1; - - // Requests describes the minimum amount of compute resources required. - // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - // +optional - map requests = 2; -} - -// SELinuxOptions are the labels to be applied to the container -message SELinuxOptions { - // User is a SELinux user label that applies to the container. - // +optional - optional string user = 1; - - // Role is a SELinux role label that applies to the container. - // +optional - optional string role = 2; - - // Type is a SELinux type label that applies to the container. - // +optional - optional string type = 3; - - // Level is SELinux level label that applies to the container. - // +optional - optional string level = 4; -} - -// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume -message ScaleIOPersistentVolumeSource { - // The host address of the ScaleIO API Gateway. - optional string gateway = 1; - - // The name of the storage system as configured in ScaleIO. - optional string system = 2; - - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - optional SecretReference secretRef = 3; - - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - optional bool sslEnabled = 4; - - // The name of the ScaleIO Protection Domain for the configured storage. - // +optional - optional string protectionDomain = 5; - - // The ScaleIO Storage Pool associated with the protection domain. - // +optional - optional string storagePool = 6; - - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - // Default is ThinProvisioned. - // +optional - optional string storageMode = 7; - - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - optional string volumeName = 8; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // Default is "xfs" - // +optional - optional string fsType = 9; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. 
- // +optional - optional bool readOnly = 10; -} - -// ScaleIOVolumeSource represents a persistent ScaleIO volume -message ScaleIOVolumeSource { - // The host address of the ScaleIO API Gateway. - optional string gateway = 1; - - // The name of the storage system as configured in ScaleIO. - optional string system = 2; - - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - optional LocalObjectReference secretRef = 3; - - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - optional bool sslEnabled = 4; - - // The name of the ScaleIO Protection Domain for the configured storage. - // +optional - optional string protectionDomain = 5; - - // The ScaleIO Storage Pool associated with the protection domain. - // +optional - optional string storagePool = 6; - - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - // Default is ThinProvisioned. - // +optional - optional string storageMode = 7; - - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - optional string volumeName = 8; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // Default is "xfs". - // +optional - optional string fsType = 9; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 10; -} - -// A scope selector represents the AND of the selectors represented -// by the scoped-resource selector requirements. -message ScopeSelector { - // A list of scope selector requirements by scope of the resources. - // +optional - repeated ScopedResourceSelectorRequirement matchExpressions = 1; -} - -// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator -// that relates the scope name and values. -message ScopedResourceSelectorRequirement { - // The name of the scope that the selector applies to. - optional string scopeName = 1; - - // Represents a scope's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. - optional string operator = 2; - - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. - // This array is replaced during a strategic merge patch. - // +optional - repeated string values = 3; -} - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -message Secret { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Data contains the secret data. Each key must consist of alphanumeric - // characters, '-', '_' or '.'. The serialized form of the secret data is a - // base64 encoded string, representing the arbitrary (possibly non-string) - // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 - // +optional - map data = 2; - - // stringData allows specifying non-binary secret data in string form. - // It is provided as a write-only convenience method. 
- // All keys and values are merged into the data field on write, overwriting any existing values. - // It is never output when reading from the API. - // +k8s:conversion-gen=false - // +optional - map stringData = 4; - - // Used to facilitate programmatic handling of secret data. - // +optional - optional string type = 3; -} - -// SecretEnvSource selects a Secret to populate the environment -// variables with. -// -// The contents of the target Secret's Data field will represent the -// key-value pairs as environment variables. -message SecretEnvSource { - // The Secret to select from. - optional LocalObjectReference localObjectReference = 1; - - // Specify whether the Secret must be defined - // +optional - optional bool optional = 2; -} - -// SecretKeySelector selects a key of a Secret. -message SecretKeySelector { - // The name of the secret in the pod's namespace to select from. - optional LocalObjectReference localObjectReference = 1; - - // The key of the secret to select from. Must be a valid secret key. - optional string key = 2; - - // Specify whether the Secret or it's key must be defined - // +optional - optional bool optional = 3; -} - -// SecretList is a list of Secret. -message SecretList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of secret objects. - // More info: https://kubernetes.io/docs/concepts/configuration/secret - repeated Secret items = 2; -} - -// Adapts a secret into a projected volume. -// -// The contents of the target Secret's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names. -// Note that this is identical to a secret volume source without the default -// mode. -message SecretProjection { - optional LocalObjectReference localObjectReference = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - repeated KeyToPath items = 2; - - // Specify whether the Secret or its key must be defined - // +optional - optional bool optional = 4; -} - -// SecretReference represents a Secret Reference. It has enough information to retrieve secret -// in any namespace -message SecretReference { - // Name is unique within a namespace to reference a secret resource. - // +optional - optional string name = 1; - - // Namespace defines the space within which the secret name must be unique. - // +optional - optional string namespace = 2; -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. -// Secret volumes support ownership management and SELinux relabeling. -message SecretVolumeSource { - // Name of the secret in the pod's namespace to use. 
- // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - // +optional - optional string secretName = 1; - - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - repeated KeyToPath items = 2; - - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - optional int32 defaultMode = 3; - - // Specify whether the Secret or it's keys must be defined - // +optional - optional bool optional = 4; -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -message SecurityContext { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - // +optional - optional Capabilities capabilities = 1; - - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - // +optional - optional bool privileged = 2; - - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - optional SELinuxOptions seLinuxOptions = 3; - - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - optional int64 runAsUser = 4; - - // The GID to run the entrypoint of the container process. - // Uses runtime default if unset. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - optional int64 runAsGroup = 8; - - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - optional bool runAsNonRoot = 5; - - // Whether this container has a read-only root filesystem. - // Default is false. 
- // +optional - optional bool readOnlyRootFilesystem = 6; - - // AllowPrivilegeEscalation controls whether a process can gain more - // privileges than its parent process. This bool directly controls if - // the no_new_privs flag will be set on the container process. - // AllowPrivilegeEscalation is true always when the container is: - // 1) run as Privileged - // 2) has CAP_SYS_ADMIN - // +optional - optional bool allowPrivilegeEscalation = 7; - - // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for - // readonly paths and masked paths. - // This requires the ProcMountType feature flag to be enabled. - // +optional - optional string procMount = 9; -} - -// SerializedReference is a reference to serialized object. -message SerializedReference { - // The reference to an object in the system. - // +optional - optional ObjectReference reference = 1; -} - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -message Service { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ServiceSpec spec = 2; - - // Most recently observed status of the service. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ServiceStatus status = 3; -} - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -message ServiceAccount { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: https://kubernetes.io/docs/concepts/configuration/secret - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - repeated ObjectReference secrets = 2; - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - // +optional - repeated LocalObjectReference imagePullSecrets = 3; - - // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. - // Can be overridden at the pod level. - // +optional - optional bool automountServiceAccountToken = 4; -} - -// ServiceAccountList is a list of ServiceAccount objects -message ServiceAccountList { - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ServiceAccounts. - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - repeated ServiceAccount items = 2; -} - -// ServiceAccountTokenProjection represents a projected service account token -// volume. This projection can be used to insert a service account token into -// the pods runtime filesystem for use against APIs (Kubernetes API Server or -// otherwise). -message ServiceAccountTokenProjection { - // Audience is the intended audience of the token. A recipient of a token - // must identify itself with an identifier specified in the audience of the - // token, and otherwise should reject the token. The audience defaults to the - // identifier of the apiserver. - // +optional - optional string audience = 1; - - // ExpirationSeconds is the requested duration of validity of the service - // account token. As the token approaches expiration, the kubelet volume - // plugin will proactively rotate the service account token. The kubelet will - // start trying to rotate the token if the token is older than 80 percent of - // its time to live or if the token is older than 24 hours.Defaults to 1 hour - // and must be at least 10 minutes. - // +optional - optional int64 expirationSeconds = 2; - - // Path is the path relative to the mount point of the file to project the - // token into. - optional string path = 3; -} - -// ServiceList holds a list of services. -message ServiceList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of services - repeated Service items = 2; -} - -// ServicePort contains information on service's port. -message ServicePort { - // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - // Optional if only one ServicePort is defined on this service. - // +optional - optional string name = 1; - - // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". - // Default is TCP. - // +optional - optional string protocol = 2; - - // The port that will be exposed by this service. - optional int32 port = 3; - - // Number or name of the port to access on the pods targeted by the service. - // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - // If this is a string, it will be looked up as a named port in the - // target Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString targetPort = 4; - - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. 
-  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
-  // +optional
-  optional int32 nodePort = 5;
-}
-
-// ServiceProxyOptions is the query options to a Service's proxy call.
-message ServiceProxyOptions {
-  // Path is the part of URLs that include service endpoints, suffixes,
-  // and parameters to use for the current proxy request to service.
-  // For example, the whole request URL is
-  // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy.
-  // Path is _search?q=user:kimchy.
-  // +optional
-  optional string path = 1;
-}
-
-// ServiceSpec describes the attributes that a user creates on a service.
-message ServiceSpec {
-  // The list of ports that are exposed by this service.
-  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-  // +patchMergeKey=port
-  // +patchStrategy=merge
-  repeated ServicePort ports = 1;
-
-  // Route service traffic to pods with label keys and values matching this
-  // selector. If empty or not present, the service is assumed to have an
-  // external process managing its endpoints, which Kubernetes will not
-  // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
-  // Ignored if type is ExternalName.
-  // More info: https://kubernetes.io/docs/concepts/services-networking/service/
-  // +optional
-  map<string, string> selector = 2;
-
-  // clusterIP is the IP address of the service and is usually assigned
-  // randomly by the master. If an address is specified manually and is not in
-  // use by others, it will be allocated to the service; otherwise, creation
-  // of the service will fail. This field can not be changed through updates.
-  // Valid values are "None", empty string (""), or a valid IP address. "None"
-  // can be specified for headless services when proxying is not required.
-  // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if
-  // type is ExternalName.
-  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
-  // +optional
-  optional string clusterIP = 3;
-
-  // type determines how the Service is exposed. Defaults to ClusterIP. Valid
-  // options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
-  // "ExternalName" maps to the specified externalName.
-  // "ClusterIP" allocates a cluster-internal IP address for load-balancing to
-  // endpoints. Endpoints are determined by the selector or if that is not
-  // specified, by manual construction of an Endpoints object. If clusterIP is
-  // "None", no virtual IP is allocated and the endpoints are published as a
-  // set of endpoints rather than a stable IP.
-  // "NodePort" builds on ClusterIP and allocates a port on every node which
-  // routes to the clusterIP.
-  // "LoadBalancer" builds on NodePort and creates an
-  // external load-balancer (if supported in the current cloud) which routes
-  // to the clusterIP.
-  // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types
-  // +optional
-  optional string type = 4;
-
-  // externalIPs is a list of IP addresses for which nodes in the cluster
-  // will also accept traffic for this service. These IPs are not managed by
-  // Kubernetes. The user is responsible for ensuring that traffic arrives
-  // at a node with this IP. A common example is external load-balancers
-  // that are not part of the Kubernetes system.
- // +optional - repeated string externalIPs = 5; - - // Supports "ClientIP" and "None". Used to maintain session affinity. - // Enable client IP based session affinity. - // Must be ClientIP or None. - // Defaults to None. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +optional - optional string sessionAffinity = 7; - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. - // +optional - optional string loadBalancerIP = 8; - - // If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ - // +optional - repeated string loadBalancerSourceRanges = 9; - - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. - // +optional - optional string externalName = 10; - - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. - // +optional - optional string externalTrafficPolicy = 11; - - // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. - // +optional - optional int32 healthCheckNodePort = 12; - - // publishNotReadyAddresses, when set to true, indicates that DNS implementations - // must publish the notReadyAddresses of subsets for the Endpoints associated with - // the Service. The default value is false. - // The primary use case for setting this field is to use a StatefulSet's Headless Service - // to propagate SRV records for its Pods without respect to their readiness for purpose - // of peer discovery. - // +optional - optional bool publishNotReadyAddresses = 13; - - // sessionAffinityConfig contains the configurations of session affinity. - // +optional - optional SessionAffinityConfig sessionAffinityConfig = 14; -} - -// ServiceStatus represents the current status of a service. -message ServiceStatus { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - // +optional - optional LoadBalancerStatus loadBalancer = 1; -} - -// SessionAffinityConfig represents the configurations of session affinity. -message SessionAffinityConfig { - // clientIP contains the configurations of Client IP based session affinity. 
- // +optional - optional ClientIPConfig clientIP = 1; -} - -// Represents a StorageOS persistent volume resource. -message StorageOSPersistentVolumeSource { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - optional string volumeName = 1; - - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - optional string volumeNamespace = 2; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - optional string fsType = 3; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 4; - - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - optional ObjectReference secretRef = 5; -} - -// Represents a StorageOS persistent volume resource. -message StorageOSVolumeSource { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - optional string volumeName = 1; - - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - optional string volumeNamespace = 2; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - optional string fsType = 3; - - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - optional bool readOnly = 4; - - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - optional LocalObjectReference secretRef = 5; -} - -// Sysctl defines a kernel parameter to be set -message Sysctl { - // Name of a property to set - optional string name = 1; - - // Value of a property to set - optional string value = 2; -} - -// TCPSocketAction describes an action based on opening a socket -message TCPSocketAction { - // Number or name of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 1; - - // Optional: Host name to connect to, defaults to the pod IP. - // +optional - optional string host = 2; -} - -// The node this Taint is attached to has the "effect" on -// any pod that does not tolerate the Taint. -message Taint { - // Required. 
The taint key to be applied to a node.
-  optional string key = 1;
-
-  // Required. The taint value corresponding to the taint key.
-  // +optional
-  optional string value = 2;
-
-  // Required. The effect of the taint on pods
-  // that do not tolerate the taint.
-  // Valid effects are NoSchedule, PreferNoSchedule and NoExecute.
-  optional string effect = 3;
-
-  // TimeAdded represents the time at which the taint was added.
-  // It is only written for NoExecute taints.
-  // +optional
-  optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timeAdded = 4;
-}
-
-// The pod this Toleration is attached to tolerates any taint that matches
-// the triple <key,value,effect> using the matching operator <operator>.
-message Toleration {
-  // Key is the taint key that the toleration applies to. Empty means match all taint keys.
-  // If the key is empty, operator must be Exists; this combination means to match all values and all keys.
-  // +optional
-  optional string key = 1;
-
-  // Operator represents a key's relationship to the value.
-  // Valid operators are Exists and Equal. Defaults to Equal.
-  // Exists is equivalent to wildcard for value, so that a pod can
-  // tolerate all taints of a particular category.
-  // +optional
-  optional string operator = 2;
-
-  // Value is the taint value the toleration matches to.
-  // If the operator is Exists, the value should be empty, otherwise just a regular string.
-  // +optional
-  optional string value = 3;
-
-  // Effect indicates the taint effect to match. Empty means match all taint effects.
-  // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
-  // +optional
-  optional string effect = 4;
-
-  // TolerationSeconds represents the period of time the toleration (which must be
-  // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default,
-  // it is not set, which means tolerate the taint forever (do not evict). Zero and
-  // negative values will be treated as 0 (evict immediately) by the system.
-  // +optional
-  optional int64 tolerationSeconds = 5;
-}
-
-// A topology selector requirement is a selector that matches given label.
-// This is an alpha feature and may change in the future.
-message TopologySelectorLabelRequirement {
-  // The label key that the selector applies to.
-  optional string key = 1;
-
-  // An array of string values. One value must match the label to be selected.
-  // Each entry in Values is ORed.
-  repeated string values = 2;
-}
-
-// A topology selector term represents the result of label queries.
-// A null or empty topology selector term matches no objects.
-// The requirements of them are ANDed.
-// It provides a subset of functionality as NodeSelectorTerm.
-// This is an alpha feature and may change in the future.
-message TopologySelectorTerm {
-  // A list of topology selector requirements by labels.
-  // +optional
-  repeated TopologySelectorLabelRequirement matchLabelExpressions = 1;
-}
-
-// TypedLocalObjectReference contains enough information to let you locate the
-// typed referenced object inside the same namespace.
-message TypedLocalObjectReference {
-  // APIGroup is the group for the resource being referenced.
-  // If APIGroup is not specified, the specified Kind must be in the core API group.
-  // For any other third-party types, APIGroup is required.
- // +optional - optional string apiGroup = 1; - - // Kind is the type of resource being referenced - optional string kind = 2; - - // Name is the name of resource being referenced - optional string name = 3; -} - -// Volume represents a named volume in a pod that may be accessed by any container in the pod. -message Volume { - // Volume's name. - // Must be a DNS_LABEL and unique within the pod. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - optional string name = 1; - - // VolumeSource represents the location and type of the mounted volume. - // If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - optional VolumeSource volumeSource = 2; -} - -// volumeDevice describes a mapping of a raw block device within a container. -message VolumeDevice { - // name must match the name of a persistentVolumeClaim in the pod - optional string name = 1; - - // devicePath is the path inside of the container that the device will be mapped to. - optional string devicePath = 2; -} - -// VolumeMount describes a mounting of a Volume within a container. -message VolumeMount { - // This must match the Name of a Volume. - optional string name = 1; - - // Mounted read-only if true, read-write otherwise (false or unspecified). - // Defaults to false. - // +optional - optional bool readOnly = 2; - - // Path within the container at which the volume should be mounted. Must - // not contain ':'. - optional string mountPath = 3; - - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - // +optional - optional string subPath = 4; - - // mountPropagation determines how mounts are propagated from the host - // to container and the other way around. - // When not set, MountPropagationNone is used. - // This field is beta in 1.10. - // +optional - optional string mountPropagation = 5; -} - -// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. -message VolumeNodeAffinity { - // Required specifies hard node constraints that must be met. - optional NodeSelector required = 1; -} - -// Projection that may be projected along with other supported volume types -message VolumeProjection { - // information about the secret data to project - // +optional - optional SecretProjection secret = 1; - - // information about the downwardAPI data to project - // +optional - optional DownwardAPIProjection downwardAPI = 2; - - // information about the configMap data to project - // +optional - optional ConfigMapProjection configMap = 3; - - // information about the serviceAccountToken data to project - // +optional - optional ServiceAccountTokenProjection serviceAccountToken = 4; -} - -// Represents the source of a volume to mount. -// Only one of its members may be specified. -message VolumeSource { - // HostPath represents a pre-existing file or directory on the host - // machine that is directly exposed to the container. This is generally - // used for system agents or other privileged things that are allowed - // to see the host machine. Most containers will NOT need this. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. 
- // +optional - optional HostPathVolumeSource hostPath = 1; - - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - // +optional - optional EmptyDirVolumeSource emptyDir = 2; - - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // +optional - optional GCEPersistentDiskVolumeSource gcePersistentDisk = 3; - - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - // +optional - optional AWSElasticBlockStoreVolumeSource awsElasticBlockStore = 4; - - // GitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - // into the Pod's container. - // +optional - optional GitRepoVolumeSource gitRepo = 5; - - // Secret represents a secret that should populate this volume. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - // +optional - optional SecretVolumeSource secret = 6; - - // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - // +optional - optional NFSVolumeSource nfs = 7; - - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md - // +optional - optional ISCSIVolumeSource iscsi = 8; - - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md - // +optional - optional GlusterfsVolumeSource glusterfs = 9; - - // PersistentVolumeClaimVolumeSource represents a reference to a - // PersistentVolumeClaim in the same namespace. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - // +optional - optional PersistentVolumeClaimVolumeSource persistentVolumeClaim = 10; - - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md - // +optional - optional RBDVolumeSource rbd = 11; - - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. - // +optional - optional FlexVolumeSource flexVolume = 12; - - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - optional CinderVolumeSource cinder = 13; - - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - optional CephFSVolumeSource cephfs = 14; - - // Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running - // +optional - optional FlockerVolumeSource flocker = 15; - - // DownwardAPI represents downward API about the pod that should populate this volume - // +optional - optional DownwardAPIVolumeSource downwardAPI = 16; - - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - optional FCVolumeSource fc = 17; - - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - optional AzureFileVolumeSource azureFile = 18; - - // ConfigMap represents a configMap that should populate this volume - // +optional - optional ConfigMapVolumeSource configMap = 19; - - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - optional VsphereVirtualDiskVolumeSource vsphereVolume = 20; - - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - optional QuobyteVolumeSource quobyte = 21; - - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - optional AzureDiskVolumeSource azureDisk = 22; - - // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine - optional PhotonPersistentDiskVolumeSource photonPersistentDisk = 23; - - // Items for all in one resources secrets, configmaps, and downward API - optional ProjectedVolumeSource projected = 26; - - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - optional PortworxVolumeSource portworxVolume = 24; - - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - optional ScaleIOVolumeSource scaleIO = 25; - - // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. - // +optional - optional StorageOSVolumeSource storageos = 27; -} - -// Represents a vSphere volume resource. -message VsphereVirtualDiskVolumeSource { - // Path that identifies vSphere volume vmdk - optional string volumePath = 1; - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - optional string fsType = 2; - - // Storage Policy Based Management (SPBM) profile name. - // +optional - optional string storagePolicyName = 3; - - // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. - // +optional - optional string storagePolicyID = 4; -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -message WeightedPodAffinityTerm { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - optional int32 weight = 1; - - // Required. A pod affinity term, associated with the corresponding weight. - optional PodAffinityTerm podAffinityTerm = 2; -} - diff --git a/vendor/k8s.io/api/core/v1/objectreference.go b/vendor/k8s.io/api/core/v1/objectreference.go deleted file mode 100644 index ee5335e..0000000 --- a/vendor/k8s.io/api/core/v1/objectreference.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// IsAnAPIObject allows clients to preemptively get a reference to an API object and pass it to places that -// intend only to get a reference to that object. This simplifies the event recording interface. -func (obj *ObjectReference) SetGroupVersionKind(gvk schema.GroupVersionKind) { - obj.APIVersion, obj.Kind = gvk.ToAPIVersionAndKind() -} - -func (obj *ObjectReference) GroupVersionKind() schema.GroupVersionKind { - return schema.FromAPIVersionAndKind(obj.APIVersion, obj.Kind) -} - -func (obj *ObjectReference) GetObjectKind() schema.ObjectKind { return obj } diff --git a/vendor/k8s.io/api/core/v1/register.go b/vendor/k8s.io/api/core/v1/register.go deleted file mode 100644 index 1aac0cb..0000000 --- a/vendor/k8s.io/api/core/v1/register.go +++ /dev/null @@ -1,99 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // We only register manually written functions here. The registration of the - // generated functions takes place in the generated files. The separation - // makes the code compile even when the generated files are missing. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Pod{}, - &PodList{}, - &PodStatusResult{}, - &PodTemplate{}, - &PodTemplateList{}, - &ReplicationController{}, - &ReplicationControllerList{}, - &Service{}, - &ServiceProxyOptions{}, - &ServiceList{}, - &Endpoints{}, - &EndpointsList{}, - &Node{}, - &NodeList{}, - &NodeProxyOptions{}, - &Binding{}, - &Event{}, - &EventList{}, - &List{}, - &LimitRange{}, - &LimitRangeList{}, - &ResourceQuota{}, - &ResourceQuotaList{}, - &Namespace{}, - &NamespaceList{}, - &Secret{}, - &SecretList{}, - &ServiceAccount{}, - &ServiceAccountList{}, - &PersistentVolume{}, - &PersistentVolumeList{}, - &PersistentVolumeClaim{}, - &PersistentVolumeClaimList{}, - &PodAttachOptions{}, - &PodLogOptions{}, - &PodExecOptions{}, - &PodPortForwardOptions{}, - &PodProxyOptions{}, - &ComponentStatus{}, - &ComponentStatusList{}, - &SerializedReference{}, - &RangeAllocation{}, - &ConfigMap{}, - &ConfigMapList{}, - ) - - // Add common types - scheme.AddKnownTypes(SchemeGroupVersion, &metav1.Status{}) - - // Add the watch version that applies - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/core/v1/resource.go b/vendor/k8s.io/api/core/v1/resource.go deleted file mode 100644 index bb80412..0000000 --- a/vendor/k8s.io/api/core/v1/resource.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/api/resource" -) - -// Returns string version of ResourceName. -func (self ResourceName) String() string { - return string(self) -} - -// Returns the CPU limit if specified. -func (self *ResourceList) Cpu() *resource.Quantity { - if val, ok := (*self)[ResourceCPU]; ok { - return &val - } - return &resource.Quantity{Format: resource.DecimalSI} -} - -// Returns the Memory limit if specified. -func (self *ResourceList) Memory() *resource.Quantity { - if val, ok := (*self)[ResourceMemory]; ok { - return &val - } - return &resource.Quantity{Format: resource.BinarySI} -} - -func (self *ResourceList) Pods() *resource.Quantity { - if val, ok := (*self)[ResourcePods]; ok { - return &val - } - return &resource.Quantity{} -} - -func (self *ResourceList) StorageEphemeral() *resource.Quantity { - if val, ok := (*self)[ResourceEphemeralStorage]; ok { - return &val - } - return &resource.Quantity{} -} diff --git a/vendor/k8s.io/api/core/v1/taint.go b/vendor/k8s.io/api/core/v1/taint.go deleted file mode 100644 index 7b606a3..0000000 --- a/vendor/k8s.io/api/core/v1/taint.go +++ /dev/null @@ -1,33 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-import "fmt"
-
-// MatchTaint checks if the taint matches taintToMatch. Taints are unique by key:effect,
-// if the two taints have same key:effect, regard as they match.
-func (t *Taint) MatchTaint(taintToMatch *Taint) bool {
-  return t.Key == taintToMatch.Key && t.Effect == taintToMatch.Effect
-}
-
-// taint.ToString() converts taint struct to string in format key=value:effect or key:effect.
-func (t *Taint) ToString() string {
-  if len(t.Value) == 0 {
-    return fmt.Sprintf("%v:%v", t.Key, t.Effect)
-  }
-  return fmt.Sprintf("%v=%v:%v", t.Key, t.Value, t.Effect)
-}
diff --git a/vendor/k8s.io/api/core/v1/toleration.go b/vendor/k8s.io/api/core/v1/toleration.go
deleted file mode 100644
index b203d33..0000000
--- a/vendor/k8s.io/api/core/v1/toleration.go
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
-Copyright 2017 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1
-
-// MatchToleration checks if the toleration matches tolerationToMatch. Tolerations are unique by <key,effect,operator,value>,
-// if the two tolerations have same <key,effect,operator,value> combination, regard as they match.
-// TODO: uniqueness check for tolerations in api validations.
-func (t *Toleration) MatchToleration(tolerationToMatch *Toleration) bool {
-  return t.Key == tolerationToMatch.Key &&
-    t.Effect == tolerationToMatch.Effect &&
-    t.Operator == tolerationToMatch.Operator &&
-    t.Value == tolerationToMatch.Value
-}
-
-// ToleratesTaint checks if the toleration tolerates the taint.
-// The matching follows the rules below:
-// (1) Empty toleration.effect means to match all taint effects,
-//     otherwise taint effect must equal to toleration.effect.
-// (2) If toleration.operator is 'Exists', it means to match all taint values.
-// (3) Empty toleration.key means to match all taint keys.
-//     If toleration.key is empty, toleration.operator must be 'Exists';
-//     this combination means to match all taint values and all taint keys.
-func (t *Toleration) ToleratesTaint(taint *Taint) bool {
-  if len(t.Effect) > 0 && t.Effect != taint.Effect {
-    return false
-  }
-
-  if len(t.Key) > 0 && t.Key != taint.Key {
-    return false
-  }
-
-  // TODO: Use proper defaulting when Toleration becomes a field of PodSpec
-  switch t.Operator {
-  // empty operator means Equal
-  case "", TolerationOpEqual:
-    return t.Value == taint.Value
-  case TolerationOpExists:
-    return true
-  default:
-    return false
-  }
-}
diff --git a/vendor/k8s.io/api/core/v1/types.go b/vendor/k8s.io/api/core/v1/types.go
deleted file mode 100644
index d9a57bd..0000000
--- a/vendor/k8s.io/api/core/v1/types.go
+++ /dev/null
@@ -1,5320 +0,0 @@
-/*
-Copyright 2015 The Kubernetes Authors.
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - "k8s.io/apimachinery/pkg/util/intstr" -) - -const ( - // NamespaceDefault means the object is in the default namespace which is applied when not specified by clients - NamespaceDefault string = "default" - // NamespaceAll is the default argument to specify on a context when you want to list or filter resources across all namespaces - NamespaceAll string = "" - // NamespaceNodeLease is the namespace where we place node lease objects (used for node heartbeats) - NamespaceNodeLease string = "kube-node-lease" -) - -// Volume represents a named volume in a pod that may be accessed by any container in the pod. -type Volume struct { - // Volume's name. - // Must be a DNS_LABEL and unique within the pod. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // VolumeSource represents the location and type of the mounted volume. - // If not specified, the Volume is implied to be an EmptyDir. - // This implied behavior is deprecated and will be removed in a future version. - VolumeSource `json:",inline" protobuf:"bytes,2,opt,name=volumeSource"` -} - -// Represents the source of a volume to mount. -// Only one of its members may be specified. -type VolumeSource struct { - // HostPath represents a pre-existing file or directory on the host - // machine that is directly exposed to the container. This is generally - // used for system agents or other privileged things that are allowed - // to see the host machine. Most containers will NOT need this. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - // --- - // TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not - // mount host directories as read/write. - // +optional - HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,1,opt,name=hostPath"` - // EmptyDir represents a temporary directory that shares a pod's lifetime. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - // +optional - EmptyDir *EmptyDirVolumeSource `json:"emptyDir,omitempty" protobuf:"bytes,2,opt,name=emptyDir"` - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,3,opt,name=gcePersistentDisk"` - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. 
- // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,4,opt,name=awsElasticBlockStore"` - // GitRepo represents a git repository at a particular revision. - // DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an - // EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir - // into the Pod's container. - // +optional - GitRepo *GitRepoVolumeSource `json:"gitRepo,omitempty" protobuf:"bytes,5,opt,name=gitRepo"` - // Secret represents a secret that should populate this volume. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - // +optional - Secret *SecretVolumeSource `json:"secret,omitempty" protobuf:"bytes,6,opt,name=secret"` - // NFS represents an NFS mount on the host that shares a pod's lifetime - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - // +optional - NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,7,opt,name=nfs"` - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md - // +optional - ISCSI *ISCSIVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,8,opt,name=iscsi"` - // Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md - // +optional - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,9,opt,name=glusterfs"` - // PersistentVolumeClaimVolumeSource represents a reference to a - // PersistentVolumeClaim in the same namespace. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - // +optional - PersistentVolumeClaim *PersistentVolumeClaimVolumeSource `json:"persistentVolumeClaim,omitempty" protobuf:"bytes,10,opt,name=persistentVolumeClaim"` - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md - // +optional - RBD *RBDVolumeSource `json:"rbd,omitempty" protobuf:"bytes,11,opt,name=rbd"` - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. - // +optional - FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - Cinder *CinderVolumeSource `json:"cinder,omitempty" protobuf:"bytes,13,opt,name=cinder"` - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,14,opt,name=cephfs"` - // Flocker represents a Flocker volume attached to a kubelet's host machine. 
This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,15,opt,name=flocker"` - // DownwardAPI represents downward API about the pod that should populate this volume - // +optional - DownwardAPI *DownwardAPIVolumeSource `json:"downwardAPI,omitempty" protobuf:"bytes,16,opt,name=downwardAPI"` - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,17,opt,name=fc"` - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"` - // ConfigMap represents a configMap that should populate this volume - // +optional - ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"` - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"` - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,21,opt,name=quobyte"` - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,22,opt,name=azureDisk"` - // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,23,opt,name=photonPersistentDisk"` - // Items for all in one resources secrets, configmaps, and downward API - Projected *ProjectedVolumeSource `json:"projected,omitempty" protobuf:"bytes,26,opt,name=projected"` - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,24,opt,name=portworxVolume"` - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,25,opt,name=scaleIO"` - // StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes. - // +optional - StorageOS *StorageOSVolumeSource `json:"storageos,omitempty" protobuf:"bytes,27,opt,name=storageos"` -} - -// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. -// This volume finds the bound PV and mounts that volume for the pod. A -// PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another -// type of volume that is owned by someone else (the system). -type PersistentVolumeClaimVolumeSource struct { - // ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - ClaimName string `json:"claimName" protobuf:"bytes,1,opt,name=claimName"` - // Will force the ReadOnly setting in VolumeMounts. - // Default false. 
- // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// PersistentVolumeSource is similar to VolumeSource but meant for the -// administrator who creates PVs. Exactly one of its members must be set. -type PersistentVolumeSource struct { - // GCEPersistentDisk represents a GCE Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // +optional - GCEPersistentDisk *GCEPersistentDiskVolumeSource `json:"gcePersistentDisk,omitempty" protobuf:"bytes,1,opt,name=gcePersistentDisk"` - // AWSElasticBlockStore represents an AWS Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - // +optional - AWSElasticBlockStore *AWSElasticBlockStoreVolumeSource `json:"awsElasticBlockStore,omitempty" protobuf:"bytes,2,opt,name=awsElasticBlockStore"` - // HostPath represents a directory on the host. - // Provisioned by a developer or tester. - // This is useful for single-node development and testing only! - // On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - // +optional - HostPath *HostPathVolumeSource `json:"hostPath,omitempty" protobuf:"bytes,3,opt,name=hostPath"` - // Glusterfs represents a Glusterfs volume that is attached to a host and - // exposed to the pod. Provisioned by an admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md - // +optional - Glusterfs *GlusterfsVolumeSource `json:"glusterfs,omitempty" protobuf:"bytes,4,opt,name=glusterfs"` - // NFS represents an NFS mount on the host. Provisioned by an admin. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - // +optional - NFS *NFSVolumeSource `json:"nfs,omitempty" protobuf:"bytes,5,opt,name=nfs"` - // RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md - // +optional - RBD *RBDPersistentVolumeSource `json:"rbd,omitempty" protobuf:"bytes,6,opt,name=rbd"` - // ISCSI represents an ISCSI Disk resource that is attached to a - // kubelet's host machine and then exposed to the pod. Provisioned by an admin. - // +optional - ISCSI *ISCSIPersistentVolumeSource `json:"iscsi,omitempty" protobuf:"bytes,7,opt,name=iscsi"` - // Cinder represents a cinder volume attached and mounted on kubelets host machine - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - Cinder *CinderPersistentVolumeSource `json:"cinder,omitempty" protobuf:"bytes,8,opt,name=cinder"` - // CephFS represents a Ceph FS mount on the host that shares a pod's lifetime - // +optional - CephFS *CephFSPersistentVolumeSource `json:"cephfs,omitempty" protobuf:"bytes,9,opt,name=cephfs"` - // FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod. - // +optional - FC *FCVolumeSource `json:"fc,omitempty" protobuf:"bytes,10,opt,name=fc"` - // Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. 
This depends on the Flocker control service being running - // +optional - Flocker *FlockerVolumeSource `json:"flocker,omitempty" protobuf:"bytes,11,opt,name=flocker"` - // FlexVolume represents a generic volume resource that is - // provisioned/attached using an exec based plugin. - // +optional - FlexVolume *FlexPersistentVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"` - // AzureFile represents an Azure File Service mount on the host and bind mount to the pod. - // +optional - AzureFile *AzureFilePersistentVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"` - // VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine - // +optional - VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"` - // Quobyte represents a Quobyte mount on the host that shares a pod's lifetime - // +optional - Quobyte *QuobyteVolumeSource `json:"quobyte,omitempty" protobuf:"bytes,15,opt,name=quobyte"` - // AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. - // +optional - AzureDisk *AzureDiskVolumeSource `json:"azureDisk,omitempty" protobuf:"bytes,16,opt,name=azureDisk"` - // PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine - PhotonPersistentDisk *PhotonPersistentDiskVolumeSource `json:"photonPersistentDisk,omitempty" protobuf:"bytes,17,opt,name=photonPersistentDisk"` - // PortworxVolume represents a portworx volume attached and mounted on kubelets host machine - // +optional - PortworxVolume *PortworxVolumeSource `json:"portworxVolume,omitempty" protobuf:"bytes,18,opt,name=portworxVolume"` - // ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes. - // +optional - ScaleIO *ScaleIOPersistentVolumeSource `json:"scaleIO,omitempty" protobuf:"bytes,19,opt,name=scaleIO"` - // Local represents directly-attached storage with node affinity - // +optional - Local *LocalVolumeSource `json:"local,omitempty" protobuf:"bytes,20,opt,name=local"` - // StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod - // More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md - // +optional - StorageOS *StorageOSPersistentVolumeSource `json:"storageos,omitempty" protobuf:"bytes,21,opt,name=storageos"` - // CSI represents storage that handled by an external CSI driver (Beta feature). - // +optional - CSI *CSIPersistentVolumeSource `json:"csi,omitempty" protobuf:"bytes,22,opt,name=csi"` -} - -const ( - // BetaStorageClassAnnotation represents the beta/previous StorageClass annotation. - // It's currently still used and will be held for backwards compatibility - BetaStorageClassAnnotation = "volume.beta.kubernetes.io/storage-class" - - // MountOptionAnnotation defines mount option annotation used in PVs - MountOptionAnnotation = "volume.beta.kubernetes.io/mount-options" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolume (PV) is a storage resource provisioned by an administrator. -// It is analogous to a node. -// More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes -type PersistentVolume struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines a specification of a persistent volume owned by the cluster. - // Provisioned by an administrator. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes - // +optional - Spec PersistentVolumeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status represents the current information/status for the persistent volume. - // Populated by the system. - // Read-only. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes - // +optional - Status PersistentVolumeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// PersistentVolumeSpec is the specification of a persistent volume. -type PersistentVolumeSpec struct { - // A description of the persistent volume's resources and capacity. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity - // +optional - Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` - // The actual volume backing the persistent volume. - PersistentVolumeSource `json:",inline" protobuf:"bytes,2,opt,name=persistentVolumeSource"` - // AccessModes contains all ways the volume can be mounted. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes - // +optional - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,3,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. - // Expected to be non-nil when bound. - // claim.VolumeName is the authoritative bind between PV and PVC. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding - // +optional - ClaimRef *ObjectReference `json:"claimRef,omitempty" protobuf:"bytes,4,opt,name=claimRef"` - // What happens to a persistent volume when released from its claim. - // Valid options are Retain (default for manually created PersistentVolumes), Delete (default - // for dynamically provisioned PersistentVolumes), and Recycle (deprecated). - // Recycle must be supported by the volume plugin underlying this PersistentVolume. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming - // +optional - PersistentVolumeReclaimPolicy PersistentVolumeReclaimPolicy `json:"persistentVolumeReclaimPolicy,omitempty" protobuf:"bytes,5,opt,name=persistentVolumeReclaimPolicy,casttype=PersistentVolumeReclaimPolicy"` - // Name of StorageClass to which this persistent volume belongs. Empty value - // means that this volume does not belong to any StorageClass. - // +optional - StorageClassName string `json:"storageClassName,omitempty" protobuf:"bytes,6,opt,name=storageClassName"` - // A list of mount options, e.g. ["ro", "soft"]. Not validated - mount will - // simply fail if one is invalid. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options - // +optional - MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,7,opt,name=mountOptions"` - // volumeMode defines if a volume is intended to be used with a formatted filesystem - // or to remain in raw block state. Value of Filesystem is implied when not included in spec. 
- // This is an alpha feature and may change in the future. - // +optional - VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,8,opt,name=volumeMode,casttype=PersistentVolumeMode"` - // NodeAffinity defines constraints that limit what nodes this volume can be accessed from. - // This field influences the scheduling of pods that use this volume. - // +optional - NodeAffinity *VolumeNodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,9,opt,name=nodeAffinity"` -} - -// VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from. -type VolumeNodeAffinity struct { - // Required specifies hard node constraints that must be met. - Required *NodeSelector `json:"required,omitempty" protobuf:"bytes,1,opt,name=required"` -} - -// PersistentVolumeReclaimPolicy describes a policy for end-of-life maintenance of persistent volumes. -type PersistentVolumeReclaimPolicy string - -const ( - // PersistentVolumeReclaimRecycle means the volume will be recycled back into the pool of unbound persistent volumes on release from its claim. - // The volume plugin must support Recycling. - PersistentVolumeReclaimRecycle PersistentVolumeReclaimPolicy = "Recycle" - // PersistentVolumeReclaimDelete means the volume will be deleted from Kubernetes on release from its claim. - // The volume plugin must support Deletion. - PersistentVolumeReclaimDelete PersistentVolumeReclaimPolicy = "Delete" - // PersistentVolumeReclaimRetain means the volume will be left in its current phase (Released) for manual reclamation by the administrator. - // The default policy is Retain. - PersistentVolumeReclaimRetain PersistentVolumeReclaimPolicy = "Retain" -) - -// PersistentVolumeMode describes how a volume is intended to be consumed, either Block or Filesystem. -type PersistentVolumeMode string - -const ( - // PersistentVolumeBlock means the volume will not be formatted with a filesystem and will remain a raw block device. - PersistentVolumeBlock PersistentVolumeMode = "Block" - // PersistentVolumeFilesystem means the volume will be or is formatted with a filesystem. - PersistentVolumeFilesystem PersistentVolumeMode = "Filesystem" -) - -// PersistentVolumeStatus is the current status of a persistent volume. -type PersistentVolumeStatus struct { - // Phase indicates if a volume is available, bound to a claim, or released by a claim. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase - // +optional - Phase PersistentVolumePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumePhase"` - // A human-readable message indicating details about why the volume is in this state. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` - // Reason is a brief CamelCase string that describes any failure and is meant - // for machine parsing and tidy display in the CLI. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolumeList is a list of PersistentVolume items. -type PersistentVolumeList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // List of persistent volumes. 
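Taken together, the persistent-volume types above compose as ordinary struct literals. A minimal sketch, assuming it sits in the same package as these definitions with metav1 imported as elsewhere in this file (the object name, host path, and class name are illustrative):

// A HostPath-backed PersistentVolume that is retained after its claim is released.
var examplePV = PersistentVolume{
	ObjectMeta: metav1.ObjectMeta{Name: "example-pv"},
	Spec: PersistentVolumeSpec{
		// Exactly one member of the embedded PersistentVolumeSource may be set.
		PersistentVolumeSource: PersistentVolumeSource{
			HostPath: &HostPathVolumeSource{Path: "/tmp/example-pv"},
		},
		PersistentVolumeReclaimPolicy: PersistentVolumeReclaimRetain,
		StorageClassName:              "example-class",
	},
}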
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes - Items []PersistentVolume `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolumeClaim is a user's request for and claim to a persistent volume -type PersistentVolumeClaim struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired characteristics of a volume requested by a pod author. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - // +optional - Spec PersistentVolumeClaimSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status represents the current information/status of a persistent volume claim. - // Read-only. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - // +optional - Status PersistentVolumeClaimStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PersistentVolumeClaimList is a list of PersistentVolumeClaim items. -type PersistentVolumeClaimList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // A list of persistent volume claims. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims - Items []PersistentVolumeClaim `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// PersistentVolumeClaimSpec describes the common attributes of storage devices -// and allows a Source for provider-specific attributes -type PersistentVolumeClaimSpec struct { - // AccessModes contains the desired access modes the volume should have. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - // +optional - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,1,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // A label query over volumes to consider for binding. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,4,opt,name=selector"` - // Resources represents the minimum resources the volume should have. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources - // +optional - Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,2,opt,name=resources"` - // VolumeName is the binding reference to the PersistentVolume backing this claim. - // +optional - VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,3,opt,name=volumeName"` - // Name of the StorageClass required by the claim. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1 - // +optional - StorageClassName *string `json:"storageClassName,omitempty" protobuf:"bytes,5,opt,name=storageClassName"` - // volumeMode defines what type of volume is required by the claim. - // Value of Filesystem is implied when not included in claim spec. - // This is an alpha feature and may change in the future. 
- // +optional - VolumeMode *PersistentVolumeMode `json:"volumeMode,omitempty" protobuf:"bytes,6,opt,name=volumeMode,casttype=PersistentVolumeMode"` - // This field requires the VolumeSnapshotDataSource alpha feature gate to be - // enabled and currently VolumeSnapshot is the only supported data source. - // If the provisioner can support VolumeSnapshot data source, it will create - // a new volume and data will be restored to the volume at the same time. - // If the provisioner does not support VolumeSnapshot data source, volume will - // not be created and the failure will be reported as an event. - // In the future, we plan to support more data source types and the behavior - // of the provisioner may change. - // +optional - DataSource *TypedLocalObjectReference `json:"dataSource" protobuf:"bytes,7,opt,name=dataSource"` -} - -// PersistentVolumeClaimConditionType is a valid value of PersistentVolumeClaimCondition.Type -type PersistentVolumeClaimConditionType string - -const ( - // PersistentVolumeClaimResizing - a user trigger resize of pvc has been started - PersistentVolumeClaimResizing PersistentVolumeClaimConditionType = "Resizing" - // PersistentVolumeClaimFileSystemResizePending - controller resize is finished and a file system resize is pending on node - PersistentVolumeClaimFileSystemResizePending PersistentVolumeClaimConditionType = "FileSystemResizePending" -) - -// PersistentVolumeClaimCondition contails details about state of pvc -type PersistentVolumeClaimCondition struct { - Type PersistentVolumeClaimConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PersistentVolumeClaimConditionType"` - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // Unique, this should be a short, machine understandable string that gives the reason - // for condition's last transition. If it reports "ResizeStarted" that means the underlying - // persistent volume is being resized. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// PersistentVolumeClaimStatus is the current status of a persistent volume claim. -type PersistentVolumeClaimStatus struct { - // Phase represents the current phase of PersistentVolumeClaim. - // +optional - Phase PersistentVolumeClaimPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PersistentVolumeClaimPhase"` - // AccessModes contains the actual access modes the volume backing the PVC has. - // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 - // +optional - AccessModes []PersistentVolumeAccessMode `json:"accessModes,omitempty" protobuf:"bytes,2,rep,name=accessModes,casttype=PersistentVolumeAccessMode"` - // Represents the actual resources of the underlying volume. - // +optional - Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,3,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` - // Current Condition of persistent volume claim. 
If underlying persistent volume is being - // resized then the Condition will be set to 'ResizeStarted'. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []PersistentVolumeClaimCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` -} - -type PersistentVolumeAccessMode string - -const ( - // can be mounted read/write mode to exactly 1 host - ReadWriteOnce PersistentVolumeAccessMode = "ReadWriteOnce" - // can be mounted in read-only mode to many hosts - ReadOnlyMany PersistentVolumeAccessMode = "ReadOnlyMany" - // can be mounted in read/write mode to many hosts - ReadWriteMany PersistentVolumeAccessMode = "ReadWriteMany" -) - -type PersistentVolumePhase string - -const ( - // used for PersistentVolumes that are not available - VolumePending PersistentVolumePhase = "Pending" - // used for PersistentVolumes that are not yet bound - // Available volumes are held by the binder and matched to PersistentVolumeClaims - VolumeAvailable PersistentVolumePhase = "Available" - // used for PersistentVolumes that are bound - VolumeBound PersistentVolumePhase = "Bound" - // used for PersistentVolumes where the bound PersistentVolumeClaim was deleted - // released volumes must be recycled before becoming available again - // this phase is used by the persistent volume claim binder to signal to another process to reclaim the resource - VolumeReleased PersistentVolumePhase = "Released" - // used for PersistentVolumes that failed to be correctly recycled or deleted after being released from a claim - VolumeFailed PersistentVolumePhase = "Failed" -) - -type PersistentVolumeClaimPhase string - -const ( - // used for PersistentVolumeClaims that are not yet bound - ClaimPending PersistentVolumeClaimPhase = "Pending" - // used for PersistentVolumeClaims that are bound - ClaimBound PersistentVolumeClaimPhase = "Bound" - // used for PersistentVolumeClaims that lost their underlying - // PersistentVolume. The claim was bound to a PersistentVolume and this - // volume does not exist any longer and all data on it was lost. - ClaimLost PersistentVolumeClaimPhase = "Lost" -) - -type HostPathType string - -const ( - // For backwards compatible, leave it empty if unset - HostPathUnset HostPathType = "" - // If nothing exists at the given path, an empty directory will be created there - // as needed with file mode 0755, having the same group and ownership with Kubelet. - HostPathDirectoryOrCreate HostPathType = "DirectoryOrCreate" - // A directory must exist at the given path - HostPathDirectory HostPathType = "Directory" - // If nothing exists at the given path, an empty file will be created there - // as needed with file mode 0644, having the same group and ownership with Kubelet. - HostPathFileOrCreate HostPathType = "FileOrCreate" - // A file must exist at the given path - HostPathFile HostPathType = "File" - // A UNIX socket must exist at the given path - HostPathSocket HostPathType = "Socket" - // A character device must exist at the given path - HostPathCharDev HostPathType = "CharDevice" - // A block device must exist at the given path - HostPathBlockDev HostPathType = "BlockDevice" -) - -// Represents a host path mapped into a pod. -// Host path volumes do not support ownership management or SELinux relabeling. -type HostPathVolumeSource struct { - // Path of the directory on the host. - // If the path is a symlink, it will follow the link to the real path. 
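On the claim side, the spec above can be sketched the same way (same same-package assumption; the StorageClass name is illustrative):

// Request a ReadWriteOnce, filesystem-mode volume from an assumed "example-class" StorageClass.
var (
	exampleClass = "example-class"
	exampleMode  = PersistentVolumeFilesystem
)

var exampleClaimSpec = PersistentVolumeClaimSpec{
	AccessModes:      []PersistentVolumeAccessMode{ReadWriteOnce},
	StorageClassName: &exampleClass,
	VolumeMode:       &exampleMode,
}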
- // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - Path string `json:"path" protobuf:"bytes,1,opt,name=path"` - // Type for HostPath Volume - // Defaults to "" - // More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath - // +optional - Type *HostPathType `json:"type,omitempty" protobuf:"bytes,2,opt,name=type"` -} - -// Represents an empty directory for a pod. -// Empty directory volumes support ownership management and SELinux relabeling. -type EmptyDirVolumeSource struct { - // What type of storage medium should back this directory. - // The default is "" which means to use the node's default medium. - // Must be an empty string (default) or Memory. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir - // +optional - Medium StorageMedium `json:"medium,omitempty" protobuf:"bytes,1,opt,name=medium,casttype=StorageMedium"` - // Total amount of local storage required for this EmptyDir volume. - // The size limit is also applicable for memory medium. - // The maximum usage on memory medium EmptyDir would be the minimum value between - // the SizeLimit specified here and the sum of memory limits of all containers in a pod. - // The default is nil which means that the limit is undefined. - // More info: http://kubernetes.io/docs/user-guide/volumes#emptydir - // +optional - SizeLimit *resource.Quantity `json:"sizeLimit,omitempty" protobuf:"bytes,2,opt,name=sizeLimit"` -} - -// Represents a Glusterfs mount that lasts the lifetime of a pod. -// Glusterfs volumes do not support ownership management or SELinux relabeling. -type GlusterfsVolumeSource struct { - // EndpointsName is the endpoint name that details Glusterfs topology. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - EndpointsName string `json:"endpoints" protobuf:"bytes,1,opt,name=endpoints"` - - // Path is the Glusterfs volume path. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - - // ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. - // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -type RBDVolumeSource struct { - // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // The rados pool name. - // Default is rbd. 
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` - // The rados user name. - // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` - // Keyring is the path to key ring for RBDUser. - // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` - // SecretRef is name of the authentication secret for RBDUser. If provided - // overrides keyring. - // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` -} - -// Represents a Rados Block Device mount that lasts the lifetime of a pod. -// RBD volumes support ownership management and SELinux relabeling. -type RBDPersistentVolumeSource struct { - // A collection of Ceph monitors. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - CephMonitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // The rados image name. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - RBDImage string `json:"image" protobuf:"bytes,2,opt,name=image"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // The rados pool name. - // Default is rbd. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - RBDPool string `json:"pool,omitempty" protobuf:"bytes,4,opt,name=pool"` - // The rados user name. - // Default is admin. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - RadosUser string `json:"user,omitempty" protobuf:"bytes,5,opt,name=user"` - // Keyring is the path to key ring for RBDUser. - // Default is /etc/ceph/keyring. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - Keyring string `json:"keyring,omitempty" protobuf:"bytes,6,opt,name=keyring"` - // SecretRef is name of the authentication secret for RBDUser. If provided - // overrides keyring. - // Default is nil. - // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,7,opt,name=secretRef"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. 
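A corresponding sketch for the RBD source above (monitor addresses and image name are illustrative; fields left unset fall back to the documented defaults):

// Mount the RBD image "example-image"; pool, user and keyring default to
// "rbd", "admin" and /etc/ceph/keyring as documented above.
var exampleRBD = RBDVolumeSource{
	CephMonitors: []string{"10.0.0.1:6789", "10.0.0.2:6789"},
	RBDImage:     "example-image",
	FSType:       "ext4",
}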
- // More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,8,opt,name=readOnly"` -} - -// Represents a cinder volume resource in Openstack. -// A Cinder volume must exist before mounting to a container. -// The volume must also be in the same region as the kubelet. -// Cinder volumes support ownership management and SELinux relabeling. -type CinderVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // Optional: points to a secret object containing parameters used to connect - // to OpenStack. - // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` -} - -// Represents a cinder volume resource in Openstack. -// A Cinder volume must exist before mounting to a container. -// The volume must also be in the same region as the kubelet. -// Cinder volumes support ownership management and SELinux relabeling. -type CinderPersistentVolumeSource struct { - // volume id used to identify the volume in cinder - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // Optional: points to a secret object containing parameters used to connect - // to OpenStack. - // +optional - SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,4,opt,name=secretRef"` -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. 
-type CephFSVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` - // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` -} - -// SecretReference represents a Secret Reference. It has enough information to retrieve secret -// in any namespace -type SecretReference struct { - // Name is unique within a namespace to reference a secret resource. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Namespace defines the space within which the secret name must be unique. - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` -} - -// Represents a Ceph Filesystem mount that lasts the lifetime of a pod -// Cephfs volumes do not support ownership management or SELinux relabeling. -type CephFSPersistentVolumeSource struct { - // Required: Monitors is a collection of Ceph monitors - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - Monitors []string `json:"monitors" protobuf:"bytes,1,rep,name=monitors"` - // Optional: Used as the mounted root, rather than the full Ceph tree, default is / - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,2,opt,name=path"` - // Optional: User is the rados user name, default is admin - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - User string `json:"user,omitempty" protobuf:"bytes,3,opt,name=user"` - // Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - SecretFile string `json:"secretFile,omitempty" protobuf:"bytes,4,opt,name=secretFile"` - // Optional: SecretRef is reference to the authentication secret for User, default is empty. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` - // Optional: Defaults to false (read/write). 
ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` -} - -// Represents a Flocker volume mounted by the Flocker agent. -// One and only one of datasetName and datasetUUID should be set. -// Flocker volumes do not support ownership management or SELinux relabeling. -type FlockerVolumeSource struct { - // Name of the dataset stored as metadata -> name on the dataset for Flocker - // should be considered as deprecated - // +optional - DatasetName string `json:"datasetName,omitempty" protobuf:"bytes,1,opt,name=datasetName"` - // UUID of the dataset. This is unique identifier of a Flocker dataset - // +optional - DatasetUUID string `json:"datasetUUID,omitempty" protobuf:"bytes,2,opt,name=datasetUUID"` -} - -// StorageMedium defines ways that storage can be allocated to a volume. -type StorageMedium string - -const ( - StorageMediumDefault StorageMedium = "" // use whatever the default is for the node, assume anything we don't explicitly handle is this - StorageMediumMemory StorageMedium = "Memory" // use memory (e.g. tmpfs on linux) - StorageMediumHugePages StorageMedium = "HugePages" // use hugepages -) - -// Protocol defines network protocols supported for things like container ports. -type Protocol string - -const ( - // ProtocolTCP is the TCP protocol. - ProtocolTCP Protocol = "TCP" - // ProtocolUDP is the UDP protocol. - ProtocolUDP Protocol = "UDP" - // ProtocolSCTP is the SCTP protocol. - ProtocolSCTP Protocol = "SCTP" -) - -// Represents a Persistent Disk resource in Google Compute Engine. -// -// A GCE PD must exist before mounting to a container. The disk must -// also be in the same GCE project and zone as the kubelet. A GCE PD -// can only be mounted as read/write once or read-only many times. GCE -// PDs support ownership management and SELinux relabeling. -type GCEPersistentDiskVolumeSource struct { - // Unique name of the PD resource in GCE. Used to identify the disk in GCE. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - PDName string `json:"pdName" protobuf:"bytes,1,opt,name=pdName"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // +optional - Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. 
- // More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` -} - -// Represents a Quobyte mount that lasts the lifetime of a pod. -// Quobyte volumes do not support ownership management or SELinux relabeling. -type QuobyteVolumeSource struct { - // Registry represents a single or multiple Quobyte Registry services - // specified as a string as host:port pair (multiple entries are separated with commas) - // which acts as the central registry for volumes - Registry string `json:"registry" protobuf:"bytes,1,opt,name=registry"` - - // Volume is a string that references an already created Quobyte volume by name. - Volume string `json:"volume" protobuf:"bytes,2,opt,name=volume"` - - // ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. - // Defaults to false. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - - // User to map volume access to - // Defaults to serivceaccount user - // +optional - User string `json:"user,omitempty" protobuf:"bytes,4,opt,name=user"` - - // Group to map volume access to - // Default is no group - // +optional - Group string `json:"group,omitempty" protobuf:"bytes,5,opt,name=group"` -} - -// FlexPersistentVolumeSource represents a generic persistent volume resource that is -// provisioned/attached using an exec based plugin. -type FlexPersistentVolumeSource struct { - // Driver is the name of the driver to use for this volume. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. - // +optional - SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // Optional: Extra command options if any. - // +optional - Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` -} - -// FlexVolume represents a generic volume resource that is -// provisioned/attached using an exec based plugin. -type FlexVolumeSource struct { - // Driver is the name of the driver to use for this volume. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default filesystem depends on FlexVolume script. - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Optional: SecretRef is reference to the secret object containing - // sensitive information to pass to the plugin scripts. This may be - // empty if no secret object is specified. If the secret object - // contains more than one secret, all secrets are passed to the plugin - // scripts. 
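And for the GCE persistent-disk source (the disk name is illustrative):

// Mount an existing GCE PD read-only, the mode that allows many pods to share it.
var exampleGCEPD = GCEPersistentDiskVolumeSource{
	PDName:   "example-disk",
	FSType:   "ext4",
	ReadOnly: true,
}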
- // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // Optional: Extra command options if any. - // +optional - Options map[string]string `json:"options,omitempty" protobuf:"bytes,5,rep,name=options"` -} - -// Represents a Persistent Disk resource in AWS. -// -// An AWS EBS disk must exist before mounting to a container. The disk -// must also be in the same AWS zone as the kubelet. An AWS EBS disk -// can only be mounted as read/write once. AWS EBS volumes support -// ownership management and SELinux relabeling. -type AWSElasticBlockStoreVolumeSource struct { - // Unique ID of the persistent disk resource in AWS (Amazon EBS volume). - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // The partition in the volume that you want to mount. - // If omitted, the default is to mount by volume name. - // Examples: For volume /dev/sda1, you specify the partition as "1". - // Similarly, the volume partition for /dev/sda is "0" (or you can leave the property empty). - // +optional - Partition int32 `json:"partition,omitempty" protobuf:"varint,3,opt,name=partition"` - // Specify "true" to force and set the ReadOnly property in VolumeMounts to "true". - // If omitted, the default is "false". - // More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` -} - -// Represents a volume that is populated with the contents of a git repository. -// Git repo volumes do not support ownership management. -// Git repo volumes support SELinux relabeling. -// -// DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an -// EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir -// into the Pod's container. -type GitRepoVolumeSource struct { - // Repository URL - Repository string `json:"repository" protobuf:"bytes,1,opt,name=repository"` - // Commit hash for the specified revision. - // +optional - Revision string `json:"revision,omitempty" protobuf:"bytes,2,opt,name=revision"` - // Target directory name. - // Must not contain or start with '..'. If '.' is supplied, the volume directory will be the - // git repository. Otherwise, if specified, the volume will contain the git repository in - // the subdirectory with the given name. - // +optional - Directory string `json:"directory,omitempty" protobuf:"bytes,3,opt,name=directory"` -} - -// Adapts a Secret into a volume. -// -// The contents of the target Secret's Data field will be presented in a volume -// as files using the keys in the Data field as the file names. 
-// Secret volumes support ownership management and SELinux relabeling. -type SecretVolumeSource struct { - // Name of the secret in the pod's namespace to use. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#secret - // +optional - SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"` - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. - // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"bytes,3,opt,name=defaultMode"` - // Specify whether the Secret or it's keys must be defined - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` -} - -const ( - SecretVolumeSourceDefaultMode int32 = 0644 -) - -// Adapts a secret into a projected volume. -// -// The contents of the target Secret's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names. -// Note that this is identical to a secret volume source without the default -// mode. -type SecretProjection struct { - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // If unspecified, each key-value pair in the Data field of the referenced - // Secret will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the Secret, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the Secret or its key must be defined - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` -} - -// Represents an NFS mount that lasts the lifetime of a pod. -// NFS volumes do not support ownership management or SELinux relabeling. -type NFSVolumeSource struct { - // Server is the hostname or IP address of the NFS server. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - Server string `json:"server" protobuf:"bytes,1,opt,name=server"` - - // Path that is exported by the NFS server. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - - // ReadOnly here will force - // the NFS export to be mounted with read-only permissions. - // Defaults to false. 
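A sketch for the secret volume above, reusing the documented default mode (the Secret name is illustrative):

// Project every key of the Secret "example-credentials" as a 0644 file.
var exampleSecretMode int32 = SecretVolumeSourceDefaultMode

var exampleSecretVolume = SecretVolumeSource{
	SecretName:  "example-credentials",
	DefaultMode: &exampleSecretMode,
}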
- // More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// Represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIVolumeSource struct { - // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` - // Target iSCSI Qualified Name. - IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI Target Lun number. - Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // iSCSI Interface Name that uses an iSCSI transport. - // Defaults to 'default' (tcp). - // +optional - ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` - // iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - // +optional - Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` - // whether support iSCSI Discovery CHAP authentication - // +optional - DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` - // whether support iSCSI Session CHAP authentication - // +optional - SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` - // CHAP Secret for iSCSI target and initiator authentication - // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` - // Custom iSCSI Initiator Name. - // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"` -} - -// ISCSIPersistentVolumeSource represents an ISCSI disk. -// ISCSI volumes can only be mounted as read/write once. -// ISCSI volumes support ownership management and SELinux relabeling. -type ISCSIPersistentVolumeSource struct { - // iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - TargetPortal string `json:"targetPortal" protobuf:"bytes,1,opt,name=targetPortal"` - // Target iSCSI Qualified Name. - IQN string `json:"iqn" protobuf:"bytes,2,opt,name=iqn"` - // iSCSI Target Lun number. - Lun int32 `json:"lun" protobuf:"varint,3,opt,name=lun"` - // iSCSI Interface Name that uses an iSCSI transport. - // Defaults to 'default' (tcp). 
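Similarly for the NFS source above (server and export path are illustrative):

// Mount the export read-only, as the ReadOnly field above allows.
var exampleNFS = NFSVolumeSource{
	Server:   "nfs.example.internal",
	Path:     "/exports/data",
	ReadOnly: true,
}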
- // +optional - ISCSIInterface string `json:"iscsiInterface,omitempty" protobuf:"bytes,4,opt,name=iscsiInterface"` - // Filesystem type of the volume that you want to mount. - // Tip: Ensure that the filesystem type is supported by the host operating system. - // Examples: "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,5,opt,name=fsType"` - // ReadOnly here will force the ReadOnly setting in VolumeMounts. - // Defaults to false. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,6,opt,name=readOnly"` - // iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port - // is other than default (typically TCP ports 860 and 3260). - // +optional - Portals []string `json:"portals,omitempty" protobuf:"bytes,7,opt,name=portals"` - // whether support iSCSI Discovery CHAP authentication - // +optional - DiscoveryCHAPAuth bool `json:"chapAuthDiscovery,omitempty" protobuf:"varint,8,opt,name=chapAuthDiscovery"` - // whether support iSCSI Session CHAP authentication - // +optional - SessionCHAPAuth bool `json:"chapAuthSession,omitempty" protobuf:"varint,11,opt,name=chapAuthSession"` - // CHAP Secret for iSCSI target and initiator authentication - // +optional - SecretRef *SecretReference `json:"secretRef,omitempty" protobuf:"bytes,10,opt,name=secretRef"` - // Custom iSCSI Initiator Name. - // If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface - // : will be created for the connection. - // +optional - InitiatorName *string `json:"initiatorName,omitempty" protobuf:"bytes,12,opt,name=initiatorName"` -} - -// Represents a Fibre Channel volume. -// Fibre Channel volumes can only be mounted as read/write once. -// Fibre Channel volumes support ownership management and SELinux relabeling. -type FCVolumeSource struct { - // Optional: FC target worldwide names (WWNs) - // +optional - TargetWWNs []string `json:"targetWWNs,omitempty" protobuf:"bytes,1,rep,name=targetWWNs"` - // Optional: FC target lun number - // +optional - Lun *int32 `json:"lun,omitempty" protobuf:"varint,2,opt,name=lun"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // TODO: how do we prevent errors in the filesystem from compromising the machine - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // Optional: Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // Optional: FC volume world wide identifiers (wwids) - // Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously. - // +optional - WWIDs []string `json:"wwids,omitempty" protobuf:"bytes,5,rep,name=wwids"` -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. 
-type AzureFileVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` - // Share Name - ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// AzureFile represents an Azure File Service mount on the host and bind mount to the pod. -type AzureFilePersistentVolumeSource struct { - // the name of secret that contains Azure Storage Account Name and Key - SecretName string `json:"secretName" protobuf:"bytes,1,opt,name=secretName"` - // Share Name - ShareName string `json:"shareName" protobuf:"bytes,2,opt,name=shareName"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - // the namespace of the secret that contains Azure Storage Account Name and Key - // default is the same as the Pod - // +optional - SecretNamespace *string `json:"secretNamespace" protobuf:"bytes,4,opt,name=secretNamespace"` -} - -// Represents a vSphere volume resource. -type VsphereVirtualDiskVolumeSource struct { - // Path that identifies vSphere volume vmdk - VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Storage Policy Based Management (SPBM) profile name. - // +optional - StoragePolicyName string `json:"storagePolicyName,omitempty" protobuf:"bytes,3,opt,name=storagePolicyName"` - // Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName. - // +optional - StoragePolicyID string `json:"storagePolicyID,omitempty" protobuf:"bytes,4,opt,name=storagePolicyID"` -} - -// Represents a Photon Controller persistent disk resource. -type PhotonPersistentDiskVolumeSource struct { - // ID that identifies Photon Controller persistent disk - PdID string `json:"pdID" protobuf:"bytes,1,opt,name=pdID"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` -} - -type AzureDataDiskCachingMode string -type AzureDataDiskKind string - -const ( - AzureDataDiskCachingNone AzureDataDiskCachingMode = "None" - AzureDataDiskCachingReadOnly AzureDataDiskCachingMode = "ReadOnly" - AzureDataDiskCachingReadWrite AzureDataDiskCachingMode = "ReadWrite" - - AzureSharedBlobDisk AzureDataDiskKind = "Shared" - AzureDedicatedBlobDisk AzureDataDiskKind = "Dedicated" - AzureManagedDisk AzureDataDiskKind = "Managed" -) - -// AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod. 
-type AzureDiskVolumeSource struct { - // The Name of the data disk in the blob storage - DiskName string `json:"diskName" protobuf:"bytes,1,opt,name=diskName"` - // The URI the data disk in the blob storage - DataDiskURI string `json:"diskURI" protobuf:"bytes,2,opt,name=diskURI"` - // Host Caching mode: None, Read Only, Read Write. - // +optional - CachingMode *AzureDataDiskCachingMode `json:"cachingMode,omitempty" protobuf:"bytes,3,opt,name=cachingMode,casttype=AzureDataDiskCachingMode"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType *string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly *bool `json:"readOnly,omitempty" protobuf:"varint,5,opt,name=readOnly"` - // Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared - Kind *AzureDataDiskKind `json:"kind,omitempty" protobuf:"bytes,6,opt,name=kind,casttype=AzureDataDiskKind"` -} - -// PortworxVolumeSource represents a Portworx volume resource. -type PortworxVolumeSource struct { - // VolumeID uniquely identifies a Portworx volume - VolumeID string `json:"volumeID" protobuf:"bytes,1,opt,name=volumeID"` - // FSType represents the filesystem type to mount - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs". Implicitly inferred to be "ext4" if unspecified. - FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` -} - -// ScaleIOVolumeSource represents a persistent ScaleIO volume -type ScaleIOVolumeSource struct { - // The host address of the ScaleIO API Gateway. - Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` - // The name of the storage system as configured in ScaleIO. - System string `json:"system" protobuf:"bytes,2,opt,name=system"` - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - SecretRef *LocalObjectReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` - // The name of the ScaleIO Protection Domain for the configured storage. - // +optional - ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` - // The ScaleIO Storage Pool associated with the protection domain. - // +optional - StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - // Default is ThinProvisioned. - // +optional - StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. 
- VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // Default is "xfs". - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` -} - -// ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume -type ScaleIOPersistentVolumeSource struct { - // The host address of the ScaleIO API Gateway. - Gateway string `json:"gateway" protobuf:"bytes,1,opt,name=gateway"` - // The name of the storage system as configured in ScaleIO. - System string `json:"system" protobuf:"bytes,2,opt,name=system"` - // SecretRef references to the secret for ScaleIO user and other - // sensitive information. If this is not provided, Login operation will fail. - SecretRef *SecretReference `json:"secretRef" protobuf:"bytes,3,opt,name=secretRef"` - // Flag to enable/disable SSL communication with Gateway, default false - // +optional - SSLEnabled bool `json:"sslEnabled,omitempty" protobuf:"varint,4,opt,name=sslEnabled"` - // The name of the ScaleIO Protection Domain for the configured storage. - // +optional - ProtectionDomain string `json:"protectionDomain,omitempty" protobuf:"bytes,5,opt,name=protectionDomain"` - // The ScaleIO Storage Pool associated with the protection domain. - // +optional - StoragePool string `json:"storagePool,omitempty" protobuf:"bytes,6,opt,name=storagePool"` - // Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. - // Default is ThinProvisioned. - // +optional - StorageMode string `json:"storageMode,omitempty" protobuf:"bytes,7,opt,name=storageMode"` - // The name of a volume already created in the ScaleIO system - // that is associated with this volume source. - VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,8,opt,name=volumeName"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". - // Default is "xfs" - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,9,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,10,opt,name=readOnly"` -} - -// Represents a StorageOS persistent volume resource. -type StorageOSVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` - // Filesystem type to mount. 
- // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *LocalObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` -} - -// Represents a StorageOS persistent volume resource. -type StorageOSPersistentVolumeSource struct { - // VolumeName is the human-readable name of the StorageOS volume. Volume - // names are only unique within a namespace. - VolumeName string `json:"volumeName,omitempty" protobuf:"bytes,1,opt,name=volumeName"` - // VolumeNamespace specifies the scope of the volume within StorageOS. If no - // namespace is specified then the Pod's namespace will be used. This allows the - // Kubernetes name scoping to be mirrored within StorageOS for tighter integration. - // Set VolumeName to any name to override the default behaviour. - // Set to "default" if you are not using namespaces within StorageOS. - // Namespaces that do not pre-exist within StorageOS will be created. - // +optional - VolumeNamespace string `json:"volumeNamespace,omitempty" protobuf:"bytes,2,opt,name=volumeNamespace"` - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified. - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,3,opt,name=fsType"` - // Defaults to false (read/write). ReadOnly here will force - // the ReadOnly setting in VolumeMounts. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,4,opt,name=readOnly"` - // SecretRef specifies the secret to use for obtaining the StorageOS API - // credentials. If not specified, default values will be attempted. - // +optional - SecretRef *ObjectReference `json:"secretRef,omitempty" protobuf:"bytes,5,opt,name=secretRef"` -} - -// Adapts a ConfigMap into a volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// volume as files using the keys in the Data field as the file names, unless -// the items element is populated with specific mappings of keys to paths. -// ConfigMap volumes support ownership management and SELinux relabeling. -type ConfigMapVolumeSource struct { - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. 
- // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,3,opt,name=defaultMode"` - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` -} - -const ( - ConfigMapVolumeSourceDefaultMode int32 = 0644 -) - -// Adapts a ConfigMap into a projected volume. -// -// The contents of the target ConfigMap's Data field will be presented in a -// projected volume as files using the keys in the Data field as the file names, -// unless the items element is populated with specific mappings of keys to paths. -// Note that this is identical to a configmap volume source without the default -// mode. -type ConfigMapProjection struct { - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // If unspecified, each key-value pair in the Data field of the referenced - // ConfigMap will be projected into the volume as a file whose name is the - // key and content is the value. If specified, the listed keys will be - // projected into the specified paths, and unlisted keys will not be - // present. If a key is specified which is not present in the ConfigMap, - // the volume setup will error unless it is marked optional. Paths must be - // relative and may not contain the '..' path or start with '..'. - // +optional - Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"` - // Specify whether the ConfigMap or it's keys must be defined - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,4,opt,name=optional"` -} - -// ServiceAccountTokenProjection represents a projected service account token -// volume. This projection can be used to insert a service account token into -// the pods runtime filesystem for use against APIs (Kubernetes API Server or -// otherwise). -type ServiceAccountTokenProjection struct { - // Audience is the intended audience of the token. A recipient of a token - // must identify itself with an identifier specified in the audience of the - // token, and otherwise should reject the token. The audience defaults to the - // identifier of the apiserver. - //+optional - Audience string `json:"audience,omitempty" protobuf:"bytes,1,rep,name=audience"` - // ExpirationSeconds is the requested duration of validity of the service - // account token. As the token approaches expiration, the kubelet volume - // plugin will proactively rotate the service account token. The kubelet will - // start trying to rotate the token if the token is older than 80 percent of - // its time to live or if the token is older than 24 hours.Defaults to 1 hour - // and must be at least 10 minutes. - //+optional - ExpirationSeconds *int64 `json:"expirationSeconds,omitempty" protobuf:"varint,2,opt,name=expirationSeconds"` - // Path is the path relative to the mount point of the file to project the - // token into. - Path string `json:"path" protobuf:"bytes,3,opt,name=path"` -} - -// Represents a projected volume source -type ProjectedVolumeSource struct { - // list of volume projections - Sources []VolumeProjection `json:"sources" protobuf:"bytes,1,rep,name=sources"` - // Mode bits to use on created files by default. Must be a value between - // 0 and 0777. 
- // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` -} - -// Projection that may be projected along with other supported volume types -type VolumeProjection struct { - // all types below are the supported types for projection into the same volume - - // information about the secret data to project - // +optional - Secret *SecretProjection `json:"secret,omitempty" protobuf:"bytes,1,opt,name=secret"` - // information about the downwardAPI data to project - // +optional - DownwardAPI *DownwardAPIProjection `json:"downwardAPI,omitempty" protobuf:"bytes,2,opt,name=downwardAPI"` - // information about the configMap data to project - // +optional - ConfigMap *ConfigMapProjection `json:"configMap,omitempty" protobuf:"bytes,3,opt,name=configMap"` - // information about the serviceAccountToken data to project - // +optional - ServiceAccountToken *ServiceAccountTokenProjection `json:"serviceAccountToken,omitempty" protobuf:"bytes,4,opt,name=serviceAccountToken"` -} - -const ( - ProjectedVolumeSourceDefaultMode int32 = 0644 -) - -// Maps a string key to a path within a volume. -type KeyToPath struct { - // The key to project. - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - - // The relative path of the file to map the key to. - // May not be an absolute path. - // May not contain the path element '..'. - // May not start with the string '..'. - Path string `json:"path" protobuf:"bytes,2,opt,name=path"` - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 `json:"mode,omitempty" protobuf:"varint,3,opt,name=mode"` -} - -// Local represents directly-attached storage with node affinity (Beta feature) -type LocalVolumeSource struct { - // The full path to the volume on the node. - // It can be either a directory or block device (disk, partition, ...). - Path string `json:"path" protobuf:"bytes,1,opt,name=path"` - - // Filesystem type to mount. - // It applies only when the Path is a block device. - // Must be a filesystem type supported by the host operating system. - // Ex. "ext4", "xfs", "ntfs". The default value is to auto-select a fileystem if unspecified. - // +optional - FSType *string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"` -} - -// Represents storage that is managed by an external CSI volume driver (Beta feature) -type CSIPersistentVolumeSource struct { - // Driver is the name of the driver to use for this volume. - // Required. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` - - // VolumeHandle is the unique volume name returned by the CSI volume - // plugin’s CreateVolume to refer to the volume on all subsequent calls. - // Required. - VolumeHandle string `json:"volumeHandle" protobuf:"bytes,2,opt,name=volumeHandle"` - - // Optional: The value to pass to ControllerPublishVolumeRequest. - // Defaults to false (read/write). - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"` - - // Filesystem type to mount. - // Must be a filesystem type supported by the host operating system. - // Ex. 
"ext4", "xfs", "ntfs". - // +optional - FSType string `json:"fsType,omitempty" protobuf:"bytes,4,opt,name=fsType"` - - // Attributes of the volume to publish. - // +optional - VolumeAttributes map[string]string `json:"volumeAttributes,omitempty" protobuf:"bytes,5,rep,name=volumeAttributes"` - - // ControllerPublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // ControllerPublishVolume and ControllerUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - ControllerPublishSecretRef *SecretReference `json:"controllerPublishSecretRef,omitempty" protobuf:"bytes,6,opt,name=controllerPublishSecretRef"` - - // NodeStageSecretRef is a reference to the secret object containing sensitive - // information to pass to the CSI driver to complete the CSI NodeStageVolume - // and NodeStageVolume and NodeUnstageVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - NodeStageSecretRef *SecretReference `json:"nodeStageSecretRef,omitempty" protobuf:"bytes,7,opt,name=nodeStageSecretRef"` - - // NodePublishSecretRef is a reference to the secret object containing - // sensitive information to pass to the CSI driver to complete the CSI - // NodePublishVolume and NodeUnpublishVolume calls. - // This field is optional, and may be empty if no secret is required. If the - // secret object contains more than one secret, all secrets are passed. - // +optional - NodePublishSecretRef *SecretReference `json:"nodePublishSecretRef,omitempty" protobuf:"bytes,8,opt,name=nodePublishSecretRef"` -} - -// ContainerPort represents a network port in a single container. -type ContainerPort struct { - // If specified, this must be an IANA_SVC_NAME and unique within the pod. Each - // named port in a pod must have a unique name. Name for the port that can be - // referred to by services. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // Number of port to expose on the host. - // If specified, this must be a valid port number, 0 < x < 65536. - // If HostNetwork is specified, this must match ContainerPort. - // Most containers do not need this. - // +optional - HostPort int32 `json:"hostPort,omitempty" protobuf:"varint,2,opt,name=hostPort"` - // Number of port to expose on the pod's IP address. - // This must be a valid port number, 0 < x < 65536. - ContainerPort int32 `json:"containerPort" protobuf:"varint,3,opt,name=containerPort"` - // Protocol for port. Must be UDP, TCP, or SCTP. - // Defaults to "TCP". - // +optional - Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,4,opt,name=protocol,casttype=Protocol"` - // What host IP to bind the external port to. - // +optional - HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` -} - -// VolumeMount describes a mounting of a Volume within a container. -type VolumeMount struct { - // This must match the Name of a Volume. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Mounted read-only if true, read-write otherwise (false or unspecified). - // Defaults to false. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` - // Path within the container at which the volume should be mounted. Must - // not contain ':'. 
- MountPath string `json:"mountPath" protobuf:"bytes,3,opt,name=mountPath"` - // Path within the volume from which the container's volume should be mounted. - // Defaults to "" (volume's root). - // +optional - SubPath string `json:"subPath,omitempty" protobuf:"bytes,4,opt,name=subPath"` - // mountPropagation determines how mounts are propagated from the host - // to container and the other way around. - // When not set, MountPropagationNone is used. - // This field is beta in 1.10. - // +optional - MountPropagation *MountPropagationMode `json:"mountPropagation,omitempty" protobuf:"bytes,5,opt,name=mountPropagation,casttype=MountPropagationMode"` -} - -// MountPropagationMode describes mount propagation. -type MountPropagationMode string - -const ( - // MountPropagationNone means that the volume in a container will - // not receive new mounts from the host or other containers, and filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode corresponds to "private" in Linux terminology. - MountPropagationNone MountPropagationMode = "None" - // MountPropagationHostToContainer means that the volume in a container will - // receive new mounts from the host or other containers, but filesystems - // mounted inside the container won't be propagated to the host or other - // containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rslave" in Linux terminology). - MountPropagationHostToContainer MountPropagationMode = "HostToContainer" - // MountPropagationBidirectional means that the volume in a container will - // receive new mounts from the host or other containers, and its own mounts - // will be propagated from the container to the host or other containers. - // Note that this mode is recursively applied to all mounts in the volume - // ("rshared" in Linux terminology). - MountPropagationBidirectional MountPropagationMode = "Bidirectional" -) - -// volumeDevice describes a mapping of a raw block device within a container. -type VolumeDevice struct { - // name must match the name of a persistentVolumeClaim in the pod - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // devicePath is the path inside of the container that the device will be mapped to. - DevicePath string `json:"devicePath" protobuf:"bytes,2,opt,name=devicePath"` -} - -// EnvVar represents an environment variable present in a Container. -type EnvVar struct { - // Name of the environment variable. Must be a C_IDENTIFIER. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - - // Optional: no more than one of the following may be specified. - - // Variable references $(VAR_NAME) are expanded - // using the previous defined environment variables in the container and - // any service environment variables. If a variable cannot be resolved, - // the reference in the input string will be unchanged. The $(VAR_NAME) - // syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped - // references will never be expanded, regardless of whether the variable - // exists or not. - // Defaults to "". - // +optional - Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` - // Source for the environment variable's value. Cannot be used if value is not empty. - // +optional - ValueFrom *EnvVarSource `json:"valueFrom,omitempty" protobuf:"bytes,3,opt,name=valueFrom"` -} - -// EnvVarSource represents a source for the value of an EnvVar. 
-type EnvVarSource struct { - // Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, - // spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP. - // +optional - FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,1,opt,name=fieldRef"` - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,2,opt,name=resourceFieldRef"` - // Selects a key of a ConfigMap. - // +optional - ConfigMapKeyRef *ConfigMapKeySelector `json:"configMapKeyRef,omitempty" protobuf:"bytes,3,opt,name=configMapKeyRef"` - // Selects a key of a secret in the pod's namespace - // +optional - SecretKeyRef *SecretKeySelector `json:"secretKeyRef,omitempty" protobuf:"bytes,4,opt,name=secretKeyRef"` -} - -// ObjectFieldSelector selects an APIVersioned field of an object. -type ObjectFieldSelector struct { - // Version of the schema the FieldPath is written in terms of, defaults to "v1". - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,1,opt,name=apiVersion"` - // Path of the field to select in the specified API version. - FieldPath string `json:"fieldPath" protobuf:"bytes,2,opt,name=fieldPath"` -} - -// ResourceFieldSelector represents container resources (cpu, memory) and their output format -type ResourceFieldSelector struct { - // Container name: required for volumes, optional for env vars - // +optional - ContainerName string `json:"containerName,omitempty" protobuf:"bytes,1,opt,name=containerName"` - // Required: resource to select - Resource string `json:"resource" protobuf:"bytes,2,opt,name=resource"` - // Specifies the output format of the exposed resources, defaults to "1" - // +optional - Divisor resource.Quantity `json:"divisor,omitempty" protobuf:"bytes,3,opt,name=divisor"` -} - -// Selects a key from a ConfigMap. -type ConfigMapKeySelector struct { - // The ConfigMap to select from. - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // The key to select. - Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the ConfigMap or it's key must be defined - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` -} - -// SecretKeySelector selects a key of a Secret. -type SecretKeySelector struct { - // The name of the secret in the pod's namespace to select from. - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // The key of the secret to select from. Must be a valid secret key. - Key string `json:"key" protobuf:"bytes,2,opt,name=key"` - // Specify whether the Secret or it's key must be defined - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,3,opt,name=optional"` -} - -// EnvFromSource represents the source of a set of ConfigMaps -type EnvFromSource struct { - // An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER. 
- // +optional - Prefix string `json:"prefix,omitempty" protobuf:"bytes,1,opt,name=prefix"` - // The ConfigMap to select from - // +optional - ConfigMapRef *ConfigMapEnvSource `json:"configMapRef,omitempty" protobuf:"bytes,2,opt,name=configMapRef"` - // The Secret to select from - // +optional - SecretRef *SecretEnvSource `json:"secretRef,omitempty" protobuf:"bytes,3,opt,name=secretRef"` -} - -// ConfigMapEnvSource selects a ConfigMap to populate the environment -// variables with. -// -// The contents of the target ConfigMap's Data field will represent the -// key-value pairs as environment variables. -type ConfigMapEnvSource struct { - // The ConfigMap to select from. - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // Specify whether the ConfigMap must be defined - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` -} - -// SecretEnvSource selects a Secret to populate the environment -// variables with. -// -// The contents of the target Secret's Data field will represent the -// key-value pairs as environment variables. -type SecretEnvSource struct { - // The Secret to select from. - LocalObjectReference `json:",inline" protobuf:"bytes,1,opt,name=localObjectReference"` - // Specify whether the Secret must be defined - // +optional - Optional *bool `json:"optional,omitempty" protobuf:"varint,2,opt,name=optional"` -} - -// HTTPHeader describes a custom header to be used in HTTP probes -type HTTPHeader struct { - // The header field name - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // The header field value - Value string `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -// HTTPGetAction describes an action based on HTTP Get requests. -type HTTPGetAction struct { - // Path to access on the HTTP server. - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - // Name or number of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - Port intstr.IntOrString `json:"port" protobuf:"bytes,2,opt,name=port"` - // Host name to connect to, defaults to the pod IP. You probably want to set - // "Host" in httpHeaders instead. - // +optional - Host string `json:"host,omitempty" protobuf:"bytes,3,opt,name=host"` - // Scheme to use for connecting to the host. - // Defaults to HTTP. - // +optional - Scheme URIScheme `json:"scheme,omitempty" protobuf:"bytes,4,opt,name=scheme,casttype=URIScheme"` - // Custom headers to set in the request. HTTP allows repeated headers. - // +optional - HTTPHeaders []HTTPHeader `json:"httpHeaders,omitempty" protobuf:"bytes,5,rep,name=httpHeaders"` -} - -// URIScheme identifies the scheme used for connection to a host for Get actions -type URIScheme string - -const ( - // URISchemeHTTP means that the scheme used will be http:// - URISchemeHTTP URIScheme = "HTTP" - // URISchemeHTTPS means that the scheme used will be https:// - URISchemeHTTPS URIScheme = "HTTPS" -) - -// TCPSocketAction describes an action based on opening a socket -type TCPSocketAction struct { - // Number or name of the port to access on the container. - // Number must be in the range 1 to 65535. - // Name must be an IANA_SVC_NAME. - Port intstr.IntOrString `json:"port" protobuf:"bytes,1,opt,name=port"` - // Optional: Host name to connect to, defaults to the pod IP. 
- // +optional - Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` -} - -// ExecAction describes a "run in container" action. -type ExecAction struct { - // Command is the command line to execute inside the container, the working directory for the - // command is root ('/') in the container's filesystem. The command is simply exec'd, it is - // not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use - // a shell, you need to explicitly call out to that shell. - // Exit status of 0 is treated as live/healthy and non-zero is unhealthy. - // +optional - Command []string `json:"command,omitempty" protobuf:"bytes,1,rep,name=command"` -} - -// Probe describes a health check to be performed against a container to determine whether it is -// alive or ready to receive traffic. -type Probe struct { - // The action taken to determine the health of a container - Handler `json:",inline" protobuf:"bytes,1,opt,name=handler"` - // Number of seconds after the container has started before liveness probes are initiated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - InitialDelaySeconds int32 `json:"initialDelaySeconds,omitempty" protobuf:"varint,2,opt,name=initialDelaySeconds"` - // Number of seconds after which the probe times out. - // Defaults to 1 second. Minimum value is 1. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - TimeoutSeconds int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,3,opt,name=timeoutSeconds"` - // How often (in seconds) to perform the probe. - // Default to 10 seconds. Minimum value is 1. - // +optional - PeriodSeconds int32 `json:"periodSeconds,omitempty" protobuf:"varint,4,opt,name=periodSeconds"` - // Minimum consecutive successes for the probe to be considered successful after having failed. - // Defaults to 1. Must be 1 for liveness. Minimum value is 1. - // +optional - SuccessThreshold int32 `json:"successThreshold,omitempty" protobuf:"varint,5,opt,name=successThreshold"` - // Minimum consecutive failures for the probe to be considered failed after having succeeded. - // Defaults to 3. Minimum value is 1. - // +optional - FailureThreshold int32 `json:"failureThreshold,omitempty" protobuf:"varint,6,opt,name=failureThreshold"` -} - -// PullPolicy describes a policy for if/when to pull a container image -type PullPolicy string - -const ( - // PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails. - PullAlways PullPolicy = "Always" - // PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present - PullNever PullPolicy = "Never" - // PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails. - PullIfNotPresent PullPolicy = "IfNotPresent" -) - -// TerminationMessagePolicy describes how termination messages are retrieved from a container. -type TerminationMessagePolicy string - -const ( - // TerminationMessageReadFile is the default behavior and will set the container status message to - // the contents of the container's terminationMessagePath when the container exits. 
- TerminationMessageReadFile TerminationMessagePolicy = "File" - // TerminationMessageFallbackToLogsOnError will read the most recent contents of the container logs - // for the container status message when the container exits with an error and the - // terminationMessagePath has no contents. - TerminationMessageFallbackToLogsOnError TerminationMessagePolicy = "FallbackToLogsOnError" -) - -// Capability represent POSIX capabilities type -type Capability string - -// Adds and removes POSIX capabilities from running containers. -type Capabilities struct { - // Added capabilities - // +optional - Add []Capability `json:"add,omitempty" protobuf:"bytes,1,rep,name=add,casttype=Capability"` - // Removed capabilities - // +optional - Drop []Capability `json:"drop,omitempty" protobuf:"bytes,2,rep,name=drop,casttype=Capability"` -} - -// ResourceRequirements describes the compute resource requirements. -type ResourceRequirements struct { - // Limits describes the maximum amount of compute resources allowed. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - // +optional - Limits ResourceList `json:"limits,omitempty" protobuf:"bytes,1,rep,name=limits,casttype=ResourceList,castkey=ResourceName"` - // Requests describes the minimum amount of compute resources required. - // If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, - // otherwise to an implementation-defined value. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - // +optional - Requests ResourceList `json:"requests,omitempty" protobuf:"bytes,2,rep,name=requests,casttype=ResourceList,castkey=ResourceName"` -} - -const ( - // TerminationMessagePathDefault means the default path to capture the application termination message running in a container - TerminationMessagePathDefault string = "/dev/termination-log" -) - -// A single application container that you want to run within a pod. -type Container struct { - // Name of the container specified as a DNS_LABEL. - // Each container in a pod must have a unique name (DNS_LABEL). - // Cannot be updated. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Docker image name. - // More info: https://kubernetes.io/docs/concepts/containers/images - // This field is optional to allow higher level config management to default or override - // container images in workload controllers like Deployments and StatefulSets. - // +optional - Image string `json:"image,omitempty" protobuf:"bytes,2,opt,name=image"` - // Entrypoint array. Not executed within a shell. - // The docker image's ENTRYPOINT is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - Command []string `json:"command,omitempty" protobuf:"bytes,3,rep,name=command"` - // Arguments to the entrypoint. - // The docker image's CMD is used if this is not provided. - // Variable references $(VAR_NAME) are expanded using the container's environment. 
If a variable - // cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax - // can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, - // regardless of whether the variable exists or not. - // Cannot be updated. - // More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell - // +optional - Args []string `json:"args,omitempty" protobuf:"bytes,4,rep,name=args"` - // Container's working directory. - // If not specified, the container runtime's default will be used, which - // might be configured in the container image. - // Cannot be updated. - // +optional - WorkingDir string `json:"workingDir,omitempty" protobuf:"bytes,5,opt,name=workingDir"` - // List of ports to expose from the container. Exposing a port here gives - // the system additional information about the network connections a - // container uses, but is primarily informational. Not specifying a port here - // DOES NOT prevent that port from being exposed. Any port which is - // listening on the default "0.0.0.0" address inside a container will be - // accessible from the network. - // Cannot be updated. - // +optional - // +patchMergeKey=containerPort - // +patchStrategy=merge - Ports []ContainerPort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"containerPort" protobuf:"bytes,6,rep,name=ports"` - // List of sources to populate environment variables in the container. - // The keys defined within a source must be a C_IDENTIFIER. All invalid keys - // will be reported as an event when the container is starting. When a key exists in multiple - // sources, the value associated with the last source will take precedence. - // Values defined by an Env with a duplicate key will take precedence. - // Cannot be updated. - // +optional - EnvFrom []EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,19,rep,name=envFrom"` - // List of environment variables to set in the container. - // Cannot be updated. - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - Env []EnvVar `json:"env,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,7,rep,name=env"` - // Compute Resources required by this container. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - // +optional - Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"` - // Pod volumes to mount into the container's filesystem. - // Cannot be updated. - // +optional - // +patchMergeKey=mountPath - // +patchStrategy=merge - VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"mountPath" protobuf:"bytes,9,rep,name=volumeMounts"` - // volumeDevices is the list of block devices to be used by the container. - // This is an alpha feature and may change in the future. - // +patchMergeKey=devicePath - // +patchStrategy=merge - // +optional - VolumeDevices []VolumeDevice `json:"volumeDevices,omitempty" patchStrategy:"merge" patchMergeKey:"devicePath" protobuf:"bytes,21,rep,name=volumeDevices"` - // Periodic probe of container liveness. - // Container will be restarted if the probe fails. - // Cannot be updated. 
- // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - LivenessProbe *Probe `json:"livenessProbe,omitempty" protobuf:"bytes,10,opt,name=livenessProbe"` - // Periodic probe of container service readiness. - // Container will be removed from service endpoints if the probe fails. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - // +optional - ReadinessProbe *Probe `json:"readinessProbe,omitempty" protobuf:"bytes,11,opt,name=readinessProbe"` - // Actions that the management system should take in response to container lifecycle events. - // Cannot be updated. - // +optional - Lifecycle *Lifecycle `json:"lifecycle,omitempty" protobuf:"bytes,12,opt,name=lifecycle"` - // Optional: Path at which the file to which the container's termination message - // will be written is mounted into the container's filesystem. - // Message written is intended to be brief final status, such as an assertion failure message. - // Will be truncated by the node if greater than 4096 bytes. The total message length across - // all containers will be limited to 12kb. - // Defaults to /dev/termination-log. - // Cannot be updated. - // +optional - TerminationMessagePath string `json:"terminationMessagePath,omitempty" protobuf:"bytes,13,opt,name=terminationMessagePath"` - // Indicate how the termination message should be populated. File will use the contents of - // terminationMessagePath to populate the container status message on both success and failure. - // FallbackToLogsOnError will use the last chunk of container log output if the termination - // message file is empty and the container exited with an error. - // The log output is limited to 2048 bytes or 80 lines, whichever is smaller. - // Defaults to File. - // Cannot be updated. - // +optional - TerminationMessagePolicy TerminationMessagePolicy `json:"terminationMessagePolicy,omitempty" protobuf:"bytes,20,opt,name=terminationMessagePolicy,casttype=TerminationMessagePolicy"` - // Image pull policy. - // One of Always, Never, IfNotPresent. - // Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - // +optional - ImagePullPolicy PullPolicy `json:"imagePullPolicy,omitempty" protobuf:"bytes,14,opt,name=imagePullPolicy,casttype=PullPolicy"` - // Security options the pod should run with. - // More info: https://kubernetes.io/docs/concepts/policy/security-context/ - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SecurityContext *SecurityContext `json:"securityContext,omitempty" protobuf:"bytes,15,opt,name=securityContext"` - - // Variables for interactive containers, these have very specialized use-cases (e.g. debugging) - // and shouldn't be used for general purpose containers. - - // Whether this container should allocate a buffer for stdin in the container runtime. If this - // is not set, reads from stdin in the container will always result in EOF. - // Default is false. - // +optional - Stdin bool `json:"stdin,omitempty" protobuf:"varint,16,opt,name=stdin"` - // Whether the container runtime should close the stdin channel after it has been opened by - // a single attach. When stdin is true the stdin stream will remain open across multiple attach - // sessions. 
If stdinOnce is set to true, stdin is opened on container start, is empty until the - // first client attaches to stdin, and then remains open and accepts data until the client disconnects, - // at which time stdin is closed and remains closed until the container is restarted. If this - // flag is false, a container processes that reads from stdin will never receive an EOF. - // Default is false - // +optional - StdinOnce bool `json:"stdinOnce,omitempty" protobuf:"varint,17,opt,name=stdinOnce"` - // Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. - // Default is false. - // +optional - TTY bool `json:"tty,omitempty" protobuf:"varint,18,opt,name=tty"` -} - -// Handler defines a specific action that should be taken -// TODO: pass structured data to these actions, and document that data here. -type Handler struct { - // One and only one of the following should be specified. - // Exec specifies the action to take. - // +optional - Exec *ExecAction `json:"exec,omitempty" protobuf:"bytes,1,opt,name=exec"` - // HTTPGet specifies the http request to perform. - // +optional - HTTPGet *HTTPGetAction `json:"httpGet,omitempty" protobuf:"bytes,2,opt,name=httpGet"` - // TCPSocket specifies an action involving a TCP port. - // TCP hooks not yet supported - // TODO: implement a realistic TCP lifecycle hook - // +optional - TCPSocket *TCPSocketAction `json:"tcpSocket,omitempty" protobuf:"bytes,3,opt,name=tcpSocket"` -} - -// Lifecycle describes actions that the management system should take in response to container lifecycle -// events. For the PostStart and PreStop lifecycle handlers, management of the container blocks -// until the action is complete, unless the container process fails, in which case the handler is aborted. -type Lifecycle struct { - // PostStart is called immediately after a container is created. If the handler fails, - // the container is terminated and restarted according to its restart policy. - // Other management of the container blocks until the hook completes. - // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - // +optional - PostStart *Handler `json:"postStart,omitempty" protobuf:"bytes,1,opt,name=postStart"` - // PreStop is called immediately before a container is terminated. - // The container is terminated after the handler completes. - // The reason for termination is passed to the handler. - // Regardless of the outcome of the handler, the container is eventually terminated. - // Other management of the container blocks until the hook completes. - // More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks - // +optional - PreStop *Handler `json:"preStop,omitempty" protobuf:"bytes,2,opt,name=preStop"` -} - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// ContainerStateWaiting is a waiting state of a container. -type ContainerStateWaiting struct { - // (brief) reason the container is not yet running. 
- // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,1,opt,name=reason"` - // Message regarding why the container is not yet running. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} - -// ContainerStateRunning is a running state of a container. -type ContainerStateRunning struct { - // Time at which the container was last (re-)started - // +optional - StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,1,opt,name=startedAt"` -} - -// ContainerStateTerminated is a terminated state of a container. -type ContainerStateTerminated struct { - // Exit status from the last termination of the container - ExitCode int32 `json:"exitCode" protobuf:"varint,1,opt,name=exitCode"` - // Signal from the last termination of the container - // +optional - Signal int32 `json:"signal,omitempty" protobuf:"varint,2,opt,name=signal"` - // (brief) reason from the last termination of the container - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` - // Message regarding the last termination of the container - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` - // Time at which previous execution of the container started - // +optional - StartedAt metav1.Time `json:"startedAt,omitempty" protobuf:"bytes,5,opt,name=startedAt"` - // Time at which the container last terminated - // +optional - FinishedAt metav1.Time `json:"finishedAt,omitempty" protobuf:"bytes,6,opt,name=finishedAt"` - // Container's ID in the format 'docker://' - // +optional - ContainerID string `json:"containerID,omitempty" protobuf:"bytes,7,opt,name=containerID"` -} - -// ContainerState holds a possible state of container. -// Only one of its members may be specified. -// If none of them is specified, the default one is ContainerStateWaiting. -type ContainerState struct { - // Details about a waiting container - // +optional - Waiting *ContainerStateWaiting `json:"waiting,omitempty" protobuf:"bytes,1,opt,name=waiting"` - // Details about a running container - // +optional - Running *ContainerStateRunning `json:"running,omitempty" protobuf:"bytes,2,opt,name=running"` - // Details about a terminated container - // +optional - Terminated *ContainerStateTerminated `json:"terminated,omitempty" protobuf:"bytes,3,opt,name=terminated"` -} - -// ContainerStatus contains details for the current status of this container. -type ContainerStatus struct { - // This must be a DNS_LABEL. Each container in a pod must have a unique name. - // Cannot be updated. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Details about the container's current condition. - // +optional - State ContainerState `json:"state,omitempty" protobuf:"bytes,2,opt,name=state"` - // Details about the container's last termination condition. - // +optional - LastTerminationState ContainerState `json:"lastState,omitempty" protobuf:"bytes,3,opt,name=lastState"` - // Specifies whether the container has passed its readiness probe. - Ready bool `json:"ready" protobuf:"varint,4,opt,name=ready"` - // The number of times the container has been restarted, currently based on - // the number of dead containers that have not yet been removed. - // Note that this is calculated from dead containers. But those containers are subject to - // garbage collection. This value will get capped at 5 by GC. - RestartCount int32 `json:"restartCount" protobuf:"varint,5,opt,name=restartCount"` - // The image the container is running. 
- // More info: https://kubernetes.io/docs/concepts/containers/images - // TODO(dchen1107): Which image the container is running with? - Image string `json:"image" protobuf:"bytes,6,opt,name=image"` - // ImageID of the container's image. - ImageID string `json:"imageID" protobuf:"bytes,7,opt,name=imageID"` - // Container's ID in the format 'docker://'. - // +optional - ContainerID string `json:"containerID,omitempty" protobuf:"bytes,8,opt,name=containerID"` -} - -// PodPhase is a label for the condition of a pod at the current time. -type PodPhase string - -// These are the valid statuses of pods. -const ( - // PodPending means the pod has been accepted by the system, but one or more of the containers - // has not been started. This includes time before being bound to a node, as well as time spent - // pulling images onto the host. - PodPending PodPhase = "Pending" - // PodRunning means the pod has been bound to a node and all of the containers have been started. - // At least one container is still running or is in the process of being restarted. - PodRunning PodPhase = "Running" - // PodSucceeded means that all containers in the pod have voluntarily terminated - // with a container exit code of 0, and the system is not going to restart any of these containers. - PodSucceeded PodPhase = "Succeeded" - // PodFailed means that all containers in the pod have terminated, and at least one container has - // terminated in a failure (exited with a non-zero exit code or was stopped by the system). - PodFailed PodPhase = "Failed" - // PodUnknown means that for some reason the state of the pod could not be obtained, typically due - // to an error in communicating with the host of the pod. - PodUnknown PodPhase = "Unknown" -) - -// PodConditionType is a valid value for PodCondition.Type -type PodConditionType string - -// These are valid conditions of pod. -const ( - // PodScheduled represents status of the scheduling process for this pod. - PodScheduled PodConditionType = "PodScheduled" - // PodReady means the pod is able to service requests and should be added to the - // load balancing pools of all matching services. - PodReady PodConditionType = "Ready" - // PodInitialized means that all init containers in the pod have started successfully. - PodInitialized PodConditionType = "Initialized" - // PodReasonUnschedulable reason in PodScheduled PodCondition means that the scheduler - // can't schedule the pod right now, for example due to insufficient resources in the cluster. - PodReasonUnschedulable = "Unschedulable" - // ContainersReady indicates whether all containers in the pod are ready. - ContainersReady PodConditionType = "ContainersReady" -) - -// PodCondition contains details for the current condition of this pod. -type PodCondition struct { - // Type is the type of the condition. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - Type PodConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=PodConditionType"` - // Status is the status of the condition. - // Can be True, False, Unknown. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time we probed the condition. - // +optional - LastProbeTime metav1.Time `json:"lastProbeTime,omitempty" protobuf:"bytes,3,opt,name=lastProbeTime"` - // Last time the condition transitioned from one status to another. 
- // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // Unique, one-word, CamelCase reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human-readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -// RestartPolicy describes how the container should be restarted. -// Only one of the following restart policies may be specified. -// If none of the following policies is specified, the default one -// is RestartPolicyAlways. -type RestartPolicy string - -const ( - RestartPolicyAlways RestartPolicy = "Always" - RestartPolicyOnFailure RestartPolicy = "OnFailure" - RestartPolicyNever RestartPolicy = "Never" -) - -// DNSPolicy defines how a pod's DNS will be configured. -type DNSPolicy string - -const ( - // DNSClusterFirstWithHostNet indicates that the pod should use cluster DNS - // first, if it is available, then fall back on the default - // (as determined by kubelet) DNS settings. - DNSClusterFirstWithHostNet DNSPolicy = "ClusterFirstWithHostNet" - - // DNSClusterFirst indicates that the pod should use cluster DNS - // first unless hostNetwork is true, if it is available, then - // fall back on the default (as determined by kubelet) DNS settings. - DNSClusterFirst DNSPolicy = "ClusterFirst" - - // DNSDefault indicates that the pod should use the default (as - // determined by kubelet) DNS settings. - DNSDefault DNSPolicy = "Default" - - // DNSNone indicates that the pod should use empty DNS settings. DNS - // parameters such as nameservers and search paths should be defined via - // DNSConfig. - DNSNone DNSPolicy = "None" -) - -const ( - // DefaultTerminationGracePeriodSeconds indicates the default duration in - // seconds a pod needs to terminate gracefully. - DefaultTerminationGracePeriodSeconds = 30 -) - -// A node selector represents the union of the results of one or more label queries -// over a set of nodes; that is, it represents the OR of the selectors represented -// by the node selector terms. -type NodeSelector struct { - //Required. A list of node selector terms. The terms are ORed. - NodeSelectorTerms []NodeSelectorTerm `json:"nodeSelectorTerms" protobuf:"bytes,1,rep,name=nodeSelectorTerms"` -} - -// A null or empty node selector term matches no objects. The requirements of -// them are ANDed. -// The TopologySelectorTerm type implements a subset of the NodeSelectorTerm. -type NodeSelectorTerm struct { - // A list of node selector requirements by node's labels. - // +optional - MatchExpressions []NodeSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` - // A list of node selector requirements by node's fields. - // +optional - MatchFields []NodeSelectorRequirement `json:"matchFields,omitempty" protobuf:"bytes,2,rep,name=matchFields"` -} - -// A node selector requirement is a selector that contains values, a key, and an operator -// that relates the key and values. -type NodeSelectorRequirement struct { - // The label key that the selector applies to. - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - // Represents a key's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt. 
- Operator NodeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=NodeSelectorOperator"` - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. If the operator is Gt or Lt, the values - // array must have a single element, which will be interpreted as an integer. - // This array is replaced during a strategic merge patch. - // +optional - Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -// A node selector operator is the set of operators that can be used in -// a node selector requirement. -type NodeSelectorOperator string - -const ( - NodeSelectorOpIn NodeSelectorOperator = "In" - NodeSelectorOpNotIn NodeSelectorOperator = "NotIn" - NodeSelectorOpExists NodeSelectorOperator = "Exists" - NodeSelectorOpDoesNotExist NodeSelectorOperator = "DoesNotExist" - NodeSelectorOpGt NodeSelectorOperator = "Gt" - NodeSelectorOpLt NodeSelectorOperator = "Lt" -) - -// A topology selector term represents the result of label queries. -// A null or empty topology selector term matches no objects. -// The requirements of them are ANDed. -// It provides a subset of functionality as NodeSelectorTerm. -// This is an alpha feature and may change in the future. -type TopologySelectorTerm struct { - // A list of topology selector requirements by labels. - // +optional - MatchLabelExpressions []TopologySelectorLabelRequirement `json:"matchLabelExpressions,omitempty" protobuf:"bytes,1,rep,name=matchLabelExpressions"` -} - -// A topology selector requirement is a selector that matches given label. -// This is an alpha feature and may change in the future. -type TopologySelectorLabelRequirement struct { - // The label key that the selector applies to. - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - // An array of string values. One value must match the label to be selected. - // Each entry in Values is ORed. - Values []string `json:"values" protobuf:"bytes,2,rep,name=values"` -} - -// Affinity is a group of affinity scheduling rules. -type Affinity struct { - // Describes node affinity scheduling rules for the pod. - // +optional - NodeAffinity *NodeAffinity `json:"nodeAffinity,omitempty" protobuf:"bytes,1,opt,name=nodeAffinity"` - // Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAffinity *PodAffinity `json:"podAffinity,omitempty" protobuf:"bytes,2,opt,name=podAffinity"` - // Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). - // +optional - PodAntiAffinity *PodAntiAffinity `json:"podAntiAffinity,omitempty" protobuf:"bytes,3,opt,name=podAntiAffinity"` -} - -// Pod affinity is a group of inter pod affinity scheduling rules. -type PodAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
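
A short sketch of assembling the node-selector types above, again assuming they match upstream k8s.io/api/core/v1; the example.com/cpu-count label is hypothetical.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	selector := corev1.NodeSelector{
		// Terms are ORed; requirements within a single term are ANDed.
		NodeSelectorTerms: []corev1.NodeSelectorTerm{{
			MatchExpressions: []corev1.NodeSelectorRequirement{
				{
					Key:      "kubernetes.io/os",
					Operator: corev1.NodeSelectorOpIn,
					Values:   []string{"linux"},
				},
				{
					Key:      "example.com/cpu-count", // hypothetical label
					Operator: corev1.NodeSelectorOpGt,
					Values:   []string{"4"}, // Gt/Lt take a single value interpreted as an integer
				},
			},
		}},
	}
	fmt.Printf("%d term(s), %d requirement(s)\n",
		len(selector.NodeSelectorTerms),
		len(selector.NodeSelectorTerms[0].MatchExpressions))
}
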
- // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` -} - -// Pod anti affinity is a group of inter pod anti affinity scheduling rules. -type PodAntiAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system will try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. - // +optional - // RequiredDuringSchedulingRequiredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - - // If the anti-affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the anti-affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to a pod label update), the - // system may or may not try to eventually evict the pod from its node. - // When there are multiple elements, the lists of nodes corresponding to each - // podAffinityTerm are intersected, i.e. all terms must be satisfied. 
- // +optional - RequiredDuringSchedulingIgnoredDuringExecution []PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,rep,name=requiredDuringSchedulingIgnoredDuringExecution"` - // The scheduler will prefer to schedule pods to nodes that satisfy - // the anti-affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling anti-affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []WeightedPodAffinityTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` -} - -// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) -type WeightedPodAffinityTerm struct { - // weight associated with matching the corresponding podAffinityTerm, - // in the range 1-100. - Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` - // Required. A pod affinity term, associated with the corresponding weight. - PodAffinityTerm PodAffinityTerm `json:"podAffinityTerm" protobuf:"bytes,2,opt,name=podAffinityTerm"` -} - -// Defines a set of pods (namely those matching the labelSelector -// relative to the given namespace(s)) that this pod should be -// co-located (affinity) or not co-located (anti-affinity) with, -// where co-located is defined as running on a node whose value of -// the label with key matches that of any node on which -// a pod of the set of pods is running -type PodAffinityTerm struct { - // A label query over a set of resources, in this case pods. - // +optional - LabelSelector *metav1.LabelSelector `json:"labelSelector,omitempty" protobuf:"bytes,1,opt,name=labelSelector"` - // namespaces specifies which namespaces the labelSelector applies to (matches against); - // null or empty list means "this pod's namespace" - // +optional - Namespaces []string `json:"namespaces,omitempty" protobuf:"bytes,2,rep,name=namespaces"` - // This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching - // the labelSelector in the specified namespaces, where co-located is defined as running on a node - // whose value of the label with key topologyKey matches that of any node on which any of the - // selected pods is running. - // Empty topologyKey is not allowed. - TopologyKey string `json:"topologyKey" protobuf:"bytes,3,opt,name=topologyKey"` -} - -// Node affinity is a group of node affinity scheduling rules. -type NodeAffinity struct { - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // will try to eventually evict the pod from its node. 
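
To make the affinity terms concrete, a hedged example of a soft pod anti-affinity rule built from the types above (same upstream-package assumption; the app=redis labels are illustrative).

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Prefer (weight 100) that pods labelled app=redis do not share a node,
	// using kubernetes.io/hostname as the topology key.
	antiAffinity := corev1.PodAntiAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []corev1.WeightedPodAffinityTerm{{
			Weight: 100,
			PodAffinityTerm: corev1.PodAffinityTerm{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "redis"},
				},
				TopologyKey: "kubernetes.io/hostname",
			},
		}},
	}
	fmt.Println("preferred anti-affinity terms:", len(antiAffinity.PreferredDuringSchedulingIgnoredDuringExecution))
}
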
- // +optional - // RequiredDuringSchedulingRequiredDuringExecution *NodeSelector `json:"requiredDuringSchedulingRequiredDuringExecution,omitempty"` - - // If the affinity requirements specified by this field are not met at - // scheduling time, the pod will not be scheduled onto the node. - // If the affinity requirements specified by this field cease to be met - // at some point during pod execution (e.g. due to an update), the system - // may or may not try to eventually evict the pod from its node. - // +optional - RequiredDuringSchedulingIgnoredDuringExecution *NodeSelector `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,1,opt,name=requiredDuringSchedulingIgnoredDuringExecution"` - // The scheduler will prefer to schedule pods to nodes that satisfy - // the affinity expressions specified by this field, but it may choose - // a node that violates one or more of the expressions. The node that is - // most preferred is the one with the greatest sum of weights, i.e. - // for each node that meets all of the scheduling requirements (resource - // request, requiredDuringScheduling affinity expressions, etc.), - // compute a sum by iterating through the elements of this field and adding - // "weight" to the sum if the node matches the corresponding matchExpressions; the - // node(s) with the highest sum are the most preferred. - // +optional - PreferredDuringSchedulingIgnoredDuringExecution []PreferredSchedulingTerm `json:"preferredDuringSchedulingIgnoredDuringExecution,omitempty" protobuf:"bytes,2,rep,name=preferredDuringSchedulingIgnoredDuringExecution"` -} - -// An empty preferred scheduling term matches all objects with implicit weight 0 -// (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). -type PreferredSchedulingTerm struct { - // Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100. - Weight int32 `json:"weight" protobuf:"varint,1,opt,name=weight"` - // A node selector term, associated with the corresponding weight. - Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"` -} - -// The node this Taint is attached to has the "effect" on -// any pod that does not tolerate the Taint. -type Taint struct { - // Required. The taint key to be applied to a node. - Key string `json:"key" protobuf:"bytes,1,opt,name=key"` - // Required. The taint value corresponding to the taint key. - // +optional - Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` - // Required. The effect of the taint on pods - // that do not tolerate the taint. - // Valid effects are NoSchedule, PreferNoSchedule and NoExecute. - Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"` - // TimeAdded represents the time at which the taint was added. - // It is only written for NoExecute taints. - // +optional - TimeAdded *metav1.Time `json:"timeAdded,omitempty" protobuf:"bytes,4,opt,name=timeAdded"` -} - -type TaintEffect string - -const ( - // Do not allow new pods to schedule onto the node unless they tolerate the taint, - // but allow all pods submitted to Kubelet without going through the scheduler - // to start, and allow all already-running pods to continue running. - // Enforced by the scheduler. 
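
A sketch combining a required and a preferred node-affinity term, under the same assumption that these types track upstream k8s.io/api/core/v1; example.com/pool is a made-up label.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	affinity := corev1.Affinity{
		NodeAffinity: &corev1.NodeAffinity{
			// Hard requirement: only schedule onto linux nodes.
			RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
				NodeSelectorTerms: []corev1.NodeSelectorTerm{{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      "kubernetes.io/os",
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{"linux"},
					}},
				}},
			},
			// Soft preference (weight 1-100): favour a hypothetical node pool label.
			PreferredDuringSchedulingIgnoredDuringExecution: []corev1.PreferredSchedulingTerm{{
				Weight: 50,
				Preference: corev1.NodeSelectorTerm{
					MatchExpressions: []corev1.NodeSelectorRequirement{{
						Key:      "example.com/pool", // hypothetical label
						Operator: corev1.NodeSelectorOpIn,
						Values:   []string{"cache"},
					}},
				},
			}},
		},
	}
	fmt.Println("has node affinity:", affinity.NodeAffinity != nil)
}
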
- TaintEffectNoSchedule TaintEffect = "NoSchedule" - // Like TaintEffectNoSchedule, but the scheduler tries not to schedule - // new pods onto the node, rather than prohibiting new pods from scheduling - // onto the node entirely. Enforced by the scheduler. - TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule" - // NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented. - // Like TaintEffectNoSchedule, but additionally do not allow pods submitted to - // Kubelet without going through the scheduler to start. - // Enforced by Kubelet and the scheduler. - // TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit" - - // Evict any already-running pods that do not tolerate the taint. - // Currently enforced by NodeController. - TaintEffectNoExecute TaintEffect = "NoExecute" -) - -// The pod this Toleration is attached to tolerates any taint that matches -// the triple using the matching operator . -type Toleration struct { - // Key is the taint key that the toleration applies to. Empty means match all taint keys. - // If the key is empty, operator must be Exists; this combination means to match all values and all keys. - // +optional - Key string `json:"key,omitempty" protobuf:"bytes,1,opt,name=key"` - // Operator represents a key's relationship to the value. - // Valid operators are Exists and Equal. Defaults to Equal. - // Exists is equivalent to wildcard for value, so that a pod can - // tolerate all taints of a particular category. - // +optional - Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"` - // Value is the taint value the toleration matches to. - // If the operator is Exists, the value should be empty, otherwise just a regular string. - // +optional - Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"` - // Effect indicates the taint effect to match. Empty means match all taint effects. - // When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. - // +optional - Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"` - // TolerationSeconds represents the period of time the toleration (which must be - // of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, - // it is not set, which means tolerate the taint forever (do not evict). Zero and - // negative values will be treated as 0 (evict immediately) by the system. - // +optional - TolerationSeconds *int64 `json:"tolerationSeconds,omitempty" protobuf:"varint,5,opt,name=tolerationSeconds"` -} - -// A toleration operator is the set of operators that can be used in a toleration. -type TolerationOperator string - -const ( - TolerationOpExists TolerationOperator = "Exists" - TolerationOpEqual TolerationOperator = "Equal" -) - -// PodReadinessGate contains the reference to a pod condition -type PodReadinessGate struct { - // ConditionType refers to a condition in the pod's condition list with matching type. - ConditionType PodConditionType `json:"conditionType" protobuf:"bytes,1,opt,name=conditionType,casttype=PodConditionType"` -} - -// PodSpec is a description of a pod. -type PodSpec struct { - // List of volumes that can be mounted by containers belonging to the pod. 
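
An illustrative taint/toleration pair using the constants above, assuming upstream-compatible types; note that TolerationSeconds is only honoured for the NoExecute effect.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	// A node taint and a toleration that matches it. With NoExecute and
	// TolerationSeconds set, the pod is evicted 300s after the taint appears.
	taint := corev1.Taint{
		Key:    "node.kubernetes.io/not-ready",
		Effect: corev1.TaintEffectNoExecute,
	}
	seconds := int64(300)
	toleration := corev1.Toleration{
		Key:               taint.Key,
		Operator:          corev1.TolerationOpExists, // Exists: match any value for this key
		Effect:            corev1.TaintEffectNoExecute,
		TolerationSeconds: &seconds,
	}
	fmt.Printf("tolerates %q for %ds\n", toleration.Key, *toleration.TolerationSeconds)
}
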
- // More info: https://kubernetes.io/docs/concepts/storage/volumes - // +optional - // +patchMergeKey=name - // +patchStrategy=merge,retainKeys - Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge,retainKeys" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"` - // List of initialization containers belonging to the pod. - // Init containers are executed in order prior to containers being started. If any - // init container fails, the pod is considered to have failed and is handled according - // to its restartPolicy. The name for an init container or normal container must be - // unique among all containers. - // Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. - // The resourceRequirements of an init container are taken into account during scheduling - // by finding the highest request/limit for each resource type, and then using the max of - // of that value or the sum of the normal containers. Limits are applied to init containers - // in a similar fashion. - // Init containers cannot currently be added or removed. - // Cannot be updated. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ - // +patchMergeKey=name - // +patchStrategy=merge - InitContainers []Container `json:"initContainers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,20,rep,name=initContainers"` - // List of containers belonging to the pod. - // Containers cannot currently be added or removed. - // There must be at least one container in a Pod. - // Cannot be updated. - // +patchMergeKey=name - // +patchStrategy=merge - Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=containers"` - // Restart policy for all containers within the pod. - // One of Always, OnFailure, Never. - // Default to Always. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy - // +optional - RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" protobuf:"bytes,3,opt,name=restartPolicy,casttype=RestartPolicy"` - // Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. - // Value must be non-negative integer. The value zero indicates delete immediately. - // If this value is nil, the default grace period will be used instead. - // The grace period is the duration in seconds after the processes running in the pod are sent - // a termination signal and the time when the processes are forcibly halted with a kill signal. - // Set this value longer than the expected cleanup time for your process. - // Defaults to 30 seconds. - // +optional - TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty" protobuf:"varint,4,opt,name=terminationGracePeriodSeconds"` - // Optional duration in seconds the pod may be active on the node relative to - // StartTime before the system will actively try to mark it failed and kill associated containers. - // Value must be a positive integer. - // +optional - ActiveDeadlineSeconds *int64 `json:"activeDeadlineSeconds,omitempty" protobuf:"varint,5,opt,name=activeDeadlineSeconds"` - // Set DNS policy for the pod. - // Defaults to "ClusterFirst". - // Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. - // DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. 
- // To have DNS options set along with hostNetwork, you have to specify DNS policy - // explicitly to 'ClusterFirstWithHostNet'. - // +optional - DNSPolicy DNSPolicy `json:"dnsPolicy,omitempty" protobuf:"bytes,6,opt,name=dnsPolicy,casttype=DNSPolicy"` - // NodeSelector is a selector which must be true for the pod to fit on a node. - // Selector which must match a node's labels for the pod to be scheduled on that node. - // More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - // +optional - NodeSelector map[string]string `json:"nodeSelector,omitempty" protobuf:"bytes,7,rep,name=nodeSelector"` - - // ServiceAccountName is the name of the ServiceAccount to use to run this pod. - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - // +optional - ServiceAccountName string `json:"serviceAccountName,omitempty" protobuf:"bytes,8,opt,name=serviceAccountName"` - // DeprecatedServiceAccount is a depreciated alias for ServiceAccountName. - // Deprecated: Use serviceAccountName instead. - // +k8s:conversion-gen=false - // +optional - DeprecatedServiceAccount string `json:"serviceAccount,omitempty" protobuf:"bytes,9,opt,name=serviceAccount"` - // AutomountServiceAccountToken indicates whether a service account token should be automatically mounted. - // +optional - AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,21,opt,name=automountServiceAccountToken"` - - // NodeName is a request to schedule this pod onto a specific node. If it is non-empty, - // the scheduler simply schedules this pod onto that node, assuming that it fits resource - // requirements. - // +optional - NodeName string `json:"nodeName,omitempty" protobuf:"bytes,10,opt,name=nodeName"` - // Host networking requested for this pod. Use the host's network namespace. - // If this option is set, the ports that will be used must be specified. - // Default to false. - // +k8s:conversion-gen=false - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,11,opt,name=hostNetwork"` - // Use the host's pid namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,12,opt,name=hostPID"` - // Use the host's ipc namespace. - // Optional: Default to false. - // +k8s:conversion-gen=false - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,13,opt,name=hostIPC"` - // Share a single process namespace between all of the containers in a pod. - // When this is set containers will be able to view and signal processes from other containers - // in the same pod, and the first process in each container will not be assigned PID 1. - // HostPID and ShareProcessNamespace cannot both be set. - // Optional: Default to false. - // This field is beta-level and may be disabled with the PodShareProcessNamespace feature. - // +k8s:conversion-gen=false - // +optional - ShareProcessNamespace *bool `json:"shareProcessNamespace,omitempty" protobuf:"varint,27,opt,name=shareProcessNamespace"` - // SecurityContext holds pod-level security attributes and common container settings. - // Optional: Defaults to empty. See type description for default values of each field. 
- // +optional - SecurityContext *PodSecurityContext `json:"securityContext,omitempty" protobuf:"bytes,14,opt,name=securityContext"` - // ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - // If specified, these secrets will be passed to individual puller implementations for them to use. For example, - // in the case of docker, only DockerConfig type secrets are honored. - // More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,15,rep,name=imagePullSecrets"` - // Specifies the hostname of the Pod - // If not specified, the pod's hostname will be set to a system-defined value. - // +optional - Hostname string `json:"hostname,omitempty" protobuf:"bytes,16,opt,name=hostname"` - // If specified, the fully qualified Pod hostname will be "...svc.". - // If not specified, the pod will not have a domainname at all. - // +optional - Subdomain string `json:"subdomain,omitempty" protobuf:"bytes,17,opt,name=subdomain"` - // If specified, the pod's scheduling constraints - // +optional - Affinity *Affinity `json:"affinity,omitempty" protobuf:"bytes,18,opt,name=affinity"` - // If specified, the pod will be dispatched by specified scheduler. - // If not specified, the pod will be dispatched by default scheduler. - // +optional - SchedulerName string `json:"schedulerName,omitempty" protobuf:"bytes,19,opt,name=schedulerName"` - // If specified, the pod's tolerations. - // +optional - Tolerations []Toleration `json:"tolerations,omitempty" protobuf:"bytes,22,opt,name=tolerations"` - // HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts - // file if specified. This is only valid for non-hostNetwork pods. - // +optional - // +patchMergeKey=ip - // +patchStrategy=merge - HostAliases []HostAlias `json:"hostAliases,omitempty" patchStrategy:"merge" patchMergeKey:"ip" protobuf:"bytes,23,rep,name=hostAliases"` - // If specified, indicates the pod's priority. "system-node-critical" and - // "system-cluster-critical" are two special keywords which indicate the - // highest priorities with the former being the highest priority. Any other - // name must be defined by creating a PriorityClass object with that name. - // If not specified, the pod priority will be default or zero if there is no - // default. - // +optional - PriorityClassName string `json:"priorityClassName,omitempty" protobuf:"bytes,24,opt,name=priorityClassName"` - // The priority value. Various system components use this field to find the - // priority of the pod. When Priority Admission Controller is enabled, it - // prevents users from setting this field. The admission controller populates - // this field from PriorityClassName. - // The higher the value, the higher the priority. - // +optional - Priority *int32 `json:"priority,omitempty" protobuf:"bytes,25,opt,name=priority"` - // Specifies the DNS parameters of a pod. - // Parameters specified here will be merged to the generated DNS - // configuration based on DNSPolicy. - // +optional - DNSConfig *PodDNSConfig `json:"dnsConfig,omitempty" protobuf:"bytes,26,opt,name=dnsConfig"` - - // If specified, all readiness gates will be evaluated for pod readiness. 
- // A pod is ready when all its containers are ready AND - // all conditions specified in the readiness gates have status equal to "True" - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md - // +optional - ReadinessGates []PodReadinessGate `json:"readinessGates,omitempty" protobuf:"bytes,28,opt,name=readinessGates"` - // RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used - // to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. - // If unset or empty, the "legacy" RuntimeClass will be used, which is an implicit class with an - // empty definition that uses the default runtime handler. - // More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md - // This is an alpha feature and may change in the future. - // +optional - RuntimeClassName *string `json:"runtimeClassName,omitempty" protobuf:"bytes,29,opt,name=runtimeClassName"` -} - -// HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the -// pod's hosts file. -type HostAlias struct { - // IP address of the host file entry. - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - // Hostnames for the above IP address. - Hostnames []string `json:"hostnames,omitempty" protobuf:"bytes,2,rep,name=hostnames"` -} - -// PodSecurityContext holds pod-level security attributes and common container settings. -// Some fields are also present in container.securityContext. Field values of -// container.securityContext take precedence over field values of PodSecurityContext. -type PodSecurityContext struct { - // The SELinux context to be applied to all containers. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in SecurityContext. If set in - // both SecurityContext and PodSecurityContext, the value specified in SecurityContext - // takes precedence for that container. - // +optional - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,1,opt,name=seLinuxOptions"` - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,2,opt,name=runAsUser"` - // The GID to run the entrypoint of the container process. - // Uses runtime default if unset. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence - // for that container. - // +optional - RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,6,opt,name=runAsGroup"` - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in SecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. 
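
A compact PodSpec built from the fields defined above, under the same k8s.io/api/core/v1 assumption; the image, service-account, and node-selector values are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	grace := int64(30) // matches DefaultTerminationGracePeriodSeconds
	spec := corev1.PodSpec{
		Containers: []corev1.Container{{
			Name:  "redis",
			Image: "redis:5", // illustrative image
			Ports: []corev1.ContainerPort{{ContainerPort: 6379}},
		}},
		RestartPolicy:                 corev1.RestartPolicyAlways,
		DNSPolicy:                     corev1.DNSClusterFirst,
		TerminationGracePeriodSeconds: &grace,
		ServiceAccountName:            "redis-pool", // hypothetical service account
		NodeSelector:                  map[string]string{"kubernetes.io/os": "linux"},
	}
	fmt.Println("containers:", len(spec.Containers))
}
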
- // +optional - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,3,opt,name=runAsNonRoot"` - // A list of groups applied to the first process run in each container, in addition - // to the container's primary GID. If unspecified, no groups will be added to - // any container. - // +optional - SupplementalGroups []int64 `json:"supplementalGroups,omitempty" protobuf:"varint,4,rep,name=supplementalGroups"` - // A special supplemental group that applies to all containers in a pod. - // Some volume types allow the Kubelet to change the ownership of that volume - // to be owned by the pod: - // - // 1. The owning GID will be the FSGroup - // 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) - // 3. The permission bits are OR'd with rw-rw---- - // - // If unset, the Kubelet will not modify the ownership and permissions of any volume. - // +optional - FSGroup *int64 `json:"fsGroup,omitempty" protobuf:"varint,5,opt,name=fsGroup"` - // Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported - // sysctls (by the container runtime) might fail to launch. - // +optional - Sysctls []Sysctl `json:"sysctls,omitempty" protobuf:"bytes,7,rep,name=sysctls"` -} - -// PodQOSClass defines the supported qos classes of Pods. -type PodQOSClass string - -const ( - // PodQOSGuaranteed is the Guaranteed qos class. - PodQOSGuaranteed PodQOSClass = "Guaranteed" - // PodQOSBurstable is the Burstable qos class. - PodQOSBurstable PodQOSClass = "Burstable" - // PodQOSBestEffort is the BestEffort qos class. - PodQOSBestEffort PodQOSClass = "BestEffort" -) - -// PodDNSConfig defines the DNS parameters of a pod in addition to -// those generated from DNSPolicy. -type PodDNSConfig struct { - // A list of DNS name server IP addresses. - // This will be appended to the base nameservers generated from DNSPolicy. - // Duplicated nameservers will be removed. - // +optional - Nameservers []string `json:"nameservers,omitempty" protobuf:"bytes,1,rep,name=nameservers"` - // A list of DNS search domains for host-name lookup. - // This will be appended to the base search paths generated from DNSPolicy. - // Duplicated search paths will be removed. - // +optional - Searches []string `json:"searches,omitempty" protobuf:"bytes,2,rep,name=searches"` - // A list of DNS resolver options. - // This will be merged with the base options generated from DNSPolicy. - // Duplicated entries will be removed. Resolution options given in Options - // will override those that appear in the base DNSPolicy. - // +optional - Options []PodDNSConfigOption `json:"options,omitempty" protobuf:"bytes,3,rep,name=options"` -} - -// PodDNSConfigOption defines DNS resolver options of a pod. -type PodDNSConfigOption struct { - // Required. - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - // +optional - Value *string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"` -} - -// PodStatus represents information about the status of a pod. Status may trail the actual -// state of a system, especially if the node that hosts the pod cannot contact the control -// plane. -type PodStatus struct { - // The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. - // The conditions array, the reason and message fields, and the individual container status - // arrays contain more detail about the pod's status. 
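
A sketch of the pod-level security context and DNS config types, with the usual caveat that the import path and the concrete UID/GID, resolver, and option values are assumptions.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	runAsUser := int64(1000)
	fsGroup := int64(2000)
	nonRoot := true
	sc := corev1.PodSecurityContext{
		RunAsUser:    &runAsUser,
		RunAsNonRoot: &nonRoot,
		FSGroup:      &fsGroup, // volumes that support it are group-owned by this GID
	}

	ndots := "2"
	dns := corev1.PodDNSConfig{
		Nameservers: []string{"10.0.0.10"},         // illustrative resolver
		Searches:    []string{"svc.cluster.local"}, // merged with policy-generated search paths
		Options:     []corev1.PodDNSConfigOption{{Name: "ndots", Value: &ndots}},
	}
	fmt.Println("runAsNonRoot set:", sc.RunAsNonRoot != nil, "nameservers:", len(dns.Nameservers))
}
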
- // There are five possible phase values: - // - // Pending: The pod has been accepted by the Kubernetes system, but one or more of the - // container images has not been created. This includes time before being scheduled as - // well as time spent downloading images over the network, which could take a while. - // Running: The pod has been bound to a node, and all of the containers have been created. - // At least one container is still running, or is in the process of starting or restarting. - // Succeeded: All containers in the pod have terminated in success, and will not be restarted. - // Failed: All containers in the pod have terminated, and at least one container has - // terminated in failure. The container either exited with non-zero status or was terminated - // by the system. - // Unknown: For some reason the state of the pod could not be obtained, typically due to an - // error in communicating with the host of the pod. - // - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase - // +optional - Phase PodPhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=PodPhase"` - // Current service state of pod. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []PodCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` - // A human readable message indicating details about why the pod is in this condition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // A brief CamelCase message indicating details about why the pod is in this state. - // e.g. 'Evicted' - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be - // scheduled right away as preemption victims receive their graceful termination periods. - // This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide - // to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to - // give the resources on this node to a higher priority pod that is created after preemption. - // As a result, this field may be different than PodSpec.nodeName when the pod is - // scheduled. - // +optional - NominatedNodeName string `json:"nominatedNodeName,omitempty" protobuf:"bytes,11,opt,name=nominatedNodeName"` - - // IP address of the host to which the pod is assigned. Empty if not yet scheduled. - // +optional - HostIP string `json:"hostIP,omitempty" protobuf:"bytes,5,opt,name=hostIP"` - // IP address allocated to the pod. Routable at least within the cluster. - // Empty if not yet allocated. - // +optional - PodIP string `json:"podIP,omitempty" protobuf:"bytes,6,opt,name=podIP"` - - // RFC 3339 date and time at which the object was acknowledged by the Kubelet. - // This is before the Kubelet pulled the container image(s) for the pod. - // +optional - StartTime *metav1.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"` - - // The list has one entry per init container in the manifest. The most recent successful - // init container will have ready = true, the most recently started container will have - // startTime set. 
- // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status - InitContainerStatuses []ContainerStatus `json:"initContainerStatuses,omitempty" protobuf:"bytes,10,rep,name=initContainerStatuses"` - - // The list has one entry per container in the manifest. Each entry is currently the output - // of `docker inspect`. - // More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status - // +optional - ContainerStatuses []ContainerStatus `json:"containerStatuses,omitempty" protobuf:"bytes,8,rep,name=containerStatuses"` - // The Quality of Service (QOS) classification assigned to the pod based on resource requirements - // See PodQOSClass type for available QOS classes - // More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md - // +optional - QOSClass PodQOSClass `json:"qosClass,omitempty" protobuf:"bytes,9,rep,name=qosClass"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded -type PodStatusResult struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status PodStatus `json:"status,omitempty" protobuf:"bytes,2,opt,name=status"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Pod is a collection of containers that can run on a host. This resource is created -// by clients and scheduled onto hosts. -type Pod struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the pod. - // This data may not be up to date. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status PodStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodList is a list of Pods. -type PodList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of pods. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md - Items []Pod `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// PodTemplateSpec describes the data a pod should have when created from a template -type PodTemplateSpec struct { - // Standard object's metadata. 
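
To show how Pod and PodStatus fit together, a small example that walks the container statuses of a hand-built Pod (assumed upstream-compatible types; names and IPs are illustrative).

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	pod := corev1.Pod{
		TypeMeta:   metav1.TypeMeta{Kind: "Pod", APIVersion: "v1"},
		ObjectMeta: metav1.ObjectMeta{Name: "redis-0", Namespace: "default"},
		Status: corev1.PodStatus{
			Phase:  corev1.PodRunning,
			PodIP:  "10.1.2.3",
			HostIP: "192.168.0.10",
			ContainerStatuses: []corev1.ContainerStatus{
				{Name: "redis", Ready: true, RestartCount: 1},
			},
		},
	}
	for _, cs := range pod.Status.ContainerStatuses {
		fmt.Printf("%s/%s ready=%v restarts=%d\n", pod.Name, cs.Name, cs.Ready, cs.RestartCount)
	}
}
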
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the pod. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec PodSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplate describes a template for creating copies of a predefined pod. -type PodTemplate struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Template defines the pods that will be created from this pod template. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Template PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,2,opt,name=template"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodTemplateList is a list of PodTemplates. -type PodTemplateList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of pod templates - Items []PodTemplate `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ReplicationControllerSpec is the specification of a replication controller. -type ReplicationControllerSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` - - // Selector is a label query over pods that should match the Replicas count. - // If Selector is empty, it is defaulted to the labels present on the Pod template. - // Label keys and values that must match in order to be controlled by this replication - // controller, if empty defaulted to labels on Pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - - // TemplateRef is a reference to an object that describes the pod that will be created if - // insufficient replicas are detected. - // Reference to an object that describes the pod that will be created if insufficient replicas are detected. - // +optional - // TemplateRef *ObjectReference `json:"templateRef,omitempty"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. This takes precedence over a TemplateRef. 
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - // +optional - Template *PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` -} - -// ReplicationControllerStatus represents the current status of a replication -// controller. -type ReplicationControllerStatus struct { - // Replicas is the most recently oberved number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // The number of pods that have labels matching the labels of the pod template of the replication controller. - // +optional - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - - // The number of ready replicas for this replication controller. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` - - // The number of available replicas (ready for at least minReadySeconds) for this replication controller. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` - - // ObservedGeneration reflects the generation of the most recently observed replication controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` - - // Represents the latest available observations of a replication controller's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []ReplicationControllerCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` -} - -type ReplicationControllerConditionType string - -// These are valid conditions of a replication controller. -const ( - // ReplicationControllerReplicaFailure is added in a replication controller when one of its pods - // fails to be created due to insufficient quota, limit ranges, pod security policy, node selectors, - // etc. or deleted due to kubelet being down or finalizers are failing. - ReplicationControllerReplicaFailure ReplicationControllerConditionType = "ReplicaFailure" -) - -// ReplicationControllerCondition describes the state of a replication controller at a certain point. -type ReplicationControllerCondition struct { - // Type of replication controller condition. - Type ReplicationControllerConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicationControllerConditionType"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. 
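
A minimal ReplicationControllerSpec wiring Replicas, Selector, and a PodTemplateSpec together, again assuming the vendored types match upstream; labels and image are placeholders.

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	replicas := int32(3)
	labels := map[string]string{"app": "redis"} // Selector defaults to the template labels if left empty
	spec := corev1.ReplicationControllerSpec{
		Replicas: &replicas,
		Selector: labels,
		Template: &corev1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{Labels: labels},
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{{Name: "redis", Image: "redis:5"}},
			},
		},
	}
	fmt.Println("desired replicas:", *spec.Replicas)
}
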
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/extensions/v1beta1.Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/extensions/v1beta1.Scale,result=k8s.io/api/extensions/v1beta1.Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationController represents the configuration of a replication controller. -type ReplicationController struct { - metav1.TypeMeta `json:",inline"` - - // If the Labels of a ReplicationController are empty, they are defaulted to - // be the same as the Pod(s) that the replication controller manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the specification of the desired behavior of the replication controller. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec ReplicationControllerSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the most recently observed status of the replication controller. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status ReplicationControllerStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicationControllerList is a collection of replication controllers. -type ReplicationControllerList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of replication controllers. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller - Items []ReplicationController `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Session Affinity Type string -type ServiceAffinity string - -const ( - // ServiceAffinityClientIP is the Client IP based. - ServiceAffinityClientIP ServiceAffinity = "ClientIP" - - // ServiceAffinityNone - no session affinity. - ServiceAffinityNone ServiceAffinity = "None" -) - -const DefaultClientIPServiceAffinitySeconds int32 = 10800 - -// SessionAffinityConfig represents the configurations of session affinity. -type SessionAffinityConfig struct { - // clientIP contains the configurations of Client IP based session affinity. - // +optional - ClientIP *ClientIPConfig `json:"clientIP,omitempty" protobuf:"bytes,1,opt,name=clientIP"` -} - -// ClientIPConfig represents the configurations of Client IP based session affinity. -type ClientIPConfig struct { - // timeoutSeconds specifies the seconds of ClientIP type session sticky time. - // The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP". - // Default value is 10800(for 3 hours). 
- // +optional - TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,1,opt,name=timeoutSeconds"` -} - -// Service Type string describes ingress methods for a service -type ServiceType string - -const ( - // ServiceTypeClusterIP means a service will only be accessible inside the - // cluster, via the cluster IP. - ServiceTypeClusterIP ServiceType = "ClusterIP" - - // ServiceTypeNodePort means a service will be exposed on one port of - // every node, in addition to 'ClusterIP' type. - ServiceTypeNodePort ServiceType = "NodePort" - - // ServiceTypeLoadBalancer means a service will be exposed via an - // external load balancer (if the cloud provider supports it), in addition - // to 'NodePort' type. - ServiceTypeLoadBalancer ServiceType = "LoadBalancer" - - // ServiceTypeExternalName means a service consists of only a reference to - // an external name that kubedns or equivalent will return as a CNAME - // record, with no exposing or proxying of any pods involved. - ServiceTypeExternalName ServiceType = "ExternalName" -) - -// Service External Traffic Policy Type string -type ServiceExternalTrafficPolicyType string - -const ( - // ServiceExternalTrafficPolicyTypeLocal specifies node-local endpoints behavior. - ServiceExternalTrafficPolicyTypeLocal ServiceExternalTrafficPolicyType = "Local" - // ServiceExternalTrafficPolicyTypeCluster specifies node-global (legacy) behavior. - ServiceExternalTrafficPolicyTypeCluster ServiceExternalTrafficPolicyType = "Cluster" -) - -// ServiceStatus represents the current status of a service. -type ServiceStatus struct { - // LoadBalancer contains the current status of the load-balancer, - // if one is present. - // +optional - LoadBalancer LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` -} - -// LoadBalancerStatus represents the status of a load-balancer. -type LoadBalancerStatus struct { - // Ingress is a list containing ingress points for the load-balancer. - // Traffic intended for the service should be sent to these ingress points. - // +optional - Ingress []LoadBalancerIngress `json:"ingress,omitempty" protobuf:"bytes,1,rep,name=ingress"` -} - -// LoadBalancerIngress represents the status of a load-balancer ingress point: -// traffic intended for the service should be sent to an ingress point. -type LoadBalancerIngress struct { - // IP is set for load-balancer ingress points that are IP based - // (typically GCE or OpenStack load-balancers) - // +optional - IP string `json:"ip,omitempty" protobuf:"bytes,1,opt,name=ip"` - - // Hostname is set for load-balancer ingress points that are DNS based - // (typically AWS load-balancers) - // +optional - Hostname string `json:"hostname,omitempty" protobuf:"bytes,2,opt,name=hostname"` -} - -// ServiceSpec describes the attributes that a user creates on a service. -type ServiceSpec struct { - // The list of ports that are exposed by this service. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +patchMergeKey=port - // +patchStrategy=merge - Ports []ServicePort `json:"ports,omitempty" patchStrategy:"merge" patchMergeKey:"port" protobuf:"bytes,1,rep,name=ports"` - - // Route service traffic to pods with label keys and values matching this - // selector. If empty or not present, the service is assumed to have an - // external process managing its endpoints, which Kubernetes will not - // modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. 
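
A sketch of ClientIP session affinity using the types above (upstream-package assumption; the 3600s timeout is just an example inside the documented 1-86400 range, versus the 10800s default).

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
)

func main() {
	timeout := int32(3600)
	spec := corev1.ServiceSpec{
		Type:            corev1.ServiceTypeClusterIP,
		SessionAffinity: corev1.ServiceAffinityClientIP,
		SessionAffinityConfig: &corev1.SessionAffinityConfig{
			ClientIP: &corev1.ClientIPConfig{TimeoutSeconds: &timeout},
		},
	}
	fmt.Println(spec.Type, *spec.SessionAffinityConfig.ClientIP.TimeoutSeconds)
}
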
- // Ignored if type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/ - // +optional - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - - // clusterIP is the IP address of the service and is usually assigned - // randomly by the master. If an address is specified manually and is not in - // use by others, it will be allocated to the service; otherwise, creation - // of the service will fail. This field can not be changed through updates. - // Valid values are "None", empty string (""), or a valid IP address. "None" - // can be specified for headless services when proxying is not required. - // Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if - // type is ExternalName. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +optional - ClusterIP string `json:"clusterIP,omitempty" protobuf:"bytes,3,opt,name=clusterIP"` - - // type determines how the Service is exposed. Defaults to ClusterIP. Valid - // options are ExternalName, ClusterIP, NodePort, and LoadBalancer. - // "ExternalName" maps to the specified externalName. - // "ClusterIP" allocates a cluster-internal IP address for load-balancing to - // endpoints. Endpoints are determined by the selector or if that is not - // specified, by manual construction of an Endpoints object. If clusterIP is - // "None", no virtual IP is allocated and the endpoints are published as a - // set of endpoints rather than a stable IP. - // "NodePort" builds on ClusterIP and allocates a port on every node which - // routes to the clusterIP. - // "LoadBalancer" builds on NodePort and creates an - // external load-balancer (if supported in the current cloud) which routes - // to the clusterIP. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services---service-types - // +optional - Type ServiceType `json:"type,omitempty" protobuf:"bytes,4,opt,name=type,casttype=ServiceType"` - - // externalIPs is a list of IP addresses for which nodes in the cluster - // will also accept traffic for this service. These IPs are not managed by - // Kubernetes. The user is responsible for ensuring that traffic arrives - // at a node with this IP. A common example is external load-balancers - // that are not part of the Kubernetes system. - // +optional - ExternalIPs []string `json:"externalIPs,omitempty" protobuf:"bytes,5,rep,name=externalIPs"` - - // Supports "ClientIP" and "None". Used to maintain session affinity. - // Enable client IP based session affinity. - // Must be ClientIP or None. - // Defaults to None. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - // +optional - SessionAffinity ServiceAffinity `json:"sessionAffinity,omitempty" protobuf:"bytes,7,opt,name=sessionAffinity,casttype=ServiceAffinity"` - - // Only applies to Service Type: LoadBalancer - // LoadBalancer will get created with the IP specified in this field. - // This feature depends on whether the underlying cloud-provider supports specifying - // the loadBalancerIP when a load balancer is created. - // This field will be ignored if the cloud-provider does not support the feature. 
- // +optional - LoadBalancerIP string `json:"loadBalancerIP,omitempty" protobuf:"bytes,8,opt,name=loadBalancerIP"` - - // If specified and supported by the platform, this will restrict traffic through the cloud-provider - // load-balancer will be restricted to the specified client IPs. This field will be ignored if the - // cloud-provider does not support the feature." - // More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/ - // +optional - LoadBalancerSourceRanges []string `json:"loadBalancerSourceRanges,omitempty" protobuf:"bytes,9,opt,name=loadBalancerSourceRanges"` - - // externalName is the external reference that kubedns or equivalent will - // return as a CNAME record for this service. No proxying will be involved. - // Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) - // and requires Type to be ExternalName. - // +optional - ExternalName string `json:"externalName,omitempty" protobuf:"bytes,10,opt,name=externalName"` - - // externalTrafficPolicy denotes if this Service desires to route external - // traffic to node-local or cluster-wide endpoints. "Local" preserves the - // client source IP and avoids a second hop for LoadBalancer and Nodeport - // type services, but risks potentially imbalanced traffic spreading. - // "Cluster" obscures the client source IP and may cause a second hop to - // another node, but should have good overall load-spreading. - // +optional - ExternalTrafficPolicy ServiceExternalTrafficPolicyType `json:"externalTrafficPolicy,omitempty" protobuf:"bytes,11,opt,name=externalTrafficPolicy"` - - // healthCheckNodePort specifies the healthcheck nodePort for the service. - // If not specified, HealthCheckNodePort is created by the service api - // backend with the allocated nodePort. Will use user-specified nodePort value - // if specified by the client. Only effects when Type is set to LoadBalancer - // and ExternalTrafficPolicy is set to Local. - // +optional - HealthCheckNodePort int32 `json:"healthCheckNodePort,omitempty" protobuf:"bytes,12,opt,name=healthCheckNodePort"` - - // publishNotReadyAddresses, when set to true, indicates that DNS implementations - // must publish the notReadyAddresses of subsets for the Endpoints associated with - // the Service. The default value is false. - // The primary use case for setting this field is to use a StatefulSet's Headless Service - // to propagate SRV records for its Pods without respect to their readiness for purpose - // of peer discovery. - // +optional - PublishNotReadyAddresses bool `json:"publishNotReadyAddresses,omitempty" protobuf:"varint,13,opt,name=publishNotReadyAddresses"` - // sessionAffinityConfig contains the configurations of session affinity. - // +optional - SessionAffinityConfig *SessionAffinityConfig `json:"sessionAffinityConfig,omitempty" protobuf:"bytes,14,opt,name=sessionAffinityConfig"` -} - -// ServicePort contains information on service's port. -type ServicePort struct { - // The name of this port within the service. This must be a DNS_LABEL. - // All ports within a ServiceSpec must have unique names. This maps to - // the 'Name' field in EndpointPort objects. - // Optional if only one ServicePort is defined on this service. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // The IP protocol for this port. Supports "TCP", "UDP", and "SCTP". - // Default is TCP. 
- // +optional - Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,2,opt,name=protocol,casttype=Protocol"` - - // The port that will be exposed by this service. - Port int32 `json:"port" protobuf:"varint,3,opt,name=port"` - - // Number or name of the port to access on the pods targeted by the service. - // Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. - // If this is a string, it will be looked up as a named port in the - // target Pod's container ports. If this is not specified, the value - // of the 'port' field is used (an identity map). - // This field is ignored for services with clusterIP=None, and should be - // omitted or set equal to the 'port' field. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service - // +optional - TargetPort intstr.IntOrString `json:"targetPort,omitempty" protobuf:"bytes,4,opt,name=targetPort"` - - // The port on each node on which this service is exposed when type=NodePort or LoadBalancer. - // Usually assigned by the system. If specified, it will be allocated to the service - // if unused or else creation of the service will fail. - // Default is to auto-allocate a port if the ServiceType of this Service requires one. - // More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - // +optional - NodePort int32 `json:"nodePort,omitempty" protobuf:"varint,5,opt,name=nodePort"` -} - -// +genclient -// +genclient:skipVerbs=deleteCollection -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Service is a named abstraction of software service (for example, mysql) consisting of local port -// (for example 3306) that the proxy listens on, and the selector that determines which pods -// will answer requests sent through the proxy. -type Service struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of a service. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec ServiceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the service. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status ServiceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -const ( - // ClusterIPNone - do not assign a cluster IP - // no proxying required and no environment variables should be created for pods - ClusterIPNone = "None" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceList holds a list of services. -type ServiceList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of services - Items []Service `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccount binds together: -// * a name, understood by users, and perhaps by peripheral systems, for an identity -// * a principal that can be authenticated and authorized -// * a set of secrets -type ServiceAccount struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. - // More info: https://kubernetes.io/docs/concepts/configuration/secret - // +optional - // +patchMergeKey=name - // +patchStrategy=merge - Secrets []ObjectReference `json:"secrets,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=secrets"` - - // ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images - // in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets - // can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. - // More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - // +optional - ImagePullSecrets []LocalObjectReference `json:"imagePullSecrets,omitempty" protobuf:"bytes,3,rep,name=imagePullSecrets"` - - // AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. - // Can be overridden at the pod level. - // +optional - AutomountServiceAccountToken *bool `json:"automountServiceAccountToken,omitempty" protobuf:"varint,4,opt,name=automountServiceAccountToken"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceAccountList is a list of ServiceAccount objects -type ServiceAccountList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of ServiceAccounts. - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ - Items []ServiceAccount `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Endpoints is a collection of endpoints that implement the actual service. Example: -// Name: "mysvc", -// Subsets: [ -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// }, -// { -// Addresses: [{"ip": "10.10.3.3"}], -// Ports: [{"name": "a", "port": 93}, {"name": "b", "port": 76}] -// }, -// ] -type Endpoints struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The set of all endpoints is the union of all subsets. Addresses are placed into - // subsets according to the IPs they share. A single address with multiple ports, - // some of which are ready and some of which are not (because they come from - // different containers) will result in the address being displayed in different - // subsets for the different ports. No address will appear in both Addresses and - // NotReadyAddresses in the same subset. - // Sets of addresses and ports that comprise a service. - // +optional - Subsets []EndpointSubset `json:"subsets,omitempty" protobuf:"bytes,2,rep,name=subsets"` -} - -// EndpointSubset is a group of addresses with a common set of ports. The -// expanded set of endpoints is the Cartesian product of Addresses x Ports. -// For example, given: -// { -// Addresses: [{"ip": "10.10.1.1"}, {"ip": "10.10.2.2"}], -// Ports: [{"name": "a", "port": 8675}, {"name": "b", "port": 309}] -// } -// The resulting set of endpoints can be viewed as: -// a: [ 10.10.1.1:8675, 10.10.2.2:8675 ], -// b: [ 10.10.1.1:309, 10.10.2.2:309 ] -type EndpointSubset struct { - // IP addresses which offer the related ports that are marked as ready. These endpoints - // should be considered safe for load balancers and clients to utilize. - // +optional - Addresses []EndpointAddress `json:"addresses,omitempty" protobuf:"bytes,1,rep,name=addresses"` - // IP addresses which offer the related ports but are not currently marked as ready - // because they have not yet finished starting, have recently failed a readiness check, - // or have recently failed a liveness check. - // +optional - NotReadyAddresses []EndpointAddress `json:"notReadyAddresses,omitempty" protobuf:"bytes,2,rep,name=notReadyAddresses"` - // Port numbers available on the related IP addresses. - // +optional - Ports []EndpointPort `json:"ports,omitempty" protobuf:"bytes,3,rep,name=ports"` -} - -// EndpointAddress is a tuple that describes single IP address. -type EndpointAddress struct { - // The IP of this endpoint. - // May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), - // or link-local multicast ((224.0.0.0/24). - // IPv6 is also accepted but not fully supported on all platforms. Also, certain - // kubernetes components, like kube-proxy, are not IPv6 ready. - // TODO: This should allow hostname or IP, See #4447. - IP string `json:"ip" protobuf:"bytes,1,opt,name=ip"` - // The Hostname of this endpoint - // +optional - Hostname string `json:"hostname,omitempty" protobuf:"bytes,3,opt,name=hostname"` - // Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node. - // +optional - NodeName *string `json:"nodeName,omitempty" protobuf:"bytes,4,opt,name=nodeName"` - // Reference to object providing the endpoint. - // +optional - TargetRef *ObjectReference `json:"targetRef,omitempty" protobuf:"bytes,2,opt,name=targetRef"` -} - -// EndpointPort is a tuple that describes a single port. -type EndpointPort struct { - // The name of this port (corresponds to ServicePort.Name). - // Must be a DNS_LABEL. - // Optional only if one port is defined. - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` - - // The port number of the endpoint. - Port int32 `json:"port" protobuf:"varint,2,opt,name=port"` - - // The IP protocol for this port. 
- // Must be UDP, TCP, or SCTP. - // Default is TCP. - // +optional - Protocol Protocol `json:"protocol,omitempty" protobuf:"bytes,3,opt,name=protocol,casttype=Protocol"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EndpointsList is a list of endpoints. -type EndpointsList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of endpoints. - Items []Endpoints `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// NodeSpec describes the attributes that a node is created with. -type NodeSpec struct { - // PodCIDR represents the pod IP range assigned to the node. - // +optional - PodCIDR string `json:"podCIDR,omitempty" protobuf:"bytes,1,opt,name=podCIDR"` - // ID of the node assigned by the cloud provider in the format: :// - // +optional - ProviderID string `json:"providerID,omitempty" protobuf:"bytes,3,opt,name=providerID"` - // Unschedulable controls node schedulability of new pods. By default, node is schedulable. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration - // +optional - Unschedulable bool `json:"unschedulable,omitempty" protobuf:"varint,4,opt,name=unschedulable"` - // If specified, the node's taints. - // +optional - Taints []Taint `json:"taints,omitempty" protobuf:"bytes,5,opt,name=taints"` - // If specified, the source to get node configuration from - // The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field - // +optional - ConfigSource *NodeConfigSource `json:"configSource,omitempty" protobuf:"bytes,6,opt,name=configSource"` - - // Deprecated. Not all kubelets will set this field. Remove field after 1.13. - // see: https://issues.k8s.io/61966 - // +optional - DoNotUse_ExternalID string `json:"externalID,omitempty" protobuf:"bytes,2,opt,name=externalID"` -} - -// NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil. -type NodeConfigSource struct { - // For historical context, regarding the below kind, apiVersion, and configMapRef deprecation tags: - // 1. kind/apiVersion were used by the kubelet to persist this struct to disk (they had no protobuf tags) - // 2. configMapRef and proto tag 1 were used by the API to refer to a configmap, - // but used a generic ObjectReference type that didn't really have the fields we needed - // All uses/persistence of the NodeConfigSource struct prior to 1.11 were gated by alpha feature flags, - // so there was no persisted data for these fields that needed to be migrated/handled. - - // +k8s:deprecated=kind - // +k8s:deprecated=apiVersion - // +k8s:deprecated=configMapRef,protobuf=1 - - // ConfigMap is a reference to a Node's ConfigMap - ConfigMap *ConfigMapNodeConfigSource `json:"configMap,omitempty" protobuf:"bytes,2,opt,name=configMap"` -} - -// ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node. -type ConfigMapNodeConfigSource struct { - // Namespace is the metadata.namespace of the referenced ConfigMap. - // This field is required in all cases. - Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"` - - // Name is the metadata.name of the referenced ConfigMap. - // This field is required in all cases. 
- Name string `json:"name" protobuf:"bytes,2,opt,name=name"` - - // UID is the metadata.UID of the referenced ConfigMap. - // This field is forbidden in Node.Spec, and required in Node.Status. - // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,3,opt,name=uid"` - - // ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. - // This field is forbidden in Node.Spec, and required in Node.Status. - // +optional - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,4,opt,name=resourceVersion"` - - // KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure - // This field is required in all cases. - KubeletConfigKey string `json:"kubeletConfigKey" protobuf:"bytes,5,opt,name=kubeletConfigKey"` -} - -// DaemonEndpoint contains information about a single Daemon endpoint. -type DaemonEndpoint struct { - /* - The port tag was not properly in quotes in earlier releases, so it must be - uppercased for backwards compat (since it was falling back to var name of - 'Port'). - */ - - // Port number of the given endpoint. - Port int32 `json:"Port" protobuf:"varint,1,opt,name=Port"` -} - -// NodeDaemonEndpoints lists ports opened by daemons running on the Node. -type NodeDaemonEndpoints struct { - // Endpoint on which Kubelet is listening. - // +optional - KubeletEndpoint DaemonEndpoint `json:"kubeletEndpoint,omitempty" protobuf:"bytes,1,opt,name=kubeletEndpoint"` -} - -// NodeSystemInfo is a set of ids/uuids to uniquely identify the node. -type NodeSystemInfo struct { - // MachineID reported by the node. For unique machine identification - // in the cluster this field is preferred. Learn more from man(5) - // machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html - MachineID string `json:"machineID" protobuf:"bytes,1,opt,name=machineID"` - // SystemUUID reported by the node. For unique machine identification - // MachineID is preferred. This field is specific to Red Hat hosts - // https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html - SystemUUID string `json:"systemUUID" protobuf:"bytes,2,opt,name=systemUUID"` - // Boot ID reported by the node. - BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"` - // Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64). - KernelVersion string `json:"kernelVersion" protobuf:"bytes,4,opt,name=kernelVersion"` - // OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)). - OSImage string `json:"osImage" protobuf:"bytes,5,opt,name=osImage"` - // ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0). - ContainerRuntimeVersion string `json:"containerRuntimeVersion" protobuf:"bytes,6,opt,name=containerRuntimeVersion"` - // Kubelet Version reported by the node. - KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"` - // KubeProxy Version reported by the node. - KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"` - // The Operating System reported by the node - OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"` - // The Architecture reported by the node - Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"` -} - -// NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource. 
-type NodeConfigStatus struct { - // Assigned reports the checkpointed config the node will try to use. - // When Node.Spec.ConfigSource is updated, the node checkpoints the associated - // config payload to local disk, along with a record indicating intended - // config. The node refers to this record to choose its config checkpoint, and - // reports this record in Assigned. Assigned only updates in the status after - // the record has been checkpointed to disk. When the Kubelet is restarted, - // it tries to make the Assigned config the Active config by loading and - // validating the checkpointed payload identified by Assigned. - // +optional - Assigned *NodeConfigSource `json:"assigned,omitempty" protobuf:"bytes,1,opt,name=assigned"` - // Active reports the checkpointed config the node is actively using. - // Active will represent either the current version of the Assigned config, - // or the current LastKnownGood config, depending on whether attempting to use the - // Assigned config results in an error. - // +optional - Active *NodeConfigSource `json:"active,omitempty" protobuf:"bytes,2,opt,name=active"` - // LastKnownGood reports the checkpointed config the node will fall back to - // when it encounters an error attempting to use the Assigned config. - // The Assigned config becomes the LastKnownGood config when the node determines - // that the Assigned config is stable and correct. - // This is currently implemented as a 10-minute soak period starting when the local - // record of Assigned config is updated. If the Assigned config is Active at the end - // of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is - // reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, - // because the local default config is always assumed good. - // You should not make assumptions about the node's method of determining config stability - // and correctness, as this may change or become configurable in the future. - // +optional - LastKnownGood *NodeConfigSource `json:"lastKnownGood,omitempty" protobuf:"bytes,3,opt,name=lastKnownGood"` - // Error describes any problems reconciling the Spec.ConfigSource to the Active config. - // Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned - // record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting - // to load or validate the Assigned config, etc. - // Errors may occur at different points while syncing config. Earlier errors (e.g. download or - // checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across - // Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in - // a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error - // by fixing the config assigned in Spec.ConfigSource. - // You can find additional information for debugging by searching the error message in the Kubelet log. - // Error is a human-readable description of the error state; machines can check whether or not Error - // is empty, but should not rely on the stability of the Error text across Kubelet versions. - // +optional - Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` -} - -// NodeStatus is information about the current status of a node. -type NodeStatus struct { - // Capacity represents the total resources of a node. 
- // More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity - // +optional - Capacity ResourceList `json:"capacity,omitempty" protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` - // Allocatable represents the resources of a node that are available for scheduling. - // Defaults to Capacity. - // +optional - Allocatable ResourceList `json:"allocatable,omitempty" protobuf:"bytes,2,rep,name=allocatable,casttype=ResourceList,castkey=ResourceName"` - // NodePhase is the recently observed lifecycle phase of the node. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#phase - // The field is never populated, and now is deprecated. - // +optional - Phase NodePhase `json:"phase,omitempty" protobuf:"bytes,3,opt,name=phase,casttype=NodePhase"` - // Conditions is an array of current observed node conditions. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#condition - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []NodeCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,4,rep,name=conditions"` - // List of addresses reachable to the node. - // Queried from cloud provider, if available. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Addresses []NodeAddress `json:"addresses,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,5,rep,name=addresses"` - // Endpoints of daemons running on the Node. - // +optional - DaemonEndpoints NodeDaemonEndpoints `json:"daemonEndpoints,omitempty" protobuf:"bytes,6,opt,name=daemonEndpoints"` - // Set of ids/uuids to uniquely identify the node. - // More info: https://kubernetes.io/docs/concepts/nodes/node/#info - // +optional - NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"` - // List of container images on this node - // +optional - Images []ContainerImage `json:"images,omitempty" protobuf:"bytes,8,rep,name=images"` - // List of attachable volumes in use (mounted) by the node. - // +optional - VolumesInUse []UniqueVolumeName `json:"volumesInUse,omitempty" protobuf:"bytes,9,rep,name=volumesInUse"` - // List of volumes that are attached to the node. - // +optional - VolumesAttached []AttachedVolume `json:"volumesAttached,omitempty" protobuf:"bytes,10,rep,name=volumesAttached"` - // Status of the config assigned to the node via the dynamic Kubelet config feature. - // +optional - Config *NodeConfigStatus `json:"config,omitempty" protobuf:"bytes,11,opt,name=config"` -} - -type UniqueVolumeName string - -// AttachedVolume describes a volume attached to a node -type AttachedVolume struct { - // Name of the attached volume - Name UniqueVolumeName `json:"name" protobuf:"bytes,1,rep,name=name"` - - // DevicePath represents the device path where the volume should be available - DevicePath string `json:"devicePath" protobuf:"bytes,2,rep,name=devicePath"` -} - -// AvoidPods describes pods that should avoid this node. This is the value for a -// Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and -// will eventually become a field of NodeStatus. -type AvoidPods struct { - // Bounded-sized list of signatures of pods that should avoid this node, sorted - // in timestamp order from oldest to newest. Size of the slice is unspecified. 
- // +optional - PreferAvoidPods []PreferAvoidPodsEntry `json:"preferAvoidPods,omitempty" protobuf:"bytes,1,rep,name=preferAvoidPods"` -} - -// Describes a class of pods that should avoid this node. -type PreferAvoidPodsEntry struct { - // The class of pods. - PodSignature PodSignature `json:"podSignature" protobuf:"bytes,1,opt,name=podSignature"` - // Time at which this entry was added to the list. - // +optional - EvictionTime metav1.Time `json:"evictionTime,omitempty" protobuf:"bytes,2,opt,name=evictionTime"` - // (brief) reason why this entry was added to the list. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` - // Human readable message indicating why this entry was added to the list. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` -} - -// Describes the class of pods that should avoid this node. -// Exactly one field should be set. -type PodSignature struct { - // Reference to controller whose pods should avoid this node. - // +optional - PodController *metav1.OwnerReference `json:"podController,omitempty" protobuf:"bytes,1,opt,name=podController"` -} - -// Describe a container image -type ContainerImage struct { - // Names by which this image is known. - // e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"] - Names []string `json:"names" protobuf:"bytes,1,rep,name=names"` - // The size of the image in bytes. - // +optional - SizeBytes int64 `json:"sizeBytes,omitempty" protobuf:"varint,2,opt,name=sizeBytes"` -} - -type NodePhase string - -// These are the valid phases of node. -const ( - // NodePending means the node has been created/added by the system, but not configured. - NodePending NodePhase = "Pending" - // NodeRunning means the node has been configured and has Kubernetes components running. - NodeRunning NodePhase = "Running" - // NodeTerminated means the node has been removed from the cluster. - NodeTerminated NodePhase = "Terminated" -) - -type NodeConditionType string - -// These are valid conditions of node. Currently, we don't have enough information to decide -// node condition. In the future, we will add more. The proposed set of conditions are: -// NodeReachable, NodeLive, NodeReady, NodeSchedulable, NodeRunnable. -const ( - // NodeReady means kubelet is healthy and ready to accept pods. - NodeReady NodeConditionType = "Ready" - // NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk - // space on the node. - NodeOutOfDisk NodeConditionType = "OutOfDisk" - // NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory. - NodeMemoryPressure NodeConditionType = "MemoryPressure" - // NodeDiskPressure means the kubelet is under pressure due to insufficient available disk. - NodeDiskPressure NodeConditionType = "DiskPressure" - // NodePIDPressure means the kubelet is under pressure due to insufficient available PID. - NodePIDPressure NodeConditionType = "PIDPressure" - // NodeNetworkUnavailable means that network for the node is not correctly configured. - NodeNetworkUnavailable NodeConditionType = "NetworkUnavailable" -) - -// NodeCondition contains condition information for a node. -type NodeCondition struct { - // Type of node condition. - Type NodeConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeConditionType"` - // Status of the condition, one of True, False, Unknown. 
- Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Last time we got an update on a given condition. - // +optional - LastHeartbeatTime metav1.Time `json:"lastHeartbeatTime,omitempty" protobuf:"bytes,3,opt,name=lastHeartbeatTime"` - // Last time the condition transit from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,4,opt,name=lastTransitionTime"` - // (brief) reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,5,opt,name=reason"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,6,opt,name=message"` -} - -type NodeAddressType string - -// These are valid address type of node. -const ( - NodeHostName NodeAddressType = "Hostname" - NodeExternalIP NodeAddressType = "ExternalIP" - NodeInternalIP NodeAddressType = "InternalIP" - NodeExternalDNS NodeAddressType = "ExternalDNS" - NodeInternalDNS NodeAddressType = "InternalDNS" -) - -// NodeAddress contains information for the node's address. -type NodeAddress struct { - // Node address type, one of Hostname, ExternalIP or InternalIP. - Type NodeAddressType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=NodeAddressType"` - // The node address. - Address string `json:"address" protobuf:"bytes,2,opt,name=address"` -} - -// ResourceName is the name identifying various resources in a ResourceList. -type ResourceName string - -// Resource names must be not more than 63 characters, consisting of upper- or lower-case alphanumeric characters, -// with the -, _, and . characters allowed anywhere, except the first or last character. -// The default convention, matching that for annotations, is to use lower-case names, with dashes, rather than -// camel case, separating compound words. -// Fully-qualified resource typenames are constructed from a DNS-style subdomain, followed by a slash `/` and a name. -const ( - // CPU, in cores. (500m = .5 cores) - ResourceCPU ResourceName = "cpu" - // Memory, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceMemory ResourceName = "memory" - // Volume size, in bytes (e,g. 5Gi = 5GiB = 5 * 1024 * 1024 * 1024) - ResourceStorage ResourceName = "storage" - // Local ephemeral storage, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // The resource name for ResourceEphemeralStorage is alpha and it can change across releases. - ResourceEphemeralStorage ResourceName = "ephemeral-storage" -) - -const ( - // Default namespace prefix. - ResourceDefaultNamespacePrefix = "kubernetes.io/" - // Name prefix for huge page resources (alpha). - ResourceHugePagesPrefix = "hugepages-" - // Name prefix for storage resource limits - ResourceAttachableVolumesPrefix = "attachable-volumes-" -) - -// ResourceList is a set of (resource name, quantity) pairs. -type ResourceList map[ResourceName]resource.Quantity - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Node is a worker node in Kubernetes. -// Each node will have a unique identifier in the cache (i.e. in etcd). -type Node struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of a node. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec NodeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the node. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status NodeStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeList is the whole list of all Nodes which have been registered with master. -type NodeList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of nodes - Items []Node `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// FinalizerName is the name identifying a finalizer during namespace lifecycle. -type FinalizerName string - -// These are internal finalizer values to Kubernetes, must be qualified name unless defined here or -// in metav1. -const ( - FinalizerKubernetes FinalizerName = "kubernetes" -) - -// NamespaceSpec describes the attributes on a Namespace. -type NamespaceSpec struct { - // Finalizers is an opaque list of values that must be empty to permanently remove object from storage. - // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ - // +optional - Finalizers []FinalizerName `json:"finalizers,omitempty" protobuf:"bytes,1,rep,name=finalizers,casttype=FinalizerName"` -} - -// NamespaceStatus is information about the current status of a Namespace. -type NamespaceStatus struct { - // Phase is the current lifecycle phase of the namespace. - // More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/ - // +optional - Phase NamespacePhase `json:"phase,omitempty" protobuf:"bytes,1,opt,name=phase,casttype=NamespacePhase"` -} - -type NamespacePhase string - -// These are the valid phases of a namespace. -const ( - // NamespaceActive means the namespace is available for use in the system - NamespaceActive NamespacePhase = "Active" - // NamespaceTerminating means the namespace is undergoing graceful termination - NamespaceTerminating NamespacePhase = "Terminating" -) - -// +genclient -// +genclient:nonNamespaced -// +genclient:skipVerbs=deleteCollection -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Namespace provides a scope for Names. -// Use of multiple namespaces is optional. -type Namespace struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the behavior of the Namespace. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec NamespaceSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status describes the current status of a Namespace. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status NamespaceStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NamespaceList is a list of Namespaces. -type NamespaceList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Namespace objects in the list. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - Items []Namespace `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Binding ties one object to another; for example, a pod is bound to a node by a scheduler. -// Deprecated in 1.7, please use the bindings subresource of pods instead. -type Binding struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The target object that you want to bind to the standard object. - Target ObjectReference `json:"target" protobuf:"bytes,2,opt,name=target"` -} - -// Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out. -// +k8s:openapi-gen=false -type Preconditions struct { - // Specifies the target UID. - // +optional - UID *types.UID `json:"uid,omitempty" protobuf:"bytes,1,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodLogOptions is the query options for a Pod's logs REST call. -type PodLogOptions struct { - metav1.TypeMeta `json:",inline"` - - // The container for which to stream logs. Defaults to only container if there is one container in the pod. - // +optional - Container string `json:"container,omitempty" protobuf:"bytes,1,opt,name=container"` - // Follow the log stream of the pod. Defaults to false. - // +optional - Follow bool `json:"follow,omitempty" protobuf:"varint,2,opt,name=follow"` - // Return previous terminated container logs. Defaults to false. - // +optional - Previous bool `json:"previous,omitempty" protobuf:"varint,3,opt,name=previous"` - // A relative time in seconds before the current time from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - // +optional - SinceSeconds *int64 `json:"sinceSeconds,omitempty" protobuf:"varint,4,opt,name=sinceSeconds"` - // An RFC3339 timestamp from which to show logs. If this value - // precedes the time a pod was started, only logs since the pod start will be returned. - // If this value is in the future, no logs will be returned. - // Only one of sinceSeconds or sinceTime may be specified. - // +optional - SinceTime *metav1.Time `json:"sinceTime,omitempty" protobuf:"bytes,5,opt,name=sinceTime"` - // If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line - // of log output. Defaults to false. 
- // +optional - Timestamps bool `json:"timestamps,omitempty" protobuf:"varint,6,opt,name=timestamps"` - // If set, the number of lines from the end of the logs to show. If not specified, - // logs are shown from the creation of the container or sinceSeconds or sinceTime - // +optional - TailLines *int64 `json:"tailLines,omitempty" protobuf:"varint,7,opt,name=tailLines"` - // If set, the number of bytes to read from the server before terminating the - // log output. This may not display a complete final line of logging, and may return - // slightly more or slightly less than the specified limit. - // +optional - LimitBytes *int64 `json:"limitBytes,omitempty" protobuf:"varint,8,opt,name=limitBytes"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodAttachOptions is the query options to a Pod's remote attach call. -// --- -// TODO: merge w/ PodExecOptions below for stdin, stdout, etc -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -type PodAttachOptions struct { - metav1.TypeMeta `json:",inline"` - - // Stdin if true, redirects the standard input stream of the pod for this call. - // Defaults to false. - // +optional - Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` - - // Stdout if true indicates that stdout is to be redirected for the attach call. - // Defaults to true. - // +optional - Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` - - // Stderr if true indicates that stderr is to be redirected for the attach call. - // Defaults to true. - // +optional - Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` - - // TTY if true indicates that a tty will be allocated for the attach call. - // This is passed through the container runtime so the tty - // is allocated on the worker node by the container runtime. - // Defaults to false. - // +optional - TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` - - // The container in which to execute the command. - // Defaults to only container if there is only one container in the pod. - // +optional - Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodExecOptions is the query options to a Pod's remote exec call. -// --- -// TODO: This is largely identical to PodAttachOptions above, make sure they stay in sync and see about merging -// and also when we cut V2, we should export a "StreamOptions" or somesuch that contains Stdin, Stdout, Stder and TTY -type PodExecOptions struct { - metav1.TypeMeta `json:",inline"` - - // Redirect the standard input stream of the pod for this call. - // Defaults to false. - // +optional - Stdin bool `json:"stdin,omitempty" protobuf:"varint,1,opt,name=stdin"` - - // Redirect the standard output stream of the pod for this call. - // Defaults to true. - // +optional - Stdout bool `json:"stdout,omitempty" protobuf:"varint,2,opt,name=stdout"` - - // Redirect the standard error stream of the pod for this call. - // Defaults to true. - // +optional - Stderr bool `json:"stderr,omitempty" protobuf:"varint,3,opt,name=stderr"` - - // TTY if true indicates that a tty will be allocated for the exec call. - // Defaults to false. - // +optional - TTY bool `json:"tty,omitempty" protobuf:"varint,4,opt,name=tty"` - - // Container in which to execute the command. 
- // Defaults to only container if there is only one container in the pod. - // +optional - Container string `json:"container,omitempty" protobuf:"bytes,5,opt,name=container"` - - // Command is the remote command to execute. argv array. Not executed within a shell. - Command []string `json:"command" protobuf:"bytes,6,rep,name=command"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPortForwardOptions is the query options to a Pod's port forward call -// when using WebSockets. -// The `port` query parameter must specify the port or -// ports (comma separated) to forward over. -// Port forwarding over SPDY does not use these options. It requires the port -// to be passed in the `port` header as part of request. -type PodPortForwardOptions struct { - metav1.TypeMeta `json:",inline"` - - // List of ports to forward - // Required when using WebSockets - // +optional - Ports []int32 `json:"ports,omitempty" protobuf:"varint,1,rep,name=ports"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodProxyOptions is the query options to a Pod's proxy call. -type PodProxyOptions struct { - metav1.TypeMeta `json:",inline"` - - // Path is the URL path to use for the current proxy request to pod. - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NodeProxyOptions is the query options to a Node's proxy call. -type NodeProxyOptions struct { - metav1.TypeMeta `json:",inline"` - - // Path is the URL path to use for the current proxy request to node. - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ServiceProxyOptions is the query options to a Service's proxy call. -type ServiceProxyOptions struct { - metav1.TypeMeta `json:",inline"` - - // Path is the part of URLs that include service endpoints, suffixes, - // and parameters to use for the current proxy request to service. - // For example, the whole request URL is - // http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. - // Path is _search?q=user:kimchy. - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` -} - -// ObjectReference contains enough information to let you inspect or modify the referred object. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type ObjectReference struct { - // Kind of the referent. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - Kind string `json:"kind,omitempty" protobuf:"bytes,1,opt,name=kind"` - // Namespace of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/ - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,2,opt,name=namespace"` - // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,3,opt,name=name"` - // UID of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids - // +optional - UID types.UID `json:"uid,omitempty" protobuf:"bytes,4,opt,name=uid,casttype=k8s.io/apimachinery/pkg/types.UID"` - // API version of the referent. 
- // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,5,opt,name=apiVersion"` - // Specific resourceVersion to which this reference is made, if any. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency - // +optional - ResourceVersion string `json:"resourceVersion,omitempty" protobuf:"bytes,6,opt,name=resourceVersion"` - - // If referring to a piece of an object instead of an entire object, this string - // should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. - // For example, if the object reference is to a container within a pod, this would take on a value like: - // "spec.containers{name}" (where "name" refers to the name of the container that triggered - // the event) or if no container name is specified "spec.containers[2]" (container with - // index 2 in this pod). This syntax is chosen only to have some well-defined way of - // referencing a part of an object. - // TODO: this design is not final and this field is subject to change in the future. - // +optional - FieldPath string `json:"fieldPath,omitempty" protobuf:"bytes,7,opt,name=fieldPath"` -} - -// LocalObjectReference contains enough information to let you locate the -// referenced object inside the same namespace. -type LocalObjectReference struct { - // Name of the referent. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names - // TODO: Add other useful fields. apiVersion, kind, uid? - // +optional - Name string `json:"name,omitempty" protobuf:"bytes,1,opt,name=name"` -} - -// TypedLocalObjectReference contains enough information to let you locate the -// typed referenced object inside the same namespace. -type TypedLocalObjectReference struct { - // APIGroup is the group for the resource being referenced. - // If APIGroup is not specified, the specified Kind must be in the core API group. - // For any other third-party types, APIGroup is required. - // +optional - APIGroup *string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced - Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SerializedReference is a reference to serialized object. -type SerializedReference struct { - metav1.TypeMeta `json:",inline"` - // The reference to an object in the system. - // +optional - Reference ObjectReference `json:"reference,omitempty" protobuf:"bytes,1,opt,name=reference"` -} - -// EventSource contains information for an event. -type EventSource struct { - // Component from which the event is generated. - // +optional - Component string `json:"component,omitempty" protobuf:"bytes,1,opt,name=component"` - // Node name on which the event is generated. - // +optional - Host string `json:"host,omitempty" protobuf:"bytes,2,opt,name=host"` -} - -// Valid values for event types (new types could be added in future) -const ( - // Information only and will not cause any problems - EventTypeNormal string = "Normal" - // These events are to warn that something might go wrong - EventTypeWarning string = "Warning" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Event is a report of an event somewhere in the cluster. 
-type Event struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - metav1.ObjectMeta `json:"metadata" protobuf:"bytes,1,opt,name=metadata"` - - // The object that this event is about. - InvolvedObject ObjectReference `json:"involvedObject" protobuf:"bytes,2,opt,name=involvedObject"` - - // This should be a short, machine understandable string that gives the reason - // for the transition into the object's current status. - // TODO: provide exact specification for format. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,3,opt,name=reason"` - - // A human-readable description of the status of this operation. - // TODO: decide on maximum length. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,4,opt,name=message"` - - // The component reporting this event. Should be a short machine understandable string. - // +optional - Source EventSource `json:"source,omitempty" protobuf:"bytes,5,opt,name=source"` - - // The time at which the event was first recorded. (Time of server receipt is in TypeMeta.) - // +optional - FirstTimestamp metav1.Time `json:"firstTimestamp,omitempty" protobuf:"bytes,6,opt,name=firstTimestamp"` - - // The time at which the most recent occurrence of this event was recorded. - // +optional - LastTimestamp metav1.Time `json:"lastTimestamp,omitempty" protobuf:"bytes,7,opt,name=lastTimestamp"` - - // The number of times this event has occurred. - // +optional - Count int32 `json:"count,omitempty" protobuf:"varint,8,opt,name=count"` - - // Type of this event (Normal, Warning), new types could be added in the future - // +optional - Type string `json:"type,omitempty" protobuf:"bytes,9,opt,name=type"` - - // Time when this Event was first observed. - // +optional - EventTime metav1.MicroTime `json:"eventTime,omitempty" protobuf:"bytes,10,opt,name=eventTime"` - - // Data about the Event series this event represents or nil if it's a singleton Event. - // +optional - Series *EventSeries `json:"series,omitempty" protobuf:"bytes,11,opt,name=series"` - - // What action was taken/failed regarding to the Regarding object. - // +optional - Action string `json:"action,omitempty" protobuf:"bytes,12,opt,name=action"` - - // Optional secondary object for more complex actions. - // +optional - Related *ObjectReference `json:"related,omitempty" protobuf:"bytes,13,opt,name=related"` - - // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. - // +optional - ReportingController string `json:"reportingComponent" protobuf:"bytes,14,opt,name=reportingComponent"` - - // ID of the controller instance, e.g. `kubelet-xyzf`. - // +optional - ReportingInstance string `json:"reportingInstance" protobuf:"bytes,15,opt,name=reportingInstance"` -} - -// EventSeries contain information on series of events, i.e. thing that was/is happening -// continuously for some time. 
-type EventSeries struct { - // Number of occurrences in this series up to the last heartbeat time - Count int32 `json:"count,omitempty" protobuf:"varint,1,name=count"` - // Time of the last occurrence observed - LastObservedTime metav1.MicroTime `json:"lastObservedTime,omitempty" protobuf:"bytes,2,name=lastObservedTime"` - // State of this Series: Ongoing or Finished - State EventSeriesState `json:"state,omitempty" protobuf:"bytes,3,name=state"` -} - -type EventSeriesState string - -const ( - EventSeriesStateOngoing EventSeriesState = "Ongoing" - EventSeriesStateFinished EventSeriesState = "Finished" - EventSeriesStateUnknown EventSeriesState = "Unknown" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EventList is a list of events. -type EventList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of events - Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// List holds a list of objects, which may not be known by the server. -type List metav1.List - -// LimitType is a type of object that is limited -type LimitType string - -const ( - // Limit that applies to all pods in a namespace - LimitTypePod LimitType = "Pod" - // Limit that applies to all containers in a namespace - LimitTypeContainer LimitType = "Container" - // Limit that applies to all persistent volume claims in a namespace - LimitTypePersistentVolumeClaim LimitType = "PersistentVolumeClaim" -) - -// LimitRangeItem defines a min/max usage limit for any resource that matches on kind. -type LimitRangeItem struct { - // Type of resource that this limit applies to. - // +optional - Type LimitType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=LimitType"` - // Max usage constraints on this kind by resource name. - // +optional - Max ResourceList `json:"max,omitempty" protobuf:"bytes,2,rep,name=max,casttype=ResourceList,castkey=ResourceName"` - // Min usage constraints on this kind by resource name. - // +optional - Min ResourceList `json:"min,omitempty" protobuf:"bytes,3,rep,name=min,casttype=ResourceList,castkey=ResourceName"` - // Default resource requirement limit value by resource name if resource limit is omitted. - // +optional - Default ResourceList `json:"default,omitempty" protobuf:"bytes,4,rep,name=default,casttype=ResourceList,castkey=ResourceName"` - // DefaultRequest is the default resource requirement request value by resource name if resource request is omitted. - // +optional - DefaultRequest ResourceList `json:"defaultRequest,omitempty" protobuf:"bytes,5,rep,name=defaultRequest,casttype=ResourceList,castkey=ResourceName"` - // MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource. - // +optional - MaxLimitRequestRatio ResourceList `json:"maxLimitRequestRatio,omitempty" protobuf:"bytes,6,rep,name=maxLimitRequestRatio,casttype=ResourceList,castkey=ResourceName"` -} - -// LimitRangeSpec defines a min/max usage limit for resources that match on kind. -type LimitRangeSpec struct { - // Limits is the list of LimitRangeItem objects that are enforced. 
- Limits []LimitRangeItem `json:"limits" protobuf:"bytes,1,rep,name=limits"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRange sets resource usage limits for each kind of resource in a Namespace. -type LimitRange struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the limits enforced. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec LimitRangeSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// LimitRangeList is a list of LimitRange items. -type LimitRangeList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of LimitRange objects. - // More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ - Items []LimitRange `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// The following identify resource constants for Kubernetes object types -const ( - // Pods, number - ResourcePods ResourceName = "pods" - // Services, number - ResourceServices ResourceName = "services" - // ReplicationControllers, number - ResourceReplicationControllers ResourceName = "replicationcontrollers" - // ResourceQuotas, number - ResourceQuotas ResourceName = "resourcequotas" - // ResourceSecrets, number - ResourceSecrets ResourceName = "secrets" - // ResourceConfigMaps, number - ResourceConfigMaps ResourceName = "configmaps" - // ResourcePersistentVolumeClaims, number - ResourcePersistentVolumeClaims ResourceName = "persistentvolumeclaims" - // ResourceServicesNodePorts, number - ResourceServicesNodePorts ResourceName = "services.nodeports" - // ResourceServicesLoadBalancers, number - ResourceServicesLoadBalancers ResourceName = "services.loadbalancers" - // CPU request, in cores. (500m = .5 cores) - ResourceRequestsCPU ResourceName = "requests.cpu" - // Memory request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsMemory ResourceName = "requests.memory" - // Storage request, in bytes - ResourceRequestsStorage ResourceName = "requests.storage" - // Local ephemeral storage request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceRequestsEphemeralStorage ResourceName = "requests.ephemeral-storage" - // CPU limit, in cores. (500m = .5 cores) - ResourceLimitsCPU ResourceName = "limits.cpu" - // Memory limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsMemory ResourceName = "limits.memory" - // Local ephemeral storage limit, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - ResourceLimitsEphemeralStorage ResourceName = "limits.ephemeral-storage" -) - -// The following identify resource prefix for Kubernetes object types -const ( - // HugePages request, in bytes. (500Gi = 500GiB = 500 * 1024 * 1024 * 1024) - // As burst is not supported for HugePages, we would only quota its request, and ignore the limit. 
- ResourceRequestsHugePagesPrefix = "requests.hugepages-" - // Default resource requests prefix - DefaultResourceRequestsPrefix = "requests." -) - -// A ResourceQuotaScope defines a filter that must match each object tracked by a quota -type ResourceQuotaScope string - -const ( - // Match all pod objects where spec.activeDeadlineSeconds - ResourceQuotaScopeTerminating ResourceQuotaScope = "Terminating" - // Match all pod objects where !spec.activeDeadlineSeconds - ResourceQuotaScopeNotTerminating ResourceQuotaScope = "NotTerminating" - // Match all pod objects that have best effort quality of service - ResourceQuotaScopeBestEffort ResourceQuotaScope = "BestEffort" - // Match all pod objects that do not have best effort quality of service - ResourceQuotaScopeNotBestEffort ResourceQuotaScope = "NotBestEffort" - // Match all pod objects that have priority class mentioned - ResourceQuotaScopePriorityClass ResourceQuotaScope = "PriorityClass" -) - -// ResourceQuotaSpec defines the desired hard limits to enforce for Quota. -type ResourceQuotaSpec struct { - // hard is the set of desired hard limits for each named resource. - // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ - // +optional - Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` - // A collection of filters that must match each object tracked by a quota. - // If not specified, the quota matches all objects. - // +optional - Scopes []ResourceQuotaScope `json:"scopes,omitempty" protobuf:"bytes,2,rep,name=scopes,casttype=ResourceQuotaScope"` - // scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota - // but expressed using ScopeSelectorOperator in combination with possible values. - // For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched. - // +optional - ScopeSelector *ScopeSelector `json:"scopeSelector,omitempty" protobuf:"bytes,3,opt,name=scopeSelector"` -} - -// A scope selector represents the AND of the selectors represented -// by the scoped-resource selector requirements. -type ScopeSelector struct { - // A list of scope selector requirements by scope of the resources. - // +optional - MatchExpressions []ScopedResourceSelectorRequirement `json:"matchExpressions,omitempty" protobuf:"bytes,1,rep,name=matchExpressions"` -} - -// A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator -// that relates the scope name and values. -type ScopedResourceSelectorRequirement struct { - // The name of the scope that the selector applies to. - ScopeName ResourceQuotaScope `json:"scopeName" protobuf:"bytes,1,opt,name=scopeName"` - // Represents a scope's relationship to a set of values. - // Valid operators are In, NotIn, Exists, DoesNotExist. - Operator ScopeSelectorOperator `json:"operator" protobuf:"bytes,2,opt,name=operator,casttype=ScopedResourceSelectorOperator"` - // An array of string values. If the operator is In or NotIn, - // the values array must be non-empty. If the operator is Exists or DoesNotExist, - // the values array must be empty. - // This array is replaced during a strategic merge patch. - // +optional - Values []string `json:"values,omitempty" protobuf:"bytes,3,rep,name=values"` -} - -// A scope selector operator is the set of operators that can be used in -// a scope selector requirement. 
-type ScopeSelectorOperator string - -const ( - ScopeSelectorOpIn ScopeSelectorOperator = "In" - ScopeSelectorOpNotIn ScopeSelectorOperator = "NotIn" - ScopeSelectorOpExists ScopeSelectorOperator = "Exists" - ScopeSelectorOpDoesNotExist ScopeSelectorOperator = "DoesNotExist" -) - -// ResourceQuotaStatus defines the enforced hard limits and observed use. -type ResourceQuotaStatus struct { - // Hard is the set of enforced hard limits for each named resource. - // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ - // +optional - Hard ResourceList `json:"hard,omitempty" protobuf:"bytes,1,rep,name=hard,casttype=ResourceList,castkey=ResourceName"` - // Used is the current observed total usage of the resource in the namespace. - // +optional - Used ResourceList `json:"used,omitempty" protobuf:"bytes,2,rep,name=used,casttype=ResourceList,castkey=ResourceName"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuota sets aggregate quota restrictions enforced per namespace -type ResourceQuota struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the desired quota. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec ResourceQuotaSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status defines the actual enforced quota and its current usage. - // https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status ResourceQuotaStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ResourceQuotaList is a list of ResourceQuota items. -type ResourceQuotaList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ResourceQuota objects. - // More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/ - Items []ResourceQuota `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Secret holds secret data of a certain type. The total bytes of the values in -// the Data field must be less than MaxSecretSize bytes. -type Secret struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data contains the secret data. Each key must consist of alphanumeric - // characters, '-', '_' or '.'. The serialized form of the secret data is a - // base64 encoded string, representing the arbitrary (possibly non-string) - // data value here. Described in https://tools.ietf.org/html/rfc4648#section-4 - // +optional - Data map[string][]byte `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` - - // stringData allows specifying non-binary secret data in string form. - // It is provided as a write-only convenience method. 
- // All keys and values are merged into the data field on write, overwriting any existing values. - // It is never output when reading from the API. - // +k8s:conversion-gen=false - // +optional - StringData map[string]string `json:"stringData,omitempty" protobuf:"bytes,4,rep,name=stringData"` - - // Used to facilitate programmatic handling of secret data. - // +optional - Type SecretType `json:"type,omitempty" protobuf:"bytes,3,opt,name=type,casttype=SecretType"` -} - -const MaxSecretSize = 1 * 1024 * 1024 - -type SecretType string - -const ( - // SecretTypeOpaque is the default. Arbitrary user-defined data - SecretTypeOpaque SecretType = "Opaque" - - // SecretTypeServiceAccountToken contains a token that identifies a service account to the API - // - // Required fields: - // - Secret.Annotations["kubernetes.io/service-account.name"] - the name of the ServiceAccount the token identifies - // - Secret.Annotations["kubernetes.io/service-account.uid"] - the UID of the ServiceAccount the token identifies - // - Secret.Data["token"] - a token that identifies the service account to the API - SecretTypeServiceAccountToken SecretType = "kubernetes.io/service-account-token" - - // ServiceAccountNameKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountNameKey = "kubernetes.io/service-account.name" - // ServiceAccountUIDKey is the key of the required annotation for SecretTypeServiceAccountToken secrets - ServiceAccountUIDKey = "kubernetes.io/service-account.uid" - // ServiceAccountTokenKey is the key of the required data for SecretTypeServiceAccountToken secrets - ServiceAccountTokenKey = "token" - // ServiceAccountKubeconfigKey is the key of the optional kubeconfig data for SecretTypeServiceAccountToken secrets - ServiceAccountKubeconfigKey = "kubernetes.kubeconfig" - // ServiceAccountRootCAKey is the key of the optional root certificate authority for SecretTypeServiceAccountToken secrets - ServiceAccountRootCAKey = "ca.crt" - // ServiceAccountNamespaceKey is the key of the optional namespace to use as the default for namespaced API calls - ServiceAccountNamespaceKey = "namespace" - - // SecretTypeDockercfg contains a dockercfg file that follows the same format rules as ~/.dockercfg - // - // Required fields: - // - Secret.Data[".dockercfg"] - a serialized ~/.dockercfg file - SecretTypeDockercfg SecretType = "kubernetes.io/dockercfg" - - // DockerConfigKey is the key of the required data for SecretTypeDockercfg secrets - DockerConfigKey = ".dockercfg" - - // SecretTypeDockerConfigJson contains a dockercfg file that follows the same format rules as ~/.docker/config.json - // - // Required fields: - // - Secret.Data[".dockerconfigjson"] - a serialized ~/.docker/config.json file - SecretTypeDockerConfigJson SecretType = "kubernetes.io/dockerconfigjson" - - // DockerConfigJsonKey is the key of the required data for SecretTypeDockerConfigJson secrets - DockerConfigJsonKey = ".dockerconfigjson" - - // SecretTypeBasicAuth contains data needed for basic authentication. 
- // - // Required at least one of fields: - // - Secret.Data["username"] - username used for authentication - // - Secret.Data["password"] - password or token needed for authentication - SecretTypeBasicAuth SecretType = "kubernetes.io/basic-auth" - - // BasicAuthUsernameKey is the key of the username for SecretTypeBasicAuth secrets - BasicAuthUsernameKey = "username" - // BasicAuthPasswordKey is the key of the password or token for SecretTypeBasicAuth secrets - BasicAuthPasswordKey = "password" - - // SecretTypeSSHAuth contains data needed for SSH authetication. - // - // Required field: - // - Secret.Data["ssh-privatekey"] - private SSH key needed for authentication - SecretTypeSSHAuth SecretType = "kubernetes.io/ssh-auth" - - // SSHAuthPrivateKey is the key of the required SSH private key for SecretTypeSSHAuth secrets - SSHAuthPrivateKey = "ssh-privatekey" - // SecretTypeTLS contains information about a TLS client or server secret. It - // is primarily used with TLS termination of the Ingress resource, but may be - // used in other types. - // - // Required fields: - // - Secret.Data["tls.key"] - TLS private key. - // Secret.Data["tls.crt"] - TLS certificate. - // TODO: Consider supporting different formats, specifying CA/destinationCA. - SecretTypeTLS SecretType = "kubernetes.io/tls" - - // TLSCertKey is the key for tls certificates in a TLS secert. - TLSCertKey = "tls.crt" - // TLSPrivateKeyKey is the key for the private key field in a TLS secret. - TLSPrivateKeyKey = "tls.key" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// SecretList is a list of Secret. -type SecretList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of secret objects. - // More info: https://kubernetes.io/docs/concepts/configuration/secret - Items []Secret `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMap holds configuration data for pods to consume. -type ConfigMap struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Data contains the configuration data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // Values with non-UTF-8 byte sequences must use the BinaryData field. - // The keys stored in Data must not overlap with the keys in - // the BinaryData field, this is enforced during validation process. - // +optional - Data map[string]string `json:"data,omitempty" protobuf:"bytes,2,rep,name=data"` - - // BinaryData contains the binary data. - // Each key must consist of alphanumeric characters, '-', '_' or '.'. - // BinaryData can contain byte sequences that are not in the UTF-8 range. - // The keys stored in BinaryData must not overlap with the ones in - // the Data field, this is enforced during validation process. - // Using this field will require 1.10+ apiserver and - // kubelet. 
- // +optional - BinaryData map[string][]byte `json:"binaryData,omitempty" protobuf:"bytes,3,rep,name=binaryData"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ConfigMapList is a resource containing a list of ConfigMap objects. -type ConfigMapList struct { - metav1.TypeMeta `json:",inline"` - - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of ConfigMaps. - Items []ConfigMap `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// Type and constants for component health validation. -type ComponentConditionType string - -// These are the valid conditions for the component. -const ( - ComponentHealthy ComponentConditionType = "Healthy" -) - -// Information about the condition of a component. -type ComponentCondition struct { - // Type of condition for a component. - // Valid value: "Healthy" - Type ComponentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ComponentConditionType"` - // Status of the condition for a component. - // Valid values for "Healthy": "True", "False", or "Unknown". - Status ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=ConditionStatus"` - // Message about the condition for a component. - // For example, information about a health check. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,3,opt,name=message"` - // Condition error code for a component. - // For example, a health check error code. - // +optional - Error string `json:"error,omitempty" protobuf:"bytes,4,opt,name=error"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ComponentStatus (and ComponentStatusList) holds the cluster validation info. -type ComponentStatus struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of component conditions observed - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []ComponentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,2,rep,name=conditions"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Status of all the conditions for the component as a list of ComponentStatus objects. -type ComponentStatusList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of ComponentStatus objects. - Items []ComponentStatus `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// DownwardAPIVolumeSource represents a volume containing downward API info. -// Downward API volumes support ownership management and SELinux relabeling. -type DownwardAPIVolumeSource struct { - // Items is a list of downward API volume file - // +optional - Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` - // Optional: mode bits to use on created files by default. Must be a - // value between 0 and 0777. Defaults to 0644. 
- // Directories within the path are not affected by this setting. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - DefaultMode *int32 `json:"defaultMode,omitempty" protobuf:"varint,2,opt,name=defaultMode"` -} - -const ( - DownwardAPIVolumeSourceDefaultMode int32 = 0644 -) - -// DownwardAPIVolumeFile represents information to create the file containing the pod field -type DownwardAPIVolumeFile struct { - // Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..' - Path string `json:"path" protobuf:"bytes,1,opt,name=path"` - // Required: Selects a field of the pod: only annotations, labels, name and namespace are supported. - // +optional - FieldRef *ObjectFieldSelector `json:"fieldRef,omitempty" protobuf:"bytes,2,opt,name=fieldRef"` - // Selects a resource of the container: only resources limits and requests - // (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported. - // +optional - ResourceFieldRef *ResourceFieldSelector `json:"resourceFieldRef,omitempty" protobuf:"bytes,3,opt,name=resourceFieldRef"` - // Optional: mode bits to use on this file, must be a value between 0 - // and 0777. If not specified, the volume defaultMode will be used. - // This might be in conflict with other options that affect the file - // mode, like fsGroup, and the result can be other mode bits set. - // +optional - Mode *int32 `json:"mode,omitempty" protobuf:"varint,4,opt,name=mode"` -} - -// Represents downward API info for projecting into a projected volume. -// Note that this is identical to a downwardAPI volume source without the default -// mode. -type DownwardAPIProjection struct { - // Items is a list of DownwardAPIVolume file - // +optional - Items []DownwardAPIVolumeFile `json:"items,omitempty" protobuf:"bytes,1,rep,name=items"` -} - -// SecurityContext holds security configuration that will be applied to a container. -// Some fields are present in both SecurityContext and PodSecurityContext. When both -// are set, the values in SecurityContext take precedence. -type SecurityContext struct { - // The capabilities to add/drop when running containers. - // Defaults to the default set of capabilities granted by the container runtime. - // +optional - Capabilities *Capabilities `json:"capabilities,omitempty" protobuf:"bytes,1,opt,name=capabilities"` - // Run container in privileged mode. - // Processes in privileged containers are essentially equivalent to root on the host. - // Defaults to false. - // +optional - Privileged *bool `json:"privileged,omitempty" protobuf:"varint,2,opt,name=privileged"` - // The SELinux context to be applied to the container. - // If unspecified, the container runtime will allocate a random SELinux context for each - // container. May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - SELinuxOptions *SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,3,opt,name=seLinuxOptions"` - // The UID to run the entrypoint of the container process. - // Defaults to user specified in image metadata if unspecified. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. 
- // +optional - RunAsUser *int64 `json:"runAsUser,omitempty" protobuf:"varint,4,opt,name=runAsUser"` - // The GID to run the entrypoint of the container process. - // Uses runtime default if unset. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsGroup *int64 `json:"runAsGroup,omitempty" protobuf:"varint,8,opt,name=runAsGroup"` - // Indicates that the container must run as a non-root user. - // If true, the Kubelet will validate the image at runtime to ensure that it - // does not run as UID 0 (root) and fail to start the container if it does. - // If unset or false, no such validation will be performed. - // May also be set in PodSecurityContext. If set in both SecurityContext and - // PodSecurityContext, the value specified in SecurityContext takes precedence. - // +optional - RunAsNonRoot *bool `json:"runAsNonRoot,omitempty" protobuf:"varint,5,opt,name=runAsNonRoot"` - // Whether this container has a read-only root filesystem. - // Default is false. - // +optional - ReadOnlyRootFilesystem *bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,6,opt,name=readOnlyRootFilesystem"` - // AllowPrivilegeEscalation controls whether a process can gain more - // privileges than its parent process. This bool directly controls if - // the no_new_privs flag will be set on the container process. - // AllowPrivilegeEscalation is true always when the container is: - // 1) run as Privileged - // 2) has CAP_SYS_ADMIN - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,7,opt,name=allowPrivilegeEscalation"` - // procMount denotes the type of proc mount to use for the containers. - // The default is DefaultProcMount which uses the container runtime defaults for - // readonly paths and masked paths. - // This requires the ProcMountType feature flag to be enabled. - // +optional - ProcMount *ProcMountType `json:"procMount,omitempty" protobuf:"bytes,9,opt,name=procMount"` -} - -type ProcMountType string - -const ( - // DefaultProcMount uses the container runtime defaults for readonly and masked - // paths for /proc. Most container runtimes mask certain paths in /proc to avoid - // accidental security exposure of special devices or information. - DefaultProcMount ProcMountType = "Default" - - // UnmaskedProcMount bypasses the default masking behavior of the container - // runtime and ensures the newly created /proc the container stays in tact with - // no modifications. - UnmaskedProcMount ProcMountType = "Unmasked" -) - -// SELinuxOptions are the labels to be applied to the container -type SELinuxOptions struct { - // User is a SELinux user label that applies to the container. - // +optional - User string `json:"user,omitempty" protobuf:"bytes,1,opt,name=user"` - // Role is a SELinux role label that applies to the container. - // +optional - Role string `json:"role,omitempty" protobuf:"bytes,2,opt,name=role"` - // Type is a SELinux type label that applies to the container. - // +optional - Type string `json:"type,omitempty" protobuf:"bytes,3,opt,name=type"` - // Level is SELinux level label that applies to the container. - // +optional - Level string `json:"level,omitempty" protobuf:"bytes,4,opt,name=level"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RangeAllocation is not a public type. 
-type RangeAllocation struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Range is string that identifies the range represented by 'data'. - Range string `json:"range" protobuf:"bytes,2,opt,name=range"` - // Data is a bit array containing all allocated addresses in the previous segment. - Data []byte `json:"data" protobuf:"bytes,3,opt,name=data"` -} - -const ( - // "default-scheduler" is the name of default scheduler. - DefaultSchedulerName = "default-scheduler" - - // RequiredDuringScheduling affinity is not symmetric, but there is an implicit PreferredDuringScheduling affinity rule - // corresponding to every RequiredDuringScheduling affinity rule. - // When the --hard-pod-affinity-weight scheduler flag is not specified, - // DefaultHardPodAffinityWeight defines the weight of the implicit PreferredDuringScheduling affinity rule. - DefaultHardPodAffinitySymmetricWeight int32 = 1 -) - -// Sysctl defines a kernel parameter to be set -type Sysctl struct { - // Name of a property to set - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Value of a property to set - Value string `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -// NodeResources is an object for conveying resource information about a node. -// see http://releases.k8s.io/HEAD/docs/design/resources.md for more details. -type NodeResources struct { - // Capacity represents the available resources of a node - Capacity ResourceList `protobuf:"bytes,1,rep,name=capacity,casttype=ResourceList,castkey=ResourceName"` -} - -const ( - // Enable stdin for remote command execution - ExecStdinParam = "input" - // Enable stdout for remote command execution - ExecStdoutParam = "output" - // Enable stderr for remote command execution - ExecStderrParam = "error" - // Enable TTY for remote command execution - ExecTTYParam = "tty" - // Command to run for remote command execution - ExecCommandParam = "command" - - // Name of header that specifies stream type - StreamType = "streamType" - // Value for streamType header for stdin stream - StreamTypeStdin = "stdin" - // Value for streamType header for stdout stream - StreamTypeStdout = "stdout" - // Value for streamType header for stderr stream - StreamTypeStderr = "stderr" - // Value for streamType header for data stream - StreamTypeData = "data" - // Value for streamType header for error stream - StreamTypeError = "error" - // Value for streamType header for terminal resize stream - StreamTypeResize = "resize" - - // Name of header that specifies the port being forwarded - PortHeader = "port" - // Name of header that specifies a request ID used to associate the error - // and data streams for a single forwarded connection - PortForwardRequestIDHeader = "requestID" -) diff --git a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go deleted file mode 100644 index c781e54..0000000 --- a/vendor/k8s.io/api/core/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,2333 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AWSElasticBlockStoreVolumeSource = map[string]string{ - "": "Represents a Persistent Disk resource in AWS.\n\nAn AWS EBS disk must exist before mounting to a container. The disk must also be in the same AWS zone as the kubelet. An AWS EBS disk can only be mounted as read/write once. AWS EBS volumes support ownership management and SELinux relabeling.", - "volumeID": "Unique ID of the persistent disk resource in AWS (Amazon EBS volume). More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty).", - "readOnly": "Specify \"true\" to force and set the ReadOnly property in VolumeMounts to \"true\". If omitted, the default is \"false\". More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", -} - -func (AWSElasticBlockStoreVolumeSource) SwaggerDoc() map[string]string { - return map_AWSElasticBlockStoreVolumeSource -} - -var map_Affinity = map[string]string{ - "": "Affinity is a group of affinity scheduling rules.", - "nodeAffinity": "Describes node affinity scheduling rules for the pod.", - "podAffinity": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", - "podAntiAffinity": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", -} - -func (Affinity) SwaggerDoc() map[string]string { - return map_Affinity -} - -var map_AttachedVolume = map[string]string{ - "": "AttachedVolume describes a volume attached to a node", - "name": "Name of the attached volume", - "devicePath": "DevicePath represents the device path where the volume should be available", -} - -func (AttachedVolume) SwaggerDoc() map[string]string { - return map_AttachedVolume -} - -var map_AvoidPods = map[string]string{ - "": "AvoidPods describes pods that should avoid this node. 
This is the value for a Node annotation with key scheduler.alpha.kubernetes.io/preferAvoidPods and will eventually become a field of NodeStatus.", - "preferAvoidPods": "Bounded-sized list of signatures of pods that should avoid this node, sorted in timestamp order from oldest to newest. Size of the slice is unspecified.", -} - -func (AvoidPods) SwaggerDoc() map[string]string { - return map_AvoidPods -} - -var map_AzureDiskVolumeSource = map[string]string{ - "": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "diskName": "The Name of the data disk in the blob storage", - "diskURI": "The URI the data disk in the blob storage", - "cachingMode": "Host Caching mode: None, Read Only, Read Write.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "kind": "Expected values Shared: multiple blob disks per storage account Dedicated: single blob disk per storage account Managed: azure managed data disk (only in managed availability set). defaults to shared", -} - -func (AzureDiskVolumeSource) SwaggerDoc() map[string]string { - return map_AzureDiskVolumeSource -} - -var map_AzureFilePersistentVolumeSource = map[string]string{ - "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "secretName": "the name of secret that contains Azure Storage Account Name and Key", - "shareName": "Share Name", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "secretNamespace": "the namespace of the secret that contains Azure Storage Account Name and Key default is the same as the Pod", -} - -func (AzureFilePersistentVolumeSource) SwaggerDoc() map[string]string { - return map_AzureFilePersistentVolumeSource -} - -var map_AzureFileVolumeSource = map[string]string{ - "": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "secretName": "the name of secret that contains Azure Storage Account Name and Key", - "shareName": "Share Name", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", -} - -func (AzureFileVolumeSource) SwaggerDoc() map[string]string { - return map_AzureFileVolumeSource -} - -var map_Binding = map[string]string{ - "": "Binding ties one object to another; for example, a pod is bound to a node by a scheduler. Deprecated in 1.7, please use the bindings subresource of pods instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "target": "The target object that you want to bind to the standard object.", -} - -func (Binding) SwaggerDoc() map[string]string { - return map_Binding -} - -var map_CSIPersistentVolumeSource = map[string]string{ - "": "Represents storage that is managed by an external CSI volume driver (Beta feature)", - "driver": "Driver is the name of the driver to use for this volume. Required.", - "volumeHandle": "VolumeHandle is the unique volume name returned by the CSI volume plugin’s CreateVolume to refer to the volume on all subsequent calls. Required.", - "readOnly": "Optional: The value to pass to ControllerPublishVolumeRequest. Defaults to false (read/write).", - "fsType": "Filesystem type to mount. 
Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\".", - "volumeAttributes": "Attributes of the volume to publish.", - "controllerPublishSecretRef": "ControllerPublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI ControllerPublishVolume and ControllerUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodeStageSecretRef": "NodeStageSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodeStageVolume and NodeStageVolume and NodeUnstageVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", - "nodePublishSecretRef": "NodePublishSecretRef is a reference to the secret object containing sensitive information to pass to the CSI driver to complete the CSI NodePublishVolume and NodeUnpublishVolume calls. This field is optional, and may be empty if no secret is required. If the secret object contains more than one secret, all secrets are passed.", -} - -func (CSIPersistentVolumeSource) SwaggerDoc() map[string]string { - return map_CSIPersistentVolumeSource -} - -var map_Capabilities = map[string]string{ - "": "Adds and removes POSIX capabilities from running containers.", - "add": "Added capabilities", - "drop": "Removed capabilities", -} - -func (Capabilities) SwaggerDoc() map[string]string { - return map_Capabilities -} - -var map_CephFSPersistentVolumeSource = map[string]string{ - "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", -} - -func (CephFSPersistentVolumeSource) SwaggerDoc() map[string]string { - return map_CephFSPersistentVolumeSource -} - -var map_CephFSVolumeSource = map[string]string{ - "": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.", - "monitors": "Required: Monitors is a collection of Ceph monitors More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "path": "Optional: Used as the mounted root, rather than the full Ceph tree, default is /", - "user": "Optional: User is the rados user name, default is admin More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretFile": "Optional: SecretFile is the path to key ring for User, default is /etc/ceph/user.secret More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "secretRef": "Optional: SecretRef is reference to the authentication secret for User, default is empty. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/volumes/cephfs/README.md#how-to-use-it", -} - -func (CephFSVolumeSource) SwaggerDoc() map[string]string { - return map_CephFSVolumeSource -} - -var map_CinderPersistentVolumeSource = map[string]string{ - "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", -} - -func (CinderPersistentVolumeSource) SwaggerDoc() map[string]string { - return map_CinderPersistentVolumeSource -} - -var map_CinderVolumeSource = map[string]string{ - "": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.", - "volumeID": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. 
More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "secretRef": "Optional: points to a secret object containing parameters used to connect to OpenStack.", -} - -func (CinderVolumeSource) SwaggerDoc() map[string]string { - return map_CinderVolumeSource -} - -var map_ClientIPConfig = map[string]string{ - "": "ClientIPConfig represents the configurations of Client IP based session affinity.", - "timeoutSeconds": "timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours).", -} - -func (ClientIPConfig) SwaggerDoc() map[string]string { - return map_ClientIPConfig -} - -var map_ComponentCondition = map[string]string{ - "": "Information about the condition of a component.", - "type": "Type of condition for a component. Valid value: \"Healthy\"", - "status": "Status of the condition for a component. Valid values for \"Healthy\": \"True\", \"False\", or \"Unknown\".", - "message": "Message about the condition for a component. For example, information about a health check.", - "error": "Condition error code for a component. For example, a health check error code.", -} - -func (ComponentCondition) SwaggerDoc() map[string]string { - return map_ComponentCondition -} - -var map_ComponentStatus = map[string]string{ - "": "ComponentStatus (and ComponentStatusList) holds the cluster validation info.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "conditions": "List of component conditions observed", -} - -func (ComponentStatus) SwaggerDoc() map[string]string { - return map_ComponentStatus -} - -var map_ComponentStatusList = map[string]string{ - "": "Status of all the conditions for the component as a list of ComponentStatus objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ComponentStatus objects.", -} - -func (ComponentStatusList) SwaggerDoc() map[string]string { - return map_ComponentStatusList -} - -var map_ConfigMap = map[string]string{ - "": "ConfigMap holds configuration data for pods to consume.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "data": "Data contains the configuration data. Each key must consist of alphanumeric characters, '-', '_' or '.'. Values with non-UTF-8 byte sequences must use the BinaryData field. The keys stored in Data must not overlap with the keys in the BinaryData field, this is enforced during validation process.", - "binaryData": "BinaryData contains the binary data. Each key must consist of alphanumeric characters, '-', '_' or '.'. BinaryData can contain byte sequences that are not in the UTF-8 range. The keys stored in BinaryData must not overlap with the ones in the Data field, this is enforced during validation process. 
Using this field will require 1.10+ apiserver and kubelet.", -} - -func (ConfigMap) SwaggerDoc() map[string]string { - return map_ConfigMap -} - -var map_ConfigMapEnvSource = map[string]string{ - "": "ConfigMapEnvSource selects a ConfigMap to populate the environment variables with.\n\nThe contents of the target ConfigMap's Data field will represent the key-value pairs as environment variables.", - "optional": "Specify whether the ConfigMap must be defined", -} - -func (ConfigMapEnvSource) SwaggerDoc() map[string]string { - return map_ConfigMapEnvSource -} - -var map_ConfigMapKeySelector = map[string]string{ - "": "Selects a key from a ConfigMap.", - "key": "The key to select.", - "optional": "Specify whether the ConfigMap or it's key must be defined", -} - -func (ConfigMapKeySelector) SwaggerDoc() map[string]string { - return map_ConfigMapKeySelector -} - -var map_ConfigMapList = map[string]string{ - "": "ConfigMapList is a resource containing a list of ConfigMap objects.", - "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of ConfigMaps.", -} - -func (ConfigMapList) SwaggerDoc() map[string]string { - return map_ConfigMapList -} - -var map_ConfigMapNodeConfigSource = map[string]string{ - "": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.", - "namespace": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases.", - "name": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases.", - "uid": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", - "resourceVersion": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status.", - "kubeletConfigKey": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases.", -} - -func (ConfigMapNodeConfigSource) SwaggerDoc() map[string]string { - return map_ConfigMapNodeConfigSource -} - -var map_ConfigMapProjection = map[string]string{ - "": "Adapts a ConfigMap into a projected volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. Note that this is identical to a configmap volume source without the default mode.", - "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' 
path or start with '..'.",
- "optional": "Specify whether the ConfigMap or it's keys must be defined",
-}
-
-func (ConfigMapProjection) SwaggerDoc() map[string]string {
- return map_ConfigMapProjection
-}
-
-var map_ConfigMapVolumeSource = map[string]string{
- "": "Adapts a ConfigMap into a volume.\n\nThe contents of the target ConfigMap's Data field will be presented in a volume as files using the keys in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths. ConfigMap volumes support ownership management and SELinux relabeling.",
- "items": "If unspecified, each key-value pair in the Data field of the referenced ConfigMap will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the ConfigMap, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.",
- "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
- "optional": "Specify whether the ConfigMap or it's keys must be defined",
-}
-
-func (ConfigMapVolumeSource) SwaggerDoc() map[string]string {
- return map_ConfigMapVolumeSource
-}
-
-var map_Container = map[string]string{
- "": "A single application container that you want to run within a pod.",
- "name": "Name of the container specified as a DNS_LABEL. Each container in a pod must have a unique name (DNS_LABEL). Cannot be updated.",
- "image": "Docker image name. More info: https://kubernetes.io/docs/concepts/containers/images This field is optional to allow higher level config management to default or override container images in workload controllers like Deployments and StatefulSets.",
- "command": "Entrypoint array. Not executed within a shell. The docker image's ENTRYPOINT is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
- "args": "Arguments to the entrypoint. The docker image's CMD is used if this is not provided. Variable references $(VAR_NAME) are expanded using the container's environment. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Cannot be updated. More info: https://kubernetes.io/docs/tasks/inject-data-application/define-command-argument-container/#running-a-command-in-a-shell",
- "workingDir": "Container's working directory. If not specified, the container runtime's default will be used, which might be configured in the container image. Cannot be updated.",
- "ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
- "envFrom": "List of sources to populate environment variables in the container. The keys defined within a source must be a C_IDENTIFIER. All invalid keys will be reported as an event when the container is starting. When a key exists in multiple sources, the value associated with the last source will take precedence. Values defined by an Env with a duplicate key will take precedence. Cannot be updated.",
- "env": "List of environment variables to set in the container. Cannot be updated.",
- "resources": "Compute Resources required by this container. Cannot be updated. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
- "volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
- "volumeDevices": "volumeDevices is the list of block devices to be used by the container. This is an alpha feature and may change in the future.",
- "livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
- "readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes",
- "lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
- "terminationMessagePath": "Optional: Path at which the file to which the container's termination message will be written is mounted into the container's filesystem. Message written is intended to be brief final status, such as an assertion failure message. Will be truncated by the node if greater than 4096 bytes. The total message length across all containers will be limited to 12kb. Defaults to /dev/termination-log. Cannot be updated.",
- "terminationMessagePolicy": "Indicate how the termination message should be populated. File will use the contents of terminationMessagePath to populate the container status message on both success and failure. FallbackToLogsOnError will use the last chunk of container log output if the termination message file is empty and the container exited with an error. The log output is limited to 2048 bytes or 80 lines, whichever is smaller. Defaults to File. Cannot be updated.",
- "imagePullPolicy": "Image pull policy. One of Always, Never, IfNotPresent. Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. More info: https://kubernetes.io/docs/concepts/containers/images#updating-images",
- "securityContext": "Security options the pod should run with. More info: https://kubernetes.io/docs/concepts/policy/security-context/ More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/",
- "stdin": "Whether this container should allocate a buffer for stdin in the container runtime. If this is not set, reads from stdin in the container will always result in EOF. Default is false.",
- "stdinOnce": "Whether the container runtime should close the stdin channel after it has been opened by a single attach. When stdin is true the stdin stream will remain open across multiple attach sessions. If stdinOnce is set to true, stdin is opened on container start, is empty until the first client attaches to stdin, and then remains open and accepts data until the client disconnects, at which time stdin is closed and remains closed until the container is restarted. If this flag is false, a container processes that reads from stdin will never receive an EOF. Default is false",
- "tty": "Whether this container should allocate a TTY for itself, also requires 'stdin' to be true. Default is false.",
-}
-
-func (Container) SwaggerDoc() map[string]string {
- return map_Container
-}
-
-var map_ContainerImage = map[string]string{
- "": "Describe a container image",
- "names": "Names by which this image is known. e.g. [\"k8s.gcr.io/hyperkube:v1.0.7\", \"dockerhub.io/google_containers/hyperkube:v1.0.7\"]",
- "sizeBytes": "The size of the image in bytes.",
-}
-
-func (ContainerImage) SwaggerDoc() map[string]string {
- return map_ContainerImage
-}
-
-var map_ContainerPort = map[string]string{
- "": "ContainerPort represents a network port in a single container.",
- "name": "If specified, this must be an IANA_SVC_NAME and unique within the pod. Each named port in a pod must have a unique name. Name for the port that can be referred to by services.",
- "hostPort": "Number of port to expose on the host. If specified, this must be a valid port number, 0 < x < 65536. If HostNetwork is specified, this must match ContainerPort. Most containers do not need this.",
- "containerPort": "Number of port to expose on the pod's IP address. This must be a valid port number, 0 < x < 65536.",
- "protocol": "Protocol for port. Must be UDP, TCP, or SCTP. Defaults to \"TCP\".",
- "hostIP": "What host IP to bind the external port to.",
-}
-
-func (ContainerPort) SwaggerDoc() map[string]string {
- return map_ContainerPort
-}
-
-var map_ContainerState = map[string]string{
- "": "ContainerState holds a possible state of container. Only one of its members may be specified. If none of them is specified, the default one is ContainerStateWaiting.",
- "waiting": "Details about a waiting container",
- "running": "Details about a running container",
- "terminated": "Details about a terminated container",
-}
-
-func (ContainerState) SwaggerDoc() map[string]string {
- return map_ContainerState
-}
-
-var map_ContainerStateRunning = map[string]string{
- "": "ContainerStateRunning is a running state of a container.",
- "startedAt": "Time at which the container was last (re-)started",
-}
-
-func (ContainerStateRunning) SwaggerDoc() map[string]string {
- return map_ContainerStateRunning
-}
-
-var map_ContainerStateTerminated = map[string]string{
- "": "ContainerStateTerminated is a terminated state of a container.",
- "exitCode": "Exit status from the last termination of the container",
- "signal": "Signal from the last termination of the container",
- "reason": "(brief) reason from the last termination of the container",
- "message": "Message regarding the last termination of the container",
- "startedAt": "Time at which previous execution of the container started",
- "finishedAt": "Time at which the container last terminated",
- "containerID": "Container's ID in the format 'docker://'",
-}
-
-func (ContainerStateTerminated) SwaggerDoc() map[string]string {
- return map_ContainerStateTerminated
-}
-
-var map_ContainerStateWaiting = map[string]string{
- "": "ContainerStateWaiting is a waiting state of a container.",
- "reason": "(brief) reason the container is not yet running.",
- "message": "Message regarding why the container is not yet running.",
-}
-
-func (ContainerStateWaiting) SwaggerDoc() map[string]string {
- return map_ContainerStateWaiting
-}
-
-var map_ContainerStatus = map[string]string{
- "": "ContainerStatus contains details for the current status of this container.",
- "name": "This must be a DNS_LABEL. Each container in a pod must have a unique name. Cannot be updated.",
- "state": "Details about the container's current condition.",
- "lastState": "Details about the container's last termination condition.",
- "ready": "Specifies whether the container has passed its readiness probe.",
- "restartCount": "The number of times the container has been restarted, currently based on the number of dead containers that have not yet been removed. Note that this is calculated from dead containers. But those containers are subject to garbage collection. This value will get capped at 5 by GC.",
- "image": "The image the container is running. More info: https://kubernetes.io/docs/concepts/containers/images",
- "imageID": "ImageID of the container's image.",
- "containerID": "Container's ID in the format 'docker://'.",
-}
-
-func (ContainerStatus) SwaggerDoc() map[string]string {
- return map_ContainerStatus
-}
-
-var map_DaemonEndpoint = map[string]string{
- "": "DaemonEndpoint contains information about a single Daemon endpoint.",
- "Port": "Port number of the given endpoint.",
-}
-
-func (DaemonEndpoint) SwaggerDoc() map[string]string {
- return map_DaemonEndpoint
-}
-
-var map_DownwardAPIProjection = map[string]string{
- "": "Represents downward API info for projecting into a projected volume. Note that this is identical to a downwardAPI volume source without the default mode.",
- "items": "Items is a list of DownwardAPIVolume file",
-}
-
-func (DownwardAPIProjection) SwaggerDoc() map[string]string {
- return map_DownwardAPIProjection
-}
-
-var map_DownwardAPIVolumeFile = map[string]string{
- "": "DownwardAPIVolumeFile represents information to create the file containing the pod field",
- "path": "Required: Path is the relative path name of the file to be created. Must not be absolute or contain the '..' path. Must be utf-8 encoded. The first item of the relative path must not start with '..'",
- "fieldRef": "Required: Selects a field of the pod: only annotations, labels, name and namespace are supported.",
- "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, requests.cpu and requests.memory) are currently supported.",
- "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
-}
-
-func (DownwardAPIVolumeFile) SwaggerDoc() map[string]string {
- return map_DownwardAPIVolumeFile
-}
-
-var map_DownwardAPIVolumeSource = map[string]string{
- "": "DownwardAPIVolumeSource represents a volume containing downward API info. Downward API volumes support ownership management and SELinux relabeling.",
- "items": "Items is a list of downward API volume file",
- "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
-}
-
-func (DownwardAPIVolumeSource) SwaggerDoc() map[string]string {
- return map_DownwardAPIVolumeSource
-}
-
-var map_EmptyDirVolumeSource = map[string]string{
- "": "Represents an empty directory for a pod. Empty directory volumes support ownership management and SELinux relabeling.",
- "medium": "What type of storage medium should back this directory. The default is \"\" which means to use the node's default medium. Must be an empty string (default) or Memory. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir",
- "sizeLimit": "Total amount of local storage required for this EmptyDir volume. The size limit is also applicable for memory medium. The maximum usage on memory medium EmptyDir would be the minimum value between the SizeLimit specified here and the sum of memory limits of all containers in a pod. The default is nil which means that the limit is undefined. More info: http://kubernetes.io/docs/user-guide/volumes#emptydir",
-}
-
-func (EmptyDirVolumeSource) SwaggerDoc() map[string]string {
- return map_EmptyDirVolumeSource
-}
-
-var map_EndpointAddress = map[string]string{
- "": "EndpointAddress is a tuple that describes single IP address.",
- "ip": "The IP of this endpoint. May not be loopback (127.0.0.0/8), link-local (169.254.0.0/16), or link-local multicast ((224.0.0.0/24). IPv6 is also accepted but not fully supported on all platforms. Also, certain kubernetes components, like kube-proxy, are not IPv6 ready.",
- "hostname": "The Hostname of this endpoint",
- "nodeName": "Optional: Node hosting this endpoint. This can be used to determine endpoints local to a node.",
- "targetRef": "Reference to object providing the endpoint.",
-}
-
-func (EndpointAddress) SwaggerDoc() map[string]string {
- return map_EndpointAddress
-}
-
-var map_EndpointPort = map[string]string{
- "": "EndpointPort is a tuple that describes a single port.",
- "name": "The name of this port (corresponds to ServicePort.Name). Must be a DNS_LABEL. Optional only if one port is defined.",
- "port": "The port number of the endpoint.",
- "protocol": "The IP protocol for this port. Must be UDP, TCP, or SCTP. Default is TCP.",
-}
-
-func (EndpointPort) SwaggerDoc() map[string]string {
- return map_EndpointPort
-}
-
-var map_EndpointSubset = map[string]string{
- "": "EndpointSubset is a group of addresses with a common set of ports. The expanded set of endpoints is the Cartesian product of Addresses x Ports. For example, given:\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n }\nThe resulting set of endpoints can be viewed as:\n a: [ 10.10.1.1:8675, 10.10.2.2:8675 ],\n b: [ 10.10.1.1:309, 10.10.2.2:309 ]",
- "addresses": "IP addresses which offer the related ports that are marked as ready. These endpoints should be considered safe for load balancers and clients to utilize.",
- "notReadyAddresses": "IP addresses which offer the related ports but are not currently marked as ready because they have not yet finished starting, have recently failed a readiness check, or have recently failed a liveness check.",
- "ports": "Port numbers available on the related IP addresses.",
-}
-
-func (EndpointSubset) SwaggerDoc() map[string]string {
- return map_EndpointSubset
-}
-
-var map_Endpoints = map[string]string{
- "": "Endpoints is a collection of endpoints that implement the actual service. Example:\n Name: \"mysvc\",\n Subsets: [\n {\n Addresses: [{\"ip\": \"10.10.1.1\"}, {\"ip\": \"10.10.2.2\"}],\n Ports: [{\"name\": \"a\", \"port\": 8675}, {\"name\": \"b\", \"port\": 309}]\n },\n {\n Addresses: [{\"ip\": \"10.10.3.3\"}],\n Ports: [{\"name\": \"a\", \"port\": 93}, {\"name\": \"b\", \"port\": 76}]\n },\n ]",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "subsets": "The set of all endpoints is the union of all subsets. Addresses are placed into subsets according to the IPs they share. A single address with multiple ports, some of which are ready and some of which are not (because they come from different containers) will result in the address being displayed in different subsets for the different ports. No address will appear in both Addresses and NotReadyAddresses in the same subset. Sets of addresses and ports that comprise a service.",
-}
-
-func (Endpoints) SwaggerDoc() map[string]string {
- return map_Endpoints
-}
-
-var map_EndpointsList = map[string]string{
- "": "EndpointsList is a list of endpoints.",
- "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "items": "List of endpoints.",
-}
-
-func (EndpointsList) SwaggerDoc() map[string]string {
- return map_EndpointsList
-}
-
-var map_EnvFromSource = map[string]string{
- "": "EnvFromSource represents the source of a set of ConfigMaps",
- "prefix": "An optional identifier to prepend to each key in the ConfigMap. Must be a C_IDENTIFIER.",
- "configMapRef": "The ConfigMap to select from",
- "secretRef": "The Secret to select from",
-}
-
-func (EnvFromSource) SwaggerDoc() map[string]string {
- return map_EnvFromSource
-}
-
-var map_EnvVar = map[string]string{
- "": "EnvVar represents an environment variable present in a Container.",
- "name": "Name of the environment variable. Must be a C_IDENTIFIER.",
- "value": "Variable references $(VAR_NAME) are expanded using the previous defined environment variables in the container and any service environment variables. If a variable cannot be resolved, the reference in the input string will be unchanged. The $(VAR_NAME) syntax can be escaped with a double $$, ie: $$(VAR_NAME). Escaped references will never be expanded, regardless of whether the variable exists or not. Defaults to \"\".",
- "valueFrom": "Source for the environment variable's value. Cannot be used if value is not empty.",
-}
-
-func (EnvVar) SwaggerDoc() map[string]string {
- return map_EnvVar
-}
-
-var map_EnvVarSource = map[string]string{
- "": "EnvVarSource represents a source for the value of an EnvVar.",
- "fieldRef": "Selects a field of the pod: supports metadata.name, metadata.namespace, metadata.labels, metadata.annotations, spec.nodeName, spec.serviceAccountName, status.hostIP, status.podIP.",
- "resourceFieldRef": "Selects a resource of the container: only resources limits and requests (limits.cpu, limits.memory, limits.ephemeral-storage, requests.cpu, requests.memory and requests.ephemeral-storage) are currently supported.",
- "configMapKeyRef": "Selects a key of a ConfigMap.",
- "secretKeyRef": "Selects a key of a secret in the pod's namespace",
-}
-
-func (EnvVarSource) SwaggerDoc() map[string]string {
- return map_EnvVarSource
-}
-
-var map_Event = map[string]string{
- "": "Event is a report of an event somewhere in the cluster.",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "involvedObject": "The object that this event is about.",
- "reason": "This should be a short, machine understandable string that gives the reason for the transition into the object's current status.",
- "message": "A human-readable description of the status of this operation.",
- "source": "The component reporting this event. Should be a short machine understandable string.",
- "firstTimestamp": "The time at which the event was first recorded. (Time of server receipt is in TypeMeta.)",
- "lastTimestamp": "The time at which the most recent occurrence of this event was recorded.",
- "count": "The number of times this event has occurred.",
- "type": "Type of this event (Normal, Warning), new types could be added in the future",
- "eventTime": "Time when this Event was first observed.",
- "series": "Data about the Event series this event represents or nil if it's a singleton Event.",
- "action": "What action was taken/failed regarding to the Regarding object.",
- "related": "Optional secondary object for more complex actions.",
- "reportingComponent": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.",
- "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.",
-}
-
-func (Event) SwaggerDoc() map[string]string {
- return map_Event
-}
-
-var map_EventList = map[string]string{
- "": "EventList is a list of events.",
- "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "items": "List of events",
-}
-
-func (EventList) SwaggerDoc() map[string]string {
- return map_EventList
-}
-
-var map_EventSeries = map[string]string{
- "": "EventSeries contain information on series of events, i.e. thing that was/is happening continuously for some time.",
- "count": "Number of occurrences in this series up to the last heartbeat time",
- "lastObservedTime": "Time of the last occurrence observed",
- "state": "State of this Series: Ongoing or Finished",
-}
-
-func (EventSeries) SwaggerDoc() map[string]string {
- return map_EventSeries
-}
-
-var map_EventSource = map[string]string{
- "": "EventSource contains information for an event.",
- "component": "Component from which the event is generated.",
- "host": "Node name on which the event is generated.",
-}
-
-func (EventSource) SwaggerDoc() map[string]string {
- return map_EventSource
-}
-
-var map_ExecAction = map[string]string{
- "": "ExecAction describes a \"run in container\" action.",
- "command": "Command is the command line to execute inside the container, the working directory for the command is root ('/') in the container's filesystem. The command is simply exec'd, it is not run inside a shell, so traditional shell instructions ('|', etc) won't work. To use a shell, you need to explicitly call out to that shell. Exit status of 0 is treated as live/healthy and non-zero is unhealthy.",
-}
-
-func (ExecAction) SwaggerDoc() map[string]string {
- return map_ExecAction
-}
-
-var map_FCVolumeSource = map[string]string{
- "": "Represents a Fibre Channel volume. Fibre Channel volumes can only be mounted as read/write once. Fibre Channel volumes support ownership management and SELinux relabeling.",
- "targetWWNs": "Optional: FC target worldwide names (WWNs)",
- "lun": "Optional: FC target lun number",
- "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
- "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
- "wwids": "Optional: FC volume world wide identifiers (wwids) Either wwids or combination of targetWWNs and lun must be set, but not both simultaneously.",
-}
-
-func (FCVolumeSource) SwaggerDoc() map[string]string {
- return map_FCVolumeSource
-}
-
-var map_FlexPersistentVolumeSource = map[string]string{
- "": "FlexPersistentVolumeSource represents a generic persistent volume resource that is provisioned/attached using an exec based plugin.",
- "driver": "Driver is the name of the driver to use for this volume.",
- "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
- "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
- "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
- "options": "Optional: Extra command options if any.",
-}
-
-func (FlexPersistentVolumeSource) SwaggerDoc() map[string]string {
- return map_FlexPersistentVolumeSource
-}
-
-var map_FlexVolumeSource = map[string]string{
- "": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
- "driver": "Driver is the name of the driver to use for this volume.",
- "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default filesystem depends on FlexVolume script.",
- "secretRef": "Optional: SecretRef is reference to the secret object containing sensitive information to pass to the plugin scripts. This may be empty if no secret object is specified. If the secret object contains more than one secret, all secrets are passed to the plugin scripts.",
- "readOnly": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.",
- "options": "Optional: Extra command options if any.",
-}
-
-func (FlexVolumeSource) SwaggerDoc() map[string]string {
- return map_FlexVolumeSource
-}
-
-var map_FlockerVolumeSource = map[string]string{
- "": "Represents a Flocker volume mounted by the Flocker agent. One and only one of datasetName and datasetUUID should be set. Flocker volumes do not support ownership management or SELinux relabeling.",
- "datasetName": "Name of the dataset stored as metadata -> name on the dataset for Flocker should be considered as deprecated",
- "datasetUUID": "UUID of the dataset. This is unique identifier of a Flocker dataset",
-}
-
-func (FlockerVolumeSource) SwaggerDoc() map[string]string {
- return map_FlockerVolumeSource
-}
-
-var map_GCEPersistentDiskVolumeSource = map[string]string{
- "": "Represents a Persistent Disk resource in Google Compute Engine.\n\nA GCE PD must exist before mounting to a container. The disk must also be in the same GCE project and zone as the kubelet. A GCE PD can only be mounted as read/write once or read-only many times. GCE PDs support ownership management and SELinux relabeling.",
- "pdName": "Unique name of the PD resource in GCE. Used to identify the disk in GCE. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
- "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
- "partition": "The partition in the volume that you want to mount. If omitted, the default is to mount by volume name. Examples: For volume /dev/sda1, you specify the partition as \"1\". Similarly, the volume partition for /dev/sda is \"0\" (or you can leave the property empty). More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
- "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
-}
-
-func (GCEPersistentDiskVolumeSource) SwaggerDoc() map[string]string {
- return map_GCEPersistentDiskVolumeSource
-}
-
-var map_GitRepoVolumeSource = map[string]string{
- "": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
- "repository": "Repository URL",
- "revision": "Commit hash for the specified revision.",
- "directory": "Target directory name. Must not contain or start with '..'. If '.' is supplied, the volume directory will be the git repository. Otherwise, if specified, the volume will contain the git repository in the subdirectory with the given name.",
-}
-
-func (GitRepoVolumeSource) SwaggerDoc() map[string]string {
- return map_GitRepoVolumeSource
-}
-
-var map_GlusterfsVolumeSource = map[string]string{
- "": "Represents a Glusterfs mount that lasts the lifetime of a pod. Glusterfs volumes do not support ownership management or SELinux relabeling.",
- "endpoints": "EndpointsName is the endpoint name that details Glusterfs topology. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
- "path": "Path is the Glusterfs volume path. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
- "readOnly": "ReadOnly here will force the Glusterfs volume to be mounted with read-only permissions. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md#create-a-pod",
-}
-
-func (GlusterfsVolumeSource) SwaggerDoc() map[string]string {
- return map_GlusterfsVolumeSource
-}
-
-var map_HTTPGetAction = map[string]string{
- "": "HTTPGetAction describes an action based on HTTP Get requests.",
- "path": "Path to access on the HTTP server.",
- "port": "Name or number of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.",
- "host": "Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.",
- "scheme": "Scheme to use for connecting to the host. Defaults to HTTP.",
- "httpHeaders": "Custom headers to set in the request. HTTP allows repeated headers.",
-}
-
-func (HTTPGetAction) SwaggerDoc() map[string]string {
- return map_HTTPGetAction
-}
-
-var map_HTTPHeader = map[string]string{
- "": "HTTPHeader describes a custom header to be used in HTTP probes",
- "name": "The header field name",
- "value": "The header field value",
-}
-
-func (HTTPHeader) SwaggerDoc() map[string]string {
- return map_HTTPHeader
-}
-
-var map_Handler = map[string]string{
- "": "Handler defines a specific action that should be taken",
- "exec": "One and only one of the following should be specified. Exec specifies the action to take.",
- "httpGet": "HTTPGet specifies the http request to perform.",
- "tcpSocket": "TCPSocket specifies an action involving a TCP port. TCP hooks not yet supported",
-}
-
-func (Handler) SwaggerDoc() map[string]string {
- return map_Handler
-}
-
-var map_HostAlias = map[string]string{
- "": "HostAlias holds the mapping between IP and hostnames that will be injected as an entry in the pod's hosts file.",
- "ip": "IP address of the host file entry.",
- "hostnames": "Hostnames for the above IP address.",
-}
-
-func (HostAlias) SwaggerDoc() map[string]string {
- return map_HostAlias
-}
-
-var map_HostPathVolumeSource = map[string]string{
- "": "Represents a host path mapped into a pod. Host path volumes do not support ownership management or SELinux relabeling.",
- "path": "Path of the directory on the host. If the path is a symlink, it will follow the link to the real path. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
- "type": "Type for HostPath Volume Defaults to \"\" More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
-}
-
-func (HostPathVolumeSource) SwaggerDoc() map[string]string {
- return map_HostPathVolumeSource
-}
-
-var map_ISCSIPersistentVolumeSource = map[string]string{
- "": "ISCSIPersistentVolumeSource represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
- "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
- "iqn": "Target iSCSI Qualified Name.",
- "lun": "iSCSI Target Lun number.",
- "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
- "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
- "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
- "portals": "iSCSI Target Portal List. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
- "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication",
- "chapAuthSession": "whether support iSCSI Session CHAP authentication",
- "secretRef": "CHAP Secret for iSCSI target and initiator authentication",
- "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.",
-}
-
-func (ISCSIPersistentVolumeSource) SwaggerDoc() map[string]string {
- return map_ISCSIPersistentVolumeSource
-}
-
-var map_ISCSIVolumeSource = map[string]string{
- "": "Represents an ISCSI disk. ISCSI volumes can only be mounted as read/write once. ISCSI volumes support ownership management and SELinux relabeling.",
- "targetPortal": "iSCSI Target Portal. The Portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
- "iqn": "Target iSCSI Qualified Name.",
- "lun": "iSCSI Target Lun number.",
- "iscsiInterface": "iSCSI Interface Name that uses an iSCSI transport. Defaults to 'default' (tcp).",
- "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#iscsi",
- "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false.",
- "portals": "iSCSI Target Portal List. The portal is either an IP or ip_addr:port if the port is other than default (typically TCP ports 860 and 3260).",
- "chapAuthDiscovery": "whether support iSCSI Discovery CHAP authentication",
- "chapAuthSession": "whether support iSCSI Session CHAP authentication",
- "secretRef": "CHAP Secret for iSCSI target and initiator authentication",
- "initiatorName": "Custom iSCSI Initiator Name. If initiatorName is specified with iscsiInterface simultaneously, new iSCSI interface : will be created for the connection.",
-}
-
-func (ISCSIVolumeSource) SwaggerDoc() map[string]string {
- return map_ISCSIVolumeSource
-}
-
-var map_KeyToPath = map[string]string{
- "": "Maps a string key to a path within a volume.",
- "key": "The key to project.",
- "path": "The relative path of the file to map the key to. May not be an absolute path. May not contain the path element '..'. May not start with the string '..'.",
- "mode": "Optional: mode bits to use on this file, must be a value between 0 and 0777. If not specified, the volume defaultMode will be used. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.",
-}
-
-func (KeyToPath) SwaggerDoc() map[string]string {
- return map_KeyToPath
-}
-
-var map_Lifecycle = map[string]string{
- "": "Lifecycle describes actions that the management system should take in response to container lifecycle events. For the PostStart and PreStop lifecycle handlers, management of the container blocks until the action is complete, unless the container process fails, in which case the handler is aborted.",
- "postStart": "PostStart is called immediately after a container is created. If the handler fails, the container is terminated and restarted according to its restart policy. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
- "preStop": "PreStop is called immediately before a container is terminated. The container is terminated after the handler completes. The reason for termination is passed to the handler. Regardless of the outcome of the handler, the container is eventually terminated. Other management of the container blocks until the hook completes. More info: https://kubernetes.io/docs/concepts/containers/container-lifecycle-hooks/#container-hooks",
-}
-
-func (Lifecycle) SwaggerDoc() map[string]string {
- return map_Lifecycle
-}
-
-var map_LimitRange = map[string]string{
- "": "LimitRange sets resource usage limits for each kind of resource in a Namespace.",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "spec": "Spec defines the limits enforced. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
-}
-
-func (LimitRange) SwaggerDoc() map[string]string {
- return map_LimitRange
-}
-
-var map_LimitRangeItem = map[string]string{
- "": "LimitRangeItem defines a min/max usage limit for any resource that matches on kind.",
- "type": "Type of resource that this limit applies to.",
- "max": "Max usage constraints on this kind by resource name.",
- "min": "Min usage constraints on this kind by resource name.",
- "default": "Default resource requirement limit value by resource name if resource limit is omitted.",
- "defaultRequest": "DefaultRequest is the default resource requirement request value by resource name if resource request is omitted.",
- "maxLimitRequestRatio": "MaxLimitRequestRatio if specified, the named resource must have a request and limit that are both non-zero where limit divided by request is less than or equal to the enumerated value; this represents the max burst for the named resource.",
-}
-
-func (LimitRangeItem) SwaggerDoc() map[string]string {
- return map_LimitRangeItem
-}
-
-var map_LimitRangeList = map[string]string{
- "": "LimitRangeList is a list of LimitRange items.",
- "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "items": "Items is a list of LimitRange objects. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/",
-}
-
-func (LimitRangeList) SwaggerDoc() map[string]string {
- return map_LimitRangeList
-}
-
-var map_LimitRangeSpec = map[string]string{
- "": "LimitRangeSpec defines a min/max usage limit for resources that match on kind.",
- "limits": "Limits is the list of LimitRangeItem objects that are enforced.",
-}
-
-func (LimitRangeSpec) SwaggerDoc() map[string]string {
- return map_LimitRangeSpec
-}
-
-var map_LoadBalancerIngress = map[string]string{
- "": "LoadBalancerIngress represents the status of a load-balancer ingress point: traffic intended for the service should be sent to an ingress point.",
- "ip": "IP is set for load-balancer ingress points that are IP based (typically GCE or OpenStack load-balancers)",
- "hostname": "Hostname is set for load-balancer ingress points that are DNS based (typically AWS load-balancers)",
-}
-
-func (LoadBalancerIngress) SwaggerDoc() map[string]string {
- return map_LoadBalancerIngress
-}
-
-var map_LoadBalancerStatus = map[string]string{
- "": "LoadBalancerStatus represents the status of a load-balancer.",
- "ingress": "Ingress is a list containing ingress points for the load-balancer. Traffic intended for the service should be sent to these ingress points.",
-}
-
-func (LoadBalancerStatus) SwaggerDoc() map[string]string {
- return map_LoadBalancerStatus
-}
-
-var map_LocalObjectReference = map[string]string{
- "": "LocalObjectReference contains enough information to let you locate the referenced object inside the same namespace.",
- "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
-}
-
-func (LocalObjectReference) SwaggerDoc() map[string]string {
- return map_LocalObjectReference
-}
-
-var map_LocalVolumeSource = map[string]string{
- "": "Local represents directly-attached storage with node affinity (Beta feature)",
- "path": "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...).",
- "fsType": "Filesystem type to mount. It applies only when the Path is a block device. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". The default value is to auto-select a fileystem if unspecified.",
-}
-
-func (LocalVolumeSource) SwaggerDoc() map[string]string {
- return map_LocalVolumeSource
-}
-
-var map_NFSVolumeSource = map[string]string{
- "": "Represents an NFS mount that lasts the lifetime of a pod. NFS volumes do not support ownership management or SELinux relabeling.",
- "server": "Server is the hostname or IP address of the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
- "path": "Path that is exported by the NFS server. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
- "readOnly": "ReadOnly here will force the NFS export to be mounted with read-only permissions. Defaults to false. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
-}
-
-func (NFSVolumeSource) SwaggerDoc() map[string]string {
- return map_NFSVolumeSource
-}
-
-var map_Namespace = map[string]string{
- "": "Namespace provides a scope for Names. Use of multiple namespaces is optional.",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "spec": "Spec defines the behavior of the Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
- "status": "Status describes the current status of a Namespace. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
-}
-
-func (Namespace) SwaggerDoc() map[string]string {
- return map_Namespace
-}
-
-var map_NamespaceList = map[string]string{
- "": "NamespaceList is a list of Namespaces.",
- "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "items": "Items is the list of Namespace objects in the list. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
-}
-
-func (NamespaceList) SwaggerDoc() map[string]string {
- return map_NamespaceList
-}
-
-var map_NamespaceSpec = map[string]string{
- "": "NamespaceSpec describes the attributes on a Namespace.",
- "finalizers": "Finalizers is an opaque list of values that must be empty to permanently remove object from storage. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
-}
-
-func (NamespaceSpec) SwaggerDoc() map[string]string {
- return map_NamespaceSpec
-}
-
-var map_NamespaceStatus = map[string]string{
- "": "NamespaceStatus is information about the current status of a Namespace.",
- "phase": "Phase is the current lifecycle phase of the namespace. More info: https://kubernetes.io/docs/tasks/administer-cluster/namespaces/",
-}
-
-func (NamespaceStatus) SwaggerDoc() map[string]string {
- return map_NamespaceStatus
-}
-
-var map_Node = map[string]string{
- "": "Node is a worker node in Kubernetes. Each node will have a unique identifier in the cache (i.e. in etcd).",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "spec": "Spec defines the behavior of a node. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
- "status": "Most recently observed status of the node. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
-}
-
-func (Node) SwaggerDoc() map[string]string {
- return map_Node
-}
-
-var map_NodeAddress = map[string]string{
- "": "NodeAddress contains information for the node's address.",
- "type": "Node address type, one of Hostname, ExternalIP or InternalIP.",
- "address": "The node address.",
-}
-
-func (NodeAddress) SwaggerDoc() map[string]string {
- return map_NodeAddress
-}
-
-var map_NodeAffinity = map[string]string{
- "": "Node affinity is a group of node affinity scheduling rules.",
- "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.",
- "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.",
-}
-
-func (NodeAffinity) SwaggerDoc() map[string]string {
- return map_NodeAffinity
-}
-
-var map_NodeCondition = map[string]string{
- "": "NodeCondition contains condition information for a node.",
- "type": "Type of node condition.",
- "status": "Status of the condition, one of True, False, Unknown.",
- "lastHeartbeatTime": "Last time we got an update on a given condition.",
- "lastTransitionTime": "Last time the condition transit from one status to another.",
- "reason": "(brief) reason for the condition's last transition.",
- "message": "Human readable message indicating details about last transition.",
-}
-
-func (NodeCondition) SwaggerDoc() map[string]string {
- return map_NodeCondition
-}
-
-var map_NodeConfigSource = map[string]string{
- "": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.",
- "configMap": "ConfigMap is a reference to a Node's ConfigMap",
-}
-
-func (NodeConfigSource) SwaggerDoc() map[string]string {
- return map_NodeConfigSource
-}
-
-var map_NodeConfigStatus = map[string]string{
- "": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
- "assigned": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned.",
- "active": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error.",
- "lastKnownGood": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future.",
- "error": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions.",
-}
-
-func (NodeConfigStatus) SwaggerDoc() map[string]string {
- return map_NodeConfigStatus
-}
-
-var map_NodeDaemonEndpoints = map[string]string{
- "": "NodeDaemonEndpoints lists ports opened by daemons running on the Node.",
- "kubeletEndpoint": "Endpoint on which Kubelet is listening.",
-}
-
-func (NodeDaemonEndpoints) SwaggerDoc() map[string]string {
- return map_NodeDaemonEndpoints
-}
-
-var map_NodeList = map[string]string{
- "": "NodeList is the whole list of all Nodes which have been registered with master.",
- "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "items": "List of nodes",
-}
-
-func (NodeList) SwaggerDoc() map[string]string {
- return map_NodeList
-}
-
-var map_NodeProxyOptions = map[string]string{
- "": "NodeProxyOptions is the query options to a Node's proxy call.",
- "path": "Path is the URL path to use for the current proxy request to node.",
-}
-
-func (NodeProxyOptions) SwaggerDoc() map[string]string {
- return map_NodeProxyOptions
-}
-
-var map_NodeResources = map[string]string{
- "": "NodeResources is an object for conveying resource information about a node. see http://releases.k8s.io/HEAD/docs/design/resources.md for more details.",
- "Capacity": "Capacity represents the available resources of a node",
-}
-
-func (NodeResources) SwaggerDoc() map[string]string {
- return map_NodeResources
-}
-
-var map_NodeSelector = map[string]string{
- "": "A node selector represents the union of the results of one or more label queries over a set of nodes; that is, it represents the OR of the selectors represented by the node selector terms.",
- "nodeSelectorTerms": "Required. A list of node selector terms. The terms are ORed.",
-}
-
-func (NodeSelector) SwaggerDoc() map[string]string {
- return map_NodeSelector
-}
-
-var map_NodeSelectorRequirement = map[string]string{
- "": "A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.",
- "key": "The label key that the selector applies to.",
- "operator": "Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.",
- "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.",
-}
-
-func (NodeSelectorRequirement) SwaggerDoc() map[string]string {
- return map_NodeSelectorRequirement
-}
-
-var map_NodeSelectorTerm = map[string]string{
- "": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.",
- "matchExpressions": "A list of node selector requirements by node's labels.",
- "matchFields": "A list of node selector requirements by node's fields.",
-}
-
-func (NodeSelectorTerm) SwaggerDoc() map[string]string {
- return map_NodeSelectorTerm
-}
-
-var map_NodeSpec = map[string]string{
- "": "NodeSpec describes the attributes that a node is created with.",
- "podCIDR": "PodCIDR represents the pod IP range assigned to the node.",
- "providerID": "ID of the node assigned by the cloud provider in the format: ://",
- "unschedulable": "Unschedulable controls node schedulability of new pods. By default, node is schedulable. More info: https://kubernetes.io/docs/concepts/nodes/node/#manual-node-administration",
- "taints": "If specified, the node's taints.",
- "configSource": "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field",
- "externalID": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966",
-}
-
-func (NodeSpec) SwaggerDoc() map[string]string {
- return map_NodeSpec
-}
-
-var map_NodeStatus = map[string]string{
- "": "NodeStatus is information about the current status of a node.",
- "capacity": "Capacity represents the total resources of a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
- "allocatable": "Allocatable represents the resources of a node that are available for scheduling. Defaults to Capacity.",
- "phase": "NodePhase is the recently observed lifecycle phase of the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#phase The field is never populated, and now is deprecated.",
- "conditions": "Conditions is an array of current observed node conditions. More info: https://kubernetes.io/docs/concepts/nodes/node/#condition",
- "addresses": "List of addresses reachable to the node. Queried from cloud provider, if available. More info: https://kubernetes.io/docs/concepts/nodes/node/#addresses",
- "daemonEndpoints": "Endpoints of daemons running on the Node.",
- "nodeInfo": "Set of ids/uuids to uniquely identify the node. More info: https://kubernetes.io/docs/concepts/nodes/node/#info",
- "images": "List of container images on this node",
- "volumesInUse": "List of attachable volumes in use (mounted) by the node.",
- "volumesAttached": "List of volumes that are attached to the node.",
- "config": "Status of the config assigned to the node via the dynamic Kubelet config feature.",
-}
-
-func (NodeStatus) SwaggerDoc() map[string]string {
- return map_NodeStatus
-}
-
-var map_NodeSystemInfo = map[string]string{
- "": "NodeSystemInfo is a set of ids/uuids to uniquely identify the node.",
- "machineID": "MachineID reported by the node. For unique machine identification in the cluster this field is preferred. Learn more from man(5) machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html",
- "systemUUID": "SystemUUID reported by the node. For unique machine identification MachineID is preferred. This field is specific to Red Hat hosts https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html",
- "bootID": "Boot ID reported by the node.",
- "kernelVersion": "Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).",
- "osImage": "OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).",
- "containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).",
- "kubeletVersion": "Kubelet Version reported by the node.",
- "kubeProxyVersion": "KubeProxy Version reported by the node.",
- "operatingSystem": "The Operating System reported by the node",
- "architecture": "The Architecture reported by the node",
-}
-
-func (NodeSystemInfo) SwaggerDoc() map[string]string {
- return map_NodeSystemInfo
-}
-
-var map_ObjectFieldSelector = map[string]string{
- "": "ObjectFieldSelector selects an APIVersioned field of an object.",
- "apiVersion": "Version of the schema the FieldPath is written in terms of, defaults to \"v1\".",
- "fieldPath": "Path of the field to select in the specified API version.",
-}
-
-func (ObjectFieldSelector) SwaggerDoc() map[string]string {
- return map_ObjectFieldSelector
-}
-
-var map_ObjectReference = map[string]string{
- "": "ObjectReference contains enough information to let you inspect or modify the referred object.",
- "kind": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "namespace": "Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/",
- "name": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names",
- "uid": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids",
- "apiVersion": "API version of the referent.",
- "resourceVersion": "Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency",
- "fieldPath": "If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: \"spec.containers{name}\" (where \"name\" refers to the name of the container that triggered the event) or if no container name is specified \"spec.containers[2]\" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object.",
-}
-
-func (ObjectReference) SwaggerDoc() map[string]string {
- return map_ObjectReference
-}
-
-var map_PersistentVolume = map[string]string{
- "": "PersistentVolume (PV) is a storage resource provisioned by an administrator. It is analogous to a node. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "spec": "Spec defines a specification of a persistent volume owned by the cluster. Provisioned by an administrator. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes",
- "status": "Status represents the current information/status for the persistent volume. Populated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistent-volumes",
-}
-
-func (PersistentVolume) SwaggerDoc() map[string]string {
- return map_PersistentVolume
-}
-
-var map_PersistentVolumeClaim = map[string]string{
- "": "PersistentVolumeClaim is a user's request for and claim to a persistent volume",
- "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
- "spec": "Spec defines the desired characteristics of a volume requested by a pod author. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
- "status": "Status represents the current information/status of a persistent volume claim. Read-only. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
-}
-
-func (PersistentVolumeClaim) SwaggerDoc() map[string]string {
- return map_PersistentVolumeClaim
-}
-
-var map_PersistentVolumeClaimCondition = map[string]string{
- "": "PersistentVolumeClaimCondition contails details about state of pvc",
- "lastProbeTime": "Last time we probed the condition.",
- "lastTransitionTime": "Last time the condition transitioned from one status to another.",
- "reason": "Unique, this should be a short, machine understandable string that gives the reason for condition's last transition. If it reports \"ResizeStarted\" that means the underlying persistent volume is being resized.",
- "message": "Human-readable message indicating details about last transition.",
-}
-
-func (PersistentVolumeClaimCondition) SwaggerDoc() map[string]string {
- return map_PersistentVolumeClaimCondition
-}
-
-var map_PersistentVolumeClaimList = map[string]string{
- "": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",
- "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "items": "A list of persistent volume claims. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
-}
-
-func (PersistentVolumeClaimList) SwaggerDoc() map[string]string {
- return map_PersistentVolumeClaimList
-}
-
-var map_PersistentVolumeClaimSpec = map[string]string{
- "": "PersistentVolumeClaimSpec describes the common attributes of storage devices and allows a Source for provider-specific attributes",
- "accessModes": "AccessModes contains the desired access modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
- "selector": "A label query over volumes to consider for binding.",
- "resources": "Resources represents the minimum resources the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources",
- "volumeName": "VolumeName is the binding reference to the PersistentVolume backing this claim.",
- "storageClassName": "Name of the StorageClass required by the claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1",
- "volumeMode": "volumeMode defines what type of volume is required by the claim. Value of Filesystem is implied when not included in claim spec. This is an alpha feature and may change in the future.",
- "dataSource": "This field requires the VolumeSnapshotDataSource alpha feature gate to be enabled and currently VolumeSnapshot is the only supported data source. If the provisioner can support VolumeSnapshot data source, it will create a new volume and data will be restored to the volume at the same time. If the provisioner does not support VolumeSnapshot data source, volume will not be created and the failure will be reported as an event. In the future, we plan to support more data source types and the behavior of the provisioner may change.",
-}
-
-func (PersistentVolumeClaimSpec) SwaggerDoc() map[string]string {
- return map_PersistentVolumeClaimSpec
-}
-
-var map_PersistentVolumeClaimStatus = map[string]string{
- "": "PersistentVolumeClaimStatus is the current status of a persistent volume claim.",
- "phase": "Phase represents the current phase of PersistentVolumeClaim.",
- "accessModes": "AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1",
- "capacity": "Represents the actual resources of the underlying volume.",
- "conditions": "Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'.",
-}
-
-func (PersistentVolumeClaimStatus) SwaggerDoc() map[string]string {
- return map_PersistentVolumeClaimStatus
-}
-
-var map_PersistentVolumeClaimVolumeSource = map[string]string{
- "": "PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace. This volume finds the bound PV and mounts that volume for the pod. A PersistentVolumeClaimVolumeSource is, essentially, a wrapper around another type of volume that is owned by someone else (the system).",
- "claimName": "ClaimName is the name of a PersistentVolumeClaim in the same namespace as the pod using this volume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims",
- "readOnly": "Will force the ReadOnly setting in VolumeMounts. Default false.",
-}
-
-func (PersistentVolumeClaimVolumeSource) SwaggerDoc() map[string]string {
- return map_PersistentVolumeClaimVolumeSource
-}
-
-var map_PersistentVolumeList = map[string]string{
- "": "PersistentVolumeList is a list of PersistentVolume items.",
- "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
- "items": "List of persistent volumes. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes",
-}
-
-func (PersistentVolumeList) SwaggerDoc() map[string]string {
- return map_PersistentVolumeList
-}
-
-var map_PersistentVolumeSource = map[string]string{
- "": "PersistentVolumeSource is similar to VolumeSource but meant for the administrator who creates PVs. Exactly one of its members must be set.",
- "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk",
- "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore",
- "hostPath": "HostPath represents a directory on the host. Provisioned by a developer or tester. This is useful for single-node development and testing only! On-host storage is not supported in any way and WILL NOT WORK in a multi-node cluster. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath",
- "glusterfs": "Glusterfs represents a Glusterfs volume that is attached to a host and exposed to the pod. Provisioned by an admin. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md",
- "nfs": "NFS represents an NFS mount on the host. Provisioned by an admin. More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs",
- "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md",
- "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin.",
- "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
- "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
- "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
- "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running",
- "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.",
- "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
- "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
- "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime",
- "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.",
- "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine",
- "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine",
- "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.",
- "local": "Local represents directly-attached storage with node affinity",
- "storageos": "StorageOS represents a StorageOS volume that is attached to the kubelet's host machine and mounted into the pod More info: https://releases.k8s.io/HEAD/examples/volumes/storageos/README.md",
- "csi": "CSI represents storage that handled by an external CSI driver (Beta feature).",
-}
-
-func (PersistentVolumeSource) SwaggerDoc() map[string]string {
- return map_PersistentVolumeSource
-}
-
-var map_PersistentVolumeSpec = map[string]string{
- "": "PersistentVolumeSpec is the specification of a persistent volume.",
- "capacity": "A description of the persistent volume's resources and capacity. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#capacity",
- "accessModes": "AccessModes contains all ways the volume can be mounted. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes",
- "claimRef": "ClaimRef is part of a bi-directional binding between PersistentVolume and PersistentVolumeClaim. Expected to be non-nil when bound. claim.VolumeName is the authoritative bind between PV and PVC. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#binding",
- "persistentVolumeReclaimPolicy": "What happens to a persistent volume when released from its claim. Valid options are Retain (default for manually created PersistentVolumes), Delete (default for dynamically provisioned PersistentVolumes), and Recycle (deprecated). Recycle must be supported by the volume plugin underlying this PersistentVolume. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#reclaiming",
- "storageClassName": "Name of StorageClass to which this persistent volume belongs. Empty value means that this volume does not belong to any StorageClass.",
- "mountOptions": "A list of mount options, e.g. [\"ro\", \"soft\"]. Not validated - mount will simply fail if one is invalid. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#mount-options",
- "volumeMode": "volumeMode defines if a volume is intended to be used with a formatted filesystem or to remain in raw block state. Value of Filesystem is implied when not included in spec. This is an alpha feature and may change in the future.",
- "nodeAffinity": "NodeAffinity defines constraints that limit what nodes this volume can be accessed from.
This field influences the scheduling of pods that use this volume.", -} - -func (PersistentVolumeSpec) SwaggerDoc() map[string]string { - return map_PersistentVolumeSpec -} - -var map_PersistentVolumeStatus = map[string]string{ - "": "PersistentVolumeStatus is the current status of a persistent volume.", - "phase": "Phase indicates if a volume is available, bound to a claim, or released by a claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#phase", - "message": "A human-readable message indicating details about why the volume is in this state.", - "reason": "Reason is a brief CamelCase string that describes any failure and is meant for machine parsing and tidy display in the CLI.", -} - -func (PersistentVolumeStatus) SwaggerDoc() map[string]string { - return map_PersistentVolumeStatus -} - -var map_PhotonPersistentDiskVolumeSource = map[string]string{ - "": "Represents a Photon Controller persistent disk resource.", - "pdID": "ID that identifies Photon Controller persistent disk", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", -} - -func (PhotonPersistentDiskVolumeSource) SwaggerDoc() map[string]string { - return map_PhotonPersistentDiskVolumeSource -} - -var map_Pod = map[string]string{ - "": "Pod is a collection of containers that can run on a host. This resource is created by clients and scheduled onto hosts.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (Pod) SwaggerDoc() map[string]string { - return map_Pod -} - -var map_PodAffinity = map[string]string{ - "": "Pod affinity is a group of inter pod affinity scheduling rules.", - "requiredDuringSchedulingIgnoredDuringExecution": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", -} - -func (PodAffinity) SwaggerDoc() map[string]string { - return map_PodAffinity -} - -var map_PodAffinityTerm = map[string]string{ - "": "Defines a set of pods (namely those matching the labelSelector relative to the given namespace(s)) that this pod should be co-located (affinity) or not co-located (anti-affinity) with, where co-located is defined as running on a node whose value of the label with key matches that of any node on which a pod of the set of pods is running", - "labelSelector": "A label query over a set of resources, in this case pods.", - "namespaces": "namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means \"this pod's namespace\"", - "topologyKey": "This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.", -} - -func (PodAffinityTerm) SwaggerDoc() map[string]string { - return map_PodAffinityTerm -} - -var map_PodAntiAffinity = map[string]string{ - "": "Pod anti affinity is a group of inter pod anti affinity scheduling rules.", - "requiredDuringSchedulingIgnoredDuringExecution": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "preferredDuringSchedulingIgnoredDuringExecution": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", -} - -func (PodAntiAffinity) SwaggerDoc() map[string]string { - return map_PodAntiAffinity -} - -var map_PodAttachOptions = map[string]string{ - "": "PodAttachOptions is the query options to a Pod's remote attach call.", - "stdin": "Stdin if true, redirects the standard input stream of the pod for this call. Defaults to false.", - "stdout": "Stdout if true indicates that stdout is to be redirected for the attach call. Defaults to true.", - "stderr": "Stderr if true indicates that stderr is to be redirected for the attach call. Defaults to true.", - "tty": "TTY if true indicates that a tty will be allocated for the attach call. 
This is passed through the container runtime so the tty is allocated on the worker node by the container runtime. Defaults to false.", - "container": "The container in which to execute the command. Defaults to only container if there is only one container in the pod.", -} - -func (PodAttachOptions) SwaggerDoc() map[string]string { - return map_PodAttachOptions -} - -var map_PodCondition = map[string]string{ - "": "PodCondition contains details for the current condition of this pod.", - "type": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "status": "Status is the status of the condition. Can be True, False, Unknown. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "lastProbeTime": "Last time we probed the condition.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "Unique, one-word, CamelCase reason for the condition's last transition.", - "message": "Human-readable message indicating details about last transition.", -} - -func (PodCondition) SwaggerDoc() map[string]string { - return map_PodCondition -} - -var map_PodDNSConfig = map[string]string{ - "": "PodDNSConfig defines the DNS parameters of a pod in addition to those generated from DNSPolicy.", - "nameservers": "A list of DNS name server IP addresses. This will be appended to the base nameservers generated from DNSPolicy. Duplicated nameservers will be removed.", - "searches": "A list of DNS search domains for host-name lookup. This will be appended to the base search paths generated from DNSPolicy. Duplicated search paths will be removed.", - "options": "A list of DNS resolver options. This will be merged with the base options generated from DNSPolicy. Duplicated entries will be removed. Resolution options given in Options will override those that appear in the base DNSPolicy.", -} - -func (PodDNSConfig) SwaggerDoc() map[string]string { - return map_PodDNSConfig -} - -var map_PodDNSConfigOption = map[string]string{ - "": "PodDNSConfigOption defines DNS resolver options of a pod.", - "name": "Required.", -} - -func (PodDNSConfigOption) SwaggerDoc() map[string]string { - return map_PodDNSConfigOption -} - -var map_PodExecOptions = map[string]string{ - "": "PodExecOptions is the query options to a Pod's remote exec call.", - "stdin": "Redirect the standard input stream of the pod for this call. Defaults to false.", - "stdout": "Redirect the standard output stream of the pod for this call. Defaults to true.", - "stderr": "Redirect the standard error stream of the pod for this call. Defaults to true.", - "tty": "TTY if true indicates that a tty will be allocated for the exec call. Defaults to false.", - "container": "Container in which to execute the command. Defaults to only container if there is only one container in the pod.", - "command": "Command is the remote command to execute. argv array. Not executed within a shell.", -} - -func (PodExecOptions) SwaggerDoc() map[string]string { - return map_PodExecOptions -} - -var map_PodList = map[string]string{ - "": "PodList is a list of Pods.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of pods. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md", -} - -func (PodList) SwaggerDoc() map[string]string { - return map_PodList -} - -var map_PodLogOptions = map[string]string{ - "": "PodLogOptions is the query options for a Pod's logs REST call.", - "container": "The container for which to stream logs. Defaults to only container if there is one container in the pod.", - "follow": "Follow the log stream of the pod. Defaults to false.", - "previous": "Return previous terminated container logs. Defaults to false.", - "sinceSeconds": "A relative time in seconds before the current time from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "sinceTime": "An RFC3339 timestamp from which to show logs. If this value precedes the time a pod was started, only logs since the pod start will be returned. If this value is in the future, no logs will be returned. Only one of sinceSeconds or sinceTime may be specified.", - "timestamps": "If true, add an RFC3339 or RFC3339Nano timestamp at the beginning of every line of log output. Defaults to false.", - "tailLines": "If set, the number of lines from the end of the logs to show. If not specified, logs are shown from the creation of the container or sinceSeconds or sinceTime", - "limitBytes": "If set, the number of bytes to read from the server before terminating the log output. This may not display a complete final line of logging, and may return slightly more or slightly less than the specified limit.", -} - -func (PodLogOptions) SwaggerDoc() map[string]string { - return map_PodLogOptions -} - -var map_PodPortForwardOptions = map[string]string{ - "": "PodPortForwardOptions is the query options to a Pod's port forward call when using WebSockets. The `port` query parameter must specify the port or ports (comma separated) to forward over. Port forwarding over SPDY does not use these options. It requires the port to be passed in the `port` header as part of request.", - "ports": "List of ports to forward Required when using WebSockets", -} - -func (PodPortForwardOptions) SwaggerDoc() map[string]string { - return map_PodPortForwardOptions -} - -var map_PodProxyOptions = map[string]string{ - "": "PodProxyOptions is the query options to a Pod's proxy call.", - "path": "Path is the URL path to use for the current proxy request to pod.", -} - -func (PodProxyOptions) SwaggerDoc() map[string]string { - return map_PodProxyOptions -} - -var map_PodReadinessGate = map[string]string{ - "": "PodReadinessGate contains the reference to a pod condition", - "conditionType": "ConditionType refers to a condition in the pod's condition list with matching type.", -} - -func (PodReadinessGate) SwaggerDoc() map[string]string { - return map_PodReadinessGate -} - -var map_PodSecurityContext = map[string]string{ - "": "PodSecurityContext holds pod-level security attributes and common container settings. Some fields are also present in container.securityContext. Field values of container.securityContext take precedence over field values of PodSecurityContext.", - "seLinuxOptions": "The SELinux context to be applied to all containers. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in SecurityContext. 
If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "supplementalGroups": "A list of groups applied to the first process run in each container, in addition to the container's primary GID. If unspecified, no groups will be added to any container.", - "fsGroup": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw ", - "sysctls": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch.", -} - -func (PodSecurityContext) SwaggerDoc() map[string]string { - return map_PodSecurityContext -} - -var map_PodSignature = map[string]string{ - "": "Describes the class of pods that should avoid this node. Exactly one field should be set.", - "podController": "Reference to controller whose pods should avoid this node.", -} - -func (PodSignature) SwaggerDoc() map[string]string { - return map_PodSignature -} - -var map_PodSpec = map[string]string{ - "": "PodSpec is a description of a pod.", - "volumes": "List of volumes that can be mounted by containers belonging to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes", - "initContainers": "List of initialization containers belonging to the pod. Init containers are executed in order prior to containers being started. If any init container fails, the pod is considered to have failed and is handled according to its restartPolicy. The name for an init container or normal container must be unique among all containers. Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes. The resourceRequirements of an init container are taken into account during scheduling by finding the highest request/limit for each resource type, and then using the max of that value or the sum of the normal containers. Limits are applied to init containers in a similar fashion. Init containers cannot currently be added or removed. Cannot be updated. More info: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/", - "containers": "List of containers belonging to the pod. Containers cannot currently be added or removed. There must be at least one container in a Pod. 
Cannot be updated.", - "restartPolicy": "Restart policy for all containers within the pod. One of Always, OnFailure, Never. Default to Always. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#restart-policy", - "terminationGracePeriodSeconds": "Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period will be used instead. The grace period is the duration in seconds after the processes running in the pod are sent a termination signal and the time when the processes are forcibly halted with a kill signal. Set this value longer than the expected cleanup time for your process. Defaults to 30 seconds.", - "activeDeadlineSeconds": "Optional duration in seconds the pod may be active on the node relative to StartTime before the system will actively try to mark it failed and kill associated containers. Value must be a positive integer.", - "dnsPolicy": "Set DNS policy for the pod. Defaults to \"ClusterFirst\". Valid values are 'ClusterFirstWithHostNet', 'ClusterFirst', 'Default' or 'None'. DNS parameters given in DNSConfig will be merged with the policy selected with DNSPolicy. To have DNS options set along with hostNetwork, you have to specify DNS policy explicitly to 'ClusterFirstWithHostNet'.", - "nodeSelector": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - "serviceAccountName": "ServiceAccountName is the name of the ServiceAccount to use to run this pod. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", - "serviceAccount": "DeprecatedServiceAccount is a deprecated alias for ServiceAccountName. Deprecated: Use serviceAccountName instead.", - "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether a service account token should be automatically mounted.", - "nodeName": "NodeName is a request to schedule this pod onto a specific node. If it is non-empty, the scheduler simply schedules this pod onto that node, assuming that it fits resource requirements.", - "hostNetwork": "Host networking requested for this pod. Use the host's network namespace. If this option is set, the ports that will be used must be specified. Default to false.", - "hostPID": "Use the host's pid namespace. Optional: Default to false.", - "hostIPC": "Use the host's ipc namespace. Optional: Default to false.", - "shareProcessNamespace": "Share a single process namespace between all of the containers in a pod. When this is set containers will be able to view and signal processes from other containers in the same pod, and the first process in each container will not be assigned PID 1. HostPID and ShareProcessNamespace cannot both be set. Optional: Default to false. This field is beta-level and may be disabled with the PodShareProcessNamespace feature.", - "securityContext": "SecurityContext holds pod-level security attributes and common container settings. Optional: Defaults to empty. See type description for default values of each field.", - "imagePullSecrets": "ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. 
If specified, these secrets will be passed to individual puller implementations for them to use. For example, in the case of docker, only DockerConfig type secrets are honored. More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod", - "hostname": "Specifies the hostname of the Pod If not specified, the pod's hostname will be set to a system-defined value.", - "subdomain": "If specified, the fully qualified Pod hostname will be \"...svc.\". If not specified, the pod will not have a domainname at all.", - "affinity": "If specified, the pod's scheduling constraints", - "schedulerName": "If specified, the pod will be dispatched by specified scheduler. If not specified, the pod will be dispatched by default scheduler.", - "tolerations": "If specified, the pod's tolerations.", - "hostAliases": "HostAliases is an optional list of hosts and IPs that will be injected into the pod's hosts file if specified. This is only valid for non-hostNetwork pods.", - "priorityClassName": "If specified, indicates the pod's priority. \"system-node-critical\" and \"system-cluster-critical\" are two special keywords which indicate the highest priorities with the former being the highest priority. Any other name must be defined by creating a PriorityClass object with that name. If not specified, the pod priority will be default or zero if there is no default.", - "priority": "The priority value. Various system components use this field to find the priority of the pod. When Priority Admission Controller is enabled, it prevents users from setting this field. The admission controller populates this field from PriorityClassName. The higher the value, the higher the priority.", - "dnsConfig": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy.", - "readinessGates": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md", - "runtimeClassName": "RuntimeClassName refers to a RuntimeClass object in the node.k8s.io group, which should be used to run this pod. If no RuntimeClass resource matches the named class, the pod will not be run. If unset or empty, the \"legacy\" RuntimeClass will be used, which is an implicit class with an empty definition that uses the default runtime handler. More info: https://github.com/kubernetes/community/blob/master/keps/sig-node/0014-runtime-class.md This is an alpha feature and may change in the future.", -} - -func (PodSpec) SwaggerDoc() map[string]string { - return map_PodSpec -} - -var map_PodStatus = map[string]string{ - "": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.", - "phase": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. 
This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase", - "conditions": "Current service state of pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions", - "message": "A human readable message indicating details about why the pod is in this condition.", - "reason": "A brief CamelCase message indicating details about why the pod is in this state. e.g. 'Evicted'", - "nominatedNodeName": "nominatedNodeName is set only when this pod preempts other pods on the node, but it cannot be scheduled right away as preemption victims receive their graceful termination periods. This field does not guarantee that the pod will be scheduled on this node. Scheduler may decide to place the pod elsewhere if other nodes become available sooner. Scheduler may also decide to give the resources on this node to a higher priority pod that is created after preemption. As a result, this field may be different than PodSpec.nodeName when the pod is scheduled.", - "hostIP": "IP address of the host to which the pod is assigned. Empty if not yet scheduled.", - "podIP": "IP address allocated to the pod. Routable at least within the cluster. Empty if not yet allocated.", - "startTime": "RFC 3339 date and time at which the object was acknowledged by the Kubelet. This is before the Kubelet pulled the container image(s) for the pod.", - "initContainerStatuses": "The list has one entry per init container in the manifest. The most recent successful init container will have ready = true, the most recently started container will have startTime set. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "containerStatuses": "The list has one entry per container in the manifest. Each entry is currently the output of `docker inspect`. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-and-container-status", - "qosClass": "The Quality of Service (QOS) classification assigned to the pod based on resource requirements See PodQOSClass type for available QOS classes More info: https://git.k8s.io/community/contributors/design-proposals/node/resource-qos.md", -} - -func (PodStatus) SwaggerDoc() map[string]string { - return map_PodStatus -} - -var map_PodStatusResult = map[string]string{ - "": "PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "status": "Most recently observed status of the pod. This data may not be up to date. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (PodStatusResult) SwaggerDoc() map[string]string { - return map_PodStatusResult -} - -var map_PodTemplate = map[string]string{ - "": "PodTemplate describes a template for creating copies of a predefined pod.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "template": "Template defines the pods that will be created from this pod template. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (PodTemplate) SwaggerDoc() map[string]string { - return map_PodTemplate -} - -var map_PodTemplateList = map[string]string{ - "": "PodTemplateList is a list of PodTemplates.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of pod templates", -} - -func (PodTemplateList) SwaggerDoc() map[string]string { - return map_PodTemplateList -} - -var map_PodTemplateSpec = map[string]string{ - "": "PodTemplateSpec describes the data a pod should have when created from a template", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior of the pod. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (PodTemplateSpec) SwaggerDoc() map[string]string { - return map_PodTemplateSpec -} - -var map_PortworxVolumeSource = map[string]string{ - "": "PortworxVolumeSource represents a Portworx volume resource.", - "volumeID": "VolumeID uniquely identifies a Portworx volume", - "fsType": "FSType represents the filesystem type to mount Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", -} - -func (PortworxVolumeSource) SwaggerDoc() map[string]string { - return map_PortworxVolumeSource -} - -var map_Preconditions = map[string]string{ - "": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.", - "uid": "Specifies the target UID.", -} - -func (Preconditions) SwaggerDoc() map[string]string { - return map_Preconditions -} - -var map_PreferAvoidPodsEntry = map[string]string{ - "": "Describes a class of pods that should avoid this node.", - "podSignature": "The class of pods.", - "evictionTime": "Time at which this entry was added to the list.", - "reason": "(brief) reason why this entry was added to the list.", - "message": "Human readable message indicating why this entry was added to the list.", -} - -func (PreferAvoidPodsEntry) SwaggerDoc() map[string]string { - return map_PreferAvoidPodsEntry -} - -var map_PreferredSchedulingTerm = map[string]string{ - "": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).", - "weight": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", - "preference": "A node selector term, associated with the corresponding weight.", -} - -func (PreferredSchedulingTerm) SwaggerDoc() map[string]string { - return map_PreferredSchedulingTerm -} - -var map_Probe = map[string]string{ - "": "Probe describes a health check to be performed against a container to determine whether it is alive or ready to receive traffic.", - "initialDelaySeconds": "Number of seconds after the container has started before liveness probes are initiated. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "timeoutSeconds": "Number of seconds after which the probe times out. Defaults to 1 second. Minimum value is 1. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes", - "periodSeconds": "How often (in seconds) to perform the probe. Default to 10 seconds. Minimum value is 1.", - "successThreshold": "Minimum consecutive successes for the probe to be considered successful after having failed. Defaults to 1. Must be 1 for liveness. Minimum value is 1.", - "failureThreshold": "Minimum consecutive failures for the probe to be considered failed after having succeeded. Defaults to 3. Minimum value is 1.", -} - -func (Probe) SwaggerDoc() map[string]string { - return map_Probe -} - -var map_ProjectedVolumeSource = map[string]string{ - "": "Represents a projected volume source", - "sources": "list of volume projections", - "defaultMode": "Mode bits to use on created files by default. Must be a value between 0 and 0777. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", -} - -func (ProjectedVolumeSource) SwaggerDoc() map[string]string { - return map_ProjectedVolumeSource -} - -var map_QuobyteVolumeSource = map[string]string{ - "": "Represents a Quobyte mount that lasts the lifetime of a pod. Quobyte volumes do not support ownership management or SELinux relabeling.", - "registry": "Registry represents a single or multiple Quobyte Registry services specified as a string as host:port pair (multiple entries are separated with commas) which acts as the central registry for volumes", - "volume": "Volume is a string that references an already created Quobyte volume by name.", - "readOnly": "ReadOnly here will force the Quobyte volume to be mounted with read-only permissions. Defaults to false.", - "user": "User to map volume access to Defaults to serviceaccount user", - "group": "Group to map volume access to Default is no group", -} - -func (QuobyteVolumeSource) SwaggerDoc() map[string]string { - return map_QuobyteVolumeSource -} - -var map_RBDPersistentVolumeSource = map[string]string{ - "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. 
More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", -} - -func (RBDPersistentVolumeSource) SwaggerDoc() map[string]string { - return map_RBDPersistentVolumeSource -} - -var map_RBDVolumeSource = map[string]string{ - "": "Represents a Rados Block Device mount that lasts the lifetime of a pod. RBD volumes support ownership management and SELinux relabeling.", - "monitors": "A collection of Ceph monitors. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "image": "The rados image name. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "fsType": "Filesystem type of the volume that you want to mount. Tip: Ensure that the filesystem type is supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://kubernetes.io/docs/concepts/storage/volumes#rbd", - "pool": "The rados pool name. Default is rbd. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "user": "The rados user name. Default is admin. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "keyring": "Keyring is the path to key ring for RBDUser. Default is /etc/ceph/keyring. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "secretRef": "SecretRef is name of the authentication secret for RBDUser. If provided overrides keyring. Default is nil. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", - "readOnly": "ReadOnly here will force the ReadOnly setting in VolumeMounts. Defaults to false. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md#how-to-use-it", -} - -func (RBDVolumeSource) SwaggerDoc() map[string]string { - return map_RBDVolumeSource -} - -var map_RangeAllocation = map[string]string{ - "": "RangeAllocation is not a public type.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "range": "Range is string that identifies the range represented by 'data'.", - "data": "Data is a bit array containing all allocated addresses in the previous segment.", -} - -func (RangeAllocation) SwaggerDoc() map[string]string { - return map_RangeAllocation -} - -var map_ReplicationController = map[string]string{ - "": "ReplicationController represents the configuration of a replication controller.", - "metadata": "If the Labels of a ReplicationController are empty, they are defaulted to be the same as the Pod(s) that the replication controller manages. Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the replication controller. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the replication controller. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (ReplicationController) SwaggerDoc() map[string]string { - return map_ReplicationController -} - -var map_ReplicationControllerCondition = map[string]string{ - "": "ReplicationControllerCondition describes the state of a replication controller at a certain point.", - "type": "Type of replication controller condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "The last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (ReplicationControllerCondition) SwaggerDoc() map[string]string { - return map_ReplicationControllerCondition -} - -var map_ReplicationControllerList = map[string]string{ - "": "ReplicationControllerList is a collection of replication controllers.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of replication controllers. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", -} - -func (ReplicationControllerList) SwaggerDoc() map[string]string { - return map_ReplicationControllerList -} - -var map_ReplicationControllerSpec = map[string]string{ - "": "ReplicationControllerSpec is the specification of a replication controller.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "selector": "Selector is a label query over pods that should match the Replicas count. If Selector is empty, it is defaulted to the labels present on the Pod template. Label keys and values that must match in order to be controlled by this replication controller, if empty defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. This takes precedence over a TemplateRef. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", -} - -func (ReplicationControllerSpec) SwaggerDoc() map[string]string { - return map_ReplicationControllerSpec -} - -var map_ReplicationControllerStatus = map[string]string{ - "": "ReplicationControllerStatus represents the current status of a replication controller.", - "replicas": "Replicas is the most recently observed number of replicas. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#what-is-a-replicationcontroller", - "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replication controller.", - "readyReplicas": "The number of ready replicas for this replication controller.", - "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replication controller.", - "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed replication controller.", - "conditions": "Represents the latest available observations of a replication controller's current state.", -} - -func (ReplicationControllerStatus) SwaggerDoc() map[string]string { - return map_ReplicationControllerStatus -} - -var map_ResourceFieldSelector = map[string]string{ - "": "ResourceFieldSelector represents container resources (cpu, memory) and their output format", - "containerName": "Container name: required for volumes, optional for env vars", - "resource": "Required: resource to select", - "divisor": "Specifies the output format of the exposed resources, defaults to \"1\"", -} - -func (ResourceFieldSelector) SwaggerDoc() map[string]string { - return map_ResourceFieldSelector -} - -var map_ResourceQuota = map[string]string{ - "": "ResourceQuota sets aggregate quota restrictions enforced per namespace", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the desired quota. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status defines the actual enforced quota and its current usage. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (ResourceQuota) SwaggerDoc() map[string]string { - return map_ResourceQuota -} - -var map_ResourceQuotaList = map[string]string{ - "": "ResourceQuotaList is a list of ResourceQuota items.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of ResourceQuota objects. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", -} - -func (ResourceQuotaList) SwaggerDoc() map[string]string { - return map_ResourceQuotaList -} - -var map_ResourceQuotaSpec = map[string]string{ - "": "ResourceQuotaSpec defines the desired hard limits to enforce for Quota.", - "hard": "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", - "scopes": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects.", - "scopeSelector": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched.", -} - -func (ResourceQuotaSpec) SwaggerDoc() map[string]string { - return map_ResourceQuotaSpec -} - -var map_ResourceQuotaStatus = map[string]string{ - "": "ResourceQuotaStatus defines the enforced hard limits and observed use.", - "hard": "Hard is the set of enforced hard limits for each named resource. 
More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/", - "used": "Used is the current observed total usage of the resource in the namespace.", -} - -func (ResourceQuotaStatus) SwaggerDoc() map[string]string { - return map_ResourceQuotaStatus -} - -var map_ResourceRequirements = map[string]string{ - "": "ResourceRequirements describes the compute resource requirements.", - "limits": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "requests": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", -} - -func (ResourceRequirements) SwaggerDoc() map[string]string { - return map_ResourceRequirements -} - -var map_SELinuxOptions = map[string]string{ - "": "SELinuxOptions are the labels to be applied to the container", - "user": "User is a SELinux user label that applies to the container.", - "role": "Role is a SELinux role label that applies to the container.", - "type": "Type is a SELinux type label that applies to the container.", - "level": "Level is SELinux level label that applies to the container.", -} - -func (SELinuxOptions) SwaggerDoc() map[string]string { - return map_SELinuxOptions -} - -var map_ScaleIOPersistentVolumeSource = map[string]string{ - "": "ScaleIOPersistentVolumeSource represents a persistent ScaleIO volume", - "gateway": "The host address of the ScaleIO API Gateway.", - "system": "The name of the storage system as configured in ScaleIO.", - "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. If this is not provided, Login operation will fail.", - "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", - "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", - "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", - "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", - "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\"", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", -} - -func (ScaleIOPersistentVolumeSource) SwaggerDoc() map[string]string { - return map_ScaleIOPersistentVolumeSource -} - -var map_ScaleIOVolumeSource = map[string]string{ - "": "ScaleIOVolumeSource represents a persistent ScaleIO volume", - "gateway": "The host address of the ScaleIO API Gateway.", - "system": "The name of the storage system as configured in ScaleIO.", - "secretRef": "SecretRef references to the secret for ScaleIO user and other sensitive information. 
If this is not provided, Login operation will fail.", - "sslEnabled": "Flag to enable/disable SSL communication with Gateway, default false", - "protectionDomain": "The name of the ScaleIO Protection Domain for the configured storage.", - "storagePool": "The ScaleIO Storage Pool associated with the protection domain.", - "storageMode": "Indicates whether the storage for a volume should be ThickProvisioned or ThinProvisioned. Default is ThinProvisioned.", - "volumeName": "The name of a volume already created in the ScaleIO system that is associated with this volume source.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Default is \"xfs\".", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", -} - -func (ScaleIOVolumeSource) SwaggerDoc() map[string]string { - return map_ScaleIOVolumeSource -} - -var map_ScopeSelector = map[string]string{ - "": "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.", - "matchExpressions": "A list of scope selector requirements by scope of the resources.", -} - -func (ScopeSelector) SwaggerDoc() map[string]string { - return map_ScopeSelector -} - -var map_ScopedResourceSelectorRequirement = map[string]string{ - "": "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.", - "scopeName": "The name of the scope that the selector applies to.", - "operator": "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist.", - "values": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.", -} - -func (ScopedResourceSelectorRequirement) SwaggerDoc() map[string]string { - return map_ScopedResourceSelectorRequirement -} - -var map_Secret = map[string]string{ - "": "Secret holds secret data of a certain type. The total bytes of the values in the Data field must be less than MaxSecretSize bytes.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "data": "Data contains the secret data. Each key must consist of alphanumeric characters, '-', '_' or '.'. The serialized form of the secret data is a base64 encoded string, representing the arbitrary (possibly non-string) data value here. Described in https://tools.ietf.org/html/rfc4648#section-4", - "stringData": "stringData allows specifying non-binary secret data in string form. It is provided as a write-only convenience method. All keys and values are merged into the data field on write, overwriting any existing values. 
It is never output when reading from the API.", - "type": "Used to facilitate programmatic handling of secret data.", -} - -func (Secret) SwaggerDoc() map[string]string { - return map_Secret -} - -var map_SecretEnvSource = map[string]string{ - "": "SecretEnvSource selects a Secret to populate the environment variables with.\n\nThe contents of the target Secret's Data field will represent the key-value pairs as environment variables.", - "optional": "Specify whether the Secret must be defined", -} - -func (SecretEnvSource) SwaggerDoc() map[string]string { - return map_SecretEnvSource -} - -var map_SecretKeySelector = map[string]string{ - "": "SecretKeySelector selects a key of a Secret.", - "key": "The key of the secret to select from. Must be a valid secret key.", - "optional": "Specify whether the Secret or it's key must be defined", -} - -func (SecretKeySelector) SwaggerDoc() map[string]string { - return map_SecretKeySelector -} - -var map_SecretList = map[string]string{ - "": "SecretList is a list of Secret.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "Items is a list of secret objects. More info: https://kubernetes.io/docs/concepts/configuration/secret", -} - -func (SecretList) SwaggerDoc() map[string]string { - return map_SecretList -} - -var map_SecretProjection = map[string]string{ - "": "Adapts a secret into a projected volume.\n\nThe contents of the target Secret's Data field will be presented in a projected volume as files using the keys in the Data field as the file names. Note that this is identical to a secret volume source without the default mode.", - "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "optional": "Specify whether the Secret or its key must be defined", -} - -func (SecretProjection) SwaggerDoc() map[string]string { - return map_SecretProjection -} - -var map_SecretReference = map[string]string{ - "": "SecretReference represents a Secret Reference. It has enough information to retrieve secret in any namespace", - "name": "Name is unique within a namespace to reference a secret resource.", - "namespace": "Namespace defines the space within which the secret name must be unique.", -} - -func (SecretReference) SwaggerDoc() map[string]string { - return map_SecretReference -} - -var map_SecretVolumeSource = map[string]string{ - "": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.", - "secretName": "Name of the secret in the pod's namespace to use. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", - "items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. 
If a key is specified which is not present in the Secret, the volume setup will error unless it is marked optional. Paths must be relative and may not contain the '..' path or start with '..'.", - "defaultMode": "Optional: mode bits to use on created files by default. Must be a value between 0 and 0777. Defaults to 0644. Directories within the path are not affected by this setting. This might be in conflict with other options that affect the file mode, like fsGroup, and the result can be other mode bits set.", - "optional": "Specify whether the Secret or it's keys must be defined", -} - -func (SecretVolumeSource) SwaggerDoc() map[string]string { - return map_SecretVolumeSource -} - -var map_SecurityContext = map[string]string{ - "": "SecurityContext holds security configuration that will be applied to a container. Some fields are present in both SecurityContext and PodSecurityContext. When both are set, the values in SecurityContext take precedence.", - "capabilities": "The capabilities to add/drop when running containers. Defaults to the default set of capabilities granted by the container runtime.", - "privileged": "Run container in privileged mode. Processes in privileged containers are essentially equivalent to root on the host. Defaults to false.", - "seLinuxOptions": "The SELinux context to be applied to the container. If unspecified, the container runtime will allocate a random SELinux context for each container. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsUser": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsGroup": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "runAsNonRoot": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence.", - "readOnlyRootFilesystem": "Whether this container has a read-only root filesystem. Default is false.", - "allowPrivilegeEscalation": "AllowPrivilegeEscalation controls whether a process can gain more privileges than its parent process. This bool directly controls if the no_new_privs flag will be set on the container process. AllowPrivilegeEscalation is true always when the container is: 1) run as Privileged 2) has CAP_SYS_ADMIN", - "procMount": "procMount denotes the type of proc mount to use for the containers. The default is DefaultProcMount which uses the container runtime defaults for readonly paths and masked paths. 
This requires the ProcMountType feature flag to be enabled.", -} - -func (SecurityContext) SwaggerDoc() map[string]string { - return map_SecurityContext -} - -var map_SerializedReference = map[string]string{ - "": "SerializedReference is a reference to serialized object.", - "reference": "The reference to an object in the system.", -} - -func (SerializedReference) SwaggerDoc() map[string]string { - return map_SerializedReference -} - -var map_Service = map[string]string{ - "": "Service is a named abstraction of software service (for example, mysql) consisting of local port (for example 3306) that the proxy listens on, and the selector that determines which pods will answer requests sent through the proxy.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the behavior of a service. https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Most recently observed status of the service. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (Service) SwaggerDoc() map[string]string { - return map_Service -} - -var map_ServiceAccount = map[string]string{ - "": "ServiceAccount binds together: * a name, understood by users, and perhaps by peripheral systems, for an identity * a principal that can be authenticated and authorized * a set of secrets", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "secrets": "Secrets is the list of secrets allowed to be used by pods running using this ServiceAccount. More info: https://kubernetes.io/docs/concepts/configuration/secret", - "imagePullSecrets": "ImagePullSecrets is a list of references to secrets in the same namespace to use for pulling any images in pods that reference this ServiceAccount. ImagePullSecrets are distinct from Secrets because Secrets can be mounted in the pod, but ImagePullSecrets are only accessed by the kubelet. More info: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod", - "automountServiceAccountToken": "AutomountServiceAccountToken indicates whether pods running as this service account should have an API token automatically mounted. Can be overridden at the pod level.", -} - -func (ServiceAccount) SwaggerDoc() map[string]string { - return map_ServiceAccount -} - -var map_ServiceAccountList = map[string]string{ - "": "ServiceAccountList is a list of ServiceAccount objects", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ServiceAccounts. More info: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/", -} - -func (ServiceAccountList) SwaggerDoc() map[string]string { - return map_ServiceAccountList -} - -var map_ServiceAccountTokenProjection = map[string]string{ - "": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).", - "audience": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. 
The audience defaults to the identifier of the apiserver.", - "expirationSeconds": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes.", - "path": "Path is the path relative to the mount point of the file to project the token into.", -} - -func (ServiceAccountTokenProjection) SwaggerDoc() map[string]string { - return map_ServiceAccountTokenProjection -} - -var map_ServiceList = map[string]string{ - "": "ServiceList holds a list of services.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of services", -} - -func (ServiceList) SwaggerDoc() map[string]string { - return map_ServiceList -} - -var map_ServicePort = map[string]string{ - "": "ServicePort contains information on service's port.", - "name": "The name of this port within the service. This must be a DNS_LABEL. All ports within a ServiceSpec must have unique names. This maps to the 'Name' field in EndpointPort objects. Optional if only one ServicePort is defined on this service.", - "protocol": "The IP protocol for this port. Supports \"TCP\", \"UDP\", and \"SCTP\". Default is TCP.", - "port": "The port that will be exposed by this service.", - "targetPort": "Number or name of the port to access on the pods targeted by the service. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME. If this is a string, it will be looked up as a named port in the target Pod's container ports. If this is not specified, the value of the 'port' field is used (an identity map). This field is ignored for services with clusterIP=None, and should be omitted or set equal to the 'port' field. More info: https://kubernetes.io/docs/concepts/services-networking/service/#defining-a-service", - "nodePort": "The port on each node on which this service is exposed when type=NodePort or LoadBalancer. Usually assigned by the system. If specified, it will be allocated to the service if unused or else creation of the service will fail. Default is to auto-allocate a port if the ServiceType of this Service requires one. More info: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport", -} - -func (ServicePort) SwaggerDoc() map[string]string { - return map_ServicePort -} - -var map_ServiceProxyOptions = map[string]string{ - "": "ServiceProxyOptions is the query options to a Service's proxy call.", - "path": "Path is the part of URLs that include service endpoints, suffixes, and parameters to use for the current proxy request to service. For example, the whole request URL is http://localhost/api/v1/namespaces/kube-system/services/elasticsearch-logging/_search?q=user:kimchy. Path is _search?q=user:kimchy.", -} - -func (ServiceProxyOptions) SwaggerDoc() map[string]string { - return map_ServiceProxyOptions -} - -var map_ServiceSpec = map[string]string{ - "": "ServiceSpec describes the attributes that a user creates on a service.", - "ports": "The list of ports that are exposed by this service. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "selector": "Route service traffic to pods with label keys and values matching this selector. 
If empty or not present, the service is assumed to have an external process managing its endpoints, which Kubernetes will not modify. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/", - "clusterIP": "clusterIP is the IP address of the service and is usually assigned randomly by the master. If an address is specified manually and is not in use by others, it will be allocated to the service; otherwise, creation of the service will fail. This field can not be changed through updates. Valid values are \"None\", empty string (\"\"), or a valid IP address. \"None\" can be specified for headless services when proxying is not required. Only applies to types ClusterIP, NodePort, and LoadBalancer. Ignored if type is ExternalName. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "type": "type determines how the Service is exposed. Defaults to ClusterIP. Valid options are ExternalName, ClusterIP, NodePort, and LoadBalancer. \"ExternalName\" maps to the specified externalName. \"ClusterIP\" allocates a cluster-internal IP address for load-balancing to endpoints. Endpoints are determined by the selector or if that is not specified, by manual construction of an Endpoints object. If clusterIP is \"None\", no virtual IP is allocated and the endpoints are published as a set of endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and allocates a port on every node which routes to the clusterIP. \"LoadBalancer\" builds on NodePort and creates an external load-balancer (if supported in the current cloud) which routes to the clusterIP. More info: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services ", - "externalIPs": "externalIPs is a list of IP addresses for which nodes in the cluster will also accept traffic for this service. These IPs are not managed by Kubernetes. The user is responsible for ensuring that traffic arrives at a node with this IP. A common example is external load-balancers that are not part of the Kubernetes system.", - "sessionAffinity": "Supports \"ClientIP\" and \"None\". Used to maintain session affinity. Enable client IP based session affinity. Must be ClientIP or None. Defaults to None. More info: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies", - "loadBalancerIP": "Only applies to Service Type: LoadBalancer LoadBalancer will get created with the IP specified in this field. This feature depends on whether the underlying cloud-provider supports specifying the loadBalancerIP when a load balancer is created. This field will be ignored if the cloud-provider does not support the feature.", - "loadBalancerSourceRanges": "If specified and supported by the platform, this will restrict traffic through the cloud-provider load-balancer will be restricted to the specified client IPs. This field will be ignored if the cloud-provider does not support the feature.\" More info: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/", - "externalName": "externalName is the external reference that kubedns or equivalent will return as a CNAME record for this service. No proxying will be involved. 
Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123) and requires Type to be ExternalName.", - "externalTrafficPolicy": "externalTrafficPolicy denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints. \"Local\" preserves the client source IP and avoids a second hop for LoadBalancer and Nodeport type services, but risks potentially imbalanced traffic spreading. \"Cluster\" obscures the client source IP and may cause a second hop to another node, but should have good overall load-spreading.", - "healthCheckNodePort": "healthCheckNodePort specifies the healthcheck nodePort for the service. If not specified, HealthCheckNodePort is created by the service api backend with the allocated nodePort. Will use user-specified nodePort value if specified by the client. Only effects when Type is set to LoadBalancer and ExternalTrafficPolicy is set to Local.", - "publishNotReadyAddresses": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery.", - "sessionAffinityConfig": "sessionAffinityConfig contains the configurations of session affinity.", -} - -func (ServiceSpec) SwaggerDoc() map[string]string { - return map_ServiceSpec -} - -var map_ServiceStatus = map[string]string{ - "": "ServiceStatus represents the current status of a service.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer, if one is present.", -} - -func (ServiceStatus) SwaggerDoc() map[string]string { - return map_ServiceStatus -} - -var map_SessionAffinityConfig = map[string]string{ - "": "SessionAffinityConfig represents the configurations of session affinity.", - "clientIP": "clientIP contains the configurations of Client IP based session affinity.", -} - -func (SessionAffinityConfig) SwaggerDoc() map[string]string { - return map_SessionAffinityConfig -} - -var map_StorageOSPersistentVolumeSource = map[string]string{ - "": "Represents a StorageOS persistent volume resource.", - "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", - "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. 
If not specified, default values will be attempted.", -} - -func (StorageOSPersistentVolumeSource) SwaggerDoc() map[string]string { - return map_StorageOSPersistentVolumeSource -} - -var map_StorageOSVolumeSource = map[string]string{ - "": "Represents a StorageOS persistent volume resource.", - "volumeName": "VolumeName is the human-readable name of the StorageOS volume. Volume names are only unique within a namespace.", - "volumeNamespace": "VolumeNamespace specifies the scope of the volume within StorageOS. If no namespace is specified then the Pod's namespace will be used. This allows the Kubernetes name scoping to be mirrored within StorageOS for tighter integration. Set VolumeName to any name to override the default behaviour. Set to \"default\" if you are not using namespaces within StorageOS. Namespaces that do not pre-exist within StorageOS will be created.", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "readOnly": "Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts.", - "secretRef": "SecretRef specifies the secret to use for obtaining the StorageOS API credentials. If not specified, default values will be attempted.", -} - -func (StorageOSVolumeSource) SwaggerDoc() map[string]string { - return map_StorageOSVolumeSource -} - -var map_Sysctl = map[string]string{ - "": "Sysctl defines a kernel parameter to be set", - "name": "Name of a property to set", - "value": "Value of a property to set", -} - -func (Sysctl) SwaggerDoc() map[string]string { - return map_Sysctl -} - -var map_TCPSocketAction = map[string]string{ - "": "TCPSocketAction describes an action based on opening a socket", - "port": "Number or name of the port to access on the container. Number must be in the range 1 to 65535. Name must be an IANA_SVC_NAME.", - "host": "Optional: Host name to connect to, defaults to the pod IP.", -} - -func (TCPSocketAction) SwaggerDoc() map[string]string { - return map_TCPSocketAction -} - -var map_Taint = map[string]string{ - "": "The node this Taint is attached to has the \"effect\" on any pod that does not tolerate the Taint.", - "key": "Required. The taint key to be applied to a node.", - "value": "Required. The taint value corresponding to the taint key.", - "effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule, PreferNoSchedule and NoExecute.", - "timeAdded": "TimeAdded represents the time at which the taint was added. It is only written for NoExecute taints.", -} - -func (Taint) SwaggerDoc() map[string]string { - return map_Taint -} - -var map_Toleration = map[string]string{ - "": "The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator .", - "key": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", - "operator": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", - "value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", - "effect": "Effect indicates the taint effect to match. 
Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", - "tolerationSeconds": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", -} - -func (Toleration) SwaggerDoc() map[string]string { - return map_Toleration -} - -var map_TopologySelectorLabelRequirement = map[string]string{ - "": "A topology selector requirement is a selector that matches given label. This is an alpha feature and may change in the future.", - "key": "The label key that the selector applies to.", - "values": "An array of string values. One value must match the label to be selected. Each entry in Values is ORed.", -} - -func (TopologySelectorLabelRequirement) SwaggerDoc() map[string]string { - return map_TopologySelectorLabelRequirement -} - -var map_TopologySelectorTerm = map[string]string{ - "": "A topology selector term represents the result of label queries. A null or empty topology selector term matches no objects. The requirements of them are ANDed. It provides a subset of functionality as NodeSelectorTerm. This is an alpha feature and may change in the future.", - "matchLabelExpressions": "A list of topology selector requirements by labels.", -} - -func (TopologySelectorTerm) SwaggerDoc() map[string]string { - return map_TopologySelectorTerm -} - -var map_TypedLocalObjectReference = map[string]string{ - "": "TypedLocalObjectReference contains enough information to let you locate the typed referenced object inside the same namespace.", - "apiGroup": "APIGroup is the group for the resource being referenced. If APIGroup is not specified, the specified Kind must be in the core API group. For any other third-party types, APIGroup is required.", - "kind": "Kind is the type of resource being referenced", - "name": "Name is the name of resource being referenced", -} - -func (TypedLocalObjectReference) SwaggerDoc() map[string]string { - return map_TypedLocalObjectReference -} - -var map_Volume = map[string]string{ - "": "Volume represents a named volume in a pod that may be accessed by any container in the pod.", - "name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names", -} - -func (Volume) SwaggerDoc() map[string]string { - return map_Volume -} - -var map_VolumeDevice = map[string]string{ - "": "volumeDevice describes a mapping of a raw block device within a container.", - "name": "name must match the name of a persistentVolumeClaim in the pod", - "devicePath": "devicePath is the path inside of the container that the device will be mapped to.", -} - -func (VolumeDevice) SwaggerDoc() map[string]string { - return map_VolumeDevice -} - -var map_VolumeMount = map[string]string{ - "": "VolumeMount describes a mounting of a Volume within a container.", - "name": "This must match the Name of a Volume.", - "readOnly": "Mounted read-only if true, read-write otherwise (false or unspecified). Defaults to false.", - "mountPath": "Path within the container at which the volume should be mounted. Must not contain ':'.", - "subPath": "Path within the volume from which the container's volume should be mounted. 
Defaults to \"\" (volume's root).", - "mountPropagation": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10.", -} - -func (VolumeMount) SwaggerDoc() map[string]string { - return map_VolumeMount -} - -var map_VolumeNodeAffinity = map[string]string{ - "": "VolumeNodeAffinity defines constraints that limit what nodes this volume can be accessed from.", - "required": "Required specifies hard node constraints that must be met.", -} - -func (VolumeNodeAffinity) SwaggerDoc() map[string]string { - return map_VolumeNodeAffinity -} - -var map_VolumeProjection = map[string]string{ - "": "Projection that may be projected along with other supported volume types", - "secret": "information about the secret data to project", - "downwardAPI": "information about the downwardAPI data to project", - "configMap": "information about the configMap data to project", - "serviceAccountToken": "information about the serviceAccountToken data to project", -} - -func (VolumeProjection) SwaggerDoc() map[string]string { - return map_VolumeProjection -} - -var map_VolumeSource = map[string]string{ - "": "Represents the source of a volume to mount. Only one of its members may be specified.", - "hostPath": "HostPath represents a pre-existing file or directory on the host machine that is directly exposed to the container. This is generally used for system agents or other privileged things that are allowed to see the host machine. Most containers will NOT need this. More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath", - "emptyDir": "EmptyDir represents a temporary directory that shares a pod's lifetime. More info: https://kubernetes.io/docs/concepts/storage/volumes#emptydir", - "gcePersistentDisk": "GCEPersistentDisk represents a GCE Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#gcepersistentdisk", - "awsElasticBlockStore": "AWSElasticBlockStore represents an AWS Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://kubernetes.io/docs/concepts/storage/volumes#awselasticblockstore", - "gitRepo": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.", - "secret": "Secret represents a secret that should populate this volume. More info: https://kubernetes.io/docs/concepts/storage/volumes#secret", - "nfs": "NFS represents an NFS mount on the host that shares a pod's lifetime More info: https://kubernetes.io/docs/concepts/storage/volumes#nfs", - "iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: https://releases.k8s.io/HEAD/examples/volumes/iscsi/README.md", - "glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/glusterfs/README.md", - "persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. 
More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims", - "rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: https://releases.k8s.io/HEAD/examples/volumes/rbd/README.md", - "flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using an exec based plugin.", - "cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md", - "cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime", - "flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running", - "downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume", - "fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.", - "azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.", - "configMap": "ConfigMap represents a configMap that should populate this volume", - "vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine", - "quobyte": "Quobyte represents a Quobyte mount on the host that shares a pod's lifetime", - "azureDisk": "AzureDisk represents an Azure Data Disk mount on the host and bind mount to the pod.", - "photonPersistentDisk": "PhotonPersistentDisk represents a PhotonController persistent disk attached and mounted on kubelets host machine", - "projected": "Items for all in one resources secrets, configmaps, and downward API", - "portworxVolume": "PortworxVolume represents a portworx volume attached and mounted on kubelets host machine", - "scaleIO": "ScaleIO represents a ScaleIO persistent volume attached and mounted on Kubernetes nodes.", - "storageos": "StorageOS represents a StorageOS volume attached and mounted on Kubernetes nodes.", -} - -func (VolumeSource) SwaggerDoc() map[string]string { - return map_VolumeSource -} - -var map_VsphereVirtualDiskVolumeSource = map[string]string{ - "": "Represents a vSphere volume resource.", - "volumePath": "Path that identifies vSphere volume vmdk", - "fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.", - "storagePolicyName": "Storage Policy Based Management (SPBM) profile name.", - "storagePolicyID": "Storage Policy Based Management (SPBM) profile ID associated with the StoragePolicyName.", -} - -func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string { - return map_VsphereVirtualDiskVolumeSource -} - -var map_WeightedPodAffinityTerm = map[string]string{ - "": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - "weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - "podAffinityTerm": "Required. 
A pod affinity term, associated with the corresponding weight.", -} - -func (WeightedPodAffinityTerm) SwaggerDoc() map[string]string { - return map_WeightedPodAffinityTerm -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go deleted file mode 100644 index f8f3471..0000000 --- a/vendor/k8s.io/api/core/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,5404 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - types "k8s.io/apimachinery/pkg/types" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopyInto(out *AWSElasticBlockStoreVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AWSElasticBlockStoreVolumeSource. -func (in *AWSElasticBlockStoreVolumeSource) DeepCopy() *AWSElasticBlockStoreVolumeSource { - if in == nil { - return nil - } - out := new(AWSElasticBlockStoreVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Affinity) DeepCopyInto(out *Affinity) { - *out = *in - if in.NodeAffinity != nil { - in, out := &in.NodeAffinity, &out.NodeAffinity - *out = new(NodeAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAffinity != nil { - in, out := &in.PodAffinity, &out.PodAffinity - *out = new(PodAffinity) - (*in).DeepCopyInto(*out) - } - if in.PodAntiAffinity != nil { - in, out := &in.PodAntiAffinity, &out.PodAntiAffinity - *out = new(PodAntiAffinity) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Affinity. -func (in *Affinity) DeepCopy() *Affinity { - if in == nil { - return nil - } - out := new(Affinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AttachedVolume) DeepCopyInto(out *AttachedVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AttachedVolume. -func (in *AttachedVolume) DeepCopy() *AttachedVolume { - if in == nil { - return nil - } - out := new(AttachedVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AvoidPods) DeepCopyInto(out *AvoidPods) { - *out = *in - if in.PreferAvoidPods != nil { - in, out := &in.PreferAvoidPods, &out.PreferAvoidPods - *out = make([]PreferAvoidPodsEntry, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AvoidPods. -func (in *AvoidPods) DeepCopy() *AvoidPods { - if in == nil { - return nil - } - out := new(AvoidPods) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureDiskVolumeSource) DeepCopyInto(out *AzureDiskVolumeSource) { - *out = *in - if in.CachingMode != nil { - in, out := &in.CachingMode, &out.CachingMode - *out = new(AzureDataDiskCachingMode) - **out = **in - } - if in.FSType != nil { - in, out := &in.FSType, &out.FSType - *out = new(string) - **out = **in - } - if in.ReadOnly != nil { - in, out := &in.ReadOnly, &out.ReadOnly - *out = new(bool) - **out = **in - } - if in.Kind != nil { - in, out := &in.Kind, &out.Kind - *out = new(AzureDataDiskKind) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureDiskVolumeSource. -func (in *AzureDiskVolumeSource) DeepCopy() *AzureDiskVolumeSource { - if in == nil { - return nil - } - out := new(AzureDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFilePersistentVolumeSource) DeepCopyInto(out *AzureFilePersistentVolumeSource) { - *out = *in - if in.SecretNamespace != nil { - in, out := &in.SecretNamespace, &out.SecretNamespace - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFilePersistentVolumeSource. -func (in *AzureFilePersistentVolumeSource) DeepCopy() *AzureFilePersistentVolumeSource { - if in == nil { - return nil - } - out := new(AzureFilePersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AzureFileVolumeSource) DeepCopyInto(out *AzureFileVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AzureFileVolumeSource. -func (in *AzureFileVolumeSource) DeepCopy() *AzureFileVolumeSource { - if in == nil { - return nil - } - out := new(AzureFileVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Binding) DeepCopyInto(out *Binding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Target = in.Target - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Binding. -func (in *Binding) DeepCopy() *Binding { - if in == nil { - return nil - } - out := new(Binding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *Binding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CSIPersistentVolumeSource) DeepCopyInto(out *CSIPersistentVolumeSource) { - *out = *in - if in.VolumeAttributes != nil { - in, out := &in.VolumeAttributes, &out.VolumeAttributes - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ControllerPublishSecretRef != nil { - in, out := &in.ControllerPublishSecretRef, &out.ControllerPublishSecretRef - *out = new(SecretReference) - **out = **in - } - if in.NodeStageSecretRef != nil { - in, out := &in.NodeStageSecretRef, &out.NodeStageSecretRef - *out = new(SecretReference) - **out = **in - } - if in.NodePublishSecretRef != nil { - in, out := &in.NodePublishSecretRef, &out.NodePublishSecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CSIPersistentVolumeSource. -func (in *CSIPersistentVolumeSource) DeepCopy() *CSIPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CSIPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Capabilities) DeepCopyInto(out *Capabilities) { - *out = *in - if in.Add != nil { - in, out := &in.Add, &out.Add - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - if in.Drop != nil { - in, out := &in.Drop, &out.Drop - *out = make([]Capability, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Capabilities. -func (in *Capabilities) DeepCopy() *Capabilities { - if in == nil { - return nil - } - out := new(Capabilities) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFSPersistentVolumeSource) DeepCopyInto(out *CephFSPersistentVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSPersistentVolumeSource. -func (in *CephFSPersistentVolumeSource) DeepCopy() *CephFSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CephFSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CephFSVolumeSource) DeepCopyInto(out *CephFSVolumeSource) { - *out = *in - if in.Monitors != nil { - in, out := &in.Monitors, &out.Monitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CephFSVolumeSource. 
-func (in *CephFSVolumeSource) DeepCopy() *CephFSVolumeSource { - if in == nil { - return nil - } - out := new(CephFSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CinderPersistentVolumeSource) DeepCopyInto(out *CinderPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderPersistentVolumeSource. -func (in *CinderPersistentVolumeSource) DeepCopy() *CinderPersistentVolumeSource { - if in == nil { - return nil - } - out := new(CinderPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CinderVolumeSource) DeepCopyInto(out *CinderVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CinderVolumeSource. -func (in *CinderVolumeSource) DeepCopy() *CinderVolumeSource { - if in == nil { - return nil - } - out := new(CinderVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClientIPConfig) DeepCopyInto(out *ClientIPConfig) { - *out = *in - if in.TimeoutSeconds != nil { - in, out := &in.TimeoutSeconds, &out.TimeoutSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClientIPConfig. -func (in *ClientIPConfig) DeepCopy() *ClientIPConfig { - if in == nil { - return nil - } - out := new(ClientIPConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentCondition) DeepCopyInto(out *ComponentCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentCondition. -func (in *ComponentCondition) DeepCopy() *ComponentCondition { - if in == nil { - return nil - } - out := new(ComponentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatus) DeepCopyInto(out *ComponentStatus) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ComponentCondition, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatus. -func (in *ComponentStatus) DeepCopy() *ComponentStatus { - if in == nil { - return nil - } - out := new(ComponentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ComponentStatus) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ComponentStatusList) DeepCopyInto(out *ComponentStatusList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ComponentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ComponentStatusList. -func (in *ComponentStatusList) DeepCopy() *ComponentStatusList { - if in == nil { - return nil - } - out := new(ComponentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ComponentStatusList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMap) DeepCopyInto(out *ConfigMap) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.BinaryData != nil { - in, out := &in.BinaryData, &out.BinaryData - *out = make(map[string][]byte, len(*in)) - for key, val := range *in { - var outVal []byte - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]byte, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMap. -func (in *ConfigMap) DeepCopy() *ConfigMap { - if in == nil { - return nil - } - out := new(ConfigMap) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigMap) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapEnvSource) DeepCopyInto(out *ConfigMapEnvSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapEnvSource. -func (in *ConfigMapEnvSource) DeepCopy() *ConfigMapEnvSource { - if in == nil { - return nil - } - out := new(ConfigMapEnvSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapKeySelector) DeepCopyInto(out *ConfigMapKeySelector) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeySelector. 
-func (in *ConfigMapKeySelector) DeepCopy() *ConfigMapKeySelector { - if in == nil { - return nil - } - out := new(ConfigMapKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapList) DeepCopyInto(out *ConfigMapList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ConfigMap, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapList. -func (in *ConfigMapList) DeepCopy() *ConfigMapList { - if in == nil { - return nil - } - out := new(ConfigMapList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ConfigMapList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapNodeConfigSource) DeepCopyInto(out *ConfigMapNodeConfigSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapNodeConfigSource. -func (in *ConfigMapNodeConfigSource) DeepCopy() *ConfigMapNodeConfigSource { - if in == nil { - return nil - } - out := new(ConfigMapNodeConfigSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapProjection) DeepCopyInto(out *ConfigMapProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapProjection. -func (in *ConfigMapProjection) DeepCopy() *ConfigMapProjection { - if in == nil { - return nil - } - out := new(ConfigMapProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ConfigMapVolumeSource) DeepCopyInto(out *ConfigMapVolumeSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapVolumeSource. -func (in *ConfigMapVolumeSource) DeepCopy() *ConfigMapVolumeSource { - if in == nil { - return nil - } - out := new(ConfigMapVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
[snip: the rest of this hunk removes the vendored, autogenerated deepcopy code for the core/v1 API types. The deleted lines are the generated DeepCopyInto, DeepCopy, and (for top-level objects) DeepCopyObject methods for: Container, ContainerImage, ContainerPort, ContainerState, ContainerStateRunning, ContainerStateTerminated, ContainerStateWaiting, ContainerStatus, DaemonEndpoint, DownwardAPIProjection, DownwardAPIVolumeFile, DownwardAPIVolumeSource, EmptyDirVolumeSource, EndpointAddress, EndpointPort, EndpointSubset, Endpoints, EndpointsList, EnvFromSource, EnvVar, EnvVarSource, Event, EventList, EventSeries, EventSource, ExecAction, FCVolumeSource, FlexPersistentVolumeSource, FlexVolumeSource, FlockerVolumeSource, GCEPersistentDiskVolumeSource, GitRepoVolumeSource, GlusterfsVolumeSource, HTTPGetAction, HTTPHeader, Handler, HostAlias, HostPathVolumeSource, ISCSIPersistentVolumeSource, ISCSIVolumeSource, KeyToPath, Lifecycle, LimitRange, LimitRangeItem, LimitRangeList, LimitRangeSpec, List, LoadBalancerIngress, LoadBalancerStatus, LocalObjectReference, LocalVolumeSource, NFSVolumeSource, Namespace, NamespaceList, NamespaceSpec, NamespaceStatus, Node, NodeAddress, NodeAffinity, NodeCondition, NodeConfigSource, NodeConfigStatus, NodeDaemonEndpoints, NodeList, NodeProxyOptions, NodeResources, NodeSelector, NodeSelectorRequirement, NodeSelectorTerm, NodeSpec, NodeStatus, NodeSystemInfo, ObjectFieldSelector, ObjectReference, PersistentVolume, PersistentVolumeClaim, PersistentVolumeClaimCondition, PersistentVolumeClaimList, PersistentVolumeClaimSpec, PersistentVolumeClaimStatus, PersistentVolumeClaimVolumeSource, PersistentVolumeList, PersistentVolumeSource, PersistentVolumeSpec, PersistentVolumeStatus, PhotonPersistentDiskVolumeSource, Pod, PodAffinity, PodAffinityTerm, and PodAntiAffinity.]
-func (in *PodAttachOptions) DeepCopyInto(out *PodAttachOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodAttachOptions. -func (in *PodAttachOptions) DeepCopy() *PodAttachOptions { - if in == nil { - return nil - } - out := new(PodAttachOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodAttachOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodCondition) DeepCopyInto(out *PodCondition) { - *out = *in - in.LastProbeTime.DeepCopyInto(&out.LastProbeTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodCondition. -func (in *PodCondition) DeepCopy() *PodCondition { - if in == nil { - return nil - } - out := new(PodCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodDNSConfig) DeepCopyInto(out *PodDNSConfig) { - *out = *in - if in.Nameservers != nil { - in, out := &in.Nameservers, &out.Nameservers - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Searches != nil { - in, out := &in.Searches, &out.Searches - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Options != nil { - in, out := &in.Options, &out.Options - *out = make([]PodDNSConfigOption, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfig. -func (in *PodDNSConfig) DeepCopy() *PodDNSConfig { - if in == nil { - return nil - } - out := new(PodDNSConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodDNSConfigOption) DeepCopyInto(out *PodDNSConfigOption) { - *out = *in - if in.Value != nil { - in, out := &in.Value, &out.Value - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDNSConfigOption. -func (in *PodDNSConfigOption) DeepCopy() *PodDNSConfigOption { - if in == nil { - return nil - } - out := new(PodDNSConfigOption) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodExecOptions) DeepCopyInto(out *PodExecOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Command != nil { - in, out := &in.Command, &out.Command - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodExecOptions. -func (in *PodExecOptions) DeepCopy() *PodExecOptions { - if in == nil { - return nil - } - out := new(PodExecOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodExecOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodList) DeepCopyInto(out *PodList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Pod, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodList. -func (in *PodList) DeepCopy() *PodList { - if in == nil { - return nil - } - out := new(PodList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodLogOptions) DeepCopyInto(out *PodLogOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.SinceSeconds != nil { - in, out := &in.SinceSeconds, &out.SinceSeconds - *out = new(int64) - **out = **in - } - if in.SinceTime != nil { - in, out := &in.SinceTime, &out.SinceTime - *out = (*in).DeepCopy() - } - if in.TailLines != nil { - in, out := &in.TailLines, &out.TailLines - *out = new(int64) - **out = **in - } - if in.LimitBytes != nil { - in, out := &in.LimitBytes, &out.LimitBytes - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodLogOptions. -func (in *PodLogOptions) DeepCopy() *PodLogOptions { - if in == nil { - return nil - } - out := new(PodLogOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodLogOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPortForwardOptions) DeepCopyInto(out *PodPortForwardOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]int32, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPortForwardOptions. -func (in *PodPortForwardOptions) DeepCopy() *PodPortForwardOptions { - if in == nil { - return nil - } - out := new(PodPortForwardOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPortForwardOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodProxyOptions) DeepCopyInto(out *PodProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodProxyOptions. 
-func (in *PodProxyOptions) DeepCopy() *PodProxyOptions { - if in == nil { - return nil - } - out := new(PodProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodReadinessGate) DeepCopyInto(out *PodReadinessGate) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodReadinessGate. -func (in *PodReadinessGate) DeepCopy() *PodReadinessGate { - if in == nil { - return nil - } - out := new(PodReadinessGate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityContext) DeepCopyInto(out *PodSecurityContext) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.SupplementalGroups != nil { - in, out := &in.SupplementalGroups, &out.SupplementalGroups - *out = make([]int64, len(*in)) - copy(*out, *in) - } - if in.FSGroup != nil { - in, out := &in.FSGroup, &out.FSGroup - *out = new(int64) - **out = **in - } - if in.Sysctls != nil { - in, out := &in.Sysctls, &out.Sysctls - *out = make([]Sysctl, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityContext. -func (in *PodSecurityContext) DeepCopy() *PodSecurityContext { - if in == nil { - return nil - } - out := new(PodSecurityContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSignature) DeepCopyInto(out *PodSignature) { - *out = *in - if in.PodController != nil { - in, out := &in.PodController, &out.PodController - *out = new(metav1.OwnerReference) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSignature. -func (in *PodSignature) DeepCopy() *PodSignature { - if in == nil { - return nil - } - out := new(PodSignature) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodSpec) DeepCopyInto(out *PodSpec) { - *out = *in - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.InitContainers != nil { - in, out := &in.InitContainers, &out.InitContainers - *out = make([]Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Containers != nil { - in, out := &in.Containers, &out.Containers - *out = make([]Container, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.TerminationGracePeriodSeconds != nil { - in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds - *out = new(int64) - **out = **in - } - if in.ActiveDeadlineSeconds != nil { - in, out := &in.ActiveDeadlineSeconds, &out.ActiveDeadlineSeconds - *out = new(int64) - **out = **in - } - if in.NodeSelector != nil { - in, out := &in.NodeSelector, &out.NodeSelector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - if in.ShareProcessNamespace != nil { - in, out := &in.ShareProcessNamespace, &out.ShareProcessNamespace - *out = new(bool) - **out = **in - } - if in.SecurityContext != nil { - in, out := &in.SecurityContext, &out.SecurityContext - *out = new(PodSecurityContext) - (*in).DeepCopyInto(*out) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.Affinity != nil { - in, out := &in.Affinity, &out.Affinity - *out = new(Affinity) - (*in).DeepCopyInto(*out) - } - if in.Tolerations != nil { - in, out := &in.Tolerations, &out.Tolerations - *out = make([]Toleration, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.HostAliases != nil { - in, out := &in.HostAliases, &out.HostAliases - *out = make([]HostAlias, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Priority != nil { - in, out := &in.Priority, &out.Priority - *out = new(int32) - **out = **in - } - if in.DNSConfig != nil { - in, out := &in.DNSConfig, &out.DNSConfig - *out = new(PodDNSConfig) - (*in).DeepCopyInto(*out) - } - if in.ReadinessGates != nil { - in, out := &in.ReadinessGates, &out.ReadinessGates - *out = make([]PodReadinessGate, len(*in)) - copy(*out, *in) - } - if in.RuntimeClassName != nil { - in, out := &in.RuntimeClassName, &out.RuntimeClassName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSpec. -func (in *PodSpec) DeepCopy() *PodSpec { - if in == nil { - return nil - } - out := new(PodSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PodStatus) DeepCopyInto(out *PodStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]PodCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.StartTime != nil { - in, out := &in.StartTime, &out.StartTime - *out = (*in).DeepCopy() - } - if in.InitContainerStatuses != nil { - in, out := &in.InitContainerStatuses, &out.InitContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.ContainerStatuses != nil { - in, out := &in.ContainerStatuses, &out.ContainerStatuses - *out = make([]ContainerStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatus. -func (in *PodStatus) DeepCopy() *PodStatus { - if in == nil { - return nil - } - out := new(PodStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodStatusResult) DeepCopyInto(out *PodStatusResult) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodStatusResult. -func (in *PodStatusResult) DeepCopy() *PodStatusResult { - if in == nil { - return nil - } - out := new(PodStatusResult) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodStatusResult) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplate) DeepCopyInto(out *PodTemplate) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplate. -func (in *PodTemplate) DeepCopy() *PodTemplate { - if in == nil { - return nil - } - out := new(PodTemplate) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodTemplate) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplateList) DeepCopyInto(out *PodTemplateList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodTemplate, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateList. -func (in *PodTemplateList) DeepCopy() *PodTemplateList { - if in == nil { - return nil - } - out := new(PodTemplateList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodTemplateList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodTemplateSpec) DeepCopyInto(out *PodTemplateSpec) { - *out = *in - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodTemplateSpec. -func (in *PodTemplateSpec) DeepCopy() *PodTemplateSpec { - if in == nil { - return nil - } - out := new(PodTemplateSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PortworxVolumeSource) DeepCopyInto(out *PortworxVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PortworxVolumeSource. -func (in *PortworxVolumeSource) DeepCopy() *PortworxVolumeSource { - if in == nil { - return nil - } - out := new(PortworxVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Preconditions) DeepCopyInto(out *Preconditions) { - *out = *in - if in.UID != nil { - in, out := &in.UID, &out.UID - *out = new(types.UID) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Preconditions. -func (in *Preconditions) DeepCopy() *Preconditions { - if in == nil { - return nil - } - out := new(Preconditions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreferAvoidPodsEntry) DeepCopyInto(out *PreferAvoidPodsEntry) { - *out = *in - in.PodSignature.DeepCopyInto(&out.PodSignature) - in.EvictionTime.DeepCopyInto(&out.EvictionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferAvoidPodsEntry. -func (in *PreferAvoidPodsEntry) DeepCopy() *PreferAvoidPodsEntry { - if in == nil { - return nil - } - out := new(PreferAvoidPodsEntry) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PreferredSchedulingTerm) DeepCopyInto(out *PreferredSchedulingTerm) { - *out = *in - in.Preference.DeepCopyInto(&out.Preference) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PreferredSchedulingTerm. -func (in *PreferredSchedulingTerm) DeepCopy() *PreferredSchedulingTerm { - if in == nil { - return nil - } - out := new(PreferredSchedulingTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Probe) DeepCopyInto(out *Probe) { - *out = *in - in.Handler.DeepCopyInto(&out.Handler) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Probe. -func (in *Probe) DeepCopy() *Probe { - if in == nil { - return nil - } - out := new(Probe) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ProjectedVolumeSource) DeepCopyInto(out *ProjectedVolumeSource) { - *out = *in - if in.Sources != nil { - in, out := &in.Sources, &out.Sources - *out = make([]VolumeProjection, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ProjectedVolumeSource. -func (in *ProjectedVolumeSource) DeepCopy() *ProjectedVolumeSource { - if in == nil { - return nil - } - out := new(ProjectedVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *QuobyteVolumeSource) DeepCopyInto(out *QuobyteVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new QuobyteVolumeSource. -func (in *QuobyteVolumeSource) DeepCopy() *QuobyteVolumeSource { - if in == nil { - return nil - } - out := new(QuobyteVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RBDPersistentVolumeSource) DeepCopyInto(out *RBDPersistentVolumeSource) { - *out = *in - if in.CephMonitors != nil { - in, out := &in.CephMonitors, &out.CephMonitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDPersistentVolumeSource. -func (in *RBDPersistentVolumeSource) DeepCopy() *RBDPersistentVolumeSource { - if in == nil { - return nil - } - out := new(RBDPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RBDVolumeSource) DeepCopyInto(out *RBDVolumeSource) { - *out = *in - if in.CephMonitors != nil { - in, out := &in.CephMonitors, &out.CephMonitors - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RBDVolumeSource. -func (in *RBDVolumeSource) DeepCopy() *RBDVolumeSource { - if in == nil { - return nil - } - out := new(RBDVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RangeAllocation) DeepCopyInto(out *RangeAllocation) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make([]byte, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RangeAllocation. -func (in *RangeAllocation) DeepCopy() *RangeAllocation { - if in == nil { - return nil - } - out := new(RangeAllocation) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *RangeAllocation) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationController) DeepCopyInto(out *ReplicationController) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationController. -func (in *ReplicationController) DeepCopy() *ReplicationController { - if in == nil { - return nil - } - out := new(ReplicationController) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationController) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerCondition) DeepCopyInto(out *ReplicationControllerCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerCondition. -func (in *ReplicationControllerCondition) DeepCopy() *ReplicationControllerCondition { - if in == nil { - return nil - } - out := new(ReplicationControllerCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerList) DeepCopyInto(out *ReplicationControllerList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicationController, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerList. -func (in *ReplicationControllerList) DeepCopy() *ReplicationControllerList { - if in == nil { - return nil - } - out := new(ReplicationControllerList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationControllerList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerSpec) DeepCopyInto(out *ReplicationControllerSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.Template != nil { - in, out := &in.Template, &out.Template - *out = new(PodTemplateSpec) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerSpec. 
-func (in *ReplicationControllerSpec) DeepCopy() *ReplicationControllerSpec { - if in == nil { - return nil - } - out := new(ReplicationControllerSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicationControllerStatus) DeepCopyInto(out *ReplicationControllerStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicationControllerCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerStatus. -func (in *ReplicationControllerStatus) DeepCopy() *ReplicationControllerStatus { - if in == nil { - return nil - } - out := new(ReplicationControllerStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceFieldSelector) DeepCopyInto(out *ResourceFieldSelector) { - *out = *in - out.Divisor = in.Divisor.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceFieldSelector. -func (in *ResourceFieldSelector) DeepCopy() *ResourceFieldSelector { - if in == nil { - return nil - } - out := new(ResourceFieldSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in ResourceList) DeepCopyInto(out *ResourceList) { - { - in := &in - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - return - } -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceList. -func (in ResourceList) DeepCopy() ResourceList { - if in == nil { - return nil - } - out := new(ResourceList) - in.DeepCopyInto(out) - return *out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuota) DeepCopyInto(out *ResourceQuota) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuota. -func (in *ResourceQuota) DeepCopy() *ResourceQuota { - if in == nil { - return nil - } - out := new(ResourceQuota) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceQuota) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuotaList) DeepCopyInto(out *ResourceQuotaList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ResourceQuota, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaList. 
-func (in *ResourceQuotaList) DeepCopy() *ResourceQuotaList { - if in == nil { - return nil - } - out := new(ResourceQuotaList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ResourceQuotaList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuotaSpec) DeepCopyInto(out *ResourceQuotaSpec) { - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Scopes != nil { - in, out := &in.Scopes, &out.Scopes - *out = make([]ResourceQuotaScope, len(*in)) - copy(*out, *in) - } - if in.ScopeSelector != nil { - in, out := &in.ScopeSelector, &out.ScopeSelector - *out = new(ScopeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaSpec. -func (in *ResourceQuotaSpec) DeepCopy() *ResourceQuotaSpec { - if in == nil { - return nil - } - out := new(ResourceQuotaSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceQuotaStatus) DeepCopyInto(out *ResourceQuotaStatus) { - *out = *in - if in.Hard != nil { - in, out := &in.Hard, &out.Hard - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Used != nil { - in, out := &in.Used, &out.Used - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceQuotaStatus. -func (in *ResourceQuotaStatus) DeepCopy() *ResourceQuotaStatus { - if in == nil { - return nil - } - out := new(ResourceQuotaStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceRequirements) DeepCopyInto(out *ResourceRequirements) { - *out = *in - if in.Limits != nil { - in, out := &in.Limits, &out.Limits - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - if in.Requests != nil { - in, out := &in.Requests, &out.Requests - *out = make(ResourceList, len(*in)) - for key, val := range *in { - (*out)[key] = val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceRequirements. -func (in *ResourceRequirements) DeepCopy() *ResourceRequirements { - if in == nil { - return nil - } - out := new(ResourceRequirements) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxOptions) DeepCopyInto(out *SELinuxOptions) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxOptions. 
-func (in *SELinuxOptions) DeepCopy() *SELinuxOptions { - if in == nil { - return nil - } - out := new(SELinuxOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleIOPersistentVolumeSource) DeepCopyInto(out *ScaleIOPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(SecretReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOPersistentVolumeSource. -func (in *ScaleIOPersistentVolumeSource) DeepCopy() *ScaleIOPersistentVolumeSource { - if in == nil { - return nil - } - out := new(ScaleIOPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleIOVolumeSource) DeepCopyInto(out *ScaleIOVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleIOVolumeSource. -func (in *ScaleIOVolumeSource) DeepCopy() *ScaleIOVolumeSource { - if in == nil { - return nil - } - out := new(ScaleIOVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScopeSelector) DeepCopyInto(out *ScopeSelector) { - *out = *in - if in.MatchExpressions != nil { - in, out := &in.MatchExpressions, &out.MatchExpressions - *out = make([]ScopedResourceSelectorRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopeSelector. -func (in *ScopeSelector) DeepCopy() *ScopeSelector { - if in == nil { - return nil - } - out := new(ScopeSelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScopedResourceSelectorRequirement) DeepCopyInto(out *ScopedResourceSelectorRequirement) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScopedResourceSelectorRequirement. -func (in *ScopedResourceSelectorRequirement) DeepCopy() *ScopedResourceSelectorRequirement { - if in == nil { - return nil - } - out := new(ScopedResourceSelectorRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *Secret) DeepCopyInto(out *Secret) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Data != nil { - in, out := &in.Data, &out.Data - *out = make(map[string][]byte, len(*in)) - for key, val := range *in { - var outVal []byte - if val == nil { - (*out)[key] = nil - } else { - in, out := &val, &outVal - *out = make([]byte, len(*in)) - copy(*out, *in) - } - (*out)[key] = outVal - } - } - if in.StringData != nil { - in, out := &in.StringData, &out.StringData - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Secret. -func (in *Secret) DeepCopy() *Secret { - if in == nil { - return nil - } - out := new(Secret) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Secret) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretEnvSource) DeepCopyInto(out *SecretEnvSource) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretEnvSource. -func (in *SecretEnvSource) DeepCopy() *SecretEnvSource { - if in == nil { - return nil - } - out := new(SecretEnvSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretKeySelector) DeepCopyInto(out *SecretKeySelector) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretKeySelector. -func (in *SecretKeySelector) DeepCopy() *SecretKeySelector { - if in == nil { - return nil - } - out := new(SecretKeySelector) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretList) DeepCopyInto(out *SecretList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Secret, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretList. -func (in *SecretList) DeepCopy() *SecretList { - if in == nil { - return nil - } - out := new(SecretList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SecretList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SecretProjection) DeepCopyInto(out *SecretProjection) { - *out = *in - out.LocalObjectReference = in.LocalObjectReference - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretProjection. -func (in *SecretProjection) DeepCopy() *SecretProjection { - if in == nil { - return nil - } - out := new(SecretProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretReference) DeepCopyInto(out *SecretReference) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretReference. -func (in *SecretReference) DeepCopy() *SecretReference { - if in == nil { - return nil - } - out := new(SecretReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecretVolumeSource) DeepCopyInto(out *SecretVolumeSource) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]KeyToPath, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.DefaultMode != nil { - in, out := &in.DefaultMode, &out.DefaultMode - *out = new(int32) - **out = **in - } - if in.Optional != nil { - in, out := &in.Optional, &out.Optional - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecretVolumeSource. -func (in *SecretVolumeSource) DeepCopy() *SecretVolumeSource { - if in == nil { - return nil - } - out := new(SecretVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SecurityContext) DeepCopyInto(out *SecurityContext) { - *out = *in - if in.Capabilities != nil { - in, out := &in.Capabilities, &out.Capabilities - *out = new(Capabilities) - (*in).DeepCopyInto(*out) - } - if in.Privileged != nil { - in, out := &in.Privileged, &out.Privileged - *out = new(bool) - **out = **in - } - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(SELinuxOptions) - **out = **in - } - if in.RunAsUser != nil { - in, out := &in.RunAsUser, &out.RunAsUser - *out = new(int64) - **out = **in - } - if in.RunAsGroup != nil { - in, out := &in.RunAsGroup, &out.RunAsGroup - *out = new(int64) - **out = **in - } - if in.RunAsNonRoot != nil { - in, out := &in.RunAsNonRoot, &out.RunAsNonRoot - *out = new(bool) - **out = **in - } - if in.ReadOnlyRootFilesystem != nil { - in, out := &in.ReadOnlyRootFilesystem, &out.ReadOnlyRootFilesystem - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.ProcMount != nil { - in, out := &in.ProcMount, &out.ProcMount - *out = new(ProcMountType) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SecurityContext. 
-func (in *SecurityContext) DeepCopy() *SecurityContext { - if in == nil { - return nil - } - out := new(SecurityContext) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SerializedReference) DeepCopyInto(out *SerializedReference) { - *out = *in - out.TypeMeta = in.TypeMeta - out.Reference = in.Reference - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SerializedReference. -func (in *SerializedReference) DeepCopy() *SerializedReference { - if in == nil { - return nil - } - out := new(SerializedReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *SerializedReference) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Service) DeepCopyInto(out *Service) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Service. -func (in *Service) DeepCopy() *Service { - if in == nil { - return nil - } - out := new(Service) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Service) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAccount) DeepCopyInto(out *ServiceAccount) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Secrets != nil { - in, out := &in.Secrets, &out.Secrets - *out = make([]ObjectReference, len(*in)) - copy(*out, *in) - } - if in.ImagePullSecrets != nil { - in, out := &in.ImagePullSecrets, &out.ImagePullSecrets - *out = make([]LocalObjectReference, len(*in)) - copy(*out, *in) - } - if in.AutomountServiceAccountToken != nil { - in, out := &in.AutomountServiceAccountToken, &out.AutomountServiceAccountToken - *out = new(bool) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccount. -func (in *ServiceAccount) DeepCopy() *ServiceAccount { - if in == nil { - return nil - } - out := new(ServiceAccount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccount) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ServiceAccountList) DeepCopyInto(out *ServiceAccountList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ServiceAccount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountList. -func (in *ServiceAccountList) DeepCopy() *ServiceAccountList { - if in == nil { - return nil - } - out := new(ServiceAccountList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceAccountList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceAccountTokenProjection) DeepCopyInto(out *ServiceAccountTokenProjection) { - *out = *in - if in.ExpirationSeconds != nil { - in, out := &in.ExpirationSeconds, &out.ExpirationSeconds - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceAccountTokenProjection. -func (in *ServiceAccountTokenProjection) DeepCopy() *ServiceAccountTokenProjection { - if in == nil { - return nil - } - out := new(ServiceAccountTokenProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceList) DeepCopyInto(out *ServiceList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Service, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceList. -func (in *ServiceList) DeepCopy() *ServiceList { - if in == nil { - return nil - } - out := new(ServiceList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServicePort) DeepCopyInto(out *ServicePort) { - *out = *in - out.TargetPort = in.TargetPort - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServicePort. -func (in *ServicePort) DeepCopy() *ServicePort { - if in == nil { - return nil - } - out := new(ServicePort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceProxyOptions) DeepCopyInto(out *ServiceProxyOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceProxyOptions. 
-func (in *ServiceProxyOptions) DeepCopy() *ServiceProxyOptions { - if in == nil { - return nil - } - out := new(ServiceProxyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ServiceProxyOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceSpec) DeepCopyInto(out *ServiceSpec) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]ServicePort, len(*in)) - copy(*out, *in) - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ExternalIPs != nil { - in, out := &in.ExternalIPs, &out.ExternalIPs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.LoadBalancerSourceRanges != nil { - in, out := &in.LoadBalancerSourceRanges, &out.LoadBalancerSourceRanges - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.SessionAffinityConfig != nil { - in, out := &in.SessionAffinityConfig, &out.SessionAffinityConfig - *out = new(SessionAffinityConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceSpec. -func (in *ServiceSpec) DeepCopy() *ServiceSpec { - if in == nil { - return nil - } - out := new(ServiceSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ServiceStatus) DeepCopyInto(out *ServiceStatus) { - *out = *in - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServiceStatus. -func (in *ServiceStatus) DeepCopy() *ServiceStatus { - if in == nil { - return nil - } - out := new(ServiceStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SessionAffinityConfig) DeepCopyInto(out *SessionAffinityConfig) { - *out = *in - if in.ClientIP != nil { - in, out := &in.ClientIP, &out.ClientIP - *out = new(ClientIPConfig) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SessionAffinityConfig. -func (in *SessionAffinityConfig) DeepCopy() *SessionAffinityConfig { - if in == nil { - return nil - } - out := new(SessionAffinityConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageOSPersistentVolumeSource) DeepCopyInto(out *StorageOSPersistentVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(ObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSPersistentVolumeSource. 
-func (in *StorageOSPersistentVolumeSource) DeepCopy() *StorageOSPersistentVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSPersistentVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageOSVolumeSource) DeepCopyInto(out *StorageOSVolumeSource) { - *out = *in - if in.SecretRef != nil { - in, out := &in.SecretRef, &out.SecretRef - *out = new(LocalObjectReference) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageOSVolumeSource. -func (in *StorageOSVolumeSource) DeepCopy() *StorageOSVolumeSource { - if in == nil { - return nil - } - out := new(StorageOSVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Sysctl) DeepCopyInto(out *Sysctl) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sysctl. -func (in *Sysctl) DeepCopy() *Sysctl { - if in == nil { - return nil - } - out := new(Sysctl) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TCPSocketAction) DeepCopyInto(out *TCPSocketAction) { - *out = *in - out.Port = in.Port - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TCPSocketAction. -func (in *TCPSocketAction) DeepCopy() *TCPSocketAction { - if in == nil { - return nil - } - out := new(TCPSocketAction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Taint) DeepCopyInto(out *Taint) { - *out = *in - if in.TimeAdded != nil { - in, out := &in.TimeAdded, &out.TimeAdded - *out = (*in).DeepCopy() - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Taint. -func (in *Taint) DeepCopy() *Taint { - if in == nil { - return nil - } - out := new(Taint) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Toleration) DeepCopyInto(out *Toleration) { - *out = *in - if in.TolerationSeconds != nil { - in, out := &in.TolerationSeconds, &out.TolerationSeconds - *out = new(int64) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Toleration. -func (in *Toleration) DeepCopy() *Toleration { - if in == nil { - return nil - } - out := new(Toleration) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TopologySelectorLabelRequirement) DeepCopyInto(out *TopologySelectorLabelRequirement) { - *out = *in - if in.Values != nil { - in, out := &in.Values, &out.Values - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorLabelRequirement. 
-func (in *TopologySelectorLabelRequirement) DeepCopy() *TopologySelectorLabelRequirement { - if in == nil { - return nil - } - out := new(TopologySelectorLabelRequirement) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TopologySelectorTerm) DeepCopyInto(out *TopologySelectorTerm) { - *out = *in - if in.MatchLabelExpressions != nil { - in, out := &in.MatchLabelExpressions, &out.MatchLabelExpressions - *out = make([]TopologySelectorLabelRequirement, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySelectorTerm. -func (in *TopologySelectorTerm) DeepCopy() *TopologySelectorTerm { - if in == nil { - return nil - } - out := new(TopologySelectorTerm) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TypedLocalObjectReference) DeepCopyInto(out *TypedLocalObjectReference) { - *out = *in - if in.APIGroup != nil { - in, out := &in.APIGroup, &out.APIGroup - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TypedLocalObjectReference. -func (in *TypedLocalObjectReference) DeepCopy() *TypedLocalObjectReference { - if in == nil { - return nil - } - out := new(TypedLocalObjectReference) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Volume) DeepCopyInto(out *Volume) { - *out = *in - in.VolumeSource.DeepCopyInto(&out.VolumeSource) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Volume. -func (in *Volume) DeepCopy() *Volume { - if in == nil { - return nil - } - out := new(Volume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeDevice) DeepCopyInto(out *VolumeDevice) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeDevice. -func (in *VolumeDevice) DeepCopy() *VolumeDevice { - if in == nil { - return nil - } - out := new(VolumeDevice) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeMount) DeepCopyInto(out *VolumeMount) { - *out = *in - if in.MountPropagation != nil { - in, out := &in.MountPropagation, &out.MountPropagation - *out = new(MountPropagationMode) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeMount. -func (in *VolumeMount) DeepCopy() *VolumeMount { - if in == nil { - return nil - } - out := new(VolumeMount) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeNodeAffinity) DeepCopyInto(out *VolumeNodeAffinity) { - *out = *in - if in.Required != nil { - in, out := &in.Required, &out.Required - *out = new(NodeSelector) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeNodeAffinity. -func (in *VolumeNodeAffinity) DeepCopy() *VolumeNodeAffinity { - if in == nil { - return nil - } - out := new(VolumeNodeAffinity) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeProjection) DeepCopyInto(out *VolumeProjection) { - *out = *in - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretProjection) - (*in).DeepCopyInto(*out) - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIProjection) - (*in).DeepCopyInto(*out) - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapProjection) - (*in).DeepCopyInto(*out) - } - if in.ServiceAccountToken != nil { - in, out := &in.ServiceAccountToken, &out.ServiceAccountToken - *out = new(ServiceAccountTokenProjection) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeProjection. -func (in *VolumeProjection) DeepCopy() *VolumeProjection { - if in == nil { - return nil - } - out := new(VolumeProjection) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeSource) DeepCopyInto(out *VolumeSource) { - *out = *in - if in.HostPath != nil { - in, out := &in.HostPath, &out.HostPath - *out = new(HostPathVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.EmptyDir != nil { - in, out := &in.EmptyDir, &out.EmptyDir - *out = new(EmptyDirVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.GCEPersistentDisk != nil { - in, out := &in.GCEPersistentDisk, &out.GCEPersistentDisk - *out = new(GCEPersistentDiskVolumeSource) - **out = **in - } - if in.AWSElasticBlockStore != nil { - in, out := &in.AWSElasticBlockStore, &out.AWSElasticBlockStore - *out = new(AWSElasticBlockStoreVolumeSource) - **out = **in - } - if in.GitRepo != nil { - in, out := &in.GitRepo, &out.GitRepo - *out = new(GitRepoVolumeSource) - **out = **in - } - if in.Secret != nil { - in, out := &in.Secret, &out.Secret - *out = new(SecretVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.NFS != nil { - in, out := &in.NFS, &out.NFS - *out = new(NFSVolumeSource) - **out = **in - } - if in.ISCSI != nil { - in, out := &in.ISCSI, &out.ISCSI - *out = new(ISCSIVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Glusterfs != nil { - in, out := &in.Glusterfs, &out.Glusterfs - *out = new(GlusterfsVolumeSource) - **out = **in - } - if in.PersistentVolumeClaim != nil { - in, out := &in.PersistentVolumeClaim, &out.PersistentVolumeClaim - *out = new(PersistentVolumeClaimVolumeSource) - **out = **in - } - if in.RBD != nil { - in, out := &in.RBD, &out.RBD - *out = new(RBDVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.FlexVolume != nil { - in, out := &in.FlexVolume, &out.FlexVolume - *out = new(FlexVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Cinder != nil { - in, out := &in.Cinder, &out.Cinder - *out = new(CinderVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.CephFS != nil { - in, out := &in.CephFS, &out.CephFS - *out = 
new(CephFSVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.Flocker != nil { - in, out := &in.Flocker, &out.Flocker - *out = new(FlockerVolumeSource) - **out = **in - } - if in.DownwardAPI != nil { - in, out := &in.DownwardAPI, &out.DownwardAPI - *out = new(DownwardAPIVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.FC != nil { - in, out := &in.FC, &out.FC - *out = new(FCVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.AzureFile != nil { - in, out := &in.AzureFile, &out.AzureFile - *out = new(AzureFileVolumeSource) - **out = **in - } - if in.ConfigMap != nil { - in, out := &in.ConfigMap, &out.ConfigMap - *out = new(ConfigMapVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.VsphereVolume != nil { - in, out := &in.VsphereVolume, &out.VsphereVolume - *out = new(VsphereVirtualDiskVolumeSource) - **out = **in - } - if in.Quobyte != nil { - in, out := &in.Quobyte, &out.Quobyte - *out = new(QuobyteVolumeSource) - **out = **in - } - if in.AzureDisk != nil { - in, out := &in.AzureDisk, &out.AzureDisk - *out = new(AzureDiskVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.PhotonPersistentDisk != nil { - in, out := &in.PhotonPersistentDisk, &out.PhotonPersistentDisk - *out = new(PhotonPersistentDiskVolumeSource) - **out = **in - } - if in.Projected != nil { - in, out := &in.Projected, &out.Projected - *out = new(ProjectedVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.PortworxVolume != nil { - in, out := &in.PortworxVolume, &out.PortworxVolume - *out = new(PortworxVolumeSource) - **out = **in - } - if in.ScaleIO != nil { - in, out := &in.ScaleIO, &out.ScaleIO - *out = new(ScaleIOVolumeSource) - (*in).DeepCopyInto(*out) - } - if in.StorageOS != nil { - in, out := &in.StorageOS, &out.StorageOS - *out = new(StorageOSVolumeSource) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeSource. -func (in *VolumeSource) DeepCopy() *VolumeSource { - if in == nil { - return nil - } - out := new(VolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VsphereVirtualDiskVolumeSource) DeepCopyInto(out *VsphereVirtualDiskVolumeSource) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VsphereVirtualDiskVolumeSource. -func (in *VsphereVirtualDiskVolumeSource) DeepCopy() *VsphereVirtualDiskVolumeSource { - if in == nil { - return nil - } - out := new(VsphereVirtualDiskVolumeSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *WeightedPodAffinityTerm) DeepCopyInto(out *WeightedPodAffinityTerm) { - *out = *in - in.PodAffinityTerm.DeepCopyInto(&out.PodAffinityTerm) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new WeightedPodAffinityTerm. -func (in *WeightedPodAffinityTerm) DeepCopy() *WeightedPodAffinityTerm { - if in == nil { - return nil - } - out := new(WeightedPodAffinityTerm) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/events/v1beta1/doc.go b/vendor/k8s.io/api/events/v1beta1/doc.go deleted file mode 100644 index 8b1a3e3..0000000 --- a/vendor/k8s.io/api/events/v1beta1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=events.k8s.io -package v1beta1 // import "k8s.io/api/events/v1beta1" diff --git a/vendor/k8s.io/api/events/v1beta1/generated.pb.go b/vendor/k8s.io/api/events/v1beta1/generated.pb.go deleted file mode 100644 index e24a82a..0000000 --- a/vendor/k8s.io/api/events/v1beta1/generated.pb.go +++ /dev/null @@ -1,1306 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto - - It has these top-level messages: - Event - EventList - EventSeries -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *Event) Reset() { *m = Event{} } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *EventList) Reset() { *m = EventList{} } -func (*EventList) ProtoMessage() {} -func (*EventList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *EventSeries) Reset() { *m = EventSeries{} } -func (*EventSeries) ProtoMessage() {} -func (*EventSeries) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func init() { - proto.RegisterType((*Event)(nil), "k8s.io.api.events.v1beta1.Event") - proto.RegisterType((*EventList)(nil), "k8s.io.api.events.v1beta1.EventList") - proto.RegisterType((*EventSeries)(nil), "k8s.io.api.events.v1beta1.EventSeries") -} -func (m *Event) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Event) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.EventTime.Size())) - n2, err := m.EventTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if m.Series != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Series.Size())) - n3, err := m.Series.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingController))) - i += copy(dAtA[i:], m.ReportingController) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.ReportingInstance))) - i += copy(dAtA[i:], m.ReportingInstance) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Action))) - i += copy(dAtA[i:], m.Action) - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Regarding.Size())) - n4, err := m.Regarding.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if m.Related != nil { - dAtA[i] = 0x4a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Related.Size())) - n5, err := m.Related.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Note))) - i += copy(dAtA[i:], m.Note) - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedSource.Size())) - n6, err := m.DeprecatedSource.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedFirstTimestamp.Size())) - n7, err := m.DeprecatedFirstTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x72 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedLastTimestamp.Size())) - n8, err := m.DeprecatedLastTimestamp.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x78 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeprecatedCount)) - 
return i, nil -} - -func (m *EventList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n9, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *EventSeries) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *EventSeries) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Count)) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastObservedTime.Size())) - n10, err := m.LastObservedTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.State))) - i += copy(dAtA[i:], m.State) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *Event) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.EventTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Series != nil { - l = m.Series.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.ReportingController) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.ReportingInstance) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Action) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Reason) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Regarding.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.Related != nil { - l = m.Related.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - l = len(m.Note) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Type) - n += 1 + l + sovGenerated(uint64(l)) - l = m.DeprecatedSource.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.DeprecatedFirstTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.DeprecatedLastTimestamp.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.DeprecatedCount)) - return n -} - -func (m *EventList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - return n -} - -func (m *EventSeries) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Count)) - l = m.LastObservedTime.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.State) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *Event) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Event{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `EventTime:` + strings.Replace(strings.Replace(this.EventTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `Series:` + strings.Replace(fmt.Sprintf("%v", this.Series), "EventSeries", "EventSeries", 1) + `,`, - `ReportingController:` + fmt.Sprintf("%v", this.ReportingController) + `,`, - `ReportingInstance:` + fmt.Sprintf("%v", this.ReportingInstance) + `,`, - `Action:` + fmt.Sprintf("%v", this.Action) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Regarding:` + strings.Replace(strings.Replace(this.Regarding.String(), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1), `&`, ``, 1) + `,`, - `Related:` + strings.Replace(fmt.Sprintf("%v", this.Related), "ObjectReference", "k8s_io_api_core_v1.ObjectReference", 1) + `,`, - `Note:` + fmt.Sprintf("%v", this.Note) + `,`, - `Type:` + fmt.Sprintf("%v", this.Type) + `,`, - `DeprecatedSource:` + strings.Replace(strings.Replace(this.DeprecatedSource.String(), "EventSource", "k8s_io_api_core_v1.EventSource", 1), `&`, ``, 1) + `,`, - `DeprecatedFirstTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedFirstTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `DeprecatedLastTimestamp:` + strings.Replace(strings.Replace(this.DeprecatedLastTimestamp.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `DeprecatedCount:` + fmt.Sprintf("%v", this.DeprecatedCount) + `,`, - `}`, - }, "") - return s -} -func (this *EventList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EventList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Event", "Event", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *EventSeries) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&EventSeries{`, - `Count:` + fmt.Sprintf("%v", this.Count) + `,`, - `LastObservedTime:` + strings.Replace(strings.Replace(this.LastObservedTime.String(), "MicroTime", "k8s_io_apimachinery_pkg_apis_meta_v1.MicroTime", 1), `&`, ``, 1) + `,`, - `State:` + fmt.Sprintf("%v", this.State) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *Event) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Event: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EventTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.EventTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Series == nil { - m.Series = &EventSeries{} - } - if err := m.Series.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReportingController", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ReportingController = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReportingInstance", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ReportingInstance = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Action = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Regarding", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Regarding.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Related", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Related == nil { - m.Related = &k8s_io_api_core_v1.ObjectReference{} - } - if err := m.Related.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Note", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Note = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedSource", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeprecatedSource.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedFirstTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeprecatedFirstTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedLastTimestamp", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.DeprecatedLastTimestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DeprecatedCount", wireType) - } - m.DeprecatedCount = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DeprecatedCount |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) 
- wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Event{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *EventSeries) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: EventSeries: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: EventSeries: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) - } - m.Count = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Count |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastObservedTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastObservedTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - 
iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.State = EventSeriesState(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/events/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 801 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xcd, 0x6e, 0xdb, 0x46, - 0x10, 0x16, 0x13, 0x4b, 0xb2, 0x56, 0x49, 0x2c, 0x6f, 0x0e, 0xde, 0xb8, 0x00, 0xa5, 0x2a, 0x40, - 0x20, 0x14, 0x08, 0x59, 0x07, 0x45, 0xdb, 0x6b, 0x18, 0xb9, 0x45, 0x02, 0xbb, 0x01, 0xd6, 0x3e, - 0x15, 0x3d, 0x64, 0x45, 0x4d, 0x68, 0x56, 0xe2, 0x2e, 0xb1, 0xbb, 0x12, 0xe0, 0x5b, 0x2f, 0x05, - 
0x7a, 0xec, 0x33, 0xf4, 0x09, 0xfa, 0x18, 0x3e, 0xe6, 0x98, 0x93, 0x50, 0xb3, 0x6f, 0xd1, 0x53, - 0xc1, 0xe5, 0x4a, 0x94, 0xf5, 0x83, 0xa8, 0xe8, 0x4d, 0x9c, 0xf9, 0x7e, 0x66, 0x66, 0x47, 0x83, - 0x82, 0xd1, 0xb7, 0xca, 0x8b, 0x85, 0x3f, 0x9a, 0x0c, 0x40, 0x72, 0xd0, 0xa0, 0xfc, 0x29, 0xf0, - 0xa1, 0x90, 0xbe, 0x4d, 0xb0, 0x34, 0xf6, 0x61, 0x0a, 0x5c, 0x2b, 0x7f, 0x7a, 0x32, 0x00, 0xcd, - 0x4e, 0xfc, 0x08, 0x38, 0x48, 0xa6, 0x61, 0xe8, 0xa5, 0x52, 0x68, 0x81, 0x9f, 0x14, 0x50, 0x8f, - 0xa5, 0xb1, 0x57, 0x40, 0x3d, 0x0b, 0x3d, 0x7e, 0x1e, 0xc5, 0xfa, 0x6a, 0x32, 0xf0, 0x42, 0x91, - 0xf8, 0x91, 0x88, 0x84, 0x6f, 0x18, 0x83, 0xc9, 0x7b, 0xf3, 0x65, 0x3e, 0xcc, 0xaf, 0x42, 0xe9, - 0xb8, 0xbb, 0x64, 0x1a, 0x0a, 0x09, 0xfe, 0x74, 0xcd, 0xed, 0xf8, 0xab, 0x12, 0x93, 0xb0, 0xf0, - 0x2a, 0xe6, 0x20, 0xaf, 0xfd, 0x74, 0x14, 0xe5, 0x01, 0xe5, 0x27, 0xa0, 0xd9, 0x26, 0x96, 0xbf, - 0x8d, 0x25, 0x27, 0x5c, 0xc7, 0x09, 0xac, 0x11, 0xbe, 0xfe, 0x14, 0x41, 0x85, 0x57, 0x90, 0xb0, - 0x55, 0x5e, 0xf7, 0x8f, 0x06, 0xaa, 0x9e, 0xe6, 0x43, 0xc0, 0xef, 0xd0, 0x7e, 0x5e, 0xcd, 0x90, - 0x69, 0x46, 0x9c, 0x8e, 0xd3, 0x6b, 0xbe, 0xf8, 0xd2, 0x2b, 0x27, 0xb5, 0x10, 0xf5, 0xd2, 0x51, - 0x94, 0x07, 0x94, 0x97, 0xa3, 0xbd, 0xe9, 0x89, 0xf7, 0x76, 0xf0, 0x33, 0x84, 0xfa, 0x1c, 0x34, - 0x0b, 0xf0, 0xcd, 0xac, 0x5d, 0xc9, 0x66, 0x6d, 0x54, 0xc6, 0xe8, 0x42, 0x15, 0xbf, 0x43, 0x0d, - 0x33, 0xef, 0xcb, 0x38, 0x01, 0x72, 0xcf, 0x58, 0xf8, 0xbb, 0x59, 0x9c, 0xc7, 0xa1, 0x14, 0x39, - 0x2d, 0x38, 0xb4, 0x0e, 0x8d, 0xd3, 0xb9, 0x12, 0x2d, 0x45, 0xf1, 0x1b, 0x54, 0x53, 0x20, 0x63, - 0x50, 0xe4, 0xbe, 0x91, 0x7f, 0xe6, 0x6d, 0x7d, 0x6b, 0xcf, 0x08, 0x5c, 0x18, 0x74, 0x80, 0xb2, - 0x59, 0xbb, 0x56, 0xfc, 0xa6, 0x56, 0x01, 0x9f, 0xa3, 0xc7, 0x12, 0x52, 0x21, 0x75, 0xcc, 0xa3, - 0x57, 0x82, 0x6b, 0x29, 0xc6, 0x63, 0x90, 0x64, 0xaf, 0xe3, 0xf4, 0x1a, 0xc1, 0x67, 0xb6, 0x8c, - 0xc7, 0x74, 0x1d, 0x42, 0x37, 0xf1, 0xf0, 0xf7, 0xe8, 0x70, 0x11, 0x7e, 0xcd, 0x95, 0x66, 0x3c, - 0x04, 0x52, 0x35, 0x62, 0x4f, 0xac, 0xd8, 0x21, 0x5d, 0x05, 0xd0, 0x75, 0x0e, 0x7e, 0x86, 0x6a, - 0x2c, 0xd4, 0xb1, 0xe0, 0xa4, 0x66, 0xd8, 0x8f, 0x2c, 0xbb, 0xf6, 0xd2, 0x44, 0xa9, 0xcd, 0xe6, - 0x38, 0x09, 0x4c, 0x09, 0x4e, 0xea, 0x77, 0x71, 0xd4, 0x44, 0xa9, 0xcd, 0xe2, 0x4b, 0xd4, 0x90, - 0x10, 0x31, 0x39, 0x8c, 0x79, 0x44, 0xf6, 0xcd, 0xd8, 0x9e, 0x2e, 0x8f, 0x2d, 0x5f, 0xec, 0xf2, - 0x99, 0x29, 0xbc, 0x07, 0x09, 0x3c, 0x5c, 0x7a, 0x09, 0x3a, 0x67, 0xd3, 0x52, 0x08, 0xbf, 0x41, - 0x75, 0x09, 0xe3, 0x7c, 0xd1, 0x48, 0x63, 0x77, 0xcd, 0x66, 0x36, 0x6b, 0xd7, 0x69, 0xc1, 0xa3, - 0x73, 0x01, 0xdc, 0x41, 0x7b, 0x5c, 0x68, 0x20, 0xc8, 0xf4, 0xf1, 0xc0, 0xfa, 0xee, 0xfd, 0x20, - 0x34, 0x50, 0x93, 0xc9, 0x11, 0xfa, 0x3a, 0x05, 0xd2, 0xbc, 0x8b, 0xb8, 0xbc, 0x4e, 0x81, 0x9a, - 0x0c, 0x06, 0xd4, 0x1a, 0x42, 0x2a, 0x21, 0xcc, 0x15, 0x2f, 0xc4, 0x44, 0x86, 0x40, 0x1e, 0x98, - 0xc2, 0xda, 0x9b, 0x0a, 0x2b, 0x96, 0xc3, 0xc0, 0x02, 0x62, 0xe5, 0x5a, 0xfd, 0x15, 0x01, 0xba, - 0x26, 0x89, 0x7f, 0x73, 0x10, 0x29, 0x83, 0xdf, 0xc5, 0x52, 0x99, 0xc5, 0x54, 0x9a, 0x25, 0x29, - 0x79, 0x68, 0xfc, 0xbe, 0xd8, 0x6d, 0xe5, 0xcd, 0xb6, 0x77, 0xac, 0x35, 0xe9, 0x6f, 0xd1, 0xa4, - 0x5b, 0xdd, 0xf0, 0xaf, 0x0e, 0x3a, 0x2a, 0x93, 0x67, 0x6c, 0xb9, 0x92, 0x47, 0xff, 0xb9, 0x92, - 0xb6, 0xad, 0xe4, 0xa8, 0xbf, 0x59, 0x92, 0x6e, 0xf3, 0xc2, 0x2f, 0xd1, 0x41, 0x99, 0x7a, 0x25, - 0x26, 0x5c, 0x93, 0x83, 0x8e, 0xd3, 0xab, 0x06, 0x47, 0x56, 0xf2, 0xa0, 0x7f, 0x37, 0x4d, 0x57, - 0xf1, 0xdd, 0x3f, 0x1d, 0x54, 0xfc, 0xdf, 0xcf, 0x62, 0xa5, 0xf1, 0x4f, 0x6b, 0x87, 0xca, 0xdb, - 0xad, 0x91, 0x9c, 0x6d, 
0xce, 0x54, 0xcb, 0x3a, 0xef, 0xcf, 0x23, 0x4b, 0x47, 0xea, 0x14, 0x55, - 0x63, 0x0d, 0x89, 0x22, 0xf7, 0x3a, 0xf7, 0x7b, 0xcd, 0x17, 0x9d, 0x4f, 0x5d, 0x90, 0xe0, 0xa1, - 0x15, 0xab, 0xbe, 0xce, 0x69, 0xb4, 0x60, 0x77, 0x33, 0x07, 0x35, 0x97, 0x2e, 0x0c, 0x7e, 0x8a, - 0xaa, 0xa1, 0xe9, 0xdd, 0x31, 0xbd, 0x2f, 0x48, 0x45, 0xc7, 0x45, 0x0e, 0x4f, 0x50, 0x6b, 0xcc, - 0x94, 0x7e, 0x3b, 0x50, 0x20, 0xa7, 0x30, 0xfc, 0x3f, 0x77, 0x72, 0xb1, 0xb4, 0x67, 0x2b, 0x82, - 0x74, 0xcd, 0x02, 0x7f, 0x83, 0xaa, 0x4a, 0x33, 0x0d, 0xe6, 0x68, 0x36, 0x82, 0xcf, 0xe7, 0xb5, - 0x5d, 0xe4, 0xc1, 0x7f, 0x66, 0xed, 0xd6, 0x52, 0x23, 0x26, 0x46, 0x0b, 0x7c, 0xf0, 0xfc, 0xe6, - 0xd6, 0xad, 0x7c, 0xb8, 0x75, 0x2b, 0x1f, 0x6f, 0xdd, 0xca, 0x2f, 0x99, 0xeb, 0xdc, 0x64, 0xae, - 0xf3, 0x21, 0x73, 0x9d, 0x8f, 0x99, 0xeb, 0xfc, 0x95, 0xb9, 0xce, 0xef, 0x7f, 0xbb, 0x95, 0x1f, - 0xeb, 0x76, 0x5e, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, 0x25, 0x9b, 0x14, 0x4d, 0xbd, 0x07, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/events/v1beta1/generated.proto b/vendor/k8s.io/api/events/v1beta1/generated.proto deleted file mode 100644 index b3e565e..0000000 --- a/vendor/k8s.io/api/events/v1beta1/generated.proto +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.events.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. -message Event { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Required. Time when this Event was first observed. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime eventTime = 2; - - // Data about the Event series this event represents or nil if it's a singleton Event. - // +optional - optional EventSeries series = 3; - - // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. - // +optional - optional string reportingController = 4; - - // ID of the controller instance, e.g. `kubelet-xyzf`. - // +optional - optional string reportingInstance = 5; - - // What action was taken/failed regarding to the regarding object. - // +optional - optional string action = 6; - - // Why the action was taken. - optional string reason = 7; - - // The object this Event is about. In most cases it's an Object reporting controller implements. - // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because - // it acts on some changes in a ReplicaSet object. 
- // +optional - optional k8s.io.api.core.v1.ObjectReference regarding = 8; - - // Optional secondary object for more complex actions. E.g. when regarding object triggers - // a creation or deletion of related object. - // +optional - optional k8s.io.api.core.v1.ObjectReference related = 9; - - // Optional. A human-readable description of the status of this operation. - // Maximal length of the note is 1kB, but libraries should be prepared to - // handle values up to 64kB. - // +optional - optional string note = 10; - - // Type of this event (Normal, Warning), new types could be added in the - // future. - // +optional - optional string type = 11; - - // Deprecated field assuring backward compatibility with core.v1 Event type - // +optional - optional k8s.io.api.core.v1.EventSource deprecatedSource = 12; - - // Deprecated field assuring backward compatibility with core.v1 Event type - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedFirstTimestamp = 13; - - // Deprecated field assuring backward compatibility with core.v1 Event type - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time deprecatedLastTimestamp = 14; - - // Deprecated field assuring backward compatibility with core.v1 Event type - // +optional - optional int32 deprecatedCount = 15; -} - -// EventList is a list of Event objects. -message EventList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated Event items = 2; -} - -// EventSeries contain information on series of events, i.e. thing that was/is happening -// continuously for some time. -message EventSeries { - // Number of occurrences in this series up to the last heartbeat time - optional int32 count = 1; - - // Time when last Event from the series was seen before last heartbeat. - optional k8s.io.apimachinery.pkg.apis.meta.v1.MicroTime lastObservedTime = 2; - - // Information whether this series is ongoing or finished. - optional string state = 3; -} - diff --git a/vendor/k8s.io/api/events/v1beta1/register.go b/vendor/k8s.io/api/events/v1beta1/register.go deleted file mode 100644 index 4506782..0000000 --- a/vendor/k8s.io/api/events/v1beta1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "events.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to api.Scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Event{}, - &EventList{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/events/v1beta1/types.go b/vendor/k8s.io/api/events/v1beta1/types.go deleted file mode 100644 index dc48ddb..0000000 --- a/vendor/k8s.io/api/events/v1beta1/types.go +++ /dev/null @@ -1,122 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system. -type Event struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Required. Time when this Event was first observed. - EventTime metav1.MicroTime `json:"eventTime" protobuf:"bytes,2,opt,name=eventTime"` - - // Data about the Event series this event represents or nil if it's a singleton Event. - // +optional - Series *EventSeries `json:"series,omitempty" protobuf:"bytes,3,opt,name=series"` - - // Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`. - // +optional - ReportingController string `json:"reportingController,omitempty" protobuf:"bytes,4,opt,name=reportingController"` - - // ID of the controller instance, e.g. `kubelet-xyzf`. - // +optional - ReportingInstance string `json:"reportingInstance,omitemtpy" protobuf:"bytes,5,opt,name=reportingInstance"` - - // What action was taken/failed regarding to the regarding object. - // +optional - Action string `json:"action,omitemtpy" protobuf:"bytes,6,name=action"` - - // Why the action was taken. - Reason string `json:"reason,omitempty" protobuf:"bytes,7,name=reason"` - - // The object this Event is about. 
In most cases it's an Object reporting controller implements. - // E.g. ReplicaSetController implements ReplicaSets and this event is emitted because - // it acts on some changes in a ReplicaSet object. - // +optional - Regarding corev1.ObjectReference `json:"regarding,omitempty" protobuf:"bytes,8,opt,name=regarding"` - - // Optional secondary object for more complex actions. E.g. when regarding object triggers - // a creation or deletion of related object. - // +optional - Related *corev1.ObjectReference `json:"related,omitempty" protobuf:"bytes,9,opt,name=related"` - - // Optional. A human-readable description of the status of this operation. - // Maximal length of the note is 1kB, but libraries should be prepared to - // handle values up to 64kB. - // +optional - Note string `json:"note,omitempty" protobuf:"bytes,10,opt,name=note"` - - // Type of this event (Normal, Warning), new types could be added in the - // future. - // +optional - Type string `json:"type,omitempty" protobuf:"bytes,11,opt,name=type"` - - // Deprecated field assuring backward compatibility with core.v1 Event type - // +optional - DeprecatedSource corev1.EventSource `json:"deprecatedSource,omitempty" protobuf:"bytes,12,opt,name=deprecatedSource"` - // Deprecated field assuring backward compatibility with core.v1 Event type - // +optional - DeprecatedFirstTimestamp metav1.Time `json:"deprecatedFirstTimestamp,omitempty" protobuf:"bytes,13,opt,name=deprecatedFirstTimestamp"` - // Deprecated field assuring backward compatibility with core.v1 Event type - // +optional - DeprecatedLastTimestamp metav1.Time `json:"deprecatedLastTimestamp,omitempty" protobuf:"bytes,14,opt,name=deprecatedLastTimestamp"` - // Deprecated field assuring backward compatibility with core.v1 Event type - // +optional - DeprecatedCount int32 `json:"deprecatedCount,omitempty" protobuf:"varint,15,opt,name=deprecatedCount"` -} - -// EventSeries contain information on series of events, i.e. thing that was/is happening -// continuously for some time. -type EventSeries struct { - // Number of occurrences in this series up to the last heartbeat time - Count int32 `json:"count" protobuf:"varint,1,opt,name=count"` - // Time when last Event from the series was seen before last heartbeat. - LastObservedTime metav1.MicroTime `json:"lastObservedTime" protobuf:"bytes,2,opt,name=lastObservedTime"` - // Information whether this series is ongoing or finished. - State EventSeriesState `json:"state" protobuf:"bytes,3,opt,name=state"` -} - -type EventSeriesState string - -const ( - EventSeriesStateOngoing EventSeriesState = "Ongoing" - EventSeriesStateFinished EventSeriesState = "Finished" - EventSeriesStateUnknown EventSeriesState = "Unknown" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// EventList is a list of Event objects. -type EventList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []Event `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index a15672c..0000000 --- a/vendor/k8s.io/api/events/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,73 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_Event = map[string]string{ - "": "Event is a report of an event somewhere in the cluster. It generally denotes some state change in the system.", - "eventTime": "Required. Time when this Event was first observed.", - "series": "Data about the Event series this event represents or nil if it's a singleton Event.", - "reportingController": "Name of the controller that emitted this Event, e.g. `kubernetes.io/kubelet`.", - "reportingInstance": "ID of the controller instance, e.g. `kubelet-xyzf`.", - "action": "What action was taken/failed regarding to the regarding object.", - "reason": "Why the action was taken.", - "regarding": "The object this Event is about. In most cases it's an Object reporting controller implements. E.g. ReplicaSetController implements ReplicaSets and this event is emitted because it acts on some changes in a ReplicaSet object.", - "related": "Optional secondary object for more complex actions. E.g. when regarding object triggers a creation or deletion of related object.", - "note": "Optional. A human-readable description of the status of this operation. Maximal length of the note is 1kB, but libraries should be prepared to handle values up to 64kB.", - "type": "Type of this event (Normal, Warning), new types could be added in the future.", - "deprecatedSource": "Deprecated field assuring backward compatibility with core.v1 Event type", - "deprecatedFirstTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", - "deprecatedLastTimestamp": "Deprecated field assuring backward compatibility with core.v1 Event type", - "deprecatedCount": "Deprecated field assuring backward compatibility with core.v1 Event type", -} - -func (Event) SwaggerDoc() map[string]string { - return map_Event -} - -var map_EventList = map[string]string{ - "": "EventList is a list of Event objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (EventList) SwaggerDoc() map[string]string { - return map_EventList -} - -var map_EventSeries = map[string]string{ - "": "EventSeries contain information on series of events, i.e. 
thing that was/is happening continuously for some time.", - "count": "Number of occurrences in this series up to the last heartbeat time", - "lastObservedTime": "Time when last Event from the series was seen before last heartbeat.", - "state": "Information whether this series is ongoing or finished.", -} - -func (EventSeries) SwaggerDoc() map[string]string { - return map_EventSeries -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index e52e142..0000000 --- a/vendor/k8s.io/api/events/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,117 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Event) DeepCopyInto(out *Event) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.EventTime.DeepCopyInto(&out.EventTime) - if in.Series != nil { - in, out := &in.Series, &out.Series - *out = new(EventSeries) - (*in).DeepCopyInto(*out) - } - out.Regarding = in.Regarding - if in.Related != nil { - in, out := &in.Related, &out.Related - *out = new(v1.ObjectReference) - **out = **in - } - out.DeprecatedSource = in.DeprecatedSource - in.DeprecatedFirstTimestamp.DeepCopyInto(&out.DeprecatedFirstTimestamp) - in.DeprecatedLastTimestamp.DeepCopyInto(&out.DeprecatedLastTimestamp) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Event. -func (in *Event) DeepCopy() *Event { - if in == nil { - return nil - } - out := new(Event) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Event) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EventList) DeepCopyInto(out *EventList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Event, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventList. -func (in *EventList) DeepCopy() *EventList { - if in == nil { - return nil - } - out := new(EventList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *EventList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *EventSeries) DeepCopyInto(out *EventSeries) { - *out = *in - in.LastObservedTime.DeepCopyInto(&out.LastObservedTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventSeries. -func (in *EventSeries) DeepCopy() *EventSeries { - if in == nil { - return nil - } - out := new(EventSeries) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/doc.go b/vendor/k8s.io/api/extensions/v1beta1/doc.go deleted file mode 100644 index 8ce1830..0000000 --- a/vendor/k8s.io/api/extensions/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -package v1beta1 // import "k8s.io/api/extensions/v1beta1" diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go b/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go deleted file mode 100644 index 72d64db..0000000 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.pb.go +++ /dev/null @@ -1,12708 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto - - It has these top-level messages: - AllowedFlexVolume - AllowedHostPath - CustomMetricCurrentStatus - CustomMetricCurrentStatusList - CustomMetricTarget - CustomMetricTargetList - DaemonSet - DaemonSetCondition - DaemonSetList - DaemonSetSpec - DaemonSetStatus - DaemonSetUpdateStrategy - Deployment - DeploymentCondition - DeploymentList - DeploymentRollback - DeploymentSpec - DeploymentStatus - DeploymentStrategy - FSGroupStrategyOptions - HTTPIngressPath - HTTPIngressRuleValue - HostPortRange - IDRange - IPBlock - Ingress - IngressBackend - IngressList - IngressRule - IngressRuleValue - IngressSpec - IngressStatus - IngressTLS - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - ReplicaSet - ReplicaSetCondition - ReplicaSetList - ReplicaSetSpec - ReplicaSetStatus - ReplicationControllerDummy - RollbackConfig - RollingUpdateDaemonSet - RollingUpdateDeployment - RunAsUserStrategyOptions - SELinuxStrategyOptions - Scale - ScaleSpec - ScaleStatus - SupplementalGroupsStrategyOptions -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *CustomMetricCurrentStatus) Reset() { *m = CustomMetricCurrentStatus{} } -func (*CustomMetricCurrentStatus) ProtoMessage() {} -func (*CustomMetricCurrentStatus) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{2} -} - -func (m *CustomMetricCurrentStatusList) Reset() { *m = CustomMetricCurrentStatusList{} } -func (*CustomMetricCurrentStatusList) ProtoMessage() {} -func (*CustomMetricCurrentStatusList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *CustomMetricTarget) Reset() { *m = CustomMetricTarget{} } -func (*CustomMetricTarget) ProtoMessage() {} -func (*CustomMetricTarget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *CustomMetricTargetList) Reset() { *m = CustomMetricTargetList{} } -func (*CustomMetricTargetList) ProtoMessage() {} -func (*CustomMetricTargetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *DaemonSet) Reset() { *m = DaemonSet{} } -func (*DaemonSet) ProtoMessage() {} -func (*DaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *DaemonSetCondition) Reset() { *m = DaemonSetCondition{} } -func (*DaemonSetCondition) ProtoMessage() {} -func (*DaemonSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *DaemonSetList) Reset() { *m = DaemonSetList{} } -func (*DaemonSetList) ProtoMessage() {} -func (*DaemonSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *DaemonSetSpec) Reset() { *m = DaemonSetSpec{} } -func (*DaemonSetSpec) ProtoMessage() {} -func (*DaemonSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *DaemonSetStatus) Reset() { *m = DaemonSetStatus{} } -func (*DaemonSetStatus) ProtoMessage() {} -func (*DaemonSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *DaemonSetUpdateStrategy) Reset() { *m = DaemonSetUpdateStrategy{} } -func (*DaemonSetUpdateStrategy) ProtoMessage() {} -func (*DaemonSetUpdateStrategy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{11} -} - -func (m *Deployment) Reset() { *m = Deployment{} } -func (*Deployment) ProtoMessage() {} -func (*Deployment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *DeploymentCondition) Reset() { *m = DeploymentCondition{} } -func (*DeploymentCondition) ProtoMessage() {} -func (*DeploymentCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{13} } - -func (m *DeploymentList) Reset() { *m = DeploymentList{} } -func (*DeploymentList) ProtoMessage() {} -func (*DeploymentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *DeploymentRollback) Reset() { *m = DeploymentRollback{} } -func (*DeploymentRollback) ProtoMessage() {} -func (*DeploymentRollback) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{15} } - -func (m 
*DeploymentSpec) Reset() { *m = DeploymentSpec{} } -func (*DeploymentSpec) ProtoMessage() {} -func (*DeploymentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{16} } - -func (m *DeploymentStatus) Reset() { *m = DeploymentStatus{} } -func (*DeploymentStatus) ProtoMessage() {} -func (*DeploymentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{17} } - -func (m *DeploymentStrategy) Reset() { *m = DeploymentStrategy{} } -func (*DeploymentStrategy) ProtoMessage() {} -func (*DeploymentStrategy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{18} } - -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{19} } - -func (m *HTTPIngressPath) Reset() { *m = HTTPIngressPath{} } -func (*HTTPIngressPath) ProtoMessage() {} -func (*HTTPIngressPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{20} } - -func (m *HTTPIngressRuleValue) Reset() { *m = HTTPIngressRuleValue{} } -func (*HTTPIngressRuleValue) ProtoMessage() {} -func (*HTTPIngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{21} } - -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{22} } - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{23} } - -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{24} } - -func (m *Ingress) Reset() { *m = Ingress{} } -func (*Ingress) ProtoMessage() {} -func (*Ingress) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{25} } - -func (m *IngressBackend) Reset() { *m = IngressBackend{} } -func (*IngressBackend) ProtoMessage() {} -func (*IngressBackend) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{26} } - -func (m *IngressList) Reset() { *m = IngressList{} } -func (*IngressList) ProtoMessage() {} -func (*IngressList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{27} } - -func (m *IngressRule) Reset() { *m = IngressRule{} } -func (*IngressRule) ProtoMessage() {} -func (*IngressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{28} } - -func (m *IngressRuleValue) Reset() { *m = IngressRuleValue{} } -func (*IngressRuleValue) ProtoMessage() {} -func (*IngressRuleValue) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{29} } - -func (m *IngressSpec) Reset() { *m = IngressSpec{} } -func (*IngressSpec) ProtoMessage() {} -func (*IngressSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{30} } - -func (m *IngressStatus) Reset() { *m = IngressStatus{} } -func (*IngressStatus) ProtoMessage() {} -func (*IngressStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{31} } - -func (m *IngressTLS) Reset() { *m = IngressTLS{} } -func (*IngressTLS) ProtoMessage() {} -func (*IngressTLS) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{32} } - -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return 
fileDescriptorGenerated, []int{33} } - -func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{34} -} - -func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } -func (*NetworkPolicyIngressRule) ProtoMessage() {} -func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{35} -} - -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{36} } - -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{37} } - -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{38} } - -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{39} } - -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{40} } - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{41} } - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{42} } - -func (m *ReplicaSet) Reset() { *m = ReplicaSet{} } -func (*ReplicaSet) ProtoMessage() {} -func (*ReplicaSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{43} } - -func (m *ReplicaSetCondition) Reset() { *m = ReplicaSetCondition{} } -func (*ReplicaSetCondition) ProtoMessage() {} -func (*ReplicaSetCondition) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{44} } - -func (m *ReplicaSetList) Reset() { *m = ReplicaSetList{} } -func (*ReplicaSetList) ProtoMessage() {} -func (*ReplicaSetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{45} } - -func (m *ReplicaSetSpec) Reset() { *m = ReplicaSetSpec{} } -func (*ReplicaSetSpec) ProtoMessage() {} -func (*ReplicaSetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{46} } - -func (m *ReplicaSetStatus) Reset() { *m = ReplicaSetStatus{} } -func (*ReplicaSetStatus) ProtoMessage() {} -func (*ReplicaSetStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{47} } - -func (m *ReplicationControllerDummy) Reset() { *m = ReplicationControllerDummy{} } -func (*ReplicationControllerDummy) ProtoMessage() {} -func (*ReplicationControllerDummy) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{48} -} - -func (m *RollbackConfig) Reset() { *m = RollbackConfig{} } -func (*RollbackConfig) ProtoMessage() {} -func (*RollbackConfig) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{49} } - -func 
(m *RollingUpdateDaemonSet) Reset() { *m = RollingUpdateDaemonSet{} } -func (*RollingUpdateDaemonSet) ProtoMessage() {} -func (*RollingUpdateDaemonSet) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{50} } - -func (m *RollingUpdateDeployment) Reset() { *m = RollingUpdateDeployment{} } -func (*RollingUpdateDeployment) ProtoMessage() {} -func (*RollingUpdateDeployment) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{51} -} - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{52} -} - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{53} } - -func (m *Scale) Reset() { *m = Scale{} } -func (*Scale) ProtoMessage() {} -func (*Scale) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{54} } - -func (m *ScaleSpec) Reset() { *m = ScaleSpec{} } -func (*ScaleSpec) ProtoMessage() {} -func (*ScaleSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{55} } - -func (m *ScaleStatus) Reset() { *m = ScaleStatus{} } -func (*ScaleStatus) ProtoMessage() {} -func (*ScaleStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{56} } - -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{57} -} - -func init() { - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.extensions.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.extensions.v1beta1.AllowedHostPath") - proto.RegisterType((*CustomMetricCurrentStatus)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatus") - proto.RegisterType((*CustomMetricCurrentStatusList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricCurrentStatusList") - proto.RegisterType((*CustomMetricTarget)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTarget") - proto.RegisterType((*CustomMetricTargetList)(nil), "k8s.io.api.extensions.v1beta1.CustomMetricTargetList") - proto.RegisterType((*DaemonSet)(nil), "k8s.io.api.extensions.v1beta1.DaemonSet") - proto.RegisterType((*DaemonSetCondition)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetCondition") - proto.RegisterType((*DaemonSetList)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetList") - proto.RegisterType((*DaemonSetSpec)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetSpec") - proto.RegisterType((*DaemonSetStatus)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetStatus") - proto.RegisterType((*DaemonSetUpdateStrategy)(nil), "k8s.io.api.extensions.v1beta1.DaemonSetUpdateStrategy") - proto.RegisterType((*Deployment)(nil), "k8s.io.api.extensions.v1beta1.Deployment") - proto.RegisterType((*DeploymentCondition)(nil), "k8s.io.api.extensions.v1beta1.DeploymentCondition") - proto.RegisterType((*DeploymentList)(nil), "k8s.io.api.extensions.v1beta1.DeploymentList") - proto.RegisterType((*DeploymentRollback)(nil), "k8s.io.api.extensions.v1beta1.DeploymentRollback") - proto.RegisterType((*DeploymentSpec)(nil), "k8s.io.api.extensions.v1beta1.DeploymentSpec") - proto.RegisterType((*DeploymentStatus)(nil), 
"k8s.io.api.extensions.v1beta1.DeploymentStatus") - proto.RegisterType((*DeploymentStrategy)(nil), "k8s.io.api.extensions.v1beta1.DeploymentStrategy") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HTTPIngressPath)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressPath") - proto.RegisterType((*HTTPIngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.HTTPIngressRuleValue") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.extensions.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.extensions.v1beta1.IDRange") - proto.RegisterType((*IPBlock)(nil), "k8s.io.api.extensions.v1beta1.IPBlock") - proto.RegisterType((*Ingress)(nil), "k8s.io.api.extensions.v1beta1.Ingress") - proto.RegisterType((*IngressBackend)(nil), "k8s.io.api.extensions.v1beta1.IngressBackend") - proto.RegisterType((*IngressList)(nil), "k8s.io.api.extensions.v1beta1.IngressList") - proto.RegisterType((*IngressRule)(nil), "k8s.io.api.extensions.v1beta1.IngressRule") - proto.RegisterType((*IngressRuleValue)(nil), "k8s.io.api.extensions.v1beta1.IngressRuleValue") - proto.RegisterType((*IngressSpec)(nil), "k8s.io.api.extensions.v1beta1.IngressSpec") - proto.RegisterType((*IngressStatus)(nil), "k8s.io.api.extensions.v1beta1.IngressStatus") - proto.RegisterType((*IngressTLS)(nil), "k8s.io.api.extensions.v1beta1.IngressTLS") - proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicy") - proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyEgressRule") - proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyIngressRule") - proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyList") - proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPeer") - proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicyPort") - proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.NetworkPolicySpec") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.extensions.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*ReplicaSet)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSet") - proto.RegisterType((*ReplicaSetCondition)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetCondition") - proto.RegisterType((*ReplicaSetList)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetList") - proto.RegisterType((*ReplicaSetSpec)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetSpec") - proto.RegisterType((*ReplicaSetStatus)(nil), "k8s.io.api.extensions.v1beta1.ReplicaSetStatus") - proto.RegisterType((*ReplicationControllerDummy)(nil), "k8s.io.api.extensions.v1beta1.ReplicationControllerDummy") - proto.RegisterType((*RollbackConfig)(nil), "k8s.io.api.extensions.v1beta1.RollbackConfig") - proto.RegisterType((*RollingUpdateDaemonSet)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDaemonSet") - proto.RegisterType((*RollingUpdateDeployment)(nil), "k8s.io.api.extensions.v1beta1.RollingUpdateDeployment") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), 
"k8s.io.api.extensions.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*Scale)(nil), "k8s.io.api.extensions.v1beta1.Scale") - proto.RegisterType((*ScaleSpec)(nil), "k8s.io.api.extensions.v1beta1.ScaleSpec") - proto.RegisterType((*ScaleStatus)(nil), "k8s.io.api.extensions.v1beta1.ScaleStatus") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.extensions.v1beta1.SupplementalGroupsStrategyOptions") -} -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil -} - -func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *CustomMetricCurrentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricCurrentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentValue.Size())) - n1, err := m.CurrentValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - -func (m *CustomMetricCurrentStatusList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricCurrentStatusList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *CustomMetricTarget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *CustomMetricTarget) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TargetValue.Size())) - n2, err := m.TargetValue.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *CustomMetricTargetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m 
*CustomMetricTargetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSet) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSet) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *DaemonSetCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n6, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func (m *DaemonSetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n7, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Selector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n9, err := 
m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdateStrategy.Size())) - n10, err := m.UpdateStrategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.TemplateGeneration)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - return i, nil -} - -func (m *DaemonSetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentNumberScheduled)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberMisscheduled)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredNumberScheduled)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberReady)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedNumberScheduled)) - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberAvailable)) - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NumberUnavailable)) - if m.CollisionCount != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DaemonSetUpdateStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DaemonSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n11, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - } - return i, nil -} - -func (m *Deployment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n12, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n13, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n14, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - return i, 
nil -} - -func (m *DeploymentCondition) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentCondition) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Status))) - i += copy(dAtA[i:], m.Status) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Reason))) - i += copy(dAtA[i:], m.Reason) - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastUpdateTime.Size())) - n15, err := m.LastUpdateTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.LastTransitionTime.Size())) - n16, err := m.LastTransitionTime.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - return i, nil -} - -func (m *DeploymentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n17, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *DeploymentRollback) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentRollback) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - if len(m.UpdatedAnnotations) > 0 { - keysForUpdatedAnnotations := make([]string, 0, len(m.UpdatedAnnotations)) - for k := range m.UpdatedAnnotations { - keysForUpdatedAnnotations = append(keysForUpdatedAnnotations, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForUpdatedAnnotations) - for _, k := range keysForUpdatedAnnotations { - dAtA[i] = 0x12 - i++ - v := m.UpdatedAnnotations[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n18, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - return i, nil -} - -func (m *DeploymentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], 
nil -} - -func (m *DeploymentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Replicas != nil { - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.Replicas)) - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n19, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n19 - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Template.Size())) - n20, err := m.Template.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n20 - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Strategy.Size())) - n21, err := m.Strategy.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n21 - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinReadySeconds)) - if m.RevisionHistoryLimit != nil { - dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.RevisionHistoryLimit)) - } - dAtA[i] = 0x38 - i++ - if m.Paused { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.RollbackTo != nil { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollbackTo.Size())) - n22, err := m.RollbackTo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n22 - } - if m.ProgressDeadlineSeconds != nil { - dAtA[i] = 0x48 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.ProgressDeadlineSeconds)) - } - return i, nil -} - -func (m *DeploymentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Replicas)) - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UpdatedReplicas)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AvailableReplicas)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.UnavailableReplicas)) - if len(m.Conditions) > 0 { - for _, msg := range m.Conditions { - dAtA[i] = 0x32 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x38 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ReadyReplicas)) - if m.CollisionCount != nil { - dAtA[i] = 0x40 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(*m.CollisionCount)) - } - return i, nil -} - -func (m *DeploymentStrategy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeploymentStrategy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Type))) - i += copy(dAtA[i:], m.Type) - if m.RollingUpdate != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RollingUpdate.Size())) - n23, err := m.RollingUpdate.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n23 - } - return i, nil -} - -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, 
[... deletion of a vendored, machine-generated Kubernetes protobuf file continues here.
The removed hunk is the generated Marshal/MarshalTo, Size and String methods for the
extensions/v1beta1 API types (DaemonSet, Deployment, Ingress, NetworkPolicy,
PodSecurityPolicy, ReplicaSet, RollingUpdate, Scale and their supporting rule and
strategy-option types), plus the encodeFixed32/64 helpers. Only the two shared varint
helpers are reproduced; the rest of the deleted generated code is elided and its Size
and String methods continue below. ...]

-func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
-	for v >= 1<<7 {
-		dAtA[offset] = uint8(v&0x7f | 0x80)
-		v >>= 7
-		offset++
-	}
-	dAtA[offset] = uint8(v)
-	return offset + 1
-}
-func sovGenerated(x uint64) (n int) {
-	for {
-		n++
-		x >>= 7
-		if x == 0 {
-			break
-		}
-	}
-	return n
-}
strings.Replace(strings.Replace(this.LastTransitionTime.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ReplicaSet", "ReplicaSet", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetSpec{`, - `Replicas:` + valueToStringGenerated(this.Replicas) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `Template:` + strings.Replace(strings.Replace(this.Template.String(), "PodTemplateSpec", "k8s_io_api_core_v1.PodTemplateSpec", 1), `&`, ``, 1) + `,`, - `MinReadySeconds:` + fmt.Sprintf("%v", this.MinReadySeconds) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicaSetStatus) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicaSetStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `FullyLabeledReplicas:` + fmt.Sprintf("%v", this.FullyLabeledReplicas) + `,`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `ReadyReplicas:` + fmt.Sprintf("%v", this.ReadyReplicas) + `,`, - `AvailableReplicas:` + fmt.Sprintf("%v", this.AvailableReplicas) + `,`, - `Conditions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Conditions), "ReplicaSetCondition", "ReplicaSetCondition", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ReplicationControllerDummy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ReplicationControllerDummy{`, - `}`, - }, "") - return s -} -func (this *RollbackConfig) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollbackConfig{`, - `Revision:` + fmt.Sprintf("%v", this.Revision) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDaemonSet) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDaemonSet{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RollingUpdateDeployment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RollingUpdateDeployment{`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `MaxSurge:` + strings.Replace(fmt.Sprintf("%v", this.MaxSurge), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") 
- return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *Scale) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Scale{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "ScaleSpec", "ScaleSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "ScaleStatus", "ScaleStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ScaleSpec{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `}`, - }, "") - return s -} -func (this *ScaleStatus) String() string { - if this == nil { - return "nil" - } - keysForSelector := make([]string, 0, len(this.Selector)) - for k := range this.Selector { - keysForSelector = append(keysForSelector, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForSelector) - mapStringForSelector := "map[string]string{" - for _, k := range keysForSelector { - mapStringForSelector += fmt.Sprintf("%v: %v,", k, this.Selector[k]) - } - mapStringForSelector += "}" - s := strings.Join([]string{`&ScaleStatus{`, - `Replicas:` + fmt.Sprintf("%v", this.Replicas) + `,`, - `Selector:` + mapStringForSelector + `,`, - `TargetSelector:` + fmt.Sprintf("%v", this.TargetSelector) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if 
intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.CurrentValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricCurrentStatusList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricCurrentStatusList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricCurrentStatus{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTarget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 
{ - return fmt.Errorf("proto: CustomMetricTarget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTarget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TargetValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.TargetValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *CustomMetricTargetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: CustomMetricTargetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: CustomMetricTargetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, CustomMetricTarget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSet) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSet: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSet: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetCondition: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if 
wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetList) 
Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, DaemonSet{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = 
&k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdateStrategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.UpdateStrategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field TemplateGeneration", wireType) - } - m.TemplateGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.TemplateGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return 
fmt.Errorf("proto: DaemonSetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentNumberScheduled", wireType) - } - m.CurrentNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberMisscheduled", wireType) - } - m.NumberMisscheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberMisscheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredNumberScheduled", wireType) - } - m.DesiredNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberReady", wireType) - } - m.NumberReady = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberReady |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedNumberScheduled", wireType) - } - m.UpdatedNumberScheduled = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedNumberScheduled |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberAvailable", wireType) - } - m.NumberAvailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberAvailable |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NumberUnavailable", wireType) - } - m.NumberUnavailable = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.NumberUnavailable |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.CollisionCount = &v - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DaemonSetCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DaemonSetUpdateStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DaemonSetUpdateStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DaemonSetUpdateStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDaemonSet{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l 
{ - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Deployment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Deployment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentCondition) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentCondition: wiretype end group for non-group") - } - 
if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentCondition: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentConditionType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Status = k8s_io_api_core_v1.ConditionStatus(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Reason = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastUpdateTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastUpdateTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LastTransitionTime", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - 
return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LastTransitionTime.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Deployment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentRollback) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - 
return fmt.Errorf("proto: DeploymentRollback: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentRollback: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedAnnotations", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.UpdatedAnnotations == nil { - m.UpdatedAnnotations = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.UpdatedAnnotations[mapkey] = mapvalue - } else { - var mapvalue string - m.UpdatedAnnotations[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := 
uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Replicas = &v - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Template", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Template.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Strategy", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 
0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Strategy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field MinReadySeconds", wireType) - } - m.MinReadySeconds = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.MinReadySeconds |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RevisionHistoryLimit", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.RevisionHistoryLimit = &v - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Paused", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Paused = bool(v != 0) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollbackTo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollbackTo == nil { - m.RollbackTo = &RollbackConfig{} - } - if err := m.RollbackTo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ProgressDeadlineSeconds", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ProgressDeadlineSeconds = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStatus: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicas", wireType) - } - m.Replicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Replicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UpdatedReplicas", wireType) - } - m.UpdatedReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UpdatedReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AvailableReplicas", wireType) - } - m.AvailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.AvailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field UnavailableReplicas", wireType) - } - m.UnavailableReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.UnavailableReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Conditions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Conditions = append(m.Conditions, DeploymentCondition{}) - if err := m.Conditions[len(m.Conditions)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 7: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadyReplicas", wireType) - } - m.ReadyReplicas = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ReadyReplicas |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CollisionCount", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - m.CollisionCount = &v - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeploymentStrategy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeploymentStrategy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeploymentStrategy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Type = DeploymentStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RollingUpdate", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RollingUpdate == nil { - m.RollingUpdate = &RollingUpdateDeployment{} - } - if err := m.RollingUpdate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", 
wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Path = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HTTPIngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HTTPIngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HTTPIngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Paths = append(m.Paths, HTTPIngressPath{}) - if err := m.Paths[len(m.Paths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IDRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IPBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CIDR = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Ingress) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Ingress: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Ingress: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressBackend) Unmarshal(dAtA []byte) error { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressBackend: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressBackend: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServiceName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ServiceName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ServicePort", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ServicePort.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Ingress{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IngressRuleValue", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.IngressRuleValue.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressRuleValue) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire 
|= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressRuleValue: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressRuleValue: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HTTP", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HTTP == nil { - m.HTTP = &HTTPIngressRuleValue{} - } - if err := m.HTTP.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Backend", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Backend == nil { - m.Backend = &IngressBackend{} - } - if err := m.Backend.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field TLS", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.TLS = append(m.TLS, IngressTLS{}) - if err := m.TLS[len(m.TLS)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift 
+= 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, IngressRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LoadBalancer", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.LoadBalancer.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IngressTLS) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IngressTLS: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IngressTLS: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen 
:= int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Hosts = append(m.Hosts, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SecretName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SecretName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.To = append(m.To, NetworkPolicyPeer{}) - if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = 
append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { - l := len(dAtA) 
-	[remainder of the deleted generated protobuf code, with line breaks lost in extraction: the Unmarshal implementations for NetworkPolicyPeer, NetworkPolicyPort, NetworkPolicySpec, PodSecurityPolicy, PodSecurityPolicyList, PodSecurityPolicySpec, ReplicaSet, ReplicaSetCondition, ReplicaSetList, ReplicaSetSpec, ReplicaSetStatus, ReplicationControllerDummy, RollbackConfig, RollingUpdateDaemonSet, RollingUpdateDeployment, RunAsUserStrategyOptions, SELinuxStrategyOptions, Scale, ScaleSpec, ScaleStatus, and SupplementalGroupsStrategyOptions; the skipGenerated helper; the ErrInvalidLengthGenerated and ErrIntOverflowGenerated vars; the init() call registering "k8s.io/kubernetes/vendor/k8s.io/api/extensions/v1beta1/generated.proto"; and the gzipped fileDescriptorGenerated byte array (3665 bytes)]
0xeb, 0xac, 0x3a, 0x29, 0x6c, 0xaa, 0x9e, 0x9f, - 0xd5, 0x96, 0x70, 0x06, 0x1d, 0x67, 0x4a, 0xa9, 0x7f, 0x3e, 0x09, 0x73, 0x89, 0x78, 0x83, 0x9e, - 0xc1, 0x72, 0xc7, 0x4b, 0x4e, 0x3b, 0x6e, 0xff, 0x80, 0x38, 0xed, 0xce, 0x11, 0xe9, 0xba, 0x06, - 0xe9, 0x0a, 0x47, 0x99, 0x68, 0xae, 0x48, 0x8b, 0x97, 0xd7, 0x32, 0xb9, 0x70, 0x8e, 0x34, 0x1f, - 0x05, 0x53, 0x34, 0x6d, 0xeb, 0x94, 0x06, 0x98, 0x05, 0x81, 0x19, 0x8c, 0xc2, 0x4e, 0x8a, 0x03, - 0x67, 0x48, 0x71, 0x1b, 0xbb, 0x84, 0xea, 0x0e, 0xe9, 0x26, 0x6d, 0x2c, 0xc6, 0x6d, 0x5c, 0xcf, - 0xe4, 0xc2, 0x39, 0xd2, 0xe8, 0x5d, 0x98, 0xf6, 0xb4, 0x89, 0xf9, 0x93, 0x13, 0x1d, 0xa4, 0xc3, - 0x9d, 0x90, 0x84, 0xa3, 0x7c, 0xbc, 0x6b, 0xd6, 0x01, 0x25, 0xce, 0x09, 0xe9, 0xe6, 0x4f, 0xf0, - 0x6e, 0x8a, 0x03, 0x67, 0x48, 0xf1, 0xae, 0x79, 0x1e, 0x98, 0xea, 0xda, 0x64, 0xbc, 0x6b, 0xfb, - 0x99, 0x5c, 0x38, 0x47, 0x9a, 0xfb, 0xb1, 0x67, 0xf2, 0xea, 0x89, 0xa6, 0x1b, 0xda, 0x81, 0x41, - 0xaa, 0xe5, 0xb8, 0x1f, 0xef, 0xc4, 0xc9, 0x38, 0xc9, 0x8f, 0x9e, 0xc0, 0x82, 0xd7, 0xb4, 0x6f, - 0x6a, 0x01, 0x48, 0x45, 0x80, 0x7c, 0x4d, 0x82, 0x2c, 0xec, 0x24, 0x19, 0x70, 0x5a, 0x06, 0x7d, - 0x00, 0xb3, 0x1d, 0xcb, 0x30, 0x84, 0x3f, 0xae, 0x59, 0xae, 0xc9, 0xaa, 0x53, 0x02, 0x05, 0xf1, - 0xf5, 0xb8, 0x16, 0xa3, 0xe0, 0x04, 0x27, 0x22, 0x00, 0x1d, 0x3f, 0xe1, 0xd0, 0x2a, 0x8c, 0x54, - 0x6b, 0xa4, 0x93, 0x5e, 0x58, 0x03, 0x04, 0x4d, 0x14, 0x47, 0x80, 0xd5, 0x7f, 0x55, 0xe0, 0x66, - 0x4e, 0xe8, 0x40, 0xdf, 0x8a, 0xa5, 0xd8, 0x5f, 0x4e, 0xa4, 0xd8, 0x57, 0x72, 0xc4, 0x22, 0x79, - 0xd6, 0x84, 0x19, 0x87, 0xf7, 0xca, 0xec, 0x79, 0x2c, 0x32, 0x46, 0xbe, 0x3b, 0xa4, 0x1b, 0x38, - 0x2a, 0x13, 0xc6, 0xfc, 0x85, 0xf3, 0xb3, 0xda, 0x4c, 0x8c, 0x86, 0xe3, 0xf0, 0xea, 0x5f, 0x14, - 0x00, 0xd6, 0x89, 0x6d, 0x58, 0x83, 0x3e, 0x31, 0xaf, 0xa2, 0x86, 0xda, 0x8d, 0xd5, 0x50, 0x6f, - 0x0e, 0x9b, 0x9e, 0xc0, 0xb4, 0xdc, 0x22, 0xea, 0xd7, 0x13, 0x45, 0x54, 0x63, 0x74, 0xc8, 0x8b, - 0xab, 0xa8, 0x7f, 0x2f, 0xc2, 0x62, 0xc8, 0x1c, 0x96, 0x51, 0x8f, 0x62, 0x73, 0xfc, 0x4b, 0x89, - 0x39, 0xbe, 0x99, 0x21, 0xf2, 0xd2, 0xea, 0xa8, 0x17, 0x5f, 0xcf, 0xa0, 0x4f, 0x61, 0x96, 0x17, - 0x4e, 0x9e, 0x7b, 0x88, 0xb2, 0x6c, 0x72, 0xec, 0xb2, 0x2c, 0x48, 0xa0, 0x5b, 0x31, 0x24, 0x9c, - 0x40, 0xce, 0x29, 0x03, 0xcb, 0x2f, 0xbb, 0x0c, 0x54, 0x3f, 0x53, 0x60, 0x36, 0x9c, 0xa6, 0x2b, - 0x28, 0xda, 0x76, 0xe2, 0x45, 0xdb, 0xeb, 0x23, 0xbb, 0x68, 0x4e, 0xd5, 0xf6, 0xdf, 0xbc, 0xc0, - 0x0f, 0x98, 0xf8, 0x02, 0x3f, 0xd0, 0x3a, 0xc7, 0x23, 0x6c, 0xff, 0x7e, 0xa8, 0x00, 0x92, 0x59, - 0x60, 0xd5, 0x34, 0x2d, 0xa6, 0x79, 0xb1, 0xd2, 0x33, 0x6b, 0x73, 0x64, 0xb3, 0x7c, 0x8d, 0xf5, - 0xfd, 0x14, 0xd6, 0x63, 0x93, 0x39, 0x83, 0x70, 0x46, 0xd2, 0x0c, 0x38, 0xc3, 0x00, 0xa4, 0x01, - 0x38, 0x12, 0x73, 0xcf, 0x92, 0x0b, 0xf9, 0xcd, 0x11, 0x62, 0x1e, 0x17, 0x58, 0xb3, 0xcc, 0x43, - 0xbd, 0x17, 0x86, 0x1d, 0x1c, 0x00, 0xe1, 0x08, 0xe8, 0xad, 0xc7, 0x70, 0x33, 0xc7, 0x5a, 0x34, - 0x0f, 0xc5, 0x63, 0x32, 0xf0, 0x86, 0x0d, 0xf3, 0x9f, 0x68, 0x29, 0xba, 0x4d, 0x9e, 0x92, 0x3b, - 0xdc, 0x0f, 0x0a, 0xef, 0x2b, 0xea, 0x8f, 0x27, 0xa2, 0xbe, 0x23, 0x2a, 0xe6, 0x7b, 0x50, 0x71, - 0x88, 0x6d, 0xe8, 0x1d, 0x8d, 0xca, 0x42, 0xe8, 0xba, 0x77, 0xa4, 0xe1, 0xb5, 0xe1, 0x80, 0x1a, - 0xab, 0xad, 0x0b, 0x2f, 0xb7, 0xb6, 0x2e, 0xbe, 0x98, 0xda, 0xfa, 0xb7, 0xa0, 0x42, 0xfd, 0xaa, - 0xba, 0x24, 0x20, 0x1f, 0x8c, 0x11, 0x5f, 0x65, 0x41, 0x1d, 0x28, 0x08, 0x4a, 0xe9, 0x00, 0x34, - 0xab, 0x88, 0x9e, 0x18, 0xb3, 0x88, 0x7e, 0xa1, 0x85, 0x2f, 0x8f, 0xa9, 0xb6, 0xe6, 0x52, 0xd2, - 0x15, 0x81, 0xa8, 0x12, 0xc6, 0xd4, 0x96, 0x68, 0xc5, 0x92, 0x8a, 0x3e, 
0x8e, 0xb9, 0x6c, 0xe5, - 0x32, 0x2e, 0x3b, 0x9b, 0xef, 0xae, 0x68, 0x1f, 0x6e, 0xda, 0x8e, 0xd5, 0x73, 0x08, 0xa5, 0xeb, - 0x44, 0xeb, 0x1a, 0xba, 0x49, 0xfc, 0xf1, 0xf1, 0x2a, 0xa2, 0x57, 0xce, 0xcf, 0x6a, 0x37, 0x5b, - 0xd9, 0x2c, 0x38, 0x4f, 0x56, 0xfd, 0xbc, 0x04, 0xf3, 0xc9, 0x0c, 0x98, 0x53, 0xa4, 0x2a, 0x97, - 0x2a, 0x52, 0xef, 0x47, 0x16, 0x83, 0x57, 0xc1, 0x47, 0xce, 0xf8, 0x52, 0x0b, 0x62, 0x15, 0xe6, - 0x64, 0x34, 0xf0, 0x89, 0xb2, 0x4c, 0x0f, 0x66, 0x7f, 0x3f, 0x4e, 0xc6, 0x49, 0x7e, 0x5e, 0x7a, - 0x86, 0x15, 0xa5, 0x0f, 0x52, 0x8a, 0x97, 0x9e, 0xab, 0x49, 0x06, 0x9c, 0x96, 0x41, 0xdb, 0xb0, - 0xe8, 0x9a, 0x69, 0x28, 0xcf, 0x1b, 0x5f, 0x91, 0x50, 0x8b, 0xfb, 0x69, 0x16, 0x9c, 0x25, 0x87, - 0x0e, 0x63, 0xd5, 0xe8, 0xa4, 0x88, 0xb0, 0x0f, 0x47, 0x5e, 0x3b, 0x23, 0x97, 0xa3, 0xe8, 0x11, - 0xcc, 0x38, 0x62, 0xdf, 0xe1, 0x1b, 0xec, 0xd5, 0xee, 0x37, 0xa4, 0xd8, 0x0c, 0x8e, 0x12, 0x71, - 0x9c, 0x37, 0xa3, 0xdc, 0xae, 0x8c, 0x5a, 0x6e, 0xab, 0xff, 0xac, 0x44, 0x93, 0x50, 0x50, 0x02, - 0x0f, 0x3b, 0x65, 0x4a, 0x49, 0x44, 0xaa, 0x23, 0x2b, 0xbb, 0xfa, 0x7d, 0x6f, 0xac, 0xea, 0x37, - 0x4c, 0x9e, 0xc3, 0xcb, 0xdf, 0x1f, 0x29, 0xb0, 0xbc, 0xd1, 0x7e, 0xe2, 0x58, 0xae, 0xed, 0x9b, - 0xb3, 0x6b, 0x7b, 0xe3, 0xfa, 0x0d, 0x28, 0x39, 0xae, 0xe1, 0xf7, 0xe3, 0x35, 0xbf, 0x1f, 0xd8, - 0x35, 0x78, 0x3f, 0x16, 0x13, 0x52, 0x5e, 0x27, 0xb8, 0x00, 0xda, 0x81, 0x49, 0x47, 0x33, 0x7b, - 0xc4, 0x4f, 0xab, 0x77, 0x87, 0x58, 0xbf, 0xb9, 0x8e, 0x39, 0x7b, 0xa4, 0x78, 0x13, 0xd2, 0x58, - 0xa2, 0xa8, 0x7f, 0xa4, 0xc0, 0xdc, 0xd3, 0xbd, 0xbd, 0xd6, 0xa6, 0x29, 0x56, 0xb4, 0x38, 0x7d, - 0xbf, 0x03, 0x25, 0x5b, 0x63, 0x47, 0xc9, 0x4c, 0xcf, 0x69, 0x58, 0x50, 0xd0, 0x77, 0xa1, 0xcc, - 0x23, 0x09, 0x31, 0xbb, 0x23, 0x96, 0xda, 0x12, 0xbe, 0xe9, 0x09, 0x85, 0x15, 0xa2, 0x6c, 0xc0, - 0x3e, 0x9c, 0x7a, 0x0c, 0x4b, 0x11, 0x73, 0xf8, 0x78, 0x88, 0x63, 0x60, 0xd4, 0x86, 0x09, 0xae, - 0xd9, 0x3f, 0xe5, 0x1d, 0x76, 0x98, 0x99, 0xe8, 0x52, 0x58, 0xe9, 0xf0, 0x2f, 0x8a, 0x3d, 0x2c, - 0x75, 0x1b, 0x66, 0xc4, 0x95, 0x83, 0xe5, 0x30, 0x31, 0x2c, 0xe8, 0x36, 0x14, 0xfb, 0xba, 0x29, - 0xf3, 0xec, 0xb4, 0x94, 0x29, 0xf2, 0x1c, 0xc1, 0xdb, 0x05, 0x59, 0x3b, 0x95, 0x91, 0x27, 0x24, - 0x6b, 0xa7, 0x98, 0xb7, 0xab, 0x4f, 0xa0, 0x2c, 0x87, 0x3b, 0x0a, 0x54, 0xbc, 0x18, 0xa8, 0x98, - 0x01, 0xb4, 0x0b, 0xe5, 0xcd, 0x56, 0xd3, 0xb0, 0xbc, 0xaa, 0xab, 0xa3, 0x77, 0x9d, 0xe4, 0x5c, - 0xac, 0x6d, 0xae, 0x63, 0x2c, 0x28, 0x48, 0x85, 0x49, 0x72, 0xda, 0x21, 0x36, 0x13, 0x1e, 0x31, - 0xd5, 0x04, 0x3e, 0xcb, 0x8f, 0x45, 0x0b, 0x96, 0x14, 0xf5, 0x8f, 0x0b, 0x50, 0x96, 0xc3, 0x71, - 0x05, 0xbb, 0xb0, 0xad, 0xd8, 0x2e, 0xec, 0x8d, 0xd1, 0x5c, 0x23, 0x77, 0x0b, 0xb6, 0x97, 0xd8, - 0x82, 0xdd, 0x1f, 0x11, 0xef, 0xe2, 0xfd, 0xd7, 0x3f, 0x28, 0x30, 0x1b, 0x77, 0x4a, 0xf4, 0x2e, - 0x4c, 0xf3, 0x84, 0xa3, 0x77, 0xc8, 0x4e, 0x58, 0xe7, 0x06, 0x87, 0x30, 0xed, 0x90, 0x84, 0xa3, - 0x7c, 0xa8, 0x17, 0x88, 0x71, 0x3f, 0x92, 0x9d, 0xce, 0x1f, 0x52, 0x97, 0xe9, 0x46, 0xdd, 0xbb, - 0x46, 0xab, 0x6f, 0x9a, 0x6c, 0xd7, 0x69, 0x33, 0x47, 0x37, 0x7b, 0x29, 0x45, 0xc2, 0x29, 0xa3, - 0xc8, 0xea, 0x3f, 0x29, 0x30, 0x2d, 0x4d, 0xbe, 0x82, 0x5d, 0xc5, 0xaf, 0xc5, 0x77, 0x15, 0x77, - 0x47, 0x5c, 0xe0, 0xd9, 0x5b, 0x8a, 0xbf, 0x09, 0x4d, 0xe7, 0x4b, 0x9a, 0x7b, 0xf5, 0x91, 0x45, - 0x59, 0xd2, 0xab, 0xf9, 0x62, 0xc4, 0x82, 0x82, 0x5c, 0x98, 0xd7, 0x13, 0x31, 0x40, 0x0e, 0x6d, - 0x63, 0x34, 0x4b, 0x02, 0xb1, 0x66, 0x55, 0xc2, 0xcf, 0x27, 0x29, 0x38, 0xa5, 0x42, 0x25, 0x90, - 0xe2, 0x42, 0x1f, 0x42, 0xe9, 0x88, 0x31, 0x3b, 0xe3, 0xbc, 0x7a, 0x48, 0xe4, 0x09, 0x4d, 0xa8, - 
0x88, 0xde, 0xed, 0xed, 0xb5, 0xb0, 0x80, 0x52, 0xff, 0x27, 0x1c, 0x8f, 0xb6, 0xe7, 0xe3, 0x41, - 0x3c, 0x55, 0x2e, 0x13, 0x4f, 0xa7, 0xb3, 0x62, 0x29, 0x7a, 0x0a, 0x45, 0x66, 0x8c, 0xba, 0x2d, - 0x94, 0x88, 0x7b, 0x5b, 0xed, 0x30, 0x20, 0xed, 0x6d, 0xb5, 0x31, 0x87, 0x40, 0xbb, 0x30, 0xc1, - 0xb3, 0x0f, 0x5f, 0x82, 0xc5, 0xd1, 0x97, 0x34, 0xef, 0x7f, 0xe8, 0x10, 0xfc, 0x8b, 0x62, 0x0f, - 0x47, 0xfd, 0x3e, 0xcc, 0xc4, 0xd6, 0x29, 0xfa, 0x04, 0xae, 0x1b, 0x96, 0xd6, 0x6d, 0x6a, 0x86, - 0x66, 0x76, 0x88, 0x7f, 0x39, 0x70, 0x37, 0x6b, 0x87, 0xb1, 0x15, 0xe1, 0x93, 0xab, 0x3c, 0xb8, - 0x4e, 0x8d, 0xd2, 0x70, 0x0c, 0x51, 0xd5, 0x00, 0xc2, 0x3e, 0xa2, 0x1a, 0x4c, 0x70, 0x3f, 0xf3, - 0xf2, 0xc9, 0x54, 0x73, 0x8a, 0x5b, 0xc8, 0xdd, 0x8f, 0x62, 0xaf, 0x1d, 0x3d, 0x04, 0xa0, 0xa4, - 0xe3, 0x10, 0x26, 0x82, 0x41, 0x21, 0x7e, 0x05, 0xdd, 0x0e, 0x28, 0x38, 0xc2, 0xa5, 0xfe, 0x8b, - 0x02, 0x33, 0x3b, 0x84, 0xfd, 0xc0, 0x72, 0x8e, 0x5b, 0x96, 0xa1, 0x77, 0x06, 0x57, 0x10, 0x6c, - 0x71, 0x2c, 0xd8, 0xbe, 0x35, 0x64, 0x66, 0x62, 0xd6, 0xe5, 0x85, 0x5c, 0xf5, 0x33, 0x05, 0x6e, - 0xc6, 0x38, 0x1f, 0x87, 0x4b, 0x77, 0x1f, 0x26, 0x6c, 0xcb, 0x61, 0x7e, 0x22, 0x1e, 0x4b, 0x21, - 0x0f, 0x63, 0x91, 0x54, 0xcc, 0x61, 0xb0, 0x87, 0x86, 0xb6, 0xa0, 0xc0, 0x2c, 0xe9, 0xaa, 0xe3, - 0x61, 0x12, 0xe2, 0x34, 0x41, 0x62, 0x16, 0xf6, 0x2c, 0x5c, 0x60, 0x16, 0x9f, 0x88, 0x6a, 0x8c, - 0x2b, 0x1a, 0x7c, 0x5e, 0x52, 0x0f, 0x30, 0x94, 0x0e, 0x1d, 0xab, 0x7f, 0xe9, 0x3e, 0x04, 0x13, - 0xb1, 0xe1, 0x58, 0x7d, 0x2c, 0xb0, 0xd4, 0x9f, 0x28, 0xb0, 0x10, 0xe3, 0xbc, 0x82, 0xc0, 0xff, - 0x61, 0x3c, 0xf0, 0xdf, 0x1f, 0xa7, 0x23, 0x39, 0xe1, 0xff, 0x27, 0x85, 0x44, 0x37, 0x78, 0x87, - 0xd1, 0x21, 0x4c, 0xdb, 0x56, 0xb7, 0xfd, 0x02, 0xae, 0x03, 0xe7, 0x78, 0xde, 0x6c, 0x85, 0x58, - 0x38, 0x0a, 0x8c, 0x4e, 0x61, 0xc1, 0xd4, 0xfa, 0x84, 0xda, 0x5a, 0x87, 0xb4, 0x5f, 0xc0, 0x01, - 0xc9, 0x0d, 0x71, 0xdf, 0x90, 0x44, 0xc4, 0x69, 0x25, 0x68, 0x1b, 0xca, 0xba, 0x2d, 0xea, 0x38, - 0x59, 0xbb, 0x0c, 0xcd, 0xa2, 0x5e, 0xd5, 0xe7, 0xc5, 0x73, 0xf9, 0x81, 0x7d, 0x0c, 0xf5, 0x6f, - 0x93, 0xde, 0xc0, 0xfd, 0x0f, 0x3d, 0x81, 0x8a, 0x78, 0x84, 0xd3, 0xb1, 0x0c, 0xff, 0x66, 0x80, - 0xcf, 0x6c, 0x4b, 0xb6, 0x3d, 0x3f, 0xab, 0xbd, 0x92, 0x71, 0xe8, 0xeb, 0x93, 0x71, 0x20, 0x8c, - 0x76, 0xa0, 0x64, 0x7f, 0x95, 0x0a, 0x46, 0x24, 0x39, 0x51, 0xb6, 0x08, 0x1c, 0xf5, 0xf7, 0x8a, - 0x09, 0x73, 0x45, 0xaa, 0xfb, 0xf4, 0x85, 0xcd, 0x7a, 0x50, 0x31, 0xe5, 0xce, 0xfc, 0x01, 0x94, - 0x65, 0x86, 0x97, 0xce, 0xfc, 0x8d, 0x71, 0x9c, 0x39, 0x9a, 0xc5, 0x82, 0x0d, 0x8b, 0xdf, 0xe8, - 0x03, 0xa3, 0xef, 0xc1, 0x24, 0xf1, 0x54, 0x78, 0xb9, 0xf1, 0xbd, 0x71, 0x54, 0x84, 0x71, 0x35, - 0x2c, 0x54, 0x65, 0x9b, 0x44, 0x45, 0xdf, 0xe2, 0xe3, 0xc5, 0x79, 0xf9, 0x26, 0x90, 0x56, 0x4b, - 0x22, 0x5d, 0xdd, 0xf6, 0xba, 0x1d, 0x34, 0x3f, 0x3f, 0xab, 0x41, 0xf8, 0x89, 0xa3, 0x12, 0xea, - 0xbf, 0x29, 0xb0, 0x20, 0x46, 0xa8, 0xe3, 0x3a, 0x3a, 0x1b, 0x5c, 0x59, 0x62, 0x7a, 0x16, 0x4b, - 0x4c, 0xef, 0x0c, 0x19, 0x96, 0x94, 0x85, 0xb9, 0xc9, 0xe9, 0xa7, 0x0a, 0xdc, 0x48, 0x71, 0x5f, - 0x41, 0x5c, 0xdc, 0x8f, 0xc7, 0xc5, 0xb7, 0xc6, 0xed, 0x50, 0x4e, 0x6c, 0xfc, 0xab, 0xb9, 0x8c, - 0xee, 0x88, 0x95, 0xf2, 0x10, 0xc0, 0x76, 0xf4, 0x13, 0xdd, 0x20, 0x3d, 0x79, 0x09, 0x5e, 0x89, - 0x3c, 0x82, 0x0b, 0x28, 0x38, 0xc2, 0x85, 0x28, 0x2c, 0x77, 0xc9, 0xa1, 0xe6, 0x1a, 0x6c, 0xb5, - 0xdb, 0x5d, 0xd3, 0x6c, 0xed, 0x40, 0x37, 0x74, 0xa6, 0xcb, 0xe3, 0x82, 0xa9, 0xe6, 0x23, 0xef, - 0x72, 0x3a, 0x8b, 0xe3, 0xf9, 0x59, 0xed, 0x76, 0xd6, 0xed, 0x90, 0xcf, 0x32, 0xc0, 0x39, 0xd0, - 0x68, 0x00, 0x55, 0x87, 
0x7c, 0xdf, 0xd5, 0x1d, 0xd2, 0x5d, 0x77, 0x2c, 0x3b, 0xa6, 0xb6, 0x28, - 0xd4, 0xfe, 0xea, 0xf9, 0x59, 0xad, 0x8a, 0x73, 0x78, 0x86, 0x2b, 0xce, 0x85, 0x47, 0x9f, 0xc2, - 0xa2, 0xe6, 0xbd, 0x1d, 0x8c, 0x69, 0xf5, 0x56, 0xc9, 0xfb, 0xe7, 0x67, 0xb5, 0xc5, 0xd5, 0x34, - 0x79, 0xb8, 0xc2, 0x2c, 0x50, 0xd4, 0x80, 0xf2, 0x89, 0x78, 0xd9, 0x48, 0xab, 0x13, 0x02, 0x9f, - 0x27, 0x82, 0xb2, 0xf7, 0xd8, 0x91, 0x63, 0x4e, 0x6e, 0xb4, 0xc5, 0xea, 0xf3, 0xb9, 0xf8, 0x86, - 0x92, 0xd7, 0x92, 0x72, 0xc5, 0x8b, 0x13, 0xe3, 0x4a, 0x18, 0xb5, 0x9e, 0x86, 0x24, 0x1c, 0xe5, - 0x43, 0x1f, 0xc3, 0xd4, 0x91, 0x3c, 0x95, 0xa0, 0xd5, 0xf2, 0x48, 0x49, 0x38, 0x76, 0x8a, 0xd1, - 0x5c, 0x90, 0x2a, 0xa6, 0xfc, 0x66, 0x8a, 0x43, 0x44, 0xf4, 0x3a, 0x94, 0xc5, 0xc7, 0xe6, 0xba, - 0x38, 0x8e, 0xab, 0x84, 0xb1, 0xed, 0xa9, 0xd7, 0x8c, 0x7d, 0xba, 0xcf, 0xba, 0xd9, 0x5a, 0x13, - 0xc7, 0xc2, 0x09, 0xd6, 0xcd, 0xd6, 0x1a, 0xf6, 0xe9, 0xe8, 0x13, 0x28, 0x53, 0xb2, 0xa5, 0x9b, - 0xee, 0x69, 0x15, 0x46, 0xba, 0x54, 0x6e, 0x3f, 0x16, 0xdc, 0x89, 0x83, 0xb1, 0x50, 0x83, 0xa4, - 0x63, 0x1f, 0x16, 0x1d, 0xc1, 0x94, 0xe3, 0x9a, 0xab, 0x74, 0x9f, 0x12, 0xa7, 0x3a, 0x2d, 0x74, - 0x0c, 0x0b, 0xe7, 0xd8, 0xe7, 0x4f, 0x6a, 0x09, 0x46, 0x28, 0xe0, 0xc0, 0x21, 0x38, 0xfa, 0x43, - 0x05, 0x10, 0x75, 0x6d, 0xdb, 0x20, 0x7d, 0x62, 0x32, 0xcd, 0x10, 0x67, 0x71, 0xb4, 0x7a, 0x5d, - 0xe8, 0xfc, 0xf6, 0xb0, 0x7e, 0xa5, 0x04, 0x93, 0xca, 0x83, 0x43, 0xef, 0x34, 0x2b, 0xce, 0xd0, - 0xcb, 0x87, 0xf6, 0x90, 0x8a, 0xdf, 0xd5, 0x99, 0x91, 0x86, 0x36, 0xfb, 0xcc, 0x31, 0x1c, 0x5a, - 0x49, 0xc7, 0x3e, 0x2c, 0x7a, 0x06, 0xcb, 0xfe, 0xc3, 0x58, 0x6c, 0x59, 0x6c, 0x43, 0x37, 0x08, - 0x1d, 0x50, 0x46, 0xfa, 0xd5, 0x59, 0x31, 0xed, 0xc1, 0xdb, 0x0f, 0x9c, 0xc9, 0x85, 0x73, 0xa4, - 0x51, 0x1f, 0x6a, 0x7e, 0xc8, 0xe0, 0xeb, 0x29, 0x88, 0x59, 0x8f, 0x69, 0x47, 0x33, 0xbc, 0x7b, - 0x80, 0x39, 0xa1, 0xe0, 0xb5, 0xf3, 0xb3, 0x5a, 0x6d, 0xfd, 0x62, 0x56, 0x3c, 0x0c, 0x0b, 0x7d, - 0x17, 0xaa, 0x5a, 0x9e, 0x9e, 0x79, 0xa1, 0xe7, 0x55, 0x1e, 0x87, 0x72, 0x15, 0xe4, 0x4a, 0x23, - 0x06, 0xf3, 0x5a, 0xfc, 0x89, 0x32, 0xad, 0x2e, 0x8c, 0x74, 0x10, 0x99, 0x78, 0xd9, 0x1c, 0x1e, - 0x46, 0x24, 0x08, 0x14, 0xa7, 0x34, 0xa0, 0xdf, 0x06, 0xa4, 0x25, 0x5f, 0x55, 0xd3, 0x2a, 0x1a, - 0x29, 0xfd, 0xa4, 0x9e, 0x63, 0x87, 0x6e, 0x97, 0x22, 0x51, 0x9c, 0xa1, 0x07, 0x6d, 0xc1, 0x92, - 0x6c, 0xdd, 0x37, 0xa9, 0x76, 0x48, 0xda, 0x03, 0xda, 0x61, 0x06, 0xad, 0x2e, 0x8a, 0xd8, 0x27, - 0x2e, 0xbe, 0x56, 0x33, 0xe8, 0x38, 0x53, 0x0a, 0x7d, 0x1b, 0xe6, 0x0f, 0x2d, 0xe7, 0x40, 0xef, - 0x76, 0x89, 0xe9, 0x23, 0x2d, 0x09, 0xa4, 0x25, 0x3e, 0x1a, 0x1b, 0x09, 0x1a, 0x4e, 0x71, 0x23, - 0x0a, 0x37, 0x24, 0x72, 0xcb, 0xb1, 0x3a, 0xdb, 0x96, 0x6b, 0x32, 0xaf, 0x24, 0xba, 0x11, 0xa4, - 0x98, 0x1b, 0xab, 0x59, 0x0c, 0xcf, 0xcf, 0x6a, 0x77, 0xb2, 0x2b, 0xe0, 0x90, 0x09, 0x67, 0x63, - 0x8b, 0x17, 0x2c, 0xf2, 0x3e, 0xe3, 0x6a, 0x5e, 0x01, 0x8f, 0xf7, 0x82, 0x25, 0x34, 0xed, 0x85, - 0xbd, 0x60, 0x89, 0x40, 0x5e, 0x7c, 0x82, 0xfa, 0x5f, 0x05, 0x58, 0x0c, 0x99, 0x47, 0x7e, 0xc1, - 0x92, 0x21, 0xf2, 0x8b, 0x97, 0xc0, 0xc3, 0x5f, 0x02, 0x7f, 0xa6, 0xc0, 0x6c, 0x38, 0x74, 0xff, - 0xf7, 0x5e, 0x95, 0x84, 0xb6, 0xe5, 0xd4, 0xb9, 0x7f, 0x5f, 0x88, 0x76, 0xe0, 0xff, 0xfd, 0xd3, - 0x86, 0xaf, 0xfe, 0x7c, 0x57, 0xfd, 0x69, 0x11, 0xe6, 0x93, 0xab, 0x31, 0x76, 0x03, 0xae, 0x0c, - 0xbd, 0x01, 0x6f, 0xc1, 0xd2, 0xa1, 0x6b, 0x18, 0x03, 0x31, 0x0c, 0x91, 0x6b, 0x70, 0xef, 0x06, - 0xeb, 0x55, 0x29, 0xb9, 0xb4, 0x91, 0xc1, 0x83, 0x33, 0x25, 0x73, 0x6e, 0xf3, 0x8b, 0x97, 0xba, - 0xcd, 0x4f, 0x5d, 0x2e, 0x97, 0xc6, 0xb8, 0x5c, 
0xce, 0xbc, 0x99, 0x9f, 0xb8, 0xc4, 0xcd, 0xfc, - 0x65, 0xae, 0xd2, 0x33, 0x82, 0xd8, 0xd0, 0x97, 0x9d, 0xaf, 0xc2, 0x2d, 0x29, 0xc6, 0xc4, 0x2d, - 0xb7, 0xc9, 0x1c, 0xcb, 0x30, 0x88, 0xb3, 0xee, 0xf6, 0xfb, 0x03, 0xf5, 0x9b, 0x30, 0x1b, 0x7f, - 0xbf, 0xe1, 0xcd, 0xb4, 0xf7, 0x84, 0x44, 0xde, 0x23, 0x46, 0x66, 0xda, 0x6b, 0xc7, 0x01, 0x87, - 0xfa, 0xfb, 0x0a, 0x2c, 0x67, 0xbf, 0xd3, 0x44, 0x06, 0xcc, 0xf6, 0xb5, 0xd3, 0xe8, 0xdb, 0x59, - 0xe5, 0x92, 0x27, 0x3c, 0xe2, 0xe2, 0x7e, 0x3b, 0x86, 0x85, 0x13, 0xd8, 0xea, 0x97, 0x0a, 0xdc, - 0xcc, 0xb9, 0x32, 0xbf, 0x5a, 0x4b, 0xd0, 0x47, 0x50, 0xe9, 0x6b, 0xa7, 0x6d, 0xd7, 0xe9, 0x91, - 0x4b, 0x9f, 0x69, 0x89, 0x88, 0xb1, 0x2d, 0x51, 0x70, 0x80, 0xa7, 0xfe, 0x48, 0x81, 0x6a, 0xde, - 0xee, 0x02, 0xbd, 0x1b, 0xbb, 0xdc, 0xff, 0x7a, 0xe2, 0x72, 0x7f, 0x21, 0x25, 0xf7, 0x92, 0xae, - 0xf6, 0xff, 0x4e, 0x81, 0xe5, 0xec, 0x5d, 0x16, 0x7a, 0x3b, 0x66, 0x61, 0x2d, 0x61, 0xe1, 0x5c, - 0x42, 0x4a, 0xda, 0xf7, 0x3d, 0x98, 0x95, 0x7b, 0x31, 0x09, 0x23, 0x47, 0x55, 0xcd, 0x8a, 0x95, - 0x12, 0xc2, 0xdf, 0x7b, 0x88, 0xf9, 0x8a, 0xb7, 0xe1, 0x04, 0x9a, 0xfa, 0x07, 0x05, 0x98, 0x68, - 0x77, 0x34, 0x83, 0x5c, 0x41, 0x99, 0xf5, 0x9d, 0x58, 0x99, 0x35, 0xec, 0x7f, 0x2e, 0xc2, 0xaa, - 0xdc, 0x0a, 0x0b, 0x27, 0x2a, 0xac, 0x37, 0x46, 0x42, 0xbb, 0xb8, 0xb8, 0xfa, 0x15, 0x98, 0x0a, - 0x94, 0x8e, 0x17, 0xf3, 0xd5, 0xbf, 0x2e, 0xc0, 0x74, 0x44, 0xc5, 0x98, 0x19, 0xe3, 0x30, 0x96, - 0x69, 0x47, 0xf9, 0x77, 0x61, 0x44, 0x57, 0xdd, 0xcf, 0xad, 0xde, 0x3b, 0xcd, 0xf0, 0x65, 0x5e, - 0x3a, 0xe5, 0x7e, 0x13, 0x66, 0x99, 0xf8, 0xf7, 0x5d, 0x70, 0x12, 0x5c, 0x14, 0xbe, 0x18, 0xbc, - 0xee, 0xdd, 0x8b, 0x51, 0x71, 0x82, 0xfb, 0xd6, 0x23, 0x98, 0x89, 0x29, 0x1b, 0xeb, 0x99, 0xe5, - 0x3f, 0x2a, 0xf0, 0xf5, 0xa1, 0xfb, 0x74, 0xd4, 0x8c, 0x2d, 0x92, 0x7a, 0x62, 0x91, 0xac, 0xe4, - 0x03, 0xbc, 0xbc, 0xe7, 0x3a, 0xcd, 0x37, 0x3f, 0xff, 0x62, 0xe5, 0xda, 0xcf, 0xbe, 0x58, 0xb9, - 0xf6, 0xf3, 0x2f, 0x56, 0xae, 0xfd, 0xee, 0xf9, 0x8a, 0xf2, 0xf9, 0xf9, 0x8a, 0xf2, 0xb3, 0xf3, - 0x15, 0xe5, 0xe7, 0xe7, 0x2b, 0xca, 0x7f, 0x9c, 0xaf, 0x28, 0x7f, 0xf2, 0xe5, 0xca, 0xb5, 0x8f, - 0xca, 0x12, 0xee, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xa7, 0xd7, 0xb5, 0x56, 0x5c, 0x3d, 0x00, - 0x00, -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/generated.proto b/vendor/k8s.io/api/extensions/v1beta1/generated.proto deleted file mode 100644 index 1a9b7eb..0000000 --- a/vendor/k8s.io/api/extensions/v1beta1/generated.proto +++ /dev/null @@ -1,1166 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! 
- -syntax = 'proto2'; - -package k8s.io.api.extensions.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - -message CustomMetricCurrentStatus { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricCurrentStatusList { - repeated CustomMetricCurrentStatus items = 1; -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -message CustomMetricTarget { - // Custom Metric name. - optional string name = 1; - - // Custom Metric value (average). - optional k8s.io.apimachinery.pkg.api.resource.Quantity value = 2; -} - -message CustomMetricTargetList { - repeated CustomMetricTarget items = 1; -} - -// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for -// more information. -// DaemonSet represents the configuration of a daemon set. -message DaemonSet { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional DaemonSetSpec spec = 2; - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional DaemonSetStatus status = 3; -} - -// DaemonSetCondition describes the state of a DaemonSet at a certain point. -message DaemonSetCondition { - // Type of DaemonSet condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // Last time the condition transitioned from one status to another. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// DaemonSetList is a collection of daemon sets. -message DaemonSetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // A list of daemon sets. - repeated DaemonSet items = 2; -} - -// DaemonSetSpec is the specification of a daemon set. -message DaemonSetSpec { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - optional k8s.io.api.core.v1.PodTemplateSpec template = 2; - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - optional DaemonSetUpdateStrategy updateStrategy = 3; - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - optional int32 minReadySeconds = 4; - - // DEPRECATED. - // A sequence number representing a specific generation of the template. - // Populated by the system. It can be set only during the creation. - // +optional - optional int64 templateGeneration = 5; - - // The number of old history to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - optional int32 revisionHistoryLimit = 6; -} - -// DaemonSetStatus represents the current status of a daemon set. -message DaemonSetStatus { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 currentNumberScheduled = 1; - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 numberMisscheduled = 2; - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - optional int32 desiredNumberScheduled = 3; - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - optional int32 numberReady = 4; - - // The most recent generation observed by the daemon set controller. 
- // +optional - optional int64 observedGeneration = 5; - - // The total number of nodes that are running updated daemon pod - // +optional - optional int32 updatedNumberScheduled = 6; - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - optional int32 numberAvailable = 7; - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - optional int32 numberUnavailable = 8; - - // Count of hash collisions for the DaemonSet. The DaemonSet controller - // uses this field as a collision avoidance mechanism when it needs to - // create the name for the newest ControllerRevision. - // +optional - optional int32 collisionCount = 9; - - // Represents the latest available observations of a DaemonSet's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated DaemonSetCondition conditions = 10; -} - -message DaemonSetUpdateStrategy { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". - // Default is OnDelete. - // +optional - optional string type = 1; - - // Rolling update config params. Present only if type = "RollingUpdate". - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as Deployment `strategy.rollingUpdate`. - // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - optional RollingUpdateDaemonSet rollingUpdate = 2; -} - -// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for -// more information. -// Deployment enables declarative updates for Pods and ReplicaSets. -message Deployment { - // Standard object metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the Deployment. - // +optional - optional DeploymentSpec spec = 2; - - // Most recently observed status of the Deployment. - // +optional - optional DeploymentStatus status = 3; -} - -// DeploymentCondition describes the state of a deployment at a certain point. -message DeploymentCondition { - // Type of deployment condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // The last time this condition was updated. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastUpdateTime = 6; - - // Last time the condition transitioned from one status to another. - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 7; - - // The reason for the condition's last transition. - optional string reason = 4; - - // A human readable message indicating details about the transition. - optional string message = 5; -} - -// DeploymentList is a list of Deployments. -message DeploymentList { - // Standard list metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of Deployments. - repeated Deployment items = 2; -} - -// DEPRECATED. -// DeploymentRollback stores the information required to rollback a deployment. -message DeploymentRollback { - // Required: This must match the Name of a deployment. 
- optional string name = 1; - - // The annotations to be updated to a deployment - // +optional - map updatedAnnotations = 2; - - // The config of this deployment rollback. - optional RollbackConfig rollbackTo = 3; -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -message DeploymentSpec { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - optional int32 replicas = 1; - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // Template describes the pods that will be created. - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - // +patchStrategy=retainKeys - optional DeploymentStrategy strategy = 4; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - optional int32 minReadySeconds = 5; - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - optional int32 revisionHistoryLimit = 6; - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - // +optional - optional bool paused = 7; - - // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. - // +optional - optional RollbackConfig rollbackTo = 8; - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. Note that progress will - // not be estimated during the time a deployment is paused. This is set to - // the max value of int32 (i.e. 2147483647) by default, which means "no deadline". - // +optional - optional int32 progressDeadlineSeconds = 9; -} - -// DeploymentStatus is the most recently observed status of the Deployment. -message DeploymentStatus { - // The generation observed by the deployment controller. - // +optional - optional int64 observedGeneration = 1; - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - optional int32 replicas = 2; - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - optional int32 updatedReplicas = 3; - - // Total number of ready pods targeted by this deployment. - // +optional - optional int32 readyReplicas = 7; - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - optional int32 availableReplicas = 4; - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. 
- // +optional - optional int32 unavailableReplicas = 5; - - // Represents the latest available observations of a deployment's current state. - // +patchMergeKey=type - // +patchStrategy=merge - repeated DeploymentCondition conditions = 6; - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - optional int32 collisionCount = 8; -} - -// DeploymentStrategy describes how to replace existing pods with new ones. -message DeploymentStrategy { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - optional string type = 1; - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - // --- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - optional RollingUpdateDeployment rollingUpdate = 2; -} - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// HTTPIngressPath associates a path regex with a backend. Incoming urls matching -// the path are forwarded to the backend. -message HTTPIngressPath { - // Path is an extended POSIX regex as defined by IEEE Std 1003.1, - // (i.e this follows the egrep/unix syntax, not the perl syntax) - // matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" - // part of a URL as defined by RFC 3986. Paths must begin with - // a '/'. If unspecified, the path defaults to a catch all sending - // traffic to the backend. - // +optional - optional string path = 1; - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - optional IngressBackend backend = 2; -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http:///? -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -message HTTPIngressRuleValue { - // A collection of paths that map requests to backends. - repeated HTTPIngressPath paths = 1; -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. - optional int64 max = 2; -} - -// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. 
-// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods -// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should -// not be included within this rule. -message IPBlock { - // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" - optional string cidr = 1; - - // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" - // Except values will be rejected if they are outside the CIDR range - // +optional - repeated string except = 2; -} - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -message Ingress { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional IngressSpec spec = 2; - - // Status is the current state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional IngressStatus status = 3; -} - -// IngressBackend describes all endpoints for a given service and port. -message IngressBackend { - // Specifies the name of the referenced service. - optional string serviceName = 1; - - // Specifies the port of the referenced service. - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString servicePort = 2; -} - -// IngressList is a collection of Ingress. -message IngressList { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of Ingress. - repeated Ingress items = 2; -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. -message IngressRule { - // Host is the fully qualified domain name of a network host, as defined - // by RFC 3986. Note the following deviations from the "host" part of the - // URI as defined in the RFC: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the - // IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the IngressRuleValue. - // If the host is unspecified, the Ingress routes all traffic based on the - // specified IngressRuleValue. - // +optional - optional string host = 1; - - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. 
Http is - // currently the only supported IngressRuleValue. - // +optional - optional IngressRuleValue ingressRuleValue = 2; -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -message IngressRuleValue { - // +optional - optional HTTPIngressRuleValue http = 1; -} - -// IngressSpec describes the Ingress the user wishes to exist. -message IngressSpec { - // A default backend capable of servicing requests that don't match any - // rule. At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - // +optional - optional IngressBackend backend = 1; - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - // +optional - repeated IngressTLS tls = 2; - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - // +optional - repeated IngressRule rules = 3; -} - -// IngressStatus describe the current state of the Ingress. -message IngressStatus { - // LoadBalancer contains the current status of the load-balancer. - // +optional - optional k8s.io.api.core.v1.LoadBalancerStatus loadBalancer = 1; -} - -// IngressTLS describes the transport layer security associated with an Ingress. -message IngressTLS { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - // +optional - repeated string hosts = 1; - - // SecretName is the name of the secret used to terminate SSL traffic on 443. - // Field is left optional to allow SSL routing based on SNI hostname alone. - // If the SNI host in a listener conflicts with the "Host" header field used - // by an IngressRule, the SNI host is used for termination and value of the - // Host header is used for routing. - // +optional - optional string secretName = 2; -} - -// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. -// NetworkPolicy describes what network traffic is allowed for a set of Pods -message NetworkPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior for this NetworkPolicy. - // +optional - optional NetworkPolicySpec spec = 2; -} - -// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. -// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods -// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. -// This type is beta-level in 1.8 -message NetworkPolicyEgressRule { - // List of destination ports for outgoing traffic. 
- // Each item in this list is combined using a logical OR. If this field is - // empty or missing, this rule matches all ports (traffic not restricted by port). - // If this field is present and contains at least one item, then this rule allows - // traffic only if the traffic matches at least one port in the list. - // +optional - repeated NetworkPolicyPort ports = 1; - - // List of destinations for outgoing traffic of pods selected for this rule. - // Items in this list are combined using a logical OR operation. If this field is - // empty or missing, this rule matches all destinations (traffic not restricted by - // destination). If this field is present and contains at least one item, this rule - // allows traffic only if the traffic matches at least one item in the to list. - // +optional - repeated NetworkPolicyPeer to = 2; -} - -// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. -// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from. -message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this rule. - // Each item in this list is combined using a logical OR. - // If this field is empty or missing, this rule matches all ports (traffic not restricted by port). - // If this field is present and contains at least one item, then this rule allows traffic - // only if the traffic matches at least one port in the list. - // +optional - repeated NetworkPolicyPort ports = 1; - - // List of sources which should be able to access the pods selected for this rule. - // Items in this list are combined using a logical OR operation. - // If this field is empty or missing, this rule matches all sources (traffic not restricted by source). - // If this field is present and contains at least on item, this rule allows traffic only if the - // traffic matches at least one item in the from list. - // +optional - repeated NetworkPolicyPeer from = 2; -} - -// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. -// Network Policy List is a list of NetworkPolicy objects. -message NetworkPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated NetworkPolicy items = 2; -} - -// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. -message NetworkPolicyPeer { - // This is a label selector which selects Pods. This field follows standard label - // selector semantics; if present but empty, it selects all pods. - // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. - // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. 
- // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; - - // IPBlock defines policy on a particular IPBlock. If this field is set then - // neither of the other fields can be. - // +optional - optional IPBlock ipBlock = 3; -} - -// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort. -message NetworkPolicyPort { - // Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. - // If not specified, this field defaults to TCP. - // +optional - optional string protocol = 1; - - // If specified, the port on the given protocol. This can - // either be a numerical or named port on a pod. If this field is not provided, - // this matches all port names and numbers. - // If present, only traffic on the specified protocol AND port - // will be matched. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; -} - -// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec. -message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of ingress rules - // is applied to any pods selected by this field. Multiple network policies can select the - // same set of pods. In this case, the ingress rules for each are combined additively. - // This field is NOT optional and follows standard label selector semantics. - // An empty podSelector matches all pods in this namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - - // List of ingress rules to be applied to the selected pods. - // Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod - // OR if the traffic source is the pod's local node, - // OR if the traffic matches at least one ingress rule across all of the NetworkPolicy - // objects whose podSelector matches the pod. - // If this field is empty then this NetworkPolicy does not allow any traffic - // (and serves solely to ensure that the pods it selects are isolated by default). - // +optional - repeated NetworkPolicyIngressRule ingress = 2; - - // List of egress rules to be applied to the selected pods. Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy - // otherwise allows the traffic), OR if the traffic matches at least one egress rule - // across all of the NetworkPolicy objects whose podSelector matches the pod. If - // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves - // solely to ensure that the pods it selects are isolated by default). - // This field is beta-level in 1.8 - // +optional - repeated NetworkPolicyEgressRule egress = 3; - - // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. - // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. 
- // Likewise, if you want to write a policy that specifies that no egress is allowed, - // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). - // This field is beta-level in 1.8 - // +optional - repeated string policyTypes = 4; -} - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is a white list of allowed volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. 
- optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is a white list of allowed host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; -} - -// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for -// more information. -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -message ReplicaSet { - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ReplicaSetSpec spec = 2; - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - optional ReplicaSetStatus status = 3; -} - -// ReplicaSetCondition describes the state of a replica set at a certain point. -message ReplicaSetCondition { - // Type of replica set condition. - optional string type = 1; - - // Status of the condition, one of True, False, Unknown. - optional string status = 2; - - // The last time the condition transitioned from one status to another. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time lastTransitionTime = 3; - - // The reason for the condition's last transition. - // +optional - optional string reason = 4; - - // A human readable message indicating details about the transition. - // +optional - optional string message = 5; -} - -// ReplicaSetList is a collection of ReplicaSets. -message ReplicaSetList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // List of ReplicaSets. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller - repeated ReplicaSet items = 2; -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -message ReplicaSetSpec { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - // +optional - optional int32 replicas = 1; - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - optional int32 minReadySeconds = 4; - - // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. - // Label keys and values that must match in order to be controlled by this replica set. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - // +optional - optional k8s.io.api.core.v1.PodTemplateSpec template = 3; -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -message ReplicaSetStatus { - // Replicas is the most recently oberved number of replicas. 
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - optional int32 replicas = 1; - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - optional int32 fullyLabeledReplicas = 2; - - // The number of ready replicas for this replica set. - // +optional - optional int32 readyReplicas = 4; - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - optional int32 availableReplicas = 5; - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. - // +optional - optional int64 observedGeneration = 3; - - // Represents the latest available observations of a replica set's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - repeated ReplicaSetCondition conditions = 6; -} - -// Dummy definition -message ReplicationControllerDummy { -} - -// DEPRECATED. -message RollbackConfig { - // The revision to rollback to. If set to 0, rollback to the last revision. - // +optional - optional int64 revision = 1; -} - -// Spec to control the desired behavior of daemon set rolling update. -message RollingUpdateDaemonSet { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. - // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; -} - -// Spec to control the desired behavior of rolling update. -message RollingUpdateDeployment { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 1; - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. 
- // Example: when this is set to 30%, the new RC can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxSurge = 2; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. - optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - -// represents a scaling request for a resource. -message Scale { - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - optional ScaleSpec spec = 2; - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - optional ScaleStatus status = 3; -} - -// describes the attributes of a scale subresource -message ScaleSpec { - // desired number of instances for the scaled object. - // +optional - optional int32 replicas = 1; -} - -// represents the current status of a scale subresource. -message ScaleStatus { - // actual number of observed instances of the scaled object. - optional int32 replicas = 1; - - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - map selector = 2; - - // label selector for pods that should match the replicas count. This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. The string will be in the same format as the - // query-param syntax. If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - optional string targetSelector = 3; -} - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. 
-message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/extensions/v1beta1/register.go b/vendor/k8s.io/api/extensions/v1beta1/register.go deleted file mode 100644 index 7625f67..0000000 --- a/vendor/k8s.io/api/extensions/v1beta1/register.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "extensions" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Deployment{}, - &DeploymentList{}, - &DeploymentRollback{}, - &ReplicationControllerDummy{}, - &Scale{}, - &DaemonSetList{}, - &DaemonSet{}, - &Ingress{}, - &IngressList{}, - &ReplicaSet{}, - &ReplicaSetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, - &NetworkPolicy{}, - &NetworkPolicyList{}, - ) - // Add the watch version that applies - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/types.go b/vendor/k8s.io/api/extensions/v1beta1/types.go deleted file mode 100644 index 38e112d..0000000 --- a/vendor/k8s.io/api/extensions/v1beta1/types.go +++ /dev/null @@ -1,1351 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - appsv1beta1 "k8s.io/api/apps/v1beta1" - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/resource" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// describes the attributes of a scale subresource -type ScaleSpec struct { - // desired number of instances for the scaled object. - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` -} - -// represents the current status of a scale subresource. -type ScaleStatus struct { - // actual number of observed instances of the scaled object. - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors - // +optional - Selector map[string]string `json:"selector,omitempty" protobuf:"bytes,2,rep,name=selector"` - - // label selector for pods that should match the replicas count. This is a serializated - // version of both map-based and more expressive set-based selectors. This is done to - // avoid introspection in the clients. The string will be in the same format as the - // query-param syntax. If the target type only supports map-based selectors, both this - // field and map-based selector field are populated. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"` -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// represents a scaling request for a resource. -type Scale struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. - // +optional - Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only. - // +optional - Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Dummy definition -type ReplicationControllerDummy struct { - metav1.TypeMeta `json:",inline"` -} - -// Alpha-level support for Custom Metrics in HPA (as annotations). -type CustomMetricTarget struct { - // Custom Metric name. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Custom Metric value (average). - TargetValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -type CustomMetricTargetList struct { - Items []CustomMetricTarget `json:"items" protobuf:"bytes,1,rep,name=items"` -} - -type CustomMetricCurrentStatus struct { - // Custom Metric name. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // Custom Metric value (average). 
- CurrentValue resource.Quantity `json:"value" protobuf:"bytes,2,opt,name=value"` -} - -type CustomMetricCurrentStatusList struct { - Items []CustomMetricCurrentStatus `json:"items" protobuf:"bytes,1,rep,name=items"` -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for -// more information. -// Deployment enables declarative updates for Pods and ReplicaSets. -type Deployment struct { - metav1.TypeMeta `json:",inline"` - // Standard object metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the Deployment. - // +optional - Spec DeploymentSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Most recently observed status of the Deployment. - // +optional - Status DeploymentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// DeploymentSpec is the specification of the desired behavior of the Deployment. -type DeploymentSpec struct { - // Number of desired pods. This is a pointer to distinguish between explicit - // zero and not specified. Defaults to 1. - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Label selector for pods. Existing ReplicaSets whose pods are - // selected by this will be the ones affected by this deployment. - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - - // Template describes the pods that will be created. - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,3,opt,name=template"` - - // The deployment strategy to use to replace existing pods with new ones. - // +optional - // +patchStrategy=retainKeys - Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,5,opt,name=minReadySeconds"` - - // The number of old ReplicaSets to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` - - // Indicates that the deployment is paused and will not be processed by the - // deployment controller. - // +optional - Paused bool `json:"paused,omitempty" protobuf:"varint,7,opt,name=paused"` - - // DEPRECATED. - // The config this deployment is rolling back to. Will be cleared after rollback is done. - // +optional - RollbackTo *RollbackConfig `json:"rollbackTo,omitempty" protobuf:"bytes,8,opt,name=rollbackTo"` - - // The maximum time in seconds for a deployment to make progress before it - // is considered to be failed. The deployment controller will continue to - // process failed deployments and a condition with a ProgressDeadlineExceeded - // reason will be surfaced in the deployment status. 
Note that progress will - // not be estimated during the time a deployment is paused. This is set to - // the max value of int32 (i.e. 2147483647) by default, which means "no deadline". - // +optional - ProgressDeadlineSeconds *int32 `json:"progressDeadlineSeconds,omitempty" protobuf:"varint,9,opt,name=progressDeadlineSeconds"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED. -// DeploymentRollback stores the information required to rollback a deployment. -type DeploymentRollback struct { - metav1.TypeMeta `json:",inline"` - // Required: This must match the Name of a deployment. - Name string `json:"name" protobuf:"bytes,1,opt,name=name"` - // The annotations to be updated to a deployment - // +optional - UpdatedAnnotations map[string]string `json:"updatedAnnotations,omitempty" protobuf:"bytes,2,rep,name=updatedAnnotations"` - // The config of this deployment rollback. - RollbackTo RollbackConfig `json:"rollbackTo" protobuf:"bytes,3,opt,name=rollbackTo"` -} - -// DEPRECATED. -type RollbackConfig struct { - // The revision to rollback to. If set to 0, rollback to the last revision. - // +optional - Revision int64 `json:"revision,omitempty" protobuf:"varint,1,opt,name=revision"` -} - -const ( - // DefaultDeploymentUniqueLabelKey is the default key of the selector that is added - // to existing RCs (and label key that is added to its pods) to prevent the existing RCs - // to select new pods (and old pods being select by new RC). - DefaultDeploymentUniqueLabelKey string = "pod-template-hash" -) - -// DeploymentStrategy describes how to replace existing pods with new ones. -type DeploymentStrategy struct { - // Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - // +optional - Type DeploymentStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type,casttype=DeploymentStrategyType"` - - // Rolling update config params. Present only if DeploymentStrategyType = - // RollingUpdate. - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. - // +optional - RollingUpdate *RollingUpdateDeployment `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -type DeploymentStrategyType string - -const ( - // Kill all existing pods before creating new ones. - RecreateDeploymentStrategyType DeploymentStrategyType = "Recreate" - - // Replace the old RCs by new one using rolling update i.e gradually scale down the old RCs and scale up the new one. - RollingUpdateDeploymentStrategyType DeploymentStrategyType = "RollingUpdate" -) - -// Spec to control the desired behavior of rolling update. -type RollingUpdateDeployment struct { - // The maximum number of pods that can be unavailable during the update. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // Absolute number is calculated from percentage by rounding down. - // This can not be 0 if MaxSurge is 0. - // By default, a fixed value of 1 is used. - // Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods - // immediately when the rolling update starts. Once new pods are ready, old RC - // can be scaled down further, followed by scaling up the new RC, ensuring - // that the total number of pods available at all times during the update is at - // least 70% of desired pods. 
- // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` - - // The maximum number of pods that can be scheduled above the desired number of - // pods. - // Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). - // This can not be 0 if MaxUnavailable is 0. - // Absolute number is calculated from percentage by rounding up. - // By default, a value of 1 is used. - // Example: when this is set to 30%, the new RC can be scaled up immediately when - // the rolling update starts, such that the total number of old and new pods do not exceed - // 130% of desired pods. Once old pods have been killed, - // new RC can be scaled up further, ensuring that total number of pods running - // at any time during the update is atmost 130% of desired pods. - // +optional - MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"` -} - -// DeploymentStatus is the most recently observed status of the Deployment. -type DeploymentStatus struct { - // The generation observed by the deployment controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // Total number of non-terminated pods targeted by this deployment (their labels match the selector). - // +optional - Replicas int32 `json:"replicas,omitempty" protobuf:"varint,2,opt,name=replicas"` - - // Total number of non-terminated pods targeted by this deployment that have the desired template spec. - // +optional - UpdatedReplicas int32 `json:"updatedReplicas,omitempty" protobuf:"varint,3,opt,name=updatedReplicas"` - - // Total number of ready pods targeted by this deployment. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,7,opt,name=readyReplicas"` - - // Total number of available pods (ready for at least minReadySeconds) targeted by this deployment. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,4,opt,name=availableReplicas"` - - // Total number of unavailable pods targeted by this deployment. This is the total number of - // pods that are still required for the deployment to have 100% available capacity. They may - // either be pods that are running but not yet available or pods that still have not been created. - // +optional - UnavailableReplicas int32 `json:"unavailableReplicas,omitempty" protobuf:"varint,5,opt,name=unavailableReplicas"` - - // Represents the latest available observations of a deployment's current state. - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []DeploymentCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` - - // Count of hash collisions for the Deployment. The Deployment controller uses this - // field as a collision avoidance mechanism when it needs to create the name for the - // newest ReplicaSet. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,8,opt,name=collisionCount"` -} - -type DeploymentConditionType string - -// These are valid conditions of a deployment. -const ( - // Available means the deployment is available, ie. at least the minimum available - // replicas required are up and running for at least minReadySeconds. - DeploymentAvailable DeploymentConditionType = "Available" - // Progressing means the deployment is progressing. 
Progress for a deployment is - // considered when a new replica set is created or adopted, and when new pods scale - // up or old pods scale down. Progress is not estimated for paused deployments or - // when progressDeadlineSeconds is not specified. - DeploymentProgressing DeploymentConditionType = "Progressing" - // ReplicaFailure is added in a deployment when one of its pods fails to be created - // or deleted. - DeploymentReplicaFailure DeploymentConditionType = "ReplicaFailure" -) - -// DeploymentCondition describes the state of a deployment at a certain point. -type DeploymentCondition struct { - // Type of deployment condition. - Type DeploymentConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DeploymentConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // The last time this condition was updated. - LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty" protobuf:"bytes,6,opt,name=lastUpdateTime"` - // Last time the condition transitioned from one status to another. - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,7,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DeploymentList is a list of Deployments. -type DeploymentList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Deployments. - Items []Deployment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -type DaemonSetUpdateStrategy struct { - // Type of daemon set update. Can be "RollingUpdate" or "OnDelete". - // Default is OnDelete. - // +optional - Type DaemonSetUpdateStrategyType `json:"type,omitempty" protobuf:"bytes,1,opt,name=type"` - - // Rolling update config params. Present only if type = "RollingUpdate". - //--- - // TODO: Update this to follow our convention for oneOf, whatever we decide it - // to be. Same as Deployment `strategy.rollingUpdate`. - // See https://github.com/kubernetes/kubernetes/issues/35345 - // +optional - RollingUpdate *RollingUpdateDaemonSet `json:"rollingUpdate,omitempty" protobuf:"bytes,2,opt,name=rollingUpdate"` -} - -type DaemonSetUpdateStrategyType string - -const ( - // Replace the old daemons by new ones using rolling update i.e replace them on each node one after the other. - RollingUpdateDaemonSetStrategyType DaemonSetUpdateStrategyType = "RollingUpdate" - - // Replace the old daemons only when it's killed - OnDeleteDaemonSetStrategyType DaemonSetUpdateStrategyType = "OnDelete" -) - -// Spec to control the desired behavior of daemon set rolling update. -type RollingUpdateDaemonSet struct { - // The maximum number of DaemonSet pods that can be unavailable during the - // update. Value can be an absolute number (ex: 5) or a percentage of total - // number of DaemonSet pods at the start of the update (ex: 10%). Absolute - // number is calculated from percentage by rounding up. - // This cannot be 0. - // Default value is 1. 
- // Example: when this is set to 30%, at most 30% of the total number of nodes - // that should be running the daemon pod (i.e. status.desiredNumberScheduled) - // can have their pods stopped for an update at any given - // time. The update starts by stopping at most 30% of those DaemonSet pods - // and then brings up new DaemonSet pods in their place. Once the new pods - // are available, it then proceeds onto other DaemonSet pods, thus ensuring - // that at least 70% of original number of DaemonSet pods are available at - // all times during the update. - // +optional - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,1,opt,name=maxUnavailable"` -} - -// DaemonSetSpec is the specification of a daemon set. -type DaemonSetSpec struct { - // A label query over pods that are managed by the daemon set. - // Must match in order to be controlled. - // If empty, defaulted to labels on Pod template. - // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` - - // An object that describes the pod that will be created. - // The DaemonSet will create exactly one copy of this pod on every node - // that matches the template's node selector (or on every node if no node - // selector is specified). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - Template v1.PodTemplateSpec `json:"template" protobuf:"bytes,2,opt,name=template"` - - // An update strategy to replace existing DaemonSet pods with new pods. - // +optional - UpdateStrategy DaemonSetUpdateStrategy `json:"updateStrategy,omitempty" protobuf:"bytes,3,opt,name=updateStrategy"` - - // The minimum number of seconds for which a newly created DaemonSet pod should - // be ready without any of its container crashing, for it to be considered - // available. Defaults to 0 (pod will be considered available as soon as it - // is ready). - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` - - // DEPRECATED. - // A sequence number representing a specific generation of the template. - // Populated by the system. It can be set only during the creation. - // +optional - TemplateGeneration int64 `json:"templateGeneration,omitempty" protobuf:"varint,5,opt,name=templateGeneration"` - - // The number of old history to retain to allow rollback. - // This is a pointer to distinguish between explicit zero and not specified. - // Defaults to 10. - // +optional - RevisionHistoryLimit *int32 `json:"revisionHistoryLimit,omitempty" protobuf:"varint,6,opt,name=revisionHistoryLimit"` -} - -// DaemonSetStatus represents the current status of a daemon set. -type DaemonSetStatus struct { - // The number of nodes that are running at least 1 - // daemon pod and are supposed to run the daemon pod. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - CurrentNumberScheduled int32 `json:"currentNumberScheduled" protobuf:"varint,1,opt,name=currentNumberScheduled"` - - // The number of nodes that are running the daemon pod, but are - // not supposed to run the daemon pod. 
- // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - NumberMisscheduled int32 `json:"numberMisscheduled" protobuf:"varint,2,opt,name=numberMisscheduled"` - - // The total number of nodes that should be running the daemon - // pod (including nodes correctly running the daemon pod). - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/ - DesiredNumberScheduled int32 `json:"desiredNumberScheduled" protobuf:"varint,3,opt,name=desiredNumberScheduled"` - - // The number of nodes that should be running the daemon pod and have one - // or more of the daemon pod running and ready. - NumberReady int32 `json:"numberReady" protobuf:"varint,4,opt,name=numberReady"` - - // The most recent generation observed by the daemon set controller. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,5,opt,name=observedGeneration"` - - // The total number of nodes that are running updated daemon pod - // +optional - UpdatedNumberScheduled int32 `json:"updatedNumberScheduled,omitempty" protobuf:"varint,6,opt,name=updatedNumberScheduled"` - - // The number of nodes that should be running the - // daemon pod and have one or more of the daemon pod running and - // available (ready for at least spec.minReadySeconds) - // +optional - NumberAvailable int32 `json:"numberAvailable,omitempty" protobuf:"varint,7,opt,name=numberAvailable"` - - // The number of nodes that should be running the - // daemon pod and have none of the daemon pod running and available - // (ready for at least spec.minReadySeconds) - // +optional - NumberUnavailable int32 `json:"numberUnavailable,omitempty" protobuf:"varint,8,opt,name=numberUnavailable"` - - // Count of hash collisions for the DaemonSet. The DaemonSet controller - // uses this field as a collision avoidance mechanism when it needs to - // create the name for the newest ControllerRevision. - // +optional - CollisionCount *int32 `json:"collisionCount,omitempty" protobuf:"varint,9,opt,name=collisionCount"` - - // Represents the latest available observations of a DaemonSet's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []DaemonSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,10,rep,name=conditions"` -} - -type DaemonSetConditionType string - -// TODO: Add valid condition types of a DaemonSet. - -// DaemonSetCondition describes the state of a DaemonSet at a certain point. -type DaemonSetCondition struct { - // Type of DaemonSet condition. - Type DaemonSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=DaemonSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // Last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. 
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for -// more information. -// DaemonSet represents the configuration of a daemon set. -type DaemonSet struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The desired behavior of this daemon set. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // The current status of this daemon set. This data may be - // out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -const ( - // DEPRECATED: DefaultDaemonSetUniqueLabelKey is used instead. - // DaemonSetTemplateGenerationKey is the key of the labels that is added - // to daemon set pods to distinguish between old and new pod templates - // during DaemonSet template update. - DaemonSetTemplateGenerationKey string = "pod-template-generation" - - // DefaultDaemonSetUniqueLabelKey is the default label key that is added - // to existing DaemonSet pods to distinguish between old and new - // DaemonSet pods during DaemonSet template updates. - DefaultDaemonSetUniqueLabelKey = appsv1beta1.ControllerRevisionHashLabelKey -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DaemonSetList is a collection of daemon sets. -type DaemonSetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // A list of daemon sets. - Items []DaemonSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Ingress is a collection of rules that allow inbound connections to reach the -// endpoints defined by a backend. An Ingress can be configured to give services -// externally-reachable urls, load balance traffic, terminate SSL, offer name -// based virtual hosting etc. -type Ingress struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec is the desired state of the Ingress. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec IngressSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the current state of the Ingress. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status IngressStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// IngressList is a collection of Ingress. -type IngressList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of Ingress. - Items []Ingress `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// IngressSpec describes the Ingress the user wishes to exist. -type IngressSpec struct { - // A default backend capable of servicing requests that don't match any - // rule. At least one of 'backend' or 'rules' must be specified. This field - // is optional to allow the loadbalancer controller or defaulting logic to - // specify a global default. - // +optional - Backend *IngressBackend `json:"backend,omitempty" protobuf:"bytes,1,opt,name=backend"` - - // TLS configuration. Currently the Ingress only supports a single TLS - // port, 443. If multiple members of this list specify different hosts, they - // will be multiplexed on the same port according to the hostname specified - // through the SNI TLS extension, if the ingress controller fulfilling the - // ingress supports SNI. - // +optional - TLS []IngressTLS `json:"tls,omitempty" protobuf:"bytes,2,rep,name=tls"` - - // A list of host rules used to configure the Ingress. If unspecified, or - // no rule matches, all traffic is sent to the default backend. - // +optional - Rules []IngressRule `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"` - // TODO: Add the ability to specify load-balancer IP through claims -} - -// IngressTLS describes the transport layer security associated with an Ingress. -type IngressTLS struct { - // Hosts are a list of hosts included in the TLS certificate. The values in - // this list must match the name/s used in the tlsSecret. Defaults to the - // wildcard host setting for the loadbalancer controller fulfilling this - // Ingress, if left unspecified. - // +optional - Hosts []string `json:"hosts,omitempty" protobuf:"bytes,1,rep,name=hosts"` - // SecretName is the name of the secret used to terminate SSL traffic on 443. - // Field is left optional to allow SSL routing based on SNI hostname alone. - // If the SNI host in a listener conflicts with the "Host" header field used - // by an IngressRule, the SNI host is used for termination and value of the - // Host header is used for routing. - // +optional - SecretName string `json:"secretName,omitempty" protobuf:"bytes,2,opt,name=secretName"` - // TODO: Consider specifying different modes of termination, protocols etc. -} - -// IngressStatus describe the current state of the Ingress. -type IngressStatus struct { - // LoadBalancer contains the current status of the load-balancer. - // +optional - LoadBalancer v1.LoadBalancerStatus `json:"loadBalancer,omitempty" protobuf:"bytes,1,opt,name=loadBalancer"` -} - -// IngressRule represents the rules mapping the paths under a specified host to -// the related backend services. Incoming requests are first evaluated for a host -// match, then routed to the backend associated with the matching IngressRuleValue. 
-type IngressRule struct { - // Host is the fully qualified domain name of a network host, as defined - // by RFC 3986. Note the following deviations from the "host" part of the - // URI as defined in the RFC: - // 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the - // IP in the Spec of the parent Ingress. - // 2. The `:` delimiter is not respected because ports are not allowed. - // Currently the port of an Ingress is implicitly :80 for http and - // :443 for https. - // Both these may change in the future. - // Incoming requests are matched against the host before the IngressRuleValue. - // If the host is unspecified, the Ingress routes all traffic based on the - // specified IngressRuleValue. - // +optional - Host string `json:"host,omitempty" protobuf:"bytes,1,opt,name=host"` - // IngressRuleValue represents a rule to route requests for this IngressRule. - // If unspecified, the rule defaults to a http catch-all. Whether that sends - // just traffic matching the host to the default backend or all traffic to the - // default backend, is left to the controller fulfilling the Ingress. Http is - // currently the only supported IngressRuleValue. - // +optional - IngressRuleValue `json:",inline,omitempty" protobuf:"bytes,2,opt,name=ingressRuleValue"` -} - -// IngressRuleValue represents a rule to apply against incoming requests. If the -// rule is satisfied, the request is routed to the specified backend. Currently -// mixing different types of rules in a single Ingress is disallowed, so exactly -// one of the following must be set. -type IngressRuleValue struct { - //TODO: - // 1. Consider renaming this resource and the associated rules so they - // aren't tied to Ingress. They can be used to route intra-cluster traffic. - // 2. Consider adding fields for ingress-type specific global options - // usable by a loadbalancer, like http keep-alive. - - // +optional - HTTP *HTTPIngressRuleValue `json:"http,omitempty" protobuf:"bytes,1,opt,name=http"` -} - -// HTTPIngressRuleValue is a list of http selectors pointing to backends. -// In the example: http:///? -> backend where -// where parts of the url correspond to RFC 3986, this resource will be used -// to match against everything after the last '/' and before the first '?' -// or '#'. -type HTTPIngressRuleValue struct { - // A collection of paths that map requests to backends. - Paths []HTTPIngressPath `json:"paths" protobuf:"bytes,1,rep,name=paths"` - // TODO: Consider adding fields for ingress-type specific global - // options usable by a loadbalancer, like http keep-alive. -} - -// HTTPIngressPath associates a path regex with a backend. Incoming urls matching -// the path are forwarded to the backend. -type HTTPIngressPath struct { - // Path is an extended POSIX regex as defined by IEEE Std 1003.1, - // (i.e this follows the egrep/unix syntax, not the perl syntax) - // matched against the path of an incoming request. Currently it can - // contain characters disallowed from the conventional "path" - // part of a URL as defined by RFC 3986. Paths must begin with - // a '/'. If unspecified, the path defaults to a catch all sending - // traffic to the backend. - // +optional - Path string `json:"path,omitempty" protobuf:"bytes,1,opt,name=path"` - - // Backend defines the referenced service endpoint to which the traffic - // will be forwarded to. - Backend IngressBackend `json:"backend" protobuf:"bytes,2,opt,name=backend"` -} - -// IngressBackend describes all endpoints for a given service and port. 
-type IngressBackend struct { - // Specifies the name of the referenced service. - ServiceName string `json:"serviceName" protobuf:"bytes,1,opt,name=serviceName"` - - // Specifies the port of the referenced service. - ServicePort intstr.IntOrString `json:"servicePort" protobuf:"bytes,2,opt,name=servicePort"` -} - -// +genclient -// +genclient:method=GetScale,verb=get,subresource=scale,result=Scale -// +genclient:method=UpdateScale,verb=update,subresource=scale,input=Scale,result=Scale -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for -// more information. -// ReplicaSet ensures that a specified number of pod replicas are running at any given time. -type ReplicaSet struct { - metav1.TypeMeta `json:",inline"` - - // If the Labels of a ReplicaSet are empty, they are defaulted to - // be the same as the Pod(s) that the ReplicaSet manages. - // Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Spec defines the specification of the desired behavior of the ReplicaSet. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - - // Status is the most recently observed status of the ReplicaSet. - // This data may be out of date by some window of time. - // Populated by the system. - // Read-only. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status - // +optional - Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ReplicaSetList is a collection of ReplicaSets. -type ReplicaSetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // List of ReplicaSets. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller - Items []ReplicaSet `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// ReplicaSetSpec is the specification of a ReplicaSet. -type ReplicaSetSpec struct { - // Replicas is the number of desired replicas. - // This is a pointer to distinguish between explicit zero and unspecified. - // Defaults to 1. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - // +optional - Replicas *int32 `json:"replicas,omitempty" protobuf:"varint,1,opt,name=replicas"` - - // Minimum number of seconds for which a newly created pod should be ready - // without any of its container crashing, for it to be considered available. - // Defaults to 0 (pod will be considered available as soon as it is ready) - // +optional - MinReadySeconds int32 `json:"minReadySeconds,omitempty" protobuf:"varint,4,opt,name=minReadySeconds"` - - // Selector is a label query over pods that should match the replica count. - // If the selector is empty, it is defaulted to the labels present on the pod template. - // Label keys and values that must match in order to be controlled by this replica set. 
- // More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors - // +optional - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - - // Template is the object that describes the pod that will be created if - // insufficient replicas are detected. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template - // +optional - Template v1.PodTemplateSpec `json:"template,omitempty" protobuf:"bytes,3,opt,name=template"` -} - -// ReplicaSetStatus represents the current status of a ReplicaSet. -type ReplicaSetStatus struct { - // Replicas is the most recently oberved number of replicas. - // More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller - Replicas int32 `json:"replicas" protobuf:"varint,1,opt,name=replicas"` - - // The number of pods that have labels matching the labels of the pod template of the replicaset. - // +optional - FullyLabeledReplicas int32 `json:"fullyLabeledReplicas,omitempty" protobuf:"varint,2,opt,name=fullyLabeledReplicas"` - - // The number of ready replicas for this replica set. - // +optional - ReadyReplicas int32 `json:"readyReplicas,omitempty" protobuf:"varint,4,opt,name=readyReplicas"` - - // The number of available replicas (ready for at least minReadySeconds) for this replica set. - // +optional - AvailableReplicas int32 `json:"availableReplicas,omitempty" protobuf:"varint,5,opt,name=availableReplicas"` - - // ObservedGeneration reflects the generation of the most recently observed ReplicaSet. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,3,opt,name=observedGeneration"` - - // Represents the latest available observations of a replica set's current state. - // +optional - // +patchMergeKey=type - // +patchStrategy=merge - Conditions []ReplicaSetCondition `json:"conditions,omitempty" patchStrategy:"merge" patchMergeKey:"type" protobuf:"bytes,6,rep,name=conditions"` -} - -type ReplicaSetConditionType string - -// These are valid conditions of a replica set. -const ( - // ReplicaSetReplicaFailure is added in a replica set when one of its pods fails to be created - // due to insufficient quota, limit ranges, pod security policy, node selectors, etc. or deleted - // due to kubelet being down or finalizers are failing. - ReplicaSetReplicaFailure ReplicaSetConditionType = "ReplicaFailure" -) - -// ReplicaSetCondition describes the state of a replica set at a certain point. -type ReplicaSetCondition struct { - // Type of replica set condition. - Type ReplicaSetConditionType `json:"type" protobuf:"bytes,1,opt,name=type,casttype=ReplicaSetConditionType"` - // Status of the condition, one of True, False, Unknown. - Status v1.ConditionStatus `json:"status" protobuf:"bytes,2,opt,name=status,casttype=k8s.io/api/core/v1.ConditionStatus"` - // The last time the condition transitioned from one status to another. - // +optional - LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty" protobuf:"bytes,3,opt,name=lastTransitionTime"` - // The reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty" protobuf:"bytes,4,opt,name=reason"` - // A human readable message indicating details about the transition. 
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,5,opt,name=message"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -// Deprecated: use PodSecurityPolicy from policy API Group instead. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -// Deprecated: use PodSecurityPolicySpec from policy API Group instead. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is a white list of allowed volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. 
- // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is a white list of allowed host paths. Empty indicates - // that all host paths may be used. - // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. 
"foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -// Deprecated: use AllowedHostPath from policy API Group instead. -type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// FSType gives strong typing to different file systems that are used by volumes. -// Deprecated: use FSType from policy API Group instead. -type FSType string - -var ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -// Deprecated: use AllowedFlexVolume from policy API Group instead. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -// Deprecated: use HostPortRange from policy API Group instead. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use SELinuxStrategyOptions from policy API Group instead. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. 
- Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -// Deprecated: use SELinuxStrategy from policy API Group instead. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. - // Deprecated: use SELinuxStrategyMustRunAs from policy API Group instead. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - // Deprecated: use SELinuxStrategyRunAsAny from policy API Group instead. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -// Deprecated: use RunAsUserStrategyOptions from policy API Group instead. -type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -// Deprecated: use IDRange from policy API Group instead. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -// Deprecated: use RunAsUserStrategy from policy API Group instead. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. - // Deprecated: use RunAsUserStrategyMustRunAs from policy API Group instead. - RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - // Deprecated: use RunAsUserStrategyMustRunAsNonRoot from policy API Group instead. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - // Deprecated: use RunAsUserStrategyRunAsAny from policy API Group instead. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use FSGroupStrategyOptions from policy API Group instead. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. 
If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -// Deprecated: use FSGroupStrategyType from policy API Group instead. -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - // Deprecated: use FSGroupStrategyMustRunAs from policy API Group instead. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - // Deprecated: use FSGroupStrategyRunAsAny from policy API Group instead. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -// Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -// Deprecated: use SupplementalGroupsStrategyType from policy API Group instead. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - // Deprecated: use SupplementalGroupsStrategyMustRunAs from policy API Group instead. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - // Deprecated: use SupplementalGroupsStrategyRunAsAny from policy API Group instead. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -// Deprecated: use PodSecurityPolicyList from policy API Group instead. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. - Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. -// NetworkPolicy describes what network traffic is allowed for a set of Pods -type NetworkPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
-	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
-	// +optional
-	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
-
-	// Specification of the desired behavior for this NetworkPolicy.
-	// +optional
-	Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
-}
-
-// DEPRECATED 1.9 - This group version of PolicyType is deprecated by networking/v1/PolicyType.
-// Policy Type string describes the NetworkPolicy type
-// This type is beta-level in 1.8
-type PolicyType string
-
-const (
-	// PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods
-	PolicyTypeIngress PolicyType = "Ingress"
-	// PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods
-	PolicyTypeEgress PolicyType = "Egress"
-)
-
-// DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.
-type NetworkPolicySpec struct {
-	// Selects the pods to which this NetworkPolicy object applies. The array of ingress rules
-	// is applied to any pods selected by this field. Multiple network policies can select the
-	// same set of pods. In this case, the ingress rules for each are combined additively.
-	// This field is NOT optional and follows standard label selector semantics.
-	// An empty podSelector matches all pods in this namespace.
-	PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"`
-
-	// List of ingress rules to be applied to the selected pods.
-	// Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod
-	// OR if the traffic source is the pod's local node,
-	// OR if the traffic matches at least one ingress rule across all of the NetworkPolicy
-	// objects whose podSelector matches the pod.
-	// If this field is empty then this NetworkPolicy does not allow any traffic
-	// (and serves solely to ensure that the pods it selects are isolated by default).
-	// +optional
-	Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"`
-
-	// List of egress rules to be applied to the selected pods. Outgoing traffic is
-	// allowed if there are no NetworkPolicies selecting the pod (and cluster policy
-	// otherwise allows the traffic), OR if the traffic matches at least one egress rule
-	// across all of the NetworkPolicy objects whose podSelector matches the pod. If
-	// this field is empty then this NetworkPolicy limits all outgoing traffic (and serves
-	// solely to ensure that the pods it selects are isolated by default).
-	// This field is beta-level in 1.8
-	// +optional
-	Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"`
-
-	// List of rule types that the NetworkPolicy relates to.
-	// Valid options are Ingress, Egress, or Ingress,Egress.
-	// If this field is not specified, it will default based on the existence of Ingress or Egress rules;
-	// policies that contain an Egress section are assumed to affect Egress, and all policies
-	// (whether or not they contain an Ingress section) are assumed to affect Ingress.
-	// If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ].
-	// Likewise, if you want to write a policy that specifies that no egress is allowed,
-	// you must specify a policyTypes value that include "Egress" (since such a policy would not include
-	// an Egress section and would otherwise default to just [ "Ingress" ]).
-	// This field is beta-level in 1.8
-	// +optional
-	PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"`
-}
-
-// DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule.
-// This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.
-type NetworkPolicyIngressRule struct {
-	// List of ports which should be made accessible on the pods selected for this rule.
-	// Each item in this list is combined using a logical OR.
-	// If this field is empty or missing, this rule matches all ports (traffic not restricted by port).
-	// If this field is present and contains at least one item, then this rule allows traffic
-	// only if the traffic matches at least one port in the list.
-	// +optional
-	Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
-
-	// List of sources which should be able to access the pods selected for this rule.
-	// Items in this list are combined using a logical OR operation.
-	// If this field is empty or missing, this rule matches all sources (traffic not restricted by source).
-	// If this field is present and contains at least on item, this rule allows traffic only if the
-	// traffic matches at least one item in the from list.
-	// +optional
-	From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"`
-}
-
-// DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule.
-// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods
-// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to.
-// This type is beta-level in 1.8
-type NetworkPolicyEgressRule struct {
-	// List of destination ports for outgoing traffic.
-	// Each item in this list is combined using a logical OR. If this field is
-	// empty or missing, this rule matches all ports (traffic not restricted by port).
-	// If this field is present and contains at least one item, then this rule allows
-	// traffic only if the traffic matches at least one port in the list.
-	// +optional
-	Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"`
-
-	// List of destinations for outgoing traffic of pods selected for this rule.
-	// Items in this list are combined using a logical OR operation. If this field is
-	// empty or missing, this rule matches all destinations (traffic not restricted by
-	// destination). If this field is present and contains at least one item, this rule
-	// allows traffic only if the traffic matches at least one item in the to list.
-	// +optional
-	To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"`
-}
-
-// DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.
-type NetworkPolicyPort struct {
-	// Optional. The protocol (TCP, UDP, or SCTP) which traffic must match.
-	// If not specified, this field defaults to TCP.
-	// +optional
-	Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"`
-
-	// If specified, the port on the given protocol. This can
-	// either be a numerical or named port on a pod. If this field is not provided,
-	// this matches all port names and numbers.
-	// If present, only traffic on the specified protocol AND port
-	// will be matched.
- // +optional - Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` -} - -// DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods -// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should -// not be included within this rule. -type IPBlock struct { - // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" - CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` - // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" - // Except values will be rejected if they are outside the CIDR range - // +optional - Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` -} - -// DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer. -type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label - // selector semantics; if present but empty, it selects all pods. - // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. - // +optional - PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. - // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. - // +optional - NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - - // IPBlock defines policy on a particular IPBlock. If this field is set then - // neither of the other fields can be. - // +optional - IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. -// Network Policy List is a list of NetworkPolicy objects. -type NetworkPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index cdbc490..0000000 --- a/vendor/k8s.io/api/extensions/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,648 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used. Deprecated: use AllowedFlexVolume from policy API Group instead.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined. Deprecated: use AllowedHostPath from policy API Group instead.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - -var map_CustomMetricCurrentStatus = map[string]string{ - "name": "Custom Metric name.", - "value": "Custom Metric value (average).", -} - -func (CustomMetricCurrentStatus) SwaggerDoc() map[string]string { - return map_CustomMetricCurrentStatus -} - -var map_CustomMetricTarget = map[string]string{ - "": "Alpha-level support for Custom Metrics in HPA (as annotations).", - "name": "Custom Metric name.", - "value": "Custom Metric value (average).", -} - -func (CustomMetricTarget) SwaggerDoc() map[string]string { - return map_CustomMetricTarget -} - -var map_DaemonSet = map[string]string{ - "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1beta2/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (DaemonSet) SwaggerDoc() map[string]string { - return map_DaemonSet -} - -var map_DaemonSetCondition = map[string]string{ - "": "DaemonSetCondition describes the state of a DaemonSet at a certain point.", - "type": "Type of DaemonSet condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (DaemonSetCondition) SwaggerDoc() map[string]string { - return map_DaemonSetCondition -} - -var map_DaemonSetList = map[string]string{ - "": "DaemonSetList is a collection of daemon sets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "A list of daemon sets.", -} - -func (DaemonSetList) SwaggerDoc() map[string]string { - return map_DaemonSetList -} - -var map_DaemonSetSpec = map[string]string{ - "": "DaemonSetSpec is the specification of a daemon set.", - "selector": "A label query over pods that are managed by the daemon set. Must match in order to be controlled. If empty, defaulted to labels on Pod template. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "An object that describes the pod that will be created. The DaemonSet will create exactly one copy of this pod on every node that matches the template's node selector (or on every node if no node selector is specified). More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", - "updateStrategy": "An update strategy to replace existing DaemonSet pods with new pods.", - "minReadySeconds": "The minimum number of seconds for which a newly created DaemonSet pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).", - "templateGeneration": "DEPRECATED. A sequence number representing a specific generation of the template. Populated by the system. It can be set only during the creation.", - "revisionHistoryLimit": "The number of old history to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified. Defaults to 10.", -} - -func (DaemonSetSpec) SwaggerDoc() map[string]string { - return map_DaemonSetSpec -} - -var map_DaemonSetStatus = map[string]string{ - "": "DaemonSetStatus represents the current status of a daemon set.", - "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/", - "numberReady": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and ready.", - "observedGeneration": "The most recent generation observed by the daemon set controller.", - "updatedNumberScheduled": "The total number of nodes that are running updated daemon pod", - "numberAvailable": "The number of nodes that should be running the daemon pod and have one or more of the daemon pod running and available (ready for at least spec.minReadySeconds)", - "numberUnavailable": "The number of nodes that should be running the daemon pod and have none of the daemon pod running and available (ready for at least spec.minReadySeconds)", - "collisionCount": "Count of hash collisions for the DaemonSet. The DaemonSet controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ControllerRevision.", - "conditions": "Represents the latest available observations of a DaemonSet's current state.", -} - -func (DaemonSetStatus) SwaggerDoc() map[string]string { - return map_DaemonSetStatus -} - -var map_DaemonSetUpdateStrategy = map[string]string{ - "type": "Type of daemon set update. Can be \"RollingUpdate\" or \"OnDelete\". Default is OnDelete.", - "rollingUpdate": "Rolling update config params. Present only if type = \"RollingUpdate\".", -} - -func (DaemonSetUpdateStrategy) SwaggerDoc() map[string]string { - return map_DaemonSetUpdateStrategy -} - -var map_Deployment = map[string]string{ - "": "DEPRECATED - This group version of Deployment is deprecated by apps/v1beta2/Deployment. See the release notes for more information. Deployment enables declarative updates for Pods and ReplicaSets.", - "metadata": "Standard object metadata.", - "spec": "Specification of the desired behavior of the Deployment.", - "status": "Most recently observed status of the Deployment.", -} - -func (Deployment) SwaggerDoc() map[string]string { - return map_Deployment -} - -var map_DeploymentCondition = map[string]string{ - "": "DeploymentCondition describes the state of a deployment at a certain point.", - "type": "Type of deployment condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastUpdateTime": "The last time this condition was updated.", - "lastTransitionTime": "Last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (DeploymentCondition) SwaggerDoc() map[string]string { - return map_DeploymentCondition -} - -var map_DeploymentList = map[string]string{ - "": "DeploymentList is a list of Deployments.", - "metadata": "Standard list metadata.", - "items": "Items is the list of Deployments.", -} - -func (DeploymentList) SwaggerDoc() map[string]string { - return map_DeploymentList -} - -var map_DeploymentRollback = map[string]string{ - "": "DEPRECATED. 
DeploymentRollback stores the information required to rollback a deployment.", - "name": "Required: This must match the Name of a deployment.", - "updatedAnnotations": "The annotations to be updated to a deployment", - "rollbackTo": "The config of this deployment rollback.", -} - -func (DeploymentRollback) SwaggerDoc() map[string]string { - return map_DeploymentRollback -} - -var map_DeploymentSpec = map[string]string{ - "": "DeploymentSpec is the specification of the desired behavior of the Deployment.", - "replicas": "Number of desired pods. This is a pointer to distinguish between explicit zero and not specified. Defaults to 1.", - "selector": "Label selector for pods. Existing ReplicaSets whose pods are selected by this will be the ones affected by this deployment.", - "template": "Template describes the pods that will be created.", - "strategy": "The deployment strategy to use to replace existing pods with new ones.", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "revisionHistoryLimit": "The number of old ReplicaSets to retain to allow rollback. This is a pointer to distinguish between explicit zero and not specified.", - "paused": "Indicates that the deployment is paused and will not be processed by the deployment controller.", - "rollbackTo": "DEPRECATED. The config this deployment is rolling back to. Will be cleared after rollback is done.", - "progressDeadlineSeconds": "The maximum time in seconds for a deployment to make progress before it is considered to be failed. The deployment controller will continue to process failed deployments and a condition with a ProgressDeadlineExceeded reason will be surfaced in the deployment status. Note that progress will not be estimated during the time a deployment is paused. This is set to the max value of int32 (i.e. 2147483647) by default, which means \"no deadline\".", -} - -func (DeploymentSpec) SwaggerDoc() map[string]string { - return map_DeploymentSpec -} - -var map_DeploymentStatus = map[string]string{ - "": "DeploymentStatus is the most recently observed status of the Deployment.", - "observedGeneration": "The generation observed by the deployment controller.", - "replicas": "Total number of non-terminated pods targeted by this deployment (their labels match the selector).", - "updatedReplicas": "Total number of non-terminated pods targeted by this deployment that have the desired template spec.", - "readyReplicas": "Total number of ready pods targeted by this deployment.", - "availableReplicas": "Total number of available pods (ready for at least minReadySeconds) targeted by this deployment.", - "unavailableReplicas": "Total number of unavailable pods targeted by this deployment. This is the total number of pods that are still required for the deployment to have 100% available capacity. They may either be pods that are running but not yet available or pods that still have not been created.", - "conditions": "Represents the latest available observations of a deployment's current state.", - "collisionCount": "Count of hash collisions for the Deployment. 
The Deployment controller uses this field as a collision avoidance mechanism when it needs to create the name for the newest ReplicaSet.", -} - -func (DeploymentStatus) SwaggerDoc() map[string]string { - return map_DeploymentStatus -} - -var map_DeploymentStrategy = map[string]string{ - "": "DeploymentStrategy describes how to replace existing pods with new ones.", - "type": "Type of deployment. Can be \"Recreate\" or \"RollingUpdate\". Default is RollingUpdate.", - "rollingUpdate": "Rolling update config params. Present only if DeploymentStrategyType = RollingUpdate.", -} - -func (DeploymentStrategy) SwaggerDoc() map[string]string { - return map_DeploymentStrategy -} - -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use FSGroupStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - -var map_HTTPIngressPath = map[string]string{ - "": "HTTPIngressPath associates a path regex with a backend. Incoming urls matching the path are forwarded to the backend.", - "path": "Path is an extended POSIX regex as defined by IEEE Std 1003.1, (i.e this follows the egrep/unix syntax, not the perl syntax) matched against the path of an incoming request. Currently it can contain characters disallowed from the conventional \"path\" part of a URL as defined by RFC 3986. Paths must begin with a '/'. If unspecified, the path defaults to a catch all sending traffic to the backend.", - "backend": "Backend defines the referenced service endpoint to which the traffic will be forwarded to.", -} - -func (HTTPIngressPath) SwaggerDoc() map[string]string { - return map_HTTPIngressPath -} - -var map_HTTPIngressRuleValue = map[string]string{ - "": "HTTPIngressRuleValue is a list of http selectors pointing to backends. In the example: http:///? -> backend where where parts of the url correspond to RFC 3986, this resource will be used to match against everything after the last '/' and before the first '?' or '#'.", - "paths": "A collection of paths that map requests to backends.", -} - -func (HTTPIngressRuleValue) SwaggerDoc() map[string]string { - return map_HTTPIngressRuleValue -} - -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined. Deprecated: use HostPortRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs. Deprecated: use IDRange from policy API Group instead.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - -var map_IPBlock = map[string]string{ - "": "DEPRECATED 1.9 - This group version of IPBlock is deprecated by networking/v1/IPBlock. IPBlock describes a particular CIDR (Ex. 
\"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", -} - -func (IPBlock) SwaggerDoc() map[string]string { - return map_IPBlock -} - -var map_Ingress = map[string]string{ - "": "Ingress is a collection of rules that allow inbound connections to reach the endpoints defined by a backend. An Ingress can be configured to give services externally-reachable urls, load balance traffic, terminate SSL, offer name based virtual hosting etc.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec is the desired state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the current state of the Ingress. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (Ingress) SwaggerDoc() map[string]string { - return map_Ingress -} - -var map_IngressBackend = map[string]string{ - "": "IngressBackend describes all endpoints for a given service and port.", - "serviceName": "Specifies the name of the referenced service.", - "servicePort": "Specifies the port of the referenced service.", -} - -func (IngressBackend) SwaggerDoc() map[string]string { - return map_IngressBackend -} - -var map_IngressList = map[string]string{ - "": "IngressList is a collection of Ingress.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of Ingress.", -} - -func (IngressList) SwaggerDoc() map[string]string { - return map_IngressList -} - -var map_IngressRule = map[string]string{ - "": "IngressRule represents the rules mapping the paths under a specified host to the related backend services. Incoming requests are first evaluated for a host match, then routed to the backend associated with the matching IngressRuleValue.", - "host": "Host is the fully qualified domain name of a network host, as defined by RFC 3986. Note the following deviations from the \"host\" part of the URI as defined in the RFC: 1. IPs are not allowed. Currently an IngressRuleValue can only apply to the\n\t IP in the Spec of the parent Ingress.\n2. The `:` delimiter is not respected because ports are not allowed.\n\t Currently the port of an Ingress is implicitly :80 for http and\n\t :443 for https.\nBoth these may change in the future. Incoming requests are matched against the host before the IngressRuleValue. If the host is unspecified, the Ingress routes all traffic based on the specified IngressRuleValue.", -} - -func (IngressRule) SwaggerDoc() map[string]string { - return map_IngressRule -} - -var map_IngressRuleValue = map[string]string{ - "": "IngressRuleValue represents a rule to apply against incoming requests. If the rule is satisfied, the request is routed to the specified backend. 
Currently mixing different types of rules in a single Ingress is disallowed, so exactly one of the following must be set.", -} - -func (IngressRuleValue) SwaggerDoc() map[string]string { - return map_IngressRuleValue -} - -var map_IngressSpec = map[string]string{ - "": "IngressSpec describes the Ingress the user wishes to exist.", - "backend": "A default backend capable of servicing requests that don't match any rule. At least one of 'backend' or 'rules' must be specified. This field is optional to allow the loadbalancer controller or defaulting logic to specify a global default.", - "tls": "TLS configuration. Currently the Ingress only supports a single TLS port, 443. If multiple members of this list specify different hosts, they will be multiplexed on the same port according to the hostname specified through the SNI TLS extension, if the ingress controller fulfilling the ingress supports SNI.", - "rules": "A list of host rules used to configure the Ingress. If unspecified, or no rule matches, all traffic is sent to the default backend.", -} - -func (IngressSpec) SwaggerDoc() map[string]string { - return map_IngressSpec -} - -var map_IngressStatus = map[string]string{ - "": "IngressStatus describe the current state of the Ingress.", - "loadBalancer": "LoadBalancer contains the current status of the load-balancer.", -} - -func (IngressStatus) SwaggerDoc() map[string]string { - return map_IngressStatus -} - -var map_IngressTLS = map[string]string{ - "": "IngressTLS describes the transport layer security associated with an Ingress.", - "hosts": "Hosts are a list of hosts included in the TLS certificate. The values in this list must match the name/s used in the tlsSecret. Defaults to the wildcard host setting for the loadbalancer controller fulfilling this Ingress, if left unspecified.", - "secretName": "SecretName is the name of the secret used to terminate SSL traffic on 443. Field is left optional to allow SSL routing based on SNI hostname alone. If the SNI host in a listener conflicts with the \"Host\" header field used by an IngressRule, the SNI host is used for termination and value of the Host header is used for routing.", -} - -func (IngressTLS) SwaggerDoc() map[string]string { - return map_IngressTLS -} - -var map_NetworkPolicy = map[string]string{ - "": "DEPRECATED 1.9 - This group version of NetworkPolicy is deprecated by networking/v1/NetworkPolicy. NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", -} - -func (NetworkPolicy) SwaggerDoc() map[string]string { - return map_NetworkPolicy -} - -var map_NetworkPolicyEgressRule = map[string]string{ - "": "DEPRECATED 1.9 - This group version of NetworkPolicyEgressRule is deprecated by networking/v1/NetworkPolicyEgressRule. NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", - "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", -} - -func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { - return map_NetworkPolicyEgressRule -} - -var map_NetworkPolicyIngressRule = map[string]string{ - "": "DEPRECATED 1.9 - This group version of NetworkPolicyIngressRule is deprecated by networking/v1/NetworkPolicyIngressRule. This NetworkPolicyIngressRule matches traffic if and only if the traffic matches both ports AND from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", -} - -func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { - return map_NetworkPolicyIngressRule -} - -var map_NetworkPolicyList = map[string]string{ - "": "DEPRECATED 1.9 - This group version of NetworkPolicyList is deprecated by networking/v1/NetworkPolicyList. Network Policy List is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (NetworkPolicyList) SwaggerDoc() map[string]string { - return map_NetworkPolicyList -} - -var map_NetworkPolicyPeer = map[string]string{ - "": "DEPRECATED 1.9 - This group version of NetworkPolicyPeer is deprecated by networking/v1/NetworkPolicyPeer.", - "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", - "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", - "ipBlock": "IPBlock defines policy on a particular IPBlock. 
If this field is set then neither of the other fields can be.", -} - -func (NetworkPolicyPeer) SwaggerDoc() map[string]string { - return map_NetworkPolicyPeer -} - -var map_NetworkPolicyPort = map[string]string{ - "": "DEPRECATED 1.9 - This group version of NetworkPolicyPort is deprecated by networking/v1/NetworkPolicyPort.", - "protocol": "Optional. The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "If specified, the port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers. If present, only traffic on the specified protocol AND port will be matched.", -} - -func (NetworkPolicyPort) SwaggerDoc() map[string]string { - return map_NetworkPolicyPort -} - -var map_NetworkPolicySpec = map[string]string{ - "": "DEPRECATED 1.9 - This group version of NetworkPolicySpec is deprecated by networking/v1/NetworkPolicySpec.", - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default).", - "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", -} - -func (NetworkPolicySpec) SwaggerDoc() map[string]string { - return map_NetworkPolicySpec -} - -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container. 
Deprecated: use PodSecurityPolicy from policy API Group instead.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects. Deprecated: use PodSecurityPolicyList from policy API Group instead.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced. Deprecated: use PodSecurityPolicySpec from policy API Group instead.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. 
If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - -var map_ReplicaSet = map[string]string{ - "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1beta2/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.", - "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", - "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status", -} - -func (ReplicaSet) SwaggerDoc() map[string]string { - return map_ReplicaSet -} - -var map_ReplicaSetCondition = map[string]string{ - "": "ReplicaSetCondition describes the state of a replica set at a certain point.", - "type": "Type of replica set condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "lastTransitionTime": "The last time the condition transitioned from one status to another.", - "reason": "The reason for the condition's last transition.", - "message": "A human readable message indicating details about the transition.", -} - -func (ReplicaSetCondition) SwaggerDoc() map[string]string { - return map_ReplicaSetCondition -} - -var map_ReplicaSetList = map[string]string{ - "": "ReplicaSetList is a collection of ReplicaSets.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "items": "List of ReplicaSets. 
More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller", -} - -func (ReplicaSetList) SwaggerDoc() map[string]string { - return map_ReplicaSetList -} - -var map_ReplicaSetSpec = map[string]string{ - "": "ReplicaSetSpec is the specification of a ReplicaSet.", - "replicas": "Replicas is the number of desired replicas. This is a pointer to distinguish between explicit zero and unspecified. Defaults to 1. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", - "minReadySeconds": "Minimum number of seconds for which a newly created pod should be ready without any of its container crashing, for it to be considered available. Defaults to 0 (pod will be considered available as soon as it is ready)", - "selector": "Selector is a label query over pods that should match the replica count. If the selector is empty, it is defaulted to the labels present on the pod template. Label keys and values that must match in order to be controlled by this replica set. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", - "template": "Template is the object that describes the pod that will be created if insufficient replicas are detected. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller#pod-template", -} - -func (ReplicaSetSpec) SwaggerDoc() map[string]string { - return map_ReplicaSetSpec -} - -var map_ReplicaSetStatus = map[string]string{ - "": "ReplicaSetStatus represents the current status of a ReplicaSet.", - "replicas": "Replicas is the most recently oberved number of replicas. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller/#what-is-a-replicationcontroller", - "fullyLabeledReplicas": "The number of pods that have labels matching the labels of the pod template of the replicaset.", - "readyReplicas": "The number of ready replicas for this replica set.", - "availableReplicas": "The number of available replicas (ready for at least minReadySeconds) for this replica set.", - "observedGeneration": "ObservedGeneration reflects the generation of the most recently observed ReplicaSet.", - "conditions": "Represents the latest available observations of a replica set's current state.", -} - -func (ReplicaSetStatus) SwaggerDoc() map[string]string { - return map_ReplicaSetStatus -} - -var map_ReplicationControllerDummy = map[string]string{ - "": "Dummy definition", -} - -func (ReplicationControllerDummy) SwaggerDoc() map[string]string { - return map_ReplicationControllerDummy -} - -var map_RollbackConfig = map[string]string{ - "": "DEPRECATED.", - "revision": "The revision to rollback to. If set to 0, rollback to the last revision.", -} - -func (RollbackConfig) SwaggerDoc() map[string]string { - return map_RollbackConfig -} - -var map_RollingUpdateDaemonSet = map[string]string{ - "": "Spec to control the desired behavior of daemon set rolling update.", - "maxUnavailable": "The maximum number of DaemonSet pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of total number of DaemonSet pods at the start of the update (ex: 10%). Absolute number is calculated from percentage by rounding up. This cannot be 0. Default value is 1. Example: when this is set to 30%, at most 30% of the total number of nodes that should be running the daemon pod (i.e. status.desiredNumberScheduled) can have their pods stopped for an update at any given time. 
The update starts by stopping at most 30% of those DaemonSet pods and then brings up new DaemonSet pods in their place. Once the new pods are available, it then proceeds onto other DaemonSet pods, thus ensuring that at least 70% of original number of DaemonSet pods are available at all times during the update.", -} - -func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string { - return map_RollingUpdateDaemonSet -} - -var map_RollingUpdateDeployment = map[string]string{ - "": "Spec to control the desired behavior of rolling update.", - "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. By default, a fixed value of 1 is used. Example: when this is set to 30%, the old RC can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old RC can be scaled down further, followed by scaling up the new RC, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.", - "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. By default, a value of 1 is used. Example: when this is set to 30%, the new RC can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new RC can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.", -} - -func (RollingUpdateDeployment) SwaggerDoc() map[string]string { - return map_RollingUpdateDeployment -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use RunAsUserStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. Deprecated: use SELinuxStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - -var map_Scale = map[string]string{ - "": "represents a scaling request for a resource.", - "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.", - "spec": "defines the behavior of the scale. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.", - "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.", -} - -func (Scale) SwaggerDoc() map[string]string { - return map_Scale -} - -var map_ScaleSpec = map[string]string{ - "": "describes the attributes of a scale subresource", - "replicas": "desired number of instances for the scaled object.", -} - -func (ScaleSpec) SwaggerDoc() map[string]string { - return map_ScaleSpec -} - -var map_ScaleStatus = map[string]string{ - "": "represents the current status of a scale subresource.", - "replicas": "actual number of observed instances of the scaled object.", - "selector": "label query over pods that should match the replicas count. More info: http://kubernetes.io/docs/user-guide/labels#label-selectors", - "targetSelector": "label selector for pods that should match the replicas count. This is a serializated version of both map-based and more expressive set-based selectors. This is done to avoid introspection in the clients. The string will be in the same format as the query-param syntax. If the target type only supports map-based selectors, both this field and map-based selector field are populated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors", -} - -func (ScaleStatus) SwaggerDoc() map[string]string { - return map_ScaleStatus -} - -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. Deprecated: use SupplementalGroupsStrategyOptions from policy API Group instead.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 65801c2..0000000 --- a/vendor/k8s.io/api/extensions/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,1499 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. -func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatus) DeepCopyInto(out *CustomMetricCurrentStatus) { - *out = *in - out.CurrentValue = in.CurrentValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatus. -func (in *CustomMetricCurrentStatus) DeepCopy() *CustomMetricCurrentStatus { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricCurrentStatusList) DeepCopyInto(out *CustomMetricCurrentStatusList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricCurrentStatus, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricCurrentStatusList. -func (in *CustomMetricCurrentStatusList) DeepCopy() *CustomMetricCurrentStatusList { - if in == nil { - return nil - } - out := new(CustomMetricCurrentStatusList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTarget) DeepCopyInto(out *CustomMetricTarget) { - *out = *in - out.TargetValue = in.TargetValue.DeepCopy() - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTarget. -func (in *CustomMetricTarget) DeepCopy() *CustomMetricTarget { - if in == nil { - return nil - } - out := new(CustomMetricTarget) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *CustomMetricTargetList) DeepCopyInto(out *CustomMetricTargetList) { - *out = *in - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]CustomMetricTarget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CustomMetricTargetList. -func (in *CustomMetricTargetList) DeepCopy() *CustomMetricTargetList { - if in == nil { - return nil - } - out := new(CustomMetricTargetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DaemonSet) DeepCopyInto(out *DaemonSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSet. -func (in *DaemonSet) DeepCopy() *DaemonSet { - if in == nil { - return nil - } - out := new(DaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DaemonSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetCondition) DeepCopyInto(out *DaemonSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetCondition. -func (in *DaemonSetCondition) DeepCopy() *DaemonSetCondition { - if in == nil { - return nil - } - out := new(DaemonSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetList) DeepCopyInto(out *DaemonSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]DaemonSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetList. -func (in *DaemonSetList) DeepCopy() *DaemonSetList { - if in == nil { - return nil - } - out := new(DaemonSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DaemonSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetSpec) DeepCopyInto(out *DaemonSetSpec) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.UpdateStrategy.DeepCopyInto(&out.UpdateStrategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetSpec. -func (in *DaemonSetSpec) DeepCopy() *DaemonSetSpec { - if in == nil { - return nil - } - out := new(DaemonSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DaemonSetStatus) DeepCopyInto(out *DaemonSetStatus) { - *out = *in - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DaemonSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetStatus. -func (in *DaemonSetStatus) DeepCopy() *DaemonSetStatus { - if in == nil { - return nil - } - out := new(DaemonSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DaemonSetUpdateStrategy) DeepCopyInto(out *DaemonSetUpdateStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDaemonSet) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DaemonSetUpdateStrategy. -func (in *DaemonSetUpdateStrategy) DeepCopy() *DaemonSetUpdateStrategy { - if in == nil { - return nil - } - out := new(DaemonSetUpdateStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Deployment) DeepCopyInto(out *Deployment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deployment. -func (in *Deployment) DeepCopy() *Deployment { - if in == nil { - return nil - } - out := new(Deployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Deployment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentCondition) DeepCopyInto(out *DeploymentCondition) { - *out = *in - in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime) - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentCondition. -func (in *DeploymentCondition) DeepCopy() *DeploymentCondition { - if in == nil { - return nil - } - out := new(DeploymentCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentList) DeepCopyInto(out *DeploymentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Deployment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentList. 
-func (in *DeploymentList) DeepCopy() *DeploymentList { - if in == nil { - return nil - } - out := new(DeploymentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentRollback) DeepCopyInto(out *DeploymentRollback) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.UpdatedAnnotations != nil { - in, out := &in.UpdatedAnnotations, &out.UpdatedAnnotations - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - out.RollbackTo = in.RollbackTo - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentRollback. -func (in *DeploymentRollback) DeepCopy() *DeploymentRollback { - if in == nil { - return nil - } - out := new(DeploymentRollback) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *DeploymentRollback) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentSpec) DeepCopyInto(out *DeploymentSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - in.Strategy.DeepCopyInto(&out.Strategy) - if in.RevisionHistoryLimit != nil { - in, out := &in.RevisionHistoryLimit, &out.RevisionHistoryLimit - *out = new(int32) - **out = **in - } - if in.RollbackTo != nil { - in, out := &in.RollbackTo, &out.RollbackTo - *out = new(RollbackConfig) - **out = **in - } - if in.ProgressDeadlineSeconds != nil { - in, out := &in.ProgressDeadlineSeconds, &out.ProgressDeadlineSeconds - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentSpec. -func (in *DeploymentSpec) DeepCopy() *DeploymentSpec { - if in == nil { - return nil - } - out := new(DeploymentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]DeploymentCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.CollisionCount != nil { - in, out := &in.CollisionCount, &out.CollisionCount - *out = new(int32) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatus. -func (in *DeploymentStatus) DeepCopy() *DeploymentStatus { - if in == nil { - return nil - } - out := new(DeploymentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *DeploymentStrategy) DeepCopyInto(out *DeploymentStrategy) { - *out = *in - if in.RollingUpdate != nil { - in, out := &in.RollingUpdate, &out.RollingUpdate - *out = new(RollingUpdateDeployment) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStrategy. -func (in *DeploymentStrategy) DeepCopy() *DeploymentStrategy { - if in == nil { - return nil - } - out := new(DeploymentStrategy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPIngressPath) DeepCopyInto(out *HTTPIngressPath) { - *out = *in - out.Backend = in.Backend - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressPath. -func (in *HTTPIngressPath) DeepCopy() *HTTPIngressPath { - if in == nil { - return nil - } - out := new(HTTPIngressPath) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HTTPIngressRuleValue) DeepCopyInto(out *HTTPIngressRuleValue) { - *out = *in - if in.Paths != nil { - in, out := &in.Paths, &out.Paths - *out = make([]HTTPIngressPath, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HTTPIngressRuleValue. -func (in *HTTPIngressRuleValue) DeepCopy() *HTTPIngressRuleValue { - if in == nil { - return nil - } - out := new(HTTPIngressRuleValue) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. -func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IPBlock) DeepCopyInto(out *IPBlock) { - *out = *in - if in.Except != nil { - in, out := &in.Except, &out.Except - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock. -func (in *IPBlock) DeepCopy() *IPBlock { - if in == nil { - return nil - } - out := new(IPBlock) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Ingress) DeepCopyInto(out *Ingress) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Ingress. -func (in *Ingress) DeepCopy() *Ingress { - if in == nil { - return nil - } - out := new(Ingress) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Ingress) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressBackend) DeepCopyInto(out *IngressBackend) { - *out = *in - out.ServicePort = in.ServicePort - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressBackend. -func (in *IngressBackend) DeepCopy() *IngressBackend { - if in == nil { - return nil - } - out := new(IngressBackend) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressList) DeepCopyInto(out *IngressList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Ingress, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressList. -func (in *IngressList) DeepCopy() *IngressList { - if in == nil { - return nil - } - out := new(IngressList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *IngressList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressRule) DeepCopyInto(out *IngressRule) { - *out = *in - in.IngressRuleValue.DeepCopyInto(&out.IngressRuleValue) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRule. -func (in *IngressRule) DeepCopy() *IngressRule { - if in == nil { - return nil - } - out := new(IngressRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *IngressRuleValue) DeepCopyInto(out *IngressRuleValue) { - *out = *in - if in.HTTP != nil { - in, out := &in.HTTP, &out.HTTP - *out = new(HTTPIngressRuleValue) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressRuleValue. -func (in *IngressRuleValue) DeepCopy() *IngressRuleValue { - if in == nil { - return nil - } - out := new(IngressRuleValue) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressSpec) DeepCopyInto(out *IngressSpec) { - *out = *in - if in.Backend != nil { - in, out := &in.Backend, &out.Backend - *out = new(IngressBackend) - **out = **in - } - if in.TLS != nil { - in, out := &in.TLS, &out.TLS - *out = make([]IngressTLS, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]IngressRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressSpec. -func (in *IngressSpec) DeepCopy() *IngressSpec { - if in == nil { - return nil - } - out := new(IngressSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressStatus) DeepCopyInto(out *IngressStatus) { - *out = *in - in.LoadBalancer.DeepCopyInto(&out.LoadBalancer) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressStatus. -func (in *IngressStatus) DeepCopy() *IngressStatus { - if in == nil { - return nil - } - out := new(IngressStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IngressTLS) DeepCopyInto(out *IngressTLS) { - *out = *in - if in.Hosts != nil { - in, out := &in.Hosts, &out.Hosts - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IngressTLS. -func (in *IngressTLS) DeepCopy() *IngressTLS { - if in == nil { - return nil - } - out := new(IngressTLS) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy. -func (in *NetworkPolicy) DeepCopy() *NetworkPolicy { - if in == nil { - return nil - } - out := new(NetworkPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NetworkPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.To != nil { - in, out := &in.To, &out.To - *out = make([]NetworkPolicyPeer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule. -func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule { - if in == nil { - return nil - } - out := new(NetworkPolicyEgressRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.From != nil { - in, out := &in.From, &out.From - *out = make([]NetworkPolicyPeer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule. -func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { - if in == nil { - return nil - } - out := new(NetworkPolicyIngressRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NetworkPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList. -func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList { - if in == nil { - return nil - } - out := new(NetworkPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NetworkPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { - *out = *in - if in.PodSelector != nil { - in, out := &in.PodSelector, &out.PodSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.IPBlock != nil { - in, out := &in.IPBlock, &out.IPBlock - *out = new(IPBlock) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer. 
-func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer { - if in == nil { - return nil - } - out := new(NetworkPolicyPeer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { - *out = *in - if in.Protocol != nil { - in, out := &in.Protocol, &out.Protocol - *out = new(corev1.Protocol) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort. -func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort { - if in == nil { - return nil - } - out := new(NetworkPolicyPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { - *out = *in - in.PodSelector.DeepCopyInto(&out.PodSelector) - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]NetworkPolicyIngressRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Egress != nil { - in, out := &in.Egress, &out.Egress - *out = make([]NetworkPolicyEgressRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PolicyTypes != nil { - in, out := &in.PolicyTypes, &out.PolicyTypes - *out = make([]PolicyType, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec. -func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { - if in == nil { - return nil - } - out := new(NetworkPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. 
-func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. -func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSet) DeepCopyInto(out *ReplicaSet) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSet. 
-func (in *ReplicaSet) DeepCopy() *ReplicaSet { - if in == nil { - return nil - } - out := new(ReplicaSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSet) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetCondition) DeepCopyInto(out *ReplicaSetCondition) { - *out = *in - in.LastTransitionTime.DeepCopyInto(&out.LastTransitionTime) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetCondition. -func (in *ReplicaSetCondition) DeepCopy() *ReplicaSetCondition { - if in == nil { - return nil - } - out := new(ReplicaSetCondition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetList) DeepCopyInto(out *ReplicaSetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ReplicaSet, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetList. -func (in *ReplicaSetList) DeepCopy() *ReplicaSetList { - if in == nil { - return nil - } - out := new(ReplicaSetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicaSetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetSpec) DeepCopyInto(out *ReplicaSetSpec) { - *out = *in - if in.Replicas != nil { - in, out := &in.Replicas, &out.Replicas - *out = new(int32) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - in.Template.DeepCopyInto(&out.Template) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetSpec. -func (in *ReplicaSetSpec) DeepCopy() *ReplicaSetSpec { - if in == nil { - return nil - } - out := new(ReplicaSetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ReplicaSetStatus) DeepCopyInto(out *ReplicaSetStatus) { - *out = *in - if in.Conditions != nil { - in, out := &in.Conditions, &out.Conditions - *out = make([]ReplicaSetCondition, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicaSetStatus. -func (in *ReplicaSetStatus) DeepCopy() *ReplicaSetStatus { - if in == nil { - return nil - } - out := new(ReplicaSetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ReplicationControllerDummy) DeepCopyInto(out *ReplicationControllerDummy) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReplicationControllerDummy. -func (in *ReplicationControllerDummy) DeepCopy() *ReplicationControllerDummy { - if in == nil { - return nil - } - out := new(ReplicationControllerDummy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ReplicationControllerDummy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollbackConfig) DeepCopyInto(out *RollbackConfig) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollbackConfig. -func (in *RollbackConfig) DeepCopy() *RollbackConfig { - if in == nil { - return nil - } - out := new(RollbackConfig) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDaemonSet) DeepCopyInto(out *RollingUpdateDaemonSet) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDaemonSet. -func (in *RollingUpdateDaemonSet) DeepCopy() *RollingUpdateDaemonSet { - if in == nil { - return nil - } - out := new(RollingUpdateDaemonSet) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RollingUpdateDeployment) DeepCopyInto(out *RollingUpdateDeployment) { - *out = *in - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.MaxSurge != nil { - in, out := &in.MaxSurge, &out.MaxSurge - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RollingUpdateDeployment. -func (in *RollingUpdateDeployment) DeepCopy() *RollingUpdateDeployment { - if in == nil { - return nil - } - out := new(RollingUpdateDeployment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Scale) DeepCopyInto(out *Scale) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - out.Spec = in.Spec - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Scale. -func (in *Scale) DeepCopy() *Scale { - if in == nil { - return nil - } - out := new(Scale) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Scale) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleSpec) DeepCopyInto(out *ScaleSpec) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleSpec. -func (in *ScaleSpec) DeepCopy() *ScaleSpec { - if in == nil { - return nil - } - out := new(ScaleSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ScaleStatus) DeepCopyInto(out *ScaleStatus) { - *out = *in - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ScaleStatus. -func (in *ScaleStatus) DeepCopy() *ScaleStatus { - if in == nil { - return nil - } - out := new(ScaleStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. -func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/networking/v1/doc.go b/vendor/k8s.io/api/networking/v1/doc.go deleted file mode 100644 index ef9ae2a..0000000 --- a/vendor/k8s.io/api/networking/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +groupName=networking.k8s.io -package v1 // import "k8s.io/api/networking/v1" diff --git a/vendor/k8s.io/api/networking/v1/generated.pb.go b/vendor/k8s.io/api/networking/v1/generated.pb.go deleted file mode 100644 index 7b1c04b..0000000 --- a/vendor/k8s.io/api/networking/v1/generated.pb.go +++ /dev/null @@ -1,1868 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto - - It has these top-level messages: - IPBlock - NetworkPolicy - NetworkPolicyEgressRule - NetworkPolicyIngressRule - NetworkPolicyList - NetworkPolicyPeer - NetworkPolicyPort - NetworkPolicySpec -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *IPBlock) Reset() { *m = IPBlock{} } -func (*IPBlock) ProtoMessage() {} -func (*IPBlock) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *NetworkPolicy) Reset() { *m = NetworkPolicy{} } -func (*NetworkPolicy) ProtoMessage() {} -func (*NetworkPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *NetworkPolicyEgressRule) Reset() { *m = NetworkPolicyEgressRule{} } -func (*NetworkPolicyEgressRule) ProtoMessage() {} -func (*NetworkPolicyEgressRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *NetworkPolicyIngressRule) Reset() { *m = NetworkPolicyIngressRule{} } -func (*NetworkPolicyIngressRule) ProtoMessage() {} -func (*NetworkPolicyIngressRule) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{3} -} - -func (m *NetworkPolicyList) Reset() { *m = NetworkPolicyList{} } -func (*NetworkPolicyList) ProtoMessage() {} -func (*NetworkPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *NetworkPolicyPeer) Reset() { *m = NetworkPolicyPeer{} } -func (*NetworkPolicyPeer) ProtoMessage() {} -func (*NetworkPolicyPeer) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *NetworkPolicyPort) Reset() { *m = NetworkPolicyPort{} } -func (*NetworkPolicyPort) ProtoMessage() {} -func (*NetworkPolicyPort) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *NetworkPolicySpec) Reset() { *m = NetworkPolicySpec{} } -func (*NetworkPolicySpec) ProtoMessage() {} -func (*NetworkPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func init() { - proto.RegisterType((*IPBlock)(nil), "k8s.io.api.networking.v1.IPBlock") - proto.RegisterType((*NetworkPolicy)(nil), "k8s.io.api.networking.v1.NetworkPolicy") - proto.RegisterType((*NetworkPolicyEgressRule)(nil), "k8s.io.api.networking.v1.NetworkPolicyEgressRule") - proto.RegisterType((*NetworkPolicyIngressRule)(nil), "k8s.io.api.networking.v1.NetworkPolicyIngressRule") - proto.RegisterType((*NetworkPolicyList)(nil), "k8s.io.api.networking.v1.NetworkPolicyList") - proto.RegisterType((*NetworkPolicyPeer)(nil), "k8s.io.api.networking.v1.NetworkPolicyPeer") - proto.RegisterType((*NetworkPolicyPort)(nil), "k8s.io.api.networking.v1.NetworkPolicyPort") - proto.RegisterType((*NetworkPolicySpec)(nil), "k8s.io.api.networking.v1.NetworkPolicySpec") -} -func (m *IPBlock) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IPBlock) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.CIDR))) - i += copy(dAtA[i:], m.CIDR) - if len(m.Except) > 0 { - for _, s := range m.Except { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *NetworkPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - 
i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *NetworkPolicyEgressRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicyEgressRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.To) > 0 { - for _, msg := range m.To { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NetworkPolicyIngressRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicyIngressRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Ports) > 0 { - for _, msg := range m.Ports { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.From) > 0 { - for _, msg := range m.From { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NetworkPolicyList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *NetworkPolicyPeer) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicyPeer) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PodSelector != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n4, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.NamespaceSelector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.NamespaceSelector.Size())) - n5, err := m.NamespaceSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if m.IPBlock != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.IPBlock.Size())) - n6, err := m.IPBlock.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - return i, nil -} - -func (m *NetworkPolicyPort) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicyPort) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Protocol != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Protocol))) - i += copy(dAtA[i:], *m.Protocol) - } - if m.Port != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Port.Size())) - n7, err := m.Port.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m *NetworkPolicySpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NetworkPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodSelector.Size())) - n8, err := m.PodSelector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Ingress) > 0 { - for _, msg := range m.Ingress { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Egress) > 0 { - for _, msg := range m.Egress { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *IPBlock) Size() (n int) { - var l int - _ = l - l = len(m.CIDR) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Except) > 0 { - for _, s := range m.Except { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NetworkPolicy) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *NetworkPolicyEgressRule) Size() (n int) { - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.To) > 0 { - for _, e := range m.To { - l = e.Size() - n += 1 + l + 
sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NetworkPolicyIngressRule) Size() (n int) { - var l int - _ = l - if len(m.Ports) > 0 { - for _, e := range m.Ports { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.From) > 0 { - for _, e := range m.From { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NetworkPolicyList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *NetworkPolicyPeer) Size() (n int) { - var l int - _ = l - if m.PodSelector != nil { - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.NamespaceSelector != nil { - l = m.NamespaceSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.IPBlock != nil { - l = m.IPBlock.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *NetworkPolicyPort) Size() (n int) { - var l int - _ = l - if m.Protocol != nil { - l = len(*m.Protocol) - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Port != nil { - l = m.Port.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *NetworkPolicySpec) Size() (n int) { - var l int - _ = l - l = m.PodSelector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ingress) > 0 { - for _, e := range m.Ingress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Egress) > 0 { - for _, e := range m.Egress { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.PolicyTypes) > 0 { - for _, s := range m.PolicyTypes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *IPBlock) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IPBlock{`, - `CIDR:` + fmt.Sprintf("%v", this.CIDR) + `,`, - `Except:` + fmt.Sprintf("%v", this.Except) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "NetworkPolicySpec", "NetworkPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyEgressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyEgressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `To:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.To), "NetworkPolicyPeer", "NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyIngressRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyIngressRule{`, - `Ports:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ports), "NetworkPolicyPort", "NetworkPolicyPort", 1), `&`, ``, 1) + `,`, - `From:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.From), "NetworkPolicyPeer", 
"NetworkPolicyPeer", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "NetworkPolicy", "NetworkPolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPeer) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPeer{`, - `PodSelector:` + strings.Replace(fmt.Sprintf("%v", this.PodSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `IPBlock:` + strings.Replace(fmt.Sprintf("%v", this.IPBlock), "IPBlock", "IPBlock", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicyPort) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicyPort{`, - `Protocol:` + valueToStringGenerated(this.Protocol) + `,`, - `Port:` + strings.Replace(fmt.Sprintf("%v", this.Port), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *NetworkPolicySpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&NetworkPolicySpec{`, - `PodSelector:` + strings.Replace(strings.Replace(this.PodSelector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Ingress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ingress), "NetworkPolicyIngressRule", "NetworkPolicyIngressRule", 1), `&`, ``, 1) + `,`, - `Egress:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Egress), "NetworkPolicyEgressRule", "NetworkPolicyEgressRule", 1), `&`, ``, 1) + `,`, - `PolicyTypes:` + fmt.Sprintf("%v", this.PolicyTypes) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *IPBlock) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IPBlock: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IPBlock: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field CIDR", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex 
:= iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.CIDR = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Except", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Except = append(m.Except, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyEgressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { 
- if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyEgressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.To = append(m.To, NetworkPolicyPeer{}) - if err := m.To[len(m.To)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyIngressRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyIngressRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ports", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ports = append(m.Ports, NetworkPolicyPort{}) - if err := m.Ports[len(m.Ports)-1].Unmarshal(dAtA[iNdEx:postIndex]); err 
!= nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.From = append(m.From, NetworkPolicyPeer{}) - if err := m.From[len(m.From)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, NetworkPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyPeer) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPeer: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPeer: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PodSelector == nil { - m.PodSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NamespaceSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NamespaceSelector == nil { - m.NamespaceSelector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.NamespaceSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IPBlock", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.IPBlock == nil { - m.IPBlock = &IPBlock{} - } - if err := m.IPBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicyPort) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicyPort: wiretype end group for non-group") - 
} - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicyPort: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Protocol", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.Protocol(dAtA[iNdEx:postIndex]) - m.Protocol = &s - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Port == nil { - m.Port = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.Port.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NetworkPolicySpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NetworkPolicySpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NetworkPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PodSelector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.PodSelector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ingress", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { 
- return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ingress = append(m.Ingress, NetworkPolicyIngressRule{}) - if err := m.Ingress[len(m.Ingress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Egress", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Egress = append(m.Egress, NetworkPolicyEgressRule{}) - if err := m.Egress[len(m.Egress)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PolicyTypes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PolicyTypes = append(m.PolicyTypes, PolicyType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - 
iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/networking/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 804 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x55, 0xcf, 0x8f, 0xdb, 0x44, - 0x14, 0x8e, 0x9d, 0x6c, 0x92, 0x4e, 0x28, 0x65, 0x07, 0x21, 0xac, 0x45, 0xd8, 0xc1, 0x17, 0x56, - 0xaa, 0x18, 0x93, 0x16, 0x21, 0x6e, 0x08, 0x43, 0x29, 0x91, 0xba, 0xbb, 0xd1, 0x6c, 0x2f, 0x20, - 0x90, 0x70, 0x9c, 0x59, 0xef, 0x34, 0xb1, 0xc7, 0x1a, 0x4f, 0x42, 0xf7, 0xc6, 0x9f, 0xc0, 0x1f, - 0xc2, 0x91, 0x1b, 0x87, 0x72, 0xdc, 0x63, 0x8f, 0x3d, 0x59, 0xac, 0xf9, 0x2f, 0xf6, 0x84, 0x66, - 0x3c, 0x89, 0xf3, 0xa3, 0x11, 0xd9, 0x15, 0xbd, 0x65, 0xde, 0xbc, 0xef, 0x7b, 0xf3, 0xde, 0xfb, - 0xf2, 0x19, 0x7c, 0x35, 0xfe, 0x22, 0x43, 0x94, 0x79, 0xe3, 0xe9, 0x90, 0xf0, 0x84, 0x08, 0x92, - 0x79, 0x33, 0x92, 0x8c, 0x18, 0xf7, 0xf4, 0x45, 0x90, 0x52, 0x2f, 0x21, 0xe2, 0x17, 0xc6, 0xc7, - 0x34, 0x89, 0xbc, 0x59, 0xcf, 0x8b, 0x48, 0x42, 0x78, 0x20, 0xc8, 0x08, 0xa5, 0x9c, 0x09, 0x06, - 0xad, 0x32, 0x13, 0x05, 0x29, 0x45, 0x55, 0x26, 0x9a, 0xf5, 0x0e, 0x3e, 0x89, 0xa8, 0x38, 0x9f, - 0x0e, 0x51, 0xc8, 0x62, 0x2f, 0x62, 0x11, 0xf3, 0x14, 0x60, 0x38, 0x3d, 0x53, 0x27, 0x75, 0x50, - 0xbf, 0x4a, 0xa2, 0x03, 0x77, 0xa9, 0x64, 0xc8, 0x38, 0x79, 0x4d, 0xb1, 0x83, 0xcf, 0xaa, 0x9c, - 0x38, 0x08, 0xcf, 0x69, 0x42, 0xf8, 0x85, 0x97, 0x8e, 0x23, 0x19, 0xc8, 0xbc, 0x98, 0x88, 0xe0, - 0x75, 0x28, 0x6f, 0x1b, 0x8a, 0x4f, 0x13, 0x41, 0x63, 0xb2, 0x01, 0xf8, 0xfc, 0xbf, 0x00, 0x59, - 0x78, 0x4e, 0xe2, 0x60, 0x03, 0xf7, 0x70, 0x1b, 0x6e, 0x2a, 0xe8, 0xc4, 0xa3, 0x89, 0xc8, 0x04, - 0x5f, 0x07, 0xb9, 0x27, 0xa0, 0xd5, 0x1f, 0xf8, 0x13, 0x16, 0x8e, 0x61, 0x17, 0x34, 0x42, 0x3a, - 0xe2, 0x96, 0xd1, 0x35, 0x0e, 0xef, 0xf8, 0x6f, 0x5d, 0xe6, 0x4e, 0xad, 0xc8, 0x9d, 0xc6, 0xd7, - 0xfd, 0x6f, 0x30, 0x56, 0x37, 0xd0, 0x05, 0x4d, 0xf2, 0x3c, 0x24, 0xa9, 0xb0, 0xcc, 0x6e, 0xfd, - 0xf0, 0x8e, 0x0f, 0x8a, 0xdc, 0x69, 0x3e, 0x52, 0x11, 0xac, 0x6f, 0xdc, 0xbf, 0x0c, 0x70, 0xf7, - 0xb8, 0xdc, 0xc4, 0x80, 0x4d, 0x68, 0x78, 0x01, 0x7f, 0x06, 0x6d, 0x39, 0x9b, 0x51, 0x20, 0x02, - 0xc5, 0xdd, 0x79, 0xf0, 0x29, 0xaa, 0xd6, 0xb6, 0x78, 0x2a, 0x4a, 0xc7, 0x91, 0x0c, 0x64, 0x48, - 0x66, 0xa3, 0x59, 0x0f, 0x9d, 0x0c, 0x9f, 0x91, 0x50, 0x1c, 0x11, 0x11, 0xf8, 0x50, 0xbf, 0x06, - 0x54, 0x31, 0xbc, 0x60, 0x85, 0x47, 0xa0, 0x91, 0xa5, 0x24, 0xb4, 0x4c, 0xc5, 0x7e, 0x1f, 0x6d, - 0x13, 0x05, 0x5a, 0x79, 0xd8, 0x69, 0x4a, 0xc2, 0xaa, 0x4d, 0x79, 0xc2, 0x8a, 0xc6, 0xfd, 0xc3, - 0x00, 0xef, 0xaf, 0x64, 0x3e, 0x8a, 0x38, 0xc9, 0x32, 0x3c, 0x9d, 0x10, 0x38, 0x00, 0x7b, 0x29, - 0xe3, 0x22, 0xb3, 0x8c, 0x6e, 0xfd, 0x06, 0xb5, 0x06, 0x8c, 0x0b, 0xff, 0xae, 0xae, 0xb5, 0x27, - 0x4f, 0x19, 0x2e, 0x89, 0xe0, 0x63, 0x60, 0x0a, 0xa6, 0x06, 0x7a, 0x03, 0x3a, 0x42, 0xb8, 0x0f, - 0x34, 0x9d, 0xf9, 0x94, 0x61, 0x53, 0x30, 0xf7, 0x4f, 0x03, 0x58, 0x2b, 0x59, 0xfd, 0xe4, 0x4d, - 0xbe, 0xfb, 0x08, 0x34, 0xce, 0x38, 0x8b, 0x6f, 0xf3, 0xf2, 0xc5, 0xd0, 0xbf, 0xe5, 0x2c, 0xc6, - 0x8a, 0xc6, 0x7d, 0x61, 0x80, 0xfd, 0x95, 0xcc, 0x27, 0x34, 0x13, 0xf0, 0xc7, 0x0d, 0xed, 
0xa0, - 0xdd, 0xb4, 0x23, 0xd1, 0x4a, 0x39, 0xef, 0xe8, 0x5a, 0xed, 0x79, 0x64, 0x49, 0x37, 0x4f, 0xc0, - 0x1e, 0x15, 0x24, 0xce, 0x74, 0x0f, 0x1f, 0xef, 0xd8, 0x43, 0x35, 0x90, 0xbe, 0x44, 0xe3, 0x92, - 0xc4, 0x7d, 0x61, 0xae, 0x75, 0x20, 0x7b, 0x85, 0x67, 0xa0, 0x93, 0xb2, 0xd1, 0x29, 0x99, 0x90, - 0x50, 0x30, 0xae, 0x9b, 0x78, 0xb8, 0x63, 0x13, 0xc1, 0x90, 0x4c, 0xe6, 0x50, 0xff, 0x5e, 0x91, - 0x3b, 0x9d, 0x41, 0xc5, 0x85, 0x97, 0x89, 0xe1, 0x73, 0xb0, 0x9f, 0x04, 0x31, 0xc9, 0xd2, 0x20, - 0x24, 0x8b, 0x6a, 0xe6, 0xed, 0xab, 0xbd, 0x57, 0xe4, 0xce, 0xfe, 0xf1, 0x3a, 0x23, 0xde, 0x2c, - 0x02, 0xbf, 0x03, 0x2d, 0x9a, 0x2a, 0x0b, 0xb1, 0xea, 0xaa, 0xde, 0x47, 0xdb, 0xe7, 0xa8, 0xbd, - 0xc6, 0xef, 0x14, 0xb9, 0x33, 0x37, 0x1e, 0x3c, 0x87, 0xbb, 0xbf, 0xaf, 0x6b, 0x40, 0x0a, 0x0e, - 0x3e, 0x06, 0x6d, 0xe5, 0x55, 0x21, 0x9b, 0x68, 0x6f, 0xba, 0x2f, 0xf7, 0x39, 0xd0, 0xb1, 0xeb, - 0xdc, 0xf9, 0x60, 0xd3, 0xbc, 0xd1, 0xfc, 0x1a, 0x2f, 0xc0, 0xf0, 0x18, 0x34, 0xa4, 0x74, 0xf5, - 0x54, 0xb6, 0x9b, 0x90, 0xf4, 0x4b, 0x54, 0xfa, 0x25, 0xea, 0x27, 0xe2, 0x84, 0x9f, 0x0a, 0x4e, - 0x93, 0xc8, 0x6f, 0x4b, 0xc9, 0xca, 0x27, 0x61, 0xc5, 0xe3, 0x5e, 0xaf, 0x2f, 0x5c, 0x7a, 0x08, - 0x7c, 0xf6, 0xbf, 0x2d, 0xfc, 0x5d, 0x2d, 0xb3, 0xed, 0x4b, 0xff, 0x09, 0xb4, 0x68, 0xf9, 0x27, - 0xd7, 0x12, 0x7e, 0xb0, 0xa3, 0x84, 0x97, 0xac, 0xc1, 0xbf, 0xa7, 0xcb, 0xb4, 0xe6, 0xc1, 0x39, - 0x27, 0xfc, 0x1e, 0x34, 0x49, 0xc9, 0x5e, 0x57, 0xec, 0xbd, 0x1d, 0xd9, 0x2b, 0xbf, 0xf4, 0xdf, - 0xd6, 0xe4, 0x4d, 0x1d, 0xd3, 0x84, 0xf0, 0x4b, 0x39, 0x25, 0x99, 0xfb, 0xf4, 0x22, 0x25, 0x99, - 0xd5, 0x50, 0xdf, 0x93, 0x0f, 0xcb, 0x66, 0x17, 0xe1, 0xeb, 0xdc, 0x01, 0xd5, 0x11, 0x2f, 0x23, - 0xfc, 0xc3, 0xcb, 0x2b, 0xbb, 0xf6, 0xf2, 0xca, 0xae, 0xbd, 0xba, 0xb2, 0x6b, 0xbf, 0x16, 0xb6, - 0x71, 0x59, 0xd8, 0xc6, 0xcb, 0xc2, 0x36, 0x5e, 0x15, 0xb6, 0xf1, 0x77, 0x61, 0x1b, 0xbf, 0xfd, - 0x63, 0xd7, 0x7e, 0x30, 0x67, 0xbd, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xf7, 0x7b, 0xc9, 0x59, - 0x67, 0x08, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/networking/v1/generated.proto b/vendor/k8s.io/api/networking/v1/generated.proto deleted file mode 100644 index 4e068d0..0000000 --- a/vendor/k8s.io/api/networking/v1/generated.proto +++ /dev/null @@ -1,195 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.networking.v1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods -// matched by a NetworkPolicySpec's podSelector. 
The except entry describes CIDRs that should -// not be included within this rule. -message IPBlock { - // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" - optional string cidr = 1; - - // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" - // Except values will be rejected if they are outside the CIDR range - // +optional - repeated string except = 2; -} - -// NetworkPolicy describes what network traffic is allowed for a set of Pods -message NetworkPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior for this NetworkPolicy. - // +optional - optional NetworkPolicySpec spec = 2; -} - -// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods -// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. -// This type is beta-level in 1.8 -message NetworkPolicyEgressRule { - // List of destination ports for outgoing traffic. - // Each item in this list is combined using a logical OR. If this field is - // empty or missing, this rule matches all ports (traffic not restricted by port). - // If this field is present and contains at least one item, then this rule allows - // traffic only if the traffic matches at least one port in the list. - // +optional - repeated NetworkPolicyPort ports = 1; - - // List of destinations for outgoing traffic of pods selected for this rule. - // Items in this list are combined using a logical OR operation. If this field is - // empty or missing, this rule matches all destinations (traffic not restricted by - // destination). If this field is present and contains at least one item, this rule - // allows traffic only if the traffic matches at least one item in the to list. - // +optional - repeated NetworkPolicyPeer to = 2; -} - -// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods -// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. -message NetworkPolicyIngressRule { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is - // empty or missing, this rule matches all ports (traffic not restricted by port). - // If this field is present and contains at least one item, then this rule allows - // traffic only if the traffic matches at least one port in the list. - // +optional - repeated NetworkPolicyPort ports = 1; - - // List of sources which should be able to access the pods selected for this rule. - // Items in this list are combined using a logical OR operation. If this field is - // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule - // allows traffic only if the traffic matches at least one item in the from list. - // +optional - repeated NetworkPolicyPeer from = 2; -} - -// NetworkPolicyList is a list of NetworkPolicy objects. -message NetworkPolicyList { - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated NetworkPolicy items = 2; -} - -// NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of -// fields are allowed -message NetworkPolicyPeer { - // This is a label selector which selects Pods. This field follows standard label - // selector semantics; if present but empty, it selects all pods. - // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. - // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector namespaceSelector = 2; - - // IPBlock defines policy on a particular IPBlock. If this field is set then - // neither of the other fields can be. - // +optional - optional IPBlock ipBlock = 3; -} - -// NetworkPolicyPort describes a port to allow traffic on -message NetworkPolicyPort { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. - // +optional - optional string protocol = 1; - - // The port on the given protocol. This can either be a numerical or named port on - // a pod. If this field is not provided, this matches all port names and numbers. - // +optional - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString port = 2; -} - -// NetworkPolicySpec provides the specification of a NetworkPolicy -message NetworkPolicySpec { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector podSelector = 1; - - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod - // (and cluster policy otherwise allows the traffic), OR if the traffic source is - // the pod's local node, OR if the traffic matches at least one ingress rule - // across all of the NetworkPolicy objects whose podSelector matches the pod. If - // this field is empty then this NetworkPolicy does not allow any traffic (and serves - // solely to ensure that the pods it selects are isolated by default) - // +optional - repeated NetworkPolicyIngressRule ingress = 2; - - // List of egress rules to be applied to the selected pods. 
Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy - // otherwise allows the traffic), OR if the traffic matches at least one egress rule - // across all of the NetworkPolicy objects whose podSelector matches the pod. If - // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves - // solely to ensure that the pods it selects are isolated by default). - // This field is beta-level in 1.8 - // +optional - repeated NetworkPolicyEgressRule egress = 3; - - // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. - // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. - // Likewise, if you want to write a policy that specifies that no egress is allowed, - // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). - // This field is beta-level in 1.8 - // +optional - repeated string policyTypes = 4; -} - diff --git a/vendor/k8s.io/api/networking/v1/register.go b/vendor/k8s.io/api/networking/v1/register.go deleted file mode 100644 index f47f22e..0000000 --- a/vendor/k8s.io/api/networking/v1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "networking.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &NetworkPolicy{}, - &NetworkPolicyList{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/networking/v1/types.go b/vendor/k8s.io/api/networking/v1/types.go deleted file mode 100644 index ce70448..0000000 --- a/vendor/k8s.io/api/networking/v1/types.go +++ /dev/null @@ -1,203 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NetworkPolicy describes what network traffic is allowed for a set of Pods -type NetworkPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior for this NetworkPolicy. - // +optional - Spec NetworkPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// Policy Type string describes the NetworkPolicy type -// This type is beta-level in 1.8 -type PolicyType string - -const ( - // PolicyTypeIngress is a NetworkPolicy that affects ingress traffic on selected pods - PolicyTypeIngress PolicyType = "Ingress" - // PolicyTypeEgress is a NetworkPolicy that affects egress traffic on selected pods - PolicyTypeEgress PolicyType = "Egress" -) - -// NetworkPolicySpec provides the specification of a NetworkPolicy -type NetworkPolicySpec struct { - // Selects the pods to which this NetworkPolicy object applies. The array of - // ingress rules is applied to any pods selected by this field. Multiple network - // policies can select the same set of pods. In this case, the ingress rules for - // each are combined additively. This field is NOT optional and follows standard - // label selector semantics. An empty podSelector matches all pods in this - // namespace. - PodSelector metav1.LabelSelector `json:"podSelector" protobuf:"bytes,1,opt,name=podSelector"` - - // List of ingress rules to be applied to the selected pods. Traffic is allowed to - // a pod if there are no NetworkPolicies selecting the pod - // (and cluster policy otherwise allows the traffic), OR if the traffic source is - // the pod's local node, OR if the traffic matches at least one ingress rule - // across all of the NetworkPolicy objects whose podSelector matches the pod. If - // this field is empty then this NetworkPolicy does not allow any traffic (and serves - // solely to ensure that the pods it selects are isolated by default) - // +optional - Ingress []NetworkPolicyIngressRule `json:"ingress,omitempty" protobuf:"bytes,2,rep,name=ingress"` - - // List of egress rules to be applied to the selected pods. 
Outgoing traffic is - // allowed if there are no NetworkPolicies selecting the pod (and cluster policy - // otherwise allows the traffic), OR if the traffic matches at least one egress rule - // across all of the NetworkPolicy objects whose podSelector matches the pod. If - // this field is empty then this NetworkPolicy limits all outgoing traffic (and serves - // solely to ensure that the pods it selects are isolated by default). - // This field is beta-level in 1.8 - // +optional - Egress []NetworkPolicyEgressRule `json:"egress,omitempty" protobuf:"bytes,3,rep,name=egress"` - - // List of rule types that the NetworkPolicy relates to. - // Valid options are Ingress, Egress, or Ingress,Egress. - // If this field is not specified, it will default based on the existence of Ingress or Egress rules; - // policies that contain an Egress section are assumed to affect Egress, and all policies - // (whether or not they contain an Ingress section) are assumed to affect Ingress. - // If you want to write an egress-only policy, you must explicitly specify policyTypes [ "Egress" ]. - // Likewise, if you want to write a policy that specifies that no egress is allowed, - // you must specify a policyTypes value that include "Egress" (since such a policy would not include - // an Egress section and would otherwise default to just [ "Ingress" ]). - // This field is beta-level in 1.8 - // +optional - PolicyTypes []PolicyType `json:"policyTypes,omitempty" protobuf:"bytes,4,rep,name=policyTypes,casttype=PolicyType"` -} - -// NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods -// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from. -type NetworkPolicyIngressRule struct { - // List of ports which should be made accessible on the pods selected for this - // rule. Each item in this list is combined using a logical OR. If this field is - // empty or missing, this rule matches all ports (traffic not restricted by port). - // If this field is present and contains at least one item, then this rule allows - // traffic only if the traffic matches at least one port in the list. - // +optional - Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - - // List of sources which should be able to access the pods selected for this rule. - // Items in this list are combined using a logical OR operation. If this field is - // empty or missing, this rule matches all sources (traffic not restricted by - // source). If this field is present and contains at least on item, this rule - // allows traffic only if the traffic matches at least one item in the from list. - // +optional - From []NetworkPolicyPeer `json:"from,omitempty" protobuf:"bytes,2,rep,name=from"` -} - -// NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods -// matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. -// This type is beta-level in 1.8 -type NetworkPolicyEgressRule struct { - // List of destination ports for outgoing traffic. - // Each item in this list is combined using a logical OR. If this field is - // empty or missing, this rule matches all ports (traffic not restricted by port). - // If this field is present and contains at least one item, then this rule allows - // traffic only if the traffic matches at least one port in the list. 
- // +optional - Ports []NetworkPolicyPort `json:"ports,omitempty" protobuf:"bytes,1,rep,name=ports"` - - // List of destinations for outgoing traffic of pods selected for this rule. - // Items in this list are combined using a logical OR operation. If this field is - // empty or missing, this rule matches all destinations (traffic not restricted by - // destination). If this field is present and contains at least one item, this rule - // allows traffic only if the traffic matches at least one item in the to list. - // +optional - To []NetworkPolicyPeer `json:"to,omitempty" protobuf:"bytes,2,rep,name=to"` -} - -// NetworkPolicyPort describes a port to allow traffic on -type NetworkPolicyPort struct { - // The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this - // field defaults to TCP. - // +optional - Protocol *v1.Protocol `json:"protocol,omitempty" protobuf:"bytes,1,opt,name=protocol,casttype=k8s.io/api/core/v1.Protocol"` - - // The port on the given protocol. This can either be a numerical or named port on - // a pod. If this field is not provided, this matches all port names and numbers. - // +optional - Port *intstr.IntOrString `json:"port,omitempty" protobuf:"bytes,2,opt,name=port"` -} - -// IPBlock describes a particular CIDR (Ex. "192.168.1.1/24") that is allowed to the pods -// matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should -// not be included within this rule. -type IPBlock struct { - // CIDR is a string representing the IP Block - // Valid examples are "192.168.1.1/24" - CIDR string `json:"cidr" protobuf:"bytes,1,name=cidr"` - // Except is a slice of CIDRs that should not be included within an IP Block - // Valid examples are "192.168.1.1/24" - // Except values will be rejected if they are outside the CIDR range - // +optional - Except []string `json:"except,omitempty" protobuf:"bytes,2,rep,name=except"` -} - -// NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of -// fields are allowed -type NetworkPolicyPeer struct { - // This is a label selector which selects Pods. This field follows standard label - // selector semantics; if present but empty, it selects all pods. - // - // If NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects the Pods matching PodSelector in the policy's own Namespace. - // +optional - PodSelector *metav1.LabelSelector `json:"podSelector,omitempty" protobuf:"bytes,1,opt,name=podSelector"` - - // Selects Namespaces using cluster-scoped labels. This field follows standard label - // selector semantics; if present but empty, it selects all namespaces. - // - // If PodSelector is also set, then the NetworkPolicyPeer as a whole selects - // the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. - // Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector. - // +optional - NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,2,opt,name=namespaceSelector"` - - // IPBlock defines policy on a particular IPBlock. If this field is set then - // neither of the other fields can be. - // +optional - IPBlock *IPBlock `json:"ipBlock,omitempty" protobuf:"bytes,3,rep,name=ipBlock"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// NetworkPolicyList is a list of NetworkPolicy objects. 
-type NetworkPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []NetworkPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go deleted file mode 100644 index f4363bc..0000000 --- a/vendor/k8s.io/api/networking/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,113 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_IPBlock = map[string]string{ - "": "IPBlock describes a particular CIDR (Ex. \"192.168.1.1/24\") that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The except entry describes CIDRs that should not be included within this rule.", - "cidr": "CIDR is a string representing the IP Block Valid examples are \"192.168.1.1/24\"", - "except": "Except is a slice of CIDRs that should not be included within an IP Block Valid examples are \"192.168.1.1/24\" Except values will be rejected if they are outside the CIDR range", -} - -func (IPBlock) SwaggerDoc() map[string]string { - return map_IPBlock -} - -var map_NetworkPolicy = map[string]string{ - "": "NetworkPolicy describes what network traffic is allowed for a set of Pods", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired behavior for this NetworkPolicy.", -} - -func (NetworkPolicy) SwaggerDoc() map[string]string { - return map_NetworkPolicy -} - -var map_NetworkPolicyEgressRule = map[string]string{ - "": "NetworkPolicyEgressRule describes a particular set of traffic that is allowed out of pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and to. This type is beta-level in 1.8", - "ports": "List of destination ports for outgoing traffic. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). 
If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "to": "List of destinations for outgoing traffic of pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all destinations (traffic not restricted by destination). If this field is present and contains at least one item, this rule allows traffic only if the traffic matches at least one item in the to list.", -} - -func (NetworkPolicyEgressRule) SwaggerDoc() map[string]string { - return map_NetworkPolicyEgressRule -} - -var map_NetworkPolicyIngressRule = map[string]string{ - "": "NetworkPolicyIngressRule describes a particular set of traffic that is allowed to the pods matched by a NetworkPolicySpec's podSelector. The traffic must match both ports and from.", - "ports": "List of ports which should be made accessible on the pods selected for this rule. Each item in this list is combined using a logical OR. If this field is empty or missing, this rule matches all ports (traffic not restricted by port). If this field is present and contains at least one item, then this rule allows traffic only if the traffic matches at least one port in the list.", - "from": "List of sources which should be able to access the pods selected for this rule. Items in this list are combined using a logical OR operation. If this field is empty or missing, this rule matches all sources (traffic not restricted by source). If this field is present and contains at least on item, this rule allows traffic only if the traffic matches at least one item in the from list.", -} - -func (NetworkPolicyIngressRule) SwaggerDoc() map[string]string { - return map_NetworkPolicyIngressRule -} - -var map_NetworkPolicyList = map[string]string{ - "": "NetworkPolicyList is a list of NetworkPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (NetworkPolicyList) SwaggerDoc() map[string]string { - return map_NetworkPolicyList -} - -var map_NetworkPolicyPeer = map[string]string{ - "": "NetworkPolicyPeer describes a peer to allow traffic from. Only certain combinations of fields are allowed", - "podSelector": "This is a label selector which selects Pods. This field follows standard label selector semantics; if present but empty, it selects all pods.\n\nIf NamespaceSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects the Pods matching PodSelector in the policy's own Namespace.", - "namespaceSelector": "Selects Namespaces using cluster-scoped labels. This field follows standard label selector semantics; if present but empty, it selects all namespaces.\n\nIf PodSelector is also set, then the NetworkPolicyPeer as a whole selects the Pods matching PodSelector in the Namespaces selected by NamespaceSelector. Otherwise it selects all Pods in the Namespaces selected by NamespaceSelector.", - "ipBlock": "IPBlock defines policy on a particular IPBlock. 
If this field is set then neither of the other fields can be.", -} - -func (NetworkPolicyPeer) SwaggerDoc() map[string]string { - return map_NetworkPolicyPeer -} - -var map_NetworkPolicyPort = map[string]string{ - "": "NetworkPolicyPort describes a port to allow traffic on", - "protocol": "The protocol (TCP, UDP, or SCTP) which traffic must match. If not specified, this field defaults to TCP.", - "port": "The port on the given protocol. This can either be a numerical or named port on a pod. If this field is not provided, this matches all port names and numbers.", -} - -func (NetworkPolicyPort) SwaggerDoc() map[string]string { - return map_NetworkPolicyPort -} - -var map_NetworkPolicySpec = map[string]string{ - "": "NetworkPolicySpec provides the specification of a NetworkPolicy", - "podSelector": "Selects the pods to which this NetworkPolicy object applies. The array of ingress rules is applied to any pods selected by this field. Multiple network policies can select the same set of pods. In this case, the ingress rules for each are combined additively. This field is NOT optional and follows standard label selector semantics. An empty podSelector matches all pods in this namespace.", - "ingress": "List of ingress rules to be applied to the selected pods. Traffic is allowed to a pod if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic source is the pod's local node, OR if the traffic matches at least one ingress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy does not allow any traffic (and serves solely to ensure that the pods it selects are isolated by default)", - "egress": "List of egress rules to be applied to the selected pods. Outgoing traffic is allowed if there are no NetworkPolicies selecting the pod (and cluster policy otherwise allows the traffic), OR if the traffic matches at least one egress rule across all of the NetworkPolicy objects whose podSelector matches the pod. If this field is empty then this NetworkPolicy limits all outgoing traffic (and serves solely to ensure that the pods it selects are isolated by default). This field is beta-level in 1.8", - "policyTypes": "List of rule types that the NetworkPolicy relates to. Valid options are Ingress, Egress, or Ingress,Egress. If this field is not specified, it will default based on the existence of Ingress or Egress rules; policies that contain an Egress section are assumed to affect Egress, and all policies (whether or not they contain an Ingress section) are assumed to affect Ingress. If you want to write an egress-only policy, you must explicitly specify policyTypes [ \"Egress\" ]. Likewise, if you want to write a policy that specifies that no egress is allowed, you must specify a policyTypes value that include \"Egress\" (since such a policy would not include an Egress section and would otherwise default to just [ \"Ingress\" ]). This field is beta-level in 1.8", -} - -func (NetworkPolicySpec) SwaggerDoc() map[string]string { - return map_NetworkPolicySpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go deleted file mode 100644 index d1e4e88..0000000 --- a/vendor/k8s.io/api/networking/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,262 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IPBlock) DeepCopyInto(out *IPBlock) { - *out = *in - if in.Except != nil { - in, out := &in.Except, &out.Except - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IPBlock. -func (in *IPBlock) DeepCopy() *IPBlock { - if in == nil { - return nil - } - out := new(IPBlock) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicy) DeepCopyInto(out *NetworkPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicy. -func (in *NetworkPolicy) DeepCopy() *NetworkPolicy { - if in == nil { - return nil - } - out := new(NetworkPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NetworkPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyEgressRule) DeepCopyInto(out *NetworkPolicyEgressRule) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.To != nil { - in, out := &in.To, &out.To - *out = make([]NetworkPolicyPeer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyEgressRule. -func (in *NetworkPolicyEgressRule) DeepCopy() *NetworkPolicyEgressRule { - if in == nil { - return nil - } - out := new(NetworkPolicyEgressRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkPolicyIngressRule) DeepCopyInto(out *NetworkPolicyIngressRule) { - *out = *in - if in.Ports != nil { - in, out := &in.Ports, &out.Ports - *out = make([]NetworkPolicyPort, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.From != nil { - in, out := &in.From, &out.From - *out = make([]NetworkPolicyPeer, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyIngressRule. -func (in *NetworkPolicyIngressRule) DeepCopy() *NetworkPolicyIngressRule { - if in == nil { - return nil - } - out := new(NetworkPolicyIngressRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyList) DeepCopyInto(out *NetworkPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]NetworkPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyList. -func (in *NetworkPolicyList) DeepCopy() *NetworkPolicyList { - if in == nil { - return nil - } - out := new(NetworkPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *NetworkPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyPeer) DeepCopyInto(out *NetworkPolicyPeer) { - *out = *in - if in.PodSelector != nil { - in, out := &in.PodSelector, &out.PodSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.NamespaceSelector != nil { - in, out := &in.NamespaceSelector, &out.NamespaceSelector - *out = new(metav1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.IPBlock != nil { - in, out := &in.IPBlock, &out.IPBlock - *out = new(IPBlock) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPeer. -func (in *NetworkPolicyPeer) DeepCopy() *NetworkPolicyPeer { - if in == nil { - return nil - } - out := new(NetworkPolicyPeer) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *NetworkPolicyPort) DeepCopyInto(out *NetworkPolicyPort) { - *out = *in - if in.Protocol != nil { - in, out := &in.Protocol, &out.Protocol - *out = new(corev1.Protocol) - **out = **in - } - if in.Port != nil { - in, out := &in.Port, &out.Port - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicyPort. -func (in *NetworkPolicyPort) DeepCopy() *NetworkPolicyPort { - if in == nil { - return nil - } - out := new(NetworkPolicyPort) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *NetworkPolicySpec) DeepCopyInto(out *NetworkPolicySpec) { - *out = *in - in.PodSelector.DeepCopyInto(&out.PodSelector) - if in.Ingress != nil { - in, out := &in.Ingress, &out.Ingress - *out = make([]NetworkPolicyIngressRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Egress != nil { - in, out := &in.Egress, &out.Egress - *out = make([]NetworkPolicyEgressRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.PolicyTypes != nil { - in, out := &in.PolicyTypes, &out.PolicyTypes - *out = make([]PolicyType, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NetworkPolicySpec. -func (in *NetworkPolicySpec) DeepCopy() *NetworkPolicySpec { - if in == nil { - return nil - } - out := new(NetworkPolicySpec) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/policy/v1beta1/doc.go b/vendor/k8s.io/api/policy/v1beta1/doc.go deleted file mode 100644 index 9c456f9..0000000 --- a/vendor/k8s.io/api/policy/v1beta1/doc.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package - -// Package policy is for any kind of policy object. Suitable examples, even if -// they aren't all here, are PodDisruptionBudget, PodSecurityPolicy, -// NetworkPolicy, etc. -// +k8s:openapi-gen=true -package v1beta1 // import "k8s.io/api/policy/v1beta1" diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go b/vendor/k8s.io/api/policy/v1beta1/generated.pb.go deleted file mode 100644 index d7d62dd..0000000 --- a/vendor/k8s.io/api/policy/v1beta1/generated.pb.go +++ /dev/null @@ -1,4112 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto - - It has these top-level messages: - AllowedFlexVolume - AllowedHostPath - Eviction - FSGroupStrategyOptions - HostPortRange - IDRange - PodDisruptionBudget - PodDisruptionBudgetList - PodDisruptionBudgetSpec - PodDisruptionBudgetStatus - PodSecurityPolicy - PodSecurityPolicyList - PodSecurityPolicySpec - RunAsUserStrategyOptions - SELinuxStrategyOptions - SupplementalGroupsStrategyOptions -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import k8s_io_apimachinery_pkg_util_intstr "k8s.io/apimachinery/pkg/util/intstr" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *AllowedFlexVolume) Reset() { *m = AllowedFlexVolume{} } -func (*AllowedFlexVolume) ProtoMessage() {} -func (*AllowedFlexVolume) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *AllowedHostPath) Reset() { *m = AllowedHostPath{} } -func (*AllowedHostPath) ProtoMessage() {} -func (*AllowedHostPath) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *Eviction) Reset() { *m = Eviction{} } -func (*Eviction) ProtoMessage() {} -func (*Eviction) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *FSGroupStrategyOptions) Reset() { *m = FSGroupStrategyOptions{} } -func (*FSGroupStrategyOptions) ProtoMessage() {} -func (*FSGroupStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *HostPortRange) Reset() { *m = HostPortRange{} } -func (*HostPortRange) ProtoMessage() {} -func (*HostPortRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *IDRange) Reset() { *m = IDRange{} } -func (*IDRange) ProtoMessage() {} -func (*IDRange) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *PodDisruptionBudget) Reset() { *m = PodDisruptionBudget{} } -func (*PodDisruptionBudget) ProtoMessage() {} -func (*PodDisruptionBudget) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *PodDisruptionBudgetList) Reset() { *m = PodDisruptionBudgetList{} } -func (*PodDisruptionBudgetList) ProtoMessage() {} -func (*PodDisruptionBudgetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *PodDisruptionBudgetSpec) Reset() { *m = PodDisruptionBudgetSpec{} } -func (*PodDisruptionBudgetSpec) ProtoMessage() {} -func (*PodDisruptionBudgetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *PodDisruptionBudgetStatus) Reset() { *m = PodDisruptionBudgetStatus{} } -func (*PodDisruptionBudgetStatus) ProtoMessage() {} -func (*PodDisruptionBudgetStatus) Descriptor() ([]byte, []int) { - 
return fileDescriptorGenerated, []int{9} -} - -func (m *PodSecurityPolicy) Reset() { *m = PodSecurityPolicy{} } -func (*PodSecurityPolicy) ProtoMessage() {} -func (*PodSecurityPolicy) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *PodSecurityPolicyList) Reset() { *m = PodSecurityPolicyList{} } -func (*PodSecurityPolicyList) ProtoMessage() {} -func (*PodSecurityPolicyList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func (m *PodSecurityPolicySpec) Reset() { *m = PodSecurityPolicySpec{} } -func (*PodSecurityPolicySpec) ProtoMessage() {} -func (*PodSecurityPolicySpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{12} } - -func (m *RunAsUserStrategyOptions) Reset() { *m = RunAsUserStrategyOptions{} } -func (*RunAsUserStrategyOptions) ProtoMessage() {} -func (*RunAsUserStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{13} -} - -func (m *SELinuxStrategyOptions) Reset() { *m = SELinuxStrategyOptions{} } -func (*SELinuxStrategyOptions) ProtoMessage() {} -func (*SELinuxStrategyOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{14} } - -func (m *SupplementalGroupsStrategyOptions) Reset() { *m = SupplementalGroupsStrategyOptions{} } -func (*SupplementalGroupsStrategyOptions) ProtoMessage() {} -func (*SupplementalGroupsStrategyOptions) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{15} -} - -func init() { - proto.RegisterType((*AllowedFlexVolume)(nil), "k8s.io.api.policy.v1beta1.AllowedFlexVolume") - proto.RegisterType((*AllowedHostPath)(nil), "k8s.io.api.policy.v1beta1.AllowedHostPath") - proto.RegisterType((*Eviction)(nil), "k8s.io.api.policy.v1beta1.Eviction") - proto.RegisterType((*FSGroupStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.FSGroupStrategyOptions") - proto.RegisterType((*HostPortRange)(nil), "k8s.io.api.policy.v1beta1.HostPortRange") - proto.RegisterType((*IDRange)(nil), "k8s.io.api.policy.v1beta1.IDRange") - proto.RegisterType((*PodDisruptionBudget)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudget") - proto.RegisterType((*PodDisruptionBudgetList)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetList") - proto.RegisterType((*PodDisruptionBudgetSpec)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetSpec") - proto.RegisterType((*PodDisruptionBudgetStatus)(nil), "k8s.io.api.policy.v1beta1.PodDisruptionBudgetStatus") - proto.RegisterType((*PodSecurityPolicy)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicy") - proto.RegisterType((*PodSecurityPolicyList)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicyList") - proto.RegisterType((*PodSecurityPolicySpec)(nil), "k8s.io.api.policy.v1beta1.PodSecurityPolicySpec") - proto.RegisterType((*RunAsUserStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.RunAsUserStrategyOptions") - proto.RegisterType((*SELinuxStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SELinuxStrategyOptions") - proto.RegisterType((*SupplementalGroupsStrategyOptions)(nil), "k8s.io.api.policy.v1beta1.SupplementalGroupsStrategyOptions") -} -func (m *AllowedFlexVolume) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedFlexVolume) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Driver))) - i += copy(dAtA[i:], m.Driver) - return i, nil -} - 
-func (m *AllowedHostPath) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AllowedHostPath) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.PathPrefix))) - i += copy(dAtA[i:], m.PathPrefix) - dAtA[i] = 0x10 - i++ - if m.ReadOnly { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - return i, nil -} - -func (m *Eviction) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Eviction) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if m.DeleteOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DeleteOptions.Size())) - n2, err := m.DeleteOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func (m *FSGroupStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *FSGroupStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *HostPortRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostPortRange) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil -} - -func (m *IDRange) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *IDRange) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Min)) - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Max)) - return i, nil -} - -func (m *PodDisruptionBudget) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudget) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { 
- return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *PodDisruptionBudgetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudgetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodDisruptionBudgetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudgetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.MinAvailable != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MinAvailable.Size())) - n7, err := m.MinAvailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - if m.Selector != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Selector.Size())) - n8, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.MaxUnavailable != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.MaxUnavailable.Size())) - n9, err := m.MaxUnavailable.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - return i, nil -} - -func (m *PodDisruptionBudgetStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodDisruptionBudgetStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObservedGeneration)) - if len(m.DisruptedPods) > 0 { - keysForDisruptedPods := make([]string, 0, len(m.DisruptedPods)) - for k := range m.DisruptedPods { - keysForDisruptedPods = append(keysForDisruptedPods, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - for _, k := range keysForDisruptedPods { - dAtA[i] = 0x12 - i++ - v := m.DisruptedPods[string(k)] - msgSize := 0 - if (&v) != nil { - msgSize = (&v).Size() - msgSize += 1 + sovGenerated(uint64(msgSize)) - } - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + msgSize - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64((&v).Size())) - n10, err := (&v).MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - } - } - dAtA[i] = 0x18 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.PodDisruptionsAllowed)) - dAtA[i] = 0x20 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.CurrentHealthy)) - dAtA[i] = 0x28 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DesiredHealthy)) - 
dAtA[i] = 0x30 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ExpectedPods)) - return i, nil -} - -func (m *PodSecurityPolicy) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicy) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n11, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n12, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n12 - return i, nil -} - -func (m *PodSecurityPolicyList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicyList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n13, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n13 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodSecurityPolicySpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodSecurityPolicySpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Privileged { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - dAtA[i] = 0x30 - i++ - if m.HostNetwork { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.HostPorts) > 0 { - for _, msg := range m.HostPorts { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x40 - i++ - if m.HostPID { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x48 - i++ - if m.HostIPC { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x52 - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64(m.SELinux.Size())) - n14, err := m.SELinux.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n14 - dAtA[i] = 0x5a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RunAsUser.Size())) - n15, err := m.RunAsUser.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n15 - dAtA[i] = 0x62 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SupplementalGroups.Size())) - n16, err := m.SupplementalGroups.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n16 - dAtA[i] = 0x6a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.FSGroup.Size())) - n17, err := m.FSGroup.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n17 - dAtA[i] = 0x70 - i++ - if m.ReadOnlyRootFilesystem { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if m.DefaultAllowPrivilegeEscalation != nil { - dAtA[i] = 0x78 - i++ - if *m.DefaultAllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.AllowPrivilegeEscalation != nil { - dAtA[i] = 0x80 - i++ - dAtA[i] = 0x1 - i++ - if *m.AllowPrivilegeEscalation { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if len(m.AllowedHostPaths) > 0 { - for _, msg := range m.AllowedHostPaths { - dAtA[i] = 0x8a - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.AllowedFlexVolumes) > 0 { - for _, msg := range m.AllowedFlexVolumes { - dAtA[i] = 0x92 - i++ - dAtA[i] = 0x1 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - dAtA[i] = 0x9a - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - dAtA[i] = 0xa2 - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - dAtA[i] = 0xaa - i++ - dAtA[i] = 0x1 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *RunAsUserStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RunAsUserStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *SELinuxStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SELinuxStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - 
dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if m.SELinuxOptions != nil { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.SELinuxOptions.Size())) - n18, err := m.SELinuxOptions.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n18 - } - return i, nil -} - -func (m *SupplementalGroupsStrategyOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SupplementalGroupsStrategyOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Rule))) - i += copy(dAtA[i:], m.Rule) - if len(m.Ranges) > 0 { - for _, msg := range m.Ranges { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AllowedFlexVolume) Size() (n int) { - var l int - _ = l - l = len(m.Driver) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *AllowedHostPath) Size() (n int) { - var l int - _ = l - l = len(m.PathPrefix) - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - return n -} - -func (m *Eviction) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if m.DeleteOptions != nil { - l = m.DeleteOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *FSGroupStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *HostPortRange) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *IDRange) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.Min)) - n += 1 + sovGenerated(uint64(m.Max)) - return n -} - -func (m *PodDisruptionBudget) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodDisruptionBudgetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodDisruptionBudgetSpec) Size() (n int) { - var l int - _ = l - 
if m.MinAvailable != nil { - l = m.MinAvailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.Selector != nil { - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.MaxUnavailable != nil { - l = m.MaxUnavailable.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *PodDisruptionBudgetStatus) Size() (n int) { - var l int - _ = l - n += 1 + sovGenerated(uint64(m.ObservedGeneration)) - if len(m.DisruptedPods) > 0 { - for k, v := range m.DisruptedPods { - _ = k - _ = v - l = v.Size() - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - n += 1 + sovGenerated(uint64(m.PodDisruptionsAllowed)) - n += 1 + sovGenerated(uint64(m.CurrentHealthy)) - n += 1 + sovGenerated(uint64(m.DesiredHealthy)) - n += 1 + sovGenerated(uint64(m.ExpectedPods)) - return n -} - -func (m *PodSecurityPolicy) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodSecurityPolicyList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodSecurityPolicySpec) Size() (n int) { - var l int - _ = l - n += 2 - if len(m.DefaultAddCapabilities) > 0 { - for _, s := range m.DefaultAddCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.RequiredDropCapabilities) > 0 { - for _, s := range m.RequiredDropCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedCapabilities) > 0 { - for _, s := range m.AllowedCapabilities { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, s := range m.Volumes { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - if len(m.HostPorts) > 0 { - for _, e := range m.HostPorts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - n += 2 - n += 2 - l = m.SELinux.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.RunAsUser.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.SupplementalGroups.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.FSGroup.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 2 - if m.DefaultAllowPrivilegeEscalation != nil { - n += 2 - } - if m.AllowPrivilegeEscalation != nil { - n += 3 - } - if len(m.AllowedHostPaths) > 0 { - for _, e := range m.AllowedHostPaths { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedFlexVolumes) > 0 { - for _, e := range m.AllowedFlexVolumes { - l = e.Size() - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedUnsafeSysctls) > 0 { - for _, s := range m.AllowedUnsafeSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.ForbiddenSysctls) > 0 { - for _, s := range m.ForbiddenSysctls { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - if len(m.AllowedProcMountTypes) > 0 { - for _, s := range m.AllowedProcMountTypes { - l = len(s) - n += 2 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RunAsUserStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - 
-func (m *SELinuxStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if m.SELinuxOptions != nil { - l = m.SELinuxOptions.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *SupplementalGroupsStrategyOptions) Size() (n int) { - var l int - _ = l - l = len(m.Rule) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Ranges) > 0 { - for _, e := range m.Ranges { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AllowedFlexVolume) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedFlexVolume{`, - `Driver:` + fmt.Sprintf("%v", this.Driver) + `,`, - `}`, - }, "") - return s -} -func (this *AllowedHostPath) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AllowedHostPath{`, - `PathPrefix:` + fmt.Sprintf("%v", this.PathPrefix) + `,`, - `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, - `}`, - }, "") - return s -} -func (this *Eviction) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Eviction{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `DeleteOptions:` + strings.Replace(fmt.Sprintf("%v", this.DeleteOptions), "DeleteOptions", "k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *FSGroupStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&FSGroupStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *HostPortRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&HostPortRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *IDRange) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&IDRange{`, - `Min:` + fmt.Sprintf("%v", this.Min) + `,`, - `Max:` + fmt.Sprintf("%v", this.Max) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudget) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudget{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodDisruptionBudgetSpec", "PodDisruptionBudgetSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "PodDisruptionBudgetStatus", "PodDisruptionBudgetStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudgetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + 
strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodDisruptionBudget", "PodDisruptionBudget", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodDisruptionBudgetSpec{`, - `MinAvailable:` + strings.Replace(fmt.Sprintf("%v", this.MinAvailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `Selector:` + strings.Replace(fmt.Sprintf("%v", this.Selector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`, - `MaxUnavailable:` + strings.Replace(fmt.Sprintf("%v", this.MaxUnavailable), "IntOrString", "k8s_io_apimachinery_pkg_util_intstr.IntOrString", 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodDisruptionBudgetStatus) String() string { - if this == nil { - return "nil" - } - keysForDisruptedPods := make([]string, 0, len(this.DisruptedPods)) - for k := range this.DisruptedPods { - keysForDisruptedPods = append(keysForDisruptedPods, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForDisruptedPods) - mapStringForDisruptedPods := "map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time{" - for _, k := range keysForDisruptedPods { - mapStringForDisruptedPods += fmt.Sprintf("%v: %v,", k, this.DisruptedPods[k]) - } - mapStringForDisruptedPods += "}" - s := strings.Join([]string{`&PodDisruptionBudgetStatus{`, - `ObservedGeneration:` + fmt.Sprintf("%v", this.ObservedGeneration) + `,`, - `DisruptedPods:` + mapStringForDisruptedPods + `,`, - `PodDisruptionsAllowed:` + fmt.Sprintf("%v", this.PodDisruptionsAllowed) + `,`, - `CurrentHealthy:` + fmt.Sprintf("%v", this.CurrentHealthy) + `,`, - `DesiredHealthy:` + fmt.Sprintf("%v", this.DesiredHealthy) + `,`, - `ExpectedPods:` + fmt.Sprintf("%v", this.ExpectedPods) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicy) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicy{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodSecurityPolicySpec", "PodSecurityPolicySpec", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicyList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicyList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodSecurityPolicy", "PodSecurityPolicy", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodSecurityPolicySpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodSecurityPolicySpec{`, - `Privileged:` + fmt.Sprintf("%v", this.Privileged) + `,`, - `DefaultAddCapabilities:` + fmt.Sprintf("%v", this.DefaultAddCapabilities) + `,`, - `RequiredDropCapabilities:` + fmt.Sprintf("%v", this.RequiredDropCapabilities) + `,`, - `AllowedCapabilities:` + fmt.Sprintf("%v", this.AllowedCapabilities) + `,`, - `Volumes:` + fmt.Sprintf("%v", this.Volumes) + `,`, - `HostNetwork:` + fmt.Sprintf("%v", this.HostNetwork) + `,`, - `HostPorts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HostPorts), "HostPortRange", "HostPortRange", 1), `&`, ``, 1) + `,`, - `HostPID:` + 
fmt.Sprintf("%v", this.HostPID) + `,`, - `HostIPC:` + fmt.Sprintf("%v", this.HostIPC) + `,`, - `SELinux:` + strings.Replace(strings.Replace(this.SELinux.String(), "SELinuxStrategyOptions", "SELinuxStrategyOptions", 1), `&`, ``, 1) + `,`, - `RunAsUser:` + strings.Replace(strings.Replace(this.RunAsUser.String(), "RunAsUserStrategyOptions", "RunAsUserStrategyOptions", 1), `&`, ``, 1) + `,`, - `SupplementalGroups:` + strings.Replace(strings.Replace(this.SupplementalGroups.String(), "SupplementalGroupsStrategyOptions", "SupplementalGroupsStrategyOptions", 1), `&`, ``, 1) + `,`, - `FSGroup:` + strings.Replace(strings.Replace(this.FSGroup.String(), "FSGroupStrategyOptions", "FSGroupStrategyOptions", 1), `&`, ``, 1) + `,`, - `ReadOnlyRootFilesystem:` + fmt.Sprintf("%v", this.ReadOnlyRootFilesystem) + `,`, - `DefaultAllowPrivilegeEscalation:` + valueToStringGenerated(this.DefaultAllowPrivilegeEscalation) + `,`, - `AllowPrivilegeEscalation:` + valueToStringGenerated(this.AllowPrivilegeEscalation) + `,`, - `AllowedHostPaths:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedHostPaths), "AllowedHostPath", "AllowedHostPath", 1), `&`, ``, 1) + `,`, - `AllowedFlexVolumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedFlexVolumes), "AllowedFlexVolume", "AllowedFlexVolume", 1), `&`, ``, 1) + `,`, - `AllowedUnsafeSysctls:` + fmt.Sprintf("%v", this.AllowedUnsafeSysctls) + `,`, - `ForbiddenSysctls:` + fmt.Sprintf("%v", this.ForbiddenSysctls) + `,`, - `AllowedProcMountTypes:` + fmt.Sprintf("%v", this.AllowedProcMountTypes) + `,`, - `}`, - }, "") - return s -} -func (this *RunAsUserStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RunAsUserStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *SELinuxStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SELinuxStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `SELinuxOptions:` + strings.Replace(fmt.Sprintf("%v", this.SELinuxOptions), "SELinuxOptions", "k8s_io_api_core_v1.SELinuxOptions", 1) + `,`, - `}`, - }, "") - return s -} -func (this *SupplementalGroupsStrategyOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&SupplementalGroupsStrategyOptions{`, - `Rule:` + fmt.Sprintf("%v", this.Rule) + `,`, - `Ranges:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Ranges), "IDRange", "IDRange", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AllowedFlexVolume) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedFlexVolume: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedFlexVolume: illegal tag %d (wire type %d)", 
fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Driver", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Driver = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AllowedHostPath) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AllowedHostPath: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AllowedHostPath: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PathPrefix", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PathPrefix = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnly = bool(v != 0) - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Eviction) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Eviction: 
wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Eviction: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DeleteOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DeleteOptions == nil { - m.DeleteOptions = &k8s_io_apimachinery_pkg_apis_meta_v1.DeleteOptions{} - } - if err := m.DeleteOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *FSGroupStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: FSGroupStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: FSGroupStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = FSGroupStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << 
shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HostPortRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostPortRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostPortRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *IDRange) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: IDRange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: IDRange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Min", wireType) - } - m.Min = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Min |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Max", wireType) - } - 
m.Max = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Max |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudget) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudget: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudget: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodDisruptionBudget{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MinAvailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MinAvailable == nil { - m.MinAvailable = 
&k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MinAvailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Selector == nil { - m.Selector = &k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{} - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MaxUnavailable", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.MaxUnavailable == nil { - m.MaxUnavailable = &k8s_io_apimachinery_pkg_util_intstr.IntOrString{} - } - if err := m.MaxUnavailable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodDisruptionBudgetStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodDisruptionBudgetStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ObservedGeneration", wireType) - } - m.ObservedGeneration = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ObservedGeneration |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DisruptedPods", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - 
if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.DisruptedPods == nil { - m.DisruptedPods = make(map[string]k8s_io_apimachinery_pkg_apis_meta_v1.Time) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var mapmsglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - mapmsglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - postmsgIndex := iNdEx + mapmsglen - if mapmsglen < 0 { - return ErrInvalidLengthGenerated - } - if postmsgIndex > l { - return io.ErrUnexpectedEOF - } - mapvalue := &k8s_io_apimachinery_pkg_apis_meta_v1.Time{} - if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { - return err - } - iNdEx = postmsgIndex - m.DisruptedPods[mapkey] = *mapvalue - } else { - var mapvalue k8s_io_apimachinery_pkg_apis_meta_v1.Time - m.DisruptedPods[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PodDisruptionsAllowed", wireType) - } - m.PodDisruptionsAllowed = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.PodDisruptionsAllowed |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field CurrentHealthy", wireType) - } - m.CurrentHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.CurrentHealthy |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DesiredHealthy", wireType) - } - m.DesiredHealthy = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.DesiredHealthy |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ExpectedPods", wireType) - } - m.ExpectedPods = 0 - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.ExpectedPods |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicy) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicy: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicy: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicyList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicyList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicyList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return 
fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodSecurityPolicy{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodSecurityPolicySpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodSecurityPolicySpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodSecurityPolicySpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Privileged", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Privileged = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAddCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DefaultAddCapabilities = append(m.DefaultAddCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequiredDropCapabilities", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.RequiredDropCapabilities = append(m.RequiredDropCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedCapabilities", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedCapabilities = append(m.AllowedCapabilities, k8s_io_api_core_v1.Capability(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, FSType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostNetwork", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostNetwork = bool(v != 0) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPorts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.HostPorts = append(m.HostPorts, HostPortRange{}) - if err := m.HostPorts[len(m.HostPorts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostPID", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostPID = bool(v != 0) - case 9: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field HostIPC", 
wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.HostIPC = bool(v != 0) - case 10: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinux", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SELinux.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RunAsUser", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RunAsUser.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 12: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SupplementalGroups", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.SupplementalGroups.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 13: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field FSGroup", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.FSGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 14: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ReadOnlyRootFilesystem", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ReadOnlyRootFilesystem = bool(v != 0) - case 15: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field DefaultAllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - 
v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.DefaultAllowPrivilegeEscalation = &b - case 16: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowPrivilegeEscalation", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AllowPrivilegeEscalation = &b - case 17: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedHostPaths", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedHostPaths = append(m.AllowedHostPaths, AllowedHostPath{}) - if err := m.AllowedHostPaths[len(m.AllowedHostPaths)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 18: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedFlexVolumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedFlexVolumes = append(m.AllowedFlexVolumes, AllowedFlexVolume{}) - if err := m.AllowedFlexVolumes[len(m.AllowedFlexVolumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 19: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedUnsafeSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedUnsafeSysctls = append(m.AllowedUnsafeSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 20: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ForbiddenSysctls", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ForbiddenSysctls = append(m.ForbiddenSysctls, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 21: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedProcMountTypes", wireType) - } - var 
stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedProcMountTypes = append(m.AllowedProcMountTypes, k8s_io_api_core_v1.ProcMountType(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RunAsUserStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RunAsUserStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = RunAsUserStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SELinuxStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SELinuxStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SELinuxStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SELinuxStrategy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SELinuxOptions", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.SELinuxOptions == nil { - m.SELinuxOptions = &k8s_io_api_core_v1.SELinuxOptions{} - } - if err := m.SELinuxOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SupplementalGroupsStrategyOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SupplementalGroupsStrategyOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rule", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rule = SupplementalGroupsStrategyType(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType 
!= 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Ranges = append(m.Ranges, IDRange{}) - if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/policy/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 1715 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x9a, 0x92, 0x48, 0x8d, 0x24, 0x5a, 0x1a, 0xfd, 0xe9, 0x46, 0xa8, 0xb9, 0x0e, 0x03, - 0x14, 0x6e, 0x90, 0x2c, 0x63, 0x39, 0x69, 0x8d, 0xa6, 0x2d, 0xa2, 0x35, 0x25, 0x5b, 0x81, 0x55, - 0xb1, 0x43, 0x3b, 0x68, 0x0b, 0xb7, 0xe8, 0x70, 0x77, 0x44, 0x4e, 0xb4, 0xdc, 0xdd, 0xce, 0xcc, - 0x32, 
0xe4, 0xad, 0x87, 0x1e, 0x7a, 0xec, 0x17, 0xc8, 0x27, 0x28, 0x7a, 0xea, 0x97, 0x50, 0x81, - 0xa2, 0xc8, 0x31, 0xe8, 0x81, 0xa8, 0x59, 0xf4, 0x4b, 0xf8, 0xd2, 0x60, 0x87, 0xb3, 0x24, 0xf7, - 0x0f, 0x29, 0x2b, 0x40, 0x7c, 0xdb, 0x9d, 0xf7, 0xfb, 0xfd, 0xde, 0x9b, 0x37, 0x6f, 0xde, 0xce, - 0x0e, 0xb0, 0x2e, 0x1f, 0x72, 0x93, 0xfa, 0xb5, 0xcb, 0xb0, 0x45, 0x98, 0x47, 0x04, 0xe1, 0xb5, - 0x1e, 0xf1, 0x1c, 0x9f, 0xd5, 0x94, 0x01, 0x07, 0xb4, 0x16, 0xf8, 0x2e, 0xb5, 0x07, 0xb5, 0xde, - 0xfd, 0x16, 0x11, 0xf8, 0x7e, 0xad, 0x4d, 0x3c, 0xc2, 0xb0, 0x20, 0x8e, 0x19, 0x30, 0x5f, 0xf8, - 0xf0, 0xad, 0x31, 0xd4, 0xc4, 0x01, 0x35, 0xc7, 0x50, 0x53, 0x41, 0x0f, 0xde, 0x6f, 0x53, 0xd1, - 0x09, 0x5b, 0xa6, 0xed, 0x77, 0x6b, 0x6d, 0xbf, 0xed, 0xd7, 0x24, 0xa3, 0x15, 0x5e, 0xc8, 0x37, - 0xf9, 0x22, 0x9f, 0xc6, 0x4a, 0x07, 0xd5, 0x19, 0xa7, 0xb6, 0xcf, 0x48, 0xad, 0x97, 0xf1, 0x76, - 0xf0, 0xe1, 0x14, 0xd3, 0xc5, 0x76, 0x87, 0x7a, 0x84, 0x0d, 0x6a, 0xc1, 0x65, 0x3b, 0x1a, 0xe0, - 0xb5, 0x2e, 0x11, 0x38, 0x8f, 0x55, 0x9b, 0xc7, 0x62, 0xa1, 0x27, 0x68, 0x97, 0x64, 0x08, 0x3f, - 0xba, 0x8e, 0xc0, 0xed, 0x0e, 0xe9, 0xe2, 0x0c, 0xef, 0xc1, 0x3c, 0x5e, 0x28, 0xa8, 0x5b, 0xa3, - 0x9e, 0xe0, 0x82, 0xa5, 0x49, 0xd5, 0x8f, 0xc1, 0xf6, 0x91, 0xeb, 0xfa, 0x5f, 0x10, 0xe7, 0xc4, - 0x25, 0xfd, 0xcf, 0x7c, 0x37, 0xec, 0x12, 0xf8, 0x03, 0xb0, 0xea, 0x30, 0xda, 0x23, 0x4c, 0xd7, - 0xee, 0x6a, 0xf7, 0xd6, 0xac, 0xf2, 0xd5, 0xd0, 0x58, 0x1a, 0x0d, 0x8d, 0xd5, 0xba, 0x1c, 0x45, - 0xca, 0x5a, 0xe5, 0xe0, 0xb6, 0x22, 0x3f, 0xf1, 0xb9, 0x68, 0x60, 0xd1, 0x81, 0x87, 0x00, 0x04, - 0x58, 0x74, 0x1a, 0x8c, 0x5c, 0xd0, 0xbe, 0xa2, 0x43, 0x45, 0x07, 0x8d, 0x89, 0x05, 0xcd, 0xa0, - 0xe0, 0x7b, 0xa0, 0xc4, 0x08, 0x76, 0xce, 0x3d, 0x77, 0xa0, 0xdf, 0xba, 0xab, 0xdd, 0x2b, 0x59, - 0x5b, 0x8a, 0x51, 0x42, 0x6a, 0x1c, 0x4d, 0x10, 0xd5, 0x7f, 0x6b, 0xa0, 0x74, 0xdc, 0xa3, 0xb6, - 0xa0, 0xbe, 0x07, 0x7f, 0x0f, 0x4a, 0x51, 0xde, 0x1d, 0x2c, 0xb0, 0x74, 0xb6, 0x7e, 0xf8, 0x81, - 0x39, 0xad, 0x89, 0x49, 0x1a, 0xcc, 0xe0, 0xb2, 0x1d, 0x0d, 0x70, 0x33, 0x42, 0x9b, 0xbd, 0xfb, - 0xe6, 0x79, 0xeb, 0x73, 0x62, 0x8b, 0x33, 0x22, 0xf0, 0x34, 0xbc, 0xe9, 0x18, 0x9a, 0xa8, 0x42, - 0x17, 0x6c, 0x3a, 0xc4, 0x25, 0x82, 0x9c, 0x07, 0x91, 0x47, 0x2e, 0x23, 0x5c, 0x3f, 0x7c, 0xf0, - 0x7a, 0x6e, 0xea, 0xb3, 0x54, 0x6b, 0x7b, 0x34, 0x34, 0x36, 0x13, 0x43, 0x28, 0x29, 0x5e, 0xfd, - 0x52, 0x03, 0xfb, 0x27, 0xcd, 0xc7, 0xcc, 0x0f, 0x83, 0xa6, 0x88, 0xd6, 0xa9, 0x3d, 0x50, 0x26, - 0xf8, 0x63, 0xb0, 0xcc, 0x42, 0x97, 0xa8, 0x9c, 0xbe, 0xa3, 0x82, 0x5e, 0x46, 0xa1, 0x4b, 0x5e, - 0x0d, 0x8d, 0x9d, 0x14, 0xeb, 0xd9, 0x20, 0x20, 0x48, 0x12, 0xe0, 0xa7, 0x60, 0x95, 0x61, 0xaf, - 0x4d, 0xa2, 0xd0, 0x0b, 0xf7, 0xd6, 0x0f, 0xab, 0xe6, 0xdc, 0x5d, 0x63, 0x9e, 0xd6, 0x51, 0x04, - 0x9d, 0xae, 0xb8, 0x7c, 0xe5, 0x48, 0x29, 0x54, 0xcf, 0xc0, 0xa6, 0x5c, 0x6a, 0x9f, 0x09, 0x69, - 0x81, 0x77, 0x40, 0xa1, 0x4b, 0x3d, 0x19, 0xd4, 0x8a, 0xb5, 0xae, 0x58, 0x85, 0x33, 0xea, 0xa1, - 0x68, 0x5c, 0x9a, 0x71, 0x5f, 0xe6, 0x6c, 0xd6, 0x8c, 0xfb, 0x28, 0x1a, 0xaf, 0x3e, 0x06, 0x45, - 0xe5, 0x71, 0x56, 0xa8, 0xb0, 0x58, 0xa8, 0x90, 0x23, 0xf4, 0xd7, 0x5b, 0x60, 0xa7, 0xe1, 0x3b, - 0x75, 0xca, 0x59, 0x28, 0xf3, 0x65, 0x85, 0x4e, 0x9b, 0x88, 0x37, 0x50, 0x1f, 0xcf, 0xc0, 0x32, - 0x0f, 0x88, 0xad, 0xca, 0xe2, 0x70, 0x41, 0x6e, 0x73, 0xe2, 0x6b, 0x06, 0xc4, 0xb6, 0x36, 0xe2, - 0xa5, 0x8c, 0xde, 0x90, 0x54, 0x83, 0x2f, 0xc0, 0x2a, 0x17, 0x58, 0x84, 0x5c, 0x2f, 0x48, 0xdd, - 0x0f, 0x6f, 0xa8, 0x2b, 0xb9, 0xd3, 0x55, 0x1c, 0xbf, 0x23, 0xa5, 0x59, 0xfd, 0xa7, 0x06, 0xbe, - 0x97, 0xc3, 0x7a, 0x4a, 0xb9, 
0x80, 0x2f, 0x32, 0x19, 0x33, 0x5f, 0x2f, 0x63, 0x11, 0x5b, 0xe6, - 0x6b, 0xb2, 0x79, 0xe3, 0x91, 0x99, 0x6c, 0x35, 0xc1, 0x0a, 0x15, 0xa4, 0x1b, 0x97, 0xa2, 0x79, - 0xb3, 0x69, 0x59, 0x9b, 0x4a, 0x7a, 0xe5, 0x34, 0x12, 0x41, 0x63, 0xad, 0xea, 0xbf, 0x6e, 0xe5, - 0x4e, 0x27, 0x4a, 0x27, 0xbc, 0x00, 0x1b, 0x5d, 0xea, 0x1d, 0xf5, 0x30, 0x75, 0x71, 0x4b, 0xed, - 0x9e, 0x45, 0x45, 0x10, 0xf5, 0x4a, 0x73, 0xdc, 0x2b, 0xcd, 0x53, 0x4f, 0x9c, 0xb3, 0xa6, 0x60, - 0xd4, 0x6b, 0x5b, 0x5b, 0xa3, 0xa1, 0xb1, 0x71, 0x36, 0xa3, 0x84, 0x12, 0xba, 0xf0, 0xb7, 0xa0, - 0xc4, 0x89, 0x4b, 0x6c, 0xe1, 0xb3, 0x9b, 0x75, 0x88, 0xa7, 0xb8, 0x45, 0xdc, 0xa6, 0xa2, 0x5a, - 0x1b, 0x51, 0xde, 0xe2, 0x37, 0x34, 0x91, 0x84, 0x2e, 0x28, 0x77, 0x71, 0xff, 0xb9, 0x87, 0x27, - 0x13, 0x29, 0x7c, 0xcb, 0x89, 0xc0, 0xd1, 0xd0, 0x28, 0x9f, 0x25, 0xb4, 0x50, 0x4a, 0xbb, 0xfa, - 0xbf, 0x65, 0xf0, 0xd6, 0xdc, 0xaa, 0x82, 0x9f, 0x02, 0xe8, 0xb7, 0x38, 0x61, 0x3d, 0xe2, 0x3c, - 0x1e, 0x7f, 0x4d, 0xa8, 0x1f, 0x6f, 0xdc, 0x03, 0xb5, 0x40, 0xf0, 0x3c, 0x83, 0x40, 0x39, 0x2c, - 0xf8, 0x27, 0x0d, 0x6c, 0x3a, 0x63, 0x37, 0xc4, 0x69, 0xf8, 0x4e, 0x5c, 0x18, 0x8f, 0xbf, 0x4d, - 0xbd, 0x9b, 0xf5, 0x59, 0xa5, 0x63, 0x4f, 0xb0, 0x81, 0xb5, 0xa7, 0x02, 0xda, 0x4c, 0xd8, 0x50, - 0xd2, 0x29, 0x3c, 0x03, 0xd0, 0x99, 0x48, 0x72, 0xf5, 0x4d, 0x93, 0x29, 0x5e, 0xb1, 0xee, 0x28, - 0x85, 0xbd, 0x84, 0xdf, 0x18, 0x84, 0x72, 0x88, 0xf0, 0xe7, 0xa0, 0x6c, 0x87, 0x8c, 0x11, 0x4f, - 0x3c, 0x21, 0xd8, 0x15, 0x9d, 0x81, 0xbe, 0x2c, 0xa5, 0xf6, 0x95, 0x54, 0xf9, 0x51, 0xc2, 0x8a, - 0x52, 0xe8, 0x88, 0xef, 0x10, 0x4e, 0x19, 0x71, 0x62, 0xfe, 0x4a, 0x92, 0x5f, 0x4f, 0x58, 0x51, - 0x0a, 0x0d, 0x1f, 0x82, 0x0d, 0xd2, 0x0f, 0x88, 0x1d, 0xe7, 0x74, 0x55, 0xb2, 0x77, 0x15, 0x7b, - 0xe3, 0x78, 0xc6, 0x86, 0x12, 0xc8, 0x03, 0x17, 0xc0, 0x6c, 0x12, 0xe1, 0x16, 0x28, 0x5c, 0x92, - 0xc1, 0xf8, 0xcb, 0x83, 0xa2, 0x47, 0xf8, 0x09, 0x58, 0xe9, 0x61, 0x37, 0x24, 0xaa, 0xd6, 0xdf, - 0x7d, 0xbd, 0x5a, 0x7f, 0x46, 0xbb, 0x04, 0x8d, 0x89, 0x3f, 0xb9, 0xf5, 0x50, 0xab, 0xfe, 0x43, - 0x03, 0xdb, 0x0d, 0xdf, 0x69, 0x12, 0x3b, 0x64, 0x54, 0x0c, 0x1a, 0x72, 0x9d, 0xdf, 0x40, 0xcf, - 0x46, 0x89, 0x9e, 0xfd, 0xc1, 0xe2, 0x5a, 0x4b, 0x46, 0x37, 0xaf, 0x63, 0x57, 0xaf, 0x34, 0xb0, - 0x97, 0x41, 0xbf, 0x81, 0x8e, 0xfa, 0xcb, 0x64, 0x47, 0x7d, 0xef, 0x26, 0x93, 0x99, 0xd3, 0x4f, - 0xff, 0x5f, 0xce, 0x99, 0x8a, 0xec, 0xa6, 0xd1, 0xe9, 0x8e, 0xd1, 0x1e, 0x75, 0x49, 0x9b, 0x38, - 0x72, 0x32, 0xa5, 0x99, 0xd3, 0xdd, 0xc4, 0x82, 0x66, 0x50, 0x90, 0x83, 0x7d, 0x87, 0x5c, 0xe0, - 0xd0, 0x15, 0x47, 0x8e, 0xf3, 0x08, 0x07, 0xb8, 0x45, 0x5d, 0x2a, 0xa8, 0x3a, 0x8e, 0xac, 0x59, - 0x1f, 0x8f, 0x86, 0xc6, 0x7e, 0x3d, 0x17, 0xf1, 0x6a, 0x68, 0xdc, 0xc9, 0x9e, 0xcb, 0xcd, 0x09, - 0x64, 0x80, 0xe6, 0x48, 0xc3, 0x01, 0xd0, 0x19, 0xf9, 0x43, 0x18, 0x6d, 0x8a, 0x3a, 0xf3, 0x83, - 0x84, 0xdb, 0x82, 0x74, 0xfb, 0xb3, 0xd1, 0xd0, 0xd0, 0xd1, 0x1c, 0xcc, 0xf5, 0x8e, 0xe7, 0xca, - 0xc3, 0xcf, 0xc1, 0x0e, 0x1e, 0xf7, 0x81, 0x84, 0xd7, 0x65, 0xe9, 0xf5, 0xe1, 0x68, 0x68, 0xec, - 0x1c, 0x65, 0xcd, 0xd7, 0x3b, 0xcc, 0x13, 0x85, 0x35, 0x50, 0xec, 0xc9, 0x23, 0x3b, 0xd7, 0x57, - 0xa4, 0xfe, 0xde, 0x68, 0x68, 0x14, 0xc7, 0xa7, 0xf8, 0x48, 0x73, 0xf5, 0xa4, 0x29, 0x0f, 0x82, - 0x31, 0x0a, 0x7e, 0x04, 0xd6, 0x3b, 0x3e, 0x17, 0xbf, 0x20, 0xe2, 0x0b, 0x9f, 0x5d, 0xca, 0xc6, - 0x50, 0xb2, 0x76, 0xd4, 0x0a, 0xae, 0x3f, 0x99, 0x9a, 0xd0, 0x2c, 0x0e, 0xfe, 0x1a, 0xac, 0x75, - 0xd4, 0xb1, 0x8f, 0xeb, 0x45, 0x59, 0x68, 0xf7, 0x16, 0x14, 0x5a, 0xe2, 0x88, 0x68, 0x6d, 0x2b, - 0xf9, 0xb5, 0x78, 0x98, 0xa3, 0xa9, 0x1a, 0xfc, 0x21, 
0x28, 0xca, 0x97, 0xd3, 0xba, 0x5e, 0x92, - 0xd1, 0xdc, 0x56, 0xf0, 0xe2, 0x93, 0xf1, 0x30, 0x8a, 0xed, 0x31, 0xf4, 0xb4, 0xf1, 0x48, 0x5f, - 0xcb, 0x42, 0x4f, 0x1b, 0x8f, 0x50, 0x6c, 0x87, 0x2f, 0x40, 0x91, 0x93, 0xa7, 0xd4, 0x0b, 0xfb, - 0x3a, 0x90, 0x5b, 0xee, 0xfe, 0x82, 0x70, 0x9b, 0xc7, 0x12, 0x99, 0x3a, 0x70, 0x4f, 0xd5, 0x95, - 0x1d, 0xc5, 0x92, 0xd0, 0x01, 0x6b, 0x2c, 0xf4, 0x8e, 0xf8, 0x73, 0x4e, 0x98, 0xbe, 0x9e, 0xf9, - 0xda, 0xa7, 0xf5, 0x51, 0x8c, 0x4d, 0x7b, 0x98, 0x64, 0x66, 0x82, 0x40, 0x53, 0x61, 0xf8, 0x67, - 0x0d, 0x40, 0x1e, 0x06, 0x81, 0x4b, 0xba, 0xc4, 0x13, 0xd8, 0x95, 0xe7, 0x7b, 0xae, 0x6f, 0x48, - 0x7f, 0x3f, 0x5d, 0x34, 0x9f, 0x0c, 0x29, 0xed, 0x78, 0xf2, 0x99, 0xce, 0x42, 0x51, 0x8e, 0xcf, - 0x28, 0x9d, 0x17, 0x5c, 0x3e, 0xeb, 0x9b, 0xd7, 0xa6, 0x33, 0xff, 0xff, 0x65, 0x9a, 0x4e, 0x65, - 0x47, 0xb1, 0x24, 0xfc, 0x0c, 0xec, 0xc7, 0x7f, 0x77, 0xc8, 0xf7, 0xc5, 0x09, 0x75, 0x09, 0x1f, - 0x70, 0x41, 0xba, 0x7a, 0x59, 0x2e, 0x73, 0x45, 0x31, 0xf7, 0x51, 0x2e, 0x0a, 0xcd, 0x61, 0xc3, - 0x2e, 0x30, 0xe2, 0xf6, 0x10, 0xed, 0x9d, 0x49, 0x7f, 0x3a, 0xe6, 0x36, 0x76, 0xc7, 0xa7, 0x96, - 0xdb, 0xd2, 0xc1, 0x3b, 0xa3, 0xa1, 0x61, 0xd4, 0x17, 0x43, 0xd1, 0x75, 0x5a, 0xf0, 0x57, 0x40, - 0xc7, 0xf3, 0xfc, 0x6c, 0x49, 0x3f, 0xdf, 0x8f, 0x7a, 0xce, 0x5c, 0x07, 0x73, 0xd9, 0x30, 0x00, - 0x5b, 0x38, 0xf9, 0x9f, 0xcd, 0xf5, 0x6d, 0xb9, 0x0b, 0xdf, 0x5d, 0xb0, 0x0e, 0xa9, 0x5f, 0x73, - 0x4b, 0x57, 0x69, 0xdc, 0x4a, 0x19, 0x38, 0xca, 0xa8, 0xc3, 0x3e, 0x80, 0x38, 0x7d, 0x2d, 0xc0, - 0x75, 0x78, 0xed, 0x27, 0x26, 0x73, 0x97, 0x30, 0x2d, 0xb5, 0x8c, 0x89, 0xa3, 0x1c, 0x1f, 0xf0, - 0x29, 0xd8, 0x55, 0xa3, 0xcf, 0x3d, 0x8e, 0x2f, 0x48, 0x73, 0xc0, 0x6d, 0xe1, 0x72, 0x7d, 0x47, - 0xf6, 0x37, 0x7d, 0x34, 0x34, 0x76, 0x8f, 0x72, 0xec, 0x28, 0x97, 0x05, 0x3f, 0x01, 0x5b, 0x17, - 0x3e, 0x6b, 0x51, 0xc7, 0x21, 0x5e, 0xac, 0xb4, 0x2b, 0x95, 0x76, 0xa3, 0x4c, 0x9c, 0xa4, 0x6c, - 0x28, 0x83, 0x86, 0x1c, 0xec, 0x29, 0xe5, 0x06, 0xf3, 0xed, 0x33, 0x3f, 0xf4, 0x44, 0xd4, 0x52, - 0xb9, 0xbe, 0x37, 0xf9, 0x8c, 0xec, 0x1d, 0xe5, 0x01, 0x5e, 0x0d, 0x8d, 0xbb, 0x39, 0x2d, 0x3d, - 0x01, 0x42, 0xf9, 0xda, 0xd5, 0x2f, 0x35, 0xa0, 0xcf, 0xeb, 0x1a, 0xf0, 0xa3, 0xc4, 0x45, 0xc0, - 0xdb, 0xa9, 0x8b, 0x80, 0xed, 0x0c, 0xef, 0x3b, 0xb8, 0x06, 0xf8, 0x9b, 0x06, 0xf6, 0xf3, 0xbb, - 0x26, 0x7c, 0x90, 0x88, 0xce, 0x48, 0x45, 0x77, 0x3b, 0xc5, 0x52, 0xb1, 0xfd, 0x0e, 0x94, 0x55, - 0x6f, 0x4d, 0xde, 0xb2, 0x24, 0x62, 0x8c, 0x32, 0x18, 0x1d, 0x8b, 0x94, 0x44, 0xdc, 0x57, 0xe4, - 0x0f, 0x4d, 0x72, 0x0c, 0xa5, 0xd4, 0xaa, 0x7f, 0xd7, 0xc0, 0xdb, 0xd7, 0x76, 0x45, 0x68, 0x25, - 0x42, 0x37, 0x53, 0xa1, 0x57, 0xe6, 0x0b, 0x7c, 0x37, 0x97, 0x2d, 0xd6, 0xfb, 0x57, 0x2f, 0x2b, - 0x4b, 0x5f, 0xbd, 0xac, 0x2c, 0x7d, 0xfd, 0xb2, 0xb2, 0xf4, 0xc7, 0x51, 0x45, 0xbb, 0x1a, 0x55, - 0xb4, 0xaf, 0x46, 0x15, 0xed, 0xeb, 0x51, 0x45, 0xfb, 0xcf, 0xa8, 0xa2, 0xfd, 0xe5, 0xbf, 0x95, - 0xa5, 0xdf, 0x14, 0x95, 0xdc, 0x37, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa8, 0xba, 0x23, 0xa4, 0x51, - 0x15, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/policy/v1beta1/generated.proto b/vendor/k8s.io/api/policy/v1beta1/generated.proto deleted file mode 100644 index aa37d94..0000000 --- a/vendor/k8s.io/api/policy/v1beta1/generated.proto +++ /dev/null @@ -1,341 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
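
The hand-written Unmarshal and skipGenerated functions deleted above all repeat the same low-level loop: read a base-128 varint, split it into a field number and a wire type, then decode or skip the payload (the trailing fileDescriptorGenerated blob is just the gzipped descriptor registered in init). As a rough, self-contained sketch of that wire-format walk using only the standard library (not the generated code itself):

package main

import (
	"encoding/binary"
	"fmt"
)

// walkFields prints the field number and wire type of every top-level field
// in a protobuf-encoded buffer, skipping payloads much like skipGenerated does.
func walkFields(buf []byte) error {
	for len(buf) > 0 {
		tag, n := binary.Uvarint(buf) // varint-encoded key: (fieldNum << 3) | wireType
		if n <= 0 {
			return fmt.Errorf("proto: bad field key")
		}
		buf = buf[n:]
		fieldNum, wireType := tag>>3, tag&0x7
		fmt.Printf("field %d, wire type %d\n", fieldNum, wireType)

		switch wireType {
		case 0: // varint (bools, ints)
			_, m := binary.Uvarint(buf)
			if m <= 0 {
				return fmt.Errorf("proto: bad varint payload")
			}
			buf = buf[m:]
		case 1: // fixed 64-bit
			if len(buf) < 8 {
				return fmt.Errorf("proto: truncated fixed64")
			}
			buf = buf[8:]
		case 2: // length-delimited (strings, bytes, nested messages)
			l, m := binary.Uvarint(buf)
			if m <= 0 || uint64(len(buf)-m) < l {
				return fmt.Errorf("proto: truncated length-delimited field")
			}
			buf = buf[m+int(l):]
		case 5: // fixed 32-bit
			if len(buf) < 4 {
				return fmt.Errorf("proto: truncated fixed32")
			}
			buf = buf[4:]
		default: // group wire types (3/4) are not handled in this sketch
			return fmt.Errorf("proto: unsupported wire type %d", wireType)
		}
	}
	return nil
}

func main() {
	// field 1 = varint 1, field 5 = length-delimited "foo"
	_ = walkFields([]byte{0x08, 0x01, 0x2a, 0x03, 'f', 'o', 'o'})
}
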
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.policy.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; -import "k8s.io/apimachinery/pkg/util/intstr/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -message AllowedFlexVolume { - // driver is the name of the Flexvolume driver. - optional string driver = 1; -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -message AllowedHostPath { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - optional string pathPrefix = 1; - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - optional bool readOnly = 2; -} - -// Eviction evicts a pod from its node subject to certain policies and safety constraints. -// This is a subresource of Pod. A request to cause such an eviction is -// created by POSTing to .../pods//evictions. -message Eviction { - // ObjectMeta describes the pod that is being evicted. - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // DeleteOptions may be provided - optional k8s.io.apimachinery.pkg.apis.meta.v1.DeleteOptions deleteOptions = 2; -} - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -message FSGroupStrategyOptions { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -message HostPortRange { - // min is the start of the range, inclusive. - optional int32 min = 1; - - // max is the end of the range, inclusive. - optional int32 max = 2; -} - -// IDRange provides a min/max of an allowed range of IDs. -message IDRange { - // min is the start of the range, inclusive. - optional int64 min = 1; - - // max is the end of the range, inclusive. 
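
The AllowedHostPath comment above spells out a subtle matching rule: trailing slashes are trimmed, and `/foo` admits `/foo` and `/foo/bar` but not `/food` or `/etc/foo`. A small check of that documented rule (a sketch of the stated semantics only, not the actual admission/kubelet implementation):

package main

import (
	"fmt"
	"strings"
)

// hostPathAllowed applies the documented pathPrefix rule: trim trailing
// slashes, then require either an exact match or a match on a path-segment
// boundary, so "/foo" allows "/foo/bar" but rejects "/food".
func hostPathAllowed(hostPath, pathPrefix string) bool {
	p := strings.TrimSuffix(pathPrefix, "/")
	h := strings.TrimSuffix(hostPath, "/")
	return h == p || strings.HasPrefix(h, p+"/")
}

func main() {
	fmt.Println(hostPathAllowed("/foo/bar", "/foo")) // true
	fmt.Println(hostPathAllowed("/food", "/foo"))    // false
	fmt.Println(hostPathAllowed("/etc/foo", "/foo")) // false
}
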
- optional int64 max = 2; -} - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -message PodDisruptionBudget { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired behavior of the PodDisruptionBudget. - optional PodDisruptionBudgetSpec spec = 2; - - // Most recently observed status of the PodDisruptionBudget. - optional PodDisruptionBudgetStatus status = 3; -} - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -message PodDisruptionBudgetList { - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - repeated PodDisruptionBudget items = 2; -} - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -message PodDisruptionBudgetSpec { - // An eviction is allowed if at least "minAvailable" pods selected by - // "selector" will still be available after the eviction, i.e. even in the - // absence of the evicted pod. So for example you can prevent all voluntary - // evictions by specifying "100%". - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString minAvailable = 1; - - // Label query over pods whose evictions are managed by the disruption - // budget. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 2; - - // An eviction is allowed if at most "maxUnavailable" pods selected by - // "selector" are unavailable after the eviction, i.e. even in absence of - // the evicted pod. For example, one can prevent all voluntary evictions - // by specifying 0. This is a mutually exclusive setting with "minAvailable". - optional k8s.io.apimachinery.pkg.util.intstr.IntOrString maxUnavailable = 3; -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -message PodDisruptionBudgetStatus { - // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. - // +optional - optional int64 observedGeneration = 1; - - // DisruptedPods contains information about pods whose eviction was - // processed by the API server eviction subresource handler but has not - // yet been observed by the PodDisruptionBudget controller. - // A pod will be in this map from the time when the API server processed the - // eviction request to the time when the pod is seen by PDB controller - // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod - // and the value is the time when the API server processed the eviction request. If - // the deletion didn't occur and a pod is still there it will be removed from - // the list automatically by PodDisruptionBudget controller after some time. - // If everything goes smooth this map should be empty for the most of the time. - // Large number of entries in the map may indicate problems with pod deletions. - // +optional - map disruptedPods = 2; - - // Number of pod disruptions that are currently allowed. 
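
minAvailable and maxUnavailable are IntOrString fields, so a budget can be expressed either as an absolute pod count or as a percentage. Assuming the k8s.io/api and k8s.io/apimachinery packages vendored in this patch are importable, a minimal PodDisruptionBudget for a hypothetical redis deployment might be built like this (names such as "redis-pdb" are made up for the sketch):

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// "50%" could equally be intstr.FromInt(2) for an absolute count.
	minAvailable := intstr.FromString("50%")

	pdb := policyv1beta1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "redis-pdb", Namespace: "default"},
		Spec: policyv1beta1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "redis"},
			},
		},
	}
	fmt.Printf("%s: minAvailable=%s\n", pdb.Name, pdb.Spec.MinAvailable.String())
}
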
- optional int32 disruptionsAllowed = 3; - - // current number of healthy pods - optional int32 currentHealthy = 4; - - // minimum desired number of healthy pods - optional int32 desiredHealthy = 5; - - // total number of pods counted by this disruption budget - optional int32 expectedPods = 6; -} - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -message PodSecurityPolicy { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // spec defines the policy enforced. - // +optional - optional PodSecurityPolicySpec spec = 2; -} - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -message PodSecurityPolicyList { - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is a list of schema objects. - repeated PodSecurityPolicy items = 2; -} - -// PodSecurityPolicySpec defines the policy enforced. -message PodSecurityPolicySpec { - // privileged determines if a pod can request to be run as privileged. - // +optional - optional bool privileged = 1; - - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - repeated string defaultAddCapabilities = 2; - - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - repeated string requiredDropCapabilities = 3; - - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - repeated string allowedCapabilities = 4; - - // volumes is a white list of allowed volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - repeated string volumes = 5; - - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - optional bool hostNetwork = 6; - - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - repeated HostPortRange hostPorts = 7; - - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - optional bool hostPID = 8; - - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - optional bool hostIPC = 9; - - // seLinux is the strategy that will dictate the allowable labels that may be set. - optional SELinuxStrategyOptions seLinux = 10; - - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - optional RunAsUserStrategyOptions runAsUser = 11; - - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. 
- optional SupplementalGroupsStrategyOptions supplementalGroups = 12; - - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - optional FSGroupStrategyOptions fsGroup = 13; - - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - optional bool readOnlyRootFilesystem = 14; - - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. - // +optional - optional bool defaultAllowPrivilegeEscalation = 15; - - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - optional bool allowPrivilegeEscalation = 16; - - // allowedHostPaths is a white list of allowed host paths. Empty indicates - // that all host paths may be used. - // +optional - repeated AllowedHostPath allowedHostPaths = 17; - - // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - repeated AllowedFlexVolume allowedFlexVolumes = 18; - - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - repeated string allowedUnsafeSysctls = 19; - - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - repeated string forbiddenSysctls = 20; - - // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - repeated string allowedProcMountTypes = 21; -} - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -message RunAsUserStrategyOptions { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - optional string rule = 1; - - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -message SELinuxStrategyOptions { - // rule is the strategy that will dictate the allowable labels that may be set. 
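
allowedUnsafeSysctls and forbiddenSysctls use a simple pattern form, as described above: a plain sysctl name matches exactly, while an entry ending in "*" matches by prefix (a lone "*" matches everything). A sketch of that documented matching rule, not the kubelet's actual implementation:

package main

import (
	"fmt"
	"strings"
)

// sysctlMatches reports whether name matches any entry in patterns, where an
// entry ending in "*" is treated as a prefix and anything else as an exact name.
func sysctlMatches(name string, patterns []string) bool {
	for _, p := range patterns {
		if strings.HasSuffix(p, "*") {
			if strings.HasPrefix(name, strings.TrimSuffix(p, "*")) {
				return true
			}
		} else if name == p {
			return true
		}
	}
	return false
}

func main() {
	allowed := []string{"kernel.shm*", "net.core.somaxconn"}
	fmt.Println(sysctlMatches("kernel.shmmax", allowed))      // true (prefix match)
	fmt.Println(sysctlMatches("net.ipv4.ip_forward", allowed)) // false
}
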
- optional string rule = 1; - - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - optional k8s.io.api.core.v1.SELinuxOptions seLinuxOptions = 2; -} - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -message SupplementalGroupsStrategyOptions { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - optional string rule = 1; - - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - repeated IDRange ranges = 2; -} - diff --git a/vendor/k8s.io/api/policy/v1beta1/register.go b/vendor/k8s.io/api/policy/v1beta1/register.go deleted file mode 100644 index b3efd63..0000000 --- a/vendor/k8s.io/api/policy/v1beta1/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "policy" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &PodDisruptionBudget{}, - &PodDisruptionBudgetList{}, - &PodSecurityPolicy{}, - &PodSecurityPolicyList{}, - &Eviction{}, - ) - // Add the watch version that applies - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/policy/v1beta1/types.go b/vendor/k8s.io/api/policy/v1beta1/types.go deleted file mode 100644 index c1a2727..0000000 --- a/vendor/k8s.io/api/policy/v1beta1/types.go +++ /dev/null @@ -1,399 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
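
The register.go removed just above wires these types into the shared scheme machinery: AddToScheme registers every policy/v1beta1 kind under the GroupVersion {policy, v1beta1}. Assuming the vendored k8s.io packages are importable, a caller would typically use it along these lines:

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	scheme := runtime.NewScheme()
	// AddToScheme comes from the deleted register.go; it adds PodDisruptionBudget,
	// PodSecurityPolicy, Eviction and their list types to the scheme.
	if err := policyv1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	gvks, _, err := scheme.ObjectKinds(&policyv1beta1.PodDisruptionBudget{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks) // expect policy/v1beta1, Kind=PodDisruptionBudget
}
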
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" -) - -// PodDisruptionBudgetSpec is a description of a PodDisruptionBudget. -type PodDisruptionBudgetSpec struct { - // An eviction is allowed if at least "minAvailable" pods selected by - // "selector" will still be available after the eviction, i.e. even in the - // absence of the evicted pod. So for example you can prevent all voluntary - // evictions by specifying "100%". - MinAvailable *intstr.IntOrString `json:"minAvailable,omitempty" protobuf:"bytes,1,opt,name=minAvailable"` - - // Label query over pods whose evictions are managed by the disruption - // budget. - Selector *metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,2,opt,name=selector"` - - // An eviction is allowed if at most "maxUnavailable" pods selected by - // "selector" are unavailable after the eviction, i.e. even in absence of - // the evicted pod. For example, one can prevent all voluntary evictions - // by specifying 0. This is a mutually exclusive setting with "minAvailable". - MaxUnavailable *intstr.IntOrString `json:"maxUnavailable,omitempty" protobuf:"bytes,3,opt,name=maxUnavailable"` -} - -// PodDisruptionBudgetStatus represents information about the status of a -// PodDisruptionBudget. Status may trail the actual state of a system. -type PodDisruptionBudgetStatus struct { - // Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other - // status informatio is valid only if observedGeneration equals to PDB's object generation. - // +optional - ObservedGeneration int64 `json:"observedGeneration,omitempty" protobuf:"varint,1,opt,name=observedGeneration"` - - // DisruptedPods contains information about pods whose eviction was - // processed by the API server eviction subresource handler but has not - // yet been observed by the PodDisruptionBudget controller. - // A pod will be in this map from the time when the API server processed the - // eviction request to the time when the pod is seen by PDB controller - // as having been marked for deletion (or after a timeout). The key in the map is the name of the pod - // and the value is the time when the API server processed the eviction request. If - // the deletion didn't occur and a pod is still there it will be removed from - // the list automatically by PodDisruptionBudget controller after some time. - // If everything goes smooth this map should be empty for the most of the time. - // Large number of entries in the map may indicate problems with pod deletions. - // +optional - DisruptedPods map[string]metav1.Time `json:"disruptedPods,omitempty" protobuf:"bytes,2,rep,name=disruptedPods"` - - // Number of pod disruptions that are currently allowed. 
- PodDisruptionsAllowed int32 `json:"disruptionsAllowed" protobuf:"varint,3,opt,name=disruptionsAllowed"` - - // current number of healthy pods - CurrentHealthy int32 `json:"currentHealthy" protobuf:"varint,4,opt,name=currentHealthy"` - - // minimum desired number of healthy pods - DesiredHealthy int32 `json:"desiredHealthy" protobuf:"varint,5,opt,name=desiredHealthy"` - - // total number of pods counted by this disruption budget - ExpectedPods int32 `json:"expectedPods" protobuf:"varint,6,opt,name=expectedPods"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods -type PodDisruptionBudget struct { - metav1.TypeMeta `json:",inline"` - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired behavior of the PodDisruptionBudget. - Spec PodDisruptionBudgetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` - // Most recently observed status of the PodDisruptionBudget. - Status PodDisruptionBudgetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodDisruptionBudgetList is a collection of PodDisruptionBudgets. -type PodDisruptionBudgetList struct { - metav1.TypeMeta `json:",inline"` - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - Items []PodDisruptionBudget `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:noVerbs -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Eviction evicts a pod from its node subject to certain policies and safety constraints. -// This is a subresource of Pod. A request to cause such an eviction is -// created by POSTing to .../pods//evictions. -type Eviction struct { - metav1.TypeMeta `json:",inline"` - - // ObjectMeta describes the pod that is being evicted. - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // DeleteOptions may be provided - DeleteOptions *metav1.DeleteOptions `json:"deleteOptions,omitempty" protobuf:"bytes,2,opt,name=deleteOptions"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodSecurityPolicy governs the ability to make requests that affect the Security Context -// that will be applied to a pod and container. -type PodSecurityPolicy struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // spec defines the policy enforced. - // +optional - Spec PodSecurityPolicySpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodSecurityPolicySpec defines the policy enforced. -type PodSecurityPolicySpec struct { - // privileged determines if a pod can request to be run as privileged. - // +optional - Privileged bool `json:"privileged,omitempty" protobuf:"varint,1,opt,name=privileged"` - // defaultAddCapabilities is the default set of capabilities that will be added to the container - // unless the pod spec specifically drops the capability. You may not list a capability in both - // defaultAddCapabilities and requiredDropCapabilities. 
Capabilities added here are implicitly - // allowed, and need not be included in the allowedCapabilities list. - // +optional - DefaultAddCapabilities []v1.Capability `json:"defaultAddCapabilities,omitempty" protobuf:"bytes,2,rep,name=defaultAddCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // requiredDropCapabilities are the capabilities that will be dropped from the container. These - // are required to be dropped and cannot be added. - // +optional - RequiredDropCapabilities []v1.Capability `json:"requiredDropCapabilities,omitempty" protobuf:"bytes,3,rep,name=requiredDropCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // allowedCapabilities is a list of capabilities that can be requested to add to the container. - // Capabilities in this field may be added at the pod author's discretion. - // You must not list a capability in both allowedCapabilities and requiredDropCapabilities. - // +optional - AllowedCapabilities []v1.Capability `json:"allowedCapabilities,omitempty" protobuf:"bytes,4,rep,name=allowedCapabilities,casttype=k8s.io/api/core/v1.Capability"` - // volumes is a white list of allowed volume plugins. Empty indicates that - // no volumes may be used. To allow all volumes you may use '*'. - // +optional - Volumes []FSType `json:"volumes,omitempty" protobuf:"bytes,5,rep,name=volumes,casttype=FSType"` - // hostNetwork determines if the policy allows the use of HostNetwork in the pod spec. - // +optional - HostNetwork bool `json:"hostNetwork,omitempty" protobuf:"varint,6,opt,name=hostNetwork"` - // hostPorts determines which host port ranges are allowed to be exposed. - // +optional - HostPorts []HostPortRange `json:"hostPorts,omitempty" protobuf:"bytes,7,rep,name=hostPorts"` - // hostPID determines if the policy allows the use of HostPID in the pod spec. - // +optional - HostPID bool `json:"hostPID,omitempty" protobuf:"varint,8,opt,name=hostPID"` - // hostIPC determines if the policy allows the use of HostIPC in the pod spec. - // +optional - HostIPC bool `json:"hostIPC,omitempty" protobuf:"varint,9,opt,name=hostIPC"` - // seLinux is the strategy that will dictate the allowable labels that may be set. - SELinux SELinuxStrategyOptions `json:"seLinux" protobuf:"bytes,10,opt,name=seLinux"` - // runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set. - RunAsUser RunAsUserStrategyOptions `json:"runAsUser" protobuf:"bytes,11,opt,name=runAsUser"` - // supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext. - SupplementalGroups SupplementalGroupsStrategyOptions `json:"supplementalGroups" protobuf:"bytes,12,opt,name=supplementalGroups"` - // fsGroup is the strategy that will dictate what fs group is used by the SecurityContext. - FSGroup FSGroupStrategyOptions `json:"fsGroup" protobuf:"bytes,13,opt,name=fsGroup"` - // readOnlyRootFilesystem when set to true will force containers to run with a read only root file - // system. If the container specifically requests to run with a non-read only root file system - // the PSP should deny the pod. - // If set to false the container may run with a read only root file system if it wishes but it - // will not be forced to. - // +optional - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem,omitempty" protobuf:"varint,14,opt,name=readOnlyRootFilesystem"` - // defaultAllowPrivilegeEscalation controls the default setting for whether a - // process can gain more privileges than its parent process. 
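
Several of these fields (defaultAllowPrivilegeEscalation, allowPrivilegeEscalation) are declared as *bool rather than bool so that "not set" can be told apart from an explicit false; the proto comments above state that allowPrivilegeEscalation defaults to true when unspecified. A tiny illustration of that pointer-to-bool idiom (the helper name is made up for this sketch):

package main

import "fmt"

// effectiveAllowPrivilegeEscalation resolves the tri-state *bool the way the
// field documentation describes: nil means "unset, use the default of true".
func effectiveAllowPrivilegeEscalation(v *bool) bool {
	if v == nil {
		return true
	}
	return *v
}

func main() {
	deny := false
	fmt.Println(effectiveAllowPrivilegeEscalation(nil))   // true (default applies)
	fmt.Println(effectiveAllowPrivilegeEscalation(&deny)) // false (explicitly disabled)
}
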
- // +optional - DefaultAllowPrivilegeEscalation *bool `json:"defaultAllowPrivilegeEscalation,omitempty" protobuf:"varint,15,opt,name=defaultAllowPrivilegeEscalation"` - // allowPrivilegeEscalation determines if a pod can request to allow - // privilege escalation. If unspecified, defaults to true. - // +optional - AllowPrivilegeEscalation *bool `json:"allowPrivilegeEscalation,omitempty" protobuf:"varint,16,opt,name=allowPrivilegeEscalation"` - // allowedHostPaths is a white list of allowed host paths. Empty indicates - // that all host paths may be used. - // +optional - AllowedHostPaths []AllowedHostPath `json:"allowedHostPaths,omitempty" protobuf:"bytes,17,rep,name=allowedHostPaths"` - // allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all - // Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes - // is allowed in the "volumes" field. - // +optional - AllowedFlexVolumes []AllowedFlexVolume `json:"allowedFlexVolumes,omitempty" protobuf:"bytes,18,rep,name=allowedFlexVolumes"` - // allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. - // Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection. - // - // Examples: - // e.g. "foo/*" allows "foo/bar", "foo/baz", etc. - // e.g. "foo.*" allows "foo.bar", "foo.baz", etc. - // +optional - AllowedUnsafeSysctls []string `json:"allowedUnsafeSysctls,omitempty" protobuf:"bytes,19,rep,name=allowedUnsafeSysctls"` - // forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. - // Each entry is either a plain sysctl name or ends in "*" in which case it is considered - // as a prefix of forbidden sysctls. Single * means all sysctls are forbidden. - // - // Examples: - // e.g. "foo/*" forbids "foo/bar", "foo/baz", etc. - // e.g. "foo.*" forbids "foo.bar", "foo.baz", etc. - // +optional - ForbiddenSysctls []string `json:"forbiddenSysctls,omitempty" protobuf:"bytes,20,rep,name=forbiddenSysctls"` - // AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. - // Empty or nil indicates that only the DefaultProcMountType may be used. - // This requires the ProcMountType feature flag to be enabled. - // +optional - AllowedProcMountTypes []v1.ProcMountType `json:"allowedProcMountTypes,omitempty" protobuf:"bytes,21,opt,name=allowedProcMountTypes"` -} - -// AllowedHostPath defines the host volume conditions that will be enabled by a policy -// for pods to use. It requires the path prefix to be defined. -type AllowedHostPath struct { - // pathPrefix is the path prefix that the host volume must match. - // It does not support `*`. - // Trailing slashes are trimmed when validating the path prefix with a host path. - // - // Examples: - // `/foo` would allow `/foo`, `/foo/` and `/foo/bar` - // `/foo` would not allow `/food` or `/etc/foo` - PathPrefix string `json:"pathPrefix,omitempty" protobuf:"bytes,1,rep,name=pathPrefix"` - - // when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly. - // +optional - ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,2,opt,name=readOnly"` -} - -// FSType gives strong typing to different file systems that are used by volumes. 
-type FSType string - -var ( - AzureFile FSType = "azureFile" - Flocker FSType = "flocker" - FlexVolume FSType = "flexVolume" - HostPath FSType = "hostPath" - EmptyDir FSType = "emptyDir" - GCEPersistentDisk FSType = "gcePersistentDisk" - AWSElasticBlockStore FSType = "awsElasticBlockStore" - GitRepo FSType = "gitRepo" - Secret FSType = "secret" - NFS FSType = "nfs" - ISCSI FSType = "iscsi" - Glusterfs FSType = "glusterfs" - PersistentVolumeClaim FSType = "persistentVolumeClaim" - RBD FSType = "rbd" - Cinder FSType = "cinder" - CephFS FSType = "cephFS" - DownwardAPI FSType = "downwardAPI" - FC FSType = "fc" - ConfigMap FSType = "configMap" - Quobyte FSType = "quobyte" - AzureDisk FSType = "azureDisk" - All FSType = "*" -) - -// AllowedFlexVolume represents a single Flexvolume that is allowed to be used. -type AllowedFlexVolume struct { - // driver is the name of the Flexvolume driver. - Driver string `json:"driver" protobuf:"bytes,1,opt,name=driver"` -} - -// HostPortRange defines a range of host ports that will be enabled by a policy -// for pods to use. It requires both the start and end to be defined. -type HostPortRange struct { - // min is the start of the range, inclusive. - Min int32 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int32 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// SELinuxStrategyOptions defines the strategy type and any options used to create the strategy. -type SELinuxStrategyOptions struct { - // rule is the strategy that will dictate the allowable labels that may be set. - Rule SELinuxStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=SELinuxStrategy"` - // seLinuxOptions required to run as; required for MustRunAs - // More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - // +optional - SELinuxOptions *v1.SELinuxOptions `json:"seLinuxOptions,omitempty" protobuf:"bytes,2,opt,name=seLinuxOptions"` -} - -// SELinuxStrategy denotes strategy types for generating SELinux options for a -// Security Context. -type SELinuxStrategy string - -const ( - // SELinuxStrategyMustRunAs means that container must have SELinux labels of X applied. - SELinuxStrategyMustRunAs SELinuxStrategy = "MustRunAs" - // SELinuxStrategyRunAsAny means that container may make requests for any SELinux context labels. - SELinuxStrategyRunAsAny SELinuxStrategy = "RunAsAny" -) - -// RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy. -type RunAsUserStrategyOptions struct { - // rule is the strategy that will dictate the allowable RunAsUser values that may be set. - Rule RunAsUserStrategy `json:"rule" protobuf:"bytes,1,opt,name=rule,casttype=RunAsUserStrategy"` - // ranges are the allowed ranges of uids that may be used. If you would like to force a single uid - // then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// IDRange provides a min/max of an allowed range of IDs. -type IDRange struct { - // min is the start of the range, inclusive. - Min int64 `json:"min" protobuf:"varint,1,opt,name=min"` - // max is the end of the range, inclusive. - Max int64 `json:"max" protobuf:"varint,2,opt,name=max"` -} - -// RunAsUserStrategy denotes strategy types for generating RunAsUser values for a -// Security Context. -type RunAsUserStrategy string - -const ( - // RunAsUserStrategyMustRunAs means that container must run as a particular uid. 
- RunAsUserStrategyMustRunAs RunAsUserStrategy = "MustRunAs" - // RunAsUserStrategyMustRunAsNonRoot means that container must run as a non-root uid. - RunAsUserStrategyMustRunAsNonRoot RunAsUserStrategy = "MustRunAsNonRoot" - // RunAsUserStrategyRunAsAny means that container may make requests for any uid. - RunAsUserStrategyRunAsAny RunAsUserStrategy = "RunAsAny" -) - -// FSGroupStrategyOptions defines the strategy type and options used to create the strategy. -type FSGroupStrategyOptions struct { - // rule is the strategy that will dictate what FSGroup is used in the SecurityContext. - // +optional - Rule FSGroupStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=FSGroupStrategyType"` - // ranges are the allowed ranges of fs groups. If you would like to force a single - // fs group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// FSGroupStrategyType denotes strategy types for generating FSGroup values for a -// SecurityContext -type FSGroupStrategyType string - -const ( - // FSGroupStrategyMustRunAs meant that container must have FSGroup of X applied. - FSGroupStrategyMustRunAs FSGroupStrategyType = "MustRunAs" - // FSGroupStrategyRunAsAny means that container may make requests for any FSGroup labels. - FSGroupStrategyRunAsAny FSGroupStrategyType = "RunAsAny" -) - -// SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy. -type SupplementalGroupsStrategyOptions struct { - // rule is the strategy that will dictate what supplemental groups is used in the SecurityContext. - // +optional - Rule SupplementalGroupsStrategyType `json:"rule,omitempty" protobuf:"bytes,1,opt,name=rule,casttype=SupplementalGroupsStrategyType"` - // ranges are the allowed ranges of supplemental groups. If you would like to force a single - // supplemental group then supply a single range with the same start and end. Required for MustRunAs. - // +optional - Ranges []IDRange `json:"ranges,omitempty" protobuf:"bytes,2,rep,name=ranges"` -} - -// SupplementalGroupsStrategyType denotes strategy types for determining valid supplemental -// groups for a SecurityContext. -type SupplementalGroupsStrategyType string - -const ( - // SupplementalGroupsStrategyMustRunAs means that container must run as a particular gid. - SupplementalGroupsStrategyMustRunAs SupplementalGroupsStrategyType = "MustRunAs" - // SupplementalGroupsStrategyRunAsAny means that container may make requests for any gid. - SupplementalGroupsStrategyRunAsAny SupplementalGroupsStrategyType = "RunAsAny" -) - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodSecurityPolicyList is a list of PodSecurityPolicy objects. -type PodSecurityPolicyList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is a list of schema objects. 
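
Taken together, the strategy options and FSType constants above are what a concrete PodSecurityPolicy object is assembled from. Assuming the vendored k8s.io/api package is importable, a deliberately restrictive and purely illustrative policy could be constructed like this:

package main

import (
	"fmt"

	policyv1beta1 "k8s.io/api/policy/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	allowEsc := false
	// A restrictive policy built from the fields declared in the deleted types.go:
	// no privileged pods, no privilege escalation, read-only root filesystem,
	// non-root UIDs, and only a few benign volume types.
	psp := policyv1beta1.PodSecurityPolicy{
		ObjectMeta: metav1.ObjectMeta{Name: "restricted"},
		Spec: policyv1beta1.PodSecurityPolicySpec{
			Privileged:               false,
			AllowPrivilegeEscalation: &allowEsc,
			ReadOnlyRootFilesystem:   true,
			Volumes: []policyv1beta1.FSType{
				policyv1beta1.ConfigMap,
				policyv1beta1.Secret,
				policyv1beta1.EmptyDir,
			},
			SELinux:            policyv1beta1.SELinuxStrategyOptions{Rule: policyv1beta1.SELinuxStrategyRunAsAny},
			RunAsUser:          policyv1beta1.RunAsUserStrategyOptions{Rule: policyv1beta1.RunAsUserStrategyMustRunAsNonRoot},
			FSGroup:            policyv1beta1.FSGroupStrategyOptions{Rule: policyv1beta1.FSGroupStrategyRunAsAny},
			SupplementalGroups: policyv1beta1.SupplementalGroupsStrategyOptions{Rule: policyv1beta1.SupplementalGroupsStrategyRunAsAny},
		},
	}
	fmt.Println(psp.Name, psp.Spec.RunAsUser.Rule)
}
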
- Items []PodSecurityPolicy `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index df10b2a..0000000 --- a/vendor/k8s.io/api/policy/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AllowedFlexVolume = map[string]string{ - "": "AllowedFlexVolume represents a single Flexvolume that is allowed to be used.", - "driver": "driver is the name of the Flexvolume driver.", -} - -func (AllowedFlexVolume) SwaggerDoc() map[string]string { - return map_AllowedFlexVolume -} - -var map_AllowedHostPath = map[string]string{ - "": "AllowedHostPath defines the host volume conditions that will be enabled by a policy for pods to use. It requires the path prefix to be defined.", - "pathPrefix": "pathPrefix is the path prefix that the host volume must match. It does not support `*`. Trailing slashes are trimmed when validating the path prefix with a host path.\n\nExamples: `/foo` would allow `/foo`, `/foo/` and `/foo/bar` `/foo` would not allow `/food` or `/etc/foo`", - "readOnly": "when set to true, will allow host volumes matching the pathPrefix only if all volume mounts are readOnly.", -} - -func (AllowedHostPath) SwaggerDoc() map[string]string { - return map_AllowedHostPath -} - -var map_Eviction = map[string]string{ - "": "Eviction evicts a pod from its node subject to certain policies and safety constraints. This is a subresource of Pod. A request to cause such an eviction is created by POSTing to .../pods//evictions.", - "metadata": "ObjectMeta describes the pod that is being evicted.", - "deleteOptions": "DeleteOptions may be provided", -} - -func (Eviction) SwaggerDoc() map[string]string { - return map_Eviction -} - -var map_FSGroupStrategyOptions = map[string]string{ - "": "FSGroupStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "rule is the strategy that will dictate what FSGroup is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of fs groups. If you would like to force a single fs group then supply a single range with the same start and end. 
Required for MustRunAs.", -} - -func (FSGroupStrategyOptions) SwaggerDoc() map[string]string { - return map_FSGroupStrategyOptions -} - -var map_HostPortRange = map[string]string{ - "": "HostPortRange defines a range of host ports that will be enabled by a policy for pods to use. It requires both the start and end to be defined.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (HostPortRange) SwaggerDoc() map[string]string { - return map_HostPortRange -} - -var map_IDRange = map[string]string{ - "": "IDRange provides a min/max of an allowed range of IDs.", - "min": "min is the start of the range, inclusive.", - "max": "max is the end of the range, inclusive.", -} - -func (IDRange) SwaggerDoc() map[string]string { - return map_IDRange -} - -var map_PodDisruptionBudget = map[string]string{ - "": "PodDisruptionBudget is an object to define the max disruption that can be caused to a collection of pods", - "spec": "Specification of the desired behavior of the PodDisruptionBudget.", - "status": "Most recently observed status of the PodDisruptionBudget.", -} - -func (PodDisruptionBudget) SwaggerDoc() map[string]string { - return map_PodDisruptionBudget -} - -var map_PodDisruptionBudgetList = map[string]string{ - "": "PodDisruptionBudgetList is a collection of PodDisruptionBudgets.", -} - -func (PodDisruptionBudgetList) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetList -} - -var map_PodDisruptionBudgetSpec = map[string]string{ - "": "PodDisruptionBudgetSpec is a description of a PodDisruptionBudget.", - "minAvailable": "An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod. So for example you can prevent all voluntary evictions by specifying \"100%\".", - "selector": "Label query over pods whose evictions are managed by the disruption budget.", - "maxUnavailable": "An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".", -} - -func (PodDisruptionBudgetSpec) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetSpec -} - -var map_PodDisruptionBudgetStatus = map[string]string{ - "": "PodDisruptionBudgetStatus represents information about the status of a PodDisruptionBudget. Status may trail the actual state of a system.", - "observedGeneration": "Most recent generation observed when updating this PDB status. PodDisruptionsAllowed and other status informatio is valid only if observedGeneration equals to PDB's object generation.", - "disruptedPods": "DisruptedPods contains information about pods whose eviction was processed by the API server eviction subresource handler but has not yet been observed by the PodDisruptionBudget controller. A pod will be in this map from the time when the API server processed the eviction request to the time when the pod is seen by PDB controller as having been marked for deletion (or after a timeout). The key in the map is the name of the pod and the value is the time when the API server processed the eviction request. If the deletion didn't occur and a pod is still there it will be removed from the list automatically by PodDisruptionBudget controller after some time. 
If everything goes smooth this map should be empty for the most of the time. Large number of entries in the map may indicate problems with pod deletions.", - "disruptionsAllowed": "Number of pod disruptions that are currently allowed.", - "currentHealthy": "current number of healthy pods", - "desiredHealthy": "minimum desired number of healthy pods", - "expectedPods": "total number of pods counted by this disruption budget", -} - -func (PodDisruptionBudgetStatus) SwaggerDoc() map[string]string { - return map_PodDisruptionBudgetStatus -} - -var map_PodSecurityPolicy = map[string]string{ - "": "PodSecurityPolicy governs the ability to make requests that affect the Security Context that will be applied to a pod and container.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "spec defines the policy enforced.", -} - -func (PodSecurityPolicy) SwaggerDoc() map[string]string { - return map_PodSecurityPolicy -} - -var map_PodSecurityPolicyList = map[string]string{ - "": "PodSecurityPolicyList is a list of PodSecurityPolicy objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is a list of schema objects.", -} - -func (PodSecurityPolicyList) SwaggerDoc() map[string]string { - return map_PodSecurityPolicyList -} - -var map_PodSecurityPolicySpec = map[string]string{ - "": "PodSecurityPolicySpec defines the policy enforced.", - "privileged": "privileged determines if a pod can request to be run as privileged.", - "defaultAddCapabilities": "defaultAddCapabilities is the default set of capabilities that will be added to the container unless the pod spec specifically drops the capability. You may not list a capability in both defaultAddCapabilities and requiredDropCapabilities. Capabilities added here are implicitly allowed, and need not be included in the allowedCapabilities list.", - "requiredDropCapabilities": "requiredDropCapabilities are the capabilities that will be dropped from the container. These are required to be dropped and cannot be added.", - "allowedCapabilities": "allowedCapabilities is a list of capabilities that can be requested to add to the container. Capabilities in this field may be added at the pod author's discretion. You must not list a capability in both allowedCapabilities and requiredDropCapabilities.", - "volumes": "volumes is a white list of allowed volume plugins. Empty indicates that no volumes may be used. 
To allow all volumes you may use '*'.", - "hostNetwork": "hostNetwork determines if the policy allows the use of HostNetwork in the pod spec.", - "hostPorts": "hostPorts determines which host port ranges are allowed to be exposed.", - "hostPID": "hostPID determines if the policy allows the use of HostPID in the pod spec.", - "hostIPC": "hostIPC determines if the policy allows the use of HostIPC in the pod spec.", - "seLinux": "seLinux is the strategy that will dictate the allowable labels that may be set.", - "runAsUser": "runAsUser is the strategy that will dictate the allowable RunAsUser values that may be set.", - "supplementalGroups": "supplementalGroups is the strategy that will dictate what supplemental groups are used by the SecurityContext.", - "fsGroup": "fsGroup is the strategy that will dictate what fs group is used by the SecurityContext.", - "readOnlyRootFilesystem": "readOnlyRootFilesystem when set to true will force containers to run with a read only root file system. If the container specifically requests to run with a non-read only root file system the PSP should deny the pod. If set to false the container may run with a read only root file system if it wishes but it will not be forced to.", - "defaultAllowPrivilegeEscalation": "defaultAllowPrivilegeEscalation controls the default setting for whether a process can gain more privileges than its parent process.", - "allowPrivilegeEscalation": "allowPrivilegeEscalation determines if a pod can request to allow privilege escalation. If unspecified, defaults to true.", - "allowedHostPaths": "allowedHostPaths is a white list of allowed host paths. Empty indicates that all host paths may be used.", - "allowedFlexVolumes": "allowedFlexVolumes is a whitelist of allowed Flexvolumes. Empty or nil indicates that all Flexvolumes may be used. This parameter is effective only when the usage of the Flexvolumes is allowed in the \"volumes\" field.", - "allowedUnsafeSysctls": "allowedUnsafeSysctls is a list of explicitly allowed unsafe sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of allowed sysctls. Single * means all unsafe sysctls are allowed. Kubelet has to whitelist all allowed unsafe sysctls explicitly to avoid rejection.\n\nExamples: e.g. \"foo/*\" allows \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" allows \"foo.bar\", \"foo.baz\", etc.", - "forbiddenSysctls": "forbiddenSysctls is a list of explicitly forbidden sysctls, defaults to none. Each entry is either a plain sysctl name or ends in \"*\" in which case it is considered as a prefix of forbidden sysctls. Single * means all sysctls are forbidden.\n\nExamples: e.g. \"foo/*\" forbids \"foo/bar\", \"foo/baz\", etc. e.g. \"foo.*\" forbids \"foo.bar\", \"foo.baz\", etc.", - "allowedProcMountTypes": "AllowedProcMountTypes is a whitelist of allowed ProcMountTypes. Empty or nil indicates that only the DefaultProcMountType may be used. This requires the ProcMountType feature flag to be enabled.", -} - -func (PodSecurityPolicySpec) SwaggerDoc() map[string]string { - return map_PodSecurityPolicySpec -} - -var map_RunAsUserStrategyOptions = map[string]string{ - "": "RunAsUserStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable RunAsUser values that may be set.", - "ranges": "ranges are the allowed ranges of uids that may be used. 
If you would like to force a single uid then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (RunAsUserStrategyOptions) SwaggerDoc() map[string]string { - return map_RunAsUserStrategyOptions -} - -var map_SELinuxStrategyOptions = map[string]string{ - "": "SELinuxStrategyOptions defines the strategy type and any options used to create the strategy.", - "rule": "rule is the strategy that will dictate the allowable labels that may be set.", - "seLinuxOptions": "seLinuxOptions required to run as; required for MustRunAs More info: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/", -} - -func (SELinuxStrategyOptions) SwaggerDoc() map[string]string { - return map_SELinuxStrategyOptions -} - -var map_SupplementalGroupsStrategyOptions = map[string]string{ - "": "SupplementalGroupsStrategyOptions defines the strategy type and options used to create the strategy.", - "rule": "rule is the strategy that will dictate what supplemental groups is used in the SecurityContext.", - "ranges": "ranges are the allowed ranges of supplemental groups. If you would like to force a single supplemental group then supply a single range with the same start and end. Required for MustRunAs.", -} - -func (SupplementalGroupsStrategyOptions) SwaggerDoc() map[string]string { - return map_SupplementalGroupsStrategyOptions -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 9af268a..0000000 --- a/vendor/k8s.io/api/policy/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,462 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - corev1 "k8s.io/api/core/v1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - intstr "k8s.io/apimachinery/pkg/util/intstr" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedFlexVolume) DeepCopyInto(out *AllowedFlexVolume) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedFlexVolume. -func (in *AllowedFlexVolume) DeepCopy() *AllowedFlexVolume { - if in == nil { - return nil - } - out := new(AllowedFlexVolume) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AllowedHostPath) DeepCopyInto(out *AllowedHostPath) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AllowedHostPath. 
-func (in *AllowedHostPath) DeepCopy() *AllowedHostPath { - if in == nil { - return nil - } - out := new(AllowedHostPath) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Eviction) DeepCopyInto(out *Eviction) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.DeleteOptions != nil { - in, out := &in.DeleteOptions, &out.DeleteOptions - *out = new(v1.DeleteOptions) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Eviction. -func (in *Eviction) DeepCopy() *Eviction { - if in == nil { - return nil - } - out := new(Eviction) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Eviction) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *FSGroupStrategyOptions) DeepCopyInto(out *FSGroupStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FSGroupStrategyOptions. -func (in *FSGroupStrategyOptions) DeepCopy() *FSGroupStrategyOptions { - if in == nil { - return nil - } - out := new(FSGroupStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *HostPortRange) DeepCopyInto(out *HostPortRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new HostPortRange. -func (in *HostPortRange) DeepCopy() *HostPortRange { - if in == nil { - return nil - } - out := new(HostPortRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *IDRange) DeepCopyInto(out *IDRange) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new IDRange. -func (in *IDRange) DeepCopy() *IDRange { - if in == nil { - return nil - } - out := new(IDRange) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodDisruptionBudget) DeepCopyInto(out *PodDisruptionBudget) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudget. -func (in *PodDisruptionBudget) DeepCopy() *PodDisruptionBudget { - if in == nil { - return nil - } - out := new(PodDisruptionBudget) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodDisruptionBudget) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodDisruptionBudgetList) DeepCopyInto(out *PodDisruptionBudgetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodDisruptionBudget, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetList. -func (in *PodDisruptionBudgetList) DeepCopy() *PodDisruptionBudgetList { - if in == nil { - return nil - } - out := new(PodDisruptionBudgetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodDisruptionBudgetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodDisruptionBudgetSpec) DeepCopyInto(out *PodDisruptionBudgetSpec) { - *out = *in - if in.MinAvailable != nil { - in, out := &in.MinAvailable, &out.MinAvailable - *out = new(intstr.IntOrString) - **out = **in - } - if in.Selector != nil { - in, out := &in.Selector, &out.Selector - *out = new(v1.LabelSelector) - (*in).DeepCopyInto(*out) - } - if in.MaxUnavailable != nil { - in, out := &in.MaxUnavailable, &out.MaxUnavailable - *out = new(intstr.IntOrString) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetSpec. -func (in *PodDisruptionBudgetSpec) DeepCopy() *PodDisruptionBudgetSpec { - if in == nil { - return nil - } - out := new(PodDisruptionBudgetSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodDisruptionBudgetStatus) DeepCopyInto(out *PodDisruptionBudgetStatus) { - *out = *in - if in.DisruptedPods != nil { - in, out := &in.DisruptedPods, &out.DisruptedPods - *out = make(map[string]v1.Time, len(*in)) - for key, val := range *in { - (*out)[key] = *val.DeepCopy() - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodDisruptionBudgetStatus. -func (in *PodDisruptionBudgetStatus) DeepCopy() *PodDisruptionBudgetStatus { - if in == nil { - return nil - } - out := new(PodDisruptionBudgetStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicy) DeepCopyInto(out *PodSecurityPolicy) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicy. -func (in *PodSecurityPolicy) DeepCopy() *PodSecurityPolicy { - if in == nil { - return nil - } - out := new(PodSecurityPolicy) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodSecurityPolicy) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicyList) DeepCopyInto(out *PodSecurityPolicyList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodSecurityPolicy, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicyList. -func (in *PodSecurityPolicyList) DeepCopy() *PodSecurityPolicyList { - if in == nil { - return nil - } - out := new(PodSecurityPolicyList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodSecurityPolicyList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodSecurityPolicySpec) DeepCopyInto(out *PodSecurityPolicySpec) { - *out = *in - if in.DefaultAddCapabilities != nil { - in, out := &in.DefaultAddCapabilities, &out.DefaultAddCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.RequiredDropCapabilities != nil { - in, out := &in.RequiredDropCapabilities, &out.RequiredDropCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.AllowedCapabilities != nil { - in, out := &in.AllowedCapabilities, &out.AllowedCapabilities - *out = make([]corev1.Capability, len(*in)) - copy(*out, *in) - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]FSType, len(*in)) - copy(*out, *in) - } - if in.HostPorts != nil { - in, out := &in.HostPorts, &out.HostPorts - *out = make([]HostPortRange, len(*in)) - copy(*out, *in) - } - in.SELinux.DeepCopyInto(&out.SELinux) - in.RunAsUser.DeepCopyInto(&out.RunAsUser) - in.SupplementalGroups.DeepCopyInto(&out.SupplementalGroups) - in.FSGroup.DeepCopyInto(&out.FSGroup) - if in.DefaultAllowPrivilegeEscalation != nil { - in, out := &in.DefaultAllowPrivilegeEscalation, &out.DefaultAllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowPrivilegeEscalation != nil { - in, out := &in.AllowPrivilegeEscalation, &out.AllowPrivilegeEscalation - *out = new(bool) - **out = **in - } - if in.AllowedHostPaths != nil { - in, out := &in.AllowedHostPaths, &out.AllowedHostPaths - *out = make([]AllowedHostPath, len(*in)) - copy(*out, *in) - } - if in.AllowedFlexVolumes != nil { - in, out := &in.AllowedFlexVolumes, &out.AllowedFlexVolumes - *out = make([]AllowedFlexVolume, len(*in)) - copy(*out, *in) - } - if in.AllowedUnsafeSysctls != nil { - in, out := &in.AllowedUnsafeSysctls, &out.AllowedUnsafeSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ForbiddenSysctls != nil { - in, out := &in.ForbiddenSysctls, &out.ForbiddenSysctls - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowedProcMountTypes != nil { - in, out := &in.AllowedProcMountTypes, &out.AllowedProcMountTypes - *out = make([]corev1.ProcMountType, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodSecurityPolicySpec. 
-func (in *PodSecurityPolicySpec) DeepCopy() *PodSecurityPolicySpec { - if in == nil { - return nil - } - out := new(PodSecurityPolicySpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RunAsUserStrategyOptions) DeepCopyInto(out *RunAsUserStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RunAsUserStrategyOptions. -func (in *RunAsUserStrategyOptions) DeepCopy() *RunAsUserStrategyOptions { - if in == nil { - return nil - } - out := new(RunAsUserStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SELinuxStrategyOptions) DeepCopyInto(out *SELinuxStrategyOptions) { - *out = *in - if in.SELinuxOptions != nil { - in, out := &in.SELinuxOptions, &out.SELinuxOptions - *out = new(corev1.SELinuxOptions) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SELinuxStrategyOptions. -func (in *SELinuxStrategyOptions) DeepCopy() *SELinuxStrategyOptions { - if in == nil { - return nil - } - out := new(SELinuxStrategyOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *SupplementalGroupsStrategyOptions) DeepCopyInto(out *SupplementalGroupsStrategyOptions) { - *out = *in - if in.Ranges != nil { - in, out := &in.Ranges, &out.Ranges - *out = make([]IDRange, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SupplementalGroupsStrategyOptions. -func (in *SupplementalGroupsStrategyOptions) DeepCopy() *SupplementalGroupsStrategyOptions { - if in == nil { - return nil - } - out := new(SupplementalGroupsStrategyOptions) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/rbac/v1/doc.go b/vendor/k8s.io/api/rbac/v1/doc.go deleted file mode 100644 index 28ceb26..0000000 --- a/vendor/k8s.io/api/rbac/v1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=rbac.authorization.k8s.io -package v1 // import "k8s.io/api/rbac/v1" diff --git a/vendor/k8s.io/api/rbac/v1/generated.pb.go b/vendor/k8s.io/api/rbac/v1/generated.pb.go deleted file mode 100644 index 21010fb..0000000 --- a/vendor/k8s.io/api/rbac/v1/generated.pb.go +++ /dev/null @@ -1,2748 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() 
([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func init() { - proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1.AggregationRule") - proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1.ClusterRole") - proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1.ClusterRoleBinding") - proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1.ClusterRoleBindingList") - proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1.ClusterRoleList") - proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1.PolicyRule") - proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1.Role") - proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1.RoleBinding") - proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1.RoleBindingList") - proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1.RoleList") - proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1.RoleRef") - proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1.Subject") -} -func (m *AggregationRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterRole) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AggregationRule != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - return i, nil -} - -func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PolicyRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *Role) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Role) MarshalTo(dAtA 
[]byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil -} - -func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m 
*Subject) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AggregationRule) Size() (n int) { - var l int - _ = l - if len(m.ClusterRoleSelectors) > 0 { - for _, e := range m.ClusterRoleSelectors { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterRole) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AggregationRule != nil { - l = m.AggregationRule.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClusterRoleBinding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subjects) > 0 { - for _, e := range m.Subjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.RoleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterRoleBindingList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterRoleList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PolicyRule) Size() (n int) { - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if 
len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Role) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleBinding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subjects) > 0 { - for _, e := range m.Subjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.RoleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RoleBindingList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleRef) Size() (n int) { - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Subject) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AggregationRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRole) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRoleBinding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, - `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this 
*ClusterRoleBindingList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRoleList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PolicyRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PolicyRule{`, - `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, - `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, - `}`, - }, "") - return s -} -func (this *Role) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleBinding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, - `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleBindingList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleRef{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} 
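[Editor's aside, not part of the patch:] For context on what this vendored file provided before its removal, every generated rbac/v1 type carries its own Marshal, Unmarshal, Size, and String methods, as the functions above and below show. A minimal sketch of how a caller would round-trip one of these types through those methods (illustrative only; it assumes the vendored k8s.io/api/rbac/v1 package is still importable):

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// RoleRef's fields match the Marshal/String code above: APIGroup, Kind, Name.
	in := rbacv1.RoleRef{
		APIGroup: "rbac.authorization.k8s.io",
		Kind:     "ClusterRole",
		Name:     "view",
	}

	// Marshal uses the hand-written protobuf encoder deleted by this patch.
	raw, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal reverses it with the varint/field-tag loop shown further down.
	var out rbacv1.RoleRef
	if err := out.Unmarshal(raw); err != nil {
		panic(err)
	}

	fmt.Println(out.String()) // prints the same APIGroup/Kind/Name
}
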
-func (this *Subject) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Subject{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AggregationRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) - if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRole) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - 
} - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AggregationRule == nil { - m.AggregationRule = &AggregationRule{} - } - if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterRoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } 
- return nil -} -func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterRole{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PolicyRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 
4 { - return fmt.Errorf("proto: Role: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleBinding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleBindingList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m 
*RoleList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Role{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = 
string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Subject) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Subject: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - 
iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer 
overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 807 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x55, 0xcf, 0x6f, 0xe3, 0x44, - 0x14, 0xce, 0xa4, 0x89, 0x1a, 0x4f, 0x88, 0x42, 0x87, 0x0a, 0x59, 0x05, 0x39, 0x95, 0x91, 0x50, - 0x25, 0xc0, 0x26, 0x05, 0x01, 0x12, 0xea, 0xa1, 0x2e, 0x02, 0x55, 0x2d, 0xa5, 0x9a, 0x0a, 0x0e, - 0x88, 0x03, 0x63, 0x67, 0xea, 0x0e, 0xf1, 0x2f, 0xcd, 0xd8, 0x91, 0x2a, 0x2e, 0x08, 0x89, 0x03, - 0xb7, 0x3d, 0xee, 0xfe, 0x05, 0x7b, 0xd9, 0x3d, 0xee, 0x5f, 0xb0, 0x97, 0x1e, 0x7b, 0xec, 0x29, - 0xda, 0x7a, 0xff, 0x90, 0x5d, 0xf9, 0x57, 0x9c, 0x1f, 0xee, 0x36, 0xa7, 0x48, 0xab, 0x3d, 0xb5, - 0xf3, 0xde, 0xf7, 0xbe, 0xf7, 0xcd, 0xe7, 0x79, 0x2f, 0xf0, 0xfb, 0xe1, 0x77, 0x42, 0x63, 0xbe, - 0x3e, 0x8c, 0x4c, 0xca, 0x3d, 0x1a, 0x52, 0xa1, 0x8f, 0xa8, 0x37, 0xf0, 0xb9, 0x9e, 0x27, 0x48, - 0xc0, 0x74, 0x6e, 0x12, 0x4b, 0x1f, 0xf5, 0x75, 0x9b, 0x7a, 0x94, 0x93, 0x90, 0x0e, 0xb4, 0x80, - 0xfb, 0xa1, 0x8f, 0x50, 0x86, 0xd1, 0x48, 0xc0, 0xb4, 0x04, 0xa3, 0x8d, 0xfa, 0x5b, 0x5f, 0xd8, - 0x2c, 0xbc, 0x88, 0x4c, 0xcd, 0xf2, 0x5d, 0xdd, 0xf6, 0x6d, 0x5f, 0x4f, 0xa1, 0x66, 0x74, 0x9e, - 0x9e, 0xd2, 0x43, 0xfa, 0x5f, 0x46, 0xb1, 0xf5, 0x75, 0xd9, 0xc6, 0x25, 0xd6, 0x05, 0xf3, 0x28, - 0xbf, 0xd4, 0x83, 0xa1, 0x9d, 0x04, 0x84, 0xee, 0xd2, 0x90, 0x54, 0x34, 0xde, 0xd2, 0xef, 0xaa, - 0xe2, 0x91, 0x17, 0x32, 0x97, 0x2e, 0x14, 0x7c, 0x73, 0x5f, 0x81, 0xb0, 0x2e, 0xa8, 0x4b, 0xe6, - 0xeb, 0xd4, 0x47, 0x00, 0x76, 0xf7, 0x6d, 0x9b, 0x53, 0x9b, 0x84, 0xcc, 0xf7, 0x70, 0xe4, 0x50, - 0xf4, 0x1f, 0x80, 0x9b, 0x96, 0x13, 0x89, 0x90, 0x72, 0xec, 0x3b, 0xf4, 0x8c, 0x3a, 0xd4, 0x0a, - 0x7d, 0x2e, 0x64, 0xb0, 0xbd, 0xb6, 0xd3, 0xde, 0xfd, 0x4a, 0x2b, 0x5d, 0x99, 0xf4, 0xd2, 0x82, - 0xa1, 0x9d, 0x04, 0x84, 0x96, 0x5c, 0x49, 0x1b, 0xf5, 0xb5, 0x63, 0x62, 0x52, 0xa7, 0xa8, 0x35, - 0x3e, 0xbe, 0x1a, 0xf7, 0x6a, 0xf1, 0xb8, 0xb7, 0x79, 0x50, 0x41, 0x8c, 0x2b, 0xdb, 0xa9, 0x0f, - 0xeb, 0xb0, 0x3d, 0x05, 0x47, 0x7f, 0xc2, 0x56, 0x42, 0x3e, 0x20, 0x21, 0x91, 0xc1, 0x36, 0xd8, - 0x69, 0xef, 0x7e, 0xb9, 0x9c, 0x94, 0x5f, 0xcc, 0xbf, 0xa8, 0x15, 0xfe, 0x4c, 0x43, 0x62, 0xa0, - 0x5c, 0x07, 0x2c, 0x63, 0x78, 0xc2, 0x8a, 0x0e, 0x60, 0x93, 0x47, 0x0e, 0x15, 0x72, 0x3d, 0xbd, - 0xa9, 0xa2, 0x2d, 0x7e, 0x7f, 0xed, 0xd4, 0x77, 0x98, 0x75, 0x99, 0x18, 0x65, 0x74, 0x72, 0xb2, - 0x66, 0x72, 0x12, 0x38, 0xab, 0x45, 0x26, 0xec, 0x92, 0x59, 0x47, 0xe5, 0xb5, 0x54, 0xed, 0x27, - 0x55, 0x74, 0x73, 0xe6, 0x1b, 0x1f, 0xc4, 0xe3, 0xde, 0xfc, 0x17, 0xc1, 0xf3, 0x84, 0xea, 0xff, - 0x75, 0x88, 0xa6, 0xac, 0x31, 0x98, 0x37, 0x60, 0x9e, 0xbd, 0x02, 0x87, 0x0e, 0x61, 0x4b, 0x44, - 0x69, 0xa2, 0x30, 0xe9, 0xa3, 0xaa, 0x5b, 0x9d, 0x65, 0x18, 0xe3, 0xfd, 0x9c, 0xac, 0x95, 0x07, - 0x04, 0x9e, 0x94, 0xa3, 0x1f, 0xe1, 0x3a, 0xf7, 0x1d, 0x8a, 0xe9, 0x79, 0xee, 0x4f, 0x25, 0x13, - 0xce, 0x20, 0x46, 0x37, 0x67, 0x5a, 0xcf, 0x03, 0xb8, 0x28, 0x56, 0x9f, 0x03, 0xf8, 0xe1, 0xa2, - 0x17, 0xc7, 0x4c, 0x84, 0xe8, 0x8f, 0x05, 0x3f, 0xb4, 0x25, 0x1f, 0x2f, 0x13, 0x99, 0x1b, 0x93, - 0x0b, 0x14, 0x91, 0x29, 0x2f, 0x8e, 0x60, 0x93, 0x85, 0xd4, 0x2d, 0x8c, 0xf8, 0xb4, 0x4a, 0xfe, - 0xa2, 0xb0, 0xf2, 0xd5, 0x1c, 0x26, 0xc5, 0x38, 0xe3, 0x50, 0x9f, 0x01, 0xd8, 0x9d, 0x02, 0xaf, - 0x40, 0xfe, 0x0f, 0xb3, 0xf2, 0x7b, 0xf7, 0xc9, 0xaf, 0xd6, 0xfd, 0x0a, 0x40, 0x58, 0x8e, 0x04, - 0xea, 0xc1, 0xe6, 0x88, 0x72, 0x33, 0xdb, 0x15, 0x92, 0x21, 0x25, 0xf8, 0xdf, 0x92, 
0x00, 0xce, - 0xe2, 0xe8, 0x33, 0x28, 0x91, 0x80, 0xfd, 0xc4, 0xfd, 0x28, 0xc8, 0x3a, 0x4b, 0x46, 0x27, 0x1e, - 0xf7, 0xa4, 0xfd, 0xd3, 0xc3, 0x2c, 0x88, 0xcb, 0x7c, 0x02, 0xe6, 0x54, 0xf8, 0x11, 0xb7, 0xa8, - 0x90, 0xd7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x6f, 0x61, 0xa7, 0x38, 0x9c, 0x10, 0x97, - 0x0a, 0xb9, 0x91, 0x16, 0x6c, 0xc4, 0xe3, 0x5e, 0x07, 0x4f, 0x27, 0xf0, 0x2c, 0x0e, 0xed, 0xc1, - 0xae, 0xe7, 0x7b, 0x05, 0xe4, 0x57, 0x7c, 0x2c, 0xe4, 0x66, 0x5a, 0x9a, 0xce, 0xe2, 0xc9, 0x6c, - 0x0a, 0xcf, 0x63, 0xd5, 0xa7, 0x00, 0x36, 0xde, 0xa2, 0xfd, 0xa4, 0xfe, 0x5b, 0x87, 0xed, 0x77, - 0x7e, 0x69, 0x24, 0xe3, 0xb6, 0xda, 0x6d, 0xb1, 0xcc, 0xb8, 0xdd, 0xbf, 0x26, 0x1e, 0x03, 0xd8, - 0x5a, 0xd1, 0x7e, 0xd8, 0x9b, 0x15, 0x2c, 0xdf, 0x29, 0xb8, 0x5a, 0xe9, 0xdf, 0xb0, 0x70, 0x1d, - 0x7d, 0x0e, 0x5b, 0xc5, 0x4c, 0xa7, 0x3a, 0xa5, 0xb2, 0x6f, 0x31, 0xf6, 0x78, 0x82, 0x40, 0xdb, - 0xb0, 0x31, 0x64, 0xde, 0x40, 0xae, 0xa7, 0xc8, 0xf7, 0x72, 0x64, 0xe3, 0x88, 0x79, 0x03, 0x9c, - 0x66, 0x12, 0x84, 0x47, 0xdc, 0xec, 0x67, 0x75, 0x0a, 0x91, 0x4c, 0x33, 0x4e, 0x33, 0xea, 0x13, - 0x00, 0xd7, 0xf3, 0xd7, 0x33, 0xe1, 0x03, 0x77, 0xf2, 0x4d, 0xeb, 0xab, 0x2f, 0xa3, 0xef, 0xcd, - 0xdd, 0x91, 0x0e, 0xa5, 0xe4, 0xaf, 0x08, 0x88, 0x45, 0xe5, 0x46, 0x0a, 0xdb, 0xc8, 0x61, 0xd2, - 0x49, 0x91, 0xc0, 0x25, 0xc6, 0xd8, 0xb9, 0xba, 0x55, 0x6a, 0xd7, 0xb7, 0x4a, 0xed, 0xe6, 0x56, - 0xa9, 0xfd, 0x13, 0x2b, 0xe0, 0x2a, 0x56, 0xc0, 0x75, 0xac, 0x80, 0x9b, 0x58, 0x01, 0x2f, 0x62, - 0x05, 0x3c, 0x78, 0xa9, 0xd4, 0x7e, 0xaf, 0x8f, 0xfa, 0xaf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x24, - 0xa1, 0x47, 0x98, 0xcf, 0x0a, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1/generated.proto b/vendor/k8s.io/api/rbac/v1/generated.proto deleted file mode 100644 index 4b321a7..0000000 --- a/vendor/k8s.io/api/rbac/v1/generated.proto +++ /dev/null @@ -1,197 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.rbac.v1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole -message AggregationRule { - // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. - // If any of the selectors match, then the ClusterRole's permissions will be added - // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; -} - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -message ClusterRole { - // Standard object's metadata. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this ClusterRole - repeated PolicyRule rules = 2; - - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. - // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be - // stomped by the controller. - // +optional - optional AggregationRule aggregationRule = 3; -} - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -message ClusterRoleBinding { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - // +optional - repeated Subject subjects = 2; - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional RoleRef roleRef = 3; -} - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -message ClusterRoleBindingList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of ClusterRoleBindings - repeated ClusterRoleBinding items = 2; -} - -// ClusterRoleList is a collection of ClusterRoles -message ClusterRoleList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of ClusterRoles - repeated ClusterRole items = 2; -} - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - repeated string verbs = 1; - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. - // +optional - repeated string apiGroups = 2; - - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - // +optional - repeated string resources = 3; - - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - // +optional - repeated string resourceNames = 4; - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - // +optional - repeated string nonResourceURLs = 5; -} - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -message Role { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this Role - repeated PolicyRule rules = 2; -} - -// RoleBinding references a role, but does not contain it. 
It can reference a Role in the same namespace or a ClusterRole in the global namespace. -// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -message RoleBinding { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - // +optional - repeated Subject subjects = 2; - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional RoleRef roleRef = 3; -} - -// RoleBindingList is a collection of RoleBindings -message RoleBindingList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of RoleBindings - repeated RoleBinding items = 2; -} - -// RoleList is a collection of Roles -message RoleList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of Roles - repeated Role items = 2; -} - -// RoleRef contains information that points to the role being used -message RoleRef { - // APIGroup is the group for the resource being referenced - optional string apiGroup = 1; - - // Kind is the type of resource being referenced - optional string kind = 2; - - // Name is the name of resource being referenced - optional string name = 3; -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -message Subject { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - optional string kind = 1; - - // APIGroup holds the API group of the referenced subject. - // Defaults to "" for ServiceAccount subjects. - // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - // +optional - optional string apiGroup = 2; - - // Name of the object being referenced. - optional string name = 3; - - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - // +optional - optional string namespace = 4; -} - diff --git a/vendor/k8s.io/api/rbac/v1/register.go b/vendor/k8s.io/api/rbac/v1/register.go deleted file mode 100644 index 8f1fd46..0000000 --- a/vendor/k8s.io/api/rbac/v1/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "rbac.authorization.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Role{}, - &RoleBinding{}, - &RoleBindingList{}, - &RoleList{}, - - &ClusterRole{}, - &ClusterRoleBinding{}, - &ClusterRoleBindingList{}, - &ClusterRoleList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/rbac/v1/types.go b/vendor/k8s.io/api/rbac/v1/types.go deleted file mode 100644 index 17163cb..0000000 --- a/vendor/k8s.io/api/rbac/v1/types.go +++ /dev/null @@ -1,235 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Authorization is calculated against -// 1. evaluation of ClusterRoleBindings - short circuit on match -// 2. evaluation of RoleBindings in the namespace requested - short circuit on match -// 3. deny by default - -const ( - APIGroupAll = "*" - ResourceAll = "*" - VerbAll = "*" - NonResourceAll = "*" - - GroupKind = "Group" - ServiceAccountKind = "ServiceAccount" - UserKind = "User" - - // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" - AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" -) - -// Authorization is calculated against -// 1. evaluation of ClusterRoleBindings - short circuit on match -// 2. evaluation of RoleBindings in the namespace requested - short circuit on match -// 3. deny by default - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. 
- // +optional - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - // +optional - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - // +optional - ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - // +optional - NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -type Subject struct { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // APIGroup holds the API group of the referenced subject. - // Defaults to "" for ServiceAccount subjects. - // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` - // Name of the object being referenced. - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` -} - -// RoleRef contains information that points to the role being used -type RoleRef struct { - // APIGroup is the group for the resource being referenced - APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced - Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -type Role struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Rules holds all the PolicyRules for this Role - Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. 
-// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -type RoleBinding struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Subjects holds references to the objects the role applies to. - // +optional - Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleBindingList is a collection of RoleBindings -type RoleBindingList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of RoleBindings - Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleList is a collection of Roles -type RoleList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of Roles - Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -type ClusterRole struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Rules holds all the PolicyRules for this ClusterRole - Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` - - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. - // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be - // stomped by the controller. - // +optional - AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` -} - -// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole -type AggregationRule struct { - // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. - // If any of the selectors match, then the ClusterRole's permissions will be added - // +optional - ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -type ClusterRoleBinding struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Subjects holds references to the objects the role applies to. - // +optional - Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -type ClusterRoleBindingList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ClusterRoleBindings - Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleList is a collection of ClusterRoles -type ClusterRoleList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ClusterRoles - Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go deleted file mode 100644 index 0ec20c8..0000000 --- a/vendor/k8s.io/api/rbac/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", - "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
If any of the selectors match, then the ClusterRole's permissions will be added", -} - -func (AggregationRule) SwaggerDoc() map[string]string { - return map_AggregationRule -} - -var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", - "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", -} - -func (ClusterRole) SwaggerDoc() map[string]string { - return map_ClusterRole -} - -var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", - "metadata": "Standard object's metadata.", - "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", -} - -func (ClusterRoleBinding) SwaggerDoc() map[string]string { - return map_ClusterRoleBinding -} - -var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", - "metadata": "Standard object's metadata.", - "items": "Items is a list of ClusterRoleBindings", -} - -func (ClusterRoleBindingList) SwaggerDoc() map[string]string { - return map_ClusterRoleBindingList -} - -var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles", - "metadata": "Standard object's metadata.", - "items": "Items is a list of ClusterRoles", -} - -func (ClusterRoleList) SwaggerDoc() map[string]string { - return map_ClusterRoleList -} - -var map_PolicyRule = map[string]string{ - "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", - "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", -} - -func (PolicyRule) SwaggerDoc() map[string]string { - return map_PolicyRule -} - -var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this Role", -} - -func (Role) SwaggerDoc() map[string]string { - return map_Role -} - -var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", - "metadata": "Standard object's metadata.", - "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", -} - -func (RoleBinding) SwaggerDoc() map[string]string { - return map_RoleBinding -} - -var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings", - "metadata": "Standard object's metadata.", - "items": "Items is a list of RoleBindings", -} - -func (RoleBindingList) SwaggerDoc() map[string]string { - return map_RoleBindingList -} - -var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles", - "metadata": "Standard object's metadata.", - "items": "Items is a list of Roles", -} - -func (RoleList) SwaggerDoc() map[string]string { - return map_RoleList -} - -var map_RoleRef = map[string]string{ - "": "RoleRef contains information that points to the role being used", - "apiGroup": "APIGroup is the group for the resource being referenced", - "kind": "Kind is the type of resource being referenced", - "name": "Name is the name of resource being referenced", -} - -func (RoleRef) SwaggerDoc() map[string]string { - return map_RoleRef -} - -var map_Subject = map[string]string{ - "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", - "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", - "apiGroup": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", - "name": "Name of the object being referenced.", - "namespace": "Namespace of the referenced object. 
If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", -} - -func (Subject) SwaggerDoc() map[string]string { - return map_Subject -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go deleted file mode 100644 index 07eb321..0000000 --- a/vendor/k8s.io/api/rbac/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,389 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { - *out = *in - if in.ClusterRoleSelectors != nil { - in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors - *out = make([]metav1.LabelSelector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. -func (in *AggregationRule) DeepCopy() *AggregationRule { - if in == nil { - return nil - } - out := new(AggregationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AggregationRule != nil { - in, out := &in.AggregationRule, &out.AggregationRule - *out = new(AggregationRule) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. -func (in *ClusterRole) DeepCopy() *ClusterRole { - if in == nil { - return nil - } - out := new(ClusterRole) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRole) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - out.RoleRef = in.RoleRef - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. -func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { - if in == nil { - return nil - } - out := new(ClusterRoleBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. -func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { - if in == nil { - return nil - } - out := new(ClusterRoleBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRole, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. -func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { - if in == nil { - return nil - } - out := new(ClusterRoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceNames != nil { - in, out := &in.ResourceNames, &out.ResourceNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NonResourceURLs != nil { - in, out := &in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. -func (in *PolicyRule) DeepCopy() *PolicyRule { - if in == nil { - return nil - } - out := new(PolicyRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Role) DeepCopyInto(out *Role) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. -func (in *Role) DeepCopy() *Role { - if in == nil { - return nil - } - out := new(Role) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Role) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - out.RoleRef = in.RoleRef - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. -func (in *RoleBinding) DeepCopy() *RoleBinding { - if in == nil { - return nil - } - out := new(RoleBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RoleBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. 
-func (in *RoleBindingList) DeepCopy() *RoleBindingList { - if in == nil { - return nil - } - out := new(RoleBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleList) DeepCopyInto(out *RoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. -func (in *RoleList) DeepCopy() *RoleList { - if in == nil { - return nil - } - out := new(RoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleRef) DeepCopyInto(out *RoleRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. -func (in *RoleRef) DeepCopy() *RoleRef { - if in == nil { - return nil - } - out := new(RoleRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Subject) DeepCopyInto(out *Subject) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. -func (in *Subject) DeepCopy() *Subject { - if in == nil { - return nil - } - out := new(Subject) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/doc.go b/vendor/k8s.io/api/rbac/v1alpha1/doc.go deleted file mode 100644 index 5236a47..0000000 --- a/vendor/k8s.io/api/rbac/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=rbac.authorization.k8s.io -package v1alpha1 // import "k8s.io/api/rbac/v1alpha1" diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go b/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go deleted file mode 100644 index 71eced8..0000000 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.pb.go +++ /dev/null @@ -1,2749 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func 
(*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func init() { - proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1alpha1.AggregationRule") - proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRole") - proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBinding") - proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleBindingList") - proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1alpha1.ClusterRoleList") - proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1alpha1.PolicyRule") - proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1alpha1.Role") - proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1alpha1.RoleBinding") - proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1alpha1.RoleBindingList") - proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1alpha1.RoleList") - proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1alpha1.RoleRef") - proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1alpha1.Subject") -} -func (m *AggregationRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterRole) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AggregationRule != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Subjects) > 0 { - for 
_, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - return i, nil -} - -func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PolicyRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x32 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *Role) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != 
nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil -} - -func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, 
i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m *Subject) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIVersion))) - i += copy(dAtA[i:], m.APIVersion) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AggregationRule) Size() (n int) { - var l int - _ = l - if len(m.ClusterRoleSelectors) > 0 { - for _, e := range m.ClusterRoleSelectors { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterRole) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AggregationRule != nil { - l = m.AggregationRule.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClusterRoleBinding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subjects) > 0 { - for _, e := range m.Subjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.RoleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterRoleBindingList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterRoleList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PolicyRule) Size() (n int) { - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range 
m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Role) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleBinding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subjects) > 0 { - for _, e := range m.Subjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.RoleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RoleBindingList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleRef) Size() (n int) { - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *Subject) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIVersion) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AggregationRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRole) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRoleBinding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, - `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", 
"RoleRef", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRoleBindingList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRoleList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PolicyRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PolicyRule{`, - `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, - `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, - `}`, - }, "") - return s -} -func (this *Role) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Role{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleBinding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, - `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleBindingList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleRef{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - 
`Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Subject) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Subject{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `APIVersion:` + fmt.Sprintf("%v", this.APIVersion) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AggregationRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) - if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRole) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - 
postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AggregationRule == nil { - m.AggregationRule = &AggregationRule{} - } - if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterRoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterRole{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PolicyRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - 
return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ResourceNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b 
< 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Role: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleBinding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen 
|= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleBindingList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - 
} - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Role{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if 
postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Subject) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Subject: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIVersion", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - m.APIVersion = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during 
unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 830 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x8f, 0xe3, 0x44, - 0x14, 0xce, 0x64, 0x13, 0x36, 0x99, 0x25, 0x0a, 0x37, 0x9c, 0x90, 0xb5, 0x42, 0xce, 0x62, 0x81, - 0x74, 0x88, 0xc3, 0x66, 0x17, 0x04, 0x34, 0x14, 0xf1, 0x15, 0x28, 0x10, 0xf6, 0x96, 0x39, 0x71, - 0x05, 0xa2, 0x60, 0xe2, 0xcc, 0x39, 0x43, 0x6c, 0x8f, 0x35, 0x63, 0x47, 0x3a, 0xd1, 0xd0, 0xd0, - 0x22, 0x1a, 0x0a, 0x7a, 0x5a, 0x1a, 0x28, 0xf9, 0x07, 0x96, 0xee, 0xca, 0xad, 0x22, 0xd6, 0xfc, - 0x21, 0x20, 0x8f, 0xed, 0xd8, 0xf9, 0x45, 0x52, 0x45, 0x42, 0xba, 0x2a, 0x99, 0xf7, 0xbe, 0xf7, - 0xbd, 0xf7, 0xbe, 0x99, 0xf7, 0x0c, 0xfb, 0xd3, 0x0f, 0xa5, 0xc9, 0xb8, 0x35, 0x8d, 0x47, 0x54, - 0x04, 0x34, 0xa2, 0xd2, 0x9a, 0xd1, 0x60, 0xcc, 0x85, 0x95, 0x3b, 0x48, 0xc8, 0x2c, 0x31, 0x22, - 0x8e, 0x35, 0x3b, 0x27, 0x5e, 0x38, 0x21, 0xe7, 0x96, 0x4b, 0x03, 0x2a, 0x48, 0x44, 0xc7, 0x66, - 0x28, 0x78, 0xc4, 0x91, 0x96, 0x21, 0x4d, 0x12, 0x32, 0x33, 0x45, 0x9a, 0x05, 0xf2, 0xf4, 0x6d, - 0x97, 0x45, 0x93, 0x78, 0x64, 0x3a, 0xdc, 0xb7, 0x5c, 0xee, 0x72, 0x4b, 0x05, 0x8c, 0xe2, 0x27, - 0xea, 0xa4, 0x0e, 0xea, 0x5f, 0x46, 0x74, 0xfa, 0x5e, 0x99, 0xd2, 0x27, 0xce, 0x84, 0x05, 0x54, - 0x3c, 0xb5, 0xc2, 0xa9, 0x9b, 0x1a, 0xa4, 0xe5, 0xd3, 0x88, 0x58, 0xb3, 0xb5, 0xf4, 0xa7, 0xd6, - 0xb6, 0x28, 0x11, 0x07, 0x11, 0xf3, 0xe9, 0x5a, 0xc0, 0xfb, 0xbb, 0x02, 0xa4, 0x33, 0xa1, 0x3e, - 0x59, 0x8d, 0x33, 0x7e, 0x06, 0xb0, 0xdb, 0x77, 0x5d, 0x41, 0x5d, 0x12, 0x31, 0x1e, 0xe0, 0xd8, - 0xa3, 0xe8, 0x7b, 0x00, 0xef, 0x3a, 0x5e, 0x2c, 0x23, 0x2a, 0x30, 0xf7, 0xe8, 0x23, 0xea, 0x51, - 0x27, 0xe2, 0x42, 0x6a, 0xe0, 0xec, 0xe8, 0xde, 0xc9, 0xc5, 0xbb, 0x66, 0xa9, 0xcd, 0x22, 0x97, - 0x19, 0x4e, 0xdd, 0xd4, 0x20, 0xcd, 0xb4, 0x25, 0x73, 0x76, 0x6e, 0x0e, 0xc9, 0x88, 0x7a, 0x45, - 0xac, 0xfd, 0xea, 0xf5, 0xbc, 0x57, 0x4b, 0xe6, 0xbd, 0xbb, 0x0f, 0x36, 0x10, 0xe3, 0x8d, 0xe9, - 0x8c, 0x5f, 0xea, 0xf0, 0xa4, 0x02, 0x47, 0x5f, 0xc3, 0x56, 0x4a, 0x3e, 0x26, 0x11, 0xd1, 0xc0, - 0x19, 0xb8, 0x77, 0x72, 0xf1, 0xce, 0x7e, 0xa5, 0x3c, 0x1c, 0x7d, 0x43, 0x9d, 0xe8, 0x33, 0x1a, - 0x11, 0x1b, 0xe5, 0x75, 0xc0, 0xd2, 0x86, 0x17, 0xac, 0x68, 0x00, 0x9b, 0x22, 0xf6, 0xa8, 0xd4, - 0xea, 0xaa, 0xd3, 0xd7, 0xcd, 0x6d, 0xaf, 0xc0, 0xbc, 0xe2, 0x1e, 0x73, 0x9e, 0xa6, 0x72, 0xd9, - 0x9d, 0x9c, 0xb2, 0x99, 0x9e, 0x24, 0xce, 0x18, 0xd0, 0x04, 0x76, 0xc9, 0xb2, 0xae, 0xda, 0x91, - 0xaa, 0xf9, 0xcd, 0xed, 0xa4, 0x2b, 0x17, 0x61, 0xbf, 0x9c, 0xcc, 0x7b, 0xab, 0xb7, 0x83, 0x57, - 0x69, 0x8d, 0x9f, 0xea, 0x10, 0x55, 0x64, 0xb2, 0x59, 0x30, 0x66, 0x81, 0x7b, 0x00, 0xb5, 0x1e, - 0xc2, 0x96, 0x8c, 0x95, 0xa3, 0x10, 0xec, 0xb5, 0xed, 0xbd, 0x3d, 0xca, 0x90, 0xf6, 0x4b, 0x39, - 0x65, 0x2b, 0x37, 0x48, 0xbc, 0x20, 0x41, 0x43, 0x78, 0x2c, 0xb8, 0x47, 0x31, 0x7d, 0x92, 0x6b, - 0xf5, 0x1f, 0x7c, 0x38, 0x03, 0xda, 0xdd, 0x9c, 0xef, 0x38, 0x37, 0xe0, 0x82, 0xc2, 0xf8, 0x13, - 0xc0, 0x57, 0xd6, 0x75, 0x19, 0x32, 0x19, 0xa1, 0xaf, 0xd6, 0xb4, 0x31, 0xf7, 0x7c, 0xd4, 0x4c, - 0x66, 0xca, 0x2c, 0xda, 0x28, 0x2c, 0x15, 0x5d, 0x3e, 0x87, 0x4d, 0x16, 0x51, 0xbf, 0x10, 0xe5, - 0xfe, 0xf6, 0x26, 0xd6, 0xcb, 0x2b, 0x5f, 0xd3, 0x20, 0xa5, 0xc0, 0x19, 0x93, 0xf1, 0x07, 0x80, - 0xdd, 0x0a, 0xf8, 0x00, 0x4d, 0x7c, 0xb2, 0xdc, 0xc4, 0x1b, 0xfb, 0x35, 0xb1, 0xb9, 0xfa, 0x7f, - 0x00, 
0x84, 0xe5, 0xc0, 0xa0, 0x1e, 0x6c, 0xce, 0xa8, 0x18, 0x65, 0xfb, 0xa4, 0x6d, 0xb7, 0x53, - 0xfc, 0xe3, 0xd4, 0x80, 0x33, 0x3b, 0x7a, 0x0b, 0xb6, 0x49, 0xc8, 0x3e, 0x16, 0x3c, 0x0e, 0xa5, - 0x76, 0xa4, 0x40, 0x9d, 0x64, 0xde, 0x6b, 0xf7, 0xaf, 0x06, 0x99, 0x11, 0x97, 0xfe, 0x14, 0x2c, - 0xa8, 0xe4, 0xb1, 0x70, 0xa8, 0xd4, 0x1a, 0x25, 0x18, 0x17, 0x46, 0x5c, 0xfa, 0xd1, 0x07, 0xb0, - 0x53, 0x1c, 0x2e, 0x89, 0x4f, 0xa5, 0xd6, 0x54, 0x01, 0x77, 0x92, 0x79, 0xaf, 0x83, 0xab, 0x0e, - 0xbc, 0x8c, 0x43, 0x1f, 0xc1, 0x6e, 0xc0, 0x83, 0x02, 0xf2, 0x05, 0x1e, 0x4a, 0xed, 0x05, 0x15, - 0xaa, 0x66, 0xf4, 0x72, 0xd9, 0x85, 0x57, 0xb1, 0xc6, 0xef, 0x00, 0x36, 0xfe, 0x77, 0x3b, 0xcc, - 0xf8, 0xa1, 0x0e, 0x4f, 0x9e, 0xaf, 0x94, 0xca, 0x4a, 0x49, 0xc7, 0xf0, 0xb0, 0xbb, 0x64, 0xff, - 0x31, 0xdc, 0xbd, 0x44, 0x7e, 0x05, 0xb0, 0x75, 0xa0, 0xed, 0xf1, 0x60, 0xb9, 0x6c, 0x7d, 0x47, - 0xd9, 0x9b, 0xeb, 0xfd, 0x16, 0x16, 0x37, 0x80, 0xee, 0xc3, 0x56, 0x31, 0xf1, 0xaa, 0xda, 0x76, - 0x99, 0xbd, 0x58, 0x0a, 0x78, 0x81, 0x40, 0x67, 0xb0, 0x31, 0x65, 0xc1, 0x58, 0xab, 0x2b, 0xe4, - 0x8b, 0x39, 0xb2, 0xf1, 0x29, 0x0b, 0xc6, 0x58, 0x79, 0x52, 0x44, 0x40, 0xfc, 0xec, 0x93, 0x5c, - 0x41, 0xa4, 0xb3, 0x8e, 0x95, 0xc7, 0xf8, 0x0d, 0xc0, 0xe3, 0xfc, 0x3d, 0x2d, 0xf8, 0xc0, 0x56, - 0xbe, 0x0b, 0x08, 0x49, 0xc8, 0x1e, 0x53, 0x21, 0x19, 0x0f, 0xf2, 0xbc, 0x8b, 0x97, 0xde, 0xbf, - 0x1a, 0xe4, 0x1e, 0x5c, 0x41, 0xed, 0xae, 0x01, 0x59, 0xb0, 0x9d, 0xfe, 0xca, 0x90, 0x38, 0x54, - 0x6b, 0x28, 0xd8, 0x9d, 0x1c, 0xd6, 0xbe, 0x2c, 0x1c, 0xb8, 0xc4, 0xd8, 0xe6, 0xf5, 0xad, 0x5e, - 0x7b, 0x76, 0xab, 0xd7, 0x6e, 0x6e, 0xf5, 0xda, 0x77, 0x89, 0x0e, 0xae, 0x13, 0x1d, 0x3c, 0x4b, - 0x74, 0x70, 0x93, 0xe8, 0xe0, 0xaf, 0x44, 0x07, 0x3f, 0xfe, 0xad, 0xd7, 0xbe, 0x6c, 0x15, 0xe2, - 0xff, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x73, 0x15, 0x10, 0x29, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto b/vendor/k8s.io/api/rbac/v1alpha1/generated.proto deleted file mode 100644 index cde3aaa..0000000 --- a/vendor/k8s.io/api/rbac/v1alpha1/generated.proto +++ /dev/null @@ -1,199 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.rbac.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole -message AggregationRule { - // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. 
- // If any of the selectors match, then the ClusterRole's permissions will be added - // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; -} - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -message ClusterRole { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this ClusterRole - repeated PolicyRule rules = 2; - - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. - // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be - // stomped by the controller. - // +optional - optional AggregationRule aggregationRule = 3; -} - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -message ClusterRoleBinding { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - // +optional - repeated Subject subjects = 2; - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional RoleRef roleRef = 3; -} - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -message ClusterRoleBindingList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of ClusterRoleBindings - repeated ClusterRoleBinding items = 2; -} - -// ClusterRoleList is a collection of ClusterRoles -message ClusterRoleList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of ClusterRoles - repeated ClusterRole items = 2; -} - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - repeated string verbs = 1; - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. - // +optional - repeated string apiGroups = 3; - - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - // +optional - repeated string resources = 4; - - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - // +optional - repeated string resourceNames = 5; - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. 
- // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - // +optional - repeated string nonResourceURLs = 6; -} - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -message Role { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this Role - repeated PolicyRule rules = 2; -} - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. -// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -message RoleBinding { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - // +optional - repeated Subject subjects = 2; - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional RoleRef roleRef = 3; -} - -// RoleBindingList is a collection of RoleBindings -message RoleBindingList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of RoleBindings - repeated RoleBinding items = 2; -} - -// RoleList is a collection of Roles -message RoleList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of Roles - repeated Role items = 2; -} - -// RoleRef contains information that points to the role being used -message RoleRef { - // APIGroup is the group for the resource being referenced - optional string apiGroup = 1; - - // Kind is the type of resource being referenced - optional string kind = 2; - - // Name is the name of resource being referenced - optional string name = 3; -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -message Subject { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - optional string kind = 1; - - // APIVersion holds the API group and version of the referenced subject. - // Defaults to "v1" for ServiceAccount subjects. - // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. - // +k8s:conversion-gen=false - // +optional - optional string apiVersion = 2; - - // Name of the object being referenced. - optional string name = 3; - - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - // +optional - optional string namespace = 4; -} - diff --git a/vendor/k8s.io/api/rbac/v1alpha1/register.go b/vendor/k8s.io/api/rbac/v1alpha1/register.go deleted file mode 100644 index 0c69776..0000000 --- a/vendor/k8s.io/api/rbac/v1alpha1/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-const GroupName = "rbac.authorization.k8s.io"
-
-// SchemeGroupVersion is group version used to register these objects
-var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
-	return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
-var (
-	// TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api.
-	// localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes.
-	SchemeBuilder      = runtime.NewSchemeBuilder(addKnownTypes)
-	localSchemeBuilder = &SchemeBuilder
-	AddToScheme        = localSchemeBuilder.AddToScheme
-)
-
-// Adds the list of known types to the given scheme.
-func addKnownTypes(scheme *runtime.Scheme) error {
-	scheme.AddKnownTypes(SchemeGroupVersion,
-		&Role{},
-		&RoleBinding{},
-		&RoleBindingList{},
-		&RoleList{},
-
-		&ClusterRole{},
-		&ClusterRoleBinding{},
-		&ClusterRoleBindingList{},
-		&ClusterRoleList{},
-	)
-	metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
-	return nil
-}
diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types.go b/vendor/k8s.io/api/rbac/v1alpha1/types.go
deleted file mode 100644
index 398d6a1..0000000
--- a/vendor/k8s.io/api/rbac/v1alpha1/types.go
+++ /dev/null
@@ -1,237 +0,0 @@
-/*
-Copyright 2016 The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package v1alpha1
-
-import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// Authorization is calculated against
-// 1. evaluation of ClusterRoleBindings - short circuit on match
-// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
-// 3. deny by default
-
-const (
-	APIGroupAll    = "*"
-	ResourceAll    = "*"
-	VerbAll        = "*"
-	NonResourceAll = "*"
-
-	GroupKind          = "Group"
-	ServiceAccountKind = "ServiceAccount"
-	UserKind           = "User"
-
-	// AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false"
-	AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate"
-)
-
-// Authorization is calculated against
-// 1. evaluation of ClusterRoleBindings - short circuit on match
-// 2. evaluation of RoleBindings in the namespace requested - short circuit on match
-// 3.
deny by default - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. - // +optional - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,3,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. ResourceAll represents all resources. - // +optional - Resources []string `json:"resources,omitempty" protobuf:"bytes,4,rep,name=resources"` - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - // +optional - ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,5,rep,name=resourceNames"` - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - // +optional - NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,6,rep,name=nonResourceURLs"` -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -type Subject struct { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // APIVersion holds the API group and version of the referenced subject. - // Defaults to "v1" for ServiceAccount subjects. - // Defaults to "rbac.authorization.k8s.io/v1alpha1" for User and Group subjects. - // +k8s:conversion-gen=false - // +optional - APIVersion string `json:"apiVersion,omitempty" protobuf:"bytes,2,opt.name=apiVersion"` - // Name of the object being referenced. - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. 
- // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` -} - -// RoleRef contains information that points to the role being used -type RoleRef struct { - // APIGroup is the group for the resource being referenced - APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced - Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -type Role struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Rules holds all the PolicyRules for this Role - Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. -// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -type RoleBinding struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Subjects holds references to the objects the role applies to. - // +optional - Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleBindingList is a collection of RoleBindings -type RoleBindingList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of RoleBindings - Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleList is a collection of Roles -type RoleList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of Roles - Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -type ClusterRole struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Rules holds all the PolicyRules for this ClusterRole - Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` - - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. - // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be - // stomped by the controller. - // +optional - AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` -} - -// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole -type AggregationRule struct { - // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. - // If any of the selectors match, then the ClusterRole's permissions will be added - // +optional - ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -type ClusterRoleBinding struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Subjects holds references to the objects the role applies to. - // +optional - Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -type ClusterRoleBindingList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ClusterRoleBindings - Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleList is a collection of ClusterRoles -type ClusterRoleList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ClusterRoles - Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 1d6ef30..0000000 --- a/vendor/k8s.io/api/rbac/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", - "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", -} - -func (AggregationRule) SwaggerDoc() map[string]string { - return map_AggregationRule -} - -var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", - "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", -} - -func (ClusterRole) SwaggerDoc() map[string]string { - return map_ClusterRole -} - -var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, and adds who information via Subject.", - "metadata": "Standard object's metadata.", - "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", -} - -func (ClusterRoleBinding) SwaggerDoc() map[string]string { - return map_ClusterRoleBinding -} - -var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", - "metadata": "Standard object's metadata.", - "items": "Items is a list of ClusterRoleBindings", -} - -func (ClusterRoleBindingList) SwaggerDoc() map[string]string { - return map_ClusterRoleBindingList -} - -var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles", - "metadata": "Standard object's metadata.", - "items": "Items is a list of ClusterRoles", -} - -func (ClusterRoleList) SwaggerDoc() map[string]string { - return map_ClusterRoleList -} - -var map_PolicyRule = map[string]string{ - "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", - "resources": "Resources is a list of resources this rule applies to. ResourceAll represents all resources.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", - "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path This name is intentionally different than the internal type so that the DefaultConvert works nicely and because the ordering may be different. Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", -} - -func (PolicyRule) SwaggerDoc() map[string]string { - return map_PolicyRule -} - -var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this Role", -} - -func (Role) SwaggerDoc() map[string]string { - return map_Role -} - -var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", - "metadata": "Standard object's metadata.", - "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", -} - -func (RoleBinding) SwaggerDoc() map[string]string { - return map_RoleBinding -} - -var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings", - "metadata": "Standard object's metadata.", - "items": "Items is a list of RoleBindings", -} - -func (RoleBindingList) SwaggerDoc() map[string]string { - return map_RoleBindingList -} - -var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles", - "metadata": "Standard object's metadata.", - "items": "Items is a list of Roles", -} - -func (RoleList) SwaggerDoc() map[string]string { - return map_RoleList -} - -var map_RoleRef = map[string]string{ - "": "RoleRef contains information that points to the role being used", - "apiGroup": "APIGroup is the group for the resource being referenced", - "kind": "Kind is the type of resource being referenced", - "name": "Name is the name of resource being referenced", -} - -func (RoleRef) SwaggerDoc() map[string]string { - return map_RoleRef -} - -var map_Subject = map[string]string{ - "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", - "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", - "apiVersion": "APIVersion holds the API group and version of the referenced subject. Defaults to \"v1\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io/v1alpha1\" for User and Group subjects.", - "name": "Name of the object being referenced.", - "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", -} - -func (Subject) SwaggerDoc() map[string]string { - return map_Subject -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 97f6333..0000000 --- a/vendor/k8s.io/api/rbac/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,389 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { - *out = *in - if in.ClusterRoleSelectors != nil { - in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors - *out = make([]v1.LabelSelector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. -func (in *AggregationRule) DeepCopy() *AggregationRule { - if in == nil { - return nil - } - out := new(AggregationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AggregationRule != nil { - in, out := &in.AggregationRule, &out.AggregationRule - *out = new(AggregationRule) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. -func (in *ClusterRole) DeepCopy() *ClusterRole { - if in == nil { - return nil - } - out := new(ClusterRole) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRole) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - out.RoleRef = in.RoleRef - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. -func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { - if in == nil { - return nil - } - out := new(ClusterRoleBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. -func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { - if in == nil { - return nil - } - out := new(ClusterRoleBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRole, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. -func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { - if in == nil { - return nil - } - out := new(ClusterRoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceNames != nil { - in, out := &in.ResourceNames, &out.ResourceNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NonResourceURLs != nil { - in, out := &in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. -func (in *PolicyRule) DeepCopy() *PolicyRule { - if in == nil { - return nil - } - out := new(PolicyRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Role) DeepCopyInto(out *Role) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. -func (in *Role) DeepCopy() *Role { - if in == nil { - return nil - } - out := new(Role) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Role) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - out.RoleRef = in.RoleRef - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. -func (in *RoleBinding) DeepCopy() *RoleBinding { - if in == nil { - return nil - } - out := new(RoleBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RoleBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. -func (in *RoleBindingList) DeepCopy() *RoleBindingList { - if in == nil { - return nil - } - out := new(RoleBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleList) DeepCopyInto(out *RoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. -func (in *RoleList) DeepCopy() *RoleList { - if in == nil { - return nil - } - out := new(RoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleRef) DeepCopyInto(out *RoleRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. -func (in *RoleRef) DeepCopy() *RoleRef { - if in == nil { - return nil - } - out := new(RoleRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Subject) DeepCopyInto(out *Subject) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. 
-func (in *Subject) DeepCopy() *Subject { - if in == nil { - return nil - } - out := new(Subject) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/rbac/v1beta1/doc.go b/vendor/k8s.io/api/rbac/v1beta1/doc.go deleted file mode 100644 index 4b77c9c..0000000 --- a/vendor/k8s.io/api/rbac/v1beta1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=rbac.authorization.k8s.io -package v1beta1 // import "k8s.io/api/rbac/v1beta1" diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go b/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go deleted file mode 100644 index 71e5799..0000000 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.pb.go +++ /dev/null @@ -1,2748 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto - - It has these top-level messages: - AggregationRule - ClusterRole - ClusterRoleBinding - ClusterRoleBindingList - ClusterRoleList - PolicyRule - Role - RoleBinding - RoleBindingList - RoleList - RoleRef - Subject -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_apimachinery_pkg_apis_meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *AggregationRule) Reset() { *m = AggregationRule{} } -func (*AggregationRule) ProtoMessage() {} -func (*AggregationRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *ClusterRole) Reset() { *m = ClusterRole{} } -func (*ClusterRole) ProtoMessage() {} -func (*ClusterRole) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *ClusterRoleBinding) Reset() { *m = ClusterRoleBinding{} } -func (*ClusterRoleBinding) ProtoMessage() {} -func (*ClusterRoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *ClusterRoleBindingList) Reset() { *m = ClusterRoleBindingList{} } -func (*ClusterRoleBindingList) ProtoMessage() {} -func (*ClusterRoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *ClusterRoleList) Reset() { *m = ClusterRoleList{} } -func (*ClusterRoleList) ProtoMessage() {} -func (*ClusterRoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *PolicyRule) Reset() { *m = PolicyRule{} } -func (*PolicyRule) ProtoMessage() {} -func (*PolicyRule) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *Role) Reset() { *m = Role{} } -func (*Role) ProtoMessage() {} -func (*Role) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *RoleBinding) Reset() { *m = RoleBinding{} } -func (*RoleBinding) ProtoMessage() {} -func (*RoleBinding) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func (m *RoleBindingList) Reset() { *m = RoleBindingList{} } -func (*RoleBindingList) ProtoMessage() {} -func (*RoleBindingList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{8} } - -func (m *RoleList) Reset() { *m = RoleList{} } -func (*RoleList) ProtoMessage() {} -func (*RoleList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{9} } - -func (m *RoleRef) Reset() { *m = RoleRef{} } -func (*RoleRef) ProtoMessage() {} -func (*RoleRef) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{10} } - -func (m *Subject) Reset() { *m = Subject{} } -func (*Subject) ProtoMessage() {} -func (*Subject) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{11} } - -func init() { - proto.RegisterType((*AggregationRule)(nil), "k8s.io.api.rbac.v1beta1.AggregationRule") - proto.RegisterType((*ClusterRole)(nil), "k8s.io.api.rbac.v1beta1.ClusterRole") - proto.RegisterType((*ClusterRoleBinding)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBinding") - proto.RegisterType((*ClusterRoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleBindingList") - proto.RegisterType((*ClusterRoleList)(nil), "k8s.io.api.rbac.v1beta1.ClusterRoleList") - proto.RegisterType((*PolicyRule)(nil), "k8s.io.api.rbac.v1beta1.PolicyRule") - proto.RegisterType((*Role)(nil), "k8s.io.api.rbac.v1beta1.Role") - proto.RegisterType((*RoleBinding)(nil), "k8s.io.api.rbac.v1beta1.RoleBinding") - proto.RegisterType((*RoleBindingList)(nil), "k8s.io.api.rbac.v1beta1.RoleBindingList") - proto.RegisterType((*RoleList)(nil), "k8s.io.api.rbac.v1beta1.RoleList") - proto.RegisterType((*RoleRef)(nil), "k8s.io.api.rbac.v1beta1.RoleRef") - proto.RegisterType((*Subject)(nil), "k8s.io.api.rbac.v1beta1.Subject") -} -func (m *AggregationRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := 
m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AggregationRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.ClusterRoleSelectors) > 0 { - for _, msg := range m.ClusterRoleSelectors { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterRole) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRole) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if m.AggregationRule != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AggregationRule.Size())) - n2, err := m.AggregationRule.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - return i, nil -} - -func (m *ClusterRoleBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n4, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - return i, nil -} - -func (m *ClusterRoleBindingList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n5, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *ClusterRoleList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ClusterRoleList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := 
m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PolicyRule) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PolicyRule) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - dAtA[i] = 0xa - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - dAtA[i] = 0x12 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - dAtA[i] = 0x1a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - dAtA[i] = 0x22 - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - return i, nil -} - -func (m *Role) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Role) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n7, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - if len(m.Rules) > 0 { - for _, msg := range m.Rules { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleBinding) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleBinding) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n8, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - if len(m.Subjects) > 0 { - for _, msg := range m.Subjects { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.RoleRef.Size())) - n9, err := m.RoleRef.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - return i, nil -} - -func (m *RoleBindingList) Marshal() (dAtA []byte, err error) { - 
size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleBindingList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n10, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n11, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n11 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *RoleRef) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RoleRef) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - return i, nil -} - -func (m *Subject) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *Subject) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Kind))) - i += copy(dAtA[i:], m.Kind) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.APIGroup))) - i += copy(dAtA[i:], m.APIGroup) - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) - i += copy(dAtA[i:], m.Name) - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Namespace))) - i += copy(dAtA[i:], m.Namespace) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 
- offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *AggregationRule) Size() (n int) { - var l int - _ = l - if len(m.ClusterRoleSelectors) > 0 { - for _, e := range m.ClusterRoleSelectors { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterRole) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AggregationRule != nil { - l = m.AggregationRule.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *ClusterRoleBinding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subjects) > 0 { - for _, e := range m.Subjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.RoleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *ClusterRoleBindingList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *ClusterRoleList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PolicyRule) Size() (n int) { - var l int - _ = l - if len(m.Verbs) > 0 { - for _, s := range m.Verbs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.APIGroups) > 0 { - for _, s := range m.APIGroups { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Resources) > 0 { - for _, s := range m.Resources { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.ResourceNames) > 0 { - for _, s := range m.ResourceNames { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.NonResourceURLs) > 0 { - for _, s := range m.NonResourceURLs { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *Role) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Rules) > 0 { - for _, e := range m.Rules { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleBinding) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Subjects) > 0 { - for _, e := range m.Subjects { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - l = m.RoleRef.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *RoleBindingList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *RoleRef) Size() (n int) { - var l int - _ = l - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m 
*Subject) Size() (n int) { - var l int - _ = l - l = len(m.Kind) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.APIGroup) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Name) - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Namespace) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *AggregationRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&AggregationRule{`, - `ClusterRoleSelectors:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ClusterRoleSelectors), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRole) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRole{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `AggregationRule:` + strings.Replace(fmt.Sprintf("%v", this.AggregationRule), "AggregationRule", "AggregationRule", 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRoleBinding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, - `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRoleBindingList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRoleBinding", "ClusterRoleBinding", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *ClusterRoleList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&ClusterRoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "ClusterRole", "ClusterRole", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PolicyRule) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PolicyRule{`, - `Verbs:` + fmt.Sprintf("%v", this.Verbs) + `,`, - `APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`, - `Resources:` + fmt.Sprintf("%v", this.Resources) + `,`, - `ResourceNames:` + fmt.Sprintf("%v", this.ResourceNames) + `,`, - `NonResourceURLs:` + fmt.Sprintf("%v", this.NonResourceURLs) + `,`, - `}`, - }, "") - return s -} -func (this *Role) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Role{`, - `ObjectMeta:` 
+ strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Rules:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Rules), "PolicyRule", "PolicyRule", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleBinding) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleBinding{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Subjects:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Subjects), "Subject", "Subject", 1), `&`, ``, 1) + `,`, - `RoleRef:` + strings.Replace(strings.Replace(this.RoleRef.String(), "RoleRef", "RoleRef", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleBindingList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleBindingList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "RoleBinding", "RoleBinding", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "Role", "Role", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *RoleRef) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&RoleRef{`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `}`, - }, "") - return s -} -func (this *Subject) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&Subject{`, - `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, - `APIGroup:` + fmt.Sprintf("%v", this.APIGroup) + `,`, - `Name:` + fmt.Sprintf("%v", this.Name) + `,`, - `Namespace:` + fmt.Sprintf("%v", this.Namespace) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *AggregationRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AggregationRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AggregationRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ClusterRoleSelectors", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - 
return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ClusterRoleSelectors = append(m.ClusterRoleSelectors, k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector{}) - if err := m.ClusterRoleSelectors[len(m.ClusterRoleSelectors)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRole) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRole: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRole: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AggregationRule", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AggregationRule == nil { - m.AggregationRule = &AggregationRule{} - } - if err := m.AggregationRule.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - 
default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBinding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleBindingList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterRoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ClusterRoleList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ClusterRoleList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ClusterRoleList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, ClusterRole{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PolicyRule) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PolicyRule: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PolicyRule: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Verbs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Verbs = append(m.Verbs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroups", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroups = append(m.APIGroups, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = 
%d for field ResourceNames", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.ResourceNames = append(m.ResourceNames, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NonResourceURLs", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NonResourceURLs = append(m.NonResourceURLs, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Role) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Role: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Rules", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Rules = append(m.Rules, PolicyRule{}) - if err := m.Rules[len(m.Rules)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = 
postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleBinding) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleBinding: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleBinding: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Subjects", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Subjects = append(m.Subjects, Subject{}) - if err := m.Subjects[len(m.Subjects)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RoleRef", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.RoleRef.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleBindingList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleBindingList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleBindingList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, RoleBinding{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, Role{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RoleRef) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RoleRef: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RoleRef: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > 
l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Subject) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Subject: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Subject: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Kind = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field APIGroup", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.APIGroup = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Namespace", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Namespace = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/rbac/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 808 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbd, 0x6f, 0xfb, 0x44, - 0x18, 0xce, 0xa5, 0x89, 0x12, 0x5f, 0x88, 0xc2, 0xef, 0xa8, 0xc0, 0xaa, 0xc0, 0x89, 0x02, 0x43, - 0xa5, 0x52, 0x9b, 0x16, 0x04, 0x2c, 0x48, 0xd4, 0x0c, 0x50, 0xb5, 0x84, 0xea, 0x2a, 0x18, 0x10, - 0x03, 0x67, 0xe7, 0xea, 0x1e, 0xf1, 0x97, 0xee, 0xec, 0x48, 0x15, 0x0b, 0x0b, 0x1b, 0x03, 0x12, - 0x13, 0x2b, 0x33, 0x13, 0x23, 0x7f, 0x41, 0xc6, 0x8e, 0x9d, 0x22, 0x6a, 0xfe, 0x10, 0xd0, 0xf9, - 0x23, 0xce, 0x67, 0x9b, 0x29, 0x12, 0x12, 0x53, 0x7b, 0xef, 0xfb, 0xbc, 0xcf, 0xfb, 0xbc, 0x8f, - 0xef, 0xde, 0xc0, 0x8f, 0x47, 0x1f, 0x0a, 0x9d, 0x05, 0xc6, 0x28, 0xb6, 0x28, 0xf7, 0x69, 0x44, - 0x85, 0x31, 0xa6, 0xfe, 0x30, 0xe0, 0x46, 0x9e, 0x20, 0x21, 0x33, 0xb8, 0x45, 0x6c, 0x63, 0x7c, - 0x62, 0xd1, 0x88, 0x9c, 0x18, 0x0e, 0xf5, 0x29, 0x27, 0x11, 0x1d, 0xea, 0x21, 0x0f, 0xa2, 0x00, - 0xbd, 0x96, 0x01, 0x75, 0x12, 0x32, 0x5d, 0x02, 0xf5, 0x1c, 0x78, 0x70, 0xec, 0xb0, 0xe8, 0x36, - 0xb6, 0x74, 0x3b, 0xf0, 0x0c, 0x27, 0x70, 0x02, 0x23, 0xc5, 0x5b, 0xf1, 0x4d, 0x7a, 0x4a, 0x0f, - 0xe9, 0x7f, 0x19, 0xcf, 0xc1, 0x7b, 0x65, 0x43, 0x8f, 0xd8, 0xb7, 0xcc, 0xa7, 0xfc, 0xce, 0x08, - 0x47, 0x8e, 0x0c, 0x08, 0xc3, 0xa3, 0x11, 0x31, 0xc6, 0x2b, 0xdd, 0x0f, 0x8c, 
0x4d, 0x55, 0x3c, - 0xf6, 0x23, 0xe6, 0xd1, 0x95, 0x82, 0xf7, 0x9f, 0x2b, 0x10, 0xf6, 0x2d, 0xf5, 0xc8, 0x72, 0x5d, - 0xff, 0x57, 0x00, 0x3b, 0x67, 0x8e, 0xc3, 0xa9, 0x43, 0x22, 0x16, 0xf8, 0x38, 0x76, 0x29, 0xfa, - 0x11, 0xc0, 0x7d, 0xdb, 0x8d, 0x45, 0x44, 0x39, 0x0e, 0x5c, 0x7a, 0x4d, 0x5d, 0x6a, 0x47, 0x01, - 0x17, 0x2a, 0xe8, 0xed, 0x1d, 0xb6, 0x4e, 0xdf, 0xd5, 0x4b, 0x6b, 0x66, 0xbd, 0xf4, 0x70, 0xe4, - 0xc8, 0x80, 0xd0, 0xe5, 0x48, 0xfa, 0xf8, 0x44, 0xbf, 0x24, 0x16, 0x75, 0x8b, 0x5a, 0xf3, 0xf5, - 0xc9, 0xb4, 0x5b, 0x49, 0xa6, 0xdd, 0xfd, 0x4f, 0xd6, 0x10, 0xe3, 0xb5, 0xed, 0xfa, 0xbf, 0x55, - 0x61, 0x6b, 0x0e, 0x8e, 0xbe, 0x85, 0x4d, 0x49, 0x3e, 0x24, 0x11, 0x51, 0x41, 0x0f, 0x1c, 0xb6, - 0x4e, 0xdf, 0xd9, 0x4e, 0xca, 0x17, 0xd6, 0x77, 0xd4, 0x8e, 0x3e, 0xa7, 0x11, 0x31, 0x51, 0xae, - 0x03, 0x96, 0x31, 0x3c, 0x63, 0x45, 0x9f, 0xc1, 0x3a, 0x8f, 0x5d, 0x2a, 0xd4, 0x6a, 0x3a, 0xe9, - 0x9b, 0xfa, 0x86, 0x4b, 0xa0, 0x5f, 0x05, 0x2e, 0xb3, 0xef, 0xa4, 0x5b, 0x66, 0x3b, 0x67, 0xac, - 0xcb, 0x93, 0xc0, 0x19, 0x01, 0x72, 0x60, 0x87, 0x2c, 0xda, 0xaa, 0xee, 0xa5, 0x92, 0x0f, 0x37, - 0x72, 0x2e, 0x7d, 0x06, 0xf3, 0x95, 0x64, 0xda, 0x5d, 0xfe, 0x36, 0x78, 0x99, 0xb5, 0xff, 0x4b, - 0x15, 0xa2, 0x39, 0x93, 0x4c, 0xe6, 0x0f, 0x99, 0xef, 0xec, 0xc0, 0xab, 0x01, 0x6c, 0x8a, 0x38, - 0x4d, 0x14, 0x76, 0xf5, 0x36, 0x8e, 0x76, 0x9d, 0x01, 0xcd, 0x97, 0x73, 0xc6, 0x66, 0x1e, 0x10, - 0x78, 0xc6, 0x81, 0x2e, 0x60, 0x83, 0x07, 0x2e, 0xc5, 0xf4, 0x26, 0x77, 0x6a, 0x33, 0x1d, 0xce, - 0x70, 0x66, 0x27, 0xa7, 0x6b, 0xe4, 0x01, 0x5c, 0x30, 0xf4, 0x27, 0x00, 0xbe, 0xba, 0xea, 0xca, - 0x25, 0x13, 0x11, 0xfa, 0x66, 0xc5, 0x19, 0x7d, 0xcb, 0x0b, 0xcd, 0x44, 0xe6, 0xcb, 0x6c, 0x8a, - 0x22, 0x32, 0xe7, 0xca, 0x15, 0xac, 0xb3, 0x88, 0x7a, 0x85, 0x25, 0x47, 0x1b, 0x67, 0x58, 0x55, - 0x57, 0xde, 0xa4, 0x73, 0xc9, 0x80, 0x33, 0xa2, 0xfe, 0x9f, 0x00, 0x76, 0xe6, 0xc0, 0x3b, 0x98, - 0xe1, 0x7c, 0x71, 0x86, 0xb7, 0xb6, 0x9a, 0x61, 0xbd, 0xf8, 0x7f, 0x00, 0x84, 0xe5, 0x5b, 0x41, - 0x5d, 0x58, 0x1f, 0x53, 0x6e, 0x65, 0x9b, 0x44, 0x31, 0x15, 0x89, 0xff, 0x4a, 0x06, 0x70, 0x16, - 0x47, 0x47, 0x50, 0x21, 0x21, 0xfb, 0x94, 0x07, 0x71, 0x98, 0xb5, 0x57, 0xcc, 0x76, 0x32, 0xed, - 0x2a, 0x67, 0x57, 0xe7, 0x59, 0x10, 0x97, 0x79, 0x09, 0xe6, 0x54, 0x04, 0x31, 0xb7, 0xa9, 0x50, - 0xf7, 0x4a, 0x30, 0x2e, 0x82, 0xb8, 0xcc, 0xa3, 0x0f, 0x60, 0xbb, 0x38, 0x0c, 0x88, 0x47, 0x85, - 0x5a, 0x4b, 0x0b, 0x5e, 0x24, 0xd3, 0x6e, 0x1b, 0xcf, 0x27, 0xf0, 0x22, 0x0e, 0x7d, 0x04, 0x3b, - 0x7e, 0xe0, 0x17, 0x90, 0x2f, 0xf1, 0xa5, 0x50, 0xeb, 0x69, 0x69, 0xfa, 0x3e, 0x07, 0x8b, 0x29, - 0xbc, 0x8c, 0xed, 0xff, 0x01, 0x60, 0xed, 0xbf, 0xb6, 0xbd, 0xfa, 0x3f, 0x55, 0x61, 0xeb, 0xff, - 0x6d, 0x32, 0xdb, 0x26, 0xf2, 0x09, 0xee, 0x76, 0x8d, 0x6c, 0xfd, 0x04, 0x9f, 0xdf, 0x1f, 0xbf, - 0x03, 0xd8, 0xdc, 0xd1, 0xe2, 0x30, 0x17, 0x55, 0xbf, 0xf1, 0xb4, 0xea, 0xf5, 0x72, 0xbf, 0x87, - 0x85, 0xff, 0xe8, 0x6d, 0xd8, 0x2c, 0x1e, 0x7b, 0x2a, 0x56, 0x29, 0x9b, 0x17, 0xfb, 0x00, 0xcf, - 0x10, 0xa8, 0x07, 0x6b, 0x23, 0xe6, 0x0f, 0xd5, 0x6a, 0x8a, 0x7c, 0x29, 0x47, 0xd6, 0x2e, 0x98, - 0x3f, 0xc4, 0x69, 0x46, 0x22, 0x7c, 0xe2, 0x65, 0x3f, 0xc4, 0x73, 0x08, 0xf9, 0xcc, 0x71, 0x9a, - 0x91, 0x5e, 0x35, 0xf2, 0xcb, 0x34, 0xe3, 0x03, 0x1b, 0xf9, 0xe6, 0xf5, 0x55, 0xb7, 0xd1, 0xf7, - 0x74, 0x77, 0x64, 0x40, 0x45, 0xfe, 0x15, 0x21, 0xb1, 0xa9, 0x5a, 0x4b, 0x61, 0x2f, 0x72, 0x98, - 0x32, 0x28, 0x12, 0xb8, 0xc4, 0x98, 0xc7, 0x93, 0x47, 0xad, 0x72, 0xff, 0xa8, 0x55, 0x1e, 0x1e, - 0xb5, 0xca, 0x0f, 0x89, 0x06, 0x26, 0x89, 0x06, 0xee, 0x13, 0x0d, 0x3c, 0x24, 0x1a, 0xf8, 0x2b, - 0xd1, 
0xc0, 0xcf, 0x7f, 0x6b, 0x95, 0xaf, 0x1b, 0xb9, 0xeb, 0xff, 0x06, 0x00, 0x00, 0xff, 0xff, - 0x37, 0x8f, 0x77, 0xcd, 0x15, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/rbac/v1beta1/generated.proto b/vendor/k8s.io/api/rbac/v1beta1/generated.proto deleted file mode 100644 index 27bd30c..0000000 --- a/vendor/k8s.io/api/rbac/v1beta1/generated.proto +++ /dev/null @@ -1,198 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.rbac.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole -message AggregationRule { - // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. - // If any of the selectors match, then the ClusterRole's permissions will be added - // +optional - repeated k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector clusterRoleSelectors = 1; -} - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -message ClusterRole { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this ClusterRole - repeated PolicyRule rules = 2; - - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. - // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be - // stomped by the controller. - // +optional - optional AggregationRule aggregationRule = 3; -} - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -message ClusterRoleBinding { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - // +optional - repeated Subject subjects = 2; - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional RoleRef roleRef = 3; -} - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -message ClusterRoleBindingList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of ClusterRoleBindings - repeated ClusterRoleBinding items = 2; -} - -// ClusterRoleList is a collection of ClusterRoles -message ClusterRoleList { - // Standard object's metadata. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of ClusterRoles - repeated ClusterRole items = 2; -} - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -message PolicyRule { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - repeated string verbs = 1; - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. - // +optional - repeated string apiGroups = 2; - - // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. - // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. - // +optional - repeated string resources = 3; - - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - // +optional - repeated string resourceNames = 4; - - // NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - // +optional - repeated string nonResourceURLs = 5; -} - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -message Role { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Rules holds all the PolicyRules for this Role - repeated PolicyRule rules = 2; -} - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. -// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -message RoleBinding { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Subjects holds references to the objects the role applies to. - // +optional - repeated Subject subjects = 2; - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - optional RoleRef roleRef = 3; -} - -// RoleBindingList is a collection of RoleBindings -message RoleBindingList { - // Standard object's metadata. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of RoleBindings - repeated RoleBinding items = 2; -} - -// RoleList is a collection of Roles -message RoleList { - // Standard object's metadata. 
- // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of Roles - repeated Role items = 2; -} - -// RoleRef contains information that points to the role being used -message RoleRef { - // APIGroup is the group for the resource being referenced - optional string apiGroup = 1; - - // Kind is the type of resource being referenced - optional string kind = 2; - - // Name is the name of resource being referenced - optional string name = 3; -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -message Subject { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - optional string kind = 1; - - // APIGroup holds the API group of the referenced subject. - // Defaults to "" for ServiceAccount subjects. - // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - // +optional - optional string apiGroup = 2; - - // Name of the object being referenced. - optional string name = 3; - - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - // +optional - optional string namespace = 4; -} - diff --git a/vendor/k8s.io/api/rbac/v1beta1/register.go b/vendor/k8s.io/api/rbac/v1beta1/register.go deleted file mode 100644 index c8526a6..0000000 --- a/vendor/k8s.io/api/rbac/v1beta1/register.go +++ /dev/null @@ -1,58 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const GroupName = "rbac.authorization.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. 
-func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &Role{}, - &RoleBinding{}, - &RoleBindingList{}, - &RoleList{}, - - &ClusterRole{}, - &ClusterRoleBinding{}, - &ClusterRoleBindingList{}, - &ClusterRoleList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/rbac/v1beta1/types.go b/vendor/k8s.io/api/rbac/v1beta1/types.go deleted file mode 100644 index 857b67a..0000000 --- a/vendor/k8s.io/api/rbac/v1beta1/types.go +++ /dev/null @@ -1,235 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// Authorization is calculated against -// 1. evaluation of ClusterRoleBindings - short circuit on match -// 2. evaluation of RoleBindings in the namespace requested - short circuit on match -// 3. deny by default - -const ( - APIGroupAll = "*" - ResourceAll = "*" - VerbAll = "*" - NonResourceAll = "*" - - GroupKind = "Group" - ServiceAccountKind = "ServiceAccount" - UserKind = "User" - - // AutoUpdateAnnotationKey is the name of an annotation which prevents reconciliation if set to "false" - AutoUpdateAnnotationKey = "rbac.authorization.kubernetes.io/autoupdate" -) - -// Authorization is calculated against -// 1. evaluation of ClusterRoleBindings - short circuit on match -// 2. evaluation of RoleBindings in the namespace requested - short circuit on match -// 3. deny by default - -// PolicyRule holds information that describes a policy rule, but does not contain information -// about who the rule applies to or which namespace the rule applies to. -type PolicyRule struct { - // Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds. - Verbs []string `json:"verbs" protobuf:"bytes,1,rep,name=verbs"` - - // APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of - // the enumerated resources in any API group will be allowed. - // +optional - APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,2,rep,name=apiGroups"` - // Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. - // '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups. - // +optional - Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"` - // ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed. - // +optional - ResourceNames []string `json:"resourceNames,omitempty" protobuf:"bytes,4,rep,name=resourceNames"` - - // NonResourceURLs is a set of partial urls that a user should have access to. 
*s are allowed, but only as the full, final step in the path - // Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. - // Rules can either apply to API resources (such as "pods" or "secrets") or non-resource URL paths (such as "/api"), but not both. - // +optional - NonResourceURLs []string `json:"nonResourceURLs,omitempty" protobuf:"bytes,5,rep,name=nonResourceURLs"` -} - -// Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, -// or a value for non-objects such as user and group names. -type Subject struct { - // Kind of object being referenced. Values defined by this API group are "User", "Group", and "ServiceAccount". - // If the Authorizer does not recognized the kind value, the Authorizer should report an error. - Kind string `json:"kind" protobuf:"bytes,1,opt,name=kind"` - // APIGroup holds the API group of the referenced subject. - // Defaults to "" for ServiceAccount subjects. - // Defaults to "rbac.authorization.k8s.io" for User and Group subjects. - // +optional - APIGroup string `json:"apiGroup,omitempty" protobuf:"bytes,2,opt.name=apiGroup"` - // Name of the object being referenced. - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` - // Namespace of the referenced object. If the object kind is non-namespace, such as "User" or "Group", and this value is not empty - // the Authorizer should report an error. - // +optional - Namespace string `json:"namespace,omitempty" protobuf:"bytes,4,opt,name=namespace"` -} - -// RoleRef contains information that points to the role being used -type RoleRef struct { - // APIGroup is the group for the resource being referenced - APIGroup string `json:"apiGroup" protobuf:"bytes,1,opt,name=apiGroup"` - // Kind is the type of resource being referenced - Kind string `json:"kind" protobuf:"bytes,2,opt,name=kind"` - // Name is the name of resource being referenced - Name string `json:"name" protobuf:"bytes,3,opt,name=name"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding. -type Role struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Rules holds all the PolicyRules for this Role - Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` -} - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. -// It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given -// namespace only have effect in that namespace. -type RoleBinding struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Subjects holds references to the objects the role applies to. - // +optional - Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` - - // RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. 
- RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleBindingList is a collection of RoleBindings -type RoleBindingList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of RoleBindings - Items []RoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// RoleList is a collection of Roles -type RoleList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of Roles - Items []Role `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding. -type ClusterRole struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Rules holds all the PolicyRules for this ClusterRole - Rules []PolicyRule `json:"rules" protobuf:"bytes,2,rep,name=rules"` - // AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. - // If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be - // stomped by the controller. - // +optional - AggregationRule *AggregationRule `json:"aggregationRule,omitempty" protobuf:"bytes,3,opt,name=aggregationRule"` -} - -// AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole -type AggregationRule struct { - // ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. - // If any of the selectors match, then the ClusterRole's permissions will be added - // +optional - ClusterRoleSelectors []metav1.LabelSelector `json:"clusterRoleSelectors,omitempty" protobuf:"bytes,1,rep,name=clusterRoleSelectors"` -} - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleBinding references a ClusterRole, but not contain it. It can reference a ClusterRole in the global namespace, -// and adds who information via Subject. -type ClusterRoleBinding struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Subjects holds references to the objects the role applies to. - // +optional - Subjects []Subject `json:"subjects,omitempty" protobuf:"bytes,2,rep,name=subjects"` - - // RoleRef can only reference a ClusterRole in the global namespace. - // If the RoleRef cannot be resolved, the Authorizer must return an error. - RoleRef RoleRef `json:"roleRef" protobuf:"bytes,3,opt,name=roleRef"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleBindingList is a collection of ClusterRoleBindings -type ClusterRoleBindingList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ClusterRoleBindings - Items []ClusterRoleBinding `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// ClusterRoleList is a collection of ClusterRoles -type ClusterRoleList struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of ClusterRoles - Items []ClusterRole `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 66dba6c..0000000 --- a/vendor/k8s.io/api/rbac/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,158 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_AggregationRule = map[string]string{ - "": "AggregationRule describes how to locate ClusterRoles to aggregate into the ClusterRole", - "clusterRoleSelectors": "ClusterRoleSelectors holds a list of selectors which will be used to find ClusterRoles and create the rules. If any of the selectors match, then the ClusterRole's permissions will be added", -} - -func (AggregationRule) SwaggerDoc() map[string]string { - return map_AggregationRule -} - -var map_ClusterRole = map[string]string{ - "": "ClusterRole is a cluster level, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding or ClusterRoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this ClusterRole", - "aggregationRule": "AggregationRule is an optional field that describes how to build the Rules for this ClusterRole. If AggregationRule is set, then the Rules are controller managed and direct changes to Rules will be stomped by the controller.", -} - -func (ClusterRole) SwaggerDoc() map[string]string { - return map_ClusterRole -} - -var map_ClusterRoleBinding = map[string]string{ - "": "ClusterRoleBinding references a ClusterRole, but not contain it. 
It can reference a ClusterRole in the global namespace, and adds who information via Subject.", - "metadata": "Standard object's metadata.", - "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can only reference a ClusterRole in the global namespace. If the RoleRef cannot be resolved, the Authorizer must return an error.", -} - -func (ClusterRoleBinding) SwaggerDoc() map[string]string { - return map_ClusterRoleBinding -} - -var map_ClusterRoleBindingList = map[string]string{ - "": "ClusterRoleBindingList is a collection of ClusterRoleBindings", - "metadata": "Standard object's metadata.", - "items": "Items is a list of ClusterRoleBindings", -} - -func (ClusterRoleBindingList) SwaggerDoc() map[string]string { - return map_ClusterRoleBindingList -} - -var map_ClusterRoleList = map[string]string{ - "": "ClusterRoleList is a collection of ClusterRoles", - "metadata": "Standard object's metadata.", - "items": "Items is a list of ClusterRoles", -} - -func (ClusterRoleList) SwaggerDoc() map[string]string { - return map_ClusterRoleList -} - -var map_PolicyRule = map[string]string{ - "": "PolicyRule holds information that describes a policy rule, but does not contain information about who the rule applies to or which namespace the rule applies to.", - "verbs": "Verbs is a list of Verbs that apply to ALL the ResourceKinds and AttributeRestrictions contained in this rule. VerbAll represents all kinds.", - "apiGroups": "APIGroups is the name of the APIGroup that contains the resources. If multiple API groups are specified, any action requested against one of the enumerated resources in any API group will be allowed.", - "resources": "Resources is a list of resources this rule applies to. '*' represents all resources in the specified apiGroups. '*/foo' represents the subresource 'foo' for all resources in the specified apiGroups.", - "resourceNames": "ResourceNames is an optional white list of names that the rule applies to. An empty set means that everything is allowed.", - "nonResourceURLs": "NonResourceURLs is a set of partial urls that a user should have access to. *s are allowed, but only as the full, final step in the path Since non-resource URLs are not namespaced, this field is only applicable for ClusterRoles referenced from a ClusterRoleBinding. Rules can either apply to API resources (such as \"pods\" or \"secrets\") or non-resource URL paths (such as \"/api\"), but not both.", -} - -func (PolicyRule) SwaggerDoc() map[string]string { - return map_PolicyRule -} - -var map_Role = map[string]string{ - "": "Role is a namespaced, logical grouping of PolicyRules that can be referenced as a unit by a RoleBinding.", - "metadata": "Standard object's metadata.", - "rules": "Rules holds all the PolicyRules for this Role", -} - -func (Role) SwaggerDoc() map[string]string { - return map_Role -} - -var map_RoleBinding = map[string]string{ - "": "RoleBinding references a role, but does not contain it. It can reference a Role in the same namespace or a ClusterRole in the global namespace. It adds who information via Subjects and namespace information by which namespace it exists in. RoleBindings in a given namespace only have effect in that namespace.", - "metadata": "Standard object's metadata.", - "subjects": "Subjects holds references to the objects the role applies to.", - "roleRef": "RoleRef can reference a Role in the current namespace or a ClusterRole in the global namespace. 
If the RoleRef cannot be resolved, the Authorizer must return an error.", -} - -func (RoleBinding) SwaggerDoc() map[string]string { - return map_RoleBinding -} - -var map_RoleBindingList = map[string]string{ - "": "RoleBindingList is a collection of RoleBindings", - "metadata": "Standard object's metadata.", - "items": "Items is a list of RoleBindings", -} - -func (RoleBindingList) SwaggerDoc() map[string]string { - return map_RoleBindingList -} - -var map_RoleList = map[string]string{ - "": "RoleList is a collection of Roles", - "metadata": "Standard object's metadata.", - "items": "Items is a list of Roles", -} - -func (RoleList) SwaggerDoc() map[string]string { - return map_RoleList -} - -var map_RoleRef = map[string]string{ - "": "RoleRef contains information that points to the role being used", - "apiGroup": "APIGroup is the group for the resource being referenced", - "kind": "Kind is the type of resource being referenced", - "name": "Name is the name of resource being referenced", -} - -func (RoleRef) SwaggerDoc() map[string]string { - return map_RoleRef -} - -var map_Subject = map[string]string{ - "": "Subject contains a reference to the object or user identities a role binding applies to. This can either hold a direct API object reference, or a value for non-objects such as user and group names.", - "kind": "Kind of object being referenced. Values defined by this API group are \"User\", \"Group\", and \"ServiceAccount\". If the Authorizer does not recognized the kind value, the Authorizer should report an error.", - "apiGroup": "APIGroup holds the API group of the referenced subject. Defaults to \"\" for ServiceAccount subjects. Defaults to \"rbac.authorization.k8s.io\" for User and Group subjects.", - "name": "Name of the object being referenced.", - "namespace": "Namespace of the referenced object. If the object kind is non-namespace, such as \"User\" or \"Group\", and this value is not empty the Authorizer should report an error.", -} - -func (Subject) SwaggerDoc() map[string]string { - return map_Subject -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index c085c90..0000000 --- a/vendor/k8s.io/api/rbac/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,389 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *AggregationRule) DeepCopyInto(out *AggregationRule) { - *out = *in - if in.ClusterRoleSelectors != nil { - in, out := &in.ClusterRoleSelectors, &out.ClusterRoleSelectors - *out = make([]v1.LabelSelector, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AggregationRule. -func (in *AggregationRule) DeepCopy() *AggregationRule { - if in == nil { - return nil - } - out := new(AggregationRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRole) DeepCopyInto(out *ClusterRole) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.AggregationRule != nil { - in, out := &in.AggregationRule, &out.AggregationRule - *out = new(AggregationRule) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRole. -func (in *ClusterRole) DeepCopy() *ClusterRole { - if in == nil { - return nil - } - out := new(ClusterRole) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRole) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRoleBinding) DeepCopyInto(out *ClusterRoleBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - out.RoleRef = in.RoleRef - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBinding. -func (in *ClusterRoleBinding) DeepCopy() *ClusterRoleBinding { - if in == nil { - return nil - } - out := new(ClusterRoleBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRoleBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRoleBindingList) DeepCopyInto(out *ClusterRoleBindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRoleBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleBindingList. -func (in *ClusterRoleBindingList) DeepCopy() *ClusterRoleBindingList { - if in == nil { - return nil - } - out := new(ClusterRoleBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *ClusterRoleBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ClusterRoleList) DeepCopyInto(out *ClusterRoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]ClusterRole, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterRoleList. -func (in *ClusterRoleList) DeepCopy() *ClusterRoleList { - if in == nil { - return nil - } - out := new(ClusterRoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *ClusterRoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PolicyRule) DeepCopyInto(out *PolicyRule) { - *out = *in - if in.Verbs != nil { - in, out := &in.Verbs, &out.Verbs - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.APIGroups != nil { - in, out := &in.APIGroups, &out.APIGroups - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.Resources != nil { - in, out := &in.Resources, &out.Resources - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.ResourceNames != nil { - in, out := &in.ResourceNames, &out.ResourceNames - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.NonResourceURLs != nil { - in, out := &in.NonResourceURLs, &out.NonResourceURLs - *out = make([]string, len(*in)) - copy(*out, *in) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyRule. -func (in *PolicyRule) DeepCopy() *PolicyRule { - if in == nil { - return nil - } - out := new(PolicyRule) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Role) DeepCopyInto(out *Role) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Rules != nil { - in, out := &in.Rules, &out.Rules - *out = make([]PolicyRule, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Role. -func (in *Role) DeepCopy() *Role { - if in == nil { - return nil - } - out := new(Role) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Role) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *RoleBinding) DeepCopyInto(out *RoleBinding) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Subjects != nil { - in, out := &in.Subjects, &out.Subjects - *out = make([]Subject, len(*in)) - copy(*out, *in) - } - out.RoleRef = in.RoleRef - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBinding. -func (in *RoleBinding) DeepCopy() *RoleBinding { - if in == nil { - return nil - } - out := new(RoleBinding) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleBinding) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleBindingList) DeepCopyInto(out *RoleBindingList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]RoleBinding, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleBindingList. -func (in *RoleBindingList) DeepCopy() *RoleBindingList { - if in == nil { - return nil - } - out := new(RoleBindingList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleBindingList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleList) DeepCopyInto(out *RoleList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]Role, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleList. -func (in *RoleList) DeepCopy() *RoleList { - if in == nil { - return nil - } - out := new(RoleList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *RoleList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *RoleRef) DeepCopyInto(out *RoleRef) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RoleRef. -func (in *RoleRef) DeepCopy() *RoleRef { - if in == nil { - return nil - } - out := new(RoleRef) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Subject) DeepCopyInto(out *Subject) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subject. 
-func (in *Subject) DeepCopy() *Subject { - if in == nil { - return nil - } - out := new(Subject) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go b/vendor/k8s.io/api/scheduling/v1alpha1/doc.go deleted file mode 100644 index e10d07f..0000000 --- a/vendor/k8s.io/api/scheduling/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=scheduling.k8s.io -package v1alpha1 // import "k8s.io/api/scheduling/v1alpha1" diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go deleted file mode 100644 index 97c07c9..0000000 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.pb.go +++ /dev/null @@ -1,640 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto - - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func init() { - proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClass") - proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1alpha1.PriorityClassList") -} -func (m *PriorityClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ - if m.GlobalDefault { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil -} - -func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PriorityClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Value)) - n += 2 - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PriorityClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - 
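The hunks above delete gogo/protobuf generated marshaling helpers such as encodeVarintGenerated and sovGenerated. For readers skimming the removal, the short standalone sketch below shows the base-128 varint wire encoding those helpers implement and cross-checks it against the standard library; the names putVarint and varintSize are illustrative only and are not part of this patch or of the vendored code.

package main

import (
	"encoding/binary"
	"fmt"
)

// putVarint writes v into buf at offset using protobuf's base-128 varint
// encoding (7 data bits per byte, MSB set on every byte except the last)
// and returns the offset just past the written bytes. It mirrors the
// generated encodeVarintGenerated helper being deleted above.
func putVarint(buf []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return offset + 1
}

// varintSize returns how many bytes putVarint would use for x,
// mirroring the generated sovGenerated helper.
func varintSize(x uint64) int {
	n := 0
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	const v = 300 // encodes as 0xac 0x02

	buf := make([]byte, binary.MaxVarintLen64)
	end := putVarint(buf, 0, v)

	// Cross-check against the standard library's unsigned varint codec,
	// which uses the same wire format.
	std := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(std, v)

	fmt.Printf("hand-rolled: % x (size hint %d)\n", buf[:end], varintSize(v))
	fmt.Printf("stdlib:      % x (%d bytes)\n", std[:n], n)

	decoded, _ := binary.Uvarint(buf[:end])
	fmt.Println("round-trip value:", decoded) // 300
}

Running the sketch prints identical byte sequences for both encoders, which is why dropping the vendored copy in favor of the replacement client loses no wire compatibility.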
-func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PriorityClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `}`, - }, "") - return s -} -func (this *PriorityClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PriorityClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.GlobalDefault = bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field 
Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PriorityClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PriorityClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { 
- break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 447 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0x4f, 0x8b, 0xd3, 0x40, - 0x18, 0xc6, 0x33, 0x5d, 0x0b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0xe8, 0x61, 0x36, 0xac, 0x97, 0x5c, - 0x76, 0xc6, 0x2e, 0x2a, 0x82, 0xb7, 0xb8, 0xb0, 0x08, 0x8a, 0x92, 0x83, 0x07, 0xf1, 0xe0, 0x24, - 0x79, 0x37, 0x1d, 0x9b, 0x64, 0xc2, 0xcc, 0x24, 0xb0, 0x37, 0xcf, 0x9e, 0xfc, 0x52, 0x42, 0x8f, - 0x7b, 0xdc, 0xd3, 0x62, 0xe3, 0x17, 0x91, 0xa4, 0x69, 0xd3, 0x5a, 0xfc, 0x73, 0xcb, 0x3c, 0xef, - 0xef, 0x79, 0xe6, 0xcd, 0xc3, 0xe0, 0x8b, 0xc5, 0x73, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, - 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, - 0xa6, 0x22, 0x4f, 0x58, 0x35, 0xe3, 0x69, 0x31, 0xe7, 0x33, 0x96, 0x40, 0x0e, 0x8a, 0x1b, 0x88, - 0x69, 0xa1, 0xa4, 0x91, 0x36, 0x59, 0xf3, 0x94, 0x17, 0x82, 0xf6, 0x3c, 0xdd, 0xf0, 0xd3, 0xd3, - 0x44, 0x98, 0x79, 0x19, 0xd2, 0x48, 0x66, 0x2c, 0x91, 0x89, 0x64, 0xad, 0x2d, 0x2c, 0x2f, 0xdb, - 0x53, 0x7b, 0x68, 0xbf, 0xd6, 0x71, 0xd3, 0x27, 0xfd, 0xf5, 0x19, 0x8f, 0xe6, 0x22, 0x07, 0x75, - 0xc5, 0x8a, 0x45, 0xd2, 0x08, 0x9a, 0x65, 0x60, 0x38, 0xab, 0x0e, 0x96, 0x98, 0xb2, 0x3f, 0xb9, - 0x54, 0x99, 0x1b, 0x91, 0xc1, 0x81, 0xe1, 0xd9, 0xbf, 0x0c, 0xcd, 0xaf, 0x64, 0xfc, 0x77, 0xdf, - 0xc9, 0xd7, 0x01, 0x9e, 0xbc, 0x53, 0x42, 0x2a, 0x61, 0xae, 0x5e, 0xa6, 0x5c, 0x6b, 0xfb, 0x13, - 0x1e, 0x35, 0x5b, 0xc5, 0xdc, 0x70, 0x07, 0xb9, 0xc8, 0x1b, 0x9f, 0x3d, 0xa6, 0x7d, 0x25, 0xdb, - 0x70, 0x5a, 0x2c, 0x92, 0x46, 0xd0, 0xb4, 0xa1, 0x69, 0x35, 0xa3, 0x6f, 0xc3, 0xcf, 0x10, 0x99, - 0x37, 0x60, 0xb8, 0x6f, 0x2f, 0x6f, 0x8f, 0xad, 0xfa, 0xf6, 0x18, 0xf7, 0x5a, 0xb0, 0x4d, 0xb5, - 0x1f, 0xe1, 0x61, 0xc5, 0xd3, 0x12, 0x9c, 0x81, 0x8b, 
0xbc, 0xa1, 0x3f, 0xe9, 0xe0, 0xe1, 0xfb, - 0x46, 0x0c, 0xd6, 0x33, 0xfb, 0x05, 0x9e, 0x24, 0xa9, 0x0c, 0x79, 0x7a, 0x0e, 0x97, 0xbc, 0x4c, - 0x8d, 0x73, 0xe4, 0x22, 0x6f, 0xe4, 0x3f, 0xec, 0xe0, 0xc9, 0xc5, 0xee, 0x30, 0xd8, 0x67, 0xed, - 0xa7, 0x78, 0x1c, 0x83, 0x8e, 0x94, 0x28, 0x8c, 0x90, 0xb9, 0x73, 0xc7, 0x45, 0xde, 0x5d, 0xff, - 0x41, 0x67, 0x1d, 0x9f, 0xf7, 0xa3, 0x60, 0x97, 0x3b, 0xf9, 0x8e, 0xf0, 0xfd, 0xbd, 0x32, 0x5e, - 0x0b, 0x6d, 0xec, 0x8f, 0x07, 0x85, 0xd0, 0xff, 0x2b, 0xa4, 0x71, 0xb7, 0x75, 0xdc, 0xeb, 0x6e, - 0x1e, 0x6d, 0x94, 0x9d, 0x32, 0x02, 0x3c, 0x14, 0x06, 0x32, 0xed, 0x0c, 0xdc, 0x23, 0x6f, 0x7c, - 0x76, 0x4a, 0xff, 0xfe, 0xfc, 0xe8, 0xde, 0x7e, 0x7d, 0x77, 0xaf, 0x9a, 0x8c, 0x60, 0x1d, 0xe5, - 0xd3, 0xe5, 0x8a, 0x58, 0xd7, 0x2b, 0x62, 0xdd, 0xac, 0x88, 0xf5, 0xa5, 0x26, 0x68, 0x59, 0x13, - 0x74, 0x5d, 0x13, 0x74, 0x53, 0x13, 0xf4, 0xa3, 0x26, 0xe8, 0xdb, 0x4f, 0x62, 0x7d, 0x18, 0x6d, - 0x32, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xab, 0x20, 0x12, 0x63, 0x3c, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto b/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto deleted file mode 100644 index 5fb5472..0000000 --- a/vendor/k8s.io/api/scheduling/v1alpha1/generated.proto +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.scheduling.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// PriorityClass defines mapping from a priority class name to the priority -// integer value. The value can be any valid integer. -message PriorityClass { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The value of this priority class. This is the actual priority that pods - // receive when they have the name of this class in their pod spec. - optional int32 value = 2; - - // globalDefault specifies whether this PriorityClass should be considered as - // the default priority for pods that do not have any priority class. - // Only one PriorityClass can be marked as `globalDefault`. However, if more than - // one PriorityClasses exists with their `globalDefault` field set to true, - // the smallest value of such global default PriorityClasses will be used as the default priority. - // +optional - optional bool globalDefault = 3; - - // description is an arbitrary string that usually provides guidelines on - // when this priority class should be used. - // +optional - optional string description = 4; -} - -// PriorityClassList is a collection of priority classes. 
-message PriorityClassList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of PriorityClasses - repeated PriorityClass items = 2; -} - diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/register.go b/vendor/k8s.io/api/scheduling/v1alpha1/register.go deleted file mode 100644 index 24689f0..0000000 --- a/vendor/k8s.io/api/scheduling/v1alpha1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "scheduling.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &PriorityClass{}, - &PriorityClassList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types.go b/vendor/k8s.io/api/scheduling/v1alpha1/types.go deleted file mode 100644 index 21e3df0..0000000 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PriorityClass defines mapping from a priority class name to the priority -// integer value. The value can be any valid integer. 
-type PriorityClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The value of this priority class. This is the actual priority that pods - // receive when they have the name of this class in their pod spec. - Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` - - // globalDefault specifies whether this PriorityClass should be considered as - // the default priority for pods that do not have any priority class. - // Only one PriorityClass can be marked as `globalDefault`. However, if more than - // one PriorityClasses exists with their `globalDefault` field set to true, - // the smallest value of such global default PriorityClasses will be used as the default priority. - // +optional - GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` - - // description is an arbitrary string that usually provides guidelines on - // when this priority class should be used. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PriorityClassList is a collection of priority classes. -type PriorityClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of PriorityClasses - Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index f406f44..0000000 --- a/vendor/k8s.io/api/scheduling/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", -} - -func (PriorityClass) SwaggerDoc() map[string]string { - return map_PriorityClass -} - -var map_PriorityClassList = map[string]string{ - "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is the list of PriorityClasses", -} - -func (PriorityClassList) SwaggerDoc() map[string]string { - return map_PriorityClassList -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index fe0c860..0000000 --- a/vendor/k8s.io/api/scheduling/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. -func (in *PriorityClass) DeepCopy() *PriorityClass { - if in == nil { - return nil - } - out := new(PriorityClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PriorityClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PriorityClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. -func (in *PriorityClassList) DeepCopy() *PriorityClassList { - if in == nil { - return nil - } - out := new(PriorityClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PriorityClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/doc.go b/vendor/k8s.io/api/scheduling/v1beta1/doc.go deleted file mode 100644 index f2dd1cf..0000000 --- a/vendor/k8s.io/api/scheduling/v1beta1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=scheduling.k8s.io -package v1beta1 // import "k8s.io/api/scheduling/v1beta1" diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go b/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go deleted file mode 100644 index ea8f8d5..0000000 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.pb.go +++ /dev/null @@ -1,640 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto - - It has these top-level messages: - PriorityClass - PriorityClassList -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *PriorityClass) Reset() { *m = PriorityClass{} } -func (*PriorityClass) ProtoMessage() {} -func (*PriorityClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *PriorityClassList) Reset() { *m = PriorityClassList{} } -func (*PriorityClassList) ProtoMessage() {} -func (*PriorityClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func init() { - proto.RegisterType((*PriorityClass)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClass") - proto.RegisterType((*PriorityClassList)(nil), "k8s.io.api.scheduling.v1beta1.PriorityClassList") -} -func (m *PriorityClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PriorityClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x10 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Value)) - dAtA[i] = 0x18 - i++ - if m.GlobalDefault { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Description))) - i += copy(dAtA[i:], m.Description) - return i, nil -} - -func (m *PriorityClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PriorityClassList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PriorityClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - n += 1 + sovGenerated(uint64(m.Value)) - n += 2 - l = len(m.Description) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PriorityClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for 
_, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PriorityClass) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PriorityClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Value:` + fmt.Sprintf("%v", this.Value) + `,`, - `GlobalDefault:` + fmt.Sprintf("%v", this.GlobalDefault) + `,`, - `Description:` + fmt.Sprintf("%v", this.Description) + `,`, - `}`, - }, "") - return s -} -func (this *PriorityClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PriorityClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PriorityClass", "PriorityClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PriorityClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PriorityClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PriorityClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) - } - m.Value = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - m.Value |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field GlobalDefault", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.GlobalDefault = 
bool(v != 0) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Description = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PriorityClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PriorityClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PriorityClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PriorityClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/scheduling/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 448 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8b, 0xd3, 0x40, - 0x18, 0xc5, 0x33, 0x5d, 0x8b, 0x75, 0x4a, 0x41, 0x23, 0x42, 0x28, 0x38, 0x1b, 0xd6, 0x4b, 0x0e, - 0xee, 0x8c, 0x5d, 0x54, 0x04, 0x6f, 0x71, 0x51, 0x04, 0x45, 0xcd, 0xc1, 0x83, 0x78, 0x70, 0x92, - 0x7c, 0x9b, 0x8e, 0x4d, 0x32, 0x61, 0x66, 0x12, 0xd8, 0x9b, 0x67, 0x4f, 0xfe, 0x51, 0x1e, 0x7a, - 0xdc, 0xe3, 0x9e, 0x16, 0x1b, 0xff, 0x11, 0x49, 0x1a, 0x37, 0xad, 0x45, 0xdd, 0x5b, 0xe6, 0x7d, - 0xbf, 0xf7, 0xe6, 0xcb, 0x63, 0xf0, 0xf3, 0xc5, 0x13, 0x4d, 0x85, 0x64, 0x8b, 0x32, 0x04, 0x95, - 0x83, 0x01, 0xcd, 0x2a, 0xc8, 0x63, 0xa9, 0x58, 0x37, 0xe0, 0x85, 0x60, 0x3a, 0x9a, 0x43, 0x5c, - 0xa6, 0x22, 0x4f, 0x58, 0x35, 0x0b, 0xc1, 0xf0, 0x19, 0x4b, 0x20, 0x07, 0xc5, 0x0d, 0xc4, 0xb4, - 0x50, 0xd2, 0x48, 0xfb, 0xee, 0x1a, 0xa7, 0xbc, 0x10, 0xb4, 0xc7, 0x69, 0x87, 0x4f, 0x0f, 0x13, - 0x61, 0xe6, 0x65, 0x48, 0x23, 0x99, 0xb1, 0x44, 0x26, 0x92, 0xb5, 0xae, 0xb0, 0x3c, 0x69, 0x4f, - 0xed, 0xa1, 0xfd, 0x5a, 0xa7, 0x4d, 0x1f, 0xf6, 0x97, 0x67, 0x3c, 0x9a, 0x8b, 0x1c, 0xd4, 0x29, - 0x2b, 0x16, 0x49, 0x23, 0x68, 0x96, 0x81, 0xe1, 0xac, 0xda, 0xd9, 0x61, 0xca, 0xfe, 0xe6, 0x52, - 0x65, 0x6e, 0x44, 0x06, 0x3b, 0x86, 0xc7, 0xff, 0x33, 0x34, 0x7f, 0x92, 0xf1, 0x3f, 0x7d, 0x07, - 0x5f, 0x07, 0x78, 0xf2, 0x56, 0x09, 0xa9, 0x84, 0x39, 0x7d, 0x96, 0x72, 0xad, 0xed, 0x4f, 0x78, - 0xd4, 0x6c, 0x15, 0x73, 0xc3, 0x1d, 0xe4, 0x22, 0x6f, 0x7c, 0xf4, 0x80, 0xf6, 0x8d, 0x5c, 0x86, - 0xd3, 0x62, 0x91, 0x34, 0x82, 0xa6, 0x0d, 0x4d, 0xab, 0x19, 0x7d, 0x13, 0x7e, 0x86, 0xc8, 0xbc, - 0x06, 0xc3, 0x7d, 0x7b, 0x79, 0xb1, 0x6f, 0xd5, 
0x17, 0xfb, 0xb8, 0xd7, 0x82, 0xcb, 0x54, 0xfb, - 0x1e, 0x1e, 0x56, 0x3c, 0x2d, 0xc1, 0x19, 0xb8, 0xc8, 0x1b, 0xfa, 0x93, 0x0e, 0x1e, 0xbe, 0x6f, - 0xc4, 0x60, 0x3d, 0xb3, 0x9f, 0xe2, 0x49, 0x92, 0xca, 0x90, 0xa7, 0xc7, 0x70, 0xc2, 0xcb, 0xd4, - 0x38, 0x7b, 0x2e, 0xf2, 0x46, 0xfe, 0x9d, 0x0e, 0x9e, 0xbc, 0xd8, 0x1c, 0x06, 0xdb, 0xac, 0xfd, - 0x08, 0x8f, 0x63, 0xd0, 0x91, 0x12, 0x85, 0x11, 0x32, 0x77, 0xae, 0xb9, 0xc8, 0xbb, 0xe1, 0xdf, - 0xee, 0xac, 0xe3, 0xe3, 0x7e, 0x14, 0x6c, 0x72, 0x07, 0xdf, 0x11, 0xbe, 0xb5, 0x55, 0xc6, 0x2b, - 0xa1, 0x8d, 0xfd, 0x71, 0xa7, 0x10, 0x7a, 0xb5, 0x42, 0x1a, 0x77, 0x5b, 0xc7, 0xcd, 0xee, 0xe6, - 0xd1, 0x6f, 0x65, 0xa3, 0x8c, 0x77, 0x78, 0x28, 0x0c, 0x64, 0xda, 0x19, 0xb8, 0x7b, 0xde, 0xf8, - 0xe8, 0x3e, 0xfd, 0xe7, 0xeb, 0xa3, 0x5b, 0xeb, 0xf5, 0xd5, 0xbd, 0x6c, 0x22, 0x82, 0x75, 0x92, - 0x7f, 0xb8, 0x5c, 0x11, 0xeb, 0x6c, 0x45, 0xac, 0xf3, 0x15, 0xb1, 0xbe, 0xd4, 0x04, 0x2d, 0x6b, - 0x82, 0xce, 0x6a, 0x82, 0xce, 0x6b, 0x82, 0x7e, 0xd4, 0x04, 0x7d, 0xfb, 0x49, 0xac, 0x0f, 0xd7, - 0xbb, 0xc8, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x41, 0x74, 0x8a, 0x60, 0x38, 0x03, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto b/vendor/k8s.io/api/scheduling/v1beta1/generated.proto deleted file mode 100644 index 0a95755..0000000 --- a/vendor/k8s.io/api/scheduling/v1beta1/generated.proto +++ /dev/null @@ -1,67 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.scheduling.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// PriorityClass defines mapping from a priority class name to the priority -// integer value. The value can be any valid integer. -message PriorityClass { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // The value of this priority class. This is the actual priority that pods - // receive when they have the name of this class in their pod spec. - optional int32 value = 2; - - // globalDefault specifies whether this PriorityClass should be considered as - // the default priority for pods that do not have any priority class. - // Only one PriorityClass can be marked as `globalDefault`. However, if more than - // one PriorityClasses exists with their `globalDefault` field set to true, - // the smallest value of such global default PriorityClasses will be used as the default priority. - // +optional - optional bool globalDefault = 3; - - // description is an arbitrary string that usually provides guidelines on - // when this priority class should be used. 
- // +optional - optional string description = 4; -} - -// PriorityClassList is a collection of priority classes. -message PriorityClassList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // items is the list of PriorityClasses - repeated PriorityClass items = 2; -} - diff --git a/vendor/k8s.io/api/scheduling/v1beta1/register.go b/vendor/k8s.io/api/scheduling/v1beta1/register.go deleted file mode 100644 index fb26557..0000000 --- a/vendor/k8s.io/api/scheduling/v1beta1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "scheduling.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &PriorityClass{}, - &PriorityClassList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types.go b/vendor/k8s.io/api/scheduling/v1beta1/types.go deleted file mode 100644 index a9aaa86..0000000 --- a/vendor/k8s.io/api/scheduling/v1beta1/types.go +++ /dev/null @@ -1,66 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PriorityClass defines mapping from a priority class name to the priority -// integer value. The value can be any valid integer. -type PriorityClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // The value of this priority class. This is the actual priority that pods - // receive when they have the name of this class in their pod spec. - Value int32 `json:"value" protobuf:"bytes,2,opt,name=value"` - - // globalDefault specifies whether this PriorityClass should be considered as - // the default priority for pods that do not have any priority class. - // Only one PriorityClass can be marked as `globalDefault`. However, if more than - // one PriorityClasses exists with their `globalDefault` field set to true, - // the smallest value of such global default PriorityClasses will be used as the default priority. - // +optional - GlobalDefault bool `json:"globalDefault,omitempty" protobuf:"bytes,3,opt,name=globalDefault"` - - // description is an arbitrary string that usually provides guidelines on - // when this priority class should be used. - // +optional - Description string `json:"description,omitempty" protobuf:"bytes,4,opt,name=description"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PriorityClassList is a collection of priority classes. -type PriorityClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // items is the list of PriorityClasses - Items []PriorityClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index c18f54a..0000000 --- a/vendor/k8s.io/api/scheduling/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. 
-// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PriorityClass = map[string]string{ - "": "PriorityClass defines mapping from a priority class name to the priority integer value. The value can be any valid integer.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "value": "The value of this priority class. This is the actual priority that pods receive when they have the name of this class in their pod spec.", - "globalDefault": "globalDefault specifies whether this PriorityClass should be considered as the default priority for pods that do not have any priority class. Only one PriorityClass can be marked as `globalDefault`. However, if more than one PriorityClasses exists with their `globalDefault` field set to true, the smallest value of such global default PriorityClasses will be used as the default priority.", - "description": "description is an arbitrary string that usually provides guidelines on when this priority class should be used.", -} - -func (PriorityClass) SwaggerDoc() map[string]string { - return map_PriorityClass -} - -var map_PriorityClassList = map[string]string{ - "": "PriorityClassList is a collection of priority classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "items is the list of PriorityClasses", -} - -func (PriorityClassList) SwaggerDoc() map[string]string { - return map_PriorityClassList -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 6f68e4a..0000000 --- a/vendor/k8s.io/api/scheduling/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PriorityClass) DeepCopyInto(out *PriorityClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClass. -func (in *PriorityClass) DeepCopy() *PriorityClass { - if in == nil { - return nil - } - out := new(PriorityClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PriorityClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PriorityClassList) DeepCopyInto(out *PriorityClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PriorityClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PriorityClassList. -func (in *PriorityClassList) DeepCopy() *PriorityClassList { - if in == nil { - return nil - } - out := new(PriorityClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PriorityClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/doc.go b/vendor/k8s.io/api/settings/v1alpha1/doc.go deleted file mode 100644 index 05a62c5..0000000 --- a/vendor/k8s.io/api/settings/v1alpha1/doc.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true - -// +groupName=settings.k8s.io -package v1alpha1 // import "k8s.io/api/settings/v1alpha1" diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go b/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go deleted file mode 100644 index 15285ba..0000000 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.pb.go +++ /dev/null @@ -1,929 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. 
- - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto - - It has these top-level messages: - PodPreset - PodPresetList - PodPresetSpec -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *PodPreset) Reset() { *m = PodPreset{} } -func (*PodPreset) ProtoMessage() {} -func (*PodPreset) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *PodPresetList) Reset() { *m = PodPresetList{} } -func (*PodPresetList) ProtoMessage() {} -func (*PodPresetList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *PodPresetSpec) Reset() { *m = PodPresetSpec{} } -func (*PodPresetSpec) ProtoMessage() {} -func (*PodPresetSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func init() { - proto.RegisterType((*PodPreset)(nil), "k8s.io.api.settings.v1alpha1.PodPreset") - proto.RegisterType((*PodPresetList)(nil), "k8s.io.api.settings.v1alpha1.PodPresetList") - proto.RegisterType((*PodPresetSpec)(nil), "k8s.io.api.settings.v1alpha1.PodPresetSpec") -} -func (m *PodPreset) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPreset) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - return i, nil -} - -func (m *PodPresetList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPresetList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n3, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *PodPresetSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PodPresetSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(m.Selector.Size())) - n4, err := m.Selector.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Env) > 0 { - for _, msg := range m.Env { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.EnvFrom) > 0 { - for _, msg := range m.EnvFrom { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.Volumes) > 0 { - for _, msg := range m.Volumes { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - if len(m.VolumeMounts) > 0 { - for _, msg := range m.VolumeMounts { - dAtA[i] = 0x2a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PodPreset) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PodPresetList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *PodPresetSpec) Size() (n int) { - var l int - _ = l - l = m.Selector.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Env) > 0 { - for _, e := range m.Env { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.EnvFrom) > 0 { - for _, e := range m.EnvFrom { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.Volumes) > 0 { - for _, e := range m.Volumes { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - if len(m.VolumeMounts) > 0 { - for _, e := range m.VolumeMounts { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PodPreset) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodPreset{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "PodPresetSpec", "PodPresetSpec", 1), `&`, ``, 1) + `,`, - `}`, - 
}, "") - return s -} -func (this *PodPresetList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodPresetList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "PodPreset", "PodPreset", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PodPresetSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PodPresetSpec{`, - `Selector:` + strings.Replace(strings.Replace(this.Selector.String(), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1), `&`, ``, 1) + `,`, - `Env:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Env), "EnvVar", "k8s_io_api_core_v1.EnvVar", 1), `&`, ``, 1) + `,`, - `EnvFrom:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.EnvFrom), "EnvFromSource", "k8s_io_api_core_v1.EnvFromSource", 1), `&`, ``, 1) + `,`, - `Volumes:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Volumes), "Volume", "k8s_io_api_core_v1.Volume", 1), `&`, ``, 1) + `,`, - `VolumeMounts:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.VolumeMounts), "VolumeMount", "k8s_io_api_core_v1.VolumeMount", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PodPreset) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPreset: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPreset: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := 
skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodPresetList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPresetList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPresetList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, PodPreset{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PodPresetSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PodPresetSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PodPresetSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Selector", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Selector.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Env = append(m.Env, k8s_io_api_core_v1.EnvVar{}) - if err := m.Env[len(m.Env)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field EnvFrom", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.EnvFrom = append(m.EnvFrom, k8s_io_api_core_v1.EnvFromSource{}) - if err := m.EnvFrom[len(m.EnvFrom)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Volumes", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Volumes = append(m.Volumes, k8s_io_api_core_v1.Volume{}) - if err := m.Volumes[len(m.Volumes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeMounts", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.VolumeMounts = append(m.VolumeMounts, k8s_io_api_core_v1.VolumeMount{}) - if err := m.VolumeMounts[len(m.VolumeMounts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - 
iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/settings/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 542 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x92, 0xc1, 0x8e, 0xd2, 0x40, - 0x1c, 0xc6, 0xe9, 0xb2, 0x04, 0x1c, 0xd8, 0x68, 0x1a, 0x0f, 0x0d, 0x31, 0x65, 0xe5, 0xe2, 0x26, - 0xc6, 0x19, 0x59, 0x8d, 0xd1, 0x6b, 0x13, 0x4c, 0x4c, 0x20, 0x6e, 0x4a, 0xb2, 0x89, 0xc6, 0x83, - 0x43, 0xf9, 0x5b, 0x2a, 0xb4, 0xd3, 0xcc, 0x4c, 0x9b, 0x78, 0xf3, 0x11, 0x7c, 0x01, 0x9f, 0x44, - 0x1f, 0x80, 0xe3, 0x1e, 0xf7, 0xb4, 0x91, 0xfa, 0x22, 0x66, 0x86, 0x29, 0xa0, 0x88, 0x72, 0x9b, - 0xff, 0x9f, 0xef, 0xfb, 0xcd, 0xf7, 0x31, 0x45, 0xfd, 0xd9, 0x73, 0x81, 0x23, 0x46, 0x66, 0xd9, - 0x18, 0x78, 0x02, 0x12, 0x04, 0xc9, 0x21, 0x99, 0x30, 0x4e, 0xcc, 0x0f, 0x34, 0x8d, 0x88, 0x00, - 0x29, 0xa3, 0x24, 0x14, 0x24, 0xef, 0xd1, 0x79, 0x3a, 0xa5, 0x3d, 0x12, 0x42, 0x02, 0x9c, 0x4a, - 0x98, 0xe0, 0x94, 0x33, 0xc9, 0xec, 0x7b, 0x2b, 0x35, 0xa6, 0x69, 0x84, 0x4b, 0x35, 0x2e, 0xd5, - 0xed, 0x47, 0x61, 0x24, 0xa7, 0xd9, 0x18, 0x07, 0x2c, 0x26, 0x21, 0x0b, 0x19, 0xd1, 0xa6, 0x71, - 0xf6, 0x41, 0x4f, 0x7a, 0xd0, 0xa7, 0x15, 0xac, 0xdd, 0xdd, 0xba, 0x3a, 0x60, 0x1c, 0x48, 0xbe, - 0x73, 0x61, 0xfb, 0xe9, 0x46, 0x13, 0xd3, 0x60, 0x1a, 0x25, 0xc0, 0x3f, 0x91, 0x74, 0x16, 0xaa, - 0x85, 0x20, 0x31, 0x48, 0xfa, 0x37, 0x17, 0xd9, 0xe7, 0xe2, 0x59, 0x22, 0xa3, 0x18, 0x76, 0x0c, - 0xcf, 0xfe, 0x67, 0x10, 0xc1, 0x14, 0x62, 0xfa, 0xa7, 0xaf, 0xfb, 0xdd, 0x42, 0xb7, 0x2e, 0xd8, - 0xe4, 0x82, 0x83, 0x00, 0x69, 0xbf, 0x47, 0x0d, 0x95, 0x68, 0x42, 0x25, 
0x75, 0xac, 0x53, 0xeb, - 0xac, 0x79, 0xfe, 0x18, 0x6f, 0xfe, 0xb0, 0x35, 0x18, 0xa7, 0xb3, 0x50, 0x2d, 0x04, 0x56, 0x6a, - 0x9c, 0xf7, 0xf0, 0xeb, 0xf1, 0x47, 0x08, 0xe4, 0x10, 0x24, 0xf5, 0xec, 0xc5, 0x4d, 0xa7, 0x52, - 0xdc, 0x74, 0xd0, 0x66, 0xe7, 0xaf, 0xa9, 0xf6, 0x10, 0x1d, 0x8b, 0x14, 0x02, 0xe7, 0x48, 0xd3, - 0x1f, 0xe2, 0x7f, 0x3d, 0x07, 0x5e, 0x07, 0x1b, 0xa5, 0x10, 0x78, 0x2d, 0x03, 0x3e, 0x56, 0x93, - 0xaf, 0x31, 0xdd, 0x6f, 0x16, 0x3a, 0x59, 0xab, 0x06, 0x91, 0x90, 0xf6, 0xbb, 0x9d, 0x0a, 0xf8, - 0xb0, 0x0a, 0xca, 0xad, 0x0b, 0xdc, 0x31, 0xf7, 0x34, 0xca, 0xcd, 0x56, 0xfc, 0x01, 0xaa, 0x45, - 0x12, 0x62, 0xe1, 0x1c, 0x9d, 0x56, 0xcf, 0x9a, 0xe7, 0x0f, 0x0e, 0xcc, 0xef, 0x9d, 0x18, 0x66, - 0xed, 0x95, 0x72, 0xfb, 0x2b, 0x48, 0xf7, 0x6b, 0x75, 0x2b, 0xbd, 0x6a, 0x65, 0x53, 0xd4, 0x10, - 0x30, 0x87, 0x40, 0x32, 0x6e, 0xd2, 0x3f, 0x39, 0x30, 0x3d, 0x1d, 0xc3, 0x7c, 0x64, 0xac, 0x9b, - 0x0a, 0xe5, 0xc6, 0x5f, 0x63, 0xed, 0x17, 0xa8, 0x0a, 0x49, 0x6e, 0x0a, 0xb4, 0xb7, 0x0b, 0xa8, - 0x4f, 0x58, 0xb1, 0xfa, 0x49, 0x7e, 0x49, 0xb9, 0xd7, 0x34, 0x90, 0x6a, 0x3f, 0xc9, 0x7d, 0xe5, - 0xb1, 0x07, 0xa8, 0x0e, 0x49, 0xfe, 0x92, 0xb3, 0xd8, 0xa9, 0x6a, 0xfb, 0xfd, 0x3d, 0x76, 0x25, - 0x19, 0xb1, 0x8c, 0x07, 0xe0, 0xdd, 0x36, 0x94, 0xba, 0x59, 0xfb, 0x25, 0xc2, 0xee, 0xa3, 0x7a, - 0xce, 0xe6, 0x59, 0x0c, 0xc2, 0x39, 0xde, 0x1f, 0xe6, 0x52, 0x4b, 0x36, 0x98, 0xd5, 0x2c, 0xfc, - 0xd2, 0x6b, 0xbf, 0x41, 0xad, 0xd5, 0x71, 0xc8, 0xb2, 0x44, 0x0a, 0xa7, 0xa6, 0x59, 0x9d, 0xfd, - 0x2c, 0xad, 0xf3, 0xee, 0x1a, 0x60, 0x6b, 0x6b, 0x29, 0xfc, 0xdf, 0x50, 0x1e, 0x5e, 0x2c, 0xdd, - 0xca, 0xd5, 0xd2, 0xad, 0x5c, 0x2f, 0xdd, 0xca, 0xe7, 0xc2, 0xb5, 0x16, 0x85, 0x6b, 0x5d, 0x15, - 0xae, 0x75, 0x5d, 0xb8, 0xd6, 0x8f, 0xc2, 0xb5, 0xbe, 0xfc, 0x74, 0x2b, 0x6f, 0x1b, 0xe5, 0x7b, - 0xff, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x46, 0x15, 0xf2, 0x97, 0xa4, 0x04, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/generated.proto b/vendor/k8s.io/api/settings/v1alpha1/generated.proto deleted file mode 100644 index d5534c4..0000000 --- a/vendor/k8s.io/api/settings/v1alpha1/generated.proto +++ /dev/null @@ -1,75 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.settings.v1alpha1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. -message PodPreset { - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // +optional - optional PodPresetSpec spec = 2; -} - -// PodPresetList is a list of PodPreset objects. -message PodPresetList { - // Standard list metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is a list of schema objects. - repeated PodPreset items = 2; -} - -// PodPresetSpec is a description of a pod preset. -message PodPresetSpec { - // Selector is a label query over a set of resources, in this case pods. - // Required. - optional k8s.io.apimachinery.pkg.apis.meta.v1.LabelSelector selector = 1; - - // Env defines the collection of EnvVar to inject into containers. - // +optional - repeated k8s.io.api.core.v1.EnvVar env = 2; - - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - repeated k8s.io.api.core.v1.EnvFromSource envFrom = 3; - - // Volumes defines the collection of Volume to inject into the pod. - // +optional - repeated k8s.io.api.core.v1.Volume volumes = 4; - - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - repeated k8s.io.api.core.v1.VolumeMount volumeMounts = 5; -} - diff --git a/vendor/k8s.io/api/settings/v1alpha1/register.go b/vendor/k8s.io/api/settings/v1alpha1/register.go deleted file mode 100644 index eee278d..0000000 --- a/vendor/k8s.io/api/settings/v1alpha1/register.go +++ /dev/null @@ -1,52 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "settings.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &PodPreset{}, - &PodPresetList{}, - ) - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/types.go b/vendor/k8s.io/api/settings/v1alpha1/types.go deleted file mode 100644 index 506aacf..0000000 --- a/vendor/k8s.io/api/settings/v1alpha1/types.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPreset is a policy resource that defines additional runtime -// requirements for a Pod. -type PodPreset struct { - metav1.TypeMeta `json:",inline"` - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // +optional - Spec PodPresetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"` -} - -// PodPresetSpec is a description of a pod preset. -type PodPresetSpec struct { - // Selector is a label query over a set of resources, in this case pods. - // Required. - Selector metav1.LabelSelector `json:"selector,omitempty" protobuf:"bytes,1,opt,name=selector"` - - // Env defines the collection of EnvVar to inject into containers. - // +optional - Env []v1.EnvVar `json:"env,omitempty" protobuf:"bytes,2,rep,name=env"` - // EnvFrom defines the collection of EnvFromSource to inject into containers. - // +optional - EnvFrom []v1.EnvFromSource `json:"envFrom,omitempty" protobuf:"bytes,3,rep,name=envFrom"` - // Volumes defines the collection of Volume to inject into the pod. - // +optional - Volumes []v1.Volume `json:"volumes,omitempty" protobuf:"bytes,4,rep,name=volumes"` - // VolumeMounts defines the collection of VolumeMount to inject into containers. - // +optional - VolumeMounts []v1.VolumeMount `json:"volumeMounts,omitempty" protobuf:"bytes,5,rep,name=volumeMounts"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// PodPresetList is a list of PodPreset objects. -type PodPresetList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is a list of schema objects. - Items []PodPreset `json:"items" protobuf:"bytes,2,rep,name=items"` -} diff --git a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 508c452..0000000 --- a/vendor/k8s.io/api/settings/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,61 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. 
Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PodPreset = map[string]string{ - "": "PodPreset is a policy resource that defines additional runtime requirements for a Pod.", -} - -func (PodPreset) SwaggerDoc() map[string]string { - return map_PodPreset -} - -var map_PodPresetList = map[string]string{ - "": "PodPresetList is a list of PodPreset objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is a list of schema objects.", -} - -func (PodPresetList) SwaggerDoc() map[string]string { - return map_PodPresetList -} - -var map_PodPresetSpec = map[string]string{ - "": "PodPresetSpec is a description of a pod preset.", - "selector": "Selector is a label query over a set of resources, in this case pods. Required.", - "env": "Env defines the collection of EnvVar to inject into containers.", - "envFrom": "EnvFrom defines the collection of EnvFromSource to inject into containers.", - "volumes": "Volumes defines the collection of Volume to inject into the pod.", - "volumeMounts": "VolumeMounts defines the collection of VolumeMount to inject into containers.", -} - -func (PodPresetSpec) SwaggerDoc() map[string]string { - return map_PodPresetSpec -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index 6397a88..0000000 --- a/vendor/k8s.io/api/settings/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,131 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPreset) DeepCopyInto(out *PodPreset) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPreset. -func (in *PodPreset) DeepCopy() *PodPreset { - if in == nil { - return nil - } - out := new(PodPreset) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PodPreset) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPresetList) DeepCopyInto(out *PodPresetList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]PodPreset, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetList. -func (in *PodPresetList) DeepCopy() *PodPresetList { - if in == nil { - return nil - } - out := new(PodPresetList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PodPresetList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PodPresetSpec) DeepCopyInto(out *PodPresetSpec) { - *out = *in - in.Selector.DeepCopyInto(&out.Selector) - if in.Env != nil { - in, out := &in.Env, &out.Env - *out = make([]v1.EnvVar, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.EnvFrom != nil { - in, out := &in.EnvFrom, &out.EnvFrom - *out = make([]v1.EnvFromSource, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.Volumes != nil { - in, out := &in.Volumes, &out.Volumes - *out = make([]v1.Volume, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - if in.VolumeMounts != nil { - in, out := &in.VolumeMounts, &out.VolumeMounts - *out = make([]v1.VolumeMount, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodPresetSpec. -func (in *PodPresetSpec) DeepCopy() *PodPresetSpec { - if in == nil { - return nil - } - out := new(PodPresetSpec) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/storage/v1/doc.go b/vendor/k8s.io/api/storage/v1/doc.go deleted file mode 100644 index 8f4a404..0000000 --- a/vendor/k8s.io/api/storage/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=storage.k8s.io -// +k8s:openapi-gen=true -package v1 diff --git a/vendor/k8s.io/api/storage/v1/generated.pb.go b/vendor/k8s.io/api/storage/v1/generated.pb.go deleted file mode 100644 index d43a982..0000000 --- a/vendor/k8s.io/api/storage/v1/generated.pb.go +++ /dev/null @@ -1,978 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto -// DO NOT EDIT! - -/* - Package v1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList -*/ -package v1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func init() { - proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1.StorageClass") - proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1.StorageClassList") -} -func (m *StorageClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, 
uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], *m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ - if *m.AllowVolumeExpansion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) - } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StorageClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *StorageClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.ReclaimPolicy != nil { - l = len(*m.ReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AllowVolumeExpansion != nil { - n += 2 - } - if m.VolumeBindingMode != nil { - l = len(*m.VolumeBindingMode) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedTopologies) > 0 { - for _, e := range m.AllowedTopologies { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StorageClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if 
len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *StorageClass) String() string { - if this == nil { - return "nil" - } - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) - } - mapStringForParameters += "}" - s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, - `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *StorageClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := 
m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provisioner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Parameters == nil { - m.Parameters = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := 
dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) - m.ReclaimPolicy = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AllowVolumeExpansion = &b - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := VolumeBindingMode(dAtA[iNdEx:postIndex]) - m.VolumeBindingMode = &s - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) - if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - 
} - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err 
!= nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 656 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x53, 0xcd, 0x6e, 0xd3, 0x4a, - 0x14, 0x8e, 0x93, 0x9b, 0xde, 0x74, 0xd2, 0xea, 0x26, 0xbe, 0xbd, 0x92, 0x6f, 0x16, 0x4e, 0x54, - 0x36, 0x11, 0x12, 0xe3, 0xa6, 0x14, 0x54, 0x21, 0x81, 0x54, 0xa3, 0x4a, 0x20, 0xb5, 0x6a, 0xe4, - 0x56, 0x15, 0x42, 0x2c, 0x98, 0x38, 0x07, 0x77, 0x88, 0xed, 0x31, 0x33, 0x63, 0x43, 0x76, 0xbc, - 0x00, 0x12, 0xcf, 0xc3, 0x13, 0x74, 0xd9, 0x65, 0x57, 0x16, 0x35, 0x6f, 0xd1, 0x15, 0xf2, 0x0f, - 0x8d, 0x9b, 0x04, 0xd1, 0xdd, 0xcc, 0x77, 0xbe, 0xef, 0x3b, 0x33, 0xe7, 0x07, 0x3d, 0x9b, 0xec, - 0x0a, 0x4c, 0x99, 0x31, 0x09, 0x47, 0xc0, 0x7d, 0x90, 0x20, 0x8c, 0x08, 0xfc, 0x31, 0xe3, 0x46, - 0x11, 0x20, 0x01, 0x35, 0x84, 0x64, 0x9c, 0x38, 0x60, 0x44, 0x03, 0xc3, 0x01, 0x1f, 0x38, 0x91, - 0x30, 0xc6, 0x01, 0x67, 0x92, 0xa9, 0xff, 0xe5, 0x34, 0x4c, 0x02, 0x8a, 0x0b, 0x1a, 0x8e, 0x06, - 0x9d, 0x07, 0x0e, 0x95, 0x67, 0xe1, 0x08, 0xdb, 0xcc, 0x33, 0x1c, 0xe6, 0x30, 0x23, 0x63, 0x8f, - 0xc2, 0x77, 0xd9, 0x2d, 0xbb, 0x64, 0xa7, 0xdc, 0xa5, 0xb3, 0x59, 0x4a, 0x66, 0x33, 0xbe, 0x2c, - 0x53, 0x67, 0x67, 0xc6, 0xf1, 0x88, 0x7d, 0x46, 0x7d, 0xe0, 0x53, 0x23, 0x98, 0x38, 0x29, 0x20, - 0x0c, 0x0f, 0x24, 0x59, 0xa6, 0x32, 0x7e, 0xa7, 0xe2, 0xa1, 0x2f, 0xa9, 0x07, 0x0b, 0x82, 0xc7, - 0x7f, 0x12, 0x08, 0xfb, 0x0c, 0x3c, 0x32, 0xaf, 0xdb, 0xfc, 0xb2, 0x82, 0xd6, 0x8e, 0xf3, 0x02, - 0x3c, 0x77, 0x89, 0x10, 0xea, 0x5b, 0xd4, 0x48, 0x1f, 0x35, 0x26, 0x92, 0x68, 0x4a, 0x4f, 0xe9, - 0x37, 0xb7, 0xb7, 0xf0, 0xac, 0x58, 0x37, 0xde, 0x38, 0x98, 0x38, 0x29, 0x20, 0x70, 0xca, 0xc6, - 0xd1, 0x00, 0x1f, 0x8d, 0xde, 0x83, 0x2d, 0x0f, 0x41, 0x12, 0x53, 0x3d, 0x8f, 0xbb, 0x95, 0x24, - 0xee, 0xa2, 0x19, 0x66, 0xdd, 0xb8, 0xaa, 0x8f, 0x50, 0x33, 0xe0, 0x2c, 0xa2, 0x82, 0x32, 0x1f, - 0xb8, 0x56, 0xed, 0x29, 0xfd, 0x55, 0xf3, 0xdf, 0x42, 0xd2, 0x1c, 0xce, 0x42, 0x56, 0x99, 0xa7, - 0x3a, 0x08, 0x05, 0x84, 0x13, 0x0f, 0x24, 0x70, 0xa1, 0xd5, 0x7a, 0xb5, 0x7e, 0x73, 0xfb, 0x21, - 0x5e, 0xda, 0x47, 0x5c, 0xfe, 0x11, 0x1e, 0xde, 0xa8, 0xf6, 0x7d, 0xc9, 0xa7, 0xb3, 0xd7, 0xcd, - 0x02, 0x56, 0xc9, 0x5a, 0x9d, 0xa0, 0x75, 0x0e, 0xb6, 0x4b, 0xa8, 0x37, 0x64, 0x2e, 0xb5, 0xa7, - 0xda, 0x5f, 0xd9, 0x0b, 0xf7, 0x93, 0xb8, 0xbb, 0x6e, 0x95, 0x03, 0xd7, 0x71, 0x77, 0x6b, 0x71, - 0x02, 0xf0, 0x10, 0xb8, 0xa0, 0x42, 0x82, 0x2f, 0x4f, 0x99, 0x1b, 0x7a, 0x70, 0x4b, 0x63, 0xdd, - 0xf6, 0x56, 0x77, 0xd0, 0x9a, 0xc7, 0x42, 0x5f, 0x1e, 0x05, 0x92, 0x32, 0x5f, 0x68, 0xf5, 0x5e, - 0xad, 0xbf, 0x6a, 0xb6, 0x92, 0xb8, 0xbb, 0x76, 0x58, 0xc2, 0xad, 0x5b, 0x2c, 0xf5, 0x00, 0x6d, - 0x10, 0xd7, 0x65, 0x1f, 0xf3, 0x04, 0xfb, 0x9f, 0x02, 0xe2, 0xa7, 0x55, 0xd2, 0x56, 0x7a, 0x4a, - 0xbf, 0x61, 0x6a, 0x49, 0xdc, 0xdd, 0xd8, 0x5b, 0x12, 0xb7, 0x96, 0xaa, 0xd4, 0x57, 0xa8, 0x1d, - 0x65, 0x90, 0x49, 0xfd, 0x31, 0xf5, 0x9d, 0x43, 0x36, 0x06, 0xed, 0xef, 0xec, 0xd3, 0xf7, 0x93, - 0xb8, 0xdb, 0x3e, 0x9d, 0x0f, 0x5e, 0x2f, 0x03, 0xad, 0x45, 
0x13, 0xf5, 0x03, 0x6a, 0x67, 0x19, - 0x61, 0x7c, 0xc2, 0x02, 0xe6, 0x32, 0x87, 0x82, 0xd0, 0x1a, 0x59, 0xeb, 0xfa, 0xe5, 0xd6, 0xa5, - 0xa5, 0x4b, 0xfb, 0x56, 0xb0, 0xa6, 0xc7, 0xe0, 0x82, 0x2d, 0x19, 0x3f, 0x01, 0xee, 0x99, 0xff, - 0x17, 0xfd, 0x6a, 0xef, 0xcd, 0x5b, 0x59, 0x8b, 0xee, 0x9d, 0xa7, 0xe8, 0x9f, 0xb9, 0x86, 0xab, - 0x2d, 0x54, 0x9b, 0xc0, 0x34, 0x9b, 0xe6, 0x55, 0x2b, 0x3d, 0xaa, 0x1b, 0xa8, 0x1e, 0x11, 0x37, - 0x84, 0x7c, 0xf8, 0xac, 0xfc, 0xf2, 0xa4, 0xba, 0xab, 0x6c, 0x7e, 0x53, 0x50, 0xab, 0x3c, 0x3d, - 0x07, 0x54, 0x48, 0xf5, 0xcd, 0xc2, 0x4e, 0xe0, 0xbb, 0xed, 0x44, 0xaa, 0xce, 0x36, 0xa2, 0x55, - 0xfc, 0xa1, 0xf1, 0x0b, 0x29, 0xed, 0xc3, 0x0b, 0x54, 0xa7, 0x12, 0x3c, 0xa1, 0x55, 0xb3, 0xc2, - 0xdc, 0xbb, 0xc3, 0x4c, 0x9b, 0xeb, 0x85, 0x5f, 0xfd, 0x65, 0xaa, 0xb4, 0x72, 0x03, 0xb3, 0x7f, - 0x7e, 0xa5, 0x57, 0x2e, 0xae, 0xf4, 0xca, 0xe5, 0x95, 0x5e, 0xf9, 0x9c, 0xe8, 0xca, 0x79, 0xa2, - 0x2b, 0x17, 0x89, 0xae, 0x5c, 0x26, 0xba, 0xf2, 0x3d, 0xd1, 0x95, 0xaf, 0x3f, 0xf4, 0xca, 0xeb, - 0x6a, 0x34, 0xf8, 0x19, 0x00, 0x00, 0xff, 0xff, 0x0d, 0x64, 0x41, 0x83, 0x40, 0x05, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1/generated.proto b/vendor/k8s.io/api/storage/v1/generated.proto deleted file mode 100644 index d178565..0000000 --- a/vendor/k8s.io/api/storage/v1/generated.proto +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.storage.v1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1"; - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. -// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -message StorageClass { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Provisioner indicates the type of the provisioner. - optional string provisioner = 2; - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - // +optional - map parameters = 3; - - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. - // +optional - optional string reclaimPolicy = 4; - - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - - // mount of the PVs will simply fail if one is invalid. 
- // +optional - repeated string mountOptions = 5; - - // AllowVolumeExpansion shows whether the storage class allow volume expand - // +optional - optional bool allowVolumeExpansion = 6; - - // VolumeBindingMode indicates how PersistentVolumeClaims should be - // provisioned and bound. When unset, VolumeBindingImmediate is used. - // This field is only honored by servers that enable the VolumeScheduling feature. - // +optional - optional string volumeBindingMode = 7; - - // Restrict the node topologies where volumes can be dynamically provisioned. - // Each volume plugin defines its own supported topology specifications. - // An empty TopologySelectorTerm list means there is no topology restriction. - // This field is only honored by servers that enable the VolumeScheduling feature. - // +optional - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; -} - -// StorageClassList is a collection of storage classes. -message StorageClassList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of StorageClasses - repeated StorageClass items = 2; -} - diff --git a/vendor/k8s.io/api/storage/v1/register.go b/vendor/k8s.io/api/storage/v1/register.go deleted file mode 100644 index c058add..0000000 --- a/vendor/k8s.io/api/storage/v1/register.go +++ /dev/null @@ -1,53 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "storage.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &StorageClass{}, - &StorageClassList{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/storage/v1/types.go b/vendor/k8s.io/api/storage/v1/types.go deleted file mode 100644 index 30e6d6d..0000000 --- a/vendor/k8s.io/api/storage/v1/types.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. -// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -type StorageClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Provisioner indicates the type of the provisioner. - Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - // +optional - Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. - // +optional - ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - - // mount of the PVs will simply fail if one is invalid. - // +optional - MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - - // AllowVolumeExpansion shows whether the storage class allow volume expand - // +optional - AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - - // VolumeBindingMode indicates how PersistentVolumeClaims should be - // provisioned and bound. When unset, VolumeBindingImmediate is used. - // This field is only honored by servers that enable the VolumeScheduling feature. - // +optional - VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - - // Restrict the node topologies where volumes can be dynamically provisioned. - // Each volume plugin defines its own supported topology specifications. - // An empty TopologySelectorTerm list means there is no topology restriction. - // This field is only honored by servers that enable the VolumeScheduling feature. - // +optional - AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// StorageClassList is a collection of storage classes. 
-type StorageClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of StorageClasses - Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. -type VolumeBindingMode string - -const ( - // VolumeBindingImmediate indicates that PersistentVolumeClaims should be - // immediately provisioned and bound. This is the default mode. - VolumeBindingImmediate VolumeBindingMode = "Immediate" - - // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims - // should not be provisioned and bound until the first Pod is created that - // references the PeristentVolumeClaim. The volume provisioning and - // binding will occur during Pod scheduing. - VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" -) diff --git a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go deleted file mode 100644 index 23b76e2..0000000 --- a/vendor/k8s.io/api/storage/v1/types_swagger_doc_generated.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_StorageClass = map[string]string{ - "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. 
Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", -} - -func (StorageClass) SwaggerDoc() map[string]string { - return map_StorageClass -} - -var map_StorageClassList = map[string]string{ - "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", -} - -func (StorageClassList) SwaggerDoc() map[string]string { - return map_StorageClassList -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go deleted file mode 100644 index 0e850dc..0000000 --- a/vendor/k8s.io/api/storage/v1/zz_generated.deepcopy.go +++ /dev/null @@ -1,119 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1 - -import ( - corev1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StorageClass) DeepCopyInto(out *StorageClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ReclaimPolicy != nil { - in, out := &in.ReclaimPolicy, &out.ReclaimPolicy - *out = new(corev1.PersistentVolumeReclaimPolicy) - **out = **in - } - if in.MountOptions != nil { - in, out := &in.MountOptions, &out.MountOptions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowVolumeExpansion != nil { - in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion - *out = new(bool) - **out = **in - } - if in.VolumeBindingMode != nil { - in, out := &in.VolumeBindingMode, &out.VolumeBindingMode - *out = new(VolumeBindingMode) - **out = **in - } - if in.AllowedTopologies != nil { - in, out := &in.AllowedTopologies, &out.AllowedTopologies - *out = make([]corev1.TopologySelectorTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass. -func (in *StorageClass) DeepCopy() *StorageClass { - if in == nil { - return nil - } - out := new(StorageClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StorageClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList. -func (in *StorageClassList) DeepCopy() *StorageClassList { - if in == nil { - return nil - } - out := new(StorageClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StorageClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/doc.go b/vendor/k8s.io/api/storage/v1alpha1/doc.go deleted file mode 100644 index aa94aff..0000000 --- a/vendor/k8s.io/api/storage/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -// +k8s:deepcopy-gen=package,register -// +groupName=storage.k8s.io -// +k8s:openapi-gen=true -package v1alpha1 // import "k8s.io/api/storage/v1alpha1" diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go b/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go deleted file mode 100644 index 507b5c1..0000000 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.pb.go +++ /dev/null @@ -1,1520 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto -// DO NOT EDIT! - -/* - Package v1alpha1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto - - It has these top-level messages: - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ -package v1alpha1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func init() { - proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachment") - proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentList") - proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSource") - proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentSpec") - proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1alpha1.VolumeAttachmentStatus") - proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1alpha1.VolumeError") -} -func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n2, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n3, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - return i, nil -} - -func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n4, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) - } - return i, nil -} - -func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n5, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil -} - -func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.AttachmentMetadata) > 0 { - keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) - for k := range m.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { - dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n6, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n7, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - } - return i, nil -} - -func (m *VolumeError) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n8, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - dAtA[i] = 0x12 - i++ - i = 
encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *VolumeAttachment) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeAttachmentList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *VolumeAttachmentSource) Size() (n int) { - var l int - _ = l - if m.PersistentVolumeName != nil { - l = len(*m.PersistentVolumeName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeAttachmentSpec) Size() (n int) { - var l int - _ = l - l = len(m.Attacher) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeAttachmentStatus) Size() (n int) { - var l int - _ = l - n += 2 - if len(m.AttachmentMetadata) > 0 { - for k, v := range m.AttachmentMetadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.AttachError != nil { - l = m.AttachError.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DetachError != nil { - l = m.DetachError.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeError) Size() (n int) { - var l int - _ = l - l = m.Time.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *VolumeAttachment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentList) String() string { - if this == nil { - return "nil" - 
} - s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSource{`, - `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSpec{`, - `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentStatus) String() string { - if this == nil { - return "nil" - } - keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) - for k := range this.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - mapStringForAttachmentMetadata := "map[string]string{" - for _, k := range keysForAttachmentMetadata { - mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) - } - mapStringForAttachmentMetadata += "}" - s := strings.Join([]string{`&VolumeAttachmentStatus{`, - `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, - `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeError) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 
ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, VolumeAttachment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.PersistentVolumeName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - 
break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attacher = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Attached = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { 
- return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.AttachmentMetadata == nil { - m.AttachmentMetadata = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AttachmentMetadata[mapkey] = mapvalue - } else { - var mapvalue string - m.AttachmentMetadata[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AttachError == nil { - m.AttachError = &VolumeError{} - } - if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DetachError == nil { - m.DetachError = &VolumeError{} - } - if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return 
io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeError) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - 
if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1alpha1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 704 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x94, 0x4d, 0x6f, 0xd3, 0x4c, - 0x10, 0xc7, 0xe3, 0x24, 0x6d, 0xd3, 0xcd, 0xf3, 0x52, 0xad, 0xa2, 0xe7, 0x89, 0x82, 0xe4, 0x54, - 0x39, 0x15, 0x44, 0xd7, 0xa4, 0x20, 0x54, 0x71, 0x8b, 0xd5, 0x1e, 0x10, 0x6d, 0x41, 0x5b, 0xc4, - 0x01, 0x38, 0xb0, 0xb1, 0xa7, 0x8e, 0x9b, 0xfa, 0x45, 0xbb, 0xeb, 0x48, 0xbd, 0x71, 0xe2, 0xcc, - 0x8d, 0x6f, 0xc0, 0x67, 0xc9, 0x8d, 0x1e, 0x7b, 0x8a, 0xa8, 0xf9, 0x16, 0x5c, 0x40, 0x5e, 0x6f, - 0x5e, 0x68, 0x52, 0x68, 0x7b, 0xf3, 0xcc, 0xce, 0xfc, 0x66, 0xe6, 0xbf, 0xb3, 0x46, 0x3b, 0xfd, - 0x6d, 0x41, 0xfc, 0xc8, 0xea, 0x27, 0x5d, 0xe0, 0x21, 0x48, 0x10, 0xd6, 0x00, 0x42, 0x37, 0xe2, - 0x96, 0x3e, 0x60, 0xb1, 0x6f, 0x09, 0x19, 0x71, 0xe6, 0x81, 0x35, 0x68, 0xb3, 0x93, 0xb8, 0xc7, - 0xda, 0x96, 0x07, 0x21, 0x70, 0x26, 0xc1, 0x25, 0x31, 0x8f, 0x64, 0x84, 0xef, 0xe4, 0xc1, 0x84, - 0xc5, 0x3e, 0xd1, 0xc1, 0x64, 0x1c, 0xdc, 0xd8, 0xf4, 0x7c, 0xd9, 0x4b, 0xba, 0xc4, 0x89, 0x02, - 0xcb, 0x8b, 0xbc, 0xc8, 0x52, 0x39, 0xdd, 0xe4, 0x48, 0x59, 0xca, 0x50, 0x5f, 0x39, 0xab, 0xf1, - 0x68, 0x5a, 0x38, 0x60, 0x4e, 0xcf, 0x0f, 0x81, 0x9f, 0x5a, 0x71, 0xdf, 0xcb, 0x1c, 0xc2, 0x0a, - 0x40, 0x32, 0x6b, 0x30, 0xd7, 0x41, 0xc3, 0xba, 0x2a, 0x8b, 0x27, 0xa1, 0xf4, 0x03, 0x98, 0x4b, - 0x78, 0xfc, 0xa7, 0x04, 0xe1, 0xf4, 0x20, 0x60, 0x97, 0xf3, 0x5a, 0x9f, 0x8b, 0x68, 0xed, 0x55, - 0x74, 0x92, 0x04, 0xd0, 0x91, 0x92, 0x39, 0xbd, 0x00, 0x42, 0x89, 0xdf, 0xa1, 0x4a, 0xd6, 0x98, - 0xcb, 0x24, 0xab, 0x1b, 0xeb, 0xc6, 0x46, 0x75, 0xeb, 0x01, 0x99, 0x4a, 0x32, 0xe1, 0x93, 0xb8, - 0xef, 0x65, 0x0e, 0x41, 0xb2, 0x68, 0x32, 0x68, 0x93, 0xe7, 0xdd, 0x63, 0x70, 0xe4, 0x3e, 0x48, - 0x66, 0xe3, 0xe1, 0xa8, 0x59, 0x48, 0x47, 0x4d, 0x34, 0xf5, 0xd1, 0x09, 0x15, 0x1f, 0xa2, 0xb2, - 0x88, 0xc1, 0xa9, 0x17, 0x15, 0xbd, 0x4d, 0x7e, 0x23, 0x38, 0xb9, 0xdc, 0xde, 0x61, 0x0c, 0x8e, - 0xfd, 0x97, 0xc6, 0x97, 0x33, 0x8b, 0x2a, 0x18, 0x7e, 0x83, 0x96, 0x85, 0x64, 0x32, 0x11, 0xf5, - 0x92, 0xc2, 0x3e, 0xbc, 0x19, 0x56, 0xa5, 0xda, 0xff, 0x68, 0xf0, 0x72, 0x6e, 0x53, 0x8d, 0x6c, - 0x0d, 0x0d, 0x54, 0xbb, 0x9c, 0xb2, 0xe7, 0x0b, 0x89, 0xdf, 0xce, 0x89, 0x45, 0xae, 0x27, 0x56, - 0x96, 0xad, 0xa4, 0x5a, 0xd3, 0x25, 0x2b, 0x63, 0xcf, 0x8c, 0x50, 0x14, 0x2d, 0xf9, 0x12, 0x02, - 0x51, 0x2f, 0xae, 0x97, 0x36, 0xaa, 0x5b, 0x9b, 0x37, 0x1a, 0xc9, 0xfe, 0x5b, 0x93, 0x97, 0x9e, - 0x66, 0x0c, 0x9a, 0xa3, 0x5a, 0x47, 0xe8, 0xbf, 0xb9, 0xe1, 0xa3, 0x84, 0x3b, 0x80, 0xf7, 0x50, - 0x2d, 0x06, 0x2e, 0x7c, 0x21, 0x21, 0x94, 0x79, 0xcc, 0x01, 0x0b, 0x40, 0xcd, 0xb5, 0x6a, 0xd7, - 
0xd3, 0x51, 0xb3, 0xf6, 0x62, 0xc1, 0x39, 0x5d, 0x98, 0xd5, 0xfa, 0xb2, 0x40, 0xb2, 0xec, 0xba, - 0xf0, 0x7d, 0x54, 0x61, 0xca, 0x03, 0x5c, 0xa3, 0x27, 0x12, 0x74, 0xb4, 0x9f, 0x4e, 0x22, 0xd4, - 0xb5, 0xaa, 0xf6, 0xf4, 0xb6, 0xdc, 0xf0, 0x5a, 0x55, 0xea, 0xcc, 0xb5, 0x2a, 0x9b, 0x6a, 0x64, - 0xd6, 0x4a, 0x18, 0xb9, 0xf9, 0x94, 0xa5, 0x5f, 0x5b, 0x39, 0xd0, 0x7e, 0x3a, 0x89, 0x68, 0xfd, - 0x28, 0x2d, 0x90, 0x4e, 0xed, 0xc7, 0xcc, 0x4c, 0xae, 0x9a, 0xa9, 0x32, 0x37, 0x93, 0x3b, 0x99, - 0xc9, 0xc5, 0x9f, 0x0c, 0x84, 0xd9, 0x04, 0xb1, 0x3f, 0xde, 0x9f, 0xfc, 0x92, 0x9f, 0xdd, 0x62, - 0x6f, 0x49, 0x67, 0x8e, 0xb6, 0x1b, 0x4a, 0x7e, 0x6a, 0x37, 0x74, 0x17, 0x78, 0x3e, 0x80, 0x2e, - 0x68, 0x01, 0x1f, 0xa3, 0x6a, 0xee, 0xdd, 0xe5, 0x3c, 0xe2, 0xfa, 0x25, 0x6d, 0x5c, 0xa3, 0x23, - 0x15, 0x6f, 0x9b, 0xe9, 0xa8, 0x59, 0xed, 0x4c, 0x01, 0xdf, 0x47, 0xcd, 0xea, 0xcc, 0x39, 0x9d, - 0x85, 0x67, 0xb5, 0x5c, 0x98, 0xd6, 0x2a, 0xdf, 0xa6, 0xd6, 0x0e, 0x5c, 0x5d, 0x6b, 0x06, 0xde, - 0xd8, 0x45, 0xff, 0x5f, 0x21, 0x11, 0x5e, 0x43, 0xa5, 0x3e, 0x9c, 0xe6, 0x9b, 0x48, 0xb3, 0x4f, - 0x5c, 0x43, 0x4b, 0x03, 0x76, 0x92, 0xe4, 0x1b, 0xb7, 0x4a, 0x73, 0xe3, 0x49, 0x71, 0xdb, 0x68, - 0x7d, 0x30, 0xd0, 0x6c, 0x0d, 0xbc, 0x87, 0xca, 0xd9, 0xef, 0x55, 0xbf, 0xfc, 0x7b, 0xd7, 0x7b, - 0xf9, 0x2f, 0xfd, 0x00, 0xa6, 0x7f, 0xb0, 0xcc, 0xa2, 0x8a, 0x82, 0xef, 0xa2, 0x95, 0x00, 0x84, - 0x60, 0x9e, 0xae, 0x6c, 0xff, 0xab, 0x83, 0x56, 0xf6, 0x73, 0x37, 0x1d, 0x9f, 0xdb, 0x64, 0x78, - 0x61, 0x16, 0xce, 0x2e, 0xcc, 0xc2, 0xf9, 0x85, 0x59, 0x78, 0x9f, 0x9a, 0xc6, 0x30, 0x35, 0x8d, - 0xb3, 0xd4, 0x34, 0xce, 0x53, 0xd3, 0xf8, 0x9a, 0x9a, 0xc6, 0xc7, 0x6f, 0x66, 0xe1, 0x75, 0x65, - 0x2c, 0xdc, 0xcf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x42, 0xba, 0xdb, 0x12, 0x1a, 0x07, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/generated.proto b/vendor/k8s.io/api/storage/v1alpha1/generated.proto deleted file mode 100644 index ccb9475..0000000 --- a/vendor/k8s.io/api/storage/v1alpha1/generated.proto +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.storage.v1alpha1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1alpha1"; - -// VolumeAttachment captures the intent to attach or detach the specified volume -// to/from the specified node. -// -// VolumeAttachment objects are non-namespaced. -message VolumeAttachment { - // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired attach/detach volume behavior. - // Populated by the Kubernetes system. 
- optional VolumeAttachmentSpec spec = 2; - - // Status of the VolumeAttachment request. - // Populated by the entity completing the attach or detach - // operation, i.e. the external-attacher. - // +optional - optional VolumeAttachmentStatus status = 3; -} - -// VolumeAttachmentList is a collection of VolumeAttachment objects. -message VolumeAttachmentList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of VolumeAttachments - repeated VolumeAttachment items = 2; -} - -// VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. -// Exactly one member can be set. -message VolumeAttachmentSource { - // Name of the persistent volume to attach. - // +optional - optional string persistentVolumeName = 1; -} - -// VolumeAttachmentSpec is the specification of a VolumeAttachment request. -message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this - // request. This is the name returned by GetPluginName(). - optional string attacher = 1; - - // Source represents the volume that should be attached. - optional VolumeAttachmentSource source = 2; - - // The node that the volume should be attached to. - optional string nodeName = 3; -} - -// VolumeAttachmentStatus is the status of a VolumeAttachment request. -message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - optional bool attached = 1; - - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed - // into subsequent WaitForAttach or Mount calls. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - map attachmentMetadata = 2; - - // The last error encountered during attach operation, if any. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - optional VolumeError attachError = 3; - - // The last error encountered during detach operation, if any. - // This field must only be set by the entity completing the detach - // operation, i.e. the external-attacher. - // +optional - optional VolumeError detachError = 4; -} - -// VolumeError captures an error encountered during a volume operation. -message VolumeError { - // Time the error was encountered. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - - // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive - // information. - // +optional - optional string message = 2; -} - diff --git a/vendor/k8s.io/api/storage/v1alpha1/register.go b/vendor/k8s.io/api/storage/v1alpha1/register.go deleted file mode 100644 index 7b81ee4..0000000 --- a/vendor/k8s.io/api/storage/v1alpha1/register.go +++ /dev/null @@ -1,50 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "storage.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - AddToScheme = SchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &VolumeAttachment{}, - &VolumeAttachmentList{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types.go b/vendor/k8s.io/api/storage/v1alpha1/types.go deleted file mode 100644 index 964bb5f..0000000 --- a/vendor/k8s.io/api/storage/v1alpha1/types.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttachment captures the intent to attach or detach the specified volume -// to/from the specified node. -// -// VolumeAttachment objects are non-namespaced. -type VolumeAttachment struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired attach/detach volume behavior. - // Populated by the Kubernetes system. - Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status of the VolumeAttachment request. - // Populated by the entity completing the attach or detach - // operation, i.e. the external-attacher. - // +optional - Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttachmentList is a collection of VolumeAttachment objects. 
-type VolumeAttachmentList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of VolumeAttachments - Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VolumeAttachmentSpec is the specification of a VolumeAttachment request. -type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this - // request. This is the name returned by GetPluginName(). - Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - - // Source represents the volume that should be attached. - Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - - // The node that the volume should be attached to. - NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` -} - -// VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. -// Exactly one member can be set. -type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. - // +optional - PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - - // Placeholder for *VolumeSource to accommodate inline volumes in pods. -} - -// VolumeAttachmentStatus is the status of a VolumeAttachment request. -type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed - // into subsequent WaitForAttach or Mount calls. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - - // The last error encountered during attach operation, if any. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - - // The last error encountered during detach operation, if any. - // This field must only be set by the entity completing the detach - // operation, i.e. the external-attacher. - // +optional - DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` -} - -// VolumeError captures an error encountered during a volume operation. -type VolumeError struct { - // Time the error was encountered. - // +optional - Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - - // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive - // information. 
- // +optional - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} diff --git a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go deleted file mode 100644 index 32d7dcc..0000000 --- a/vendor/k8s.io/api/storage/v1alpha1/types_swagger_doc_generated.go +++ /dev/null @@ -1,93 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1alpha1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_VolumeAttachment = map[string]string{ - "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", -} - -func (VolumeAttachment) SwaggerDoc() map[string]string { - return map_VolumeAttachment -} - -var map_VolumeAttachmentList = map[string]string{ - "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", -} - -func (VolumeAttachmentList) SwaggerDoc() map[string]string { - return map_VolumeAttachmentList -} - -var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", -} - -func (VolumeAttachmentSource) SwaggerDoc() map[string]string { - return map_VolumeAttachmentSource -} - -var map_VolumeAttachmentSpec = map[string]string{ - "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", -} - -func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { - return map_VolumeAttachmentSpec -} - -var map_VolumeAttachmentStatus = map[string]string{ - "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", -} - -func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { - return map_VolumeAttachmentStatus -} - -var map_VolumeError = map[string]string{ - "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", -} - -func (VolumeError) SwaggerDoc() map[string]string { - return map_VolumeError -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go deleted file mode 100644 index e27c6ff..0000000 --- a/vendor/k8s.io/api/storage/v1alpha1/zz_generated.deepcopy.go +++ /dev/null @@ -1,174 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. 
-func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { - if in == nil { - return nil - } - out := new(VolumeAttachment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttachment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VolumeAttachment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. -func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { - if in == nil { - return nil - } - out := new(VolumeAttachmentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { - *out = *in - if in.PersistentVolumeName != nil { - in, out := &in.PersistentVolumeName, &out.PersistentVolumeName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. -func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { - if in == nil { - return nil - } - out := new(VolumeAttachmentSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { - *out = *in - in.Source.DeepCopyInto(&out.Source) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. -func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { - if in == nil { - return nil - } - out := new(VolumeAttachmentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { - *out = *in - if in.AttachmentMetadata != nil { - in, out := &in.AttachmentMetadata, &out.AttachmentMetadata - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AttachError != nil { - in, out := &in.AttachError, &out.AttachError - *out = new(VolumeError) - (*in).DeepCopyInto(*out) - } - if in.DetachError != nil { - in, out := &in.DetachError, &out.DetachError - *out = new(VolumeError) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. 
-func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { - if in == nil { - return nil - } - out := new(VolumeAttachmentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeError) DeepCopyInto(out *VolumeError) { - *out = *in - in.Time.DeepCopyInto(&out.Time) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. -func (in *VolumeError) DeepCopy() *VolumeError { - if in == nil { - return nil - } - out := new(VolumeError) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/api/storage/v1beta1/doc.go b/vendor/k8s.io/api/storage/v1beta1/doc.go deleted file mode 100644 index 8957a4c..0000000 --- a/vendor/k8s.io/api/storage/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +groupName=storage.k8s.io -// +k8s:openapi-gen=true -package v1beta1 // import "k8s.io/api/storage/v1beta1" diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go b/vendor/k8s.io/api/storage/v1beta1/generated.pb.go deleted file mode 100644 index fed8c7a..0000000 --- a/vendor/k8s.io/api/storage/v1beta1/generated.pb.go +++ /dev/null @@ -1,2261 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto - - It has these top-level messages: - StorageClass - StorageClassList - VolumeAttachment - VolumeAttachmentList - VolumeAttachmentSource - VolumeAttachmentSpec - VolumeAttachmentStatus - VolumeError -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import k8s_io_api_core_v1 "k8s.io/api/core/v1" - -import github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *StorageClass) Reset() { *m = StorageClass{} } -func (*StorageClass) ProtoMessage() {} -func (*StorageClass) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *StorageClassList) Reset() { *m = StorageClassList{} } -func (*StorageClassList) ProtoMessage() {} -func (*StorageClassList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{1} } - -func (m *VolumeAttachment) Reset() { *m = VolumeAttachment{} } -func (*VolumeAttachment) ProtoMessage() {} -func (*VolumeAttachment) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func (m *VolumeAttachmentList) Reset() { *m = VolumeAttachmentList{} } -func (*VolumeAttachmentList) ProtoMessage() {} -func (*VolumeAttachmentList) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{3} } - -func (m *VolumeAttachmentSource) Reset() { *m = VolumeAttachmentSource{} } -func (*VolumeAttachmentSource) ProtoMessage() {} -func (*VolumeAttachmentSource) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{4} } - -func (m *VolumeAttachmentSpec) Reset() { *m = VolumeAttachmentSpec{} } -func (*VolumeAttachmentSpec) ProtoMessage() {} -func (*VolumeAttachmentSpec) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{5} } - -func (m *VolumeAttachmentStatus) Reset() { *m = VolumeAttachmentStatus{} } -func (*VolumeAttachmentStatus) ProtoMessage() {} -func (*VolumeAttachmentStatus) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{6} } - -func (m *VolumeError) Reset() { *m = VolumeError{} } -func (*VolumeError) ProtoMessage() {} -func (*VolumeError) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{7} } - -func init() { - proto.RegisterType((*StorageClass)(nil), "k8s.io.api.storage.v1beta1.StorageClass") - proto.RegisterType((*StorageClassList)(nil), "k8s.io.api.storage.v1beta1.StorageClassList") - proto.RegisterType((*VolumeAttachment)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachment") - proto.RegisterType((*VolumeAttachmentList)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentList") - proto.RegisterType((*VolumeAttachmentSource)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSource") - proto.RegisterType((*VolumeAttachmentSpec)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentSpec") - proto.RegisterType((*VolumeAttachmentStatus)(nil), "k8s.io.api.storage.v1beta1.VolumeAttachmentStatus") - proto.RegisterType((*VolumeError)(nil), "k8s.io.api.storage.v1beta1.VolumeError") -} -func (m *StorageClass) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageClass) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Provisioner))) - i += copy(dAtA[i:], m.Provisioner) - if len(m.Parameters) > 0 { - keysForParameters := make([]string, 0, len(m.Parameters)) - for k := range m.Parameters { - keysForParameters = append(keysForParameters, string(k)) - } - 
github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - for _, k := range keysForParameters { - dAtA[i] = 0x1a - i++ - v := m.Parameters[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.ReclaimPolicy != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.ReclaimPolicy))) - i += copy(dAtA[i:], *m.ReclaimPolicy) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - dAtA[i] = 0x2a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.AllowVolumeExpansion != nil { - dAtA[i] = 0x30 - i++ - if *m.AllowVolumeExpansion { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - } - if m.VolumeBindingMode != nil { - dAtA[i] = 0x3a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.VolumeBindingMode))) - i += copy(dAtA[i:], *m.VolumeBindingMode) - } - if len(m.AllowedTopologies) > 0 { - for _, msg := range m.AllowedTopologies { - dAtA[i] = 0x42 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *StorageClassList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *StorageClassList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n2, err := m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *VolumeAttachment) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachment) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n3, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Spec.Size())) - n4, err := m.Spec.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Status.Size())) - n5, err := m.Status.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n5 - return i, nil -} - -func (m *VolumeAttachmentList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ListMeta.Size())) - n6, err := 
m.ListMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n6 - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *VolumeAttachmentSource) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentSource) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.PersistentVolumeName != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(*m.PersistentVolumeName))) - i += copy(dAtA[i:], *m.PersistentVolumeName) - } - return i, nil -} - -func (m *VolumeAttachmentSpec) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentSpec) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Attacher))) - i += copy(dAtA[i:], m.Attacher) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Source.Size())) - n7, err := m.Source.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n7 - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.NodeName))) - i += copy(dAtA[i:], m.NodeName) - return i, nil -} - -func (m *VolumeAttachmentStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeAttachmentStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0x8 - i++ - if m.Attached { - dAtA[i] = 1 - } else { - dAtA[i] = 0 - } - i++ - if len(m.AttachmentMetadata) > 0 { - keysForAttachmentMetadata := make([]string, 0, len(m.AttachmentMetadata)) - for k := range m.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, string(k)) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - for _, k := range keysForAttachmentMetadata { - dAtA[i] = 0x12 - i++ - v := m.AttachmentMetadata[string(k)] - mapSize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - i = encodeVarintGenerated(dAtA, i, uint64(mapSize)) - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(k))) - i += copy(dAtA[i:], k) - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(v))) - i += copy(dAtA[i:], v) - } - } - if m.AttachError != nil { - dAtA[i] = 0x1a - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.AttachError.Size())) - n8, err := m.AttachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n8 - } - if m.DetachError != nil { - dAtA[i] = 0x22 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.DetachError.Size())) - n9, err := m.DetachError.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n9 - } - return i, nil -} - -func (m *VolumeError) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *VolumeError) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - 
var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.Time.Size())) - n10, err := m.Time.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n10 - dAtA[i] = 0x12 - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.Message))) - i += copy(dAtA[i:], m.Message) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *StorageClass) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Provisioner) - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Parameters) > 0 { - for k, v := range m.Parameters { - _ = k - _ = v - mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.ReclaimPolicy != nil { - l = len(*m.ReclaimPolicy) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.MountOptions) > 0 { - for _, s := range m.MountOptions { - l = len(s) - n += 1 + l + sovGenerated(uint64(l)) - } - } - if m.AllowVolumeExpansion != nil { - n += 2 - } - if m.VolumeBindingMode != nil { - l = len(*m.VolumeBindingMode) - n += 1 + l + sovGenerated(uint64(l)) - } - if len(m.AllowedTopologies) > 0 { - for _, e := range m.AllowedTopologies { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *StorageClassList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *VolumeAttachment) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Spec.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = m.Status.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeAttachmentList) Size() (n int) { - var l int - _ = l - l = m.ListMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *VolumeAttachmentSource) Size() (n int) { - var l int - _ = l - if m.PersistentVolumeName != nil { - l = len(*m.PersistentVolumeName) - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeAttachmentSpec) Size() (n int) { - var l int - _ = l - l = len(m.Attacher) - n += 1 + l + sovGenerated(uint64(l)) - l = m.Source.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.NodeName) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *VolumeAttachmentStatus) Size() (n int) { - var l int - _ = l - n += 2 - if len(m.AttachmentMetadata) > 0 { - for k, v := range m.AttachmentMetadata { - _ = k - _ = v - mapEntrySize := 1 + len(k) 
+ sovGenerated(uint64(len(k))) + 1 + len(v) + sovGenerated(uint64(len(v))) - n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) - } - } - if m.AttachError != nil { - l = m.AttachError.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - if m.DetachError != nil { - l = m.DetachError.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - return n -} - -func (m *VolumeError) Size() (n int) { - var l int - _ = l - l = m.Time.Size() - n += 1 + l + sovGenerated(uint64(l)) - l = len(m.Message) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *StorageClass) String() string { - if this == nil { - return "nil" - } - keysForParameters := make([]string, 0, len(this.Parameters)) - for k := range this.Parameters { - keysForParameters = append(keysForParameters, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForParameters) - mapStringForParameters := "map[string]string{" - for _, k := range keysForParameters { - mapStringForParameters += fmt.Sprintf("%v: %v,", k, this.Parameters[k]) - } - mapStringForParameters += "}" - s := strings.Join([]string{`&StorageClass{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Provisioner:` + fmt.Sprintf("%v", this.Provisioner) + `,`, - `Parameters:` + mapStringForParameters + `,`, - `ReclaimPolicy:` + valueToStringGenerated(this.ReclaimPolicy) + `,`, - `MountOptions:` + fmt.Sprintf("%v", this.MountOptions) + `,`, - `AllowVolumeExpansion:` + valueToStringGenerated(this.AllowVolumeExpansion) + `,`, - `VolumeBindingMode:` + valueToStringGenerated(this.VolumeBindingMode) + `,`, - `AllowedTopologies:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.AllowedTopologies), "TopologySelectorTerm", "k8s_io_api_core_v1.TopologySelectorTerm", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *StorageClassList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&StorageClassList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), "StorageClass", "StorageClass", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachment) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachment{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `Spec:` + strings.Replace(strings.Replace(this.Spec.String(), "VolumeAttachmentSpec", "VolumeAttachmentSpec", 1), `&`, ``, 1) + `,`, - `Status:` + strings.Replace(strings.Replace(this.Status.String(), "VolumeAttachmentStatus", "VolumeAttachmentStatus", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentList{`, - `ListMeta:` + strings.Replace(strings.Replace(this.ListMeta.String(), "ListMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ListMeta", 1), `&`, ``, 1) + `,`, - `Items:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Items), 
"VolumeAttachment", "VolumeAttachment", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSource) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSource{`, - `PersistentVolumeName:` + valueToStringGenerated(this.PersistentVolumeName) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentSpec) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeAttachmentSpec{`, - `Attacher:` + fmt.Sprintf("%v", this.Attacher) + `,`, - `Source:` + strings.Replace(strings.Replace(this.Source.String(), "VolumeAttachmentSource", "VolumeAttachmentSource", 1), `&`, ``, 1) + `,`, - `NodeName:` + fmt.Sprintf("%v", this.NodeName) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeAttachmentStatus) String() string { - if this == nil { - return "nil" - } - keysForAttachmentMetadata := make([]string, 0, len(this.AttachmentMetadata)) - for k := range this.AttachmentMetadata { - keysForAttachmentMetadata = append(keysForAttachmentMetadata, k) - } - github_com_gogo_protobuf_sortkeys.Strings(keysForAttachmentMetadata) - mapStringForAttachmentMetadata := "map[string]string{" - for _, k := range keysForAttachmentMetadata { - mapStringForAttachmentMetadata += fmt.Sprintf("%v: %v,", k, this.AttachmentMetadata[k]) - } - mapStringForAttachmentMetadata += "}" - s := strings.Join([]string{`&VolumeAttachmentStatus{`, - `Attached:` + fmt.Sprintf("%v", this.Attached) + `,`, - `AttachmentMetadata:` + mapStringForAttachmentMetadata + `,`, - `AttachError:` + strings.Replace(fmt.Sprintf("%v", this.AttachError), "VolumeError", "VolumeError", 1) + `,`, - `DetachError:` + strings.Replace(fmt.Sprintf("%v", this.DetachError), "VolumeError", "VolumeError", 1) + `,`, - `}`, - }, "") - return s -} -func (this *VolumeError) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&VolumeError{`, - `Time:` + strings.Replace(strings.Replace(this.Time.String(), "Time", "k8s_io_apimachinery_pkg_apis_meta_v1.Time", 1), `&`, ``, 1) + `,`, - `Message:` + fmt.Sprintf("%v", this.Message) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *StorageClass) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClass: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClass: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return 
io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Provisioner", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Provisioner = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Parameters", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.Parameters == nil { - m.Parameters = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.Parameters[mapkey] = mapvalue - } else { - var mapvalue string - m.Parameters[mapkey] = mapvalue - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ReclaimPolicy", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := k8s_io_api_core_v1.PersistentVolumeReclaimPolicy(dAtA[iNdEx:postIndex]) - m.ReclaimPolicy = &s - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field MountOptions", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.MountOptions = append(m.MountOptions, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowVolumeExpansion", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - b := bool(v != 0) - m.AllowVolumeExpansion = &b - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field VolumeBindingMode", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := VolumeBindingMode(dAtA[iNdEx:postIndex]) - m.VolumeBindingMode = &s - iNdEx = postIndex - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AllowedTopologies", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.AllowedTopologies = append(m.AllowedTopologies, k8s_io_api_core_v1.TopologySelectorTerm{}) - if err := m.AllowedTopologies[len(m.AllowedTopologies)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *StorageClassList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { 
- return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: StorageClassList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: StorageClassList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, StorageClass{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachment) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachment: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachment: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if 
shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Status", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Status.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, VolumeAttachment{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return 
ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentSource) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentSource: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentSource: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PersistentVolumeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.PersistentVolumeName = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentSpec) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentSpec: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentSpec: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Attacher", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Attacher = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Source", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= 
(int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Source.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeName", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.NodeName = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeAttachmentStatus) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: VolumeAttachmentStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeAttachmentStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Attached", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Attached = bool(v != 0) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachmentMetadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return 
ErrInvalidLengthGenerated - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - if m.AttachmentMetadata == nil { - m.AttachmentMetadata = make(map[string]string) - } - if iNdEx < postIndex { - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthGenerated - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - m.AttachmentMetadata[mapkey] = mapvalue - } else { - var mapvalue string - m.AttachmentMetadata[mapkey] = mapvalue - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field AttachError", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.AttachError == nil { - m.AttachError = &VolumeError{} - } - if err := m.AttachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DetachError", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.DetachError == nil { - m.DetachError = &VolumeError{} - } - if err := m.DetachError.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *VolumeError) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if 
wireType == 4 { - return fmt.Errorf("proto: VolumeError: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: VolumeError: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Time.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Message = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - 
} - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/api/storage/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 988 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x56, 0x4d, 0x6f, 0x1b, 0x45, - 0x18, 0xce, 0xc6, 0xf9, 0x70, 0xc6, 0x09, 0x4d, 0x86, 0x08, 0x8c, 0x0f, 0x76, 0xe4, 0x0b, 0xa6, - 0x6a, 0x77, 0x9b, 0xa8, 0xa0, 0x08, 0x89, 0x83, 0xb7, 0xe4, 0x00, 0x8a, 0xdb, 0x30, 0x89, 0x2a, - 0x54, 0x71, 0x60, 0xb2, 0xfb, 0x76, 0xb3, 0x78, 0x77, 0x67, 0x99, 0x19, 0x1b, 0x72, 0xe3, 0xc4, - 0x19, 0x71, 0xe0, 0x17, 0xf0, 0x3f, 0x38, 0x92, 0x13, 0xea, 0xb1, 0x27, 0x8b, 0x2c, 0xff, 0x22, - 0xe2, 0x80, 0x66, 0x76, 0x62, 0xaf, 0xbd, 0x0e, 0x6d, 0x7a, 0xe8, 0xcd, 0xef, 0xc7, 0xf3, 0xbc, - 0xdf, 0xb3, 0x46, 0x8f, 0xfa, 0xfb, 0xc2, 0x0e, 0x99, 0xd3, 0x1f, 0x9c, 0x02, 0x4f, 0x40, 0x82, - 0x70, 0x86, 0x90, 0xf8, 0x8c, 0x3b, 0xc6, 0x40, 0xd3, 0xd0, 0x11, 0x92, 0x71, 0x1a, 0x80, 0x33, - 0xdc, 0x3d, 0x05, 0x49, 0x77, 0x9d, 0x00, 0x12, 0xe0, 0x54, 0x82, 0x6f, 0xa7, 0x9c, 0x49, 0x86, - 0x1b, 0xb9, 0xaf, 0x4d, 0xd3, 0xd0, 0x36, 0xbe, 0xb6, 0xf1, 0x6d, 0xdc, 0x0f, 0x42, 0x79, 0x36, - 0x38, 0xb5, 0x3d, 0x16, 0x3b, 0x01, 0x0b, 0x98, 0xa3, 0x21, 0xa7, 0x83, 0xe7, 0x5a, 0xd2, 0x82, - 0xfe, 0x95, 0x53, 0x35, 0xda, 0x85, 0xb0, 0x1e, 0xe3, 0x2a, 0xe6, 0x6c, 0xb8, 0xc6, 0xc3, 0x89, - 0x4f, 0x4c, 0xbd, 0xb3, 0x30, 0x01, 0x7e, 0xee, 0xa4, 0xfd, 0x40, 0x29, 0x84, 0x13, 0x83, 0xa4, - 0xf3, 0x50, 0xce, 0x4d, 0x28, 0x3e, 0x48, 0x64, 0x18, 0x43, 0x09, 0xf0, 0xc9, 0xab, 0x00, 0xc2, - 0x3b, 0x83, 0x98, 0xce, 0xe2, 0xda, 0xbf, 0xae, 0xa0, 0xf5, 0xe3, 0xbc, 0x0b, 0x8f, 0x22, 0x2a, - 0x04, 0xfe, 0x16, 0x55, 0x55, 0x52, 0x3e, 0x95, 0xb4, 0x6e, 0xed, 0x58, 0x9d, 0xda, 0xde, 0x03, - 0x7b, 0xd2, 0xb1, 0x31, 0xb7, 0x9d, 0xf6, 0x03, 0xa5, 0x10, 0xb6, 0xf2, 0xb6, 0x87, 0xbb, 0xf6, - 0x93, 0xd3, 0xef, 0xc0, 0x93, 0x3d, 0x90, 0xd4, 0xc5, 0x17, 0xa3, 0xd6, 0x42, 0x36, 0x6a, 0xa1, - 0x89, 0x8e, 0x8c, 0x59, 0xf1, 0xc7, 0xa8, 0x96, 0x72, 0x36, 0x0c, 0x45, 0xc8, 0x12, 0xe0, 0xf5, - 0xc5, 0x1d, 0xab, 0xb3, 0xe6, 0xbe, 0x6b, 0x20, 0xb5, 0xa3, 0x89, 0x89, 0x14, 0xfd, 0x70, 0x84, - 0x50, 0x4a, 0x39, 0x8d, 0x41, 0x02, 0x17, 0xf5, 0xca, 0x4e, 0xa5, 0x53, 0xdb, 0xdb, 0xb7, 0x6f, - 0x1e, 0xa6, 0x5d, 0x2c, 0xcb, 0x3e, 0x1a, 0x43, 0x0f, 0x12, 0xc9, 0xcf, 0x27, 0x29, 0x4e, 0x0c, - 0xa4, 0xc0, 0x8f, 0xfb, 0x68, 0x83, 0x83, 0x17, 0xd1, 0x30, 0x3e, 0x62, 0x51, 0xe8, 0x9d, 0xd7, - 0x97, 0x74, 0x9a, 0x07, 0xd9, 0xa8, 0xb5, 0x41, 0x8a, 0x86, 0xab, 0x51, 0xeb, 0x41, 0x79, 0x0d, - 0xec, 0x23, 0xe0, 0x22, 0x14, 0x12, 0x12, 0xf9, 0x94, 0x45, 0x83, 0x18, 0xa6, 0x30, 0x64, 0x9a, - 0x1b, 0x3f, 0x44, 0xeb, 0x31, 0x1b, 0x24, 0xf2, 0x49, 0x2a, 0x43, 0x96, 0x88, 0xfa, 0xf2, 0x4e, - 0xa5, 0xb3, 0xe6, 0x6e, 0x66, 0xa3, 0xd6, 0x7a, 0xaf, 0xa0, 0x27, 0x53, 0x5e, 0xf8, 0x10, 0x6d, - 0xd3, 0x28, 0x62, 0x3f, 0xe4, 0x01, 0x0e, 0x7e, 0x4c, 0x69, 0xa2, 0x5a, 0x55, 0x5f, 0xd9, 0xb1, - 0x3a, 0x55, 0xb7, 0x9e, 0x8d, 0x5a, 0xdb, 0xdd, 0x39, 0x76, 0x32, 0x17, 0x85, 0xbf, 0x46, 0x5b, - 0x43, 0xad, 0x72, 0xc3, 0xc4, 0x0f, 0x93, 0xa0, 0xc7, 0x7c, 0xa8, 0xaf, 0xea, 0xa2, 0xef, 0x66, - 0xa3, 0xd6, 0xd6, 0xd3, 0x59, 0xe3, 0xd5, 0x3c, 0x25, 0x29, 0x93, 0xe0, 0xef, 0xd1, 0x96, 0x8e, - 0x08, 0xfe, 0x09, 0x4b, 0x59, 0xc4, 0x82, 0x10, 0x44, 0xbd, 0xaa, 0xe7, 0xd7, 0x29, 
0xce, 0x4f, - 0xb5, 0x4e, 0x2d, 0x92, 0xf1, 0x3a, 0x3f, 0x86, 0x08, 0x3c, 0xc9, 0xf8, 0x09, 0xf0, 0xd8, 0xfd, - 0xc0, 0xcc, 0x6b, 0xab, 0x3b, 0x4b, 0x45, 0xca, 0xec, 0x8d, 0xcf, 0xd0, 0x9d, 0x99, 0x81, 0xe3, - 0x4d, 0x54, 0xe9, 0xc3, 0xb9, 0x5e, 0xe9, 0x35, 0xa2, 0x7e, 0xe2, 0x6d, 0xb4, 0x3c, 0xa4, 0xd1, - 0x00, 0xf2, 0x0d, 0x24, 0xb9, 0xf0, 0xe9, 0xe2, 0xbe, 0xd5, 0xfe, 0xc3, 0x42, 0x9b, 0xc5, 0xed, - 0x39, 0x0c, 0x85, 0xc4, 0xdf, 0x94, 0x0e, 0xc3, 0x7e, 0xbd, 0xc3, 0x50, 0x68, 0x7d, 0x16, 0x9b, - 0xa6, 0x86, 0xea, 0xb5, 0xa6, 0x70, 0x14, 0x3d, 0xb4, 0x1c, 0x4a, 0x88, 0x45, 0x7d, 0xb1, 0xdc, - 0x98, 0xff, 0x5b, 0x6c, 0x77, 0xc3, 0x90, 0x2e, 0x7f, 0xa1, 0xe0, 0x24, 0x67, 0x69, 0xff, 0xbe, - 0x88, 0x36, 0xf3, 0xe1, 0x74, 0xa5, 0xa4, 0xde, 0x59, 0x0c, 0x89, 0x7c, 0x0b, 0xa7, 0x4d, 0xd0, - 0x92, 0x48, 0xc1, 0xd3, 0x1d, 0x9d, 0x66, 0x2f, 0x15, 0x31, 0x9b, 0xdd, 0x71, 0x0a, 0x9e, 0xbb, - 0x6e, 0xd8, 0x97, 0x94, 0x44, 0x34, 0x17, 0x7e, 0x86, 0x56, 0x84, 0xa4, 0x72, 0xa0, 0x6e, 0x5e, - 0xb1, 0xee, 0xdd, 0x8a, 0x55, 0x23, 0xdd, 0x77, 0x0c, 0xef, 0x4a, 0x2e, 0x13, 0xc3, 0xd8, 0xfe, - 0xd3, 0x42, 0xdb, 0xb3, 0x90, 0xb7, 0x30, 0xec, 0xaf, 0xa6, 0x87, 0x7d, 0xef, 0x36, 0x15, 0xdd, - 0x30, 0xf0, 0xe7, 0xe8, 0xbd, 0x52, 0xed, 0x6c, 0xc0, 0x3d, 0x50, 0xcf, 0x44, 0x3a, 0xf3, 0x18, - 0x3d, 0xa6, 0x31, 0xe4, 0x97, 0x90, 0x3f, 0x13, 0x47, 0x73, 0xec, 0x64, 0x2e, 0xaa, 0xfd, 0xd7, - 0x9c, 0x8e, 0xa9, 0x61, 0xe1, 0x7b, 0xa8, 0x4a, 0xb5, 0x06, 0xb8, 0xa1, 0x1e, 0x77, 0xa0, 0x6b, - 0xf4, 0x64, 0xec, 0xa1, 0x87, 0xaa, 0xd3, 0x33, 0xab, 0x72, 0xbb, 0xa1, 0x6a, 0x64, 0x61, 0xa8, - 0x5a, 0x26, 0x86, 0x51, 0x65, 0x92, 0x30, 0x3f, 0x2f, 0xb2, 0x32, 0x9d, 0xc9, 0x63, 0xa3, 0x27, - 0x63, 0x8f, 0xf6, 0xbf, 0x95, 0x39, 0x9d, 0xd3, 0xdb, 0x51, 0x28, 0xc9, 0xd7, 0x25, 0x55, 0x4b, - 0x25, 0xf9, 0xe3, 0x92, 0x7c, 0xfc, 0x9b, 0x85, 0x30, 0x1d, 0x53, 0xf4, 0xae, 0xb7, 0x27, 0x1f, - 0xf1, 0x97, 0xb7, 0x5f, 0x5a, 0xbb, 0x5b, 0x22, 0xcb, 0x3f, 0x5d, 0x0d, 0x93, 0x04, 0x2e, 0x3b, - 0x90, 0x39, 0x19, 0xe0, 0x10, 0xd5, 0x72, 0xed, 0x01, 0xe7, 0x8c, 0x9b, 0x2b, 0xfa, 0xf0, 0xd5, - 0x09, 0x69, 0x77, 0xb7, 0xa9, 0x3e, 0xca, 0xdd, 0x09, 0xfe, 0x6a, 0xd4, 0xaa, 0x15, 0xec, 0xa4, - 0xc8, 0xad, 0x42, 0xf9, 0x30, 0x09, 0xb5, 0xf4, 0x06, 0xa1, 0x3e, 0x87, 0x9b, 0x43, 0x15, 0xb8, - 0x1b, 0x07, 0xe8, 0xfd, 0x1b, 0x1a, 0x74, 0xab, 0xa7, 0xfe, 0x67, 0x0b, 0x15, 0x63, 0xe0, 0x43, - 0xb4, 0xa4, 0xfe, 0x2e, 0x99, 0xa3, 0xbf, 0xfb, 0x7a, 0x47, 0x7f, 0x12, 0xc6, 0x30, 0x79, 0xbb, - 0x94, 0x44, 0x34, 0x0b, 0xfe, 0x08, 0xad, 0xc6, 0x20, 0x04, 0x0d, 0x4c, 0x64, 0xf7, 0x8e, 0x71, - 0x5a, 0xed, 0xe5, 0x6a, 0x72, 0x6d, 0x77, 0xef, 0x5f, 0x5c, 0x36, 0x17, 0x5e, 0x5c, 0x36, 0x17, - 0x5e, 0x5e, 0x36, 0x17, 0x7e, 0xca, 0x9a, 0xd6, 0x45, 0xd6, 0xb4, 0x5e, 0x64, 0x4d, 0xeb, 0x65, - 0xd6, 0xb4, 0xfe, 0xce, 0x9a, 0xd6, 0x2f, 0xff, 0x34, 0x17, 0x9e, 0xad, 0x9a, 0xbe, 0xfd, 0x17, - 0x00, 0x00, 0xff, 0xff, 0xb4, 0x63, 0x7e, 0xa7, 0x0b, 0x0b, 0x00, 0x00, -} diff --git a/vendor/k8s.io/api/storage/v1beta1/generated.proto b/vendor/k8s.io/api/storage/v1beta1/generated.proto deleted file mode 100644 index ecf53be..0000000 --- a/vendor/k8s.io/api/storage/v1beta1/generated.proto +++ /dev/null @@ -1,186 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.api.storage.v1beta1; - -import "k8s.io/api/core/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. -// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -message StorageClass { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Provisioner indicates the type of the provisioner. - optional string provisioner = 2; - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - // +optional - map parameters = 3; - - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. - // +optional - optional string reclaimPolicy = 4; - - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - - // mount of the PVs will simply fail if one is invalid. - // +optional - repeated string mountOptions = 5; - - // AllowVolumeExpansion shows whether the storage class allow volume expand - // +optional - optional bool allowVolumeExpansion = 6; - - // VolumeBindingMode indicates how PersistentVolumeClaims should be - // provisioned and bound. When unset, VolumeBindingImmediate is used. - // This field is only honored by servers that enable the VolumeScheduling feature. - // +optional - optional string volumeBindingMode = 7; - - // Restrict the node topologies where volumes can be dynamically provisioned. - // Each volume plugin defines its own supported topology specifications. - // An empty TopologySelectorTerm list means there is no topology restriction. - // This field is only honored by servers that enable the VolumeScheduling feature. - // +optional - repeated k8s.io.api.core.v1.TopologySelectorTerm allowedTopologies = 8; -} - -// StorageClassList is a collection of storage classes. -message StorageClassList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of StorageClasses - repeated StorageClass items = 2; -} - -// VolumeAttachment captures the intent to attach or detach the specified volume -// to/from the specified node. -// -// VolumeAttachment objects are non-namespaced. -message VolumeAttachment { - // Standard object metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; - - // Specification of the desired attach/detach volume behavior. - // Populated by the Kubernetes system. - optional VolumeAttachmentSpec spec = 2; - - // Status of the VolumeAttachment request. - // Populated by the entity completing the attach or detach - // operation, i.e. the external-attacher. - // +optional - optional VolumeAttachmentStatus status = 3; -} - -// VolumeAttachmentList is a collection of VolumeAttachment objects. -message VolumeAttachmentList { - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; - - // Items is the list of VolumeAttachments - repeated VolumeAttachment items = 2; -} - -// VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. -// Exactly one member can be set. -message VolumeAttachmentSource { - // Name of the persistent volume to attach. - // +optional - optional string persistentVolumeName = 1; -} - -// VolumeAttachmentSpec is the specification of a VolumeAttachment request. -message VolumeAttachmentSpec { - // Attacher indicates the name of the volume driver that MUST handle this - // request. This is the name returned by GetPluginName(). - optional string attacher = 1; - - // Source represents the volume that should be attached. - optional VolumeAttachmentSource source = 2; - - // The node that the volume should be attached to. - optional string nodeName = 3; -} - -// VolumeAttachmentStatus is the status of a VolumeAttachment request. -message VolumeAttachmentStatus { - // Indicates the volume is successfully attached. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - optional bool attached = 1; - - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed - // into subsequent WaitForAttach or Mount calls. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - map attachmentMetadata = 2; - - // The last error encountered during attach operation, if any. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - optional VolumeError attachError = 3; - - // The last error encountered during detach operation, if any. - // This field must only be set by the entity completing the detach - // operation, i.e. the external-attacher. - // +optional - optional VolumeError detachError = 4; -} - -// VolumeError captures an error encountered during a volume operation. -message VolumeError { - // Time the error was encountered. - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.Time time = 1; - - // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive - // information. 
- // +optional - optional string message = 2; -} - diff --git a/vendor/k8s.io/api/storage/v1beta1/register.go b/vendor/k8s.io/api/storage/v1beta1/register.go deleted file mode 100644 index 06b0f3d..0000000 --- a/vendor/k8s.io/api/storage/v1beta1/register.go +++ /dev/null @@ -1,56 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name use in this package -const GroupName = "storage.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Resource takes an unqualified resource and returns a Group qualified GroupResource -func Resource(resource string) schema.GroupResource { - return SchemeGroupVersion.WithResource(resource).GroupResource() -} - -var ( - // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. - // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. - SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) - localSchemeBuilder = &SchemeBuilder - AddToScheme = localSchemeBuilder.AddToScheme -) - -// Adds the list of known types to the given scheme. -func addKnownTypes(scheme *runtime.Scheme) error { - scheme.AddKnownTypes(SchemeGroupVersion, - &StorageClass{}, - &StorageClassList{}, - - &VolumeAttachment{}, - &VolumeAttachmentList{}, - ) - - metav1.AddToGroupVersion(scheme, SchemeGroupVersion) - return nil -} diff --git a/vendor/k8s.io/api/storage/v1beta1/types.go b/vendor/k8s.io/api/storage/v1beta1/types.go deleted file mode 100644 index 5702c21..0000000 --- a/vendor/k8s.io/api/storage/v1beta1/types.go +++ /dev/null @@ -1,211 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// StorageClass describes the parameters for a class of storage for -// which PersistentVolumes can be dynamically provisioned. -// -// StorageClasses are non-namespaced; the name of the storage class -// according to etcd is in ObjectMeta.Name. -type StorageClass struct { - metav1.TypeMeta `json:",inline"` - // Standard object's metadata. 
- // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Provisioner indicates the type of the provisioner. - Provisioner string `json:"provisioner" protobuf:"bytes,2,opt,name=provisioner"` - - // Parameters holds the parameters for the provisioner that should - // create volumes of this storage class. - // +optional - Parameters map[string]string `json:"parameters,omitempty" protobuf:"bytes,3,rep,name=parameters"` - - // Dynamically provisioned PersistentVolumes of this storage class are - // created with this reclaimPolicy. Defaults to Delete. - // +optional - ReclaimPolicy *v1.PersistentVolumeReclaimPolicy `json:"reclaimPolicy,omitempty" protobuf:"bytes,4,opt,name=reclaimPolicy,casttype=k8s.io/api/core/v1.PersistentVolumeReclaimPolicy"` - - // Dynamically provisioned PersistentVolumes of this storage class are - // created with these mountOptions, e.g. ["ro", "soft"]. Not validated - - // mount of the PVs will simply fail if one is invalid. - // +optional - MountOptions []string `json:"mountOptions,omitempty" protobuf:"bytes,5,opt,name=mountOptions"` - - // AllowVolumeExpansion shows whether the storage class allow volume expand - // +optional - AllowVolumeExpansion *bool `json:"allowVolumeExpansion,omitempty" protobuf:"varint,6,opt,name=allowVolumeExpansion"` - - // VolumeBindingMode indicates how PersistentVolumeClaims should be - // provisioned and bound. When unset, VolumeBindingImmediate is used. - // This field is only honored by servers that enable the VolumeScheduling feature. - // +optional - VolumeBindingMode *VolumeBindingMode `json:"volumeBindingMode,omitempty" protobuf:"bytes,7,opt,name=volumeBindingMode"` - - // Restrict the node topologies where volumes can be dynamically provisioned. - // Each volume plugin defines its own supported topology specifications. - // An empty TopologySelectorTerm list means there is no topology restriction. - // This field is only honored by servers that enable the VolumeScheduling feature. - // +optional - AllowedTopologies []v1.TopologySelectorTerm `json:"allowedTopologies,omitempty" protobuf:"bytes,8,rep,name=allowedTopologies"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// StorageClassList is a collection of storage classes. -type StorageClassList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of StorageClasses - Items []StorageClass `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VolumeBindingMode indicates how PersistentVolumeClaims should be bound. -type VolumeBindingMode string - -const ( - // VolumeBindingImmediate indicates that PersistentVolumeClaims should be - // immediately provisioned and bound. This is the default mode. - VolumeBindingImmediate VolumeBindingMode = "Immediate" - - // VolumeBindingWaitForFirstConsumer indicates that PersistentVolumeClaims - // should not be provisioned and bound until the first Pod is created that - // references the PeristentVolumeClaim. The volume provisioning and - // binding will occur during Pod scheduing. 
- VolumeBindingWaitForFirstConsumer VolumeBindingMode = "WaitForFirstConsumer" -) - -// +genclient -// +genclient:nonNamespaced -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttachment captures the intent to attach or detach the specified volume -// to/from the specified node. -// -// VolumeAttachment objects are non-namespaced. -type VolumeAttachment struct { - metav1.TypeMeta `json:",inline"` - - // Standard object metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Specification of the desired attach/detach volume behavior. - // Populated by the Kubernetes system. - Spec VolumeAttachmentSpec `json:"spec" protobuf:"bytes,2,opt,name=spec"` - - // Status of the VolumeAttachment request. - // Populated by the entity completing the attach or detach - // operation, i.e. the external-attacher. - // +optional - Status VolumeAttachmentStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"` -} - -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object - -// VolumeAttachmentList is a collection of VolumeAttachment objects. -type VolumeAttachmentList struct { - metav1.TypeMeta `json:",inline"` - // Standard list metadata - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` - - // Items is the list of VolumeAttachments - Items []VolumeAttachment `json:"items" protobuf:"bytes,2,rep,name=items"` -} - -// VolumeAttachmentSpec is the specification of a VolumeAttachment request. -type VolumeAttachmentSpec struct { - // Attacher indicates the name of the volume driver that MUST handle this - // request. This is the name returned by GetPluginName(). - Attacher string `json:"attacher" protobuf:"bytes,1,opt,name=attacher"` - - // Source represents the volume that should be attached. - Source VolumeAttachmentSource `json:"source" protobuf:"bytes,2,opt,name=source"` - - // The node that the volume should be attached to. - NodeName string `json:"nodeName" protobuf:"bytes,3,opt,name=nodeName"` -} - -// VolumeAttachmentSource represents a volume that should be attached. -// Right now only PersistenVolumes can be attached via external attacher, -// in future we may allow also inline volumes in pods. -// Exactly one member can be set. -type VolumeAttachmentSource struct { - // Name of the persistent volume to attach. - // +optional - PersistentVolumeName *string `json:"persistentVolumeName,omitempty" protobuf:"bytes,1,opt,name=persistentVolumeName"` - - // Placeholder for *VolumeSource to accommodate inline volumes in pods. -} - -// VolumeAttachmentStatus is the status of a VolumeAttachment request. -type VolumeAttachmentStatus struct { - // Indicates the volume is successfully attached. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - Attached bool `json:"attached" protobuf:"varint,1,opt,name=attached"` - - // Upon successful attach, this field is populated with any - // information returned by the attach operation that must be passed - // into subsequent WaitForAttach or Mount calls. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. 
- // +optional - AttachmentMetadata map[string]string `json:"attachmentMetadata,omitempty" protobuf:"bytes,2,rep,name=attachmentMetadata"` - - // The last error encountered during attach operation, if any. - // This field must only be set by the entity completing the attach - // operation, i.e. the external-attacher. - // +optional - AttachError *VolumeError `json:"attachError,omitempty" protobuf:"bytes,3,opt,name=attachError,casttype=VolumeError"` - - // The last error encountered during detach operation, if any. - // This field must only be set by the entity completing the detach - // operation, i.e. the external-attacher. - // +optional - DetachError *VolumeError `json:"detachError,omitempty" protobuf:"bytes,4,opt,name=detachError,casttype=VolumeError"` -} - -// VolumeError captures an error encountered during a volume operation. -type VolumeError struct { - // Time the error was encountered. - // +optional - Time metav1.Time `json:"time,omitempty" protobuf:"bytes,1,opt,name=time"` - - // String detailing the error encountered during Attach or Detach operation. - // This string maybe logged, so it should not contain sensitive - // information. - // +optional - Message string `json:"message,omitempty" protobuf:"bytes,2,opt,name=message"` -} diff --git a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 044d69f..0000000 --- a/vendor/k8s.io/api/storage/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,119 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_StorageClass = map[string]string{ - "": "StorageClass describes the parameters for a class of storage for which PersistentVolumes can be dynamically provisioned.\n\nStorageClasses are non-namespaced; the name of the storage class according to etcd is in ObjectMeta.Name.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "provisioner": "Provisioner indicates the type of the provisioner.", - "parameters": "Parameters holds the parameters for the provisioner that should create volumes of this storage class.", - "reclaimPolicy": "Dynamically provisioned PersistentVolumes of this storage class are created with this reclaimPolicy. 
Defaults to Delete.", - "mountOptions": "Dynamically provisioned PersistentVolumes of this storage class are created with these mountOptions, e.g. [\"ro\", \"soft\"]. Not validated - mount of the PVs will simply fail if one is invalid.", - "allowVolumeExpansion": "AllowVolumeExpansion shows whether the storage class allow volume expand", - "volumeBindingMode": "VolumeBindingMode indicates how PersistentVolumeClaims should be provisioned and bound. When unset, VolumeBindingImmediate is used. This field is only honored by servers that enable the VolumeScheduling feature.", - "allowedTopologies": "Restrict the node topologies where volumes can be dynamically provisioned. Each volume plugin defines its own supported topology specifications. An empty TopologySelectorTerm list means there is no topology restriction. This field is only honored by servers that enable the VolumeScheduling feature.", -} - -func (StorageClass) SwaggerDoc() map[string]string { - return map_StorageClass -} - -var map_StorageClassList = map[string]string{ - "": "StorageClassList is a collection of storage classes.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of StorageClasses", -} - -func (StorageClassList) SwaggerDoc() map[string]string { - return map_StorageClassList -} - -var map_VolumeAttachment = map[string]string{ - "": "VolumeAttachment captures the intent to attach or detach the specified volume to/from the specified node.\n\nVolumeAttachment objects are non-namespaced.", - "metadata": "Standard object metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "spec": "Specification of the desired attach/detach volume behavior. Populated by the Kubernetes system.", - "status": "Status of the VolumeAttachment request. Populated by the entity completing the attach or detach operation, i.e. the external-attacher.", -} - -func (VolumeAttachment) SwaggerDoc() map[string]string { - return map_VolumeAttachment -} - -var map_VolumeAttachmentList = map[string]string{ - "": "VolumeAttachmentList is a collection of VolumeAttachment objects.", - "metadata": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", - "items": "Items is the list of VolumeAttachments", -} - -func (VolumeAttachmentList) SwaggerDoc() map[string]string { - return map_VolumeAttachmentList -} - -var map_VolumeAttachmentSource = map[string]string{ - "": "VolumeAttachmentSource represents a volume that should be attached. Right now only PersistenVolumes can be attached via external attacher, in future we may allow also inline volumes in pods. Exactly one member can be set.", - "persistentVolumeName": "Name of the persistent volume to attach.", -} - -func (VolumeAttachmentSource) SwaggerDoc() map[string]string { - return map_VolumeAttachmentSource -} - -var map_VolumeAttachmentSpec = map[string]string{ - "": "VolumeAttachmentSpec is the specification of a VolumeAttachment request.", - "attacher": "Attacher indicates the name of the volume driver that MUST handle this request. 
This is the name returned by GetPluginName().", - "source": "Source represents the volume that should be attached.", - "nodeName": "The node that the volume should be attached to.", -} - -func (VolumeAttachmentSpec) SwaggerDoc() map[string]string { - return map_VolumeAttachmentSpec -} - -var map_VolumeAttachmentStatus = map[string]string{ - "": "VolumeAttachmentStatus is the status of a VolumeAttachment request.", - "attached": "Indicates the volume is successfully attached. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachmentMetadata": "Upon successful attach, this field is populated with any information returned by the attach operation that must be passed into subsequent WaitForAttach or Mount calls. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "attachError": "The last error encountered during attach operation, if any. This field must only be set by the entity completing the attach operation, i.e. the external-attacher.", - "detachError": "The last error encountered during detach operation, if any. This field must only be set by the entity completing the detach operation, i.e. the external-attacher.", -} - -func (VolumeAttachmentStatus) SwaggerDoc() map[string]string { - return map_VolumeAttachmentStatus -} - -var map_VolumeError = map[string]string{ - "": "VolumeError captures an error encountered during a volume operation.", - "time": "Time the error was encountered.", - "message": "String detailing the error encountered during Attach or Detach operation. This string maybe logged, so it should not contain sensitive information.", -} - -func (VolumeError) SwaggerDoc() map[string]string { - return map_VolumeError -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index 8096dba..0000000 --- a/vendor/k8s.io/api/storage/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,268 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1 "k8s.io/api/core/v1" - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *StorageClass) DeepCopyInto(out *StorageClass) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - if in.Parameters != nil { - in, out := &in.Parameters, &out.Parameters - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.ReclaimPolicy != nil { - in, out := &in.ReclaimPolicy, &out.ReclaimPolicy - *out = new(v1.PersistentVolumeReclaimPolicy) - **out = **in - } - if in.MountOptions != nil { - in, out := &in.MountOptions, &out.MountOptions - *out = make([]string, len(*in)) - copy(*out, *in) - } - if in.AllowVolumeExpansion != nil { - in, out := &in.AllowVolumeExpansion, &out.AllowVolumeExpansion - *out = new(bool) - **out = **in - } - if in.VolumeBindingMode != nil { - in, out := &in.VolumeBindingMode, &out.VolumeBindingMode - *out = new(VolumeBindingMode) - **out = **in - } - if in.AllowedTopologies != nil { - in, out := &in.AllowedTopologies, &out.AllowedTopologies - *out = make([]v1.TopologySelectorTerm, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClass. -func (in *StorageClass) DeepCopy() *StorageClass { - if in == nil { - return nil - } - out := new(StorageClass) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StorageClass) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *StorageClassList) DeepCopyInto(out *StorageClassList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]StorageClass, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new StorageClassList. -func (in *StorageClassList) DeepCopy() *StorageClassList { - if in == nil { - return nil - } - out := new(StorageClassList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *StorageClassList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachment) DeepCopyInto(out *VolumeAttachment) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - in.Spec.DeepCopyInto(&out.Spec) - in.Status.DeepCopyInto(&out.Status) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachment. -func (in *VolumeAttachment) DeepCopy() *VolumeAttachment { - if in == nil { - return nil - } - out := new(VolumeAttachment) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttachment) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *VolumeAttachmentList) DeepCopyInto(out *VolumeAttachmentList) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]VolumeAttachment, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentList. -func (in *VolumeAttachmentList) DeepCopy() *VolumeAttachmentList { - if in == nil { - return nil - } - out := new(VolumeAttachmentList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *VolumeAttachmentList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentSource) DeepCopyInto(out *VolumeAttachmentSource) { - *out = *in - if in.PersistentVolumeName != nil { - in, out := &in.PersistentVolumeName, &out.PersistentVolumeName - *out = new(string) - **out = **in - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSource. -func (in *VolumeAttachmentSource) DeepCopy() *VolumeAttachmentSource { - if in == nil { - return nil - } - out := new(VolumeAttachmentSource) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentSpec) DeepCopyInto(out *VolumeAttachmentSpec) { - *out = *in - in.Source.DeepCopyInto(&out.Source) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentSpec. -func (in *VolumeAttachmentSpec) DeepCopy() *VolumeAttachmentSpec { - if in == nil { - return nil - } - out := new(VolumeAttachmentSpec) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeAttachmentStatus) DeepCopyInto(out *VolumeAttachmentStatus) { - *out = *in - if in.AttachmentMetadata != nil { - in, out := &in.AttachmentMetadata, &out.AttachmentMetadata - *out = make(map[string]string, len(*in)) - for key, val := range *in { - (*out)[key] = val - } - } - if in.AttachError != nil { - in, out := &in.AttachError, &out.AttachError - *out = new(VolumeError) - (*in).DeepCopyInto(*out) - } - if in.DetachError != nil { - in, out := &in.DetachError, &out.DetachError - *out = new(VolumeError) - (*in).DeepCopyInto(*out) - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeAttachmentStatus. -func (in *VolumeAttachmentStatus) DeepCopy() *VolumeAttachmentStatus { - if in == nil { - return nil - } - out := new(VolumeAttachmentStatus) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *VolumeError) DeepCopyInto(out *VolumeError) { - *out = *in - in.Time.DeepCopyInto(&out.Time) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new VolumeError. 
-func (in *VolumeError) DeepCopy() *VolumeError { - if in == nil { - return nil - } - out := new(VolumeError) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS b/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS deleted file mode 100644 index 5f729ff..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/OWNERS +++ /dev/null @@ -1,25 +0,0 @@ -reviewers: -- thockin -- smarterclayton -- wojtek-t -- deads2k -- brendandburns -- derekwaynecarr -- caesarxuchao -- mikedanese -- liggitt -- nikhiljindal -- gmarek -- janetkuo -- ncdc -- eparis -- dims -- krousey -- markturansky -- fabioy -- resouer -- david-mcmahon -- mfojtik -- jianhuiz -- feihujiang -- ghodss diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go b/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go deleted file mode 100644 index b6d42ac..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package meta provides functions for retrieving API metadata from objects -// belonging to the Kubernetes API -package meta // import "k8s.io/apimachinery/pkg/api/meta" diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go b/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go deleted file mode 100644 index cbf5d02..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/errors.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package meta - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" -) - -// AmbiguousResourceError is returned if the RESTMapper finds multiple matches for a resource -type AmbiguousResourceError struct { - PartialResource schema.GroupVersionResource - - MatchingResources []schema.GroupVersionResource - MatchingKinds []schema.GroupVersionKind -} - -func (e *AmbiguousResourceError) Error() string { - switch { - case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialResource, e.MatchingResources, e.MatchingKinds) - case len(e.MatchingKinds) > 0: - return fmt.Sprintf("%v matches multiple kinds %v", e.PartialResource, e.MatchingKinds) - case len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v", e.PartialResource, e.MatchingResources) - } - return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialResource) -} - -// AmbiguousKindError is returned if the RESTMapper finds multiple matches for a kind -type AmbiguousKindError struct { - PartialKind schema.GroupVersionKind - - MatchingResources []schema.GroupVersionResource - MatchingKinds []schema.GroupVersionKind -} - -func (e *AmbiguousKindError) Error() string { - switch { - case len(e.MatchingKinds) > 0 && len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v and kinds %v", e.PartialKind, e.MatchingResources, e.MatchingKinds) - case len(e.MatchingKinds) > 0: - return fmt.Sprintf("%v matches multiple kinds %v", e.PartialKind, e.MatchingKinds) - case len(e.MatchingResources) > 0: - return fmt.Sprintf("%v matches multiple resources %v", e.PartialKind, e.MatchingResources) - } - return fmt.Sprintf("%v matches multiple resources or kinds", e.PartialKind) -} - -func IsAmbiguousError(err error) bool { - if err == nil { - return false - } - switch err.(type) { - case *AmbiguousResourceError, *AmbiguousKindError: - return true - default: - return false - } -} - -// NoResourceMatchError is returned if the RESTMapper can't find any match for a resource -type NoResourceMatchError struct { - PartialResource schema.GroupVersionResource -} - -func (e *NoResourceMatchError) Error() string { - return fmt.Sprintf("no matches for %v", e.PartialResource) -} - -// NoKindMatchError is returned if the RESTMapper can't find any match for a kind -type NoKindMatchError struct { - // GroupKind is the API group and kind that was searched - GroupKind schema.GroupKind - // SearchedVersions is the optional list of versions the search was restricted to - SearchedVersions []string -} - -func (e *NoKindMatchError) Error() string { - searchedVersions := sets.NewString() - for _, v := range e.SearchedVersions { - searchedVersions.Insert(schema.GroupVersion{Group: e.GroupKind.Group, Version: v}.String()) - } - - switch len(searchedVersions) { - case 0: - return fmt.Sprintf("no matches for kind %q in group %q", e.GroupKind.Kind, e.GroupKind.Group) - case 1: - return fmt.Sprintf("no matches for kind %q in version %q", e.GroupKind.Kind, searchedVersions.List()[0]) - default: - return fmt.Sprintf("no matches for kind %q in versions %q", e.GroupKind.Kind, searchedVersions.List()) - } -} - -func IsNoMatchError(err error) bool { - if err == nil { - return false - } - switch err.(type) { - case *NoResourceMatchError, *NoKindMatchError: - return true - default: - return false - } -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go 
b/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go deleted file mode 100644 index fd22100..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/firsthit_restmapper.go +++ /dev/null @@ -1,97 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" -) - -// FirstHitRESTMapper is a wrapper for multiple RESTMappers which returns the -// first successful result for the singular requests -type FirstHitRESTMapper struct { - MultiRESTMapper -} - -func (m FirstHitRESTMapper) String() string { - return fmt.Sprintf("FirstHitRESTMapper{\n\t%v\n}", m.MultiRESTMapper) -} - -func (m FirstHitRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - errors := []error{} - for _, t := range m.MultiRESTMapper { - ret, err := t.ResourceFor(resource) - if err == nil { - return ret, nil - } - errors = append(errors, err) - } - - return schema.GroupVersionResource{}, collapseAggregateErrors(errors) -} - -func (m FirstHitRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - errors := []error{} - for _, t := range m.MultiRESTMapper { - ret, err := t.KindFor(resource) - if err == nil { - return ret, nil - } - errors = append(errors, err) - } - - return schema.GroupVersionKind{}, collapseAggregateErrors(errors) -} - -// RESTMapping provides the REST mapping for the resource based on the -// kind and version. This implementation supports multiple REST schemas and -// return the first match. -func (m FirstHitRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { - errors := []error{} - for _, t := range m.MultiRESTMapper { - ret, err := t.RESTMapping(gk, versions...) - if err == nil { - return ret, nil - } - errors = append(errors, err) - } - - return nil, collapseAggregateErrors(errors) -} - -// collapseAggregateErrors returns the minimal errors. it handles empty as nil, handles one item in a list -// by returning the item, and collapses all NoMatchErrors to a single one (since they should all be the same) -func collapseAggregateErrors(errors []error) error { - if len(errors) == 0 { - return nil - } - if len(errors) == 1 { - return errors[0] - } - - allNoMatchErrors := true - for _, err := range errors { - allNoMatchErrors = allNoMatchErrors && IsNoMatchError(err) - } - if allNoMatchErrors { - return errors[0] - } - - return utilerrors.NewAggregate(errors) -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go b/vendor/k8s.io/apimachinery/pkg/api/meta/help.go deleted file mode 100644 index c70b3d2..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/help.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "reflect" - - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" -) - -// IsListType returns true if the provided Object has a slice called Items -func IsListType(obj runtime.Object) bool { - // if we're a runtime.Unstructured, check whether this is a list. - // TODO: refactor GetItemsPtr to use an interface that returns []runtime.Object - if unstructured, ok := obj.(runtime.Unstructured); ok { - return unstructured.IsList() - } - - _, err := GetItemsPtr(obj) - return err == nil -} - -// GetItemsPtr returns a pointer to the list object's Items member. -// If 'list' doesn't have an Items member, it's not really a list type -// and an error will be returned. -// This function will either return a pointer to a slice, or an error, but not both. -func GetItemsPtr(list runtime.Object) (interface{}, error) { - v, err := conversion.EnforcePtr(list) - if err != nil { - return nil, err - } - - items := v.FieldByName("Items") - if !items.IsValid() { - return nil, fmt.Errorf("no Items field in %#v", list) - } - switch items.Kind() { - case reflect.Interface, reflect.Ptr: - target := reflect.TypeOf(items.Interface()).Elem() - if target.Kind() != reflect.Slice { - return nil, fmt.Errorf("items: Expected slice, got %s", target.Kind()) - } - return items.Interface(), nil - case reflect.Slice: - return items.Addr().Interface(), nil - default: - return nil, fmt.Errorf("items: Expected slice, got %s", items.Kind()) - } -} - -// EachListItem invokes fn on each runtime.Object in the list. Any error immediately terminates -// the loop. -func EachListItem(obj runtime.Object, fn func(runtime.Object) error) error { - if unstructured, ok := obj.(runtime.Unstructured); ok { - return unstructured.EachListItem(fn) - } - // TODO: Change to an interface call? - itemsPtr, err := GetItemsPtr(obj) - if err != nil { - return err - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return err - } - len := items.Len() - if len == 0 { - return nil - } - takeAddr := false - if elemType := items.Type().Elem(); elemType.Kind() != reflect.Ptr && elemType.Kind() != reflect.Interface { - if !items.Index(0).CanAddr() { - return fmt.Errorf("unable to take address of items in %T for EachListItem", obj) - } - takeAddr = true - } - - for i := 0; i < len; i++ { - raw := items.Index(i) - if takeAddr { - raw = raw.Addr() - } - switch item := raw.Interface().(type) { - case *runtime.RawExtension: - if err := fn(item.Object); err != nil { - return err - } - case runtime.Object: - if err := fn(item); err != nil { - return err - } - default: - obj, ok := item.(runtime.Object) - if !ok { - return fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) - } - if err := fn(obj); err != nil { - return err - } - } - } - return nil -} - -// ExtractList returns obj's Items element as an array of runtime.Objects. -// Returns an error if obj is not a List type (does not have an Items member). 
-func ExtractList(obj runtime.Object) ([]runtime.Object, error) { - itemsPtr, err := GetItemsPtr(obj) - if err != nil { - return nil, err - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return nil, err - } - list := make([]runtime.Object, items.Len()) - for i := range list { - raw := items.Index(i) - switch item := raw.Interface().(type) { - case runtime.RawExtension: - switch { - case item.Object != nil: - list[i] = item.Object - case item.Raw != nil: - // TODO: Set ContentEncoding and ContentType correctly. - list[i] = &runtime.Unknown{Raw: item.Raw} - default: - list[i] = nil - } - case runtime.Object: - list[i] = item - default: - var found bool - if list[i], found = raw.Addr().Interface().(runtime.Object); !found { - return nil, fmt.Errorf("%v: item[%v]: Expected object, got %#v(%s)", obj, i, raw.Interface(), raw.Kind()) - } - } - } - return list, nil -} - -// objectSliceType is the type of a slice of Objects -var objectSliceType = reflect.TypeOf([]runtime.Object{}) - -// SetList sets the given list object's Items member have the elements given in -// objects. -// Returns an error if list is not a List type (does not have an Items member), -// or if any of the objects are not of the right type. -func SetList(list runtime.Object, objects []runtime.Object) error { - itemsPtr, err := GetItemsPtr(list) - if err != nil { - return err - } - items, err := conversion.EnforcePtr(itemsPtr) - if err != nil { - return err - } - if items.Type() == objectSliceType { - items.Set(reflect.ValueOf(objects)) - return nil - } - slice := reflect.MakeSlice(items.Type(), len(objects), len(objects)) - for i := range objects { - dest := slice.Index(i) - if dest.Type() == reflect.TypeOf(runtime.RawExtension{}) { - dest = dest.FieldByName("Object") - } - - // check to see if you're directly assignable - if reflect.TypeOf(objects[i]).AssignableTo(dest.Type()) { - dest.Set(reflect.ValueOf(objects[i])) - continue - } - - src, err := conversion.EnforcePtr(objects[i]) - if err != nil { - return err - } - if src.Type().AssignableTo(dest.Type()) { - dest.Set(src) - } else if src.Type().ConvertibleTo(dest.Type()) { - dest.Set(src.Convert(dest.Type())) - } else { - return fmt.Errorf("item[%d]: can't assign or convert %v into %v", i, src.Type(), dest.Type()) - } - } - items.Set(slice) - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go b/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go deleted file mode 100644 index 42eac3a..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/interfaces.go +++ /dev/null @@ -1,134 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" -) - -type ListMetaAccessor interface { - GetListMeta() List -} - -// List lets you work with list metadata from any of the versioned or -// internal API objects. 
Attempting to set or retrieve a field on an object that does -// not support that field will be a no-op and return a default value. -type List metav1.ListInterface - -// Type exposes the type and APIVersion of versioned or internal API objects. -type Type metav1.Type - -// MetadataAccessor lets you work with object and list metadata from any of the versioned or -// internal API objects. Attempting to set or retrieve a field on an object that does -// not support that field (Name, UID, Namespace on lists) will be a no-op and return -// a default value. -// -// MetadataAccessor exposes Interface in a way that can be used with multiple objects. -type MetadataAccessor interface { - APIVersion(obj runtime.Object) (string, error) - SetAPIVersion(obj runtime.Object, version string) error - - Kind(obj runtime.Object) (string, error) - SetKind(obj runtime.Object, kind string) error - - Namespace(obj runtime.Object) (string, error) - SetNamespace(obj runtime.Object, namespace string) error - - Name(obj runtime.Object) (string, error) - SetName(obj runtime.Object, name string) error - - GenerateName(obj runtime.Object) (string, error) - SetGenerateName(obj runtime.Object, name string) error - - UID(obj runtime.Object) (types.UID, error) - SetUID(obj runtime.Object, uid types.UID) error - - SelfLink(obj runtime.Object) (string, error) - SetSelfLink(obj runtime.Object, selfLink string) error - - Labels(obj runtime.Object) (map[string]string, error) - SetLabels(obj runtime.Object, labels map[string]string) error - - Annotations(obj runtime.Object) (map[string]string, error) - SetAnnotations(obj runtime.Object, annotations map[string]string) error - - Continue(obj runtime.Object) (string, error) - SetContinue(obj runtime.Object, c string) error - - runtime.ResourceVersioner -} - -type RESTScopeName string - -const ( - RESTScopeNameNamespace RESTScopeName = "namespace" - RESTScopeNameRoot RESTScopeName = "root" -) - -// RESTScope contains the information needed to deal with REST resources that are in a resource hierarchy -type RESTScope interface { - // Name of the scope - Name() RESTScopeName -} - -// RESTMapping contains the information needed to deal with objects of a specific -// resource and kind in a RESTful manner. -type RESTMapping struct { - // Resource is the GroupVersionResource (location) for this endpoint - Resource schema.GroupVersionResource - - // GroupVersionKind is the GroupVersionKind (data format) to submit to this endpoint - GroupVersionKind schema.GroupVersionKind - - // Scope contains the information needed to deal with REST Resources that are in a resource hierarchy - Scope RESTScope -} - -// RESTMapper allows clients to map resources to kind, and map kind and version -// to interfaces for manipulating those objects. It is primarily intended for -// consumers of Kubernetes compatible REST APIs as defined in docs/devel/api-conventions.md. -// -// The Kubernetes API provides versioned resources and object kinds which are scoped -// to API groups. In other words, kinds and resources should not be assumed to be -// unique across groups. -// -// TODO: split into sub-interfaces -type RESTMapper interface { - // KindFor takes a partial resource and returns the single match. 
Returns an error if there are multiple matches - KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) - - // KindsFor takes a partial resource and returns the list of potential kinds in priority order - KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) - - // ResourceFor takes a partial resource and returns the single match. Returns an error if there are multiple matches - ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) - - // ResourcesFor takes a partial resource and returns the list of potential resource in priority order - ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) - - // RESTMapping identifies a preferred resource mapping for the provided group kind. - RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) - // RESTMappings returns all resource mappings for the provided group kind if no - // version search is provided. Otherwise identifies a preferred resource mapping for - // the provided version(s). - RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) - - ResourceSingularizer(resource string) (singular string, err error) -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go b/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go deleted file mode 100644 index 431a0a6..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/lazy.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "sync" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// lazyObject defers loading the mapper and typer until necessary. -type lazyObject struct { - loader func() (RESTMapper, error) - - lock sync.Mutex - loaded bool - err error - mapper RESTMapper -} - -// NewLazyObjectLoader handles unrecoverable errors when creating a RESTMapper / ObjectTyper by -// returning those initialization errors when the interface methods are invoked. This defers the -// initialization and any server calls until a client actually needs to perform the action. -func NewLazyRESTMapperLoader(fn func() (RESTMapper, error)) RESTMapper { - obj := &lazyObject{loader: fn} - return obj -} - -// init lazily loads the mapper and typer, returning an error if initialization has failed. 
-func (o *lazyObject) init() error { - o.lock.Lock() - defer o.lock.Unlock() - if o.loaded { - return o.err - } - o.mapper, o.err = o.loader() - o.loaded = true - return o.err -} - -var _ RESTMapper = &lazyObject{} - -func (o *lazyObject) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - if err := o.init(); err != nil { - return schema.GroupVersionKind{}, err - } - return o.mapper.KindFor(resource) -} - -func (o *lazyObject) KindsFor(resource schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - if err := o.init(); err != nil { - return []schema.GroupVersionKind{}, err - } - return o.mapper.KindsFor(resource) -} - -func (o *lazyObject) ResourceFor(input schema.GroupVersionResource) (schema.GroupVersionResource, error) { - if err := o.init(); err != nil { - return schema.GroupVersionResource{}, err - } - return o.mapper.ResourceFor(input) -} - -func (o *lazyObject) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - if err := o.init(); err != nil { - return []schema.GroupVersionResource{}, err - } - return o.mapper.ResourcesFor(input) -} - -func (o *lazyObject) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { - if err := o.init(); err != nil { - return nil, err - } - return o.mapper.RESTMapping(gk, versions...) -} - -func (o *lazyObject) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { - if err := o.init(); err != nil { - return nil, err - } - return o.mapper.RESTMappings(gk, versions...) -} - -func (o *lazyObject) ResourceSingularizer(resource string) (singular string, err error) { - if err := o.init(); err != nil { - return "", err - } - return o.mapper.ResourceSingularizer(resource) -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go b/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go deleted file mode 100644 index 1c2a83c..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/meta.go +++ /dev/null @@ -1,650 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "reflect" - - "github.com/golang/glog" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - metav1beta1 "k8s.io/apimachinery/pkg/apis/meta/v1beta1" - "k8s.io/apimachinery/pkg/conversion" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" -) - -// errNotList is returned when an object implements the Object style interfaces but not the List style -// interfaces. -var errNotList = fmt.Errorf("object does not implement the List interfaces") - -var errNotCommon = fmt.Errorf("object does not implement the common interface for accessing the SelfLink") - -// CommonAccessor returns a Common interface for the provided object or an error if the object does -// not provide List. 
-func CommonAccessor(obj interface{}) (metav1.Common, error) { - switch t := obj.(type) { - case List: - return t, nil - case metav1.ListInterface: - return t, nil - case ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotCommon - case metav1.ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotCommon - case metav1.Object: - return t, nil - case metav1.ObjectMetaAccessor: - if m := t.GetObjectMeta(); m != nil { - return m, nil - } - return nil, errNotCommon - default: - return nil, errNotCommon - } -} - -// ListAccessor returns a List interface for the provided object or an error if the object does -// not provide List. -// IMPORTANT: Objects are NOT a superset of lists. Do not use this check to determine whether an -// object *is* a List. -func ListAccessor(obj interface{}) (List, error) { - switch t := obj.(type) { - case List: - return t, nil - case metav1.ListInterface: - return t, nil - case ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotList - case metav1.ListMetaAccessor: - if m := t.GetListMeta(); m != nil { - return m, nil - } - return nil, errNotList - default: - return nil, errNotList - } -} - -// errNotObject is returned when an object implements the List style interfaces but not the Object style -// interfaces. -var errNotObject = fmt.Errorf("object does not implement the Object interfaces") - -// Accessor takes an arbitrary object pointer and returns meta.Interface. -// obj must be a pointer to an API type. An error is returned if the minimum -// required fields are missing. Fields that are not required return the default -// value and are a no-op if set. -func Accessor(obj interface{}) (metav1.Object, error) { - switch t := obj.(type) { - case metav1.Object: - return t, nil - case metav1.ObjectMetaAccessor: - if m := t.GetObjectMeta(); m != nil { - return m, nil - } - return nil, errNotObject - default: - return nil, errNotObject - } -} - -// AsPartialObjectMetadata takes the metav1 interface and returns a partial object. -// TODO: consider making this solely a conversion action. -func AsPartialObjectMetadata(m metav1.Object) *metav1beta1.PartialObjectMetadata { - switch t := m.(type) { - case *metav1.ObjectMeta: - return &metav1beta1.PartialObjectMetadata{ObjectMeta: *t} - default: - return &metav1beta1.PartialObjectMetadata{ - ObjectMeta: metav1.ObjectMeta{ - Name: m.GetName(), - GenerateName: m.GetGenerateName(), - Namespace: m.GetNamespace(), - SelfLink: m.GetSelfLink(), - UID: m.GetUID(), - ResourceVersion: m.GetResourceVersion(), - Generation: m.GetGeneration(), - CreationTimestamp: m.GetCreationTimestamp(), - DeletionTimestamp: m.GetDeletionTimestamp(), - DeletionGracePeriodSeconds: m.GetDeletionGracePeriodSeconds(), - Labels: m.GetLabels(), - Annotations: m.GetAnnotations(), - OwnerReferences: m.GetOwnerReferences(), - Finalizers: m.GetFinalizers(), - ClusterName: m.GetClusterName(), - Initializers: m.GetInitializers(), - }, - } - } -} - -// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion -// and Kind of an in-memory internal object. -// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta -// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube -// api conventions). 
-func TypeAccessor(obj interface{}) (Type, error) { - if typed, ok := obj.(runtime.Object); ok { - return objectAccessor{typed}, nil - } - v, err := conversion.EnforcePtr(obj) - if err != nil { - return nil, err - } - t := v.Type() - if v.Kind() != reflect.Struct { - return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface()) - } - - typeMeta := v.FieldByName("TypeMeta") - if !typeMeta.IsValid() { - return nil, fmt.Errorf("struct %v lacks embedded TypeMeta type", t) - } - a := &genericAccessor{} - if err := extractFromTypeMeta(typeMeta, a); err != nil { - return nil, fmt.Errorf("unable to find type fields on %#v: %v", typeMeta, err) - } - return a, nil -} - -type objectAccessor struct { - runtime.Object -} - -func (obj objectAccessor) GetKind() string { - return obj.GetObjectKind().GroupVersionKind().Kind -} - -func (obj objectAccessor) SetKind(kind string) { - gvk := obj.GetObjectKind().GroupVersionKind() - gvk.Kind = kind - obj.GetObjectKind().SetGroupVersionKind(gvk) -} - -func (obj objectAccessor) GetAPIVersion() string { - return obj.GetObjectKind().GroupVersionKind().GroupVersion().String() -} - -func (obj objectAccessor) SetAPIVersion(version string) { - gvk := obj.GetObjectKind().GroupVersionKind() - gv, err := schema.ParseGroupVersion(version) - if err != nil { - gv = schema.GroupVersion{Version: version} - } - gvk.Group, gvk.Version = gv.Group, gv.Version - obj.GetObjectKind().SetGroupVersionKind(gvk) -} - -// NewAccessor returns a MetadataAccessor that can retrieve -// or manipulate resource version on objects derived from core API -// metadata concepts. -func NewAccessor() MetadataAccessor { - return resourceAccessor{} -} - -// resourceAccessor implements ResourceVersioner and SelfLinker. -type resourceAccessor struct{} - -func (resourceAccessor) Kind(obj runtime.Object) (string, error) { - return objectAccessor{obj}.GetKind(), nil -} - -func (resourceAccessor) SetKind(obj runtime.Object, kind string) error { - objectAccessor{obj}.SetKind(kind) - return nil -} - -func (resourceAccessor) APIVersion(obj runtime.Object) (string, error) { - return objectAccessor{obj}.GetAPIVersion(), nil -} - -func (resourceAccessor) SetAPIVersion(obj runtime.Object, version string) error { - objectAccessor{obj}.SetAPIVersion(version) - return nil -} - -func (resourceAccessor) Namespace(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetNamespace(), nil -} - -func (resourceAccessor) SetNamespace(obj runtime.Object, namespace string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetNamespace(namespace) - return nil -} - -func (resourceAccessor) Name(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetName(), nil -} - -func (resourceAccessor) SetName(obj runtime.Object, name string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetName(name) - return nil -} - -func (resourceAccessor) GenerateName(obj runtime.Object) (string, error) { - accessor, err := Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetGenerateName(), nil -} - -func (resourceAccessor) SetGenerateName(obj runtime.Object, name string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetGenerateName(name) - return nil -} - -func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) { - accessor, err 
:= Accessor(obj) - if err != nil { - return "", err - } - return accessor.GetUID(), nil -} - -func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetUID(uid) - return nil -} - -func (resourceAccessor) SelfLink(obj runtime.Object) (string, error) { - accessor, err := CommonAccessor(obj) - if err != nil { - return "", err - } - return accessor.GetSelfLink(), nil -} - -func (resourceAccessor) SetSelfLink(obj runtime.Object, selfLink string) error { - accessor, err := CommonAccessor(obj) - if err != nil { - return err - } - accessor.SetSelfLink(selfLink) - return nil -} - -func (resourceAccessor) Labels(obj runtime.Object) (map[string]string, error) { - accessor, err := Accessor(obj) - if err != nil { - return nil, err - } - return accessor.GetLabels(), nil -} - -func (resourceAccessor) SetLabels(obj runtime.Object, labels map[string]string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetLabels(labels) - return nil -} - -func (resourceAccessor) Annotations(obj runtime.Object) (map[string]string, error) { - accessor, err := Accessor(obj) - if err != nil { - return nil, err - } - return accessor.GetAnnotations(), nil -} - -func (resourceAccessor) SetAnnotations(obj runtime.Object, annotations map[string]string) error { - accessor, err := Accessor(obj) - if err != nil { - return err - } - accessor.SetAnnotations(annotations) - return nil -} - -func (resourceAccessor) ResourceVersion(obj runtime.Object) (string, error) { - accessor, err := CommonAccessor(obj) - if err != nil { - return "", err - } - return accessor.GetResourceVersion(), nil -} - -func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) error { - accessor, err := CommonAccessor(obj) - if err != nil { - return err - } - accessor.SetResourceVersion(version) - return nil -} - -func (resourceAccessor) Continue(obj runtime.Object) (string, error) { - accessor, err := ListAccessor(obj) - if err != nil { - return "", err - } - return accessor.GetContinue(), nil -} - -func (resourceAccessor) SetContinue(obj runtime.Object, version string) error { - accessor, err := ListAccessor(obj) - if err != nil { - return err - } - accessor.SetContinue(version) - return nil -} - -// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object. -func extractFromOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { - if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil { - return err - } - if err := runtime.Field(v, "Kind", &o.Kind); err != nil { - return err - } - if err := runtime.Field(v, "Name", &o.Name); err != nil { - return err - } - if err := runtime.Field(v, "UID", &o.UID); err != nil { - return err - } - var controllerPtr *bool - if err := runtime.Field(v, "Controller", &controllerPtr); err != nil { - return err - } - if controllerPtr != nil { - controller := *controllerPtr - o.Controller = &controller - } - var blockOwnerDeletionPtr *bool - if err := runtime.Field(v, "BlockOwnerDeletion", &blockOwnerDeletionPtr); err != nil { - return err - } - if blockOwnerDeletionPtr != nil { - block := *blockOwnerDeletionPtr - o.BlockOwnerDeletion = &block - } - return nil -} - -// setOwnerReference sets v to o. v is the OwnerReferences field of an object. 
-func setOwnerReference(v reflect.Value, o *metav1.OwnerReference) error { - if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil { - return err - } - if err := runtime.SetField(o.Kind, v, "Kind"); err != nil { - return err - } - if err := runtime.SetField(o.Name, v, "Name"); err != nil { - return err - } - if err := runtime.SetField(o.UID, v, "UID"); err != nil { - return err - } - if o.Controller != nil { - controller := *(o.Controller) - if err := runtime.SetField(&controller, v, "Controller"); err != nil { - return err - } - } - if o.BlockOwnerDeletion != nil { - block := *(o.BlockOwnerDeletion) - if err := runtime.SetField(&block, v, "BlockOwnerDeletion"); err != nil { - return err - } - } - return nil -} - -// genericAccessor contains pointers to strings that can modify an arbitrary -// struct and implements the Accessor interface. -type genericAccessor struct { - namespace *string - name *string - generateName *string - uid *types.UID - apiVersion *string - kind *string - resourceVersion *string - selfLink *string - creationTimestamp *metav1.Time - deletionTimestamp **metav1.Time - labels *map[string]string - annotations *map[string]string - ownerReferences reflect.Value - finalizers *[]string -} - -func (a genericAccessor) GetNamespace() string { - if a.namespace == nil { - return "" - } - return *a.namespace -} - -func (a genericAccessor) SetNamespace(namespace string) { - if a.namespace == nil { - return - } - *a.namespace = namespace -} - -func (a genericAccessor) GetName() string { - if a.name == nil { - return "" - } - return *a.name -} - -func (a genericAccessor) SetName(name string) { - if a.name == nil { - return - } - *a.name = name -} - -func (a genericAccessor) GetGenerateName() string { - if a.generateName == nil { - return "" - } - return *a.generateName -} - -func (a genericAccessor) SetGenerateName(generateName string) { - if a.generateName == nil { - return - } - *a.generateName = generateName -} - -func (a genericAccessor) GetUID() types.UID { - if a.uid == nil { - return "" - } - return *a.uid -} - -func (a genericAccessor) SetUID(uid types.UID) { - if a.uid == nil { - return - } - *a.uid = uid -} - -func (a genericAccessor) GetAPIVersion() string { - return *a.apiVersion -} - -func (a genericAccessor) SetAPIVersion(version string) { - *a.apiVersion = version -} - -func (a genericAccessor) GetKind() string { - return *a.kind -} - -func (a genericAccessor) SetKind(kind string) { - *a.kind = kind -} - -func (a genericAccessor) GetResourceVersion() string { - return *a.resourceVersion -} - -func (a genericAccessor) SetResourceVersion(version string) { - *a.resourceVersion = version -} - -func (a genericAccessor) GetSelfLink() string { - return *a.selfLink -} - -func (a genericAccessor) SetSelfLink(selfLink string) { - *a.selfLink = selfLink -} - -func (a genericAccessor) GetCreationTimestamp() metav1.Time { - return *a.creationTimestamp -} - -func (a genericAccessor) SetCreationTimestamp(timestamp metav1.Time) { - *a.creationTimestamp = timestamp -} - -func (a genericAccessor) GetDeletionTimestamp() *metav1.Time { - return *a.deletionTimestamp -} - -func (a genericAccessor) SetDeletionTimestamp(timestamp *metav1.Time) { - *a.deletionTimestamp = timestamp -} - -func (a genericAccessor) GetLabels() map[string]string { - if a.labels == nil { - return nil - } - return *a.labels -} - -func (a genericAccessor) SetLabels(labels map[string]string) { - *a.labels = labels -} - -func (a genericAccessor) GetAnnotations() map[string]string { - if a.annotations == 
nil { - return nil - } - return *a.annotations -} - -func (a genericAccessor) SetAnnotations(annotations map[string]string) { - if a.annotations == nil { - emptyAnnotations := make(map[string]string) - a.annotations = &emptyAnnotations - } - *a.annotations = annotations -} - -func (a genericAccessor) GetFinalizers() []string { - if a.finalizers == nil { - return nil - } - return *a.finalizers -} - -func (a genericAccessor) SetFinalizers(finalizers []string) { - *a.finalizers = finalizers -} - -func (a genericAccessor) GetOwnerReferences() []metav1.OwnerReference { - var ret []metav1.OwnerReference - s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) - return ret - } - s = s.Elem() - // Set the capacity to one element greater to avoid copy if the caller later append an element. - ret = make([]metav1.OwnerReference, s.Len(), s.Len()+1) - for i := 0; i < s.Len(); i++ { - if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil { - glog.Errorf("extractFromOwnerReference failed: %v", err) - return ret - } - } - return ret -} - -func (a genericAccessor) SetOwnerReferences(references []metav1.OwnerReference) { - s := a.ownerReferences - if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice { - glog.Errorf("expect %v to be a pointer to slice", s) - } - s = s.Elem() - newReferences := reflect.MakeSlice(s.Type(), len(references), len(references)) - for i := 0; i < len(references); i++ { - if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil { - glog.Errorf("setOwnerReference failed: %v", err) - return - } - } - s.Set(newReferences) -} - -// extractFromTypeMeta extracts pointers to version and kind fields from an object -func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error { - if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil { - return err - } - if err := runtime.FieldPtr(v, "Kind", &a.kind); err != nil { - return err - } - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go deleted file mode 100644 index 6b01bf1..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/multirestmapper.go +++ /dev/null @@ -1,210 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - "strings" - - "k8s.io/apimachinery/pkg/runtime/schema" - utilerrors "k8s.io/apimachinery/pkg/util/errors" -) - -// MultiRESTMapper is a wrapper for multiple RESTMappers. 
-type MultiRESTMapper []RESTMapper - -func (m MultiRESTMapper) String() string { - nested := []string{} - for _, t := range m { - currString := fmt.Sprintf("%v", t) - splitStrings := strings.Split(currString, "\n") - nested = append(nested, strings.Join(splitStrings, "\n\t")) - } - - return fmt.Sprintf("MultiRESTMapper{\n\t%s\n}", strings.Join(nested, "\n\t")) -} - -// ResourceSingularizer converts a REST resource name from plural to singular (e.g., from pods to pod) -// This implementation supports multiple REST schemas and return the first match. -func (m MultiRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - for _, t := range m { - singular, err = t.ResourceSingularizer(resource) - if err == nil { - return - } - } - return -} - -func (m MultiRESTMapper) ResourcesFor(resource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - allGVRs := []schema.GroupVersionResource{} - for _, t := range m { - gvrs, err := t.ResourcesFor(resource) - // ignore "no match" errors, but any other error percolates back up - if IsNoMatchError(err) { - continue - } - if err != nil { - return nil, err - } - - // walk the existing values to de-dup - for _, curr := range gvrs { - found := false - for _, existing := range allGVRs { - if curr == existing { - found = true - break - } - } - - if !found { - allGVRs = append(allGVRs, curr) - } - } - } - - if len(allGVRs) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - return allGVRs, nil -} - -func (m MultiRESTMapper) KindsFor(resource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { - allGVKs := []schema.GroupVersionKind{} - for _, t := range m { - gvks, err := t.KindsFor(resource) - // ignore "no match" errors, but any other error percolates back up - if IsNoMatchError(err) { - continue - } - if err != nil { - return nil, err - } - - // walk the existing values to de-dup - for _, curr := range gvks { - found := false - for _, existing := range allGVKs { - if curr == existing { - found = true - break - } - } - - if !found { - allGVKs = append(allGVKs, curr) - } - } - } - - if len(allGVKs) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - return allGVKs, nil -} - -func (m MultiRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - resources, err := m.ResourcesFor(resource) - if err != nil { - return schema.GroupVersionResource{}, err - } - if len(resources) == 1 { - return resources[0], nil - } - - return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} -} - -func (m MultiRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - kinds, err := m.KindsFor(resource) - if err != nil { - return schema.GroupVersionKind{}, err - } - if len(kinds) == 1 { - return kinds[0], nil - } - - return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} -} - -// RESTMapping provides the REST mapping for the resource based on the -// kind and version. This implementation supports multiple REST schemas and -// return the first match. -func (m MultiRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { - allMappings := []*RESTMapping{} - errors := []error{} - - for _, t := range m { - currMapping, err := t.RESTMapping(gk, versions...) 
- // ignore "no match" errors, but any other error percolates back up - if IsNoMatchError(err) { - continue - } - if err != nil { - errors = append(errors, err) - continue - } - - allMappings = append(allMappings, currMapping) - } - - // if we got exactly one mapping, then use it even if other requested failed - if len(allMappings) == 1 { - return allMappings[0], nil - } - if len(allMappings) > 1 { - var kinds []schema.GroupVersionKind - for _, m := range allMappings { - kinds = append(kinds, m.GroupVersionKind) - } - return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} - } - if len(errors) > 0 { - return nil, utilerrors.NewAggregate(errors) - } - return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} -} - -// RESTMappings returns all possible RESTMappings for the provided group kind, or an error -// if the type is not recognized. -func (m MultiRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { - var allMappings []*RESTMapping - var errors []error - - for _, t := range m { - currMappings, err := t.RESTMappings(gk, versions...) - // ignore "no match" errors, but any other error percolates back up - if IsNoMatchError(err) { - continue - } - if err != nil { - errors = append(errors, err) - continue - } - allMappings = append(allMappings, currMappings...) - } - if len(errors) > 0 { - return nil, utilerrors.NewAggregate(errors) - } - if len(allMappings) == 0 { - return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} - } - return allMappings, nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go b/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go deleted file mode 100644 index fa11c58..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/priority.go +++ /dev/null @@ -1,222 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package meta - -import ( - "fmt" - - "k8s.io/apimachinery/pkg/runtime/schema" -) - -const ( - AnyGroup = "*" - AnyVersion = "*" - AnyResource = "*" - AnyKind = "*" -) - -// PriorityRESTMapper is a wrapper for automatically choosing a particular Resource or Kind -// when multiple matches are possible -type PriorityRESTMapper struct { - // Delegate is the RESTMapper to use to locate all the Kind and Resource matches - Delegate RESTMapper - - // ResourcePriority is a list of priority patterns to apply to matching resources. - // The list of all matching resources is narrowed based on the patterns until only one remains. - // A pattern with no matches is skipped. A pattern with more than one match uses its - // matches as the list to continue matching against. - ResourcePriority []schema.GroupVersionResource - - // KindPriority is a list of priority patterns to apply to matching kinds. - // The list of all matching kinds is narrowed based on the patterns until only one remains. - // A pattern with no matches is skipped. A pattern with more than one match uses its - // matches as the list to continue matching against. 
- KindPriority []schema.GroupVersionKind -} - -func (m PriorityRESTMapper) String() string { - return fmt.Sprintf("PriorityRESTMapper{\n\t%v\n\t%v\n\t%v\n}", m.ResourcePriority, m.KindPriority, m.Delegate) -} - -// ResourceFor finds all resources, then passes them through the ResourcePriority patterns to find a single matching hit. -func (m PriorityRESTMapper) ResourceFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - originalGVRs, originalErr := m.Delegate.ResourcesFor(partiallySpecifiedResource) - if originalErr != nil && len(originalGVRs) == 0 { - return schema.GroupVersionResource{}, originalErr - } - if len(originalGVRs) == 1 { - return originalGVRs[0], originalErr - } - - remainingGVRs := append([]schema.GroupVersionResource{}, originalGVRs...) - for _, pattern := range m.ResourcePriority { - matchedGVRs := []schema.GroupVersionResource{} - for _, gvr := range remainingGVRs { - if resourceMatches(pattern, gvr) { - matchedGVRs = append(matchedGVRs, gvr) - } - } - - switch len(matchedGVRs) { - case 0: - // if you have no matches, then nothing matched this pattern just move to the next - continue - case 1: - // one match, return - return matchedGVRs[0], originalErr - default: - // more than one match, use the matched hits as the list moving to the next pattern. - // this way you can have a series of selection criteria - remainingGVRs = matchedGVRs - } - } - - return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingResources: originalGVRs} -} - -// KindFor finds all kinds, then passes them through the KindPriority patterns to find a single matching hit. -func (m PriorityRESTMapper) KindFor(partiallySpecifiedResource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - originalGVKs, originalErr := m.Delegate.KindsFor(partiallySpecifiedResource) - if originalErr != nil && len(originalGVKs) == 0 { - return schema.GroupVersionKind{}, originalErr - } - if len(originalGVKs) == 1 { - return originalGVKs[0], originalErr - } - - remainingGVKs := append([]schema.GroupVersionKind{}, originalGVKs...) - for _, pattern := range m.KindPriority { - matchedGVKs := []schema.GroupVersionKind{} - for _, gvr := range remainingGVKs { - if kindMatches(pattern, gvr) { - matchedGVKs = append(matchedGVKs, gvr) - } - } - - switch len(matchedGVKs) { - case 0: - // if you have no matches, then nothing matched this pattern just move to the next - continue - case 1: - // one match, return - return matchedGVKs[0], originalErr - default: - // more than one match, use the matched hits as the list moving to the next pattern. 
- // this way you can have a series of selection criteria - remainingGVKs = matchedGVKs - } - } - - return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: partiallySpecifiedResource, MatchingKinds: originalGVKs} -} - -func resourceMatches(pattern schema.GroupVersionResource, resource schema.GroupVersionResource) bool { - if pattern.Group != AnyGroup && pattern.Group != resource.Group { - return false - } - if pattern.Version != AnyVersion && pattern.Version != resource.Version { - return false - } - if pattern.Resource != AnyResource && pattern.Resource != resource.Resource { - return false - } - - return true -} - -func kindMatches(pattern schema.GroupVersionKind, kind schema.GroupVersionKind) bool { - if pattern.Group != AnyGroup && pattern.Group != kind.Group { - return false - } - if pattern.Version != AnyVersion && pattern.Version != kind.Version { - return false - } - if pattern.Kind != AnyKind && pattern.Kind != kind.Kind { - return false - } - - return true -} - -func (m PriorityRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (mapping *RESTMapping, err error) { - mappings, originalErr := m.Delegate.RESTMappings(gk, versions...) - if originalErr != nil && len(mappings) == 0 { - return nil, originalErr - } - - // any versions the user provides take priority - priorities := m.KindPriority - if len(versions) > 0 { - priorities = make([]schema.GroupVersionKind, 0, len(m.KindPriority)+len(versions)) - for _, version := range versions { - gv := schema.GroupVersion{ - Version: version, - Group: gk.Group, - } - priorities = append(priorities, gv.WithKind(AnyKind)) - } - priorities = append(priorities, m.KindPriority...) - } - - remaining := append([]*RESTMapping{}, mappings...) - for _, pattern := range priorities { - var matching []*RESTMapping - for _, m := range remaining { - if kindMatches(pattern, m.GroupVersionKind) { - matching = append(matching, m) - } - } - - switch len(matching) { - case 0: - // if you have no matches, then nothing matched this pattern just move to the next - continue - case 1: - // one match, return - return matching[0], originalErr - default: - // more than one match, use the matched hits as the list moving to the next pattern. - // this way you can have a series of selection criteria - remaining = matching - } - } - if len(remaining) == 1 { - return remaining[0], originalErr - } - - var kinds []schema.GroupVersionKind - for _, m := range mappings { - kinds = append(kinds, m.GroupVersionKind) - } - return nil, &AmbiguousKindError{PartialKind: gk.WithVersion(""), MatchingKinds: kinds} -} - -func (m PriorityRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { - return m.Delegate.RESTMappings(gk, versions...) 
-} - -func (m PriorityRESTMapper) ResourceSingularizer(resource string) (singular string, err error) { - return m.Delegate.ResourceSingularizer(resource) -} - -func (m PriorityRESTMapper) ResourcesFor(partiallySpecifiedResource schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - return m.Delegate.ResourcesFor(partiallySpecifiedResource) -} - -func (m PriorityRESTMapper) KindsFor(partiallySpecifiedResource schema.GroupVersionResource) (gvk []schema.GroupVersionKind, err error) { - return m.Delegate.KindsFor(partiallySpecifiedResource) -} diff --git a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go b/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go deleted file mode 100644 index 41b60d7..0000000 --- a/vendor/k8s.io/apimachinery/pkg/api/meta/restmapper.go +++ /dev/null @@ -1,518 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// TODO: move everything in this file to pkg/api/rest -package meta - -import ( - "fmt" - "sort" - "strings" - - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// Implements RESTScope interface -type restScope struct { - name RESTScopeName -} - -func (r *restScope) Name() RESTScopeName { - return r.name -} - -var RESTScopeNamespace = &restScope{ - name: RESTScopeNameNamespace, -} - -var RESTScopeRoot = &restScope{ - name: RESTScopeNameRoot, -} - -// DefaultRESTMapper exposes mappings between the types defined in a -// runtime.Scheme. It assumes that all types defined the provided scheme -// can be mapped with the provided MetadataAccessor and Codec interfaces. -// -// The resource name of a Kind is defined as the lowercase, -// English-plural version of the Kind string. -// When converting from resource to Kind, the singular version of the -// resource name is also accepted for convenience. -// -// TODO: Only accept plural for some operations for increased control? -// (`get pod bar` vs `get pods bar`) -type DefaultRESTMapper struct { - defaultGroupVersions []schema.GroupVersion - - resourceToKind map[schema.GroupVersionResource]schema.GroupVersionKind - kindToPluralResource map[schema.GroupVersionKind]schema.GroupVersionResource - kindToScope map[schema.GroupVersionKind]RESTScope - singularToPlural map[schema.GroupVersionResource]schema.GroupVersionResource - pluralToSingular map[schema.GroupVersionResource]schema.GroupVersionResource -} - -func (m *DefaultRESTMapper) String() string { - return fmt.Sprintf("DefaultRESTMapper{kindToPluralResource=%v}", m.kindToPluralResource) -} - -var _ RESTMapper = &DefaultRESTMapper{} - -// NewDefaultRESTMapper initializes a mapping between Kind and APIVersion -// to a resource name and back based on the objects in a runtime.Scheme -// and the Kubernetes API conventions. Takes a group name, a priority list of the versions -// to search when an object has no default version (set empty to return an error), -// and a function that retrieves the correct metadata for a given version. 
-func NewDefaultRESTMapper(defaultGroupVersions []schema.GroupVersion) *DefaultRESTMapper { - resourceToKind := make(map[schema.GroupVersionResource]schema.GroupVersionKind) - kindToPluralResource := make(map[schema.GroupVersionKind]schema.GroupVersionResource) - kindToScope := make(map[schema.GroupVersionKind]RESTScope) - singularToPlural := make(map[schema.GroupVersionResource]schema.GroupVersionResource) - pluralToSingular := make(map[schema.GroupVersionResource]schema.GroupVersionResource) - // TODO: verify name mappings work correctly when versions differ - - return &DefaultRESTMapper{ - resourceToKind: resourceToKind, - kindToPluralResource: kindToPluralResource, - kindToScope: kindToScope, - defaultGroupVersions: defaultGroupVersions, - singularToPlural: singularToPlural, - pluralToSingular: pluralToSingular, - } -} - -func (m *DefaultRESTMapper) Add(kind schema.GroupVersionKind, scope RESTScope) { - plural, singular := UnsafeGuessKindToResource(kind) - m.AddSpecific(kind, plural, singular, scope) -} - -func (m *DefaultRESTMapper) AddSpecific(kind schema.GroupVersionKind, plural, singular schema.GroupVersionResource, scope RESTScope) { - m.singularToPlural[singular] = plural - m.pluralToSingular[plural] = singular - - m.resourceToKind[singular] = kind - m.resourceToKind[plural] = kind - - m.kindToPluralResource[kind] = plural - m.kindToScope[kind] = scope -} - -// unpluralizedSuffixes is a list of resource suffixes that are the same plural and singular -// This is only is only necessary because some bits of code are lazy and don't actually use the RESTMapper like they should. -// TODO eliminate this so that different callers can correctly map to resources. This probably means updating all -// callers to use the RESTMapper they mean. -var unpluralizedSuffixes = []string{ - "endpoints", -} - -// UnsafeGuessKindToResource converts Kind to a resource name. -// Broken. This method only "sort of" works when used outside of this package. It assumes that Kinds and Resources match -// and they aren't guaranteed to do so. 
-func UnsafeGuessKindToResource(kind schema.GroupVersionKind) ( /*plural*/ schema.GroupVersionResource /*singular*/, schema.GroupVersionResource) { - kindName := kind.Kind - if len(kindName) == 0 { - return schema.GroupVersionResource{}, schema.GroupVersionResource{} - } - singularName := strings.ToLower(kindName) - singular := kind.GroupVersion().WithResource(singularName) - - for _, skip := range unpluralizedSuffixes { - if strings.HasSuffix(singularName, skip) { - return singular, singular - } - } - - switch string(singularName[len(singularName)-1]) { - case "s": - return kind.GroupVersion().WithResource(singularName + "es"), singular - case "y": - return kind.GroupVersion().WithResource(strings.TrimSuffix(singularName, "y") + "ies"), singular - } - - return kind.GroupVersion().WithResource(singularName + "s"), singular -} - -// ResourceSingularizer implements RESTMapper -// It converts a resource name from plural to singular (e.g., from pods to pod) -func (m *DefaultRESTMapper) ResourceSingularizer(resourceType string) (string, error) { - partialResource := schema.GroupVersionResource{Resource: resourceType} - resources, err := m.ResourcesFor(partialResource) - if err != nil { - return resourceType, err - } - - singular := schema.GroupVersionResource{} - for _, curr := range resources { - currSingular, ok := m.pluralToSingular[curr] - if !ok { - continue - } - if singular.Empty() { - singular = currSingular - continue - } - - if currSingular.Resource != singular.Resource { - return resourceType, fmt.Errorf("multiple possible singular resources (%v) found for %v", resources, resourceType) - } - } - - if singular.Empty() { - return resourceType, fmt.Errorf("no singular of resource %v has been defined", resourceType) - } - - return singular.Resource, nil -} - -// coerceResourceForMatching makes the resource lower case and converts internal versions to unspecified (legacy behavior) -func coerceResourceForMatching(resource schema.GroupVersionResource) schema.GroupVersionResource { - resource.Resource = strings.ToLower(resource.Resource) - if resource.Version == runtime.APIVersionInternal { - resource.Version = "" - } - - return resource -} - -func (m *DefaultRESTMapper) ResourcesFor(input schema.GroupVersionResource) ([]schema.GroupVersionResource, error) { - resource := coerceResourceForMatching(input) - - hasResource := len(resource.Resource) > 0 - hasGroup := len(resource.Group) > 0 - hasVersion := len(resource.Version) > 0 - - if !hasResource { - return nil, fmt.Errorf("a resource must be present, got: %v", resource) - } - - ret := []schema.GroupVersionResource{} - switch { - case hasGroup && hasVersion: - // fully qualified. Find the exact match - for plural, singular := range m.pluralToSingular { - if singular == resource { - ret = append(ret, plural) - break - } - if plural == resource { - ret = append(ret, plural) - break - } - } - - case hasGroup: - // given a group, prefer an exact match. If you don't find one, resort to a prefix match on group - foundExactMatch := false - requestedGroupResource := resource.GroupResource() - for plural, singular := range m.pluralToSingular { - if singular.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, plural) - } - if plural.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, plural) - } - } - - // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match - // storageclass.storage.k8s.io - if !foundExactMatch { - for plural, singular := range m.pluralToSingular { - if !strings.HasPrefix(plural.Group, requestedGroupResource.Group) { - continue - } - if singular.Resource == requestedGroupResource.Resource { - ret = append(ret, plural) - } - if plural.Resource == requestedGroupResource.Resource { - ret = append(ret, plural) - } - } - - } - - case hasVersion: - for plural, singular := range m.pluralToSingular { - if singular.Version == resource.Version && singular.Resource == resource.Resource { - ret = append(ret, plural) - } - if plural.Version == resource.Version && plural.Resource == resource.Resource { - ret = append(ret, plural) - } - } - - default: - for plural, singular := range m.pluralToSingular { - if singular.Resource == resource.Resource { - ret = append(ret, plural) - } - if plural.Resource == resource.Resource { - ret = append(ret, plural) - } - } - } - - if len(ret) == 0 { - return nil, &NoResourceMatchError{PartialResource: resource} - } - - sort.Sort(resourceByPreferredGroupVersion{ret, m.defaultGroupVersions}) - return ret, nil -} - -func (m *DefaultRESTMapper) ResourceFor(resource schema.GroupVersionResource) (schema.GroupVersionResource, error) { - resources, err := m.ResourcesFor(resource) - if err != nil { - return schema.GroupVersionResource{}, err - } - if len(resources) == 1 { - return resources[0], nil - } - - return schema.GroupVersionResource{}, &AmbiguousResourceError{PartialResource: resource, MatchingResources: resources} -} - -func (m *DefaultRESTMapper) KindsFor(input schema.GroupVersionResource) ([]schema.GroupVersionKind, error) { - resource := coerceResourceForMatching(input) - - hasResource := len(resource.Resource) > 0 - hasGroup := len(resource.Group) > 0 - hasVersion := len(resource.Version) > 0 - - if !hasResource { - return nil, fmt.Errorf("a resource must be present, got: %v", resource) - } - - ret := []schema.GroupVersionKind{} - switch { - // fully qualified. Find the exact match - case hasGroup && hasVersion: - kind, exists := m.resourceToKind[resource] - if exists { - ret = append(ret, kind) - } - - case hasGroup: - foundExactMatch := false - requestedGroupResource := resource.GroupResource() - for currResource, currKind := range m.resourceToKind { - if currResource.GroupResource() == requestedGroupResource { - foundExactMatch = true - ret = append(ret, currKind) - } - } - - // if you didn't find an exact match, match on group prefixing. 
This allows storageclass.storage to match - // storageclass.storage.k8s.io - if !foundExactMatch { - for currResource, currKind := range m.resourceToKind { - if !strings.HasPrefix(currResource.Group, requestedGroupResource.Group) { - continue - } - if currResource.Resource == requestedGroupResource.Resource { - ret = append(ret, currKind) - } - } - - } - - case hasVersion: - for currResource, currKind := range m.resourceToKind { - if currResource.Version == resource.Version && currResource.Resource == resource.Resource { - ret = append(ret, currKind) - } - } - - default: - for currResource, currKind := range m.resourceToKind { - if currResource.Resource == resource.Resource { - ret = append(ret, currKind) - } - } - } - - if len(ret) == 0 { - return nil, &NoResourceMatchError{PartialResource: input} - } - - sort.Sort(kindByPreferredGroupVersion{ret, m.defaultGroupVersions}) - return ret, nil -} - -func (m *DefaultRESTMapper) KindFor(resource schema.GroupVersionResource) (schema.GroupVersionKind, error) { - kinds, err := m.KindsFor(resource) - if err != nil { - return schema.GroupVersionKind{}, err - } - if len(kinds) == 1 { - return kinds[0], nil - } - - return schema.GroupVersionKind{}, &AmbiguousResourceError{PartialResource: resource, MatchingKinds: kinds} -} - -type kindByPreferredGroupVersion struct { - list []schema.GroupVersionKind - sortOrder []schema.GroupVersion -} - -func (o kindByPreferredGroupVersion) Len() int { return len(o.list) } -func (o kindByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } -func (o kindByPreferredGroupVersion) Less(i, j int) bool { - lhs := o.list[i] - rhs := o.list[j] - if lhs == rhs { - return false - } - - if lhs.GroupVersion() == rhs.GroupVersion() { - return lhs.Kind < rhs.Kind - } - - // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order - lhsIndex := -1 - rhsIndex := -1 - - for i := range o.sortOrder { - if o.sortOrder[i] == lhs.GroupVersion() { - lhsIndex = i - } - if o.sortOrder[i] == rhs.GroupVersion() { - rhsIndex = i - } - } - - if rhsIndex == -1 { - return true - } - - return lhsIndex < rhsIndex -} - -type resourceByPreferredGroupVersion struct { - list []schema.GroupVersionResource - sortOrder []schema.GroupVersion -} - -func (o resourceByPreferredGroupVersion) Len() int { return len(o.list) } -func (o resourceByPreferredGroupVersion) Swap(i, j int) { o.list[i], o.list[j] = o.list[j], o.list[i] } -func (o resourceByPreferredGroupVersion) Less(i, j int) bool { - lhs := o.list[i] - rhs := o.list[j] - if lhs == rhs { - return false - } - - if lhs.GroupVersion() == rhs.GroupVersion() { - return lhs.Resource < rhs.Resource - } - - // otherwise, the difference is in the GroupVersion, so we need to sort with respect to the preferred order - lhsIndex := -1 - rhsIndex := -1 - - for i := range o.sortOrder { - if o.sortOrder[i] == lhs.GroupVersion() { - lhsIndex = i - } - if o.sortOrder[i] == rhs.GroupVersion() { - rhsIndex = i - } - } - - if rhsIndex == -1 { - return true - } - - return lhsIndex < rhsIndex -} - -// RESTMapping returns a struct representing the resource path and conversion interfaces a -// RESTClient should use to operate on the provided group/kind in order of versions. If a version search -// order is not provided, the search order provided to DefaultRESTMapper will be used to resolve which -// version should be used to access the named group/kind. 
-func (m *DefaultRESTMapper) RESTMapping(gk schema.GroupKind, versions ...string) (*RESTMapping, error) { - mappings, err := m.RESTMappings(gk, versions...) - if err != nil { - return nil, err - } - if len(mappings) == 0 { - return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} - } - // since we rely on RESTMappings method - // take the first match and return to the caller - // as this was the existing behavior. - return mappings[0], nil -} - -// RESTMappings returns the RESTMappings for the provided group kind. If a version search order -// is not provided, the search order provided to DefaultRESTMapper will be used. -func (m *DefaultRESTMapper) RESTMappings(gk schema.GroupKind, versions ...string) ([]*RESTMapping, error) { - mappings := make([]*RESTMapping, 0) - potentialGVK := make([]schema.GroupVersionKind, 0) - hadVersion := false - - // Pick an appropriate version - for _, version := range versions { - if len(version) == 0 || version == runtime.APIVersionInternal { - continue - } - currGVK := gk.WithVersion(version) - hadVersion = true - if _, ok := m.kindToPluralResource[currGVK]; ok { - potentialGVK = append(potentialGVK, currGVK) - break - } - } - // Use the default preferred versions - if !hadVersion && len(potentialGVK) == 0 { - for _, gv := range m.defaultGroupVersions { - if gv.Group != gk.Group { - continue - } - potentialGVK = append(potentialGVK, gk.WithVersion(gv.Version)) - } - } - - if len(potentialGVK) == 0 { - return nil, &NoKindMatchError{GroupKind: gk, SearchedVersions: versions} - } - - for _, gvk := range potentialGVK { - //Ensure we have a REST mapping - res, ok := m.kindToPluralResource[gvk] - if !ok { - continue - } - - // Ensure we have a REST scope - scope, ok := m.kindToScope[gvk] - if !ok { - return nil, fmt.Errorf("the provided version %q and kind %q cannot be mapped to a supported scope", gvk.GroupVersion(), gvk.Kind) - } - - mappings = append(mappings, &RESTMapping{ - Resource: res, - GroupVersionKind: gvk, - Scope: scope, - }) - } - - if len(mappings) == 0 { - return nil, &NoResourceMatchError{PartialResource: schema.GroupVersionResource{Group: gk.Group, Resource: gk.Kind}} - } - return mappings, nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go deleted file mode 100644 index f3e5e4c..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/conversion.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import "k8s.io/apimachinery/pkg/conversion" - -// Convert_Slice_string_To_v1beta1_IncludeObjectPolicy allows converting a URL query parameter value -func Convert_Slice_string_To_v1beta1_IncludeObjectPolicy(input *[]string, out *IncludeObjectPolicy, s conversion.Scope) error { - if len(*input) > 0 { - *out = IncludeObjectPolicy((*input)[0]) - } - return nil -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go deleted file mode 100644 index 3b2bedd..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/deepcopy.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import "k8s.io/apimachinery/pkg/runtime" - -func (in *TableRow) DeepCopy() *TableRow { - if in == nil { - return nil - } - - out := new(TableRow) - - if in.Cells != nil { - out.Cells = make([]interface{}, len(in.Cells)) - for i := range in.Cells { - out.Cells[i] = runtime.DeepCopyJSONValue(in.Cells[i]) - } - } - - if in.Conditions != nil { - out.Conditions = make([]TableRowCondition, len(in.Conditions)) - for i := range in.Conditions { - in.Conditions[i].DeepCopyInto(&out.Conditions[i]) - } - } - - in.Object.DeepCopyInto(&out.Object) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go deleted file mode 100644 index dc461cc..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/doc.go +++ /dev/null @@ -1,22 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// +k8s:deepcopy-gen=package -// +k8s:openapi-gen=true -// +k8s:defaulter-gen=TypeMeta - -// +groupName=meta.k8s.io -package v1beta1 diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go deleted file mode 100644 index fe3df69..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.pb.go +++ /dev/null @@ -1,632 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by protoc-gen-gogo. -// source: k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto -// DO NOT EDIT! - -/* - Package v1beta1 is a generated protocol buffer package. - - It is generated from these files: - k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto - - It has these top-level messages: - PartialObjectMetadata - PartialObjectMetadataList - TableOptions -*/ -package v1beta1 - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import strings "strings" -import reflect "reflect" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. -const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -func (m *PartialObjectMetadata) Reset() { *m = PartialObjectMetadata{} } -func (*PartialObjectMetadata) ProtoMessage() {} -func (*PartialObjectMetadata) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{0} } - -func (m *PartialObjectMetadataList) Reset() { *m = PartialObjectMetadataList{} } -func (*PartialObjectMetadataList) ProtoMessage() {} -func (*PartialObjectMetadataList) Descriptor() ([]byte, []int) { - return fileDescriptorGenerated, []int{1} -} - -func (m *TableOptions) Reset() { *m = TableOptions{} } -func (*TableOptions) ProtoMessage() {} -func (*TableOptions) Descriptor() ([]byte, []int) { return fileDescriptorGenerated, []int{2} } - -func init() { - proto.RegisterType((*PartialObjectMetadata)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadata") - proto.RegisterType((*PartialObjectMetadataList)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.PartialObjectMetadataList") - proto.RegisterType((*TableOptions)(nil), "k8s.io.apimachinery.pkg.apis.meta.v1beta1.TableOptions") -} -func (m *PartialObjectMetadata) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PartialObjectMetadata) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(m.ObjectMeta.Size())) - n1, err := m.ObjectMeta.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - return i, nil -} - -func (m *PartialObjectMetadataList) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PartialObjectMetadataList) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Items) > 0 { - for _, msg := range m.Items { - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(msg.Size())) - n, err := 
msg.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n - } - } - return i, nil -} - -func (m *TableOptions) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *TableOptions) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - dAtA[i] = 0xa - i++ - i = encodeVarintGenerated(dAtA, i, uint64(len(m.IncludeObject))) - i += copy(dAtA[i:], m.IncludeObject) - return i, nil -} - -func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - dAtA[offset+4] = uint8(v >> 32) - dAtA[offset+5] = uint8(v >> 40) - dAtA[offset+6] = uint8(v >> 48) - dAtA[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int { - dAtA[offset] = uint8(v) - dAtA[offset+1] = uint8(v >> 8) - dAtA[offset+2] = uint8(v >> 16) - dAtA[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *PartialObjectMetadata) Size() (n int) { - var l int - _ = l - l = m.ObjectMeta.Size() - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func (m *PartialObjectMetadataList) Size() (n int) { - var l int - _ = l - if len(m.Items) > 0 { - for _, e := range m.Items { - l = e.Size() - n += 1 + l + sovGenerated(uint64(l)) - } - } - return n -} - -func (m *TableOptions) Size() (n int) { - var l int - _ = l - l = len(m.IncludeObject) - n += 1 + l + sovGenerated(uint64(l)) - return n -} - -func sovGenerated(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozGenerated(x uint64) (n int) { - return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (this *PartialObjectMetadata) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadata{`, - `ObjectMeta:` + strings.Replace(strings.Replace(this.ObjectMeta.String(), "ObjectMeta", "k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta", 1), `&`, ``, 1) + `,`, - `}`, - }, "") - return s -} -func (this *PartialObjectMetadataList) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&PartialObjectMetadataList{`, - `Items:` + strings.Replace(fmt.Sprintf("%v", this.Items), "PartialObjectMetadata", "PartialObjectMetadata", 1) + `,`, - `}`, - }, "") - return s -} -func (this *TableOptions) String() string { - if this == nil { - return "nil" - } - s := strings.Join([]string{`&TableOptions{`, - `IncludeObject:` + fmt.Sprintf("%v", this.IncludeObject) + `,`, - `}`, - }, "") - return s -} -func valueToStringGenerated(v interface{}) string { - rv := reflect.ValueOf(v) - if rv.IsNil() { - return "nil" - } - pv := reflect.Indirect(rv).Interface() - return fmt.Sprintf("*%v", pv) -} -func (m *PartialObjectMetadata) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := 
int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PartialObjectMetadataList) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PartialObjectMetadataList: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PartialObjectMetadataList: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Items = append(m.Items, &PartialObjectMetadata{}) - if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *TableOptions) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: TableOptions: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: 
TableOptions: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IncludeObject", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowGenerated - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthGenerated - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IncludeObject = IncludeObjectPolicy(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipGenerated(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthGenerated - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipGenerated(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthGenerated - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowGenerated - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipGenerated(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") -) - -func init() { - proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto", fileDescriptorGenerated) -} - -var fileDescriptorGenerated = []byte{ - // 375 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x91, 0xcd, 0x0a, 0xd3, 0x40, - 0x10, 0xc7, 0xb3, 0x48, 0xd1, 0x6e, 0xed, 0x25, 0x22, 0xd4, 0x1e, 0x36, 0xa5, 0xa7, 0x0a, 0x76, - 0xd7, 0x16, 0x11, 0x8f, 0x92, 0x5b, 0x41, 0x69, 0x09, 0x9e, 0x3c, 0xb9, 0x49, 0xc6, 0x74, 
0xcd, - 0xc7, 0x86, 0xec, 0xa6, 0xd0, 0x8b, 0xf8, 0x08, 0x3e, 0x56, 0x8f, 0x3d, 0xf6, 0x14, 0x6c, 0x7c, - 0x0b, 0x4f, 0x92, 0x0f, 0xec, 0x87, 0x15, 0x7b, 0x9b, 0xf9, 0x0f, 0xbf, 0x5f, 0x66, 0xb2, 0xd8, - 0x09, 0xdf, 0x28, 0x2a, 0x24, 0x0b, 0x73, 0x17, 0xb2, 0x04, 0x34, 0x28, 0xb6, 0x81, 0xc4, 0x97, - 0x19, 0x6b, 0x07, 0x3c, 0x15, 0x31, 0xf7, 0xd6, 0x22, 0x81, 0x6c, 0xcb, 0xd2, 0x30, 0xa8, 0x02, - 0xc5, 0x62, 0xd0, 0x9c, 0x6d, 0x66, 0x2e, 0x68, 0x3e, 0x63, 0x01, 0x24, 0x90, 0x71, 0x0d, 0x3e, - 0x4d, 0x33, 0xa9, 0xa5, 0xf9, 0xbc, 0x41, 0xe9, 0x39, 0x4a, 0xd3, 0x30, 0xa8, 0x02, 0x45, 0x2b, - 0x94, 0xb6, 0xe8, 0x70, 0x1a, 0x08, 0xbd, 0xce, 0x5d, 0xea, 0xc9, 0x98, 0x05, 0x32, 0x90, 0xac, - 0x36, 0xb8, 0xf9, 0xe7, 0xba, 0xab, 0x9b, 0xba, 0x6a, 0xcc, 0xc3, 0x57, 0xf7, 0x2c, 0x75, 0xbd, - 0xcf, 0xf0, 0x9f, 0xa7, 0x64, 0x79, 0xa2, 0x45, 0x0c, 0x7f, 0x01, 0xaf, 0xff, 0x07, 0x28, 0x6f, - 0x0d, 0x31, 0xbf, 0xe6, 0xc6, 0x5b, 0xfc, 0x74, 0xc5, 0x33, 0x2d, 0x78, 0xb4, 0x74, 0xbf, 0x80, - 0xa7, 0xdf, 0x83, 0xe6, 0x3e, 0xd7, 0xdc, 0xfc, 0x84, 0x1f, 0xc5, 0x6d, 0x3d, 0x40, 0x23, 0x34, - 0xe9, 0xcd, 0x5f, 0xd2, 0x7b, 0x7e, 0x12, 0x3d, 0x79, 0x6c, 0x73, 0x57, 0x58, 0x46, 0x59, 0x58, - 0xf8, 0x94, 0x39, 0x7f, 0xac, 0xe3, 0xaf, 0xf8, 0xd9, 0xcd, 0x4f, 0xbf, 0x13, 0x4a, 0x9b, 0x1c, - 0x77, 0x84, 0x86, 0x58, 0x0d, 0xd0, 0xe8, 0xc1, 0xa4, 0x37, 0x7f, 0x4b, 0xef, 0x7e, 0x20, 0x7a, - 0x53, 0x6a, 0x77, 0xcb, 0xc2, 0xea, 0x2c, 0x2a, 0xa5, 0xd3, 0x98, 0xc7, 0x2e, 0x7e, 0xfc, 0x81, - 0xbb, 0x11, 0x2c, 0x53, 0x2d, 0x64, 0xa2, 0x4c, 0x07, 0xf7, 0x45, 0xe2, 0x45, 0xb9, 0x0f, 0x0d, - 0x5a, 0x9f, 0xdd, 0xb5, 0x5f, 0xb4, 0x47, 0xf4, 0x17, 0xe7, 0xc3, 0x5f, 0x85, 0xf5, 0xe4, 0x22, - 0x58, 0xc9, 0x48, 0x78, 0x5b, 0xe7, 0x52, 0x61, 0x4f, 0x77, 0x47, 0x62, 0xec, 0x8f, 0xc4, 0x38, - 0x1c, 0x89, 0xf1, 0xad, 0x24, 0x68, 0x57, 0x12, 0xb4, 0x2f, 0x09, 0x3a, 0x94, 0x04, 0xfd, 0x28, - 0x09, 0xfa, 0xfe, 0x93, 0x18, 0x1f, 0x1f, 0xb6, 0xab, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xf3, - 0xe1, 0xde, 0x86, 0xdb, 0x02, 0x00, 0x00, -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto deleted file mode 100644 index 83be997..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/generated.proto +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - - -// This file was autogenerated by go-to-protobuf. Do not edit it manually! - -syntax = 'proto2'; - -package k8s.io.apimachinery.pkg.apis.meta.v1beta1; - -import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/generated.proto"; -import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; - -// Package-wide variables from generator "generated". -option go_package = "v1beta1"; - -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. 
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadata { - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; -} - -// PartialObjectMetadataList contains a list of objects containing only their metadata -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message PartialObjectMetadataList { - // items contains each of the included items. - repeated PartialObjectMetadata items = 1; -} - -// TableOptions are used when a Table is requested by the caller. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -message TableOptions { - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - optional string includeObject = 1; -} - diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go deleted file mode 100644 index d13254b..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/register.go +++ /dev/null @@ -1,57 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// GroupName is the group name for this API. -const GroupName = "meta.k8s.io" - -// SchemeGroupVersion is group version used to register these objects -var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} - -// Kind takes an unqualified kind and returns a Group qualified GroupKind -func Kind(kind string) schema.GroupKind { - return SchemeGroupVersion.WithKind(kind).GroupKind() -} - -// scheme is the registry for the common types that adhere to the meta v1beta1 API spec. -var scheme = runtime.NewScheme() - -// ParameterCodec knows about query parameters used with the meta v1beta1 API spec. -var ParameterCodec = runtime.NewParameterCodec(scheme) - -func init() { - scheme.AddKnownTypes(SchemeGroupVersion, - &Table{}, - &TableOptions{}, - &PartialObjectMetadata{}, - &PartialObjectMetadataList{}, - ) - - if err := scheme.AddConversionFuncs( - Convert_Slice_string_To_v1beta1_IncludeObjectPolicy, - ); err != nil { - panic(err) - } - - // register manually. This usually goes through the SchemeBuilder, which we cannot use here. - //scheme.AddGeneratedDeepCopyFuncs(GetGeneratedDeepCopyFuncs()...) 
-} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go deleted file mode 100644 index 344c533..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types.go +++ /dev/null @@ -1,161 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// package v1beta1 is alpha objects from meta that will be introduced. -package v1beta1 - -import ( - "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -// TODO: Table does not generate to protobuf because of the interface{} - fix protobuf -// generation to support a meta type that can accept any valid JSON. - -// Table is a tabular representation of a set of API resources. The server transforms the -// object into a set of preferred columns for quickly reviewing the objects. -// +protobuf=false -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type Table struct { - v1.TypeMeta `json:",inline"` - // Standard list metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds - // +optional - v1.ListMeta `json:"metadata,omitempty"` - - // columnDefinitions describes each column in the returned items array. The number of cells per row - // will always match the number of column definitions. - ColumnDefinitions []TableColumnDefinition `json:"columnDefinitions"` - // rows is the list of items in the table. - Rows []TableRow `json:"rows"` -} - -// TableColumnDefinition contains information about a column returned in the Table. -// +protobuf=false -type TableColumnDefinition struct { - // name is a human readable name for the column. - Name string `json:"name"` - // type is an OpenAPI type definition for this column. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Type string `json:"type"` - // format is an optional OpenAPI type definition for this column. The 'name' format is applied - // to the primary identifier column to assist in clients identifying column is the resource name. - // See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more. - Format string `json:"format"` - // description is a human readable description of this column. - Description string `json:"description"` - // priority is an integer defining the relative importance of this column compared to others. Lower - // numbers are considered higher priority. Columns that may be omitted in limited space scenarios - // should be given a higher priority. - Priority int32 `json:"priority"` -} - -// TableRow is an individual row in a table. -// +protobuf=false -type TableRow struct { - // cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple - // maps, or lists, or null. See the type field of the column definition for a more detailed description. 
- Cells []interface{} `json:"cells"` - // conditions describe additional status of a row that are relevant for a human user. - // +optional - Conditions []TableRowCondition `json:"conditions,omitempty"` - // This field contains the requested additional information about each object based on the includeObject - // policy when requesting the Table. If "None", this field is empty, if "Object" this will be the - // default serialization of the object for the current API version, and if "Metadata" (the default) will - // contain the object metadata. Check the returned kind and apiVersion of the object before parsing. - // +optional - Object runtime.RawExtension `json:"object,omitempty"` -} - -// TableRowCondition allows a row to be marked with additional information. -// +protobuf=false -type TableRowCondition struct { - // Type of row condition. - Type RowConditionType `json:"type"` - // Status of the condition, one of True, False, Unknown. - Status ConditionStatus `json:"status"` - // (brief) machine readable reason for the condition's last transition. - // +optional - Reason string `json:"reason,omitempty"` - // Human readable message indicating details about last transition. - // +optional - Message string `json:"message,omitempty"` -} - -type RowConditionType string - -// These are valid conditions of a row. This list is not exhaustive and new conditions may be -// included by other resources. -const ( - // RowCompleted means the underlying resource has reached completion and may be given less - // visual priority than other resources. - RowCompleted RowConditionType = "Completed" -) - -type ConditionStatus string - -// These are valid condition statuses. "ConditionTrue" means a resource is in the condition. -// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes -// can't decide if a resource is in the condition or not. In the future, we could add other -// intermediate conditions, e.g. ConditionDegraded. -const ( - ConditionTrue ConditionStatus = "True" - ConditionFalse ConditionStatus = "False" - ConditionUnknown ConditionStatus = "Unknown" -) - -// IncludeObjectPolicy controls which portion of the object is returned with a Table. -type IncludeObjectPolicy string - -const ( - // IncludeNone returns no object. - IncludeNone IncludeObjectPolicy = "None" - // IncludeMetadata serializes the object containing only its metadata field. - IncludeMetadata IncludeObjectPolicy = "Metadata" - // IncludeObject contains the full object. - IncludeObject IncludeObjectPolicy = "Object" -) - -// TableOptions are used when a Table is requested by the caller. -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type TableOptions struct { - v1.TypeMeta `json:",inline"` - // includeObject decides whether to include each object along with its columnar information. - // Specifying "None" will return no object, specifying "Object" will return the full object contents, and - // specifying "Metadata" (the default) will return the object's metadata in the PartialObjectMetadata kind - // in version v1beta1 of the meta.k8s.io API group. - IncludeObject IncludeObjectPolicy `json:"includeObject,omitempty" protobuf:"bytes,1,opt,name=includeObject,casttype=IncludeObjectPolicy"` -} - -// PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients -// to get access to a particular ObjectMeta schema without knowing the details of the version. 
-// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadata struct { - v1.TypeMeta `json:",inline"` - // Standard object's metadata. - // More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata - // +optional - v1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` -} - -// PartialObjectMetadataList contains a list of objects containing only their metadata -// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object -type PartialObjectMetadataList struct { - v1.TypeMeta `json:",inline"` - - // items contains each of the included items. - Items []*PartialObjectMetadata `json:"items" protobuf:"bytes,1,rep,name=items"` -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go deleted file mode 100644 index 7394535..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/types_swagger_doc_generated.go +++ /dev/null @@ -1,104 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -// This file contains a collection of methods that can be used from go-restful to -// generate Swagger API documentation for its models. Please read this PR for more -// information on the implementation: https://github.com/emicklei/go-restful/pull/215 -// -// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if -// they are on one line! For multiple line or blocks that you want to ignore use ---. -// Any context after a --- is ignored. -// -// Those methods can be generated by using hack/update-generated-swagger-docs.sh - -// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT. -var map_PartialObjectMetadata = map[string]string{ - "": "PartialObjectMetadata is a generic representation of any object with ObjectMeta. It allows clients to get access to a particular ObjectMeta schema without knowing the details of the version.", - "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata", -} - -func (PartialObjectMetadata) SwaggerDoc() map[string]string { - return map_PartialObjectMetadata -} - -var map_PartialObjectMetadataList = map[string]string{ - "": "PartialObjectMetadataList contains a list of objects containing only their metadata", - "items": "items contains each of the included items.", -} - -func (PartialObjectMetadataList) SwaggerDoc() map[string]string { - return map_PartialObjectMetadataList -} - -var map_Table = map[string]string{ - "": "Table is a tabular representation of a set of API resources. The server transforms the object into a set of preferred columns for quickly reviewing the objects.", - "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds", - "columnDefinitions": "columnDefinitions describes each column in the returned items array. 
The number of cells per row will always match the number of column definitions.", - "rows": "rows is the list of items in the table.", -} - -func (Table) SwaggerDoc() map[string]string { - return map_Table -} - -var map_TableColumnDefinition = map[string]string{ - "": "TableColumnDefinition contains information about a column returned in the Table.", - "name": "name is a human readable name for the column.", - "type": "type is an OpenAPI type definition for this column. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "format": "format is an optional OpenAPI type definition for this column. The 'name' format is applied to the primary identifier column to assist in clients identifying column is the resource name. See https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md#data-types for more.", - "description": "description is a human readable description of this column.", - "priority": "priority is an integer defining the relative importance of this column compared to others. Lower numbers are considered higher priority. Columns that may be omitted in limited space scenarios should be given a higher priority.", -} - -func (TableColumnDefinition) SwaggerDoc() map[string]string { - return map_TableColumnDefinition -} - -var map_TableOptions = map[string]string{ - "": "TableOptions are used when a Table is requested by the caller.", - "includeObject": "includeObject decides whether to include each object along with its columnar information. Specifying \"None\" will return no object, specifying \"Object\" will return the full object contents, and specifying \"Metadata\" (the default) will return the object's metadata in the PartialObjectMetadata kind in version v1beta1 of the meta.k8s.io API group.", -} - -func (TableOptions) SwaggerDoc() map[string]string { - return map_TableOptions -} - -var map_TableRow = map[string]string{ - "": "TableRow is an individual row in a table.", - "cells": "cells will be as wide as headers and may contain strings, numbers (float64 or int64), booleans, simple maps, or lists, or null. See the type field of the column definition for a more detailed description.", - "conditions": "conditions describe additional status of a row that are relevant for a human user.", - "object": "This field contains the requested additional information about each object based on the includeObject policy when requesting the Table. If \"None\", this field is empty, if \"Object\" this will be the default serialization of the object for the current API version, and if \"Metadata\" (the default) will contain the object metadata. 
Check the returned kind and apiVersion of the object before parsing.", -} - -func (TableRow) SwaggerDoc() map[string]string { - return map_TableRow -} - -var map_TableRowCondition = map[string]string{ - "": "TableRowCondition allows a row to be marked with additional information.", - "type": "Type of row condition.", - "status": "Status of the condition, one of True, False, Unknown.", - "reason": "(brief) machine readable reason for the condition's last transition.", - "message": "Human readable message indicating details about last transition.", -} - -func (TableRowCondition) SwaggerDoc() map[string]string { - return map_TableRowCondition -} - -// AUTO-GENERATED FUNCTIONS END HERE diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go deleted file mode 100644 index b77db1b..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.deepcopy.go +++ /dev/null @@ -1,189 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by deepcopy-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadata) DeepCopyInto(out *PartialObjectMetadata) { - *out = *in - out.TypeMeta = in.TypeMeta - in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadata. -func (in *PartialObjectMetadata) DeepCopy() *PartialObjectMetadata { - if in == nil { - return nil - } - out := new(PartialObjectMetadata) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *PartialObjectMetadata) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *PartialObjectMetadataList) DeepCopyInto(out *PartialObjectMetadataList) { - *out = *in - out.TypeMeta = in.TypeMeta - if in.Items != nil { - in, out := &in.Items, &out.Items - *out = make([]*PartialObjectMetadata, len(*in)) - for i := range *in { - if (*in)[i] != nil { - in, out := &(*in)[i], &(*out)[i] - *out = new(PartialObjectMetadata) - (*in).DeepCopyInto(*out) - } - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PartialObjectMetadataList. -func (in *PartialObjectMetadataList) DeepCopy() *PartialObjectMetadataList { - if in == nil { - return nil - } - out := new(PartialObjectMetadataList) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
-func (in *PartialObjectMetadataList) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *Table) DeepCopyInto(out *Table) { - *out = *in - out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta - if in.ColumnDefinitions != nil { - in, out := &in.ColumnDefinitions, &out.ColumnDefinitions - *out = make([]TableColumnDefinition, len(*in)) - copy(*out, *in) - } - if in.Rows != nil { - in, out := &in.Rows, &out.Rows - *out = make([]TableRow, len(*in)) - for i := range *in { - (*in)[i].DeepCopyInto(&(*out)[i]) - } - } - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Table. -func (in *Table) DeepCopy() *Table { - if in == nil { - return nil - } - out := new(Table) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *Table) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableColumnDefinition) DeepCopyInto(out *TableColumnDefinition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableColumnDefinition. -func (in *TableColumnDefinition) DeepCopy() *TableColumnDefinition { - if in == nil { - return nil - } - out := new(TableColumnDefinition) - in.DeepCopyInto(out) - return out -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableOptions) DeepCopyInto(out *TableOptions) { - *out = *in - out.TypeMeta = in.TypeMeta - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableOptions. -func (in *TableOptions) DeepCopy() *TableOptions { - if in == nil { - return nil - } - out := new(TableOptions) - in.DeepCopyInto(out) - return out -} - -// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. -func (in *TableOptions) DeepCopyObject() runtime.Object { - if c := in.DeepCopy(); c != nil { - return c - } - return nil -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRow) DeepCopyInto(out *TableRow) { - clone := in.DeepCopy() - *out = *clone - return -} - -// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *TableRowCondition) DeepCopyInto(out *TableRowCondition) { - *out = *in - return -} - -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TableRowCondition. -func (in *TableRowCondition) DeepCopy() *TableRowCondition { - if in == nil { - return nil - } - out := new(TableRowCondition) - in.DeepCopyInto(out) - return out -} diff --git a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go b/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go deleted file mode 100644 index 73e63fc..0000000 --- a/vendor/k8s.io/apimachinery/pkg/apis/meta/v1beta1/zz_generated.defaults.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !ignore_autogenerated - -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by defaulter-gen. DO NOT EDIT. - -package v1beta1 - -import ( - runtime "k8s.io/apimachinery/pkg/runtime" -) - -// RegisterDefaults adds defaulters functions to the given scheme. -// Public to allow building arbitrary schemes. -// All generated defaulters are covering - they call all nested defaulters. -func RegisterDefaults(scheme *runtime.Scheme) error { - return nil -} diff --git a/vendor/k8s.io/client-go/discovery/cached_discovery.go b/vendor/k8s.io/client-go/discovery/cached_discovery.go deleted file mode 100644 index e98cad9..0000000 --- a/vendor/k8s.io/client-go/discovery/cached_discovery.go +++ /dev/null @@ -1,282 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "errors" - "io/ioutil" - "net/http" - "os" - "path/filepath" - "sync" - "time" - - "github.com/golang/glog" - "github.com/googleapis/gnostic/OpenAPIv2" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" -) - -// CachedDiscoveryClient implements the functions that discovery server-supported API groups, -// versions and resources. -type CachedDiscoveryClient struct { - delegate DiscoveryInterface - - // cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. - cacheDirectory string - - // ttl is how long the cache should be considered valid - ttl time.Duration - - // mutex protects the variables below - mutex sync.Mutex - - // ourFiles are all filenames of cache files created by this process - ourFiles map[string]struct{} - // invalidated is true if all cache files should be ignored that are not ours (e.g. after Invalidate() was called) - invalidated bool - // fresh is true if all used cache files were ours - fresh bool -} - -var _ CachedDiscoveryInterface = &CachedDiscoveryClient{} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -func (d *CachedDiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) { - filename := filepath.Join(d.cacheDirectory, groupVersion, "serverresources.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. 
- if err == nil { - cachedResources := &metav1.APIResourceList{} - if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedResources); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) - return cachedResources, nil - } - } - - liveResources, err := d.delegate.ServerResourcesForGroupVersion(groupVersion) - if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) - return liveResources, err - } - if liveResources == nil || len(liveResources.APIResources) == 0 { - glog.V(3).Infof("skipped caching discovery info, no resources found") - return liveResources, err - } - - if err := d.writeCachedFile(filename, liveResources); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveResources, nil -} - -// ServerResources returns the supported resources for all groups and versions. -func (d *CachedDiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - return ServerResources(d) -} - -func (d *CachedDiscoveryClient) ServerGroups() (*metav1.APIGroupList, error) { - filename := filepath.Join(d.cacheDirectory, "servergroups.json") - cachedBytes, err := d.getCachedFile(filename) - // don't fail on errors, we either don't have a file or won't be able to run the cached check. Either way we can fallback. - if err == nil { - cachedGroups := &metav1.APIGroupList{} - if err := runtime.DecodeInto(scheme.Codecs.UniversalDecoder(), cachedBytes, cachedGroups); err == nil { - glog.V(10).Infof("returning cached discovery info from %v", filename) - return cachedGroups, nil - } - } - - liveGroups, err := d.delegate.ServerGroups() - if err != nil { - glog.V(3).Infof("skipped caching discovery info due to %v", err) - return liveGroups, err - } - if liveGroups == nil || len(liveGroups.Groups) == 0 { - glog.V(3).Infof("skipped caching discovery info, no groups found") - return liveGroups, err - } - - if err := d.writeCachedFile(filename, liveGroups); err != nil { - glog.V(1).Infof("failed to write cache to %v due to %v", filename, err) - } - - return liveGroups, nil -} - -func (d *CachedDiscoveryClient) getCachedFile(filename string) ([]byte, error) { - // after invalidation ignore cache files not created by this process - d.mutex.Lock() - _, ourFile := d.ourFiles[filename] - if d.invalidated && !ourFile { - d.mutex.Unlock() - return nil, errors.New("cache invalidated") - } - d.mutex.Unlock() - - file, err := os.Open(filename) - if err != nil { - return nil, err - } - defer file.Close() - - fileInfo, err := file.Stat() - if err != nil { - return nil, err - } - - if time.Now().After(fileInfo.ModTime().Add(d.ttl)) { - return nil, errors.New("cache expired") - } - - // the cache is present and its valid. Try to read and use it. 
- cachedBytes, err := ioutil.ReadAll(file) - if err != nil { - return nil, err - } - - d.mutex.Lock() - defer d.mutex.Unlock() - d.fresh = d.fresh && ourFile - - return cachedBytes, nil -} - -func (d *CachedDiscoveryClient) writeCachedFile(filename string, obj runtime.Object) error { - if err := os.MkdirAll(filepath.Dir(filename), 0750); err != nil { - return err - } - - bytes, err := runtime.Encode(scheme.Codecs.LegacyCodec(), obj) - if err != nil { - return err - } - - f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+".") - if err != nil { - return err - } - defer os.Remove(f.Name()) - _, err = f.Write(bytes) - if err != nil { - return err - } - - err = os.Chmod(f.Name(), 0660) - if err != nil { - return err - } - - name := f.Name() - err = f.Close() - if err != nil { - return err - } - - // atomic rename - d.mutex.Lock() - defer d.mutex.Unlock() - err = os.Rename(name, filename) - if err == nil { - d.ourFiles[filename] = struct{}{} - } - return err -} - -func (d *CachedDiscoveryClient) RESTClient() restclient.Interface { - return d.delegate.RESTClient() -} - -func (d *CachedDiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredResources(d) -} - -func (d *CachedDiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredNamespacedResources(d) -} - -func (d *CachedDiscoveryClient) ServerVersion() (*version.Info, error) { - return d.delegate.ServerVersion() -} - -func (d *CachedDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { - return d.delegate.OpenAPISchema() -} - -func (d *CachedDiscoveryClient) Fresh() bool { - d.mutex.Lock() - defer d.mutex.Unlock() - - return d.fresh -} - -func (d *CachedDiscoveryClient) Invalidate() { - d.mutex.Lock() - defer d.mutex.Unlock() - - d.ourFiles = map[string]struct{}{} - d.fresh = true - d.invalidated = true -} - -// NewCachedDiscoveryClientForConfig creates a new DiscoveryClient for the given config, and wraps -// the created client in a CachedDiscoveryClient. The provided configuration is updated with a -// custom transport that understands cache responses. -// We receive two distinct cache directories for now, in order to preserve old behavior -// which makes use of the --cache-dir flag value for storing cache data from the CacheRoundTripper, -// and makes use of the hardcoded destination (~/.kube/cache/discovery/...) for storing -// CachedDiscoveryClient cache data. If httpCacheDir is empty, the restconfig's transport will not -// be updated with a roundtripper that understands cache responses. -// If discoveryCacheDir is empty, cached server resource data will be looked up in the current directory. -// TODO(juanvallejo): the value of "--cache-dir" should be honored. Consolidate discoveryCacheDir with httpCacheDir -// so that server resources and http-cache data are stored in the same location, provided via config flags. -func NewCachedDiscoveryClientForConfig(config *restclient.Config, discoveryCacheDir, httpCacheDir string, ttl time.Duration) (*CachedDiscoveryClient, error) { - if len(httpCacheDir) > 0 { - // update the given restconfig with a custom roundtripper that - // understands how to handle cache responses. 
- wt := config.WrapTransport - config.WrapTransport = func(rt http.RoundTripper) http.RoundTripper { - if wt != nil { - rt = wt(rt) - } - return newCacheRoundTripper(httpCacheDir, rt) - } - } - - discoveryClient, err := NewDiscoveryClientForConfig(config) - if err != nil { - return nil, err - } - - return newCachedDiscoveryClient(discoveryClient, discoveryCacheDir, ttl), nil -} - -// NewCachedDiscoveryClient creates a new DiscoveryClient. cacheDirectory is the directory where discovery docs are held. It must be unique per host:port combination to work well. -func newCachedDiscoveryClient(delegate DiscoveryInterface, cacheDirectory string, ttl time.Duration) *CachedDiscoveryClient { - return &CachedDiscoveryClient{ - delegate: delegate, - cacheDirectory: cacheDirectory, - ttl: ttl, - ourFiles: map[string]struct{}{}, - fresh: true, - } -} diff --git a/vendor/k8s.io/client-go/discovery/discovery_client.go b/vendor/k8s.io/client-go/discovery/discovery_client.go deleted file mode 100644 index a966029..0000000 --- a/vendor/k8s.io/client-go/discovery/discovery_client.go +++ /dev/null @@ -1,472 +0,0 @@ -/* -Copyright 2015 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "encoding/json" - "fmt" - "net/url" - "sort" - "strings" - "sync" - "time" - - "github.com/golang/protobuf/proto" - "github.com/googleapis/gnostic/OpenAPIv2" - - "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/version" - "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" -) - -const ( - // defaultRetries is the number of times a resource discovery is repeated if an api group disappears on the fly (e.g. ThirdPartyResources). - defaultRetries = 2 - // protobuf mime type - mimePb = "application/com.github.proto-openapi.spec.v2@v1.0+protobuf" - // defaultTimeout is the maximum amount of time per request when no timeout has been set on a RESTClient. - // Defaults to 32s in order to have a distinguishable length of time, relative to other timeouts that exist. - defaultTimeout = 32 * time.Second -) - -// DiscoveryInterface holds the methods that discover server-supported API groups, -// versions and resources. -type DiscoveryInterface interface { - RESTClient() restclient.Interface - ServerGroupsInterface - ServerResourcesInterface - ServerVersionInterface - OpenAPISchemaInterface -} - -// CachedDiscoveryInterface is a DiscoveryInterface with cache invalidation and freshness. -type CachedDiscoveryInterface interface { - DiscoveryInterface - // Fresh is supposed to tell the caller whether or not to retry if the cache - // fails to find something (false = retry, true = no need to retry). - // - // TODO: this needs to be revisited, this interface can't be locked properly - // and doesn't make a lot of sense. 
- Fresh() bool - // Invalidate enforces that no cached data is used in the future that is older than the current time. - Invalidate() -} - -// ServerGroupsInterface has methods for obtaining supported groups on the API server -type ServerGroupsInterface interface { - // ServerGroups returns the supported groups, with information like supported versions and the - // preferred version. - ServerGroups() (*metav1.APIGroupList, error) -} - -// ServerResourcesInterface has methods for obtaining supported resources on the API server -type ServerResourcesInterface interface { - // ServerResourcesForGroupVersion returns the supported resources for a group and version. - ServerResourcesForGroupVersion(groupVersion string) (*metav1.APIResourceList, error) - // ServerResources returns the supported resources for all groups and versions. - ServerResources() ([]*metav1.APIResourceList, error) - // ServerPreferredResources returns the supported resources with the version preferred by the - // server. - ServerPreferredResources() ([]*metav1.APIResourceList, error) - // ServerPreferredNamespacedResources returns the supported namespaced resources with the - // version preferred by the server. - ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) -} - -// ServerVersionInterface has a method for retrieving the server's version. -type ServerVersionInterface interface { - // ServerVersion retrieves and parses the server's version (git version). - ServerVersion() (*version.Info, error) -} - -// OpenAPISchemaInterface has a method to retrieve the open API schema. -type OpenAPISchemaInterface interface { - // OpenAPISchema retrieves and parses the swagger API schema the server supports. - OpenAPISchema() (*openapi_v2.Document, error) -} - -// DiscoveryClient implements the functions that discover server-supported API groups, -// versions and resources. -type DiscoveryClient struct { - restClient restclient.Interface - - LegacyPrefix string -} - -// Convert metav1.APIVersions to metav1.APIGroup. APIVersions is used by legacy v1, so -// group would be "". -func apiVersionsToAPIGroup(apiVersions *metav1.APIVersions) (apiGroup metav1.APIGroup) { - groupVersions := []metav1.GroupVersionForDiscovery{} - for _, version := range apiVersions.Versions { - groupVersion := metav1.GroupVersionForDiscovery{ - GroupVersion: version, - Version: version, - } - groupVersions = append(groupVersions, groupVersion) - } - apiGroup.Versions = groupVersions - // There should be only one groupVersion returned at /api - apiGroup.PreferredVersion = groupVersions[0] - return -} - -// ServerGroups returns the supported groups, with information like supported versions and the -// preferred version. 
-func (d *DiscoveryClient) ServerGroups() (apiGroupList *metav1.APIGroupList, err error) { - // Get the groupVersions exposed at /api - v := &metav1.APIVersions{} - err = d.restClient.Get().AbsPath(d.LegacyPrefix).Do().Into(v) - apiGroup := metav1.APIGroup{} - if err == nil && len(v.Versions) != 0 { - apiGroup = apiVersionsToAPIGroup(v) - } - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err - } - - // Get the groupVersions exposed at /apis - apiGroupList = &metav1.APIGroupList{} - err = d.restClient.Get().AbsPath("/apis").Do().Into(apiGroupList) - if err != nil && !errors.IsNotFound(err) && !errors.IsForbidden(err) { - return nil, err - } - // to be compatible with a v1.0 server, if it's a 403 or 404, ignore and return whatever we got from /api - if err != nil && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - apiGroupList = &metav1.APIGroupList{} - } - - // prepend the group retrieved from /api to the list if not empty - if len(v.Versions) != 0 { - apiGroupList.Groups = append([]metav1.APIGroup{apiGroup}, apiGroupList.Groups...) - } - return apiGroupList, nil -} - -// ServerResourcesForGroupVersion returns the supported resources for a group and version. -func (d *DiscoveryClient) ServerResourcesForGroupVersion(groupVersion string) (resources *metav1.APIResourceList, err error) { - url := url.URL{} - if len(groupVersion) == 0 { - return nil, fmt.Errorf("groupVersion shouldn't be empty") - } - if len(d.LegacyPrefix) > 0 && groupVersion == "v1" { - url.Path = d.LegacyPrefix + "/" + groupVersion - } else { - url.Path = "/apis/" + groupVersion - } - resources = &metav1.APIResourceList{ - GroupVersion: groupVersion, - } - err = d.restClient.Get().AbsPath(url.String()).Do().Into(resources) - if err != nil { - // ignore 403 or 404 error to be compatible with an v1.0 server. - if groupVersion == "v1" && (errors.IsNotFound(err) || errors.IsForbidden(err)) { - return resources, nil - } - return nil, err - } - return resources, nil -} - -// serverResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) serverResources() ([]*metav1.APIResourceList, error) { - return ServerResources(d) -} - -// ServerResources returns the supported resources for all groups and versions. -func (d *DiscoveryClient) ServerResources() ([]*metav1.APIResourceList, error) { - return withRetries(defaultRetries, d.serverResources) -} - -// ErrGroupDiscoveryFailed is returned if one or more API groups fail to load. -type ErrGroupDiscoveryFailed struct { - // Groups is a list of the groups that failed to load and the error cause - Groups map[schema.GroupVersion]error -} - -// Error implements the error interface -func (e *ErrGroupDiscoveryFailed) Error() string { - var groups []string - for k, v := range e.Groups { - groups = append(groups, fmt.Sprintf("%s: %v", k, v)) - } - sort.Strings(groups) - return fmt.Sprintf("unable to retrieve the complete list of server APIs: %s", strings.Join(groups, ", ")) -} - -// IsGroupDiscoveryFailedError returns true if the provided error indicates the server was unable to discover -// a complete list of APIs for the client to use. -func IsGroupDiscoveryFailedError(err error) bool { - _, ok := err.(*ErrGroupDiscoveryFailed) - return err != nil && ok -} - -// serverPreferredResources returns the supported resources with the version preferred by the server. 
-func (d *DiscoveryClient) serverPreferredResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredResources(d) -} - -// ServerResources uses the provided discovery interface to look up supported resources for all groups and versions. -func ServerResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - apiGroups, err := d.ServerGroups() - if err != nil { - return nil, err - } - - groupVersionResources, failedGroups := fetchGroupVersionResources(d, apiGroups) - - // order results by group/version discovery order - result := []*metav1.APIResourceList{} - for _, apiGroup := range apiGroups.Groups { - for _, version := range apiGroup.Versions { - gv := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - if resources, ok := groupVersionResources[gv]; ok { - result = append(result, resources) - } - } - } - - if len(failedGroups) == 0 { - return result, nil - } - - return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} -} - -// ServerPreferredResources uses the provided discovery interface to look up preferred resources -func ServerPreferredResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - serverGroupList, err := d.ServerGroups() - if err != nil { - return nil, err - } - - groupVersionResources, failedGroups := fetchGroupVersionResources(d, serverGroupList) - - result := []*metav1.APIResourceList{} - grVersions := map[schema.GroupResource]string{} // selected version of a GroupResource - grApiResources := map[schema.GroupResource]*metav1.APIResource{} // selected APIResource for a GroupResource - gvApiResourceLists := map[schema.GroupVersion]*metav1.APIResourceList{} // blueprint for a APIResourceList for later grouping - - for _, apiGroup := range serverGroupList.Groups { - for _, version := range apiGroup.Versions { - groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - - apiResourceList, ok := groupVersionResources[groupVersion] - if !ok { - continue - } - - // create empty list which is filled later in another loop - emptyApiResourceList := metav1.APIResourceList{ - GroupVersion: version.GroupVersion, - } - gvApiResourceLists[groupVersion] = &emptyApiResourceList - result = append(result, &emptyApiResourceList) - - for i := range apiResourceList.APIResources { - apiResource := &apiResourceList.APIResources[i] - if strings.Contains(apiResource.Name, "/") { - continue - } - gv := schema.GroupResource{Group: apiGroup.Name, Resource: apiResource.Name} - if _, ok := grApiResources[gv]; ok && version.Version != apiGroup.PreferredVersion.Version { - // only override with preferred version - continue - } - grVersions[gv] = version.Version - grApiResources[gv] = apiResource - } - } - } - - // group selected APIResources according to GroupVersion into APIResourceLists - for groupResource, apiResource := range grApiResources { - version := grVersions[groupResource] - groupVersion := schema.GroupVersion{Group: groupResource.Group, Version: version} - apiResourceList := gvApiResourceLists[groupVersion] - apiResourceList.APIResources = append(apiResourceList.APIResources, *apiResource) - } - - if len(failedGroups) == 0 { - return result, nil - } - - return result, &ErrGroupDiscoveryFailed{Groups: failedGroups} -} - -// fetchServerResourcesForGroupVersions uses the discovery client to fetch the resources for the specified groups in parallel -func fetchGroupVersionResources(d DiscoveryInterface, apiGroups *metav1.APIGroupList) (map[schema.GroupVersion]*metav1.APIResourceList, 
map[schema.GroupVersion]error) { - groupVersionResources := make(map[schema.GroupVersion]*metav1.APIResourceList) - failedGroups := make(map[schema.GroupVersion]error) - - wg := &sync.WaitGroup{} - resultLock := &sync.Mutex{} - for _, apiGroup := range apiGroups.Groups { - for _, version := range apiGroup.Versions { - groupVersion := schema.GroupVersion{Group: apiGroup.Name, Version: version.Version} - wg.Add(1) - go func() { - defer wg.Done() - defer utilruntime.HandleCrash() - - apiResourceList, err := d.ServerResourcesForGroupVersion(groupVersion.String()) - - // lock to record results - resultLock.Lock() - defer resultLock.Unlock() - - if err != nil { - // TODO: maybe restrict this to NotFound errors - failedGroups[groupVersion] = err - } else { - groupVersionResources[groupVersion] = apiResourceList - } - }() - } - } - wg.Wait() - - return groupVersionResources, failedGroups -} - -// ServerPreferredResources returns the supported resources with the version preferred by the -// server. -func (d *DiscoveryClient) ServerPreferredResources() ([]*metav1.APIResourceList, error) { - return withRetries(defaultRetries, d.serverPreferredResources) -} - -// ServerPreferredNamespacedResources returns the supported namespaced resources with the -// version preferred by the server. -func (d *DiscoveryClient) ServerPreferredNamespacedResources() ([]*metav1.APIResourceList, error) { - return ServerPreferredNamespacedResources(d) -} - -// ServerPreferredNamespacedResources uses the provided discovery interface to look up preferred namespaced resources -func ServerPreferredNamespacedResources(d DiscoveryInterface) ([]*metav1.APIResourceList, error) { - all, err := ServerPreferredResources(d) - return FilteredBy(ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool { - return r.Namespaced - }), all), err -} - -// ServerVersion retrieves and parses the server's version (git version). -func (d *DiscoveryClient) ServerVersion() (*version.Info, error) { - body, err := d.restClient.Get().AbsPath("/version").Do().Raw() - if err != nil { - return nil, err - } - var info version.Info - err = json.Unmarshal(body, &info) - if err != nil { - return nil, fmt.Errorf("got '%s': %v", string(body), err) - } - return &info, nil -} - -// OpenAPISchema fetches the open api schema using a rest client and parses the proto. -func (d *DiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) { - data, err := d.restClient.Get().AbsPath("/openapi/v2").SetHeader("Accept", mimePb).Do().Raw() - if err != nil { - if errors.IsForbidden(err) || errors.IsNotFound(err) || errors.IsNotAcceptable(err) { - // single endpoint not found/registered in old server, try to fetch old endpoint - // TODO(roycaihw): remove this in 1.11 - data, err = d.restClient.Get().AbsPath("/swagger-2.0.0.pb-v1").Do().Raw() - if err != nil { - return nil, err - } - } else { - return nil, err - } - } - document := &openapi_v2.Document{} - err = proto.Unmarshal(data, document) - if err != nil { - return nil, err - } - return document, nil -} - -// withRetries retries the given recovery function in case the groups supported by the server change after ServerGroup() returns. 
-func withRetries(maxRetries int, f func() ([]*metav1.APIResourceList, error)) ([]*metav1.APIResourceList, error) { - var result []*metav1.APIResourceList - var err error - for i := 0; i < maxRetries; i++ { - result, err = f() - if err == nil { - return result, nil - } - if _, ok := err.(*ErrGroupDiscoveryFailed); !ok { - return nil, err - } - } - return result, err -} - -func setDiscoveryDefaults(config *restclient.Config) error { - config.APIPath = "" - config.GroupVersion = nil - if config.Timeout == 0 { - config.Timeout = defaultTimeout - } - codec := runtime.NoopEncoder{Decoder: scheme.Codecs.UniversalDecoder()} - config.NegotiatedSerializer = serializer.NegotiatedSerializerWrapper(runtime.SerializerInfo{Serializer: codec}) - if len(config.UserAgent) == 0 { - config.UserAgent = restclient.DefaultKubernetesUserAgent() - } - return nil -} - -// NewDiscoveryClientForConfig creates a new DiscoveryClient for the given config. This client -// can be used to discover supported resources in the API server. -func NewDiscoveryClientForConfig(c *restclient.Config) (*DiscoveryClient, error) { - config := *c - if err := setDiscoveryDefaults(&config); err != nil { - return nil, err - } - client, err := restclient.UnversionedRESTClientFor(&config) - return &DiscoveryClient{restClient: client, LegacyPrefix: "/api"}, err -} - -// NewDiscoveryClientForConfigOrDie creates a new DiscoveryClient for the given config. If -// there is an error, it panics. -func NewDiscoveryClientForConfigOrDie(c *restclient.Config) *DiscoveryClient { - client, err := NewDiscoveryClientForConfig(c) - if err != nil { - panic(err) - } - return client - -} - -// NewDiscoveryClient returns a new DiscoveryClient for the given RESTClient. -func NewDiscoveryClient(c restclient.Interface) *DiscoveryClient { - return &DiscoveryClient{restClient: c, LegacyPrefix: "/api"} -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *DiscoveryClient) RESTClient() restclient.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/discovery/doc.go b/vendor/k8s.io/client-go/discovery/doc.go deleted file mode 100644 index 7649558..0000000 --- a/vendor/k8s.io/client-go/discovery/doc.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Package discovery provides ways to discover server-supported -// API groups, versions and resources. -package discovery diff --git a/vendor/k8s.io/client-go/discovery/helper.go b/vendor/k8s.io/client-go/discovery/helper.go deleted file mode 100644 index 353d34b..0000000 --- a/vendor/k8s.io/client-go/discovery/helper.go +++ /dev/null @@ -1,121 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "fmt" - - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/util/sets" - apimachineryversion "k8s.io/apimachinery/pkg/version" -) - -// MatchesServerVersion queries the server to compares the build version -// (git hash) of the client with the server's build version. It returns an error -// if it failed to contact the server or if the versions are not an exact match. -func MatchesServerVersion(clientVersion apimachineryversion.Info, client DiscoveryInterface) error { - sVer, err := client.ServerVersion() - if err != nil { - return fmt.Errorf("couldn't read version from server: %v\n", err) - } - // GitVersion includes GitCommit and GitTreeState, but best to be safe? - if clientVersion.GitVersion != sVer.GitVersion || clientVersion.GitCommit != sVer.GitCommit || clientVersion.GitTreeState != sVer.GitTreeState { - return fmt.Errorf("server version (%#v) differs from client version (%#v)!\n", sVer, clientVersion) - } - - return nil -} - -// ServerSupportsVersion returns an error if the server doesn't have the required version -func ServerSupportsVersion(client DiscoveryInterface, requiredGV schema.GroupVersion) error { - groups, err := client.ServerGroups() - if err != nil { - // This is almost always a connection error, and higher level code should treat this as a generic error, - // not a negotiation specific error. - return err - } - versions := metav1.ExtractGroupVersions(groups) - serverVersions := sets.String{} - for _, v := range versions { - serverVersions.Insert(v) - } - - if serverVersions.Has(requiredGV.String()) { - return nil - } - - // If the server supports no versions, then we should pretend it has the version because of old servers. - // This can happen because discovery fails due to 403 Forbidden errors - if len(serverVersions) == 0 { - return nil - } - - return fmt.Errorf("server does not support API version %q", requiredGV) -} - -// GroupVersionResources converts APIResourceLists to the GroupVersionResources. -func GroupVersionResources(rls []*metav1.APIResourceList) (map[schema.GroupVersionResource]struct{}, error) { - gvrs := map[schema.GroupVersionResource]struct{}{} - for _, rl := range rls { - gv, err := schema.ParseGroupVersion(rl.GroupVersion) - if err != nil { - return nil, err - } - for i := range rl.APIResources { - gvrs[schema.GroupVersionResource{Group: gv.Group, Version: gv.Version, Resource: rl.APIResources[i].Name}] = struct{}{} - } - } - return gvrs, nil -} - -// FilteredBy filters by the given predicate. Empty APIResourceLists are dropped. 
-func FilteredBy(pred ResourcePredicate, rls []*metav1.APIResourceList) []*metav1.APIResourceList { - result := []*metav1.APIResourceList{} - for _, rl := range rls { - filtered := *rl - filtered.APIResources = nil - for i := range rl.APIResources { - if pred.Match(rl.GroupVersion, &rl.APIResources[i]) { - filtered.APIResources = append(filtered.APIResources, rl.APIResources[i]) - } - } - if filtered.APIResources != nil { - result = append(result, &filtered) - } - } - return result -} - -type ResourcePredicate interface { - Match(groupVersion string, r *metav1.APIResource) bool -} - -type ResourcePredicateFunc func(groupVersion string, r *metav1.APIResource) bool - -func (fn ResourcePredicateFunc) Match(groupVersion string, r *metav1.APIResource) bool { - return fn(groupVersion, r) -} - -// SupportsAllVerbs is a predicate matching a resource iff all given verbs are supported. -type SupportsAllVerbs struct { - Verbs []string -} - -func (p SupportsAllVerbs) Match(groupVersion string, r *metav1.APIResource) bool { - return sets.NewString([]string(r.Verbs)...).HasAll(p.Verbs...) -} diff --git a/vendor/k8s.io/client-go/discovery/round_tripper.go b/vendor/k8s.io/client-go/discovery/round_tripper.go deleted file mode 100644 index 86081c1..0000000 --- a/vendor/k8s.io/client-go/discovery/round_tripper.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package discovery - -import ( - "net/http" - "os" - "path/filepath" - - "github.com/golang/glog" - "github.com/gregjones/httpcache" - "github.com/gregjones/httpcache/diskcache" - "github.com/peterbourgon/diskv" -) - -type cacheRoundTripper struct { - rt *httpcache.Transport -} - -// newCacheRoundTripper creates a roundtripper that reads the ETag on -// response headers and send the If-None-Match header on subsequent -// corresponding requests. -func newCacheRoundTripper(cacheDir string, rt http.RoundTripper) http.RoundTripper { - d := diskv.New(diskv.Options{ - PathPerm: os.FileMode(0750), - FilePerm: os.FileMode(0660), - BasePath: cacheDir, - TempDir: filepath.Join(cacheDir, ".diskv-temp"), - }) - t := httpcache.NewTransport(diskcache.NewWithDiskv(d)) - t.Transport = rt - - return &cacheRoundTripper{rt: t} -} - -func (rt *cacheRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return rt.rt.RoundTrip(req) -} - -func (rt *cacheRoundTripper) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := rt.rt.Transport.(canceler); ok { - cr.CancelRequest(req) - } else { - glog.Errorf("CancelRequest not implemented by %T", rt.rt.Transport) - } -} - -func (rt *cacheRoundTripper) WrappedRoundTripper() http.RoundTripper { return rt.rt.Transport } diff --git a/vendor/k8s.io/client-go/kubernetes/clientset.go b/vendor/k8s.io/client-go/kubernetes/clientset.go deleted file mode 100644 index 122e4bb..0000000 --- a/vendor/k8s.io/client-go/kubernetes/clientset.go +++ /dev/null @@ -1,646 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package kubernetes - -import ( - discovery "k8s.io/client-go/discovery" - admissionregistrationv1alpha1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1" - admissionregistrationv1beta1 "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1" - appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" - appsv1beta1 "k8s.io/client-go/kubernetes/typed/apps/v1beta1" - appsv1beta2 "k8s.io/client-go/kubernetes/typed/apps/v1beta2" - authenticationv1 "k8s.io/client-go/kubernetes/typed/authentication/v1" - authenticationv1beta1 "k8s.io/client-go/kubernetes/typed/authentication/v1beta1" - authorizationv1 "k8s.io/client-go/kubernetes/typed/authorization/v1" - authorizationv1beta1 "k8s.io/client-go/kubernetes/typed/authorization/v1beta1" - autoscalingv1 "k8s.io/client-go/kubernetes/typed/autoscaling/v1" - autoscalingv2beta1 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1" - autoscalingv2beta2 "k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2" - batchv1 "k8s.io/client-go/kubernetes/typed/batch/v1" - batchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" - batchv2alpha1 "k8s.io/client-go/kubernetes/typed/batch/v2alpha1" - certificatesv1beta1 "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" - coordinationv1beta1 "k8s.io/client-go/kubernetes/typed/coordination/v1beta1" - corev1 "k8s.io/client-go/kubernetes/typed/core/v1" - eventsv1beta1 "k8s.io/client-go/kubernetes/typed/events/v1beta1" - extensionsv1beta1 "k8s.io/client-go/kubernetes/typed/extensions/v1beta1" - networkingv1 "k8s.io/client-go/kubernetes/typed/networking/v1" - policyv1beta1 "k8s.io/client-go/kubernetes/typed/policy/v1beta1" - rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" - rbacv1alpha1 "k8s.io/client-go/kubernetes/typed/rbac/v1alpha1" - rbacv1beta1 "k8s.io/client-go/kubernetes/typed/rbac/v1beta1" - schedulingv1alpha1 "k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1" - schedulingv1beta1 "k8s.io/client-go/kubernetes/typed/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/client-go/kubernetes/typed/settings/v1alpha1" - storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1" - storagev1alpha1 "k8s.io/client-go/kubernetes/typed/storage/v1alpha1" - storagev1beta1 "k8s.io/client-go/kubernetes/typed/storage/v1beta1" - rest "k8s.io/client-go/rest" - flowcontrol "k8s.io/client-go/util/flowcontrol" -) - -type Interface interface { - Discovery() discovery.DiscoveryInterface - AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface - AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface - AppsV1beta1() appsv1beta1.AppsV1beta1Interface - AppsV1beta2() appsv1beta2.AppsV1beta2Interface - AppsV1() appsv1.AppsV1Interface - // Deprecated: please explicitly pick a version if possible. 
- Apps() appsv1.AppsV1Interface - AuthenticationV1() authenticationv1.AuthenticationV1Interface - // Deprecated: please explicitly pick a version if possible. - Authentication() authenticationv1.AuthenticationV1Interface - AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface - AuthorizationV1() authorizationv1.AuthorizationV1Interface - // Deprecated: please explicitly pick a version if possible. - Authorization() authorizationv1.AuthorizationV1Interface - AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface - AutoscalingV1() autoscalingv1.AutoscalingV1Interface - // Deprecated: please explicitly pick a version if possible. - Autoscaling() autoscalingv1.AutoscalingV1Interface - AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface - AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface - BatchV1() batchv1.BatchV1Interface - // Deprecated: please explicitly pick a version if possible. - Batch() batchv1.BatchV1Interface - BatchV1beta1() batchv1beta1.BatchV1beta1Interface - BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface - CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Certificates() certificatesv1beta1.CertificatesV1beta1Interface - CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Coordination() coordinationv1beta1.CoordinationV1beta1Interface - CoreV1() corev1.CoreV1Interface - // Deprecated: please explicitly pick a version if possible. - Core() corev1.CoreV1Interface - EventsV1beta1() eventsv1beta1.EventsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Events() eventsv1beta1.EventsV1beta1Interface - ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Extensions() extensionsv1beta1.ExtensionsV1beta1Interface - NetworkingV1() networkingv1.NetworkingV1Interface - // Deprecated: please explicitly pick a version if possible. - Networking() networkingv1.NetworkingV1Interface - PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Policy() policyv1beta1.PolicyV1beta1Interface - RbacV1() rbacv1.RbacV1Interface - // Deprecated: please explicitly pick a version if possible. - Rbac() rbacv1.RbacV1Interface - RbacV1beta1() rbacv1beta1.RbacV1beta1Interface - RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface - SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface - SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface - // Deprecated: please explicitly pick a version if possible. - Scheduling() schedulingv1beta1.SchedulingV1beta1Interface - SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface - // Deprecated: please explicitly pick a version if possible. - Settings() settingsv1alpha1.SettingsV1alpha1Interface - StorageV1beta1() storagev1beta1.StorageV1beta1Interface - StorageV1() storagev1.StorageV1Interface - // Deprecated: please explicitly pick a version if possible. - Storage() storagev1.StorageV1Interface - StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface -} - -// Clientset contains the clients for groups. Each group has exactly one -// version included in a Clientset. 
-type Clientset struct { - *discovery.DiscoveryClient - admissionregistrationV1alpha1 *admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Client - admissionregistrationV1beta1 *admissionregistrationv1beta1.AdmissionregistrationV1beta1Client - appsV1beta1 *appsv1beta1.AppsV1beta1Client - appsV1beta2 *appsv1beta2.AppsV1beta2Client - appsV1 *appsv1.AppsV1Client - authenticationV1 *authenticationv1.AuthenticationV1Client - authenticationV1beta1 *authenticationv1beta1.AuthenticationV1beta1Client - authorizationV1 *authorizationv1.AuthorizationV1Client - authorizationV1beta1 *authorizationv1beta1.AuthorizationV1beta1Client - autoscalingV1 *autoscalingv1.AutoscalingV1Client - autoscalingV2beta1 *autoscalingv2beta1.AutoscalingV2beta1Client - autoscalingV2beta2 *autoscalingv2beta2.AutoscalingV2beta2Client - batchV1 *batchv1.BatchV1Client - batchV1beta1 *batchv1beta1.BatchV1beta1Client - batchV2alpha1 *batchv2alpha1.BatchV2alpha1Client - certificatesV1beta1 *certificatesv1beta1.CertificatesV1beta1Client - coordinationV1beta1 *coordinationv1beta1.CoordinationV1beta1Client - coreV1 *corev1.CoreV1Client - eventsV1beta1 *eventsv1beta1.EventsV1beta1Client - extensionsV1beta1 *extensionsv1beta1.ExtensionsV1beta1Client - networkingV1 *networkingv1.NetworkingV1Client - policyV1beta1 *policyv1beta1.PolicyV1beta1Client - rbacV1 *rbacv1.RbacV1Client - rbacV1beta1 *rbacv1beta1.RbacV1beta1Client - rbacV1alpha1 *rbacv1alpha1.RbacV1alpha1Client - schedulingV1alpha1 *schedulingv1alpha1.SchedulingV1alpha1Client - schedulingV1beta1 *schedulingv1beta1.SchedulingV1beta1Client - settingsV1alpha1 *settingsv1alpha1.SettingsV1alpha1Client - storageV1beta1 *storagev1beta1.StorageV1beta1Client - storageV1 *storagev1.StorageV1Client - storageV1alpha1 *storagev1alpha1.StorageV1alpha1Client -} - -// AdmissionregistrationV1alpha1 retrieves the AdmissionregistrationV1alpha1Client -func (c *Clientset) AdmissionregistrationV1alpha1() admissionregistrationv1alpha1.AdmissionregistrationV1alpha1Interface { - return c.admissionregistrationV1alpha1 -} - -// AdmissionregistrationV1beta1 retrieves the AdmissionregistrationV1beta1Client -func (c *Clientset) AdmissionregistrationV1beta1() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { - return c.admissionregistrationV1beta1 -} - -// Deprecated: Admissionregistration retrieves the default version of AdmissionregistrationClient. -// Please explicitly pick a version. -func (c *Clientset) Admissionregistration() admissionregistrationv1beta1.AdmissionregistrationV1beta1Interface { - return c.admissionregistrationV1beta1 -} - -// AppsV1beta1 retrieves the AppsV1beta1Client -func (c *Clientset) AppsV1beta1() appsv1beta1.AppsV1beta1Interface { - return c.appsV1beta1 -} - -// AppsV1beta2 retrieves the AppsV1beta2Client -func (c *Clientset) AppsV1beta2() appsv1beta2.AppsV1beta2Interface { - return c.appsV1beta2 -} - -// AppsV1 retrieves the AppsV1Client -func (c *Clientset) AppsV1() appsv1.AppsV1Interface { - return c.appsV1 -} - -// Deprecated: Apps retrieves the default version of AppsClient. -// Please explicitly pick a version. -func (c *Clientset) Apps() appsv1.AppsV1Interface { - return c.appsV1 -} - -// AuthenticationV1 retrieves the AuthenticationV1Client -func (c *Clientset) AuthenticationV1() authenticationv1.AuthenticationV1Interface { - return c.authenticationV1 -} - -// Deprecated: Authentication retrieves the default version of AuthenticationClient. -// Please explicitly pick a version. 
-func (c *Clientset) Authentication() authenticationv1.AuthenticationV1Interface { - return c.authenticationV1 -} - -// AuthenticationV1beta1 retrieves the AuthenticationV1beta1Client -func (c *Clientset) AuthenticationV1beta1() authenticationv1beta1.AuthenticationV1beta1Interface { - return c.authenticationV1beta1 -} - -// AuthorizationV1 retrieves the AuthorizationV1Client -func (c *Clientset) AuthorizationV1() authorizationv1.AuthorizationV1Interface { - return c.authorizationV1 -} - -// Deprecated: Authorization retrieves the default version of AuthorizationClient. -// Please explicitly pick a version. -func (c *Clientset) Authorization() authorizationv1.AuthorizationV1Interface { - return c.authorizationV1 -} - -// AuthorizationV1beta1 retrieves the AuthorizationV1beta1Client -func (c *Clientset) AuthorizationV1beta1() authorizationv1beta1.AuthorizationV1beta1Interface { - return c.authorizationV1beta1 -} - -// AutoscalingV1 retrieves the AutoscalingV1Client -func (c *Clientset) AutoscalingV1() autoscalingv1.AutoscalingV1Interface { - return c.autoscalingV1 -} - -// Deprecated: Autoscaling retrieves the default version of AutoscalingClient. -// Please explicitly pick a version. -func (c *Clientset) Autoscaling() autoscalingv1.AutoscalingV1Interface { - return c.autoscalingV1 -} - -// AutoscalingV2beta1 retrieves the AutoscalingV2beta1Client -func (c *Clientset) AutoscalingV2beta1() autoscalingv2beta1.AutoscalingV2beta1Interface { - return c.autoscalingV2beta1 -} - -// AutoscalingV2beta2 retrieves the AutoscalingV2beta2Client -func (c *Clientset) AutoscalingV2beta2() autoscalingv2beta2.AutoscalingV2beta2Interface { - return c.autoscalingV2beta2 -} - -// BatchV1 retrieves the BatchV1Client -func (c *Clientset) BatchV1() batchv1.BatchV1Interface { - return c.batchV1 -} - -// Deprecated: Batch retrieves the default version of BatchClient. -// Please explicitly pick a version. -func (c *Clientset) Batch() batchv1.BatchV1Interface { - return c.batchV1 -} - -// BatchV1beta1 retrieves the BatchV1beta1Client -func (c *Clientset) BatchV1beta1() batchv1beta1.BatchV1beta1Interface { - return c.batchV1beta1 -} - -// BatchV2alpha1 retrieves the BatchV2alpha1Client -func (c *Clientset) BatchV2alpha1() batchv2alpha1.BatchV2alpha1Interface { - return c.batchV2alpha1 -} - -// CertificatesV1beta1 retrieves the CertificatesV1beta1Client -func (c *Clientset) CertificatesV1beta1() certificatesv1beta1.CertificatesV1beta1Interface { - return c.certificatesV1beta1 -} - -// Deprecated: Certificates retrieves the default version of CertificatesClient. -// Please explicitly pick a version. -func (c *Clientset) Certificates() certificatesv1beta1.CertificatesV1beta1Interface { - return c.certificatesV1beta1 -} - -// CoordinationV1beta1 retrieves the CoordinationV1beta1Client -func (c *Clientset) CoordinationV1beta1() coordinationv1beta1.CoordinationV1beta1Interface { - return c.coordinationV1beta1 -} - -// Deprecated: Coordination retrieves the default version of CoordinationClient. -// Please explicitly pick a version. -func (c *Clientset) Coordination() coordinationv1beta1.CoordinationV1beta1Interface { - return c.coordinationV1beta1 -} - -// CoreV1 retrieves the CoreV1Client -func (c *Clientset) CoreV1() corev1.CoreV1Interface { - return c.coreV1 -} - -// Deprecated: Core retrieves the default version of CoreClient. -// Please explicitly pick a version. 
-func (c *Clientset) Core() corev1.CoreV1Interface { - return c.coreV1 -} - -// EventsV1beta1 retrieves the EventsV1beta1Client -func (c *Clientset) EventsV1beta1() eventsv1beta1.EventsV1beta1Interface { - return c.eventsV1beta1 -} - -// Deprecated: Events retrieves the default version of EventsClient. -// Please explicitly pick a version. -func (c *Clientset) Events() eventsv1beta1.EventsV1beta1Interface { - return c.eventsV1beta1 -} - -// ExtensionsV1beta1 retrieves the ExtensionsV1beta1Client -func (c *Clientset) ExtensionsV1beta1() extensionsv1beta1.ExtensionsV1beta1Interface { - return c.extensionsV1beta1 -} - -// Deprecated: Extensions retrieves the default version of ExtensionsClient. -// Please explicitly pick a version. -func (c *Clientset) Extensions() extensionsv1beta1.ExtensionsV1beta1Interface { - return c.extensionsV1beta1 -} - -// NetworkingV1 retrieves the NetworkingV1Client -func (c *Clientset) NetworkingV1() networkingv1.NetworkingV1Interface { - return c.networkingV1 -} - -// Deprecated: Networking retrieves the default version of NetworkingClient. -// Please explicitly pick a version. -func (c *Clientset) Networking() networkingv1.NetworkingV1Interface { - return c.networkingV1 -} - -// PolicyV1beta1 retrieves the PolicyV1beta1Client -func (c *Clientset) PolicyV1beta1() policyv1beta1.PolicyV1beta1Interface { - return c.policyV1beta1 -} - -// Deprecated: Policy retrieves the default version of PolicyClient. -// Please explicitly pick a version. -func (c *Clientset) Policy() policyv1beta1.PolicyV1beta1Interface { - return c.policyV1beta1 -} - -// RbacV1 retrieves the RbacV1Client -func (c *Clientset) RbacV1() rbacv1.RbacV1Interface { - return c.rbacV1 -} - -// Deprecated: Rbac retrieves the default version of RbacClient. -// Please explicitly pick a version. -func (c *Clientset) Rbac() rbacv1.RbacV1Interface { - return c.rbacV1 -} - -// RbacV1beta1 retrieves the RbacV1beta1Client -func (c *Clientset) RbacV1beta1() rbacv1beta1.RbacV1beta1Interface { - return c.rbacV1beta1 -} - -// RbacV1alpha1 retrieves the RbacV1alpha1Client -func (c *Clientset) RbacV1alpha1() rbacv1alpha1.RbacV1alpha1Interface { - return c.rbacV1alpha1 -} - -// SchedulingV1alpha1 retrieves the SchedulingV1alpha1Client -func (c *Clientset) SchedulingV1alpha1() schedulingv1alpha1.SchedulingV1alpha1Interface { - return c.schedulingV1alpha1 -} - -// SchedulingV1beta1 retrieves the SchedulingV1beta1Client -func (c *Clientset) SchedulingV1beta1() schedulingv1beta1.SchedulingV1beta1Interface { - return c.schedulingV1beta1 -} - -// Deprecated: Scheduling retrieves the default version of SchedulingClient. -// Please explicitly pick a version. -func (c *Clientset) Scheduling() schedulingv1beta1.SchedulingV1beta1Interface { - return c.schedulingV1beta1 -} - -// SettingsV1alpha1 retrieves the SettingsV1alpha1Client -func (c *Clientset) SettingsV1alpha1() settingsv1alpha1.SettingsV1alpha1Interface { - return c.settingsV1alpha1 -} - -// Deprecated: Settings retrieves the default version of SettingsClient. -// Please explicitly pick a version. -func (c *Clientset) Settings() settingsv1alpha1.SettingsV1alpha1Interface { - return c.settingsV1alpha1 -} - -// StorageV1beta1 retrieves the StorageV1beta1Client -func (c *Clientset) StorageV1beta1() storagev1beta1.StorageV1beta1Interface { - return c.storageV1beta1 -} - -// StorageV1 retrieves the StorageV1Client -func (c *Clientset) StorageV1() storagev1.StorageV1Interface { - return c.storageV1 -} - -// Deprecated: Storage retrieves the default version of StorageClient. 
-// Please explicitly pick a version. -func (c *Clientset) Storage() storagev1.StorageV1Interface { - return c.storageV1 -} - -// StorageV1alpha1 retrieves the StorageV1alpha1Client -func (c *Clientset) StorageV1alpha1() storagev1alpha1.StorageV1alpha1Interface { - return c.storageV1alpha1 -} - -// Discovery retrieves the DiscoveryClient -func (c *Clientset) Discovery() discovery.DiscoveryInterface { - if c == nil { - return nil - } - return c.DiscoveryClient -} - -// NewForConfig creates a new Clientset for the given config. -func NewForConfig(c *rest.Config) (*Clientset, error) { - configShallowCopy := *c - if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { - configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) - } - var cs Clientset - var err error - cs.admissionregistrationV1alpha1, err = admissionregistrationv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.admissionregistrationV1beta1, err = admissionregistrationv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.appsV1beta1, err = appsv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.appsV1beta2, err = appsv1beta2.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.appsV1, err = appsv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.authenticationV1, err = authenticationv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.authenticationV1beta1, err = authenticationv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.authorizationV1, err = authorizationv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.authorizationV1beta1, err = authorizationv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.autoscalingV1, err = autoscalingv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.autoscalingV2beta1, err = autoscalingv2beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.autoscalingV2beta2, err = autoscalingv2beta2.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.batchV1, err = batchv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.batchV1beta1, err = batchv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.batchV2alpha1, err = batchv2alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.certificatesV1beta1, err = certificatesv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.coordinationV1beta1, err = coordinationv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.coreV1, err = corev1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.eventsV1beta1, err = eventsv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.extensionsV1beta1, err = extensionsv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.networkingV1, err = networkingv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.policyV1beta1, err = policyv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.rbacV1, err = rbacv1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.rbacV1beta1, 
err = rbacv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.rbacV1alpha1, err = rbacv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.schedulingV1alpha1, err = schedulingv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.schedulingV1beta1, err = schedulingv1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.settingsV1alpha1, err = settingsv1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.storageV1beta1, err = storagev1beta1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.storageV1, err = storagev1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - cs.storageV1alpha1, err = storagev1alpha1.NewForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - - cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) - if err != nil { - return nil, err - } - return &cs, nil -} - -// NewForConfigOrDie creates a new Clientset for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *Clientset { - var cs Clientset - cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.NewForConfigOrDie(c) - cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.NewForConfigOrDie(c) - cs.appsV1beta1 = appsv1beta1.NewForConfigOrDie(c) - cs.appsV1beta2 = appsv1beta2.NewForConfigOrDie(c) - cs.appsV1 = appsv1.NewForConfigOrDie(c) - cs.authenticationV1 = authenticationv1.NewForConfigOrDie(c) - cs.authenticationV1beta1 = authenticationv1beta1.NewForConfigOrDie(c) - cs.authorizationV1 = authorizationv1.NewForConfigOrDie(c) - cs.authorizationV1beta1 = authorizationv1beta1.NewForConfigOrDie(c) - cs.autoscalingV1 = autoscalingv1.NewForConfigOrDie(c) - cs.autoscalingV2beta1 = autoscalingv2beta1.NewForConfigOrDie(c) - cs.autoscalingV2beta2 = autoscalingv2beta2.NewForConfigOrDie(c) - cs.batchV1 = batchv1.NewForConfigOrDie(c) - cs.batchV1beta1 = batchv1beta1.NewForConfigOrDie(c) - cs.batchV2alpha1 = batchv2alpha1.NewForConfigOrDie(c) - cs.certificatesV1beta1 = certificatesv1beta1.NewForConfigOrDie(c) - cs.coordinationV1beta1 = coordinationv1beta1.NewForConfigOrDie(c) - cs.coreV1 = corev1.NewForConfigOrDie(c) - cs.eventsV1beta1 = eventsv1beta1.NewForConfigOrDie(c) - cs.extensionsV1beta1 = extensionsv1beta1.NewForConfigOrDie(c) - cs.networkingV1 = networkingv1.NewForConfigOrDie(c) - cs.policyV1beta1 = policyv1beta1.NewForConfigOrDie(c) - cs.rbacV1 = rbacv1.NewForConfigOrDie(c) - cs.rbacV1beta1 = rbacv1beta1.NewForConfigOrDie(c) - cs.rbacV1alpha1 = rbacv1alpha1.NewForConfigOrDie(c) - cs.schedulingV1alpha1 = schedulingv1alpha1.NewForConfigOrDie(c) - cs.schedulingV1beta1 = schedulingv1beta1.NewForConfigOrDie(c) - cs.settingsV1alpha1 = settingsv1alpha1.NewForConfigOrDie(c) - cs.storageV1beta1 = storagev1beta1.NewForConfigOrDie(c) - cs.storageV1 = storagev1.NewForConfigOrDie(c) - cs.storageV1alpha1 = storagev1alpha1.NewForConfigOrDie(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) - return &cs -} - -// New creates a new Clientset for the given RESTClient. 
-func New(c rest.Interface) *Clientset { - var cs Clientset - cs.admissionregistrationV1alpha1 = admissionregistrationv1alpha1.New(c) - cs.admissionregistrationV1beta1 = admissionregistrationv1beta1.New(c) - cs.appsV1beta1 = appsv1beta1.New(c) - cs.appsV1beta2 = appsv1beta2.New(c) - cs.appsV1 = appsv1.New(c) - cs.authenticationV1 = authenticationv1.New(c) - cs.authenticationV1beta1 = authenticationv1beta1.New(c) - cs.authorizationV1 = authorizationv1.New(c) - cs.authorizationV1beta1 = authorizationv1beta1.New(c) - cs.autoscalingV1 = autoscalingv1.New(c) - cs.autoscalingV2beta1 = autoscalingv2beta1.New(c) - cs.autoscalingV2beta2 = autoscalingv2beta2.New(c) - cs.batchV1 = batchv1.New(c) - cs.batchV1beta1 = batchv1beta1.New(c) - cs.batchV2alpha1 = batchv2alpha1.New(c) - cs.certificatesV1beta1 = certificatesv1beta1.New(c) - cs.coordinationV1beta1 = coordinationv1beta1.New(c) - cs.coreV1 = corev1.New(c) - cs.eventsV1beta1 = eventsv1beta1.New(c) - cs.extensionsV1beta1 = extensionsv1beta1.New(c) - cs.networkingV1 = networkingv1.New(c) - cs.policyV1beta1 = policyv1beta1.New(c) - cs.rbacV1 = rbacv1.New(c) - cs.rbacV1beta1 = rbacv1beta1.New(c) - cs.rbacV1alpha1 = rbacv1alpha1.New(c) - cs.schedulingV1alpha1 = schedulingv1alpha1.New(c) - cs.schedulingV1beta1 = schedulingv1beta1.New(c) - cs.settingsV1alpha1 = settingsv1alpha1.New(c) - cs.storageV1beta1 = storagev1beta1.New(c) - cs.storageV1 = storagev1.New(c) - cs.storageV1alpha1 = storagev1alpha1.New(c) - - cs.DiscoveryClient = discovery.NewDiscoveryClient(c) - return &cs -} diff --git a/vendor/k8s.io/client-go/kubernetes/doc.go b/vendor/k8s.io/client-go/kubernetes/doc.go deleted file mode 100644 index b272334..0000000 --- a/vendor/k8s.io/client-go/kubernetes/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated clientset. -package kubernetes diff --git a/vendor/k8s.io/client-go/kubernetes/import.go b/vendor/k8s.io/client-go/kubernetes/import.go deleted file mode 100644 index c4f9a91..0000000 --- a/vendor/k8s.io/client-go/kubernetes/import.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// This file exists to enforce this clientset's vanity import path. 
- -package kubernetes // import "k8s.io/client-go/kubernetes" diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go b/vendor/k8s.io/client-go/kubernetes/scheme/doc.go deleted file mode 100644 index 7dc3756..0000000 --- a/vendor/k8s.io/client-go/kubernetes/scheme/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package contains the scheme of the automatically generated clientset. -package scheme diff --git a/vendor/k8s.io/client-go/kubernetes/scheme/register.go b/vendor/k8s.io/client-go/kubernetes/scheme/register.go deleted file mode 100644 index 9ca89b7..0000000 --- a/vendor/k8s.io/client-go/kubernetes/scheme/register.go +++ /dev/null @@ -1,116 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package scheme - -import ( - admissionregistrationv1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - admissionregistrationv1beta1 "k8s.io/api/admissionregistration/v1beta1" - appsv1 "k8s.io/api/apps/v1" - appsv1beta1 "k8s.io/api/apps/v1beta1" - appsv1beta2 "k8s.io/api/apps/v1beta2" - authenticationv1 "k8s.io/api/authentication/v1" - authenticationv1beta1 "k8s.io/api/authentication/v1beta1" - authorizationv1 "k8s.io/api/authorization/v1" - authorizationv1beta1 "k8s.io/api/authorization/v1beta1" - autoscalingv1 "k8s.io/api/autoscaling/v1" - autoscalingv2beta1 "k8s.io/api/autoscaling/v2beta1" - autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2" - batchv1 "k8s.io/api/batch/v1" - batchv1beta1 "k8s.io/api/batch/v1beta1" - batchv2alpha1 "k8s.io/api/batch/v2alpha1" - certificatesv1beta1 "k8s.io/api/certificates/v1beta1" - coordinationv1beta1 "k8s.io/api/coordination/v1beta1" - corev1 "k8s.io/api/core/v1" - eventsv1beta1 "k8s.io/api/events/v1beta1" - extensionsv1beta1 "k8s.io/api/extensions/v1beta1" - networkingv1 "k8s.io/api/networking/v1" - policyv1beta1 "k8s.io/api/policy/v1beta1" - rbacv1 "k8s.io/api/rbac/v1" - rbacv1alpha1 "k8s.io/api/rbac/v1alpha1" - rbacv1beta1 "k8s.io/api/rbac/v1beta1" - schedulingv1alpha1 "k8s.io/api/scheduling/v1alpha1" - schedulingv1beta1 "k8s.io/api/scheduling/v1beta1" - settingsv1alpha1 "k8s.io/api/settings/v1alpha1" - storagev1 "k8s.io/api/storage/v1" - storagev1alpha1 "k8s.io/api/storage/v1alpha1" - storagev1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - runtime "k8s.io/apimachinery/pkg/runtime" - schema "k8s.io/apimachinery/pkg/runtime/schema" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" -) - -var Scheme = runtime.NewScheme() -var Codecs = serializer.NewCodecFactory(Scheme) -var ParameterCodec = runtime.NewParameterCodec(Scheme) -var localSchemeBuilder = runtime.SchemeBuilder{ - admissionregistrationv1alpha1.AddToScheme, - admissionregistrationv1beta1.AddToScheme, - appsv1beta1.AddToScheme, - appsv1beta2.AddToScheme, - appsv1.AddToScheme, - authenticationv1.AddToScheme, - authenticationv1beta1.AddToScheme, - authorizationv1.AddToScheme, - authorizationv1beta1.AddToScheme, - autoscalingv1.AddToScheme, - autoscalingv2beta1.AddToScheme, - autoscalingv2beta2.AddToScheme, - batchv1.AddToScheme, - batchv1beta1.AddToScheme, - batchv2alpha1.AddToScheme, - certificatesv1beta1.AddToScheme, - coordinationv1beta1.AddToScheme, - corev1.AddToScheme, - eventsv1beta1.AddToScheme, - extensionsv1beta1.AddToScheme, - networkingv1.AddToScheme, - policyv1beta1.AddToScheme, - rbacv1.AddToScheme, - rbacv1beta1.AddToScheme, - rbacv1alpha1.AddToScheme, - schedulingv1alpha1.AddToScheme, - schedulingv1beta1.AddToScheme, - settingsv1alpha1.AddToScheme, - storagev1beta1.AddToScheme, - storagev1.AddToScheme, - storagev1alpha1.AddToScheme, -} - -// AddToScheme adds all types of this clientset into the given scheme. This allows composition -// of clientsets, like in: -// -// import ( -// "k8s.io/client-go/kubernetes" -// clientsetscheme "k8s.io/client-go/kubernetes/scheme" -// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" -// ) -// -// kclientset, _ := kubernetes.NewForConfig(c) -// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) -// -// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types -// correctly. 
-var AddToScheme = localSchemeBuilder.AddToScheme - -func init() { - v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) - utilruntime.Must(AddToScheme(Scheme)) -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go deleted file mode 100644 index 5e02f72..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/admissionregistration_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AdmissionregistrationV1alpha1Interface interface { - RESTClient() rest.Interface - InitializerConfigurationsGetter -} - -// AdmissionregistrationV1alpha1Client is used to interact with features provided by the admissionregistration.k8s.io group. -type AdmissionregistrationV1alpha1Client struct { - restClient rest.Interface -} - -func (c *AdmissionregistrationV1alpha1Client) InitializerConfigurations() InitializerConfigurationInterface { - return newInitializerConfigurations(c) -} - -// NewForConfig creates a new AdmissionregistrationV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*AdmissionregistrationV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AdmissionregistrationV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new AdmissionregistrationV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AdmissionregistrationV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *AdmissionregistrationV1alpha1Client { - return &AdmissionregistrationV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *AdmissionregistrationV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go deleted file mode 100644 index df51baa..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go deleted file mode 100644 index 1e29b96..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type InitializerConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go deleted file mode 100644 index e014ea7..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1/initializerconfiguration.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/admissionregistration/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// InitializerConfigurationsGetter has a method to return a InitializerConfigurationInterface. -// A group's client should implement this interface. -type InitializerConfigurationsGetter interface { - InitializerConfigurations() InitializerConfigurationInterface -} - -// InitializerConfigurationInterface has methods to work with InitializerConfiguration resources. -type InitializerConfigurationInterface interface { - Create(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) - Update(*v1alpha1.InitializerConfiguration) (*v1alpha1.InitializerConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.InitializerConfiguration, error) - List(opts v1.ListOptions) (*v1alpha1.InitializerConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) - InitializerConfigurationExpansion -} - -// initializerConfigurations implements InitializerConfigurationInterface -type initializerConfigurations struct { - client rest.Interface -} - -// newInitializerConfigurations returns a InitializerConfigurations -func newInitializerConfigurations(c *AdmissionregistrationV1alpha1Client) *initializerConfigurations { - return &initializerConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the initializerConfiguration, and returns the corresponding initializerConfiguration object, and an error if there is any. -func (c *initializerConfigurations) Get(name string, options v1.GetOptions) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Get(). - Resource("initializerconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of InitializerConfigurations that match those selectors. -func (c *initializerConfigurations) List(opts v1.ListOptions) (result *v1alpha1.InitializerConfigurationList, err error) { - result = &v1alpha1.InitializerConfigurationList{} - err = c.client.Get(). - Resource("initializerconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested initializerConfigurations. -func (c *initializerConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("initializerconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a initializerConfiguration and creates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. -func (c *initializerConfigurations) Create(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Post(). - Resource("initializerconfigurations"). 
- Body(initializerConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a initializerConfiguration and updates it. Returns the server's representation of the initializerConfiguration, and an error, if there is any. -func (c *initializerConfigurations) Update(initializerConfiguration *v1alpha1.InitializerConfiguration) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Put(). - Resource("initializerconfigurations"). - Name(initializerConfiguration.Name). - Body(initializerConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the initializerConfiguration and deletes it. Returns an error if one occurs. -func (c *initializerConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("initializerconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *initializerConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("initializerconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched initializerConfiguration. -func (c *initializerConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.InitializerConfiguration, err error) { - result = &v1alpha1.InitializerConfiguration{} - err = c.client.Patch(pt). - Resource("initializerconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go deleted file mode 100644 index b13ea79..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/admissionregistration_client.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AdmissionregistrationV1beta1Interface interface { - RESTClient() rest.Interface - MutatingWebhookConfigurationsGetter - ValidatingWebhookConfigurationsGetter -} - -// AdmissionregistrationV1beta1Client is used to interact with features provided by the admissionregistration.k8s.io group. 
-type AdmissionregistrationV1beta1Client struct { - restClient rest.Interface -} - -func (c *AdmissionregistrationV1beta1Client) MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface { - return newMutatingWebhookConfigurations(c) -} - -func (c *AdmissionregistrationV1beta1Client) ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface { - return newValidatingWebhookConfigurations(c) -} - -// NewForConfig creates a new AdmissionregistrationV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*AdmissionregistrationV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AdmissionregistrationV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new AdmissionregistrationV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AdmissionregistrationV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AdmissionregistrationV1beta1Client for the given RESTClient. -func New(c rest.Interface) *AdmissionregistrationV1beta1Client { - return &AdmissionregistrationV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AdmissionregistrationV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go deleted file mode 100644 index 2aeb9c9..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type MutatingWebhookConfigurationExpansion interface{} - -type ValidatingWebhookConfigurationExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go deleted file mode 100644 index cb01571..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/mutatingwebhookconfiguration.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// MutatingWebhookConfigurationsGetter has a method to return a MutatingWebhookConfigurationInterface. -// A group's client should implement this interface. -type MutatingWebhookConfigurationsGetter interface { - MutatingWebhookConfigurations() MutatingWebhookConfigurationInterface -} - -// MutatingWebhookConfigurationInterface has methods to work with MutatingWebhookConfiguration resources. 
-type MutatingWebhookConfigurationInterface interface { - Create(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) - Update(*v1beta1.MutatingWebhookConfiguration) (*v1beta1.MutatingWebhookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.MutatingWebhookConfiguration, error) - List(opts v1.ListOptions) (*v1beta1.MutatingWebhookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) - MutatingWebhookConfigurationExpansion -} - -// mutatingWebhookConfigurations implements MutatingWebhookConfigurationInterface -type mutatingWebhookConfigurations struct { - client rest.Interface -} - -// newMutatingWebhookConfigurations returns a MutatingWebhookConfigurations -func newMutatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *mutatingWebhookConfigurations { - return &mutatingWebhookConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the mutatingWebhookConfiguration, and returns the corresponding mutatingWebhookConfiguration object, and an error if there is any. -func (c *mutatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of MutatingWebhookConfigurations that match those selectors. -func (c *mutatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.MutatingWebhookConfigurationList, err error) { - result = &v1beta1.MutatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested mutatingWebhookConfigurations. -func (c *mutatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a mutatingWebhookConfiguration and creates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Create(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Post(). - Resource("mutatingwebhookconfigurations"). - Body(mutatingWebhookConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a mutatingWebhookConfiguration and updates it. Returns the server's representation of the mutatingWebhookConfiguration, and an error, if there is any. -func (c *mutatingWebhookConfigurations) Update(mutatingWebhookConfiguration *v1beta1.MutatingWebhookConfiguration) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Put(). 
- Resource("mutatingwebhookconfigurations"). - Name(mutatingWebhookConfiguration.Name). - Body(mutatingWebhookConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the mutatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *mutatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *mutatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("mutatingwebhookconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched mutatingWebhookConfiguration. -func (c *mutatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.MutatingWebhookConfiguration, err error) { - result = &v1beta1.MutatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("mutatingwebhookconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go b/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go deleted file mode 100644 index 3a9339f..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1/validatingwebhookconfiguration.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/admissionregistration/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ValidatingWebhookConfigurationsGetter has a method to return a ValidatingWebhookConfigurationInterface. -// A group's client should implement this interface. -type ValidatingWebhookConfigurationsGetter interface { - ValidatingWebhookConfigurations() ValidatingWebhookConfigurationInterface -} - -// ValidatingWebhookConfigurationInterface has methods to work with ValidatingWebhookConfiguration resources. 
-type ValidatingWebhookConfigurationInterface interface { - Create(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) - Update(*v1beta1.ValidatingWebhookConfiguration) (*v1beta1.ValidatingWebhookConfiguration, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ValidatingWebhookConfiguration, error) - List(opts v1.ListOptions) (*v1beta1.ValidatingWebhookConfigurationList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) - ValidatingWebhookConfigurationExpansion -} - -// validatingWebhookConfigurations implements ValidatingWebhookConfigurationInterface -type validatingWebhookConfigurations struct { - client rest.Interface -} - -// newValidatingWebhookConfigurations returns a ValidatingWebhookConfigurations -func newValidatingWebhookConfigurations(c *AdmissionregistrationV1beta1Client) *validatingWebhookConfigurations { - return &validatingWebhookConfigurations{ - client: c.RESTClient(), - } -} - -// Get takes name of the validatingWebhookConfiguration, and returns the corresponding validatingWebhookConfiguration object, and an error if there is any. -func (c *validatingWebhookConfigurations) Get(name string, options v1.GetOptions) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ValidatingWebhookConfigurations that match those selectors. -func (c *validatingWebhookConfigurations) List(opts v1.ListOptions) (result *v1beta1.ValidatingWebhookConfigurationList, err error) { - result = &v1beta1.ValidatingWebhookConfigurationList{} - err = c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested validatingWebhookConfigurations. -func (c *validatingWebhookConfigurations) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a validatingWebhookConfiguration and creates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. -func (c *validatingWebhookConfigurations) Create(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Post(). - Resource("validatingwebhookconfigurations"). - Body(validatingWebhookConfiguration). - Do(). - Into(result) - return -} - -// Update takes the representation of a validatingWebhookConfiguration and updates it. Returns the server's representation of the validatingWebhookConfiguration, and an error, if there is any. 
-func (c *validatingWebhookConfigurations) Update(validatingWebhookConfiguration *v1beta1.ValidatingWebhookConfiguration) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Put(). - Resource("validatingwebhookconfigurations"). - Name(validatingWebhookConfiguration.Name). - Body(validatingWebhookConfiguration). - Do(). - Into(result) - return -} - -// Delete takes name of the validatingWebhookConfiguration and deletes it. Returns an error if one occurs. -func (c *validatingWebhookConfigurations) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *validatingWebhookConfigurations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("validatingwebhookconfigurations"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched validatingWebhookConfiguration. -func (c *validatingWebhookConfigurations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ValidatingWebhookConfiguration, err error) { - result = &v1beta1.ValidatingWebhookConfiguration{} - err = c.client.Patch(pt). - Resource("validatingwebhookconfigurations"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go deleted file mode 100644 index da19c75..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/apps_client.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/apps/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AppsV1Interface interface { - RESTClient() rest.Interface - ControllerRevisionsGetter - DaemonSetsGetter - DeploymentsGetter - ReplicaSetsGetter - StatefulSetsGetter -} - -// AppsV1Client is used to interact with features provided by the apps group. 
-type AppsV1Client struct { - restClient rest.Interface -} - -func (c *AppsV1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { - return newControllerRevisions(c, namespace) -} - -func (c *AppsV1Client) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *AppsV1Client) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *AppsV1Client) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -func (c *AppsV1Client) StatefulSets(namespace string) StatefulSetInterface { - return newStatefulSets(c, namespace) -} - -// NewForConfig creates a new AppsV1Client for the given config. -func NewForConfig(c *rest.Config) (*AppsV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AppsV1Client{client}, nil -} - -// NewForConfigOrDie creates a new AppsV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AppsV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AppsV1Client for the given RESTClient. -func New(c rest.Interface) *AppsV1Client { - return &AppsV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AppsV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go deleted file mode 100644 index 1ddaa1a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/controllerrevision.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. -// A group's client should implement this interface. -type ControllerRevisionsGetter interface { - ControllerRevisions(namespace string) ControllerRevisionInterface -} - -// ControllerRevisionInterface has methods to work with ControllerRevision resources. 
-type ControllerRevisionInterface interface { - Create(*v1.ControllerRevision) (*v1.ControllerRevision, error) - Update(*v1.ControllerRevision) (*v1.ControllerRevision, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ControllerRevision, error) - List(opts metav1.ListOptions) (*v1.ControllerRevisionList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) - ControllerRevisionExpansion -} - -// controllerRevisions implements ControllerRevisionInterface -type controllerRevisions struct { - client rest.Interface - ns string -} - -// newControllerRevisions returns a ControllerRevisions -func newControllerRevisions(c *AppsV1Client, namespace string) *controllerRevisions { - return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(name string, options metav1.GetOptions) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(opts metav1.ListOptions) (result *v1.ControllerRevisionList, err error) { - result = &v1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - Body(controllerRevision). - Do(). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(controllerRevision *v1.ControllerRevision) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - Body(controllerRevision). - Do(). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). 
- Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ControllerRevision, err error) { - result = &v1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go deleted file mode 100644 index 03a8706..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/daemonset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. -type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. -type DaemonSetInterface interface { - Create(*v1.DaemonSet) (*v1.DaemonSet, error) - Update(*v1.DaemonSet) (*v1.DaemonSet, error) - UpdateStatus(*v1.DaemonSet) (*v1.DaemonSet, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.DaemonSet, error) - List(opts metav1.ListOptions) (*v1.DaemonSetList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client rest.Interface - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *AppsV1Client, namespace string) *daemonSets { - return &daemonSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options metav1.GetOptions) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). 
- Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts metav1.ListOptions) (result *v1.DaemonSetList, err error) { - result = &v1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *v1.DaemonSet) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.DaemonSet, err error) { - result = &v1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go deleted file mode 100644 index 73d46f8..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/deployment.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. -type DeploymentInterface interface { - Create(*v1.Deployment) (*v1.Deployment, error) - Update(*v1.Deployment) (*v1.Deployment, error) - UpdateStatus(*v1.Deployment) (*v1.Deployment, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Deployment, error) - List(opts metav1.ListOptions) (*v1.DeploymentList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client rest.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *AppsV1Client, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options metav1.GetOptions) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts metav1.ListOptions) (result *v1.DeploymentList, err error) { - result = &v1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1.Deployment) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). 
- Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1.Deployment) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1.Deployment) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Deployment, err error) { - result = &v1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go deleted file mode 100644 index 88cfe4e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/generated_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type ControllerRevisionExpansion interface{} - -type DaemonSetExpansion interface{} - -type DeploymentExpansion interface{} - -type ReplicaSetExpansion interface{} - -type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go deleted file mode 100644 index 0779411..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/replicaset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. -type ReplicaSetInterface interface { - Create(*v1.ReplicaSet) (*v1.ReplicaSet, error) - Update(*v1.ReplicaSet) (*v1.ReplicaSet, error) - UpdateStatus(*v1.ReplicaSet) (*v1.ReplicaSet, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ReplicaSet, error) - List(opts metav1.ListOptions) (*v1.ReplicaSetList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client rest.Interface - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *AppsV1Client, namespace string) *replicaSets { - return &replicaSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string, options metav1.GetOptions) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts metav1.ListOptions) (result *v1.ReplicaSetList, err error) { - result = &v1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *v1.ReplicaSet) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicaSet, err error) { - result = &v1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go deleted file mode 100644 index 54322d9..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1/statefulset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/apps/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// StatefulSetsGetter has a method to return a StatefulSetInterface. -// A group's client should implement this interface. -type StatefulSetsGetter interface { - StatefulSets(namespace string) StatefulSetInterface -} - -// StatefulSetInterface has methods to work with StatefulSet resources. -type StatefulSetInterface interface { - Create(*v1.StatefulSet) (*v1.StatefulSet, error) - Update(*v1.StatefulSet) (*v1.StatefulSet, error) - UpdateStatus(*v1.StatefulSet) (*v1.StatefulSet, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.StatefulSet, error) - List(opts metav1.ListOptions) (*v1.StatefulSetList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) - StatefulSetExpansion -} - -// statefulSets implements StatefulSetInterface -type statefulSets struct { - client rest.Interface - ns string -} - -// newStatefulSets returns a StatefulSets -func newStatefulSets(c *AppsV1Client, namespace string) *statefulSets { - return &statefulSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(name string, options metav1.GetOptions) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(opts metav1.ListOptions) (result *v1.StatefulSetList, err error) { - result = &v1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - Body(statefulSet). - Do(). 
- Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - Body(statefulSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(statefulSet *v1.StatefulSet) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StatefulSet, err error) { - result = &v1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go deleted file mode 100644 index 4d882e2..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/apps_client.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/apps/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AppsV1beta1Interface interface { - RESTClient() rest.Interface - ControllerRevisionsGetter - DeploymentsGetter - ScalesGetter - StatefulSetsGetter -} - -// AppsV1beta1Client is used to interact with features provided by the apps group. 
-type AppsV1beta1Client struct { - restClient rest.Interface -} - -func (c *AppsV1beta1Client) ControllerRevisions(namespace string) ControllerRevisionInterface { - return newControllerRevisions(c, namespace) -} - -func (c *AppsV1beta1Client) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *AppsV1beta1Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - -func (c *AppsV1beta1Client) StatefulSets(namespace string) StatefulSetInterface { - return newStatefulSets(c, namespace) -} - -// NewForConfig creates a new AppsV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*AppsV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AppsV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new AppsV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AppsV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AppsV1beta1Client for the given RESTClient. -func New(c rest.Interface) *AppsV1beta1Client { - return &AppsV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AppsV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go deleted file mode 100644 index ec8fa92..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/controllerrevision.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. -// A group's client should implement this interface. -type ControllerRevisionsGetter interface { - ControllerRevisions(namespace string) ControllerRevisionInterface -} - -// ControllerRevisionInterface has methods to work with ControllerRevision resources. 
-type ControllerRevisionInterface interface { - Create(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) - Update(*v1beta1.ControllerRevision) (*v1beta1.ControllerRevision, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ControllerRevision, error) - List(opts v1.ListOptions) (*v1beta1.ControllerRevisionList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) - ControllerRevisionExpansion -} - -// controllerRevisions implements ControllerRevisionInterface -type controllerRevisions struct { - client rest.Interface - ns string -} - -// newControllerRevisions returns a ControllerRevisions -func newControllerRevisions(c *AppsV1beta1Client, namespace string) *controllerRevisions { - return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta1.ControllerRevisionList, err error) { - result = &v1beta1.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - Body(controllerRevision). - Do(). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(controllerRevision *v1beta1.ControllerRevision) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - Body(controllerRevision). - Do(). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). 
- Resource("controllerrevisions"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ControllerRevision, err error) { - result = &v1beta1.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go deleted file mode 100644 index 365e06f..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/deployment.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. -type DeploymentInterface interface { - Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) - UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) - List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client rest.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *AppsV1beta1Client, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. 
-func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go deleted file mode 100644 index b2bfd73..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/generated_expansion.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type ControllerRevisionExpansion interface{} - -type DeploymentExpansion interface{} - -type ScaleExpansion interface{} - -type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go deleted file mode 100644 index cef27bd..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. 
-type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *AppsV1beta1Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go deleted file mode 100644 index 6517454..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta1/statefulset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/apps/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// StatefulSetsGetter has a method to return a StatefulSetInterface. -// A group's client should implement this interface. -type StatefulSetsGetter interface { - StatefulSets(namespace string) StatefulSetInterface -} - -// StatefulSetInterface has methods to work with StatefulSet resources. -type StatefulSetInterface interface { - Create(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - Update(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - UpdateStatus(*v1beta1.StatefulSet) (*v1beta1.StatefulSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.StatefulSet, error) - List(opts v1.ListOptions) (*v1beta1.StatefulSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) - StatefulSetExpansion -} - -// statefulSets implements StatefulSetInterface -type statefulSets struct { - client rest.Interface - ns string -} - -// newStatefulSets returns a StatefulSets -func newStatefulSets(c *AppsV1beta1Client, namespace string) *statefulSets { - return &statefulSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. -func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta1.StatefulSetList, err error) { - result = &v1beta1.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). 
- Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - Body(statefulSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(statefulSet *v1beta1.StatefulSet) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StatefulSet, err error) { - result = &v1beta1.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go deleted file mode 100644 index 2754949..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/apps_client.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AppsV1beta2Interface interface { - RESTClient() rest.Interface - ControllerRevisionsGetter - DaemonSetsGetter - DeploymentsGetter - ReplicaSetsGetter - ScalesGetter - StatefulSetsGetter -} - -// AppsV1beta2Client is used to interact with features provided by the apps group. -type AppsV1beta2Client struct { - restClient rest.Interface -} - -func (c *AppsV1beta2Client) ControllerRevisions(namespace string) ControllerRevisionInterface { - return newControllerRevisions(c, namespace) -} - -func (c *AppsV1beta2Client) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *AppsV1beta2Client) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *AppsV1beta2Client) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -func (c *AppsV1beta2Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - -func (c *AppsV1beta2Client) StatefulSets(namespace string) StatefulSetInterface { - return newStatefulSets(c, namespace) -} - -// NewForConfig creates a new AppsV1beta2Client for the given config. -func NewForConfig(c *rest.Config) (*AppsV1beta2Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AppsV1beta2Client{client}, nil -} - -// NewForConfigOrDie creates a new AppsV1beta2Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AppsV1beta2Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AppsV1beta2Client for the given RESTClient. -func New(c rest.Interface) *AppsV1beta2Client { - return &AppsV1beta2Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta2.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AppsV1beta2Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go deleted file mode 100644 index 1271cc6..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/controllerrevision.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ControllerRevisionsGetter has a method to return a ControllerRevisionInterface. -// A group's client should implement this interface. -type ControllerRevisionsGetter interface { - ControllerRevisions(namespace string) ControllerRevisionInterface -} - -// ControllerRevisionInterface has methods to work with ControllerRevision resources. -type ControllerRevisionInterface interface { - Create(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) - Update(*v1beta2.ControllerRevision) (*v1beta2.ControllerRevision, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.ControllerRevision, error) - List(opts v1.ListOptions) (*v1beta2.ControllerRevisionList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) - ControllerRevisionExpansion -} - -// controllerRevisions implements ControllerRevisionInterface -type controllerRevisions struct { - client rest.Interface - ns string -} - -// newControllerRevisions returns a ControllerRevisions -func newControllerRevisions(c *AppsV1beta2Client, namespace string) *controllerRevisions { - return &controllerRevisions{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the controllerRevision, and returns the corresponding controllerRevision object, and an error if there is any. -func (c *controllerRevisions) Get(name string, options v1.GetOptions) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ControllerRevisions that match those selectors. -func (c *controllerRevisions) List(opts v1.ListOptions) (result *v1beta2.ControllerRevisionList, err error) { - result = &v1beta2.ControllerRevisionList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested controllerRevisions. -func (c *controllerRevisions) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&opts, scheme.ParameterCodec). 
- Watch() -} - -// Create takes the representation of a controllerRevision and creates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Create(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Post(). - Namespace(c.ns). - Resource("controllerrevisions"). - Body(controllerRevision). - Do(). - Into(result) - return -} - -// Update takes the representation of a controllerRevision and updates it. Returns the server's representation of the controllerRevision, and an error, if there is any. -func (c *controllerRevisions) Update(controllerRevision *v1beta2.ControllerRevision) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Put(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(controllerRevision.Name). - Body(controllerRevision). - Do(). - Into(result) - return -} - -// Delete takes name of the controllerRevision and deletes it. Returns an error if one occurs. -func (c *controllerRevisions) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *controllerRevisions) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("controllerrevisions"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched controllerRevision. -func (c *controllerRevisions) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ControllerRevision, err error) { - result = &v1beta2.ControllerRevision{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("controllerrevisions"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go deleted file mode 100644 index 683c068..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/daemonset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. 
-type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. -type DaemonSetInterface interface { - Create(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - Update(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - UpdateStatus(*v1beta2.DaemonSet) (*v1beta2.DaemonSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.DaemonSet, error) - List(opts v1.ListOptions) (*v1beta2.DaemonSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client rest.Interface - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *AppsV1beta2Client, namespace string) *daemonSets { - return &daemonSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. -func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta2.DaemonSetList, err error) { - result = &v1beta2.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *v1beta2.DaemonSet) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). 
- Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.DaemonSet, err error) { - result = &v1beta2.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go deleted file mode 100644 index 9a04513..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/deployment.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. 
-type DeploymentInterface interface { - Create(*v1beta2.Deployment) (*v1beta2.Deployment, error) - Update(*v1beta2.Deployment) (*v1beta2.Deployment, error) - UpdateStatus(*v1beta2.Deployment) (*v1beta2.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.Deployment, error) - List(opts v1.ListOptions) (*v1beta2.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client rest.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *AppsV1beta2Client, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *v1beta2.DeploymentList, err error) { - result = &v1beta2.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Create(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1beta2.Deployment) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. 
Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.Deployment, err error) { - result = &v1beta2.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go deleted file mode 100644 index 56518ef..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go deleted file mode 100644 index bceae59..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/generated_expansion.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -type ControllerRevisionExpansion interface{} - -type DaemonSetExpansion interface{} - -type DeploymentExpansion interface{} - -type ReplicaSetExpansion interface{} - -type ScaleExpansion interface{} - -type StatefulSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go deleted file mode 100644 index 9fd9de9..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/replicaset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. -type ReplicaSetInterface interface { - Create(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - Update(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - UpdateStatus(*v1beta2.ReplicaSet) (*v1beta2.ReplicaSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.ReplicaSet, error) - List(opts v1.ListOptions) (*v1beta2.ReplicaSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client rest.Interface - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *AppsV1beta2Client, namespace string) *replicaSets { - return &replicaSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta2.ReplicaSetList, err error) { - result = &v1beta2.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. 
-func (c *replicaSets) Create(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *v1beta2.ReplicaSet) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.ReplicaSet, err error) { - result = &v1beta2.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go deleted file mode 100644 index f8d6a7f..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. 
-type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *AppsV1beta2Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go b/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go deleted file mode 100644 index 095601e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/apps/v1beta2/statefulset.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta2 - -import ( - v1beta2 "k8s.io/api/apps/v1beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// StatefulSetsGetter has a method to return a StatefulSetInterface. -// A group's client should implement this interface. -type StatefulSetsGetter interface { - StatefulSets(namespace string) StatefulSetInterface -} - -// StatefulSetInterface has methods to work with StatefulSet resources. -type StatefulSetInterface interface { - Create(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - Update(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - UpdateStatus(*v1beta2.StatefulSet) (*v1beta2.StatefulSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta2.StatefulSet, error) - List(opts v1.ListOptions) (*v1beta2.StatefulSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) - GetScale(statefulSetName string, options v1.GetOptions) (*v1beta2.Scale, error) - UpdateScale(statefulSetName string, scale *v1beta2.Scale) (*v1beta2.Scale, error) - - StatefulSetExpansion -} - -// statefulSets implements StatefulSetInterface -type statefulSets struct { - client rest.Interface - ns string -} - -// newStatefulSets returns a StatefulSets -func newStatefulSets(c *AppsV1beta2Client, namespace string) *statefulSets { - return &statefulSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the statefulSet, and returns the corresponding statefulSet object, and an error if there is any. -func (c *statefulSets) Get(name string, options v1.GetOptions) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StatefulSets that match those selectors. 
-func (c *statefulSets) List(opts v1.ListOptions) (result *v1beta2.StatefulSetList, err error) { - result = &v1beta2.StatefulSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested statefulSets. -func (c *statefulSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a statefulSet and creates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Create(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("statefulsets"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a statefulSet and updates it. Returns the server's representation of the statefulSet, and an error, if there is any. -func (c *statefulSets) Update(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - Body(statefulSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *statefulSets) UpdateStatus(statefulSet *v1beta2.StatefulSet) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSet.Name). - SubResource("status"). - Body(statefulSet). - Do(). - Into(result) - return -} - -// Delete takes name of the statefulSet and deletes it. Returns an error if one occurs. -func (c *statefulSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *statefulSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("statefulsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched statefulSet. -func (c *statefulSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta2.StatefulSet, err error) { - result = &v1beta2.StatefulSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("statefulsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the statefulSet, and returns the corresponding v1beta2.Scale object, and an error if there is any. -func (c *statefulSets) GetScale(statefulSetName string, options v1.GetOptions) (result *v1beta2.Scale, err error) { - result = &v1beta2.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *statefulSets) UpdateScale(statefulSetName string, scale *v1beta2.Scale) (result *v1beta2.Scale, err error) { - result = &v1beta2.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("statefulsets"). - Name(statefulSetName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go deleted file mode 100644 index 3bdcee5..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/authentication_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/authentication/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AuthenticationV1Interface interface { - RESTClient() rest.Interface - TokenReviewsGetter -} - -// AuthenticationV1Client is used to interact with features provided by the authentication.k8s.io group. -type AuthenticationV1Client struct { - restClient rest.Interface -} - -func (c *AuthenticationV1Client) TokenReviews() TokenReviewInterface { - return newTokenReviews(c) -} - -// NewForConfig creates a new AuthenticationV1Client for the given config. -func NewForConfig(c *rest.Config) (*AuthenticationV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AuthenticationV1Client{client}, nil -} - -// NewForConfigOrDie creates a new AuthenticationV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AuthenticationV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AuthenticationV1Client for the given RESTClient. -func New(c rest.Interface) *AuthenticationV1Client { - return &AuthenticationV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *AuthenticationV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go deleted file mode 100644 index 177209e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go deleted file mode 100644 index 25a8d6a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - rest "k8s.io/client-go/rest" -) - -// TokenReviewsGetter has a method to return a TokenReviewInterface. -// A group's client should implement this interface. -type TokenReviewsGetter interface { - TokenReviews() TokenReviewInterface -} - -// TokenReviewInterface has methods to work with TokenReview resources. 
-type TokenReviewInterface interface { - TokenReviewExpansion -} - -// tokenReviews implements TokenReviewInterface -type tokenReviews struct { - client rest.Interface -} - -// newTokenReviews returns a TokenReviews -func newTokenReviews(c *AuthenticationV1Client) *tokenReviews { - return &tokenReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go deleted file mode 100644 index ea21f1b..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1/tokenreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - authenticationapi "k8s.io/api/authentication/v1" -) - -type TokenReviewExpansion interface { - Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) -} - -func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - result = &authenticationapi.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - Body(tokenReview). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go deleted file mode 100644 index 7f3334a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/authentication_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/authentication/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AuthenticationV1beta1Interface interface { - RESTClient() rest.Interface - TokenReviewsGetter -} - -// AuthenticationV1beta1Client is used to interact with features provided by the authentication.k8s.io group. -type AuthenticationV1beta1Client struct { - restClient rest.Interface -} - -func (c *AuthenticationV1beta1Client) TokenReviews() TokenReviewInterface { - return newTokenReviews(c) -} - -// NewForConfig creates a new AuthenticationV1beta1Client for the given config. 
-func NewForConfig(c *rest.Config) (*AuthenticationV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AuthenticationV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new AuthenticationV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AuthenticationV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AuthenticationV1beta1Client for the given RESTClient. -func New(c rest.Interface) *AuthenticationV1beta1Client { - return &AuthenticationV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AuthenticationV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go deleted file mode 100644 index f6df769..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go deleted file mode 100644 index 0ac3561..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// TokenReviewsGetter has a method to return a TokenReviewInterface. -// A group's client should implement this interface. -type TokenReviewsGetter interface { - TokenReviews() TokenReviewInterface -} - -// TokenReviewInterface has methods to work with TokenReview resources. -type TokenReviewInterface interface { - TokenReviewExpansion -} - -// tokenReviews implements TokenReviewInterface -type tokenReviews struct { - client rest.Interface -} - -// newTokenReviews returns a TokenReviews -func newTokenReviews(c *AuthenticationV1beta1Client) *tokenReviews { - return &tokenReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go deleted file mode 100644 index 8f186fa..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authentication/v1beta1/tokenreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - authenticationapi "k8s.io/api/authentication/v1beta1" -) - -type TokenReviewExpansion interface { - Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) -} - -func (c *tokenReviews) Create(tokenReview *authenticationapi.TokenReview) (result *authenticationapi.TokenReview, err error) { - result = &authenticationapi.TokenReview{} - err = c.client.Post(). - Resource("tokenreviews"). - Body(tokenReview). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go deleted file mode 100644 index e84b900..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/authorization_client.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/authorization/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AuthorizationV1Interface interface { - RESTClient() rest.Interface - LocalSubjectAccessReviewsGetter - SelfSubjectAccessReviewsGetter - SelfSubjectRulesReviewsGetter - SubjectAccessReviewsGetter -} - -// AuthorizationV1Client is used to interact with features provided by the authorization.k8s.io group. -type AuthorizationV1Client struct { - restClient rest.Interface -} - -func (c *AuthorizationV1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { - return newLocalSubjectAccessReviews(c, namespace) -} - -func (c *AuthorizationV1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { - return newSelfSubjectAccessReviews(c) -} - -func (c *AuthorizationV1Client) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface { - return newSelfSubjectRulesReviews(c) -} - -func (c *AuthorizationV1Client) SubjectAccessReviews() SubjectAccessReviewInterface { - return newSubjectAccessReviews(c) -} - -// NewForConfig creates a new AuthorizationV1Client for the given config. -func NewForConfig(c *rest.Config) (*AuthorizationV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AuthorizationV1Client{client}, nil -} - -// NewForConfigOrDie creates a new AuthorizationV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AuthorizationV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AuthorizationV1Client for the given RESTClient. -func New(c rest.Interface) *AuthorizationV1Client { - return &AuthorizationV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AuthorizationV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go deleted file mode 100644 index 177209e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go deleted file mode 100644 index 0292c78..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - rest "k8s.io/client-go/rest" -) - -// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. -// A group's client should implement this interface. -type LocalSubjectAccessReviewsGetter interface { - LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface -} - -// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. 
-type LocalSubjectAccessReviewInterface interface { - LocalSubjectAccessReviewExpansion -} - -// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type localSubjectAccessReviews struct { - client rest.Interface - ns string -} - -// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews -func newLocalSubjectAccessReviews(c *AuthorizationV1Client, namespace string) *localSubjectAccessReviews { - return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go deleted file mode 100644 index 0c123b0..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - authorizationapi "k8s.io/api/authorization/v1" -) - -type LocalSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) -} - -func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - result = &authorizationapi.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go deleted file mode 100644 index 1e3a458..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - rest "k8s.io/client-go/rest" -) - -// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. -// A group's client should implement this interface. -type SelfSubjectAccessReviewsGetter interface { - SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface -} - -// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. 
-type SelfSubjectAccessReviewInterface interface { - SelfSubjectAccessReviewExpansion -} - -// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type selfSubjectAccessReviews struct { - client rest.Interface -} - -// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews -func newSelfSubjectAccessReviews(c *AuthorizationV1Client) *selfSubjectAccessReviews { - return &selfSubjectAccessReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go deleted file mode 100644 index 5b70a27..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - authorizationapi "k8s.io/api/authorization/v1" -) - -type SelfSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) -} - -func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - result = &authorizationapi.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go deleted file mode 100644 index 50a0233..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - rest "k8s.io/client-go/rest" -) - -// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. -// A group's client should implement this interface. -type SelfSubjectRulesReviewsGetter interface { - SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface -} - -// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. 
-type SelfSubjectRulesReviewInterface interface { - SelfSubjectRulesReviewExpansion -} - -// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface -type selfSubjectRulesReviews struct { - client rest.Interface -} - -// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews -func newSelfSubjectRulesReviews(c *AuthorizationV1Client) *selfSubjectRulesReviews { - return &selfSubjectRulesReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go deleted file mode 100644 index e2cad88..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/selfsubjectrulesreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - authorizationapi "k8s.io/api/authorization/v1" -) - -type SelfSubjectRulesReviewExpansion interface { - Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) -} - -func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - result = &authorizationapi.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - Body(srr). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go deleted file mode 100644 index 9c09008..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - rest "k8s.io/client-go/rest" -) - -// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. -// A group's client should implement this interface. -type SubjectAccessReviewsGetter interface { - SubjectAccessReviews() SubjectAccessReviewInterface -} - -// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. 
-type SubjectAccessReviewInterface interface { - SubjectAccessReviewExpansion -} - -// subjectAccessReviews implements SubjectAccessReviewInterface -type subjectAccessReviews struct { - client rest.Interface -} - -// newSubjectAccessReviews returns a SubjectAccessReviews -func newSubjectAccessReviews(c *AuthorizationV1Client) *subjectAccessReviews { - return &subjectAccessReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go deleted file mode 100644 index b5ed87d..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1/subjectaccessreview_expansion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - authorizationapi "k8s.io/api/authorization/v1" -) - -// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. -type SubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) -} - -func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - result = &authorizationapi.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go deleted file mode 100644 index 7f236f6..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/authorization_client.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/authorization/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AuthorizationV1beta1Interface interface { - RESTClient() rest.Interface - LocalSubjectAccessReviewsGetter - SelfSubjectAccessReviewsGetter - SelfSubjectRulesReviewsGetter - SubjectAccessReviewsGetter -} - -// AuthorizationV1beta1Client is used to interact with features provided by the authorization.k8s.io group. 
-type AuthorizationV1beta1Client struct { - restClient rest.Interface -} - -func (c *AuthorizationV1beta1Client) LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface { - return newLocalSubjectAccessReviews(c, namespace) -} - -func (c *AuthorizationV1beta1Client) SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface { - return newSelfSubjectAccessReviews(c) -} - -func (c *AuthorizationV1beta1Client) SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface { - return newSelfSubjectRulesReviews(c) -} - -func (c *AuthorizationV1beta1Client) SubjectAccessReviews() SubjectAccessReviewInterface { - return newSubjectAccessReviews(c) -} - -// NewForConfig creates a new AuthorizationV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*AuthorizationV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AuthorizationV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new AuthorizationV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AuthorizationV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AuthorizationV1beta1Client for the given RESTClient. -func New(c rest.Interface) *AuthorizationV1beta1Client { - return &AuthorizationV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AuthorizationV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go deleted file mode 100644 index f6df769..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go deleted file mode 100644 index f5e86a7..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// LocalSubjectAccessReviewsGetter has a method to return a LocalSubjectAccessReviewInterface. -// A group's client should implement this interface. -type LocalSubjectAccessReviewsGetter interface { - LocalSubjectAccessReviews(namespace string) LocalSubjectAccessReviewInterface -} - -// LocalSubjectAccessReviewInterface has methods to work with LocalSubjectAccessReview resources. -type LocalSubjectAccessReviewInterface interface { - LocalSubjectAccessReviewExpansion -} - -// localSubjectAccessReviews implements LocalSubjectAccessReviewInterface -type localSubjectAccessReviews struct { - client rest.Interface - ns string -} - -// newLocalSubjectAccessReviews returns a LocalSubjectAccessReviews -func newLocalSubjectAccessReviews(c *AuthorizationV1beta1Client, namespace string) *localSubjectAccessReviews { - return &localSubjectAccessReviews{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go deleted file mode 100644 index bf1b8a5..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/localsubjectaccessreview_expansion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - authorizationapi "k8s.io/api/authorization/v1beta1" -) - -type LocalSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) -} - -func (c *localSubjectAccessReviews) Create(sar *authorizationapi.LocalSubjectAccessReview) (result *authorizationapi.LocalSubjectAccessReview, err error) { - result = &authorizationapi.LocalSubjectAccessReview{} - err = c.client.Post(). - Namespace(c.ns). - Resource("localsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go deleted file mode 100644 index 906712c..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// SelfSubjectAccessReviewsGetter has a method to return a SelfSubjectAccessReviewInterface. -// A group's client should implement this interface. -type SelfSubjectAccessReviewsGetter interface { - SelfSubjectAccessReviews() SelfSubjectAccessReviewInterface -} - -// SelfSubjectAccessReviewInterface has methods to work with SelfSubjectAccessReview resources. -type SelfSubjectAccessReviewInterface interface { - SelfSubjectAccessReviewExpansion -} - -// selfSubjectAccessReviews implements SelfSubjectAccessReviewInterface -type selfSubjectAccessReviews struct { - client rest.Interface -} - -// newSelfSubjectAccessReviews returns a SelfSubjectAccessReviews -func newSelfSubjectAccessReviews(c *AuthorizationV1beta1Client) *selfSubjectAccessReviews { - return &selfSubjectAccessReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go deleted file mode 100644 index 58fecfd..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectaccessreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - authorizationapi "k8s.io/api/authorization/v1beta1" -) - -type SelfSubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) -} - -func (c *selfSubjectAccessReviews) Create(sar *authorizationapi.SelfSubjectAccessReview) (result *authorizationapi.SelfSubjectAccessReview, err error) { - result = &authorizationapi.SelfSubjectAccessReview{} - err = c.client.Post(). - Resource("selfsubjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go deleted file mode 100644 index 56c0f99..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// SelfSubjectRulesReviewsGetter has a method to return a SelfSubjectRulesReviewInterface. -// A group's client should implement this interface. -type SelfSubjectRulesReviewsGetter interface { - SelfSubjectRulesReviews() SelfSubjectRulesReviewInterface -} - -// SelfSubjectRulesReviewInterface has methods to work with SelfSubjectRulesReview resources. -type SelfSubjectRulesReviewInterface interface { - SelfSubjectRulesReviewExpansion -} - -// selfSubjectRulesReviews implements SelfSubjectRulesReviewInterface -type selfSubjectRulesReviews struct { - client rest.Interface -} - -// newSelfSubjectRulesReviews returns a SelfSubjectRulesReviews -func newSelfSubjectRulesReviews(c *AuthorizationV1beta1Client) *selfSubjectRulesReviews { - return &selfSubjectRulesReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go deleted file mode 100644 index 5f1f37e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/selfsubjectrulesreview_expansion.go +++ /dev/null @@ -1,35 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - authorizationapi "k8s.io/api/authorization/v1beta1" -) - -type SelfSubjectRulesReviewExpansion interface { - Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) -} - -func (c *selfSubjectRulesReviews) Create(srr *authorizationapi.SelfSubjectRulesReview) (result *authorizationapi.SelfSubjectRulesReview, err error) { - result = &authorizationapi.SelfSubjectRulesReview{} - err = c.client.Post(). - Resource("selfsubjectrulesreviews"). - Body(srr). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go deleted file mode 100644 index 79f1ec5..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// SubjectAccessReviewsGetter has a method to return a SubjectAccessReviewInterface. -// A group's client should implement this interface. -type SubjectAccessReviewsGetter interface { - SubjectAccessReviews() SubjectAccessReviewInterface -} - -// SubjectAccessReviewInterface has methods to work with SubjectAccessReview resources. -type SubjectAccessReviewInterface interface { - SubjectAccessReviewExpansion -} - -// subjectAccessReviews implements SubjectAccessReviewInterface -type subjectAccessReviews struct { - client rest.Interface -} - -// newSubjectAccessReviews returns a SubjectAccessReviews -func newSubjectAccessReviews(c *AuthorizationV1beta1Client) *subjectAccessReviews { - return &subjectAccessReviews{ - client: c.RESTClient(), - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go deleted file mode 100644 index 4f93689..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/authorization/v1beta1/subjectaccessreview_expansion.go +++ /dev/null @@ -1,36 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package v1beta1 - -import ( - authorizationapi "k8s.io/api/authorization/v1beta1" -) - -// The SubjectAccessReviewExpansion interface allows manually adding extra methods to the AuthorizationInterface. -type SubjectAccessReviewExpansion interface { - Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) -} - -func (c *subjectAccessReviews) Create(sar *authorizationapi.SubjectAccessReview) (result *authorizationapi.SubjectAccessReview, err error) { - result = &authorizationapi.SubjectAccessReview{} - err = c.client.Post(). - Resource("subjectaccessreviews"). - Body(sar). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go deleted file mode 100644 index 2bd49e2..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/autoscaling_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/autoscaling/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AutoscalingV1Interface interface { - RESTClient() rest.Interface - HorizontalPodAutoscalersGetter -} - -// AutoscalingV1Client is used to interact with features provided by the autoscaling group. -type AutoscalingV1Client struct { - restClient rest.Interface -} - -func (c *AutoscalingV1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - -// NewForConfig creates a new AutoscalingV1Client for the given config. -func NewForConfig(c *rest.Config) (*AutoscalingV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AutoscalingV1Client{client}, nil -} - -// NewForConfigOrDie creates a new AutoscalingV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AutoscalingV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AutoscalingV1Client for the given RESTClient. -func New(c rest.Interface) *AutoscalingV1Client { - return &AutoscalingV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *AutoscalingV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go deleted file mode 100644 index c60028b..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go deleted file mode 100644 index 6891b6b..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v1/horizontalpodautoscaler.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/autoscaling/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. -// A group's client should implement this interface. 
-type HorizontalPodAutoscalersGetter interface { - HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface -} - -// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. -type HorizontalPodAutoscalerInterface interface { - Create(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - Update(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - UpdateStatus(*v1.HorizontalPodAutoscaler) (*v1.HorizontalPodAutoscaler, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.HorizontalPodAutoscaler, error) - List(opts metav1.ListOptions) (*v1.HorizontalPodAutoscalerList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) - HorizontalPodAutoscalerExpansion -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type horizontalPodAutoscalers struct { - client rest.Interface - ns string -} - -// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingV1Client, namespace string) *horizontalPodAutoscalers { - return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options metav1.GetOptions) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts metav1.ListOptions) (result *v1.HorizontalPodAutoscalerList, err error) { - result = &v1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. 
-func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v1.HorizontalPodAutoscaler) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.HorizontalPodAutoscaler, err error) { - result = &v1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go deleted file mode 100644 index 3a49b26..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/autoscaling_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2beta1 - -import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AutoscalingV2beta1Interface interface { - RESTClient() rest.Interface - HorizontalPodAutoscalersGetter -} - -// AutoscalingV2beta1Client is used to interact with features provided by the autoscaling group. 
-type AutoscalingV2beta1Client struct { - restClient rest.Interface -} - -func (c *AutoscalingV2beta1Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - -// NewForConfig creates a new AutoscalingV2beta1Client for the given config. -func NewForConfig(c *rest.Config) (*AutoscalingV2beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AutoscalingV2beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new AutoscalingV2beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AutoscalingV2beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AutoscalingV2beta1Client for the given RESTClient. -func New(c rest.Interface) *AutoscalingV2beta1Client { - return &AutoscalingV2beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v2beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *AutoscalingV2beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go deleted file mode 100644 index 06fd344..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v2beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go deleted file mode 100644 index 6f1704f..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2beta1 - -type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go deleted file mode 100644 index 4ac8cce..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1/horizontalpodautoscaler.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2beta1 - -import ( - v2beta1 "k8s.io/api/autoscaling/v2beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. -// A group's client should implement this interface. -type HorizontalPodAutoscalersGetter interface { - HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface -} - -// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. -type HorizontalPodAutoscalerInterface interface { - Create(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - Update(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - UpdateStatus(*v2beta1.HorizontalPodAutoscaler) (*v2beta1.HorizontalPodAutoscaler, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v2beta1.HorizontalPodAutoscaler, error) - List(opts v1.ListOptions) (*v2beta1.HorizontalPodAutoscalerList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) - HorizontalPodAutoscalerExpansion -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type horizontalPodAutoscalers struct { - client rest.Interface - ns string -} - -// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingV2beta1Client, namespace string) *horizontalPodAutoscalers { - return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta1.HorizontalPodAutoscalerList, err error) { - result = &v2beta1.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta1.HorizontalPodAutoscaler) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. 
-func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta1.HorizontalPodAutoscaler, err error) { - result = &v2beta1.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go deleted file mode 100644 index 03fe25e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/autoscaling_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2beta2 - -import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type AutoscalingV2beta2Interface interface { - RESTClient() rest.Interface - HorizontalPodAutoscalersGetter -} - -// AutoscalingV2beta2Client is used to interact with features provided by the autoscaling group. -type AutoscalingV2beta2Client struct { - restClient rest.Interface -} - -func (c *AutoscalingV2beta2Client) HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface { - return newHorizontalPodAutoscalers(c, namespace) -} - -// NewForConfig creates a new AutoscalingV2beta2Client for the given config. -func NewForConfig(c *rest.Config) (*AutoscalingV2beta2Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &AutoscalingV2beta2Client{client}, nil -} - -// NewForConfigOrDie creates a new AutoscalingV2beta2Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *AutoscalingV2beta2Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new AutoscalingV2beta2Client for the given RESTClient. -func New(c rest.Interface) *AutoscalingV2beta2Client { - return &AutoscalingV2beta2Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v2beta2.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *AutoscalingV2beta2Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go deleted file mode 100644 index c600965..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v2beta2 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go deleted file mode 100644 index 822e062..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2beta2 - -type HorizontalPodAutoscalerExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go b/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go deleted file mode 100644 index ddabda7..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2/horizontalpodautoscaler.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2beta2 - -import ( - v2beta2 "k8s.io/api/autoscaling/v2beta2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// HorizontalPodAutoscalersGetter has a method to return a HorizontalPodAutoscalerInterface. 
-// A group's client should implement this interface. -type HorizontalPodAutoscalersGetter interface { - HorizontalPodAutoscalers(namespace string) HorizontalPodAutoscalerInterface -} - -// HorizontalPodAutoscalerInterface has methods to work with HorizontalPodAutoscaler resources. -type HorizontalPodAutoscalerInterface interface { - Create(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - Update(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - UpdateStatus(*v2beta2.HorizontalPodAutoscaler) (*v2beta2.HorizontalPodAutoscaler, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v2beta2.HorizontalPodAutoscaler, error) - List(opts v1.ListOptions) (*v2beta2.HorizontalPodAutoscalerList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) - HorizontalPodAutoscalerExpansion -} - -// horizontalPodAutoscalers implements HorizontalPodAutoscalerInterface -type horizontalPodAutoscalers struct { - client rest.Interface - ns string -} - -// newHorizontalPodAutoscalers returns a HorizontalPodAutoscalers -func newHorizontalPodAutoscalers(c *AutoscalingV2beta2Client, namespace string) *horizontalPodAutoscalers { - return &horizontalPodAutoscalers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the horizontalPodAutoscaler, and returns the corresponding horizontalPodAutoscaler object, and an error if there is any. -func (c *horizontalPodAutoscalers) Get(name string, options v1.GetOptions) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of HorizontalPodAutoscalers that match those selectors. -func (c *horizontalPodAutoscalers) List(opts v1.ListOptions) (result *v2beta2.HorizontalPodAutoscalerList, err error) { - result = &v2beta2.HorizontalPodAutoscalerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested horizontalPodAutoscalers. -func (c *horizontalPodAutoscalers) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a horizontalPodAutoscaler and creates it. Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Create(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Post(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Update takes the representation of a horizontalPodAutoscaler and updates it. 
Returns the server's representation of the horizontalPodAutoscaler, and an error, if there is any. -func (c *horizontalPodAutoscalers) Update(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *horizontalPodAutoscalers) UpdateStatus(horizontalPodAutoscaler *v2beta2.HorizontalPodAutoscaler) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Put(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(horizontalPodAutoscaler.Name). - SubResource("status"). - Body(horizontalPodAutoscaler). - Do(). - Into(result) - return -} - -// Delete takes name of the horizontalPodAutoscaler and deletes it. Returns an error if one occurs. -func (c *horizontalPodAutoscalers) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *horizontalPodAutoscalers) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched horizontalPodAutoscaler. -func (c *horizontalPodAutoscalers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2beta2.HorizontalPodAutoscaler, err error) { - result = &v2beta2.HorizontalPodAutoscaler{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("horizontalpodautoscalers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go deleted file mode 100644 index d5e35e6..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/batch_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/batch/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type BatchV1Interface interface { - RESTClient() rest.Interface - JobsGetter -} - -// BatchV1Client is used to interact with features provided by the batch group. 
-type BatchV1Client struct { - restClient rest.Interface -} - -func (c *BatchV1Client) Jobs(namespace string) JobInterface { - return newJobs(c, namespace) -} - -// NewForConfig creates a new BatchV1Client for the given config. -func NewForConfig(c *rest.Config) (*BatchV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &BatchV1Client{client}, nil -} - -// NewForConfigOrDie creates a new BatchV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *BatchV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new BatchV1Client for the given RESTClient. -func New(c rest.Interface) *BatchV1Client { - return &BatchV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *BatchV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go deleted file mode 100644 index dc41429..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1 - -type JobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go deleted file mode 100644 index ba8332a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1/job.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/batch/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// JobsGetter has a method to return a JobInterface. -// A group's client should implement this interface. -type JobsGetter interface { - Jobs(namespace string) JobInterface -} - -// JobInterface has methods to work with Job resources. -type JobInterface interface { - Create(*v1.Job) (*v1.Job, error) - Update(*v1.Job) (*v1.Job, error) - UpdateStatus(*v1.Job) (*v1.Job, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Job, error) - List(opts metav1.ListOptions) (*v1.JobList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) - JobExpansion -} - -// jobs implements JobInterface -type jobs struct { - client rest.Interface - ns string -} - -// newJobs returns a Jobs -func newJobs(c *BatchV1Client, namespace string) *jobs { - return &jobs{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the job, and returns the corresponding job object, and an error if there is any. -func (c *jobs) Get(name string, options metav1.GetOptions) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Jobs that match those selectors. -func (c *jobs) List(opts metav1.ListOptions) (result *v1.JobList, err error) { - result = &v1.JobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested jobs. -func (c *jobs) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a job and creates it. Returns the server's representation of the job, and an error, if there is any. 
-func (c *jobs) Create(job *v1.Job) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Post(). - Namespace(c.ns). - Resource("jobs"). - Body(job). - Do(). - Into(result) - return -} - -// Update takes the representation of a job and updates it. Returns the server's representation of the job, and an error, if there is any. -func (c *jobs) Update(job *v1.Job) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - Body(job). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *jobs) UpdateStatus(job *v1.Job) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Put(). - Namespace(c.ns). - Resource("jobs"). - Name(job.Name). - SubResource("status"). - Body(job). - Do(). - Into(result) - return -} - -// Delete takes name of the job and deletes it. Returns an error if one occurs. -func (c *jobs) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *jobs) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("jobs"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched job. -func (c *jobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Job, err error) { - result = &v1.Job{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("jobs"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go deleted file mode 100644 index aa71ca8..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/batch_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/batch/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type BatchV1beta1Interface interface { - RESTClient() rest.Interface - CronJobsGetter -} - -// BatchV1beta1Client is used to interact with features provided by the batch group. -type BatchV1beta1Client struct { - restClient rest.Interface -} - -func (c *BatchV1beta1Client) CronJobs(namespace string) CronJobInterface { - return newCronJobs(c, namespace) -} - -// NewForConfig creates a new BatchV1beta1Client for the given config. 
-func NewForConfig(c *rest.Config) (*BatchV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &BatchV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new BatchV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *BatchV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new BatchV1beta1Client for the given RESTClient. -func New(c rest.Interface) *BatchV1beta1Client { - return &BatchV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *BatchV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go deleted file mode 100644 index 04637c3..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/cronjob.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/batch/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// CronJobsGetter has a method to return a CronJobInterface. -// A group's client should implement this interface. -type CronJobsGetter interface { - CronJobs(namespace string) CronJobInterface -} - -// CronJobInterface has methods to work with CronJob resources. 
-type CronJobInterface interface { - Create(*v1beta1.CronJob) (*v1beta1.CronJob, error) - Update(*v1beta1.CronJob) (*v1beta1.CronJob, error) - UpdateStatus(*v1beta1.CronJob) (*v1beta1.CronJob, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CronJob, error) - List(opts v1.ListOptions) (*v1beta1.CronJobList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) - CronJobExpansion -} - -// cronJobs implements CronJobInterface -type cronJobs struct { - client rest.Interface - ns string -} - -// newCronJobs returns a CronJobs -func newCronJobs(c *BatchV1beta1Client, namespace string) *cronJobs { - return &cronJobs{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(opts v1.ListOptions) (result *v1beta1.CronJobList, err error) { - result = &v1beta1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - Body(cronJob). - Do(). - Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - Body(cronJob). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *cronJobs) UpdateStatus(cronJob *v1beta1.CronJob) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - Body(cronJob). - Do(). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. -func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(options). 
- Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CronJob, err error) { - result = &v1beta1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go deleted file mode 100644 index 145e14a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v1beta1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type CronJobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go deleted file mode 100644 index e6c6306..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/batch_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2alpha1 - -import ( - v2alpha1 "k8s.io/api/batch/v2alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type BatchV2alpha1Interface interface { - RESTClient() rest.Interface - CronJobsGetter -} - -// BatchV2alpha1Client is used to interact with features provided by the batch group. -type BatchV2alpha1Client struct { - restClient rest.Interface -} - -func (c *BatchV2alpha1Client) CronJobs(namespace string) CronJobInterface { - return newCronJobs(c, namespace) -} - -// NewForConfig creates a new BatchV2alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*BatchV2alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &BatchV2alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new BatchV2alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *BatchV2alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new BatchV2alpha1Client for the given RESTClient. -func New(c rest.Interface) *BatchV2alpha1Client { - return &BatchV2alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v2alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *BatchV2alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go deleted file mode 100644 index 4d922f9..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/cronjob.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2alpha1 - -import ( - v2alpha1 "k8s.io/api/batch/v2alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// CronJobsGetter has a method to return a CronJobInterface. -// A group's client should implement this interface. 
-type CronJobsGetter interface { - CronJobs(namespace string) CronJobInterface -} - -// CronJobInterface has methods to work with CronJob resources. -type CronJobInterface interface { - Create(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) - Update(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) - UpdateStatus(*v2alpha1.CronJob) (*v2alpha1.CronJob, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v2alpha1.CronJob, error) - List(opts v1.ListOptions) (*v2alpha1.CronJobList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) - CronJobExpansion -} - -// cronJobs implements CronJobInterface -type cronJobs struct { - client rest.Interface - ns string -} - -// newCronJobs returns a CronJobs -func newCronJobs(c *BatchV2alpha1Client, namespace string) *cronJobs { - return &cronJobs{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the cronJob, and returns the corresponding cronJob object, and an error if there is any. -func (c *cronJobs) Get(name string, options v1.GetOptions) (result *v2alpha1.CronJob, err error) { - result = &v2alpha1.CronJob{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CronJobs that match those selectors. -func (c *cronJobs) List(opts v1.ListOptions) (result *v2alpha1.CronJobList, err error) { - result = &v2alpha1.CronJobList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested cronJobs. -func (c *cronJobs) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a cronJob and creates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Create(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { - result = &v2alpha1.CronJob{} - err = c.client.Post(). - Namespace(c.ns). - Resource("cronjobs"). - Body(cronJob). - Do(). - Into(result) - return -} - -// Update takes the representation of a cronJob and updates it. Returns the server's representation of the cronJob, and an error, if there is any. -func (c *cronJobs) Update(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { - result = &v2alpha1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - Body(cronJob). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *cronJobs) UpdateStatus(cronJob *v2alpha1.CronJob) (result *v2alpha1.CronJob, err error) { - result = &v2alpha1.CronJob{} - err = c.client.Put(). - Namespace(c.ns). - Resource("cronjobs"). - Name(cronJob.Name). - SubResource("status"). - Body(cronJob). - Do(). - Into(result) - return -} - -// Delete takes name of the cronJob and deletes it. Returns an error if one occurs. 
-func (c *cronJobs) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *cronJobs) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("cronjobs"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched cronJob. -func (c *cronJobs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v2alpha1.CronJob, err error) { - result = &v2alpha1.CronJob{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("cronjobs"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go deleted file mode 100644 index 3efe0d2..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v2alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go deleted file mode 100644 index 34dafc4..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/batch/v2alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v2alpha1 - -type CronJobExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go deleted file mode 100644 index baac42e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificates_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/certificates/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type CertificatesV1beta1Interface interface { - RESTClient() rest.Interface - CertificateSigningRequestsGetter -} - -// CertificatesV1beta1Client is used to interact with features provided by the certificates.k8s.io group. -type CertificatesV1beta1Client struct { - restClient rest.Interface -} - -func (c *CertificatesV1beta1Client) CertificateSigningRequests() CertificateSigningRequestInterface { - return newCertificateSigningRequests(c) -} - -// NewForConfig creates a new CertificatesV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*CertificatesV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CertificatesV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new CertificatesV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CertificatesV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CertificatesV1beta1Client for the given RESTClient. -func New(c rest.Interface) *CertificatesV1beta1Client { - return &CertificatesV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CertificatesV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go deleted file mode 100644 index b39169a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - v1beta1 "k8s.io/api/certificates/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// CertificateSigningRequestsGetter has a method to return a CertificateSigningRequestInterface. -// A group's client should implement this interface. -type CertificateSigningRequestsGetter interface { - CertificateSigningRequests() CertificateSigningRequestInterface -} - -// CertificateSigningRequestInterface has methods to work with CertificateSigningRequest resources. -type CertificateSigningRequestInterface interface { - Create(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - Update(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - UpdateStatus(*v1beta1.CertificateSigningRequest) (*v1beta1.CertificateSigningRequest, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.CertificateSigningRequest, error) - List(opts v1.ListOptions) (*v1beta1.CertificateSigningRequestList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) - CertificateSigningRequestExpansion -} - -// certificateSigningRequests implements CertificateSigningRequestInterface -type certificateSigningRequests struct { - client rest.Interface -} - -// newCertificateSigningRequests returns a CertificateSigningRequests -func newCertificateSigningRequests(c *CertificatesV1beta1Client) *certificateSigningRequests { - return &certificateSigningRequests{ - client: c.RESTClient(), - } -} - -// Get takes name of the certificateSigningRequest, and returns the corresponding certificateSigningRequest object, and an error if there is any. -func (c *certificateSigningRequests) Get(name string, options v1.GetOptions) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of CertificateSigningRequests that match those selectors. -func (c *certificateSigningRequests) List(opts v1.ListOptions) (result *v1beta1.CertificateSigningRequestList, err error) { - result = &v1beta1.CertificateSigningRequestList{} - err = c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested certificateSigningRequests. -func (c *certificateSigningRequests) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("certificatesigningrequests"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a certificateSigningRequest and creates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. 
-func (c *certificateSigningRequests) Create(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Post(). - Resource("certificatesigningrequests"). - Body(certificateSigningRequest). - Do(). - Into(result) - return -} - -// Update takes the representation of a certificateSigningRequest and updates it. Returns the server's representation of the certificateSigningRequest, and an error, if there is any. -func (c *certificateSigningRequests) Update(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - Body(certificateSigningRequest). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *certificateSigningRequests) UpdateStatus(certificateSigningRequest *v1beta1.CertificateSigningRequest) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - SubResource("status"). - Body(certificateSigningRequest). - Do(). - Into(result) - return -} - -// Delete takes name of the certificateSigningRequest and deletes it. Returns an error if one occurs. -func (c *certificateSigningRequests) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *certificateSigningRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("certificatesigningrequests"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched certificateSigningRequest. -func (c *certificateSigningRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.CertificateSigningRequest, err error) { - result = &v1beta1.CertificateSigningRequest{} - err = c.client.Patch(pt). - Resource("certificatesigningrequests"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go deleted file mode 100644 index c63b806..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/certificatesigningrequest_expansion.go +++ /dev/null @@ -1,37 +0,0 @@ -/* -Copyright 2017 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - certificates "k8s.io/api/certificates/v1beta1" -) - -type CertificateSigningRequestExpansion interface { - UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) -} - -func (c *certificateSigningRequests) UpdateApproval(certificateSigningRequest *certificates.CertificateSigningRequest) (result *certificates.CertificateSigningRequest, err error) { - result = &certificates.CertificateSigningRequest{} - err = c.client.Put(). - Resource("certificatesigningrequests"). - Name(certificateSigningRequest.Name). - Body(certificateSigningRequest). - SubResource("approval"). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go deleted file mode 100644 index f6df769..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/certificates/v1beta1/generated_expansion.go +++ /dev/null @@ -1,19 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go deleted file mode 100644 index 91a7648..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/coordination_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/coordination/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type CoordinationV1beta1Interface interface { - RESTClient() rest.Interface - LeasesGetter -} - -// CoordinationV1beta1Client is used to interact with features provided by the coordination.k8s.io group. -type CoordinationV1beta1Client struct { - restClient rest.Interface -} - -func (c *CoordinationV1beta1Client) Leases(namespace string) LeaseInterface { - return newLeases(c, namespace) -} - -// NewForConfig creates a new CoordinationV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*CoordinationV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoordinationV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new CoordinationV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CoordinationV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoordinationV1beta1Client for the given RESTClient. -func New(c rest.Interface) *CoordinationV1beta1Client { - return &CoordinationV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CoordinationV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. 
-package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go deleted file mode 100644 index dfd180d..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type LeaseExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go b/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go deleted file mode 100644 index 1627725..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/coordination/v1beta1/lease.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/coordination/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// LeasesGetter has a method to return a LeaseInterface. -// A group's client should implement this interface. -type LeasesGetter interface { - Leases(namespace string) LeaseInterface -} - -// LeaseInterface has methods to work with Lease resources. -type LeaseInterface interface { - Create(*v1beta1.Lease) (*v1beta1.Lease, error) - Update(*v1beta1.Lease) (*v1beta1.Lease, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Lease, error) - List(opts v1.ListOptions) (*v1beta1.LeaseList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) - LeaseExpansion -} - -// leases implements LeaseInterface -type leases struct { - client rest.Interface - ns string -} - -// newLeases returns a Leases -func newLeases(c *CoordinationV1beta1Client, namespace string) *leases { - return &leases{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the lease, and returns the corresponding lease object, and an error if there is any. 
-func (c *leases) Get(name string, options v1.GetOptions) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Leases that match those selectors. -func (c *leases) List(opts v1.ListOptions) (result *v1beta1.LeaseList, err error) { - result = &v1beta1.LeaseList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested leases. -func (c *leases) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a lease and creates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Create(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Post(). - Namespace(c.ns). - Resource("leases"). - Body(lease). - Do(). - Into(result) - return -} - -// Update takes the representation of a lease and updates it. Returns the server's representation of the lease, and an error, if there is any. -func (c *leases) Update(lease *v1beta1.Lease) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Put(). - Namespace(c.ns). - Resource("leases"). - Name(lease.Name). - Body(lease). - Do(). - Into(result) - return -} - -// Delete takes name of the lease and deletes it. Returns an error if one occurs. -func (c *leases) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *leases) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("leases"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched lease. -func (c *leases) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Lease, err error) { - result = &v1beta1.Lease{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("leases"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go deleted file mode 100644 index e497661..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/componentstatus.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ComponentStatusesGetter has a method to return a ComponentStatusInterface. -// A group's client should implement this interface. -type ComponentStatusesGetter interface { - ComponentStatuses() ComponentStatusInterface -} - -// ComponentStatusInterface has methods to work with ComponentStatus resources. -type ComponentStatusInterface interface { - Create(*v1.ComponentStatus) (*v1.ComponentStatus, error) - Update(*v1.ComponentStatus) (*v1.ComponentStatus, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ComponentStatus, error) - List(opts metav1.ListOptions) (*v1.ComponentStatusList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) - ComponentStatusExpansion -} - -// componentStatuses implements ComponentStatusInterface -type componentStatuses struct { - client rest.Interface -} - -// newComponentStatuses returns a ComponentStatuses -func newComponentStatuses(c *CoreV1Client) *componentStatuses { - return &componentStatuses{ - client: c.RESTClient(), - } -} - -// Get takes name of the componentStatus, and returns the corresponding componentStatus object, and an error if there is any. -func (c *componentStatuses) Get(name string, options metav1.GetOptions) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Get(). - Resource("componentstatuses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ComponentStatuses that match those selectors. -func (c *componentStatuses) List(opts metav1.ListOptions) (result *v1.ComponentStatusList, err error) { - result = &v1.ComponentStatusList{} - err = c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested componentStatuses. -func (c *componentStatuses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("componentstatuses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a componentStatus and creates it. Returns the server's representation of the componentStatus, and an error, if there is any. -func (c *componentStatuses) Create(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Post(). - Resource("componentstatuses"). - Body(componentStatus). - Do(). - Into(result) - return -} - -// Update takes the representation of a componentStatus and updates it. Returns the server's representation of the componentStatus, and an error, if there is any. 
-func (c *componentStatuses) Update(componentStatus *v1.ComponentStatus) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Put(). - Resource("componentstatuses"). - Name(componentStatus.Name). - Body(componentStatus). - Do(). - Into(result) - return -} - -// Delete takes name of the componentStatus and deletes it. Returns an error if one occurs. -func (c *componentStatuses) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *componentStatuses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Resource("componentstatuses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched componentStatus. -func (c *componentStatuses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ComponentStatus, err error) { - result = &v1.ComponentStatus{} - err = c.client.Patch(pt). - Resource("componentstatuses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go deleted file mode 100644 index 0984ae7..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/configmap.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ConfigMapsGetter has a method to return a ConfigMapInterface. -// A group's client should implement this interface. -type ConfigMapsGetter interface { - ConfigMaps(namespace string) ConfigMapInterface -} - -// ConfigMapInterface has methods to work with ConfigMap resources. 
-type ConfigMapInterface interface { - Create(*v1.ConfigMap) (*v1.ConfigMap, error) - Update(*v1.ConfigMap) (*v1.ConfigMap, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ConfigMap, error) - List(opts metav1.ListOptions) (*v1.ConfigMapList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) - ConfigMapExpansion -} - -// configMaps implements ConfigMapInterface -type configMaps struct { - client rest.Interface - ns string -} - -// newConfigMaps returns a ConfigMaps -func newConfigMaps(c *CoreV1Client, namespace string) *configMaps { - return &configMaps{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the configMap, and returns the corresponding configMap object, and an error if there is any. -func (c *configMaps) Get(name string, options metav1.GetOptions) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ConfigMaps that match those selectors. -func (c *configMaps) List(opts metav1.ListOptions) (result *v1.ConfigMapList, err error) { - result = &v1.ConfigMapList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested configMaps. -func (c *configMaps) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a configMap and creates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Create(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Post(). - Namespace(c.ns). - Resource("configmaps"). - Body(configMap). - Do(). - Into(result) - return -} - -// Update takes the representation of a configMap and updates it. Returns the server's representation of the configMap, and an error, if there is any. -func (c *configMaps) Update(configMap *v1.ConfigMap) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Put(). - Namespace(c.ns). - Resource("configmaps"). - Name(configMap.Name). - Body(configMap). - Do(). - Into(result) - return -} - -// Delete takes name of the configMap and deletes it. Returns an error if one occurs. -func (c *configMaps) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *configMaps) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("configmaps"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched configMap. 
-func (c *configMaps) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ConfigMap, err error) { - result = &v1.ConfigMap{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("configmaps"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go deleted file mode 100644 index 044a28e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/core_client.go +++ /dev/null @@ -1,165 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type CoreV1Interface interface { - RESTClient() rest.Interface - ComponentStatusesGetter - ConfigMapsGetter - EndpointsGetter - EventsGetter - LimitRangesGetter - NamespacesGetter - NodesGetter - PersistentVolumesGetter - PersistentVolumeClaimsGetter - PodsGetter - PodTemplatesGetter - ReplicationControllersGetter - ResourceQuotasGetter - SecretsGetter - ServicesGetter - ServiceAccountsGetter -} - -// CoreV1Client is used to interact with features provided by the group. 
-type CoreV1Client struct { - restClient rest.Interface -} - -func (c *CoreV1Client) ComponentStatuses() ComponentStatusInterface { - return newComponentStatuses(c) -} - -func (c *CoreV1Client) ConfigMaps(namespace string) ConfigMapInterface { - return newConfigMaps(c, namespace) -} - -func (c *CoreV1Client) Endpoints(namespace string) EndpointsInterface { - return newEndpoints(c, namespace) -} - -func (c *CoreV1Client) Events(namespace string) EventInterface { - return newEvents(c, namespace) -} - -func (c *CoreV1Client) LimitRanges(namespace string) LimitRangeInterface { - return newLimitRanges(c, namespace) -} - -func (c *CoreV1Client) Namespaces() NamespaceInterface { - return newNamespaces(c) -} - -func (c *CoreV1Client) Nodes() NodeInterface { - return newNodes(c) -} - -func (c *CoreV1Client) PersistentVolumes() PersistentVolumeInterface { - return newPersistentVolumes(c) -} - -func (c *CoreV1Client) PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface { - return newPersistentVolumeClaims(c, namespace) -} - -func (c *CoreV1Client) Pods(namespace string) PodInterface { - return newPods(c, namespace) -} - -func (c *CoreV1Client) PodTemplates(namespace string) PodTemplateInterface { - return newPodTemplates(c, namespace) -} - -func (c *CoreV1Client) ReplicationControllers(namespace string) ReplicationControllerInterface { - return newReplicationControllers(c, namespace) -} - -func (c *CoreV1Client) ResourceQuotas(namespace string) ResourceQuotaInterface { - return newResourceQuotas(c, namespace) -} - -func (c *CoreV1Client) Secrets(namespace string) SecretInterface { - return newSecrets(c, namespace) -} - -func (c *CoreV1Client) Services(namespace string) ServiceInterface { - return newServices(c, namespace) -} - -func (c *CoreV1Client) ServiceAccounts(namespace string) ServiceAccountInterface { - return newServiceAccounts(c, namespace) -} - -// NewForConfig creates a new CoreV1Client for the given config. -func NewForConfig(c *rest.Config) (*CoreV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &CoreV1Client{client}, nil -} - -// NewForConfigOrDie creates a new CoreV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *CoreV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new CoreV1Client for the given RESTClient. -func New(c rest.Interface) *CoreV1Client { - return &CoreV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/api" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *CoreV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go deleted file mode 100644 index dd82167..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/endpoints.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// EndpointsGetter has a method to return a EndpointsInterface. -// A group's client should implement this interface. -type EndpointsGetter interface { - Endpoints(namespace string) EndpointsInterface -} - -// EndpointsInterface has methods to work with Endpoints resources. -type EndpointsInterface interface { - Create(*v1.Endpoints) (*v1.Endpoints, error) - Update(*v1.Endpoints) (*v1.Endpoints, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Endpoints, error) - List(opts metav1.ListOptions) (*v1.EndpointsList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) - EndpointsExpansion -} - -// endpoints implements EndpointsInterface -type endpoints struct { - client rest.Interface - ns string -} - -// newEndpoints returns a Endpoints -func newEndpoints(c *CoreV1Client, namespace string) *endpoints { - return &endpoints{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the endpoints, and returns the corresponding endpoints object, and an error if there is any. -func (c *endpoints) Get(name string, options metav1.GetOptions) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Endpoints that match those selectors. 
-func (c *endpoints) List(opts metav1.ListOptions) (result *v1.EndpointsList, err error) { - result = &v1.EndpointsList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested endpoints. -func (c *endpoints) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a endpoints and creates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Create(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Post(). - Namespace(c.ns). - Resource("endpoints"). - Body(endpoints). - Do(). - Into(result) - return -} - -// Update takes the representation of a endpoints and updates it. Returns the server's representation of the endpoints, and an error, if there is any. -func (c *endpoints) Update(endpoints *v1.Endpoints) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Put(). - Namespace(c.ns). - Resource("endpoints"). - Name(endpoints.Name). - Body(endpoints). - Do(). - Into(result) - return -} - -// Delete takes name of the endpoints and deletes it. Returns an error if one occurs. -func (c *endpoints) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *endpoints) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("endpoints"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched endpoints. -func (c *endpoints) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Endpoints, err error) { - result = &v1.Endpoints{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("endpoints"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go deleted file mode 100644 index 57d30f9..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// EventsGetter has a method to return a EventInterface. -// A group's client should implement this interface. -type EventsGetter interface { - Events(namespace string) EventInterface -} - -// EventInterface has methods to work with Event resources. -type EventInterface interface { - Create(*v1.Event) (*v1.Event, error) - Update(*v1.Event) (*v1.Event, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Event, error) - List(opts metav1.ListOptions) (*v1.EventList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) - EventExpansion -} - -// events implements EventInterface -type events struct { - client rest.Interface - ns string -} - -// newEvents returns a Events -func newEvents(c *CoreV1Client, namespace string) *events { - return &events{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string, options metav1.GetOptions) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts metav1.ListOptions) (result *v1.EventList, err error) { - result = &v1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *v1.Event) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - Body(event). - Do(). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *v1.Event) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *events) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Event, err error) { - result = &v1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("events"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go deleted file mode 100644 index 6929ade..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/event_expansion.go +++ /dev/null @@ -1,164 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "fmt" - - "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/fields" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - ref "k8s.io/client-go/tools/reference" -) - -// The EventExpansion interface allows manually adding extra methods to the EventInterface. -type EventExpansion interface { - // CreateWithEventNamespace is the same as a Create, except that it sends the request to the event.Namespace. - CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) - // UpdateWithEventNamespace is the same as a Update, except that it sends the request to the event.Namespace. - UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) - PatchWithEventNamespace(event *v1.Event, data []byte) (*v1.Event, error) - // Search finds events about the specified object - Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) - // Returns the appropriate field selector based on the API version being used to communicate with the server. - // The returned field selector can be used with List and Watch to filter desired events. - GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector -} - -// CreateWithEventNamespace makes a new event. Returns the copy of the event the server returns, -// or an error. The namespace to create the event within is deduced from the -// event; it must either match this event client's namespace, or this event -// client must have been created with the "" namespace. -func (e *events) CreateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - if e.ns != "" && event.Namespace != e.ns { - return nil, fmt.Errorf("can't create an event with namespace '%v' in namespace '%v'", event.Namespace, e.ns) - } - result := &v1.Event{} - err := e.client.Post(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Body(event). - Do(). 
- Into(result) - return result, err -} - -// UpdateWithEventNamespace modifies an existing event. It returns the copy of the event that the server returns, -// or an error. The namespace and key to update the event within is deduced from the event. The -// namespace must either match this event client's namespace, or this event client must have been -// created with the "" namespace. Update also requires the ResourceVersion to be set in the event -// object. -func (e *events) UpdateWithEventNamespace(event *v1.Event) (*v1.Event, error) { - result := &v1.Event{} - err := e.client.Put(). - NamespaceIfScoped(event.Namespace, len(event.Namespace) > 0). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return result, err -} - -// PatchWithEventNamespace modifies an existing event. It returns the copy of -// the event that the server returns, or an error. The namespace and name of the -// target event is deduced from the incompleteEvent. The namespace must either -// match this event client's namespace, or this event client must have been -// created with the "" namespace. -func (e *events) PatchWithEventNamespace(incompleteEvent *v1.Event, data []byte) (*v1.Event, error) { - if e.ns != "" && incompleteEvent.Namespace != e.ns { - return nil, fmt.Errorf("can't patch an event with namespace '%v' in namespace '%v'", incompleteEvent.Namespace, e.ns) - } - result := &v1.Event{} - err := e.client.Patch(types.StrategicMergePatchType). - NamespaceIfScoped(incompleteEvent.Namespace, len(incompleteEvent.Namespace) > 0). - Resource("events"). - Name(incompleteEvent.Name). - Body(data). - Do(). - Into(result) - return result, err -} - -// Search finds events about the specified object. The namespace of the -// object must match this event's client namespace unless the event client -// was made with the "" namespace. -func (e *events) Search(scheme *runtime.Scheme, objOrRef runtime.Object) (*v1.EventList, error) { - ref, err := ref.GetReference(scheme, objOrRef) - if err != nil { - return nil, err - } - if e.ns != "" && ref.Namespace != e.ns { - return nil, fmt.Errorf("won't be able to find any events of namespace '%v' in namespace '%v'", ref.Namespace, e.ns) - } - stringRefKind := string(ref.Kind) - var refKind *string - if stringRefKind != "" { - refKind = &stringRefKind - } - stringRefUID := string(ref.UID) - var refUID *string - if stringRefUID != "" { - refUID = &stringRefUID - } - fieldSelector := e.GetFieldSelector(&ref.Name, &ref.Namespace, refKind, refUID) - return e.List(metav1.ListOptions{FieldSelector: fieldSelector.String()}) -} - -// Returns the appropriate field selector based on the API version being used to communicate with the server. -// The returned field selector can be used with List and Watch to filter desired events. 
-func (e *events) GetFieldSelector(involvedObjectName, involvedObjectNamespace, involvedObjectKind, involvedObjectUID *string) fields.Selector { - apiVersion := e.client.APIVersion().String() - field := fields.Set{} - if involvedObjectName != nil { - field[GetInvolvedObjectNameFieldLabel(apiVersion)] = *involvedObjectName - } - if involvedObjectNamespace != nil { - field["involvedObject.namespace"] = *involvedObjectNamespace - } - if involvedObjectKind != nil { - field["involvedObject.kind"] = *involvedObjectKind - } - if involvedObjectUID != nil { - field["involvedObject.uid"] = *involvedObjectUID - } - return field.AsSelector() -} - -// Returns the appropriate field label to use for name of the involved object as per the given API version. -func GetInvolvedObjectNameFieldLabel(version string) string { - return "involvedObject.name" -} - -// TODO: This is a temporary arrangement and will be removed once all clients are moved to use the clientset. -type EventSinkImpl struct { - Interface EventInterface -} - -func (e *EventSinkImpl) Create(event *v1.Event) (*v1.Event, error) { - return e.Interface.CreateWithEventNamespace(event) -} - -func (e *EventSinkImpl) Update(event *v1.Event) (*v1.Event, error) { - return e.Interface.UpdateWithEventNamespace(event) -} - -func (e *EventSinkImpl) Patch(event *v1.Event, data []byte) (*v1.Event, error) { - return e.Interface.PatchWithEventNamespace(event, data) -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go deleted file mode 100644 index 6e8591b..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/generated_expansion.go +++ /dev/null @@ -1,39 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type ComponentStatusExpansion interface{} - -type ConfigMapExpansion interface{} - -type EndpointsExpansion interface{} - -type LimitRangeExpansion interface{} - -type PersistentVolumeExpansion interface{} - -type PersistentVolumeClaimExpansion interface{} - -type PodTemplateExpansion interface{} - -type ReplicationControllerExpansion interface{} - -type ResourceQuotaExpansion interface{} - -type SecretExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go deleted file mode 100644 index 5b38566..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/limitrange.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// LimitRangesGetter has a method to return a LimitRangeInterface. -// A group's client should implement this interface. -type LimitRangesGetter interface { - LimitRanges(namespace string) LimitRangeInterface -} - -// LimitRangeInterface has methods to work with LimitRange resources. -type LimitRangeInterface interface { - Create(*v1.LimitRange) (*v1.LimitRange, error) - Update(*v1.LimitRange) (*v1.LimitRange, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.LimitRange, error) - List(opts metav1.ListOptions) (*v1.LimitRangeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) - LimitRangeExpansion -} - -// limitRanges implements LimitRangeInterface -type limitRanges struct { - client rest.Interface - ns string -} - -// newLimitRanges returns a LimitRanges -func newLimitRanges(c *CoreV1Client, namespace string) *limitRanges { - return &limitRanges{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the limitRange, and returns the corresponding limitRange object, and an error if there is any. -func (c *limitRanges) Get(name string, options metav1.GetOptions) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of LimitRanges that match those selectors. -func (c *limitRanges) List(opts metav1.ListOptions) (result *v1.LimitRangeList, err error) { - result = &v1.LimitRangeList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested limitRanges. -func (c *limitRanges) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a limitRange and creates it. Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Create(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Post(). - Namespace(c.ns). - Resource("limitranges"). - Body(limitRange). - Do(). - Into(result) - return -} - -// Update takes the representation of a limitRange and updates it. 
Returns the server's representation of the limitRange, and an error, if there is any. -func (c *limitRanges) Update(limitRange *v1.LimitRange) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Put(). - Namespace(c.ns). - Resource("limitranges"). - Name(limitRange.Name). - Body(limitRange). - Do(). - Into(result) - return -} - -// Delete takes name of the limitRange and deletes it. Returns an error if one occurs. -func (c *limitRanges) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *limitRanges) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("limitranges"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched limitRange. -func (c *limitRanges) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.LimitRange, err error) { - result = &v1.LimitRange{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("limitranges"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go deleted file mode 100644 index e22d07d..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace.go +++ /dev/null @@ -1,152 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// NamespacesGetter has a method to return a NamespaceInterface. -// A group's client should implement this interface. -type NamespacesGetter interface { - Namespaces() NamespaceInterface -} - -// NamespaceInterface has methods to work with Namespace resources. 
-type NamespaceInterface interface { - Create(*v1.Namespace) (*v1.Namespace, error) - Update(*v1.Namespace) (*v1.Namespace, error) - UpdateStatus(*v1.Namespace) (*v1.Namespace, error) - Delete(name string, options *metav1.DeleteOptions) error - Get(name string, options metav1.GetOptions) (*v1.Namespace, error) - List(opts metav1.ListOptions) (*v1.NamespaceList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) - NamespaceExpansion -} - -// namespaces implements NamespaceInterface -type namespaces struct { - client rest.Interface -} - -// newNamespaces returns a Namespaces -func newNamespaces(c *CoreV1Client) *namespaces { - return &namespaces{ - client: c.RESTClient(), - } -} - -// Get takes name of the namespace, and returns the corresponding namespace object, and an error if there is any. -func (c *namespaces) Get(name string, options metav1.GetOptions) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Get(). - Resource("namespaces"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Namespaces that match those selectors. -func (c *namespaces) List(opts metav1.ListOptions) (result *v1.NamespaceList, err error) { - result = &v1.NamespaceList{} - err = c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested namespaces. -func (c *namespaces) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("namespaces"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a namespace and creates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Create(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Post(). - Resource("namespaces"). - Body(namespace). - Do(). - Into(result) - return -} - -// Update takes the representation of a namespace and updates it. Returns the server's representation of the namespace, and an error, if there is any. -func (c *namespaces) Update(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - Body(namespace). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *namespaces) UpdateStatus(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put(). - Resource("namespaces"). - Name(namespace.Name). - SubResource("status"). - Body(namespace). - Do(). - Into(result) - return -} - -// Delete takes name of the namespace and deletes it. Returns an error if one occurs. -func (c *namespaces) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("namespaces"). - Name(name). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched namespace. 
-func (c *namespaces) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Patch(pt). - Resource("namespaces"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go deleted file mode 100644 index 17effe2..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/namespace_expansion.go +++ /dev/null @@ -1,31 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import "k8s.io/api/core/v1" - -// The NamespaceExpansion interface allows manually adding extra methods to the NamespaceInterface. -type NamespaceExpansion interface { - Finalize(item *v1.Namespace) (*v1.Namespace, error) -} - -// Finalize takes the representation of a namespace to update. Returns the server's representation of the namespace, and an error, if it occurs. -func (c *namespaces) Finalize(namespace *v1.Namespace) (result *v1.Namespace, err error) { - result = &v1.Namespace{} - err = c.client.Put().Resource("namespaces").Name(namespace.Name).SubResource("finalize").Body(namespace).Do().Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go deleted file mode 100644 index 5c769c1..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// NodesGetter has a method to return a NodeInterface. -// A group's client should implement this interface. -type NodesGetter interface { - Nodes() NodeInterface -} - -// NodeInterface has methods to work with Node resources. 
-type NodeInterface interface { - Create(*v1.Node) (*v1.Node, error) - Update(*v1.Node) (*v1.Node, error) - UpdateStatus(*v1.Node) (*v1.Node, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Node, error) - List(opts metav1.ListOptions) (*v1.NodeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) - NodeExpansion -} - -// nodes implements NodeInterface -type nodes struct { - client rest.Interface -} - -// newNodes returns a Nodes -func newNodes(c *CoreV1Client) *nodes { - return &nodes{ - client: c.RESTClient(), - } -} - -// Get takes name of the node, and returns the corresponding node object, and an error if there is any. -func (c *nodes) Get(name string, options metav1.GetOptions) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Get(). - Resource("nodes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Nodes that match those selectors. -func (c *nodes) List(opts metav1.ListOptions) (result *v1.NodeList, err error) { - result = &v1.NodeList{} - err = c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested nodes. -func (c *nodes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("nodes"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a node and creates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Create(node *v1.Node) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Post(). - Resource("nodes"). - Body(node). - Do(). - Into(result) - return -} - -// Update takes the representation of a node and updates it. Returns the server's representation of the node, and an error, if there is any. -func (c *nodes) Update(node *v1.Node) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - Body(node). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *nodes) UpdateStatus(node *v1.Node) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Put(). - Resource("nodes"). - Name(node.Name). - SubResource("status"). - Body(node). - Do(). - Into(result) - return -} - -// Delete takes name of the node and deletes it. Returns an error if one occurs. -func (c *nodes) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("nodes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *nodes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Resource("nodes"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched node. 
-func (c *nodes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Node, err error) { - result = &v1.Node{} - err = c.client.Patch(pt). - Resource("nodes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go deleted file mode 100644 index 5db29c3..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/node_expansion.go +++ /dev/null @@ -1,43 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/types" -) - -// The NodeExpansion interface allows manually adding extra methods to the NodeInterface. -type NodeExpansion interface { - // PatchStatus modifies the status of an existing node. It returns the copy - // of the node that the server returns, or an error. - PatchStatus(nodeName string, data []byte) (*v1.Node, error) -} - -// PatchStatus modifies the status of an existing node. It returns the copy of -// the node that the server returns, or an error. -func (c *nodes) PatchStatus(nodeName string, data []byte) (*v1.Node, error) { - result := &v1.Node{} - err := c.client.Patch(types.StrategicMergePatchType). - Resource("nodes"). - Name(nodeName). - SubResource("status"). - Body(data). - Do(). - Into(result) - return result, err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go deleted file mode 100644 index d5f19ae..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolume.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PersistentVolumesGetter has a method to return a PersistentVolumeInterface. -// A group's client should implement this interface. -type PersistentVolumesGetter interface { - PersistentVolumes() PersistentVolumeInterface -} - -// PersistentVolumeInterface has methods to work with PersistentVolume resources. 
-type PersistentVolumeInterface interface { - Create(*v1.PersistentVolume) (*v1.PersistentVolume, error) - Update(*v1.PersistentVolume) (*v1.PersistentVolume, error) - UpdateStatus(*v1.PersistentVolume) (*v1.PersistentVolume, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PersistentVolume, error) - List(opts metav1.ListOptions) (*v1.PersistentVolumeList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) - PersistentVolumeExpansion -} - -// persistentVolumes implements PersistentVolumeInterface -type persistentVolumes struct { - client rest.Interface -} - -// newPersistentVolumes returns a PersistentVolumes -func newPersistentVolumes(c *CoreV1Client) *persistentVolumes { - return &persistentVolumes{ - client: c.RESTClient(), - } -} - -// Get takes name of the persistentVolume, and returns the corresponding persistentVolume object, and an error if there is any. -func (c *persistentVolumes) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Get(). - Resource("persistentvolumes"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumes that match those selectors. -func (c *persistentVolumes) List(opts metav1.ListOptions) (result *v1.PersistentVolumeList, err error) { - result = &v1.PersistentVolumeList{} - err = c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumes. -func (c *persistentVolumes) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("persistentvolumes"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a persistentVolume and creates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Create(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Post(). - Resource("persistentvolumes"). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// Update takes the representation of a persistentVolume and updates it. Returns the server's representation of the persistentVolume, and an error, if there is any. -func (c *persistentVolumes) Update(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - Body(persistentVolume). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *persistentVolumes) UpdateStatus(persistentVolume *v1.PersistentVolume) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Put(). - Resource("persistentvolumes"). - Name(persistentVolume.Name). - SubResource("status"). - Body(persistentVolume). 
- Do(). - Into(result) - return -} - -// Delete takes name of the persistentVolume and deletes it. Returns an error if one occurs. -func (c *persistentVolumes) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Resource("persistentvolumes"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched persistentVolume. -func (c *persistentVolumes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolume, err error) { - result = &v1.PersistentVolume{} - err = c.client.Patch(pt). - Resource("persistentvolumes"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go deleted file mode 100644 index d32ae5d..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/persistentvolumeclaim.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PersistentVolumeClaimsGetter has a method to return a PersistentVolumeClaimInterface. -// A group's client should implement this interface. -type PersistentVolumeClaimsGetter interface { - PersistentVolumeClaims(namespace string) PersistentVolumeClaimInterface -} - -// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources. 
-type PersistentVolumeClaimInterface interface { - Create(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - Update(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - UpdateStatus(*v1.PersistentVolumeClaim) (*v1.PersistentVolumeClaim, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PersistentVolumeClaim, error) - List(opts metav1.ListOptions) (*v1.PersistentVolumeClaimList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) - PersistentVolumeClaimExpansion -} - -// persistentVolumeClaims implements PersistentVolumeClaimInterface -type persistentVolumeClaims struct { - client rest.Interface - ns string -} - -// newPersistentVolumeClaims returns a PersistentVolumeClaims -func newPersistentVolumeClaims(c *CoreV1Client, namespace string) *persistentVolumeClaims { - return &persistentVolumeClaims{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the persistentVolumeClaim, and returns the corresponding persistentVolumeClaim object, and an error if there is any. -func (c *persistentVolumeClaims) Get(name string, options metav1.GetOptions) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PersistentVolumeClaims that match those selectors. -func (c *persistentVolumeClaims) List(opts metav1.ListOptions) (result *v1.PersistentVolumeClaimList, err error) { - result = &v1.PersistentVolumeClaimList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested persistentVolumeClaims. -func (c *persistentVolumeClaims) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a persistentVolumeClaim and creates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Create(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Post(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// Update takes the representation of a persistentVolumeClaim and updates it. Returns the server's representation of the persistentVolumeClaim, and an error, if there is any. -func (c *persistentVolumeClaims) Update(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - Body(persistentVolumeClaim). - Do(). 
- Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *persistentVolumeClaims) UpdateStatus(persistentVolumeClaim *v1.PersistentVolumeClaim) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Put(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(persistentVolumeClaim.Name). - SubResource("status"). - Body(persistentVolumeClaim). - Do(). - Into(result) - return -} - -// Delete takes name of the persistentVolumeClaim and deletes it. Returns an error if one occurs. -func (c *persistentVolumeClaims) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *persistentVolumeClaims) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched persistentVolumeClaim. -func (c *persistentVolumeClaims) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PersistentVolumeClaim, err error) { - result = &v1.PersistentVolumeClaim{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("persistentvolumeclaims"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go deleted file mode 100644 index b19c5a5..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodsGetter has a method to return a PodInterface. -// A group's client should implement this interface. -type PodsGetter interface { - Pods(namespace string) PodInterface -} - -// PodInterface has methods to work with Pod resources. 
-type PodInterface interface { - Create(*v1.Pod) (*v1.Pod, error) - Update(*v1.Pod) (*v1.Pod, error) - UpdateStatus(*v1.Pod) (*v1.Pod, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Pod, error) - List(opts metav1.ListOptions) (*v1.PodList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) - PodExpansion -} - -// pods implements PodInterface -type pods struct { - client rest.Interface - ns string -} - -// newPods returns a Pods -func newPods(c *CoreV1Client, namespace string) *pods { - return &pods{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the pod, and returns the corresponding pod object, and an error if there is any. -func (c *pods) Get(name string, options metav1.GetOptions) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Pods that match those selectors. -func (c *pods) List(opts metav1.ListOptions) (result *v1.PodList, err error) { - result = &v1.PodList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested pods. -func (c *pods) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a pod and creates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Create(pod *v1.Pod) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Post(). - Namespace(c.ns). - Resource("pods"). - Body(pod). - Do(). - Into(result) - return -} - -// Update takes the representation of a pod and updates it. Returns the server's representation of the pod, and an error, if there is any. -func (c *pods) Update(pod *v1.Pod) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - Body(pod). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *pods) UpdateStatus(pod *v1.Pod) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Put(). - Namespace(c.ns). - Resource("pods"). - Name(pod.Name). - SubResource("status"). - Body(pod). - Do(). - Into(result) - return -} - -// Delete takes name of the pod and deletes it. Returns an error if one occurs. -func (c *pods) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *pods) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("pods"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). 
- Error() -} - -// Patch applies the patch and returns the patched pod. -func (c *pods) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Pod, err error) { - result = &v1.Pod{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("pods"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go deleted file mode 100644 index ed876be..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/pod_expansion.go +++ /dev/null @@ -1,45 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/api/core/v1" - policy "k8s.io/api/policy/v1beta1" - "k8s.io/client-go/kubernetes/scheme" - restclient "k8s.io/client-go/rest" -) - -// The PodExpansion interface allows manually adding extra methods to the PodInterface. -type PodExpansion interface { - Bind(binding *v1.Binding) error - Evict(eviction *policy.Eviction) error - GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request -} - -// Bind applies the provided binding to the named pod in the current namespace (binding.Namespace is ignored). -func (c *pods) Bind(binding *v1.Binding) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(binding.Name).SubResource("binding").Body(binding).Do().Error() -} - -func (c *pods) Evict(eviction *policy.Eviction) error { - return c.client.Post().Namespace(c.ns).Resource("pods").Name(eviction.Name).SubResource("eviction").Body(eviction).Do().Error() -} - -// Get constructs a request for getting the logs for a pod -func (c *pods) GetLogs(name string, opts *v1.PodLogOptions) *restclient.Request { - return c.client.Get().Namespace(c.ns).Name(name).Resource("pods").SubResource("log").VersionedParams(opts, scheme.ParameterCodec) -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go deleted file mode 100644 index d644e17..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/podtemplate.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodTemplatesGetter has a method to return a PodTemplateInterface. -// A group's client should implement this interface. -type PodTemplatesGetter interface { - PodTemplates(namespace string) PodTemplateInterface -} - -// PodTemplateInterface has methods to work with PodTemplate resources. -type PodTemplateInterface interface { - Create(*v1.PodTemplate) (*v1.PodTemplate, error) - Update(*v1.PodTemplate) (*v1.PodTemplate, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.PodTemplate, error) - List(opts metav1.ListOptions) (*v1.PodTemplateList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) - PodTemplateExpansion -} - -// podTemplates implements PodTemplateInterface -type podTemplates struct { - client rest.Interface - ns string -} - -// newPodTemplates returns a PodTemplates -func newPodTemplates(c *CoreV1Client, namespace string) *podTemplates { - return &podTemplates{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podTemplate, and returns the corresponding podTemplate object, and an error if there is any. -func (c *podTemplates) Get(name string, options metav1.GetOptions) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodTemplates that match those selectors. -func (c *podTemplates) List(opts metav1.ListOptions) (result *v1.PodTemplateList, err error) { - result = &v1.PodTemplateList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podTemplates. -func (c *podTemplates) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podTemplate and creates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Create(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podtemplates"). - Body(podTemplate). - Do(). - Into(result) - return -} - -// Update takes the representation of a podTemplate and updates it. Returns the server's representation of the podTemplate, and an error, if there is any. -func (c *podTemplates) Update(podTemplate *v1.PodTemplate) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podtemplates"). - Name(podTemplate.Name). - Body(podTemplate). - Do(). - Into(result) - return -} - -// Delete takes name of the podTemplate and deletes it. 
Returns an error if one occurs. -func (c *podTemplates) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podTemplates) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podtemplates"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched podTemplate. -func (c *podTemplates) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PodTemplate, err error) { - result = &v1.PodTemplate{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podtemplates"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go deleted file mode 100644 index 17622f1..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/replicationcontroller.go +++ /dev/null @@ -1,206 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - v1beta1 "k8s.io/api/extensions/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ReplicationControllersGetter has a method to return a ReplicationControllerInterface. -// A group's client should implement this interface. -type ReplicationControllersGetter interface { - ReplicationControllers(namespace string) ReplicationControllerInterface -} - -// ReplicationControllerInterface has methods to work with ReplicationController resources. 
-type ReplicationControllerInterface interface { - Create(*v1.ReplicationController) (*v1.ReplicationController, error) - Update(*v1.ReplicationController) (*v1.ReplicationController, error) - UpdateStatus(*v1.ReplicationController) (*v1.ReplicationController, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ReplicationController, error) - List(opts metav1.ListOptions) (*v1.ReplicationControllerList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) - GetScale(replicationControllerName string, options metav1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) - - ReplicationControllerExpansion -} - -// replicationControllers implements ReplicationControllerInterface -type replicationControllers struct { - client rest.Interface - ns string -} - -// newReplicationControllers returns a ReplicationControllers -func newReplicationControllers(c *CoreV1Client, namespace string) *replicationControllers { - return &replicationControllers{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicationController, and returns the corresponding replicationController object, and an error if there is any. -func (c *replicationControllers) Get(name string, options metav1.GetOptions) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicationControllers that match those selectors. -func (c *replicationControllers) List(opts metav1.ListOptions) (result *v1.ReplicationControllerList, err error) { - result = &v1.ReplicationControllerList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicationControllers. -func (c *replicationControllers) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a replicationController and creates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Create(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicationController and updates it. Returns the server's representation of the replicationController, and an error, if there is any. -func (c *replicationControllers) Update(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). 
- Resource("replicationcontrollers"). - Name(replicationController.Name). - Body(replicationController). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicationControllers) UpdateStatus(replicationController *v1.ReplicationController) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationController.Name). - SubResource("status"). - Body(replicationController). - Do(). - Into(result) - return -} - -// Delete takes name of the replicationController and deletes it. Returns an error if one occurs. -func (c *replicationControllers) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicationControllers) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicationcontrollers"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched replicationController. -func (c *replicationControllers) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ReplicationController, err error) { - result = &v1.ReplicationController{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicationcontrollers"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the replicationController, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicationControllers) GetScale(replicationControllerName string, options metav1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationControllerName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicationControllers) UpdateScale(replicationControllerName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicationcontrollers"). - Name(replicationControllerName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go deleted file mode 100644 index 8b74a40..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/resourcequota.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ResourceQuotasGetter has a method to return a ResourceQuotaInterface. -// A group's client should implement this interface. -type ResourceQuotasGetter interface { - ResourceQuotas(namespace string) ResourceQuotaInterface -} - -// ResourceQuotaInterface has methods to work with ResourceQuota resources. -type ResourceQuotaInterface interface { - Create(*v1.ResourceQuota) (*v1.ResourceQuota, error) - Update(*v1.ResourceQuota) (*v1.ResourceQuota, error) - UpdateStatus(*v1.ResourceQuota) (*v1.ResourceQuota, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ResourceQuota, error) - List(opts metav1.ListOptions) (*v1.ResourceQuotaList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) - ResourceQuotaExpansion -} - -// resourceQuotas implements ResourceQuotaInterface -type resourceQuotas struct { - client rest.Interface - ns string -} - -// newResourceQuotas returns a ResourceQuotas -func newResourceQuotas(c *CoreV1Client, namespace string) *resourceQuotas { - return &resourceQuotas{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the resourceQuota, and returns the corresponding resourceQuota object, and an error if there is any. -func (c *resourceQuotas) Get(name string, options metav1.GetOptions) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ResourceQuotas that match those selectors. -func (c *resourceQuotas) List(opts metav1.ListOptions) (result *v1.ResourceQuotaList, err error) { - result = &v1.ResourceQuotaList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested resourceQuotas. -func (c *resourceQuotas) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a resourceQuota and creates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Create(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Post(). - Namespace(c.ns). 
- Resource("resourcequotas"). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// Update takes the representation of a resourceQuota and updates it. Returns the server's representation of the resourceQuota, and an error, if there is any. -func (c *resourceQuotas) Update(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *resourceQuotas) UpdateStatus(resourceQuota *v1.ResourceQuota) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Put(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(resourceQuota.Name). - SubResource("status"). - Body(resourceQuota). - Do(). - Into(result) - return -} - -// Delete takes name of the resourceQuota and deletes it. Returns an error if one occurs. -func (c *resourceQuotas) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *resourceQuotas) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("resourcequotas"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched resourceQuota. -func (c *resourceQuotas) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ResourceQuota, err error) { - result = &v1.ResourceQuota{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("resourcequotas"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go deleted file mode 100644 index 4ea9796..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/secret.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// SecretsGetter has a method to return a SecretInterface. -// A group's client should implement this interface. -type SecretsGetter interface { - Secrets(namespace string) SecretInterface -} - -// SecretInterface has methods to work with Secret resources. 
-type SecretInterface interface { - Create(*v1.Secret) (*v1.Secret, error) - Update(*v1.Secret) (*v1.Secret, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Secret, error) - List(opts metav1.ListOptions) (*v1.SecretList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) - SecretExpansion -} - -// secrets implements SecretInterface -type secrets struct { - client rest.Interface - ns string -} - -// newSecrets returns a Secrets -func newSecrets(c *CoreV1Client, namespace string) *secrets { - return &secrets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the secret, and returns the corresponding secret object, and an error if there is any. -func (c *secrets) Get(name string, options metav1.GetOptions) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Secrets that match those selectors. -func (c *secrets) List(opts metav1.ListOptions) (result *v1.SecretList, err error) { - result = &v1.SecretList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested secrets. -func (c *secrets) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a secret and creates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Create(secret *v1.Secret) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Post(). - Namespace(c.ns). - Resource("secrets"). - Body(secret). - Do(). - Into(result) - return -} - -// Update takes the representation of a secret and updates it. Returns the server's representation of the secret, and an error, if there is any. -func (c *secrets) Update(secret *v1.Secret) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Put(). - Namespace(c.ns). - Resource("secrets"). - Name(secret.Name). - Body(secret). - Do(). - Into(result) - return -} - -// Delete takes name of the secret and deletes it. Returns an error if one occurs. -func (c *secrets) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *secrets) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("secrets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched secret. -func (c *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) { - result = &v1.Secret{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("secrets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go deleted file mode 100644 index 6c42ca8..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ServicesGetter has a method to return a ServiceInterface. -// A group's client should implement this interface. -type ServicesGetter interface { - Services(namespace string) ServiceInterface -} - -// ServiceInterface has methods to work with Service resources. -type ServiceInterface interface { - Create(*v1.Service) (*v1.Service, error) - Update(*v1.Service) (*v1.Service, error) - UpdateStatus(*v1.Service) (*v1.Service, error) - Delete(name string, options *metav1.DeleteOptions) error - Get(name string, options metav1.GetOptions) (*v1.Service, error) - List(opts metav1.ListOptions) (*v1.ServiceList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) - ServiceExpansion -} - -// services implements ServiceInterface -type services struct { - client rest.Interface - ns string -} - -// newServices returns a Services -func newServices(c *CoreV1Client, namespace string) *services { - return &services{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the service, and returns the corresponding service object, and an error if there is any. -func (c *services) Get(name string, options metav1.GetOptions) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Services that match those selectors. -func (c *services) List(opts metav1.ListOptions) (result *v1.ServiceList, err error) { - result = &v1.ServiceList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested services. -func (c *services) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("services"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a service and creates it. 
Returns the server's representation of the service, and an error, if there is any. -func (c *services) Create(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Post(). - Namespace(c.ns). - Resource("services"). - Body(service). - Do(). - Into(result) - return -} - -// Update takes the representation of a service and updates it. Returns the server's representation of the service, and an error, if there is any. -func (c *services) Update(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - Body(service). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *services) UpdateStatus(service *v1.Service) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Put(). - Namespace(c.ns). - Resource("services"). - Name(service.Name). - SubResource("status"). - Body(service). - Do(). - Into(result) - return -} - -// Delete takes name of the service and deletes it. Returns an error if one occurs. -func (c *services) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("services"). - Name(name). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched service. -func (c *services) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Service, err error) { - result = &v1.Service{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("services"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go deleted file mode 100644 index 4937fd1..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/service_expansion.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - "k8s.io/apimachinery/pkg/util/net" - restclient "k8s.io/client-go/rest" -) - -// The ServiceExpansion interface allows manually adding extra methods to the ServiceInterface. -type ServiceExpansion interface { - ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper -} - -// ProxyGet returns a response of the service by calling it through the proxy. -func (c *services) ProxyGet(scheme, name, port, path string, params map[string]string) restclient.ResponseWrapper { - request := c.client.Get(). - Namespace(c.ns). - Resource("services"). - SubResource("proxy"). - Name(net.JoinSchemeNamePort(scheme, name, port)). 
- Suffix(path) - for k, v := range params { - request = request.Param(k, v) - } - return request -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go deleted file mode 100644 index f3ab7eb..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ServiceAccountsGetter has a method to return a ServiceAccountInterface. -// A group's client should implement this interface. -type ServiceAccountsGetter interface { - ServiceAccounts(namespace string) ServiceAccountInterface -} - -// ServiceAccountInterface has methods to work with ServiceAccount resources. -type ServiceAccountInterface interface { - Create(*v1.ServiceAccount) (*v1.ServiceAccount, error) - Update(*v1.ServiceAccount) (*v1.ServiceAccount, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ServiceAccount, error) - List(opts metav1.ListOptions) (*v1.ServiceAccountList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) - ServiceAccountExpansion -} - -// serviceAccounts implements ServiceAccountInterface -type serviceAccounts struct { - client rest.Interface - ns string -} - -// newServiceAccounts returns a ServiceAccounts -func newServiceAccounts(c *CoreV1Client, namespace string) *serviceAccounts { - return &serviceAccounts{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the serviceAccount, and returns the corresponding serviceAccount object, and an error if there is any. -func (c *serviceAccounts) Get(name string, options metav1.GetOptions) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ServiceAccounts that match those selectors. -func (c *serviceAccounts) List(opts metav1.ListOptions) (result *v1.ServiceAccountList, err error) { - result = &v1.ServiceAccountList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested serviceAccounts. 
-func (c *serviceAccounts) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a serviceAccount and creates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Create(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - Body(serviceAccount). - Do(). - Into(result) - return -} - -// Update takes the representation of a serviceAccount and updates it. Returns the server's representation of the serviceAccount, and an error, if there is any. -func (c *serviceAccounts) Update(serviceAccount *v1.ServiceAccount) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Put(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(serviceAccount.Name). - Body(serviceAccount). - Do(). - Into(result) - return -} - -// Delete takes name of the serviceAccount and deletes it. Returns an error if one occurs. -func (c *serviceAccounts) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *serviceAccounts) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("serviceaccounts"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched serviceAccount. -func (c *serviceAccounts) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ServiceAccount, err error) { - result = &v1.ServiceAccount{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("serviceaccounts"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go deleted file mode 100644 index eaf643f..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/core/v1/serviceaccount_expansion.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Copyright 2018 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1 - -import ( - authenticationv1 "k8s.io/api/authentication/v1" -) - -// The ServiceAccountExpansion interface allows manually adding extra methods -// to the ServiceAccountInterface. -type ServiceAccountExpansion interface { - CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) -} - -// CreateToken creates a new token for a serviceaccount. 
-func (c *serviceAccounts) CreateToken(name string, tr *authenticationv1.TokenRequest) (*authenticationv1.TokenRequest, error) { - result := &authenticationv1.TokenRequest{} - err := c.client.Post(). - Namespace(c.ns). - Resource("serviceaccounts"). - SubResource("token"). - Name(name). - Body(tr). - Do(). - Into(result) - return result, err -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go deleted file mode 100644 index af7d060..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/event.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/events/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// EventsGetter has a method to return a EventInterface. -// A group's client should implement this interface. -type EventsGetter interface { - Events(namespace string) EventInterface -} - -// EventInterface has methods to work with Event resources. 
-type EventInterface interface { - Create(*v1beta1.Event) (*v1beta1.Event, error) - Update(*v1beta1.Event) (*v1beta1.Event, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Event, error) - List(opts v1.ListOptions) (*v1beta1.EventList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) - EventExpansion -} - -// events implements EventInterface -type events struct { - client rest.Interface - ns string -} - -// newEvents returns a Events -func newEvents(c *EventsV1beta1Client, namespace string) *events { - return &events{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the event, and returns the corresponding event object, and an error if there is any. -func (c *events) Get(name string, options v1.GetOptions) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Events that match those selectors. -func (c *events) List(opts v1.ListOptions) (result *v1beta1.EventList, err error) { - result = &v1beta1.EventList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested events. -func (c *events) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a event and creates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Create(event *v1beta1.Event) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Post(). - Namespace(c.ns). - Resource("events"). - Body(event). - Do(). - Into(result) - return -} - -// Update takes the representation of a event and updates it. Returns the server's representation of the event, and an error, if there is any. -func (c *events) Update(event *v1beta1.Event) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Put(). - Namespace(c.ns). - Resource("events"). - Name(event.Name). - Body(event). - Do(). - Into(result) - return -} - -// Delete takes name of the event and deletes it. Returns an error if one occurs. -func (c *events) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *events) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("events"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched event. -func (c *events) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Event, err error) { - result = &v1beta1.Event{} - err = c.client.Patch(pt). - Namespace(c.ns). 
- Resource("events"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go deleted file mode 100644 index fb59635..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/events_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/events/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type EventsV1beta1Interface interface { - RESTClient() rest.Interface - EventsGetter -} - -// EventsV1beta1Client is used to interact with features provided by the events.k8s.io group. -type EventsV1beta1Client struct { - restClient rest.Interface -} - -func (c *EventsV1beta1Client) Events(namespace string) EventInterface { - return newEvents(c, namespace) -} - -// NewForConfig creates a new EventsV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*EventsV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &EventsV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new EventsV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *EventsV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new EventsV1beta1Client for the given RESTClient. -func New(c rest.Interface) *EventsV1beta1Client { - return &EventsV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *EventsV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go deleted file mode 100644 index e27f693..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/events/v1beta1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type EventExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go deleted file mode 100644 index 85294be..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/daemonset.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// DaemonSetsGetter has a method to return a DaemonSetInterface. -// A group's client should implement this interface. -type DaemonSetsGetter interface { - DaemonSets(namespace string) DaemonSetInterface -} - -// DaemonSetInterface has methods to work with DaemonSet resources. -type DaemonSetInterface interface { - Create(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - Update(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - UpdateStatus(*v1beta1.DaemonSet) (*v1beta1.DaemonSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.DaemonSet, error) - List(opts v1.ListOptions) (*v1beta1.DaemonSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) - DaemonSetExpansion -} - -// daemonSets implements DaemonSetInterface -type daemonSets struct { - client rest.Interface - ns string -} - -// newDaemonSets returns a DaemonSets -func newDaemonSets(c *ExtensionsV1beta1Client, namespace string) *daemonSets { - return &daemonSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the daemonSet, and returns the corresponding daemonSet object, and an error if there is any. -func (c *daemonSets) Get(name string, options v1.GetOptions) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of DaemonSets that match those selectors. 
-func (c *daemonSets) List(opts v1.ListOptions) (result *v1beta1.DaemonSetList, err error) { - result = &v1beta1.DaemonSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested daemonSets. -func (c *daemonSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a daemonSet and creates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Create(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("daemonsets"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a daemonSet and updates it. Returns the server's representation of the daemonSet, and an error, if there is any. -func (c *daemonSets) Update(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - Body(daemonSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *daemonSets) UpdateStatus(daemonSet *v1beta1.DaemonSet) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("daemonsets"). - Name(daemonSet.Name). - SubResource("status"). - Body(daemonSet). - Do(). - Into(result) - return -} - -// Delete takes name of the daemonSet and deletes it. Returns an error if one occurs. -func (c *daemonSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *daemonSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("daemonsets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched daemonSet. -func (c *daemonSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.DaemonSet, err error) { - result = &v1beta1.DaemonSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("daemonsets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go deleted file mode 100644 index 89183d2..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// DeploymentsGetter has a method to return a DeploymentInterface. -// A group's client should implement this interface. -type DeploymentsGetter interface { - Deployments(namespace string) DeploymentInterface -} - -// DeploymentInterface has methods to work with Deployment resources. -type DeploymentInterface interface { - Create(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Update(*v1beta1.Deployment) (*v1beta1.Deployment, error) - UpdateStatus(*v1beta1.Deployment) (*v1beta1.Deployment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Deployment, error) - List(opts v1.ListOptions) (*v1beta1.DeploymentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) - GetScale(deploymentName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(deploymentName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) - - DeploymentExpansion -} - -// deployments implements DeploymentInterface -type deployments struct { - client rest.Interface - ns string -} - -// newDeployments returns a Deployments -func newDeployments(c *ExtensionsV1beta1Client, namespace string) *deployments { - return &deployments{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the deployment, and returns the corresponding deployment object, and an error if there is any. -func (c *deployments) Get(name string, options v1.GetOptions) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Deployments that match those selectors. -func (c *deployments) List(opts v1.ListOptions) (result *v1beta1.DeploymentList, err error) { - result = &v1beta1.DeploymentList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested deployments. -func (c *deployments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a deployment and creates it. Returns the server's representation of the deployment, and an error, if there is any. 
-func (c *deployments) Create(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Post(). - Namespace(c.ns). - Resource("deployments"). - Body(deployment). - Do(). - Into(result) - return -} - -// Update takes the representation of a deployment and updates it. Returns the server's representation of the deployment, and an error, if there is any. -func (c *deployments) Update(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - Body(deployment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *deployments) UpdateStatus(deployment *v1beta1.Deployment) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deployment.Name). - SubResource("status"). - Body(deployment). - Do(). - Into(result) - return -} - -// Delete takes name of the deployment and deletes it. Returns an error if one occurs. -func (c *deployments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *deployments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("deployments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched deployment. -func (c *deployments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Deployment, err error) { - result = &v1beta1.Deployment{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("deployments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the deployment, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *deployments) GetScale(deploymentName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *deployments) UpdateScale(deploymentName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("deployments"). - Name(deploymentName). - SubResource("scale"). - Body(scale). - Do(). 
- Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go deleted file mode 100644 index 24734be..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/deployment_expansion.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import "k8s.io/api/extensions/v1beta1" - -// The DeploymentExpansion interface allows manually adding extra methods to the DeploymentInterface. -type DeploymentExpansion interface { - Rollback(*v1beta1.DeploymentRollback) error -} - -// Rollback applied the provided DeploymentRollback to the named deployment in the current namespace. -func (c *deployments) Rollback(deploymentRollback *v1beta1.DeploymentRollback) error { - return c.client.Post().Namespace(c.ns).Resource("deployments").Name(deploymentRollback.Name).SubResource("rollback").Body(deploymentRollback).Do().Error() -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go deleted file mode 100644 index 1961ffc..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/extensions_client.go +++ /dev/null @@ -1,115 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type ExtensionsV1beta1Interface interface { - RESTClient() rest.Interface - DaemonSetsGetter - DeploymentsGetter - IngressesGetter - PodSecurityPoliciesGetter - ReplicaSetsGetter - ScalesGetter -} - -// ExtensionsV1beta1Client is used to interact with features provided by the extensions group. -type ExtensionsV1beta1Client struct { - restClient rest.Interface -} - -func (c *ExtensionsV1beta1Client) DaemonSets(namespace string) DaemonSetInterface { - return newDaemonSets(c, namespace) -} - -func (c *ExtensionsV1beta1Client) Deployments(namespace string) DeploymentInterface { - return newDeployments(c, namespace) -} - -func (c *ExtensionsV1beta1Client) Ingresses(namespace string) IngressInterface { - return newIngresses(c, namespace) -} - -func (c *ExtensionsV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - -func (c *ExtensionsV1beta1Client) ReplicaSets(namespace string) ReplicaSetInterface { - return newReplicaSets(c, namespace) -} - -func (c *ExtensionsV1beta1Client) Scales(namespace string) ScaleInterface { - return newScales(c, namespace) -} - -// NewForConfig creates a new ExtensionsV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*ExtensionsV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &ExtensionsV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new ExtensionsV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *ExtensionsV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new ExtensionsV1beta1Client for the given RESTClient. -func New(c rest.Interface) *ExtensionsV1beta1Client { - return &ExtensionsV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *ExtensionsV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go deleted file mode 100644 index cfaeebd..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/generated_expansion.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type DaemonSetExpansion interface{} - -type IngressExpansion interface{} - -type PodSecurityPolicyExpansion interface{} - -type ReplicaSetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go deleted file mode 100644 index f8b664c..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/ingress.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// IngressesGetter has a method to return a IngressInterface. -// A group's client should implement this interface. -type IngressesGetter interface { - Ingresses(namespace string) IngressInterface -} - -// IngressInterface has methods to work with Ingress resources. -type IngressInterface interface { - Create(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Update(*v1beta1.Ingress) (*v1beta1.Ingress, error) - UpdateStatus(*v1beta1.Ingress) (*v1beta1.Ingress, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.Ingress, error) - List(opts v1.ListOptions) (*v1beta1.IngressList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) - IngressExpansion -} - -// ingresses implements IngressInterface -type ingresses struct { - client rest.Interface - ns string -} - -// newIngresses returns a Ingresses -func newIngresses(c *ExtensionsV1beta1Client, namespace string) *ingresses { - return &ingresses{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the ingress, and returns the corresponding ingress object, and an error if there is any. -func (c *ingresses) Get(name string, options v1.GetOptions) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). 
- Into(result) - return -} - -// List takes label and field selectors, and returns the list of Ingresses that match those selectors. -func (c *ingresses) List(opts v1.ListOptions) (result *v1beta1.IngressList, err error) { - result = &v1beta1.IngressList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested ingresses. -func (c *ingresses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a ingress and creates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Create(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Post(). - Namespace(c.ns). - Resource("ingresses"). - Body(ingress). - Do(). - Into(result) - return -} - -// Update takes the representation of a ingress and updates it. Returns the server's representation of the ingress, and an error, if there is any. -func (c *ingresses) Update(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - Body(ingress). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *ingresses) UpdateStatus(ingress *v1beta1.Ingress) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Put(). - Namespace(c.ns). - Resource("ingresses"). - Name(ingress.Name). - SubResource("status"). - Body(ingress). - Do(). - Into(result) - return -} - -// Delete takes name of the ingress and deletes it. Returns an error if one occurs. -func (c *ingresses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *ingresses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("ingresses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched ingress. -func (c *ingresses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Ingress, err error) { - result = &v1beta1.Ingress{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("ingresses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 8099d77..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. -type PodSecurityPolicyInterface interface { - Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *ExtensionsV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. 
-func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go deleted file mode 100644 index 7e61fa2..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/replicaset.go +++ /dev/null @@ -1,205 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/extensions/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ReplicaSetsGetter has a method to return a ReplicaSetInterface. -// A group's client should implement this interface. -type ReplicaSetsGetter interface { - ReplicaSets(namespace string) ReplicaSetInterface -} - -// ReplicaSetInterface has methods to work with ReplicaSet resources. 
-type ReplicaSetInterface interface { - Create(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - Update(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - UpdateStatus(*v1beta1.ReplicaSet) (*v1beta1.ReplicaSet, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.ReplicaSet, error) - List(opts v1.ListOptions) (*v1beta1.ReplicaSetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) - GetScale(replicaSetName string, options v1.GetOptions) (*v1beta1.Scale, error) - UpdateScale(replicaSetName string, scale *v1beta1.Scale) (*v1beta1.Scale, error) - - ReplicaSetExpansion -} - -// replicaSets implements ReplicaSetInterface -type replicaSets struct { - client rest.Interface - ns string -} - -// newReplicaSets returns a ReplicaSets -func newReplicaSets(c *ExtensionsV1beta1Client, namespace string) *replicaSets { - return &replicaSets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the replicaSet, and returns the corresponding replicaSet object, and an error if there is any. -func (c *replicaSets) Get(name string, options v1.GetOptions) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ReplicaSets that match those selectors. -func (c *replicaSets) List(opts v1.ListOptions) (result *v1beta1.ReplicaSetList, err error) { - result = &v1beta1.ReplicaSetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested replicaSets. -func (c *replicaSets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a replicaSet and creates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Create(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Post(). - Namespace(c.ns). - Resource("replicasets"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Update takes the representation of a replicaSet and updates it. Returns the server's representation of the replicaSet, and an error, if there is any. -func (c *replicaSets) Update(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSet.Name). - Body(replicaSet). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *replicaSets) UpdateStatus(replicaSet *v1beta1.ReplicaSet) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). 
- Name(replicaSet.Name). - SubResource("status"). - Body(replicaSet). - Do(). - Into(result) - return -} - -// Delete takes name of the replicaSet and deletes it. Returns an error if one occurs. -func (c *replicaSets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *replicaSets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("replicasets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched replicaSet. -func (c *replicaSets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ReplicaSet, err error) { - result = &v1beta1.ReplicaSet{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("replicasets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} - -// GetScale takes name of the replicaSet, and returns the corresponding v1beta1.Scale object, and an error if there is any. -func (c *replicaSets) GetScale(replicaSetName string, options v1.GetOptions) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Get(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// UpdateScale takes the top resource name and the representation of a scale and updates it. Returns the server's representation of the scale, and an error, if there is any. -func (c *replicaSets) UpdateScale(replicaSetName string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - err = c.client.Put(). - Namespace(c.ns). - Resource("replicasets"). - Name(replicaSetName). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go deleted file mode 100644 index 6ee677a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// ScalesGetter has a method to return a ScaleInterface. -// A group's client should implement this interface. -type ScalesGetter interface { - Scales(namespace string) ScaleInterface -} - -// ScaleInterface has methods to work with Scale resources. 
-type ScaleInterface interface { - ScaleExpansion -} - -// scales implements ScaleInterface -type scales struct { - client rest.Interface - ns string -} - -// newScales returns a Scales -func newScales(c *ExtensionsV1beta1Client, namespace string) *scales { - return &scales{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go deleted file mode 100644 index c9733cb..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/extensions/v1beta1/scale_expansion.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - "k8s.io/api/extensions/v1beta1" - "k8s.io/apimachinery/pkg/api/meta" - "k8s.io/apimachinery/pkg/runtime/schema" -) - -// The ScaleExpansion interface allows manually adding extra methods to the ScaleInterface. -type ScaleExpansion interface { - Get(kind string, name string) (*v1beta1.Scale, error) - Update(kind string, scale *v1beta1.Scale) (*v1beta1.Scale, error) -} - -// Get takes the reference to scale subresource and returns the subresource or error, if one occurs. -func (c *scales) Get(kind string, name string) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Get(). - Namespace(c.ns). - Resource(resource.Resource). - Name(name). - SubResource("scale"). - Do(). - Into(result) - return -} - -func (c *scales) Update(kind string, scale *v1beta1.Scale) (result *v1beta1.Scale, err error) { - result = &v1beta1.Scale{} - - // TODO this method needs to take a proper unambiguous kind - fullyQualifiedKind := schema.GroupVersionKind{Kind: kind} - resource, _ := meta.UnsafeGuessKindToResource(fullyQualifiedKind) - - err = c.client.Put(). - Namespace(scale.Namespace). - Resource(resource.Resource). - Name(scale.Name). - SubResource("scale"). - Body(scale). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go deleted file mode 100644 index 7d77495..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type NetworkPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go deleted file mode 100644 index 8684db4..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networking_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/networking/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type NetworkingV1Interface interface { - RESTClient() rest.Interface - NetworkPoliciesGetter -} - -// NetworkingV1Client is used to interact with features provided by the networking.k8s.io group. -type NetworkingV1Client struct { - restClient rest.Interface -} - -func (c *NetworkingV1Client) NetworkPolicies(namespace string) NetworkPolicyInterface { - return newNetworkPolicies(c, namespace) -} - -// NewForConfig creates a new NetworkingV1Client for the given config. -func NewForConfig(c *rest.Config) (*NetworkingV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &NetworkingV1Client{client}, nil -} - -// NewForConfigOrDie creates a new NetworkingV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *NetworkingV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new NetworkingV1Client for the given RESTClient. 
-func New(c rest.Interface) *NetworkingV1Client { - return &NetworkingV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *NetworkingV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go deleted file mode 100644 index d8f0a6b..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/networking/v1/networkpolicy.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/networking/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// NetworkPoliciesGetter has a method to return a NetworkPolicyInterface. -// A group's client should implement this interface. -type NetworkPoliciesGetter interface { - NetworkPolicies(namespace string) NetworkPolicyInterface -} - -// NetworkPolicyInterface has methods to work with NetworkPolicy resources. -type NetworkPolicyInterface interface { - Create(*v1.NetworkPolicy) (*v1.NetworkPolicy, error) - Update(*v1.NetworkPolicy) (*v1.NetworkPolicy, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.NetworkPolicy, error) - List(opts metav1.ListOptions) (*v1.NetworkPolicyList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) - NetworkPolicyExpansion -} - -// networkPolicies implements NetworkPolicyInterface -type networkPolicies struct { - client rest.Interface - ns string -} - -// newNetworkPolicies returns a NetworkPolicies -func newNetworkPolicies(c *NetworkingV1Client, namespace string) *networkPolicies { - return &networkPolicies{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the networkPolicy, and returns the corresponding networkPolicy object, and an error if there is any. -func (c *networkPolicies) Get(name string, options metav1.GetOptions) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). 
- VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of NetworkPolicies that match those selectors. -func (c *networkPolicies) List(opts metav1.ListOptions) (result *v1.NetworkPolicyList, err error) { - result = &v1.NetworkPolicyList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested networkPolicies. -func (c *networkPolicies) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a networkPolicy and creates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Create(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Post(). - Namespace(c.ns). - Resource("networkpolicies"). - Body(networkPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a networkPolicy and updates it. Returns the server's representation of the networkPolicy, and an error, if there is any. -func (c *networkPolicies) Update(networkPolicy *v1.NetworkPolicy) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Put(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(networkPolicy.Name). - Body(networkPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the networkPolicy and deletes it. Returns an error if one occurs. -func (c *networkPolicies) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *networkPolicies) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("networkpolicies"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched networkPolicy. -func (c *networkPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NetworkPolicy, err error) { - result = &v1.NetworkPolicy{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("networkpolicies"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go deleted file mode 100644 index 12e8e76..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction.go +++ /dev/null @@ -1,48 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - rest "k8s.io/client-go/rest" -) - -// EvictionsGetter has a method to return a EvictionInterface. -// A group's client should implement this interface. -type EvictionsGetter interface { - Evictions(namespace string) EvictionInterface -} - -// EvictionInterface has methods to work with Eviction resources. -type EvictionInterface interface { - EvictionExpansion -} - -// evictions implements EvictionInterface -type evictions struct { - client rest.Interface - ns string -} - -// newEvictions returns a Evictions -func newEvictions(c *PolicyV1beta1Client, namespace string) *evictions { - return &evictions{ - client: c.RESTClient(), - ns: namespace, - } -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go deleted file mode 100644 index 40bad26..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/eviction_expansion.go +++ /dev/null @@ -1,38 +0,0 @@ -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package v1beta1 - -import ( - policy "k8s.io/api/policy/v1beta1" -) - -// The EvictionExpansion interface allows manually adding extra methods to the ScaleInterface. -type EvictionExpansion interface { - Evict(eviction *policy.Eviction) error -} - -func (c *evictions) Evict(eviction *policy.Eviction) error { - return c.client.Post(). - AbsPath("/api/v1"). - Namespace(eviction.Namespace). - Resource("pods"). - Name(eviction.Name). - SubResource("eviction"). - Body(eviction). - Do(). 
- Error() -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go deleted file mode 100644 index 078c16d..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type PodDisruptionBudgetExpansion interface{} - -type PodSecurityPolicyExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go deleted file mode 100644 index a11f27e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/poddisruptionbudget.go +++ /dev/null @@ -1,174 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodDisruptionBudgetsGetter has a method to return a PodDisruptionBudgetInterface. -// A group's client should implement this interface. -type PodDisruptionBudgetsGetter interface { - PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface -} - -// PodDisruptionBudgetInterface has methods to work with PodDisruptionBudget resources. 
-type PodDisruptionBudgetInterface interface { - Create(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - Update(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - UpdateStatus(*v1beta1.PodDisruptionBudget) (*v1beta1.PodDisruptionBudget, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.PodDisruptionBudget, error) - List(opts v1.ListOptions) (*v1beta1.PodDisruptionBudgetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) - PodDisruptionBudgetExpansion -} - -// podDisruptionBudgets implements PodDisruptionBudgetInterface -type podDisruptionBudgets struct { - client rest.Interface - ns string -} - -// newPodDisruptionBudgets returns a PodDisruptionBudgets -func newPodDisruptionBudgets(c *PolicyV1beta1Client, namespace string) *podDisruptionBudgets { - return &podDisruptionBudgets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podDisruptionBudget, and returns the corresponding podDisruptionBudget object, and an error if there is any. -func (c *podDisruptionBudgets) Get(name string, options v1.GetOptions) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodDisruptionBudgets that match those selectors. -func (c *podDisruptionBudgets) List(opts v1.ListOptions) (result *v1beta1.PodDisruptionBudgetList, err error) { - result = &v1beta1.PodDisruptionBudgetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podDisruptionBudgets. -func (c *podDisruptionBudgets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podDisruptionBudget and creates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Create(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Post(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -// Update takes the representation of a podDisruptionBudget and updates it. Returns the server's representation of the podDisruptionBudget, and an error, if there is any. -func (c *podDisruptionBudgets) Update(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. 
-// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *podDisruptionBudgets) UpdateStatus(podDisruptionBudget *v1beta1.PodDisruptionBudget) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Put(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(podDisruptionBudget.Name). - SubResource("status"). - Body(podDisruptionBudget). - Do(). - Into(result) - return -} - -// Delete takes name of the podDisruptionBudget and deletes it. Returns an error if one occurs. -func (c *podDisruptionBudgets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podDisruptionBudgets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched podDisruptionBudget. -func (c *podDisruptionBudgets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodDisruptionBudget, err error) { - result = &v1beta1.PodDisruptionBudget{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("poddisruptionbudgets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go deleted file mode 100644 index 355be1e..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/podsecuritypolicy.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodSecurityPoliciesGetter has a method to return a PodSecurityPolicyInterface. -// A group's client should implement this interface. -type PodSecurityPoliciesGetter interface { - PodSecurityPolicies() PodSecurityPolicyInterface -} - -// PodSecurityPolicyInterface has methods to work with PodSecurityPolicy resources. 
-type PodSecurityPolicyInterface interface { - Create(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Update(*v1beta1.PodSecurityPolicy) (*v1beta1.PodSecurityPolicy, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.PodSecurityPolicy, error) - List(opts v1.ListOptions) (*v1beta1.PodSecurityPolicyList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) - PodSecurityPolicyExpansion -} - -// podSecurityPolicies implements PodSecurityPolicyInterface -type podSecurityPolicies struct { - client rest.Interface -} - -// newPodSecurityPolicies returns a PodSecurityPolicies -func newPodSecurityPolicies(c *PolicyV1beta1Client) *podSecurityPolicies { - return &podSecurityPolicies{ - client: c.RESTClient(), - } -} - -// Get takes name of the podSecurityPolicy, and returns the corresponding podSecurityPolicy object, and an error if there is any. -func (c *podSecurityPolicies) Get(name string, options v1.GetOptions) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodSecurityPolicies that match those selectors. -func (c *podSecurityPolicies) List(opts v1.ListOptions) (result *v1beta1.PodSecurityPolicyList, err error) { - result = &v1beta1.PodSecurityPolicyList{} - err = c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podSecurityPolicies. -func (c *podSecurityPolicies) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("podsecuritypolicies"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podSecurityPolicy and creates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Create(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Post(). - Resource("podsecuritypolicies"). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Update takes the representation of a podSecurityPolicy and updates it. Returns the server's representation of the podSecurityPolicy, and an error, if there is any. -func (c *podSecurityPolicies) Update(podSecurityPolicy *v1beta1.PodSecurityPolicy) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Put(). - Resource("podsecuritypolicies"). - Name(podSecurityPolicy.Name). - Body(podSecurityPolicy). - Do(). - Into(result) - return -} - -// Delete takes name of the podSecurityPolicy and deletes it. Returns an error if one occurs. -func (c *podSecurityPolicies) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. 
-func (c *podSecurityPolicies) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("podsecuritypolicies"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched podSecurityPolicy. -func (c *podSecurityPolicies) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PodSecurityPolicy, err error) { - result = &v1beta1.PodSecurityPolicy{} - err = c.client.Patch(pt). - Resource("podsecuritypolicies"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go b/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go deleted file mode 100644 index 020e185..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/policy/v1beta1/policy_client.go +++ /dev/null @@ -1,100 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/policy/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type PolicyV1beta1Interface interface { - RESTClient() rest.Interface - EvictionsGetter - PodDisruptionBudgetsGetter - PodSecurityPoliciesGetter -} - -// PolicyV1beta1Client is used to interact with features provided by the policy group. -type PolicyV1beta1Client struct { - restClient rest.Interface -} - -func (c *PolicyV1beta1Client) Evictions(namespace string) EvictionInterface { - return newEvictions(c, namespace) -} - -func (c *PolicyV1beta1Client) PodDisruptionBudgets(namespace string) PodDisruptionBudgetInterface { - return newPodDisruptionBudgets(c, namespace) -} - -func (c *PolicyV1beta1Client) PodSecurityPolicies() PodSecurityPolicyInterface { - return newPodSecurityPolicies(c) -} - -// NewForConfig creates a new PolicyV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*PolicyV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &PolicyV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new PolicyV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *PolicyV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new PolicyV1beta1Client for the given RESTClient. 
-func New(c rest.Interface) *PolicyV1beta1Client { - return &PolicyV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *PolicyV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go deleted file mode 100644 index c4299d4..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrole.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ClusterRolesGetter has a method to return a ClusterRoleInterface. -// A group's client should implement this interface. -type ClusterRolesGetter interface { - ClusterRoles() ClusterRoleInterface -} - -// ClusterRoleInterface has methods to work with ClusterRole resources. -type ClusterRoleInterface interface { - Create(*v1.ClusterRole) (*v1.ClusterRole, error) - Update(*v1.ClusterRole) (*v1.ClusterRole, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ClusterRole, error) - List(opts metav1.ListOptions) (*v1.ClusterRoleList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) - ClusterRoleExpansion -} - -// clusterRoles implements ClusterRoleInterface -type clusterRoles struct { - client rest.Interface -} - -// newClusterRoles returns a ClusterRoles -func newClusterRoles(c *RbacV1Client) *clusterRoles { - return &clusterRoles{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string, options metav1.GetOptions) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. 
-func (c *clusterRoles) List(opts metav1.ListOptions) (result *v1.ClusterRoleList, err error) { - result = &v1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(clusterRole *v1.ClusterRole) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRole, err error) { - result = &v1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go deleted file mode 100644 index 30c0469..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/clusterrolebinding.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1 - -import ( - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. -// A group's client should implement this interface. -type ClusterRoleBindingsGetter interface { - ClusterRoleBindings() ClusterRoleBindingInterface -} - -// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. -type ClusterRoleBindingInterface interface { - Create(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) - Update(*v1.ClusterRoleBinding) (*v1.ClusterRoleBinding, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.ClusterRoleBinding, error) - List(opts metav1.ListOptions) (*v1.ClusterRoleBindingList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) - ClusterRoleBindingExpansion -} - -// clusterRoleBindings implements ClusterRoleBindingInterface -type clusterRoleBindings struct { - client rest.Interface -} - -// newClusterRoleBindings returns a ClusterRoleBindings -func newClusterRoleBindings(c *RbacV1Client) *clusterRoleBindings { - return &clusterRoleBindings{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string, options metav1.GetOptions) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts metav1.ListOptions) (result *v1.ClusterRoleBindingList, err error) { - result = &v1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. 
-func (c *clusterRoleBindings) Update(clusterRoleBinding *v1.ClusterRoleBinding) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. -func (c *clusterRoleBindings) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.ClusterRoleBinding, err error) { - result = &v1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go deleted file mode 100644 index e3f1b02..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/generated_expansion.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1 - -type ClusterRoleExpansion interface{} - -type ClusterRoleBindingExpansion interface{} - -type RoleExpansion interface{} - -type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go deleted file mode 100644 index e3855bb..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rbac_client.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/rbac/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type RbacV1Interface interface { - RESTClient() rest.Interface - ClusterRolesGetter - ClusterRoleBindingsGetter - RolesGetter - RoleBindingsGetter -} - -// RbacV1Client is used to interact with features provided by the rbac.authorization.k8s.io group. -type RbacV1Client struct { - restClient rest.Interface -} - -func (c *RbacV1Client) ClusterRoles() ClusterRoleInterface { - return newClusterRoles(c) -} - -func (c *RbacV1Client) ClusterRoleBindings() ClusterRoleBindingInterface { - return newClusterRoleBindings(c) -} - -func (c *RbacV1Client) Roles(namespace string) RoleInterface { - return newRoles(c, namespace) -} - -func (c *RbacV1Client) RoleBindings(namespace string) RoleBindingInterface { - return newRoleBindings(c, namespace) -} - -// NewForConfig creates a new RbacV1Client for the given config. -func NewForConfig(c *rest.Config) (*RbacV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &RbacV1Client{client}, nil -} - -// NewForConfigOrDie creates a new RbacV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *RbacV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new RbacV1Client for the given RESTClient. -func New(c rest.Interface) *RbacV1Client { - return &RbacV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *RbacV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go deleted file mode 100644 index 81ea12a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/role.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// RolesGetter has a method to return a RoleInterface. -// A group's client should implement this interface. -type RolesGetter interface { - Roles(namespace string) RoleInterface -} - -// RoleInterface has methods to work with Role resources. -type RoleInterface interface { - Create(*v1.Role) (*v1.Role, error) - Update(*v1.Role) (*v1.Role, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.Role, error) - List(opts metav1.ListOptions) (*v1.RoleList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) - RoleExpansion -} - -// roles implements RoleInterface -type roles struct { - client rest.Interface - ns string -} - -// newRoles returns a Roles -func newRoles(c *RbacV1Client, namespace string) *roles { - return &roles{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the role, and returns the corresponding role object, and an error if there is any. -func (c *roles) Get(name string, options metav1.GetOptions) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of Roles that match those selectors. -func (c *roles) List(opts metav1.ListOptions) (result *v1.RoleList, err error) { - result = &v1.RoleList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roles. -func (c *roles) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. 
-func (c *roles) Create(role *v1.Role) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Post(). - Namespace(c.ns). - Resource("roles"). - Body(role). - Do(). - Into(result) - return -} - -// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. -func (c *roles) Update(role *v1.Role) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Put(). - Namespace(c.ns). - Resource("roles"). - Name(role.Name). - Body(role). - Do(). - Into(result) - return -} - -// Delete takes name of the role and deletes it. Returns an error if one occurs. -func (c *roles) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roles) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("roles"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched role. -func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Role, err error) { - result = &v1.Role{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("roles"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go deleted file mode 100644 index 17c6f99..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1/rolebinding.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/rbac/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// RoleBindingsGetter has a method to return a RoleBindingInterface. -// A group's client should implement this interface. -type RoleBindingsGetter interface { - RoleBindings(namespace string) RoleBindingInterface -} - -// RoleBindingInterface has methods to work with RoleBinding resources. 
-type RoleBindingInterface interface { - Create(*v1.RoleBinding) (*v1.RoleBinding, error) - Update(*v1.RoleBinding) (*v1.RoleBinding, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.RoleBinding, error) - List(opts metav1.ListOptions) (*v1.RoleBindingList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) - RoleBindingExpansion -} - -// roleBindings implements RoleBindingInterface -type roleBindings struct { - client rest.Interface - ns string -} - -// newRoleBindings returns a RoleBindings -func newRoleBindings(c *RbacV1Client, namespace string) *roleBindings { - return &roleBindings{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any. -func (c *roleBindings) Get(name string, options metav1.GetOptions) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of RoleBindings that match those selectors. -func (c *roleBindings) List(opts metav1.ListOptions) (result *v1.RoleBindingList, err error) { - result = &v1.RoleBindingList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested roleBindings. -func (c *roleBindings) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Create(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Post(). - Namespace(c.ns). - Resource("rolebindings"). - Body(roleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any. -func (c *roleBindings) Update(roleBinding *v1.RoleBinding) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Put(). - Namespace(c.ns). - Resource("rolebindings"). - Name(roleBinding.Name). - Body(roleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs. -func (c *roleBindings) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *roleBindings) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("rolebindings"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). 
- Error() -} - -// Patch applies the patch and returns the patched roleBinding. -func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.RoleBinding, err error) { - result = &v1.RoleBinding{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("rolebindings"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go deleted file mode 100644 index 37a5457..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrole.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ClusterRolesGetter has a method to return a ClusterRoleInterface. -// A group's client should implement this interface. -type ClusterRolesGetter interface { - ClusterRoles() ClusterRoleInterface -} - -// ClusterRoleInterface has methods to work with ClusterRole resources. -type ClusterRoleInterface interface { - Create(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) - Update(*v1alpha1.ClusterRole) (*v1alpha1.ClusterRole, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRole, error) - List(opts v1.ListOptions) (*v1alpha1.ClusterRoleList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) - ClusterRoleExpansion -} - -// clusterRoles implements ClusterRoleInterface -type clusterRoles struct { - client rest.Interface -} - -// newClusterRoles returns a ClusterRoles -func newClusterRoles(c *RbacV1alpha1Client) *clusterRoles { - return &clusterRoles{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any. -func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Get(). - Resource("clusterroles"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors. -func (c *clusterRoles) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleList, err error) { - result = &v1alpha1.ClusterRoleList{} - err = c.client.Get(). - Resource("clusterroles"). 
- VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoles. -func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterroles"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Create(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Post(). - Resource("clusterroles"). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any. -func (c *clusterRoles) Update(clusterRole *v1alpha1.ClusterRole) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Put(). - Resource("clusterroles"). - Name(clusterRole.Name). - Body(clusterRole). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs. -func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("clusterroles"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched clusterRole. -func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRole, err error) { - result = &v1alpha1.ClusterRole{} - err = c.client.Patch(pt). - Resource("clusterroles"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go deleted file mode 100644 index 6050789..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/clusterrolebinding.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/rbac/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface. 
-// A group's client should implement this interface. -type ClusterRoleBindingsGetter interface { - ClusterRoleBindings() ClusterRoleBindingInterface -} - -// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources. -type ClusterRoleBindingInterface interface { - Create(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) - Update(*v1alpha1.ClusterRoleBinding) (*v1alpha1.ClusterRoleBinding, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.ClusterRoleBinding, error) - List(opts v1.ListOptions) (*v1alpha1.ClusterRoleBindingList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) - ClusterRoleBindingExpansion -} - -// clusterRoleBindings implements ClusterRoleBindingInterface -type clusterRoleBindings struct { - client rest.Interface -} - -// newClusterRoleBindings returns a ClusterRoleBindings -func newClusterRoleBindings(c *RbacV1alpha1Client) *clusterRoleBindings { - return &clusterRoleBindings{ - client: c.RESTClient(), - } -} - -// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any. -func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Get(). - Resource("clusterrolebindings"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors. -func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1alpha1.ClusterRoleBindingList, err error) { - result = &v1alpha1.ClusterRoleBindingList{} - err = c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested clusterRoleBindings. -func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("clusterrolebindings"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Create(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Post(). - Resource("clusterrolebindings"). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any. -func (c *clusterRoleBindings) Update(clusterRoleBinding *v1alpha1.ClusterRoleBinding) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Put(). - Resource("clusterrolebindings"). - Name(clusterRoleBinding.Name). - Body(clusterRoleBinding). - Do(). - Into(result) - return -} - -// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs. 
-func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("clusterrolebindings"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched clusterRoleBinding. -func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.ClusterRoleBinding, err error) { - result = &v1alpha1.ClusterRoleBinding{} - err = c.client.Patch(pt). - Resource("clusterrolebindings"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go deleted file mode 100644 index df51baa..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go deleted file mode 100644 index b8b5c78..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type ClusterRoleExpansion interface{} - -type ClusterRoleBindingExpansion interface{} - -type RoleExpansion interface{} - -type RoleBindingExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go deleted file mode 100644 index de83531..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rbac_client.go +++ /dev/null @@ -1,105 +0,0 @@ -/* -Copyright The Kubernetes Authors. 
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1alpha1 "k8s.io/api/rbac/v1alpha1"
-	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-	"k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-type RbacV1alpha1Interface interface {
-	RESTClient() rest.Interface
-	ClusterRolesGetter
-	ClusterRoleBindingsGetter
-	RolesGetter
-	RoleBindingsGetter
-}
-
-// RbacV1alpha1Client is used to interact with features provided by the rbac.authorization.k8s.io group.
-type RbacV1alpha1Client struct {
-	restClient rest.Interface
-}
-
-func (c *RbacV1alpha1Client) ClusterRoles() ClusterRoleInterface {
-	return newClusterRoles(c)
-}
-
-func (c *RbacV1alpha1Client) ClusterRoleBindings() ClusterRoleBindingInterface {
-	return newClusterRoleBindings(c)
-}
-
-func (c *RbacV1alpha1Client) Roles(namespace string) RoleInterface {
-	return newRoles(c, namespace)
-}
-
-func (c *RbacV1alpha1Client) RoleBindings(namespace string) RoleBindingInterface {
-	return newRoleBindings(c, namespace)
-}
-
-// NewForConfig creates a new RbacV1alpha1Client for the given config.
-func NewForConfig(c *rest.Config) (*RbacV1alpha1Client, error) {
-	config := *c
-	if err := setConfigDefaults(&config); err != nil {
-		return nil, err
-	}
-	client, err := rest.RESTClientFor(&config)
-	if err != nil {
-		return nil, err
-	}
-	return &RbacV1alpha1Client{client}, nil
-}
-
-// NewForConfigOrDie creates a new RbacV1alpha1Client for the given config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *RbacV1alpha1Client {
-	client, err := NewForConfig(c)
-	if err != nil {
-		panic(err)
-	}
-	return client
-}
-
-// New creates a new RbacV1alpha1Client for the given RESTClient.
-func New(c rest.Interface) *RbacV1alpha1Client {
-	return &RbacV1alpha1Client{c}
-}
-
-func setConfigDefaults(config *rest.Config) error {
-	gv := v1alpha1.SchemeGroupVersion
-	config.GroupVersion = &gv
-	config.APIPath = "/apis"
-	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
-
-	if config.UserAgent == "" {
-		config.UserAgent = rest.DefaultKubernetesUserAgent()
-	}
-
-	return nil
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *RbacV1alpha1Client) RESTClient() rest.Interface {
-	if c == nil {
-		return nil
-	}
-	return c.restClient
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
deleted file mode 100644
index aa6954b..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/role.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1alpha1 "k8s.io/api/rbac/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// RolesGetter has a method to return a RoleInterface.
-// A group's client should implement this interface.
-type RolesGetter interface {
-	Roles(namespace string) RoleInterface
-}
-
-// RoleInterface has methods to work with Role resources.
-type RoleInterface interface {
-	Create(*v1alpha1.Role) (*v1alpha1.Role, error)
-	Update(*v1alpha1.Role) (*v1alpha1.Role, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.Role, error)
-	List(opts v1.ListOptions) (*v1alpha1.RoleList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error)
-	RoleExpansion
-}
-
-// roles implements RoleInterface
-type roles struct {
-	client rest.Interface
-	ns     string
-}
-
-// newRoles returns a Roles
-func newRoles(c *RbacV1alpha1Client, namespace string) *roles {
-	return &roles{
-		client: c.RESTClient(),
-		ns:     namespace,
-	}
-}
-
-// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
-func (c *roles) Get(name string, options v1.GetOptions) (result *v1alpha1.Role, err error) {
-	result = &v1alpha1.Role{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("roles").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of Roles that match those selectors.
-func (c *roles) List(opts v1.ListOptions) (result *v1alpha1.RoleList, err error) {
-	result = &v1alpha1.RoleList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("roles").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested roles.
-func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("roles").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Watch()
-}
-
-// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
-	result = &v1alpha1.Role{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("roles").
-		Body(role).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) {
-	result = &v1alpha1.Role{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("roles").
-		Name(role.Name).
-		Body(role).
-		Do().
-		Into(result)
-	return
-}
-
-// Delete takes name of the role and deletes it. Returns an error if one occurs.
-func (c *roles) Delete(name string, options *v1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("roles").
-		Name(name).
-		Body(options).
-		Do().
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("roles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Body(options).
-		Do().
-		Error()
-}
-
-// Patch applies the patch and returns the patched role.
-func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) {
-	result = &v1alpha1.Role{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("roles").
-		SubResource(subresources...).
-		Name(name).
-		Body(data).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
deleted file mode 100644
index 0941b8e..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1alpha1/rolebinding.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1alpha1 "k8s.io/api/rbac/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// RoleBindingsGetter has a method to return a RoleBindingInterface.
-// A group's client should implement this interface.
-type RoleBindingsGetter interface {
-	RoleBindings(namespace string) RoleBindingInterface
-}
-
-// RoleBindingInterface has methods to work with RoleBinding resources.
-type RoleBindingInterface interface {
-	Create(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error)
-	Update(*v1alpha1.RoleBinding) (*v1alpha1.RoleBinding, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.RoleBinding, error)
-	List(opts v1.ListOptions) (*v1alpha1.RoleBindingList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error)
-	RoleBindingExpansion
-}
-
-// roleBindings implements RoleBindingInterface
-type roleBindings struct {
-	client rest.Interface
-	ns     string
-}
-
-// newRoleBindings returns a RoleBindings
-func newRoleBindings(c *RbacV1alpha1Client, namespace string) *roleBindings {
-	return &roleBindings{
-		client: c.RESTClient(),
-		ns:     namespace,
-	}
-}
-
-// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
-func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1alpha1.RoleBinding, err error) {
-	result = &v1alpha1.RoleBinding{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
-func (c *roleBindings) List(opts v1.ListOptions) (result *v1alpha1.RoleBindingList, err error) {
-	result = &v1alpha1.RoleBindingList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested roleBindings.
-func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Watch()
-}
-
-// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Create(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
-	result = &v1alpha1.RoleBinding{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		Body(roleBinding).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Update(roleBinding *v1alpha1.RoleBinding) (result *v1alpha1.RoleBinding, err error) {
-	result = &v1alpha1.RoleBinding{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		Name(roleBinding.Name).
-		Body(roleBinding).
-		Do().
-		Into(result)
-	return
-}
-
-// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
-func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		Name(name).
-		Body(options).
-		Do().
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Body(options).
-		Do().
-		Error()
-}
-
-// Patch applies the patch and returns the patched roleBinding.
-func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.RoleBinding, err error) {
-	result = &v1alpha1.RoleBinding{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("rolebindings").
-		SubResource(subresources...).
-		Name(name).
-		Body(data).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
deleted file mode 100644
index bac951c..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrole.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
-	v1beta1 "k8s.io/api/rbac/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// ClusterRolesGetter has a method to return a ClusterRoleInterface.
-// A group's client should implement this interface.
-type ClusterRolesGetter interface {
-	ClusterRoles() ClusterRoleInterface
-}
-
-// ClusterRoleInterface has methods to work with ClusterRole resources.
-type ClusterRoleInterface interface {
-	Create(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error)
-	Update(*v1beta1.ClusterRole) (*v1beta1.ClusterRole, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.ClusterRole, error)
-	List(opts v1.ListOptions) (*v1beta1.ClusterRoleList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error)
-	ClusterRoleExpansion
-}
-
-// clusterRoles implements ClusterRoleInterface
-type clusterRoles struct {
-	client rest.Interface
-}
-
-// newClusterRoles returns a ClusterRoles
-func newClusterRoles(c *RbacV1beta1Client) *clusterRoles {
-	return &clusterRoles{
-		client: c.RESTClient(),
-	}
-}
-
-// Get takes name of the clusterRole, and returns the corresponding clusterRole object, and an error if there is any.
-func (c *clusterRoles) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRole, err error) {
-	result = &v1beta1.ClusterRole{}
-	err = c.client.Get().
-		Resource("clusterroles").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of ClusterRoles that match those selectors.
-func (c *clusterRoles) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleList, err error) {
-	result = &v1beta1.ClusterRoleList{}
-	err = c.client.Get().
-		Resource("clusterroles").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested clusterRoles.
-func (c *clusterRoles) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	opts.Watch = true
-	return c.client.Get().
-		Resource("clusterroles").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Watch()
-}
-
-// Create takes the representation of a clusterRole and creates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Create(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
-	result = &v1beta1.ClusterRole{}
-	err = c.client.Post().
-		Resource("clusterroles").
-		Body(clusterRole).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a clusterRole and updates it. Returns the server's representation of the clusterRole, and an error, if there is any.
-func (c *clusterRoles) Update(clusterRole *v1beta1.ClusterRole) (result *v1beta1.ClusterRole, err error) {
-	result = &v1beta1.ClusterRole{}
-	err = c.client.Put().
-		Resource("clusterroles").
-		Name(clusterRole.Name).
-		Body(clusterRole).
-		Do().
-		Into(result)
-	return
-}
-
-// Delete takes name of the clusterRole and deletes it. Returns an error if one occurs.
-func (c *clusterRoles) Delete(name string, options *v1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("clusterroles").
-		Name(name).
-		Body(options).
-		Do().
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *clusterRoles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	return c.client.Delete().
-		Resource("clusterroles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Body(options).
-		Do().
-		Error()
-}
-
-// Patch applies the patch and returns the patched clusterRole.
-func (c *clusterRoles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRole, err error) {
-	result = &v1beta1.ClusterRole{}
-	err = c.client.Patch(pt).
-		Resource("clusterroles").
-		SubResource(subresources...).
-		Name(name).
-		Body(data).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
deleted file mode 100644
index 96c91de..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/clusterrolebinding.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
-	v1beta1 "k8s.io/api/rbac/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// ClusterRoleBindingsGetter has a method to return a ClusterRoleBindingInterface.
-// A group's client should implement this interface.
-type ClusterRoleBindingsGetter interface {
-	ClusterRoleBindings() ClusterRoleBindingInterface
-}
-
-// ClusterRoleBindingInterface has methods to work with ClusterRoleBinding resources.
-type ClusterRoleBindingInterface interface {
-	Create(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error)
-	Update(*v1beta1.ClusterRoleBinding) (*v1beta1.ClusterRoleBinding, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.ClusterRoleBinding, error)
-	List(opts v1.ListOptions) (*v1beta1.ClusterRoleBindingList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error)
-	ClusterRoleBindingExpansion
-}
-
-// clusterRoleBindings implements ClusterRoleBindingInterface
-type clusterRoleBindings struct {
-	client rest.Interface
-}
-
-// newClusterRoleBindings returns a ClusterRoleBindings
-func newClusterRoleBindings(c *RbacV1beta1Client) *clusterRoleBindings {
-	return &clusterRoleBindings{
-		client: c.RESTClient(),
-	}
-}
-
-// Get takes name of the clusterRoleBinding, and returns the corresponding clusterRoleBinding object, and an error if there is any.
-func (c *clusterRoleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.ClusterRoleBinding, err error) {
-	result = &v1beta1.ClusterRoleBinding{}
-	err = c.client.Get().
-		Resource("clusterrolebindings").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of ClusterRoleBindings that match those selectors.
-func (c *clusterRoleBindings) List(opts v1.ListOptions) (result *v1beta1.ClusterRoleBindingList, err error) {
-	result = &v1beta1.ClusterRoleBindingList{}
-	err = c.client.Get().
-		Resource("clusterrolebindings").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested clusterRoleBindings.
-func (c *clusterRoleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	opts.Watch = true
-	return c.client.Get().
-		Resource("clusterrolebindings").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Watch()
-}
-
-// Create takes the representation of a clusterRoleBinding and creates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Create(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
-	result = &v1beta1.ClusterRoleBinding{}
-	err = c.client.Post().
-		Resource("clusterrolebindings").
-		Body(clusterRoleBinding).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a clusterRoleBinding and updates it. Returns the server's representation of the clusterRoleBinding, and an error, if there is any.
-func (c *clusterRoleBindings) Update(clusterRoleBinding *v1beta1.ClusterRoleBinding) (result *v1beta1.ClusterRoleBinding, err error) {
-	result = &v1beta1.ClusterRoleBinding{}
-	err = c.client.Put().
-		Resource("clusterrolebindings").
-		Name(clusterRoleBinding.Name).
-		Body(clusterRoleBinding).
-		Do().
-		Into(result)
-	return
-}
-
-// Delete takes name of the clusterRoleBinding and deletes it. Returns an error if one occurs.
-func (c *clusterRoleBindings) Delete(name string, options *v1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("clusterrolebindings").
-		Name(name).
-		Body(options).
-		Do().
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *clusterRoleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	return c.client.Delete().
-		Resource("clusterrolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Body(options).
-		Do().
-		Error()
-}
-
-// Patch applies the patch and returns the patched clusterRoleBinding.
-func (c *clusterRoleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.ClusterRoleBinding, err error) {
-	result = &v1beta1.ClusterRoleBinding{}
-	err = c.client.Patch(pt).
-		Resource("clusterrolebindings").
-		SubResource(subresources...).
-		Name(name).
-		Body(data).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go
deleted file mode 100644
index 7711019..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated typed clients.
-package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go
deleted file mode 100644
index e7be79f..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/generated_expansion.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-type ClusterRoleExpansion interface{}
-
-type ClusterRoleBindingExpansion interface{}
-
-type RoleExpansion interface{}
-
-type RoleBindingExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go
deleted file mode 100644
index 46718d7..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rbac_client.go
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
-	v1beta1 "k8s.io/api/rbac/v1beta1"
-	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-	"k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-type RbacV1beta1Interface interface {
-	RESTClient() rest.Interface
-	ClusterRolesGetter
-	ClusterRoleBindingsGetter
-	RolesGetter
-	RoleBindingsGetter
-}
-
-// RbacV1beta1Client is used to interact with features provided by the rbac.authorization.k8s.io group.
-type RbacV1beta1Client struct {
-	restClient rest.Interface
-}
-
-func (c *RbacV1beta1Client) ClusterRoles() ClusterRoleInterface {
-	return newClusterRoles(c)
-}
-
-func (c *RbacV1beta1Client) ClusterRoleBindings() ClusterRoleBindingInterface {
-	return newClusterRoleBindings(c)
-}
-
-func (c *RbacV1beta1Client) Roles(namespace string) RoleInterface {
-	return newRoles(c, namespace)
-}
-
-func (c *RbacV1beta1Client) RoleBindings(namespace string) RoleBindingInterface {
-	return newRoleBindings(c, namespace)
-}
-
-// NewForConfig creates a new RbacV1beta1Client for the given config.
-func NewForConfig(c *rest.Config) (*RbacV1beta1Client, error) {
-	config := *c
-	if err := setConfigDefaults(&config); err != nil {
-		return nil, err
-	}
-	client, err := rest.RESTClientFor(&config)
-	if err != nil {
-		return nil, err
-	}
-	return &RbacV1beta1Client{client}, nil
-}
-
-// NewForConfigOrDie creates a new RbacV1beta1Client for the given config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *RbacV1beta1Client {
-	client, err := NewForConfig(c)
-	if err != nil {
-		panic(err)
-	}
-	return client
-}
-
-// New creates a new RbacV1beta1Client for the given RESTClient.
-func New(c rest.Interface) *RbacV1beta1Client {
-	return &RbacV1beta1Client{c}
-}
-
-func setConfigDefaults(config *rest.Config) error {
-	gv := v1beta1.SchemeGroupVersion
-	config.GroupVersion = &gv
-	config.APIPath = "/apis"
-	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
-
-	if config.UserAgent == "" {
-		config.UserAgent = rest.DefaultKubernetesUserAgent()
-	}
-
-	return nil
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *RbacV1beta1Client) RESTClient() rest.Interface {
-	if c == nil {
-		return nil
-	}
-	return c.restClient
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
deleted file mode 100644
index 66f382c..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/role.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
-	v1beta1 "k8s.io/api/rbac/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// RolesGetter has a method to return a RoleInterface.
-// A group's client should implement this interface.
-type RolesGetter interface {
-	Roles(namespace string) RoleInterface
-}
-
-// RoleInterface has methods to work with Role resources.
-type RoleInterface interface {
-	Create(*v1beta1.Role) (*v1beta1.Role, error)
-	Update(*v1beta1.Role) (*v1beta1.Role, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.Role, error)
-	List(opts v1.ListOptions) (*v1beta1.RoleList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error)
-	RoleExpansion
-}
-
-// roles implements RoleInterface
-type roles struct {
-	client rest.Interface
-	ns     string
-}
-
-// newRoles returns a Roles
-func newRoles(c *RbacV1beta1Client, namespace string) *roles {
-	return &roles{
-		client: c.RESTClient(),
-		ns:     namespace,
-	}
-}
-
-// Get takes name of the role, and returns the corresponding role object, and an error if there is any.
-func (c *roles) Get(name string, options v1.GetOptions) (result *v1beta1.Role, err error) {
-	result = &v1beta1.Role{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("roles").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of Roles that match those selectors.
-func (c *roles) List(opts v1.ListOptions) (result *v1beta1.RoleList, err error) {
-	result = &v1beta1.RoleList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("roles").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested roles.
-func (c *roles) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("roles").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Watch()
-}
-
-// Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Create(role *v1beta1.Role) (result *v1beta1.Role, err error) {
-	result = &v1beta1.Role{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("roles").
-		Body(role).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any.
-func (c *roles) Update(role *v1beta1.Role) (result *v1beta1.Role, err error) {
-	result = &v1beta1.Role{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("roles").
-		Name(role.Name).
-		Body(role).
-		Do().
-		Into(result)
-	return
-}
-
-// Delete takes name of the role and deletes it. Returns an error if one occurs.
-func (c *roles) Delete(name string, options *v1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("roles").
-		Name(name).
-		Body(options).
-		Do().
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *roles) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("roles").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Body(options).
-		Do().
-		Error()
-}
-
-// Patch applies the patch and returns the patched role.
-func (c *roles) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.Role, err error) {
-	result = &v1beta1.Role{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("roles").
-		SubResource(subresources...).
-		Name(name).
-		Body(data).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go b/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
deleted file mode 100644
index 67d3d33..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/rbac/v1beta1/rolebinding.go
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
-	v1beta1 "k8s.io/api/rbac/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// RoleBindingsGetter has a method to return a RoleBindingInterface.
-// A group's client should implement this interface.
-type RoleBindingsGetter interface {
-	RoleBindings(namespace string) RoleBindingInterface
-}
-
-// RoleBindingInterface has methods to work with RoleBinding resources.
-type RoleBindingInterface interface {
-	Create(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
-	Update(*v1beta1.RoleBinding) (*v1beta1.RoleBinding, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.RoleBinding, error)
-	List(opts v1.ListOptions) (*v1beta1.RoleBindingList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error)
-	RoleBindingExpansion
-}
-
-// roleBindings implements RoleBindingInterface
-type roleBindings struct {
-	client rest.Interface
-	ns     string
-}
-
-// newRoleBindings returns a RoleBindings
-func newRoleBindings(c *RbacV1beta1Client, namespace string) *roleBindings {
-	return &roleBindings{
-		client: c.RESTClient(),
-		ns:     namespace,
-	}
-}
-
-// Get takes name of the roleBinding, and returns the corresponding roleBinding object, and an error if there is any.
-func (c *roleBindings) Get(name string, options v1.GetOptions) (result *v1beta1.RoleBinding, err error) {
-	result = &v1beta1.RoleBinding{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of RoleBindings that match those selectors.
-func (c *roleBindings) List(opts v1.ListOptions) (result *v1beta1.RoleBindingList, err error) {
-	result = &v1beta1.RoleBindingList{}
-	err = c.client.Get().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested roleBindings.
-func (c *roleBindings) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	opts.Watch = true
-	return c.client.Get().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Watch()
-}
-
-// Create takes the representation of a roleBinding and creates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Create(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
-	result = &v1beta1.RoleBinding{}
-	err = c.client.Post().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		Body(roleBinding).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a roleBinding and updates it. Returns the server's representation of the roleBinding, and an error, if there is any.
-func (c *roleBindings) Update(roleBinding *v1beta1.RoleBinding) (result *v1beta1.RoleBinding, err error) {
-	result = &v1beta1.RoleBinding{}
-	err = c.client.Put().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		Name(roleBinding.Name).
-		Body(roleBinding).
-		Do().
-		Into(result)
-	return
-}
-
-// Delete takes name of the roleBinding and deletes it. Returns an error if one occurs.
-func (c *roleBindings) Delete(name string, options *v1.DeleteOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		Name(name).
-		Body(options).
-		Do().
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *roleBindings) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	return c.client.Delete().
-		Namespace(c.ns).
-		Resource("rolebindings").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Body(options).
-		Do().
-		Error()
-}
-
-// Patch applies the patch and returns the patched roleBinding.
-func (c *roleBindings) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.RoleBinding, err error) {
-	result = &v1beta1.RoleBinding{}
-	err = c.client.Patch(pt).
-		Namespace(c.ns).
-		Resource("rolebindings").
-		SubResource(subresources...).
-		Name(name).
-		Body(data).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go
deleted file mode 100644
index df51baa..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated typed clients.
-package v1alpha1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go
deleted file mode 100644
index 52f81d8..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/generated_expansion.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-type PriorityClassExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
deleted file mode 100644
index 6845d25..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/priorityclass.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1alpha1 "k8s.io/api/scheduling/v1alpha1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// PriorityClassesGetter has a method to return a PriorityClassInterface.
-// A group's client should implement this interface.
-type PriorityClassesGetter interface {
-	PriorityClasses() PriorityClassInterface
-}
-
-// PriorityClassInterface has methods to work with PriorityClass resources.
-type PriorityClassInterface interface {
-	Create(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error)
-	Update(*v1alpha1.PriorityClass) (*v1alpha1.PriorityClass, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1alpha1.PriorityClass, error)
-	List(opts v1.ListOptions) (*v1alpha1.PriorityClassList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error)
-	PriorityClassExpansion
-}
-
-// priorityClasses implements PriorityClassInterface
-type priorityClasses struct {
-	client rest.Interface
-}
-
-// newPriorityClasses returns a PriorityClasses
-func newPriorityClasses(c *SchedulingV1alpha1Client) *priorityClasses {
-	return &priorityClasses{
-		client: c.RESTClient(),
-	}
-}
-
-// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
-func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1alpha1.PriorityClass, err error) {
-	result = &v1alpha1.PriorityClass{}
-	err = c.client.Get().
-		Resource("priorityclasses").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
-func (c *priorityClasses) List(opts v1.ListOptions) (result *v1alpha1.PriorityClassList, err error) {
-	result = &v1alpha1.PriorityClassList{}
-	err = c.client.Get().
-		Resource("priorityclasses").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested priorityClasses.
-func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	opts.Watch = true
-	return c.client.Get().
-		Resource("priorityclasses").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Watch()
-}
-
-// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Create(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
-	result = &v1alpha1.PriorityClass{}
-	err = c.client.Post().
-		Resource("priorityclasses").
-		Body(priorityClass).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Update(priorityClass *v1alpha1.PriorityClass) (result *v1alpha1.PriorityClass, err error) {
-	result = &v1alpha1.PriorityClass{}
-	err = c.client.Put().
-		Resource("priorityclasses").
-		Name(priorityClass.Name).
-		Body(priorityClass).
-		Do().
-		Into(result)
-	return
-}
-
-// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs.
-func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error {
-	return c.client.Delete().
-		Resource("priorityclasses").
-		Name(name).
-		Body(options).
-		Do().
-		Error()
-}
-
-// DeleteCollection deletes a collection of objects.
-func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
-	return c.client.Delete().
-		Resource("priorityclasses").
-		VersionedParams(&listOptions, scheme.ParameterCodec).
-		Body(options).
-		Do().
-		Error()
-}
-
-// Patch applies the patch and returns the patched priorityClass.
-func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PriorityClass, err error) {
-	result = &v1alpha1.PriorityClass{}
-	err = c.client.Patch(pt).
-		Resource("priorityclasses").
-		SubResource(subresources...).
-		Name(name).
-		Body(data).
-		Do().
-		Into(result)
-	return
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
deleted file mode 100644
index 375f41b..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1/scheduling_client.go
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1alpha1
-
-import (
-	v1alpha1 "k8s.io/api/scheduling/v1alpha1"
-	serializer "k8s.io/apimachinery/pkg/runtime/serializer"
-	"k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-type SchedulingV1alpha1Interface interface {
-	RESTClient() rest.Interface
-	PriorityClassesGetter
-}
-
-// SchedulingV1alpha1Client is used to interact with features provided by the scheduling.k8s.io group.
-type SchedulingV1alpha1Client struct {
-	restClient rest.Interface
-}
-
-func (c *SchedulingV1alpha1Client) PriorityClasses() PriorityClassInterface {
-	return newPriorityClasses(c)
-}
-
-// NewForConfig creates a new SchedulingV1alpha1Client for the given config.
-func NewForConfig(c *rest.Config) (*SchedulingV1alpha1Client, error) {
-	config := *c
-	if err := setConfigDefaults(&config); err != nil {
-		return nil, err
-	}
-	client, err := rest.RESTClientFor(&config)
-	if err != nil {
-		return nil, err
-	}
-	return &SchedulingV1alpha1Client{client}, nil
-}
-
-// NewForConfigOrDie creates a new SchedulingV1alpha1Client for the given config and
-// panics if there is an error in the config.
-func NewForConfigOrDie(c *rest.Config) *SchedulingV1alpha1Client {
-	client, err := NewForConfig(c)
-	if err != nil {
-		panic(err)
-	}
-	return client
-}
-
-// New creates a new SchedulingV1alpha1Client for the given RESTClient.
-func New(c rest.Interface) *SchedulingV1alpha1Client {
-	return &SchedulingV1alpha1Client{c}
-}
-
-func setConfigDefaults(config *rest.Config) error {
-	gv := v1alpha1.SchemeGroupVersion
-	config.GroupVersion = &gv
-	config.APIPath = "/apis"
-	config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
-
-	if config.UserAgent == "" {
-		config.UserAgent = rest.DefaultKubernetesUserAgent()
-	}
-
-	return nil
-}
-
-// RESTClient returns a RESTClient that is used to communicate
-// with API server by this client implementation.
-func (c *SchedulingV1alpha1Client) RESTClient() rest.Interface {
-	if c == nil {
-		return nil
-	}
-	return c.restClient
-}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go
deleted file mode 100644
index 7711019..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/doc.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-// This package has the automatically generated typed clients.
-package v1beta1
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go
deleted file mode 100644
index 3bab873..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/generated_expansion.go
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-type PriorityClassExpansion interface{}
diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
deleted file mode 100644
index 57b9766..0000000
--- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/priorityclass.go
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
-Copyright The Kubernetes Authors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-// Code generated by client-gen. DO NOT EDIT.
-
-package v1beta1
-
-import (
-	v1beta1 "k8s.io/api/scheduling/v1beta1"
-	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	types "k8s.io/apimachinery/pkg/types"
-	watch "k8s.io/apimachinery/pkg/watch"
-	scheme "k8s.io/client-go/kubernetes/scheme"
-	rest "k8s.io/client-go/rest"
-)
-
-// PriorityClassesGetter has a method to return a PriorityClassInterface.
-// A group's client should implement this interface.
-type PriorityClassesGetter interface {
-	PriorityClasses() PriorityClassInterface
-}
-
-// PriorityClassInterface has methods to work with PriorityClass resources.
-type PriorityClassInterface interface {
-	Create(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error)
-	Update(*v1beta1.PriorityClass) (*v1beta1.PriorityClass, error)
-	Delete(name string, options *v1.DeleteOptions) error
-	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
-	Get(name string, options v1.GetOptions) (*v1beta1.PriorityClass, error)
-	List(opts v1.ListOptions) (*v1beta1.PriorityClassList, error)
-	Watch(opts v1.ListOptions) (watch.Interface, error)
-	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error)
-	PriorityClassExpansion
-}
-
-// priorityClasses implements PriorityClassInterface
-type priorityClasses struct {
-	client rest.Interface
-}
-
-// newPriorityClasses returns a PriorityClasses
-func newPriorityClasses(c *SchedulingV1beta1Client) *priorityClasses {
-	return &priorityClasses{
-		client: c.RESTClient(),
-	}
-}
-
-// Get takes name of the priorityClass, and returns the corresponding priorityClass object, and an error if there is any.
-func (c *priorityClasses) Get(name string, options v1.GetOptions) (result *v1beta1.PriorityClass, err error) {
-	result = &v1beta1.PriorityClass{}
-	err = c.client.Get().
-		Resource("priorityclasses").
-		Name(name).
-		VersionedParams(&options, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// List takes label and field selectors, and returns the list of PriorityClasses that match those selectors.
-func (c *priorityClasses) List(opts v1.ListOptions) (result *v1beta1.PriorityClassList, err error) {
-	result = &v1beta1.PriorityClassList{}
-	err = c.client.Get().
-		Resource("priorityclasses").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Do().
-		Into(result)
-	return
-}
-
-// Watch returns a watch.Interface that watches the requested priorityClasses.
-func (c *priorityClasses) Watch(opts v1.ListOptions) (watch.Interface, error) {
-	opts.Watch = true
-	return c.client.Get().
-		Resource("priorityclasses").
-		VersionedParams(&opts, scheme.ParameterCodec).
-		Watch()
-}
-
-// Create takes the representation of a priorityClass and creates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Create(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) {
-	result = &v1beta1.PriorityClass{}
-	err = c.client.Post().
-		Resource("priorityclasses").
-		Body(priorityClass).
-		Do().
-		Into(result)
-	return
-}
-
-// Update takes the representation of a priorityClass and updates it. Returns the server's representation of the priorityClass, and an error, if there is any.
-func (c *priorityClasses) Update(priorityClass *v1beta1.PriorityClass) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Put(). - Resource("priorityclasses"). - Name(priorityClass.Name). - Body(priorityClass). - Do(). - Into(result) - return -} - -// Delete takes name of the priorityClass and deletes it. Returns an error if one occurs. -func (c *priorityClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *priorityClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("priorityclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched priorityClass. -func (c *priorityClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.PriorityClass, err error) { - result = &v1beta1.PriorityClass{} - err = c.client.Patch(pt). - Resource("priorityclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go b/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go deleted file mode 100644 index 6feec4a..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/scheduling/v1beta1/scheduling_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/scheduling/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type SchedulingV1beta1Interface interface { - RESTClient() rest.Interface - PriorityClassesGetter -} - -// SchedulingV1beta1Client is used to interact with features provided by the scheduling.k8s.io group. -type SchedulingV1beta1Client struct { - restClient rest.Interface -} - -func (c *SchedulingV1beta1Client) PriorityClasses() PriorityClassInterface { - return newPriorityClasses(c) -} - -// NewForConfig creates a new SchedulingV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*SchedulingV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &SchedulingV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new SchedulingV1beta1Client for the given config and -// panics if there is an error in the config. 
-func NewForConfigOrDie(c *rest.Config) *SchedulingV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new SchedulingV1beta1Client for the given RESTClient. -func New(c rest.Interface) *SchedulingV1beta1Client { - return &SchedulingV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *SchedulingV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go deleted file mode 100644 index df51baa..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go deleted file mode 100644 index 23d9f94..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type PodPresetExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go deleted file mode 100644 index f000ae4..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/podpreset.go +++ /dev/null @@ -1,157 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/settings/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// PodPresetsGetter has a method to return a PodPresetInterface. -// A group's client should implement this interface. -type PodPresetsGetter interface { - PodPresets(namespace string) PodPresetInterface -} - -// PodPresetInterface has methods to work with PodPreset resources. -type PodPresetInterface interface { - Create(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) - Update(*v1alpha1.PodPreset) (*v1alpha1.PodPreset, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.PodPreset, error) - List(opts v1.ListOptions) (*v1alpha1.PodPresetList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) - PodPresetExpansion -} - -// podPresets implements PodPresetInterface -type podPresets struct { - client rest.Interface - ns string -} - -// newPodPresets returns a PodPresets -func newPodPresets(c *SettingsV1alpha1Client, namespace string) *podPresets { - return &podPresets{ - client: c.RESTClient(), - ns: namespace, - } -} - -// Get takes name of the podPreset, and returns the corresponding podPreset object, and an error if there is any. -func (c *podPresets) Get(name string, options v1.GetOptions) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of PodPresets that match those selectors. -func (c *podPresets) List(opts v1.ListOptions) (result *v1alpha1.PodPresetList, err error) { - result = &v1alpha1.PodPresetList{} - err = c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested podPresets. -func (c *podPresets) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a podPreset and creates it. Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Create(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Post(). - Namespace(c.ns). - Resource("podpresets"). - Body(podPreset). - Do(). - Into(result) - return -} - -// Update takes the representation of a podPreset and updates it. 
Returns the server's representation of the podPreset, and an error, if there is any. -func (c *podPresets) Update(podPreset *v1alpha1.PodPreset) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Put(). - Namespace(c.ns). - Resource("podpresets"). - Name(podPreset.Name). - Body(podPreset). - Do(). - Into(result) - return -} - -// Delete takes name of the podPreset and deletes it. Returns an error if one occurs. -func (c *podPresets) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *podPresets) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Namespace(c.ns). - Resource("podpresets"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched podPreset. -func (c *podPresets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PodPreset, err error) { - result = &v1alpha1.PodPreset{} - err = c.client.Patch(pt). - Namespace(c.ns). - Resource("podpresets"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go b/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go deleted file mode 100644 index c2a03b9..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/settings/v1alpha1/settings_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/settings/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type SettingsV1alpha1Interface interface { - RESTClient() rest.Interface - PodPresetsGetter -} - -// SettingsV1alpha1Client is used to interact with features provided by the settings.k8s.io group. -type SettingsV1alpha1Client struct { - restClient rest.Interface -} - -func (c *SettingsV1alpha1Client) PodPresets(namespace string) PodPresetInterface { - return newPodPresets(c, namespace) -} - -// NewForConfig creates a new SettingsV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*SettingsV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &SettingsV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new SettingsV1alpha1Client for the given config and -// panics if there is an error in the config. 
-func NewForConfigOrDie(c *rest.Config) *SettingsV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new SettingsV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *SettingsV1alpha1Client { - return &SettingsV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *SettingsV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go deleted file mode 100644 index 3af5d05..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go deleted file mode 100644 index 2bea7ec..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -type StorageClassExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go deleted file mode 100644 index ac48f49..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storage_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1 - -import ( - v1 "k8s.io/api/storage/v1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type StorageV1Interface interface { - RESTClient() rest.Interface - StorageClassesGetter -} - -// StorageV1Client is used to interact with features provided by the storage.k8s.io group. -type StorageV1Client struct { - restClient rest.Interface -} - -func (c *StorageV1Client) StorageClasses() StorageClassInterface { - return newStorageClasses(c) -} - -// NewForConfig creates a new StorageV1Client for the given config. -func NewForConfig(c *rest.Config) (*StorageV1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &StorageV1Client{client}, nil -} - -// NewForConfigOrDie creates a new StorageV1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *StorageV1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new StorageV1Client for the given RESTClient. -func New(c rest.Interface) *StorageV1Client { - return &StorageV1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *StorageV1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go deleted file mode 100644 index 0f7f57f..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1/storageclass.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1 - -import ( - v1 "k8s.io/api/storage/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// StorageClassesGetter has a method to return a StorageClassInterface. -// A group's client should implement this interface. -type StorageClassesGetter interface { - StorageClasses() StorageClassInterface -} - -// StorageClassInterface has methods to work with StorageClass resources. -type StorageClassInterface interface { - Create(*v1.StorageClass) (*v1.StorageClass, error) - Update(*v1.StorageClass) (*v1.StorageClass, error) - Delete(name string, options *metav1.DeleteOptions) error - DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error - Get(name string, options metav1.GetOptions) (*v1.StorageClass, error) - List(opts metav1.ListOptions) (*v1.StorageClassList, error) - Watch(opts metav1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) - StorageClassExpansion -} - -// storageClasses implements StorageClassInterface -type storageClasses struct { - client rest.Interface -} - -// newStorageClasses returns a StorageClasses -func newStorageClasses(c *StorageV1Client) *storageClasses { - return &storageClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(name string, options metav1.GetOptions) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(opts metav1.ListOptions) (result *v1.StorageClassList, err error) { - result = &v1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. -func (c *storageClasses) Watch(opts metav1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - Body(storageClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(storageClass *v1.StorageClass) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - Body(storageClass). - Do(). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. 
-func (c *storageClasses) Delete(name string, options *metav1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.StorageClass, err error) { - result = &v1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go deleted file mode 100644 index df51baa..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1alpha1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go deleted file mode 100644 index cdb7ab2..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/generated_expansion.go +++ /dev/null @@ -1,21 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go deleted file mode 100644 index c52f630..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/storage_client.go +++ /dev/null @@ -1,90 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type StorageV1alpha1Interface interface { - RESTClient() rest.Interface - VolumeAttachmentsGetter -} - -// StorageV1alpha1Client is used to interact with features provided by the storage.k8s.io group. -type StorageV1alpha1Client struct { - restClient rest.Interface -} - -func (c *StorageV1alpha1Client) VolumeAttachments() VolumeAttachmentInterface { - return newVolumeAttachments(c) -} - -// NewForConfig creates a new StorageV1alpha1Client for the given config. -func NewForConfig(c *rest.Config) (*StorageV1alpha1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &StorageV1alpha1Client{client}, nil -} - -// NewForConfigOrDie creates a new StorageV1alpha1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *StorageV1alpha1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new StorageV1alpha1Client for the given RESTClient. -func New(c rest.Interface) *StorageV1alpha1Client { - return &StorageV1alpha1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1alpha1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. -func (c *StorageV1alpha1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go deleted file mode 100644 index e6af001..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1alpha1/volumeattachment.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. 
- -package v1alpha1 - -import ( - v1alpha1 "k8s.io/api/storage/v1alpha1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. -// A group's client should implement this interface. -type VolumeAttachmentsGetter interface { - VolumeAttachments() VolumeAttachmentInterface -} - -// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. -type VolumeAttachmentInterface interface { - Create(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - Update(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - UpdateStatus(*v1alpha1.VolumeAttachment) (*v1alpha1.VolumeAttachment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1alpha1.VolumeAttachment, error) - List(opts v1.ListOptions) (*v1alpha1.VolumeAttachmentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) - VolumeAttachmentExpansion -} - -// volumeAttachments implements VolumeAttachmentInterface -type volumeAttachments struct { - client rest.Interface -} - -// newVolumeAttachments returns a VolumeAttachments -func newVolumeAttachments(c *StorageV1alpha1Client) *volumeAttachments { - return &volumeAttachments{ - client: c.RESTClient(), - } -} - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1alpha1.VolumeAttachmentList, err error) { - result = &v1alpha1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. 
-func (c *volumeAttachments) Update(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). - -func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1alpha1.VolumeAttachment) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.VolumeAttachment, err error) { - result = &v1alpha1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go deleted file mode 100644 index 7711019..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/doc.go +++ /dev/null @@ -1,20 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -// This package has the automatically generated typed clients. -package v1beta1 diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go deleted file mode 100644 index 559f88f..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/generated_expansion.go +++ /dev/null @@ -1,23 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -type StorageClassExpansion interface{} - -type VolumeAttachmentExpansion interface{} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go deleted file mode 100644 index 4bdebb8..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storage_client.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/storage/v1beta1" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" - "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -type StorageV1beta1Interface interface { - RESTClient() rest.Interface - StorageClassesGetter - VolumeAttachmentsGetter -} - -// StorageV1beta1Client is used to interact with features provided by the storage.k8s.io group. -type StorageV1beta1Client struct { - restClient rest.Interface -} - -func (c *StorageV1beta1Client) StorageClasses() StorageClassInterface { - return newStorageClasses(c) -} - -func (c *StorageV1beta1Client) VolumeAttachments() VolumeAttachmentInterface { - return newVolumeAttachments(c) -} - -// NewForConfig creates a new StorageV1beta1Client for the given config. -func NewForConfig(c *rest.Config) (*StorageV1beta1Client, error) { - config := *c - if err := setConfigDefaults(&config); err != nil { - return nil, err - } - client, err := rest.RESTClientFor(&config) - if err != nil { - return nil, err - } - return &StorageV1beta1Client{client}, nil -} - -// NewForConfigOrDie creates a new StorageV1beta1Client for the given config and -// panics if there is an error in the config. -func NewForConfigOrDie(c *rest.Config) *StorageV1beta1Client { - client, err := NewForConfig(c) - if err != nil { - panic(err) - } - return client -} - -// New creates a new StorageV1beta1Client for the given RESTClient. -func New(c rest.Interface) *StorageV1beta1Client { - return &StorageV1beta1Client{c} -} - -func setConfigDefaults(config *rest.Config) error { - gv := v1beta1.SchemeGroupVersion - config.GroupVersion = &gv - config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} - - if config.UserAgent == "" { - config.UserAgent = rest.DefaultKubernetesUserAgent() - } - - return nil -} - -// RESTClient returns a RESTClient that is used to communicate -// with API server by this client implementation. 
-func (c *StorageV1beta1Client) RESTClient() rest.Interface { - if c == nil { - return nil - } - return c.restClient -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go deleted file mode 100644 index fbe1fd4..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/storageclass.go +++ /dev/null @@ -1,147 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// StorageClassesGetter has a method to return a StorageClassInterface. -// A group's client should implement this interface. -type StorageClassesGetter interface { - StorageClasses() StorageClassInterface -} - -// StorageClassInterface has methods to work with StorageClass resources. -type StorageClassInterface interface { - Create(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) - Update(*v1beta1.StorageClass) (*v1beta1.StorageClass, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.StorageClass, error) - List(opts v1.ListOptions) (*v1beta1.StorageClassList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) - StorageClassExpansion -} - -// storageClasses implements StorageClassInterface -type storageClasses struct { - client rest.Interface -} - -// newStorageClasses returns a StorageClasses -func newStorageClasses(c *StorageV1beta1Client) *storageClasses { - return &storageClasses{ - client: c.RESTClient(), - } -} - -// Get takes name of the storageClass, and returns the corresponding storageClass object, and an error if there is any. -func (c *storageClasses) Get(name string, options v1.GetOptions) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Get(). - Resource("storageclasses"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of StorageClasses that match those selectors. -func (c *storageClasses) List(opts v1.ListOptions) (result *v1beta1.StorageClassList, err error) { - result = &v1beta1.StorageClassList{} - err = c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested storageClasses. 
-func (c *storageClasses) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("storageclasses"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a storageClass and creates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Create(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Post(). - Resource("storageclasses"). - Body(storageClass). - Do(). - Into(result) - return -} - -// Update takes the representation of a storageClass and updates it. Returns the server's representation of the storageClass, and an error, if there is any. -func (c *storageClasses) Update(storageClass *v1beta1.StorageClass) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Put(). - Resource("storageclasses"). - Name(storageClass.Name). - Body(storageClass). - Do(). - Into(result) - return -} - -// Delete takes name of the storageClass and deletes it. Returns an error if one occurs. -func (c *storageClasses) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *storageClasses) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("storageclasses"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched storageClass. -func (c *storageClasses) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.StorageClass, err error) { - result = &v1beta1.StorageClass{} - err = c.client.Patch(pt). - Resource("storageclasses"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go b/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go deleted file mode 100644 index 5cd2d39..0000000 --- a/vendor/k8s.io/client-go/kubernetes/typed/storage/v1beta1/volumeattachment.go +++ /dev/null @@ -1,163 +0,0 @@ -/* -Copyright The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -// Code generated by client-gen. DO NOT EDIT. - -package v1beta1 - -import ( - v1beta1 "k8s.io/api/storage/v1beta1" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - types "k8s.io/apimachinery/pkg/types" - watch "k8s.io/apimachinery/pkg/watch" - scheme "k8s.io/client-go/kubernetes/scheme" - rest "k8s.io/client-go/rest" -) - -// VolumeAttachmentsGetter has a method to return a VolumeAttachmentInterface. -// A group's client should implement this interface. 
-type VolumeAttachmentsGetter interface { - VolumeAttachments() VolumeAttachmentInterface -} - -// VolumeAttachmentInterface has methods to work with VolumeAttachment resources. -type VolumeAttachmentInterface interface { - Create(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - Update(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - UpdateStatus(*v1beta1.VolumeAttachment) (*v1beta1.VolumeAttachment, error) - Delete(name string, options *v1.DeleteOptions) error - DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error - Get(name string, options v1.GetOptions) (*v1beta1.VolumeAttachment, error) - List(opts v1.ListOptions) (*v1beta1.VolumeAttachmentList, error) - Watch(opts v1.ListOptions) (watch.Interface, error) - Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) - VolumeAttachmentExpansion -} - -// volumeAttachments implements VolumeAttachmentInterface -type volumeAttachments struct { - client rest.Interface -} - -// newVolumeAttachments returns a VolumeAttachments -func newVolumeAttachments(c *StorageV1beta1Client) *volumeAttachments { - return &volumeAttachments{ - client: c.RESTClient(), - } -} - -// Get takes name of the volumeAttachment, and returns the corresponding volumeAttachment object, and an error if there is any. -func (c *volumeAttachments) Get(name string, options v1.GetOptions) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Get(). - Resource("volumeattachments"). - Name(name). - VersionedParams(&options, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// List takes label and field selectors, and returns the list of VolumeAttachments that match those selectors. -func (c *volumeAttachments) List(opts v1.ListOptions) (result *v1beta1.VolumeAttachmentList, err error) { - result = &v1beta1.VolumeAttachmentList{} - err = c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Do(). - Into(result) - return -} - -// Watch returns a watch.Interface that watches the requested volumeAttachments. -func (c *volumeAttachments) Watch(opts v1.ListOptions) (watch.Interface, error) { - opts.Watch = true - return c.client.Get(). - Resource("volumeattachments"). - VersionedParams(&opts, scheme.ParameterCodec). - Watch() -} - -// Create takes the representation of a volumeAttachment and creates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Create(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Post(). - Resource("volumeattachments"). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// Update takes the representation of a volumeAttachment and updates it. Returns the server's representation of the volumeAttachment, and an error, if there is any. -func (c *volumeAttachments) Update(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// UpdateStatus was generated because the type contains a Status member. -// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
- -func (c *volumeAttachments) UpdateStatus(volumeAttachment *v1beta1.VolumeAttachment) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Put(). - Resource("volumeattachments"). - Name(volumeAttachment.Name). - SubResource("status"). - Body(volumeAttachment). - Do(). - Into(result) - return -} - -// Delete takes name of the volumeAttachment and deletes it. Returns an error if one occurs. -func (c *volumeAttachments) Delete(name string, options *v1.DeleteOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - Name(name). - Body(options). - Do(). - Error() -} - -// DeleteCollection deletes a collection of objects. -func (c *volumeAttachments) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { - return c.client.Delete(). - Resource("volumeattachments"). - VersionedParams(&listOptions, scheme.ParameterCodec). - Body(options). - Do(). - Error() -} - -// Patch applies the patch and returns the patched volumeAttachment. -func (c *volumeAttachments) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1beta1.VolumeAttachment, err error) { - result = &v1beta1.VolumeAttachment{} - err = c.client.Patch(pt). - Resource("volumeattachments"). - SubResource(subresources...). - Name(name). - Body(data). - Do(). - Into(result) - return -} diff --git a/vendor/k8s.io/client-go/tools/reference/ref.go b/vendor/k8s.io/client-go/tools/reference/ref.go deleted file mode 100644 index 573d948..0000000 --- a/vendor/k8s.io/client-go/tools/reference/ref.go +++ /dev/null @@ -1,126 +0,0 @@ -/* -Copyright 2014 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package reference - -import ( - "errors" - "fmt" - "net/url" - "strings" - - "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/api/meta" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" -) - -var ( - // Errors that could be returned by GetReference. - ErrNilObject = errors.New("can't reference a nil object") - ErrNoSelfLink = errors.New("selfLink was empty, can't make reference") -) - -// GetReference returns an ObjectReference which refers to the given -// object, or an error if the object doesn't follow the conventions -// that would allow this. -// TODO: should take a meta.Interface see http://issue.k8s.io/7127 -func GetReference(scheme *runtime.Scheme, obj runtime.Object) (*v1.ObjectReference, error) { - if obj == nil { - return nil, ErrNilObject - } - if ref, ok := obj.(*v1.ObjectReference); ok { - // Don't make a reference to a reference. 
- return ref, nil - } - - gvk := obj.GetObjectKind().GroupVersionKind() - - // if the object referenced is actually persisted, we can just get kind from meta - // if we are building an object reference to something not yet persisted, we should fallback to scheme - kind := gvk.Kind - if len(kind) == 0 { - // TODO: this is wrong - gvks, _, err := scheme.ObjectKinds(obj) - if err != nil { - return nil, err - } - kind = gvks[0].Kind - } - - // An object that implements only List has enough metadata to build a reference - var listMeta metav1.Common - objectMeta, err := meta.Accessor(obj) - if err != nil { - listMeta, err = meta.CommonAccessor(obj) - if err != nil { - return nil, err - } - } else { - listMeta = objectMeta - } - - // if the object referenced is actually persisted, we can also get version from meta - version := gvk.GroupVersion().String() - if len(version) == 0 { - selfLink := listMeta.GetSelfLink() - if len(selfLink) == 0 { - return nil, ErrNoSelfLink - } - selfLinkUrl, err := url.Parse(selfLink) - if err != nil { - return nil, err - } - // example paths: ///* - parts := strings.Split(selfLinkUrl.Path, "/") - if len(parts) < 4 { - return nil, fmt.Errorf("unexpected self link format: '%v'; got version '%v'", selfLink, version) - } - if parts[1] == "api" { - version = parts[2] - } else { - version = parts[2] + "/" + parts[3] - } - } - - // only has list metadata - if objectMeta == nil { - return &v1.ObjectReference{ - Kind: kind, - APIVersion: version, - ResourceVersion: listMeta.GetResourceVersion(), - }, nil - } - - return &v1.ObjectReference{ - Kind: kind, - APIVersion: version, - Name: objectMeta.GetName(), - Namespace: objectMeta.GetNamespace(), - UID: objectMeta.GetUID(), - ResourceVersion: objectMeta.GetResourceVersion(), - }, nil -} - -// GetPartialReference is exactly like GetReference, but allows you to set the FieldPath. 
-func GetPartialReference(scheme *runtime.Scheme, obj runtime.Object, fieldPath string) (*v1.ObjectReference, error) { - ref, err := GetReference(scheme, obj) - if err != nil { - return nil, err - } - ref.FieldPath = fieldPath - return ref, nil -} diff --git a/vendor/modules.txt b/vendor/modules.txt index a353e95..0823afb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -31,27 +31,13 @@ github.com/gogo/protobuf/sortkeys github.com/golang/glog # github.com/golang/protobuf v1.4.2 github.com/golang/protobuf/proto -github.com/golang/protobuf/ptypes -github.com/golang/protobuf/ptypes/any -github.com/golang/protobuf/ptypes/duration -github.com/golang/protobuf/ptypes/timestamp # github.com/gomodule/redigo v2.0.0+incompatible github.com/gomodule/redigo/internal github.com/gomodule/redigo/redis -# github.com/google/btree v1.0.0 -github.com/google/btree # github.com/google/gofuzz v1.1.0 github.com/google/gofuzz # github.com/google/uuid v1.1.1 github.com/google/uuid -# github.com/googleapis/gnostic v0.5.1 -github.com/googleapis/gnostic/OpenAPIv2 -github.com/googleapis/gnostic/compiler -github.com/googleapis/gnostic/extensions -github.com/googleapis/gnostic/jsonschema -# github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 -github.com/gregjones/httpcache -github.com/gregjones/httpcache/diskcache # github.com/hashicorp/golang-lru v0.5.3 github.com/hashicorp/golang-lru github.com/hashicorp/golang-lru/simplelru @@ -73,8 +59,6 @@ github.com/modern-go/reflect2 github.com/opentracing/opentracing-go github.com/opentracing/opentracing-go/ext github.com/opentracing/opentracing-go/log -# github.com/peterbourgon/diskv v2.0.1+incompatible -github.com/peterbourgon/diskv # github.com/prometheus/client_golang v1.1.0 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal @@ -159,56 +143,17 @@ google.golang.org/protobuf/reflect/protoreflect google.golang.org/protobuf/reflect/protoregistry google.golang.org/protobuf/runtime/protoiface google.golang.org/protobuf/runtime/protoimpl -google.golang.org/protobuf/types/known/anypb -google.golang.org/protobuf/types/known/durationpb -google.golang.org/protobuf/types/known/timestamppb # gopkg.in/go-playground/validator.v8 v8.18.2 gopkg.in/go-playground/validator.v8 # gopkg.in/inf.v0 v0.9.1 gopkg.in/inf.v0 # gopkg.in/yaml.v2 v2.2.8 gopkg.in/yaml.v2 -# gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 -gopkg.in/yaml.v3 -# k8s.io/api v0.0.0-20190708094356-59223ed9f6ce -k8s.io/api/admissionregistration/v1alpha1 -k8s.io/api/admissionregistration/v1beta1 -k8s.io/api/apps/v1 -k8s.io/api/apps/v1beta1 -k8s.io/api/apps/v1beta2 -k8s.io/api/authentication/v1 -k8s.io/api/authentication/v1beta1 -k8s.io/api/authorization/v1 -k8s.io/api/authorization/v1beta1 -k8s.io/api/autoscaling/v1 -k8s.io/api/autoscaling/v2beta1 -k8s.io/api/autoscaling/v2beta2 -k8s.io/api/batch/v1 -k8s.io/api/batch/v1beta1 -k8s.io/api/batch/v2alpha1 -k8s.io/api/certificates/v1beta1 -k8s.io/api/coordination/v1beta1 -k8s.io/api/core/v1 -k8s.io/api/events/v1beta1 -k8s.io/api/extensions/v1beta1 -k8s.io/api/networking/v1 -k8s.io/api/policy/v1beta1 -k8s.io/api/rbac/v1 -k8s.io/api/rbac/v1alpha1 -k8s.io/api/rbac/v1beta1 -k8s.io/api/scheduling/v1alpha1 -k8s.io/api/scheduling/v1beta1 -k8s.io/api/settings/v1alpha1 -k8s.io/api/storage/v1 -k8s.io/api/storage/v1alpha1 -k8s.io/api/storage/v1beta1 # k8s.io/apimachinery v0.0.0-20190221084156-01f179d85dbc k8s.io/apimachinery/pkg/api/errors -k8s.io/apimachinery/pkg/api/meta k8s.io/apimachinery/pkg/api/resource 
k8s.io/apimachinery/pkg/apis/meta/v1 k8s.io/apimachinery/pkg/apis/meta/v1/unstructured -k8s.io/apimachinery/pkg/apis/meta/v1beta1 k8s.io/apimachinery/pkg/conversion k8s.io/apimachinery/pkg/conversion/queryparams k8s.io/apimachinery/pkg/fields @@ -239,40 +184,6 @@ k8s.io/apimachinery/pkg/version k8s.io/apimachinery/pkg/watch k8s.io/apimachinery/third_party/forked/golang/reflect # k8s.io/client-go v0.0.0-20190708094436-77c08c6b86df -k8s.io/client-go/discovery -k8s.io/client-go/kubernetes -k8s.io/client-go/kubernetes/scheme -k8s.io/client-go/kubernetes/typed/admissionregistration/v1alpha1 -k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1 -k8s.io/client-go/kubernetes/typed/apps/v1 -k8s.io/client-go/kubernetes/typed/apps/v1beta1 -k8s.io/client-go/kubernetes/typed/apps/v1beta2 -k8s.io/client-go/kubernetes/typed/authentication/v1 -k8s.io/client-go/kubernetes/typed/authentication/v1beta1 -k8s.io/client-go/kubernetes/typed/authorization/v1 -k8s.io/client-go/kubernetes/typed/authorization/v1beta1 -k8s.io/client-go/kubernetes/typed/autoscaling/v1 -k8s.io/client-go/kubernetes/typed/autoscaling/v2beta1 -k8s.io/client-go/kubernetes/typed/autoscaling/v2beta2 -k8s.io/client-go/kubernetes/typed/batch/v1 -k8s.io/client-go/kubernetes/typed/batch/v1beta1 -k8s.io/client-go/kubernetes/typed/batch/v2alpha1 -k8s.io/client-go/kubernetes/typed/certificates/v1beta1 -k8s.io/client-go/kubernetes/typed/coordination/v1beta1 -k8s.io/client-go/kubernetes/typed/core/v1 -k8s.io/client-go/kubernetes/typed/events/v1beta1 -k8s.io/client-go/kubernetes/typed/extensions/v1beta1 -k8s.io/client-go/kubernetes/typed/networking/v1 -k8s.io/client-go/kubernetes/typed/policy/v1beta1 -k8s.io/client-go/kubernetes/typed/rbac/v1 -k8s.io/client-go/kubernetes/typed/rbac/v1alpha1 -k8s.io/client-go/kubernetes/typed/rbac/v1beta1 -k8s.io/client-go/kubernetes/typed/scheduling/v1alpha1 -k8s.io/client-go/kubernetes/typed/scheduling/v1beta1 -k8s.io/client-go/kubernetes/typed/settings/v1alpha1 -k8s.io/client-go/kubernetes/typed/storage/v1 -k8s.io/client-go/kubernetes/typed/storage/v1alpha1 -k8s.io/client-go/kubernetes/typed/storage/v1beta1 k8s.io/client-go/pkg/apis/clientauthentication k8s.io/client-go/pkg/apis/clientauthentication/v1alpha1 k8s.io/client-go/pkg/apis/clientauthentication/v1beta1 @@ -282,7 +193,6 @@ k8s.io/client-go/rest k8s.io/client-go/rest/watch k8s.io/client-go/tools/clientcmd/api k8s.io/client-go/tools/metrics -k8s.io/client-go/tools/reference k8s.io/client-go/transport k8s.io/client-go/util/cert k8s.io/client-go/util/connrotation